Add /proc/zoneinfo file to display information about memory zones.

 fs/proc/proc_misc.c |   15 +++++++++
 mm/page_alloc.c     |   86 +++++++++++++++++++++++++++++++++++++++++++--------
 2 files changed, 87 insertions(+), 14 deletions(-)

diff -puN fs/proc/proc_misc.c~zoneinfo fs/proc/proc_misc.c
--- bk-linux/fs/proc/proc_misc.c~zoneinfo	2004-11-08 15:08:12.139197144 +0300
+++ bk-linux-nikita/fs/proc/proc_misc.c	2004-11-08 15:08:12.146196080 +0300
@@ -255,6 +255,20 @@ static struct file_operations fragmentat
 	.release	= seq_release,
 };
 
+extern struct seq_operations zoneinfo_op;
+static int zoneinfo_open(struct inode *inode, struct file *file)
+{
+	(void)inode;
+	return seq_open(file, &zoneinfo_op);
+}
+
+static struct file_operations proc_zoneinfo_file_operations = {
+	.open		= zoneinfo_open,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= seq_release,
+};
+
 static int version_read_proc(char *page, char **start, off_t off,
 				 int count, int *eof, void *data)
 {
@@ -621,6 +635,7 @@ void __init proc_misc_init(void)
 	create_seq_entry("slabinfo",S_IWUSR|S_IRUGO,&proc_slabinfo_operations);
 	create_seq_entry("buddyinfo",S_IRUGO, &fragmentation_file_operations);
 	create_seq_entry("vmstat",S_IRUGO, &proc_vmstat_file_operations);
+	create_seq_entry("zoneinfo",S_IRUGO, &proc_zoneinfo_file_operations);
 	create_seq_entry("diskstats", 0, &proc_diskstats_operations);
 #ifdef CONFIG_MODULES
 	create_seq_entry("modules", 0, &proc_modules_operations);
diff -puN mm/page_alloc.c~zoneinfo mm/page_alloc.c
--- bk-linux/mm/page_alloc.c~zoneinfo	2004-11-08 15:08:12.143196536 +0300
+++ bk-linux-nikita/mm/page_alloc.c	2004-11-08 15:08:12.158194256 +0300
@@ -171,10 +171,10 @@ static void destroy_compound_page(struct
  * At each level, we keep one bit for each pair of blocks, which
  * is set to 1 iff only one of the pair is allocated.  So when we
  * are allocating or freeing one, we can derive the state of the
- * other.  That is, if we allocate a small block, and both were 
- * free, the remainder of the region must be split into blocks. 
+ * other.  That is, if we allocate a small block, and both were
+ * free, the remainder of the region must be split into blocks.
  * If a block is freed, and its buddy is also free, then this
- * triggers coalescing into a block of larger size. 
+ * triggers coalescing into a block of larger size.
  *
  * -- wli
  */
@@ -238,7 +238,7 @@ static inline void free_pages_check(cons
 }
 
 /*
- * Frees a list of pages. 
+ * Frees a list of pages.
  * Assumes all pages on list are in same zone, and of same order.
  * count is the number of pages to free, or 0 for all on the list.
  *
@@ -362,7 +362,7 @@ static void prep_new_page(struct page *p
 	set_page_refs(page, order);
 }
 
-/* 
+/*
  * Do the hard work of removing an element from the buddy allocator.
  * Call me with the zone->lock already held.
  */
@@ -390,12 +390,12 @@ static struct page *__rmqueue(struct zon
 	return NULL;
 }
 
-/* 
+/*
  * Obtain a specified number of elements from the buddy allocator, all under
  * a single hold of the lock, for efficiency.  Add them to the supplied list.
  * Returns the number of new pages which were placed at *list.
  */
-static int rmqueue_bulk(struct zone *zone, unsigned int order, 
+static int rmqueue_bulk(struct zone *zone, unsigned int order,
 			unsigned long count, struct list_head *list)
 {
 	unsigned long flags;
@@ -1316,7 +1316,7 @@ static void __init build_zonelists(pg_da
 			j = build_zonelists_node(NODE_DATA(node), zonelist, j, k);
 		for (node = 0; node < local_node; node++)
 			j = build_zonelists_node(NODE_DATA(node), zonelist, j, k);
- 
+
 		zonelist->zones[j] = NULL;
 	}
 }
@@ -1650,8 +1650,8 @@ static void frag_stop(struct seq_file *m
 {
 }
 
-/* 
- * This walks the freelist for each zone. Whilst this is slow, I'd rather 
+/*
+ * This walks the freelist for each zone. Whilst this is slow, I'd rather
  * be slow here than slow down the fast path by keeping stats - mjbligh
  */
 static int frag_show(struct seq_file *m, void *arg)
@@ -1689,6 +1689,64 @@ struct seq_operations fragmentation_op =
 	.show	= frag_show,
 };
 
+/*
+ * Output information about zones in @pgdat.
+ */
+static int zoneinfo_show(struct seq_file *m, void *arg)
+{
+	pg_data_t *pgdat = (pg_data_t *)arg;
+	struct zone *zone;
+	struct zone *node_zones = pgdat->node_zones;
+	unsigned long flags;
+
+	for (zone = node_zones; zone - node_zones < MAX_NR_ZONES; ++zone) {
+		if (!zone->present_pages)
+			continue;
+
+		spin_lock_irqsave(&zone->lock, flags);
+		seq_printf(m, "Node %d, zone %8s", pgdat->node_id, zone->name);
+		seq_printf(m,
+			   "\n\tpages free %lu"
+			   "\n\tpages min %lu"
+			   "\n\tpages low %lu"
+			   "\n\tpages high %lu"
+			   "\n\tpages active %lu"
+			   "\n\tpages inactive %lu"
+			   "\n\tpages scanned %lu"
+			   "\n\tpages spanned %lu"
+			   "\n\tpages present %lu",
+			   zone->free_pages,
+			   zone->pages_min,
+			   zone->pages_low,
+			   zone->pages_high,
+			   zone->nr_active,
+			   zone->nr_inactive,
+			   zone->pages_scanned,
+			   zone->spanned_pages,
+			   zone->present_pages);
+		seq_printf(m,
+			   "\n\tnr_scan_active %lu"
+			   "\n\tnr_scan_inactive %lu"
+			   "\n\tall_unreclaimable: %u"
+			   "\n\tprev_priority: %i"
+			   "\n\ttemp_priority: %i",
+			   zone->nr_scan_active, zone->nr_scan_inactive,
+			   zone->all_unreclaimable,
+			   zone->prev_priority, zone->temp_priority);
+		spin_unlock_irqrestore(&zone->lock, flags);
+		seq_putc(m, '\n');
+	}
+	return 0;
+}
+
+struct seq_operations zoneinfo_op = {
+	.start	= frag_start,	/* iterate over all zones. The same as in
+				 * fragmentation. */
+	.next	= frag_next,
+	.stop	= frag_stop,
+	.show	= zoneinfo_show,
+};
+
 static char *vmstat_text[] = {
 	"nr_dirty",
 	"nr_writeback",
@@ -1897,8 +1955,8 @@ static void setup_per_zone_protection(vo
 }
 
 /*
- * setup_per_zone_pages_min - called when min_free_kbytes changes.  Ensures 
- *	that the pages_{min,low,high} values for each zone are set correctly 
+ * setup_per_zone_pages_min - called when min_free_kbytes changes.  Ensures
+ *	that the pages_{min,low,high} values for each zone are set correctly
  *	with respect to min_free_kbytes.
  */
 static void setup_per_zone_pages_min(void)
@@ -1932,10 +1990,10 @@ static void setup_per_zone_pages_min(voi
 			min_pages = 128;
 		zone->pages_min = min_pages;
 	} else {
-		/* if it's a lowmem zone, reserve a number of pages 
+		/* if it's a lowmem zone, reserve a number of pages
 		 * proportionate to the zone's size.
 		 */
-		zone->pages_min = (pages_min * zone->present_pages) / 
+		zone->pages_min = (pages_min * zone->present_pages) /
 				  lowmem_pages;
 	}
 
_
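
For illustration, this is the shape of output one can expect from
"cat /proc/zoneinfo" with the patch applied.  The layout follows directly
from the seq_printf() format strings in zoneinfo_show() above ("%8s"
right-pads the zone name); the node id, zone name and every count below
are made-up placeholder values, not measurements from a real machine:

Node 0, zone   Normal
	pages free 2340
	pages min 255
	pages low 510
	pages high 765
	pages active 41320
	pages inactive 12021
	pages scanned 0
	pages spanned 126960
	pages present 126960
	nr_scan_active 0
	nr_scan_inactive 0
	all_unreclaimable: 0
	prev_priority: 12
	temp_priority: 12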