Index: linux-2.6.15-rc5/mm/page_alloc.c =================================================================== --- linux-2.6.15-rc5.orig/mm/page_alloc.c 2005-12-08 16:17:04.000000000 -0800 +++ linux-2.6.15-rc5/mm/page_alloc.c 2005-12-08 17:05:39.000000000 -0800 @@ -556,7 +556,6 @@ static int rmqueue_bulk(struct zone *zon return allocated; } -#ifdef CONFIG_NUMA atomic_long_t vm_stat[NR_STAT_ITEMS]; /* @@ -565,11 +564,13 @@ atomic_long_t vm_stat[NR_STAT_ITEMS]; * * Preemption is disabled. */ -void mod_node_page_state_update(long x, struct zone *zone, enum node_stat_item item) +void zone_page_state_update(long x, struct zone *zone, enum zone_stat_item item) { + struct per_cpu_pageset *pcp = zone_pcp(zone, raw_smp_processor_id()); + atomic_long_add(x, &zone->vm_stat[item]); atomic_long_add(x, &vm_stat[item]); - __local_add(-x, &zone->pageset[raw_processor_id()]->vm_stat_diff[item]); + local_set(&pcp->vm_stat_diff[item], 0); } /* @@ -581,14 +582,19 @@ void refresh_cpu_vm_stats(void) struct zone *zone; int i; - for_each_zone(zone) + preempt_disable(); + for_each_zone(zone) { + struct per_cpu_pageset *pcp = zone_pcp(zone, raw_smp_processor_id()); + for(i = 0; i < NR_STAT_ITEMS; i++) { long v; - v = local_read(zone->vm_stat_diff[smp_processor_id()][i]); + v = local_read(&pcp->vm_stat_diff[i]); if (v) - mod_node_page_state_update(v, smp_processor_id(), i); + zone_page_state_update(v, zone, i); } + } + preempt_enable(); } static void __refresh_cpu_vm_stats(void *dummy) @@ -608,6 +614,19 @@ void refresh_vm_stats(void) schedule_on_each_cpu(__refresh_cpu_vm_stats, NULL); } +unsigned long get_vm_stat_node_counts(enum zone_stat_item item, int node) +{ + struct zone *zones = NODE_DATA(node)->node_zones; + int i; + unsigned long v = 0; + + for (i = 0; i < MAX_NR_ZONES; i++) + v += atomic_long_read(&zones[i].vm_stat[item]); + return v; +} + +#ifdef CONFIG_NUMA + /* Called from the slab reaper to drain remote pagesets */ void drain_remote_pages(void) { Index: 
linux-2.6.15-rc5/include/linux/page-flags.h =================================================================== --- linux-2.6.15-rc5.orig/include/linux/page-flags.h 2005-12-08 16:17:02.000000000 -0800 +++ linux-2.6.15-rc5/include/linux/page-flags.h 2005-12-08 16:57:51.000000000 -0800 @@ -172,12 +172,14 @@ extern atomic_long_t vm_stat[NR_STAT_ITE -#define global_page_state(__x) atomic_read(&vm_stat[__x]) -#define zone_page_state(__z,__x) atomic_read(&zone_pcp(__z, smp_processor_id())->vm_stat[__x]) +#define global_page_state(__x) atomic_long_read(&vm_stat[__x]) +#define zone_page_state(__z,__x) atomic_long_read(&(__z)->vm_stat[__x]) -extern void zone_page_state_update(long x, struct zone *zone, enum node_stat_item item); +extern unsigned long get_vm_stat_node_counts(enum zone_stat_item, int node); +extern void zone_page_state_update(long x, struct zone *zone, enum zone_stat_item item); + /* * For use when we know that preemption is disabled. Avoids a potential atomic * operations on platforms without the magic inc/dec of i386 and x86_64. */ -static inline void __mod_zone_page_state(struct zone *zone, enum node_stat_item item, int delta) +static inline void __mod_zone_page_state(struct zone *zone, enum zone_stat_item item, int delta) { local_t *p = &zone_pcp(zone, raw_smp_processor_id())->vm_stat_diff[item]; long x; @@ -192,22 +194,22 @@ static inline void __mod_zone_page_state /* * For use when preemption is enabled. 
*/ -static inline void mod_zone_page_state(struct zone *zone, enum node_stat_item item, int delta) +static inline void mod_zone_page_state(struct zone *zone, enum zone_stat_item item, int delta) { preempt_disable(); __mod_zone_page_state(zone, item, delta); -preempt_disable(); +preempt_enable(); } -#define inc_zone_page_state(node, item) mod_zone_page_state(node, item, 1) -#define dec_zone_page_state(node, item) mod_zone_page_state(node, item, -1) -#define add_zone_page_state(node, item) mod_zone_page_state(node, item, delta) -#define sub_zone_page_state(node, item) mod_zone_page_state(node, item, -(delta)) - -#define __inc_zone_page_state(node, item) __mod_zone_page_state(node, item, 1) -#define __dec_zone_page_state(node, item) __mod_zone_page_state(node, item, -1) -#define __add_zone_page_state(node, item) __mod_zone_page_state(node, item, delta) -#define __sub_zone_page_state(node, item) __mod_zone_page_state(node, item, -(delta)) +#define inc_zone_page_state(zone, item) mod_zone_page_state(zone, item, 1) +#define dec_zone_page_state(zone, item) mod_zone_page_state(zone, item, -1) +#define add_zone_page_state(zone, item, delta) mod_zone_page_state(zone, item, delta) +#define sub_zone_page_state(zone, item, delta) mod_zone_page_state(zone, item, -(delta)) + +#define __inc_zone_page_state(zone, item) __mod_zone_page_state(zone, item, 1) +#define __dec_zone_page_state(zone, item) __mod_zone_page_state(zone, item, -1) +#define __add_zone_page_state(zone, item, delta) __mod_zone_page_state(zone, item, delta) +#define __sub_zone_page_state(zone, item, delta) __mod_zone_page_state(zone, item, -(delta)) /* * Manipulation of page state flags Index: linux-2.6.15-rc5/drivers/base/node.c =================================================================== --- linux-2.6.15-rc5.orig/drivers/base/node.c 2005-12-08 16:17:02.000000000 -0800 +++ linux-2.6.15-rc5/drivers/base/node.c 2005-12-08 17:01:24.000000000 -0800 @@ -43,10 +43,12 @@ static ssize_t node_read_meminfo(struct unsigned long inactive; unsigned long active; 
unsigned long free; + unsigned long nr_mapped; si_meminfo_node(&i, nid); get_page_state_node(&ps, nid); __get_zone_counts(&active, &inactive, &free, NODE_DATA(nid)); + nr_mapped = get_vm_stat_node_counts(NR_MAPPED, nid); /* Check for negative values in these approximate counters */ if ((long)ps.nr_dirty < 0) @@ -81,7 +83,7 @@ static ssize_t node_read_meminfo(struct nid, K(i.freeram - i.freehigh), nid, K(ps.nr_dirty), nid, K(ps.nr_writeback), - nid, K(node_page_state(nid, NR_MAPPED)), + nid, K(nr_mapped), nid, K(ps.nr_slab)); n += hugetlb_report_node_meminfo(nid, buf + n); return n; Index: linux-2.6.15-rc5/include/linux/mmzone.h =================================================================== --- linux-2.6.15-rc5.orig/include/linux/mmzone.h 2005-12-08 16:12:07.000000000 -0800 +++ linux-2.6.15-rc5/include/linux/mmzone.h 2005-12-08 16:58:24.000000000 -0800 @@ -45,7 +45,7 @@ struct zone_padding { #define ZONE_PADDING(name) #endif -enum node_stat_item { NR_MAPPED, NR_PAGECACHE }; +enum zone_stat_item { NR_MAPPED, NR_PAGECACHE }; #define NR_STAT_ITEMS 2 struct per_cpu_pages { @@ -58,17 +58,15 @@ struct per_cpu_pages { struct per_cpu_pageset { struct per_cpu_pages pcp[2]; /* 0: hot. 
1: cold */ -#ifdef CONFIG_NUMA local_t vm_stat_diff[NR_STAT_ITEMS]; +#ifdef CONFIG_NUMA unsigned long numa_hit; /* allocated in intended node */ unsigned long numa_miss; /* allocated in non intended node */ unsigned long numa_foreign; /* was intended here, hit elsewhere */ unsigned long interleave_hit; /* interleaver prefered this zone */ unsigned long local_node; /* allocation from local node */ unsigned long other_node; /* allocation from other node */ - - atomic_long_t vm_stat[NR_STAT_ITEMS]; #endif } ____cacheline_aligned_in_smp; @@ -158,6 +156,8 @@ struct zone { unsigned long pages_scanned; /* since last reclaim */ int all_unreclaimable; /* All pages pinned */ + /* Zone statistics */ + atomic_long_t vm_stat[NR_STAT_ITEMS]; /* * Does the allocator try to reclaim pages from the zone as soon * as it fails a watermark_ok() in __alloc_pages? Index: linux-2.6.15-rc5/mm/filemap.c =================================================================== --- linux-2.6.15-rc5.orig/mm/filemap.c 2005-12-08 16:17:04.000000000 -0800 +++ linux-2.6.15-rc5/mm/filemap.c 2005-12-08 16:36:17.000000000 -0800 @@ -115,7 +115,7 @@ void __remove_from_page_cache(struct pag radix_tree_delete(&mapping->page_tree, page->index); page->mapping = NULL; mapping->nrpages--; - __dec_node_page_state(page_zone(page), NR_PAGECACHE); + __dec_zone_page_state(page_zone(page), NR_PAGECACHE); } void remove_from_page_cache(struct page *page)