Index: linux-2.6.17-mm2/include/linux/vmstat.h
===================================================================
--- linux-2.6.17-mm2.orig/include/linux/vmstat.h	2006-06-24 17:25:22.342006571 -0700
+++ linux-2.6.17-mm2/include/linux/vmstat.h	2006-06-26 18:24:13.638105694 -0700
@@ -105,6 +105,78 @@ static inline unsigned long zone_page_st
 	return x;
 }
 
+#ifdef CONFIG_SMP
+/*
+ * A higher cpu count means a higher possibility of contention.
+ * We increase the threshold as the number of processors increases.
+ */
+#if NR_CPUS <= 4
+#define STAT_THRESHOLD 32
+#else
+#if NR_CPUS <= 16
+#define STAT_THRESHOLD 64
+#else
+#define STAT_THRESHOLD 126
+#endif
+#endif
+
+extern void zone_state_update_counters(struct zone *z,
+					struct per_cpu_pageset *pcp);
+
+static inline void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
+{
+	struct per_cpu_pageset *pcp = zone_pcp(zone, smp_processor_id());
+	s8 *p = &pcp->vm_stat_diff[item];
+
+	(*p)++;
+	if (unlikely(*p > STAT_THRESHOLD))
+		zone_state_update_counters(zone, pcp);
+}
+
+static inline void __inc_zone_page_state(struct page *page, enum zone_stat_item item)
+{
+	__inc_zone_state(page_zone(page), item);
+}
+
+static inline void __dec_zone_page_state(struct page *page, enum zone_stat_item item)
+{
+	struct zone *zone = page_zone(page);
+	struct per_cpu_pageset *pcp = zone_pcp(zone, smp_processor_id());
+	s8 *p = &pcp->vm_stat_diff[item];
+
+	(*p)--;
+	if (unlikely(*p < -STAT_THRESHOLD))
+		zone_state_update_counters(zone, pcp);
+}
+
+void __mod_zone_page_state(struct zone *, enum zone_stat_item item, int);
+
+#else
+
+static inline void zone_page_state_add(long x, struct zone *zone,
+				enum zone_stat_item item)
+{
+	atomic_long_add(x, &zone->vm_stat[item]);
+	atomic_long_add(x, &vm_stat[item]);
+}
+
+static inline void __mod_zone_page_state(struct zone *zone,
+				enum zone_stat_item item, int delta)
+{
+	zone_page_state_add(delta, zone, item);
+}
+
+static inline void __inc_zone_page_state(struct page *page, enum zone_stat_item item)
+{
+	zone_page_state_add(1, page_zone(page), item);
+}
+
+static inline void __dec_zone_page_state(struct page *page, enum zone_stat_item item)
+{
+	zone_page_state_add(-1, page_zone(page), item);
+}
+#endif
+
 #ifdef CONFIG_NUMA
 /*
  * Determine the per node value of a stat item. This function
@@ -138,9 +210,6 @@ extern void zone_statistics(struct zonel
 
 #endif /* CONFIG_NUMA */
 
-void __mod_zone_page_state(struct zone *, enum zone_stat_item item, int);
-void __inc_zone_page_state(struct page *, enum zone_stat_item);
-void __dec_zone_page_state(struct page *, enum zone_stat_item);
 #define __add_zone_page_state(__z, __i, __d) \
 		__mod_zone_page_state(__z, __i, __d)
 
Index: linux-2.6.17-mm2/mm/vmstat.c
===================================================================
--- linux-2.6.17-mm2.orig/mm/vmstat.c	2006-06-26 18:11:02.582542076 -0700
+++ linux-2.6.17-mm2/mm/vmstat.c	2006-06-26 18:17:10.837109533 -0700
@@ -113,23 +113,9 @@ atomic_long_t vm_stat[NR_VM_ZONE_STAT_IT
 
 #ifdef CONFIG_SMP
 
 /*
- * A higher cpu count means a higher possbility for contention.
- * We increase the threshhold as the number of processors increase.
- */
-#if NR_CPUS <= 4
-#define STAT_THRESHOLD 32
-#else
-#if NR_CPUS <= 16
-#define STAT_THRESHOLD 64
-#else
-#define STAT_THRESHOLD 126
-#endif
-#endif
-
-/*
  * Add a value to the global and zone counter.
  */
-static inline void consolidate_counter(struct zone *zone,
+static inline void zone_state_update_counter(struct zone *zone,
 					enum zone_stat_item item, long x)
 {
 	atomic_long_add(x, zone->vm_stat + item);
@@ -143,7 +129,7 @@ static inline void consolidate_counter(s
  *
  * Caller must have disabled interrupts.
  */
-static void consolidate_counters(struct zone *z, struct per_cpu_pageset *pcp)
+void zone_state_update_counters(struct zone *z, struct per_cpu_pageset *pcp)
 {
 	/*
 	 * The global cachelines may be heavily contended.
@@ -159,7 +145,7 @@ static void consolidate_counters(struct 
 
 	p = pcp->vm_stat_diff;
 	for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++) {
-		consolidate_counter(z, i, p[i]);
+		zone_state_update_counter(z, i, p[i]);
 		p[i] = 0;
 	}
 }
@@ -185,9 +171,9 @@ void __mod_zone_page_state(struct zone *
 		 * add the large count directly.
 		 */
 		if (*p)
-			consolidate_counters(zone, pcp);
+			zone_state_update_counters(zone, pcp);
 
-		consolidate_counter(zone, item, delta);
+		zone_state_update_counter(zone, item, delta);
 	} else
 		*p = x;
 }
@@ -207,48 +193,6 @@ void mod_zone_page_state(struct zone *zo
 }
 EXPORT_SYMBOL(mod_zone_page_state);
 
-/*
- * Optimized increment and decrement functions.
- *
- * These are only for a single page and therefore can take a struct page *
- * argument instead of struct zone *. This allows the inclusion of the code
- * generated for page_zone(page) into the optimized functions.
- *
- * No overflow check is necessary and therefore the differential can be
- * incremented or decremented in place which may allow the compilers to
- * generate better code.
- *
- * The increment or decrement is known and therefore one boundary check can
- * be omitted.
- */
-static void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
-{
-	struct per_cpu_pageset *pcp = zone_pcp(zone, smp_processor_id());
-	s8 *p = &pcp->vm_stat_diff[item];
-
-	(*p)++;
-	if (unlikely(*p > STAT_THRESHOLD))
-		consolidate_counters(zone, pcp);
-}
-
-void __inc_zone_page_state(struct page *page, enum zone_stat_item item)
-{
-	__inc_zone_state(page_zone(page), item);
-}
-EXPORT_SYMBOL(__inc_zone_page_state);
-
-void __dec_zone_page_state(struct page *page, enum zone_stat_item item)
-{
-	struct zone *zone = page_zone(page);
-	struct per_cpu_pageset *pcp = zone_pcp(zone, smp_processor_id());
-	s8 *p = &pcp->vm_stat_diff[item];
-
-	(*p)--;
-	if (unlikely(*p < -STAT_THRESHOLD))
-		consolidate_counters(zone, pcp);
-}
-EXPORT_SYMBOL(__dec_zone_page_state);
-
 void inc_zone_state(struct zone *zone, enum zone_stat_item item)
 {
 	unsigned long flags;
@@ -285,7 +229,7 @@ void dec_zone_page_state(struct page *pa
 
 	(*p)--;
 	if (unlikely(*p < -STAT_THRESHOLD))
-		consolidate_counters(zone, pcp);
+		zone_state_update_counters(zone, pcp);
 	local_irq_restore(flags);
 }
 
@@ -311,7 +255,7 @@ void refresh_cpu_vm_stats(int cpu)
 			x = pcp->vm_stat_diff[i];
 			if (x) {
 				local_irq_save(flags);
-				consolidate_counter(zone, i, x);
+				zone_state_update_counter(zone, i, x);
 				local_irq_restore(flags);
 			}
 		}
@@ -337,25 +281,6 @@ EXPORT_SYMBOL(refresh_vm_stats);
 
 #else /* CONFIG_SMP */
 
-static inline void zone_page_state_add(long x, struct zone *zone,
-				enum zone_stat_item item)
-{
-	atomic_long_add(x, &zone->vm_stat[item]);
-	atomic_long_add(x, &vm_stat[item]);
-}
-
-
-/*
- * We do not maintain differentials in a single processor configuration.
- * The functions directly modify the zone and global counters.
- */
-void __mod_zone_page_state(struct zone *zone, enum zone_stat_item item,
-			int delta)
-{
-	zone_page_state_add(delta, zone, item);
-}
-EXPORT_SYMBOL(__mod_zone_page_state);
-
 void mod_zone_page_state(struct zone *zone, enum zone_stat_item item,
 			int delta)
 {
@@ -367,18 +292,6 @@ void mod_zone_page_state(struct zo
 }
 EXPORT_SYMBOL(mod_zone_page_state);
 
-void __inc_zone_page_state(struct page *page, enum zone_stat_item item)
-{
-	zone_page_state_add(1, page_zone(page), item);
-}
-EXPORT_SYMBOL(__inc_zone_page_state);
-
-void __dec_zone_page_state(struct page *page, enum zone_stat_item item)
-{
-	zone_page_state_add(-1, page_zone(page), item);
-}
-EXPORT_SYMBOL(__dec_zone_page_state);
-
 void inc_zone_page_state(struct page *page, enum zone_stat_item item)
 {
 	unsigned long flags;
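
The batching scheme the patch moves into the header is easiest to see in
isolation: each CPU accumulates small deltas in a signed 8-bit differential
and folds them into the shared counter only once the differential crosses
STAT_THRESHOLD, so the fast path stays off the contended global cachelines.
Below is a minimal stand-alone model of that technique in user-space C; the
names (model_inc, fold_diff, global_count, cpu_diff) are invented for
illustration and are not part of the kernel API.

#include <stdio.h>

#define NR_MODEL_CPUS	4	/* stands in for NR_CPUS */
#define STAT_THRESHOLD	32	/* the NR_CPUS <= 4 value from the patch */

static long global_count;			/* models vm_stat[item] */
static signed char cpu_diff[NR_MODEL_CPUS];	/* models pcp->vm_stat_diff[item] */

/*
 * Fold one CPU's differential into the shared counter, the way
 * zone_state_update_counters() does for a whole pageset.
 */
static void fold_diff(int cpu)
{
	global_count += cpu_diff[cpu];
	cpu_diff[cpu] = 0;
}

/*
 * Fast-path increment, as in __inc_zone_state(): touch only the
 * per-CPU differential unless it has grown past the threshold.
 */
static void model_inc(int cpu)
{
	if (++cpu_diff[cpu] > STAT_THRESHOLD)
		fold_diff(cpu);
}

int main(void)
{
	int i;

	for (i = 0; i < 100; i++)
		model_inc(0);		/* 100 increments on "CPU" 0 */
	fold_diff(0);			/* models refresh_cpu_vm_stats() */
	printf("global_count = %ld\n", global_count);	/* prints 100 */
	return 0;
}

Running the model prints global_count = 100: the folds triggered by the
threshold plus the final explicit fold account for every increment, which is
the same guarantee refresh_cpu_vm_stats() provides for the real counters.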
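The NR_CPUS-scaled thresholds also bound how stale a counter read can get:
each CPU holds at most STAT_THRESHOLD unfolded deltas per item, so a read of
the global counter can deviate by roughly NR_CPUS * STAT_THRESHOLD pages. On
a 16-way machine with STAT_THRESHOLD 64 that is 16 * 64 = 1024 pages, or
4 MB with 4 KiB pages. The top value of 126 is about the largest threshold
that still keeps the differential within the range of the s8 vm_stat_diff
field.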