Index: linux-2.6.17-mm3/mm/vmstat.c
===================================================================
--- linux-2.6.17-mm3.orig/mm/vmstat.c	2006-06-27 12:34:32.103258120 -0700
+++ linux-2.6.17-mm3/mm/vmstat.c	2006-06-27 12:40:50.236094748 -0700
@@ -113,27 +113,13 @@ atomic_long_t vm_stat[NR_VM_ZONE_STAT_IT
 #ifdef CONFIG_SMP
 
 /*
- * A higher cpu count means a higher possbility for contention.
- * We increase the threshhold as the number of processors increase.
- */
-#if NR_CPUS <= 4
-#define STAT_THRESHOLD 32
-#else
-#if NR_CPUS <= 16
-#define STAT_THRESHOLD 64
-#else
-#define STAT_THRESHOLD 125
-#endif
-#endif
-
-/*
  * Bring the global and zone counters up to date from counters in a pcp.
  * This function is called when we know that some counters have to be
  * updated.
  *
  * Preemption must be disabled.
  */
-static void zone_page_state_consolidate(struct zone *z, struct per_cpu_pageset *pcp)
+void zone_page_state_consolidate(struct zone *z, struct per_cpu_pageset *pcp)
 {
 	/*
 	 * The global cachelines are may be heavily contended.
@@ -152,6 +138,7 @@ static void zone_page_state_consolidate(
 		if (p[i])
 			zone_page_state_add(xchg(p + i, 0), z, i);
 }
+EXPORT_SYMBOL(zone_page_state_consolidate);
 
 /*
  * For use when we know that preemption is disabled.
@@ -189,88 +176,6 @@ void mod_zone_page_state(struct zone *zo
 EXPORT_SYMBOL(mod_zone_page_state);
 
 /*
- * Optimized increment and decrement functions.
- *
- * These are only for a single page and therefore can take a struct page *
- * argument instead of struct zone *. This allows the inclusion of the code
- * generated for page_zone(page) into the optimized functions.
- *
- * No overflow check is necessary and therefore the differential can be
- * incremented or decremented in place which may allow the compilers to
- * generate better code.
- *
- * The increment or decrement is known and therefore one boundary check can
- * be omitted.
- */
-static void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
-{
-	struct per_cpu_pageset *pcp = zone_pcp(zone, smp_processor_id());
-	s8 *p = &pcp->vm_stat_diff[item];
-
-	(*p)++;
-	if (unlikely(*p > STAT_THRESHOLD))
-		zone_page_state_consolidate(zone, pcp);
-}
-
-void __inc_zone_page_state(struct page *page, enum zone_stat_item item)
-{
-	__inc_zone_state(page_zone(page), item);
-}
-EXPORT_SYMBOL(__inc_zone_page_state);
-
-void __dec_zone_page_state(struct page *page, enum zone_stat_item item)
-{
-	struct zone *zone = page_zone(page);
-	struct per_cpu_pageset *pcp = zone_pcp(zone, smp_processor_id());
-	s8 *p = &pcp->vm_stat_diff[item];
-
-	(*p)--;
-	if (unlikely(*p < -STAT_THRESHOLD))
-		zone_page_state_consolidate(zone, pcp);
-}
-EXPORT_SYMBOL(__dec_zone_page_state);
-
-void inc_zone_state(struct zone *zone, enum zone_stat_item item)
-{
-	unsigned long flags;
-
-	local_irq_save(flags);
-	__inc_zone_state(zone, item);
-	local_irq_restore(flags);
-}
-
-void inc_zone_page_state(struct page *page, enum zone_stat_item item)
-{
-	struct zone *zone;
-
-	zone = page_zone(page);
-	preempt_disable();
-	__inc_zone_state(zone, item);
-	preempt_enable();
-}
-EXPORT_SYMBOL(inc_zone_page_state);
-
-void dec_zone_page_state(struct page *page, enum zone_stat_item item)
-{
-	struct zone *zone = page_zone(page);
-	struct per_cpu_pageset *pcp;
-	s8 *p;
-
-	zone = page_zone(page);
-	preempt_disable();
-	pcp = zone_pcp(zone, smp_processor_id());
-	p = &pcp->vm_stat_diff[item];
-
-	(*p)--;
-
-	if (unlikely(*p < -STAT_THRESHOLD))
-		zone_page_state_consolidate(zone, pcp);
-
-	preempt_enable();
-}
-EXPORT_SYMBOL(dec_zone_page_state);
-
-/*
  * Update the zone counters for one cpu.
  */
 void refresh_cpu_vm_stats(int cpu)
Index: linux-2.6.17-mm3/include/linux/vmstat.h
===================================================================
--- linux-2.6.17-mm3.orig/include/linux/vmstat.h	2006-06-27 12:07:19.842119270 -0700
+++ linux-2.6.17-mm3/include/linux/vmstat.h	2006-06-27 12:44:11.432621437 -0700
@@ -159,19 +159,95 @@ static inline void zap_zone_vm_stats(str
 }
 
 #ifdef CONFIG_SMP
 
-void __mod_zone_page_state(struct zone *, enum zone_stat_item item, int);
-void __inc_zone_page_state(struct page *, enum zone_stat_item);
-void __dec_zone_page_state(struct page *, enum zone_stat_item);
-void mod_zone_page_state(struct zone *, enum zone_stat_item, int);
-void inc_zone_page_state(struct page *, enum zone_stat_item);
-void dec_zone_page_state(struct page *, enum zone_stat_item);
+/*
+ * A higher cpu count means a higher possibility for contention.
+ * We increase the threshold as the number of processors increases.
+ */
+#if NR_CPUS <= 4
+#define STAT_THRESHOLD 32
+#else
+#if NR_CPUS <= 16
+#define STAT_THRESHOLD 64
+#else
+#define STAT_THRESHOLD 125
+#endif
+#endif
 
-extern void inc_zone_state(struct zone *, enum zone_stat_item);
+extern void zone_page_state_consolidate(struct zone *,
+					struct per_cpu_pageset *);
+void __mod_zone_page_state(struct zone *, enum zone_stat_item item, int);
+void mod_zone_page_state(struct zone *, enum zone_stat_item, int);
 
 void refresh_cpu_vm_stats(int);
 void refresh_vm_stats(void);
 
+/*
+ * Optimized increment and decrement functions.
+ *
+ * These are only for a single page and therefore can take a struct page *
+ * argument instead of struct zone *. This allows the inclusion of the code
+ * generated for page_zone(page) into the optimized functions.
+ *
+ * No overflow check is necessary and therefore the differential can be
+ * incremented or decremented in place which may allow the compilers to
+ * generate better code.
+ *
+ * The increment or decrement is known and therefore one boundary check can
+ * be omitted.
+ */
+static inline void __inc_zone_state(struct zone *zone,
+				enum zone_stat_item item)
+{
+	struct per_cpu_pageset *pcp = zone_pcp(zone, smp_processor_id());
+	s8 *p = &pcp->vm_stat_diff[item];
+
+	(*p)++;
+	if (unlikely(*p > STAT_THRESHOLD))
+		zone_page_state_consolidate(zone, pcp);
+}
+
+static inline void __inc_zone_page_state(struct page *page,
+				enum zone_stat_item item)
+{
+	__inc_zone_state(page_zone(page), item);
+}
+
+static inline void __dec_zone_page_state(struct page *page,
+				enum zone_stat_item item)
+{
+	struct zone *zone = page_zone(page);
+	struct per_cpu_pageset *pcp = zone_pcp(zone, smp_processor_id());
+	s8 *p = &pcp->vm_stat_diff[item];
+
+	(*p)--;
+	if (unlikely(*p < -STAT_THRESHOLD))
+		zone_page_state_consolidate(zone, pcp);
+}
+
+static inline void inc_zone_state(struct zone *zone, enum zone_stat_item item)
+{
+	preempt_disable();
+	__inc_zone_state(zone, item);
+	preempt_enable();
+}
+
+static inline void inc_zone_page_state(struct page *page, enum zone_stat_item item)
+{
+	struct zone *zone;
+
+	zone = page_zone(page);
+	preempt_disable();
+	__inc_zone_state(zone, item);
+	preempt_enable();
+}
+
+static inline void dec_zone_page_state(struct page *page, enum zone_stat_item item)
+{
+	preempt_disable();
+	__dec_zone_page_state(page, item);
+	preempt_enable();
+}
 #else /* CONFIG_SMP */
 
 /*
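
As an aside for anyone studying the counter scheme rather than the patch itself: the batching idea the patch inlines is easy to model in user space. The sketch below is illustrative only and is not kernel code. All names in it (global_count, local_diff, fold_diff, bump, worker) are invented for the example; a per-thread variable stands in for the per-cpu vm_stat_diff, so no preempt_disable() analogue is needed, and a plain add-and-reset stands in for the xchg() used by zone_page_state_consolidate(), which is safe here because only the owning thread ever touches its differential.

/*
 * Illustrative user-space model of the per-cpu differential counters.
 * Build with: cc -pthread sketch.c
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

#define STAT_THRESHOLD 32	/* same role as the NR_CPUS <= 4 value above */

static _Atomic long global_count;		/* stands in for vm_stat[item] */
static _Thread_local signed char local_diff;	/* stands in for vm_stat_diff (s8) */

/* Analogue of zone_page_state_consolidate(): push the accumulated
 * differential into the shared counter and reset it. */
static void fold_diff(void)
{
	atomic_fetch_add(&global_count, local_diff);
	local_diff = 0;
}

/* Analogue of __inc_zone_state(): a cheap thread-local increment that
 * touches the contended cacheline only once per STAT_THRESHOLD updates. */
static void bump(void)
{
	if (++local_diff > STAT_THRESHOLD)
		fold_diff();
}

static void *worker(void *arg)
{
	(void)arg;
	for (int i = 0; i < 100000; i++)
		bump();
	fold_diff();	/* final consolidation, as refresh_cpu_vm_stats() would do */
	return NULL;
}

int main(void)
{
	pthread_t t[4];

	for (int i = 0; i < 4; i++)
		pthread_create(&t[i], NULL, worker, NULL);
	for (int i = 0; i < 4; i++)
		pthread_join(t[i], NULL);
	printf("%ld\n", atomic_load(&global_count));	/* prints 400000 */
	return 0;
}

The design point is the same one the STAT_THRESHOLD comment makes: the expensive shared cacheline is written once per STAT_THRESHOLD updates instead of once per update. The kernel additionally needs xchg() and disabled preemption because an interrupt or a rescheduled task may race on the same per-cpu differential, a case the thread-local model above side-steps.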