Drop requirement that we need to disable interrupts for counter updates This is done with some magic atomic operations. However, we now need two xchg operations for a counter update instead of one increment. In that case we have to cut out all the special casing for increments and decrements since they would be of the same complexity as the generic add. The question is: Is it worth it? Index: linux-2.6.17-mm3/mm/vmstat.c =================================================================== --- linux-2.6.17-mm3.orig/mm/vmstat.c 2006-06-27 20:57:11.590715903 -0700 +++ linux-2.6.17-mm3/mm/vmstat.c 2006-06-27 21:10:40.178355265 -0700 @@ -154,123 +154,46 @@ static void zone_page_state_consolidate( p = pcp->vm_stat_diff; for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++) - if (p[i]) { - zone_page_state_add(p[i], z, i); - p[i] = 0; - } + if (p[i]) + zone_page_state_add(xchg(p + i, 0), z, i); } /* - * For use when we know that interrupts are disabled. - * - * Interrupts must be disabled since we rely on the differential - * not changing between initial retrieval and final store. + * Preemption must be disabled. */ void __mod_zone_page_state(struct zone *zone, enum zone_stat_item item, int delta) { struct per_cpu_pageset *pcp = zone_pcp(zone, smp_processor_id()); s8 *p = &pcp->vm_stat_diff[item]; - long x = delta + *p; + long x = delta; - if (unlikely(x > STAT_THRESHOLD || x < -STAT_THRESHOLD)) { - /* - * We cannot update the differential since the - * result is beyond the threshold. - * Consolidate all counters first. 
- */ - zone_page_state_consolidate(zone, pcp); - zone_page_state_add(delta, zone, item); - } else - *p = x; + do { + x += xchg(p, 0); + if (likely(x < STAT_THRESHOLD && x > -STAT_THRESHOLD)) + x = xchg(p, x); + else { + zone_page_state_consolidate(zone, pcp); + zone_page_state_add(x, zone, item); + return; + } + } while (x); } EXPORT_SYMBOL(__mod_zone_page_state); /* - * When running with interrupts enabled + * When running with preemption */ void mod_zone_page_state(struct zone *zone, enum zone_stat_item item, int delta) { - unsigned long flags; - - local_irq_save(flags); + preempt_disable(); __mod_zone_page_state(zone, item, delta); - local_irq_restore(flags); + preempt_enable(); } EXPORT_SYMBOL(mod_zone_page_state); /* - * Optimized increment and decrement functions. - * - * These are only for a single page and therefore can take a struct page * - * argument instead of struct zone *. This allows the inclusion of the code - * generated for page_zone(page) into the optimized functions. - * - * No overflow check is necessary and therefore the differential can be - * incremented or decremented in place which may allow the compilers to - * generate better code. - * - * The increment or decrement is known and therefore one boundary check can - * be omitted. - * - * Note that interrupts must be DISABLED since there cannot be any other counter - * operation between the increment and the checking of the threshhold condition. 
- */ -static void __inc_zone_state(struct zone *zone, enum zone_stat_item item) -{ - struct per_cpu_pageset *pcp = zone_pcp(zone, smp_processor_id()); - s8 *p = &pcp->vm_stat_diff[item]; - - (*p)++; - if (unlikely(*p > STAT_THRESHOLD)) - zone_page_state_consolidate(zone, pcp); -} - -void __inc_zone_page_state(struct page *page, enum zone_stat_item item) -{ - __inc_zone_state(page_zone(page), item); -} -EXPORT_SYMBOL(__inc_zone_page_state); - -void __dec_zone_page_state(struct page *page, enum zone_stat_item item) -{ - struct zone *zone = page_zone(page); - struct per_cpu_pageset *pcp = zone_pcp(zone, smp_processor_id()); - s8 *p = &pcp->vm_stat_diff[item]; - - (*p)--; - if (unlikely(*p < -STAT_THRESHOLD)) - zone_page_state_consolidate(zone, pcp); -} -EXPORT_SYMBOL(__dec_zone_page_state); - -void inc_zone_state(struct zone *zone, enum zone_stat_item item) -{ - unsigned long flags; - - local_irq_save(flags); - __inc_zone_state(zone, item); - local_irq_restore(flags); -} - -void inc_zone_page_state(struct page *page, enum zone_stat_item item) -{ - inc_zone_state(page_zone(page), item); -} -EXPORT_SYMBOL(inc_zone_page_state); - -void dec_zone_page_state(struct page *page, enum zone_stat_item item) -{ - unsigned long flags; - - local_irq_save(flags); - __dec_zone_page_state(page, item); - local_irq_restore(flags); -} -EXPORT_SYMBOL(dec_zone_page_state); - -/* * Update the zone counters for one cpu. * Preemption must be disabled. 
 */ @@ -286,14 +209,8 @@ void refresh_cpu_vm_stats(int cpu) for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++) { s8 *p = pcp->vm_stat_diff + i; - if (*p) { - unsigned long flags; - - local_irq_save(flags); - zone_page_state_add(*p, zone, i); - *p = 0; - local_irq_restore(flags); - } + if (*p) + zone_page_state_add(xchg(p, 0), zone, i); } } } @@ -318,6 +235,9 @@ EXPORT_SYMBOL(refresh_vm_stats); #endif #ifdef CONFIG_NUMA + +#define __inc_zone_state(__z, __i) __mod_zone_page_state((__z), (__i), 1) + /* * zonelist = the list of zones passed to the allocator * z = the zone from which the allocation occurred. Index: linux-2.6.17-mm3/include/linux/vmstat.h =================================================================== --- linux-2.6.17-mm3.orig/include/linux/vmstat.h 2006-06-27 12:07:19.842119270 -0700 +++ linux-2.6.17-mm3/include/linux/vmstat.h 2006-06-27 21:10:40.195932302 -0700 @@ -160,14 +160,12 @@ static inline void zap_zone_vm_stats(str #ifdef CONFIG_SMP void __mod_zone_page_state(struct zone *, enum zone_stat_item item, int); -void __inc_zone_page_state(struct page *, enum zone_stat_item); -void __dec_zone_page_state(struct page *, enum zone_stat_item); +#define __inc_zone_page_state(__p, __i) __mod_zone_page_state(page_zone(__p), (__i), 1) +#define __dec_zone_page_state(__p, __i) __mod_zone_page_state(page_zone(__p), (__i), -1) void mod_zone_page_state(struct zone *, enum zone_stat_item, int); -void inc_zone_page_state(struct page *, enum zone_stat_item); -void dec_zone_page_state(struct page *, enum zone_stat_item); - -extern void inc_zone_state(struct zone *, enum zone_stat_item); +#define inc_zone_page_state(__p, __i) mod_zone_page_state(page_zone(__p), (__i), 1) +#define dec_zone_page_state(__p, __i) mod_zone_page_state(page_zone(__p), (__i), -1) void refresh_cpu_vm_stats(int); void refresh_vm_stats(void);