Drop the requirement that interrupts be disabled for counter updates.

This is done with atomic xchg operations. However, we now need two xchg
operations for a counter update instead of one increment. In that case we
have to cut out all the special casing for increments and decrements, since
those paths would be of the same complexity as the generic add.

The question is: Is it worth it?
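As an illustration of the technique (not part of the patch): a minimal
userspace sketch of the two-xchg update loop, with GCC's
__atomic_exchange_n() standing in for the kernel's xchg(). THRESHOLD,
counter_diff, global_add() and mod_state() are made-up names for this
example only.

#include <stdio.h>

#define THRESHOLD 32

static long counter_diff;	/* stands in for one per-CPU differential */
static long global_counter;	/* stands in for the zone-wide counter */

/* Stands in for zone_page_state_add(). */
static void global_add(long x)
{
	__atomic_add_fetch(&global_counter, x, __ATOMIC_RELAXED);
}

/*
 * Same shape as the patched __mod_zone_page_state(): claim the
 * accumulated differential, then either fold it into the global counter
 * (over threshold) or publish it back. If a concurrent update slipped a
 * value in between the two exchanges, the second exchange returns that
 * value and we retry, so no update is ever lost.
 */
static void mod_state(long delta)
{
	long x = delta;

	do {
		x += __atomic_exchange_n(&counter_diff, 0, __ATOMIC_RELAXED);

		if (x > THRESHOLD || x < -THRESHOLD) {
			global_add(x);
			return;
		}
		x = __atomic_exchange_n(&counter_diff, x, __ATOMIC_RELAXED);
	} while (x);
}

int main(void)
{
	for (int i = 0; i < 100; i++)
		mod_state(1);
	printf("global=%ld diff=%ld\n", global_counter, counter_diff);
	return 0;
}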
Index: linux-2.6.17-mm3/mm/vmstat.c
===================================================================
--- linux-2.6.17-mm3.orig/mm/vmstat.c	2006-06-27 20:24:37.455840645 -0700
+++ linux-2.6.17-mm3/mm/vmstat.c	2006-06-27 21:35:42.851019006 -0700
@@ -135,14 +135,17 @@ void __mod_zone_page_state(struct zone *
 	long x;
 
 	p = diff_pointer(zone, item);
-	x = delta + *p;
+	x = delta;
 
-	if (unlikely(x > STAT_THRESHOLD || x < -STAT_THRESHOLD)) {
-		zone_page_state_add(x, zone, item);
-		x = 0;
-	}
+	do {
+		x += xchg(p, 0);
 
-	*p = x;
+		if (unlikely(x > STAT_THRESHOLD || x < -STAT_THRESHOLD)) {
+			zone_page_state_add(x, zone, item);
+			return;
+		} else
+			x = xchg(p, x);
+	} while (x);
 }
 EXPORT_SYMBOL(__mod_zone_page_state);
 
@@ -152,116 +155,19 @@ EXPORT_SYMBOL(__mod_zone_page_state);
 void mod_zone_page_state(struct zone *zone, enum zone_stat_item item,
 					int delta)
 {
-	unsigned long flags;
-
-	local_irq_save(flags);
+	preempt_disable();
 	__mod_zone_page_state(zone, item, delta);
-	local_irq_restore(flags);
+	preempt_enable();
 }
 EXPORT_SYMBOL(mod_zone_page_state);
 
 /*
- * Optimized increment and decrement functions.
- *
- * These are only for a single page and therefore can take a struct page *
- * argument instead of struct zone *. This allows the inclusion of the code
- * generated for page_zone(page) into the optimized functions.
- *
- * No overflow check is necessary and therefore the differential can be
- * incremented or decremented in place which may allow the compilers to
- * generate better code.
- *
- * The increment or decrement is known and therefore one boundary check can
- * be omitted.
- *
- * Some processors have inc/dec instructions that are atomic vs an interrupt.
- * However, the code must first determine the differential location in a zone
- * based on the processor number and then inc/dec the counter. There is no
- * guarantee without disabling preemption that the processor will not change
- * in between and therefore the atomicity vs. interrupt cannot be exploited
- * in a useful way here.
- */
-static void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
-{
-	s8 *p = diff_pointer(zone, item);
-
-	(*p)++;
-
-	if (unlikely(*p > STAT_THRESHOLD)) {
-		zone_page_state_add(*p, zone, item);
-		*p = 0;
-	}
-}
-
-void __inc_zone_page_state(struct page *page, enum zone_stat_item item)
-{
-	__inc_zone_state(page_zone(page), item);
-}
-EXPORT_SYMBOL(__inc_zone_page_state);
-
-void __dec_zone_page_state(struct page *page, enum zone_stat_item item)
-{
-	struct zone *zone = page_zone(page);
-	s8 *p = diff_pointer(zone, item);
-
-	(*p)--;
-
-	if (unlikely(*p < -STAT_THRESHOLD)) {
-		zone_page_state_add(*p, zone, item);
-		*p = 0;
-	}
-}
-EXPORT_SYMBOL(__dec_zone_page_state);
-
-void inc_zone_state(struct zone *zone, enum zone_stat_item item)
-{
-	unsigned long flags;
-
-	local_irq_save(flags);
-	__inc_zone_state(zone, item);
-	local_irq_restore(flags);
-}
-
-void inc_zone_page_state(struct page *page, enum zone_stat_item item)
-{
-	unsigned long flags;
-	struct zone *zone;
-
-	zone = page_zone(page);
-	local_irq_save(flags);
-	__inc_zone_state(zone, item);
-	local_irq_restore(flags);
-}
-EXPORT_SYMBOL(inc_zone_page_state);
-
-void dec_zone_page_state(struct page *page, enum zone_stat_item item)
-{
-	unsigned long flags;
-	struct zone *zone;
-	s8 *p;
-
-	zone = page_zone(page);
-	local_irq_save(flags);
-	p = diff_pointer(zone, item);
-
-	(*p)--;
-
-	if (unlikely(*p < -STAT_THRESHOLD)) {
-		zone_page_state_add(*p, zone, item);
-		*p = 0;
-	}
-	local_irq_restore(flags);
-}
-EXPORT_SYMBOL(dec_zone_page_state);
-
-/*
  * Update the zone counters for one cpu.
  */
 void refresh_cpu_vm_stats(int cpu)
 {
 	struct zone *zone;
 	int i;
-	unsigned long flags;
 
 	for_each_zone(zone) {
 		struct per_cpu_pageset *pcp;
@@ -269,13 +175,9 @@ void refresh_cpu_vm_stats(int cpu)
 		pcp = zone_pcp(zone, cpu);
 
 		for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
-			if (pcp->vm_stat_diff[i]) {
-				local_irq_save(flags);
-				zone_page_state_add(pcp->vm_stat_diff[i],
+			if (pcp->vm_stat_diff[i])
+				zone_page_state_add(xchg(pcp->vm_stat_diff + i, 0),
 					zone, i);
-				pcp->vm_stat_diff[i] = 0;
-				local_irq_restore(flags);
-			}
 	}
 }
 
@@ -299,6 +201,9 @@ EXPORT_SYMBOL(refresh_vm_stats);
 #endif
 
 #ifdef CONFIG_NUMA
+
+#define __inc_zone_state(__z, __i)	__mod_zone_page_state((__z), (__i), 1)
+
 /*
  * zonelist = the list of zones passed to the allocator
  * z = the zone from which the allocation occurred.
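(Sketch continued, not part of the patch: the fold that
refresh_cpu_vm_stats() now performs with a single xchg() looks like this
in the userspace model above; refresh_stats() is a made-up name, and
counter_diff/global_add() come from the earlier sketch.)

/*
 * Claim the whole differential atomically and fold it into the global
 * counter, as refresh_cpu_vm_stats() now does. No interrupt disabling:
 * a concurrent update lands either before the exchange (and is folded
 * here) or after it (and stays in the differential for the next pass).
 */
static void refresh_stats(void)
{
	long x = __atomic_exchange_n(&counter_diff, 0, __ATOMIC_RELAXED);

	if (x)
		global_add(x);
}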
Index: linux-2.6.17-mm3/include/linux/vmstat.h
===================================================================
--- linux-2.6.17-mm3.orig/include/linux/vmstat.h	2006-06-27 12:07:19.842119270 -0700
+++ linux-2.6.17-mm3/include/linux/vmstat.h	2006-06-27 21:29:18.906983524 -0700
@@ -160,14 +160,12 @@ static inline void zap_zone_vm_stats(str
 #ifdef CONFIG_SMP
 
 void __mod_zone_page_state(struct zone *, enum zone_stat_item item, int);
-void __inc_zone_page_state(struct page *, enum zone_stat_item);
-void __dec_zone_page_state(struct page *, enum zone_stat_item);
+#define __inc_zone_page_state(__p, __i) __mod_zone_page_state(page_zone(__p), (__i), 1)
+#define __dec_zone_page_state(__p, __i) __mod_zone_page_state(page_zone(__p), (__i), -1)
 
 void mod_zone_page_state(struct zone *, enum zone_stat_item, int);
-void inc_zone_page_state(struct page *, enum zone_stat_item);
-void dec_zone_page_state(struct page *, enum zone_stat_item);
-
-extern void inc_zone_state(struct zone *, enum zone_stat_item);
+#define inc_zone_page_state(__p, __i) mod_zone_page_state(page_zone(__p), (__i), 1)
+#define dec_zone_page_state(__p, __i) mod_zone_page_state(page_zone(__p), (__i), -1)
 
 void refresh_cpu_vm_stats(int);
 void refresh_vm_stats(void);
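Since the inc/dec entry points are now plain macro wrappers around the
generic add, their shape is easy to check in isolation. Below is a
standalone sketch with toy stand-ins for the kernel types; struct zone,
struct page, page_zone() and __mod_zone_page_state() here are simplified
mock-ups, not the kernel's definitions. Note that each macro body is a
single expression with no trailing semicolon, so the wrappers still
compose with if/else.

#include <stdio.h>

struct zone { long count; };
struct page { struct zone *zone; };

#define page_zone(__p) ((__p)->zone)

/* Toy stand-in: a single counter; the item argument is ignored. */
static void __mod_zone_page_state(struct zone *zone, int item, int delta)
{
	(void)item;
	zone->count += delta;
}

/* Same shape as the patched vmstat.h wrappers. */
#define __inc_zone_page_state(__p, __i) \
	__mod_zone_page_state(page_zone(__p), (__i), 1)
#define __dec_zone_page_state(__p, __i) \
	__mod_zone_page_state(page_zone(__p), (__i), -1)

int main(void)
{
	struct zone zone = { 0 };
	struct page page = { &zone };
	int mapped = 1;		/* toy stand-in for an enum zone_stat_item */

	if (mapped)
		__inc_zone_page_state(&page, 0);
	else
		__dec_zone_page_state(&page, 0);

	printf("count=%ld\n", zone.count);	/* prints count=1 */
	return 0;
}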