Index: linux-2.6.15-rc3/mm/page_alloc.c
===================================================================
--- linux-2.6.15-rc3.orig/mm/page_alloc.c	2005-11-30 18:22:34.000000000 -0800
+++ linux-2.6.15-rc3/mm/page_alloc.c	2005-11-30 18:29:00.000000000 -0800
@@ -379,7 +379,6 @@ free_pages_bulk(struct zone *zone, int c
 	struct page *page = NULL;
 	int ret = 0;
 
-	spin_lock_irqsave(&zone->lock, flags);
 	zone->all_unreclaimable = 0;
 	zone->pages_scanned = 0;
 	while (!list_empty(list) && count--) {
@@ -389,7 +388,6 @@ free_pages_bulk(struct zone *zone, int c
 		__free_pages_bulk(page, zone, order);
 		ret++;
 	}
-	spin_unlock_irqrestore(&zone->lock, flags);
 	return ret;
 }
 
@@ -399,6 +397,8 @@ void __free_pages_ok(struct page *page,
 	int i;
 	int reserved = 0;
 	int size = 1 << order;
+	struct zone *zone;
+	unsigned long flags;
 
 	arch_free_page(page, order);
 
@@ -413,10 +413,15 @@ void __free_pages_ok(struct page *page,
 	if (reserved)
 		return;
 
+	zone = page_zone(page);
+	spin_lock_irqsave(&zone->lock, flags);
+
 	list_add(&page->lru, &list);
 	mod_page_state(pgfree, size);
 	kernel_map_pages(page, size, 0);
 	free_pages_bulk(page_zone(page), 1, &list, order);
+
+	spin_unlock_irqrestore(&zone->lock, flags);
 }
 
 
@@ -565,7 +570,6 @@ void drain_remote_pages(void)
 	int i;
 	unsigned long flags;
 
-	local_irq_save(flags);
 	for_each_zone(zone) {
 		struct per_cpu_pageset *pset;
 
@@ -573,6 +577,7 @@ void drain_remote_pages(void)
 		if (zone->zone_pgdat->node_id == numa_node_id())
 			continue;
 
+		spin_lock_irqsave(&zone->lock, flags);
 		pset = zone->pageset[smp_processor_id()];
 		for (i = 0; i < ARRAY_SIZE(pset->pcp); i++) {
 			struct per_cpu_pages *pcp;
@@ -582,8 +587,8 @@ void drain_remote_pages(void)
 			pcp->count -= free_pages_bulk(zone, pcp->count,
 						&pcp->list, 0);
 		}
+		spin_unlock_irqrestore(&zone->lock, flags);
 	}
-	local_irq_restore(flags);
 }
 #endif
 
@@ -595,7 +600,9 @@ static void __drain_pages(unsigned int c
 
 	for_each_zone(zone) {
 		struct per_cpu_pageset *pset;
+		unsigned long flags;
 
+		spin_lock_irqsave(&zone->lock, flags);
 		pset = zone_pcp(zone, cpu);
		for (i = 0; i < ARRAY_SIZE(pset->pcp); i++) {
			struct per_cpu_pages *pcp;
@@ -604,6 +611,7 @@ static void __drain_pages(unsigned int c
			pcp->count -= free_pages_bulk(zone, pcp->count,
						&pcp->list, 0);
		}
+		spin_unlock_irqrestore(&zone->lock, flags);
	}
 }
 #endif /* CONFIG_PM || CONFIG_HOTPLUG_CPU */
@@ -698,8 +706,11 @@ static void fastcall free_hot_cold_page(
 	local_irq_save(flags);
 	list_add(&page->lru, &pcp->list);
 	pcp->count++;
-	if (pcp->count >= pcp->high)
+	if (pcp->count >= pcp->high) {
+		spin_lock(&zone->lock);
 		pcp->count -= free_pages_bulk(zone, pcp->batch, &pcp->list, 0);
+		spin_unlock(&zone->lock);
+	}
 	local_irq_restore(flags);
 	put_cpu();
 }