Index: linux-2.6.15-rc4/mm/page_alloc.c
===================================================================
--- linux-2.6.15-rc4.orig/mm/page_alloc.c	2005-11-30 22:25:15.000000000 -0800
+++ linux-2.6.15-rc4/mm/page_alloc.c	2005-12-03 13:21:17.000000000 -0800
@@ -842,7 +842,8 @@ get_page_from_freelist(gfp_t gfp_mask, u
 			mark = (*z)->pages_high;
 		if (!zone_watermark_ok(*z, order, mark,
 		    classzone_idx, alloc_flags))
-			continue;
+			if (!arch_zone_reclaim(*z, gfp_mask, order))
+				continue;
 		}
 
 		page = buffered_rmqueue(*z, order, gfp_mask);
Index: linux-2.6.15-rc4/include/linux/swap.h
===================================================================
--- linux-2.6.15-rc4.orig/include/linux/swap.h	2005-11-30 22:25:15.000000000 -0800
+++ linux-2.6.15-rc4/include/linux/swap.h	2005-12-05 10:12:55.000000000 -0800
@@ -172,7 +172,7 @@ extern void swap_setup(void);
 
 /* linux/mm/vmscan.c */
 extern int try_to_free_pages(struct zone **, gfp_t);
-extern int zone_reclaim(struct zone *, gfp_t, unsigned int);
+extern int zone_reclaim(struct zone *, gfp_t, int, int);
 extern int shrink_all_memory(int);
 extern int vm_swappiness;
 
Index: linux-2.6.15-rc4/mm/vmscan.c
===================================================================
--- linux-2.6.15-rc4.orig/mm/vmscan.c	2005-11-30 22:25:15.000000000 -0800
+++ linux-2.6.15-rc4/mm/vmscan.c	2005-12-05 10:12:38.000000000 -0800
@@ -1354,47 +1354,45 @@ static int __init kswapd_init(void)
 
 module_init(kswapd_init)
 
-
 /*
  * Try to free up some pages from this zone through reclaim.
  */
-int zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
+#ifdef CONFIG_ARCH_ZONE_RECLAIM
+int zone_reclaim(struct zone *z, gfp_t gfp_mask, int writepage, int swap)
 {
+	struct task_struct *p = current;
 	struct scan_control sc;
-	int nr_pages = 1 << order;
-	int total_reclaimed = 0;
+	struct reclaim_state reclaim_state;
 
-	/* The reclaim may sleep, so don't do it if sleep isn't allowed */
-	if (!(gfp_mask & __GFP_WAIT))
-		return 0;
-	if (zone->all_unreclaimable)
-		return 0;
-
-	sc.gfp_mask = gfp_mask;
-	sc.may_writepage = 0;
-	sc.may_swap = 0;
-	sc.nr_mapped = read_page_state(nr_mapped);
 	sc.nr_scanned = 0;
 	sc.nr_reclaimed = 0;
-	/* scan at the highest priority */
+	sc.nr_mapped = read_page_state(nr_mapped);
 	sc.priority = 0;
-	disable_swap_token();
-
-	if (nr_pages > SWAP_CLUSTER_MAX)
-		sc.swap_cluster_max = nr_pages;
-	else
-		sc.swap_cluster_max = SWAP_CLUSTER_MAX;
+	sc.gfp_mask = gfp_mask;
+	sc.may_writepage = writepage;
+	sc.may_swap = swap;
+	sc.swap_cluster_max = SWAP_CLUSTER_MAX;
 
+	/* The reclaim may sleep, so don't do it if sleep isn't allowed */
+	if (!(gfp_mask & __GFP_WAIT))
+		return 0;
+	if (z->all_unreclaimable)
+		return 0;
 	/* Don't reclaim the zone if there are other reclaimers active */
-	if (atomic_read(&zone->reclaim_in_progress) > 0)
-		goto out;
-
-	shrink_zone(zone, &sc);
-	total_reclaimed = sc.nr_reclaimed;
+	if (atomic_read(&z->reclaim_in_progress) > 0)
+		return 0;
 
-	out:
-	return total_reclaimed;
+	cond_resched();
+	p->flags |= PF_MEMALLOC;
+	reclaim_state.reclaimed_slab = 0;
+	p->reclaim_state = &reclaim_state;
+	shrink_zone(z, &sc);
+	p->reclaim_state = NULL;
+	current->flags &= ~PF_MEMALLOC;
+	cond_resched();
+	return sc.nr_reclaimed;
 }
+#endif
 
 asmlinkage long sys_set_zone_reclaim(unsigned int node, unsigned int zone,
 	unsigned int state)
Index: linux-2.6.15-rc4/include/linux/gfp.h
===================================================================
--- linux-2.6.15-rc4.orig/include/linux/gfp.h	2005-11-30 22:25:15.000000000 -0800
+++ linux-2.6.15-rc4/include/linux/gfp.h	2005-12-05 10:14:25.000000000 -0800
@@ -100,6 +100,13 @@ static inline int gfp_zone(gfp_t gfp)
 static inline void arch_free_page(struct page *page, int order) { }
 #endif
 
+#ifndef CONFIG_ARCH_ZONE_RECLAIM
+static inline int arch_zone_reclaim(struct zone *z, gfp_t mask, unsigned int order)
+{
+	return 0;
+}
+#endif
+
 extern struct page *
 FASTCALL(__alloc_pages(gfp_t, unsigned int, struct zonelist *));
 
Index: linux-2.6.15-rc4/include/asm-ia64/numa.h
===================================================================
--- linux-2.6.15-rc4.orig/include/asm-ia64/numa.h	2005-11-30 22:25:15.000000000 -0800
+++ linux-2.6.15-rc4/include/asm-ia64/numa.h	2005-12-03 13:26:50.000000000 -0800
@@ -65,6 +65,8 @@ extern int paddr_to_nid(unsigned long pa
 
 #define local_nodeid (cpu_to_node_map[smp_processor_id()])
 
+extern int arch_zone_reclaim(struct zone *z, gfp_t mask, unsigned int order);
+
 #else /* !CONFIG_NUMA */
 
 #define paddr_to_nid(addr)	0
Index: linux-2.6.15-rc4/arch/ia64/mm/numa.c
===================================================================
--- linux-2.6.15-rc4.orig/arch/ia64/mm/numa.c	2005-11-30 22:25:15.000000000 -0800
+++ linux-2.6.15-rc4/arch/ia64/mm/numa.c	2005-12-05 10:12:14.000000000 -0800
@@ -17,6 +17,7 @@
 #include <linux/node.h>
 #include <linux/init.h>
 #include <linux/bootmem.h>
+#include <linux/swap.h>
 #include <asm/mmzone.h>
 #include <asm/numa.h>
 
@@ -71,3 +72,17 @@ int early_pfn_to_nid(unsigned long pfn)
 	return 0;
 }
 #endif
+
+/*
+ * Reclaim is by doing a low level scan if the allocation on the local node
+ * failed.
+ */
+int arch_zone_reclaim(struct zone *z, gfp_t mask,
+	unsigned int order)
+{
+	if (z->zone_pgdat->node_id == numa_node_id()) {
+		if (zone_reclaim(z, mask, 0, 0) > (1 << order))
+			return 1;
+	}
+	return 0;
+}
Index: linux-2.6.15-rc4/arch/ia64/Kconfig
===================================================================
--- linux-2.6.15-rc4.orig/arch/ia64/Kconfig	2005-11-30 22:25:15.000000000 -0800
+++ linux-2.6.15-rc4/arch/ia64/Kconfig	2005-12-03 13:30:27.000000000 -0800
@@ -338,6 +338,10 @@ config HAVE_ARCH_EARLY_PFN_TO_NID
 	def_bool y
 	depends on NEED_MULTIPLE_NODES
 
+config ARCH_ZONE_RECLAIM
+	def_bool y
+	depends on NUMA
+
 config IA32_SUPPORT
 	bool "Support for Linux/x86 binaries"
 	help