Index: linux-2.6.15-rc2/include/linux/rmap.h
===================================================================
--- linux-2.6.15-rc2.orig/include/linux/rmap.h	2005-11-23 02:16:00.000000000 +0000
+++ linux-2.6.15-rc2/include/linux/rmap.h	2005-11-23 02:17:25.000000000 +0000
@@ -72,7 +72,7 @@ void __anon_vma_link(struct vm_area_stru
  */
 void page_add_anon_rmap(struct page *, struct vm_area_struct *, unsigned long);
 void page_add_file_rmap(struct page *);
-void page_remove_rmap(struct page *);
+int page_remove_rmap(struct page *);
 
 /**
  * page_dup_rmap - duplicate pte mapping to a page
Index: linux-2.6.15-rc2/mm/rmap.c
===================================================================
--- linux-2.6.15-rc2.orig/mm/rmap.c	2005-11-23 02:16:00.000000000 +0000
+++ linux-2.6.15-rc2/mm/rmap.c	2005-11-23 02:45:16.000000000 +0000
@@ -484,8 +484,10 @@ void page_add_file_rmap(struct page *pag
  * @page: page to remove mapping from
  *
  * The caller needs to hold the pte lock.
+ *
+ * Return 1 if this removed the page's final mapping (the caller may
+ * then account the page in page_zone(page)->nr_unmapped).
  */
-void page_remove_rmap(struct page *page)
+int page_remove_rmap(struct page *page)
 {
 	if (atomic_add_negative(-1, &page->_mapcount)) {
 		BUG_ON(page_mapcount(page) < 0);
@@ -501,7 +503,9 @@ void page_remove_rmap(struct page *page)
 		if (page_test_and_clear_dirty(page))
 			set_page_dirty(page);
 		dec_page_state(nr_mapped);
+		return 1;
 	}
+	return 0;
 }
 
 /*
@@ -569,7 +573,9 @@ static int try_to_unmap_one(struct page
 	} else
 		dec_mm_counter(mm, file_rss);
 
-	page_remove_rmap(page);
+	/* SWAP_SUCCESS now means "final mapping removed", not just "unmapped here". */
+	if (page_remove_rmap(page))
+		ret = SWAP_SUCCESS;
 	page_cache_release(page);
 
 out_unmap:
@@ -687,7 +693,7 @@ static int try_to_unmap_anon(struct page
 
 	list_for_each_entry(vma, &anon_vma->head, anon_vma_node) {
 		ret = try_to_unmap_one(page, vma);
-		if (ret == SWAP_FAIL || !page_mapped(page))
+		if (ret == SWAP_FAIL || ret == SWAP_SUCCESS)
 			break;
 	}
 	spin_unlock(&anon_vma->lock);
@@ -718,7 +724,7 @@ static int try_to_unmap_file(struct page
 	spin_lock(&mapping->i_mmap_lock);
 	vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
 		ret = try_to_unmap_one(page, vma);
-		if (ret == SWAP_FAIL || !page_mapped(page))
+		if (ret == SWAP_FAIL || ret == SWAP_SUCCESS)
 			goto out;
 	}
 
@@ -750,8 +756,10 @@ static int try_to_unmap_file(struct page
 	 * but even so use it as a guide to how hard we should try?
 	 */
 	mapcount = page_mapcount(page);
-	if (!mapcount)
+	if (!mapcount) {
+		ret = SWAP_AGAIN;
 		goto out;
+	}
 	cond_resched_lock(&mapping->i_mmap_lock);
 
 	max_nl_size = (max_nl_size + CLUSTER_SIZE - 1) & CLUSTER_MASK;
@@ -816,8 +824,6 @@ int try_to_unmap(struct page *page)
 	else
 		ret = try_to_unmap_file(page);
 
-	if (!page_mapped(page))
-		ret = SWAP_SUCCESS;
 	return ret;
 }
 
Index: linux-2.6.15-rc2/mm/vmscan.c
===================================================================
--- linux-2.6.15-rc2.orig/mm/vmscan.c	2005-11-23 02:12:59.000000000 +0000
+++ linux-2.6.15-rc2/mm/vmscan.c	2005-11-23 02:28:23.000000000 +0000
@@ -61,6 +61,9 @@ struct scan_control {
 	/* Incremented by the number of pages reclaimed */
 	unsigned long nr_reclaimed;
 
 	unsigned long nr_mapped;	/* From page_state */
+	/* Pages whose final mapping was removed by shrink_list() this pass */
+	unsigned long nr_unmapped;
+
 	/* How many pages shrink_cache() should reclaim */
 	int nr_to_reclaim;
@@ -379,6 +382,7 @@ static int shrink_list(struct list_head
 	struct pagevec freed_pvec;
 	int pgactivate = 0;
 	int reclaimed = 0;
+	int unmapped = 0;
 
 	cond_resched();
@@ -440,7 +444,7 @@ static int shrink_list(struct list_head
 			case SWAP_AGAIN:
 				goto keep_locked;
 			case SWAP_SUCCESS:
-				; /* try to free the page below */
+				unmapped++; /* try to free the page below */
 			}
 		}
 
@@ -559,6 +563,7 @@ keep:
 		__pagevec_release_nonlru(&freed_pvec);
 	mod_page_state(pgactivate, pgactivate);
 	sc->nr_reclaimed += reclaimed;
+	sc->nr_unmapped += unmapped;
 	return reclaimed;
 }
@@ -645,10 +650,18 @@ static void shrink_cache(struct zone *zo
 			mod_page_state_zone(zone, pgscan_kswapd, nr_scan);
 		else
 			mod_page_state_zone(zone, pgscan_direct, nr_scan);
+		sc->nr_unmapped = 0;
+
 		nr_freed = shrink_list(&page_list, sc);
 		if (current_is_kswapd())
 			mod_page_state(kswapd_steal, nr_freed);
 		mod_page_state_zone(zone, pgsteal, nr_freed);
 		sc->nr_to_reclaim -= nr_freed;
+		/*
+		 * NOTE(review): nr_freed counts every reclaimed page, not only
+		 * those counted in nr_unmapped; confirm the difference cannot
+		 * drive zone->nr_unmapped negative (underflow).
+		 */
+		zone->nr_unmapped += sc->nr_unmapped - nr_freed;
 
 		spin_lock_irq(&zone->lru_lock);
@@ -1358,7 +1371,8 @@ int zone_reclaim(struct zone *zone, gfp_
 	sc.nr_mapped = read_page_state(nr_mapped);
 	sc.nr_scanned = 0;
 	sc.nr_reclaimed = 0;
+	sc.nr_unmapped = 0;
 
 	/* scan at the highest priority */
 	sc.priority = 0;
Index: linux-2.6.15-rc2/mm/memory.c
===================================================================
--- linux-2.6.15-rc2.orig/mm/memory.c	2005-11-23 02:12:59.000000000 +0000
+++ linux-2.6.15-rc2/mm/memory.c	2005-11-23 02:38:55.000000000 +0000
@@ -618,6 +618,14 @@ static unsigned long zap_pte_range(struc
 			}
-			page_remove_rmap(page);
+			/*
+			 * Account the final unmap before handing the page to
+			 * the TLB batch: tlb_remove_page() may free it, so the
+			 * struct page must not be touched afterwards.
+			 * NOTE(review): zone->nr_unmapped is updated without a
+			 * lock here; confirm approximate counts are acceptable.
+			 */
+			if (page_remove_rmap(page))
+				page_zone(page)->nr_unmapped++;
 			tlb_remove_page(tlb, page);
 			continue;
 		}
 		/*