Index: linux-2.6.15-rc3/include/linux/mmzone.h
===================================================================
--- linux-2.6.15-rc3.orig/include/linux/mmzone.h	2005-11-29 03:51:27.000000000 +0000
+++ linux-2.6.15-rc3/include/linux/mmzone.h	2005-11-30 00:35:48.000000000 +0000
@@ -122,6 +122,7 @@ struct zone {
 	unsigned long		lowmem_reserve[MAX_NR_ZONES];
+	atomic_t		pages_unmapped;	/* Unmapped pagecache pages */
 
 #ifdef CONFIG_NUMA
 	struct per_cpu_pageset	*pageset[NR_CPUS];
 #else
 	struct per_cpu_pageset	pageset[NR_CPUS];
Index: linux-2.6.15-rc3/include/linux/rmap.h
===================================================================
--- linux-2.6.15-rc3.orig/include/linux/rmap.h	2005-11-29 03:51:27.000000000 +0000
+++ linux-2.6.15-rc3/include/linux/rmap.h	2005-11-30 00:48:06.000000000 +0000
@@ -71,8 +71,8 @@ void __anon_vma_link(struct vm_area_stru
  * rmap interfaces called when adding or removing pte of page
  */
 void page_add_anon_rmap(struct page *, struct vm_area_struct *, unsigned long);
-void page_add_file_rmap(struct page *);
-void page_remove_rmap(struct page *);
+int page_add_file_rmap(struct page *);
+int page_remove_rmap(struct page *);
 
 /**
  * page_dup_rmap - duplicate pte mapping to a page
Index: linux-2.6.15-rc3/mm/fremap.c
===================================================================
--- linux-2.6.15-rc3.orig/mm/fremap.c	2005-11-29 03:51:27.000000000 +0000
+++ linux-2.6.15-rc3/mm/fremap.c	2005-11-30 00:59:41.000000000 +0000
@@ -90,7 +90,8 @@ int install_page(struct mm_struct *mm, s
 
 	flush_icache_page(vma, page);
 	set_pte_at(mm, addr, pte, mk_pte(page, prot));
-	page_add_file_rmap(page);
+	if (page_add_file_rmap(page))
+		atomic_dec(&page_zone(page)->pages_unmapped);
 	pte_val = *pte;
 	update_mmu_cache(vma, addr, pte_val);
 	err = 0;
Index: linux-2.6.15-rc3/mm/memory.c
===================================================================
--- linux-2.6.15-rc3.orig/mm/memory.c	2005-11-29 03:51:27.000000000 +0000
+++ linux-2.6.15-rc3/mm/memory.c	2005-11-30 01:40:35.000000000 +0000
@@ -640,16 +640,18 @@ static unsigned long zap_pte_range(struc
 						addr) != page->index)
 					set_pte_at(mm, addr, pte,
 						   pgoff_to_pte(page->index));
-			if (PageAnon(page))
+			if (PageAnon(page)) {
 				anon_rss--;
-			else {
+				page_remove_rmap(page);
+			} else {
 				if (pte_dirty(ptent))
 					set_page_dirty(page);
 				if (pte_young(ptent))
 					mark_page_accessed(page);
 				file_rss--;
+				if (page_remove_rmap(page))
+					atomic_inc(&page_zone(page)->pages_unmapped);
 			}
-			page_remove_rmap(page);
 			tlb_remove_page(tlb, page);
 			continue;
 		}
@@ -1382,10 +1384,13 @@ gotten:
 	page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
 	if (likely(pte_same(*page_table, orig_pte))) {
 		if (old_page) {
-			page_remove_rmap(old_page);
+			int last = page_remove_rmap(old_page);
+
 			if (!PageAnon(old_page)) {
 				dec_mm_counter(mm, file_rss);
 				inc_mm_counter(mm, anon_rss);
+				if (last)
+					atomic_inc(&page_zone(old_page)->pages_unmapped);
 			}
 		} else
 			inc_mm_counter(mm, anon_rss);
@@ -1985,7 +1990,8 @@ retry:
 			page_add_anon_rmap(new_page, vma, address);
 		} else {
 			inc_mm_counter(mm, file_rss);
-			page_add_file_rmap(new_page);
+			if (page_add_file_rmap(new_page))
+				atomic_dec(&page_zone(new_page)->pages_unmapped);
 		}
 	} else {
 		/* One of our sibling threads was faster, back out. */
Index: linux-2.6.15-rc3/mm/page_alloc.c
===================================================================
--- linux-2.6.15-rc3.orig/mm/page_alloc.c	2005-11-29 03:51:27.000000000 +0000
+++ linux-2.6.15-rc3/mm/page_alloc.c	2005-11-30 02:35:13.000000000 +0000
@@ -1844,6 +1844,7 @@ static int __devinit process_zones(int c
 			goto bad;
 
 		setup_pageset(zone->pageset[cpu], zone_batchsize(zone));
+		atomic_set(&zone->pages_unmapped, 0); /* FIXME: process_zones() runs once per CPU, so this re-zeros the counter on every CPU bring-up; init belongs in free_area_init_core() */
 	}
 
 	return 0;
Index: linux-2.6.15-rc3/mm/rmap.c
===================================================================
--- linux-2.6.15-rc3.orig/mm/rmap.c	2005-11-30 00:42:55.000000000 +0000
+++ linux-2.6.15-rc3/mm/rmap.c	2005-11-30 01:16:52.000000000 +0000
@@ -464,14 +464,19 @@ void page_add_anon_rmap(struct page *pag
  * @page: the page to add the mapping to
  *
  * The caller needs to hold the pte lock.
+ *
+ * Return 1 if the page changed from unmapped to mapped.
  */
-void page_add_file_rmap(struct page *page)
+int page_add_file_rmap(struct page *page)
 {
 	BUG_ON(PageAnon(page));
 	BUG_ON(!pfn_valid(page_to_pfn(page)));
 
-	if (atomic_inc_and_test(&page->_mapcount))
+	if (atomic_inc_and_test(&page->_mapcount)) {
 		inc_page_state(nr_mapped);
+		return 1;
+	}
+	return 0;
 }
 
 /**
@@ -479,8 +484,10 @@ void page_add_file_rmap(struct pag
  * @page: page to remove mapping from
  *
  * The caller needs to hold the pte lock.
+ *
+ * Return 1 if the removal caused the page to become unmapped.
 */
-void page_remove_rmap(struct page *page)
+int page_remove_rmap(struct page *page)
 {
 	if (atomic_add_negative(-1, &page->_mapcount)) {
 		BUG_ON(page_mapcount(page) < 0);
@@ -496,7 +503,9 @@ void page_remove_rmap(struct page *page)
 		if (page_test_and_clear_dirty(page))
 			set_page_dirty(page);
 		dec_page_state(nr_mapped);
+		return 1;
 	}
+	return 0;
 }
 
 /*
@@ -559,10 +568,13 @@ static int try_to_unmap_one(struct page
 		set_pte_at(mm, address, pte, swp_entry_to_pte(entry));
 		BUG_ON(pte_file(*pte));
 		dec_mm_counter(mm, anon_rss);
-	} else
+		page_remove_rmap(page);
+	} else {
 		dec_mm_counter(mm, file_rss);
+		if (page_remove_rmap(page))
+			atomic_inc(&page_zone(page)->pages_unmapped);
+	}
 
-	page_remove_rmap(page);
 	page_cache_release(page);
 
 out_unmap: