Display "locked" for an mlocked vma.

Pages in mlocked VMAs are not subject to page migration, so it's important
to know if a VMA was mlocked.

Signed-off-by: Christoph Lameter

Index: linux-2.6.16-rc6/mm/mempolicy.c
===================================================================
--- linux-2.6.16-rc6.orig/mm/mempolicy.c	2006-03-11 14:12:55.000000000 -0800
+++ linux-2.6.16-rc6/mm/mempolicy.c	2006-03-17 18:26:05.000000000 -0800
@@ -556,6 +556,16 @@ static void migrate_page_add(struct page
 	}
 }
 
+static int list_count(struct list_head *l)
+{
+	int c = 0;
+	struct list_head *p;
+
+	list_for_each(p, l)
+		c++;
+	return c;
+}
+
 /*
  * Migrate the list 'pagelist' of pages to a certain destination.
  *
@@ -592,8 +602,11 @@ redo:
 					offset + vma->vm_start);
 			offset += PAGE_SIZE;
 		}
-		else
+		else {
 			page = alloc_pages_node(dest, GFP_HIGHUSER, 0);
+			if (page && page_to_nid(page) != dest)
+				printk("Cannot get memory on node %d got memory on %d instead!\n",dest, page_to_nid(page));
+		}
 
 		if (!page) {
 			err = -ENOMEM;
@@ -617,6 +630,8 @@ out:
 		list_del(&page->lru);
 		__free_page(page);
 	}
+	if (!list_empty(&failed) || !list_empty(pagelist))
+		printk(KERN_ERR "migrate_pages_to(%d) busy=%d failed=%d\n", dest, list_count(&failed), list_count(pagelist));
 	list_splice(&failed, pagelist);
 	if (err < 0)
 		return err;
@@ -638,12 +653,14 @@ int migrate_to_node(struct mm_struct *mm
 	LIST_HEAD(pagelist);
 	int err = 0;
 
+	printk(KERN_ERR "migrate_to_node(%p, %d, %d, %x) ",mm, source, dest, flags);
 	nodes_clear(nmask);
 	node_set(source, nmask);
 
 	check_range(mm, mm->mmap->vm_start, TASK_SIZE, &nmask,
 			flags | MPOL_MF_DISCONTIG_OK, &pagelist);
 
+	printk("Candidates=%d\n", list_count(&pagelist));
 	if (!list_empty(&pagelist)) {
 		err = migrate_pages_to(&pagelist, NULL, dest);
 		if (!list_empty(&pagelist))
@@ -666,6 +683,7 @@ int do_migrate_pages(struct mm_struct *m
 	int err = 0;
 	nodemask_t tmp;
 
+	printk(KERN_ERR "do_migrate_ppages(%p, %lx, %lx, %d\n", mm, from_nodes->bits[0], to_nodes->bits[0], flags);
 	down_read(&mm->mmap_sem);
 
 	/*
@@ -1758,6 +1776,8 @@ struct numa_maps {
 	unsigned long mapcount_max;
 	unsigned long dirty;
 	unsigned long swapcache;
+	unsigned long min_pagecount;
+	unsigned long max_pagecount;
 	unsigned long node[MAX_NUMNODES];
 };
 
@@ -1785,6 +1805,13 @@ static void gather_stats(struct page *pa
 	if (count > md->mapcount_max)
 		md->mapcount_max = count;
 
+	count = page_count(page);
+	if (count > md->max_pagecount)
+		md->max_pagecount = count;
+
+	if (!md->min_pagecount || count < md->min_pagecount)
+		md->min_pagecount = count;
+
 	md->node[page_to_nid(page)]++;
 	cond_resched();
 }
@@ -1863,9 +1890,13 @@ int show_numa_map(struct seq_file *m, vo
 			&node_online_map, MPOL_MF_STATS, md);
 	}
 
+	if (vma->vm_flags & VM_LOCKED)
+		seq_printf(m, " locked");
+
 	if (!md->pages)
 		goto out;
 
+
 	if (md->anon)
 		seq_printf(m," anon=%lu",md->anon);
 
@@ -1887,6 +1918,9 @@ int show_numa_map(struct seq_file *m, vo
 	if (md->writeback)
 		seq_printf(m," writeback=%lu", md->writeback);
 
+	if (md->min_pagecount > 2 || md->max_pagecount > 1 + md->mapcount_max)
+		seq_printf(m," pc=%lu-%lu", md->min_pagecount, md->max_pagecount);
+
 	for_each_online_node(n)
 		if (md->node[n])
 			seq_printf(m, " N%d=%lu", n, md->node[n]);