---
 mm/slub.c |   18 ++++++++++--------
 1 file changed, 10 insertions(+), 8 deletions(-)

Index: linux-2.6/mm/slub.c
===================================================================
--- linux-2.6.orig/mm/slub.c	2007-10-17 14:32:37.000000000 -0700
+++ linux-2.6/mm/slub.c	2007-10-17 16:16:45.000000000 -0700
@@ -828,8 +828,10 @@ static void trace(struct kmem_cache *s,
 /*
  * Tracking of fully allocated slabs for debugging purposes.
  */
-static void add_full(struct kmem_cache_node *n, struct page *page)
+static void add_full(struct kmem_cache *s, struct page *page)
 {
+	struct kmem_cache_node *n = get_node(s, page_to_nid(page));
+
 	spin_lock(&n->list_lock);
 	list_add(&page->lru, &n->full);
 	spin_unlock(&n->list_lock);
@@ -1223,9 +1225,11 @@ static __always_inline int slab_trylock(
 /*
  * Management of partially allocated slabs
  */
-static void add_partial(struct kmem_cache_node *n,
+static void add_partial(struct kmem_cache *s,
 				struct page *page, int tail)
 {
+	struct kmem_cache_node *n = get_node(s, page_to_nid(page));
+
 	spin_lock(&n->list_lock);
 	n->nr_partial++;
 	if (tail)
@@ -1350,8 +1354,6 @@ static struct page *get_partial(struct k
  */
 static void unfreeze_slab(struct kmem_cache *s, struct page *page, int tail)
 {
-	struct kmem_cache_node *n = get_node(s, page_to_nid(page));
-
 	ClearSlabFrozen(page);
 
 	/*
@@ -1360,9 +1362,9 @@ static void unfreeze_slab(struct kmem_ca
 	 * efficient way if all objects in a slab have been freed.
 	 */
 	if (page->freelist)
-		add_partial(n, page, tail);
+		add_partial(s, page, tail);
 	else if (SlabDebug(page) && (s->flags & SLAB_STORE_USER))
-		add_full(n, page);
+		add_full(s, page);
 	slab_unlock(page);
 }
 
@@ -1625,7 +1627,7 @@ checks_ok:
 	 * We may have to move it to the partial list.
 	 */
 	if (unlikely(!prior))
-		add_partial(get_node(s, page_to_nid(page)), page, 0);
+		add_partial(s, page, 0);
 
 out_unlock:
 	slab_unlock(page);
@@ -2021,7 +2023,7 @@ static struct kmem_cache_node *early_kme
 #endif
 	init_kmem_cache_node(n);
 	atomic_long_inc(&n->nr_slabs);
-	add_partial(n, page, 0);
+	add_partial(kmalloc_caches, page, 0);
 	return n;
 }
 