Index: linux-2.6/mm/slub.c =================================================================== --- linux-2.6.orig/mm/slub.c 2007-10-20 14:19:16.000000000 -0700 +++ linux-2.6/mm/slub.c 2007-10-20 14:49:09.000000000 -0700 @@ -1541,20 +1541,56 @@ local_irq_save(flags); preempt_enable_no_resched(); #endif - if (!c->page) - goto new_slab; + if (c->page) { + state = slab_lock(c->page); + /* + * If objects were freed remotely to this slab and the + * node of the slab is what we want then we + * can simply reload objects from the freelist. + */ + if (unlikely(node_match(c, node)) && + !is_end(c->page->freelist)) + goto load_freelist; + + /* + * Current cpu slab has no objects left or + * is not on the node we want. Drop it. + */ +another_slab: + deactivate_slab(s, c, state); + } + + /* + * No cpu slab exists. We need to find a slab to allocate + * from. + */ + state = get_partial(s, c, gfpflags, node); + if (unlikely(!state)) { + state = get_new_slab(s, &c, gfpflags, node); + if (unlikely(!state)) { + /* OOM situation */ + object = NULL; + goto out; + } + } - state = slab_lock(c->page); - if (unlikely(!node_match(c, node))) - goto another_slab; load_freelist: + /* + * Ok we got a slab. Now load the per cpu structure with the + * freelist. All objects in the slab are marked as full since + * we take all the objects out of it. + */ object = c->page->freelist; - if (unlikely(is_end(object))) + if (unlikely(is_end(object))) { + /* + * Full slab on the partial list? 
+ */ + WARN_ON(1); goto another_slab; + } if (unlikely(state & SLABDEBUG)) goto debug; - object = c->page->freelist; c->freelist = object[c->offset]; c->page->inuse = c->objects; c->page->freelist = end(c->page); @@ -1568,27 +1604,21 @@ #endif return object; -another_slab: - deactivate_slab(s, c, state); - -new_slab: - state = get_partial(s, c, gfpflags, node); - if (state) - goto load_freelist; - - state = get_new_slab(s, &c, gfpflags, node); - if (state) - goto load_freelist; - - object = NULL; - goto out; debug: + /* + * If the slab is in debug mode then take one object directly + * out of the slab instead of going through the per cpu logic. + * + * Any slab can be marked as in debug mode and then all the + * objects will go through here where we can do special processing. + */ object = c->page->freelist; if (!alloc_debug_processing(s, c->page, object, addr)) goto another_slab; c->page->inuse++; c->page->freelist = object[c->offset]; + /* A node < 0 deactivates the slab_free fast path */ c->node = -1; goto unlock_out; }