slab: Remove gotos from __cache_alloc_node and cache_alloc_refill Extract the common get_slab and put_slab functions. Move the logic to put a slab into the freelists out of cache_grow() and make cache_grow return a pointer to the slab allocated. Signed-off-by: Christoph Lameter Index: linux-2.6.18-rc3/mm/slab.c =================================================================== --- linux-2.6.18-rc3.orig/mm/slab.c 2006-08-03 23:01:29.067381678 -0700 +++ linux-2.6.18-rc3/mm/slab.c 2006-08-03 23:04:27.497750829 -0700 @@ -2573,23 +2573,30 @@ static void slab_map_pages(struct kmem_c /* * Grow (by 1) the number of slabs within a cache. This is called by * kmem_cache_alloc() when there are no active objs left in a cache. + * When cache_grow is called the list_lock is held. We drop the lock + * before calling the page allocator. + * + * Return with the object and the list_lock held if successful. + * Otherwise return NULL and do not take the list_lock. */ -static int cache_grow(struct kmem_cache *cachep, gfp_t flags, int nodeid) +static struct slab *cache_grow(struct kmem_cache *cachep, struct kmem_list3 *l3, + gfp_t flags, int nodeid) { struct slab *slabp; void *objp; size_t offset; gfp_t local_flags; unsigned long ctor_flags; - struct kmem_list3 *l3; /* * Be lazy and only check for valid flags here, keeping it out of the * critical path in kmem_cache_alloc(). */ BUG_ON(flags & ~(SLAB_DMA | SLAB_LEVEL_MASK | SLAB_NO_GROW)); - if (flags & SLAB_NO_GROW) - return 0; + if (flags & SLAB_NO_GROW) { + spin_unlock(&l3->list_lock); + return NULL; + } ctor_flags = SLAB_CTOR_CONSTRUCTOR; local_flags = (flags & SLAB_LEVEL_MASK); @@ -2602,8 +2609,6 @@ static int cache_grow(struct kmem_cache /* Take the l3 list lock to change the colour_next on this node */ check_irq_off(); - l3 = cachep->nodelists[nodeid]; - spin_lock(&l3->list_lock); /* Get colour for the slab, and cal the next value. 
*/ offset = l3->colour_next; @@ -2648,18 +2653,15 @@ static int cache_grow(struct kmem_cache check_irq_off(); spin_lock(&l3->list_lock); - /* Make slab active. */ - list_add_tail(&slabp->list, &(l3->slabs_free)); STATS_INC_GROWN(cachep); l3->free_objects += cachep->num; - spin_unlock(&l3->list_lock); - return 1; + return slabp; opps1: kmem_freepages(cachep, objp); failed: if (local_flags & __GFP_WAIT) local_irq_disable(); - return 0; + return NULL; } #if DEBUG @@ -2802,15 +2804,49 @@ bad: #define check_slabp(x,y) do { } while(0) #endif +/* + * Get a slab from the indicated cache. + * We hold list_lock when called. + * If we fail to obtain a slab then the list lock is dropped. + */ +static struct slab *get_slab(struct kmem_list3 *l3) +{ + struct list_head *entry; + struct slab *slab; + + entry = l3->slabs_partial.next; + if (entry == &l3->slabs_partial) { + entry = l3->slabs_free.next; + if (entry == &l3->slabs_free) { + spin_unlock(&l3->list_lock); + return NULL; + } + } + l3->free_touched = 1; + slab = list_entry(entry, struct slab, list); + list_del(&slab->list); + l3->free_objects-= slab->inuse; + return slab; +} + +/* move slabp to correct slabp list: */ +static void put_slab(struct kmem_list3 *l3, struct slab *slabp) +{ + if (slabp->free == BUFCTL_END) + list_add(&slabp->list, &l3->slabs_full); + else + list_add(&slabp->list, &l3->slabs_partial); +} + static void *cache_alloc_refill(struct kmem_cache *cachep, gfp_t flags) { int batchcount; struct kmem_list3 *l3; struct array_cache *ac; + struct slab *slabp; check_irq_off(); ac = cpu_cache_get(cachep); -retry: batchcount = ac->batchcount; if (!ac->touched && batchcount > BATCHREFILL_LIMIT) { /* @@ -2823,25 +2859,22 @@ retry: l3 = cachep->nodelists[numa_node_id()]; BUG_ON(ac->avail > 0 || !l3); - spin_lock(&l3->list_lock); + spin_lock(&l3->list_lock); /* See if we can refill from the shared array */ if (l3->shared && transfer_objects(ac, l3->shared, batchcount)) goto alloc_done; while (batchcount > 0) { - struct 
list_head *entry; - struct slab *slabp; - /* Get slab alloc is to come from. */ - entry = l3->slabs_partial.next; - if (entry == &l3->slabs_partial) { - l3->free_touched = 1; - entry = l3->slabs_free.next; - if (entry == &l3->slabs_free) - goto must_grow; + slabp = get_slab(l3); + if (unlikely(!slabp)) { + slabp = cache_grow(cachep, l3, flags, numa_node_id()); + if (!slabp) { + if (ac->avail) + goto done_no_lock; + return NULL; + } } - - slabp = list_entry(entry, struct slab, list); check_slabp(cachep, slabp); check_spinlock_acquired(cachep); while (slabp->inuse < cachep->num && batchcount--) { @@ -2853,32 +2886,12 @@ retry: numa_node_id()); } check_slabp(cachep, slabp); - - /* move slabp to correct slabp list: */ - list_del(&slabp->list); - if (slabp->free == BUFCTL_END) - list_add(&slabp->list, &l3->slabs_full); - else - list_add(&slabp->list, &l3->slabs_partial); + put_slab(l3, slabp); } -must_grow: - l3->free_objects -= ac->avail; alloc_done: spin_unlock(&l3->list_lock); - - if (unlikely(!ac->avail)) { - int x; - x = cache_grow(cachep, flags, numa_node_id()); - - /* cache_grow can reenable interrupts, then ac could change. */ - ac = cpu_cache_get(cachep); - if (!x && ac->avail == 0) /* no objects in sight? abort */ - return NULL; - - if (!ac->avail) /* objects refilled by interrupt? 
*/ - goto retry; - } +done_no_lock: ac->touched = 1; return ac->entry[--ac->avail]; } @@ -3023,59 +3036,34 @@ static void *alternate_node_alloc(struct static void *__cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid) { - struct list_head *entry; struct slab *slabp; struct kmem_list3 *l3; void *obj; - int x; l3 = cachep->nodelists[nodeid]; BUG_ON(!l3); -retry: check_irq_off(); spin_lock(&l3->list_lock); - entry = l3->slabs_partial.next; - if (entry == &l3->slabs_partial) { - l3->free_touched = 1; - entry = l3->slabs_free.next; - if (entry == &l3->slabs_free) - goto must_grow; + slabp = get_slab(l3); + if (!slabp) { + slabp = cache_grow(cachep, l3, flags, nodeid); + if (!slabp) + return NULL; } - - slabp = list_entry(entry, struct slab, list); check_spinlock_acquired_node(cachep, nodeid); check_slabp(cachep, slabp); STATS_INC_NODEALLOCS(cachep); STATS_INC_ACTIVE(cachep); STATS_SET_HIGH(cachep); - BUG_ON(slabp->inuse == cachep->num); obj = slab_get_obj(cachep, slabp, nodeid); check_slabp(cachep, slabp); - l3->free_objects--; - /* move slabp to correct slabp list: */ - list_del(&slabp->list); - if (slabp->free == BUFCTL_END) - list_add(&slabp->list, &l3->slabs_full); - else - list_add(&slabp->list, &l3->slabs_partial); - - spin_unlock(&l3->list_lock); - goto done; - -must_grow: + put_slab(l3, slabp); spin_unlock(&l3->list_lock); - x = cache_grow(cachep, flags, nodeid); - - if (!x) - return NULL; - - goto retry; -done: return obj; } #endif