SLUB: Restructure __slab_alloc path

Restructure slab alloc path so that the rarely used components are
moved out of __slab_alloc entirely.

Signed-off-by: Christoph Lameter

---
 mm/slub.c |  101 ++++++++++++++++++++++++++++++++++----------------------------
 1 file changed, 56 insertions(+), 45 deletions(-)

Index: linux-2.6/mm/slub.c
===================================================================
--- linux-2.6.orig/mm/slub.c	2007-10-25 19:49:55.000000000 -0700
+++ linux-2.6/mm/slub.c	2007-10-25 19:50:22.000000000 -0700
@@ -1348,18 +1348,21 @@ static struct page *get_any_partial(stru
 }
 
 /*
- * Get a partial page, lock it and return it.
+ * Get a partial page, lock it and make it the current cpu slab.
  */
-static struct page *get_partial(struct kmem_cache *s, gfp_t flags, int node)
+static noinline unsigned long get_partial(struct kmem_cache *s,
+		struct kmem_cache_cpu *c, gfp_t flags, int node)
 {
 	struct page *page;
 	int searchnode = (node == -1) ? numa_node_id() : node;
 
 	page = get_partial_node(get_node(s, searchnode));
-	if (page || (flags & __GFP_THISNODE))
-		return page;
-
-	return get_any_partial(s, flags);
+	if (!page && !(flags & __GFP_THISNODE))
+		page = get_any_partial(s, flags);
+	if (!page)
+		return 0;
+	c->page = page;
+	return page->flags & ~LOCKED;
 }
 
 /*
@@ -1485,6 +1488,48 @@ static inline int node_match(struct kmem
 	return 1;
 }
 
+/* Allocate a new slab and make it the current cpu slab */
+static noinline unsigned long get_new_slab(struct kmem_cache *s,
+		struct kmem_cache_cpu **pc, gfp_t gfpflags, int node)
+{
+	struct kmem_cache_cpu *c = *pc;
+	struct page *page;
+
+	if (gfpflags & __GFP_WAIT)
+		local_irq_enable();
+
+	page = new_slab(s, gfpflags, node);
+
+	if (gfpflags & __GFP_WAIT)
+		local_irq_disable();
+
+	if (!page)
+		return 0;
+
+	*pc = c = get_cpu_slab(s, smp_processor_id());
+	if (c->page) {
+		/*
+		 * Someone else populated the cpu_slab while we
+		 * enabled interrupts, or we have gotten scheduled
+		 * on another cpu. The page may not be on the
+		 * requested node even if __GFP_THISNODE was
+		 * specified. So we need to recheck.
+		 */
+		if (node_match(c, node)) {
+			/*
+			 * Current cpuslab is acceptable and we
+			 * want the current one since its cache hot
+			 */
+			discard_slab(s, page);
+			return slab_lock(c->page);
+		}
+		/* New slab does not fit our expectations */
+		flush_slab(s, c);
+	}
+	c->page = page;
+	return slab_lock(page) | FROZEN;
+}
+
 /*
  * Slow path. The lockless freelist is empty or we need to perform
  * debugging duties.
@@ -1506,7 +1551,6 @@ static void *__slab_alloc(struct kmem_ca
 		gfp_t gfpflags, int node, void *addr, struct kmem_cache_cpu *c)
 {
 	void **object;
-	struct page *new;
 	unsigned long state;
 #ifdef CONFIG_FAST_CMPXCHG_LOCAL
 	unsigned long flags;
@@ -1545,47 +1589,14 @@ another_slab:
 	deactivate_slab(s, c, state);
 
 new_slab:
-	new = get_partial(s, gfpflags, node);
-	if (new) {
-		c->page = new;
-		state = new->flags & ~LOCKED;
+	state = get_partial(s, c, gfpflags, node);
+	if (state)
 		goto load_freelist;
-	}
-
-	if (gfpflags & __GFP_WAIT)
-		local_irq_enable();
-
-	new = new_slab(s, gfpflags, node);
-
-	if (gfpflags & __GFP_WAIT)
-		local_irq_disable();
 
-	if (new) {
-		c = get_cpu_slab(s, smp_processor_id());
-		if (c->page) {
-			/*
-			 * Someone else populated the cpu_slab while we
-			 * enabled interrupts, or we have gotten scheduled
-			 * on another cpu. The page may not be on the
-			 * requested node even if __GFP_THISNODE was
-			 * specified. So we need to recheck.
-			 */
-			if (node_match(c, node)) {
-				/*
-				 * Current cpuslab is acceptable and we
-				 * want the current one since its cache hot
-				 */
-				discard_slab(s, new);
-				state = slab_lock(c->page);
-				goto load_freelist;
-			}
-			/* New slab does not fit our expectations */
-			flush_slab(s, c);
-		}
-		state = slab_lock(new) | FROZEN;
-		c->page = new;
+	state = get_new_slab(s, &c, gfpflags, node);
+	if (state)
 		goto load_freelist;
-	}
+
 	object = NULL;
 	goto out;
 debug:
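
As an aside, a simplified, self-contained sketch of the pattern applied by this
patch -- the rarely taken refill branches factored out of the hot function into
noinline helpers that report success through a returned state word -- could
look like the following. This is a user-space illustration with made-up names
(cold_refill_partial, cold_refill_new, hot_alloc), not kernel code.

#include <stdio.h>

#define STATE_NONE	0UL
#define STATE_FROZEN	1UL

/* Rarely used: try to reuse an existing resource (cf. get_partial()). */
static __attribute__((noinline)) unsigned long cold_refill_partial(int node)
{
	/* Pretend only node 0 has a partial slab to reuse. */
	return node == 0 ? STATE_FROZEN : STATE_NONE;
}

/* Rarely used: create a fresh resource (cf. get_new_slab()). */
static __attribute__((noinline)) unsigned long cold_refill_new(int node)
{
	(void)node;
	/* Pretend allocating a brand new slab always works. */
	return STATE_FROZEN;
}

/* Hot path: only falls into the cold helpers when the fast case fails. */
static void *hot_alloc(int node)
{
	static char object[16];
	unsigned long state;

	state = cold_refill_partial(node);
	if (state)
		goto load_freelist;

	state = cold_refill_new(node);
	if (state)
		goto load_freelist;

	return NULL;	/* both refill paths failed */

load_freelist:
	return object;	/* hand out an object from the refilled slab */
}

int main(void)
{
	printf("node 0 -> %p\n", hot_alloc(0));
	printf("node 1 -> %p\n", hot_alloc(1));
	return 0;
}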