Index: linux-2.6.19-mm1/include/linux/slub_def.h
===================================================================
--- linux-2.6.19-mm1.orig/include/linux/slub_def.h	2006-12-15 00:06:44.032097463 -0800
+++ linux-2.6.19-mm1/include/linux/slub_def.h	2006-12-15 16:27:24.640243292 -0800
@@ -12,8 +12,9 @@
 /*
  * Per cpu structure to manage active slabs.
+ * Must be less than a cacheline for bootstrap to work.
  */
-struct active_slab {
+struct cpu_slab {
 	struct page *page;
 	struct kmem_cache *slab;
 	void **freelist;
@@ -26,18 +27,25 @@ struct active_slab {
 } ____cacheline_aligned_in_smp;
 
 /*
- * Slab cache management.
+ * Per node structure to manage partial slabs
+ * Must be less than a cacheline for bootstrap to work.
  */
-struct kmem_cache {
-	spinlock_t list_lock;	/* Protecty partial list and nr_partial */
+struct node_slab {
+	spinlock_t list_lock;
 	struct list_head partial;
 	unsigned long nr_partial;
 	atomic_long_t nr_slabs;	/* Total slabs used */
+} ____cacheline_aligned_in_smp;
+
+/*
+ * Slab cache management.
+ */
+struct kmem_cache {
 	int offset;		/* Free pointer offset. */
 	int size;		/* Total size of an object */
 	unsigned int order;	/* Size of the slab page */
+	int objects;		/* Number of objects in a slab */
 	unsigned long flags;
-	int objects;		/* Number of objects in slab */
 	atomic_t refcount;	/* Refcount for destroy */
 	int align;
 	void (*ctor)(void *, struct kmem_cache *, unsigned long);
@@ -47,11 +55,19 @@ struct kmem_cache {
 	int inuse;		/* Used portion of the chunk */
 	const char *name;	/* Name (only for display!) */
 	struct list_head list;	/* List of slabs */
+
+#ifdef CONFIG_NUMA
+	struct node_slab *node[MAX_NUMNODES];
+#else
+	struct node_slab node[MAX_NUMNODES];
+#endif
+
 #ifdef CONFIG_NUMA
-	struct active_slab *active[NR_CPUS];
+	struct cpu_slab *cpu[NR_CPUS];
 #else
-	struct active_slab active[NR_CPUS] ____cacheline_aligned_in_smp;
+	struct cpu_slab cpu[NR_CPUS] ____cacheline_aligned_in_smp;
 #endif
+
 };
 
 /*
Index: linux-2.6.19-mm1/mm/slub.c
===================================================================
--- linux-2.6.19-mm1.orig/mm/slub.c	2006-12-15 13:09:33.071585755 -0800
+++ linux-2.6.19-mm1/mm/slub.c	2006-12-15 16:57:19.180351372 -0800
@@ -1,17 +1,16 @@
 /*
- * Uncached Slab allocator SLUB.
+ * Slab allocator SLUB.
  *
  * This allocator uses slabs of objects as caches and does not manage
  * lists of cached objects like the regular Linux SLAB allocator.
  *
- *
  * The allocator synchronizes using slab based locks and only
- * uses a centralized list lock to manage the pool of partial slabs.
+ * uses a centralized list lock to manage the pool of partial slabs
+ * per node.
  *
  * (C) 2006 Silicon Graphics Inc., Christoph Lameter
  *
 * TODO:
- * - NUMA per node partial slab management
 * - Performance tests.
 */
@@ -23,6 +22,8 @@
 #include
 #include
 #include
+#include <linux/mempolicy.h>
+#include <linux/cpuset.h>
 
 #define SLUB_UNIMPLEMENTED (SLAB_DEBUG_FREE | SLAB_DEBUG_INITIAL | \
 		SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER)
@@ -66,14 +67,14 @@
 #define ARCH_SLAB_MINALIGN sizeof(void *)
 #endif
 
-/* We need to bootstrap the slab with the active slabs in a special way */
-#define ACTIVE_SLAB_NR kmalloc_index(sizeof(struct active_slab))
-#define ACTIVE_SLAB_SLAB &kmalloc_caches[ACTIVE_SLAB_NR - KMALLOC_SHIFT_LOW]
+/* Special boot time slab for NUMA bootstrap */
+#define CACHELINE_SLAB_NR kmalloc_index(L1_CACHE_BYTES)
+#define CACHELINE_SLAB_SLAB &kmalloc_caches[CACHELINE_SLAB_NR - KMALLOC_SHIFT_LOW]
 
 #ifdef CONFIG_NUMA
-#define ACTIVE_SLAB(__s,__cpu) ((__s)->active[__cpu])
+#define CPU_SLAB(__s,__cpu) ((__s)->cpu[__cpu])
 #else
-#define ACTIVE_SLAB(__s,__cpu) (&(__s)->active[__cpu])
+#define CPU_SLAB(__s,__cpu) (&(__s)->cpu[__cpu])
 #endif
 
 /*********************************************************************
@@ -123,8 +124,8 @@ void unregister_slab(struct kmem_cache *
  * 1. slab_lock(page)
  * 2. slab->list_lock
  *
- * SLUB assigns one "active" slab for allocation to each processor.
- * Allocations only occur from these active slabs.
+ * SLUB assigns one cpu slab for allocation to each processor.
+ * Allocations only occur from these cpu slabs.
  *
 * If a slab is active then a workqueue thread checks every few seconds
 * seconds if the cpu slab is still in use. The cpu slab is pushed back
@@ -171,19 +172,25 @@ static __always_inline int slab_trylock(
 */
 static void __always_inline add_partial(struct kmem_cache *s, struct page *page)
 {
-	spin_lock(&s->list_lock);
-	s->nr_partial++;
-	list_add_tail(&page->lru, &s->partial);
-	spin_unlock(&s->list_lock);
+	int node = page_to_nid(page);
+	struct node_slab *n = s->node[node];
+
+	spin_lock(&n->list_lock);
+	n->nr_partial++;
+	list_add_tail(&page->lru, &n->partial);
+	spin_unlock(&n->list_lock);
 }
 
 static void __always_inline remove_partial(struct kmem_cache *s, struct page *page)
 {
-	spin_lock(&s->list_lock);
+	int node = page_to_nid(page);
+	struct node_slab *n = s->node[node];
+
+	spin_lock(&n->list_lock);
 	list_del(&page->lru);
-	s->nr_partial--;
-	spin_unlock(&s->list_lock);
+	n->nr_partial--;
+	spin_unlock(&n->list_lock);
 }
 
 /*
@@ -191,31 +198,65 @@ static void __always_inline remove_parti
 *
 * Must hold list_lock
 */
-static __always_inline int lock_and_del_slab(struct kmem_cache *s,
-						struct page *page)
+static __always_inline int lock_and_del_slab(struct node_slab *n,
+						struct page *page)
 {
 	if (slab_trylock(page)) {
 		list_del(&page->lru);
-		s->nr_partial--;
+		n->nr_partial--;
 		return 1;
 	}
 	return 0;
 }
 
-struct page *numa_partial(struct kmem_cache *s, gfp_t flags, int node)
+/*
+ * Try to get a partial slab from the indicated node
+ */
+static struct page *get_partial_node(struct node_slab *n)
+{
+	struct page *page;
+
+	/*
+	 * Racy check. If we mistakenly see no partial slabs then we
+	 * just allocate an empty slab. If we mistakenly try to get a
+	 * partial slab then get_partials() will return NULL.
+	 */
+	if (!n->nr_partial)
+		return NULL;
+
+	spin_lock(&n->list_lock);
+	list_for_each_entry(page, &n->partial, lru)
+		if (lock_and_del_slab(n, page))
+			goto out;
+	page = NULL;
+out:
+	spin_unlock(&n->list_lock);
+	return page;
+}
+
+struct page *get_any_partial(struct kmem_cache *s, int node, gfp_t flags)
 {
 #ifdef CONFIG_NUMA
-	int searchnode = (node == -1) ? numa_node_id() : node;
+	struct zonelist *zonelist = &NODE_DATA(slab_node(current->mempolicy))
+					->node_zonelists[gfp_zone(flags)];
+	struct zone **z;
 	struct page *page;
+	int nid;
 
 	/*
-	 * Search for slab on the right node
+	 * Look through allowed nodes for objects available
+	 * from existing per node queues.
 	 */
-	list_for_each_entry(page, &s->partial, lru)
-		if (likely(page_to_nid(page) == searchnode) &&
-				lock_and_del_slab(s, page))
-			return page;
+	for (z = zonelist->zones; *z; z++) {
+		nid = zone_to_nid(*z);
+		if (cpuset_zone_allowed_hardwall(*z, flags) &&
+				s->node[nid]) {
+			page = get_partial_node(s->node[nid]);
+			if (page)
+				return page;
+		}
+	}
 #endif
 	return NULL;
 }
 
@@ -226,36 +267,14 @@ struct page *numa_partial(struct kmem_ca
 static struct page *get_partial(struct kmem_cache *s, gfp_t flags, int node)
 {
 	struct page *page;
+	int searchnode = (node == -1) ? numa_node_id() : node;
 
-	/*
-	 * Racy check. If we mistakenly see no partial slabs then we
-	 * just allocate an empty slab. If we mistakenly try to get a
-	 * partial slab then get_partials() will return NULL.
-	 */
-	if (!s->nr_partial)
-		return NULL;
-
-	spin_lock(&s->list_lock);
-
-	/* First find a partial slab that fits the preferred NUMA node */
-	page = numa_partial(s, flags, node);
-	if (page)
-		goto out;
-
-	/* If we cannot fall back then fail */
-	if (NUMA_BUILD && !(flags & __GFP_THISNODE))
-		goto out;
-
-	/* Pick any partial slab */
-	list_for_each_entry(page, &s->partial, lru)
-		if (likely(lock_and_del_slab(s, page)))
-			goto out;
+	page = get_partial_node(s->node[searchnode]);
+	if (page || (flags & __GFP_THISNODE))
+		return page;
 
-	/* Nothing found */
-	page = NULL;
-out:
-	spin_unlock(&s->list_lock);
-	return page;
+	/* NUMA Fallback */
+	return get_any_partial(s, node, flags);
 }
 
 /*
@@ -300,7 +319,7 @@ static int check_valid_pointer(struct km
 /*
  * Determine if a certain object on a page is on the freelist and
- * therefore free. Must hold the slab lock for active slabs to
+ * therefore free. Must hold the slab lock for cpu slabs to
  * guarantee that the chains are consistent.
  */
 static int on_freelist(struct kmem_cache *s, struct page *page, void *search)
@@ -382,7 +401,7 @@ static void rcu_free_slab(struct rcu_hea
 
 static void discard_slab(struct kmem_cache *s, struct page *page)
 {
-	atomic_long_dec(&s->nr_slabs);
+	atomic_long_dec(&s->node[page_to_nid(page)]->nr_slabs);
 
 	if (s->flags & SLAB_DESTROY_BY_RCU) {
 		struct rcu_head *head = (void *)&page->lru;
@@ -402,6 +421,7 @@ static struct page *new_slab(struct kmem
 	int pages = 1 << s->order;
 	void *start;
 	void *end;
+	struct node_slab *n;
 
 	if (flags & __GFP_NO_GROW)
 		return NULL;
@@ -420,7 +440,10 @@ static struct page *new_slab(struct kmem
 	if (!page)
 		return NULL;
 
-	atomic_long_inc(&s->nr_slabs);
+	n = s->node[page_to_nid(page)];
+	if (n)
+		atomic_long_inc(&n->nr_slabs);
+
 	mod_zone_page_state(page_zone(page),
 		(s->flags & SLAB_RECLAIM_ACCOUNT) ?
 			NR_SLAB_RECLAIMABLE : NR_SLAB_UNRECLAIMABLE,
@@ -484,9 +507,9 @@ static void __always_inline putback_slab
 }
 
 /*
- * Remove the currently active slab
+ * Remove the current cpu slab
 */
-static void deactivate_slab(struct active_slab *a)
+static void deactivate_slab(struct cpu_slab *a)
 {
 	struct page *page = a->page;
 	struct kmem_cache *s = a->slab;
@@ -498,7 +521,7 @@ static void deactivate_slab(struct activ
 		 * freelists.
 		 *
 		 * Merge the two freelists. The freelist in the
-		 * active slab comes first.
+		 * cpu slab comes first.
		 */
 		void **freelist = page->freelist;
 		void **p;
@@ -525,14 +548,14 @@ static void deactivate_slab(struct activ
 }
 
 /*
- * Unconditionally flush any active slabs back to partial lists.
+ * Unconditionally flush any cpu slabs back to partial lists.
 *
 * Called from IPI handler with interrupts disabled.
 */
-static void flush_active(void *d)
+static void flush_cpu(void *d)
 {
 	struct kmem_cache *s = d;
-	struct active_slab *a = ACTIVE_SLAB(s, smp_processor_id());
+	struct cpu_slab *a = CPU_SLAB(s, smp_processor_id());
 
 	if (likely(a->page)) {
 		slab_lock(a->page);
@@ -545,14 +568,14 @@ static void flush_active(void *d)
 
 #ifdef CONFIG_SMP
 /*
- * Check for a active slab and if it has not
+ * Check for a cpu slab and if it has not
 * been references flush it back to the partial list.
 *
 * Called from kevent workqueue.
 */
-void check_flush_active(struct work_struct *w)
+void check_flush_cpu(struct work_struct *w)
 {
-	struct active_slab *a = container_of(w, struct active_slab, flush.work);
+	struct cpu_slab *a = container_of(w, struct cpu_slab, flush.work);
 
 	if (!a->page)
 		return;
@@ -573,19 +596,19 @@ void check_flush_active(struct work_stru
 
 static void drain_all(struct kmem_cache *s)
 {
-	on_each_cpu(flush_active, s , 1, 1);
+	on_each_cpu(flush_cpu, s , 1, 1);
 }
 
 static __always_inline void *allocate(struct kmem_cache *s,
 				gfp_t gfpflags, int node)
 {
-	struct active_slab *a;
+	struct cpu_slab *a;
 	void **object;
 	unsigned long flags;
 
 	local_irq_save(flags);
-	a = ACTIVE_SLAB(s, smp_processor_id());
+	a = CPU_SLAB(s, smp_processor_id());
 	if (unlikely(!a->page))
 		goto new_slab;
@@ -640,7 +663,7 @@ new_slab:
 	 * We may have reenabled interrupts during the allocation
 	 * Verify the state of the slab.
 	 */
-	a = ACTIVE_SLAB(s, smp_processor_id());
+	a = CPU_SLAB(s, smp_processor_id());
 	if (a->page)
 		/*
 		 * Someone else already allocated a page. Drop the
@@ -689,25 +712,36 @@ EXPORT_SYMBOL(kmem_cache_alloc);
 #ifdef CONFIG_NUMA
 /*
- * Bootstrap function to allow the allocation of active_slabs without
- * having active slabs yet.
+ * Bootstrap function to allow the allocation of cpu_slabs without
+ * having cpu slabs yet.
 */
-static void * __init early_active_slab_alloc(int node)
+static void * __init __early_cacheline_alloc(int node, struct page **pp)
 {
-	struct kmem_cache *s = ACTIVE_SLAB_SLAB;
-	struct page *page;
+	struct kmem_cache *s = CACHELINE_SLAB_SLAB;
+	struct page *page = *pp;
 	void **object;
 
-	page = get_partial(s, GFP_KERNEL, node);
 	if (!page) {
 		page = new_slab(s, GFP_KERNEL, node);
-
+		*pp = page;
 		BUG_ON(!page);
 		slab_lock(page);
 	}
 	object = page->freelist;
 	page->freelist = object[s->offset];
 	page->inuse++;
+	return object;
+}
+
+static void * __init early_cacheline_alloc(int node)
+{
+	struct kmem_cache *s = CACHELINE_SLAB_SLAB;
+	struct page *page;
+	void *object;
+	struct node_slab *n = s->node[node];
+
+	page = get_partial_node(n);
+	object = __early_cacheline_alloc(node, &page);
 	putback_slab(s, page);
 	return object;
 }
@@ -725,7 +759,7 @@ void kmem_cache_free(struct kmem_cache *
 	void *prior;
 	void **object = (void *)x;
 	unsigned long flags;
-	struct active_slab *a;
+	struct cpu_slab *a;
 
 	if (!object)
 		return;
@@ -754,7 +788,7 @@ void kmem_cache_free(struct kmem_cache *
 	slab_unlock(page);
 #endif
 
-	a = ACTIVE_SLAB(s, smp_processor_id());
+	a = CPU_SLAB(s, smp_processor_id());
 	if (a->page == page) {
 		void **object = x;
@@ -929,31 +963,62 @@ int slab_is_available(void)
 	return slab_state == UP;
 }
 
-static void alloc_active(struct kmem_cache *s, int cpu)
+static void alloc_cpu(struct kmem_cache *s, int cpu)
 {
-	struct active_slab *a;
+	struct cpu_slab *a;
+	int node = cpu_to_node(cpu);
 
 #ifdef CONFIG_NUMA
-	if (slab_state == DOWN) {
-		BUG_ON(s != ACTIVE_SLAB_SLAB);
-		a = early_active_slab_alloc(cpu_to_node(cpu));
-	} else
-		a = kmem_cache_alloc_node(ACTIVE_SLAB_SLAB,
-				GFP_KERNEL, cpu_to_node(cpu));
+	if (slab_state == DOWN)
+		a = early_cacheline_alloc(node);
+	else
+		a = kmem_cache_alloc_node(CACHELINE_SLAB_SLAB,
				GFP_KERNEL, node);
 
 	BUG_ON(!a);
-	s->active[cpu] = a;
+
+	s->cpu[cpu] = a;
 #else
-	a = ACTIVE_SLAB(s, cpu);
+	a = CPU_SLAB(s, cpu);
 #endif
 #ifdef CONFIG_SMP
 	a->flush_active = 0;
-	INIT_DELAYED_WORK(&a->flush, check_flush_active);
+	INIT_DELAYED_WORK(&a->flush, check_flush_cpu);
 #endif
 	a->page = NULL;
 	a->slab = s;
 	a->referenced = 0;
 }
 
+static void alloc_node(struct kmem_cache *s, int node)
+{
+	struct node_slab *n;
+	struct page *page = NULL;
+
+#ifdef CONFIG_NUMA
+	if (slab_state == DOWN) {
+		page = new_slab(s, GFP_KERNEL, node);
+		BUG_ON(!page);
+		slab_lock(page);
+		n = __early_cacheline_alloc(node, &page);
+	} else
+		n = kmem_cache_alloc_node(CACHELINE_SLAB_SLAB,
+				GFP_KERNEL, node);
+
+	BUG_ON(!n);
+
+	s->node[node] = n;
+#else
+	n = s->node[0];
+#endif
+	spin_lock_init(&n->list_lock);
+	INIT_LIST_HEAD(&n->partial);
+	if (page) {
+		putback_slab(s, page);
+		atomic_long_set(&n->nr_slabs, 1);
+	} else
+		atomic_long_set(&n->nr_slabs, 0);
+}
+
 int kmem_cache_open(struct kmem_cache *s, const char *name,
 		size_t size, size_t align, unsigned long flags,
@@ -961,13 +1026,12 @@ int kmem_cache_open(struct kmem_cache *s
 		void (*dtor)(void *, struct kmem_cache *, unsigned long))
 {
 	int cpu;
+	int node;
 
 	BUG_ON(flags & SLUB_UNIMPLEMENTED);
 	memset(s, 0, sizeof(struct kmem_cache));
-	atomic_long_set(&s->nr_slabs, 0);
 	atomic_set(&s->refcount, 1);
-	spin_lock_init(&s->list_lock);
-	INIT_LIST_HEAD(&s->partial);
+
 	s->name = name;
 	s->ctor = ctor;
 	s->dtor = dtor;
@@ -1015,8 +1079,11 @@ int kmem_cache_open(struct kmem_cache *s
 	if (!s->objects)
 		goto error;
 
+	for_each_online_node(node)
+		alloc_node(s, node);
+
 	for_each_online_cpu(cpu)
-		alloc_active(s, cpu);
+		alloc_cpu(s, cpu);
 
 	register_slab(s);
 	return 1;
@@ -1130,20 +1197,20 @@ static int move_slab_objects(struct kmem
 *
 * Returns the number of slabs freed.
 */
-int kmem_cache_defrag(struct kmem_cache *s,
+
+static unsigned int defrag_on_node(struct kmem_cache *s, int node,
 		int (*move_object)(struct kmem_cache *, void *))
 {
-	unsigned long flags;
 	int slabs_freed = 0;
 	int i;
-
-	drain_all(s);
+	struct node_slab *n = s->node[node];
+	unsigned long flags;
 
 	local_irq_save(flags);
-	for(i = 0; s->nr_partial > 1 && i < s->nr_partial - 1; i++ ) {
+	for (i = 0; n->nr_partial > 1 && i < n->nr_partial - 1; i++ ) {
 		struct page * page;
 
-		page = get_partial(s, GFP_KERNEL, -1);
+		page = get_partial_node(n);
 
 		if (!page)
 			break;
@@ -1167,7 +1234,17 @@ int kmem_cache_defrag(struct kmem_cache
 	}
 	local_irq_restore(flags);
 	return slabs_freed;
+}
+int kmem_cache_defrag(struct kmem_cache *s,
+		int (*move_object)(struct kmem_cache *, void *))
+{
+	int node;
+
+	drain_all(s);
+	for_each_online_node(node)
+		defrag_on_node(s, node, move_object);
+	return 0;
 }
 EXPORT_SYMBOL(kmem_cache_defrag);
@@ -1177,37 +1254,37 @@ static struct kmem_cache *kmem_cache_dup
 	return s;
 }
 
-static int free_list(struct kmem_cache *s, struct list_head *list)
+static int free_list(struct node_slab *n)
 {
 	int slabs_inuse = 0;
 	unsigned long flags;
 	struct page *page, *h;
 
-	spin_lock_irqsave(&s->list_lock, flags);
-	list_for_each_entry_safe(page, h, list, lru)
+	spin_lock_irqsave(&n->list_lock, flags);
+	list_for_each_entry_safe(page, h, &n->partial, lru)
 		if (!page->inuse) {
-			list_del(&s->partial);
-			discard_slab(s, page);
+			list_del(&page->lru);
+			discard_slab(page->slab, page);
 		} else
 			slabs_inuse++;
 
-	spin_unlock_irqrestore(&s->list_lock, flags);
+	spin_unlock_irqrestore(&n->list_lock, flags);
 	return slabs_inuse;
 }
 
-static void free_active(struct kmem_cache *s, int cpu)
+static void free_cpu(struct kmem_cache *s, int cpu)
 {
 #ifdef CONFIG_NUMA
-	kfree(ACTIVE_SLAB(s, cpu));
-	s->active[cpu] = NULL;
+	kfree(CPU_SLAB(s, cpu));
+	s->cpu[cpu] = NULL;
 #endif
 }
 
-static void release_active(struct kmem_cache *s)
+static void release_cpu(struct kmem_cache *s)
 {
 	int cpu;
 
 	for_each_online_cpu(cpu)
-		free_active(s, cpu);
+		free_cpu(s, cpu);
 }
 
 /*
@@ -1216,17 +1293,25 @@ static void release_active(struct kmem_c
 */
 int kmem_cache_close(struct kmem_cache *s)
 {
+	unsigned long remainder = 0;
+	int node;
+
 	if (!atomic_dec_and_test(&s->refcount))
 		return 0;
 
 	drain_all(s);
-	free_list(s, &s->partial);
+	for_each_online_node(node) {
+		struct node_slab *n = s->node[node];
+
+		free_list(n);
+		remainder += atomic_long_read(&n->nr_slabs);
+	}
 
-	if (atomic_long_read(&s->nr_slabs))
+	if (remainder)
 		return 1;
 
 	unregister_slab(s);
-	release_active(s);
+	release_cpu(s);
 	return 0;
 }
 EXPORT_SYMBOL(kmem_cache_close);
@@ -1243,58 +1328,68 @@ void kmem_cache_destroy(struct kmem_cach
 EXPORT_SYMBOL(kmem_cache_destroy);
 
 static unsigned long count_objects(struct kmem_cache *s,
-		struct list_head *list, unsigned long *nodes)
+		unsigned long *nr_partial, unsigned long *nodes)
 {
 	int count = 0;
 	struct page *page;
 	unsigned long flags;
+	unsigned long partial = 0;
+	int node;
+
+	for_each_online_node(node) {
+		struct node_slab *n = s->node[node];
 
-	spin_lock_irqsave(&s->list_lock, flags);
-	list_for_each_entry(page, list, lru) {
-		count += page->inuse;
-		nodes[page_to_nid(page)]++;
+		spin_lock_irqsave(&n->list_lock, flags);
+		list_for_each_entry(page, &n->partial, lru) {
+			count += page->inuse;
+			nodes[page_to_nid(page)]++;
+			partial++;
+		}
+		spin_unlock_irqrestore(&n->list_lock, flags);
 	}
-	spin_unlock_irqrestore(&s->list_lock, flags);
+
+	*nr_partial = partial;
 	return count;
 }
 
 static unsigned long slab_objects(struct kmem_cache *s,
-	unsigned long *p_total, unsigned long *p_active,
+	unsigned long *p_total, unsigned long *p_cpu,
 	unsigned long *p_partial, unsigned long *nodes)
 {
-	int partial;
-	int nr_slabs = atomic_read(&s->nr_slabs);
-	int active = 0;		/* Active slabs */
-	int nr_active = 0;	/* Objects in active slabs */
+	unsigned long nr_partial = 0;		/* Partial slabs */
+	unsigned long nr_slabs = 0;		/* Total slabs */
+	unsigned long nr_cpu = 0;		/* Cpu Slabs */
+	unsigned long objects_partial = 0;	/* Objects in partial slabs */
+	unsigned long objects_cpu = 0;		/* Objects in cpu slabs */
 	int cpu;
+	int node;
 
-	memset(nodes, 0, sizeof(void *) * MAX_NUMNODES);
+	for_each_online_node(node)
+		nr_slabs += nodes[node] =
+			atomic_long_read(&s->node[node]->nr_slabs);
 
 	for_each_possible_cpu(cpu) {
-		struct active_slab *a = ACTIVE_SLAB(s, cpu);
+		struct cpu_slab *a = CPU_SLAB(s, cpu);
 
 		if (a->page) {
-			nr_active++;
-			active += a->page->inuse;
+			nr_cpu++;
+			objects_cpu += a->page->inuse;
 			nodes[page_to_nid(a->page)]++;
 		}
 	}
 
-	partial = count_objects(s, &s->partial, nodes);
-
-	/* Missing the accounting of full slabs per node */
+	objects_partial = count_objects(s, &nr_partial, nodes);
 
 	if (p_partial)
-		*p_partial = s->nr_partial;
+		*p_partial = nr_partial;
 
-	if (p_active)
-		*p_active = nr_active;
+	if (p_cpu)
+		*p_cpu = nr_cpu;
 
 	if (p_total)
 		*p_total = nr_slabs;
 
-	return partial + active +
-		(nr_slabs - s->nr_partial - nr_active) * s->objects;
+	return objects_partial + objects_cpu +
+		(nr_slabs - nr_partial - nr_cpu) * s->objects;
 }
 
 /*
@@ -1309,11 +1404,11 @@ static int __cpuinit slab_cpuup_callback
 	switch (action) {
 	case CPU_UP_PREPARE:
-		for_all_slabs(alloc_active, cpu);
+		for_all_slabs(alloc_cpu, cpu);
 		break;
 	case CPU_UP_CANCELED:
 	case CPU_DEAD:
-		for_all_slabs(free_active, cpu);
+		for_all_slabs(free_cpu, cpu);
 		break;
 	default:
 		break;
 	}
@@ -1418,20 +1513,26 @@
 void __init kmem_cache_init(void)
 {
 	int i;
 
+#ifdef CONFIG_NUMA
 	/*
-	 * NUMA Bootstrap only works if the slab for the active_slab
-	 * structure does not use an EXTRA slab.
+	 * NUMA Bootstrap only works if the slab for the cpu_slab
+	 * structure does not use an EXTRA slab and if both are smaller
+	 * than a cacheline.
	 */
-	BUG_ON(ACTIVE_SLAB_NR > KMALLOC_SHIFT_HIGH || ACTIVE_SLAB_NR < 0);
+	BUG_ON(CACHELINE_SLAB_NR > KMALLOC_SHIFT_HIGH || CACHELINE_SLAB_NR < 0);
+	BUG_ON(sizeof(struct cpu_slab) > L1_CACHE_BYTES);
+	BUG_ON(sizeof(struct node_slab) > L1_CACHE_BYTES);
+#endif
 
-	kmem_cache_open(ACTIVE_SLAB_SLAB, "active_slab", 1 << ACTIVE_SLAB_NR,
-		ARCH_KMALLOC_MINALIGN, SLAB_PANIC, NULL, NULL);
+	kmem_cache_open(CACHELINE_SLAB_SLAB, "cpu_slab",
+		1 << CACHELINE_SLAB_NR, ARCH_KMALLOC_MINALIGN,
+		SLAB_PANIC, NULL, NULL);
 	slab_state = PARTIAL;
 
 	/* Power of two sized caches */
 	for (i = KMALLOC_SHIFT_LOW; i <= KMALLOC_SHIFT_HIGH; i++)
-		if (i != ACTIVE_SLAB_NR)
+		if (i != CACHELINE_SLAB_NR)
 			kmem_cache_open(
 				&kmalloc_caches[i - KMALLOC_SHIFT_LOW],
 				"kmalloc", 1 << i,
@@ -1553,7 +1654,7 @@ static void print_slabinfo_header(struct
 	 */
 	seq_puts(m, "slabinfo - version: 3.0\n");
 	seq_puts(m, "# name /"
-		"/ ");
+		"/ ");
 	seq_putc(m, '\n');
 }
@@ -1592,7 +1693,7 @@ static int s_show(struct seq_file *m, vo
 {
 	struct kmem_cache *s = p;
 	unsigned long total_slabs;
-	unsigned long active_slabs;
+	unsigned long cpu_slabs;
 	unsigned long partial_slabs;
 	unsigned long objects;
 	unsigned long nodes[MAX_NUMNODES];
@@ -1600,7 +1701,7 @@ static int s_show(struct seq_file *m, vo
 	int node;
 	char options[13];
 
-	objects = slab_objects(s, &total_slabs, &active_slabs,
+	objects = slab_objects(s, &total_slabs, &cpu_slabs,
 			&partial_slabs, nodes);
 	d = options;
 	if (s->ctor)
@@ -1631,7 +1732,7 @@ static int s_show(struct seq_file *m, vo
 	*d = 0;
 	seq_printf(m, "%-21s %7lu %2d %7u\t%lu/%lu/%lu\t%s",
 		s->name, objects, s->order, s->size, total_slabs,
-		partial_slabs, active_slabs, options);
+		partial_slabs, cpu_slabs, options);
 
 	for_each_online_node(node)
 		if (nodes[node])
@@ -1664,7 +1765,7 @@ EXPORT_SYMBOL(kmem_cache_shrink);
 
 /*****************************************************************
 * Generic reaper used to support the page allocator
- * (the active slabs are reaped by a per processor workqueue).
+ * (the cpu slabs are reaped by a per processor workqueue).
 ****************************************************************/
 
 /*