slab: put alien pointer array into the kmem_list3 structure. The alien pointer array is always allocated when the kmem_list3 structure is allocated. It is easiest if we just put them together. That saves us the logic to do a special allocation for the alien cache array and makes it similar to the kmem_cache structure that contains arrays of per cpu caches. Plus we do not have to check if the alien pointer is NULL anymore. Signed-off-by: Christoph Lameter Index: linux-2.6.18-rc3/mm/slab.c =================================================================== --- linux-2.6.18-rc3.orig/mm/slab.c 2006-08-03 23:00:05.252253747 -0700 +++ linux-2.6.18-rc3/mm/slab.c 2006-08-03 23:01:29.067381678 -0700 @@ -301,9 +301,9 @@ struct kmem_list3 { unsigned int colour_next; /* Per-node cache coloring */ spinlock_t list_lock; struct array_cache *shared; /* shared per node */ - struct array_cache **alien; /* on other nodes */ unsigned long next_reap; /* updated without locking */ int free_touched; /* updated without locking */ + struct array_cache *alien[MAX_NUMNODES]; }; /* @@ -963,44 +963,36 @@ static void *alternate_node_alloc(struct static void alloc_alien_cache(struct kmem_list3 *l3, int node) { - struct array_cache **ac_ptr; - int memsize = sizeof(void *) * MAX_NUMNODES; int i; - ac_ptr = kmalloc_node(memsize, GFP_KERNEL, node); - if (ac_ptr) { - for_each_node(i) { - if (i == node || !node_online(i)) { - ac_ptr[i] = NULL; - continue; - } - ac_ptr[i] = alloc_arraycache(node, ALIEN_LIMIT, 0xbaadf00d); - if (!ac_ptr[i]) { - for (i--; i <= 0; i--) - kfree(ac_ptr[i]); - kfree(ac_ptr); - goto fail; + for_each_node(i) { + if (i == node || !node_online(i)) { + l3->alien[i] = NULL; + continue; + } + l3->alien[i] = alloc_arraycache(node, ALIEN_LIMIT, 0xbaadf00d); + if (!l3->alien[i]) { + for (i--; i >= 0; i--) { + kfree(l3->alien[i]); + l3->alien[i] = NULL; } + goto fail; } } - l3->alien = ac_ptr; return; fail: printk(KERN_ERR "slab: alien cache alloc failed" "continuing without.\n"); 
- l3->alien = NULL; } static void free_alien_cache(struct kmem_list3 *l3) { int i; - if (!l3->alien) - return; - for_each_node(i) + for_each_node(i) { kfree(l3->alien[i]); - kfree(l3->alien); - l3->alien = NULL; + l3->alien[i] = NULL; + } } static void __drain_alien_cache(struct kmem_cache *cachep, @@ -1030,14 +1022,11 @@ static void __drain_alien_cache(struct k static void reap_alien(struct kmem_cache *cachep, struct kmem_list3 *l3) { int node = __get_cpu_var(reap_node); + struct array_cache *ac = l3->alien[node]; - if (l3->alien) { - struct array_cache *ac = l3->alien[node]; - - if (ac && ac->avail && spin_trylock_irq(&ac->lock)) { - __drain_alien_cache(cachep, ac, node); - spin_unlock_irq(&ac->lock); - } + if (ac && ac->avail && spin_trylock_irq(&ac->lock)) { + __drain_alien_cache(cachep, ac, node); + spin_unlock_irq(&ac->lock); } } @@ -1074,7 +1063,7 @@ static inline int cache_free_alien(struc l3 = cachep->nodelists[numa_node_id()]; STATS_INC_NODEFREES(cachep); - if (l3->alien && l3->alien[nodeid]) { + if (l3->alien[nodeid]) { alien = l3->alien[nodeid]; spin_lock(&alien->lock); if (unlikely(alien->avail == alien->limit)) { @@ -1215,7 +1204,6 @@ static int __devinit cpuup_callback(stru list_for_each_entry(cachep, &cache_chain, next) { struct array_cache *nc; struct array_cache *shared; - struct array_cache **alien; cpumask_t mask; mask = node_to_cpumask(node); @@ -1246,16 +1234,11 @@ static int __devinit cpuup_callback(stru l3->shared = NULL; } - alien = l3->alien; - l3->alien = NULL; - spin_unlock_irq(&l3->list_lock); kfree(shared); - if (alien) { - drain_alien_cache(cachep, l3); - free_alien_cache(l3); - } + drain_alien_cache(cachep, l3); + free_alien_cache(l3); free_array_cache: kfree(nc); } @@ -2302,7 +2285,7 @@ static void drain_cpu_caches(struct kmem check_irq_on(); for_each_online_node(node) { l3 = cachep->nodelists[node]; - if (l3 && l3->alien) + if (l3) drain_alien_cache(cachep, l3); }