From 9cb2e7af1a0d09623de6e6940c04a1339447760f Mon Sep 17 00:00:00 2001
From: Christoph Lameter <clameter@sgi.com>
Date: Sat, 27 Oct 2007 19:32:51 -0700
Subject: [PATCH] SLUB: Avoid referencing kmem_cache structure in __slab_alloc

We need the number of objects per slab in the first part of
__slab_alloc(), which is still pretty hot. Copy the number of objects
per slab into the kmem_cache_cpu structure. That way we can get the
value from a cache line that we already need to touch. This brings
the kmem_cache_cpu structure up to 4 even words.

There is no increase in the size of kmem_cache_cpu since the size of the
object is rounded up to the next word.

Signed-off-by: Christoph Lameter <clameter@sgi.com>
---
 include/linux/slub_def.h |    1 +
 mm/slub.c                |    3 ++-
 2 files changed, 3 insertions(+), 1 deletions(-)

diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
index 40801e7..9840c9c 100644
--- a/include/linux/slub_def.h
+++ b/include/linux/slub_def.h
@@ -17,6 +17,7 @@ struct kmem_cache_cpu {
 	int node;
 	unsigned int offset;
 	unsigned int objsize;
+	unsigned int objects;
 };
 
 struct kmem_cache_node {
diff --git a/mm/slub.c b/mm/slub.c
index 1fefe23..9e630d9 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1512,7 +1512,7 @@ load_freelist:
 
 	object = c->page->freelist;
 	c->freelist = object[c->offset];
-	c->page->inuse = s->objects;
+	c->page->inuse = c->objects;
 	c->page->freelist = c->page->end;
 	c->node = page_to_nid(c->page);
 unlock_out:
@@ -1878,6 +1878,7 @@ static void init_kmem_cache_cpu(struct kmem_cache *s,
 	c->node = 0;
 	c->offset = s->offset / sizeof(void *);
 	c->objsize = s->objsize;
+	c->objects = s->objects;
 }
 
 static void init_kmem_cache_node(struct kmem_cache_node *n)
-- 
1.5.3.6

