SLUB: Support virtual fallback via SLAB_VFALLBACK

SLAB_VFALLBACK can be specified for selected slab caches. If fallback is
available then the conservative settings for higher order allocations are
overridden. We then request an order that can accommodate at minimum
100 objects. The size of an individual slab allocation is allowed to reach
up to 64k (order 4 on i386, order 2 on IA64).

Signed-off-by: Christoph Lameter

---
 include/linux/slab.h     |    1 +
 include/linux/slub_def.h |    1 +
 mm/slub.c                |   38 +++++++++++++++++++++++---------------
 3 files changed, 25 insertions(+), 15 deletions(-)

Index: linux-2.6/include/linux/slab.h
===================================================================
--- linux-2.6.orig/include/linux/slab.h	2007-10-03 18:30:45.000000000 -0700
+++ linux-2.6/include/linux/slab.h	2007-10-03 18:47:33.000000000 -0700
@@ -19,6 +19,7 @@
  * The ones marked DEBUG are only valid if CONFIG_SLAB_DEBUG is set.
  */
 #define SLAB_DEBUG_FREE		0x00000100UL	/* DEBUG: Perform (expensive) checks on free */
+#define SLAB_VFALLBACK		0x00000200UL	/* May fall back to vmalloc */
 #define SLAB_RED_ZONE		0x00000400UL	/* DEBUG: Red zone objs in a cache */
 #define SLAB_POISON		0x00000800UL	/* DEBUG: Poison objects */
 #define SLAB_HWCACHE_ALIGN	0x00002000UL	/* Align objs on cache lines */
Index: linux-2.6/mm/slub.c
===================================================================
--- linux-2.6.orig/mm/slub.c	2007-10-03 18:47:27.000000000 -0700
+++ linux-2.6/mm/slub.c	2007-10-03 18:47:33.000000000 -0700
@@ -1049,11 +1049,7 @@ static struct page *allocate_slab(struct
 	struct page * page;
 	int pages = 1 << s->order;
 
-	if (s->order)
-		flags |= __GFP_COMP;
-
-	if (s->flags & SLAB_CACHE_DMA)
-		flags |= SLUB_DMA;
+	flags |= s->gfpflags;
 
 	if (node == -1)
 		page = alloc_pages(flags, s->order);
@@ -1789,10 +1785,9 @@ static inline int slab_order(int size, i
 	return order;
 }
 
-static inline int calculate_order(int size)
+static inline int calculate_order(int size, int min_objects, int max_order)
 {
 	int order;
-	int min_objects;
 	int fraction;
 
 	/*
@@ -1803,13 +1798,12 @@ static inline int calculate_order(int si
 	 * First we reduce the acceptable waste in a slab. Then
 	 * we reduce the minimum objects required in a slab.
 	 */
-	min_objects = slub_min_objects;
 	while (min_objects > 1) {
 		fraction = 8;
 		while (fraction >= 4) {
 			order = slab_order(size, min_objects,
-				slub_max_order, fraction);
-			if (order <= slub_max_order)
+				max_order, fraction);
+			if (order <= max_order)
 				return order;
 			fraction /= 2;
 		}
@@ -1820,8 +1814,8 @@ static inline int calculate_order(int si
 	 * We were unable to place multiple objects in a slab. Now
 	 * lets see if we can place a single object there.
	 */
-	order = slab_order(size, 1, slub_max_order, 1);
-	if (order <= slub_max_order)
+	order = slab_order(size, 1, max_order, 1);
+	if (order <= max_order)
 		return order;
 
 	/*
@@ -2068,10 +2062,24 @@ static int calculate_sizes(struct kmem_c
 	size = ALIGN(size, align);
 	s->size = size;
-	s->order = calculate_order(size);
+	if (s->flags & SLAB_VFALLBACK)
+		s->order = calculate_order(size, 100, 18 - PAGE_SHIFT);
+	else
+		s->order = calculate_order(size, slub_min_objects,
+						slub_max_order);
+
 	if (s->order < 0)
 		return 0;
 
+	if (s->order)
+		s->gfpflags |= __GFP_COMP;
+
+	if (s->flags & SLAB_VFALLBACK)
+		s->gfpflags |= __GFP_VFALLBACK;
+
+	if (s->flags & SLAB_CACHE_DMA)
+		s->flags |= SLUB_DMA;
+
 	/*
 	 * Determine the number of objects per slab
 	 */
@@ -3057,7 +3065,7 @@ static int add_location(struct loc_track
 
 				cpu_set(track->cpu, l->cpus);
 			}
-			node_set(page_to_nid(virt_to_page(track)), l->nodes);
+			node_set(page_to_nid(virt_to_head_page(track)), l->nodes);
 			return 1;
 		}
 
@@ -3088,7 +3096,7 @@ static int add_location(struct loc_track
 	cpus_clear(l->cpus);
 	cpu_set(track->cpu, l->cpus);
 	nodes_clear(l->nodes);
-	node_set(page_to_nid(virt_to_page(track)), l->nodes);
+	node_set(page_to_nid(virt_to_head_page(track)), l->nodes);
 	return 1;
 }
 
Index: linux-2.6/include/linux/slub_def.h
===================================================================
--- linux-2.6.orig/include/linux/slub_def.h	2007-10-03 18:30:45.000000000 -0700
+++ linux-2.6/include/linux/slub_def.h	2007-10-03 18:47:33.000000000 -0700
@@ -31,6 +31,7 @@ struct kmem_cache {
 	int objsize;		/* The size of an object without meta data */
 	int offset;		/* Free pointer offset. */
 	int order;
+	int gfpflags;		/* Allocation flags */
 
 	/*
 	 * Avoid an extra cache line for UP, SMP and for the node local to