Index: linux-2.6.19-mm1/mm/slub.c
===================================================================
--- linux-2.6.19-mm1.orig/mm/slub.c	2006-12-14 22:10:02.649151293 -0800
+++ linux-2.6.19-mm1/mm/slub.c	2006-12-14 22:10:06.142100887 -0800
@@ -213,6 +213,7 @@ static void free_slab(struct kmem_cache
 	__free_slab(s, page);
 }
 
+static int lock_count = 0;
 /*
  * Locking for each individual slab using the pagelock
  */
@@ -221,6 +222,7 @@ static __always_inline void slab_lock(st
 #ifdef CONFIG_SMP
 	bit_spin_lock(PG_locked, &page->flags);
 #endif
+	lock_count++;
 }
 
 static __always_inline void slab_unlock(struct page *page)
@@ -228,6 +230,7 @@ static __always_inline void slab_unlock(
 #ifdef CONFIG_SMP
 	bit_spin_unlock(PG_locked, &page->flags);
 #endif
+	lock_count--;
 }
 
 /*
@@ -235,6 +238,10 @@ static __always_inline void slab_unlock(
  */
 static void __always_inline add_partial(struct kmem_cache *s, struct page *page)
 {
+	if (page->inuse == s->objects) {
+		printk("Slab %s page=%p adding fully used slab\n", s->name, page);
+		dump_stack();
+	}
 	spin_lock(&s->list_lock);
 	s->nr_partial++;
 	list_add_tail(&page->lru, &s->partial);
@@ -261,6 +268,7 @@ static __always_inline int lock_and_del_
 	if (bit_spin_trylock(PG_locked, &page->flags)) {
 		list_del(&page->lru);
 		s->nr_partial--;
+		lock_count++;
 		return 1;
 	}
 	return 0;
@@ -410,6 +418,8 @@ void check_free_chain(struct kmem_cache
  */
 static void discard_slab(struct kmem_cache *s, struct page *page)
 {
+	printk(KERN_CRIT "discard_slab(%s, %p)\n", s->name, page);
+
 	atomic_long_dec(&s->nr_slabs);
 
 	page->mapping = NULL;
@@ -469,6 +479,7 @@ static struct page *new_slab(struct kmem
  */
 static void __always_inline putback_slab(struct kmem_cache *s, struct page *page)
 {
+	printk(KERN_CRIT "putback_slab(%s,%p) inuse=%d objects=%d\n", s->name, page, page->inuse, s->objects);
 	if (page->inuse) {
 		if (page->inuse < s->objects)
 			add_partial(s, page);
@@ -550,6 +561,8 @@ void check_flush_active(struct work_stru
 {
 	struct active_slab *a = container_of(w, struct active_slab, flush.work);
 
+	printk("check_flush_active: a=%p a->page=%p a->referenced=%d a->flush_active=%d\n",
+		a, a->page, a->referenced, a->flush_active);
 	if (!a->page)
 		return;
 
@@ -592,6 +605,7 @@ static __always_inline void *allocate(st
 	printk(KERN_CRIT "allocate(%s,%x,%d)\n", s->name, gfpflags, node);
 
 	local_irq_save(flags);
+	BUG_ON(lock_count);
 	a = ACTIVE_SLAB(s, smp_processor_id());
 	if (unlikely(!a->page))
 		goto new_slab;
@@ -677,7 +691,16 @@ get_object:
 	}
 #endif
 out:
+	check_free_chain(s, a->page);
+	if (lock_count) {
+		printk(KERN_CRIT "lock_count=%d\n", lock_count);
+		BUG();
+	};
 	local_irq_restore(flags);
+//	printk(KERN_CRIT "return %p active freelist=%p nr_free=%d page "
+//		"inuse=%d freelist=%p\n", object, a->freelist, a->nr_free,
+//		a->page ? a->page->inuse : -1,
+//		a->page ? a->page->freelist : (void *)-1L);
 	return object;
 }
 
@@ -727,6 +750,7 @@ void kmem_cache_free(struct kmem_cache *
 	unsigned long flags;
 	struct active_slab *a;
 
+	BUG_ON(lock_count);
 	if (!object)
 		return;
 
@@ -966,6 +990,9 @@ int kmem_cache_open(struct kmem_cache *s
 	printk("kmem_cache_open(%p, %s, %ld, %ld, %lx, %p, %p)\n",
		s, name, (long)size, (long)align, flags, ctor, dtor);
 
+	printk("kmem_cache_open(%p, %s, %ld, %ld, %lx, %p, %p)\n",
+		s, name, (long)size, (long)align, flags, ctor, dtor);
+
 	BUG_ON(flags & SLUB_UNIMPLEMENTED);
 	memset(s, 0, sizeof(struct kmem_cache));
 	atomic_long_set(&s->nr_slabs, 0);
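
For reference, here is a stand-alone sketch of the lock_count balance check the
patch wires into slab_lock()/slab_unlock(): every lock bumps a depth counter,
every unlock drops it, and the allocator's entry and exit paths assert that the
counter is zero, so an unbalanced lock/unlock pair fires immediately instead of
deadlocking later. This is hypothetical user-space C for illustration only
(assert() standing in for BUG_ON(), all names invented here), not kernel code,
and like the patch's plain int it is not SMP-safe; it is a debug aid, not an
implementation.

/* Hypothetical user-space sketch of the patch's lock_count technique. */
#include <assert.h>
#include <stdio.h>

static int lock_count;		/* outstanding slab-lock depth */

static void slab_lock_sketch(void)
{
	/* stands in for bit_spin_lock(PG_locked, &page->flags) */
	lock_count++;
}

static void slab_unlock_sketch(void)
{
	/* stands in for bit_spin_unlock(PG_locked, &page->flags) */
	lock_count--;
}

static void allocate_sketch(void)
{
	/* mirrors the BUG_ON(lock_count) at the top of allocate() */
	assert(lock_count == 0);

	slab_lock_sketch();
	/* ... pull an object off the slab freelist ... */
	slab_unlock_sketch();

	/*
	 * Mirrors the check before local_irq_restore(): a nonzero
	 * count here means some lock/unlock pair was unbalanced.
	 */
	assert(lock_count == 0);
}

int main(void)
{
	allocate_sketch();
	printf("lock_count balanced: %d\n", lock_count);
	return 0;
}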