author		Christoph Lameter <clameter@sgi.com>	2007-10-16 12:26:09 +0400
committer	Linus Torvalds <torvalds@woody.linux-foundation.org>	2007-10-16 20:43:01 +0400
commit		42a9fdbb12ac6c027b4b91ab9b5a60aa3a834489 (patch)
tree		d319573849af86fa405b93e5b93add0d34305db7
parent		4c93c355d5d563f300df7e61ef753d7a064411e9 (diff)
download	linux-42a9fdbb12ac6c027b4b91ab9b5a60aa3a834489.tar.xz
SLUB: Optimize cacheline use for zeroing
When zeroing, we touch a cacheline in the kmem_cache structure just to get
the object size. However, the hot paths in slab_alloc and slab_free do not
reference any other fields of kmem_cache, so we end up pulling in that
cacheline for this single access.
Add a new field to kmem_cache_cpu that contains the object size. The
kmem_cache_cpu cacheline is already touched in the hot paths, so we save
one cacheline on every slab_alloc if we zero.
We need to update the kmem_cache_cpu object size if an aliasing operation
changes the objsize of a non-debug slab.
Signed-off-by: Christoph Lameter <clameter@sgi.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
-rw-r--r--	include/linux/slub_def.h |  1
-rw-r--r--	mm/slub.c                | 14
2 files changed, 13 insertions, 2 deletions
diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
index f74716b59ce2..d65159d1d4f5 100644
--- a/include/linux/slub_def.h
+++ b/include/linux/slub_def.h
@@ -16,6 +16,7 @@ struct kmem_cache_cpu {
 	struct page *page;
 	int node;
 	unsigned int offset;
+	unsigned int objsize;
 };
 
 struct kmem_cache_node {
diff --git a/mm/slub.c b/mm/slub.c
index 6d4346ba0c29..1d48f383e97d 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1576,7 +1576,7 @@ static void __always_inline *slab_alloc(struct kmem_cache *s,
 	local_irq_restore(flags);
 
 	if (unlikely((gfpflags & __GFP_ZERO) && object))
-		memset(object, 0, s->objsize);
+		memset(object, 0, c->objsize);
 
 	return object;
 }
@@ -1858,8 +1858,9 @@ static void init_kmem_cache_cpu(struct kmem_cache *s,
 {
 	c->page = NULL;
 	c->freelist = NULL;
-	c->offset = s->offset / sizeof(void *);
 	c->node = 0;
+	c->offset = s->offset / sizeof(void *);
+	c->objsize = s->objsize;
 }
 
 static void init_kmem_cache_node(struct kmem_cache_node *n)
@@ -2852,12 +2853,21 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
 	down_write(&slub_lock);
 	s = find_mergeable(size, align, flags, name, ctor);
 	if (s) {
+		int cpu;
+
 		s->refcount++;
 		/*
 		 * Adjust the object sizes so that we clear
 		 * the complete object on kzalloc.
 		 */
 		s->objsize = max(s->objsize, (int)size);
+
+		/*
+		 * And then we need to update the object size in the
+		 * per cpu structures
+		 */
+		for_each_online_cpu(cpu)
+			get_cpu_slab(s, cpu)->objsize = s->objsize;
 		s->inuse = max_t(int, s->inuse, ALIGN(size, sizeof(void *)));
 		up_write(&slub_lock);
 		if (sysfs_slab_alias(s, name))
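The pattern behind the patch is easiest to see outside the kernel. Below is a
minimal userspace sketch, not kernel code: names such as toy_cache,
toy_cache_cpu and NR_CPUS are made up for illustration. It shows the same two
pieces as the diff above: the hot allocation path zeroes using an object size
cached in the small per-CPU structure it already touches, and the aliasing
path that grows objsize propagates the new value to every per-CPU copy, as
the for_each_online_cpu() loop in the patch does.

	/*
	 * Standalone sketch of the per-CPU cached-size idea.
	 * Hypothetical names; compile with any C99 compiler.
	 */
	#include <stdio.h>
	#include <stdlib.h>
	#include <string.h>

	#define NR_CPUS 4

	struct toy_cache_cpu {
		void **freelist;        /* hot: touched on every alloc/free */
		unsigned int objsize;   /* cached copy of toy_cache.objsize */
	};

	struct toy_cache {
		unsigned int objsize;   /* cold: rarely needed in the hot path */
		struct toy_cache_cpu cpu_slab[NR_CPUS];
	};

	static void toy_cache_init(struct toy_cache *s, unsigned int objsize)
	{
		s->objsize = objsize;
		for (int cpu = 0; cpu < NR_CPUS; cpu++) {
			s->cpu_slab[cpu].freelist = NULL;
			s->cpu_slab[cpu].objsize = objsize;
		}
	}

	/* Hot path: zero via the per-CPU copy, like memset(object, 0, c->objsize). */
	static void *toy_alloc_zeroed(struct toy_cache *s, int cpu)
	{
		struct toy_cache_cpu *c = &s->cpu_slab[cpu];
		void *object = malloc(c->objsize);

		if (object)
			memset(object, 0, c->objsize);
		return object;
	}

	/* Alias path: growing objsize must also update every per-CPU copy. */
	static void toy_cache_alias(struct toy_cache *s, unsigned int size)
	{
		if (size > s->objsize)
			s->objsize = size;
		for (int cpu = 0; cpu < NR_CPUS; cpu++)
			s->cpu_slab[cpu].objsize = s->objsize;
	}

	int main(void)
	{
		struct toy_cache s;

		toy_cache_init(&s, 32);
		void *obj = toy_alloc_zeroed(&s, 0);
		printf("allocated %u zeroed bytes\n", s.cpu_slab[0].objsize);
		free(obj);

		toy_cache_alias(&s, 64);
		printf("after alias: per-cpu objsize = %u\n", s.cpu_slab[0].objsize);
		return 0;
	}

The trade-off is a few bytes of duplicated state per CPU in exchange for one
fewer cold cacheline reference in the allocation hot path; the only cost is
that every place that changes s->objsize has to keep the per-CPU copies
coherent, which is exactly what the kmem_cache_create() hunk adds.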