author    Joe Perches <joe@perches.com>        2013-09-11 04:02:51 +0400
committer Pekka Enberg <penberg@kernel.org>    2014-02-08 14:19:02 +0400
commit    5087c8229986cc502c807a15f8ea416b0ef22346 (patch)
tree      ad1380499f4d5b8e732fef94ebc8fa248f374a38 /mm/slab.c
parent    8fc9cf420b369ad1d8c2e66fb552a985c4676073 (diff)
download  linux-5087c8229986cc502c807a15f8ea416b0ef22346.tar.xz
slab: Make allocations with GFP_ZERO slightly more efficient
Use the likely() mechanism already present around the valid-pointer tests to better choose when to memset allocations to 0 when __GFP_ZERO is set.

Acked-by: Christoph Lameter <cl@linux.com>
Signed-off-by: Joe Perches <joe@perches.com>
Signed-off-by: Pekka Enberg <penberg@kernel.org>
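To make the restructuring concrete, here is a minimal, self-contained sketch of the same pattern outside the kernel: the zeroing test is nested under the existing likely(ptr) branch so the pointer is only checked for NULL once on the common path. This is an illustration only, assuming a GCC/Clang-compatible compiler for __builtin_expect; the toy_alloc_*() functions, TOY_GFP_ZERO flag, obj_size constant, and instrument() hook are made-up names and not kernel APIs.

#include <stdlib.h>
#include <string.h>

#define likely(x)   __builtin_expect(!!(x), 1)
#define unlikely(x) __builtin_expect(!!(x), 0)

#define TOY_GFP_ZERO 0x1u
enum { obj_size = 64 };

/* Stand-in for the kmemcheck/kmemleak instrumentation hooks. */
static void instrument(void *ptr)
{
	(void)ptr;
}

/* Before the patch: the zeroing branch re-tests ptr for NULL. */
static void *toy_alloc_before(unsigned int flags)
{
	void *ptr = malloc(obj_size);

	if (likely(ptr))
		instrument(ptr);

	if (unlikely((flags & TOY_GFP_ZERO) && ptr))
		memset(ptr, 0, obj_size);

	return ptr;
}

/* After the patch: zeroing is nested under the existing likely(ptr)
 * test, so the NULL check is not repeated on the common path. */
static void *toy_alloc_after(unsigned int flags)
{
	void *ptr = malloc(obj_size);

	if (likely(ptr)) {
		instrument(ptr);
		if (unlikely(flags & TOY_GFP_ZERO))
			memset(ptr, 0, obj_size);
	}

	return ptr;
}

int main(void)
{
	free(toy_alloc_before(TOY_GFP_ZERO));
	free(toy_alloc_after(TOY_GFP_ZERO));
	return 0;
}

The change is purely about branch layout: the successful-allocation path executes one well-predicted test instead of two, and the memset stays on the side hinted as unlikely.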
Diffstat (limited to 'mm/slab.c')
-rw-r--r--  mm/slab.c  16
1 file changed, 8 insertions, 8 deletions
diff --git a/mm/slab.c b/mm/slab.c
index 54eba8a65370..8347d803a23f 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -3278,11 +3278,11 @@ slab_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid,
 	kmemleak_alloc_recursive(ptr, cachep->object_size, 1, cachep->flags,
 				 flags);
 
-	if (likely(ptr))
+	if (likely(ptr)) {
 		kmemcheck_slab_alloc(cachep, flags, ptr, cachep->object_size);
-
-	if (unlikely((flags & __GFP_ZERO) && ptr))
-		memset(ptr, 0, cachep->object_size);
+		if (unlikely(flags & __GFP_ZERO))
+			memset(ptr, 0, cachep->object_size);
+	}
 
 	return ptr;
 }
@@ -3343,11 +3343,11 @@ slab_alloc(struct kmem_cache *cachep, gfp_t flags, unsigned long caller)
 				 flags);
 	prefetchw(objp);
 
-	if (likely(objp))
+	if (likely(objp)) {
 		kmemcheck_slab_alloc(cachep, flags, objp, cachep->object_size);
-
-	if (unlikely((flags & __GFP_ZERO) && objp))
-		memset(objp, 0, cachep->object_size);
+		if (unlikely(flags & __GFP_ZERO))
+			memset(objp, 0, cachep->object_size);
+	}
 
 	return objp;
 }