-rw-r--r--	mm/slub.c	24
1 file changed, 11 insertions, 13 deletions
diff --git a/mm/slub.c b/mm/slub.c
index 2a952751bb50..23f9d8d26422 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2818,30 +2818,23 @@ bool kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
 		void *object = c->freelist;
 
 		if (unlikely(!object)) {
-			local_irq_enable();
 			/*
 			 * Invoking slow path likely have side-effect
 			 * of re-populating per CPU c->freelist
 			 */
-			p[i] = __slab_alloc(s, flags, NUMA_NO_NODE,
+			p[i] = ___slab_alloc(s, flags, NUMA_NO_NODE,
 					    _RET_IP_, c);
-			if (unlikely(!p[i])) {
-				__kmem_cache_free_bulk(s, i, p);
-				return false;
-			}
-			local_irq_disable();
+			if (unlikely(!p[i]))
+				goto error;
+
 			c = this_cpu_ptr(s->cpu_slab);
 			continue; /* goto for-loop */
 		}
 
 		/* kmem_cache debug support */
 		s = slab_pre_alloc_hook(s, flags);
-		if (unlikely(!s)) {
-			__kmem_cache_free_bulk(s, i, p);
-			c->tid = next_tid(c->tid);
-			local_irq_enable();
-			return false;
-		}
+		if (unlikely(!s))
+			goto error;
 
 		c->freelist = get_freepointer(s, object);
 		p[i] = object;
@@ -2861,6 +2854,11 @@ bool kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
 	}
 
 	return true;
+
+error:
+	__kmem_cache_free_bulk(s, i, p);
+	local_irq_enable();
+	return false;
 }
 EXPORT_SYMBOL(kmem_cache_alloc_bulk);
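
For context, the function reworked above is the SLUB bulk allocation fast path. Below is a minimal sketch of how a caller might exercise kmem_cache_alloc_bulk()/kmem_cache_free_bulk(); the cache name, object type and batch size are made up for illustration and are not part of this patch. With the error: label added above, a failed bulk allocation releases any partially allocated objects internally before returning false, so the caller only frees on success.

/* Illustrative caller of the bulk API (not part of the patch). */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>

struct demo_obj {			/* hypothetical object type */
	unsigned long payload[4];
};

static struct kmem_cache *demo_cache;	/* hypothetical cache */
static void *objs[16];

static int __init demo_init(void)
{
	demo_cache = kmem_cache_create("demo_cache", sizeof(struct demo_obj),
				       0, 0, NULL);
	if (!demo_cache)
		return -ENOMEM;

	/*
	 * Allocate a batch with a single call. On failure the partially
	 * allocated objects are released internally (see the error: label
	 * above) and false is returned, so there is nothing to clean up here.
	 */
	if (!kmem_cache_alloc_bulk(demo_cache, GFP_KERNEL, ARRAY_SIZE(objs),
				   objs)) {
		kmem_cache_destroy(demo_cache);
		return -ENOMEM;
	}

	/* ... use objs[0..15] ... */

	kmem_cache_free_bulk(demo_cache, ARRAY_SIZE(objs), objs);
	return 0;
}

static void __exit demo_exit(void)
{
	kmem_cache_destroy(demo_cache);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");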