Diffstat (limited to 'mm/slub.c')
-rw-r--r--  mm/slub.c  40
1 file changed, 16 insertions(+), 24 deletions(-)
diff --git a/mm/slub.c b/mm/slub.c
index f614b5dc396b..7cb4bf9ae320 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -459,8 +459,10 @@ static void get_map(struct kmem_cache *s, struct page *page, unsigned long *map)
/*
* Debug settings:
*/
-#ifdef CONFIG_SLUB_DEBUG_ON
+#if defined(CONFIG_SLUB_DEBUG_ON)
static int slub_debug = DEBUG_DEFAULT_FLAGS;
+#elif defined(CONFIG_KASAN)
+static int slub_debug = SLAB_STORE_USER;
#else
static int slub_debug;
#endif
@@ -1263,7 +1265,7 @@ static inline struct kmem_cache *slab_pre_alloc_hook(struct kmem_cache *s,
{
flags &= gfp_allowed_mask;
lockdep_trace_alloc(flags);
- might_sleep_if(flags & __GFP_WAIT);
+ might_sleep_if(gfpflags_allow_blocking(flags));
if (should_failslab(s->object_size, flags, s->flags))
return NULL;
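
For reference, gfpflags_allow_blocking() (from include/linux/gfp.h) boils down to a test for __GFP_DIRECT_RECLAIM, replacing the old __GFP_WAIT check one-for-one in this hunk. Below is a standalone userspace sketch of that test; the flag bit value is a placeholder, not the real gfp.h definition.

/* Userspace sketch: the flag bit value is a made-up placeholder. */
#include <stdbool.h>
#include <stdio.h>

typedef unsigned int gfp_t;

#define __GFP_DIRECT_RECLAIM ((gfp_t)0x400000u)  /* placeholder bit */

static bool gfpflags_allow_blocking(const gfp_t gfp_flags)
{
        /* Blocking is allowed iff the caller may enter direct reclaim. */
        return !!(gfp_flags & __GFP_DIRECT_RECLAIM);
}

int main(void)
{
        printf("%d\n", gfpflags_allow_blocking(__GFP_DIRECT_RECLAIM)); /* 1 */
        printf("%d\n", gfpflags_allow_blocking(0));                    /* 0 */
        return 0;
}
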
@@ -1328,16 +1330,15 @@ static inline struct page *alloc_slab_page(struct kmem_cache *s,
flags |= __GFP_NOTRACK;
- if (memcg_charge_slab(s, flags, order))
- return NULL;
-
if (node == NUMA_NO_NODE)
page = alloc_pages(flags, order);
else
page = __alloc_pages_node(node, flags, order);
- if (!page)
- memcg_uncharge_slab(s, order);
+ if (page && memcg_charge_slab(page, flags, order, s)) {
+ __free_pages(page, order);
+ page = NULL;
+ }
return page;
}
@@ -1352,7 +1353,7 @@ static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
flags &= gfp_allowed_mask;
- if (flags & __GFP_WAIT)
+ if (gfpflags_allow_blocking(flags))
local_irq_enable();
flags |= s->allocflags;
@@ -1362,8 +1363,8 @@ static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
* so we fall-back to the minimum order allocation.
*/
alloc_gfp = (flags | __GFP_NOWARN | __GFP_NORETRY) & ~__GFP_NOFAIL;
- if ((alloc_gfp & __GFP_WAIT) && oo_order(oo) > oo_order(s->min))
- alloc_gfp = (alloc_gfp | __GFP_NOMEMALLOC) & ~__GFP_WAIT;
+ if ((alloc_gfp & __GFP_DIRECT_RECLAIM) && oo_order(oo) > oo_order(s->min))
+ alloc_gfp = (alloc_gfp | __GFP_NOMEMALLOC) & ~__GFP_DIRECT_RECLAIM;
page = alloc_slab_page(s, alloc_gfp, node, oo);
if (unlikely(!page)) {
@@ -1423,7 +1424,7 @@ static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
page->frozen = 1;
out:
- if (flags & __GFP_WAIT)
+ if (gfpflags_allow_blocking(flags))
local_irq_disable();
if (!page)
return NULL;
@@ -1476,8 +1477,7 @@ static void __free_slab(struct kmem_cache *s, struct page *page)
page_mapcount_reset(page);
if (current->reclaim_state)
current->reclaim_state->reclaimed_slab += pages;
- __free_pages(page, order);
- memcg_uncharge_slab(s, order);
+ __free_kmem_pages(page, order);
}
#define need_reserve_slab_rcu \
@@ -1507,10 +1507,7 @@ static void free_slab(struct kmem_cache *s, struct page *page)
VM_BUG_ON(s->reserved != sizeof(*head));
head = page_address(page) + offset;
} else {
- /*
- * RCU free overloads the RCU head over the LRU
- */
- head = (void *)&page->lru;
+ head = &page->rcu_head;
}
call_rcu(head, rcu_free_slab);
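
The cast through page->lru was only ever a way to reach storage that rcu_head shares with lru; assuming the two members sit in the same union of struct page (which is what this hunk relies on), the field can be named directly. A cut-down userspace illustration with an invented fake_page type:

#include <assert.h>
#include <stddef.h>
#include <stdio.h>

struct list_head { struct list_head *next, *prev; };
struct rcu_head  { struct rcu_head *next; void (*func)(struct rcu_head *); };

/* Invented stand-in for struct page; only the union matters here. */
struct fake_page {
        unsigned long flags;
        union {
                struct list_head lru;       /* normal list linkage */
                struct rcu_head rcu_head;   /* reused when freeing via RCU */
        };
};

int main(void)
{
        struct fake_page page;

        /* Same storage, so &page.rcu_head equals the old (void *)&page.lru. */
        assert((void *)&page.rcu_head == (void *)&page.lru);
        printf("offset lru=%zu rcu_head=%zu\n",
               offsetof(struct fake_page, lru),
               offsetof(struct fake_page, rcu_head));
        return 0;
}
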
@@ -2912,20 +2909,15 @@ static inline int slab_order(int size, int min_objects,
if (order_objects(min_order, size, reserved) > MAX_OBJS_PER_PAGE)
return get_order(size * MAX_OBJS_PER_PAGE) - 1;
- for (order = max(min_order,
- fls(min_objects * size - 1) - PAGE_SHIFT);
+ for (order = max(min_order, get_order(min_objects * size + reserved));
order <= max_order; order++) {
unsigned long slab_size = PAGE_SIZE << order;
- if (slab_size < min_objects * size + reserved)
- continue;
-
rem = (slab_size - reserved) % size;
if (rem <= slab_size / fract_leftover)
break;
-
}
return order;
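
The new loop start already accounts for the reserved bytes, which is why the "slab_size < min_objects * size + reserved" bail-out can go away. A userspace sketch of the arithmetic; PAGE_SHIFT, the helper implementations, and the sample numbers are assumptions for illustration, not taken from the patch:

#include <stdio.h>

#define PAGE_SHIFT 12                         /* assumption: 4K pages */
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

static int fls_ul(unsigned long x)            /* like kernel fls(): fls(0) == 0 */
{
        return x ? (int)(8 * sizeof(x)) - __builtin_clzl(x) : 0;
}

static int get_order_ul(unsigned long size)   /* mirrors generic get_order() */
{
        size--;
        size >>= PAGE_SHIFT;
        return fls_ul(size);
}

int main(void)
{
        unsigned long size = 256, min_objects = 16, reserved = 16;
        unsigned long need = min_objects * size + reserved;           /* 4112 bytes */

        int old_start = fls_ul(min_objects * size - 1) - PAGE_SHIFT;  /* 0 */
        int new_start = get_order_ul(need);                           /* 1 */

        printf("old start: order %d, %lu bytes, too small for %lu\n",
               old_start, PAGE_SIZE << old_start, need);
        printf("new start: order %d, %lu bytes, fits %lu\n",
               new_start, PAGE_SIZE << new_start, need);
        return 0;
}

With reserved bytes folded in, the search begins at an order that is already large enough, so the old skip-and-retry path is never needed.
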
@@ -2943,7 +2935,7 @@ static inline int calculate_order(int size, int reserved)
* works by first attempting to generate a layout with
* the best configuration and backing off gradually.
*
- * First we reduce the acceptable waste in a slab. Then
+ * First we increase the acceptable waste in a slab. Then
* we reduce the minimum objects required in a slab.
*/
min_objects = slub_min_objects;
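
The corrected comment matches what the loop actually does: the divisor ("fraction") shrinks, so the waste that slab_order() will accept grows. A small userspace sketch of that relationship; the 16/8/4 schedule is an assumption about the back-off, and the slab size is just an example:

#include <stdio.h>

int main(void)
{
        unsigned long slab_size = 32768;   /* example: order-3 slab with 4K pages */

        /*
         * slab_order() accepts a layout when the leftover bytes are at most
         * slab_size / fraction, so halving the fraction raises the acceptable
         * waste: the search backs off by increasing it, not reducing it.
         */
        for (int fraction = 16; fraction >= 4; fraction /= 2)
                printf("fraction %2d -> up to %lu wasted bytes allowed\n",
                       fraction, slab_size / fraction);
        return 0;
}
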