Diffstat (limited to 'mm/slub.c')
 mm/slub.c | 225 ++++++++++++++++++++++++++++++++----------------------------
 1 file changed, 111 insertions(+), 114 deletions(-)
diff --git a/mm/slub.c b/mm/slub.c
index 2b1ce697fc4b..fdf0fe4da9a9 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -403,7 +403,7 @@ static inline bool __cmpxchg_double_slab(struct kmem_cache *s, struct page *page
stat(s, CMPXCHG_DOUBLE_FAIL);
#ifdef SLUB_DEBUG_CMPXCHG
- printk(KERN_INFO "%s %s: cmpxchg double redo ", n, s->name);
+ pr_info("%s %s: cmpxchg double redo ", n, s->name);
#endif
return 0;
@@ -444,7 +444,7 @@ static inline bool cmpxchg_double_slab(struct kmem_cache *s, struct page *page,
stat(s, CMPXCHG_DOUBLE_FAIL);
#ifdef SLUB_DEBUG_CMPXCHG
- printk(KERN_INFO "%s %s: cmpxchg double redo ", n, s->name);
+ pr_info("%s %s: cmpxchg double redo ", n, s->name);
#endif
return 0;
@@ -546,14 +546,14 @@ static void print_track(const char *s, struct track *t)
if (!t->addr)
return;
- printk(KERN_ERR "INFO: %s in %pS age=%lu cpu=%u pid=%d\n",
- s, (void *)t->addr, jiffies - t->when, t->cpu, t->pid);
+ pr_err("INFO: %s in %pS age=%lu cpu=%u pid=%d\n",
+ s, (void *)t->addr, jiffies - t->when, t->cpu, t->pid);
#ifdef CONFIG_STACKTRACE
{
int i;
for (i = 0; i < TRACK_ADDRS_COUNT; i++)
if (t->addrs[i])
- printk(KERN_ERR "\t%pS\n", (void *)t->addrs[i]);
+ pr_err("\t%pS\n", (void *)t->addrs[i]);
else
break;
}
@@ -571,38 +571,37 @@ static void print_tracking(struct kmem_cache *s, void *object)
static void print_page_info(struct page *page)
{
- printk(KERN_ERR
- "INFO: Slab 0x%p objects=%u used=%u fp=0x%p flags=0x%04lx\n",
+ pr_err("INFO: Slab 0x%p objects=%u used=%u fp=0x%p flags=0x%04lx\n",
page, page->objects, page->inuse, page->freelist, page->flags);
}
static void slab_bug(struct kmem_cache *s, char *fmt, ...)
{
+ struct va_format vaf;
va_list args;
- char buf[100];
va_start(args, fmt);
- vsnprintf(buf, sizeof(buf), fmt, args);
- va_end(args);
- printk(KERN_ERR "========================================"
- "=====================================\n");
- printk(KERN_ERR "BUG %s (%s): %s\n", s->name, print_tainted(), buf);
- printk(KERN_ERR "----------------------------------------"
- "-------------------------------------\n\n");
+ vaf.fmt = fmt;
+ vaf.va = &args;
+ pr_err("=============================================================================\n");
+ pr_err("BUG %s (%s): %pV\n", s->name, print_tainted(), &vaf);
+ pr_err("-----------------------------------------------------------------------------\n\n");
add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
+ va_end(args);
}
static void slab_fix(struct kmem_cache *s, char *fmt, ...)
{
+ struct va_format vaf;
va_list args;
- char buf[100];
va_start(args, fmt);
- vsnprintf(buf, sizeof(buf), fmt, args);
+ vaf.fmt = fmt;
+ vaf.va = &args;
+ pr_err("FIX %s: %pV\n", s->name, &vaf);
va_end(args);
- printk(KERN_ERR "FIX %s: %s\n", s->name, buf);
}
static void print_trailer(struct kmem_cache *s, struct page *page, u8 *p)
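[Note: the slab_bug()/slab_fix() rewrite above drops the fixed 100-byte vsnprintf() buffer in favour of the kernel's %pV extension: a struct va_format carries the format string and va_list together, so printk formats the message in a single pass with no truncation. A minimal sketch of the idiom follows; report_err() is an illustrative wrapper name, not from this patch.]

#include <stdarg.h>
#include <linux/printk.h>	/* struct va_format, pr_err(), %pV */

/* Hypothetical wrapper: forward a printf-style message through %pV. */
static void report_err(const char *prefix, const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	va_start(args, fmt);
	vaf.fmt = fmt;
	vaf.va = &args;
	/* %pV expands the embedded format/va_list pair inside printk. */
	pr_err("%s: %pV\n", prefix, &vaf);
	va_end(args);
}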
@@ -614,8 +613,8 @@ static void print_trailer(struct kmem_cache *s, struct page *page, u8 *p)
print_page_info(page);
- printk(KERN_ERR "INFO: Object 0x%p @offset=%tu fp=0x%p\n\n",
- p, p - addr, get_freepointer(s, p));
+ pr_err("INFO: Object 0x%p @offset=%tu fp=0x%p\n\n",
+ p, p - addr, get_freepointer(s, p));
if (p > addr + 16)
print_section("Bytes b4 ", p - 16, 16);
@@ -698,7 +697,7 @@ static int check_bytes_and_report(struct kmem_cache *s, struct page *page,
end--;
slab_bug(s, "%s overwritten", what);
- printk(KERN_ERR "INFO: 0x%p-0x%p. First byte 0x%x instead of 0x%x\n",
+ pr_err("INFO: 0x%p-0x%p. First byte 0x%x instead of 0x%x\n",
fault, end - 1, fault[0], value);
print_trailer(s, page, object);
@@ -931,7 +930,7 @@ static void trace(struct kmem_cache *s, struct page *page, void *object,
int alloc)
{
if (s->flags & SLAB_TRACE) {
- printk(KERN_INFO "TRACE %s %s 0x%p inuse=%d fp=0x%p\n",
+ pr_info("TRACE %s %s 0x%p inuse=%d fp=0x%p\n",
s->name,
alloc ? "alloc" : "free",
object, page->inuse,
@@ -1134,9 +1133,8 @@ static noinline struct kmem_cache_node *free_debug_processing(
slab_err(s, page, "Attempt to free object(0x%p) "
"outside of slab", object);
} else if (!page->slab_cache) {
- printk(KERN_ERR
- "SLUB <none>: no slab for object 0x%p.\n",
- object);
+ pr_err("SLUB <none>: no slab for object 0x%p.\n",
+ object);
dump_stack();
} else
object_err(s, page, object,
@@ -1219,8 +1217,8 @@ static int __init setup_slub_debug(char *str)
slub_debug |= SLAB_FAILSLAB;
break;
default:
- printk(KERN_ERR "slub_debug option '%c' "
- "unknown. skipped\n", *str);
+ pr_err("slub_debug option '%c' unknown. skipped\n",
+ *str);
}
}
@@ -1314,17 +1312,26 @@ static inline void slab_free_hook(struct kmem_cache *s, void *x)
/*
* Slab allocation and freeing
*/
-static inline struct page *alloc_slab_page(gfp_t flags, int node,
- struct kmem_cache_order_objects oo)
+static inline struct page *alloc_slab_page(struct kmem_cache *s,
+ gfp_t flags, int node, struct kmem_cache_order_objects oo)
{
+ struct page *page;
int order = oo_order(oo);
flags |= __GFP_NOTRACK;
+ if (memcg_charge_slab(s, flags, order))
+ return NULL;
+
if (node == NUMA_NO_NODE)
- return alloc_pages(flags, order);
+ page = alloc_pages(flags, order);
else
- return alloc_pages_exact_node(node, flags, order);
+ page = alloc_pages_exact_node(node, flags, order);
+
+ if (!page)
+ memcg_uncharge_slab(s, order);
+
+ return page;
}
static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
@@ -1346,7 +1353,7 @@ static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
*/
alloc_gfp = (flags | __GFP_NOWARN | __GFP_NORETRY) & ~__GFP_NOFAIL;
- page = alloc_slab_page(alloc_gfp, node, oo);
+ page = alloc_slab_page(s, alloc_gfp, node, oo);
if (unlikely(!page)) {
oo = s->min;
alloc_gfp = flags;
@@ -1354,7 +1361,7 @@ static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
* Allocation may have failed due to fragmentation.
* Try a lower order alloc if possible
*/
- page = alloc_slab_page(alloc_gfp, node, oo);
+ page = alloc_slab_page(s, alloc_gfp, node, oo);
if (page)
stat(s, ORDER_FALLBACK);
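[Note: allocate_slab() first tries the cache's preferred higher order with __GFP_NOWARN | __GFP_NORETRY (and __GFP_NOFAIL masked off) so a fragmented zone fails fast and quietly, then retries at the minimum order with the caller's original flags. A sketch of that two-step pattern, under an assumed helper name; SLUB open-codes it using the cache's oo and min orders.]

#include <linux/gfp.h>

static struct page *alloc_with_fallback(gfp_t flags, int node,
					unsigned int pref_order,
					unsigned int min_order)
{
	/* Opportunistic first try: no OOM warnings, no reclaim retries. */
	gfp_t cheap = (flags | __GFP_NOWARN | __GFP_NORETRY) & ~__GFP_NOFAIL;
	struct page *page = alloc_pages_node(node, cheap, pref_order);

	if (!page && min_order < pref_order)
		/* Fragmentation fallback: smaller order, original flags. */
		page = alloc_pages_node(node, flags, min_order);
	return page;
}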
@@ -1415,7 +1422,6 @@ static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node)
order = compound_order(page);
inc_slabs_node(s, page_to_nid(page), page->objects);
- memcg_bind_pages(s, order);
page->slab_cache = s;
__SetPageSlab(page);
if (page->pfmemalloc)
@@ -1466,11 +1472,11 @@ static void __free_slab(struct kmem_cache *s, struct page *page)
__ClearPageSlabPfmemalloc(page);
__ClearPageSlab(page);
- memcg_release_pages(s, order);
page_mapcount_reset(page);
if (current->reclaim_state)
current->reclaim_state->reclaimed_slab += pages;
- __free_memcg_kmem_pages(page, order);
+ __free_pages(page, order);
+ memcg_uncharge_slab(s, order);
}
#define need_reserve_slab_rcu \
@@ -1770,19 +1776,19 @@ static inline void note_cmpxchg_failure(const char *n,
#ifdef SLUB_DEBUG_CMPXCHG
unsigned long actual_tid = __this_cpu_read(s->cpu_slab->tid);
- printk(KERN_INFO "%s %s: cmpxchg redo ", n, s->name);
+ pr_info("%s %s: cmpxchg redo ", n, s->name);
#ifdef CONFIG_PREEMPT
if (tid_to_cpu(tid) != tid_to_cpu(actual_tid))
- printk("due to cpu change %d -> %d\n",
+ pr_warn("due to cpu change %d -> %d\n",
tid_to_cpu(tid), tid_to_cpu(actual_tid));
else
#endif
if (tid_to_event(tid) != tid_to_event(actual_tid))
- printk("due to cpu running other code. Event %ld->%ld\n",
+ pr_warn("due to cpu running other code. Event %ld->%ld\n",
tid_to_event(tid), tid_to_event(actual_tid));
else
- printk("for unknown reason: actual=%lx was=%lx target=%lx\n",
+ pr_warn("for unknown reason: actual=%lx was=%lx target=%lx\n",
actual_tid, tid, next_tid(tid));
#endif
stat(s, CMPXCHG_DOUBLE_CPU_FAIL);
@@ -2121,11 +2127,19 @@ static inline int node_match(struct page *page, int node)
return 1;
}
+#ifdef CONFIG_SLUB_DEBUG
static int count_free(struct page *page)
{
return page->objects - page->inuse;
}
+static inline unsigned long node_nr_objs(struct kmem_cache_node *n)
+{
+ return atomic_long_read(&n->total_objects);
+}
+#endif /* CONFIG_SLUB_DEBUG */
+
+#if defined(CONFIG_SLUB_DEBUG) || defined(CONFIG_SYSFS)
static unsigned long count_partial(struct kmem_cache_node *n,
int (*get_count)(struct page *))
{
@@ -2139,31 +2153,28 @@ static unsigned long count_partial(struct kmem_cache_node *n,
spin_unlock_irqrestore(&n->list_lock, flags);
return x;
}
-
-static inline unsigned long node_nr_objs(struct kmem_cache_node *n)
-{
-#ifdef CONFIG_SLUB_DEBUG
- return atomic_long_read(&n->total_objects);
-#else
- return 0;
-#endif
-}
+#endif /* CONFIG_SLUB_DEBUG || CONFIG_SYSFS */
static noinline void
slab_out_of_memory(struct kmem_cache *s, gfp_t gfpflags, int nid)
{
+#ifdef CONFIG_SLUB_DEBUG
+ static DEFINE_RATELIMIT_STATE(slub_oom_rs, DEFAULT_RATELIMIT_INTERVAL,
+ DEFAULT_RATELIMIT_BURST);
int node;
- printk(KERN_WARNING
- "SLUB: Unable to allocate memory on node %d (gfp=0x%x)\n",
+ if ((gfpflags & __GFP_NOWARN) || !__ratelimit(&slub_oom_rs))
+ return;
+
+ pr_warn("SLUB: Unable to allocate memory on node %d (gfp=0x%x)\n",
nid, gfpflags);
- printk(KERN_WARNING " cache: %s, object size: %d, buffer size: %d, "
- "default order: %d, min order: %d\n", s->name, s->object_size,
- s->size, oo_order(s->oo), oo_order(s->min));
+ pr_warn(" cache: %s, object size: %d, buffer size: %d, default order: %d, min order: %d\n",
+ s->name, s->object_size, s->size, oo_order(s->oo),
+ oo_order(s->min));
if (oo_order(s->min) > get_order(s->object_size))
- printk(KERN_WARNING " %s debugging increased min order, use "
- "slub_debug=O to disable.\n", s->name);
+ pr_warn(" %s debugging increased min order, use slub_debug=O to disable.\n",
+ s->name);
for_each_online_node(node) {
struct kmem_cache_node *n = get_node(s, node);
@@ -2178,10 +2189,10 @@ slab_out_of_memory(struct kmem_cache *s, gfp_t gfpflags, int nid)
nr_slabs = node_nr_slabs(n);
nr_objs = node_nr_objs(n);
- printk(KERN_WARNING
- " node %d: slabs: %ld, objs: %ld, free: %ld\n",
+ pr_warn(" node %d: slabs: %ld, objs: %ld, free: %ld\n",
node, nr_slabs, nr_objs, nr_free);
}
+#endif
}
static inline void *new_slab_objects(struct kmem_cache *s, gfp_t flags,
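[Note: slab_out_of_memory() now carries its own rate limit instead of relying on the caller's printk_ratelimit(): a static DEFINE_RATELIMIT_STATE() allows DEFAULT_RATELIMIT_BURST messages per DEFAULT_RATELIMIT_INTERVAL, and __ratelimit() returns nonzero while that budget lasts. The bare idiom in a minimal sketch; warn_oom() is illustrative.]

#include <linux/ratelimit.h>
#include <linux/printk.h>

static void warn_oom(int nid)
{
	static DEFINE_RATELIMIT_STATE(rs, DEFAULT_RATELIMIT_INTERVAL,
				      DEFAULT_RATELIMIT_BURST);

	if (!__ratelimit(&rs))	/* burst budget exhausted: stay silent */
		return;
	pr_warn("allocation failure on node %d\n", nid);
}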
@@ -2198,7 +2209,7 @@ static inline void *new_slab_objects(struct kmem_cache *s, gfp_t flags,
page = new_slab(s, flags, node);
if (page) {
- c = __this_cpu_ptr(s->cpu_slab);
+ c = raw_cpu_ptr(s->cpu_slab);
if (c->page)
flush_slab(s, c);
@@ -2323,8 +2334,6 @@ redo:
if (freelist)
goto load_freelist;
- stat(s, ALLOC_SLOWPATH);
-
freelist = get_freelist(s, page);
if (!freelist) {
@@ -2360,9 +2369,7 @@ new_slab:
freelist = new_slab_objects(s, gfpflags, node, &c);
if (unlikely(!freelist)) {
- if (!(gfpflags & __GFP_NOWARN) && printk_ratelimit())
- slab_out_of_memory(s, gfpflags, node);
-
+ slab_out_of_memory(s, gfpflags, node);
local_irq_restore(flags);
return NULL;
}
@@ -2418,7 +2425,7 @@ redo:
* and the retrieval of the tid.
*/
preempt_disable();
- c = __this_cpu_ptr(s->cpu_slab);
+ c = this_cpu_ptr(s->cpu_slab);
/*
* The transaction ids are globally unique per cpu and per operation on
@@ -2431,10 +2438,10 @@ redo:
object = c->freelist;
page = c->page;
- if (unlikely(!object || !node_match(page, node)))
+ if (unlikely(!object || !node_match(page, node))) {
object = __slab_alloc(s, gfpflags, node, addr, c);
-
- else {
+ stat(s, ALLOC_SLOWPATH);
+ } else {
void *next_object = get_freepointer_safe(s, object);
/*
@@ -2674,7 +2681,7 @@ redo:
* during the cmpxchg then the free will succeed.
*/
preempt_disable();
- c = __this_cpu_ptr(s->cpu_slab);
+ c = this_cpu_ptr(s->cpu_slab);
tid = c->tid;
preempt_enable();
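[Note: both fastpaths take the per-cpu slab pointer and its transaction id as a pair under preempt_disable(), so the reads come from one cpu; the later this_cpu_cmpxchg_double() fails if the tid has moved, catching migration or a preempting alloc/free. A rough sketch of the snapshot step with illustrative types; the real structure is struct kmem_cache_cpu.]

#include <linux/percpu.h>
#include <linux/preempt.h>

struct demo_cpu_slab {
	void *freelist;
	unsigned long tid;	/* per-cpu id, bumped on every operation */
};

static void snapshot_cpu_slab(struct demo_cpu_slab __percpu *pcp,
			      void **freelist, unsigned long *tid)
{
	struct demo_cpu_slab *c;

	preempt_disable();	/* keep both reads on one cpu */
	c = this_cpu_ptr(pcp);
	*freelist = c->freelist;
	*tid = c->tid;
	preempt_enable();	/* tid is revalidated by the cmpxchg later */
}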
@@ -2894,10 +2901,8 @@ static void early_kmem_cache_node_alloc(int node)
BUG_ON(!page);
if (page_to_nid(page) != node) {
- printk(KERN_ERR "SLUB: Unable to allocate memory from "
- "node %d\n", node);
- printk(KERN_ERR "SLUB: Allocating a useless per node structure "
- "in order to be able to continue\n");
+ pr_err("SLUB: Unable to allocate memory from node %d\n", node);
+ pr_err("SLUB: Allocating a useless per node structure in order to be able to continue\n");
}
n = page->freelist;
@@ -3182,8 +3187,7 @@ static void list_slab_objects(struct kmem_cache *s, struct page *page,
for_each_object(p, s, addr, page->objects) {
if (!test_bit(slab_index(p, s, addr), map)) {
- printk(KERN_ERR "INFO: Object 0x%p @offset=%tu\n",
- p, p - addr);
+ pr_err("INFO: Object 0x%p @offset=%tu\n", p, p - addr);
print_tracking(s, p);
}
}
@@ -3305,8 +3309,8 @@ static void *kmalloc_large_node(size_t size, gfp_t flags, int node)
struct page *page;
void *ptr = NULL;
- flags |= __GFP_COMP | __GFP_NOTRACK | __GFP_KMEMCG;
- page = alloc_pages_node(node, flags, get_order(size));
+ flags |= __GFP_COMP | __GFP_NOTRACK;
+ page = alloc_kmem_pages_node(node, flags, get_order(size));
if (page)
ptr = page_address(page);
@@ -3375,7 +3379,7 @@ void kfree(const void *x)
if (unlikely(!PageSlab(page))) {
BUG_ON(!PageCompound(page));
kfree_hook(x);
- __free_memcg_kmem_pages(page, compound_order(page));
+ __free_kmem_pages(page, compound_order(page));
return;
}
slab_free(page->slab_cache, page, object, _RET_IP_);
@@ -3392,7 +3396,7 @@ EXPORT_SYMBOL(kfree);
* being allocated from last increasing the chance that the last objects
* are freed in them.
*/
-int kmem_cache_shrink(struct kmem_cache *s)
+int __kmem_cache_shrink(struct kmem_cache *s)
{
int node;
int i;
@@ -3448,7 +3452,6 @@ int kmem_cache_shrink(struct kmem_cache *s)
kfree(slabs_by_inuse);
return 0;
}
-EXPORT_SYMBOL(kmem_cache_shrink);
static int slab_mem_going_offline_callback(void *arg)
{
@@ -3456,7 +3459,7 @@ static int slab_mem_going_offline_callback(void *arg)
mutex_lock(&slab_mutex);
list_for_each_entry(s, &slab_caches, list)
- kmem_cache_shrink(s);
+ __kmem_cache_shrink(s);
mutex_unlock(&slab_mutex);
return 0;
@@ -3650,9 +3653,7 @@ void __init kmem_cache_init(void)
register_cpu_notifier(&slab_notifier);
#endif
- printk(KERN_INFO
- "SLUB: HWalign=%d, Order=%d-%d, MinObjects=%d,"
- " CPUs=%d, Nodes=%d\n",
+ pr_info("SLUB: HWalign=%d, Order=%d-%d, MinObjects=%d, CPUs=%d, Nodes=%d\n",
cache_line_size(),
slub_min_order, slub_max_order, slub_min_objects,
nr_cpu_ids, nr_node_ids);
@@ -3934,8 +3935,8 @@ static int validate_slab_node(struct kmem_cache *s,
count++;
}
if (count != n->nr_partial)
- printk(KERN_ERR "SLUB %s: %ld partial slabs counted but "
- "counter=%ld\n", s->name, count, n->nr_partial);
+ pr_err("SLUB %s: %ld partial slabs counted but counter=%ld\n",
+ s->name, count, n->nr_partial);
if (!(s->flags & SLAB_STORE_USER))
goto out;
@@ -3945,9 +3946,8 @@ static int validate_slab_node(struct kmem_cache *s,
count++;
}
if (count != atomic_long_read(&n->nr_slabs))
- printk(KERN_ERR "SLUB: %s %ld slabs counted but "
- "counter=%ld\n", s->name, count,
- atomic_long_read(&n->nr_slabs));
+ pr_err("SLUB: %s %ld slabs counted but counter=%ld\n",
+ s->name, count, atomic_long_read(&n->nr_slabs));
out:
spin_unlock_irqrestore(&n->list_lock, flags);
@@ -4211,53 +4211,50 @@ static void resiliency_test(void)
BUILD_BUG_ON(KMALLOC_MIN_SIZE > 16 || KMALLOC_SHIFT_HIGH < 10);
- printk(KERN_ERR "SLUB resiliency testing\n");
- printk(KERN_ERR "-----------------------\n");
- printk(KERN_ERR "A. Corruption after allocation\n");
+ pr_err("SLUB resiliency testing\n");
+ pr_err("-----------------------\n");
+ pr_err("A. Corruption after allocation\n");
p = kzalloc(16, GFP_KERNEL);
p[16] = 0x12;
- printk(KERN_ERR "\n1. kmalloc-16: Clobber Redzone/next pointer"
- " 0x12->0x%p\n\n", p + 16);
+ pr_err("\n1. kmalloc-16: Clobber Redzone/next pointer 0x12->0x%p\n\n",
+ p + 16);
validate_slab_cache(kmalloc_caches[4]);
/* Hmmm... The next two are dangerous */
p = kzalloc(32, GFP_KERNEL);
p[32 + sizeof(void *)] = 0x34;
- printk(KERN_ERR "\n2. kmalloc-32: Clobber next pointer/next slab"
- " 0x34 -> -0x%p\n", p);
- printk(KERN_ERR
- "If allocated object is overwritten then not detectable\n\n");
+ pr_err("\n2. kmalloc-32: Clobber next pointer/next slab 0x34 -> -0x%p\n",
+ p);
+ pr_err("If allocated object is overwritten then not detectable\n\n");
validate_slab_cache(kmalloc_caches[5]);
p = kzalloc(64, GFP_KERNEL);
p += 64 + (get_cycles() & 0xff) * sizeof(void *);
*p = 0x56;
- printk(KERN_ERR "\n3. kmalloc-64: corrupting random byte 0x56->0x%p\n",
- p);
- printk(KERN_ERR
- "If allocated object is overwritten then not detectable\n\n");
+ pr_err("\n3. kmalloc-64: corrupting random byte 0x56->0x%p\n",
+ p);
+ pr_err("If allocated object is overwritten then not detectable\n\n");
validate_slab_cache(kmalloc_caches[6]);
- printk(KERN_ERR "\nB. Corruption after free\n");
+ pr_err("\nB. Corruption after free\n");
p = kzalloc(128, GFP_KERNEL);
kfree(p);
*p = 0x78;
- printk(KERN_ERR "1. kmalloc-128: Clobber first word 0x78->0x%p\n\n", p);
+ pr_err("1. kmalloc-128: Clobber first word 0x78->0x%p\n\n", p);
validate_slab_cache(kmalloc_caches[7]);
p = kzalloc(256, GFP_KERNEL);
kfree(p);
p[50] = 0x9a;
- printk(KERN_ERR "\n2. kmalloc-256: Clobber 50th byte 0x9a->0x%p\n\n",
- p);
+ pr_err("\n2. kmalloc-256: Clobber 50th byte 0x9a->0x%p\n\n", p);
validate_slab_cache(kmalloc_caches[8]);
p = kzalloc(512, GFP_KERNEL);
kfree(p);
p[512] = 0xab;
- printk(KERN_ERR "\n3. kmalloc-512: Clobber redzone 0xab->0x%p\n\n", p);
+ pr_err("\n3. kmalloc-512: Clobber redzone 0xab->0x%p\n\n", p);
validate_slab_cache(kmalloc_caches[9]);
}
#else
@@ -4332,7 +4329,7 @@ static ssize_t show_slab_objects(struct kmem_cache *s,
}
}
- lock_memory_hotplug();
+ get_online_mems();
#ifdef CONFIG_SLUB_DEBUG
if (flags & SO_ALL) {
for_each_node_state(node, N_NORMAL_MEMORY) {
@@ -4372,7 +4369,7 @@ static ssize_t show_slab_objects(struct kmem_cache *s,
x += sprintf(buf + x, " N%d=%lu",
node, nodes[node]);
#endif
- unlock_memory_hotplug();
+ put_online_mems();
kfree(nodes);
return x + sprintf(buf + x, "\n");
}
@@ -5303,7 +5300,7 @@ static int __init slab_sysfs_init(void)
slab_kset = kset_create_and_add("slab", &slab_uevent_ops, kernel_kobj);
if (!slab_kset) {
mutex_unlock(&slab_mutex);
- printk(KERN_ERR "Cannot register slab subsystem.\n");
+ pr_err("Cannot register slab subsystem.\n");
return -ENOSYS;
}
@@ -5312,8 +5309,8 @@ static int __init slab_sysfs_init(void)
list_for_each_entry(s, &slab_caches, list) {
err = sysfs_slab_add(s);
if (err)
- printk(KERN_ERR "SLUB: Unable to add boot slab %s"
- " to sysfs\n", s->name);
+ pr_err("SLUB: Unable to add boot slab %s to sysfs\n",
+ s->name);
}
while (alias_list) {
@@ -5322,8 +5319,8 @@ static int __init slab_sysfs_init(void)
alias_list = alias_list->next;
err = sysfs_slab_alias(al->s, al->name);
if (err)
- printk(KERN_ERR "SLUB: Unable to add boot slab alias"
- " %s to sysfs\n", al->name);
+ pr_err("SLUB: Unable to add boot slab alias %s to sysfs\n",
+ al->name);
kfree(al);
}