Diffstat (limited to 'mm/slab_common.c')
 mm/slab_common.c | 40 ++++++++++++++--------------------------
 1 file changed, 14 insertions(+), 26 deletions(-)
diff --git a/mm/slab_common.c b/mm/slab_common.c
index adbace4256ef..7c8298c17145 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -197,7 +197,7 @@ struct kmem_cache *find_mergeable(unsigned int size, unsigned int align,
size = ALIGN(size, sizeof(void *));
align = calculate_alignment(flags, align, size);
size = ALIGN(size, align);
- flags = kmem_cache_flags(size, flags, name, NULL);
+ flags = kmem_cache_flags(size, flags, name);
if (flags & SLAB_NEVER_MERGE)
return NULL;
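
As a worked illustration of the size rounding above, here is a minimal userspace sketch; the ALIGN macro mirrors the kernel macro of the same name, and the input sizes are made up:

#include <stdio.h>

/* Mirrors the kernel's ALIGN(): round x up to the power-of-two boundary a. */
#define ALIGN(x, a) (((x) + (a) - 1) & ~((unsigned long)(a) - 1))

int main(void)
{
	unsigned long size = 44, align = 16;

	size = ALIGN(size, sizeof(void *)); /* 44 -> 48 on a 64-bit target */
	size = ALIGN(size, align);          /* 48 stays 48: already aligned */
	printf("merge-candidate size: %lu\n", size);
	return 0;
}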
@@ -309,9 +309,6 @@ kmem_cache_create_usercopy(const char *name,
const char *cache_name;
int err;
- get_online_cpus();
- get_online_mems();
-
mutex_lock(&slab_mutex);
err = kmem_cache_sanity_check(name, size);
@@ -360,9 +357,6 @@ kmem_cache_create_usercopy(const char *name,
out_unlock:
mutex_unlock(&slab_mutex);
- put_online_mems();
- put_online_cpus();
-
if (err) {
if (flags & SLAB_PANIC)
panic("kmem_cache_create: Failed to create slab '%s'. Error %d\n",
@@ -486,9 +480,6 @@ void kmem_cache_destroy(struct kmem_cache *s)
if (unlikely(!s))
return;
- get_online_cpus();
- get_online_mems();
-
mutex_lock(&slab_mutex);
s->refcount--;
@@ -503,9 +494,6 @@ void kmem_cache_destroy(struct kmem_cache *s)
}
out_unlock:
mutex_unlock(&slab_mutex);
-
- put_online_mems();
- put_online_cpus();
}
EXPORT_SYMBOL(kmem_cache_destroy);
@@ -522,12 +510,10 @@ int kmem_cache_shrink(struct kmem_cache *cachep)
{
int ret;
- get_online_cpus();
- get_online_mems();
+
kasan_cache_shrink(cachep);
ret = __kmem_cache_shrink(cachep);
- put_online_mems();
- put_online_cpus();
+
return ret;
}
EXPORT_SYMBOL(kmem_cache_shrink);
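
From a caller's point of view the hotplug-lock removal above is invisible: the cache API is unchanged and needs no hotplug awareness of its own. A minimal module-side sketch (the foo names are hypothetical):

#include <linux/module.h>
#include <linux/slab.h>

struct foo {
	int a;
};

static struct kmem_cache *foo_cache;

static int __init foo_init(void)
{
	/* Creation takes slab_mutex internally; no get_online_cpus() needed. */
	foo_cache = kmem_cache_create("foo_cache", sizeof(struct foo),
				      0, SLAB_HWCACHE_ALIGN, NULL);
	return foo_cache ? 0 : -ENOMEM;
}

static void __exit foo_exit(void)
{
	/* Likewise, destruction is serialized by slab_mutex alone. */
	kmem_cache_destroy(foo_cache);
}

module_init(foo_init);
module_exit(foo_exit);
MODULE_LICENSE("GPL");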
@@ -912,8 +898,8 @@ void *kmalloc_order(size_t size, gfp_t flags, unsigned int order)
page = alloc_pages(flags, order);
if (likely(page)) {
ret = page_address(page);
- mod_node_page_state(page_pgdat(page), NR_SLAB_UNRECLAIMABLE_B,
- PAGE_SIZE << order);
+ mod_lruvec_page_state(page, NR_SLAB_UNRECLAIMABLE_B,
+ PAGE_SIZE << order);
}
ret = kasan_kmalloc_large(ret, size, flags);
/* As ret might get tagged, call kmemleak hook after KASAN. */
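
The accounting switch matters for allocations that bypass the slab caches entirely. A hedged sketch (the 1 MiB size is arbitrary; any request too large for a kmalloc cache takes this page-allocator path):

#include <linux/slab.h>

static void *big_buf_example(void)
{
	/*
	 * A 1 MiB request is served by alloc_pages() via kmalloc_order().
	 * With __GFP_ACCOUNT (part of GFP_KERNEL_ACCOUNT) the pages are
	 * charged to the current memcg, so the mod_lruvec_page_state()
	 * call above lands in that memcg's NR_SLAB_UNRECLAIMABLE_B
	 * counter rather than only the node-level one.
	 */
	return kmalloc(1 << 20, GFP_KERNEL_ACCOUNT);
}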
@@ -1232,19 +1218,21 @@ size_t ksize(const void *objp)
size_t size;
/*
- * We need to check that the pointed to object is valid, and only then
- * unpoison the shadow memory below. We use __kasan_check_read(), to
- * generate a more useful report at the time ksize() is called (rather
- * than later where behaviour is undefined due to potential
- * use-after-free or double-free).
+ * We need to first check that the pointer to the object is valid, and
+ * only then unpoison the memory. The report printed from ksize() is
+ * more useful than one printed later, when the behaviour could be
+ * undefined due to a potential use-after-free or double-free.
+ *
+ * We use kasan_check_byte(), which is supported for the hardware
+ * tag-based KASAN mode, unlike kasan_check_read/write().
*
- * If the pointed to memory is invalid we return 0, to avoid users of
+ * If the pointed to memory is invalid, we return 0 to avoid users of
* ksize() writing to and potentially corrupting the memory region.
*
* We want to perform the check before __ksize(), to avoid potentially
* crashing in __ksize() due to accessing invalid metadata.
*/
- if (unlikely(ZERO_OR_NULL_PTR(objp)) || !__kasan_check_read(objp, 1))
+ if (unlikely(ZERO_OR_NULL_PTR(objp)) || !kasan_check_byte(objp))
return 0;
size = __ksize(objp);
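
To make the contract described in that comment concrete, a hypothetical caller-side sketch (KASAN-enabled kernel assumed):

#include <linux/printk.h>
#include <linux/slab.h>

static void ksize_contract_example(void)
{
	char *p = kmalloc(13, GFP_KERNEL);
	size_t sz;

	if (!p)
		return;

	sz = ksize(p);	/* usable size of the live object: >= 13 */
	pr_info("usable size: %zu\n", sz);
	kfree(p);

	/*
	 * Calling ksize(p) here would be a use-after-free: the
	 * kasan_check_byte() probe fails, a report is printed at this
	 * call site, and ksize() returns 0 instead of a stale size.
	 */
}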