Diffstat (limited to 'mm'):

 mm/slab_common.c | 48
 mm/slub.c        | 18
 2 files changed, 48 insertions(+), 18 deletions(-)
diff --git a/mm/slab_common.c b/mm/slab_common.c
index 0dfa3cfb6be5..2180d0612353 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -392,6 +392,28 @@ kmem_cache_create(const char *name, unsigned int size, unsigned int align,
}
EXPORT_SYMBOL(kmem_cache_create);
+#ifdef SLAB_SUPPORTS_SYSFS
+/*
+ * For a given kmem_cache, kmem_cache_destroy() should only be called
+ * once or there will be a use-after-free problem. The actual deletion
+ * and release of the kobject does not need slab_mutex or cpu_hotplug_lock
+ * protection. So they are now done without holding those locks.
+ *
+ * Note that there will be a slight delay in the deletion of sysfs files
+ * if kmem_cache_release() is called indirectly from a work function.
+ */
+static void kmem_cache_release(struct kmem_cache *s)
+{
+ sysfs_slab_unlink(s);
+ sysfs_slab_release(s);
+}
+#else
+static void kmem_cache_release(struct kmem_cache *s)
+{
+ slab_kmem_cache_release(s);
+}
+#endif
+
static void slab_caches_to_rcu_destroy_workfn(struct work_struct *work)
{
LIST_HEAD(to_destroy);
@@ -418,11 +440,7 @@ static void slab_caches_to_rcu_destroy_workfn(struct work_struct *work)
list_for_each_entry_safe(s, s2, &to_destroy, list) {
debugfs_slab_release(s);
kfence_shutdown_cache(s);
-#ifdef SLAB_SUPPORTS_SYSFS
- sysfs_slab_release(s);
-#else
- slab_kmem_cache_release(s);
-#endif
+ kmem_cache_release(s);
}
}
@@ -437,20 +455,11 @@ static int shutdown_cache(struct kmem_cache *s)
list_del(&s->list);
if (s->flags & SLAB_TYPESAFE_BY_RCU) {
-#ifdef SLAB_SUPPORTS_SYSFS
- sysfs_slab_unlink(s);
-#endif
list_add_tail(&s->list, &slab_caches_to_rcu_destroy);
schedule_work(&slab_caches_to_rcu_destroy_work);
} else {
kfence_shutdown_cache(s);
debugfs_slab_release(s);
-#ifdef SLAB_SUPPORTS_SYSFS
- sysfs_slab_unlink(s);
- sysfs_slab_release(s);
-#else
- slab_kmem_cache_release(s);
-#endif
}
return 0;
@@ -465,14 +474,19 @@ void slab_kmem_cache_release(struct kmem_cache *s)
void kmem_cache_destroy(struct kmem_cache *s)
{
+ int refcnt;
+ bool rcu_set;
+
if (unlikely(!s) || !kasan_check_byte(s))
return;
cpus_read_lock();
mutex_lock(&slab_mutex);
- s->refcount--;
- if (s->refcount)
+ rcu_set = s->flags & SLAB_TYPESAFE_BY_RCU;
+
+ refcnt = --s->refcount;
+ if (refcnt)
goto out_unlock;
WARN(shutdown_cache(s),
@@ -481,6 +495,8 @@ void kmem_cache_destroy(struct kmem_cache *s)
out_unlock:
mutex_unlock(&slab_mutex);
cpus_read_unlock();
+ if (!refcnt && !rcu_set)
+ kmem_cache_release(s);
}
EXPORT_SYMBOL(kmem_cache_destroy);
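
For reference, here is a sketch of kmem_cache_destroy() with the hunks above applied (assembled from this diff, not copied from the post-patch tree; the WARN() format string falls outside the hunk, so a placeholder message stands in for it). The point of the change: the refcount and the SLAB_TYPESAFE_BY_RCU bit are sampled while slab_mutex is held, and the kobject teardown in kmem_cache_release() runs only after slab_mutex and cpu_hotplug_lock have been dropped.

void kmem_cache_destroy(struct kmem_cache *s)
{
        int refcnt;
        bool rcu_set;

        if (unlikely(!s) || !kasan_check_byte(s))
                return;

        cpus_read_lock();
        mutex_lock(&slab_mutex);

        /* Sample state under slab_mutex; it is consulted after unlock. */
        rcu_set = s->flags & SLAB_TYPESAFE_BY_RCU;

        refcnt = --s->refcount;
        if (refcnt)
                goto out_unlock;

        /* Placeholder message; the real format string is elided above. */
        WARN(shutdown_cache(s), "cache still has objects");
out_unlock:
        mutex_unlock(&slab_mutex);
        cpus_read_unlock();
        /*
         * SLAB_TYPESAFE_BY_RCU caches are released later from the RCU
         * work item; everything else is released here, outside the locks.
         */
        if (!refcnt && !rcu_set)
                kmem_cache_release(s);
}
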
diff --git a/mm/slub.c b/mm/slub.c
index dc59b9e8c66f..a48ac10a1153 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -310,6 +310,11 @@ static inline void stat(const struct kmem_cache *s, enum stat_item si)
*/
static nodemask_t slab_nodes;
+/*
+ * Workqueue used for flush_cpu_slab().
+ */
+static struct workqueue_struct *flushwq;
+
/********************************************************************
* Core slab cache functions
*******************************************************************/
@@ -2730,7 +2735,7 @@ static void flush_all_cpus_locked(struct kmem_cache *s)
INIT_WORK(&sfw->work, flush_cpu_slab);
sfw->skip = false;
sfw->s = s;
- schedule_work_on(cpu, &sfw->work);
+ queue_work_on(cpu, flushwq, &sfw->work);
}
for_each_online_cpu(cpu) {
@@ -4858,6 +4863,8 @@ void __init kmem_cache_init(void)
void __init kmem_cache_init_late(void)
{
+ flushwq = alloc_workqueue("slub_flushwq", WQ_MEM_RECLAIM, 0);
+ WARN_ON(!flushwq);
}
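
The two slub.c hunks above move the flush_cpu_slab() work items off the system workqueue (schedule_work_on()) and onto a dedicated queue allocated with WQ_MEM_RECLAIM in kmem_cache_init_late(). A WQ_MEM_RECLAIM workqueue keeps a rescuer thread, so queued work can still make progress when the system is too short on memory to spawn new kworkers. A minimal self-contained sketch of the same pattern, with illustrative names that are not taken from the patch:

#include <linux/module.h>
#include <linux/workqueue.h>

static struct workqueue_struct *example_wq;

static void example_fn(struct work_struct *work)
{
        pr_info("ran on the dedicated reclaim-safe workqueue\n");
}

static DECLARE_WORK(example_work, example_fn);

static int __init example_init(void)
{
        /* WQ_MEM_RECLAIM guarantees a rescuer thread for forward progress. */
        example_wq = alloc_workqueue("example_wq", WQ_MEM_RECLAIM, 0);
        if (!example_wq)
                return -ENOMEM;
        queue_work(example_wq, &example_work);
        return 0;
}

static void __exit example_exit(void)
{
        flush_work(&example_work);
        destroy_workqueue(example_wq);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");
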
struct kmem_cache *
@@ -4926,6 +4933,8 @@ void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, unsigned long caller)
/* Honor the call site pointer we received. */
trace_kmalloc(caller, ret, s, size, s->size, gfpflags);
+ ret = kasan_kmalloc(s, ret, size, gfpflags);
+
return ret;
}
EXPORT_SYMBOL(__kmalloc_track_caller);
@@ -4957,6 +4966,8 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
/* Honor the call site pointer we received. */
trace_kmalloc_node(caller, ret, s, size, s->size, gfpflags, node);
+ ret = kasan_kmalloc(s, ret, size, gfpflags);
+
return ret;
}
EXPORT_SYMBOL(__kmalloc_node_track_caller);
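
The two *_track_caller() hunks above pass the freshly allocated object through kasan_kmalloc(), which applies KASAN's kmalloc handling (redzone poisoning beyond the requested size and, with the tag-based modes, pointer tagging) before the pointer is returned. Helpers such as kstrdup() and kmemdup() allocate through kmalloc_track_caller(), so they reach exactly this path; an illustrative caller, not from the patch:

#include <linux/slab.h>
#include <linux/string.h>

/*
 * kstrdup() allocates via kmalloc_track_caller(), which lands in
 * __kmalloc_track_caller() above, so the duplicated buffer now gets
 * the same kasan_kmalloc() treatment as a plain kmalloc() object.
 */
static char *dup_label(const char *src)
{
        return kstrdup(src, GFP_KERNEL);
}
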
@@ -5885,7 +5896,8 @@ static char *create_unique_id(struct kmem_cache *s)
char *name = kmalloc(ID_STR_LENGTH, GFP_KERNEL);
char *p = name;
- BUG_ON(!name);
+ if (!name)
+ return ERR_PTR(-ENOMEM);
*p++ = ':';
/*
@@ -5943,6 +5955,8 @@ static int sysfs_slab_add(struct kmem_cache *s)
* for the symlinks.
*/
name = create_unique_id(s);
+ if (IS_ERR(name))
+ return PTR_ERR(name);
}
s->kobj.kset = kset;
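
Finally, the create_unique_id()/sysfs_slab_add() hunks replace a BUG_ON() on allocation failure with the kernel's usual ERR_PTR convention: encode a negative errno in the returned pointer and let the caller recover it with IS_ERR()/PTR_ERR(). A minimal sketch of that idiom, with hypothetical helper names:

#include <linux/err.h>
#include <linux/slab.h>

/* Returns a buffer on success, or a negative errno encoded via ERR_PTR(). */
static char *make_name(gfp_t gfp)
{
        char *name = kmalloc(16, gfp);

        if (!name)
                return ERR_PTR(-ENOMEM);        /* was BUG_ON(!name) before */
        return name;
}

static int use_name(void)
{
        char *name = make_name(GFP_KERNEL);

        if (IS_ERR(name))
                return PTR_ERR(name);   /* propagates -ENOMEM to the caller */
        kfree(name);
        return 0;
}
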