Diffstat (limited to 'lib/debugobjects.c')
 -rw-r--r--  lib/debugobjects.c | 121
 1 file changed, 72 insertions(+), 49 deletions(-)
diff --git a/lib/debugobjects.c b/lib/debugobjects.c
index 61261195f5b6..6946f8e204e3 100644
--- a/lib/debugobjects.c
+++ b/lib/debugobjects.c
@@ -19,6 +19,7 @@
#include <linux/slab.h>
#include <linux/hash.h>
#include <linux/kmemleak.h>
+#include <linux/cpu.h>
#define ODEBUG_HASH_BITS 14
#define ODEBUG_HASH_SIZE (1 << ODEBUG_HASH_BITS)
@@ -90,7 +91,7 @@ static int debug_objects_pool_size __read_mostly
= ODEBUG_POOL_SIZE;
static int debug_objects_pool_min_level __read_mostly
= ODEBUG_POOL_MIN_LEVEL;
-static struct debug_obj_descr *descr_test __read_mostly;
+static const struct debug_obj_descr *descr_test __read_mostly;
static struct kmem_cache *obj_cache __read_mostly;
/*
@@ -132,14 +133,18 @@ static void fill_pool(void)
struct debug_obj *obj;
unsigned long flags;
- if (likely(obj_pool_free >= debug_objects_pool_min_level))
+ if (likely(READ_ONCE(obj_pool_free) >= debug_objects_pool_min_level))
return;
/*
* Reuse objs from the global free list; they will be reinitialized
* when allocating.
+ *
+ * Both obj_nr_tofree and obj_pool_free are checked locklessly; the
+ * READ_ONCE()s pair with the WRITE_ONCE()s in pool_lock critical
+ * sections.
*/
- while (obj_nr_tofree && (obj_pool_free < obj_pool_min_free)) {
+ while (READ_ONCE(obj_nr_tofree) && (READ_ONCE(obj_pool_free) < obj_pool_min_free)) {
raw_spin_lock_irqsave(&pool_lock, flags);
/*
* Recheck with the lock held as the worker thread might have
@@ -148,9 +153,9 @@ static void fill_pool(void)
while (obj_nr_tofree && (obj_pool_free < obj_pool_min_free)) {
obj = hlist_entry(obj_to_free.first, typeof(*obj), node);
hlist_del(&obj->node);
- obj_nr_tofree--;
+ WRITE_ONCE(obj_nr_tofree, obj_nr_tofree - 1);
hlist_add_head(&obj->node, &obj_pool);
- obj_pool_free++;
+ WRITE_ONCE(obj_pool_free, obj_pool_free + 1);
}
raw_spin_unlock_irqrestore(&pool_lock, flags);
}
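
Note on the pattern the two fill_pool() hunks above establish for the whole patch: obj_pool_free and obj_nr_tofree are checked locklessly on the fast path, so those accesses are marked with READ_ONCE()/WRITE_ONCE() to rule out load/store tearing, and the values are rechecked under pool_lock before anything is done with them. A minimal userspace sketch of the idiom, with C11 relaxed atomics standing in for READ_ONCE()/WRITE_ONCE() and all names invented:

    #include <pthread.h>
    #include <stdatomic.h>

    #define POOL_MIN_LEVEL 256      /* stands in for debug_objects_pool_min_level */

    static pthread_mutex_t pool_lock = PTHREAD_MUTEX_INITIALIZER;
    static _Atomic int pool_free;   /* stands in for obj_pool_free */

    static void fill_pool_sketch(void)
    {
            /* Lockless fast path: a stale value only costs a harmless
             * retry, but the access must not tear, hence the marked load. */
            if (atomic_load_explicit(&pool_free, memory_order_relaxed) >= POOL_MIN_LEVEL)
                    return;

            pthread_mutex_lock(&pool_lock);
            /* Recheck under the lock: another thread may have refilled
             * the pool between the fast-path check and the acquisition. */
            while (atomic_load_explicit(&pool_free, memory_order_relaxed) < POOL_MIN_LEVEL) {
                    /* ... allocate one object and chain it into the pool ... */
                    atomic_fetch_add_explicit(&pool_free, 1, memory_order_relaxed);
            }
            pthread_mutex_unlock(&pool_lock);
    }

Reads made while pool_lock is held stay plain, since the lock serializes all writers; the writes still get WRITE_ONCE() because lockless readers can observe them at any time, which is exactly what these hunks annotate.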
@@ -158,7 +163,7 @@ static void fill_pool(void)
if (unlikely(!obj_cache))
return;
- while (obj_pool_free < debug_objects_pool_min_level) {
+ while (READ_ONCE(obj_pool_free) < debug_objects_pool_min_level) {
struct debug_obj *new[ODEBUG_BATCH_SIZE];
int cnt;
@@ -174,7 +179,7 @@ static void fill_pool(void)
while (cnt) {
hlist_add_head(&new[--cnt]->node, &obj_pool);
debug_objects_allocated++;
- obj_pool_free++;
+ WRITE_ONCE(obj_pool_free, obj_pool_free + 1);
}
raw_spin_unlock_irqrestore(&pool_lock, flags);
}
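
The refill loop here (its allocation half is elided from the hunk context) builds a batch of ODEBUG_BATCH_SIZE objects from the slab cache with pool_lock dropped, then inserts the whole batch under a single lock acquisition, keeping the irqsave critical section short. A userspace sketch of that shape, names invented:

    #include <pthread.h>
    #include <stdlib.h>

    #define BATCH_SIZE 16           /* stands in for ODEBUG_BATCH_SIZE */

    struct node { struct node *next; };

    static pthread_mutex_t pool_lock = PTHREAD_MUTEX_INITIALIZER;
    static struct node *pool_head;

    static int refill_batch(void)
    {
            struct node *batch[BATCH_SIZE];
            int cnt;

            /* Allocate the whole batch with the lock dropped. */
            for (cnt = 0; cnt < BATCH_SIZE; cnt++) {
                    batch[cnt] = malloc(sizeof(*batch[cnt]));
                    if (!batch[cnt])
                            break;
            }
            if (!cnt)
                    return -1;

            /* Splice the batch in under one short critical section. */
            pthread_mutex_lock(&pool_lock);
            while (cnt) {
                    batch[--cnt]->next = pool_head;
                    pool_head = batch[cnt];
            }
            pthread_mutex_unlock(&pool_lock);
            return 0;
    }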
@@ -219,7 +224,7 @@ static struct debug_obj *__alloc_object(struct hlist_head *list)
* Must be called with interrupts disabled.
*/
static struct debug_obj *
-alloc_object(void *addr, struct debug_bucket *b, struct debug_obj_descr *descr)
+alloc_object(void *addr, struct debug_bucket *b, const struct debug_obj_descr *descr)
{
struct debug_percpu_free *percpu_pool = this_cpu_ptr(&percpu_obj_pool);
struct debug_obj *obj;
@@ -236,7 +241,7 @@ alloc_object(void *addr, struct debug_bucket *b, struct debug_obj_descr *descr)
obj = __alloc_object(&obj_pool);
if (obj) {
obj_pool_used++;
- obj_pool_free--;
+ WRITE_ONCE(obj_pool_free, obj_pool_free - 1);
/*
* Looking ahead, allocate one batch of debug objects and
@@ -255,7 +260,7 @@ alloc_object(void *addr, struct debug_bucket *b, struct debug_obj_descr *descr)
&percpu_pool->free_objs);
percpu_pool->obj_free++;
obj_pool_used++;
- obj_pool_free--;
+ WRITE_ONCE(obj_pool_free, obj_pool_free - 1);
}
}
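
This hunk refills the per-CPU cache from the global pool. The allocator is a two-level scheme: a per-CPU free list usable without the global lock (callers run with interrupts disabled), backed by the shared obj_pool. A rough userspace analogue, with a per-thread cache standing in for the per-CPU pool and all names invented:

    #include <pthread.h>
    #include <stdlib.h>

    struct node { struct node *next; };

    static pthread_mutex_t pool_lock = PTHREAD_MUTEX_INITIALIZER;
    static struct node *global_pool;        /* stands in for obj_pool */

    /* Per-thread cache as a stand-in for percpu_obj_pool, which the
     * kernel protects by running with interrupts disabled instead. */
    static _Thread_local struct node *local_cache;

    static struct node *alloc_obj(void)
    {
            struct node *obj = local_cache;

            if (obj) {                      /* lock-free fast path */
                    local_cache = obj->next;
                    return obj;
            }

            pthread_mutex_lock(&pool_lock); /* slow path: global pool */
            obj = global_pool;
            if (obj)
                    global_pool = obj->next;
            pthread_mutex_unlock(&pool_lock);
            return obj;
    }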
@@ -309,8 +314,8 @@ static void free_obj_work(struct work_struct *work)
obj = hlist_entry(obj_to_free.first, typeof(*obj), node);
hlist_del(&obj->node);
hlist_add_head(&obj->node, &obj_pool);
- obj_pool_free++;
- obj_nr_tofree--;
+ WRITE_ONCE(obj_pool_free, obj_pool_free + 1);
+ WRITE_ONCE(obj_nr_tofree, obj_nr_tofree - 1);
}
raw_spin_unlock_irqrestore(&pool_lock, flags);
return;
@@ -324,7 +329,7 @@ free_objs:
if (obj_nr_tofree) {
hlist_move_list(&obj_to_free, &tofree);
debug_objects_freed += obj_nr_tofree;
- obj_nr_tofree = 0;
+ WRITE_ONCE(obj_nr_tofree, 0);
}
raw_spin_unlock_irqrestore(&pool_lock, flags);
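
free_obj_work() uses a detach-then-free shape: under pool_lock the whole obj_to_free list is moved to a local list head via hlist_move_list(), an O(1) pointer swap, and the counter is reset; the expensive kmem_cache_free() calls then run with the lock dropped. A compact userspace sketch, names invented:

    #include <pthread.h>
    #include <stdlib.h>

    struct node { struct node *next; };

    static pthread_mutex_t pool_lock = PTHREAD_MUTEX_INITIALIZER;
    static struct node *to_free;    /* stands in for obj_to_free */

    static void free_work(void)
    {
            struct node *head;

            /* Detach the whole list in O(1) while holding the lock
             * (hlist_move_list() in the kernel) ... */
            pthread_mutex_lock(&pool_lock);
            head = to_free;
            to_free = NULL;
            pthread_mutex_unlock(&pool_lock);

            /* ... then do the slow frees with the lock dropped. */
            while (head) {
                    struct node *next = head->next;
                    free(head);     /* kmem_cache_free() in the kernel */
                    head = next;
            }
    }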
@@ -375,10 +380,10 @@ free_to_obj_pool:
obj_pool_used--;
if (work) {
- obj_nr_tofree++;
+ WRITE_ONCE(obj_nr_tofree, obj_nr_tofree + 1);
hlist_add_head(&obj->node, &obj_to_free);
if (lookahead_count) {
- obj_nr_tofree += lookahead_count;
+ WRITE_ONCE(obj_nr_tofree, obj_nr_tofree + lookahead_count);
obj_pool_used -= lookahead_count;
while (lookahead_count) {
hlist_add_head(&objs[--lookahead_count]->node,
@@ -396,15 +401,15 @@ free_to_obj_pool:
for (i = 0; i < ODEBUG_BATCH_SIZE; i++) {
obj = __alloc_object(&obj_pool);
hlist_add_head(&obj->node, &obj_to_free);
- obj_pool_free--;
- obj_nr_tofree++;
+ WRITE_ONCE(obj_pool_free, obj_pool_free - 1);
+ WRITE_ONCE(obj_nr_tofree, obj_nr_tofree + 1);
}
}
} else {
- obj_pool_free++;
+ WRITE_ONCE(obj_pool_free, obj_pool_free + 1);
hlist_add_head(&obj->node, &obj_pool);
if (lookahead_count) {
- obj_pool_free += lookahead_count;
+ WRITE_ONCE(obj_pool_free, obj_pool_free + lookahead_count);
obj_pool_used -= lookahead_count;
while (lookahead_count) {
hlist_add_head(&objs[--lookahead_count]->node,
@@ -423,12 +428,31 @@ free_to_obj_pool:
static void free_object(struct debug_obj *obj)
{
__free_object(obj);
- if (!obj_freeing && obj_nr_tofree) {
+ if (!READ_ONCE(obj_freeing) && READ_ONCE(obj_nr_tofree)) {
WRITE_ONCE(obj_freeing, true);
schedule_delayed_work(&debug_obj_work, ODEBUG_FREE_WORK_DELAY);
}
}
+#ifdef CONFIG_HOTPLUG_CPU
+static int object_cpu_offline(unsigned int cpu)
+{
+ struct debug_percpu_free *percpu_pool;
+ struct hlist_node *tmp;
+ struct debug_obj *obj;
+
+ /* Remote access is safe as the CPU is dead already */
+ percpu_pool = per_cpu_ptr(&percpu_obj_pool, cpu);
+ hlist_for_each_entry_safe(obj, tmp, &percpu_pool->free_objs, node) {
+ hlist_del(&obj->node);
+ kmem_cache_free(obj_cache, obj);
+ }
+ percpu_pool->obj_free = 0;
+
+ return 0;
+}
+#endif
+
/*
* We run out of memory. That means we probably have tons of objects
* allocated.
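
The new object_cpu_offline() handler is a CPU-hotplug teardown callback that runs after the outgoing CPU is already dead (it is registered against CPUHP_DEBUG_OBJ_DEAD at the end of this patch), which is why the per-CPU pool can be emptied without locks or disabled interrupts. A hypothetical driver-side sketch of the same registration pattern, using a dynamic hotplug state and invented names:

    #include <linux/cpu.h>
    #include <linux/cpuhotplug.h>
    #include <linux/init.h>

    static int mydrv_cpu_dead(unsigned int cpu)
    {
            /* The CPU is dead by the time this teardown runs, so its
             * per-CPU data can be accessed remotely without locking. */
            return 0;
    }

    static int __init mydrv_init(void)
    {
            /* The _nocalls variant registers the callbacks without
             * invoking them for CPUs already in the target state. */
            int ret = cpuhp_setup_state_nocalls(CPUHP_BP_PREPARE_DYN,
                                                "mydrv:dead", NULL,
                                                mydrv_cpu_dead);

            /* Dynamic states return a positive state number on success. */
            return ret < 0 ? ret : 0;
    }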
@@ -471,7 +495,7 @@ static struct debug_bucket *get_bucket(unsigned long addr)
static void debug_print_object(struct debug_obj *obj, char *msg)
{
- struct debug_obj_descr *descr = obj->descr;
+ const struct debug_obj_descr *descr = obj->descr;
static int limit;
if (limit < 5 && descr != descr_test) {
@@ -525,7 +549,7 @@ static void debug_object_is_on_stack(void *addr, int onstack)
}
static void
-__debug_object_init(void *addr, struct debug_obj_descr *descr, int onstack)
+__debug_object_init(void *addr, const struct debug_obj_descr *descr, int onstack)
{
enum debug_obj_state state;
bool check_stack = false;
@@ -533,7 +557,12 @@ __debug_object_init(void *addr, struct debug_obj_descr *descr, int onstack)
struct debug_obj *obj;
unsigned long flags;
- fill_pool();
+ /*
+ * On RT enabled kernels the pool refill must happen in preemptible
+ * context:
+ */
+ if (!IS_ENABLED(CONFIG_PREEMPT_RT) || preemptible())
+ fill_pool();
db = get_bucket((unsigned long) addr);
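
On PREEMPT_RT the pool refill must be skipped unless the context is preemptible, because the slab allocator takes sleeping locks there while debug_object_init() may legitimately be called with preemption or interrupts disabled. On non-RT builds IS_ENABLED() folds to the constant 0, the negation makes the condition constant true, and the preemptible() test disappears at compile time. IS_ENABLED() is pure preprocessor; a simplified userspace re-creation of the trick from include/linux/kconfig.h:

    #include <stdio.h>

    /* Simplified re-creation of the kernel's IS_ENABLED() machinery:
     * a config symbol defined to 1 turns into the literal 1, anything
     * else (including an undefined symbol) into the literal 0. */
    #define __ARG_PLACEHOLDER_1 0,
    #define __take_second_arg(__ignored, val, ...) val
    #define __is_defined(x) ___is_defined(x)
    #define ___is_defined(val) ____is_defined(__ARG_PLACEHOLDER_##val)
    #define ____is_defined(arg1_or_junk) __take_second_arg(arg1_or_junk 1, 0)
    #define IS_ENABLED(option) __is_defined(option)

    #define CONFIG_PREEMPT_RT 1 /* comment out to model a non-RT build */

    int main(void)
    {
            /* Prints 1 here; with the #define removed it prints 0, and a
             * condition like "!IS_ENABLED(...) || preemptible()" would
             * fold to constant true at compile time. */
            printf("IS_ENABLED(CONFIG_PREEMPT_RT) = %d\n",
                   IS_ENABLED(CONFIG_PREEMPT_RT));
            return 0;
    }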
@@ -583,7 +612,7 @@ __debug_object_init(void *addr, struct debug_obj_descr *descr, int onstack)
* @addr: address of the object
* @descr: pointer to an object specific debug description structure
*/
-void debug_object_init(void *addr, struct debug_obj_descr *descr)
+void debug_object_init(void *addr, const struct debug_obj_descr *descr)
{
if (!debug_objects_enabled)
return;
@@ -598,7 +627,7 @@ EXPORT_SYMBOL_GPL(debug_object_init);
* @addr: address of the object
* @descr: pointer to an object specific debug description structure
*/
-void debug_object_init_on_stack(void *addr, struct debug_obj_descr *descr)
+void debug_object_init_on_stack(void *addr, const struct debug_obj_descr *descr)
{
if (!debug_objects_enabled)
return;
@@ -613,7 +642,7 @@ EXPORT_SYMBOL_GPL(debug_object_init_on_stack);
* @descr: pointer to an object specific debug description structure
* Returns 0 for success, -EINVAL for check failed.
*/
-int debug_object_activate(void *addr, struct debug_obj_descr *descr)
+int debug_object_activate(void *addr, const struct debug_obj_descr *descr)
{
enum debug_obj_state state;
struct debug_bucket *db;
@@ -691,7 +720,7 @@ EXPORT_SYMBOL_GPL(debug_object_activate);
* @addr: address of the object
* @descr: pointer to an object specific debug description structure
*/
-void debug_object_deactivate(void *addr, struct debug_obj_descr *descr)
+void debug_object_deactivate(void *addr, const struct debug_obj_descr *descr)
{
struct debug_bucket *db;
struct debug_obj *obj;
@@ -743,7 +772,7 @@ EXPORT_SYMBOL_GPL(debug_object_deactivate);
* @addr: address of the object
* @descr: pointer to an object specific debug description structure
*/
-void debug_object_destroy(void *addr, struct debug_obj_descr *descr)
+void debug_object_destroy(void *addr, const struct debug_obj_descr *descr)
{
enum debug_obj_state state;
struct debug_bucket *db;
@@ -793,7 +822,7 @@ EXPORT_SYMBOL_GPL(debug_object_destroy);
* @addr: address of the object
* @descr: pointer to an object specific debug description structure
*/
-void debug_object_free(void *addr, struct debug_obj_descr *descr)
+void debug_object_free(void *addr, const struct debug_obj_descr *descr)
{
enum debug_obj_state state;
struct debug_bucket *db;
@@ -834,7 +863,7 @@ EXPORT_SYMBOL_GPL(debug_object_free);
* @addr: address of the object
* @descr: pointer to an object specific debug description structure
*/
-void debug_object_assert_init(void *addr, struct debug_obj_descr *descr)
+void debug_object_assert_init(void *addr, const struct debug_obj_descr *descr)
{
struct debug_bucket *db;
struct debug_obj *obj;
@@ -882,7 +911,7 @@ EXPORT_SYMBOL_GPL(debug_object_assert_init);
* @next: state to move to if expected state is found
*/
void
-debug_object_active_state(void *addr, struct debug_obj_descr *descr,
+debug_object_active_state(void *addr, const struct debug_obj_descr *descr,
unsigned int expect, unsigned int next)
{
struct debug_bucket *db;
@@ -930,7 +959,7 @@ EXPORT_SYMBOL_GPL(debug_object_active_state);
static void __debug_check_no_obj_freed(const void *address, unsigned long size)
{
unsigned long flags, oaddr, saddr, eaddr, paddr, chunks;
- struct debug_obj_descr *descr;
+ const struct debug_obj_descr *descr;
enum debug_obj_state state;
struct debug_bucket *db;
struct hlist_node *tmp;
@@ -982,7 +1011,7 @@ repeat:
debug_objects_maxchecked = objs_checked;
/* Schedule work to actually kmem_cache_free() objects */
- if (!obj_freeing && obj_nr_tofree) {
+ if (!READ_ONCE(obj_freeing) && READ_ONCE(obj_nr_tofree)) {
WRITE_ONCE(obj_freeing, true);
schedule_delayed_work(&debug_obj_work, ODEBUG_FREE_WORK_DELAY);
}
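
The same test-then-set guard appears here as in free_object() earlier: obj_freeing is read locklessly and only then set. The check can race and two CPUs may both schedule the work, which is harmless since queueing an already-pending delayed work is a no-op; the flag only avoids the call in the common case. Sketched with a C11 atomic flag, names invented:

    #include <stdatomic.h>
    #include <stdbool.h>

    static atomic_bool freeing;         /* stands in for obj_freeing */

    static void kick_free_work(int backlog)
    {
            /* Racy test-then-set: duplicate winners merely schedule an
             * already-pending work, so no stronger ordering is needed. */
            if (!atomic_load_explicit(&freeing, memory_order_relaxed) && backlog) {
                    atomic_store_explicit(&freeing, true, memory_order_relaxed);
                    /* schedule_delayed_work(&debug_obj_work, delay); */
            }
    }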
@@ -1008,28 +1037,17 @@ static int debug_stats_show(struct seq_file *m, void *v)
seq_printf(m, "max_checked :%d\n", debug_objects_maxchecked);
seq_printf(m, "warnings :%d\n", debug_objects_warnings);
seq_printf(m, "fixups :%d\n", debug_objects_fixups);
- seq_printf(m, "pool_free :%d\n", obj_pool_free + obj_percpu_free);
+ seq_printf(m, "pool_free :%d\n", READ_ONCE(obj_pool_free) + obj_percpu_free);
seq_printf(m, "pool_pcp_free :%d\n", obj_percpu_free);
seq_printf(m, "pool_min_free :%d\n", obj_pool_min_free);
seq_printf(m, "pool_used :%d\n", obj_pool_used - obj_percpu_free);
seq_printf(m, "pool_max_used :%d\n", obj_pool_max_used);
- seq_printf(m, "on_free_list :%d\n", obj_nr_tofree);
+ seq_printf(m, "on_free_list :%d\n", READ_ONCE(obj_nr_tofree));
seq_printf(m, "objs_allocated:%d\n", debug_objects_allocated);
seq_printf(m, "objs_freed :%d\n", debug_objects_freed);
return 0;
}
-
-static int debug_stats_open(struct inode *inode, struct file *filp)
-{
- return single_open(filp, debug_stats_show, NULL);
-}
-
-static const struct file_operations debug_stats_fops = {
- .open = debug_stats_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
+DEFINE_SHOW_ATTRIBUTE(debug_stats);
static int __init debug_objects_init_debugfs(void)
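
DEFINE_SHOW_ATTRIBUTE(), from <linux/seq_file.h>, generates the open handler and file_operations that the deleted lines spelled out by hand, deriving the names from the debug_stats_show() function above. Its expansion is roughly:

    static int debug_stats_open(struct inode *inode, struct file *file)
    {
            return single_open(file, debug_stats_show, inode->i_private);
    }

    static const struct file_operations debug_stats_fops = {
            .owner          = THIS_MODULE,
            .open           = debug_stats_open,
            .read           = seq_read,
            .llseek         = seq_lseek,
            .release        = single_release,
    };

The only functional deltas against the removed open-coded version are the .owner initializer and passing inode->i_private rather than NULL to single_open(); neither matters for this debugfs file, whose show function ignores the private data.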
{
@@ -1059,7 +1077,7 @@ struct self_test {
unsigned long dummy2[3];
};
-static __initdata struct debug_obj_descr descr_type_test;
+static __initconst const struct debug_obj_descr descr_type_test;
static bool __init is_static_object(void *addr)
{
@@ -1184,7 +1202,7 @@ out:
return res;
}
-static __initdata struct debug_obj_descr descr_type_test = {
+static __initconst const struct debug_obj_descr descr_type_test = {
.name = "selftest",
.is_static_object = is_static_object,
.fixup_init = fixup_init,
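
This hunk, together with the prototype changes above, constifies the descriptor plumbing end to end: every entry point now takes const struct debug_obj_descr *, so descriptors can live in rodata, and the selftest descriptor accordingly moves from __initdata to __initconst, the section for const init-time data. A hypothetical caller-side sketch with invented type and names:

    #include <linux/debugobjects.h>

    struct mydrv_obj { int dummy; };

    /* The descriptor can now be const and placed in rodata, because
     * debug_object_init() and friends accept const pointers. */
    static const struct debug_obj_descr mydrv_debug_descr = {
            .name = "mydrv_obj",
    };

    static void mydrv_obj_setup(struct mydrv_obj *obj)
    {
            debug_object_init(obj, &mydrv_debug_descr);
    }

    static void mydrv_obj_teardown(struct mydrv_obj *obj)
    {
            debug_object_free(obj, &mydrv_debug_descr);
    }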
@@ -1374,6 +1392,11 @@ void __init debug_objects_mem_init(void)
} else
debug_objects_selftest();
+#ifdef CONFIG_HOTPLUG_CPU
+ cpuhp_setup_state_nocalls(CPUHP_DEBUG_OBJ_DEAD, "object:offline", NULL,
+ object_cpu_offline);
+#endif
+
/*
* Increase the thresholds for allocating and freeing objects
* according to the number of possible CPUs available in the system.