Diffstat (limited to 'lib/debugobjects.c')
-rw-r--r--  lib/debugobjects.c | 27
1 file changed, 14 insertions(+), 13 deletions(-)
diff --git a/lib/debugobjects.c b/lib/debugobjects.c
index 9867412d7946..a3d4c54f0839 100644
--- a/lib/debugobjects.c
+++ b/lib/debugobjects.c
@@ -430,27 +430,28 @@ static void free_object(struct debug_obj *obj)
 }
 
 #ifdef CONFIG_HOTPLUG_CPU
-static int object_cpu_offline(unsigned int cpu)
+static void put_objects(struct hlist_head *list)
 {
-        struct debug_percpu_free *percpu_pool;
         struct hlist_node *tmp;
         struct debug_obj *obj;
-        unsigned long flags;
 
-        /* Remote access is safe as the CPU is dead already */
-        percpu_pool = per_cpu_ptr(&percpu_obj_pool, cpu);
-        hlist_for_each_entry_safe(obj, tmp, &percpu_pool->free_objs, node) {
+        /*
+         * Using free_object() puts the objects into reuse or schedules
+         * them for freeing and it gets all the accounting correct.
+         */
+        hlist_for_each_entry_safe(obj, tmp, list, node) {
                 hlist_del(&obj->node);
-                kmem_cache_free(obj_cache, obj);
+                free_object(obj);
         }
+}
 
-        raw_spin_lock_irqsave(&pool_lock, flags);
-        obj_pool_used -= percpu_pool->obj_free;
-        debug_objects_freed += percpu_pool->obj_free;
-        raw_spin_unlock_irqrestore(&pool_lock, flags);
-
-        percpu_pool->obj_free = 0;
+static int object_cpu_offline(unsigned int cpu)
+{
+        /* Remote access is safe as the CPU is dead already */
+        struct debug_percpu_free *pcp = per_cpu_ptr(&percpu_obj_pool, cpu);
 
+        put_objects(&pcp->free_objs);
+        pcp->obj_free = 0;
         return 0;
 }
 #endif
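
For reference, the two routines read as follows once the hunk is applied. This is a sketch reassembled from the added and context lines above, not an excerpt of the full file; struct debug_percpu_free, percpu_obj_pool, obj_free and free_object() are defined elsewhere in lib/debugobjects.c.

#ifdef CONFIG_HOTPLUG_CPU
/* Hand every object on @list back through free_object(). */
static void put_objects(struct hlist_head *list)
{
        struct hlist_node *tmp;
        struct debug_obj *obj;

        /*
         * Using free_object() puts the objects into reuse or schedules
         * them for freeing and it gets all the accounting correct.
         */
        hlist_for_each_entry_safe(obj, tmp, list, node) {
                hlist_del(&obj->node);
                free_object(obj);
        }
}

static int object_cpu_offline(unsigned int cpu)
{
        /* Remote access is safe as the CPU is dead already */
        struct debug_percpu_free *pcp = per_cpu_ptr(&percpu_obj_pool, cpu);

        put_objects(&pcp->free_objs);
        pcp->obj_free = 0;
        return 0;
}
#endif

The design point of the change is visible here: instead of calling kmem_cache_free() directly and adjusting obj_pool_used and debug_objects_freed by hand under pool_lock, the offline path now funnels the dead CPU's per-CPU free list through free_object(), which already handles reuse, deferred freeing and the accounting. The callback is presumably still registered for the CPU hotplug dead state (e.g. via cpuhp_setup_state_nocalls() during debug objects initialisation); that registration is outside this hunk.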