Diffstat (limited to 'lib/debugobjects.c')
-rw-r--r--  lib/debugobjects.c | 13 +++++--------
1 file changed, 5 insertions(+), 8 deletions(-)
diff --git a/lib/debugobjects.c b/lib/debugobjects.c
index 70935ed91125..55437fd5128b 100644
--- a/lib/debugobjects.c
+++ b/lib/debugobjects.c
@@ -135,7 +135,6 @@ static void fill_pool(void)
 		if (!new)
 			return;
-		kmemleak_ignore(new);
 		raw_spin_lock_irqsave(&pool_lock, flags);
 		hlist_add_head(&new->node, &obj_pool);
 		debug_objects_allocated++;
@@ -1128,16 +1127,14 @@ static int __init debug_objects_replace_static_objects(void)
 		obj = kmem_cache_zalloc(obj_cache, GFP_KERNEL);
 		if (!obj)
 			goto free;
-		kmemleak_ignore(obj);
 		hlist_add_head(&obj->node, &objects);
 	}
 
 	/*
-	 * When debug_objects_mem_init() is called we know that only
-	 * one CPU is up, so disabling interrupts is enough
-	 * protection. This avoids the lockdep hell of lock ordering.
+	 * debug_objects_mem_init() is now called early that only one CPU is up
+	 * and interrupts have been disabled, so it is safe to replace the
+	 * active object references.
 	 */
-	local_irq_disable();
 
 	/* Remove the statically allocated objects from the pool */
 	hlist_for_each_entry_safe(obj, tmp, &obj_pool, node)
@@ -1158,7 +1155,6 @@ static int __init debug_objects_replace_static_objects(void)
 			cnt++;
 		}
 	}
-	local_irq_enable();
 
 	pr_debug("%d of %d active objects replaced\n",
 		 cnt, obj_pool_used);
@@ -1184,7 +1180,8 @@ void __init debug_objects_mem_init(void)
 
 	obj_cache = kmem_cache_create("debug_objects_cache",
 				      sizeof (struct debug_obj), 0,
-				      SLAB_DEBUG_OBJECTS, NULL);
+				      SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE,
+				      NULL);
 
 	if (!obj_cache || debug_objects_replace_static_objects()) {
 		debug_objects_enabled = 0;
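
For reference, SLAB_NOLEAKTRACE marks an entire cache as invisible to kmemleak, so every object allocated from it is skipped by the leak scanner; that is why the per-allocation kmemleak_ignore() calls can be dropped above. A minimal sketch of the pattern, assuming hypothetical names (example_obj, example_cache and example_init are illustrative, not part of this patch):

/*
 * Sketch only: SLAB_NOLEAKTRACE is the real slab flag used by the patch;
 * the example_* identifiers are hypothetical.
 */
#include <linux/init.h>
#include <linux/list.h>
#include <linux/slab.h>

struct example_obj {
	struct hlist_node node;
};

static struct kmem_cache *example_cache;

static int __init example_init(void)
{
	/*
	 * Objects from this cache are never tracked by kmemleak, so no
	 * per-object kmemleak_ignore() call is needed after allocation.
	 */
	example_cache = kmem_cache_create("example_cache",
					  sizeof(struct example_obj), 0,
					  SLAB_NOLEAKTRACE, NULL);
	return example_cache ? 0 : -ENOMEM;
}

Setting the flag once at cache creation covers every later allocation from that cache, including ones made in atomic context, without an extra call into kmemleak for each object.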
