author     Ingo Molnar <mingo@elte.hu>  2006-12-07 07:40:50 +0300
committer  Linus Torvalds <torvalds@woody.osdl.org>  2006-12-07 19:39:46 +0300
commit     70e4506765602cca047cfa31933836e354c61a63 (patch)
tree       b490e5066404eab1ec7cf5147521c0bd53226b07 /kernel/lockdep.c
parent     72be2ccfff0e0e332b32f7ef8372890e39b7c4cb (diff)
download   linux-70e4506765602cca047cfa31933836e354c61a63.tar.xz
[PATCH] lockdep: register_lock_class() fix
The hash_lock must only ever be taken with irqs disabled. This happens in
all the important places, except one codepath: register_lock_class(). The
race should trigger rarely, because register_lock_class() is quite rare and
mostly single-threaded (it usually happens during init). The fix is to
disable irqs around the lock.

(Bug found live in -rt: there preemption is a lot more aggressive, and
preempting with the hash_lock held caused a lockup.)

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
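The pattern the patch applies is the usual irq-safe spinlock sequence: save and disable local interrupts, take the lock, and re-enable interrupts only after the unlock, so the CPU can neither be interrupted nor (on -rt) preempted while hash_lock is held. A minimal sketch of that shape, using the same primitives as the diff below (the surrounding body is illustrative, not the actual lockdep code):

	unsigned long flags;

	raw_local_irq_save(flags);		/* irqs off before taking hash_lock */
	__raw_spin_lock(&hash_lock);

	/* ... walk/modify the class hash under the lock ... */

	__raw_spin_unlock(&hash_lock);
	raw_local_irq_restore(flags);		/* irqs back on only after the unlock */

Note that every exit from the critical section has to repeat the unlock + restore pair, which is why the patch also touches the MAX_LOCKDEP_KEYS bail-out and the verbose() branch.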
Diffstat (limited to 'kernel/lockdep.c')
-rw-r--r--  kernel/lockdep.c | 6 ++++++
1 file changed, 6 insertions(+), 0 deletions(-)
diff --git a/kernel/lockdep.c b/kernel/lockdep.c
index 3926c3674354..62e73ce68197 100644
--- a/kernel/lockdep.c
+++ b/kernel/lockdep.c
@@ -1182,6 +1182,7 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
 	struct lockdep_subclass_key *key;
 	struct list_head *hash_head;
 	struct lock_class *class;
+	unsigned long flags;
 
 	class = look_up_lock_class(lock, subclass);
 	if (likely(class))
@@ -1203,6 +1204,7 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
 	key = lock->key->subkeys + subclass;
 	hash_head = classhashentry(key);
 
+	raw_local_irq_save(flags);
 	__raw_spin_lock(&hash_lock);
 	/*
 	 * We have to do the hash-walk again, to avoid races
@@ -1217,6 +1219,7 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
 	 */
 	if (nr_lock_classes >= MAX_LOCKDEP_KEYS) {
 		__raw_spin_unlock(&hash_lock);
+		raw_local_irq_restore(flags);
 		debug_locks_off();
 		printk("BUG: MAX_LOCKDEP_KEYS too low!\n");
 		printk("turning off the locking correctness validator.\n");
@@ -1239,15 +1242,18 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
 
 	if (verbose(class)) {
 		__raw_spin_unlock(&hash_lock);
+		raw_local_irq_restore(flags);
 		printk("\nnew class %p: %s", class->key, class->name);
 		if (class->name_version > 1)
 			printk("#%d", class->name_version);
 		printk("\n");
 		dump_stack();
+		raw_local_irq_save(flags);
 		__raw_spin_lock(&hash_lock);
 	}
 out_unlock_set:
 	__raw_spin_unlock(&hash_lock);
+	raw_local_irq_restore(flags);
 
 	if (!subclass || force)
 		lock->class_cache = class;