Diffstat (limited to 'lib/kernel_lock.c')
-rw-r--r--  lib/kernel_lock.c  |  11
1 file changed, 8 insertions, 3 deletions
diff --git a/lib/kernel_lock.c b/lib/kernel_lock.c
index cb5490ec00f2..e0fdfddb406e 100644
--- a/lib/kernel_lock.c
+++ b/lib/kernel_lock.c
@@ -14,7 +14,7 @@
* The 'big kernel semaphore'
*
* This mutex is taken and released recursively by lock_kernel()
- * and unlock_kernel(). It is transparently dropped and reaquired
+ * and unlock_kernel(). It is transparently dropped and reacquired
* over schedule(). It is used to protect legacy code that hasn't
* been migrated to a proper locking design yet.
*
@@ -92,7 +92,7 @@ void __lockfunc unlock_kernel(void)
* The 'big kernel lock'
*
* This spinlock is taken and released recursively by lock_kernel()
- * and unlock_kernel(). It is transparently dropped and reaquired
+ * and unlock_kernel(). It is transparently dropped and reacquired
* over schedule(). It is used to protect legacy code that hasn't
* been migrated to a proper locking design yet.
*
@@ -177,7 +177,12 @@ static inline void __lock_kernel(void)
 static inline void __unlock_kernel(void)
 {
-	spin_unlock(&kernel_flag);
+	/*
+	 * the BKL is not covered by lockdep, so we open-code the
+	 * unlocking sequence (and thus avoid the dep-chain ops):
+	 */
+	_raw_spin_unlock(&kernel_flag);
+	preempt_enable();
 }
/*
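
For reference, the open-coded sequence above is meant to match what spin_unlock() does in this configuration, minus lockdep's release tracking. Below is a minimal sketch of the idea, assuming a lockdep-enabled build where spin_unlock() runs the lock-release annotation before dropping the raw lock and re-enabling preemption; the sketch_* helpers are illustrative, not kernel API:

/*
 * Illustrative sketch only, not the kernel's actual implementation.
 * Assumes a lockdep-enabled spinlock_t that carries a dep_map field.
 */
static inline void sketch_spin_unlock(spinlock_t *lock)
{
	/* lockdep release bookkeeping -- the "dep-chain ops" avoided above */
	spin_release(&lock->dep_map, 1,
		     (unsigned long)__builtin_return_address(0));
	_raw_spin_unlock(lock);		/* clear the lock word itself */
	preempt_enable();		/* re-enable preemption */
}

static inline void sketch_unlock_kernel(void)
{
	/* BKL variant: same raw unlock and preempt_enable, no lockdep hook */
	_raw_spin_unlock(&kernel_flag);
	preempt_enable();
}

This mirrors the patch comment: since the BKL itself is not covered by lockdep, its unlock path can skip the dependency-chain bookkeeping entirely.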