author		Peter Zijlstra <peterz@infradead.org>	2020-09-30 12:49:37 +0300
committer	Ingo Molnar <mingo@kernel.org>	2020-10-09 09:53:08 +0300
commit		2bb8945bcc1a768f2bc402a16c9610bba8d5187d
tree		bf7547f8e6361941a5838adeba9d80c2e55825f4
parent		583090b1b8232e6eae243a9009699666153a13a9
lockdep: Fix usage_traceoverflow
Basically print_lock_class_header()'s for loop is out of sync with the size of ->usage_traces[].

Also clean things up a bit while at it, to avoid such mishaps in the future.

Fixes: 23870f122768 ("locking/lockdep: Fix "USED" <- "IN-NMI" inversions")
Reported-by: Qian Cai <cai@redhat.com>
Debugged-by: Boqun Feng <boqun.feng@gmail.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Tested-by: Qian Cai <cai@redhat.com>
Link: https://lkml.kernel.org/r/20200930094937.GE2651@hirez.programming.kicks-ass.net
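The bug class here is two definitions that must move in lockstep but are maintained independently: commit 23870f122768 grew enum lock_usage_bit, while ->usage_traces[] and the loop in print_lock_class_header() kept the old size. Below is a minimal, self-contained sketch of the compile-time tie-down the cleanup aims for; the names are illustrative, not the kernel's:

#include <assert.h>
#include <stdio.h>

/* Illustrative stand-ins for the lockdep state enum and trace table. */
enum usage_bit {
        BIT_HARDIRQ,
        BIT_SOFTIRQ,
        BIT_USED,
        BIT_USED_READ,          /* a late addition, like LOCK_USED_READ */
        NR_USAGE_BITS,
};

#define NR_TRACE_ENTRIES 4      /* sized separately, like ->usage_traces[] was */

static const char *traces[NR_TRACE_ENTRIES];

/* Fail the build, not the runtime, if the two definitions drift apart. */
static_assert(NR_TRACE_ENTRIES == NR_USAGE_BITS,
              "traces[] is out of sync with enum usage_bit");

int main(void)
{
        for (int bit = 0; bit < NR_USAGE_BITS; bit++)   /* bound == array size */
                printf("bit %d -> %s\n", bit,
                       traces[bit] ? traces[bit] : "(no trace)");
        return 0;
}

With the assert in place, growing the enum without growing traces[] stops the build instead of walking off the end of the array when a report is printed.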
Diffstat (limited to 'kernel/locking/lockdep.c')
-rw-r--r--	kernel/locking/lockdep.c	32	+++++++++++++++-----------------
1 file changed, 15 insertions(+), 17 deletions(-)
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index 2facbbd146ec..a430fbb01eb1 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -585,6 +585,8 @@ static const char *usage_str[] =
 #include "lockdep_states.h"
 #undef LOCKDEP_STATE
 	[LOCK_USED] = "INITIAL USE",
+	[LOCK_USED_READ] = "INITIAL READ USE",
+	/* abused as string storage for verify_lock_unused() */
 	[LOCK_USAGE_STATES] = "IN-NMI",
 };
 #endif
@@ -1939,7 +1941,7 @@ static void print_lock_class_header(struct lock_class *class, int depth)
 #endif
 	printk(KERN_CONT " {\n");
 
-	for (bit = 0; bit < LOCK_USAGE_STATES; bit++) {
+	for (bit = 0; bit < LOCK_TRACE_STATES; bit++) {
 		if (class->usage_mask & (1 << bit)) {
 			int len = depth;
 
@@ -3969,7 +3971,7 @@ static int separate_irq_context(struct task_struct *curr,
 static int mark_lock(struct task_struct *curr, struct held_lock *this,
 			     enum lock_usage_bit new_bit)
 {
-	unsigned int old_mask, new_mask, ret = 1;
+	unsigned int new_mask, ret = 1;
 
 	if (new_bit >= LOCK_USAGE_STATES) {
 		DEBUG_LOCKS_WARN_ON(1);
@@ -3996,30 +3998,26 @@ static int mark_lock(struct task_struct *curr, struct held_lock *this,
 	if (unlikely(hlock_class(this)->usage_mask & new_mask))
 		goto unlock;
 
-	old_mask = hlock_class(this)->usage_mask;
 	hlock_class(this)->usage_mask |= new_mask;
 
-	/*
-	 * Save one usage_traces[] entry and map both LOCK_USED and
-	 * LOCK_USED_READ onto the same entry.
-	 */
-	if (new_bit == LOCK_USED || new_bit == LOCK_USED_READ) {
-		if (old_mask & (LOCKF_USED | LOCKF_USED_READ))
-			goto unlock;
-		new_bit = LOCK_USED;
+	if (new_bit < LOCK_TRACE_STATES) {
+		if (!(hlock_class(this)->usage_traces[new_bit] = save_trace()))
+			return 0;
 	}
 
-	if (!(hlock_class(this)->usage_traces[new_bit] = save_trace()))
-		return 0;
-
 	switch (new_bit) {
+	case 0 ... LOCK_USED-1:
+		ret = mark_lock_irq(curr, this, new_bit);
+		if (!ret)
+			return 0;
+		break;
+
 	case LOCK_USED:
 		debug_atomic_dec(nr_unused_locks);
 		break;
+
 	default:
-		ret = mark_lock_irq(curr, this, new_bit);
-		if (!ret)
-			return 0;
+		break;
 	}
 
 unlock:
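Two things carry the fix in the rewritten mark_lock(). Trace saving is now gated purely by position: any bit below LOCK_TRACE_STATES gets a save_trace() entry, so usage_traces[] can only be indexed by states that actually have storage. And the switch dispatches the IRQ states with the GCC/Clang case-range extension (case low ... high:), which the kernel uses elsewhere. A small stand-alone sketch of the same dispatch shape, with illustrative names rather than the kernel's:

#include <stdio.h>

enum usage_bit {
        BIT_HARDIRQ,                    /* 0 .. BIT_USED-1 are the IRQ states */
        BIT_SOFTIRQ,
        BIT_USED,                       /* first use of the lock */
        BIT_USED_READ,                  /* first read-side use */
        NR_USAGE_BITS,
};

static int mark_bit(enum usage_bit bit)
{
        switch (bit) {
        case BIT_HARDIRQ ... BIT_USED - 1:      /* case-range extension */
                printf("bit %d: run the irq-inversion checks\n", bit);
                break;

        case BIT_USED:
                printf("first use: drop it from the unused-locks count\n");
                break;

        default:                        /* e.g. BIT_USED_READ: nothing extra */
                break;
        }
        return 1;
}

int main(void)
{
        for (int bit = 0; bit < NR_USAGE_BITS; bit++)
                mark_bit(bit);
        return 0;
}

Because the dispatch keys off the enum's ordering, a state added before BIT_USED picks up the IRQ checks automatically instead of needing its own case label.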