Diffstat (limited to 'kernel/locking')
-rw-r--r-- | kernel/locking/lockdep.c           | 7 ++++---
-rw-r--r-- | kernel/locking/lockdep_internals.h | 8 ++++----
-rw-r--r-- | kernel/locking/qrwlock.c           | 7 ++++---
3 files changed, 12 insertions, 10 deletions
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index c6d0c1dc6253..ef28a0b9cf1e 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -705,7 +705,7 @@ static void print_lock_name(struct lock_class *class)
 
 	printk(KERN_CONT " (");
 	__print_lock_name(class);
-	printk(KERN_CONT "){%s}-{%hd:%hd}", usage,
+	printk(KERN_CONT "){%s}-{%d:%d}", usage,
 			class->wait_type_outer ?: class->wait_type_inner,
 			class->wait_type_inner);
 }
@@ -930,7 +930,8 @@ static bool assign_lock_key(struct lockdep_map *lock)
 		/* Debug-check: all keys must be persistent! */
 		debug_locks_off();
 		pr_err("INFO: trying to register non-static key.\n");
-		pr_err("the code is fine but needs lockdep annotation.\n");
+		pr_err("The code is fine but needs lockdep annotation, or maybe\n");
+		pr_err("you didn't initialize this object before use?\n");
 		pr_err("turning off the locking correctness validator.\n");
 		dump_stack();
 		return false;
@@ -1392,7 +1393,7 @@ static int add_lock_to_list(struct lock_class *this,
 /*
  * For good efficiency of modular, we use power of 2
  */
-#define MAX_CIRCULAR_QUEUE_SIZE		4096UL
+#define MAX_CIRCULAR_QUEUE_SIZE		(1UL << CONFIG_LOCKDEP_CIRCULAR_QUEUE_BITS)
 #define CQ_MASK				(MAX_CIRCULAR_QUEUE_SIZE-1)
 
 /*
diff --git a/kernel/locking/lockdep_internals.h b/kernel/locking/lockdep_internals.h
index de49f9e1c11b..ecb8662e7a4e 100644
--- a/kernel/locking/lockdep_internals.h
+++ b/kernel/locking/lockdep_internals.h
@@ -99,16 +99,16 @@ static const unsigned long LOCKF_USED_IN_IRQ_READ =
 #define MAX_STACK_TRACE_ENTRIES	262144UL
 #define STACK_TRACE_HASH_SIZE	8192
 #else
-#define MAX_LOCKDEP_ENTRIES	32768UL
+#define MAX_LOCKDEP_ENTRIES	(1UL << CONFIG_LOCKDEP_BITS)
 
-#define MAX_LOCKDEP_CHAINS_BITS	16
+#define MAX_LOCKDEP_CHAINS_BITS	CONFIG_LOCKDEP_CHAINS_BITS
 
 /*
  * Stack-trace: tightly packed array of stack backtrace
  * addresses. Protected by the hash_lock.
  */
-#define MAX_STACK_TRACE_ENTRIES	524288UL
-#define STACK_TRACE_HASH_SIZE	16384
+#define MAX_STACK_TRACE_ENTRIES	(1UL << CONFIG_LOCKDEP_STACK_TRACE_BITS)
+#define STACK_TRACE_HASH_SIZE	(1 << CONFIG_LOCKDEP_STACK_TRACE_HASH_BITS)
 #endif
 
 /*
diff --git a/kernel/locking/qrwlock.c b/kernel/locking/qrwlock.c
index 4786dd271b45..b94f3831e963 100644
--- a/kernel/locking/qrwlock.c
+++ b/kernel/locking/qrwlock.c
@@ -60,6 +60,8 @@ EXPORT_SYMBOL(queued_read_lock_slowpath);
  */
void queued_write_lock_slowpath(struct qrwlock *lock)
 {
+	int cnts;
+
 	/* Put the writer into the wait queue */
 	arch_spin_lock(&lock->wait_lock);
 
@@ -73,9 +75,8 @@ void queued_write_lock_slowpath(struct qrwlock *lock)
 
 	/* When no more readers or writers, set the locked flag */
 	do {
-		atomic_cond_read_acquire(&lock->cnts, VAL == _QW_WAITING);
-	} while (atomic_cmpxchg_relaxed(&lock->cnts, _QW_WAITING,
-					_QW_LOCKED) != _QW_WAITING);
+		cnts = atomic_cond_read_relaxed(&lock->cnts, VAL == _QW_WAITING);
+	} while (!atomic_try_cmpxchg_acquire(&lock->cnts, &cnts, _QW_LOCKED));
 unlock:
 	arch_spin_unlock(&lock->wait_lock);
 }
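
Note on the lockdep sizing hunks: the fixed table limits become Kconfig-driven powers of two (1UL << CONFIG_*_BITS). Keeping MAX_CIRCULAR_QUEUE_SIZE a power of 2 is what lets CQ_MASK wrap queue indices with a single AND instead of a modulo. A minimal user-space sketch of that masking trick follows; the QUEUE_* names are illustrative stand-ins, not kernel identifiers.

#include <stdio.h>

#define QUEUE_BITS 12                  /* stand-in for CONFIG_LOCKDEP_CIRCULAR_QUEUE_BITS */
#define QUEUE_SIZE (1UL << QUEUE_BITS) /* 4096, matching the old hard-coded default */
#define QUEUE_MASK (QUEUE_SIZE - 1)    /* valid only because QUEUE_SIZE is a power of 2 */

int main(void)
{
	unsigned long head = QUEUE_SIZE - 1;

	/* Advancing past the last slot wraps to 0 with one AND, no division. */
	head = (head + 1) & QUEUE_MASK;
	printf("wrapped head = %lu\n", head); /* prints 0 */
	return 0;
}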
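Note on the qrwlock hunk: the writer slowpath now polls with a relaxed read and puts the ACQUIRE on the try-cmpxchg that actually takes the lock, so the winning CAS pairs with the previous holder's release; with the old relaxed cmpxchg, a lock word released between the acquire-read and the CAS could be taken without synchronizing against that release. A rough user-space sketch of the same pattern with C11 atomics, assuming illustrative qw_* names and constants rather than the kernel's qrwlock API:

#include <stdatomic.h>
#include <stdio.h>

#define QW_WAITING 0x100 /* illustrative stand-ins for _QW_WAITING/_QW_LOCKED */
#define QW_LOCKED  0x0ff

/* Sketch of the patched wait loop: spin relaxed, acquire on the winning CAS. */
static void qw_wait_and_lock(atomic_int *cnts)
{
	int old;

	do {
		/* Relaxed polling is enough; no ordering is needed just to spin. */
		while ((old = atomic_load_explicit(cnts, memory_order_relaxed)) != QW_WAITING)
			;
		/*
		 * The acquire on success pairs with the previous holder's release,
		 * so the critical section cannot begin before the lock is owned.
		 */
	} while (!atomic_compare_exchange_weak_explicit(cnts, &old, QW_LOCKED,
							memory_order_acquire,
							memory_order_relaxed));
}

int main(void)
{
	atomic_int cnts = QW_WAITING; /* pretend we are the lone queued writer */

	qw_wait_and_lock(&cnts);
	printf("cnts = 0x%x\n", atomic_load(&cnts)); /* 0xff: locked */
	return 0;
}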