author | Linus Torvalds <torvalds@linux-foundation.org> | 2019-09-17 02:49:55 +0300
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2019-09-17 02:49:55 +0300
commit | c7eba51cfdf9cd1ca7ed4201b30be8b2bef15ff5 (patch)
tree | 048bff0b92dc230f01b5af23cd1b5960615bd68a /include
parent | cc9b499a1f71696054a2771aae504c53eecff31d (diff)
parent | e57d143091f1c0b1a98140a4d2e63e113afb62c0 (diff)
download | linux-c7eba51cfdf9cd1ca7ed4201b30be8b2bef15ff5.tar.xz
Merge branch 'locking-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull locking updates from Ingo Molnar:
- improve rwsem scalability
- add uninitialized rwsem debugging check (sketched after the changelog below)
- reduce lockdep's stacktrace memory usage and add diagnostics (see the sketch after the diff below)
- misc cleanups, code consolidation and constification
* 'locking-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
mutex: Fix up mutex_waiter usage
locking/mutex: Use mutex flags macro instead of hard code
locking/mutex: Make __mutex_owner static to mutex.c
locking/qspinlock,x86: Clarify virt_spin_lock_key
locking/rwsem: Check for operations on an uninitialized rwsem
locking/rwsem: Make handoff writer optimistically spin on owner
locking/lockdep: Report more stack trace statistics
locking/lockdep: Reduce space occupied by stack traces
stacktrace: Constify 'entries' arguments
locking/lockdep: Make it clear that what lock_class::key points at is not modified
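
The uninitialized-rwsem check referenced above works by stamping each semaphore with a self-referential magic value at init time; the include/linux/rwsem.h hunk below adds the `magic` field and chains `.magic = &lockname` into `__RWSEM_INITIALIZER`, so locks built via `DECLARE_RWSEM(name)` are stamped at build time as well. Below is a minimal user-space sketch of the idea, with hypothetical names (`debug_rwsem`, `debug_down_read`); the kernel's real check lives in kernel/locking/rwsem.c, not in these headers:

```c
#include <stdio.h>
#include <string.h>

/* Sketch only, not the kernel's code: a self-referential "magic"
 * field catches operations on a semaphore that was never initialized. */
struct debug_rwsem {
	long count;
	void *magic;		/* set to the lock's own address at init */
};

static void debug_rwsem_init(struct debug_rwsem *sem)
{
	sem->count = 0;
	sem->magic = sem;	/* mirrors ".magic = &lockname" in the hunk below */
}

static void debug_down_read(struct debug_rwsem *sem)
{
	if (sem->magic != sem)	/* never initialized (or corrupted) */
		fprintf(stderr, "rwsem %p used before init\n", (void *)sem);
	sem->count++;		/* stand-in for the real acquire path */
}

int main(void)
{
	struct debug_rwsem good, bad;

	debug_rwsem_init(&good);
	memset(&bad, 0, sizeof(bad));	/* declared but never initialized */

	debug_down_read(&good);		/* silent */
	debug_down_read(&bad);		/* fires the warning */
	return 0;
}
```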
Diffstat (limited to 'include')
-rw-r--r-- | include/linux/lockdep.h | 11
-rw-r--r-- | include/linux/mutex.h | 25
-rw-r--r-- | include/linux/rwsem.h | 10
-rw-r--r-- | include/linux/stacktrace.h | 4
4 files changed, 19 insertions, 31 deletions
diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
index 0b0d7259276d..b8a835fd611b 100644
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -66,10 +66,7 @@ struct lock_class_key {
 
 extern struct lock_class_key __lockdep_no_validate__;
 
-struct lock_trace {
-	unsigned int		nr_entries;
-	unsigned int		offset;
-};
+struct lock_trace;
 
 #define LOCKSTAT_POINTS		4
 
@@ -97,7 +94,7 @@ struct lock_class {
 	 */
 	struct list_head		locks_after, locks_before;
 
-	struct lockdep_subclass_key	*key;
+	const struct lockdep_subclass_key *key;
 	unsigned int			subclass;
 	unsigned int			dep_gen_id;
 
@@ -105,7 +102,7 @@ struct lock_class {
 	 * IRQ/softirq usage tracking bits:
 	 */
 	unsigned long			usage_mask;
-	struct lock_trace		usage_traces[XXX_LOCK_USAGE_STATES];
+	const struct lock_trace		*usage_traces[XXX_LOCK_USAGE_STATES];
 
 	/*
 	 * Generation counter, when doing certain classes of graph walking,
@@ -193,7 +190,7 @@ struct lock_list {
 	struct list_head		entry;
 	struct lock_class		*class;
 	struct lock_class		*links_to;
-	struct lock_trace		trace;
+	const struct lock_trace		*trace;
 	int				distance;
 
 	/*
diff --git a/include/linux/mutex.h b/include/linux/mutex.h
index dcd03fee6e01..aca8f36dfac9 100644
--- a/include/linux/mutex.h
+++ b/include/linux/mutex.h
@@ -66,16 +66,6 @@ struct mutex {
 };
 
 /*
- * Internal helper function; C doesn't allow us to hide it :/
- *
- * DO NOT USE (outside of mutex code).
- */
-static inline struct task_struct *__mutex_owner(struct mutex *lock)
-{
-	return (struct task_struct *)(atomic_long_read(&lock->owner) & ~0x07);
-}
-
-/*
  * This is the control structure for tasks blocked on mutex,
  * which resides on the blocked task's kernel stack:
  */
@@ -144,10 +134,7 @@ extern void __mutex_init(struct mutex *lock, const char *name,
  *
  * Returns true if the mutex is locked, false if unlocked.
  */
-static inline bool mutex_is_locked(struct mutex *lock)
-{
-	return __mutex_owner(lock) != NULL;
-}
+extern bool mutex_is_locked(struct mutex *lock);
 
 /*
  * See kernel/locking/mutex.c for detailed documentation of these APIs.
@@ -220,13 +207,7 @@ enum mutex_trylock_recursive_enum {
  *  - MUTEX_TRYLOCK_SUCCESS    - lock acquired,
  *  - MUTEX_TRYLOCK_RECURSIVE  - we already owned the lock.
  */
-static inline /* __deprecated */ __must_check enum mutex_trylock_recursive_enum
-mutex_trylock_recursive(struct mutex *lock)
-{
-	if (unlikely(__mutex_owner(lock) == current))
-		return MUTEX_TRYLOCK_RECURSIVE;
-
-	return mutex_trylock(lock);
-}
+extern /* __deprecated */ __must_check enum mutex_trylock_recursive_enum
+mutex_trylock_recursive(struct mutex *lock);
 
 #endif /* __LINUX_MUTEX_H */
diff --git a/include/linux/rwsem.h b/include/linux/rwsem.h
index 9d9c663987d8..00d6054687dd 100644
--- a/include/linux/rwsem.h
+++ b/include/linux/rwsem.h
@@ -45,6 +45,9 @@ struct rw_semaphore {
 #endif
 	raw_spinlock_t wait_lock;
 	struct list_head wait_list;
+#ifdef CONFIG_DEBUG_RWSEMS
+	void *magic;
+#endif
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
 	struct lockdep_map	dep_map;
 #endif
@@ -73,6 +76,12 @@ static inline int rwsem_is_locked(struct rw_semaphore *sem)
 # define __RWSEM_DEP_MAP_INIT(lockname)
 #endif
 
+#ifdef CONFIG_DEBUG_RWSEMS
+# define __DEBUG_RWSEM_INITIALIZER(lockname) , .magic = &lockname
+#else
+# define __DEBUG_RWSEM_INITIALIZER(lockname)
+#endif
+
 #ifdef CONFIG_RWSEM_SPIN_ON_OWNER
 #define __RWSEM_OPT_INIT(lockname) , .osq = OSQ_LOCK_UNLOCKED
 #else
@@ -85,6 +94,7 @@ static inline int rwsem_is_locked(struct rw_semaphore *sem)
 	.wait_list = LIST_HEAD_INIT((name).wait_list),	\
 	.wait_lock = __RAW_SPIN_LOCK_UNLOCKED(name.wait_lock)	\
 	__RWSEM_OPT_INIT(name)					\
+	__DEBUG_RWSEM_INITIALIZER(name)				\
 	__RWSEM_DEP_MAP_INIT(name) }
 
 #define DECLARE_RWSEM(name) \
diff --git a/include/linux/stacktrace.h b/include/linux/stacktrace.h
index f0cfd12cb45e..83bd8cb475d7 100644
--- a/include/linux/stacktrace.h
+++ b/include/linux/stacktrace.h
@@ -9,9 +9,9 @@ struct task_struct;
 struct pt_regs;
 
 #ifdef CONFIG_STACKTRACE
-void stack_trace_print(unsigned long *trace, unsigned int nr_entries,
+void stack_trace_print(const unsigned long *trace, unsigned int nr_entries,
 		       int spaces);
-int stack_trace_snprint(char *buf, size_t size, unsigned long *entries,
+int stack_trace_snprint(char *buf, size_t size, const unsigned long *entries,
 			unsigned int nr_entries, int spaces);
 unsigned int stack_trace_save(unsigned long *store, unsigned int size,
 			      unsigned int skipnr);
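
The stack-trace shrink referenced in the changelog is visible in the include/linux/lockdep.h hunks above: `struct lock_trace` becomes an opaque type, and its users hold `const struct lock_trace *` pointers instead of embedded structs, which lets lockdep store each distinct trace once and hand out shared references. Below is a minimal sketch of that hash-and-deduplicate idea, assuming a fixed-size pool and an FNV-1a hash as a stand-in for the kernel's jhash; `save_trace`, `trace_rec`, and `pool` are hypothetical names, not the kernel's:

```c
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct trace_rec {
	unsigned int nr_entries;
	uint32_t hash;
	unsigned long entries[8];
};

#define POOL_MAX 16
static struct trace_rec pool[POOL_MAX];
static unsigned int pool_used;

/* FNV-1a over the raw entry bytes; a stand-in for the kernel's jhash */
static uint32_t hash_entries(const unsigned long *e, unsigned int n)
{
	uint32_t h = 2166136261u;
	for (unsigned int i = 0; i < n * sizeof(*e); i++)
		h = (h ^ ((const unsigned char *)e)[i]) * 16777619u;
	return h;
}

/* Return a pooled trace, sharing an existing record when an identical
 * trace was saved before; NULL when the pool cannot take the trace. */
static const struct trace_rec *
save_trace(const unsigned long *entries, unsigned int nr)
{
	uint32_t h = hash_entries(entries, nr);

	for (unsigned int i = 0; i < pool_used; i++)
		if (pool[i].hash == h && pool[i].nr_entries == nr &&
		    !memcmp(pool[i].entries, entries, nr * sizeof(*entries)))
			return &pool[i];	/* duplicate: share the record */

	if (pool_used == POOL_MAX || nr > 8)
		return NULL;

	pool[pool_used].nr_entries = nr;
	pool[pool_used].hash = h;
	memcpy(pool[pool_used].entries, entries, nr * sizeof(*entries));
	return &pool[pool_used++];
}

int main(void)
{
	unsigned long a[] = { 0x1000, 0x2000, 0x3000 };
	unsigned long b[] = { 0x1000, 0x2000, 0x3000 };	/* identical trace */

	const struct trace_rec *ta = save_trace(a, 3);
	const struct trace_rec *tb = save_trace(b, 3);

	printf("shared record: %s\n", ta == tb ? "yes" : "no"); /* "yes" */
	return 0;
}
```

Because callers only ever see `const struct lock_trace *`, the storage scheme behind the pointer can change freely, which is also why the `entries` arguments in stacktrace.h could be constified in the same series.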