author    | Linus Torvalds <torvalds@linux-foundation.org> | 2016-03-15 01:50:44 +0300
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2016-03-15 01:50:44 +0300
commit    | fbed0bc0915e2dec7452fc3e66ad03dd2b0c04c7 (patch)
tree      | be106a21a694dfdc949ff430ca6bd9eaf56d3a1f /include
parent    | d37a14bb5fed13a52dada3fa9ef4488ac588b35e (diff)
parent    | 38460a2178d225b39ade5ac66586c3733391cf86 (diff)
download  | linux-fbed0bc0915e2dec7452fc3e66ad03dd2b0c04c7.tar.xz
Merge branch 'locking-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull locking changes from Ingo Molnar:
"Various updates:
- Futex scalability improvements: remove page lock use for shared
futex get_futex_key(), which speeds up 'perf bench futex hash'
benchmarks by over 40% on a 60-core Westmere. This makes anon-mem
shared futexes perform close to private futexes. (Mel Gorman)
- lockdep hash collision detection and fix (Alfredo Alvarez
Fernandez)
- lockdep testing enhancements (Alfredo Alvarez Fernandez)
- robustify lockdep init by using hlists (Andrew Morton, Andrey
Ryabinin)
- mutex and csd_lock micro-optimizations (Davidlohr Bueso)
- small x86 barriers tweaks (Michael S Tsirkin)
- qspinlock updates (Waiman Long)"
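Two of the changes pulled here, the csd_lock_wait() conversion and the qspinlock pending-path update, replace open-coded spin loops with smp_cond_acquire(), which busy-waits on a condition and then upgrades the control dependency to acquire ordering. As a rough userspace analogue of that wait-then-acquire pattern (a sketch only: the flag name and the C11-atomics mapping are illustrative, not the kernel's implementation):

```c
#include <stdatomic.h>

/* Illustrative stand-in for the kernel's per-CPU csd lock word. */
static _Atomic unsigned int csd_flags;
#define CSD_FLAG_LOCK 0x01

/*
 * Spin until the LOCK bit clears, then issue an acquire fence so that
 * loads issued after the wait cannot be reordered before the flag test.
 * This mirrors the smp_cond_acquire(!(csd->flags & CSD_FLAG_LOCK)) idiom;
 * the kernel version uses cpu_relax() and its own barrier primitives.
 */
static void csd_lock_wait_sketch(void)
{
	while (atomic_load_explicit(&csd_flags, memory_order_relaxed) & CSD_FLAG_LOCK)
		/* cpu_relax() would go here in kernel code */;
	atomic_thread_fence(memory_order_acquire);
}
```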
* 'locking-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (25 commits)
locking/csd_lock: Use smp_cond_acquire() in csd_lock_wait()
locking/csd_lock: Explicitly inline csd_lock*() helpers
futex: Replace barrier() in unqueue_me() with READ_ONCE()
locking/lockdep: Detect chain_key collisions
locking/lockdep: Prevent chain_key collisions
tools/lib/lockdep: Fix link creation warning
tools/lib/lockdep: Add tests for AA and ABBA locking
tools/lib/lockdep: Add userspace version of READ_ONCE()
tools/lib/lockdep: Fix the build on recent kernels
locking/qspinlock: Move __ARCH_SPIN_LOCK_UNLOCKED to qspinlock_types.h
locking/mutex: Allow next waiter lockless wakeup
locking/pvqspinlock: Enable slowpath locking count tracking
locking/qspinlock: Use smp_cond_acquire() in pending code
locking/pvqspinlock: Move lock stealing count tracking code into pv_queued_spin_steal_lock()
locking/mcs: Fix mcs_spin_lock() ordering
futex: Remove requirement for lock_page() in get_futex_key()
futex: Rename barrier references in ordering guarantees
locking/atomics: Update comment about READ_ONCE() and structures
locking/lockdep: Eliminate lockdep_init()
locking/lockdep: Convert hash tables to hlists
...
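The lockdep_init() removal visible in the shortlog (and in the lockdep.h hunks below) follows directly from converting the lockdep hash tables to hlists: an hlist head is a single pointer, so statically allocated, zero-filled tables are already valid empty lists and need no boot-time initialization pass, whereas a list_head is only empty once its next/prev pointers refer to itself. A minimal sketch of that difference, using simplified stand-in types rather than the kernel's own:

```c
#include <stddef.h>

/* Simplified stand-ins for the kernel's hlist_head and list_head. */
struct hlist_node { struct hlist_node *next, **pprev; };
struct hlist_head { struct hlist_node *first; };
struct list_head  { struct list_head *next, *prev; };

#define CHAINHASH_SIZE 4096	/* illustrative size */

/*
 * All-zero static storage is already a valid empty hlist table
 * (empty means ->first == NULL), so no init call is required and the
 * table is usable from the very first lockdep event.
 */
static struct hlist_head chainhash_table[CHAINHASH_SIZE];

/*
 * A list_head is only empty when it points at itself, which requires a
 * runtime pass like this one, roughly what lockdep_init() had to do
 * before the hlist conversion.
 */
static struct list_head old_chainhash_table[CHAINHASH_SIZE];

static void old_lockdep_init_sketch(void)
{
	for (size_t i = 0; i < CHAINHASH_SIZE; i++) {
		old_chainhash_table[i].next = &old_chainhash_table[i];
		old_chainhash_table[i].prev = &old_chainhash_table[i];
	}
}
```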
Diffstat (limited to 'include')
-rw-r--r-- | include/asm-generic/qspinlock.h       | 5
-rw-r--r-- | include/asm-generic/qspinlock_types.h | 5
-rw-r--r-- | include/linux/compiler.h              | 5
-rw-r--r-- | include/linux/lockdep.h               | 2
4 files changed, 8 insertions, 9 deletions
diff --git a/include/asm-generic/qspinlock.h b/include/asm-generic/qspinlock.h
index 39e1cb201b8e..35a52a880b2f 100644
--- a/include/asm-generic/qspinlock.h
+++ b/include/asm-generic/qspinlock.h
@@ -120,11 +120,6 @@ static __always_inline bool virt_spin_lock(struct qspinlock *lock)
 #endif
 
 /*
- * Initializier
- */
-#define __ARCH_SPIN_LOCK_UNLOCKED { ATOMIC_INIT(0) }
-
-/*
  * Remapping spinlock architecture specific functions to the corresponding
  * queued spinlock functions.
  */
diff --git a/include/asm-generic/qspinlock_types.h b/include/asm-generic/qspinlock_types.h
index 85f888e86761..034acd0c4956 100644
--- a/include/asm-generic/qspinlock_types.h
+++ b/include/asm-generic/qspinlock_types.h
@@ -33,6 +33,11 @@ typedef struct qspinlock {
 } arch_spinlock_t;
 
 /*
+ * Initializier
+ */
+#define __ARCH_SPIN_LOCK_UNLOCKED { ATOMIC_INIT(0) }
+
+/*
  * Bitfields in the atomic value:
  *
  * When NR_CPUS < 16K
diff --git a/include/linux/compiler.h b/include/linux/compiler.h
index 48f5aab117ae..a27f4f17c382 100644
--- a/include/linux/compiler.h
+++ b/include/linux/compiler.h
@@ -263,8 +263,9 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s
  * In contrast to ACCESS_ONCE these two macros will also work on aggregate
  * data types like structs or unions. If the size of the accessed data
  * type exceeds the word size of the machine (e.g., 32 bits or 64 bits)
- * READ_ONCE() and WRITE_ONCE() will fall back to memcpy and print a
- * compile-time warning.
+ * READ_ONCE() and WRITE_ONCE() will fall back to memcpy(). There's at
+ * least two memcpy()s: one for the __builtin_memcpy() and then one for
+ * the macro doing the copy of variable - '__u' allocated on the stack.
  *
  * Their two major use cases are: (1) Mediating communication between
  * process-level code and irq/NMI handlers, all running on the same CPU,
diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
index 4dca42fd32f5..d026b190c530 100644
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -261,7 +261,6 @@ struct held_lock {
 /*
  * Initialization, self-test and debugging-output methods:
  */
-extern void lockdep_init(void);
 extern void lockdep_info(void);
 extern void lockdep_reset(void);
 extern void lockdep_reset_lock(struct lockdep_map *lock);
@@ -392,7 +391,6 @@ static inline void lockdep_on(void)
 # define lockdep_set_current_reclaim_state(g) do { } while (0)
 # define lockdep_clear_current_reclaim_state() do { } while (0)
 # define lockdep_trace_alloc(g) do { } while (0)
-# define lockdep_init() do { } while (0)
 # define lockdep_info() do { } while (0)
 # define lockdep_init_map(lock, name, key, sub) \
 		do { (void)(name); (void)(key); } while (0)
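The include/linux/compiler.h hunk only rewords a comment, but the behaviour it documents is easy to miss: when READ_ONCE() is applied to something wider than the machine word, the access degrades into byte copies through a local union, so it is no longer a single atomic load and the data is copied twice (once into the local, once out to the receiving variable). Below is a simplified userspace sketch of that aggregate fallback, not the kernel's actual macro; it assumes GNU statement expressions, as the kernel's own macro does:

```c
#include <string.h>

/* A struct wider than the native word on typical 64-bit targets. */
struct sample { unsigned long a, b, c; };

/*
 * Sketch of the aggregate fallback: copy the (volatile) source byte-wise
 * into a local union, then hand the union's value back out of the
 * statement expression, where assigning it copies the bytes a second time.
 */
#define READ_ONCE_SKETCH(x)						\
({									\
	union { __typeof__(x) val; char raw[sizeof(x)]; } u;		\
	memcpy(u.raw, (const void *)&(x), sizeof(x));			\
	u.val;								\
})

void reader(volatile struct sample *src, struct sample *dst)
{
	/* First copy inside the macro, second copy into *dst here. */
	*dst = READ_ONCE_SKETCH(*src);
}
```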