author    | Linus Torvalds <torvalds@linux-foundation.org> | 2016-10-03 22:15:00 +0300
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2016-10-03 22:15:00 +0300
commit    | 00bcf5cdd6c0e2e92ce3dd852ca68a3b779fa4ec (patch)
tree      | 387b2ed2ac0e35776714515665f25ddb5fee1de3 /arch/x86/xen
parent    | de956b8f45b3338cfb66a725e22b4050109daf2a (diff)
parent    | 08645077b7f9f7824dbaf1959b0e014a894c8acc (diff)
download  | linux-00bcf5cdd6c0e2e92ce3dd852ca68a3b779fa4ec.tar.xz
Merge branch 'locking-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull locking updates from Ingo Molnar:
"The main changes in this cycle were:
- rwsem micro-optimizations (Davidlohr Bueso)
- Improve the implementation and optimize the performance of
percpu-rwsems (Peter Zijlstra); a brief usage sketch follows this
summary
- Convert all lglock users to better facilities such as percpu-rwsems
or percpu-spinlocks, and remove lglocks (Peter Zijlstra)
- Remove the ticket (spin)lock implementation (Peter Zijlstra)
- Korean translation of memory-barriers.txt and related fixes to the
English document (SeongJae Park)
- misc fixes and cleanups"
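The percpu-rwsem items above refer to the reader/writer interface in include/linux/percpu-rwsem.h, including the DEFINE_STATIC_PERCPU_RWSEM() helper added by this series. The following is a minimal usage sketch only; the names example_rwsem, example_read_side and example_write_side are invented for illustration and do not appear in the kernel.

#include <linux/percpu-rwsem.h>

/* Hypothetical example: "example_rwsem" is not a real kernel symbol. */
DEFINE_STATIC_PERCPU_RWSEM(example_rwsem);

static void example_read_side(void)
{
        percpu_down_read(&example_rwsem);       /* per-CPU fast path when no writer is pending */
        /* ... read-side critical section ... */
        percpu_up_read(&example_rwsem);
}

static void example_write_side(void)
{
        percpu_down_write(&example_rwsem);      /* forces readers onto the slow path */
        /* ... write-side critical section ... */
        percpu_up_write(&example_rwsem);
}

The cheap read-side fast path is what makes percpu-rwsems a suitable replacement for lg_local_lock()/lg_global_lock() in the fs/locks conversion commits listed below.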
* 'locking-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (24 commits)
x86/cmpxchg, locking/atomics: Remove superfluous definitions
x86, locking/spinlocks: Remove ticket (spin)lock implementation
locking/lglock: Remove lglock implementation
stop_machine: Remove stop_cpus_lock and lg_double_lock/unlock()
fs/locks: Use percpu_down_read_preempt_disable()
locking/percpu-rwsem: Add down_read_preempt_disable()
fs/locks: Replace lg_local with a per-cpu spinlock
fs/locks: Replace lg_global with a percpu-rwsem
locking/percpu-rwsem: Add DEFINE_STATIC_PERCPU_RWSEM and percpu_rwsem_assert_held()
locking/pv-qspinlock: Use cmpxchg_release() in __pv_queued_spin_unlock()
locking/rwsem, x86: Drop a bogus cc clobber
futex: Add some more function commentary
locking/hung_task: Show all locks
locking/rwsem: Scan the wait_list for readers only once
locking/rwsem: Remove a few useless comments
locking/rwsem: Return void in __rwsem_mark_wake()
locking, rcu, cgroup: Avoid synchronize_sched() in __cgroup_procs_write()
locking/Documentation: Add Korean translation
locking/Documentation: Fix a typo of example result
locking/Documentation: Fix wrong section reference
...
Diffstat (limited to 'arch/x86/xen')
-rw-r--r-- | arch/x86/xen/spinlock.c | 250
1 file changed, 1 insertion, 249 deletions
diff --git a/arch/x86/xen/spinlock.c b/arch/x86/xen/spinlock.c
index f42e78de1e10..3d6e0064cbfc 100644
--- a/arch/x86/xen/spinlock.c
+++ b/arch/x86/xen/spinlock.c
@@ -21,8 +21,6 @@ static DEFINE_PER_CPU(int, lock_kicker_irq) = -1;
 static DEFINE_PER_CPU(char *, irq_name);
 static bool xen_pvspin = true;
 
-#ifdef CONFIG_QUEUED_SPINLOCKS
-
 #include <asm/qspinlock.h>
 
 static void xen_qlock_kick(int cpu)
@@ -71,207 +69,6 @@ static void xen_qlock_wait(u8 *byte, u8 val)
         xen_poll_irq(irq);
 }
 
-#else /* CONFIG_QUEUED_SPINLOCKS */
-
-enum xen_contention_stat {
-        TAKEN_SLOW,
-        TAKEN_SLOW_PICKUP,
-        TAKEN_SLOW_SPURIOUS,
-        RELEASED_SLOW,
-        RELEASED_SLOW_KICKED,
-        NR_CONTENTION_STATS
-};
-
-
-#ifdef CONFIG_XEN_DEBUG_FS
-#define HISTO_BUCKETS 30
-static struct xen_spinlock_stats
-{
-        u32 contention_stats[NR_CONTENTION_STATS];
-        u32 histo_spin_blocked[HISTO_BUCKETS+1];
-        u64 time_blocked;
-} spinlock_stats;
-
-static u8 zero_stats;
-
-static inline void check_zero(void)
-{
-        u8 ret;
-        u8 old = READ_ONCE(zero_stats);
-        if (unlikely(old)) {
-                ret = cmpxchg(&zero_stats, old, 0);
-                /* This ensures only one fellow resets the stat */
-                if (ret == old)
-                        memset(&spinlock_stats, 0, sizeof(spinlock_stats));
-        }
-}
-
-static inline void add_stats(enum xen_contention_stat var, u32 val)
-{
-        check_zero();
-        spinlock_stats.contention_stats[var] += val;
-}
-
-static inline u64 spin_time_start(void)
-{
-        return xen_clocksource_read();
-}
-
-static void __spin_time_accum(u64 delta, u32 *array)
-{
-        unsigned index = ilog2(delta);
-
-        check_zero();
-
-        if (index < HISTO_BUCKETS)
-                array[index]++;
-        else
-                array[HISTO_BUCKETS]++;
-}
-
-static inline void spin_time_accum_blocked(u64 start)
-{
-        u32 delta = xen_clocksource_read() - start;
-
-        __spin_time_accum(delta, spinlock_stats.histo_spin_blocked);
-        spinlock_stats.time_blocked += delta;
-}
-#else /* !CONFIG_XEN_DEBUG_FS */
-static inline void add_stats(enum xen_contention_stat var, u32 val)
-{
-}
-
-static inline u64 spin_time_start(void)
-{
-        return 0;
-}
-
-static inline void spin_time_accum_blocked(u64 start)
-{
-}
-#endif /* CONFIG_XEN_DEBUG_FS */
-
-struct xen_lock_waiting {
-        struct arch_spinlock *lock;
-        __ticket_t want;
-};
-
-static DEFINE_PER_CPU(struct xen_lock_waiting, lock_waiting);
-static cpumask_t waiting_cpus;
-
-__visible void xen_lock_spinning(struct arch_spinlock *lock, __ticket_t want)
-{
-        int irq = __this_cpu_read(lock_kicker_irq);
-        struct xen_lock_waiting *w = this_cpu_ptr(&lock_waiting);
-        int cpu = smp_processor_id();
-        u64 start;
-        __ticket_t head;
-        unsigned long flags;
-
-        /* If kicker interrupts not initialized yet, just spin */
-        if (irq == -1)
-                return;
-
-        start = spin_time_start();
-
-        /*
-         * Make sure an interrupt handler can't upset things in a
-         * partially setup state.
-         */
-        local_irq_save(flags);
-        /*
-         * We don't really care if we're overwriting some other
-         * (lock,want) pair, as that would mean that we're currently
-         * in an interrupt context, and the outer context had
-         * interrupts enabled. That has already kicked the VCPU out
-         * of xen_poll_irq(), so it will just return spuriously and
-         * retry with newly setup (lock,want).
-         *
-         * The ordering protocol on this is that the "lock" pointer
-         * may only be set non-NULL if the "want" ticket is correct.
-         * If we're updating "want", we must first clear "lock".
-         */
-        w->lock = NULL;
-        smp_wmb();
-        w->want = want;
-        smp_wmb();
-        w->lock = lock;
-
-        /* This uses set_bit, which atomic and therefore a barrier */
-        cpumask_set_cpu(cpu, &waiting_cpus);
-        add_stats(TAKEN_SLOW, 1);
-
-        /* clear pending */
-        xen_clear_irq_pending(irq);
-
-        /* Only check lock once pending cleared */
-        barrier();
-
-        /*
-         * Mark entry to slowpath before doing the pickup test to make
-         * sure we don't deadlock with an unlocker.
-         */
-        __ticket_enter_slowpath(lock);
-
-        /* make sure enter_slowpath, which is atomic does not cross the read */
-        smp_mb__after_atomic();
-
-        /*
-         * check again make sure it didn't become free while
-         * we weren't looking
-         */
-        head = READ_ONCE(lock->tickets.head);
-        if (__tickets_equal(head, want)) {
-                add_stats(TAKEN_SLOW_PICKUP, 1);
-                goto out;
-        }
-
-        /* Allow interrupts while blocked */
-        local_irq_restore(flags);
-
-        /*
-         * If an interrupt happens here, it will leave the wakeup irq
-         * pending, which will cause xen_poll_irq() to return
-         * immediately.
-         */
-
-        /* Block until irq becomes pending (or perhaps a spurious wakeup) */
-        xen_poll_irq(irq);
-        add_stats(TAKEN_SLOW_SPURIOUS, !xen_test_irq_pending(irq));
-
-        local_irq_save(flags);
-
-        kstat_incr_irq_this_cpu(irq);
-out:
-        cpumask_clear_cpu(cpu, &waiting_cpus);
-        w->lock = NULL;
-
-        local_irq_restore(flags);
-
-        spin_time_accum_blocked(start);
-}
-PV_CALLEE_SAVE_REGS_THUNK(xen_lock_spinning);
-
-static void xen_unlock_kick(struct arch_spinlock *lock, __ticket_t next)
-{
-        int cpu;
-
-        add_stats(RELEASED_SLOW, 1);
-
-        for_each_cpu(cpu, &waiting_cpus) {
-                const struct xen_lock_waiting *w = &per_cpu(lock_waiting, cpu);
-
-                /* Make sure we read lock before want */
-                if (READ_ONCE(w->lock) == lock &&
-                    READ_ONCE(w->want) == next) {
-                        add_stats(RELEASED_SLOW_KICKED, 1);
-                        xen_send_IPI_one(cpu, XEN_SPIN_UNLOCK_VECTOR);
-                        break;
-                }
-        }
-}
-#endif /* CONFIG_QUEUED_SPINLOCKS */
-
 static irqreturn_t dummy_handler(int irq, void *dev_id)
 {
         BUG();
@@ -334,16 +131,12 @@ void __init xen_init_spinlocks(void)
                 return;
         }
         printk(KERN_DEBUG "xen: PV spinlocks enabled\n");
-#ifdef CONFIG_QUEUED_SPINLOCKS
+
         __pv_init_lock_hash();
         pv_lock_ops.queued_spin_lock_slowpath = __pv_queued_spin_lock_slowpath;
         pv_lock_ops.queued_spin_unlock = PV_CALLEE_SAVE(__pv_queued_spin_unlock);
         pv_lock_ops.wait = xen_qlock_wait;
         pv_lock_ops.kick = xen_qlock_kick;
-#else
-        pv_lock_ops.lock_spinning = PV_CALLEE_SAVE(xen_lock_spinning);
-        pv_lock_ops.unlock_kick = xen_unlock_kick;
-#endif
 }
 
 /*
@@ -372,44 +165,3 @@ static __init int xen_parse_nopvspin(char *arg)
 }
 early_param("xen_nopvspin", xen_parse_nopvspin);
 
-#if defined(CONFIG_XEN_DEBUG_FS) && !defined(CONFIG_QUEUED_SPINLOCKS)
-
-static struct dentry *d_spin_debug;
-
-static int __init xen_spinlock_debugfs(void)
-{
-        struct dentry *d_xen = xen_init_debugfs();
-
-        if (d_xen == NULL)
-                return -ENOMEM;
-
-        if (!xen_pvspin)
-                return 0;
-
-        d_spin_debug = debugfs_create_dir("spinlocks", d_xen);
-
-        debugfs_create_u8("zero_stats", 0644, d_spin_debug, &zero_stats);
-
-        debugfs_create_u32("taken_slow", 0444, d_spin_debug,
-                           &spinlock_stats.contention_stats[TAKEN_SLOW]);
-        debugfs_create_u32("taken_slow_pickup", 0444, d_spin_debug,
-                           &spinlock_stats.contention_stats[TAKEN_SLOW_PICKUP]);
-        debugfs_create_u32("taken_slow_spurious", 0444, d_spin_debug,
-                           &spinlock_stats.contention_stats[TAKEN_SLOW_SPURIOUS]);
-
-        debugfs_create_u32("released_slow", 0444, d_spin_debug,
-                           &spinlock_stats.contention_stats[RELEASED_SLOW]);
-        debugfs_create_u32("released_slow_kicked", 0444, d_spin_debug,
-                           &spinlock_stats.contention_stats[RELEASED_SLOW_KICKED]);
-
-        debugfs_create_u64("time_blocked", 0444, d_spin_debug,
-                           &spinlock_stats.time_blocked);
-
-        debugfs_create_u32_array("histo_blocked", 0444, d_spin_debug,
-                                 spinlock_stats.histo_spin_blocked, HISTO_BUCKETS + 1);
-
-        return 0;
-}
-fs_initcall(xen_spinlock_debugfs);
-
-#endif /* CONFIG_XEN_DEBUG_FS */
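After this change, Xen's paravirt spinlock support consists only of the qspinlock hooks wired up in xen_init_spinlocks() above: xen_qlock_wait() blocks a vCPU on its per-CPU kicker event channel and xen_qlock_kick() wakes the target CPU. The sketch below is a loose, illustrative model of that ->wait()/->kick() contract, assuming direct calls through pv_lock_ops; it is not the real __pv_queued_spin_lock_slowpath() logic, and the example_* names are invented.

/* Illustrative model only: not the actual kernel slowpath. */
static void example_waiter(u8 *lock_byte)
{
        /* While the byte still reads "locked", ask the hypervisor to block
         * this vCPU until the byte changes or a kick arrives. */
        while (READ_ONCE(*lock_byte) != 0)
                pv_lock_ops.wait(lock_byte, READ_ONCE(*lock_byte));  /* xen_qlock_wait() */
}

static void example_unlocker(u8 *lock_byte, int waiter_cpu)
{
        WRITE_ONCE(*lock_byte, 0);      /* release the lock byte */
        pv_lock_ops.kick(waiter_cpu);   /* xen_qlock_kick(): wake the blocked vCPU */
}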