author    | Linus Torvalds <torvalds@linux-foundation.org> | 2015-09-04 01:46:07 +0300
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2015-09-04 01:46:07 +0300
commit    | ca520cab25e0e8da717c596ccaa2c2b3650cfa09 (patch)
tree      | 883eb497642d98635817f9cf954ac98e043fb573 /arch/blackfin
parent    | 4c12ab7e5e2e892fa94df500f96001837918a281 (diff)
parent    | d420acd816c07c7be31bd19d09cbcb16e5572fa6 (diff)
download  | linux-ca520cab25e0e8da717c596ccaa2c2b3650cfa09.tar.xz
Merge branch 'locking-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull locking and atomic updates from Ingo Molnar:
"Main changes in this cycle are:
- Extend atomic primitives with coherent logic op primitives
  (atomic_{or,and,xor}()) and deprecate the old partial APIs
  (atomic_{set,clear}_mask()).
  The old ops were incoherent, with incompatible signatures across
  architectures and incomplete support. Now every architecture
  supports the primitives consistently (by Peter Zijlstra). A
  caller-side migration sketch follows the quoted message.
- Generic support for 'relaxed atomics':
  - _acquire/release/relaxed() flavours of xchg(), cmpxchg() and
    {add,sub}_return()
  - atomic_read_acquire()
  - atomic_set_release()
  This came out of porting the qrwlock code to arm64 (by Will
  Deacon). A short call-site sketch also follows the quoted message.
- Clean up the fragile static_key APIs that were causing repeat bugs
  by introducing a new one:
      DEFINE_STATIC_KEY_TRUE(name);
      DEFINE_STATIC_KEY_FALSE(name);
  which define a key of one of two distinct types with an initial
  true/false value.
  Then allow:
      static_branch_likely()
      static_branch_unlikely()
  to take a key of either type and emit the right instruction for the
  case. To be able to know the 'type' of the static key, it is
  encoded in the jump entry (by Peter Zijlstra). A usage sketch
  follows the quoted message as well.
- Static key self-tests (by Jason Baron)
- qrwlock optimizations (by Waiman Long)
- small futex enhancements (by Davidlohr Bueso)
- ... and misc other changes"
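To make the first item concrete, here is a minimal caller-side
migration sketch; the mask IPI_BIT and the atomic_t field ipi->bits
are hypothetical names, but the signatures match the new primitives
shown in the diff below:

    /* Old, now-deprecated mask API: */
    atomic_set_mask(IPI_BIT, &ipi->bits);    /* set the mask bits   */
    atomic_clear_mask(IPI_BIT, &ipi->bits);  /* clear the mask bits */

    /* New coherent logic ops -- note that clearing a mask becomes
     * an AND with the complement of the mask: */
    atomic_or(IPI_BIT, &ipi->bits);
    atomic_and(~IPI_BIT, &ipi->bits);
    atomic_xor(IPI_BIT, &ipi->bits);         /* toggle, newly uniform */

The same one-line substitution appears in the send_ipi() hunk of the
Blackfin diff at the bottom of this page.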
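For the relaxed atomics, a short call-site sketch of the new variants,
assuming the usual semantics (plain ops are fully ordered, _relaxed()
drops the ordering, _acquire()/_release() provide one-way barriers);
the variable and the stored values are arbitrary:

    atomic_t v = ATOMIC_INIT(0);
    int snap, ret;

    snap = atomic_read_acquire(&v);           /* load with acquire ordering  */
    atomic_set_release(&v, 1);                /* store with release ordering */
    ret  = atomic_add_return_relaxed(1, &v);  /* RMW with no ordering        */
    ret  = atomic_cmpxchg_acquire(&v, 2, 3);  /* RMW with acquire ordering   */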
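And a sketch of the new static_key interface; the key name and both
helper functions are made up for illustration:

    #include <linux/jump_label.h>

    /* The key starts out false: the branch below compiles to a NOP
     * and the slow path stays out of line until the key is enabled. */
    DEFINE_STATIC_KEY_FALSE(use_debug_path);

    extern void debug_slow_path(void);        /* hypothetical */

    void hot_path(void)
    {
            if (static_branch_unlikely(&use_debug_path))
                    debug_slow_path();
            /* ... fast path continues ... */
    }

    void debug_ctl(bool on)                   /* e.g. a debugfs handler */
    {
            if (on)
                    static_branch_enable(&use_debug_path);
            else
                    static_branch_disable(&use_debug_path);
    }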
* 'locking-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (63 commits)
jump_label/x86: Work around asm build bug on older/backported GCCs
locking, ARM, atomics: Define our SMP atomics in terms of _relaxed() operations
locking, include/llist: Use linux/atomic.h instead of asm/cmpxchg.h
locking/qrwlock: Make use of _{acquire|release|relaxed}() atomics
locking/qrwlock: Implement queue_write_unlock() using smp_store_release()
locking/lockref: Remove homebrew cmpxchg64_relaxed() macro definition
locking, asm-generic: Add _{relaxed|acquire|release}() variants for 'atomic_long_t'
locking, asm-generic: Rework atomic-long.h to avoid bulk code duplication
locking/atomics: Add _{acquire|release|relaxed}() variants of some atomic operations
locking, compiler.h: Cast away attributes in the WRITE_ONCE() magic
locking/static_keys: Make verify_keys() static
jump label, locking/static_keys: Update docs
locking/static_keys: Provide a selftest
jump_label: Provide a self-test
s390/uaccess, locking/static_keys: employ static_branch_likely()
x86, tsc, locking/static_keys: Employ static_branch_likely()
locking/static_keys: Add selftest
locking/static_keys: Add a new static_key interface
locking/static_keys: Rework update logic
locking/static_keys: Add static_key_{en,dis}able() helpers
...
Diffstat (limited to 'arch/blackfin')
-rw-r--r-- | arch/blackfin/include/asm/atomic.h | 16
-rw-r--r-- | arch/blackfin/kernel/bfin_ksyms.c  |  7
-rw-r--r-- | arch/blackfin/mach-bf561/atomic.S  | 30
-rw-r--r-- | arch/blackfin/mach-common/smp.c    |  2
4 files changed, 29 insertions, 26 deletions
diff --git a/arch/blackfin/include/asm/atomic.h b/arch/blackfin/include/asm/atomic.h
index a107a98e9978..1c1c42330c99 100644
--- a/arch/blackfin/include/asm/atomic.h
+++ b/arch/blackfin/include/asm/atomic.h
@@ -16,19 +16,21 @@
 #include <linux/types.h>
 
 asmlinkage int __raw_uncached_fetch_asm(const volatile int *ptr);
-asmlinkage int __raw_atomic_update_asm(volatile int *ptr, int value);
-asmlinkage int __raw_atomic_clear_asm(volatile int *ptr, int value);
-asmlinkage int __raw_atomic_set_asm(volatile int *ptr, int value);
+asmlinkage int __raw_atomic_add_asm(volatile int *ptr, int value);
+
+asmlinkage int __raw_atomic_and_asm(volatile int *ptr, int value);
+asmlinkage int __raw_atomic_or_asm(volatile int *ptr, int value);
 asmlinkage int __raw_atomic_xor_asm(volatile int *ptr, int value);
 asmlinkage int __raw_atomic_test_asm(const volatile int *ptr, int value);
 
 #define atomic_read(v) __raw_uncached_fetch_asm(&(v)->counter)
 
-#define atomic_add_return(i, v) __raw_atomic_update_asm(&(v)->counter, i)
-#define atomic_sub_return(i, v) __raw_atomic_update_asm(&(v)->counter, -(i))
+#define atomic_add_return(i, v) __raw_atomic_add_asm(&(v)->counter, i)
+#define atomic_sub_return(i, v) __raw_atomic_add_asm(&(v)->counter, -(i))
 
-#define atomic_clear_mask(m, v) __raw_atomic_clear_asm(&(v)->counter, m)
-#define atomic_set_mask(m, v) __raw_atomic_set_asm(&(v)->counter, m)
+#define atomic_or(i, v)  (void)__raw_atomic_or_asm(&(v)->counter, i)
+#define atomic_and(i, v) (void)__raw_atomic_and_asm(&(v)->counter, i)
+#define atomic_xor(i, v) (void)__raw_atomic_xor_asm(&(v)->counter, i)
 
 #endif
diff --git a/arch/blackfin/kernel/bfin_ksyms.c b/arch/blackfin/kernel/bfin_ksyms.c
index c446591b961d..a401c27b69b4 100644
--- a/arch/blackfin/kernel/bfin_ksyms.c
+++ b/arch/blackfin/kernel/bfin_ksyms.c
@@ -83,11 +83,12 @@ EXPORT_SYMBOL(insl);
 EXPORT_SYMBOL(insl_16);
 
 #ifdef CONFIG_SMP
-EXPORT_SYMBOL(__raw_atomic_update_asm);
-EXPORT_SYMBOL(__raw_atomic_clear_asm);
-EXPORT_SYMBOL(__raw_atomic_set_asm);
+EXPORT_SYMBOL(__raw_atomic_add_asm);
+EXPORT_SYMBOL(__raw_atomic_and_asm);
+EXPORT_SYMBOL(__raw_atomic_or_asm);
 EXPORT_SYMBOL(__raw_atomic_xor_asm);
 EXPORT_SYMBOL(__raw_atomic_test_asm);
+
 EXPORT_SYMBOL(__raw_xchg_1_asm);
 EXPORT_SYMBOL(__raw_xchg_2_asm);
 EXPORT_SYMBOL(__raw_xchg_4_asm);
diff --git a/arch/blackfin/mach-bf561/atomic.S b/arch/blackfin/mach-bf561/atomic.S
index 2a08df8e8c4c..26fccb5568b9 100644
--- a/arch/blackfin/mach-bf561/atomic.S
+++ b/arch/blackfin/mach-bf561/atomic.S
@@ -587,10 +587,10 @@ ENDPROC(___raw_write_unlock_asm)
  * r0 = ptr
  * r1 = value
  *
- * Add a signed value to a 32bit word and return the new value atomically.
+ * ADD a signed value to a 32bit word and return the new value atomically.
  * Clobbers: r3:0, p1:0
  */
-ENTRY(___raw_atomic_update_asm)
+ENTRY(___raw_atomic_add_asm)
 	p1 = r0;
 	r3 = r1;
 	[--sp] = rets;
@@ -603,19 +603,19 @@ ENTRY(___raw_atomic_update_asm)
 	r0 = r3;
 	rets = [sp++];
 	rts;
-ENDPROC(___raw_atomic_update_asm)
+ENDPROC(___raw_atomic_add_asm)
 
 /*
  * r0 = ptr
  * r1 = mask
  *
- * Clear the mask bits from a 32bit word and return the old 32bit value
+ * AND the mask bits from a 32bit word and return the old 32bit value
  * atomically.
  * Clobbers: r3:0, p1:0
  */
-ENTRY(___raw_atomic_clear_asm)
+ENTRY(___raw_atomic_and_asm)
 	p1 = r0;
-	r3 = ~r1;
+	r3 = r1;
 	[--sp] = rets;
 	call _get_core_lock;
 	r2 = [p1];
@@ -627,17 +627,17 @@ ENTRY(___raw_atomic_clear_asm)
 	r0 = r3;
 	rets = [sp++];
 	rts;
-ENDPROC(___raw_atomic_clear_asm)
+ENDPROC(___raw_atomic_and_asm)
 
 /*
  * r0 = ptr
  * r1 = mask
  *
- * Set the mask bits into a 32bit word and return the old 32bit value
+ * OR the mask bits into a 32bit word and return the old 32bit value
  * atomically.
  * Clobbers: r3:0, p1:0
  */
-ENTRY(___raw_atomic_set_asm)
+ENTRY(___raw_atomic_or_asm)
 	p1 = r0;
 	r3 = r1;
 	[--sp] = rets;
@@ -651,7 +651,7 @@ ENTRY(___raw_atomic_set_asm)
 	r0 = r3;
 	rets = [sp++];
 	rts;
-ENDPROC(___raw_atomic_set_asm)
+ENDPROC(___raw_atomic_or_asm)
 
 /*
  * r0 = ptr
@@ -787,7 +787,7 @@ ENTRY(___raw_bit_set_asm)
 	r2 = r1;
 	r1 = 1;
 	r1 <<= r2;
-	jump ___raw_atomic_set_asm
+	jump ___raw_atomic_or_asm
ENDPROC(___raw_bit_set_asm)
 
 /*
@@ -798,10 +798,10 @@ ENDPROC(___raw_bit_set_asm)
  * Clobbers: r3:0, p1:0
  */
 ENTRY(___raw_bit_clear_asm)
-	r2 = r1;
-	r1 = 1;
-	r1 <<= r2;
-	jump ___raw_atomic_clear_asm
+	r2 = 1;
+	r2 <<= r1;
+	r1 = ~r2;
+	jump ___raw_atomic_and_asm
 ENDPROC(___raw_bit_clear_asm)
 
 /*
diff --git a/arch/blackfin/mach-common/smp.c b/arch/blackfin/mach-common/smp.c
index 1c7259597395..0030e21cfceb 100644
--- a/arch/blackfin/mach-common/smp.c
+++ b/arch/blackfin/mach-common/smp.c
@@ -195,7 +195,7 @@ void send_ipi(const struct cpumask *cpumask, enum ipi_message_type msg)
 	local_irq_save(flags);
 	for_each_cpu(cpu, cpumask) {
 		bfin_ipi_data = &per_cpu(bfin_ipi, cpu);
-		atomic_set_mask((1 << msg), &bfin_ipi_data->bits);
+		atomic_or((1 << msg), &bfin_ipi_data->bits);
 		atomic_inc(&bfin_ipi_data->count);
 	}
 	local_irq_restore(flags);
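The Blackfin implementation above routes the new logic ops through
dedicated assembly helpers serialized by the core lock. For contrast,
here is a hedged sketch of the cmpxchg()-loop fallback style that an
architecture without such helpers could use -- illustrative only, not
code from this diff (hence the sketch_ prefix):

    /* Build an atomic OR out of a cmpxchg() retry loop: re-read the
     * old value, compute old | mask, and retry until no other CPU
     * has raced with the update. */
    static inline void sketch_atomic_or(int mask, atomic_t *v)
    {
            int old, new;

            do {
                    old = atomic_read(v);
                    new = old | mask;
            } while (atomic_cmpxchg(v, old, new) != old);
    }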