Diffstat (limited to 'arch')
-rw-r--r--  arch/Kconfig | 6
-rw-r--r--  arch/alpha/include/asm/atomic.h | 42
-rw-r--r--  arch/arc/include/asm/atomic.h | 8
-rw-r--r--  arch/arm/include/asm/atomic.h | 51
-rw-r--r--  arch/arm/include/asm/barrier.h | 4
-rw-r--r--  arch/arm/include/asm/cmpxchg.h | 47
-rw-r--r--  arch/arm/include/asm/jump_label.h | 25
-rw-r--r--  arch/arm/kernel/jump_label.c | 2
-rw-r--r--  arch/arm64/include/asm/atomic.h | 14
-rw-r--r--  arch/arm64/include/asm/barrier.h | 4
-rw-r--r--  arch/arm64/include/asm/jump_label.h | 18
-rw-r--r--  arch/arm64/kernel/jump_label.c | 2
-rw-r--r--  arch/avr32/include/asm/atomic.h | 12
-rw-r--r--  arch/blackfin/include/asm/atomic.h | 16
-rw-r--r--  arch/blackfin/kernel/bfin_ksyms.c | 7
-rw-r--r--  arch/blackfin/mach-bf561/atomic.S | 30
-rw-r--r--  arch/blackfin/mach-common/smp.c | 2
-rw-r--r--  arch/frv/include/asm/atomic.h | 107
-rw-r--r--  arch/frv/include/asm/atomic_defs.h | 172
-rw-r--r--  arch/frv/include/asm/bitops.h | 99
-rw-r--r--  arch/frv/kernel/dma.c | 6
-rw-r--r--  arch/frv/kernel/frv_ksyms.c | 5
-rw-r--r--  arch/frv/lib/Makefile | 2
-rw-r--r--  arch/frv/lib/atomic-lib.c | 7
-rw-r--r--  arch/frv/lib/atomic-ops.S | 110
-rw-r--r--  arch/frv/lib/atomic64-ops.S | 94
-rw-r--r--  arch/h8300/include/asm/atomic.h | 137
-rw-r--r--  arch/hexagon/include/asm/atomic.h | 4
-rw-r--r--  arch/ia64/include/asm/atomic.h | 24
-rw-r--r--  arch/ia64/include/asm/barrier.h | 4
-rw-r--r--  arch/m32r/include/asm/atomic.h | 45
-rw-r--r--  arch/m32r/kernel/smp.c | 4
-rw-r--r--  arch/m68k/include/asm/atomic.h | 14
-rw-r--r--  arch/metag/include/asm/atomic_lnkget.h | 38
-rw-r--r--  arch/metag/include/asm/atomic_lock1.h | 23
-rw-r--r--  arch/metag/include/asm/barrier.h | 4
-rw-r--r--  arch/mips/include/asm/atomic.h | 7
-rw-r--r--  arch/mips/include/asm/barrier.h | 4
-rw-r--r--  arch/mips/include/asm/jump_label.h | 19
-rw-r--r--  arch/mips/kernel/jump_label.c | 2
-rw-r--r--  arch/mn10300/include/asm/atomic.h | 71
-rw-r--r--  arch/mn10300/mm/tlb-smp.c | 2
-rw-r--r--  arch/parisc/configs/c8000_defconfig | 1
-rw-r--r--  arch/parisc/configs/generic-32bit_defconfig | 1
-rw-r--r--  arch/parisc/include/asm/atomic.h | 7
-rw-r--r--  arch/powerpc/include/asm/atomic.h | 7
-rw-r--r--  arch/powerpc/include/asm/barrier.h | 4
-rw-r--r--  arch/powerpc/include/asm/jump_label.h | 19
-rw-r--r--  arch/powerpc/kernel/jump_label.c | 2
-rw-r--r--  arch/powerpc/kernel/misc_32.S | 19
-rw-r--r--  arch/s390/include/asm/atomic.h | 41
-rw-r--r--  arch/s390/include/asm/barrier.h | 4
-rw-r--r--  arch/s390/include/asm/jump_label.h | 19
-rw-r--r--  arch/s390/kernel/jump_label.c | 2
-rw-r--r--  arch/s390/kernel/time.c | 4
-rw-r--r--  arch/s390/kvm/interrupt.c | 30
-rw-r--r--  arch/s390/kvm/kvm-s390.c | 32
-rw-r--r--  arch/s390/lib/uaccess.c | 12
-rw-r--r--  arch/sh/include/asm/atomic-grb.h | 43
-rw-r--r--  arch/sh/include/asm/atomic-irq.h | 21
-rw-r--r--  arch/sh/include/asm/atomic-llsc.h | 31
-rw-r--r--  arch/sparc/include/asm/atomic_32.h | 4
-rw-r--r--  arch/sparc/include/asm/atomic_64.h | 4
-rw-r--r--  arch/sparc/include/asm/barrier_64.h | 4
-rw-r--r--  arch/sparc/include/asm/jump_label.h | 35
-rw-r--r--  arch/sparc/kernel/jump_label.c | 2
-rw-r--r--  arch/sparc/lib/atomic32.c | 22
-rw-r--r--  arch/sparc/lib/atomic_64.S | 6
-rw-r--r--  arch/sparc/lib/ksyms.c | 3
-rw-r--r--  arch/tile/include/asm/atomic_32.h | 28
-rw-r--r--  arch/tile/include/asm/atomic_64.h | 40
-rw-r--r--  arch/tile/lib/atomic_32.c | 23
-rw-r--r--  arch/tile/lib/atomic_asm_32.S | 4
-rw-r--r--  arch/x86/include/asm/atomic.h | 25
-rw-r--r--  arch/x86/include/asm/atomic64_32.h | 14
-rw-r--r--  arch/x86/include/asm/atomic64_64.h | 15
-rw-r--r--  arch/x86/include/asm/barrier.h | 8
-rw-r--r--  arch/x86/include/asm/jump_label.h | 23
-rw-r--r--  arch/x86/include/asm/qrwlock.h | 10
-rw-r--r--  arch/x86/kernel/jump_label.c | 2
-rw-r--r--  arch/x86/kernel/tsc.c | 22
-rw-r--r--  arch/xtensa/configs/iss_defconfig | 1
-rw-r--r--  arch/xtensa/include/asm/atomic.h | 73
83 files changed, 899 insertions, 1062 deletions
diff --git a/arch/Kconfig b/arch/Kconfig
index 8a8ea7110de8..a71cdbe2a04d 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -71,6 +71,12 @@ config JUMP_LABEL
( On 32-bit x86, the necessary options added to the compiler
flags may increase the size of the kernel slightly. )
+config STATIC_KEYS_SELFTEST
+ bool "Static key selftest"
+ depends on JUMP_LABEL
+ help
+ Boot time self-test of the branch patching code.
+
config OPTPROBES
def_bool y
depends on KPROBES && HAVE_OPTPROBES
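
For context, the boot-time selftest enabled by STATIC_KEYS_SELFTEST exercises the same branch-patching machinery that ordinary static-key users rely on. A minimal usage sketch, assuming the static-key API of this kernel generation (static_key_false()/static_key_slow_inc(); the surrounding names are made up for illustration):

	static struct static_key my_key = STATIC_KEY_INIT_FALSE;

	void hot_path(void)
	{
		/* compiles to a patchable NOP/branch instead of a load + test */
		if (static_key_false(&my_key))
			do_rare_thing();	/* hypothetical slow path */
	}

	void enable_feature(void)
	{
		static_key_slow_inc(&my_key);	/* rewrites the branch sites */
	}
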
diff --git a/arch/alpha/include/asm/atomic.h b/arch/alpha/include/asm/atomic.h
index 8f8eafbedd7c..e8c956098424 100644
--- a/arch/alpha/include/asm/atomic.h
+++ b/arch/alpha/include/asm/atomic.h
@@ -29,13 +29,13 @@
* branch back to restart the operation.
*/
-#define ATOMIC_OP(op) \
+#define ATOMIC_OP(op, asm_op) \
static __inline__ void atomic_##op(int i, atomic_t * v) \
{ \
unsigned long temp; \
__asm__ __volatile__( \
"1: ldl_l %0,%1\n" \
- " " #op "l %0,%2,%0\n" \
+ " " #asm_op " %0,%2,%0\n" \
" stl_c %0,%1\n" \
" beq %0,2f\n" \
".subsection 2\n" \
@@ -45,15 +45,15 @@ static __inline__ void atomic_##op(int i, atomic_t * v) \
:"Ir" (i), "m" (v->counter)); \
} \
-#define ATOMIC_OP_RETURN(op) \
+#define ATOMIC_OP_RETURN(op, asm_op) \
static inline int atomic_##op##_return(int i, atomic_t *v) \
{ \
long temp, result; \
smp_mb(); \
__asm__ __volatile__( \
"1: ldl_l %0,%1\n" \
- " " #op "l %0,%3,%2\n" \
- " " #op "l %0,%3,%0\n" \
+ " " #asm_op " %0,%3,%2\n" \
+ " " #asm_op " %0,%3,%0\n" \
" stl_c %0,%1\n" \
" beq %0,2f\n" \
".subsection 2\n" \
@@ -65,13 +65,13 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \
return result; \
}
-#define ATOMIC64_OP(op) \
+#define ATOMIC64_OP(op, asm_op) \
static __inline__ void atomic64_##op(long i, atomic64_t * v) \
{ \
unsigned long temp; \
__asm__ __volatile__( \
"1: ldq_l %0,%1\n" \
- " " #op "q %0,%2,%0\n" \
+ " " #asm_op " %0,%2,%0\n" \
" stq_c %0,%1\n" \
" beq %0,2f\n" \
".subsection 2\n" \
@@ -81,15 +81,15 @@ static __inline__ void atomic64_##op(long i, atomic64_t * v) \
:"Ir" (i), "m" (v->counter)); \
} \
-#define ATOMIC64_OP_RETURN(op) \
+#define ATOMIC64_OP_RETURN(op, asm_op) \
static __inline__ long atomic64_##op##_return(long i, atomic64_t * v) \
{ \
long temp, result; \
smp_mb(); \
__asm__ __volatile__( \
"1: ldq_l %0,%1\n" \
- " " #op "q %0,%3,%2\n" \
- " " #op "q %0,%3,%0\n" \
+ " " #asm_op " %0,%3,%2\n" \
+ " " #asm_op " %0,%3,%0\n" \
" stq_c %0,%1\n" \
" beq %0,2f\n" \
".subsection 2\n" \
@@ -101,15 +101,27 @@ static __inline__ long atomic64_##op##_return(long i, atomic64_t * v) \
return result; \
}
-#define ATOMIC_OPS(opg) \
- ATOMIC_OP(opg) \
- ATOMIC_OP_RETURN(opg) \
- ATOMIC64_OP(opg) \
- ATOMIC64_OP_RETURN(opg)
+#define ATOMIC_OPS(op) \
+ ATOMIC_OP(op, op##l) \
+ ATOMIC_OP_RETURN(op, op##l) \
+ ATOMIC64_OP(op, op##q) \
+ ATOMIC64_OP_RETURN(op, op##q)
ATOMIC_OPS(add)
ATOMIC_OPS(sub)
+#define atomic_andnot atomic_andnot
+#define atomic64_andnot atomic64_andnot
+
+ATOMIC_OP(and, and)
+ATOMIC_OP(andnot, bic)
+ATOMIC_OP(or, bis)
+ATOMIC_OP(xor, xor)
+ATOMIC64_OP(and, and)
+ATOMIC64_OP(andnot, bic)
+ATOMIC64_OP(or, bis)
+ATOMIC64_OP(xor, xor)
+
#undef ATOMIC_OPS
#undef ATOMIC64_OP_RETURN
#undef ATOMIC64_OP
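
With the separate asm_op parameter introduced above, an instantiation such as ATOMIC_OP(and, and) expands to roughly the following LL/SC loop (a sketch; the retry subsection and constraints follow the existing macro body rather than anything new in this patch):

	static __inline__ void atomic_and(int i, atomic_t *v)
	{
		unsigned long temp;
		__asm__ __volatile__(
		"1:	ldl_l %0,%1\n"		/* load-locked v->counter */
		"	and %0,%2,%0\n"		/* #asm_op, no "l"/"q" suffix appended */
		"	stl_c %0,%1\n"		/* store-conditional */
		"	beq %0,2f\n"		/* lost the reservation: retry */
		".subsection 2\n"
		"2:	br 1b\n"
		".previous"
		:"=&r" (temp), "=m" (v->counter)
		:"Ir" (i), "m" (v->counter));
	}

Passing the mnemonic explicitly is what allows andnot to map onto Alpha's bic and or onto bis in the instantiations above.
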
diff --git a/arch/arc/include/asm/atomic.h b/arch/arc/include/asm/atomic.h
index 87d18ae53115..c3ecda023e3a 100644
--- a/arch/arc/include/asm/atomic.h
+++ b/arch/arc/include/asm/atomic.h
@@ -172,9 +172,13 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \
ATOMIC_OPS(add, +=, add)
ATOMIC_OPS(sub, -=, sub)
-ATOMIC_OP(and, &=, and)
-#define atomic_clear_mask(mask, v) atomic_and(~(mask), (v))
+#define atomic_andnot atomic_andnot
+
+ATOMIC_OP(and, &=, and)
+ATOMIC_OP(andnot, &= ~, bic)
+ATOMIC_OP(or, |=, or)
+ATOMIC_OP(xor, ^=, xor)
#undef ATOMIC_OPS
#undef ATOMIC_OP_RETURN
diff --git a/arch/arm/include/asm/atomic.h b/arch/arm/include/asm/atomic.h
index e22c11970b7b..fe3ef397f5a4 100644
--- a/arch/arm/include/asm/atomic.h
+++ b/arch/arm/include/asm/atomic.h
@@ -57,12 +57,11 @@ static inline void atomic_##op(int i, atomic_t *v) \
} \
#define ATOMIC_OP_RETURN(op, c_op, asm_op) \
-static inline int atomic_##op##_return(int i, atomic_t *v) \
+static inline int atomic_##op##_return_relaxed(int i, atomic_t *v) \
{ \
unsigned long tmp; \
int result; \
\
- smp_mb(); \
prefetchw(&v->counter); \
\
__asm__ __volatile__("@ atomic_" #op "_return\n" \
@@ -75,17 +74,17 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \
: "r" (&v->counter), "Ir" (i) \
: "cc"); \
\
- smp_mb(); \
- \
return result; \
}
-static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
+#define atomic_add_return_relaxed atomic_add_return_relaxed
+#define atomic_sub_return_relaxed atomic_sub_return_relaxed
+
+static inline int atomic_cmpxchg_relaxed(atomic_t *ptr, int old, int new)
{
int oldval;
unsigned long res;
- smp_mb();
prefetchw(&ptr->counter);
do {
@@ -99,10 +98,9 @@ static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
: "cc");
} while (res);
- smp_mb();
-
return oldval;
}
+#define atomic_cmpxchg_relaxed atomic_cmpxchg_relaxed
static inline int __atomic_add_unless(atomic_t *v, int a, int u)
{
@@ -194,6 +192,13 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
ATOMIC_OPS(add, +=, add)
ATOMIC_OPS(sub, -=, sub)
+#define atomic_andnot atomic_andnot
+
+ATOMIC_OP(and, &=, and)
+ATOMIC_OP(andnot, &= ~, bic)
+ATOMIC_OP(or, |=, orr)
+ATOMIC_OP(xor, ^=, eor)
+
#undef ATOMIC_OPS
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP
@@ -290,12 +295,12 @@ static inline void atomic64_##op(long long i, atomic64_t *v) \
} \
#define ATOMIC64_OP_RETURN(op, op1, op2) \
-static inline long long atomic64_##op##_return(long long i, atomic64_t *v) \
+static inline long long \
+atomic64_##op##_return_relaxed(long long i, atomic64_t *v) \
{ \
long long result; \
unsigned long tmp; \
\
- smp_mb(); \
prefetchw(&v->counter); \
\
__asm__ __volatile__("@ atomic64_" #op "_return\n" \
@@ -309,8 +314,6 @@ static inline long long atomic64_##op##_return(long long i, atomic64_t *v) \
: "r" (&v->counter), "r" (i) \
: "cc"); \
\
- smp_mb(); \
- \
return result; \
}
@@ -321,17 +324,26 @@ static inline long long atomic64_##op##_return(long long i, atomic64_t *v) \
ATOMIC64_OPS(add, adds, adc)
ATOMIC64_OPS(sub, subs, sbc)
+#define atomic64_add_return_relaxed atomic64_add_return_relaxed
+#define atomic64_sub_return_relaxed atomic64_sub_return_relaxed
+
+#define atomic64_andnot atomic64_andnot
+
+ATOMIC64_OP(and, and, and)
+ATOMIC64_OP(andnot, bic, bic)
+ATOMIC64_OP(or, orr, orr)
+ATOMIC64_OP(xor, eor, eor)
+
#undef ATOMIC64_OPS
#undef ATOMIC64_OP_RETURN
#undef ATOMIC64_OP
-static inline long long atomic64_cmpxchg(atomic64_t *ptr, long long old,
- long long new)
+static inline long long
+atomic64_cmpxchg_relaxed(atomic64_t *ptr, long long old, long long new)
{
long long oldval;
unsigned long res;
- smp_mb();
prefetchw(&ptr->counter);
do {
@@ -346,17 +358,15 @@ static inline long long atomic64_cmpxchg(atomic64_t *ptr, long long old,
: "cc");
} while (res);
- smp_mb();
-
return oldval;
}
+#define atomic64_cmpxchg_relaxed atomic64_cmpxchg_relaxed
-static inline long long atomic64_xchg(atomic64_t *ptr, long long new)
+static inline long long atomic64_xchg_relaxed(atomic64_t *ptr, long long new)
{
long long result;
unsigned long tmp;
- smp_mb();
prefetchw(&ptr->counter);
__asm__ __volatile__("@ atomic64_xchg\n"
@@ -368,10 +378,9 @@ static inline long long atomic64_xchg(atomic64_t *ptr, long long new)
: "r" (&ptr->counter), "r" (new)
: "cc");
- smp_mb();
-
return result;
}
+#define atomic64_xchg_relaxed atomic64_xchg_relaxed
static inline long long atomic64_dec_if_positive(atomic64_t *v)
{
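
The smp_mb() calls deleted above are not simply dropped: once an architecture provides only the _relaxed variants, the generic <linux/atomic.h> layer is expected to reconstruct the fully ordered operations by bracketing them with barriers, roughly like this (a simplified sketch of the generic wrapper, not part of this diff):

	#define __atomic_op_fence(op, args...)				\
	({								\
		typeof(op##_relaxed(args)) __ret;			\
		smp_mb__before_atomic();				\
		__ret = op##_relaxed(args);				\
		smp_mb__after_atomic();					\
		__ret;							\
	})

	/* e.g. when the arch only defines atomic_add_return_relaxed(): */
	#ifndef atomic_add_return
	#define atomic_add_return(...)					\
		__atomic_op_fence(atomic_add_return, __VA_ARGS__)
	#endif
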
diff --git a/arch/arm/include/asm/barrier.h b/arch/arm/include/asm/barrier.h
index 6c2327e1c732..70393574e0fa 100644
--- a/arch/arm/include/asm/barrier.h
+++ b/arch/arm/include/asm/barrier.h
@@ -67,12 +67,12 @@
do { \
compiletime_assert_atomic_type(*p); \
smp_mb(); \
- ACCESS_ONCE(*p) = (v); \
+ WRITE_ONCE(*p, v); \
} while (0)
#define smp_load_acquire(p) \
({ \
- typeof(*p) ___p1 = ACCESS_ONCE(*p); \
+ typeof(*p) ___p1 = READ_ONCE(*p); \
compiletime_assert_atomic_type(*p); \
smp_mb(); \
___p1; \
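
The ACCESS_ONCE() to READ_ONCE()/WRITE_ONCE() conversion here (and in the other barrier.h files below) is mechanical; both forms force a single volatile access, but the newer helpers also cope with non-scalar types. For the scalar case the intent is roughly the following (illustrative macros, not the kernel's actual definitions):

	#define my_read_once(x)		(*(volatile typeof(x) *)&(x))
	#define my_write_once(x, val)	(*(volatile typeof(x) *)&(x) = (val))
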
diff --git a/arch/arm/include/asm/cmpxchg.h b/arch/arm/include/asm/cmpxchg.h
index 1692a05d3207..916a2744d5c6 100644
--- a/arch/arm/include/asm/cmpxchg.h
+++ b/arch/arm/include/asm/cmpxchg.h
@@ -35,7 +35,6 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size
unsigned int tmp;
#endif
- smp_mb();
prefetchw((const void *)ptr);
switch (size) {
@@ -98,12 +97,11 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size
__bad_xchg(ptr, size), ret = 0;
break;
}
- smp_mb();
return ret;
}
-#define xchg(ptr, x) ({ \
+#define xchg_relaxed(ptr, x) ({ \
(__typeof__(*(ptr)))__xchg((unsigned long)(x), (ptr), \
sizeof(*(ptr))); \
})
@@ -117,6 +115,8 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size
#error "SMP is not supported on this platform"
#endif
+#define xchg xchg_relaxed
+
/*
* cmpxchg_local and cmpxchg64_local are atomic wrt current CPU. Always make
* them available.
@@ -194,23 +194,11 @@ static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
return oldval;
}
-static inline unsigned long __cmpxchg_mb(volatile void *ptr, unsigned long old,
- unsigned long new, int size)
-{
- unsigned long ret;
-
- smp_mb();
- ret = __cmpxchg(ptr, old, new, size);
- smp_mb();
-
- return ret;
-}
-
-#define cmpxchg(ptr,o,n) ({ \
- (__typeof__(*(ptr)))__cmpxchg_mb((ptr), \
- (unsigned long)(o), \
- (unsigned long)(n), \
- sizeof(*(ptr))); \
+#define cmpxchg_relaxed(ptr,o,n) ({ \
+ (__typeof__(*(ptr)))__cmpxchg((ptr), \
+ (unsigned long)(o), \
+ (unsigned long)(n), \
+ sizeof(*(ptr))); \
})
static inline unsigned long __cmpxchg_local(volatile void *ptr,
@@ -273,25 +261,6 @@ static inline unsigned long long __cmpxchg64(unsigned long long *ptr,
#define cmpxchg64_local(ptr, o, n) cmpxchg64_relaxed((ptr), (o), (n))
-static inline unsigned long long __cmpxchg64_mb(unsigned long long *ptr,
- unsigned long long old,
- unsigned long long new)
-{
- unsigned long long ret;
-
- smp_mb();
- ret = __cmpxchg64(ptr, old, new);
- smp_mb();
-
- return ret;
-}
-
-#define cmpxchg64(ptr, o, n) ({ \
- (__typeof__(*(ptr)))__cmpxchg64_mb((ptr), \
- (unsigned long long)(o), \
- (unsigned long long)(n)); \
-})
-
#endif /* __LINUX_ARM_ARCH__ >= 6 */
#endif /* __ASM_ARM_CMPXCHG_H */
diff --git a/arch/arm/include/asm/jump_label.h b/arch/arm/include/asm/jump_label.h
index 5f337dc5c108..34f7b6980d21 100644
--- a/arch/arm/include/asm/jump_label.h
+++ b/arch/arm/include/asm/jump_label.h
@@ -4,23 +4,32 @@
#ifndef __ASSEMBLY__
#include <linux/types.h>
+#include <asm/unified.h>
#define JUMP_LABEL_NOP_SIZE 4
-#ifdef CONFIG_THUMB2_KERNEL
-#define JUMP_LABEL_NOP "nop.w"
-#else
-#define JUMP_LABEL_NOP "nop"
-#endif
+static __always_inline bool arch_static_branch(struct static_key *key, bool branch)
+{
+ asm_volatile_goto("1:\n\t"
+ WASM(nop) "\n\t"
+ ".pushsection __jump_table, \"aw\"\n\t"
+ ".word 1b, %l[l_yes], %c0\n\t"
+ ".popsection\n\t"
+ : : "i" (&((char *)key)[branch]) : : l_yes);
+
+ return false;
+l_yes:
+ return true;
+}
-static __always_inline bool arch_static_branch(struct static_key *key)
+static __always_inline bool arch_static_branch_jump(struct static_key *key, bool branch)
{
asm_volatile_goto("1:\n\t"
- JUMP_LABEL_NOP "\n\t"
+ WASM(b) " %l[l_yes]\n\t"
".pushsection __jump_table, \"aw\"\n\t"
".word 1b, %l[l_yes], %c0\n\t"
".popsection\n\t"
- : : "i" (key) : : l_yes);
+ : : "i" (&((char *)key)[branch]) : : l_yes);
return false;
l_yes:
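
The "%c0" operand changing from "i" (key) to "i" (&((char *)key)[branch]) encodes the branch polarity in bit 0 of the key address stored in the __jump_table entry (struct static_key is at least word aligned, so that bit is otherwise always zero). The patching core can then split the two back apart along these lines (illustrative helpers; the real decoding lives in kernel/jump_label.c):

	static inline struct static_key *entry_key(const struct jump_entry *e)
	{
		/* mask off the polarity bit smuggled in via &((char *)key)[branch] */
		return (struct static_key *)((unsigned long)e->key & ~1UL);
	}

	static inline bool entry_branch(const struct jump_entry *e)
	{
		return (unsigned long)e->key & 1UL;
	}
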
diff --git a/arch/arm/kernel/jump_label.c b/arch/arm/kernel/jump_label.c
index e39cbf488cfe..845a5dd9c42b 100644
--- a/arch/arm/kernel/jump_label.c
+++ b/arch/arm/kernel/jump_label.c
@@ -12,7 +12,7 @@ static void __arch_jump_label_transform(struct jump_entry *entry,
void *addr = (void *)entry->code;
unsigned int insn;
- if (type == JUMP_LABEL_ENABLE)
+ if (type == JUMP_LABEL_JMP)
insn = arm_gen_branch(entry->code, entry->target);
else
insn = arm_gen_nop();
diff --git a/arch/arm64/include/asm/atomic.h b/arch/arm64/include/asm/atomic.h
index 7047051ded40..866a71fca9a3 100644
--- a/arch/arm64/include/asm/atomic.h
+++ b/arch/arm64/include/asm/atomic.h
@@ -85,6 +85,13 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \
ATOMIC_OPS(add, add)
ATOMIC_OPS(sub, sub)
+#define atomic_andnot atomic_andnot
+
+ATOMIC_OP(and, and)
+ATOMIC_OP(andnot, bic)
+ATOMIC_OP(or, orr)
+ATOMIC_OP(xor, eor)
+
#undef ATOMIC_OPS
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP
@@ -183,6 +190,13 @@ static inline long atomic64_##op##_return(long i, atomic64_t *v) \
ATOMIC64_OPS(add, add)
ATOMIC64_OPS(sub, sub)
+#define atomic64_andnot atomic64_andnot
+
+ATOMIC64_OP(and, and)
+ATOMIC64_OP(andnot, bic)
+ATOMIC64_OP(or, orr)
+ATOMIC64_OP(xor, eor)
+
#undef ATOMIC64_OPS
#undef ATOMIC64_OP_RETURN
#undef ATOMIC64_OP
diff --git a/arch/arm64/include/asm/barrier.h b/arch/arm64/include/asm/barrier.h
index 0fa47c4275cb..ef93b20bc964 100644
--- a/arch/arm64/include/asm/barrier.h
+++ b/arch/arm64/include/asm/barrier.h
@@ -44,12 +44,12 @@
do { \
compiletime_assert_atomic_type(*p); \
barrier(); \
- ACCESS_ONCE(*p) = (v); \
+ WRITE_ONCE(*p, v); \
} while (0)
#define smp_load_acquire(p) \
({ \
- typeof(*p) ___p1 = ACCESS_ONCE(*p); \
+ typeof(*p) ___p1 = READ_ONCE(*p); \
compiletime_assert_atomic_type(*p); \
barrier(); \
___p1; \
diff --git a/arch/arm64/include/asm/jump_label.h b/arch/arm64/include/asm/jump_label.h
index c0e5165c2f76..1b5e0e843c3a 100644
--- a/arch/arm64/include/asm/jump_label.h
+++ b/arch/arm64/include/asm/jump_label.h
@@ -26,14 +26,28 @@
#define JUMP_LABEL_NOP_SIZE AARCH64_INSN_SIZE
-static __always_inline bool arch_static_branch(struct static_key *key)
+static __always_inline bool arch_static_branch(struct static_key *key, bool branch)
{
asm goto("1: nop\n\t"
".pushsection __jump_table, \"aw\"\n\t"
".align 3\n\t"
".quad 1b, %l[l_yes], %c0\n\t"
".popsection\n\t"
- : : "i"(key) : : l_yes);
+ : : "i"(&((char *)key)[branch]) : : l_yes);
+
+ return false;
+l_yes:
+ return true;
+}
+
+static __always_inline bool arch_static_branch_jump(struct static_key *key, bool branch)
+{
+ asm goto("1: b %l[l_yes]\n\t"
+ ".pushsection __jump_table, \"aw\"\n\t"
+ ".align 3\n\t"
+ ".quad 1b, %l[l_yes], %c0\n\t"
+ ".popsection\n\t"
+ : : "i"(&((char *)key)[branch]) : : l_yes);
return false;
l_yes:
diff --git a/arch/arm64/kernel/jump_label.c b/arch/arm64/kernel/jump_label.c
index 4f1fec7a46db..c2dd1ad3e648 100644
--- a/arch/arm64/kernel/jump_label.c
+++ b/arch/arm64/kernel/jump_label.c
@@ -28,7 +28,7 @@ void arch_jump_label_transform(struct jump_entry *entry,
void *addr = (void *)entry->code;
u32 insn;
- if (type == JUMP_LABEL_ENABLE) {
+ if (type == JUMP_LABEL_JMP) {
insn = aarch64_insn_gen_branch_imm(entry->code,
entry->target,
AARCH64_INSN_BRANCH_NOLINK);
diff --git a/arch/avr32/include/asm/atomic.h b/arch/avr32/include/asm/atomic.h
index 2d07ce1c5327..97c9bdf83409 100644
--- a/arch/avr32/include/asm/atomic.h
+++ b/arch/avr32/include/asm/atomic.h
@@ -44,6 +44,18 @@ static inline int __atomic_##op##_return(int i, atomic_t *v) \
ATOMIC_OP_RETURN(sub, sub, rKs21)
ATOMIC_OP_RETURN(add, add, r)
+#define ATOMIC_OP(op, asm_op) \
+ATOMIC_OP_RETURN(op, asm_op, r) \
+static inline void atomic_##op(int i, atomic_t *v) \
+{ \
+ (void)__atomic_##op##_return(i, v); \
+}
+
+ATOMIC_OP(and, and)
+ATOMIC_OP(or, or)
+ATOMIC_OP(xor, eor)
+
+#undef ATOMIC_OP
#undef ATOMIC_OP_RETURN
/*
diff --git a/arch/blackfin/include/asm/atomic.h b/arch/blackfin/include/asm/atomic.h
index a107a98e9978..1c1c42330c99 100644
--- a/arch/blackfin/include/asm/atomic.h
+++ b/arch/blackfin/include/asm/atomic.h
@@ -16,19 +16,21 @@
#include <linux/types.h>
asmlinkage int __raw_uncached_fetch_asm(const volatile int *ptr);
-asmlinkage int __raw_atomic_update_asm(volatile int *ptr, int value);
-asmlinkage int __raw_atomic_clear_asm(volatile int *ptr, int value);
-asmlinkage int __raw_atomic_set_asm(volatile int *ptr, int value);
+asmlinkage int __raw_atomic_add_asm(volatile int *ptr, int value);
+
+asmlinkage int __raw_atomic_and_asm(volatile int *ptr, int value);
+asmlinkage int __raw_atomic_or_asm(volatile int *ptr, int value);
asmlinkage int __raw_atomic_xor_asm(volatile int *ptr, int value);
asmlinkage int __raw_atomic_test_asm(const volatile int *ptr, int value);
#define atomic_read(v) __raw_uncached_fetch_asm(&(v)->counter)
-#define atomic_add_return(i, v) __raw_atomic_update_asm(&(v)->counter, i)
-#define atomic_sub_return(i, v) __raw_atomic_update_asm(&(v)->counter, -(i))
+#define atomic_add_return(i, v) __raw_atomic_add_asm(&(v)->counter, i)
+#define atomic_sub_return(i, v) __raw_atomic_add_asm(&(v)->counter, -(i))
-#define atomic_clear_mask(m, v) __raw_atomic_clear_asm(&(v)->counter, m)
-#define atomic_set_mask(m, v) __raw_atomic_set_asm(&(v)->counter, m)
+#define atomic_or(i, v) (void)__raw_atomic_or_asm(&(v)->counter, i)
+#define atomic_and(i, v) (void)__raw_atomic_and_asm(&(v)->counter, i)
+#define atomic_xor(i, v) (void)__raw_atomic_xor_asm(&(v)->counter, i)
#endif
diff --git a/arch/blackfin/kernel/bfin_ksyms.c b/arch/blackfin/kernel/bfin_ksyms.c
index c446591b961d..a401c27b69b4 100644
--- a/arch/blackfin/kernel/bfin_ksyms.c
+++ b/arch/blackfin/kernel/bfin_ksyms.c
@@ -83,11 +83,12 @@ EXPORT_SYMBOL(insl);
EXPORT_SYMBOL(insl_16);
#ifdef CONFIG_SMP
-EXPORT_SYMBOL(__raw_atomic_update_asm);
-EXPORT_SYMBOL(__raw_atomic_clear_asm);
-EXPORT_SYMBOL(__raw_atomic_set_asm);
+EXPORT_SYMBOL(__raw_atomic_add_asm);
+EXPORT_SYMBOL(__raw_atomic_and_asm);
+EXPORT_SYMBOL(__raw_atomic_or_asm);
EXPORT_SYMBOL(__raw_atomic_xor_asm);
EXPORT_SYMBOL(__raw_atomic_test_asm);
+
EXPORT_SYMBOL(__raw_xchg_1_asm);
EXPORT_SYMBOL(__raw_xchg_2_asm);
EXPORT_SYMBOL(__raw_xchg_4_asm);
diff --git a/arch/blackfin/mach-bf561/atomic.S b/arch/blackfin/mach-bf561/atomic.S
index 2a08df8e8c4c..26fccb5568b9 100644
--- a/arch/blackfin/mach-bf561/atomic.S
+++ b/arch/blackfin/mach-bf561/atomic.S
@@ -587,10 +587,10 @@ ENDPROC(___raw_write_unlock_asm)
* r0 = ptr
* r1 = value
*
- * Add a signed value to a 32bit word and return the new value atomically.
+ * ADD a signed value to a 32bit word and return the new value atomically.
* Clobbers: r3:0, p1:0
*/
-ENTRY(___raw_atomic_update_asm)
+ENTRY(___raw_atomic_add_asm)
p1 = r0;
r3 = r1;
[--sp] = rets;
@@ -603,19 +603,19 @@ ENTRY(___raw_atomic_update_asm)
r0 = r3;
rets = [sp++];
rts;
-ENDPROC(___raw_atomic_update_asm)
+ENDPROC(___raw_atomic_add_asm)
/*
* r0 = ptr
* r1 = mask
*
- * Clear the mask bits from a 32bit word and return the old 32bit value
+ * AND the mask bits from a 32bit word and return the old 32bit value
* atomically.
* Clobbers: r3:0, p1:0
*/
-ENTRY(___raw_atomic_clear_asm)
+ENTRY(___raw_atomic_and_asm)
p1 = r0;
- r3 = ~r1;
+ r3 = r1;
[--sp] = rets;
call _get_core_lock;
r2 = [p1];
@@ -627,17 +627,17 @@ ENTRY(___raw_atomic_clear_asm)
r0 = r3;
rets = [sp++];
rts;
-ENDPROC(___raw_atomic_clear_asm)
+ENDPROC(___raw_atomic_and_asm)
/*
* r0 = ptr
* r1 = mask
*
- * Set the mask bits into a 32bit word and return the old 32bit value
+ * OR the mask bits into a 32bit word and return the old 32bit value
* atomically.
* Clobbers: r3:0, p1:0
*/
-ENTRY(___raw_atomic_set_asm)
+ENTRY(___raw_atomic_or_asm)
p1 = r0;
r3 = r1;
[--sp] = rets;
@@ -651,7 +651,7 @@ ENTRY(___raw_atomic_set_asm)
r0 = r3;
rets = [sp++];
rts;
-ENDPROC(___raw_atomic_set_asm)
+ENDPROC(___raw_atomic_or_asm)
/*
* r0 = ptr
@@ -787,7 +787,7 @@ ENTRY(___raw_bit_set_asm)
r2 = r1;
r1 = 1;
r1 <<= r2;
- jump ___raw_atomic_set_asm
+ jump ___raw_atomic_or_asm
ENDPROC(___raw_bit_set_asm)
/*
@@ -798,10 +798,10 @@ ENDPROC(___raw_bit_set_asm)
* Clobbers: r3:0, p1:0
*/
ENTRY(___raw_bit_clear_asm)
- r2 = r1;
- r1 = 1;
- r1 <<= r2;
- jump ___raw_atomic_clear_asm
+ r2 = 1;
+ r2 <<= r1;
+ r1 = ~r2;
+ jump ___raw_atomic_and_asm
ENDPROC(___raw_bit_clear_asm)
/*
diff --git a/arch/blackfin/mach-common/smp.c b/arch/blackfin/mach-common/smp.c
index 1c7259597395..0030e21cfceb 100644
--- a/arch/blackfin/mach-common/smp.c
+++ b/arch/blackfin/mach-common/smp.c
@@ -195,7 +195,7 @@ void send_ipi(const struct cpumask *cpumask, enum ipi_message_type msg)
local_irq_save(flags);
for_each_cpu(cpu, cpumask) {
bfin_ipi_data = &per_cpu(bfin_ipi, cpu);
- atomic_set_mask((1 << msg), &bfin_ipi_data->bits);
+ atomic_or((1 << msg), &bfin_ipi_data->bits);
atomic_inc(&bfin_ipi_data->count);
}
local_irq_restore(flags);
diff --git a/arch/frv/include/asm/atomic.h b/arch/frv/include/asm/atomic.h
index 102190a61d65..0da689def4cc 100644
--- a/arch/frv/include/asm/atomic.h
+++ b/arch/frv/include/asm/atomic.h
@@ -15,7 +15,6 @@
#define _ASM_ATOMIC_H
#include <linux/types.h>
-#include <asm/spr-regs.h>
#include <asm/cmpxchg.h>
#include <asm/barrier.h>
@@ -23,6 +22,8 @@
#error not SMP safe
#endif
+#include <asm/atomic_defs.h>
+
/*
* Atomic operations that C can't guarantee us. Useful for
* resource counting etc..
@@ -34,56 +35,26 @@
#define atomic_read(v) ACCESS_ONCE((v)->counter)
#define atomic_set(v, i) (((v)->counter) = (i))
-#ifndef CONFIG_FRV_OUTOFLINE_ATOMIC_OPS
-static inline int atomic_add_return(int i, atomic_t *v)
+static inline int atomic_inc_return(atomic_t *v)
{
- unsigned long val;
+ return __atomic_add_return(1, &v->counter);
+}
- asm("0: \n"
- " orcc gr0,gr0,gr0,icc3 \n" /* set ICC3.Z */
- " ckeq icc3,cc7 \n"
- " ld.p %M0,%1 \n" /* LD.P/ORCR must be atomic */
- " orcr cc7,cc7,cc3 \n" /* set CC3 to true */
- " add%I2 %1,%2,%1 \n"
- " cst.p %1,%M0 ,cc3,#1 \n"
- " corcc gr29,gr29,gr0 ,cc3,#1 \n" /* clear ICC3.Z if store happens */
- " beq icc3,#0,0b \n"
- : "+U"(v->counter), "=&r"(val)
- : "NPr"(i)
- : "memory", "cc7", "cc3", "icc3"
- );
+static inline int atomic_dec_return(atomic_t *v)
+{
+ return __atomic_sub_return(1, &v->counter);
+}
- return val;
+static inline int atomic_add_return(int i, atomic_t *v)
+{
+ return __atomic_add_return(i, &v->counter);
}
static inline int atomic_sub_return(int i, atomic_t *v)
{
- unsigned long val;
-
- asm("0: \n"
- " orcc gr0,gr0,gr0,icc3 \n" /* set ICC3.Z */
- " ckeq icc3,cc7 \n"
- " ld.p %M0,%1 \n" /* LD.P/ORCR must be atomic */
- " orcr cc7,cc7,cc3 \n" /* set CC3 to true */
- " sub%I2 %1,%2,%1 \n"
- " cst.p %1,%M0 ,cc3,#1 \n"
- " corcc gr29,gr29,gr0 ,cc3,#1 \n" /* clear ICC3.Z if store happens */
- " beq icc3,#0,0b \n"
- : "+U"(v->counter), "=&r"(val)
- : "NPr"(i)
- : "memory", "cc7", "cc3", "icc3"
- );
-
- return val;
+ return __atomic_sub_return(i, &v->counter);
}
-#else
-
-extern int atomic_add_return(int i, atomic_t *v);
-extern int atomic_sub_return(int i, atomic_t *v);
-
-#endif
-
static inline int atomic_add_negative(int i, atomic_t *v)
{
return atomic_add_return(i, v) < 0;
@@ -101,17 +72,14 @@ static inline void atomic_sub(int i, atomic_t *v)
static inline void atomic_inc(atomic_t *v)
{
- atomic_add_return(1, v);
+ atomic_inc_return(v);
}
static inline void atomic_dec(atomic_t *v)
{
- atomic_sub_return(1, v);
+ atomic_dec_return(v);
}
-#define atomic_dec_return(v) atomic_sub_return(1, (v))
-#define atomic_inc_return(v) atomic_add_return(1, (v))
-
#define atomic_sub_and_test(i,v) (atomic_sub_return((i), (v)) == 0)
#define atomic_dec_and_test(v) (atomic_sub_return(1, (v)) == 0)
#define atomic_inc_and_test(v) (atomic_add_return(1, (v)) == 0)
@@ -120,18 +88,19 @@ static inline void atomic_dec(atomic_t *v)
* 64-bit atomic ops
*/
typedef struct {
- volatile long long counter;
+ long long counter;
} atomic64_t;
#define ATOMIC64_INIT(i) { (i) }
-static inline long long atomic64_read(atomic64_t *v)
+static inline long long atomic64_read(const atomic64_t *v)
{
long long counter;
asm("ldd%I1 %M1,%0"
: "=e"(counter)
: "m"(v->counter));
+
return counter;
}
@@ -142,10 +111,25 @@ static inline void atomic64_set(atomic64_t *v, long long i)
: "e"(i));
}
-extern long long atomic64_inc_return(atomic64_t *v);
-extern long long atomic64_dec_return(atomic64_t *v);
-extern long long atomic64_add_return(long long i, atomic64_t *v);
-extern long long atomic64_sub_return(long long i, atomic64_t *v);
+static inline long long atomic64_inc_return(atomic64_t *v)
+{
+ return __atomic64_add_return(1, &v->counter);
+}
+
+static inline long long atomic64_dec_return(atomic64_t *v)
+{
+ return __atomic64_sub_return(1, &v->counter);
+}
+
+static inline long long atomic64_add_return(long long i, atomic64_t *v)
+{
+ return __atomic64_add_return(i, &v->counter);
+}
+
+static inline long long atomic64_sub_return(long long i, atomic64_t *v)
+{
+ return __atomic64_sub_return(i, &v->counter);
+}
static inline long long atomic64_add_negative(long long i, atomic64_t *v)
{
@@ -176,6 +160,7 @@ static inline void atomic64_dec(atomic64_t *v)
#define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0)
#define atomic64_inc_and_test(v) (atomic64_inc_return((v)) == 0)
+
#define atomic_cmpxchg(v, old, new) (cmpxchg(&(v)->counter, old, new))
#define atomic_xchg(v, new) (xchg(&(v)->counter, new))
#define atomic64_cmpxchg(v, old, new) (__cmpxchg_64(old, new, &(v)->counter))
@@ -196,5 +181,21 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
return c;
}
+#define ATOMIC_OP(op) \
+static inline void atomic_##op(int i, atomic_t *v) \
+{ \
+ (void)__atomic32_fetch_##op(i, &v->counter); \
+} \
+ \
+static inline void atomic64_##op(long long i, atomic64_t *v) \
+{ \
+ (void)__atomic64_fetch_##op(i, &v->counter); \
+}
+
+ATOMIC_OP(or)
+ATOMIC_OP(and)
+ATOMIC_OP(xor)
+
+#undef ATOMIC_OP
#endif /* _ASM_ATOMIC_H */
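
The ATOMIC_OP() wrapper added above simply discards the return value of the new fetch primitives from <asm/atomic_defs.h>; for example, ATOMIC_OP(or) expands to:

	static inline void atomic_or(int i, atomic_t *v)
	{
		(void)__atomic32_fetch_or(i, &v->counter);
	}

	static inline void atomic64_or(long long i, atomic64_t *v)
	{
		(void)__atomic64_fetch_or(i, &v->counter);
	}
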
diff --git a/arch/frv/include/asm/atomic_defs.h b/arch/frv/include/asm/atomic_defs.h
new file mode 100644
index 000000000000..36e126d2f801
--- /dev/null
+++ b/arch/frv/include/asm/atomic_defs.h
@@ -0,0 +1,172 @@
+
+#include <asm/spr-regs.h>
+
+#ifdef __ATOMIC_LIB__
+
+#ifdef CONFIG_FRV_OUTOFLINE_ATOMIC_OPS
+
+#define ATOMIC_QUALS
+#define ATOMIC_EXPORT(x) EXPORT_SYMBOL(x)
+
+#else /* !OUTOFLINE && LIB */
+
+#define ATOMIC_OP_RETURN(op)
+#define ATOMIC_FETCH_OP(op)
+
+#endif /* OUTOFLINE */
+
+#else /* !__ATOMIC_LIB__ */
+
+#define ATOMIC_EXPORT(x)
+
+#ifdef CONFIG_FRV_OUTOFLINE_ATOMIC_OPS
+
+#define ATOMIC_OP_RETURN(op) \
+extern int __atomic_##op##_return(int i, int *v); \
+extern long long __atomic64_##op##_return(long long i, long long *v);
+
+#define ATOMIC_FETCH_OP(op) \
+extern int __atomic32_fetch_##op(int i, int *v); \
+extern long long __atomic64_fetch_##op(long long i, long long *v);
+
+#else /* !OUTOFLINE && !LIB */
+
+#define ATOMIC_QUALS static inline
+
+#endif /* OUTOFLINE */
+#endif /* __ATOMIC_LIB__ */
+
+
+/*
+ * Note on the 64 bit inline asm variants...
+ *
+ * CSTD is a conditional instruction and needs a constrained memory reference.
+ * Normally 'U' provides the correct constraints for conditional instructions
+ * and this is used for the 32 bit version, however 'U' does not appear to work
+ * for 64 bit values (gcc-4.9)
+ *
+ * The exact constraint is that conditional instructions cannot deal with an
+ * immediate displacement in the memory reference, so what we do is we read the
+ * address through a volatile cast into a local variable in order to insure we
+ * _have_ to compute the correct address without displacement. This allows us
+ * to use the regular 'm' for the memory address.
+ *
+ * Furthermore, the %Ln operand, which prints the low word register (r+1),
+ * really only works for registers, this means we cannot allow immediate values
+ * for the 64 bit versions -- like we do for the 32 bit ones.
+ *
+ */
+
+#ifndef ATOMIC_OP_RETURN
+#define ATOMIC_OP_RETURN(op) \
+ATOMIC_QUALS int __atomic_##op##_return(int i, int *v) \
+{ \
+ int val; \
+ \
+ asm volatile( \
+ "0: \n" \
+ " orcc gr0,gr0,gr0,icc3 \n" \
+ " ckeq icc3,cc7 \n" \
+ " ld.p %M0,%1 \n" \
+ " orcr cc7,cc7,cc3 \n" \
+ " "#op"%I2 %1,%2,%1 \n" \
+ " cst.p %1,%M0 ,cc3,#1 \n" \
+ " corcc gr29,gr29,gr0 ,cc3,#1 \n" \
+ " beq icc3,#0,0b \n" \
+ : "+U"(*v), "=&r"(val) \
+ : "NPr"(i) \
+ : "memory", "cc7", "cc3", "icc3" \
+ ); \
+ \
+ return val; \
+} \
+ATOMIC_EXPORT(__atomic_##op##_return); \
+ \
+ATOMIC_QUALS long long __atomic64_##op##_return(long long i, long long *v) \
+{ \
+ long long *__v = READ_ONCE(v); \
+ long long val; \
+ \
+ asm volatile( \
+ "0: \n" \
+ " orcc gr0,gr0,gr0,icc3 \n" \
+ " ckeq icc3,cc7 \n" \
+ " ldd.p %M0,%1 \n" \
+ " orcr cc7,cc7,cc3 \n" \
+ " "#op"cc %L1,%L2,%L1,icc0 \n" \
+ " "#op"x %1,%2,%1,icc0 \n" \
+ " cstd.p %1,%M0 ,cc3,#1 \n" \
+ " corcc gr29,gr29,gr0 ,cc3,#1 \n" \
+ " beq icc3,#0,0b \n" \
+ : "+m"(*__v), "=&e"(val) \
+ : "e"(i) \
+ : "memory", "cc7", "cc3", "icc0", "icc3" \
+ ); \
+ \
+ return val; \
+} \
+ATOMIC_EXPORT(__atomic64_##op##_return);
+#endif
+
+#ifndef ATOMIC_FETCH_OP
+#define ATOMIC_FETCH_OP(op) \
+ATOMIC_QUALS int __atomic32_fetch_##op(int i, int *v) \
+{ \
+ int old, tmp; \
+ \
+ asm volatile( \
+ "0: \n" \
+ " orcc gr0,gr0,gr0,icc3 \n" \
+ " ckeq icc3,cc7 \n" \
+ " ld.p %M0,%1 \n" \
+ " orcr cc7,cc7,cc3 \n" \
+ " "#op"%I3 %1,%3,%2 \n" \
+ " cst.p %2,%M0 ,cc3,#1 \n" \
+ " corcc gr29,gr29,gr0 ,cc3,#1 \n" \
+ " beq icc3,#0,0b \n" \
+ : "+U"(*v), "=&r"(old), "=r"(tmp) \
+ : "NPr"(i) \
+ : "memory", "cc7", "cc3", "icc3" \
+ ); \
+ \
+ return old; \
+} \
+ATOMIC_EXPORT(__atomic32_fetch_##op); \
+ \
+ATOMIC_QUALS long long __atomic64_fetch_##op(long long i, long long *v) \
+{ \
+ long long *__v = READ_ONCE(v); \
+ long long old, tmp; \
+ \
+ asm volatile( \
+ "0: \n" \
+ " orcc gr0,gr0,gr0,icc3 \n" \
+ " ckeq icc3,cc7 \n" \
+ " ldd.p %M0,%1 \n" \
+ " orcr cc7,cc7,cc3 \n" \
+ " "#op" %L1,%L3,%L2 \n" \
+ " "#op" %1,%3,%2 \n" \
+ " cstd.p %2,%M0 ,cc3,#1 \n" \
+ " corcc gr29,gr29,gr0 ,cc3,#1 \n" \
+ " beq icc3,#0,0b \n" \
+ : "+m"(*__v), "=&e"(old), "=e"(tmp) \
+ : "e"(i) \
+ : "memory", "cc7", "cc3", "icc3" \
+ ); \
+ \
+ return old; \
+} \
+ATOMIC_EXPORT(__atomic64_fetch_##op);
+#endif
+
+ATOMIC_FETCH_OP(or)
+ATOMIC_FETCH_OP(and)
+ATOMIC_FETCH_OP(xor)
+
+ATOMIC_OP_RETURN(add)
+ATOMIC_OP_RETURN(sub)
+
+#undef ATOMIC_FETCH_OP
+#undef ATOMIC_OP_RETURN
+#undef ATOMIC_QUALS
+#undef ATOMIC_EXPORT
diff --git a/arch/frv/include/asm/bitops.h b/arch/frv/include/asm/bitops.h
index 96de220ef131..0df8e95e3715 100644
--- a/arch/frv/include/asm/bitops.h
+++ b/arch/frv/include/asm/bitops.h
@@ -25,109 +25,30 @@
#include <asm-generic/bitops/ffz.h>
-#ifndef CONFIG_FRV_OUTOFLINE_ATOMIC_OPS
-static inline
-unsigned long atomic_test_and_ANDNOT_mask(unsigned long mask, volatile unsigned long *v)
-{
- unsigned long old, tmp;
-
- asm volatile(
- "0: \n"
- " orcc gr0,gr0,gr0,icc3 \n" /* set ICC3.Z */
- " ckeq icc3,cc7 \n"
- " ld.p %M0,%1 \n" /* LD.P/ORCR are atomic */
- " orcr cc7,cc7,cc3 \n" /* set CC3 to true */
- " and%I3 %1,%3,%2 \n"
- " cst.p %2,%M0 ,cc3,#1 \n" /* if store happens... */
- " corcc gr29,gr29,gr0 ,cc3,#1 \n" /* ... clear ICC3.Z */
- " beq icc3,#0,0b \n"
- : "+U"(*v), "=&r"(old), "=r"(tmp)
- : "NPr"(~mask)
- : "memory", "cc7", "cc3", "icc3"
- );
-
- return old;
-}
-
-static inline
-unsigned long atomic_test_and_OR_mask(unsigned long mask, volatile unsigned long *v)
-{
- unsigned long old, tmp;
-
- asm volatile(
- "0: \n"
- " orcc gr0,gr0,gr0,icc3 \n" /* set ICC3.Z */
- " ckeq icc3,cc7 \n"
- " ld.p %M0,%1 \n" /* LD.P/ORCR are atomic */
- " orcr cc7,cc7,cc3 \n" /* set CC3 to true */
- " or%I3 %1,%3,%2 \n"
- " cst.p %2,%M0 ,cc3,#1 \n" /* if store happens... */
- " corcc gr29,gr29,gr0 ,cc3,#1 \n" /* ... clear ICC3.Z */
- " beq icc3,#0,0b \n"
- : "+U"(*v), "=&r"(old), "=r"(tmp)
- : "NPr"(mask)
- : "memory", "cc7", "cc3", "icc3"
- );
-
- return old;
-}
-
-static inline
-unsigned long atomic_test_and_XOR_mask(unsigned long mask, volatile unsigned long *v)
-{
- unsigned long old, tmp;
-
- asm volatile(
- "0: \n"
- " orcc gr0,gr0,gr0,icc3 \n" /* set ICC3.Z */
- " ckeq icc3,cc7 \n"
- " ld.p %M0,%1 \n" /* LD.P/ORCR are atomic */
- " orcr cc7,cc7,cc3 \n" /* set CC3 to true */
- " xor%I3 %1,%3,%2 \n"
- " cst.p %2,%M0 ,cc3,#1 \n" /* if store happens... */
- " corcc gr29,gr29,gr0 ,cc3,#1 \n" /* ... clear ICC3.Z */
- " beq icc3,#0,0b \n"
- : "+U"(*v), "=&r"(old), "=r"(tmp)
- : "NPr"(mask)
- : "memory", "cc7", "cc3", "icc3"
- );
-
- return old;
-}
-
-#else
-
-extern unsigned long atomic_test_and_ANDNOT_mask(unsigned long mask, volatile unsigned long *v);
-extern unsigned long atomic_test_and_OR_mask(unsigned long mask, volatile unsigned long *v);
-extern unsigned long atomic_test_and_XOR_mask(unsigned long mask, volatile unsigned long *v);
-
-#endif
-
-#define atomic_clear_mask(mask, v) atomic_test_and_ANDNOT_mask((mask), (v))
-#define atomic_set_mask(mask, v) atomic_test_and_OR_mask((mask), (v))
+#include <asm/atomic.h>
static inline int test_and_clear_bit(unsigned long nr, volatile void *addr)
{
- volatile unsigned long *ptr = addr;
- unsigned long mask = 1UL << (nr & 31);
+ unsigned int *ptr = (void *)addr;
+ unsigned int mask = 1UL << (nr & 31);
ptr += nr >> 5;
- return (atomic_test_and_ANDNOT_mask(mask, ptr) & mask) != 0;
+ return (__atomic32_fetch_and(~mask, ptr) & mask) != 0;
}
static inline int test_and_set_bit(unsigned long nr, volatile void *addr)
{
- volatile unsigned long *ptr = addr;
- unsigned long mask = 1UL << (nr & 31);
+ unsigned int *ptr = (void *)addr;
+ unsigned int mask = 1UL << (nr & 31);
ptr += nr >> 5;
- return (atomic_test_and_OR_mask(mask, ptr) & mask) != 0;
+ return (__atomic32_fetch_or(mask, ptr) & mask) != 0;
}
static inline int test_and_change_bit(unsigned long nr, volatile void *addr)
{
- volatile unsigned long *ptr = addr;
- unsigned long mask = 1UL << (nr & 31);
+ unsigned int *ptr = (void *)addr;
+ unsigned int mask = 1UL << (nr & 31);
ptr += nr >> 5;
- return (atomic_test_and_XOR_mask(mask, ptr) & mask) != 0;
+ return (__atomic32_fetch_xor(mask, ptr) & mask) != 0;
}
static inline void clear_bit(unsigned long nr, volatile void *addr)
diff --git a/arch/frv/kernel/dma.c b/arch/frv/kernel/dma.c
index 156184e17e57..370dc9fa0b11 100644
--- a/arch/frv/kernel/dma.c
+++ b/arch/frv/kernel/dma.c
@@ -109,13 +109,13 @@ static struct frv_dma_channel frv_dma_channels[FRV_DMA_NCHANS] = {
static DEFINE_RWLOCK(frv_dma_channels_lock);
-unsigned long frv_dma_inprogress;
+unsigned int frv_dma_inprogress;
#define frv_clear_dma_inprogress(channel) \
- atomic_clear_mask(1 << (channel), &frv_dma_inprogress);
+ (void)__atomic32_fetch_and(~(1 << (channel)), &frv_dma_inprogress);
#define frv_set_dma_inprogress(channel) \
- atomic_set_mask(1 << (channel), &frv_dma_inprogress);
+ (void)__atomic32_fetch_or(1 << (channel), &frv_dma_inprogress);
/*****************************************************************************/
/*
diff --git a/arch/frv/kernel/frv_ksyms.c b/arch/frv/kernel/frv_ksyms.c
index 86c516d96dcd..cdb4ce9960eb 100644
--- a/arch/frv/kernel/frv_ksyms.c
+++ b/arch/frv/kernel/frv_ksyms.c
@@ -58,11 +58,6 @@ EXPORT_SYMBOL(__outsl_ns);
EXPORT_SYMBOL(__insl_ns);
#ifdef CONFIG_FRV_OUTOFLINE_ATOMIC_OPS
-EXPORT_SYMBOL(atomic_test_and_ANDNOT_mask);
-EXPORT_SYMBOL(atomic_test_and_OR_mask);
-EXPORT_SYMBOL(atomic_test_and_XOR_mask);
-EXPORT_SYMBOL(atomic_add_return);
-EXPORT_SYMBOL(atomic_sub_return);
EXPORT_SYMBOL(__xchg_32);
EXPORT_SYMBOL(__cmpxchg_32);
#endif
diff --git a/arch/frv/lib/Makefile b/arch/frv/lib/Makefile
index 4ff2fb1e6b16..970e8b4f1a02 100644
--- a/arch/frv/lib/Makefile
+++ b/arch/frv/lib/Makefile
@@ -5,4 +5,4 @@
lib-y := \
__ashldi3.o __lshrdi3.o __muldi3.o __ashrdi3.o __negdi2.o __ucmpdi2.o \
checksum.o memcpy.o memset.o atomic-ops.o atomic64-ops.o \
- outsl_ns.o outsl_sw.o insl_ns.o insl_sw.o cache.o
+ outsl_ns.o outsl_sw.o insl_ns.o insl_sw.o cache.o atomic-lib.o
diff --git a/arch/frv/lib/atomic-lib.c b/arch/frv/lib/atomic-lib.c
new file mode 100644
index 000000000000..4d1b887c248b
--- /dev/null
+++ b/arch/frv/lib/atomic-lib.c
@@ -0,0 +1,7 @@
+
+#include <linux/export.h>
+#include <asm/atomic.h>
+
+#define __ATOMIC_LIB__
+
+#include <asm/atomic_defs.h>
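
This new file is what turns the macros in <asm/atomic_defs.h> into out-of-line code: with __ATOMIC_LIB__ defined and CONFIG_FRV_OUTOFLINE_ATOMIC_OPS=y, ATOMIC_QUALS becomes empty and ATOMIC_EXPORT(x) becomes EXPORT_SYMBOL(x), so each ATOMIC_FETCH_OP()/ATOMIC_OP_RETURN() instantiation is emitted as an ordinary exported function instead of a static inline. Sketched result for one of them (asm body abbreviated; it is the same loop shown in atomic_defs.h above):

	/* emitted into atomic-lib.o, roughly: */
	int __atomic32_fetch_or(int i, int *v)
	{
		int old, tmp;

		asm volatile(
			/* same LD.P / or / CST.P retry loop as the inline variant */
			"..."
			: "+U"(*v), "=&r"(old), "=r"(tmp)
			: "NPr"(i)
			: "memory", "cc7", "cc3", "icc3");
		return old;
	}
	EXPORT_SYMBOL(__atomic32_fetch_or);
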
diff --git a/arch/frv/lib/atomic-ops.S b/arch/frv/lib/atomic-ops.S
index 5e9e6ab5dd0e..b7439a960b5b 100644
--- a/arch/frv/lib/atomic-ops.S
+++ b/arch/frv/lib/atomic-ops.S
@@ -19,116 +19,6 @@
###############################################################################
#
-# unsigned long atomic_test_and_ANDNOT_mask(unsigned long mask, volatile unsigned long *v);
-#
-###############################################################################
- .globl atomic_test_and_ANDNOT_mask
- .type atomic_test_and_ANDNOT_mask,@function
-atomic_test_and_ANDNOT_mask:
- not.p gr8,gr10
-0:
- orcc gr0,gr0,gr0,icc3 /* set ICC3.Z */
- ckeq icc3,cc7
- ld.p @(gr9,gr0),gr8 /* LD.P/ORCR must be atomic */
- orcr cc7,cc7,cc3 /* set CC3 to true */
- and gr8,gr10,gr11
- cst.p gr11,@(gr9,gr0) ,cc3,#1
- corcc gr29,gr29,gr0 ,cc3,#1 /* clear ICC3.Z if store happens */
- beq icc3,#0,0b
- bralr
-
- .size atomic_test_and_ANDNOT_mask, .-atomic_test_and_ANDNOT_mask
-
-###############################################################################
-#
-# unsigned long atomic_test_and_OR_mask(unsigned long mask, volatile unsigned long *v);
-#
-###############################################################################
- .globl atomic_test_and_OR_mask
- .type atomic_test_and_OR_mask,@function
-atomic_test_and_OR_mask:
- or.p gr8,gr8,gr10
-0:
- orcc gr0,gr0,gr0,icc3 /* set ICC3.Z */
- ckeq icc3,cc7
- ld.p @(gr9,gr0),gr8 /* LD.P/ORCR must be atomic */
- orcr cc7,cc7,cc3 /* set CC3 to true */
- or gr8,gr10,gr11
- cst.p gr11,@(gr9,gr0) ,cc3,#1
- corcc gr29,gr29,gr0 ,cc3,#1 /* clear ICC3.Z if store happens */
- beq icc3,#0,0b
- bralr
-
- .size atomic_test_and_OR_mask, .-atomic_test_and_OR_mask
-
-###############################################################################
-#
-# unsigned long atomic_test_and_XOR_mask(unsigned long mask, volatile unsigned long *v);
-#
-###############################################################################
- .globl atomic_test_and_XOR_mask
- .type atomic_test_and_XOR_mask,@function
-atomic_test_and_XOR_mask:
- or.p gr8,gr8,gr10
-0:
- orcc gr0,gr0,gr0,icc3 /* set ICC3.Z */
- ckeq icc3,cc7
- ld.p @(gr9,gr0),gr8 /* LD.P/ORCR must be atomic */
- orcr cc7,cc7,cc3 /* set CC3 to true */
- xor gr8,gr10,gr11
- cst.p gr11,@(gr9,gr0) ,cc3,#1
- corcc gr29,gr29,gr0 ,cc3,#1 /* clear ICC3.Z if store happens */
- beq icc3,#0,0b
- bralr
-
- .size atomic_test_and_XOR_mask, .-atomic_test_and_XOR_mask
-
-###############################################################################
-#
-# int atomic_add_return(int i, atomic_t *v)
-#
-###############################################################################
- .globl atomic_add_return
- .type atomic_add_return,@function
-atomic_add_return:
- or.p gr8,gr8,gr10
-0:
- orcc gr0,gr0,gr0,icc3 /* set ICC3.Z */
- ckeq icc3,cc7
- ld.p @(gr9,gr0),gr8 /* LD.P/ORCR must be atomic */
- orcr cc7,cc7,cc3 /* set CC3 to true */
- add gr8,gr10,gr8
- cst.p gr8,@(gr9,gr0) ,cc3,#1
- corcc gr29,gr29,gr0 ,cc3,#1 /* clear ICC3.Z if store happens */
- beq icc3,#0,0b
- bralr
-
- .size atomic_add_return, .-atomic_add_return
-
-###############################################################################
-#
-# int atomic_sub_return(int i, atomic_t *v)
-#
-###############################################################################
- .globl atomic_sub_return
- .type atomic_sub_return,@function
-atomic_sub_return:
- or.p gr8,gr8,gr10
-0:
- orcc gr0,gr0,gr0,icc3 /* set ICC3.Z */
- ckeq icc3,cc7
- ld.p @(gr9,gr0),gr8 /* LD.P/ORCR must be atomic */
- orcr cc7,cc7,cc3 /* set CC3 to true */
- sub gr8,gr10,gr8
- cst.p gr8,@(gr9,gr0) ,cc3,#1
- corcc gr29,gr29,gr0 ,cc3,#1 /* clear ICC3.Z if store happens */
- beq icc3,#0,0b
- bralr
-
- .size atomic_sub_return, .-atomic_sub_return
-
-###############################################################################
-#
# uint32_t __xchg_32(uint32_t i, uint32_t *v)
#
###############################################################################
diff --git a/arch/frv/lib/atomic64-ops.S b/arch/frv/lib/atomic64-ops.S
index b6194eeac127..c4c472308a33 100644
--- a/arch/frv/lib/atomic64-ops.S
+++ b/arch/frv/lib/atomic64-ops.S
@@ -20,100 +20,6 @@
###############################################################################
#
-# long long atomic64_inc_return(atomic64_t *v)
-#
-###############################################################################
- .globl atomic64_inc_return
- .type atomic64_inc_return,@function
-atomic64_inc_return:
- or.p gr8,gr8,gr10
-0:
- orcc gr0,gr0,gr0,icc3 /* set ICC3.Z */
- ckeq icc3,cc7
- ldd.p @(gr10,gr0),gr8 /* LDD.P/ORCR must be atomic */
- orcr cc7,cc7,cc3 /* set CC3 to true */
- addicc gr9,#1,gr9,icc0
- addxi gr8,#0,gr8,icc0
- cstd.p gr8,@(gr10,gr0) ,cc3,#1
- corcc gr29,gr29,gr0 ,cc3,#1 /* clear ICC3.Z if store happens */
- beq icc3,#0,0b
- bralr
-
- .size atomic64_inc_return, .-atomic64_inc_return
-
-###############################################################################
-#
-# long long atomic64_dec_return(atomic64_t *v)
-#
-###############################################################################
- .globl atomic64_dec_return
- .type atomic64_dec_return,@function
-atomic64_dec_return:
- or.p gr8,gr8,gr10
-0:
- orcc gr0,gr0,gr0,icc3 /* set ICC3.Z */
- ckeq icc3,cc7
- ldd.p @(gr10,gr0),gr8 /* LDD.P/ORCR must be atomic */
- orcr cc7,cc7,cc3 /* set CC3 to true */
- subicc gr9,#1,gr9,icc0
- subxi gr8,#0,gr8,icc0
- cstd.p gr8,@(gr10,gr0) ,cc3,#1
- corcc gr29,gr29,gr0 ,cc3,#1 /* clear ICC3.Z if store happens */
- beq icc3,#0,0b
- bralr
-
- .size atomic64_dec_return, .-atomic64_dec_return
-
-###############################################################################
-#
-# long long atomic64_add_return(long long i, atomic64_t *v)
-#
-###############################################################################
- .globl atomic64_add_return
- .type atomic64_add_return,@function
-atomic64_add_return:
- or.p gr8,gr8,gr4
- or gr9,gr9,gr5
-0:
- orcc gr0,gr0,gr0,icc3 /* set ICC3.Z */
- ckeq icc3,cc7
- ldd.p @(gr10,gr0),gr8 /* LDD.P/ORCR must be atomic */
- orcr cc7,cc7,cc3 /* set CC3 to true */
- addcc gr9,gr5,gr9,icc0
- addx gr8,gr4,gr8,icc0
- cstd.p gr8,@(gr10,gr0) ,cc3,#1
- corcc gr29,gr29,gr0 ,cc3,#1 /* clear ICC3.Z if store happens */
- beq icc3,#0,0b
- bralr
-
- .size atomic64_add_return, .-atomic64_add_return
-
-###############################################################################
-#
-# long long atomic64_sub_return(long long i, atomic64_t *v)
-#
-###############################################################################
- .globl atomic64_sub_return
- .type atomic64_sub_return,@function
-atomic64_sub_return:
- or.p gr8,gr8,gr4
- or gr9,gr9,gr5
-0:
- orcc gr0,gr0,gr0,icc3 /* set ICC3.Z */
- ckeq icc3,cc7
- ldd.p @(gr10,gr0),gr8 /* LDD.P/ORCR must be atomic */
- orcr cc7,cc7,cc3 /* set CC3 to true */
- subcc gr9,gr5,gr9,icc0
- subx gr8,gr4,gr8,icc0
- cstd.p gr8,@(gr10,gr0) ,cc3,#1
- corcc gr29,gr29,gr0 ,cc3,#1 /* clear ICC3.Z if store happens */
- beq icc3,#0,0b
- bralr
-
- .size atomic64_sub_return, .-atomic64_sub_return
-
-###############################################################################
-#
# uint64_t __xchg_64(uint64_t i, uint64_t *v)
#
###############################################################################
diff --git a/arch/h8300/include/asm/atomic.h b/arch/h8300/include/asm/atomic.h
index 7ca73f8546cc..702ee539f87d 100644
--- a/arch/h8300/include/asm/atomic.h
+++ b/arch/h8300/include/asm/atomic.h
@@ -16,83 +16,52 @@
#include <linux/kernel.h>
-static inline int atomic_add_return(int i, atomic_t *v)
-{
- h8300flags flags;
- int ret;
-
- flags = arch_local_irq_save();
- ret = v->counter += i;
- arch_local_irq_restore(flags);
- return ret;
+#define ATOMIC_OP_RETURN(op, c_op) \
+static inline int atomic_##op##_return(int i, atomic_t *v) \
+{ \
+ h8300flags flags; \
+ int ret; \
+ \
+ flags = arch_local_irq_save(); \
+ ret = v->counter c_op i; \
+ arch_local_irq_restore(flags); \
+ return ret; \
}
-#define atomic_add(i, v) atomic_add_return(i, v)
-#define atomic_add_negative(a, v) (atomic_add_return((a), (v)) < 0)
-
-static inline int atomic_sub_return(int i, atomic_t *v)
-{
- h8300flags flags;
- int ret;
-
- flags = arch_local_irq_save();
- ret = v->counter -= i;
- arch_local_irq_restore(flags);
- return ret;
+#define ATOMIC_OP(op, c_op) \
+static inline void atomic_##op(int i, atomic_t *v) \
+{ \
+ h8300flags flags; \
+ \
+ flags = arch_local_irq_save(); \
+ v->counter c_op i; \
+ arch_local_irq_restore(flags); \
}
-#define atomic_sub(i, v) atomic_sub_return(i, v)
-#define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)
+ATOMIC_OP_RETURN(add, +=)
+ATOMIC_OP_RETURN(sub, -=)
-static inline int atomic_inc_return(atomic_t *v)
-{
- h8300flags flags;
- int ret;
+ATOMIC_OP(and, &=)
+ATOMIC_OP(or, |=)
+ATOMIC_OP(xor, ^=)
- flags = arch_local_irq_save();
- v->counter++;
- ret = v->counter;
- arch_local_irq_restore(flags);
- return ret;
-}
-
-#define atomic_inc(v) atomic_inc_return(v)
+#undef ATOMIC_OP_RETURN
+#undef ATOMIC_OP
-/*
- * atomic_inc_and_test - increment and test
- * @v: pointer of type atomic_t
- *
- * Atomically increments @v by 1
- * and returns true if the result is zero, or false for all
- * other cases.
- */
-#define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
+#define atomic_add(i, v) (void)atomic_add_return(i, v)
+#define atomic_add_negative(a, v) (atomic_add_return((a), (v)) < 0)
-static inline int atomic_dec_return(atomic_t *v)
-{
- h8300flags flags;
- int ret;
+#define atomic_sub(i, v) (void)atomic_sub_return(i, v)
+#define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)
- flags = arch_local_irq_save();
- --v->counter;
- ret = v->counter;
- arch_local_irq_restore(flags);
- return ret;
-}
+#define atomic_inc_return(v) atomic_add_return(1, v)
+#define atomic_dec_return(v) atomic_sub_return(1, v)
-#define atomic_dec(v) atomic_dec_return(v)
+#define atomic_inc(v) (void)atomic_inc_return(v)
+#define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
-static inline int atomic_dec_and_test(atomic_t *v)
-{
- h8300flags flags;
- int ret;
-
- flags = arch_local_irq_save();
- --v->counter;
- ret = v->counter;
- arch_local_irq_restore(flags);
- return ret == 0;
-}
+#define atomic_dec(v) (void)atomic_dec_return(v)
+#define atomic_dec_and_test(v) (atomic_dec_return(v) == 0)
static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
{
@@ -120,40 +89,4 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
return ret;
}
-static inline void atomic_clear_mask(unsigned long mask, unsigned long *v)
-{
- unsigned char ccr;
- unsigned long tmp;
-
- __asm__ __volatile__("stc ccr,%w3\n\t"
- "orc #0x80,ccr\n\t"
- "mov.l %0,%1\n\t"
- "and.l %2,%1\n\t"
- "mov.l %1,%0\n\t"
- "ldc %w3,ccr"
- : "=m"(*v), "=r"(tmp)
- : "g"(~(mask)), "r"(ccr));
-}
-
-static inline void atomic_set_mask(unsigned long mask, unsigned long *v)
-{
- unsigned char ccr;
- unsigned long tmp;
-
- __asm__ __volatile__("stc ccr,%w3\n\t"
- "orc #0x80,ccr\n\t"
- "mov.l %0,%1\n\t"
- "or.l %2,%1\n\t"
- "mov.l %1,%0\n\t"
- "ldc %w3,ccr"
- : "=m"(*v), "=r"(tmp)
- : "g"(~(mask)), "r"(ccr));
-}
-
-/* Atomic operations are already serializing */
-#define smp_mb__before_atomic_dec() barrier()
-#define smp_mb__after_atomic_dec() barrier()
-#define smp_mb__before_atomic_inc() barrier()
-#define smp_mb__after_atomic_inc() barrier()
-
#endif /* __ARCH_H8300_ATOMIC __ */
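
Since h8300 is uniprocessor, atomicity here only has to hold against interrupts, so the new ATOMIC_OP() macro is just an irq-save/restore bracket; ATOMIC_OP(and, &=), for instance, expands to:

	static inline void atomic_and(int i, atomic_t *v)
	{
		h8300flags flags;

		flags = arch_local_irq_save();
		v->counter &= i;		/* c_op */
		arch_local_irq_restore(flags);
	}
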
diff --git a/arch/hexagon/include/asm/atomic.h b/arch/hexagon/include/asm/atomic.h
index 93d07025f183..811d61f6422d 100644
--- a/arch/hexagon/include/asm/atomic.h
+++ b/arch/hexagon/include/asm/atomic.h
@@ -132,6 +132,10 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \
ATOMIC_OPS(add)
ATOMIC_OPS(sub)
+ATOMIC_OP(and)
+ATOMIC_OP(or)
+ATOMIC_OP(xor)
+
#undef ATOMIC_OPS
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP
diff --git a/arch/ia64/include/asm/atomic.h b/arch/ia64/include/asm/atomic.h
index 0bf03501fe5c..be4beeb77d57 100644
--- a/arch/ia64/include/asm/atomic.h
+++ b/arch/ia64/include/asm/atomic.h
@@ -45,8 +45,6 @@ ia64_atomic_##op (int i, atomic_t *v) \
ATOMIC_OP(add, +)
ATOMIC_OP(sub, -)
-#undef ATOMIC_OP
-
#define atomic_add_return(i,v) \
({ \
int __ia64_aar_i = (i); \
@@ -71,6 +69,16 @@ ATOMIC_OP(sub, -)
: ia64_atomic_sub(__ia64_asr_i, v); \
})
+ATOMIC_OP(and, &)
+ATOMIC_OP(or, |)
+ATOMIC_OP(xor, ^)
+
+#define atomic_and(i,v) (void)ia64_atomic_and(i,v)
+#define atomic_or(i,v) (void)ia64_atomic_or(i,v)
+#define atomic_xor(i,v) (void)ia64_atomic_xor(i,v)
+
+#undef ATOMIC_OP
+
#define ATOMIC64_OP(op, c_op) \
static __inline__ long \
ia64_atomic64_##op (__s64 i, atomic64_t *v) \
@@ -89,8 +97,6 @@ ia64_atomic64_##op (__s64 i, atomic64_t *v) \
ATOMIC64_OP(add, +)
ATOMIC64_OP(sub, -)
-#undef ATOMIC64_OP
-
#define atomic64_add_return(i,v) \
({ \
long __ia64_aar_i = (i); \
@@ -115,6 +121,16 @@ ATOMIC64_OP(sub, -)
: ia64_atomic64_sub(__ia64_asr_i, v); \
})
+ATOMIC64_OP(and, &)
+ATOMIC64_OP(or, |)
+ATOMIC64_OP(xor, ^)
+
+#define atomic64_and(i,v) (void)ia64_atomic64_and(i,v)
+#define atomic64_or(i,v) (void)ia64_atomic64_or(i,v)
+#define atomic64_xor(i,v) (void)ia64_atomic64_xor(i,v)
+
+#undef ATOMIC64_OP
+
#define atomic_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), old, new))
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
diff --git a/arch/ia64/include/asm/barrier.h b/arch/ia64/include/asm/barrier.h
index 843ba435e43b..df896a1c41d3 100644
--- a/arch/ia64/include/asm/barrier.h
+++ b/arch/ia64/include/asm/barrier.h
@@ -66,12 +66,12 @@
do { \
compiletime_assert_atomic_type(*p); \
barrier(); \
- ACCESS_ONCE(*p) = (v); \
+ WRITE_ONCE(*p, v); \
} while (0)
#define smp_load_acquire(p) \
({ \
- typeof(*p) ___p1 = ACCESS_ONCE(*p); \
+ typeof(*p) ___p1 = READ_ONCE(*p); \
compiletime_assert_atomic_type(*p); \
barrier(); \
___p1; \
diff --git a/arch/m32r/include/asm/atomic.h b/arch/m32r/include/asm/atomic.h
index 31bb74adba08..025e2a170493 100644
--- a/arch/m32r/include/asm/atomic.h
+++ b/arch/m32r/include/asm/atomic.h
@@ -94,6 +94,10 @@ static __inline__ int atomic_##op##_return(int i, atomic_t *v) \
ATOMIC_OPS(add)
ATOMIC_OPS(sub)
+ATOMIC_OP(and)
+ATOMIC_OP(or)
+ATOMIC_OP(xor)
+
#undef ATOMIC_OPS
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP
@@ -239,45 +243,4 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
return c;
}
-
-static __inline__ void atomic_clear_mask(unsigned long mask, atomic_t *addr)
-{
- unsigned long flags;
- unsigned long tmp;
-
- local_irq_save(flags);
- __asm__ __volatile__ (
- "# atomic_clear_mask \n\t"
- DCACHE_CLEAR("%0", "r5", "%1")
- M32R_LOCK" %0, @%1; \n\t"
- "and %0, %2; \n\t"
- M32R_UNLOCK" %0, @%1; \n\t"
- : "=&r" (tmp)
- : "r" (addr), "r" (~mask)
- : "memory"
- __ATOMIC_CLOBBER
- );
- local_irq_restore(flags);
-}
-
-static __inline__ void atomic_set_mask(unsigned long mask, atomic_t *addr)
-{
- unsigned long flags;
- unsigned long tmp;
-
- local_irq_save(flags);
- __asm__ __volatile__ (
- "# atomic_set_mask \n\t"
- DCACHE_CLEAR("%0", "r5", "%1")
- M32R_LOCK" %0, @%1; \n\t"
- "or %0, %2; \n\t"
- M32R_UNLOCK" %0, @%1; \n\t"
- : "=&r" (tmp)
- : "r" (addr), "r" (mask)
- : "memory"
- __ATOMIC_CLOBBER
- );
- local_irq_restore(flags);
-}
-
#endif /* _ASM_M32R_ATOMIC_H */
diff --git a/arch/m32r/kernel/smp.c b/arch/m32r/kernel/smp.c
index c18ddc74ef9a..62d6961e7f2b 100644
--- a/arch/m32r/kernel/smp.c
+++ b/arch/m32r/kernel/smp.c
@@ -156,7 +156,7 @@ void smp_flush_cache_all(void)
cpumask_clear_cpu(smp_processor_id(), &cpumask);
spin_lock(&flushcache_lock);
mask=cpumask_bits(&cpumask);
- atomic_set_mask(*mask, (atomic_t *)&flushcache_cpumask);
+ atomic_or(*mask, (atomic_t *)&flushcache_cpumask);
send_IPI_mask(&cpumask, INVALIDATE_CACHE_IPI, 0);
_flush_cache_copyback_all();
while (flushcache_cpumask)
@@ -407,7 +407,7 @@ static void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm,
flush_vma = vma;
flush_va = va;
mask=cpumask_bits(&cpumask);
- atomic_set_mask(*mask, (atomic_t *)&flush_cpumask);
+ atomic_or(*mask, (atomic_t *)&flush_cpumask);
/*
* We have to send the IPI only to
diff --git a/arch/m68k/include/asm/atomic.h b/arch/m68k/include/asm/atomic.h
index e85f047fb072..039fac120cc0 100644
--- a/arch/m68k/include/asm/atomic.h
+++ b/arch/m68k/include/asm/atomic.h
@@ -77,6 +77,10 @@ static inline int atomic_##op##_return(int i, atomic_t * v) \
ATOMIC_OPS(add, +=, add)
ATOMIC_OPS(sub, -=, sub)
+ATOMIC_OP(and, &=, and)
+ATOMIC_OP(or, |=, or)
+ATOMIC_OP(xor, ^=, eor)
+
#undef ATOMIC_OPS
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP
@@ -170,16 +174,6 @@ static inline int atomic_add_negative(int i, atomic_t *v)
return c != 0;
}
-static inline void atomic_clear_mask(unsigned long mask, unsigned long *v)
-{
- __asm__ __volatile__("andl %1,%0" : "+m" (*v) : ASM_DI (~(mask)));
-}
-
-static inline void atomic_set_mask(unsigned long mask, unsigned long *v)
-{
- __asm__ __volatile__("orl %1,%0" : "+m" (*v) : ASM_DI (mask));
-}
-
static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
{
int c, old;
diff --git a/arch/metag/include/asm/atomic_lnkget.h b/arch/metag/include/asm/atomic_lnkget.h
index 948d8688643c..21c4c268b86c 100644
--- a/arch/metag/include/asm/atomic_lnkget.h
+++ b/arch/metag/include/asm/atomic_lnkget.h
@@ -74,44 +74,14 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \
ATOMIC_OPS(add)
ATOMIC_OPS(sub)
+ATOMIC_OP(and)
+ATOMIC_OP(or)
+ATOMIC_OP(xor)
+
#undef ATOMIC_OPS
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP
-static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
-{
- int temp;
-
- asm volatile (
- "1: LNKGETD %0, [%1]\n"
- " AND %0, %0, %2\n"
- " LNKSETD [%1] %0\n"
- " DEFR %0, TXSTAT\n"
- " ANDT %0, %0, #HI(0x3f000000)\n"
- " CMPT %0, #HI(0x02000000)\n"
- " BNZ 1b\n"
- : "=&d" (temp)
- : "da" (&v->counter), "bd" (~mask)
- : "cc");
-}
-
-static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
-{
- int temp;
-
- asm volatile (
- "1: LNKGETD %0, [%1]\n"
- " OR %0, %0, %2\n"
- " LNKSETD [%1], %0\n"
- " DEFR %0, TXSTAT\n"
- " ANDT %0, %0, #HI(0x3f000000)\n"
- " CMPT %0, #HI(0x02000000)\n"
- " BNZ 1b\n"
- : "=&d" (temp)
- : "da" (&v->counter), "bd" (mask)
- : "cc");
-}
-
static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
{
int result, temp;
diff --git a/arch/metag/include/asm/atomic_lock1.h b/arch/metag/include/asm/atomic_lock1.h
index f5d5898c1020..f8efe380fe8b 100644
--- a/arch/metag/include/asm/atomic_lock1.h
+++ b/arch/metag/include/asm/atomic_lock1.h
@@ -68,31 +68,14 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \
ATOMIC_OPS(add, +=)
ATOMIC_OPS(sub, -=)
+ATOMIC_OP(and, &=)
+ATOMIC_OP(or, |=)
+ATOMIC_OP(xor, ^=)
#undef ATOMIC_OPS
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP
-static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
-{
- unsigned long flags;
-
- __global_lock1(flags);
- fence();
- v->counter &= ~mask;
- __global_unlock1(flags);
-}
-
-static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
-{
- unsigned long flags;
-
- __global_lock1(flags);
- fence();
- v->counter |= mask;
- __global_unlock1(flags);
-}
-
static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
{
int ret;
diff --git a/arch/metag/include/asm/barrier.h b/arch/metag/include/asm/barrier.h
index 5a696e507930..172b7e5efc53 100644
--- a/arch/metag/include/asm/barrier.h
+++ b/arch/metag/include/asm/barrier.h
@@ -90,12 +90,12 @@ static inline void fence(void)
do { \
compiletime_assert_atomic_type(*p); \
smp_mb(); \
- ACCESS_ONCE(*p) = (v); \
+ WRITE_ONCE(*p, v); \
} while (0)
#define smp_load_acquire(p) \
({ \
- typeof(*p) ___p1 = ACCESS_ONCE(*p); \
+ typeof(*p) ___p1 = READ_ONCE(*p); \
compiletime_assert_atomic_type(*p); \
smp_mb(); \
___p1; \
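
The smp_store_release()/smp_load_acquire() pair above now builds on WRITE_ONCE()/READ_ONCE() instead of ACCESS_ONCE(). A minimal usage sketch of the pair as defined here; msg and ready are hypothetical variables introduced only for illustration, not part of this patch:

static int msg;
static int ready;

static void producer(void)
{
	msg = 42;			/* plain store, ordered by the release below */
	smp_store_release(&ready, 1);	/* on metag: smp_mb(), then WRITE_ONCE(ready, 1) */
}

static int consumer(void)
{
	if (smp_load_acquire(&ready))	/* on metag: READ_ONCE(ready), then smp_mb() */
		return msg;		/* guaranteed to observe 42 once ready is seen */
	return -1;
}
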
diff --git a/arch/mips/include/asm/atomic.h b/arch/mips/include/asm/atomic.h
index 26d436336f2e..4c42fd9af777 100644
--- a/arch/mips/include/asm/atomic.h
+++ b/arch/mips/include/asm/atomic.h
@@ -137,6 +137,10 @@ static __inline__ int atomic_##op##_return(int i, atomic_t * v) \
ATOMIC_OPS(add, +=, addu)
ATOMIC_OPS(sub, -=, subu)
+ATOMIC_OP(and, &=, and)
+ATOMIC_OP(or, |=, or)
+ATOMIC_OP(xor, ^=, xor)
+
#undef ATOMIC_OPS
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP
@@ -416,6 +420,9 @@ static __inline__ long atomic64_##op##_return(long i, atomic64_t * v) \
ATOMIC64_OPS(add, +=, daddu)
ATOMIC64_OPS(sub, -=, dsubu)
+ATOMIC64_OP(and, &=, and)
+ATOMIC64_OP(or, |=, or)
+ATOMIC64_OP(xor, ^=, xor)
#undef ATOMIC64_OPS
#undef ATOMIC64_OP_RETURN
diff --git a/arch/mips/include/asm/barrier.h b/arch/mips/include/asm/barrier.h
index 7ecba84656d4..752e0b86c171 100644
--- a/arch/mips/include/asm/barrier.h
+++ b/arch/mips/include/asm/barrier.h
@@ -133,12 +133,12 @@
do { \
compiletime_assert_atomic_type(*p); \
smp_mb(); \
- ACCESS_ONCE(*p) = (v); \
+ WRITE_ONCE(*p, v); \
} while (0)
#define smp_load_acquire(p) \
({ \
- typeof(*p) ___p1 = ACCESS_ONCE(*p); \
+ typeof(*p) ___p1 = READ_ONCE(*p); \
compiletime_assert_atomic_type(*p); \
smp_mb(); \
___p1; \
diff --git a/arch/mips/include/asm/jump_label.h b/arch/mips/include/asm/jump_label.h
index 608aa57799c8..e77672539e8e 100644
--- a/arch/mips/include/asm/jump_label.h
+++ b/arch/mips/include/asm/jump_label.h
@@ -26,14 +26,29 @@
#define NOP_INSN "nop"
#endif
-static __always_inline bool arch_static_branch(struct static_key *key)
+static __always_inline bool arch_static_branch(struct static_key *key, bool branch)
{
asm_volatile_goto("1:\t" NOP_INSN "\n\t"
"nop\n\t"
".pushsection __jump_table, \"aw\"\n\t"
WORD_INSN " 1b, %l[l_yes], %0\n\t"
".popsection\n\t"
- : : "i" (key) : : l_yes);
+ : : "i" (&((char *)key)[branch]) : : l_yes);
+
+ return false;
+l_yes:
+ return true;
+}
+
+static __always_inline bool arch_static_branch_jump(struct static_key *key, bool branch)
+{
+ asm_volatile_goto("1:\tj %l[l_yes]\n\t"
+ "nop\n\t"
+ ".pushsection __jump_table, \"aw\"\n\t"
+ WORD_INSN " 1b, %l[l_yes], %0\n\t"
+ ".popsection\n\t"
+ : : "i" (&((char *)key)[branch]) : : l_yes);
+
return false;
l_yes:
return true;
diff --git a/arch/mips/kernel/jump_label.c b/arch/mips/kernel/jump_label.c
index dda800e9e731..3e586daa3a32 100644
--- a/arch/mips/kernel/jump_label.c
+++ b/arch/mips/kernel/jump_label.c
@@ -51,7 +51,7 @@ void arch_jump_label_transform(struct jump_entry *e,
/* Target must have the right alignment and ISA must be preserved. */
BUG_ON((e->target & J_ALIGN_MASK) != J_ISA_BIT);
- if (type == JUMP_LABEL_ENABLE) {
+ if (type == JUMP_LABEL_JMP) {
insn.j_format.opcode = J_ISA_BIT ? mm_j32_op : j_op;
insn.j_format.target = e->target >> J_RANGE_SHIFT;
} else {
diff --git a/arch/mn10300/include/asm/atomic.h b/arch/mn10300/include/asm/atomic.h
index 5be655e83e70..375e59140c9c 100644
--- a/arch/mn10300/include/asm/atomic.h
+++ b/arch/mn10300/include/asm/atomic.h
@@ -89,6 +89,10 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \
ATOMIC_OPS(add)
ATOMIC_OPS(sub)
+ATOMIC_OP(and)
+ATOMIC_OP(or)
+ATOMIC_OP(xor)
+
#undef ATOMIC_OPS
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP
@@ -127,73 +131,6 @@ static inline void atomic_dec(atomic_t *v)
#define atomic_xchg(ptr, v) (xchg(&(ptr)->counter, (v)))
#define atomic_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), (old), (new)))
-/**
- * atomic_clear_mask - Atomically clear bits in memory
- * @mask: Mask of the bits to be cleared
- * @v: pointer to word in memory
- *
- * Atomically clears the bits set in mask from the memory word specified.
- */
-static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
-{
-#ifdef CONFIG_SMP
- int status;
-
- asm volatile(
- "1: mov %3,(_AAR,%2) \n"
- " mov (_ADR,%2),%0 \n"
- " and %4,%0 \n"
- " mov %0,(_ADR,%2) \n"
- " mov (_ADR,%2),%0 \n" /* flush */
- " mov (_ASR,%2),%0 \n"
- " or %0,%0 \n"
- " bne 1b \n"
- : "=&r"(status), "=m"(*addr)
- : "a"(ATOMIC_OPS_BASE_ADDR), "r"(addr), "r"(~mask)
- : "memory", "cc");
-#else
- unsigned long flags;
-
- mask = ~mask;
- flags = arch_local_cli_save();
- *addr &= mask;
- arch_local_irq_restore(flags);
-#endif
-}
-
-/**
- * atomic_set_mask - Atomically set bits in memory
- * @mask: Mask of the bits to be set
- * @v: pointer to word in memory
- *
- * Atomically sets the bits set in mask from the memory word specified.
- */
-static inline void atomic_set_mask(unsigned long mask, unsigned long *addr)
-{
-#ifdef CONFIG_SMP
- int status;
-
- asm volatile(
- "1: mov %3,(_AAR,%2) \n"
- " mov (_ADR,%2),%0 \n"
- " or %4,%0 \n"
- " mov %0,(_ADR,%2) \n"
- " mov (_ADR,%2),%0 \n" /* flush */
- " mov (_ASR,%2),%0 \n"
- " or %0,%0 \n"
- " bne 1b \n"
- : "=&r"(status), "=m"(*addr)
- : "a"(ATOMIC_OPS_BASE_ADDR), "r"(addr), "r"(mask)
- : "memory", "cc");
-#else
- unsigned long flags;
-
- flags = arch_local_cli_save();
- *addr |= mask;
- arch_local_irq_restore(flags);
-#endif
-}
-
#endif /* __KERNEL__ */
#endif /* CONFIG_SMP */
#endif /* _ASM_ATOMIC_H */
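
The callers below (and throughout this series) replace the removed atomic_set_mask()/atomic_clear_mask() helpers with atomic_or()/atomic_andnot(). A minimal userspace sketch of that equivalence, using GCC __atomic builtins instead of the real per-architecture implementations; the atomic_t definition here is an illustrative stand-in, not kernel code:

typedef struct { int counter; } atomic_t;

static inline void atomic_or(int i, atomic_t *v)
{
	(void)__atomic_fetch_or(&v->counter, i, __ATOMIC_RELAXED);
}

static inline void atomic_andnot(int i, atomic_t *v)
{
	(void)__atomic_fetch_and(&v->counter, ~i, __ATOMIC_RELAXED);
}

/* old call                        replacement            */
/* atomic_set_mask(mask, v)   ->   atomic_or(mask, v)     */
/* atomic_clear_mask(mask, v) ->   atomic_andnot(mask, v) */
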
diff --git a/arch/mn10300/mm/tlb-smp.c b/arch/mn10300/mm/tlb-smp.c
index e5d0ef722bfa..9a39ea9031d4 100644
--- a/arch/mn10300/mm/tlb-smp.c
+++ b/arch/mn10300/mm/tlb-smp.c
@@ -119,7 +119,7 @@ static void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm,
flush_mm = mm;
flush_va = va;
#if NR_CPUS <= BITS_PER_LONG
- atomic_set_mask(cpumask.bits[0], &flush_cpumask.bits[0]);
+ atomic_or(cpumask.bits[0], (atomic_t *)&flush_cpumask.bits[0]);
#else
#error Not supported.
#endif
diff --git a/arch/parisc/configs/c8000_defconfig b/arch/parisc/configs/c8000_defconfig
index 269c23d23fcb..1a8f6f95689e 100644
--- a/arch/parisc/configs/c8000_defconfig
+++ b/arch/parisc/configs/c8000_defconfig
@@ -242,7 +242,6 @@ CONFIG_LOCKUP_DETECTOR=y
CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC=y
CONFIG_PANIC_ON_OOPS=y
CONFIG_DEBUG_RT_MUTEXES=y
-CONFIG_RT_MUTEX_TESTER=y
CONFIG_PROVE_RCU_DELAY=y
CONFIG_DEBUG_BLOCK_EXT_DEVT=y
CONFIG_LATENCYTOP=y
diff --git a/arch/parisc/configs/generic-32bit_defconfig b/arch/parisc/configs/generic-32bit_defconfig
index 33b148f825ba..0ffb08ff5125 100644
--- a/arch/parisc/configs/generic-32bit_defconfig
+++ b/arch/parisc/configs/generic-32bit_defconfig
@@ -295,7 +295,6 @@ CONFIG_DEBUG_SHIRQ=y
CONFIG_DETECT_HUNG_TASK=y
CONFIG_TIMER_STATS=y
CONFIG_DEBUG_RT_MUTEXES=y
-CONFIG_RT_MUTEX_TESTER=y
CONFIG_DEBUG_SPINLOCK=y
CONFIG_DEBUG_MUTEXES=y
CONFIG_RCU_CPU_STALL_INFO=y
diff --git a/arch/parisc/include/asm/atomic.h b/arch/parisc/include/asm/atomic.h
index 226f8ca993f6..2536965d00ea 100644
--- a/arch/parisc/include/asm/atomic.h
+++ b/arch/parisc/include/asm/atomic.h
@@ -126,6 +126,10 @@ static __inline__ int atomic_##op##_return(int i, atomic_t *v) \
ATOMIC_OPS(add, +=)
ATOMIC_OPS(sub, -=)
+ATOMIC_OP(and, &=)
+ATOMIC_OP(or, |=)
+ATOMIC_OP(xor, ^=)
+
#undef ATOMIC_OPS
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP
@@ -185,6 +189,9 @@ static __inline__ s64 atomic64_##op##_return(s64 i, atomic64_t *v) \
ATOMIC64_OPS(add, +=)
ATOMIC64_OPS(sub, -=)
+ATOMIC64_OP(and, &=)
+ATOMIC64_OP(or, |=)
+ATOMIC64_OP(xor, ^=)
#undef ATOMIC64_OPS
#undef ATOMIC64_OP_RETURN
diff --git a/arch/powerpc/include/asm/atomic.h b/arch/powerpc/include/asm/atomic.h
index 512d2782b043..55f106ed12bf 100644
--- a/arch/powerpc/include/asm/atomic.h
+++ b/arch/powerpc/include/asm/atomic.h
@@ -67,6 +67,10 @@ static __inline__ int atomic_##op##_return(int a, atomic_t *v) \
ATOMIC_OPS(add, add)
ATOMIC_OPS(sub, subf)
+ATOMIC_OP(and, and)
+ATOMIC_OP(or, or)
+ATOMIC_OP(xor, xor)
+
#undef ATOMIC_OPS
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP
@@ -304,6 +308,9 @@ static __inline__ long atomic64_##op##_return(long a, atomic64_t *v) \
ATOMIC64_OPS(add, add)
ATOMIC64_OPS(sub, subf)
+ATOMIC64_OP(and, and)
+ATOMIC64_OP(or, or)
+ATOMIC64_OP(xor, xor)
#undef ATOMIC64_OPS
#undef ATOMIC64_OP_RETURN
diff --git a/arch/powerpc/include/asm/barrier.h b/arch/powerpc/include/asm/barrier.h
index 51ccc7232042..0eca6efc0631 100644
--- a/arch/powerpc/include/asm/barrier.h
+++ b/arch/powerpc/include/asm/barrier.h
@@ -76,12 +76,12 @@
do { \
compiletime_assert_atomic_type(*p); \
smp_lwsync(); \
- ACCESS_ONCE(*p) = (v); \
+ WRITE_ONCE(*p, v); \
} while (0)
#define smp_load_acquire(p) \
({ \
- typeof(*p) ___p1 = ACCESS_ONCE(*p); \
+ typeof(*p) ___p1 = READ_ONCE(*p); \
compiletime_assert_atomic_type(*p); \
smp_lwsync(); \
___p1; \
diff --git a/arch/powerpc/include/asm/jump_label.h b/arch/powerpc/include/asm/jump_label.h
index efbf9a322a23..47e155f15433 100644
--- a/arch/powerpc/include/asm/jump_label.h
+++ b/arch/powerpc/include/asm/jump_label.h
@@ -18,14 +18,29 @@
#define JUMP_ENTRY_TYPE stringify_in_c(FTR_ENTRY_LONG)
#define JUMP_LABEL_NOP_SIZE 4
-static __always_inline bool arch_static_branch(struct static_key *key)
+static __always_inline bool arch_static_branch(struct static_key *key, bool branch)
{
asm_volatile_goto("1:\n\t"
"nop\n\t"
".pushsection __jump_table, \"aw\"\n\t"
JUMP_ENTRY_TYPE "1b, %l[l_yes], %c0\n\t"
".popsection \n\t"
- : : "i" (key) : : l_yes);
+ : : "i" (&((char *)key)[branch]) : : l_yes);
+
+ return false;
+l_yes:
+ return true;
+}
+
+static __always_inline bool arch_static_branch_jump(struct static_key *key, bool branch)
+{
+ asm_volatile_goto("1:\n\t"
+ "b %l[l_yes]\n\t"
+ ".pushsection __jump_table, \"aw\"\n\t"
+ JUMP_ENTRY_TYPE "1b, %l[l_yes], %c0\n\t"
+ ".popsection \n\t"
+ : : "i" (&((char *)key)[branch]) : : l_yes);
+
return false;
l_yes:
return true;
diff --git a/arch/powerpc/kernel/jump_label.c b/arch/powerpc/kernel/jump_label.c
index a1ed8a8c7cb4..6472472093d0 100644
--- a/arch/powerpc/kernel/jump_label.c
+++ b/arch/powerpc/kernel/jump_label.c
@@ -17,7 +17,7 @@ void arch_jump_label_transform(struct jump_entry *entry,
{
u32 *addr = (u32 *)(unsigned long)entry->code;
- if (type == JUMP_LABEL_ENABLE)
+ if (type == JUMP_LABEL_JMP)
patch_branch(addr, entry->target, 0);
else
patch_instruction(addr, PPC_INST_NOP);
diff --git a/arch/powerpc/kernel/misc_32.S b/arch/powerpc/kernel/misc_32.S
index 7c6bb4b17b49..ed3ab509faca 100644
--- a/arch/powerpc/kernel/misc_32.S
+++ b/arch/powerpc/kernel/misc_32.S
@@ -596,25 +596,6 @@ _GLOBAL(copy_page)
b 2b
/*
- * void atomic_clear_mask(atomic_t mask, atomic_t *addr)
- * void atomic_set_mask(atomic_t mask, atomic_t *addr);
- */
-_GLOBAL(atomic_clear_mask)
-10: lwarx r5,0,r4
- andc r5,r5,r3
- PPC405_ERR77(0,r4)
- stwcx. r5,0,r4
- bne- 10b
- blr
-_GLOBAL(atomic_set_mask)
-10: lwarx r5,0,r4
- or r5,r5,r3
- PPC405_ERR77(0,r4)
- stwcx. r5,0,r4
- bne- 10b
- blr
-
-/*
* Extended precision shifts.
*
* Updated to be valid for shift counts from 0 to 63 inclusive.
diff --git a/arch/s390/include/asm/atomic.h b/arch/s390/include/asm/atomic.h
index adbe3802e377..117fa5c921c1 100644
--- a/arch/s390/include/asm/atomic.h
+++ b/arch/s390/include/asm/atomic.h
@@ -27,6 +27,7 @@
#define __ATOMIC_OR "lao"
#define __ATOMIC_AND "lan"
#define __ATOMIC_ADD "laa"
+#define __ATOMIC_XOR "lax"
#define __ATOMIC_BARRIER "bcr 14,0\n"
#define __ATOMIC_LOOP(ptr, op_val, op_string, __barrier) \
@@ -49,6 +50,7 @@
#define __ATOMIC_OR "or"
#define __ATOMIC_AND "nr"
#define __ATOMIC_ADD "ar"
+#define __ATOMIC_XOR "xr"
#define __ATOMIC_BARRIER "\n"
#define __ATOMIC_LOOP(ptr, op_val, op_string, __barrier) \
@@ -118,15 +120,17 @@ static inline void atomic_add(int i, atomic_t *v)
#define atomic_dec_return(_v) atomic_sub_return(1, _v)
#define atomic_dec_and_test(_v) (atomic_sub_return(1, _v) == 0)
-static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
-{
- __ATOMIC_LOOP(v, ~mask, __ATOMIC_AND, __ATOMIC_NO_BARRIER);
+#define ATOMIC_OP(op, OP) \
+static inline void atomic_##op(int i, atomic_t *v) \
+{ \
+ __ATOMIC_LOOP(v, i, __ATOMIC_##OP, __ATOMIC_NO_BARRIER); \
}
-static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
-{
- __ATOMIC_LOOP(v, mask, __ATOMIC_OR, __ATOMIC_NO_BARRIER);
-}
+ATOMIC_OP(and, AND)
+ATOMIC_OP(or, OR)
+ATOMIC_OP(xor, XOR)
+
+#undef ATOMIC_OP
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
@@ -167,6 +171,7 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
#define __ATOMIC64_OR "laog"
#define __ATOMIC64_AND "lang"
#define __ATOMIC64_ADD "laag"
+#define __ATOMIC64_XOR "laxg"
#define __ATOMIC64_BARRIER "bcr 14,0\n"
#define __ATOMIC64_LOOP(ptr, op_val, op_string, __barrier) \
@@ -189,6 +194,7 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
#define __ATOMIC64_OR "ogr"
#define __ATOMIC64_AND "ngr"
#define __ATOMIC64_ADD "agr"
+#define __ATOMIC64_XOR "xgr"
#define __ATOMIC64_BARRIER "\n"
#define __ATOMIC64_LOOP(ptr, op_val, op_string, __barrier) \
@@ -247,16 +253,6 @@ static inline void atomic64_add(long long i, atomic64_t *v)
__ATOMIC64_LOOP(v, i, __ATOMIC64_ADD, __ATOMIC64_NO_BARRIER);
}
-static inline void atomic64_clear_mask(unsigned long mask, atomic64_t *v)
-{
- __ATOMIC64_LOOP(v, ~mask, __ATOMIC64_AND, __ATOMIC64_NO_BARRIER);
-}
-
-static inline void atomic64_set_mask(unsigned long mask, atomic64_t *v)
-{
- __ATOMIC64_LOOP(v, mask, __ATOMIC64_OR, __ATOMIC64_NO_BARRIER);
-}
-
#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
static inline long long atomic64_cmpxchg(atomic64_t *v,
@@ -270,6 +266,17 @@ static inline long long atomic64_cmpxchg(atomic64_t *v,
return old;
}
+#define ATOMIC64_OP(op, OP) \
+static inline void atomic64_##op(long i, atomic64_t *v) \
+{ \
+ __ATOMIC64_LOOP(v, i, __ATOMIC64_##OP, __ATOMIC64_NO_BARRIER); \
+}
+
+ATOMIC64_OP(and, AND)
+ATOMIC64_OP(or, OR)
+ATOMIC64_OP(xor, XOR)
+
+#undef ATOMIC64_OP
#undef __ATOMIC64_LOOP
static inline int atomic64_add_unless(atomic64_t *v, long long i, long long u)
diff --git a/arch/s390/include/asm/barrier.h b/arch/s390/include/asm/barrier.h
index e6f8615a11eb..d48fe0162331 100644
--- a/arch/s390/include/asm/barrier.h
+++ b/arch/s390/include/asm/barrier.h
@@ -42,12 +42,12 @@
do { \
compiletime_assert_atomic_type(*p); \
barrier(); \
- ACCESS_ONCE(*p) = (v); \
+ WRITE_ONCE(*p, v); \
} while (0)
#define smp_load_acquire(p) \
({ \
- typeof(*p) ___p1 = ACCESS_ONCE(*p); \
+ typeof(*p) ___p1 = READ_ONCE(*p); \
compiletime_assert_atomic_type(*p); \
barrier(); \
___p1; \
diff --git a/arch/s390/include/asm/jump_label.h b/arch/s390/include/asm/jump_label.h
index 69972b7957ee..7f9fd5e3f1bf 100644
--- a/arch/s390/include/asm/jump_label.h
+++ b/arch/s390/include/asm/jump_label.h
@@ -12,14 +12,29 @@
* We use a brcl 0,2 instruction for jump labels at compile time so it
* can be easily distinguished from a hotpatch generated instruction.
*/
-static __always_inline bool arch_static_branch(struct static_key *key)
+static __always_inline bool arch_static_branch(struct static_key *key, bool branch)
{
asm_volatile_goto("0: brcl 0,"__stringify(JUMP_LABEL_NOP_OFFSET)"\n"
".pushsection __jump_table, \"aw\"\n"
".balign 8\n"
".quad 0b, %l[label], %0\n"
".popsection\n"
- : : "X" (key) : : label);
+ : : "X" (&((char *)key)[branch]) : : label);
+
+ return false;
+label:
+ return true;
+}
+
+static __always_inline bool arch_static_branch_jump(struct static_key *key, bool branch)
+{
+ asm_volatile_goto("0: brcl 15, %l[label]\n"
+ ".pushsection __jump_table, \"aw\"\n"
+ ".balign 8\n"
+ ".quad 0b, %l[label], %0\n"
+ ".popsection\n"
+ : : "X" (&((char *)key)[branch]) : : label);
+
return false;
label:
return true;
diff --git a/arch/s390/kernel/jump_label.c b/arch/s390/kernel/jump_label.c
index c9dac2139f59..083b05f5f5ab 100644
--- a/arch/s390/kernel/jump_label.c
+++ b/arch/s390/kernel/jump_label.c
@@ -61,7 +61,7 @@ static void __jump_label_transform(struct jump_entry *entry,
{
struct insn old, new;
- if (type == JUMP_LABEL_ENABLE) {
+ if (type == JUMP_LABEL_JMP) {
jump_label_make_nop(entry, &old);
jump_label_make_branch(entry, &new);
} else {
diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c
index 52524b9083c3..017c3a9bfc28 100644
--- a/arch/s390/kernel/time.c
+++ b/arch/s390/kernel/time.c
@@ -378,7 +378,7 @@ static void disable_sync_clock(void *dummy)
* increase the "sequence" counter to avoid the race of an
* etr event and the complete recovery against get_sync_clock.
*/
- atomic_clear_mask(0x80000000, sw_ptr);
+ atomic_andnot(0x80000000, sw_ptr);
atomic_inc(sw_ptr);
}
@@ -389,7 +389,7 @@ static void disable_sync_clock(void *dummy)
static void enable_sync_clock(void)
{
atomic_t *sw_ptr = this_cpu_ptr(&clock_sync_word);
- atomic_set_mask(0x80000000, sw_ptr);
+ atomic_or(0x80000000, sw_ptr);
}
/*
diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c
index b277d50dcf76..5c2c169395c3 100644
--- a/arch/s390/kvm/interrupt.c
+++ b/arch/s390/kvm/interrupt.c
@@ -173,20 +173,20 @@ static unsigned long deliverable_irqs(struct kvm_vcpu *vcpu)
static void __set_cpu_idle(struct kvm_vcpu *vcpu)
{
- atomic_set_mask(CPUSTAT_WAIT, &vcpu->arch.sie_block->cpuflags);
+ atomic_or(CPUSTAT_WAIT, &vcpu->arch.sie_block->cpuflags);
set_bit(vcpu->vcpu_id, vcpu->arch.local_int.float_int->idle_mask);
}
static void __unset_cpu_idle(struct kvm_vcpu *vcpu)
{
- atomic_clear_mask(CPUSTAT_WAIT, &vcpu->arch.sie_block->cpuflags);
+ atomic_andnot(CPUSTAT_WAIT, &vcpu->arch.sie_block->cpuflags);
clear_bit(vcpu->vcpu_id, vcpu->arch.local_int.float_int->idle_mask);
}
static void __reset_intercept_indicators(struct kvm_vcpu *vcpu)
{
- atomic_clear_mask(CPUSTAT_IO_INT | CPUSTAT_EXT_INT | CPUSTAT_STOP_INT,
- &vcpu->arch.sie_block->cpuflags);
+ atomic_andnot(CPUSTAT_IO_INT | CPUSTAT_EXT_INT | CPUSTAT_STOP_INT,
+ &vcpu->arch.sie_block->cpuflags);
vcpu->arch.sie_block->lctl = 0x0000;
vcpu->arch.sie_block->ictl &= ~(ICTL_LPSW | ICTL_STCTL | ICTL_PINT);
@@ -199,7 +199,7 @@ static void __reset_intercept_indicators(struct kvm_vcpu *vcpu)
static void __set_cpuflag(struct kvm_vcpu *vcpu, u32 flag)
{
- atomic_set_mask(flag, &vcpu->arch.sie_block->cpuflags);
+ atomic_or(flag, &vcpu->arch.sie_block->cpuflags);
}
static void set_intercept_indicators_io(struct kvm_vcpu *vcpu)
@@ -928,7 +928,7 @@ void kvm_s390_clear_local_irqs(struct kvm_vcpu *vcpu)
spin_unlock(&li->lock);
/* clear pending external calls set by sigp interpretation facility */
- atomic_clear_mask(CPUSTAT_ECALL_PEND, li->cpuflags);
+ atomic_andnot(CPUSTAT_ECALL_PEND, li->cpuflags);
vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sigp_ctrl = 0;
}
@@ -1026,7 +1026,7 @@ static int __inject_pfault_init(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
li->irq.ext = irq->u.ext;
set_bit(IRQ_PEND_PFAULT_INIT, &li->pending_irqs);
- atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
+ atomic_or(CPUSTAT_EXT_INT, li->cpuflags);
return 0;
}
@@ -1041,7 +1041,7 @@ static int __inject_extcall_sigpif(struct kvm_vcpu *vcpu, uint16_t src_id)
/* another external call is pending */
return -EBUSY;
}
- atomic_set_mask(CPUSTAT_ECALL_PEND, &vcpu->arch.sie_block->cpuflags);
+ atomic_or(CPUSTAT_ECALL_PEND, &vcpu->arch.sie_block->cpuflags);
return 0;
}
@@ -1067,7 +1067,7 @@ static int __inject_extcall(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
if (test_and_set_bit(IRQ_PEND_EXT_EXTERNAL, &li->pending_irqs))
return -EBUSY;
*extcall = irq->u.extcall;
- atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
+ atomic_or(CPUSTAT_EXT_INT, li->cpuflags);
return 0;
}
@@ -1139,7 +1139,7 @@ static int __inject_sigp_emergency(struct kvm_vcpu *vcpu,
set_bit(irq->u.emerg.code, li->sigp_emerg_pending);
set_bit(IRQ_PEND_EXT_EMERGENCY, &li->pending_irqs);
- atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
+ atomic_or(CPUSTAT_EXT_INT, li->cpuflags);
return 0;
}
@@ -1183,7 +1183,7 @@ static int __inject_ckc(struct kvm_vcpu *vcpu)
0, 0);
set_bit(IRQ_PEND_EXT_CLOCK_COMP, &li->pending_irqs);
- atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
+ atomic_or(CPUSTAT_EXT_INT, li->cpuflags);
return 0;
}
@@ -1196,7 +1196,7 @@ static int __inject_cpu_timer(struct kvm_vcpu *vcpu)
0, 0);
set_bit(IRQ_PEND_EXT_CPU_TIMER, &li->pending_irqs);
- atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
+ atomic_or(CPUSTAT_EXT_INT, li->cpuflags);
return 0;
}
@@ -1375,13 +1375,13 @@ static void __floating_irq_kick(struct kvm *kvm, u64 type)
spin_lock(&li->lock);
switch (type) {
case KVM_S390_MCHK:
- atomic_set_mask(CPUSTAT_STOP_INT, li->cpuflags);
+ atomic_or(CPUSTAT_STOP_INT, li->cpuflags);
break;
case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
- atomic_set_mask(CPUSTAT_IO_INT, li->cpuflags);
+ atomic_or(CPUSTAT_IO_INT, li->cpuflags);
break;
default:
- atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
+ atomic_or(CPUSTAT_EXT_INT, li->cpuflags);
break;
}
spin_unlock(&li->lock);
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index 98df53c01343..c91eb941b444 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -1333,12 +1333,12 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
save_access_regs(vcpu->arch.host_acrs);
restore_access_regs(vcpu->run->s.regs.acrs);
gmap_enable(vcpu->arch.gmap);
- atomic_set_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
+ atomic_or(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
}
void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
- atomic_clear_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
+ atomic_andnot(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
gmap_disable(vcpu->arch.gmap);
save_fpu_regs();
@@ -1443,9 +1443,9 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
CPUSTAT_STOPPED);
if (test_kvm_facility(vcpu->kvm, 78))
- atomic_set_mask(CPUSTAT_GED2, &vcpu->arch.sie_block->cpuflags);
+ atomic_or(CPUSTAT_GED2, &vcpu->arch.sie_block->cpuflags);
else if (test_kvm_facility(vcpu->kvm, 8))
- atomic_set_mask(CPUSTAT_GED, &vcpu->arch.sie_block->cpuflags);
+ atomic_or(CPUSTAT_GED, &vcpu->arch.sie_block->cpuflags);
kvm_s390_vcpu_setup_model(vcpu);
@@ -1557,24 +1557,24 @@ int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
void kvm_s390_vcpu_block(struct kvm_vcpu *vcpu)
{
- atomic_set_mask(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
+ atomic_or(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
exit_sie(vcpu);
}
void kvm_s390_vcpu_unblock(struct kvm_vcpu *vcpu)
{
- atomic_clear_mask(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
+ atomic_andnot(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
}
static void kvm_s390_vcpu_request(struct kvm_vcpu *vcpu)
{
- atomic_set_mask(PROG_REQUEST, &vcpu->arch.sie_block->prog20);
+ atomic_or(PROG_REQUEST, &vcpu->arch.sie_block->prog20);
exit_sie(vcpu);
}
static void kvm_s390_vcpu_request_handled(struct kvm_vcpu *vcpu)
{
- atomic_clear_mask(PROG_REQUEST, &vcpu->arch.sie_block->prog20);
+ atomic_andnot(PROG_REQUEST, &vcpu->arch.sie_block->prog20);
}
/*
@@ -1583,7 +1583,7 @@ static void kvm_s390_vcpu_request_handled(struct kvm_vcpu *vcpu)
* return immediately. */
void exit_sie(struct kvm_vcpu *vcpu)
{
- atomic_set_mask(CPUSTAT_STOP_INT, &vcpu->arch.sie_block->cpuflags);
+ atomic_or(CPUSTAT_STOP_INT, &vcpu->arch.sie_block->cpuflags);
while (vcpu->arch.sie_block->prog0c & PROG_IN_SIE)
cpu_relax();
}
@@ -1807,19 +1807,19 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
if (dbg->control & KVM_GUESTDBG_ENABLE) {
vcpu->guest_debug = dbg->control;
/* enforce guest PER */
- atomic_set_mask(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
+ atomic_or(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
if (dbg->control & KVM_GUESTDBG_USE_HW_BP)
rc = kvm_s390_import_bp_data(vcpu, dbg);
} else {
- atomic_clear_mask(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
+ atomic_andnot(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
vcpu->arch.guestdbg.last_bp = 0;
}
if (rc) {
vcpu->guest_debug = 0;
kvm_s390_clear_bp_data(vcpu);
- atomic_clear_mask(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
+ atomic_andnot(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
}
return rc;
@@ -1894,7 +1894,7 @@ retry:
if (kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu)) {
if (!ibs_enabled(vcpu)) {
trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 1);
- atomic_set_mask(CPUSTAT_IBS,
+ atomic_or(CPUSTAT_IBS,
&vcpu->arch.sie_block->cpuflags);
}
goto retry;
@@ -1903,7 +1903,7 @@ retry:
if (kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu)) {
if (ibs_enabled(vcpu)) {
trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 0);
- atomic_clear_mask(CPUSTAT_IBS,
+ atomic_andnot(CPUSTAT_IBS,
&vcpu->arch.sie_block->cpuflags);
}
goto retry;
@@ -2419,7 +2419,7 @@ void kvm_s390_vcpu_start(struct kvm_vcpu *vcpu)
__disable_ibs_on_all_vcpus(vcpu->kvm);
}
- atomic_clear_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
+ atomic_andnot(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
/*
* Another VCPU might have used IBS while we were offline.
* Let's play safe and flush the VCPU at startup.
@@ -2445,7 +2445,7 @@ void kvm_s390_vcpu_stop(struct kvm_vcpu *vcpu)
/* SIGP STOP and SIGP STOP AND STORE STATUS has been fully processed */
kvm_s390_clear_stop_irq(vcpu);
- atomic_set_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
+ atomic_or(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
__disable_ibs_on_vcpu(vcpu);
for (i = 0; i < online_vcpus; i++) {
diff --git a/arch/s390/lib/uaccess.c b/arch/s390/lib/uaccess.c
index 0d002a746bec..ae4de559e3a0 100644
--- a/arch/s390/lib/uaccess.c
+++ b/arch/s390/lib/uaccess.c
@@ -15,7 +15,7 @@
#include <asm/mmu_context.h>
#include <asm/facility.h>
-static struct static_key have_mvcos = STATIC_KEY_INIT_FALSE;
+static DEFINE_STATIC_KEY_FALSE(have_mvcos);
static inline unsigned long copy_from_user_mvcos(void *x, const void __user *ptr,
unsigned long size)
@@ -104,7 +104,7 @@ static inline unsigned long copy_from_user_mvcp(void *x, const void __user *ptr,
unsigned long __copy_from_user(void *to, const void __user *from, unsigned long n)
{
- if (static_key_false(&have_mvcos))
+ if (static_branch_likely(&have_mvcos))
return copy_from_user_mvcos(to, from, n);
return copy_from_user_mvcp(to, from, n);
}
@@ -177,7 +177,7 @@ static inline unsigned long copy_to_user_mvcs(void __user *ptr, const void *x,
unsigned long __copy_to_user(void __user *to, const void *from, unsigned long n)
{
- if (static_key_false(&have_mvcos))
+ if (static_branch_likely(&have_mvcos))
return copy_to_user_mvcos(to, from, n);
return copy_to_user_mvcs(to, from, n);
}
@@ -240,7 +240,7 @@ static inline unsigned long copy_in_user_mvc(void __user *to, const void __user
unsigned long __copy_in_user(void __user *to, const void __user *from, unsigned long n)
{
- if (static_key_false(&have_mvcos))
+ if (static_branch_likely(&have_mvcos))
return copy_in_user_mvcos(to, from, n);
return copy_in_user_mvc(to, from, n);
}
@@ -312,7 +312,7 @@ static inline unsigned long clear_user_xc(void __user *to, unsigned long size)
unsigned long __clear_user(void __user *to, unsigned long size)
{
- if (static_key_false(&have_mvcos))
+ if (static_branch_likely(&have_mvcos))
return clear_user_mvcos(to, size);
return clear_user_xc(to, size);
}
@@ -373,7 +373,7 @@ EXPORT_SYMBOL(__strncpy_from_user);
static int __init uaccess_init(void)
{
if (test_facility(27))
- static_key_slow_inc(&have_mvcos);
+ static_branch_enable(&have_mvcos);
return 0;
}
early_initcall(uaccess_init);
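
The uaccess.c conversion above is the pattern used for the updated static-key API elsewhere in this series: DEFINE_STATIC_KEY_FALSE() replaces the open-coded struct static_key initializer, static_branch_likely() replaces static_key_false() in the fast path, and static_branch_enable() replaces static_key_slow_inc(). A condensed sketch of the same pattern; my_feature, fast_path(), slow_path() and hardware_has_feature() are hypothetical names, not part of this patch:

#include <linux/init.h>
#include <linux/jump_label.h>
#include <linux/types.h>

int fast_path(void);			/* hypothetical helpers */
int slow_path(void);
bool hardware_has_feature(void);

static DEFINE_STATIC_KEY_FALSE(my_feature);	/* starts disabled, like have_mvcos */

int do_work(void)
{
	/* the branch is runtime-patched when the key flips, so the test is (nearly) free */
	if (static_branch_likely(&my_feature))
		return fast_path();
	return slow_path();
}

static int __init detect_feature(void)
{
	if (hardware_has_feature())
		static_branch_enable(&my_feature);
	return 0;
}
early_initcall(detect_feature);
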
diff --git a/arch/sh/include/asm/atomic-grb.h b/arch/sh/include/asm/atomic-grb.h
index 97a5fda83450..b94df40e5f2d 100644
--- a/arch/sh/include/asm/atomic-grb.h
+++ b/arch/sh/include/asm/atomic-grb.h
@@ -48,47 +48,12 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \
ATOMIC_OPS(add)
ATOMIC_OPS(sub)
+ATOMIC_OP(and)
+ATOMIC_OP(or)
+ATOMIC_OP(xor)
+
#undef ATOMIC_OPS
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP
-static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
-{
- int tmp;
- unsigned int _mask = ~mask;
-
- __asm__ __volatile__ (
- " .align 2 \n\t"
- " mova 1f, r0 \n\t" /* r0 = end point */
- " mov r15, r1 \n\t" /* r1 = saved sp */
- " mov #-6, r15 \n\t" /* LOGIN: r15 = size */
- " mov.l @%1, %0 \n\t" /* load old value */
- " and %2, %0 \n\t" /* add */
- " mov.l %0, @%1 \n\t" /* store new value */
- "1: mov r1, r15 \n\t" /* LOGOUT */
- : "=&r" (tmp),
- "+r" (v)
- : "r" (_mask)
- : "memory" , "r0", "r1");
-}
-
-static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
-{
- int tmp;
-
- __asm__ __volatile__ (
- " .align 2 \n\t"
- " mova 1f, r0 \n\t" /* r0 = end point */
- " mov r15, r1 \n\t" /* r1 = saved sp */
- " mov #-6, r15 \n\t" /* LOGIN: r15 = size */
- " mov.l @%1, %0 \n\t" /* load old value */
- " or %2, %0 \n\t" /* or */
- " mov.l %0, @%1 \n\t" /* store new value */
- "1: mov r1, r15 \n\t" /* LOGOUT */
- : "=&r" (tmp),
- "+r" (v)
- : "r" (mask)
- : "memory" , "r0", "r1");
-}
-
#endif /* __ASM_SH_ATOMIC_GRB_H */
diff --git a/arch/sh/include/asm/atomic-irq.h b/arch/sh/include/asm/atomic-irq.h
index 61d107523f06..23fcdad5773e 100644
--- a/arch/sh/include/asm/atomic-irq.h
+++ b/arch/sh/include/asm/atomic-irq.h
@@ -37,27 +37,12 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \
ATOMIC_OPS(add, +=)
ATOMIC_OPS(sub, -=)
+ATOMIC_OP(and, &=)
+ATOMIC_OP(or, |=)
+ATOMIC_OP(xor, ^=)
#undef ATOMIC_OPS
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP
-static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
-{
- unsigned long flags;
-
- raw_local_irq_save(flags);
- v->counter &= ~mask;
- raw_local_irq_restore(flags);
-}
-
-static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
-{
- unsigned long flags;
-
- raw_local_irq_save(flags);
- v->counter |= mask;
- raw_local_irq_restore(flags);
-}
-
#endif /* __ASM_SH_ATOMIC_IRQ_H */
diff --git a/arch/sh/include/asm/atomic-llsc.h b/arch/sh/include/asm/atomic-llsc.h
index 8575dccb9ef7..33d34b16d4d6 100644
--- a/arch/sh/include/asm/atomic-llsc.h
+++ b/arch/sh/include/asm/atomic-llsc.h
@@ -52,37 +52,12 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \
ATOMIC_OPS(add)
ATOMIC_OPS(sub)
+ATOMIC_OP(and)
+ATOMIC_OP(or)
+ATOMIC_OP(xor)
#undef ATOMIC_OPS
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP
-static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
-{
- unsigned long tmp;
-
- __asm__ __volatile__ (
-"1: movli.l @%2, %0 ! atomic_clear_mask \n"
-" and %1, %0 \n"
-" movco.l %0, @%2 \n"
-" bf 1b \n"
- : "=&z" (tmp)
- : "r" (~mask), "r" (&v->counter)
- : "t");
-}
-
-static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
-{
- unsigned long tmp;
-
- __asm__ __volatile__ (
-"1: movli.l @%2, %0 ! atomic_set_mask \n"
-" or %1, %0 \n"
-" movco.l %0, @%2 \n"
-" bf 1b \n"
- : "=&z" (tmp)
- : "r" (mask), "r" (&v->counter)
- : "t");
-}
-
#endif /* __ASM_SH_ATOMIC_LLSC_H */
diff --git a/arch/sparc/include/asm/atomic_32.h b/arch/sparc/include/asm/atomic_32.h
index 0e69b7e7a439..7dcbebbcaec6 100644
--- a/arch/sparc/include/asm/atomic_32.h
+++ b/arch/sparc/include/asm/atomic_32.h
@@ -17,10 +17,12 @@
#include <asm/barrier.h>
#include <asm-generic/atomic64.h>
-
#define ATOMIC_INIT(i) { (i) }
int atomic_add_return(int, atomic_t *);
+void atomic_and(int, atomic_t *);
+void atomic_or(int, atomic_t *);
+void atomic_xor(int, atomic_t *);
int atomic_cmpxchg(atomic_t *, int, int);
int atomic_xchg(atomic_t *, int);
int __atomic_add_unless(atomic_t *, int, int);
diff --git a/arch/sparc/include/asm/atomic_64.h b/arch/sparc/include/asm/atomic_64.h
index 4082749913ce..917084ace49d 100644
--- a/arch/sparc/include/asm/atomic_64.h
+++ b/arch/sparc/include/asm/atomic_64.h
@@ -33,6 +33,10 @@ long atomic64_##op##_return(long, atomic64_t *);
ATOMIC_OPS(add)
ATOMIC_OPS(sub)
+ATOMIC_OP(and)
+ATOMIC_OP(or)
+ATOMIC_OP(xor)
+
#undef ATOMIC_OPS
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP
diff --git a/arch/sparc/include/asm/barrier_64.h b/arch/sparc/include/asm/barrier_64.h
index 809941e33e12..14a928601657 100644
--- a/arch/sparc/include/asm/barrier_64.h
+++ b/arch/sparc/include/asm/barrier_64.h
@@ -60,12 +60,12 @@ do { __asm__ __volatile__("ba,pt %%xcc, 1f\n\t" \
do { \
compiletime_assert_atomic_type(*p); \
barrier(); \
- ACCESS_ONCE(*p) = (v); \
+ WRITE_ONCE(*p, v); \
} while (0)
#define smp_load_acquire(p) \
({ \
- typeof(*p) ___p1 = ACCESS_ONCE(*p); \
+ typeof(*p) ___p1 = READ_ONCE(*p); \
compiletime_assert_atomic_type(*p); \
barrier(); \
___p1; \
diff --git a/arch/sparc/include/asm/jump_label.h b/arch/sparc/include/asm/jump_label.h
index cc9b04a2b11b..62d0354d1727 100644
--- a/arch/sparc/include/asm/jump_label.h
+++ b/arch/sparc/include/asm/jump_label.h
@@ -7,16 +7,33 @@
#define JUMP_LABEL_NOP_SIZE 4
-static __always_inline bool arch_static_branch(struct static_key *key)
+static __always_inline bool arch_static_branch(struct static_key *key, bool branch)
{
- asm_volatile_goto("1:\n\t"
- "nop\n\t"
- "nop\n\t"
- ".pushsection __jump_table, \"aw\"\n\t"
- ".align 4\n\t"
- ".word 1b, %l[l_yes], %c0\n\t"
- ".popsection \n\t"
- : : "i" (key) : : l_yes);
+ asm_volatile_goto("1:\n\t"
+ "nop\n\t"
+ "nop\n\t"
+ ".pushsection __jump_table, \"aw\"\n\t"
+ ".align 4\n\t"
+ ".word 1b, %l[l_yes], %c0\n\t"
+ ".popsection \n\t"
+ : : "i" (&((char *)key)[branch]) : : l_yes);
+
+ return false;
+l_yes:
+ return true;
+}
+
+static __always_inline bool arch_static_branch_jump(struct static_key *key, bool branch)
+{
+ asm_volatile_goto("1:\n\t"
+ "b %l[l_yes]\n\t"
+ "nop\n\t"
+ ".pushsection __jump_table, \"aw\"\n\t"
+ ".align 4\n\t"
+ ".word 1b, %l[l_yes], %c0\n\t"
+ ".popsection \n\t"
+ : : "i" (&((char *)key)[branch]) : : l_yes);
+
return false;
l_yes:
return true;
diff --git a/arch/sparc/kernel/jump_label.c b/arch/sparc/kernel/jump_label.c
index 48565c11e82a..59bbeff55024 100644
--- a/arch/sparc/kernel/jump_label.c
+++ b/arch/sparc/kernel/jump_label.c
@@ -16,7 +16,7 @@ void arch_jump_label_transform(struct jump_entry *entry,
u32 val;
u32 *insn = (u32 *) (unsigned long) entry->code;
- if (type == JUMP_LABEL_ENABLE) {
+ if (type == JUMP_LABEL_JMP) {
s32 off = (s32)entry->target - (s32)entry->code;
#ifdef CONFIG_SPARC64
diff --git a/arch/sparc/lib/atomic32.c b/arch/sparc/lib/atomic32.c
index 71cd65ab200c..b9d63c0a7aab 100644
--- a/arch/sparc/lib/atomic32.c
+++ b/arch/sparc/lib/atomic32.c
@@ -27,22 +27,38 @@ static DEFINE_SPINLOCK(dummy);
#endif /* SMP */
-#define ATOMIC_OP(op, cop) \
+#define ATOMIC_OP_RETURN(op, c_op) \
int atomic_##op##_return(int i, atomic_t *v) \
{ \
int ret; \
unsigned long flags; \
spin_lock_irqsave(ATOMIC_HASH(v), flags); \
\
- ret = (v->counter cop i); \
+ ret = (v->counter c_op i); \
\
spin_unlock_irqrestore(ATOMIC_HASH(v), flags); \
return ret; \
} \
EXPORT_SYMBOL(atomic_##op##_return);
-ATOMIC_OP(add, +=)
+#define ATOMIC_OP(op, c_op) \
+void atomic_##op(int i, atomic_t *v) \
+{ \
+ unsigned long flags; \
+ spin_lock_irqsave(ATOMIC_HASH(v), flags); \
+ \
+ v->counter c_op i; \
+ \
+ spin_unlock_irqrestore(ATOMIC_HASH(v), flags); \
+} \
+EXPORT_SYMBOL(atomic_##op);
+
+ATOMIC_OP_RETURN(add, +=)
+ATOMIC_OP(and, &=)
+ATOMIC_OP(or, |=)
+ATOMIC_OP(xor, ^=)
+#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP
int atomic_xchg(atomic_t *v, int new)
diff --git a/arch/sparc/lib/atomic_64.S b/arch/sparc/lib/atomic_64.S
index 05dac43907d1..d6b0363f345b 100644
--- a/arch/sparc/lib/atomic_64.S
+++ b/arch/sparc/lib/atomic_64.S
@@ -47,6 +47,9 @@ ENDPROC(atomic_##op##_return);
ATOMIC_OPS(add)
ATOMIC_OPS(sub)
+ATOMIC_OP(and)
+ATOMIC_OP(or)
+ATOMIC_OP(xor)
#undef ATOMIC_OPS
#undef ATOMIC_OP_RETURN
@@ -84,6 +87,9 @@ ENDPROC(atomic64_##op##_return);
ATOMIC64_OPS(add)
ATOMIC64_OPS(sub)
+ATOMIC64_OP(and)
+ATOMIC64_OP(or)
+ATOMIC64_OP(xor)
#undef ATOMIC64_OPS
#undef ATOMIC64_OP_RETURN
diff --git a/arch/sparc/lib/ksyms.c b/arch/sparc/lib/ksyms.c
index 8069ce12f20b..8eb454cfe05c 100644
--- a/arch/sparc/lib/ksyms.c
+++ b/arch/sparc/lib/ksyms.c
@@ -111,6 +111,9 @@ EXPORT_SYMBOL(atomic64_##op##_return);
ATOMIC_OPS(add)
ATOMIC_OPS(sub)
+ATOMIC_OP(and)
+ATOMIC_OP(or)
+ATOMIC_OP(xor)
#undef ATOMIC_OPS
#undef ATOMIC_OP_RETURN
diff --git a/arch/tile/include/asm/atomic_32.h b/arch/tile/include/asm/atomic_32.h
index 1b109fad9fff..d320ce253d86 100644
--- a/arch/tile/include/asm/atomic_32.h
+++ b/arch/tile/include/asm/atomic_32.h
@@ -34,6 +34,19 @@ static inline void atomic_add(int i, atomic_t *v)
_atomic_xchg_add(&v->counter, i);
}
+#define ATOMIC_OP(op) \
+unsigned long _atomic_##op(volatile unsigned long *p, unsigned long mask); \
+static inline void atomic_##op(int i, atomic_t *v) \
+{ \
+ _atomic_##op((unsigned long *)&v->counter, i); \
+}
+
+ATOMIC_OP(and)
+ATOMIC_OP(or)
+ATOMIC_OP(xor)
+
+#undef ATOMIC_OP
+
/**
* atomic_add_return - add integer and return
* @v: pointer of type atomic_t
@@ -113,6 +126,17 @@ static inline void atomic64_add(long long i, atomic64_t *v)
_atomic64_xchg_add(&v->counter, i);
}
+#define ATOMIC64_OP(op) \
+long long _atomic64_##op(long long *v, long long n); \
+static inline void atomic64_##op(long long i, atomic64_t *v) \
+{ \
+ _atomic64_##op(&v->counter, i); \
+}
+
+ATOMIC64_OP(and)
+ATOMIC64_OP(or)
+ATOMIC64_OP(xor)
+
/**
* atomic64_add_return - add integer and return
* @v: pointer of type atomic64_t
@@ -225,6 +249,7 @@ extern struct __get_user __atomic_xchg_add(volatile int *p, int *lock, int n);
extern struct __get_user __atomic_xchg_add_unless(volatile int *p,
int *lock, int o, int n);
extern struct __get_user __atomic_or(volatile int *p, int *lock, int n);
+extern struct __get_user __atomic_and(volatile int *p, int *lock, int n);
extern struct __get_user __atomic_andn(volatile int *p, int *lock, int n);
extern struct __get_user __atomic_xor(volatile int *p, int *lock, int n);
extern long long __atomic64_cmpxchg(volatile long long *p, int *lock,
@@ -234,6 +259,9 @@ extern long long __atomic64_xchg_add(volatile long long *p, int *lock,
long long n);
extern long long __atomic64_xchg_add_unless(volatile long long *p,
int *lock, long long o, long long n);
+extern long long __atomic64_and(volatile long long *p, int *lock, long long n);
+extern long long __atomic64_or(volatile long long *p, int *lock, long long n);
+extern long long __atomic64_xor(volatile long long *p, int *lock, long long n);
/* Return failure from the atomic wrappers. */
struct __get_user __atomic_bad_address(int __user *addr);
diff --git a/arch/tile/include/asm/atomic_64.h b/arch/tile/include/asm/atomic_64.h
index 0496970cef82..096a56d6ead4 100644
--- a/arch/tile/include/asm/atomic_64.h
+++ b/arch/tile/include/asm/atomic_64.h
@@ -58,6 +58,26 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
return oldval;
}
+static inline void atomic_and(int i, atomic_t *v)
+{
+ __insn_fetchand4((void *)&v->counter, i);
+}
+
+static inline void atomic_or(int i, atomic_t *v)
+{
+ __insn_fetchor4((void *)&v->counter, i);
+}
+
+static inline void atomic_xor(int i, atomic_t *v)
+{
+ int guess, oldval = v->counter;
+ do {
+ guess = oldval;
+ __insn_mtspr(SPR_CMPEXCH_VALUE, guess);
+ oldval = __insn_cmpexch4(&v->counter, guess ^ i);
+ } while (guess != oldval);
+}
+
/* Now the true 64-bit operations. */
#define ATOMIC64_INIT(i) { (i) }
@@ -91,6 +111,26 @@ static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
return oldval != u;
}
+static inline void atomic64_and(long i, atomic64_t *v)
+{
+ __insn_fetchand((void *)&v->counter, i);
+}
+
+static inline void atomic64_or(long i, atomic64_t *v)
+{
+ __insn_fetchor((void *)&v->counter, i);
+}
+
+static inline void atomic64_xor(long i, atomic64_t *v)
+{
+ long guess, oldval = v->counter;
+ do {
+ guess = oldval;
+ __insn_mtspr(SPR_CMPEXCH_VALUE, guess);
+ oldval = __insn_cmpexch(&v->counter, guess ^ i);
+ } while (guess != oldval);
+}
+
#define atomic64_sub_return(i, v) atomic64_add_return(-(i), (v))
#define atomic64_sub(i, v) atomic64_add(-(i), (v))
#define atomic64_inc_return(v) atomic64_add_return(1, (v))
diff --git a/arch/tile/lib/atomic_32.c b/arch/tile/lib/atomic_32.c
index c89b211fd9e7..298df1e9912a 100644
--- a/arch/tile/lib/atomic_32.c
+++ b/arch/tile/lib/atomic_32.c
@@ -94,6 +94,12 @@ unsigned long _atomic_or(volatile unsigned long *p, unsigned long mask)
}
EXPORT_SYMBOL(_atomic_or);
+unsigned long _atomic_and(volatile unsigned long *p, unsigned long mask)
+{
+ return __atomic_and((int *)p, __atomic_setup(p), mask).val;
+}
+EXPORT_SYMBOL(_atomic_and);
+
unsigned long _atomic_andn(volatile unsigned long *p, unsigned long mask)
{
return __atomic_andn((int *)p, __atomic_setup(p), mask).val;
@@ -136,6 +142,23 @@ long long _atomic64_cmpxchg(long long *v, long long o, long long n)
}
EXPORT_SYMBOL(_atomic64_cmpxchg);
+long long _atomic64_and(long long *v, long long n)
+{
+ return __atomic64_and(v, __atomic_setup(v), n);
+}
+EXPORT_SYMBOL(_atomic64_and);
+
+long long _atomic64_or(long long *v, long long n)
+{
+ return __atomic64_or(v, __atomic_setup(v), n);
+}
+EXPORT_SYMBOL(_atomic64_or);
+
+long long _atomic64_xor(long long *v, long long n)
+{
+ return __atomic64_xor(v, __atomic_setup(v), n);
+}
+EXPORT_SYMBOL(_atomic64_xor);
/*
* If any of the atomic or futex routines hit a bad address (not in
diff --git a/arch/tile/lib/atomic_asm_32.S b/arch/tile/lib/atomic_asm_32.S
index 6bda3132cd61..f611265633d6 100644
--- a/arch/tile/lib/atomic_asm_32.S
+++ b/arch/tile/lib/atomic_asm_32.S
@@ -178,6 +178,7 @@ atomic_op _xchg_add, 32, "add r24, r22, r2"
atomic_op _xchg_add_unless, 32, \
"sne r26, r22, r2; { bbns r26, 3f; add r24, r22, r3 }"
atomic_op _or, 32, "or r24, r22, r2"
+atomic_op _and, 32, "and r24, r22, r2"
atomic_op _andn, 32, "nor r2, r2, zero; and r24, r22, r2"
atomic_op _xor, 32, "xor r24, r22, r2"
@@ -191,6 +192,9 @@ atomic_op 64_xchg_add_unless, 64, \
{ bbns r26, 3f; add r24, r22, r4 }; \
{ bbns r27, 3f; add r25, r23, r5 }; \
slt_u r26, r24, r22; add r25, r25, r26"
+atomic_op 64_or, 64, "{ or r24, r22, r2; or r25, r23, r3 }"
+atomic_op 64_and, 64, "{ and r24, r22, r2; and r25, r23, r3 }"
+atomic_op 64_xor, 64, "{ xor r24, r22, r2; xor r25, r23, r3 }"
jrp lr /* happy backtracer */
diff --git a/arch/x86/include/asm/atomic.h b/arch/x86/include/asm/atomic.h
index e9168955c42f..fb52aa644aab 100644
--- a/arch/x86/include/asm/atomic.h
+++ b/arch/x86/include/asm/atomic.h
@@ -182,6 +182,21 @@ static inline int atomic_xchg(atomic_t *v, int new)
return xchg(&v->counter, new);
}
+#define ATOMIC_OP(op) \
+static inline void atomic_##op(int i, atomic_t *v) \
+{ \
+ asm volatile(LOCK_PREFIX #op"l %1,%0" \
+ : "+m" (v->counter) \
+ : "ir" (i) \
+ : "memory"); \
+}
+
+ATOMIC_OP(and)
+ATOMIC_OP(or)
+ATOMIC_OP(xor)
+
+#undef ATOMIC_OP
+
/**
* __atomic_add_unless - add unless the number is already a given value
* @v: pointer of type atomic_t
@@ -219,16 +234,6 @@ static __always_inline short int atomic_inc_short(short int *v)
return *v;
}
-/* These are x86-specific, used by some header files */
-#define atomic_clear_mask(mask, addr) \
- asm volatile(LOCK_PREFIX "andl %0,%1" \
- : : "r" (~(mask)), "m" (*(addr)) : "memory")
-
-#define atomic_set_mask(mask, addr) \
- asm volatile(LOCK_PREFIX "orl %0,%1" \
- : : "r" ((unsigned)(mask)), "m" (*(addr)) \
- : "memory")
-
#ifdef CONFIG_X86_32
# include <asm/atomic64_32.h>
#else
diff --git a/arch/x86/include/asm/atomic64_32.h b/arch/x86/include/asm/atomic64_32.h
index b154de75c90c..a11c30b77fb5 100644
--- a/arch/x86/include/asm/atomic64_32.h
+++ b/arch/x86/include/asm/atomic64_32.h
@@ -313,4 +313,18 @@ static inline long long atomic64_dec_if_positive(atomic64_t *v)
#undef alternative_atomic64
#undef __alternative_atomic64
+#define ATOMIC64_OP(op, c_op) \
+static inline void atomic64_##op(long long i, atomic64_t *v) \
+{ \
+ long long old, c = 0; \
+ while ((old = atomic64_cmpxchg(v, c, c c_op i)) != c) \
+ c = old; \
+}
+
+ATOMIC64_OP(and, &)
+ATOMIC64_OP(or, |)
+ATOMIC64_OP(xor, ^)
+
+#undef ATOMIC64_OP
+
#endif /* _ASM_X86_ATOMIC64_32_H */
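
For reference, ATOMIC64_OP(or, |) above expands to a compare-and-exchange loop; 32-bit x86 has no single-instruction 64-bit logical read-modify-write, so the operation retries until atomic64_cmpxchg() observes an unchanged counter:

static inline void atomic64_or(long long i, atomic64_t *v)
{
	long long old, c = 0;

	while ((old = atomic64_cmpxchg(v, c, c | i)) != c)
		c = old;
}
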
diff --git a/arch/x86/include/asm/atomic64_64.h b/arch/x86/include/asm/atomic64_64.h
index b965f9e03f2a..50e33eff58de 100644
--- a/arch/x86/include/asm/atomic64_64.h
+++ b/arch/x86/include/asm/atomic64_64.h
@@ -220,4 +220,19 @@ static inline long atomic64_dec_if_positive(atomic64_t *v)
return dec;
}
+#define ATOMIC64_OP(op) \
+static inline void atomic64_##op(long i, atomic64_t *v) \
+{ \
+ asm volatile(LOCK_PREFIX #op"q %1,%0" \
+ : "+m" (v->counter) \
+ : "er" (i) \
+ : "memory"); \
+}
+
+ATOMIC64_OP(and)
+ATOMIC64_OP(or)
+ATOMIC64_OP(xor)
+
+#undef ATOMIC64_OP
+
#endif /* _ASM_X86_ATOMIC64_64_H */
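
By contrast, on 64-bit x86 the ATOMIC64_OP() above emits a single lock-prefixed instruction per operation; ATOMIC64_OP(and), for example, expands to roughly:

static inline void atomic64_and(long i, atomic64_t *v)
{
	asm volatile(LOCK_PREFIX "andq %1,%0"
		     : "+m" (v->counter)
		     : "er" (i)
		     : "memory");
}
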
diff --git a/arch/x86/include/asm/barrier.h b/arch/x86/include/asm/barrier.h
index 818cb8788225..0681d2532527 100644
--- a/arch/x86/include/asm/barrier.h
+++ b/arch/x86/include/asm/barrier.h
@@ -57,12 +57,12 @@
do { \
compiletime_assert_atomic_type(*p); \
smp_mb(); \
- ACCESS_ONCE(*p) = (v); \
+ WRITE_ONCE(*p, v); \
} while (0)
#define smp_load_acquire(p) \
({ \
- typeof(*p) ___p1 = ACCESS_ONCE(*p); \
+ typeof(*p) ___p1 = READ_ONCE(*p); \
compiletime_assert_atomic_type(*p); \
smp_mb(); \
___p1; \
@@ -74,12 +74,12 @@ do { \
do { \
compiletime_assert_atomic_type(*p); \
barrier(); \
- ACCESS_ONCE(*p) = (v); \
+ WRITE_ONCE(*p, v); \
} while (0)
#define smp_load_acquire(p) \
({ \
- typeof(*p) ___p1 = ACCESS_ONCE(*p); \
+ typeof(*p) ___p1 = READ_ONCE(*p); \
compiletime_assert_atomic_type(*p); \
barrier(); \
___p1; \
diff --git a/arch/x86/include/asm/jump_label.h b/arch/x86/include/asm/jump_label.h
index a4c1cf7e93f8..5daeca3d0f9e 100644
--- a/arch/x86/include/asm/jump_label.h
+++ b/arch/x86/include/asm/jump_label.h
@@ -16,15 +16,32 @@
# define STATIC_KEY_INIT_NOP GENERIC_NOP5_ATOMIC
#endif
-static __always_inline bool arch_static_branch(struct static_key *key)
+static __always_inline bool arch_static_branch(struct static_key *key, bool branch)
{
asm_volatile_goto("1:"
".byte " __stringify(STATIC_KEY_INIT_NOP) "\n\t"
".pushsection __jump_table, \"aw\" \n\t"
_ASM_ALIGN "\n\t"
- _ASM_PTR "1b, %l[l_yes], %c0 \n\t"
+ _ASM_PTR "1b, %l[l_yes], %c0 + %c1 \n\t"
".popsection \n\t"
- : : "i" (key) : : l_yes);
+ : : "i" (key), "i" (branch) : : l_yes);
+
+ return false;
+l_yes:
+ return true;
+}
+
+static __always_inline bool arch_static_branch_jump(struct static_key *key, bool branch)
+{
+ asm_volatile_goto("1:"
+ ".byte 0xe9\n\t .long %l[l_yes] - 2f\n\t"
+ "2:\n\t"
+ ".pushsection __jump_table, \"aw\" \n\t"
+ _ASM_ALIGN "\n\t"
+ _ASM_PTR "1b, %l[l_yes], %c0 + %c1 \n\t"
+ ".popsection \n\t"
+ : : "i" (key), "i" (branch) : : l_yes);
+
return false;
l_yes:
return true;
diff --git a/arch/x86/include/asm/qrwlock.h b/arch/x86/include/asm/qrwlock.h
index ae0e241e228b..c537cbb038a7 100644
--- a/arch/x86/include/asm/qrwlock.h
+++ b/arch/x86/include/asm/qrwlock.h
@@ -2,16 +2,6 @@
#define _ASM_X86_QRWLOCK_H
#include <asm-generic/qrwlock_types.h>
-
-#ifndef CONFIG_X86_PPRO_FENCE
-#define queue_write_unlock queue_write_unlock
-static inline void queue_write_unlock(struct qrwlock *lock)
-{
- barrier();
- ACCESS_ONCE(*(u8 *)&lock->cnts) = 0;
-}
-#endif
-
#include <asm-generic/qrwlock.h>
#endif /* _ASM_X86_QRWLOCK_H */
diff --git a/arch/x86/kernel/jump_label.c b/arch/x86/kernel/jump_label.c
index 26d5a55a2736..e565e0e4d216 100644
--- a/arch/x86/kernel/jump_label.c
+++ b/arch/x86/kernel/jump_label.c
@@ -45,7 +45,7 @@ static void __jump_label_transform(struct jump_entry *entry,
const unsigned char default_nop[] = { STATIC_KEY_INIT_NOP };
const unsigned char *ideal_nop = ideal_nops[NOP_ATOMIC5];
- if (type == JUMP_LABEL_ENABLE) {
+ if (type == JUMP_LABEL_JMP) {
if (init) {
/*
* Jump label is enabled for the first time.
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
index 79055cf2c497..c8d52cb4cb6e 100644
--- a/arch/x86/kernel/tsc.c
+++ b/arch/x86/kernel/tsc.c
@@ -38,7 +38,7 @@ static int __read_mostly tsc_unstable;
erroneous rdtsc usage on !cpu_has_tsc processors */
static int __read_mostly tsc_disabled = -1;
-static struct static_key __use_tsc = STATIC_KEY_INIT;
+static DEFINE_STATIC_KEY_FALSE(__use_tsc);
int tsc_clocksource_reliable;
@@ -274,7 +274,12 @@ done:
*/
u64 native_sched_clock(void)
{
- u64 tsc_now;
+ if (static_branch_likely(&__use_tsc)) {
+ u64 tsc_now = rdtsc();
+
+ /* return the value in ns */
+ return cycles_2_ns(tsc_now);
+ }
/*
* Fall back to jiffies if there's no TSC available:
@@ -284,16 +289,9 @@ u64 native_sched_clock(void)
* very important for it to be as fast as the platform
* can achieve it. )
*/
- if (!static_key_false(&__use_tsc)) {
- /* No locking but a rare wrong value is not a big deal: */
- return (jiffies_64 - INITIAL_JIFFIES) * (1000000000 / HZ);
- }
-
- /* read the Time Stamp Counter: */
- tsc_now = rdtsc();
- /* return the value in ns */
- return cycles_2_ns(tsc_now);
+ /* No locking but a rare wrong value is not a big deal: */
+ return (jiffies_64 - INITIAL_JIFFIES) * (1000000000 / HZ);
}
/*
@@ -1212,7 +1210,7 @@ void __init tsc_init(void)
/* now allow native_sched_clock() to use rdtsc */
tsc_disabled = 0;
- static_key_slow_inc(&__use_tsc);
+ static_branch_enable(&__use_tsc);
if (!no_sched_irq_time)
enable_sched_clock_irqtime();
diff --git a/arch/xtensa/configs/iss_defconfig b/arch/xtensa/configs/iss_defconfig
index e4d193e7a300..f3dfe0d921c2 100644
--- a/arch/xtensa/configs/iss_defconfig
+++ b/arch/xtensa/configs/iss_defconfig
@@ -616,7 +616,6 @@ CONFIG_SCHED_DEBUG=y
# CONFIG_SLUB_DEBUG_ON is not set
# CONFIG_SLUB_STATS is not set
# CONFIG_DEBUG_RT_MUTEXES is not set
-# CONFIG_RT_MUTEX_TESTER is not set
# CONFIG_DEBUG_SPINLOCK is not set
# CONFIG_DEBUG_MUTEXES is not set
# CONFIG_DEBUG_SPINLOCK_SLEEP is not set
diff --git a/arch/xtensa/include/asm/atomic.h b/arch/xtensa/include/asm/atomic.h
index ebcd1f6fc8cb..93795d047303 100644
--- a/arch/xtensa/include/asm/atomic.h
+++ b/arch/xtensa/include/asm/atomic.h
@@ -145,6 +145,10 @@ static inline int atomic_##op##_return(int i, atomic_t * v) \
ATOMIC_OPS(add)
ATOMIC_OPS(sub)
+ATOMIC_OP(and)
+ATOMIC_OP(or)
+ATOMIC_OP(xor)
+
#undef ATOMIC_OPS
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP
@@ -250,75 +254,6 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
return c;
}
-
-static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
-{
-#if XCHAL_HAVE_S32C1I
- unsigned long tmp;
- int result;
-
- __asm__ __volatile__(
- "1: l32i %1, %3, 0\n"
- " wsr %1, scompare1\n"
- " and %0, %1, %2\n"
- " s32c1i %0, %3, 0\n"
- " bne %0, %1, 1b\n"
- : "=&a" (result), "=&a" (tmp)
- : "a" (~mask), "a" (v)
- : "memory"
- );
-#else
- unsigned int all_f = -1;
- unsigned int vval;
-
- __asm__ __volatile__(
- " rsil a15,"__stringify(TOPLEVEL)"\n"
- " l32i %0, %2, 0\n"
- " xor %1, %4, %3\n"
- " and %0, %0, %4\n"
- " s32i %0, %2, 0\n"
- " wsr a15, ps\n"
- " rsync\n"
- : "=&a" (vval), "=a" (mask)
- : "a" (v), "a" (all_f), "1" (mask)
- : "a15", "memory"
- );
-#endif
-}
-
-static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
-{
-#if XCHAL_HAVE_S32C1I
- unsigned long tmp;
- int result;
-
- __asm__ __volatile__(
- "1: l32i %1, %3, 0\n"
- " wsr %1, scompare1\n"
- " or %0, %1, %2\n"
- " s32c1i %0, %3, 0\n"
- " bne %0, %1, 1b\n"
- : "=&a" (result), "=&a" (tmp)
- : "a" (mask), "a" (v)
- : "memory"
- );
-#else
- unsigned int vval;
-
- __asm__ __volatile__(
- " rsil a15,"__stringify(TOPLEVEL)"\n"
- " l32i %0, %2, 0\n"
- " or %0, %0, %1\n"
- " s32i %0, %2, 0\n"
- " wsr a15, ps\n"
- " rsync\n"
- : "=&a" (vval)
- : "a" (mask), "a" (v)
- : "a15", "memory"
- );
-#endif
-}
-
#endif /* __KERNEL__ */
#endif /* _XTENSA_ATOMIC_H */