Diffstat (limited to 'arch/s390/include')
-rw-r--r--  arch/s390/include/asm/atomic.h     | 41
-rw-r--r--  arch/s390/include/asm/barrier.h    |  4
-rw-r--r--  arch/s390/include/asm/jump_label.h | 19
3 files changed, 43 insertions, 21 deletions
diff --git a/arch/s390/include/asm/atomic.h b/arch/s390/include/asm/atomic.h
index adbe3802e377..117fa5c921c1 100644
--- a/arch/s390/include/asm/atomic.h
+++ b/arch/s390/include/asm/atomic.h
@@ -27,6 +27,7 @@
 #define __ATOMIC_OR	"lao"
 #define __ATOMIC_AND	"lan"
 #define __ATOMIC_ADD	"laa"
+#define __ATOMIC_XOR	"lax"
 #define __ATOMIC_BARRIER "bcr	14,0\n"
 
 #define __ATOMIC_LOOP(ptr, op_val, op_string, __barrier)		\
@@ -49,6 +50,7 @@
 #define __ATOMIC_OR	"or"
 #define __ATOMIC_AND	"nr"
 #define __ATOMIC_ADD	"ar"
+#define __ATOMIC_XOR	"xr"
 #define __ATOMIC_BARRIER "\n"
 
 #define __ATOMIC_LOOP(ptr, op_val, op_string, __barrier)		\
@@ -118,15 +120,17 @@ static inline void atomic_add(int i, atomic_t *v)
 #define atomic_dec_return(_v)		atomic_sub_return(1, _v)
 #define atomic_dec_and_test(_v)		(atomic_sub_return(1, _v) == 0)
 
-static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
-{
-	__ATOMIC_LOOP(v, ~mask, __ATOMIC_AND, __ATOMIC_NO_BARRIER);
+#define ATOMIC_OP(op, OP)						\
+static inline void atomic_##op(int i, atomic_t *v)			\
+{									\
+	__ATOMIC_LOOP(v, i, __ATOMIC_##OP, __ATOMIC_NO_BARRIER);	\
 }
 
-static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
-{
-	__ATOMIC_LOOP(v, mask, __ATOMIC_OR, __ATOMIC_NO_BARRIER);
-}
+ATOMIC_OP(and, AND)
+ATOMIC_OP(or, OR)
+ATOMIC_OP(xor, XOR)
+
+#undef ATOMIC_OP
 
 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
 
@@ -167,6 +171,7 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
 #define __ATOMIC64_OR	"laog"
 #define __ATOMIC64_AND	"lang"
 #define __ATOMIC64_ADD	"laag"
+#define __ATOMIC64_XOR	"laxg"
 #define __ATOMIC64_BARRIER "bcr	14,0\n"
 
 #define __ATOMIC64_LOOP(ptr, op_val, op_string, __barrier)		\
@@ -189,6 +194,7 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
 #define __ATOMIC64_OR	"ogr"
 #define __ATOMIC64_AND	"ngr"
 #define __ATOMIC64_ADD	"agr"
+#define __ATOMIC64_XOR	"xgr"
 #define __ATOMIC64_BARRIER "\n"
 
 #define __ATOMIC64_LOOP(ptr, op_val, op_string, __barrier)		\
@@ -247,16 +253,6 @@ static inline void atomic64_add(long long i, atomic64_t *v)
 	__ATOMIC64_LOOP(v, i, __ATOMIC64_ADD, __ATOMIC64_NO_BARRIER);
 }
 
-static inline void atomic64_clear_mask(unsigned long mask, atomic64_t *v)
-{
-	__ATOMIC64_LOOP(v, ~mask, __ATOMIC64_AND, __ATOMIC64_NO_BARRIER);
-}
-
-static inline void atomic64_set_mask(unsigned long mask, atomic64_t *v)
-{
-	__ATOMIC64_LOOP(v, mask, __ATOMIC64_OR, __ATOMIC64_NO_BARRIER);
-}
-
 #define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
 
 static inline long long atomic64_cmpxchg(atomic64_t *v,
@@ -270,6 +266,17 @@ static inline long long atomic64_cmpxchg(atomic64_t *v,
 	return old;
 }
 
+#define ATOMIC64_OP(op, OP)						\
+static inline void atomic64_##op(long i, atomic64_t *v)		\
+{									\
+	__ATOMIC64_LOOP(v, i, __ATOMIC64_##OP, __ATOMIC64_NO_BARRIER);	\
+}
+
+ATOMIC64_OP(and, AND)
+ATOMIC64_OP(or, OR)
+ATOMIC64_OP(xor, XOR)
+
+#undef ATOMIC64_OP
 #undef __ATOMIC64_LOOP
 
 static inline int atomic64_add_unless(atomic64_t *v, long long i, long long u)
diff --git a/arch/s390/include/asm/barrier.h b/arch/s390/include/asm/barrier.h
index e6f8615a11eb..d48fe0162331 100644
--- a/arch/s390/include/asm/barrier.h
+++ b/arch/s390/include/asm/barrier.h
@@ -42,12 +42,12 @@ do {									\
 	compiletime_assert_atomic_type(*p);				\
 	barrier();							\
-	ACCESS_ONCE(*p) = (v);						\
+	WRITE_ONCE(*p, v);						\
 } while (0)
 
 #define smp_load_acquire(p)						\
 ({									\
-	typeof(*p) ___p1 = ACCESS_ONCE(*p);				\
+	typeof(*p) ___p1 = READ_ONCE(*p);				\
 	compiletime_assert_atomic_type(*p);				\
 	barrier();							\
 	___p1;								\
 })
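Usage sketch (not part of the patch): the ATOMIC_OP()/ATOMIC64_OP() macros in the atomic.h hunks above generate atomic_{and,or,xor}() and atomic64_{and,or,xor}(), which take over from the removed atomic_set_mask()/atomic_clear_mask() helpers. The flag values and the caller below are hypothetical.

	#include <linux/atomic.h>

	#define FLAG_BUSY	0x01	/* hypothetical flag bits */
	#define FLAG_DIRTY	0x02

	static atomic_t state = ATOMIC_INIT(0);

	static void update_flags(void)	/* hypothetical caller */
	{
		atomic_or(FLAG_BUSY, &state);	/* was atomic_set_mask(FLAG_BUSY, &state) */
		atomic_and(~FLAG_BUSY, &state);	/* was atomic_clear_mask(FLAG_BUSY, &state) */
		atomic_xor(FLAG_DIRTY, &state);	/* new: atomic toggle, "lax"/"xr" above */
	}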
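Usage sketch (not part of the patch): in the barrier.h hunks above, READ_ONCE()/WRITE_ONCE() are drop-in replacements for ACCESS_ONCE(), with the advantage that they also work on non-scalar types. The release/acquire pairing the macros implement is illustrated below with a hypothetical producer/consumer; struct msg and the function names are made up.

	#include <linux/errno.h>
	#include <asm/barrier.h>

	struct msg { int payload; };	/* hypothetical shared record */
	static struct msg m;
	static int ready;

	/* producer: the write to m cannot be reordered past the release store */
	static void publish(int val)
	{
		m.payload = val;
		smp_store_release(&ready, 1);
	}

	/* consumer: reads after the acquire load observe the producer's write to m */
	static int consume(int *out)
	{
		if (!smp_load_acquire(&ready))
			return -EAGAIN;
		*out = m.payload;
		return 0;
	}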
diff --git a/arch/s390/include/asm/jump_label.h b/arch/s390/include/asm/jump_label.h
index 69972b7957ee..7f9fd5e3f1bf 100644
--- a/arch/s390/include/asm/jump_label.h
+++ b/arch/s390/include/asm/jump_label.h
@@ -12,14 +12,29 @@
  * We use a brcl 0,2 instruction for jump labels at compile time so it
  * can be easily distinguished from a hotpatch generated instruction.
  */
-static __always_inline bool arch_static_branch(struct static_key *key)
+static __always_inline bool arch_static_branch(struct static_key *key, bool branch)
 {
 	asm_volatile_goto("0:	brcl 0,"__stringify(JUMP_LABEL_NOP_OFFSET)"\n"
 		".pushsection __jump_table, \"aw\"\n"
 		".balign 8\n"
 		".quad 0b, %l[label], %0\n"
 		".popsection\n"
-		: : "X" (key) : : label);
+		: : "X" (&((char *)key)[branch]) : : label);
+
+	return false;
+label:
+	return true;
+}
+
+static __always_inline bool arch_static_branch_jump(struct static_key *key, bool branch)
+{
+	asm_volatile_goto("0:	brcl 15, %l[label]\n"
+		".pushsection __jump_table, \"aw\"\n"
+		".balign 8\n"
+		".quad 0b, %l[label], %0\n"
+		".popsection\n"
+		: : "X" (&((char *)key)[branch]) : : label);
+
 	return false;
 label:
 	return true;
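Usage sketch (not part of the patch): the new 'branch' argument encodes the branch direction in the low bit of the key address stored in __jump_table, and arch_static_branch_jump() (an unconditional "brcl 15") backs the static_branch_likely()/static_branch_unlikely() interface introduced in the same series. The key and helper functions below are hypothetical.

	#include <linux/jump_label.h>

	static DEFINE_STATIC_KEY_FALSE(use_new_path);	/* hypothetical key */

	extern void old_path(void);	/* hypothetical helpers */
	extern void new_path(void);

	static void hot_path(void)
	{
		/* compiles to the "brcl 0" nop until the key is enabled */
		if (static_branch_unlikely(&use_new_path))
			new_path();
		else
			old_path();
	}

	static void setup(void)
	{
		static_branch_enable(&use_new_path);	/* patches the branch at runtime */
	}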