author		Linus Torvalds <torvalds@linux-foundation.org>	2021-04-28 03:54:15 +0300
committer	Linus Torvalds <torvalds@linux-foundation.org>	2021-04-28 03:54:15 +0300
commit		6daa755f813e6aa0bcc97e352666e072b1baac25 (patch)
tree		c079031f74beadc9d5c2099579d707d14cab8f8c	/arch/s390/include/asm/bitops.h
parent		c6536676c7fe3f572ba55842e59c3c71c01e7fb3 (diff)
parent		6f3353c2d2b3eb4de52e9704cb962712033db181 (diff)
Merge tag 's390-5.13-1' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux
Pull s390 updates from Heiko Carstens:

 - fix buffer size for in-kernel disassembler for ebpf programs.

 - fix two memory leaks in zcrypt driver.

 - expose PCI device UID as index, including an indicator if the uid is
   unique.

 - remove some oprofile leftovers.

 - improve stack unwinder tests.

 - don't use gcc atomic builtins anymore, just like all other
   architectures. Even though I'm sure the current code is ok, I
   totally dislike that s390 is the only architecture being special
   here; especially considering that there was a lengthy discussion
   about this topic and the outcome was not to use the builtins.
   Therefore open-code atomic ops again with inline assembly (a rough
   sketch follows the commit list below) and switch to gcc builtins as
   soon as other architectures do the same.

 - couple of other changes to atomic and cmpxchg, and use
   atomic-instrumented.h for KASAN.

 - separate zbus creation, registration, and scanning in our PCI code,
   which allows for cleaner and easier handling.

 - a rather large change to the vfio-ap code to fix circular locking
   dependencies when updating crypto masks.

 - move QAOB handling from the qdio layer down to the drivers.

 - add CRW inject facility to the common I/O layer. This adds debugfs
   files which allow artificial events to be generated from user space
   for testing purposes.

 - increase SCLP console line length from 80 to 320 characters to
   avoid oddly wrapped lines.

 - add protected virtualization guest and host indication files, which
   indicate either that a guest is running in pv mode or that the
   hypervisor is capable of starting pv guests.

 - various other small fixes and improvements all over the place.

* tag 's390-5.13-1' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux: (53 commits)
  s390/disassembler: increase ebpf disasm buffer size
  s390/archrandom: add parameter check for s390_arch_random_generate
  s390/zcrypt: fix zcard and zqueue hot-unplug memleak
  s390/pci: expose a PCI device's UID as its index
  s390/atomic,cmpxchg: always inline __xchg/__cmpxchg
  s390/smp: fix do_restart() prototype
  s390: get rid of oprofile leftovers
  s390/atomic,cmpxchg: make constraints work with old compilers
  s390/test_unwind: print test suite start/end info
  s390/cmpxchg: use unsigned long values instead of void pointers
  s390/test_unwind: add WARN if tests failed
  s390/test_unwind: unify error handling paths
  s390: update defconfigs
  s390/spinlock: use R constraint in inline assembly
  s390/atomic,cmpxchg: switch to use atomic-instrumented.h
  s390/cmpxchg: get rid of gcc atomic builtins
  s390/atomic: get rid of gcc atomic builtins
  s390/atomic: use proper constraints
  s390/atomic: move remaining inline assemblies to atomic_ops.h
  s390/bitops: make bitops only work on longs
  ...
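The "open-code atomic ops with inline assembly" item above refers to helpers such as __atomic64_or() used by the bitops diff further down. As a rough, hedged sketch only (not the kernel's exact code, which lives in arch/s390/include/asm/atomic_ops.h and is generated by macros with different constraints and barrier variants), an atomic 64-bit OR that also returns the old value can be open-coded with the z196 interlocked-access instruction LAOG; the function name below is illustrative:

	/*
	 * Sketch of an open-coded atomic 64-bit OR on s390, assuming the
	 * z196 interlocked-access facility is available. Illustrative
	 * only; not the kernel's actual helper.
	 */
	static inline long atomic64_or_sketch(long mask, long *ptr)
	{
		long old;

		asm volatile(
			"	laog	%[old],%[mask],%[ptr]\n" /* old = *ptr; *ptr |= mask */
			: [old] "=d" (old), [ptr] "+Q" (*ptr)
			: [mask] "d" (mask)
			: "cc", "memory");
		return old;
	}

Returning the old value is what lets arch_test_and_set_bit() below report the prior bit state from a single interlocked instruction.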
Diffstat (limited to 'arch/s390/include/asm/bitops.h')
-rw-r--r--	arch/s390/include/asm/bitops.h | 93
1 file changed, 47 insertions(+), 46 deletions(-)
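The diff below converts the non-atomic bitops from byte-based accesses to word-based ones: the removed __bitops_byte() helper needed the (nr ^ (BITS_PER_LONG - 8)) >> 3 XOR trick to locate the right byte on big-endian s390, while the new __bitops_mask() helper works on whole longs and makes that unnecessary. A minimal user-space sketch of the word/mask decomposition (illustrative names, assuming 64-bit longs; not the kernel code):

	#include <stdio.h>

	#define BITS_PER_LONG 64

	/* Word that contains bit nr, relative to the bitmap base. */
	static unsigned long *bitops_word(unsigned long nr, unsigned long *ptr)
	{
		return ptr + nr / BITS_PER_LONG;
	}

	/* Mask selecting bit nr within its word. */
	static unsigned long bitops_mask(unsigned long nr)
	{
		return 1UL << (nr % BITS_PER_LONG);
	}

	int main(void)
	{
		unsigned long bitmap[2] = { 0, 0 };
		unsigned long nr = 70;	/* bit 6 of word 1 */

		*bitops_word(nr, bitmap) |= bitops_mask(nr);	/* like __set_bit */
		printf("bitmap[1] = %#lx\n", bitmap[1]);	/* prints 0x40 */
		return 0;
	}

The atomic variants in the patch apply the same mask through __atomic64_or() and friends, and the test-and-modify variants simply return old & mask, relying on the implicit conversion to bool instead of the old "!= 0" comparison.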
diff --git a/arch/s390/include/asm/bitops.h b/arch/s390/include/asm/bitops.h
index 31121d32f81d..68da67d2c4c9 100644
--- a/arch/s390/include/asm/bitops.h
+++ b/arch/s390/include/asm/bitops.h
@@ -42,7 +42,7 @@
#define __BITOPS_WORDS(bits) (((bits) + BITS_PER_LONG - 1) / BITS_PER_LONG)
static inline unsigned long *
-__bitops_word(unsigned long nr, volatile unsigned long *ptr)
+__bitops_word(unsigned long nr, const volatile unsigned long *ptr)
{
unsigned long addr;
@@ -50,37 +50,33 @@ __bitops_word(unsigned long nr, volatile unsigned long *ptr)
return (unsigned long *)addr;
}
-static inline unsigned char *
-__bitops_byte(unsigned long nr, volatile unsigned long *ptr)
+static inline unsigned long __bitops_mask(unsigned long nr)
{
- return ((unsigned char *)ptr) + ((nr ^ (BITS_PER_LONG - 8)) >> 3);
+ return 1UL << (nr & (BITS_PER_LONG - 1));
}
static __always_inline void arch_set_bit(unsigned long nr, volatile unsigned long *ptr)
{
unsigned long *addr = __bitops_word(nr, ptr);
- unsigned long mask;
+ unsigned long mask = __bitops_mask(nr);
- mask = 1UL << (nr & (BITS_PER_LONG - 1));
__atomic64_or(mask, (long *)addr);
}
static __always_inline void arch_clear_bit(unsigned long nr, volatile unsigned long *ptr)
{
unsigned long *addr = __bitops_word(nr, ptr);
- unsigned long mask;
+ unsigned long mask = __bitops_mask(nr);
- mask = ~(1UL << (nr & (BITS_PER_LONG - 1)));
- __atomic64_and(mask, (long *)addr);
+ __atomic64_and(~mask, (long *)addr);
}
static __always_inline void arch_change_bit(unsigned long nr,
volatile unsigned long *ptr)
{
unsigned long *addr = __bitops_word(nr, ptr);
- unsigned long mask;
+ unsigned long mask = __bitops_mask(nr);
- mask = 1UL << (nr & (BITS_PER_LONG - 1));
__atomic64_xor(mask, (long *)addr);
}
@@ -88,99 +84,104 @@ static inline bool arch_test_and_set_bit(unsigned long nr,
volatile unsigned long *ptr)
{
unsigned long *addr = __bitops_word(nr, ptr);
- unsigned long old, mask;
+ unsigned long mask = __bitops_mask(nr);
+ unsigned long old;
- mask = 1UL << (nr & (BITS_PER_LONG - 1));
old = __atomic64_or_barrier(mask, (long *)addr);
- return (old & mask) != 0;
+ return old & mask;
}
static inline bool arch_test_and_clear_bit(unsigned long nr,
volatile unsigned long *ptr)
{
unsigned long *addr = __bitops_word(nr, ptr);
- unsigned long old, mask;
+ unsigned long mask = __bitops_mask(nr);
+ unsigned long old;
- mask = ~(1UL << (nr & (BITS_PER_LONG - 1)));
- old = __atomic64_and_barrier(mask, (long *)addr);
- return (old & ~mask) != 0;
+ old = __atomic64_and_barrier(~mask, (long *)addr);
+ return old & mask;
}
static inline bool arch_test_and_change_bit(unsigned long nr,
volatile unsigned long *ptr)
{
unsigned long *addr = __bitops_word(nr, ptr);
- unsigned long old, mask;
+ unsigned long mask = __bitops_mask(nr);
+ unsigned long old;
- mask = 1UL << (nr & (BITS_PER_LONG - 1));
old = __atomic64_xor_barrier(mask, (long *)addr);
- return (old & mask) != 0;
+ return old & mask;
}
static inline void arch___set_bit(unsigned long nr, volatile unsigned long *ptr)
{
- unsigned char *addr = __bitops_byte(nr, ptr);
+ unsigned long *addr = __bitops_word(nr, ptr);
+ unsigned long mask = __bitops_mask(nr);
- *addr |= 1 << (nr & 7);
+ *addr |= mask;
}
static inline void arch___clear_bit(unsigned long nr,
volatile unsigned long *ptr)
{
- unsigned char *addr = __bitops_byte(nr, ptr);
+ unsigned long *addr = __bitops_word(nr, ptr);
+ unsigned long mask = __bitops_mask(nr);
- *addr &= ~(1 << (nr & 7));
+ *addr &= ~mask;
}
static inline void arch___change_bit(unsigned long nr,
volatile unsigned long *ptr)
{
- unsigned char *addr = __bitops_byte(nr, ptr);
+ unsigned long *addr = __bitops_word(nr, ptr);
+ unsigned long mask = __bitops_mask(nr);
- *addr ^= 1 << (nr & 7);
+ *addr ^= mask;
}
static inline bool arch___test_and_set_bit(unsigned long nr,
volatile unsigned long *ptr)
{
- unsigned char *addr = __bitops_byte(nr, ptr);
- unsigned char ch;
+ unsigned long *addr = __bitops_word(nr, ptr);
+ unsigned long mask = __bitops_mask(nr);
+ unsigned long old;
- ch = *addr;
- *addr |= 1 << (nr & 7);
- return (ch >> (nr & 7)) & 1;
+ old = *addr;
+ *addr |= mask;
+ return old & mask;
}
static inline bool arch___test_and_clear_bit(unsigned long nr,
volatile unsigned long *ptr)
{
- unsigned char *addr = __bitops_byte(nr, ptr);
- unsigned char ch;
+ unsigned long *addr = __bitops_word(nr, ptr);
+ unsigned long mask = __bitops_mask(nr);
+ unsigned long old;
- ch = *addr;
- *addr &= ~(1 << (nr & 7));
- return (ch >> (nr & 7)) & 1;
+ old = *addr;
+ *addr &= ~mask;
+ return old & mask;
}
static inline bool arch___test_and_change_bit(unsigned long nr,
volatile unsigned long *ptr)
{
- unsigned char *addr = __bitops_byte(nr, ptr);
- unsigned char ch;
+ unsigned long *addr = __bitops_word(nr, ptr);
+ unsigned long mask = __bitops_mask(nr);
+ unsigned long old;
- ch = *addr;
- *addr ^= 1 << (nr & 7);
- return (ch >> (nr & 7)) & 1;
+ old = *addr;
+ *addr ^= mask;
+ return old & mask;
}
static inline bool arch_test_bit(unsigned long nr,
const volatile unsigned long *ptr)
{
- const volatile unsigned char *addr;
+ const volatile unsigned long *addr = __bitops_word(nr, ptr);
+ unsigned long mask = __bitops_mask(nr);
- addr = ((const volatile unsigned char *)ptr);
- addr += (nr ^ (BITS_PER_LONG - 8)) >> 3;
- return (*addr >> (nr & 7)) & 1;
+ return *addr & mask;
}
static inline bool arch_test_and_set_bit_lock(unsigned long nr,