Diffstat (limited to 'include/asm-generic')
-rw-r--r--  include/asm-generic/barrier.h                     | 65
-rw-r--r--  include/asm-generic/bitops/instrumented-atomic.h  |  3
-rw-r--r--  include/asm-generic/bitops/instrumented-lock.h    |  3
-rw-r--r--  include/asm-generic/error-injection.h             |  4
-rw-r--r--  include/asm-generic/futex.h                       | 31
-rw-r--r--  include/asm-generic/hyperv-tlfs.h                 | 33
-rw-r--r--  include/asm-generic/logic_io.h                    |  2
-rw-r--r--  include/asm-generic/mshyperv.h                    |  6
-rw-r--r--  include/asm-generic/sections.h                    | 24
9 files changed, 92 insertions, 79 deletions
diff --git a/include/asm-generic/barrier.h b/include/asm-generic/barrier.h
index 640f09479bdf..3d503e74037f 100644
--- a/include/asm-generic/barrier.h
+++ b/include/asm-generic/barrier.h
@@ -14,6 +14,7 @@
 #ifndef __ASSEMBLY__
 
 #include <linux/compiler.h>
+#include <linux/kcsan-checks.h>
 #include <asm/rwonce.h>
 
 #ifndef nop
@@ -21,6 +22,31 @@
 #endif
 
 /*
+ * Architectures that want generic instrumentation can define __ prefixed
+ * variants of all barriers.
+ */
+
+#ifdef __mb
+#define mb() do { kcsan_mb(); __mb(); } while (0)
+#endif
+
+#ifdef __rmb
+#define rmb() do { kcsan_rmb(); __rmb(); } while (0)
+#endif
+
+#ifdef __wmb
+#define wmb() do { kcsan_wmb(); __wmb(); } while (0)
+#endif
+
+#ifdef __dma_rmb
+#define dma_rmb() do { kcsan_rmb(); __dma_rmb(); } while (0)
+#endif
+
+#ifdef __dma_wmb
+#define dma_wmb() do { kcsan_wmb(); __dma_wmb(); } while (0)
+#endif
+
+/*
  * Force strict CPU ordering. And yes, this is required on UP too when we're
  * talking to devices.
  *
@@ -62,15 +88,15 @@
 #ifdef CONFIG_SMP
 
 #ifndef smp_mb
-#define smp_mb() __smp_mb()
+#define smp_mb() do { kcsan_mb(); __smp_mb(); } while (0)
 #endif
 
 #ifndef smp_rmb
-#define smp_rmb() __smp_rmb()
+#define smp_rmb() do { kcsan_rmb(); __smp_rmb(); } while (0)
 #endif
 
 #ifndef smp_wmb
-#define smp_wmb() __smp_wmb()
+#define smp_wmb() do { kcsan_wmb(); __smp_wmb(); } while (0)
 #endif
 
 #else /* !CONFIG_SMP */
@@ -123,19 +149,19 @@ do { \
 #ifdef CONFIG_SMP
 
 #ifndef smp_store_mb
-#define smp_store_mb(var, value) __smp_store_mb(var, value)
+#define smp_store_mb(var, value) do { kcsan_mb(); __smp_store_mb(var, value); } while (0)
 #endif
 
 #ifndef smp_mb__before_atomic
-#define smp_mb__before_atomic() __smp_mb__before_atomic()
+#define smp_mb__before_atomic() do { kcsan_mb(); __smp_mb__before_atomic(); } while (0)
 #endif
 
 #ifndef smp_mb__after_atomic
-#define smp_mb__after_atomic() __smp_mb__after_atomic()
+#define smp_mb__after_atomic() do { kcsan_mb(); __smp_mb__after_atomic(); } while (0)
 #endif
 
 #ifndef smp_store_release
-#define smp_store_release(p, v) __smp_store_release(p, v)
+#define smp_store_release(p, v) do { kcsan_release(); __smp_store_release(p, v); } while (0)
 #endif
 
 #ifndef smp_load_acquire
@@ -178,13 +204,13 @@ do { \
 #endif /* CONFIG_SMP */
 
 /* Barriers for virtual machine guests when talking to an SMP host */
-#define virt_mb() __smp_mb()
-#define virt_rmb() __smp_rmb()
-#define virt_wmb() __smp_wmb()
-#define virt_store_mb(var, value) __smp_store_mb(var, value)
-#define virt_mb__before_atomic() __smp_mb__before_atomic()
-#define virt_mb__after_atomic() __smp_mb__after_atomic()
-#define virt_store_release(p, v) __smp_store_release(p, v)
+#define virt_mb() do { kcsan_mb(); __smp_mb(); } while (0)
+#define virt_rmb() do { kcsan_rmb(); __smp_rmb(); } while (0)
+#define virt_wmb() do { kcsan_wmb(); __smp_wmb(); } while (0)
+#define virt_store_mb(var, value) do { kcsan_mb(); __smp_store_mb(var, value); } while (0)
+#define virt_mb__before_atomic() do { kcsan_mb(); __smp_mb__before_atomic(); } while (0)
+#define virt_mb__after_atomic() do { kcsan_mb(); __smp_mb__after_atomic(); } while (0)
+#define virt_store_release(p, v) do { kcsan_release(); __smp_store_release(p, v); } while (0)
 #define virt_load_acquire(p) __smp_load_acquire(p)
 
 /**
@@ -251,5 +277,16 @@ do { \
 #define pmem_wmb() wmb()
 #endif
 
+/*
+ * ioremap_wc() maps I/O memory as memory with write-combining attributes. For
+ * this kind of memory accesses, the CPU may wait for prior accesses to be
+ * merged with subsequent ones. In some situation, such wait is bad for the
+ * performance. io_stop_wc() can be used to prevent the merging of
+ * write-combining memory accesses before this macro with those after it.
+ */
+#ifndef io_stop_wc
+#define io_stop_wc do { } while (0)
+#endif
+
 #endif /* !__ASSEMBLY__ */
 #endif /* __ASM_GENERIC_BARRIER_H */
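The "__"-prefixed convention introduced above means an architecture no longer instruments its barriers by hand: it only exports the raw primitives, and asm-generic/barrier.h pairs each one with the matching kcsan_*() annotation. A minimal sketch of what an opted-in port could look like; the architecture name and fence mnemonics below are invented for illustration and are not part of this series:

/* arch/foo/include/asm/barrier.h -- hypothetical example only */
#ifndef _ASM_FOO_BARRIER_H
#define _ASM_FOO_BARRIER_H

/*
 * Define only the raw, uninstrumented primitives. Including
 * asm-generic/barrier.h afterwards makes mb()/rmb()/wmb() expand to
 * do { kcsan_mb(); __mb(); } while (0) and so on, so every caller gets
 * KCSAN instrumentation without per-architecture changes.
 */
#define __mb()  asm volatile ("fence iorw,iorw" ::: "memory")
#define __rmb() asm volatile ("fence ir,ir" ::: "memory")
#define __wmb() asm volatile ("fence ow,ow" ::: "memory")

#include <asm-generic/barrier.h>

#endif /* _ASM_FOO_BARRIER_H */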
diff --git a/include/asm-generic/bitops/instrumented-atomic.h b/include/asm-generic/bitops/instrumented-atomic.h
index 81915dcd4b4e..c90192b1c755 100644
--- a/include/asm-generic/bitops/instrumented-atomic.h
+++ b/include/asm-generic/bitops/instrumented-atomic.h
@@ -67,6 +67,7 @@ static inline void change_bit(long nr, volatile unsigned long *addr)
  */
 static inline bool test_and_set_bit(long nr, volatile unsigned long *addr)
 {
+        kcsan_mb();
         instrument_atomic_read_write(addr + BIT_WORD(nr), sizeof(long));
         return arch_test_and_set_bit(nr, addr);
 }
@@ -80,6 +81,7 @@ static inline bool test_and_set_bit(long nr, volatile unsigned long *addr)
  */
 static inline bool test_and_clear_bit(long nr, volatile unsigned long *addr)
 {
+        kcsan_mb();
         instrument_atomic_read_write(addr + BIT_WORD(nr), sizeof(long));
         return arch_test_and_clear_bit(nr, addr);
 }
@@ -93,6 +95,7 @@ static inline bool test_and_clear_bit(long nr, volatile unsigned long *addr)
  */
 static inline bool test_and_change_bit(long nr, volatile unsigned long *addr)
 {
+        kcsan_mb();
         instrument_atomic_read_write(addr + BIT_WORD(nr), sizeof(long));
         return arch_test_and_change_bit(nr, addr);
 }
diff --git a/include/asm-generic/bitops/instrumented-lock.h b/include/asm-generic/bitops/instrumented-lock.h
index 75ef606f7145..eb64bd4f11f3 100644
--- a/include/asm-generic/bitops/instrumented-lock.h
+++ b/include/asm-generic/bitops/instrumented-lock.h
@@ -22,6 +22,7 @@
  */
 static inline void clear_bit_unlock(long nr, volatile unsigned long *addr)
 {
+        kcsan_release();
         instrument_atomic_write(addr + BIT_WORD(nr), sizeof(long));
         arch_clear_bit_unlock(nr, addr);
 }
@@ -37,6 +38,7 @@ static inline void clear_bit_unlock(long nr, volatile unsigned long *addr)
  */
 static inline void __clear_bit_unlock(long nr, volatile unsigned long *addr)
 {
+        kcsan_release();
         instrument_write(addr + BIT_WORD(nr), sizeof(long));
         arch___clear_bit_unlock(nr, addr);
 }
@@ -71,6 +73,7 @@ static inline bool test_and_set_bit_lock(long nr, volatile unsigned long *addr)
 static inline bool
 clear_bit_unlock_is_negative_byte(long nr, volatile unsigned long *addr)
 {
+        kcsan_release();
         instrument_atomic_write(addr + BIT_WORD(nr), sizeof(long));
         return arch_clear_bit_unlock_is_negative_byte(nr, addr);
 }
diff --git a/include/asm-generic/error-injection.h b/include/asm-generic/error-injection.h
index 7ddd9dc10ce9..fbca56bd9cbc 100644
--- a/include/asm-generic/error-injection.h
+++ b/include/asm-generic/error-injection.h
@@ -20,7 +20,7 @@ struct pt_regs;
 
 #ifdef CONFIG_FUNCTION_ERROR_INJECTION
 /*
- * Whitelist ganerating macro. Specify functions which can be
+ * Whitelist generating macro. Specify functions which can be
  * error-injectable using this macro.
  */
 #define ALLOW_ERROR_INJECTION(fname, _etype) \
@@ -29,7 +29,7 @@ static struct error_injection_entry __used \
         _eil_addr_##fname = { \
                 .addr = (unsigned long)fname, \
                 .etype = EI_ETYPE_##_etype, \
-        };
+        }
 
 void override_function_with_return(struct pt_regs *regs);
 #else
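Dropping the trailing semicolon from ALLOW_ERROR_INJECTION() means the terminating ';' now comes from the call site rather than the macro body, presumably so the common "ALLOW_ERROR_INJECTION(func, ERRNO);" spelling no longer leaves a stray empty declaration behind. A hedged usage sketch, with the annotated function invented purely for illustration:

#include <linux/error-injection.h>
#include <linux/errno.h>

/* Hypothetical helper; any function whose return value may be overridden. */
static noinline int foo_reserve_slot(int id)
{
        if (id < 0)
                return -EINVAL;
        return 0;
}
/* The statement is terminated here, by the caller's semicolon. */
ALLOW_ERROR_INJECTION(foo_reserve_slot, ERRNO);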
diff --git a/include/asm-generic/futex.h b/include/asm-generic/futex.h
index f4c3470480c7..2a19215baae5 100644
--- a/include/asm-generic/futex.h
+++ b/include/asm-generic/futex.h
@@ -6,15 +6,22 @@
 #include <linux/uaccess.h>
 #include <asm/errno.h>
 
+#ifndef futex_atomic_cmpxchg_inatomic
 #ifndef CONFIG_SMP
 /*
  * The following implementation only for uniprocessor machines.
  * It relies on preempt_disable() ensuring mutual exclusion.
  *
  */
+#define futex_atomic_cmpxchg_inatomic(uval, uaddr, oldval, newval) \
+        futex_atomic_cmpxchg_inatomic_local(uval, uaddr, oldval, newval)
+#define arch_futex_atomic_op_inuser(op, oparg, oval, uaddr) \
+        futex_atomic_op_inuser_local(op, oparg, oval, uaddr)
+#endif /* CONFIG_SMP */
+#endif
 
 /**
- * arch_futex_atomic_op_inuser() - Atomic arithmetic operation with constant
+ * futex_atomic_op_inuser_local() - Atomic arithmetic operation with constant
  *                        argument and comparison of the previous
  *                        futex value with another constant.
  *
@@ -28,7 +35,7 @@
  * -ENOSYS - Operation not supported
  */
 static inline int
-arch_futex_atomic_op_inuser(int op, u32 oparg, int *oval, u32 __user *uaddr)
+futex_atomic_op_inuser_local(int op, u32 oparg, int *oval, u32 __user *uaddr)
 {
         int oldval, ret;
         u32 tmp;
@@ -75,7 +82,7 @@ out_pagefault_enable:
 }
 
 /**
- * futex_atomic_cmpxchg_inatomic() - Compare and exchange the content of the
+ * futex_atomic_cmpxchg_inatomic_local() - Compare and exchange the content of the
  *                              uaddr with newval if the current value is
  *                              oldval.
  * @uval:       pointer to store content of @uaddr
@@ -87,10 +94,9 @@ out_pagefault_enable:
  * 0 - On success
  * -EFAULT - User access resulted in a page fault
  * -EAGAIN - Atomic operation was unable to complete due to contention
- * -ENOSYS - Function not implemented (only if !HAVE_FUTEX_CMPXCHG)
  */
 static inline int
-futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
+futex_atomic_cmpxchg_inatomic_local(u32 *uval, u32 __user *uaddr,
                               u32 oldval, u32 newval)
 {
         u32 val;
@@ -112,19 +118,4 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
         return 0;
 }
 
-#else
-static inline int
-arch_futex_atomic_op_inuser(int op, u32 oparg, int *oval, u32 __user *uaddr)
-{
-        return -ENOSYS;
-}
-
-static inline int
-futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
-                              u32 oldval, u32 newval)
-{
-        return -ENOSYS;
-}
-
-#endif /* CONFIG_SMP */
 #endif
diff --git a/include/asm-generic/hyperv-tlfs.h b/include/asm-generic/hyperv-tlfs.h
index 8ed6733d5146..8f97c2927bee 100644
--- a/include/asm-generic/hyperv-tlfs.h
+++ b/include/asm-generic/hyperv-tlfs.h
@@ -540,39 +540,6 @@ enum hv_interrupt_source {
         HV_INTERRUPT_SOURCE_IOAPIC,
 };
 
-union hv_msi_address_register {
-        u32 as_uint32;
-        struct {
-                u32 reserved1:2;
-                u32 destination_mode:1;
-                u32 redirection_hint:1;
-                u32 reserved2:8;
-                u32 destination_id:8;
-                u32 msi_base:12;
-        };
-} __packed;
-
-union hv_msi_data_register {
-        u32 as_uint32;
-        struct {
-                u32 vector:8;
-                u32 delivery_mode:3;
-                u32 reserved1:3;
-                u32 level_assert:1;
-                u32 trigger_mode:1;
-                u32 reserved2:16;
-        };
-} __packed;
-
-/* HvRetargetDeviceInterrupt hypercall */
-union hv_msi_entry {
-        u64 as_uint64;
-        struct {
-                union hv_msi_address_register address;
-                union hv_msi_data_register data;
-        } __packed;
-};
-
 union hv_ioapic_rte {
         u64 as_uint64;
 
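For include/asm-generic/futex.h, the net effect is easier to see with the preprocessor logic flattened out: an architecture that already defines futex_atomic_cmpxchg_inatomic keeps its own implementation untouched, a uniprocessor build without one silently picks up the preempt_disable()-based _local helpers, and the old -ENOSYS stubs disappear entirely. A condensed restatement of the resulting wiring (same names as in the diff, comments added):

/* Condensed view of the fallback selection after this change. */
#ifndef futex_atomic_cmpxchg_inatomic          /* no arch-specific version */
#ifndef CONFIG_SMP                             /* uniprocessor build only */
/*
 * preempt_disable() inside the _local helpers is enough for atomicity on a
 * single CPU, so they can stand in for the arch implementations.
 */
#define futex_atomic_cmpxchg_inatomic(uval, uaddr, oldval, newval) \
        futex_atomic_cmpxchg_inatomic_local(uval, uaddr, oldval, newval)
#define arch_futex_atomic_op_inuser(op, oparg, oval, uaddr) \
        futex_atomic_op_inuser_local(op, oparg, oval, uaddr)
#endif /* CONFIG_SMP */
#endif /* futex_atomic_cmpxchg_inatomic */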
diff --git a/include/asm-generic/logic_io.h b/include/asm-generic/logic_io.h
index a53116b8c57e..8a59b6e567df 100644
--- a/include/asm-generic/logic_io.h
+++ b/include/asm-generic/logic_io.h
@@ -34,7 +34,7 @@
 void __iomem *ioremap(phys_addr_t offset, size_t size);
 
 #define iounmap iounmap
-void iounmap(void __iomem *addr);
+void iounmap(void volatile __iomem *addr);
 
 #define __raw_readb __raw_readb
 u8 __raw_readb(const volatile void __iomem *addr);
diff --git a/include/asm-generic/mshyperv.h b/include/asm-generic/mshyperv.h
index 3e2248ac328e..c08758b6b364 100644
--- a/include/asm-generic/mshyperv.h
+++ b/include/asm-generic/mshyperv.h
@@ -49,8 +49,8 @@ struct ms_hyperv_info {
 };
 
 extern struct ms_hyperv_info ms_hyperv;
-extern void __percpu **hyperv_pcpu_input_arg;
-extern void __percpu **hyperv_pcpu_output_arg;
+extern void * __percpu *hyperv_pcpu_input_arg;
+extern void * __percpu *hyperv_pcpu_output_arg;
 
 extern u64 hv_do_hypercall(u64 control, void *inputaddr, void *outputaddr);
 extern u64 hv_do_fast_hypercall8(u16 control, u64 input8);
@@ -269,6 +269,8 @@ bool hv_isolation_type_snp(void);
 u64 hv_ghcb_hypercall(u64 control, void *input, void *output, u32 input_size);
 void hyperv_cleanup(void);
 bool hv_query_ext_cap(u64 cap_query);
+void *hv_map_memory(void *addr, unsigned long size);
+void hv_unmap_memory(void *addr);
 #else /* CONFIG_HYPERV */
 static inline bool hv_is_hyperv_initialized(void) { return false; }
 static inline bool hv_is_hibernation_supported(void) { return false; }
diff --git a/include/asm-generic/sections.h b/include/asm-generic/sections.h
index 1dfadb2e878d..690f741764e1 100644
--- a/include/asm-generic/sections.h
+++ b/include/asm-generic/sections.h
@@ -130,18 +130,24 @@ static inline bool init_section_intersects(void *virt, size_t size)
 
 /**
  * is_kernel_core_data - checks if the pointer address is located in the
- *                       .data section
+ *                       .data or .bss section
  *
  * @addr: address to check
  *
- * Returns: true if the address is located in .data, false otherwise.
+ * Returns: true if the address is located in .data or .bss, false otherwise.
  * Note: On some archs it may return true for core RODATA, and false
  * for others. But will always be true for core RW data.
  */
 static inline bool is_kernel_core_data(unsigned long addr)
 {
-        return addr >= (unsigned long)_sdata &&
-               addr < (unsigned long)_edata;
+        if (addr >= (unsigned long)_sdata && addr < (unsigned long)_edata)
+                return true;
+
+        if (addr >= (unsigned long)__bss_start &&
+            addr < (unsigned long)__bss_stop)
+                return true;
+
+        return false;
 }
 
 /**
@@ -193,12 +199,16 @@ static inline bool __is_kernel_text(unsigned long addr)
  * @addr: address to check
  *
  * Returns: true if the address is located in the kernel range, false otherwise.
- * Note: an internal helper, only check the range of _stext to _end.
+ * Note: an internal helper, check the range of _stext to _end,
+ * and range from __init_begin to __init_end, which can be outside
+ * of the _stext to _end range.
  */
 static inline bool __is_kernel(unsigned long addr)
 {
-        return addr >= (unsigned long)_stext &&
-               addr < (unsigned long)_end;
+        return ((addr >= (unsigned long)_stext &&
+                 addr < (unsigned long)_end) ||
+                (addr >= (unsigned long)__init_begin &&
+                 addr < (unsigned long)__init_end));
 }
 
 #endif /* _ASM_GENERIC_SECTIONS_H_ */
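The sections.h changes widen two range checks: is_kernel_core_data() now also accepts .bss addresses, and __is_kernel() additionally covers the __init_begin..__init_end range. A small, hypothetical sanity-check sketch of the new behaviour; the two globals and the helper function are illustrative only and not part of the series:

#include <linux/init.h>
#include <linux/printk.h>
#include <asm/sections.h>

static int demo_initialized = 42;      /* placed in .data */
static int demo_zeroed;                /* placed in .bss  */

static void __init sections_range_demo(void)
{
        /* Both are expected to be reported as core kernel data now. */
        pr_info("data symbol: %d, bss symbol: %d\n",
                is_kernel_core_data((unsigned long)&demo_initialized),
                is_kernel_core_data((unsigned long)&demo_zeroed));

        /*
         * Init sections can sit outside the _stext.._end range on some
         * layouts, yet __is_kernel() accepts them after this change.
         */
        pr_info("init marker: %d\n", __is_kernel((unsigned long)__init_begin));
}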