Diffstat (limited to 'include/asm-generic')
-rw-r--r-- | include/asm-generic/barrier.h | 65
-rw-r--r-- | include/asm-generic/bitops.h | 1
-rw-r--r-- | include/asm-generic/bitops/find.h | 188
-rw-r--r-- | include/asm-generic/bitops/instrumented-atomic.h | 3
-rw-r--r-- | include/asm-generic/bitops/instrumented-lock.h | 3
-rw-r--r-- | include/asm-generic/bitops/le.h | 64
-rw-r--r-- | include/asm-generic/error-injection.h | 4
-rw-r--r-- | include/asm-generic/futex.h | 31
-rw-r--r-- | include/asm-generic/hyperv-tlfs.h | 33
-rw-r--r-- | include/asm-generic/logic_io.h | 2
-rw-r--r-- | include/asm-generic/mshyperv.h | 6
-rw-r--r-- | include/asm-generic/pgalloc.h | 24
-rw-r--r-- | include/asm-generic/sections.h | 24
13 files changed, 110 insertions, 338 deletions
diff --git a/include/asm-generic/barrier.h b/include/asm-generic/barrier.h
index 640f09479bdf..fd7e8fbaeef1 100644
--- a/include/asm-generic/barrier.h
+++ b/include/asm-generic/barrier.h
@@ -14,6 +14,7 @@
 #ifndef __ASSEMBLY__
 
 #include <linux/compiler.h>
+#include <linux/kcsan-checks.h>
 #include <asm/rwonce.h>
 
 #ifndef nop
@@ -21,6 +22,31 @@
 #endif
 
 /*
+ * Architectures that want generic instrumentation can define __ prefixed
+ * variants of all barriers.
+ */
+
+#ifdef __mb
+#define mb()	do { kcsan_mb(); __mb(); } while (0)
+#endif
+
+#ifdef __rmb
+#define rmb()	do { kcsan_rmb(); __rmb(); } while (0)
+#endif
+
+#ifdef __wmb
+#define wmb()	do { kcsan_wmb(); __wmb(); } while (0)
+#endif
+
+#ifdef __dma_rmb
+#define dma_rmb()	do { kcsan_rmb(); __dma_rmb(); } while (0)
+#endif
+
+#ifdef __dma_wmb
+#define dma_wmb()	do { kcsan_wmb(); __dma_wmb(); } while (0)
+#endif
+
+/*
  * Force strict CPU ordering. And yes, this is required on UP too when we're
  * talking to devices.
  *
@@ -62,15 +88,15 @@
 #ifdef CONFIG_SMP
 
 #ifndef smp_mb
-#define smp_mb()	__smp_mb()
+#define smp_mb()	do { kcsan_mb(); __smp_mb(); } while (0)
 #endif
 
 #ifndef smp_rmb
-#define smp_rmb()	__smp_rmb()
+#define smp_rmb()	do { kcsan_rmb(); __smp_rmb(); } while (0)
 #endif
 
 #ifndef smp_wmb
-#define smp_wmb()	__smp_wmb()
+#define smp_wmb()	do { kcsan_wmb(); __smp_wmb(); } while (0)
 #endif
 
 #else	/* !CONFIG_SMP */
@@ -123,19 +149,19 @@ do {									\
 #ifdef CONFIG_SMP
 
 #ifndef smp_store_mb
-#define smp_store_mb(var, value)	__smp_store_mb(var, value)
+#define smp_store_mb(var, value)	do { kcsan_mb(); __smp_store_mb(var, value); } while (0)
 #endif
 
 #ifndef smp_mb__before_atomic
-#define smp_mb__before_atomic()	__smp_mb__before_atomic()
+#define smp_mb__before_atomic()	do { kcsan_mb(); __smp_mb__before_atomic(); } while (0)
 #endif
 
 #ifndef smp_mb__after_atomic
-#define smp_mb__after_atomic()	__smp_mb__after_atomic()
+#define smp_mb__after_atomic()	do { kcsan_mb(); __smp_mb__after_atomic(); } while (0)
 #endif
 
 #ifndef smp_store_release
-#define smp_store_release(p, v) __smp_store_release(p, v)
+#define smp_store_release(p, v) do { kcsan_release(); __smp_store_release(p, v); } while (0)
 #endif
 
 #ifndef smp_load_acquire
@@ -178,13 +204,13 @@ do {									\
 #endif	/* CONFIG_SMP */
 
 /* Barriers for virtual machine guests when talking to an SMP host */
-#define virt_mb() __smp_mb()
-#define virt_rmb() __smp_rmb()
-#define virt_wmb() __smp_wmb()
-#define virt_store_mb(var, value) __smp_store_mb(var, value)
-#define virt_mb__before_atomic() __smp_mb__before_atomic()
-#define virt_mb__after_atomic() __smp_mb__after_atomic()
-#define virt_store_release(p, v) __smp_store_release(p, v)
+#define virt_mb() do { kcsan_mb(); __smp_mb(); } while (0)
+#define virt_rmb() do { kcsan_rmb(); __smp_rmb(); } while (0)
+#define virt_wmb() do { kcsan_wmb(); __smp_wmb(); } while (0)
+#define virt_store_mb(var, value) do { kcsan_mb(); __smp_store_mb(var, value); } while (0)
+#define virt_mb__before_atomic() do { kcsan_mb(); __smp_mb__before_atomic(); } while (0)
+#define virt_mb__after_atomic() do { kcsan_mb(); __smp_mb__after_atomic(); } while (0)
+#define virt_store_release(p, v) do { kcsan_release(); __smp_store_release(p, v); } while (0)
 #define virt_load_acquire(p) __smp_load_acquire(p)
 
 /**
@@ -251,5 +277,16 @@ do {									\
 #define pmem_wmb()	wmb()
 #endif
 
+/*
+ * ioremap_wc() maps I/O memory as memory with write-combining attributes. For
+ * this kind of memory accesses, the CPU may wait for prior accesses to be
+ * merged with subsequent ones. In some situation, such wait is bad for the
+ * performance. io_stop_wc() can be used to prevent the merging of
+ * write-combining memory accesses before this macro with those after it.
+ */
+#ifndef io_stop_wc
+#define io_stop_wc() do { } while (0)
+#endif
+
 #endif /* !__ASSEMBLY__ */
 #endif /* __ASM_GENERIC_BARRIER_H */
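The barrier.h change above is uniform: every barrier first calls a kcsan_*() hook so the Kernel Concurrency Sanitizer can model the barrier's ordering effect, then runs the real __-prefixed primitive. Below is a minimal user-space sketch of that wrapper shape, not the kernel code: the kcsan_note_*() functions are hypothetical stand-ins for kcsan_mb()/kcsan_rmb(), and GCC/Clang __atomic_thread_fence() builtins stand in for the arch barriers.

#include <stdio.h>

static void kcsan_note_mb(void)  { puts("checker: full barrier"); } /* stand-in for kcsan_mb() */
static void kcsan_note_rmb(void) { puts("checker: read barrier"); } /* stand-in for kcsan_rmb() */

#define __mb()  __atomic_thread_fence(__ATOMIC_SEQ_CST) /* the "real" barrier */
#define __rmb() __atomic_thread_fence(__ATOMIC_ACQUIRE)

/* Same shape as the kernel macros: instrument first, then order. */
#define mb()  do { kcsan_note_mb();  __mb();  } while (0)
#define rmb() do { kcsan_note_rmb(); __rmb(); } while (0)

int main(void)
{
        mb();   /* the do/while (0) keeps this usable as one statement */
        rmb();
        return 0;
}

The do { ... } while (0) wrapper is what lets two calls be glued together while the macro still behaves like a single C statement inside if/else bodies.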
diff --git a/include/asm-generic/bitops.h b/include/asm-generic/bitops.h
index df9b5bc3d282..a47b8a71d6fe 100644
--- a/include/asm-generic/bitops.h
+++ b/include/asm-generic/bitops.h
@@ -20,7 +20,6 @@
 #include <asm-generic/bitops/fls.h>
 #include <asm-generic/bitops/__fls.h>
 #include <asm-generic/bitops/fls64.h>
-#include <asm-generic/bitops/find.h>
 
 #ifndef _LINUX_BITOPS_H
 #error only <linux/bitops.h> can be included directly
diff --git a/include/asm-generic/bitops/find.h b/include/asm-generic/bitops/find.h
deleted file mode 100644
index 0d132ee2a291..000000000000
--- a/include/asm-generic/bitops/find.h
+++ /dev/null
@@ -1,188 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _ASM_GENERIC_BITOPS_FIND_H_
-#define _ASM_GENERIC_BITOPS_FIND_H_
-
-extern unsigned long _find_next_bit(const unsigned long *addr1,
-		const unsigned long *addr2, unsigned long nbits,
-		unsigned long start, unsigned long invert, unsigned long le);
-extern unsigned long _find_first_bit(const unsigned long *addr, unsigned long size);
-extern unsigned long _find_first_zero_bit(const unsigned long *addr, unsigned long size);
-extern unsigned long _find_last_bit(const unsigned long *addr, unsigned long size);
-
-#ifndef find_next_bit
-/**
- * find_next_bit - find the next set bit in a memory region
- * @addr: The address to base the search on
- * @offset: The bitnumber to start searching at
- * @size: The bitmap size in bits
- *
- * Returns the bit number for the next set bit
- * If no bits are set, returns @size.
- */
-static inline
-unsigned long find_next_bit(const unsigned long *addr, unsigned long size,
-			    unsigned long offset)
-{
-	if (small_const_nbits(size)) {
-		unsigned long val;
-
-		if (unlikely(offset >= size))
-			return size;
-
-		val = *addr & GENMASK(size - 1, offset);
-		return val ? __ffs(val) : size;
-	}
-
-	return _find_next_bit(addr, NULL, size, offset, 0UL, 0);
-}
-#endif
-
-#ifndef find_next_and_bit
-/**
- * find_next_and_bit - find the next set bit in both memory regions
- * @addr1: The first address to base the search on
- * @addr2: The second address to base the search on
- * @offset: The bitnumber to start searching at
- * @size: The bitmap size in bits
- *
- * Returns the bit number for the next set bit
- * If no bits are set, returns @size.
- */
-static inline
-unsigned long find_next_and_bit(const unsigned long *addr1,
-		const unsigned long *addr2, unsigned long size,
-		unsigned long offset)
-{
-	if (small_const_nbits(size)) {
-		unsigned long val;
-
-		if (unlikely(offset >= size))
-			return size;
-
-		val = *addr1 & *addr2 & GENMASK(size - 1, offset);
-		return val ? __ffs(val) : size;
-	}
-
-	return _find_next_bit(addr1, addr2, size, offset, 0UL, 0);
-}
-#endif
-
-#ifndef find_next_zero_bit
-/**
- * find_next_zero_bit - find the next cleared bit in a memory region
- * @addr: The address to base the search on
- * @offset: The bitnumber to start searching at
- * @size: The bitmap size in bits
- *
- * Returns the bit number of the next zero bit
- * If no bits are zero, returns @size.
- */
-static inline
-unsigned long find_next_zero_bit(const unsigned long *addr, unsigned long size,
-				 unsigned long offset)
-{
-	if (small_const_nbits(size)) {
-		unsigned long val;
-
-		if (unlikely(offset >= size))
-			return size;
-
-		val = *addr | ~GENMASK(size - 1, offset);
-		return val == ~0UL ? size : ffz(val);
-	}
-
-	return _find_next_bit(addr, NULL, size, offset, ~0UL, 0);
-}
-#endif
-
-#ifdef CONFIG_GENERIC_FIND_FIRST_BIT
-
-/**
- * find_first_bit - find the first set bit in a memory region
- * @addr: The address to start the search at
- * @size: The maximum number of bits to search
- *
- * Returns the bit number of the first set bit.
- * If no bits are set, returns @size.
- */
-static inline
-unsigned long find_first_bit(const unsigned long *addr, unsigned long size)
-{
-	if (small_const_nbits(size)) {
-		unsigned long val = *addr & GENMASK(size - 1, 0);
-
-		return val ? __ffs(val) : size;
-	}
-
-	return _find_first_bit(addr, size);
-}
-
-/**
- * find_first_zero_bit - find the first cleared bit in a memory region
- * @addr: The address to start the search at
- * @size: The maximum number of bits to search
- *
- * Returns the bit number of the first cleared bit.
- * If no bits are zero, returns @size.
- */
-static inline
-unsigned long find_first_zero_bit(const unsigned long *addr, unsigned long size)
-{
-	if (small_const_nbits(size)) {
-		unsigned long val = *addr | ~GENMASK(size - 1, 0);
-
-		return val == ~0UL ? size : ffz(val);
-	}
-
-	return _find_first_zero_bit(addr, size);
-}
-#else /* CONFIG_GENERIC_FIND_FIRST_BIT */
-
-#ifndef find_first_bit
-#define find_first_bit(addr, size) find_next_bit((addr), (size), 0)
-#endif
-#ifndef find_first_zero_bit
-#define find_first_zero_bit(addr, size) find_next_zero_bit((addr), (size), 0)
-#endif
-
-#endif /* CONFIG_GENERIC_FIND_FIRST_BIT */
-
-#ifndef find_last_bit
-/**
- * find_last_bit - find the last set bit in a memory region
- * @addr: The address to start the search at
- * @size: The number of bits to search
- *
- * Returns the bit number of the last set bit, or size.
- */
-static inline
-unsigned long find_last_bit(const unsigned long *addr, unsigned long size)
-{
-	if (small_const_nbits(size)) {
-		unsigned long val = *addr & GENMASK(size - 1, 0);
-
-		return val ? __fls(val) : size;
-	}
-
-	return _find_last_bit(addr, size);
-}
-#endif
-
-/**
- * find_next_clump8 - find next 8-bit clump with set bits in a memory region
- * @clump: location to store copy of found clump
- * @addr: address to base the search on
- * @size: bitmap size in number of bits
- * @offset: bit offset at which to start searching
- *
- * Returns the bit offset for the next set clump; the found clump value is
- * copied to the location pointed by @clump. If no bits are set, returns @size.
- */
-extern unsigned long find_next_clump8(unsigned long *clump,
-				      const unsigned long *addr,
-				      unsigned long size, unsigned long offset);
-
-#define find_first_clump8(clump, bits, size) \
-	find_next_clump8((clump), (bits), (size), 0)
-
-#endif /*_ASM_GENERIC_BITOPS_FIND_H_ */
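The deleted helpers (relocated out of asm-generic by this series rather than dropped, which is why bitops.h above stops including find.h) all share one trick: when size is a compile-time constant that fits in a single machine word, the whole search collapses to one mask plus one count-trailing-zeros, with no call into lib/find_bit.c. A freestanding sketch of that fast path, where GENMASK and __builtin_ctzl stand in for the kernel's GENMASK() and __ffs():

#include <stdio.h>

#define BITS_PER_LONG (8 * (int)sizeof(unsigned long))
/* bits [l, h] set, everything else clear - same contract as the kernel macro */
#define GENMASK(h, l) \
        (((~0UL) << (l)) & (~0UL >> (BITS_PER_LONG - 1 - (h))))

static unsigned long find_next_bit_small(unsigned long word,
                                         unsigned long size,
                                         unsigned long offset)
{
        unsigned long val;

        if (offset >= size)
                return size;

        val = word & GENMASK(size - 1, offset); /* keep bits [offset, size) */
        return val ? (unsigned long)__builtin_ctzl(val) : size;
}

int main(void)
{
        /* in 0b1001, the first set bit at or after offset 1 is bit 3 */
        printf("%lu\n", find_next_bit_small(0x9, 8, 1)); /* prints 3 */
        return 0;
}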
diff --git a/include/asm-generic/bitops/instrumented-atomic.h b/include/asm-generic/bitops/instrumented-atomic.h
index 81915dcd4b4e..c90192b1c755 100644
--- a/include/asm-generic/bitops/instrumented-atomic.h
+++ b/include/asm-generic/bitops/instrumented-atomic.h
@@ -67,6 +67,7 @@ static inline void change_bit(long nr, volatile unsigned long *addr)
  */
 static inline bool test_and_set_bit(long nr, volatile unsigned long *addr)
 {
+	kcsan_mb();
 	instrument_atomic_read_write(addr + BIT_WORD(nr), sizeof(long));
 	return arch_test_and_set_bit(nr, addr);
 }
@@ -80,6 +81,7 @@ static inline bool test_and_set_bit(long nr, volatile unsigned long *addr)
  */
 static inline bool test_and_clear_bit(long nr, volatile unsigned long *addr)
 {
+	kcsan_mb();
 	instrument_atomic_read_write(addr + BIT_WORD(nr), sizeof(long));
 	return arch_test_and_clear_bit(nr, addr);
 }
@@ -93,6 +95,7 @@ static inline bool test_and_clear_bit(long nr, volatile unsigned long *addr)
  */
 static inline bool test_and_change_bit(long nr, volatile unsigned long *addr)
 {
+	kcsan_mb();
 	instrument_atomic_read_write(addr + BIT_WORD(nr), sizeof(long));
 	return arch_test_and_change_bit(nr, addr);
 }
diff --git a/include/asm-generic/bitops/instrumented-lock.h b/include/asm-generic/bitops/instrumented-lock.h
index 75ef606f7145..eb64bd4f11f3 100644
--- a/include/asm-generic/bitops/instrumented-lock.h
+++ b/include/asm-generic/bitops/instrumented-lock.h
@@ -22,6 +22,7 @@
  */
 static inline void clear_bit_unlock(long nr, volatile unsigned long *addr)
 {
+	kcsan_release();
 	instrument_atomic_write(addr + BIT_WORD(nr), sizeof(long));
 	arch_clear_bit_unlock(nr, addr);
 }
@@ -37,6 +38,7 @@ static inline void clear_bit_unlock(long nr, volatile unsigned long *addr)
  */
 static inline void __clear_bit_unlock(long nr, volatile unsigned long *addr)
 {
+	kcsan_release();
 	instrument_write(addr + BIT_WORD(nr), sizeof(long));
 	arch___clear_bit_unlock(nr, addr);
 }
@@ -71,6 +73,7 @@ static inline bool test_and_set_bit_lock(long nr, volatile unsigned long *addr)
 static inline bool
 clear_bit_unlock_is_negative_byte(long nr, volatile unsigned long *addr)
 {
+	kcsan_release();
 	instrument_atomic_write(addr + BIT_WORD(nr), sizeof(long));
 	return arch_clear_bit_unlock_is_negative_byte(nr, addr);
 }
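The annotations match the ordering the arch primitives already provide: test_and_*() bit operations are fully ordered read-modify-writes (hence kcsan_mb()), while the *_unlock() operations only need release semantics (hence kcsan_release()). A C11 sketch of the bit-lock protocol those annotations describe - the function names here are ours, not the kernel's:

#include <stdatomic.h>
#include <stdbool.h>

static bool bit_trylock(atomic_ulong *word, unsigned int bit)
{
        unsigned long mask = 1UL << bit;

        /* acquire: accesses after a successful lock cannot move before it */
        return !(atomic_fetch_or_explicit(word, mask,
                                          memory_order_acquire) & mask);
}

static void bit_unlock(atomic_ulong *word, unsigned int bit)
{
        /* release: writes made while holding the bit cannot move past it */
        atomic_fetch_and_explicit(word, ~(1UL << bit),
                                  memory_order_release);
}

int main(void)
{
        atomic_ulong word = 0;

        if (bit_trylock(&word, 0))
                bit_unlock(&word, 0);
        return 0;
}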
diff --git a/include/asm-generic/bitops/le.h b/include/asm-generic/bitops/le.h
index 5a28629cbf4d..d51beff60375 100644
--- a/include/asm-generic/bitops/le.h
+++ b/include/asm-generic/bitops/le.h
@@ -2,83 +2,19 @@
 #ifndef _ASM_GENERIC_BITOPS_LE_H_
 #define _ASM_GENERIC_BITOPS_LE_H_
 
-#include <asm-generic/bitops/find.h>
 #include <asm/types.h>
 #include <asm/byteorder.h>
-#include <linux/swab.h>
 
 #if defined(__LITTLE_ENDIAN)
 
 #define BITOP_LE_SWIZZLE	0
 
-static inline unsigned long find_next_zero_bit_le(const void *addr,
-		unsigned long size, unsigned long offset)
-{
-	return find_next_zero_bit(addr, size, offset);
-}
-
-static inline unsigned long find_next_bit_le(const void *addr,
-		unsigned long size, unsigned long offset)
-{
-	return find_next_bit(addr, size, offset);
-}
-
-static inline unsigned long find_first_zero_bit_le(const void *addr,
-		unsigned long size)
-{
-	return find_first_zero_bit(addr, size);
-}
-
 #elif defined(__BIG_ENDIAN)
 
 #define BITOP_LE_SWIZZLE	((BITS_PER_LONG-1) & ~0x7)
 
-#ifndef find_next_zero_bit_le
-static inline
-unsigned long find_next_zero_bit_le(const void *addr, unsigned
-		long size, unsigned long offset)
-{
-	if (small_const_nbits(size)) {
-		unsigned long val = *(const unsigned long *)addr;
-
-		if (unlikely(offset >= size))
-			return size;
-
-		val = swab(val) | ~GENMASK(size - 1, offset);
-		return val == ~0UL ? size : ffz(val);
-	}
-
-	return _find_next_bit(addr, NULL, size, offset, ~0UL, 1);
-}
-#endif
-
-#ifndef find_next_bit_le
-static inline
-unsigned long find_next_bit_le(const void *addr, unsigned
-		long size, unsigned long offset)
-{
-	if (small_const_nbits(size)) {
-		unsigned long val = *(const unsigned long *)addr;
-
-		if (unlikely(offset >= size))
-			return size;
-
-		val = swab(val) & GENMASK(size - 1, offset);
-		return val ? __ffs(val) : size;
-	}
-
-	return _find_next_bit(addr, NULL, size, offset, 0UL, 1);
-}
 #endif
 
-#ifndef find_first_zero_bit_le
-#define find_first_zero_bit_le(addr, size) \
-	find_next_zero_bit_le((addr), (size), 0)
-#endif
-
-#else
-#error "Please fix <asm/byteorder.h>"
-#endif
 
 static inline int test_bit_le(int nr, const void *addr)
 {
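What survives in le.h is BITOP_LE_SWIZZLE, the XOR constant that remaps little-endian bit numbering onto a big-endian machine's native long-word numbering one byte at a time. A small sketch of the arithmetic, assuming a 64-bit long and using our own BITOP_BE_SWIZZLE name for the same value:

#include <stdio.h>

/* (BITS_PER_LONG - 1) & ~0x7 on a 64-bit big-endian machine == 56 */
#define BITOP_BE_SWIZZLE ((64 - 1) & ~0x7)

int main(void)
{
        /* LE bit 0 lives in the last byte of a big-endian long: bit 56 */
        printf("%d\n", 0 ^ BITOP_BE_SWIZZLE);                   /* 56 */
        /* LE bit 7 -> bit 63 (same byte), LE bit 8 -> bit 48 (next byte) */
        printf("%d %d\n", 7 ^ BITOP_BE_SWIZZLE, 8 ^ BITOP_BE_SWIZZLE);
        return 0;
}

The XOR flips the byte index within the word while leaving the bit position inside each byte alone, which is exactly what a byte-wise endianness swap needs.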
diff --git a/include/asm-generic/error-injection.h b/include/asm-generic/error-injection.h
index 7ddd9dc10ce9..fbca56bd9cbc 100644
--- a/include/asm-generic/error-injection.h
+++ b/include/asm-generic/error-injection.h
@@ -20,7 +20,7 @@ struct pt_regs;
 
 #ifdef CONFIG_FUNCTION_ERROR_INJECTION
 /*
- * Whitelist ganerating macro. Specify functions which can be
+ * Whitelist generating macro. Specify functions which can be
  * error-injectable using this macro.
  */
 #define ALLOW_ERROR_INJECTION(fname, _etype)				\
@@ -29,7 +29,7 @@ static struct error_injection_entry __used			\
 	_eil_addr_##fname = {						\
 		.addr = (unsigned long)fname,				\
 		.etype = EI_ETYPE_##_etype,				\
-	};
+	}
 
 void override_function_with_return(struct pt_regs *regs);
 #else
diff --git a/include/asm-generic/futex.h b/include/asm-generic/futex.h
index f4c3470480c7..2a19215baae5 100644
--- a/include/asm-generic/futex.h
+++ b/include/asm-generic/futex.h
@@ -6,15 +6,22 @@
 #include <linux/uaccess.h>
 #include <asm/errno.h>
 
+#ifndef futex_atomic_cmpxchg_inatomic
 #ifndef CONFIG_SMP
 /*
  * The following implementation only for uniprocessor machines.
  * It relies on preempt_disable() ensuring mutual exclusion.
  *
  */
+#define futex_atomic_cmpxchg_inatomic(uval, uaddr, oldval, newval)	\
+	futex_atomic_cmpxchg_inatomic_local(uval, uaddr, oldval, newval)
+#define arch_futex_atomic_op_inuser(op, oparg, oval, uaddr)	\
+	futex_atomic_op_inuser_local(op, oparg, oval, uaddr)
+#endif /* CONFIG_SMP */
+#endif
 
 /**
- * arch_futex_atomic_op_inuser() - Atomic arithmetic operation with constant
+ * futex_atomic_op_inuser_local() - Atomic arithmetic operation with constant
  *			  argument and comparison of the previous
  *			  futex value with another constant.
  *
@@ -28,7 +35,7 @@
  * -ENOSYS - Operation not supported
  */
 static inline int
-arch_futex_atomic_op_inuser(int op, u32 oparg, int *oval, u32 __user *uaddr)
+futex_atomic_op_inuser_local(int op, u32 oparg, int *oval, u32 __user *uaddr)
 {
 	int oldval, ret;
 	u32 tmp;
@@ -75,7 +82,7 @@ out_pagefault_enable:
 }
 
 /**
- * futex_atomic_cmpxchg_inatomic() - Compare and exchange the content of the
+ * futex_atomic_cmpxchg_inatomic_local() - Compare and exchange the content of the
  *				uaddr with newval if the current value is
  *				oldval.
  * @uval: pointer to store content of @uaddr
@@ -87,10 +94,9 @@ out_pagefault_enable:
  *  0 - On success
  * -EFAULT - User access resulted in a page fault
  * -EAGAIN - Atomic operation was unable to complete due to contention
- * -ENOSYS - Function not implemented (only if !HAVE_FUTEX_CMPXCHG)
  */
 static inline int
-futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
+futex_atomic_cmpxchg_inatomic_local(u32 *uval, u32 __user *uaddr,
 			      u32 oldval, u32 newval)
 {
 	u32 val;
@@ -112,19 +118,4 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
 	return 0;
 }
 
-#else
-static inline int
-arch_futex_atomic_op_inuser(int op, u32 oparg, int *oval, u32 __user *uaddr)
-{
-	return -ENOSYS;
-}
-
-static inline int
-futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
-			      u32 oldval, u32 newval)
-{
-	return -ENOSYS;
-}
-
-#endif /* CONFIG_SMP */
 #endif
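The futex.h rewrite keeps only the uniprocessor fallback and drops the -ENOSYS stubs: with preemption disabled, a plain load/compare/store on the user word is atomic with respect to the single CPU, so no hardware cmpxchg instruction is required. A user-space sketch of that logic, with fault handling and the preemption control reduced to comments:

#include <stdint.h>
#include <stdio.h>

static int futex_cmpxchg_local(uint32_t *uval, uint32_t *uaddr,
                               uint32_t oldval, uint32_t newval)
{
        uint32_t val;

        /* preempt_disable() in the kernel: nothing can interleave on UP */
        val = *uaddr;                   /* get_user() in the kernel */
        if (val == oldval)
                *uaddr = newval;        /* put_user() in the kernel */
        /* preempt_enable() */

        *uval = val;                    /* report the value we observed */
        return 0;
}

int main(void)
{
        uint32_t futex = 1, seen;

        futex_cmpxchg_local(&seen, &futex, 1, 2);
        printf("seen=%u now=%u\n", seen, futex);        /* seen=1 now=2 */
        return 0;
}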
diff --git a/include/asm-generic/hyperv-tlfs.h b/include/asm-generic/hyperv-tlfs.h
index 8ed6733d5146..8f97c2927bee 100644
--- a/include/asm-generic/hyperv-tlfs.h
+++ b/include/asm-generic/hyperv-tlfs.h
@@ -540,39 +540,6 @@ enum hv_interrupt_source {
 	HV_INTERRUPT_SOURCE_IOAPIC,
 };
 
-union hv_msi_address_register {
-	u32 as_uint32;
-	struct {
-		u32 reserved1:2;
-		u32 destination_mode:1;
-		u32 redirection_hint:1;
-		u32 reserved2:8;
-		u32 destination_id:8;
-		u32 msi_base:12;
-	};
-} __packed;
-
-union hv_msi_data_register {
-	u32 as_uint32;
-	struct {
-		u32 vector:8;
-		u32 delivery_mode:3;
-		u32 reserved1:3;
-		u32 level_assert:1;
-		u32 trigger_mode:1;
-		u32 reserved2:16;
-	};
-} __packed;
-
-/* HvRetargetDeviceInterrupt hypercall */
-union hv_msi_entry {
-	u64 as_uint64;
-	struct {
-		union hv_msi_address_register address;
-		union hv_msi_data_register data;
-	} __packed;
-};
-
 union hv_ioapic_rte {
 	u64 as_uint64;
diff --git a/include/asm-generic/logic_io.h b/include/asm-generic/logic_io.h
index a53116b8c57e..8a59b6e567df 100644
--- a/include/asm-generic/logic_io.h
+++ b/include/asm-generic/logic_io.h
@@ -34,7 +34,7 @@ void __iomem *ioremap(phys_addr_t offset, size_t size);
 
 #define iounmap iounmap
-void iounmap(void __iomem *addr);
+void iounmap(void volatile __iomem *addr);
 
 #define __raw_readb __raw_readb
 u8 __raw_readb(const volatile void __iomem *addr);
diff --git a/include/asm-generic/mshyperv.h b/include/asm-generic/mshyperv.h
index 3e2248ac328e..c08758b6b364 100644
--- a/include/asm-generic/mshyperv.h
+++ b/include/asm-generic/mshyperv.h
@@ -49,8 +49,8 @@ struct ms_hyperv_info {
 };
 extern struct ms_hyperv_info ms_hyperv;
 
-extern void __percpu **hyperv_pcpu_input_arg;
-extern void __percpu **hyperv_pcpu_output_arg;
+extern void * __percpu *hyperv_pcpu_input_arg;
+extern void * __percpu *hyperv_pcpu_output_arg;
 
 extern u64 hv_do_hypercall(u64 control, void *inputaddr, void *outputaddr);
 extern u64 hv_do_fast_hypercall8(u16 control, u64 input8);
@@ -269,6 +269,8 @@ bool hv_isolation_type_snp(void);
 u64 hv_ghcb_hypercall(u64 control, void *input, void *output, u32 input_size);
 void hyperv_cleanup(void);
 bool hv_query_ext_cap(u64 cap_query);
+void *hv_map_memory(void *addr, unsigned long size);
+void hv_unmap_memory(void *addr);
 #else /* CONFIG_HYPERV */
 static inline bool hv_is_hyperv_initialized(void) { return false; }
 static inline bool hv_is_hibernation_supported(void) { return false; }
diff --git a/include/asm-generic/pgalloc.h b/include/asm-generic/pgalloc.h
index 02932efad3ab..977bea16cf1b 100644
--- a/include/asm-generic/pgalloc.h
+++ b/include/asm-generic/pgalloc.h
@@ -147,6 +147,15 @@ static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
 
 #if CONFIG_PGTABLE_LEVELS > 3
 
+static inline pud_t *__pud_alloc_one(struct mm_struct *mm, unsigned long addr)
+{
+	gfp_t gfp = GFP_PGTABLE_USER;
+
+	if (mm == &init_mm)
+		gfp = GFP_PGTABLE_KERNEL;
+	return (pud_t *)get_zeroed_page(gfp);
+}
+
 #ifndef __HAVE_ARCH_PUD_ALLOC_ONE
 /**
  * pud_alloc_one - allocate a page for PUD-level page table
@@ -159,20 +168,23 @@ static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
  */
 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
 {
-	gfp_t gfp = GFP_PGTABLE_USER;
-
-	if (mm == &init_mm)
-		gfp = GFP_PGTABLE_KERNEL;
-	return (pud_t *)get_zeroed_page(gfp);
+	return __pud_alloc_one(mm, addr);
 }
 #endif
 
-static inline void pud_free(struct mm_struct *mm, pud_t *pud)
+static inline void __pud_free(struct mm_struct *mm, pud_t *pud)
 {
 	BUG_ON((unsigned long)pud & (PAGE_SIZE-1));
 	free_page((unsigned long)pud);
 }
 
+#ifndef __HAVE_ARCH_PUD_FREE
+static inline void pud_free(struct mm_struct *mm, pud_t *pud)
+{
+	__pud_free(mm, pud);
+}
+#endif
+
 #endif /* CONFIG_PGTABLE_LEVELS > 3 */
 
 #ifndef __HAVE_ARCH_PGD_FREE
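The pgalloc.h hunk applies a common kernel idiom: the generic body moves into a __-prefixed helper that stays available to everyone, and the public function becomes a default that an architecture can replace by defining __HAVE_ARCH_PUD_ALLOC_ONE or __HAVE_ARCH_PUD_FREE, while its own version can still call the helper for the shared parts. A condensed sketch of that shape, with simplified types and our own _sketch names:

#include <stddef.h>

/* generic helper: always compiled, reusable by arch overrides */
static inline void *pud_page_alloc_sketch(void)
{
        return NULL;    /* stands in for get_zeroed_page(GFP_PGTABLE_*) */
}

/* overridable default: an arch that defines the __HAVE_ARCH_ guard
 * provides its own pud_alloc_one_sketch() and this one compiles out */
#ifndef __HAVE_ARCH_PUD_ALLOC_ONE
static inline void *pud_alloc_one_sketch(void)
{
        return pud_page_alloc_sketch();
}
#endif

int main(void)
{
        return pud_alloc_one_sketch() == NULL ? 0 : 1;
}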
diff --git a/include/asm-generic/sections.h b/include/asm-generic/sections.h
index 1dfadb2e878d..690f741764e1 100644
--- a/include/asm-generic/sections.h
+++ b/include/asm-generic/sections.h
@@ -130,18 +130,24 @@ static inline bool init_section_intersects(void *virt, size_t size)
 
 /**
  * is_kernel_core_data - checks if the pointer address is located in the
- *			 .data section
+ *			 .data or .bss section
  *
  * @addr: address to check
  *
- * Returns: true if the address is located in .data, false otherwise.
+ * Returns: true if the address is located in .data or .bss, false otherwise.
  * Note: On some archs it may return true for core RODATA, and false
  * for others. But will always be true for core RW data.
  */
 static inline bool is_kernel_core_data(unsigned long addr)
 {
-	return addr >= (unsigned long)_sdata &&
-	       addr < (unsigned long)_edata;
+	if (addr >= (unsigned long)_sdata && addr < (unsigned long)_edata)
+		return true;
+
+	if (addr >= (unsigned long)__bss_start &&
+	    addr < (unsigned long)__bss_stop)
+		return true;
+
+	return false;
 }
 
 /**
@@ -193,12 +199,16 @@ static inline bool __is_kernel_text(unsigned long addr)
  * @addr: address to check
  *
  * Returns: true if the address is located in the kernel range, false otherwise.
- * Note: an internal helper, only check the range of _stext to _end.
+ * Note: an internal helper, check the range of _stext to _end,
+ * and range from __init_begin to __init_end, which can be outside
+ * of the _stext to _end range.
  */
 static inline bool __is_kernel(unsigned long addr)
 {
-	return addr >= (unsigned long)_stext &&
-	       addr < (unsigned long)_end;
+	return ((addr >= (unsigned long)_stext &&
+	         addr < (unsigned long)_end) ||
+	        (addr >= (unsigned long)__init_begin &&
+	         addr < (unsigned long)__init_end));
 }
 #endif /* _ASM_GENERIC_SECTIONS_H_ */
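Both sections.h changes widen a membership test from one half-open address range to two: is_kernel_core_data() now also accepts .bss, and __is_kernel() also accepts the init range, which may lie outside _stext.._end. A freestanding sketch of the pattern, with made-up section boundary values standing in for the real linker symbols:

#include <stdbool.h>
#include <stdio.h>

static bool in_range(unsigned long addr, unsigned long start, unsigned long end)
{
        return addr >= start && addr < end;     /* half-open: [start, end) */
}

int main(void)
{
        /* fake _sdata/_edata and __bss_start/__bss_stop boundaries */
        unsigned long sdata = 0x1000, edata = 0x2000;
        unsigned long bss_start = 0x3000, bss_stop = 0x4000;
        unsigned long addr = 0x3800;            /* inside the fake .bss */

        /* mirrors the new is_kernel_core_data(): .data range OR .bss range */
        printf("%d\n", in_range(addr, sdata, edata) ||
                       in_range(addr, bss_start, bss_stop));    /* prints 1 */
        return 0;
}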