Diffstat (limited to 'arch/x86/include/asm')
-rw-r--r--  arch/x86/include/asm/cpufeatures.h    |   3
-rw-r--r--  arch/x86/include/asm/kvm_page_track.h |   1
-rw-r--r--  arch/x86/include/asm/pgtable-3level.h |   3
-rw-r--r--  arch/x86/include/asm/pgtable.h        |   2
-rw-r--r--  arch/x86/include/asm/pgtable_types.h  |   4
-rw-r--r--  arch/x86/include/asm/pkeys.h          |  15
-rw-r--r--  arch/x86/include/asm/purgatory.h      |  20
-rw-r--r--  arch/x86/include/asm/tlbflush.h       |   2
-rw-r--r--  arch/x86/include/asm/uaccess.h        |  70
-rw-r--r--  arch/x86/include/asm/uaccess_32.h     | 127
-rw-r--r--  arch/x86/include/asm/uaccess_64.h     | 128
11 files changed, 80 insertions, 295 deletions
diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h
index 4e7772387c6e..b04bb6dfed7f 100644
--- a/arch/x86/include/asm/cpufeatures.h
+++ b/arch/x86/include/asm/cpufeatures.h
@@ -289,7 +289,8 @@
 #define X86_FEATURE_PKU		(16*32+ 3) /* Protection Keys for Userspace */
 #define X86_FEATURE_OSPKE	(16*32+ 4) /* OS Protection Keys Enable */
 #define X86_FEATURE_AVX512_VPOPCNTDQ (16*32+14) /* POPCNT for vectors of DW/QW */
-#define X86_FEATURE_RDPID	(16*32+ 22) /* RDPID instruction */
+#define X86_FEATURE_LA57	(16*32+16) /* 5-level page tables */
+#define X86_FEATURE_RDPID	(16*32+22) /* RDPID instruction */
 
 /* AMD-defined CPU features, CPUID level 0x80000007 (ebx), word 17 */
 #define X86_FEATURE_OVERFLOW_RECOV (17*32+0) /* MCA overflow recovery support */
diff --git a/arch/x86/include/asm/kvm_page_track.h b/arch/x86/include/asm/kvm_page_track.h
index d74747b031ec..c4eda791f877 100644
--- a/arch/x86/include/asm/kvm_page_track.h
+++ b/arch/x86/include/asm/kvm_page_track.h
@@ -46,6 +46,7 @@ struct kvm_page_track_notifier_node {
 };
 
 void kvm_page_track_init(struct kvm *kvm);
+void kvm_page_track_cleanup(struct kvm *kvm);
 
 void kvm_page_track_free_memslot(struct kvm_memory_slot *free,
				 struct kvm_memory_slot *dont);
diff --git a/arch/x86/include/asm/pgtable-3level.h b/arch/x86/include/asm/pgtable-3level.h
index 72277b1028a5..50d35e3185f5 100644
--- a/arch/x86/include/asm/pgtable-3level.h
+++ b/arch/x86/include/asm/pgtable-3level.h
@@ -121,12 +121,9 @@ static inline void native_pmd_clear(pmd_t *pmd)
	*(tmp + 1) = 0;
 }
 
-#if !defined(CONFIG_SMP) || (defined(CONFIG_HIGHMEM64G) && \
-			     defined(CONFIG_PARAVIRT))
 static inline void native_pud_clear(pud_t *pudp)
 {
 }
-#endif
 
 static inline void pud_clear(pud_t *pudp)
 {
diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
index 1cfb36b8c024..585ee0d42d18 100644
--- a/arch/x86/include/asm/pgtable.h
+++ b/arch/x86/include/asm/pgtable.h
@@ -62,7 +62,7 @@ extern struct mm_struct *pgd_page_get_mm(struct page *page);
 # define set_pud(pudp, pud)		native_set_pud(pudp, pud)
 #endif
 
-#ifndef __PAGETABLE_PMD_FOLDED
+#ifndef __PAGETABLE_PUD_FOLDED
 #define pud_clear(pud)			native_pud_clear(pud)
 #endif
 
diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h
index 8b4de22d6429..62484333673d 100644
--- a/arch/x86/include/asm/pgtable_types.h
+++ b/arch/x86/include/asm/pgtable_types.h
@@ -273,6 +273,8 @@ static inline pgdval_t pgd_flags(pgd_t pgd)
 }
 
 #if CONFIG_PGTABLE_LEVELS > 3
+#include <asm-generic/5level-fixup.h>
+
 typedef struct { pudval_t pud; } pud_t;
 
 static inline pud_t native_make_pud(pmdval_t val)
@@ -285,6 +287,7 @@ static inline pudval_t native_pud_val(pud_t pud)
 	return pud.pud;
 }
 #else
+#define __ARCH_USE_5LEVEL_HACK
 #include <asm-generic/pgtable-nopud.h>
 
 static inline pudval_t native_pud_val(pud_t pud)
@@ -306,6 +309,7 @@ static inline pmdval_t native_pmd_val(pmd_t pmd)
 	return pmd.pmd;
 }
 #else
+#define __ARCH_USE_5LEVEL_HACK
 #include <asm-generic/pgtable-nopmd.h>
 
 static inline pmdval_t native_pmd_val(pmd_t pmd)
diff --git a/arch/x86/include/asm/pkeys.h b/arch/x86/include/asm/pkeys.h
index 34684adb6899..b3b09b98896d 100644
--- a/arch/x86/include/asm/pkeys.h
+++ b/arch/x86/include/asm/pkeys.h
@@ -46,6 +46,15 @@ extern int __arch_set_user_pkey_access(struct task_struct *tsk, int pkey,
 
 static inline bool mm_pkey_is_allocated(struct mm_struct *mm, int pkey)
 {
+	/*
+	 * "Allocated" pkeys are those that have been returned
+	 * from pkey_alloc().  pkey 0 is special, and never
+	 * returned from pkey_alloc().
+	 */
+	if (pkey <= 0)
+		return false;
+	if (pkey >= arch_max_pkey())
+		return false;
 	return mm_pkey_allocation_map(mm) & (1U << pkey);
 }
 
@@ -82,12 +91,6 @@ int mm_pkey_alloc(struct mm_struct *mm)
 static inline
 int mm_pkey_free(struct mm_struct *mm, int pkey)
 {
-	/*
-	 * pkey 0 is special, always allocated and can never
-	 * be freed.
-	 */
-	if (!pkey)
-		return -EINVAL;
 	if (!mm_pkey_is_allocated(mm, pkey))
 		return -EINVAL;
 
diff --git a/arch/x86/include/asm/purgatory.h b/arch/x86/include/asm/purgatory.h
new file mode 100644
index 000000000000..d7da2729903d
--- /dev/null
+++ b/arch/x86/include/asm/purgatory.h
@@ -0,0 +1,20 @@
+#ifndef _ASM_X86_PURGATORY_H
+#define _ASM_X86_PURGATORY_H
+
+#ifndef __ASSEMBLY__
+#include <linux/purgatory.h>
+
+extern void purgatory(void);
+/*
+ * These forward declarations serve two purposes:
+ *
+ * 1) Make sparse happy when checking arch/purgatory
+ * 2) Document that these are required to be global so the symbol
+ *    lookup in kexec works
+ */
+extern unsigned long purgatory_backup_dest;
+extern unsigned long purgatory_backup_src;
+extern unsigned long purgatory_backup_sz;
+#endif	/* __ASSEMBLY__ */
+
+#endif /* _ASM_PURGATORY_H */
diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h
index 6fa85944af83..fc5abff9b7fd 100644
--- a/arch/x86/include/asm/tlbflush.h
+++ b/arch/x86/include/asm/tlbflush.h
@@ -188,7 +188,7 @@ static inline void __native_flush_tlb_single(unsigned long addr)
 
 static inline void __flush_tlb_all(void)
 {
-	if (static_cpu_has(X86_FEATURE_PGE))
+	if (boot_cpu_has(X86_FEATURE_PGE))
 		__flush_tlb_global();
 	else
 		__flush_tlb();
diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
index ea148313570f..68766b276d9e 100644
--- a/arch/x86/include/asm/uaccess.h
+++ b/arch/x86/include/asm/uaccess.h
@@ -3,19 +3,14 @@
 /*
  * User space memory access functions
  */
-#include <linux/errno.h>
 #include <linux/compiler.h>
 #include <linux/kasan-checks.h>
-#include <linux/thread_info.h>
 #include <linux/string.h>
 #include <asm/asm.h>
 #include <asm/page.h>
 #include <asm/smap.h>
 #include <asm/extable.h>
 
-#define VERIFY_READ 0
-#define VERIFY_WRITE 1
-
 /*
  * The fs value determines whether argument validity checking should be
  * performed or not.  If get_fs() == USER_DS, checking is performed, with
@@ -384,6 +379,18 @@ do {								\
 		     : "=r" (err), ltype(x)			\
 		     : "m" (__m(addr)), "i" (errret), "0" (err))
 
+#define __get_user_asm_nozero(x, addr, err, itype, rtype, ltype, errret)	\
+	asm volatile("\n"					\
+		     "1:	mov"itype" %2,%"rtype"1\n"	\
+		     "2:\n"					\
+		     ".section .fixup,\"ax\"\n"			\
+		     "3:	mov %3,%0\n"			\
+		     "	jmp 2b\n"				\
+		     ".previous\n"				\
+		     _ASM_EXTABLE(1b, 3b)			\
+		     : "=r" (err), ltype(x)			\
+		     : "m" (__m(addr)), "i" (errret), "0" (err))
+
 /*
  * This doesn't do __uaccess_begin/end - the exception handling
  * around it must do that.
@@ -675,59 +682,6 @@ extern struct movsl_mask {
 # include <asm/uaccess_64.h>
 #endif
 
-unsigned long __must_check _copy_from_user(void *to, const void __user *from,
-					   unsigned n);
-unsigned long __must_check _copy_to_user(void __user *to, const void *from,
-					 unsigned n);
-
-extern void __compiletime_error("usercopy buffer size is too small")
-__bad_copy_user(void);
-
-static inline void copy_user_overflow(int size, unsigned long count)
-{
-	WARN(1, "Buffer overflow detected (%d < %lu)!\n", size, count);
-}
-
-static __always_inline unsigned long __must_check
-copy_from_user(void *to, const void __user *from, unsigned long n)
-{
-	int sz = __compiletime_object_size(to);
-
-	might_fault();
-
-	kasan_check_write(to, n);
-
-	if (likely(sz < 0 || sz >= n)) {
-		check_object_size(to, n, false);
-		n = _copy_from_user(to, from, n);
-	} else if (!__builtin_constant_p(n))
-		copy_user_overflow(sz, n);
-	else
-		__bad_copy_user();
-
-	return n;
-}
-
-static __always_inline unsigned long __must_check
-copy_to_user(void __user *to, const void *from, unsigned long n)
-{
-	int sz = __compiletime_object_size(from);
-
-	kasan_check_read(from, n);
-
-	might_fault();
-
-	if (likely(sz < 0 || sz >= n)) {
-		check_object_size(from, n, true);
-		n = _copy_to_user(to, from, n);
-	} else if (!__builtin_constant_p(n))
-		copy_user_overflow(sz, n);
-	else
-		__bad_copy_user();
-
-	return n;
-}
-
 /*
  * We rely on the nested NMI work to allow atomic faults from the NMI path; the
  * nested NMI paths are careful to preserve CR2.
diff --git a/arch/x86/include/asm/uaccess_32.h b/arch/x86/include/asm/uaccess_32.h
index 7d3bdd1ed697..aeda9bb8af50 100644
--- a/arch/x86/include/asm/uaccess_32.h
+++ b/arch/x86/include/asm/uaccess_32.h
@@ -4,149 +4,52 @@
 /*
  * User space memory access functions
  */
-#include <linux/errno.h>
-#include <linux/thread_info.h>
 #include <linux/string.h>
 #include <asm/asm.h>
 #include <asm/page.h>
 
-unsigned long __must_check __copy_to_user_ll
-		(void __user *to, const void *from, unsigned long n);
-unsigned long __must_check __copy_from_user_ll
-		(void *to, const void __user *from, unsigned long n);
-unsigned long __must_check __copy_from_user_ll_nozero
-		(void *to, const void __user *from, unsigned long n);
-unsigned long __must_check __copy_from_user_ll_nocache
-		(void *to, const void __user *from, unsigned long n);
+unsigned long __must_check __copy_user_ll
+		(void *to, const void *from, unsigned long n);
 unsigned long __must_check __copy_from_user_ll_nocache_nozero
 		(void *to, const void __user *from, unsigned long n);
 
-/**
- * __copy_to_user_inatomic: - Copy a block of data into user space, with less checking.
- * @to:   Destination address, in user space.
- * @from: Source address, in kernel space.
- * @n:    Number of bytes to copy.
- *
- * Context: User context only.
- *
- * Copy data from kernel space to user space.  Caller must check
- * the specified block with access_ok() before calling this function.
- * The caller should also make sure he pins the user space address
- * so that we don't result in page fault and sleep.
- */
-static __always_inline unsigned long __must_check
-__copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
-{
-	check_object_size(from, n, true);
-	return __copy_to_user_ll(to, from, n);
-}
-
-/**
- * __copy_to_user: - Copy a block of data into user space, with less checking.
- * @to:   Destination address, in user space.
- * @from: Source address, in kernel space.
- * @n:    Number of bytes to copy.
- *
- * Context: User context only. This function may sleep if pagefaults are
- *          enabled.
- *
- * Copy data from kernel space to user space.  Caller must check
- * the specified block with access_ok() before calling this function.
- *
- * Returns number of bytes that could not be copied.
- * On success, this will be zero.
- */
 static __always_inline unsigned long __must_check
-__copy_to_user(void __user *to, const void *from, unsigned long n)
+raw_copy_to_user(void __user *to, const void *from, unsigned long n)
 {
-	might_fault();
-	return __copy_to_user_inatomic(to, from, n);
+	return __copy_user_ll((__force void *)to, from, n);
 }
 
 static __always_inline unsigned long
-__copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
-{
-	return __copy_from_user_ll_nozero(to, from, n);
-}
-
-/**
- * __copy_from_user: - Copy a block of data from user space, with less checking.
- * @to:   Destination address, in kernel space.
- * @from: Source address, in user space.
- * @n:    Number of bytes to copy.
- *
- * Context: User context only. This function may sleep if pagefaults are
- *          enabled.
- *
- * Copy data from user space to kernel space.  Caller must check
- * the specified block with access_ok() before calling this function.
- *
- * Returns number of bytes that could not be copied.
- * On success, this will be zero.
- *
- * If some data could not be copied, this function will pad the copied
- * data to the requested size using zero bytes.
- *
- * An alternate version - __copy_from_user_inatomic() - may be called from
- * atomic context and will fail rather than sleep.  In this case the
- * uncopied bytes will *NOT* be padded with zeros.  See fs/filemap.h
- * for explanation of why this is needed.
- */
-static __always_inline unsigned long
-__copy_from_user(void *to, const void __user *from, unsigned long n)
-{
-	might_fault();
-	check_object_size(to, n, false);
-	if (__builtin_constant_p(n)) {
-		unsigned long ret;
-
-		switch (n) {
-		case 1:
-			__uaccess_begin();
-			__get_user_size(*(u8 *)to, from, 1, ret, 1);
-			__uaccess_end();
-			return ret;
-		case 2:
-			__uaccess_begin();
-			__get_user_size(*(u16 *)to, from, 2, ret, 2);
-			__uaccess_end();
-			return ret;
-		case 4:
-			__uaccess_begin();
-			__get_user_size(*(u32 *)to, from, 4, ret, 4);
-			__uaccess_end();
-			return ret;
-		}
-	}
-	return __copy_from_user_ll(to, from, n);
-}
-
-static __always_inline unsigned long __copy_from_user_nocache(void *to,
-				const void __user *from, unsigned long n)
+raw_copy_from_user(void *to, const void __user *from, unsigned long n)
 {
-	might_fault();
 	if (__builtin_constant_p(n)) {
 		unsigned long ret;
 
 		switch (n) {
 		case 1:
+			ret = 0;
 			__uaccess_begin();
-			__get_user_size(*(u8 *)to, from, 1, ret, 1);
+			__get_user_asm_nozero(*(u8 *)to, from, ret,
+					      "b", "b", "=q", 1);
 			__uaccess_end();
 			return ret;
 		case 2:
+			ret = 0;
 			__uaccess_begin();
-			__get_user_size(*(u16 *)to, from, 2, ret, 2);
+			__get_user_asm_nozero(*(u16 *)to, from, ret,
					      "w", "w", "=r", 2);
 			__uaccess_end();
 			return ret;
 		case 4:
+			ret = 0;
 			__uaccess_begin();
-			__get_user_size(*(u32 *)to, from, 4, ret, 4);
+			__get_user_asm_nozero(*(u32 *)to, from, ret,
+					      "l", "k", "=r", 4);
 			__uaccess_end();
 			return ret;
 		}
 	}
-	return __copy_from_user_ll_nocache(to, from, n);
+	return __copy_user_ll(to, (__force const void *)from, n);
 }
 
 static __always_inline unsigned long
diff --git a/arch/x86/include/asm/uaccess_64.h b/arch/x86/include/asm/uaccess_64.h
index 673059a109fe..c5504b9a472e 100644
--- a/arch/x86/include/asm/uaccess_64.h
+++ b/arch/x86/include/asm/uaccess_64.h
@@ -5,7 +5,6 @@
  * User space memory access functions
  */
 #include <linux/compiler.h>
-#include <linux/errno.h>
 #include <linux/lockdep.h>
 #include <linux/kasan-checks.h>
 #include <asm/alternative.h>
@@ -46,58 +45,54 @@ copy_user_generic(void *to, const void *from, unsigned len)
 	return ret;
 }
 
-__must_check unsigned long
-copy_in_user(void __user *to, const void __user *from, unsigned len);
-
-static __always_inline __must_check
-int __copy_from_user_nocheck(void *dst, const void __user *src, unsigned size)
+static __always_inline __must_check unsigned long
+raw_copy_from_user(void *dst, const void __user *src, unsigned long size)
 {
 	int ret = 0;
 
-	check_object_size(dst, size, false);
 	if (!__builtin_constant_p(size))
 		return copy_user_generic(dst, (__force void *)src, size);
 	switch (size) {
 	case 1:
 		__uaccess_begin();
-		__get_user_asm(*(u8 *)dst, (u8 __user *)src,
+		__get_user_asm_nozero(*(u8 *)dst, (u8 __user *)src,
			      ret, "b", "b", "=q", 1);
 		__uaccess_end();
 		return ret;
 	case 2:
 		__uaccess_begin();
-		__get_user_asm(*(u16 *)dst, (u16 __user *)src,
+		__get_user_asm_nozero(*(u16 *)dst, (u16 __user *)src,
			      ret, "w", "w", "=r", 2);
 		__uaccess_end();
 		return ret;
 	case 4:
 		__uaccess_begin();
-		__get_user_asm(*(u32 *)dst, (u32 __user *)src,
+		__get_user_asm_nozero(*(u32 *)dst, (u32 __user *)src,
			      ret, "l", "k", "=r", 4);
 		__uaccess_end();
 		return ret;
 	case 8:
 		__uaccess_begin();
-		__get_user_asm(*(u64 *)dst, (u64 __user *)src,
+		__get_user_asm_nozero(*(u64 *)dst, (u64 __user *)src,
			      ret, "q", "", "=r", 8);
 		__uaccess_end();
 		return ret;
 	case 10:
 		__uaccess_begin();
-		__get_user_asm(*(u64 *)dst, (u64 __user *)src,
+		__get_user_asm_nozero(*(u64 *)dst, (u64 __user *)src,
			       ret, "q", "", "=r", 10);
 		if (likely(!ret))
-			__get_user_asm(*(u16 *)(8 + (char *)dst),
+			__get_user_asm_nozero(*(u16 *)(8 + (char *)dst),
				       (u16 __user *)(8 + (char __user *)src),
				       ret, "w", "w", "=r", 2);
 		__uaccess_end();
 		return ret;
 	case 16:
 		__uaccess_begin();
-		__get_user_asm(*(u64 *)dst, (u64 __user *)src,
+		__get_user_asm_nozero(*(u64 *)dst, (u64 __user *)src,
			       ret, "q", "", "=r", 16);
 		if (likely(!ret))
-			__get_user_asm(*(u64 *)(8 + (char *)dst),
+			__get_user_asm_nozero(*(u64 *)(8 + (char *)dst),
				       (u64 __user *)(8 + (char __user *)src),
				       ret, "q", "", "=r", 8);
 		__uaccess_end();
@@ -107,20 +102,11 @@ int __copy_from_user_nocheck(void *dst, const void __user *src, unsigned size)
 	}
 }
 
-static __always_inline __must_check
-int __copy_from_user(void *dst, const void __user *src, unsigned size)
-{
-	might_fault();
-	kasan_check_write(dst, size);
-	return __copy_from_user_nocheck(dst, src, size);
-}
-
-static __always_inline __must_check
-int __copy_to_user_nocheck(void __user *dst, const void *src, unsigned size)
+static __always_inline __must_check unsigned long
+raw_copy_to_user(void __user *dst, const void *src, unsigned long size)
 {
 	int ret = 0;
 
-	check_object_size(src, size, true);
 	if (!__builtin_constant_p(size))
 		return copy_user_generic((__force void *)dst, src, size);
 	switch (size) {
@@ -176,100 +162,16 @@ int __copy_to_user_nocheck(void __user *dst, const void *src, unsigned size)
 }
 
 static __always_inline __must_check
-int __copy_to_user(void __user *dst, const void *src, unsigned size)
-{
-	might_fault();
-	kasan_check_read(src, size);
-	return __copy_to_user_nocheck(dst, src, size);
-}
-
-static __always_inline __must_check
-int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
-{
-	int ret = 0;
-
-	might_fault();
-	if (!__builtin_constant_p(size))
-		return copy_user_generic((__force void *)dst,
-					 (__force void *)src, size);
-	switch (size) {
-	case 1: {
-		u8 tmp;
-		__uaccess_begin();
-		__get_user_asm(tmp, (u8 __user *)src,
-			       ret, "b", "b", "=q", 1);
-		if (likely(!ret))
-			__put_user_asm(tmp, (u8 __user *)dst,
-				       ret, "b", "b", "iq", 1);
-		__uaccess_end();
-		return ret;
-	}
-	case 2: {
-		u16 tmp;
-		__uaccess_begin();
-		__get_user_asm(tmp, (u16 __user *)src,
-			       ret, "w", "w", "=r", 2);
-		if (likely(!ret))
-			__put_user_asm(tmp, (u16 __user *)dst,
-				       ret, "w", "w", "ir", 2);
-		__uaccess_end();
-		return ret;
-	}
-
-	case 4: {
-		u32 tmp;
-		__uaccess_begin();
-		__get_user_asm(tmp, (u32 __user *)src,
-			       ret, "l", "k", "=r", 4);
-		if (likely(!ret))
-			__put_user_asm(tmp, (u32 __user *)dst,
-				       ret, "l", "k", "ir", 4);
-		__uaccess_end();
-		return ret;
-	}
-	case 8: {
-		u64 tmp;
-		__uaccess_begin();
-		__get_user_asm(tmp, (u64 __user *)src,
-			       ret, "q", "", "=r", 8);
-		if (likely(!ret))
-			__put_user_asm(tmp, (u64 __user *)dst,
-				       ret, "q", "", "er", 8);
-		__uaccess_end();
-		return ret;
-	}
-	default:
-		return copy_user_generic((__force void *)dst,
-					 (__force void *)src, size);
-	}
-}
-
-static __must_check __always_inline int
-__copy_from_user_inatomic(void *dst, const void __user *src, unsigned size)
-{
-	kasan_check_write(dst, size);
-	return __copy_from_user_nocheck(dst, src, size);
-}
-
-static __must_check __always_inline int
-__copy_to_user_inatomic(void __user *dst, const void *src, unsigned size)
+unsigned long raw_copy_in_user(void __user *dst, const void __user *src, unsigned long size)
 {
-	kasan_check_read(src, size);
-	return __copy_to_user_nocheck(dst, src, size);
+	return copy_user_generic((__force void *)dst,
+				 (__force void *)src, size);
 }
 
 extern long __copy_user_nocache(void *dst, const void __user *src,
				unsigned size, int zerorest);
 
 static inline int
-__copy_from_user_nocache(void *dst, const void __user *src, unsigned size)
-{
-	might_fault();
-	kasan_check_write(dst, size);
-	return __copy_user_nocache(dst, src, size, 1);
-}
-
-static inline int
 __copy_from_user_inatomic_nocache(void *dst, const void __user *src, unsigned size)
 {
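The uaccess changes above move x86 to the raw_copy_*() convention: the arch routine only reports how many bytes it could not copy, while the generic copy_from_user() wrapper is expected to perform the might_fault()/object-size checks and zero the uncopied tail, which is why the __get_user_asm_nozero variants drop the zeroing fixup. A minimal user-space sketch of that contract follows; the fake_* helpers are invented for illustration and are not kernel code.

/*
 * Sketch of the raw_copy_from_user() calling convention, assuming the
 * generic wrapper is responsible for zero-filling the uncopied tail.
 * Hypothetical fake_* names; compile as an ordinary C program.
 */
#include <stddef.h>
#include <stdio.h>
#include <string.h>

/* Stand-in for raw_copy_from_user(): pretend a fault occurs after 'ok' bytes. */
static size_t fake_raw_copy_from_user(void *to, const void *from, size_t n,
				      size_t ok)
{
	size_t copied = n < ok ? n : ok;

	memcpy(to, from, copied);
	return n - copied;		/* bytes NOT copied */
}

/* Models what the generic copy_from_user() wrapper does with that result. */
static size_t fake_copy_from_user(void *to, const void *from, size_t n,
				  size_t ok)
{
	size_t res = fake_raw_copy_from_user(to, from, n, ok);

	if (res)			/* zero the uncopied tail */
		memset((char *)to + (n - res), 0, res);
	return res;
}

int main(void)
{
	char src[8] = "ABCDEFG";
	char dst[8];
	size_t left = fake_copy_from_user(dst, src, sizeof(dst), 4);

	printf("left=%zu tail: %d %d %d %d\n",
	       left, dst[4], dst[5], dst[6], dst[7]);
	return 0;
}

Run as a normal program this prints left=4 with a zeroed tail, mirroring the "return bytes not copied, zero the remainder" behaviour the generic wrapper provides to callers.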