author     Linus Torvalds <torvalds@linux-foundation.org>  2020-10-23 19:46:16 +0300
committer  Linus Torvalds <torvalds@linux-foundation.org>  2020-10-23 19:46:16 +0300
commit     032c7ed958174957a4d6eac61806f66e1123d815
tree       0468862c64b825b437181700f2e9ba2870a83b1c
parent     f9893351acaecf0a414baf9942b48d5bb5c688c6
parent     66dd3474702aa98d5844367e1577cdad78ef7c65
download   linux-032c7ed958174957a4d6eac61806f66e1123d815.tar.xz

Merge tag 'arm64-upstream' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux
Pull more arm64 updates from Will Deacon:
"A small selection of further arm64 fixes and updates. Most of these
are fixes that came in during the merge window, with the exception of
the HAVE_MOVE_PMD mremap() speed-up which we discussed back in 2018
and somehow forgot to enable upstream.
- Improve performance of Spectre-v2 mitigation on Falkor CPUs (if
you're lucky enough to have one)
- Select HAVE_MOVE_PMD. This has been shown to improve the performance
of mremap(), which is used heavily by the Android runtime GC, and it
seems we forgot to enable this upstream back in 2018 (a userspace
sketch of the mremap() pattern in question follows the link below).
- Ensure linker flags are consistent between LLVM and BFD
- Fix stale comment in Spectre mitigation rework
- Fix broken copyright header
- Fix KASLR randomisation of the linear map
- Prevent arm64-specific prctl()s from compat tasks (return -EINVAL)"
Link: https://lore.kernel.org/kvmarm/20181108181201.88826-3-joelaf@google.com/
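
As a rough illustration of what HAVE_MOVE_PMD buys (a hedged userspace sketch, not part of the pull request), the program below moves a large anonymous mapping with mremap(). With the option enabled, the kernel can relocate such a region by rewiring PMD entries instead of copying PTEs page by page; the 1 GiB size is an arbitrary choice:

/* Illustrative only: the mremap() pattern that HAVE_MOVE_PMD speeds up. */
#define _GNU_SOURCE
#include <sys/mman.h>
#include <string.h>
#include <stdio.h>

int main(void)
{
	size_t len = 1UL << 30;	/* 1 GiB: spans many 2 MiB PMD regions */
	void *old, *new;

	old = mmap(NULL, len, PROT_READ | PROT_WRITE,
		   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (old == MAP_FAILED)
		return 1;
	memset(old, 1, len);	/* fault pages in so there is something to move */

	new = mremap(old, len, len, MREMAP_MAYMOVE);
	if (new == MAP_FAILED)
		return 1;

	printf("moved %zu bytes from %p to %p\n", len, old, new);
	return 0;
}

The Android runtime GC workload mentioned above is essentially this pattern at scale.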
* tag 'arm64-upstream' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux:
arm64: proton-pack: Update comment to reflect new function name
arm64: spectre-v2: Favour CPU-specific mitigation at EL2
arm64: link with -z norelro regardless of CONFIG_RELOCATABLE
arm64: Fix a broken copyright header in gen_vdso_offsets.sh
arm64: mremap speedup - Enable HAVE_MOVE_PMD
arm64: mm: use single quantity to represent the PA to VA translation
arm64: reject prctl(PR_PAC_RESET_KEYS) on compat tasks
-rw-r--r--  arch/arm64/Kconfig                          |  1
-rw-r--r--  arch/arm64/Makefile                         |  4
-rw-r--r--  arch/arm64/include/asm/memory.h             |  5
-rw-r--r--  arch/arm64/include/asm/pgtable.h            |  4
-rw-r--r--  arch/arm64/kernel/fpsimd.c                  |  4
-rw-r--r--  arch/arm64/kernel/pointer_auth.c            |  4
-rw-r--r--  arch/arm64/kernel/proton-pack.c             | 40
-rwxr-xr-x  arch/arm64/kernel/vdso/gen_vdso_offsets.sh  |  2
-rw-r--r--  arch/arm64/mm/init.c                        | 30
9 files changed, 43 insertions(+), 51 deletions(-)
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 08fa3a1c50f0..f858c352f72a 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -123,6 +123,7 @@ config ARM64
 	select GENERIC_VDSO_TIME_NS
 	select HANDLE_DOMAIN_IRQ
 	select HARDIRQS_SW_RESEND
+	select HAVE_MOVE_PMD
 	select HAVE_PCI
 	select HAVE_ACPI_APEI if (ACPI && EFI)
 	select HAVE_ALIGNED_STRUCT_PAGE if SLUB
diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile
index 88a44e7326b5..5789c2d18d43 100644
--- a/arch/arm64/Makefile
+++ b/arch/arm64/Makefile
@@ -10,13 +10,13 @@
 #
 # Copyright (C) 1995-2001 by Russell King
 
-LDFLAGS_vmlinux	:=--no-undefined -X
+LDFLAGS_vmlinux	:=--no-undefined -X -z norelro
 
 ifeq ($(CONFIG_RELOCATABLE), y)
 # Pass --no-apply-dynamic-relocs to restore pre-binutils-2.27 behaviour
 # for relative relocs, since this leads to better Image compression
 # with the relocation offsets always being zero.
-LDFLAGS_vmlinux		+= -shared -Bsymbolic -z notext -z norelro \
+LDFLAGS_vmlinux		+= -shared -Bsymbolic -z notext \
 			$(call ld-option, --no-apply-dynamic-relocs)
 endif
 
diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h
index 43640d797455..cd61239bae8c 100644
--- a/arch/arm64/include/asm/memory.h
+++ b/arch/arm64/include/asm/memory.h
@@ -169,7 +169,6 @@ extern u64 vabits_actual;
 #define PAGE_END		(_PAGE_END(vabits_actual))
 #endif
 
-extern s64 physvirt_offset;
 extern s64 memstart_addr;
 /* PHYS_OFFSET - the physical address of the start of memory. */
 #define PHYS_OFFSET		({ VM_BUG_ON(memstart_addr & 1); memstart_addr; })
@@ -245,7 +244,7 @@ static inline const void *__tag_set(const void *addr, u8 tag)
  */
 #define __is_lm_address(addr)	(!(((u64)addr) & BIT(vabits_actual - 1)))
 
-#define __lm_to_phys(addr)	(((addr) + physvirt_offset))
+#define __lm_to_phys(addr)	(((addr) & ~PAGE_OFFSET) + PHYS_OFFSET)
 #define __kimg_to_phys(addr)	((addr) - kimage_voffset)
 
 #define __virt_to_phys_nodebug(x) ({					\
@@ -263,7 +262,7 @@ extern phys_addr_t __phys_addr_symbol(unsigned long x);
 #define __phys_addr_symbol(x)	__pa_symbol_nodebug(x)
 #endif /* CONFIG_DEBUG_VIRTUAL */
 
-#define __phys_to_virt(x)	((unsigned long)((x) - physvirt_offset))
+#define __phys_to_virt(x)	((unsigned long)((x) - PHYS_OFFSET) | PAGE_OFFSET)
 #define __phys_to_kimg(x)	((unsigned long)((x) + kimage_voffset))
 
 /*
diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index a11bf52e0c38..4ff12a7adcfd 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -24,6 +24,8 @@
 #define VMALLOC_START		(MODULES_END)
 #define VMALLOC_END		(- PUD_SIZE - VMEMMAP_SIZE - SZ_64K)
 
+#define vmemmap			((struct page *)VMEMMAP_START - (memstart_addr >> PAGE_SHIFT))
+
 #define FIRST_USER_ADDRESS	0UL
 
 #ifndef __ASSEMBLY__
@@ -34,8 +36,6 @@
 #include <linux/mm_types.h>
 #include <linux/sched.h>
 
-extern struct page *vmemmap;
-
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
 #define __HAVE_ARCH_FLUSH_PMD_TLB_RANGE
 
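
To make the memory.h change easier to follow, here is a standalone sketch of the new translation arithmetic (ordinary userspace C, not kernel code; the constants assume a 48-bit VA configuration with DRAM at 0x80000000 and are purely illustrative). The key observation is that a linear-map offset never overlaps the bits set in PAGE_OFFSET, so OR-ing PAGE_OFFSET in is the same as adding it, and the single quantity PHYS_OFFSET (memstart_addr) replaces physvirt_offset in both directions:

/* Standalone sketch of the new PA <-> VA macros; all values illustrative. */
#include <stdint.h>
#include <stdio.h>

#define PAGE_OFFSET	0xffff000000000000UL	/* start of the linear map */
#define PHYS_OFFSET	0x0000000080000000UL	/* memstart_addr: DRAM base */

static uint64_t phys_to_virt(uint64_t pa)
{
	/* the offset stays below bit 47, so '|' behaves like '+' here */
	return (pa - PHYS_OFFSET) | PAGE_OFFSET;
}

static uint64_t lm_to_phys(uint64_t va)
{
	/* masking off PAGE_OFFSET recovers the linear-map offset */
	return (va & ~PAGE_OFFSET) + PHYS_OFFSET;
}

int main(void)
{
	uint64_t pa = 0x80042000UL;
	uint64_t va = phys_to_virt(pa);

	/* prints: pa=0x80042000 va=0xffff000000042000 back=0x80042000 */
	printf("pa=%#lx va=%#lx back=%#lx\n", (unsigned long)pa,
	       (unsigned long)va, (unsigned long)lm_to_phys(va));
	return 0;
}

The vmemmap definition moved into pgtable.h uses the same trick: the array base is biased by memstart_addr >> PAGE_SHIFT so it can be indexed directly with absolute PFNs.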
diff --git a/arch/arm64/kernel/fpsimd.c b/arch/arm64/kernel/fpsimd.c
index a6d688c10745..062b21f30f94 100644
--- a/arch/arm64/kernel/fpsimd.c
+++ b/arch/arm64/kernel/fpsimd.c
@@ -678,7 +678,7 @@ int sve_set_current_vl(unsigned long arg)
 	vl = arg & PR_SVE_VL_LEN_MASK;
 	flags = arg & ~vl;
 
-	if (!system_supports_sve())
+	if (!system_supports_sve() || is_compat_task())
 		return -EINVAL;
 
 	ret = sve_set_vector_length(current, vl, flags);
@@ -691,7 +691,7 @@ int sve_set_current_vl(unsigned long arg)
 /* PR_SVE_GET_VL */
 int sve_get_current_vl(void)
 {
-	if (!system_supports_sve())
+	if (!system_supports_sve() || is_compat_task())
 		return -EINVAL;
 
 	return sve_prctl_status(0);
diff --git a/arch/arm64/kernel/pointer_auth.c b/arch/arm64/kernel/pointer_auth.c
index 1e77736a4f66..adb955fd9bdd 100644
--- a/arch/arm64/kernel/pointer_auth.c
+++ b/arch/arm64/kernel/pointer_auth.c
@@ -1,5 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0
 
+#include <linux/compat.h>
 #include <linux/errno.h>
 #include <linux/prctl.h>
 #include <linux/random.h>
@@ -17,6 +18,9 @@ int ptrauth_prctl_reset_keys(struct task_struct *tsk, unsigned long arg)
 	if (!system_supports_address_auth() && !system_supports_generic_auth())
 		return -EINVAL;
 
+	if (is_compat_thread(task_thread_info(tsk)))
+		return -EINVAL;
+
 	if (!arg) {
 		ptrauth_keys_init_user(keys);
 		return 0;
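
To see the compat-task change from userspace (again a hedged sketch, not from the series), the program below issues PR_PAC_RESET_KEYS with arg == 0, i.e. "reset all pointer-authentication keys". Built as a 32-bit compat binary it now fails cleanly with EINVAL; built natively it still succeeds on PAC-capable hardware, and the SVE prctl()s guarded in fpsimd.c above behave the same way:

/* Illustrative only: probe the prctl() now rejected for compat tasks. */
#include <stdio.h>
#include <string.h>
#include <errno.h>
#include <sys/prctl.h>

#ifndef PR_PAC_RESET_KEYS
#define PR_PAC_RESET_KEYS	54	/* value from <linux/prctl.h> */
#endif

int main(void)
{
	/* arg == 0 requests a reset of all pointer-authentication keys */
	if (prctl(PR_PAC_RESET_KEYS, 0, 0, 0, 0) != 0)
		printf("PR_PAC_RESET_KEYS: %s\n", strerror(errno));
	else
		printf("PR_PAC_RESET_KEYS: ok\n");
	return 0;
}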
diff --git a/arch/arm64/kernel/proton-pack.c b/arch/arm64/kernel/proton-pack.c
index 68b710f1b43f..25f3c80b5ffe 100644
--- a/arch/arm64/kernel/proton-pack.c
+++ b/arch/arm64/kernel/proton-pack.c
@@ -67,7 +67,8 @@ ssize_t cpu_show_spectre_v1(struct device *dev, struct device_attribute *attr,
  *   - Mitigated in hardware and advertised by ID_AA64PFR0_EL1.CSV2.
  *   - Mitigated in hardware and listed in our "safe list".
  *   - Mitigated in software by firmware.
- *   - Mitigated in software by a CPU-specific dance in the kernel.
+ *   - Mitigated in software by a CPU-specific dance in the kernel and a
+ *     firmware call at EL2.
  *   - Vulnerable.
  *
  * It's not unlikely for different CPUs in a big.LITTLE system to fall into
@@ -204,8 +205,8 @@ static void install_bp_hardening_cb(bp_hardening_cb_t fn)
 			  __SMCCC_WORKAROUND_1_SMC_SZ;
 
 	/*
-	 * detect_harden_bp_fw() passes NULL for the hyp_vecs start/end if
-	 * we're a guest. Skip the hyp-vectors work.
+	 * Vinz Clortho takes the hyp_vecs start/end "keys" at
+	 * the door when we're a guest. Skip the hyp-vectors work.
 	 */
 	if (!is_hyp_mode_available()) {
 		__this_cpu_write(bp_hardening_data.fn, fn);
@@ -259,6 +260,16 @@ static void qcom_link_stack_sanitisation(void)
 		     : "=&r" (tmp));
 }
 
+static bp_hardening_cb_t spectre_v2_get_sw_mitigation_cb(void)
+{
+	u32 midr = read_cpuid_id();
+	if (((midr & MIDR_CPU_MODEL_MASK) != MIDR_QCOM_FALKOR) &&
+	    ((midr & MIDR_CPU_MODEL_MASK) != MIDR_QCOM_FALKOR_V1))
+		return NULL;
+
+	return qcom_link_stack_sanitisation;
+}
+
 static enum mitigation_state spectre_v2_enable_fw_mitigation(void)
 {
 	bp_hardening_cb_t cb;
@@ -284,26 +295,15 @@ static enum mitigation_state spectre_v2_enable_fw_mitigation(void)
 		return SPECTRE_VULNERABLE;
 	}
 
+	/*
+	 * Prefer a CPU-specific workaround if it exists. Note that we
+	 * still rely on firmware for the mitigation at EL2.
+	 */
+	cb = spectre_v2_get_sw_mitigation_cb() ?: cb;
 	install_bp_hardening_cb(cb);
 	return SPECTRE_MITIGATED;
 }
 
-static enum mitigation_state spectre_v2_enable_sw_mitigation(void)
-{
-	u32 midr;
-
-	if (spectre_v2_mitigations_off())
-		return SPECTRE_VULNERABLE;
-
-	midr = read_cpuid_id();
-	if (((midr & MIDR_CPU_MODEL_MASK) != MIDR_QCOM_FALKOR) &&
-	    ((midr & MIDR_CPU_MODEL_MASK) != MIDR_QCOM_FALKOR_V1))
-		return SPECTRE_VULNERABLE;
-
-	install_bp_hardening_cb(qcom_link_stack_sanitisation);
-	return SPECTRE_MITIGATED;
-}
-
 void spectre_v2_enable_mitigation(const struct arm64_cpu_capabilities *__unused)
 {
 	enum mitigation_state state;
@@ -313,8 +313,6 @@ void spectre_v2_enable_mitigation(const struct arm64_cpu_capabilities *__unused)
 	state = spectre_v2_get_cpu_hw_mitigation_state();
 	if (state == SPECTRE_VULNERABLE)
 		state = spectre_v2_enable_fw_mitigation();
-	if (state == SPECTRE_VULNERABLE)
-		state = spectre_v2_enable_sw_mitigation();
 
 	update_mitigation_state(&spectre_v2_state, state);
 }
diff --git a/arch/arm64/kernel/vdso/gen_vdso_offsets.sh b/arch/arm64/kernel/vdso/gen_vdso_offsets.sh
index 0664acaf61ff..8b806eacd0a6 100755
--- a/arch/arm64/kernel/vdso/gen_vdso_offsets.sh
+++ b/arch/arm64/kernel/vdso/gen_vdso_offsets.sh
@@ -8,7 +8,7 @@
 # Doing this inside the Makefile will break the $(filter-out) function,
 # causing Kbuild to rebuild the vdso-offsets header file every time.
 #
-# Author: Will Deacon <will.deacon@arm.com
+# Author: Will Deacon <will.deacon@arm.com>
 #
 
 LC_ALL=C
diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
index a53c1e0fb017..095540667f0f 100644
--- a/arch/arm64/mm/init.c
+++ b/arch/arm64/mm/init.c
@@ -53,12 +53,6 @@
 s64 memstart_addr __ro_after_init = -1;
 EXPORT_SYMBOL(memstart_addr);
 
-s64 physvirt_offset __ro_after_init;
-EXPORT_SYMBOL(physvirt_offset);
-
-struct page *vmemmap __ro_after_init;
-EXPORT_SYMBOL(vmemmap);
-
 /*
  * We create both ZONE_DMA and ZONE_DMA32. ZONE_DMA covers the first 1G of
  * memory as some devices, namely the Raspberry Pi 4, have peripherals with
@@ -289,20 +283,6 @@ void __init arm64_memblock_init(void)
 	memstart_addr = round_down(memblock_start_of_DRAM(),
 				   ARM64_MEMSTART_ALIGN);
 
-	physvirt_offset = PHYS_OFFSET - PAGE_OFFSET;
-
-	vmemmap = ((struct page *)VMEMMAP_START - (memstart_addr >> PAGE_SHIFT));
-
-	/*
-	 * If we are running with a 52-bit kernel VA config on a system that
-	 * does not support it, we have to offset our vmemmap and physvirt_offset
-	 * s.t. we avoid the 52-bit portion of the direct linear map
-	 */
-	if (IS_ENABLED(CONFIG_ARM64_VA_BITS_52) && (vabits_actual != 52)) {
-		vmemmap += (_PAGE_OFFSET(48) - _PAGE_OFFSET(52)) >> PAGE_SHIFT;
-		physvirt_offset = PHYS_OFFSET - _PAGE_OFFSET(48);
-	}
-
 	/*
 	 * Remove the memory that we will not be able to cover with the
 	 * linear mapping. Take care not to clip the kernel which may be
@@ -318,6 +298,16 @@ void __init arm64_memblock_init(void)
 	}
 
 	/*
+	 * If we are running with a 52-bit kernel VA config on a system that
+	 * does not support it, we have to place the available physical
+	 * memory in the 48-bit addressable part of the linear region, i.e.,
+	 * we have to move it upward. Since memstart_addr represents the
+	 * physical address of PAGE_OFFSET, we have to *subtract* from it.
+	 */
+	if (IS_ENABLED(CONFIG_ARM64_VA_BITS_52) && (vabits_actual != 52))
+		memstart_addr -= _PAGE_OFFSET(48) - _PAGE_OFFSET(52);
+
+	/*
 	 * Apply the memory limit if it was set. Since the kernel may be loaded
 	 * high up in memory, add back the kernel region that must be accessible
 	 * via the linear mapping.
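
Finally, the sign in the last mm/init.c hunk is easy to get backwards, so here is the arithmetic worked through in a standalone sketch (userspace C, not kernel code; the DRAM base is an arbitrary stand-in). _PAGE_OFFSET(48) - _PAGE_OFFSET(52) is a positive constant, and subtracting it makes PHYS_OFFSET smaller, which pushes every linear-map VA computed as (pa - PHYS_OFFSET) | PAGE_OFFSET upward into the 48-bit addressable part of the region:

/* Sketch of the 52 -> 48 bit VA fallback adjustment; not kernel code. */
#include <stdint.h>
#include <stdio.h>

/* _PAGE_OFFSET(va) is -(1 << va), as defined in asm/memory.h */
#define _PAGE_OFFSET(va)	(-(UINT64_C(1) << (va)))

int main(void)
{
	int64_t memstart_addr = 0x80000000;	/* illustrative DRAM base */
	uint64_t delta = _PAGE_OFFSET(48) - _PAGE_OFFSET(52);

	memstart_addr -= delta;

	/* delta = 0xf000000000000: PHYS_OFFSET shrinks, linear VAs rise */
	printf("delta         = %#llx\n", (unsigned long long)delta);
	printf("memstart_addr = %#llx\n", (unsigned long long)memstart_addr);
	return 0;
}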