author     Linus Torvalds <torvalds@linux-foundation.org>  2023-08-29 03:34:54 +0300
committer  Linus Torvalds <torvalds@linux-foundation.org>  2023-08-29 03:34:54 +0300
commit     542034175ca715d28522a59c1f88a58349f2765c
tree       9be4252d2769109fb0b49a653a6e754df963935b /arch/arm64
parent     bb2d9e5448ab9e86ecc79d8984dc6d3a8a9fba1e
parent     e1df27213941725962f68c64349bbcc60ece0314
download   linux-542034175ca715d28522a59c1f88a58349f2765c.tar.xz
Merge tag 'arm64-upstream' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux
Pull arm64 updates from Will Deacon:
"I think we have a bit less than usual on the architecture side, but
that's somewhat balanced out by a large crop of perf/PMU driver
updates and extensions to our selftests.
CPU features and system registers:
- Advertise hinted conditional branch support (FEAT_HBC) to userspace
- Avoid false positive "SANITY CHECK" warning when xCR registers
differ outside of the length field
Documentation:
- Fix macro name typo in SME documentation
Entry code:
- Unmask exceptions earlier on the system call entry path
Memory management:
- Don't bother clearing PTE_RDONLY for dirty ptes in pte_wrprotect()
and pte_modify()
Perf and PMU drivers:
- Initial support for Coresight TRBE devices on ACPI systems (the
coresight driver changes will come later)
- Fix hw_breakpoint single-stepping when called from bpf
- Fixes for DDR PMU on i.MX8MP SoC
- Add NUMA-awareness to Hisilicon PCIe PMU driver
- Fix locking dependency issue in Arm DMC620 PMU driver
- Work around Hisilicon erratum 162001900 in the SMMUv3 PMU driver
- Add support for Arm CMN-700 r3 parts to the CMN PMU driver
- Add support for recent Arm Cortex CPU PMUs
- Update Hisilicon PMU maintainers
Selftests:
- Add a bunch of new features to the hwcap test (JSCVT, PMULL, AES,
SHA1, etc)
- Fix SSVE test to leave streaming-mode after grabbing the signal
context
- Add new test for SVE vector-length changes with SME enabled
Miscellaneous:
- Allow compiler to warn on suspicious looking system register
expressions
- Work around SDEI firmware bug by aborting any running handlers on a
kernel crash
- Fix some harmless warnings when building with W=1
- Remove some unused function declarations
- Other minor fixes and cleanup"
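
The FEAT_HBC advertisement mentioned in the CPU features section above surfaces to userspace as a new hwcap bit and a new "hbc" entry in /proc/cpuinfo's Features line. A minimal sketch of how a program could probe it via the auxiliary vector (the HWCAP2_HBC value mirrors the uapi header change in the diff below; defining it locally is only a fallback for building against older headers):

#include <stdio.h>
#include <sys/auxv.h>                 /* getauxval(), AT_HWCAP2 */

#ifndef HWCAP2_HBC
#define HWCAP2_HBC (1UL << 44)        /* matches the new uapi/asm/hwcap.h definition */
#endif

int main(void)
{
        unsigned long hwcap2 = getauxval(AT_HWCAP2);

        /* The same information appears as "hbc" in /proc/cpuinfo on arm64. */
        printf("FEAT_HBC (hinted conditional branches): %s\n",
               (hwcap2 & HWCAP2_HBC) ? "present" : "not reported");
        return 0;
}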
* tag 'arm64-upstream' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux: (62 commits)
drivers/perf: hisi: Update HiSilicon PMU maintainers
arm_pmu: acpi: Add a representative platform device for TRBE
arm_pmu: acpi: Refactor arm_spe_acpi_register_device()
kselftest/arm64: Fix hwcaps selftest build
hw_breakpoint: fix single-stepping when using bpf_overflow_handler
arm64/sysreg: refactor deprecated strncpy
kselftest/arm64: add jscvt feature to hwcap test
kselftest/arm64: add pmull feature to hwcap test
kselftest/arm64: add AES feature check to hwcap test
kselftest/arm64: add SHA1 and related features to hwcap test
arm64: sysreg: Generate C compiler warnings on {read,write}_sysreg_s arguments
kselftest/arm64: build BTI tests in output directory
perf/imx_ddr: don't enable counter0 if none of 4 counters are used
perf/imx_ddr: speed up overflow frequency of cycle
drivers/perf: hisi: Schedule perf session according to locality
kselftest/arm64: fix a memleak in zt_regs_run()
perf/arm-dmc620: Fix dmc620_pmu_irqs_lock/cpu_hotplug_lock circular lock dependency
perf/smmuv3: Add MODULE_ALIAS for module auto loading
perf/smmuv3: Enable HiSilicon Erratum 162001900 quirk for HIP08/09
kselftest/arm64: Size sycall-abi buffers for the actual maximum VL
...
Diffstat (limited to 'arch/arm64')
26 files changed, 134 insertions, 112 deletions
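
One of the more subtle changes in the diff below is the pte_wrprotect()/pte_modify() handling of hardware-dirty ptes: on arm64 a pte is hardware-dirty when it is writable with PTE_RDONLY clear, so write-protecting it must first carry the dirty state over into the software PTE_DIRTY bit, and since PTE_RDONLY is about to be set anyway there is no point clearing it first (which the old pte_mkdirty() call did). A simplified, standalone sketch of that invariant (the bit positions are illustrative only, not the real arm64 PTE layout):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative bit positions only -- not the actual arm64 PTE layout. */
#define EX_PTE_WRITE  (1UL << 0)
#define EX_PTE_RDONLY (1UL << 1)
#define EX_PTE_DIRTY  (1UL << 2)      /* software dirty bit */

/* DBM convention: a writable pte with RDONLY clear has been written to. */
static bool example_pte_hw_dirty(uint64_t pte)
{
        return (pte & EX_PTE_WRITE) && !(pte & EX_PTE_RDONLY);
}

/* Mirrors the shape of the pte_wrprotect() hunk below: preserve dirtiness in
 * the software bit before making the pte read-only. */
static uint64_t example_pte_wrprotect(uint64_t pte)
{
        if (example_pte_hw_dirty(pte))
                pte |= EX_PTE_DIRTY;
        pte &= ~EX_PTE_WRITE;
        pte |= EX_PTE_RDONLY;
        return pte;
}

int main(void)
{
        uint64_t pte = EX_PTE_WRITE;          /* writable and hardware-dirty */
        pte = example_pte_wrprotect(pte);
        printf("sw-dirty after wrprotect: %s\n",
               (pte & EX_PTE_DIRTY) ? "yes" : "no");
        return 0;
}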
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index a2511b30d0f6..29db061db9bb 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -1793,9 +1793,6 @@ config ARM64_PAN
 	  The feature is detected at runtime, and will remain as a 'nop'
 	  instruction if the cpu does not implement the feature.
 
-config AS_HAS_LDAPR
-	def_bool $(as-instr,.arch_extension rcpc)
-
 config AS_HAS_LSE_ATOMICS
 	def_bool $(as-instr,.arch_extension lse)
 
@@ -1933,6 +1930,9 @@ config AS_HAS_ARMV8_3
 config AS_HAS_CFI_NEGATE_RA_STATE
 	def_bool $(as-instr,.cfi_startproc\n.cfi_negate_ra_state\n.cfi_endproc\n)
 
+config AS_HAS_LDAPR
+	def_bool $(as-instr,.arch_extension rcpc)
+
 endmenu # "ARMv8.3 architectural features"
 
 menu "ARMv8.4 architectural features"
diff --git a/arch/arm64/include/asm/acpi.h b/arch/arm64/include/asm/acpi.h
index bd68e1b7f29f..4d537d56eb84 100644
--- a/arch/arm64/include/asm/acpi.h
+++ b/arch/arm64/include/asm/acpi.h
@@ -42,6 +42,9 @@
 #define ACPI_MADT_GICC_SPE (offsetof(struct acpi_madt_generic_interrupt, \
 					spe_interrupt) + sizeof(u16))
 
+#define ACPI_MADT_GICC_TRBE (offsetof(struct acpi_madt_generic_interrupt, \
+					trbe_interrupt) + sizeof(u16))
+
 /* Basic configuration for ACPI */
 #ifdef CONFIG_ACPI
 pgprot_t __acpi_get_mem_attribute(phys_addr_t addr);
diff --git a/arch/arm64/include/asm/hwcap.h b/arch/arm64/include/asm/hwcap.h
index 692b1ec663b2..521267478d18 100644
--- a/arch/arm64/include/asm/hwcap.h
+++ b/arch/arm64/include/asm/hwcap.h
@@ -138,6 +138,7 @@
 #define KERNEL_HWCAP_SME_B16B16	__khwcap2_feature(SME_B16B16)
 #define KERNEL_HWCAP_SME_F16F16	__khwcap2_feature(SME_F16F16)
 #define KERNEL_HWCAP_MOPS	__khwcap2_feature(MOPS)
+#define KERNEL_HWCAP_HBC	__khwcap2_feature(HBC)
 
 /*
  * This yields a mask that user programs can use to figure out what
diff --git a/arch/arm64/include/asm/kernel-pgtable.h b/arch/arm64/include/asm/kernel-pgtable.h
index 577773870b66..85d26143faa5 100644
--- a/arch/arm64/include/asm/kernel-pgtable.h
+++ b/arch/arm64/include/asm/kernel-pgtable.h
@@ -118,31 +118,4 @@
 #define SWAPPER_RX_MMUFLAGS	(SWAPPER_RW_MMUFLAGS | PTE_RDONLY)
 #endif
 
-/*
- * To make optimal use of block mappings when laying out the linear
- * mapping, round down the base of physical memory to a size that can
- * be mapped efficiently, i.e., either PUD_SIZE (4k granule) or PMD_SIZE
- * (64k granule), or a multiple that can be mapped using contiguous bits
- * in the page tables: 32 * PMD_SIZE (16k granule)
- */
-#if defined(CONFIG_ARM64_4K_PAGES)
-#define ARM64_MEMSTART_SHIFT		PUD_SHIFT
-#elif defined(CONFIG_ARM64_16K_PAGES)
-#define ARM64_MEMSTART_SHIFT		CONT_PMD_SHIFT
-#else
-#define ARM64_MEMSTART_SHIFT		PMD_SHIFT
-#endif
-
-/*
- * sparsemem vmemmap imposes an additional requirement on the alignment of
- * memstart_addr, due to the fact that the base of the vmemmap region
- * has a direct correspondence, and needs to appear sufficiently aligned
- * in the virtual address space.
- */
-#if ARM64_MEMSTART_SHIFT < SECTION_SIZE_BITS
-#define ARM64_MEMSTART_ALIGN	(1UL << SECTION_SIZE_BITS)
-#else
-#define ARM64_MEMSTART_ALIGN	(1UL << ARM64_MEMSTART_SHIFT)
-#endif
-
 #endif	/* __ASM_KERNEL_PGTABLE_H */
diff --git a/arch/arm64/include/asm/mmu.h b/arch/arm64/include/asm/mmu.h
index 4384eaa0aeb7..94b68850cb9f 100644
--- a/arch/arm64/include/asm/mmu.h
+++ b/arch/arm64/include/asm/mmu.h
@@ -64,7 +64,6 @@ extern void arm64_memblock_init(void);
 extern void paging_init(void);
 extern void bootmem_init(void);
 extern void __iomem *early_io_map(phys_addr_t phys, unsigned long virt);
-extern void init_mem_pgprot(void);
 extern void create_mapping_noalloc(phys_addr_t phys, unsigned long virt,
 				   phys_addr_t size, pgprot_t prot);
 extern void create_pgd_mapping(struct mm_struct *mm, phys_addr_t phys,
diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index 0bd18de9fd97..72c2e8431360 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -103,6 +103,7 @@ static inline pteval_t __phys_to_pte_val(phys_addr_t phys)
 #define pte_young(pte)		(!!(pte_val(pte) & PTE_AF))
 #define pte_special(pte)	(!!(pte_val(pte) & PTE_SPECIAL))
 #define pte_write(pte)		(!!(pte_val(pte) & PTE_WRITE))
+#define pte_rdonly(pte)		(!!(pte_val(pte) & PTE_RDONLY))
 #define pte_user(pte)		(!!(pte_val(pte) & PTE_USER))
 #define pte_user_exec(pte)	(!(pte_val(pte) & PTE_UXN))
 #define pte_cont(pte)		(!!(pte_val(pte) & PTE_CONT))
@@ -120,7 +121,7 @@ static inline pteval_t __phys_to_pte_val(phys_addr_t phys)
 	(__boundary - 1 < (end) - 1) ? __boundary : (end);		\
 })
 
-#define pte_hw_dirty(pte)	(pte_write(pte) && !(pte_val(pte) & PTE_RDONLY))
+#define pte_hw_dirty(pte)	(pte_write(pte) && !pte_rdonly(pte))
 #define pte_sw_dirty(pte)	(!!(pte_val(pte) & PTE_DIRTY))
 #define pte_dirty(pte)		(pte_sw_dirty(pte) || pte_hw_dirty(pte))
 
@@ -212,7 +213,7 @@ static inline pte_t pte_wrprotect(pte_t pte)
 	 * clear), set the PTE_DIRTY bit.
 	 */
 	if (pte_hw_dirty(pte))
-		pte = pte_mkdirty(pte);
+		pte = set_pte_bit(pte, __pgprot(PTE_DIRTY));
 
 	pte = clear_pte_bit(pte, __pgprot(PTE_WRITE));
 	pte = set_pte_bit(pte, __pgprot(PTE_RDONLY));
@@ -823,7 +824,8 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
 			      PTE_ATTRINDX_MASK;
 	/* preserve the hardware dirty information */
 	if (pte_hw_dirty(pte))
-		pte = pte_mkdirty(pte);
+		pte = set_pte_bit(pte, __pgprot(PTE_DIRTY));
+
 	pte_val(pte) = (pte_val(pte) & ~mask) | (pgprot_val(newprot) & mask);
 	return pte;
 }
diff --git a/arch/arm64/include/asm/sdei.h b/arch/arm64/include/asm/sdei.h
index 4292d9bafb9d..484cb6972e99 100644
--- a/arch/arm64/include/asm/sdei.h
+++ b/arch/arm64/include/asm/sdei.h
@@ -17,6 +17,9 @@
 
 #include <asm/virt.h>
 
+DECLARE_PER_CPU(struct sdei_registered_event *, sdei_active_normal_event);
+DECLARE_PER_CPU(struct sdei_registered_event *, sdei_active_critical_event);
+
 extern unsigned long sdei_exit_mode;
 
 /* Software Delegated Exception entry point from firmware*/
@@ -29,6 +32,9 @@ asmlinkage void __sdei_asm_entry_trampoline(unsigned long event_num,
 						   unsigned long pc,
 						   unsigned long pstate);
 
+/* Abort a running handler. Context is discarded. */
+void __sdei_handler_abort(void);
+
 /*
  * The above entry point does the minimum to call C code. This function does
  * anything else, before calling the driver.
diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h
index b481935e9314..16464bf9a8aa 100644
--- a/arch/arm64/include/asm/sysreg.h
+++ b/arch/arm64/include/asm/sysreg.h
@@ -803,15 +803,21 @@
 /*
  * For registers without architectural names, or simply unsupported by
  * GAS.
+ *
+ * __check_r forces warnings to be generated by the compiler when
+ * evaluating r which wouldn't normally happen due to being passed to
+ * the assembler via __stringify(r).
  */
 #define read_sysreg_s(r) ({						\
 	u64 __val;							\
+	u32 __maybe_unused __check_r = (u32)(r);			\
 	asm volatile(__mrs_s("%0", r) : "=r" (__val));			\
 	__val;								\
 })
 
 #define write_sysreg_s(v, r) do {					\
 	u64 __val = (u64)(v);						\
+	u32 __maybe_unused __check_r = (u32)(r);			\
 	asm volatile(__msr_s(r, "%x0") : : "rZ" (__val));		\
 } while (0)
diff --git a/arch/arm64/include/uapi/asm/hwcap.h b/arch/arm64/include/uapi/asm/hwcap.h
index a2cac4305b1e..53026f45a509 100644
--- a/arch/arm64/include/uapi/asm/hwcap.h
+++ b/arch/arm64/include/uapi/asm/hwcap.h
@@ -103,5 +103,6 @@
 #define HWCAP2_SME_B16B16	(1UL << 41)
 #define HWCAP2_SME_F16F16	(1UL << 42)
 #define HWCAP2_MOPS		(1UL << 43)
+#define HWCAP2_HBC		(1UL << 44)
 
 #endif /* _UAPI__ASM_HWCAP_H */
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index f9d456fe132d..a5f533f63b60 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -222,7 +222,7 @@ static const struct arm64_ftr_bits ftr_id_aa64isar1[] = {
 static const struct arm64_ftr_bits ftr_id_aa64isar2[] = {
 	ARM64_FTR_BITS(FTR_VISIBLE, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64ISAR2_EL1_CSSC_SHIFT, 4, 0),
 	ARM64_FTR_BITS(FTR_VISIBLE, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64ISAR2_EL1_RPRFM_SHIFT, 4, 0),
-	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_HIGHER_SAFE, ID_AA64ISAR2_EL1_BC_SHIFT, 4, 0),
+	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_HIGHER_SAFE, ID_AA64ISAR2_EL1_BC_SHIFT, 4, 0),
 	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR2_EL1_MOPS_SHIFT, 4, 0),
 	ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_PTR_AUTH),
 		       FTR_STRICT, FTR_EXACT, ID_AA64ISAR2_EL1_APA3_SHIFT, 4, 0),
@@ -2708,12 +2708,8 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
 		.desc = "Enhanced Virtualization Traps",
 		.capability = ARM64_HAS_EVT,
 		.type = ARM64_CPUCAP_SYSTEM_FEATURE,
-		.sys_reg = SYS_ID_AA64MMFR2_EL1,
-		.sign = FTR_UNSIGNED,
-		.field_pos = ID_AA64MMFR2_EL1_EVT_SHIFT,
-		.field_width = 4,
-		.min_field_value = ID_AA64MMFR2_EL1_EVT_IMP,
 		.matches = has_cpuid_feature,
+		ARM64_CPUID_FIELDS(ID_AA64MMFR2_EL1, EVT, IMP)
 	},
 	{},
 };
@@ -2844,6 +2840,7 @@ static const struct arm64_cpu_capabilities arm64_elf_hwcaps[] = {
 	HWCAP_CAP(ID_AA64ISAR2_EL1, RPRES, IMP, CAP_HWCAP, KERNEL_HWCAP_RPRES),
 	HWCAP_CAP(ID_AA64ISAR2_EL1, WFxT, IMP, CAP_HWCAP, KERNEL_HWCAP_WFXT),
 	HWCAP_CAP(ID_AA64ISAR2_EL1, MOPS, IMP, CAP_HWCAP, KERNEL_HWCAP_MOPS),
+	HWCAP_CAP(ID_AA64ISAR2_EL1, BC, IMP, CAP_HWCAP, KERNEL_HWCAP_HBC),
 #ifdef CONFIG_ARM64_SME
 	HWCAP_CAP(ID_AA64PFR1_EL1, SME, IMP, CAP_HWCAP, KERNEL_HWCAP_SME),
 	HWCAP_CAP(ID_AA64SMFR0_EL1, FA64, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_FA64),
diff --git a/arch/arm64/kernel/cpuidle.c b/arch/arm64/kernel/cpuidle.c
index d1f68599c29f..f372295207fb 100644
--- a/arch/arm64/kernel/cpuidle.c
+++ b/arch/arm64/kernel/cpuidle.c
@@ -9,8 +9,6 @@
 #include <linux/acpi.h>
 #include <linux/cpuidle.h>
 #include <linux/cpu_pm.h>
-#include <linux/of.h>
-#include <linux/of_device.h>
 #include <linux/psci.h>
 
 #ifdef CONFIG_ACPI_PROCESSOR_IDLE
diff --git a/arch/arm64/kernel/cpuinfo.c b/arch/arm64/kernel/cpuinfo.c
index 58622dc85917..98fda8500535 100644
--- a/arch/arm64/kernel/cpuinfo.c
+++ b/arch/arm64/kernel/cpuinfo.c
@@ -126,6 +126,7 @@ static const char *const hwcap_str[] = {
 	[KERNEL_HWCAP_SME_B16B16]	= "smeb16b16",
 	[KERNEL_HWCAP_SME_F16F16]	= "smef16f16",
 	[KERNEL_HWCAP_MOPS]		= "mops",
+	[KERNEL_HWCAP_HBC]		= "hbc",
 };
 
 #ifdef CONFIG_COMPAT
diff --git a/arch/arm64/kernel/entry-common.c b/arch/arm64/kernel/entry-common.c
index 6b2e0c367702..0fc94207e69a 100644
--- a/arch/arm64/kernel/entry-common.c
+++ b/arch/arm64/kernel/entry-common.c
@@ -355,6 +355,35 @@ static bool cortex_a76_erratum_1463225_debug_handler(struct pt_regs *regs)
 }
 #endif /* CONFIG_ARM64_ERRATUM_1463225 */
 
+/*
+ * As per the ABI exit SME streaming mode and clear the SVE state not
+ * shared with FPSIMD on syscall entry.
+ */
+static inline void fp_user_discard(void)
+{
+	/*
+	 * If SME is active then exit streaming mode. If ZA is active
+	 * then flush the SVE registers but leave userspace access to
+	 * both SVE and SME enabled, otherwise disable SME for the
+	 * task and fall through to disabling SVE too. This means
+	 * that after a syscall we never have any streaming mode
+	 * register state to track, if this changes the KVM code will
+	 * need updating.
+	 */
+	if (system_supports_sme())
+		sme_smstop_sm();
+
+	if (!system_supports_sve())
+		return;
+
+	if (test_thread_flag(TIF_SVE)) {
+		unsigned int sve_vq_minus_one;
+
+		sve_vq_minus_one = sve_vq_from_vl(task_get_sve_vl(current)) - 1;
+		sve_flush_live(true, sve_vq_minus_one);
+	}
+}
+
 UNHANDLED(el1t, 64, sync)
 UNHANDLED(el1t, 64, irq)
 UNHANDLED(el1t, 64, fiq)
@@ -644,6 +673,8 @@ static void noinstr el0_svc(struct pt_regs *regs)
 {
 	enter_from_user_mode(regs);
 	cortex_a76_erratum_1463225_svc_handler();
+	fp_user_discard();
+	local_daif_restore(DAIF_PROCCTX);
 	do_el0_svc(regs);
 	exit_to_user_mode(regs);
 }
@@ -783,6 +814,7 @@ static void noinstr el0_svc_compat(struct pt_regs *regs)
 {
 	enter_from_user_mode(regs);
 	cortex_a76_erratum_1463225_svc_handler();
+	local_daif_restore(DAIF_PROCCTX);
 	do_el0_svc_compat(regs);
 	exit_to_user_mode(regs);
 }
diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
index a40e5e50fa55..6ad61de03d0a 100644
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
@@ -986,9 +986,13 @@ SYM_CODE_START(__sdei_asm_handler)
 
 	mov	x19, x1
 
-#if defined(CONFIG_VMAP_STACK) || defined(CONFIG_SHADOW_CALL_STACK)
+	/* Store the registered-event for crash_smp_send_stop() */
 	ldrb	w4, [x19, #SDEI_EVENT_PRIORITY]
-#endif
+	cbnz	w4, 1f
+	adr_this_cpu dst=x5, sym=sdei_active_normal_event, tmp=x6
+	b	2f
+1:	adr_this_cpu dst=x5, sym=sdei_active_critical_event, tmp=x6
+2:	str	x19, [x5]
 
 #ifdef CONFIG_VMAP_STACK
 	/*
@@ -1055,6 +1059,14 @@ SYM_CODE_START(__sdei_asm_handler)
 
 	ldr_l	x2, sdei_exit_mode
 
+	/* Clear the registered-event seen by crash_smp_send_stop() */
+	ldrb	w3, [x4, #SDEI_EVENT_PRIORITY]
+	cbnz	w3, 1f
+	adr_this_cpu dst=x5, sym=sdei_active_normal_event, tmp=x6
+	b	2f
+1:	adr_this_cpu dst=x5, sym=sdei_active_critical_event, tmp=x6
+2:	str	xzr, [x5]
+
 alternative_if_not ARM64_UNMAP_KERNEL_AT_EL0
 	sdei_handler_exit exit_mode=x2
 alternative_else_nop_endif
@@ -1065,4 +1077,15 @@ alternative_else_nop_endif
 #endif
 SYM_CODE_END(__sdei_asm_handler)
 NOKPROBE(__sdei_asm_handler)
+
+SYM_CODE_START(__sdei_handler_abort)
+	mov_q	x0, SDEI_1_0_FN_SDEI_EVENT_COMPLETE_AND_RESUME
+	adr	x1, 1f
+	ldr_l	x2, sdei_exit_mode
+	sdei_handler_exit exit_mode=x2
+	// exit the handler and jump to the next instruction.
+	// Exit will stomp x0-x17, PSTATE, ELR_ELx, and SPSR_ELx.
+1:	ret
+SYM_CODE_END(__sdei_handler_abort)
+NOKPROBE(__sdei_handler_abort)
 #endif /* CONFIG_ARM_SDE_INTERFACE */
diff --git a/arch/arm64/kernel/fpsimd.c b/arch/arm64/kernel/fpsimd.c
index 087c05aa960e..91e44ac7150f 100644
--- a/arch/arm64/kernel/fpsimd.c
+++ b/arch/arm64/kernel/fpsimd.c
@@ -1179,9 +1179,6 @@ void sve_kernel_enable(const struct arm64_cpu_capabilities *__always_unused p)
  */
 u64 read_zcr_features(void)
 {
-	u64 zcr;
-	unsigned int vq_max;
-
 	/*
 	 * Set the maximum possible VL, and write zeroes to all other
 	 * bits to see if they stick.
@@ -1189,12 +1186,8 @@ u64 read_zcr_features(void)
 	sve_kernel_enable(NULL);
 	write_sysreg_s(ZCR_ELx_LEN_MASK, SYS_ZCR_EL1);
 
-	zcr = read_sysreg_s(SYS_ZCR_EL1);
-	zcr &= ~(u64)ZCR_ELx_LEN_MASK; /* find sticky 1s outside LEN field */
-	vq_max = sve_vq_from_vl(sve_get_vl());
-	zcr |= vq_max - 1; /* set LEN field to maximum effective value */
-
-	return zcr;
+	/* Return LEN value that would be written to get the maximum VL */
+	return sve_vq_from_vl(sve_get_vl()) - 1;
 }
 
 void __init sve_setup(void)
@@ -1349,9 +1342,6 @@ void fa64_kernel_enable(const struct arm64_cpu_capabilities *__always_unused p)
  */
 u64 read_smcr_features(void)
 {
-	u64 smcr;
-	unsigned int vq_max;
-
 	sme_kernel_enable(NULL);
 
 	/*
@@ -1360,12 +1350,8 @@ u64 read_smcr_features(void)
 	write_sysreg_s(read_sysreg_s(SYS_SMCR_EL1) | SMCR_ELx_LEN_MASK,
 		       SYS_SMCR_EL1);
 
-	smcr = read_sysreg_s(SYS_SMCR_EL1);
-	smcr &= ~(u64)SMCR_ELx_LEN_MASK; /* Only the LEN field */
-	vq_max = sve_vq_from_vl(sme_get_vl());
-	smcr |= vq_max - 1; /* set LEN field to maximum effective value */
-
-	return smcr;
+	/* Return LEN value that would be written to get the maximum VL */
+	return sve_vq_from_vl(sme_get_vl()) - 1;
 }
 
 void __init sme_setup(void)
diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
index 757a0de07f91..7b236994f0e1 100644
--- a/arch/arm64/kernel/head.S
+++ b/arch/arm64/kernel/head.S
@@ -113,7 +113,7 @@ SYM_CODE_START(primary_entry)
 	 */
 #if VA_BITS > 48
 	mrs_s	x0, SYS_ID_AA64MMFR2_EL1
-	tst	x0, #0xf << ID_AA64MMFR2_EL1_VARange_SHIFT
+	tst	x0, ID_AA64MMFR2_EL1_VARange_MASK
 	mov	x0, #VA_BITS
 	mov	x25, #VA_BITS_MIN
 	csel	x25, x25, x0, eq
@@ -756,7 +756,7 @@ SYM_FUNC_START(__cpu_secondary_check52bitva)
 	b.ne	2f
 
 	mrs_s	x0, SYS_ID_AA64MMFR2_EL1
-	and	x0, x0, #(0xf << ID_AA64MMFR2_EL1_VARange_SHIFT)
+	and	x0, x0, ID_AA64MMFR2_EL1_VARange_MASK
 	cbnz	x0, 2f
 
 	update_early_cpu_boot_status \
diff --git a/arch/arm64/kernel/hw_breakpoint.c b/arch/arm64/kernel/hw_breakpoint.c
index db2a1861bb97..35225632d70a 100644
--- a/arch/arm64/kernel/hw_breakpoint.c
+++ b/arch/arm64/kernel/hw_breakpoint.c
@@ -654,7 +654,7 @@ static int breakpoint_handler(unsigned long unused, unsigned long esr,
 		perf_bp_event(bp, regs);
 
 		/* Do we need to handle the stepping? */
-		if (is_default_overflow_handler(bp))
+		if (uses_default_overflow_handler(bp))
 			step = 1;
 unlock:
 		rcu_read_unlock();
@@ -733,7 +733,7 @@ static u64 get_distance_from_watchpoint(unsigned long addr, u64 val,
 static int watchpoint_report(struct perf_event *wp, unsigned long addr,
 			     struct pt_regs *regs)
 {
-	int step = is_default_overflow_handler(wp);
+	int step = uses_default_overflow_handler(wp);
 	struct arch_hw_breakpoint *info = counter_arch_bp(wp);
 
 	info->trigger = addr;
diff --git a/arch/arm64/kernel/idreg-override.c b/arch/arm64/kernel/idreg-override.c
index 2fe2491b692c..aee12c75b738 100644
--- a/arch/arm64/kernel/idreg-override.c
+++ b/arch/arm64/kernel/idreg-override.c
@@ -262,9 +262,9 @@ static __init void __parse_cmdline(const char *cmdline, bool parse_aliases)
 		if (!len)
 			return;
 
-		len = min(len, ARRAY_SIZE(buf) - 1);
-		strncpy(buf, cmdline, len);
-		buf[len] = 0;
+		len = strscpy(buf, cmdline, ARRAY_SIZE(buf));
+		if (len == -E2BIG)
+			len = ARRAY_SIZE(buf) - 1;
 
 		if (strcmp(buf, "--") == 0)
 			return;
diff --git a/arch/arm64/kernel/pci.c b/arch/arm64/kernel/pci.c
index 2276689b5411..f872c57e9909 100644
--- a/arch/arm64/kernel/pci.c
+++ b/arch/arm64/kernel/pci.c
@@ -11,8 +11,6 @@
 #include <linux/io.h>
 #include <linux/kernel.h>
 #include <linux/mm.h>
-#include <linux/of_pci.h>
-#include <linux/of_platform.h>
 #include <linux/pci.h>
 #include <linux/pci-acpi.h>
 #include <linux/pci-ecam.h>
diff --git a/arch/arm64/kernel/ptrace.c b/arch/arm64/kernel/ptrace.c
index 187aa2b175b4..20d7ef82de90 100644
--- a/arch/arm64/kernel/ptrace.c
+++ b/arch/arm64/kernel/ptrace.c
@@ -891,7 +891,8 @@ static int sve_set_common(struct task_struct *target,
 		break;
 	default:
 		WARN_ON_ONCE(1);
-		return -EINVAL;
+		ret = -EINVAL;
+		goto out;
 	}
 
 	/*
diff --git a/arch/arm64/kernel/sdei.c b/arch/arm64/kernel/sdei.c
index 830be01af32d..255d12f881c2 100644
--- a/arch/arm64/kernel/sdei.c
+++ b/arch/arm64/kernel/sdei.c
@@ -47,6 +47,9 @@ DEFINE_PER_CPU(unsigned long *, sdei_shadow_call_stack_normal_ptr);
 DEFINE_PER_CPU(unsigned long *, sdei_shadow_call_stack_critical_ptr);
 #endif
 
+DEFINE_PER_CPU(struct sdei_registered_event *, sdei_active_normal_event);
+DEFINE_PER_CPU(struct sdei_registered_event *, sdei_active_critical_event);
+
 static void _free_sdei_stack(unsigned long * __percpu *ptr, int cpu)
 {
 	unsigned long *p;
diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c
index edd63894d61e..960b98b43506 100644
--- a/arch/arm64/kernel/smp.c
+++ b/arch/arm64/kernel/smp.c
@@ -1044,10 +1044,8 @@ void crash_smp_send_stop(void)
 	 * If this cpu is the only one alive at this point in time, online or
 	 * not, there are no stop messages to be sent around, so just back out.
 	 */
-	if (num_other_online_cpus() == 0) {
-		sdei_mask_local_cpu();
-		return;
-	}
+	if (num_other_online_cpus() == 0)
+		goto skip_ipi;
 
 	cpumask_copy(&mask, cpu_online_mask);
 	cpumask_clear_cpu(smp_processor_id(), &mask);
@@ -1066,7 +1064,9 @@ void crash_smp_send_stop(void)
 		pr_warn("SMP: failed to stop secondary CPUs %*pbl\n",
 			cpumask_pr_args(&mask));
 
+skip_ipi:
 	sdei_mask_local_cpu();
+	sdei_handler_abort();
 }
 
 bool smp_crash_stop_failed(void)
diff --git a/arch/arm64/kernel/syscall.c b/arch/arm64/kernel/syscall.c
index b1ae2f2eaf77..9a70d9746b66 100644
--- a/arch/arm64/kernel/syscall.c
+++ b/arch/arm64/kernel/syscall.c
@@ -8,7 +8,6 @@
 #include <linux/randomize_kstack.h>
 #include <linux/syscalls.h>
 
-#include <asm/daifflags.h>
 #include <asm/debug-monitors.h>
 #include <asm/exception.h>
 #include <asm/fpsimd.h>
@@ -101,8 +100,6 @@ static void el0_svc_common(struct pt_regs *regs, int scno, int sc_nr,
 	 * (Similarly for HVC and SMC elsewhere.)
 	 */
 
-	local_daif_restore(DAIF_PROCCTX);
-
 	if (flags & _TIF_MTE_ASYNC_FAULT) {
 		/*
 		 * Process the asynchronous tag check fault before the actual
@@ -153,38 +150,8 @@ trace_exit:
 	syscall_trace_exit(regs);
 }
 
-/*
- * As per the ABI exit SME streaming mode and clear the SVE state not
- * shared with FPSIMD on syscall entry.
- */
-static inline void fp_user_discard(void)
-{
-	/*
-	 * If SME is active then exit streaming mode. If ZA is active
-	 * then flush the SVE registers but leave userspace access to
-	 * both SVE and SME enabled, otherwise disable SME for the
-	 * task and fall through to disabling SVE too. This means
-	 * that after a syscall we never have any streaming mode
-	 * register state to track, if this changes the KVM code will
-	 * need updating.
-	 */
-	if (system_supports_sme())
-		sme_smstop_sm();
-
-	if (!system_supports_sve())
-		return;
-
-	if (test_thread_flag(TIF_SVE)) {
-		unsigned int sve_vq_minus_one;
-
-		sve_vq_minus_one = sve_vq_from_vl(task_get_sve_vl(current)) - 1;
-		sve_flush_live(true, sve_vq_minus_one);
-	}
-}
-
 void do_el0_svc(struct pt_regs *regs)
 {
-	fp_user_discard();
 	el0_svc_common(regs, regs->regs[8], __NR_syscalls, sys_call_table);
 }
diff --git a/arch/arm64/kernel/vdso/vdso.lds.S b/arch/arm64/kernel/vdso/vdso.lds.S
index 6028f1fe2d1c..45354f2ddf70 100644
--- a/arch/arm64/kernel/vdso/vdso.lds.S
+++ b/arch/arm64/kernel/vdso/vdso.lds.S
@@ -50,9 +50,7 @@ SECTIONS
 
 	. = ALIGN(4);
 	.altinstructions : {
-		__alt_instructions = .;
 		*(.altinstructions)
-		__alt_instructions_end = .;
 	}
 
 	.dynamic	: { *(.dynamic) }	:text	:dynamic
diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
index d31c3a9290c5..4fcb88a445ef 100644
--- a/arch/arm64/mm/init.c
+++ b/arch/arm64/mm/init.c
@@ -73,6 +73,33 @@ phys_addr_t __ro_after_init arm64_dma_phys_limit;
 
 #define DEFAULT_CRASH_KERNEL_LOW_SIZE	(128UL << 20)
 
+/*
+ * To make optimal use of block mappings when laying out the linear
+ * mapping, round down the base of physical memory to a size that can
+ * be mapped efficiently, i.e., either PUD_SIZE (4k granule) or PMD_SIZE
+ * (64k granule), or a multiple that can be mapped using contiguous bits
+ * in the page tables: 32 * PMD_SIZE (16k granule)
+ */
+#if defined(CONFIG_ARM64_4K_PAGES)
+#define ARM64_MEMSTART_SHIFT		PUD_SHIFT
+#elif defined(CONFIG_ARM64_16K_PAGES)
+#define ARM64_MEMSTART_SHIFT		CONT_PMD_SHIFT
+#else
+#define ARM64_MEMSTART_SHIFT		PMD_SHIFT
+#endif
+
+/*
+ * sparsemem vmemmap imposes an additional requirement on the alignment of
+ * memstart_addr, due to the fact that the base of the vmemmap region
+ * has a direct correspondence, and needs to appear sufficiently aligned
+ * in the virtual address space.
+ */
+#if ARM64_MEMSTART_SHIFT < SECTION_SIZE_BITS
+#define ARM64_MEMSTART_ALIGN	(1UL << SECTION_SIZE_BITS)
+#else
+#define ARM64_MEMSTART_ALIGN	(1UL << ARM64_MEMSTART_SHIFT)
+#endif
+
 static int __init reserve_crashkernel_low(unsigned long long low_size)
 {
 	unsigned long long low_base;
diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S
index 2baeec419f62..14fdf645edc8 100644
--- a/arch/arm64/mm/proc.S
+++ b/arch/arm64/mm/proc.S
@@ -447,7 +447,7 @@ SYM_FUNC_START(__cpu_setup)
 	 * via capabilities.
 	 */
 	mrs	x9, ID_AA64MMFR1_EL1
-	and	x9, x9, #0xf
+	and	x9, x9, ID_AA64MMFR1_EL1_HAFDBS_MASK
 	cbz	x9, 1f
 	orr	tcr, tcr, #TCR_HA		// hardware Access flag update
1:
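
The read_sysreg_s()/write_sysreg_s() change above relies on a small trick: assigning the macro argument to an otherwise unused local forces the compiler to evaluate, and therefore diagnose, an expression that would previously only ever reach the assembler through __stringify(). A minimal userspace illustration of the same idiom (the macro and names here are made up for the example; the exact diagnostic depends on the compiler and warning flags):

#include <stdint.h>

/*
 * Stand-in for a macro whose argument is normally consumed only as a
 * string by inline assembly, so the compiler never evaluates it.
 * Assigning it to a dummy variable makes the compiler look at the
 * expression and emit its usual diagnostics.
 */
#define use_encoding(enc) do {                                  \
        uint32_t check = (uint32_t)(enc); /* forces evaluation of enc */ \
        (void)check;                                            \
} while (0)

int main(void)
{
        use_encoding(0x1234);        /* fine */
        /* use_encoding(1 << 40); */ /* uncommenting this would now draw a
                                      * shift-count-overflow style warning
                                      * instead of being silently stringified
                                      * for the assembler */
        return 0;
}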