Diffstat (limited to 'arch/arm64/kernel')
30 files changed, 1145 insertions, 2333 deletions
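One user-visible API introduced in the cpufeature.c hunks below is system_32bit_el0_cpumask(), which reports which CPUs can run 32-bit EL0 tasks once mismatched AArch32 support is allowed. As a minimal sketch of how a caller might consult it (assuming the declaration is exported via <asm/cpufeature.h>; the helper name task_can_run_32bit() is purely illustrative and not part of this series):

    #include <linux/cpumask.h>
    #include <asm/cpufeature.h>

    /*
     * Illustrative only: treat a compat task as runnable iff at least one
     * CPU in its allowed mask supports 32-bit EL0. Per the hunk below,
     * system_32bit_el0_cpumask() returns cpu_none_mask when 32-bit EL0 is
     * unsupported, the tracked 32-bit-capable mask when
     * arm64_mismatched_32bit_el0 is enabled, and cpu_possible_mask on
     * symmetric systems.
     */
    static bool task_can_run_32bit(const struct cpumask *allowed)
    {
            return cpumask_intersects(allowed, system_32bit_el0_cpumask());
    }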
diff --git a/arch/arm64/kernel/Makefile b/arch/arm64/kernel/Makefile index 6cc97730790e..cce308586fcc 100644 --- a/arch/arm64/kernel/Makefile +++ b/arch/arm64/kernel/Makefile @@ -14,15 +14,22 @@ CFLAGS_REMOVE_return_address.o = $(CC_FLAGS_FTRACE) CFLAGS_REMOVE_syscall.o = -fstack-protector -fstack-protector-strong CFLAGS_syscall.o += -fno-stack-protector +# It's not safe to invoke KCOV when portions of the kernel environment aren't +# available or are out-of-sync with HW state. Since `noinstr` doesn't always +# inhibit KCOV instrumentation, disable it for the entire compilation unit. +KCOV_INSTRUMENT_entry.o := n +KCOV_INSTRUMENT_idle.o := n + # Object file lists. obj-y := debug-monitors.o entry.o irq.o fpsimd.o \ entry-common.o entry-fpsimd.o process.o ptrace.o \ setup.o signal.o sys.o stacktrace.o time.o traps.o \ - io.o vdso.o hyp-stub.o psci.o cpu_ops.o insn.o \ + io.o vdso.o hyp-stub.o psci.o cpu_ops.o \ return_address.o cpuinfo.o cpu_errata.o \ cpufeature.o alternative.o cacheinfo.o \ smp.o smp_spin_table.o topology.o smccc-call.o \ - syscall.o proton-pack.o idreg-override.o + syscall.o proton-pack.o idreg-override.o idle.o \ + patching.o targets += efi-entry.o diff --git a/arch/arm64/kernel/acpi.c b/arch/arm64/kernel/acpi.c index cada0b816c8a..f3851724fe35 100644 --- a/arch/arm64/kernel/acpi.c +++ b/arch/arm64/kernel/acpi.c @@ -239,6 +239,18 @@ done: } } +static pgprot_t __acpi_get_writethrough_mem_attribute(void) +{ + /* + * Although UEFI specifies the use of Normal Write-through for + * EFI_MEMORY_WT, it is seldom used in practice and not implemented + * by most (all?) CPUs. Rather than allocate a MAIR just for this + * purpose, emit a warning and use Normal Non-cacheable instead. + */ + pr_warn_once("No MAIR allocation for EFI_MEMORY_WT; treating as Normal Non-cacheable\n"); + return __pgprot(PROT_NORMAL_NC); +} + pgprot_t __acpi_get_mem_attribute(phys_addr_t addr) { /* @@ -246,7 +258,7 @@ pgprot_t __acpi_get_mem_attribute(phys_addr_t addr) * types" of UEFI 2.5 section 2.3.6.1, each EFI memory type is * mapped to a corresponding MAIR attribute encoding. * The EFI memory attribute advises all possible capabilities - * of a memory region. We use the most efficient capability. + * of a memory region. 
*/ u64 attr; @@ -254,10 +266,10 @@ pgprot_t __acpi_get_mem_attribute(phys_addr_t addr) attr = efi_mem_attributes(addr); if (attr & EFI_MEMORY_WB) return PAGE_KERNEL; - if (attr & EFI_MEMORY_WT) - return __pgprot(PROT_NORMAL_WT); if (attr & EFI_MEMORY_WC) return __pgprot(PROT_NORMAL_NC); + if (attr & EFI_MEMORY_WT) + return __acpi_get_writethrough_mem_attribute(); return __pgprot(PROT_DEVICE_nGnRnE); } @@ -340,10 +352,10 @@ void __iomem *acpi_os_ioremap(acpi_physical_address phys, acpi_size size) default: if (region->attribute & EFI_MEMORY_WB) prot = PAGE_KERNEL; - else if (region->attribute & EFI_MEMORY_WT) - prot = __pgprot(PROT_NORMAL_WT); else if (region->attribute & EFI_MEMORY_WC) prot = __pgprot(PROT_NORMAL_NC); + else if (region->attribute & EFI_MEMORY_WT) + prot = __acpi_get_writethrough_mem_attribute(); } } return __ioremap(phys, size, prot); diff --git a/arch/arm64/kernel/asm-offsets.c b/arch/arm64/kernel/asm-offsets.c index 6f0044cb233e..c85670692afa 100644 --- a/arch/arm64/kernel/asm-offsets.c +++ b/arch/arm64/kernel/asm-offsets.c @@ -27,6 +27,7 @@ int main(void) { DEFINE(TSK_ACTIVE_MM, offsetof(struct task_struct, active_mm)); + DEFINE(TSK_CPU, offsetof(struct task_struct, cpu)); BLANK(); DEFINE(TSK_TI_FLAGS, offsetof(struct task_struct, thread_info.flags)); DEFINE(TSK_TI_PREEMPT, offsetof(struct task_struct, thread_info.preempt_count)); @@ -46,6 +47,8 @@ int main(void) DEFINE(THREAD_SCTLR_USER, offsetof(struct task_struct, thread.sctlr_user)); #ifdef CONFIG_ARM64_PTR_AUTH DEFINE(THREAD_KEYS_USER, offsetof(struct task_struct, thread.keys_user)); +#endif +#ifdef CONFIG_ARM64_PTR_AUTH_KERNEL DEFINE(THREAD_KEYS_KERNEL, offsetof(struct task_struct, thread.keys_kernel)); #endif #ifdef CONFIG_ARM64_MTE @@ -99,7 +102,6 @@ int main(void) DEFINE(SOFTIRQ_SHIFT, SOFTIRQ_SHIFT); DEFINE(IRQ_CPUSTAT_SOFTIRQ_PENDING, offsetof(irq_cpustat_t, __softirq_pending)); BLANK(); - DEFINE(CPU_BOOT_STACK, offsetof(struct secondary_data, stack)); DEFINE(CPU_BOOT_TASK, offsetof(struct secondary_data, task)); BLANK(); DEFINE(FTR_OVR_VAL_OFFSET, offsetof(struct arm64_ftr_override, val)); @@ -140,6 +142,15 @@ int main(void) DEFINE(ARM_SMCCC_RES_X2_OFFS, offsetof(struct arm_smccc_res, a2)); DEFINE(ARM_SMCCC_QUIRK_ID_OFFS, offsetof(struct arm_smccc_quirk, id)); DEFINE(ARM_SMCCC_QUIRK_STATE_OFFS, offsetof(struct arm_smccc_quirk, state)); + DEFINE(ARM_SMCCC_1_2_REGS_X0_OFFS, offsetof(struct arm_smccc_1_2_regs, a0)); + DEFINE(ARM_SMCCC_1_2_REGS_X2_OFFS, offsetof(struct arm_smccc_1_2_regs, a2)); + DEFINE(ARM_SMCCC_1_2_REGS_X4_OFFS, offsetof(struct arm_smccc_1_2_regs, a4)); + DEFINE(ARM_SMCCC_1_2_REGS_X6_OFFS, offsetof(struct arm_smccc_1_2_regs, a6)); + DEFINE(ARM_SMCCC_1_2_REGS_X8_OFFS, offsetof(struct arm_smccc_1_2_regs, a8)); + DEFINE(ARM_SMCCC_1_2_REGS_X10_OFFS, offsetof(struct arm_smccc_1_2_regs, a10)); + DEFINE(ARM_SMCCC_1_2_REGS_X12_OFFS, offsetof(struct arm_smccc_1_2_regs, a12)); + DEFINE(ARM_SMCCC_1_2_REGS_X14_OFFS, offsetof(struct arm_smccc_1_2_regs, a14)); + DEFINE(ARM_SMCCC_1_2_REGS_X16_OFFS, offsetof(struct arm_smccc_1_2_regs, a16)); BLANK(); DEFINE(HIBERN_PBE_ORIG, offsetof(struct pbe, orig_address)); DEFINE(HIBERN_PBE_ADDR, offsetof(struct pbe, address)); @@ -155,7 +166,9 @@ int main(void) #endif #ifdef CONFIG_ARM64_PTR_AUTH DEFINE(PTRAUTH_USER_KEY_APIA, offsetof(struct ptrauth_keys_user, apia)); +#ifdef CONFIG_ARM64_PTR_AUTH_KERNEL DEFINE(PTRAUTH_KERNEL_KEY_APIA, offsetof(struct ptrauth_keys_kernel, apia)); +#endif BLANK(); #endif return 0; diff --git a/arch/arm64/kernel/cpufeature.c 
b/arch/arm64/kernel/cpufeature.c index efed2830d141..125d5c9471ac 100644 --- a/arch/arm64/kernel/cpufeature.c +++ b/arch/arm64/kernel/cpufeature.c @@ -76,6 +76,7 @@ #include <asm/cpufeature.h> #include <asm/cpu_ops.h> #include <asm/fpsimd.h> +#include <asm/insn.h> #include <asm/kvm_host.h> #include <asm/mmu_context.h> #include <asm/mte.h> @@ -108,6 +109,24 @@ bool arm64_use_ng_mappings = false; EXPORT_SYMBOL(arm64_use_ng_mappings); /* + * Permit PER_LINUX32 and execve() of 32-bit binaries even if not all CPUs + * support it? + */ +static bool __read_mostly allow_mismatched_32bit_el0; + +/* + * Static branch enabled only if allow_mismatched_32bit_el0 is set and we have + * seen at least one CPU capable of 32-bit EL0. + */ +DEFINE_STATIC_KEY_FALSE(arm64_mismatched_32bit_el0); + +/* + * Mask of CPUs supporting 32-bit EL0. + * Only valid if arm64_mismatched_32bit_el0 is enabled. + */ +static cpumask_var_t cpu_32bit_el0_mask __cpumask_var_read_mostly; + +/* * Flag to indicate if we have computed the system wide * capabilities based on the boot time active CPUs. This * will be used to determine if a new booting CPU should @@ -400,6 +419,11 @@ static const struct arm64_ftr_bits ftr_dczid[] = { ARM64_FTR_END, }; +static const struct arm64_ftr_bits ftr_gmid[] = { + ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, SYS_GMID_EL1_BS_SHIFT, 4, 0), + ARM64_FTR_END, +}; + static const struct arm64_ftr_bits ftr_id_isar0[] = { ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR0_DIVIDE_SHIFT, 4, 0), ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR0_DEBUG_SHIFT, 4, 0), @@ -617,6 +641,9 @@ static const struct __ftr_reg_entry { /* Op1 = 0, CRn = 1, CRm = 2 */ ARM64_FTR_REG(SYS_ZCR_EL1, ftr_zcr), + /* Op1 = 1, CRn = 0, CRm = 0 */ + ARM64_FTR_REG(SYS_GMID_EL1, ftr_gmid), + /* Op1 = 3, CRn = 0, CRm = 0 */ { SYS_CTR_EL0, &arm64_ftr_reg_ctrel0 }, ARM64_FTR_REG(SYS_DCZID_EL0, ftr_dczid), @@ -767,7 +794,7 @@ static void __init sort_ftr_regs(void) * Any bits that are not covered by an arm64_ftr_bits entry are considered * RES0 for the system-wide value, and must strictly match. 
*/ -static void __init init_cpu_ftr_reg(u32 sys_reg, u64 new) +static void init_cpu_ftr_reg(u32 sys_reg, u64 new) { u64 val = 0; u64 strict_mask = ~0x0ULL; @@ -863,6 +890,31 @@ static void __init init_cpu_hwcaps_indirect_list(void) static void __init setup_boot_cpu_capabilities(void); +static void init_32bit_cpu_features(struct cpuinfo_32bit *info) +{ + init_cpu_ftr_reg(SYS_ID_DFR0_EL1, info->reg_id_dfr0); + init_cpu_ftr_reg(SYS_ID_DFR1_EL1, info->reg_id_dfr1); + init_cpu_ftr_reg(SYS_ID_ISAR0_EL1, info->reg_id_isar0); + init_cpu_ftr_reg(SYS_ID_ISAR1_EL1, info->reg_id_isar1); + init_cpu_ftr_reg(SYS_ID_ISAR2_EL1, info->reg_id_isar2); + init_cpu_ftr_reg(SYS_ID_ISAR3_EL1, info->reg_id_isar3); + init_cpu_ftr_reg(SYS_ID_ISAR4_EL1, info->reg_id_isar4); + init_cpu_ftr_reg(SYS_ID_ISAR5_EL1, info->reg_id_isar5); + init_cpu_ftr_reg(SYS_ID_ISAR6_EL1, info->reg_id_isar6); + init_cpu_ftr_reg(SYS_ID_MMFR0_EL1, info->reg_id_mmfr0); + init_cpu_ftr_reg(SYS_ID_MMFR1_EL1, info->reg_id_mmfr1); + init_cpu_ftr_reg(SYS_ID_MMFR2_EL1, info->reg_id_mmfr2); + init_cpu_ftr_reg(SYS_ID_MMFR3_EL1, info->reg_id_mmfr3); + init_cpu_ftr_reg(SYS_ID_MMFR4_EL1, info->reg_id_mmfr4); + init_cpu_ftr_reg(SYS_ID_MMFR5_EL1, info->reg_id_mmfr5); + init_cpu_ftr_reg(SYS_ID_PFR0_EL1, info->reg_id_pfr0); + init_cpu_ftr_reg(SYS_ID_PFR1_EL1, info->reg_id_pfr1); + init_cpu_ftr_reg(SYS_ID_PFR2_EL1, info->reg_id_pfr2); + init_cpu_ftr_reg(SYS_MVFR0_EL1, info->reg_mvfr0); + init_cpu_ftr_reg(SYS_MVFR1_EL1, info->reg_mvfr1); + init_cpu_ftr_reg(SYS_MVFR2_EL1, info->reg_mvfr2); +} + void __init init_cpu_features(struct cpuinfo_arm64 *info) { /* Before we start using the tables, make sure it is sorted */ @@ -882,35 +934,17 @@ void __init init_cpu_features(struct cpuinfo_arm64 *info) init_cpu_ftr_reg(SYS_ID_AA64PFR1_EL1, info->reg_id_aa64pfr1); init_cpu_ftr_reg(SYS_ID_AA64ZFR0_EL1, info->reg_id_aa64zfr0); - if (id_aa64pfr0_32bit_el0(info->reg_id_aa64pfr0)) { - init_cpu_ftr_reg(SYS_ID_DFR0_EL1, info->reg_id_dfr0); - init_cpu_ftr_reg(SYS_ID_DFR1_EL1, info->reg_id_dfr1); - init_cpu_ftr_reg(SYS_ID_ISAR0_EL1, info->reg_id_isar0); - init_cpu_ftr_reg(SYS_ID_ISAR1_EL1, info->reg_id_isar1); - init_cpu_ftr_reg(SYS_ID_ISAR2_EL1, info->reg_id_isar2); - init_cpu_ftr_reg(SYS_ID_ISAR3_EL1, info->reg_id_isar3); - init_cpu_ftr_reg(SYS_ID_ISAR4_EL1, info->reg_id_isar4); - init_cpu_ftr_reg(SYS_ID_ISAR5_EL1, info->reg_id_isar5); - init_cpu_ftr_reg(SYS_ID_ISAR6_EL1, info->reg_id_isar6); - init_cpu_ftr_reg(SYS_ID_MMFR0_EL1, info->reg_id_mmfr0); - init_cpu_ftr_reg(SYS_ID_MMFR1_EL1, info->reg_id_mmfr1); - init_cpu_ftr_reg(SYS_ID_MMFR2_EL1, info->reg_id_mmfr2); - init_cpu_ftr_reg(SYS_ID_MMFR3_EL1, info->reg_id_mmfr3); - init_cpu_ftr_reg(SYS_ID_MMFR4_EL1, info->reg_id_mmfr4); - init_cpu_ftr_reg(SYS_ID_MMFR5_EL1, info->reg_id_mmfr5); - init_cpu_ftr_reg(SYS_ID_PFR0_EL1, info->reg_id_pfr0); - init_cpu_ftr_reg(SYS_ID_PFR1_EL1, info->reg_id_pfr1); - init_cpu_ftr_reg(SYS_ID_PFR2_EL1, info->reg_id_pfr2); - init_cpu_ftr_reg(SYS_MVFR0_EL1, info->reg_mvfr0); - init_cpu_ftr_reg(SYS_MVFR1_EL1, info->reg_mvfr1); - init_cpu_ftr_reg(SYS_MVFR2_EL1, info->reg_mvfr2); - } + if (id_aa64pfr0_32bit_el0(info->reg_id_aa64pfr0)) + init_32bit_cpu_features(&info->aarch32); if (id_aa64pfr0_sve(info->reg_id_aa64pfr0)) { init_cpu_ftr_reg(SYS_ZCR_EL1, info->reg_zcr); sve_init_vq_map(); } + if (id_aa64pfr1_mte(info->reg_id_aa64pfr1)) + init_cpu_ftr_reg(SYS_GMID_EL1, info->reg_gmid); + /* * Initialize the indirect array of CPU hwcaps capabilities pointers * before we handle the boot CPU below. 
@@ -975,21 +1009,29 @@ static void relax_cpu_ftr_reg(u32 sys_id, int field) WARN_ON(!ftrp->width); } -static int update_32bit_cpu_features(int cpu, struct cpuinfo_arm64 *info, - struct cpuinfo_arm64 *boot) +static void lazy_init_32bit_cpu_features(struct cpuinfo_arm64 *info, + struct cpuinfo_arm64 *boot) +{ + static bool boot_cpu_32bit_regs_overridden = false; + + if (!allow_mismatched_32bit_el0 || boot_cpu_32bit_regs_overridden) + return; + + if (id_aa64pfr0_32bit_el0(boot->reg_id_aa64pfr0)) + return; + + boot->aarch32 = info->aarch32; + init_32bit_cpu_features(&boot->aarch32); + boot_cpu_32bit_regs_overridden = true; +} + +static int update_32bit_cpu_features(int cpu, struct cpuinfo_32bit *info, + struct cpuinfo_32bit *boot) { int taint = 0; u64 pfr0 = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1); /* - * If we don't have AArch32 at all then skip the checks entirely - * as the register values may be UNKNOWN and we're not going to be - * using them for anything. - */ - if (!id_aa64pfr0_32bit_el0(pfr0)) - return taint; - - /* * If we don't have AArch32 at EL1, then relax the strictness of * EL1-dependent register fields to avoid spurious sanity check fails. */ @@ -1135,10 +1177,29 @@ void update_cpu_features(int cpu, } /* + * The kernel uses the LDGM/STGM instructions and the number of tags + * they read/write depends on the GMID_EL1.BS field. Check that the + * value is the same on all CPUs. + */ + if (IS_ENABLED(CONFIG_ARM64_MTE) && + id_aa64pfr1_mte(info->reg_id_aa64pfr1)) { + taint |= check_update_ftr_reg(SYS_GMID_EL1, cpu, + info->reg_gmid, boot->reg_gmid); + } + + /* + * If we don't have AArch32 at all then skip the checks entirely + * as the register values may be UNKNOWN and we're not going to be + * using them for anything. + * * This relies on a sanitised view of the AArch64 ID registers * (e.g. SYS_ID_AA64PFR0_EL1), so we call it last. */ - taint |= update_32bit_cpu_features(cpu, info, boot); + if (id_aa64pfr0_32bit_el0(info->reg_id_aa64pfr0)) { + lazy_init_32bit_cpu_features(info, boot); + taint |= update_32bit_cpu_features(cpu, &info->aarch32, + &boot->aarch32); + } /* * Mismatched CPU features are a recipe for disaster. 
Don't even @@ -1248,6 +1309,28 @@ has_cpuid_feature(const struct arm64_cpu_capabilities *entry, int scope) return feature_matches(val, entry); } +const struct cpumask *system_32bit_el0_cpumask(void) +{ + if (!system_supports_32bit_el0()) + return cpu_none_mask; + + if (static_branch_unlikely(&arm64_mismatched_32bit_el0)) + return cpu_32bit_el0_mask; + + return cpu_possible_mask; +} + +static bool has_32bit_el0(const struct arm64_cpu_capabilities *entry, int scope) +{ + if (!has_cpuid_feature(entry, scope)) + return allow_mismatched_32bit_el0; + + if (scope == SCOPE_SYSTEM) + pr_info("detected: 32-bit EL0 Support\n"); + + return true; +} + static bool has_useable_gicv3_cpuif(const struct arm64_cpu_capabilities *entry, int scope) { bool has_sre; @@ -1866,10 +1949,9 @@ static const struct arm64_cpu_capabilities arm64_features[] = { .cpu_enable = cpu_copy_el2regs, }, { - .desc = "32-bit EL0 Support", - .capability = ARM64_HAS_32BIT_EL0, + .capability = ARM64_HAS_32BIT_EL0_DO_NOT_USE, .type = ARM64_CPUCAP_SYSTEM_FEATURE, - .matches = has_cpuid_feature, + .matches = has_32bit_el0, .sys_reg = SYS_ID_AA64PFR0_EL1, .sign = FTR_UNSIGNED, .field_pos = ID_AA64PFR0_EL0_SHIFT, @@ -2378,7 +2460,7 @@ static const struct arm64_cpu_capabilities compat_elf_hwcaps[] = { {}, }; -static void __init cap_set_elf_hwcap(const struct arm64_cpu_capabilities *cap) +static void cap_set_elf_hwcap(const struct arm64_cpu_capabilities *cap) { switch (cap->hwcap_type) { case CAP_HWCAP: @@ -2423,7 +2505,7 @@ static bool cpus_have_elf_hwcap(const struct arm64_cpu_capabilities *cap) return rc; } -static void __init setup_elf_hwcaps(const struct arm64_cpu_capabilities *hwcaps) +static void setup_elf_hwcaps(const struct arm64_cpu_capabilities *hwcaps) { /* We support emulation of accesses to CPU ID feature registers */ cpu_set_named_feature(CPUID); @@ -2598,7 +2680,7 @@ static void check_early_cpu_features(void) } static void -verify_local_elf_hwcaps(const struct arm64_cpu_capabilities *caps) +__verify_local_elf_hwcaps(const struct arm64_cpu_capabilities *caps) { for (; caps->matches; caps++) @@ -2609,6 +2691,14 @@ verify_local_elf_hwcaps(const struct arm64_cpu_capabilities *caps) } } +static void verify_local_elf_hwcaps(void) +{ + __verify_local_elf_hwcaps(arm64_elf_hwcaps); + + if (id_aa64pfr0_32bit_el0(read_cpuid(ID_AA64PFR0_EL1))) + __verify_local_elf_hwcaps(compat_elf_hwcaps); +} + static void verify_sve_features(void) { u64 safe_zcr = read_sanitised_ftr_reg(SYS_ZCR_EL1); @@ -2673,11 +2763,7 @@ static void verify_local_cpu_capabilities(void) * on all secondary CPUs. 
*/ verify_local_cpu_caps(SCOPE_ALL & ~SCOPE_BOOT_CPU); - - verify_local_elf_hwcaps(arm64_elf_hwcaps); - - if (system_supports_32bit_el0()) - verify_local_elf_hwcaps(compat_elf_hwcaps); + verify_local_elf_hwcaps(); if (system_supports_sve()) verify_sve_features(); @@ -2812,6 +2898,34 @@ void __init setup_cpu_features(void) ARCH_DMA_MINALIGN); } +static int enable_mismatched_32bit_el0(unsigned int cpu) +{ + struct cpuinfo_arm64 *info = &per_cpu(cpu_data, cpu); + bool cpu_32bit = id_aa64pfr0_32bit_el0(info->reg_id_aa64pfr0); + + if (cpu_32bit) { + cpumask_set_cpu(cpu, cpu_32bit_el0_mask); + static_branch_enable_cpuslocked(&arm64_mismatched_32bit_el0); + setup_elf_hwcaps(compat_elf_hwcaps); + } + + return 0; +} + +static int __init init_32bit_el0_mask(void) +{ + if (!allow_mismatched_32bit_el0) + return 0; + + if (!zalloc_cpumask_var(&cpu_32bit_el0_mask, GFP_KERNEL)) + return -ENOMEM; + + return cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, + "arm64/mismatched_32bit_el0:online", + enable_mismatched_32bit_el0, NULL); +} +subsys_initcall_sync(init_32bit_el0_mask); + static void __maybe_unused cpu_enable_cnp(struct arm64_cpu_capabilities const *cap) { cpu_replace_ttbr1(lm_alias(swapper_pg_dir)); @@ -2905,8 +3019,8 @@ static int emulate_mrs(struct pt_regs *regs, u32 insn) } static struct undef_hook mrs_hook = { - .instr_mask = 0xfff00000, - .instr_val = 0xd5300000, + .instr_mask = 0xffff0000, + .instr_val = 0xd5380000, .pstate_mask = PSR_AA32_MODE_MASK, .pstate_val = PSR_MODE_EL0t, .fn = emulate_mrs, diff --git a/arch/arm64/kernel/cpuinfo.c b/arch/arm64/kernel/cpuinfo.c index 51fcf99d5351..87731fea5e41 100644 --- a/arch/arm64/kernel/cpuinfo.c +++ b/arch/arm64/kernel/cpuinfo.c @@ -246,7 +246,7 @@ static struct kobj_type cpuregs_kobj_type = { struct cpuinfo_arm64 *info = kobj_to_cpuinfo(kobj); \ \ if (info->reg_midr) \ - return sprintf(buf, "0x%016x\n", info->reg_##_field); \ + return sprintf(buf, "0x%016llx\n", info->reg_##_field); \ else \ return 0; \ } \ @@ -344,6 +344,32 @@ static void cpuinfo_detect_icache_policy(struct cpuinfo_arm64 *info) pr_info("Detected %s I-cache on CPU%d\n", icache_policy_str[l1ip], cpu); } +static void __cpuinfo_store_cpu_32bit(struct cpuinfo_32bit *info) +{ + info->reg_id_dfr0 = read_cpuid(ID_DFR0_EL1); + info->reg_id_dfr1 = read_cpuid(ID_DFR1_EL1); + info->reg_id_isar0 = read_cpuid(ID_ISAR0_EL1); + info->reg_id_isar1 = read_cpuid(ID_ISAR1_EL1); + info->reg_id_isar2 = read_cpuid(ID_ISAR2_EL1); + info->reg_id_isar3 = read_cpuid(ID_ISAR3_EL1); + info->reg_id_isar4 = read_cpuid(ID_ISAR4_EL1); + info->reg_id_isar5 = read_cpuid(ID_ISAR5_EL1); + info->reg_id_isar6 = read_cpuid(ID_ISAR6_EL1); + info->reg_id_mmfr0 = read_cpuid(ID_MMFR0_EL1); + info->reg_id_mmfr1 = read_cpuid(ID_MMFR1_EL1); + info->reg_id_mmfr2 = read_cpuid(ID_MMFR2_EL1); + info->reg_id_mmfr3 = read_cpuid(ID_MMFR3_EL1); + info->reg_id_mmfr4 = read_cpuid(ID_MMFR4_EL1); + info->reg_id_mmfr5 = read_cpuid(ID_MMFR5_EL1); + info->reg_id_pfr0 = read_cpuid(ID_PFR0_EL1); + info->reg_id_pfr1 = read_cpuid(ID_PFR1_EL1); + info->reg_id_pfr2 = read_cpuid(ID_PFR2_EL1); + + info->reg_mvfr0 = read_cpuid(MVFR0_EL1); + info->reg_mvfr1 = read_cpuid(MVFR1_EL1); + info->reg_mvfr2 = read_cpuid(MVFR2_EL1); +} + static void __cpuinfo_store_cpu(struct cpuinfo_arm64 *info) { info->reg_cntfrq = arch_timer_get_cntfrq(); @@ -371,31 +397,11 @@ static void __cpuinfo_store_cpu(struct cpuinfo_arm64 *info) info->reg_id_aa64pfr1 = read_cpuid(ID_AA64PFR1_EL1); info->reg_id_aa64zfr0 = read_cpuid(ID_AA64ZFR0_EL1); - /* Update the 32bit ID registers only if 
AArch32 is implemented */ - if (id_aa64pfr0_32bit_el0(info->reg_id_aa64pfr0)) { - info->reg_id_dfr0 = read_cpuid(ID_DFR0_EL1); - info->reg_id_dfr1 = read_cpuid(ID_DFR1_EL1); - info->reg_id_isar0 = read_cpuid(ID_ISAR0_EL1); - info->reg_id_isar1 = read_cpuid(ID_ISAR1_EL1); - info->reg_id_isar2 = read_cpuid(ID_ISAR2_EL1); - info->reg_id_isar3 = read_cpuid(ID_ISAR3_EL1); - info->reg_id_isar4 = read_cpuid(ID_ISAR4_EL1); - info->reg_id_isar5 = read_cpuid(ID_ISAR5_EL1); - info->reg_id_isar6 = read_cpuid(ID_ISAR6_EL1); - info->reg_id_mmfr0 = read_cpuid(ID_MMFR0_EL1); - info->reg_id_mmfr1 = read_cpuid(ID_MMFR1_EL1); - info->reg_id_mmfr2 = read_cpuid(ID_MMFR2_EL1); - info->reg_id_mmfr3 = read_cpuid(ID_MMFR3_EL1); - info->reg_id_mmfr4 = read_cpuid(ID_MMFR4_EL1); - info->reg_id_mmfr5 = read_cpuid(ID_MMFR5_EL1); - info->reg_id_pfr0 = read_cpuid(ID_PFR0_EL1); - info->reg_id_pfr1 = read_cpuid(ID_PFR1_EL1); - info->reg_id_pfr2 = read_cpuid(ID_PFR2_EL1); - - info->reg_mvfr0 = read_cpuid(MVFR0_EL1); - info->reg_mvfr1 = read_cpuid(MVFR1_EL1); - info->reg_mvfr2 = read_cpuid(MVFR2_EL1); - } + if (id_aa64pfr1_mte(info->reg_id_aa64pfr1)) + info->reg_gmid = read_cpuid(GMID_EL1); + + if (id_aa64pfr0_32bit_el0(info->reg_id_aa64pfr0)) + __cpuinfo_store_cpu_32bit(&info->aarch32); if (IS_ENABLED(CONFIG_ARM64_SVE) && id_aa64pfr0_sve(info->reg_id_aa64pfr0)) diff --git a/arch/arm64/kernel/entry-common.c b/arch/arm64/kernel/entry-common.c index 340d04e13617..12ce14a98b7c 100644 --- a/arch/arm64/kernel/entry-common.c +++ b/arch/arm64/kernel/entry-common.c @@ -6,7 +6,11 @@ */ #include <linux/context_tracking.h> +#include <linux/linkage.h> +#include <linux/lockdep.h> #include <linux/ptrace.h> +#include <linux/sched.h> +#include <linux/sched/debug.h> #include <linux/thread_info.h> #include <asm/cpufeature.h> @@ -15,7 +19,11 @@ #include <asm/exception.h> #include <asm/kprobes.h> #include <asm/mmu.h> +#include <asm/processor.h> +#include <asm/sdei.h> +#include <asm/stacktrace.h> #include <asm/sysreg.h> +#include <asm/system_misc.h> /* * This is intended to match the logic in irqentry_enter(), handling the kernel @@ -67,7 +75,7 @@ static void noinstr exit_to_kernel_mode(struct pt_regs *regs) } } -void noinstr arm64_enter_nmi(struct pt_regs *regs) +static void noinstr arm64_enter_nmi(struct pt_regs *regs) { regs->lockdep_hardirqs = lockdep_hardirqs_enabled(); @@ -80,7 +88,7 @@ void noinstr arm64_enter_nmi(struct pt_regs *regs) ftrace_nmi_enter(); } -void noinstr arm64_exit_nmi(struct pt_regs *regs) +static void noinstr arm64_exit_nmi(struct pt_regs *regs) { bool restore = regs->lockdep_hardirqs; @@ -97,7 +105,7 @@ void noinstr arm64_exit_nmi(struct pt_regs *regs) __nmi_exit(); } -asmlinkage void noinstr enter_el1_irq_or_nmi(struct pt_regs *regs) +static void noinstr enter_el1_irq_or_nmi(struct pt_regs *regs) { if (IS_ENABLED(CONFIG_ARM64_PSEUDO_NMI) && !interrupts_enabled(regs)) arm64_enter_nmi(regs); @@ -105,7 +113,7 @@ asmlinkage void noinstr enter_el1_irq_or_nmi(struct pt_regs *regs) enter_from_kernel_mode(regs); } -asmlinkage void noinstr exit_el1_irq_or_nmi(struct pt_regs *regs) +static void noinstr exit_el1_irq_or_nmi(struct pt_regs *regs) { if (IS_ENABLED(CONFIG_ARM64_PSEUDO_NMI) && !interrupts_enabled(regs)) arm64_exit_nmi(regs); @@ -113,6 +121,65 @@ asmlinkage void noinstr exit_el1_irq_or_nmi(struct pt_regs *regs) exit_to_kernel_mode(regs); } +static void __sched arm64_preempt_schedule_irq(void) +{ + lockdep_assert_irqs_disabled(); + + /* + * DAIF.DA are cleared at the start of IRQ/FIQ handling, and when GIC + * priority 
masking is used the GIC irqchip driver will clear DAIF.IF + * using gic_arch_enable_irqs() for normal IRQs. If anything is set in + * DAIF we must have handled an NMI, so skip preemption. + */ + if (system_uses_irq_prio_masking() && read_sysreg(daif)) + return; + + /* + * Preempting a task from an IRQ means we leave copies of PSTATE + * on the stack. cpufeature's enable calls may modify PSTATE, but + * resuming one of these preempted tasks would undo those changes. + * + * Only allow a task to be preempted once cpufeatures have been + * enabled. + */ + if (system_capabilities_finalized()) + preempt_schedule_irq(); +} + +static void do_interrupt_handler(struct pt_regs *regs, + void (*handler)(struct pt_regs *)) +{ + if (on_thread_stack()) + call_on_irq_stack(regs, handler); + else + handler(regs); +} + +extern void (*handle_arch_irq)(struct pt_regs *); +extern void (*handle_arch_fiq)(struct pt_regs *); + +static void noinstr __panic_unhandled(struct pt_regs *regs, const char *vector, + unsigned int esr) +{ + arm64_enter_nmi(regs); + + console_verbose(); + + pr_crit("Unhandled %s exception on CPU%d, ESR 0x%08x -- %s\n", + vector, smp_processor_id(), esr, + esr_get_class_string(esr)); + + __show_regs(regs); + panic("Unhandled exception"); +} + +#define UNHANDLED(el, regsize, vector) \ +asmlinkage void noinstr el##_##regsize##_##vector##_handler(struct pt_regs *regs) \ +{ \ + const char *desc = #regsize "-bit " #el " " #vector; \ + __panic_unhandled(regs, desc, read_sysreg(esr_el1)); \ +} + #ifdef CONFIG_ARM64_ERRATUM_1463225 static DEFINE_PER_CPU(int, __in_cortex_a76_erratum_1463225_wa); @@ -162,6 +229,11 @@ static bool cortex_a76_erratum_1463225_debug_handler(struct pt_regs *regs) } #endif /* CONFIG_ARM64_ERRATUM_1463225 */ +UNHANDLED(el1t, 64, sync) +UNHANDLED(el1t, 64, irq) +UNHANDLED(el1t, 64, fiq) +UNHANDLED(el1t, 64, error) + static void noinstr el1_abort(struct pt_regs *regs, unsigned long esr) { unsigned long far = read_sysreg(far_el1); @@ -193,15 +265,6 @@ static void noinstr el1_undef(struct pt_regs *regs) exit_to_kernel_mode(regs); } -static void noinstr el1_inv(struct pt_regs *regs, unsigned long esr) -{ - enter_from_kernel_mode(regs); - local_daif_inherit(regs); - bad_mode(regs, 0, esr); - local_daif_mask(); - exit_to_kernel_mode(regs); -} - static void noinstr arm64_enter_el1_dbg(struct pt_regs *regs) { regs->lockdep_hardirqs = lockdep_hardirqs_enabled(); @@ -245,7 +308,7 @@ static void noinstr el1_fpac(struct pt_regs *regs, unsigned long esr) exit_to_kernel_mode(regs); } -asmlinkage void noinstr el1_sync_handler(struct pt_regs *regs) +asmlinkage void noinstr el1h_64_sync_handler(struct pt_regs *regs) { unsigned long esr = read_sysreg(esr_el1); @@ -275,10 +338,50 @@ asmlinkage void noinstr el1_sync_handler(struct pt_regs *regs) el1_fpac(regs, esr); break; default: - el1_inv(regs, esr); + __panic_unhandled(regs, "64-bit el1h sync", esr); } } +static void noinstr el1_interrupt(struct pt_regs *regs, + void (*handler)(struct pt_regs *)) +{ + write_sysreg(DAIF_PROCCTX_NOIRQ, daif); + + enter_el1_irq_or_nmi(regs); + do_interrupt_handler(regs, handler); + + /* + * Note: thread_info::preempt_count includes both thread_info::count + * and thread_info::need_resched, and is not equivalent to + * preempt_count(). 
+ */ + if (IS_ENABLED(CONFIG_PREEMPTION) && + READ_ONCE(current_thread_info()->preempt_count) == 0) + arm64_preempt_schedule_irq(); + + exit_el1_irq_or_nmi(regs); +} + +asmlinkage void noinstr el1h_64_irq_handler(struct pt_regs *regs) +{ + el1_interrupt(regs, handle_arch_irq); +} + +asmlinkage void noinstr el1h_64_fiq_handler(struct pt_regs *regs) +{ + el1_interrupt(regs, handle_arch_fiq); +} + +asmlinkage void noinstr el1h_64_error_handler(struct pt_regs *regs) +{ + unsigned long esr = read_sysreg(esr_el1); + + local_daif_restore(DAIF_ERRCTX); + arm64_enter_nmi(regs); + do_serror(regs, esr); + arm64_exit_nmi(regs); +} + asmlinkage void noinstr enter_from_user_mode(void) { lockdep_hardirqs_off(CALLER_ADDR0); @@ -398,7 +501,7 @@ static void noinstr el0_dbg(struct pt_regs *regs, unsigned long esr) enter_from_user_mode(); do_debug_exception(far, esr, regs); - local_daif_restore(DAIF_PROCCTX_NOIRQ); + local_daif_restore(DAIF_PROCCTX); } static void noinstr el0_svc(struct pt_regs *regs) @@ -415,7 +518,7 @@ static void noinstr el0_fpac(struct pt_regs *regs, unsigned long esr) do_ptrauth_fault(regs, esr); } -asmlinkage void noinstr el0_sync_handler(struct pt_regs *regs) +asmlinkage void noinstr el0t_64_sync_handler(struct pt_regs *regs) { unsigned long esr = read_sysreg(esr_el1); @@ -468,6 +571,56 @@ asmlinkage void noinstr el0_sync_handler(struct pt_regs *regs) } } +static void noinstr el0_interrupt(struct pt_regs *regs, + void (*handler)(struct pt_regs *)) +{ + enter_from_user_mode(); + + write_sysreg(DAIF_PROCCTX_NOIRQ, daif); + + if (regs->pc & BIT(55)) + arm64_apply_bp_hardening(); + + do_interrupt_handler(regs, handler); +} + +static void noinstr __el0_irq_handler_common(struct pt_regs *regs) +{ + el0_interrupt(regs, handle_arch_irq); +} + +asmlinkage void noinstr el0t_64_irq_handler(struct pt_regs *regs) +{ + __el0_irq_handler_common(regs); +} + +static void noinstr __el0_fiq_handler_common(struct pt_regs *regs) +{ + el0_interrupt(regs, handle_arch_fiq); +} + +asmlinkage void noinstr el0t_64_fiq_handler(struct pt_regs *regs) +{ + __el0_fiq_handler_common(regs); +} + +static void __el0_error_handler_common(struct pt_regs *regs) +{ + unsigned long esr = read_sysreg(esr_el1); + + enter_from_user_mode(); + local_daif_restore(DAIF_ERRCTX); + arm64_enter_nmi(regs); + do_serror(regs, esr); + arm64_exit_nmi(regs); + local_daif_restore(DAIF_PROCCTX); +} + +asmlinkage void noinstr el0t_64_error_handler(struct pt_regs *regs) +{ + __el0_error_handler_common(regs); +} + #ifdef CONFIG_COMPAT static void noinstr el0_cp15(struct pt_regs *regs, unsigned long esr) { @@ -483,7 +636,7 @@ static void noinstr el0_svc_compat(struct pt_regs *regs) do_el0_svc_compat(regs); } -asmlinkage void noinstr el0_sync_compat_handler(struct pt_regs *regs) +asmlinkage void noinstr el0t_32_sync_handler(struct pt_regs *regs) { unsigned long esr = read_sysreg(esr_el1); @@ -526,4 +679,71 @@ asmlinkage void noinstr el0_sync_compat_handler(struct pt_regs *regs) el0_inv(regs, esr); } } + +asmlinkage void noinstr el0t_32_irq_handler(struct pt_regs *regs) +{ + __el0_irq_handler_common(regs); +} + +asmlinkage void noinstr el0t_32_fiq_handler(struct pt_regs *regs) +{ + __el0_fiq_handler_common(regs); +} + +asmlinkage void noinstr el0t_32_error_handler(struct pt_regs *regs) +{ + __el0_error_handler_common(regs); +} +#else /* CONFIG_COMPAT */ +UNHANDLED(el0t, 32, sync) +UNHANDLED(el0t, 32, irq) +UNHANDLED(el0t, 32, fiq) +UNHANDLED(el0t, 32, error) #endif /* CONFIG_COMPAT */ + +#ifdef CONFIG_VMAP_STACK +asmlinkage void noinstr 
handle_bad_stack(struct pt_regs *regs) +{ + unsigned int esr = read_sysreg(esr_el1); + unsigned long far = read_sysreg(far_el1); + + arm64_enter_nmi(regs); + panic_bad_stack(regs, esr, far); +} +#endif /* CONFIG_VMAP_STACK */ + +#ifdef CONFIG_ARM_SDE_INTERFACE +asmlinkage noinstr unsigned long +__sdei_handler(struct pt_regs *regs, struct sdei_registered_event *arg) +{ + unsigned long ret; + + /* + * We didn't take an exception to get here, so the HW hasn't + * set/cleared bits in PSTATE that we may rely on. + * + * The original SDEI spec (ARM DEN 0054A) can be read ambiguously as to + * whether PSTATE bits are inherited unchanged or generated from + * scratch, and the TF-A implementation always clears PAN and always + * clears UAO. There are no other known implementations. + * + * Subsequent revisions (ARM DEN 0054B) follow the usual rules for how + * PSTATE is modified upon architectural exceptions, and so PAN is + * either inherited or set per SCTLR_ELx.SPAN, and UAO is always + * cleared. + * + * We must explicitly reset PAN to the expected state, including + * clearing it when the host isn't using it, in case a VM had it set. + */ + if (system_uses_hw_pan()) + set_pstate_pan(1); + else if (cpu_has_pan()) + set_pstate_pan(0); + + arm64_enter_nmi(regs); + ret = do_sdei_event(regs, arg); + arm64_exit_nmi(regs); + + return ret; +} +#endif /* CONFIG_ARM_SDE_INTERFACE */ diff --git a/arch/arm64/kernel/entry-fpsimd.S b/arch/arm64/kernel/entry-fpsimd.S index 3ecec60d3295..0a7a64753878 100644 --- a/arch/arm64/kernel/entry-fpsimd.S +++ b/arch/arm64/kernel/entry-fpsimd.S @@ -63,16 +63,24 @@ SYM_FUNC_END(sve_set_vq) * and the rest zeroed. All the other SVE registers will be zeroed. */ SYM_FUNC_START(sve_load_from_fpsimd_state) - sve_load_vq x1, x2, x3 - fpsimd_restore x0, 8 - _for n, 0, 15, _sve_pfalse \n - _sve_wrffr 0 - ret + sve_load_vq x1, x2, x3 + fpsimd_restore x0, 8 + sve_flush_p_ffr + ret SYM_FUNC_END(sve_load_from_fpsimd_state) -/* Zero all SVE registers but the first 128-bits of each vector */ +/* + * Zero all SVE registers but the first 128-bits of each vector + * + * VQ must already be configured by caller, any further updates of VQ + * will need to ensure that the register state remains valid. + * + * x0 = VQ - 1 + */ SYM_FUNC_START(sve_flush_live) - sve_flush + cbz x0, 1f // A VQ-1 of 0 is 128 bits so no extra Z state + sve_flush_z +1: sve_flush_p_ffr ret SYM_FUNC_END(sve_flush_live) diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S index 3513984a88bd..863d44f73028 100644 --- a/arch/arm64/kernel/entry.S +++ b/arch/arm64/kernel/entry.S @@ -33,12 +33,6 @@ * Context tracking and irqflag tracing need to instrument transitions between * user and kernel mode. 
*/ - .macro user_exit_irqoff -#if defined(CONFIG_CONTEXT_TRACKING) || defined(CONFIG_TRACE_IRQFLAGS) - bl enter_from_user_mode -#endif - .endm - .macro user_enter_irqoff #if defined(CONFIG_CONTEXT_TRACKING) || defined(CONFIG_TRACE_IRQFLAGS) bl exit_to_user_mode @@ -51,16 +45,7 @@ .endr .endm -/* - * Bad Abort numbers - *----------------- - */ -#define BAD_SYNC 0 -#define BAD_IRQ 1 -#define BAD_FIQ 2 -#define BAD_ERROR 3 - - .macro kernel_ventry, el, label, regsize = 64 + .macro kernel_ventry, el:req, ht:req, regsize:req, label:req .align 7 #ifdef CONFIG_UNMAP_KERNEL_AT_EL0 .if \el == 0 @@ -87,7 +72,7 @@ alternative_else_nop_endif tbnz x0, #THREAD_SHIFT, 0f sub x0, sp, x0 // x0'' = sp' - x0' = (sp + x0) - sp = x0 sub sp, sp, x0 // sp'' = sp' - x0 = (sp + x0) - x0 = sp - b el\()\el\()_\label + b el\el\ht\()_\regsize\()_\label 0: /* @@ -119,7 +104,7 @@ alternative_else_nop_endif sub sp, sp, x0 mrs x0, tpidrro_el0 #endif - b el\()\el\()_\label + b el\el\ht\()_\regsize\()_\label .endm .macro tramp_alias, dst, sym @@ -275,7 +260,7 @@ alternative_else_nop_endif mte_set_kernel_gcr x22, x23 - scs_load tsk, x20 + scs_load tsk .else add x21, sp, #PT_REGS_SIZE get_current_task tsk @@ -285,7 +270,7 @@ alternative_else_nop_endif stp lr, x21, [sp, #S_LR] /* - * For exceptions from EL0, create a terminal frame record. + * For exceptions from EL0, create a final frame record. * For exceptions from EL1, create a synthetic frame record so the * interrupted code shows up in the backtrace. */ @@ -375,7 +360,7 @@ alternative_if ARM64_WORKAROUND_845719 alternative_else_nop_endif #endif 3: - scs_save tsk, x0 + scs_save tsk #ifdef CONFIG_ARM64_PTR_AUTH alternative_if ARM64_HAS_ADDRESS_AUTH @@ -486,63 +471,12 @@ SYM_CODE_START_LOCAL(__swpan_exit_el0) SYM_CODE_END(__swpan_exit_el0) #endif - .macro irq_stack_entry - mov x19, sp // preserve the original sp -#ifdef CONFIG_SHADOW_CALL_STACK - mov x24, scs_sp // preserve the original shadow stack -#endif - - /* - * Compare sp with the base of the task stack. - * If the top ~(THREAD_SIZE - 1) bits match, we are on a task stack, - * and should switch to the irq stack. - */ - ldr x25, [tsk, TSK_STACK] - eor x25, x25, x19 - and x25, x25, #~(THREAD_SIZE - 1) - cbnz x25, 9998f - - ldr_this_cpu x25, irq_stack_ptr, x26 - mov x26, #IRQ_STACK_SIZE - add x26, x25, x26 - - /* switch to the irq stack */ - mov sp, x26 - -#ifdef CONFIG_SHADOW_CALL_STACK - /* also switch to the irq shadow stack */ - ldr_this_cpu scs_sp, irq_shadow_call_stack_ptr, x26 -#endif - -9998: - .endm - - /* - * The callee-saved regs (x19-x29) should be preserved between - * irq_stack_entry and irq_stack_exit, but note that kernel_entry - * uses x20-x23 to store data for later use. - */ - .macro irq_stack_exit - mov sp, x19 -#ifdef CONFIG_SHADOW_CALL_STACK - mov scs_sp, x24 -#endif - .endm - /* GPRs used by entry code */ tsk .req x28 // current thread_info /* * Interrupt handling. 
*/ - .macro irq_handler, handler:req - ldr_l x1, \handler - mov x0, sp - irq_stack_entry - blr x1 - irq_stack_exit - .endm - .macro gic_prio_kentry_setup, tmp:req #ifdef CONFIG_ARM64_PSEUDO_NMI alternative_if ARM64_HAS_IRQ_PRIO_MASKING @@ -552,45 +486,6 @@ tsk .req x28 // current thread_info #endif .endm - .macro el1_interrupt_handler, handler:req - enable_da - - mov x0, sp - bl enter_el1_irq_or_nmi - - irq_handler \handler - -#ifdef CONFIG_PREEMPTION - ldr x24, [tsk, #TSK_TI_PREEMPT] // get preempt count -alternative_if ARM64_HAS_IRQ_PRIO_MASKING - /* - * DA were cleared at start of handling, and IF are cleared by - * the GIC irqchip driver using gic_arch_enable_irqs() for - * normal IRQs. If anything is set, it means we come back from - * an NMI instead of a normal IRQ, so skip preemption - */ - mrs x0, daif - orr x24, x24, x0 -alternative_else_nop_endif - cbnz x24, 1f // preempt count != 0 || NMI return path - bl arm64_preempt_schedule_irq // irq en/disable is done inside -1: -#endif - - mov x0, sp - bl exit_el1_irq_or_nmi - .endm - - .macro el0_interrupt_handler, handler:req - user_exit_irqoff - enable_da - - tbz x22, #55, 1f - bl do_el0_irq_bp_hardening -1: - irq_handler \handler - .endm - .text /* @@ -600,32 +495,25 @@ alternative_else_nop_endif .align 11 SYM_CODE_START(vectors) - kernel_ventry 1, sync_invalid // Synchronous EL1t - kernel_ventry 1, irq_invalid // IRQ EL1t - kernel_ventry 1, fiq_invalid // FIQ EL1t - kernel_ventry 1, error_invalid // Error EL1t - - kernel_ventry 1, sync // Synchronous EL1h - kernel_ventry 1, irq // IRQ EL1h - kernel_ventry 1, fiq // FIQ EL1h - kernel_ventry 1, error // Error EL1h - - kernel_ventry 0, sync // Synchronous 64-bit EL0 - kernel_ventry 0, irq // IRQ 64-bit EL0 - kernel_ventry 0, fiq // FIQ 64-bit EL0 - kernel_ventry 0, error // Error 64-bit EL0 - -#ifdef CONFIG_COMPAT - kernel_ventry 0, sync_compat, 32 // Synchronous 32-bit EL0 - kernel_ventry 0, irq_compat, 32 // IRQ 32-bit EL0 - kernel_ventry 0, fiq_compat, 32 // FIQ 32-bit EL0 - kernel_ventry 0, error_compat, 32 // Error 32-bit EL0 -#else - kernel_ventry 0, sync_invalid, 32 // Synchronous 32-bit EL0 - kernel_ventry 0, irq_invalid, 32 // IRQ 32-bit EL0 - kernel_ventry 0, fiq_invalid, 32 // FIQ 32-bit EL0 - kernel_ventry 0, error_invalid, 32 // Error 32-bit EL0 -#endif + kernel_ventry 1, t, 64, sync // Synchronous EL1t + kernel_ventry 1, t, 64, irq // IRQ EL1t + kernel_ventry 1, t, 64, fiq // FIQ EL1h + kernel_ventry 1, t, 64, error // Error EL1t + + kernel_ventry 1, h, 64, sync // Synchronous EL1h + kernel_ventry 1, h, 64, irq // IRQ EL1h + kernel_ventry 1, h, 64, fiq // FIQ EL1h + kernel_ventry 1, h, 64, error // Error EL1h + + kernel_ventry 0, t, 64, sync // Synchronous 64-bit EL0 + kernel_ventry 0, t, 64, irq // IRQ 64-bit EL0 + kernel_ventry 0, t, 64, fiq // FIQ 64-bit EL0 + kernel_ventry 0, t, 64, error // Error 64-bit EL0 + + kernel_ventry 0, t, 32, sync // Synchronous 32-bit EL0 + kernel_ventry 0, t, 32, irq // IRQ 32-bit EL0 + kernel_ventry 0, t, 32, fiq // FIQ 32-bit EL0 + kernel_ventry 0, t, 32, error // Error 32-bit EL0 SYM_CODE_END(vectors) #ifdef CONFIG_VMAP_STACK @@ -656,147 +544,46 @@ __bad_stack: ASM_BUG() #endif /* CONFIG_VMAP_STACK */ -/* - * Invalid mode handlers - */ - .macro inv_entry, el, reason, regsize = 64 + + .macro entry_handler el:req, ht:req, regsize:req, label:req +SYM_CODE_START_LOCAL(el\el\ht\()_\regsize\()_\label) kernel_entry \el, \regsize mov x0, sp - mov x1, #\reason - mrs x2, esr_el1 - bl bad_mode - ASM_BUG() + bl 
el\el\ht\()_\regsize\()_\label\()_handler + .if \el == 0 + b ret_to_user + .else + b ret_to_kernel + .endif +SYM_CODE_END(el\el\ht\()_\regsize\()_\label) .endm -SYM_CODE_START_LOCAL(el0_sync_invalid) - inv_entry 0, BAD_SYNC -SYM_CODE_END(el0_sync_invalid) - -SYM_CODE_START_LOCAL(el0_irq_invalid) - inv_entry 0, BAD_IRQ -SYM_CODE_END(el0_irq_invalid) - -SYM_CODE_START_LOCAL(el0_fiq_invalid) - inv_entry 0, BAD_FIQ -SYM_CODE_END(el0_fiq_invalid) - -SYM_CODE_START_LOCAL(el0_error_invalid) - inv_entry 0, BAD_ERROR -SYM_CODE_END(el0_error_invalid) - -SYM_CODE_START_LOCAL(el1_sync_invalid) - inv_entry 1, BAD_SYNC -SYM_CODE_END(el1_sync_invalid) - -SYM_CODE_START_LOCAL(el1_irq_invalid) - inv_entry 1, BAD_IRQ -SYM_CODE_END(el1_irq_invalid) - -SYM_CODE_START_LOCAL(el1_fiq_invalid) - inv_entry 1, BAD_FIQ -SYM_CODE_END(el1_fiq_invalid) - -SYM_CODE_START_LOCAL(el1_error_invalid) - inv_entry 1, BAD_ERROR -SYM_CODE_END(el1_error_invalid) - /* - * EL1 mode handlers. + * Early exception handlers */ - .align 6 -SYM_CODE_START_LOCAL_NOALIGN(el1_sync) - kernel_entry 1 - mov x0, sp - bl el1_sync_handler - kernel_exit 1 -SYM_CODE_END(el1_sync) - - .align 6 -SYM_CODE_START_LOCAL_NOALIGN(el1_irq) - kernel_entry 1 - el1_interrupt_handler handle_arch_irq - kernel_exit 1 -SYM_CODE_END(el1_irq) - -SYM_CODE_START_LOCAL_NOALIGN(el1_fiq) - kernel_entry 1 - el1_interrupt_handler handle_arch_fiq - kernel_exit 1 -SYM_CODE_END(el1_fiq) - -/* - * EL0 mode handlers. - */ - .align 6 -SYM_CODE_START_LOCAL_NOALIGN(el0_sync) - kernel_entry 0 - mov x0, sp - bl el0_sync_handler - b ret_to_user -SYM_CODE_END(el0_sync) - -#ifdef CONFIG_COMPAT - .align 6 -SYM_CODE_START_LOCAL_NOALIGN(el0_sync_compat) - kernel_entry 0, 32 - mov x0, sp - bl el0_sync_compat_handler - b ret_to_user -SYM_CODE_END(el0_sync_compat) - - .align 6 -SYM_CODE_START_LOCAL_NOALIGN(el0_irq_compat) - kernel_entry 0, 32 - b el0_irq_naked -SYM_CODE_END(el0_irq_compat) - -SYM_CODE_START_LOCAL_NOALIGN(el0_fiq_compat) - kernel_entry 0, 32 - b el0_fiq_naked -SYM_CODE_END(el0_fiq_compat) - -SYM_CODE_START_LOCAL_NOALIGN(el0_error_compat) - kernel_entry 0, 32 - b el0_error_naked -SYM_CODE_END(el0_error_compat) -#endif - - .align 6 -SYM_CODE_START_LOCAL_NOALIGN(el0_irq) - kernel_entry 0 -el0_irq_naked: - el0_interrupt_handler handle_arch_irq - b ret_to_user -SYM_CODE_END(el0_irq) - -SYM_CODE_START_LOCAL_NOALIGN(el0_fiq) - kernel_entry 0 -el0_fiq_naked: - el0_interrupt_handler handle_arch_fiq - b ret_to_user -SYM_CODE_END(el0_fiq) - -SYM_CODE_START_LOCAL(el1_error) - kernel_entry 1 - mrs x1, esr_el1 - enable_dbg - mov x0, sp - bl do_serror + entry_handler 1, t, 64, sync + entry_handler 1, t, 64, irq + entry_handler 1, t, 64, fiq + entry_handler 1, t, 64, error + + entry_handler 1, h, 64, sync + entry_handler 1, h, 64, irq + entry_handler 1, h, 64, fiq + entry_handler 1, h, 64, error + + entry_handler 0, t, 64, sync + entry_handler 0, t, 64, irq + entry_handler 0, t, 64, fiq + entry_handler 0, t, 64, error + + entry_handler 0, t, 32, sync + entry_handler 0, t, 32, irq + entry_handler 0, t, 32, fiq + entry_handler 0, t, 32, error + +SYM_CODE_START_LOCAL(ret_to_kernel) kernel_exit 1 -SYM_CODE_END(el1_error) - -SYM_CODE_START_LOCAL(el0_error) - kernel_entry 0 -el0_error_naked: - mrs x25, esr_el1 - user_exit_irqoff - enable_dbg - mov x0, sp - mov x1, x25 - bl do_serror - enable_da - b ret_to_user -SYM_CODE_END(el0_error) +SYM_CODE_END(ret_to_kernel) /* * "slow" syscall return path. 
@@ -979,8 +766,8 @@ SYM_FUNC_START(cpu_switch_to) mov sp, x9 msr sp_el0, x1 ptrauth_keys_install_kernel x1, x8, x9, x10 - scs_save x0, x8 - scs_load x1, x8 + scs_save x0 + scs_load x1 ret SYM_FUNC_END(cpu_switch_to) NOKPROBE(cpu_switch_to) @@ -998,6 +785,42 @@ SYM_CODE_START(ret_from_fork) SYM_CODE_END(ret_from_fork) NOKPROBE(ret_from_fork) +/* + * void call_on_irq_stack(struct pt_regs *regs, + * void (*func)(struct pt_regs *)); + * + * Calls func(regs) using this CPU's irq stack and shadow irq stack. + */ +SYM_FUNC_START(call_on_irq_stack) +#ifdef CONFIG_SHADOW_CALL_STACK + stp scs_sp, xzr, [sp, #-16]! + ldr_this_cpu scs_sp, irq_shadow_call_stack_ptr, x17 +#endif + /* Create a frame record to save our LR and SP (implicit in FP) */ + stp x29, x30, [sp, #-16]! + mov x29, sp + + ldr_this_cpu x16, irq_stack_ptr, x17 + mov x15, #IRQ_STACK_SIZE + add x16, x16, x15 + + /* Move to the new stack and call the function there */ + mov sp, x16 + blr x1 + + /* + * Restore the SP from the FP, and restore the FP and LR from the frame + * record. + */ + mov sp, x29 + ldp x29, x30, [sp], #16 +#ifdef CONFIG_SHADOW_CALL_STACK + ldp scs_sp, xzr, [sp], #16 +#endif + ret +SYM_FUNC_END(call_on_irq_stack) +NOKPROBE(call_on_irq_stack) + #ifdef CONFIG_ARM_SDE_INTERFACE #include <asm/sdei.h> diff --git a/arch/arm64/kernel/fpsimd.c b/arch/arm64/kernel/fpsimd.c index ad3dd34a83cf..e57b23f95284 100644 --- a/arch/arm64/kernel/fpsimd.c +++ b/arch/arm64/kernel/fpsimd.c @@ -957,8 +957,10 @@ void do_sve_acc(unsigned int esr, struct pt_regs *regs) * disabling the trap, otherwise update our in-memory copy. */ if (!test_thread_flag(TIF_FOREIGN_FPSTATE)) { - sve_set_vq(sve_vq_from_vl(current->thread.sve_vl) - 1); - sve_flush_live(); + unsigned long vq_minus_one = + sve_vq_from_vl(current->thread.sve_vl) - 1; + sve_set_vq(vq_minus_one); + sve_flush_live(vq_minus_one); fpsimd_bind_task_to_cpu(); } else { fpsimd_to_sve(current); diff --git a/arch/arm64/kernel/ftrace.c b/arch/arm64/kernel/ftrace.c index b5d3ddaf69d9..7f467bd9db7a 100644 --- a/arch/arm64/kernel/ftrace.c +++ b/arch/arm64/kernel/ftrace.c @@ -15,6 +15,7 @@ #include <asm/debug-monitors.h> #include <asm/ftrace.h> #include <asm/insn.h> +#include <asm/patching.h> #ifdef CONFIG_DYNAMIC_FTRACE /* diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S index 6928cb67d3a0..c5c994a73a64 100644 --- a/arch/arm64/kernel/head.S +++ b/arch/arm64/kernel/head.S @@ -16,6 +16,7 @@ #include <asm/asm_pointer_auth.h> #include <asm/assembler.h> #include <asm/boot.h> +#include <asm/bug.h> #include <asm/ptrace.h> #include <asm/asm-offsets.h> #include <asm/cache.h> @@ -195,7 +196,7 @@ SYM_CODE_END(preserve_boot_args) and \iend, \iend, \istart // iend = (vend >> shift) & (ptrs - 1) mov \istart, \ptrs mul \istart, \istart, \count - add \iend, \iend, \istart // iend += (count - 1) * ptrs + add \iend, \iend, \istart // iend += count * ptrs // our entries span multiple tables lsr \istart, \vstart, \shift @@ -353,7 +354,6 @@ SYM_FUNC_START_LOCAL(__create_page_tables) #endif 1: ldr_l x4, idmap_ptrs_per_pgd - mov x5, x3 // __pa(__idmap_text_start) adr_l x6, __idmap_text_end // __pa(__idmap_text_end) map_memory x0, x1, x3, x6, x7, x3, x4, x10, x11, x12, x13, x14 @@ -390,28 +390,48 @@ SYM_FUNC_START_LOCAL(__create_page_tables) ret x28 SYM_FUNC_END(__create_page_tables) + /* + * Initialize CPU registers with task-specific and cpu-specific context. 
+ * + * Create a final frame record at task_pt_regs(current)->stackframe, so + * that the unwinder can identify the final frame record of any task by + * its location in the task stack. We reserve the entire pt_regs space + * for consistency with user tasks and kthreads. + */ + .macro init_cpu_task tsk, tmp1, tmp2 + msr sp_el0, \tsk + + ldr \tmp1, [\tsk, #TSK_STACK] + add sp, \tmp1, #THREAD_SIZE + sub sp, sp, #PT_REGS_SIZE + + stp xzr, xzr, [sp, #S_STACKFRAME] + add x29, sp, #S_STACKFRAME + + scs_load \tsk + + adr_l \tmp1, __per_cpu_offset + ldr w\tmp2, [\tsk, #TSK_CPU] + ldr \tmp1, [\tmp1, \tmp2, lsl #3] + set_this_cpu_offset \tmp1 + .endm + /* * The following fragment of code is executed with the MMU enabled. * * x0 = __PHYS_OFFSET */ SYM_FUNC_START_LOCAL(__primary_switched) - adrp x4, init_thread_union - add sp, x4, #THREAD_SIZE - adr_l x5, init_task - msr sp_el0, x5 // Save thread_info + adr_l x4, init_task + init_cpu_task x4, x5, x6 adr_l x8, vectors // load VBAR_EL1 with virtual msr vbar_el1, x8 // vector table address isb - stp xzr, x30, [sp, #-16]! + stp x29, x30, [sp, #-16]! mov x29, sp -#ifdef CONFIG_SHADOW_CALL_STACK - adr_l scs_sp, init_shadow_call_stack // Set shadow call stack -#endif - str_l x21, __fdt_pointer, x5 // Save FDT pointer ldr_l x4, kimage_vaddr // Save the offset between @@ -443,10 +463,9 @@ SYM_FUNC_START_LOCAL(__primary_switched) 0: #endif bl switch_to_vhe // Prefer VHE if possible - add sp, sp, #16 - mov x29, #0 - mov x30, #0 - b start_kernel + ldp x29, x30, [sp], #16 + bl start_kernel + ASM_BUG() SYM_FUNC_END(__primary_switched) .pushsection ".rodata", "a" @@ -548,7 +567,7 @@ SYM_FUNC_START_LOCAL(set_cpu_boot_mode_flag) cmp w0, #BOOT_CPU_MODE_EL2 b.ne 1f add x1, x1, #4 -1: str w0, [x1] // This CPU has booted in EL1 +1: str w0, [x1] // Save CPU boot mode dmb sy dc ivac, x1 // Invalidate potentially stale cache line ret @@ -629,21 +648,17 @@ SYM_FUNC_START_LOCAL(__secondary_switched) isb adr_l x0, secondary_data - ldr x1, [x0, #CPU_BOOT_STACK] // get secondary_data.stack - cbz x1, __secondary_too_slow - mov sp, x1 ldr x2, [x0, #CPU_BOOT_TASK] cbz x2, __secondary_too_slow - msr sp_el0, x2 - scs_load x2, x3 - mov x29, #0 - mov x30, #0 + + init_cpu_task x2, x1, x3 #ifdef CONFIG_ARM64_PTR_AUTH ptrauth_keys_init_cpu x2, x3, x4, x5 #endif - b secondary_start_kernel + bl secondary_start_kernel + ASM_BUG() SYM_FUNC_END(__secondary_switched) SYM_FUNC_START_LOCAL(__secondary_too_slow) diff --git a/arch/arm64/kernel/idle.c b/arch/arm64/kernel/idle.c new file mode 100644 index 000000000000..a2cfbacec2bb --- /dev/null +++ b/arch/arm64/kernel/idle.c @@ -0,0 +1,46 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Low-level idle sequences + */ + +#include <linux/cpu.h> +#include <linux/irqflags.h> + +#include <asm/barrier.h> +#include <asm/cpuidle.h> +#include <asm/cpufeature.h> +#include <asm/sysreg.h> + +/* + * cpu_do_idle() + * + * Idle the processor (wait for interrupt). + * + * If the CPU supports priority masking we must do additional work to + * ensure that interrupts are not masked at the PMR (because the core will + * not wake up if we block the wake up signal in the interrupt controller). + */ +void noinstr cpu_do_idle(void) +{ + struct arm_cpuidle_irq_context context; + + arm_cpuidle_save_irq_context(&context); + + dsb(sy); + wfi(); + + arm_cpuidle_restore_irq_context(&context); +} + +/* + * This is our default idle handler. 
+ */ +void noinstr arch_cpu_idle(void) +{ + /* + * This should do all the clock switching and wait for interrupt + * tricks + */ + cpu_do_idle(); + raw_local_irq_enable(); +} diff --git a/arch/arm64/kernel/insn.c b/arch/arm64/kernel/insn.c deleted file mode 100644 index 51cb8dc98d00..000000000000 --- a/arch/arm64/kernel/insn.c +++ /dev/null @@ -1,1699 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * Copyright (C) 2013 Huawei Ltd. - * Author: Jiang Liu <liuj97@gmail.com> - * - * Copyright (C) 2014-2016 Zi Shen Lim <zlim.lnx@gmail.com> - */ -#include <linux/bitops.h> -#include <linux/bug.h> -#include <linux/compiler.h> -#include <linux/kernel.h> -#include <linux/mm.h> -#include <linux/smp.h> -#include <linux/spinlock.h> -#include <linux/stop_machine.h> -#include <linux/types.h> -#include <linux/uaccess.h> - -#include <asm/cacheflush.h> -#include <asm/debug-monitors.h> -#include <asm/fixmap.h> -#include <asm/insn.h> -#include <asm/kprobes.h> -#include <asm/sections.h> - -#define AARCH64_INSN_SF_BIT BIT(31) -#define AARCH64_INSN_N_BIT BIT(22) -#define AARCH64_INSN_LSL_12 BIT(22) - -static const int aarch64_insn_encoding_class[] = { - AARCH64_INSN_CLS_UNKNOWN, - AARCH64_INSN_CLS_UNKNOWN, - AARCH64_INSN_CLS_UNKNOWN, - AARCH64_INSN_CLS_UNKNOWN, - AARCH64_INSN_CLS_LDST, - AARCH64_INSN_CLS_DP_REG, - AARCH64_INSN_CLS_LDST, - AARCH64_INSN_CLS_DP_FPSIMD, - AARCH64_INSN_CLS_DP_IMM, - AARCH64_INSN_CLS_DP_IMM, - AARCH64_INSN_CLS_BR_SYS, - AARCH64_INSN_CLS_BR_SYS, - AARCH64_INSN_CLS_LDST, - AARCH64_INSN_CLS_DP_REG, - AARCH64_INSN_CLS_LDST, - AARCH64_INSN_CLS_DP_FPSIMD, -}; - -enum aarch64_insn_encoding_class __kprobes aarch64_get_insn_class(u32 insn) -{ - return aarch64_insn_encoding_class[(insn >> 25) & 0xf]; -} - -bool __kprobes aarch64_insn_is_steppable_hint(u32 insn) -{ - if (!aarch64_insn_is_hint(insn)) - return false; - - switch (insn & 0xFE0) { - case AARCH64_INSN_HINT_XPACLRI: - case AARCH64_INSN_HINT_PACIA_1716: - case AARCH64_INSN_HINT_PACIB_1716: - case AARCH64_INSN_HINT_PACIAZ: - case AARCH64_INSN_HINT_PACIASP: - case AARCH64_INSN_HINT_PACIBZ: - case AARCH64_INSN_HINT_PACIBSP: - case AARCH64_INSN_HINT_BTI: - case AARCH64_INSN_HINT_BTIC: - case AARCH64_INSN_HINT_BTIJ: - case AARCH64_INSN_HINT_BTIJC: - case AARCH64_INSN_HINT_NOP: - return true; - default: - return false; - } -} - -bool aarch64_insn_is_branch_imm(u32 insn) -{ - return (aarch64_insn_is_b(insn) || aarch64_insn_is_bl(insn) || - aarch64_insn_is_tbz(insn) || aarch64_insn_is_tbnz(insn) || - aarch64_insn_is_cbz(insn) || aarch64_insn_is_cbnz(insn) || - aarch64_insn_is_bcond(insn)); -} - -static DEFINE_RAW_SPINLOCK(patch_lock); - -static bool is_exit_text(unsigned long addr) -{ - /* discarded with init text/data */ - return system_state < SYSTEM_RUNNING && - addr >= (unsigned long)__exittext_begin && - addr < (unsigned long)__exittext_end; -} - -static bool is_image_text(unsigned long addr) -{ - return core_kernel_text(addr) || is_exit_text(addr); -} - -static void __kprobes *patch_map(void *addr, int fixmap) -{ - unsigned long uintaddr = (uintptr_t) addr; - bool image = is_image_text(uintaddr); - struct page *page; - - if (image) - page = phys_to_page(__pa_symbol(addr)); - else if (IS_ENABLED(CONFIG_STRICT_MODULE_RWX)) - page = vmalloc_to_page(addr); - else - return addr; - - BUG_ON(!page); - return (void *)set_fixmap_offset(fixmap, page_to_phys(page) + - (uintaddr & ~PAGE_MASK)); -} - -static void __kprobes patch_unmap(int fixmap) -{ - clear_fixmap(fixmap); -} -/* - * In ARMv8-A, A64 instructions have a fixed length of 32 bits 
and are always - * little-endian. - */ -int __kprobes aarch64_insn_read(void *addr, u32 *insnp) -{ - int ret; - __le32 val; - - ret = copy_from_kernel_nofault(&val, addr, AARCH64_INSN_SIZE); - if (!ret) - *insnp = le32_to_cpu(val); - - return ret; -} - -static int __kprobes __aarch64_insn_write(void *addr, __le32 insn) -{ - void *waddr = addr; - unsigned long flags = 0; - int ret; - - raw_spin_lock_irqsave(&patch_lock, flags); - waddr = patch_map(addr, FIX_TEXT_POKE0); - - ret = copy_to_kernel_nofault(waddr, &insn, AARCH64_INSN_SIZE); - - patch_unmap(FIX_TEXT_POKE0); - raw_spin_unlock_irqrestore(&patch_lock, flags); - - return ret; -} - -int __kprobes aarch64_insn_write(void *addr, u32 insn) -{ - return __aarch64_insn_write(addr, cpu_to_le32(insn)); -} - -bool __kprobes aarch64_insn_uses_literal(u32 insn) -{ - /* ldr/ldrsw (literal), prfm */ - - return aarch64_insn_is_ldr_lit(insn) || - aarch64_insn_is_ldrsw_lit(insn) || - aarch64_insn_is_adr_adrp(insn) || - aarch64_insn_is_prfm_lit(insn); -} - -bool __kprobes aarch64_insn_is_branch(u32 insn) -{ - /* b, bl, cb*, tb*, ret*, b.cond, br*, blr* */ - - return aarch64_insn_is_b(insn) || - aarch64_insn_is_bl(insn) || - aarch64_insn_is_cbz(insn) || - aarch64_insn_is_cbnz(insn) || - aarch64_insn_is_tbz(insn) || - aarch64_insn_is_tbnz(insn) || - aarch64_insn_is_ret(insn) || - aarch64_insn_is_ret_auth(insn) || - aarch64_insn_is_br(insn) || - aarch64_insn_is_br_auth(insn) || - aarch64_insn_is_blr(insn) || - aarch64_insn_is_blr_auth(insn) || - aarch64_insn_is_bcond(insn); -} - -int __kprobes aarch64_insn_patch_text_nosync(void *addr, u32 insn) -{ - u32 *tp = addr; - int ret; - - /* A64 instructions must be word aligned */ - if ((uintptr_t)tp & 0x3) - return -EINVAL; - - ret = aarch64_insn_write(tp, insn); - if (ret == 0) - caches_clean_inval_pou((uintptr_t)tp, - (uintptr_t)tp + AARCH64_INSN_SIZE); - - return ret; -} - -struct aarch64_insn_patch { - void **text_addrs; - u32 *new_insns; - int insn_cnt; - atomic_t cpu_count; -}; - -static int __kprobes aarch64_insn_patch_text_cb(void *arg) -{ - int i, ret = 0; - struct aarch64_insn_patch *pp = arg; - - /* The first CPU becomes master */ - if (atomic_inc_return(&pp->cpu_count) == 1) { - for (i = 0; ret == 0 && i < pp->insn_cnt; i++) - ret = aarch64_insn_patch_text_nosync(pp->text_addrs[i], - pp->new_insns[i]); - /* Notify other processors with an additional increment. 
*/ - atomic_inc(&pp->cpu_count); - } else { - while (atomic_read(&pp->cpu_count) <= num_online_cpus()) - cpu_relax(); - isb(); - } - - return ret; -} - -int __kprobes aarch64_insn_patch_text(void *addrs[], u32 insns[], int cnt) -{ - struct aarch64_insn_patch patch = { - .text_addrs = addrs, - .new_insns = insns, - .insn_cnt = cnt, - .cpu_count = ATOMIC_INIT(0), - }; - - if (cnt <= 0) - return -EINVAL; - - return stop_machine_cpuslocked(aarch64_insn_patch_text_cb, &patch, - cpu_online_mask); -} - -static int __kprobes aarch64_get_imm_shift_mask(enum aarch64_insn_imm_type type, - u32 *maskp, int *shiftp) -{ - u32 mask; - int shift; - - switch (type) { - case AARCH64_INSN_IMM_26: - mask = BIT(26) - 1; - shift = 0; - break; - case AARCH64_INSN_IMM_19: - mask = BIT(19) - 1; - shift = 5; - break; - case AARCH64_INSN_IMM_16: - mask = BIT(16) - 1; - shift = 5; - break; - case AARCH64_INSN_IMM_14: - mask = BIT(14) - 1; - shift = 5; - break; - case AARCH64_INSN_IMM_12: - mask = BIT(12) - 1; - shift = 10; - break; - case AARCH64_INSN_IMM_9: - mask = BIT(9) - 1; - shift = 12; - break; - case AARCH64_INSN_IMM_7: - mask = BIT(7) - 1; - shift = 15; - break; - case AARCH64_INSN_IMM_6: - case AARCH64_INSN_IMM_S: - mask = BIT(6) - 1; - shift = 10; - break; - case AARCH64_INSN_IMM_R: - mask = BIT(6) - 1; - shift = 16; - break; - case AARCH64_INSN_IMM_N: - mask = 1; - shift = 22; - break; - default: - return -EINVAL; - } - - *maskp = mask; - *shiftp = shift; - - return 0; -} - -#define ADR_IMM_HILOSPLIT 2 -#define ADR_IMM_SIZE SZ_2M -#define ADR_IMM_LOMASK ((1 << ADR_IMM_HILOSPLIT) - 1) -#define ADR_IMM_HIMASK ((ADR_IMM_SIZE >> ADR_IMM_HILOSPLIT) - 1) -#define ADR_IMM_LOSHIFT 29 -#define ADR_IMM_HISHIFT 5 - -u64 aarch64_insn_decode_immediate(enum aarch64_insn_imm_type type, u32 insn) -{ - u32 immlo, immhi, mask; - int shift; - - switch (type) { - case AARCH64_INSN_IMM_ADR: - shift = 0; - immlo = (insn >> ADR_IMM_LOSHIFT) & ADR_IMM_LOMASK; - immhi = (insn >> ADR_IMM_HISHIFT) & ADR_IMM_HIMASK; - insn = (immhi << ADR_IMM_HILOSPLIT) | immlo; - mask = ADR_IMM_SIZE - 1; - break; - default: - if (aarch64_get_imm_shift_mask(type, &mask, &shift) < 0) { - pr_err("aarch64_insn_decode_immediate: unknown immediate encoding %d\n", - type); - return 0; - } - } - - return (insn >> shift) & mask; -} - -u32 __kprobes aarch64_insn_encode_immediate(enum aarch64_insn_imm_type type, - u32 insn, u64 imm) -{ - u32 immlo, immhi, mask; - int shift; - - if (insn == AARCH64_BREAK_FAULT) - return AARCH64_BREAK_FAULT; - - switch (type) { - case AARCH64_INSN_IMM_ADR: - shift = 0; - immlo = (imm & ADR_IMM_LOMASK) << ADR_IMM_LOSHIFT; - imm >>= ADR_IMM_HILOSPLIT; - immhi = (imm & ADR_IMM_HIMASK) << ADR_IMM_HISHIFT; - imm = immlo | immhi; - mask = ((ADR_IMM_LOMASK << ADR_IMM_LOSHIFT) | - (ADR_IMM_HIMASK << ADR_IMM_HISHIFT)); - break; - default: - if (aarch64_get_imm_shift_mask(type, &mask, &shift) < 0) { - pr_err("aarch64_insn_encode_immediate: unknown immediate encoding %d\n", - type); - return AARCH64_BREAK_FAULT; - } - } - - /* Update the immediate field. 
*/ - insn &= ~(mask << shift); - insn |= (imm & mask) << shift; - - return insn; -} - -u32 aarch64_insn_decode_register(enum aarch64_insn_register_type type, - u32 insn) -{ - int shift; - - switch (type) { - case AARCH64_INSN_REGTYPE_RT: - case AARCH64_INSN_REGTYPE_RD: - shift = 0; - break; - case AARCH64_INSN_REGTYPE_RN: - shift = 5; - break; - case AARCH64_INSN_REGTYPE_RT2: - case AARCH64_INSN_REGTYPE_RA: - shift = 10; - break; - case AARCH64_INSN_REGTYPE_RM: - shift = 16; - break; - default: - pr_err("%s: unknown register type encoding %d\n", __func__, - type); - return 0; - } - - return (insn >> shift) & GENMASK(4, 0); -} - -static u32 aarch64_insn_encode_register(enum aarch64_insn_register_type type, - u32 insn, - enum aarch64_insn_register reg) -{ - int shift; - - if (insn == AARCH64_BREAK_FAULT) - return AARCH64_BREAK_FAULT; - - if (reg < AARCH64_INSN_REG_0 || reg > AARCH64_INSN_REG_SP) { - pr_err("%s: unknown register encoding %d\n", __func__, reg); - return AARCH64_BREAK_FAULT; - } - - switch (type) { - case AARCH64_INSN_REGTYPE_RT: - case AARCH64_INSN_REGTYPE_RD: - shift = 0; - break; - case AARCH64_INSN_REGTYPE_RN: - shift = 5; - break; - case AARCH64_INSN_REGTYPE_RT2: - case AARCH64_INSN_REGTYPE_RA: - shift = 10; - break; - case AARCH64_INSN_REGTYPE_RM: - case AARCH64_INSN_REGTYPE_RS: - shift = 16; - break; - default: - pr_err("%s: unknown register type encoding %d\n", __func__, - type); - return AARCH64_BREAK_FAULT; - } - - insn &= ~(GENMASK(4, 0) << shift); - insn |= reg << shift; - - return insn; -} - -static u32 aarch64_insn_encode_ldst_size(enum aarch64_insn_size_type type, - u32 insn) -{ - u32 size; - - switch (type) { - case AARCH64_INSN_SIZE_8: - size = 0; - break; - case AARCH64_INSN_SIZE_16: - size = 1; - break; - case AARCH64_INSN_SIZE_32: - size = 2; - break; - case AARCH64_INSN_SIZE_64: - size = 3; - break; - default: - pr_err("%s: unknown size encoding %d\n", __func__, type); - return AARCH64_BREAK_FAULT; - } - - insn &= ~GENMASK(31, 30); - insn |= size << 30; - - return insn; -} - -static inline long branch_imm_common(unsigned long pc, unsigned long addr, - long range) -{ - long offset; - - if ((pc & 0x3) || (addr & 0x3)) { - pr_err("%s: A64 instructions must be word aligned\n", __func__); - return range; - } - - offset = ((long)addr - (long)pc); - - if (offset < -range || offset >= range) { - pr_err("%s: offset out of range\n", __func__); - return range; - } - - return offset; -} - -u32 __kprobes aarch64_insn_gen_branch_imm(unsigned long pc, unsigned long addr, - enum aarch64_insn_branch_type type) -{ - u32 insn; - long offset; - - /* - * B/BL support [-128M, 128M) offset - * ARM64 virtual address arrangement guarantees all kernel and module - * texts are within +/-128M. 
- */ - offset = branch_imm_common(pc, addr, SZ_128M); - if (offset >= SZ_128M) - return AARCH64_BREAK_FAULT; - - switch (type) { - case AARCH64_INSN_BRANCH_LINK: - insn = aarch64_insn_get_bl_value(); - break; - case AARCH64_INSN_BRANCH_NOLINK: - insn = aarch64_insn_get_b_value(); - break; - default: - pr_err("%s: unknown branch encoding %d\n", __func__, type); - return AARCH64_BREAK_FAULT; - } - - return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_26, insn, - offset >> 2); -} - -u32 aarch64_insn_gen_comp_branch_imm(unsigned long pc, unsigned long addr, - enum aarch64_insn_register reg, - enum aarch64_insn_variant variant, - enum aarch64_insn_branch_type type) -{ - u32 insn; - long offset; - - offset = branch_imm_common(pc, addr, SZ_1M); - if (offset >= SZ_1M) - return AARCH64_BREAK_FAULT; - - switch (type) { - case AARCH64_INSN_BRANCH_COMP_ZERO: - insn = aarch64_insn_get_cbz_value(); - break; - case AARCH64_INSN_BRANCH_COMP_NONZERO: - insn = aarch64_insn_get_cbnz_value(); - break; - default: - pr_err("%s: unknown branch encoding %d\n", __func__, type); - return AARCH64_BREAK_FAULT; - } - - switch (variant) { - case AARCH64_INSN_VARIANT_32BIT: - break; - case AARCH64_INSN_VARIANT_64BIT: - insn |= AARCH64_INSN_SF_BIT; - break; - default: - pr_err("%s: unknown variant encoding %d\n", __func__, variant); - return AARCH64_BREAK_FAULT; - } - - insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT, insn, reg); - - return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_19, insn, - offset >> 2); -} - -u32 aarch64_insn_gen_cond_branch_imm(unsigned long pc, unsigned long addr, - enum aarch64_insn_condition cond) -{ - u32 insn; - long offset; - - offset = branch_imm_common(pc, addr, SZ_1M); - - insn = aarch64_insn_get_bcond_value(); - - if (cond < AARCH64_INSN_COND_EQ || cond > AARCH64_INSN_COND_AL) { - pr_err("%s: unknown condition encoding %d\n", __func__, cond); - return AARCH64_BREAK_FAULT; - } - insn |= cond; - - return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_19, insn, - offset >> 2); -} - -u32 __kprobes aarch64_insn_gen_hint(enum aarch64_insn_hint_cr_op op) -{ - return aarch64_insn_get_hint_value() | op; -} - -u32 __kprobes aarch64_insn_gen_nop(void) -{ - return aarch64_insn_gen_hint(AARCH64_INSN_HINT_NOP); -} - -u32 aarch64_insn_gen_branch_reg(enum aarch64_insn_register reg, - enum aarch64_insn_branch_type type) -{ - u32 insn; - - switch (type) { - case AARCH64_INSN_BRANCH_NOLINK: - insn = aarch64_insn_get_br_value(); - break; - case AARCH64_INSN_BRANCH_LINK: - insn = aarch64_insn_get_blr_value(); - break; - case AARCH64_INSN_BRANCH_RETURN: - insn = aarch64_insn_get_ret_value(); - break; - default: - pr_err("%s: unknown branch encoding %d\n", __func__, type); - return AARCH64_BREAK_FAULT; - } - - return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, reg); -} - -u32 aarch64_insn_gen_load_store_reg(enum aarch64_insn_register reg, - enum aarch64_insn_register base, - enum aarch64_insn_register offset, - enum aarch64_insn_size_type size, - enum aarch64_insn_ldst_type type) -{ - u32 insn; - - switch (type) { - case AARCH64_INSN_LDST_LOAD_REG_OFFSET: - insn = aarch64_insn_get_ldr_reg_value(); - break; - case AARCH64_INSN_LDST_STORE_REG_OFFSET: - insn = aarch64_insn_get_str_reg_value(); - break; - default: - pr_err("%s: unknown load/store encoding %d\n", __func__, type); - return AARCH64_BREAK_FAULT; - } - - insn = aarch64_insn_encode_ldst_size(size, insn); - - insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT, insn, reg); - - insn = 
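/*
 * [Illustrative aside, not part of the patch] The branch generators above all
 * follow the same pattern: compute the byte offset, range-check it, then store
 * it as a word offset (offset >> 2) in the immediate field. The +/-128 MiB
 * B/BL limit is simply the reach of a signed 26-bit word offset. A quick
 * stand-alone sanity check of that arithmetic, with hypothetical addresses:
 */
#include <assert.h>
#include <stdint.h>

int main(void)
{
	const int64_t range = 128ll * 1024 * 1024;	/* SZ_128M */
	uint64_t pc = 0xffff800010000000ull;		/* hypothetical */
	uint64_t addr = pc + 0x1234;
	int64_t offset = (int64_t)(addr - pc);

	assert(!(offset & 0x3));			/* word aligned */
	assert(offset >= -range && offset < range);	/* within B/BL reach */

	/* what aarch64_insn_encode_immediate(AARCH64_INSN_IMM_26, ...) stores */
	assert(((offset >> 2) & ((1u << 26) - 1)) == 0x48d);
	return 0;
}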
aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, - base); - - return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn, - offset); -} - -u32 aarch64_insn_gen_load_store_pair(enum aarch64_insn_register reg1, - enum aarch64_insn_register reg2, - enum aarch64_insn_register base, - int offset, - enum aarch64_insn_variant variant, - enum aarch64_insn_ldst_type type) -{ - u32 insn; - int shift; - - switch (type) { - case AARCH64_INSN_LDST_LOAD_PAIR_PRE_INDEX: - insn = aarch64_insn_get_ldp_pre_value(); - break; - case AARCH64_INSN_LDST_STORE_PAIR_PRE_INDEX: - insn = aarch64_insn_get_stp_pre_value(); - break; - case AARCH64_INSN_LDST_LOAD_PAIR_POST_INDEX: - insn = aarch64_insn_get_ldp_post_value(); - break; - case AARCH64_INSN_LDST_STORE_PAIR_POST_INDEX: - insn = aarch64_insn_get_stp_post_value(); - break; - default: - pr_err("%s: unknown load/store encoding %d\n", __func__, type); - return AARCH64_BREAK_FAULT; - } - - switch (variant) { - case AARCH64_INSN_VARIANT_32BIT: - if ((offset & 0x3) || (offset < -256) || (offset > 252)) { - pr_err("%s: offset must be multiples of 4 in the range of [-256, 252] %d\n", - __func__, offset); - return AARCH64_BREAK_FAULT; - } - shift = 2; - break; - case AARCH64_INSN_VARIANT_64BIT: - if ((offset & 0x7) || (offset < -512) || (offset > 504)) { - pr_err("%s: offset must be multiples of 8 in the range of [-512, 504] %d\n", - __func__, offset); - return AARCH64_BREAK_FAULT; - } - shift = 3; - insn |= AARCH64_INSN_SF_BIT; - break; - default: - pr_err("%s: unknown variant encoding %d\n", __func__, variant); - return AARCH64_BREAK_FAULT; - } - - insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT, insn, - reg1); - - insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT2, insn, - reg2); - - insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, - base); - - return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_7, insn, - offset >> shift); -} - -u32 aarch64_insn_gen_load_store_ex(enum aarch64_insn_register reg, - enum aarch64_insn_register base, - enum aarch64_insn_register state, - enum aarch64_insn_size_type size, - enum aarch64_insn_ldst_type type) -{ - u32 insn; - - switch (type) { - case AARCH64_INSN_LDST_LOAD_EX: - insn = aarch64_insn_get_load_ex_value(); - break; - case AARCH64_INSN_LDST_STORE_EX: - insn = aarch64_insn_get_store_ex_value(); - break; - default: - pr_err("%s: unknown load/store exclusive encoding %d\n", __func__, type); - return AARCH64_BREAK_FAULT; - } - - insn = aarch64_insn_encode_ldst_size(size, insn); - - insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT, insn, - reg); - - insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, - base); - - insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT2, insn, - AARCH64_INSN_REG_ZR); - - return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RS, insn, - state); -} - -u32 aarch64_insn_gen_ldadd(enum aarch64_insn_register result, - enum aarch64_insn_register address, - enum aarch64_insn_register value, - enum aarch64_insn_size_type size) -{ - u32 insn = aarch64_insn_get_ldadd_value(); - - switch (size) { - case AARCH64_INSN_SIZE_32: - case AARCH64_INSN_SIZE_64: - break; - default: - pr_err("%s: unimplemented size encoding %d\n", __func__, size); - return AARCH64_BREAK_FAULT; - } - - insn = aarch64_insn_encode_ldst_size(size, insn); - - insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT, insn, - result); - - insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, - address); - - return 
aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RS, insn, - value); -} - -u32 aarch64_insn_gen_stadd(enum aarch64_insn_register address, - enum aarch64_insn_register value, - enum aarch64_insn_size_type size) -{ - /* - * STADD is simply encoded as an alias for LDADD with XZR as - * the destination register. - */ - return aarch64_insn_gen_ldadd(AARCH64_INSN_REG_ZR, address, - value, size); -} - -static u32 aarch64_insn_encode_prfm_imm(enum aarch64_insn_prfm_type type, - enum aarch64_insn_prfm_target target, - enum aarch64_insn_prfm_policy policy, - u32 insn) -{ - u32 imm_type = 0, imm_target = 0, imm_policy = 0; - - switch (type) { - case AARCH64_INSN_PRFM_TYPE_PLD: - break; - case AARCH64_INSN_PRFM_TYPE_PLI: - imm_type = BIT(0); - break; - case AARCH64_INSN_PRFM_TYPE_PST: - imm_type = BIT(1); - break; - default: - pr_err("%s: unknown prfm type encoding %d\n", __func__, type); - return AARCH64_BREAK_FAULT; - } - - switch (target) { - case AARCH64_INSN_PRFM_TARGET_L1: - break; - case AARCH64_INSN_PRFM_TARGET_L2: - imm_target = BIT(0); - break; - case AARCH64_INSN_PRFM_TARGET_L3: - imm_target = BIT(1); - break; - default: - pr_err("%s: unknown prfm target encoding %d\n", __func__, target); - return AARCH64_BREAK_FAULT; - } - - switch (policy) { - case AARCH64_INSN_PRFM_POLICY_KEEP: - break; - case AARCH64_INSN_PRFM_POLICY_STRM: - imm_policy = BIT(0); - break; - default: - pr_err("%s: unknown prfm policy encoding %d\n", __func__, policy); - return AARCH64_BREAK_FAULT; - } - - /* In this case, imm5 is encoded into Rt field. */ - insn &= ~GENMASK(4, 0); - insn |= imm_policy | (imm_target << 1) | (imm_type << 3); - - return insn; -} - -u32 aarch64_insn_gen_prefetch(enum aarch64_insn_register base, - enum aarch64_insn_prfm_type type, - enum aarch64_insn_prfm_target target, - enum aarch64_insn_prfm_policy policy) -{ - u32 insn = aarch64_insn_get_prfm_value(); - - insn = aarch64_insn_encode_ldst_size(AARCH64_INSN_SIZE_64, insn); - - insn = aarch64_insn_encode_prfm_imm(type, target, policy, insn); - - insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, - base); - - return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_12, insn, 0); -} - -u32 aarch64_insn_gen_add_sub_imm(enum aarch64_insn_register dst, - enum aarch64_insn_register src, - int imm, enum aarch64_insn_variant variant, - enum aarch64_insn_adsb_type type) -{ - u32 insn; - - switch (type) { - case AARCH64_INSN_ADSB_ADD: - insn = aarch64_insn_get_add_imm_value(); - break; - case AARCH64_INSN_ADSB_SUB: - insn = aarch64_insn_get_sub_imm_value(); - break; - case AARCH64_INSN_ADSB_ADD_SETFLAGS: - insn = aarch64_insn_get_adds_imm_value(); - break; - case AARCH64_INSN_ADSB_SUB_SETFLAGS: - insn = aarch64_insn_get_subs_imm_value(); - break; - default: - pr_err("%s: unknown add/sub encoding %d\n", __func__, type); - return AARCH64_BREAK_FAULT; - } - - switch (variant) { - case AARCH64_INSN_VARIANT_32BIT: - break; - case AARCH64_INSN_VARIANT_64BIT: - insn |= AARCH64_INSN_SF_BIT; - break; - default: - pr_err("%s: unknown variant encoding %d\n", __func__, variant); - return AARCH64_BREAK_FAULT; - } - - /* We can't encode more than a 24bit value (12bit + 12bit shift) */ - if (imm & ~(BIT(24) - 1)) - goto out; - - /* If we have something in the top 12 bits... */ - if (imm & ~(SZ_4K - 1)) { - /* ... 
and in the low 12 bits -> error */ - if (imm & (SZ_4K - 1)) - goto out; - - imm >>= 12; - insn |= AARCH64_INSN_LSL_12; - } - - insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst); - - insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src); - - return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_12, insn, imm); - -out: - pr_err("%s: invalid immediate encoding %d\n", __func__, imm); - return AARCH64_BREAK_FAULT; -} - -u32 aarch64_insn_gen_bitfield(enum aarch64_insn_register dst, - enum aarch64_insn_register src, - int immr, int imms, - enum aarch64_insn_variant variant, - enum aarch64_insn_bitfield_type type) -{ - u32 insn; - u32 mask; - - switch (type) { - case AARCH64_INSN_BITFIELD_MOVE: - insn = aarch64_insn_get_bfm_value(); - break; - case AARCH64_INSN_BITFIELD_MOVE_UNSIGNED: - insn = aarch64_insn_get_ubfm_value(); - break; - case AARCH64_INSN_BITFIELD_MOVE_SIGNED: - insn = aarch64_insn_get_sbfm_value(); - break; - default: - pr_err("%s: unknown bitfield encoding %d\n", __func__, type); - return AARCH64_BREAK_FAULT; - } - - switch (variant) { - case AARCH64_INSN_VARIANT_32BIT: - mask = GENMASK(4, 0); - break; - case AARCH64_INSN_VARIANT_64BIT: - insn |= AARCH64_INSN_SF_BIT | AARCH64_INSN_N_BIT; - mask = GENMASK(5, 0); - break; - default: - pr_err("%s: unknown variant encoding %d\n", __func__, variant); - return AARCH64_BREAK_FAULT; - } - - if (immr & ~mask) { - pr_err("%s: invalid immr encoding %d\n", __func__, immr); - return AARCH64_BREAK_FAULT; - } - if (imms & ~mask) { - pr_err("%s: invalid imms encoding %d\n", __func__, imms); - return AARCH64_BREAK_FAULT; - } - - insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst); - - insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src); - - insn = aarch64_insn_encode_immediate(AARCH64_INSN_IMM_R, insn, immr); - - return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_S, insn, imms); -} - -u32 aarch64_insn_gen_movewide(enum aarch64_insn_register dst, - int imm, int shift, - enum aarch64_insn_variant variant, - enum aarch64_insn_movewide_type type) -{ - u32 insn; - - switch (type) { - case AARCH64_INSN_MOVEWIDE_ZERO: - insn = aarch64_insn_get_movz_value(); - break; - case AARCH64_INSN_MOVEWIDE_KEEP: - insn = aarch64_insn_get_movk_value(); - break; - case AARCH64_INSN_MOVEWIDE_INVERSE: - insn = aarch64_insn_get_movn_value(); - break; - default: - pr_err("%s: unknown movewide encoding %d\n", __func__, type); - return AARCH64_BREAK_FAULT; - } - - if (imm & ~(SZ_64K - 1)) { - pr_err("%s: invalid immediate encoding %d\n", __func__, imm); - return AARCH64_BREAK_FAULT; - } - - switch (variant) { - case AARCH64_INSN_VARIANT_32BIT: - if (shift != 0 && shift != 16) { - pr_err("%s: invalid shift encoding %d\n", __func__, - shift); - return AARCH64_BREAK_FAULT; - } - break; - case AARCH64_INSN_VARIANT_64BIT: - insn |= AARCH64_INSN_SF_BIT; - if (shift != 0 && shift != 16 && shift != 32 && shift != 48) { - pr_err("%s: invalid shift encoding %d\n", __func__, - shift); - return AARCH64_BREAK_FAULT; - } - break; - default: - pr_err("%s: unknown variant encoding %d\n", __func__, variant); - return AARCH64_BREAK_FAULT; - } - - insn |= (shift >> 4) << 21; - - insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst); - - return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_16, insn, imm); -} - -u32 aarch64_insn_gen_add_sub_shifted_reg(enum aarch64_insn_register dst, - enum aarch64_insn_register src, - enum aarch64_insn_register reg, - int shift, - enum aarch64_insn_variant 
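/*
 * [Illustrative aside, not part of the patch] aarch64_insn_gen_add_sub_imm()
 * above accepts either a plain 12-bit immediate or a 12-bit immediate times
 * 4096 (the "LSL #12" form), which is why it rejects anything with bits set
 * in both halves of a 24-bit value. The same split as a tiny stand-alone
 * helper (hypothetical, userspace):
 */
#include <stdbool.h>
#include <stdio.h>

static bool split_addsub_imm(int imm, int *imm12, bool *lsl12)
{
	if (imm & ~((1 << 24) - 1))
		return false;		/* wider than 12 + 12 bits: reject */
	if (imm & ~(4096 - 1)) {	/* something in the top 12 bits... */
		if (imm & (4096 - 1))
			return false;	/* ...and in the low 12 bits: reject */
		*imm12 = imm >> 12;
		*lsl12 = true;
	} else {
		*imm12 = imm;
		*lsl12 = false;
	}
	return true;
}

int main(void)
{
	int imm12;
	bool lsl12;

	if (split_addsub_imm(0x5000, &imm12, &lsl12))
		printf("imm12=%#x lsl12=%d\n", imm12, lsl12);	/* 0x5, 1 */
	return 0;
}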
variant, - enum aarch64_insn_adsb_type type) -{ - u32 insn; - - switch (type) { - case AARCH64_INSN_ADSB_ADD: - insn = aarch64_insn_get_add_value(); - break; - case AARCH64_INSN_ADSB_SUB: - insn = aarch64_insn_get_sub_value(); - break; - case AARCH64_INSN_ADSB_ADD_SETFLAGS: - insn = aarch64_insn_get_adds_value(); - break; - case AARCH64_INSN_ADSB_SUB_SETFLAGS: - insn = aarch64_insn_get_subs_value(); - break; - default: - pr_err("%s: unknown add/sub encoding %d\n", __func__, type); - return AARCH64_BREAK_FAULT; - } - - switch (variant) { - case AARCH64_INSN_VARIANT_32BIT: - if (shift & ~(SZ_32 - 1)) { - pr_err("%s: invalid shift encoding %d\n", __func__, - shift); - return AARCH64_BREAK_FAULT; - } - break; - case AARCH64_INSN_VARIANT_64BIT: - insn |= AARCH64_INSN_SF_BIT; - if (shift & ~(SZ_64 - 1)) { - pr_err("%s: invalid shift encoding %d\n", __func__, - shift); - return AARCH64_BREAK_FAULT; - } - break; - default: - pr_err("%s: unknown variant encoding %d\n", __func__, variant); - return AARCH64_BREAK_FAULT; - } - - - insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst); - - insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src); - - insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn, reg); - - return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_6, insn, shift); -} - -u32 aarch64_insn_gen_data1(enum aarch64_insn_register dst, - enum aarch64_insn_register src, - enum aarch64_insn_variant variant, - enum aarch64_insn_data1_type type) -{ - u32 insn; - - switch (type) { - case AARCH64_INSN_DATA1_REVERSE_16: - insn = aarch64_insn_get_rev16_value(); - break; - case AARCH64_INSN_DATA1_REVERSE_32: - insn = aarch64_insn_get_rev32_value(); - break; - case AARCH64_INSN_DATA1_REVERSE_64: - if (variant != AARCH64_INSN_VARIANT_64BIT) { - pr_err("%s: invalid variant for reverse64 %d\n", - __func__, variant); - return AARCH64_BREAK_FAULT; - } - insn = aarch64_insn_get_rev64_value(); - break; - default: - pr_err("%s: unknown data1 encoding %d\n", __func__, type); - return AARCH64_BREAK_FAULT; - } - - switch (variant) { - case AARCH64_INSN_VARIANT_32BIT: - break; - case AARCH64_INSN_VARIANT_64BIT: - insn |= AARCH64_INSN_SF_BIT; - break; - default: - pr_err("%s: unknown variant encoding %d\n", __func__, variant); - return AARCH64_BREAK_FAULT; - } - - insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst); - - return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src); -} - -u32 aarch64_insn_gen_data2(enum aarch64_insn_register dst, - enum aarch64_insn_register src, - enum aarch64_insn_register reg, - enum aarch64_insn_variant variant, - enum aarch64_insn_data2_type type) -{ - u32 insn; - - switch (type) { - case AARCH64_INSN_DATA2_UDIV: - insn = aarch64_insn_get_udiv_value(); - break; - case AARCH64_INSN_DATA2_SDIV: - insn = aarch64_insn_get_sdiv_value(); - break; - case AARCH64_INSN_DATA2_LSLV: - insn = aarch64_insn_get_lslv_value(); - break; - case AARCH64_INSN_DATA2_LSRV: - insn = aarch64_insn_get_lsrv_value(); - break; - case AARCH64_INSN_DATA2_ASRV: - insn = aarch64_insn_get_asrv_value(); - break; - case AARCH64_INSN_DATA2_RORV: - insn = aarch64_insn_get_rorv_value(); - break; - default: - pr_err("%s: unknown data2 encoding %d\n", __func__, type); - return AARCH64_BREAK_FAULT; - } - - switch (variant) { - case AARCH64_INSN_VARIANT_32BIT: - break; - case AARCH64_INSN_VARIANT_64BIT: - insn |= AARCH64_INSN_SF_BIT; - break; - default: - pr_err("%s: unknown variant encoding %d\n", __func__, variant); - return 
AARCH64_BREAK_FAULT; - } - - insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst); - - insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src); - - return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn, reg); -} - -u32 aarch64_insn_gen_data3(enum aarch64_insn_register dst, - enum aarch64_insn_register src, - enum aarch64_insn_register reg1, - enum aarch64_insn_register reg2, - enum aarch64_insn_variant variant, - enum aarch64_insn_data3_type type) -{ - u32 insn; - - switch (type) { - case AARCH64_INSN_DATA3_MADD: - insn = aarch64_insn_get_madd_value(); - break; - case AARCH64_INSN_DATA3_MSUB: - insn = aarch64_insn_get_msub_value(); - break; - default: - pr_err("%s: unknown data3 encoding %d\n", __func__, type); - return AARCH64_BREAK_FAULT; - } - - switch (variant) { - case AARCH64_INSN_VARIANT_32BIT: - break; - case AARCH64_INSN_VARIANT_64BIT: - insn |= AARCH64_INSN_SF_BIT; - break; - default: - pr_err("%s: unknown variant encoding %d\n", __func__, variant); - return AARCH64_BREAK_FAULT; - } - - insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst); - - insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RA, insn, src); - - insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, - reg1); - - return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn, - reg2); -} - -u32 aarch64_insn_gen_logical_shifted_reg(enum aarch64_insn_register dst, - enum aarch64_insn_register src, - enum aarch64_insn_register reg, - int shift, - enum aarch64_insn_variant variant, - enum aarch64_insn_logic_type type) -{ - u32 insn; - - switch (type) { - case AARCH64_INSN_LOGIC_AND: - insn = aarch64_insn_get_and_value(); - break; - case AARCH64_INSN_LOGIC_BIC: - insn = aarch64_insn_get_bic_value(); - break; - case AARCH64_INSN_LOGIC_ORR: - insn = aarch64_insn_get_orr_value(); - break; - case AARCH64_INSN_LOGIC_ORN: - insn = aarch64_insn_get_orn_value(); - break; - case AARCH64_INSN_LOGIC_EOR: - insn = aarch64_insn_get_eor_value(); - break; - case AARCH64_INSN_LOGIC_EON: - insn = aarch64_insn_get_eon_value(); - break; - case AARCH64_INSN_LOGIC_AND_SETFLAGS: - insn = aarch64_insn_get_ands_value(); - break; - case AARCH64_INSN_LOGIC_BIC_SETFLAGS: - insn = aarch64_insn_get_bics_value(); - break; - default: - pr_err("%s: unknown logical encoding %d\n", __func__, type); - return AARCH64_BREAK_FAULT; - } - - switch (variant) { - case AARCH64_INSN_VARIANT_32BIT: - if (shift & ~(SZ_32 - 1)) { - pr_err("%s: invalid shift encoding %d\n", __func__, - shift); - return AARCH64_BREAK_FAULT; - } - break; - case AARCH64_INSN_VARIANT_64BIT: - insn |= AARCH64_INSN_SF_BIT; - if (shift & ~(SZ_64 - 1)) { - pr_err("%s: invalid shift encoding %d\n", __func__, - shift); - return AARCH64_BREAK_FAULT; - } - break; - default: - pr_err("%s: unknown variant encoding %d\n", __func__, variant); - return AARCH64_BREAK_FAULT; - } - - - insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst); - - insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src); - - insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn, reg); - - return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_6, insn, shift); -} - -/* - * MOV (register) is architecturally an alias of ORR (shifted register) where - * MOV <*d>, <*m> is equivalent to ORR <*d>, <*ZR>, <*m> - */ -u32 aarch64_insn_gen_move_reg(enum aarch64_insn_register dst, - enum aarch64_insn_register src, - enum aarch64_insn_variant variant) -{ - return aarch64_insn_gen_logical_shifted_reg(dst, 
AARCH64_INSN_REG_ZR, - src, 0, variant, - AARCH64_INSN_LOGIC_ORR); -} - -u32 aarch64_insn_gen_adr(unsigned long pc, unsigned long addr, - enum aarch64_insn_register reg, - enum aarch64_insn_adr_type type) -{ - u32 insn; - s32 offset; - - switch (type) { - case AARCH64_INSN_ADR_TYPE_ADR: - insn = aarch64_insn_get_adr_value(); - offset = addr - pc; - break; - case AARCH64_INSN_ADR_TYPE_ADRP: - insn = aarch64_insn_get_adrp_value(); - offset = (addr - ALIGN_DOWN(pc, SZ_4K)) >> 12; - break; - default: - pr_err("%s: unknown adr encoding %d\n", __func__, type); - return AARCH64_BREAK_FAULT; - } - - if (offset < -SZ_1M || offset >= SZ_1M) - return AARCH64_BREAK_FAULT; - - insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, reg); - - return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_ADR, insn, offset); -} - -/* - * Decode the imm field of a branch, and return the byte offset as a - * signed value (so it can be used when computing a new branch - * target). - */ -s32 aarch64_get_branch_offset(u32 insn) -{ - s32 imm; - - if (aarch64_insn_is_b(insn) || aarch64_insn_is_bl(insn)) { - imm = aarch64_insn_decode_immediate(AARCH64_INSN_IMM_26, insn); - return (imm << 6) >> 4; - } - - if (aarch64_insn_is_cbz(insn) || aarch64_insn_is_cbnz(insn) || - aarch64_insn_is_bcond(insn)) { - imm = aarch64_insn_decode_immediate(AARCH64_INSN_IMM_19, insn); - return (imm << 13) >> 11; - } - - if (aarch64_insn_is_tbz(insn) || aarch64_insn_is_tbnz(insn)) { - imm = aarch64_insn_decode_immediate(AARCH64_INSN_IMM_14, insn); - return (imm << 18) >> 16; - } - - /* Unhandled instruction */ - BUG(); -} - -/* - * Encode the displacement of a branch in the imm field and return the - * updated instruction. - */ -u32 aarch64_set_branch_offset(u32 insn, s32 offset) -{ - if (aarch64_insn_is_b(insn) || aarch64_insn_is_bl(insn)) - return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_26, insn, - offset >> 2); - - if (aarch64_insn_is_cbz(insn) || aarch64_insn_is_cbnz(insn) || - aarch64_insn_is_bcond(insn)) - return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_19, insn, - offset >> 2); - - if (aarch64_insn_is_tbz(insn) || aarch64_insn_is_tbnz(insn)) - return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_14, insn, - offset >> 2); - - /* Unhandled instruction */ - BUG(); -} - -s32 aarch64_insn_adrp_get_offset(u32 insn) -{ - BUG_ON(!aarch64_insn_is_adrp(insn)); - return aarch64_insn_decode_immediate(AARCH64_INSN_IMM_ADR, insn) << 12; -} - -u32 aarch64_insn_adrp_set_offset(u32 insn, s32 offset) -{ - BUG_ON(!aarch64_insn_is_adrp(insn)); - return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_ADR, insn, - offset >> 12); -} - -/* - * Extract the Op/CR data from a msr/mrs instruction. - */ -u32 aarch64_insn_extract_system_reg(u32 insn) -{ - return (insn & 0x1FFFE0) >> 5; -} - -bool aarch32_insn_is_wide(u32 insn) -{ - return insn >= 0xe800; -} - -/* - * Macros/defines for extracting register numbers from instruction. 
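/*
 * [Illustrative aside, not part of the patch] The "(imm << 6) >> 4" idiom in
 * aarch64_get_branch_offset() above does two things at once: it sign-extends
 * the 26-bit field (by parking its top bit at bit 31) and multiplies it by 4
 * (instructions are word sized). The 19- and 14-bit variants work the same
 * way. A stand-alone check of the arithmetic, assuming the usual arithmetic
 * right shift of negative values:
 */
#include <assert.h>
#include <stdint.h>

static int32_t b_imm26_to_bytes(uint32_t imm26)
{
	/* hypothetical helper: park bit 25 at bit 31, then shift back down */
	return (int32_t)(imm26 << 6) >> 4;
}

int main(void)
{
	assert(b_imm26_to_bytes(0x0000001) == 4);	/* +1 word */
	assert(b_imm26_to_bytes(0x3ffffff) == -4);	/* all ones: -1 word */
	return 0;
}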
- */ -u32 aarch32_insn_extract_reg_num(u32 insn, int offset) -{ - return (insn & (0xf << offset)) >> offset; -} - -#define OPC2_MASK 0x7 -#define OPC2_OFFSET 5 -u32 aarch32_insn_mcr_extract_opc2(u32 insn) -{ - return (insn & (OPC2_MASK << OPC2_OFFSET)) >> OPC2_OFFSET; -} - -#define CRM_MASK 0xf -u32 aarch32_insn_mcr_extract_crm(u32 insn) -{ - return insn & CRM_MASK; -} - -static bool __kprobes __check_eq(unsigned long pstate) -{ - return (pstate & PSR_Z_BIT) != 0; -} - -static bool __kprobes __check_ne(unsigned long pstate) -{ - return (pstate & PSR_Z_BIT) == 0; -} - -static bool __kprobes __check_cs(unsigned long pstate) -{ - return (pstate & PSR_C_BIT) != 0; -} - -static bool __kprobes __check_cc(unsigned long pstate) -{ - return (pstate & PSR_C_BIT) == 0; -} - -static bool __kprobes __check_mi(unsigned long pstate) -{ - return (pstate & PSR_N_BIT) != 0; -} - -static bool __kprobes __check_pl(unsigned long pstate) -{ - return (pstate & PSR_N_BIT) == 0; -} - -static bool __kprobes __check_vs(unsigned long pstate) -{ - return (pstate & PSR_V_BIT) != 0; -} - -static bool __kprobes __check_vc(unsigned long pstate) -{ - return (pstate & PSR_V_BIT) == 0; -} - -static bool __kprobes __check_hi(unsigned long pstate) -{ - pstate &= ~(pstate >> 1); /* PSR_C_BIT &= ~PSR_Z_BIT */ - return (pstate & PSR_C_BIT) != 0; -} - -static bool __kprobes __check_ls(unsigned long pstate) -{ - pstate &= ~(pstate >> 1); /* PSR_C_BIT &= ~PSR_Z_BIT */ - return (pstate & PSR_C_BIT) == 0; -} - -static bool __kprobes __check_ge(unsigned long pstate) -{ - pstate ^= (pstate << 3); /* PSR_N_BIT ^= PSR_V_BIT */ - return (pstate & PSR_N_BIT) == 0; -} - -static bool __kprobes __check_lt(unsigned long pstate) -{ - pstate ^= (pstate << 3); /* PSR_N_BIT ^= PSR_V_BIT */ - return (pstate & PSR_N_BIT) != 0; -} - -static bool __kprobes __check_gt(unsigned long pstate) -{ - /*PSR_N_BIT ^= PSR_V_BIT */ - unsigned long temp = pstate ^ (pstate << 3); - - temp |= (pstate << 1); /*PSR_N_BIT |= PSR_Z_BIT */ - return (temp & PSR_N_BIT) == 0; -} - -static bool __kprobes __check_le(unsigned long pstate) -{ - /*PSR_N_BIT ^= PSR_V_BIT */ - unsigned long temp = pstate ^ (pstate << 3); - - temp |= (pstate << 1); /*PSR_N_BIT |= PSR_Z_BIT */ - return (temp & PSR_N_BIT) != 0; -} - -static bool __kprobes __check_al(unsigned long pstate) -{ - return true; -} - -/* - * Note that the ARMv8 ARM calls condition code 0b1111 "nv", but states that - * it behaves identically to 0b1110 ("al"). 
- */ -pstate_check_t * const aarch32_opcode_cond_checks[16] = { - __check_eq, __check_ne, __check_cs, __check_cc, - __check_mi, __check_pl, __check_vs, __check_vc, - __check_hi, __check_ls, __check_ge, __check_lt, - __check_gt, __check_le, __check_al, __check_al -}; - -static bool range_of_ones(u64 val) -{ - /* Doesn't handle full ones or full zeroes */ - u64 sval = val >> __ffs64(val); - - /* One of Sean Eron Anderson's bithack tricks */ - return ((sval + 1) & (sval)) == 0; -} - -static u32 aarch64_encode_immediate(u64 imm, - enum aarch64_insn_variant variant, - u32 insn) -{ - unsigned int immr, imms, n, ones, ror, esz, tmp; - u64 mask; - - switch (variant) { - case AARCH64_INSN_VARIANT_32BIT: - esz = 32; - break; - case AARCH64_INSN_VARIANT_64BIT: - insn |= AARCH64_INSN_SF_BIT; - esz = 64; - break; - default: - pr_err("%s: unknown variant encoding %d\n", __func__, variant); - return AARCH64_BREAK_FAULT; - } - - mask = GENMASK(esz - 1, 0); - - /* Can't encode full zeroes, full ones, or value wider than the mask */ - if (!imm || imm == mask || imm & ~mask) - return AARCH64_BREAK_FAULT; - - /* - * Inverse of Replicate(). Try to spot a repeating pattern - * with a pow2 stride. - */ - for (tmp = esz / 2; tmp >= 2; tmp /= 2) { - u64 emask = BIT(tmp) - 1; - - if ((imm & emask) != ((imm >> tmp) & emask)) - break; - - esz = tmp; - mask = emask; - } - - /* N is only set if we're encoding a 64bit value */ - n = esz == 64; - - /* Trim imm to the element size */ - imm &= mask; - - /* That's how many ones we need to encode */ - ones = hweight64(imm); - - /* - * imms is set to (ones - 1), prefixed with a string of ones - * and a zero if they fit. Cap it to 6 bits. - */ - imms = ones - 1; - imms |= 0xf << ffs(esz); - imms &= BIT(6) - 1; - - /* Compute the rotation */ - if (range_of_ones(imm)) { - /* - * Pattern: 0..01..10..0 - * - * Compute how many rotate we need to align it right - */ - ror = __ffs64(imm); - } else { - /* - * Pattern: 0..01..10..01..1 - * - * Fill the unused top bits with ones, and check if - * the result is a valid immediate (all ones with a - * contiguous ranges of zeroes). - */ - imm |= ~mask; - if (!range_of_ones(~imm)) - return AARCH64_BREAK_FAULT; - - /* - * Compute the rotation to get a continuous set of - * ones, with the first bit set at position 0 - */ - ror = fls(~imm); - } - - /* - * immr is the number of bits we need to rotate back to the - * original set of ones. Note that this is relative to the - * element size... 
- */ - immr = (esz - ror) % esz; - - insn = aarch64_insn_encode_immediate(AARCH64_INSN_IMM_N, insn, n); - insn = aarch64_insn_encode_immediate(AARCH64_INSN_IMM_R, insn, immr); - return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_S, insn, imms); -} - -u32 aarch64_insn_gen_logical_immediate(enum aarch64_insn_logic_type type, - enum aarch64_insn_variant variant, - enum aarch64_insn_register Rn, - enum aarch64_insn_register Rd, - u64 imm) -{ - u32 insn; - - switch (type) { - case AARCH64_INSN_LOGIC_AND: - insn = aarch64_insn_get_and_imm_value(); - break; - case AARCH64_INSN_LOGIC_ORR: - insn = aarch64_insn_get_orr_imm_value(); - break; - case AARCH64_INSN_LOGIC_EOR: - insn = aarch64_insn_get_eor_imm_value(); - break; - case AARCH64_INSN_LOGIC_AND_SETFLAGS: - insn = aarch64_insn_get_ands_imm_value(); - break; - default: - pr_err("%s: unknown logical encoding %d\n", __func__, type); - return AARCH64_BREAK_FAULT; - } - - insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, Rd); - insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, Rn); - return aarch64_encode_immediate(imm, variant, insn); -} - -u32 aarch64_insn_gen_extr(enum aarch64_insn_variant variant, - enum aarch64_insn_register Rm, - enum aarch64_insn_register Rn, - enum aarch64_insn_register Rd, - u8 lsb) -{ - u32 insn; - - insn = aarch64_insn_get_extr_value(); - - switch (variant) { - case AARCH64_INSN_VARIANT_32BIT: - if (lsb > 31) - return AARCH64_BREAK_FAULT; - break; - case AARCH64_INSN_VARIANT_64BIT: - if (lsb > 63) - return AARCH64_BREAK_FAULT; - insn |= AARCH64_INSN_SF_BIT; - insn = aarch64_insn_encode_immediate(AARCH64_INSN_IMM_N, insn, 1); - break; - default: - pr_err("%s: unknown variant encoding %d\n", __func__, variant); - return AARCH64_BREAK_FAULT; - } - - insn = aarch64_insn_encode_immediate(AARCH64_INSN_IMM_S, insn, lsb); - insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, Rd); - insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, Rn); - return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn, Rm); -} diff --git a/arch/arm64/kernel/jump_label.c b/arch/arm64/kernel/jump_label.c index 9a8a0ae1e75f..fc98037e1220 100644 --- a/arch/arm64/kernel/jump_label.c +++ b/arch/arm64/kernel/jump_label.c @@ -8,6 +8,7 @@ #include <linux/kernel.h> #include <linux/jump_label.h> #include <asm/insn.h> +#include <asm/patching.h> void arch_jump_label_transform(struct jump_entry *entry, enum jump_label_type type) diff --git a/arch/arm64/kernel/kgdb.c b/arch/arm64/kernel/kgdb.c index 1a157ca33262..2aede780fb80 100644 --- a/arch/arm64/kernel/kgdb.c +++ b/arch/arm64/kernel/kgdb.c @@ -17,6 +17,7 @@ #include <asm/debug-monitors.h> #include <asm/insn.h> +#include <asm/patching.h> #include <asm/traps.h> struct dbg_reg_def_t dbg_reg_def[DBG_MAX_REG_NUM] = { diff --git a/arch/arm64/kernel/patching.c b/arch/arm64/kernel/patching.c new file mode 100644 index 000000000000..771f543464e0 --- /dev/null +++ b/arch/arm64/kernel/patching.c @@ -0,0 +1,150 @@ +// SPDX-License-Identifier: GPL-2.0-only +#include <linux/kernel.h> +#include <linux/mm.h> +#include <linux/smp.h> +#include <linux/spinlock.h> +#include <linux/stop_machine.h> +#include <linux/uaccess.h> + +#include <asm/cacheflush.h> +#include <asm/fixmap.h> +#include <asm/insn.h> +#include <asm/kprobes.h> +#include <asm/patching.h> +#include <asm/sections.h> + +static DEFINE_RAW_SPINLOCK(patch_lock); + +static bool is_exit_text(unsigned long addr) +{ + /* discarded with init text/data */ + return system_state < SYSTEM_RUNNING && + 
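/*
 * [Illustrative aside, not part of the patch] The easiest way to sanity-check
 * what the bitmask-immediate encoder in the insn.c hunk above produces is to
 * run its (N, immr, imms) output through the inverse Replicate()/rotate
 * expansion. A hypothetical stand-alone decoder (uses the GCC/Clang
 * __builtin_clz builtin):
 */
#include <assert.h>
#include <stdint.h>

static uint64_t decode_bitmask_imm(unsigned int n, unsigned int immr,
				   unsigned int imms)
{
	/* element size is implied by N and the run of leading ones in imms */
	unsigned int len = 31 - __builtin_clz((n << 6) | (~imms & 0x3f));
	unsigned int esz = 1u << len;			/* 2..64 bits */
	unsigned int s = imms & (esz - 1);		/* ones count, minus one */
	unsigned int r = immr & (esz - 1);		/* rotate-right amount */
	uint64_t welem = (s + 1 == 64) ? ~0ull : (1ull << (s + 1)) - 1;
	uint64_t elem = (welem >> r) | (welem << ((esz - r) % esz));
	unsigned int i;

	if (esz < 64)
		elem &= (1ull << esz) - 1;
	for (i = esz; i < 64; i *= 2)			/* Replicate() */
		elem |= elem << i;
	return elem;
}

int main(void)
{
	/* the triples the encoder emits for 0x00ff00ff00ff00ff and 0x5555... */
	assert(decode_bitmask_imm(0, 0, 0x27) == 0x00ff00ff00ff00ffull);
	assert(decode_bitmask_imm(0, 0, 0x3c) == 0x5555555555555555ull);
	return 0;
}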
addr >= (unsigned long)__exittext_begin && + addr < (unsigned long)__exittext_end; +} + +static bool is_image_text(unsigned long addr) +{ + return core_kernel_text(addr) || is_exit_text(addr); +} + +static void __kprobes *patch_map(void *addr, int fixmap) +{ + unsigned long uintaddr = (uintptr_t) addr; + bool image = is_image_text(uintaddr); + struct page *page; + + if (image) + page = phys_to_page(__pa_symbol(addr)); + else if (IS_ENABLED(CONFIG_STRICT_MODULE_RWX)) + page = vmalloc_to_page(addr); + else + return addr; + + BUG_ON(!page); + return (void *)set_fixmap_offset(fixmap, page_to_phys(page) + + (uintaddr & ~PAGE_MASK)); +} + +static void __kprobes patch_unmap(int fixmap) +{ + clear_fixmap(fixmap); +} +/* + * In ARMv8-A, A64 instructions have a fixed length of 32 bits and are always + * little-endian. + */ +int __kprobes aarch64_insn_read(void *addr, u32 *insnp) +{ + int ret; + __le32 val; + + ret = copy_from_kernel_nofault(&val, addr, AARCH64_INSN_SIZE); + if (!ret) + *insnp = le32_to_cpu(val); + + return ret; +} + +static int __kprobes __aarch64_insn_write(void *addr, __le32 insn) +{ + void *waddr = addr; + unsigned long flags = 0; + int ret; + + raw_spin_lock_irqsave(&patch_lock, flags); + waddr = patch_map(addr, FIX_TEXT_POKE0); + + ret = copy_to_kernel_nofault(waddr, &insn, AARCH64_INSN_SIZE); + + patch_unmap(FIX_TEXT_POKE0); + raw_spin_unlock_irqrestore(&patch_lock, flags); + + return ret; +} + +int __kprobes aarch64_insn_write(void *addr, u32 insn) +{ + return __aarch64_insn_write(addr, cpu_to_le32(insn)); +} + +int __kprobes aarch64_insn_patch_text_nosync(void *addr, u32 insn) +{ + u32 *tp = addr; + int ret; + + /* A64 instructions must be word aligned */ + if ((uintptr_t)tp & 0x3) + return -EINVAL; + + ret = aarch64_insn_write(tp, insn); + if (ret == 0) + caches_clean_inval_pou((uintptr_t)tp, + (uintptr_t)tp + AARCH64_INSN_SIZE); + + return ret; +} + +struct aarch64_insn_patch { + void **text_addrs; + u32 *new_insns; + int insn_cnt; + atomic_t cpu_count; +}; + +static int __kprobes aarch64_insn_patch_text_cb(void *arg) +{ + int i, ret = 0; + struct aarch64_insn_patch *pp = arg; + + /* The first CPU becomes master */ + if (atomic_inc_return(&pp->cpu_count) == 1) { + for (i = 0; ret == 0 && i < pp->insn_cnt; i++) + ret = aarch64_insn_patch_text_nosync(pp->text_addrs[i], + pp->new_insns[i]); + /* Notify other processors with an additional increment. 
*/ + atomic_inc(&pp->cpu_count); + } else { + while (atomic_read(&pp->cpu_count) <= num_online_cpus()) + cpu_relax(); + isb(); + } + + return ret; +} + +int __kprobes aarch64_insn_patch_text(void *addrs[], u32 insns[], int cnt) +{ + struct aarch64_insn_patch patch = { + .text_addrs = addrs, + .new_insns = insns, + .insn_cnt = cnt, + .cpu_count = ATOMIC_INIT(0), + }; + + if (cnt <= 0) + return -EINVAL; + + return stop_machine_cpuslocked(aarch64_insn_patch_text_cb, &patch, + cpu_online_mask); +} diff --git a/arch/arm64/kernel/perf_callchain.c b/arch/arm64/kernel/perf_callchain.c index 88ff471b0bce..4a72c2727309 100644 --- a/arch/arm64/kernel/perf_callchain.c +++ b/arch/arm64/kernel/perf_callchain.c @@ -116,7 +116,7 @@ void perf_callchain_user(struct perf_callchain_entry_ctx *entry, tail = (struct frame_tail __user *)regs->regs[29]; while (entry->nr < entry->max_stack && - tail && !((unsigned long)tail & 0xf)) + tail && !((unsigned long)tail & 0x7)) tail = user_backtrace(tail, entry); } else { #ifdef CONFIG_COMPAT diff --git a/arch/arm64/kernel/perf_event.c b/arch/arm64/kernel/perf_event.c index f594957e29bd..d07788dad388 100644 --- a/arch/arm64/kernel/perf_event.c +++ b/arch/arm64/kernel/perf_event.c @@ -165,10 +165,7 @@ armv8pmu_events_sysfs_show(struct device *dev, } #define ARMV8_EVENT_ATTR(name, config) \ - (&((struct perf_pmu_events_attr) { \ - .attr = __ATTR(name, 0444, armv8pmu_events_sysfs_show, NULL), \ - .id = config, \ - }).attr.attr) + PMU_EVENT_ATTR_ID(name, armv8pmu_events_sysfs_show, config) static struct attribute *armv8_pmuv3_event_attrs[] = { ARMV8_EVENT_ATTR(sw_incr, ARMV8_PMUV3_PERFCTR_SW_INCR), @@ -312,13 +309,46 @@ static ssize_t slots_show(struct device *dev, struct device_attribute *attr, struct arm_pmu *cpu_pmu = container_of(pmu, struct arm_pmu, pmu); u32 slots = cpu_pmu->reg_pmmir & ARMV8_PMU_SLOTS_MASK; - return snprintf(page, PAGE_SIZE, "0x%08x\n", slots); + return sysfs_emit(page, "0x%08x\n", slots); } static DEVICE_ATTR_RO(slots); +static ssize_t bus_slots_show(struct device *dev, struct device_attribute *attr, + char *page) +{ + struct pmu *pmu = dev_get_drvdata(dev); + struct arm_pmu *cpu_pmu = container_of(pmu, struct arm_pmu, pmu); + u32 bus_slots = (cpu_pmu->reg_pmmir >> ARMV8_PMU_BUS_SLOTS_SHIFT) + & ARMV8_PMU_BUS_SLOTS_MASK; + + return sysfs_emit(page, "0x%08x\n", bus_slots); +} + +static DEVICE_ATTR_RO(bus_slots); + +static ssize_t bus_width_show(struct device *dev, struct device_attribute *attr, + char *page) +{ + struct pmu *pmu = dev_get_drvdata(dev); + struct arm_pmu *cpu_pmu = container_of(pmu, struct arm_pmu, pmu); + u32 bus_width = (cpu_pmu->reg_pmmir >> ARMV8_PMU_BUS_WIDTH_SHIFT) + & ARMV8_PMU_BUS_WIDTH_MASK; + u32 val = 0; + + /* Encoded as Log2(number of bytes), plus one */ + if (bus_width > 2 && bus_width < 13) + val = 1 << (bus_width - 1); + + return sysfs_emit(page, "0x%08x\n", val); +} + +static DEVICE_ATTR_RO(bus_width); + static struct attribute *armv8_pmuv3_caps_attrs[] = { &dev_attr_slots.attr, + &dev_attr_bus_slots.attr, + &dev_attr_bus_width.attr, NULL, }; diff --git a/arch/arm64/kernel/probes/kprobes.c b/arch/arm64/kernel/probes/kprobes.c index d607c9912025..6dbcc89f6662 100644 --- a/arch/arm64/kernel/probes/kprobes.c +++ b/arch/arm64/kernel/probes/kprobes.c @@ -7,26 +7,28 @@ * Copyright (C) 2013 Linaro Limited. 
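/*
 * [Illustrative aside, not part of the patch] A sketch of how text-patching
 * callers can combine the insn generators with the entry points that now live
 * in patching.c: build the replacement instruction, then let
 * aarch64_insn_patch_text() rendezvous all online CPUs via stop_machine() as
 * in the callback above. The helper below is hypothetical, not kernel code:
 */
#include <linux/errno.h>
#include <linux/types.h>

#include <asm/insn.h>
#include <asm/patching.h>

static int __maybe_unused patch_branch_or_nop(void *addr, unsigned long target,
					      bool enable)
{
	u32 insn;

	if (enable)
		insn = aarch64_insn_gen_branch_imm((unsigned long)addr, target,
						   AARCH64_INSN_BRANCH_NOLINK);
	else
		insn = aarch64_insn_gen_nop();

	if (insn == AARCH64_BREAK_FAULT)
		return -EINVAL;

	/* one writer; every other online CPU spins, then executes isb() */
	return aarch64_insn_patch_text(&addr, &insn, 1);
}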
* Author: Sandeepa Prabhu <sandeepa.prabhu@linaro.org> */ +#include <linux/extable.h> #include <linux/kasan.h> #include <linux/kernel.h> #include <linux/kprobes.h> -#include <linux/extable.h> -#include <linux/slab.h> -#include <linux/stop_machine.h> #include <linux/sched/debug.h> #include <linux/set_memory.h> +#include <linux/slab.h> +#include <linux/stop_machine.h> #include <linux/stringify.h> +#include <linux/uaccess.h> #include <linux/vmalloc.h> -#include <asm/traps.h> -#include <asm/ptrace.h> + #include <asm/cacheflush.h> -#include <asm/debug-monitors.h> #include <asm/daifflags.h> -#include <asm/system_misc.h> +#include <asm/debug-monitors.h> #include <asm/insn.h> -#include <linux/uaccess.h> #include <asm/irq.h> +#include <asm/patching.h> +#include <asm/ptrace.h> #include <asm/sections.h> +#include <asm/system_misc.h> +#include <asm/traps.h> #include "decode-insn.h" @@ -277,23 +279,6 @@ int __kprobes kprobe_fault_handler(struct pt_regs *regs, unsigned int fsr) case KPROBE_HIT_ACTIVE: case KPROBE_HIT_SSDONE: /* - * We increment the nmissed count for accounting, - * we can also use npre/npostfault count for accounting - * these specific fault cases. - */ - kprobes_inc_nmissed_count(cur); - - /* - * We come here because instructions in the pre/post - * handler caused the page_fault, this could happen - * if handler tries to access user space by - * copy_from_user(), get_user() etc. Let the - * user-specified handler try to fix it first. - */ - if (cur->fault_handler && cur->fault_handler(cur, regs, fsr)) - return 1; - - /* * In case the user-specified fault handler returned * zero, try to fix up. */ diff --git a/arch/arm64/kernel/probes/simulate-insn.c b/arch/arm64/kernel/probes/simulate-insn.c index 25f67ec59635..22d0b3252476 100644 --- a/arch/arm64/kernel/probes/simulate-insn.c +++ b/arch/arm64/kernel/probes/simulate-insn.c @@ -10,6 +10,7 @@ #include <linux/kprobes.h> #include <asm/ptrace.h> +#include <asm/traps.h> #include "simulate-insn.h" diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c index b4bb67f17a2c..5ba0ed036dee 100644 --- a/arch/arm64/kernel/process.c +++ b/arch/arm64/kernel/process.c @@ -18,7 +18,6 @@ #include <linux/sched/task.h> #include <linux/sched/task_stack.h> #include <linux/kernel.h> -#include <linux/lockdep.h> #include <linux/mman.h> #include <linux/mm.h> #include <linux/nospec.h> @@ -46,7 +45,6 @@ #include <linux/prctl.h> #include <asm/alternative.h> -#include <asm/arch_gicv3.h> #include <asm/compat.h> #include <asm/cpufeature.h> #include <asm/cacheflush.h> @@ -74,63 +72,6 @@ EXPORT_SYMBOL_GPL(pm_power_off); void (*arm_pm_restart)(enum reboot_mode reboot_mode, const char *cmd); -static void noinstr __cpu_do_idle(void) -{ - dsb(sy); - wfi(); -} - -static void noinstr __cpu_do_idle_irqprio(void) -{ - unsigned long pmr; - unsigned long daif_bits; - - daif_bits = read_sysreg(daif); - write_sysreg(daif_bits | PSR_I_BIT | PSR_F_BIT, daif); - - /* - * Unmask PMR before going idle to make sure interrupts can - * be raised. - */ - pmr = gic_read_pmr(); - gic_write_pmr(GIC_PRIO_IRQON | GIC_PRIO_PSR_I_SET); - - __cpu_do_idle(); - - gic_write_pmr(pmr); - write_sysreg(daif_bits, daif); -} - -/* - * cpu_do_idle() - * - * Idle the processor (wait for interrupt). - * - * If the CPU supports priority masking we must do additional work to - * ensure that interrupts are not masked at the PMR (because the core will - * not wake up if we block the wake up signal in the interrupt controller). 
- */ -void noinstr cpu_do_idle(void) -{ - if (system_uses_irq_prio_masking()) - __cpu_do_idle_irqprio(); - else - __cpu_do_idle(); -} - -/* - * This is our default idle handler. - */ -void noinstr arch_cpu_idle(void) -{ - /* - * This should do all the clock switching and wait for interrupt - * tricks - */ - cpu_do_idle(); - raw_local_irq_enable(); -} - #ifdef CONFIG_HOTPLUG_CPU void arch_cpu_idle_dead(void) { @@ -435,6 +376,11 @@ int copy_thread(unsigned long clone_flags, unsigned long stack_start, } p->thread.cpu_context.pc = (unsigned long)ret_from_fork; p->thread.cpu_context.sp = (unsigned long)childregs; + /* + * For the benefit of the unwinder, set up childregs->stackframe + * as the final frame for the new task. + */ + p->thread.cpu_context.fp = (unsigned long)childregs->stackframe; ptrace_hw_copy_thread(p); @@ -527,6 +473,15 @@ static void erratum_1418040_thread_switch(struct task_struct *prev, write_sysreg(val, cntkctl_el1); } +static void compat_thread_switch(struct task_struct *next) +{ + if (!is_compat_thread(task_thread_info(next))) + return; + + if (static_branch_unlikely(&arm64_mismatched_32bit_el0)) + set_tsk_thread_flag(next, TIF_NOTIFY_RESUME); +} + static void update_sctlr_el1(u64 sctlr) { /* @@ -568,6 +523,7 @@ __notrace_funcgraph struct task_struct *__switch_to(struct task_struct *prev, ssbs_thread_switch(next); erratum_1418040_thread_switch(prev, next); ptrauth_thread_switch_user(next); + compat_thread_switch(next); /* * Complete any pending TLB or cache maintenance on this CPU in case @@ -598,7 +554,7 @@ unsigned long get_wchan(struct task_struct *p) struct stackframe frame; unsigned long stack_page, ret = 0; int count = 0; - if (!p || p == current || p->state == TASK_RUNNING) + if (!p || p == current || task_is_running(p)) return 0; stack_page = (unsigned long)try_get_task_stack(p); @@ -633,8 +589,15 @@ unsigned long arch_align_stack(unsigned long sp) */ void arch_setup_new_exec(void) { - current->mm->context.flags = is_compat_task() ? MMCF_AARCH32 : 0; + unsigned long mmflags = 0; + + if (is_compat_task()) { + mmflags = MMCF_AARCH32; + if (static_branch_unlikely(&arm64_mismatched_32bit_el0)) + set_tsk_thread_flag(current, TIF_NOTIFY_RESUME); + } + current->mm->context.flags = mmflags; ptrauth_thread_init_user(); mte_thread_init_user(); @@ -724,22 +687,6 @@ static int __init tagged_addr_init(void) core_initcall(tagged_addr_init); #endif /* CONFIG_ARM64_TAGGED_ADDR_ABI */ -asmlinkage void __sched arm64_preempt_schedule_irq(void) -{ - lockdep_assert_irqs_disabled(); - - /* - * Preempting a task from an IRQ means we leave copies of PSTATE - * on the stack. cpufeature's enable calls may modify PSTATE, but - * resuming one of these preempted tasks would undo those changes. - * - * Only allow a task to be preempted once cpufeatures have been - * enabled. 
- */ - if (system_capabilities_finalized()) - preempt_schedule_irq(); -} - #ifdef CONFIG_BINFMT_ELF int arch_elf_adjust_prot(int prot, const struct arch_elf_state *state, bool has_interp, bool is_interp) diff --git a/arch/arm64/kernel/ptrace.c b/arch/arm64/kernel/ptrace.c index eb2f73939b7b..499b6b2f9757 100644 --- a/arch/arm64/kernel/ptrace.c +++ b/arch/arm64/kernel/ptrace.c @@ -122,7 +122,7 @@ static bool regs_within_kernel_stack(struct pt_regs *regs, unsigned long addr) { return ((addr & ~(THREAD_SIZE - 1)) == (kernel_stack_pointer(regs) & ~(THREAD_SIZE - 1))) || - on_irq_stack(addr, NULL); + on_irq_stack(addr, sizeof(unsigned long), NULL); } /** diff --git a/arch/arm64/kernel/sdei.c b/arch/arm64/kernel/sdei.c index 2c7ca449dd51..47f77d1234cb 100644 --- a/arch/arm64/kernel/sdei.c +++ b/arch/arm64/kernel/sdei.c @@ -162,31 +162,33 @@ static int init_sdei_scs(void) return err; } -static bool on_sdei_normal_stack(unsigned long sp, struct stack_info *info) +static bool on_sdei_normal_stack(unsigned long sp, unsigned long size, + struct stack_info *info) { unsigned long low = (unsigned long)raw_cpu_read(sdei_stack_normal_ptr); unsigned long high = low + SDEI_STACK_SIZE; - return on_stack(sp, low, high, STACK_TYPE_SDEI_NORMAL, info); + return on_stack(sp, size, low, high, STACK_TYPE_SDEI_NORMAL, info); } -static bool on_sdei_critical_stack(unsigned long sp, struct stack_info *info) +static bool on_sdei_critical_stack(unsigned long sp, unsigned long size, + struct stack_info *info) { unsigned long low = (unsigned long)raw_cpu_read(sdei_stack_critical_ptr); unsigned long high = low + SDEI_STACK_SIZE; - return on_stack(sp, low, high, STACK_TYPE_SDEI_CRITICAL, info); + return on_stack(sp, size, low, high, STACK_TYPE_SDEI_CRITICAL, info); } -bool _on_sdei_stack(unsigned long sp, struct stack_info *info) +bool _on_sdei_stack(unsigned long sp, unsigned long size, struct stack_info *info) { if (!IS_ENABLED(CONFIG_VMAP_STACK)) return false; - if (on_sdei_critical_stack(sp, info)) + if (on_sdei_critical_stack(sp, size, info)) return true; - if (on_sdei_normal_stack(sp, info)) + if (on_sdei_normal_stack(sp, size, info)) return true; return false; @@ -231,13 +233,13 @@ out_err: } /* - * __sdei_handler() returns one of: + * do_sdei_event() returns one of: * SDEI_EV_HANDLED - success, return to the interrupted context. * SDEI_EV_FAILED - failure, return this error code to firmare. * virtual-address - success, return to this address. */ -static __kprobes unsigned long _sdei_handler(struct pt_regs *regs, - struct sdei_registered_event *arg) +unsigned long __kprobes do_sdei_event(struct pt_regs *regs, + struct sdei_registered_event *arg) { u32 mode; int i, err = 0; @@ -292,45 +294,3 @@ static __kprobes unsigned long _sdei_handler(struct pt_regs *regs, return vbar + 0x480; } - -static void __kprobes notrace __sdei_pstate_entry(void) -{ - /* - * The original SDEI spec (ARM DEN 0054A) can be read ambiguously as to - * whether PSTATE bits are inherited unchanged or generated from - * scratch, and the TF-A implementation always clears PAN and always - * clears UAO. There are no other known implementations. - * - * Subsequent revisions (ARM DEN 0054B) follow the usual rules for how - * PSTATE is modified upon architectural exceptions, and so PAN is - * either inherited or set per SCTLR_ELx.SPAN, and UAO is always - * cleared. - * - * We must explicitly reset PAN to the expected state, including - * clearing it when the host isn't using it, in case a VM had it set. 
- */ - if (system_uses_hw_pan()) - set_pstate_pan(1); - else if (cpu_has_pan()) - set_pstate_pan(0); -} - -asmlinkage noinstr unsigned long -__sdei_handler(struct pt_regs *regs, struct sdei_registered_event *arg) -{ - unsigned long ret; - - /* - * We didn't take an exception to get here, so the HW hasn't - * set/cleared bits in PSTATE that we may rely on. Initialize PAN. - */ - __sdei_pstate_entry(); - - arm64_enter_nmi(regs); - - ret = _sdei_handler(regs, arg); - - arm64_exit_nmi(regs); - - return ret; -} diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c index 61845c0821d9..8ed66142b088 100644 --- a/arch/arm64/kernel/setup.c +++ b/arch/arm64/kernel/setup.c @@ -87,12 +87,6 @@ void __init smp_setup_processor_id(void) u64 mpidr = read_cpuid_mpidr() & MPIDR_HWID_BITMASK; set_cpu_logical_map(0, mpidr); - /* - * clear __my_cpu_offset on boot CPU to avoid hang caused by - * using percpu variable early, for example, lockdep will - * access percpu variable inside lock_release - */ - set_my_cpu_offset(0); pr_info("Booting Linux on physical CPU 0x%010lx [0x%08x]\n", (unsigned long)mpidr, read_cpuid_id()); } @@ -381,7 +375,7 @@ void __init __no_sanitize_address setup_arch(char **cmdline_p) * faults in case uaccess_enable() is inadvertently called by the init * thread. */ - init_task.thread_info.ttbr0 = __pa_symbol(reserved_pg_dir); + init_task.thread_info.ttbr0 = phys_to_ttbr(__pa_symbol(reserved_pg_dir)); #endif if (boot_args[1] || boot_args[2] || boot_args[3]) { diff --git a/arch/arm64/kernel/signal.c b/arch/arm64/kernel/signal.c index 6237486ff6bb..f8192f4ae0b8 100644 --- a/arch/arm64/kernel/signal.c +++ b/arch/arm64/kernel/signal.c @@ -911,6 +911,19 @@ static void do_signal(struct pt_regs *regs) restore_saved_sigmask(); } +static bool cpu_affinity_invalid(struct pt_regs *regs) +{ + if (!compat_user_mode(regs)) + return false; + + /* + * We're preemptible, but a reschedule will cause us to check the + * affinity again. + */ + return !cpumask_test_cpu(raw_smp_processor_id(), + system_32bit_el0_cpumask()); +} + asmlinkage void do_notify_resume(struct pt_regs *regs, unsigned long thread_flags) { @@ -938,6 +951,19 @@ asmlinkage void do_notify_resume(struct pt_regs *regs, if (thread_flags & _TIF_NOTIFY_RESUME) { tracehook_notify_resume(regs); rseq_handle_notify_resume(NULL, regs); + + /* + * If we reschedule after checking the affinity + * then we must ensure that TIF_NOTIFY_RESUME + * is set so that we check the affinity again. + * Since tracehook_notify_resume() clears the + * flag, ensure that the compiler doesn't move + * it after the affinity check. + */ + barrier(); + + if (cpu_affinity_invalid(regs)) + force_sig(SIGKILL); } if (thread_flags & _TIF_FOREIGN_FPSTATE) diff --git a/arch/arm64/kernel/smccc-call.S b/arch/arm64/kernel/smccc-call.S index d62447964ed9..d3d37f932b97 100644 --- a/arch/arm64/kernel/smccc-call.S +++ b/arch/arm64/kernel/smccc-call.S @@ -7,8 +7,34 @@ #include <asm/asm-offsets.h> #include <asm/assembler.h> +#include <asm/thread_info.h> + +/* + * If we have SMCCC v1.3 and (as is likely) no SVE state in + * the registers then set the SMCCC hint bit to say there's no + * need to preserve it. Do this by directly adjusting the SMCCC + * function value which is already stored in x0 ready to be called. + */ +SYM_FUNC_START(__arm_smccc_sve_check) + + ldr_l x16, smccc_has_sve_hint + cbz x16, 2f + + get_current_task x16 + ldr x16, [x16, #TSK_TI_FLAGS] + tbnz x16, #TIF_FOREIGN_FPSTATE, 1f // Any live FP state? + tbnz x16, #TIF_SVE, 2f // Does that state include SVE? 
+ +1: orr x0, x0, ARM_SMCCC_1_3_SVE_HINT + +2: ret +SYM_FUNC_END(__arm_smccc_sve_check) +EXPORT_SYMBOL(__arm_smccc_sve_check) .macro SMCCC instr +alternative_if ARM64_SVE + bl __arm_smccc_sve_check +alternative_else_nop_endif \instr #0 ldr x4, [sp] stp x0, x1, [x4, #ARM_SMCCC_RES_X0_OFFS] @@ -43,3 +69,60 @@ SYM_FUNC_START(__arm_smccc_hvc) SMCCC hvc SYM_FUNC_END(__arm_smccc_hvc) EXPORT_SYMBOL(__arm_smccc_hvc) + + .macro SMCCC_1_2 instr + /* Save `res` and free a GPR that won't be clobbered */ + stp x1, x19, [sp, #-16]! + + /* Ensure `args` won't be clobbered while loading regs in next step */ + mov x19, x0 + + /* Load the registers x0 - x17 from the struct arm_smccc_1_2_regs */ + ldp x0, x1, [x19, #ARM_SMCCC_1_2_REGS_X0_OFFS] + ldp x2, x3, [x19, #ARM_SMCCC_1_2_REGS_X2_OFFS] + ldp x4, x5, [x19, #ARM_SMCCC_1_2_REGS_X4_OFFS] + ldp x6, x7, [x19, #ARM_SMCCC_1_2_REGS_X6_OFFS] + ldp x8, x9, [x19, #ARM_SMCCC_1_2_REGS_X8_OFFS] + ldp x10, x11, [x19, #ARM_SMCCC_1_2_REGS_X10_OFFS] + ldp x12, x13, [x19, #ARM_SMCCC_1_2_REGS_X12_OFFS] + ldp x14, x15, [x19, #ARM_SMCCC_1_2_REGS_X14_OFFS] + ldp x16, x17, [x19, #ARM_SMCCC_1_2_REGS_X16_OFFS] + + \instr #0 + + /* Load the `res` from the stack */ + ldr x19, [sp] + + /* Store the registers x0 - x17 into the result structure */ + stp x0, x1, [x19, #ARM_SMCCC_1_2_REGS_X0_OFFS] + stp x2, x3, [x19, #ARM_SMCCC_1_2_REGS_X2_OFFS] + stp x4, x5, [x19, #ARM_SMCCC_1_2_REGS_X4_OFFS] + stp x6, x7, [x19, #ARM_SMCCC_1_2_REGS_X6_OFFS] + stp x8, x9, [x19, #ARM_SMCCC_1_2_REGS_X8_OFFS] + stp x10, x11, [x19, #ARM_SMCCC_1_2_REGS_X10_OFFS] + stp x12, x13, [x19, #ARM_SMCCC_1_2_REGS_X12_OFFS] + stp x14, x15, [x19, #ARM_SMCCC_1_2_REGS_X14_OFFS] + stp x16, x17, [x19, #ARM_SMCCC_1_2_REGS_X16_OFFS] + + /* Restore original x19 */ + ldp xzr, x19, [sp], #16 + ret +.endm + +/* + * void arm_smccc_1_2_hvc(const struct arm_smccc_1_2_regs *args, + * struct arm_smccc_1_2_regs *res); + */ +SYM_FUNC_START(arm_smccc_1_2_hvc) + SMCCC_1_2 hvc +SYM_FUNC_END(arm_smccc_1_2_hvc) +EXPORT_SYMBOL(arm_smccc_1_2_hvc) + +/* + * void arm_smccc_1_2_smc(const struct arm_smccc_1_2_regs *args, + * struct arm_smccc_1_2_regs *res); + */ +SYM_FUNC_START(arm_smccc_1_2_smc) + SMCCC_1_2 smc +SYM_FUNC_END(arm_smccc_1_2_smc) +EXPORT_SYMBOL(arm_smccc_1_2_smc) diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c index 9b4c1118194d..6f6ff072acbd 100644 --- a/arch/arm64/kernel/smp.c +++ b/arch/arm64/kernel/smp.c @@ -120,11 +120,7 @@ int __cpu_up(unsigned int cpu, struct task_struct *idle) * page tables. 
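/*
 * [Illustrative aside, not part of the patch] With the SMCCC v1.2 calling
 * convention added above, all of x0-x17 are passed in and returned through
 * struct arm_smccc_1_2_regs rather than the four-register arm_smccc_res. A
 * hypothetical caller (the function ID below is made up for illustration):
 */
#include <linux/arm-smccc.h>
#include <linux/printk.h>

static void example_smccc_1_2_call(void)
{
	struct arm_smccc_1_2_regs args = {
		.a0 = 0xc4000042,	/* hypothetical fast-call function ID */
	};
	struct arm_smccc_1_2_regs res;

	arm_smccc_1_2_smc(&args, &res);

	if (res.a0 != SMCCC_RET_SUCCESS)
		pr_warn("example SMCCC 1.2 call failed: %ld\n", (long)res.a0);
}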
*/ secondary_data.task = idle; - secondary_data.stack = task_stack_page(idle) + THREAD_SIZE; update_cpu_boot_status(CPU_MMU_OFF); - dcache_clean_inval_poc((unsigned long)&secondary_data, - (unsigned long)&secondary_data + - sizeof(secondary_data)); /* Now bring the CPU into our world */ ret = boot_secondary(cpu, idle); @@ -144,10 +140,6 @@ int __cpu_up(unsigned int cpu, struct task_struct *idle) pr_crit("CPU%u: failed to come online\n", cpu); secondary_data.task = NULL; - secondary_data.stack = NULL; - dcache_clean_inval_poc((unsigned long)&secondary_data, - (unsigned long)&secondary_data + - sizeof(secondary_data)); status = READ_ONCE(secondary_data.status); if (status == CPU_MMU_OFF) status = READ_ONCE(__early_cpu_boot_status); @@ -206,10 +198,7 @@ asmlinkage notrace void secondary_start_kernel(void) u64 mpidr = read_cpuid_mpidr() & MPIDR_HWID_BITMASK; struct mm_struct *mm = &init_mm; const struct cpu_operations *ops; - unsigned int cpu; - - cpu = task_cpu(current); - set_my_cpu_offset(per_cpu_offset(cpu)); + unsigned int cpu = smp_processor_id(); /* * All kernel threads share the same mm context; grab a @@ -228,7 +217,6 @@ asmlinkage notrace void secondary_start_kernel(void) init_gic_priority_masking(); rcu_cpu_starting(cpu); - preempt_disable(); trace_hardirqs_off(); /* @@ -356,7 +344,7 @@ void __cpu_die(unsigned int cpu) pr_crit("CPU%u: cpu didn't die\n", cpu); return; } - pr_notice("CPU%u: shutdown\n", cpu); + pr_debug("CPU%u: shutdown\n", cpu); /* * Now that the dying CPU is beyond the point of no return w.r.t. @@ -456,6 +444,11 @@ void __init smp_cpus_done(unsigned int max_cpus) void __init smp_prepare_boot_cpu(void) { + /* + * The runtime per-cpu areas have been allocated by + * setup_per_cpu_areas(), and CPU0's boot time per-cpu area will be + * freed shortly, so we must move over to the runtime per-cpu area. + */ set_my_cpu_offset(per_cpu_offset(smp_processor_id())); cpuinfo_store_boot_cpu(); diff --git a/arch/arm64/kernel/stacktrace.c b/arch/arm64/kernel/stacktrace.c index de07147a7926..b189de5ca6cb 100644 --- a/arch/arm64/kernel/stacktrace.c +++ b/arch/arm64/kernel/stacktrace.c @@ -68,13 +68,17 @@ int notrace unwind_frame(struct task_struct *tsk, struct stackframe *frame) unsigned long fp = frame->fp; struct stack_info info; - if (fp & 0xf) - return -EINVAL; - if (!tsk) tsk = current; - if (!on_accessible_stack(tsk, fp, &info)) + /* Final frame; nothing to unwind */ + if (fp == (unsigned long)task_pt_regs(tsk)->stackframe) + return -ENOENT; + + if (fp & 0x7) + return -EINVAL; + + if (!on_accessible_stack(tsk, fp, 16, &info)) return -EINVAL; if (test_bit(info.type, frame->stacks_done)) @@ -128,12 +132,6 @@ int notrace unwind_frame(struct task_struct *tsk, struct stackframe *frame) frame->pc = ptrauth_strip_insn_pac(frame->pc); - /* - * This is a terminal record, so we have finished unwinding. 
diff --git a/arch/arm64/kernel/suspend.c b/arch/arm64/kernel/suspend.c
index e3f72df9509d..938ce6fbee8a 100644
--- a/arch/arm64/kernel/suspend.c
+++ b/arch/arm64/kernel/suspend.c
@@ -7,6 +7,7 @@
 #include <asm/alternative.h>
 #include <asm/cacheflush.h>
 #include <asm/cpufeature.h>
+#include <asm/cpuidle.h>
 #include <asm/daifflags.h>
 #include <asm/debug-monitors.h>
 #include <asm/exec.h>
@@ -91,6 +92,7 @@ int cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
 	int ret = 0;
 	unsigned long flags;
 	struct sleep_stack_data state;
+	struct arm_cpuidle_irq_context context;
 
 	/* Report any MTE async fault before going to suspend */
 	mte_suspend_enter();
@@ -103,12 +105,18 @@ int cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
 	flags = local_daif_save();
 
 	/*
-	 * Function graph tracer state gets incosistent when the kernel
+	 * Function graph tracer state gets inconsistent when the kernel
 	 * calls functions that never return (aka suspend finishers) hence
 	 * disable graph tracing during their execution.
 	 */
 	pause_graph_tracing();
 
+	/*
+	 * Switch to using DAIF.IF instead of PMR in order to reliably
+	 * resume if we're using pseudo-NMIs.
+	 */
+	arm_cpuidle_save_irq_context(&context);
+
 	if (__cpu_suspend_enter(&state)) {
 		/* Call the suspend finisher */
 		ret = fn(arg);
@@ -126,6 +134,8 @@ int cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
 		RCU_NONIDLE(__cpu_suspend_exit());
 	}
 
+	arm_cpuidle_restore_irq_context(&context);
+
 	unpause_graph_tracing();
 
 	/*
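The cpu_suspend() change above leans on arm_cpuidle_save_irq_context() / arm_cpuidle_restore_irq_context(), which this series provides via the newly included <asm/cpuidle.h>. The sketch below is only an approximation of the idea, not the mainline helpers themselves: example_irq_context and the example_*() functions are invented names, and the real implementation may differ in detail. When pseudo-NMIs (GIC priority masking) are in use, interrupt masking is moved onto PSTATE.I/F and the GIC priority mask is opened up, so the masking state survives the power-down and resume path.

#include <asm/arch_gicv3.h>	/* gic_read_pmr(), gic_write_pmr() */
#include <asm/cpufeature.h>	/* system_uses_irq_prio_masking() */
#include <asm/ptrace.h>		/* PSR_I_BIT, PSR_F_BIT, GIC_PRIO_IRQON */
#include <asm/sysreg.h>		/* read_sysreg(), write_sysreg() */

struct example_irq_context {
	unsigned long pmr;
	unsigned long daif_bits;
};

static void example_save_irq_context(struct example_irq_context *c)
{
	if (!system_uses_irq_prio_masking())
		return;

	/* Mask IRQ/FIQ via PSTATE so masking no longer depends on the PMR... */
	c->daif_bits = read_sysreg(daif);
	write_sysreg(c->daif_bits | PSR_I_BIT | PSR_F_BIT, daif);

	/* ...then open the GIC priority mask for the duration of suspend. */
	c->pmr = gic_read_pmr();
	gic_write_pmr(GIC_PRIO_IRQON);
}

static void example_restore_irq_context(struct example_irq_context *c)
{
	if (!system_uses_irq_prio_masking())
		return;

	gic_write_pmr(c->pmr);
	write_sysreg(c->daif_bits, daif);
}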
diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c
index a05d34f0e82a..b03e383d944a 100644
--- a/arch/arm64/kernel/traps.c
+++ b/arch/arm64/kernel/traps.c
@@ -38,6 +38,7 @@
 #include <asm/extable.h>
 #include <asm/insn.h>
 #include <asm/kprobes.h>
+#include <asm/patching.h>
 #include <asm/traps.h>
 #include <asm/smp.h>
 #include <asm/stack_pointer.h>
@@ -45,11 +46,102 @@
 #include <asm/system_misc.h>
 #include <asm/sysreg.h>
 
-static const char *handler[] = {
-	"Synchronous Abort",
-	"IRQ",
-	"FIQ",
-	"Error"
+static bool __kprobes __check_eq(unsigned long pstate)
+{
+	return (pstate & PSR_Z_BIT) != 0;
+}
+
+static bool __kprobes __check_ne(unsigned long pstate)
+{
+	return (pstate & PSR_Z_BIT) == 0;
+}
+
+static bool __kprobes __check_cs(unsigned long pstate)
+{
+	return (pstate & PSR_C_BIT) != 0;
+}
+
+static bool __kprobes __check_cc(unsigned long pstate)
+{
+	return (pstate & PSR_C_BIT) == 0;
+}
+
+static bool __kprobes __check_mi(unsigned long pstate)
+{
+	return (pstate & PSR_N_BIT) != 0;
+}
+
+static bool __kprobes __check_pl(unsigned long pstate)
+{
+	return (pstate & PSR_N_BIT) == 0;
+}
+
+static bool __kprobes __check_vs(unsigned long pstate)
+{
+	return (pstate & PSR_V_BIT) != 0;
+}
+
+static bool __kprobes __check_vc(unsigned long pstate)
+{
+	return (pstate & PSR_V_BIT) == 0;
+}
+
+static bool __kprobes __check_hi(unsigned long pstate)
+{
+	pstate &= ~(pstate >> 1);	/* PSR_C_BIT &= ~PSR_Z_BIT */
+	return (pstate & PSR_C_BIT) != 0;
+}
+
+static bool __kprobes __check_ls(unsigned long pstate)
+{
+	pstate &= ~(pstate >> 1);	/* PSR_C_BIT &= ~PSR_Z_BIT */
+	return (pstate & PSR_C_BIT) == 0;
+}
+
+static bool __kprobes __check_ge(unsigned long pstate)
+{
+	pstate ^= (pstate << 3);	/* PSR_N_BIT ^= PSR_V_BIT */
+	return (pstate & PSR_N_BIT) == 0;
+}
+
+static bool __kprobes __check_lt(unsigned long pstate)
+{
+	pstate ^= (pstate << 3);	/* PSR_N_BIT ^= PSR_V_BIT */
+	return (pstate & PSR_N_BIT) != 0;
+}
+
+static bool __kprobes __check_gt(unsigned long pstate)
+{
+	/*PSR_N_BIT ^= PSR_V_BIT */
+	unsigned long temp = pstate ^ (pstate << 3);
+
+	temp |= (pstate << 1);	/*PSR_N_BIT |= PSR_Z_BIT */
+	return (temp & PSR_N_BIT) == 0;
+}
+
+static bool __kprobes __check_le(unsigned long pstate)
+{
+	/*PSR_N_BIT ^= PSR_V_BIT */
+	unsigned long temp = pstate ^ (pstate << 3);
+
+	temp |= (pstate << 1);	/*PSR_N_BIT |= PSR_Z_BIT */
+	return (temp & PSR_N_BIT) != 0;
+}
+
+static bool __kprobes __check_al(unsigned long pstate)
+{
+	return true;
+}
+
+/*
+ * Note that the ARMv8 ARM calls condition code 0b1111 "nv", but states that
+ * it behaves identically to 0b1110 ("al").
+ */
+pstate_check_t * const aarch32_opcode_cond_checks[16] = {
+	__check_eq, __check_ne, __check_cs, __check_cc,
+	__check_mi, __check_pl, __check_vs, __check_vc,
+	__check_hi, __check_ls, __check_ge, __check_lt,
+	__check_gt, __check_le, __check_al, __check_al
 };
 
 int show_unhandled_signals = 0;
@@ -751,27 +843,8 @@ const char *esr_get_class_string(u32 esr)
 }
 
 /*
- * bad_mode handles the impossible case in the exception vector. This is always
- * fatal.
- */
-asmlinkage void notrace bad_mode(struct pt_regs *regs, int reason, unsigned int esr)
-{
-	arm64_enter_nmi(regs);
-
-	console_verbose();
-
-	pr_crit("Bad mode in %s handler detected on CPU%d, code 0x%08x -- %s\n",
-		handler[reason], smp_processor_id(), esr,
-		esr_get_class_string(esr));
-
-	__show_regs(regs);
-	local_daif_mask();
-	panic("bad mode");
-}
-
-/*
  * bad_el0_sync handles unexpected, but potentially recoverable synchronous
- * exceptions taken from EL0. Unlike bad_mode, this returns.
+ * exceptions taken from EL0.
  */
 void bad_el0_sync(struct pt_regs *regs, int reason, unsigned int esr)
 {
@@ -789,15 +862,11 @@ void bad_el0_sync(struct pt_regs *regs, int reason, unsigned int esr)
 DEFINE_PER_CPU(unsigned long [OVERFLOW_STACK_SIZE/sizeof(long)],
 	overflow_stack)__aligned(16);
 
-asmlinkage void noinstr handle_bad_stack(struct pt_regs *regs)
+void panic_bad_stack(struct pt_regs *regs, unsigned int esr, unsigned long far)
 {
 	unsigned long tsk_stk = (unsigned long)current->stack;
 	unsigned long irq_stk = (unsigned long)this_cpu_read(irq_stack_ptr);
 	unsigned long ovf_stk = (unsigned long)this_cpu_ptr(overflow_stack);
-	unsigned int esr = read_sysreg(esr_el1);
-	unsigned long far = read_sysreg(far_el1);
-
-	arm64_enter_nmi(regs);
 
 	console_verbose();
 	pr_emerg("Insufficient stack space to handle exception!");
@@ -870,15 +939,11 @@ bool arm64_is_fatal_ras_serror(struct pt_regs *regs, unsigned int esr)
 	}
 }
 
-asmlinkage void noinstr do_serror(struct pt_regs *regs, unsigned int esr)
+void do_serror(struct pt_regs *regs, unsigned int esr)
 {
-	arm64_enter_nmi(regs);
-
 	/* non-RAS errors are not containable */
 	if (!arm64_is_ras_serror(esr) || arm64_is_fatal_ras_serror(regs, esr))
 		arm64_serror_panic(regs, esr);
-
-	arm64_exit_nmi(regs);
 }
 
 /* GENERIC_BUG traps */
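The shift tricks in __check_gt()/__check_le() above work because NZCV occupy PSTATE bits 31-28 (the PSR_{N,Z,C,V}_BIT values). The standalone userspace program below (not kernel code, and not part of this patch) verifies that the trick matches the architectural definition GT = (Z == 0 && N == V) for all sixteen NZCV combinations.

#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

#define PSR_N_BIT	0x80000000UL
#define PSR_Z_BIT	0x40000000UL
#define PSR_V_BIT	0x10000000UL

/* Same bit manipulation as the kernel's __check_gt() */
static bool check_gt(unsigned long pstate)
{
	unsigned long temp = pstate ^ (pstate << 3);	/* bit 31 = N ^ V */

	temp |= (pstate << 1);				/* bit 31 |= Z */
	return (temp & PSR_N_BIT) == 0;
}

int main(void)
{
	for (unsigned long nzcv = 0; nzcv < 16; nzcv++) {
		unsigned long pstate = nzcv << 28;
		bool n = pstate & PSR_N_BIT;
		bool z = pstate & PSR_Z_BIT;
		bool v = pstate & PSR_V_BIT;

		/* Architectural definition: GT is Z clear and N equal to V */
		assert(check_gt(pstate) == (!z && n == v));
	}
	puts("__check_gt() bit trick matches the architectural definition");
	return 0;
}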