commit     3b9adfbba5af9e1e83897e832fbe6f5778bfc5d3
author     Takashi Iwai <tiwai@suse.de>  2023-07-27 15:54:23 +0300
committer  Takashi Iwai <tiwai@suse.de>  2023-07-27 15:54:23 +0300
tree       a0f52a085c63c164643a5f49fae820752a58b2a2 /arch
parent     8019a4ab3d80c7af391a646cccff953753fc025f
parent     f85739c0b2b0d98a32f5ca4fcc5501d2b76df4f6
download   linux-3b9adfbba5af9e1e83897e832fbe6f5778bfc5d3.tar.xz
Merge tag 'asoc-fix-v6.5-rc3' of https://git.kernel.org/pub/scm/linux/kernel/git/broonie/sound into for-linus
ASoC: Fixes for v6.5
A collection of device specific fixes, none particularly remarkable.
There's a set of repetitive fixes for the RealTek drivers fixing an
issue with suspend that was replicated in multiple drivers.
Diffstat (limited to 'arch')
33 files changed, 227 insertions, 174 deletions
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index 8b6096753740..d3dd05bbfe23 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -727,6 +727,8 @@ struct kvm_vcpu_arch {
 #define DBG_SS_ACTIVE_PENDING	__vcpu_single_flag(sflags, BIT(5))
 /* PMUSERENR for the guest EL0 is on physical CPU */
 #define PMUSERENR_ON_CPU	__vcpu_single_flag(sflags, BIT(6))
+/* WFI instruction trapped */
+#define IN_WFI			__vcpu_single_flag(sflags, BIT(7))
 
 /* Pointer to the vcpu's SVE FFR for sve_{save,load}_state() */
diff --git a/arch/arm64/include/asm/kvm_pgtable.h b/arch/arm64/include/asm/kvm_pgtable.h
index 8294a9a7e566..929d355eae0a 100644
--- a/arch/arm64/include/asm/kvm_pgtable.h
+++ b/arch/arm64/include/asm/kvm_pgtable.h
@@ -608,22 +608,26 @@ int kvm_pgtable_stage2_wrprotect(struct kvm_pgtable *pgt, u64 addr, u64 size);
 kvm_pte_t kvm_pgtable_stage2_mkyoung(struct kvm_pgtable *pgt, u64 addr);
 
 /**
- * kvm_pgtable_stage2_mkold() - Clear the access flag in a page-table entry.
+ * kvm_pgtable_stage2_test_clear_young() - Test and optionally clear the access
+ *					    flag in a page-table entry.
  * @pgt:	Page-table structure initialised by kvm_pgtable_stage2_init*().
  * @addr:	Intermediate physical address to identify the page-table entry.
+ * @size:	Size of the address range to visit.
+ * @mkold:	True if the access flag should be cleared.
  *
  * The offset of @addr within a page is ignored.
  *
- * If there is a valid, leaf page-table entry used to translate @addr, then
- * clear the access flag in that entry.
+ * Tests and conditionally clears the access flag for every valid, leaf
+ * page-table entry used to translate the range [@addr, @addr + @size).
  *
  * Note that it is the caller's responsibility to invalidate the TLB after
  * calling this function to ensure that the updated permissions are visible
  * to the CPUs.
  *
- * Return: The old page-table entry prior to clearing the flag, 0 on failure.
+ * Return: True if any of the visited PTEs had the access flag set.
  */
-kvm_pte_t kvm_pgtable_stage2_mkold(struct kvm_pgtable *pgt, u64 addr);
+bool kvm_pgtable_stage2_test_clear_young(struct kvm_pgtable *pgt, u64 addr,
+					 u64 size, bool mkold);
 
 /**
  * kvm_pgtable_stage2_relax_perms() - Relax the permissions enforced by a
@@ -646,18 +650,6 @@ int kvm_pgtable_stage2_relax_perms(struct kvm_pgtable *pgt, u64 addr,
				   enum kvm_pgtable_prot prot);
 
 /**
- * kvm_pgtable_stage2_is_young() - Test whether a page-table entry has the
- *				   access flag set.
- * @pgt:	Page-table structure initialised by kvm_pgtable_stage2_init*().
- * @addr:	Intermediate physical address to identify the page-table entry.
- *
- * The offset of @addr within a page is ignored.
- *
- * Return: True if the page-table entry has the access flag set, false otherwise.
- */
-bool kvm_pgtable_stage2_is_young(struct kvm_pgtable *pgt, u64 addr);
-
-/**
  * kvm_pgtable_stage2_flush_range() - Clean and invalidate data cache to Point
  *				      of Coherency for guest stage-2 address
  *				      range.
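[Editor's note: the new kvm_pgtable_stage2_test_clear_young() above folds the old mkold/is_young pair into a single range-based primitive. The standalone C sketch below only illustrates the test-versus-clear semantics; the array, AF_BIT position, and function names are invented for illustration and are not the kernel implementation.]

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define AF_BIT (1u << 10)	/* hypothetical access-flag bit position */

/* Walk a toy "page table" (a flat array standing in for stage-2 leaves):
 * report whether any entry in the range was young, clearing the flag
 * only when mkold is true -- mirroring the bool/mkold API split above. */
static bool test_clear_young(uint64_t *ptes, size_t start, size_t n, bool mkold)
{
	bool young = false;

	for (size_t i = start; i < start + n; i++) {
		if (!(ptes[i] & AF_BIT))
			continue;
		young = true;
		if (mkold)
			ptes[i] &= ~AF_BIT;
	}
	return young;
}

int main(void)
{
	uint64_t ptes[8] = { [2] = AF_BIT, [5] = AF_BIT };

	printf("age:      %d\n", test_clear_young(ptes, 0, 8, true));	/* 1, clears */
	printf("test-age: %d\n", test_clear_young(ptes, 0, 8, false));	/* 0 now */
	return 0;
}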
diff --git a/arch/arm64/include/asm/virt.h b/arch/arm64/include/asm/virt.h
index 5227db7640c8..261d6e9df2e1 100644
--- a/arch/arm64/include/asm/virt.h
+++ b/arch/arm64/include/asm/virt.h
@@ -78,6 +78,7 @@ extern u32 __boot_cpu_mode[2];
 
 void __hyp_set_vectors(phys_addr_t phys_vector_base);
 void __hyp_reset_vectors(void);
+bool is_kvm_arm_initialised(void);
 
 DECLARE_STATIC_KEY_FALSE(kvm_protected_mode_initialized);
diff --git a/arch/arm64/kernel/fpsimd.c b/arch/arm64/kernel/fpsimd.c
index 7a1aeb95d7c3..89d54a5242d1 100644
--- a/arch/arm64/kernel/fpsimd.c
+++ b/arch/arm64/kernel/fpsimd.c
@@ -847,6 +847,8 @@ void sve_sync_from_fpsimd_zeropad(struct task_struct *task)
 int vec_set_vector_length(struct task_struct *task, enum vec_type type,
			  unsigned long vl, unsigned long flags)
 {
+	bool free_sme = false;
+
	if (flags & ~(unsigned long)(PR_SVE_VL_INHERIT |
				     PR_SVE_SET_VL_ONEXEC))
		return -EINVAL;
@@ -897,21 +899,36 @@ int vec_set_vector_length(struct task_struct *task, enum vec_type type,
		task->thread.fp_type = FP_STATE_FPSIMD;
	}
 
-	if (system_supports_sme() && type == ARM64_VEC_SME) {
-		task->thread.svcr &= ~(SVCR_SM_MASK |
-				       SVCR_ZA_MASK);
-		clear_thread_flag(TIF_SME);
+	if (system_supports_sme()) {
+		if (type == ARM64_VEC_SME ||
+		    !(task->thread.svcr & (SVCR_SM_MASK | SVCR_ZA_MASK))) {
+			/*
+			 * We are changing the SME VL or weren't using
+			 * SME anyway, discard the state and force a
+			 * reallocation.
+			 */
+			task->thread.svcr &= ~(SVCR_SM_MASK |
+					       SVCR_ZA_MASK);
+			clear_thread_flag(TIF_SME);
+			free_sme = true;
+		}
	}
 
	if (task == current)
		put_cpu_fpsimd_context();
 
	/*
-	 * Force reallocation of task SVE and SME state to the correct
-	 * size on next use:
+	 * Free the changed states if they are not in use, SME will be
+	 * reallocated to the correct size on next use and we just
+	 * allocate SVE now in case it is needed for use in streaming
+	 * mode.
	 */
-	sve_free(task);
-	if (system_supports_sme() && type == ARM64_VEC_SME)
+	if (system_supports_sve()) {
+		sve_free(task);
+		sve_alloc(task, true);
+	}
+
+	if (free_sme)
		sme_free(task);
 
	task_set_vl(task, type, vl);
diff --git a/arch/arm64/kernel/vdso/vgettimeofday.c b/arch/arm64/kernel/vdso/vgettimeofday.c
index 4236cf34d7d9..9941c5b04f15 100644
--- a/arch/arm64/kernel/vdso/vgettimeofday.c
+++ b/arch/arm64/kernel/vdso/vgettimeofday.c
@@ -6,6 +6,10 @@
  *
  */
 
+int __kernel_clock_gettime(clockid_t clock, struct __kernel_timespec *ts);
+int __kernel_gettimeofday(struct __kernel_old_timeval *tv, struct timezone *tz);
+int __kernel_clock_getres(clockid_t clock_id, struct __kernel_timespec *res);
+
 int __kernel_clock_gettime(clockid_t clock, struct __kernel_timespec *ts)
 {
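[Editor's note: the vgettimeofday.c hunk adds forward declarations for the vDSO entry points, the usual way to satisfy -Wmissing-prototypes when no header declares an externally visible function. A minimal sketch of the same pattern, with an invented function name:]

/* Build with: cc -Wmissing-prototypes -c demo.c
 * Without the declaration, the non-static definition below would
 * trigger -Wmissing-prototypes. */
int demo_entry(int x);		/* self-declaration silences the warning */

int demo_entry(int x)
{
	return x + 1;
}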
diff --git a/arch/arm64/kvm/arch_timer.c b/arch/arm64/kvm/arch_timer.c
index 0696732fa38c..6dcdae4d38cb 100644
--- a/arch/arm64/kvm/arch_timer.c
+++ b/arch/arm64/kvm/arch_timer.c
@@ -827,8 +827,8 @@ static void timer_set_traps(struct kvm_vcpu *vcpu, struct timer_map *map)
	assign_clear_set_bit(tpt, CNTHCTL_EL1PCEN << 10, set, clr);
	assign_clear_set_bit(tpc, CNTHCTL_EL1PCTEN << 10, set, clr);
 
-	/* This only happens on VHE, so use the CNTKCTL_EL1 accessor */
-	sysreg_clear_set(cntkctl_el1, clr, set);
+	/* This only happens on VHE, so use the CNTHCTL_EL2 accessor. */
+	sysreg_clear_set(cnthctl_el2, clr, set);
 }
 
 void kvm_timer_vcpu_load(struct kvm_vcpu *vcpu)
@@ -1563,7 +1563,7 @@ no_vgic:
 void kvm_timer_init_vhe(void)
 {
	if (cpus_have_final_cap(ARM64_HAS_ECV_CNTPOFF))
-		sysreg_clear_set(cntkctl_el1, 0, CNTHCTL_ECV);
+		sysreg_clear_set(cnthctl_el2, 0, CNTHCTL_ECV);
 }
 
 int kvm_arm_timer_set_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
index c2c14059f6a8..72dc53a75d1c 100644
--- a/arch/arm64/kvm/arm.c
+++ b/arch/arm64/kvm/arm.c
@@ -53,11 +53,16 @@ DECLARE_KVM_NVHE_PER_CPU(struct kvm_nvhe_init_params, kvm_init_params);
 DECLARE_KVM_NVHE_PER_CPU(struct kvm_cpu_context, kvm_hyp_ctxt);
 
-static bool vgic_present;
+static bool vgic_present, kvm_arm_initialised;
 
 static DEFINE_PER_CPU(unsigned char, kvm_arm_hardware_enabled);
 DEFINE_STATIC_KEY_FALSE(userspace_irqchip_in_use);
 
+bool is_kvm_arm_initialised(void)
+{
+	return kvm_arm_initialised;
+}
+
 int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
 {
	return kvm_vcpu_exiting_guest_mode(vcpu) == IN_GUEST_MODE;
@@ -713,13 +718,15 @@ void kvm_vcpu_wfi(struct kvm_vcpu *vcpu)
	 */
	preempt_disable();
	kvm_vgic_vmcr_sync(vcpu);
-	vgic_v4_put(vcpu, true);
+	vcpu_set_flag(vcpu, IN_WFI);
+	vgic_v4_put(vcpu);
	preempt_enable();
 
	kvm_vcpu_halt(vcpu);
	vcpu_clear_flag(vcpu, IN_WFIT);
 
	preempt_disable();
+	vcpu_clear_flag(vcpu, IN_WFI);
	vgic_v4_load(vcpu);
	preempt_enable();
 }
@@ -787,7 +794,7 @@ static int check_vcpu_requests(struct kvm_vcpu *vcpu)
		if (kvm_check_request(KVM_REQ_RELOAD_GICv4, vcpu)) {
			/* The distributor enable bits were changed */
			preempt_disable();
-			vgic_v4_put(vcpu, false);
+			vgic_v4_put(vcpu);
			vgic_v4_load(vcpu);
			preempt_enable();
		}
@@ -1867,8 +1874,17 @@ static void _kvm_arch_hardware_enable(void *discard)
 
 int kvm_arch_hardware_enable(void)
 {
-	int was_enabled = __this_cpu_read(kvm_arm_hardware_enabled);
+	int was_enabled;
+
+	/*
+	 * Most calls to this function are made with migration
+	 * disabled, but not with preemption disabled. The former is
+	 * enough to ensure correctness, but most of the helpers
+	 * expect the latter and will throw a tantrum otherwise.
+	 */
+	preempt_disable();
 
+	was_enabled = __this_cpu_read(kvm_arm_hardware_enabled);
	_kvm_arch_hardware_enable(NULL);
 
	if (!was_enabled) {
@@ -1876,6 +1892,8 @@ int kvm_arch_hardware_enable(void)
		kvm_timer_cpu_up();
	}
 
+	preempt_enable();
+
	return 0;
 }
 
@@ -2482,6 +2500,8 @@ static __init int kvm_arm_init(void)
	if (err)
		goto out_subs;
 
+	kvm_arm_initialised = true;
+
	return 0;
 
 out_subs:
diff --git a/arch/arm64/kvm/hyp/hyp-entry.S b/arch/arm64/kvm/hyp/hyp-entry.S
index 8f3f93fa119e..03f97d71984c 100644
--- a/arch/arm64/kvm/hyp/hyp-entry.S
+++ b/arch/arm64/kvm/hyp/hyp-entry.S
@@ -154,6 +154,12 @@ SYM_CODE_END(\label)
	esb
	stp	x0, x1, [sp, #-16]!
 662:
+	/*
+	 * The spectre vectors __bp_harden_hyp_vecs generate br instructions
+	 * at runtime that jump to offset 8 of __kvm_hyp_vector.
+	 * As hyp .text is a guarded section, it needs bti j.
+	 */
+	bti	j
	b	\target
 
 check_preamble_length 661b, 662b
@@ -165,6 +171,8 @@ check_preamble_length 661b, 662b
	nop
	stp	x0, x1, [sp, #-16]!
 662:
+	/* Check valid_vect */
+	bti	j
	b	\target
 
 check_preamble_length 661b, 662b
diff --git a/arch/arm64/kvm/hyp/nvhe/host.S b/arch/arm64/kvm/hyp/nvhe/host.S
index c87c63133e10..7693a6757cd7 100644
--- a/arch/arm64/kvm/hyp/nvhe/host.S
+++ b/arch/arm64/kvm/hyp/nvhe/host.S
@@ -297,3 +297,13 @@ SYM_CODE_START(__kvm_hyp_host_forward_smc)
 
	ret
 SYM_CODE_END(__kvm_hyp_host_forward_smc)
+
+/*
+ * kvm_host_psci_cpu_entry is called through a br instruction, which requires
+ * a bti j instruction, as compilers (gcc and llvm) don't insert bti j for
+ * external functions, but bti c instead.
+ */
+SYM_CODE_START(kvm_host_psci_cpu_entry)
+	bti	j
+	b	__kvm_host_psci_cpu_entry
+SYM_CODE_END(kvm_host_psci_cpu_entry)
diff --git a/arch/arm64/kvm/hyp/nvhe/psci-relay.c b/arch/arm64/kvm/hyp/nvhe/psci-relay.c
index 08508783ec3d..24543d2a3490 100644
--- a/arch/arm64/kvm/hyp/nvhe/psci-relay.c
+++ b/arch/arm64/kvm/hyp/nvhe/psci-relay.c
@@ -200,7 +200,7 @@ static int psci_system_suspend(u64 func_id, struct kvm_cpu_context *host_ctxt)
			 __hyp_pa(init_params), 0);
 }
 
-asmlinkage void __noreturn kvm_host_psci_cpu_entry(bool is_cpu_on)
+asmlinkage void __noreturn __kvm_host_psci_cpu_entry(bool is_cpu_on)
 {
	struct psci_boot_args *boot_args;
	struct kvm_cpu_context *host_ctxt;
diff --git a/arch/arm64/kvm/hyp/pgtable.c b/arch/arm64/kvm/hyp/pgtable.c
index aa740a974e02..f7a93ef29250 100644
--- a/arch/arm64/kvm/hyp/pgtable.c
+++ b/arch/arm64/kvm/hyp/pgtable.c
@@ -1195,25 +1195,54 @@ kvm_pte_t kvm_pgtable_stage2_mkyoung(struct kvm_pgtable *pgt, u64 addr)
	return pte;
 }
 
-kvm_pte_t kvm_pgtable_stage2_mkold(struct kvm_pgtable *pgt, u64 addr)
+struct stage2_age_data {
+	bool	mkold;
+	bool	young;
+};
+
+static int stage2_age_walker(const struct kvm_pgtable_visit_ctx *ctx,
+			     enum kvm_pgtable_walk_flags visit)
 {
-	kvm_pte_t pte = 0;
-	stage2_update_leaf_attrs(pgt, addr, 1, 0, KVM_PTE_LEAF_ATTR_LO_S2_AF,
-				 &pte, NULL, 0);
+	kvm_pte_t new = ctx->old & ~KVM_PTE_LEAF_ATTR_LO_S2_AF;
+	struct stage2_age_data *data = ctx->arg;
+
+	if (!kvm_pte_valid(ctx->old) || new == ctx->old)
+		return 0;
+
+	data->young = true;
+
+	/*
+	 * stage2_age_walker() is always called while holding the MMU lock for
+	 * write, so this will always succeed. Nonetheless, this deliberately
+	 * follows the race detection pattern of the other stage-2 walkers in
+	 * case the locking mechanics of the MMU notifiers is ever changed.
+	 */
+	if (data->mkold && !stage2_try_set_pte(ctx, new))
+		return -EAGAIN;
+
	/*
	 * "But where's the TLBI?!", you scream.
	 * "Over in the core code", I sigh.
	 *
	 * See the '->clear_flush_young()' callback on the KVM mmu notifier.
	 */
-	return pte;
+	return 0;
 }
 
-bool kvm_pgtable_stage2_is_young(struct kvm_pgtable *pgt, u64 addr)
+bool kvm_pgtable_stage2_test_clear_young(struct kvm_pgtable *pgt, u64 addr,
+					 u64 size, bool mkold)
 {
-	kvm_pte_t pte = 0;
-	stage2_update_leaf_attrs(pgt, addr, 1, 0, 0, &pte, NULL, 0);
-	return pte & KVM_PTE_LEAF_ATTR_LO_S2_AF;
+	struct stage2_age_data data = {
+		.mkold = mkold,
+	};
+	struct kvm_pgtable_walker walker = {
+		.cb = stage2_age_walker,
+		.arg = &data,
+		.flags = KVM_PGTABLE_WALK_LEAF,
+	};
+
+	WARN_ON(kvm_pgtable_walk(pgt, addr, size, &walker));
+	return data.young;
 }
 
 int kvm_pgtable_stage2_relax_perms(struct kvm_pgtable *pgt, u64 addr,
diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c
index 6db9ef288ec3..d3b4feed460c 100644
--- a/arch/arm64/kvm/mmu.c
+++ b/arch/arm64/kvm/mmu.c
@@ -1756,27 +1756,25 @@ bool kvm_set_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
 bool kvm_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
 {
	u64 size = (range->end - range->start) << PAGE_SHIFT;
-	kvm_pte_t kpte;
-	pte_t pte;
 
	if (!kvm->arch.mmu.pgt)
		return false;
 
-	WARN_ON(size != PAGE_SIZE && size != PMD_SIZE && size != PUD_SIZE);
-
-	kpte = kvm_pgtable_stage2_mkold(kvm->arch.mmu.pgt,
-					range->start << PAGE_SHIFT);
-	pte = __pte(kpte);
-	return pte_valid(pte) && pte_young(pte);
+	return kvm_pgtable_stage2_test_clear_young(kvm->arch.mmu.pgt,
						   range->start << PAGE_SHIFT,
						   size, true);
 }
 
 bool kvm_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
 {
+	u64 size = (range->end - range->start) << PAGE_SHIFT;
+
	if (!kvm->arch.mmu.pgt)
		return false;
 
-	return kvm_pgtable_stage2_is_young(kvm->arch.mmu.pgt,
-					   range->start << PAGE_SHIFT);
+	return kvm_pgtable_stage2_test_clear_young(kvm->arch.mmu.pgt,
						   range->start << PAGE_SHIFT,
						   size, false);
 }
 
 phys_addr_t kvm_mmu_get_httbr(void)
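[Editor's note: the pgtable.c change above moves the aging logic into a visitor callback driven by the generic page-table walker. The userspace C analogue below shows only that callback-plus-arg shape; the struct names mirror the kernel's but the walker itself is invented for illustration.]

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct walk_data {		/* analogue of struct stage2_age_data */
	bool mkold;
	bool young;
};

struct walker {			/* analogue of struct kvm_pgtable_walker */
	int (*cb)(uint64_t *pte, void *arg);
	void *arg;
};

static int age_cb(uint64_t *pte, void *arg)
{
	struct walk_data *d = arg;
	uint64_t af = 1u << 10;		/* hypothetical access flag */

	if (!(*pte & af))
		return 0;		/* nothing to do for this leaf */
	d->young = true;
	if (d->mkold)
		*pte &= ~af;
	return 0;			/* non-zero would abort the walk */
}

static int walk(uint64_t *ptes, size_t n, struct walker *w)
{
	for (size_t i = 0; i < n; i++) {
		int ret = w->cb(&ptes[i], w->arg);
		if (ret)
			return ret;
	}
	return 0;
}

int main(void)
{
	uint64_t ptes[4] = { 1u << 10, 0, 1u << 10, 0 };
	struct walk_data data = { .mkold = true };
	struct walker w = { .cb = age_cb, .arg = &data };

	walk(ptes, 4, &w);
	printf("young = %d\n", data.young);
	return 0;
}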
diff --git a/arch/arm64/kvm/pkvm.c b/arch/arm64/kvm/pkvm.c
index 994a494703c3..6ff3ec18c925 100644
--- a/arch/arm64/kvm/pkvm.c
+++ b/arch/arm64/kvm/pkvm.c
@@ -244,7 +244,7 @@ static int __init finalize_pkvm(void)
 {
	int ret;
 
-	if (!is_protected_kvm_enabled())
+	if (!is_protected_kvm_enabled() || !is_kvm_arm_initialised())
		return 0;
 
	/*
diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
index bd3431823ec5..2ca2973abe66 100644
--- a/arch/arm64/kvm/sys_regs.c
+++ b/arch/arm64/kvm/sys_regs.c
@@ -986,7 +986,6 @@ static bool access_pmu_evtyper(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
 
	if (p->is_write) {
		kvm_pmu_set_counter_event_type(vcpu, p->regval, idx);
-		__vcpu_sys_reg(vcpu, reg) = p->regval & ARMV8_PMU_EVTYPE_MASK;
		kvm_vcpu_pmu_restore_guest(vcpu);
	} else {
		p->regval = __vcpu_sys_reg(vcpu, reg) & ARMV8_PMU_EVTYPE_MASK;
@@ -1115,18 +1114,19 @@ static bool access_pmuserenr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
	{ SYS_DESC(SYS_DBGWCRn_EL1(n)),					\
	  trap_wcr, reset_wcr, 0, 0, get_wcr, set_wcr }
 
-#define PMU_SYS_REG(r)							\
-	SYS_DESC(r), .reset = reset_pmu_reg, .visibility = pmu_visibility
+#define PMU_SYS_REG(name)						\
+	SYS_DESC(SYS_##name), .reset = reset_pmu_reg,			\
+	.visibility = pmu_visibility
 
 /* Macro to expand the PMEVCNTRn_EL0 register */
 #define PMU_PMEVCNTR_EL0(n)						\
-	{ PMU_SYS_REG(SYS_PMEVCNTRn_EL0(n)),				\
+	{ PMU_SYS_REG(PMEVCNTRn_EL0(n)),				\
	  .reset = reset_pmevcntr, .get_user = get_pmu_evcntr,		\
	  .access = access_pmu_evcntr, .reg = (PMEVCNTR0_EL0 + n), }
 
 /* Macro to expand the PMEVTYPERn_EL0 register */
 #define PMU_PMEVTYPER_EL0(n)						\
-	{ PMU_SYS_REG(SYS_PMEVTYPERn_EL0(n)),				\
+	{ PMU_SYS_REG(PMEVTYPERn_EL0(n)),				\
	  .reset = reset_pmevtyper,					\
	  .access = access_pmu_evtyper, .reg = (PMEVTYPER0_EL0 + n), }
@@ -2115,9 +2115,9 @@ static const struct sys_reg_desc sys_reg_descs[] = {
	{ SYS_DESC(SYS_PMBSR_EL1), undef_access },
	/* PMBIDR_EL1 is not trapped */
 
-	{ PMU_SYS_REG(SYS_PMINTENSET_EL1),
+	{ PMU_SYS_REG(PMINTENSET_EL1),
	  .access = access_pminten, .reg = PMINTENSET_EL1 },
-	{ PMU_SYS_REG(SYS_PMINTENCLR_EL1),
+	{ PMU_SYS_REG(PMINTENCLR_EL1),
	  .access = access_pminten, .reg = PMINTENSET_EL1 },
	{ SYS_DESC(SYS_PMMIR_EL1), trap_raz_wi },
@@ -2164,41 +2164,41 @@ static const struct sys_reg_desc sys_reg_descs[] = {
	{ SYS_DESC(SYS_CTR_EL0), access_ctr },
	{ SYS_DESC(SYS_SVCR), undef_access },
 
-	{ PMU_SYS_REG(SYS_PMCR_EL0), .access = access_pmcr,
+	{ PMU_SYS_REG(PMCR_EL0), .access = access_pmcr,
	  .reset = reset_pmcr, .reg = PMCR_EL0 },
-	{ PMU_SYS_REG(SYS_PMCNTENSET_EL0),
+	{ PMU_SYS_REG(PMCNTENSET_EL0),
	  .access = access_pmcnten, .reg = PMCNTENSET_EL0 },
-	{ PMU_SYS_REG(SYS_PMCNTENCLR_EL0),
+	{ PMU_SYS_REG(PMCNTENCLR_EL0),
	  .access = access_pmcnten, .reg = PMCNTENSET_EL0 },
-	{ PMU_SYS_REG(SYS_PMOVSCLR_EL0),
+	{ PMU_SYS_REG(PMOVSCLR_EL0),
	  .access = access_pmovs, .reg = PMOVSSET_EL0 },
	/*
	 * PM_SWINC_EL0 is exposed to userspace as RAZ/WI, as it was
	 * previously (and pointlessly) advertised in the past...
	 */
-	{ PMU_SYS_REG(SYS_PMSWINC_EL0),
+	{ PMU_SYS_REG(PMSWINC_EL0),
	  .get_user = get_raz_reg, .set_user = set_wi_reg,
	  .access = access_pmswinc, .reset = NULL },
-	{ PMU_SYS_REG(SYS_PMSELR_EL0),
+	{ PMU_SYS_REG(PMSELR_EL0),
	  .access = access_pmselr, .reset = reset_pmselr, .reg = PMSELR_EL0 },
-	{ PMU_SYS_REG(SYS_PMCEID0_EL0),
+	{ PMU_SYS_REG(PMCEID0_EL0),
	  .access = access_pmceid, .reset = NULL },
-	{ PMU_SYS_REG(SYS_PMCEID1_EL0),
+	{ PMU_SYS_REG(PMCEID1_EL0),
	  .access = access_pmceid, .reset = NULL },
-	{ PMU_SYS_REG(SYS_PMCCNTR_EL0),
+	{ PMU_SYS_REG(PMCCNTR_EL0),
	  .access = access_pmu_evcntr, .reset = reset_unknown,
	  .reg = PMCCNTR_EL0, .get_user = get_pmu_evcntr},
-	{ PMU_SYS_REG(SYS_PMXEVTYPER_EL0),
+	{ PMU_SYS_REG(PMXEVTYPER_EL0),
	  .access = access_pmu_evtyper, .reset = NULL },
-	{ PMU_SYS_REG(SYS_PMXEVCNTR_EL0),
+	{ PMU_SYS_REG(PMXEVCNTR_EL0),
	  .access = access_pmu_evcntr, .reset = NULL },
	/*
	 * PMUSERENR_EL0 resets as unknown in 64bit mode while it resets as zero
	 * in 32bit mode. Here we choose to reset it as zero for consistency.
	 */
-	{ PMU_SYS_REG(SYS_PMUSERENR_EL0), .access = access_pmuserenr,
+	{ PMU_SYS_REG(PMUSERENR_EL0), .access = access_pmuserenr,
	  .reset = reset_val, .reg = PMUSERENR_EL0, .val = 0 },
-	{ PMU_SYS_REG(SYS_PMOVSSET_EL0),
+	{ PMU_SYS_REG(PMOVSSET_EL0),
	  .access = access_pmovs, .reg = PMOVSSET_EL0 },
 
	{ SYS_DESC(SYS_TPIDR_EL0), NULL, reset_unknown, TPIDR_EL0 },
@@ -2354,7 +2354,7 @@ static const struct sys_reg_desc sys_reg_descs[] = {
	 * PMCCFILTR_EL0 resets as unknown in 64bit mode while it resets as zero
	 * in 32bit mode. Here we choose to reset it as zero for consistency.
	 */
-	{ PMU_SYS_REG(SYS_PMCCFILTR_EL0), .access = access_pmu_evtyper,
+	{ PMU_SYS_REG(PMCCFILTR_EL0), .access = access_pmu_evtyper,
	  .reset = reset_val, .reg = PMCCFILTR_EL0, .val = 0 },
 
	EL2_REG(VPIDR_EL2, access_rw, reset_unknown, 0),
diff --git a/arch/arm64/kvm/vgic/vgic-v3.c b/arch/arm64/kvm/vgic/vgic-v3.c
index c3b8e132d599..3dfc8b84e03e 100644
--- a/arch/arm64/kvm/vgic/vgic-v3.c
+++ b/arch/arm64/kvm/vgic/vgic-v3.c
@@ -749,7 +749,7 @@ void vgic_v3_put(struct kvm_vcpu *vcpu)
 {
	struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;
 
-	WARN_ON(vgic_v4_put(vcpu, false));
+	WARN_ON(vgic_v4_put(vcpu));
 
	vgic_v3_vmcr_sync(vcpu);
diff --git a/arch/arm64/kvm/vgic/vgic-v4.c b/arch/arm64/kvm/vgic/vgic-v4.c
index c1c28fe680ba..339a55194b2c 100644
--- a/arch/arm64/kvm/vgic/vgic-v4.c
+++ b/arch/arm64/kvm/vgic/vgic-v4.c
@@ -336,14 +336,14 @@ void vgic_v4_teardown(struct kvm *kvm)
	its_vm->vpes = NULL;
 }
 
-int vgic_v4_put(struct kvm_vcpu *vcpu, bool need_db)
+int vgic_v4_put(struct kvm_vcpu *vcpu)
 {
	struct its_vpe *vpe = &vcpu->arch.vgic_cpu.vgic_v3.its_vpe;
 
	if (!vgic_supports_direct_msis(vcpu->kvm) || !vpe->resident)
		return 0;
 
-	return its_make_vpe_non_resident(vpe, need_db);
+	return its_make_vpe_non_resident(vpe, !!vcpu_get_flag(vcpu, IN_WFI));
 }
 
 int vgic_v4_load(struct kvm_vcpu *vcpu)
@@ -354,6 +354,9 @@ int vgic_v4_load(struct kvm_vcpu *vcpu)
	if (!vgic_supports_direct_msis(vcpu->kvm) || vpe->resident)
		return 0;
 
+	if (vcpu_get_flag(vcpu, IN_WFI))
+		return 0;
+
	/*
	 * Before making the VPE resident, make sure the redistributor
	 * corresponding to our current CPU expects us here. See the
diff --git a/arch/arm64/mm/trans_pgd.c b/arch/arm64/mm/trans_pgd.c
index 4ea2eefbc053..e9ad391fc8ea 100644
--- a/arch/arm64/mm/trans_pgd.c
+++ b/arch/arm64/mm/trans_pgd.c
@@ -24,6 +24,7 @@
 #include <linux/bug.h>
 #include <linux/mm.h>
 #include <linux/mmzone.h>
+#include <linux/kfence.h>
 
 static void *trans_alloc(struct trans_pgd_info *info)
 {
@@ -41,7 +42,8 @@ static void _copy_pte(pte_t *dst_ptep, pte_t *src_ptep, unsigned long addr)
		 * the temporary mappings we use during restore.
		 */
		set_pte(dst_ptep, pte_mkwrite(pte));
-	} else if (debug_pagealloc_enabled() && !pte_none(pte)) {
+	} else if ((debug_pagealloc_enabled() ||
+		   is_kfence_address((void *)addr)) && !pte_none(pte)) {
		/*
		 * debug_pagealloc will removed the PTE_VALID bit if
		 * the page isn't in use by the resume kernel. It may have
diff --git a/arch/arm64/net/bpf_jit_comp.c b/arch/arm64/net/bpf_jit_comp.c
index 145b540ec34f..ec2174838f2a 100644
--- a/arch/arm64/net/bpf_jit_comp.c
+++ b/arch/arm64/net/bpf_jit_comp.c
@@ -322,7 +322,13 @@ static int build_prologue(struct jit_ctx *ctx, bool ebpf_from_cbpf)
	 *
	 */
 
-	emit_bti(A64_BTI_C, ctx);
+	/* A bpf function may be invoked by 3 instruction types:
+	 * 1. bl, attached via freplace to a bpf prog via short jump
+	 * 2. br, attached via freplace to a bpf prog via long jump
+	 * 3. blr, working as a function pointer, used by emit_call.
+	 * So BTI_JC should be used here to support both br and blr.
+	 */
+	emit_bti(A64_BTI_JC, ctx);
 
	emit(A64_MOV(1, A64_R(9), A64_LR), ctx);
	emit(A64_NOP, ctx);
diff --git a/arch/arm64/tools/sysreg b/arch/arm64/tools/sysreg
index 1ea4a3dc68f8..65866bf819c3 100644
--- a/arch/arm64/tools/sysreg
+++ b/arch/arm64/tools/sysreg
@@ -2017,7 +2017,7 @@ Field	0	SM
 EndSysreg
 
 SysregFields	HFGxTR_EL2
-Field	63	nAMIAIR2_EL1
+Field	63	nAMAIR2_EL1
 Field	62	nMAIR2_EL1
 Field	61	nS2POR_EL1
 Field	60	nPOR_EL1
@@ -2032,9 +2032,9 @@ Field	52	nGCS_EL0
 Res0	51
 Field	50	nACCDATA_EL1
 Field	49	ERXADDR_EL1
-Field	48	EXRPFGCDN_EL1
-Field	47	EXPFGCTL_EL1
-Field	46	EXPFGF_EL1
+Field	48	ERXPFGCDN_EL1
+Field	47	ERXPFGCTL_EL1
+Field	46	ERXPFGF_EL1
 Field	45	ERXMISCn_EL1
 Field	44	ERXSTATUS_EL1
 Field	43	ERXCTLR_EL1
@@ -2049,8 +2049,8 @@ Field	35	TPIDR_EL0
 Field	34	TPIDRRO_EL0
 Field	33	TPIDR_EL1
 Field	32	TCR_EL1
-Field	31	SCTXNUM_EL0
-Field	30	SCTXNUM_EL1
+Field	31	SCXTNUM_EL0
+Field	30	SCXTNUM_EL1
 Field	29	SCTLR_EL1
 Field	28	REVIDR_EL1
 Field	27	PAR_EL1
diff --git a/arch/ia64/kernel/sys_ia64.c b/arch/ia64/kernel/sys_ia64.c
index 6e948d015332..eb561cc93632 100644
--- a/arch/ia64/kernel/sys_ia64.c
+++ b/arch/ia64/kernel/sys_ia64.c
@@ -63,7 +63,7 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
	info.low_limit = addr;
	info.high_limit = TASK_SIZE;
	info.align_mask = align_mask;
-	info.align_offset = 0;
+	info.align_offset = pgoff << PAGE_SHIFT;
	return vm_unmapped_area(&info);
 }
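[Editor's note: the ia64 fix above makes the chosen address congruent to the file offset modulo the alignment, so every mapping of a file lands on the same cache colour. The toy C model below mirrors only the adjustment rule that vm_unmapped_area() applies internally; the function name, base address, and 1 MiB colouring granule are invented for illustration.]

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12

/* Move base up until (addr & align_mask) == (align_offset & align_mask),
 * i.e. addr is congruent to align_offset modulo (align_mask + 1). */
static uint64_t color_align(uint64_t base, uint64_t align_mask,
			    uint64_t align_offset)
{
	return base + ((align_offset - base) & align_mask);
}

int main(void)
{
	uint64_t align_mask = (1ull << 20) - 1;	/* hypothetical 1 MiB colouring */
	uint64_t pgoff = 3;
	uint64_t base = 0x123456789;

	/* align_offset = 0 ignores the file offset... */
	printf("offset 0:      %#llx\n",
	       (unsigned long long)color_align(base, align_mask, 0));
	/* ...while align_offset = pgoff << PAGE_SHIFT keeps addr minus the
	 * file offset a multiple of the alignment, as in the fix above. */
	printf("pgoff-shifted: %#llx\n",
	       (unsigned long long)color_align(base, align_mask,
					       pgoff << PAGE_SHIFT));
	return 0;
}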
diff --git a/arch/parisc/kernel/sys_parisc.c b/arch/parisc/kernel/sys_parisc.c
index 9915062d5243..ca2d537e25b1 100644
--- a/arch/parisc/kernel/sys_parisc.c
+++ b/arch/parisc/kernel/sys_parisc.c
@@ -27,12 +27,17 @@
 #include <linux/elf-randomize.h>
 
 /*
- * Construct an artificial page offset for the mapping based on the physical
+ * Construct an artificial page offset for the mapping based on the virtual
 * address of the kernel file mapping variable.
+ * If filp is zero the calculated pgoff value aliases the memory of the given
+ * address. This is useful for io_uring where the mapping shall alias a kernel
+ * address and a userspace address where both the kernel and the userspace
+ * access the same memory region.
 */
-#define GET_FILP_PGOFF(filp)		\
-	(filp ? (((unsigned long) filp->f_mapping) >> 8)	\
-	 & ((SHM_COLOUR-1) >> PAGE_SHIFT) : 0UL)
+#define GET_FILP_PGOFF(filp, addr)		\
+	((filp ? (((unsigned long) filp->f_mapping) >> 8)	\
+	 & ((SHM_COLOUR-1) >> PAGE_SHIFT) : 0UL)	\
+	  + (addr >> PAGE_SHIFT))
 
 static unsigned long shared_align_offset(unsigned long filp_pgoff,
					 unsigned long pgoff)
@@ -112,7 +117,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
	do_color_align = 0;
	if (filp || (flags & MAP_SHARED))
		do_color_align = 1;
-	filp_pgoff = GET_FILP_PGOFF(filp);
+	filp_pgoff = GET_FILP_PGOFF(filp, addr);
 
	if (flags & MAP_FIXED) {
		/* Even MAP_FIXED mappings must reside within TASK_SIZE */
diff --git a/arch/powerpc/crypto/.gitignore b/arch/powerpc/crypto/.gitignore
new file mode 100644
index 000000000000..e1094f08f713
--- /dev/null
+++ b/arch/powerpc/crypto/.gitignore
@@ -0,0 +1,3 @@
+# SPDX-License-Identifier: GPL-2.0-only
+aesp10-ppc.S
+ghashp10-ppc.S
diff --git a/arch/powerpc/include/asm/bug.h b/arch/powerpc/include/asm/bug.h
index ef42adb44aa3..00c6b0b4ede4 100644
--- a/arch/powerpc/include/asm/bug.h
+++ b/arch/powerpc/include/asm/bug.h
@@ -4,14 +4,13 @@
 #ifdef __KERNEL__
 
 #include <asm/asm-compat.h>
-#include <asm/extable.h>
 
 #ifdef CONFIG_BUG
 
 #ifdef __ASSEMBLY__
 #include <asm/asm-offsets.h>
 #ifdef CONFIG_DEBUG_BUGVERBOSE
-.macro __EMIT_BUG_ENTRY addr,file,line,flags
+.macro EMIT_BUG_ENTRY addr,file,line,flags
	 .section __bug_table,"aw"
 5001:	 .4byte \addr - .
	 .4byte 5002f - .
@@ -23,7 +22,7 @@
	 .previous
 .endm
 #else
-.macro __EMIT_BUG_ENTRY addr,file,line,flags
+.macro EMIT_BUG_ENTRY addr,file,line,flags
	 .section __bug_table,"aw"
 5001:	 .4byte \addr - .
	 .short \flags
@@ -32,18 +31,6 @@
 .endm
 #endif /* verbose */
 
-.macro EMIT_WARN_ENTRY addr,file,line,flags
-	EX_TABLE(\addr,\addr+4)
-	__EMIT_BUG_ENTRY \addr,\file,\line,\flags
-.endm
-
-.macro EMIT_BUG_ENTRY addr,file,line,flags
-	.if \flags & 1 /* BUGFLAG_WARNING */
-	.err /* Use EMIT_WARN_ENTRY for warnings */
-	.endif
-	__EMIT_BUG_ENTRY \addr,\file,\line,\flags
-.endm
-
 #else /* !__ASSEMBLY__ */
 /* _EMIT_BUG_ENTRY expects args %0,%1,%2,%3 to be FILE, LINE, flags and
    sizeof(struct bug_entry), respectively */
@@ -73,16 +60,6 @@
	  "i" (sizeof(struct bug_entry)),	\
	  ##__VA_ARGS__)
 
-#define WARN_ENTRY(insn, flags, label, ...)		\
-	asm_volatile_goto(				\
-		"1:	" insn "\n"			\
-		EX_TABLE(1b, %l[label])			\
-		_EMIT_BUG_ENTRY				\
-		: : "i" (__FILE__), "i" (__LINE__),	\
-		  "i" (flags),				\
-		  "i" (sizeof(struct bug_entry)),	\
-		  ##__VA_ARGS__ : : label)
-
 /*
 * BUG_ON() and WARN_ON() do their best to cooperate with compile-time
 * optimisations. However depending on the complexity of the condition
@@ -95,16 +72,7 @@
 } while (0)
 #define HAVE_ARCH_BUG
 
-#define __WARN_FLAGS(flags) do {				\
-	__label__ __label_warn_on;				\
-								\
-	WARN_ENTRY("twi 31, 0, 0", BUGFLAG_WARNING | (flags), __label_warn_on);	\
-	barrier_before_unreachable();				\
-	__builtin_unreachable();				\
-								\
-__label_warn_on:						\
-	break;							\
-} while (0)
+#define __WARN_FLAGS(flags) BUG_ENTRY("twi 31, 0, 0", BUGFLAG_WARNING | (flags))
 
 #ifdef CONFIG_PPC64
 #define BUG_ON(x) do {						\
@@ -117,25 +85,15 @@ __label_warn_on:					\
 } while (0)
 
 #define WARN_ON(x) ({						\
-	bool __ret_warn_on = false;				\
-	do {							\
-		if (__builtin_constant_p((x))) {		\
-			if (!(x))				\
-				break;				\
+	int __ret_warn_on = !!(x);				\
+	if (__builtin_constant_p(__ret_warn_on)) {		\
+		if (__ret_warn_on)				\
			__WARN();				\
-			__ret_warn_on = true;			\
-		} else {					\
-			__label__ __label_warn_on;		\
-								\
-			WARN_ENTRY(PPC_TLNEI " %4, 0",		\
-				   BUGFLAG_WARNING | BUGFLAG_TAINT(TAINT_WARN),	\
-				   __label_warn_on,		\
-				   "r" ((__force long)(x)));	\
-			break;					\
-__label_warn_on:						\
-			__ret_warn_on = true;			\
-		}						\
-	} while (0);						\
+	} else {						\
+		BUG_ENTRY(PPC_TLNEI " %4, 0",			\
+			  BUGFLAG_WARNING | BUGFLAG_TAINT(TAINT_WARN),	\
+			  "r" (__ret_warn_on));			\
+	}							\
	unlikely(__ret_warn_on);				\
 })
 
@@ -148,14 +106,13 @@ __label_warn_on:					\
 #ifdef __ASSEMBLY__
 .macro EMIT_BUG_ENTRY addr,file,line,flags
 .endm
-.macro EMIT_WARN_ENTRY addr,file,line,flags
-.endm
 #else /* !__ASSEMBLY__ */
 #define _EMIT_BUG_ENTRY
-#define _EMIT_WARN_ENTRY
 #endif
 #endif /* CONFIG_BUG */
 
+#define EMIT_WARN_ENTRY EMIT_BUG_ENTRY
+
 #include <asm-generic/bug.h>
 
 #ifndef __ASSEMBLY__
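[Editor's note: the reverted WARN_ON above splits compile-time-constant conditions, which the optimizer folds away, from runtime ones, which feed a trap instruction. The userspace sketch below demonstrates only the __builtin_constant_p split; the trap is stubbed out with a hypothetical report() helper, and this is not the kernel macro.]

#include <stdio.h>

/* Build with gcc/clang at -O2; without optimization,
 * __builtin_constant_p on a variable evaluates to 0 and everything
 * takes the runtime path (still correct, just not folded). */
static void report(long cond)
{
	if (cond)
		fprintf(stderr, "WARNING triggered\n");
}

#define MY_WARN_ON(x) ({					\
	int __ret_warn_on = !!(x);				\
	if (__builtin_constant_p(__ret_warn_on)) {		\
		if (__ret_warn_on)				\
			fprintf(stderr, "constant WARN\n");	\
	} else {						\
		report(__ret_warn_on);	/* runtime path */	\
	}							\
	__ret_warn_on;						\
})

int main(int argc, char **argv)
{
	MY_WARN_ON(0);		/* constant false: folds away entirely */
	MY_WARN_ON(argc > 64);	/* runtime condition: calls report() */
	return 0;
}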
diff --git a/arch/powerpc/include/asm/elf.h b/arch/powerpc/include/asm/elf.h
index a26ca097d032..79f1c480b5eb 100644
--- a/arch/powerpc/include/asm/elf.h
+++ b/arch/powerpc/include/asm/elf.h
@@ -12,14 +12,8 @@
 
 /*
 * This is used to ensure we don't load something for the wrong architecture.
- * 64le only supports ELFv2 64-bit binaries (64be supports v1 and v2).
 */
-#if defined(CONFIG_PPC64) && defined(CONFIG_CPU_LITTLE_ENDIAN)
-#define elf_check_arch(x) (((x)->e_machine == ELF_ARCH) && \
-			   (((x)->e_flags & 0x3) == 0x2))
-#else
 #define elf_check_arch(x) ((x)->e_machine == ELF_ARCH)
-#endif
 #define compat_elf_check_arch(x)	((x)->e_machine == EM_PPC)
 
 #define CORE_DUMP_USE_REGSET
diff --git a/arch/powerpc/include/asm/thread_info.h b/arch/powerpc/include/asm/thread_info.h
index bc5d39a835fe..bf5dde1a4114 100644
--- a/arch/powerpc/include/asm/thread_info.h
+++ b/arch/powerpc/include/asm/thread_info.h
@@ -183,13 +183,9 @@ static inline bool test_thread_local_flags(unsigned int flags)
 #define clear_tsk_compat_task(tsk) do { } while (0)
 #endif
 
-#ifdef CONFIG_PPC64
-#ifdef CONFIG_CPU_BIG_ENDIAN
+#if defined(CONFIG_PPC64)
 #define is_elf2_task() (test_thread_flag(TIF_ELF2ABI))
 #else
-#define is_elf2_task() (1)
-#endif
-#else
 #define is_elf2_task() (0)
 #endif
diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
index e59ec6d32d37..7ef147e2a20d 100644
--- a/arch/powerpc/kernel/traps.c
+++ b/arch/powerpc/kernel/traps.c
@@ -1508,13 +1508,8 @@ static void do_program_check(struct pt_regs *regs)
 
		if (!(regs->msr & MSR_PR) &&  /* not user-mode */
		    report_bug(bugaddr, regs) == BUG_TRAP_TYPE_WARN) {
-			const struct exception_table_entry *entry;
-
-			entry = search_exception_tables(bugaddr);
-			if (entry) {
-				regs_set_return_ip(regs, extable_fixup(entry) +
-						   regs->nip - bugaddr);
-				return;
-			}
+			regs_add_return_ip(regs, 4);
+			return;
		}
 
		if (cpu_has_feature(CPU_FTR_DEXCR_NPHIE) && user_mode(regs)) {
diff --git a/arch/powerpc/mm/kasan/Makefile b/arch/powerpc/mm/kasan/Makefile
index 699eeffd9f55..f9522fd70b2f 100644
--- a/arch/powerpc/mm/kasan/Makefile
+++ b/arch/powerpc/mm/kasan/Makefile
@@ -1,6 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0
 
 KASAN_SANITIZE := n
+KCOV_INSTRUMENT := n
 
 obj-$(CONFIG_PPC32)		+= init_32.o
 obj-$(CONFIG_PPC_8xx)		+= 8xx.o
diff --git a/arch/powerpc/platforms/512x/mpc512x_lpbfifo.c b/arch/powerpc/platforms/512x/mpc512x_lpbfifo.c
index 1bfb29574caa..c1e981649bd9 100644
--- a/arch/powerpc/platforms/512x/mpc512x_lpbfifo.c
+++ b/arch/powerpc/platforms/512x/mpc512x_lpbfifo.c
@@ -477,7 +477,7 @@ static int mpc512x_lpbfifo_probe(struct platform_device *pdev)
	return ret;
 }
 
-static int mpc512x_lpbfifo_remove(struct platform_device *pdev)
+static void mpc512x_lpbfifo_remove(struct platform_device *pdev)
 {
	unsigned long flags;
	struct dma_device *dma_dev = lpbfifo.chan->device;
@@ -494,8 +494,6 @@ static int mpc512x_lpbfifo_remove(struct platform_device *pdev)
	free_irq(lpbfifo.irq, &pdev->dev);
	irq_dispose_mapping(lpbfifo.irq);
	dma_release_channel(lpbfifo.chan);
-
-	return 0;
 }
 
 static const struct of_device_id mpc512x_lpbfifo_match[] = {
@@ -506,7 +504,7 @@ MODULE_DEVICE_TABLE(of, mpc512x_lpbfifo_match);
 
 static struct platform_driver mpc512x_lpbfifo_driver = {
	.probe = mpc512x_lpbfifo_probe,
-	.remove = mpc512x_lpbfifo_remove,
+	.remove_new = mpc512x_lpbfifo_remove,
	.driver = {
		.name = DRV_NAME,
		.of_match_table = mpc512x_lpbfifo_match,
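[Editor's note: the mpc512x change above moves to the void-returning .remove_new callback, since an error returned from remove was never meaningfully handled by the driver core. A minimal, untested skeleton of the same convention against a v6.5-era tree; the "demo" names are invented:]

#include <linux/module.h>
#include <linux/platform_device.h>

static int demo_probe(struct platform_device *pdev)
{
	return 0;	/* acquire resources here */
}

static void demo_remove(struct platform_device *pdev)
{
	/* Release resources. There is no return value: an error here
	 * could only ever be logged, never acted upon, which is why
	 * .remove_new is void. */
}

static struct platform_driver demo_driver = {
	.probe      = demo_probe,
	.remove_new = demo_remove,
	.driver     = {
		.name = "demo-device",	/* hypothetical device name */
	},
};
module_platform_driver(demo_driver);

MODULE_LICENSE("GPL");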
diff --git a/arch/powerpc/platforms/pseries/vas.c b/arch/powerpc/platforms/pseries/vas.c
index 9a44a98ba342..3fbc2a6aa319 100644
--- a/arch/powerpc/platforms/pseries/vas.c
+++ b/arch/powerpc/platforms/pseries/vas.c
@@ -744,6 +744,12 @@ static int reconfig_close_windows(struct vas_caps *vcap, int excess_creds,
		}
 
		task_ref = &win->vas_win.task_ref;
+		/*
+		 * VAS mmap (coproc_mmap()) and its fault handler
+		 * (vas_mmap_fault()) are called after holding mmap lock.
+		 * So hold mmap mutex after mmap_lock to avoid deadlock.
+		 */
+		mmap_write_lock(task_ref->mm);
		mutex_lock(&task_ref->mmap_mutex);
		vma = task_ref->vma;
		/*
@@ -752,7 +758,6 @@ static int reconfig_close_windows(struct vas_caps *vcap, int excess_creds,
		 */
		win->vas_win.status |= flag;
 
-		mmap_write_lock(task_ref->mm);
		/*
		 * vma is set in the original mapping. But this mapping
		 * is done with mmap() after the window is opened with ioctl.
@@ -762,8 +767,8 @@ static int reconfig_close_windows(struct vas_caps *vcap, int excess_creds,
		if (vma)
			zap_vma_pages(vma);
 
-		mmap_write_unlock(task_ref->mm);
		mutex_unlock(&task_ref->mmap_mutex);
+		mmap_write_unlock(task_ref->mm);
		/*
		 * Close VAS window in the hypervisor, but do not
		 * free vas_window struct since it may be reused
diff --git a/arch/s390/crypto/paes_s390.c b/arch/s390/crypto/paes_s390.c
index d29a9d908797..38349150c96e 100644
--- a/arch/s390/crypto/paes_s390.c
+++ b/arch/s390/crypto/paes_s390.c
@@ -103,7 +103,7 @@ static inline void _free_kb_keybuf(struct key_blob *kb)
 {
	if (kb->key && kb->key != kb->keybuf
	    && kb->keylen > sizeof(kb->keybuf)) {
-		kfree(kb->key);
+		kfree_sensitive(kb->key);
		kb->key = NULL;
	}
 }
diff --git a/arch/s390/kvm/pv.c b/arch/s390/kvm/pv.c
index 2f34c7c3c5ab..bf1fdc7bf89e 100644
--- a/arch/s390/kvm/pv.c
+++ b/arch/s390/kvm/pv.c
@@ -411,8 +411,12 @@ int kvm_s390_pv_deinit_cleanup_all(struct kvm *kvm, u16 *rc, u16 *rrc)
	u16 _rc, _rrc;
	int cc = 0;
 
-	/* Make sure the counter does not reach 0 before calling s390_uv_destroy_range */
-	atomic_inc(&kvm->mm->context.protected_count);
+	/*
+	 * Nothing to do if the counter was already 0. Otherwise make sure
+	 * the counter does not reach 0 before calling s390_uv_destroy_range.
+	 */
+	if (!atomic_inc_not_zero(&kvm->mm->context.protected_count))
+		return 0;
 
	*rc = 1;
	/* If the current VM is protected, destroy it */
diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c
index dbe8394234e2..2f123429a291 100644
--- a/arch/s390/mm/fault.c
+++ b/arch/s390/mm/fault.c
@@ -421,6 +421,8 @@ static inline vm_fault_t do_exception(struct pt_regs *regs, int access)
	vma_end_read(vma);
	if (!(fault & VM_FAULT_RETRY)) {
		count_vm_vma_lock_event(VMA_LOCK_SUCCESS);
+		if (likely(!(fault & VM_FAULT_ERROR)))
+			fault = 0;
		goto out;
	}
	count_vm_vma_lock_event(VMA_LOCK_RETRY);
diff --git a/arch/s390/mm/gmap.c b/arch/s390/mm/gmap.c
index 989ebd0912b4..9c8af31be970 100644
--- a/arch/s390/mm/gmap.c
+++ b/arch/s390/mm/gmap.c
@@ -2853,6 +2853,7 @@ int s390_replace_asce(struct gmap *gmap)
	page = alloc_pages(GFP_KERNEL_ACCOUNT, CRST_ALLOC_ORDER);
	if (!page)
		return -ENOMEM;
+	page->index = 0;
	table = page_to_virt(page);
	memcpy(table, gmap->table, 1UL << (CRST_ALLOC_ORDER + PAGE_SHIFT));
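[Editor's note: the s390 pv.c fix above relies on atomic_inc_not_zero(), which only takes a reference while the counter is still live. The userspace C11 sketch below shows the same compare-exchange loop as an illustration; it is not the kernel's implementation, and the function and variable names are invented.]

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Increment v unless it is zero; returns false when the count had
 * already dropped to zero, i.e. the object is past its last reference
 * and must not be revived. */
static bool inc_not_zero(atomic_int *v)
{
	int old = atomic_load(v);

	do {
		if (old == 0)
			return false;
	} while (!atomic_compare_exchange_weak(v, &old, old + 1));
	return true;
}

int main(void)
{
	atomic_int live = 1, dead = 0;

	printf("live: %d\n", inc_not_zero(&live));	/* 1: took a reference */
	printf("dead: %d\n", inc_not_zero(&dead));	/* 0: bail out early */
	return 0;
}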