Diffstat (limited to 'arch/arm64/include/asm/kvm_host.h')
-rw-r--r-- | arch/arm64/include/asm/kvm_host.h | 46
1 file changed, 22 insertions(+), 24 deletions(-)
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index 2a8d3f8ca22c..f656169db8c3 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
 /*
  * Copyright (C) 2012,2013 - ARM Ltd
  * Author: Marc Zyngier <marc.zyngier@arm.com>
@@ -5,18 +6,6 @@
  * Derived from arch/arm/include/asm/kvm_host.h:
  * Copyright (C) 2012 - Virtual Open Systems and Columbia University
  * Author: Christoffer Dall <c.dall@virtualopensystems.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
  */
 
 #ifndef __ARM64_KVM_HOST_H__
@@ -30,12 +19,12 @@
 #include <asm/arch_gicv3.h>
 #include <asm/barrier.h>
 #include <asm/cpufeature.h>
+#include <asm/cputype.h>
 #include <asm/daifflags.h>
 #include <asm/fpsimd.h>
 #include <asm/kvm.h>
 #include <asm/kvm_asm.h>
 #include <asm/kvm_mmio.h>
-#include <asm/smp_plat.h>
 #include <asm/thread_info.h>
 
 #define __KVM_HAVE_ARCH_INTC_INITIALIZED
@@ -495,11 +484,10 @@ struct kvm_vcpu *kvm_mpidr_to_vcpu(struct kvm *kvm, unsigned long mpidr);
 
 DECLARE_PER_CPU(kvm_host_data_t, kvm_host_data);
 
-static inline void kvm_init_host_cpu_context(struct kvm_cpu_context *cpu_ctxt,
-					     int cpu)
+static inline void kvm_init_host_cpu_context(struct kvm_cpu_context *cpu_ctxt)
 {
 	/* The host's MPIDR is immutable, so let's set it up at boot time */
-	cpu_ctxt->sys_regs[MPIDR_EL1] = cpu_logical_map(cpu);
+	cpu_ctxt->sys_regs[MPIDR_EL1] = read_cpuid_mpidr();
 }
 
 void __kvm_enable_ssbs(void);
@@ -592,9 +580,6 @@ static inline int kvm_arch_vcpu_run_pid_change(struct kvm_vcpu *vcpu)
 void kvm_set_pmu_events(u32 set, struct perf_event_attr *attr);
 void kvm_clr_pmu_events(u32 clr);
 
-void __pmu_switch_to_host(struct kvm_cpu_context *host_ctxt);
-bool __pmu_switch_to_guest(struct kvm_cpu_context *host_ctxt);
-
 void kvm_vcpu_pmu_restore_guest(struct kvm_vcpu *vcpu);
 void kvm_vcpu_pmu_restore_host(struct kvm_vcpu *vcpu);
 #else
@@ -611,11 +596,12 @@ static inline void kvm_arm_vhe_guest_enter(void)
 	 * will not signal the CPU of interrupts of lower priority, and the
 	 * only way to get out will be via guest exceptions.
	 * Naturally, we want to avoid this.
+	 *
+	 * local_daif_mask() already sets GIC_PRIO_PSR_I_SET, we just need a
+	 * dsb to ensure the redistributor forwards EL2 IRQs to the CPU.
 	 */
-	if (system_uses_irq_prio_masking()) {
-		gic_write_pmr(GIC_PRIO_IRQON);
+	if (system_uses_irq_prio_masking())
 		dsb(sy);
-	}
 }
 
 static inline void kvm_arm_vhe_guest_exit(void)
@@ -634,9 +620,21 @@ static inline void kvm_arm_vhe_guest_exit(void)
 	isb();
 }
 
-static inline bool kvm_arm_harden_branch_predictor(void)
+#define KVM_BP_HARDEN_UNKNOWN		-1
+#define KVM_BP_HARDEN_WA_NEEDED		0
+#define KVM_BP_HARDEN_NOT_REQUIRED	1
+
+static inline int kvm_arm_harden_branch_predictor(void)
 {
-	return cpus_have_const_cap(ARM64_HARDEN_BRANCH_PREDICTOR);
+	switch (get_spectre_v2_workaround_state()) {
+	case ARM64_BP_HARDEN_WA_NEEDED:
+		return KVM_BP_HARDEN_WA_NEEDED;
+	case ARM64_BP_HARDEN_NOT_REQUIRED:
+		return KVM_BP_HARDEN_NOT_REQUIRED;
+	case ARM64_BP_HARDEN_UNKNOWN:
+	default:
+		return KVM_BP_HARDEN_UNKNOWN;
+	}
 }
 
 #define KVM_SSBD_UNKNOWN		-1
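
The kvm_init_host_cpu_context() hunk drops the cpu parameter: the host context is now expected to be initialised on the CPU it describes, so a local MPIDR_EL1 read via read_cpuid_mpidr() (hence the new asm/cputype.h include) replaces the cpu_logical_map() lookup and the asm/smp_plat.h dependency. Below is a minimal sketch of what a per-CPU caller could look like; the function name and the host_ctxt member are assumptions for illustration, not taken from this diff.

/*
 * Illustrative sketch only: the helper must run on the CPU whose
 * context it initialises, because read_cpuid_mpidr() reads the
 * local MPIDR_EL1. example_cpu_hyp_init() and host_ctxt are
 * hypothetical names.
 */
static void example_cpu_hyp_init(void *discard)
{
	kvm_host_data_t *host_data = this_cpu_ptr(&kvm_host_data);

	/* No cpu argument needed any more. */
	kvm_init_host_cpu_context(&host_data->host_ctxt);
}

Dispatched with something like on_each_cpu(example_cpu_hyp_init, NULL, 1), each CPU caches its own MPIDR without consulting the logical map.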
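
kvm_arm_harden_branch_predictor() also changes from a bool to a tri-state built on get_spectre_v2_workaround_state(), distinguishing "workaround needed and applied", "not required", and "unknown". Here is a hedged sketch of how a caller might fold that into a feature-discovery answer for a guest; the function name and numeric return codes are placeholders, not the actual KVM hypercall handler.

/*
 * Hypothetical caller: map the KVM_BP_HARDEN_* tri-state onto a
 * discovery result. The return codes are illustrative stand-ins
 * for SMCCC-style return values.
 */
static long example_bp_harden_feature(void)
{
	switch (kvm_arm_harden_branch_predictor()) {
	case KVM_BP_HARDEN_NOT_REQUIRED:
		return 1;	/* CPU not affected, guest need not mitigate */
	case KVM_BP_HARDEN_WA_NEEDED:
		return 0;	/* workaround required and implemented */
	case KVM_BP_HARDEN_UNKNOWN:
	default:
		return -1;	/* unknown: conservatively report unsupported */
	}
}

Treating the unknown state as unsupported keeps the conservative behaviour of the old boolean check for guests that probe the workaround.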