From 892fd259cbf6b1dc16116611b84f7f164a11fe2e Mon Sep 17 00:00:00 2001
From: Marc Zyngier
Date: Thu, 21 Oct 2021 13:50:42 +0100
Subject: KVM: arm64: Reorder vcpu flag definitions

The vcpu arch flags are in an interesting, semi-random order. As I have
made the mistake of reusing a flag once, let's rework this in an order
that I find a bit less confusing.

Signed-off-by: Marc Zyngier
---
 arch/arm64/include/asm/kvm_host.h | 24 ++++++++++++------------
 1 file changed, 12 insertions(+), 12 deletions(-)

(limited to 'arch/arm64/include/asm/kvm_host.h')

diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index 2a5f7f38006f..3bfd30137ce2 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -417,14 +417,12 @@ struct kvm_vcpu_arch {
 #define KVM_ARM64_VCPU_SVE_FINALIZED	(1 << 6) /* SVE config completed */
 #define KVM_ARM64_GUEST_HAS_PTRAUTH	(1 << 7) /* PTRAUTH exposed to guest */
 #define KVM_ARM64_PENDING_EXCEPTION	(1 << 8) /* Exception pending */
+/*
+ * Overlaps with KVM_ARM64_EXCEPT_MASK on purpose so that it can't be
+ * set together with an exception...
+ */
+#define KVM_ARM64_INCREMENT_PC		(1 << 9) /* Increment PC */
 #define KVM_ARM64_EXCEPT_MASK		(7 << 9) /* Target EL/MODE */
-#define KVM_ARM64_DEBUG_STATE_SAVE_SPE	(1 << 12) /* Save SPE context if active */
-#define KVM_ARM64_DEBUG_STATE_SAVE_TRBE	(1 << 13) /* Save TRBE context if active */
-
-#define KVM_GUESTDBG_VALID_MASK	(KVM_GUESTDBG_ENABLE | \
-				 KVM_GUESTDBG_USE_SW_BP | \
-				 KVM_GUESTDBG_USE_HW | \
-				 KVM_GUESTDBG_SINGLESTEP)
 /*
  * When KVM_ARM64_PENDING_EXCEPTION is set, KVM_ARM64_EXCEPT_MASK can
  * take the following values:
@@ -442,11 +440,13 @@ struct kvm_vcpu_arch {
 #define KVM_ARM64_EXCEPT_AA64_EL1	(0 << 11)
 #define KVM_ARM64_EXCEPT_AA64_EL2	(1 << 11)
 
-/*
- * Overlaps with KVM_ARM64_EXCEPT_MASK on purpose so that it can't be
- * set together with an exception...
- */
-#define KVM_ARM64_INCREMENT_PC		(1 << 9) /* Increment PC */
+#define KVM_ARM64_DEBUG_STATE_SAVE_SPE	(1 << 12) /* Save SPE context if active */
+#define KVM_ARM64_DEBUG_STATE_SAVE_TRBE	(1 << 13) /* Save TRBE context if active */
+
+#define KVM_GUESTDBG_VALID_MASK	(KVM_GUESTDBG_ENABLE | \
+				 KVM_GUESTDBG_USE_SW_BP | \
+				 KVM_GUESTDBG_USE_HW | \
+				 KVM_GUESTDBG_SINGLESTEP)
 
 #define vcpu_has_sve(vcpu) (system_supports_sve() && \
 			    ((vcpu)->arch.flags & KVM_ARM64_GUEST_HAS_SVE))
-- cgit v1.2.3

From 8383741ab2e773a992f1f0f8acdca5e7a4687c49 Mon Sep 17 00:00:00 2001
From: Marc Zyngier
Date: Wed, 27 Oct 2021 11:18:00 +0100
Subject: KVM: arm64: Get rid of host SVE tracking/saving

The SVE host tracking in KVM is pretty involved. It relies on a set of
flags tracking the ownership of the SVE registers, as well as that of
the EL0 access.

It is also pretty scary: __hyp_sve_save_host() computes a thread_struct
pointer and obtains a sve_state which gets directly accessed without
further ado, even on nVHE. How can this even work? The answer to that
is that it doesn't, and that this is mostly dead code.

Closer examination shows that on executing a syscall, userspace loses
its SVE state entirely. This is part of the ABI. Another thing to
notice is that although the kernel provides helpers such as
kernel_neon_begin()/end(), they only deal with the FP/NEON state, and
not SVE.

Given that you can only execute a guest as the result of a syscall, and
that the kernel cannot use SVE by itself, it becomes pretty obvious
that there is never any host SVE state to save, and that this code is
only there to increase confusion.
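To illustrate the syscall argument (a simplified sketch of the
contemporary sve_user_discard() in arch/arm64/kernel/syscall.c, shown
here for illustration only and not part of this patch), the vector
state is thrown away on syscall entry, before KVM_RUN can possibly
reach the hypervisor:

	static inline void sve_user_discard(void)
	{
		if (!system_supports_sve())
			return;

		/* Discarding the SVE state on syscall entry is part of the ABI */
		clear_thread_flag(TIF_SVE);

		/* Turn off SVE access for EL0 until it is next needed */
		sve_user_disable();
	}

By the time a guest runs, TIF_SVE is therefore guaranteed to be clear
for the calling task.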
Get rid of the TIF_SVE tracking and host save infrastructure altogether.

Reviewed-by: Mark Brown
Signed-off-by: Marc Zyngier
---
 arch/arm64/include/asm/kvm_host.h       |  1 -
 arch/arm64/kvm/fpsimd.c                 | 20 +++++---------------
 arch/arm64/kvm/hyp/include/hyp/switch.h | 27 +++------------------------
 3 files changed, 8 insertions(+), 40 deletions(-)

(limited to 'arch/arm64/include/asm/kvm_host.h')

diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index 3bfd30137ce2..3ccfc3e3e436 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -411,7 +411,6 @@ struct kvm_vcpu_arch {
 #define KVM_ARM64_DEBUG_DIRTY		(1 << 0)
 #define KVM_ARM64_FP_ENABLED		(1 << 1) /* guest FP regs loaded */
 #define KVM_ARM64_FP_HOST		(1 << 2) /* host FP regs loaded */
-#define KVM_ARM64_HOST_SVE_IN_USE	(1 << 3) /* backup for host TIF_SVE */
 #define KVM_ARM64_HOST_SVE_ENABLED	(1 << 4) /* SVE enabled for EL0 */
 #define KVM_ARM64_GUEST_HAS_SVE	(1 << 5) /* SVE exposed to guest */
 #define KVM_ARM64_VCPU_SVE_FINALIZED	(1 << 6) /* SVE config completed */
diff --git a/arch/arm64/kvm/fpsimd.c b/arch/arm64/kvm/fpsimd.c
index 5621020b28de..2d15e1d6e214 100644
--- a/arch/arm64/kvm/fpsimd.c
+++ b/arch/arm64/kvm/fpsimd.c
@@ -66,22 +66,15 @@ error:
  *
  * Here, we just set the correct metadata to indicate that the FPSIMD
  * state in the cpu regs (if any) belongs to current on the host.
- *
- * TIF_SVE is backed up here, since it may get clobbered with guest state.
- * This flag is restored by kvm_arch_vcpu_put_fp(vcpu).
  */
 void kvm_arch_vcpu_load_fp(struct kvm_vcpu *vcpu)
 {
 	BUG_ON(!current->mm);
+	BUG_ON(test_thread_flag(TIF_SVE));
 
-	vcpu->arch.flags &= ~(KVM_ARM64_FP_ENABLED |
-			      KVM_ARM64_HOST_SVE_IN_USE |
-			      KVM_ARM64_HOST_SVE_ENABLED);
+	vcpu->arch.flags &= ~KVM_ARM64_FP_ENABLED;
 	vcpu->arch.flags |= KVM_ARM64_FP_HOST;
 
-	if (test_thread_flag(TIF_SVE))
-		vcpu->arch.flags |= KVM_ARM64_HOST_SVE_IN_USE;
-
 	if (read_sysreg(cpacr_el1) & CPACR_EL1_ZEN_EL0EN)
 		vcpu->arch.flags |= KVM_ARM64_HOST_SVE_ENABLED;
 }
@@ -115,13 +108,11 @@ void kvm_arch_vcpu_ctxsync_fp(struct kvm_vcpu *vcpu)
 void kvm_arch_vcpu_put_fp(struct kvm_vcpu *vcpu)
 {
 	unsigned long flags;
-	bool host_has_sve = system_supports_sve();
-	bool guest_has_sve = vcpu_has_sve(vcpu);
 
 	local_irq_save(flags);
 
 	if (vcpu->arch.flags & KVM_ARM64_FP_ENABLED) {
-		if (guest_has_sve) {
+		if (vcpu_has_sve(vcpu)) {
 			__vcpu_sys_reg(vcpu, ZCR_EL1) = read_sysreg_el1(SYS_ZCR);
 
 			/* Restore the VL that was saved when bound to the CPU */
@@ -131,7 +122,7 @@ void kvm_arch_vcpu_put_fp(struct kvm_vcpu *vcpu)
 		}
 
 		fpsimd_save_and_flush_cpu_state();
-	} else if (has_vhe() && host_has_sve) {
+	} else if (has_vhe() && system_supports_sve()) {
 		/*
 		 * The FPSIMD/SVE state in the CPU has not been touched, and we
 		 * have SVE (and VHE): CPACR_EL1 (alias CPTR_EL2) has been
@@ -145,8 +136,7 @@ void kvm_arch_vcpu_put_fp(struct kvm_vcpu *vcpu)
 			sysreg_clear_set(CPACR_EL1, CPACR_EL1_ZEN_EL0EN, 0);
 	}
 
-	update_thread_flag(TIF_SVE,
-			   vcpu->arch.flags & KVM_ARM64_HOST_SVE_IN_USE);
+	update_thread_flag(TIF_SVE, 0);
 
 	local_irq_restore(flags);
 }
diff --git a/arch/arm64/kvm/hyp/include/hyp/switch.h b/arch/arm64/kvm/hyp/include/hyp/switch.h
index 7a0af1d39303..e65c2956b881 100644
--- a/arch/arm64/kvm/hyp/include/hyp/switch.h
+++ b/arch/arm64/kvm/hyp/include/hyp/switch.h
@@ -143,16 +143,6 @@ static inline bool __populate_fault_info(struct kvm_vcpu *vcpu)
 	return __get_fault_info(vcpu->arch.fault.esr_el2, &vcpu->arch.fault);
 }
 
-static inline void __hyp_sve_save_host(struct kvm_vcpu *vcpu)
-{
-	struct thread_struct *thread;
-
-	thread = container_of(vcpu->arch.host_fpsimd_state, struct thread_struct,
-			      uw.fpsimd_state);
-
-	__sve_save_state(sve_pffr(thread), &vcpu->arch.host_fpsimd_state->fpsr);
-}
-
 static inline void __hyp_sve_restore_guest(struct kvm_vcpu *vcpu)
 {
 	sve_cond_update_zcr_vq(vcpu_sve_max_vq(vcpu) - 1, SYS_ZCR_EL2);
@@ -169,21 +159,14 @@ static inline void __hyp_sve_restore_guest(struct kvm_vcpu *vcpu)
  */
 static bool kvm_hyp_handle_fpsimd(struct kvm_vcpu *vcpu, u64 *exit_code)
 {
-	bool sve_guest, sve_host;
+	bool sve_guest;
 	u8 esr_ec;
 	u64 reg;
 
 	if (!system_supports_fpsimd())
 		return false;
 
-	if (system_supports_sve()) {
-		sve_guest = vcpu_has_sve(vcpu);
-		sve_host = vcpu->arch.flags & KVM_ARM64_HOST_SVE_IN_USE;
-	} else {
-		sve_guest = false;
-		sve_host = false;
-	}
-
+	sve_guest = vcpu_has_sve(vcpu);
 	esr_ec = kvm_vcpu_trap_get_class(vcpu);
 
 	/* Don't handle SVE traps for non-SVE vcpus here: */
@@ -207,11 +190,7 @@ static bool kvm_hyp_handle_fpsimd(struct kvm_vcpu *vcpu, u64 *exit_code)
 	isb();
 
 	if (vcpu->arch.flags & KVM_ARM64_FP_HOST) {
-		if (sve_host)
-			__hyp_sve_save_host(vcpu);
-		else
-			__fpsimd_save_state(vcpu->arch.host_fpsimd_state);
-
+		__fpsimd_save_state(vcpu->arch.host_fpsimd_state);
 		vcpu->arch.flags &= ~KVM_ARM64_FP_HOST;
 	}
-- cgit v1.2.3

From af9a0e21d817f40595aa629de32d3bd96582abef Mon Sep 17 00:00:00 2001
From: Marc Zyngier
Date: Thu, 21 Oct 2021 14:10:35 +0100
Subject: KVM: arm64: Introduce flag shadowing TIF_FOREIGN_FPSTATE

We currently have to maintain a mapping of the thread_info structure at
EL2 in order to be able to check the TIF_FOREIGN_FPSTATE flag. In order
to eventually get rid of this, start with a vcpu flag that shadows the
thread flag on each entry into the hypervisor.

Reviewed-by: Mark Brown
Signed-off-by: Marc Zyngier
---
 arch/arm64/include/asm/kvm_host.h       | 2 ++
 arch/arm64/kvm/arm.c                    | 1 +
 arch/arm64/kvm/fpsimd.c                 | 8 ++++++++
 arch/arm64/kvm/hyp/include/hyp/switch.h | 2 +-
 4 files changed, 12 insertions(+), 1 deletion(-)

(limited to 'arch/arm64/include/asm/kvm_host.h')

diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index 3ccfc3e3e436..9f1703ebae15 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -441,6 +441,7 @@ struct kvm_vcpu_arch {
 
 #define KVM_ARM64_DEBUG_STATE_SAVE_SPE	(1 << 12) /* Save SPE context if active */
 #define KVM_ARM64_DEBUG_STATE_SAVE_TRBE	(1 << 13) /* Save TRBE context if active */
+#define KVM_ARM64_FP_FOREIGN_FPSTATE	(1 << 14)
 
 #define KVM_GUESTDBG_VALID_MASK	(KVM_GUESTDBG_ENABLE | \
 				 KVM_GUESTDBG_USE_SW_BP | \
@@ -736,6 +737,7 @@ long kvm_vm_ioctl_mte_copy_tags(struct kvm *kvm,
 /* Guest/host FPSIMD coordination helpers */
 int kvm_arch_vcpu_run_map_fp(struct kvm_vcpu *vcpu);
 void kvm_arch_vcpu_load_fp(struct kvm_vcpu *vcpu);
+void kvm_arch_vcpu_ctxflush_fp(struct kvm_vcpu *vcpu);
 void kvm_arch_vcpu_ctxsync_fp(struct kvm_vcpu *vcpu);
 void kvm_arch_vcpu_put_fp(struct kvm_vcpu *vcpu);
 
diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
index e4727dc771bf..d1b93dc8d639 100644
--- a/arch/arm64/kvm/arm.c
+++ b/arch/arm64/kvm/arm.c
@@ -849,6 +849,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
 		}
 
 		kvm_arm_setup_debug(vcpu);
+		kvm_arch_vcpu_ctxflush_fp(vcpu);
 
 		/**************************************************************
 		 * Enter the guest
diff --git a/arch/arm64/kvm/fpsimd.c b/arch/arm64/kvm/fpsimd.c
index 2d15e1d6e214..a18b9c1744d5 100644
--- a/arch/arm64/kvm/fpsimd.c
+++ b/arch/arm64/kvm/fpsimd.c
@@ -79,6 +79,14 @@ void kvm_arch_vcpu_load_fp(struct kvm_vcpu *vcpu)
 		vcpu->arch.flags |= KVM_ARM64_HOST_SVE_ENABLED;
 }
 
+void kvm_arch_vcpu_ctxflush_fp(struct kvm_vcpu *vcpu)
+{
+	if (test_thread_flag(TIF_FOREIGN_FPSTATE))
+		vcpu->arch.flags |= KVM_ARM64_FP_FOREIGN_FPSTATE;
+	else
+		vcpu->arch.flags &= ~KVM_ARM64_FP_FOREIGN_FPSTATE;
+}
+
 /*
  * If the guest FPSIMD state was loaded, update the host's context
  * tracking data mark the CPU FPSIMD regs as dirty and belonging to vcpu
diff --git a/arch/arm64/kvm/hyp/include/hyp/switch.h b/arch/arm64/kvm/hyp/include/hyp/switch.h
index e65c2956b881..a243a2a82131 100644
--- a/arch/arm64/kvm/hyp/include/hyp/switch.h
+++ b/arch/arm64/kvm/hyp/include/hyp/switch.h
@@ -49,7 +49,7 @@ static inline bool update_fp_enabled(struct kvm_vcpu *vcpu)
 	 * trap the accesses.
 	 */
 	if (!system_supports_fpsimd() ||
-	    vcpu->arch.host_thread_info->flags & _TIF_FOREIGN_FPSTATE)
+	    vcpu->arch.flags & KVM_ARM64_FP_FOREIGN_FPSTATE)
 		vcpu->arch.flags &= ~(KVM_ARM64_FP_ENABLED |
 				      KVM_ARM64_FP_HOST);
-- cgit v1.2.3

From bee14bca735a6f897a6ec3e42f3e5d2d8966e87e Mon Sep 17 00:00:00 2001
From: Marc Zyngier
Date: Thu, 21 Oct 2021 14:18:00 +0100
Subject: KVM: arm64: Stop mapping current thread_info at EL2

Now that we can track an equivalent of TIF_FOREIGN_FPSTATE, drop the
mapping of current's thread_info at EL2.

Signed-off-by: Marc Zyngier
---
 arch/arm64/include/asm/kvm_host.h       |  2 --
 arch/arm64/kvm/fpsimd.c                 | 12 +-----------
 arch/arm64/kvm/hyp/include/hyp/switch.h |  1 -
 arch/arm64/kvm/hyp/nvhe/switch.c        |  1 -
 arch/arm64/kvm/hyp/vhe/switch.c         |  1 -
 5 files changed, 1 insertion(+), 16 deletions(-)

(limited to 'arch/arm64/include/asm/kvm_host.h')

diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index 9f1703ebae15..dd5810ea37a9 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -26,7 +26,6 @@
 #include
 #include
 #include
-#include
 
 #define __KVM_HAVE_ARCH_INTC_INITIALIZED
 
@@ -321,7 +320,6 @@ struct kvm_vcpu_arch {
 	struct kvm_guest_debug_arch vcpu_debug_state;
 	struct kvm_guest_debug_arch external_debug_state;
 
-	struct thread_info *host_thread_info;	/* hyp VA */
 	struct user_fpsimd_state *host_fpsimd_state;	/* hyp VA */
 
 	struct {
diff --git a/arch/arm64/kvm/fpsimd.c b/arch/arm64/kvm/fpsimd.c
index a18b9c1744d5..b5b59b9f2eff 100644
--- a/arch/arm64/kvm/fpsimd.c
+++ b/arch/arm64/kvm/fpsimd.c
@@ -7,7 +7,6 @@
  */
 #include
 #include
-#include
 #include
 #include
 #include
@@ -28,17 +27,9 @@ int kvm_arch_vcpu_run_map_fp(struct kvm_vcpu *vcpu)
 {
 	int ret;
 
-	struct thread_info *ti = &current->thread_info;
 	struct user_fpsimd_state *fpsimd = &current->thread.uw.fpsimd_state;
 
-	/*
-	 * Make sure the host task thread flags and fpsimd state are
-	 * visible to hyp:
-	 */
-	ret = create_hyp_mappings(ti, ti + 1, PAGE_HYP);
-	if (ret)
-		goto error;
-
+	/* Make sure the host task fpsimd state is visible to hyp: */
 	ret = create_hyp_mappings(fpsimd, fpsimd + 1, PAGE_HYP);
 	if (ret)
 		goto error;
@@ -54,7 +45,6 @@ int kvm_arch_vcpu_run_map_fp(struct kvm_vcpu *vcpu)
 		goto error;
 	}
 
-	vcpu->arch.host_thread_info = kern_hyp_va(ti);
 	vcpu->arch.host_fpsimd_state = kern_hyp_va(fpsimd);
 error:
 	return ret;
diff --git a/arch/arm64/kvm/hyp/include/hyp/switch.h b/arch/arm64/kvm/hyp/include/hyp/switch.h
index a243a2a82131..11e8580f2fdc 100644
--- a/arch/arm64/kvm/hyp/include/hyp/switch.h
+++ b/arch/arm64/kvm/hyp/include/hyp/switch.h
@@ -29,7 +29,6 @@
 #include
 #include
 #include
-#include
 
 struct kvm_exception_table_entry {
 	int insn, fixup;
diff --git a/arch/arm64/kvm/hyp/nvhe/switch.c b/arch/arm64/kvm/hyp/nvhe/switch.c
index c0e3fed26d93..329c706af39f 100644
--- a/arch/arm64/kvm/hyp/nvhe/switch.c
+++ b/arch/arm64/kvm/hyp/nvhe/switch.c
@@ -25,7 +25,6 @@
 #include
 #include
 #include
-#include
 #include
 #include
diff --git a/arch/arm64/kvm/hyp/vhe/switch.c b/arch/arm64/kvm/hyp/vhe/switch.c
index 5a2cb5d9bc4b..1d162b9c78bf 100644
--- a/arch/arm64/kvm/hyp/vhe/switch.c
+++ b/arch/arm64/kvm/hyp/vhe/switch.c
@@ -24,7 +24,6 @@
 #include
 #include
 #include
-#include
 
 /* VHE specific context */
 DEFINE_PER_CPU(struct kvm_host_data, kvm_host_data);
-- cgit v1.2.3

From 052f064d42b7bac87cbcd140710f262c5f1c3c8b Mon Sep 17 00:00:00 2001
From: Marc Zyngier
Date: Thu, 14 Oct 2021 11:30:42 +0100
Subject: KVM: arm64: Move kvm_arch_vcpu_run_pid_change() out of line

Having kvm_arch_vcpu_run_pid_change() inline doesn't bring anything to
the table. Move it next to kvm_vcpu_first_run_init(), which will be
convenient for what is next to come.

Reviewed-by: Andrew Jones
Signed-off-by: Marc Zyngier
---
 arch/arm64/include/asm/kvm_host.h | 7 +------
 arch/arm64/kvm/arm.c              | 5 +++++
 2 files changed, 6 insertions(+), 6 deletions(-)

(limited to 'arch/arm64/include/asm/kvm_host.h')

diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index 2a5f7f38006f..f464e1530985 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -749,12 +749,7 @@ static inline bool kvm_pmu_counter_deferred(struct perf_event_attr *attr)
 void kvm_arch_vcpu_load_debug_state_flags(struct kvm_vcpu *vcpu);
 void kvm_arch_vcpu_put_debug_state_flags(struct kvm_vcpu *vcpu);
 
-#ifdef CONFIG_KVM /* Avoid conflicts with core headers if CONFIG_KVM=n */
-static inline int kvm_arch_vcpu_run_pid_change(struct kvm_vcpu *vcpu)
-{
-	return kvm_arch_vcpu_run_map_fp(vcpu);
-}
-
+#ifdef CONFIG_KVM
 void kvm_set_pmu_events(u32 set, struct perf_event_attr *attr);
 void kvm_clr_pmu_events(u32 clr);
 
diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
index e4727dc771bf..e939b3d06682 100644
--- a/arch/arm64/kvm/arm.c
+++ b/arch/arm64/kvm/arm.c
@@ -584,6 +584,11 @@ static void update_vmid(struct kvm_vmid *vmid)
 	spin_unlock(&kvm_vmid_lock);
 }
 
+int kvm_arch_vcpu_run_pid_change(struct kvm_vcpu *vcpu)
+{
+	return kvm_arch_vcpu_run_map_fp(vcpu);
+}
+
 static int kvm_vcpu_first_run_init(struct kvm_vcpu *vcpu)
 {
 	struct kvm *kvm = vcpu->kvm;
-- cgit v1.2.3

From cc5705fb1bf119ebb693d594f0157e0dd418590e Mon Sep 17 00:00:00 2001
From: Marc Zyngier
Date: Thu, 14 Oct 2021 12:13:06 +0100
Subject: KVM: arm64: Drop vcpu->arch.has_run_once for vcpu->pid

With the transition to kvm_arch_vcpu_run_pid_change() to handle the
"run once" activities, it becomes obvious that has_run_once is now an
exact shadow of vcpu->pid.

Replace vcpu->arch.has_run_once with a new vcpu_has_run_once() helper
that directly checks for vcpu->pid, and get rid of the now unused
field.
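To illustrate the intended usage (an example only, not a call site
taken from this patch), checks that used to read the field now query
the helper, with vcpu->pid being assigned on the first KVM_RUN:

	/* Example: refuse reconfiguration once the vcpu has entered the guest */
	if (vcpu_has_run_once(vcpu))
		return -EBUSY;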
Reviewed-by: Andrew Jones
Signed-off-by: Marc Zyngier
---
 arch/arm64/include/asm/kvm_host.h | 5 ++---
 arch/arm64/kvm/arm.c              | 8 +++-----
 arch/arm64/kvm/vgic/vgic-init.c   | 2 +-
 3 files changed, 6 insertions(+), 9 deletions(-)

(limited to 'arch/arm64/include/asm/kvm_host.h')

diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index f464e1530985..cbb5ff81919b 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -367,9 +367,6 @@ struct kvm_vcpu_arch {
 	int target;
 	DECLARE_BITMAP(features, KVM_VCPU_MAX_FEATURES);
 
-	/* Detect first run of a vcpu */
-	bool has_run_once;
-
 	/* Virtual SError ESR to restore when HCR_EL2.VSE is set */
 	u64 vsesr_el2;
 
@@ -606,6 +603,8 @@ int __kvm_arm_vcpu_set_events(struct kvm_vcpu *vcpu,
 void kvm_arm_halt_guest(struct kvm *kvm);
 void kvm_arm_resume_guest(struct kvm *kvm);
 
+#define vcpu_has_run_once(vcpu)	!!rcu_access_pointer((vcpu)->pid)
+
 #ifndef __KVM_NVHE_HYPERVISOR__
 #define kvm_call_hyp_nvhe(f, ...)					\
 	({								\
diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
index f20a265972c7..c79d8e5230bc 100644
--- a/arch/arm64/kvm/arm.c
+++ b/arch/arm64/kvm/arm.c
@@ -351,7 +351,7 @@ void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
 
 void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
 {
-	if (vcpu->arch.has_run_once && unlikely(!irqchip_in_kernel(vcpu->kvm)))
+	if (vcpu_has_run_once(vcpu) && unlikely(!irqchip_in_kernel(vcpu->kvm)))
 		static_branch_dec(&userspace_irqchip_in_use);
 
 	kvm_mmu_free_memory_cache(&vcpu->arch.mmu_page_cache);
@@ -609,7 +609,7 @@ int kvm_arch_vcpu_run_pid_change(struct kvm_vcpu *vcpu)
 	if (ret)
 		return ret;
 
-	if (likely(vcpu->arch.has_run_once))
+	if (likely(vcpu_has_run_once(vcpu)))
 		return 0;
 
 	kvm_arm_vcpu_init_debug(vcpu);
@@ -640,8 +640,6 @@ int kvm_arch_vcpu_run_pid_change(struct kvm_vcpu *vcpu)
 		static_branch_inc(&userspace_irqchip_in_use);
 	}
 
-	vcpu->arch.has_run_once = true;
-
 	/*
 	 * Initialize traps for protected VMs.
 	 * NOTE: Move to run in EL2 directly, rather than via a hypercall, once
@@ -1132,7 +1130,7 @@ static int kvm_arch_vcpu_ioctl_vcpu_init(struct kvm_vcpu *vcpu,
 	 * need to invalidate the I-cache though, as FWB does *not*
 	 * imply CTR_EL0.DIC.
	 */
-	if (vcpu->arch.has_run_once) {
+	if (vcpu_has_run_once(vcpu)) {
 		if (!cpus_have_final_cap(ARM64_HAS_STAGE2_FWB))
 			stage2_unmap_vm(vcpu->kvm);
 		else
diff --git a/arch/arm64/kvm/vgic/vgic-init.c b/arch/arm64/kvm/vgic/vgic-init.c
index 0a06d0648970..ce2b42c38e62 100644
--- a/arch/arm64/kvm/vgic/vgic-init.c
+++ b/arch/arm64/kvm/vgic/vgic-init.c
@@ -91,7 +91,7 @@ int kvm_vgic_create(struct kvm *kvm, u32 type)
 		return ret;
 
 	kvm_for_each_vcpu(i, vcpu, kvm) {
-		if (vcpu->arch.has_run_once)
+		if (vcpu_has_run_once(vcpu))
 			goto out_unlock;
 	}
 	ret = 0;
-- cgit v1.2.3

From 005467e06b16261ffdd7130ff0b4f0ebd627599a Mon Sep 17 00:00:00 2001
From: Sean Christopherson
Date: Fri, 8 Oct 2021 19:12:02 -0700
Subject: KVM: Drop obsolete kvm_arch_vcpu_block_finish()

Drop kvm_arch_vcpu_block_finish() now that all arch implementations are
nops.

No functional change intended.
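For context, the implementations being dropped (quoted from the hunks
below) all have the same empty shape, s390 being the only out-of-line
variant:

	static inline void kvm_arch_vcpu_block_finish(struct kvm_vcpu *vcpu) {}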
Acked-by: Christian Borntraeger
Reviewed-by: David Matlack
Signed-off-by: Sean Christopherson
Message-Id: <20211009021236.4122790-10-seanjc@google.com>
Signed-off-by: Paolo Bonzini
---
 arch/arm64/include/asm/kvm_host.h   | 1 -
 arch/mips/include/asm/kvm_host.h    | 1 -
 arch/powerpc/include/asm/kvm_host.h | 1 -
 arch/riscv/include/asm/kvm_host.h   | 1 -
 arch/s390/include/asm/kvm_host.h    | 2 --
 arch/s390/kvm/kvm-s390.c            | 5 -----
 arch/x86/include/asm/kvm_host.h     | 2 --
 virt/kvm/kvm_main.c                 | 1 -
 8 files changed, 14 deletions(-)

(limited to 'arch/arm64/include/asm/kvm_host.h')

diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index 2a5f7f38006f..0e75277be5d5 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -717,7 +717,6 @@ void kvm_arm_vcpu_ptrauth_trap(struct kvm_vcpu *vcpu);
 static inline void kvm_arch_hardware_unsetup(void) {}
 static inline void kvm_arch_sync_events(struct kvm *kvm) {}
 static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {}
-static inline void kvm_arch_vcpu_block_finish(struct kvm_vcpu *vcpu) {}
 
 void kvm_arm_init_debug(void);
 void kvm_arm_vcpu_init_debug(struct kvm_vcpu *vcpu);
diff --git a/arch/mips/include/asm/kvm_host.h b/arch/mips/include/asm/kvm_host.h
index 696f6b009377..72b90d45a46e 100644
--- a/arch/mips/include/asm/kvm_host.h
+++ b/arch/mips/include/asm/kvm_host.h
@@ -897,7 +897,6 @@ static inline void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen) {}
 static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {}
 static inline void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu) {}
 static inline void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu) {}
-static inline void kvm_arch_vcpu_block_finish(struct kvm_vcpu *vcpu) {}
 
 #define __KVM_HAVE_ARCH_FLUSH_REMOTE_TLB
 int kvm_arch_flush_remote_tlb(struct kvm *kvm);
diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h
index 6ec97eff9563..6e92b2c7a938 100644
--- a/arch/powerpc/include/asm/kvm_host.h
+++ b/arch/powerpc/include/asm/kvm_host.h
@@ -865,6 +865,5 @@ static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {}
 static inline void kvm_arch_exit(void) {}
 static inline void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu) {}
 static inline void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu) {}
-static inline void kvm_arch_vcpu_block_finish(struct kvm_vcpu *vcpu) {}
 
 #endif /* __POWERPC_KVM_HOST_H__ */
diff --git a/arch/riscv/include/asm/kvm_host.h b/arch/riscv/include/asm/kvm_host.h
index 2639b9ee48f9..907fafea787e 100644
--- a/arch/riscv/include/asm/kvm_host.h
+++ b/arch/riscv/include/asm/kvm_host.h
@@ -208,7 +208,6 @@ struct kvm_vcpu_arch {
 static inline void kvm_arch_hardware_unsetup(void) {}
 static inline void kvm_arch_sync_events(struct kvm *kvm) {}
 static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {}
-static inline void kvm_arch_vcpu_block_finish(struct kvm_vcpu *vcpu) {}
 
 #define KVM_ARCH_WANT_MMU_NOTIFIER
 
diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h
index a604d51acfc8..a22c9266ea05 100644
--- a/arch/s390/include/asm/kvm_host.h
+++ b/arch/s390/include/asm/kvm_host.h
@@ -1010,6 +1010,4 @@ static inline void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
 static inline void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu) {}
 static inline void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu) {}
 
-void kvm_arch_vcpu_block_finish(struct kvm_vcpu *vcpu);
-
 #endif
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index 8f577c76fcd3..dd099d352753 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -5047,11 +5047,6 @@ static inline unsigned long nonhyp_mask(int i)
 	return 0x0000ffffffffffffUL >> (nonhyp_fai << 4);
 }
 
-void kvm_arch_vcpu_block_finish(struct kvm_vcpu *vcpu)
-{
-
-}
-
 static int __init kvm_s390_init(void)
 {
 	int i;
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index be1ad66cf39a..c07b30877de0 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -1931,8 +1931,6 @@ static inline void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu)
 	static_call_cond(kvm_x86_vcpu_unblocking)(vcpu);
 }
 
-static inline void kvm_arch_vcpu_block_finish(struct kvm_vcpu *vcpu) {}
-
 static inline int kvm_cpu_get_apicid(int mps_cpu)
 {
 #ifdef CONFIG_X86_LOCAL_APIC
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index cc68d21a8e58..53c58606e1e2 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -3376,7 +3376,6 @@ out:
 	}
 
 	trace_kvm_vcpu_wakeup(block_ns, waited, vcpu_valid_wakeup(vcpu));
-	kvm_arch_vcpu_block_finish(vcpu);
 }
 EXPORT_SYMBOL_GPL(kvm_vcpu_block);
 
-- cgit v1.2.3

From 142ff9bddbde757674c7081ffc238cfcffa1859b Mon Sep 17 00:00:00 2001
From: Marc Zyngier
Date: Wed, 8 Dec 2021 14:46:41 +0000
Subject: KVM: arm64: Drop unused workaround_flags vcpu field

workaround_flags is a leftover from our earlier Spectre-v4 workaround
implementation, and now serves no purpose. Get rid of the field and the
corresponding asm-offset definition.

Fixes: 29e8910a566a ("KVM: arm64: Simplify handling of ARCH_WORKAROUND_2")
Signed-off-by: Marc Zyngier
---
 arch/arm64/include/asm/kvm_host.h | 3 ---
 arch/arm64/kernel/asm-offsets.c   | 1 -
 2 files changed, 4 deletions(-)

(limited to 'arch/arm64/include/asm/kvm_host.h')

diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index 2a5f7f38006f..3811d19e5674 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -298,9 +298,6 @@ struct kvm_vcpu_arch {
 	/* Exception Information */
 	struct kvm_vcpu_fault_info fault;
 
-	/* State of various workarounds, see kvm_asm.h for bit assignment */
-	u64 workaround_flags;
-
 	/* Miscellaneous vcpu state flags */
 	u64 flags;
 
diff --git a/arch/arm64/kernel/asm-offsets.c b/arch/arm64/kernel/asm-offsets.c
index 6d0c3afd36b8..1197e7679882 100644
--- a/arch/arm64/kernel/asm-offsets.c
+++ b/arch/arm64/kernel/asm-offsets.c
@@ -111,7 +111,6 @@ int main(void)
 #ifdef CONFIG_KVM
   DEFINE(VCPU_CONTEXT,		offsetof(struct kvm_vcpu, arch.ctxt));
   DEFINE(VCPU_FAULT_DISR,	offsetof(struct kvm_vcpu, arch.fault.disr_el1));
-  DEFINE(VCPU_WORKAROUND_FLAGS,	offsetof(struct kvm_vcpu, arch.workaround_flags));
   DEFINE(VCPU_HCR_EL2,		offsetof(struct kvm_vcpu, arch.hcr_el2));
   DEFINE(CPU_USER_PT_REGS,	offsetof(struct kvm_cpu_context, regs));
   DEFINE(CPU_RGSR_EL1,		offsetof(struct kvm_cpu_context, sys_regs[RGSR_EL1]));
-- cgit v1.2.3

From 52b28657ebd7cd20e931ce71190f235d0fa018a6 Mon Sep 17 00:00:00 2001
From: Quentin Perret
Date: Wed, 15 Dec 2021 16:12:31 +0000
Subject: KVM: arm64: pkvm: Unshare guest structs during teardown

Make use of the newly introduced unshare hypercall during guest teardown
to unmap guest-related data structures from the hyp stage-1.
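The resulting lifecycle can be sketched as follows (condensed from the
hunks below rather than quoted verbatim): everything shared with the
hypervisor while setting the vcpu up is now unshared on the way out:

	/* setup, kvm_arch_vcpu_run_map_fp(): share the host FPSIMD state */
	ret = kvm_share_hyp(fpsimd, fpsimd + 1);

	/* teardown, kvm_arm_vcpu_destroy(): undo every share */
	kvm_vcpu_unshare_task_fp(vcpu);
	kvm_unshare_hyp(vcpu, vcpu + 1);
	kvm_unshare_hyp(sve_state, sve_state + vcpu_sve_state_size(vcpu));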
Signed-off-by: Quentin Perret
Signed-off-by: Marc Zyngier
Link: https://lore.kernel.org/r/20211215161232.1480836-15-qperret@google.com
---
 arch/arm64/include/asm/kvm_host.h |  2 ++
 arch/arm64/include/asm/kvm_mmu.h  |  1 +
 arch/arm64/kvm/arm.c              |  2 ++
 arch/arm64/kvm/fpsimd.c           | 34 ++++++++++++++++++++++++++++---
 arch/arm64/kvm/mmu.c              | 42 +++++++++++++++++++++++++++++++++++++++
 arch/arm64/kvm/reset.c            |  8 +++++++-
 6 files changed, 85 insertions(+), 4 deletions(-)

(limited to 'arch/arm64/include/asm/kvm_host.h')

diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index cf858a7e3533..9360a2804df1 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -321,6 +321,7 @@ struct kvm_vcpu_arch {
 	struct kvm_guest_debug_arch external_debug_state;
 
 	struct user_fpsimd_state *host_fpsimd_state;	/* hyp VA */
+	struct task_struct *parent_task;
 
 	struct {
 		/* {Break,watch}point registers */
@@ -737,6 +738,7 @@ void kvm_arch_vcpu_load_fp(struct kvm_vcpu *vcpu);
 void kvm_arch_vcpu_ctxflush_fp(struct kvm_vcpu *vcpu);
 void kvm_arch_vcpu_ctxsync_fp(struct kvm_vcpu *vcpu);
 void kvm_arch_vcpu_put_fp(struct kvm_vcpu *vcpu);
+void kvm_vcpu_unshare_task_fp(struct kvm_vcpu *vcpu);
 
 static inline bool kvm_pmu_counter_deferred(struct perf_event_attr *attr)
 {
diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h
index 185d0f62b724..81839e9a8a24 100644
--- a/arch/arm64/include/asm/kvm_mmu.h
+++ b/arch/arm64/include/asm/kvm_mmu.h
@@ -151,6 +151,7 @@ static __always_inline unsigned long __kern_hyp_va(unsigned long v)
 #include
 
 int kvm_share_hyp(void *from, void *to);
+void kvm_unshare_hyp(void *from, void *to);
 int create_hyp_mappings(void *from, void *to, enum kvm_pgtable_prot prot);
 int create_hyp_io_mappings(phys_addr_t phys_addr, size_t size,
 			   void __iomem **kaddr,
diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
index c202abb448b1..6057f3c5aafe 100644
--- a/arch/arm64/kvm/arm.c
+++ b/arch/arm64/kvm/arm.c
@@ -188,6 +188,8 @@ void kvm_arch_destroy_vm(struct kvm *kvm)
 		}
 	}
 	atomic_set(&kvm->online_vcpus, 0);
+
+	kvm_unshare_hyp(kvm, kvm + 1);
 }
 
 int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
diff --git a/arch/arm64/kvm/fpsimd.c b/arch/arm64/kvm/fpsimd.c
index 86899d3aa9a9..2f48fd362a8c 100644
--- a/arch/arm64/kvm/fpsimd.c
+++ b/arch/arm64/kvm/fpsimd.c
@@ -14,6 +14,19 @@
 #include
 #include
 
+void kvm_vcpu_unshare_task_fp(struct kvm_vcpu *vcpu)
+{
+	struct task_struct *p = vcpu->arch.parent_task;
+	struct user_fpsimd_state *fpsimd;
+
+	if (!is_protected_kvm_enabled() || !p)
+		return;
+
+	fpsimd = &p->thread.uw.fpsimd_state;
+	kvm_unshare_hyp(fpsimd, fpsimd + 1);
+	put_task_struct(p);
+}
+
 /*
  * Called on entry to KVM_RUN unless this vcpu previously ran at least
  * once and the most recent prior KVM_RUN for this vcpu was called from
@@ -29,12 +42,27 @@ int kvm_arch_vcpu_run_map_fp(struct kvm_vcpu *vcpu)
 	struct user_fpsimd_state *fpsimd = &current->thread.uw.fpsimd_state;
 
+	kvm_vcpu_unshare_task_fp(vcpu);
+
 	/* Make sure the host task fpsimd state is visible to hyp: */
 	ret = kvm_share_hyp(fpsimd, fpsimd + 1);
-	if (!ret)
-		vcpu->arch.host_fpsimd_state = kern_hyp_va(fpsimd);
+	if (ret)
+		return ret;
+
+	vcpu->arch.host_fpsimd_state = kern_hyp_va(fpsimd);
+
+	/*
+	 * We need to keep current's task_struct pinned until its data has been
+	 * unshared with the hypervisor to make sure it is not re-used by the
+	 * kernel and donated to someone else while already shared -- see
+	 * kvm_vcpu_unshare_task_fp() for the matching put_task_struct().
+	 */
+	if (is_protected_kvm_enabled()) {
+		get_task_struct(current);
+		vcpu->arch.parent_task = current;
+	}
 
-	return ret;
+	return 0;
 }
 
 /*
diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c
index cf6c52ff8b87..b1edcc950d08 100644
--- a/arch/arm64/kvm/mmu.c
+++ b/arch/arm64/kvm/mmu.c
@@ -341,6 +341,32 @@ unlock:
 	return ret;
 }
 
+static int unshare_pfn_hyp(u64 pfn)
+{
+	struct rb_node **node, *parent;
+	struct hyp_shared_pfn *this;
+	int ret = 0;
+
+	mutex_lock(&hyp_shared_pfns_lock);
+	this = find_shared_pfn(pfn, &node, &parent);
+	if (WARN_ON(!this)) {
+		ret = -ENOENT;
+		goto unlock;
+	}
+
+	this->count--;
+	if (this->count)
+		goto unlock;
+
+	rb_erase(&this->node, &hyp_shared_pfns);
+	kfree(this);
+	ret = kvm_call_hyp_nvhe(__pkvm_host_unshare_hyp, pfn, 1);
+unlock:
+	mutex_unlock(&hyp_shared_pfns_lock);
+
+	return ret;
+}
+
 int kvm_share_hyp(void *from, void *to)
 {
 	phys_addr_t start, end, cur;
@@ -373,6 +399,22 @@ int kvm_share_hyp(void *from, void *to)
 	return 0;
 }
 
+void kvm_unshare_hyp(void *from, void *to)
+{
+	phys_addr_t start, end, cur;
+	u64 pfn;
+
+	if (is_kernel_in_hyp_mode() || kvm_host_owns_hyp_mappings() || !from)
+		return;
+
+	start = ALIGN_DOWN(__pa(from), PAGE_SIZE);
+	end = PAGE_ALIGN(__pa(to));
+	for (cur = start; cur < end; cur += PAGE_SIZE) {
+		pfn = __phys_to_pfn(cur);
+		WARN_ON(unshare_pfn_hyp(pfn));
+	}
+}
+
 /**
  * create_hyp_mappings - duplicate a kernel virtual address range in Hyp mode
  * @from:	The virtual kernel start address of the range
diff --git a/arch/arm64/kvm/reset.c b/arch/arm64/kvm/reset.c
index e3e2a79fbd75..798a84eddbde 100644
--- a/arch/arm64/kvm/reset.c
+++ b/arch/arm64/kvm/reset.c
@@ -150,7 +150,13 @@ bool kvm_arm_vcpu_is_finalized(struct kvm_vcpu *vcpu)
 
 void kvm_arm_vcpu_destroy(struct kvm_vcpu *vcpu)
 {
-	kfree(vcpu->arch.sve_state);
+	void *sve_state = vcpu->arch.sve_state;
+
+	kvm_vcpu_unshare_task_fp(vcpu);
+	kvm_unshare_hyp(vcpu, vcpu + 1);
+	if (sve_state)
+		kvm_unshare_hyp(sve_state, sve_state + vcpu_sve_state_size(vcpu));
+	kfree(sve_state);
 }
 
 static void kvm_vcpu_reset_sve(struct kvm_vcpu *vcpu)
-- cgit v1.2.3