Diffstat (limited to 'arch/arm64/include/asm/kvm_emulate.h')
-rw-r--r-- | arch/arm64/include/asm/kvm_emulate.h | 185
1 file changed, 56 insertions, 129 deletions
diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h
index cf811009a33c..0720898f563e 100644
--- a/arch/arm64/include/asm/kvm_emulate.h
+++ b/arch/arm64/include/asm/kvm_emulate.h
@@ -184,29 +184,30 @@ static inline bool vcpu_is_el2(const struct kvm_vcpu *vcpu)
         return vcpu_is_el2_ctxt(&vcpu->arch.ctxt);
 }

-static inline bool __vcpu_el2_e2h_is_set(const struct kvm_cpu_context *ctxt)
+static inline bool vcpu_el2_e2h_is_set(const struct kvm_vcpu *vcpu)
 {
         return (!cpus_have_final_cap(ARM64_HAS_HCR_NV1) ||
-                (ctxt_sys_reg(ctxt, HCR_EL2) & HCR_E2H));
+                (__vcpu_sys_reg(vcpu, HCR_EL2) & HCR_E2H));
 }

-static inline bool vcpu_el2_e2h_is_set(const struct kvm_vcpu *vcpu)
+static inline bool vcpu_el2_tge_is_set(const struct kvm_vcpu *vcpu)
 {
-        return __vcpu_el2_e2h_is_set(&vcpu->arch.ctxt);
+        return ctxt_sys_reg(&vcpu->arch.ctxt, HCR_EL2) & HCR_TGE;
 }

-static inline bool __vcpu_el2_tge_is_set(const struct kvm_cpu_context *ctxt)
+static inline bool is_hyp_ctxt(const struct kvm_vcpu *vcpu)
 {
-        return ctxt_sys_reg(ctxt, HCR_EL2) & HCR_TGE;
-}
+        bool e2h, tge;
+        u64 hcr;

-static inline bool vcpu_el2_tge_is_set(const struct kvm_vcpu *vcpu)
-{
-        return __vcpu_el2_tge_is_set(&vcpu->arch.ctxt);
-}
+        if (!vcpu_has_nv(vcpu))
+                return false;
+
+        hcr = __vcpu_sys_reg(vcpu, HCR_EL2);
+
+        e2h = (hcr & HCR_E2H);
+        tge = (hcr & HCR_TGE);

-static inline bool __is_hyp_ctxt(const struct kvm_cpu_context *ctxt)
-{
         /*
          * We are in a hypervisor context if the vcpu mode is EL2 or
          * E2H and TGE bits are set. The latter means we are in the user space
@@ -215,14 +216,7 @@ static inline bool __is_hyp_ctxt(const struct kvm_cpu_context *ctxt)
          * Note that the HCR_EL2.{E2H,TGE}={0,1} isn't really handled in the
          * rest of the KVM code, and will result in a misbehaving guest.
          */
-        return vcpu_is_el2_ctxt(ctxt) ||
-               (__vcpu_el2_e2h_is_set(ctxt) && __vcpu_el2_tge_is_set(ctxt)) ||
-               __vcpu_el2_tge_is_set(ctxt);
-}
-
-static inline bool is_hyp_ctxt(const struct kvm_vcpu *vcpu)
-{
-        return vcpu_has_nv(vcpu) && __is_hyp_ctxt(&vcpu->arch.ctxt);
+        return vcpu_is_el2(vcpu) || (e2h && tge) || tge;
 }

 static inline bool vcpu_is_host_el0(const struct kvm_vcpu *vcpu)
@@ -281,6 +275,19 @@ static __always_inline u64 kvm_vcpu_get_esr(const struct kvm_vcpu *vcpu)
         return vcpu->arch.fault.esr_el2;
 }

+static inline bool guest_hyp_wfx_traps_enabled(const struct kvm_vcpu *vcpu)
+{
+        u64 esr = kvm_vcpu_get_esr(vcpu);
+        bool is_wfe = !!(esr & ESR_ELx_WFx_ISS_WFE);
+        u64 hcr_el2 = __vcpu_sys_reg(vcpu, HCR_EL2);
+
+        if (!vcpu_has_nv(vcpu) || vcpu_is_el2(vcpu))
+                return false;
+
+        return ((is_wfe && (hcr_el2 & HCR_TWE)) ||
+                (!is_wfe && (hcr_el2 & HCR_TWI)));
+}
+
 static __always_inline int kvm_vcpu_get_condition(const struct kvm_vcpu *vcpu)
 {
         u64 esr = kvm_vcpu_get_esr(vcpu);
@@ -298,7 +305,12 @@ static __always_inline unsigned long kvm_vcpu_get_hfar(const struct kvm_vcpu *vc

 static __always_inline phys_addr_t kvm_vcpu_get_fault_ipa(const struct kvm_vcpu *vcpu)
 {
-        return ((phys_addr_t)vcpu->arch.fault.hpfar_el2 & HPFAR_MASK) << 8;
+        u64 hpfar = vcpu->arch.fault.hpfar_el2;
+
+        if (unlikely(!(hpfar & HPFAR_EL2_NS)))
+                return INVALID_GPA;
+
+        return FIELD_GET(HPFAR_EL2_FIPA, hpfar) << 12;
 }

 static inline u64 kvm_vcpu_get_disr(const struct kvm_vcpu *vcpu)
@@ -549,110 +561,6 @@ static __always_inline void kvm_incr_pc(struct kvm_vcpu *vcpu)
                 vcpu_set_flag((v), e);        \
         } while (0)

-#define __build_check_all_or_none(r, bits)        \
-        BUILD_BUG_ON(((r) & (bits)) && ((r) & (bits)) != (bits))
-
-#define __cpacr_to_cptr_clr(clr, set)        \
-        ({        \
-                u64 cptr = 0;        \
-        \
-                if ((set) & CPACR_ELx_FPEN)        \
-                        cptr |= CPTR_EL2_TFP;        \
-                if ((set) & CPACR_ELx_ZEN)        \
-                        cptr |= CPTR_EL2_TZ;        \
-                if ((set) & CPACR_ELx_SMEN)        \
-                        cptr |= CPTR_EL2_TSM;        \
-                if ((clr) & CPACR_ELx_TTA)        \
-                        cptr |= CPTR_EL2_TTA;        \
-                if ((clr) & CPTR_EL2_TAM)        \
-                        cptr |= CPTR_EL2_TAM;        \
-                if ((clr) & CPTR_EL2_TCPAC)        \
-                        cptr |= CPTR_EL2_TCPAC;        \
-        \
-                cptr;        \
-        })
-
-#define __cpacr_to_cptr_set(clr, set)        \
-        ({        \
-                u64 cptr = 0;        \
-        \
-                if ((clr) & CPACR_ELx_FPEN)        \
-                        cptr |= CPTR_EL2_TFP;        \
-                if ((clr) & CPACR_ELx_ZEN)        \
-                        cptr |= CPTR_EL2_TZ;        \
-                if ((clr) & CPACR_ELx_SMEN)        \
-                        cptr |= CPTR_EL2_TSM;        \
-                if ((set) & CPACR_ELx_TTA)        \
-                        cptr |= CPTR_EL2_TTA;        \
-                if ((set) & CPTR_EL2_TAM)        \
-                        cptr |= CPTR_EL2_TAM;        \
-                if ((set) & CPTR_EL2_TCPAC)        \
-                        cptr |= CPTR_EL2_TCPAC;        \
-        \
-                cptr;        \
-        })
-
-#define cpacr_clear_set(clr, set)        \
-        do {        \
-                BUILD_BUG_ON((set) & CPTR_VHE_EL2_RES0);        \
-                BUILD_BUG_ON((clr) & CPACR_ELx_E0POE);        \
-                __build_check_all_or_none((clr), CPACR_ELx_FPEN);        \
-                __build_check_all_or_none((set), CPACR_ELx_FPEN);        \
-                __build_check_all_or_none((clr), CPACR_ELx_ZEN);        \
-                __build_check_all_or_none((set), CPACR_ELx_ZEN);        \
-                __build_check_all_or_none((clr), CPACR_ELx_SMEN);        \
-                __build_check_all_or_none((set), CPACR_ELx_SMEN);        \
-        \
-                if (has_vhe() || has_hvhe())        \
-                        sysreg_clear_set(cpacr_el1, clr, set);        \
-                else        \
-                        sysreg_clear_set(cptr_el2,        \
-                                         __cpacr_to_cptr_clr(clr, set),        \
-                                         __cpacr_to_cptr_set(clr, set));\
-        } while (0)
-
-static __always_inline void kvm_write_cptr_el2(u64 val)
-{
-        if (has_vhe() || has_hvhe())
-                write_sysreg(val, cpacr_el1);
-        else
-                write_sysreg(val, cptr_el2);
-}
-
-static __always_inline u64 kvm_get_reset_cptr_el2(struct kvm_vcpu *vcpu)
-{
-        u64 val;
-
-        if (has_vhe()) {
-                val = (CPACR_ELx_FPEN | CPACR_EL1_ZEN_EL1EN);
-                if (cpus_have_final_cap(ARM64_SME))
-                        val |= CPACR_EL1_SMEN_EL1EN;
-        } else if (has_hvhe()) {
-                val = CPACR_ELx_FPEN;
-
-                if (!vcpu_has_sve(vcpu) || !guest_owns_fp_regs())
-                        val |= CPACR_ELx_ZEN;
-                if (cpus_have_final_cap(ARM64_SME))
-                        val |= CPACR_ELx_SMEN;
-        } else {
-                val = CPTR_NVHE_EL2_RES1;
-
-                if (vcpu_has_sve(vcpu) && guest_owns_fp_regs())
-                        val |= CPTR_EL2_TZ;
-                if (cpus_have_final_cap(ARM64_SME))
-                        val &= ~CPTR_EL2_TSM;
-        }
-
-        return val;
-}
-
-static __always_inline void kvm_reset_cptr_el2(struct kvm_vcpu *vcpu)
-{
-        u64 val = kvm_get_reset_cptr_el2(vcpu);
-
-        kvm_write_cptr_el2(val);
-}
-
 /*
  * Returns a 'sanitised' view of CPTR_EL2, translating from nVHE to the VHE
  * format if E2H isn't set.
@@ -685,7 +593,7 @@ static inline bool ____cptr_xen_trap_enabled(const struct kvm_vcpu *vcpu,
 #define __guest_hyp_cptr_xen_trap_enabled(vcpu, xen)        \
         (!vcpu_has_nv(vcpu) ? false :        \
                 ____cptr_xen_trap_enabled(vcpu,        \
-                        SYS_FIELD_GET(CPACR_ELx, xen,        \
+                        SYS_FIELD_GET(CPACR_EL1, xen,        \
                                       vcpu_sanitised_cptr_el2(vcpu))))

 static inline bool guest_hyp_fpsimd_traps_enabled(const struct kvm_vcpu *vcpu)
@@ -698,8 +606,27 @@ static inline bool guest_hyp_sve_traps_enabled(const struct kvm_vcpu *vcpu)
         return __guest_hyp_cptr_xen_trap_enabled(vcpu, ZEN);
 }

-static inline void kvm_vcpu_enable_ptrauth(struct kvm_vcpu *vcpu)
+static inline void vcpu_set_hcrx(struct kvm_vcpu *vcpu)
 {
-        vcpu_set_flag(vcpu, GUEST_HAS_PTRAUTH);
+        struct kvm *kvm = vcpu->kvm;
+
+        if (cpus_have_final_cap(ARM64_HAS_HCX)) {
+                /*
+                 * In general, all HCRX_EL2 bits are gated by a feature.
+                 * The only reason we can set SMPME without checking any
+                 * feature is that its effects are not directly observable
+                 * from the guest.
+                 */
+                vcpu->arch.hcrx_el2 = HCRX_EL2_SMPME;
+
+                if (kvm_has_feat(kvm, ID_AA64ISAR2_EL1, MOPS, IMP))
+                        vcpu->arch.hcrx_el2 |= (HCRX_EL2_MSCEn | HCRX_EL2_MCE2);
+
+                if (kvm_has_tcr2(kvm))
+                        vcpu->arch.hcrx_el2 |= HCRX_EL2_TCR2En;
+
+                if (kvm_has_fpmr(kvm))
+                        vcpu->arch.hcrx_el2 |= HCRX_EL2_EnFPM;
+        }
 }
 #endif /* __ARM64_KVM_EMULATE_H__ */
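
The first two hunks fold __is_hyp_ctxt() into is_hyp_ctxt() so that HCR_EL2 is read once and the predicate is evaluated directly on the vcpu. As a rough standalone illustration (not kernel code) of the resulting condition, the sketch below models it as a pure function of the inputs it reads; only the names HCR_E2H, HCR_TGE and the shape of the return expression come from the diff, the bit positions are assumptions for the example.

/* hyp_ctxt_sketch.c - standalone model of the reworked is_hyp_ctxt() test */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define HCR_E2H (UINT64_C(1) << 34)   /* assumed bit position, demo only */
#define HCR_TGE (UINT64_C(1) << 27)   /* assumed bit position, demo only */

static bool is_hyp_ctxt_model(bool has_nv, bool is_el2, uint64_t hcr_el2)
{
        bool e2h = hcr_el2 & HCR_E2H;
        bool tge = hcr_el2 & HCR_TGE;

        if (!has_nv)
                return false;

        /* same shape as the new helper: vEL2, or E2H+TGE, or TGE on its own */
        return is_el2 || (e2h && tge) || tge;
}

int main(void)
{
        /* vEL1 with HCR_EL2.TGE set counts as a hyp context (host EL0 case) */
        printf("%d\n", is_hyp_ctxt_model(true, false, HCR_TGE));
        /* without nested virt the answer is always false */
        printf("%d\n", is_hyp_ctxt_model(false, true, HCR_E2H | HCR_TGE));
        return 0;
}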
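
The kvm_vcpu_get_fault_ipa() hunk replaces the open-coded mask-and-shift with a check of HPFAR_EL2.NS followed by FIELD_GET(HPFAR_EL2_FIPA, ...) << 12. Below is a minimal userspace sketch of that arithmetic; the field layout (NS at bit 63, FIPA in bits [47:4]) and the INVALID_GPA value are assumptions made purely for illustration, only the symbolic names and the final shift come from the diff.

/* hpfar_sketch.c - model of the new fault-IPA extraction, not kernel code */
#include <stdint.h>
#include <stdio.h>

#define HPFAR_NS         (UINT64_C(1) << 63)                            /* assumed NS flag position */
#define HPFAR_FIPA_SHIFT 4                                              /* assumed FIPA field start */
#define HPFAR_FIPA_MASK  (((UINT64_C(1) << 44) - 1) << HPFAR_FIPA_SHIFT) /* assumed [47:4] */
#define INVALID_GPA      (~UINT64_C(0))                                 /* stand-in for the kernel constant */

static uint64_t fault_ipa(uint64_t hpfar)
{
        /* mirrors the unlikely(!(hpfar & HPFAR_EL2_NS)) early-out in the diff */
        if (!(hpfar & HPFAR_NS))
                return INVALID_GPA;

        /* FIELD_GET(FIPA) << 12: pull the field out, rebuild the page-aligned IPA */
        return ((hpfar & HPFAR_FIPA_MASK) >> HPFAR_FIPA_SHIFT) << 12;
}

int main(void)
{
        /* encode a fault at IPA 0x812345000: FIPA holds IPA >> 12 */
        uint64_t hpfar = HPFAR_NS | ((UINT64_C(0x812345000) >> 12) << HPFAR_FIPA_SHIFT);

        printf("fault IPA = 0x%llx\n", (unsigned long long)fault_ipa(hpfar));
        return 0;
}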