author     Oliver Upton <oliver.upton@linux.dev>    2024-03-07 03:50:48 +0300
committer  Oliver Upton <oliver.upton@linux.dev>    2024-03-07 03:51:31 +0300
commit     0d874858c6bfdaf95f8df26856dbc7a57f3c0128
tree       d2dd669fd60014a883db27abc19ecbe618d7d373 /arch/arm64/kvm
parent     a040adfb7ef1a895be704d16375cbfe1fbb0aa9c
parent     5c1ebe9ada19f95e5582f80b37e0b5003dc79ddb
download   linux-0d874858c6bfdaf95f8df26856dbc7a57f3c0128.tar.xz
Merge branch kvm-arm64/vm-configuration into kvmarm/next
* kvm-arm64/vm-configuration: (29 commits)
: VM configuration enforcement, courtesy of Marc Zyngier
:
: Userspace has gained the ability to control the features visible
: through the ID registers, yet KVM didn't take this into account as the
: effective feature set when determining trap / emulation behavior. This
: series adds:
:
: - Mechanism for testing the presence of a particular CPU feature in the
: guest's ID registers
:
: - Infrastructure for computing the effective value of VNCR-backed
: registers, taking into account the RES0 / RES1 bits for a particular
: VM configuration (a simplified sketch follows this list)
:
: - Implementation of 'fine-grained UNDEF' controls that shadow the FGT
: register definitions (a second sketch follows the shortlog below).
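A minimal, self-contained sketch of the second point above: each VM keeps a per-register pair of RES0/RES1 masks for the VNCR-backed registers, and the effective value is the stored value with RES0 cleared and RES1 forced. The series does this in set_sysreg_masks()/kvm_vcpu_sanitise_vncr_reg() (visible in the nested.c hunk below); the struct and function names in this userspace model are illustrative, not the kernel's.

#include <stdint.h>
#include <stdio.h>

struct reg_mask {
	uint64_t res0;	/* bits this VM must read as zero */
	uint64_t res1;	/* bits this VM must read as one */
};

/* Effective value of a VNCR-backed register: drop RES0, force RES1. */
static uint64_t sanitise_reg(uint64_t raw, const struct reg_mask *m)
{
	return (raw & ~m->res0) | m->res1;
}

int main(void)
{
	/* Illustrative masks: bit 3 is RES0 (feature hidden), bit 31 is RES1. */
	struct reg_mask m = { .res0 = 1ULL << 3, .res1 = 1ULL << 31 };
	uint64_t guest_val = (1ULL << 31) | (1ULL << 3) | 0x3;

	printf("effective value: %#llx\n",
	       (unsigned long long)sanitise_reg(guest_val, &m));
	return 0;
}

Clearing RES0 and OR-ing in RES1 is the whole trick: a feature the VM configuration does not advertise simply has its control bits earmarked RES0, so the guest can never observe or set them.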
KVM: arm64: Don't initialize idreg debugfs w/ preemption disabled
KVM: arm64: Fail the idreg iterator if idregs aren't initialized
KVM: arm64: Make build-time check of RES0/RES1 bits optional
KVM: arm64: Add debugfs file for guest's ID registers
KVM: arm64: Snapshot all non-zero RES0/RES1 sysreg fields for later checking
KVM: arm64: Make FEAT_MOPS UNDEF if not advertised to the guest
KVM: arm64: Make AMU sysreg UNDEF if FEAT_AMU is not advertised to the guest
KVM: arm64: Make PIR{,E0}_EL1 UNDEF if S1PIE is not advertised to the guest
KVM: arm64: Make TLBI OS/Range UNDEF if not advertised to the guest
KVM: arm64: Streamline save/restore of HFG[RW]TR_EL2
KVM: arm64: Move existing feature disabling over to FGU infrastructure
KVM: arm64: Propagate and handle Fine-Grained UNDEF bits
KVM: arm64: Add Fine-Grained UNDEF tracking information
KVM: arm64: Rename __check_nv_sr_forward() to triage_sysreg_trap()
KVM: arm64: Use the xarray as the primary sysreg/sysinsn walker
KVM: arm64: Register AArch64 system register entries with the sysreg xarray
KVM: arm64: Always populate the trap configuration xarray
KVM: arm64: nv: Move system instructions to their own sys_reg_desc array
KVM: arm64: Drop the requirement for XARRAY_MULTI
KVM: arm64: nv: Turn encoding ranges into discrete XArray stores
...
Signed-off-by: Oliver Upton <oliver.upton@linux.dev>
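A second sketch, this time of the 'fine-grained UNDEF' idea mentioned in the description: each VM keeps a per-FGT-group bitmap of traps that exist purely to forbid a feature, and triage_sysreg_trap() (see the emulate-nested.c and sys_regs.c hunks below) injects an UNDEF when a trapped access hits one of those bits. The code below is a toy userspace model; the group names mirror the kernel's, the bit value is made up.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* FGT group ids, mirroring the enum used by the series. */
enum fgt_group { NO_FGT_GROUP, HFGxTR_GROUP, HFGITR_GROUP, NR_FGT_GROUPS };

struct vm_cfg {
	uint64_t fgu[NR_FGT_GROUPS];	/* bits that must UNDEF, per group */
};

/* Should this trapped access UNDEF instead of being emulated/forwarded? */
static bool access_undefs(const struct vm_cfg *vm, enum fgt_group grp, int bit)
{
	return grp != NO_FGT_GROUP && (vm->fgu[grp] & (1ULL << bit));
}

int main(void)
{
	/* Pretend bit 42 of the HFGxTR group guards an unadvertised feature. */
	struct vm_cfg vm = { .fgu = { [HFGxTR_GROUP] = 1ULL << 42 } };

	printf("bit 42: %s\n", access_undefs(&vm, HFGxTR_GROUP, 42) ? "UNDEF" : "emulate");
	printf("bit 10: %s\n", access_undefs(&vm, HFGxTR_GROUP, 10) ? "UNDEF" : "emulate");
	return 0;
}

In the real code the bitmap is filled once per VM in kvm_init_sysreg(), based on what the ID registers advertise, and consulted on every sysreg trap before any nested-virt forwarding logic runs.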
Diffstat (limited to 'arch/arm64/kvm')
-rw-r--r--  arch/arm64/kvm/Kconfig                      |  12
-rw-r--r--  arch/arm64/kvm/arm.c                        |  11
-rw-r--r--  arch/arm64/kvm/check-res-bits.h             | 125
-rw-r--r--  arch/arm64/kvm/emulate-nested.c             | 231
-rw-r--r--  arch/arm64/kvm/hyp/include/hyp/switch.h     | 130
-rw-r--r--  arch/arm64/kvm/hyp/include/hyp/sysreg-sr.h  |  24
-rw-r--r--  arch/arm64/kvm/nested.c                     | 267
-rw-r--r--  arch/arm64/kvm/pmu-emul.c                   |  11
-rw-r--r--  arch/arm64/kvm/sys_regs.c                   | 242
-rw-r--r--  arch/arm64/kvm/sys_regs.h                   |   2
10 files changed, 891 insertions, 164 deletions
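One more sketch, for the new check-res-bits.h shown in the diffstat (125 lines): KVM's hand-written expectation of each register's RES0/RES1 layout is compared at build time against the generated sysreg definitions, gated by the KVM_ARM64_RES_BITS_PARANOIA Kconfig option added below. The kernel uses BUILD_BUG_ON(); the standalone model here uses C11 _Static_assert, and the register is made up for illustration.

#include <stdint.h>

/* GENMASK_ULL(h, l): bits h..l set, same idea as include/linux/bits.h. */
#define GENMASK_ULL(h, l) \
	((~0ULL << (l)) & (~0ULL >> (63 - (h))))

/* Stand-in for a value coming from the generated sysreg definitions. */
#define EXAMPLE_REG_RES0	GENMASK_ULL(63, 32)

/* Fail the build if the reserved layout this code assumes ever changes. */
_Static_assert(EXAMPLE_REG_RES0 == GENMASK_ULL(63, 32),
	       "EXAMPLE_REG reserved bits changed; review the code relying on them");

int main(void)
{
	return 0;
}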
diff --git a/arch/arm64/kvm/Kconfig b/arch/arm64/kvm/Kconfig index 6c3c8ca73e7f..fa9389270cfe 100644 --- a/arch/arm64/kvm/Kconfig +++ b/arch/arm64/kvm/Kconfig @@ -39,7 +39,6 @@ menuconfig KVM select HAVE_KVM_VCPU_RUN_PID_CHANGE select SCHED_INFO select GUEST_PERF_EVENTS if PERF_EVENTS - select XARRAY_MULTI help Support hosting virtualized guest machines. @@ -68,4 +67,15 @@ config PROTECTED_NVHE_STACKTRACE If unsure, or not using protected nVHE (pKVM), say N. +config KVM_ARM64_RES_BITS_PARANOIA + bool "Build-time check of RES0/RES1 bits" + depends on KVM + default n + help + Say Y here to validate that KVM's knowledge of most system + registers' RES0/RES1 bits matches when the rest of the kernel + defines. Expect the build to fail badly if you enable this. + + Just say N. + endif # VIRTUALIZATION diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c index 090dbc954157..3dee5490eea9 100644 --- a/arch/arm64/kvm/arm.c +++ b/arch/arm64/kvm/arm.c @@ -190,6 +190,10 @@ vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf) return VM_FAULT_SIGBUS; } +void kvm_arch_create_vm_debugfs(struct kvm *kvm) +{ + kvm_sys_regs_create_debugfs(kvm); +} /** * kvm_arch_destroy_vm - destroy the VM data structure @@ -206,6 +210,7 @@ void kvm_arch_destroy_vm(struct kvm *kvm) pkvm_destroy_hyp_vm(kvm); kfree(kvm->arch.mpidr_data); + kfree(kvm->arch.sysreg_masks); kvm_destroy_vcpus(kvm); kvm_unshare_hyp(kvm, kvm + 1); @@ -674,6 +679,12 @@ int kvm_arch_vcpu_run_pid_change(struct kvm_vcpu *vcpu) return ret; } + /* + * This needs to happen after NV has imposed its own restrictions on + * the feature set + */ + kvm_init_sysreg(vcpu); + ret = kvm_timer_enable(vcpu); if (ret) return ret; diff --git a/arch/arm64/kvm/check-res-bits.h b/arch/arm64/kvm/check-res-bits.h new file mode 100644 index 000000000000..2d98e60efc3c --- /dev/null +++ b/arch/arm64/kvm/check-res-bits.h @@ -0,0 +1,125 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (C) 2024 - Google LLC + * Author: Marc Zyngier <maz@kernel.org> + */ + +#include <asm/sysreg-defs.h> + +/* + * WARNING WARNING WARNING WARNING WARNING WARNING WARNING WARNING + * + * If any of these BUILD_BUG_ON() fails, that's because some bits that + * were reserved have gained some other meaning, and KVM needs to know + * about those. + * + * In such case, do *NOT* blindly change the assertion so that it + * passes, but also teach the rest of the code about the actual + * change. 
+ * + * WARNING WARNING WARNING WARNING WARNING WARNING WARNING WARNING + */ +static inline void check_res_bits(void) +{ +#ifdef CONFIG_KVM_ARM64_RES_BITS_PARANOIA + + BUILD_BUG_ON(OSDTRRX_EL1_RES0 != (GENMASK_ULL(63, 32))); + BUILD_BUG_ON(MDCCINT_EL1_RES0 != (GENMASK_ULL(63, 31) | GENMASK_ULL(28, 0))); + BUILD_BUG_ON(MDSCR_EL1_RES0 != (GENMASK_ULL(63, 36) | GENMASK_ULL(28, 28) | GENMASK_ULL(25, 24) | GENMASK_ULL(20, 20) | GENMASK_ULL(18, 16) | GENMASK_ULL(11, 7) | GENMASK_ULL(5, 1))); + BUILD_BUG_ON(OSDTRTX_EL1_RES0 != (GENMASK_ULL(63, 32))); + BUILD_BUG_ON(OSECCR_EL1_RES0 != (GENMASK_ULL(63, 32))); + BUILD_BUG_ON(OSLAR_EL1_RES0 != (GENMASK_ULL(63, 1))); + BUILD_BUG_ON(ID_PFR0_EL1_RES0 != (GENMASK_ULL(63, 32))); + BUILD_BUG_ON(ID_PFR1_EL1_RES0 != (GENMASK_ULL(63, 32))); + BUILD_BUG_ON(ID_DFR0_EL1_RES0 != (GENMASK_ULL(63, 32))); + BUILD_BUG_ON(ID_AFR0_EL1_RES0 != (GENMASK_ULL(63, 16))); + BUILD_BUG_ON(ID_MMFR0_EL1_RES0 != (GENMASK_ULL(63, 32))); + BUILD_BUG_ON(ID_MMFR1_EL1_RES0 != (GENMASK_ULL(63, 32))); + BUILD_BUG_ON(ID_MMFR2_EL1_RES0 != (GENMASK_ULL(63, 32))); + BUILD_BUG_ON(ID_MMFR3_EL1_RES0 != (GENMASK_ULL(63, 32))); + BUILD_BUG_ON(ID_ISAR0_EL1_RES0 != (GENMASK_ULL(63, 28))); + BUILD_BUG_ON(ID_ISAR1_EL1_RES0 != (GENMASK_ULL(63, 32))); + BUILD_BUG_ON(ID_ISAR2_EL1_RES0 != (GENMASK_ULL(63, 32))); + BUILD_BUG_ON(ID_ISAR3_EL1_RES0 != (GENMASK_ULL(63, 32))); + BUILD_BUG_ON(ID_ISAR4_EL1_RES0 != (GENMASK_ULL(63, 32))); + BUILD_BUG_ON(ID_ISAR5_EL1_RES0 != (GENMASK_ULL(63, 32) | GENMASK_ULL(23, 20))); + BUILD_BUG_ON(ID_ISAR6_EL1_RES0 != (GENMASK_ULL(63, 28))); + BUILD_BUG_ON(ID_MMFR4_EL1_RES0 != (GENMASK_ULL(63, 32))); + BUILD_BUG_ON(MVFR0_EL1_RES0 != (GENMASK_ULL(63, 32))); + BUILD_BUG_ON(MVFR1_EL1_RES0 != (GENMASK_ULL(63, 32))); + BUILD_BUG_ON(MVFR2_EL1_RES0 != (GENMASK_ULL(63, 8))); + BUILD_BUG_ON(ID_PFR2_EL1_RES0 != (GENMASK_ULL(63, 12))); + BUILD_BUG_ON(ID_DFR1_EL1_RES0 != (GENMASK_ULL(63, 8))); + BUILD_BUG_ON(ID_MMFR5_EL1_RES0 != (GENMASK_ULL(63, 8))); + BUILD_BUG_ON(ID_AA64PFR1_EL1_RES0 != (GENMASK_ULL(23, 20))); + BUILD_BUG_ON(ID_AA64PFR2_EL1_RES0 != (GENMASK_ULL(63, 36) | GENMASK_ULL(31, 12))); + BUILD_BUG_ON(ID_AA64ZFR0_EL1_RES0 != (GENMASK_ULL(63, 60) | GENMASK_ULL(51, 48) | GENMASK_ULL(39, 36) | GENMASK_ULL(31, 28) | GENMASK_ULL(15, 8))); + BUILD_BUG_ON(ID_AA64SMFR0_EL1_RES0 != (GENMASK_ULL(62, 61) | GENMASK_ULL(51, 49) | GENMASK_ULL(31, 31) | GENMASK_ULL(27, 0))); + BUILD_BUG_ON(ID_AA64FPFR0_EL1_RES0 != (GENMASK_ULL(63, 32) | GENMASK_ULL(27, 2))); + BUILD_BUG_ON(ID_AA64DFR0_EL1_RES0 != (GENMASK_ULL(27, 24) | GENMASK_ULL(19, 16))); + BUILD_BUG_ON(ID_AA64DFR1_EL1_RES0 != (GENMASK_ULL(63, 0))); + BUILD_BUG_ON(ID_AA64AFR0_EL1_RES0 != (GENMASK_ULL(63, 32))); + BUILD_BUG_ON(ID_AA64AFR1_EL1_RES0 != (GENMASK_ULL(63, 0))); + BUILD_BUG_ON(ID_AA64ISAR0_EL1_RES0 != (GENMASK_ULL(3, 0))); + BUILD_BUG_ON(ID_AA64ISAR2_EL1_RES0 != (GENMASK_ULL(47, 44))); + BUILD_BUG_ON(ID_AA64ISAR3_EL1_RES0 != (GENMASK_ULL(63, 16))); + BUILD_BUG_ON(ID_AA64MMFR0_EL1_RES0 != (GENMASK_ULL(55, 48))); + BUILD_BUG_ON(ID_AA64MMFR2_EL1_RES0 != (GENMASK_ULL(47, 44))); + BUILD_BUG_ON(ID_AA64MMFR3_EL1_RES0 != (GENMASK_ULL(51, 48))); + BUILD_BUG_ON(ID_AA64MMFR4_EL1_RES0 != (GENMASK_ULL(63, 40) | GENMASK_ULL(35, 28) | GENMASK_ULL(3, 0))); + BUILD_BUG_ON(SCTLR_EL1_RES0 != (GENMASK_ULL(17, 17))); + BUILD_BUG_ON(CPACR_ELx_RES0 != (GENMASK_ULL(63, 30) | GENMASK_ULL(27, 26) | GENMASK_ULL(23, 22) | GENMASK_ULL(19, 18) | GENMASK_ULL(15, 0))); + BUILD_BUG_ON(SMPRI_EL1_RES0 != (GENMASK_ULL(63, 4))); + BUILD_BUG_ON(ZCR_ELx_RES0 != 
(GENMASK_ULL(63, 9))); + BUILD_BUG_ON(SMCR_ELx_RES0 != (GENMASK_ULL(63, 32) | GENMASK_ULL(29, 9))); + BUILD_BUG_ON(GCSCR_ELx_RES0 != (GENMASK_ULL(63, 10) | GENMASK_ULL(7, 7) | GENMASK_ULL(4, 1))); + BUILD_BUG_ON(GCSPR_ELx_RES0 != (GENMASK_ULL(2, 0))); + BUILD_BUG_ON(GCSCRE0_EL1_RES0 != (GENMASK_ULL(63, 11) | GENMASK_ULL(7, 6) | GENMASK_ULL(4, 1))); + BUILD_BUG_ON(ALLINT_RES0 != (GENMASK_ULL(63, 14) | GENMASK_ULL(12, 0))); + BUILD_BUG_ON(PMSCR_EL1_RES0 != (GENMASK_ULL(63, 8) | GENMASK_ULL(2, 2))); + BUILD_BUG_ON(PMSICR_EL1_RES0 != (GENMASK_ULL(55, 32))); + BUILD_BUG_ON(PMSIRR_EL1_RES0 != (GENMASK_ULL(63, 32) | GENMASK_ULL(7, 1))); + BUILD_BUG_ON(PMSFCR_EL1_RES0 != (GENMASK_ULL(63, 19) | GENMASK_ULL(15, 4))); + BUILD_BUG_ON(PMSLATFR_EL1_RES0 != (GENMASK_ULL(63, 16))); + BUILD_BUG_ON(PMSIDR_EL1_RES0 != (GENMASK_ULL(63, 25) | GENMASK_ULL(7, 7))); + BUILD_BUG_ON(PMBLIMITR_EL1_RES0 != (GENMASK_ULL(11, 6) | GENMASK_ULL(4, 3))); + BUILD_BUG_ON(PMBSR_EL1_RES0 != (GENMASK_ULL(63, 32) | GENMASK_ULL(25, 20))); + BUILD_BUG_ON(PMBIDR_EL1_RES0 != (GENMASK_ULL(63, 12) | GENMASK_ULL(7, 6))); + BUILD_BUG_ON(CONTEXTIDR_ELx_RES0 != (GENMASK_ULL(63, 32))); + BUILD_BUG_ON(CCSIDR_EL1_RES0 != (GENMASK_ULL(63, 32))); + BUILD_BUG_ON(CLIDR_EL1_RES0 != (GENMASK_ULL(63, 47))); + BUILD_BUG_ON(CCSIDR2_EL1_RES0 != (GENMASK_ULL(63, 24))); + BUILD_BUG_ON(GMID_EL1_RES0 != (GENMASK_ULL(63, 4))); + BUILD_BUG_ON(SMIDR_EL1_RES0 != (GENMASK_ULL(63, 32) | GENMASK_ULL(14, 12))); + BUILD_BUG_ON(CSSELR_EL1_RES0 != (GENMASK_ULL(63, 5))); + BUILD_BUG_ON(CTR_EL0_RES0 != (GENMASK_ULL(63, 38) | GENMASK_ULL(30, 30) | GENMASK_ULL(13, 4))); + BUILD_BUG_ON(CTR_EL0_RES1 != (GENMASK_ULL(31, 31))); + BUILD_BUG_ON(DCZID_EL0_RES0 != (GENMASK_ULL(63, 5))); + BUILD_BUG_ON(SVCR_RES0 != (GENMASK_ULL(63, 2))); + BUILD_BUG_ON(FPMR_RES0 != (GENMASK_ULL(63, 38) | GENMASK_ULL(23, 23) | GENMASK_ULL(13, 9))); + BUILD_BUG_ON(HFGxTR_EL2_RES0 != (GENMASK_ULL(51, 51))); + BUILD_BUG_ON(HFGITR_EL2_RES0 != (GENMASK_ULL(63, 63) | GENMASK_ULL(61, 61))); + BUILD_BUG_ON(HDFGRTR_EL2_RES0 != (GENMASK_ULL(49, 49) | GENMASK_ULL(42, 42) | GENMASK_ULL(39, 38) | GENMASK_ULL(21, 20) | GENMASK_ULL(8, 8))); + BUILD_BUG_ON(HDFGWTR_EL2_RES0 != (GENMASK_ULL(63, 63) | GENMASK_ULL(59, 58) | GENMASK_ULL(51, 51) | GENMASK_ULL(47, 47) | GENMASK_ULL(43, 43) | GENMASK_ULL(40, 38) | GENMASK_ULL(34, 34) | GENMASK_ULL(30, 30) | GENMASK_ULL(22, 22) | GENMASK_ULL(9, 9) | GENMASK_ULL(6, 6))); + BUILD_BUG_ON(HAFGRTR_EL2_RES0 != (GENMASK_ULL(63, 50) | GENMASK_ULL(16, 5))); + BUILD_BUG_ON(HCRX_EL2_RES0 != (GENMASK_ULL(63, 25) | GENMASK_ULL(13, 12))); + BUILD_BUG_ON(DACR32_EL2_RES0 != (GENMASK_ULL(63, 32))); + BUILD_BUG_ON(PMSCR_EL2_RES0 != (GENMASK_ULL(63, 8) | GENMASK_ULL(2, 2))); + BUILD_BUG_ON(TCR2_EL1x_RES0 != (GENMASK_ULL(63, 16) | GENMASK_ULL(13, 12) | GENMASK_ULL(9, 6))); + BUILD_BUG_ON(TCR2_EL2_RES0 != (GENMASK_ULL(63, 16))); + BUILD_BUG_ON(LORSA_EL1_RES0 != (GENMASK_ULL(63, 52) | GENMASK_ULL(15, 1))); + BUILD_BUG_ON(LOREA_EL1_RES0 != (GENMASK_ULL(63, 52) | GENMASK_ULL(15, 0))); + BUILD_BUG_ON(LORN_EL1_RES0 != (GENMASK_ULL(63, 8))); + BUILD_BUG_ON(LORC_EL1_RES0 != (GENMASK_ULL(63, 10) | GENMASK_ULL(1, 1))); + BUILD_BUG_ON(LORID_EL1_RES0 != (GENMASK_ULL(63, 24) | GENMASK_ULL(15, 8))); + BUILD_BUG_ON(ISR_EL1_RES0 != (GENMASK_ULL(63, 11) | GENMASK_ULL(5, 0))); + BUILD_BUG_ON(ICC_NMIAR1_EL1_RES0 != (GENMASK_ULL(63, 24))); + BUILD_BUG_ON(TRBLIMITR_EL1_RES0 != (GENMASK_ULL(11, 7))); + BUILD_BUG_ON(TRBBASER_EL1_RES0 != (GENMASK_ULL(11, 0))); + BUILD_BUG_ON(TRBSR_EL1_RES0 != (GENMASK_ULL(63, 56) 
| GENMASK_ULL(25, 24) | GENMASK_ULL(19, 19) | GENMASK_ULL(16, 16))); + BUILD_BUG_ON(TRBMAR_EL1_RES0 != (GENMASK_ULL(63, 12))); + BUILD_BUG_ON(TRBTRG_EL1_RES0 != (GENMASK_ULL(63, 32))); + BUILD_BUG_ON(TRBIDR_EL1_RES0 != (GENMASK_ULL(63, 12) | GENMASK_ULL(7, 6))); + +#endif +} diff --git a/arch/arm64/kvm/emulate-nested.c b/arch/arm64/kvm/emulate-nested.c index 431fd429932d..4697ba41b3a9 100644 --- a/arch/arm64/kvm/emulate-nested.c +++ b/arch/arm64/kvm/emulate-nested.c @@ -427,12 +427,14 @@ static const complex_condition_check ccc[] = { * [19:14] bit number in the FGT register (6 bits) * [20] trap polarity (1 bit) * [25:21] FG filter (5 bits) - * [62:26] Unused (37 bits) + * [35:26] Main SysReg table index (10 bits) + * [62:36] Unused (27 bits) * [63] RES0 - Must be zero, as lost on insertion in the xarray */ #define TC_CGT_BITS 10 #define TC_FGT_BITS 4 #define TC_FGF_BITS 5 +#define TC_SRI_BITS 10 union trap_config { u64 val; @@ -442,7 +444,8 @@ union trap_config { unsigned long bit:6; /* Bit number */ unsigned long pol:1; /* Polarity */ unsigned long fgf:TC_FGF_BITS; /* Fine Grained Filter */ - unsigned long unused:37; /* Unused, should be zero */ + unsigned long sri:TC_SRI_BITS; /* SysReg Index */ + unsigned long unused:27; /* Unused, should be zero */ unsigned long mbz:1; /* Must Be Zero */ }; }; @@ -1006,18 +1009,6 @@ static const struct encoding_to_trap_config encoding_to_cgt[] __initconst = { static DEFINE_XARRAY(sr_forward_xa); -enum fgt_group_id { - __NO_FGT_GROUP__, - HFGxTR_GROUP, - HDFGRTR_GROUP, - HDFGWTR_GROUP, - HFGITR_GROUP, - HAFGRTR_GROUP, - - /* Must be last */ - __NR_FGT_GROUP_IDS__ -}; - enum fg_filter_id { __NO_FGF__, HCRX_FGTnXS, @@ -1757,6 +1748,28 @@ static __init void print_nv_trap_error(const struct encoding_to_trap_config *tc, err); } +static u32 encoding_next(u32 encoding) +{ + u8 op0, op1, crn, crm, op2; + + op0 = sys_reg_Op0(encoding); + op1 = sys_reg_Op1(encoding); + crn = sys_reg_CRn(encoding); + crm = sys_reg_CRm(encoding); + op2 = sys_reg_Op2(encoding); + + if (op2 < Op2_mask) + return sys_reg(op0, op1, crn, crm, op2 + 1); + if (crm < CRm_mask) + return sys_reg(op0, op1, crn, crm + 1, 0); + if (crn < CRn_mask) + return sys_reg(op0, op1, crn + 1, 0, 0); + if (op1 < Op1_mask) + return sys_reg(op0, op1 + 1, 0, 0, 0); + + return sys_reg(op0 + 1, 0, 0, 0, 0); +} + int __init populate_nv_trap_config(void) { int ret = 0; @@ -1775,23 +1788,18 @@ int __init populate_nv_trap_config(void) ret = -EINVAL; } - if (cgt->encoding != cgt->end) { - prev = xa_store_range(&sr_forward_xa, - cgt->encoding, cgt->end, - xa_mk_value(cgt->tc.val), - GFP_KERNEL); - } else { - prev = xa_store(&sr_forward_xa, cgt->encoding, + for (u32 enc = cgt->encoding; enc <= cgt->end; enc = encoding_next(enc)) { + prev = xa_store(&sr_forward_xa, enc, xa_mk_value(cgt->tc.val), GFP_KERNEL); if (prev && !xa_is_err(prev)) { ret = -EINVAL; print_nv_trap_error(cgt, "Duplicate CGT", ret); } - } - if (xa_is_err(prev)) { - ret = xa_err(prev); - print_nv_trap_error(cgt, "Failed CGT insertion", ret); + if (xa_is_err(prev)) { + ret = xa_err(prev); + print_nv_trap_error(cgt, "Failed CGT insertion", ret); + } } } @@ -1804,6 +1812,7 @@ int __init populate_nv_trap_config(void) for (int i = 0; i < ARRAY_SIZE(encoding_to_fgt); i++) { const struct encoding_to_trap_config *fgt = &encoding_to_fgt[i]; union trap_config tc; + void *prev; if (fgt->tc.fgt >= __NR_FGT_GROUP_IDS__) { ret = -EINVAL; @@ -1818,8 +1827,13 @@ int __init populate_nv_trap_config(void) } tc.val |= fgt->tc.val; - xa_store(&sr_forward_xa, 
fgt->encoding, - xa_mk_value(tc.val), GFP_KERNEL); + prev = xa_store(&sr_forward_xa, fgt->encoding, + xa_mk_value(tc.val), GFP_KERNEL); + + if (xa_is_err(prev)) { + ret = xa_err(prev); + print_nv_trap_error(fgt, "Failed FGT insertion", ret); + } } kvm_info("nv: %ld fine grained trap handlers\n", @@ -1845,6 +1859,38 @@ check_mcb: return ret; } +int __init populate_sysreg_config(const struct sys_reg_desc *sr, + unsigned int idx) +{ + union trap_config tc; + u32 encoding; + void *ret; + + /* + * 0 is a valid value for the index, but not for the storage. + * We'll store (idx+1), so check against an offset'd limit. + */ + if (idx >= (BIT(TC_SRI_BITS) - 1)) { + kvm_err("sysreg %s (%d) out of range\n", sr->name, idx); + return -EINVAL; + } + + encoding = sys_reg(sr->Op0, sr->Op1, sr->CRn, sr->CRm, sr->Op2); + tc = get_trap_config(encoding); + + if (tc.sri) { + kvm_err("sysreg %s (%d) duplicate entry (%d)\n", + sr->name, idx - 1, tc.sri); + return -EINVAL; + } + + tc.sri = idx + 1; + ret = xa_store(&sr_forward_xa, encoding, + xa_mk_value(tc.val), GFP_KERNEL); + + return xa_err(ret); +} + static enum trap_behaviour get_behaviour(struct kvm_vcpu *vcpu, const struct trap_bits *tb) { @@ -1892,20 +1938,64 @@ static enum trap_behaviour compute_trap_behaviour(struct kvm_vcpu *vcpu, return __compute_trap_behaviour(vcpu, tc.cgt, b); } -static bool check_fgt_bit(u64 val, const union trap_config tc) +static u64 kvm_get_sysreg_res0(struct kvm *kvm, enum vcpu_sysreg sr) { - return ((val >> tc.bit) & 1) == tc.pol; + struct kvm_sysreg_masks *masks; + + /* Only handle the VNCR-backed regs for now */ + if (sr < __VNCR_START__) + return 0; + + masks = kvm->arch.sysreg_masks; + + return masks->mask[sr - __VNCR_START__].res0; } -#define sanitised_sys_reg(vcpu, reg) \ - ({ \ - u64 __val; \ - __val = __vcpu_sys_reg(vcpu, reg); \ - __val &= ~__ ## reg ## _RES0; \ - (__val); \ - }) +static bool check_fgt_bit(struct kvm *kvm, bool is_read, + u64 val, const union trap_config tc) +{ + enum vcpu_sysreg sr; + + if (tc.pol) + return (val & BIT(tc.bit)); + + /* + * FGTs with negative polarities are an absolute nightmare, as + * we need to evaluate the bit in the light of the feature + * that defines it. WTF were they thinking? + * + * So let's check if the bit has been earmarked as RES0, as + * this indicates an unimplemented feature. + */ + if (val & BIT(tc.bit)) + return false; + + switch ((enum fgt_group_id)tc.fgt) { + case HFGxTR_GROUP: + sr = is_read ? HFGRTR_EL2 : HFGWTR_EL2; + break; + + case HDFGRTR_GROUP: + sr = is_read ? HDFGRTR_EL2 : HDFGWTR_EL2; + break; + + case HAFGRTR_GROUP: + sr = HAFGRTR_EL2; + break; + + case HFGITR_GROUP: + sr = HFGITR_EL2; + break; + + default: + WARN_ONCE(1, "Unhandled FGT group"); + return false; + } + + return !(kvm_get_sysreg_res0(kvm, sr) & BIT(tc.bit)); +} -bool __check_nv_sr_forward(struct kvm_vcpu *vcpu) +bool triage_sysreg_trap(struct kvm_vcpu *vcpu, int *sr_index) { union trap_config tc; enum trap_behaviour b; @@ -1913,9 +2003,6 @@ bool __check_nv_sr_forward(struct kvm_vcpu *vcpu) u32 sysreg; u64 esr, val; - if (!vcpu_has_nv(vcpu) || is_hyp_ctxt(vcpu)) - return false; - esr = kvm_vcpu_get_esr(vcpu); sysreg = esr_sys64_to_sysreg(esr); is_read = (esr & ESR_ELx_SYS64_ISS_DIR_MASK) == ESR_ELx_SYS64_ISS_DIR_READ; @@ -1926,13 +2013,27 @@ bool __check_nv_sr_forward(struct kvm_vcpu *vcpu) * A value of 0 for the whole entry means that we know nothing * for this sysreg, and that it cannot be re-injected into the * nested hypervisor. In this situation, let's cut it short. 
- * - * Note that ultimately, we could also make use of the xarray - * to store the index of the sysreg in the local descriptor - * array, avoiding another search... Hint, hint... */ if (!tc.val) - return false; + goto local; + + /* + * If a sysreg can be trapped using a FGT, first check whether we + * trap for the purpose of forbidding the feature. In that case, + * inject an UNDEF. + */ + if (tc.fgt != __NO_FGT_GROUP__ && + (vcpu->kvm->arch.fgu[tc.fgt] & BIT(tc.bit))) { + kvm_inject_undefined(vcpu); + return true; + } + + /* + * If we're not nesting, immediately return to the caller, with the + * sysreg index, should we have it. + */ + if (!vcpu_has_nv(vcpu) || is_hyp_ctxt(vcpu)) + goto local; switch ((enum fgt_group_id)tc.fgt) { case __NO_FGT_GROUP__: @@ -1940,25 +2041,24 @@ bool __check_nv_sr_forward(struct kvm_vcpu *vcpu) case HFGxTR_GROUP: if (is_read) - val = sanitised_sys_reg(vcpu, HFGRTR_EL2); + val = __vcpu_sys_reg(vcpu, HFGRTR_EL2); else - val = sanitised_sys_reg(vcpu, HFGWTR_EL2); + val = __vcpu_sys_reg(vcpu, HFGWTR_EL2); break; case HDFGRTR_GROUP: - case HDFGWTR_GROUP: if (is_read) - val = sanitised_sys_reg(vcpu, HDFGRTR_EL2); + val = __vcpu_sys_reg(vcpu, HDFGRTR_EL2); else - val = sanitised_sys_reg(vcpu, HDFGWTR_EL2); + val = __vcpu_sys_reg(vcpu, HDFGWTR_EL2); break; case HAFGRTR_GROUP: - val = sanitised_sys_reg(vcpu, HAFGRTR_EL2); + val = __vcpu_sys_reg(vcpu, HAFGRTR_EL2); break; case HFGITR_GROUP: - val = sanitised_sys_reg(vcpu, HFGITR_EL2); + val = __vcpu_sys_reg(vcpu, HFGITR_EL2); switch (tc.fgf) { u64 tmp; @@ -1966,7 +2066,7 @@ bool __check_nv_sr_forward(struct kvm_vcpu *vcpu) break; case HCRX_FGTnXS: - tmp = sanitised_sys_reg(vcpu, HCRX_EL2); + tmp = __vcpu_sys_reg(vcpu, HCRX_EL2); if (tmp & HCRX_EL2_FGTnXS) tc.fgt = __NO_FGT_GROUP__; } @@ -1975,10 +2075,11 @@ bool __check_nv_sr_forward(struct kvm_vcpu *vcpu) case __NR_FGT_GROUP_IDS__: /* Something is really wrong, bail out */ WARN_ONCE(1, "__NR_FGT_GROUP_IDS__"); - return false; + goto local; } - if (tc.fgt != __NO_FGT_GROUP__ && check_fgt_bit(val, tc)) + if (tc.fgt != __NO_FGT_GROUP__ && check_fgt_bit(vcpu->kvm, is_read, + val, tc)) goto inject; b = compute_trap_behaviour(vcpu, tc); @@ -1987,6 +2088,26 @@ bool __check_nv_sr_forward(struct kvm_vcpu *vcpu) ((b & BEHAVE_FORWARD_WRITE) && !is_read)) goto inject; +local: + if (!tc.sri) { + struct sys_reg_params params; + + params = esr_sys64_to_params(esr); + + /* + * Check for the IMPDEF range, as per DDI0487 J.a, + * D18.3.2 Reserved encodings for IMPLEMENTATION + * DEFINED registers. 
+ */ + if (!(params.Op0 == 3 && (params.CRn & 0b1011) == 0b1011)) + print_sys_reg_msg(¶ms, + "Unsupported guest access at: %lx\n", + *vcpu_pc(vcpu)); + kvm_inject_undefined(vcpu); + return true; + } + + *sr_index = tc.sri - 1; return false; inject: diff --git a/arch/arm64/kvm/hyp/include/hyp/switch.h b/arch/arm64/kvm/hyp/include/hyp/switch.h index a038320cdb08..e3fcf8c4d5b4 100644 --- a/arch/arm64/kvm/hyp/include/hyp/switch.h +++ b/arch/arm64/kvm/hyp/include/hyp/switch.h @@ -79,14 +79,48 @@ static inline void __activate_traps_fpsimd32(struct kvm_vcpu *vcpu) clr |= ~hfg & __ ## reg ## _nMASK; \ } while(0) -#define update_fgt_traps_cs(vcpu, reg, clr, set) \ +#define reg_to_fgt_group_id(reg) \ + ({ \ + enum fgt_group_id id; \ + switch(reg) { \ + case HFGRTR_EL2: \ + case HFGWTR_EL2: \ + id = HFGxTR_GROUP; \ + break; \ + case HFGITR_EL2: \ + id = HFGITR_GROUP; \ + break; \ + case HDFGRTR_EL2: \ + case HDFGWTR_EL2: \ + id = HDFGRTR_GROUP; \ + break; \ + case HAFGRTR_EL2: \ + id = HAFGRTR_GROUP; \ + break; \ + default: \ + BUILD_BUG_ON(1); \ + } \ + \ + id; \ + }) + +#define compute_undef_clr_set(vcpu, kvm, reg, clr, set) \ + do { \ + u64 hfg = kvm->arch.fgu[reg_to_fgt_group_id(reg)]; \ + set |= hfg & __ ## reg ## _MASK; \ + clr |= hfg & __ ## reg ## _nMASK; \ + } while(0) + +#define update_fgt_traps_cs(hctxt, vcpu, kvm, reg, clr, set) \ do { \ - struct kvm_cpu_context *hctxt = \ - &this_cpu_ptr(&kvm_host_data)->host_ctxt; \ u64 c = 0, s = 0; \ \ ctxt_sys_reg(hctxt, reg) = read_sysreg_s(SYS_ ## reg); \ - compute_clr_set(vcpu, reg, c, s); \ + if (vcpu_has_nv(vcpu) && !is_hyp_ctxt(vcpu)) \ + compute_clr_set(vcpu, reg, c, s); \ + \ + compute_undef_clr_set(vcpu, kvm, reg, c, s); \ + \ s |= set; \ c |= clr; \ if (c || s) { \ @@ -97,8 +131,8 @@ static inline void __activate_traps_fpsimd32(struct kvm_vcpu *vcpu) } \ } while(0) -#define update_fgt_traps(vcpu, reg) \ - update_fgt_traps_cs(vcpu, reg, 0, 0) +#define update_fgt_traps(hctxt, vcpu, kvm, reg) \ + update_fgt_traps_cs(hctxt, vcpu, kvm, reg, 0, 0) /* * Validate the fine grain trap masks. @@ -122,8 +156,7 @@ static inline bool cpu_has_amu(void) static inline void __activate_traps_hfgxtr(struct kvm_vcpu *vcpu) { struct kvm_cpu_context *hctxt = &this_cpu_ptr(&kvm_host_data)->host_ctxt; - u64 r_clr = 0, w_clr = 0, r_set = 0, w_set = 0, tmp; - u64 r_val, w_val; + struct kvm *kvm = kern_hyp_va(vcpu->kvm); CHECK_FGT_MASKS(HFGRTR_EL2); CHECK_FGT_MASKS(HFGWTR_EL2); @@ -136,72 +169,45 @@ static inline void __activate_traps_hfgxtr(struct kvm_vcpu *vcpu) if (!cpus_have_final_cap(ARM64_HAS_FGT)) return; - ctxt_sys_reg(hctxt, HFGRTR_EL2) = read_sysreg_s(SYS_HFGRTR_EL2); - ctxt_sys_reg(hctxt, HFGWTR_EL2) = read_sysreg_s(SYS_HFGWTR_EL2); - - if (cpus_have_final_cap(ARM64_SME)) { - tmp = HFGxTR_EL2_nSMPRI_EL1_MASK | HFGxTR_EL2_nTPIDR2_EL0_MASK; - - r_clr |= tmp; - w_clr |= tmp; - } - - /* - * Trap guest writes to TCR_EL1 to prevent it from enabling HA or HD. - */ - if (cpus_have_final_cap(ARM64_WORKAROUND_AMPERE_AC03_CPU_38)) - w_set |= HFGxTR_EL2_TCR_EL1_MASK; - - if (vcpu_has_nv(vcpu) && !is_hyp_ctxt(vcpu)) { - compute_clr_set(vcpu, HFGRTR_EL2, r_clr, r_set); - compute_clr_set(vcpu, HFGWTR_EL2, w_clr, w_set); - } - - /* The default to trap everything not handled or supported in KVM. 
*/ - tmp = HFGxTR_EL2_nAMAIR2_EL1 | HFGxTR_EL2_nMAIR2_EL1 | HFGxTR_EL2_nS2POR_EL1 | - HFGxTR_EL2_nPOR_EL1 | HFGxTR_EL2_nPOR_EL0 | HFGxTR_EL2_nACCDATA_EL1; - - r_val = __HFGRTR_EL2_nMASK & ~tmp; - r_val |= r_set; - r_val &= ~r_clr; - - w_val = __HFGWTR_EL2_nMASK & ~tmp; - w_val |= w_set; - w_val &= ~w_clr; - - write_sysreg_s(r_val, SYS_HFGRTR_EL2); - write_sysreg_s(w_val, SYS_HFGWTR_EL2); - - if (!vcpu_has_nv(vcpu) || is_hyp_ctxt(vcpu)) - return; - - update_fgt_traps(vcpu, HFGITR_EL2); - update_fgt_traps(vcpu, HDFGRTR_EL2); - update_fgt_traps(vcpu, HDFGWTR_EL2); + update_fgt_traps(hctxt, vcpu, kvm, HFGRTR_EL2); + update_fgt_traps_cs(hctxt, vcpu, kvm, HFGWTR_EL2, 0, + cpus_have_final_cap(ARM64_WORKAROUND_AMPERE_AC03_CPU_38) ? + HFGxTR_EL2_TCR_EL1_MASK : 0); + update_fgt_traps(hctxt, vcpu, kvm, HFGITR_EL2); + update_fgt_traps(hctxt, vcpu, kvm, HDFGRTR_EL2); + update_fgt_traps(hctxt, vcpu, kvm, HDFGWTR_EL2); if (cpu_has_amu()) - update_fgt_traps(vcpu, HAFGRTR_EL2); + update_fgt_traps(hctxt, vcpu, kvm, HAFGRTR_EL2); } +#define __deactivate_fgt(htcxt, vcpu, kvm, reg) \ + do { \ + if ((vcpu_has_nv(vcpu) && !is_hyp_ctxt(vcpu)) || \ + kvm->arch.fgu[reg_to_fgt_group_id(reg)]) \ + write_sysreg_s(ctxt_sys_reg(hctxt, reg), \ + SYS_ ## reg); \ + } while(0) + static inline void __deactivate_traps_hfgxtr(struct kvm_vcpu *vcpu) { struct kvm_cpu_context *hctxt = &this_cpu_ptr(&kvm_host_data)->host_ctxt; + struct kvm *kvm = kern_hyp_va(vcpu->kvm); if (!cpus_have_final_cap(ARM64_HAS_FGT)) return; - write_sysreg_s(ctxt_sys_reg(hctxt, HFGRTR_EL2), SYS_HFGRTR_EL2); - write_sysreg_s(ctxt_sys_reg(hctxt, HFGWTR_EL2), SYS_HFGWTR_EL2); - - if (!vcpu_has_nv(vcpu) || is_hyp_ctxt(vcpu)) - return; - - write_sysreg_s(ctxt_sys_reg(hctxt, HFGITR_EL2), SYS_HFGITR_EL2); - write_sysreg_s(ctxt_sys_reg(hctxt, HDFGRTR_EL2), SYS_HDFGRTR_EL2); - write_sysreg_s(ctxt_sys_reg(hctxt, HDFGWTR_EL2), SYS_HDFGWTR_EL2); + __deactivate_fgt(hctxt, vcpu, kvm, HFGRTR_EL2); + if (cpus_have_final_cap(ARM64_WORKAROUND_AMPERE_AC03_CPU_38)) + write_sysreg_s(ctxt_sys_reg(hctxt, HFGWTR_EL2), SYS_HFGWTR_EL2); + else + __deactivate_fgt(hctxt, vcpu, kvm, HFGWTR_EL2); + __deactivate_fgt(hctxt, vcpu, kvm, HFGITR_EL2); + __deactivate_fgt(hctxt, vcpu, kvm, HDFGRTR_EL2); + __deactivate_fgt(hctxt, vcpu, kvm, HDFGWTR_EL2); if (cpu_has_amu()) - write_sysreg_s(ctxt_sys_reg(hctxt, HAFGRTR_EL2), SYS_HAFGRTR_EL2); + __deactivate_fgt(hctxt, vcpu, kvm, HAFGRTR_EL2); } static inline void __activate_traps_common(struct kvm_vcpu *vcpu) @@ -230,7 +236,7 @@ static inline void __activate_traps_common(struct kvm_vcpu *vcpu) write_sysreg(vcpu->arch.mdcr_el2, mdcr_el2); if (cpus_have_final_cap(ARM64_HAS_HCX)) { - u64 hcrx = HCRX_GUEST_FLAGS; + u64 hcrx = vcpu->arch.hcrx_el2; if (vcpu_has_nv(vcpu) && !is_hyp_ctxt(vcpu)) { u64 clr = 0, set = 0; diff --git a/arch/arm64/kvm/hyp/include/hyp/sysreg-sr.h b/arch/arm64/kvm/hyp/include/hyp/sysreg-sr.h index bb6b571ec627..4be6a7fa0070 100644 --- a/arch/arm64/kvm/hyp/include/hyp/sysreg-sr.h +++ b/arch/arm64/kvm/hyp/include/hyp/sysreg-sr.h @@ -27,16 +27,34 @@ static inline void __sysreg_save_user_state(struct kvm_cpu_context *ctxt) ctxt_sys_reg(ctxt, TPIDRRO_EL0) = read_sysreg(tpidrro_el0); } -static inline bool ctxt_has_mte(struct kvm_cpu_context *ctxt) +static inline struct kvm_vcpu *ctxt_to_vcpu(struct kvm_cpu_context *ctxt) { struct kvm_vcpu *vcpu = ctxt->__hyp_running_vcpu; if (!vcpu) vcpu = container_of(ctxt, struct kvm_vcpu, arch.ctxt); + return vcpu; +} + +static inline bool ctxt_has_mte(struct kvm_cpu_context *ctxt) +{ + struct 
kvm_vcpu *vcpu = ctxt_to_vcpu(ctxt); + return kvm_has_mte(kern_hyp_va(vcpu->kvm)); } +static inline bool ctxt_has_s1pie(struct kvm_cpu_context *ctxt) +{ + struct kvm_vcpu *vcpu; + + if (!cpus_have_final_cap(ARM64_HAS_S1PIE)) + return false; + + vcpu = ctxt_to_vcpu(ctxt); + return kvm_has_feat(kern_hyp_va(vcpu->kvm), ID_AA64MMFR3_EL1, S1PIE, IMP); +} + static inline void __sysreg_save_el1_state(struct kvm_cpu_context *ctxt) { ctxt_sys_reg(ctxt, SCTLR_EL1) = read_sysreg_el1(SYS_SCTLR); @@ -55,7 +73,7 @@ static inline void __sysreg_save_el1_state(struct kvm_cpu_context *ctxt) ctxt_sys_reg(ctxt, CONTEXTIDR_EL1) = read_sysreg_el1(SYS_CONTEXTIDR); ctxt_sys_reg(ctxt, AMAIR_EL1) = read_sysreg_el1(SYS_AMAIR); ctxt_sys_reg(ctxt, CNTKCTL_EL1) = read_sysreg_el1(SYS_CNTKCTL); - if (cpus_have_final_cap(ARM64_HAS_S1PIE)) { + if (ctxt_has_s1pie(ctxt)) { ctxt_sys_reg(ctxt, PIR_EL1) = read_sysreg_el1(SYS_PIR); ctxt_sys_reg(ctxt, PIRE0_EL1) = read_sysreg_el1(SYS_PIRE0); } @@ -131,7 +149,7 @@ static inline void __sysreg_restore_el1_state(struct kvm_cpu_context *ctxt) write_sysreg_el1(ctxt_sys_reg(ctxt, CONTEXTIDR_EL1), SYS_CONTEXTIDR); write_sysreg_el1(ctxt_sys_reg(ctxt, AMAIR_EL1), SYS_AMAIR); write_sysreg_el1(ctxt_sys_reg(ctxt, CNTKCTL_EL1), SYS_CNTKCTL); - if (cpus_have_final_cap(ARM64_HAS_S1PIE)) { + if (ctxt_has_s1pie(ctxt)) { write_sysreg_el1(ctxt_sys_reg(ctxt, PIR_EL1), SYS_PIR); write_sysreg_el1(ctxt_sys_reg(ctxt, PIRE0_EL1), SYS_PIRE0); } diff --git a/arch/arm64/kvm/nested.c b/arch/arm64/kvm/nested.c index d55e809e26cb..ced30c90521a 100644 --- a/arch/arm64/kvm/nested.c +++ b/arch/arm64/kvm/nested.c @@ -163,15 +163,280 @@ static u64 limit_nv_id_reg(u32 id, u64 val) return val; } + +u64 kvm_vcpu_sanitise_vncr_reg(const struct kvm_vcpu *vcpu, enum vcpu_sysreg sr) +{ + u64 v = ctxt_sys_reg(&vcpu->arch.ctxt, sr); + struct kvm_sysreg_masks *masks; + + masks = vcpu->kvm->arch.sysreg_masks; + + if (masks) { + sr -= __VNCR_START__; + + v &= ~masks->mask[sr].res0; + v |= masks->mask[sr].res1; + } + + return v; +} + +static void set_sysreg_masks(struct kvm *kvm, int sr, u64 res0, u64 res1) +{ + int i = sr - __VNCR_START__; + + kvm->arch.sysreg_masks->mask[i].res0 = res0; + kvm->arch.sysreg_masks->mask[i].res1 = res1; +} + int kvm_init_nv_sysregs(struct kvm *kvm) { + u64 res0, res1; + int ret = 0; + mutex_lock(&kvm->arch.config_lock); + if (kvm->arch.sysreg_masks) + goto out; + + kvm->arch.sysreg_masks = kzalloc(sizeof(*(kvm->arch.sysreg_masks)), + GFP_KERNEL); + if (!kvm->arch.sysreg_masks) { + ret = -ENOMEM; + goto out; + } + for (int i = 0; i < KVM_ARM_ID_REG_NUM; i++) kvm->arch.id_regs[i] = limit_nv_id_reg(IDX_IDREG(i), kvm->arch.id_regs[i]); + /* VTTBR_EL2 */ + res0 = res1 = 0; + if (!kvm_has_feat_enum(kvm, ID_AA64MMFR1_EL1, VMIDBits, 16)) + res0 |= GENMASK(63, 56); + if (!kvm_has_feat(kvm, ID_AA64MMFR2_EL1, CnP, IMP)) + res0 |= VTTBR_CNP_BIT; + set_sysreg_masks(kvm, VTTBR_EL2, res0, res1); + + /* VTCR_EL2 */ + res0 = GENMASK(63, 32) | GENMASK(30, 20); + res1 = BIT(31); + set_sysreg_masks(kvm, VTCR_EL2, res0, res1); + + /* VMPIDR_EL2 */ + res0 = GENMASK(63, 40) | GENMASK(30, 24); + res1 = BIT(31); + set_sysreg_masks(kvm, VMPIDR_EL2, res0, res1); + + /* HCR_EL2 */ + res0 = BIT(48); + res1 = HCR_RW; + if (!kvm_has_feat(kvm, ID_AA64MMFR1_EL1, TWED, IMP)) + res0 |= GENMASK(63, 59); + if (!kvm_has_feat(kvm, ID_AA64PFR1_EL1, MTE, MTE2)) + res0 |= (HCR_TID5 | HCR_DCT | HCR_ATA); + if (!kvm_has_feat(kvm, ID_AA64MMFR2_EL1, EVT, TTLBxS)) + res0 |= (HCR_TTLBIS | HCR_TTLBOS); + if (!kvm_has_feat(kvm, ID_AA64PFR0_EL1, 
CSV2, CSV2_2) && + !kvm_has_feat(kvm, ID_AA64PFR1_EL1, CSV2_frac, CSV2_1p2)) + res0 |= HCR_ENSCXT; + if (!kvm_has_feat(kvm, ID_AA64MMFR2_EL1, EVT, IMP)) + res0 |= (HCR_TOCU | HCR_TICAB | HCR_TID4); + if (!kvm_has_feat(kvm, ID_AA64PFR0_EL1, AMU, V1P1)) + res0 |= HCR_AMVOFFEN; + if (!kvm_has_feat(kvm, ID_AA64PFR0_EL1, RAS, V1P1)) + res0 |= HCR_FIEN; + if (!kvm_has_feat(kvm, ID_AA64MMFR2_EL1, FWB, IMP)) + res0 |= HCR_FWB; + if (!kvm_has_feat(kvm, ID_AA64MMFR2_EL1, NV, NV2)) + res0 |= HCR_NV2; + if (!kvm_has_feat(kvm, ID_AA64MMFR2_EL1, NV, IMP)) + res0 |= (HCR_AT | HCR_NV1 | HCR_NV); + if (!(__vcpu_has_feature(&kvm->arch, KVM_ARM_VCPU_PTRAUTH_ADDRESS) && + __vcpu_has_feature(&kvm->arch, KVM_ARM_VCPU_PTRAUTH_GENERIC))) + res0 |= (HCR_API | HCR_APK); + if (!kvm_has_feat(kvm, ID_AA64ISAR0_EL1, TME, IMP)) + res0 |= BIT(39); + if (!kvm_has_feat(kvm, ID_AA64PFR0_EL1, RAS, IMP)) + res0 |= (HCR_TEA | HCR_TERR); + if (!kvm_has_feat(kvm, ID_AA64MMFR1_EL1, LO, IMP)) + res0 |= HCR_TLOR; + if (!kvm_has_feat(kvm, ID_AA64MMFR4_EL1, E2H0, IMP)) + res1 |= HCR_E2H; + set_sysreg_masks(kvm, HCR_EL2, res0, res1); + + /* HCRX_EL2 */ + res0 = HCRX_EL2_RES0; + res1 = HCRX_EL2_RES1; + if (!kvm_has_feat(kvm, ID_AA64ISAR3_EL1, PACM, TRIVIAL_IMP)) + res0 |= HCRX_EL2_PACMEn; + if (!kvm_has_feat(kvm, ID_AA64PFR2_EL1, FPMR, IMP)) + res0 |= HCRX_EL2_EnFPM; + if (!kvm_has_feat(kvm, ID_AA64PFR1_EL1, GCS, IMP)) + res0 |= HCRX_EL2_GCSEn; + if (!kvm_has_feat(kvm, ID_AA64ISAR2_EL1, SYSREG_128, IMP)) + res0 |= HCRX_EL2_EnIDCP128; + if (!kvm_has_feat(kvm, ID_AA64MMFR3_EL1, ADERR, DEV_ASYNC)) + res0 |= (HCRX_EL2_EnSDERR | HCRX_EL2_EnSNERR); + if (!kvm_has_feat(kvm, ID_AA64PFR1_EL1, DF2, IMP)) + res0 |= HCRX_EL2_TMEA; + if (!kvm_has_feat(kvm, ID_AA64MMFR3_EL1, D128, IMP)) + res0 |= HCRX_EL2_D128En; + if (!kvm_has_feat(kvm, ID_AA64PFR1_EL1, THE, IMP)) + res0 |= HCRX_EL2_PTTWI; + if (!kvm_has_feat(kvm, ID_AA64MMFR3_EL1, SCTLRX, IMP)) + res0 |= HCRX_EL2_SCTLR2En; + if (!kvm_has_feat(kvm, ID_AA64MMFR3_EL1, TCRX, IMP)) + res0 |= HCRX_EL2_TCR2En; + if (!kvm_has_feat(kvm, ID_AA64ISAR2_EL1, MOPS, IMP)) + res0 |= (HCRX_EL2_MSCEn | HCRX_EL2_MCE2); + if (!kvm_has_feat(kvm, ID_AA64MMFR1_EL1, CMOW, IMP)) + res0 |= HCRX_EL2_CMOW; + if (!kvm_has_feat(kvm, ID_AA64PFR1_EL1, NMI, IMP)) + res0 |= (HCRX_EL2_VFNMI | HCRX_EL2_VINMI | HCRX_EL2_TALLINT); + if (!kvm_has_feat(kvm, ID_AA64PFR1_EL1, SME, IMP) || + !(read_sysreg_s(SYS_SMIDR_EL1) & SMIDR_EL1_SMPS)) + res0 |= HCRX_EL2_SMPME; + if (!kvm_has_feat(kvm, ID_AA64ISAR1_EL1, XS, IMP)) + res0 |= (HCRX_EL2_FGTnXS | HCRX_EL2_FnXS); + if (!kvm_has_feat(kvm, ID_AA64ISAR1_EL1, LS64, LS64_V)) + res0 |= HCRX_EL2_EnASR; + if (!kvm_has_feat(kvm, ID_AA64ISAR1_EL1, LS64, LS64)) + res0 |= HCRX_EL2_EnALS; + if (!kvm_has_feat(kvm, ID_AA64ISAR1_EL1, LS64, LS64_ACCDATA)) + res0 |= HCRX_EL2_EnAS0; + set_sysreg_masks(kvm, HCRX_EL2, res0, res1); + + /* HFG[RW]TR_EL2 */ + res0 = res1 = 0; + if (!(__vcpu_has_feature(&kvm->arch, KVM_ARM_VCPU_PTRAUTH_ADDRESS) && + __vcpu_has_feature(&kvm->arch, KVM_ARM_VCPU_PTRAUTH_GENERIC))) + res0 |= (HFGxTR_EL2_APDAKey | HFGxTR_EL2_APDBKey | + HFGxTR_EL2_APGAKey | HFGxTR_EL2_APIAKey | + HFGxTR_EL2_APIBKey); + if (!kvm_has_feat(kvm, ID_AA64MMFR1_EL1, LO, IMP)) + res0 |= (HFGxTR_EL2_LORC_EL1 | HFGxTR_EL2_LOREA_EL1 | + HFGxTR_EL2_LORID_EL1 | HFGxTR_EL2_LORN_EL1 | + HFGxTR_EL2_LORSA_EL1); + if (!kvm_has_feat(kvm, ID_AA64PFR0_EL1, CSV2, CSV2_2) && + !kvm_has_feat(kvm, ID_AA64PFR1_EL1, CSV2_frac, CSV2_1p2)) + res0 |= (HFGxTR_EL2_SCXTNUM_EL1 | HFGxTR_EL2_SCXTNUM_EL0); + if (!kvm_has_feat(kvm, 
ID_AA64PFR0_EL1, GIC, IMP)) + res0 |= HFGxTR_EL2_ICC_IGRPENn_EL1; + if (!kvm_has_feat(kvm, ID_AA64PFR0_EL1, RAS, IMP)) + res0 |= (HFGxTR_EL2_ERRIDR_EL1 | HFGxTR_EL2_ERRSELR_EL1 | + HFGxTR_EL2_ERXFR_EL1 | HFGxTR_EL2_ERXCTLR_EL1 | + HFGxTR_EL2_ERXSTATUS_EL1 | HFGxTR_EL2_ERXMISCn_EL1 | + HFGxTR_EL2_ERXPFGF_EL1 | HFGxTR_EL2_ERXPFGCTL_EL1 | + HFGxTR_EL2_ERXPFGCDN_EL1 | HFGxTR_EL2_ERXADDR_EL1); + if (!kvm_has_feat(kvm, ID_AA64ISAR1_EL1, LS64, LS64_ACCDATA)) + res0 |= HFGxTR_EL2_nACCDATA_EL1; + if (!kvm_has_feat(kvm, ID_AA64PFR1_EL1, GCS, IMP)) + res0 |= (HFGxTR_EL2_nGCS_EL0 | HFGxTR_EL2_nGCS_EL1); + if (!kvm_has_feat(kvm, ID_AA64PFR1_EL1, SME, IMP)) + res0 |= (HFGxTR_EL2_nSMPRI_EL1 | HFGxTR_EL2_nTPIDR2_EL0); + if (!kvm_has_feat(kvm, ID_AA64PFR1_EL1, THE, IMP)) + res0 |= HFGxTR_EL2_nRCWMASK_EL1; + if (!kvm_has_feat(kvm, ID_AA64MMFR3_EL1, S1PIE, IMP)) + res0 |= (HFGxTR_EL2_nPIRE0_EL1 | HFGxTR_EL2_nPIR_EL1); + if (!kvm_has_feat(kvm, ID_AA64MMFR3_EL1, S1POE, IMP)) + res0 |= (HFGxTR_EL2_nPOR_EL0 | HFGxTR_EL2_nPOR_EL1); + if (!kvm_has_feat(kvm, ID_AA64MMFR3_EL1, S2POE, IMP)) + res0 |= HFGxTR_EL2_nS2POR_EL1; + if (!kvm_has_feat(kvm, ID_AA64MMFR3_EL1, AIE, IMP)) + res0 |= (HFGxTR_EL2_nMAIR2_EL1 | HFGxTR_EL2_nAMAIR2_EL1); + set_sysreg_masks(kvm, HFGRTR_EL2, res0 | __HFGRTR_EL2_RES0, res1); + set_sysreg_masks(kvm, HFGWTR_EL2, res0 | __HFGWTR_EL2_RES0, res1); + + /* HDFG[RW]TR_EL2 */ + res0 = res1 = 0; + if (!kvm_has_feat(kvm, ID_AA64DFR0_EL1, DoubleLock, IMP)) + res0 |= HDFGRTR_EL2_OSDLR_EL1; + if (!kvm_has_feat(kvm, ID_AA64DFR0_EL1, PMUVer, IMP)) + res0 |= (HDFGRTR_EL2_PMEVCNTRn_EL0 | HDFGRTR_EL2_PMEVTYPERn_EL0 | + HDFGRTR_EL2_PMCCFILTR_EL0 | HDFGRTR_EL2_PMCCNTR_EL0 | + HDFGRTR_EL2_PMCNTEN | HDFGRTR_EL2_PMINTEN | + HDFGRTR_EL2_PMOVS | HDFGRTR_EL2_PMSELR_EL0 | + HDFGRTR_EL2_PMMIR_EL1 | HDFGRTR_EL2_PMUSERENR_EL0 | + HDFGRTR_EL2_PMCEIDn_EL0); + if (!kvm_has_feat(kvm, ID_AA64DFR0_EL1, PMSVer, IMP)) + res0 |= (HDFGRTR_EL2_PMBLIMITR_EL1 | HDFGRTR_EL2_PMBPTR_EL1 | + HDFGRTR_EL2_PMBSR_EL1 | HDFGRTR_EL2_PMSCR_EL1 | + HDFGRTR_EL2_PMSEVFR_EL1 | HDFGRTR_EL2_PMSFCR_EL1 | + HDFGRTR_EL2_PMSICR_EL1 | HDFGRTR_EL2_PMSIDR_EL1 | + HDFGRTR_EL2_PMSIRR_EL1 | HDFGRTR_EL2_PMSLATFR_EL1 | + HDFGRTR_EL2_PMBIDR_EL1); + if (!kvm_has_feat(kvm, ID_AA64DFR0_EL1, TraceVer, IMP)) + res0 |= (HDFGRTR_EL2_TRC | HDFGRTR_EL2_TRCAUTHSTATUS | + HDFGRTR_EL2_TRCAUXCTLR | HDFGRTR_EL2_TRCCLAIM | + HDFGRTR_EL2_TRCCNTVRn | HDFGRTR_EL2_TRCID | + HDFGRTR_EL2_TRCIMSPECn | HDFGRTR_EL2_TRCOSLSR | + HDFGRTR_EL2_TRCPRGCTLR | HDFGRTR_EL2_TRCSEQSTR | + HDFGRTR_EL2_TRCSSCSRn | HDFGRTR_EL2_TRCSTATR | + HDFGRTR_EL2_TRCVICTLR); + if (!kvm_has_feat(kvm, ID_AA64DFR0_EL1, TraceBuffer, IMP)) + res0 |= (HDFGRTR_EL2_TRBBASER_EL1 | HDFGRTR_EL2_TRBIDR_EL1 | + HDFGRTR_EL2_TRBLIMITR_EL1 | HDFGRTR_EL2_TRBMAR_EL1 | + HDFGRTR_EL2_TRBPTR_EL1 | HDFGRTR_EL2_TRBSR_EL1 | + HDFGRTR_EL2_TRBTRG_EL1); + if (!kvm_has_feat(kvm, ID_AA64DFR0_EL1, BRBE, IMP)) + res0 |= (HDFGRTR_EL2_nBRBIDR | HDFGRTR_EL2_nBRBCTL | + HDFGRTR_EL2_nBRBDATA); + if (!kvm_has_feat(kvm, ID_AA64DFR0_EL1, PMSVer, V1P2)) + res0 |= HDFGRTR_EL2_nPMSNEVFR_EL1; + set_sysreg_masks(kvm, HDFGRTR_EL2, res0 | HDFGRTR_EL2_RES0, res1); + + /* Reuse the bits from the read-side and add the write-specific stuff */ + if (!kvm_has_feat(kvm, ID_AA64DFR0_EL1, PMUVer, IMP)) + res0 |= (HDFGWTR_EL2_PMCR_EL0 | HDFGWTR_EL2_PMSWINC_EL0); + if (!kvm_has_feat(kvm, ID_AA64DFR0_EL1, TraceVer, IMP)) + res0 |= HDFGWTR_EL2_TRCOSLAR; + if (!kvm_has_feat(kvm, ID_AA64DFR0_EL1, TraceFilt, IMP)) + res0 |= HDFGWTR_EL2_TRFCR_EL1; + set_sysreg_masks(kvm, 
HFGWTR_EL2, res0 | HDFGWTR_EL2_RES0, res1); + + /* HFGITR_EL2 */ + res0 = HFGITR_EL2_RES0; + res1 = HFGITR_EL2_RES1; + if (!kvm_has_feat(kvm, ID_AA64ISAR1_EL1, DPB, DPB2)) + res0 |= HFGITR_EL2_DCCVADP; + if (!kvm_has_feat(kvm, ID_AA64MMFR1_EL1, PAN, PAN2)) + res0 |= (HFGITR_EL2_ATS1E1RP | HFGITR_EL2_ATS1E1WP); + if (!kvm_has_feat(kvm, ID_AA64ISAR0_EL1, TLB, OS)) + res0 |= (HFGITR_EL2_TLBIRVAALE1OS | HFGITR_EL2_TLBIRVALE1OS | + HFGITR_EL2_TLBIRVAAE1OS | HFGITR_EL2_TLBIRVAE1OS | + HFGITR_EL2_TLBIVAALE1OS | HFGITR_EL2_TLBIVALE1OS | + HFGITR_EL2_TLBIVAAE1OS | HFGITR_EL2_TLBIASIDE1OS | + HFGITR_EL2_TLBIVAE1OS | HFGITR_EL2_TLBIVMALLE1OS); + if (!kvm_has_feat(kvm, ID_AA64ISAR0_EL1, TLB, RANGE)) + res0 |= (HFGITR_EL2_TLBIRVAALE1 | HFGITR_EL2_TLBIRVALE1 | + HFGITR_EL2_TLBIRVAAE1 | HFGITR_EL2_TLBIRVAE1 | + HFGITR_EL2_TLBIRVAALE1IS | HFGITR_EL2_TLBIRVALE1IS | + HFGITR_EL2_TLBIRVAAE1IS | HFGITR_EL2_TLBIRVAE1IS | + HFGITR_EL2_TLBIRVAALE1OS | HFGITR_EL2_TLBIRVALE1OS | + HFGITR_EL2_TLBIRVAAE1OS | HFGITR_EL2_TLBIRVAE1OS); + if (!kvm_has_feat(kvm, ID_AA64ISAR1_EL1, SPECRES, IMP)) + res0 |= (HFGITR_EL2_CFPRCTX | HFGITR_EL2_DVPRCTX | + HFGITR_EL2_CPPRCTX); + if (!kvm_has_feat(kvm, ID_AA64DFR0_EL1, BRBE, IMP)) + res0 |= (HFGITR_EL2_nBRBINJ | HFGITR_EL2_nBRBIALL); + if (!kvm_has_feat(kvm, ID_AA64PFR1_EL1, GCS, IMP)) + res0 |= (HFGITR_EL2_nGCSPUSHM_EL1 | HFGITR_EL2_nGCSSTR_EL1 | + HFGITR_EL2_nGCSEPP); + if (!kvm_has_feat(kvm, ID_AA64ISAR1_EL1, SPECRES, COSP_RCTX)) + res0 |= HFGITR_EL2_COSPRCTX; + if (!kvm_has_feat(kvm, ID_AA64ISAR2_EL1, ATS1A, IMP)) + res0 |= HFGITR_EL2_ATS1E1A; + set_sysreg_masks(kvm, HFGITR_EL2, res0, res1); + + /* HAFGRTR_EL2 - not a lot to see here */ + res0 = HAFGRTR_EL2_RES0; + res1 = HAFGRTR_EL2_RES1; + if (!kvm_has_feat(kvm, ID_AA64PFR0_EL1, AMU, V1P1)) + res0 |= ~(res0 | res1); + set_sysreg_masks(kvm, HAFGRTR_EL2, res0, res1); +out: mutex_unlock(&kvm->arch.config_lock); - return 0; + return ret; } diff --git a/arch/arm64/kvm/pmu-emul.c b/arch/arm64/kvm/pmu-emul.c index 3d9467ff73bc..925522470b2b 100644 --- a/arch/arm64/kvm/pmu-emul.c +++ b/arch/arm64/kvm/pmu-emul.c @@ -64,12 +64,11 @@ u64 kvm_pmu_evtyper_mask(struct kvm *kvm) { u64 mask = ARMV8_PMU_EXCLUDE_EL1 | ARMV8_PMU_EXCLUDE_EL0 | kvm_pmu_event_mask(kvm); - u64 pfr0 = IDREG(kvm, SYS_ID_AA64PFR0_EL1); - if (SYS_FIELD_GET(ID_AA64PFR0_EL1, EL2, pfr0)) + if (kvm_has_feat(kvm, ID_AA64PFR0_EL1, EL2, IMP)) mask |= ARMV8_PMU_INCLUDE_EL2; - if (SYS_FIELD_GET(ID_AA64PFR0_EL1, EL3, pfr0)) + if (kvm_has_feat(kvm, ID_AA64PFR0_EL1, EL3, IMP)) mask |= ARMV8_PMU_EXCLUDE_NS_EL0 | ARMV8_PMU_EXCLUDE_NS_EL1 | ARMV8_PMU_EXCLUDE_EL3; @@ -83,8 +82,10 @@ u64 kvm_pmu_evtyper_mask(struct kvm *kvm) */ static bool kvm_pmc_is_64bit(struct kvm_pmc *pmc) { + struct kvm_vcpu *vcpu = kvm_pmc_to_vcpu(pmc); + return (pmc->idx == ARMV8_PMU_CYCLE_IDX || - kvm_pmu_is_3p5(kvm_pmc_to_vcpu(pmc))); + kvm_has_feat(vcpu->kvm, ID_AA64DFR0_EL1, PMUVer, V3P5)); } static bool kvm_pmc_has_64bit_overflow(struct kvm_pmc *pmc) @@ -556,7 +557,7 @@ void kvm_pmu_handle_pmcr(struct kvm_vcpu *vcpu, u64 val) return; /* Fixup PMCR_EL0 to reconcile the PMU version and the LP bit */ - if (!kvm_pmu_is_3p5(vcpu)) + if (!kvm_has_feat(vcpu->kvm, ID_AA64DFR0_EL1, PMUVer, V3P5)) val &= ~ARMV8_PMU_PMCR_LP; /* The reset bits don't indicate any state, and shouldn't be saved. 
*/ diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c index 041b11825578..5d2b13953141 100644 --- a/arch/arm64/kvm/sys_regs.c +++ b/arch/arm64/kvm/sys_regs.c @@ -12,6 +12,7 @@ #include <linux/bitfield.h> #include <linux/bsearch.h> #include <linux/cacheinfo.h> +#include <linux/debugfs.h> #include <linux/kvm_host.h> #include <linux/mm.h> #include <linux/printk.h> @@ -31,6 +32,7 @@ #include <trace/events/kvm.h> +#include "check-res-bits.h" #include "sys_regs.h" #include "trace.h" @@ -505,10 +507,9 @@ static bool trap_loregion(struct kvm_vcpu *vcpu, struct sys_reg_params *p, const struct sys_reg_desc *r) { - u64 val = IDREG(vcpu->kvm, SYS_ID_AA64MMFR1_EL1); u32 sr = reg_to_encoding(r); - if (!(val & (0xfUL << ID_AA64MMFR1_EL1_LO_SHIFT))) { + if (!kvm_has_feat(vcpu->kvm, ID_AA64MMFR1_EL1, LO, IMP)) { kvm_inject_undefined(vcpu); return false; } @@ -2197,16 +2198,6 @@ static u64 reset_hcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r) * guest... */ static const struct sys_reg_desc sys_reg_descs[] = { - { SYS_DESC(SYS_DC_ISW), access_dcsw }, - { SYS_DESC(SYS_DC_IGSW), access_dcgsw }, - { SYS_DESC(SYS_DC_IGDSW), access_dcgsw }, - { SYS_DESC(SYS_DC_CSW), access_dcsw }, - { SYS_DESC(SYS_DC_CGSW), access_dcgsw }, - { SYS_DESC(SYS_DC_CGDSW), access_dcgsw }, - { SYS_DESC(SYS_DC_CISW), access_dcsw }, - { SYS_DESC(SYS_DC_CIGSW), access_dcgsw }, - { SYS_DESC(SYS_DC_CIGDSW), access_dcgsw }, - DBG_BCR_BVR_WCR_WVR_EL1(0), DBG_BCR_BVR_WCR_WVR_EL1(1), { SYS_DESC(SYS_MDCCINT_EL1), trap_debug_regs, reset_val, MDCCINT_EL1, 0 }, @@ -2738,6 +2729,18 @@ static const struct sys_reg_desc sys_reg_descs[] = { EL2_REG(SP_EL2, NULL, reset_unknown, 0), }; +static struct sys_reg_desc sys_insn_descs[] = { + { SYS_DESC(SYS_DC_ISW), access_dcsw }, + { SYS_DESC(SYS_DC_IGSW), access_dcgsw }, + { SYS_DESC(SYS_DC_IGDSW), access_dcgsw }, + { SYS_DESC(SYS_DC_CSW), access_dcsw }, + { SYS_DESC(SYS_DC_CGSW), access_dcgsw }, + { SYS_DESC(SYS_DC_CGDSW), access_dcgsw }, + { SYS_DESC(SYS_DC_CISW), access_dcsw }, + { SYS_DESC(SYS_DC_CIGSW), access_dcgsw }, + { SYS_DESC(SYS_DC_CIGDSW), access_dcgsw }, +}; + static const struct sys_reg_desc *first_idreg; static bool trap_dbgdidr(struct kvm_vcpu *vcpu, @@ -2748,8 +2751,7 @@ static bool trap_dbgdidr(struct kvm_vcpu *vcpu, return ignore_write(vcpu, p); } else { u64 dfr = IDREG(vcpu->kvm, SYS_ID_AA64DFR0_EL1); - u64 pfr = IDREG(vcpu->kvm, SYS_ID_AA64PFR0_EL1); - u32 el3 = !!SYS_FIELD_GET(ID_AA64PFR0_EL1, EL3, pfr); + u32 el3 = kvm_has_feat(vcpu->kvm, ID_AA64PFR0_EL1, EL3, IMP); p->regval = ((SYS_FIELD_GET(ID_AA64DFR0_EL1, WRPs, dfr) << 28) | (SYS_FIELD_GET(ID_AA64DFR0_EL1, BRPs, dfr) << 24) | @@ -3395,12 +3397,6 @@ int kvm_handle_cp14_32(struct kvm_vcpu *vcpu) return kvm_handle_cp_32(vcpu, ¶ms, cp14_regs, ARRAY_SIZE(cp14_regs)); } -static bool is_imp_def_sys_reg(struct sys_reg_params *params) -{ - // See ARM DDI 0487E.a, section D12.3.2 - return params->Op0 == 3 && (params->CRn & 0b1011) == 0b1011; -} - /** * emulate_sys_reg - Emulate a guest access to an AArch64 system register * @vcpu: The VCPU pointer @@ -3409,26 +3405,106 @@ static bool is_imp_def_sys_reg(struct sys_reg_params *params) * Return: true if the system register access was successful, false otherwise. 
*/ static bool emulate_sys_reg(struct kvm_vcpu *vcpu, - struct sys_reg_params *params) + struct sys_reg_params *params) { const struct sys_reg_desc *r; r = find_reg(params, sys_reg_descs, ARRAY_SIZE(sys_reg_descs)); - if (likely(r)) { perform_access(vcpu, params, r); return true; } - if (is_imp_def_sys_reg(params)) { - kvm_inject_undefined(vcpu); + print_sys_reg_msg(params, + "Unsupported guest sys_reg access at: %lx [%08lx]\n", + *vcpu_pc(vcpu), *vcpu_cpsr(vcpu)); + kvm_inject_undefined(vcpu); + + return false; +} + +static void *idregs_debug_start(struct seq_file *s, loff_t *pos) +{ + struct kvm *kvm = s->private; + u8 *iter; + + mutex_lock(&kvm->arch.config_lock); + + iter = &kvm->arch.idreg_debugfs_iter; + if (test_bit(KVM_ARCH_FLAG_ID_REGS_INITIALIZED, &kvm->arch.flags) && + *iter == (u8)~0) { + *iter = *pos; + if (*iter >= KVM_ARM_ID_REG_NUM) + iter = NULL; } else { - print_sys_reg_msg(params, - "Unsupported guest sys_reg access at: %lx [%08lx]\n", - *vcpu_pc(vcpu), *vcpu_cpsr(vcpu)); - kvm_inject_undefined(vcpu); + iter = ERR_PTR(-EBUSY); } - return false; + + mutex_unlock(&kvm->arch.config_lock); + + return iter; +} + +static void *idregs_debug_next(struct seq_file *s, void *v, loff_t *pos) +{ + struct kvm *kvm = s->private; + + (*pos)++; + + if ((kvm->arch.idreg_debugfs_iter + 1) < KVM_ARM_ID_REG_NUM) { + kvm->arch.idreg_debugfs_iter++; + + return &kvm->arch.idreg_debugfs_iter; + } + + return NULL; +} + +static void idregs_debug_stop(struct seq_file *s, void *v) +{ + struct kvm *kvm = s->private; + + if (IS_ERR(v)) + return; + + mutex_lock(&kvm->arch.config_lock); + + kvm->arch.idreg_debugfs_iter = ~0; + + mutex_unlock(&kvm->arch.config_lock); +} + +static int idregs_debug_show(struct seq_file *s, void *v) +{ + struct kvm *kvm = s->private; + const struct sys_reg_desc *desc; + + desc = first_idreg + kvm->arch.idreg_debugfs_iter; + + if (!desc->name) + return 0; + + seq_printf(s, "%20s:\t%016llx\n", + desc->name, IDREG(kvm, IDX_IDREG(kvm->arch.idreg_debugfs_iter))); + + return 0; +} + +static const struct seq_operations idregs_debug_sops = { + .start = idregs_debug_start, + .next = idregs_debug_next, + .stop = idregs_debug_stop, + .show = idregs_debug_show, +}; + +DEFINE_SEQ_ATTRIBUTE(idregs_debug); + +void kvm_sys_regs_create_debugfs(struct kvm *kvm) +{ + kvm->arch.idreg_debugfs_iter = ~0; + + debugfs_create_file("idregs", 0444, kvm->debugfs_dentry, kvm, + &idregs_debug_fops); } static void kvm_reset_id_regs(struct kvm_vcpu *vcpu) @@ -3478,28 +3554,39 @@ void kvm_reset_sys_regs(struct kvm_vcpu *vcpu) } /** - * kvm_handle_sys_reg -- handles a mrs/msr trap on a guest sys_reg access + * kvm_handle_sys_reg -- handles a system instruction or mrs/msr instruction + * trap on a guest execution * @vcpu: The VCPU pointer */ int kvm_handle_sys_reg(struct kvm_vcpu *vcpu) { + const struct sys_reg_desc *desc = NULL; struct sys_reg_params params; unsigned long esr = kvm_vcpu_get_esr(vcpu); int Rt = kvm_vcpu_sys_get_rt(vcpu); + int sr_idx; trace_kvm_handle_sys_reg(esr); - if (__check_nv_sr_forward(vcpu)) + if (triage_sysreg_trap(vcpu, &sr_idx)) return 1; params = esr_sys64_to_params(esr); params.regval = vcpu_get_reg(vcpu, Rt); - if (!emulate_sys_reg(vcpu, ¶ms)) - return 1; + /* System registers have Op0=={2,3}, as per DDI487 J.a C5.1.2 */ + if (params.Op0 == 2 || params.Op0 == 3) + desc = &sys_reg_descs[sr_idx]; + else + desc = &sys_insn_descs[sr_idx]; - if (!params.is_write) + perform_access(vcpu, ¶ms, desc); + + /* Read from system register? 
*/ + if (!params.is_write && + (params.Op0 == 2 || params.Op0 == 3)) vcpu_set_reg(vcpu, Rt, params.regval); + return 1; } @@ -3941,11 +4028,86 @@ int kvm_vm_ioctl_get_reg_writable_masks(struct kvm *kvm, struct reg_mask_range * return 0; } +void kvm_init_sysreg(struct kvm_vcpu *vcpu) +{ + struct kvm *kvm = vcpu->kvm; + + mutex_lock(&kvm->arch.config_lock); + + /* + * In the absence of FGT, we cannot independently trap TLBI + * Range instructions. This isn't great, but trapping all + * TLBIs would be far worse. Live with it... + */ + if (!kvm_has_feat(kvm, ID_AA64ISAR0_EL1, TLB, OS)) + vcpu->arch.hcr_el2 |= HCR_TTLBOS; + + if (cpus_have_final_cap(ARM64_HAS_HCX)) { + vcpu->arch.hcrx_el2 = HCRX_GUEST_FLAGS; + + if (kvm_has_feat(kvm, ID_AA64ISAR2_EL1, MOPS, IMP)) + vcpu->arch.hcrx_el2 |= (HCRX_EL2_MSCEn | HCRX_EL2_MCE2); + } + + if (test_bit(KVM_ARCH_FLAG_FGU_INITIALIZED, &kvm->arch.flags)) + goto out; + + kvm->arch.fgu[HFGxTR_GROUP] = (HFGxTR_EL2_nAMAIR2_EL1 | + HFGxTR_EL2_nMAIR2_EL1 | + HFGxTR_EL2_nS2POR_EL1 | + HFGxTR_EL2_nPOR_EL1 | + HFGxTR_EL2_nPOR_EL0 | + HFGxTR_EL2_nACCDATA_EL1 | + HFGxTR_EL2_nSMPRI_EL1_MASK | + HFGxTR_EL2_nTPIDR2_EL0_MASK); + + if (!kvm_has_feat(kvm, ID_AA64ISAR0_EL1, TLB, OS)) + kvm->arch.fgu[HFGITR_GROUP] |= (HFGITR_EL2_TLBIRVAALE1OS| + HFGITR_EL2_TLBIRVALE1OS | + HFGITR_EL2_TLBIRVAAE1OS | + HFGITR_EL2_TLBIRVAE1OS | + HFGITR_EL2_TLBIVAALE1OS | + HFGITR_EL2_TLBIVALE1OS | + HFGITR_EL2_TLBIVAAE1OS | + HFGITR_EL2_TLBIASIDE1OS | + HFGITR_EL2_TLBIVAE1OS | + HFGITR_EL2_TLBIVMALLE1OS); + + if (!kvm_has_feat(kvm, ID_AA64ISAR0_EL1, TLB, RANGE)) + kvm->arch.fgu[HFGITR_GROUP] |= (HFGITR_EL2_TLBIRVAALE1 | + HFGITR_EL2_TLBIRVALE1 | + HFGITR_EL2_TLBIRVAAE1 | + HFGITR_EL2_TLBIRVAE1 | + HFGITR_EL2_TLBIRVAALE1IS| + HFGITR_EL2_TLBIRVALE1IS | + HFGITR_EL2_TLBIRVAAE1IS | + HFGITR_EL2_TLBIRVAE1IS | + HFGITR_EL2_TLBIRVAALE1OS| + HFGITR_EL2_TLBIRVALE1OS | + HFGITR_EL2_TLBIRVAAE1OS | + HFGITR_EL2_TLBIRVAE1OS); + + if (!kvm_has_feat(kvm, ID_AA64MMFR3_EL1, S1PIE, IMP)) + kvm->arch.fgu[HFGxTR_GROUP] |= (HFGxTR_EL2_nPIRE0_EL1 | + HFGxTR_EL2_nPIR_EL1); + + if (!kvm_has_feat(kvm, ID_AA64PFR0_EL1, AMU, IMP)) + kvm->arch.fgu[HAFGRTR_GROUP] |= ~(HAFGRTR_EL2_RES0 | + HAFGRTR_EL2_RES1); + + set_bit(KVM_ARCH_FLAG_FGU_INITIALIZED, &kvm->arch.flags); +out: + mutex_unlock(&kvm->arch.config_lock); +} + int __init kvm_sys_reg_table_init(void) { struct sys_reg_params params; bool valid = true; unsigned int i; + int ret = 0; + + check_res_bits(); /* Make sure tables are unique and in order. 
*/ valid &= check_sysreg_table(sys_reg_descs, ARRAY_SIZE(sys_reg_descs), false); @@ -3954,6 +4116,7 @@ int __init kvm_sys_reg_table_init(void) valid &= check_sysreg_table(cp15_regs, ARRAY_SIZE(cp15_regs), true); valid &= check_sysreg_table(cp15_64_regs, ARRAY_SIZE(cp15_64_regs), true); valid &= check_sysreg_table(invariant_sys_regs, ARRAY_SIZE(invariant_sys_regs), false); + valid &= check_sysreg_table(sys_insn_descs, ARRAY_SIZE(sys_insn_descs), false); if (!valid) return -EINVAL; @@ -3968,8 +4131,13 @@ int __init kvm_sys_reg_table_init(void) if (!first_idreg) return -EINVAL; - if (kvm_get_mode() == KVM_MODE_NV) - return populate_nv_trap_config(); + ret = populate_nv_trap_config(); - return 0; + for (i = 0; !ret && i < ARRAY_SIZE(sys_reg_descs); i++) + ret = populate_sysreg_config(sys_reg_descs + i, i); + + for (i = 0; !ret && i < ARRAY_SIZE(sys_insn_descs); i++) + ret = populate_sysreg_config(sys_insn_descs + i, i); + + return ret; } diff --git a/arch/arm64/kvm/sys_regs.h b/arch/arm64/kvm/sys_regs.h index c65c129b3500..997eea21ba2a 100644 --- a/arch/arm64/kvm/sys_regs.h +++ b/arch/arm64/kvm/sys_regs.h @@ -233,6 +233,8 @@ int kvm_sys_reg_get_user(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg, int kvm_sys_reg_set_user(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg, const struct sys_reg_desc table[], unsigned int num); +bool triage_sysreg_trap(struct kvm_vcpu *vcpu, int *sr_index); + #define AA32(_x) .aarch32_map = AA32_##_x #define Op0(_x) .Op0 = _x #define Op1(_x) .Op1 = _x |