author		Marc Zyngier <maz@kernel.org>	2025-02-20 16:49:03 +0300
committer	Oliver Upton <oliver.upton@linux.dev>	2025-02-24 22:30:17 +0300
commit		f83c41fb3dddbf47881249335a9718d2cdce0bd0 (patch)
tree		5b1a7d2a7f8eb268e079ccfad5e35e9df4d73b7c
parent		94f296dcd6d937371dd83df048b9a2d723d357c9 (diff)
KVM: arm64: Allow userspace to limit NV support to nVHE
NV is hard. No kidding.

In order to make things simpler, we have established that NV would
support two mutually exclusive configurations:

- VHE-only, and supporting recursive virtualisation

- nVHE-only, and not supporting recursive virtualisation

For that purpose, introduce a new vcpu feature flag that denotes the
second configuration. We use this flag to limit the idregs further.

Signed-off-by: Marc Zyngier <maz@kernel.org>
Reviewed-by: Joey Gouly <joey.gouly@arm.com>
Link: https://lore.kernel.org/r/20250220134907.554085-11-maz@kernel.org
Signed-off-by: Oliver Upton <oliver.upton@linux.dev>
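[ Editor's illustration, not part of the commit: a minimal userspace
  sketch of opting into the new configuration. It relies only on the
  standard KVM_ARM_PREFERRED_TARGET and KVM_ARM_VCPU_INIT ioctls; the
  helper name, the fds, and the trimmed error handling are
  hypothetical. ]

#include <linux/kvm.h>
#include <sys/ioctl.h>

/*
 * Hypothetical helper: request EL2 (nested virt) limited to the
 * nVHE-only configuration on a freshly created vcpu fd.
 */
static int enable_nvhe_only_nv(int vm_fd, int vcpu_fd)
{
	struct kvm_vcpu_init init;

	/* Start from the host's preferred target; features[] comes back zeroed */
	if (ioctl(vm_fd, KVM_ARM_PREFERRED_TARGET, &init))
		return -1;

	/* Nested virtualisation... */
	init.features[KVM_ARM_VCPU_HAS_EL2 / 32] |=
		1U << (KVM_ARM_VCPU_HAS_EL2 % 32);
	/* ...with HCR_EL2.E2H handled as RES0, i.e. no VHE for the guest */
	init.features[KVM_ARM_VCPU_HAS_EL2_E2H0 / 32] |=
		1U << (KVM_ARM_VCPU_HAS_EL2_E2H0 % 32);

	/*
	 * The kvm_vcpu_init_nested() check added below makes KVM reject
	 * this combination (-EINVAL) on hosts without ARM64_HAS_HCR_NV1.
	 */
	return ioctl(vcpu_fd, KVM_ARM_VCPU_INIT, &init);
}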
-rw-r--r--	arch/arm64/include/uapi/asm/kvm.h	1
-rw-r--r--	arch/arm64/kvm/nested.c			28
2 files changed, 27 insertions, 2 deletions
diff --git a/arch/arm64/include/uapi/asm/kvm.h b/arch/arm64/include/uapi/asm/kvm.h
index 568bf858f319..3bcab2a106c9 100644
--- a/arch/arm64/include/uapi/asm/kvm.h
+++ b/arch/arm64/include/uapi/asm/kvm.h
@@ -105,6 +105,7 @@ struct kvm_regs {
#define KVM_ARM_VCPU_PTRAUTH_ADDRESS 5 /* VCPU uses address authentication */
#define KVM_ARM_VCPU_PTRAUTH_GENERIC 6 /* VCPU uses generic authentication */
#define KVM_ARM_VCPU_HAS_EL2 7 /* Support nested virtualization */
+#define KVM_ARM_VCPU_HAS_EL2_E2H0 8 /* Limit NV support to E2H RES0 */
struct kvm_vcpu_init {
__u32 target;
diff --git a/arch/arm64/kvm/nested.c b/arch/arm64/kvm/nested.c
index 409e5e67ae1e..933dc3acac5f 100644
--- a/arch/arm64/kvm/nested.c
+++ b/arch/arm64/kvm/nested.c
@@ -51,6 +51,10 @@ int kvm_vcpu_init_nested(struct kvm_vcpu *vcpu)
struct kvm_s2_mmu *tmp;
int num_mmus, ret = 0;
+ if (test_bit(KVM_ARM_VCPU_HAS_EL2_E2H0, kvm->arch.vcpu_features) &&
+ !cpus_have_final_cap(ARM64_HAS_HCR_NV1))
+ return -EINVAL;
+
/*
* Let's treat memory allocation failures as benign: If we fail to
* allocate anything, return an error and keep the allocated array
@@ -894,6 +898,9 @@ u64 limit_nv_id_reg(struct kvm *kvm, u32 reg, u64 val)
ID_AA64MMFR1_EL1_HPDS |
ID_AA64MMFR1_EL1_VH |
ID_AA64MMFR1_EL1_VMIDBits);
+ /* FEAT_E2H0 implies no VHE */
+ if (test_bit(KVM_ARM_VCPU_HAS_EL2_E2H0, kvm->arch.vcpu_features))
+ val &= ~ID_AA64MMFR1_EL1_VH;
break;
case SYS_ID_AA64MMFR2_EL1:
@@ -909,8 +916,25 @@ u64 limit_nv_id_reg(struct kvm *kvm, u32 reg, u64 val)
break;
case SYS_ID_AA64MMFR4_EL1:
- val = SYS_FIELD_PREP_ENUM(ID_AA64MMFR4_EL1, NV_frac, NV2_ONLY);
- val |= SYS_FIELD_PREP_ENUM(ID_AA64MMFR4_EL1, E2H0, NI_NV1);
+ /*
+ * You get EITHER
+ *
+ * - FEAT_VHE without FEAT_E2H0
+ * - FEAT_NV limited to FEAT_NV2
+ * - HCR_EL2.NV1 being RES0
+ *
+ * OR
+ *
+ * - FEAT_E2H0 without FEAT_VHE nor FEAT_NV
+ *
+ * Life is too short for anything else.
+ */
+ if (test_bit(KVM_ARM_VCPU_HAS_EL2_E2H0, kvm->arch.vcpu_features)) {
+ val = 0;
+ } else {
+ val = SYS_FIELD_PREP_ENUM(ID_AA64MMFR4_EL1, NV_frac, NV2_ONLY);
+ val |= SYS_FIELD_PREP_ENUM(ID_AA64MMFR4_EL1, E2H0, NI_NV1);
+ }
break;
case SYS_ID_AA64DFR0_EL1:
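[ Editor's illustration, not part of the commit: what the limiting
  above means for userspace. With KVM_ARM_VCPU_HAS_EL2_E2H0 set,
  ID_AA64MMFR4_EL1 reads as all zeroes (FEAT_E2H0 advertised, no
  NV_frac); otherwise it carries the NV_frac=NV2_ONLY / E2H0=NI_NV1
  encoding from the hunk above. The dump_mmfr4() helper is
  hypothetical and assumes the register is accessible through the
  standard KVM_GET_ONE_REG ioctl, as the other ID registers are. ]

#include <linux/kvm.h>
#include <stdio.h>
#include <sys/ioctl.h>

/* ID_AA64MMFR4_EL1: op0=3, op1=0, CRn=0, CRm=7, op2=4 */
#define MMFR4_ID	ARM64_SYS_REG(3, 0, 0, 7, 4)

/* Illustrative helper: print the vcpu's (limited) ID_AA64MMFR4_EL1 */
static void dump_mmfr4(int vcpu_fd)
{
	__u64 mmfr4 = 0;
	struct kvm_one_reg reg = {
		.id   = MMFR4_ID,
		.addr = (__u64)&mmfr4,
	};

	if (ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg))
		return;

	/*
	 * 0x0 in the nVHE-only (E2H0) configuration; otherwise the
	 * NV2_ONLY/NI_NV1 value built in the hunk above.
	 */
	printf("ID_AA64MMFR4_EL1 = 0x%llx\n", (unsigned long long)mmfr4);
}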