diff options
| author | Marc Zyngier <maz@kernel.org> | 2026-04-08 14:26:00 +0300 |
|---|---|---|
| committer | Marc Zyngier <maz@kernel.org> | 2026-04-08 14:26:00 +0300 |
| commit | d77f4792db8be87bd1ed88c952250c717c1b629c (patch) | |
| tree | 90225d4bf913e13c79b40992fdb2bef5a87a517e /tools/testing | |
| parent | 83a3980750e3cc25cb7ded90f11c157eb3f9f428 (diff) | |
| parent | 7e629348df81b339dbc233313f0f36ff5a25fc3d (diff) | |
| download | linux-d77f4792db8be87bd1ed88c952250c717c1b629c.tar.xz | |
Merge branch kvm-arm64/vgic-fixes-7.1 into kvmarm-master/next
* kvm-arm64/vgic-fixes-7.1:
: .
: First pass at fixing a number of vgic-v5 bugs that were found
: after the merge of the initial series.
: .
KVM: arm64: Advertise ID_AA64PFR2_EL1.GCIE
KVM: arm64: vgic-v5: Fold PPI state for all exposed PPIs
KVM: arm64: set_id_regs: Allow GICv3 support to be set at runtime
KVM: arm64: Don't advertises GICv3 in ID_PFR1_EL1 if AArch32 isn't supported
KVM: arm64: Correctly plumb ID_AA64PFR2_EL1 into pkvm idreg handling
KVM: arm64: Move GICv5 timer PPI validation into timer_irqs_are_valid()
KVM: arm64: Remove evaluation of timer state in kvm_cpu_has_pending_timer()
KVM: arm64: Kill arch_timer_context::direct field
KVM: arm64: vgic-v5: Correctly set dist->ready once initialised
KVM: arm64: vgic-v5: Make the effective priority mask a strict limit
KVM: arm64: vgic-v5: Cast vgic_apr to u32 to avoid undefined behaviours
KVM: arm64: vgic-v5: Transfer edge pending state to ICH_PPI_PENDRx_EL2
KVM: arm64: vgic-v5: Hold config_lock while finalizing GICv5 PPIs
KVM: arm64: Account for RESx bits in __compute_fgt()
KVM: arm64: Fix writeable mask for ID_AA64PFR2_EL1
arm64: Fix field references for ICH_PPI_DVIR[01]_EL2
KVM: arm64: Don't skip per-vcpu NV initialisation
KVM: arm64: vgic: Don't reset cpuif/redist addresses at finalize time
Signed-off-by: Marc Zyngier <maz@kernel.org>
Diffstat (limited to 'tools/testing')
| -rw-r--r-- | tools/testing/selftests/kvm/arm64/set_id_regs.c | 52 |
1 file changed, 45 insertions, 7 deletions
diff --git a/tools/testing/selftests/kvm/arm64/set_id_regs.c b/tools/testing/selftests/kvm/arm64/set_id_regs.c index 73de5be58bab..7899d557c70b 100644 --- a/tools/testing/selftests/kvm/arm64/set_id_regs.c +++ b/tools/testing/selftests/kvm/arm64/set_id_regs.c @@ -37,6 +37,9 @@ struct reg_ftr_bits { * For FTR_LOWER_SAFE, safe_val is used as the minimal safe value. */ int64_t safe_val; + + /* Allowed to be changed by the host after run */ + bool mutable; }; struct test_feature_reg { @@ -44,7 +47,7 @@ struct test_feature_reg { const struct reg_ftr_bits *ftr_bits; }; -#define __REG_FTR_BITS(NAME, SIGNED, TYPE, SHIFT, MASK, SAFE_VAL) \ +#define __REG_FTR_BITS(NAME, SIGNED, TYPE, SHIFT, MASK, SAFE_VAL, MUT) \ { \ .name = #NAME, \ .sign = SIGNED, \ @@ -52,15 +55,20 @@ struct test_feature_reg { .shift = SHIFT, \ .mask = MASK, \ .safe_val = SAFE_VAL, \ + .mutable = MUT, \ } #define REG_FTR_BITS(type, reg, field, safe_val) \ __REG_FTR_BITS(reg##_##field, FTR_UNSIGNED, type, reg##_##field##_SHIFT, \ - reg##_##field##_MASK, safe_val) + reg##_##field##_MASK, safe_val, false) + +#define REG_FTR_BITS_MUTABLE(type, reg, field, safe_val) \ + __REG_FTR_BITS(reg##_##field, FTR_UNSIGNED, type, reg##_##field##_SHIFT, \ + reg##_##field##_MASK, safe_val, true) #define S_REG_FTR_BITS(type, reg, field, safe_val) \ __REG_FTR_BITS(reg##_##field, FTR_SIGNED, type, reg##_##field##_SHIFT, \ - reg##_##field##_MASK, safe_val) + reg##_##field##_MASK, safe_val, false) #define REG_FTR_END \ { \ @@ -134,7 +142,8 @@ static const struct reg_ftr_bits ftr_id_aa64pfr0_el1[] = { REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64PFR0_EL1, CSV2, 0), REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64PFR0_EL1, DIT, 0), REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64PFR0_EL1, SEL2, 0), - REG_FTR_BITS(FTR_EXACT, ID_AA64PFR0_EL1, GIC, 0), + /* GICv3 support will be forced at run time if available */ + REG_FTR_BITS_MUTABLE(FTR_EXACT, ID_AA64PFR0_EL1, GIC, 0), REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64PFR0_EL1, EL3, 1), REG_FTR_BITS(FTR_LOWER_SAFE, 
ID_AA64PFR0_EL1, EL2, 1), REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64PFR0_EL1, EL1, 1), @@ -634,12 +643,38 @@ static void test_user_set_mte_reg(struct kvm_vcpu *vcpu) ksft_test_result_pass("ID_AA64PFR1_EL1.MTE_frac no longer 0xF\n"); } +static uint64_t reset_mutable_bits(uint32_t id, uint64_t val) +{ + struct test_feature_reg *reg = NULL; + + for (int i = 0; i < ARRAY_SIZE(test_regs); i++) { + if (test_regs[i].reg == id) { + reg = &test_regs[i]; + break; + } + } + + if (!reg) + return val; + + for (const struct reg_ftr_bits *bits = reg->ftr_bits; bits->type != FTR_END; bits++) { + if (bits->mutable) { + val &= ~bits->mask; + val |= bits->safe_val << bits->shift; + } + } + + return val; +} + static void test_guest_reg_read(struct kvm_vcpu *vcpu) { bool done = false; struct ucall uc; while (!done) { + uint64_t val; + vcpu_run(vcpu); switch (get_ucall(vcpu, &uc)) { @@ -647,9 +682,11 @@ static void test_guest_reg_read(struct kvm_vcpu *vcpu) REPORT_GUEST_ASSERT(uc); break; case UCALL_SYNC: + val = test_reg_vals[encoding_to_range_idx(uc.args[2])]; + val = reset_mutable_bits(uc.args[2], val); + /* Make sure the written values are seen by guest */ - TEST_ASSERT_EQ(test_reg_vals[encoding_to_range_idx(uc.args[2])], - uc.args[3]); + TEST_ASSERT_EQ(val, reset_mutable_bits(uc.args[2], uc.args[3])); break; case UCALL_DONE: done = true; @@ -740,7 +777,8 @@ static void test_assert_id_reg_unchanged(struct kvm_vcpu *vcpu, uint32_t encodin uint64_t observed; observed = vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(encoding)); - TEST_ASSERT_EQ(test_reg_vals[idx], observed); + TEST_ASSERT_EQ(reset_mutable_bits(encoding, test_reg_vals[idx]), + reset_mutable_bits(encoding, observed)); } static void test_reset_preserves_id_regs(struct kvm_vcpu *vcpu) |
