Diffstat (limited to 'tools/testing/selftests/kvm/x86_64')
13 files changed, 210 insertions, 99 deletions
diff --git a/tools/testing/selftests/kvm/x86_64/cr4_cpuid_sync_test.c b/tools/testing/selftests/kvm/x86_64/cr4_cpuid_sync_test.c
index 7c2c4d4055a8..63cc9c3f5ab6 100644
--- a/tools/testing/selftests/kvm/x86_64/cr4_cpuid_sync_test.c
+++ b/tools/testing/selftests/kvm/x86_64/cr4_cpuid_sync_test.c
@@ -87,6 +87,7 @@ int main(int argc, char *argv[])
 	while (1) {
 		rc = _vcpu_run(vm, VCPU_ID);
 
+		TEST_ASSERT(rc == 0, "vcpu_run failed: %d\n", rc);
 		TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
 			    "Unexpected exit reason: %u (%s),\n",
 			    run->exit_reason,
diff --git a/tools/testing/selftests/kvm/x86_64/evmcs_test.c b/tools/testing/selftests/kvm/x86_64/evmcs_test.c
index 36669684eca5..b38260e29775 100644
--- a/tools/testing/selftests/kvm/x86_64/evmcs_test.c
+++ b/tools/testing/selftests/kvm/x86_64/evmcs_test.c
@@ -19,8 +19,6 @@
 
 #define VCPU_ID		5
 
-static bool have_nested_state;
-
 void l2_guest_code(void)
 {
 	GUEST_SYNC(6);
@@ -73,7 +71,6 @@ void guest_code(struct vmx_pages *vmx_pages)
 
 int main(int argc, char *argv[])
 {
-	struct vmx_pages *vmx_pages = NULL;
 	vm_vaddr_t vmx_pages_gva = 0;
 
 	struct kvm_regs regs1, regs2;
@@ -88,8 +85,6 @@ int main(int argc, char *argv[])
 		 .args[0] = (unsigned long)&evmcs_ver
 	};
 
-	struct kvm_cpuid_entry2 *entry = kvm_get_supported_cpuid_entry(1);
-
 	/* Create VM */
 	vm = vm_create_default(VCPU_ID, 0, guest_code);
 
@@ -113,7 +108,7 @@ int main(int argc, char *argv[])
 
 	vcpu_regs_get(vm, VCPU_ID, &regs1);
 
-	vmx_pages = vcpu_alloc_vmx(vm, &vmx_pages_gva);
+	vcpu_alloc_vmx(vm, &vmx_pages_gva);
 	vcpu_args_set(vm, VCPU_ID, 1, vmx_pages_gva);
 
 	for (stage = 1;; stage++) {
diff --git a/tools/testing/selftests/kvm/x86_64/hyperv_cpuid.c b/tools/testing/selftests/kvm/x86_64/hyperv_cpuid.c
index 9a21e912097c..f72b3043db0e 100644
--- a/tools/testing/selftests/kvm/x86_64/hyperv_cpuid.c
+++ b/tools/testing/selftests/kvm/x86_64/hyperv_cpuid.c
@@ -52,15 +52,11 @@ static void test_hv_cpuid(struct kvm_cpuid2 *hv_cpuid_entries,
 		TEST_ASSERT(entry->index == 0,
 			    ".index field should be zero");
 
-		TEST_ASSERT(entry->index == 0,
-			    ".index field should be zero");
-
 		TEST_ASSERT(entry->flags == 0,
 			    ".flags field should be zero");
 
-		TEST_ASSERT(entry->padding[0] == entry->padding[1]
-			    == entry->padding[2] == 0,
-			    ".index field should be zero");
+		TEST_ASSERT(!entry->padding[0] && !entry->padding[1] &&
+			    !entry->padding[2], "padding should be zero");
 
 		/*
 		 * If needed for debug:
@@ -90,7 +86,6 @@ struct kvm_cpuid2 *kvm_get_supported_hv_cpuid(struct kvm_vm *vm)
 {
 	int nent = 20; /* should be enough */
 	static struct kvm_cpuid2 *cpuid;
-	int ret;
 
 	cpuid = malloc(sizeof(*cpuid) +
 		       nent * sizeof(struct kvm_cpuid_entry2));
diff --git a/tools/testing/selftests/kvm/x86_64/kvm_create_max_vcpus.c b/tools/testing/selftests/kvm/x86_64/kvm_create_max_vcpus.c
index 50e92996f918..6a3eec8da351 100644
--- a/tools/testing/selftests/kvm/x86_64/kvm_create_max_vcpus.c
+++ b/tools/testing/selftests/kvm/x86_64/kvm_create_max_vcpus.c
@@ -1,10 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * kvm_create_max_vcpus
  *
  * Copyright (C) 2019, Google LLC.
  *
- * This work is licensed under the terms of the GNU GPL, version 2.
- *
  * Test for KVM_CAP_MAX_VCPUS and KVM_CAP_MAX_VCPU_ID.
  */
 
diff --git a/tools/testing/selftests/kvm/x86_64/mmio_warning_test.c b/tools/testing/selftests/kvm/x86_64/mmio_warning_test.c
new file mode 100644
index 000000000000..00bb97d76000
--- /dev/null
+++ b/tools/testing/selftests/kvm/x86_64/mmio_warning_test.c
@@ -0,0 +1,126 @@
+/*
+ * mmio_warning_test
+ *
+ * Copyright (C) 2019, Google LLC.
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2.
+ *
+ * Test that we don't get a kernel warning when we call KVM_RUN after a
+ * triple fault occurs.  To get the triple fault to occur we call KVM_RUN
+ * on a VCPU that hasn't been properly setup.
+ *
+ */
+
+#define _GNU_SOURCE
+#include <fcntl.h>
+#include <kvm_util.h>
+#include <linux/kvm.h>
+#include <processor.h>
+#include <pthread.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/ioctl.h>
+#include <sys/mman.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <sys/wait.h>
+#include <test_util.h>
+#include <unistd.h>
+
+#define NTHREAD 4
+#define NPROCESS 5
+
+struct thread_context {
+	int kvmcpu;
+	struct kvm_run *run;
+};
+
+void *thr(void *arg)
+{
+	struct thread_context *tc = (struct thread_context *)arg;
+	int res;
+	int kvmcpu = tc->kvmcpu;
+	struct kvm_run *run = tc->run;
+
+	res = ioctl(kvmcpu, KVM_RUN, 0);
+	printf("ret1=%d exit_reason=%d suberror=%d\n",
+	       res, run->exit_reason, run->internal.suberror);
+
+	return 0;
+}
+
+void test(void)
+{
+	int i, kvm, kvmvm, kvmcpu;
+	pthread_t th[NTHREAD];
+	struct kvm_run *run;
+	struct thread_context tc;
+
+	kvm = open("/dev/kvm", O_RDWR);
+	TEST_ASSERT(kvm != -1, "failed to open /dev/kvm");
+	kvmvm = ioctl(kvm, KVM_CREATE_VM, 0);
+	TEST_ASSERT(kvmvm != -1, "KVM_CREATE_VM failed");
+	kvmcpu = ioctl(kvmvm, KVM_CREATE_VCPU, 0);
+	TEST_ASSERT(kvmcpu != -1, "KVM_CREATE_VCPU failed");
+	run = (struct kvm_run *)mmap(0, 4096, PROT_READ|PROT_WRITE, MAP_SHARED,
+				    kvmcpu, 0);
+	tc.kvmcpu = kvmcpu;
+	tc.run = run;
+	srand(getpid());
+	for (i = 0; i < NTHREAD; i++) {
+		pthread_create(&th[i], NULL, thr, (void *)(uintptr_t)&tc);
+		usleep(rand() % 10000);
+	}
+	for (i = 0; i < NTHREAD; i++)
+		pthread_join(th[i], NULL);
+}
+
+int get_warnings_count(void)
+{
+	int warnings;
+	FILE *f;
+
+	f = popen("dmesg | grep \"WARNING:\" | wc -l", "r");
+	fscanf(f, "%d", &warnings);
+	fclose(f);
+
+	return warnings;
+}
+
+int main(void)
+{
+	int warnings_before, warnings_after;
+
+	if (!is_intel_cpu()) {
+		printf("Must be run on an Intel CPU, skipping test\n");
+		exit(KSFT_SKIP);
+	}
+
+	if (vm_is_unrestricted_guest(NULL)) {
+		printf("Unrestricted guest must be disabled, skipping test\n");
+		exit(KSFT_SKIP);
+	}
+
+	warnings_before = get_warnings_count();
+
+	for (int i = 0; i < NPROCESS; ++i) {
+		int status;
+		int pid = fork();
+
+		if (pid < 0)
+			exit(1);
+		if (pid == 0) {
+			test();
+			exit(0);
+		}
+		while (waitpid(pid, &status, __WALL) != pid)
+			;
+	}
+
+	warnings_after = get_warnings_count();
+	TEST_ASSERT(warnings_before == warnings_after,
+		   "Warnings found in kernel.  Run 'dmesg' to inspect them.");
+
+	return 0;
+}
diff --git a/tools/testing/selftests/kvm/x86_64/platform_info_test.c b/tools/testing/selftests/kvm/x86_64/platform_info_test.c
index eb3e7a838cb4..40050e44ec0a 100644
--- a/tools/testing/selftests/kvm/x86_64/platform_info_test.c
+++ b/tools/testing/selftests/kvm/x86_64/platform_info_test.c
@@ -81,7 +81,6 @@ static void test_msr_platform_info_disabled(struct kvm_vm *vm)
 int main(int argc, char *argv[])
 {
 	struct kvm_vm *vm;
-	struct kvm_run *state;
 	int rv;
 	uint64_t msr_platform_info;
 
diff --git a/tools/testing/selftests/kvm/x86_64/set_sregs_test.c b/tools/testing/selftests/kvm/x86_64/set_sregs_test.c
index 35640e8e95bc..9f7656184f31 100644
--- a/tools/testing/selftests/kvm/x86_64/set_sregs_test.c
+++ b/tools/testing/selftests/kvm/x86_64/set_sregs_test.c
@@ -1,16 +1,14 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * KVM_SET_SREGS tests
  *
  * Copyright (C) 2018, Google LLC.
  *
- * This work is licensed under the terms of the GNU GPL, version 2.
- *
  * This is a regression test for the bug fixed by the following commit:
  * d3802286fa0f ("kvm: x86: Disallow illegal IA32_APIC_BASE MSR values")
  *
  * That bug allowed a user-mode program that called the KVM_SET_SREGS
  * ioctl to put a VCPU's local APIC into an invalid state.
- *
  */
 #define _GNU_SOURCE /* for program_invocation_short_name */
 #include <fcntl.h>
diff --git a/tools/testing/selftests/kvm/x86_64/smm_test.c b/tools/testing/selftests/kvm/x86_64/smm_test.c
index fb8086964d83..4daf520bada1 100644
--- a/tools/testing/selftests/kvm/x86_64/smm_test.c
+++ b/tools/testing/selftests/kvm/x86_64/smm_test.c
@@ -87,7 +87,6 @@ void guest_code(struct vmx_pages *vmx_pages)
 
 int main(int argc, char *argv[])
 {
-	struct vmx_pages *vmx_pages = NULL;
 	vm_vaddr_t vmx_pages_gva = 0;
 
 	struct kvm_regs regs;
@@ -115,7 +114,7 @@ int main(int argc, char *argv[])
 	vcpu_set_msr(vm, VCPU_ID, MSR_IA32_SMBASE, SMRAM_GPA);
 
 	if (kvm_check_cap(KVM_CAP_NESTED_STATE)) {
-		vmx_pages = vcpu_alloc_vmx(vm, &vmx_pages_gva);
+		vcpu_alloc_vmx(vm, &vmx_pages_gva);
 		vcpu_args_set(vm, VCPU_ID, 1, vmx_pages_gva);
 	} else {
 		printf("will skip SMM test with VMX enabled\n");
diff --git a/tools/testing/selftests/kvm/x86_64/state_test.c b/tools/testing/selftests/kvm/x86_64/state_test.c
index e0a3c0204b7c..1a23617f34d9 100644
--- a/tools/testing/selftests/kvm/x86_64/state_test.c
+++ b/tools/testing/selftests/kvm/x86_64/state_test.c
@@ -1,10 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * KVM_GET/SET_* tests
  *
  * Copyright (C) 2018, Red Hat, Inc.
  *
- * This work is licensed under the terms of the GNU GPL, version 2.
- *
  * Tests for vCPU state save/restore, including nested guest state.
  */
 #define _GNU_SOURCE /* for program_invocation_short_name */
@@ -22,8 +21,6 @@
 
 #define VCPU_ID		5
 
-static bool have_nested_state;
-
 void l2_guest_code(void)
 {
 	GUEST_SYNC(6);
@@ -122,7 +119,6 @@ void guest_code(struct vmx_pages *vmx_pages)
 
 int main(int argc, char *argv[])
 {
-	struct vmx_pages *vmx_pages = NULL;
 	vm_vaddr_t vmx_pages_gva = 0;
 
 	struct kvm_regs regs1, regs2;
@@ -132,8 +128,6 @@ int main(int argc, char *argv[])
 	struct ucall uc;
 	int stage;
 
-	struct kvm_cpuid_entry2 *entry = kvm_get_supported_cpuid_entry(1);
-
 	/* Create VM */
 	vm = vm_create_default(VCPU_ID, 0, guest_code);
 	vcpu_set_cpuid(vm, VCPU_ID, kvm_get_supported_cpuid());
@@ -142,7 +136,7 @@ int main(int argc, char *argv[])
 	vcpu_regs_get(vm, VCPU_ID, &regs1);
 
 	if (kvm_check_cap(KVM_CAP_NESTED_STATE)) {
-		vmx_pages = vcpu_alloc_vmx(vm, &vmx_pages_gva);
+		vcpu_alloc_vmx(vm, &vmx_pages_gva);
 		vcpu_args_set(vm, VCPU_ID, 1, vmx_pages_gva);
 	} else {
 		printf("will skip nested state checks\n");
diff --git a/tools/testing/selftests/kvm/x86_64/sync_regs_test.c b/tools/testing/selftests/kvm/x86_64/sync_regs_test.c
index c8478ce9ea77..11c2a70a7b87 100644
--- a/tools/testing/selftests/kvm/x86_64/sync_regs_test.c
+++ b/tools/testing/selftests/kvm/x86_64/sync_regs_test.c
@@ -1,10 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * Test for x86 KVM_CAP_SYNC_REGS
  *
  * Copyright (C) 2018, Google LLC.
  *
- * This work is licensed under the terms of the GNU GPL, version 2.
- *
  * Verifies expected behavior of x86 KVM_CAP_SYNC_REGS functionality,
  * including requesting an invalid register set, updates to/from values
  * in kvm_run.s.regs when kvm_valid_regs and kvm_dirty_regs are toggled.
@@ -25,9 +24,15 @@
 
 void guest_code(void)
 {
+	/*
+	 * use a callee-save register, otherwise the compiler
+	 * saves it around the call to GUEST_SYNC.
+	 */
+	register u32 stage asm("rbx");
 	for (;;) {
 		GUEST_SYNC(0);
-		asm volatile ("inc %r11");
+		stage++;
+		asm volatile ("" : : "r" (stage));
 	}
 }
 
@@ -147,7 +152,7 @@ int main(int argc, char *argv[])
 	compare_vcpu_events(&events, &run->s.regs.events);
 
 	/* Set and verify various register values. */
-	run->s.regs.regs.r11 = 0xBAD1DEA;
+	run->s.regs.regs.rbx = 0xBAD1DEA;
 	run->s.regs.sregs.apic_base = 1 << 11;
 	/* TODO run->s.regs.events.XYZ = ABC; */
 
@@ -158,9 +163,9 @@ int main(int argc, char *argv[])
 		    "Unexpected exit reason: %u (%s),\n",
 		    run->exit_reason,
 		    exit_reason_str(run->exit_reason));
-	TEST_ASSERT(run->s.regs.regs.r11 == 0xBAD1DEA + 1,
-		    "r11 sync regs value incorrect 0x%llx.",
-		    run->s.regs.regs.r11);
+	TEST_ASSERT(run->s.regs.regs.rbx == 0xBAD1DEA + 1,
+		    "rbx sync regs value incorrect 0x%llx.",
+		    run->s.regs.regs.rbx);
 	TEST_ASSERT(run->s.regs.sregs.apic_base == 1 << 11,
 		    "apic_base sync regs value incorrect 0x%llx.",
 		    run->s.regs.sregs.apic_base);
@@ -179,15 +184,15 @@ int main(int argc, char *argv[])
 	 */
 	run->kvm_valid_regs = TEST_SYNC_FIELDS;
 	run->kvm_dirty_regs = 0;
-	run->s.regs.regs.r11 = 0xDEADBEEF;
+	run->s.regs.regs.rbx = 0xDEADBEEF;
 	rv = _vcpu_run(vm, VCPU_ID);
 	TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
 		    "Unexpected exit reason: %u (%s),\n",
 		    run->exit_reason,
 		    exit_reason_str(run->exit_reason));
-	TEST_ASSERT(run->s.regs.regs.r11 != 0xDEADBEEF,
-		    "r11 sync regs value incorrect 0x%llx.",
-		    run->s.regs.regs.r11);
+	TEST_ASSERT(run->s.regs.regs.rbx != 0xDEADBEEF,
+		    "rbx sync regs value incorrect 0x%llx.",
+		    run->s.regs.regs.rbx);
 
 	/* Clear kvm_valid_regs bits and kvm_dirty_bits.
 	 * Verify s.regs values are not overwritten with existing guest values
@@ -195,21 +200,21 @@ int main(int argc, char *argv[])
 	 */
 	run->kvm_valid_regs = 0;
 	run->kvm_dirty_regs = 0;
-	run->s.regs.regs.r11 = 0xAAAA;
-	regs.r11 = 0xBAC0;
+	run->s.regs.regs.rbx = 0xAAAA;
+	regs.rbx = 0xBAC0;
 	vcpu_regs_set(vm, VCPU_ID, &regs);
 	rv = _vcpu_run(vm, VCPU_ID);
 	TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
 		    "Unexpected exit reason: %u (%s),\n",
 		    run->exit_reason,
 		    exit_reason_str(run->exit_reason));
-	TEST_ASSERT(run->s.regs.regs.r11 == 0xAAAA,
-		    "r11 sync regs value incorrect 0x%llx.",
-		    run->s.regs.regs.r11);
+	TEST_ASSERT(run->s.regs.regs.rbx == 0xAAAA,
+		    "rbx sync regs value incorrect 0x%llx.",
+		    run->s.regs.regs.rbx);
 	vcpu_regs_get(vm, VCPU_ID, &regs);
-	TEST_ASSERT(regs.r11 == 0xBAC0 + 1,
-		    "r11 guest value incorrect 0x%llx.",
-		    regs.r11);
+	TEST_ASSERT(regs.rbx == 0xBAC0 + 1,
+		    "rbx guest value incorrect 0x%llx.",
+		    regs.rbx);
 
 	/* Clear kvm_valid_regs bits. Verify s.regs values are not overwritten
 	 * with existing guest values but that guest values are overwritten
@@ -217,19 +222,19 @@ int main(int argc, char *argv[])
 	 */
 	run->kvm_valid_regs = 0;
 	run->kvm_dirty_regs = TEST_SYNC_FIELDS;
-	run->s.regs.regs.r11 = 0xBBBB;
+	run->s.regs.regs.rbx = 0xBBBB;
 	rv = _vcpu_run(vm, VCPU_ID);
 	TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
 		    "Unexpected exit reason: %u (%s),\n",
 		    run->exit_reason,
 		    exit_reason_str(run->exit_reason));
-	TEST_ASSERT(run->s.regs.regs.r11 == 0xBBBB,
-		    "r11 sync regs value incorrect 0x%llx.",
-		    run->s.regs.regs.r11);
+	TEST_ASSERT(run->s.regs.regs.rbx == 0xBBBB,
+		    "rbx sync regs value incorrect 0x%llx.",
+		    run->s.regs.regs.rbx);
 	vcpu_regs_get(vm, VCPU_ID, &regs);
-	TEST_ASSERT(regs.r11 == 0xBBBB + 1,
-		    "r11 guest value incorrect 0x%llx.",
-		    regs.r11);
+	TEST_ASSERT(regs.rbx == 0xBBBB + 1,
+		    "rbx guest value incorrect 0x%llx.",
+		    regs.rbx);
 
 	kvm_vm_free(vm);
 
diff --git a/tools/testing/selftests/kvm/x86_64/vmx_close_while_nested_test.c b/tools/testing/selftests/kvm/x86_64/vmx_close_while_nested_test.c
index 6edec6fd790b..3b0ffe01dacd 100644
--- a/tools/testing/selftests/kvm/x86_64/vmx_close_while_nested_test.c
+++ b/tools/testing/selftests/kvm/x86_64/vmx_close_while_nested_test.c
@@ -1,10 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * vmx_close_while_nested
  *
  * Copyright (C) 2019, Red Hat, Inc.
  *
- * This work is licensed under the terms of the GNU GPL, version 2.
- *
 * Verify that nothing bad happens if a KVM user exits with open
 * file descriptors while executing a nested guest.
 */
@@ -39,8 +38,6 @@ static void l1_guest_code(struct vmx_pages *vmx_pages)
 {
 #define L2_GUEST_STACK_SIZE 64
 	unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE];
-	uint32_t control;
-	uintptr_t save_cr3;
 
 	GUEST_ASSERT(prepare_for_vmx_operation(vmx_pages));
 	GUEST_ASSERT(load_vmcs(vmx_pages));
@@ -55,7 +52,6 @@ static void l1_guest_code(struct vmx_pages *vmx_pages)
 
 int main(int argc, char *argv[])
 {
-	struct vmx_pages *vmx_pages;
 	vm_vaddr_t vmx_pages_gva;
 	struct kvm_cpuid_entry2 *entry = kvm_get_supported_cpuid_entry(1);
 
@@ -68,7 +64,7 @@ int main(int argc, char *argv[])
 	vcpu_set_cpuid(vm, VCPU_ID, kvm_get_supported_cpuid());
 
 	/* Allocate VMX pages and shared descriptors (vmx_pages). */
-	vmx_pages = vcpu_alloc_vmx(vm, &vmx_pages_gva);
+	vcpu_alloc_vmx(vm, &vmx_pages_gva);
 	vcpu_args_set(vm, VCPU_ID, 1, vmx_pages_gva);
 
 	for (;;) {
diff --git a/tools/testing/selftests/kvm/x86_64/vmx_set_nested_state_test.c b/tools/testing/selftests/kvm/x86_64/vmx_set_nested_state_test.c
index 61a2163cf9f1..ed7218d166da 100644
--- a/tools/testing/selftests/kvm/x86_64/vmx_set_nested_state_test.c
+++ b/tools/testing/selftests/kvm/x86_64/vmx_set_nested_state_test.c
@@ -1,10 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * vmx_set_nested_state_test
 *
 * Copyright (C) 2019, Google LLC.
 *
- * This work is licensed under the terms of the GNU GPL, version 2.
- *
 * This test verifies the integrity of calling the ioctl KVM_SET_NESTED_STATE.
 */
 
@@ -75,7 +74,7 @@ void set_revision_id_for_vmcs12(struct kvm_nested_state *state,
 				u32 vmcs12_revision)
 {
 	/* Set revision_id in vmcs12 to vmcs12_revision. */
-	*(u32 *)(state->data) = vmcs12_revision;
+	memcpy(&state->data, &vmcs12_revision, sizeof(u32));
 }
 
 void set_default_state(struct kvm_nested_state *state)
@@ -95,9 +94,9 @@ void set_default_vmx_state(struct kvm_nested_state *state, int size)
 		       KVM_STATE_NESTED_EVMCS;
 	state->format = 0;
 	state->size = size;
-	state->vmx.vmxon_pa = 0x1000;
-	state->vmx.vmcs_pa = 0x2000;
-	state->vmx.smm.flags = 0;
+	state->hdr.vmx.vmxon_pa = 0x1000;
+	state->hdr.vmx.vmcs12_pa = 0x2000;
+	state->hdr.vmx.smm.flags = 0;
 	set_revision_id_for_vmcs12(state, VMCS12_REVISION);
 }
 
@@ -123,39 +122,47 @@ void test_vmx_nested_state(struct kvm_vm *vm)
 	/*
 	 * We cannot virtualize anything if the guest does not have VMX
 	 * enabled.  We expect KVM_SET_NESTED_STATE to return 0 if vmxon_pa
-	 * is set to -1ull.
+	 * is set to -1ull, but the flags must be zero.
 	 */
 	set_default_vmx_state(state, state_sz);
-	state->vmx.vmxon_pa = -1ull;
+	state->hdr.vmx.vmxon_pa = -1ull;
+	test_nested_state_expect_einval(vm, state);
+
+	state->hdr.vmx.vmcs12_pa = -1ull;
+	state->flags = KVM_STATE_NESTED_EVMCS;
+	test_nested_state_expect_einval(vm, state);
+
+	state->flags = 0;
 	test_nested_state(vm, state);
 
 	/* Enable VMX in the guest CPUID. */
 	vcpu_set_cpuid(vm, VCPU_ID, kvm_get_supported_cpuid());
 
-	/* It is invalid to have vmxon_pa == -1ull and SMM flags non-zero. */
+	/*
+	 * Setting vmxon_pa == -1ull and vmcs_pa == -1ull exits early without
+	 * setting the nested state but flags other than eVMCS must be clear.
+	 */
 	set_default_vmx_state(state, state_sz);
-	state->vmx.vmxon_pa = -1ull;
-	state->vmx.smm.flags = 1;
+	state->hdr.vmx.vmxon_pa = -1ull;
+	state->hdr.vmx.vmcs12_pa = -1ull;
 	test_nested_state_expect_einval(vm, state);
 
-	/* It is invalid to have vmxon_pa == -1ull and vmcs_pa != -1ull. */
-	set_default_vmx_state(state, state_sz);
-	state->vmx.vmxon_pa = -1ull;
-	state->vmx.vmcs_pa = 0;
+	state->flags = KVM_STATE_NESTED_EVMCS;
+	test_nested_state(vm, state);
+
+	/* It is invalid to have vmxon_pa == -1ull and SMM flags non-zero. */
+	state->hdr.vmx.smm.flags = 1;
 	test_nested_state_expect_einval(vm, state);
 
-	/*
-	 * Setting vmxon_pa == -1ull and vmcs_pa == -1ull exits early without
-	 * setting the nested state.
-	 */
+	/* It is invalid to have vmxon_pa == -1ull and vmcs_pa != -1ull. */
 	set_default_vmx_state(state, state_sz);
-	state->vmx.vmxon_pa = -1ull;
-	state->vmx.vmcs_pa = -1ull;
-	test_nested_state(vm, state);
+	state->hdr.vmx.vmxon_pa = -1ull;
+	state->flags = 0;
+	test_nested_state_expect_einval(vm, state);
 
 	/* It is invalid to have vmxon_pa set to a non-page aligned address. */
 	set_default_vmx_state(state, state_sz);
-	state->vmx.vmxon_pa = 1;
+	state->hdr.vmx.vmxon_pa = 1;
 	test_nested_state_expect_einval(vm, state);
 
 	/*
@@ -165,7 +172,7 @@ void test_vmx_nested_state(struct kvm_vm *vm)
 	set_default_vmx_state(state, state_sz);
 	state->flags = KVM_STATE_NESTED_GUEST_MODE  |
 		      KVM_STATE_NESTED_RUN_PENDING;
-	state->vmx.smm.flags = KVM_STATE_NESTED_SMM_GUEST_MODE;
+	state->hdr.vmx.smm.flags = KVM_STATE_NESTED_SMM_GUEST_MODE;
 	test_nested_state_expect_einval(vm, state);
 
 	/*
@@ -174,14 +181,14 @@ void test_vmx_nested_state(struct kvm_vm *vm)
 	 * KVM_STATE_NESTED_SMM_VMXON
 	 */
 	set_default_vmx_state(state, state_sz);
-	state->vmx.smm.flags = ~(KVM_STATE_NESTED_SMM_GUEST_MODE |
+	state->hdr.vmx.smm.flags = ~(KVM_STATE_NESTED_SMM_GUEST_MODE |
 				KVM_STATE_NESTED_SMM_VMXON);
 	test_nested_state_expect_einval(vm, state);
 
 	/* Outside SMM, SMM flags must be zero. */
 	set_default_vmx_state(state, state_sz);
 	state->flags = 0;
-	state->vmx.smm.flags = KVM_STATE_NESTED_SMM_GUEST_MODE;
+	state->hdr.vmx.smm.flags = KVM_STATE_NESTED_SMM_GUEST_MODE;
 	test_nested_state_expect_einval(vm, state);
 
 	/* Size must be large enough to fit kvm_nested_state and vmcs12. */
@@ -191,8 +198,8 @@ void test_vmx_nested_state(struct kvm_vm *vm)
 
 	/* vmxon_pa cannot be the same address as vmcs_pa. */
 	set_default_vmx_state(state, state_sz);
-	state->vmx.vmxon_pa = 0;
-	state->vmx.vmcs_pa = 0;
+	state->hdr.vmx.vmxon_pa = 0;
+	state->hdr.vmx.vmcs12_pa = 0;
 	test_nested_state_expect_einval(vm, state);
 
 	/* The revision id for vmcs12 must be VMCS12_REVISION. */
@@ -205,16 +212,16 @@ void test_vmx_nested_state(struct kvm_vm *vm)
 	 * it again.
 	 */
 	set_default_vmx_state(state, state_sz);
-	state->vmx.vmxon_pa = -1ull;
-	state->vmx.vmcs_pa = -1ull;
+	state->hdr.vmx.vmxon_pa = -1ull;
+	state->hdr.vmx.vmcs12_pa = -1ull;
 	state->flags = 0;
 	test_nested_state(vm, state);
 	vcpu_nested_state_get(vm, VCPU_ID, state);
 	TEST_ASSERT(state->size >= sizeof(*state) && state->size <= state_sz,
 		    "Size must be between %d and %d. The size returned was %d.",
 		    sizeof(*state), state_sz, state->size);
-	TEST_ASSERT(state->vmx.vmxon_pa == -1ull, "vmxon_pa must be -1ull.");
-	TEST_ASSERT(state->vmx.vmcs_pa == -1ull, "vmcs_pa must be -1ull.");
+	TEST_ASSERT(state->hdr.vmx.vmxon_pa == -1ull, "vmxon_pa must be -1ull.");
+	TEST_ASSERT(state->hdr.vmx.vmcs12_pa == -1ull, "vmcs_pa must be -1ull.");
 
 	free(state);
 }
diff --git a/tools/testing/selftests/kvm/x86_64/vmx_tsc_adjust_test.c b/tools/testing/selftests/kvm/x86_64/vmx_tsc_adjust_test.c
index 18fa64db0d7a..f36c10eba71e 100644
--- a/tools/testing/selftests/kvm/x86_64/vmx_tsc_adjust_test.c
+++ b/tools/testing/selftests/kvm/x86_64/vmx_tsc_adjust_test.c
@@ -1,11 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * vmx_tsc_adjust_test
 *
 * Copyright (C) 2018, Google LLC.
 *
- * This work is licensed under the terms of the GNU GPL, version 2.
- *
- *
 * IA32_TSC_ADJUST test
 *
 * According to the SDM, "if an execution of WRMSR to the
@@ -121,7 +119,7 @@ static void l1_guest_code(struct vmx_pages *vmx_pages)
 	GUEST_DONE();
 }
 
-void report(int64_t val)
+static void report(int64_t val)
 {
 	printf("IA32_TSC_ADJUST is %ld (%lld * TSC_ADJUST_VALUE + %lld).\n",
 	       val, val / TSC_ADJUST_VALUE, val % TSC_ADJUST_VALUE);
@@ -129,7 +127,6 @@ void report(int64_t val)
 
 int main(int argc, char *argv[])
 {
-	struct vmx_pages *vmx_pages;
 	vm_vaddr_t vmx_pages_gva;
 	struct kvm_cpuid_entry2 *entry = kvm_get_supported_cpuid_entry(1);
 
@@ -142,7 +139,7 @@ int main(int argc, char *argv[])
 	vcpu_set_cpuid(vm, VCPU_ID, kvm_get_supported_cpuid());
 
 	/* Allocate VMX pages and shared descriptors (vmx_pages). */
-	vmx_pages = vcpu_alloc_vmx(vm, &vmx_pages_gva);
+	vcpu_alloc_vmx(vm, &vmx_pages_gva);
 	vcpu_args_set(vm, VCPU_ID, 1, vmx_pages_gva);
 
 	for (;;) {