author		Peter Gonda <pgonda@google.com>	2024-02-23 03:42:51 +0300
committer	Sean Christopherson <seanjc@google.com>	2024-02-28 23:58:13 +0300
commit		cd8eb2913205e5a13ec807061c8f72d6fee624c7 (patch)
tree		38471414f03c5d5ca3505e7a213c05af9b8c8564 /tools/testing/selftests/kvm/lib/kvm_util.c
parent		57e19f05775847d9d8565dad2cee6bbec03cdb06 (diff)
download	linux-cd8eb2913205e5a13ec807061c8f72d6fee624c7.tar.xz
KVM: selftests: Add support for allocating/managing protected guest memory
Add support for differentiating between protected (a.k.a. private, a.k.a.
encrypted) memory and normal (a.k.a. shared) memory for VMs that support
protected guest memory, e.g. x86's SEV. Provide and manage a common bitmap
for tracking whether a given physical page resides in protected memory, as
support for protected memory isn't x86-specific, i.e. adding an arch hook
would be a net negative now and in the future.

Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: Sean Christopherson <seanjc@google.com>
Cc: Vishal Annapurve <vannapurve@google.com>
Cc: Ackerley Tng <ackerleytng@google.com>
Cc: Andrew Jones <andrew.jones@linux.dev>
Cc: Tom Lendacky <thomas.lendacky@amd.com>
Cc: Michael Roth <michael.roth@amd.com>
Reviewed-by: Itaru Kitayama <itaru.kitayama@fujitsu.com>
Tested-by: Carlos Bilbao <carlos.bilbao@amd.com>
Originally-by: Michael Roth <michael.roth@amd.com>
Signed-off-by: Peter Gonda <pgonda@google.com>
Co-developed-by: Sean Christopherson <seanjc@google.com>
Link: https://lore.kernel.org/r/20240223004258.3104051-5-seanjc@google.com
Signed-off-by: Sean Christopherson <seanjc@google.com>
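Only the kvm_util.c side of the change appears in the diff below. A minimal
sketch of how the old entry point can stay source-compatible, assuming the
header side of the series wires vm_phy_pages_alloc() up as a thin wrapper
(the wrapper below is illustrative, not quoted from this diff):

	/*
	 * Sketch: keep the old name as a wrapper so existing callers get
	 * protected memory by default on VMs that support it, making the
	 * use of shared memory effectively opt-in.
	 */
	static inline vm_paddr_t vm_phy_pages_alloc(struct kvm_vm *vm, size_t num,
						    vm_paddr_t paddr_min,
						    uint32_t memslot)
	{
		return __vm_phy_pages_alloc(vm, num, paddr_min, memslot,
					    vm_arch_has_protected_memory(vm));
	}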
Diffstat (limited to 'tools/testing/selftests/kvm/lib/kvm_util.c')
-rw-r--r--	tools/testing/selftests/kvm/lib/kvm_util.c	22
1 file changed, 18 insertions(+), 4 deletions(-)
diff --git a/tools/testing/selftests/kvm/lib/kvm_util.c b/tools/testing/selftests/kvm/lib/kvm_util.c
index 22da6a10b41f..2845349a98ca 100644
--- a/tools/testing/selftests/kvm/lib/kvm_util.c
+++ b/tools/testing/selftests/kvm/lib/kvm_util.c
@@ -666,6 +666,7 @@ static void __vm_mem_region_delete(struct kvm_vm *vm,
 	vm_ioctl(vm, KVM_SET_USER_MEMORY_REGION2, &region->region);
 
 	sparsebit_free(&region->unused_phy_pages);
+	sparsebit_free(&region->protected_phy_pages);
 	ret = munmap(region->mmap_start, region->mmap_size);
 	TEST_ASSERT(!ret, __KVM_SYSCALL_ERROR("munmap()", ret));
 	if (region->fd >= 0) {
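The teardown above frees the protected bitmap unconditionally, even for
regions that never allocated one. That only works if sparsebit_free() is a
no-op for a NULL bitmap; a sketch of the pattern being assumed (see
lib/sparsebit.c for the real implementation):

	/* Assumed NULL-tolerant free pattern; illustrative only. */
	void sparsebit_free(struct sparsebit **sbitp)
	{
		if (*sbitp == NULL)
			return;		/* non-protected regions: nothing to do */
		sparsebit_clear_all(*sbitp);
		free(*sbitp);
		*sbitp = NULL;		/* guard against double-free */
	}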
@@ -1047,6 +1048,8 @@ void vm_mem_add(struct kvm_vm *vm, enum vm_mem_backing_src_type src_type,
 	}
 
 	region->unused_phy_pages = sparsebit_alloc();
+	if (vm_arch_has_protected_memory(vm))
+		region->protected_phy_pages = sparsebit_alloc();
 	sparsebit_set_num(region->unused_phy_pages,
 		guest_paddr >> vm->page_shift, npages);
 	region->region.slot = slot;
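vm_arch_has_protected_memory() is provided per-architecture elsewhere in
the series; for an architecture without protected guest memory, the natural
implementation simply reports false, leaving protected_phy_pages NULL. A
hypothetical stub, for illustration only:

	/* Hypothetical stub for an arch without protected guest memory. */
	bool vm_arch_has_protected_memory(struct kvm_vm *vm)
	{
		return false;
	}

A NULL protected_phy_pages is exactly what the vm_dump() hunk and the
TEST_ASSERT() in __vm_phy_pages_alloc() below key off of.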
@@ -1873,6 +1876,10 @@ void vm_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent)
 			region->host_mem);
 		fprintf(stream, "%*sunused_phy_pages: ", indent + 2, "");
 		sparsebit_dump(stream, region->unused_phy_pages, 0);
+		if (region->protected_phy_pages) {
+			fprintf(stream, "%*sprotected_phy_pages: ", indent + 2, "");
+			sparsebit_dump(stream, region->protected_phy_pages, 0);
+		}
 	}
 	fprintf(stream, "%*sMapped Virtual Pages:\n", indent, "");
 	sparsebit_dump(stream, vm->vpages_mapped, indent + 2);
@@ -1974,6 +1981,7 @@ const char *exit_reason_str(unsigned int exit_reason)
  *   num - number of pages
  *   paddr_min - Physical address minimum
  *   memslot - Memory region to allocate page from
+ *   protected - True if the pages will be used as protected/private memory
  *
  * Output Args: None
  *
@@ -1985,8 +1993,9 @@ const char *exit_reason_str(unsigned int exit_reason)
  * and their base address is returned. A TEST_ASSERT failure occurs if
  * not enough pages are available at or above paddr_min.
  */
-vm_paddr_t vm_phy_pages_alloc(struct kvm_vm *vm, size_t num,
-			      vm_paddr_t paddr_min, uint32_t memslot)
+vm_paddr_t __vm_phy_pages_alloc(struct kvm_vm *vm, size_t num,
+				vm_paddr_t paddr_min, uint32_t memslot,
+				bool protected)
 {
 	struct userspace_mem_region *region;
 	sparsebit_idx_t pg, base;
@@ -1999,8 +2008,10 @@ vm_paddr_t vm_phy_pages_alloc(struct kvm_vm *vm, size_t num,
 		    paddr_min, vm->page_size);
 
 	region = memslot2region(vm, memslot);
-	base = pg = paddr_min >> vm->page_shift;
+	TEST_ASSERT(!protected || region->protected_phy_pages,
+		    "Region doesn't support protected memory");
 
+	base = pg = paddr_min >> vm->page_shift;
 	do {
 		for (; pg < base + num; ++pg) {
 			if (!sparsebit_is_set(region->unused_phy_pages, pg)) {
@@ -2019,8 +2030,11 @@ vm_paddr_t vm_phy_pages_alloc(struct kvm_vm *vm, size_t num,
 		abort();
 	}
 
-	for (pg = base; pg < base + num; ++pg)
+	for (pg = base; pg < base + num; ++pg) {
 		sparsebit_clear(region->unused_phy_pages, pg);
+		if (protected)
+			sparsebit_set(region->protected_phy_pages, pg);
+	}
 
 	return base * vm->page_size;
 }
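Taken together, a test that wants guest-private backing can request it
explicitly. A usage sketch, assuming an already-configured vm handle,
memslot 0, and the existing KVM_UTIL_MIN_PFN floor (the handle and memslot
are placeholders):

	/*
	 * Usage sketch: grab one page of protected guest memory from
	 * memslot 0. The TEST_ASSERT() in __vm_phy_pages_alloc() fires if
	 * the memslot was created without a protected_phy_pages bitmap.
	 */
	vm_paddr_t gpa = __vm_phy_pages_alloc(vm, 1,
					      KVM_UTIL_MIN_PFN * vm->page_size,
					      0, /*protected=*/true);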