| author | Ingo Molnar <mingo@kernel.org> | 2024-03-25 13:32:29 +0300 |
|---|---|---|
| committer | Ingo Molnar <mingo@kernel.org> | 2024-03-25 13:32:29 +0300 |
| commit | f4566a1e73957800df75a3dd2dccee8a4697f327 | |
| tree | b043b875228c0b25988af66c680d60cae69d761d /tools/testing/selftests/kvm/lib/kvm_util.c | |
| parent | b9e6e28663928cab836a19abbdec3d036a07db3b | |
| parent | 4cece764965020c22cff7665b18a012006359095 | |
Merge tag 'v6.9-rc1' into sched/core, to pick up fixes and to refresh the branch
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'tools/testing/selftests/kvm/lib/kvm_util.c')
| -rw-r--r-- | tools/testing/selftests/kvm/lib/kvm_util.c | 129 |
|---|---|---|

1 file changed, 114 insertions, 15 deletions
```diff
diff --git a/tools/testing/selftests/kvm/lib/kvm_util.c b/tools/testing/selftests/kvm/lib/kvm_util.c
index 1b197426f29f..b2262b5fad9e 100644
--- a/tools/testing/selftests/kvm/lib/kvm_util.c
+++ b/tools/testing/selftests/kvm/lib/kvm_util.c
@@ -52,13 +52,13 @@ int open_kvm_dev_path_or_exit(void)
 	return _open_kvm_dev_path_or_exit(O_RDONLY);
 }
 
-static bool get_module_param_bool(const char *module_name, const char *param)
+static ssize_t get_module_param(const char *module_name, const char *param,
+				void *buffer, size_t buffer_size)
 {
 	const int path_size = 128;
 	char path[path_size];
-	char value;
-	ssize_t r;
-	int fd;
+	ssize_t bytes_read;
+	int fd, r;
 
 	r = snprintf(path, path_size, "/sys/module/%s/parameters/%s",
 		     module_name, param);
@@ -67,11 +67,46 @@ static bool get_module_param_bool(const char *module_name, const char *param)
 
 	fd = open_path_or_exit(path, O_RDONLY);
 
-	r = read(fd, &value, 1);
-	TEST_ASSERT(r == 1, "read(%s) failed", path);
+	bytes_read = read(fd, buffer, buffer_size);
+	TEST_ASSERT(bytes_read > 0, "read(%s) returned %ld, wanted %ld bytes",
+		    path, bytes_read, buffer_size);
 
 	r = close(fd);
 	TEST_ASSERT(!r, "close(%s) failed", path);
+	return bytes_read;
+}
+
+static int get_module_param_integer(const char *module_name, const char *param)
+{
+	/*
+	 * 16 bytes to hold a 64-bit value (1 byte per char), 1 byte for the
+	 * NUL char, and 1 byte because the kernel sucks and inserts a newline
+	 * at the end.
+	 */
+	char value[16 + 1 + 1];
+	ssize_t r;
+
+	memset(value, '\0', sizeof(value));
+
+	r = get_module_param(module_name, param, value, sizeof(value));
+	TEST_ASSERT(value[r - 1] == '\n',
+		    "Expected trailing newline, got char '%c'", value[r - 1]);
+
+	/*
+	 * Squash the newline, otherwise atoi_paranoid() will complain about
+	 * trailing non-NUL characters in the string.
+	 */
+	value[r - 1] = '\0';
+	return atoi_paranoid(value);
+}
+
+static bool get_module_param_bool(const char *module_name, const char *param)
+{
+	char value;
+	ssize_t r;
+
+	r = get_module_param(module_name, param, &value, sizeof(value));
+	TEST_ASSERT_EQ(r, 1);
 
 	if (value == 'Y')
 		return true;
@@ -96,6 +131,21 @@ bool get_kvm_amd_param_bool(const char *param)
 	return get_module_param_bool("kvm_amd", param);
 }
 
+int get_kvm_param_integer(const char *param)
+{
+	return get_module_param_integer("kvm", param);
+}
+
+int get_kvm_intel_param_integer(const char *param)
+{
+	return get_module_param_integer("kvm_intel", param);
+}
+
+int get_kvm_amd_param_integer(const char *param)
+{
+	return get_module_param_integer("kvm_amd", param);
+}
+
 /*
  * Capability
  *
@@ -226,6 +276,7 @@ struct kvm_vm *____vm_create(struct vm_shape shape)
 
 	vm->mode = shape.mode;
 	vm->type = shape.type;
+	vm->subtype = shape.subtype;
 
 	vm->pa_bits = vm_guest_mode_params[vm->mode].pa_bits;
 	vm->va_bits = vm_guest_mode_params[vm->mode].va_bits;
@@ -266,6 +317,7 @@ struct kvm_vm *____vm_create(struct vm_shape shape)
 	case VM_MODE_PXXV48_4K:
 #ifdef __x86_64__
 		kvm_get_cpu_address_width(&vm->pa_bits, &vm->va_bits);
+		kvm_init_vm_address_properties(vm);
 		/*
 		 * Ignore KVM support for 5-level paging (vm->va_bits == 57),
 		 * it doesn't take effect unless a CR4.LA57 is set, which it
@@ -666,6 +718,7 @@ static void __vm_mem_region_delete(struct kvm_vm *vm,
 	vm_ioctl(vm, KVM_SET_USER_MEMORY_REGION2, &region->region);
 
 	sparsebit_free(&region->unused_phy_pages);
+	sparsebit_free(&region->protected_phy_pages);
 	ret = munmap(region->mmap_start, region->mmap_size);
 	TEST_ASSERT(!ret, __KVM_SYSCALL_ERROR("munmap()", ret));
 	if (region->fd >= 0) {
@@ -1047,6 +1100,8 @@ void vm_mem_add(struct kvm_vm *vm, enum vm_mem_backing_src_type src_type,
 	}
 
 	region->unused_phy_pages = sparsebit_alloc();
+	if (vm_arch_has_protected_memory(vm))
+		region->protected_phy_pages = sparsebit_alloc();
 	sparsebit_set_num(region->unused_phy_pages,
 		guest_paddr >> vm->page_shift, npages);
 	region->region.slot = slot;
@@ -1377,15 +1432,17 @@ va_found:
 	return pgidx_start * vm->page_size;
 }
 
-vm_vaddr_t __vm_vaddr_alloc(struct kvm_vm *vm, size_t sz, vm_vaddr_t vaddr_min,
-			    enum kvm_mem_region_type type)
+static vm_vaddr_t ____vm_vaddr_alloc(struct kvm_vm *vm, size_t sz,
+				     vm_vaddr_t vaddr_min,
+				     enum kvm_mem_region_type type,
+				     bool protected)
 {
 	uint64_t pages = (sz >> vm->page_shift) + ((sz % vm->page_size) != 0);
 
 	virt_pgd_alloc(vm);
-	vm_paddr_t paddr = vm_phy_pages_alloc(vm, pages,
-					      KVM_UTIL_MIN_PFN * vm->page_size,
-					      vm->memslots[type]);
+	vm_paddr_t paddr = __vm_phy_pages_alloc(vm, pages,
+						KVM_UTIL_MIN_PFN * vm->page_size,
+						vm->memslots[type], protected);
 
 	/*
 	 * Find an unused range of virtual page addresses of at least
@@ -1405,6 +1462,20 @@ vm_vaddr_t __vm_vaddr_alloc(struct kvm_vm *vm, size_t sz, vm_vaddr_t vaddr_min,
 	return vaddr_start;
 }
 
+vm_vaddr_t __vm_vaddr_alloc(struct kvm_vm *vm, size_t sz, vm_vaddr_t vaddr_min,
+			    enum kvm_mem_region_type type)
+{
+	return ____vm_vaddr_alloc(vm, sz, vaddr_min, type,
+				  vm_arch_has_protected_memory(vm));
+}
+
+vm_vaddr_t vm_vaddr_alloc_shared(struct kvm_vm *vm, size_t sz,
+				 vm_vaddr_t vaddr_min,
+				 enum kvm_mem_region_type type)
+{
+	return ____vm_vaddr_alloc(vm, sz, vaddr_min, type, false);
+}
+
 /*
  * VM Virtual Address Allocate
  *
@@ -1527,6 +1598,8 @@ void *addr_gpa2hva(struct kvm_vm *vm, vm_paddr_t gpa)
 {
 	struct userspace_mem_region *region;
 
+	gpa = vm_untag_gpa(vm, gpa);
+
 	region = userspace_mem_region_find(vm, gpa, gpa);
 	if (!region) {
 		TEST_FAIL("No vm physical memory at 0x%lx", gpa);
@@ -1873,6 +1946,10 @@ void vm_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent)
 			region->host_mem);
 		fprintf(stream, "%*sunused_phy_pages: ", indent + 2, "");
 		sparsebit_dump(stream, region->unused_phy_pages, 0);
+		if (region->protected_phy_pages) {
+			fprintf(stream, "%*sprotected_phy_pages: ", indent + 2, "");
+			sparsebit_dump(stream, region->protected_phy_pages, 0);
+		}
 	}
 	fprintf(stream, "%*sMapped Virtual Pages:\n", indent, "");
 	sparsebit_dump(stream, vm->vpages_mapped, indent + 2);
@@ -1974,6 +2051,7 @@ const char *exit_reason_str(unsigned int exit_reason)
  *   num - number of pages
  *   paddr_min - Physical address minimum
  *   memslot - Memory region to allocate page from
+ *   protected - True if the pages will be used as protected/private memory
  *
  * Output Args: None
  *
@@ -1985,8 +2063,9 @@ const char *exit_reason_str(unsigned int exit_reason)
  * and their base address is returned. A TEST_ASSERT failure occurs if
  * not enough pages are available at or above paddr_min.
  */
-vm_paddr_t vm_phy_pages_alloc(struct kvm_vm *vm, size_t num,
-			      vm_paddr_t paddr_min, uint32_t memslot)
+vm_paddr_t __vm_phy_pages_alloc(struct kvm_vm *vm, size_t num,
+				vm_paddr_t paddr_min, uint32_t memslot,
+				bool protected)
 {
 	struct userspace_mem_region *region;
 	sparsebit_idx_t pg, base;
@@ -1999,8 +2078,10 @@ vm_paddr_t vm_phy_pages_alloc(struct kvm_vm *vm, size_t num,
 		paddr_min, vm->page_size);
 
 	region = memslot2region(vm, memslot);
-	base = pg = paddr_min >> vm->page_shift;
+	TEST_ASSERT(!protected || region->protected_phy_pages,
+		    "Region doesn't support protected memory");
 
+	base = pg = paddr_min >> vm->page_shift;
 	do {
 		for (; pg < base + num; ++pg) {
 			if (!sparsebit_is_set(region->unused_phy_pages, pg)) {
@@ -2019,8 +2100,11 @@ vm_paddr_t vm_phy_pages_alloc(struct kvm_vm *vm, size_t num,
 		abort();
 	}
 
-	for (pg = base; pg < base + num; ++pg)
+	for (pg = base; pg < base + num; ++pg) {
 		sparsebit_clear(region->unused_phy_pages, pg);
+		if (protected)
+			sparsebit_set(region->protected_phy_pages, pg);
+	}
 
 	return base * vm->page_size;
 }
@@ -2224,3 +2308,18 @@ void __attribute((constructor)) kvm_selftest_init(void)
 
 	kvm_selftest_arch_init();
 }
+
+bool vm_is_gpa_protected(struct kvm_vm *vm, vm_paddr_t paddr)
+{
+	sparsebit_idx_t pg = 0;
+	struct userspace_mem_region *region;
+
+	if (!vm_arch_has_protected_memory(vm))
+		return false;
+
+	region = userspace_mem_region_find(vm, paddr, paddr);
+	TEST_ASSERT(region, "No vm physical memory at 0x%lx", paddr);
+
+	pg = paddr >> vm->page_shift;
+	return sparsebit_is_set(region->protected_phy_pages, pg);
+}
```
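For illustration, the helpers this diff introduces might be exercised from a selftest roughly as follows. This is a minimal sketch, not code from the commit: it assumes the declarations the series adds to the selftest headers (`get_kvm_param_integer()`, `vm_vaddr_alloc_shared()`, `vm_is_gpa_protected()`) plus existing selftest infrastructure (`vm_create_with_one_vcpu()`, `addr_gva2gpa()`, `MEM_REGION_TEST_DATA`, `KVM_UTIL_MIN_VADDR`); the test scenario itself is hypothetical.

```c
#include "kvm_util.h"
#include "test_util.h"

/* The guest never runs in this sketch; an empty body keeps it self-contained. */
static void guest_code(void)
{
}

int main(void)
{
	struct kvm_vcpu *vcpu;
	struct kvm_vm *vm;
	vm_vaddr_t gva;

	/*
	 * Integer module params can now be read in one call, e.g. the "kvm"
	 * module's halt_poll_ns (exposed under /sys/module/kvm/parameters/).
	 */
	pr_info("kvm.halt_poll_ns = %d\n", get_kvm_param_integer("halt_poll_ns"));

	vm = vm_create_with_one_vcpu(&vcpu, guest_code);

	/*
	 * vm_vaddr_alloc_shared() pins the new "protected" flag to false, so
	 * the backing physical pages are taken from the shared pool even on
	 * VM types whose memory defaults to protected/private. For a plain VM
	 * it behaves exactly like __vm_vaddr_alloc().
	 */
	gva = vm_vaddr_alloc_shared(vm, vm->page_size, KVM_UTIL_MIN_VADDR,
				    MEM_REGION_TEST_DATA);

	/* The region's protected_phy_pages sparsebit must not contain this page. */
	TEST_ASSERT(!vm_is_gpa_protected(vm, addr_gva2gpa(vm, gva)),
		    "Page allocated as shared should not be protected");

	kvm_vm_free(vm);
	return 0;
}
```

Note the design choice visible in the diff: protected pages are tracked in a per-region `protected_phy_pages` sparsebit alongside the existing `unused_phy_pages` accounting, and the sparsebit is only allocated when `vm_arch_has_protected_memory(vm)` is true, so existing tests on non-protected VM types are unaffected.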
