summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorMichael Roth <michael.roth@amd.com>2026-01-09 00:46:20 +0300
committerSean Christopherson <seanjc@google.com>2026-01-15 23:31:16 +0300
commitdcbcc2323c806b55939d765c13d0728421756017 (patch)
tree7b6bd7b2aac52548c190309aafaaa72b7aa2add1
parent8622ef05709fbc4903f54cdf1ac8c3725e479dd8 (diff)
downloadlinux-dcbcc2323c806b55939d765c13d0728421756017.tar.xz
KVM: SEV: Document/enforce page-alignment for KVM_SEV_SNP_LAUNCH_UPDATE
In the past, KVM_SEV_SNP_LAUNCH_UPDATE accepted a non-page-aligned 'uaddr' parameter to copy data from, but continuing to support this with new functionality like in-place conversion and hugepages in the pipeline has proven to be more trouble than it is worth, since no known users have been identified that rely on a non-page-aligned 'uaddr' parameter. Rather than locking guest_memfd into continuing to support this, go ahead and document page-alignment as a requirement and begin enforcing this in the handling function. Reviewed-by: Vishal Annapurve <vannapurve@google.com> Tested-by: Kai Huang <kai.huang@intel.com> Signed-off-by: Michael Roth <michael.roth@amd.com> Link: https://patch.msgid.link/20260108214622.1084057-5-michael.roth@amd.com Signed-off-by: Sean Christopherson <seanjc@google.com>
-rw-r--r--Documentation/virt/kvm/x86/amd-memory-encryption.rst2
-rw-r--r--arch/x86/kvm/svm/sev.c6
2 files changed, 6 insertions, 2 deletions
diff --git a/Documentation/virt/kvm/x86/amd-memory-encryption.rst b/Documentation/virt/kvm/x86/amd-memory-encryption.rst
index 1ddb6a86ce7f..5a88d0197cb3 100644
--- a/Documentation/virt/kvm/x86/amd-memory-encryption.rst
+++ b/Documentation/virt/kvm/x86/amd-memory-encryption.rst
@@ -523,7 +523,7 @@ Returns: 0 on success, < 0 on error, -EAGAIN if caller should retry
struct kvm_sev_snp_launch_update {
__u64 gfn_start; /* Guest page number to load/encrypt data into. */
- __u64 uaddr; /* Userspace address of data to be loaded/encrypted. */
+ __u64 uaddr; /* 4k-aligned address of data to be loaded/encrypted. */
__u64 len; /* 4k-aligned length in bytes to copy into guest memory.*/
__u8 type; /* The type of the guest pages being initialized. */
__u8 pad0;
diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c
index a70bd3f19e29..b4409bc652d1 100644
--- a/arch/x86/kvm/svm/sev.c
+++ b/arch/x86/kvm/svm/sev.c
@@ -2367,6 +2367,11 @@ static int snp_launch_update(struct kvm *kvm, struct kvm_sev_cmd *argp)
params.type != KVM_SEV_SNP_PAGE_TYPE_CPUID))
return -EINVAL;
+ src = params.type == KVM_SEV_SNP_PAGE_TYPE_ZERO ? NULL : u64_to_user_ptr(params.uaddr);
+
+ if (!PAGE_ALIGNED(src))
+ return -EINVAL;
+
npages = params.len / PAGE_SIZE;
/*
@@ -2398,7 +2403,6 @@ static int snp_launch_update(struct kvm *kvm, struct kvm_sev_cmd *argp)
sev_populate_args.sev_fd = argp->sev_fd;
sev_populate_args.type = params.type;
- src = params.type == KVM_SEV_SNP_PAGE_TYPE_ZERO ? NULL : u64_to_user_ptr(params.uaddr);
count = kvm_gmem_populate(kvm, params.gfn_start, src, npages,
sev_gmem_post_populate, &sev_populate_args);