| author | Will Deacon <will@kernel.org> | 2026-03-30 17:48:17 +0300 |
|---|---|---|
| committer | Marc Zyngier <maz@kernel.org> | 2026-03-30 18:58:08 +0300 |
| commit | 4e6e03f9eaddb6be5ca8477dc5642e94ddece47e (patch) | |
| tree | 0e3b4ca45f874aaab4f03297588957dd53329a48 | |
| parent | 0bf5f4d400cd11ab86b25a56b101726e35f3e7cb (diff) | |
| download | linux-4e6e03f9eaddb6be5ca8477dc5642e94ddece47e.tar.xz | |
KVM: arm64: Hook up reclaim hypercall to pkvm_pgtable_stage2_destroy()
During teardown of a protected guest, its memory pages must be reclaimed
from the hypervisor by issuing the '__pkvm_reclaim_dying_guest_page'
hypercall.
Add a new helper, __pkvm_pgtable_stage2_reclaim(), which is called
during the VM teardown operation to reclaim pages from the hypervisor
and drop the GUP pin on the host.
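For reference, the core of the new helper is a walk over the range's pkvm
mappings. A condensed sketch of the loop, taken from the diff below and
annotated here for clarity, looks like this:

	for_each_mapping_in_range_safe(pgt, start, end, mapping) {
		struct page *page;

		/* Ask the hypervisor to hand the dying guest's page back. */
		ret = kvm_call_hyp_nvhe(__pkvm_reclaim_dying_guest_page,
					handle, mapping->gfn);
		if (WARN_ON(ret))
			continue;	/* don't unpin a page hyp may still own */

		/* Drop the GUP pin and the locked_vm charge for the page. */
		page = pfn_to_page(mapping->pfn);
		unpin_user_pages_dirty_lock(&page, 1, true);
		account_locked_vm(current->mm, 1, false);

		pkvm_mapping_remove(mapping, &pgt->pkvm_mappings);
		kfree(mapping);
	}
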
Tested-by: Fuad Tabba <tabba@google.com>
Tested-by: Mostafa Saleh <smostafa@google.com>
Signed-off-by: Will Deacon <will@kernel.org>
Link: https://patch.msgid.link/20260330144841.26181-17-will@kernel.org
Signed-off-by: Marc Zyngier <maz@kernel.org>
| -rw-r--r-- | arch/arm64/kvm/pkvm.c | 31 |
1 files changed, 30 insertions, 1 deletions
diff --git a/arch/arm64/kvm/pkvm.c b/arch/arm64/kvm/pkvm.c
index 7d0fe36fd8dc..3cf23496f225 100644
--- a/arch/arm64/kvm/pkvm.c
+++ b/arch/arm64/kvm/pkvm.c
@@ -328,6 +328,32 @@ int pkvm_pgtable_stage2_init(struct kvm_pgtable *pgt, struct kvm_s2_mmu *mmu,
 	return 0;
 }
 
+static int __pkvm_pgtable_stage2_reclaim(struct kvm_pgtable *pgt, u64 start, u64 end)
+{
+	struct kvm *kvm = kvm_s2_mmu_to_kvm(pgt->mmu);
+	pkvm_handle_t handle = kvm->arch.pkvm.handle;
+	struct pkvm_mapping *mapping;
+	int ret;
+
+	for_each_mapping_in_range_safe(pgt, start, end, mapping) {
+		struct page *page;
+
+		ret = kvm_call_hyp_nvhe(__pkvm_reclaim_dying_guest_page,
+					handle, mapping->gfn);
+		if (WARN_ON(ret))
+			continue;
+
+		page = pfn_to_page(mapping->pfn);
+		WARN_ON_ONCE(mapping->nr_pages != 1);
+		unpin_user_pages_dirty_lock(&page, 1, true);
+		account_locked_vm(current->mm, 1, false);
+		pkvm_mapping_remove(mapping, &pgt->pkvm_mappings);
+		kfree(mapping);
+	}
+
+	return 0;
+}
+
 static int __pkvm_pgtable_stage2_unshare(struct kvm_pgtable *pgt, u64 start, u64 end)
 {
 	struct kvm *kvm = kvm_s2_mmu_to_kvm(pgt->mmu);
@@ -361,7 +387,10 @@ void pkvm_pgtable_stage2_destroy_range(struct kvm_pgtable *pgt,
 		kvm->arch.pkvm.is_dying = true;
 	}
 
-	__pkvm_pgtable_stage2_unshare(pgt, addr, addr + size);
+	if (kvm_vm_is_protected(kvm))
+		__pkvm_pgtable_stage2_reclaim(pgt, addr, addr + size);
+	else
+		__pkvm_pgtable_stage2_unshare(pgt, addr, addr + size);
 }
 
 void pkvm_pgtable_stage2_destroy_pgd(struct kvm_pgtable *pgt)
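Two points are worth noting in the hunks above: protected VMs take the new
reclaim path while non-protected VMs keep the existing unshare path, and if
the reclaim hypercall fails for a mapping the loop warns and skips it,
leaving the page pinned and the mapping in place rather than unpinning a
page the hypervisor may still own. The GUP pin and the locked_vm charge are
only dropped once the hypervisor has confirmed the reclaim.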
