author		Will Deacon <will@kernel.org>		2026-03-30 17:48:27 +0300
committer	Marc Zyngier <maz@kernel.org>		2026-03-30 18:58:09 +0300
commit		281a38ad2920b5ccfbbc2a0ca0caeee110ad5d6b (patch)
tree		96b02a5a4eb162fb52156c1490b7b1f656783dd3
parent		56080f53a6ad779b971eb7f4f7a232498805d867 (diff)
download	linux-281a38ad2920b5ccfbbc2a0ca0caeee110ad5d6b.tar.xz
KVM: arm64: Reclaim faulting page from pKVM in spurious fault handler
Host kernel accesses to pages that are inaccessible at stage-2 result
in the injection of a translation fault, which is fatal unless an
exception table fixup is registered for the faulting PC (e.g. for user
access routines). This is undesirable, since a get_user_pages() call
could be used to obtain a reference to a donated page and then a
subsequent access via a kernel mapping would lead to a panic().

Rework the spurious fault handler so that stage-2 faults injected back
into the host result in the target page being forcefully reclaimed when
no exception table fixup handler is registered.

Tested-by: Fuad Tabba <tabba@google.com>
Tested-by: Mostafa Saleh <smostafa@google.com>
Signed-off-by: Will Deacon <will@kernel.org>
Link: https://patch.msgid.link/20260330144841.26181-27-will@kernel.org
Signed-off-by: Marc Zyngier <maz@kernel.org>
 arch/arm64/include/asm/virt.h |  9 +++++++++
 arch/arm64/kvm/pkvm.c         | 12 ++++++++++++
 arch/arm64/mm/fault.c         | 17 +++++++++++------
 3 files changed, 32 insertions(+), 6 deletions(-)
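To make the failure mode concrete, here is an illustrative sketch (not
part of the patch) of how the host can end up dereferencing a page it
no longer owns. The function name is hypothetical; get_user_pages() and
page_address() are the real kernel APIs, and locking/put_page() are
elided for brevity:

#include <linux/mm.h>

/*
 * Hypothetical illustration: the page is pinned while still
 * host-owned, later donated to a protected guest, and then touched
 * through the kernel linear map.
 */
static int host_touches_donated_page(unsigned long uaddr)
{
	struct page *page;

	/* Pin the page while the host still owns it. */
	if (get_user_pages(uaddr, 1, FOLL_WRITE, &page) != 1)
		return -EFAULT;

	/* ... the page is donated to a protected guest here ... */

	/*
	 * This access now takes a stage-2 fault that pKVM reflects back
	 * as an EL1 translation fault. No exception table fixup exists
	 * for this PC, so before this patch the kernel would panic();
	 * with it, the page is forcefully reclaimed instead.
	 */
	return *(int *)page_address(page);
}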
diff --git a/arch/arm64/include/asm/virt.h b/arch/arm64/include/asm/virt.h
index b51ab6840f9c..b546703c3ab9 100644
--- a/arch/arm64/include/asm/virt.h
+++ b/arch/arm64/include/asm/virt.h
@@ -94,6 +94,15 @@ static inline bool is_pkvm_initialized(void)
 	       static_branch_likely(&kvm_protected_mode_initialized);
 }
 
+#ifdef CONFIG_KVM
+bool pkvm_force_reclaim_guest_page(phys_addr_t phys);
+#else
+static inline bool pkvm_force_reclaim_guest_page(phys_addr_t phys)
+{
+	return false;
+}
+#endif
+
 /* Reports the availability of HYP mode */
 static inline bool is_hyp_mode_available(void)
 {
diff --git a/arch/arm64/kvm/pkvm.c b/arch/arm64/kvm/pkvm.c
index 3cf23496f225..10edd4965936 100644
--- a/arch/arm64/kvm/pkvm.c
+++ b/arch/arm64/kvm/pkvm.c
@@ -569,3 +569,15 @@ int pkvm_pgtable_stage2_split(struct kvm_pgtable *pgt, u64 addr, u64 size,
 	WARN_ON_ONCE(1);
 	return -EINVAL;
 }
+
+/*
+ * Forcefully reclaim a page from the guest, zeroing its contents and
+ * poisoning the stage-2 pte so that pages can no longer be mapped at
+ * the same IPA. The page remains pinned until the guest is destroyed.
+ */
+bool pkvm_force_reclaim_guest_page(phys_addr_t phys)
+{
+	int ret = kvm_call_hyp_nvhe(__pkvm_force_reclaim_guest_page, phys);
+
+	return !ret || ret == -EAGAIN;
+}
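One note on the return-value contract above: -EAGAIN from the hypercall
is folded into the success path. A plausible reading (an assumption;
the patch does not spell it out) is that it covers a race where the
page has already been reclaimed or is mid-transition, in which case
retrying the faulting access is safe. A minimal caller sketch, with a
hypothetical function name:

/*
 * Hypothetical caller: recover a host kernel access that faulted on a
 * guest-owned page at physical address 'pa'.
 */
static bool reclaim_and_retry(phys_addr_t pa)
{
	/*
	 * true:  the page was zeroed and its stage-2 PTE poisoned, so
	 *        the faulting instruction can (presumably) be retried.
	 * false: reclaim failed; treat the original fault as fatal.
	 */
	return pkvm_force_reclaim_guest_page(pa);
}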
diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
index 3abfc7272d63..7eacc7b45c1f 100644
--- a/arch/arm64/mm/fault.c
+++ b/arch/arm64/mm/fault.c
@@ -289,9 +289,6 @@ static bool __kprobes is_spurious_el1_translation_fault(unsigned long addr,
 	if (!is_el1_data_abort(esr) || !esr_fsc_is_translation_fault(esr))
 		return false;
 
-	if (is_pkvm_stage2_abort(esr))
-		return false;
-
 	local_irq_save(flags);
 	asm volatile("at s1e1r, %0" :: "r" (addr));
 	isb();
@@ -302,8 +299,14 @@
 	 * If we now have a valid translation, treat the translation fault as
 	 * spurious.
 	 */
-	if (!(par & SYS_PAR_EL1_F))
+	if (!(par & SYS_PAR_EL1_F)) {
+		if (is_pkvm_stage2_abort(esr)) {
+			par &= SYS_PAR_EL1_PA;
+			return pkvm_force_reclaim_guest_page(par);
+		}
+
 		return true;
+	}
 
 	/*
 	 * If we got a different type of fault from the AT instruction,
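The recovery hinges on the AT-based probe above: a successful
"at s1e1r" leaves PAR_EL1.F clear and the output physical address in
PAR_EL1, so masking with SYS_PAR_EL1_PA yields the page-aligned PA that
is handed to the reclaim helper. A standalone sketch of that dance,
assuming the SYS_PAR_EL1_* definitions from asm/sysreg.h and that the
caller runs with IRQs disabled (as the real handler does via
local_irq_save()):

/*
 * Translate a kernel VA to a PA with an AT instruction. Sketch only;
 * returning 0 on failure is a simplification, since PA 0 could in
 * principle be a valid address.
 */
static phys_addr_t kernel_va_to_pa(unsigned long addr)
{
	u64 par;

	asm volatile("at s1e1r, %0" :: "r" (addr));
	isb();
	par = read_sysreg(par_el1);

	if (par & SYS_PAR_EL1_F)
		return 0;	/* stage-1 translation failed */

	/* Keep only the output address bits of PAR_EL1. */
	return par & SYS_PAR_EL1_PA;
}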
@@ -389,9 +392,11 @@ static void __do_kernel_fault(unsigned long addr, unsigned long esr,
 	if (!is_el1_instruction_abort(esr) && fixup_exception(regs, esr))
 		return;
 
-	if (WARN_RATELIMIT(is_spurious_el1_translation_fault(addr, esr, regs),
-			   "Ignoring spurious kernel translation fault at virtual address %016lx\n", addr))
+	if (is_spurious_el1_translation_fault(addr, esr, regs)) {
+		WARN_RATELIMIT(!is_pkvm_stage2_abort(esr),
+			       "Ignoring spurious kernel translation fault at virtual address %016lx\n", addr);
 		return;
+	}
 
 	if (is_el1_mte_sync_tag_check_fault(esr)) {
 		do_tag_recovery(addr, esr, regs);
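Putting the fault.c hunks together, a condensed sketch of the resulting
__do_kernel_fault() behaviour (illustrative only; the real code is in
the hunks above):

/* Condensed flow after this patch (illustrative, not the literal code). */
static void kernel_fault_flow(unsigned long addr, unsigned long esr,
			      struct pt_regs *regs)
{
	/* 1. Exception-table fixups (e.g. for uaccess) still win. */
	if (!is_el1_instruction_abort(esr) && fixup_exception(regs, esr))
		return;

	/*
	 * 2. Spurious faults are ignored. For a pKVM stage-2 abort,
	 *    "spurious" now means the page was successfully reclaimed,
	 *    so the ratelimited warning is suppressed; any other
	 *    spurious translation fault still warns.
	 */
	if (is_spurious_el1_translation_fault(addr, esr, regs))
		return;

	/* 3. Anything else, including a failed reclaim, stays fatal. */
}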