summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorMarc Zyngier <maz@kernel.org>2026-03-09 18:35:27 +0300
committerMarc Zyngier <maz@kernel.org>2026-03-28 14:29:41 +0300
commit08abf09f4d763565ee9ed084401b7ac92f8b4952 (patch)
treef2fb3d5c8a51d53caad04c7fd57f5bc81506db26
parent18e1312879db0af5bfd1b1a8b6771d314ccd3ca9 (diff)
downloadlinux-08abf09f4d763565ee9ed084401b7ac92f8b4952.tar.xz
KVM: arm64: Restrict the scope of the 'writable' attribute
The 'writable' field is ambiguous, and indicates multiple things:

- whether the underlying memslot is writable
- whether we are resolving the fault with writable attributes

Add a new field to kvm_s2_fault_vma_info (map_writable) to indicate the former condition, and have local writable variables to track the latter.

Tested-by: Fuad Tabba <tabba@google.com> Reviewed-by: Fuad Tabba <tabba@google.com> Reviewed-by: Suzuki K Poulose <suzuki.poulose@arm.com> Signed-off-by: Marc Zyngier <maz@kernel.org>
-rw-r--r--arch/arm64/kvm/mmu.c16
1 file changed, 9 insertions, 7 deletions
diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c
index 227bc34bc535..f8e88539988c 100644
--- a/arch/arm64/kvm/mmu.c
+++ b/arch/arm64/kvm/mmu.c
@@ -1653,6 +1653,7 @@ struct kvm_s2_fault_vma_info {
gfn_t gfn;
bool mte_allowed;
bool is_vma_cacheable;
+ bool map_writable;
};
static short kvm_s2_resolve_vma_size(const struct kvm_s2_fault_desc *s2fd,
@@ -1718,7 +1719,6 @@ static short kvm_s2_resolve_vma_size(const struct kvm_s2_fault_desc *s2fd,
}
struct kvm_s2_fault {
- bool writable;
bool s2_force_noncacheable;
kvm_pfn_t pfn;
bool force_pte;
@@ -1799,7 +1799,7 @@ static int kvm_s2_fault_pin_pfn(const struct kvm_s2_fault_desc *s2fd,
fault->pfn = __kvm_faultin_pfn(s2fd->memslot, get_canonical_gfn(s2fd, s2vi),
kvm_is_write_fault(s2fd->vcpu) ? FOLL_WRITE : 0,
- &fault->writable, &fault->page);
+ &s2vi->map_writable, &fault->page);
if (unlikely(is_error_noslot_pfn(fault->pfn))) {
if (fault->pfn == KVM_PFN_ERR_HWPOISON) {
kvm_send_hwpoison_signal(s2fd->hva, __ffs(s2vi->vma_pagesize));
@@ -1816,6 +1816,7 @@ static int kvm_s2_fault_compute_prot(const struct kvm_s2_fault_desc *s2fd,
const struct kvm_s2_fault_vma_info *s2vi)
{
struct kvm *kvm = s2fd->vcpu->kvm;
+ bool writable = s2vi->map_writable;
/*
* Check if this is non-struct page memory PFN, and cannot support
@@ -1855,7 +1856,7 @@ static int kvm_s2_fault_compute_prot(const struct kvm_s2_fault_desc *s2fd,
* Only actually map the page as writable if this was a write
* fault.
*/
- fault->writable = false;
+ writable = false;
}
if (kvm_vcpu_trap_is_exec_fault(s2fd->vcpu) && fault->s2_force_noncacheable)
@@ -1873,9 +1874,9 @@ static int kvm_s2_fault_compute_prot(const struct kvm_s2_fault_desc *s2fd,
}
if (s2fd->nested)
- adjust_nested_fault_perms(s2fd->nested, &fault->prot, &fault->writable);
+ adjust_nested_fault_perms(s2fd->nested, &fault->prot, &writable);
- if (fault->writable)
+ if (writable)
fault->prot |= KVM_PGTABLE_PROT_W;
if (kvm_vcpu_trap_is_exec_fault(s2fd->vcpu))
@@ -1904,6 +1905,7 @@ static int kvm_s2_fault_map(const struct kvm_s2_fault_desc *s2fd,
const struct kvm_s2_fault_vma_info *s2vi, void *memcache)
{
enum kvm_pgtable_walk_flags flags = KVM_PGTABLE_WALK_SHARED;
+ bool writable = fault->prot & KVM_PGTABLE_PROT_W;
struct kvm *kvm = s2fd->vcpu->kvm;
struct kvm_pgtable *pgt;
long perm_fault_granule;
@@ -1964,7 +1966,7 @@ static int kvm_s2_fault_map(const struct kvm_s2_fault_desc *s2fd,
}
out_unlock:
- kvm_release_faultin_page(kvm, fault->page, !!ret, fault->writable);
+ kvm_release_faultin_page(kvm, fault->page, !!ret, writable);
kvm_fault_unlock(kvm);
/*
@@ -1972,7 +1974,7 @@ out_unlock:
* making sure we adjust the canonical IPA if the mapping size has
* been updated (via a THP upgrade, for example).
*/
- if (fault->writable && !ret) {
+ if (writable && !ret) {
phys_addr_t ipa = gfn_to_gpa(get_canonical_gfn(s2fd, s2vi));
ipa &= ~(mapping_size - 1);
mark_page_dirty_in_slot(kvm, s2fd->memslot, gpa_to_gfn(ipa));