author		Paolo Bonzini <pbonzini@redhat.com>	2022-07-29 16:34:45 +0300
committer	Paolo Bonzini <pbonzini@redhat.com>	2022-07-29 16:34:45 +0300
commit		2e2e91158febfeb73b5d4f249440218304f34101
tree		054dbf5e54bdae60014a7b008dacb58757cbd202 /arch/riscv/kvm/mmu.c
parent		e0dccc3b76fb35bb257b4118367a883073d7390e
parent		6bb2e00ea304ffc0446f345c46fe22713ce43cbf
Merge tag 'kvm-riscv-5.20-1' of https://github.com/kvm-riscv/linux into HEAD
KVM/riscv changes for 5.20
* Track ISA extensions used by Guest using bitmap
* Added system instruction emulation framework
* Added CSR emulation framework
* Added gfp_custom flag in struct kvm_mmu_memory_cache (see the sketch after this list)
* Added G-stage ioremap() and iounmap() functions (a usage sketch follows the diff below)
* Added support for Svpbmt inside Guest
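
The gfp_custom flag is what lets the ioremap path in the diff below run from
atomic context: when it is non-zero, the page-table memory cache is topped up
with the caller's GFP mask instead of the default sleeping allocation. A
minimal sketch of that idea, assuming a simplified cache layout (the helper
name topup_cache_sketch is made up, and this is not the exact generic-KVM
top-up code, just an illustration of what gfp_custom is for):

#include <linux/gfp.h>
#include <linux/kvm_types.h>

/* Illustrative only: refill a kvm_mmu_memory_cache up to 'min' objects,
 * honouring a caller-supplied GFP mask (e.g. GFP_ATOMIC | __GFP_ACCOUNT)
 * when mc->gfp_custom is set. */
static int topup_cache_sketch(struct kvm_mmu_memory_cache *mc, int min)
{
	/* Default to the usual accounted, sleeping allocation. */
	gfp_t gfp = mc->gfp_custom ? mc->gfp_custom : GFP_KERNEL_ACCOUNT;

	while (mc->nobjs < min) {
		void *page = (void *)__get_free_page(gfp | mc->gfp_zero);

		if (!page)
			return -ENOMEM;
		mc->objects[mc->nobjs++] = page;
	}
	return 0;
}

With GFP_ATOMIC the refill fails instead of sleeping, which is why the
ioremap path below only installs a custom mask when in_atomic is true.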
Diffstat (limited to 'arch/riscv/kvm/mmu.c')
-rw-r--r--  arch/riscv/kvm/mmu.c | 28 +++++++++++++++++++---------
1 file changed, 19 insertions(+), 9 deletions(-)
diff --git a/arch/riscv/kvm/mmu.c b/arch/riscv/kvm/mmu.c
index 9826073fbc67..bc545aef6034 100644
--- a/arch/riscv/kvm/mmu.c
+++ b/arch/riscv/kvm/mmu.c
@@ -343,8 +343,9 @@ static void gstage_wp_memory_region(struct kvm *kvm, int slot)
 	kvm_flush_remote_tlbs(kvm);
 }
 
-static int gstage_ioremap(struct kvm *kvm, gpa_t gpa, phys_addr_t hpa,
-			  unsigned long size, bool writable)
+int kvm_riscv_gstage_ioremap(struct kvm *kvm, gpa_t gpa,
+			     phys_addr_t hpa, unsigned long size,
+			     bool writable, bool in_atomic)
 {
 	pte_t pte;
 	int ret = 0;
@@ -353,13 +354,14 @@ static int gstage_ioremap(struct kvm *kvm, gpa_t gpa, phys_addr_t hpa,
 	struct kvm_mmu_memory_cache pcache;
 
 	memset(&pcache, 0, sizeof(pcache));
+	pcache.gfp_custom = (in_atomic) ? GFP_ATOMIC | __GFP_ACCOUNT : 0;
 	pcache.gfp_zero = __GFP_ZERO;
 
 	end = (gpa + size + PAGE_SIZE - 1) & PAGE_MASK;
 	pfn = __phys_to_pfn(hpa);
 
 	for (addr = gpa; addr < end; addr += PAGE_SIZE) {
-		pte = pfn_pte(pfn, PAGE_KERNEL);
+		pte = pfn_pte(pfn, PAGE_KERNEL_IO);
 
 		if (!writable)
 			pte = pte_wrprotect(pte);
@@ -382,6 +384,13 @@ out:
 	return ret;
 }
 
+void kvm_riscv_gstage_iounmap(struct kvm *kvm, gpa_t gpa, unsigned long size)
+{
+	spin_lock(&kvm->mmu_lock);
+	gstage_unmap_range(kvm, gpa, size, false);
+	spin_unlock(&kvm->mmu_lock);
+}
+
 void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm,
 					     struct kvm_memory_slot *slot,
 					     gfn_t gfn_offset,
@@ -517,8 +526,9 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
 			goto out;
 		}
 
-		ret = gstage_ioremap(kvm, gpa, pa,
-				     vm_end - vm_start, writable);
+		ret = kvm_riscv_gstage_ioremap(kvm, gpa, pa,
+					       vm_end - vm_start,
+					       writable, false);
 		if (ret)
 			break;
 	}
@@ -611,7 +621,7 @@ int kvm_riscv_gstage_map(struct kvm_vcpu *vcpu,
 {
 	int ret;
 	kvm_pfn_t hfn;
-	bool writeable;
+	bool writable;
 	short vma_pageshift;
 	gfn_t gfn = gpa >> PAGE_SHIFT;
 	struct vm_area_struct *vma;
@@ -659,7 +669,7 @@ int kvm_riscv_gstage_map(struct kvm_vcpu *vcpu,
 
 	mmu_seq = kvm->mmu_notifier_seq;
 
-	hfn = gfn_to_pfn_prot(kvm, gfn, is_write, &writeable);
+	hfn = gfn_to_pfn_prot(kvm, gfn, is_write, &writable);
 	if (hfn == KVM_PFN_ERR_HWPOISON) {
 		send_sig_mceerr(BUS_MCEERR_AR, (void __user *)hva,
 				vma_pageshift, current);
@@ -673,14 +683,14 @@ int kvm_riscv_gstage_map(struct kvm_vcpu *vcpu,
 	 * for write faults.
 	 */
 	if (logging && !is_write)
-		writeable = false;
+		writable = false;
 
 	spin_lock(&kvm->mmu_lock);
 
 	if (mmu_notifier_retry(kvm, mmu_seq))
 		goto out_unlock;
 
-	if (writeable) {
+	if (writable) {
 		kvm_set_pfn_dirty(hfn);
 		mark_page_dirty(kvm, gfn);
 		ret = gstage_map_page(kvm, pcache, gpa, hfn << PAGE_SHIFT,
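
Taken together, the renamed kvm_riscv_gstage_ioremap() and the new
kvm_riscv_gstage_iounmap() give other in-kernel RISC-V KVM code a way to map
and unmap host MMIO into guest physical address space. A hedged usage sketch
(the demo_* wrappers and the one-page mapping are invented for illustration;
only the two kvm_riscv_gstage_* calls and their signatures come from this
diff):

#include <linux/kvm_host.h>

/* Illustrative only: map one page of host MMIO (hpa) at guest physical
 * address gpa, writable, from a context that may hold a spinlock --
 * hence in_atomic = true, so the page-table cache is refilled with
 * GFP_ATOMIC | __GFP_ACCOUNT instead of a sleeping allocation. */
static int demo_map_mmio(struct kvm *kvm, gpa_t gpa, phys_addr_t hpa)
{
	return kvm_riscv_gstage_ioremap(kvm, gpa, hpa, PAGE_SIZE,
					true /* writable */,
					true /* in_atomic */);
}

/* Tear the mapping down again; the helper takes kvm->mmu_lock itself. */
static void demo_unmap_mmio(struct kvm *kvm, gpa_t gpa)
{
	kvm_riscv_gstage_iounmap(kvm, gpa, PAGE_SIZE);
}

Callers that are allowed to sleep should pass in_atomic = false, as
kvm_arch_prepare_memory_region() does in the hunk above.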