author		Paolo Bonzini <pbonzini@redhat.com>	2024-05-10 17:49:02 +0300
committer	Paolo Bonzini <pbonzini@redhat.com>	2024-05-10 20:20:18 +0300
commit		4232da23d75d173195c6766729e51947b64f83cd (patch)
tree		7eb1420b118d452c0b497bf6267ecdfb0a6d5931 /virt
parent		bbe10a5cc0c775e52e91c8b3b6547b59b2054f44 (diff)
parent		7b7e584f90bf670d5c6f2b1fff884bf3b972cad4 (diff)
download	linux-4232da23d75d173195c6766729e51947b64f83cd.tar.xz
Merge tag 'loongarch-kvm-6.10' of git://git.kernel.org/pub/scm/linux/kernel/git/chenhuacai/linux-loongson into HEAD
LoongArch KVM changes for v6.10
1. Add ParaVirt IPI support.
2. Add software breakpoint support.
3. Add mmio trace events support.
Diffstat (limited to 'virt')
-rw-r--r--	virt/kvm/kvm_main.c	3
-rw-r--r--	virt/kvm/kvm_mm.h	6
-rw-r--r--	virt/kvm/pfncache.c	50
3 files changed, 38 insertions, 21 deletions
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 38b498669ef9..38694f95bb56 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -798,8 +798,7 @@ static int kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn,
 	 * mn_active_invalidate_count (see above) instead of
 	 * mmu_invalidate_in_progress.
 	 */
-	gfn_to_pfn_cache_invalidate_start(kvm, range->start, range->end,
-					  hva_range.may_block);
+	gfn_to_pfn_cache_invalidate_start(kvm, range->start, range->end);
 
 	/*
 	 * If one or more memslots were found and thus zapped, notify arch code
diff --git a/virt/kvm/kvm_mm.h b/virt/kvm/kvm_mm.h
index ecefc7ec51af..715f19669d01 100644
--- a/virt/kvm/kvm_mm.h
+++ b/virt/kvm/kvm_mm.h
@@ -26,13 +26,11 @@ kvm_pfn_t hva_to_pfn(unsigned long addr, bool atomic, bool interruptible,
 #ifdef CONFIG_HAVE_KVM_PFNCACHE
 void gfn_to_pfn_cache_invalidate_start(struct kvm *kvm,
 				       unsigned long start,
-				       unsigned long end,
-				       bool may_block);
+				       unsigned long end);
 #else
 static inline void gfn_to_pfn_cache_invalidate_start(struct kvm *kvm,
 						     unsigned long start,
-						     unsigned long end,
-						     bool may_block)
+						     unsigned long end)
 {
 }
 #endif /* HAVE_KVM_PFNCACHE */
diff --git a/virt/kvm/pfncache.c b/virt/kvm/pfncache.c
index 4e07112a24c2..e3453e869e92 100644
--- a/virt/kvm/pfncache.c
+++ b/virt/kvm/pfncache.c
@@ -23,7 +23,7 @@
  * MMU notifier 'invalidate_range_start' hook.
  */
 void gfn_to_pfn_cache_invalidate_start(struct kvm *kvm, unsigned long start,
-				       unsigned long end, bool may_block)
+				       unsigned long end)
 {
 	struct gfn_to_pfn_cache *gpc;
 
@@ -57,6 +57,19 @@ void gfn_to_pfn_cache_invalidate_start(struct kvm *kvm, unsigned long start,
 	spin_unlock(&kvm->gpc_lock);
 }
 
+static bool kvm_gpc_is_valid_len(gpa_t gpa, unsigned long uhva,
+				 unsigned long len)
+{
+	unsigned long offset = kvm_is_error_gpa(gpa) ? offset_in_page(uhva) :
+						       offset_in_page(gpa);
+
+	/*
+	 * The cached access must fit within a single page. The 'len' argument
+	 * to activate() and refresh() exists only to enforce that.
+	 */
+	return offset + len <= PAGE_SIZE;
+}
+
 bool kvm_gpc_check(struct gfn_to_pfn_cache *gpc, unsigned long len)
 {
 	struct kvm_memslots *slots = kvm_memslots(gpc->kvm);
@@ -74,7 +87,7 @@ bool kvm_gpc_check(struct gfn_to_pfn_cache *gpc, unsigned long len)
 	if (kvm_is_error_hva(gpc->uhva))
 		return false;
 
-	if (offset_in_page(gpc->uhva) + len > PAGE_SIZE)
+	if (!kvm_gpc_is_valid_len(gpc->gpa, gpc->uhva, len))
 		return false;
 
 	if (!gpc->valid)
@@ -232,8 +245,7 @@ out_error:
 	return -EFAULT;
 }
 
-static int __kvm_gpc_refresh(struct gfn_to_pfn_cache *gpc, gpa_t gpa, unsigned long uhva,
-			     unsigned long len)
+static int __kvm_gpc_refresh(struct gfn_to_pfn_cache *gpc, gpa_t gpa, unsigned long uhva)
 {
 	unsigned long page_offset;
 	bool unmap_old = false;
@@ -247,15 +259,6 @@ static int __kvm_gpc_refresh(struct gfn_to_pfn_cache *gpc, gpa_t gpa, unsigned l
 	if (WARN_ON_ONCE(kvm_is_error_gpa(gpa) == kvm_is_error_hva(uhva)))
 		return -EINVAL;
 
-	/*
-	 * The cached acces must fit within a single page. The 'len' argument
-	 * exists only to enforce that.
-	 */
-	page_offset = kvm_is_error_gpa(gpa) ? offset_in_page(uhva) :
-					      offset_in_page(gpa);
-	if (page_offset + len > PAGE_SIZE)
-		return -EINVAL;
-
 	lockdep_assert_held(&gpc->refresh_lock);
 
 	write_lock_irq(&gpc->lock);
@@ -270,6 +273,8 @@ static int __kvm_gpc_refresh(struct gfn_to_pfn_cache *gpc, gpa_t gpa, unsigned l
 	old_uhva = PAGE_ALIGN_DOWN(gpc->uhva);
 
 	if (kvm_is_error_gpa(gpa)) {
+		page_offset = offset_in_page(uhva);
+
 		gpc->gpa = INVALID_GPA;
 		gpc->memslot = NULL;
 		gpc->uhva = PAGE_ALIGN_DOWN(uhva);
@@ -279,6 +284,8 @@ static int __kvm_gpc_refresh(struct gfn_to_pfn_cache *gpc, gpa_t gpa, unsigned l
 	} else {
 		struct kvm_memslots *slots = kvm_memslots(gpc->kvm);
 
+		page_offset = offset_in_page(gpa);
+
 		if (gpc->gpa != gpa || gpc->generation != slots->generation ||
 		    kvm_is_error_hva(gpc->uhva)) {
 			gfn_t gfn = gpa_to_gfn(gpa);
@@ -354,6 +361,9 @@ int kvm_gpc_refresh(struct gfn_to_pfn_cache *gpc, unsigned long len)
 
 	guard(mutex)(&gpc->refresh_lock);
 
+	if (!kvm_gpc_is_valid_len(gpc->gpa, gpc->uhva, len))
+		return -EINVAL;
+
 	/*
 	 * If the GPA is valid then ignore the HVA, as a cache can be GPA-based
 	 * or HVA-based, not both. For GPA-based caches, the HVA will be
@@ -361,7 +371,7 @@ int kvm_gpc_refresh(struct gfn_to_pfn_cache *gpc, unsigned long len)
 	 */
 	uhva = kvm_is_error_gpa(gpc->gpa) ? gpc->uhva : KVM_HVA_ERR_BAD;
 
-	return __kvm_gpc_refresh(gpc, gpc->gpa, uhva, len);
+	return __kvm_gpc_refresh(gpc, gpc->gpa, uhva);
 }
 
 void kvm_gpc_init(struct gfn_to_pfn_cache *gpc, struct kvm *kvm)
@@ -381,6 +391,9 @@ static int __kvm_gpc_activate(struct gfn_to_pfn_cache *gpc, gpa_t gpa, unsigned
 {
 	struct kvm *kvm = gpc->kvm;
 
+	if (!kvm_gpc_is_valid_len(gpa, uhva, len))
+		return -EINVAL;
+
 	guard(mutex)(&gpc->refresh_lock);
 
 	if (!gpc->active) {
@@ -400,11 +413,18 @@ static int __kvm_gpc_activate(struct gfn_to_pfn_cache *gpc, gpa_t gpa, unsigned
 		gpc->active = true;
 		write_unlock_irq(&gpc->lock);
 	}
-	return __kvm_gpc_refresh(gpc, gpa, uhva, len);
+	return __kvm_gpc_refresh(gpc, gpa, uhva);
 }
 
 int kvm_gpc_activate(struct gfn_to_pfn_cache *gpc, gpa_t gpa, unsigned long len)
 {
+	/*
+	 * Explicitly disallow INVALID_GPA so that the magic value can be used
+	 * by KVM to differentiate between GPA-based and HVA-based caches.
+	 */
+	if (WARN_ON_ONCE(kvm_is_error_gpa(gpa)))
+		return -EINVAL;
+
 	return __kvm_gpc_activate(gpc, gpa, KVM_HVA_ERR_BAD, len);
 }
 
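The core of the pfncache change above is the page-containment rule that kvm_gpc_is_valid_len() now centralizes: an access of 'len' bytes through the cache is valid only if it does not cross a page boundary, with the offset taken from the GPA for GPA-based caches and from the user HVA when the GPA is the INVALID_GPA magic value. The following standalone sketch re-states that rule outside the kernel for illustration; PAGE_SIZE, INVALID_GPA, offset_in_page() and the sample addresses here are local stand-ins, not the kernel's definitions.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE	4096UL			/* assumed 4 KiB pages */
#define INVALID_GPA	(~(uint64_t)0)		/* stand-in for KVM's magic value */

static unsigned long offset_in_page(uint64_t addr)
{
	return addr & (PAGE_SIZE - 1);
}

/*
 * Mirror of kvm_gpc_is_valid_len(): take the offset from whichever
 * address identifies the cache, then require offset + len to stay
 * within a single page.
 */
static bool gpc_is_valid_len(uint64_t gpa, uint64_t uhva, unsigned long len)
{
	unsigned long offset = (gpa == INVALID_GPA) ? offset_in_page(uhva) :
						      offset_in_page(gpa);

	return offset + len <= PAGE_SIZE;
}

int main(void)
{
	/* GPA-based cache: 8 bytes at offset 0xff8 end exactly on the
	 * page boundary, so they fit... */
	printf("%d\n", gpc_is_valid_len(0x1000ff8, 0, 8));		/* 1 */
	/* ...while 16 bytes at the same offset would span two pages. */
	printf("%d\n", gpc_is_valid_len(0x1000ff8, 0, 16));		/* 0 */
	/* HVA-based cache: gpa is INVALID_GPA, offset comes from uhva. */
	printf("%d\n", gpc_is_valid_len(INVALID_GPA, 0x7f0000001080, 64)); /* 1 */
	return 0;
}

Hoisting this check out of __kvm_gpc_refresh() and into kvm_gpc_check(), kvm_gpc_refresh() and __kvm_gpc_activate() is what allows the 'len' parameter to disappear from the internal refresh path in the diff above.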