| author | Sean Christopherson <seanjc@google.com> | 2022-10-14 00:12:31 +0300 |
|---|---|---|
| committer | David Woodhouse <dwmw@amazon.co.uk> | 2022-11-30 22:25:24 +0300 |
| commit | 58f5ee5fedd981e05cb086cba4e8f923c3727a04 (patch) | |
| tree | 14e4ac0c59bb8d8bc5c16b6494779a4be983ab5a /virt | |
| parent | 5762cb10235776dd1ed5f5f9d6c1aff2b73bec5c (diff) | |
| download | linux-58f5ee5fedd981e05cb086cba4e8f923c3727a04.tar.xz | |
KVM: Drop @gpa from exported gfn=>pfn cache check() and refresh() helpers
Drop the @gpa param from the exported check()+refresh() helpers and limit
changing the cache's GPA to the activate path. All external users just
feed in gpc->gpa, i.e. this is a fancy nop.
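To make "a fancy nop" concrete, here is a hedged sketch of what a typical external call site looks like before and after the change. It is modeled on the check/refresh loop pattern used by KVM's x86 Xen and pvclock callers, but the function name and the PAGE_SIZE length below are illustrative, not code taken from this series:

```c
/*
 * Illustrative caller of the gfn_to_pfn_cache API (not from this patch).
 * Pre-patch, callers had to pass a GPA even though every in-tree user
 * simply passed back gpc->gpa; post-patch the GPA argument is gone and
 * the cache's own GPA is implied.
 */
static void use_cached_page(struct gfn_to_pfn_cache *gpc)
{
        unsigned long flags;

        read_lock_irqsave(&gpc->lock, flags);

        /* Before: while (!kvm_gpc_check(gpc, gpc->gpa, PAGE_SIZE)) { ... } */
        while (!kvm_gpc_check(gpc, PAGE_SIZE)) {
                read_unlock_irqrestore(&gpc->lock, flags);

                /* Before: kvm_gpc_refresh(gpc, gpc->gpa, PAGE_SIZE) */
                if (kvm_gpc_refresh(gpc, PAGE_SIZE))
                        return;

                read_lock_irqsave(&gpc->lock, flags);
        }

        /* ... access the guest page through gpc->khva under gpc->lock ... */

        read_unlock_irqrestore(&gpc->lock, flags);
}
```

Since every in-tree caller already passes gpc->gpa, dropping the parameter changes no behavior; it only stops future callers from changing the GPA through check()/refresh().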
Allowing users to change the GPA at check()+refresh() is dangerous as
those helpers explicitly allow concurrent calls, e.g. KVM could get into
a livelock scenario. It's also unclear what the expected behavior should be
if multiple tasks attempt to refresh with different GPAs.
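As an aside, the hazard can be sketched with a small userspace toy model (plain pthreads, not KVM code): two tasks sharing one cache, each spinning on the pre-patch check()/refresh() shape with its own GPA, keep remapping the cache out from under each other. All names, the locking, and the bounded retry loop below are simplified stand-ins chosen for illustration:

```c
/* Toy userspace model of the pre-patch convention; compile with -pthread. */
#include <pthread.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct toy_gpc {
        pthread_rwlock_t lock;
        uint64_t gpa;
        bool valid;
};

static struct toy_gpc cache = { .lock = PTHREAD_RWLOCK_INITIALIZER };

/* Pre-patch shape: check() compares against a caller-supplied GPA. */
static bool toy_check(struct toy_gpc *gpc, uint64_t gpa)
{
        return gpc->valid && gpc->gpa == gpa;
}

/* Pre-patch shape: refresh() remaps the cache to the caller-supplied GPA. */
static void toy_refresh(struct toy_gpc *gpc, uint64_t gpa)
{
        pthread_rwlock_wrlock(&gpc->lock);
        gpc->gpa = gpa;         /* clobbers the GPA the other task wanted */
        gpc->valid = true;
        pthread_rwlock_unlock(&gpc->lock);
}

/* Each task pins the cache to its own GPA and retries until check() passes. */
static void *user(void *arg)
{
        uint64_t gpa = (uint64_t)(uintptr_t)arg;
        unsigned long retries = 0;

        for (int i = 0; i < 100000; i++) {
                pthread_rwlock_rdlock(&cache.lock);
                if (!toy_check(&cache, gpa)) {
                        pthread_rwlock_unlock(&cache.lock);
                        toy_refresh(&cache, gpa);
                        retries++;
                        continue;
                }
                /* ... use the mapping ... */
                pthread_rwlock_unlock(&cache.lock);
        }
        printf("gpa %#lx: %lu refresh retries\n", (unsigned long)gpa, retries);
        return NULL;
}

int main(void)
{
        pthread_t a, b;

        /* Two tasks with *different* GPAs keep invalidating each other. */
        pthread_create(&a, NULL, user, (void *)(uintptr_t)0x1000);
        pthread_create(&b, NULL, user, (void *)(uintptr_t)0x2000);
        pthread_join(a, NULL);
        pthread_join(b, NULL);
        return 0;
}
```

With the unbounded retry loops used by the real callers, the two tasks could spin indefinitely; limiting GPA changes to the activate path removes that possibility.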
Signed-off-by: Sean Christopherson <seanjc@google.com>
Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
Diffstat (limited to 'virt')
-rw-r--r-- virt/kvm/pfncache.c | 17
1 file changed, 11 insertions(+), 6 deletions(-)
```diff
diff --git a/virt/kvm/pfncache.c b/virt/kvm/pfncache.c
index c1a772cedc4b..a805cc1544bf 100644
--- a/virt/kvm/pfncache.c
+++ b/virt/kvm/pfncache.c
@@ -76,18 +76,17 @@ void gfn_to_pfn_cache_invalidate_start(struct kvm *kvm, unsigned long start,
         }
 }
 
-bool kvm_gpc_check(struct gfn_to_pfn_cache *gpc, gpa_t gpa, unsigned long len)
+bool kvm_gpc_check(struct gfn_to_pfn_cache *gpc, unsigned long len)
 {
         struct kvm_memslots *slots = kvm_memslots(gpc->kvm);
 
         if (!gpc->active)
                 return false;
 
-        if ((gpa & ~PAGE_MASK) + len > PAGE_SIZE)
+        if ((gpc->gpa & ~PAGE_MASK) + len > PAGE_SIZE)
                 return false;
 
-        if (gpc->gpa != gpa || gpc->generation != slots->generation ||
-            kvm_is_error_hva(gpc->uhva))
+        if (gpc->generation != slots->generation || kvm_is_error_hva(gpc->uhva))
                 return false;
 
         if (!gpc->valid)
@@ -237,7 +236,8 @@ out_error:
         return -EFAULT;
 }
 
-int kvm_gpc_refresh(struct gfn_to_pfn_cache *gpc, gpa_t gpa, unsigned long len)
+static int __kvm_gpc_refresh(struct gfn_to_pfn_cache *gpc, gpa_t gpa,
+                             unsigned long len)
 {
         struct kvm_memslots *slots = kvm_memslots(gpc->kvm);
         unsigned long page_offset = gpa & ~PAGE_MASK;
@@ -331,6 +331,11 @@ out_unlock:
 
         return ret;
 }
+
+int kvm_gpc_refresh(struct gfn_to_pfn_cache *gpc, unsigned long len)
+{
+        return __kvm_gpc_refresh(gpc, gpc->gpa, len);
+}
 EXPORT_SYMBOL_GPL(kvm_gpc_refresh);
 
 void kvm_gpc_init(struct gfn_to_pfn_cache *gpc, struct kvm *kvm,
@@ -371,7 +376,7 @@ int kvm_gpc_activate(struct gfn_to_pfn_cache *gpc, gpa_t gpa, unsigned long len)
                 gpc->active = true;
                 write_unlock_irq(&gpc->lock);
         }
-        return kvm_gpc_refresh(gpc, gpa, len);
+        return __kvm_gpc_refresh(gpc, gpa, len);
 }
 EXPORT_SYMBOL_GPL(kvm_gpc_activate);
```