author     Linus Torvalds <torvalds@linux-foundation.org>  2022-01-16 17:15:14 +0300
committer  Linus Torvalds <torvalds@linux-foundation.org>  2022-01-16 17:15:14 +0300
commit     79e06c4c4950be2abd8ca5d2428a8c915aa62c24
tree       0507ef82aa3c7766b7b19163a0351882b7d7c5b5 /include/linux/kvm_host.h
parent     cb3f09f9afe5286c0aed7a1c5cc71495de166efb
parent     c862dcd199759d4a45e65dab47b03e3e8a144e3a
Merge tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm
Pull kvm updates from Paolo Bonzini:
"RISCV:
- Use common KVM implementation of MMU memory caches
- SBI v0.2 support for Guest
- Initial KVM selftests support
- Fix to avoid spurious virtual interrupts after clearing hideleg CSR
- Update email address for Anup and Atish
ARM:
- Simplification of the 'vcpu first run' by integrating it into KVM's
'pid change' flow
- Refactoring of the FP and SVE state tracking, also leading to a
simpler state and less shared data between EL1 and EL2 in the nVHE
case
- Tidy up the header file usage for the nvhe hyp object
- New HYP unsharing mechanism, finally allowing pages to be unmapped
from the Stage-1 EL2 page-tables
- Various pKVM cleanups around refcounting and sharing
- A couple of vgic fixes for bugs that would trigger once the vcpu
xarray rework is merged, but not sooner
- Add minimal support for ARMv8.7's PMU extension
- Rework kvm_pgtable initialisation ahead of the NV work
- New selftest for IRQ injection
- Teach selftests about the lack of default IPA space and page sizes
- Expand sysreg selftest to deal with Pointer Authentication
- The usual bunch of cleanups and doc updates
s390:
- fix sigp sense/start/stop/inconsistency
- cleanups
x86:
- Clean up some function prototypes more
- improved gfn_to_pfn_cache with proper invalidation, used by Xen
emulation
- add KVM_IRQ_ROUTING_XEN_EVTCHN and event channel delivery
- completely remove potential TOC/TOU races in nested SVM consistency
checks
- update some PMCs on emulated instructions
- Intel AMX support (joint work between Thomas and Intel)
- large MMU cleanups
- module parameter to disable PMU virtualization
- cleanup register cache
- first part of halt handling cleanups
- Hyper-V enlightened MSR bitmap support for nested hypervisors
Generic:
- clean up Makefiles
- introduce CONFIG_HAVE_KVM_DIRTY_RING
- optimize memslot lookup using a tree
- optimize vCPU array usage by converting to xarray"
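The "optimize memslot lookup using a tree" and last-used-slot items above correspond to the include/linux/kvm_host.h changes in the diff further down: a gfn lookup first tries a cached last-used slot and only then searches a structure ordered by base gfn. What follows is a minimal userspace sketch of that lookup idea only; the types and names are invented for illustration, a sorted array stands in for KVM's rb-tree, and the slots-generation check that KVM performs on the cached slot is omitted.

/*
 * Illustrative only: a userspace analogue of "check the last used slot,
 * then search an ordered structure keyed by base gfn".  All names here
 * are made up for the example; KVM uses an rb-tree plus a generation
 * check instead of this sorted array.
 */
#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

struct slot {
	uint64_t base_gfn;
	uint64_t npages;
};

/* Sorted by base_gfn, no overlaps. */
static struct slot slots[] = {
	{ .base_gfn = 0x000, .npages = 0x080 },
	{ .base_gfn = 0x100, .npages = 0x040 },
	{ .base_gfn = 0x200, .npages = 0x100 },
};
static struct slot *last_used;

/* Binary search for the slot whose [base_gfn, base_gfn + npages) holds gfn. */
static struct slot *search_slots(uint64_t gfn)
{
	size_t lo = 0, hi = sizeof(slots) / sizeof(slots[0]);

	while (lo < hi) {
		size_t mid = lo + (hi - lo) / 2;

		if (gfn >= slots[mid].base_gfn + slots[mid].npages)
			lo = mid + 1;
		else if (gfn < slots[mid].base_gfn)
			hi = mid;
		else
			return &slots[mid];
	}
	return NULL;
}

static struct slot *gfn_to_slot(uint64_t gfn)
{
	struct slot *slot = last_used;

	/* Fast path: the previously used slot often matches again. */
	if (slot && gfn >= slot->base_gfn &&
	    gfn < slot->base_gfn + slot->npages)
		return slot;

	slot = search_slots(gfn);
	if (slot)
		last_used = slot;
	return slot;
}

int main(void)
{
	printf("0x210 -> %s\n", gfn_to_slot(0x210) ? "slot found" : "no slot");
	printf("0x090 -> %s\n", gfn_to_slot(0x090) ? "slot found" : "no slot");
	return 0;
}

In KVM itself the cache is kept both per vCPU (vcpu->last_used_slot, validated against last_used_slot_gen) and per struct kvm_memslots (last_used_slot), and the fallback search walks the gfn_tree introduced in the diff below.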
* tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm: (268 commits)
x86/fpu: Fix inline prefix warnings
selftest: kvm: Add amx selftest
selftest: kvm: Move struct kvm_x86_state to header
selftest: kvm: Reorder vcpu_load_state steps for AMX
kvm: x86: Disable interception for IA32_XFD on demand
x86/fpu: Provide fpu_sync_guest_vmexit_xfd_state()
kvm: selftests: Add support for KVM_CAP_XSAVE2
kvm: x86: Add support for getting/setting expanded xstate buffer
x86/fpu: Add uabi_size to guest_fpu
kvm: x86: Add CPUID support for Intel AMX
kvm: x86: Add XCR0 support for Intel AMX
kvm: x86: Disable RDMSR interception of IA32_XFD_ERR
kvm: x86: Emulate IA32_XFD_ERR for guest
kvm: x86: Intercept #NM for saving IA32_XFD_ERR
x86/fpu: Prepare xfd_err in struct fpu_guest
kvm: x86: Add emulation for IA32_XFD
x86/fpu: Provide fpu_update_guest_xfd() for IA32_XFD emulation
kvm: x86: Enable dynamic xfeatures at KVM_SET_CPUID2
x86/fpu: Provide fpu_enable_guest_xfd_features() for KVM
x86/fpu: Add guest support to xfd_enable_feature()
...
Diffstat (limited to 'include/linux/kvm_host.h')
 include/linux/kvm_host.h | 432
 1 file changed, 340 insertions(+), 92 deletions(-)
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 1a91e3a70dc0..d89d564f7c19 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -29,6 +29,10 @@
 #include <linux/refcount.h>
 #include <linux/nospec.h>
 #include <linux/notifier.h>
+#include <linux/hashtable.h>
+#include <linux/interval_tree.h>
+#include <linux/rbtree.h>
+#include <linux/xarray.h>
 #include <asm/signal.h>
 
 #include <linux/kvm.h>
@@ -151,6 +155,7 @@ static inline bool is_error_page(struct page *page)
 #define KVM_REQ_UNBLOCK           2
 #define KVM_REQ_UNHALT            3
 #define KVM_REQ_VM_DEAD           (4 | KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
+#define KVM_REQ_GPC_INVALIDATE    (5 | KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
 #define KVM_REQUEST_ARCH_BASE     8
 
 #define KVM_ARCH_REQ_FLAGS(nr, flags) ({ \
@@ -310,7 +315,9 @@ struct kvm_vcpu {
 	struct mutex mutex;
 	struct kvm_run *run;
 
+#ifndef __KVM_HAVE_ARCH_WQP
 	struct rcuwait wait;
+#endif
 	struct pid __rcu *pid;
 	int sigset_active;
 	sigset_t sigset;
@@ -355,11 +362,13 @@ struct kvm_vcpu {
 	struct kvm_dirty_ring dirty_ring;
 
 	/*
-	 * The index of the most recently used memslot by this vCPU. It's ok
-	 * if this becomes stale due to memslot changes since we always check
-	 * it is a valid slot.
+	 * The most recently used memslot by this vCPU and the slots generation
+	 * for which it is valid.
+	 * No wraparound protection is needed since generations won't overflow in
+	 * thousands of years, even assuming 1M memslot operations per second.
 	 */
-	int last_used_slot;
+	struct kvm_memory_slot *last_used_slot;
+	u64 last_used_slot_gen;
 };
 
 /* must be called with irqs disabled */
@@ -424,7 +433,26 @@ static inline int kvm_vcpu_exiting_guest_mode(struct kvm_vcpu *vcpu)
  */
 #define KVM_MEM_MAX_NR_PAGES ((1UL << 31) - 1)
 
+/*
+ * Since at idle each memslot belongs to two memslot sets it has to contain
+ * two embedded nodes for each data structure that it forms a part of.
+ *
+ * Two memslot sets (one active and one inactive) are necessary so the VM
+ * continues to run on one memslot set while the other is being modified.
+ *
+ * These two memslot sets normally point to the same set of memslots.
+ * They can, however, be desynchronized when performing a memslot management
+ * operation by replacing the memslot to be modified by its copy.
+ * After the operation is complete, both memslot sets once again point to
+ * the same, common set of memslot data.
+ *
+ * The memslots themselves are independent of each other so they can be
+ * individually added or deleted.
+ */
 struct kvm_memory_slot {
+	struct hlist_node id_node[2];
+	struct interval_tree_node hva_node[2];
+	struct rb_node gfn_node[2];
 	gfn_t base_gfn;
 	unsigned long npages;
 	unsigned long *dirty_bitmap;
@@ -435,7 +463,7 @@ struct kvm_memory_slot {
 	u16 as_id;
 };
 
-static inline bool kvm_slot_dirty_track_enabled(struct kvm_memory_slot *slot)
+static inline bool kvm_slot_dirty_track_enabled(const struct kvm_memory_slot *slot)
 {
 	return slot->flags & KVM_MEM_LOG_DIRTY_PAGES;
 }
@@ -469,6 +497,12 @@ struct kvm_hv_sint {
 	u32 sint;
 };
 
+struct kvm_xen_evtchn {
+	u32 port;
+	u32 vcpu;
+	u32 priority;
+};
+
 struct kvm_kernel_irq_routing_entry {
 	u32 gsi;
 	u32 type;
@@ -489,6 +523,7 @@ struct kvm_kernel_irq_routing_entry {
 		} msi;
 		struct kvm_s390_adapter_int adapter;
 		struct kvm_hv_sint hv_sint;
+		struct kvm_xen_evtchn xen_evtchn;
 	};
 	struct hlist_node link;
 };
@@ -519,18 +554,21 @@ static inline int kvm_arch_vcpu_memslots_id(struct kvm_vcpu *vcpu)
 }
 #endif
 
-/*
- * Note:
- * memslots are not sorted by id anymore, please use id_to_memslot()
- * to get the memslot by its id.
- */
 struct kvm_memslots {
 	u64 generation;
-	/* The mapping table from slot id to the index in memslots[]. */
-	short id_to_index[KVM_MEM_SLOTS_NUM];
-	atomic_t last_used_slot;
-	int used_slots;
-	struct kvm_memory_slot memslots[];
+	atomic_long_t last_used_slot;
+	struct rb_root_cached hva_tree;
+	struct rb_root gfn_tree;
+	/*
+	 * The mapping table from slot id to memslot.
+	 *
+	 * 7-bit bucket count matches the size of the old id to index array for
+	 * 512 slots, while giving good performance with this slot count.
+	 * Higher bucket counts bring only small performance improvements but
+	 * always result in higher memory usage (even for lower memslot counts).
+	 */
+	DECLARE_HASHTABLE(id_hash, 7);
+	int node_idx;
 };
 
 struct kvm {
@@ -551,14 +589,22 @@ struct kvm {
 	 */
 	struct mutex slots_arch_lock;
 	struct mm_struct *mm; /* userspace tied to this vm */
+	unsigned long nr_memslot_pages;
+	/* The two memslot sets - active and inactive (per address space) */
+	struct kvm_memslots __memslots[KVM_ADDRESS_SPACE_NUM][2];
+	/* The current active memslot set for each address space */
 	struct kvm_memslots __rcu *memslots[KVM_ADDRESS_SPACE_NUM];
-	struct kvm_vcpu *vcpus[KVM_MAX_VCPUS];
+	struct xarray vcpu_array;
 
 	/* Used to wait for completion of MMU notifiers. */
 	spinlock_t mn_invalidate_lock;
 	unsigned long mn_active_invalidate_count;
 	struct rcuwait mn_memslots_update_rcuwait;
 
+	/* For management / invalidation of gfn_to_pfn_caches */
+	spinlock_t gpc_lock;
+	struct list_head gpc_list;
+
 	/*
 	 * created_vcpus is protected by kvm->lock, and is incremented
 	 * at the beginning of KVM_CREATE_VCPU. online_vcpus is only
@@ -701,19 +747,17 @@ static inline struct kvm_vcpu *kvm_get_vcpu(struct kvm *kvm, int i)
 
 	/* Pairs with smp_wmb() in kvm_vm_ioctl_create_vcpu.  */
 	smp_rmb();
-	return kvm->vcpus[i];
+	return xa_load(&kvm->vcpu_array, i);
 }
 
-#define kvm_for_each_vcpu(idx, vcpup, kvm) \
-	for (idx = 0; \
-	     idx < atomic_read(&kvm->online_vcpus) && \
-	     (vcpup = kvm_get_vcpu(kvm, idx)) != NULL; \
-	     idx++)
+#define kvm_for_each_vcpu(idx, vcpup, kvm)		   \
+	xa_for_each_range(&kvm->vcpu_array, idx, vcpup, 0, \
+			  (atomic_read(&kvm->online_vcpus) - 1))
 
 static inline struct kvm_vcpu *kvm_get_vcpu_by_id(struct kvm *kvm, int id)
 {
 	struct kvm_vcpu *vcpu = NULL;
-	int i;
+	unsigned long i;
 
 	if (id < 0)
 		return NULL;
@@ -727,13 +771,12 @@ static inline struct kvm_vcpu *kvm_get_vcpu_by_id(struct kvm *kvm, int id)
 	return NULL;
 }
 
-#define kvm_for_each_memslot(memslot, slots)				\
-	for (memslot = &slots->memslots[0];				\
-	     memslot < slots->memslots + slots->used_slots; memslot++)	\
-		if (WARN_ON_ONCE(!memslot->npages)) {			\
-		} else
+static inline int kvm_vcpu_get_idx(struct kvm_vcpu *vcpu)
+{
+	return vcpu->vcpu_idx;
+}
 
-void kvm_vcpu_destroy(struct kvm_vcpu *vcpu);
+void kvm_destroy_vcpus(struct kvm *kvm);
 
 void vcpu_load(struct kvm_vcpu *vcpu);
 void vcpu_put(struct kvm_vcpu *vcpu);
@@ -793,21 +836,124 @@ static inline struct kvm_memslots *kvm_vcpu_memslots(struct kvm_vcpu *vcpu)
 	return __kvm_memslots(vcpu->kvm, as_id);
 }
 
+static inline bool kvm_memslots_empty(struct kvm_memslots *slots)
+{
+	return RB_EMPTY_ROOT(&slots->gfn_tree);
+}
+
+#define kvm_for_each_memslot(memslot, bkt, slots)			      \
+	hash_for_each(slots->id_hash, bkt, memslot, id_node[slots->node_idx]) \
+		if (WARN_ON_ONCE(!memslot->npages)) {			      \
+		} else
+
 static inline
 struct kvm_memory_slot *id_to_memslot(struct kvm_memslots *slots, int id)
 {
-	int index = slots->id_to_index[id];
 	struct kvm_memory_slot *slot;
+	int idx = slots->node_idx;
 
-	if (index < 0)
-		return NULL;
+	hash_for_each_possible(slots->id_hash, slot, id_node[idx], id) {
+		if (slot->id == id)
+			return slot;
+	}
+
+	return NULL;
+}
+
+/* Iterator used for walking memslots that overlap a gfn range. */
+struct kvm_memslot_iter {
+	struct kvm_memslots *slots;
+	struct rb_node *node;
+	struct kvm_memory_slot *slot;
+};
 
-	slot = &slots->memslots[index];
+static inline void kvm_memslot_iter_next(struct kvm_memslot_iter *iter)
+{
+	iter->node = rb_next(iter->node);
+	if (!iter->node)
+		return;
 
-	WARN_ON(slot->id != id);
-	return slot;
+	iter->slot = container_of(iter->node, struct kvm_memory_slot, gfn_node[iter->slots->node_idx]);
 }
 
+static inline void kvm_memslot_iter_start(struct kvm_memslot_iter *iter,
+					  struct kvm_memslots *slots,
+					  gfn_t start)
+{
+	int idx = slots->node_idx;
+	struct rb_node *tmp;
+	struct kvm_memory_slot *slot;
+
+	iter->slots = slots;
+
+	/*
+	 * Find the so called "upper bound" of a key - the first node that has
+	 * its key strictly greater than the searched one (the start gfn in our case).
+	 */
+	iter->node = NULL;
+	for (tmp = slots->gfn_tree.rb_node; tmp; ) {
+		slot = container_of(tmp, struct kvm_memory_slot, gfn_node[idx]);
+		if (start < slot->base_gfn) {
+			iter->node = tmp;
+			tmp = tmp->rb_left;
+		} else {
+			tmp = tmp->rb_right;
+		}
+	}
+
+	/*
+	 * Find the slot with the lowest gfn that can possibly intersect with
+	 * the range, so we'll ideally have slot start <= range start
+	 */
+	if (iter->node) {
+		/*
+		 * A NULL previous node means that the very first slot
+		 * already has a higher start gfn.
+		 * In this case slot start > range start.
+		 */
+		tmp = rb_prev(iter->node);
+		if (tmp)
+			iter->node = tmp;
+	} else {
+		/* a NULL node below means no slots */
+		iter->node = rb_last(&slots->gfn_tree);
+	}
+
+	if (iter->node) {
+		iter->slot = container_of(iter->node, struct kvm_memory_slot, gfn_node[idx]);
+
+		/*
+		 * It is possible in the slot start < range start case that the
+		 * found slot ends before or at range start (slot end <= range start)
+		 * and so it does not overlap the requested range.
+		 *
+		 * In such non-overlapping case the next slot (if it exists) will
+		 * already have slot start > range start, otherwise the logic above
+		 * would have found it instead of the current slot.
+		 */
+		if (iter->slot->base_gfn + iter->slot->npages <= start)
+			kvm_memslot_iter_next(iter);
+	}
+}
+
+static inline bool kvm_memslot_iter_is_valid(struct kvm_memslot_iter *iter, gfn_t end)
+{
+	if (!iter->node)
+		return false;
+
+	/*
+	 * If this slot starts beyond or at the end of the range so does
+	 * every next one
	 */
+	return iter->slot->base_gfn < end;
+}
+
+/* Iterate over each memslot at least partially intersecting [start, end) range */
+#define kvm_for_each_memslot_in_gfn_range(iter, slots, start, end)	\
+	for (kvm_memslot_iter_start(iter, slots, start);		\
+	     kvm_memslot_iter_is_valid(iter, end);			\
+	     kvm_memslot_iter_next(iter))
+
 /*
  * KVM_SET_USER_MEMORY_REGION ioctl allows the following operations:
  * - create a new memory slot
@@ -833,11 +979,10 @@ int __kvm_set_memory_region(struct kvm *kvm,
 void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *slot);
 void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen);
 int kvm_arch_prepare_memory_region(struct kvm *kvm,
-				struct kvm_memory_slot *memslot,
-				const struct kvm_userspace_memory_region *mem,
+				const struct kvm_memory_slot *old,
+				struct kvm_memory_slot *new,
 				enum kvm_mr_change change);
 void kvm_arch_commit_memory_region(struct kvm *kvm,
-				const struct kvm_userspace_memory_region *mem,
 				struct kvm_memory_slot *old,
 				const struct kvm_memory_slot *new,
 				enum kvm_mr_change change);
@@ -863,9 +1008,9 @@ void kvm_set_page_accessed(struct page *page);
 
 kvm_pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn);
 kvm_pfn_t gfn_to_pfn_prot(struct kvm *kvm, gfn_t gfn, bool write_fault,
 			  bool *writable);
-kvm_pfn_t gfn_to_pfn_memslot(struct kvm_memory_slot *slot, gfn_t gfn);
-kvm_pfn_t gfn_to_pfn_memslot_atomic(struct kvm_memory_slot *slot, gfn_t gfn);
-kvm_pfn_t __gfn_to_pfn_memslot(struct kvm_memory_slot *slot, gfn_t gfn,
+kvm_pfn_t gfn_to_pfn_memslot(const struct kvm_memory_slot *slot, gfn_t gfn);
+kvm_pfn_t gfn_to_pfn_memslot_atomic(const struct kvm_memory_slot *slot, gfn_t gfn);
+kvm_pfn_t __gfn_to_pfn_memslot(const struct kvm_memory_slot *slot, gfn_t gfn,
 			       bool atomic, bool *async, bool write_fault,
 			       bool *writable, hva_t *hva);
 
@@ -942,7 +1087,7 @@ struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn);
 bool kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn);
 bool kvm_vcpu_is_visible_gfn(struct kvm_vcpu *vcpu, gfn_t gfn);
 unsigned long kvm_host_page_size(struct kvm_vcpu *vcpu, gfn_t gfn);
-void mark_page_dirty_in_slot(struct kvm *kvm, struct kvm_memory_slot *memslot, gfn_t gfn);
+void mark_page_dirty_in_slot(struct kvm *kvm, const struct kvm_memory_slot *memslot, gfn_t gfn);
 void mark_page_dirty(struct kvm *kvm, gfn_t gfn);
 
 struct kvm_memslots *kvm_vcpu_memslots(struct kvm_vcpu *vcpu);
@@ -966,10 +1111,109 @@ int kvm_vcpu_write_guest(struct kvm_vcpu *vcpu, gpa_t gpa, const void *data,
 			 unsigned long len);
 void kvm_vcpu_mark_page_dirty(struct kvm_vcpu *vcpu, gfn_t gfn);
 
+/**
+ * kvm_gfn_to_pfn_cache_init - prepare a cached kernel mapping and HPA for a
+ *                             given guest physical address.
+ *
+ * @kvm:           pointer to kvm instance.
+ * @gpc:           struct gfn_to_pfn_cache object.
+ * @vcpu:          vCPU to be used for marking pages dirty and to be woken on
+ *                 invalidation.
+ * @guest_uses_pa: indicates that the resulting host physical PFN is used while
+ *                 @vcpu is IN_GUEST_MODE so invalidations should wake it.
+ * @kernel_map:    requests a kernel virtual mapping (kmap / memremap).
+ * @gpa:           guest physical address to map.
+ * @len:           sanity check; the range being access must fit a single page.
+ * @dirty:         mark the cache dirty immediately.
+ *
+ * @return:        0 for success.
+ *                 -EINVAL for a mapping which would cross a page boundary.
+ *                 -EFAULT for an untranslatable guest physical address.
+ *
+ * This primes a gfn_to_pfn_cache and links it into the @kvm's list for
+ * invalidations to be processed. Invalidation callbacks to @vcpu using
+ * %KVM_REQ_GPC_INVALIDATE will occur only for MMU notifiers, not for KVM
+ * memslot changes. Callers are required to use kvm_gfn_to_pfn_cache_check()
+ * to ensure that the cache is valid before accessing the target page.
+ */
+int kvm_gfn_to_pfn_cache_init(struct kvm *kvm, struct gfn_to_pfn_cache *gpc,
+			      struct kvm_vcpu *vcpu, bool guest_uses_pa,
+			      bool kernel_map, gpa_t gpa, unsigned long len,
+			      bool dirty);
+
+/**
+ * kvm_gfn_to_pfn_cache_check - check validity of a gfn_to_pfn_cache.
+ *
+ * @kvm:           pointer to kvm instance.
+ * @gpc:           struct gfn_to_pfn_cache object.
+ * @gpa:           current guest physical address to map.
+ * @len:           sanity check; the range being access must fit a single page.
+ * @dirty:         mark the cache dirty immediately.
+ *
+ * @return:        %true if the cache is still valid and the address matches.
+ *                 %false if the cache is not valid.
+ *
+ * Callers outside IN_GUEST_MODE context should hold a read lock on @gpc->lock
+ * while calling this function, and then continue to hold the lock until the
+ * access is complete.
+ *
+ * Callers in IN_GUEST_MODE may do so without locking, although they should
+ * still hold a read lock on kvm->srcu for the memslot checks.
+ */
+bool kvm_gfn_to_pfn_cache_check(struct kvm *kvm, struct gfn_to_pfn_cache *gpc,
+				gpa_t gpa, unsigned long len);
+
+/**
+ * kvm_gfn_to_pfn_cache_refresh - update a previously initialized cache.
+ *
+ * @kvm:           pointer to kvm instance.
+ * @gpc:           struct gfn_to_pfn_cache object.
+ * @gpa:           updated guest physical address to map.
+ * @len:           sanity check; the range being access must fit a single page.
+ * @dirty:         mark the cache dirty immediately.
+ *
+ * @return:        0 for success.
+ *                 -EINVAL for a mapping which would cross a page boundary.
+ *                 -EFAULT for an untranslatable guest physical address.
+ *
+ * This will attempt to refresh a gfn_to_pfn_cache. Note that a successful
+ * return from this function does not mean the page can be immediately
+ * accessed because it may have raced with an invalidation. Callers must
+ * still lock and check the cache status, as this function does not return
+ * with the lock still held to permit access.
+ */
+int kvm_gfn_to_pfn_cache_refresh(struct kvm *kvm, struct gfn_to_pfn_cache *gpc,
+				 gpa_t gpa, unsigned long len, bool dirty);
+
+/**
+ * kvm_gfn_to_pfn_cache_unmap - temporarily unmap a gfn_to_pfn_cache.
+ *
+ * @kvm:           pointer to kvm instance.
+ * @gpc:           struct gfn_to_pfn_cache object.
+ *
+ * This unmaps the referenced page and marks it dirty, if appropriate. The
+ * cache is left in the invalid state but at least the mapping from GPA to
+ * userspace HVA will remain cached and can be reused on a subsequent
+ * refresh.
+ */
+void kvm_gfn_to_pfn_cache_unmap(struct kvm *kvm, struct gfn_to_pfn_cache *gpc);
+
+/**
+ * kvm_gfn_to_pfn_cache_destroy - destroy and unlink a gfn_to_pfn_cache.
+ *
+ * @kvm:           pointer to kvm instance.
+ * @gpc:           struct gfn_to_pfn_cache object.
+ *
+ * This removes a cache from the @kvm's list to be processed on MMU notifier
+ * invocation.
+ */
+void kvm_gfn_to_pfn_cache_destroy(struct kvm *kvm, struct gfn_to_pfn_cache *gpc);
+
 void kvm_sigset_activate(struct kvm_vcpu *vcpu);
 void kvm_sigset_deactivate(struct kvm_vcpu *vcpu);
 
-void kvm_vcpu_block(struct kvm_vcpu *vcpu);
+void kvm_vcpu_halt(struct kvm_vcpu *vcpu);
+bool kvm_vcpu_block(struct kvm_vcpu *vcpu);
 void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu);
 void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu);
 bool kvm_vcpu_wake_up(struct kvm_vcpu *vcpu);
@@ -1152,6 +1396,20 @@ static inline struct rcuwait *kvm_arch_vcpu_get_wait(struct kvm_vcpu *vcpu)
 #endif
 }
 
+/*
+ * Wake a vCPU if necessary, but don't do any stats/metadata updates.  Returns
+ * true if the vCPU was blocking and was awakened, false otherwise.
+ */
+static inline bool __kvm_vcpu_wake_up(struct kvm_vcpu *vcpu)
+{
+	return !!rcuwait_wake_up(kvm_arch_vcpu_get_wait(vcpu));
+}
+
+static inline bool kvm_vcpu_is_blocking(struct kvm_vcpu *vcpu)
+{
+	return rcuwait_active(kvm_arch_vcpu_get_wait(vcpu));
+}
+
 #ifdef __KVM_HAVE_ARCH_INTC_INITIALIZED
 /*
  * returns true if the virtual interrupt controller is initialized and
@@ -1184,7 +1442,6 @@ int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu);
 
 bool kvm_is_reserved_pfn(kvm_pfn_t pfn);
 bool kvm_is_zone_device_pfn(kvm_pfn_t pfn);
-bool kvm_is_transparent_hugepage(kvm_pfn_t pfn);
 
 struct kvm_irq_ack_notifier {
 	struct hlist_node link;
@@ -1215,25 +1472,15 @@ void kvm_free_irq_source_id(struct kvm *kvm, int irq_source_id);
 bool kvm_arch_irqfd_allowed(struct kvm *kvm, struct kvm_irqfd *args);
 
 /*
- * Returns a pointer to the memslot at slot_index if it contains gfn.
+ * Returns a pointer to the memslot if it contains gfn.
  * Otherwise returns NULL.
  */
 static inline struct kvm_memory_slot *
-try_get_memslot(struct kvm_memslots *slots, int slot_index, gfn_t gfn)
+try_get_memslot(struct kvm_memory_slot *slot, gfn_t gfn)
 {
-	struct kvm_memory_slot *slot;
-
-	if (slot_index < 0 || slot_index >= slots->used_slots)
+	if (!slot)
 		return NULL;
 
-	/*
-	 * slot_index can come from vcpu->last_used_slot which is not kept
-	 * in sync with userspace-controllable memslot deletion. So use nospec
-	 * to prevent the CPU from speculating past the end of memslots[].
-	 */
-	slot_index = array_index_nospec(slot_index, slots->used_slots);
-	slot = &slots->memslots[slot_index];
-
 	if (gfn >= slot->base_gfn && gfn < slot->base_gfn + slot->npages)
 		return slot;
 	else
@@ -1241,63 +1488,63 @@ try_get_memslot(struct kvm_memslots *slots, int slot_index, gfn_t gfn)
 }
 
 /*
- * Returns a pointer to the memslot that contains gfn and records the index of
- * the slot in index. Otherwise returns NULL.
+ * Returns a pointer to the memslot that contains gfn. Otherwise returns NULL.
  *
- * IMPORTANT: Slots are sorted from highest GFN to lowest GFN!
+ * With "approx" set returns the memslot also when the address falls
+ * in a hole. In that case one of the memslots bordering the hole is
+ * returned.
 */
 static inline struct kvm_memory_slot *
-search_memslots(struct kvm_memslots *slots, gfn_t gfn, int *index)
+search_memslots(struct kvm_memslots *slots, gfn_t gfn, bool approx)
 {
-	int start = 0, end = slots->used_slots;
-	struct kvm_memory_slot *memslots = slots->memslots;
 	struct kvm_memory_slot *slot;
-
-	if (unlikely(!slots->used_slots))
-		return NULL;
-
-	while (start < end) {
-		int slot = start + (end - start) / 2;
-
-		if (gfn >= memslots[slot].base_gfn)
-			end = slot;
-		else
-			start = slot + 1;
-	}
-
-	slot = try_get_memslot(slots, start, gfn);
-	if (slot) {
-		*index = start;
-		return slot;
+	struct rb_node *node;
+	int idx = slots->node_idx;
+
+	slot = NULL;
+	for (node = slots->gfn_tree.rb_node; node; ) {
+		slot = container_of(node, struct kvm_memory_slot, gfn_node[idx]);
+		if (gfn >= slot->base_gfn) {
+			if (gfn < slot->base_gfn + slot->npages)
+				return slot;
+			node = node->rb_right;
+		} else
+			node = node->rb_left;
 	}
 
-	return NULL;
+	return approx ? slot : NULL;
 }
 
-/*
- * __gfn_to_memslot() and its descendants are here because it is called from
- * non-modular code in arch/powerpc/kvm/book3s_64_vio{,_hv}.c. gfn_to_memslot()
- * itself isn't here as an inline because that would bloat other code too much.
- */
 static inline struct kvm_memory_slot *
-__gfn_to_memslot(struct kvm_memslots *slots, gfn_t gfn)
+____gfn_to_memslot(struct kvm_memslots *slots, gfn_t gfn, bool approx)
 {
 	struct kvm_memory_slot *slot;
-	int slot_index = atomic_read(&slots->last_used_slot);
 
-	slot = try_get_memslot(slots, slot_index, gfn);
+	slot = (struct kvm_memory_slot *)atomic_long_read(&slots->last_used_slot);
+	slot = try_get_memslot(slot, gfn);
 	if (slot)
 		return slot;
 
-	slot = search_memslots(slots, gfn, &slot_index);
+	slot = search_memslots(slots, gfn, approx);
 	if (slot) {
-		atomic_set(&slots->last_used_slot, slot_index);
+		atomic_long_set(&slots->last_used_slot, (unsigned long)slot);
 		return slot;
 	}
 
 	return NULL;
 }
 
+/*
+ * __gfn_to_memslot() and its descendants are here to allow arch code to inline
+ * the lookups in hot paths. gfn_to_memslot() itself isn't here as an inline
+ * because that would bloat other code too much.
+ */
+static inline struct kvm_memory_slot *
+__gfn_to_memslot(struct kvm_memslots *slots, gfn_t gfn)
+{
+	return ____gfn_to_memslot(slots, gfn, false);
+}
+
 static inline unsigned long
 __gfn_to_hva_memslot(const struct kvm_memory_slot *slot, gfn_t gfn)
 {
@@ -1473,7 +1720,8 @@ struct _kvm_stats_desc {
 	STATS_DESC_LOGHIST_TIME_NSEC(VCPU_GENERIC, halt_poll_fail_hist,	       \
 			HALT_POLL_HIST_COUNT),				       \
 	STATS_DESC_LOGHIST_TIME_NSEC(VCPU_GENERIC, halt_wait_hist,	       \
-			HALT_POLL_HIST_COUNT)
+			HALT_POLL_HIST_COUNT),				       \
+	STATS_DESC_ICOUNTER(VCPU_GENERIC, blocking)
 
 extern struct dentry *kvm_debugfs_dir;
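The kernel-doc above for kvm_gfn_to_pfn_cache_check() and kvm_gfn_to_pfn_cache_refresh() spells out the expected calling pattern: hold a read lock on gpc->lock around the access, validate the cache with the check helper, and refresh with the lock dropped whenever validation fails. The sketch below follows that pattern roughly as an in-kernel caller might; it is not buildable outside a kernel tree, and the gpc->gpa and gpc->khva field names used for the access are assumptions for the example rather than something taken from this header.

/*
 * Sketch of the documented check/refresh protocol for a gfn_to_pfn_cache
 * user inside KVM.  Only the locking and validation pattern is the point;
 * gpc->gpa, gpc->khva and the PAGE_SIZE length are assumed for the example.
 */
#include <linux/kvm_host.h>

static int demo_write_to_cached_page(struct kvm *kvm,
				     struct gfn_to_pfn_cache *gpc,
				     u32 value)
{
	read_lock(&gpc->lock);

	/* Re-validate after every refresh; an invalidation may race. */
	while (!kvm_gfn_to_pfn_cache_check(kvm, gpc, gpc->gpa, PAGE_SIZE)) {
		read_unlock(&gpc->lock);

		if (kvm_gfn_to_pfn_cache_refresh(kvm, gpc, gpc->gpa,
						 PAGE_SIZE, true))
			return -EFAULT;

		read_lock(&gpc->lock);
	}

	/* Safe to touch the mapping while the read lock is held. */
	*(u32 *)gpc->khva = value;

	read_unlock(&gpc->lock);
	return 0;
}

The loop re-checks after every refresh because, as the refresh documentation notes, a successful return can still race with an invalidation before the caller retakes the lock.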