Diffstat (limited to 'virt/kvm/kvm_main.c')
-rw-r--r--	virt/kvm/kvm_main.c	84
1 file changed, 66 insertions(+), 18 deletions(-)
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 60e5e4612b0b..5225052aebc1 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -5,7 +5,7 @@
  * machines without emulation or binary translation.
  *
  * Copyright (C) 2006 Qumranet, Inc.
- * Copyright 2010 Red Hat, Inc. and/or its affilates.
+ * Copyright 2010 Red Hat, Inc. and/or its affiliates.
  *
  * Authors:
  *   Avi Kivity   <avi@qumranet.com>
@@ -705,14 +705,12 @@ skip_lpage:
 	if (r)
 		goto out_free;
 
-#ifdef CONFIG_DMAR
 	/* map the pages in iommu page table */
 	if (npages) {
 		r = kvm_iommu_map_pages(kvm, &new);
 		if (r)
 			goto out_free;
 	}
-#endif
 
 	r = -ENOMEM;
 	slots = kzalloc(sizeof(struct kvm_memslots), GFP_KERNEL);
@@ -927,35 +925,46 @@ int memslot_id(struct kvm *kvm, gfn_t gfn)
 	return memslot - slots->memslots;
 }
 
-static unsigned long gfn_to_hva_memslot(struct kvm_memory_slot *slot, gfn_t gfn)
-{
-	return slot->userspace_addr + (gfn - slot->base_gfn) * PAGE_SIZE;
-}
-
-unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn)
+static unsigned long gfn_to_hva_many(struct kvm *kvm, gfn_t gfn,
+				     gfn_t *nr_pages)
 {
 	struct kvm_memory_slot *slot;
 
 	slot = gfn_to_memslot(kvm, gfn);
 	if (!slot || slot->flags & KVM_MEMSLOT_INVALID)
 		return bad_hva();
+
+	if (nr_pages)
+		*nr_pages = slot->npages - (gfn - slot->base_gfn);
+
 	return gfn_to_hva_memslot(slot, gfn);
 }
+
+unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn)
+{
+	return gfn_to_hva_many(kvm, gfn, NULL);
+}
 EXPORT_SYMBOL_GPL(gfn_to_hva);
 
-static pfn_t hva_to_pfn(struct kvm *kvm, unsigned long addr)
+static pfn_t hva_to_pfn(struct kvm *kvm, unsigned long addr, bool atomic)
 {
 	struct page *page[1];
 	int npages;
 	pfn_t pfn;
 
-	might_sleep();
-
-	npages = get_user_pages_fast(addr, 1, 1, page);
+	if (atomic)
+		npages = __get_user_pages_fast(addr, 1, 1, page);
+	else {
+		might_sleep();
+		npages = get_user_pages_fast(addr, 1, 1, page);
+	}
 
 	if (unlikely(npages != 1)) {
 		struct vm_area_struct *vma;
 
+		if (atomic)
+			goto return_fault_page;
+
 		down_read(&current->mm->mmap_sem);
 		if (is_hwpoison_address(addr)) {
 			up_read(&current->mm->mmap_sem);
@@ -968,6 +977,7 @@ static pfn_t hva_to_pfn(struct kvm *kvm, unsigned long addr)
 		if (vma == NULL || addr < vma->vm_start ||
 		    !(vma->vm_flags & VM_PFNMAP)) {
 			up_read(&current->mm->mmap_sem);
+return_fault_page:
 			get_page(fault_page);
 			return page_to_pfn(fault_page);
 		}
@@ -981,7 +991,13 @@ static pfn_t hva_to_pfn(struct kvm *kvm, unsigned long addr)
 	return pfn;
 }
 
-pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn)
+pfn_t hva_to_pfn_atomic(struct kvm *kvm, unsigned long addr)
+{
+	return hva_to_pfn(kvm, addr, true);
+}
+EXPORT_SYMBOL_GPL(hva_to_pfn_atomic);
+
+static pfn_t __gfn_to_pfn(struct kvm *kvm, gfn_t gfn, bool atomic)
 {
 	unsigned long addr;
 
@@ -991,7 +1007,18 @@ pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn)
 		return page_to_pfn(bad_page);
 	}
 
-	return hva_to_pfn(kvm, addr);
+	return hva_to_pfn(kvm, addr, atomic);
+}
+
+pfn_t gfn_to_pfn_atomic(struct kvm *kvm, gfn_t gfn)
+{
+	return __gfn_to_pfn(kvm, gfn, true);
+}
+EXPORT_SYMBOL_GPL(gfn_to_pfn_atomic);
+
+pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn)
+{
+	return __gfn_to_pfn(kvm, gfn, false);
 }
 EXPORT_SYMBOL_GPL(gfn_to_pfn);
 
@@ -999,9 +1026,26 @@ pfn_t gfn_to_pfn_memslot(struct kvm *kvm,
 			 struct kvm_memory_slot *slot, gfn_t gfn)
 {
 	unsigned long addr = gfn_to_hva_memslot(slot, gfn);
-	return hva_to_pfn(kvm, addr);
+	return hva_to_pfn(kvm, addr, false);
 }
 
+int gfn_to_page_many_atomic(struct kvm *kvm, gfn_t gfn, struct page **pages,
+			    int nr_pages)
+{
+	unsigned long addr;
+	gfn_t entry;
+
+	addr = gfn_to_hva_many(kvm, gfn, &entry);
+	if (kvm_is_error_hva(addr))
+		return -1;
+
+	if (entry < nr_pages)
+		return 0;
+
+	return __get_user_pages_fast(addr, nr_pages, 1, pages);
+}
+EXPORT_SYMBOL_GPL(gfn_to_page_many_atomic);
+
 struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn)
 {
 	pfn_t pfn;
@@ -1964,7 +2008,9 @@ static int kvm_cpu_hotplug(struct notifier_block *notifier, unsigned long val,
 	case CPU_STARTING:
 		printk(KERN_INFO "kvm: enabling virtualization on CPU%d\n",
 		       cpu);
+		spin_lock(&kvm_lock);
 		hardware_enable(NULL);
+		spin_unlock(&kvm_lock);
 		break;
 	}
 	return NOTIFY_OK;
@@ -1977,7 +2023,7 @@ asmlinkage void kvm_handle_fault_on_reboot(void)
 		/* spin while reset goes on */
 		local_irq_enable();
 		while (true)
-			;
+			cpu_relax();
 	}
 	/* Fault while not rebooting.  We want the trace. */
 	BUG();
@@ -2171,8 +2217,10 @@ static int kvm_suspend(struct sys_device *dev, pm_message_t state)
 
 static int kvm_resume(struct sys_device *dev)
 {
-	if (kvm_usage_count)
+	if (kvm_usage_count) {
+		WARN_ON(spin_is_locked(&kvm_lock));
 		hardware_enable(NULL);
+	}
 	return 0;
 }
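The new gfn_to_page_many_atomic() helper pins a run of consecutive guest pages in a single __get_user_pages_fast() call, so it can be used where sleeping is not allowed. Below is a minimal usage sketch; prefetch_gfn_range() and PREFETCH_NR are made up for illustration and are not part of this patch:

	#define PREFETCH_NR 8	/* illustrative batch size, not from this patch */

	/*
	 * Hypothetical caller: pin up to PREFETCH_NR consecutive guest
	 * pages starting at gfn without sleeping (e.g. with a spinlock
	 * held).  gfn_to_page_many_atomic() returns -1 for a bad gfn,
	 * 0 when the run does not fit inside the memslot, or the number
	 * of pages actually pinned.
	 */
	static int prefetch_gfn_range(struct kvm *kvm, gfn_t gfn)
	{
		struct page *pages[PREFETCH_NR];
		int i, nr;

		nr = gfn_to_page_many_atomic(kvm, gfn, pages, PREFETCH_NR);
		if (nr <= 0)
			return nr;

		for (i = 0; i < nr; i++) {
			/* ... consume pages[i] ... */
			put_page(pages[i]);	/* drop the gup reference */
		}
		return nr;
	}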
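Similarly, gfn_to_pfn_atomic() only attempts __get_user_pages_fast(); when the page is not already faulted in, it returns the pfn of fault_page rather than blocking in get_user_pages(), which makes it safe in atomic context. Another sketch with a hypothetical caller:

	/*
	 * Hypothetical caller: resolve a guest frame while holding a
	 * spinlock.  The atomic lookup never sleeps; callers must cope
	 * with fault_page/bad_page results instead of relying on demand
	 * faulting.
	 */
	static pfn_t lookup_pfn_locked(struct kvm *kvm, gfn_t gfn)
	{
		pfn_t pfn;

		spin_lock(&kvm->mmu_lock);
		pfn = gfn_to_pfn_atomic(kvm, gfn);
		spin_unlock(&kvm->mmu_lock);

		return pfn;
	}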