Diffstat (limited to 'virt')
-rw-r--r--  virt/kvm/arm/arch_timer.c      | 40
-rw-r--r--  virt/kvm/arm/arm.c             |  2
-rw-r--r--  virt/kvm/arm/mmio.c            |  6
-rw-r--r--  virt/kvm/arm/mmu.c             | 12
-rw-r--r--  virt/kvm/arm/vgic/vgic-init.c  |  8
-rw-r--r--  virt/kvm/arm/vgic/vgic-v4.c    |  2
-rw-r--r--  virt/kvm/eventfd.c             |  4
-rw-r--r--  virt/kvm/kvm_main.c            | 52
8 files changed, 60 insertions(+), 66 deletions(-)
diff --git a/virt/kvm/arm/arch_timer.c b/virt/kvm/arm/arch_timer.c
index f9555b1e7f15..cc29a8148328 100644
--- a/virt/kvm/arm/arch_timer.c
+++ b/virt/kvm/arm/arch_timer.c
@@ -92,16 +92,23 @@ static irqreturn_t kvm_arch_timer_handler(int irq, void *dev_id)
{
struct kvm_vcpu *vcpu = *(struct kvm_vcpu **)dev_id;
struct arch_timer_context *vtimer;
+ u32 cnt_ctl;
- if (!vcpu) {
- pr_warn_once("Spurious arch timer IRQ on non-VCPU thread\n");
- return IRQ_NONE;
- }
- vtimer = vcpu_vtimer(vcpu);
+ /*
+ * We may see a timer interrupt after vcpu_put() has been called which
+ * sets the CPU's vcpu pointer to NULL, because even though the timer
+ * has been disabled in vtimer_save_state(), the hardware interrupt
+ * signal may not have been retired from the interrupt controller yet.
+ */
+ if (!vcpu)
+ return IRQ_HANDLED;
+ vtimer = vcpu_vtimer(vcpu);
if (!vtimer->irq.level) {
- vtimer->cnt_ctl = read_sysreg_el0(cntv_ctl);
- if (kvm_timer_irq_can_fire(vtimer))
+ cnt_ctl = read_sysreg_el0(cntv_ctl);
+ cnt_ctl &= ARCH_TIMER_CTRL_ENABLE | ARCH_TIMER_CTRL_IT_STAT |
+ ARCH_TIMER_CTRL_IT_MASK;
+ if (cnt_ctl == (ARCH_TIMER_CTRL_ENABLE | ARCH_TIMER_CTRL_IT_STAT))
kvm_timer_update_irq(vcpu, true, vtimer);
}
@@ -355,6 +362,7 @@ static void vtimer_save_state(struct kvm_vcpu *vcpu)
/* Disable the virtual timer */
write_sysreg_el0(0, cntv_ctl);
+ isb();
vtimer->loaded = false;
out:
@@ -720,7 +728,7 @@ static int kvm_timer_dying_cpu(unsigned int cpu)
return 0;
}
-int kvm_timer_hyp_init(void)
+int kvm_timer_hyp_init(bool has_gic)
{
struct arch_timer_kvm_info *info;
int err;
@@ -756,10 +764,13 @@ int kvm_timer_hyp_init(void)
return err;
}
- err = irq_set_vcpu_affinity(host_vtimer_irq, kvm_get_running_vcpus());
- if (err) {
- kvm_err("kvm_arch_timer: error setting vcpu affinity\n");
- goto out_free_irq;
+ if (has_gic) {
+ err = irq_set_vcpu_affinity(host_vtimer_irq,
+ kvm_get_running_vcpus());
+ if (err) {
+ kvm_err("kvm_arch_timer: error setting vcpu affinity\n");
+ goto out_free_irq;
+ }
}
kvm_info("virtual timer IRQ%d\n", host_vtimer_irq);
@@ -835,10 +846,7 @@ int kvm_timer_enable(struct kvm_vcpu *vcpu)
no_vgic:
preempt_disable();
timer->enabled = 1;
- if (!irqchip_in_kernel(vcpu->kvm))
- kvm_timer_vcpu_load_user(vcpu);
- else
- kvm_timer_vcpu_load_vgic(vcpu);
+ kvm_timer_vcpu_load(vcpu);
preempt_enable();
return 0;
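
For reference, the pending check that the rewritten interrupt handler applies to the freshly read CNTV_CTL value can be summarised by the standalone helper below. This is a minimal sketch, not part of the patch; the helper name is invented, and the ARCH_TIMER_CTRL_* bits are the ones used in the hunk above. The interrupt is only forwarded to the guest when the timer is enabled, its status bit is asserted, and its output is not masked.

static bool vtimer_irq_is_pending(u32 cnt_ctl)
{
	/* Keep only the enable, status and mask bits of CNTV_CTL... */
	cnt_ctl &= ARCH_TIMER_CTRL_ENABLE | ARCH_TIMER_CTRL_IT_STAT |
		   ARCH_TIMER_CTRL_IT_MASK;

	/* ...and require "enabled + asserted + not masked". */
	return cnt_ctl == (ARCH_TIMER_CTRL_ENABLE | ARCH_TIMER_CTRL_IT_STAT);
}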
diff --git a/virt/kvm/arm/arm.c b/virt/kvm/arm/arm.c
index af3e98fc377e..7e3941f2ecde 100644
--- a/virt/kvm/arm/arm.c
+++ b/virt/kvm/arm/arm.c
@@ -1322,7 +1322,7 @@ static int init_subsystems(void)
/*
* Init HYP architected timer support
*/
- err = kvm_timer_hyp_init();
+ err = kvm_timer_hyp_init(vgic_present);
if (err)
goto out;
diff --git a/virt/kvm/arm/mmio.c b/virt/kvm/arm/mmio.c
index b6e715fd3c90..dac7ceb1a677 100644
--- a/virt/kvm/arm/mmio.c
+++ b/virt/kvm/arm/mmio.c
@@ -112,7 +112,7 @@ int kvm_handle_mmio_return(struct kvm_vcpu *vcpu, struct kvm_run *run)
}
trace_kvm_mmio(KVM_TRACE_MMIO_READ, len, run->mmio.phys_addr,
- data);
+ &data);
data = vcpu_data_host_to_guest(vcpu, data, len);
vcpu_set_reg(vcpu, vcpu->arch.mmio_decode.rt, data);
}
@@ -182,14 +182,14 @@ int io_mem_abort(struct kvm_vcpu *vcpu, struct kvm_run *run,
data = vcpu_data_guest_to_host(vcpu, vcpu_get_reg(vcpu, rt),
len);
- trace_kvm_mmio(KVM_TRACE_MMIO_WRITE, len, fault_ipa, data);
+ trace_kvm_mmio(KVM_TRACE_MMIO_WRITE, len, fault_ipa, &data);
kvm_mmio_write_buf(data_buf, len, data);
ret = kvm_io_bus_write(vcpu, KVM_MMIO_BUS, fault_ipa, len,
data_buf);
} else {
trace_kvm_mmio(KVM_TRACE_MMIO_READ_UNSATISFIED, len,
- fault_ipa, 0);
+ fault_ipa, NULL);
ret = kvm_io_bus_read(vcpu, KVM_MMIO_BUS, fault_ipa, len,
data_buf);
diff --git a/virt/kvm/arm/mmu.c b/virt/kvm/arm/mmu.c
index 876caf531d32..f8eaf86b740a 100644
--- a/virt/kvm/arm/mmu.c
+++ b/virt/kvm/arm/mmu.c
@@ -509,8 +509,6 @@ static void unmap_hyp_range(pgd_t *pgdp, phys_addr_t start, u64 size)
*/
void free_hyp_pgds(void)
{
- unsigned long addr;
-
mutex_lock(&kvm_hyp_pgd_mutex);
if (boot_hyp_pgd) {
@@ -521,10 +519,10 @@ void free_hyp_pgds(void)
if (hyp_pgd) {
unmap_hyp_range(hyp_pgd, hyp_idmap_start, PAGE_SIZE);
- for (addr = PAGE_OFFSET; virt_addr_valid(addr); addr += PGDIR_SIZE)
- unmap_hyp_range(hyp_pgd, kern_hyp_va(addr), PGDIR_SIZE);
- for (addr = VMALLOC_START; is_vmalloc_addr((void*)addr); addr += PGDIR_SIZE)
- unmap_hyp_range(hyp_pgd, kern_hyp_va(addr), PGDIR_SIZE);
+ unmap_hyp_range(hyp_pgd, kern_hyp_va(PAGE_OFFSET),
+ (uintptr_t)high_memory - PAGE_OFFSET);
+ unmap_hyp_range(hyp_pgd, kern_hyp_va(VMALLOC_START),
+ VMALLOC_END - VMALLOC_START);
free_pages((unsigned long)hyp_pgd, hyp_pgd_order);
hyp_pgd = NULL;
@@ -1312,7 +1310,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
return -EFAULT;
}
- if (is_vm_hugetlb_page(vma) && !logging_active) {
+ if (vma_kernel_pagesize(vma) == PMD_SIZE && !logging_active) {
hugetlb = true;
gfn = (fault_ipa & PMD_MASK) >> PAGE_SHIFT;
} else {
diff --git a/virt/kvm/arm/vgic/vgic-init.c b/virt/kvm/arm/vgic/vgic-init.c
index 62310122ee78..743ca5cb05ef 100644
--- a/virt/kvm/arm/vgic/vgic-init.c
+++ b/virt/kvm/arm/vgic/vgic-init.c
@@ -285,9 +285,11 @@ int vgic_init(struct kvm *kvm)
if (ret)
goto out;
- ret = vgic_v4_init(kvm);
- if (ret)
- goto out;
+ if (vgic_has_its(kvm)) {
+ ret = vgic_v4_init(kvm);
+ if (ret)
+ goto out;
+ }
kvm_for_each_vcpu(i, vcpu, kvm)
kvm_vgic_vcpu_enable(vcpu);
diff --git a/virt/kvm/arm/vgic/vgic-v4.c b/virt/kvm/arm/vgic/vgic-v4.c
index 4a37292855bc..bc4265154bac 100644
--- a/virt/kvm/arm/vgic/vgic-v4.c
+++ b/virt/kvm/arm/vgic/vgic-v4.c
@@ -118,7 +118,7 @@ int vgic_v4_init(struct kvm *kvm)
struct kvm_vcpu *vcpu;
int i, nr_vcpus, ret;
- if (!vgic_supports_direct_msis(kvm))
+ if (!kvm_vgic_global_state.has_gicv4)
return 0; /* Nothing to see here... move along. */
if (dist->its_vm.vpes)
diff --git a/virt/kvm/eventfd.c b/virt/kvm/eventfd.c
index f2ac53ab8243..a334399fafec 100644
--- a/virt/kvm/eventfd.c
+++ b/virt/kvm/eventfd.c
@@ -188,7 +188,7 @@ irqfd_wakeup(wait_queue_entry_t *wait, unsigned mode, int sync, void *key)
{
struct kvm_kernel_irqfd *irqfd =
container_of(wait, struct kvm_kernel_irqfd, wait);
- unsigned long flags = (unsigned long)key;
+ __poll_t flags = key_to_poll(key);
struct kvm_kernel_irq_routing_entry irq;
struct kvm *kvm = irqfd->kvm;
unsigned seq;
@@ -287,7 +287,7 @@ kvm_irqfd_assign(struct kvm *kvm, struct kvm_irqfd *args)
struct fd f;
struct eventfd_ctx *eventfd = NULL, *resamplefd = NULL;
int ret;
- unsigned int events;
+ __poll_t events;
int idx;
if (!kvm_arch_intc_initialized(kvm))
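
The eventfd hunks switch the irqfd wakeup path to the sparse-checked __poll_t type. A minimal sketch of the resulting pattern is shown below, assuming the usual key_to_poll() helper from <linux/poll.h>; the function name and body are illustrative only, not the patched irqfd_wakeup() itself.

static int example_wakeup(wait_queue_entry_t *wait, unsigned mode,
			  int sync, void *key)
{
	/* The opaque wakeup key carries the poll event bits. */
	__poll_t events = key_to_poll(key);

	if (events & EPOLLIN)
		;	/* eventfd was signalled: inject the interrupt */
	if (events & EPOLLHUP)
		;	/* eventfd was closed: tear down the irqfd */
	return 0;
}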
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 210bf820385a..001085b611ad 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -476,6 +476,7 @@ static void kvm_mmu_notifier_release(struct mmu_notifier *mn,
}
static const struct mmu_notifier_ops kvm_mmu_notifier_ops = {
+ .flags = MMU_INVALIDATE_DOES_NOT_BLOCK,
.invalidate_range_start = kvm_mmu_notifier_invalidate_range_start,
.invalidate_range_end = kvm_mmu_notifier_invalidate_range_end,
.clear_flush_young = kvm_mmu_notifier_clear_flush_young,
@@ -1322,17 +1323,6 @@ unsigned long kvm_vcpu_gfn_to_hva_prot(struct kvm_vcpu *vcpu, gfn_t gfn, bool *w
return gfn_to_hva_memslot_prot(slot, gfn, writable);
}
-static int get_user_page_nowait(unsigned long start, int write,
- struct page **page)
-{
- int flags = FOLL_NOWAIT | FOLL_HWPOISON;
-
- if (write)
- flags |= FOLL_WRITE;
-
- return get_user_pages(start, 1, flags, page, NULL);
-}
-
static inline int check_user_page_hwpoison(unsigned long addr)
{
int rc, flags = FOLL_HWPOISON | FOLL_WRITE;
@@ -1381,7 +1371,8 @@ static bool hva_to_pfn_fast(unsigned long addr, bool atomic, bool *async,
static int hva_to_pfn_slow(unsigned long addr, bool *async, bool write_fault,
bool *writable, kvm_pfn_t *pfn)
{
- struct page *page[1];
+ unsigned int flags = FOLL_HWPOISON;
+ struct page *page;
int npages = 0;
might_sleep();
@@ -1389,35 +1380,26 @@ static int hva_to_pfn_slow(unsigned long addr, bool *async, bool write_fault,
if (writable)
*writable = write_fault;
- if (async) {
- down_read(&current->mm->mmap_sem);
- npages = get_user_page_nowait(addr, write_fault, page);
- up_read(&current->mm->mmap_sem);
- } else {
- unsigned int flags = FOLL_HWPOISON;
-
- if (write_fault)
- flags |= FOLL_WRITE;
+ if (write_fault)
+ flags |= FOLL_WRITE;
+ if (async)
+ flags |= FOLL_NOWAIT;
- npages = get_user_pages_unlocked(addr, 1, page, flags);
- }
+ npages = get_user_pages_unlocked(addr, 1, &page, flags);
if (npages != 1)
return npages;
/* map read fault as writable if possible */
if (unlikely(!write_fault) && writable) {
- struct page *wpage[1];
+ struct page *wpage;
- npages = __get_user_pages_fast(addr, 1, 1, wpage);
- if (npages == 1) {
+ if (__get_user_pages_fast(addr, 1, 1, &wpage) == 1) {
*writable = true;
- put_page(page[0]);
- page[0] = wpage[0];
+ put_page(page);
+ page = wpage;
}
-
- npages = 1;
}
- *pfn = page_to_pfn(page[0]);
+ *pfn = page_to_pfn(page);
return npages;
}
@@ -4037,8 +4019,12 @@ int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
/* A kmem cache lets us meet the alignment requirements of fx_save. */
if (!vcpu_align)
vcpu_align = __alignof__(struct kvm_vcpu);
- kvm_vcpu_cache = kmem_cache_create("kvm_vcpu", vcpu_size, vcpu_align,
- SLAB_ACCOUNT, NULL);
+ kvm_vcpu_cache =
+ kmem_cache_create_usercopy("kvm_vcpu", vcpu_size, vcpu_align,
+ SLAB_ACCOUNT,
+ offsetof(struct kvm_vcpu, arch),
+ sizeof_field(struct kvm_vcpu, arch),
+ NULL);
if (!kvm_vcpu_cache) {
r = -ENOMEM;
goto out_free_3;
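
The kvm_vcpu cache now goes through the hardened-usercopy whitelist API: only the arch-specific part of struct kvm_vcpu may be copied to or from user space. Below is a minimal sketch of the same pattern applied to an arbitrary structure; the struct, cache and function names are invented for illustration, while the kmem_cache_create_usercopy() arguments mirror the call in the hunk above.

struct example_obj {
	spinlock_t lock;	/* internal state, never copied to user space */
	char user_window[64];	/* the only region user copies may touch */
};

static struct kmem_cache *example_cache;

static int __init example_cache_init(void)
{
	/* Whitelist just the user_window field for copy_{to,from}_user(). */
	example_cache = kmem_cache_create_usercopy("example_obj",
				sizeof(struct example_obj),
				__alignof__(struct example_obj),
				SLAB_ACCOUNT,
				offsetof(struct example_obj, user_window),
				sizeof_field(struct example_obj, user_window),
				NULL);
	return example_cache ? 0 : -ENOMEM;
}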