Diffstat (limited to 'virt/kvm/arm')
-rw-r--r--  virt/kvm/arm/arm.c                 6
-rw-r--r--  virt/kvm/arm/mmu.c                23
-rw-r--r--  virt/kvm/arm/vgic/vgic-init.c     19
-rw-r--r--  virt/kvm/arm/vgic/vgic-mmio-v2.c  20
-rw-r--r--  virt/kvm/arm/vgic/vgic-v2.c       23
-rw-r--r--  virt/kvm/arm/vgic/vgic.c           4
-rw-r--r--  virt/kvm/arm/vgic/vgic.h          10
7 files changed, 91 insertions(+), 14 deletions(-)
diff --git a/virt/kvm/arm/arm.c b/virt/kvm/arm/arm.c
index 9f6f522a4bfc..3417e184c8e1 100644
--- a/virt/kvm/arm/arm.c
+++ b/virt/kvm/arm/arm.c
@@ -208,9 +208,6 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
case KVM_CAP_IMMEDIATE_EXIT:
r = 1;
break;
- case KVM_CAP_COALESCED_MMIO:
- r = KVM_COALESCED_MMIO_PAGE_OFFSET;
- break;
case KVM_CAP_ARM_SET_DEVICE_ADDR:
r = 1;
break;
@@ -1143,6 +1140,9 @@ static void cpu_hyp_reinit(void)
} else {
cpu_init_hyp_mode(NULL);
}
+
+ if (vgic_present)
+ kvm_vgic_init_cpu_hardware();
}
static void _kvm_arch_hardware_enable(void *discard)
diff --git a/virt/kvm/arm/mmu.c b/virt/kvm/arm/mmu.c
index efb4335aa5c4..313ee646480f 100644
--- a/virt/kvm/arm/mmu.c
+++ b/virt/kvm/arm/mmu.c
@@ -292,11 +292,18 @@ static void unmap_stage2_range(struct kvm *kvm, phys_addr_t start, u64 size)
phys_addr_t addr = start, end = start + size;
phys_addr_t next;
+ assert_spin_locked(&kvm->mmu_lock);
pgd = kvm->arch.pgd + stage2_pgd_index(addr);
do {
next = stage2_pgd_addr_end(addr, end);
if (!stage2_pgd_none(*pgd))
unmap_stage2_puds(kvm, pgd, addr, next);
+ /*
+ * If the range is too large, release the kvm->mmu_lock
+ * to prevent starvation and lockup detector warnings.
+ */
+ if (next != end)
+ cond_resched_lock(&kvm->mmu_lock);
} while (pgd++, addr = next, addr != end);
}
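
The hunk above bounds how long the walk holds kvm->mmu_lock. A minimal sketch of the same cond_resched_lock() lock-break pattern, with hypothetical range_step() and do_unmap() helpers standing in for the stage-2 page-table walkers:

	spin_lock(&lock);
	do {
		next = range_step(addr, end);	/* hypothetical: one bounded step */
		do_unmap(addr, next);		/* hypothetical: the actual work  */
		/* Briefly drop and retake the lock between steps so other
		 * waiters make progress and the lockup detector stays quiet. */
		if (next != end)
			cond_resched_lock(&lock);
	} while (addr = next, addr != end);
	spin_unlock(&lock);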
@@ -803,6 +810,7 @@ void stage2_unmap_vm(struct kvm *kvm)
int idx;
idx = srcu_read_lock(&kvm->srcu);
+ down_read(&current->mm->mmap_sem);
spin_lock(&kvm->mmu_lock);
slots = kvm_memslots(kvm);
@@ -810,6 +818,7 @@ void stage2_unmap_vm(struct kvm *kvm)
stage2_unmap_memslot(kvm, memslot);
spin_unlock(&kvm->mmu_lock);
+ up_read(&current->mm->mmap_sem);
srcu_read_unlock(&kvm->srcu, idx);
}
@@ -829,7 +838,10 @@ void kvm_free_stage2_pgd(struct kvm *kvm)
if (kvm->arch.pgd == NULL)
return;
+ spin_lock(&kvm->mmu_lock);
unmap_stage2_range(kvm, 0, KVM_PHYS_SIZE);
+ spin_unlock(&kvm->mmu_lock);
+
/* Free the HW pgd, one page at a time */
free_pages_exact(kvm->arch.pgd, S2_PGD_SIZE);
kvm->arch.pgd = NULL;
@@ -1791,6 +1803,7 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
(KVM_PHYS_SIZE >> PAGE_SHIFT))
return -EFAULT;
+ down_read(&current->mm->mmap_sem);
/*
* A memory region could potentially cover multiple VMAs, and any holes
* between them, so iterate over all of them to find out if we can map
@@ -1834,8 +1847,10 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
pa += vm_start - vma->vm_start;
/* IO region dirty page logging not allowed */
- if (memslot->flags & KVM_MEM_LOG_DIRTY_PAGES)
- return -EINVAL;
+ if (memslot->flags & KVM_MEM_LOG_DIRTY_PAGES) {
+ ret = -EINVAL;
+ goto out;
+ }
ret = kvm_phys_addr_ioremap(kvm, gpa, pa,
vm_end - vm_start,
@@ -1847,7 +1862,7 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
} while (hva < reg_end);
if (change == KVM_MR_FLAGS_ONLY)
- return ret;
+ goto out;
spin_lock(&kvm->mmu_lock);
if (ret)
@@ -1855,6 +1870,8 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
else
stage2_flush_memslot(kvm, memslot);
spin_unlock(&kvm->mmu_lock);
+out:
+ up_read(&current->mm->mmap_sem);
return ret;
}
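
Taken together, the mmu.c hunks establish two conventions: mmap_sem is held across the VMA walks and is always taken outside kvm->mmu_lock, and every early return funnels through a single unlock site. A sketch of that single-exit pattern, assuming hypothetical check_range() and do_update() steps:

	int ret;

	down_read(&current->mm->mmap_sem);	/* outer lock               */
	ret = check_range(kvm);			/* hypothetical helper      */
	if (ret)
		goto out;			/* error paths still unlock */

	spin_lock(&kvm->mmu_lock);		/* inner lock               */
	ret = do_update(kvm);			/* hypothetical helper      */
	spin_unlock(&kvm->mmu_lock);
out:
	up_read(&current->mm->mmap_sem);	/* single unlock site       */
	return ret;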
diff --git a/virt/kvm/arm/vgic/vgic-init.c b/virt/kvm/arm/vgic/vgic-init.c
index 962bb577a1a0..dc68e2e424ab 100644
--- a/virt/kvm/arm/vgic/vgic-init.c
+++ b/virt/kvm/arm/vgic/vgic-init.c
@@ -437,6 +437,25 @@ static irqreturn_t vgic_maintenance_handler(int irq, void *data)
}
/**
+ * kvm_vgic_init_cpu_hardware - initialize the GIC VE hardware
+ *
+ * For a specific CPU, initialize the GIC VE hardware.
+ */
+void kvm_vgic_init_cpu_hardware(void)
+{
+ BUG_ON(preemptible());
+
+ /*
+ * We want to make sure the list registers start out clear so that we
+ * only have to program the used registers.
+ */
+ if (kvm_vgic_global_state.type == VGIC_V2)
+ vgic_v2_init_lrs();
+ else
+ kvm_call_hyp(__vgic_v3_init_lrs);
+}
+
+/**
* kvm_vgic_hyp_init: populates the kvm_vgic_global_state variable
* according to the host GIC model. Accordingly calls either
* vgic_v2/v3_probe which registers the KVM_DEVICE that can be
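
kvm_vgic_init_cpu_hardware() pokes this CPU's GICH list registers, hence the BUG_ON(preemptible()); in this series it is reached via cpu_hyp_reinit() (see the arm.c hunk above). A hedged sketch of an equivalent per-CPU invocation, using a hypothetical wrapper:

	static void reinit_vgic_cpu(void *unused)	/* hypothetical */
	{
		kvm_vgic_init_cpu_hardware();	/* preemption is disabled
						 * inside on_each_cpu()   */
	}

	on_each_cpu(reinit_vgic_cpu, NULL, 1);	/* every online CPU, wait */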
diff --git a/virt/kvm/arm/vgic/vgic-mmio-v2.c b/virt/kvm/arm/vgic/vgic-mmio-v2.c
index a3ad7ff95c9b..0a4283ed9aa7 100644
--- a/virt/kvm/arm/vgic/vgic-mmio-v2.c
+++ b/virt/kvm/arm/vgic/vgic-mmio-v2.c
@@ -229,7 +229,15 @@ static unsigned long vgic_mmio_read_vcpuif(struct kvm_vcpu *vcpu,
val = vmcr.ctlr;
break;
case GIC_CPU_PRIMASK:
- val = vmcr.pmr;
+ /*
+ * Our KVM_DEV_TYPE_ARM_VGIC_V2 device ABI exports the
+ * PMR field as GICH_VMCR.VMPriMask rather than
+ * GICC_PMR.Priority, so we expose the upper five bits of
+ * priority mask to userspace using the lower bits in the
+ * unsigned long.
+ */
+ val = (vmcr.pmr & GICV_PMR_PRIORITY_MASK) >>
+ GICV_PMR_PRIORITY_SHIFT;
break;
case GIC_CPU_BINPOINT:
val = vmcr.bpr;
@@ -262,7 +270,15 @@ static void vgic_mmio_write_vcpuif(struct kvm_vcpu *vcpu,
vmcr.ctlr = val;
break;
case GIC_CPU_PRIMASK:
- vmcr.pmr = val;
+ /*
+ * Our KVM_DEV_TYPE_ARM_VGIC_V2 device ABI exports the
+ * PMR field as GICH_VMCR.VMPriMask rather than
+ * GICC_PMR.Priority, so we expose the upper five bits of
+ * priority mask to userspace using the lower bits in the
+ * unsigned long.
+ */
+ vmcr.pmr = (val << GICV_PMR_PRIORITY_SHIFT) &
+ GICV_PMR_PRIORITY_MASK;
break;
case GIC_CPU_BINPOINT:
vmcr.bpr = val;
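
A worked example of the conversion above, assuming the usual definitions GICV_PMR_PRIORITY_SHIFT == 3 and GICV_PMR_PRIORITY_MASK == 0xf8 (a 5-bit VMPriMask field):

	u32 pmr  = 0xa0;			/* GICC_PMR.Priority, 8 bits */
	u32 user = (pmr & GICV_PMR_PRIORITY_MASK)
			>> GICV_PMR_PRIORITY_SHIFT;	/* 0x14 to userspace */
	u32 back = (user << GICV_PMR_PRIORITY_SHIFT)
			& GICV_PMR_PRIORITY_MASK;	/* 0xa0 again        */

The low three bits of the 8-bit priority never survive the round trip, which is exactly what the 5-bit GICH_VMCR.VMPriMask ABI implies.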
diff --git a/virt/kvm/arm/vgic/vgic-v2.c b/virt/kvm/arm/vgic/vgic-v2.c
index 025b57d5787e..a65757aab6d3 100644
--- a/virt/kvm/arm/vgic/vgic-v2.c
+++ b/virt/kvm/arm/vgic/vgic-v2.c
@@ -22,6 +22,21 @@
#include "vgic.h"
+static inline void vgic_v2_write_lr(int lr, u32 val)
+{
+ void __iomem *base = kvm_vgic_global_state.vctrl_base;
+
+ writel_relaxed(val, base + GICH_LR0 + (lr * 4));
+}
+
+void vgic_v2_init_lrs(void)
+{
+ int i;
+
+ for (i = 0; i < kvm_vgic_global_state.nr_lr; i++)
+ vgic_v2_write_lr(i, 0);
+}
+
void vgic_v2_set_underflow(struct kvm_vcpu *vcpu)
{
struct vgic_v2_cpu_if *cpuif = &vcpu->arch.vgic_cpu.vgic_v2;
@@ -160,8 +175,8 @@ void vgic_v2_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcrp)
GICH_VMCR_ALIAS_BINPOINT_MASK;
vmcr |= (vmcrp->bpr << GICH_VMCR_BINPOINT_SHIFT) &
GICH_VMCR_BINPOINT_MASK;
- vmcr |= (vmcrp->pmr << GICH_VMCR_PRIMASK_SHIFT) &
- GICH_VMCR_PRIMASK_MASK;
+ vmcr |= ((vmcrp->pmr >> GICV_PMR_PRIORITY_SHIFT) <<
+ GICH_VMCR_PRIMASK_SHIFT) & GICH_VMCR_PRIMASK_MASK;
cpu_if->vgic_vmcr = vmcr;
}
@@ -179,8 +194,8 @@ void vgic_v2_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcrp)
GICH_VMCR_ALIAS_BINPOINT_SHIFT;
vmcrp->bpr = (vmcr & GICH_VMCR_BINPOINT_MASK) >>
GICH_VMCR_BINPOINT_SHIFT;
- vmcrp->pmr = (vmcr & GICH_VMCR_PRIMASK_MASK) >>
- GICH_VMCR_PRIMASK_SHIFT;
+ vmcrp->pmr = ((vmcr & GICH_VMCR_PRIMASK_MASK) >>
+ GICH_VMCR_PRIMASK_SHIFT) << GICV_PMR_PRIORITY_SHIFT;
}
void vgic_v2_enable(struct kvm_vcpu *vcpu)
diff --git a/virt/kvm/arm/vgic/vgic.c b/virt/kvm/arm/vgic/vgic.c
index d40210ae9474..83b24d20ff8f 100644
--- a/virt/kvm/arm/vgic/vgic.c
+++ b/virt/kvm/arm/vgic/vgic.c
@@ -29,7 +29,9 @@
#define DEBUG_SPINLOCK_BUG_ON(p)
#endif
-struct vgic_global __section(.hyp.text) kvm_vgic_global_state = {.gicv3_cpuif = STATIC_KEY_FALSE_INIT,};
+struct vgic_global kvm_vgic_global_state __ro_after_init = {
+ .gicv3_cpuif = STATIC_KEY_FALSE_INIT,
+};
/*
* Locking order is always:
diff --git a/virt/kvm/arm/vgic/vgic.h b/virt/kvm/arm/vgic/vgic.h
index 8ac739734874..da83e4caa272 100644
--- a/virt/kvm/arm/vgic/vgic.h
+++ b/virt/kvm/arm/vgic/vgic.h
@@ -104,11 +104,18 @@ static inline bool irq_is_pending(struct vgic_irq *irq)
return irq->pending_latch || irq->line_level;
}
+/*
+ * This struct provides an intermediate representation of the fields contained
+ * in the GICH_VMCR and ICH_VMCR registers, such that code exporting the GIC
+ * state to userspace can generate either GICv2 or GICv3 CPU interface
+ * registers regardless of the hardware-backed GIC used.
+ */
struct vgic_vmcr {
u32 ctlr;
u32 abpr;
u32 bpr;
- u32 pmr;
+ u32 pmr; /* Priority mask field in the GICC_PMR and
+ * ICC_PMR_EL1 priority field format */
/* Below member variables are valid only for GICv3 */
u32 grpen0;
u32 grpen1;
@@ -152,6 +159,7 @@ int vgic_v2_map_resources(struct kvm *kvm);
int vgic_register_dist_iodev(struct kvm *kvm, gpa_t dist_base_address,
enum vgic_type);
+void vgic_v2_init_lrs(void);
void vgic_v2_load(struct kvm_vcpu *vcpu);
void vgic_v2_put(struct kvm_vcpu *vcpu);
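
Putting the pieces together: the set/get accessors convert between the packed hardware GICH_VMCR layout and the architectural field formats stored in struct vgic_vmcr, so callers only ever deal with the latter. A hedged usage sketch (vcpu assumed to be in scope):

	struct vgic_vmcr vmcr;

	vgic_v2_get_vmcr(vcpu, &vmcr);	/* GICH_VMCR -> struct fields  */
	vmcr.pmr = 0xf8;		/* 8-bit GICC_PMR.Priority...  */
	vgic_v2_set_vmcr(vcpu, &vmcr);	/* ...packed as VMPriMask 0x1f */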