Diffstat (limited to 'virt')
-rw-r--r--  virt/kvm/Kconfig            |   10
-rw-r--r--  virt/kvm/arm/arch_timer.c   |    3
-rw-r--r--  virt/kvm/arm/vgic-v2-emul.c |  847
-rw-r--r--  virt/kvm/arm/vgic-v2.c      |    4
-rw-r--r--  virt/kvm/arm/vgic-v3-emul.c | 1036
-rw-r--r--  virt/kvm/arm/vgic-v3.c      |   82
-rw-r--r--  virt/kvm/arm/vgic.c         | 1127
-rw-r--r--  virt/kvm/arm/vgic.h         |  123
-rw-r--r--  virt/kvm/async_pf.c         |    2
-rw-r--r--  virt/kvm/kvm_main.c         |  194
10 files changed, 2495 insertions(+), 933 deletions(-)
diff --git a/virt/kvm/Kconfig b/virt/kvm/Kconfig
index fc0c5e603eb4..e2c876d5a03b 100644
--- a/virt/kvm/Kconfig
+++ b/virt/kvm/Kconfig
@@ -37,3 +37,13 @@ config HAVE_KVM_CPU_RELAX_INTERCEPT
config KVM_VFIO
bool
+
+config HAVE_KVM_ARCH_TLB_FLUSH_ALL
+ bool
+
+config KVM_GENERIC_DIRTYLOG_READ_PROTECT
+ bool
+
+config KVM_COMPAT
+ def_bool y
+ depends on COMPAT && !S390
diff --git a/virt/kvm/arm/arch_timer.c b/virt/kvm/arm/arch_timer.c
index 1c0772b340d8..6e54f3542126 100644
--- a/virt/kvm/arm/arch_timer.c
+++ b/virt/kvm/arm/arch_timer.c
@@ -152,7 +152,8 @@ void kvm_timer_sync_hwstate(struct kvm_vcpu *vcpu)
return;
}
- ns = cyclecounter_cyc2ns(timecounter->cc, cval - now);
+ ns = cyclecounter_cyc2ns(timecounter->cc, cval - now, timecounter->mask,
+ &timecounter->frac);
timer_arm(timer, ns);
}
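
For reference, the timekeeping rework behind this hunk extends the conversion helper with a cycle mask and a fractional-nanosecond accumulator, so repeated short conversions no longer drop sub-nanosecond remainders. A minimal sketch of the reworked helper, assuming the signature used by the call above:

    static inline u64 cyclecounter_cyc2ns(const struct cyclecounter *cc,
                                          u64 cycles, u64 mask, u64 *frac)
    {
            u64 ns = (u64) cycles;

            /* scale, then fold the carried-over fraction back in */
            ns = (ns * cc->mult) + *frac;
            *frac = ns & mask;      /* remember the sub-ns remainder */
            return ns >> cc->shift;
    }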
diff --git a/virt/kvm/arm/vgic-v2-emul.c b/virt/kvm/arm/vgic-v2-emul.c
new file mode 100644
index 000000000000..19c6210f02cf
--- /dev/null
+++ b/virt/kvm/arm/vgic-v2-emul.c
@@ -0,0 +1,847 @@
+/*
+ * Contains GICv2 specific emulation code, was in vgic.c before.
+ *
+ * Copyright (C) 2012 ARM Ltd.
+ * Author: Marc Zyngier <marc.zyngier@arm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/cpu.h>
+#include <linux/kvm.h>
+#include <linux/kvm_host.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/uaccess.h>
+
+#include <linux/irqchip/arm-gic.h>
+
+#include <asm/kvm_emulate.h>
+#include <asm/kvm_arm.h>
+#include <asm/kvm_mmu.h>
+
+#include "vgic.h"
+
+#define GICC_ARCH_VERSION_V2 0x2
+
+static void vgic_dispatch_sgi(struct kvm_vcpu *vcpu, u32 reg);
+static u8 *vgic_get_sgi_sources(struct vgic_dist *dist, int vcpu_id, int sgi)
+{
+ return dist->irq_sgi_sources + vcpu_id * VGIC_NR_SGIS + sgi;
+}
+
+static bool handle_mmio_misc(struct kvm_vcpu *vcpu,
+ struct kvm_exit_mmio *mmio, phys_addr_t offset)
+{
+ u32 reg;
+ u32 word_offset = offset & 3;
+
+ switch (offset & ~3) {
+ case 0: /* GICD_CTLR */
+ reg = vcpu->kvm->arch.vgic.enabled;
+ vgic_reg_access(mmio, &reg, word_offset,
+ ACCESS_READ_VALUE | ACCESS_WRITE_VALUE);
+ if (mmio->is_write) {
+ vcpu->kvm->arch.vgic.enabled = reg & 1;
+ vgic_update_state(vcpu->kvm);
+ return true;
+ }
+ break;
+
+ case 4: /* GICD_TYPER */
+ reg = (atomic_read(&vcpu->kvm->online_vcpus) - 1) << 5;
+ reg |= (vcpu->kvm->arch.vgic.nr_irqs >> 5) - 1;
+ vgic_reg_access(mmio, &reg, word_offset,
+ ACCESS_READ_VALUE | ACCESS_WRITE_IGNORED);
+ break;
+
+ case 8: /* GICD_IIDR */
+ reg = (PRODUCT_ID_KVM << 24) | (IMPLEMENTER_ARM << 0);
+ vgic_reg_access(mmio, &reg, word_offset,
+ ACCESS_READ_VALUE | ACCESS_WRITE_IGNORED);
+ break;
+ }
+
+ return false;
+}
+
+static bool handle_mmio_set_enable_reg(struct kvm_vcpu *vcpu,
+ struct kvm_exit_mmio *mmio,
+ phys_addr_t offset)
+{
+ return vgic_handle_enable_reg(vcpu->kvm, mmio, offset,
+ vcpu->vcpu_id, ACCESS_WRITE_SETBIT);
+}
+
+static bool handle_mmio_clear_enable_reg(struct kvm_vcpu *vcpu,
+ struct kvm_exit_mmio *mmio,
+ phys_addr_t offset)
+{
+ return vgic_handle_enable_reg(vcpu->kvm, mmio, offset,
+ vcpu->vcpu_id, ACCESS_WRITE_CLEARBIT);
+}
+
+static bool handle_mmio_set_pending_reg(struct kvm_vcpu *vcpu,
+ struct kvm_exit_mmio *mmio,
+ phys_addr_t offset)
+{
+ return vgic_handle_set_pending_reg(vcpu->kvm, mmio, offset,
+ vcpu->vcpu_id);
+}
+
+static bool handle_mmio_clear_pending_reg(struct kvm_vcpu *vcpu,
+ struct kvm_exit_mmio *mmio,
+ phys_addr_t offset)
+{
+ return vgic_handle_clear_pending_reg(vcpu->kvm, mmio, offset,
+ vcpu->vcpu_id);
+}
+
+static bool handle_mmio_priority_reg(struct kvm_vcpu *vcpu,
+ struct kvm_exit_mmio *mmio,
+ phys_addr_t offset)
+{
+ u32 *reg = vgic_bytemap_get_reg(&vcpu->kvm->arch.vgic.irq_priority,
+ vcpu->vcpu_id, offset);
+ vgic_reg_access(mmio, reg, offset,
+ ACCESS_READ_VALUE | ACCESS_WRITE_VALUE);
+ return false;
+}
+
+#define GICD_ITARGETSR_SIZE 32
+#define GICD_CPUTARGETS_BITS 8
+#define GICD_IRQS_PER_ITARGETSR (GICD_ITARGETSR_SIZE / GICD_CPUTARGETS_BITS)
+static u32 vgic_get_target_reg(struct kvm *kvm, int irq)
+{
+ struct vgic_dist *dist = &kvm->arch.vgic;
+ int i;
+ u32 val = 0;
+
+ irq -= VGIC_NR_PRIVATE_IRQS;
+
+ for (i = 0; i < GICD_IRQS_PER_ITARGETSR; i++)
+ val |= 1 << (dist->irq_spi_cpu[irq + i] + i * 8);
+
+ return val;
+}
+
+static void vgic_set_target_reg(struct kvm *kvm, u32 val, int irq)
+{
+ struct vgic_dist *dist = &kvm->arch.vgic;
+ struct kvm_vcpu *vcpu;
+ int i, c;
+ unsigned long *bmap;
+ u32 target;
+
+ irq -= VGIC_NR_PRIVATE_IRQS;
+
+ /*
+ * Pick the LSB in each byte. This ensures we target exactly
+	 * one vcpu per IRQ. If the byte is zero, assume we target
+	 * CPU0.
+ */
+ for (i = 0; i < GICD_IRQS_PER_ITARGETSR; i++) {
+ int shift = i * GICD_CPUTARGETS_BITS;
+
+ target = ffs((val >> shift) & 0xffU);
+ target = target ? (target - 1) : 0;
+ dist->irq_spi_cpu[irq + i] = target;
+ kvm_for_each_vcpu(c, vcpu, kvm) {
+ bmap = vgic_bitmap_get_shared_map(&dist->irq_spi_target[c]);
+ if (c == target)
+ set_bit(irq + i, bmap);
+ else
+ clear_bit(irq + i, bmap);
+ }
+ }
+}
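
A worked example of the LSB-per-byte rule above, for a guest write of 0x06040201 to the ITARGETSR covering IRQs 32..35:

    /* byte 0 = 0x01 -> ffs() = 1, IRQ32 targets CPU0
     * byte 1 = 0x02 -> ffs() = 2, IRQ33 targets CPU1
     * byte 2 = 0x04 -> ffs() = 3, IRQ34 targets CPU2
     * byte 3 = 0x06 -> ffs() = 2, IRQ35 targets CPU1 (the LSB wins)
     */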
+
+static bool handle_mmio_target_reg(struct kvm_vcpu *vcpu,
+ struct kvm_exit_mmio *mmio,
+ phys_addr_t offset)
+{
+ u32 reg;
+
+ /* We treat the banked interrupts targets as read-only */
+ if (offset < 32) {
+ u32 roreg;
+
+ roreg = 1 << vcpu->vcpu_id;
+ roreg |= roreg << 8;
+ roreg |= roreg << 16;
+
+ vgic_reg_access(mmio, &roreg, offset,
+ ACCESS_READ_VALUE | ACCESS_WRITE_IGNORED);
+ return false;
+ }
+
+ reg = vgic_get_target_reg(vcpu->kvm, offset & ~3U);
+ vgic_reg_access(mmio, &reg, offset,
+ ACCESS_READ_VALUE | ACCESS_WRITE_VALUE);
+ if (mmio->is_write) {
+ vgic_set_target_reg(vcpu->kvm, reg, offset & ~3U);
+ vgic_update_state(vcpu->kvm);
+ return true;
+ }
+
+ return false;
+}
+
+static bool handle_mmio_cfg_reg(struct kvm_vcpu *vcpu,
+ struct kvm_exit_mmio *mmio, phys_addr_t offset)
+{
+ u32 *reg;
+
+ reg = vgic_bitmap_get_reg(&vcpu->kvm->arch.vgic.irq_cfg,
+ vcpu->vcpu_id, offset >> 1);
+
+ return vgic_handle_cfg_reg(reg, mmio, offset);
+}
+
+static bool handle_mmio_sgi_reg(struct kvm_vcpu *vcpu,
+ struct kvm_exit_mmio *mmio, phys_addr_t offset)
+{
+ u32 reg;
+
+ vgic_reg_access(mmio, &reg, offset,
+ ACCESS_READ_RAZ | ACCESS_WRITE_VALUE);
+ if (mmio->is_write) {
+ vgic_dispatch_sgi(vcpu, reg);
+ vgic_update_state(vcpu->kvm);
+ return true;
+ }
+
+ return false;
+}
+
+/* Handle reads of GICD_CPENDSGIRn and GICD_SPENDSGIRn */
+static bool read_set_clear_sgi_pend_reg(struct kvm_vcpu *vcpu,
+ struct kvm_exit_mmio *mmio,
+ phys_addr_t offset)
+{
+ struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
+ int sgi;
+ int min_sgi = (offset & ~0x3);
+ int max_sgi = min_sgi + 3;
+ int vcpu_id = vcpu->vcpu_id;
+ u32 reg = 0;
+
+ /* Copy source SGIs from distributor side */
+ for (sgi = min_sgi; sgi <= max_sgi; sgi++) {
+ u8 sources = *vgic_get_sgi_sources(dist, vcpu_id, sgi);
+
+ reg |= ((u32)sources) << (8 * (sgi - min_sgi));
+ }
+
+ mmio_data_write(mmio, ~0, reg);
+ return false;
+}
+
+static bool write_set_clear_sgi_pend_reg(struct kvm_vcpu *vcpu,
+ struct kvm_exit_mmio *mmio,
+ phys_addr_t offset, bool set)
+{
+ struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
+ int sgi;
+ int min_sgi = (offset & ~0x3);
+ int max_sgi = min_sgi + 3;
+ int vcpu_id = vcpu->vcpu_id;
+ u32 reg;
+ bool updated = false;
+
+ reg = mmio_data_read(mmio, ~0);
+
+	/* Set or clear the pending SGIs on the distributor side */
+ for (sgi = min_sgi; sgi <= max_sgi; sgi++) {
+ u8 mask = reg >> (8 * (sgi - min_sgi));
+ u8 *src = vgic_get_sgi_sources(dist, vcpu_id, sgi);
+
+ if (set) {
+ if ((*src & mask) != mask)
+ updated = true;
+ *src |= mask;
+ } else {
+ if (*src & mask)
+ updated = true;
+ *src &= ~mask;
+ }
+ }
+
+ if (updated)
+ vgic_update_state(vcpu->kvm);
+
+ return updated;
+}
+
+static bool handle_mmio_sgi_set(struct kvm_vcpu *vcpu,
+ struct kvm_exit_mmio *mmio,
+ phys_addr_t offset)
+{
+ if (!mmio->is_write)
+ return read_set_clear_sgi_pend_reg(vcpu, mmio, offset);
+ else
+ return write_set_clear_sgi_pend_reg(vcpu, mmio, offset, true);
+}
+
+static bool handle_mmio_sgi_clear(struct kvm_vcpu *vcpu,
+ struct kvm_exit_mmio *mmio,
+ phys_addr_t offset)
+{
+ if (!mmio->is_write)
+ return read_set_clear_sgi_pend_reg(vcpu, mmio, offset);
+ else
+ return write_set_clear_sgi_pend_reg(vcpu, mmio, offset, false);
+}
+
+static const struct kvm_mmio_range vgic_dist_ranges[] = {
+ {
+ .base = GIC_DIST_CTRL,
+ .len = 12,
+ .bits_per_irq = 0,
+ .handle_mmio = handle_mmio_misc,
+ },
+ {
+ .base = GIC_DIST_IGROUP,
+ .len = VGIC_MAX_IRQS / 8,
+ .bits_per_irq = 1,
+ .handle_mmio = handle_mmio_raz_wi,
+ },
+ {
+ .base = GIC_DIST_ENABLE_SET,
+ .len = VGIC_MAX_IRQS / 8,
+ .bits_per_irq = 1,
+ .handle_mmio = handle_mmio_set_enable_reg,
+ },
+ {
+ .base = GIC_DIST_ENABLE_CLEAR,
+ .len = VGIC_MAX_IRQS / 8,
+ .bits_per_irq = 1,
+ .handle_mmio = handle_mmio_clear_enable_reg,
+ },
+ {
+ .base = GIC_DIST_PENDING_SET,
+ .len = VGIC_MAX_IRQS / 8,
+ .bits_per_irq = 1,
+ .handle_mmio = handle_mmio_set_pending_reg,
+ },
+ {
+ .base = GIC_DIST_PENDING_CLEAR,
+ .len = VGIC_MAX_IRQS / 8,
+ .bits_per_irq = 1,
+ .handle_mmio = handle_mmio_clear_pending_reg,
+ },
+ {
+ .base = GIC_DIST_ACTIVE_SET,
+ .len = VGIC_MAX_IRQS / 8,
+ .bits_per_irq = 1,
+ .handle_mmio = handle_mmio_raz_wi,
+ },
+ {
+ .base = GIC_DIST_ACTIVE_CLEAR,
+ .len = VGIC_MAX_IRQS / 8,
+ .bits_per_irq = 1,
+ .handle_mmio = handle_mmio_raz_wi,
+ },
+ {
+ .base = GIC_DIST_PRI,
+ .len = VGIC_MAX_IRQS,
+ .bits_per_irq = 8,
+ .handle_mmio = handle_mmio_priority_reg,
+ },
+ {
+ .base = GIC_DIST_TARGET,
+ .len = VGIC_MAX_IRQS,
+ .bits_per_irq = 8,
+ .handle_mmio = handle_mmio_target_reg,
+ },
+ {
+ .base = GIC_DIST_CONFIG,
+ .len = VGIC_MAX_IRQS / 4,
+ .bits_per_irq = 2,
+ .handle_mmio = handle_mmio_cfg_reg,
+ },
+ {
+ .base = GIC_DIST_SOFTINT,
+ .len = 4,
+ .handle_mmio = handle_mmio_sgi_reg,
+ },
+ {
+ .base = GIC_DIST_SGI_PENDING_CLEAR,
+ .len = VGIC_NR_SGIS,
+ .handle_mmio = handle_mmio_sgi_clear,
+ },
+ {
+ .base = GIC_DIST_SGI_PENDING_SET,
+ .len = VGIC_NR_SGIS,
+ .handle_mmio = handle_mmio_sgi_set,
+ },
+ {}
+};
+
+static bool vgic_v2_handle_mmio(struct kvm_vcpu *vcpu, struct kvm_run *run,
+ struct kvm_exit_mmio *mmio)
+{
+ unsigned long base = vcpu->kvm->arch.vgic.vgic_dist_base;
+
+ if (!is_in_range(mmio->phys_addr, mmio->len, base,
+ KVM_VGIC_V2_DIST_SIZE))
+ return false;
+
+ /* GICv2 does not support accesses wider than 32 bits */
+ if (mmio->len > 4) {
+ kvm_inject_dabt(vcpu, mmio->phys_addr);
+ return true;
+ }
+
+ return vgic_handle_mmio_range(vcpu, run, mmio, vgic_dist_ranges, base);
+}
+
+static void vgic_dispatch_sgi(struct kvm_vcpu *vcpu, u32 reg)
+{
+ struct kvm *kvm = vcpu->kvm;
+ struct vgic_dist *dist = &kvm->arch.vgic;
+ int nrcpus = atomic_read(&kvm->online_vcpus);
+ u8 target_cpus;
+ int sgi, mode, c, vcpu_id;
+
+ vcpu_id = vcpu->vcpu_id;
+
+ sgi = reg & 0xf;
+ target_cpus = (reg >> 16) & 0xff;
+ mode = (reg >> 24) & 3;
+
+ switch (mode) {
+ case 0:
+ if (!target_cpus)
+ return;
+ break;
+
+ case 1:
+ target_cpus = ((1 << nrcpus) - 1) & ~(1 << vcpu_id) & 0xff;
+ break;
+
+ case 2:
+ target_cpus = 1 << vcpu_id;
+ break;
+ }
+
+ kvm_for_each_vcpu(c, vcpu, kvm) {
+ if (target_cpus & 1) {
+ /* Flag the SGI as pending */
+ vgic_dist_irq_set_pending(vcpu, sgi);
+ *vgic_get_sgi_sources(dist, c, sgi) |= 1 << vcpu_id;
+ kvm_debug("SGI%d from CPU%d to CPU%d\n",
+ sgi, vcpu_id, c);
+ }
+
+ target_cpus >>= 1;
+ }
+}
+
+static bool vgic_v2_queue_sgi(struct kvm_vcpu *vcpu, int irq)
+{
+ struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
+ unsigned long sources;
+ int vcpu_id = vcpu->vcpu_id;
+ int c;
+
+ sources = *vgic_get_sgi_sources(dist, vcpu_id, irq);
+
+ for_each_set_bit(c, &sources, dist->nr_cpus) {
+ if (vgic_queue_irq(vcpu, c, irq))
+ clear_bit(c, &sources);
+ }
+
+ *vgic_get_sgi_sources(dist, vcpu_id, irq) = sources;
+
+ /*
+ * If the sources bitmap has been cleared it means that we
+ * could queue all the SGIs onto link registers (see the
+ * clear_bit above), and therefore we are done with them in
+ * our emulated gic and can get rid of them.
+ */
+ if (!sources) {
+ vgic_dist_irq_clear_pending(vcpu, irq);
+ vgic_cpu_irq_clear(vcpu, irq);
+ return true;
+ }
+
+ return false;
+}
+
+/**
+ * vgic_v2_map_resources - Configure global VGIC state before running any VCPUs
+ * @kvm: pointer to the kvm struct
+ *
+ * Map the virtual CPU interface into the VM before running any VCPUs. We
+ * can't do this at creation time, because user space must first set the
+ * virtual CPU interface address in the guest physical address space.
+ */
+static int vgic_v2_map_resources(struct kvm *kvm,
+ const struct vgic_params *params)
+{
+ int ret = 0;
+
+ if (!irqchip_in_kernel(kvm))
+ return 0;
+
+ mutex_lock(&kvm->lock);
+
+ if (vgic_ready(kvm))
+ goto out;
+
+ if (IS_VGIC_ADDR_UNDEF(kvm->arch.vgic.vgic_dist_base) ||
+ IS_VGIC_ADDR_UNDEF(kvm->arch.vgic.vgic_cpu_base)) {
+ kvm_err("Need to set vgic cpu and dist addresses first\n");
+ ret = -ENXIO;
+ goto out;
+ }
+
+ /*
+ * Initialize the vgic if this hasn't already been done on demand by
+ * accessing the vgic state from userspace.
+ */
+ ret = vgic_init(kvm);
+ if (ret) {
+ kvm_err("Unable to allocate maps\n");
+ goto out;
+ }
+
+ ret = kvm_phys_addr_ioremap(kvm, kvm->arch.vgic.vgic_cpu_base,
+ params->vcpu_base, KVM_VGIC_V2_CPU_SIZE,
+ true);
+ if (ret) {
+ kvm_err("Unable to remap VGIC CPU to VCPU\n");
+ goto out;
+ }
+
+ kvm->arch.vgic.ready = true;
+out:
+ if (ret)
+ kvm_vgic_destroy(kvm);
+ mutex_unlock(&kvm->lock);
+ return ret;
+}
+
+static void vgic_v2_add_sgi_source(struct kvm_vcpu *vcpu, int irq, int source)
+{
+ struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
+
+ *vgic_get_sgi_sources(dist, vcpu->vcpu_id, irq) |= 1 << source;
+}
+
+static int vgic_v2_init_model(struct kvm *kvm)
+{
+ int i;
+
+ for (i = VGIC_NR_PRIVATE_IRQS; i < kvm->arch.vgic.nr_irqs; i += 4)
+ vgic_set_target_reg(kvm, 0, i);
+
+ return 0;
+}
+
+void vgic_v2_init_emulation(struct kvm *kvm)
+{
+ struct vgic_dist *dist = &kvm->arch.vgic;
+
+ dist->vm_ops.handle_mmio = vgic_v2_handle_mmio;
+ dist->vm_ops.queue_sgi = vgic_v2_queue_sgi;
+ dist->vm_ops.add_sgi_source = vgic_v2_add_sgi_source;
+ dist->vm_ops.init_model = vgic_v2_init_model;
+ dist->vm_ops.map_resources = vgic_v2_map_resources;
+
+ kvm->arch.max_vcpus = VGIC_V2_MAX_CPUS;
+}
+
+static bool handle_cpu_mmio_misc(struct kvm_vcpu *vcpu,
+ struct kvm_exit_mmio *mmio, phys_addr_t offset)
+{
+ bool updated = false;
+ struct vgic_vmcr vmcr;
+ u32 *vmcr_field;
+ u32 reg;
+
+ vgic_get_vmcr(vcpu, &vmcr);
+
+ switch (offset & ~0x3) {
+ case GIC_CPU_CTRL:
+ vmcr_field = &vmcr.ctlr;
+ break;
+ case GIC_CPU_PRIMASK:
+ vmcr_field = &vmcr.pmr;
+ break;
+ case GIC_CPU_BINPOINT:
+ vmcr_field = &vmcr.bpr;
+ break;
+ case GIC_CPU_ALIAS_BINPOINT:
+ vmcr_field = &vmcr.abpr;
+ break;
+ default:
+ BUG();
+ }
+
+ if (!mmio->is_write) {
+ reg = *vmcr_field;
+ mmio_data_write(mmio, ~0, reg);
+ } else {
+ reg = mmio_data_read(mmio, ~0);
+ if (reg != *vmcr_field) {
+ *vmcr_field = reg;
+ vgic_set_vmcr(vcpu, &vmcr);
+ updated = true;
+ }
+ }
+ return updated;
+}
+
+static bool handle_mmio_abpr(struct kvm_vcpu *vcpu,
+ struct kvm_exit_mmio *mmio, phys_addr_t offset)
+{
+ return handle_cpu_mmio_misc(vcpu, mmio, GIC_CPU_ALIAS_BINPOINT);
+}
+
+static bool handle_cpu_mmio_ident(struct kvm_vcpu *vcpu,
+ struct kvm_exit_mmio *mmio,
+ phys_addr_t offset)
+{
+ u32 reg;
+
+ if (mmio->is_write)
+ return false;
+
+ /* GICC_IIDR */
+ reg = (PRODUCT_ID_KVM << 20) |
+ (GICC_ARCH_VERSION_V2 << 16) |
+ (IMPLEMENTER_ARM << 0);
+ mmio_data_write(mmio, ~0, reg);
+ return false;
+}
+
+/*
+ * CPU Interface Register accesses - these are not accessed by the VM, but by
+ * user space for saving and restoring VGIC state.
+ */
+static const struct kvm_mmio_range vgic_cpu_ranges[] = {
+ {
+ .base = GIC_CPU_CTRL,
+ .len = 12,
+ .handle_mmio = handle_cpu_mmio_misc,
+ },
+ {
+ .base = GIC_CPU_ALIAS_BINPOINT,
+ .len = 4,
+ .handle_mmio = handle_mmio_abpr,
+ },
+ {
+ .base = GIC_CPU_ACTIVEPRIO,
+ .len = 16,
+ .handle_mmio = handle_mmio_raz_wi,
+ },
+ {
+ .base = GIC_CPU_IDENT,
+ .len = 4,
+ .handle_mmio = handle_cpu_mmio_ident,
+ },
+};
+
+static int vgic_attr_regs_access(struct kvm_device *dev,
+ struct kvm_device_attr *attr,
+ u32 *reg, bool is_write)
+{
+ const struct kvm_mmio_range *r = NULL, *ranges;
+ phys_addr_t offset;
+ int ret, cpuid, c;
+ struct kvm_vcpu *vcpu, *tmp_vcpu;
+ struct vgic_dist *vgic;
+ struct kvm_exit_mmio mmio;
+
+ offset = attr->attr & KVM_DEV_ARM_VGIC_OFFSET_MASK;
+ cpuid = (attr->attr & KVM_DEV_ARM_VGIC_CPUID_MASK) >>
+ KVM_DEV_ARM_VGIC_CPUID_SHIFT;
+
+ mutex_lock(&dev->kvm->lock);
+
+ ret = vgic_init(dev->kvm);
+ if (ret)
+ goto out;
+
+ if (cpuid >= atomic_read(&dev->kvm->online_vcpus)) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ vcpu = kvm_get_vcpu(dev->kvm, cpuid);
+ vgic = &dev->kvm->arch.vgic;
+
+ mmio.len = 4;
+ mmio.is_write = is_write;
+ if (is_write)
+ mmio_data_write(&mmio, ~0, *reg);
+ switch (attr->group) {
+ case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
+ mmio.phys_addr = vgic->vgic_dist_base + offset;
+ ranges = vgic_dist_ranges;
+ break;
+ case KVM_DEV_ARM_VGIC_GRP_CPU_REGS:
+ mmio.phys_addr = vgic->vgic_cpu_base + offset;
+ ranges = vgic_cpu_ranges;
+ break;
+ default:
+ BUG();
+ }
+ r = vgic_find_range(ranges, &mmio, offset);
+
+ if (unlikely(!r || !r->handle_mmio)) {
+ ret = -ENXIO;
+ goto out;
+ }
+
+
+ spin_lock(&vgic->lock);
+
+ /*
+ * Ensure that no other VCPU is running by checking the vcpu->cpu
+	 * field. If no other VCPUs are running we can safely access the VGIC
+	 * state, because even if another VCPU is run after this point, that
+ * VCPU will not touch the vgic state, because it will block on
+ * getting the vgic->lock in kvm_vgic_sync_hwstate().
+ */
+ kvm_for_each_vcpu(c, tmp_vcpu, dev->kvm) {
+ if (unlikely(tmp_vcpu->cpu != -1)) {
+ ret = -EBUSY;
+ goto out_vgic_unlock;
+ }
+ }
+
+ /*
+ * Move all pending IRQs from the LRs on all VCPUs so the pending
+ * state can be properly represented in the register state accessible
+ * through this API.
+ */
+ kvm_for_each_vcpu(c, tmp_vcpu, dev->kvm)
+ vgic_unqueue_irqs(tmp_vcpu);
+
+ offset -= r->base;
+ r->handle_mmio(vcpu, &mmio, offset);
+
+ if (!is_write)
+ *reg = mmio_data_read(&mmio, ~0);
+
+ ret = 0;
+out_vgic_unlock:
+ spin_unlock(&vgic->lock);
+out:
+ mutex_unlock(&dev->kvm->lock);
+ return ret;
+}
+
+static int vgic_v2_create(struct kvm_device *dev, u32 type)
+{
+ return kvm_vgic_create(dev->kvm, type);
+}
+
+static void vgic_v2_destroy(struct kvm_device *dev)
+{
+ kfree(dev);
+}
+
+static int vgic_v2_set_attr(struct kvm_device *dev,
+ struct kvm_device_attr *attr)
+{
+ int ret;
+
+ ret = vgic_set_common_attr(dev, attr);
+ if (ret != -ENXIO)
+ return ret;
+
+ switch (attr->group) {
+ case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
+ case KVM_DEV_ARM_VGIC_GRP_CPU_REGS: {
+ u32 __user *uaddr = (u32 __user *)(long)attr->addr;
+ u32 reg;
+
+ if (get_user(reg, uaddr))
+ return -EFAULT;
+
+ return vgic_attr_regs_access(dev, attr, &reg, true);
+ }
+
+ }
+
+ return -ENXIO;
+}
+
+static int vgic_v2_get_attr(struct kvm_device *dev,
+ struct kvm_device_attr *attr)
+{
+ int ret;
+
+ ret = vgic_get_common_attr(dev, attr);
+ if (ret != -ENXIO)
+ return ret;
+
+ switch (attr->group) {
+ case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
+ case KVM_DEV_ARM_VGIC_GRP_CPU_REGS: {
+ u32 __user *uaddr = (u32 __user *)(long)attr->addr;
+ u32 reg = 0;
+
+ ret = vgic_attr_regs_access(dev, attr, &reg, false);
+ if (ret)
+ return ret;
+ return put_user(reg, uaddr);
+ }
+
+ }
+
+ return -ENXIO;
+}
+
+static int vgic_v2_has_attr(struct kvm_device *dev,
+ struct kvm_device_attr *attr)
+{
+ phys_addr_t offset;
+
+ switch (attr->group) {
+ case KVM_DEV_ARM_VGIC_GRP_ADDR:
+ switch (attr->attr) {
+ case KVM_VGIC_V2_ADDR_TYPE_DIST:
+ case KVM_VGIC_V2_ADDR_TYPE_CPU:
+ return 0;
+ }
+ break;
+ case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
+ offset = attr->attr & KVM_DEV_ARM_VGIC_OFFSET_MASK;
+ return vgic_has_attr_regs(vgic_dist_ranges, offset);
+ case KVM_DEV_ARM_VGIC_GRP_CPU_REGS:
+ offset = attr->attr & KVM_DEV_ARM_VGIC_OFFSET_MASK;
+ return vgic_has_attr_regs(vgic_cpu_ranges, offset);
+ case KVM_DEV_ARM_VGIC_GRP_NR_IRQS:
+ return 0;
+ case KVM_DEV_ARM_VGIC_GRP_CTRL:
+ switch (attr->attr) {
+ case KVM_DEV_ARM_VGIC_CTRL_INIT:
+ return 0;
+ }
+ }
+ return -ENXIO;
+}
+
+struct kvm_device_ops kvm_arm_vgic_v2_ops = {
+ .name = "kvm-arm-vgic-v2",
+ .create = vgic_v2_create,
+ .destroy = vgic_v2_destroy,
+ .set_attr = vgic_v2_set_attr,
+ .get_attr = vgic_v2_get_attr,
+ .has_attr = vgic_v2_has_attr,
+};
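
The set_attr/get_attr handlers above back the userspace save/restore API through KVM_{GET,SET}_DEVICE_ATTR. A hedged sketch of how userspace could read one distributor register through this path (the fd and offset are illustrative; the attr encoding mirrors what vgic_attr_regs_access() decodes):

    #include <stdint.h>
    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    /* Read a 32-bit distributor register as seen by a given VCPU;
     * vgic_fd comes from KVM_CREATE_DEVICE(KVM_DEV_TYPE_ARM_VGIC_V2). */
    static int vgic_dist_reg_read(int vgic_fd, uint64_t cpuid,
                                  uint64_t offset, uint32_t *val)
    {
            struct kvm_device_attr attr = {
                    .group = KVM_DEV_ARM_VGIC_GRP_DIST_REGS,
                    .attr  = (cpuid << KVM_DEV_ARM_VGIC_CPUID_SHIFT) |
                             (offset & KVM_DEV_ARM_VGIC_OFFSET_MASK),
                    .addr  = (uint64_t)(unsigned long)val,
            };

            return ioctl(vgic_fd, KVM_GET_DEVICE_ATTR, &attr);
    }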
diff --git a/virt/kvm/arm/vgic-v2.c b/virt/kvm/arm/vgic-v2.c
index 2935405ad22f..a0a7b5d1a070 100644
--- a/virt/kvm/arm/vgic-v2.c
+++ b/virt/kvm/arm/vgic-v2.c
@@ -229,12 +229,16 @@ int vgic_v2_probe(struct device_node *vgic_node,
goto out_unmap;
}
+ vgic->can_emulate_gicv2 = true;
+ kvm_register_device_ops(&kvm_arm_vgic_v2_ops, KVM_DEV_TYPE_ARM_VGIC_V2);
+
vgic->vcpu_base = vcpu_res.start;
kvm_info("%s@%llx IRQ%d\n", vgic_node->name,
vctrl_res.start, vgic->maint_irq);
vgic->type = VGIC_V2;
+ vgic->max_gic_vcpus = VGIC_V2_MAX_CPUS;
*ops = &vgic_v2_ops;
*params = vgic;
goto out;
diff --git a/virt/kvm/arm/vgic-v3-emul.c b/virt/kvm/arm/vgic-v3-emul.c
new file mode 100644
index 000000000000..b3f154631515
--- /dev/null
+++ b/virt/kvm/arm/vgic-v3-emul.c
@@ -0,0 +1,1036 @@
+/*
+ * GICv3 distributor and redistributor emulation
+ *
+ * GICv3 emulation is currently only supported on a GICv3 host (because
+ * we rely on the hardware's CPU interface virtualization support), but
+ * supports both hardware with or without the optional GICv2 backwards
+ * compatibility features.
+ *
+ * Limitations of the emulation:
+ * (RAZ/WI: read as zero, write ignore, RAO/WI: read as one, write ignore)
+ * - We do not support LPIs (yet). TYPER.LPIS is reported as 0 and is RAZ/WI.
+ * - We do not support the message based interrupts (MBIs) triggered by
+ * writes to the GICD_{SET,CLR}SPI_* registers. TYPER.MBIS is reported as 0.
+ * - We do not support the (optional) backwards compatibility feature.
+ * GICD_CTLR.ARE resets to 1 and is RAO/WI. If the _host_ GIC supports
+ * the compatibility feature, you can use a GICv2 in the guest, though.
+ * - We only support a single security state. GICD_CTLR.DS is 1 and is RAO/WI.
+ * - Priorities are not emulated (same as the GICv2 emulation). Linux
+ * as a guest is fine with this, because it does not use priorities.
+ * - We only support Group1 interrupts. Again Linux uses only those.
+ *
+ * Copyright (C) 2014 ARM Ltd.
+ * Author: Andre Przywara <andre.przywara@arm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/cpu.h>
+#include <linux/kvm.h>
+#include <linux/kvm_host.h>
+#include <linux/interrupt.h>
+
+#include <linux/irqchip/arm-gic-v3.h>
+#include <kvm/arm_vgic.h>
+
+#include <asm/kvm_emulate.h>
+#include <asm/kvm_arm.h>
+#include <asm/kvm_mmu.h>
+
+#include "vgic.h"
+
+static bool handle_mmio_rao_wi(struct kvm_vcpu *vcpu,
+ struct kvm_exit_mmio *mmio, phys_addr_t offset)
+{
+ u32 reg = 0xffffffff;
+
+ vgic_reg_access(mmio, &reg, offset,
+ ACCESS_READ_VALUE | ACCESS_WRITE_IGNORED);
+
+ return false;
+}
+
+static bool handle_mmio_ctlr(struct kvm_vcpu *vcpu,
+ struct kvm_exit_mmio *mmio, phys_addr_t offset)
+{
+ u32 reg = 0;
+
+ /*
+ * Force ARE and DS to 1, the guest cannot change this.
+ * For the time being we only support Group1 interrupts.
+ */
+ if (vcpu->kvm->arch.vgic.enabled)
+ reg = GICD_CTLR_ENABLE_SS_G1;
+ reg |= GICD_CTLR_ARE_NS | GICD_CTLR_DS;
+
+ vgic_reg_access(mmio, &reg, offset,
+ ACCESS_READ_VALUE | ACCESS_WRITE_VALUE);
+ if (mmio->is_write) {
+ if (reg & GICD_CTLR_ENABLE_SS_G0)
+ kvm_info("guest tried to enable unsupported Group0 interrupts\n");
+ vcpu->kvm->arch.vgic.enabled = !!(reg & GICD_CTLR_ENABLE_SS_G1);
+ vgic_update_state(vcpu->kvm);
+ return true;
+ }
+ return false;
+}
+
+/*
+ * As this implementation does not provide compatibility
+ * with GICv2 (ARE==1), we report zero CPUs in bits [5..7].
+ * Also LPIs and MBIs are not supported, so we set the respective bits to 0.
+ * Also we report at most 2**10=1024 interrupt IDs (to match 1024 SPIs).
+ */
+#define INTERRUPT_ID_BITS 10
+static bool handle_mmio_typer(struct kvm_vcpu *vcpu,
+ struct kvm_exit_mmio *mmio, phys_addr_t offset)
+{
+ u32 reg;
+
+ reg = (min(vcpu->kvm->arch.vgic.nr_irqs, 1024) >> 5) - 1;
+
+ reg |= (INTERRUPT_ID_BITS - 1) << 19;
+
+ vgic_reg_access(mmio, &reg, offset,
+ ACCESS_READ_VALUE | ACCESS_WRITE_IGNORED);
+
+ return false;
+}
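
A worked example of this encoding, assuming a guest configured with nr_irqs = 256:

    /* GICD_TYPER, illustrative values:
     *   ITLinesNumber, bits [4:0]   = (256 >> 5) - 1 = 7
     *     (the GIC encodes 32 * (N + 1) supported INTIDs)
     *   IDbits,        bits [23:19] = INTERRUPT_ID_BITS - 1 = 9
     *     (advertising 10-bit interrupt IDs, i.e. up to 1024)
     */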
+
+static bool handle_mmio_iidr(struct kvm_vcpu *vcpu,
+ struct kvm_exit_mmio *mmio, phys_addr_t offset)
+{
+ u32 reg;
+
+ reg = (PRODUCT_ID_KVM << 24) | (IMPLEMENTER_ARM << 0);
+ vgic_reg_access(mmio, &reg, offset,
+ ACCESS_READ_VALUE | ACCESS_WRITE_IGNORED);
+
+ return false;
+}
+
+static bool handle_mmio_set_enable_reg_dist(struct kvm_vcpu *vcpu,
+ struct kvm_exit_mmio *mmio,
+ phys_addr_t offset)
+{
+ if (likely(offset >= VGIC_NR_PRIVATE_IRQS / 8))
+ return vgic_handle_enable_reg(vcpu->kvm, mmio, offset,
+ vcpu->vcpu_id,
+ ACCESS_WRITE_SETBIT);
+
+ vgic_reg_access(mmio, NULL, offset,
+ ACCESS_READ_RAZ | ACCESS_WRITE_IGNORED);
+ return false;
+}
+
+static bool handle_mmio_clear_enable_reg_dist(struct kvm_vcpu *vcpu,
+ struct kvm_exit_mmio *mmio,
+ phys_addr_t offset)
+{
+ if (likely(offset >= VGIC_NR_PRIVATE_IRQS / 8))
+ return vgic_handle_enable_reg(vcpu->kvm, mmio, offset,
+ vcpu->vcpu_id,
+ ACCESS_WRITE_CLEARBIT);
+
+ vgic_reg_access(mmio, NULL, offset,
+ ACCESS_READ_RAZ | ACCESS_WRITE_IGNORED);
+ return false;
+}
+
+static bool handle_mmio_set_pending_reg_dist(struct kvm_vcpu *vcpu,
+ struct kvm_exit_mmio *mmio,
+ phys_addr_t offset)
+{
+ if (likely(offset >= VGIC_NR_PRIVATE_IRQS / 8))
+ return vgic_handle_set_pending_reg(vcpu->kvm, mmio, offset,
+ vcpu->vcpu_id);
+
+ vgic_reg_access(mmio, NULL, offset,
+ ACCESS_READ_RAZ | ACCESS_WRITE_IGNORED);
+ return false;
+}
+
+static bool handle_mmio_clear_pending_reg_dist(struct kvm_vcpu *vcpu,
+ struct kvm_exit_mmio *mmio,
+ phys_addr_t offset)
+{
+ if (likely(offset >= VGIC_NR_PRIVATE_IRQS / 8))
+ return vgic_handle_clear_pending_reg(vcpu->kvm, mmio, offset,
+ vcpu->vcpu_id);
+
+ vgic_reg_access(mmio, NULL, offset,
+ ACCESS_READ_RAZ | ACCESS_WRITE_IGNORED);
+ return false;
+}
+
+static bool handle_mmio_priority_reg_dist(struct kvm_vcpu *vcpu,
+ struct kvm_exit_mmio *mmio,
+ phys_addr_t offset)
+{
+ u32 *reg;
+
+ if (unlikely(offset < VGIC_NR_PRIVATE_IRQS)) {
+ vgic_reg_access(mmio, NULL, offset,
+ ACCESS_READ_RAZ | ACCESS_WRITE_IGNORED);
+ return false;
+ }
+
+ reg = vgic_bytemap_get_reg(&vcpu->kvm->arch.vgic.irq_priority,
+ vcpu->vcpu_id, offset);
+ vgic_reg_access(mmio, reg, offset,
+ ACCESS_READ_VALUE | ACCESS_WRITE_VALUE);
+ return false;
+}
+
+static bool handle_mmio_cfg_reg_dist(struct kvm_vcpu *vcpu,
+ struct kvm_exit_mmio *mmio,
+ phys_addr_t offset)
+{
+ u32 *reg;
+
+ if (unlikely(offset < VGIC_NR_PRIVATE_IRQS / 4)) {
+ vgic_reg_access(mmio, NULL, offset,
+ ACCESS_READ_RAZ | ACCESS_WRITE_IGNORED);
+ return false;
+ }
+
+ reg = vgic_bitmap_get_reg(&vcpu->kvm->arch.vgic.irq_cfg,
+ vcpu->vcpu_id, offset >> 1);
+
+ return vgic_handle_cfg_reg(reg, mmio, offset);
+}
+
+/*
+ * We use a compressed version of the MPIDR (all 32 bits in one 32-bit word)
+ * when we store the target MPIDR written by the guest.
+ */
+static u32 compress_mpidr(unsigned long mpidr)
+{
+ u32 ret;
+
+ ret = MPIDR_AFFINITY_LEVEL(mpidr, 0);
+ ret |= MPIDR_AFFINITY_LEVEL(mpidr, 1) << 8;
+ ret |= MPIDR_AFFINITY_LEVEL(mpidr, 2) << 16;
+ ret |= MPIDR_AFFINITY_LEVEL(mpidr, 3) << 24;
+
+ return ret;
+}
+
+static unsigned long uncompress_mpidr(u32 value)
+{
+ unsigned long mpidr;
+
+ mpidr = ((value >> 0) & 0xFF) << MPIDR_LEVEL_SHIFT(0);
+ mpidr |= ((value >> 8) & 0xFF) << MPIDR_LEVEL_SHIFT(1);
+ mpidr |= ((value >> 16) & 0xFF) << MPIDR_LEVEL_SHIFT(2);
+ mpidr |= (u64)((value >> 24) & 0xFF) << MPIDR_LEVEL_SHIFT(3);
+
+ return mpidr;
+}
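
An illustrative round trip through the two helpers above:

    /*   MPIDR with Aff3=0x01 Aff2=0x02 Aff1=0x03 Aff0=0x04
     *   compress_mpidr()   -> 0x01020304 (one byte per affinity level)
     *   uncompress_mpidr() -> Aff3 restored at bits [39:32] on arm64,
     *                         hence the u64 cast above
     */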
+
+/*
+ * Lookup the given MPIDR value to get the vcpu_id (if there is one)
+ * and store that in the irq_spi_cpu[] array.
+ * This limits the number of VCPUs to 255 for now, extending the data
+ * type (or storing kvm_vcpu pointers) should lift the limit.
+ * Store the original MPIDR value in an extra array to support read-as-written.
+ * Unallocated MPIDRs are translated to a special value and caught
+ * before any array accesses.
+ */
+static bool handle_mmio_route_reg(struct kvm_vcpu *vcpu,
+ struct kvm_exit_mmio *mmio,
+ phys_addr_t offset)
+{
+ struct kvm *kvm = vcpu->kvm;
+ struct vgic_dist *dist = &kvm->arch.vgic;
+ int spi;
+ u32 reg;
+ int vcpu_id;
+ unsigned long *bmap, mpidr;
+
+ /*
+ * The upper 32 bits of each 64 bit register are zero,
+ * as we don't support Aff3.
+ */
+ if ((offset & 4)) {
+ vgic_reg_access(mmio, NULL, offset,
+ ACCESS_READ_RAZ | ACCESS_WRITE_IGNORED);
+ return false;
+ }
+
+ /* This region only covers SPIs, so no handling of private IRQs here. */
+ spi = offset / 8;
+
+ /* get the stored MPIDR for this IRQ */
+ mpidr = uncompress_mpidr(dist->irq_spi_mpidr[spi]);
+ reg = mpidr;
+
+ vgic_reg_access(mmio, &reg, offset,
+ ACCESS_READ_VALUE | ACCESS_WRITE_VALUE);
+
+ if (!mmio->is_write)
+ return false;
+
+ /*
+ * Now clear the currently assigned vCPU from the map, making room
+ * for the new one to be written below
+ */
+ vcpu = kvm_mpidr_to_vcpu(kvm, mpidr);
+ if (likely(vcpu)) {
+ vcpu_id = vcpu->vcpu_id;
+ bmap = vgic_bitmap_get_shared_map(&dist->irq_spi_target[vcpu_id]);
+ __clear_bit(spi, bmap);
+ }
+
+ dist->irq_spi_mpidr[spi] = compress_mpidr(reg);
+ vcpu = kvm_mpidr_to_vcpu(kvm, reg & MPIDR_HWID_BITMASK);
+
+ /*
+ * The spec says that non-existent MPIDR values should not be
+ * forwarded to any existing (v)CPU, but should be able to become
+ * pending anyway. We simply keep the irq_spi_target[] array empty, so
+ * the interrupt will never be injected.
+ * irq_spi_cpu[irq] gets a magic value in this case.
+ */
+ if (likely(vcpu)) {
+ vcpu_id = vcpu->vcpu_id;
+ dist->irq_spi_cpu[spi] = vcpu_id;
+ bmap = vgic_bitmap_get_shared_map(&dist->irq_spi_target[vcpu_id]);
+ __set_bit(spi, bmap);
+ } else {
+ dist->irq_spi_cpu[spi] = VCPU_NOT_ALLOCATED;
+ }
+
+ vgic_update_state(kvm);
+
+ return true;
+}
+
+/*
+ * We should be careful about promising too much when a guest reads
+ * this register. Don't claim to be like any hardware implementation,
+ * but just report the GIC as version 3 - which is what a Linux guest
+ * would check.
+ */
+static bool handle_mmio_idregs(struct kvm_vcpu *vcpu,
+ struct kvm_exit_mmio *mmio,
+ phys_addr_t offset)
+{
+ u32 reg = 0;
+
+ switch (offset + GICD_IDREGS) {
+ case GICD_PIDR2:
+ reg = 0x3b;
+ break;
+ }
+
+ vgic_reg_access(mmio, &reg, offset,
+ ACCESS_READ_VALUE | ACCESS_WRITE_IGNORED);
+
+ return false;
+}
+
+static const struct kvm_mmio_range vgic_v3_dist_ranges[] = {
+ {
+ .base = GICD_CTLR,
+ .len = 0x04,
+ .bits_per_irq = 0,
+ .handle_mmio = handle_mmio_ctlr,
+ },
+ {
+ .base = GICD_TYPER,
+ .len = 0x04,
+ .bits_per_irq = 0,
+ .handle_mmio = handle_mmio_typer,
+ },
+ {
+ .base = GICD_IIDR,
+ .len = 0x04,
+ .bits_per_irq = 0,
+ .handle_mmio = handle_mmio_iidr,
+ },
+ {
+ /* this register is optional, it is RAZ/WI if not implemented */
+ .base = GICD_STATUSR,
+ .len = 0x04,
+ .bits_per_irq = 0,
+ .handle_mmio = handle_mmio_raz_wi,
+ },
+ {
+ /* this write only register is WI when TYPER.MBIS=0 */
+ .base = GICD_SETSPI_NSR,
+ .len = 0x04,
+ .bits_per_irq = 0,
+ .handle_mmio = handle_mmio_raz_wi,
+ },
+ {
+ /* this write only register is WI when TYPER.MBIS=0 */
+ .base = GICD_CLRSPI_NSR,
+ .len = 0x04,
+ .bits_per_irq = 0,
+ .handle_mmio = handle_mmio_raz_wi,
+ },
+ {
+ /* this is RAZ/WI when DS=1 */
+ .base = GICD_SETSPI_SR,
+ .len = 0x04,
+ .bits_per_irq = 0,
+ .handle_mmio = handle_mmio_raz_wi,
+ },
+ {
+ /* this is RAZ/WI when DS=1 */
+ .base = GICD_CLRSPI_SR,
+ .len = 0x04,
+ .bits_per_irq = 0,
+ .handle_mmio = handle_mmio_raz_wi,
+ },
+ {
+ .base = GICD_IGROUPR,
+ .len = 0x80,
+ .bits_per_irq = 1,
+ .handle_mmio = handle_mmio_rao_wi,
+ },
+ {
+ .base = GICD_ISENABLER,
+ .len = 0x80,
+ .bits_per_irq = 1,
+ .handle_mmio = handle_mmio_set_enable_reg_dist,
+ },
+ {
+ .base = GICD_ICENABLER,
+ .len = 0x80,
+ .bits_per_irq = 1,
+ .handle_mmio = handle_mmio_clear_enable_reg_dist,
+ },
+ {
+ .base = GICD_ISPENDR,
+ .len = 0x80,
+ .bits_per_irq = 1,
+ .handle_mmio = handle_mmio_set_pending_reg_dist,
+ },
+ {
+ .base = GICD_ICPENDR,
+ .len = 0x80,
+ .bits_per_irq = 1,
+ .handle_mmio = handle_mmio_clear_pending_reg_dist,
+ },
+ {
+ .base = GICD_ISACTIVER,
+ .len = 0x80,
+ .bits_per_irq = 1,
+ .handle_mmio = handle_mmio_raz_wi,
+ },
+ {
+ .base = GICD_ICACTIVER,
+ .len = 0x80,
+ .bits_per_irq = 1,
+ .handle_mmio = handle_mmio_raz_wi,
+ },
+ {
+ .base = GICD_IPRIORITYR,
+ .len = 0x400,
+ .bits_per_irq = 8,
+ .handle_mmio = handle_mmio_priority_reg_dist,
+ },
+ {
+ /* TARGETSRn is RES0 when ARE=1 */
+ .base = GICD_ITARGETSR,
+ .len = 0x400,
+ .bits_per_irq = 8,
+ .handle_mmio = handle_mmio_raz_wi,
+ },
+ {
+ .base = GICD_ICFGR,
+ .len = 0x100,
+ .bits_per_irq = 2,
+ .handle_mmio = handle_mmio_cfg_reg_dist,
+ },
+ {
+ /* this is RAZ/WI when DS=1 */
+ .base = GICD_IGRPMODR,
+ .len = 0x80,
+ .bits_per_irq = 1,
+ .handle_mmio = handle_mmio_raz_wi,
+ },
+ {
+ /* this is RAZ/WI when DS=1 */
+ .base = GICD_NSACR,
+ .len = 0x100,
+ .bits_per_irq = 2,
+ .handle_mmio = handle_mmio_raz_wi,
+ },
+ {
+ /* this is RAZ/WI when ARE=1 */
+ .base = GICD_SGIR,
+ .len = 0x04,
+ .handle_mmio = handle_mmio_raz_wi,
+ },
+ {
+ /* this is RAZ/WI when ARE=1 */
+ .base = GICD_CPENDSGIR,
+ .len = 0x10,
+ .handle_mmio = handle_mmio_raz_wi,
+ },
+ {
+ /* this is RAZ/WI when ARE=1 */
+ .base = GICD_SPENDSGIR,
+ .len = 0x10,
+ .handle_mmio = handle_mmio_raz_wi,
+ },
+ {
+ .base = GICD_IROUTER + 0x100,
+ .len = 0x1ee0,
+ .bits_per_irq = 64,
+ .handle_mmio = handle_mmio_route_reg,
+ },
+ {
+ .base = GICD_IDREGS,
+ .len = 0x30,
+ .bits_per_irq = 0,
+ .handle_mmio = handle_mmio_idregs,
+ },
+ {},
+};
+
+static bool handle_mmio_set_enable_reg_redist(struct kvm_vcpu *vcpu,
+ struct kvm_exit_mmio *mmio,
+ phys_addr_t offset)
+{
+ struct kvm_vcpu *redist_vcpu = mmio->private;
+
+ return vgic_handle_enable_reg(vcpu->kvm, mmio, offset,
+ redist_vcpu->vcpu_id,
+ ACCESS_WRITE_SETBIT);
+}
+
+static bool handle_mmio_clear_enable_reg_redist(struct kvm_vcpu *vcpu,
+ struct kvm_exit_mmio *mmio,
+ phys_addr_t offset)
+{
+ struct kvm_vcpu *redist_vcpu = mmio->private;
+
+ return vgic_handle_enable_reg(vcpu->kvm, mmio, offset,
+ redist_vcpu->vcpu_id,
+ ACCESS_WRITE_CLEARBIT);
+}
+
+static bool handle_mmio_set_pending_reg_redist(struct kvm_vcpu *vcpu,
+ struct kvm_exit_mmio *mmio,
+ phys_addr_t offset)
+{
+ struct kvm_vcpu *redist_vcpu = mmio->private;
+
+ return vgic_handle_set_pending_reg(vcpu->kvm, mmio, offset,
+ redist_vcpu->vcpu_id);
+}
+
+static bool handle_mmio_clear_pending_reg_redist(struct kvm_vcpu *vcpu,
+ struct kvm_exit_mmio *mmio,
+ phys_addr_t offset)
+{
+ struct kvm_vcpu *redist_vcpu = mmio->private;
+
+ return vgic_handle_clear_pending_reg(vcpu->kvm, mmio, offset,
+ redist_vcpu->vcpu_id);
+}
+
+static bool handle_mmio_priority_reg_redist(struct kvm_vcpu *vcpu,
+ struct kvm_exit_mmio *mmio,
+ phys_addr_t offset)
+{
+ struct kvm_vcpu *redist_vcpu = mmio->private;
+ u32 *reg;
+
+ reg = vgic_bytemap_get_reg(&vcpu->kvm->arch.vgic.irq_priority,
+ redist_vcpu->vcpu_id, offset);
+ vgic_reg_access(mmio, reg, offset,
+ ACCESS_READ_VALUE | ACCESS_WRITE_VALUE);
+ return false;
+}
+
+static bool handle_mmio_cfg_reg_redist(struct kvm_vcpu *vcpu,
+ struct kvm_exit_mmio *mmio,
+ phys_addr_t offset)
+{
+ struct kvm_vcpu *redist_vcpu = mmio->private;
+
+ u32 *reg = vgic_bitmap_get_reg(&vcpu->kvm->arch.vgic.irq_cfg,
+ redist_vcpu->vcpu_id, offset >> 1);
+
+ return vgic_handle_cfg_reg(reg, mmio, offset);
+}
+
+static const struct kvm_mmio_range vgic_redist_sgi_ranges[] = {
+ {
+ .base = GICR_IGROUPR0,
+ .len = 0x04,
+ .bits_per_irq = 1,
+ .handle_mmio = handle_mmio_rao_wi,
+ },
+ {
+ .base = GICR_ISENABLER0,
+ .len = 0x04,
+ .bits_per_irq = 1,
+ .handle_mmio = handle_mmio_set_enable_reg_redist,
+ },
+ {
+ .base = GICR_ICENABLER0,
+ .len = 0x04,
+ .bits_per_irq = 1,
+ .handle_mmio = handle_mmio_clear_enable_reg_redist,
+ },
+ {
+ .base = GICR_ISPENDR0,
+ .len = 0x04,
+ .bits_per_irq = 1,
+ .handle_mmio = handle_mmio_set_pending_reg_redist,
+ },
+ {
+ .base = GICR_ICPENDR0,
+ .len = 0x04,
+ .bits_per_irq = 1,
+ .handle_mmio = handle_mmio_clear_pending_reg_redist,
+ },
+ {
+ .base = GICR_ISACTIVER0,
+ .len = 0x04,
+ .bits_per_irq = 1,
+ .handle_mmio = handle_mmio_raz_wi,
+ },
+ {
+ .base = GICR_ICACTIVER0,
+ .len = 0x04,
+ .bits_per_irq = 1,
+ .handle_mmio = handle_mmio_raz_wi,
+ },
+ {
+ .base = GICR_IPRIORITYR0,
+ .len = 0x20,
+ .bits_per_irq = 8,
+ .handle_mmio = handle_mmio_priority_reg_redist,
+ },
+ {
+ .base = GICR_ICFGR0,
+ .len = 0x08,
+ .bits_per_irq = 2,
+ .handle_mmio = handle_mmio_cfg_reg_redist,
+ },
+ {
+ .base = GICR_IGRPMODR0,
+ .len = 0x04,
+ .bits_per_irq = 1,
+ .handle_mmio = handle_mmio_raz_wi,
+ },
+ {
+ .base = GICR_NSACR,
+ .len = 0x04,
+ .handle_mmio = handle_mmio_raz_wi,
+ },
+ {},
+};
+
+static bool handle_mmio_ctlr_redist(struct kvm_vcpu *vcpu,
+ struct kvm_exit_mmio *mmio,
+ phys_addr_t offset)
+{
+ /* since we don't support LPIs, this register is zero for now */
+ vgic_reg_access(mmio, NULL, offset,
+ ACCESS_READ_RAZ | ACCESS_WRITE_IGNORED);
+ return false;
+}
+
+static bool handle_mmio_typer_redist(struct kvm_vcpu *vcpu,
+ struct kvm_exit_mmio *mmio,
+ phys_addr_t offset)
+{
+ u32 reg;
+ u64 mpidr;
+ struct kvm_vcpu *redist_vcpu = mmio->private;
+ int target_vcpu_id = redist_vcpu->vcpu_id;
+
+ /* the upper 32 bits contain the affinity value */
+ if ((offset & ~3) == 4) {
+ mpidr = kvm_vcpu_get_mpidr_aff(redist_vcpu);
+ reg = compress_mpidr(mpidr);
+
+ vgic_reg_access(mmio, &reg, offset,
+ ACCESS_READ_VALUE | ACCESS_WRITE_IGNORED);
+ return false;
+ }
+
+ reg = redist_vcpu->vcpu_id << 8;
+ if (target_vcpu_id == atomic_read(&vcpu->kvm->online_vcpus) - 1)
+ reg |= GICR_TYPER_LAST;
+ vgic_reg_access(mmio, &reg, offset,
+ ACCESS_READ_VALUE | ACCESS_WRITE_IGNORED);
+ return false;
+}
+
+static const struct kvm_mmio_range vgic_redist_ranges[] = {
+ {
+ .base = GICR_CTLR,
+ .len = 0x04,
+ .bits_per_irq = 0,
+ .handle_mmio = handle_mmio_ctlr_redist,
+ },
+ {
+ .base = GICR_TYPER,
+ .len = 0x08,
+ .bits_per_irq = 0,
+ .handle_mmio = handle_mmio_typer_redist,
+ },
+ {
+ .base = GICR_IIDR,
+ .len = 0x04,
+ .bits_per_irq = 0,
+ .handle_mmio = handle_mmio_iidr,
+ },
+ {
+ .base = GICR_WAKER,
+ .len = 0x04,
+ .bits_per_irq = 0,
+ .handle_mmio = handle_mmio_raz_wi,
+ },
+ {
+ .base = GICR_IDREGS,
+ .len = 0x30,
+ .bits_per_irq = 0,
+ .handle_mmio = handle_mmio_idregs,
+ },
+ {},
+};
+
+/*
+ * This function splits accesses between the distributor and the two
+ * redistributor parts (private/SPI). As each redistributor is accessible
+ * from any CPU, we have to determine the affected VCPU by taking the faulting
+ * address into account. We then pass this VCPU to the handler function via
+ * the private parameter.
+ */
+#define SGI_BASE_OFFSET SZ_64K
+static bool vgic_v3_handle_mmio(struct kvm_vcpu *vcpu, struct kvm_run *run,
+ struct kvm_exit_mmio *mmio)
+{
+ struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
+ unsigned long dbase = dist->vgic_dist_base;
+ unsigned long rdbase = dist->vgic_redist_base;
+ int nrcpus = atomic_read(&vcpu->kvm->online_vcpus);
+ int vcpu_id;
+ const struct kvm_mmio_range *mmio_range;
+
+ if (is_in_range(mmio->phys_addr, mmio->len, dbase, GIC_V3_DIST_SIZE)) {
+ return vgic_handle_mmio_range(vcpu, run, mmio,
+ vgic_v3_dist_ranges, dbase);
+ }
+
+ if (!is_in_range(mmio->phys_addr, mmio->len, rdbase,
+ GIC_V3_REDIST_SIZE * nrcpus))
+ return false;
+
+ vcpu_id = (mmio->phys_addr - rdbase) / GIC_V3_REDIST_SIZE;
+ rdbase += (vcpu_id * GIC_V3_REDIST_SIZE);
+ mmio->private = kvm_get_vcpu(vcpu->kvm, vcpu_id);
+
+ if (mmio->phys_addr >= rdbase + SGI_BASE_OFFSET) {
+ rdbase += SGI_BASE_OFFSET;
+ mmio_range = vgic_redist_sgi_ranges;
+ } else {
+ mmio_range = vgic_redist_ranges;
+ }
+ return vgic_handle_mmio_range(vcpu, run, mmio, mmio_range, rdbase);
+}
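
To make the split concrete, a hedged example assuming rdbase = 0x080a0000 and GIC_V3_REDIST_SIZE = 128K (two 64K frames per VCPU):

    /* A fault at 0x080d0000:
     *   vcpu_id = (0x080d0000 - 0x080a0000) / 0x20000 = 1
     *   VCPU1's frames start at 0x080c0000; the fault lands in the
     *   second (SGI) 64K frame, so it is handled through
     *   vgic_redist_sgi_ranges with the base rebased to 0x080d0000.
     */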
+
+static bool vgic_v3_queue_sgi(struct kvm_vcpu *vcpu, int irq)
+{
+ if (vgic_queue_irq(vcpu, 0, irq)) {
+ vgic_dist_irq_clear_pending(vcpu, irq);
+ vgic_cpu_irq_clear(vcpu, irq);
+ return true;
+ }
+
+ return false;
+}
+
+static int vgic_v3_map_resources(struct kvm *kvm,
+ const struct vgic_params *params)
+{
+ int ret = 0;
+ struct vgic_dist *dist = &kvm->arch.vgic;
+
+ if (!irqchip_in_kernel(kvm))
+ return 0;
+
+ mutex_lock(&kvm->lock);
+
+ if (vgic_ready(kvm))
+ goto out;
+
+ if (IS_VGIC_ADDR_UNDEF(dist->vgic_dist_base) ||
+ IS_VGIC_ADDR_UNDEF(dist->vgic_redist_base)) {
+ kvm_err("Need to set vgic distributor addresses first\n");
+ ret = -ENXIO;
+ goto out;
+ }
+
+ /*
+ * For a VGICv3 we require the userland to explicitly initialize
+ * the VGIC before we need to use it.
+ */
+ if (!vgic_initialized(kvm)) {
+ ret = -EBUSY;
+ goto out;
+ }
+
+ kvm->arch.vgic.ready = true;
+out:
+ if (ret)
+ kvm_vgic_destroy(kvm);
+ mutex_unlock(&kvm->lock);
+ return ret;
+}
+
+static int vgic_v3_init_model(struct kvm *kvm)
+{
+ int i;
+ u32 mpidr;
+ struct vgic_dist *dist = &kvm->arch.vgic;
+ int nr_spis = dist->nr_irqs - VGIC_NR_PRIVATE_IRQS;
+
+ dist->irq_spi_mpidr = kcalloc(nr_spis, sizeof(dist->irq_spi_mpidr[0]),
+ GFP_KERNEL);
+
+ if (!dist->irq_spi_mpidr)
+ return -ENOMEM;
+
+ /* Initialize the target VCPUs for each IRQ to VCPU 0 */
+ mpidr = compress_mpidr(kvm_vcpu_get_mpidr_aff(kvm_get_vcpu(kvm, 0)));
+ for (i = VGIC_NR_PRIVATE_IRQS; i < dist->nr_irqs; i++) {
+ dist->irq_spi_cpu[i - VGIC_NR_PRIVATE_IRQS] = 0;
+ dist->irq_spi_mpidr[i - VGIC_NR_PRIVATE_IRQS] = mpidr;
+ vgic_bitmap_set_irq_val(dist->irq_spi_target, 0, i, 1);
+ }
+
+ return 0;
+}
+
+/* GICv3 does not keep track of SGI sources anymore. */
+static void vgic_v3_add_sgi_source(struct kvm_vcpu *vcpu, int irq, int source)
+{
+}
+
+void vgic_v3_init_emulation(struct kvm *kvm)
+{
+ struct vgic_dist *dist = &kvm->arch.vgic;
+
+ dist->vm_ops.handle_mmio = vgic_v3_handle_mmio;
+ dist->vm_ops.queue_sgi = vgic_v3_queue_sgi;
+ dist->vm_ops.add_sgi_source = vgic_v3_add_sgi_source;
+ dist->vm_ops.init_model = vgic_v3_init_model;
+ dist->vm_ops.map_resources = vgic_v3_map_resources;
+
+ kvm->arch.max_vcpus = KVM_MAX_VCPUS;
+}
+
+/*
+ * Compare a given affinity (level 1-3 and a level 0 mask, from the SGI
+ * generation register ICC_SGI1R_EL1) with a given VCPU.
+ * If the VCPU's MPIDR matches, return the level0 affinity, otherwise
+ * return -1.
+ */
+static int match_mpidr(u64 sgi_aff, u16 sgi_cpu_mask, struct kvm_vcpu *vcpu)
+{
+ unsigned long affinity;
+ int level0;
+
+ /*
+ * Split the current VCPU's MPIDR into affinity level 0 and the
+ * rest as this is what we have to compare against.
+ */
+ affinity = kvm_vcpu_get_mpidr_aff(vcpu);
+ level0 = MPIDR_AFFINITY_LEVEL(affinity, 0);
+ affinity &= ~MPIDR_LEVEL_MASK;
+
+ /* bail out if the upper three levels don't match */
+ if (sgi_aff != affinity)
+ return -1;
+
+ /* Is this VCPU's bit set in the mask ? */
+ if (!(sgi_cpu_mask & BIT(level0)))
+ return -1;
+
+ return level0;
+}
+
+#define SGI_AFFINITY_LEVEL(reg, level) \
+ ((((reg) & ICC_SGI1R_AFFINITY_## level ##_MASK) \
+ >> ICC_SGI1R_AFFINITY_## level ##_SHIFT) << MPIDR_LEVEL_SHIFT(level))
+
+/**
+ * vgic_v3_dispatch_sgi - handle SGI requests from VCPUs
+ * @vcpu: The VCPU requesting a SGI
+ * @reg: The value written into the ICC_SGI1R_EL1 register by that VCPU
+ *
+ * With GICv3 (and ARE=1) CPUs trigger SGIs by writing to a system register.
+ * This will trap in sys_regs.c and call this function.
+ * This ICC_SGI1R_EL1 register contains the upper three affinity levels of the
+ * target processors as well as a bitmask of 16 Aff0 CPUs.
+ * If the interrupt routing mode bit is not set, we iterate over all VCPUs to
+ * check for matching ones. If this bit is set, we signal all, but not the
+ * calling VCPU.
+ */
+void vgic_v3_dispatch_sgi(struct kvm_vcpu *vcpu, u64 reg)
+{
+ struct kvm *kvm = vcpu->kvm;
+ struct kvm_vcpu *c_vcpu;
+ struct vgic_dist *dist = &kvm->arch.vgic;
+ u16 target_cpus;
+ u64 mpidr;
+ int sgi, c;
+ int vcpu_id = vcpu->vcpu_id;
+ bool broadcast;
+ int updated = 0;
+
+ sgi = (reg & ICC_SGI1R_SGI_ID_MASK) >> ICC_SGI1R_SGI_ID_SHIFT;
+ broadcast = reg & BIT(ICC_SGI1R_IRQ_ROUTING_MODE_BIT);
+ target_cpus = (reg & ICC_SGI1R_TARGET_LIST_MASK) >> ICC_SGI1R_TARGET_LIST_SHIFT;
+ mpidr = SGI_AFFINITY_LEVEL(reg, 3);
+ mpidr |= SGI_AFFINITY_LEVEL(reg, 2);
+ mpidr |= SGI_AFFINITY_LEVEL(reg, 1);
+
+ /*
+ * We take the dist lock here, because we come from the sysregs
+ * code path and not from the MMIO one (which already takes the lock).
+ */
+ spin_lock(&dist->lock);
+
+ /*
+ * We iterate over all VCPUs to find the MPIDRs matching the request.
+	 * If we have handled one CPU, we clear its bit to detect early
+	 * if we are already finished. This avoids iterating through all
+	 * VCPUs when most of the time we just signal a single VCPU.
+ */
+ kvm_for_each_vcpu(c, c_vcpu, kvm) {
+
+ /* Exit early if we have dealt with all requested CPUs */
+ if (!broadcast && target_cpus == 0)
+ break;
+
+ /* Don't signal the calling VCPU */
+ if (broadcast && c == vcpu_id)
+ continue;
+
+ if (!broadcast) {
+ int level0;
+
+ level0 = match_mpidr(mpidr, target_cpus, c_vcpu);
+ if (level0 == -1)
+ continue;
+
+ /* remove this matching VCPU from the mask */
+ target_cpus &= ~BIT(level0);
+ }
+
+ /* Flag the SGI as pending */
+ vgic_dist_irq_set_pending(c_vcpu, sgi);
+ updated = 1;
+ kvm_debug("SGI%d from CPU%d to CPU%d\n", sgi, vcpu_id, c);
+ }
+ if (updated)
+ vgic_update_state(vcpu->kvm);
+ spin_unlock(&dist->lock);
+ if (updated)
+ vgic_kick_vcpus(vcpu->kvm);
+}
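
A hedged decode example for the register handled above, using the ICC_SGI1R_EL1 field layout from the GICv3 spec (TargetList [15:0], Aff1 [23:16], INTID [27:24], Aff2 [39:32], IRM bit 40, Aff3 [55:48]):

    /* A guest write of 0x0000000005020003:
     *   INTID          = 5        (SGI 5)
     *   Aff3.Aff2.Aff1 = 0.0.2    (upper affinity levels)
     *   TargetList     = 0b0011   (Aff0 CPUs 0 and 1)
     *   IRM            = 0        (targeted, not broadcast)
     */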
+
+static int vgic_v3_create(struct kvm_device *dev, u32 type)
+{
+ return kvm_vgic_create(dev->kvm, type);
+}
+
+static void vgic_v3_destroy(struct kvm_device *dev)
+{
+ kfree(dev);
+}
+
+static int vgic_v3_set_attr(struct kvm_device *dev,
+ struct kvm_device_attr *attr)
+{
+ int ret;
+
+ ret = vgic_set_common_attr(dev, attr);
+ if (ret != -ENXIO)
+ return ret;
+
+ switch (attr->group) {
+ case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
+ case KVM_DEV_ARM_VGIC_GRP_CPU_REGS:
+ return -ENXIO;
+ }
+
+ return -ENXIO;
+}
+
+static int vgic_v3_get_attr(struct kvm_device *dev,
+ struct kvm_device_attr *attr)
+{
+ int ret;
+
+ ret = vgic_get_common_attr(dev, attr);
+ if (ret != -ENXIO)
+ return ret;
+
+ switch (attr->group) {
+ case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
+ case KVM_DEV_ARM_VGIC_GRP_CPU_REGS:
+ return -ENXIO;
+ }
+
+ return -ENXIO;
+}
+
+static int vgic_v3_has_attr(struct kvm_device *dev,
+ struct kvm_device_attr *attr)
+{
+ switch (attr->group) {
+ case KVM_DEV_ARM_VGIC_GRP_ADDR:
+ switch (attr->attr) {
+ case KVM_VGIC_V2_ADDR_TYPE_DIST:
+ case KVM_VGIC_V2_ADDR_TYPE_CPU:
+ return -ENXIO;
+ case KVM_VGIC_V3_ADDR_TYPE_DIST:
+ case KVM_VGIC_V3_ADDR_TYPE_REDIST:
+ return 0;
+ }
+ break;
+ case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
+ case KVM_DEV_ARM_VGIC_GRP_CPU_REGS:
+ return -ENXIO;
+ case KVM_DEV_ARM_VGIC_GRP_NR_IRQS:
+ return 0;
+ case KVM_DEV_ARM_VGIC_GRP_CTRL:
+ switch (attr->attr) {
+ case KVM_DEV_ARM_VGIC_CTRL_INIT:
+ return 0;
+ }
+ }
+ return -ENXIO;
+}
+
+struct kvm_device_ops kvm_arm_vgic_v3_ops = {
+ .name = "kvm-arm-vgic-v3",
+ .create = vgic_v3_create,
+ .destroy = vgic_v3_destroy,
+ .set_attr = vgic_v3_set_attr,
+ .get_attr = vgic_v3_get_attr,
+ .has_attr = vgic_v3_has_attr,
+};
diff --git a/virt/kvm/arm/vgic-v3.c b/virt/kvm/arm/vgic-v3.c
index 1c2c8eef0599..3a62d8a9a2c6 100644
--- a/virt/kvm/arm/vgic-v3.c
+++ b/virt/kvm/arm/vgic-v3.c
@@ -34,6 +34,7 @@
#define GICH_LR_VIRTUALID (0x3ffUL << 0)
#define GICH_LR_PHYSID_CPUID_SHIFT (10)
#define GICH_LR_PHYSID_CPUID (7UL << GICH_LR_PHYSID_CPUID_SHIFT)
+#define ICH_LR_VIRTUALID_MASK (BIT_ULL(32) - 1)
/*
* LRs are stored in reverse order in memory. make sure we index them
@@ -48,12 +49,17 @@ static struct vgic_lr vgic_v3_get_lr(const struct kvm_vcpu *vcpu, int lr)
struct vgic_lr lr_desc;
u64 val = vcpu->arch.vgic_cpu.vgic_v3.vgic_lr[LR_INDEX(lr)];
- lr_desc.irq = val & GICH_LR_VIRTUALID;
- if (lr_desc.irq <= 15)
- lr_desc.source = (val >> GICH_LR_PHYSID_CPUID_SHIFT) & 0x7;
+ if (vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3)
+ lr_desc.irq = val & ICH_LR_VIRTUALID_MASK;
else
- lr_desc.source = 0;
- lr_desc.state = 0;
+ lr_desc.irq = val & GICH_LR_VIRTUALID;
+
+ lr_desc.source = 0;
+ if (lr_desc.irq <= 15 &&
+ vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V2)
+ lr_desc.source = (val >> GICH_LR_PHYSID_CPUID_SHIFT) & 0x7;
+
+ lr_desc.state = 0;
if (val & ICH_LR_PENDING_BIT)
lr_desc.state |= LR_STATE_PENDING;
@@ -68,8 +74,20 @@ static struct vgic_lr vgic_v3_get_lr(const struct kvm_vcpu *vcpu, int lr)
static void vgic_v3_set_lr(struct kvm_vcpu *vcpu, int lr,
struct vgic_lr lr_desc)
{
- u64 lr_val = (((u32)lr_desc.source << GICH_LR_PHYSID_CPUID_SHIFT) |
- lr_desc.irq);
+ u64 lr_val;
+
+ lr_val = lr_desc.irq;
+
+ /*
+ * Currently all guest IRQs are Group1, as Group0 would result
+ * in a FIQ in the guest, which it wouldn't expect.
+ * Eventually we want to make this configurable, so we may revisit
+ * this in the future.
+ */
+ if (vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3)
+ lr_val |= ICH_LR_GROUP;
+ else
+ lr_val |= (u32)lr_desc.source << GICH_LR_PHYSID_CPUID_SHIFT;
if (lr_desc.state & LR_STATE_PENDING)
lr_val |= ICH_LR_PENDING_BIT;
@@ -145,15 +163,27 @@ static void vgic_v3_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcrp)
static void vgic_v3_enable(struct kvm_vcpu *vcpu)
{
+ struct vgic_v3_cpu_if *vgic_v3 = &vcpu->arch.vgic_cpu.vgic_v3;
+
/*
* By forcing VMCR to zero, the GIC will restore the binary
* points to their reset values. Anything else resets to zero
* anyway.
*/
- vcpu->arch.vgic_cpu.vgic_v3.vgic_vmcr = 0;
+ vgic_v3->vgic_vmcr = 0;
+
+ /*
+	 * If we are emulating a GICv3, we do it in a non-GICv2-compatible
+ * way, so we force SRE to 1 to demonstrate this to the guest.
+ * This goes with the spec allowing the value to be RAO/WI.
+ */
+ if (vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3)
+ vgic_v3->vgic_sre = ICC_SRE_EL1_SRE;
+ else
+ vgic_v3->vgic_sre = 0;
/* Get the show on the road... */
- vcpu->arch.vgic_cpu.vgic_v3.vgic_hcr = ICH_HCR_EN;
+ vgic_v3->vgic_hcr = ICH_HCR_EN;
}
static const struct vgic_ops vgic_v3_ops = {
@@ -205,35 +235,37 @@ int vgic_v3_probe(struct device_node *vgic_node,
* maximum of 16 list registers. Just ignore bit 4...
*/
vgic->nr_lr = (ich_vtr_el2 & 0xf) + 1;
+ vgic->can_emulate_gicv2 = false;
if (of_property_read_u32(vgic_node, "#redistributor-regions", &gicv_idx))
gicv_idx = 1;
gicv_idx += 3; /* Also skip GICD, GICC, GICH */
if (of_address_to_resource(vgic_node, gicv_idx, &vcpu_res)) {
- kvm_err("Cannot obtain GICV region\n");
- ret = -ENXIO;
- goto out;
- }
-
- if (!PAGE_ALIGNED(vcpu_res.start)) {
- kvm_err("GICV physical address 0x%llx not page aligned\n",
+ kvm_info("GICv3: no GICV resource entry\n");
+ vgic->vcpu_base = 0;
+ } else if (!PAGE_ALIGNED(vcpu_res.start)) {
+ pr_warn("GICV physical address 0x%llx not page aligned\n",
(unsigned long long)vcpu_res.start);
- ret = -ENXIO;
- goto out;
- }
-
- if (!PAGE_ALIGNED(resource_size(&vcpu_res))) {
- kvm_err("GICV size 0x%llx not a multiple of page size 0x%lx\n",
+ vgic->vcpu_base = 0;
+ } else if (!PAGE_ALIGNED(resource_size(&vcpu_res))) {
+ pr_warn("GICV size 0x%llx not a multiple of page size 0x%lx\n",
(unsigned long long)resource_size(&vcpu_res),
PAGE_SIZE);
- ret = -ENXIO;
- goto out;
+ vgic->vcpu_base = 0;
+ } else {
+ vgic->vcpu_base = vcpu_res.start;
+ vgic->can_emulate_gicv2 = true;
+ kvm_register_device_ops(&kvm_arm_vgic_v2_ops,
+ KVM_DEV_TYPE_ARM_VGIC_V2);
}
+ if (vgic->vcpu_base == 0)
+ kvm_info("disabling GICv2 emulation\n");
+ kvm_register_device_ops(&kvm_arm_vgic_v3_ops, KVM_DEV_TYPE_ARM_VGIC_V3);
- vgic->vcpu_base = vcpu_res.start;
vgic->vctrl_base = NULL;
vgic->type = VGIC_V3;
+ vgic->max_gic_vcpus = KVM_MAX_VCPUS;
kvm_info("%s@%llx IRQ%d\n", vgic_node->name,
vcpu_res.start, vgic->maint_irq);
diff --git a/virt/kvm/arm/vgic.c b/virt/kvm/arm/vgic.c
index 03affc7bf453..0cc6ab6005a0 100644
--- a/virt/kvm/arm/vgic.c
+++ b/virt/kvm/arm/vgic.c
@@ -75,37 +75,31 @@
* inactive as long as the external input line is held high.
*/
-#define VGIC_ADDR_UNDEF (-1)
-#define IS_VGIC_ADDR_UNDEF(_x) ((_x) == VGIC_ADDR_UNDEF)
-
-#define PRODUCT_ID_KVM 0x4b /* ASCII code K */
-#define IMPLEMENTER_ARM 0x43b
-#define GICC_ARCH_VERSION_V2 0x2
-
-#define ACCESS_READ_VALUE (1 << 0)
-#define ACCESS_READ_RAZ (0 << 0)
-#define ACCESS_READ_MASK(x) ((x) & (1 << 0))
-#define ACCESS_WRITE_IGNORED (0 << 1)
-#define ACCESS_WRITE_SETBIT (1 << 1)
-#define ACCESS_WRITE_CLEARBIT (2 << 1)
-#define ACCESS_WRITE_VALUE (3 << 1)
-#define ACCESS_WRITE_MASK(x) ((x) & (3 << 1))
-
-static int vgic_init(struct kvm *kvm);
+#include "vgic.h"
+
static void vgic_retire_disabled_irqs(struct kvm_vcpu *vcpu);
static void vgic_retire_lr(int lr_nr, int irq, struct kvm_vcpu *vcpu);
-static void vgic_update_state(struct kvm *kvm);
-static void vgic_kick_vcpus(struct kvm *kvm);
-static u8 *vgic_get_sgi_sources(struct vgic_dist *dist, int vcpu_id, int sgi);
-static void vgic_dispatch_sgi(struct kvm_vcpu *vcpu, u32 reg);
static struct vgic_lr vgic_get_lr(const struct kvm_vcpu *vcpu, int lr);
static void vgic_set_lr(struct kvm_vcpu *vcpu, int lr, struct vgic_lr lr_desc);
-static void vgic_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr);
-static void vgic_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr);
static const struct vgic_ops *vgic_ops;
static const struct vgic_params *vgic;
+static void add_sgi_source(struct kvm_vcpu *vcpu, int irq, int source)
+{
+ vcpu->kvm->arch.vgic.vm_ops.add_sgi_source(vcpu, irq, source);
+}
+
+static bool queue_sgi(struct kvm_vcpu *vcpu, int irq)
+{
+ return vcpu->kvm->arch.vgic.vm_ops.queue_sgi(vcpu, irq);
+}
+
+int kvm_vgic_map_resources(struct kvm *kvm)
+{
+ return kvm->arch.vgic.vm_ops.map_resources(kvm, vgic);
+}
+
/*
* struct vgic_bitmap contains a bitmap made of unsigned longs, but
* extracts u32s out of them.
@@ -160,8 +154,7 @@ static unsigned long *u64_to_bitmask(u64 *val)
return (unsigned long *)val;
}
-static u32 *vgic_bitmap_get_reg(struct vgic_bitmap *x,
- int cpuid, u32 offset)
+u32 *vgic_bitmap_get_reg(struct vgic_bitmap *x, int cpuid, u32 offset)
{
offset >>= 2;
if (!offset)
@@ -179,8 +172,8 @@ static int vgic_bitmap_get_irq_val(struct vgic_bitmap *x,
return test_bit(irq - VGIC_NR_PRIVATE_IRQS, x->shared);
}
-static void vgic_bitmap_set_irq_val(struct vgic_bitmap *x, int cpuid,
- int irq, int val)
+void vgic_bitmap_set_irq_val(struct vgic_bitmap *x, int cpuid,
+ int irq, int val)
{
unsigned long *reg;
@@ -202,7 +195,7 @@ static unsigned long *vgic_bitmap_get_cpu_map(struct vgic_bitmap *x, int cpuid)
return x->private + cpuid;
}
-static unsigned long *vgic_bitmap_get_shared_map(struct vgic_bitmap *x)
+unsigned long *vgic_bitmap_get_shared_map(struct vgic_bitmap *x)
{
return x->shared;
}
@@ -229,7 +222,7 @@ static void vgic_free_bytemap(struct vgic_bytemap *b)
b->shared = NULL;
}
-static u32 *vgic_bytemap_get_reg(struct vgic_bytemap *x, int cpuid, u32 offset)
+u32 *vgic_bytemap_get_reg(struct vgic_bytemap *x, int cpuid, u32 offset)
{
u32 *reg;
@@ -326,14 +319,14 @@ static int vgic_dist_irq_is_pending(struct kvm_vcpu *vcpu, int irq)
return vgic_bitmap_get_irq_val(&dist->irq_pending, vcpu->vcpu_id, irq);
}
-static void vgic_dist_irq_set_pending(struct kvm_vcpu *vcpu, int irq)
+void vgic_dist_irq_set_pending(struct kvm_vcpu *vcpu, int irq)
{
struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
vgic_bitmap_set_irq_val(&dist->irq_pending, vcpu->vcpu_id, irq, 1);
}
-static void vgic_dist_irq_clear_pending(struct kvm_vcpu *vcpu, int irq)
+void vgic_dist_irq_clear_pending(struct kvm_vcpu *vcpu, int irq)
{
struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
@@ -349,7 +342,7 @@ static void vgic_cpu_irq_set(struct kvm_vcpu *vcpu, int irq)
vcpu->arch.vgic_cpu.pending_shared);
}
-static void vgic_cpu_irq_clear(struct kvm_vcpu *vcpu, int irq)
+void vgic_cpu_irq_clear(struct kvm_vcpu *vcpu, int irq)
{
if (irq < VGIC_NR_PRIVATE_IRQS)
clear_bit(irq, vcpu->arch.vgic_cpu.pending_percpu);
@@ -363,16 +356,6 @@ static bool vgic_can_sample_irq(struct kvm_vcpu *vcpu, int irq)
return vgic_irq_is_edge(vcpu, irq) || !vgic_irq_is_queued(vcpu, irq);
}
-static u32 mmio_data_read(struct kvm_exit_mmio *mmio, u32 mask)
-{
- return le32_to_cpu(*((u32 *)mmio->data)) & mask;
-}
-
-static void mmio_data_write(struct kvm_exit_mmio *mmio, u32 mask, u32 value)
-{
- *((u32 *)mmio->data) = cpu_to_le32(value) & mask;
-}
-
/**
* vgic_reg_access - access vgic register
* @mmio: pointer to the data describing the mmio access
@@ -384,8 +367,8 @@ static void mmio_data_write(struct kvm_exit_mmio *mmio, u32 mask, u32 value)
* modes defined for vgic register access
* (read,raz,write-ignored,setbit,clearbit,write)
*/
-static void vgic_reg_access(struct kvm_exit_mmio *mmio, u32 *reg,
- phys_addr_t offset, int mode)
+void vgic_reg_access(struct kvm_exit_mmio *mmio, u32 *reg,
+ phys_addr_t offset, int mode)
{
int word_offset = (offset & 3) * 8;
u32 mask = (1UL << (mmio->len * 8)) - 1;
@@ -434,107 +417,58 @@ static void vgic_reg_access(struct kvm_exit_mmio *mmio, u32 *reg,
}
}
-static bool handle_mmio_misc(struct kvm_vcpu *vcpu,
- struct kvm_exit_mmio *mmio, phys_addr_t offset)
-{
- u32 reg;
- u32 word_offset = offset & 3;
-
- switch (offset & ~3) {
- case 0: /* GICD_CTLR */
- reg = vcpu->kvm->arch.vgic.enabled;
- vgic_reg_access(mmio, &reg, word_offset,
- ACCESS_READ_VALUE | ACCESS_WRITE_VALUE);
- if (mmio->is_write) {
- vcpu->kvm->arch.vgic.enabled = reg & 1;
- vgic_update_state(vcpu->kvm);
- return true;
- }
- break;
-
- case 4: /* GICD_TYPER */
- reg = (atomic_read(&vcpu->kvm->online_vcpus) - 1) << 5;
- reg |= (vcpu->kvm->arch.vgic.nr_irqs >> 5) - 1;
- vgic_reg_access(mmio, &reg, word_offset,
- ACCESS_READ_VALUE | ACCESS_WRITE_IGNORED);
- break;
-
- case 8: /* GICD_IIDR */
- reg = (PRODUCT_ID_KVM << 24) | (IMPLEMENTER_ARM << 0);
- vgic_reg_access(mmio, &reg, word_offset,
- ACCESS_READ_VALUE | ACCESS_WRITE_IGNORED);
- break;
- }
-
- return false;
-}
-
-static bool handle_mmio_raz_wi(struct kvm_vcpu *vcpu,
- struct kvm_exit_mmio *mmio, phys_addr_t offset)
+bool handle_mmio_raz_wi(struct kvm_vcpu *vcpu, struct kvm_exit_mmio *mmio,
+ phys_addr_t offset)
{
vgic_reg_access(mmio, NULL, offset,
ACCESS_READ_RAZ | ACCESS_WRITE_IGNORED);
return false;
}
-static bool handle_mmio_set_enable_reg(struct kvm_vcpu *vcpu,
- struct kvm_exit_mmio *mmio,
- phys_addr_t offset)
+bool vgic_handle_enable_reg(struct kvm *kvm, struct kvm_exit_mmio *mmio,
+ phys_addr_t offset, int vcpu_id, int access)
{
- u32 *reg = vgic_bitmap_get_reg(&vcpu->kvm->arch.vgic.irq_enabled,
- vcpu->vcpu_id, offset);
- vgic_reg_access(mmio, reg, offset,
- ACCESS_READ_VALUE | ACCESS_WRITE_SETBIT);
- if (mmio->is_write) {
- vgic_update_state(vcpu->kvm);
- return true;
- }
-
- return false;
-}
+ u32 *reg;
+ int mode = ACCESS_READ_VALUE | access;
+ struct kvm_vcpu *target_vcpu = kvm_get_vcpu(kvm, vcpu_id);
-static bool handle_mmio_clear_enable_reg(struct kvm_vcpu *vcpu,
- struct kvm_exit_mmio *mmio,
- phys_addr_t offset)
-{
- u32 *reg = vgic_bitmap_get_reg(&vcpu->kvm->arch.vgic.irq_enabled,
- vcpu->vcpu_id, offset);
- vgic_reg_access(mmio, reg, offset,
- ACCESS_READ_VALUE | ACCESS_WRITE_CLEARBIT);
+ reg = vgic_bitmap_get_reg(&kvm->arch.vgic.irq_enabled, vcpu_id, offset);
+ vgic_reg_access(mmio, reg, offset, mode);
if (mmio->is_write) {
- if (offset < 4) /* Force SGI enabled */
- *reg |= 0xffff;
- vgic_retire_disabled_irqs(vcpu);
- vgic_update_state(vcpu->kvm);
+ if (access & ACCESS_WRITE_CLEARBIT) {
+ if (offset < 4) /* Force SGI enabled */
+ *reg |= 0xffff;
+ vgic_retire_disabled_irqs(target_vcpu);
+ }
+ vgic_update_state(kvm);
return true;
}
return false;
}
-static bool handle_mmio_set_pending_reg(struct kvm_vcpu *vcpu,
- struct kvm_exit_mmio *mmio,
- phys_addr_t offset)
+bool vgic_handle_set_pending_reg(struct kvm *kvm,
+ struct kvm_exit_mmio *mmio,
+ phys_addr_t offset, int vcpu_id)
{
u32 *reg, orig;
u32 level_mask;
- struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
+ int mode = ACCESS_READ_VALUE | ACCESS_WRITE_SETBIT;
+ struct vgic_dist *dist = &kvm->arch.vgic;
- reg = vgic_bitmap_get_reg(&dist->irq_cfg, vcpu->vcpu_id, offset);
+ reg = vgic_bitmap_get_reg(&dist->irq_cfg, vcpu_id, offset);
level_mask = (~(*reg));
/* Mark both level and edge triggered irqs as pending */
- reg = vgic_bitmap_get_reg(&dist->irq_pending, vcpu->vcpu_id, offset);
+ reg = vgic_bitmap_get_reg(&dist->irq_pending, vcpu_id, offset);
orig = *reg;
- vgic_reg_access(mmio, reg, offset,
- ACCESS_READ_VALUE | ACCESS_WRITE_SETBIT);
+ vgic_reg_access(mmio, reg, offset, mode);
if (mmio->is_write) {
/* Set the soft-pending flag only for level-triggered irqs */
reg = vgic_bitmap_get_reg(&dist->irq_soft_pend,
- vcpu->vcpu_id, offset);
- vgic_reg_access(mmio, reg, offset,
- ACCESS_READ_VALUE | ACCESS_WRITE_SETBIT);
+ vcpu_id, offset);
+ vgic_reg_access(mmio, reg, offset, mode);
*reg &= level_mask;
/* Ignore writes to SGIs */
@@ -543,31 +477,30 @@ static bool handle_mmio_set_pending_reg(struct kvm_vcpu *vcpu,
*reg |= orig & 0xffff;
}
- vgic_update_state(vcpu->kvm);
+ vgic_update_state(kvm);
return true;
}
return false;
}
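
The interaction of the cfg, pending and soft-pend bits above is easier to see on plain integers. A small illustrative program (one bit per IRQ, a set bit in cfg meaning edge-triggered, matching the convention used here; not kernel code):

#include <stdio.h>

int main(void)
{
	unsigned int cfg     = 0x0f;	/* IRQs 0-3 edge, IRQs 4-7 level */
	unsigned int pending = 0;
	unsigned int soft    = 0;
	unsigned int write   = 0x33;	/* guest marks IRQs 0,1,4,5 pending */

	unsigned int level_mask = ~cfg;

	/* Both edge and level triggered IRQs become pending... */
	pending |= write;
	/* ...but only the level-triggered ones get the soft-pending flag. */
	soft |= write & level_mask;

	/* Prints pending=0x33 soft=0x30 */
	printf("pending=0x%02x soft=0x%02x\n", pending, soft);
	return 0;
}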
-static bool handle_mmio_clear_pending_reg(struct kvm_vcpu *vcpu,
- struct kvm_exit_mmio *mmio,
- phys_addr_t offset)
+bool vgic_handle_clear_pending_reg(struct kvm *kvm,
+ struct kvm_exit_mmio *mmio,
+ phys_addr_t offset, int vcpu_id)
{
u32 *level_active;
u32 *reg, orig;
- struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
+ int mode = ACCESS_READ_VALUE | ACCESS_WRITE_CLEARBIT;
+ struct vgic_dist *dist = &kvm->arch.vgic;
- reg = vgic_bitmap_get_reg(&dist->irq_pending, vcpu->vcpu_id, offset);
+ reg = vgic_bitmap_get_reg(&dist->irq_pending, vcpu_id, offset);
orig = *reg;
- vgic_reg_access(mmio, reg, offset,
- ACCESS_READ_VALUE | ACCESS_WRITE_CLEARBIT);
+ vgic_reg_access(mmio, reg, offset, mode);
if (mmio->is_write) {
/* Re-set level triggered level-active interrupts */
level_active = vgic_bitmap_get_reg(&dist->irq_level,
- vcpu->vcpu_id, offset);
- reg = vgic_bitmap_get_reg(&dist->irq_pending,
- vcpu->vcpu_id, offset);
+ vcpu_id, offset);
+ reg = vgic_bitmap_get_reg(&dist->irq_pending, vcpu_id, offset);
*reg |= *level_active;
/* Ignore writes to SGIs */
@@ -578,101 +511,12 @@ static bool handle_mmio_clear_pending_reg(struct kvm_vcpu *vcpu,
/* Clear soft-pending flags */
reg = vgic_bitmap_get_reg(&dist->irq_soft_pend,
- vcpu->vcpu_id, offset);
- vgic_reg_access(mmio, reg, offset,
- ACCESS_READ_VALUE | ACCESS_WRITE_CLEARBIT);
+ vcpu_id, offset);
+ vgic_reg_access(mmio, reg, offset, mode);
- vgic_update_state(vcpu->kvm);
+ vgic_update_state(kvm);
return true;
}
-
- return false;
-}
-
-static bool handle_mmio_priority_reg(struct kvm_vcpu *vcpu,
- struct kvm_exit_mmio *mmio,
- phys_addr_t offset)
-{
- u32 *reg = vgic_bytemap_get_reg(&vcpu->kvm->arch.vgic.irq_priority,
- vcpu->vcpu_id, offset);
- vgic_reg_access(mmio, reg, offset,
- ACCESS_READ_VALUE | ACCESS_WRITE_VALUE);
- return false;
-}
-
-#define GICD_ITARGETSR_SIZE 32
-#define GICD_CPUTARGETS_BITS 8
-#define GICD_IRQS_PER_ITARGETSR (GICD_ITARGETSR_SIZE / GICD_CPUTARGETS_BITS)
-static u32 vgic_get_target_reg(struct kvm *kvm, int irq)
-{
- struct vgic_dist *dist = &kvm->arch.vgic;
- int i;
- u32 val = 0;
-
- irq -= VGIC_NR_PRIVATE_IRQS;
-
- for (i = 0; i < GICD_IRQS_PER_ITARGETSR; i++)
- val |= 1 << (dist->irq_spi_cpu[irq + i] + i * 8);
-
- return val;
-}
-
-static void vgic_set_target_reg(struct kvm *kvm, u32 val, int irq)
-{
- struct vgic_dist *dist = &kvm->arch.vgic;
- struct kvm_vcpu *vcpu;
- int i, c;
- unsigned long *bmap;
- u32 target;
-
- irq -= VGIC_NR_PRIVATE_IRQS;
-
- /*
- * Pick the LSB in each byte. This ensures we target exactly
- * one vcpu per IRQ. If the byte is null, assume we target
- * CPU0.
- */
- for (i = 0; i < GICD_IRQS_PER_ITARGETSR; i++) {
- int shift = i * GICD_CPUTARGETS_BITS;
- target = ffs((val >> shift) & 0xffU);
- target = target ? (target - 1) : 0;
- dist->irq_spi_cpu[irq + i] = target;
- kvm_for_each_vcpu(c, vcpu, kvm) {
- bmap = vgic_bitmap_get_shared_map(&dist->irq_spi_target[c]);
- if (c == target)
- set_bit(irq + i, bmap);
- else
- clear_bit(irq + i, bmap);
- }
- }
-}
-
-static bool handle_mmio_target_reg(struct kvm_vcpu *vcpu,
- struct kvm_exit_mmio *mmio,
- phys_addr_t offset)
-{
- u32 reg;
-
- /* We treat the banked interrupts targets as read-only */
- if (offset < 32) {
- u32 roreg = 1 << vcpu->vcpu_id;
- roreg |= roreg << 8;
- roreg |= roreg << 16;
-
- vgic_reg_access(mmio, &roreg, offset,
- ACCESS_READ_VALUE | ACCESS_WRITE_IGNORED);
- return false;
- }
-
- reg = vgic_get_target_reg(vcpu->kvm, offset & ~3U);
- vgic_reg_access(mmio, &reg, offset,
- ACCESS_READ_VALUE | ACCESS_WRITE_VALUE);
- if (mmio->is_write) {
- vgic_set_target_reg(vcpu->kvm, reg, offset & ~3U);
- vgic_update_state(vcpu->kvm);
- return true;
- }
-
return false;
}
@@ -711,14 +555,10 @@ static u16 vgic_cfg_compress(u32 val)
* LSB is always 0. As such, we only keep the upper bit, and use the
* two above functions to compress/expand the bits
*/
-static bool handle_mmio_cfg_reg(struct kvm_vcpu *vcpu,
- struct kvm_exit_mmio *mmio, phys_addr_t offset)
+bool vgic_handle_cfg_reg(u32 *reg, struct kvm_exit_mmio *mmio,
+ phys_addr_t offset)
{
u32 val;
- u32 *reg;
-
- reg = vgic_bitmap_get_reg(&vcpu->kvm->arch.vgic.irq_cfg,
- vcpu->vcpu_id, offset >> 1);
if (offset & 4)
val = *reg >> 16;
@@ -747,21 +587,6 @@ static bool handle_mmio_cfg_reg(struct kvm_vcpu *vcpu,
return false;
}
-static bool handle_mmio_sgi_reg(struct kvm_vcpu *vcpu,
- struct kvm_exit_mmio *mmio, phys_addr_t offset)
-{
- u32 reg;
- vgic_reg_access(mmio, &reg, offset,
- ACCESS_READ_RAZ | ACCESS_WRITE_VALUE);
- if (mmio->is_write) {
- vgic_dispatch_sgi(vcpu, reg);
- vgic_update_state(vcpu->kvm);
- return true;
- }
-
- return false;
-}
-
/**
* vgic_unqueue_irqs - move pending IRQs from LRs to the distributor
* @vgic_cpu: Pointer to the vgic_cpu struct holding the LRs
@@ -774,11 +599,9 @@ static bool handle_mmio_sgi_reg(struct kvm_vcpu *vcpu,
* to the distributor but the active state stays in the LRs, because we don't
* track the active state on the distributor side.
*/
-static void vgic_unqueue_irqs(struct kvm_vcpu *vcpu)
+void vgic_unqueue_irqs(struct kvm_vcpu *vcpu)
{
- struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
- int vcpu_id = vcpu->vcpu_id;
int i;
for_each_set_bit(i, vgic_cpu->lr_used, vgic_cpu->nr_lr) {
@@ -805,7 +628,7 @@ static void vgic_unqueue_irqs(struct kvm_vcpu *vcpu)
*/
vgic_dist_irq_set_pending(vcpu, lr.irq);
if (lr.irq < VGIC_NR_SGIS)
- *vgic_get_sgi_sources(dist, vcpu_id, lr.irq) |= 1 << lr.source;
+ add_sgi_source(vcpu, lr.irq, lr.source);
lr.state &= ~LR_STATE_PENDING;
vgic_set_lr(vcpu, i, lr);
@@ -824,188 +647,12 @@ static void vgic_unqueue_irqs(struct kvm_vcpu *vcpu)
}
}
-/* Handle reads of GICD_CPENDSGIRn and GICD_SPENDSGIRn */
-static bool read_set_clear_sgi_pend_reg(struct kvm_vcpu *vcpu,
- struct kvm_exit_mmio *mmio,
- phys_addr_t offset)
-{
- struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
- int sgi;
- int min_sgi = (offset & ~0x3);
- int max_sgi = min_sgi + 3;
- int vcpu_id = vcpu->vcpu_id;
- u32 reg = 0;
-
- /* Copy source SGIs from distributor side */
- for (sgi = min_sgi; sgi <= max_sgi; sgi++) {
- int shift = 8 * (sgi - min_sgi);
- reg |= ((u32)*vgic_get_sgi_sources(dist, vcpu_id, sgi)) << shift;
- }
-
- mmio_data_write(mmio, ~0, reg);
- return false;
-}
-
-static bool write_set_clear_sgi_pend_reg(struct kvm_vcpu *vcpu,
- struct kvm_exit_mmio *mmio,
- phys_addr_t offset, bool set)
-{
- struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
- int sgi;
- int min_sgi = (offset & ~0x3);
- int max_sgi = min_sgi + 3;
- int vcpu_id = vcpu->vcpu_id;
- u32 reg;
- bool updated = false;
-
- reg = mmio_data_read(mmio, ~0);
-
- /* Clear pending SGIs on the distributor */
- for (sgi = min_sgi; sgi <= max_sgi; sgi++) {
- u8 mask = reg >> (8 * (sgi - min_sgi));
- u8 *src = vgic_get_sgi_sources(dist, vcpu_id, sgi);
- if (set) {
- if ((*src & mask) != mask)
- updated = true;
- *src |= mask;
- } else {
- if (*src & mask)
- updated = true;
- *src &= ~mask;
- }
- }
-
- if (updated)
- vgic_update_state(vcpu->kvm);
-
- return updated;
-}
-
-static bool handle_mmio_sgi_set(struct kvm_vcpu *vcpu,
- struct kvm_exit_mmio *mmio,
- phys_addr_t offset)
-{
- if (!mmio->is_write)
- return read_set_clear_sgi_pend_reg(vcpu, mmio, offset);
- else
- return write_set_clear_sgi_pend_reg(vcpu, mmio, offset, true);
-}
-
-static bool handle_mmio_sgi_clear(struct kvm_vcpu *vcpu,
- struct kvm_exit_mmio *mmio,
- phys_addr_t offset)
-{
- if (!mmio->is_write)
- return read_set_clear_sgi_pend_reg(vcpu, mmio, offset);
- else
- return write_set_clear_sgi_pend_reg(vcpu, mmio, offset, false);
-}
-
-/*
- * I would have liked to use the kvm_bus_io_*() API instead, but it
- * cannot cope with banked registers (only the VM pointer is passed
- * around, and we need the vcpu). One of these days, someone please
- * fix it!
- */
-struct mmio_range {
- phys_addr_t base;
- unsigned long len;
- int bits_per_irq;
- bool (*handle_mmio)(struct kvm_vcpu *vcpu, struct kvm_exit_mmio *mmio,
- phys_addr_t offset);
-};
-
-static const struct mmio_range vgic_dist_ranges[] = {
- {
- .base = GIC_DIST_CTRL,
- .len = 12,
- .bits_per_irq = 0,
- .handle_mmio = handle_mmio_misc,
- },
- {
- .base = GIC_DIST_IGROUP,
- .len = VGIC_MAX_IRQS / 8,
- .bits_per_irq = 1,
- .handle_mmio = handle_mmio_raz_wi,
- },
- {
- .base = GIC_DIST_ENABLE_SET,
- .len = VGIC_MAX_IRQS / 8,
- .bits_per_irq = 1,
- .handle_mmio = handle_mmio_set_enable_reg,
- },
- {
- .base = GIC_DIST_ENABLE_CLEAR,
- .len = VGIC_MAX_IRQS / 8,
- .bits_per_irq = 1,
- .handle_mmio = handle_mmio_clear_enable_reg,
- },
- {
- .base = GIC_DIST_PENDING_SET,
- .len = VGIC_MAX_IRQS / 8,
- .bits_per_irq = 1,
- .handle_mmio = handle_mmio_set_pending_reg,
- },
- {
- .base = GIC_DIST_PENDING_CLEAR,
- .len = VGIC_MAX_IRQS / 8,
- .bits_per_irq = 1,
- .handle_mmio = handle_mmio_clear_pending_reg,
- },
- {
- .base = GIC_DIST_ACTIVE_SET,
- .len = VGIC_MAX_IRQS / 8,
- .bits_per_irq = 1,
- .handle_mmio = handle_mmio_raz_wi,
- },
- {
- .base = GIC_DIST_ACTIVE_CLEAR,
- .len = VGIC_MAX_IRQS / 8,
- .bits_per_irq = 1,
- .handle_mmio = handle_mmio_raz_wi,
- },
- {
- .base = GIC_DIST_PRI,
- .len = VGIC_MAX_IRQS,
- .bits_per_irq = 8,
- .handle_mmio = handle_mmio_priority_reg,
- },
- {
- .base = GIC_DIST_TARGET,
- .len = VGIC_MAX_IRQS,
- .bits_per_irq = 8,
- .handle_mmio = handle_mmio_target_reg,
- },
- {
- .base = GIC_DIST_CONFIG,
- .len = VGIC_MAX_IRQS / 4,
- .bits_per_irq = 2,
- .handle_mmio = handle_mmio_cfg_reg,
- },
- {
- .base = GIC_DIST_SOFTINT,
- .len = 4,
- .handle_mmio = handle_mmio_sgi_reg,
- },
- {
- .base = GIC_DIST_SGI_PENDING_CLEAR,
- .len = VGIC_NR_SGIS,
- .handle_mmio = handle_mmio_sgi_clear,
- },
- {
- .base = GIC_DIST_SGI_PENDING_SET,
- .len = VGIC_NR_SGIS,
- .handle_mmio = handle_mmio_sgi_set,
- },
- {}
-};
-
-static const
-struct mmio_range *find_matching_range(const struct mmio_range *ranges,
+const
+struct kvm_mmio_range *vgic_find_range(const struct kvm_mmio_range *ranges,
struct kvm_exit_mmio *mmio,
phys_addr_t offset)
{
- const struct mmio_range *r = ranges;
+ const struct kvm_mmio_range *r = ranges;
while (r->len) {
if (offset >= r->base &&
@@ -1018,7 +665,7 @@ struct mmio_range *find_matching_range(const struct mmio_range *ranges,
}
static bool vgic_validate_access(const struct vgic_dist *dist,
- const struct mmio_range *range,
+ const struct kvm_mmio_range *range,
unsigned long offset)
{
int irq;
@@ -1033,37 +680,76 @@ static bool vgic_validate_access(const struct vgic_dist *dist,
return true;
}
+/*
+ * Call the respective handler function for the given range.
+ * We split up any 64-bit accesses into two consecutive 32-bit
+ * handler calls and merge the result afterwards.
+ * We do this in a little-endian fashion regardless of the host's
+ * or guest's endianness, because the GIC is always LE and the rest
+ * of the code (vgic_reg_access) already operates on LE data.
+ * At this point we have already identified the handler function, so
+ * range points to that one entry and offset is relative to it.
+ */
+static bool call_range_handler(struct kvm_vcpu *vcpu,
+ struct kvm_exit_mmio *mmio,
+ unsigned long offset,
+ const struct kvm_mmio_range *range)
+{
+ u32 *data32 = (void *)mmio->data;
+ struct kvm_exit_mmio mmio32;
+ bool ret;
+
+ if (likely(mmio->len <= 4))
+ return range->handle_mmio(vcpu, mmio, offset);
+
+ /*
+ * Any access bigger than 4 bytes (that we currently handle in KVM)
+ * is actually 8 bytes long, caused by a 64-bit access
+ */
+
+ mmio32.len = 4;
+ mmio32.is_write = mmio->is_write;
+ mmio32.private = mmio->private;
+
+ mmio32.phys_addr = mmio->phys_addr + 4;
+ if (mmio->is_write)
+ *(u32 *)mmio32.data = data32[1];
+ ret = range->handle_mmio(vcpu, &mmio32, offset + 4);
+ if (!mmio->is_write)
+ data32[1] = *(u32 *)mmio32.data;
+
+ mmio32.phys_addr = mmio->phys_addr;
+ if (mmio->is_write)
+ *(u32 *)mmio32.data = data32[0];
+ ret |= range->handle_mmio(vcpu, &mmio32, offset);
+ if (!mmio->is_write)
+ data32[0] = *(u32 *)mmio32.data;
+
+ return ret;
+}
+
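
To make the merge order concrete, here is a minimal userspace sketch of the same little-endian recombination that call_range_handler() performs; all demo_* names are invented for illustration and none of this is kernel code:

#include <stdint.h>
#include <stdio.h>

/* Assemble a 32-bit value from 4 bytes, little-endian on any host. */
static uint32_t demo_le32(const uint8_t *p)
{
	return (uint32_t)p[0] | (uint32_t)p[1] << 8 |
	       (uint32_t)p[2] << 16 | (uint32_t)p[3] << 24;
}

int main(void)
{
	/* An 8-byte MMIO payload: low word at offset 0, high word at 4. */
	uint8_t data[8] = { 0x78, 0x56, 0x34, 0x12, 0xef, 0xbe, 0xad, 0xde };
	uint32_t lo = demo_le32(data);      /* first 32-bit handler call */
	uint32_t hi = demo_le32(data + 4);  /* second 32-bit handler call */

	/* Prints 0xdeadbeef12345678 */
	printf("0x%016llx\n", (unsigned long long)((uint64_t)hi << 32 | lo));
	return 0;
}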
/**
- * vgic_handle_mmio - handle an in-kernel MMIO access
+ * vgic_handle_mmio_range - handle an in-kernel MMIO access
* @vcpu: pointer to the vcpu performing the access
* @run: pointer to the kvm_run structure
* @mmio: pointer to the data describing the access
+ * @ranges: array of MMIO ranges in a given region
+ * @mmio_base: base address of that region
*
- * returns true if the MMIO access has been performed in kernel space,
- * and false if it needs to be emulated in user space.
+ * returns true if the MMIO access could be performed
*/
-bool vgic_handle_mmio(struct kvm_vcpu *vcpu, struct kvm_run *run,
- struct kvm_exit_mmio *mmio)
+bool vgic_handle_mmio_range(struct kvm_vcpu *vcpu, struct kvm_run *run,
+ struct kvm_exit_mmio *mmio,
+ const struct kvm_mmio_range *ranges,
+ unsigned long mmio_base)
{
- const struct mmio_range *range;
+ const struct kvm_mmio_range *range;
struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
- unsigned long base = dist->vgic_dist_base;
bool updated_state;
unsigned long offset;
- if (!irqchip_in_kernel(vcpu->kvm) ||
- mmio->phys_addr < base ||
- (mmio->phys_addr + mmio->len) > (base + KVM_VGIC_V2_DIST_SIZE))
- return false;
-
- /* We don't support ldrd / strd or ldm / stm to the emulated vgic */
- if (mmio->len > 4) {
- kvm_inject_dabt(vcpu, mmio->phys_addr);
- return true;
- }
-
- offset = mmio->phys_addr - base;
- range = find_matching_range(vgic_dist_ranges, mmio, offset);
+ offset = mmio->phys_addr - mmio_base;
+ range = vgic_find_range(ranges, mmio, offset);
if (unlikely(!range || !range->handle_mmio)) {
pr_warn("Unhandled access %d %08llx %d\n",
mmio->is_write, mmio->phys_addr, mmio->len);
@@ -1071,12 +757,12 @@ bool vgic_handle_mmio(struct kvm_vcpu *vcpu, struct kvm_run *run,
}
spin_lock(&vcpu->kvm->arch.vgic.lock);
- offset = mmio->phys_addr - range->base - base;
+ offset -= range->base;
if (vgic_validate_access(dist, range, offset)) {
- updated_state = range->handle_mmio(vcpu, mmio, offset);
+ updated_state = call_range_handler(vcpu, mmio, offset, range);
} else {
- vgic_reg_access(mmio, NULL, offset,
- ACCESS_READ_RAZ | ACCESS_WRITE_IGNORED);
+ if (!mmio->is_write)
+ memset(mmio->data, 0, mmio->len);
updated_state = false;
}
spin_unlock(&vcpu->kvm->arch.vgic.lock);
@@ -1089,50 +775,28 @@ bool vgic_handle_mmio(struct kvm_vcpu *vcpu, struct kvm_run *run,
return true;
}
-static u8 *vgic_get_sgi_sources(struct vgic_dist *dist, int vcpu_id, int sgi)
-{
- return dist->irq_sgi_sources + vcpu_id * VGIC_NR_SGIS + sgi;
-}
-
-static void vgic_dispatch_sgi(struct kvm_vcpu *vcpu, u32 reg)
+/**
+ * vgic_handle_mmio - handle an in-kernel MMIO access for the GIC emulation
+ * @vcpu: pointer to the vcpu performing the access
+ * @run: pointer to the kvm_run structure
+ * @mmio: pointer to the data describing the access
+ *
+ * returns true if the MMIO access has been performed in kernel space,
+ * and false if it needs to be emulated in user space.
+ * Calls the actual handling routine for the selected VGIC model.
+ */
+bool vgic_handle_mmio(struct kvm_vcpu *vcpu, struct kvm_run *run,
+ struct kvm_exit_mmio *mmio)
{
- struct kvm *kvm = vcpu->kvm;
- struct vgic_dist *dist = &kvm->arch.vgic;
- int nrcpus = atomic_read(&kvm->online_vcpus);
- u8 target_cpus;
- int sgi, mode, c, vcpu_id;
-
- vcpu_id = vcpu->vcpu_id;
-
- sgi = reg & 0xf;
- target_cpus = (reg >> 16) & 0xff;
- mode = (reg >> 24) & 3;
-
- switch (mode) {
- case 0:
- if (!target_cpus)
- return;
- break;
-
- case 1:
- target_cpus = ((1 << nrcpus) - 1) & ~(1 << vcpu_id) & 0xff;
- break;
-
- case 2:
- target_cpus = 1 << vcpu_id;
- break;
- }
-
- kvm_for_each_vcpu(c, vcpu, kvm) {
- if (target_cpus & 1) {
- /* Flag the SGI as pending */
- vgic_dist_irq_set_pending(vcpu, sgi);
- *vgic_get_sgi_sources(dist, c, sgi) |= 1 << vcpu_id;
- kvm_debug("SGI%d from CPU%d to CPU%d\n", sgi, vcpu_id, c);
- }
+ if (!irqchip_in_kernel(vcpu->kvm))
+ return false;
- target_cpus >>= 1;
- }
+ /*
+ * This will currently call either vgic_v2_handle_mmio() or
+ * vgic_v3_handle_mmio(), which in turn will call
+ * vgic_handle_mmio_range() defined above.
+ */
+ return vcpu->kvm->arch.vgic.vm_ops.handle_mmio(vcpu, run, mmio);
}
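
The vm_ops indirection used here is nothing more than a function-pointer table chosen once per VM. A rough sketch of the pattern, with invented demo_* names rather than the kernel's actual types:

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical per-model operations, picked at VGIC creation time. */
struct demo_vm_ops {
	bool (*handle_mmio)(unsigned long addr);
};

static bool demo_v2_handle_mmio(unsigned long addr)
{
	printf("GICv2 emulation handles 0x%lx\n", addr);
	return true;
}

static bool demo_v3_handle_mmio(unsigned long addr)
{
	printf("GICv3 emulation handles 0x%lx\n", addr);
	return true;
}

int main(void)
{
	struct demo_vm_ops ops = { .handle_mmio = demo_v2_handle_mmio };

	/* Callers never need to know which model is being emulated. */
	ops.handle_mmio(0x8000);

	ops.handle_mmio = demo_v3_handle_mmio;	/* switch the model */
	ops.handle_mmio(0x8000);
	return 0;
}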
static int vgic_nr_shared_irqs(struct vgic_dist *dist)
@@ -1173,7 +837,7 @@ static int compute_pending_for_cpu(struct kvm_vcpu *vcpu)
* Update the interrupt state and determine which CPUs have pending
* interrupts. Must be called with distributor lock held.
*/
-static void vgic_update_state(struct kvm *kvm)
+void vgic_update_state(struct kvm *kvm)
{
struct vgic_dist *dist = &kvm->arch.vgic;
struct kvm_vcpu *vcpu;
@@ -1234,12 +898,12 @@ static inline void vgic_disable_underflow(struct kvm_vcpu *vcpu)
vgic_ops->disable_underflow(vcpu);
}
-static inline void vgic_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr)
+void vgic_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr)
{
vgic_ops->get_vmcr(vcpu, vmcr);
}
-static void vgic_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr)
+void vgic_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr)
{
vgic_ops->set_vmcr(vcpu, vmcr);
}
@@ -1288,8 +952,9 @@ static void vgic_retire_disabled_irqs(struct kvm_vcpu *vcpu)
/*
* Queue an interrupt to a CPU virtual interface. Return true on success,
* or false if it wasn't possible to queue it.
+ * sgi_source must be zero for non-SGI interrupts.
*/
-static bool vgic_queue_irq(struct kvm_vcpu *vcpu, u8 sgi_source_id, int irq)
+bool vgic_queue_irq(struct kvm_vcpu *vcpu, u8 sgi_source_id, int irq)
{
struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
@@ -1338,37 +1003,6 @@ static bool vgic_queue_irq(struct kvm_vcpu *vcpu, u8 sgi_source_id, int irq)
return true;
}
-static bool vgic_queue_sgi(struct kvm_vcpu *vcpu, int irq)
-{
- struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
- unsigned long sources;
- int vcpu_id = vcpu->vcpu_id;
- int c;
-
- sources = *vgic_get_sgi_sources(dist, vcpu_id, irq);
-
- for_each_set_bit(c, &sources, dist->nr_cpus) {
- if (vgic_queue_irq(vcpu, c, irq))
- clear_bit(c, &sources);
- }
-
- *vgic_get_sgi_sources(dist, vcpu_id, irq) = sources;
-
- /*
- * If the sources bitmap has been cleared it means that we
- * could queue all the SGIs onto link registers (see the
- * clear_bit above), and therefore we are done with them in
- * our emulated gic and can get rid of them.
- */
- if (!sources) {
- vgic_dist_irq_clear_pending(vcpu, irq);
- vgic_cpu_irq_clear(vcpu, irq);
- return true;
- }
-
- return false;
-}
-
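
Although vgic_queue_sgi() moves behind the queue_sgi() hook above, the per-SGI source bookkeeping it implements is worth sketching: each SGI keeps one byte whose set bits name the sending CPUs, and a bit is cleared once a list register has accepted that source. All names below are illustrative only:

#include <stdbool.h>
#include <stdio.h>

/* Stand-in for vgic_queue_irq(): pretend only sources 0 and 2 fit. */
static bool demo_queue_irq(int source)
{
	return source == 0 || source == 2;
}

int main(void)
{
	unsigned char sources = 0x07;	/* SGI sent by CPUs 0, 1 and 2 */

	for (int c = 0; c < 8; c++)
		if ((sources & (1 << c)) && demo_queue_irq(c))
			sources &= ~(1 << c);	/* accepted into an LR */

	/* Non-zero means the SGI must stay pending and be retried later. */
	printf("remaining sources: 0x%02x\n", sources);
	return 0;
}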
static bool vgic_queue_hwirq(struct kvm_vcpu *vcpu, int irq)
{
if (!vgic_can_sample_irq(vcpu, irq))
@@ -1413,7 +1047,7 @@ static void __kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu)
/* SGIs */
for_each_set_bit(i, vgic_cpu->pending_percpu, VGIC_NR_SGIS) {
- if (!vgic_queue_sgi(vcpu, i))
+ if (!queue_sgi(vcpu, i))
overflow = 1;
}
@@ -1575,7 +1209,7 @@ int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu)
return test_bit(vcpu->vcpu_id, dist->irq_pending_on_cpu);
}
-static void vgic_kick_vcpus(struct kvm *kvm)
+void vgic_kick_vcpus(struct kvm *kvm)
{
struct kvm_vcpu *vcpu;
int c;
@@ -1615,7 +1249,7 @@ static int vgic_update_irq_pending(struct kvm *kvm, int cpuid,
struct kvm_vcpu *vcpu;
int edge_triggered, level_triggered;
int enabled;
- bool ret = true;
+ bool ret = true, can_inject = true;
spin_lock(&dist->lock);
@@ -1630,6 +1264,11 @@ static int vgic_update_irq_pending(struct kvm *kvm, int cpuid,
if (irq_num >= VGIC_NR_PRIVATE_IRQS) {
cpuid = dist->irq_spi_cpu[irq_num - VGIC_NR_PRIVATE_IRQS];
+ if (cpuid == VCPU_NOT_ALLOCATED) {
+ /* Pretend we use CPU0, and prevent injection */
+ cpuid = 0;
+ can_inject = false;
+ }
vcpu = kvm_get_vcpu(kvm, cpuid);
}
@@ -1652,7 +1291,7 @@ static int vgic_update_irq_pending(struct kvm *kvm, int cpuid,
enabled = vgic_irq_is_enabled(vcpu, irq_num);
- if (!enabled) {
+ if (!enabled || !can_inject) {
ret = false;
goto out;
}
@@ -1698,6 +1337,16 @@ int kvm_vgic_inject_irq(struct kvm *kvm, int cpuid, unsigned int irq_num,
int vcpu_id;
if (unlikely(!vgic_initialized(kvm))) {
+ /*
+ * We only provide the automatic initialization of the VGIC
+ * for the legacy case of a GICv2. Any other type must be
+ * explicitly initialized once it has been set up with the
+ * respective KVM device call.
+ */
+ if (kvm->arch.vgic.vgic_model != KVM_DEV_TYPE_ARM_VGIC_V2) {
+ ret = -EBUSY;
+ goto out;
+ }
mutex_lock(&kvm->lock);
ret = vgic_init(kvm);
mutex_unlock(&kvm->lock);
@@ -1762,6 +1411,17 @@ static int vgic_vcpu_init_maps(struct kvm_vcpu *vcpu, int nr_irqs)
return 0;
}
+/**
+ * kvm_vgic_get_max_vcpus - Get the maximum number of VCPUs allowed by HW
+ *
+ * The host's GIC naturally limits the maximum number of VCPUs a guest
+ * can use.
+ */
+int kvm_vgic_get_max_vcpus(void)
+{
+ return vgic->max_gic_vcpus;
+}
+
void kvm_vgic_destroy(struct kvm *kvm)
{
struct vgic_dist *dist = &kvm->arch.vgic;
@@ -1784,6 +1444,7 @@ void kvm_vgic_destroy(struct kvm *kvm)
}
kfree(dist->irq_sgi_sources);
kfree(dist->irq_spi_cpu);
+ kfree(dist->irq_spi_mpidr);
kfree(dist->irq_spi_target);
kfree(dist->irq_pending_on_cpu);
dist->irq_sgi_sources = NULL;
@@ -1797,7 +1458,7 @@ void kvm_vgic_destroy(struct kvm *kvm)
* Allocate and initialize the various data structures. Must be called
* with kvm->lock held!
*/
-static int vgic_init(struct kvm *kvm)
+int vgic_init(struct kvm *kvm)
{
struct vgic_dist *dist = &kvm->arch.vgic;
struct kvm_vcpu *vcpu;
@@ -1809,7 +1470,7 @@ static int vgic_init(struct kvm *kvm)
nr_cpus = dist->nr_cpus = atomic_read(&kvm->online_vcpus);
if (!nr_cpus) /* No vcpus? Can't be good... */
- return -EINVAL;
+ return -ENODEV;
/*
* If nobody configured the number of interrupts, use the
@@ -1852,8 +1513,9 @@ static int vgic_init(struct kvm *kvm)
if (ret)
goto out;
- for (i = VGIC_NR_PRIVATE_IRQS; i < dist->nr_irqs; i += 4)
- vgic_set_target_reg(kvm, 0, i);
+ ret = kvm->arch.vgic.vm_ops.init_model(kvm);
+ if (ret)
+ goto out;
kvm_for_each_vcpu(vcpu_id, vcpu, kvm) {
ret = vgic_vcpu_init_maps(vcpu, nr_irqs);
@@ -1882,72 +1544,49 @@ out:
return ret;
}
-/**
- * kvm_vgic_map_resources - Configure global VGIC state before running any VCPUs
- * @kvm: pointer to the kvm struct
- *
- * Map the virtual CPU interface into the VM before running any VCPUs. We
- * can't do this at creation time, because user space must first set the
- * virtual CPU interface address in the guest physical address space.
- */
-int kvm_vgic_map_resources(struct kvm *kvm)
+static int init_vgic_model(struct kvm *kvm, int type)
{
- int ret = 0;
-
- if (!irqchip_in_kernel(kvm))
- return 0;
-
- mutex_lock(&kvm->lock);
-
- if (vgic_ready(kvm))
- goto out;
-
- if (IS_VGIC_ADDR_UNDEF(kvm->arch.vgic.vgic_dist_base) ||
- IS_VGIC_ADDR_UNDEF(kvm->arch.vgic.vgic_cpu_base)) {
- kvm_err("Need to set vgic cpu and dist addresses first\n");
- ret = -ENXIO;
- goto out;
- }
-
- /*
- * Initialize the vgic if this hasn't already been done on demand by
- * accessing the vgic state from userspace.
- */
- ret = vgic_init(kvm);
- if (ret) {
- kvm_err("Unable to allocate maps\n");
- goto out;
+ switch (type) {
+ case KVM_DEV_TYPE_ARM_VGIC_V2:
+ vgic_v2_init_emulation(kvm);
+ break;
+#ifdef CONFIG_ARM_GIC_V3
+ case KVM_DEV_TYPE_ARM_VGIC_V3:
+ vgic_v3_init_emulation(kvm);
+ break;
+#endif
+ default:
+ return -ENODEV;
}
- ret = kvm_phys_addr_ioremap(kvm, kvm->arch.vgic.vgic_cpu_base,
- vgic->vcpu_base, KVM_VGIC_V2_CPU_SIZE,
- true);
- if (ret) {
- kvm_err("Unable to remap VGIC CPU to VCPU\n");
- goto out;
- }
+ if (atomic_read(&kvm->online_vcpus) > kvm->arch.max_vcpus)
+ return -E2BIG;
- kvm->arch.vgic.ready = true;
-out:
- if (ret)
- kvm_vgic_destroy(kvm);
- mutex_unlock(&kvm->lock);
- return ret;
+ return 0;
}
-int kvm_vgic_create(struct kvm *kvm)
+int kvm_vgic_create(struct kvm *kvm, u32 type)
{
int i, vcpu_lock_idx = -1, ret;
struct kvm_vcpu *vcpu;
mutex_lock(&kvm->lock);
- if (kvm->arch.vgic.vctrl_base) {
+ if (irqchip_in_kernel(kvm)) {
ret = -EEXIST;
goto out;
}
/*
+ * This function is also called by the KVM_CREATE_IRQCHIP handler,
+ * which has not yet had a chance to check the availability of the
+ * GICv2 emulation, so repeat the check here. KVM_CREATE_DEVICE
+ * already performs the proper checks.
+ */
+ if (type == KVM_DEV_TYPE_ARM_VGIC_V2 && !vgic->can_emulate_gicv2)
+ return -ENODEV;
+
+ /*
* Any time a vcpu is run, vcpu_load is called which tries to grab the
* vcpu->mutex. By grabbing the vcpu->mutex of all VCPUs we ensure
* that no other VCPUs are run while we create the vgic.
@@ -1965,11 +1604,17 @@ int kvm_vgic_create(struct kvm *kvm)
}
ret = 0;
+ ret = init_vgic_model(kvm, type);
+ if (ret)
+ goto out_unlock;
+
spin_lock_init(&kvm->arch.vgic.lock);
kvm->arch.vgic.in_kernel = true;
+ kvm->arch.vgic.vgic_model = type;
kvm->arch.vgic.vctrl_base = vgic->vctrl_base;
kvm->arch.vgic.vgic_dist_base = VGIC_ADDR_UNDEF;
kvm->arch.vgic.vgic_cpu_base = VGIC_ADDR_UNDEF;
+ kvm->arch.vgic.vgic_redist_base = VGIC_ADDR_UNDEF;
out_unlock:
for (; vcpu_lock_idx >= 0; vcpu_lock_idx--) {
@@ -2022,7 +1667,7 @@ static int vgic_ioaddr_assign(struct kvm *kvm, phys_addr_t *ioaddr,
/**
* kvm_vgic_addr - set or get vgic VM base addresses
* @kvm: pointer to the vm struct
- * @type: the VGIC addr type, one of KVM_VGIC_V2_ADDR_TYPE_XXX
+ * @type: the VGIC addr type, one of KVM_VGIC_V[23]_ADDR_TYPE_XXX
* @addr: pointer to address value
* @write: if true set the address in the VM address space, if false read the
* address
@@ -2036,216 +1681,64 @@ int kvm_vgic_addr(struct kvm *kvm, unsigned long type, u64 *addr, bool write)
{
int r = 0;
struct vgic_dist *vgic = &kvm->arch.vgic;
+ int type_needed;
+ phys_addr_t *addr_ptr, block_size;
+ phys_addr_t alignment;
mutex_lock(&kvm->lock);
switch (type) {
case KVM_VGIC_V2_ADDR_TYPE_DIST:
- if (write) {
- r = vgic_ioaddr_assign(kvm, &vgic->vgic_dist_base,
- *addr, KVM_VGIC_V2_DIST_SIZE);
- } else {
- *addr = vgic->vgic_dist_base;
- }
+ type_needed = KVM_DEV_TYPE_ARM_VGIC_V2;
+ addr_ptr = &vgic->vgic_dist_base;
+ block_size = KVM_VGIC_V2_DIST_SIZE;
+ alignment = SZ_4K;
break;
case KVM_VGIC_V2_ADDR_TYPE_CPU:
- if (write) {
- r = vgic_ioaddr_assign(kvm, &vgic->vgic_cpu_base,
- *addr, KVM_VGIC_V2_CPU_SIZE);
- } else {
- *addr = vgic->vgic_cpu_base;
- }
+ type_needed = KVM_DEV_TYPE_ARM_VGIC_V2;
+ addr_ptr = &vgic->vgic_cpu_base;
+ block_size = KVM_VGIC_V2_CPU_SIZE;
+ alignment = SZ_4K;
break;
- default:
- r = -ENODEV;
- }
-
- mutex_unlock(&kvm->lock);
- return r;
-}
-
-static bool handle_cpu_mmio_misc(struct kvm_vcpu *vcpu,
- struct kvm_exit_mmio *mmio, phys_addr_t offset)
-{
- bool updated = false;
- struct vgic_vmcr vmcr;
- u32 *vmcr_field;
- u32 reg;
-
- vgic_get_vmcr(vcpu, &vmcr);
-
- switch (offset & ~0x3) {
- case GIC_CPU_CTRL:
- vmcr_field = &vmcr.ctlr;
- break;
- case GIC_CPU_PRIMASK:
- vmcr_field = &vmcr.pmr;
+#ifdef CONFIG_ARM_GIC_V3
+ case KVM_VGIC_V3_ADDR_TYPE_DIST:
+ type_needed = KVM_DEV_TYPE_ARM_VGIC_V3;
+ addr_ptr = &vgic->vgic_dist_base;
+ block_size = KVM_VGIC_V3_DIST_SIZE;
+ alignment = SZ_64K;
break;
- case GIC_CPU_BINPOINT:
- vmcr_field = &vmcr.bpr;
- break;
- case GIC_CPU_ALIAS_BINPOINT:
- vmcr_field = &vmcr.abpr;
+ case KVM_VGIC_V3_ADDR_TYPE_REDIST:
+ type_needed = KVM_DEV_TYPE_ARM_VGIC_V3;
+ addr_ptr = &vgic->vgic_redist_base;
+ block_size = KVM_VGIC_V3_REDIST_SIZE;
+ alignment = SZ_64K;
break;
+#endif
default:
- BUG();
- }
-
- if (!mmio->is_write) {
- reg = *vmcr_field;
- mmio_data_write(mmio, ~0, reg);
- } else {
- reg = mmio_data_read(mmio, ~0);
- if (reg != *vmcr_field) {
- *vmcr_field = reg;
- vgic_set_vmcr(vcpu, &vmcr);
- updated = true;
- }
- }
- return updated;
-}
-
-static bool handle_mmio_abpr(struct kvm_vcpu *vcpu,
- struct kvm_exit_mmio *mmio, phys_addr_t offset)
-{
- return handle_cpu_mmio_misc(vcpu, mmio, GIC_CPU_ALIAS_BINPOINT);
-}
-
-static bool handle_cpu_mmio_ident(struct kvm_vcpu *vcpu,
- struct kvm_exit_mmio *mmio,
- phys_addr_t offset)
-{
- u32 reg;
-
- if (mmio->is_write)
- return false;
-
- /* GICC_IIDR */
- reg = (PRODUCT_ID_KVM << 20) |
- (GICC_ARCH_VERSION_V2 << 16) |
- (IMPLEMENTER_ARM << 0);
- mmio_data_write(mmio, ~0, reg);
- return false;
-}
-
-/*
- * CPU Interface Register accesses - these are not accessed by the VM, but by
- * user space for saving and restoring VGIC state.
- */
-static const struct mmio_range vgic_cpu_ranges[] = {
- {
- .base = GIC_CPU_CTRL,
- .len = 12,
- .handle_mmio = handle_cpu_mmio_misc,
- },
- {
- .base = GIC_CPU_ALIAS_BINPOINT,
- .len = 4,
- .handle_mmio = handle_mmio_abpr,
- },
- {
- .base = GIC_CPU_ACTIVEPRIO,
- .len = 16,
- .handle_mmio = handle_mmio_raz_wi,
- },
- {
- .base = GIC_CPU_IDENT,
- .len = 4,
- .handle_mmio = handle_cpu_mmio_ident,
- },
-};
-
-static int vgic_attr_regs_access(struct kvm_device *dev,
- struct kvm_device_attr *attr,
- u32 *reg, bool is_write)
-{
- const struct mmio_range *r = NULL, *ranges;
- phys_addr_t offset;
- int ret, cpuid, c;
- struct kvm_vcpu *vcpu, *tmp_vcpu;
- struct vgic_dist *vgic;
- struct kvm_exit_mmio mmio;
-
- offset = attr->attr & KVM_DEV_ARM_VGIC_OFFSET_MASK;
- cpuid = (attr->attr & KVM_DEV_ARM_VGIC_CPUID_MASK) >>
- KVM_DEV_ARM_VGIC_CPUID_SHIFT;
-
- mutex_lock(&dev->kvm->lock);
-
- ret = vgic_init(dev->kvm);
- if (ret)
- goto out;
-
- if (cpuid >= atomic_read(&dev->kvm->online_vcpus)) {
- ret = -EINVAL;
+ r = -ENODEV;
goto out;
}
- vcpu = kvm_get_vcpu(dev->kvm, cpuid);
- vgic = &dev->kvm->arch.vgic;
-
- mmio.len = 4;
- mmio.is_write = is_write;
- if (is_write)
- mmio_data_write(&mmio, ~0, *reg);
- switch (attr->group) {
- case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
- mmio.phys_addr = vgic->vgic_dist_base + offset;
- ranges = vgic_dist_ranges;
- break;
- case KVM_DEV_ARM_VGIC_GRP_CPU_REGS:
- mmio.phys_addr = vgic->vgic_cpu_base + offset;
- ranges = vgic_cpu_ranges;
- break;
- default:
- BUG();
- }
- r = find_matching_range(ranges, &mmio, offset);
-
- if (unlikely(!r || !r->handle_mmio)) {
- ret = -ENXIO;
+ if (vgic->vgic_model != type_needed) {
+ r = -ENODEV;
goto out;
}
-
- spin_lock(&vgic->lock);
-
- /*
- * Ensure that no other VCPU is running by checking the vcpu->cpu
- * field. If no other VPCUs are running we can safely access the VGIC
- * state, because even if another VPU is run after this point, that
- * VCPU will not touch the vgic state, because it will block on
- * getting the vgic->lock in kvm_vgic_sync_hwstate().
- */
- kvm_for_each_vcpu(c, tmp_vcpu, dev->kvm) {
- if (unlikely(tmp_vcpu->cpu != -1)) {
- ret = -EBUSY;
- goto out_vgic_unlock;
- }
+ if (write) {
+ if (!IS_ALIGNED(*addr, alignment))
+ r = -EINVAL;
+ else
+ r = vgic_ioaddr_assign(kvm, addr_ptr, *addr,
+ block_size);
+ } else {
+ *addr = *addr_ptr;
}
- /*
- * Move all pending IRQs from the LRs on all VCPUs so the pending
- * state can be properly represented in the register state accessible
- * through this API.
- */
- kvm_for_each_vcpu(c, tmp_vcpu, dev->kvm)
- vgic_unqueue_irqs(tmp_vcpu);
-
- offset -= r->base;
- r->handle_mmio(vcpu, &mmio, offset);
-
- if (!is_write)
- *reg = mmio_data_read(&mmio, ~0);
-
- ret = 0;
-out_vgic_unlock:
- spin_unlock(&vgic->lock);
out:
- mutex_unlock(&dev->kvm->lock);
- return ret;
+ mutex_unlock(&kvm->lock);
+ return r;
}
-static int vgic_set_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
+int vgic_set_common_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
{
int r;
@@ -2261,17 +1754,6 @@ static int vgic_set_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
r = kvm_vgic_addr(dev->kvm, type, &addr, true);
return (r == -ENODEV) ? -ENXIO : r;
}
-
- case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
- case KVM_DEV_ARM_VGIC_GRP_CPU_REGS: {
- u32 __user *uaddr = (u32 __user *)(long)attr->addr;
- u32 reg;
-
- if (get_user(reg, uaddr))
- return -EFAULT;
-
- return vgic_attr_regs_access(dev, attr, &reg, true);
- }
case KVM_DEV_ARM_VGIC_GRP_NR_IRQS: {
u32 __user *uaddr = (u32 __user *)(long)attr->addr;
u32 val;
@@ -2302,13 +1784,20 @@ static int vgic_set_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
return ret;
}
-
+ case KVM_DEV_ARM_VGIC_GRP_CTRL: {
+ switch (attr->attr) {
+ case KVM_DEV_ARM_VGIC_CTRL_INIT:
+ r = vgic_init(dev->kvm);
+ return r;
+ }
+ break;
+ }
}
return -ENXIO;
}
-static int vgic_get_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
+int vgic_get_common_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
{
int r = -ENXIO;
@@ -2326,20 +1815,9 @@ static int vgic_get_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
return -EFAULT;
break;
}
-
- case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
- case KVM_DEV_ARM_VGIC_GRP_CPU_REGS: {
- u32 __user *uaddr = (u32 __user *)(long)attr->addr;
- u32 reg = 0;
-
- r = vgic_attr_regs_access(dev, attr, &reg, false);
- if (r)
- return r;
- r = put_user(reg, uaddr);
- break;
- }
case KVM_DEV_ARM_VGIC_GRP_NR_IRQS: {
u32 __user *uaddr = (u32 __user *)(long)attr->addr;
+
r = put_user(dev->kvm->arch.vgic.nr_irqs, uaddr);
break;
}
@@ -2349,61 +1827,17 @@ static int vgic_get_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
return r;
}
-static int vgic_has_attr_regs(const struct mmio_range *ranges,
- phys_addr_t offset)
+int vgic_has_attr_regs(const struct kvm_mmio_range *ranges, phys_addr_t offset)
{
struct kvm_exit_mmio dev_attr_mmio;
dev_attr_mmio.len = 4;
- if (find_matching_range(ranges, &dev_attr_mmio, offset))
+ if (vgic_find_range(ranges, &dev_attr_mmio, offset))
return 0;
else
return -ENXIO;
}
-static int vgic_has_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
-{
- phys_addr_t offset;
-
- switch (attr->group) {
- case KVM_DEV_ARM_VGIC_GRP_ADDR:
- switch (attr->attr) {
- case KVM_VGIC_V2_ADDR_TYPE_DIST:
- case KVM_VGIC_V2_ADDR_TYPE_CPU:
- return 0;
- }
- break;
- case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
- offset = attr->attr & KVM_DEV_ARM_VGIC_OFFSET_MASK;
- return vgic_has_attr_regs(vgic_dist_ranges, offset);
- case KVM_DEV_ARM_VGIC_GRP_CPU_REGS:
- offset = attr->attr & KVM_DEV_ARM_VGIC_OFFSET_MASK;
- return vgic_has_attr_regs(vgic_cpu_ranges, offset);
- case KVM_DEV_ARM_VGIC_GRP_NR_IRQS:
- return 0;
- }
- return -ENXIO;
-}
-
-static void vgic_destroy(struct kvm_device *dev)
-{
- kfree(dev);
-}
-
-static int vgic_create(struct kvm_device *dev, u32 type)
-{
- return kvm_vgic_create(dev->kvm);
-}
-
-static struct kvm_device_ops kvm_arm_vgic_v2_ops = {
- .name = "kvm-arm-vgic",
- .create = vgic_create,
- .destroy = vgic_destroy,
- .set_attr = vgic_set_attr,
- .get_attr = vgic_get_attr,
- .has_attr = vgic_has_attr,
-};
-
static void vgic_init_maintenance_interrupt(void *info)
{
enable_percpu_irq(vgic->maint_irq, 0);
@@ -2474,8 +1908,7 @@ int kvm_vgic_hyp_init(void)
on_each_cpu(vgic_init_maintenance_interrupt, NULL, 1);
- return kvm_register_device_ops(&kvm_arm_vgic_v2_ops,
- KVM_DEV_TYPE_ARM_VGIC_V2);
+ return 0;
out_free_irq:
free_percpu_irq(vgic->maint_irq, kvm_get_running_vcpus());
diff --git a/virt/kvm/arm/vgic.h b/virt/kvm/arm/vgic.h
new file mode 100644
index 000000000000..1e83bdf5f499
--- /dev/null
+++ b/virt/kvm/arm/vgic.h
@@ -0,0 +1,123 @@
+/*
+ * Copyright (C) 2012-2014 ARM Ltd.
+ * Author: Marc Zyngier <marc.zyngier@arm.com>
+ *
+ * Derived from virt/kvm/arm/vgic.c
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __KVM_VGIC_H__
+#define __KVM_VGIC_H__
+
+#define VGIC_ADDR_UNDEF (-1)
+#define IS_VGIC_ADDR_UNDEF(_x) ((_x) == VGIC_ADDR_UNDEF)
+
+#define PRODUCT_ID_KVM 0x4b /* ASCII code K */
+#define IMPLEMENTER_ARM 0x43b
+
+#define ACCESS_READ_VALUE (1 << 0)
+#define ACCESS_READ_RAZ (0 << 0)
+#define ACCESS_READ_MASK(x) ((x) & (1 << 0))
+#define ACCESS_WRITE_IGNORED (0 << 1)
+#define ACCESS_WRITE_SETBIT (1 << 1)
+#define ACCESS_WRITE_CLEARBIT (2 << 1)
+#define ACCESS_WRITE_VALUE (3 << 1)
+#define ACCESS_WRITE_MASK(x) ((x) & (3 << 1))
+
+#define VCPU_NOT_ALLOCATED ((u8)-1)
+
+unsigned long *vgic_bitmap_get_shared_map(struct vgic_bitmap *x);
+
+void vgic_update_state(struct kvm *kvm);
+int vgic_init_common_maps(struct kvm *kvm);
+
+u32 *vgic_bitmap_get_reg(struct vgic_bitmap *x, int cpuid, u32 offset);
+u32 *vgic_bytemap_get_reg(struct vgic_bytemap *x, int cpuid, u32 offset);
+
+void vgic_dist_irq_set_pending(struct kvm_vcpu *vcpu, int irq);
+void vgic_dist_irq_clear_pending(struct kvm_vcpu *vcpu, int irq);
+void vgic_cpu_irq_clear(struct kvm_vcpu *vcpu, int irq);
+void vgic_bitmap_set_irq_val(struct vgic_bitmap *x, int cpuid,
+ int irq, int val);
+
+void vgic_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr);
+void vgic_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr);
+
+bool vgic_queue_irq(struct kvm_vcpu *vcpu, u8 sgi_source_id, int irq);
+void vgic_unqueue_irqs(struct kvm_vcpu *vcpu);
+
+void vgic_reg_access(struct kvm_exit_mmio *mmio, u32 *reg,
+ phys_addr_t offset, int mode);
+bool handle_mmio_raz_wi(struct kvm_vcpu *vcpu, struct kvm_exit_mmio *mmio,
+ phys_addr_t offset);
+
+static inline
+u32 mmio_data_read(struct kvm_exit_mmio *mmio, u32 mask)
+{
+ return le32_to_cpu(*((u32 *)mmio->data)) & mask;
+}
+
+static inline
+void mmio_data_write(struct kvm_exit_mmio *mmio, u32 mask, u32 value)
+{
+ *((u32 *)mmio->data) = cpu_to_le32(value) & mask;
+}
+
+struct kvm_mmio_range {
+ phys_addr_t base;
+ unsigned long len;
+ int bits_per_irq;
+ bool (*handle_mmio)(struct kvm_vcpu *vcpu, struct kvm_exit_mmio *mmio,
+ phys_addr_t offset);
+};
+
+static inline bool is_in_range(phys_addr_t addr, unsigned long len,
+ phys_addr_t baseaddr, unsigned long size)
+{
+ return (addr >= baseaddr) && (addr + len <= baseaddr + size);
+}
+
+const
+struct kvm_mmio_range *vgic_find_range(const struct kvm_mmio_range *ranges,
+ struct kvm_exit_mmio *mmio,
+ phys_addr_t offset);
+
+bool vgic_handle_mmio_range(struct kvm_vcpu *vcpu, struct kvm_run *run,
+ struct kvm_exit_mmio *mmio,
+ const struct kvm_mmio_range *ranges,
+ unsigned long mmio_base);
+
+bool vgic_handle_enable_reg(struct kvm *kvm, struct kvm_exit_mmio *mmio,
+ phys_addr_t offset, int vcpu_id, int access);
+
+bool vgic_handle_set_pending_reg(struct kvm *kvm, struct kvm_exit_mmio *mmio,
+ phys_addr_t offset, int vcpu_id);
+
+bool vgic_handle_clear_pending_reg(struct kvm *kvm, struct kvm_exit_mmio *mmio,
+ phys_addr_t offset, int vcpu_id);
+
+bool vgic_handle_cfg_reg(u32 *reg, struct kvm_exit_mmio *mmio,
+ phys_addr_t offset);
+
+void vgic_kick_vcpus(struct kvm *kvm);
+
+int vgic_has_attr_regs(const struct kvm_mmio_range *ranges, phys_addr_t offset);
+int vgic_set_common_attr(struct kvm_device *dev, struct kvm_device_attr *attr);
+int vgic_get_common_attr(struct kvm_device *dev, struct kvm_device_attr *attr);
+
+int vgic_init(struct kvm *kvm);
+void vgic_v2_init_emulation(struct kvm *kvm);
+void vgic_v3_init_emulation(struct kvm *kvm);
+
+#endif
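
The kvm_mmio_range tables that vgic_find_range() walks are terminated by a zero-length sentinel and searched linearly. A self-contained approximation of the lookup (demo_* names are invented; the real tables live in the vgic-v2/v3 emulation files):

#include <stdio.h>

struct demo_range {
	unsigned long base;
	unsigned long len;
	const char *name;
};

/* A zero-length entry terminates the table, as in the vgic tables. */
static const struct demo_range demo_ranges[] = {
	{ .base = 0x000, .len = 12, .name = "CTRL" },
	{ .base = 0x100, .len = 32, .name = "ENABLE_SET" },
	{ 0 }
};

static const struct demo_range *demo_find(unsigned long offset,
					  unsigned long access_len)
{
	const struct demo_range *r = demo_ranges;

	while (r->len) {
		if (offset >= r->base &&
		    offset + access_len <= r->base + r->len)
			return r;
		r++;
	}
	return NULL;
}

int main(void)
{
	const struct demo_range *r = demo_find(0x104, 4);

	printf("%s\n", r ? r->name : "unhandled");	/* ENABLE_SET */
	return 0;
}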
diff --git a/virt/kvm/async_pf.c b/virt/kvm/async_pf.c
index 5ff7f7f2689a..44660aee335f 100644
--- a/virt/kvm/async_pf.c
+++ b/virt/kvm/async_pf.c
@@ -80,7 +80,7 @@ static void async_pf_execute(struct work_struct *work)
might_sleep();
- kvm_get_user_page_io(NULL, mm, addr, 1, NULL);
+ get_user_pages_unlocked(NULL, mm, addr, 1, 1, 0, NULL);
kvm_async_page_present_sync(vcpu, apf);
spin_lock(&vcpu->async_pf.lock);
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 1cc6e2e19982..a1093700f3a4 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -66,6 +66,9 @@
MODULE_AUTHOR("Qumranet");
MODULE_LICENSE("GPL");
+unsigned int halt_poll_ns = 0;
+module_param(halt_poll_ns, uint, S_IRUGO | S_IWUSR);
+
/*
* Ordering of locks:
*
@@ -89,7 +92,7 @@ struct dentry *kvm_debugfs_dir;
static long kvm_vcpu_ioctl(struct file *file, unsigned int ioctl,
unsigned long arg);
-#ifdef CONFIG_COMPAT
+#ifdef CONFIG_KVM_COMPAT
static long kvm_vcpu_compat_ioctl(struct file *file, unsigned int ioctl,
unsigned long arg);
#endif
@@ -176,6 +179,7 @@ bool kvm_make_all_cpus_request(struct kvm *kvm, unsigned int req)
return called;
}
+#ifndef CONFIG_HAVE_KVM_ARCH_TLB_FLUSH_ALL
void kvm_flush_remote_tlbs(struct kvm *kvm)
{
long dirty_count = kvm->tlbs_dirty;
@@ -186,6 +190,7 @@ void kvm_flush_remote_tlbs(struct kvm *kvm)
cmpxchg(&kvm->tlbs_dirty, dirty_count, 0);
}
EXPORT_SYMBOL_GPL(kvm_flush_remote_tlbs);
+#endif
void kvm_reload_remote_mmus(struct kvm *kvm)
{
@@ -673,6 +678,7 @@ static void update_memslots(struct kvm_memslots *slots,
if (!new->npages) {
WARN_ON(!mslots[i].npages);
new->base_gfn = 0;
+ new->flags = 0;
if (mslots[i].npages)
slots->used_slots--;
} else {
@@ -993,6 +999,86 @@ out:
}
EXPORT_SYMBOL_GPL(kvm_get_dirty_log);
+#ifdef CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT
+/**
+ * kvm_get_dirty_log_protect - get a snapshot of dirty pages, and if any pages
+ * are dirty, write protect them for the next write.
+ * @kvm: pointer to kvm instance
+ * @log: slot id and address to which we copy the log
+ * @is_dirty: flag set if any page is dirty
+ *
+ * We need to keep in mind that VCPU threads can write to the bitmap
+ * concurrently. So, to avoid losing track of dirty pages we keep the
+ * following order:
+ *
+ * 1. Take a snapshot of the bit and clear it if needed.
+ * 2. Write protect the corresponding page.
+ * 3. Copy the snapshot to userspace.
+ * 4. Upon return, the caller flushes the TLBs if needed.
+ *
+ * Between 2 and 4, the guest may write to the page using the remaining TLB
+ * entry. This is not a problem because the page is reported dirty using
+ * the snapshot taken before, and step 4 ensures that writes done after
+ * exiting to userspace will be logged for the next call.
+ */
+int kvm_get_dirty_log_protect(struct kvm *kvm,
+ struct kvm_dirty_log *log, bool *is_dirty)
+{
+ struct kvm_memory_slot *memslot;
+ int r, i;
+ unsigned long n;
+ unsigned long *dirty_bitmap;
+ unsigned long *dirty_bitmap_buffer;
+
+ r = -EINVAL;
+ if (log->slot >= KVM_USER_MEM_SLOTS)
+ goto out;
+
+ memslot = id_to_memslot(kvm->memslots, log->slot);
+
+ dirty_bitmap = memslot->dirty_bitmap;
+ r = -ENOENT;
+ if (!dirty_bitmap)
+ goto out;
+
+ n = kvm_dirty_bitmap_bytes(memslot);
+
+ dirty_bitmap_buffer = dirty_bitmap + n / sizeof(long);
+ memset(dirty_bitmap_buffer, 0, n);
+
+ spin_lock(&kvm->mmu_lock);
+ *is_dirty = false;
+ for (i = 0; i < n / sizeof(long); i++) {
+ unsigned long mask;
+ gfn_t offset;
+
+ if (!dirty_bitmap[i])
+ continue;
+
+ *is_dirty = true;
+
+ mask = xchg(&dirty_bitmap[i], 0);
+ dirty_bitmap_buffer[i] = mask;
+
+ offset = i * BITS_PER_LONG;
+ kvm_arch_mmu_enable_log_dirty_pt_masked(kvm, memslot, offset,
+ mask);
+ }
+
+ spin_unlock(&kvm->mmu_lock);
+
+ r = -EFAULT;
+ if (copy_to_user(log->dirty_bitmap, dirty_bitmap_buffer, n))
+ goto out;
+
+ r = 0;
+out:
+ return r;
+}
+EXPORT_SYMBOL_GPL(kvm_get_dirty_log_protect);
+#endif
+
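
Step 1 of the ordering above depends on snapshotting and clearing each bitmap word in a single atomic exchange, so bits set concurrently by vCPU threads are never lost. A hedged userspace approximation of the per-word loop, using GCC's __atomic builtins in place of the kernel's xchg():

#include <stdio.h>

#define DEMO_BITS_PER_LONG (8 * sizeof(unsigned long))

int main(void)
{
	/* One word of a dirty bitmap, as if filled in by vCPU threads. */
	unsigned long dirty = (1UL << 3) | (1UL << 17);
	unsigned long snapshot;

	/* Atomically grab the current bits and clear them in one step. */
	snapshot = __atomic_exchange_n(&dirty, 0UL, __ATOMIC_SEQ_CST);

	for (unsigned long bit = 0; bit < DEMO_BITS_PER_LONG; bit++)
		if (snapshot & (1UL << bit))
			printf("page at bit offset %lu was dirty\n", bit);

	return 0;
}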
bool kvm_largepages_enabled(void)
{
return largepages_enabled;
@@ -1128,43 +1214,6 @@ static int get_user_page_nowait(struct task_struct *tsk, struct mm_struct *mm,
return __get_user_pages(tsk, mm, start, 1, flags, page, NULL, NULL);
}
-int kvm_get_user_page_io(struct task_struct *tsk, struct mm_struct *mm,
- unsigned long addr, bool write_fault,
- struct page **pagep)
-{
- int npages;
- int locked = 1;
- int flags = FOLL_TOUCH | FOLL_HWPOISON |
- (pagep ? FOLL_GET : 0) |
- (write_fault ? FOLL_WRITE : 0);
-
- /*
- * If retrying the fault, we get here *not* having allowed the filemap
- * to wait on the page lock. We should now allow waiting on the IO with
- * the mmap semaphore released.
- */
- down_read(&mm->mmap_sem);
- npages = __get_user_pages(tsk, mm, addr, 1, flags, pagep, NULL,
- &locked);
- if (!locked) {
- VM_BUG_ON(npages);
-
- if (!pagep)
- return 0;
-
- /*
- * The previous call has now waited on the IO. Now we can
- * retry and complete. Pass TRIED to ensure we do not re
- * schedule async IO (see e.g. filemap_fault).
- */
- down_read(&mm->mmap_sem);
- npages = __get_user_pages(tsk, mm, addr, 1, flags | FOLL_TRIED,
- pagep, NULL, NULL);
- }
- up_read(&mm->mmap_sem);
- return npages;
-}
-
static inline int check_user_page_hwpoison(unsigned long addr)
{
int rc, flags = FOLL_TOUCH | FOLL_HWPOISON | FOLL_WRITE;
@@ -1227,15 +1276,10 @@ static int hva_to_pfn_slow(unsigned long addr, bool *async, bool write_fault,
npages = get_user_page_nowait(current, current->mm,
addr, write_fault, page);
up_read(&current->mm->mmap_sem);
- } else {
- /*
- * By now we have tried gup_fast, and possibly async_pf, and we
- * are certainly not atomic. Time to retry the gup, allowing
- * mmap semaphore to be relinquished in the case of IO.
- */
- npages = kvm_get_user_page_io(current, current->mm, addr,
- write_fault, page);
- }
+ } else
+ npages = __get_user_pages_unlocked(current, current->mm, addr, 1,
+ write_fault, 0, page,
+ FOLL_TOUCH|FOLL_HWPOISON);
if (npages != 1)
return npages;
@@ -1593,6 +1637,7 @@ int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data,
}
return 0;
}
+EXPORT_SYMBOL_GPL(kvm_write_guest);
int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
gpa_t gpa, unsigned long len)
@@ -1729,29 +1774,60 @@ void mark_page_dirty(struct kvm *kvm, gfn_t gfn)
}
EXPORT_SYMBOL_GPL(mark_page_dirty);
+static int kvm_vcpu_check_block(struct kvm_vcpu *vcpu)
+{
+ if (kvm_arch_vcpu_runnable(vcpu)) {
+ kvm_make_request(KVM_REQ_UNHALT, vcpu);
+ return -EINTR;
+ }
+ if (kvm_cpu_has_pending_timer(vcpu))
+ return -EINTR;
+ if (signal_pending(current))
+ return -EINTR;
+
+ return 0;
+}
+
/*
* The vCPU has executed a HLT instruction with in-kernel mode enabled.
*/
void kvm_vcpu_block(struct kvm_vcpu *vcpu)
{
+ ktime_t start, cur;
DEFINE_WAIT(wait);
+ bool waited = false;
+
+ start = cur = ktime_get();
+ if (halt_poll_ns) {
+ ktime_t stop = ktime_add_ns(ktime_get(), halt_poll_ns);
+ do {
+ /*
+ * This sets KVM_REQ_UNHALT if an interrupt
+ * arrives.
+ */
+ if (kvm_vcpu_check_block(vcpu) < 0) {
+ ++vcpu->stat.halt_successful_poll;
+ goto out;
+ }
+ cur = ktime_get();
+ } while (single_task_running() && ktime_before(cur, stop));
+ }
for (;;) {
prepare_to_wait(&vcpu->wq, &wait, TASK_INTERRUPTIBLE);
- if (kvm_arch_vcpu_runnable(vcpu)) {
- kvm_make_request(KVM_REQ_UNHALT, vcpu);
- break;
- }
- if (kvm_cpu_has_pending_timer(vcpu))
- break;
- if (signal_pending(current))
+ if (kvm_vcpu_check_block(vcpu) < 0)
break;
+ waited = true;
schedule();
}
finish_wait(&vcpu->wq, &wait);
+ cur = ktime_get();
+
+out:
+ trace_kvm_vcpu_wakeup(ktime_to_ns(cur) - ktime_to_ns(start), waited);
}
EXPORT_SYMBOL_GPL(kvm_vcpu_block);
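
halt_poll_ns is a writable module parameter, so the poll window can be tuned at runtime. The structure of the change, poll briefly for the wakeup condition while the CPU has nothing else to run and only then pay for a real sleep, is a general latency trick. A stripped-down pthread sketch of the same shape (demo_* names are invented; this is not the kernel's implementation):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <time.h>

static pthread_mutex_t demo_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t demo_cond = PTHREAD_COND_INITIALIZER;
static bool demo_runnable;

static long long demo_now_ns(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return ts.tv_sec * 1000000000LL + ts.tv_nsec;
}

/* Poll briefly for the wakeup condition, then fall back to sleeping. */
static void demo_block(long long poll_ns)
{
	long long stop = demo_now_ns() + poll_ns;

	while (demo_now_ns() < stop)
		if (__atomic_load_n(&demo_runnable, __ATOMIC_ACQUIRE))
			return;		/* woken without scheduling out */

	pthread_mutex_lock(&demo_lock);
	while (!demo_runnable)
		pthread_cond_wait(&demo_cond, &demo_lock);
	pthread_mutex_unlock(&demo_lock);
}

static void *demo_waker(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&demo_lock);
	__atomic_store_n(&demo_runnable, true, __ATOMIC_RELEASE);
	pthread_cond_signal(&demo_cond);
	pthread_mutex_unlock(&demo_lock);
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, demo_waker, NULL);
	demo_block(200000);	/* poll up to 200us before sleeping */
	pthread_join(t, NULL);
	printf("woken\n");
	return 0;
}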
@@ -1934,7 +2010,7 @@ static int kvm_vcpu_release(struct inode *inode, struct file *filp)
static struct file_operations kvm_vcpu_fops = {
.release = kvm_vcpu_release,
.unlocked_ioctl = kvm_vcpu_ioctl,
-#ifdef CONFIG_COMPAT
+#ifdef CONFIG_KVM_COMPAT
.compat_ioctl = kvm_vcpu_compat_ioctl,
#endif
.mmap = kvm_vcpu_mmap,
@@ -2224,7 +2300,7 @@ out:
return r;
}
-#ifdef CONFIG_COMPAT
+#ifdef CONFIG_KVM_COMPAT
static long kvm_vcpu_compat_ioctl(struct file *filp,
unsigned int ioctl, unsigned long arg)
{
@@ -2316,7 +2392,7 @@ static int kvm_device_release(struct inode *inode, struct file *filp)
static const struct file_operations kvm_device_fops = {
.unlocked_ioctl = kvm_device_ioctl,
-#ifdef CONFIG_COMPAT
+#ifdef CONFIG_KVM_COMPAT
.compat_ioctl = kvm_device_ioctl,
#endif
.release = kvm_device_release,
@@ -2603,7 +2679,7 @@ out:
return r;
}
-#ifdef CONFIG_COMPAT
+#ifdef CONFIG_KVM_COMPAT
struct compat_kvm_dirty_log {
__u32 slot;
__u32 padding1;
@@ -2650,7 +2726,7 @@ out:
static struct file_operations kvm_vm_fops = {
.release = kvm_vm_release,
.unlocked_ioctl = kvm_vm_ioctl,
-#ifdef CONFIG_COMPAT
+#ifdef CONFIG_KVM_COMPAT
.compat_ioctl = kvm_vm_compat_ioctl,
#endif
.llseek = noop_llseek,