Diffstat (limited to 'virt/kvm/arm')
-rw-r--r--   virt/kvm/arm/arch_timer.c           | 17
-rw-r--r--   virt/kvm/arm/pmu.c                  |  8
-rw-r--r--   virt/kvm/arm/vgic/vgic-its.c        | 11
-rw-r--r--   virt/kvm/arm/vgic/vgic-kvm-device.c |  2
-rw-r--r--   virt/kvm/arm/vgic/vgic-mmio-v2.c    |  3
-rw-r--r--   virt/kvm/arm/vgic/vgic-mmio-v3.c    |  2
-rw-r--r--   virt/kvm/arm/vgic/vgic-mmio.c       | 41
-rw-r--r--   virt/kvm/arm/vgic/vgic-mmio.h       | 14
-rw-r--r--   virt/kvm/arm/vgic/vgic-v2.c         |  6
-rw-r--r--   virt/kvm/arm/vgic/vgic-v3.c         |  6
-rw-r--r--   virt/kvm/arm/vgic/vgic.c            | 12
-rw-r--r--   virt/kvm/arm/vgic/vgic.h            | 26
12 files changed, 73 insertions, 75 deletions
diff --git a/virt/kvm/arm/arch_timer.c b/virt/kvm/arm/arch_timer.c
index 27a1f6341d41..ae95fc0e3214 100644
--- a/virt/kvm/arm/arch_timer.c
+++ b/virt/kvm/arm/arch_timer.c
@@ -425,6 +425,11 @@ int kvm_timer_hyp_init(void)
 	info = arch_timer_get_kvm_info();
 	timecounter = &info->timecounter;
 
+	if (!timecounter->cc) {
+		kvm_err("kvm_arch_timer: uninitialized timecounter\n");
+		return -ENODEV;
+	}
+
 	if (info->virtual_irq <= 0) {
 		kvm_err("kvm_arch_timer: invalid virtual timer IRQ: %d\n",
 			info->virtual_irq);
@@ -498,17 +503,7 @@ int kvm_timer_enable(struct kvm_vcpu *vcpu)
 
 	if (ret)
 		return ret;
-
-	/*
-	 * There is a potential race here between VCPUs starting for the first
-	 * time, which may be enabling the timer multiple times.  That doesn't
-	 * hurt though, because we're just setting a variable to the same
-	 * variable that it already was.  The important thing is that all
-	 * VCPUs have the enabled variable set, before entering the guest, if
-	 * the arch timers are enabled.
-	 */
-	if (timecounter)
-		timer->enabled = 1;
+	timer->enabled = 1;
 
 	return 0;
 }
diff --git a/virt/kvm/arm/pmu.c b/virt/kvm/arm/pmu.c
index 6e9c40eea208..69ccce308458 100644
--- a/virt/kvm/arm/pmu.c
+++ b/virt/kvm/arm/pmu.c
@@ -305,7 +305,7 @@ void kvm_pmu_software_increment(struct kvm_vcpu *vcpu, u64 val)
 			continue;
 		type = vcpu_sys_reg(vcpu, PMEVTYPER0_EL0 + i)
 		       & ARMV8_PMU_EVTYPE_EVENT;
-		if ((type == ARMV8_PMU_EVTYPE_EVENT_SW_INCR)
+		if ((type == ARMV8_PMUV3_PERFCTR_SW_INCR)
 		    && (enable & BIT(i))) {
 			reg = vcpu_sys_reg(vcpu, PMEVCNTR0_EL0 + i) + 1;
 			reg = lower_32_bits(reg);
@@ -379,7 +379,8 @@ void kvm_pmu_set_counter_event_type(struct kvm_vcpu *vcpu, u64 data,
 	eventsel = data & ARMV8_PMU_EVTYPE_EVENT;
 
 	/* Software increment event does't need to be backed by a perf event */
-	if (eventsel == ARMV8_PMU_EVTYPE_EVENT_SW_INCR)
+	if (eventsel == ARMV8_PMUV3_PERFCTR_SW_INCR &&
+	    select_idx != ARMV8_PMU_CYCLE_IDX)
 		return;
 
 	memset(&attr, 0, sizeof(struct perf_event_attr));
@@ -391,7 +392,8 @@ void kvm_pmu_set_counter_event_type(struct kvm_vcpu *vcpu, u64 data,
 	attr.exclude_kernel = data & ARMV8_PMU_EXCLUDE_EL1 ? 1 : 0;
 	attr.exclude_hv = 1; /* Don't count EL2 events */
 	attr.exclude_host = 1; /* Don't count host events */
-	attr.config = eventsel;
+	attr.config = (select_idx == ARMV8_PMU_CYCLE_IDX) ?
+		ARMV8_PMUV3_PERFCTR_CPU_CYCLES : eventsel;
 
 	counter = kvm_pmu_get_counter_value(vcpu, select_idx);
 	/* The initial sample period (overflow count) of an event. */
diff --git a/virt/kvm/arm/vgic/vgic-its.c b/virt/kvm/arm/vgic/vgic-its.c
index 4660a7d04eea..8c2b3cdcb2c5 100644
--- a/virt/kvm/arm/vgic/vgic-its.c
+++ b/virt/kvm/arm/vgic/vgic-its.c
@@ -632,21 +632,22 @@ static bool vgic_its_check_id(struct vgic_its *its, u64 baser, int id)
 	int index;
 	u64 indirect_ptr;
 	gfn_t gfn;
+	int esz = GITS_BASER_ENTRY_SIZE(baser);
 
 	if (!(baser & GITS_BASER_INDIRECT)) {
 		phys_addr_t addr;
 
-		if (id >= (l1_tbl_size / GITS_BASER_ENTRY_SIZE(baser)))
+		if (id >= (l1_tbl_size / esz))
 			return false;
 
-		addr = BASER_ADDRESS(baser) + id * GITS_BASER_ENTRY_SIZE(baser);
+		addr = BASER_ADDRESS(baser) + id * esz;
 		gfn = addr >> PAGE_SHIFT;
 
 		return kvm_is_visible_gfn(its->dev->kvm, gfn);
 	}
 
 	/* calculate and check the index into the 1st level */
-	index = id / (SZ_64K / GITS_BASER_ENTRY_SIZE(baser));
+	index = id / (SZ_64K / esz);
 	if (index >= (l1_tbl_size / sizeof(u64)))
 		return false;
 
@@ -670,8 +671,8 @@ static bool vgic_its_check_id(struct vgic_its *its, u64 baser, int id)
 	indirect_ptr &= GENMASK_ULL(51, 16);
 
 	/* Find the address of the actual entry */
-	index = id % (SZ_64K / GITS_BASER_ENTRY_SIZE(baser));
-	indirect_ptr += index * GITS_BASER_ENTRY_SIZE(baser);
+	index = id % (SZ_64K / esz);
+	indirect_ptr += index * esz;
 	gfn = indirect_ptr >> PAGE_SHIFT;
 
 	return kvm_is_visible_gfn(its->dev->kvm, gfn);
diff --git a/virt/kvm/arm/vgic/vgic-kvm-device.c b/virt/kvm/arm/vgic/vgic-kvm-device.c
index ce1f4ed9daf4..fbe87a63d250 100644
--- a/virt/kvm/arm/vgic/vgic-kvm-device.c
+++ b/virt/kvm/arm/vgic/vgic-kvm-device.c
@@ -221,11 +221,9 @@ int kvm_register_vgic_device(unsigned long type)
 		ret = kvm_register_device_ops(&kvm_arm_vgic_v3_ops,
 					      KVM_DEV_TYPE_ARM_VGIC_V3);
 
-#ifdef CONFIG_KVM_ARM_VGIC_V3_ITS
 		if (ret)
 			break;
 		ret = kvm_vgic_register_its_device();
-#endif
 		break;
 	}
 
diff --git a/virt/kvm/arm/vgic/vgic-mmio-v2.c b/virt/kvm/arm/vgic/vgic-mmio-v2.c
index b44b359cbbad..78e34bc4d89b 100644
--- a/virt/kvm/arm/vgic/vgic-mmio-v2.c
+++ b/virt/kvm/arm/vgic/vgic-mmio-v2.c
@@ -129,6 +129,7 @@ static void vgic_mmio_write_target(struct kvm_vcpu *vcpu,
 				   unsigned long val)
 {
 	u32 intid = VGIC_ADDR_TO_INTID(addr, 8);
+	u8 cpu_mask = GENMASK(atomic_read(&vcpu->kvm->online_vcpus) - 1, 0);
 	int i;
 
 	/* GICD_ITARGETSR[0-7] are read-only */
@@ -141,7 +142,7 @@ static void vgic_mmio_write_target(struct kvm_vcpu *vcpu,
 
 		spin_lock(&irq->irq_lock);
 
-		irq->targets = (val >> (i * 8)) & 0xff;
+		irq->targets = (val >> (i * 8)) & cpu_mask;
 		target = irq->targets ? __ffs(irq->targets) : 0;
 		irq->target_vcpu = kvm_get_vcpu(vcpu->kvm, target);
 
diff --git a/virt/kvm/arm/vgic/vgic-mmio-v3.c b/virt/kvm/arm/vgic/vgic-mmio-v3.c
index 0d3c76a4208b..50f42f0f8c4f 100644
--- a/virt/kvm/arm/vgic/vgic-mmio-v3.c
+++ b/virt/kvm/arm/vgic/vgic-mmio-v3.c
@@ -42,7 +42,6 @@ u64 update_64bit_reg(u64 reg, unsigned int offset, unsigned int len,
 	return reg | ((u64)val << lower);
 }
 
-#ifdef CONFIG_KVM_ARM_VGIC_V3_ITS
 bool vgic_has_its(struct kvm *kvm)
 {
 	struct vgic_dist *dist = &kvm->arch.vgic;
@@ -52,7 +51,6 @@ bool vgic_has_its(struct kvm *kvm)
 
 	return dist->has_its;
 }
-#endif
 
 static unsigned long vgic_mmio_read_v3_misc(struct kvm_vcpu *vcpu,
 					    gpa_t addr, unsigned int len)
diff --git a/virt/kvm/arm/vgic/vgic-mmio.c b/virt/kvm/arm/vgic/vgic-mmio.c
index e18b30ddcdce..ebe1b9fa3c4d 100644
--- a/virt/kvm/arm/vgic/vgic-mmio.c
+++ b/virt/kvm/arm/vgic/vgic-mmio.c
@@ -453,17 +453,33 @@ struct vgic_io_device *kvm_to_vgic_iodev(const struct kvm_io_device *dev)
 	return container_of(dev, struct vgic_io_device, dev);
 }
 
-static bool check_region(const struct vgic_register_region *region,
+static bool check_region(const struct kvm *kvm,
+			 const struct vgic_register_region *region,
 			 gpa_t addr, int len)
 {
-	if ((region->access_flags & VGIC_ACCESS_8bit) && len == 1)
-		return true;
-	if ((region->access_flags & VGIC_ACCESS_32bit) &&
-	    len == sizeof(u32) && !(addr & 3))
-		return true;
-	if ((region->access_flags & VGIC_ACCESS_64bit) &&
-	    len == sizeof(u64) && !(addr & 7))
-		return true;
+	int flags, nr_irqs = kvm->arch.vgic.nr_spis + VGIC_NR_PRIVATE_IRQS;
+
+	switch (len) {
+	case sizeof(u8):
+		flags = VGIC_ACCESS_8bit;
+		break;
+	case sizeof(u32):
+		flags = VGIC_ACCESS_32bit;
+		break;
+	case sizeof(u64):
+		flags = VGIC_ACCESS_64bit;
+		break;
+	default:
+		return false;
+	}
+
+	if ((region->access_flags & flags) && IS_ALIGNED(addr, len)) {
+		if (!region->bits_per_irq)
+			return true;
+
+		/* Do we access a non-allocated IRQ? */
+		return VGIC_ADDR_TO_INTID(addr, region->bits_per_irq) < nr_irqs;
+	}
 
 	return false;
 }
@@ -477,7 +493,7 @@ static int dispatch_mmio_read(struct kvm_vcpu *vcpu, struct kvm_io_device *dev,
 
 	region = vgic_find_mmio_region(iodev->regions, iodev->nr_regions,
 				       addr - iodev->base_addr);
-	if (!region || !check_region(region, addr, len)) {
+	if (!region || !check_region(vcpu->kvm, region, addr, len)) {
 		memset(val, 0, len);
 		return 0;
 	}
@@ -510,10 +526,7 @@ static int dispatch_mmio_write(struct kvm_vcpu *vcpu, struct kvm_io_device *dev,
 
 	region = vgic_find_mmio_region(iodev->regions, iodev->nr_regions,
 				       addr - iodev->base_addr);
-	if (!region)
-		return 0;
-
-	if (!check_region(region, addr, len))
+	if (!region || !check_region(vcpu->kvm, region, addr, len))
 		return 0;
 
 	switch (iodev->iodev_type) {
diff --git a/virt/kvm/arm/vgic/vgic-mmio.h b/virt/kvm/arm/vgic/vgic-mmio.h
index 4c34d39d44a0..84961b4e4422 100644
--- a/virt/kvm/arm/vgic/vgic-mmio.h
+++ b/virt/kvm/arm/vgic/vgic-mmio.h
@@ -50,15 +50,15 @@ extern struct kvm_io_device_ops kvm_io_gic_ops;
 #define VGIC_ADDR_IRQ_MASK(bits) (((bits) * 1024 / 8) - 1)
 
 /*
- * (addr & mask) gives us the byte offset for the INT ID, so we want to
- * divide this with 'bytes per irq' to get the INT ID, which is given
- * by '(bits) / 8'.  But we do this with fixed-point-arithmetic and
- * take advantage of the fact that division by a fraction equals
- * multiplication with the inverted fraction, and scale up both the
- * numerator and denominator with 8 to support at most 64 bits per IRQ:
+ * (addr & mask) gives us the _byte_ offset for the INT ID.
+ * We multiply this by 8 the get the _bit_ offset, then divide this by
+ * the number of bits to learn the actual INT ID.
+ * But instead of a division (which requires a "long long div" implementation),
+ * we shift by the binary logarithm of <bits>.
+ * This assumes that <bits> is a power of two.
  */
 #define VGIC_ADDR_TO_INTID(addr, bits)  (((addr) & VGIC_ADDR_IRQ_MASK(bits)) * \
-					64 / (bits) / 8)
+					8 >> ilog2(bits))
 
 /*
  * Some VGIC registers store per-IRQ information, with a different number
diff --git a/virt/kvm/arm/vgic/vgic-v2.c b/virt/kvm/arm/vgic/vgic-v2.c
index 0a063af40565..9bab86757fa4 100644
--- a/virt/kvm/arm/vgic/vgic-v2.c
+++ b/virt/kvm/arm/vgic/vgic-v2.c
@@ -50,8 +50,10 @@ void vgic_v2_process_maintenance(struct kvm_vcpu *vcpu)
 
 			WARN_ON(cpuif->vgic_lr[lr] & GICH_LR_STATE);
 
-			kvm_notify_acked_irq(vcpu->kvm, 0,
-					     intid - VGIC_NR_PRIVATE_IRQS);
+			/* Only SPIs require notification */
+			if (vgic_valid_spi(vcpu->kvm, intid))
+				kvm_notify_acked_irq(vcpu->kvm, 0,
+						     intid - VGIC_NR_PRIVATE_IRQS);
 		}
 	}
 
diff --git a/virt/kvm/arm/vgic/vgic-v3.c b/virt/kvm/arm/vgic/vgic-v3.c
index 9f0dae397d9c..5c9f9745e6ca 100644
--- a/virt/kvm/arm/vgic/vgic-v3.c
+++ b/virt/kvm/arm/vgic/vgic-v3.c
@@ -41,8 +41,10 @@ void vgic_v3_process_maintenance(struct kvm_vcpu *vcpu)
 
 			WARN_ON(cpuif->vgic_lr[lr] & ICH_LR_STATE);
 
-			kvm_notify_acked_irq(vcpu->kvm, 0,
-					     intid - VGIC_NR_PRIVATE_IRQS);
+			/* Only SPIs require notification */
+			if (vgic_valid_spi(vcpu->kvm, intid))
+				kvm_notify_acked_irq(vcpu->kvm, 0,
+						     intid - VGIC_NR_PRIVATE_IRQS);
 		}
 
 		/*
diff --git a/virt/kvm/arm/vgic/vgic.c b/virt/kvm/arm/vgic/vgic.c
index 2893d5ba523a..6440b56ec90e 100644
--- a/virt/kvm/arm/vgic/vgic.c
+++ b/virt/kvm/arm/vgic/vgic.c
@@ -273,6 +273,18 @@ retry:
 		 * no more work for us to do.
 		 */
 		spin_unlock(&irq->irq_lock);
+
+		/*
+		 * We have to kick the VCPU here, because we could be
+		 * queueing an edge-triggered interrupt for which we
+		 * get no EOI maintenance interrupt. In that case,
+		 * while the IRQ is already on the VCPU's AP list, the
+		 * VCPU could have EOI'ed the original interrupt and
+		 * won't see this one until it exits for some other
+		 * reason.
+		 */
+		if (vcpu)
+			kvm_vcpu_kick(vcpu);
 		return false;
 	}
 
diff --git a/virt/kvm/arm/vgic/vgic.h b/virt/kvm/arm/vgic/vgic.h
index 9d9e014765a2..859f65c6e056 100644
--- a/virt/kvm/arm/vgic/vgic.h
+++ b/virt/kvm/arm/vgic/vgic.h
@@ -84,37 +84,11 @@ int vgic_v3_probe(const struct gic_kvm_info *info);
 int vgic_v3_map_resources(struct kvm *kvm);
 int vgic_register_redist_iodevs(struct kvm *kvm, gpa_t dist_base_address);
 
-#ifdef CONFIG_KVM_ARM_VGIC_V3_ITS
 int vgic_register_its_iodevs(struct kvm *kvm);
 bool vgic_has_its(struct kvm *kvm);
 int kvm_vgic_register_its_device(void);
 void vgic_enable_lpis(struct kvm_vcpu *vcpu);
 int vgic_its_inject_msi(struct kvm *kvm, struct kvm_msi *msi);
-#else
-static inline int vgic_register_its_iodevs(struct kvm *kvm)
-{
-	return -ENODEV;
-}
-
-static inline bool vgic_has_its(struct kvm *kvm)
-{
-	return false;
-}
-
-static inline int kvm_vgic_register_its_device(void)
-{
-	return -ENODEV;
-}
-
-static inline void vgic_enable_lpis(struct kvm_vcpu *vcpu)
-{
-}
-
-static inline int vgic_its_inject_msi(struct kvm *kvm, struct kvm_msi *msi)
-{
-	return -ENODEV;
-}
-#endif
 
 int kvm_register_vgic_device(unsigned long type);
 int vgic_lazy_init(struct kvm *kvm);
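For reference, a quick sanity check (not part of the patch above) that the reworked VGIC_ADDR_TO_INTID arithmetic in vgic-mmio.h is equivalent to the old fixed-point form: the masked byte offset times 8 gives the bit offset, and shifting right by ilog2(bits) divides by the power-of-two number of bits per IRQ. The userspace sketch below compares both formulas exhaustively over an assumed offset range; ilog2() is open-coded because the kernel helper is not available outside the kernel, and the helper names are illustrative only.

#include <assert.h>
#include <stdio.h>

/* Userspace stand-in for the kernel's ilog2() on power-of-two inputs. */
static unsigned int ilog2_pow2(unsigned long x)
{
	unsigned int r = 0;

	while (x >>= 1)
		r++;
	return r;
}

/* Old macro body: fixed-point multiply by 64, then divide by bits and 8. */
static unsigned long intid_old(unsigned long offset, unsigned int bits)
{
	return offset * 64 / bits / 8;
}

/* New macro body: byte offset -> bit offset, then shift by log2(bits). */
static unsigned long intid_new(unsigned long offset, unsigned int bits)
{
	return (offset * 8) >> ilog2_pow2(bits);
}

int main(void)
{
	unsigned int bits;
	unsigned long offset;

	/*
	 * offset stands for (addr & VGIC_ADDR_IRQ_MASK(bits)); bits per IRQ
	 * is always a power of two (1, 2, 8, ... up to 64) in the VGIC.
	 */
	for (bits = 1; bits <= 64; bits <<= 1)
		for (offset = 0; offset <= 0xffff; offset++)
			assert(intid_old(offset, bits) ==
			       intid_new(offset, bits));

	printf("old and new VGIC_ADDR_TO_INTID formulas agree\n");
	return 0;
}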