author     Greg Kroah-Hartman <gregkh@linuxfoundation.org>  2018-04-30 15:05:54 +0300
committer  Greg Kroah-Hartman <gregkh@linuxfoundation.org>  2018-04-30 15:05:54 +0300
commit     720d690e3680d8ae5fcf86f8614c1a10b4ee7605 (patch)
tree       02158973d51022d7374f5b6e2f1953b530127f43 /virt/kvm/arm
parent     72b6500c1cc68b949faa1bac76359dc601b482d6 (diff)
parent     6da6c0db5316275015e8cc2959f12a17584aeb64 (diff)
download   linux-720d690e3680d8ae5fcf86f8614c1a10b4ee7605.tar.xz
Merge 4.17-rc3 into char-misc-next
We want the fixes in here as well.
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Diffstat (limited to 'virt/kvm/arm')
-rw-r--r--  virt/kvm/arm/arm.c                15
-rw-r--r--  virt/kvm/arm/psci.c               60
-rw-r--r--  virt/kvm/arm/vgic/vgic-mmio-v2.c   5
-rw-r--r--  virt/kvm/arm/vgic/vgic.c          22
4 files changed, 93 insertions(+), 9 deletions(-)
diff --git a/virt/kvm/arm/arm.c b/virt/kvm/arm/arm.c
index dba629c5f8ac..a4c1b76240df 100644
--- a/virt/kvm/arm/arm.c
+++ b/virt/kvm/arm/arm.c
@@ -63,7 +63,7 @@ static DEFINE_PER_CPU(struct kvm_vcpu *, kvm_arm_running_vcpu);
 static atomic64_t kvm_vmid_gen = ATOMIC64_INIT(1);
 static u32 kvm_next_vmid;
 static unsigned int kvm_vmid_bits __read_mostly;
-static DEFINE_SPINLOCK(kvm_vmid_lock);
+static DEFINE_RWLOCK(kvm_vmid_lock);
 
 static bool vgic_present;
 
@@ -473,11 +473,16 @@ static void update_vttbr(struct kvm *kvm)
 {
 	phys_addr_t pgd_phys;
 	u64 vmid;
+	bool new_gen;
 
-	if (!need_new_vmid_gen(kvm))
+	read_lock(&kvm_vmid_lock);
+	new_gen = need_new_vmid_gen(kvm);
+	read_unlock(&kvm_vmid_lock);
+
+	if (!new_gen)
 		return;
 
-	spin_lock(&kvm_vmid_lock);
+	write_lock(&kvm_vmid_lock);
 
 	/*
 	 * We need to re-check the vmid_gen here to ensure that if another vcpu
@@ -485,7 +490,7 @@ static void update_vttbr(struct kvm *kvm)
 	 * use the same vmid.
 	 */
 	if (!need_new_vmid_gen(kvm)) {
-		spin_unlock(&kvm_vmid_lock);
+		write_unlock(&kvm_vmid_lock);
 		return;
 	}
 
@@ -519,7 +524,7 @@ static void update_vttbr(struct kvm *kvm)
 	vmid = ((u64)(kvm->arch.vmid) << VTTBR_VMID_SHIFT) & VTTBR_VMID_MASK(kvm_vmid_bits);
 	kvm->arch.vttbr = kvm_phys_to_vttbr(pgd_phys) | vmid;
 
-	spin_unlock(&kvm_vmid_lock);
+	write_unlock(&kvm_vmid_lock);
 }
 
 static int kvm_vcpu_first_run_init(struct kvm_vcpu *vcpu)
diff --git a/virt/kvm/arm/psci.c b/virt/kvm/arm/psci.c
index 6919352cbf15..c4762bef13c6 100644
--- a/virt/kvm/arm/psci.c
+++ b/virt/kvm/arm/psci.c
@@ -18,6 +18,7 @@
 #include <linux/arm-smccc.h>
 #include <linux/preempt.h>
 #include <linux/kvm_host.h>
+#include <linux/uaccess.h>
 #include <linux/wait.h>
 
 #include <asm/cputype.h>
@@ -427,3 +428,62 @@ int kvm_hvc_call_handler(struct kvm_vcpu *vcpu)
 	smccc_set_retval(vcpu, val, 0, 0, 0);
 	return 1;
 }
+
+int kvm_arm_get_fw_num_regs(struct kvm_vcpu *vcpu)
+{
+	return 1;		/* PSCI version */
+}
+
+int kvm_arm_copy_fw_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
+{
+	if (put_user(KVM_REG_ARM_PSCI_VERSION, uindices))
+		return -EFAULT;
+
+	return 0;
+}
+
+int kvm_arm_get_fw_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
+{
+	if (reg->id == KVM_REG_ARM_PSCI_VERSION) {
+		void __user *uaddr = (void __user *)(long)reg->addr;
+		u64 val;
+
+		val = kvm_psci_version(vcpu, vcpu->kvm);
+		if (copy_to_user(uaddr, &val, KVM_REG_SIZE(reg->id)))
+			return -EFAULT;
+
+		return 0;
+	}
+
+	return -EINVAL;
+}
+
+int kvm_arm_set_fw_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
+{
+	if (reg->id == KVM_REG_ARM_PSCI_VERSION) {
+		void __user *uaddr = (void __user *)(long)reg->addr;
+		bool wants_02;
+		u64 val;
+
+		if (copy_from_user(&val, uaddr, KVM_REG_SIZE(reg->id)))
+			return -EFAULT;
+
+		wants_02 = test_bit(KVM_ARM_VCPU_PSCI_0_2, vcpu->arch.features);
+
+		switch (val) {
+		case KVM_ARM_PSCI_0_1:
+			if (wants_02)
+				return -EINVAL;
+			vcpu->kvm->arch.psci_version = val;
+			return 0;
+		case KVM_ARM_PSCI_0_2:
+		case KVM_ARM_PSCI_1_0:
+			if (!wants_02)
+				return -EINVAL;
+			vcpu->kvm->arch.psci_version = val;
+			return 0;
+		}
+	}
+
+	return -EINVAL;
+}
diff --git a/virt/kvm/arm/vgic/vgic-mmio-v2.c b/virt/kvm/arm/vgic/vgic-mmio-v2.c
index e21e2f49b005..ffc587bf4742 100644
--- a/virt/kvm/arm/vgic/vgic-mmio-v2.c
+++ b/virt/kvm/arm/vgic/vgic-mmio-v2.c
@@ -14,6 +14,8 @@
 #include <linux/irqchip/arm-gic.h>
 #include <linux/kvm.h>
 #include <linux/kvm_host.h>
+#include <linux/nospec.h>
+
 #include <kvm/iodev.h>
 #include <kvm/arm_vgic.h>
 
@@ -324,6 +326,9 @@ static unsigned long vgic_mmio_read_apr(struct kvm_vcpu *vcpu,
 
 		if (n > vgic_v3_max_apr_idx(vcpu))
 			return 0;
+
+		n = array_index_nospec(n, 4);
+
 		/* GICv3 only uses ICH_AP1Rn for memory mapped (GICv2) guests */
 		return vgicv3->vgic_ap1r[n];
 	}
diff --git a/virt/kvm/arm/vgic/vgic.c b/virt/kvm/arm/vgic/vgic.c
index e74baec76361..702936cbe173 100644
--- a/virt/kvm/arm/vgic/vgic.c
+++ b/virt/kvm/arm/vgic/vgic.c
@@ -14,11 +14,13 @@
  * along with this program.  If not, see <http://www.gnu.org/licenses/>.
  */
 
+#include <linux/interrupt.h>
+#include <linux/irq.h>
 #include <linux/kvm.h>
 #include <linux/kvm_host.h>
 #include <linux/list_sort.h>
-#include <linux/interrupt.h>
-#include <linux/irq.h>
+#include <linux/nospec.h>
+
 #include <asm/kvm_hyp.h>
 
 #include "vgic.h"
@@ -101,12 +103,16 @@ struct vgic_irq *vgic_get_irq(struct kvm *kvm, struct kvm_vcpu *vcpu,
 			      u32 intid)
 {
 	/* SGIs and PPIs */
-	if (intid <= VGIC_MAX_PRIVATE)
+	if (intid <= VGIC_MAX_PRIVATE) {
+		intid = array_index_nospec(intid, VGIC_MAX_PRIVATE);
 		return &vcpu->arch.vgic_cpu.private_irqs[intid];
+	}
 
 	/* SPIs */
-	if (intid <= VGIC_MAX_SPI)
+	if (intid <= VGIC_MAX_SPI) {
+		intid = array_index_nospec(intid, VGIC_MAX_SPI);
 		return &kvm->arch.vgic.spis[intid - VGIC_NR_PRIVATE_IRQS];
+	}
 
 	/* LPIs */
 	if (intid >= VGIC_MIN_LPI)
@@ -594,6 +600,7 @@ retry:
 
 	list_for_each_entry_safe(irq, tmp, &vgic_cpu->ap_list_head, ap_list) {
 		struct kvm_vcpu *target_vcpu, *vcpuA, *vcpuB;
+		bool target_vcpu_needs_kick = false;
 
 		spin_lock(&irq->irq_lock);
 
@@ -664,11 +671,18 @@ retry:
 			list_del(&irq->ap_list);
 			irq->vcpu = target_vcpu;
 			list_add_tail(&irq->ap_list, &new_cpu->ap_list_head);
+			target_vcpu_needs_kick = true;
 		}
 
 		spin_unlock(&irq->irq_lock);
 		spin_unlock(&vcpuB->arch.vgic_cpu.ap_list_lock);
 		spin_unlock_irqrestore(&vcpuA->arch.vgic_cpu.ap_list_lock, flags);
+
+		if (target_vcpu_needs_kick) {
+			kvm_make_request(KVM_REQ_IRQ_PENDING, target_vcpu);
+			kvm_vcpu_kick(target_vcpu);
+		}
+
 		goto retry;
 	}
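
For reference, the PSCI version register added in psci.c above is surfaced to userspace through KVM's generic ONE_REG interface. The sketch below is a minimal, hypothetical userspace illustration (not part of this merge); it assumes an arm/arm64 host whose uapi headers already define KVM_REG_ARM_PSCI_VERSION, and an already-created vCPU file descriptor named vcpu_fd. Error handling is left to the caller.

/*
 * Hypothetical userspace sketch: read and write KVM_REG_ARM_PSCI_VERSION
 * through the KVM_GET_ONE_REG / KVM_SET_ONE_REG vCPU ioctls.
 */
#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>		/* pulls in <asm/kvm.h> for the register ID */

static int get_psci_version(int vcpu_fd, uint64_t *version)
{
	struct kvm_one_reg reg = {
		.id   = KVM_REG_ARM_PSCI_VERSION,
		.addr = (uint64_t)(unsigned long)version,
	};

	/* Returns 0 on success, -1 with errno set on failure. */
	return ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
}

static int set_psci_version(int vcpu_fd, uint64_t version)
{
	struct kvm_one_reg reg = {
		.id   = KVM_REG_ARM_PSCI_VERSION,
		.addr = (uint64_t)(unsigned long)&version,
	};

	/*
	 * The value follows the PSCI version encoding (major << 16 | minor),
	 * so 0x2 selects PSCI 0.2 and 0x10000 selects PSCI 1.0; values the
	 * new kvm_arm_set_fw_reg() does not accept come back as -EINVAL.
	 */
	return ioctl(vcpu_fd, KVM_SET_ONE_REG, &reg);
}

A VMM can use this pair during migration to pin the PSCI revision a guest was started with, which is the compatibility problem the new firmware pseudo-register is meant to solve.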