Diffstat (limited to 'arch/arm64/kvm/arm.c')
-rw-r--r--	arch/arm64/kvm/arm.c	104
1 file changed, 63 insertions(+), 41 deletions(-)
diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
index 73e12869afe3..acf9a993dfb6 100644
--- a/arch/arm64/kvm/arm.c
+++ b/arch/arm64/kvm/arm.c
@@ -106,22 +106,15 @@ static int kvm_arm_default_max_vcpus(void)
  */
 int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
 {
-	int ret, cpu;
+	int ret;
 
 	ret = kvm_arm_setup_stage2(kvm, type);
 	if (ret)
 		return ret;
 
-	kvm->arch.last_vcpu_ran = alloc_percpu(typeof(*kvm->arch.last_vcpu_ran));
-	if (!kvm->arch.last_vcpu_ran)
-		return -ENOMEM;
-
-	for_each_possible_cpu(cpu)
-		*per_cpu_ptr(kvm->arch.last_vcpu_ran, cpu) = -1;
-
-	ret = kvm_alloc_stage2_pgd(kvm);
+	ret = kvm_init_stage2_mmu(kvm, &kvm->arch.mmu);
 	if (ret)
-		goto out_fail_alloc;
+		return ret;
 
 	ret = create_hyp_mappings(kvm, kvm + 1, PAGE_HYP);
 	if (ret)
@@ -129,18 +122,12 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
 
 	kvm_vgic_early_init(kvm);
 
-	/* Mark the initial VMID generation invalid */
-	kvm->arch.vmid.vmid_gen = 0;
-
 	/* The maximum number of VCPUs is limited by the host's GIC model */
 	kvm->arch.max_vcpus = kvm_arm_default_max_vcpus();
 
 	return ret;
 out_free_stage2_pgd:
-	kvm_free_stage2_pgd(kvm);
-out_fail_alloc:
-	free_percpu(kvm->arch.last_vcpu_ran);
-	kvm->arch.last_vcpu_ran = NULL;
+	kvm_free_stage2_pgd(&kvm->arch.mmu);
 	return ret;
 }
 
@@ -160,9 +147,6 @@ void kvm_arch_destroy_vm(struct kvm *kvm)
 
 	kvm_vgic_destroy(kvm);
 
-	free_percpu(kvm->arch.last_vcpu_ran);
-	kvm->arch.last_vcpu_ran = NULL;
-
 	for (i = 0; i < KVM_MAX_VCPUS; ++i) {
 		if (kvm->vcpus[i]) {
 			kvm_vcpu_destroy(kvm->vcpus[i]);
@@ -222,6 +206,9 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
 		 */
 		r = 1;
 		break;
+	case KVM_CAP_STEAL_TIME:
+		r = kvm_arm_pvtime_supported();
+		break;
 	default:
 		r = kvm_arch_vm_ioctl_check_extension(kvm, ext);
 		break;
@@ -281,6 +268,8 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
 
 	kvm_arm_pvtime_vcpu_init(&vcpu->arch);
 
+	vcpu->arch.hw_mmu = &vcpu->kvm->arch.mmu;
+
 	err = kvm_vgic_vcpu_init(vcpu);
 	if (err)
 		return err;
@@ -336,16 +325,18 @@ void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu)
 
 void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 {
+	struct kvm_s2_mmu *mmu;
 	int *last_ran;
 
-	last_ran = this_cpu_ptr(vcpu->kvm->arch.last_vcpu_ran);
+	mmu = vcpu->arch.hw_mmu;
+	last_ran = this_cpu_ptr(mmu->last_vcpu_ran);
 
 	/*
 	 * We might get preempted before the vCPU actually runs, but
 	 * over-invalidation doesn't affect correctness.
 	 */
 	if (*last_ran != vcpu->vcpu_id) {
-		kvm_call_hyp(__kvm_tlb_flush_local_vmid, vcpu);
+		kvm_call_hyp(__kvm_tlb_flush_local_vmid, mmu);
 		*last_ran = vcpu->vcpu_id;
 	}
 
@@ -353,7 +344,8 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 
 	kvm_vgic_load(vcpu);
 	kvm_timer_vcpu_load(vcpu);
-	kvm_vcpu_load_sysregs(vcpu);
+	if (has_vhe())
+		kvm_vcpu_load_sysregs_vhe(vcpu);
 	kvm_arch_vcpu_load_fp(vcpu);
 	kvm_vcpu_pmu_restore_guest(vcpu);
 	if (kvm_arm_is_pvtime_enabled(&vcpu->arch))
@@ -371,7 +363,8 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
 {
 	kvm_arch_vcpu_put_fp(vcpu);
-	kvm_vcpu_put_sysregs(vcpu);
+	if (has_vhe())
+		kvm_vcpu_put_sysregs_vhe(vcpu);
 	kvm_timer_vcpu_put(vcpu);
 	kvm_vgic_put(vcpu);
 	kvm_vcpu_pmu_restore_host(vcpu);
@@ -468,7 +461,6 @@ static bool need_new_vmid_gen(struct kvm_vmid *vmid)
 
 /**
  * update_vmid - Update the vmid with a valid VMID for the current generation
- * @kvm: The guest that struct vmid belongs to
  * @vmid: The stage-2 VMID information struct
  */
 static void update_vmid(struct kvm_vmid *vmid)
@@ -680,7 +672,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
 		 */
 		cond_resched();
 
-		update_vmid(&vcpu->kvm->arch.vmid);
+		update_vmid(&vcpu->arch.hw_mmu->vmid);
 
 		check_vcpu_requests(vcpu);
 
@@ -729,13 +721,13 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
 		 */
 		smp_store_mb(vcpu->mode, IN_GUEST_MODE);
 
-		if (ret <= 0 || need_new_vmid_gen(&vcpu->kvm->arch.vmid) ||
+		if (ret <= 0 || need_new_vmid_gen(&vcpu->arch.hw_mmu->vmid) ||
 		    kvm_request_pending(vcpu)) {
 			vcpu->mode = OUTSIDE_GUEST_MODE;
 			isb(); /* Ensure work in x_flush_hwstate is committed */
 			kvm_pmu_sync_hwstate(vcpu);
 			if (static_branch_unlikely(&userspace_irqchip_in_use))
-				kvm_timer_sync_hwstate(vcpu);
+				kvm_timer_sync_user(vcpu);
 			kvm_vgic_sync_hwstate(vcpu);
 			local_irq_enable();
 			preempt_enable();
@@ -750,11 +742,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
 		trace_kvm_entry(*vcpu_pc(vcpu));
 		guest_enter_irqoff();
 
-		if (has_vhe()) {
-			ret = kvm_vcpu_run_vhe(vcpu);
-		} else {
-			ret = kvm_call_hyp_ret(__kvm_vcpu_run_nvhe, vcpu);
-		}
+		ret = kvm_call_hyp_ret(__kvm_vcpu_run, vcpu);
 
 		vcpu->mode = OUTSIDE_GUEST_MODE;
 		vcpu->stat.exits++;
@@ -784,7 +772,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
 		 * timer virtual interrupt state.
 		 */
 		if (static_branch_unlikely(&userspace_irqchip_in_use))
-			kvm_timer_sync_user(vcpu);
+			kvm_timer_sync_user(vcpu);
 
 		kvm_arch_vcpu_ctxsync_fp(vcpu);
 
@@ -1271,6 +1259,40 @@ long kvm_arch_vm_ioctl(struct file *filp,
 	}
 }
 
+static int kvm_map_vectors(void)
+{
+	/*
+	 * SV2  = ARM64_SPECTRE_V2
+	 * HEL2 = ARM64_HARDEN_EL2_VECTORS
+	 *
+	 * !SV2 + !HEL2 -> use direct vectors
+	 *  SV2 + !HEL2 -> use hardened vectors in place
+	 * !SV2 +  HEL2 -> allocate one vector slot and use exec mapping
+	 *  SV2 +  HEL2 -> use hardened vectors and use exec mapping
+	 */
+	if (cpus_have_const_cap(ARM64_SPECTRE_V2)) {
+		__kvm_bp_vect_base = kvm_ksym_ref(__bp_harden_hyp_vecs);
+		__kvm_bp_vect_base = kern_hyp_va(__kvm_bp_vect_base);
+	}
+
+	if (cpus_have_const_cap(ARM64_HARDEN_EL2_VECTORS)) {
+		phys_addr_t vect_pa = __pa_symbol(__bp_harden_hyp_vecs);
+		unsigned long size = __BP_HARDEN_HYP_VECS_SZ;
+
+		/*
+		 * Always allocate a spare vector slot, as we don't
+		 * know yet which CPUs have a BP hardening slot that
+		 * we can reuse.
+		 */
+		__kvm_harden_el2_vector_slot = atomic_inc_return(&arm64_el2_vector_last_slot);
+		BUG_ON(__kvm_harden_el2_vector_slot >= BP_HARDEN_EL2_SLOTS);
+		return create_hyp_exec_mappings(vect_pa, size,
+						&__kvm_bp_vect_base);
+	}
+
+	return 0;
+}
+
 static void cpu_init_hyp_mode(void)
 {
 	phys_addr_t pgd_ptr;
@@ -1287,7 +1309,7 @@ static void cpu_init_hyp_mode(void)
 	 * so that we can use adr_l to access per-cpu variables in EL2.
 	 */
 	tpidr_el2 = ((unsigned long)this_cpu_ptr(&kvm_host_data) -
-		     (unsigned long)kvm_ksym_ref(kvm_host_data));
+		     (unsigned long)kvm_ksym_ref(&kvm_host_data));
 
 	pgd_ptr = kvm_mmu_get_httbr();
 	hyp_stack_ptr = __this_cpu_read(kvm_arm_hyp_stack_page) + PAGE_SIZE;
@@ -1307,8 +1329,8 @@ static void cpu_init_hyp_mode(void)
 	 * at EL2.
 	 */
 	if (this_cpu_has_cap(ARM64_SSBS) &&
-	    arm64_get_ssbd_state() == ARM64_SSBD_FORCE_DISABLE) {
-		kvm_call_hyp(__kvm_enable_ssbs);
+	    arm64_get_spectre_v4_state() == SPECTRE_VULNERABLE) {
+		kvm_call_hyp_nvhe(__kvm_enable_ssbs);
 	}
 }
 
@@ -1564,10 +1586,6 @@ static int init_hyp_mode(void)
 		}
 	}
 
-	err = hyp_map_aux_data();
-	if (err)
-		kvm_err("Cannot map host auxiliary data: %d\n", err);
-
 	return 0;
 
 out_err:
@@ -1655,6 +1673,10 @@ int kvm_arch_init(void *opaque)
 		return -ENODEV;
 	}
 
+	if (cpus_have_final_cap(ARM64_WORKAROUND_DEVICE_LOAD_ACQUIRE))
+		kvm_info("Guests without required CPU erratum workarounds can deadlock system!\n" \
+			 "Only trusted guests should be used on this system.\n");
+
 	for_each_online_cpu(cpu) {
 		smp_call_function_single(cpu, check_kvm_target_cpu, &ret, 1);
 		if (ret < 0) {
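Most of this diff replumbs stage-2 state out of struct kvm_arch and into a dedicated MMU object that each vCPU reaches through vcpu->arch.hw_mmu. As a reading aid, a minimal sketch of the struct kvm_s2_mmu fields this diff exercises; the authoritative definition lives in arch/arm64/include/asm/kvm_host.h, and member order plus any fields not touched here are assumptions:

	/* Sketch only: the kvm_s2_mmu members visible in this diff. */
	struct kvm_s2_mmu {
		struct kvm_vmid	vmid;		/* replaces kvm->arch.vmid */
		pgd_t		*pgd;		/* stage-2 translation table root */
		phys_addr_t	pgd_phys;
		int __percpu	*last_vcpu_ran;	/* replaces kvm->arch.last_vcpu_ran */
		struct kvm	*kvm;		/* back-pointer to the owning VM */
	};

Keeping the VMID and TLB bookkeeping per-MMU rather than per-VM is what later lets a guest own more than one stage-2 context (for nested virtualization, for instance) without touching these call sites again.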
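The per-CPU last_vcpu_ran setup deleted from kvm_arch_init_vm() is not dropped; it moves behind the new kvm_init_stage2_mmu() call. A hedged sketch of the relocated logic, reconstructed from the removed lines; the real function in arch/arm64/kvm/mmu.c also allocates the stage-2 pgd and fills in pgd_phys, which is elided here:

	int kvm_init_stage2_mmu(struct kvm *kvm, struct kvm_s2_mmu *mmu)
	{
		int cpu;

		/* ... stage-2 pgd allocation elided ... */

		mmu->last_vcpu_ran = alloc_percpu(typeof(*mmu->last_vcpu_ran));
		if (!mmu->last_vcpu_ran)
			return -ENOMEM;

		/*
		 * -1 means "no vCPU has run here yet", which forces the
		 * local TLB flush in kvm_arch_vcpu_load() the first time
		 * any vCPU of this MMU runs on a given physical CPU.
		 */
		for_each_possible_cpu(cpu)
			*per_cpu_ptr(mmu->last_vcpu_ran, cpu) = -1;

		mmu->kvm = kvm;
		/* Mark the initial VMID generation invalid */
		mmu->vmid.vmid_gen = 0;
		return 0;
	}

This also explains the simplified error path in kvm_arch_init_vm(): with allocation and initialisation owned by one function, the out_fail_alloc label and the explicit free_percpu() calls become unnecessary.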
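The new KVM_CAP_STEAL_TIME arm only reports kvm_arm_pvtime_supported(); userspace discovers it through the generic capability mechanism. A minimal, illustrative probe using the standard KVM API (not part of this patch):

	#include <fcntl.h>
	#include <stdio.h>
	#include <sys/ioctl.h>
	#include <linux/kvm.h>

	int main(void)
	{
		int kvm = open("/dev/kvm", O_RDWR | O_CLOEXEC);
		if (kvm < 0)
			return 1;

		/* KVM_CHECK_EXTENSION returns > 0 when the capability exists. */
		if (ioctl(kvm, KVM_CHECK_EXTENSION, KVM_CAP_STEAL_TIME) > 0)
			printf("stolen-time accounting available\n");
		return 0;
	}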
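Guest entry loses its open-coded has_vhe() branch because the dispatch now lives in the hypercall macro itself: under VHE the "hyp" function is an ordinary kernel function call, while under nVHE it traps to EL2. A simplified sketch, assuming the macro shape in asm/kvm_host.h of this era; the real version also handles hyp symbol namespacing:

	#define kvm_call_hyp_ret(f, ...)				\
		({							\
			typeof(f(__VA_ARGS__)) ret;			\
									\
			if (has_vhe()) {				\
				/* direct call, already at EL2 */	\
				ret = f(__VA_ARGS__);			\
				isb();					\
			} else {					\
				/* HVC into the nVHE hyp object */	\
				ret = kvm_call_hyp_nvhe(f, ##__VA_ARGS__); \
			}						\
			ret;						\
		})

The same split shows up in kvm_arch_vcpu_load()/put(): the sysreg load/put helpers gain an explicit _vhe suffix and are only called when has_vhe() is true, since the nVHE side saves and restores those registers inside the EL2 world switch instead.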