From 13acfec2dbccfafff3331a3810cd7dde2fb16891 Mon Sep 17 00:00:00 2001
From: Anup Patel
Date: Mon, 9 May 2022 10:44:05 +0530
Subject: RISC-V: KVM: Add remote HFENCE functions based on VCPU requests

The generic KVM code has support for VCPU requests, which can be used to
do arch-specific work in the run-loop. We introduce remote HFENCE
functions which will internally use VCPU requests instead of host SBI
calls.

Advantages of doing remote HFENCEs as VCPU requests are:
1) Multiple VCPUs of a Guest may be running on different Host CPUs, so
   it is not always possible to determine the Host CPU mask for doing a
   Host SBI call. For example, when VCPU X wants to do HFENCE on VCPU Y,
   it is possible that VCPU Y is blocked or in user space (i.e.
   vcpu->cpu < 0).
2) To support nested virtualization, we will have a separate shadow
   G-stage for each VCPU and a common host G-stage for the entire
   Guest/VM. The VCPU request based remote HFENCEs help us easily
   synchronize the common host G-stage and the shadow G-stage of each
   VCPU without any additional IPI calls.

This is also a preparatory patch for upcoming nested virtualization
support, where we will have a shadow G-stage page table for each Guest
VCPU.

Signed-off-by: Anup Patel
Reviewed-by: Atish Patra
Signed-off-by: Anup Patel
---
 arch/riscv/kvm/tlb.c | 227 ++++++++++++++++++++++++++++++++++++++++++++++++++-
 1 file changed, 226 insertions(+), 1 deletion(-)

(limited to 'arch/riscv/kvm/tlb.c')

diff --git a/arch/riscv/kvm/tlb.c b/arch/riscv/kvm/tlb.c
index e2d4fd610745..c0f86d09c41d 100644
--- a/arch/riscv/kvm/tlb.c
+++ b/arch/riscv/kvm/tlb.c
@@ -3,11 +3,14 @@
  * Copyright (c) 2022 Ventana Micro Systems Inc.
  */
 
-#include <linux/bitops.h>
+#include <linux/bitmap.h>
+#include <linux/cpumask.h>
 #include <linux/errno.h>
 #include <linux/err.h>
 #include <linux/module.h>
+#include <linux/smp.h>
 #include <linux/kvm_host.h>
+#include <asm/cacheflush.h>
 #include <asm/csr.h>
 
 /*
@@ -211,3 +214,225 @@ void kvm_riscv_local_hfence_vvma_all(unsigned long vmid)
 
 	csr_write(CSR_HGATP, hgatp);
 }
+
+void kvm_riscv_fence_i_process(struct kvm_vcpu *vcpu)
+{
+	local_flush_icache_all();
+}
+
+void kvm_riscv_hfence_gvma_vmid_all_process(struct kvm_vcpu *vcpu)
+{
+	struct kvm_vmid *vmid;
+
+	vmid = &vcpu->kvm->arch.vmid;
+	kvm_riscv_local_hfence_gvma_vmid_all(READ_ONCE(vmid->vmid));
+}
+
+void kvm_riscv_hfence_vvma_all_process(struct kvm_vcpu *vcpu)
+{
+	struct kvm_vmid *vmid;
+
+	vmid = &vcpu->kvm->arch.vmid;
+	kvm_riscv_local_hfence_vvma_all(READ_ONCE(vmid->vmid));
+}
+
+static bool vcpu_hfence_dequeue(struct kvm_vcpu *vcpu,
+				struct kvm_riscv_hfence *out_data)
+{
+	bool ret = false;
+	struct kvm_vcpu_arch *varch = &vcpu->arch;
+
+	spin_lock(&varch->hfence_lock);
+
+	if (varch->hfence_queue[varch->hfence_head].type) {
+		memcpy(out_data, &varch->hfence_queue[varch->hfence_head],
+		       sizeof(*out_data));
+		varch->hfence_queue[varch->hfence_head].type = 0;
+
+		varch->hfence_head++;
+		if (varch->hfence_head == KVM_RISCV_VCPU_MAX_HFENCE)
+			varch->hfence_head = 0;
+
+		ret = true;
+	}
+
+	spin_unlock(&varch->hfence_lock);
+
+	return ret;
+}
+
+static bool vcpu_hfence_enqueue(struct kvm_vcpu *vcpu,
+				const struct kvm_riscv_hfence *data)
+{
+	bool ret = false;
+	struct kvm_vcpu_arch *varch = &vcpu->arch;
+
+	spin_lock(&varch->hfence_lock);
+
+	if (!varch->hfence_queue[varch->hfence_tail].type) {
+		memcpy(&varch->hfence_queue[varch->hfence_tail],
+		       data, sizeof(*data));
+
+		varch->hfence_tail++;
+		if (varch->hfence_tail == KVM_RISCV_VCPU_MAX_HFENCE)
+			varch->hfence_tail = 0;
+
+		ret = true;
+	}
+
+	spin_unlock(&varch->hfence_lock);
+
+	return ret;
+}
+
+void kvm_riscv_hfence_process(struct kvm_vcpu *vcpu)
+{
+	struct kvm_riscv_hfence d = { 0 };
+	struct kvm_vmid *v = &vcpu->kvm->arch.vmid;
+
+	while (vcpu_hfence_dequeue(vcpu, &d)) {
+		switch (d.type) {
+		case KVM_RISCV_HFENCE_UNKNOWN:
+			break;
+		case KVM_RISCV_HFENCE_GVMA_VMID_GPA:
+			kvm_riscv_local_hfence_gvma_vmid_gpa(
+						READ_ONCE(v->vmid),
+						d.addr, d.size, d.order);
+			break;
+		case KVM_RISCV_HFENCE_VVMA_ASID_GVA:
+			kvm_riscv_local_hfence_vvma_asid_gva(
+						READ_ONCE(v->vmid), d.asid,
+						d.addr, d.size, d.order);
+			break;
+		case KVM_RISCV_HFENCE_VVMA_ASID_ALL:
+			kvm_riscv_local_hfence_vvma_asid_all(
+						READ_ONCE(v->vmid), d.asid);
+			break;
+		case KVM_RISCV_HFENCE_VVMA_GVA:
+			kvm_riscv_local_hfence_vvma_gva(
+						READ_ONCE(v->vmid),
+						d.addr, d.size, d.order);
+			break;
+		default:
+			break;
+		}
+	}
+}
+
+static void make_xfence_request(struct kvm *kvm,
+				unsigned long hbase, unsigned long hmask,
+				unsigned int req, unsigned int fallback_req,
+				const struct kvm_riscv_hfence *data)
+{
+	unsigned long i;
+	struct kvm_vcpu *vcpu;
+	unsigned int actual_req = req;
+	DECLARE_BITMAP(vcpu_mask, KVM_MAX_VCPUS);
+
+	bitmap_clear(vcpu_mask, 0, KVM_MAX_VCPUS);
+	kvm_for_each_vcpu(i, vcpu, kvm) {
+		if (hbase != -1UL) {
+			if (vcpu->vcpu_id < hbase)
+				continue;
+			if (!(hmask & (1UL << (vcpu->vcpu_id - hbase))))
+				continue;
+		}
+
+		bitmap_set(vcpu_mask, i, 1);
+
+		if (!data || !data->type)
+			continue;
+
+		/*
+		 * Enqueue hfence data to VCPU hfence queue. If we don't
+		 * have space in the VCPU hfence queue then fallback to
+		 * a more conservative hfence request.
+		 */
+		if (!vcpu_hfence_enqueue(vcpu, data))
+			actual_req = fallback_req;
+	}
+
+	kvm_make_vcpus_request_mask(kvm, actual_req, vcpu_mask);
+}
+
+void kvm_riscv_fence_i(struct kvm *kvm,
+		       unsigned long hbase, unsigned long hmask)
+{
+	make_xfence_request(kvm, hbase, hmask, KVM_REQ_FENCE_I,
+			    KVM_REQ_FENCE_I, NULL);
+}
+
+void kvm_riscv_hfence_gvma_vmid_gpa(struct kvm *kvm,
+				    unsigned long hbase, unsigned long hmask,
+				    gpa_t gpa, gpa_t gpsz,
+				    unsigned long order)
+{
+	struct kvm_riscv_hfence data;
+
+	data.type = KVM_RISCV_HFENCE_GVMA_VMID_GPA;
+	data.asid = 0;
+	data.addr = gpa;
+	data.size = gpsz;
+	data.order = order;
+	make_xfence_request(kvm, hbase, hmask, KVM_REQ_HFENCE,
+			    KVM_REQ_HFENCE_GVMA_VMID_ALL, &data);
+}
+
+void kvm_riscv_hfence_gvma_vmid_all(struct kvm *kvm,
+				    unsigned long hbase, unsigned long hmask)
+{
+	make_xfence_request(kvm, hbase, hmask, KVM_REQ_HFENCE_GVMA_VMID_ALL,
+			    KVM_REQ_HFENCE_GVMA_VMID_ALL, NULL);
+}
+
+void kvm_riscv_hfence_vvma_asid_gva(struct kvm *kvm,
+				    unsigned long hbase, unsigned long hmask,
+				    unsigned long gva, unsigned long gvsz,
+				    unsigned long order, unsigned long asid)
+{
+	struct kvm_riscv_hfence data;
+
+	data.type = KVM_RISCV_HFENCE_VVMA_ASID_GVA;
+	data.asid = asid;
+	data.addr = gva;
+	data.size = gvsz;
+	data.order = order;
+	make_xfence_request(kvm, hbase, hmask, KVM_REQ_HFENCE,
+			    KVM_REQ_HFENCE_VVMA_ALL, &data);
+}
+
+void kvm_riscv_hfence_vvma_asid_all(struct kvm *kvm,
+				    unsigned long hbase, unsigned long hmask,
+				    unsigned long asid)
+{
+	struct kvm_riscv_hfence data;
+
+	data.type = KVM_RISCV_HFENCE_VVMA_ASID_ALL;
+	data.asid = asid;
+	data.addr = data.size = data.order = 0;
+	make_xfence_request(kvm, hbase, hmask, KVM_REQ_HFENCE,
+			    KVM_REQ_HFENCE_VVMA_ALL, &data);
+}
+
+void kvm_riscv_hfence_vvma_gva(struct kvm *kvm,
+			       unsigned long hbase, unsigned long hmask,
+			       unsigned long gva, unsigned long gvsz,
+			       unsigned long order)
+{
+	struct kvm_riscv_hfence data;
+
+	data.type = KVM_RISCV_HFENCE_VVMA_GVA;
+	data.asid = 0;
+	data.addr = gva;
+	data.size = gvsz;
+	data.order = order;
+	make_xfence_request(kvm, hbase, hmask, KVM_REQ_HFENCE,
+			    KVM_REQ_HFENCE_VVMA_ALL, &data);
+}
+
+void kvm_riscv_hfence_vvma_all(struct kvm *kvm,
+			       unsigned long hbase, unsigned long hmask)
+{
+	make_xfence_request(kvm, hbase, hmask, KVM_REQ_HFENCE_VVMA_ALL,
+			    KVM_REQ_HFENCE_VVMA_ALL, NULL);
+}
--
cgit v1.2.3
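
Editor's note (not part of the patch above): a minimal sketch of how a caller,
such as KVM's SBI RFENCE extension handler, might route a guest's remote
HFENCE.VVMA request to the helpers introduced here. The wrapper function name,
the way hbase/hmask/gva/gvsz would be obtained from the SBI call arguments, and
the use of PAGE_SHIFT as the flush order are illustrative assumptions, not code
from this commit.

	/*
	 * Illustrative sketch only: forward a guest remote HFENCE.VVMA
	 * request to the VCPU-request based helpers added by this patch.
	 */
	static void example_route_remote_hfence_vvma(struct kvm_vcpu *vcpu,
						     unsigned long hbase,
						     unsigned long hmask,
						     unsigned long gva,
						     unsigned long gvsz)
	{
		/* No range given: flush all guest-virtual mappings. */
		if (!gvsz) {
			kvm_riscv_hfence_vvma_all(vcpu->kvm, hbase, hmask);
			return;
		}

		/* Range flush; the order argument selects the granularity. */
		kvm_riscv_hfence_vvma_gva(vcpu->kvm, hbase, hmask,
					  gva, gvsz, PAGE_SHIFT);
	}

Either way, the helpers only enqueue per-VCPU hfence data and raise VCPU
requests; the actual local HFENCE instructions run in each target VCPU's
run-loop, falling back to the more conservative request when a queue is full.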