Diffstat (limited to 'arch/riscv/kvm')
-rw-r--r--  arch/riscv/kvm/Kconfig            |   2
-rw-r--r--  arch/riscv/kvm/aia_device.c       |  38
-rw-r--r--  arch/riscv/kvm/aia_imsic.c        |   1
-rw-r--r--  arch/riscv/kvm/main.c             |   4
-rw-r--r--  arch/riscv/kvm/vcpu.c             |  62
-rw-r--r--  arch/riscv/kvm/vcpu_onereg.c      |   6
-rw-r--r--  arch/riscv/kvm/vcpu_pmu.c         |   1
-rw-r--r--  arch/riscv/kvm/vcpu_sbi.c         |  32
-rw-r--r--  arch/riscv/kvm/vcpu_sbi_hsm.c     |  24
-rw-r--r--  arch/riscv/kvm/vcpu_sbi_replace.c |  23
-rw-r--r--  arch/riscv/kvm/vcpu_sbi_system.c  |  13
-rw-r--r--  arch/riscv/kvm/vcpu_timer.c       |   7
-rw-r--r--  arch/riscv/kvm/vcpu_vector.c      |  13
-rw-r--r--  arch/riscv/kvm/vm.c               |  13
14 files changed, 127 insertions(+), 112 deletions(-)
diff --git a/arch/riscv/kvm/Kconfig b/arch/riscv/kvm/Kconfig
index 0c3cbb0915ff..704c2899197e 100644
--- a/arch/riscv/kvm/Kconfig
+++ b/arch/riscv/kvm/Kconfig
@@ -18,7 +18,7 @@ menuconfig VIRTUALIZATION
 if VIRTUALIZATION
 
 config KVM
-	tristate "Kernel-based Virtual Machine (KVM) support (EXPERIMENTAL)"
+	tristate "Kernel-based Virtual Machine (KVM) support"
 	depends on RISCV_SBI && MMU
 	select HAVE_KVM_IRQCHIP
 	select HAVE_KVM_IRQ_ROUTING
diff --git a/arch/riscv/kvm/aia_device.c b/arch/riscv/kvm/aia_device.c
index 39cd26af5a69..806c41931cde 100644
--- a/arch/riscv/kvm/aia_device.c
+++ b/arch/riscv/kvm/aia_device.c
@@ -12,36 +12,6 @@
 #include <linux/kvm_host.h>
 #include <linux/uaccess.h>
 
-static void unlock_vcpus(struct kvm *kvm, int vcpu_lock_idx)
-{
-	struct kvm_vcpu *tmp_vcpu;
-
-	for (; vcpu_lock_idx >= 0; vcpu_lock_idx--) {
-		tmp_vcpu = kvm_get_vcpu(kvm, vcpu_lock_idx);
-		mutex_unlock(&tmp_vcpu->mutex);
-	}
-}
-
-static void unlock_all_vcpus(struct kvm *kvm)
-{
-	unlock_vcpus(kvm, atomic_read(&kvm->online_vcpus) - 1);
-}
-
-static bool lock_all_vcpus(struct kvm *kvm)
-{
-	struct kvm_vcpu *tmp_vcpu;
-	unsigned long c;
-
-	kvm_for_each_vcpu(c, tmp_vcpu, kvm) {
-		if (!mutex_trylock(&tmp_vcpu->mutex)) {
-			unlock_vcpus(kvm, c - 1);
-			return false;
-		}
-	}
-
-	return true;
-}
-
 static int aia_create(struct kvm_device *dev, u32 type)
 {
 	int ret;
@@ -53,7 +23,7 @@ static int aia_create(struct kvm_device *dev, u32 type)
 		return -EEXIST;
 
 	ret = -EBUSY;
-	if (!lock_all_vcpus(kvm))
+	if (kvm_trylock_all_vcpus(kvm))
 		return ret;
 
 	kvm_for_each_vcpu(i, vcpu, kvm) {
@@ -65,7 +35,7 @@ static int aia_create(struct kvm_device *dev, u32 type)
 	kvm->arch.aia.in_kernel = true;
 
 out_unlock:
-	unlock_all_vcpus(kvm);
+	kvm_unlock_all_vcpus(kvm);
 	return ret;
 }
 
@@ -526,12 +496,10 @@ int kvm_riscv_vcpu_aia_update(struct kvm_vcpu *vcpu)
 void kvm_riscv_vcpu_aia_reset(struct kvm_vcpu *vcpu)
 {
 	struct kvm_vcpu_aia_csr *csr = &vcpu->arch.aia_context.guest_csr;
-	struct kvm_vcpu_aia_csr *reset_csr =
-				&vcpu->arch.aia_context.guest_reset_csr;
 
 	if (!kvm_riscv_aia_available())
 		return;
-	memcpy(csr, reset_csr, sizeof(*csr));
+	memset(csr, 0, sizeof(*csr));
 
 	/* Proceed only if AIA was initialized successfully */
 	if (!kvm_riscv_aia_initialized(vcpu->kvm))
diff --git a/arch/riscv/kvm/aia_imsic.c b/arch/riscv/kvm/aia_imsic.c
index a8085cd8215e..29ef9c2133a9 100644
--- a/arch/riscv/kvm/aia_imsic.c
+++ b/arch/riscv/kvm/aia_imsic.c
@@ -974,7 +974,6 @@ int kvm_riscv_vcpu_aia_imsic_inject(struct kvm_vcpu *vcpu,
 
 	if (imsic->vsfile_cpu >= 0) {
 		writel(iid, imsic->vsfile_va + IMSIC_MMIO_SETIPNUM_LE);
-		kvm_vcpu_kick(vcpu);
 	} else {
 		eix = &imsic->swfile->eix[iid / BITS_PER_TYPE(u64)];
 		set_bit(iid & (BITS_PER_TYPE(u64) - 1), eix->eip);
diff --git a/arch/riscv/kvm/main.c b/arch/riscv/kvm/main.c
index 1fa8be5ee509..4b24705dc63a 100644
--- a/arch/riscv/kvm/main.c
+++ b/arch/riscv/kvm/main.c
@@ -172,8 +172,8 @@ module_init(riscv_kvm_init);
 
 static void __exit riscv_kvm_exit(void)
 {
-	kvm_riscv_teardown();
-
 	kvm_exit();
+
+	kvm_riscv_teardown();
 }
 module_exit(riscv_kvm_exit);
diff --git a/arch/riscv/kvm/vcpu.c b/arch/riscv/kvm/vcpu.c
index 60d684c76c58..e0a01af426ff 100644
--- a/arch/riscv/kvm/vcpu.c
+++ b/arch/riscv/kvm/vcpu.c
@@ -51,12 +51,33 @@ const struct kvm_stats_header kvm_vcpu_stats_header = {
 		       sizeof(kvm_vcpu_stats_desc),
 };
 
-static void kvm_riscv_reset_vcpu(struct kvm_vcpu *vcpu)
+static void kvm_riscv_vcpu_context_reset(struct kvm_vcpu *vcpu,
+					 bool kvm_sbi_reset)
 {
 	struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;
-	struct kvm_vcpu_csr *reset_csr = &vcpu->arch.guest_reset_csr;
 	struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;
-	struct kvm_cpu_context *reset_cntx = &vcpu->arch.guest_reset_context;
+	void *vector_datap = cntx->vector.datap;
+
+	memset(cntx, 0, sizeof(*cntx));
+	memset(csr, 0, sizeof(*csr));
+	memset(&vcpu->arch.smstateen_csr, 0, sizeof(vcpu->arch.smstateen_csr));
+
+	/* Restore datap as it's not a part of the guest context. */
+	cntx->vector.datap = vector_datap;
+
+	if (kvm_sbi_reset)
+		kvm_riscv_vcpu_sbi_load_reset_state(vcpu);
+
+	/* Setup reset state of shadow SSTATUS and HSTATUS CSRs */
+	cntx->sstatus = SR_SPP | SR_SPIE;
+
+	cntx->hstatus |= HSTATUS_VTW;
+	cntx->hstatus |= HSTATUS_SPVP;
+	cntx->hstatus |= HSTATUS_SPV;
+}
+
+static void kvm_riscv_reset_vcpu(struct kvm_vcpu *vcpu, bool kvm_sbi_reset)
+{
 	bool loaded;
 
 	/**
@@ -71,11 +92,7 @@ static void kvm_riscv_reset_vcpu(struct kvm_vcpu *vcpu)
 
 	vcpu->arch.last_exit_cpu = -1;
 
-	memcpy(csr, reset_csr, sizeof(*csr));
-
-	spin_lock(&vcpu->arch.reset_cntx_lock);
-	memcpy(cntx, reset_cntx, sizeof(*cntx));
-	spin_unlock(&vcpu->arch.reset_cntx_lock);
+	kvm_riscv_vcpu_context_reset(vcpu, kvm_sbi_reset);
 
 	kvm_riscv_vcpu_fp_reset(vcpu);
 
@@ -110,8 +127,6 @@ int kvm_arch_vcpu_precreate(struct kvm *kvm, unsigned int id)
 int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
 {
 	int rc;
-	struct kvm_cpu_context *cntx;
-	struct kvm_vcpu_csr *reset_csr = &vcpu->arch.guest_reset_csr;
 
 	spin_lock_init(&vcpu->arch.mp_state_lock);
 
@@ -131,24 +146,11 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
 	/* Setup VCPU hfence queue */
 	spin_lock_init(&vcpu->arch.hfence_lock);
 
-	/* Setup reset state of shadow SSTATUS and HSTATUS CSRs */
-	spin_lock_init(&vcpu->arch.reset_cntx_lock);
+	spin_lock_init(&vcpu->arch.reset_state.lock);
 
-	spin_lock(&vcpu->arch.reset_cntx_lock);
-	cntx = &vcpu->arch.guest_reset_context;
-	cntx->sstatus = SR_SPP | SR_SPIE;
-	cntx->hstatus = 0;
-	cntx->hstatus |= HSTATUS_VTW;
-	cntx->hstatus |= HSTATUS_SPVP;
-	cntx->hstatus |= HSTATUS_SPV;
-	spin_unlock(&vcpu->arch.reset_cntx_lock);
-
-	if (kvm_riscv_vcpu_alloc_vector_context(vcpu, cntx))
+	if (kvm_riscv_vcpu_alloc_vector_context(vcpu))
 		return -ENOMEM;
 
-	/* By default, make CY, TM, and IR counters accessible in VU mode */
-	reset_csr->scounteren = 0x7;
-
 	/* Setup VCPU timer */
 	kvm_riscv_vcpu_timer_init(vcpu);
 
@@ -167,7 +169,7 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
 	kvm_riscv_vcpu_sbi_init(vcpu);
 
 	/* Reset VCPU */
-	kvm_riscv_reset_vcpu(vcpu);
+	kvm_riscv_reset_vcpu(vcpu, false);
 
 	return 0;
 }
@@ -516,6 +518,12 @@ int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
 	case KVM_MP_STATE_STOPPED:
 		__kvm_riscv_vcpu_power_off(vcpu);
 		break;
+	case KVM_MP_STATE_INIT_RECEIVED:
+		if (vcpu->kvm->arch.mp_state_reset)
+			kvm_riscv_reset_vcpu(vcpu, false);
+		else
+			ret = -EINVAL;
+		break;
 	default:
 		ret = -EINVAL;
 	}
@@ -704,7 +712,7 @@ static void kvm_riscv_check_vcpu_requests(struct kvm_vcpu *vcpu)
 	}
 
 	if (kvm_check_request(KVM_REQ_VCPU_RESET, vcpu))
-		kvm_riscv_reset_vcpu(vcpu);
+		kvm_riscv_reset_vcpu(vcpu, true);
 
 	if (kvm_check_request(KVM_REQ_UPDATE_HGATP, vcpu))
 		kvm_riscv_gstage_update_hgatp(vcpu);
diff --git a/arch/riscv/kvm/vcpu_onereg.c b/arch/riscv/kvm/vcpu_onereg.c
index f6d27b59c641..2e1b646f0d61 100644
--- a/arch/riscv/kvm/vcpu_onereg.c
+++ b/arch/riscv/kvm/vcpu_onereg.c
@@ -47,8 +47,10 @@ static const unsigned long kvm_isa_ext_arr[] = {
 	KVM_ISA_EXT_ARR(SVNAPOT),
 	KVM_ISA_EXT_ARR(SVPBMT),
 	KVM_ISA_EXT_ARR(SVVPTC),
+	KVM_ISA_EXT_ARR(ZAAMO),
 	KVM_ISA_EXT_ARR(ZABHA),
 	KVM_ISA_EXT_ARR(ZACAS),
+	KVM_ISA_EXT_ARR(ZALRSC),
 	KVM_ISA_EXT_ARR(ZAWRS),
 	KVM_ISA_EXT_ARR(ZBA),
 	KVM_ISA_EXT_ARR(ZBB),
@@ -149,8 +151,10 @@ static bool kvm_riscv_vcpu_isa_disable_allowed(unsigned long ext)
 	case KVM_RISCV_ISA_EXT_SVINVAL:
 	case KVM_RISCV_ISA_EXT_SVNAPOT:
 	case KVM_RISCV_ISA_EXT_SVVPTC:
+	case KVM_RISCV_ISA_EXT_ZAAMO:
 	case KVM_RISCV_ISA_EXT_ZABHA:
 	case KVM_RISCV_ISA_EXT_ZACAS:
+	case KVM_RISCV_ISA_EXT_ZALRSC:
 	case KVM_RISCV_ISA_EXT_ZAWRS:
 	case KVM_RISCV_ISA_EXT_ZBA:
 	case KVM_RISCV_ISA_EXT_ZBB:
@@ -203,7 +207,7 @@ static bool kvm_riscv_vcpu_isa_disable_allowed(unsigned long ext)
 	case KVM_RISCV_ISA_EXT_SVADE:
 		/*
 		 * The henvcfg.ADUE is read-only zero if menvcfg.ADUE is zero.
-		 * Svade is not allowed to disable when the platform use Svade.
+		 * Svade can't be disabled unless we support Svadu.
 		 */
 		return arch_has_hw_pte_young();
 	default:
diff --git a/arch/riscv/kvm/vcpu_pmu.c b/arch/riscv/kvm/vcpu_pmu.c
index 2707a51b082c..78ac3216a54d 100644
--- a/arch/riscv/kvm/vcpu_pmu.c
+++ b/arch/riscv/kvm/vcpu_pmu.c
@@ -666,6 +666,7 @@ int kvm_riscv_vcpu_pmu_ctr_cfg_match(struct kvm_vcpu *vcpu, unsigned long ctr_ba
 		.type = etype,
 		.size = sizeof(struct perf_event_attr),
 		.pinned = true,
+		.disabled = true,
 		/*
 		 * It should never reach here if the platform doesn't support the sscofpmf
 		 * extension as mode filtering won't work without it.
diff --git a/arch/riscv/kvm/vcpu_sbi.c b/arch/riscv/kvm/vcpu_sbi.c
index d1c83a77735e..6e09b518a5d1 100644
--- a/arch/riscv/kvm/vcpu_sbi.c
+++ b/arch/riscv/kvm/vcpu_sbi.c
@@ -143,9 +143,9 @@ void kvm_riscv_vcpu_sbi_system_reset(struct kvm_vcpu *vcpu,
 	struct kvm_vcpu *tmp;
 
 	kvm_for_each_vcpu(i, tmp, vcpu->kvm) {
-		spin_lock(&vcpu->arch.mp_state_lock);
+		spin_lock(&tmp->arch.mp_state_lock);
 		WRITE_ONCE(tmp->arch.mp_state.mp_state, KVM_MP_STATE_STOPPED);
-		spin_unlock(&vcpu->arch.mp_state_lock);
+		spin_unlock(&tmp->arch.mp_state_lock);
 	}
 	kvm_make_all_cpus_request(vcpu->kvm, KVM_REQ_SLEEP);
 
@@ -156,6 +156,34 @@ void kvm_riscv_vcpu_sbi_system_reset(struct kvm_vcpu *vcpu,
 	run->exit_reason = KVM_EXIT_SYSTEM_EVENT;
 }
 
+void kvm_riscv_vcpu_sbi_request_reset(struct kvm_vcpu *vcpu,
+				      unsigned long pc, unsigned long a1)
+{
+	spin_lock(&vcpu->arch.reset_state.lock);
+	vcpu->arch.reset_state.pc = pc;
+	vcpu->arch.reset_state.a1 = a1;
+	spin_unlock(&vcpu->arch.reset_state.lock);
+
+	kvm_make_request(KVM_REQ_VCPU_RESET, vcpu);
+}
+
+void kvm_riscv_vcpu_sbi_load_reset_state(struct kvm_vcpu *vcpu)
+{
+	struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;
+	struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;
+	struct kvm_vcpu_reset_state *reset_state = &vcpu->arch.reset_state;
+
+	cntx->a0 = vcpu->vcpu_id;
+
+	spin_lock(&vcpu->arch.reset_state.lock);
+	cntx->sepc = reset_state->pc;
+	cntx->a1 = reset_state->a1;
+	spin_unlock(&vcpu->arch.reset_state.lock);
+
+	cntx->sstatus &= ~SR_SIE;
+	csr->vsatp = 0;
+}
+
 int kvm_riscv_vcpu_sbi_return(struct kvm_vcpu *vcpu, struct kvm_run *run)
 {
 	struct kvm_cpu_context *cp = &vcpu->arch.guest_context;
diff --git a/arch/riscv/kvm/vcpu_sbi_hsm.c b/arch/riscv/kvm/vcpu_sbi_hsm.c
index dce667f4b6ab..f26207f84bab 100644
--- a/arch/riscv/kvm/vcpu_sbi_hsm.c
+++ b/arch/riscv/kvm/vcpu_sbi_hsm.c
@@ -9,12 +9,12 @@
 #include <linux/errno.h>
 #include <linux/err.h>
 #include <linux/kvm_host.h>
+#include <linux/wordpart.h>
 #include <asm/sbi.h>
 #include <asm/kvm_vcpu_sbi.h>
 
 static int kvm_sbi_hsm_vcpu_start(struct kvm_vcpu *vcpu)
 {
-	struct kvm_cpu_context *reset_cntx;
 	struct kvm_cpu_context *cp = &vcpu->arch.guest_context;
 	struct kvm_vcpu *target_vcpu;
 	unsigned long target_vcpuid = cp->a0;
@@ -31,17 +31,7 @@ static int kvm_sbi_hsm_vcpu_start(struct kvm_vcpu *vcpu)
 		goto out;
 	}
 
-	spin_lock(&target_vcpu->arch.reset_cntx_lock);
-	reset_cntx = &target_vcpu->arch.guest_reset_context;
-	/* start address */
-	reset_cntx->sepc = cp->a1;
-	/* target vcpu id to start */
-	reset_cntx->a0 = target_vcpuid;
-	/* private data passed from kernel */
-	reset_cntx->a1 = cp->a2;
-	spin_unlock(&target_vcpu->arch.reset_cntx_lock);
-
-	kvm_make_request(KVM_REQ_VCPU_RESET, target_vcpu);
+	kvm_riscv_vcpu_sbi_request_reset(target_vcpu, cp->a1, cp->a2);
 
 	__kvm_riscv_vcpu_power_on(target_vcpu);
 
@@ -79,12 +69,12 @@ static int kvm_sbi_hsm_vcpu_get_status(struct kvm_vcpu *vcpu)
 	target_vcpu = kvm_get_vcpu_by_id(vcpu->kvm, target_vcpuid);
 	if (!target_vcpu)
 		return SBI_ERR_INVALID_PARAM;
-	if (!kvm_riscv_vcpu_stopped(target_vcpu))
-		return SBI_HSM_STATE_STARTED;
-	else if (vcpu->stat.generic.blocking)
+	if (kvm_riscv_vcpu_stopped(target_vcpu))
+		return SBI_HSM_STATE_STOPPED;
+	else if (target_vcpu->stat.generic.blocking)
 		return SBI_HSM_STATE_SUSPENDED;
 	else
-		return SBI_HSM_STATE_STOPPED;
+		return SBI_HSM_STATE_STARTED;
 }
 
 static int kvm_sbi_ext_hsm_handler(struct kvm_vcpu *vcpu, struct kvm_run *run,
@@ -109,7 +99,7 @@ static int kvm_sbi_ext_hsm_handler(struct kvm_vcpu *vcpu, struct kvm_run *run,
 		}
 		return 0;
 	case SBI_EXT_HSM_HART_SUSPEND:
-		switch (cp->a0) {
+		switch (lower_32_bits(cp->a0)) {
 		case SBI_HSM_SUSPEND_RET_DEFAULT:
 			kvm_riscv_vcpu_wfi(vcpu);
 			break;
diff --git a/arch/riscv/kvm/vcpu_sbi_replace.c b/arch/riscv/kvm/vcpu_sbi_replace.c
index 9c2ab3dfa93a..b17fad091bab 100644
--- a/arch/riscv/kvm/vcpu_sbi_replace.c
+++ b/arch/riscv/kvm/vcpu_sbi_replace.c
@@ -21,7 +21,7 @@ static int kvm_sbi_ext_time_handler(struct kvm_vcpu *vcpu, struct kvm_run *run,
 	u64 next_cycle;
 
 	if (cp->a6 != SBI_EXT_TIME_SET_TIMER) {
-		retdata->err_val = SBI_ERR_INVALID_PARAM;
+		retdata->err_val = SBI_ERR_NOT_SUPPORTED;
 		return 0;
 	}
 
@@ -51,9 +51,10 @@ static int kvm_sbi_ext_ipi_handler(struct kvm_vcpu *vcpu, struct kvm_run *run,
 	struct kvm_cpu_context *cp = &vcpu->arch.guest_context;
 	unsigned long hmask = cp->a0;
 	unsigned long hbase = cp->a1;
+	unsigned long hart_bit = 0, sentmask = 0;
 
 	if (cp->a6 != SBI_EXT_IPI_SEND_IPI) {
-		retdata->err_val = SBI_ERR_INVALID_PARAM;
+		retdata->err_val = SBI_ERR_NOT_SUPPORTED;
 		return 0;
 	}
 
@@ -62,15 +63,23 @@ static int kvm_sbi_ext_ipi_handler(struct kvm_vcpu *vcpu, struct kvm_run *run,
 		if (hbase != -1UL) {
 			if (tmp->vcpu_id < hbase)
 				continue;
-			if (!(hmask & (1UL << (tmp->vcpu_id - hbase))))
+			hart_bit = tmp->vcpu_id - hbase;
+			if (hart_bit >= __riscv_xlen)
+				goto done;
+			if (!(hmask & (1UL << hart_bit)))
 				continue;
 		}
 		ret = kvm_riscv_vcpu_set_interrupt(tmp, IRQ_VS_SOFT);
 		if (ret < 0)
 			break;
+		sentmask |= 1UL << hart_bit;
 		kvm_riscv_vcpu_pmu_incr_fw(tmp, SBI_PMU_FW_IPI_RCVD);
 	}
 
+done:
+	if (hbase != -1UL && (hmask ^ sentmask))
+		retdata->err_val = SBI_ERR_INVALID_PARAM;
+
 	return ret;
 }
 
@@ -94,7 +103,7 @@ static int kvm_sbi_ext_rfence_handler(struct kvm_vcpu *vcpu, struct kvm_run *run
 		kvm_riscv_vcpu_pmu_incr_fw(vcpu, SBI_PMU_FW_FENCE_I_SENT);
 		break;
 	case SBI_EXT_RFENCE_REMOTE_SFENCE_VMA:
-		if (cp->a2 == 0 && cp->a3 == 0)
+		if ((cp->a2 == 0 && cp->a3 == 0) || cp->a3 == -1UL)
 			kvm_riscv_hfence_vvma_all(vcpu->kvm, hbase, hmask);
 		else
 			kvm_riscv_hfence_vvma_gva(vcpu->kvm, hbase, hmask,
@@ -102,7 +111,7 @@ static int kvm_sbi_ext_rfence_handler(struct kvm_vcpu *vcpu, struct kvm_run *run
 		kvm_riscv_vcpu_pmu_incr_fw(vcpu, SBI_PMU_FW_HFENCE_VVMA_SENT);
 		break;
 	case SBI_EXT_RFENCE_REMOTE_SFENCE_VMA_ASID:
-		if (cp->a2 == 0 && cp->a3 == 0)
+		if ((cp->a2 == 0 && cp->a3 == 0) || cp->a3 == -1UL)
 			kvm_riscv_hfence_vvma_asid_all(vcpu->kvm,
 						       hbase, hmask, cp->a4);
 		else
@@ -118,9 +127,9 @@ static int kvm_sbi_ext_rfence_handler(struct kvm_vcpu *vcpu, struct kvm_run *run
 	case SBI_EXT_RFENCE_REMOTE_HFENCE_VVMA_ASID:
 		/*
 		 * Until nested virtualization is implemented, the
-		 * SBI HFENCE calls should be treated as NOPs
+		 * SBI HFENCE calls should return not supported
+		 * hence fallthrough.
 		 */
-		break;
 	default:
 		retdata->err_val = SBI_ERR_NOT_SUPPORTED;
 	}
diff --git a/arch/riscv/kvm/vcpu_sbi_system.c b/arch/riscv/kvm/vcpu_sbi_system.c
index 5d55e08791fa..359be90b0fc5 100644
--- a/arch/riscv/kvm/vcpu_sbi_system.c
+++ b/arch/riscv/kvm/vcpu_sbi_system.c
@@ -4,6 +4,7 @@
  */
 
 #include <linux/kvm_host.h>
+#include <linux/wordpart.h>
 
 #include <asm/kvm_vcpu_sbi.h>
 #include <asm/sbi.h>
@@ -12,14 +13,13 @@ static int kvm_sbi_ext_susp_handler(struct kvm_vcpu *vcpu, struct kvm_run *run,
 				    struct kvm_vcpu_sbi_return *retdata)
 {
 	struct kvm_cpu_context *cp = &vcpu->arch.guest_context;
-	struct kvm_cpu_context *reset_cntx;
 	unsigned long funcid = cp->a6;
 	unsigned long hva, i;
 	struct kvm_vcpu *tmp;
 
 	switch (funcid) {
 	case SBI_EXT_SUSP_SYSTEM_SUSPEND:
-		if (cp->a0 != SBI_SUSP_SLEEP_TYPE_SUSPEND_TO_RAM) {
+		if (lower_32_bits(cp->a0) != SBI_SUSP_SLEEP_TYPE_SUSPEND_TO_RAM) {
 			retdata->err_val = SBI_ERR_INVALID_PARAM;
 			return 0;
 		}
@@ -44,14 +44,7 @@ static int kvm_sbi_ext_susp_handler(struct kvm_vcpu *vcpu, struct kvm_run *run,
 			}
 		}
 
-		spin_lock(&vcpu->arch.reset_cntx_lock);
-		reset_cntx = &vcpu->arch.guest_reset_context;
-		reset_cntx->sepc = cp->a1;
-		reset_cntx->a0 = vcpu->vcpu_id;
-		reset_cntx->a1 = cp->a2;
-		spin_unlock(&vcpu->arch.reset_cntx_lock);
-
-		kvm_make_request(KVM_REQ_VCPU_RESET, vcpu);
+		kvm_riscv_vcpu_sbi_request_reset(vcpu, cp->a1, cp->a2);
 
 		/* userspace provides the suspend implementation */
 		kvm_riscv_vcpu_sbi_forward(vcpu, run);
diff --git a/arch/riscv/kvm/vcpu_timer.c b/arch/riscv/kvm/vcpu_timer.c
index 96e7a4e463f7..ff672fa71fcc 100644
--- a/arch/riscv/kvm/vcpu_timer.c
+++ b/arch/riscv/kvm/vcpu_timer.c
@@ -248,18 +248,19 @@ int kvm_riscv_vcpu_timer_init(struct kvm_vcpu *vcpu)
 	if (t->init_done)
 		return -EINVAL;
 
-	hrtimer_init(&t->hrt, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
 	t->init_done = true;
 	t->next_set = false;
 
 	/* Enable sstc for every vcpu if available in hardware */
 	if (riscv_isa_extension_available(NULL, SSTC)) {
 		t->sstc_enabled = true;
-		t->hrt.function = kvm_riscv_vcpu_vstimer_expired;
+		hrtimer_setup(&t->hrt, kvm_riscv_vcpu_vstimer_expired, CLOCK_MONOTONIC,
+			      HRTIMER_MODE_REL);
 		t->timer_next_event = kvm_riscv_vcpu_update_vstimecmp;
 	} else {
 		t->sstc_enabled = false;
-		t->hrt.function = kvm_riscv_vcpu_hrtimer_expired;
+		hrtimer_setup(&t->hrt, kvm_riscv_vcpu_hrtimer_expired, CLOCK_MONOTONIC,
+			      HRTIMER_MODE_REL);
 		t->timer_next_event = kvm_riscv_vcpu_update_hrtimer;
 	}
diff --git a/arch/riscv/kvm/vcpu_vector.c b/arch/riscv/kvm/vcpu_vector.c
index d92d1348045c..a5f88cb717f3 100644
--- a/arch/riscv/kvm/vcpu_vector.c
+++ b/arch/riscv/kvm/vcpu_vector.c
@@ -22,6 +22,9 @@ void kvm_riscv_vcpu_vector_reset(struct kvm_vcpu *vcpu)
 	struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;
 
 	cntx->sstatus &= ~SR_VS;
+
+	cntx->vector.vlenb = riscv_v_vsize / 32;
+
 	if (riscv_isa_extension_available(isa, v)) {
 		cntx->sstatus |= SR_VS_INITIAL;
 		WARN_ON(!cntx->vector.datap);
@@ -70,13 +73,11 @@ void kvm_riscv_vcpu_host_vector_restore(struct kvm_cpu_context *cntx)
 		__kvm_riscv_vector_restore(cntx);
 }
 
-int kvm_riscv_vcpu_alloc_vector_context(struct kvm_vcpu *vcpu,
-					struct kvm_cpu_context *cntx)
+int kvm_riscv_vcpu_alloc_vector_context(struct kvm_vcpu *vcpu)
 {
-	cntx->vector.datap = kmalloc(riscv_v_vsize, GFP_KERNEL);
-	if (!cntx->vector.datap)
+	vcpu->arch.guest_context.vector.datap = kzalloc(riscv_v_vsize, GFP_KERNEL);
+	if (!vcpu->arch.guest_context.vector.datap)
 		return -ENOMEM;
-	cntx->vector.vlenb = riscv_v_vsize / 32;
 
 	vcpu->arch.host_context.vector.datap = kzalloc(riscv_v_vsize, GFP_KERNEL);
 	if (!vcpu->arch.host_context.vector.datap)
@@ -87,7 +88,7 @@ int kvm_riscv_vcpu_alloc_vector_context(struct kvm_vcpu *vcpu,
 
 void kvm_riscv_vcpu_free_vector_context(struct kvm_vcpu *vcpu)
 {
-	kfree(vcpu->arch.guest_reset_context.vector.datap);
+	kfree(vcpu->arch.guest_context.vector.datap);
 	kfree(vcpu->arch.host_context.vector.datap);
 }
 #endif
diff --git a/arch/riscv/kvm/vm.c b/arch/riscv/kvm/vm.c
index 7396b8654f45..b27ec8f96697 100644
--- a/arch/riscv/kvm/vm.c
+++ b/arch/riscv/kvm/vm.c
@@ -209,6 +209,19 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
 	return r;
 }
 
+int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap)
+{
+	switch (cap->cap) {
+	case KVM_CAP_RISCV_MP_STATE_RESET:
+		if (cap->flags)
+			return -EINVAL;
+		kvm->arch.mp_state_reset = true;
+		return 0;
+	default:
+		return -EINVAL;
+	}
+}
+
 int kvm_arch_vm_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg)
 {
 	return -EINVAL;
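For context, the new KVM_CAP_RISCV_MP_STATE_RESET capability handled in vm.c above is opt-in per VM and is enabled from userspace through the standard KVM_ENABLE_CAP ioctl on the VM file descriptor. Once enabled, setting KVM_MP_STATE_INIT_RECEIVED via KVM_SET_MP_STATE triggers the non-SBI vCPU reset path added in vcpu.c. A minimal userspace sketch, not a definitive VMM implementation; the helper name is hypothetical and it assumes a VM fd already obtained via KVM_CREATE_VM and UAPI headers that define KVM_CAP_RISCV_MP_STATE_RESET:

	#include <linux/kvm.h>
	#include <string.h>
	#include <sys/ioctl.h>

	/*
	 * Hypothetical helper: ask KVM to reset a vCPU whenever userspace
	 * sets KVM_MP_STATE_INIT_RECEIVED, matching the new case in
	 * kvm_arch_vcpu_ioctl_set_mpstate() above. Returns the raw ioctl
	 * result (0 on success, -1 with errno set on failure).
	 */
	static int enable_mp_state_reset(int vm_fd)
	{
		struct kvm_enable_cap cap;

		memset(&cap, 0, sizeof(cap));
		cap.cap = KVM_CAP_RISCV_MP_STATE_RESET;
		/* cap.flags must stay 0; vm.c returns -EINVAL otherwise */
		return ioctl(vm_fd, KVM_ENABLE_CAP, &cap);
	}

Per the vm.c hunk, any nonzero flags value is rejected with -EINVAL, and an unknown capability falls through to -EINVAL as well.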