Diffstat (limited to 'arch/loongarch/kvm/exit.c')
-rw-r--r--	arch/loongarch/kvm/exit.c	137
1 file changed, 90 insertions(+), 47 deletions(-)
diff --git a/arch/loongarch/kvm/exit.c b/arch/loongarch/kvm/exit.c
index 69f3e3782cc9..2ce41f93b2a4 100644
--- a/arch/loongarch/kvm/exit.c
+++ b/arch/loongarch/kvm/exit.c
@@ -156,7 +156,7 @@ static int kvm_handle_csr(struct kvm_vcpu *vcpu, larch_inst inst)
int kvm_emu_iocsr(larch_inst inst, struct kvm_run *run, struct kvm_vcpu *vcpu)
{
- int ret;
+ int idx, ret;
unsigned long *val;
u32 addr, rd, rj, opcode;
@@ -167,7 +167,6 @@ int kvm_emu_iocsr(larch_inst inst, struct kvm_run *run, struct kvm_vcpu *vcpu)
rj = inst.reg2_format.rj;
opcode = inst.reg2_format.opcode;
addr = vcpu->arch.gprs[rj];
- ret = EMULATE_DO_IOCSR;
run->iocsr_io.phys_addr = addr;
run->iocsr_io.is_write = 0;
val = &vcpu->arch.gprs[rd];
@@ -207,20 +206,28 @@ int kvm_emu_iocsr(larch_inst inst, struct kvm_run *run, struct kvm_vcpu *vcpu)
}
if (run->iocsr_io.is_write) {
- if (!kvm_io_bus_write(vcpu, KVM_IOCSR_BUS, addr, run->iocsr_io.len, val))
+ idx = srcu_read_lock(&vcpu->kvm->srcu);
+ ret = kvm_io_bus_write(vcpu, KVM_IOCSR_BUS, addr, run->iocsr_io.len, val);
+ srcu_read_unlock(&vcpu->kvm->srcu, idx);
+ if (ret == 0)
ret = EMULATE_DONE;
- else
+ else {
+ ret = EMULATE_DO_IOCSR;
/* Save data and let user space write it */
memcpy(run->iocsr_io.data, val, run->iocsr_io.len);
-
+ }
trace_kvm_iocsr(KVM_TRACE_IOCSR_WRITE, run->iocsr_io.len, addr, val);
} else {
- if (!kvm_io_bus_read(vcpu, KVM_IOCSR_BUS, addr, run->iocsr_io.len, val))
+ idx = srcu_read_lock(&vcpu->kvm->srcu);
+ ret = kvm_io_bus_read(vcpu, KVM_IOCSR_BUS, addr, run->iocsr_io.len, val);
+ srcu_read_unlock(&vcpu->kvm->srcu, idx);
+ if (ret == 0)
ret = EMULATE_DONE;
- else
+ else {
+ ret = EMULATE_DO_IOCSR;
/* Save register id for iocsr read completion */
vcpu->arch.io_gpr = rd;
-
+ }
trace_kvm_iocsr(KVM_TRACE_IOCSR_READ, run->iocsr_io.len, addr, NULL);
}
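Both IOCSR paths now bracket the in-kernel bus access with an SRCU read-side critical section: kvm_io_bus_write() and kvm_io_bus_read() walk kvm->buses[], which is protected by kvm->srcu against concurrent device registration and removal. A minimal sketch of the locking pattern, with an illustrative wrapper name:

	/* Illustrative wrapper (not in the patch): kvm->buses[] is
	 * SRCU-protected, so every in-kernel bus access must run inside
	 * an SRCU read-side section. Returns 0 if a device claimed the
	 * access, a negative errno otherwise. */
	static int iocsr_bus_write(struct kvm_vcpu *vcpu, u32 addr,
				   int len, const void *val)
	{
		int idx, ret;

		idx = srcu_read_lock(&vcpu->kvm->srcu);
		ret = kvm_io_bus_write(vcpu, KVM_IOCSR_BUS, addr, len, val);
		srcu_read_unlock(&vcpu->kvm->srcu, idx);

		return ret;
	}

A non-zero return now falls through to ret = EMULATE_DO_IOCSR, deferring the access to user space instead of assuming success.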
@@ -282,9 +289,11 @@ static int kvm_trap_handle_gspr(struct kvm_vcpu *vcpu)
er = EMULATE_FAIL;
switch (((inst.word >> 24) & 0xff)) {
case 0x0: /* CPUCFG GSPR */
+ trace_kvm_exit_cpucfg(vcpu, KVM_TRACE_EXIT_CPUCFG);
er = kvm_emu_cpucfg(vcpu, inst);
break;
case 0x4: /* CSR{RD,WR,XCHG} GSPR */
+ trace_kvm_exit_csr(vcpu, KVM_TRACE_EXIT_CSR);
er = kvm_handle_csr(vcpu, inst);
break;
case 0x6: /* Cache, Idle and IOCSR GSPR */
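The two new trace calls presumably pair with events declared in arch/loongarch/kvm/trace.h. A sketch of what such definitions could look like, assuming a shared kvm_exit-style event class; the class name and argument layout are assumptions, not the actual contents of trace.h:

	/* Assumed shape only: reuses a hypothetical "kvm_exit" event
	 * class that records the vCPU state and an exit-reason code. */
	DEFINE_EVENT(kvm_exit, kvm_exit_cpucfg,
		     TP_PROTO(struct kvm_vcpu *vcpu, unsigned int reason),
		     TP_ARGS(vcpu, reason));

	DEFINE_EVENT(kvm_exit, kvm_exit_csr,
		     TP_PROTO(struct kvm_vcpu *vcpu, unsigned int reason),
		     TP_ARGS(vcpu, reason));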
@@ -334,7 +343,7 @@ static int kvm_trap_handle_gspr(struct kvm_vcpu *vcpu)
* 2) Execute CACOP/IDLE instructions;
* 3) Access to unimplemented CSRs/IOCSRs.
*/
-static int kvm_handle_gspr(struct kvm_vcpu *vcpu)
+static int kvm_handle_gspr(struct kvm_vcpu *vcpu, int ecode)
{
int ret = RESUME_GUEST;
enum emulation_result er = EMULATE_DONE;
@@ -359,7 +368,7 @@ static int kvm_handle_gspr(struct kvm_vcpu *vcpu)
int kvm_emu_mmio_read(struct kvm_vcpu *vcpu, larch_inst inst)
{
- int ret;
+ int idx, ret;
unsigned int op8, opcode, rd;
struct kvm_run *run = vcpu->run;
@@ -464,8 +473,10 @@ int kvm_emu_mmio_read(struct kvm_vcpu *vcpu, larch_inst inst)
* it need not return to user space to handle the mmio
* exception.
*/
+ idx = srcu_read_lock(&vcpu->kvm->srcu);
ret = kvm_io_bus_read(vcpu, KVM_MMIO_BUS, vcpu->arch.badv,
run->mmio.len, &vcpu->arch.gprs[rd]);
+ srcu_read_unlock(&vcpu->kvm->srcu, idx);
if (!ret) {
update_pc(&vcpu->arch);
vcpu->mmio_needed = 0;
@@ -531,7 +542,7 @@ int kvm_complete_mmio_read(struct kvm_vcpu *vcpu, struct kvm_run *run)
int kvm_emu_mmio_write(struct kvm_vcpu *vcpu, larch_inst inst)
{
- int ret;
+ int idx, ret;
unsigned int rd, op8, opcode;
unsigned long curr_pc, rd_val = 0;
struct kvm_run *run = vcpu->run;
@@ -631,7 +642,9 @@ int kvm_emu_mmio_write(struct kvm_vcpu *vcpu, larch_inst inst)
* it need not return to user space to handle the mmio
* exception.
*/
+ idx = srcu_read_lock(&vcpu->kvm->srcu);
ret = kvm_io_bus_write(vcpu, KVM_MMIO_BUS, vcpu->arch.badv, run->mmio.len, data);
+ srcu_read_unlock(&vcpu->kvm->srcu, idx);
if (!ret)
return EMULATE_DONE;
@@ -650,7 +663,7 @@ int kvm_emu_mmio_write(struct kvm_vcpu *vcpu, larch_inst inst)
return ret;
}
-static int kvm_handle_rdwr_fault(struct kvm_vcpu *vcpu, bool write)
+static int kvm_handle_rdwr_fault(struct kvm_vcpu *vcpu, bool write, int ecode)
{
int ret;
larch_inst inst;
@@ -658,7 +671,13 @@ static int kvm_handle_rdwr_fault(struct kvm_vcpu *vcpu, bool write)
struct kvm_run *run = vcpu->run;
unsigned long badv = vcpu->arch.badv;
- ret = kvm_handle_mm_fault(vcpu, badv, write);
+ /* Inject ADE exception if the address exceeds the max GPA size */
+ if (unlikely(badv >= vcpu->kvm->arch.gpa_size)) {
+ kvm_queue_exception(vcpu, EXCCODE_ADE, EXSUBCODE_ADEM);
+ return RESUME_GUEST;
+ }
+
+ ret = kvm_handle_mm_fault(vcpu, badv, write, ecode);
if (ret) {
/* Treat as MMIO */
inst.word = vcpu->arch.badi;
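The added guard rejects faulting addresses beyond the VM's maximum guest physical address before any memslot or MMIO handling is attempted; kvm_queue_exception() pends an address error (ADE, memory-access subcode ADEM) that the guest takes on its next entry. The same guard as a standalone helper, with an illustrative name:

	/* Illustrative helper: an address past kvm->arch.gpa_size can be
	 * backed by neither a memslot nor an in-kernel device, so queue
	 * an address error exception and resume the guest. */
	static bool reject_oversized_gpa(struct kvm_vcpu *vcpu, unsigned long badv)
	{
		if (likely(badv < vcpu->kvm->arch.gpa_size))
			return false;

		kvm_queue_exception(vcpu, EXCCODE_ADE, EXSUBCODE_ADEM);
		return true;	/* caller returns RESUME_GUEST */
	}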
@@ -688,24 +707,33 @@ static int kvm_handle_rdwr_fault(struct kvm_vcpu *vcpu, bool write)
return ret;
}
-static int kvm_handle_read_fault(struct kvm_vcpu *vcpu)
+static int kvm_handle_read_fault(struct kvm_vcpu *vcpu, int ecode)
+{
+ return kvm_handle_rdwr_fault(vcpu, false, ecode);
+}
+
+static int kvm_handle_write_fault(struct kvm_vcpu *vcpu, int ecode)
{
- return kvm_handle_rdwr_fault(vcpu, false);
+ return kvm_handle_rdwr_fault(vcpu, true, ecode);
}
-static int kvm_handle_write_fault(struct kvm_vcpu *vcpu)
+int kvm_complete_user_service(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
- return kvm_handle_rdwr_fault(vcpu, true);
+ update_pc(&vcpu->arch);
+ kvm_write_reg(vcpu, LOONGARCH_GPR_A0, run->hypercall.ret);
+
+ return 0;
}
/**
* kvm_handle_fpu_disabled() - Guest used FPU while it is disabled at host
* @vcpu: Virtual CPU context.
+ * @ecode: Exception code.
*
* Handle when the guest attempts to use fpu which hasn't been allowed
* by the root context.
*/
-static int kvm_handle_fpu_disabled(struct kvm_vcpu *vcpu)
+static int kvm_handle_fpu_disabled(struct kvm_vcpu *vcpu, int ecode)
{
struct kvm_run *run = vcpu->run;
@@ -758,11 +786,12 @@ static long kvm_save_notify(struct kvm_vcpu *vcpu)
/*
* kvm_handle_lsx_disabled() - Guest used LSX while disabled in root.
* @vcpu: Virtual CPU context.
+ * @ecode: Exception code.
*
* Handle when the guest attempts to use LSX when it is disabled in the root
* context.
*/
-static int kvm_handle_lsx_disabled(struct kvm_vcpu *vcpu)
+static int kvm_handle_lsx_disabled(struct kvm_vcpu *vcpu, int ecode)
{
if (kvm_own_lsx(vcpu))
kvm_queue_exception(vcpu, EXCCODE_INE, 0);
@@ -773,11 +802,12 @@ static int kvm_handle_lsx_disabled(struct kvm_vcpu *vcpu)
/*
* kvm_handle_lasx_disabled() - Guest used LASX while disabled in root.
* @vcpu: Virtual CPU context.
+ * @ecode: Exception code.
*
* Handle when the guest attempts to use LASX when it is disabled in the root
* context.
*/
-static int kvm_handle_lasx_disabled(struct kvm_vcpu *vcpu)
+static int kvm_handle_lasx_disabled(struct kvm_vcpu *vcpu, int ecode)
{
if (kvm_own_lasx(vcpu))
kvm_queue_exception(vcpu, EXCCODE_INE, 0);
@@ -785,7 +815,7 @@ static int kvm_handle_lasx_disabled(struct kvm_vcpu *vcpu)
return RESUME_GUEST;
}
-static int kvm_handle_lbt_disabled(struct kvm_vcpu *vcpu)
+static int kvm_handle_lbt_disabled(struct kvm_vcpu *vcpu, int ecode)
{
if (kvm_own_lbt(vcpu))
kvm_queue_exception(vcpu, EXCCODE_INE, 0);
@@ -793,32 +823,25 @@ static int kvm_handle_lbt_disabled(struct kvm_vcpu *vcpu)
return RESUME_GUEST;
}
-static int kvm_send_pv_ipi(struct kvm_vcpu *vcpu)
+static void kvm_send_pv_ipi(struct kvm_vcpu *vcpu)
{
- unsigned int min, cpu, i;
- unsigned long ipi_bitmap;
+ unsigned int min, cpu;
struct kvm_vcpu *dest;
+ DECLARE_BITMAP(ipi_bitmap, BITS_PER_LONG * 2) = {
+ kvm_read_reg(vcpu, LOONGARCH_GPR_A1),
+ kvm_read_reg(vcpu, LOONGARCH_GPR_A2)
+ };
min = kvm_read_reg(vcpu, LOONGARCH_GPR_A3);
- for (i = 0; i < 2; i++, min += BITS_PER_LONG) {
- ipi_bitmap = kvm_read_reg(vcpu, LOONGARCH_GPR_A1 + i);
- if (!ipi_bitmap)
+ for_each_set_bit(cpu, ipi_bitmap, BITS_PER_LONG * 2) {
+ dest = kvm_get_vcpu_by_cpuid(vcpu->kvm, cpu + min);
+ if (!dest)
continue;
- cpu = find_first_bit((void *)&ipi_bitmap, BITS_PER_LONG);
- while (cpu < BITS_PER_LONG) {
- dest = kvm_get_vcpu_by_cpuid(vcpu->kvm, cpu + min);
- cpu = find_next_bit((void *)&ipi_bitmap, BITS_PER_LONG, cpu + 1);
- if (!dest)
- continue;
-
- /* Send SWI0 to dest vcpu to emulate IPI interrupt */
- kvm_queue_irq(dest, INT_SWI0);
- kvm_vcpu_kick(dest);
- }
+ /* Send SWI0 to dest vcpu to emulate IPI interrupt */
+ kvm_queue_irq(dest, INT_SWI0);
+ kvm_vcpu_kick(dest);
}
-
- return 0;
}
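The rewrite folds the old two-pass find_first_bit()/find_next_bit() loops into a single for_each_set_bit() walk. DECLARE_BITMAP(ipi_bitmap, BITS_PER_LONG * 2) lays the two GPR values out as adjacent words of one bitmap, so bit N of A2 becomes bit BITS_PER_LONG + N overall, exactly matching the old "min += BITS_PER_LONG" offset for the second word. A standalone sketch of the idiom (the function name is made up):

	/* Two long words viewed as one contiguous bitmap: for_each_set_bit()
	 * visits the set bits of lo first, then those of hi. */
	static unsigned int count_targets(unsigned long lo, unsigned long hi)
	{
		unsigned int cpu, n = 0;
		DECLARE_BITMAP(map, BITS_PER_LONG * 2) = { lo, hi };

		for_each_set_bit(cpu, map, BITS_PER_LONG * 2)
			n++;

		return n;
	}

The change also drops the unconditional "return 0": the result was never meaningful to the caller, so the helper now returns void.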
/*
@@ -847,7 +870,7 @@ static void kvm_handle_service(struct kvm_vcpu *vcpu)
kvm_write_reg(vcpu, LOONGARCH_GPR_A0, ret);
}
-static int kvm_handle_hypercall(struct kvm_vcpu *vcpu)
+static int kvm_handle_hypercall(struct kvm_vcpu *vcpu, int ecode)
{
int ret;
larch_inst inst;
@@ -862,6 +885,28 @@ static int kvm_handle_hypercall(struct kvm_vcpu *vcpu)
vcpu->stat.hypercall_exits++;
kvm_handle_service(vcpu);
break;
+ case KVM_HCALL_USER_SERVICE:
+ if (!kvm_guest_has_pv_feature(vcpu, KVM_FEATURE_USER_HCALL)) {
+ kvm_write_reg(vcpu, LOONGARCH_GPR_A0, KVM_HCALL_INVALID_CODE);
+ break;
+ }
+
+ vcpu->stat.hypercall_exits++;
+ vcpu->run->exit_reason = KVM_EXIT_HYPERCALL;
+ vcpu->run->hypercall.nr = KVM_HCALL_USER_SERVICE;
+ vcpu->run->hypercall.args[0] = kvm_read_reg(vcpu, LOONGARCH_GPR_A0);
+ vcpu->run->hypercall.args[1] = kvm_read_reg(vcpu, LOONGARCH_GPR_A1);
+ vcpu->run->hypercall.args[2] = kvm_read_reg(vcpu, LOONGARCH_GPR_A2);
+ vcpu->run->hypercall.args[3] = kvm_read_reg(vcpu, LOONGARCH_GPR_A3);
+ vcpu->run->hypercall.args[4] = kvm_read_reg(vcpu, LOONGARCH_GPR_A4);
+ vcpu->run->hypercall.args[5] = kvm_read_reg(vcpu, LOONGARCH_GPR_A5);
+ vcpu->run->hypercall.flags = 0;
+ /*
+ * Set invalid return value by default, let user-mode VMM modify it.
+ */
+ vcpu->run->hypercall.ret = KVM_HCALL_INVALID_CODE;
+ ret = RESUME_HOST;
+ break;
case KVM_HCALL_SWDBG:
/* KVM_HCALL_SWDBG is only effective when SW_BP is enabled */
if (vcpu->guest_debug & KVM_GUESTDBG_SW_BP_MASK) {
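KVM_HCALL_USER_SERVICE now completes in user space: the vCPU exits with KVM_EXIT_HYPERCALL, the VMM services run->hypercall.nr and run->hypercall.args[], stores its result in run->hypercall.ret, and kvm_complete_user_service() (added above) copies that value into A0 and advances the PC on the next KVM_RUN. A hedged sketch of the VMM side; dispatch_user_service() and vcpu_fd are assumed VMM pieces, not part of this patch:

	/* User-space sketch. If the VMM never touches hypercall.ret, the
	 * guest sees the KVM_HCALL_INVALID_CODE default set above. */
	static void handle_exit(int vcpu_fd, struct kvm_run *run)
	{
		if (run->exit_reason == KVM_EXIT_HYPERCALL &&
		    run->hypercall.nr == KVM_HCALL_USER_SERVICE)
			run->hypercall.ret =
				dispatch_user_service(run->hypercall.nr,
						      run->hypercall.args);

		ioctl(vcpu_fd, KVM_RUN, 0);	/* ret lands in A0 on re-entry */
	}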
@@ -885,16 +930,14 @@ static int kvm_handle_hypercall(struct kvm_vcpu *vcpu)
/*
* LoongArch KVM callback handling for unimplemented guest exiting
*/
-static int kvm_fault_ni(struct kvm_vcpu *vcpu)
+static int kvm_fault_ni(struct kvm_vcpu *vcpu, int ecode)
{
- unsigned int ecode, inst;
- unsigned long estat, badv;
+ unsigned int inst;
+ unsigned long badv;
/* Fetch the instruction */
inst = vcpu->arch.badi;
badv = vcpu->arch.badv;
- estat = vcpu->arch.host_estat;
- ecode = (estat & CSR_ESTAT_EXC) >> CSR_ESTAT_EXC_SHIFT;
kvm_err("ECode: %d PC=%#lx Inst=0x%08x BadVaddr=%#lx ESTAT=%#lx\n",
ecode, vcpu->arch.pc, inst, badv, read_gcsr_estat());
kvm_arch_vcpu_dump_regs(vcpu);
@@ -919,5 +962,5 @@ static exit_handle_fn kvm_fault_tables[EXCCODE_INT_START] = {
int kvm_handle_fault(struct kvm_vcpu *vcpu, int fault)
{
- return kvm_fault_tables[fault](vcpu);
+ return kvm_fault_tables[fault](vcpu, fault);
}
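With this final hunk the exception code flows from the dispatcher into every handler, which is why kvm_fault_ni() above no longer re-derives it from CSR.ESTAT. The table's function-pointer type presumably becomes:

	/* Assumed handler signature after this patch; it matches the new
	 * call site and the kvm_fault_tables[] declaration above. */
	typedef int (*exit_handle_fn)(struct kvm_vcpu *vcpu, int ecode);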