Diffstat (limited to 'arch/loongarch/kvm')
-rw-r--r--  arch/loongarch/kvm/Kconfig        |    6
-rw-r--r--  arch/loongarch/kvm/Makefile       |    8
-rw-r--r--  arch/loongarch/kvm/exit.c         |  172
-rw-r--r--  arch/loongarch/kvm/intc/eiointc.c | 1060
-rw-r--r--  arch/loongarch/kvm/intc/ipi.c     |  479
-rw-r--r--  arch/loongarch/kvm/intc/pch_pic.c |  519
-rw-r--r--  arch/loongarch/kvm/irqfd.c        |   89
-rw-r--r--  arch/loongarch/kvm/main.c         |   55
-rw-r--r--  arch/loongarch/kvm/mmu.c          |   55
-rw-r--r--  arch/loongarch/kvm/switch.S       |   14
-rw-r--r--  arch/loongarch/kvm/vcpu.c         |   68
-rw-r--r--  arch/loongarch/kvm/vm.c           |   27
12 files changed, 2446 insertions(+), 106 deletions(-)
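For orientation before the patch body: the new intc/eiointc.c, intc/ipi.c and intc/pch_pic.c files register in-kernel devices on KVM's I/O buses, and the exit.c changes dispatch guest IOCSR/MMIO accesses to those devices before falling back to userspace. Below is a minimal sketch of that registration pattern using the same helpers the patch relies on (kvm_iodevice_init(), kvm_io_bus_register_dev(), KVM_IOCSR_BUS); the demo_dev structure, register layout and names are illustrative placeholders, not the patch's actual types.

	#include <linux/kvm_host.h>
	#include <kvm/iodev.h>

	/* Hypothetical device state; the real patch uses loongarch_eiointc, loongarch_ipi, ... */
	struct demo_dev {
		struct kvm_io_device device;
		u32 reg;
	};

	static int demo_read(struct kvm_vcpu *vcpu, struct kvm_io_device *dev,
			     gpa_t addr, int len, void *val)
	{
		struct demo_dev *s = container_of(dev, struct demo_dev, device);

		/* addr/len validation omitted for brevity */
		*(u32 *)val = s->reg;
		return 0;		/* 0: handled in kernel, no exit to userspace */
	}

	static int demo_write(struct kvm_vcpu *vcpu, struct kvm_io_device *dev,
			      gpa_t addr, int len, const void *val)
	{
		struct demo_dev *s = container_of(dev, struct demo_dev, device);

		s->reg = *(u32 *)val;
		return 0;
	}

	static const struct kvm_io_device_ops demo_ops = {
		.read  = demo_read,
		.write = demo_write,
	};

	/* Registration, mirroring kvm_eiointc_create()/kvm_ipi_create() in the patch */
	static int demo_register(struct kvm *kvm, struct demo_dev *s, gpa_t base, int size)
	{
		int ret;

		kvm_iodevice_init(&s->device, &demo_ops);
		mutex_lock(&kvm->slots_lock);
		ret = kvm_io_bus_register_dev(kvm, KVM_IOCSR_BUS, base, size, &s->device);
		mutex_unlock(&kvm->slots_lock);

		return ret;
	}

On the dispatch side, the reworked kvm_emu_iocsr() and kvm_emu_mmio_read()/kvm_emu_mmio_write() in exit.c call kvm_io_bus_read()/kvm_io_bus_write() under srcu_read_lock(&vcpu->kvm->srcu); only when no in-kernel device claims the address do they return EMULATE_DO_IOCSR or EMULATE_DO_MMIO so userspace completes the access.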
diff --git a/arch/loongarch/kvm/Kconfig b/arch/loongarch/kvm/Kconfig index 248744b4d086..40eea6da7c25 100644 --- a/arch/loongarch/kvm/Kconfig +++ b/arch/loongarch/kvm/Kconfig @@ -21,15 +21,19 @@ config KVM tristate "Kernel-based Virtual Machine (KVM) support" depends on AS_HAS_LVZ_EXTENSION select HAVE_KVM_DIRTY_RING_ACQ_REL + select HAVE_KVM_IRQ_ROUTING + select HAVE_KVM_IRQCHIP + select HAVE_KVM_MSI + select HAVE_KVM_READONLY_MEM select HAVE_KVM_VCPU_ASYNC_IOCTL select KVM_COMMON select KVM_GENERIC_DIRTYLOG_READ_PROTECT select KVM_GENERIC_HARDWARE_ENABLING select KVM_GENERIC_MMU_NOTIFIER select KVM_MMIO - select HAVE_KVM_READONLY_MEM select KVM_XFER_TO_GUEST_WORK select SCHED_INFO + select GUEST_PERF_EVENTS if PERF_EVENTS help Support hosting virtualized guest machines using hardware virtualization extensions. You will need diff --git a/arch/loongarch/kvm/Makefile b/arch/loongarch/kvm/Makefile index b2f4cbe01ae8..cb41d9265662 100644 --- a/arch/loongarch/kvm/Makefile +++ b/arch/loongarch/kvm/Makefile @@ -3,8 +3,6 @@ # Makefile for LoongArch KVM support # -ccflags-y += -I $(src) - include $(srctree)/virt/kvm/Makefile.kvm obj-$(CONFIG_KVM) += kvm.o @@ -18,5 +16,9 @@ kvm-y += timer.o kvm-y += tlb.o kvm-y += vcpu.o kvm-y += vm.o +kvm-y += intc/ipi.o +kvm-y += intc/eiointc.o +kvm-y += intc/pch_pic.o +kvm-y += irqfd.o -CFLAGS_exit.o += $(call cc-option,-Wno-override-init,) +CFLAGS_exit.o += $(call cc-disable-warning, override-init) diff --git a/arch/loongarch/kvm/exit.c b/arch/loongarch/kvm/exit.c index 90894f70ff4a..fa52251b3bf1 100644 --- a/arch/loongarch/kvm/exit.c +++ b/arch/loongarch/kvm/exit.c @@ -156,8 +156,8 @@ static int kvm_handle_csr(struct kvm_vcpu *vcpu, larch_inst inst) int kvm_emu_iocsr(larch_inst inst, struct kvm_run *run, struct kvm_vcpu *vcpu) { - int ret; - unsigned long val; + int idx, ret; + unsigned long *val; u32 addr, rd, rj, opcode; /* @@ -167,9 +167,9 @@ int kvm_emu_iocsr(larch_inst inst, struct kvm_run *run, struct kvm_vcpu *vcpu) rj = inst.reg2_format.rj; opcode = inst.reg2_format.opcode; addr = vcpu->arch.gprs[rj]; - ret = EMULATE_DO_IOCSR; run->iocsr_io.phys_addr = addr; run->iocsr_io.is_write = 0; + val = &vcpu->arch.gprs[rd]; /* LoongArch is Little endian */ switch (opcode) { @@ -202,16 +202,33 @@ int kvm_emu_iocsr(larch_inst inst, struct kvm_run *run, struct kvm_vcpu *vcpu) run->iocsr_io.is_write = 1; break; default: - ret = EMULATE_FAIL; - break; + return EMULATE_FAIL; } - if (ret == EMULATE_DO_IOCSR) { - if (run->iocsr_io.is_write) { - val = vcpu->arch.gprs[rd]; - memcpy(run->iocsr_io.data, &val, run->iocsr_io.len); + if (run->iocsr_io.is_write) { + idx = srcu_read_lock(&vcpu->kvm->srcu); + ret = kvm_io_bus_write(vcpu, KVM_IOCSR_BUS, addr, run->iocsr_io.len, val); + srcu_read_unlock(&vcpu->kvm->srcu, idx); + if (ret == 0) + ret = EMULATE_DONE; + else { + ret = EMULATE_DO_IOCSR; + /* Save data and let user space to write it */ + memcpy(run->iocsr_io.data, val, run->iocsr_io.len); } - vcpu->arch.io_gpr = rd; + trace_kvm_iocsr(KVM_TRACE_IOCSR_WRITE, run->iocsr_io.len, addr, val); + } else { + idx = srcu_read_lock(&vcpu->kvm->srcu); + ret = kvm_io_bus_read(vcpu, KVM_IOCSR_BUS, addr, run->iocsr_io.len, val); + srcu_read_unlock(&vcpu->kvm->srcu, idx); + if (ret == 0) + ret = EMULATE_DONE; + else { + ret = EMULATE_DO_IOCSR; + /* Save register id for iocsr read completion */ + vcpu->arch.io_gpr = rd; + } + trace_kvm_iocsr(KVM_TRACE_IOCSR_READ, run->iocsr_io.len, addr, NULL); } return ret; @@ -324,7 +341,7 @@ static int kvm_trap_handle_gspr(struct kvm_vcpu *vcpu) * 2) 
Execute CACOP/IDLE instructions; * 3) Access to unimplemented CSRs/IOCSRs. */ -static int kvm_handle_gspr(struct kvm_vcpu *vcpu) +static int kvm_handle_gspr(struct kvm_vcpu *vcpu, int ecode) { int ret = RESUME_GUEST; enum emulation_result er = EMULATE_DONE; @@ -349,7 +366,7 @@ static int kvm_handle_gspr(struct kvm_vcpu *vcpu) int kvm_emu_mmio_read(struct kvm_vcpu *vcpu, larch_inst inst) { - int ret; + int idx, ret; unsigned int op8, opcode, rd; struct kvm_run *run = vcpu->run; @@ -447,19 +464,35 @@ int kvm_emu_mmio_read(struct kvm_vcpu *vcpu, larch_inst inst) } if (ret == EMULATE_DO_MMIO) { + trace_kvm_mmio(KVM_TRACE_MMIO_READ, run->mmio.len, run->mmio.phys_addr, NULL); + + /* + * If mmio device such as PCH-PIC is emulated in KVM, + * it need not return to user space to handle the mmio + * exception. + */ + idx = srcu_read_lock(&vcpu->kvm->srcu); + ret = kvm_io_bus_read(vcpu, KVM_MMIO_BUS, vcpu->arch.badv, + run->mmio.len, &vcpu->arch.gprs[rd]); + srcu_read_unlock(&vcpu->kvm->srcu, idx); + if (!ret) { + update_pc(&vcpu->arch); + vcpu->mmio_needed = 0; + return EMULATE_DONE; + } + /* Set for kvm_complete_mmio_read() use */ vcpu->arch.io_gpr = rd; run->mmio.is_write = 0; vcpu->mmio_is_write = 0; - trace_kvm_mmio(KVM_TRACE_MMIO_READ_UNSATISFIED, run->mmio.len, - run->mmio.phys_addr, NULL); - } else { - kvm_err("Read not supported Inst=0x%08x @%lx BadVaddr:%#lx\n", - inst.word, vcpu->arch.pc, vcpu->arch.badv); - kvm_arch_vcpu_dump_regs(vcpu); - vcpu->mmio_needed = 0; + return EMULATE_DO_MMIO; } + kvm_err("Read not supported Inst=0x%08x @%lx BadVaddr:%#lx\n", + inst.word, vcpu->arch.pc, vcpu->arch.badv); + kvm_arch_vcpu_dump_regs(vcpu); + vcpu->mmio_needed = 0; + return ret; } @@ -507,7 +540,7 @@ int kvm_complete_mmio_read(struct kvm_vcpu *vcpu, struct kvm_run *run) int kvm_emu_mmio_write(struct kvm_vcpu *vcpu, larch_inst inst) { - int ret; + int idx, ret; unsigned int rd, op8, opcode; unsigned long curr_pc, rd_val = 0; struct kvm_run *run = vcpu->run; @@ -600,23 +633,35 @@ int kvm_emu_mmio_write(struct kvm_vcpu *vcpu, larch_inst inst) } if (ret == EMULATE_DO_MMIO) { + trace_kvm_mmio(KVM_TRACE_MMIO_WRITE, run->mmio.len, run->mmio.phys_addr, data); + + /* + * If mmio device such as PCH-PIC is emulated in KVM, + * it need not return to user space to handle the mmio + * exception. 
+ */ + idx = srcu_read_lock(&vcpu->kvm->srcu); + ret = kvm_io_bus_write(vcpu, KVM_MMIO_BUS, vcpu->arch.badv, run->mmio.len, data); + srcu_read_unlock(&vcpu->kvm->srcu, idx); + if (!ret) + return EMULATE_DONE; + run->mmio.is_write = 1; vcpu->mmio_needed = 1; vcpu->mmio_is_write = 1; - trace_kvm_mmio(KVM_TRACE_MMIO_WRITE, run->mmio.len, - run->mmio.phys_addr, data); - } else { - vcpu->arch.pc = curr_pc; - kvm_err("Write not supported Inst=0x%08x @%lx BadVaddr:%#lx\n", - inst.word, vcpu->arch.pc, vcpu->arch.badv); - kvm_arch_vcpu_dump_regs(vcpu); - /* Rollback PC if emulation was unsuccessful */ + return EMULATE_DO_MMIO; } + vcpu->arch.pc = curr_pc; + kvm_err("Write not supported Inst=0x%08x @%lx BadVaddr:%#lx\n", + inst.word, vcpu->arch.pc, vcpu->arch.badv); + kvm_arch_vcpu_dump_regs(vcpu); + /* Rollback PC if emulation was unsuccessful */ + return ret; } -static int kvm_handle_rdwr_fault(struct kvm_vcpu *vcpu, bool write) +static int kvm_handle_rdwr_fault(struct kvm_vcpu *vcpu, bool write, int ecode) { int ret; larch_inst inst; @@ -624,7 +669,13 @@ static int kvm_handle_rdwr_fault(struct kvm_vcpu *vcpu, bool write) struct kvm_run *run = vcpu->run; unsigned long badv = vcpu->arch.badv; - ret = kvm_handle_mm_fault(vcpu, badv, write); + /* Inject ADE exception if exceed max GPA size */ + if (unlikely(badv >= vcpu->kvm->arch.gpa_size)) { + kvm_queue_exception(vcpu, EXCCODE_ADE, EXSUBCODE_ADEM); + return RESUME_GUEST; + } + + ret = kvm_handle_mm_fault(vcpu, badv, write, ecode); if (ret) { /* Treat as MMIO */ inst.word = vcpu->arch.badi; @@ -654,24 +705,33 @@ static int kvm_handle_rdwr_fault(struct kvm_vcpu *vcpu, bool write) return ret; } -static int kvm_handle_read_fault(struct kvm_vcpu *vcpu) +static int kvm_handle_read_fault(struct kvm_vcpu *vcpu, int ecode) { - return kvm_handle_rdwr_fault(vcpu, false); + return kvm_handle_rdwr_fault(vcpu, false, ecode); } -static int kvm_handle_write_fault(struct kvm_vcpu *vcpu) +static int kvm_handle_write_fault(struct kvm_vcpu *vcpu, int ecode) { - return kvm_handle_rdwr_fault(vcpu, true); + return kvm_handle_rdwr_fault(vcpu, true, ecode); +} + +int kvm_complete_user_service(struct kvm_vcpu *vcpu, struct kvm_run *run) +{ + update_pc(&vcpu->arch); + kvm_write_reg(vcpu, LOONGARCH_GPR_A0, run->hypercall.ret); + + return 0; } /** * kvm_handle_fpu_disabled() - Guest used fpu however it is disabled at host * @vcpu: Virtual CPU context. + * @ecode: Exception code. * * Handle when the guest attempts to use fpu which hasn't been allowed * by the root context. */ -static int kvm_handle_fpu_disabled(struct kvm_vcpu *vcpu) +static int kvm_handle_fpu_disabled(struct kvm_vcpu *vcpu, int ecode) { struct kvm_run *run = vcpu->run; @@ -724,11 +784,12 @@ static long kvm_save_notify(struct kvm_vcpu *vcpu) /* * kvm_handle_lsx_disabled() - Guest used LSX while disabled in root. * @vcpu: Virtual CPU context. + * @ecode: Exception code. * * Handle when the guest attempts to use LSX when it is disabled in the root * context. */ -static int kvm_handle_lsx_disabled(struct kvm_vcpu *vcpu) +static int kvm_handle_lsx_disabled(struct kvm_vcpu *vcpu, int ecode) { if (kvm_own_lsx(vcpu)) kvm_queue_exception(vcpu, EXCCODE_INE, 0); @@ -739,11 +800,12 @@ static int kvm_handle_lsx_disabled(struct kvm_vcpu *vcpu) /* * kvm_handle_lasx_disabled() - Guest used LASX while disabled in root. * @vcpu: Virtual CPU context. + * @ecode: Exception code. * * Handle when the guest attempts to use LASX when it is disabled in the root * context. 
*/ -static int kvm_handle_lasx_disabled(struct kvm_vcpu *vcpu) +static int kvm_handle_lasx_disabled(struct kvm_vcpu *vcpu, int ecode) { if (kvm_own_lasx(vcpu)) kvm_queue_exception(vcpu, EXCCODE_INE, 0); @@ -751,7 +813,7 @@ static int kvm_handle_lasx_disabled(struct kvm_vcpu *vcpu) return RESUME_GUEST; } -static int kvm_handle_lbt_disabled(struct kvm_vcpu *vcpu) +static int kvm_handle_lbt_disabled(struct kvm_vcpu *vcpu, int ecode) { if (kvm_own_lbt(vcpu)) kvm_queue_exception(vcpu, EXCCODE_INE, 0); @@ -813,7 +875,7 @@ static void kvm_handle_service(struct kvm_vcpu *vcpu) kvm_write_reg(vcpu, LOONGARCH_GPR_A0, ret); } -static int kvm_handle_hypercall(struct kvm_vcpu *vcpu) +static int kvm_handle_hypercall(struct kvm_vcpu *vcpu, int ecode) { int ret; larch_inst inst; @@ -828,6 +890,28 @@ static int kvm_handle_hypercall(struct kvm_vcpu *vcpu) vcpu->stat.hypercall_exits++; kvm_handle_service(vcpu); break; + case KVM_HCALL_USER_SERVICE: + if (!kvm_guest_has_pv_feature(vcpu, KVM_FEATURE_USER_HCALL)) { + kvm_write_reg(vcpu, LOONGARCH_GPR_A0, KVM_HCALL_INVALID_CODE); + break; + } + + vcpu->stat.hypercall_exits++; + vcpu->run->exit_reason = KVM_EXIT_HYPERCALL; + vcpu->run->hypercall.nr = KVM_HCALL_USER_SERVICE; + vcpu->run->hypercall.args[0] = kvm_read_reg(vcpu, LOONGARCH_GPR_A0); + vcpu->run->hypercall.args[1] = kvm_read_reg(vcpu, LOONGARCH_GPR_A1); + vcpu->run->hypercall.args[2] = kvm_read_reg(vcpu, LOONGARCH_GPR_A2); + vcpu->run->hypercall.args[3] = kvm_read_reg(vcpu, LOONGARCH_GPR_A3); + vcpu->run->hypercall.args[4] = kvm_read_reg(vcpu, LOONGARCH_GPR_A4); + vcpu->run->hypercall.args[5] = kvm_read_reg(vcpu, LOONGARCH_GPR_A5); + vcpu->run->hypercall.flags = 0; + /* + * Set invalid return value by default, let user-mode VMM modify it. + */ + vcpu->run->hypercall.ret = KVM_HCALL_INVALID_CODE; + ret = RESUME_HOST; + break; case KVM_HCALL_SWDBG: /* KVM_HCALL_SWDBG only in effective when SW_BP is enabled */ if (vcpu->guest_debug & KVM_GUESTDBG_SW_BP_MASK) { @@ -851,16 +935,14 @@ static int kvm_handle_hypercall(struct kvm_vcpu *vcpu) /* * LoongArch KVM callback handling for unimplemented guest exiting */ -static int kvm_fault_ni(struct kvm_vcpu *vcpu) +static int kvm_fault_ni(struct kvm_vcpu *vcpu, int ecode) { - unsigned int ecode, inst; - unsigned long estat, badv; + unsigned int inst; + unsigned long badv; /* Fetch the instruction */ inst = vcpu->arch.badi; badv = vcpu->arch.badv; - estat = vcpu->arch.host_estat; - ecode = (estat & CSR_ESTAT_EXC) >> CSR_ESTAT_EXC_SHIFT; kvm_err("ECode: %d PC=%#lx Inst=0x%08x BadVaddr=%#lx ESTAT=%#lx\n", ecode, vcpu->arch.pc, inst, badv, read_gcsr_estat()); kvm_arch_vcpu_dump_regs(vcpu); @@ -885,5 +967,5 @@ static exit_handle_fn kvm_fault_tables[EXCCODE_INT_START] = { int kvm_handle_fault(struct kvm_vcpu *vcpu, int fault) { - return kvm_fault_tables[fault](vcpu); + return kvm_fault_tables[fault](vcpu, fault); } diff --git a/arch/loongarch/kvm/intc/eiointc.c b/arch/loongarch/kvm/intc/eiointc.c new file mode 100644 index 000000000000..a75f865d6fb9 --- /dev/null +++ b/arch/loongarch/kvm/intc/eiointc.c @@ -0,0 +1,1060 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2024 Loongson Technology Corporation Limited + */ + +#include <asm/kvm_eiointc.h> +#include <asm/kvm_vcpu.h> +#include <linux/count_zeros.h> + +static void eiointc_set_sw_coreisr(struct loongarch_eiointc *s) +{ + int ipnum, cpu, cpuid, irq_index, irq_mask, irq; + struct kvm_vcpu *vcpu; + + for (irq = 0; irq < EIOINTC_IRQS; irq++) { + ipnum = s->ipmap.reg_u8[irq / 32]; + if (!(s->status & 
BIT(EIOINTC_ENABLE_INT_ENCODE))) { + ipnum = count_trailing_zeros(ipnum); + ipnum = (ipnum >= 0 && ipnum < 4) ? ipnum : 0; + } + irq_index = irq / 32; + irq_mask = BIT(irq & 0x1f); + + cpuid = s->coremap.reg_u8[irq]; + vcpu = kvm_get_vcpu_by_cpuid(s->kvm, cpuid); + if (!vcpu) + continue; + + cpu = vcpu->vcpu_id; + if (!!(s->coreisr.reg_u32[cpu][irq_index] & irq_mask)) + set_bit(irq, s->sw_coreisr[cpu][ipnum]); + else + clear_bit(irq, s->sw_coreisr[cpu][ipnum]); + } +} + +static void eiointc_update_irq(struct loongarch_eiointc *s, int irq, int level) +{ + int ipnum, cpu, found, irq_index, irq_mask; + struct kvm_vcpu *vcpu; + struct kvm_interrupt vcpu_irq; + + ipnum = s->ipmap.reg_u8[irq / 32]; + if (!(s->status & BIT(EIOINTC_ENABLE_INT_ENCODE))) { + ipnum = count_trailing_zeros(ipnum); + ipnum = (ipnum >= 0 && ipnum < 4) ? ipnum : 0; + } + + cpu = s->sw_coremap[irq]; + vcpu = kvm_get_vcpu(s->kvm, cpu); + irq_index = irq / 32; + irq_mask = BIT(irq & 0x1f); + + if (level) { + /* if not enable return false */ + if (((s->enable.reg_u32[irq_index]) & irq_mask) == 0) + return; + s->coreisr.reg_u32[cpu][irq_index] |= irq_mask; + found = find_first_bit(s->sw_coreisr[cpu][ipnum], EIOINTC_IRQS); + set_bit(irq, s->sw_coreisr[cpu][ipnum]); + } else { + s->coreisr.reg_u32[cpu][irq_index] &= ~irq_mask; + clear_bit(irq, s->sw_coreisr[cpu][ipnum]); + found = find_first_bit(s->sw_coreisr[cpu][ipnum], EIOINTC_IRQS); + } + + if (found < EIOINTC_IRQS) + return; /* other irq is handling, needn't update parent irq */ + + vcpu_irq.irq = level ? (INT_HWI0 + ipnum) : -(INT_HWI0 + ipnum); + kvm_vcpu_ioctl_interrupt(vcpu, &vcpu_irq); +} + +static inline void eiointc_update_sw_coremap(struct loongarch_eiointc *s, + int irq, u64 val, u32 len, bool notify) +{ + int i, cpu, cpuid; + struct kvm_vcpu *vcpu; + + for (i = 0; i < len; i++) { + cpuid = val & 0xff; + val = val >> 8; + + if (!(s->status & BIT(EIOINTC_ENABLE_CPU_ENCODE))) { + cpuid = ffs(cpuid) - 1; + cpuid = (cpuid >= 4) ? 0 : cpuid; + } + + vcpu = kvm_get_vcpu_by_cpuid(s->kvm, cpuid); + if (!vcpu) + continue; + + cpu = vcpu->vcpu_id; + if (s->sw_coremap[irq + i] == cpu) + continue; + + if (notify && test_bit(irq + i, (unsigned long *)s->isr.reg_u8)) { + /* lower irq at old cpu and raise irq at new cpu */ + eiointc_update_irq(s, irq + i, 0); + s->sw_coremap[irq + i] = cpu; + eiointc_update_irq(s, irq + i, 1); + } else { + s->sw_coremap[irq + i] = cpu; + } + } +} + +void eiointc_set_irq(struct loongarch_eiointc *s, int irq, int level) +{ + unsigned long flags; + unsigned long *isr = (unsigned long *)s->isr.reg_u8; + + level ? set_bit(irq, isr) : clear_bit(irq, isr); + spin_lock_irqsave(&s->lock, flags); + eiointc_update_irq(s, irq, level); + spin_unlock_irqrestore(&s->lock, flags); +} + +static inline void eiointc_enable_irq(struct kvm_vcpu *vcpu, + struct loongarch_eiointc *s, int index, u8 mask, int level) +{ + u8 val; + int irq; + + val = mask & s->isr.reg_u8[index]; + irq = ffs(val); + while (irq != 0) { + /* + * enable bit change from 0 to 1, + * need to update irq by pending bits + */ + eiointc_update_irq(s, irq - 1 + index * 8, level); + val &= ~BIT(irq - 1); + irq = ffs(val); + } +} + +static int loongarch_eiointc_readb(struct kvm_vcpu *vcpu, struct loongarch_eiointc *s, + gpa_t addr, int len, void *val) +{ + int index, ret = 0; + u8 data = 0; + gpa_t offset; + + offset = addr - EIOINTC_BASE; + switch (offset) { + case EIOINTC_NODETYPE_START ... 
EIOINTC_NODETYPE_END: + index = offset - EIOINTC_NODETYPE_START; + data = s->nodetype.reg_u8[index]; + break; + case EIOINTC_IPMAP_START ... EIOINTC_IPMAP_END: + index = offset - EIOINTC_IPMAP_START; + data = s->ipmap.reg_u8[index]; + break; + case EIOINTC_ENABLE_START ... EIOINTC_ENABLE_END: + index = offset - EIOINTC_ENABLE_START; + data = s->enable.reg_u8[index]; + break; + case EIOINTC_BOUNCE_START ... EIOINTC_BOUNCE_END: + index = offset - EIOINTC_BOUNCE_START; + data = s->bounce.reg_u8[index]; + break; + case EIOINTC_COREISR_START ... EIOINTC_COREISR_END: + index = offset - EIOINTC_COREISR_START; + data = s->coreisr.reg_u8[vcpu->vcpu_id][index]; + break; + case EIOINTC_COREMAP_START ... EIOINTC_COREMAP_END: + index = offset - EIOINTC_COREMAP_START; + data = s->coremap.reg_u8[index]; + break; + default: + ret = -EINVAL; + break; + } + *(u8 *)val = data; + + return ret; +} + +static int loongarch_eiointc_readw(struct kvm_vcpu *vcpu, struct loongarch_eiointc *s, + gpa_t addr, int len, void *val) +{ + int index, ret = 0; + u16 data = 0; + gpa_t offset; + + offset = addr - EIOINTC_BASE; + switch (offset) { + case EIOINTC_NODETYPE_START ... EIOINTC_NODETYPE_END: + index = (offset - EIOINTC_NODETYPE_START) >> 1; + data = s->nodetype.reg_u16[index]; + break; + case EIOINTC_IPMAP_START ... EIOINTC_IPMAP_END: + index = (offset - EIOINTC_IPMAP_START) >> 1; + data = s->ipmap.reg_u16[index]; + break; + case EIOINTC_ENABLE_START ... EIOINTC_ENABLE_END: + index = (offset - EIOINTC_ENABLE_START) >> 1; + data = s->enable.reg_u16[index]; + break; + case EIOINTC_BOUNCE_START ... EIOINTC_BOUNCE_END: + index = (offset - EIOINTC_BOUNCE_START) >> 1; + data = s->bounce.reg_u16[index]; + break; + case EIOINTC_COREISR_START ... EIOINTC_COREISR_END: + index = (offset - EIOINTC_COREISR_START) >> 1; + data = s->coreisr.reg_u16[vcpu->vcpu_id][index]; + break; + case EIOINTC_COREMAP_START ... EIOINTC_COREMAP_END: + index = (offset - EIOINTC_COREMAP_START) >> 1; + data = s->coremap.reg_u16[index]; + break; + default: + ret = -EINVAL; + break; + } + *(u16 *)val = data; + + return ret; +} + +static int loongarch_eiointc_readl(struct kvm_vcpu *vcpu, struct loongarch_eiointc *s, + gpa_t addr, int len, void *val) +{ + int index, ret = 0; + u32 data = 0; + gpa_t offset; + + offset = addr - EIOINTC_BASE; + switch (offset) { + case EIOINTC_NODETYPE_START ... EIOINTC_NODETYPE_END: + index = (offset - EIOINTC_NODETYPE_START) >> 2; + data = s->nodetype.reg_u32[index]; + break; + case EIOINTC_IPMAP_START ... EIOINTC_IPMAP_END: + index = (offset - EIOINTC_IPMAP_START) >> 2; + data = s->ipmap.reg_u32[index]; + break; + case EIOINTC_ENABLE_START ... EIOINTC_ENABLE_END: + index = (offset - EIOINTC_ENABLE_START) >> 2; + data = s->enable.reg_u32[index]; + break; + case EIOINTC_BOUNCE_START ... EIOINTC_BOUNCE_END: + index = (offset - EIOINTC_BOUNCE_START) >> 2; + data = s->bounce.reg_u32[index]; + break; + case EIOINTC_COREISR_START ... EIOINTC_COREISR_END: + index = (offset - EIOINTC_COREISR_START) >> 2; + data = s->coreisr.reg_u32[vcpu->vcpu_id][index]; + break; + case EIOINTC_COREMAP_START ... 
EIOINTC_COREMAP_END: + index = (offset - EIOINTC_COREMAP_START) >> 2; + data = s->coremap.reg_u32[index]; + break; + default: + ret = -EINVAL; + break; + } + *(u32 *)val = data; + + return ret; +} + +static int loongarch_eiointc_readq(struct kvm_vcpu *vcpu, struct loongarch_eiointc *s, + gpa_t addr, int len, void *val) +{ + int index, ret = 0; + u64 data = 0; + gpa_t offset; + + offset = addr - EIOINTC_BASE; + switch (offset) { + case EIOINTC_NODETYPE_START ... EIOINTC_NODETYPE_END: + index = (offset - EIOINTC_NODETYPE_START) >> 3; + data = s->nodetype.reg_u64[index]; + break; + case EIOINTC_IPMAP_START ... EIOINTC_IPMAP_END: + index = (offset - EIOINTC_IPMAP_START) >> 3; + data = s->ipmap.reg_u64; + break; + case EIOINTC_ENABLE_START ... EIOINTC_ENABLE_END: + index = (offset - EIOINTC_ENABLE_START) >> 3; + data = s->enable.reg_u64[index]; + break; + case EIOINTC_BOUNCE_START ... EIOINTC_BOUNCE_END: + index = (offset - EIOINTC_BOUNCE_START) >> 3; + data = s->bounce.reg_u64[index]; + break; + case EIOINTC_COREISR_START ... EIOINTC_COREISR_END: + index = (offset - EIOINTC_COREISR_START) >> 3; + data = s->coreisr.reg_u64[vcpu->vcpu_id][index]; + break; + case EIOINTC_COREMAP_START ... EIOINTC_COREMAP_END: + index = (offset - EIOINTC_COREMAP_START) >> 3; + data = s->coremap.reg_u64[index]; + break; + default: + ret = -EINVAL; + break; + } + *(u64 *)val = data; + + return ret; +} + +static int kvm_eiointc_read(struct kvm_vcpu *vcpu, + struct kvm_io_device *dev, + gpa_t addr, int len, void *val) +{ + int ret = -EINVAL; + unsigned long flags; + struct loongarch_eiointc *eiointc = vcpu->kvm->arch.eiointc; + + if (!eiointc) { + kvm_err("%s: eiointc irqchip not valid!\n", __func__); + return -EINVAL; + } + + if (addr & (len - 1)) { + kvm_err("%s: eiointc not aligned addr %llx len %d\n", __func__, addr, len); + return -EINVAL; + } + + vcpu->kvm->stat.eiointc_read_exits++; + spin_lock_irqsave(&eiointc->lock, flags); + switch (len) { + case 1: + ret = loongarch_eiointc_readb(vcpu, eiointc, addr, len, val); + break; + case 2: + ret = loongarch_eiointc_readw(vcpu, eiointc, addr, len, val); + break; + case 4: + ret = loongarch_eiointc_readl(vcpu, eiointc, addr, len, val); + break; + case 8: + ret = loongarch_eiointc_readq(vcpu, eiointc, addr, len, val); + break; + default: + WARN_ONCE(1, "%s: Abnormal address access: addr 0x%llx, size %d\n", + __func__, addr, len); + } + spin_unlock_irqrestore(&eiointc->lock, flags); + + return ret; +} + +static int loongarch_eiointc_writeb(struct kvm_vcpu *vcpu, + struct loongarch_eiointc *s, + gpa_t addr, int len, const void *val) +{ + int index, irq, bits, ret = 0; + u8 cpu; + u8 data, old_data; + u8 coreisr, old_coreisr; + gpa_t offset; + + data = *(u8 *)val; + offset = addr - EIOINTC_BASE; + + switch (offset) { + case EIOINTC_NODETYPE_START ... EIOINTC_NODETYPE_END: + index = (offset - EIOINTC_NODETYPE_START); + s->nodetype.reg_u8[index] = data; + break; + case EIOINTC_IPMAP_START ... EIOINTC_IPMAP_END: + /* + * ipmap cannot be set at runtime, can be set only at the beginning + * of irqchip driver, need not update upper irq level + */ + index = (offset - EIOINTC_IPMAP_START); + s->ipmap.reg_u8[index] = data; + break; + case EIOINTC_ENABLE_START ... EIOINTC_ENABLE_END: + index = (offset - EIOINTC_ENABLE_START); + old_data = s->enable.reg_u8[index]; + s->enable.reg_u8[index] = data; + /* + * 1: enable irq. + * update irq when isr is set. 
+ */ + data = s->enable.reg_u8[index] & ~old_data & s->isr.reg_u8[index]; + eiointc_enable_irq(vcpu, s, index, data, 1); + /* + * 0: disable irq. + * update irq when isr is set. + */ + data = ~s->enable.reg_u8[index] & old_data & s->isr.reg_u8[index]; + eiointc_enable_irq(vcpu, s, index, data, 0); + break; + case EIOINTC_BOUNCE_START ... EIOINTC_BOUNCE_END: + /* do not emulate hw bounced irq routing */ + index = offset - EIOINTC_BOUNCE_START; + s->bounce.reg_u8[index] = data; + break; + case EIOINTC_COREISR_START ... EIOINTC_COREISR_END: + index = (offset - EIOINTC_COREISR_START); + /* use attrs to get current cpu index */ + cpu = vcpu->vcpu_id; + coreisr = data; + old_coreisr = s->coreisr.reg_u8[cpu][index]; + /* write 1 to clear interrupt */ + s->coreisr.reg_u8[cpu][index] = old_coreisr & ~coreisr; + coreisr &= old_coreisr; + bits = sizeof(data) * 8; + irq = find_first_bit((void *)&coreisr, bits); + while (irq < bits) { + eiointc_update_irq(s, irq + index * bits, 0); + bitmap_clear((void *)&coreisr, irq, 1); + irq = find_first_bit((void *)&coreisr, bits); + } + break; + case EIOINTC_COREMAP_START ... EIOINTC_COREMAP_END: + irq = offset - EIOINTC_COREMAP_START; + index = irq; + s->coremap.reg_u8[index] = data; + eiointc_update_sw_coremap(s, irq, data, sizeof(data), true); + break; + default: + ret = -EINVAL; + break; + } + + return ret; +} + +static int loongarch_eiointc_writew(struct kvm_vcpu *vcpu, + struct loongarch_eiointc *s, + gpa_t addr, int len, const void *val) +{ + int i, index, irq, bits, ret = 0; + u8 cpu; + u16 data, old_data; + u16 coreisr, old_coreisr; + gpa_t offset; + + data = *(u16 *)val; + offset = addr - EIOINTC_BASE; + + switch (offset) { + case EIOINTC_NODETYPE_START ... EIOINTC_NODETYPE_END: + index = (offset - EIOINTC_NODETYPE_START) >> 1; + s->nodetype.reg_u16[index] = data; + break; + case EIOINTC_IPMAP_START ... EIOINTC_IPMAP_END: + /* + * ipmap cannot be set at runtime, can be set only at the beginning + * of irqchip driver, need not update upper irq level + */ + index = (offset - EIOINTC_IPMAP_START) >> 1; + s->ipmap.reg_u16[index] = data; + break; + case EIOINTC_ENABLE_START ... EIOINTC_ENABLE_END: + index = (offset - EIOINTC_ENABLE_START) >> 1; + old_data = s->enable.reg_u16[index]; + s->enable.reg_u16[index] = data; + /* + * 1: enable irq. + * update irq when isr is set. + */ + data = s->enable.reg_u16[index] & ~old_data & s->isr.reg_u16[index]; + for (i = 0; i < sizeof(data); i++) { + u8 mask = (data >> (i * 8)) & 0xff; + eiointc_enable_irq(vcpu, s, index * 2 + i, mask, 1); + } + /* + * 0: disable irq. + * update irq when isr is set. + */ + data = ~s->enable.reg_u16[index] & old_data & s->isr.reg_u16[index]; + for (i = 0; i < sizeof(data); i++) { + u8 mask = (data >> (i * 8)) & 0xff; + eiointc_enable_irq(vcpu, s, index * 2 + i, mask, 0); + } + break; + case EIOINTC_BOUNCE_START ... EIOINTC_BOUNCE_END: + /* do not emulate hw bounced irq routing */ + index = (offset - EIOINTC_BOUNCE_START) >> 1; + s->bounce.reg_u16[index] = data; + break; + case EIOINTC_COREISR_START ... 
EIOINTC_COREISR_END: + index = (offset - EIOINTC_COREISR_START) >> 1; + /* use attrs to get current cpu index */ + cpu = vcpu->vcpu_id; + coreisr = data; + old_coreisr = s->coreisr.reg_u16[cpu][index]; + /* write 1 to clear interrupt */ + s->coreisr.reg_u16[cpu][index] = old_coreisr & ~coreisr; + coreisr &= old_coreisr; + bits = sizeof(data) * 8; + irq = find_first_bit((void *)&coreisr, bits); + while (irq < bits) { + eiointc_update_irq(s, irq + index * bits, 0); + bitmap_clear((void *)&coreisr, irq, 1); + irq = find_first_bit((void *)&coreisr, bits); + } + break; + case EIOINTC_COREMAP_START ... EIOINTC_COREMAP_END: + irq = offset - EIOINTC_COREMAP_START; + index = irq >> 1; + s->coremap.reg_u16[index] = data; + eiointc_update_sw_coremap(s, irq, data, sizeof(data), true); + break; + default: + ret = -EINVAL; + break; + } + + return ret; +} + +static int loongarch_eiointc_writel(struct kvm_vcpu *vcpu, + struct loongarch_eiointc *s, + gpa_t addr, int len, const void *val) +{ + int i, index, irq, bits, ret = 0; + u8 cpu; + u32 data, old_data; + u32 coreisr, old_coreisr; + gpa_t offset; + + data = *(u32 *)val; + offset = addr - EIOINTC_BASE; + + switch (offset) { + case EIOINTC_NODETYPE_START ... EIOINTC_NODETYPE_END: + index = (offset - EIOINTC_NODETYPE_START) >> 2; + s->nodetype.reg_u32[index] = data; + break; + case EIOINTC_IPMAP_START ... EIOINTC_IPMAP_END: + /* + * ipmap cannot be set at runtime, can be set only at the beginning + * of irqchip driver, need not update upper irq level + */ + index = (offset - EIOINTC_IPMAP_START) >> 2; + s->ipmap.reg_u32[index] = data; + break; + case EIOINTC_ENABLE_START ... EIOINTC_ENABLE_END: + index = (offset - EIOINTC_ENABLE_START) >> 2; + old_data = s->enable.reg_u32[index]; + s->enable.reg_u32[index] = data; + /* + * 1: enable irq. + * update irq when isr is set. + */ + data = s->enable.reg_u32[index] & ~old_data & s->isr.reg_u32[index]; + for (i = 0; i < sizeof(data); i++) { + u8 mask = (data >> (i * 8)) & 0xff; + eiointc_enable_irq(vcpu, s, index * 4 + i, mask, 1); + } + /* + * 0: disable irq. + * update irq when isr is set. + */ + data = ~s->enable.reg_u32[index] & old_data & s->isr.reg_u32[index]; + for (i = 0; i < sizeof(data); i++) { + u8 mask = (data >> (i * 8)) & 0xff; + eiointc_enable_irq(vcpu, s, index * 4 + i, mask, 0); + } + break; + case EIOINTC_BOUNCE_START ... EIOINTC_BOUNCE_END: + /* do not emulate hw bounced irq routing */ + index = (offset - EIOINTC_BOUNCE_START) >> 2; + s->bounce.reg_u32[index] = data; + break; + case EIOINTC_COREISR_START ... EIOINTC_COREISR_END: + index = (offset - EIOINTC_COREISR_START) >> 2; + /* use attrs to get current cpu index */ + cpu = vcpu->vcpu_id; + coreisr = data; + old_coreisr = s->coreisr.reg_u32[cpu][index]; + /* write 1 to clear interrupt */ + s->coreisr.reg_u32[cpu][index] = old_coreisr & ~coreisr; + coreisr &= old_coreisr; + bits = sizeof(data) * 8; + irq = find_first_bit((void *)&coreisr, bits); + while (irq < bits) { + eiointc_update_irq(s, irq + index * bits, 0); + bitmap_clear((void *)&coreisr, irq, 1); + irq = find_first_bit((void *)&coreisr, bits); + } + break; + case EIOINTC_COREMAP_START ... 
EIOINTC_COREMAP_END: + irq = offset - EIOINTC_COREMAP_START; + index = irq >> 2; + s->coremap.reg_u32[index] = data; + eiointc_update_sw_coremap(s, irq, data, sizeof(data), true); + break; + default: + ret = -EINVAL; + break; + } + + return ret; +} + +static int loongarch_eiointc_writeq(struct kvm_vcpu *vcpu, + struct loongarch_eiointc *s, + gpa_t addr, int len, const void *val) +{ + int i, index, irq, bits, ret = 0; + u8 cpu; + u64 data, old_data; + u64 coreisr, old_coreisr; + gpa_t offset; + + data = *(u64 *)val; + offset = addr - EIOINTC_BASE; + + switch (offset) { + case EIOINTC_NODETYPE_START ... EIOINTC_NODETYPE_END: + index = (offset - EIOINTC_NODETYPE_START) >> 3; + s->nodetype.reg_u64[index] = data; + break; + case EIOINTC_IPMAP_START ... EIOINTC_IPMAP_END: + /* + * ipmap cannot be set at runtime, can be set only at the beginning + * of irqchip driver, need not update upper irq level + */ + index = (offset - EIOINTC_IPMAP_START) >> 3; + s->ipmap.reg_u64 = data; + break; + case EIOINTC_ENABLE_START ... EIOINTC_ENABLE_END: + index = (offset - EIOINTC_ENABLE_START) >> 3; + old_data = s->enable.reg_u64[index]; + s->enable.reg_u64[index] = data; + /* + * 1: enable irq. + * update irq when isr is set. + */ + data = s->enable.reg_u64[index] & ~old_data & s->isr.reg_u64[index]; + for (i = 0; i < sizeof(data); i++) { + u8 mask = (data >> (i * 8)) & 0xff; + eiointc_enable_irq(vcpu, s, index * 8 + i, mask, 1); + } + /* + * 0: disable irq. + * update irq when isr is set. + */ + data = ~s->enable.reg_u64[index] & old_data & s->isr.reg_u64[index]; + for (i = 0; i < sizeof(data); i++) { + u8 mask = (data >> (i * 8)) & 0xff; + eiointc_enable_irq(vcpu, s, index * 8 + i, mask, 0); + } + break; + case EIOINTC_BOUNCE_START ... EIOINTC_BOUNCE_END: + /* do not emulate hw bounced irq routing */ + index = (offset - EIOINTC_BOUNCE_START) >> 3; + s->bounce.reg_u64[index] = data; + break; + case EIOINTC_COREISR_START ... EIOINTC_COREISR_END: + index = (offset - EIOINTC_COREISR_START) >> 3; + /* use attrs to get current cpu index */ + cpu = vcpu->vcpu_id; + coreisr = data; + old_coreisr = s->coreisr.reg_u64[cpu][index]; + /* write 1 to clear interrupt */ + s->coreisr.reg_u64[cpu][index] = old_coreisr & ~coreisr; + coreisr &= old_coreisr; + bits = sizeof(data) * 8; + irq = find_first_bit((void *)&coreisr, bits); + while (irq < bits) { + eiointc_update_irq(s, irq + index * bits, 0); + bitmap_clear((void *)&coreisr, irq, 1); + irq = find_first_bit((void *)&coreisr, bits); + } + break; + case EIOINTC_COREMAP_START ... 
EIOINTC_COREMAP_END: + irq = offset - EIOINTC_COREMAP_START; + index = irq >> 3; + s->coremap.reg_u64[index] = data; + eiointc_update_sw_coremap(s, irq, data, sizeof(data), true); + break; + default: + ret = -EINVAL; + break; + } + + return ret; +} + +static int kvm_eiointc_write(struct kvm_vcpu *vcpu, + struct kvm_io_device *dev, + gpa_t addr, int len, const void *val) +{ + int ret = -EINVAL; + unsigned long flags; + struct loongarch_eiointc *eiointc = vcpu->kvm->arch.eiointc; + + if (!eiointc) { + kvm_err("%s: eiointc irqchip not valid!\n", __func__); + return -EINVAL; + } + + if (addr & (len - 1)) { + kvm_err("%s: eiointc not aligned addr %llx len %d\n", __func__, addr, len); + return -EINVAL; + } + + vcpu->kvm->stat.eiointc_write_exits++; + spin_lock_irqsave(&eiointc->lock, flags); + switch (len) { + case 1: + ret = loongarch_eiointc_writeb(vcpu, eiointc, addr, len, val); + break; + case 2: + ret = loongarch_eiointc_writew(vcpu, eiointc, addr, len, val); + break; + case 4: + ret = loongarch_eiointc_writel(vcpu, eiointc, addr, len, val); + break; + case 8: + ret = loongarch_eiointc_writeq(vcpu, eiointc, addr, len, val); + break; + default: + WARN_ONCE(1, "%s: Abnormal address access: addr 0x%llx, size %d\n", + __func__, addr, len); + } + spin_unlock_irqrestore(&eiointc->lock, flags); + + return ret; +} + +static const struct kvm_io_device_ops kvm_eiointc_ops = { + .read = kvm_eiointc_read, + .write = kvm_eiointc_write, +}; + +static int kvm_eiointc_virt_read(struct kvm_vcpu *vcpu, + struct kvm_io_device *dev, + gpa_t addr, int len, void *val) +{ + unsigned long flags; + u32 *data = val; + struct loongarch_eiointc *eiointc = vcpu->kvm->arch.eiointc; + + if (!eiointc) { + kvm_err("%s: eiointc irqchip not valid!\n", __func__); + return -EINVAL; + } + + addr -= EIOINTC_VIRT_BASE; + spin_lock_irqsave(&eiointc->lock, flags); + switch (addr) { + case EIOINTC_VIRT_FEATURES: + *data = eiointc->features; + break; + case EIOINTC_VIRT_CONFIG: + *data = eiointc->status; + break; + default: + break; + } + spin_unlock_irqrestore(&eiointc->lock, flags); + + return 0; +} + +static int kvm_eiointc_virt_write(struct kvm_vcpu *vcpu, + struct kvm_io_device *dev, + gpa_t addr, int len, const void *val) +{ + int ret = 0; + unsigned long flags; + u32 value = *(u32 *)val; + struct loongarch_eiointc *eiointc = vcpu->kvm->arch.eiointc; + + if (!eiointc) { + kvm_err("%s: eiointc irqchip not valid!\n", __func__); + return -EINVAL; + } + + addr -= EIOINTC_VIRT_BASE; + spin_lock_irqsave(&eiointc->lock, flags); + switch (addr) { + case EIOINTC_VIRT_FEATURES: + ret = -EPERM; + break; + case EIOINTC_VIRT_CONFIG: + /* + * eiointc features can only be set at disabled status + */ + if ((eiointc->status & BIT(EIOINTC_ENABLE)) && value) { + ret = -EPERM; + break; + } + eiointc->status = value & eiointc->features; + break; + default: + break; + } + spin_unlock_irqrestore(&eiointc->lock, flags); + + return ret; +} + +static const struct kvm_io_device_ops kvm_eiointc_virt_ops = { + .read = kvm_eiointc_virt_read, + .write = kvm_eiointc_virt_write, +}; + +static int kvm_eiointc_ctrl_access(struct kvm_device *dev, + struct kvm_device_attr *attr) +{ + int ret = 0; + unsigned long flags; + unsigned long type = (unsigned long)attr->attr; + u32 i, start_irq, val; + void __user *data; + struct loongarch_eiointc *s = dev->kvm->arch.eiointc; + + data = (void __user *)attr->addr; + spin_lock_irqsave(&s->lock, flags); + switch (type) { + case KVM_DEV_LOONGARCH_EXTIOI_CTRL_INIT_NUM_CPU: + if (copy_from_user(&val, data, 4)) + ret = -EFAULT; 
+ else { + if (val >= EIOINTC_ROUTE_MAX_VCPUS) + ret = -EINVAL; + else + s->num_cpu = val; + } + break; + case KVM_DEV_LOONGARCH_EXTIOI_CTRL_INIT_FEATURE: + if (copy_from_user(&s->features, data, 4)) + ret = -EFAULT; + if (!(s->features & BIT(EIOINTC_HAS_VIRT_EXTENSION))) + s->status |= BIT(EIOINTC_ENABLE); + break; + case KVM_DEV_LOONGARCH_EXTIOI_CTRL_LOAD_FINISHED: + eiointc_set_sw_coreisr(s); + for (i = 0; i < (EIOINTC_IRQS / 4); i++) { + start_irq = i * 4; + eiointc_update_sw_coremap(s, start_irq, + s->coremap.reg_u32[i], sizeof(u32), false); + } + break; + default: + break; + } + spin_unlock_irqrestore(&s->lock, flags); + + return ret; +} + +static int kvm_eiointc_regs_access(struct kvm_device *dev, + struct kvm_device_attr *attr, + bool is_write) +{ + int addr, cpu, offset, ret = 0; + unsigned long flags; + void *p = NULL; + void __user *data; + struct loongarch_eiointc *s; + + s = dev->kvm->arch.eiointc; + addr = attr->attr; + cpu = addr >> 16; + addr &= 0xffff; + data = (void __user *)attr->addr; + switch (addr) { + case EIOINTC_NODETYPE_START ... EIOINTC_NODETYPE_END: + offset = (addr - EIOINTC_NODETYPE_START) / 4; + p = &s->nodetype.reg_u32[offset]; + break; + case EIOINTC_IPMAP_START ... EIOINTC_IPMAP_END: + offset = (addr - EIOINTC_IPMAP_START) / 4; + p = &s->ipmap.reg_u32[offset]; + break; + case EIOINTC_ENABLE_START ... EIOINTC_ENABLE_END: + offset = (addr - EIOINTC_ENABLE_START) / 4; + p = &s->enable.reg_u32[offset]; + break; + case EIOINTC_BOUNCE_START ... EIOINTC_BOUNCE_END: + offset = (addr - EIOINTC_BOUNCE_START) / 4; + p = &s->bounce.reg_u32[offset]; + break; + case EIOINTC_ISR_START ... EIOINTC_ISR_END: + offset = (addr - EIOINTC_ISR_START) / 4; + p = &s->isr.reg_u32[offset]; + break; + case EIOINTC_COREISR_START ... EIOINTC_COREISR_END: + if (cpu >= s->num_cpu) + return -EINVAL; + + offset = (addr - EIOINTC_COREISR_START) / 4; + p = &s->coreisr.reg_u32[cpu][offset]; + break; + case EIOINTC_COREMAP_START ... 
EIOINTC_COREMAP_END: + offset = (addr - EIOINTC_COREMAP_START) / 4; + p = &s->coremap.reg_u32[offset]; + break; + default: + kvm_err("%s: unknown eiointc register, addr = %d\n", __func__, addr); + return -EINVAL; + } + + spin_lock_irqsave(&s->lock, flags); + if (is_write) { + if (copy_from_user(p, data, 4)) + ret = -EFAULT; + } else { + if (copy_to_user(data, p, 4)) + ret = -EFAULT; + } + spin_unlock_irqrestore(&s->lock, flags); + + return ret; +} + +static int kvm_eiointc_sw_status_access(struct kvm_device *dev, + struct kvm_device_attr *attr, + bool is_write) +{ + int addr, ret = 0; + unsigned long flags; + void *p = NULL; + void __user *data; + struct loongarch_eiointc *s; + + s = dev->kvm->arch.eiointc; + addr = attr->attr; + addr &= 0xffff; + + data = (void __user *)attr->addr; + switch (addr) { + case KVM_DEV_LOONGARCH_EXTIOI_SW_STATUS_NUM_CPU: + if (is_write) + return ret; + + p = &s->num_cpu; + break; + case KVM_DEV_LOONGARCH_EXTIOI_SW_STATUS_FEATURE: + if (is_write) + return ret; + + p = &s->features; + break; + case KVM_DEV_LOONGARCH_EXTIOI_SW_STATUS_STATE: + p = &s->status; + break; + default: + kvm_err("%s: unknown eiointc register, addr = %d\n", __func__, addr); + return -EINVAL; + } + spin_lock_irqsave(&s->lock, flags); + if (is_write) { + if (copy_from_user(p, data, 4)) + ret = -EFAULT; + } else { + if (copy_to_user(data, p, 4)) + ret = -EFAULT; + } + spin_unlock_irqrestore(&s->lock, flags); + + return ret; +} + +static int kvm_eiointc_get_attr(struct kvm_device *dev, + struct kvm_device_attr *attr) +{ + switch (attr->group) { + case KVM_DEV_LOONGARCH_EXTIOI_GRP_REGS: + return kvm_eiointc_regs_access(dev, attr, false); + case KVM_DEV_LOONGARCH_EXTIOI_GRP_SW_STATUS: + return kvm_eiointc_sw_status_access(dev, attr, false); + default: + return -EINVAL; + } +} + +static int kvm_eiointc_set_attr(struct kvm_device *dev, + struct kvm_device_attr *attr) +{ + switch (attr->group) { + case KVM_DEV_LOONGARCH_EXTIOI_GRP_CTRL: + return kvm_eiointc_ctrl_access(dev, attr); + case KVM_DEV_LOONGARCH_EXTIOI_GRP_REGS: + return kvm_eiointc_regs_access(dev, attr, true); + case KVM_DEV_LOONGARCH_EXTIOI_GRP_SW_STATUS: + return kvm_eiointc_sw_status_access(dev, attr, true); + default: + return -EINVAL; + } +} + +static int kvm_eiointc_create(struct kvm_device *dev, u32 type) +{ + int ret; + struct loongarch_eiointc *s; + struct kvm_io_device *device, *device1; + struct kvm *kvm = dev->kvm; + + /* eiointc has been created */ + if (kvm->arch.eiointc) + return -EINVAL; + + s = kzalloc(sizeof(struct loongarch_eiointc), GFP_KERNEL); + if (!s) + return -ENOMEM; + + spin_lock_init(&s->lock); + s->kvm = kvm; + + /* + * Initialize IOCSR device + */ + device = &s->device; + kvm_iodevice_init(device, &kvm_eiointc_ops); + mutex_lock(&kvm->slots_lock); + ret = kvm_io_bus_register_dev(kvm, KVM_IOCSR_BUS, + EIOINTC_BASE, EIOINTC_SIZE, device); + mutex_unlock(&kvm->slots_lock); + if (ret < 0) { + kfree(s); + return ret; + } + + device1 = &s->device_vext; + kvm_iodevice_init(device1, &kvm_eiointc_virt_ops); + ret = kvm_io_bus_register_dev(kvm, KVM_IOCSR_BUS, + EIOINTC_VIRT_BASE, EIOINTC_VIRT_SIZE, device1); + if (ret < 0) { + kvm_io_bus_unregister_dev(kvm, KVM_IOCSR_BUS, &s->device); + kfree(s); + return ret; + } + kvm->arch.eiointc = s; + + return 0; +} + +static void kvm_eiointc_destroy(struct kvm_device *dev) +{ + struct kvm *kvm; + struct loongarch_eiointc *eiointc; + + if (!dev || !dev->kvm || !dev->kvm->arch.eiointc) + return; + + kvm = dev->kvm; + eiointc = kvm->arch.eiointc; + kvm_io_bus_unregister_dev(kvm, 
KVM_IOCSR_BUS, &eiointc->device); + kvm_io_bus_unregister_dev(kvm, KVM_IOCSR_BUS, &eiointc->device_vext); + kfree(eiointc); +} + +static struct kvm_device_ops kvm_eiointc_dev_ops = { + .name = "kvm-loongarch-eiointc", + .create = kvm_eiointc_create, + .destroy = kvm_eiointc_destroy, + .set_attr = kvm_eiointc_set_attr, + .get_attr = kvm_eiointc_get_attr, +}; + +int kvm_loongarch_register_eiointc_device(void) +{ + return kvm_register_device_ops(&kvm_eiointc_dev_ops, KVM_DEV_TYPE_LOONGARCH_EIOINTC); +} diff --git a/arch/loongarch/kvm/intc/ipi.c b/arch/loongarch/kvm/intc/ipi.c new file mode 100644 index 000000000000..fe734dc062ed --- /dev/null +++ b/arch/loongarch/kvm/intc/ipi.c @@ -0,0 +1,479 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2024 Loongson Technology Corporation Limited + */ + +#include <linux/kvm_host.h> +#include <asm/kvm_ipi.h> +#include <asm/kvm_vcpu.h> + +static void ipi_send(struct kvm *kvm, uint64_t data) +{ + int cpu, action; + uint32_t status; + struct kvm_vcpu *vcpu; + struct kvm_interrupt irq; + + cpu = ((data & 0xffffffff) >> 16) & 0x3ff; + vcpu = kvm_get_vcpu_by_cpuid(kvm, cpu); + if (unlikely(vcpu == NULL)) { + kvm_err("%s: invalid target cpu: %d\n", __func__, cpu); + return; + } + + action = BIT(data & 0x1f); + spin_lock(&vcpu->arch.ipi_state.lock); + status = vcpu->arch.ipi_state.status; + vcpu->arch.ipi_state.status |= action; + spin_unlock(&vcpu->arch.ipi_state.lock); + if (status == 0) { + irq.irq = LARCH_INT_IPI; + kvm_vcpu_ioctl_interrupt(vcpu, &irq); + } +} + +static void ipi_clear(struct kvm_vcpu *vcpu, uint64_t data) +{ + uint32_t status; + struct kvm_interrupt irq; + + spin_lock(&vcpu->arch.ipi_state.lock); + vcpu->arch.ipi_state.status &= ~data; + status = vcpu->arch.ipi_state.status; + spin_unlock(&vcpu->arch.ipi_state.lock); + if (status == 0) { + irq.irq = -LARCH_INT_IPI; + kvm_vcpu_ioctl_interrupt(vcpu, &irq); + } +} + +static uint64_t read_mailbox(struct kvm_vcpu *vcpu, int offset, int len) +{ + uint64_t data = 0; + + spin_lock(&vcpu->arch.ipi_state.lock); + data = *(ulong *)((void *)vcpu->arch.ipi_state.buf + (offset - 0x20)); + spin_unlock(&vcpu->arch.ipi_state.lock); + + switch (len) { + case 1: + return data & 0xff; + case 2: + return data & 0xffff; + case 4: + return data & 0xffffffff; + case 8: + return data; + default: + kvm_err("%s: unknown data len: %d\n", __func__, len); + return 0; + } +} + +static void write_mailbox(struct kvm_vcpu *vcpu, int offset, uint64_t data, int len) +{ + void *pbuf; + + spin_lock(&vcpu->arch.ipi_state.lock); + pbuf = (void *)vcpu->arch.ipi_state.buf + (offset - 0x20); + + switch (len) { + case 1: + *(unsigned char *)pbuf = (unsigned char)data; + break; + case 2: + *(unsigned short *)pbuf = (unsigned short)data; + break; + case 4: + *(unsigned int *)pbuf = (unsigned int)data; + break; + case 8: + *(unsigned long *)pbuf = (unsigned long)data; + break; + default: + kvm_err("%s: unknown data len: %d\n", __func__, len); + } + spin_unlock(&vcpu->arch.ipi_state.lock); +} + +static int send_ipi_data(struct kvm_vcpu *vcpu, gpa_t addr, uint64_t data) +{ + int i, idx, ret; + uint32_t val = 0, mask = 0; + + /* + * Bit 27-30 is mask for byte writing. + * If the mask is 0, we need not to do anything. 
+ */ + if ((data >> 27) & 0xf) { + /* Read the old val */ + idx = srcu_read_lock(&vcpu->kvm->srcu); + ret = kvm_io_bus_read(vcpu, KVM_IOCSR_BUS, addr, sizeof(val), &val); + srcu_read_unlock(&vcpu->kvm->srcu, idx); + if (unlikely(ret)) { + kvm_err("%s: : read data from addr %llx failed\n", __func__, addr); + return ret; + } + /* Construct the mask by scanning the bit 27-30 */ + for (i = 0; i < 4; i++) { + if (data & (BIT(27 + i))) + mask |= (0xff << (i * 8)); + } + /* Save the old part of val */ + val &= mask; + } + val |= ((uint32_t)(data >> 32) & ~mask); + idx = srcu_read_lock(&vcpu->kvm->srcu); + ret = kvm_io_bus_write(vcpu, KVM_IOCSR_BUS, addr, sizeof(val), &val); + srcu_read_unlock(&vcpu->kvm->srcu, idx); + if (unlikely(ret)) + kvm_err("%s: : write data to addr %llx failed\n", __func__, addr); + + return ret; +} + +static int mail_send(struct kvm *kvm, uint64_t data) +{ + int cpu, mailbox, offset; + struct kvm_vcpu *vcpu; + + cpu = ((data & 0xffffffff) >> 16) & 0x3ff; + vcpu = kvm_get_vcpu_by_cpuid(kvm, cpu); + if (unlikely(vcpu == NULL)) { + kvm_err("%s: invalid target cpu: %d\n", __func__, cpu); + return -EINVAL; + } + mailbox = ((data & 0xffffffff) >> 2) & 0x7; + offset = IOCSR_IPI_BASE + IOCSR_IPI_BUF_20 + mailbox * 4; + + return send_ipi_data(vcpu, offset, data); +} + +static int any_send(struct kvm *kvm, uint64_t data) +{ + int cpu, offset; + struct kvm_vcpu *vcpu; + + cpu = ((data & 0xffffffff) >> 16) & 0x3ff; + vcpu = kvm_get_vcpu_by_cpuid(kvm, cpu); + if (unlikely(vcpu == NULL)) { + kvm_err("%s: invalid target cpu: %d\n", __func__, cpu); + return -EINVAL; + } + offset = data & 0xffff; + + return send_ipi_data(vcpu, offset, data); +} + +static int loongarch_ipi_readl(struct kvm_vcpu *vcpu, gpa_t addr, int len, void *val) +{ + int ret = 0; + uint32_t offset; + uint64_t res = 0; + + offset = (uint32_t)(addr & 0x1ff); + WARN_ON_ONCE(offset & (len - 1)); + + switch (offset) { + case IOCSR_IPI_STATUS: + spin_lock(&vcpu->arch.ipi_state.lock); + res = vcpu->arch.ipi_state.status; + spin_unlock(&vcpu->arch.ipi_state.lock); + break; + case IOCSR_IPI_EN: + spin_lock(&vcpu->arch.ipi_state.lock); + res = vcpu->arch.ipi_state.en; + spin_unlock(&vcpu->arch.ipi_state.lock); + break; + case IOCSR_IPI_SET: + res = 0; + break; + case IOCSR_IPI_CLEAR: + res = 0; + break; + case IOCSR_IPI_BUF_20 ... IOCSR_IPI_BUF_38 + 7: + if (offset + len > IOCSR_IPI_BUF_38 + 8) { + kvm_err("%s: invalid offset or len: offset = %d, len = %d\n", + __func__, offset, len); + ret = -EINVAL; + break; + } + res = read_mailbox(vcpu, offset, len); + break; + default: + kvm_err("%s: unknown addr: %llx\n", __func__, addr); + ret = -EINVAL; + break; + } + *(uint64_t *)val = res; + + return ret; +} + +static int loongarch_ipi_writel(struct kvm_vcpu *vcpu, gpa_t addr, int len, const void *val) +{ + int ret = 0; + uint64_t data; + uint32_t offset; + + data = *(uint64_t *)val; + + offset = (uint32_t)(addr & 0x1ff); + WARN_ON_ONCE(offset & (len - 1)); + + switch (offset) { + case IOCSR_IPI_STATUS: + ret = -EINVAL; + break; + case IOCSR_IPI_EN: + spin_lock(&vcpu->arch.ipi_state.lock); + vcpu->arch.ipi_state.en = data; + spin_unlock(&vcpu->arch.ipi_state.lock); + break; + case IOCSR_IPI_SET: + ret = -EINVAL; + break; + case IOCSR_IPI_CLEAR: + /* Just clear the status of the current vcpu */ + ipi_clear(vcpu, data); + break; + case IOCSR_IPI_BUF_20 ... 
IOCSR_IPI_BUF_38 + 7: + if (offset + len > IOCSR_IPI_BUF_38 + 8) { + kvm_err("%s: invalid offset or len: offset = %d, len = %d\n", + __func__, offset, len); + ret = -EINVAL; + break; + } + write_mailbox(vcpu, offset, data, len); + break; + case IOCSR_IPI_SEND: + ipi_send(vcpu->kvm, data); + break; + case IOCSR_MAIL_SEND: + ret = mail_send(vcpu->kvm, *(uint64_t *)val); + break; + case IOCSR_ANY_SEND: + ret = any_send(vcpu->kvm, *(uint64_t *)val); + break; + default: + kvm_err("%s: unknown addr: %llx\n", __func__, addr); + ret = -EINVAL; + break; + } + + return ret; +} + +static int kvm_ipi_read(struct kvm_vcpu *vcpu, + struct kvm_io_device *dev, + gpa_t addr, int len, void *val) +{ + int ret; + struct loongarch_ipi *ipi; + + ipi = vcpu->kvm->arch.ipi; + if (!ipi) { + kvm_err("%s: ipi irqchip not valid!\n", __func__); + return -EINVAL; + } + ipi->kvm->stat.ipi_read_exits++; + ret = loongarch_ipi_readl(vcpu, addr, len, val); + + return ret; +} + +static int kvm_ipi_write(struct kvm_vcpu *vcpu, + struct kvm_io_device *dev, + gpa_t addr, int len, const void *val) +{ + int ret; + struct loongarch_ipi *ipi; + + ipi = vcpu->kvm->arch.ipi; + if (!ipi) { + kvm_err("%s: ipi irqchip not valid!\n", __func__); + return -EINVAL; + } + ipi->kvm->stat.ipi_write_exits++; + ret = loongarch_ipi_writel(vcpu, addr, len, val); + + return ret; +} + +static const struct kvm_io_device_ops kvm_ipi_ops = { + .read = kvm_ipi_read, + .write = kvm_ipi_write, +}; + +static int kvm_ipi_regs_access(struct kvm_device *dev, + struct kvm_device_attr *attr, + bool is_write) +{ + int len = 4; + int cpu, addr; + uint64_t val; + void *p = NULL; + struct kvm_vcpu *vcpu; + + cpu = (attr->attr >> 16) & 0x3ff; + addr = attr->attr & 0xff; + + vcpu = kvm_get_vcpu(dev->kvm, cpu); + if (unlikely(vcpu == NULL)) { + kvm_err("%s: invalid target cpu: %d\n", __func__, cpu); + return -EINVAL; + } + + switch (addr) { + case IOCSR_IPI_STATUS: + p = &vcpu->arch.ipi_state.status; + break; + case IOCSR_IPI_EN: + p = &vcpu->arch.ipi_state.en; + break; + case IOCSR_IPI_SET: + p = &vcpu->arch.ipi_state.set; + break; + case IOCSR_IPI_CLEAR: + p = &vcpu->arch.ipi_state.clear; + break; + case IOCSR_IPI_BUF_20: + p = &vcpu->arch.ipi_state.buf[0]; + len = 8; + break; + case IOCSR_IPI_BUF_28: + p = &vcpu->arch.ipi_state.buf[1]; + len = 8; + break; + case IOCSR_IPI_BUF_30: + p = &vcpu->arch.ipi_state.buf[2]; + len = 8; + break; + case IOCSR_IPI_BUF_38: + p = &vcpu->arch.ipi_state.buf[3]; + len = 8; + break; + default: + kvm_err("%s: unknown ipi register, addr = %d\n", __func__, addr); + return -EINVAL; + } + + if (is_write) { + if (len == 4) { + if (get_user(val, (uint32_t __user *)attr->addr)) + return -EFAULT; + *(uint32_t *)p = (uint32_t)val; + } else if (len == 8) { + if (get_user(val, (uint64_t __user *)attr->addr)) + return -EFAULT; + *(uint64_t *)p = val; + } + } else { + if (len == 4) { + val = *(uint32_t *)p; + return put_user(val, (uint32_t __user *)attr->addr); + } else if (len == 8) { + val = *(uint64_t *)p; + return put_user(val, (uint64_t __user *)attr->addr); + } + } + + return 0; +} + +static int kvm_ipi_get_attr(struct kvm_device *dev, + struct kvm_device_attr *attr) +{ + switch (attr->group) { + case KVM_DEV_LOONGARCH_IPI_GRP_REGS: + return kvm_ipi_regs_access(dev, attr, false); + default: + kvm_err("%s: unknown group (%d)\n", __func__, attr->group); + return -EINVAL; + } +} + +static int kvm_ipi_set_attr(struct kvm_device *dev, + struct kvm_device_attr *attr) +{ + switch (attr->group) { + case KVM_DEV_LOONGARCH_IPI_GRP_REGS: + return 
kvm_ipi_regs_access(dev, attr, true); + default: + kvm_err("%s: unknown group (%d)\n", __func__, attr->group); + return -EINVAL; + } +} + +static int kvm_ipi_create(struct kvm_device *dev, u32 type) +{ + int ret; + struct kvm *kvm; + struct kvm_io_device *device; + struct loongarch_ipi *s; + + if (!dev) { + kvm_err("%s: kvm_device ptr is invalid!\n", __func__); + return -EINVAL; + } + + kvm = dev->kvm; + if (kvm->arch.ipi) { + kvm_err("%s: LoongArch IPI has already been created!\n", __func__); + return -EINVAL; + } + + s = kzalloc(sizeof(struct loongarch_ipi), GFP_KERNEL); + if (!s) + return -ENOMEM; + + spin_lock_init(&s->lock); + s->kvm = kvm; + + /* + * Initialize IOCSR device + */ + device = &s->device; + kvm_iodevice_init(device, &kvm_ipi_ops); + mutex_lock(&kvm->slots_lock); + ret = kvm_io_bus_register_dev(kvm, KVM_IOCSR_BUS, IOCSR_IPI_BASE, IOCSR_IPI_SIZE, device); + mutex_unlock(&kvm->slots_lock); + if (ret < 0) { + kvm_err("%s: Initialize IOCSR dev failed, ret = %d\n", __func__, ret); + goto err; + } + + kvm->arch.ipi = s; + return 0; + +err: + kfree(s); + return -EFAULT; +} + +static void kvm_ipi_destroy(struct kvm_device *dev) +{ + struct kvm *kvm; + struct loongarch_ipi *ipi; + + if (!dev || !dev->kvm || !dev->kvm->arch.ipi) + return; + + kvm = dev->kvm; + ipi = kvm->arch.ipi; + kvm_io_bus_unregister_dev(kvm, KVM_IOCSR_BUS, &ipi->device); + kfree(ipi); +} + +static struct kvm_device_ops kvm_ipi_dev_ops = { + .name = "kvm-loongarch-ipi", + .create = kvm_ipi_create, + .destroy = kvm_ipi_destroy, + .set_attr = kvm_ipi_set_attr, + .get_attr = kvm_ipi_get_attr, +}; + +int kvm_loongarch_register_ipi_device(void) +{ + return kvm_register_device_ops(&kvm_ipi_dev_ops, KVM_DEV_TYPE_LOONGARCH_IPI); +} diff --git a/arch/loongarch/kvm/intc/pch_pic.c b/arch/loongarch/kvm/intc/pch_pic.c new file mode 100644 index 000000000000..08fce845f668 --- /dev/null +++ b/arch/loongarch/kvm/intc/pch_pic.c @@ -0,0 +1,519 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2024 Loongson Technology Corporation Limited + */ + +#include <asm/kvm_eiointc.h> +#include <asm/kvm_pch_pic.h> +#include <asm/kvm_vcpu.h> +#include <linux/count_zeros.h> + +/* update the isr according to irq level and route irq to eiointc */ +static void pch_pic_update_irq(struct loongarch_pch_pic *s, int irq, int level) +{ + u64 mask = BIT(irq); + + /* + * set isr and route irq to eiointc and + * the route table is in htmsi_vector[] + */ + if (level) { + if (mask & s->irr & ~s->mask) { + s->isr |= mask; + irq = s->htmsi_vector[irq]; + eiointc_set_irq(s->kvm->arch.eiointc, irq, level); + } + } else { + if (mask & s->isr & ~s->irr) { + s->isr &= ~mask; + irq = s->htmsi_vector[irq]; + eiointc_set_irq(s->kvm->arch.eiointc, irq, level); + } + } +} + +/* update batch irqs, the irq_mask is a bitmap of irqs */ +static void pch_pic_update_batch_irqs(struct loongarch_pch_pic *s, u64 irq_mask, int level) +{ + int irq, bits; + + /* find each irq by irqs bitmap and update each irq */ + bits = sizeof(irq_mask) * 8; + irq = find_first_bit((void *)&irq_mask, bits); + while (irq < bits) { + pch_pic_update_irq(s, irq, level); + bitmap_clear((void *)&irq_mask, irq, 1); + irq = find_first_bit((void *)&irq_mask, bits); + } +} + +/* called when a irq is triggered in pch pic */ +void pch_pic_set_irq(struct loongarch_pch_pic *s, int irq, int level) +{ + u64 mask = BIT(irq); + + spin_lock(&s->lock); + if (level) + s->irr |= mask; /* set irr */ + else { + /* + * In edge triggered mode, 0 does not mean to clear irq + * The irr register variable is 
cleared when cpu writes to the + * PCH_PIC_CLEAR_START address area + */ + if (s->edge & mask) { + spin_unlock(&s->lock); + return; + } + s->irr &= ~mask; + } + pch_pic_update_irq(s, irq, level); + spin_unlock(&s->lock); +} + +/* msi irq handler */ +void pch_msi_set_irq(struct kvm *kvm, int irq, int level) +{ + eiointc_set_irq(kvm->arch.eiointc, irq, level); +} + +/* + * pch pic register is 64-bit, but it is accessed by 32-bit, + * so we use high to get whether low or high 32 bits we want + * to read. + */ +static u32 pch_pic_read_reg(u64 *s, int high) +{ + u64 val = *s; + + /* read the high 32 bits when high is 1 */ + return high ? (u32)(val >> 32) : (u32)val; +} + +/* + * pch pic register is 64-bit, but it is accessed by 32-bit, + * so we use high to get whether low or high 32 bits we want + * to write. + */ +static u32 pch_pic_write_reg(u64 *s, int high, u32 v) +{ + u64 val = *s, data = v; + + if (high) { + /* + * Clear val high 32 bits + * Write the high 32 bits when the high is 1 + */ + *s = (val << 32 >> 32) | (data << 32); + val >>= 32; + } else + /* + * Clear val low 32 bits + * Write the low 32 bits when the high is 0 + */ + *s = (val >> 32 << 32) | v; + + return (u32)val; +} + +static int loongarch_pch_pic_read(struct loongarch_pch_pic *s, gpa_t addr, int len, void *val) +{ + int offset, index, ret = 0; + u32 data = 0; + u64 int_id = 0; + + offset = addr - s->pch_pic_base; + + spin_lock(&s->lock); + switch (offset) { + case PCH_PIC_INT_ID_START ... PCH_PIC_INT_ID_END: + /* int id version */ + int_id |= (u64)PCH_PIC_INT_ID_VER << 32; + /* irq number */ + int_id |= (u64)31 << (32 + 16); + /* int id value */ + int_id |= PCH_PIC_INT_ID_VAL; + *(u64 *)val = int_id; + break; + case PCH_PIC_MASK_START ... PCH_PIC_MASK_END: + offset -= PCH_PIC_MASK_START; + index = offset >> 2; + /* read mask reg */ + data = pch_pic_read_reg(&s->mask, index); + *(u32 *)val = data; + break; + case PCH_PIC_HTMSI_EN_START ... PCH_PIC_HTMSI_EN_END: + offset -= PCH_PIC_HTMSI_EN_START; + index = offset >> 2; + /* read htmsi enable reg */ + data = pch_pic_read_reg(&s->htmsi_en, index); + *(u32 *)val = data; + break; + case PCH_PIC_EDGE_START ... PCH_PIC_EDGE_END: + offset -= PCH_PIC_EDGE_START; + index = offset >> 2; + /* read edge enable reg */ + data = pch_pic_read_reg(&s->edge, index); + *(u32 *)val = data; + break; + case PCH_PIC_AUTO_CTRL0_START ... PCH_PIC_AUTO_CTRL0_END: + case PCH_PIC_AUTO_CTRL1_START ... PCH_PIC_AUTO_CTRL1_END: + /* we only use default mode: fixed interrupt distribution mode */ + *(u32 *)val = 0; + break; + case PCH_PIC_ROUTE_ENTRY_START ... PCH_PIC_ROUTE_ENTRY_END: + /* only route to int0: eiointc */ + *(u8 *)val = 1; + break; + case PCH_PIC_HTMSI_VEC_START ... PCH_PIC_HTMSI_VEC_END: + offset -= PCH_PIC_HTMSI_VEC_START; + /* read htmsi vector */ + data = s->htmsi_vector[offset]; + *(u8 *)val = data; + break; + case PCH_PIC_POLARITY_START ... 
PCH_PIC_POLARITY_END: + /* we only use defalut value 0: high level triggered */ + *(u32 *)val = 0; + break; + default: + ret = -EINVAL; + } + spin_unlock(&s->lock); + + return ret; +} + +static int kvm_pch_pic_read(struct kvm_vcpu *vcpu, + struct kvm_io_device *dev, + gpa_t addr, int len, void *val) +{ + int ret; + struct loongarch_pch_pic *s = vcpu->kvm->arch.pch_pic; + + if (!s) { + kvm_err("%s: pch pic irqchip not valid!\n", __func__); + return -EINVAL; + } + + /* statistics of pch pic reading */ + vcpu->kvm->stat.pch_pic_read_exits++; + ret = loongarch_pch_pic_read(s, addr, len, val); + + return ret; +} + +static int loongarch_pch_pic_write(struct loongarch_pch_pic *s, gpa_t addr, + int len, const void *val) +{ + int ret; + u32 old, data, offset, index; + u64 irq; + + ret = 0; + data = *(u32 *)val; + offset = addr - s->pch_pic_base; + + spin_lock(&s->lock); + switch (offset) { + case PCH_PIC_MASK_START ... PCH_PIC_MASK_END: + offset -= PCH_PIC_MASK_START; + /* get whether high or low 32 bits we want to write */ + index = offset >> 2; + old = pch_pic_write_reg(&s->mask, index, data); + /* enable irq when mask value change to 0 */ + irq = (old & ~data) << (32 * index); + pch_pic_update_batch_irqs(s, irq, 1); + /* disable irq when mask value change to 1 */ + irq = (~old & data) << (32 * index); + pch_pic_update_batch_irqs(s, irq, 0); + break; + case PCH_PIC_HTMSI_EN_START ... PCH_PIC_HTMSI_EN_END: + offset -= PCH_PIC_HTMSI_EN_START; + index = offset >> 2; + pch_pic_write_reg(&s->htmsi_en, index, data); + break; + case PCH_PIC_EDGE_START ... PCH_PIC_EDGE_END: + offset -= PCH_PIC_EDGE_START; + index = offset >> 2; + /* 1: edge triggered, 0: level triggered */ + pch_pic_write_reg(&s->edge, index, data); + break; + case PCH_PIC_CLEAR_START ... PCH_PIC_CLEAR_END: + offset -= PCH_PIC_CLEAR_START; + index = offset >> 2; + /* write 1 to clear edge irq */ + old = pch_pic_read_reg(&s->irr, index); + /* + * get the irq bitmap which is edge triggered and + * already set and to be cleared + */ + irq = old & pch_pic_read_reg(&s->edge, index) & data; + /* write irr to the new state where irqs have been cleared */ + pch_pic_write_reg(&s->irr, index, old & ~irq); + /* update cleared irqs */ + pch_pic_update_batch_irqs(s, irq, 0); + break; + case PCH_PIC_AUTO_CTRL0_START ... PCH_PIC_AUTO_CTRL0_END: + offset -= PCH_PIC_AUTO_CTRL0_START; + index = offset >> 2; + /* we only use default mode: fixed interrupt distribution mode */ + pch_pic_write_reg(&s->auto_ctrl0, index, 0); + break; + case PCH_PIC_AUTO_CTRL1_START ... PCH_PIC_AUTO_CTRL1_END: + offset -= PCH_PIC_AUTO_CTRL1_START; + index = offset >> 2; + /* we only use default mode: fixed interrupt distribution mode */ + pch_pic_write_reg(&s->auto_ctrl1, index, 0); + break; + case PCH_PIC_ROUTE_ENTRY_START ... PCH_PIC_ROUTE_ENTRY_END: + offset -= PCH_PIC_ROUTE_ENTRY_START; + /* only route to int0: eiointc */ + s->route_entry[offset] = 1; + break; + case PCH_PIC_HTMSI_VEC_START ... PCH_PIC_HTMSI_VEC_END: + /* route table to eiointc */ + offset -= PCH_PIC_HTMSI_VEC_START; + s->htmsi_vector[offset] = (u8)data; + break; + case PCH_PIC_POLARITY_START ... 
PCH_PIC_POLARITY_END: + offset -= PCH_PIC_POLARITY_START; + index = offset >> 2; + /* we only use defalut value 0: high level triggered */ + pch_pic_write_reg(&s->polarity, index, 0); + break; + default: + ret = -EINVAL; + break; + } + spin_unlock(&s->lock); + + return ret; +} + +static int kvm_pch_pic_write(struct kvm_vcpu *vcpu, + struct kvm_io_device *dev, + gpa_t addr, int len, const void *val) +{ + int ret; + struct loongarch_pch_pic *s = vcpu->kvm->arch.pch_pic; + + if (!s) { + kvm_err("%s: pch pic irqchip not valid!\n", __func__); + return -EINVAL; + } + + /* statistics of pch pic writing */ + vcpu->kvm->stat.pch_pic_write_exits++; + ret = loongarch_pch_pic_write(s, addr, len, val); + + return ret; +} + +static const struct kvm_io_device_ops kvm_pch_pic_ops = { + .read = kvm_pch_pic_read, + .write = kvm_pch_pic_write, +}; + +static int kvm_pch_pic_init(struct kvm_device *dev, u64 addr) +{ + int ret; + struct kvm *kvm = dev->kvm; + struct kvm_io_device *device; + struct loongarch_pch_pic *s = dev->kvm->arch.pch_pic; + + s->pch_pic_base = addr; + device = &s->device; + /* init device by pch pic writing and reading ops */ + kvm_iodevice_init(device, &kvm_pch_pic_ops); + mutex_lock(&kvm->slots_lock); + /* register pch pic device */ + ret = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, addr, PCH_PIC_SIZE, device); + mutex_unlock(&kvm->slots_lock); + + return (ret < 0) ? -EFAULT : 0; +} + +/* used by user space to get or set pch pic registers */ +static int kvm_pch_pic_regs_access(struct kvm_device *dev, + struct kvm_device_attr *attr, + bool is_write) +{ + int addr, offset, len = 8, ret = 0; + void __user *data; + void *p = NULL; + struct loongarch_pch_pic *s; + + s = dev->kvm->arch.pch_pic; + addr = attr->attr; + data = (void __user *)attr->addr; + + /* get pointer to pch pic register by addr */ + switch (addr) { + case PCH_PIC_MASK_START: + p = &s->mask; + break; + case PCH_PIC_HTMSI_EN_START: + p = &s->htmsi_en; + break; + case PCH_PIC_EDGE_START: + p = &s->edge; + break; + case PCH_PIC_AUTO_CTRL0_START: + p = &s->auto_ctrl0; + break; + case PCH_PIC_AUTO_CTRL1_START: + p = &s->auto_ctrl1; + break; + case PCH_PIC_ROUTE_ENTRY_START ... PCH_PIC_ROUTE_ENTRY_END: + offset = addr - PCH_PIC_ROUTE_ENTRY_START; + p = &s->route_entry[offset]; + len = 1; + break; + case PCH_PIC_HTMSI_VEC_START ... 
PCH_PIC_HTMSI_VEC_END: + offset = addr - PCH_PIC_HTMSI_VEC_START; + p = &s->htmsi_vector[offset]; + len = 1; + break; + case PCH_PIC_INT_IRR_START: + p = &s->irr; + break; + case PCH_PIC_INT_ISR_START: + p = &s->isr; + break; + case PCH_PIC_POLARITY_START: + p = &s->polarity; + break; + default: + return -EINVAL; + } + + spin_lock(&s->lock); + /* write or read value according to is_write */ + if (is_write) { + if (copy_from_user(p, data, len)) + ret = -EFAULT; + } else { + if (copy_to_user(data, p, len)) + ret = -EFAULT; + } + spin_unlock(&s->lock); + + return ret; +} + +static int kvm_pch_pic_get_attr(struct kvm_device *dev, + struct kvm_device_attr *attr) +{ + switch (attr->group) { + case KVM_DEV_LOONGARCH_PCH_PIC_GRP_REGS: + return kvm_pch_pic_regs_access(dev, attr, false); + default: + return -EINVAL; + } +} + +static int kvm_pch_pic_set_attr(struct kvm_device *dev, + struct kvm_device_attr *attr) +{ + u64 addr; + void __user *uaddr = (void __user *)(long)attr->addr; + + switch (attr->group) { + case KVM_DEV_LOONGARCH_PCH_PIC_GRP_CTRL: + switch (attr->attr) { + case KVM_DEV_LOONGARCH_PCH_PIC_CTRL_INIT: + if (copy_from_user(&addr, uaddr, sizeof(addr))) + return -EFAULT; + + if (!dev->kvm->arch.pch_pic) { + kvm_err("%s: please create pch_pic irqchip first!\n", __func__); + return -ENODEV; + } + + return kvm_pch_pic_init(dev, addr); + default: + kvm_err("%s: unknown group (%d) attr (%lld)\n", __func__, attr->group, + attr->attr); + return -EINVAL; + } + case KVM_DEV_LOONGARCH_PCH_PIC_GRP_REGS: + return kvm_pch_pic_regs_access(dev, attr, true); + default: + return -EINVAL; + } +} + +static int kvm_setup_default_irq_routing(struct kvm *kvm) +{ + int i, ret; + u32 nr = KVM_IRQCHIP_NUM_PINS; + struct kvm_irq_routing_entry *entries; + + entries = kcalloc(nr, sizeof(*entries), GFP_KERNEL); + if (!entries) + return -ENOMEM; + + for (i = 0; i < nr; i++) { + entries[i].gsi = i; + entries[i].type = KVM_IRQ_ROUTING_IRQCHIP; + entries[i].u.irqchip.irqchip = 0; + entries[i].u.irqchip.pin = i; + } + ret = kvm_set_irq_routing(kvm, entries, nr, 0); + kfree(entries); + + return ret; +} + +static int kvm_pch_pic_create(struct kvm_device *dev, u32 type) +{ + int ret; + struct kvm *kvm = dev->kvm; + struct loongarch_pch_pic *s; + + /* pch pic should not has been created */ + if (kvm->arch.pch_pic) + return -EINVAL; + + ret = kvm_setup_default_irq_routing(kvm); + if (ret) + return -ENOMEM; + + s = kzalloc(sizeof(struct loongarch_pch_pic), GFP_KERNEL); + if (!s) + return -ENOMEM; + + spin_lock_init(&s->lock); + s->kvm = kvm; + kvm->arch.pch_pic = s; + + return 0; +} + +static void kvm_pch_pic_destroy(struct kvm_device *dev) +{ + struct kvm *kvm; + struct loongarch_pch_pic *s; + + if (!dev || !dev->kvm || !dev->kvm->arch.pch_pic) + return; + + kvm = dev->kvm; + s = kvm->arch.pch_pic; + /* unregister pch pic device and free it's memory */ + kvm_io_bus_unregister_dev(kvm, KVM_MMIO_BUS, &s->device); + kfree(s); +} + +static struct kvm_device_ops kvm_pch_pic_dev_ops = { + .name = "kvm-loongarch-pch-pic", + .create = kvm_pch_pic_create, + .destroy = kvm_pch_pic_destroy, + .set_attr = kvm_pch_pic_set_attr, + .get_attr = kvm_pch_pic_get_attr, +}; + +int kvm_loongarch_register_pch_pic_device(void) +{ + return kvm_register_device_ops(&kvm_pch_pic_dev_ops, KVM_DEV_TYPE_LOONGARCH_PCHPIC); +} diff --git a/arch/loongarch/kvm/irqfd.c b/arch/loongarch/kvm/irqfd.c new file mode 100644 index 000000000000..9a39627aecf0 --- /dev/null +++ b/arch/loongarch/kvm/irqfd.c @@ -0,0 +1,89 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * 
Copyright (C) 2024 Loongson Technology Corporation Limited + */ + +#include <linux/kvm_host.h> +#include <trace/events/kvm.h> +#include <asm/kvm_pch_pic.h> + +static int kvm_set_pic_irq(struct kvm_kernel_irq_routing_entry *e, + struct kvm *kvm, int irq_source_id, int level, bool line_status) +{ + /* PCH-PIC pin (0 ~ 64) <---> GSI (0 ~ 64) */ + pch_pic_set_irq(kvm->arch.pch_pic, e->irqchip.pin, level); + + return 0; +} + +/* + * kvm_set_msi: inject the MSI corresponding to the + * MSI routing entry + * + * This is the entry point for irqfd MSI injection + * and userspace MSI injection. + */ +int kvm_set_msi(struct kvm_kernel_irq_routing_entry *e, + struct kvm *kvm, int irq_source_id, int level, bool line_status) +{ + if (!level) + return -1; + + pch_msi_set_irq(kvm, e->msi.data, level); + + return 0; +} + +/* + * kvm_set_routing_entry: populate a kvm routing entry + * from a user routing entry + * + * @kvm: the VM this entry is applied to + * @e: kvm kernel routing entry handle + * @ue: user api routing entry handle + * return 0 on success, -EINVAL on errors. + */ +int kvm_set_routing_entry(struct kvm *kvm, + struct kvm_kernel_irq_routing_entry *e, + const struct kvm_irq_routing_entry *ue) +{ + switch (ue->type) { + case KVM_IRQ_ROUTING_IRQCHIP: + e->set = kvm_set_pic_irq; + e->irqchip.irqchip = ue->u.irqchip.irqchip; + e->irqchip.pin = ue->u.irqchip.pin; + + if (e->irqchip.pin >= KVM_IRQCHIP_NUM_PINS) + return -EINVAL; + + return 0; + case KVM_IRQ_ROUTING_MSI: + e->set = kvm_set_msi; + e->msi.address_lo = ue->u.msi.address_lo; + e->msi.address_hi = ue->u.msi.address_hi; + e->msi.data = ue->u.msi.data; + return 0; + default: + return -EINVAL; + } +} + +int kvm_arch_set_irq_inatomic(struct kvm_kernel_irq_routing_entry *e, + struct kvm *kvm, int irq_source_id, int level, bool line_status) +{ + switch (e->type) { + case KVM_IRQ_ROUTING_IRQCHIP: + pch_pic_set_irq(kvm->arch.pch_pic, e->irqchip.pin, level); + return 0; + case KVM_IRQ_ROUTING_MSI: + pch_msi_set_irq(kvm, e->msi.data, level); + return 0; + default: + return -EWOULDBLOCK; + } +} + +bool kvm_arch_intc_initialized(struct kvm *kvm) +{ + return kvm_arch_irqchip_in_kernel(kvm); +} diff --git a/arch/loongarch/kvm/main.c b/arch/loongarch/kvm/main.c index 27e9b94c0a0b..80ea63d465b8 100644 --- a/arch/loongarch/kvm/main.c +++ b/arch/loongarch/kvm/main.c @@ -9,6 +9,8 @@ #include <asm/cacheflush.h> #include <asm/cpufeature.h> #include <asm/kvm_csr.h> +#include <asm/kvm_eiointc.h> +#include <asm/kvm_pch_pic.h> #include "trace.h" unsigned long vpid_mask; @@ -243,6 +245,24 @@ void kvm_check_vpid(struct kvm_vcpu *vcpu) trace_kvm_vpid_change(vcpu, vcpu->arch.vpid); vcpu->cpu = cpu; kvm_clear_request(KVM_REQ_TLB_FLUSH_GPA, vcpu); + + /* + * LLBCTL is a separated guest CSR register from host, a general + * exception ERET instruction clears the host LLBCTL register in + * host mode, and clears the guest LLBCTL register in guest mode. + * ERET in tlb refill exception does not clear LLBCTL register. + * + * When secondary mmu mapping is changed, guest OS does not know + * even if the content is changed after mapping is changed. + * + * Here clear WCLLB of the guest LLBCTL register when mapping is + * changed. Otherwise, if mmu mapping is changed while guest is + * executing LL/SC pair, LL loads with the old address and set + * the LLBCTL flag, SC checks the LLBCTL flag and will store the + * new address successfully since LLBCTL_WCLLB is on, even if + * memory with new address is changed on other VCPUs. 
+ */ + set_gcsr_llbctl(CSR_LLBCTL_WCLLB); } /* Restore GSTAT(0x50).vpid */ @@ -276,16 +296,16 @@ int kvm_arch_enable_virtualization_cpu(void) /* * Enable virtualization features granting guest direct control of * certain features: - * GCI=2: Trap on init or unimplement cache instruction. + * GCI=2: Trap on init or unimplemented cache instruction. * TORU=0: Trap on Root Unimplement. * CACTRL=1: Root control cache. - * TOP=0: Trap on Previlege. + * TOP=0: Trap on Privilege. * TOE=0: Trap on Exception. * TIT=0: Trap on Timer. */ - if (env & CSR_GCFG_GCIP_ALL) + if (env & CSR_GCFG_GCIP_SECURE) gcfg |= CSR_GCFG_GCI_SECURE; - if (env & CSR_GCFG_MATC_ROOT) + if (env & CSR_GCFG_MATP_ROOT) gcfg |= CSR_GCFG_MATC_ROOT; write_csr_gcfg(gcfg); @@ -297,6 +317,13 @@ int kvm_arch_enable_virtualization_cpu(void) kvm_debug("GCFG:%lx GSTAT:%lx GINTC:%lx GTLBC:%lx", read_csr_gcfg(), read_csr_gstat(), read_csr_gintc(), read_csr_gtlbc()); + /* + * HW Guest CSR registers are lost after CPU suspend and resume. + * Clear last_vcpu so that Guest CSR registers forced to reload + * from vCPU SW state. + */ + this_cpu_ptr(vmcs)->last_vcpu = NULL; + return 0; } @@ -313,7 +340,7 @@ void kvm_arch_disable_virtualization_cpu(void) static int kvm_loongarch_env_init(void) { - int cpu, order; + int cpu, order, ret; void *addr; struct kvm_context *context; @@ -367,8 +394,22 @@ static int kvm_loongarch_env_init(void) } kvm_init_gcsr_flag(); + kvm_register_perf_callbacks(NULL); - return 0; + /* Register LoongArch IPI interrupt controller interface. */ + ret = kvm_loongarch_register_ipi_device(); + if (ret) + return ret; + + /* Register LoongArch EIOINTC interrupt controller interface. */ + ret = kvm_loongarch_register_eiointc_device(); + if (ret) + return ret; + + /* Register LoongArch PCH-PIC interrupt controller interface. 
*/ + ret = kvm_loongarch_register_pch_pic_device(); + + return ret; } static void kvm_loongarch_env_exit(void) @@ -385,6 +426,8 @@ static void kvm_loongarch_env_exit(void) } kfree(kvm_loongarch_ops); } + + kvm_unregister_perf_callbacks(); } static int kvm_loongarch_init(void) diff --git a/arch/loongarch/kvm/mmu.c b/arch/loongarch/kvm/mmu.c index 28681dfb4b85..ed956c5cf2cc 100644 --- a/arch/loongarch/kvm/mmu.c +++ b/arch/loongarch/kvm/mmu.c @@ -552,12 +552,10 @@ bool kvm_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range) static int kvm_map_page_fast(struct kvm_vcpu *vcpu, unsigned long gpa, bool write) { int ret = 0; - kvm_pfn_t pfn = 0; kvm_pte_t *ptep, changed, new; gfn_t gfn = gpa >> PAGE_SHIFT; struct kvm *kvm = vcpu->kvm; struct kvm_memory_slot *slot; - struct page *page; spin_lock(&kvm->mmu_lock); @@ -570,8 +568,6 @@ static int kvm_map_page_fast(struct kvm_vcpu *vcpu, unsigned long gpa, bool writ /* Track access to pages marked old */ new = kvm_pte_mkyoung(*ptep); - /* call kvm_set_pfn_accessed() after unlock */ - if (write && !kvm_pte_dirty(new)) { if (!kvm_pte_write(new)) { ret = -EFAULT; @@ -595,26 +591,14 @@ static int kvm_map_page_fast(struct kvm_vcpu *vcpu, unsigned long gpa, bool writ } changed = new ^ (*ptep); - if (changed) { + if (changed) kvm_set_pte(ptep, new); - pfn = kvm_pte_pfn(new); - page = kvm_pfn_to_refcounted_page(pfn); - if (page) - get_page(page); - } + spin_unlock(&kvm->mmu_lock); - if (changed) { - if (kvm_pte_young(changed)) - kvm_set_pfn_accessed(pfn); + if (kvm_pte_dirty(changed)) + mark_page_dirty(kvm, gfn); - if (kvm_pte_dirty(changed)) { - mark_page_dirty(kvm, gfn); - kvm_set_pfn_dirty(pfn); - } - if (page) - put_page(page); - } return ret; out: spin_unlock(&kvm->mmu_lock); @@ -796,6 +780,7 @@ static int kvm_map_page(struct kvm_vcpu *vcpu, unsigned long gpa, bool write) struct kvm *kvm = vcpu->kvm; struct kvm_memory_slot *memslot; struct kvm_mmu_memory_cache *memcache = &vcpu->arch.mmu_page_cache; + struct page *page; /* Try the fast path to handle old / clean pages */ srcu_idx = srcu_read_lock(&kvm->srcu); @@ -823,7 +808,7 @@ retry: mmu_seq = kvm->mmu_invalidate_seq; /* * Ensure the read of mmu_invalidate_seq isn't reordered with PTE reads in - * gfn_to_pfn_prot() (which calls get_user_pages()), so that we don't + * kvm_faultin_pfn() (which calls get_user_pages()), so that we don't * risk the page we get a reference to getting unmapped before we have a * chance to grab the mmu_lock without mmu_invalidate_retry() noticing. * @@ -835,7 +820,7 @@ retry: smp_rmb(); /* Slow path - ask KVM core whether we can access this GPA */ - pfn = gfn_to_pfn_prot(kvm, gfn, write, &writeable); + pfn = kvm_faultin_pfn(vcpu, gfn, write, &writeable, &page); if (is_error_noslot_pfn(pfn)) { err = -EFAULT; goto out; @@ -847,10 +832,10 @@ retry: /* * This can happen when mappings are changed asynchronously, but * also synchronously if a COW is triggered by - * gfn_to_pfn_prot(). + * kvm_faultin_pfn(). 
*/ spin_unlock(&kvm->mmu_lock); - kvm_release_pfn_clean(pfn); + kvm_release_page_unused(page); if (retry_no > 100) { retry_no = 0; schedule(); @@ -915,20 +900,19 @@ retry: else ++kvm->stat.pages; kvm_set_pte(ptep, new_pte); + + kvm_release_faultin_page(kvm, page, false, writeable); spin_unlock(&kvm->mmu_lock); - if (prot_bits & _PAGE_DIRTY) { + if (prot_bits & _PAGE_DIRTY) mark_page_dirty_in_slot(kvm, memslot, gfn); - kvm_set_pfn_dirty(pfn); - } - kvm_release_pfn_clean(pfn); out: srcu_read_unlock(&kvm->srcu, srcu_idx); return err; } -int kvm_handle_mm_fault(struct kvm_vcpu *vcpu, unsigned long gpa, bool write) +int kvm_handle_mm_fault(struct kvm_vcpu *vcpu, unsigned long gpa, bool write, int ecode) { int ret; @@ -937,8 +921,17 @@ int kvm_handle_mm_fault(struct kvm_vcpu *vcpu, unsigned long gpa, bool write) return ret; /* Invalidate this entry in the TLB */ - vcpu->arch.flush_gpa = gpa; - kvm_make_request(KVM_REQ_TLB_FLUSH_GPA, vcpu); + if (!cpu_has_ptw || (ecode == EXCCODE_TLBM)) { + /* + * With HW PTW, invalid TLB is not added when page fault. But + * for EXCCODE_TLBM exception, stale TLB may exist because of + * the last read access. + * + * With SW PTW, invalid TLB is added in TLB refill exception. + */ + vcpu->arch.flush_gpa = gpa; + kvm_make_request(KVM_REQ_TLB_FLUSH_GPA, vcpu); + } return 0; } diff --git a/arch/loongarch/kvm/switch.S b/arch/loongarch/kvm/switch.S index 0c292f818492..f1768b7a6194 100644 --- a/arch/loongarch/kvm/switch.S +++ b/arch/loongarch/kvm/switch.S @@ -60,16 +60,8 @@ ld.d t0, a2, KVM_ARCH_GPC csrwr t0, LOONGARCH_CSR_ERA - /* Save host PGDL */ - csrrd t0, LOONGARCH_CSR_PGDL - st.d t0, a2, KVM_ARCH_HPGD - - /* Switch to kvm */ - ld.d t1, a2, KVM_VCPU_KVM - KVM_VCPU_ARCH - - /* Load guest PGDL */ - li.w t0, KVM_GPGD - ldx.d t0, t1, t0 + /* Load PGD for KVM hypervisor */ + ld.d t0, a2, KVM_ARCH_KVMPGD csrwr t0, LOONGARCH_CSR_PGDL /* Mix GID and RID */ @@ -85,7 +77,7 @@ * Guest CRMD comes from separate GCSR_CRMD register */ ori t0, zero, CSR_PRMD_PIE - csrxchg t0, t0, LOONGARCH_CSR_PRMD + csrwr t0, LOONGARCH_CSR_PRMD /* Set PVM bit to setup ertn to guest context */ ori t0, zero, CSR_GSTAT_PVM diff --git a/arch/loongarch/kvm/vcpu.c b/arch/loongarch/kvm/vcpu.c index 174734a23d0a..5af32ec62cb1 100644 --- a/arch/loongarch/kvm/vcpu.c +++ b/arch/loongarch/kvm/vcpu.c @@ -240,7 +240,7 @@ static void kvm_late_check_requests(struct kvm_vcpu *vcpu) */ static int kvm_enter_guest_check(struct kvm_vcpu *vcpu) { - int ret; + int idx, ret; /* * Check conditions before entering the guest @@ -249,7 +249,9 @@ static int kvm_enter_guest_check(struct kvm_vcpu *vcpu) if (ret < 0) return ret; + idx = srcu_read_lock(&vcpu->kvm->srcu); ret = kvm_check_requests(vcpu); + srcu_read_unlock(&vcpu->kvm->srcu, idx); return ret; } @@ -292,6 +294,7 @@ static int kvm_pre_enter_guest(struct kvm_vcpu *vcpu) vcpu->arch.aux_inuse &= ~KVM_LARCH_SWCSR_LATEST; if (kvm_request_pending(vcpu) || xfer_to_guest_mode_work_pending()) { + kvm_lose_pmu(vcpu); /* make sure the vcpu mode has been written */ smp_store_mb(vcpu->mode, OUTSIDE_GUEST_MODE); local_irq_enable(); @@ -309,7 +312,7 @@ static int kvm_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu) { int ret = RESUME_GUEST; unsigned long estat = vcpu->arch.host_estat; - u32 intr = estat & 0x1fff; /* Ignore NMI */ + u32 intr = estat & CSR_ESTAT_IS; u32 ecode = (estat & CSR_ESTAT_EXC) >> CSR_ESTAT_EXC_SHIFT; vcpu->mode = OUTSIDE_GUEST_MODE; @@ -359,6 +362,34 @@ int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu) bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu 
*vcpu) { + unsigned long val; + + preempt_disable(); + val = gcsr_read(LOONGARCH_CSR_CRMD); + preempt_enable(); + + return (val & CSR_PRMD_PPLV) == PLV_KERN; +} + +#ifdef CONFIG_GUEST_PERF_EVENTS +unsigned long kvm_arch_vcpu_get_ip(struct kvm_vcpu *vcpu) +{ + return vcpu->arch.pc; +} + +/* + * Returns true if a Performance Monitoring Interrupt (PMI), a.k.a. perf event, + * arrived in guest context. For LoongArch64, if PMU is not passthrough to VM, + * any event that arrives while a vCPU is loaded is considered to be "in guest". + */ +bool kvm_arch_pmi_in_guest(struct kvm_vcpu *vcpu) +{ + return (vcpu && !(vcpu->arch.aux_inuse & KVM_LARCH_PMU)); +} +#endif + +bool kvm_arch_vcpu_preempted_in_kernel(struct kvm_vcpu *vcpu) +{ return false; } @@ -872,6 +903,13 @@ static int kvm_set_one_reg(struct kvm_vcpu *vcpu, vcpu->arch.st.guest_addr = 0; memset(&vcpu->arch.irq_pending, 0, sizeof(vcpu->arch.irq_pending)); memset(&vcpu->arch.irq_clear, 0, sizeof(vcpu->arch.irq_clear)); + + /* + * When vCPU reset, clear the ESTAT and GINTC registers + * Other CSR registers are cleared with function _kvm_setcsr(). + */ + kvm_write_sw_gcsr(vcpu->arch.csr, LOONGARCH_CSR_GINTC, 0); + kvm_write_sw_gcsr(vcpu->arch.csr, LOONGARCH_CSR_ESTAT, 0); break; default: ret = -EINVAL; @@ -1457,8 +1495,17 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu) vcpu->arch.vpid = 0; vcpu->arch.flush_gpa = INVALID_GPA; - hrtimer_init(&vcpu->arch.swtimer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED_HARD); - vcpu->arch.swtimer.function = kvm_swtimer_wakeup; + hrtimer_setup(&vcpu->arch.swtimer, kvm_swtimer_wakeup, CLOCK_MONOTONIC, + HRTIMER_MODE_ABS_PINNED_HARD); + + /* Get GPA (=HVA) of PGD for kvm hypervisor */ + vcpu->arch.kvm_pgd = __pa(vcpu->kvm->arch.pgd); + + /* + * Get PGD for primary mmu, virtual address is used since there is + * memory access after loading from CSR_PGD in tlb exception fast path. + */ + vcpu->arch.host_pgd = (unsigned long)vcpu->kvm->mm->pgd; vcpu->arch.handle_exit = kvm_handle_exit; vcpu->arch.guest_eentry = (unsigned long)kvm_loongarch_ops->exc_entry; @@ -1475,6 +1522,9 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu) /* Init */ vcpu->arch.last_sched_cpu = -1; + /* Init ipi_state lock */ + spin_lock_init(&vcpu->arch.ipi_state.lock); + /* * Initialize guest register state to valid architectural reset state. 
*/ @@ -1543,9 +1593,6 @@ static int _kvm_vcpu_load(struct kvm_vcpu *vcpu, int cpu) /* Restore timer state regardless */ kvm_restore_timer(vcpu); - - /* Control guest page CCA attribute */ - change_csr_gcfg(CSR_GCFG_MATC_MASK, CSR_GCFG_MATC_ROOT); kvm_make_request(KVM_REQ_STEAL_UPDATE, vcpu); /* Restore hardware PMU CSRs */ @@ -1727,9 +1774,14 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu) vcpu->mmio_needed = 0; } - if (run->exit_reason == KVM_EXIT_LOONGARCH_IOCSR) { + switch (run->exit_reason) { + case KVM_EXIT_HYPERCALL: + kvm_complete_user_service(vcpu, run); + break; + case KVM_EXIT_LOONGARCH_IOCSR: if (!run->iocsr_io.is_write) kvm_complete_iocsr_read(vcpu, run); + break; } if (!vcpu->wants_to_run) diff --git a/arch/loongarch/kvm/vm.c b/arch/loongarch/kvm/vm.c index 4ba734aaef87..edccfc8c9cd8 100644 --- a/arch/loongarch/kvm/vm.c +++ b/arch/loongarch/kvm/vm.c @@ -6,6 +6,8 @@ #include <linux/kvm_host.h> #include <asm/kvm_mmu.h> #include <asm/kvm_vcpu.h> +#include <asm/kvm_eiointc.h> +#include <asm/kvm_pch_pic.h> const struct _kvm_stats_desc kvm_vm_stats_desc[] = { KVM_GENERIC_VM_STATS(), @@ -46,7 +48,11 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type) if (kvm_pvtime_supported()) kvm->arch.pv_features |= BIT(KVM_FEATURE_STEAL_TIME); - kvm->arch.gpa_size = BIT(cpu_vabits - 1); + /* + * cpu_vabits means user address space only (a half of total). + * GPA size of VM is the same with the size of user address space. + */ + kvm->arch.gpa_size = BIT(cpu_vabits); kvm->arch.root_level = CONFIG_PGTABLE_LEVELS - 1; kvm->arch.invalid_ptes[0] = 0; kvm->arch.invalid_ptes[1] = (unsigned long)invalid_pte_table; @@ -76,6 +82,7 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext) int r; switch (ext) { + case KVM_CAP_IRQCHIP: case KVM_CAP_ONE_REG: case KVM_CAP_ENABLE_CAP: case KVM_CAP_READONLY_MEM: @@ -161,6 +168,8 @@ int kvm_arch_vm_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg) struct kvm_device_attr attr; switch (ioctl) { + case KVM_CREATE_IRQCHIP: + return 0; case KVM_HAS_DEVICE_ATTR: if (copy_from_user(&attr, argp, sizeof(attr))) return -EFAULT; @@ -170,3 +179,19 @@ int kvm_arch_vm_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg) return -ENOIOCTLCMD; } } + +int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_event, bool line_status) +{ + if (!kvm_arch_irqchip_in_kernel(kvm)) + return -ENXIO; + + irq_event->status = kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID, + irq_event->irq, irq_event->level, line_status); + + return 0; +} + +bool kvm_arch_irqchip_in_kernel(struct kvm *kvm) +{ + return (kvm->arch.ipi && kvm->arch.eiointc && kvm->arch.pch_pic); +} |
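
Taken together, the IPI, EIOINTC and PCH-PIC devices added by this series are meant to be instantiated from userspace through the generic KVM device API, after which KVM_CREATE_IRQCHIP becomes a no-op and KVM_CAP_IRQCHIP is reported. Below is a minimal, hypothetical userspace sketch, not part of this patch set: it assumes a vm_fd obtained from KVM_CREATE_VM, the KVM_DEV_TYPE_LOONGARCH_IPI and KVM_DEV_TYPE_LOONGARCH_PCHPIC types registered above, and a KVM_DEV_TYPE_LOONGARCH_EIOINTC type for the EIOINTC device (its registration is called from kvm_loongarch_env_init() here, but the constant itself is not shown in this diff). The PCH-PIC base address is programmed through KVM_DEV_LOONGARCH_PCH_PIC_GRP_CTRL / KVM_DEV_LOONGARCH_PCH_PIC_CTRL_INIT, which kvm_pch_pic_set_attr() reads as a u64 via copy_from_user().

/* Hypothetical userspace sketch: create the in-kernel LoongArch irqchips. */
#include <linux/kvm.h>
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>

static int create_kvm_device(int vm_fd, uint32_t type)
{
	struct kvm_create_device cd;

	memset(&cd, 0, sizeof(cd));
	cd.type = type;
	if (ioctl(vm_fd, KVM_CREATE_DEVICE, &cd) < 0)
		return -1;
	return cd.fd;	/* device fd, used for KVM_SET_DEVICE_ATTR below */
}

int setup_loongarch_irqchip(int vm_fd, uint64_t pch_pic_base)
{
	int ipi_fd, eiointc_fd, pic_fd;
	struct kvm_device_attr attr;

	/* Create IPI and EIOINTC first, then the PCH-PIC that routes into them. */
	ipi_fd = create_kvm_device(vm_fd, KVM_DEV_TYPE_LOONGARCH_IPI);
	eiointc_fd = create_kvm_device(vm_fd, KVM_DEV_TYPE_LOONGARCH_EIOINTC);
	pic_fd = create_kvm_device(vm_fd, KVM_DEV_TYPE_LOONGARCH_PCHPIC);
	if (ipi_fd < 0 || eiointc_fd < 0 || pic_fd < 0)
		return -1;

	/* Tell the kernel where the PCH-PIC MMIO window lives (u64 passed by pointer). */
	memset(&attr, 0, sizeof(attr));
	attr.group = KVM_DEV_LOONGARCH_PCH_PIC_GRP_CTRL;
	attr.attr = KVM_DEV_LOONGARCH_PCH_PIC_CTRL_INIT;
	attr.addr = (uint64_t)(unsigned long)&pch_pic_base;
	return ioctl(pic_fd, KVM_SET_DEVICE_ATTR, &attr);
}

Once all three devices exist, kvm_arch_irqchip_in_kernel() returns true, so KVM_IRQ_LINE is accepted by kvm_vm_ioctl_irq_line() and MSI routes injected via irqfd reach pch_msi_set_irq() through kvm_set_msi(). Any EIOINTC-specific configuration attributes are omitted from this sketch.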