author		Paolo Bonzini <pbonzini@redhat.com>	2022-10-03 22:33:43 +0300
committer	Paolo Bonzini <pbonzini@redhat.com>	2022-10-03 22:33:43 +0300
commit		e18d6152ff0f41b7f01f9817372022df04e0d354 (patch)
tree		9ddc6841785bb3349933c6d832e722ba91cea131 /arch/riscv
parent		fe4d9e4abf622598bd199d0805d20afa12f70c92 (diff)
parent		b60ca69715fcc39a5f4bdd56ca2ea691b7358455 (diff)
download	linux-e18d6152ff0f41b7f01f9817372022df04e0d354.tar.xz
Merge tag 'kvm-riscv-6.1-1' of https://github.com/kvm-riscv/linux into HEAD
KVM/riscv changes for 6.1
- Improved instruction encoding infrastructure for instructions
  not yet supported by binutils (see the encoding sketch after this list)
- Svinval support for both KVM Host and KVM Guest
- Zihintpause support for KVM Guest
- Zicbom support for KVM Guest
- Record number of signal exits as a VCPU stat
- Use generic guest entry infrastructure
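
The first item is easiest to follow with the field arithmetic written out.
Below is a minimal userspace sketch (not kernel code) of how the new
asm/insn-def.h assembles an R-type instruction word when the assembler lacks
".insn" support; the rv_insn_r() helper name is hypothetical, but the shift
constants and the HFENCE.GVMA field values (opcode 115, funct3 0, funct7 49)
are taken from the diff below.

    #include <stdint.h>
    #include <stdio.h>

    /* Field offsets of an R-type instruction, as in asm/insn-def.h. */
    #define INSN_R_FUNC7_SHIFT   25
    #define INSN_R_RS2_SHIFT     20
    #define INSN_R_RS1_SHIFT     15
    #define INSN_R_FUNC3_SHIFT   12
    #define INSN_R_RD_SHIFT       7
    #define INSN_R_OPCODE_SHIFT   0

    /* Hypothetical helper mirroring what the insn_r assembler macro emits. */
    static uint32_t rv_insn_r(uint32_t opcode, uint32_t func3, uint32_t func7,
                              uint32_t rd, uint32_t rs1, uint32_t rs2)
    {
            return (opcode << INSN_R_OPCODE_SHIFT) |
                   (func3 << INSN_R_FUNC3_SHIFT) |
                   (func7 << INSN_R_FUNC7_SHIFT) |
                   (rd << INSN_R_RD_SHIFT) |
                   (rs1 << INSN_R_RS1_SHIFT) |
                   (rs2 << INSN_R_RS2_SHIFT);
    }

    int main(void)
    {
            /* HFENCE.GVMA a0, a1: opcode SYSTEM (115), funct3 0, funct7 49,
             * rd = x0 (zero), rs1 = x10 (a0), rs2 = x11 (a1). */
            printf("0x%08x\n", rv_insn_r(115, 0, 49, 0, 10, 11));
            return 0;
    }

Compiled and run, this prints 0x62b50073 -- exactly the hardcoded .word
constant that the arch/riscv/kvm/tlb.c changes below delete in favor of the
HFENCE_GVMA() macro.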
Diffstat (limited to 'arch/riscv')
-rw-r--r--	arch/riscv/Kconfig			5
-rw-r--r--	arch/riscv/Kconfig.erratas		4
-rw-r--r--	arch/riscv/errata/thead/errata.c	1
-rw-r--r--	arch/riscv/include/asm/cacheflush.h	5
-rw-r--r--	arch/riscv/include/asm/gpr-num.h	8
-rw-r--r--	arch/riscv/include/asm/hwcap.h		4
-rw-r--r--	arch/riscv/include/asm/insn-def.h	137
-rw-r--r--	arch/riscv/include/asm/kvm_host.h	1
-rw-r--r--	arch/riscv/include/asm/kvm_vcpu_sbi.h	4
-rw-r--r--	arch/riscv/include/uapi/asm/kvm.h	4
-rw-r--r--	arch/riscv/kernel/cpu.c			1
-rw-r--r--	arch/riscv/kernel/cpufeature.c		1
-rw-r--r--	arch/riscv/kernel/setup.c		2
-rw-r--r--	arch/riscv/kernel/signal.c		2
-rw-r--r--	arch/riscv/kvm/Kconfig			1
-rw-r--r--	arch/riscv/kvm/main.c			2
-rw-r--r--	arch/riscv/kvm/tlb.c			155
-rw-r--r--	arch/riscv/kvm/vcpu.c			60
-rw-r--r--	arch/riscv/kvm/vcpu_exit.c		39
-rw-r--r--	arch/riscv/mm/dma-noncoherent.c		25
20 files changed, 285 insertions(+), 176 deletions(-)
diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig
index ed66c31e4655..74082e2d7ce8 100644
--- a/arch/riscv/Kconfig
+++ b/arch/riscv/Kconfig
@@ -103,6 +103,7 @@ config RISCV
 	select HAVE_PERF_EVENTS
 	select HAVE_PERF_REGS
 	select HAVE_PERF_USER_STACK_DUMP
+	select HAVE_POSIX_CPU_TIMERS_TASK_WORK
 	select HAVE_REGS_AND_STACK_ACCESS_API
 	select HAVE_FUNCTION_ARG_ACCESS_API
 	select HAVE_STACKPROTECTOR
@@ -227,6 +228,9 @@ config RISCV_DMA_NONCOHERENT
 	select ARCH_HAS_SETUP_DMA_OPS
 	select DMA_DIRECT_REMAP
 
+config AS_HAS_INSN
+	def_bool $(as-instr,.insn r 51$(comma) 0$(comma) 0$(comma) t0$(comma) t0$(comma) zero)
+
 source "arch/riscv/Kconfig.socs"
 source "arch/riscv/Kconfig.erratas"
 
@@ -386,6 +390,7 @@ config RISCV_ISA_C
 config RISCV_ISA_SVPBMT
 	bool "SVPBMT extension support"
 	depends on 64BIT && MMU
+	depends on !XIP_KERNEL
 	select RISCV_ALTERNATIVE
 	default y
 	help
diff --git a/arch/riscv/Kconfig.erratas b/arch/riscv/Kconfig.erratas
index 6850e9389930..f3623df23b5f 100644
--- a/arch/riscv/Kconfig.erratas
+++ b/arch/riscv/Kconfig.erratas
@@ -46,7 +46,7 @@ config ERRATA_THEAD
 
 config ERRATA_THEAD_PBMT
 	bool "Apply T-Head memory type errata"
-	depends on ERRATA_THEAD && 64BIT
+	depends on ERRATA_THEAD && 64BIT && MMU
 	select RISCV_ALTERNATIVE_EARLY
 	default y
 	help
@@ -57,7 +57,7 @@ config ERRATA_THEAD_PBMT
 
 config ERRATA_THEAD_CMO
 	bool "Apply T-Head cache management errata"
-	depends on ERRATA_THEAD
+	depends on ERRATA_THEAD && MMU
 	select RISCV_DMA_NONCOHERENT
 	default y
 	help
diff --git a/arch/riscv/errata/thead/errata.c b/arch/riscv/errata/thead/errata.c
index 202c83f677b2..96648c176f37 100644
--- a/arch/riscv/errata/thead/errata.c
+++ b/arch/riscv/errata/thead/errata.c
@@ -37,6 +37,7 @@ static bool errata_probe_cmo(unsigned int stage,
 	if (stage == RISCV_ALTERNATIVES_EARLY_BOOT)
 		return false;
 
+	riscv_cbom_block_size = L1_CACHE_BYTES;
 	riscv_noncoherent_supported();
 	return true;
 #else
diff --git a/arch/riscv/include/asm/cacheflush.h b/arch/riscv/include/asm/cacheflush.h
index a60acaecfeda..273ece6b622f 100644
--- a/arch/riscv/include/asm/cacheflush.h
+++ b/arch/riscv/include/asm/cacheflush.h
@@ -42,6 +42,11 @@ void flush_icache_mm(struct mm_struct *mm, bool local);
 
 #endif /* CONFIG_SMP */
 
+/*
+ * The T-Head CMO errata internally probe the CBOM block size, but otherwise
+ * don't depend on Zicbom.
+ */
+extern unsigned int riscv_cbom_block_size;
 #ifdef CONFIG_RISCV_ISA_ZICBOM
 void riscv_init_cbom_blocksize(void);
 #else
diff --git a/arch/riscv/include/asm/gpr-num.h b/arch/riscv/include/asm/gpr-num.h
index dfee2829fc7c..efeb5edf8a3a 100644
--- a/arch/riscv/include/asm/gpr-num.h
+++ b/arch/riscv/include/asm/gpr-num.h
@@ -3,6 +3,11 @@
 #define __ASM_GPR_NUM_H
 
 #ifdef __ASSEMBLY__
+
+	.irp	num,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31
+	.equ	.L__gpr_num_x\num, \num
+	.endr
+
 	.equ	.L__gpr_num_zero,	0
 	.equ	.L__gpr_num_ra,		1
 	.equ	.L__gpr_num_sp,		2
@@ -39,6 +44,9 @@
 #else /* __ASSEMBLY__ */
 
 #define __DEFINE_ASM_GPR_NUMS					\
+"	.irp	num,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31\n" \
+"	.equ	.L__gpr_num_x\\num, \\num\n"			\
+"	.endr\n"						\
 "	.equ	.L__gpr_num_zero,	0\n"			\
 "	.equ	.L__gpr_num_ra,		1\n"			\
 "	.equ	.L__gpr_num_sp,		2\n"			\
diff --git a/arch/riscv/include/asm/hwcap.h b/arch/riscv/include/asm/hwcap.h
index 6f59ec64175e..b22525290073 100644
--- a/arch/riscv/include/asm/hwcap.h
+++ b/arch/riscv/include/asm/hwcap.h
@@ -58,6 +58,7 @@ enum riscv_isa_ext_id {
 	RISCV_ISA_EXT_ZICBOM,
 	RISCV_ISA_EXT_ZIHINTPAUSE,
 	RISCV_ISA_EXT_SSTC,
+	RISCV_ISA_EXT_SVINVAL,
 	RISCV_ISA_EXT_ID_MAX = RISCV_ISA_EXT_MAX,
 };
 
@@ -69,6 +70,7 @@ enum riscv_isa_ext_id {
 enum riscv_isa_ext_key {
 	RISCV_ISA_EXT_KEY_FPU,		/* For 'F' and 'D' */
 	RISCV_ISA_EXT_KEY_ZIHINTPAUSE,
+	RISCV_ISA_EXT_KEY_SVINVAL,
 	RISCV_ISA_EXT_KEY_MAX,
 };
 
@@ -90,6 +92,8 @@ static __always_inline int riscv_isa_ext2key(int num)
 		return RISCV_ISA_EXT_KEY_FPU;
 	case RISCV_ISA_EXT_ZIHINTPAUSE:
 		return RISCV_ISA_EXT_KEY_ZIHINTPAUSE;
+	case RISCV_ISA_EXT_SVINVAL:
+		return RISCV_ISA_EXT_KEY_SVINVAL;
 	default:
 		return -EINVAL;
 	}
diff --git a/arch/riscv/include/asm/insn-def.h b/arch/riscv/include/asm/insn-def.h
new file mode 100644
index 000000000000..16044affa57c
--- /dev/null
+++ b/arch/riscv/include/asm/insn-def.h
@@ -0,0 +1,137 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#ifndef __ASM_INSN_DEF_H
+#define __ASM_INSN_DEF_H
+
+#include <asm/asm.h>
+
+#define INSN_R_FUNC7_SHIFT	25
+#define INSN_R_RS2_SHIFT	20
+#define INSN_R_RS1_SHIFT	15
+#define INSN_R_FUNC3_SHIFT	12
+#define INSN_R_RD_SHIFT		 7
+#define INSN_R_OPCODE_SHIFT	 0
+
+#ifdef __ASSEMBLY__
+
+#ifdef CONFIG_AS_HAS_INSN
+
+	.macro insn_r, opcode, func3, func7, rd, rs1, rs2
+	.insn	r \opcode, \func3, \func7, \rd, \rs1, \rs2
+	.endm
+
+#else
+
+#include <asm/gpr-num.h>
+
+	.macro insn_r, opcode, func3, func7, rd, rs1, rs2
+	.4byte	((\opcode << INSN_R_OPCODE_SHIFT) |		\
+		 (\func3 << INSN_R_FUNC3_SHIFT) |		\
+		 (\func7 << INSN_R_FUNC7_SHIFT) |		\
+		 (.L__gpr_num_\rd << INSN_R_RD_SHIFT) |		\
+		 (.L__gpr_num_\rs1 << INSN_R_RS1_SHIFT) |	\
+		 (.L__gpr_num_\rs2 << INSN_R_RS2_SHIFT))
+	.endm
+
+#endif
+
+#define __INSN_R(...)	insn_r __VA_ARGS__
+
+#else /* ! __ASSEMBLY__ */
+
+#ifdef CONFIG_AS_HAS_INSN
+
+#define __INSN_R(opcode, func3, func7, rd, rs1, rs2)	\
+	".insn	r " opcode ", " func3 ", " func7 ", " rd ", " rs1 ", " rs2 "\n"
+
+#else
+
+#include <linux/stringify.h>
+#include <asm/gpr-num.h>
+
+#define DEFINE_INSN_R							\
+	__DEFINE_ASM_GPR_NUMS						\
+"	.macro insn_r, opcode, func3, func7, rd, rs1, rs2\n"		\
+"	.4byte	((\\opcode << " __stringify(INSN_R_OPCODE_SHIFT) ") |"	\
+"		 (\\func3 << " __stringify(INSN_R_FUNC3_SHIFT) ") |"	\
+"		 (\\func7 << " __stringify(INSN_R_FUNC7_SHIFT) ") |"	\
+"		 (.L__gpr_num_\\rd << " __stringify(INSN_R_RD_SHIFT) ") |"	\
+"		 (.L__gpr_num_\\rs1 << " __stringify(INSN_R_RS1_SHIFT) ") |"	\
+"		 (.L__gpr_num_\\rs2 << " __stringify(INSN_R_RS2_SHIFT) "))\n"	\
+"	.endm\n"
+
+#define UNDEFINE_INSN_R							\
+"	.purgem insn_r\n"
+
+#define __INSN_R(opcode, func3, func7, rd, rs1, rs2)			\
+	DEFINE_INSN_R							\
+	"insn_r " opcode ", " func3 ", " func7 ", " rd ", " rs1 ", " rs2 "\n" \
+	UNDEFINE_INSN_R
+
+#endif
+
+#endif /* ! __ASSEMBLY__ */
+
+#define INSN_R(opcode, func3, func7, rd, rs1, rs2)	\
+	__INSN_R(RV_##opcode, RV_##func3, RV_##func7,	\
+		 RV_##rd, RV_##rs1, RV_##rs2)
+
+#define RV_OPCODE(v)		__ASM_STR(v)
+#define RV_FUNC3(v)		__ASM_STR(v)
+#define RV_FUNC7(v)		__ASM_STR(v)
+#define RV_RD(v)		__ASM_STR(v)
+#define RV_RS1(v)		__ASM_STR(v)
+#define RV_RS2(v)		__ASM_STR(v)
+#define __RV_REG(v)		__ASM_STR(x ## v)
+#define RV___RD(v)		__RV_REG(v)
+#define RV___RS1(v)		__RV_REG(v)
+#define RV___RS2(v)		__RV_REG(v)
+
+#define RV_OPCODE_SYSTEM	RV_OPCODE(115)
+
+#define HFENCE_VVMA(vaddr, asid)			\
+	INSN_R(OPCODE_SYSTEM, FUNC3(0), FUNC7(17),	\
+	       __RD(0), RS1(vaddr), RS2(asid))
+
+#define HFENCE_GVMA(gaddr, vmid)			\
+	INSN_R(OPCODE_SYSTEM, FUNC3(0), FUNC7(49),	\
+	       __RD(0), RS1(gaddr), RS2(vmid))
+
+#define HLVX_HU(dest, addr)				\
+	INSN_R(OPCODE_SYSTEM, FUNC3(4), FUNC7(50),	\
+	       RD(dest), RS1(addr), __RS2(3))
+
+#define HLV_W(dest, addr)				\
+	INSN_R(OPCODE_SYSTEM, FUNC3(4), FUNC7(52),	\
+	       RD(dest), RS1(addr), __RS2(0))
+
+#ifdef CONFIG_64BIT
+#define HLV_D(dest, addr)				\
+	INSN_R(OPCODE_SYSTEM, FUNC3(4), FUNC7(54),	\
+	       RD(dest), RS1(addr), __RS2(0))
+#else
+#define HLV_D(dest, addr)				\
+	__ASM_STR(.error "hlv.d requires 64-bit support")
+#endif
+
+#define SINVAL_VMA(vaddr, asid)				\
+	INSN_R(OPCODE_SYSTEM, FUNC3(0), FUNC7(11),	\
+	       __RD(0), RS1(vaddr), RS2(asid))
+
+#define SFENCE_W_INVAL()				\
+	INSN_R(OPCODE_SYSTEM, FUNC3(0), FUNC7(12),	\
+	       __RD(0), __RS1(0), __RS2(0))
+
+#define SFENCE_INVAL_IR()				\
+	INSN_R(OPCODE_SYSTEM, FUNC3(0), FUNC7(12),	\
+	       __RD(0), __RS1(0), __RS2(1))
+
+#define HINVAL_VVMA(vaddr, asid)			\
+	INSN_R(OPCODE_SYSTEM, FUNC3(0), FUNC7(19),	\
+	       __RD(0), RS1(vaddr), RS2(asid))
+
+#define HINVAL_GVMA(gaddr, vmid)			\
+	INSN_R(OPCODE_SYSTEM, FUNC3(0), FUNC7(51),	\
+	       __RD(0), RS1(gaddr), RS2(vmid))
+
+#endif /* __ASM_INSN_DEF_H */
diff --git a/arch/riscv/include/asm/kvm_host.h b/arch/riscv/include/asm/kvm_host.h
index 60c517e4d576..dbbf43d52623 100644
--- a/arch/riscv/include/asm/kvm_host.h
+++ b/arch/riscv/include/asm/kvm_host.h
@@ -67,6 +67,7 @@ struct kvm_vcpu_stat {
 	u64 mmio_exit_kernel;
 	u64 csr_exit_user;
 	u64 csr_exit_kernel;
+	u64 signal_exits;
 	u64 exits;
 };
 
diff --git a/arch/riscv/include/asm/kvm_vcpu_sbi.h b/arch/riscv/include/asm/kvm_vcpu_sbi.h
index 26a446a34057..d4e3e600beef 100644
--- a/arch/riscv/include/asm/kvm_vcpu_sbi.h
+++ b/arch/riscv/include/asm/kvm_vcpu_sbi.h
@@ -11,8 +11,8 @@
 
 #define KVM_SBI_IMPID 3
 
-#define KVM_SBI_VERSION_MAJOR 0
-#define KVM_SBI_VERSION_MINOR 3
+#define KVM_SBI_VERSION_MAJOR 1
+#define KVM_SBI_VERSION_MINOR 0
 
 struct kvm_vcpu_sbi_extension {
 	unsigned long extid_start;
diff --git a/arch/riscv/include/uapi/asm/kvm.h b/arch/riscv/include/uapi/asm/kvm.h
index 7351417afd62..8985ff234c01 100644
--- a/arch/riscv/include/uapi/asm/kvm.h
+++ b/arch/riscv/include/uapi/asm/kvm.h
@@ -48,6 +48,7 @@ struct kvm_sregs {
 /* CONFIG registers for KVM_GET_ONE_REG and KVM_SET_ONE_REG */
 struct kvm_riscv_config {
 	unsigned long isa;
+	unsigned long zicbom_block_size;
 };
 
 /* CORE registers for KVM_GET_ONE_REG and KVM_SET_ONE_REG */
@@ -98,6 +99,9 @@ enum KVM_RISCV_ISA_EXT_ID {
 	KVM_RISCV_ISA_EXT_M,
 	KVM_RISCV_ISA_EXT_SVPBMT,
 	KVM_RISCV_ISA_EXT_SSTC,
+	KVM_RISCV_ISA_EXT_SVINVAL,
+	KVM_RISCV_ISA_EXT_ZIHINTPAUSE,
+	KVM_RISCV_ISA_EXT_ZICBOM,
 	KVM_RISCV_ISA_EXT_MAX,
 };
 
diff --git a/arch/riscv/kernel/cpu.c b/arch/riscv/kernel/cpu.c
index 0be8a2403212..7d1cd653ca02 100644
--- a/arch/riscv/kernel/cpu.c
+++ b/arch/riscv/kernel/cpu.c
@@ -96,6 +96,7 @@ static struct riscv_isa_ext_data isa_ext_arr[] = {
 	__RISCV_ISA_EXT_DATA(zicbom, RISCV_ISA_EXT_ZICBOM),
 	__RISCV_ISA_EXT_DATA(zihintpause, RISCV_ISA_EXT_ZIHINTPAUSE),
 	__RISCV_ISA_EXT_DATA(sstc, RISCV_ISA_EXT_SSTC),
+	__RISCV_ISA_EXT_DATA(svinval, RISCV_ISA_EXT_SVINVAL),
 	__RISCV_ISA_EXT_DATA("", RISCV_ISA_EXT_MAX),
 };
 
diff --git a/arch/riscv/kernel/cpufeature.c b/arch/riscv/kernel/cpufeature.c
index 3b5583db9d80..9774f1271f93 100644
--- a/arch/riscv/kernel/cpufeature.c
+++ b/arch/riscv/kernel/cpufeature.c
@@ -204,6 +204,7 @@ void __init riscv_fill_hwcap(void)
 				SET_ISA_EXT_MAP("zicbom", RISCV_ISA_EXT_ZICBOM);
 				SET_ISA_EXT_MAP("zihintpause", RISCV_ISA_EXT_ZIHINTPAUSE);
 				SET_ISA_EXT_MAP("sstc", RISCV_ISA_EXT_SSTC);
+				SET_ISA_EXT_MAP("svinval", RISCV_ISA_EXT_SVINVAL);
 			}
 #undef SET_ISA_EXT_MAP
 		}
diff --git a/arch/riscv/kernel/setup.c b/arch/riscv/kernel/setup.c
index 95ef6e2bf45c..2dfc463b86bb 100644
--- a/arch/riscv/kernel/setup.c
+++ b/arch/riscv/kernel/setup.c
@@ -296,8 +296,8 @@ void __init setup_arch(char **cmdline_p)
 	setup_smp();
 #endif
 
-	riscv_fill_hwcap();
 	riscv_init_cbom_blocksize();
+	riscv_fill_hwcap();
 	apply_boot_alternatives();
 }
 
diff --git a/arch/riscv/kernel/signal.c b/arch/riscv/kernel/signal.c
index 5a2de6b6f882..5c591123c440 100644
--- a/arch/riscv/kernel/signal.c
+++ b/arch/riscv/kernel/signal.c
@@ -124,6 +124,8 @@ SYSCALL_DEFINE0(rt_sigreturn)
 	if (restore_altstack(&frame->uc.uc_stack))
 		goto badframe;
 
+	regs->cause = -1UL;
+
 	return regs->a0;
 
 badframe:
diff --git a/arch/riscv/kvm/Kconfig b/arch/riscv/kvm/Kconfig
index f5a342fa1b1d..f36a737d5f96 100644
--- a/arch/riscv/kvm/Kconfig
+++ b/arch/riscv/kvm/Kconfig
@@ -24,6 +24,7 @@ config KVM
 	select PREEMPT_NOTIFIERS
 	select KVM_MMIO
 	select KVM_GENERIC_DIRTYLOG_READ_PROTECT
+	select KVM_XFER_TO_GUEST_WORK
 	select HAVE_KVM_VCPU_ASYNC_IOCTL
 	select HAVE_KVM_EVENTFD
 	select SRCU
diff --git a/arch/riscv/kvm/main.c b/arch/riscv/kvm/main.c
index 1549205fe5fe..df2d8716851f 100644
--- a/arch/riscv/kvm/main.c
+++ b/arch/riscv/kvm/main.c
@@ -122,7 +122,7 @@ void kvm_arch_exit(void)
 {
 }
 
-static int riscv_kvm_init(void)
+static int __init riscv_kvm_init(void)
 {
 	return kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
 }
diff --git a/arch/riscv/kvm/tlb.c b/arch/riscv/kvm/tlb.c
index 1a76d0b1907d..309d79b3e5cd 100644
--- a/arch/riscv/kvm/tlb.c
+++ b/arch/riscv/kvm/tlb.c
@@ -12,22 +12,11 @@
 #include <linux/kvm_host.h>
 #include <asm/cacheflush.h>
 #include <asm/csr.h>
+#include <asm/hwcap.h>
+#include <asm/insn-def.h>
 
-/*
- * Instruction encoding of hfence.gvma is:
- * HFENCE.GVMA rs1, rs2
- * HFENCE.GVMA zero, rs2
- * HFENCE.GVMA rs1
- * HFENCE.GVMA
- *
- * rs1!=zero and rs2!=zero ==> HFENCE.GVMA rs1, rs2
- * rs1==zero and rs2!=zero ==> HFENCE.GVMA zero, rs2
- * rs1!=zero and rs2==zero ==> HFENCE.GVMA rs1
- * rs1==zero and rs2==zero ==> HFENCE.GVMA
- *
- * Instruction encoding of HFENCE.GVMA is:
- * 0110001 rs2(5) rs1(5) 000 00000 1110011
- */
+#define has_svinval()	\
+	static_branch_unlikely(&riscv_isa_ext_keys[RISCV_ISA_EXT_KEY_SVINVAL])
 
 void kvm_riscv_local_hfence_gvma_vmid_gpa(unsigned long vmid,
 					  gpa_t gpa, gpa_t gpsz,
@@ -40,32 +29,22 @@ void kvm_riscv_local_hfence_gvma_vmid_gpa(unsigned long vmid,
 		return;
 	}
 
-	for (pos = gpa; pos < (gpa + gpsz); pos += BIT(order)) {
-		/*
-		 * rs1 = a0 (GPA >> 2)
-		 * rs2 = a1 (VMID)
-		 * HFENCE.GVMA a0, a1
-		 * 0110001 01011 01010 000 00000 1110011
-		 */
-		asm volatile ("srli a0, %0, 2\n"
-			      "add a1, %1, zero\n"
-			      ".word 0x62b50073\n"
-			      :: "r" (pos), "r" (vmid)
-			      : "a0", "a1", "memory");
+	if (has_svinval()) {
+		asm volatile (SFENCE_W_INVAL() ::: "memory");
+		for (pos = gpa; pos < (gpa + gpsz); pos += BIT(order))
+			asm volatile (HINVAL_GVMA(%0, %1)
+			: : "r" (pos >> 2), "r" (vmid) : "memory");
+		asm volatile (SFENCE_INVAL_IR() ::: "memory");
+	} else {
+		for (pos = gpa; pos < (gpa + gpsz); pos += BIT(order))
+			asm volatile (HFENCE_GVMA(%0, %1)
+			: : "r" (pos >> 2), "r" (vmid) : "memory");
 	}
 }
 
 void kvm_riscv_local_hfence_gvma_vmid_all(unsigned long vmid)
 {
-	/*
-	 * rs1 = zero
-	 * rs2 = a0 (VMID)
-	 * HFENCE.GVMA zero, a0
-	 * 0110001 01010 00000 000 00000 1110011
-	 */
-	asm volatile ("add a0, %0, zero\n"
-		      ".word 0x62a00073\n"
-		      :: "r" (vmid) : "a0", "memory");
+	asm volatile(HFENCE_GVMA(zero, %0) : : "r" (vmid) : "memory");
 }
 
 void kvm_riscv_local_hfence_gvma_gpa(gpa_t gpa, gpa_t gpsz,
@@ -78,46 +57,24 @@ void kvm_riscv_local_hfence_gvma_gpa(gpa_t gpa, gpa_t gpsz,
 		return;
 	}
 
-	for (pos = gpa; pos < (gpa + gpsz); pos += BIT(order)) {
-		/*
-		 * rs1 = a0 (GPA >> 2)
-		 * rs2 = zero
-		 * HFENCE.GVMA a0
-		 * 0110001 00000 01010 000 00000 1110011
-		 */
-		asm volatile ("srli a0, %0, 2\n"
-			      ".word 0x62050073\n"
-			      :: "r" (pos) : "a0", "memory");
+	if (has_svinval()) {
+		asm volatile (SFENCE_W_INVAL() ::: "memory");
+		for (pos = gpa; pos < (gpa + gpsz); pos += BIT(order))
+			asm volatile(HINVAL_GVMA(%0, zero)
+			: : "r" (pos >> 2) : "memory");
+		asm volatile (SFENCE_INVAL_IR() ::: "memory");
+	} else {
+		for (pos = gpa; pos < (gpa + gpsz); pos += BIT(order))
+			asm volatile(HFENCE_GVMA(%0, zero)
+			: : "r" (pos >> 2) : "memory");
 	}
 }
 
 void kvm_riscv_local_hfence_gvma_all(void)
 {
-	/*
-	 * rs1 = zero
-	 * rs2 = zero
-	 * HFENCE.GVMA
-	 * 0110001 00000 00000 000 00000 1110011
-	 */
-	asm volatile (".word 0x62000073" ::: "memory");
+	asm volatile(HFENCE_GVMA(zero, zero) : : : "memory");
 }
 
-/*
- * Instruction encoding of hfence.vvma is:
- * HFENCE.VVMA rs1, rs2
- * HFENCE.VVMA zero, rs2
- * HFENCE.VVMA rs1
- * HFENCE.VVMA
- *
- * rs1!=zero and rs2!=zero ==> HFENCE.VVMA rs1, rs2
- * rs1==zero and rs2!=zero ==> HFENCE.VVMA zero, rs2
- * rs1!=zero and rs2==zero ==> HFENCE.VVMA rs1
- * rs1==zero and rs2==zero ==> HFENCE.VVMA
- *
- * Instruction encoding of HFENCE.VVMA is:
- * 0010001 rs2(5) rs1(5) 000 00000 1110011
- */
-
 void kvm_riscv_local_hfence_vvma_asid_gva(unsigned long vmid,
 					  unsigned long asid,
 					  unsigned long gva,
@@ -133,18 +90,16 @@ void kvm_riscv_local_hfence_vvma_asid_gva(unsigned long vmid,
 
 	hgatp = csr_swap(CSR_HGATP, vmid << HGATP_VMID_SHIFT);
 
-	for (pos = gva; pos < (gva + gvsz); pos += BIT(order)) {
-		/*
-		 * rs1 = a0 (GVA)
-		 * rs2 = a1 (ASID)
-		 * HFENCE.VVMA a0, a1
-		 * 0010001 01011 01010 000 00000 1110011
-		 */
-		asm volatile ("add a0, %0, zero\n"
-			      "add a1, %1, zero\n"
-			      ".word 0x22b50073\n"
-			      :: "r" (pos), "r" (asid)
-			      : "a0", "a1", "memory");
+	if (has_svinval()) {
+		asm volatile (SFENCE_W_INVAL() ::: "memory");
+		for (pos = gva; pos < (gva + gvsz); pos += BIT(order))
+			asm volatile(HINVAL_VVMA(%0, %1)
+			: : "r" (pos), "r" (asid) : "memory");
+		asm volatile (SFENCE_INVAL_IR() ::: "memory");
+	} else {
+		for (pos = gva; pos < (gva + gvsz); pos += BIT(order))
+			asm volatile(HFENCE_VVMA(%0, %1)
+			: : "r" (pos), "r" (asid) : "memory");
 	}
 
 	csr_write(CSR_HGATP, hgatp);
@@ -157,15 +112,7 @@ void kvm_riscv_local_hfence_vvma_asid_all(unsigned long vmid,
 
 	hgatp = csr_swap(CSR_HGATP, vmid << HGATP_VMID_SHIFT);
 
-	/*
-	 * rs1 = zero
-	 * rs2 = a0 (ASID)
-	 * HFENCE.VVMA zero, a0
-	 * 0010001 01010 00000 000 00000 1110011
-	 */
-	asm volatile ("add a0, %0, zero\n"
-		      ".word 0x22a00073\n"
-		      :: "r" (asid) : "a0", "memory");
+	asm volatile(HFENCE_VVMA(zero, %0) : : "r" (asid) : "memory");
 
 	csr_write(CSR_HGATP, hgatp);
 }
@@ -183,16 +130,16 @@ void kvm_riscv_local_hfence_vvma_gva(unsigned long vmid,
 
 	hgatp = csr_swap(CSR_HGATP, vmid << HGATP_VMID_SHIFT);
 
-	for (pos = gva; pos < (gva + gvsz); pos += BIT(order)) {
-		/*
-		 * rs1 = a0 (GVA)
-		 * rs2 = zero
-		 * HFENCE.VVMA a0
-		 * 0010001 00000 01010 000 00000 1110011
-		 */
-		asm volatile ("add a0, %0, zero\n"
-			      ".word 0x22050073\n"
-			      :: "r" (pos) : "a0", "memory");
+	if (has_svinval()) {
+		asm volatile (SFENCE_W_INVAL() ::: "memory");
+		for (pos = gva; pos < (gva + gvsz); pos += BIT(order))
+			asm volatile(HINVAL_VVMA(%0, zero)
+			: : "r" (pos) : "memory");
+		asm volatile (SFENCE_INVAL_IR() ::: "memory");
+	} else {
+		for (pos = gva; pos < (gva + gvsz); pos += BIT(order))
+			asm volatile(HFENCE_VVMA(%0, zero)
+			: : "r" (pos) : "memory");
 	}
 
 	csr_write(CSR_HGATP, hgatp);
@@ -204,13 +151,7 @@ void kvm_riscv_local_hfence_vvma_all(unsigned long vmid)
 
 	hgatp = csr_swap(CSR_HGATP, vmid << HGATP_VMID_SHIFT);
 
-	/*
-	 * rs1 = zero
-	 * rs2 = zero
-	 * HFENCE.VVMA
-	 * 0010001 00000 00000 000 00000 1110011
-	 */
-	asm volatile (".word 0x22000073" ::: "memory");
+	asm volatile(HFENCE_VVMA(zero, zero) : : : "memory");
 
 	csr_write(CSR_HGATP, hgatp);
 }
diff --git a/arch/riscv/kvm/vcpu.c b/arch/riscv/kvm/vcpu.c
index d0f08d5b4282..a032c4f0d600 100644
--- a/arch/riscv/kvm/vcpu.c
+++ b/arch/riscv/kvm/vcpu.c
@@ -7,6 +7,7 @@
  */
 
 #include <linux/bitops.h>
+#include <linux/entry-kvm.h>
 #include <linux/errno.h>
 #include <linux/err.h>
 #include <linux/kdebug.h>
@@ -18,6 +19,7 @@
 #include <linux/fs.h>
 #include <linux/kvm_host.h>
 #include <asm/csr.h>
+#include <asm/cacheflush.h>
 #include <asm/hwcap.h>
 
 const struct _kvm_stats_desc kvm_vcpu_stats_desc[] = {
@@ -28,6 +30,7 @@ const struct _kvm_stats_desc kvm_vcpu_stats_desc[] = {
 	STATS_DESC_COUNTER(VCPU, mmio_exit_kernel),
 	STATS_DESC_COUNTER(VCPU, csr_exit_user),
 	STATS_DESC_COUNTER(VCPU, csr_exit_kernel),
+	STATS_DESC_COUNTER(VCPU, signal_exits),
 	STATS_DESC_COUNTER(VCPU, exits)
 };
 
@@ -42,17 +45,23 @@ const struct kvm_stats_header kvm_vcpu_stats_header = {
 
 #define KVM_RISCV_BASE_ISA_MASK		GENMASK(25, 0)
 
+#define KVM_ISA_EXT_ARR(ext)		[KVM_RISCV_ISA_EXT_##ext] = RISCV_ISA_EXT_##ext
+
 /* Mapping between KVM ISA Extension ID & Host ISA extension ID */
 static const unsigned long kvm_isa_ext_arr[] = {
-	RISCV_ISA_EXT_a,
-	RISCV_ISA_EXT_c,
-	RISCV_ISA_EXT_d,
-	RISCV_ISA_EXT_f,
-	RISCV_ISA_EXT_h,
-	RISCV_ISA_EXT_i,
-	RISCV_ISA_EXT_m,
-	RISCV_ISA_EXT_SVPBMT,
-	RISCV_ISA_EXT_SSTC,
+	[KVM_RISCV_ISA_EXT_A] = RISCV_ISA_EXT_a,
+	[KVM_RISCV_ISA_EXT_C] = RISCV_ISA_EXT_c,
+	[KVM_RISCV_ISA_EXT_D] = RISCV_ISA_EXT_d,
+	[KVM_RISCV_ISA_EXT_F] = RISCV_ISA_EXT_f,
+	[KVM_RISCV_ISA_EXT_H] = RISCV_ISA_EXT_h,
+	[KVM_RISCV_ISA_EXT_I] = RISCV_ISA_EXT_i,
+	[KVM_RISCV_ISA_EXT_M] = RISCV_ISA_EXT_m,
+
+	KVM_ISA_EXT_ARR(SSTC),
+	KVM_ISA_EXT_ARR(SVINVAL),
+	KVM_ISA_EXT_ARR(SVPBMT),
+	KVM_ISA_EXT_ARR(ZIHINTPAUSE),
+	KVM_ISA_EXT_ARR(ZICBOM),
 };
 
 static unsigned long kvm_riscv_vcpu_base2isa_ext(unsigned long base_ext)
@@ -87,6 +96,8 @@ static bool kvm_riscv_vcpu_isa_disable_allowed(unsigned long ext)
 	case KVM_RISCV_ISA_EXT_I:
 	case KVM_RISCV_ISA_EXT_M:
 	case KVM_RISCV_ISA_EXT_SSTC:
+	case KVM_RISCV_ISA_EXT_SVINVAL:
+	case KVM_RISCV_ISA_EXT_ZIHINTPAUSE:
 		return false;
 	default:
 		break;
@@ -254,6 +265,11 @@ static int kvm_riscv_vcpu_get_reg_config(struct kvm_vcpu *vcpu,
 	case KVM_REG_RISCV_CONFIG_REG(isa):
 		reg_val = vcpu->arch.isa[0] & KVM_RISCV_BASE_ISA_MASK;
 		break;
+	case KVM_REG_RISCV_CONFIG_REG(zicbom_block_size):
+		if (!riscv_isa_extension_available(vcpu->arch.isa, ZICBOM))
+			return -EINVAL;
+		reg_val = riscv_cbom_block_size;
+		break;
 	default:
 		return -EINVAL;
 	}
@@ -311,6 +327,8 @@ static int kvm_riscv_vcpu_set_reg_config(struct kvm_vcpu *vcpu,
 			return -EOPNOTSUPP;
 		}
 		break;
+	case KVM_REG_RISCV_CONFIG_REG(zicbom_block_size):
+		return -EOPNOTSUPP;
 	default:
 		return -EINVAL;
 	}
@@ -784,11 +802,15 @@ static void kvm_riscv_vcpu_update_config(const unsigned long *isa)
 {
 	u64 henvcfg = 0;
 
-	if (__riscv_isa_extension_available(isa, RISCV_ISA_EXT_SVPBMT))
+	if (riscv_isa_extension_available(isa, SVPBMT))
 		henvcfg |= ENVCFG_PBMTE;
 
-	if (__riscv_isa_extension_available(isa, RISCV_ISA_EXT_SSTC))
+	if (riscv_isa_extension_available(isa, SSTC))
 		henvcfg |= ENVCFG_STCE;
+
+	if (riscv_isa_extension_available(isa, ZICBOM))
+		henvcfg |= (ENVCFG_CBIE | ENVCFG_CBCFE);
+
 	csr_write(CSR_HENVCFG, henvcfg);
 #ifdef CONFIG_32BIT
 	csr_write(CSR_HENVCFGH, henvcfg >> 32);
@@ -958,7 +980,9 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
 	run->exit_reason = KVM_EXIT_UNKNOWN;
 	while (ret > 0) {
 		/* Check conditions before entering the guest */
-		cond_resched();
+		ret = xfer_to_guest_mode_handle_work(vcpu);
+		if (!ret)
+			ret = 1;
 
 		kvm_riscv_gstage_vmid_update(vcpu);
 
@@ -967,15 +991,6 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
 		local_irq_disable();
 
 		/*
-		 * Exit if we have a signal pending so that we can deliver
-		 * the signal to user space.
-		 */
-		if (signal_pending(current)) {
-			ret = -EINTR;
-			run->exit_reason = KVM_EXIT_INTR;
-		}
-
-		/*
 		 * Ensure we set mode to IN_GUEST_MODE after we disable
 		 * interrupts and before the final VCPU requests check.
 		 * See the comment in kvm_vcpu_exiting_guest_mode() and
@@ -997,7 +1012,8 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
 
 		if (ret <= 0 ||
 		    kvm_riscv_gstage_vmid_ver_changed(&vcpu->kvm->arch.vmid) ||
-		    kvm_request_pending(vcpu)) {
+		    kvm_request_pending(vcpu) ||
+		    xfer_to_guest_mode_work_pending()) {
 			vcpu->mode = OUTSIDE_GUEST_MODE;
 			local_irq_enable();
 			kvm_vcpu_srcu_read_lock(vcpu);
diff --git a/arch/riscv/kvm/vcpu_exit.c b/arch/riscv/kvm/vcpu_exit.c
index d5c36386878a..c9f741ab26f5 100644
--- a/arch/riscv/kvm/vcpu_exit.c
+++ b/arch/riscv/kvm/vcpu_exit.c
@@ -8,6 +8,7 @@
 
 #include <linux/kvm_host.h>
 #include <asm/csr.h>
+#include <asm/insn-def.h>
 
 static int gstage_page_fault(struct kvm_vcpu *vcpu, struct kvm_run *run,
 			     struct kvm_cpu_trap *trap)
@@ -62,11 +63,7 @@ unsigned long kvm_riscv_vcpu_unpriv_read(struct kvm_vcpu *vcpu,
 {
 	register unsigned long taddr asm("a0") = (unsigned long)trap;
 	register unsigned long ttmp asm("a1");
-	register unsigned long val asm("t0");
-	register unsigned long tmp asm("t1");
-	register unsigned long addr asm("t2") = guest_addr;
-	unsigned long flags;
-	unsigned long old_stvec, old_hstatus;
+	unsigned long flags, val, tmp, old_stvec, old_hstatus;
 
 	local_irq_save(flags);
 
@@ -82,29 +79,19 @@ unsigned long kvm_riscv_vcpu_unpriv_read(struct kvm_vcpu *vcpu,
 			".option push\n"
 			".option norvc\n"
 			"add %[ttmp], %[taddr], 0\n"
-			/*
-			 * HLVX.HU %[val], (%[addr])
-			 * HLVX.HU t0, (t2)
-			 * 0110010 00011 00111 100 00101 1110011
-			 */
-			".word 0x6433c2f3\n"
+			HLVX_HU(%[val], %[addr])
 			"andi %[tmp], %[val], 3\n"
 			"addi %[tmp], %[tmp], -3\n"
 			"bne %[tmp], zero, 2f\n"
 			"addi %[addr], %[addr], 2\n"
-			/*
-			 * HLVX.HU %[tmp], (%[addr])
-			 * HLVX.HU t1, (t2)
-			 * 0110010 00011 00111 100 00110 1110011
-			 */
-			".word 0x6433c373\n"
+			HLVX_HU(%[tmp], %[addr])
 			"sll %[tmp], %[tmp], 16\n"
 			"add %[val], %[val], %[tmp]\n"
 			"2:\n"
 			".option pop"
 		: [val] "=&r" (val), [tmp] "=&r" (tmp),
 		  [taddr] "+&r" (taddr), [ttmp] "+&r" (ttmp),
-		  [addr] "+&r" (addr) : : "memory");
+		  [addr] "+&r" (guest_addr) : : "memory");
 
 		if (trap->scause == EXC_LOAD_PAGE_FAULT)
 			trap->scause = EXC_INST_PAGE_FAULT;
@@ -121,24 +108,14 @@ unsigned long kvm_riscv_vcpu_unpriv_read(struct kvm_vcpu *vcpu,
 			".option norvc\n"
 			"add %[ttmp], %[taddr], 0\n"
 #ifdef CONFIG_64BIT
-			/*
-			 * HLV.D %[val], (%[addr])
-			 * HLV.D t0, (t2)
-			 * 0110110 00000 00111 100 00101 1110011
-			 */
-			".word 0x6c03c2f3\n"
+			HLV_D(%[val], %[addr])
 #else
-			/*
-			 * HLV.W %[val], (%[addr])
-			 * HLV.W t0, (t2)
-			 * 0110100 00000 00111 100 00101 1110011
-			 */
-			".word 0x6803c2f3\n"
+			HLV_W(%[val], %[addr])
 #endif
 			".option pop"
 		: [val] "=&r" (val),
 		  [taddr] "+&r" (taddr), [ttmp] "+&r" (ttmp)
-		: [addr] "r" (addr) : "memory");
+		: [addr] "r" (guest_addr) : "memory");
 	}
 
 	csr_write(CSR_STVEC, old_stvec);
diff --git a/arch/riscv/mm/dma-noncoherent.c b/arch/riscv/mm/dma-noncoherent.c
index cd2225304c82..b0add983530a 100644
--- a/arch/riscv/mm/dma-noncoherent.c
+++ b/arch/riscv/mm/dma-noncoherent.c
@@ -12,7 +12,9 @@
 #include <linux/of_device.h>
 #include <asm/cacheflush.h>
 
-static unsigned int riscv_cbom_block_size = L1_CACHE_BYTES;
+unsigned int riscv_cbom_block_size;
+EXPORT_SYMBOL_GPL(riscv_cbom_block_size);
+
 static bool noncoherent_supported;
 
 void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
@@ -79,38 +81,41 @@ void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
 void riscv_init_cbom_blocksize(void)
 {
 	struct device_node *node;
+	unsigned long cbom_hartid;
+	u32 val, probed_block_size;
 	int ret;
-	u32 val;
 
+	probed_block_size = 0;
 	for_each_of_cpu_node(node) {
 		unsigned long hartid;
-		int cbom_hartid;
 
 		ret = riscv_of_processor_hartid(node, &hartid);
 		if (ret)
 			continue;
 
-		if (hartid < 0)
-			continue;
-
 		/* set block-size for cbom extension if available */
 		ret = of_property_read_u32(node, "riscv,cbom-block-size", &val);
 		if (ret)
 			continue;
 
-		if (!riscv_cbom_block_size) {
-			riscv_cbom_block_size = val;
+		if (!probed_block_size) {
+			probed_block_size = val;
 			cbom_hartid = hartid;
 		} else {
-			if (riscv_cbom_block_size != val)
-				pr_warn("cbom-block-size mismatched between harts %d and %lu\n",
+			if (probed_block_size != val)
+				pr_warn("cbom-block-size mismatched between harts %lu and %lu\n",
 					cbom_hartid, hartid);
 		}
 	}
+
+	if (probed_block_size)
+		riscv_cbom_block_size = probed_block_size;
 }
 #endif
 
 void riscv_noncoherent_supported(void)
 {
+	WARN(!riscv_cbom_block_size,
+	     "Non-coherent DMA support enabled without a block size\n");
 	noncoherent_supported = true;
 }
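
The new zicbom_block_size config register added to struct kvm_riscv_config is
read-only from userspace. Below is a hedged sketch of querying it through the
standard KVM_GET_ONE_REG ioctl; it assumes a riscv64 host with 6.1-era uapi
headers, and VM/VCPU file-descriptor creation is elided.

    #include <stdint.h>
    #include <sys/ioctl.h>
    #include <linux/kvm.h>    /* struct kvm_one_reg, KVM_GET_ONE_REG */
    #include <asm/kvm.h>      /* KVM_REG_RISCV_CONFIG, struct kvm_riscv_config */

    static int read_zicbom_block_size(int vcpu_fd, unsigned long *out)
    {
            struct kvm_one_reg reg = {
                    /* CONFIG registers are indexed by their word offset into
                     * struct kvm_riscv_config, per KVM_REG_RISCV_CONFIG_REG(). */
                    .id = KVM_REG_RISCV | KVM_REG_SIZE_U64 |
                          KVM_REG_RISCV_CONFIG |
                          KVM_REG_RISCV_CONFIG_REG(zicbom_block_size),
                    .addr = (uint64_t)(unsigned long)out,
            };

            /* Per kvm_riscv_vcpu_get_reg_config() above, this fails with
             * EINVAL when the guest ISA does not include Zicbom; writing the
             * register fails with EOPNOTSUPP, since the block size is a host
             * property probed from the device tree, not a guest tunable. */
            return ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
    }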