Diffstat (limited to 'arch')
-rw-r--r--  arch/riscv/Kconfig               12
-rw-r--r--  arch/riscv/net/bpf_jit.h         18
-rw-r--r--  arch/riscv/net/bpf_jit_comp64.c  12
3 files changed, 37 insertions, 5 deletions
diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig
index 5468ccfd7223..08db25006e35 100644
--- a/arch/riscv/Kconfig
+++ b/arch/riscv/Kconfig
@@ -604,6 +604,18 @@ config TOOLCHAIN_HAS_VECTOR_CRYPTO
def_bool $(as-instr, .option arch$(comma) +v$(comma) +zvkb)
depends on AS_HAS_OPTION_ARCH
+config RISCV_ISA_ZBA
+ bool "Zba extension support for bit manipulation instructions"
+ default y
+ help
+ Add support for enabling optimisations in the kernel when the Zba
+ extension is detected at boot.
+
+ The Zba extension provides instructions to accelerate the generation
+ of addresses that index into arrays of basic data types.
+
+ If you don't know what to do here, say Y.
+
config RISCV_ISA_ZBB
bool "Zbb extension support for bit manipulation instructions"
depends on TOOLCHAIN_HAS_ZBB
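As background for the help text above: Zba's address-generation instructions (sh1add/sh2add/sh3add, plus add.uw on RV64) fold the index shift into the add. A minimal sketch, not part of the patch, assuming a toolchain built with Zba enabled (e.g. -march=rv64gc_zba); load_elem_zba() is a hypothetical helper used only for illustration:

static inline int load_elem_zba(const int *base, unsigned long idx)
{
	const int *p;

	/* sh2add p, idx, base  =>  p = base + (idx << 2) */
	asm("sh2add	%0, %1, %2" : "=r" (p) : "r" (idx), "r" (base));
	return *p;
}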
diff --git a/arch/riscv/net/bpf_jit.h b/arch/riscv/net/bpf_jit.h
index fdbf88ca8b70..97041b58237a 100644
--- a/arch/riscv/net/bpf_jit.h
+++ b/arch/riscv/net/bpf_jit.h
@@ -18,6 +18,11 @@ static inline bool rvc_enabled(void)
return IS_ENABLED(CONFIG_RISCV_ISA_C);
}
+static inline bool rvzba_enabled(void)
+{
+ return IS_ENABLED(CONFIG_RISCV_ISA_ZBA) && riscv_has_extension_likely(RISCV_ISA_EXT_ZBA);
+}
+
static inline bool rvzbb_enabled(void)
{
return IS_ENABLED(CONFIG_RISCV_ISA_ZBB) && riscv_has_extension_likely(RISCV_ISA_EXT_ZBB);
@@ -939,6 +944,14 @@ static inline u16 rvc_sdsp(u32 imm9, u8 rs2)
return rv_css_insn(0x7, imm, rs2, 0x2);
}
+/* RV64-only ZBA instructions. */
+
+static inline u32 rvzba_zextw(u8 rd, u8 rs1)
+{
+ /* add.uw rd, rs1, ZERO */
+ return rv_r_insn(0x04, RV_REG_ZERO, rs1, 0, rd, 0x3b);
+}
+
#endif /* __riscv_xlen == 64 */
/* Helper functions that emit RVC instructions when possible. */
@@ -1161,6 +1174,11 @@ static inline void emit_zexth(u8 rd, u8 rs, struct rv_jit_context *ctx)
static inline void emit_zextw(u8 rd, u8 rs, struct rv_jit_context *ctx)
{
+ if (rvzba_enabled()) {
+ emit(rvzba_zextw(rd, rs), ctx);
+ return;
+ }
+
emit_slli(rd, rs, 32, ctx);
emit_srli(rd, rd, 32, ctx);
}
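For reference, the constants passed in rvzba_zextw() are the standard R-type fields of add.uw: funct7 = 0x04, funct3 = 0, opcode = 0x3b (the OP-32 space). A minimal standalone sketch that rebuilds the same 32-bit encoding; r_insn() below is a stand-in mirroring the field layout of the kernel's rv_r_insn(), not the kernel helper itself:

#include <stdint.h>
#include <stdio.h>

/* Pack the R-type fields: funct7 | rs2 | rs1 | funct3 | rd | opcode. */
static uint32_t r_insn(uint32_t funct7, uint32_t rs2, uint32_t rs1,
		       uint32_t funct3, uint32_t rd, uint32_t opcode)
{
	return (funct7 << 25) | (rs2 << 20) | (rs1 << 15) |
	       (funct3 << 12) | (rd << 7) | opcode;
}

int main(void)
{
	/* add.uw a0, a1, zero (zext.w a0, a1): one insn vs. slli+srli. */
	printf("0x%08x\n",
	       r_insn(0x04, 0 /* zero */, 11 /* a1 */, 0, 10 /* a0 */, 0x3b));
	return 0;
}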
diff --git a/arch/riscv/net/bpf_jit_comp64.c b/arch/riscv/net/bpf_jit_comp64.c
index 79a001d5533e..2bc4c14ea59a 100644
--- a/arch/riscv/net/bpf_jit_comp64.c
+++ b/arch/riscv/net/bpf_jit_comp64.c
@@ -537,8 +537,10 @@ static void emit_atomic(u8 rd, u8 rs, s16 off, s32 imm, bool is64,
/* r0 = atomic_cmpxchg(dst_reg + off16, r0, src_reg); */
case BPF_CMPXCHG:
r0 = bpf_to_rv_reg(BPF_REG_0, ctx);
- emit(is64 ? rv_addi(RV_REG_T2, r0, 0) :
- rv_addiw(RV_REG_T2, r0, 0), ctx);
+ if (is64)
+ emit_mv(RV_REG_T2, r0, ctx);
+ else
+ emit_addiw(RV_REG_T2, r0, 0, ctx);
emit(is64 ? rv_lr_d(r0, 0, rd, 0, 0) :
rv_lr_w(r0, 0, rd, 0, 0), ctx);
jmp_offset = ninsns_rvoff(8);
@@ -868,7 +870,7 @@ static int __arch_prepare_bpf_trampoline(struct bpf_tramp_image *im,
stack_size += 8;
sreg_off = stack_size;
- stack_size = round_up(stack_size, 16);
+ stack_size = round_up(stack_size, STACK_ALIGN);
if (!is_struct_ops) {
/* For the trampoline called from function entry,
@@ -1960,7 +1962,7 @@ void bpf_jit_build_prologue(struct rv_jit_context *ctx, bool is_subprog)
{
int i, stack_adjust = 0, store_offset, bpf_stack_adjust;
- bpf_stack_adjust = round_up(ctx->prog->aux->stack_depth, 16);
+ bpf_stack_adjust = round_up(ctx->prog->aux->stack_depth, STACK_ALIGN);
if (bpf_stack_adjust)
mark_fp(ctx);
@@ -1982,7 +1984,7 @@ void bpf_jit_build_prologue(struct rv_jit_context *ctx, bool is_subprog)
if (ctx->arena_vm_start)
stack_adjust += 8;
- stack_adjust = round_up(stack_adjust, 16);
+ stack_adjust = round_up(stack_adjust, STACK_ALIGN);
stack_adjust += bpf_stack_adjust;
store_offset = stack_adjust - 8;
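On rv64 the psABI requires 16-byte stack alignment, so STACK_ALIGN is assumed here to be 16 and the rounding is unchanged in effect. A quick sanity check of the arithmetic, using a simplified power-of-two round_up rather than the kernel macro:

#include <assert.h>

#define STACK_ALIGN	16	/* assumed value on rv64 */
#define round_up(x, a)	(((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	/* e.g. a BPF stack depth of 40 bytes is padded to 48. */
	assert(round_up(40, STACK_ALIGN) == 48);
	assert(round_up(0, STACK_ALIGN) == 0);
	return 0;
}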