summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorTiezhu Yang <yangtiezhu@loongson.cn>2026-04-22 10:45:34 +0300
committerHuacai Chen <chenhuacai@loongson.cn>2026-04-22 10:45:34 +0300
commitee823fe7c12f92bac5e5b1ea6dd0ac8b267dd464 (patch)
tree46a489a51a308b18269302f0c38034ad128cc136
parentfc935c190c7967070506a2795575adc7f9f501ef (diff)
downloadlinux-ee823fe7c12f92bac5e5b1ea6dd0ac8b267dd464.tar.xz
LoongArch: BPF: Support load-acquire and store-release instructions
Use the LoongArch common memory access instructions with the barrier 'dbar' to support the BPF load-acquire and store-release instructions. With this patch, the following testcases pass on LoongArch if the macro CAN_USE_LOAD_ACQ_STORE_REL is usable in bpf selftests: sudo ./test_progs -t verifier_load_acquire; sudo ./test_progs -t verifier_store_release; sudo ./test_progs -t verifier_precision/bpf_load_acquire; sudo ./test_progs -t verifier_precision/bpf_store_release; sudo ./test_progs -t compute_live_registers/atomic_load_acq_store_rel. Signed-off-by: Tiezhu Yang <yangtiezhu@loongson.cn> Signed-off-by: Huacai Chen <chenhuacai@loongson.cn>
-rw-r--r--arch/loongarch/net/bpf_jit.c98
1 file changed, 97 insertions, 1 deletion
diff --git a/arch/loongarch/net/bpf_jit.c b/arch/loongarch/net/bpf_jit.c
index 6bd2d20a9f2d..648a42c559a8 100644
--- a/arch/loongarch/net/bpf_jit.c
+++ b/arch/loongarch/net/bpf_jit.c
@@ -512,6 +512,99 @@ static int emit_atomic_rmw(const struct bpf_insn *insn, struct jit_ctx *ctx)
return 0;
}
+/*
+ * emit_atomic_ld_st - JIT one BPF atomic load-acquire / store-release
+ * instruction (BPF_STX | BPF_ATOMIC with insn->imm of BPF_LOAD_ACQ or
+ * BPF_STORE_REL).
+ *
+ * LoongArch has no dedicated acquire/release memory-access instructions,
+ * so acquire/release semantics are built from a plain load/store paired
+ * with a 'dbar' barrier: the barrier is emitted *after* the load for
+ * BPF_LOAD_ACQ and *before* the store for BPF_STORE_REL.
+ * NOTE(review): the hint values 0b10100/0b10010 look like the acquire/
+ * release dbar hints used elsewhere in the arch -- confirm against
+ * arch/loongarch/include/asm/barrier.h.
+ *
+ * Returns 0 on success, -EINVAL for an unrecognized insn->imm.
+ */
+static int emit_atomic_ld_st(const struct bpf_insn *insn, struct jit_ctx *ctx)
+{
+ const u8 t1 = LOONGARCH_GPR_T1; /* scratch reg for offsets outside simm12 */
+ const u8 src = regmap[insn->src_reg];
+ const u8 dst = regmap[insn->dst_reg];
+ const s16 off = insn->off;
+ const s32 imm = insn->imm;
+
+ switch (imm) {
+ /* dst_reg = load_acquire(src_reg + off16) */
+ case BPF_LOAD_ACQ:
+ /*
+  * Sub-double-word sizes use the zero-extending load variants
+  * (ld.bu/ld.hu/ld.wu), so dst is never sign-extended here.
+  * off is s16, which may exceed a simm12 displacement; in that
+  * case the offset is materialized in t1 and the register-indexed
+  * (ldx.*) form is used instead.
+  */
+ switch (BPF_SIZE(insn->code)) {
+ case BPF_B:
+ if (is_signed_imm12(off)) {
+ emit_insn(ctx, ldbu, dst, src, off);
+ } else {
+ move_imm(ctx, t1, off, false);
+ emit_insn(ctx, ldxbu, dst, src, t1);
+ }
+ break;
+ case BPF_H:
+ if (is_signed_imm12(off)) {
+ emit_insn(ctx, ldhu, dst, src, off);
+ } else {
+ move_imm(ctx, t1, off, false);
+ emit_insn(ctx, ldxhu, dst, src, t1);
+ }
+ break;
+ case BPF_W:
+ if (is_signed_imm12(off)) {
+ emit_insn(ctx, ldwu, dst, src, off);
+ } else {
+ move_imm(ctx, t1, off, false);
+ emit_insn(ctx, ldxwu, dst, src, t1);
+ }
+ break;
+ case BPF_DW:
+ if (is_signed_imm12(off)) {
+ emit_insn(ctx, ldd, dst, src, off);
+ } else {
+ move_imm(ctx, t1, off, false);
+ emit_insn(ctx, ldxd, dst, src, t1);
+ }
+ break;
+ }
+ /* Barrier after the load provides the acquire ordering. */
+ emit_insn(ctx, dbar, 0b10100);
+ break;
+ /* store_release(dst_reg + off16, src_reg) */
+ case BPF_STORE_REL:
+ /* Barrier before the store provides the release ordering. */
+ emit_insn(ctx, dbar, 0b10010);
+ switch (BPF_SIZE(insn->code)) {
+ case BPF_B:
+ if (is_signed_imm12(off)) {
+ emit_insn(ctx, stb, src, dst, off);
+ } else {
+ move_imm(ctx, t1, off, false);
+ emit_insn(ctx, stxb, src, dst, t1);
+ }
+ break;
+ case BPF_H:
+ if (is_signed_imm12(off)) {
+ emit_insn(ctx, sth, src, dst, off);
+ } else {
+ move_imm(ctx, t1, off, false);
+ emit_insn(ctx, stxh, src, dst, t1);
+ }
+ break;
+ case BPF_W:
+ if (is_signed_imm12(off)) {
+ emit_insn(ctx, stw, src, dst, off);
+ } else {
+ move_imm(ctx, t1, off, false);
+ emit_insn(ctx, stxw, src, dst, t1);
+ }
+ break;
+ case BPF_DW:
+ if (is_signed_imm12(off)) {
+ emit_insn(ctx, std, src, dst, off);
+ } else {
+ move_imm(ctx, t1, off, false);
+ emit_insn(ctx, stxd, src, dst, t1);
+ }
+ break;
+ }
+ break;
+ default:
+ pr_err_once("bpf-jit: invalid atomic load/store opcode %02x\n", imm);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
static bool is_signed_bpf_cond(u8 cond)
{
return cond == BPF_JSGT || cond == BPF_JSLT ||
@@ -1320,7 +1413,10 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx, bool ext
case BPF_STX | BPF_ATOMIC | BPF_H:
case BPF_STX | BPF_ATOMIC | BPF_W:
case BPF_STX | BPF_ATOMIC | BPF_DW:
- ret = emit_atomic_rmw(insn, ctx);
+ if (!bpf_atomic_is_load_store(insn))
+ ret = emit_atomic_rmw(insn, ctx);
+ else
+ ret = emit_atomic_ld_st(insn, ctx);
if (ret)
return ret;
break;