From b8f3b15a7ba0a91de86547de8bbd5a633db03ab1 Mon Sep 17 00:00:00 2001 From: Valentin Schneider Date: Fri, 15 Mar 2019 16:31:33 +0000 Subject: MIPS: entry: Remove unneeded need_resched() loop Since the enabling and disabling of IRQs within preempt_schedule_irq() is contained in a need_resched() loop, we don't need the outer arch code loop. Note that commit a18815abcdfd ("Use preempt_schedule_irq.") initially removed the existing loop, but missed the final branch to restore_all. Commit cdaed73afb61 ("Fix preemption bug.") missed that and reintroduced the loop. Signed-off-by: Valentin Schneider Cc: Ralf Baechle Cc: Paul Burton Cc: James Hogan Cc: linux-mips@vger.kernel.org Signed-off-by: Paul Burton Cc: linux-kernel@vger.kernel.org --- arch/mips/kernel/entry.S | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) (limited to 'arch') diff --git a/arch/mips/kernel/entry.S b/arch/mips/kernel/entry.S index d7de8adcfcc8..5469d43b6966 100644 --- a/arch/mips/kernel/entry.S +++ b/arch/mips/kernel/entry.S @@ -58,15 +58,14 @@ resume_kernel: local_irq_disable lw t0, TI_PRE_COUNT($28) bnez t0, restore_all -need_resched: LONG_L t0, TI_FLAGS($28) andi t1, t0, _TIF_NEED_RESCHED beqz t1, restore_all LONG_L t0, PT_STATUS(sp) # Interrupts off? andi t0, 1 beqz t0, restore_all - jal preempt_schedule_irq - b need_resched + PTR_LA ra, restore_all + j preempt_schedule_irq #endif FEXPORT(ret_from_kernel_thread) -- cgit v1.2.3 From 0d1d17b9ff8e7d69b2169337b263822355404a86 Mon Sep 17 00:00:00 2001 From: Hassan Naveed Date: Tue, 12 Mar 2019 22:47:56 +0000 Subject: MIPS: uasm: Add div, mul and sel instructions for mipsr6 Add the following instructions for use by eBPF on mipsr6: insn_ddivu_r6, insn_divu_r6, insn_dmodu, insn_dmulu, insn_modu, insn_mulu, insn_seleqz, insn_selnez Signed-off-by: Hassan Naveed Reviewed-by: Paul Burton Signed-off-by: Paul Burton Cc: kafai@fb.com Cc: songliubraving@fb.com Cc: yhs@fb.com Cc: netdev@vger.kernel.org Cc: bpf@vger.kernel.org Cc: linux-mips@vger.kernel.org Cc: Ralf Baechle Cc: James Hogan Cc: Alexei Starovoitov Cc: Daniel Borkmann Cc: open list:MIPS Cc: open list --- arch/mips/include/asm/uasm.h | 8 ++++++++ arch/mips/include/uapi/asm/inst.h | 6 +++--- arch/mips/mm/uasm-mips.c | 14 ++++++++++++++ arch/mips/mm/uasm.c | 39 ++++++++++++++++++++++++--------------- 4 files changed, 49 insertions(+), 18 deletions(-) (limited to 'arch') diff --git a/arch/mips/include/asm/uasm.h b/arch/mips/include/asm/uasm.h index b1990dd75f27..f7effca791a5 100644 --- a/arch/mips/include/asm/uasm.h +++ b/arch/mips/include/asm/uasm.h @@ -86,14 +86,18 @@ Ip_u2u1(_ctcmsa); Ip_u2u1s3(_daddiu); Ip_u3u1u2(_daddu); Ip_u1u2(_ddivu); +Ip_u3u1u2(_ddivu_r6); Ip_u1(_di); Ip_u2u1msbu3(_dins); Ip_u2u1msbu3(_dinsm); Ip_u2u1msbu3(_dinsu); Ip_u1u2(_divu); +Ip_u3u1u2(_divu_r6); Ip_u1u2u3(_dmfc0); +Ip_u3u1u2(_dmodu); Ip_u1u2u3(_dmtc0); Ip_u1u2(_dmultu); +Ip_u3u1u2(_dmulu); Ip_u2u1u3(_drotr); Ip_u2u1u3(_drotr32); Ip_u2u1(_dsbh); @@ -131,6 +135,7 @@ Ip_u1u2u3(_mfc0); Ip_u1u2u3(_mfhc0); Ip_u1(_mfhi); Ip_u1(_mflo); +Ip_u3u1u2(_modu); Ip_u3u1u2(_movn); Ip_u3u1u2(_movz); Ip_u1u2u3(_mtc0); @@ -139,6 +144,7 @@ Ip_u1(_mthi); Ip_u1(_mtlo); Ip_u3u1u2(_mul); Ip_u1u2(_multu); +Ip_u3u1u2(_mulu); Ip_u3u1u2(_nor); Ip_u3u1u2(_or); Ip_u2u1u3(_ori); @@ -149,6 +155,8 @@ Ip_u2s3u1(_sb); Ip_u2s3u1(_sc); Ip_u2s3u1(_scd); Ip_u2s3u1(_sd); +Ip_u3u1u2(_seleqz); +Ip_u3u1u2(_selnez); Ip_u2s3u1(_sh); Ip_u2u1u3(_sll); Ip_u3u2u1(_sllv); diff --git a/arch/mips/include/uapi/asm/inst.h b/arch/mips/include/uapi/asm/inst.h index 40fbb5dd66df..eaa3a80affdf 
100644 --- a/arch/mips/include/uapi/asm/inst.h +++ b/arch/mips/include/uapi/asm/inst.h @@ -55,9 +55,9 @@ enum spec_op { spec3_unused_op, spec4_unused_op, slt_op, sltu_op, dadd_op, daddu_op, dsub_op, dsubu_op, tge_op, tgeu_op, tlt_op, tltu_op, - teq_op, spec5_unused_op, tne_op, spec6_unused_op, - dsll_op, spec7_unused_op, dsrl_op, dsra_op, - dsll32_op, spec8_unused_op, dsrl32_op, dsra32_op + teq_op, seleqz_op, tne_op, selnez_op, + dsll_op, spec5_unused_op, dsrl_op, dsra_op, + dsll32_op, spec6_unused_op, dsrl32_op, dsra32_op }; /* diff --git a/arch/mips/mm/uasm-mips.c b/arch/mips/mm/uasm-mips.c index 6abe40fc413d..7154a1d99aad 100644 --- a/arch/mips/mm/uasm-mips.c +++ b/arch/mips/mm/uasm-mips.c @@ -76,14 +76,22 @@ static const struct insn insn_table[insn_invalid] = { [insn_daddiu] = {M(daddiu_op, 0, 0, 0, 0, 0), RS | RT | SIMM}, [insn_daddu] = {M(spec_op, 0, 0, 0, 0, daddu_op), RS | RT | RD}, [insn_ddivu] = {M(spec_op, 0, 0, 0, 0, ddivu_op), RS | RT}, + [insn_ddivu_r6] = {M(spec_op, 0, 0, 0, ddivu_ddivu6_op, ddivu_op), + RS | RT | RD}, [insn_di] = {M(cop0_op, mfmc0_op, 0, 12, 0, 0), RT}, [insn_dins] = {M(spec3_op, 0, 0, 0, 0, dins_op), RS | RT | RD | RE}, [insn_dinsm] = {M(spec3_op, 0, 0, 0, 0, dinsm_op), RS | RT | RD | RE}, [insn_dinsu] = {M(spec3_op, 0, 0, 0, 0, dinsu_op), RS | RT | RD | RE}, [insn_divu] = {M(spec_op, 0, 0, 0, 0, divu_op), RS | RT}, + [insn_divu_r6] = {M(spec_op, 0, 0, 0, divu_divu6_op, divu_op), + RS | RT | RD}, [insn_dmfc0] = {M(cop0_op, dmfc_op, 0, 0, 0, 0), RT | RD | SET}, + [insn_dmodu] = {M(spec_op, 0, 0, 0, ddivu_dmodu_op, ddivu_op), + RS | RT | RD}, [insn_dmtc0] = {M(cop0_op, dmtc_op, 0, 0, 0, 0), RT | RD | SET}, [insn_dmultu] = {M(spec_op, 0, 0, 0, 0, dmultu_op), RS | RT}, + [insn_dmulu] = {M(spec_op, 0, 0, 0, dmult_dmul_op, dmultu_op), + RS | RT | RD}, [insn_drotr] = {M(spec_op, 1, 0, 0, 0, dsrl_op), RT | RD | RE}, [insn_drotr32] = {M(spec_op, 1, 0, 0, 0, dsrl32_op), RT | RD | RE}, [insn_dsbh] = {M(spec3_op, 0, 0, 0, dsbh_op, dbshfl_op), RT | RD}, @@ -132,12 +140,16 @@ static const struct insn insn_table[insn_invalid] = { [insn_mfhc0] = {M(cop0_op, mfhc0_op, 0, 0, 0, 0), RT | RD | SET}, [insn_mfhi] = {M(spec_op, 0, 0, 0, 0, mfhi_op), RD}, [insn_mflo] = {M(spec_op, 0, 0, 0, 0, mflo_op), RD}, + [insn_modu] = {M(spec_op, 0, 0, 0, divu_modu_op, divu_op), + RS | RT | RD}, [insn_movn] = {M(spec_op, 0, 0, 0, 0, movn_op), RS | RT | RD}, [insn_movz] = {M(spec_op, 0, 0, 0, 0, movz_op), RS | RT | RD}, [insn_mtc0] = {M(cop0_op, mtc_op, 0, 0, 0, 0), RT | RD | SET}, [insn_mthc0] = {M(cop0_op, mthc0_op, 0, 0, 0, 0), RT | RD | SET}, [insn_mthi] = {M(spec_op, 0, 0, 0, 0, mthi_op), RS}, [insn_mtlo] = {M(spec_op, 0, 0, 0, 0, mtlo_op), RS}, + [insn_mulu] = {M(spec_op, 0, 0, 0, multu_mulu_op, multu_op), + RS | RT | RD}, #ifndef CONFIG_CPU_MIPSR6 [insn_mul] = {M(spec2_op, 0, 0, 0, 0, mul_op), RS | RT | RD}, #else @@ -163,6 +175,8 @@ static const struct insn insn_table[insn_invalid] = { [insn_scd] = {M6(spec3_op, 0, 0, 0, scd6_op), RS | RT | SIMM9}, #endif [insn_sd] = {M(sd_op, 0, 0, 0, 0, 0), RS | RT | SIMM}, + [insn_seleqz] = {M(spec_op, 0, 0, 0, 0, seleqz_op), RS | RT | RD}, + [insn_selnez] = {M(spec_op, 0, 0, 0, 0, selnez_op), RS | RT | RD}, [insn_sh] = {M(sh_op, 0, 0, 0, 0, 0), RS | RT | SIMM}, [insn_sll] = {M(spec_op, 0, 0, 0, 0, sll_op), RT | RD | RE}, [insn_sllv] = {M(spec_op, 0, 0, 0, 0, sllv_op), RS | RT | RD}, diff --git a/arch/mips/mm/uasm.c b/arch/mips/mm/uasm.c index 45b6264ff308..c56f129c9a4b 100644 --- a/arch/mips/mm/uasm.c +++ b/arch/mips/mm/uasm.c @@ -50,21 +50,22 @@ 
enum opcode { insn_beq, insn_beql, insn_bgez, insn_bgezl, insn_bgtz, insn_blez, insn_bltz, insn_bltzl, insn_bne, insn_break, insn_cache, insn_cfc1, insn_cfcmsa, insn_ctc1, insn_ctcmsa, insn_daddiu, insn_daddu, insn_ddivu, - insn_di, insn_dins, insn_dinsm, insn_dinsu, insn_divu, insn_dmfc0, - insn_dmtc0, insn_dmultu, insn_drotr, insn_drotr32, insn_dsbh, insn_dshd, - insn_dsll, insn_dsll32, insn_dsllv, insn_dsra, insn_dsra32, insn_dsrav, - insn_dsrl, insn_dsrl32, insn_dsrlv, insn_dsubu, insn_eret, insn_ext, - insn_ins, insn_j, insn_jal, insn_jalr, insn_jr, insn_lb, insn_lbu, - insn_ld, insn_lddir, insn_ldpte, insn_ldx, insn_lh, insn_lhu, - insn_ll, insn_lld, insn_lui, insn_lw, insn_lwu, insn_lwx, insn_mfc0, - insn_mfhc0, insn_mfhi, insn_mflo, insn_movn, insn_movz, insn_mtc0, - insn_mthc0, insn_mthi, insn_mtlo, insn_mul, insn_multu, insn_nor, - insn_or, insn_ori, insn_pref, insn_rfe, insn_rotr, insn_sb, - insn_sc, insn_scd, insn_sd, insn_sh, insn_sll, insn_sllv, - insn_slt, insn_slti, insn_sltiu, insn_sltu, insn_sra, insn_srav, - insn_srl, insn_srlv, insn_subu, insn_sw, insn_sync, insn_syscall, - insn_tlbp, insn_tlbr, insn_tlbwi, insn_tlbwr, insn_wait, insn_wsbh, - insn_xor, insn_xori, insn_yield, + insn_ddivu_r6, insn_di, insn_dins, insn_dinsm, insn_dinsu, insn_divu, + insn_divu_r6, insn_dmfc0, insn_dmodu, insn_dmtc0, insn_dmultu, + insn_dmulu, insn_drotr, insn_drotr32, insn_dsbh, insn_dshd, insn_dsll, + insn_dsll32, insn_dsllv, insn_dsra, insn_dsra32, insn_dsrav, insn_dsrl, + insn_dsrl32, insn_dsrlv, insn_dsubu, insn_eret, insn_ext, insn_ins, + insn_j, insn_jal, insn_jalr, insn_jr, insn_lb, insn_lbu, insn_ld, + insn_lddir, insn_ldpte, insn_ldx, insn_lh, insn_lhu, insn_ll, insn_lld, + insn_lui, insn_lw, insn_lwu, insn_lwx, insn_mfc0, insn_mfhc0, insn_mfhi, + insn_mflo, insn_modu, insn_movn, insn_movz, insn_mtc0, insn_mthc0, + insn_mthi, insn_mtlo, insn_mul, insn_multu, insn_mulu, insn_nor, + insn_or, insn_ori, insn_pref, insn_rfe, insn_rotr, insn_sb, insn_sc, + insn_scd, insn_seleqz, insn_selnez, insn_sd, insn_sh, insn_sll, + insn_sllv, insn_slt, insn_slti, insn_sltiu, insn_sltu, insn_sra, + insn_srav, insn_srl, insn_srlv, insn_subu, insn_sw, insn_sync, + insn_syscall, insn_tlbp, insn_tlbr, insn_tlbwi, insn_tlbwr, insn_wait, + insn_wsbh, insn_xor, insn_xori, insn_yield, insn_invalid /* insn_invalid must be last */ }; @@ -287,13 +288,17 @@ I_u2u1(_cfcmsa) I_u1u2(_ctc1) I_u2u1(_ctcmsa) I_u1u2(_ddivu) +I_u3u1u2(_ddivu_r6) I_u1u2u3(_dmfc0) +I_u3u1u2(_dmodu) I_u1u2u3(_dmtc0) I_u1u2(_dmultu) +I_u3u1u2(_dmulu) I_u2u1s3(_daddiu) I_u3u1u2(_daddu) I_u1(_di); I_u1u2(_divu) +I_u3u1u2(_divu_r6) I_u2u1(_dsbh); I_u2u1(_dshd); I_u2u1u3(_dsll) @@ -327,6 +332,7 @@ I_u2s3u1(_lw) I_u2s3u1(_lwu) I_u1u2u3(_mfc0) I_u1u2u3(_mfhc0) +I_u3u1u2(_modu) I_u3u1u2(_movn) I_u3u1u2(_movz) I_u1(_mfhi) @@ -337,6 +343,7 @@ I_u1(_mthi) I_u1(_mtlo) I_u3u1u2(_mul) I_u1u2(_multu) +I_u3u1u2(_mulu) I_u3u1u2(_nor) I_u3u1u2(_or) I_u2u1u3(_ori) @@ -345,6 +352,8 @@ I_u2s3u1(_sb) I_u2s3u1(_sc) I_u2s3u1(_scd) I_u2s3u1(_sd) +I_u3u1u2(_seleqz) +I_u3u1u2(_selnez) I_u2s3u1(_sh) I_u2u1u3(_sll) I_u3u2u1(_sllv) -- cgit v1.2.3 From 6c2c8a18886897226ee43e77c0149a8e1874274a Mon Sep 17 00:00:00 2001 From: Hassan Naveed Date: Tue, 12 Mar 2019 22:48:06 +0000 Subject: MIPS: eBPF: Provide eBPF support for MIPS64R6 Currently eBPF support is available on MIPS64R2 only. Use MIPS64R6 variants of instructions like multiply, divide, movn, movz so eBPF can run on the newer ISA. 
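[Editor's note] To make the difference concrete: pre-R6 multiply/divide instructions write their result to the hi/lo register pair and need a separate mflo/mfhi to fetch it, while the R6 variants are three-operand and write straight to a GPR. A minimal sketch of the two emission sequences, using emit_instr() and MIPS_ISA_REV exactly as the patch below does (the wrapper function itself is hypothetical, not part of the patch):

static void emit_mul64(struct jit_ctx *ctx, int dst, int src)
{
	if (MIPS_ISA_REV >= 6) {
		/* R6: dmulu rd, rs, rt puts the low 64 bits of the product in rd */
		emit_instr(ctx, dmulu, dst, dst, src);
	} else {
		/* pre-R6: dmultu targets hi/lo, so fetch lo into dst separately */
		emit_instr(ctx, dmultu, dst, src);
		emit_instr(ctx, mflo, dst);
	}
}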
Also, we only need to check ISA revision before JIT'ing code, because we know the CPU is running a 64-bit kernel because eBPF JIT is only included in kernels with CONFIG_64BIT=y due to Kconfig dependencies. Signed-off-by: Hassan Naveed Signed-off-by: Paul Burton Cc: kafai@fb.com Cc: songliubraving@fb.com Cc: yhs@fb.com Cc: netdev@vger.kernel.org Cc: bpf@vger.kernel.org Cc: linux-mips@vger.kernel.org Cc: Ralf Baechle Cc: James Hogan Cc: Alexei Starovoitov Cc: Daniel Borkmann Cc: open list:MIPS Cc: open list --- arch/mips/net/ebpf_jit.c | 78 ++++++++++++++++++++++++++++++++++++++++++------ 1 file changed, 69 insertions(+), 9 deletions(-) (limited to 'arch') diff --git a/arch/mips/net/ebpf_jit.c b/arch/mips/net/ebpf_jit.c index 0effd3cba9a7..26eef9ad3896 100644 --- a/arch/mips/net/ebpf_jit.c +++ b/arch/mips/net/ebpf_jit.c @@ -22,6 +22,7 @@ #include #include #include +#include #include /* Registers used by JIT */ @@ -677,8 +678,12 @@ static int build_one_insn(const struct bpf_insn *insn, struct jit_ctx *ctx, if (insn->imm == 1) /* Mult by 1 is a nop */ break; gen_imm_to_reg(insn, MIPS_R_AT, ctx); - emit_instr(ctx, dmultu, MIPS_R_AT, dst); - emit_instr(ctx, mflo, dst); + if (MIPS_ISA_REV >= 6) { + emit_instr(ctx, dmulu, dst, dst, MIPS_R_AT); + } else { + emit_instr(ctx, dmultu, MIPS_R_AT, dst); + emit_instr(ctx, mflo, dst); + } break; case BPF_ALU64 | BPF_NEG | BPF_K: /* ALU64_IMM */ dst = ebpf_to_mips_reg(ctx, insn, dst_reg); @@ -700,8 +705,12 @@ static int build_one_insn(const struct bpf_insn *insn, struct jit_ctx *ctx, if (insn->imm == 1) /* Mult by 1 is a nop */ break; gen_imm_to_reg(insn, MIPS_R_AT, ctx); - emit_instr(ctx, multu, dst, MIPS_R_AT); - emit_instr(ctx, mflo, dst); + if (MIPS_ISA_REV >= 6) { + emit_instr(ctx, mulu, dst, dst, MIPS_R_AT); + } else { + emit_instr(ctx, multu, dst, MIPS_R_AT); + emit_instr(ctx, mflo, dst); + } break; case BPF_ALU | BPF_NEG | BPF_K: /* ALU_IMM */ dst = ebpf_to_mips_reg(ctx, insn, dst_reg); @@ -732,6 +741,13 @@ static int build_one_insn(const struct bpf_insn *insn, struct jit_ctx *ctx, break; } gen_imm_to_reg(insn, MIPS_R_AT, ctx); + if (MIPS_ISA_REV >= 6) { + if (bpf_op == BPF_DIV) + emit_instr(ctx, divu_r6, dst, dst, MIPS_R_AT); + else + emit_instr(ctx, modu, dst, dst, MIPS_R_AT); + break; + } emit_instr(ctx, divu, dst, MIPS_R_AT); if (bpf_op == BPF_DIV) emit_instr(ctx, mflo, dst); @@ -754,6 +770,13 @@ static int build_one_insn(const struct bpf_insn *insn, struct jit_ctx *ctx, break; } gen_imm_to_reg(insn, MIPS_R_AT, ctx); + if (MIPS_ISA_REV >= 6) { + if (bpf_op == BPF_DIV) + emit_instr(ctx, ddivu_r6, dst, dst, MIPS_R_AT); + else + emit_instr(ctx, modu, dst, dst, MIPS_R_AT); + break; + } emit_instr(ctx, ddivu, dst, MIPS_R_AT); if (bpf_op == BPF_DIV) emit_instr(ctx, mflo, dst); @@ -819,11 +842,23 @@ static int build_one_insn(const struct bpf_insn *insn, struct jit_ctx *ctx, emit_instr(ctx, and, dst, dst, src); break; case BPF_MUL: - emit_instr(ctx, dmultu, dst, src); - emit_instr(ctx, mflo, dst); + if (MIPS_ISA_REV >= 6) { + emit_instr(ctx, dmulu, dst, dst, src); + } else { + emit_instr(ctx, dmultu, dst, src); + emit_instr(ctx, mflo, dst); + } break; case BPF_DIV: case BPF_MOD: + if (MIPS_ISA_REV >= 6) { + if (bpf_op == BPF_DIV) + emit_instr(ctx, ddivu_r6, + dst, dst, src); + else + emit_instr(ctx, modu, dst, dst, src); + break; + } emit_instr(ctx, ddivu, dst, src); if (bpf_op == BPF_DIV) emit_instr(ctx, mflo, dst); @@ -903,6 +938,13 @@ static int build_one_insn(const struct bpf_insn *insn, struct jit_ctx *ctx, break; case BPF_DIV: case BPF_MOD: + if 
(MIPS_ISA_REV >= 6) { + if (bpf_op == BPF_DIV) + emit_instr(ctx, divu_r6, dst, dst, src); + else + emit_instr(ctx, modu, dst, dst, src); + break; + } emit_instr(ctx, divu, dst, src); if (bpf_op == BPF_DIV) emit_instr(ctx, mflo, dst); @@ -1006,8 +1048,15 @@ static int build_one_insn(const struct bpf_insn *insn, struct jit_ctx *ctx, emit_instr(ctx, dsubu, MIPS_R_T8, dst, src); emit_instr(ctx, sltu, MIPS_R_AT, dst, src); /* SP known to be non-zero, movz becomes boolean not */ - emit_instr(ctx, movz, MIPS_R_T9, MIPS_R_SP, MIPS_R_T8); - emit_instr(ctx, movn, MIPS_R_T9, MIPS_R_ZERO, MIPS_R_T8); + if (MIPS_ISA_REV >= 6) { + emit_instr(ctx, seleqz, MIPS_R_T9, + MIPS_R_SP, MIPS_R_T8); + } else { + emit_instr(ctx, movz, MIPS_R_T9, + MIPS_R_SP, MIPS_R_T8); + emit_instr(ctx, movn, MIPS_R_T9, + MIPS_R_ZERO, MIPS_R_T8); + } emit_instr(ctx, or, MIPS_R_AT, MIPS_R_T9, MIPS_R_AT); cmp_eq = bpf_op == BPF_JGT; dst = MIPS_R_AT; @@ -1366,6 +1415,17 @@ jeq_common: if (src < 0) return src; if (BPF_MODE(insn->code) == BPF_XADD) { + /* + * If mem_off does not fit within the 9 bit ll/sc + * instruction immediate field, use a temp reg. + */ + if (MIPS_ISA_REV >= 6 && + (mem_off >= BIT(8) || mem_off < -BIT(8))) { + emit_instr(ctx, daddiu, MIPS_R_T6, + dst, mem_off); + mem_off = 0; + dst = MIPS_R_T6; + } switch (BPF_SIZE(insn->code)) { case BPF_W: if (get_reg_val_type(ctx, this_idx, insn->src_reg) == REG_32BIT) { @@ -1720,7 +1780,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog) unsigned int image_size; u8 *image_ptr; - if (!prog->jit_requested || !cpu_has_mips64r2) + if (!prog->jit_requested || MIPS_ISA_REV < 2) return prog; tmp = bpf_jit_blind_constants(prog); -- cgit v1.2.3 From 716850ab104db154549b01e63d5c71076434b064 Mon Sep 17 00:00:00 2001 From: Hassan Naveed Date: Tue, 12 Mar 2019 22:48:12 +0000 Subject: MIPS: eBPF: Initial eBPF support for MIPS32 architecture. Currently MIPS32 supports a JIT for classic BPF only, not extended BPF. This patch adds JIT support for extended BPF on MIPS32, so code is actually JIT'ed instead of being only interpreted. Instructions with 64-bit operands are not supported at this point. We can delete classic BPF because the kernel will translate classic BPF programs into extended BPF and JIT them, eliminating the need for classic BPF. 
Signed-off-by: Hassan Naveed Signed-off-by: Paul Burton Cc: kafai@fb.com Cc: songliubraving@fb.com Cc: yhs@fb.com Cc: netdev@vger.kernel.org Cc: bpf@vger.kernel.org Cc: linux-mips@vger.kernel.org Cc: Ralf Baechle Cc: James Hogan Cc: Alexei Starovoitov Cc: Daniel Borkmann Cc: open list:MIPS Cc: open list --- arch/mips/Kconfig | 3 +- arch/mips/net/Makefile | 1 - arch/mips/net/bpf_jit.c | 1270 ------------------------------------------- arch/mips/net/bpf_jit_asm.S | 285 ---------- arch/mips/net/ebpf_jit.c | 113 ++-- 5 files changed, 70 insertions(+), 1602 deletions(-) delete mode 100644 arch/mips/net/bpf_jit.c delete mode 100644 arch/mips/net/bpf_jit_asm.S (limited to 'arch') diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig index 4a5f5b0ee9a9..cbf16db3366c 100644 --- a/arch/mips/Kconfig +++ b/arch/mips/Kconfig @@ -44,8 +44,7 @@ config MIPS select HAVE_ARCH_SECCOMP_FILTER select HAVE_ARCH_TRACEHOOK select HAVE_ARCH_TRANSPARENT_HUGEPAGE if CPU_SUPPORTS_HUGEPAGES && 64BIT - select HAVE_CBPF_JIT if (!64BIT && !CPU_MICROMIPS) - select HAVE_EBPF_JIT if (64BIT && !CPU_MICROMIPS) + select HAVE_EBPF_JIT if (!CPU_MICROMIPS) select HAVE_CONTEXT_TRACKING select HAVE_COPY_THREAD_TLS select HAVE_C_RECORDMCOUNT diff --git a/arch/mips/net/Makefile b/arch/mips/net/Makefile index 47d678416715..72a78462f872 100644 --- a/arch/mips/net/Makefile +++ b/arch/mips/net/Makefile @@ -1,4 +1,3 @@ # MIPS networking code -obj-$(CONFIG_MIPS_CBPF_JIT) += bpf_jit.o bpf_jit_asm.o obj-$(CONFIG_MIPS_EBPF_JIT) += ebpf_jit.o diff --git a/arch/mips/net/bpf_jit.c b/arch/mips/net/bpf_jit.c deleted file mode 100644 index 3a0e34f4e615..000000000000 --- a/arch/mips/net/bpf_jit.c +++ /dev/null @@ -1,1270 +0,0 @@ -/* - * Just-In-Time compiler for BPF filters on MIPS - * - * Copyright (c) 2014 Imagination Technologies Ltd. - * Author: Markos Chandras - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by the - * Free Software Foundation; version 2 of the License. - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include "bpf_jit.h" - -/* ABI - * r_skb_hl SKB header length - * r_data SKB data pointer - * r_off Offset - * r_A BPF register A - * r_X BPF register X - * r_skb *skb - * r_M *scratch memory - * r_skb_len SKB length - * - * On entry (*bpf_func)(*skb, *filter) - * a0 = MIPS_R_A0 = skb; - * a1 = MIPS_R_A1 = filter; - * - * Stack - * ... - * M[15] - * M[14] - * M[13] - * ... - * M[0] <-- r_M - * saved reg k-1 - * saved reg k-2 - * ... 
- * saved reg 0 <-- r_sp - * - * - * Packet layout - * - * <--------------------- len ------------------------> - * <--skb-len(r_skb_hl)-->< ----- skb->data_len ------> - * ---------------------------------------------------- - * | skb->data | - * ---------------------------------------------------- - */ - -#define ptr typeof(unsigned long) - -#define SCRATCH_OFF(k) (4 * (k)) - -/* JIT flags */ -#define SEEN_CALL (1 << BPF_MEMWORDS) -#define SEEN_SREG_SFT (BPF_MEMWORDS + 1) -#define SEEN_SREG_BASE (1 << SEEN_SREG_SFT) -#define SEEN_SREG(x) (SEEN_SREG_BASE << (x)) -#define SEEN_OFF SEEN_SREG(2) -#define SEEN_A SEEN_SREG(3) -#define SEEN_X SEEN_SREG(4) -#define SEEN_SKB SEEN_SREG(5) -#define SEEN_MEM SEEN_SREG(6) -/* SEEN_SK_DATA also implies skb_hl an skb_len */ -#define SEEN_SKB_DATA (SEEN_SREG(7) | SEEN_SREG(1) | SEEN_SREG(0)) - -/* Arguments used by JIT */ -#define ARGS_USED_BY_JIT 2 /* only applicable to 64-bit */ - -#define SBIT(x) (1 << (x)) /* Signed version of BIT() */ - -/** - * struct jit_ctx - JIT context - * @skf: The sk_filter - * @prologue_bytes: Number of bytes for prologue - * @idx: Instruction index - * @flags: JIT flags - * @offsets: Instruction offsets - * @target: Memory location for the compiled filter - */ -struct jit_ctx { - const struct bpf_prog *skf; - unsigned int prologue_bytes; - u32 idx; - u32 flags; - u32 *offsets; - u32 *target; -}; - - -static inline int optimize_div(u32 *k) -{ - /* power of 2 divides can be implemented with right shift */ - if (!(*k & (*k-1))) { - *k = ilog2(*k); - return 1; - } - - return 0; -} - -static inline void emit_jit_reg_move(ptr dst, ptr src, struct jit_ctx *ctx); - -/* Simply emit the instruction if the JIT memory space has been allocated */ -#define emit_instr(ctx, func, ...) \ -do { \ - if ((ctx)->target != NULL) { \ - u32 *p = &(ctx)->target[ctx->idx]; \ - uasm_i_##func(&p, ##__VA_ARGS__); \ - } \ - (ctx)->idx++; \ -} while (0) - -/* - * Similar to emit_instr but it must be used when we need to emit - * 32-bit or 64-bit instructions - */ -#define emit_long_instr(ctx, func, ...) 
\ -do { \ - if ((ctx)->target != NULL) { \ - u32 *p = &(ctx)->target[ctx->idx]; \ - UASM_i_##func(&p, ##__VA_ARGS__); \ - } \ - (ctx)->idx++; \ -} while (0) - -/* Determine if immediate is within the 16-bit signed range */ -static inline bool is_range16(s32 imm) -{ - return !(imm >= SBIT(15) || imm < -SBIT(15)); -} - -static inline void emit_addu(unsigned int dst, unsigned int src1, - unsigned int src2, struct jit_ctx *ctx) -{ - emit_instr(ctx, addu, dst, src1, src2); -} - -static inline void emit_nop(struct jit_ctx *ctx) -{ - emit_instr(ctx, nop); -} - -/* Load a u32 immediate to a register */ -static inline void emit_load_imm(unsigned int dst, u32 imm, struct jit_ctx *ctx) -{ - if (ctx->target != NULL) { - /* addiu can only handle s16 */ - if (!is_range16(imm)) { - u32 *p = &ctx->target[ctx->idx]; - uasm_i_lui(&p, r_tmp_imm, (s32)imm >> 16); - p = &ctx->target[ctx->idx + 1]; - uasm_i_ori(&p, dst, r_tmp_imm, imm & 0xffff); - } else { - u32 *p = &ctx->target[ctx->idx]; - uasm_i_addiu(&p, dst, r_zero, imm); - } - } - ctx->idx++; - - if (!is_range16(imm)) - ctx->idx++; -} - -static inline void emit_or(unsigned int dst, unsigned int src1, - unsigned int src2, struct jit_ctx *ctx) -{ - emit_instr(ctx, or, dst, src1, src2); -} - -static inline void emit_ori(unsigned int dst, unsigned src, u32 imm, - struct jit_ctx *ctx) -{ - if (imm >= BIT(16)) { - emit_load_imm(r_tmp, imm, ctx); - emit_or(dst, src, r_tmp, ctx); - } else { - emit_instr(ctx, ori, dst, src, imm); - } -} - -static inline void emit_daddiu(unsigned int dst, unsigned int src, - int imm, struct jit_ctx *ctx) -{ - /* - * Only used for stack, so the imm is relatively small - * and it fits in 15-bits - */ - emit_instr(ctx, daddiu, dst, src, imm); -} - -static inline void emit_addiu(unsigned int dst, unsigned int src, - u32 imm, struct jit_ctx *ctx) -{ - if (!is_range16(imm)) { - emit_load_imm(r_tmp, imm, ctx); - emit_addu(dst, r_tmp, src, ctx); - } else { - emit_instr(ctx, addiu, dst, src, imm); - } -} - -static inline void emit_and(unsigned int dst, unsigned int src1, - unsigned int src2, struct jit_ctx *ctx) -{ - emit_instr(ctx, and, dst, src1, src2); -} - -static inline void emit_andi(unsigned int dst, unsigned int src, - u32 imm, struct jit_ctx *ctx) -{ - /* If imm does not fit in u16 then load it to register */ - if (imm >= BIT(16)) { - emit_load_imm(r_tmp, imm, ctx); - emit_and(dst, src, r_tmp, ctx); - } else { - emit_instr(ctx, andi, dst, src, imm); - } -} - -static inline void emit_xor(unsigned int dst, unsigned int src1, - unsigned int src2, struct jit_ctx *ctx) -{ - emit_instr(ctx, xor, dst, src1, src2); -} - -static inline void emit_xori(ptr dst, ptr src, u32 imm, struct jit_ctx *ctx) -{ - /* If imm does not fit in u16 then load it to register */ - if (imm >= BIT(16)) { - emit_load_imm(r_tmp, imm, ctx); - emit_xor(dst, src, r_tmp, ctx); - } else { - emit_instr(ctx, xori, dst, src, imm); - } -} - -static inline void emit_stack_offset(int offset, struct jit_ctx *ctx) -{ - emit_long_instr(ctx, ADDIU, r_sp, r_sp, offset); -} - -static inline void emit_subu(unsigned int dst, unsigned int src1, - unsigned int src2, struct jit_ctx *ctx) -{ - emit_instr(ctx, subu, dst, src1, src2); -} - -static inline void emit_neg(unsigned int reg, struct jit_ctx *ctx) -{ - emit_subu(reg, r_zero, reg, ctx); -} - -static inline void emit_sllv(unsigned int dst, unsigned int src, - unsigned int sa, struct jit_ctx *ctx) -{ - emit_instr(ctx, sllv, dst, src, sa); -} - -static inline void emit_sll(unsigned int dst, unsigned int src, - unsigned int sa, 
struct jit_ctx *ctx) -{ - /* sa is 5-bits long */ - if (sa >= BIT(5)) - /* Shifting >= 32 results in zero */ - emit_jit_reg_move(dst, r_zero, ctx); - else - emit_instr(ctx, sll, dst, src, sa); -} - -static inline void emit_srlv(unsigned int dst, unsigned int src, - unsigned int sa, struct jit_ctx *ctx) -{ - emit_instr(ctx, srlv, dst, src, sa); -} - -static inline void emit_srl(unsigned int dst, unsigned int src, - unsigned int sa, struct jit_ctx *ctx) -{ - /* sa is 5-bits long */ - if (sa >= BIT(5)) - /* Shifting >= 32 results in zero */ - emit_jit_reg_move(dst, r_zero, ctx); - else - emit_instr(ctx, srl, dst, src, sa); -} - -static inline void emit_slt(unsigned int dst, unsigned int src1, - unsigned int src2, struct jit_ctx *ctx) -{ - emit_instr(ctx, slt, dst, src1, src2); -} - -static inline void emit_sltu(unsigned int dst, unsigned int src1, - unsigned int src2, struct jit_ctx *ctx) -{ - emit_instr(ctx, sltu, dst, src1, src2); -} - -static inline void emit_sltiu(unsigned dst, unsigned int src, - unsigned int imm, struct jit_ctx *ctx) -{ - /* 16 bit immediate */ - if (!is_range16((s32)imm)) { - emit_load_imm(r_tmp, imm, ctx); - emit_sltu(dst, src, r_tmp, ctx); - } else { - emit_instr(ctx, sltiu, dst, src, imm); - } - -} - -/* Store register on the stack */ -static inline void emit_store_stack_reg(ptr reg, ptr base, - unsigned int offset, - struct jit_ctx *ctx) -{ - emit_long_instr(ctx, SW, reg, offset, base); -} - -static inline void emit_store(ptr reg, ptr base, unsigned int offset, - struct jit_ctx *ctx) -{ - emit_instr(ctx, sw, reg, offset, base); -} - -static inline void emit_load_stack_reg(ptr reg, ptr base, - unsigned int offset, - struct jit_ctx *ctx) -{ - emit_long_instr(ctx, LW, reg, offset, base); -} - -static inline void emit_load(unsigned int reg, unsigned int base, - unsigned int offset, struct jit_ctx *ctx) -{ - emit_instr(ctx, lw, reg, offset, base); -} - -static inline void emit_load_byte(unsigned int reg, unsigned int base, - unsigned int offset, struct jit_ctx *ctx) -{ - emit_instr(ctx, lb, reg, offset, base); -} - -static inline void emit_half_load(unsigned int reg, unsigned int base, - unsigned int offset, struct jit_ctx *ctx) -{ - emit_instr(ctx, lh, reg, offset, base); -} - -static inline void emit_half_load_unsigned(unsigned int reg, unsigned int base, - unsigned int offset, struct jit_ctx *ctx) -{ - emit_instr(ctx, lhu, reg, offset, base); -} - -static inline void emit_mul(unsigned int dst, unsigned int src1, - unsigned int src2, struct jit_ctx *ctx) -{ - emit_instr(ctx, mul, dst, src1, src2); -} - -static inline void emit_div(unsigned int dst, unsigned int src, - struct jit_ctx *ctx) -{ - if (ctx->target != NULL) { - u32 *p = &ctx->target[ctx->idx]; - uasm_i_divu(&p, dst, src); - p = &ctx->target[ctx->idx + 1]; - uasm_i_mflo(&p, dst); - } - ctx->idx += 2; /* 2 insts */ -} - -static inline void emit_mod(unsigned int dst, unsigned int src, - struct jit_ctx *ctx) -{ - if (ctx->target != NULL) { - u32 *p = &ctx->target[ctx->idx]; - uasm_i_divu(&p, dst, src); - p = &ctx->target[ctx->idx + 1]; - uasm_i_mfhi(&p, dst); - } - ctx->idx += 2; /* 2 insts */ -} - -static inline void emit_dsll(unsigned int dst, unsigned int src, - unsigned int sa, struct jit_ctx *ctx) -{ - emit_instr(ctx, dsll, dst, src, sa); -} - -static inline void emit_dsrl32(unsigned int dst, unsigned int src, - unsigned int sa, struct jit_ctx *ctx) -{ - emit_instr(ctx, dsrl32, dst, src, sa); -} - -static inline void emit_wsbh(unsigned int dst, unsigned int src, - struct jit_ctx *ctx) -{ - emit_instr(ctx, 
wsbh, dst, src); -} - -/* load pointer to register */ -static inline void emit_load_ptr(unsigned int dst, unsigned int src, - int imm, struct jit_ctx *ctx) -{ - /* src contains the base addr of the 32/64-pointer */ - emit_long_instr(ctx, LW, dst, imm, src); -} - -/* load a function pointer to register */ -static inline void emit_load_func(unsigned int reg, ptr imm, - struct jit_ctx *ctx) -{ - if (IS_ENABLED(CONFIG_64BIT)) { - /* At this point imm is always 64-bit */ - emit_load_imm(r_tmp, (u64)imm >> 32, ctx); - emit_dsll(r_tmp_imm, r_tmp, 16, ctx); /* left shift by 16 */ - emit_ori(r_tmp, r_tmp_imm, (imm >> 16) & 0xffff, ctx); - emit_dsll(r_tmp_imm, r_tmp, 16, ctx); /* left shift by 16 */ - emit_ori(reg, r_tmp_imm, imm & 0xffff, ctx); - } else { - emit_load_imm(reg, imm, ctx); - } -} - -/* Move to real MIPS register */ -static inline void emit_reg_move(ptr dst, ptr src, struct jit_ctx *ctx) -{ - emit_long_instr(ctx, ADDU, dst, src, r_zero); -} - -/* Move to JIT (32-bit) register */ -static inline void emit_jit_reg_move(ptr dst, ptr src, struct jit_ctx *ctx) -{ - emit_addu(dst, src, r_zero, ctx); -} - -/* Compute the immediate value for PC-relative branches. */ -static inline u32 b_imm(unsigned int tgt, struct jit_ctx *ctx) -{ - if (ctx->target == NULL) - return 0; - - /* - * We want a pc-relative branch. We only do forward branches - * so tgt is always after pc. tgt is the instruction offset - * we want to jump to. - - * Branch on MIPS: - * I: target_offset <- sign_extend(offset) - * I+1: PC += target_offset (delay slot) - * - * ctx->idx currently points to the branch instruction - * but the offset is added to the delay slot so we need - * to subtract 4. - */ - return ctx->offsets[tgt] - - (ctx->idx * 4 - ctx->prologue_bytes) - 4; -} - -static inline void emit_bcond(int cond, unsigned int reg1, unsigned int reg2, - unsigned int imm, struct jit_ctx *ctx) -{ - if (ctx->target != NULL) { - u32 *p = &ctx->target[ctx->idx]; - - switch (cond) { - case MIPS_COND_EQ: - uasm_i_beq(&p, reg1, reg2, imm); - break; - case MIPS_COND_NE: - uasm_i_bne(&p, reg1, reg2, imm); - break; - case MIPS_COND_ALL: - uasm_i_b(&p, imm); - break; - default: - pr_warn("%s: Unhandled branch conditional: %d\n", - __func__, cond); - } - } - ctx->idx++; -} - -static inline void emit_b(unsigned int imm, struct jit_ctx *ctx) -{ - emit_bcond(MIPS_COND_ALL, r_zero, r_zero, imm, ctx); -} - -static inline void emit_jalr(unsigned int link, unsigned int reg, - struct jit_ctx *ctx) -{ - emit_instr(ctx, jalr, link, reg); -} - -static inline void emit_jr(unsigned int reg, struct jit_ctx *ctx) -{ - emit_instr(ctx, jr, reg); -} - -static inline u16 align_sp(unsigned int num) -{ - /* Double word alignment for 32-bit, quadword for 64-bit */ - unsigned int align = IS_ENABLED(CONFIG_64BIT) ? 
16 : 8; - num = (num + (align - 1)) & -align; - return num; -} - -static void save_bpf_jit_regs(struct jit_ctx *ctx, unsigned offset) -{ - int i = 0, real_off = 0; - u32 sflags, tmp_flags; - - /* Adjust the stack pointer */ - if (offset) - emit_stack_offset(-align_sp(offset), ctx); - - tmp_flags = sflags = ctx->flags >> SEEN_SREG_SFT; - /* sflags is essentially a bitmap */ - while (tmp_flags) { - if ((sflags >> i) & 0x1) { - emit_store_stack_reg(MIPS_R_S0 + i, r_sp, real_off, - ctx); - real_off += SZREG; - } - i++; - tmp_flags >>= 1; - } - - /* save return address */ - if (ctx->flags & SEEN_CALL) { - emit_store_stack_reg(r_ra, r_sp, real_off, ctx); - real_off += SZREG; - } - - /* Setup r_M leaving the alignment gap if necessary */ - if (ctx->flags & SEEN_MEM) { - if (real_off % (SZREG * 2)) - real_off += SZREG; - emit_long_instr(ctx, ADDIU, r_M, r_sp, real_off); - } -} - -static void restore_bpf_jit_regs(struct jit_ctx *ctx, - unsigned int offset) -{ - int i, real_off = 0; - u32 sflags, tmp_flags; - - tmp_flags = sflags = ctx->flags >> SEEN_SREG_SFT; - /* sflags is a bitmap */ - i = 0; - while (tmp_flags) { - if ((sflags >> i) & 0x1) { - emit_load_stack_reg(MIPS_R_S0 + i, r_sp, real_off, - ctx); - real_off += SZREG; - } - i++; - tmp_flags >>= 1; - } - - /* restore return address */ - if (ctx->flags & SEEN_CALL) - emit_load_stack_reg(r_ra, r_sp, real_off, ctx); - - /* Restore the sp and discard the scrach memory */ - if (offset) - emit_stack_offset(align_sp(offset), ctx); -} - -static unsigned int get_stack_depth(struct jit_ctx *ctx) -{ - int sp_off = 0; - - - /* How may s* regs do we need to preserved? */ - sp_off += hweight32(ctx->flags >> SEEN_SREG_SFT) * SZREG; - - if (ctx->flags & SEEN_MEM) - sp_off += 4 * BPF_MEMWORDS; /* BPF_MEMWORDS are 32-bit */ - - if (ctx->flags & SEEN_CALL) - sp_off += SZREG; /* Space for our ra register */ - - return sp_off; -} - -static void build_prologue(struct jit_ctx *ctx) -{ - int sp_off; - - /* Calculate the total offset for the stack pointer */ - sp_off = get_stack_depth(ctx); - save_bpf_jit_regs(ctx, sp_off); - - if (ctx->flags & SEEN_SKB) - emit_reg_move(r_skb, MIPS_R_A0, ctx); - - if (ctx->flags & SEEN_SKB_DATA) { - /* Load packet length */ - emit_load(r_skb_len, r_skb, offsetof(struct sk_buff, len), - ctx); - emit_load(r_tmp, r_skb, offsetof(struct sk_buff, data_len), - ctx); - /* Load the data pointer */ - emit_load_ptr(r_skb_data, r_skb, - offsetof(struct sk_buff, data), ctx); - /* Load the header length */ - emit_subu(r_skb_hl, r_skb_len, r_tmp, ctx); - } - - if (ctx->flags & SEEN_X) - emit_jit_reg_move(r_X, r_zero, ctx); - - /* - * Do not leak kernel data to userspace, we only need to clear - * r_A if it is ever used. In fact if it is never used, we - * will not save/restore it, so clearing it in this case would - * corrupt the state of the caller. - */ - if (bpf_needs_clear_a(&ctx->skf->insns[0]) && - (ctx->flags & SEEN_A)) - emit_jit_reg_move(r_A, r_zero, ctx); -} - -static void build_epilogue(struct jit_ctx *ctx) -{ - unsigned int sp_off; - - /* Calculate the total offset for the stack pointer */ - - sp_off = get_stack_depth(ctx); - restore_bpf_jit_regs(ctx, sp_off); - - /* Return */ - emit_jr(r_ra, ctx); - emit_nop(ctx); -} - -#define CHOOSE_LOAD_FUNC(K, func) \ - ((int)K < 0 ? ((int)K >= SKF_LL_OFF ? 
func##_negative : func) : \ - func##_positive) - -static int build_body(struct jit_ctx *ctx) -{ - const struct bpf_prog *prog = ctx->skf; - const struct sock_filter *inst; - unsigned int i, off, condt; - u32 k, b_off __maybe_unused; - u8 (*sk_load_func)(unsigned long *skb, int offset); - - for (i = 0; i < prog->len; i++) { - u16 code; - - inst = &(prog->insns[i]); - pr_debug("%s: code->0x%02x, jt->0x%x, jf->0x%x, k->0x%x\n", - __func__, inst->code, inst->jt, inst->jf, inst->k); - k = inst->k; - code = bpf_anc_helper(inst); - - if (ctx->target == NULL) - ctx->offsets[i] = ctx->idx * 4; - - switch (code) { - case BPF_LD | BPF_IMM: - /* A <- k ==> li r_A, k */ - ctx->flags |= SEEN_A; - emit_load_imm(r_A, k, ctx); - break; - case BPF_LD | BPF_W | BPF_LEN: - BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, len) != 4); - /* A <- len ==> lw r_A, offset(skb) */ - ctx->flags |= SEEN_SKB | SEEN_A; - off = offsetof(struct sk_buff, len); - emit_load(r_A, r_skb, off, ctx); - break; - case BPF_LD | BPF_MEM: - /* A <- M[k] ==> lw r_A, offset(M) */ - ctx->flags |= SEEN_MEM | SEEN_A; - emit_load(r_A, r_M, SCRATCH_OFF(k), ctx); - break; - case BPF_LD | BPF_W | BPF_ABS: - /* A <- P[k:4] */ - sk_load_func = CHOOSE_LOAD_FUNC(k, sk_load_word); - goto load; - case BPF_LD | BPF_H | BPF_ABS: - /* A <- P[k:2] */ - sk_load_func = CHOOSE_LOAD_FUNC(k, sk_load_half); - goto load; - case BPF_LD | BPF_B | BPF_ABS: - /* A <- P[k:1] */ - sk_load_func = CHOOSE_LOAD_FUNC(k, sk_load_byte); -load: - emit_load_imm(r_off, k, ctx); -load_common: - ctx->flags |= SEEN_CALL | SEEN_OFF | - SEEN_SKB | SEEN_A | SEEN_SKB_DATA; - - emit_load_func(r_s0, (ptr)sk_load_func, ctx); - emit_reg_move(MIPS_R_A0, r_skb, ctx); - emit_jalr(MIPS_R_RA, r_s0, ctx); - /* Load second argument to delay slot */ - emit_reg_move(MIPS_R_A1, r_off, ctx); - /* Check the error value */ - emit_bcond(MIPS_COND_EQ, r_ret, 0, b_imm(i + 1, ctx), - ctx); - /* Load return register on DS for failures */ - emit_reg_move(r_ret, r_zero, ctx); - /* Return with error */ - emit_b(b_imm(prog->len, ctx), ctx); - emit_nop(ctx); - break; - case BPF_LD | BPF_W | BPF_IND: - /* A <- P[X + k:4] */ - sk_load_func = sk_load_word; - goto load_ind; - case BPF_LD | BPF_H | BPF_IND: - /* A <- P[X + k:2] */ - sk_load_func = sk_load_half; - goto load_ind; - case BPF_LD | BPF_B | BPF_IND: - /* A <- P[X + k:1] */ - sk_load_func = sk_load_byte; -load_ind: - ctx->flags |= SEEN_OFF | SEEN_X; - emit_addiu(r_off, r_X, k, ctx); - goto load_common; - case BPF_LDX | BPF_IMM: - /* X <- k */ - ctx->flags |= SEEN_X; - emit_load_imm(r_X, k, ctx); - break; - case BPF_LDX | BPF_MEM: - /* X <- M[k] */ - ctx->flags |= SEEN_X | SEEN_MEM; - emit_load(r_X, r_M, SCRATCH_OFF(k), ctx); - break; - case BPF_LDX | BPF_W | BPF_LEN: - /* X <- len */ - ctx->flags |= SEEN_X | SEEN_SKB; - off = offsetof(struct sk_buff, len); - emit_load(r_X, r_skb, off, ctx); - break; - case BPF_LDX | BPF_B | BPF_MSH: - /* X <- 4 * (P[k:1] & 0xf) */ - ctx->flags |= SEEN_X | SEEN_CALL | SEEN_SKB; - /* Load offset to a1 */ - emit_load_func(r_s0, (ptr)sk_load_byte, ctx); - /* - * This may emit two instructions so it may not fit - * in the delay slot. So use a0 in the delay slot. 
- */ - emit_load_imm(MIPS_R_A1, k, ctx); - emit_jalr(MIPS_R_RA, r_s0, ctx); - emit_reg_move(MIPS_R_A0, r_skb, ctx); /* delay slot */ - /* Check the error value */ - emit_bcond(MIPS_COND_NE, r_ret, 0, - b_imm(prog->len, ctx), ctx); - emit_reg_move(r_ret, r_zero, ctx); - /* We are good */ - /* X <- P[1:K] & 0xf */ - emit_andi(r_X, r_A, 0xf, ctx); - /* X << 2 */ - emit_b(b_imm(i + 1, ctx), ctx); - emit_sll(r_X, r_X, 2, ctx); /* delay slot */ - break; - case BPF_ST: - /* M[k] <- A */ - ctx->flags |= SEEN_MEM | SEEN_A; - emit_store(r_A, r_M, SCRATCH_OFF(k), ctx); - break; - case BPF_STX: - /* M[k] <- X */ - ctx->flags |= SEEN_MEM | SEEN_X; - emit_store(r_X, r_M, SCRATCH_OFF(k), ctx); - break; - case BPF_ALU | BPF_ADD | BPF_K: - /* A += K */ - ctx->flags |= SEEN_A; - emit_addiu(r_A, r_A, k, ctx); - break; - case BPF_ALU | BPF_ADD | BPF_X: - /* A += X */ - ctx->flags |= SEEN_A | SEEN_X; - emit_addu(r_A, r_A, r_X, ctx); - break; - case BPF_ALU | BPF_SUB | BPF_K: - /* A -= K */ - ctx->flags |= SEEN_A; - emit_addiu(r_A, r_A, -k, ctx); - break; - case BPF_ALU | BPF_SUB | BPF_X: - /* A -= X */ - ctx->flags |= SEEN_A | SEEN_X; - emit_subu(r_A, r_A, r_X, ctx); - break; - case BPF_ALU | BPF_MUL | BPF_K: - /* A *= K */ - /* Load K to scratch register before MUL */ - ctx->flags |= SEEN_A; - emit_load_imm(r_s0, k, ctx); - emit_mul(r_A, r_A, r_s0, ctx); - break; - case BPF_ALU | BPF_MUL | BPF_X: - /* A *= X */ - ctx->flags |= SEEN_A | SEEN_X; - emit_mul(r_A, r_A, r_X, ctx); - break; - case BPF_ALU | BPF_DIV | BPF_K: - /* A /= k */ - if (k == 1) - break; - if (optimize_div(&k)) { - ctx->flags |= SEEN_A; - emit_srl(r_A, r_A, k, ctx); - break; - } - ctx->flags |= SEEN_A; - emit_load_imm(r_s0, k, ctx); - emit_div(r_A, r_s0, ctx); - break; - case BPF_ALU | BPF_MOD | BPF_K: - /* A %= k */ - if (k == 1) { - ctx->flags |= SEEN_A; - emit_jit_reg_move(r_A, r_zero, ctx); - } else { - ctx->flags |= SEEN_A; - emit_load_imm(r_s0, k, ctx); - emit_mod(r_A, r_s0, ctx); - } - break; - case BPF_ALU | BPF_DIV | BPF_X: - /* A /= X */ - ctx->flags |= SEEN_X | SEEN_A; - /* Check if r_X is zero */ - emit_bcond(MIPS_COND_EQ, r_X, r_zero, - b_imm(prog->len, ctx), ctx); - emit_load_imm(r_ret, 0, ctx); /* delay slot */ - emit_div(r_A, r_X, ctx); - break; - case BPF_ALU | BPF_MOD | BPF_X: - /* A %= X */ - ctx->flags |= SEEN_X | SEEN_A; - /* Check if r_X is zero */ - emit_bcond(MIPS_COND_EQ, r_X, r_zero, - b_imm(prog->len, ctx), ctx); - emit_load_imm(r_ret, 0, ctx); /* delay slot */ - emit_mod(r_A, r_X, ctx); - break; - case BPF_ALU | BPF_OR | BPF_K: - /* A |= K */ - ctx->flags |= SEEN_A; - emit_ori(r_A, r_A, k, ctx); - break; - case BPF_ALU | BPF_OR | BPF_X: - /* A |= X */ - ctx->flags |= SEEN_A; - emit_ori(r_A, r_A, r_X, ctx); - break; - case BPF_ALU | BPF_XOR | BPF_K: - /* A ^= k */ - ctx->flags |= SEEN_A; - emit_xori(r_A, r_A, k, ctx); - break; - case BPF_ANC | SKF_AD_ALU_XOR_X: - case BPF_ALU | BPF_XOR | BPF_X: - /* A ^= X */ - ctx->flags |= SEEN_A; - emit_xor(r_A, r_A, r_X, ctx); - break; - case BPF_ALU | BPF_AND | BPF_K: - /* A &= K */ - ctx->flags |= SEEN_A; - emit_andi(r_A, r_A, k, ctx); - break; - case BPF_ALU | BPF_AND | BPF_X: - /* A &= X */ - ctx->flags |= SEEN_A | SEEN_X; - emit_and(r_A, r_A, r_X, ctx); - break; - case BPF_ALU | BPF_LSH | BPF_K: - /* A <<= K */ - ctx->flags |= SEEN_A; - emit_sll(r_A, r_A, k, ctx); - break; - case BPF_ALU | BPF_LSH | BPF_X: - /* A <<= X */ - ctx->flags |= SEEN_A | SEEN_X; - emit_sllv(r_A, r_A, r_X, ctx); - break; - case BPF_ALU | BPF_RSH | BPF_K: - /* A >>= K */ - ctx->flags |= SEEN_A; - 
emit_srl(r_A, r_A, k, ctx); - break; - case BPF_ALU | BPF_RSH | BPF_X: - ctx->flags |= SEEN_A | SEEN_X; - emit_srlv(r_A, r_A, r_X, ctx); - break; - case BPF_ALU | BPF_NEG: - /* A = -A */ - ctx->flags |= SEEN_A; - emit_neg(r_A, ctx); - break; - case BPF_JMP | BPF_JA: - /* pc += K */ - emit_b(b_imm(i + k + 1, ctx), ctx); - emit_nop(ctx); - break; - case BPF_JMP | BPF_JEQ | BPF_K: - /* pc += ( A == K ) ? pc->jt : pc->jf */ - condt = MIPS_COND_EQ | MIPS_COND_K; - goto jmp_cmp; - case BPF_JMP | BPF_JEQ | BPF_X: - ctx->flags |= SEEN_X; - /* pc += ( A == X ) ? pc->jt : pc->jf */ - condt = MIPS_COND_EQ | MIPS_COND_X; - goto jmp_cmp; - case BPF_JMP | BPF_JGE | BPF_K: - /* pc += ( A >= K ) ? pc->jt : pc->jf */ - condt = MIPS_COND_GE | MIPS_COND_K; - goto jmp_cmp; - case BPF_JMP | BPF_JGE | BPF_X: - ctx->flags |= SEEN_X; - /* pc += ( A >= X ) ? pc->jt : pc->jf */ - condt = MIPS_COND_GE | MIPS_COND_X; - goto jmp_cmp; - case BPF_JMP | BPF_JGT | BPF_K: - /* pc += ( A > K ) ? pc->jt : pc->jf */ - condt = MIPS_COND_GT | MIPS_COND_K; - goto jmp_cmp; - case BPF_JMP | BPF_JGT | BPF_X: - ctx->flags |= SEEN_X; - /* pc += ( A > X ) ? pc->jt : pc->jf */ - condt = MIPS_COND_GT | MIPS_COND_X; -jmp_cmp: - /* Greater or Equal */ - if ((condt & MIPS_COND_GE) || - (condt & MIPS_COND_GT)) { - if (condt & MIPS_COND_K) { /* K */ - ctx->flags |= SEEN_A; - emit_sltiu(r_s0, r_A, k, ctx); - } else { /* X */ - ctx->flags |= SEEN_A | - SEEN_X; - emit_sltu(r_s0, r_A, r_X, ctx); - } - /* A < (K|X) ? r_scrach = 1 */ - b_off = b_imm(i + inst->jf + 1, ctx); - emit_bcond(MIPS_COND_NE, r_s0, r_zero, b_off, - ctx); - emit_nop(ctx); - /* A > (K|X) ? scratch = 0 */ - if (condt & MIPS_COND_GT) { - /* Checking for equality */ - ctx->flags |= SEEN_A | SEEN_X; - if (condt & MIPS_COND_K) - emit_load_imm(r_s0, k, ctx); - else - emit_jit_reg_move(r_s0, r_X, - ctx); - b_off = b_imm(i + inst->jf + 1, ctx); - emit_bcond(MIPS_COND_EQ, r_A, r_s0, - b_off, ctx); - emit_nop(ctx); - /* Finally, A > K|X */ - b_off = b_imm(i + inst->jt + 1, ctx); - emit_b(b_off, ctx); - emit_nop(ctx); - } else { - /* A >= (K|X) so jump */ - b_off = b_imm(i + inst->jt + 1, ctx); - emit_b(b_off, ctx); - emit_nop(ctx); - } - } else { - /* A == K|X */ - if (condt & MIPS_COND_K) { /* K */ - ctx->flags |= SEEN_A; - emit_load_imm(r_s0, k, ctx); - /* jump true */ - b_off = b_imm(i + inst->jt + 1, ctx); - emit_bcond(MIPS_COND_EQ, r_A, r_s0, - b_off, ctx); - emit_nop(ctx); - /* jump false */ - b_off = b_imm(i + inst->jf + 1, - ctx); - emit_bcond(MIPS_COND_NE, r_A, r_s0, - b_off, ctx); - emit_nop(ctx); - } else { /* X */ - /* jump true */ - ctx->flags |= SEEN_A | SEEN_X; - b_off = b_imm(i + inst->jt + 1, - ctx); - emit_bcond(MIPS_COND_EQ, r_A, r_X, - b_off, ctx); - emit_nop(ctx); - /* jump false */ - b_off = b_imm(i + inst->jf + 1, ctx); - emit_bcond(MIPS_COND_NE, r_A, r_X, - b_off, ctx); - emit_nop(ctx); - } - } - break; - case BPF_JMP | BPF_JSET | BPF_K: - ctx->flags |= SEEN_A; - /* pc += (A & K) ? pc -> jt : pc -> jf */ - emit_load_imm(r_s1, k, ctx); - emit_and(r_s0, r_A, r_s1, ctx); - /* jump true */ - b_off = b_imm(i + inst->jt + 1, ctx); - emit_bcond(MIPS_COND_NE, r_s0, r_zero, b_off, ctx); - emit_nop(ctx); - /* jump false */ - b_off = b_imm(i + inst->jf + 1, ctx); - emit_b(b_off, ctx); - emit_nop(ctx); - break; - case BPF_JMP | BPF_JSET | BPF_X: - ctx->flags |= SEEN_X | SEEN_A; - /* pc += (A & X) ? 
pc -> jt : pc -> jf */ - emit_and(r_s0, r_A, r_X, ctx); - /* jump true */ - b_off = b_imm(i + inst->jt + 1, ctx); - emit_bcond(MIPS_COND_NE, r_s0, r_zero, b_off, ctx); - emit_nop(ctx); - /* jump false */ - b_off = b_imm(i + inst->jf + 1, ctx); - emit_b(b_off, ctx); - emit_nop(ctx); - break; - case BPF_RET | BPF_A: - ctx->flags |= SEEN_A; - if (i != prog->len - 1) - /* - * If this is not the last instruction - * then jump to the epilogue - */ - emit_b(b_imm(prog->len, ctx), ctx); - emit_reg_move(r_ret, r_A, ctx); /* delay slot */ - break; - case BPF_RET | BPF_K: - /* - * It can emit two instructions so it does not fit on - * the delay slot. - */ - emit_load_imm(r_ret, k, ctx); - if (i != prog->len - 1) { - /* - * If this is not the last instruction - * then jump to the epilogue - */ - emit_b(b_imm(prog->len, ctx), ctx); - emit_nop(ctx); - } - break; - case BPF_MISC | BPF_TAX: - /* X = A */ - ctx->flags |= SEEN_X | SEEN_A; - emit_jit_reg_move(r_X, r_A, ctx); - break; - case BPF_MISC | BPF_TXA: - /* A = X */ - ctx->flags |= SEEN_A | SEEN_X; - emit_jit_reg_move(r_A, r_X, ctx); - break; - /* AUX */ - case BPF_ANC | SKF_AD_PROTOCOL: - /* A = ntohs(skb->protocol */ - ctx->flags |= SEEN_SKB | SEEN_OFF | SEEN_A; - BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, - protocol) != 2); - off = offsetof(struct sk_buff, protocol); - emit_half_load(r_A, r_skb, off, ctx); -#ifdef CONFIG_CPU_LITTLE_ENDIAN - /* This needs little endian fixup */ - if (cpu_has_wsbh) { - /* R2 and later have the wsbh instruction */ - emit_wsbh(r_A, r_A, ctx); - } else { - /* Get first byte */ - emit_andi(r_tmp_imm, r_A, 0xff, ctx); - /* Shift it */ - emit_sll(r_tmp, r_tmp_imm, 8, ctx); - /* Get second byte */ - emit_srl(r_tmp_imm, r_A, 8, ctx); - emit_andi(r_tmp_imm, r_tmp_imm, 0xff, ctx); - /* Put everyting together in r_A */ - emit_or(r_A, r_tmp, r_tmp_imm, ctx); - } -#endif - break; - case BPF_ANC | SKF_AD_CPU: - ctx->flags |= SEEN_A | SEEN_OFF; - /* A = current_thread_info()->cpu */ - BUILD_BUG_ON(FIELD_SIZEOF(struct thread_info, - cpu) != 4); - off = offsetof(struct thread_info, cpu); - /* $28/gp points to the thread_info struct */ - emit_load(r_A, 28, off, ctx); - break; - case BPF_ANC | SKF_AD_IFINDEX: - /* A = skb->dev->ifindex */ - case BPF_ANC | SKF_AD_HATYPE: - /* A = skb->dev->type */ - ctx->flags |= SEEN_SKB | SEEN_A; - off = offsetof(struct sk_buff, dev); - /* Load *dev pointer */ - emit_load_ptr(r_s0, r_skb, off, ctx); - /* error (0) in the delay slot */ - emit_bcond(MIPS_COND_EQ, r_s0, r_zero, - b_imm(prog->len, ctx), ctx); - emit_reg_move(r_ret, r_zero, ctx); - if (code == (BPF_ANC | SKF_AD_IFINDEX)) { - BUILD_BUG_ON(FIELD_SIZEOF(struct net_device, ifindex) != 4); - off = offsetof(struct net_device, ifindex); - emit_load(r_A, r_s0, off, ctx); - } else { /* (code == (BPF_ANC | SKF_AD_HATYPE) */ - BUILD_BUG_ON(FIELD_SIZEOF(struct net_device, type) != 2); - off = offsetof(struct net_device, type); - emit_half_load_unsigned(r_A, r_s0, off, ctx); - } - break; - case BPF_ANC | SKF_AD_MARK: - ctx->flags |= SEEN_SKB | SEEN_A; - BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, mark) != 4); - off = offsetof(struct sk_buff, mark); - emit_load(r_A, r_skb, off, ctx); - break; - case BPF_ANC | SKF_AD_RXHASH: - ctx->flags |= SEEN_SKB | SEEN_A; - BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, hash) != 4); - off = offsetof(struct sk_buff, hash); - emit_load(r_A, r_skb, off, ctx); - break; - case BPF_ANC | SKF_AD_VLAN_TAG: - ctx->flags |= SEEN_SKB | SEEN_A; - BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, - vlan_tci) != 2); - off = offsetof(struct 
sk_buff, vlan_tci); - emit_half_load_unsigned(r_A, r_skb, off, ctx); - break; - case BPF_ANC | SKF_AD_VLAN_TAG_PRESENT: - ctx->flags |= SEEN_SKB | SEEN_A; - emit_load_byte(r_A, r_skb, PKT_VLAN_PRESENT_OFFSET(), ctx); - if (PKT_VLAN_PRESENT_BIT) - emit_srl(r_A, r_A, PKT_VLAN_PRESENT_BIT, ctx); - if (PKT_VLAN_PRESENT_BIT < 7) - emit_andi(r_A, r_A, 1, ctx); - break; - case BPF_ANC | SKF_AD_PKTTYPE: - ctx->flags |= SEEN_SKB; - - emit_load_byte(r_tmp, r_skb, PKT_TYPE_OFFSET(), ctx); - /* Keep only the last 3 bits */ - emit_andi(r_A, r_tmp, PKT_TYPE_MAX, ctx); -#ifdef __BIG_ENDIAN_BITFIELD - /* Get the actual packet type to the lower 3 bits */ - emit_srl(r_A, r_A, 5, ctx); -#endif - break; - case BPF_ANC | SKF_AD_QUEUE: - ctx->flags |= SEEN_SKB | SEEN_A; - BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, - queue_mapping) != 2); - BUILD_BUG_ON(offsetof(struct sk_buff, - queue_mapping) > 0xff); - off = offsetof(struct sk_buff, queue_mapping); - emit_half_load_unsigned(r_A, r_skb, off, ctx); - break; - default: - pr_debug("%s: Unhandled opcode: 0x%02x\n", __FILE__, - inst->code); - return -1; - } - } - - /* compute offsets only during the first pass */ - if (ctx->target == NULL) - ctx->offsets[i] = ctx->idx * 4; - - return 0; -} - -void bpf_jit_compile(struct bpf_prog *fp) -{ - struct jit_ctx ctx; - unsigned int alloc_size, tmp_idx; - - if (!bpf_jit_enable) - return; - - memset(&ctx, 0, sizeof(ctx)); - - ctx.offsets = kcalloc(fp->len + 1, sizeof(*ctx.offsets), GFP_KERNEL); - if (ctx.offsets == NULL) - return; - - ctx.skf = fp; - - if (build_body(&ctx)) - goto out; - - tmp_idx = ctx.idx; - build_prologue(&ctx); - ctx.prologue_bytes = (ctx.idx - tmp_idx) * 4; - /* just to complete the ctx.idx count */ - build_epilogue(&ctx); - - alloc_size = 4 * ctx.idx; - ctx.target = module_alloc(alloc_size); - if (ctx.target == NULL) - goto out; - - /* Clean it */ - memset(ctx.target, 0, alloc_size); - - ctx.idx = 0; - - /* Generate the actual JIT code */ - build_prologue(&ctx); - build_body(&ctx); - build_epilogue(&ctx); - - /* Update the icache */ - flush_icache_range((ptr)ctx.target, (ptr)(ctx.target + ctx.idx)); - - if (bpf_jit_enable > 1) - /* Dump JIT code */ - bpf_jit_dump(fp->len, alloc_size, 2, ctx.target); - - fp->bpf_func = (void *)ctx.target; - fp->jited = 1; - -out: - kfree(ctx.offsets); -} - -void bpf_jit_free(struct bpf_prog *fp) -{ - if (fp->jited) - module_memfree(fp->bpf_func); - - bpf_prog_unlock_free(fp); -} diff --git a/arch/mips/net/bpf_jit_asm.S b/arch/mips/net/bpf_jit_asm.S deleted file mode 100644 index 57154c5883b6..000000000000 --- a/arch/mips/net/bpf_jit_asm.S +++ /dev/null @@ -1,285 +0,0 @@ -/* - * bpf_jib_asm.S: Packet/header access helper functions for MIPS/MIPS64 BPF - * compiler. - * - * Copyright (C) 2015 Imagination Technologies Ltd. - * Author: Markos Chandras - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by the - * Free Software Foundation; version 2 of the License. - */ - -#include -#include -#include -#include "bpf_jit.h" - -/* ABI - * - * r_skb_hl skb header length - * r_skb_data skb data - * r_off(a1) offset register - * r_A BPF register A - * r_X PF register X - * r_skb(a0) *skb - * r_M *scratch memory - * r_skb_le skb length - * r_s0 Scratch register 0 - * r_s1 Scratch register 1 - * - * On entry: - * a0: *skb - * a1: offset (imm or imm + X) - * - * All non-BPF-ABI registers are free for use. On return, we only - * care about r_ret. 
The BPF-ABI registers are assumed to remain - * unmodified during the entire filter operation. - */ - -#define skb a0 -#define offset a1 -#define SKF_LL_OFF (-0x200000) /* Can't include linux/filter.h in assembly */ - - /* We know better :) so prevent assembler reordering etc */ - .set noreorder - -#define is_offset_negative(TYPE) \ - /* If offset is negative we have more work to do */ \ - slti t0, offset, 0; \ - bgtz t0, bpf_slow_path_##TYPE##_neg; \ - /* Be careful what follows in DS. */ - -#define is_offset_in_header(SIZE, TYPE) \ - /* Reading from header? */ \ - addiu $r_s0, $r_skb_hl, -SIZE; \ - slt t0, $r_s0, offset; \ - bgtz t0, bpf_slow_path_##TYPE; \ - -LEAF(sk_load_word) - is_offset_negative(word) -FEXPORT(sk_load_word_positive) - is_offset_in_header(4, word) - /* Offset within header boundaries */ - PTR_ADDU t1, $r_skb_data, offset - .set reorder - lw $r_A, 0(t1) - .set noreorder -#ifdef CONFIG_CPU_LITTLE_ENDIAN -# if MIPS_ISA_REV >= 2 - wsbh t0, $r_A - rotr $r_A, t0, 16 -# else - sll t0, $r_A, 24 - srl t1, $r_A, 24 - srl t2, $r_A, 8 - or t0, t0, t1 - andi t2, t2, 0xff00 - andi t1, $r_A, 0xff00 - or t0, t0, t2 - sll t1, t1, 8 - or $r_A, t0, t1 -# endif -#endif - jr $r_ra - move $r_ret, zero - END(sk_load_word) - -LEAF(sk_load_half) - is_offset_negative(half) -FEXPORT(sk_load_half_positive) - is_offset_in_header(2, half) - /* Offset within header boundaries */ - PTR_ADDU t1, $r_skb_data, offset - lhu $r_A, 0(t1) -#ifdef CONFIG_CPU_LITTLE_ENDIAN -# if MIPS_ISA_REV >= 2 - wsbh $r_A, $r_A -# else - sll t0, $r_A, 8 - srl t1, $r_A, 8 - andi t0, t0, 0xff00 - or $r_A, t0, t1 -# endif -#endif - jr $r_ra - move $r_ret, zero - END(sk_load_half) - -LEAF(sk_load_byte) - is_offset_negative(byte) -FEXPORT(sk_load_byte_positive) - is_offset_in_header(1, byte) - /* Offset within header boundaries */ - PTR_ADDU t1, $r_skb_data, offset - lbu $r_A, 0(t1) - jr $r_ra - move $r_ret, zero - END(sk_load_byte) - -/* - * call skb_copy_bits: - * (prototype in linux/skbuff.h) - * - * int skb_copy_bits(sk_buff *skb, int offset, void *to, int len) - * - * o32 mandates we leave 4 spaces for argument registers in case - * the callee needs to use them. Even though we don't care about - * the argument registers ourselves, we need to allocate that space - * to remain ABI compliant since the callee may want to use that space. - * We also allocate 2 more spaces for $r_ra and our return register (*to). - * - * n64 is a bit different. The *caller* will allocate the space to preserve - * the arguments. So in 64-bit kernels, we allocate the 4-arg space for no - * good reason but it does not matter that much really. - * - * (void *to) is returned in r_s0 - * - */ -#ifdef CONFIG_CPU_LITTLE_ENDIAN -#define DS_OFFSET(SIZE) (4 * SZREG) -#else -#define DS_OFFSET(SIZE) ((4 * SZREG) + (4 - SIZE)) -#endif -#define bpf_slow_path_common(SIZE) \ - /* Quick check. Are we within reasonable boundaries? */ \ - LONG_ADDIU $r_s1, $r_skb_len, -SIZE; \ - sltu $r_s0, offset, $r_s1; \ - beqz $r_s0, fault; \ - /* Load 4th argument in DS */ \ - LONG_ADDIU a3, zero, SIZE; \ - PTR_ADDIU $r_sp, $r_sp, -(6 * SZREG); \ - PTR_LA t0, skb_copy_bits; \ - PTR_S $r_ra, (5 * SZREG)($r_sp); \ - /* Assign low slot to a2 */ \ - PTR_ADDIU a2, $r_sp, DS_OFFSET(SIZE); \ - jalr t0; \ - /* Reset our destination slot (DS but it's ok) */ \ - INT_S zero, (4 * SZREG)($r_sp); \ - /* \ - * skb_copy_bits returns 0 on success and -EFAULT \ - * on error. Our data live in a2. Do not bother with \ - * our data if an error has been returned. 
\ - */ \ - /* Restore our frame */ \ - PTR_L $r_ra, (5 * SZREG)($r_sp); \ - INT_L $r_s0, (4 * SZREG)($r_sp); \ - bltz v0, fault; \ - PTR_ADDIU $r_sp, $r_sp, 6 * SZREG; \ - move $r_ret, zero; \ - -NESTED(bpf_slow_path_word, (6 * SZREG), $r_sp) - bpf_slow_path_common(4) -#ifdef CONFIG_CPU_LITTLE_ENDIAN -# if MIPS_ISA_REV >= 2 - wsbh t0, $r_s0 - jr $r_ra - rotr $r_A, t0, 16 -# else - sll t0, $r_s0, 24 - srl t1, $r_s0, 24 - srl t2, $r_s0, 8 - or t0, t0, t1 - andi t2, t2, 0xff00 - andi t1, $r_s0, 0xff00 - or t0, t0, t2 - sll t1, t1, 8 - jr $r_ra - or $r_A, t0, t1 -# endif -#else - jr $r_ra - move $r_A, $r_s0 -#endif - - END(bpf_slow_path_word) - -NESTED(bpf_slow_path_half, (6 * SZREG), $r_sp) - bpf_slow_path_common(2) -#ifdef CONFIG_CPU_LITTLE_ENDIAN -# if MIPS_ISA_REV >= 2 - jr $r_ra - wsbh $r_A, $r_s0 -# else - sll t0, $r_s0, 8 - andi t1, $r_s0, 0xff00 - andi t0, t0, 0xff00 - srl t1, t1, 8 - jr $r_ra - or $r_A, t0, t1 -# endif -#else - jr $r_ra - move $r_A, $r_s0 -#endif - - END(bpf_slow_path_half) - -NESTED(bpf_slow_path_byte, (6 * SZREG), $r_sp) - bpf_slow_path_common(1) - jr $r_ra - move $r_A, $r_s0 - - END(bpf_slow_path_byte) - -/* - * Negative entry points - */ - .macro bpf_is_end_of_data - li t0, SKF_LL_OFF - /* Reading link layer data? */ - slt t1, offset, t0 - bgtz t1, fault - /* Be careful what follows in DS. */ - .endm -/* - * call skb_copy_bits: - * (prototype in linux/filter.h) - * - * void *bpf_internal_load_pointer_neg_helper(const struct sk_buff *skb, - * int k, unsigned int size) - * - * see above (bpf_slow_path_common) for ABI restrictions - */ -#define bpf_negative_common(SIZE) \ - PTR_ADDIU $r_sp, $r_sp, -(6 * SZREG); \ - PTR_LA t0, bpf_internal_load_pointer_neg_helper; \ - PTR_S $r_ra, (5 * SZREG)($r_sp); \ - jalr t0; \ - li a2, SIZE; \ - PTR_L $r_ra, (5 * SZREG)($r_sp); \ - /* Check return pointer */ \ - beqz v0, fault; \ - PTR_ADDIU $r_sp, $r_sp, 6 * SZREG; \ - /* Preserve our pointer */ \ - move $r_s0, v0; \ - /* Set return value */ \ - move $r_ret, zero; \ - -bpf_slow_path_word_neg: - bpf_is_end_of_data -NESTED(sk_load_word_negative, (6 * SZREG), $r_sp) - bpf_negative_common(4) - jr $r_ra - lw $r_A, 0($r_s0) - END(sk_load_word_negative) - -bpf_slow_path_half_neg: - bpf_is_end_of_data -NESTED(sk_load_half_negative, (6 * SZREG), $r_sp) - bpf_negative_common(2) - jr $r_ra - lhu $r_A, 0($r_s0) - END(sk_load_half_negative) - -bpf_slow_path_byte_neg: - bpf_is_end_of_data -NESTED(sk_load_byte_negative, (6 * SZREG), $r_sp) - bpf_negative_common(1) - jr $r_ra - lbu $r_A, 0($r_s0) - END(sk_load_byte_negative) - -fault: - jr $r_ra - addiu $r_ret, zero, 1 diff --git a/arch/mips/net/ebpf_jit.c b/arch/mips/net/ebpf_jit.c index 26eef9ad3896..3548a69c82f7 100644 --- a/arch/mips/net/ebpf_jit.c +++ b/arch/mips/net/ebpf_jit.c @@ -126,15 +126,21 @@ static enum reg_val_type get_reg_val_type(const struct jit_ctx *ctx, } /* Simply emit the instruction if the JIT memory space has been allocated */ -#define emit_instr(ctx, func, ...) \ -do { \ - if ((ctx)->target != NULL) { \ - u32 *p = &(ctx)->target[ctx->idx]; \ - uasm_i_##func(&p, ##__VA_ARGS__); \ - } \ - (ctx)->idx++; \ +#define emit_instr_long(ctx, func64, func32, ...) \ +do { \ + if ((ctx)->target != NULL) { \ + u32 *p = &(ctx)->target[ctx->idx]; \ + if (IS_ENABLED(CONFIG_64BIT)) \ + uasm_i_##func64(&p, ##__VA_ARGS__); \ + else \ + uasm_i_##func32(&p, ##__VA_ARGS__); \ + } \ + (ctx)->idx++; \ } while (0) +#define emit_instr(ctx, func, ...) 
\ + emit_instr_long(ctx, func, func, ##__VA_ARGS__) + static unsigned int j_target(struct jit_ctx *ctx, int target_idx) { unsigned long target_va, base_va; @@ -274,17 +280,17 @@ static int gen_int_prologue(struct jit_ctx *ctx) * If RA we are doing a function call and may need * extra 8-byte tmp area. */ - stack_adjust += 16; + stack_adjust += 2 * sizeof(long); if (ctx->flags & EBPF_SAVE_S0) - stack_adjust += 8; + stack_adjust += sizeof(long); if (ctx->flags & EBPF_SAVE_S1) - stack_adjust += 8; + stack_adjust += sizeof(long); if (ctx->flags & EBPF_SAVE_S2) - stack_adjust += 8; + stack_adjust += sizeof(long); if (ctx->flags & EBPF_SAVE_S3) - stack_adjust += 8; + stack_adjust += sizeof(long); if (ctx->flags & EBPF_SAVE_S4) - stack_adjust += 8; + stack_adjust += sizeof(long); BUILD_BUG_ON(MAX_BPF_STACK & 7); locals_size = (ctx->flags & EBPF_SEEN_FP) ? MAX_BPF_STACK : 0; @@ -298,41 +304,49 @@ static int gen_int_prologue(struct jit_ctx *ctx) * On tail call we skip this instruction, and the TCC is * passed in $v1 from the caller. */ - emit_instr(ctx, daddiu, MIPS_R_V1, MIPS_R_ZERO, MAX_TAIL_CALL_CNT); + emit_instr(ctx, addiu, MIPS_R_V1, MIPS_R_ZERO, MAX_TAIL_CALL_CNT); if (stack_adjust) - emit_instr(ctx, daddiu, MIPS_R_SP, MIPS_R_SP, -stack_adjust); + emit_instr_long(ctx, daddiu, addiu, + MIPS_R_SP, MIPS_R_SP, -stack_adjust); else return 0; - store_offset = stack_adjust - 8; + store_offset = stack_adjust - sizeof(long); if (ctx->flags & EBPF_SAVE_RA) { - emit_instr(ctx, sd, MIPS_R_RA, store_offset, MIPS_R_SP); - store_offset -= 8; + emit_instr_long(ctx, sd, sw, + MIPS_R_RA, store_offset, MIPS_R_SP); + store_offset -= sizeof(long); } if (ctx->flags & EBPF_SAVE_S0) { - emit_instr(ctx, sd, MIPS_R_S0, store_offset, MIPS_R_SP); - store_offset -= 8; + emit_instr_long(ctx, sd, sw, + MIPS_R_S0, store_offset, MIPS_R_SP); + store_offset -= sizeof(long); } if (ctx->flags & EBPF_SAVE_S1) { - emit_instr(ctx, sd, MIPS_R_S1, store_offset, MIPS_R_SP); - store_offset -= 8; + emit_instr_long(ctx, sd, sw, + MIPS_R_S1, store_offset, MIPS_R_SP); + store_offset -= sizeof(long); } if (ctx->flags & EBPF_SAVE_S2) { - emit_instr(ctx, sd, MIPS_R_S2, store_offset, MIPS_R_SP); - store_offset -= 8; + emit_instr_long(ctx, sd, sw, + MIPS_R_S2, store_offset, MIPS_R_SP); + store_offset -= sizeof(long); } if (ctx->flags & EBPF_SAVE_S3) { - emit_instr(ctx, sd, MIPS_R_S3, store_offset, MIPS_R_SP); - store_offset -= 8; + emit_instr_long(ctx, sd, sw, + MIPS_R_S3, store_offset, MIPS_R_SP); + store_offset -= sizeof(long); } if (ctx->flags & EBPF_SAVE_S4) { - emit_instr(ctx, sd, MIPS_R_S4, store_offset, MIPS_R_SP); - store_offset -= 8; + emit_instr_long(ctx, sd, sw, + MIPS_R_S4, store_offset, MIPS_R_SP); + store_offset -= sizeof(long); } if ((ctx->flags & EBPF_SEEN_TC) && !(ctx->flags & EBPF_TCC_IN_V1)) - emit_instr(ctx, daddu, MIPS_R_S4, MIPS_R_V1, MIPS_R_ZERO); + emit_instr_long(ctx, daddu, addu, + MIPS_R_S4, MIPS_R_V1, MIPS_R_ZERO); return 0; } @@ -341,7 +355,7 @@ static int build_int_epilogue(struct jit_ctx *ctx, int dest_reg) { const struct bpf_prog *prog = ctx->skf; int stack_adjust = ctx->stack_size; - int store_offset = stack_adjust - 8; + int store_offset = stack_adjust - sizeof(long); enum reg_val_type td; int r0 = MIPS_R_V0; @@ -353,33 +367,40 @@ static int build_int_epilogue(struct jit_ctx *ctx, int dest_reg) } if (ctx->flags & EBPF_SAVE_RA) { - emit_instr(ctx, ld, MIPS_R_RA, store_offset, MIPS_R_SP); - store_offset -= 8; + emit_instr_long(ctx, ld, lw, + MIPS_R_RA, store_offset, MIPS_R_SP); + store_offset -= sizeof(long); } 
if (ctx->flags & EBPF_SAVE_S0) { - emit_instr(ctx, ld, MIPS_R_S0, store_offset, MIPS_R_SP); - store_offset -= 8; + emit_instr_long(ctx, ld, lw, + MIPS_R_S0, store_offset, MIPS_R_SP); + store_offset -= sizeof(long); } if (ctx->flags & EBPF_SAVE_S1) { - emit_instr(ctx, ld, MIPS_R_S1, store_offset, MIPS_R_SP); - store_offset -= 8; + emit_instr_long(ctx, ld, lw, + MIPS_R_S1, store_offset, MIPS_R_SP); + store_offset -= sizeof(long); } if (ctx->flags & EBPF_SAVE_S2) { - emit_instr(ctx, ld, MIPS_R_S2, store_offset, MIPS_R_SP); - store_offset -= 8; + emit_instr_long(ctx, ld, lw, + MIPS_R_S2, store_offset, MIPS_R_SP); + store_offset -= sizeof(long); } if (ctx->flags & EBPF_SAVE_S3) { - emit_instr(ctx, ld, MIPS_R_S3, store_offset, MIPS_R_SP); - store_offset -= 8; + emit_instr_long(ctx, ld, lw, + MIPS_R_S3, store_offset, MIPS_R_SP); + store_offset -= sizeof(long); } if (ctx->flags & EBPF_SAVE_S4) { - emit_instr(ctx, ld, MIPS_R_S4, store_offset, MIPS_R_SP); - store_offset -= 8; + emit_instr_long(ctx, ld, lw, + MIPS_R_S4, store_offset, MIPS_R_SP); + store_offset -= sizeof(long); } emit_instr(ctx, jr, dest_reg); if (stack_adjust) - emit_instr(ctx, daddiu, MIPS_R_SP, MIPS_R_SP, stack_adjust); + emit_instr_long(ctx, daddiu, addiu, + MIPS_R_SP, MIPS_R_SP, stack_adjust); else emit_instr(ctx, nop); @@ -646,6 +667,10 @@ static int build_one_insn(const struct bpf_insn *insn, struct jit_ctx *ctx, s64 t64s; int bpf_op = BPF_OP(insn->code); + if (IS_ENABLED(CONFIG_32BIT) && ((BPF_CLASS(insn->code) == BPF_ALU64) + || (bpf_op == BPF_DW))) + return -EINVAL; + switch (insn->code) { case BPF_ALU64 | BPF_ADD | BPF_K: /* ALU64_IMM */ case BPF_ALU64 | BPF_SUB | BPF_K: /* ALU64_IMM */ @@ -1283,7 +1308,7 @@ jeq_common: case BPF_JMP | BPF_CALL: ctx->flags |= EBPF_SAVE_RA; - t64s = (s64)insn->imm + (s64)__bpf_call_base; + t64s = (s64)insn->imm + (long)__bpf_call_base; emit_const_to_reg(ctx, MIPS_R_T9, (u64)t64s); emit_instr(ctx, jalr, MIPS_R_RA, MIPS_R_T9); /* delay slot */ -- cgit v1.2.3 From 371a415153be953520003ffe4a4eca3fecc0e8c2 Mon Sep 17 00:00:00 2001 From: "Enrico Weigelt, metux IT consult" Date: Mon, 11 Mar 2019 16:54:27 +0100 Subject: arch: mips: Kconfig: pedantic formatting Formatting of Kconfig files doesn't look so pretty, so let the Great White Handkerchief come around and clean it up. 
Signed-off-by: Enrico Weigelt, metux IT consult Signed-off-by: Paul Burton Cc: linux-kernel@vger.kernel.org Cc: hauke@hauke-m.de Cc: zajec5@gmail.com Cc: f.fainelli@gmail.com Cc: bcm-kernel-feedback-list@broadcom.com Cc: linux-mips@vger.kernel.org --- arch/mips/Kconfig | 61 ++++++++++++++++++++-------------------- arch/mips/bcm47xx/Kconfig | 8 +++--- arch/mips/bcm63xx/boards/Kconfig | 2 +- arch/mips/pic32/Kconfig | 8 +++--- 4 files changed, 39 insertions(+), 40 deletions(-) (limited to 'arch') diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig index cbf16db3366c..fb8a39d53168 100644 --- a/arch/mips/Kconfig +++ b/arch/mips/Kconfig @@ -275,7 +275,7 @@ config BCM47XX select BCM47XX_SPROM select BCM47XX_SSB if !BCM47XX_BCMA help - Support for BCM47XX based boards + Support for BCM47XX based boards config BCM63XX bool "Broadcom BCM63XX based boards" @@ -294,7 +294,7 @@ config BCM63XX select MIPS_L1_CACHE_SHIFT_4 select CLKDEV_LOOKUP help - Support for BCM63XX based boards + Support for BCM63XX based boards config MIPS_COBALT bool "Cobalt Server" @@ -373,10 +373,10 @@ config MACH_JAZZ select SYS_SUPPORTS_64BIT_KERNEL select SYS_SUPPORTS_100HZ help - This a family of machines based on the MIPS R4030 chipset which was - used by several vendors to build RISC/os and Windows NT workstations. - Members include the Acer PICA, MIPS Magnum 4000, MIPS Millennium and - Olivetti M700-10 workstations. + This a family of machines based on the MIPS R4030 chipset which was + used by several vendors to build RISC/os and Windows NT workstations. + Members include the Acer PICA, MIPS Magnum 4000, MIPS Millennium and + Olivetti M700-10 workstations. config MACH_INGENIC bool "Ingenic SoC based machines" @@ -572,14 +572,14 @@ config NXP_STB220 bool "NXP STB220 board" select SOC_PNX833X help - Support for NXP Semiconductors STB220 Development Board. + Support for NXP Semiconductors STB220 Development Board. config NXP_STB225 bool "NXP 225 board" select SOC_PNX833X select SOC_PNX8335 help - Support for NXP Semiconductors STB225 Development Board. + Support for NXP Semiconductors STB225 Development Board. config PMC_MSP bool "PMC-Sierra MSP chipsets" @@ -721,9 +721,9 @@ config SGI_IP28 select SYS_SUPPORTS_64BIT_KERNEL select SYS_SUPPORTS_BIG_ENDIAN select MIPS_L1_CACHE_SHIFT_7 - help - This is the SGI Indigo2 with R10000 processor. To compile a Linux - kernel that runs on these, say Y here. + help + This is the SGI Indigo2 with R10000 processor. To compile a Linux + kernel that runs on these, say Y here. config SGI_IP32 bool "SGI IP32 (O2)" @@ -1174,9 +1174,9 @@ config HOLES_IN_ZONE config SYS_SUPPORTS_RELOCATABLE bool help - Selected if the platform supports relocating the kernel. - The platform must provide plat_get_fdt() if it selects CONFIG_USE_OF - to allow access to command line and entropy sources. + Selected if the platform supports relocating the kernel. + The platform must provide plat_get_fdt() if it selects CONFIG_USE_OF + to allow access to command line and entropy sources. config MIPS_CBPF_JIT def_bool y @@ -2119,8 +2119,8 @@ config MIPS_PGD_C0_CONTEXT # Set to y for ptrace access to watch registers. # config HARDWARE_WATCHPOINTS - bool - default y if CPU_MIPSR1 || CPU_MIPSR2 || CPU_MIPSR6 + bool + default y if CPU_MIPSR1 || CPU_MIPSR2 || CPU_MIPSR6 menu "Kernel type" @@ -2184,10 +2184,10 @@ config PAGE_SIZE_4KB bool "4kB" depends on !CPU_LOONGSON2 && !CPU_LOONGSON3 help - This option select the standard 4kB Linux page size. On some - R3000-family processors this is the only available page size. 
Using - 4kB page size will minimize memory consumption and is therefore - recommended for low memory systems. + This option select the standard 4kB Linux page size. On some + R3000-family processors this is the only available page size. Using + 4kB page size will minimize memory consumption and is therefore + recommended for low memory systems. config PAGE_SIZE_8KB bool "8kB" @@ -2480,7 +2480,6 @@ config SB1_PASS_2_1_WORKAROUNDS depends on CPU_SB1 && CPU_SB1_PASS_2 default y - choice prompt "SmartMIPS or microMIPS ASE support" @@ -2688,16 +2687,16 @@ config RANDOMIZE_BASE bool "Randomize the address of the kernel image" depends on RELOCATABLE ---help--- - Randomizes the physical and virtual address at which the - kernel image is loaded, as a security feature that - deters exploit attempts relying on knowledge of the location - of kernel internals. + Randomizes the physical and virtual address at which the + kernel image is loaded, as a security feature that + deters exploit attempts relying on knowledge of the location + of kernel internals. - Entropy is generated using any coprocessor 0 registers available. + Entropy is generated using any coprocessor 0 registers available. - The kernel will be offset by up to RANDOMIZE_BASE_MAX_OFFSET. + The kernel will be offset by up to RANDOMIZE_BASE_MAX_OFFSET. - If unsure, say N. + If unsure, say N. config RANDOMIZE_BASE_MAX_OFFSET hex "Maximum kASLR offset" if EXPERT @@ -2827,7 +2826,7 @@ choice prompt "Timer frequency" default HZ_250 help - Allows the configuration of the timer frequency. + Allows the configuration of the timer frequency. config HZ_24 bool "24 HZ" if SYS_SUPPORTS_24HZ || SYS_SUPPORTS_ARBIT_HZ @@ -3127,10 +3126,10 @@ config ARCH_MMAP_RND_BITS_MAX default 15 config ARCH_MMAP_RND_COMPAT_BITS_MIN - default 8 + default 8 config ARCH_MMAP_RND_COMPAT_BITS_MAX - default 15 + default 15 config I8253 bool diff --git a/arch/mips/bcm47xx/Kconfig b/arch/mips/bcm47xx/Kconfig index 29471038d817..6889f74e06f5 100644 --- a/arch/mips/bcm47xx/Kconfig +++ b/arch/mips/bcm47xx/Kconfig @@ -15,9 +15,9 @@ config BCM47XX_SSB select SSB_DRIVER_GPIO default y help - Add support for old Broadcom BCM47xx boards with Sonics Silicon Backplane support. + Add support for old Broadcom BCM47xx boards with Sonics Silicon Backplane support. - This will generate an image with support for SSB and MIPS32 R1 instruction set. + This will generate an image with support for SSB and MIPS32 R1 instruction set. config BCM47XX_BCMA bool "BCMA Support for Broadcom BCM47XX" @@ -31,8 +31,8 @@ config BCM47XX_BCMA select BCMA_DRIVER_GPIO default y help - Add support for new Broadcom BCM47xx boards with Broadcom specific Advanced Microcontroller Bus. + Add support for new Broadcom BCM47xx boards with Broadcom specific Advanced Microcontroller Bus. - This will generate an image with support for BCMA and MIPS32 R2 instruction set. + This will generate an image with support for BCMA and MIPS32 R2 instruction set. endif diff --git a/arch/mips/bcm63xx/boards/Kconfig b/arch/mips/bcm63xx/boards/Kconfig index f60d96610ace..492c3bd005d5 100644 --- a/arch/mips/bcm63xx/boards/Kconfig +++ b/arch/mips/bcm63xx/boards/Kconfig @@ -5,7 +5,7 @@ choice default BOARD_BCM963XX config BOARD_BCM963XX - bool "Generic Broadcom 963xx boards" + bool "Generic Broadcom 963xx boards" select SSB endchoice diff --git a/arch/mips/pic32/Kconfig b/arch/mips/pic32/Kconfig index e284e89183cc..7acbb50c1dcd 100644 --- a/arch/mips/pic32/Kconfig +++ b/arch/mips/pic32/Kconfig @@ -39,12 +39,12 @@ choice Select the devicetree. 
config DTB_PIC32_NONE - bool "None" + bool "None" config DTB_PIC32_MZDA_SK - bool "PIC32MZDA Starter Kit" - depends on PIC32MZDA - select BUILTIN_DTB + bool "PIC32MZDA Starter Kit" + depends on PIC32MZDA + select BUILTIN_DTB endchoice -- cgit v1.2.3 From e6331a321aafcc291a60ceedf4d6b0051a6117ca Mon Sep 17 00:00:00 2001 From: Paul Burton Date: Fri, 22 Mar 2019 18:04:04 +0000 Subject: MIPS: KVM: Use prandom_u32_max() to generate tlbwr index Emulation of the tlbwr instruction, which writes a TLB entry to a random index in the TLB, currently uses get_random_bytes() to generate a 4-byte random number which we then mask to form the index. This is overkill in a couple of ways: - We don't need 4 bytes here since we mask the value to form a 6-bit number anyway, so we waste /dev/random entropy generating 3 random bytes that are unused. - We don't need crypto-grade randomness here - the architecture spec allows implementations to use any algorithm & merely encourages that some pseudo-randomness be used rather than a simple counter. The fast prandom_u32() function fits that criterion well. So rather than using get_random_bytes() & consuming /dev/random entropy, switch to using the faster prandom_u32_max(), which provides what we need here whilst also performing the masking/modulo for us. Signed-off-by: Paul Burton Reported-by: George Spelvin Cc: James Hogan Cc: linux-mips@vger.kernel.org --- arch/mips/kvm/emulate.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) (limited to 'arch') diff --git a/arch/mips/kvm/emulate.c b/arch/mips/kvm/emulate.c index 0074427b04fb..e5de6bac8197 100644 --- a/arch/mips/kvm/emulate.c +++ b/arch/mips/kvm/emulate.c @@ -1141,9 +1141,7 @@ enum emulation_result kvm_mips_emul_tlbwr(struct kvm_vcpu *vcpu) unsigned long pc = vcpu->arch.pc; int index; - get_random_bytes(&index, sizeof(index)); - index &= (KVM_MIPS_GUEST_TLB_SIZE - 1); - + index = prandom_u32_max(KVM_MIPS_GUEST_TLB_SIZE); tlb = &vcpu->arch.guest_tlb[index]; kvm_mips_invalidate_guest_tlb(vcpu, tlb); -- cgit v1.2.3 From e6046b5e69a070f8a1af868bba788cf796bcd9a8 Mon Sep 17 00:00:00 2001 From: Chuanhong Guo Date: Thu, 21 Mar 2019 22:42:51 +0800 Subject: MIPS: ralink: fix cpu clock of mt7621 and add dt clk devices For a long time the mt7621 has used a fixed cpu clock, which causes problems if the cpu frequency is not 880MHz. This patch fixes the cpu clock calculation and adds the cpu/bus clkdev which will be used in dts.
Ported from OpenWrt: c7ca224299 ramips: fix cpu clock of mt7621 and add dt clk devices Signed-off-by: Weijie Gao Signed-off-by: Chuanhong Guo Signed-off-by: Paul Burton Cc: linux-mips@vger.kernel.org Cc: Ralf Baechle Cc: James Hogan Cc: John Crispin Cc: linux-kernel@vger.kernel.org --- arch/mips/include/asm/mach-ralink/mt7621.h | 20 ++++++ arch/mips/ralink/mt7621.c | 102 ++++++++++++++++++++--------- arch/mips/ralink/timer-gic.c | 4 +- 3 files changed, 93 insertions(+), 33 deletions(-) (limited to 'arch') diff --git a/arch/mips/include/asm/mach-ralink/mt7621.h b/arch/mips/include/asm/mach-ralink/mt7621.h index a672e06fa5fd..b8a8834164c8 100644 --- a/arch/mips/include/asm/mach-ralink/mt7621.h +++ b/arch/mips/include/asm/mach-ralink/mt7621.h @@ -19,6 +19,10 @@ #define SYSC_REG_CHIP_REV 0x0c #define SYSC_REG_SYSTEM_CONFIG0 0x10 #define SYSC_REG_SYSTEM_CONFIG1 0x14 +#define SYSC_REG_CLKCFG0 0x2c +#define SYSC_REG_CUR_CLK_STS 0x44 + +#define MEMC_REG_CPU_PLL 0x648 #define CHIP_REV_PKG_MASK 0x1 #define CHIP_REV_PKG_SHIFT 16 @@ -26,6 +30,22 @@ #define CHIP_REV_VER_SHIFT 8 #define CHIP_REV_ECO_MASK 0xf +#define XTAL_MODE_SEL_MASK 0x7 +#define XTAL_MODE_SEL_SHIFT 6 + +#define CPU_CLK_SEL_MASK 0x3 +#define CPU_CLK_SEL_SHIFT 30 + +#define CUR_CPU_FDIV_MASK 0x1f +#define CUR_CPU_FDIV_SHIFT 8 +#define CUR_CPU_FFRAC_MASK 0x1f +#define CUR_CPU_FFRAC_SHIFT 0 + +#define CPU_PLL_PREDIV_MASK 0x3 +#define CPU_PLL_PREDIV_SHIFT 12 +#define CPU_PLL_FBDIV_MASK 0x7f +#define CPU_PLL_FBDIV_SHIFT 4 + #define MT7621_DRAM_BASE 0x0 #define MT7621_DDR2_SIZE_MIN 32 #define MT7621_DDR2_SIZE_MAX 256 diff --git a/arch/mips/ralink/mt7621.c b/arch/mips/ralink/mt7621.c index d2718de60b9b..6828eefb0a85 100644 --- a/arch/mips/ralink/mt7621.c +++ b/arch/mips/ralink/mt7621.c @@ -9,22 +9,22 @@ #include #include +#include +#include +#include +#include #include #include #include #include #include +#include #include #include "common.h" -#define SYSC_REG_SYSCFG 0x10 -#define SYSC_REG_CPLL_CLKCFG0 0x2c -#define SYSC_REG_CUR_CLK_STS 0x44 -#define CPU_CLK_SEL (BIT(30) | BIT(31)) - #define MT7621_GPIO_MODE_UART1 1 #define MT7621_GPIO_MODE_I2C 2 #define MT7621_GPIO_MODE_UART3_MASK 0x3 @@ -110,49 +110,89 @@ static struct rt2880_pmx_group mt7621_pinmux_data[] = { { 0 } }; +static struct clk *clks[MT7621_CLK_MAX]; +static struct clk_onecell_data clk_data = { + .clks = clks, + .clk_num = ARRAY_SIZE(clks), +}; + phys_addr_t mips_cpc_default_phys_base(void) { panic("Cannot detect cpc address"); } +static struct clk *__init mt7621_add_sys_clkdev( + const char *id, unsigned long rate) +{ + struct clk *clk; + int err; + + clk = clk_register_fixed_rate(NULL, id, NULL, 0, rate); + if (IS_ERR(clk)) + panic("failed to allocate %s clock structure", id); + + err = clk_register_clkdev(clk, id, NULL); + if (err) + panic("unable to register %s clock device", id); + + return clk; +} + void __init ralink_clk_init(void) { - int cpu_fdiv = 0; - int cpu_ffrac = 0; - int fbdiv = 0; - u32 clk_sts, syscfg; - u8 clk_sel = 0, xtal_mode; - u32 cpu_clk; + const static u32 prediv_tbl[] = {0, 1, 2, 2}; + u32 syscfg, xtal_sel, clkcfg, clk_sel, curclk, ffiv, ffrac; + u32 pll, prediv, fbdiv; + u32 xtal_clk, cpu_clk, bus_clk; + + syscfg = rt_sysc_r32(SYSC_REG_SYSTEM_CONFIG0); + xtal_sel = (syscfg >> XTAL_MODE_SEL_SHIFT) & XTAL_MODE_SEL_MASK; - if ((rt_sysc_r32(SYSC_REG_CPLL_CLKCFG0) & CPU_CLK_SEL) != 0) - clk_sel = 1; + clkcfg = rt_sysc_r32(SYSC_REG_CLKCFG0); + clk_sel = (clkcfg >> CPU_CLK_SEL_SHIFT) & CPU_CLK_SEL_MASK; + + curclk = rt_sysc_r32(SYSC_REG_CUR_CLK_STS); + ffiv = 
(curclk >> CUR_CPU_FDIV_SHIFT) & CUR_CPU_FDIV_MASK; + ffrac = (curclk >> CUR_CPU_FFRAC_SHIFT) & CUR_CPU_FFRAC_MASK; + + if (xtal_sel <= 2) + xtal_clk = 20 * 1000 * 1000; + else if (xtal_sel <= 5) + xtal_clk = 40 * 1000 * 1000; + else + xtal_clk = 25 * 1000 * 1000; switch (clk_sel) { case 0: - clk_sts = rt_sysc_r32(SYSC_REG_CUR_CLK_STS); - cpu_fdiv = ((clk_sts >> 8) & 0x1F); - cpu_ffrac = (clk_sts & 0x1F); - cpu_clk = (500 * cpu_ffrac / cpu_fdiv) * 1000 * 1000; + cpu_clk = 500 * 1000 * 1000; break; - case 1: - fbdiv = ((rt_sysc_r32(0x648) >> 4) & 0x7F) + 1; - syscfg = rt_sysc_r32(SYSC_REG_SYSCFG); - xtal_mode = (syscfg >> 6) & 0x7; - if (xtal_mode >= 6) { - /* 25Mhz Xtal */ - cpu_clk = 25 * fbdiv * 1000 * 1000; - } else if (xtal_mode >= 3) { - /* 40Mhz Xtal */ - cpu_clk = 40 * fbdiv * 1000 * 1000; - } else { - /* 20Mhz Xtal */ - cpu_clk = 20 * fbdiv * 1000 * 1000; - } + pll = rt_memc_r32(MEMC_REG_CPU_PLL); + fbdiv = (pll >> CPU_PLL_FBDIV_SHIFT) & CPU_PLL_FBDIV_MASK; + prediv = (pll >> CPU_PLL_PREDIV_SHIFT) & CPU_PLL_PREDIV_MASK; + cpu_clk = ((fbdiv + 1) * xtal_clk) >> prediv_tbl[prediv]; break; + default: + cpu_clk = xtal_clk; } + + cpu_clk = cpu_clk / ffiv * ffrac; + bus_clk = cpu_clk / 4; + + clks[MT7621_CLK_CPU] = mt7621_add_sys_clkdev("cpu", cpu_clk); + clks[MT7621_CLK_BUS] = mt7621_add_sys_clkdev("bus", bus_clk); + + pr_info("CPU Clock: %dMHz\n", cpu_clk / 1000000); + mips_hpt_frequency = cpu_clk / 2; } +static void __init mt7621_clocks_init_dt(struct device_node *np) +{ + of_clk_add_provider(np, of_clk_src_onecell_get, &clk_data); +} + +CLK_OF_DECLARE(mt7621_clk, "mediatek,mt7621-pll", mt7621_clocks_init_dt); + void __init ralink_of_remap(void) { rt_sysc_membase = plat_of_remap_node("mtk,mt7621-sysc"); diff --git a/arch/mips/ralink/timer-gic.c b/arch/mips/ralink/timer-gic.c index b5f07d21fcf2..4fed842ae8eb 100644 --- a/arch/mips/ralink/timer-gic.c +++ b/arch/mips/ralink/timer-gic.c @@ -11,14 +11,14 @@ #include #include -#include +#include #include "common.h" void __init plat_time_init(void) { ralink_of_remap(); - + ralink_clk_init(); of_clk_init(NULL); timer_probe(); } -- cgit v1.2.3 From c838b580ca9dcf16b07aa72bb61a914e9db0fd2c Mon Sep 17 00:00:00 2001 From: Paul Burton Date: Fri, 5 Apr 2019 22:50:35 +0000 Subject: MIPS: jump_label: Remove redundant nops Both arch_static_branch() & arch_static_branch_jump() emit a control transfer instruction (ie. branch or jump) without disabling assembler re-ordering. As such the assembler will automatically fill their delay slots. Both functions follow their branch or jump with an explicit nop that at first appears to be there to fill the delay slot, but given that the assembler will do that the explicit nops serve no purpose & we end up with our branch or jump followed by 2 nops. Remove the redundant nops. 
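With the explicit nop dropped, arch_static_branch_jump() reduces to roughly the following; the assembler, still free to reorder, fills the jump's delay slot with the single nop that is actually needed:

	static __always_inline bool arch_static_branch_jump(struct static_key *key, bool branch)
	{
		/* No explicit nop: the assembler fills the delay slot itself. */
		asm_volatile_goto("1:\tj %l[l_yes]\n\t"
			".pushsection __jump_table, \"aw\"\n\t"
			WORD_INSN " 1b, %l[l_yes], %0\n\t"
			".popsection\n\t"
			: :  "i" (&((char *)key)[branch]) : : l_yes);

		return false;
	l_yes:
		return true;
	}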
Signed-off-by: Paul Burton Cc: linux-mips@vger.kernel.org --- arch/mips/include/asm/jump_label.h | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'arch') diff --git a/arch/mips/include/asm/jump_label.h b/arch/mips/include/asm/jump_label.h index e4456e450f94..df04449b261b 100644 --- a/arch/mips/include/asm/jump_label.h +++ b/arch/mips/include/asm/jump_label.h @@ -29,7 +29,7 @@ static __always_inline bool arch_static_branch(struct static_key *key, bool branch) { asm_volatile_goto("1:\t" B_INSN " 2f\n\t" - "2:\tnop\n\t" + "2:\t.insn\n\t" ".pushsection __jump_table, \"aw\"\n\t" WORD_INSN " 1b, %l[l_yes], %0\n\t" ".popsection\n\t" @@ -43,7 +43,6 @@ l_yes: static __always_inline bool arch_static_branch_jump(struct static_key *key, bool branch) { asm_volatile_goto("1:\tj %l[l_yes]\n\t" - "nop\n\t" ".pushsection __jump_table, \"aw\"\n\t" WORD_INSN " 1b, %l[l_yes], %0\n\t" ".popsection\n\t" -- cgit v1.2.3 From 9b6584e35f407a391de39938fd0b4fb29671924f Mon Sep 17 00:00:00 2001 From: Paul Burton Date: Fri, 5 Apr 2019 22:50:36 +0000 Subject: MIPS: jump_label: Use compact branches for >= r6 MIPSr6 introduced compact branches which have no delay slots. Make use of them for jump labels in order to avoid the need for a nop to fill the branch or jump delay slot, saving 4 bytes of code for each static branch. Signed-off-by: Paul Burton Cc: linux-mips@vger.kernel.org --- arch/mips/include/asm/jump_label.h | 12 +++++++++--- arch/mips/kernel/jump_label.c | 30 +++++++++++++++++++++++++----- 2 files changed, 34 insertions(+), 8 deletions(-) (limited to 'arch') diff --git a/arch/mips/include/asm/jump_label.h b/arch/mips/include/asm/jump_label.h index df04449b261b..3185fd3220ec 100644 --- a/arch/mips/include/asm/jump_label.h +++ b/arch/mips/include/asm/jump_label.h @@ -11,6 +11,7 @@ #ifndef __ASSEMBLY__ #include +#include #define JUMP_LABEL_NOP_SIZE 4 @@ -21,9 +22,14 @@ #endif #ifdef CONFIG_CPU_MICROMIPS -#define B_INSN "b32" +# define B_INSN "b32" +# define J_INSN "j32" +#elif MIPS_ISA_REV >= 6 +# define B_INSN "bc" +# define J_INSN "bc" #else -#define B_INSN "b" +# define B_INSN "b" +# define J_INSN "j" #endif static __always_inline bool arch_static_branch(struct static_key *key, bool branch) @@ -42,7 +48,7 @@ l_yes: static __always_inline bool arch_static_branch_jump(struct static_key *key, bool branch) { - asm_volatile_goto("1:\tj %l[l_yes]\n\t" + asm_volatile_goto("1:\t" J_INSN " %l[l_yes]\n\t" ".pushsection __jump_table, \"aw\"\n\t" WORD_INSN " 1b, %l[l_yes], %0\n\t" ".popsection\n\t" diff --git a/arch/mips/kernel/jump_label.c b/arch/mips/kernel/jump_label.c index ab943927f97a..662c8db9f45b 100644 --- a/arch/mips/kernel/jump_label.c +++ b/arch/mips/kernel/jump_label.c @@ -40,18 +40,38 @@ void arch_jump_label_transform(struct jump_entry *e, { union mips_instruction *insn_p; union mips_instruction insn; + long offset; insn_p = (union mips_instruction *)msk_isa16_mode(e->code); - /* Jump only works within an aligned region its delay slot is in. */ - BUG_ON((e->target & ~J_RANGE_MASK) != ((e->code + 4) & ~J_RANGE_MASK)); - /* Target must have the right alignment and ISA must be preserved. */ BUG_ON((e->target & J_ALIGN_MASK) != J_ISA_BIT); if (type == JUMP_LABEL_JMP) { - insn.j_format.opcode = J_ISA_BIT ? mm_j32_op : j_op; - insn.j_format.target = e->target >> J_RANGE_SHIFT; + if (!IS_ENABLED(CONFIG_CPU_MICROMIPS) && MIPS_ISA_REV >= 6) { + offset = e->target - ((unsigned long)insn_p + 4); + offset >>= 2; + + /* + * The branch offset must fit in the instruction's 26 + * bit field. 
+ */ + WARN_ON((offset >= BIT(25)) || + (offset < -(long)BIT(25))); + + insn.j_format.opcode = bc6_op; + insn.j_format.target = offset; + } else { + /* + * Jump only works within an aligned region its delay + * slot is in. + */ + WARN_ON((e->target & ~J_RANGE_MASK) != + ((e->code + 4) & ~J_RANGE_MASK)); + + insn.j_format.opcode = J_ISA_BIT ? mm_j32_op : j_op; + insn.j_format.target = e->target >> J_RANGE_SHIFT; + } } else { insn.word = 0; /* nop */ } -- cgit v1.2.3 From 3e3d1dfda4d6a17666fe0beae5c56b97b42e2c7a Mon Sep 17 00:00:00 2001 From: Paul Burton Date: Fri, 5 Apr 2019 22:50:37 +0000 Subject: MIPS: generic: Enable CONFIG_JUMP_LABEL Enable CONFIG_JUMP_LABEL for generic configs in order to better optimize at runtime and get better test coverage for our jump label support. Signed-off-by: Paul Burton Cc: linux-mips@vger.kernel.org --- arch/mips/configs/generic_defconfig | 1 + 1 file changed, 1 insertion(+) (limited to 'arch') diff --git a/arch/mips/configs/generic_defconfig b/arch/mips/configs/generic_defconfig index 5d80521e5d5a..714169e411cf 100644 --- a/arch/mips/configs/generic_defconfig +++ b/arch/mips/configs/generic_defconfig @@ -26,6 +26,7 @@ CONFIG_MIPS_CPS=y CONFIG_HIGHMEM=y CONFIG_NR_CPUS=16 CONFIG_MIPS_O32_FP64_SUPPORT=y +CONFIG_JUMP_LABEL=y CONFIG_MODULES=y CONFIG_MODULE_UNLOAD=y CONFIG_TRIM_UNUSED_KSYMS=y -- cgit v1.2.3 From 16b22f85bca246ace984209e9f22af2161450cbd Mon Sep 17 00:00:00 2001 From: Paul Burton Date: Fri, 12 Apr 2019 11:04:02 -0700 Subject: Revert "MIPS: ralink: fix cpu clock of mt7621 and add dt clk devices" Commit e6046b5e69a0 ("MIPS: ralink: fix cpu clock of mt7621 and add dt clk devices") includes a file that doesn't exist, causing build failures... Revert it. References: https://lore.kernel.org/linux-mips/CAJsYDVJvviz8a2oVmb0XL3OB+=Eecu-3kC9T9vsmxpuC_BqDSA@mail.gmail.com/ Signed-off-by: Paul Burton --- arch/mips/include/asm/mach-ralink/mt7621.h | 20 ------ arch/mips/ralink/mt7621.c | 102 +++++++++-------------------- arch/mips/ralink/timer-gic.c | 4 +- 3 files changed, 33 insertions(+), 93 deletions(-) (limited to 'arch') diff --git a/arch/mips/include/asm/mach-ralink/mt7621.h b/arch/mips/include/asm/mach-ralink/mt7621.h index b8a8834164c8..a672e06fa5fd 100644 --- a/arch/mips/include/asm/mach-ralink/mt7621.h +++ b/arch/mips/include/asm/mach-ralink/mt7621.h @@ -19,10 +19,6 @@ #define SYSC_REG_CHIP_REV 0x0c #define SYSC_REG_SYSTEM_CONFIG0 0x10 #define SYSC_REG_SYSTEM_CONFIG1 0x14 -#define SYSC_REG_CLKCFG0 0x2c -#define SYSC_REG_CUR_CLK_STS 0x44 - -#define MEMC_REG_CPU_PLL 0x648 #define CHIP_REV_PKG_MASK 0x1 #define CHIP_REV_PKG_SHIFT 16 @@ -30,22 +26,6 @@ #define CHIP_REV_VER_SHIFT 8 #define CHIP_REV_ECO_MASK 0xf -#define XTAL_MODE_SEL_MASK 0x7 -#define XTAL_MODE_SEL_SHIFT 6 - -#define CPU_CLK_SEL_MASK 0x3 -#define CPU_CLK_SEL_SHIFT 30 - -#define CUR_CPU_FDIV_MASK 0x1f -#define CUR_CPU_FDIV_SHIFT 8 -#define CUR_CPU_FFRAC_MASK 0x1f -#define CUR_CPU_FFRAC_SHIFT 0 - -#define CPU_PLL_PREDIV_MASK 0x3 -#define CPU_PLL_PREDIV_SHIFT 12 -#define CPU_PLL_FBDIV_MASK 0x7f -#define CPU_PLL_FBDIV_SHIFT 4 - #define MT7621_DRAM_BASE 0x0 #define MT7621_DDR2_SIZE_MIN 32 #define MT7621_DDR2_SIZE_MAX 256 diff --git a/arch/mips/ralink/mt7621.c b/arch/mips/ralink/mt7621.c index 6828eefb0a85..d2718de60b9b 100644 --- a/arch/mips/ralink/mt7621.c +++ b/arch/mips/ralink/mt7621.c @@ -9,22 +9,22 @@ #include #include -#include -#include -#include -#include #include #include #include #include #include -#include #include #include "common.h" +#define SYSC_REG_SYSCFG 0x10 +#define 
SYSC_REG_CPLL_CLKCFG0 0x2c +#define SYSC_REG_CUR_CLK_STS 0x44 +#define CPU_CLK_SEL (BIT(30) | BIT(31)) + #define MT7621_GPIO_MODE_UART1 1 #define MT7621_GPIO_MODE_I2C 2 #define MT7621_GPIO_MODE_UART3_MASK 0x3 @@ -110,89 +110,49 @@ static struct rt2880_pmx_group mt7621_pinmux_data[] = { { 0 } }; -static struct clk *clks[MT7621_CLK_MAX]; -static struct clk_onecell_data clk_data = { - .clks = clks, - .clk_num = ARRAY_SIZE(clks), -}; - phys_addr_t mips_cpc_default_phys_base(void) { panic("Cannot detect cpc address"); } -static struct clk *__init mt7621_add_sys_clkdev( - const char *id, unsigned long rate) -{ - struct clk *clk; - int err; - - clk = clk_register_fixed_rate(NULL, id, NULL, 0, rate); - if (IS_ERR(clk)) - panic("failed to allocate %s clock structure", id); - - err = clk_register_clkdev(clk, id, NULL); - if (err) - panic("unable to register %s clock device", id); - - return clk; -} - void __init ralink_clk_init(void) { - const static u32 prediv_tbl[] = {0, 1, 2, 2}; - u32 syscfg, xtal_sel, clkcfg, clk_sel, curclk, ffiv, ffrac; - u32 pll, prediv, fbdiv; - u32 xtal_clk, cpu_clk, bus_clk; - - syscfg = rt_sysc_r32(SYSC_REG_SYSTEM_CONFIG0); - xtal_sel = (syscfg >> XTAL_MODE_SEL_SHIFT) & XTAL_MODE_SEL_MASK; + int cpu_fdiv = 0; + int cpu_ffrac = 0; + int fbdiv = 0; + u32 clk_sts, syscfg; + u8 clk_sel = 0, xtal_mode; + u32 cpu_clk; - clkcfg = rt_sysc_r32(SYSC_REG_CLKCFG0); - clk_sel = (clkcfg >> CPU_CLK_SEL_SHIFT) & CPU_CLK_SEL_MASK; - - curclk = rt_sysc_r32(SYSC_REG_CUR_CLK_STS); - ffiv = (curclk >> CUR_CPU_FDIV_SHIFT) & CUR_CPU_FDIV_MASK; - ffrac = (curclk >> CUR_CPU_FFRAC_SHIFT) & CUR_CPU_FFRAC_MASK; - - if (xtal_sel <= 2) - xtal_clk = 20 * 1000 * 1000; - else if (xtal_sel <= 5) - xtal_clk = 40 * 1000 * 1000; - else - xtal_clk = 25 * 1000 * 1000; + if ((rt_sysc_r32(SYSC_REG_CPLL_CLKCFG0) & CPU_CLK_SEL) != 0) + clk_sel = 1; switch (clk_sel) { case 0: - cpu_clk = 500 * 1000 * 1000; + clk_sts = rt_sysc_r32(SYSC_REG_CUR_CLK_STS); + cpu_fdiv = ((clk_sts >> 8) & 0x1F); + cpu_ffrac = (clk_sts & 0x1F); + cpu_clk = (500 * cpu_ffrac / cpu_fdiv) * 1000 * 1000; break; + case 1: - pll = rt_memc_r32(MEMC_REG_CPU_PLL); - fbdiv = (pll >> CPU_PLL_FBDIV_SHIFT) & CPU_PLL_FBDIV_MASK; - prediv = (pll >> CPU_PLL_PREDIV_SHIFT) & CPU_PLL_PREDIV_MASK; - cpu_clk = ((fbdiv + 1) * xtal_clk) >> prediv_tbl[prediv]; + fbdiv = ((rt_sysc_r32(0x648) >> 4) & 0x7F) + 1; + syscfg = rt_sysc_r32(SYSC_REG_SYSCFG); + xtal_mode = (syscfg >> 6) & 0x7; + if (xtal_mode >= 6) { + /* 25Mhz Xtal */ + cpu_clk = 25 * fbdiv * 1000 * 1000; + } else if (xtal_mode >= 3) { + /* 40Mhz Xtal */ + cpu_clk = 40 * fbdiv * 1000 * 1000; + } else { + /* 20Mhz Xtal */ + cpu_clk = 20 * fbdiv * 1000 * 1000; + } break; - default: - cpu_clk = xtal_clk; } - - cpu_clk = cpu_clk / ffiv * ffrac; - bus_clk = cpu_clk / 4; - - clks[MT7621_CLK_CPU] = mt7621_add_sys_clkdev("cpu", cpu_clk); - clks[MT7621_CLK_BUS] = mt7621_add_sys_clkdev("bus", bus_clk); - - pr_info("CPU Clock: %dMHz\n", cpu_clk / 1000000); - mips_hpt_frequency = cpu_clk / 2; } -static void __init mt7621_clocks_init_dt(struct device_node *np) -{ - of_clk_add_provider(np, of_clk_src_onecell_get, &clk_data); -} - -CLK_OF_DECLARE(mt7621_clk, "mediatek,mt7621-pll", mt7621_clocks_init_dt); - void __init ralink_of_remap(void) { rt_sysc_membase = plat_of_remap_node("mtk,mt7621-sysc"); diff --git a/arch/mips/ralink/timer-gic.c b/arch/mips/ralink/timer-gic.c index 4fed842ae8eb..b5f07d21fcf2 100644 --- a/arch/mips/ralink/timer-gic.c +++ b/arch/mips/ralink/timer-gic.c @@ -11,14 +11,14 @@ #include #include 
-#include +#include #include "common.h" void __init plat_time_init(void) { ralink_of_remap(); - ralink_clk_init(); + of_clk_init(NULL); timer_probe(); } -- cgit v1.2.3 From 1e0221374e30a765a02e00f01f68869eac57c8d3 Mon Sep 17 00:00:00 2001 From: Nick Desaulniers Date: Tue, 23 Apr 2019 13:55:19 -0700 Subject: mips: vdso: drop unnecessary cc-ldoption Towards the goal of removing cc-ldoption, it seems that --hash-style= was added to binutils 2.17.50.0.2 in 2006. The minimum required version of binutils for the kernel according to Documentation/process/changes.rst is 2.20. --build-id was added in 2.18 according to binutils-gdb/ld/NEWS. Link: https://gcc.gnu.org/ml/gcc/2007-01/msg01141.html Cc: clang-built-linux@googlegroups.com Suggested-by: Masahiro Yamada Signed-off-by: Nick Desaulniers Signed-off-by: Paul Burton Cc: ralf@linux-mips.org Cc: jhogan@kernel.org Cc: Matt Redfearn Cc: Joel Stanley Cc: Hassan Naveed Cc: linux-mips@vger.kernel.org Cc: linux-kernel@vger.kernel.org --- arch/mips/vdso/Makefile | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) (limited to 'arch') diff --git a/arch/mips/vdso/Makefile b/arch/mips/vdso/Makefile index 0ede4deb8181..7221df24cb23 100644 --- a/arch/mips/vdso/Makefile +++ b/arch/mips/vdso/Makefile @@ -46,9 +46,7 @@ endif VDSO_LDFLAGS := \ -Wl,-Bsymbolic -Wl,--no-undefined -Wl,-soname=linux-vdso.so.1 \ $(addprefix -Wl$(comma),$(filter -E%,$(KBUILD_CFLAGS))) \ - -nostdlib -shared \ - $(call cc-ldoption, -Wl$(comma)--hash-style=sysv) \ - $(call cc-ldoption, -Wl$(comma)--build-id) + -nostdlib -shared -Wl,--hash-style=sysv -Wl,--build-id GCOV_PROFILE := n UBSAN_SANITIZE := n -- cgit v1.2.3 From a703db3d5b4b43cb7e46b65a661471015cbcba57 Mon Sep 17 00:00:00 2001 From: Serge Semin Date: Wed, 24 Apr 2019 01:47:37 +0300 Subject: mips: Make sure kernel .bss exists in boot mem pool Current MIPS platform code makes sure the kernel text, data and init sections are added to the boot memory map pool right after the arch-specific memory setup method has been executed. But for some reason the MIPS platform code skipped the kernel .bss section, which should definitely be in the boot mem pool as well. Let's fix this just by adding the space between __bss_start and __bss_stop.
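The fix boils down to one more arch_mem_addpart() call covering the page-rounded .bss bounds (condensed from the hunk below):

	arch_mem_addpart(PFN_DOWN(__pa_symbol(&__bss_start)) << PAGE_SHIFT,
			 PFN_UP(__pa_symbol(&__bss_stop)) << PAGE_SHIFT,
			 BOOT_MEM_RAM);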
Reviewed-by: Matt Redfearn Signed-off-by: Serge Semin Signed-off-by: Paul Burton Cc: Ralf Baechle Cc: James Hogan Cc: Mike Rapoport Cc: Andrew Morton Cc: Michal Hocko Cc: Greg Kroah-Hartman Cc: Thomas Bogendoerfer Cc: Huacai Chen Cc: Stefan Agner Cc: Stephen Rothwell Cc: Alexandre Belloni Cc: Juergen Gross Cc: linux-mips@vger.kernel.org Cc: linux-kernel@vger.kernel.org --- arch/mips/kernel/setup.c | 3 +++ 1 file changed, 3 insertions(+) (limited to 'arch') diff --git a/arch/mips/kernel/setup.c b/arch/mips/kernel/setup.c index 8d1dc6c71173..0ee033c44116 100644 --- a/arch/mips/kernel/setup.c +++ b/arch/mips/kernel/setup.c @@ -809,6 +809,9 @@ static void __init arch_mem_init(char **cmdline_p) arch_mem_addpart(PFN_UP(__pa_symbol(&__init_begin)) << PAGE_SHIFT, PFN_DOWN(__pa_symbol(&__init_end)) << PAGE_SHIFT, BOOT_MEM_INIT_RAM); + arch_mem_addpart(PFN_DOWN(__pa_symbol(&__bss_start)) << PAGE_SHIFT, + PFN_UP(__pa_symbol(&__bss_stop)) << PAGE_SHIFT, + BOOT_MEM_RAM); pr_info("Determined physical RAM map:\n"); print_memory_map(); -- cgit v1.2.3 From 6ea3ba6fac31380af86339c37be095a672dd01bb Mon Sep 17 00:00:00 2001 From: Serge Semin Date: Wed, 24 Apr 2019 01:47:38 +0300 Subject: mips: Discard rudiments from bootmem_init There is pointless code left in the bootmem_init() method since the bootmem allocator was removed. The first part resides in the PFN ranges calculation loop: the conditional expressions and the continue operator are useless there, since nothing is done after them. The second part is in the RAM ranges installation loop: we can simplify the cascade of conditions a bit without redefining much of the logic, so as to reduce the code length. In particular the end boundary value can be verified to be below max_low_pfn after its possible reduction. Signed-off-by: Serge Semin Signed-off-by: Paul Burton Cc: Ralf Baechle Cc: James Hogan Cc: Matt Redfearn Cc: Mike Rapoport Cc: Andrew Morton Cc: Michal Hocko Cc: Greg Kroah-Hartman Cc: Thomas Bogendoerfer Cc: Huacai Chen Cc: Stefan Agner Cc: Stephen Rothwell Cc: Alexandre Belloni Cc: Juergen Gross Cc: linux-mips@vger.kernel.org Cc: linux-kernel@vger.kernel.org --- arch/mips/kernel/setup.c | 25 +++++-------------------- 1 file changed, 5 insertions(+), 20 deletions(-) (limited to 'arch') diff --git a/arch/mips/kernel/setup.c b/arch/mips/kernel/setup.c index 0ee033c44116..53d93a727d1a 100644 --- a/arch/mips/kernel/setup.c +++ b/arch/mips/kernel/setup.c @@ -394,10 +394,7 @@ static void __init bootmem_init(void) min_low_pfn = ~0UL; max_low_pfn = 0; - /* - * Find the highest page frame number we have available - * and the lowest used RAM address - */ + /* Find the highest and lowest page frame numbers we have available.
*/ for (i = 0; i < boot_mem_map.nr_map; i++) { unsigned long start, end; @@ -427,13 +424,6 @@ static void __init bootmem_init(void) max_low_pfn = end; if (start < min_low_pfn) min_low_pfn = start; - if (end <= reserved_end) - continue; -#ifdef CONFIG_BLK_DEV_INITRD - /* Skip zones before initrd and initrd itself */ - if (initrd_end && end <= (unsigned long)PFN_UP(__pa(initrd_end))) - continue; -#endif } if (min_low_pfn >= max_low_pfn) @@ -474,6 +464,7 @@ static void __init bootmem_init(void) max_low_pfn = PFN_DOWN(HIGHMEM_START); } + /* Install all valid RAM ranges to the memblock memory region */ for (i = 0; i < boot_mem_map.nr_map; i++) { unsigned long start, end; @@ -481,21 +472,15 @@ end = PFN_DOWN(boot_mem_map.map[i].addr + boot_mem_map.map[i].size); - if (start <= min_low_pfn) + if (start < min_low_pfn) start = min_low_pfn; - if (start >= end) - continue; - #ifndef CONFIG_HIGHMEM + /* Ignore highmem regions if highmem is unsupported */ if (end > max_low_pfn) end = max_low_pfn; - - /* - * ... finally, is the area going away? - */ +#endif if (end <= start) continue; -#endif memblock_add_node(PFN_PHYS(start), PFN_PHYS(end - start), 0); } -- cgit v1.2.3 From cf0c4876684d287b6fe72699598517ca9b827216 Mon Sep 17 00:00:00 2001 From: Serge Semin Date: Wed, 24 Apr 2019 01:47:39 +0300 Subject: mips: Combine memblock init and memory reservation loops Before bootmem was completely removed from the kernel, the last loop in bootmem_init() had been used to reserve the correspondingly marked regions, initialize sparsemem sections and to free the low memory pages, which then would be used for early memory allocations. After the bootmem removal patchset was merged, the loop was left to do the first two things only. But it didn't do them quite right. First of all it leaves the BOOT_MEM_INIT_RAM memory types unreserved, which is definitely a bug (although it isn't noticeable due to being used by the kernel region only, which is fully marked as reserved). Secondly the reservation is supposed to be done for any memory, including highmem. (I couldn't figure out why the highmem was ignored in the first place, since platforms and dts' may declare any memory region for reservation.) Thirdly the reserved_end variable had been used here to not accidentally free memory occupied by the kernel. Since we already reserved the corresponding region higher in this method, there is no need to use the variable here anymore. Fourthly, sparsemem should be aware of all the memory types in the system, including ROM_DATA, even if it is going to be reserved for the whole system uptime. Finally, after all these issues are fixed, the memory reservation loop can be freely merged into the memory installation loop, as is done in this patch.
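Condensed, the merged loop takes this shape (a sketch of the resulting code; the min_low_pfn/max_low_pfn clamping is omitted for brevity):

	for (i = 0; i < boot_mem_map.nr_map; i++) {
		start = PFN_UP(boot_mem_map.map[i].addr);
		end = PFN_DOWN(boot_mem_map.map[i].addr +
			       boot_mem_map.map[i].size);

		/* Install the range to the memblock memory region. */
		memblock_add_node(PFN_PHYS(start), PFN_PHYS(end - start), 0);

		/* Reserve any memory except the ordinary RAM ranges. */
		switch (boot_mem_map.map[i].type) {
		case BOOT_MEM_RAM:
			break;
		default:
			memblock_reserve(PFN_PHYS(start), PFN_PHYS(end - start));
			break;
		}

		/* Every added region is considered present; tell sparsemem. */
		memory_present(0, start, end);
	}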
Signed-off-by: Serge Semin Signed-off-by: Paul Burton Cc: Ralf Baechle Cc: James Hogan Cc: Matt Redfearn Cc: Mike Rapoport Cc: Andrew Morton Cc: Michal Hocko Cc: Greg Kroah-Hartman Cc: Thomas Bogendoerfer Cc: Huacai Chen Cc: Stefan Agner Cc: Stephen Rothwell Cc: Alexandre Belloni Cc: Juergen Gross Cc: linux-mips@vger.kernel.org Cc: linux-kernel@vger.kernel.org --- arch/mips/kernel/setup.c | 48 +++++++----------------------------------------- 1 file changed, 7 insertions(+), 41 deletions(-) (limited to 'arch') diff --git a/arch/mips/kernel/setup.c b/arch/mips/kernel/setup.c index 53d93a727d1a..185e0e42e009 100644 --- a/arch/mips/kernel/setup.c +++ b/arch/mips/kernel/setup.c @@ -483,55 +483,21 @@ static void __init bootmem_init(void) continue; memblock_add_node(PFN_PHYS(start), PFN_PHYS(end - start), 0); - } - - /* - * Register fully available low RAM pages with the bootmem allocator. - */ - for (i = 0; i < boot_mem_map.nr_map; i++) { - unsigned long start, end, size; - - start = PFN_UP(boot_mem_map.map[i].addr); - end = PFN_DOWN(boot_mem_map.map[i].addr - + boot_mem_map.map[i].size); - /* - * Reserve usable memory. - */ + /* Reserve any memory except the ordinary RAM ranges. */ switch (boot_mem_map.map[i].type) { case BOOT_MEM_RAM: break; - case BOOT_MEM_INIT_RAM: - memory_present(0, start, end); - continue; - default: - /* Not usable memory */ - if (start > min_low_pfn && end < max_low_pfn) - memblock_reserve(boot_mem_map.map[i].addr, - boot_mem_map.map[i].size); - - continue; + default: /* Reserve the rest of the memory types at boot time */ + memblock_reserve(PFN_PHYS(start), PFN_PHYS(end - start)); + break; } /* - * We are rounding up the start address of usable memory - * and at the end of the usable range downwards. - */ - if (start >= max_low_pfn) - continue; - if (start < reserved_end) - start = reserved_end; - if (end > max_low_pfn) - end = max_low_pfn; - - /* - * ... finally, is the area going away? + * In any case the added to the memblock memory regions + * (highmem/lowmem, available/reserved, etc) are considered + * as present, so inform sparsemem about them. */ - if (end <= start) - continue; - size = end - start; - - /* Register lowmem ranges */ memory_present(0, start, end); } -- cgit v1.2.3 From f995adb0ac5bcfa0d396ba7706aab420c7a6fec2 Mon Sep 17 00:00:00 2001 From: Paul Burton Date: Tue, 30 Apr 2019 22:53:30 +0000 Subject: MIPS: Use memblock_phys_alloc() for exception vector MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Allocate the exception vector using memblock_phys_alloc() which gives us a physical address, rather than the previous convoluted setup which obtained a virtual address using memblock_alloc(), converted it to a physical address & then back to a virtual address. 
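In short, the allocation goes from a virtual/physical round trip to a direct physical allocation:

	/* Before: allocate a virtual address, then convert back and forth. */
	ebase = (unsigned long)memblock_alloc(size, 1 << fls(size));
	ebase_pa = __pa(ebase);

	/* After: allocate the physical address directly. */
	ebase_pa = memblock_phys_alloc(size, 1 << fls(size));
	ebase = (unsigned long)phys_to_virt(ebase_pa);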
Signed-off-by: Paul Burton Reviewed-by: Philippe Mathieu-Daudé Reviewed-by: Serge Semin Tested-by: Serge Semin Cc: linux-mips@vger.kernel.org --- arch/mips/kernel/traps.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) (limited to 'arch') diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c index 98ca55d62201..00f44b16385e 100644 --- a/arch/mips/kernel/traps.c +++ b/arch/mips/kernel/traps.c @@ -2292,9 +2292,8 @@ void __init trap_init(void) unsigned long size = 0x200 + VECTORSPACING*64; phys_addr_t ebase_pa; - ebase = (unsigned long) - memblock_alloc(size, 1 << fls(size)); - if (!ebase) + ebase_pa = memblock_phys_alloc(size, 1 << fls(size)); + if (!ebase_pa) panic("%s: Failed to allocate %lu bytes align=0x%x\n", __func__, size, 1 << fls(size)); @@ -2309,9 +2308,10 @@ void __init trap_init(void) * EVA is special though as it allows segments to be rearranged * and to become uncached during cache error handling. */ - ebase_pa = __pa(ebase); if (!IS_ENABLED(CONFIG_EVA) && !WARN_ON(ebase_pa >= 0x20000000)) ebase = CKSEG0ADDR(ebase_pa); + else + ebase = (unsigned long)phys_to_virt(ebase_pa); } else { ebase = CAC_BASE; -- cgit v1.2.3 From 172dcd935c34b022729f45a7bbaae5cc05231533 Mon Sep 17 00:00:00 2001 From: Paul Burton Date: Tue, 30 Apr 2019 22:53:30 +0000 Subject: MIPS: Always allocate exception vector for MIPSr2+ Currently we allocate the exception vector on systems which use a vectored interrupt mode, but otherwise attempt to reuse whatever exception vector the bootloader uses. This can be problematic for a number of reasons: 1) The memory isn't properly marked reserved in the memblock allocator. We've relied on the fact that EBase is generally in the memory below the kernel image which we don't free, but this is about to change. 2) Recent versions of U-Boot place their exception vector high in kseg0, in memory which isn't protected by being lower than the kernel anyway & can end up being clobbered. 3) We are unnecessarily reliant upon there being memory at the address EBase points to upon entry to the kernel. This is often the case, but if the bootloader doesn't configure EBase & leaves it with its default value then we rely upon there being memory at physical address 0 for no good reason. Improve this situation by allocating the exception vector in all cases when running on MIPSr2 or higher, and reserving the memory for MIPSr1 or lower. This ensures we don't clobber the exception vector in any configuration, and for MIPSr2 & higher removes the need for memory at physical address 0. 
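The resulting trap_init() flow, sketched from the hunk below:

	if (!cpu_has_mips_r2_r6) {
		/* MIPSr1 or lower: keep the legacy vector, but reserve it. */
		ebase = CAC_BASE;
		ebase_pa = virt_to_phys((void *)ebase);
		vec_size = 0x400;
		memblock_reserve(ebase_pa, vec_size);
	} else {
		/* MIPSr2 or higher: always allocate a fresh vector. */
		if (cpu_has_veic || cpu_has_vint)
			vec_size = 0x200 + VECTORSPACING * 64;
		else
			vec_size = PAGE_SIZE;
		ebase_pa = memblock_phys_alloc(vec_size, 1 << fls(vec_size));
	}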
Signed-off-by: Paul Burton Reviewed-by: Serge Semin Tested-by: Serge Semin Cc: linux-mips@vger.kernel.org --- arch/mips/kernel/traps.c | 35 +++++++++++++++-------------------- 1 file changed, 15 insertions(+), 20 deletions(-) (limited to 'arch') diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c index 00f44b16385e..9b565ed51662 100644 --- a/arch/mips/kernel/traps.c +++ b/arch/mips/kernel/traps.c @@ -2284,18 +2284,27 @@ void __init trap_init(void) extern char except_vec3_generic; extern char except_vec4; extern char except_vec3_r4000; - unsigned long i; + unsigned long i, vec_size; + phys_addr_t ebase_pa; check_wait(); - if (cpu_has_veic || cpu_has_vint) { - unsigned long size = 0x200 + VECTORSPACING*64; - phys_addr_t ebase_pa; + if (!cpu_has_mips_r2_r6) { + ebase = CAC_BASE; + ebase_pa = virt_to_phys((void *)ebase); + vec_size = 0x400; + + memblock_reserve(ebase_pa, vec_size); + } else { + if (cpu_has_veic || cpu_has_vint) + vec_size = 0x200 + VECTORSPACING*64; + else + vec_size = PAGE_SIZE; - ebase_pa = memblock_phys_alloc(size, 1 << fls(size)); + ebase_pa = memblock_phys_alloc(vec_size, 1 << fls(vec_size)); if (!ebase_pa) panic("%s: Failed to allocate %lu bytes align=0x%x\n", - __func__, size, 1 << fls(size)); + __func__, vec_size, 1 << fls(vec_size)); /* * Try to ensure ebase resides in KSeg0 if possible. @@ -2312,20 +2321,6 @@ void __init trap_init(void) ebase = CKSEG0ADDR(ebase_pa); else ebase = (unsigned long)phys_to_virt(ebase_pa); - } else { - ebase = CAC_BASE; - - if (cpu_has_mips_r2_r6) { - if (cpu_has_ebase_wg) { -#ifdef CONFIG_64BIT - ebase = (read_c0_ebase_64() & ~0xfff); -#else - ebase = (read_c0_ebase() & ~0xfff); -#endif - } else { - ebase += (read_c0_ebase() & 0x3ffff000); - } - } } if (cpu_has_mmips) { -- cgit v1.2.3 From 783454e2bc7ce491b5cd50154433cde993bfd849 Mon Sep 17 00:00:00 2001 From: Paul Burton Date: Tue, 30 Apr 2019 22:53:31 +0000 Subject: MIPS: Sync icache for whole exception vector MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Rather than performing cache flushing for a fixed 0x400 bytes, use the actual size of the vector in order to ensure we cover all emitted code on systems that make use of vectored interrupts. Signed-off-by: Paul Burton Reviewed-by: Philippe Mathieu-Daudé Reviewed-by: Serge Semin Tested-by: Serge Semin Cc: linux-mips@vger.kernel.org --- arch/mips/kernel/traps.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c index 9b565ed51662..2775190adbe7 100644 --- a/arch/mips/kernel/traps.c +++ b/arch/mips/kernel/traps.c @@ -2454,7 +2454,7 @@ void __init trap_init(void) else set_handler(0x080, &except_vec3_generic, 0x80); - local_flush_icache_range(ebase, ebase + 0x400); + local_flush_icache_range(ebase, ebase + vec_size); sort_extable(__start___dbe_table, __stop___dbe_table); -- cgit v1.2.3 From de56d4c1da3e68f0ca468a55f6677bef3cee6e10 Mon Sep 17 00:00:00 2001 From: Paul Burton Date: Tue, 30 Apr 2019 22:53:31 +0000 Subject: MIPS: Remove duplicate EBase configuration Clean up our configuration of the EBase register by making configure_exception_vector() write to it unconditionally on systems implementing MIPSr2 or higher, and removing the duplicate code in per_cpu_trap_init(). The latter would have duplicated work on systems with vectored interrupts, and didn't set BEV for safety like the configure_exception_vector() version of the code does. 
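For reference, the single surviving path in configure_exception_vector() now programs EBase for every MIPSr2+ CPU, boot and secondary alike:

	if (cpu_has_mips_r2_r6) {
		unsigned long sr = set_c0_status(ST0_BEV);

		/* If available, use WG to set top bits of EBASE */
		if (cpu_has_ebase_wg) {
	#ifdef CONFIG_64BIT
			write_c0_ebase_64(ebase | MIPS_EBASE_WG);
	#else
			write_c0_ebase(ebase | MIPS_EBASE_WG);
	#endif
		}
		write_c0_ebase(ebase);
		write_c0_status(sr);
	}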
Signed-off-by: Paul Burton Reviewed-by: Serge Semin Tested-by: Serge Semin Cc: linux-mips@vger.kernel.org --- arch/mips/kernel/traps.c | 20 +++----------------- 1 file changed, 3 insertions(+), 17 deletions(-) (limited to 'arch') diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c index 2775190adbe7..c52766a5b85f 100644 --- a/arch/mips/kernel/traps.c +++ b/arch/mips/kernel/traps.c @@ -2151,7 +2151,7 @@ static void configure_hwrena(void) static void configure_exception_vector(void) { - if (cpu_has_veic || cpu_has_vint) { + if (cpu_has_mips_r2_r6) { unsigned long sr = set_c0_status(ST0_BEV); /* If available, use WG to set top bits of EBASE */ if (cpu_has_ebase_wg) { @@ -2163,6 +2163,8 @@ static void configure_exception_vector(void) } write_c0_ebase(ebase); write_c0_status(sr); + } + if (cpu_has_veic || cpu_has_vint) { /* Setting vector spacing enables EI/VI mode */ change_c0_intctl(0x3e0, VECTORSPACING); } @@ -2193,22 +2195,6 @@ void per_cpu_trap_init(bool is_boot_cpu) * o read IntCtl.IPFDC to determine the fast debug channel interrupt */ if (cpu_has_mips_r2_r6) { - /* - * We shouldn't trust a secondary core has a sane EBASE register - * so use the one calculated by the boot CPU. - */ - if (!is_boot_cpu) { - /* If available, use WG to set top bits of EBASE */ - if (cpu_has_ebase_wg) { -#ifdef CONFIG_64BIT - write_c0_ebase_64(ebase | MIPS_EBASE_WG); -#else - write_c0_ebase(ebase | MIPS_EBASE_WG); -#endif - } - write_c0_ebase(ebase); - } - cp0_compare_irq_shift = CAUSEB_TI - CAUSEB_IP; cp0_compare_irq = (read_c0_intctl() >> INTCTLB_IPTI) & 7; cp0_perfcount_irq = (read_c0_intctl() >> INTCTLB_IPPCI) & 7; -- cgit v1.2.3 From b93ddc4f9156205eb3c8df6ad35a37be3fa4e31e Mon Sep 17 00:00:00 2001 From: Serge Semin Date: Wed, 24 Apr 2019 01:47:40 +0300 Subject: mips: Reserve memory for the kernel image resources The reserved_end variable had been used by the bootmem_init() code to find the lowest limit of memory available for the memmap blob. The original code just tried to find a free memory space higher than where the kernel was placed. This limitation seems justified for the memmap region search process, but I can't see any obvious reason to reserve the unused space below the kernel, seeing that some platforms place it much higher than the standard 1MB. Moreover the RELOCATABLE config enables it to be loaded at any memory address. So let's reserve the memory occupied by the kernel only, leaving the region below it free for allocations. After doing this we can now discard the code freeing the space between the kernel _text and VMLINUX_LOAD_ADDRESS symbols, since it's going to be free anyway (unless marked as reserved by platforms).
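The reservation thus shrinks to exactly the kernel image, as the hunk below shows:

	/* Reserve memory occupied by kernel. */
	memblock_reserve(__pa_symbol(&_text),
			 __pa_symbol(&_end) - __pa_symbol(&_text));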
Signed-off-by: Serge Semin Signed-off-by: Paul Burton Cc: Ralf Baechle Cc: James Hogan Cc: Matt Redfearn Cc: Mike Rapoport Cc: Andrew Morton Cc: Michal Hocko Cc: Greg Kroah-Hartman Cc: Thomas Bogendoerfer Cc: Huacai Chen Cc: Stefan Agner Cc: Stephen Rothwell Cc: Alexandre Belloni Cc: Juergen Gross Cc: linux-mips@vger.kernel.org Cc: linux-kernel@vger.kernel.org --- arch/mips/kernel/setup.c | 30 +++--------------------------- 1 file changed, 3 insertions(+), 27 deletions(-) (limited to 'arch') diff --git a/arch/mips/kernel/setup.c b/arch/mips/kernel/setup.c index 185e0e42e009..f71a7d32a687 100644 --- a/arch/mips/kernel/setup.c +++ b/arch/mips/kernel/setup.c @@ -371,7 +371,6 @@ static void __init bootmem_init(void) static void __init bootmem_init(void) { - unsigned long reserved_end; phys_addr_t ramstart = PHYS_ADDR_MAX; int i; @@ -382,10 +381,10 @@ static void __init bootmem_init(void) * will reserve the area used for the initrd. */ init_initrd(); - reserved_end = (unsigned long) PFN_UP(__pa_symbol(&_end)); - memblock_reserve(PHYS_OFFSET, - (reserved_end << PAGE_SHIFT) - PHYS_OFFSET); + /* Reserve memory occupied by kernel. */ + memblock_reserve(__pa_symbol(&_text), + __pa_symbol(&_end) - __pa_symbol(&_text)); /* * max_low_pfn is not a number of pages. The number of pages @@ -501,29 +500,6 @@ static void __init bootmem_init(void) memory_present(0, start, end); } -#ifdef CONFIG_RELOCATABLE - /* - * The kernel reserves all memory below its _end symbol as bootmem, - * but the kernel may now be at a much higher address. The memory - * between the original and new locations may be returned to the system. - */ - if (__pa_symbol(_text) > __pa_symbol(VMLINUX_LOAD_ADDRESS)) { - unsigned long offset; - extern void show_kernel_relocation(const char *level); - - offset = __pa_symbol(_text) - __pa_symbol(VMLINUX_LOAD_ADDRESS); - memblock_free(__pa_symbol(VMLINUX_LOAD_ADDRESS), offset); - -#if defined(CONFIG_DEBUG_KERNEL) && defined(CONFIG_DEBUG_INFO) - /* - * This information is necessary when debugging the kernel - * But is a security vulnerability otherwise! - */ - show_kernel_relocation(KERN_INFO); -#endif - } -#endif - /* * Reserve initrd memory if needed. */ -- cgit v1.2.3 From eadb6925efeb0c254d17e1da9bb730d2add5613d Mon Sep 17 00:00:00 2001 From: Serge Semin Date: Wed, 24 Apr 2019 01:47:41 +0300 Subject: mips: Discard post-CMA-init foreach loop Really the loop is pointless, since it walks over memblock-reserved memory regions and marks them as reserved in memblock. Before bootmem was removed from the kernel, this loop had been used to map the memory reserved by CMA into the legacy bootmem allocator. But now the early memory allocator is memblock, which is used by CMA for reservation, so we don't need any mapping anymore.
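For reference, the dropped loop merely re-reserved regions that memblock already tracks as reserved:

	struct memblock_region *reg;

	for_each_memblock(reserved, reg)
		if (reg->size != 0)
			memblock_reserve(reg->base, reg->size);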
Reviewed-by: Matt Redfearn Signed-off-by: Serge Semin Signed-off-by: Paul Burton Cc: Ralf Baechle Cc: James Hogan Cc: Mike Rapoport Cc: Andrew Morton Cc: Michal Hocko Cc: Greg Kroah-Hartman Cc: Thomas Bogendoerfer Cc: Huacai Chen Cc: Stefan Agner Cc: Stephen Rothwell Cc: Alexandre Belloni Cc: Juergen Gross Cc: linux-mips@vger.kernel.org Cc: linux-kernel@vger.kernel.org --- arch/mips/kernel/setup.c | 5 ----- 1 file changed, 5 deletions(-) (limited to 'arch') diff --git a/arch/mips/kernel/setup.c b/arch/mips/kernel/setup.c index f71a7d32a687..2ae6b02b948f 100644 --- a/arch/mips/kernel/setup.c +++ b/arch/mips/kernel/setup.c @@ -708,7 +708,6 @@ static void __init request_crashkernel(struct resource *res) */ static void __init arch_mem_init(char **cmdline_p) { - struct memblock_region *reg; extern void plat_mem_setup(void); /* @@ -814,10 +813,6 @@ static void __init arch_mem_init(char **cmdline_p) plat_swiotlb_setup(); dma_contiguous_reserve(PFN_PHYS(max_low_pfn)); - /* Tell bootmem about cma reserved memblock section */ - for_each_memblock(reserved, reg) - if (reg->size != 0) - memblock_reserve(reg->base, reg->size); reserve_bootmem_region(__pa_symbol(&__nosave_begin), __pa_symbol(&__nosave_end)); /* Reserve for hibernation */ -- cgit v1.2.3 From 4e50a35de4ccc834dbc32c664fb068f4c24cfebf Mon Sep 17 00:00:00 2001 From: Serge Semin Date: Wed, 24 Apr 2019 01:47:42 +0300 Subject: mips: Use memblock to reserve the __nosave memory range Originally before legacy bootmem was removed, the memory for the range was correctly reserved by reserve_bootmem_region(). But since memblock has been selected for early memory allocation the function can be utilized only after paging is fully initialized (as is done by the memblock_free_all() function). So calling it from the arch_mem_init() method is prone to errors, and at this stage we need to reserve the memory in the memblock allocator. Signed-off-by: Serge Semin Signed-off-by: Paul Burton Cc: Ralf Baechle Cc: James Hogan Cc: Matt Redfearn Cc: Mike Rapoport Cc: Andrew Morton Cc: Michal Hocko Cc: Greg Kroah-Hartman Cc: Thomas Bogendoerfer Cc: Huacai Chen Cc: Stefan Agner Cc: Stephen Rothwell Cc: Alexandre Belloni Cc: Juergen Gross Cc: linux-mips@vger.kernel.org Cc: linux-kernel@vger.kernel.org --- arch/mips/kernel/setup.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) (limited to 'arch') diff --git a/arch/mips/kernel/setup.c b/arch/mips/kernel/setup.c index 2ae6b02b948f..3a5140943f54 100644 --- a/arch/mips/kernel/setup.c +++ b/arch/mips/kernel/setup.c @@ -814,8 +814,9 @@ static void __init arch_mem_init(char **cmdline_p) dma_contiguous_reserve(PFN_PHYS(max_low_pfn)); - reserve_bootmem_region(__pa_symbol(&__nosave_begin), - __pa_symbol(&__nosave_end)); /* Reserve for hibernation */ + /* Reserve for hibernation. */ + memblock_reserve(__pa_symbol(&__nosave_begin), + __pa_symbol(&__nosave_end) - __pa_symbol(&__nosave_begin)); } static void __init resource_init(void) -- cgit v1.2.3 From 9b9a59db84812d326af41a3802c63f1f95d81016 Mon Sep 17 00:00:00 2001 From: Serge Semin Date: Wed, 24 Apr 2019 01:47:43 +0300 Subject: mips: Add reserve-nomap memory type support It might be necessary to prevent the creation of a virtual mapping for a requested memory region. For instance there is a "no-map" property indicating exactly this feature. In this case we need to not only reserve the specified region by pretending it doesn't exist in the memory space, but to completely remove the range from the system just by removing it from memblock.
From 9b9a59db84812d326af41a3802c63f1f95d81016 Mon Sep 17 00:00:00 2001 From: Serge Semin Date: Wed, 24 Apr 2019 01:47:43 +0300 Subject: mips: Add reserve-nomap memory type support

It might be necessary to prevent the creation of a virtual mapping for a requested memory region. For instance, there is a "no-map" property indicating exactly this feature. In this case we need to not merely reserve the specified region, but to completely remove the range from the system, pretending it doesn't exist in the memory space at all; this is achieved by simply removing it from memblock. This is the same way it's done in the default early_init_dt_reserve_memory_arch() method.

Signed-off-by: Serge Semin Signed-off-by: Paul Burton Cc: Ralf Baechle Cc: James Hogan Cc: Matt Redfearn Cc: Mike Rapoport Cc: Andrew Morton Cc: Michal Hocko Cc: Greg Kroah-Hartman Cc: Thomas Bogendoerfer Cc: Huacai Chen Cc: Stefan Agner Cc: Stephen Rothwell Cc: Alexandre Belloni Cc: Juergen Gross Cc: linux-mips@vger.kernel.org Cc: linux-kernel@vger.kernel.org --- arch/mips/include/asm/bootinfo.h | 1 + arch/mips/kernel/prom.c | 4 +++- arch/mips/kernel/setup.c | 8 ++++++++ 3 files changed, 12 insertions(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/mips/include/asm/bootinfo.h b/arch/mips/include/asm/bootinfo.h index a301a8f4bc66..235bc2f52113 100644 --- a/arch/mips/include/asm/bootinfo.h +++ b/arch/mips/include/asm/bootinfo.h @@ -92,6 +92,7 @@ extern unsigned long mips_machtype; #define BOOT_MEM_ROM_DATA 2 #define BOOT_MEM_RESERVED 3 #define BOOT_MEM_INIT_RAM 4 +#define BOOT_MEM_NOMAP 5 /* * A memory map that's built upon what was determined diff --git a/arch/mips/kernel/prom.c b/arch/mips/kernel/prom.c index 93b8e0b4332f..437a174e3ef9 100644 --- a/arch/mips/kernel/prom.c +++ b/arch/mips/kernel/prom.c @@ -47,7 +47,9 @@ void __init early_init_dt_add_memory_arch(u64 base, u64 size) int __init early_init_dt_reserve_memory_arch(phys_addr_t base, phys_addr_t size, bool nomap) { - add_memory_region(base, size, BOOT_MEM_RESERVED); + add_memory_region(base, size, + nomap ? BOOT_MEM_NOMAP : BOOT_MEM_RESERVED); + return 0; } diff --git a/arch/mips/kernel/setup.c b/arch/mips/kernel/setup.c index 3a5140943f54..2a1b2e7a1bc9 100644 --- a/arch/mips/kernel/setup.c +++ b/arch/mips/kernel/setup.c @@ -178,6 +178,7 @@ static bool __init __maybe_unused memory_region_available(phys_addr_t start, in_ram = true; break; case BOOT_MEM_RESERVED: + case BOOT_MEM_NOMAP: if ((start >= start_ && start < end_) || (start < start_ && start + size >= start_)) free = false; @@ -213,6 +214,9 @@ static void __init print_memory_map(void) case BOOT_MEM_RESERVED: printk(KERN_CONT "(reserved)\n"); break; + case BOOT_MEM_NOMAP: + printk(KERN_CONT "(nomap)\n"); + break; default: printk(KERN_CONT "type %lu\n", boot_mem_map.map[i].type); break; @@ -487,6 +491,9 @@ static void __init bootmem_init(void) switch (boot_mem_map.map[i].type) { case BOOT_MEM_RAM: break; + case BOOT_MEM_NOMAP: /* Discard the range from the system. */ + memblock_remove(PFN_PHYS(start), PFN_PHYS(end - start)); + continue; default: /* Reserve the rest of the memory types at boot time */ memblock_reserve(PFN_PHYS(start), PFN_PHYS(end - start)); break; @@ -861,6 +868,7 @@ static void __init resource_init(void) res->flags |= IORESOURCE_SYSRAM; break; case BOOT_MEM_RESERVED: + case BOOT_MEM_NOMAP: default: res->name = "reserved"; } -- cgit v1.2.3
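Aside: a condensed sketch of the two behaviours that BOOT_MEM_NOMAP distinguishes; the identifiers come from the patch above, the comments are editorial.

    switch (type) {
    case BOOT_MEM_NOMAP:
        /* Drop the range from memblock.memory entirely: the kernel
         * then treats it as nonexistent RAM and creates no virtual
         * mapping for it. */
        memblock_remove(base, size);
        break;
    case BOOT_MEM_RESERVED:
        /* Keep the range known to memblock but never allocate from
         * it; it remains part of the system memory map. */
        memblock_reserve(base, size);
        break;
    }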
From 30c8f4e411fb76f752a193bd945d4b9ef06c2d87 Mon Sep 17 00:00:00 2001 From: Serge Semin Date: Fri, 3 May 2019 20:50:37 +0300 Subject: mips: Dump memblock regions for debugging

It is useful to have the whole memblock memory space printed to the console when the basic memblock initializations are done. This can be done with the ready-to-use memblock_dump_all() method, which prints the available and reserved memory spaces if the memblock=debug kernel parameter is specified. Let's call it at the very end of the arch_mem_init() function, when all memblock memory and reserved regions are defined, but before any serious allocation is performed.

Signed-off-by: Serge Semin Signed-off-by: Paul Burton Cc: Ralf Baechle Cc: James Hogan Cc: Mike Rapoport Cc: Andrew Morton Cc: Michal Hocko Cc: Greg Kroah-Hartman Cc: Thomas Bogendoerfer Cc: Huacai Chen Cc: Stefan Agner Cc: Stephen Rothwell Cc: Alexandre Belloni Cc: Juergen Gross Cc: Serge Semin Cc: linux-mips@vger.kernel.org Cc: linux-kernel@vger.kernel.org --- arch/mips/kernel/setup.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'arch') diff --git a/arch/mips/kernel/setup.c b/arch/mips/kernel/setup.c index 2a1b2e7a1bc9..ca493fdf69b0 100644 --- a/arch/mips/kernel/setup.c +++ b/arch/mips/kernel/setup.c @@ -824,6 +824,8 @@ static void __init arch_mem_init(char **cmdline_p) /* Reserve for hibernation. */ memblock_reserve(__pa_symbol(&__nosave_begin), __pa_symbol(&__nosave_end) - __pa_symbol(&__nosave_begin)); + + memblock_dump_all(); } static void __init resource_init(void) -- cgit v1.2.3

From 2f5bd0367e7a9e5f5a150500e016a9cb7042803b Mon Sep 17 00:00:00 2001 From: Serge Semin Date: Fri, 3 May 2019 20:50:38 +0300 Subject: mips: Perform early low memory test

The memblock subsystem provides a method to optionally test the passed memory region in case it was requested via a special kernel boot argument. Let's add the call at the bottom of the arch_mem_init() method. Testing at this point in the boot sequence should be safe, since all critical areas are now reserved and a minimum of allocations have been done.

Reviewed-by: Matt Redfearn Signed-off-by: Serge Semin Signed-off-by: Paul Burton Cc: Ralf Baechle Cc: James Hogan Cc: Mike Rapoport Cc: Andrew Morton Cc: Michal Hocko Cc: Greg Kroah-Hartman Cc: Thomas Bogendoerfer Cc: Huacai Chen Cc: Stefan Agner Cc: Stephen Rothwell Cc: Alexandre Belloni Cc: Juergen Gross Cc: Serge Semin Cc: linux-mips@vger.kernel.org Cc: linux-kernel@vger.kernel.org --- arch/mips/kernel/setup.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'arch') diff --git a/arch/mips/kernel/setup.c b/arch/mips/kernel/setup.c index ca493fdf69b0..fbd216b4e929 100644 --- a/arch/mips/kernel/setup.c +++ b/arch/mips/kernel/setup.c @@ -826,6 +826,8 @@ static void __init arch_mem_init(char **cmdline_p) __pa_symbol(&__nosave_end) - __pa_symbol(&__nosave_begin)); memblock_dump_all(); + + early_memtest(PFN_PHYS(min_low_pfn), PFN_PHYS(max_low_pfn)); } static void __init resource_init(void) -- cgit v1.2.3
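Aside: how the added call behaves in the generic code (a sketch; the implementation lives in mm/memtest.c). early_memtest() is a no-op unless the memtest= boot parameter requests one or more passes; when enabled it walks the free memblock ranges, writes and verifies test patterns, and calls memblock_reserve() on any range that fails.

    /* Added at the end of arch_mem_init(); PFN_PHYS() converts page
     * frame numbers to physical addresses, so this covers the whole
     * low memory range. */
    early_memtest(PFN_PHYS(min_low_pfn), PFN_PHYS(max_low_pfn));

Booting with, for example, memtest=4 on the kernel command line runs four passes over the free ranges.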
From 93fa5b280761a4dbb14c5330f260380385ab2b49 Mon Sep 17 00:00:00 2001 From: Serge Semin Date: Fri, 3 May 2019 20:50:40 +0300 Subject: mips: Make sure dt memory regions are valid

There are situations when memory regions coming from a dts file may be too big for the platform's physical address space. This especially concerns XPA-capable systems. The bootloader may determine that more than 4GB of memory is available and pass it to the kernel over the dts memory node, while the kernel is built without XPA/64BIT support. In this case the region may simply be truncated either by the add_memory_region() method or by the u64->phys_addr_t type cast. But in the worst case the method can even drop the memory region entirely if it exceeds the PHYS_ADDR_MAX size. So let's make sure the memory regions retrieved from the dts are valid and, if some of them aren't, manually truncate them with a warning printed out.

Signed-off-by: Serge Semin Signed-off-by: Paul Burton Cc: Ralf Baechle Cc: James Hogan Cc: Mike Rapoport Cc: Andrew Morton Cc: Michal Hocko Cc: Greg Kroah-Hartman Cc: Thomas Bogendoerfer Cc: Huacai Chen Cc: Stefan Agner Cc: Stephen Rothwell Cc: Alexandre Belloni Cc: Juergen Gross Cc: Serge Semin Cc: linux-mips@vger.kernel.org Cc: linux-kernel@vger.kernel.org --- arch/mips/kernel/prom.c | 14 +++++++++++++- 1 file changed, 13 insertions(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/mips/kernel/prom.c b/arch/mips/kernel/prom.c index 437a174e3ef9..28bf01961bb2 100644 --- a/arch/mips/kernel/prom.c +++ b/arch/mips/kernel/prom.c @@ -41,7 +41,19 @@ char *mips_get_machine_name(void) #ifdef CONFIG_USE_OF void __init early_init_dt_add_memory_arch(u64 base, u64 size) { - return add_memory_region(base, size, BOOT_MEM_RAM); + if (base >= PHYS_ADDR_MAX) { + pr_warn("Trying to add an invalid memory region, skipped\n"); + return; + } + + /* Truncate the passed memory region instead of type casting */ + if (base + size - 1 >= PHYS_ADDR_MAX || base + size < base) { + pr_warn("Truncate memory region %llx @ %llx to size %llx\n", + size, base, PHYS_ADDR_MAX - base); + size = PHYS_ADDR_MAX - base; + } + + add_memory_region(base, size, BOOT_MEM_RAM); } int __init early_init_dt_reserve_memory_arch(phys_addr_t base, -- cgit v1.2.3

From 3751cbda8f223549d7ea28803cbec8ac87e43ed2 Mon Sep 17 00:00:00 2001 From: Serge Semin Date: Fri, 3 May 2019 20:50:41 +0300 Subject: mips: Manually call fdt_init_reserved_mem() method

Since the memblock patchset was introduced, reserved-memory nodes declared in dt files are supported. These nodes are parsed during the arch setup procedure, when the early_init_fdt_scan_reserved_mem() method is called. But due to the arch-specific boot mem_map container utilization, we need to manually call the fdt_init_reserved_mem() method again after all the available and reserved memory has been moved to memblock. The first call, performed by the early_init_fdt_scan_reserved_mem() routine before bootmem_init(), fails due to the lack of any memblock memory regions to allocate from at that stage.

Signed-off-by: Serge Semin Signed-off-by: Paul Burton Cc: Ralf Baechle Cc: James Hogan Cc: Mike Rapoport Cc: Andrew Morton Cc: Michal Hocko Cc: Greg Kroah-Hartman Cc: Thomas Bogendoerfer Cc: Huacai Chen Cc: Stefan Agner Cc: Stephen Rothwell Cc: Alexandre Belloni Cc: Juergen Gross Cc: Serge Semin Cc: linux-mips@vger.kernel.org Cc: linux-kernel@vger.kernel.org --- arch/mips/kernel/setup.c | 3 +++ 1 file changed, 3 insertions(+) (limited to 'arch') diff --git a/arch/mips/kernel/setup.c b/arch/mips/kernel/setup.c index fbd216b4e929..ab349d2381c3 100644 --- a/arch/mips/kernel/setup.c +++ b/arch/mips/kernel/setup.c @@ -27,6 +27,7 @@ #include #include #include +#include #include #include @@ -825,6 +826,8 @@ static void __init arch_mem_init(char **cmdline_p) memblock_reserve(__pa_symbol(&__nosave_begin), __pa_symbol(&__nosave_end) - __pa_symbol(&__nosave_begin)); + fdt_init_reserved_mem(); + memblock_dump_all(); early_memtest(PFN_PHYS(min_low_pfn), PFN_PHYS(max_low_pfn)); -- cgit v1.2.3
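Aside: the truncation logic in the dt-region patch above guards two cases, a region whose end runs past PHYS_ADDR_MAX and a u64 wraparound in base + size. The following host-side C model mirrors that logic; it compiles with any C compiler, and MODEL_PHYS_ADDR_MAX is a made-up 32-bit stand-in for a non-XPA MIPS kernel so the truncation is observable.

    #include <stdint.h>
    #include <stdio.h>

    #define MODEL_PHYS_ADDR_MAX 0xffffffffull

    static void add_region(uint64_t base, uint64_t size)
    {
        /* Region starts beyond the physical address space: skip it. */
        if (base >= MODEL_PHYS_ADDR_MAX) {
            printf("skip: base %#llx out of range\n",
                   (unsigned long long)base);
            return;
        }
        /* Catches both "end past the limit" and u64 wraparound. */
        if (base + size - 1 >= MODEL_PHYS_ADDR_MAX || base + size < base) {
            printf("truncate %#llx @ %#llx to size %#llx\n",
                   (unsigned long long)size, (unsigned long long)base,
                   (unsigned long long)(MODEL_PHYS_ADDR_MAX - base));
            size = MODEL_PHYS_ADDR_MAX - base;
        }
        printf("add RAM: base %#llx size %#llx\n",
               (unsigned long long)base, (unsigned long long)size);
    }

    int main(void)
    {
        add_region(0x80000000ull, 0x100000000ull); /* 4GB at 2GB: truncated */
        add_region(0x100000000ull, 0x40000000ull); /* base too high: skipped */
        return 0;
    }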
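Aside: the call ordering the last commit describes, condensed into a comment sketch. The function names are from the kernel tree; the staging annotations are editorial.

    /*
     * Early boot:
     *   early_init_fdt_scan_reserved_mem()
     *     -> fdt_init_reserved_mem()    // runs before bootmem_init(),
     *                                   // memblock has no memory regions
     *                                   // yet, so allocations here fail
     *
     * arch_mem_init():
     *   bootmem_init();                 // boot mem_map handed to memblock
     *   ...
     *   fdt_init_reserved_mem();        // re-run: allocations now succeed
     */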