Diffstat (limited to 'arch')
-rw-r--r--  arch/alpha/include/uapi/asm/socket.h                          |   2
-rw-r--r--  arch/arm/net/bpf_jit_32.c                                     |   4
-rw-r--r--  arch/arm64/boot/dts/freescale/fsl-lx2160a-clearfog-itx.dtsi   |   4
-rw-r--r--  arch/arm64/boot/dts/freescale/fsl-lx2160a.dtsi                |   6
-rw-r--r--  arch/arm64/boot/dts/mediatek/mt2712-evb.dts                   |   1
-rw-r--r--  arch/arm64/boot/dts/mediatek/mt2712e.dtsi                     |  14
-rw-r--r--  arch/arm64/boot/dts/microchip/sparx5.dtsi                     |   5
-rw-r--r--  arch/arm64/boot/dts/xilinx/zynqmp.dtsi                        |   8
-rw-r--r--  arch/arm64/net/bpf_jit.h                                      |  39
-rw-r--r--  arch/arm64/net/bpf_jit_comp.c                                 | 246
-rw-r--r--  arch/mips/include/uapi/asm/socket.h                           |   2
-rw-r--r--  arch/parisc/include/uapi/asm/socket.h                         |   2
-rw-r--r--  arch/powerpc/include/asm/checksum.h                           |   7
-rw-r--r--  arch/powerpc/net/bpf_jit_comp.c                               |   2
-rw-r--r--  arch/sparc/include/uapi/asm/socket.h                          |   2
-rw-r--r--  arch/sparc/net/bpf_jit_comp_64.c                              |   2
-rw-r--r--  arch/x86/Kconfig                                              |   1
-rw-r--r--  arch/x86/include/asm/text-patching.h                          |   1
-rw-r--r--  arch/x86/kernel/alternative.c                                 |  34
-rw-r--r--  arch/x86/net/bpf_jit_comp.c                                   |  82
-rw-r--r--  arch/xtensa/platforms/iss/network.c                           |   2
21 files changed, 375 insertions, 91 deletions
diff --git a/arch/alpha/include/uapi/asm/socket.h b/arch/alpha/include/uapi/asm/socket.h
index 284d28755b8d..7d81535893af 100644
--- a/arch/alpha/include/uapi/asm/socket.h
+++ b/arch/alpha/include/uapi/asm/socket.h
@@ -133,6 +133,8 @@
#define SO_RESERVE_MEM 73
+#define SO_TXREHASH 74
+
#if !defined(__KERNEL__)
#if __BITS_PER_LONG == 64
diff --git a/arch/arm/net/bpf_jit_32.c b/arch/arm/net/bpf_jit_32.c
index 10ceebb7530b..9e457156ad4d 100644
--- a/arch/arm/net/bpf_jit_32.c
+++ b/arch/arm/net/bpf_jit_32.c
@@ -1864,7 +1864,7 @@ static int build_body(struct jit_ctx *ctx)
if (ctx->target == NULL)
ctx->offsets[i] = ctx->idx;
- /* If unsuccesfull, return with error code */
+ /* If unsuccessful, return with error code */
if (ret)
return ret;
}
@@ -1973,7 +1973,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
* for jit, although it can decrease the size of the image.
*
* As each arm instruction is of length 32bit, we are translating
- * number of JITed intructions into the size required to store these
+ * number of JITed instructions into the size required to store the
* JITed code.
*/
image_size = sizeof(u32) * ctx.idx;
diff --git a/arch/arm64/boot/dts/freescale/fsl-lx2160a-clearfog-itx.dtsi b/arch/arm64/boot/dts/freescale/fsl-lx2160a-clearfog-itx.dtsi
index 17f8e733972a..41702e7386e3 100644
--- a/arch/arm64/boot/dts/freescale/fsl-lx2160a-clearfog-itx.dtsi
+++ b/arch/arm64/boot/dts/freescale/fsl-lx2160a-clearfog-itx.dtsi
@@ -63,21 +63,25 @@
&dpmac7 {
sfp = <&sfp0>;
managed = "in-band-status";
+ phys = <&serdes_1 3>;
};
&dpmac8 {
sfp = <&sfp1>;
managed = "in-band-status";
+ phys = <&serdes_1 2>;
};
&dpmac9 {
sfp = <&sfp2>;
managed = "in-band-status";
+ phys = <&serdes_1 1>;
};
&dpmac10 {
sfp = <&sfp3>;
managed = "in-band-status";
+ phys = <&serdes_1 0>;
};
&emdio2 {
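
Each dpmac above now claims one lane of the SoC's SerDes block through the new "phys" property; the provider node (serdes_1, added below in fsl-lx2160a.dtsi) uses #phy-cells = <1>, the single cell selecting the lane. As a hedged, illustrative sketch of the consumer side (not the actual dpaa2 MAC driver code; mac_node stands in for the dpmac's device_node):

	#include <linux/phy/phy.h>

	/* Grab the first PHY referenced by the node's "phys" property,
	 * then initialise and power up the lane. Illustrative only. */
	struct phy *serdes = of_phy_get(mac_node, NULL);

	if (!IS_ERR(serdes)) {
		phy_init(serdes);
		phy_power_on(serdes);
	}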
diff --git a/arch/arm64/boot/dts/freescale/fsl-lx2160a.dtsi b/arch/arm64/boot/dts/freescale/fsl-lx2160a.dtsi
index 3c611cb4f5fe..c5daa15b020d 100644
--- a/arch/arm64/boot/dts/freescale/fsl-lx2160a.dtsi
+++ b/arch/arm64/boot/dts/freescale/fsl-lx2160a.dtsi
@@ -612,6 +612,12 @@
ranges;
dma-ranges = <0x0 0x0 0x0 0x0 0x10000 0x00000000>;
+ serdes_1: phy@1ea0000 {
+ compatible = "fsl,lynx-28g";
+ reg = <0x0 0x1ea0000 0x0 0x1e30>;
+ #phy-cells = <1>;
+ };
+
crypto: crypto@8000000 {
compatible = "fsl,sec-v5.0", "fsl,sec-v4.0";
fsl,sec-era = <10>;
diff --git a/arch/arm64/boot/dts/mediatek/mt2712-evb.dts b/arch/arm64/boot/dts/mediatek/mt2712-evb.dts
index 7d369fdd3117..11aa135aa0f3 100644
--- a/arch/arm64/boot/dts/mediatek/mt2712-evb.dts
+++ b/arch/arm64/boot/dts/mediatek/mt2712-evb.dts
@@ -110,6 +110,7 @@
phy-handle = <&ethernet_phy0>;
mediatek,tx-delay-ps = <1530>;
snps,reset-gpio = <&pio 87 GPIO_ACTIVE_LOW>;
+ snps,reset-delays-us = <0 10000 10000>;
pinctrl-names = "default", "sleep";
pinctrl-0 = <&eth_default>;
pinctrl-1 = <&eth_sleep>;
diff --git a/arch/arm64/boot/dts/mediatek/mt2712e.dtsi b/arch/arm64/boot/dts/mediatek/mt2712e.dtsi
index de16c0d80c30..a27b7628c5f7 100644
--- a/arch/arm64/boot/dts/mediatek/mt2712e.dtsi
+++ b/arch/arm64/boot/dts/mediatek/mt2712e.dtsi
@@ -726,7 +726,7 @@
};
eth: ethernet@1101c000 {
- compatible = "mediatek,mt2712-gmac";
+ compatible = "mediatek,mt2712-gmac", "snps,dwmac-4.20a";
reg = <0 0x1101c000 0 0x1300>;
interrupts = <GIC_SPI 237 IRQ_TYPE_LEVEL_LOW>;
interrupt-names = "macirq";
@@ -734,15 +734,19 @@
clock-names = "axi",
"apb",
"mac_main",
- "ptp_ref";
+ "ptp_ref",
+ "rmii_internal";
clocks = <&pericfg CLK_PERI_GMAC>,
<&pericfg CLK_PERI_GMAC_PCLK>,
<&topckgen CLK_TOP_ETHER_125M_SEL>,
- <&topckgen CLK_TOP_ETHER_50M_SEL>;
+ <&topckgen CLK_TOP_ETHER_50M_SEL>,
+ <&topckgen CLK_TOP_ETHER_50M_RMII_SEL>;
assigned-clocks = <&topckgen CLK_TOP_ETHER_125M_SEL>,
- <&topckgen CLK_TOP_ETHER_50M_SEL>;
+ <&topckgen CLK_TOP_ETHER_50M_SEL>,
+ <&topckgen CLK_TOP_ETHER_50M_RMII_SEL>;
assigned-clock-parents = <&topckgen CLK_TOP_ETHERPLL_125M>,
- <&topckgen CLK_TOP_APLL1_D3>;
+ <&topckgen CLK_TOP_APLL1_D3>,
+ <&topckgen CLK_TOP_ETHERPLL_50M>;
power-domains = <&scpsys MT2712_POWER_DOMAIN_AUDIO>;
mediatek,pericfg = <&pericfg>;
snps,axi-config = <&stmmac_axi_setup>;
diff --git a/arch/arm64/boot/dts/microchip/sparx5.dtsi b/arch/arm64/boot/dts/microchip/sparx5.dtsi
index 787ebcec121d..2dd5e38820b1 100644
--- a/arch/arm64/boot/dts/microchip/sparx5.dtsi
+++ b/arch/arm64/boot/dts/microchip/sparx5.dtsi
@@ -471,9 +471,10 @@
<0x6 0x10004000 0x7fc000>,
<0x6 0x11010000 0xaf0000>;
reg-names = "cpu", "dev", "gcb";
- interrupt-names = "xtr", "fdma";
+ interrupt-names = "xtr", "fdma", "ptp";
interrupts = <GIC_SPI 30 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 25 IRQ_TYPE_LEVEL_HIGH>;
+ <GIC_SPI 25 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 27 IRQ_TYPE_LEVEL_HIGH>;
resets = <&reset 0>;
reset-names = "switch";
};
diff --git a/arch/arm64/boot/dts/xilinx/zynqmp.dtsi b/arch/arm64/boot/dts/xilinx/zynqmp.dtsi
index 3e15391e5b37..056761c974fd 100644
--- a/arch/arm64/boot/dts/xilinx/zynqmp.dtsi
+++ b/arch/arm64/boot/dts/xilinx/zynqmp.dtsi
@@ -510,6 +510,8 @@
#size-cells = <0>;
iommus = <&smmu 0x874>;
power-domains = <&zynqmp_firmware PD_ETH_0>;
+ resets = <&zynqmp_reset ZYNQMP_RESET_GEM0>;
+ reset-names = "gem0_rst";
};
gem1: ethernet@ff0c0000 {
@@ -523,6 +525,8 @@
#size-cells = <0>;
iommus = <&smmu 0x875>;
power-domains = <&zynqmp_firmware PD_ETH_1>;
+ resets = <&zynqmp_reset ZYNQMP_RESET_GEM1>;
+ reset-names = "gem1_rst";
};
gem2: ethernet@ff0d0000 {
@@ -536,6 +540,8 @@
#size-cells = <0>;
iommus = <&smmu 0x876>;
power-domains = <&zynqmp_firmware PD_ETH_2>;
+ resets = <&zynqmp_reset ZYNQMP_RESET_GEM2>;
+ reset-names = "gem2_rst";
};
gem3: ethernet@ff0e0000 {
@@ -549,6 +555,8 @@
#size-cells = <0>;
iommus = <&smmu 0x877>;
power-domains = <&zynqmp_firmware PD_ETH_3>;
+ resets = <&zynqmp_reset ZYNQMP_RESET_GEM3>;
+ reset-names = "gem3_rst";
};
gpio: gpio@ff0a0000 {
diff --git a/arch/arm64/net/bpf_jit.h b/arch/arm64/net/bpf_jit.h
index 9d9250c7cc72..dd59b5ad8fe4 100644
--- a/arch/arm64/net/bpf_jit.h
+++ b/arch/arm64/net/bpf_jit.h
@@ -88,17 +88,42 @@
/* [Rn] = Rt; (atomic) Rs = [state] */
#define A64_STXR(sf, Rt, Rn, Rs) \
A64_LSX(sf, Rt, Rn, Rs, STORE_EX)
+/* [Rn] = Rt (store release); (atomic) Rs = [state] */
+#define A64_STLXR(sf, Rt, Rn, Rs) \
+ aarch64_insn_gen_load_store_ex(Rt, Rn, Rs, A64_SIZE(sf), \
+ AARCH64_INSN_LDST_STORE_REL_EX)
/*
* LSE atomics
*
- * STADD is simply encoded as an alias for LDADD with XZR as
- * the destination register.
+ * ST{ADD,CLR,SET,EOR} is simply encoded as an alias for
+ * LD{ADD,CLR,SET,EOR} with XZR as the destination register.
*/
-#define A64_STADD(sf, Rn, Rs) \
+#define A64_ST_OP(sf, Rn, Rs, op) \
aarch64_insn_gen_atomic_ld_op(A64_ZR, Rn, Rs, \
- A64_SIZE(sf), AARCH64_INSN_MEM_ATOMIC_ADD, \
+ A64_SIZE(sf), AARCH64_INSN_MEM_ATOMIC_##op, \
AARCH64_INSN_MEM_ORDER_NONE)
+/* [Rn] <op>= Rs */
+#define A64_STADD(sf, Rn, Rs) A64_ST_OP(sf, Rn, Rs, ADD)
+#define A64_STCLR(sf, Rn, Rs) A64_ST_OP(sf, Rn, Rs, CLR)
+#define A64_STEOR(sf, Rn, Rs) A64_ST_OP(sf, Rn, Rs, EOR)
+#define A64_STSET(sf, Rn, Rs) A64_ST_OP(sf, Rn, Rs, SET)
+
+#define A64_LD_OP_AL(sf, Rt, Rn, Rs, op) \
+ aarch64_insn_gen_atomic_ld_op(Rt, Rn, Rs, \
+ A64_SIZE(sf), AARCH64_INSN_MEM_ATOMIC_##op, \
+ AARCH64_INSN_MEM_ORDER_ACQREL)
+/* Rt = [Rn] (load acquire); [Rn] <op>= Rs (store release) */
+#define A64_LDADDAL(sf, Rt, Rn, Rs) A64_LD_OP_AL(sf, Rt, Rn, Rs, ADD)
+#define A64_LDCLRAL(sf, Rt, Rn, Rs) A64_LD_OP_AL(sf, Rt, Rn, Rs, CLR)
+#define A64_LDEORAL(sf, Rt, Rn, Rs) A64_LD_OP_AL(sf, Rt, Rn, Rs, EOR)
+#define A64_LDSETAL(sf, Rt, Rn, Rs) A64_LD_OP_AL(sf, Rt, Rn, Rs, SET)
+/* Rt = [Rn] (load acquire); [Rn] = Rs (store release) */
+#define A64_SWPAL(sf, Rt, Rn, Rs) A64_LD_OP_AL(sf, Rt, Rn, Rs, SWP)
+/* Rs = CAS(Rn, Rs, Rt) (load acquire & store release) */
+#define A64_CASAL(sf, Rt, Rn, Rs) \
+ aarch64_insn_gen_cas(Rt, Rn, Rs, A64_SIZE(sf), \
+ AARCH64_INSN_MEM_ORDER_ACQREL)
/* Add/subtract (immediate) */
#define A64_ADDSUB_IMM(sf, Rd, Rn, imm12, type) \
@@ -203,6 +228,9 @@
#define A64_ANDS(sf, Rd, Rn, Rm) A64_LOGIC_SREG(sf, Rd, Rn, Rm, AND_SETFLAGS)
/* Rn & Rm; set condition flags */
#define A64_TST(sf, Rn, Rm) A64_ANDS(sf, A64_ZR, Rn, Rm)
+/* Rd = ~Rm (alias of ORN with A64_ZR as Rn) */
+#define A64_MVN(sf, Rd, Rm) \
+ A64_LOGIC_SREG(sf, Rd, A64_ZR, Rm, ORN)
/* Logical (immediate) */
#define A64_LOGIC_IMM(sf, Rd, Rn, imm, type) ({ \
@@ -226,4 +254,7 @@
#define A64_BTI_J A64_HINT(AARCH64_INSN_HINT_BTIJ)
#define A64_BTI_JC A64_HINT(AARCH64_INSN_HINT_BTIJC)
+/* DMB */
+#define A64_DMB_ISH aarch64_insn_gen_dmb(AARCH64_INSN_MB_ISH)
+
#endif /* _BPF_JIT_H */
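
The ST-form macros above encode "no fetched result" by hardwiring XZR as the destination, exactly as the comment says. A sketch of what one emission expands to (emit() and jit_ctx come from bpf_jit_comp.c; the expansion below is spelled out from A64_ST_OP and A64_SIZE):

	/* Atomically add src to *(u64 *)reg, discarding the old value. */
	emit(A64_STADD(1, reg, src), ctx);

	/* ... which is the same instruction word as:
	 * aarch64_insn_gen_atomic_ld_op(A64_ZR, reg, src,
	 *                               AARCH64_INSN_SIZE_64,
	 *                               AARCH64_INSN_MEM_ATOMIC_ADD,
	 *                               AARCH64_INSN_MEM_ORDER_NONE);
	 */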
diff --git a/arch/arm64/net/bpf_jit_comp.c b/arch/arm64/net/bpf_jit_comp.c
index e96d4d87291f..e850c69e128c 100644
--- a/arch/arm64/net/bpf_jit_comp.c
+++ b/arch/arm64/net/bpf_jit_comp.c
@@ -27,6 +27,17 @@
#define TCALL_CNT (MAX_BPF_JIT_REG + 2)
#define TMP_REG_3 (MAX_BPF_JIT_REG + 3)
+#define check_imm(bits, imm) do { \
+ if ((((imm) > 0) && ((imm) >> (bits))) || \
+ (((imm) < 0) && (~(imm) >> (bits)))) { \
+ pr_info("[%2d] imm=%d(0x%x) out of range\n", \
+ i, imm, imm); \
+ return -EINVAL; \
+ } \
+} while (0)
+#define check_imm19(imm) check_imm(19, imm)
+#define check_imm26(imm) check_imm(26, imm)
+
/* Map BPF registers to A64 registers */
static const int bpf2a64[] = {
/* return value from in-kernel function, and exit value from eBPF */
@@ -329,6 +340,170 @@ static int emit_bpf_tail_call(struct jit_ctx *ctx)
#undef jmp_offset
}
+#ifdef CONFIG_ARM64_LSE_ATOMICS
+static int emit_lse_atomic(const struct bpf_insn *insn, struct jit_ctx *ctx)
+{
+ const u8 code = insn->code;
+ const u8 dst = bpf2a64[insn->dst_reg];
+ const u8 src = bpf2a64[insn->src_reg];
+ const u8 tmp = bpf2a64[TMP_REG_1];
+ const u8 tmp2 = bpf2a64[TMP_REG_2];
+ const bool isdw = BPF_SIZE(code) == BPF_DW;
+ const s16 off = insn->off;
+ u8 reg;
+
+ if (!off) {
+ reg = dst;
+ } else {
+ emit_a64_mov_i(1, tmp, off, ctx);
+ emit(A64_ADD(1, tmp, tmp, dst), ctx);
+ reg = tmp;
+ }
+
+ switch (insn->imm) {
+ /* lock *(u32/u64 *)(dst_reg + off) <op>= src_reg */
+ case BPF_ADD:
+ emit(A64_STADD(isdw, reg, src), ctx);
+ break;
+ case BPF_AND:
+ emit(A64_MVN(isdw, tmp2, src), ctx);
+ emit(A64_STCLR(isdw, reg, tmp2), ctx);
+ break;
+ case BPF_OR:
+ emit(A64_STSET(isdw, reg, src), ctx);
+ break;
+ case BPF_XOR:
+ emit(A64_STEOR(isdw, reg, src), ctx);
+ break;
+ /* src_reg = atomic_fetch_<op>(dst_reg + off, src_reg) */
+ case BPF_ADD | BPF_FETCH:
+ emit(A64_LDADDAL(isdw, src, reg, src), ctx);
+ break;
+ case BPF_AND | BPF_FETCH:
+ emit(A64_MVN(isdw, tmp2, src), ctx);
+ emit(A64_LDCLRAL(isdw, src, reg, tmp2), ctx);
+ break;
+ case BPF_OR | BPF_FETCH:
+ emit(A64_LDSETAL(isdw, src, reg, src), ctx);
+ break;
+ case BPF_XOR | BPF_FETCH:
+ emit(A64_LDEORAL(isdw, src, reg, src), ctx);
+ break;
+ /* src_reg = atomic_xchg(dst_reg + off, src_reg); */
+ case BPF_XCHG:
+ emit(A64_SWPAL(isdw, src, reg, src), ctx);
+ break;
+ /* r0 = atomic_cmpxchg(dst_reg + off, r0, src_reg); */
+ case BPF_CMPXCHG:
+ emit(A64_CASAL(isdw, src, reg, bpf2a64[BPF_REG_0]), ctx);
+ break;
+ default:
+ pr_err_once("unknown atomic op code %02x\n", insn->imm);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+#else
+static inline int emit_lse_atomic(const struct bpf_insn *insn, struct jit_ctx *ctx)
+{
+ return -EINVAL;
+}
+#endif
+
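The fetch variants return the old memory value through src, and since LSE has no atomic-AND form, BPF_AND is built from the complement: clear the bits that are zero in src. A user-space model of the semantics using GCC's __atomic builtins (a sketch, not kernel code):

	#include <stdint.h>

	/* BPF_AND | BPF_FETCH on a u64; the JIT emits:
	 *	mvn	tmp2, src
	 *	ldclral	tmp2, src, [reg]
	 */
	static inline uint64_t fetch_and_model(uint64_t *addr, uint64_t src)
	{
		return __atomic_fetch_and(addr, src, __ATOMIC_SEQ_CST);
	}

	/* BPF_XCHG; the JIT emits: swpal src, src, [reg] */
	static inline uint64_t xchg_model(uint64_t *addr, uint64_t src)
	{
		return __atomic_exchange_n(addr, src, __ATOMIC_SEQ_CST);
	}
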
+static int emit_ll_sc_atomic(const struct bpf_insn *insn, struct jit_ctx *ctx)
+{
+ const u8 code = insn->code;
+ const u8 dst = bpf2a64[insn->dst_reg];
+ const u8 src = bpf2a64[insn->src_reg];
+ const u8 tmp = bpf2a64[TMP_REG_1];
+ const u8 tmp2 = bpf2a64[TMP_REG_2];
+ const u8 tmp3 = bpf2a64[TMP_REG_3];
+ const int i = insn - ctx->prog->insnsi;
+ const s32 imm = insn->imm;
+ const s16 off = insn->off;
+ const bool isdw = BPF_SIZE(code) == BPF_DW;
+ u8 reg;
+ s32 jmp_offset;
+
+ if (!off) {
+ reg = dst;
+ } else {
+ emit_a64_mov_i(1, tmp, off, ctx);
+ emit(A64_ADD(1, tmp, tmp, dst), ctx);
+ reg = tmp;
+ }
+
+ if (imm == BPF_ADD || imm == BPF_AND ||
+ imm == BPF_OR || imm == BPF_XOR) {
+ /* lock *(u32/u64 *)(dst_reg + off) <op>= src_reg */
+ emit(A64_LDXR(isdw, tmp2, reg), ctx);
+ if (imm == BPF_ADD)
+ emit(A64_ADD(isdw, tmp2, tmp2, src), ctx);
+ else if (imm == BPF_AND)
+ emit(A64_AND(isdw, tmp2, tmp2, src), ctx);
+ else if (imm == BPF_OR)
+ emit(A64_ORR(isdw, tmp2, tmp2, src), ctx);
+ else
+ emit(A64_EOR(isdw, tmp2, tmp2, src), ctx);
+ emit(A64_STXR(isdw, tmp2, reg, tmp3), ctx);
+ jmp_offset = -3;
+ check_imm19(jmp_offset);
+ emit(A64_CBNZ(0, tmp3, jmp_offset), ctx);
+ } else if (imm == (BPF_ADD | BPF_FETCH) ||
+ imm == (BPF_AND | BPF_FETCH) ||
+ imm == (BPF_OR | BPF_FETCH) ||
+ imm == (BPF_XOR | BPF_FETCH)) {
+ /* src_reg = atomic_fetch_<op>(dst_reg + off, src_reg) */
+ const u8 ax = bpf2a64[BPF_REG_AX];
+
+ emit(A64_MOV(isdw, ax, src), ctx);
+ emit(A64_LDXR(isdw, src, reg), ctx);
+ if (imm == (BPF_ADD | BPF_FETCH))
+ emit(A64_ADD(isdw, tmp2, src, ax), ctx);
+ else if (imm == (BPF_AND | BPF_FETCH))
+ emit(A64_AND(isdw, tmp2, src, ax), ctx);
+ else if (imm == (BPF_OR | BPF_FETCH))
+ emit(A64_ORR(isdw, tmp2, src, ax), ctx);
+ else
+ emit(A64_EOR(isdw, tmp2, src, ax), ctx);
+ emit(A64_STLXR(isdw, tmp2, reg, tmp3), ctx);
+ jmp_offset = -3;
+ check_imm19(jmp_offset);
+ emit(A64_CBNZ(0, tmp3, jmp_offset), ctx);
+ emit(A64_DMB_ISH, ctx);
+ } else if (imm == BPF_XCHG) {
+ /* src_reg = atomic_xchg(dst_reg + off, src_reg); */
+ emit(A64_MOV(isdw, tmp2, src), ctx);
+ emit(A64_LDXR(isdw, src, reg), ctx);
+ emit(A64_STLXR(isdw, tmp2, reg, tmp3), ctx);
+ jmp_offset = -2;
+ check_imm19(jmp_offset);
+ emit(A64_CBNZ(0, tmp3, jmp_offset), ctx);
+ emit(A64_DMB_ISH, ctx);
+ } else if (imm == BPF_CMPXCHG) {
+ /* r0 = atomic_cmpxchg(dst_reg + off, r0, src_reg); */
+ const u8 r0 = bpf2a64[BPF_REG_0];
+
+ emit(A64_MOV(isdw, tmp2, r0), ctx);
+ emit(A64_LDXR(isdw, r0, reg), ctx);
+ emit(A64_EOR(isdw, tmp3, r0, tmp2), ctx);
+ jmp_offset = 4;
+ check_imm19(jmp_offset);
+ emit(A64_CBNZ(isdw, tmp3, jmp_offset), ctx);
+ emit(A64_STLXR(isdw, src, reg, tmp3), ctx);
+ jmp_offset = -4;
+ check_imm19(jmp_offset);
+ emit(A64_CBNZ(0, tmp3, jmp_offset), ctx);
+ emit(A64_DMB_ISH, ctx);
+ } else {
+ pr_err_once("unknown atomic op code %02x\n", imm);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
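For pre-LSE cores, each operation becomes the classic exclusive-load/store-release retry loop, with CBNZ re-running the sequence when the STLXR fails and a trailing DMB ISH providing the full barrier the fetch variants require. The BPF_CMPXCHG semantics, modeled with a GCC builtin (a sketch, not kernel code):

	/* r0 = cmpxchg(addr, r0, src): store 'new' only if *addr == old,
	 * and always return the value observed at *addr. */
	static inline unsigned long cmpxchg_model(unsigned long *addr,
						  unsigned long old,
						  unsigned long new)
	{
		__atomic_compare_exchange_n(addr, &old, new, false,
					    __ATOMIC_SEQ_CST,
					    __ATOMIC_SEQ_CST);
		return old;
	}
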
static void build_epilogue(struct jit_ctx *ctx)
{
const u8 r0 = bpf2a64[BPF_REG_0];
@@ -434,29 +609,16 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
const u8 src = bpf2a64[insn->src_reg];
const u8 tmp = bpf2a64[TMP_REG_1];
const u8 tmp2 = bpf2a64[TMP_REG_2];
- const u8 tmp3 = bpf2a64[TMP_REG_3];
const s16 off = insn->off;
const s32 imm = insn->imm;
const int i = insn - ctx->prog->insnsi;
const bool is64 = BPF_CLASS(code) == BPF_ALU64 ||
BPF_CLASS(code) == BPF_JMP;
- const bool isdw = BPF_SIZE(code) == BPF_DW;
- u8 jmp_cond, reg;
+ u8 jmp_cond;
s32 jmp_offset;
u32 a64_insn;
int ret;
-#define check_imm(bits, imm) do { \
- if ((((imm) > 0) && ((imm) >> (bits))) || \
- (((imm) < 0) && (~(imm) >> (bits)))) { \
- pr_info("[%2d] imm=%d(0x%x) out of range\n", \
- i, imm, imm); \
- return -EINVAL; \
- } \
-} while (0)
-#define check_imm19(imm) check_imm(19, imm)
-#define check_imm26(imm) check_imm(26, imm)
-
switch (code) {
/* dst = src */
case BPF_ALU | BPF_MOV | BPF_X:
@@ -891,33 +1053,12 @@ emit_cond_jmp:
case BPF_STX | BPF_ATOMIC | BPF_W:
case BPF_STX | BPF_ATOMIC | BPF_DW:
- if (insn->imm != BPF_ADD) {
- pr_err_once("unknown atomic op code %02x\n", insn->imm);
- return -EINVAL;
- }
-
- /* STX XADD: lock *(u32 *)(dst + off) += src
- * and
- * STX XADD: lock *(u64 *)(dst + off) += src
- */
-
- if (!off) {
- reg = dst;
- } else {
- emit_a64_mov_i(1, tmp, off, ctx);
- emit(A64_ADD(1, tmp, tmp, dst), ctx);
- reg = tmp;
- }
- if (cpus_have_cap(ARM64_HAS_LSE_ATOMICS)) {
- emit(A64_STADD(isdw, reg, src), ctx);
- } else {
- emit(A64_LDXR(isdw, tmp2, reg), ctx);
- emit(A64_ADD(isdw, tmp2, tmp2, src), ctx);
- emit(A64_STXR(isdw, tmp2, reg, tmp3), ctx);
- jmp_offset = -3;
- check_imm19(jmp_offset);
- emit(A64_CBNZ(0, tmp3, jmp_offset), ctx);
- }
+ if (cpus_have_cap(ARM64_HAS_LSE_ATOMICS))
+ ret = emit_lse_atomic(insn, ctx);
+ else
+ ret = emit_ll_sc_atomic(insn, ctx);
+ if (ret)
+ return ret;
break;
default:
@@ -1049,15 +1190,18 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
goto out_off;
}
- /* 1. Initial fake pass to compute ctx->idx. */
-
- /* Fake pass to fill in ctx->offset. */
- if (build_body(&ctx, extra_pass)) {
+ /*
+ * 1. Initial fake pass to compute ctx->idx and ctx->offset.
+ *
+ * BPF line info needs ctx->offset[i] to be the offset of
+ * instruction[i] in the JITed image, so build the prologue first.
+ */
+ if (build_prologue(&ctx, was_classic)) {
prog = orig_prog;
goto out_off;
}
- if (build_prologue(&ctx, was_classic)) {
+ if (build_body(&ctx, extra_pass)) {
prog = orig_prog;
goto out_off;
}
@@ -1130,6 +1274,11 @@ skip_init_ctx:
prog->jited_len = prog_size;
if (!prog->is_func || extra_pass) {
+ int i;
+
+ /* offset[prog->len] is the size of the program */
+ for (i = 0; i <= prog->len; i++)
+ ctx.offset[i] *= AARCH64_INSN_SIZE;
bpf_prog_fill_jited_linfo(prog, ctx.offset + 1);
out_off:
kfree(ctx.offset);
@@ -1143,6 +1292,11 @@ out:
return prog;
}
+bool bpf_jit_supports_kfunc_call(void)
+{
+ return true;
+}
+
u64 bpf_jit_alloc_exec_limit(void)
{
return VMALLOC_END - VMALLOC_START;
diff --git a/arch/mips/include/uapi/asm/socket.h b/arch/mips/include/uapi/asm/socket.h
index 24e0efb360f6..1d55e57b8466 100644
--- a/arch/mips/include/uapi/asm/socket.h
+++ b/arch/mips/include/uapi/asm/socket.h
@@ -144,6 +144,8 @@
#define SO_RESERVE_MEM 73
+#define SO_TXREHASH 74
+
#if !defined(__KERNEL__)
#if __BITS_PER_LONG == 64
diff --git a/arch/parisc/include/uapi/asm/socket.h b/arch/parisc/include/uapi/asm/socket.h
index 845ddc63c882..654061e0964e 100644
--- a/arch/parisc/include/uapi/asm/socket.h
+++ b/arch/parisc/include/uapi/asm/socket.h
@@ -125,6 +125,8 @@
#define SO_RESERVE_MEM 0x4047
+#define SO_TXREHASH 0x4048
+
#if !defined(__KERNEL__)
#if __BITS_PER_LONG == 64
diff --git a/arch/powerpc/include/asm/checksum.h b/arch/powerpc/include/asm/checksum.h
index 350de8f90250..ab3832b93f0a 100644
--- a/arch/powerpc/include/asm/checksum.h
+++ b/arch/powerpc/include/asm/checksum.h
@@ -112,6 +112,13 @@ static __always_inline __wsum csum_add(__wsum csum, __wsum addend)
#endif
}
+#define HAVE_ARCH_CSUM_SHIFT
+static __always_inline __wsum csum_shift(__wsum sum, int offset)
+{
+ /* rotate sum to align it with a 16b boundary */
+ return (__force __wsum)rol32((__force u32)sum, (offset & 1) << 3);
+}
+
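The rotation is enough because the Internet checksum is a 16-bit one's-complement sum: a partial sum taken from an odd byte offset has the two bytes of every 16-bit word swapped relative to the destination alignment, and swapping the bytes of each word of the accumulator is the same as rotating the whole 32-bit accumulator by 8 bits. A user-space sketch with rol32 defined locally:

	#include <stdint.h>

	static inline uint32_t rol32(uint32_t word, unsigned int shift)
	{
		return (word << (shift & 31)) | (word >> ((32 - shift) & 31));
	}

	/* Re-align a partial checksum computed at byte 'offset' so it can
	 * be folded into an even-aligned running sum. */
	static inline uint32_t csum_shift(uint32_t sum, int offset)
	{
		return rol32(sum, (offset & 1) << 3);
	}
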
/*
* This is a version of ip_compute_csum() optimized for IP headers,
* which always checksum on 4 octet boundaries. ihl is the number
diff --git a/arch/powerpc/net/bpf_jit_comp.c b/arch/powerpc/net/bpf_jit_comp.c
index 56dd1f4e3e44..a4f4d347e6bd 100644
--- a/arch/powerpc/net/bpf_jit_comp.c
+++ b/arch/powerpc/net/bpf_jit_comp.c
@@ -264,7 +264,7 @@ skip_codegen_passes:
fp->jited = 1;
fp->jited_len = proglen + FUNCTION_DESCR_SIZE;
- bpf_flush_icache(bpf_hdr, (u8 *)bpf_hdr + (bpf_hdr->pages * PAGE_SIZE));
+ bpf_flush_icache(bpf_hdr, (u8 *)bpf_hdr + bpf_hdr->size);
if (!fp->is_func || extra_pass) {
bpf_jit_binary_lock_ro(bpf_hdr);
bpf_prog_fill_jited_linfo(fp, addrs);
diff --git a/arch/sparc/include/uapi/asm/socket.h b/arch/sparc/include/uapi/asm/socket.h
index 2672dd03faf3..666f81e617ea 100644
--- a/arch/sparc/include/uapi/asm/socket.h
+++ b/arch/sparc/include/uapi/asm/socket.h
@@ -126,6 +126,8 @@
#define SO_RESERVE_MEM 0x0052
+#define SO_TXREHASH 0x0053
+
#if !defined(__KERNEL__)
diff --git a/arch/sparc/net/bpf_jit_comp_64.c b/arch/sparc/net/bpf_jit_comp_64.c
index b1e38784eb23..fa0759bfe498 100644
--- a/arch/sparc/net/bpf_jit_comp_64.c
+++ b/arch/sparc/net/bpf_jit_comp_64.c
@@ -1599,7 +1599,7 @@ skip_init_ctx:
if (bpf_jit_enable > 1)
bpf_jit_dump(prog->len, image_size, pass, ctx.image);
- bpf_flush_icache(header, (u8 *)header + (header->pages * PAGE_SIZE));
+ bpf_flush_icache(header, (u8 *)header + header->size);
if (!prog->is_func || extra_pass) {
bpf_jit_binary_lock_ro(header);
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 61e511c51890..b037fe7be374 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -161,6 +161,7 @@ config X86
select HAVE_ALIGNED_STRUCT_PAGE if SLUB
select HAVE_ARCH_AUDITSYSCALL
select HAVE_ARCH_HUGE_VMAP if X86_64 || X86_PAE
+ select HAVE_ARCH_HUGE_VMALLOC if X86_64
select HAVE_ARCH_JUMP_LABEL
select HAVE_ARCH_JUMP_LABEL_RELATIVE
select HAVE_ARCH_KASAN if X86_64
diff --git a/arch/x86/include/asm/text-patching.h b/arch/x86/include/asm/text-patching.h
index b7421780e4e9..4cc18ba1b75e 100644
--- a/arch/x86/include/asm/text-patching.h
+++ b/arch/x86/include/asm/text-patching.h
@@ -44,6 +44,7 @@ extern void text_poke_early(void *addr, const void *opcode, size_t len);
extern void *text_poke(void *addr, const void *opcode, size_t len);
extern void text_poke_sync(void);
extern void *text_poke_kgdb(void *addr, const void *opcode, size_t len);
+extern void *text_poke_copy(void *addr, const void *opcode, size_t len);
extern int poke_int3_handler(struct pt_regs *regs);
extern void text_poke_bp(void *addr, const void *opcode, size_t len, const void *emulate);
diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
index b4470eabf151..b4e576600969 100644
--- a/arch/x86/kernel/alternative.c
+++ b/arch/x86/kernel/alternative.c
@@ -1102,6 +1102,40 @@ void *text_poke_kgdb(void *addr, const void *opcode, size_t len)
return __text_poke(addr, opcode, len);
}
+/**
+ * text_poke_copy - Copy instructions into (an unused part of) RX memory
+ * @addr: address to modify
+ * @opcode: source of the copy
+ * @len: length to copy, could be more than 2x PAGE_SIZE
+ *
+ * Not safe against concurrent execution; useful for JITs to dump
+ * new code blocks into unused regions of RX memory. Can be used in
+ * conjunction with synchronize_rcu_tasks() to wait for existing
+ * execution to quiesce after having made sure no existing function
+ * pointers are live.
+ */
+void *text_poke_copy(void *addr, const void *opcode, size_t len)
+{
+ unsigned long start = (unsigned long)addr;
+ size_t patched = 0;
+
+ if (WARN_ON_ONCE(core_kernel_text(start)))
+ return NULL;
+
+ mutex_lock(&text_mutex);
+ while (patched < len) {
+ unsigned long ptr = start + patched;
+ size_t s;
+
+ s = min_t(size_t, PAGE_SIZE * 2 - offset_in_page(ptr), len - patched);
+
+ __text_poke((void *)ptr, opcode + patched, s);
+ patched += s;
+ }
+ mutex_unlock(&text_mutex);
+ return addr;
+}
+
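The loop bound comes from __text_poke(), which patches through a temporary mapping of at most two pages; each iteration therefore copies no more than the remainder of the two-page window starting at the current pointer. The arithmetic, as a user-space sketch (assuming 4 KiB pages):

	#include <stddef.h>

	#define PAGE_SIZE		4096UL
	#define offset_in_page(p)	((unsigned long)(p) & (PAGE_SIZE - 1))

	/* Bytes a single __text_poke() call may cover starting at ptr. */
	static size_t poke_chunk(unsigned long ptr, size_t remaining)
	{
		size_t window = PAGE_SIZE * 2 - offset_in_page(ptr);

		return remaining < window ? remaining : window;
	}
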
static void do_sync_core(void *info)
{
sync_core();
diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
index 0ecb140864b2..6efbb87f65ed 100644
--- a/arch/x86/net/bpf_jit_comp.c
+++ b/arch/x86/net/bpf_jit_comp.c
@@ -330,8 +330,7 @@ static int emit_jump(u8 **pprog, void *func, void *ip)
}
static int __bpf_arch_text_poke(void *ip, enum bpf_text_poke_type t,
- void *old_addr, void *new_addr,
- const bool text_live)
+ void *old_addr, void *new_addr)
{
const u8 *nop_insn = x86_nops[5];
u8 old_insn[X86_PATCH_SIZE];
@@ -365,10 +364,7 @@ static int __bpf_arch_text_poke(void *ip, enum bpf_text_poke_type t,
goto out;
ret = 1;
if (memcmp(ip, new_insn, X86_PATCH_SIZE)) {
- if (text_live)
- text_poke_bp(ip, new_insn, X86_PATCH_SIZE, NULL);
- else
- memcpy(ip, new_insn, X86_PATCH_SIZE);
+ text_poke_bp(ip, new_insn, X86_PATCH_SIZE, NULL);
ret = 0;
}
out:
@@ -384,7 +380,7 @@ int bpf_arch_text_poke(void *ip, enum bpf_text_poke_type t,
/* BPF poking in modules is not supported */
return -EINVAL;
- return __bpf_arch_text_poke(ip, t, old_addr, new_addr, true);
+ return __bpf_arch_text_poke(ip, t, old_addr, new_addr);
}
#define EMIT_LFENCE() EMIT3(0x0F, 0xAE, 0xE8)
@@ -558,24 +554,15 @@ static void bpf_tail_call_direct_fixup(struct bpf_prog *prog)
mutex_lock(&array->aux->poke_mutex);
target = array->ptrs[poke->tail_call.key];
if (target) {
- /* Plain memcpy is used when image is not live yet
- * and still not locked as read-only. Once poke
- * location is active (poke->tailcall_target_stable),
- * any parallel bpf_arch_text_poke() might occur
- * still on the read-write image until we finally
- * locked it as read-only. Both modifications on
- * the given image are under text_mutex to avoid
- * interference.
- */
ret = __bpf_arch_text_poke(poke->tailcall_target,
BPF_MOD_JUMP, NULL,
(u8 *)target->bpf_func +
- poke->adj_off, false);
+ poke->adj_off);
BUG_ON(ret < 0);
ret = __bpf_arch_text_poke(poke->tailcall_bypass,
BPF_MOD_JUMP,
(u8 *)poke->tailcall_target +
- X86_PATCH_SIZE, NULL, false);
+ X86_PATCH_SIZE, NULL);
BUG_ON(ret < 0);
}
WRITE_ONCE(poke->tailcall_target_stable, true);
@@ -787,7 +774,6 @@ static int emit_atomic(u8 **pprog, u8 atomic_op,
/* emit opcode */
switch (atomic_op) {
case BPF_ADD:
- case BPF_SUB:
case BPF_AND:
case BPF_OR:
case BPF_XOR:
@@ -867,7 +853,7 @@ static void emit_nops(u8 **pprog, int len)
#define INSN_SZ_DIFF (((addrs[i] - addrs[i - 1]) - (prog - temp)))
-static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image,
+static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image, u8 *rw_image,
int oldproglen, struct jit_context *ctx, bool jmp_padding)
{
bool tail_call_reachable = bpf_prog->aux->tail_call_reachable;
@@ -894,8 +880,8 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image,
push_callee_regs(&prog, callee_regs_used);
ilen = prog - temp;
- if (image)
- memcpy(image + proglen, temp, ilen);
+ if (rw_image)
+ memcpy(rw_image + proglen, temp, ilen);
proglen += ilen;
addrs[0] = proglen;
prog = temp;
@@ -1324,6 +1310,9 @@ st: if (is_imm8(insn->off))
pr_err("extable->insn doesn't fit into 32-bit\n");
return -EFAULT;
}
+ /* switch ex to rw buffer for writes */
+ ex = (void *)rw_image + ((void *)ex - (void *)image);
+
ex->insn = delta;
ex->data = EX_TYPE_BPF;
@@ -1706,7 +1695,7 @@ emit_jmp:
pr_err("bpf_jit: fatal error\n");
return -EFAULT;
}
- memcpy(image + proglen, temp, ilen);
+ memcpy(rw_image + proglen, temp, ilen);
}
proglen += ilen;
addrs[i] = proglen;
@@ -2247,6 +2236,7 @@ int arch_prepare_bpf_dispatcher(void *image, s64 *funcs, int num_funcs)
}
struct x64_jit_data {
+ struct bpf_binary_header *rw_header;
struct bpf_binary_header *header;
int *addrs;
u8 *image;
@@ -2259,6 +2249,7 @@ struct x64_jit_data {
struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
{
+ struct bpf_binary_header *rw_header = NULL;
struct bpf_binary_header *header = NULL;
struct bpf_prog *tmp, *orig_prog = prog;
struct x64_jit_data *jit_data;
@@ -2267,6 +2258,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
bool tmp_blinded = false;
bool extra_pass = false;
bool padding = false;
+ u8 *rw_image = NULL;
u8 *image = NULL;
int *addrs;
int pass;
@@ -2302,6 +2294,8 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
oldproglen = jit_data->proglen;
image = jit_data->image;
header = jit_data->header;
+ rw_header = jit_data->rw_header;
+ rw_image = (void *)rw_header + ((void *)image - (void *)header);
extra_pass = true;
padding = true;
goto skip_init_addrs;
@@ -2332,13 +2326,22 @@ skip_init_addrs:
for (pass = 0; pass < MAX_PASSES || image; pass++) {
if (!padding && pass >= PADDING_PASSES)
padding = true;
- proglen = do_jit(prog, addrs, image, oldproglen, &ctx, padding);
+ proglen = do_jit(prog, addrs, image, rw_image, oldproglen, &ctx, padding);
if (proglen <= 0) {
out_image:
image = NULL;
- if (header)
- bpf_jit_binary_free(header);
+ if (header) {
+ bpf_arch_text_copy(&header->size, &rw_header->size,
+ sizeof(rw_header->size));
+ bpf_jit_binary_pack_free(header, rw_header);
+ }
+ /* Fall back to interpreter mode */
prog = orig_prog;
+ if (extra_pass) {
+ prog->bpf_func = NULL;
+ prog->jited = 0;
+ prog->jited_len = 0;
+ }
goto out_addrs;
}
if (image) {
@@ -2361,8 +2364,9 @@ out_image:
sizeof(struct exception_table_entry);
/* allocate module memory for x86 insns and extable */
- header = bpf_jit_binary_alloc(roundup(proglen, align) + extable_size,
- &image, align, jit_fill_hole);
+ header = bpf_jit_binary_pack_alloc(roundup(proglen, align) + extable_size,
+ &image, align, &rw_header, &rw_image,
+ jit_fill_hole);
if (!header) {
prog = orig_prog;
goto out_addrs;
@@ -2378,14 +2382,27 @@ out_image:
if (image) {
if (!prog->is_func || extra_pass) {
+ /*
+ * bpf_jit_binary_pack_finalize fails in two scenarios:
+ * 1) header is not pointing to proper module memory;
+ * 2) the arch doesn't support bpf_arch_text_copy().
+ *
+ * Both cases are serious bugs and justify WARN_ON.
+ */
+ if (WARN_ON(bpf_jit_binary_pack_finalize(prog, header, rw_header))) {
+ /* header has been freed */
+ header = NULL;
+ goto out_image;
+ }
+
bpf_tail_call_direct_fixup(prog);
- bpf_jit_binary_lock_ro(header);
} else {
jit_data->addrs = addrs;
jit_data->ctx = ctx;
jit_data->proglen = proglen;
jit_data->image = image;
jit_data->header = header;
+ jit_data->rw_header = rw_header;
}
prog->bpf_func = (void *)image;
prog->jited = 1;
@@ -2413,3 +2430,10 @@ bool bpf_jit_supports_kfunc_call(void)
{
return true;
}
+
+void *bpf_arch_text_copy(void *dst, void *src, size_t len)
+{
+ if (text_poke_copy(dst, src, len) == NULL)
+ return ERR_PTR(-EINVAL);
+ return dst;
+}
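
Taken together, the x86 changes move the JIT onto a two-buffer scheme: instructions are emitted into a writable shadow, and they only reach the read-only executable region through text_poke_copy(). The sequence, sketched with the names used in this patch (error handling elided):

	/* 1. Allocate the paired RX/RW buffers. */
	header = bpf_jit_binary_pack_alloc(roundup(proglen, align) + extable_size,
					   &image, align, &rw_header, &rw_image,
					   jit_fill_hole);

	/* 2. Emit into the RW shadow; 'image' is only used to compute
	 *    addresses in the final RX location. */
	proglen = do_jit(prog, addrs, image, rw_image, oldproglen, &ctx, padding);

	/* 3. Copy RW -> RX via bpf_arch_text_copy() and free the shadow. */
	bpf_jit_binary_pack_finalize(prog, header, rw_header);

	/* 4. Later patching (tail calls) goes through text_poke_bp() on the
	 *    live RX image. */
	bpf_tail_call_direct_fixup(prog);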
diff --git a/arch/xtensa/platforms/iss/network.c b/arch/xtensa/platforms/iss/network.c
index 962e5e145209..9fb99d18e3c2 100644
--- a/arch/xtensa/platforms/iss/network.c
+++ b/arch/xtensa/platforms/iss/network.c
@@ -304,7 +304,7 @@ static int iss_net_rx(struct net_device *dev)
lp->stats.rx_bytes += skb->len;
lp->stats.rx_packets++;
- netif_rx_ni(skb);
+ netif_rx(skb);
return pkt_len;
}
kfree_skb(skb);