author     Linus Torvalds <torvalds@linux-foundation.org>  2020-04-01 03:29:33 +0300
committer  Linus Torvalds <torvalds@linux-foundation.org>  2020-04-01 03:29:33 +0300
commit     29d9f30d4ce6c7a38745a54a8cddface10013490
tree       85649ba6a7b39203584d8db9365e03f64e62c136 /arch
parent     56a451b780676bc1cdac011735fe2869fa2e9abf
parent     7f80ccfe996871ca69648efee74a60ae7ad0dcd9
Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net-next
Pull networking updates from David Miller:
"Highlights:
1) Fix the iwlwifi regression, from Johannes Berg.
2) Support BSS coloring and 802.11 encapsulation offloading in
hardware, from John Crispin.
3) Fix some potential Spectre issues in qtnfmac, from Sergey
Matyukevich.
4) Add TTL decrement action to openvswitch, from Matteo Croce.
5) Allow parallelization in flow_action setup by not taking the RTNL
    mutex, from Vlad Buslov.
6) A lot of zero-length array to flexible-array conversions, from
Gustavo A. R. Silva.
7) Align XDP statistics names across several drivers for consistency,
from Lorenzo Bianconi.
8) Add various pieces of infrastructure for offloading conntrack, and
make use of it in mlx5 driver, from Paul Blakey.
9) Allow using listening sockets in BPF sockmap, from Jakub Sitnicki.
10) Lots of parallelization improvements during configuration changes
in mlxsw driver, from Ido Schimmel.
11) Add support to devlink for generic packet traps, which report
packets dropped during ACL processing. And use them in mlxsw
driver. From Jiri Pirko.
12) Support bcmgenet on ACPI, from Jeremy Linton.
13) Make BPF compatible with RT, from Thomas Gleixner, Alexei
    Starovoitov, and yours truly.
14) Support XDP meta-data in virtio_net, from Yuya Kusakabe.
15) Fix sysfs permissions when network devices change namespaces, from
Christian Brauner.
16) Add a flags element to ethtool_ops so that drivers can more simply
    indicate which coalescing parameters they actually support, letting
    the generic layer validate the user's ethtool request before it
    reaches the driver. Used in all drivers, from Jakub Kicinski (a
    driver-side sketch follows this message).
17) Offload FIFO qdisc in mlxsw, from Petr Machata.
18) Support UDP sockets in sockmap, from Lorenz Bauer.
19) Fix stretch ACK bugs in several TCP congestion control modules,
from Pengcheng Yang.
20) Support virtual functions in octeontx2 driver, from Tomasz
    Duszynski.
21) Add region operations for devlink and use it in ice driver to dump
NVM contents, from Jacob Keller.
22) Add support for hw offload of MACSEC, from Antoine Tenart.
23) Add support for BPF programs that can be attached to LSM hooks,
    from KP Singh (an example program follows this message).
24) Support for multiple paths, path managers, and counters in MPTCP,
    from Peter Krystad, Paolo Abeni, Florian Westphal, Davide Caratti,
    and others (a socket-level sketch follows this message).
25) More progress on adding the netlink interface to ethtool, from
Michal Kubecek"
* git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net-next: (2121 commits)
net: ipv6: rpl_iptunnel: Fix potential memory leak in rpl_do_srh_inline
cxgb4/chcr: nic-tls stats in ethtool
net: dsa: fix oops while probing Marvell DSA switches
net/bpfilter: remove superfluous testing message
net: macb: Fix handling of fixed-link node
net: dsa: ksz: Select KSZ protocol tag
netdevsim: dev: Fix memory leak in nsim_dev_take_snapshot_write
net: stmmac: add EHL 2.5Gbps PCI info and PCI ID
net: stmmac: add EHL PSE0 & PSE1 1Gbps PCI info and PCI ID
net: stmmac: create dwmac-intel.c to contain all Intel platform
net: dsa: bcm_sf2: Support specifying VLAN tag egress rule
net: dsa: bcm_sf2: Add support for matching VLAN TCI
net: dsa: bcm_sf2: Move writing of CFP_DATA(5) into slicing functions
net: dsa: bcm_sf2: Check earlier for FLOW_EXT and FLOW_MAC_EXT
net: dsa: bcm_sf2: Disable learning for ASP port
net: dsa: b53: Deny enslaving port 7 for 7278 into a bridge
net: dsa: b53: Prevent tagged VLAN on port 7 for 7278
net: dsa: b53: Restore VLAN entries upon (re)configuration
net: dsa: bcm_sf2: Fix overflow checks
hv_netvsc: Remove unnecessary round_up for recv_completion_cnt
...
Diffstat (limited to 'arch')
 arch/arm/boot/dts/imx6qdl-apalis.dtsi                                        |    2
 arch/arm64/boot/dts/ti/k3-am65-mcu.dtsi                                      |   49
 arch/arm64/boot/dts/ti/k3-am65.dtsi                                          |    1
 arch/arm64/boot/dts/ti/k3-am654-base-board.dts                               |   42
 arch/arm64/boot/dts/ti/k3-j721e-common-proc-board.dts                        |   43
 arch/arm64/boot/dts/ti/k3-j721e-mcu-wakeup.dtsi                              |   49
 arch/arm64/boot/dts/ti/k3-j721e.dtsi                                         |    1
 arch/arm64/configs/defconfig                                                 |    3
 arch/powerpc/kernel/vmlinux.lds.S                                            |    6
 arch/riscv/Kconfig                                                           |    2
 arch/riscv/net/Makefile                                                      |    9
 arch/riscv/net/bpf_jit.h                                                     |  514
 arch/riscv/net/bpf_jit_comp32.c                                              | 1310
 arch/riscv/net/bpf_jit_comp64.c (renamed from arch/riscv/net/bpf_jit_comp.c) |  605
 arch/riscv/net/bpf_jit_core.c                                                |  166
 arch/s390/include/asm/qdio.h                                                 |    9
 arch/um/drivers/vector_kern.c                                                |    1
 arch/x86/mm/init_32.c                                                        |   14
 arch/x86/net/bpf_jit_comp.c                                                  |  260
 19 files changed, 2392 insertions(+), 694 deletions(-)
diff --git a/arch/arm/boot/dts/imx6qdl-apalis.dtsi b/arch/arm/boot/dts/imx6qdl-apalis.dtsi
index 1b5bc6b5e806..347a5edc6927 100644
--- a/arch/arm/boot/dts/imx6qdl-apalis.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-apalis.dtsi
@@ -180,7 +180,7 @@
 &fec {
 	pinctrl-names = "default";
 	pinctrl-0 = <&pinctrl_enet>;
-	phy-mode = "rgmii";
+	phy-mode = "rgmii-id";
 	phy-handle = <&ethphy>;
 	phy-reset-duration = <10>;
 	phy-reset-gpios = <&gpio1 25 GPIO_ACTIVE_LOW>;
diff --git a/arch/arm64/boot/dts/ti/k3-am65-mcu.dtsi b/arch/arm64/boot/dts/ti/k3-am65-mcu.dtsi
index 92629cbdc184..cbf97b621931 100644
--- a/arch/arm64/boot/dts/ti/k3-am65-mcu.dtsi
+++ b/arch/arm64/boot/dts/ti/k3-am65-mcu.dtsi
@@ -187,4 +187,53 @@
 			#size-cells = <0>;
 		};
 	};
+
+	mcu_cpsw: ethernet@46000000 {
+		compatible = "ti,am654-cpsw-nuss";
+		#address-cells = <2>;
+		#size-cells = <2>;
+		reg = <0x0 0x46000000 0x0 0x200000>;
+		reg-names = "cpsw_nuss";
+		ranges = <0x0 0x0 0x0 0x46000000 0x0 0x200000>;
+		dma-coherent;
+		clocks = <&k3_clks 5 10>;
+		clock-names = "fck";
+		power-domains = <&k3_pds 5 TI_SCI_PD_EXCLUSIVE>;
+
+		dmas = <&mcu_udmap 0xf000>,
+		       <&mcu_udmap 0xf001>,
+		       <&mcu_udmap 0xf002>,
+		       <&mcu_udmap 0xf003>,
+		       <&mcu_udmap 0xf004>,
+		       <&mcu_udmap 0xf005>,
+		       <&mcu_udmap 0xf006>,
+		       <&mcu_udmap 0xf007>,
+		       <&mcu_udmap 0x7000>;
+		dma-names = "tx0", "tx1", "tx2", "tx3",
+			    "tx4", "tx5", "tx6", "tx7",
+			    "rx";
+
+		ethernet-ports {
+			#address-cells = <1>;
+			#size-cells = <0>;
+
+			cpsw_port1: port@1 {
+				reg = <1>;
+				ti,mac-only;
+				label = "port1";
+				ti,syscon-efuse = <&mcu_conf 0x200>;
+				phys = <&phy_gmii_sel 1>;
+			};
+		};
+
+		davinci_mdio: mdio@f00 {
+			compatible = "ti,cpsw-mdio","ti,davinci_mdio";
+			reg = <0x0 0xf00 0x0 0x100>;
+			#address-cells = <1>;
+			#size-cells = <0>;
+			clocks = <&k3_clks 5 10>;
+			clock-names = "fck";
+			bus_freq = <1000000>;
+		};
+	};
 };
diff --git a/arch/arm64/boot/dts/ti/k3-am65.dtsi b/arch/arm64/boot/dts/ti/k3-am65.dtsi
index aea36e29dd32..5be75e430965 100644
--- a/arch/arm64/boot/dts/ti/k3-am65.dtsi
+++ b/arch/arm64/boot/dts/ti/k3-am65.dtsi
@@ -30,6 +30,7 @@
 		i2c3 = &main_i2c1;
 		i2c4 = &main_i2c2;
 		i2c5 = &main_i2c3;
+		ethernet0 = &cpsw_port1;
 	};
 
 	chosen { };
diff --git a/arch/arm64/boot/dts/ti/k3-am654-base-board.dts b/arch/arm64/boot/dts/ti/k3-am654-base-board.dts
index 1700996800eb..2f3d3316a1cf 100644
--- a/arch/arm64/boot/dts/ti/k3-am654-base-board.dts
+++ b/arch/arm64/boot/dts/ti/k3-am654-base-board.dts
@@ -7,6 +7,7 @@
 
 #include "k3-am654.dtsi"
 #include <dt-bindings/input/input.h>
+#include <dt-bindings/net/ti-dp83867.h>
 
 / {
 	compatible = "ti,am654-evm", "ti,am654";
@@ -95,7 +96,30 @@
 	wkup_pca554_default: wkup_pca554_default {
 		pinctrl-single,pins = <
 			AM65X_WKUP_IOPAD(0x0034, PIN_INPUT, 7) /* (T1) MCU_OSPI1_CLK.WKUP_GPIO0_25 */
+		>;
+	};
+
+	mcu_cpsw_pins_default: mcu_cpsw_pins_default {
+		pinctrl-single,pins = <
+			AM65X_WKUP_IOPAD(0x0058, PIN_OUTPUT, 0) /* (N4) MCU_RGMII1_TX_CTL */
+			AM65X_WKUP_IOPAD(0x005c, PIN_INPUT, 0) /* (N5) MCU_RGMII1_RX_CTL */
+			AM65X_WKUP_IOPAD(0x0060, PIN_OUTPUT, 0) /* (M2) MCU_RGMII1_TD3 */
+			AM65X_WKUP_IOPAD(0x0064, PIN_OUTPUT, 0) /* (M3) MCU_RGMII1_TD2 */
+			AM65X_WKUP_IOPAD(0x0068, PIN_OUTPUT, 0) /* (M4) MCU_RGMII1_TD1 */
+			AM65X_WKUP_IOPAD(0x006c, PIN_OUTPUT, 0) /* (M5) MCU_RGMII1_TD0 */
+			AM65X_WKUP_IOPAD(0x0078, PIN_INPUT, 0) /* (L2) MCU_RGMII1_RD3 */
+			AM65X_WKUP_IOPAD(0x007c, PIN_INPUT, 0) /* (L5) MCU_RGMII1_RD2 */
+			AM65X_WKUP_IOPAD(0x0080, PIN_INPUT, 0) /* (M6) MCU_RGMII1_RD1 */
+			AM65X_WKUP_IOPAD(0x0084, PIN_INPUT, 0) /* (L6) MCU_RGMII1_RD0 */
+			AM65X_WKUP_IOPAD(0x0070, PIN_INPUT, 0) /* (N1) MCU_RGMII1_TXC */
+			AM65X_WKUP_IOPAD(0x0074, PIN_INPUT, 0) /* (M1) MCU_RGMII1_RXC */
+		>;
+	};
+	mcu_mdio_pins_default: mcu_mdio1_pins_default {
+		pinctrl-single,pins = <
+			AM65X_WKUP_IOPAD(0x008c, PIN_OUTPUT, 0) /* (L1) MCU_MDIO0_MDC */
+			AM65X_WKUP_IOPAD(0x0088, PIN_INPUT, 0) /* (L4) MCU_MDIO0_MDIO */
 		>;
 	};
 };
@@ -419,3 +443,21 @@
 		data-lanes = <1 2>;
 	};
 };
+
+&mcu_cpsw {
+	pinctrl-names = "default";
+	pinctrl-0 = <&mcu_cpsw_pins_default &mcu_mdio_pins_default>;
+};
+
+&davinci_mdio {
+	phy0: ethernet-phy@0 {
+		reg = <0>;
+		ti,rx-internal-delay = <DP83867_RGMIIDCTL_2_00_NS>;
+		ti,fifo-depth = <DP83867_PHYCR_FIFO_DEPTH_4_B_NIB>;
+	};
+};
+
+&cpsw_port1 {
+	phy-mode = "rgmii-rxid";
+	phy-handle = <&phy0>;
+};
diff --git a/arch/arm64/boot/dts/ti/k3-j721e-common-proc-board.dts b/arch/arm64/boot/dts/ti/k3-j721e-common-proc-board.dts
index 7a5c3d4adadd..98e5e17e3ff7 100644
--- a/arch/arm64/boot/dts/ti/k3-j721e-common-proc-board.dts
+++ b/arch/arm64/boot/dts/ti/k3-j721e-common-proc-board.dts
@@ -8,6 +8,7 @@
 #include "k3-j721e-som-p0.dtsi"
 #include <dt-bindings/gpio/gpio.h>
 #include <dt-bindings/input/input.h>
+#include <dt-bindings/net/ti-dp83867.h>
 
 / {
 	chosen {
@@ -128,6 +129,30 @@
 			J721E_WKUP_IOPAD(0x38, PIN_INPUT, 0) /* (A23) MCU_OSPI1_LBCLKO */
 		>;
 	};
+
+	mcu_cpsw_pins_default: mcu_cpsw_pins_default {
+		pinctrl-single,pins = <
+			J721E_WKUP_IOPAD(0x0058, PIN_OUTPUT, 0) /* MCU_RGMII1_TX_CTL */
+			J721E_WKUP_IOPAD(0x005c, PIN_INPUT, 0) /* MCU_RGMII1_RX_CTL */
+			J721E_WKUP_IOPAD(0x0060, PIN_OUTPUT, 0) /* MCU_RGMII1_TD3 */
+			J721E_WKUP_IOPAD(0x0064, PIN_OUTPUT, 0) /* MCU_RGMII1_TD2 */
+			J721E_WKUP_IOPAD(0x0068, PIN_OUTPUT, 0) /* MCU_RGMII1_TD1 */
+			J721E_WKUP_IOPAD(0x006c, PIN_OUTPUT, 0) /* MCU_RGMII1_TD0 */
+			J721E_WKUP_IOPAD(0x0078, PIN_INPUT, 0) /* MCU_RGMII1_RD3 */
+			J721E_WKUP_IOPAD(0x007c, PIN_INPUT, 0) /* MCU_RGMII1_RD2 */
+			J721E_WKUP_IOPAD(0x0080, PIN_INPUT, 0) /* MCU_RGMII1_RD1 */
+			J721E_WKUP_IOPAD(0x0084, PIN_INPUT, 0) /* MCU_RGMII1_RD0 */
+			J721E_WKUP_IOPAD(0x0070, PIN_INPUT, 0) /* MCU_RGMII1_TXC */
+			J721E_WKUP_IOPAD(0x0074, PIN_INPUT, 0) /* MCU_RGMII1_RXC */
+		>;
+	};
+
+	mcu_mdio_pins_default: mcu_mdio1_pins_default {
+		pinctrl-single,pins = <
+			J721E_WKUP_IOPAD(0x008c, PIN_OUTPUT, 0) /* MCU_MDIO0_MDC */
+			J721E_WKUP_IOPAD(0x0088, PIN_INPUT, 0) /* MCU_MDIO0_MDIO */
+		>;
+	};
 };
 
 &wkup_uart0 {
@@ -429,3 +454,21 @@
 		#gpio-cells = <2>;
 	};
 };
+
+&mcu_cpsw {
+	pinctrl-names = "default";
+	pinctrl-0 = <&mcu_cpsw_pins_default &mcu_mdio_pins_default>;
+};
+
+&davinci_mdio {
+	phy0: ethernet-phy@0 {
+		reg = <0>;
+		ti,rx-internal-delay = <DP83867_RGMIIDCTL_2_00_NS>;
+		ti,fifo-depth = <DP83867_PHYCR_FIFO_DEPTH_4_B_NIB>;
+	};
+};
+
+&cpsw_port1 {
+	phy-mode = "rgmii-rxid";
+	phy-handle = <&phy0>;
+};
diff --git a/arch/arm64/boot/dts/ti/k3-j721e-mcu-wakeup.dtsi b/arch/arm64/boot/dts/ti/k3-j721e-mcu-wakeup.dtsi
index 16c874bfd49a..bfe91f2a52cb 100644
--- a/arch/arm64/boot/dts/ti/k3-j721e-mcu-wakeup.dtsi
+++ b/arch/arm64/boot/dts/ti/k3-j721e-mcu-wakeup.dtsi
@@ -270,4 +270,53 @@
 		ti,sci-rm-range-rflow = <0x00>; /* GP RFLOW */
 	};
 };
+
+	mcu_cpsw: ethernet@46000000 {
+		compatible = "ti,j721e-cpsw-nuss";
+		#address-cells = <2>;
+		#size-cells = <2>;
+		reg = <0x0 0x46000000 0x0 0x200000>;
+		reg-names = "cpsw_nuss";
+		ranges = <0x0 0x0 0x0 0x46000000 0x0 0x200000>;
+		dma-coherent;
+		clocks = <&k3_clks 18 22>;
+		clock-names = "fck";
+		power-domains = <&k3_pds 18 TI_SCI_PD_EXCLUSIVE>;
+
+		dmas = <&mcu_udmap 0xf000>,
+		       <&mcu_udmap 0xf001>,
+		       <&mcu_udmap 0xf002>,
+		       <&mcu_udmap 0xf003>,
+		       <&mcu_udmap 0xf004>,
+		       <&mcu_udmap 0xf005>,
+		       <&mcu_udmap 0xf006>,
+		       <&mcu_udmap 0xf007>,
+		       <&mcu_udmap 0x7000>;
+		dma-names = "tx0", "tx1", "tx2", "tx3",
+			    "tx4", "tx5", "tx6", "tx7",
+			    "rx";
+
+		ethernet-ports {
+			#address-cells = <1>;
+			#size-cells = <0>;
+
+			cpsw_port1: port@1 {
+				reg = <1>;
+				ti,mac-only;
+				label = "port1";
+				ti,syscon-efuse = <&mcu_conf 0x200>;
+				phys = <&phy_gmii_sel 1>;
+			};
+		};
+
+		davinci_mdio: mdio@f00 {
+			compatible = "ti,cpsw-mdio","ti,davinci_mdio";
+			reg = <0x0 0xf00 0x0 0x100>;
+			#address-cells = <1>;
+			#size-cells = <0>;
+			clocks = <&k3_clks 18 22>;
+			clock-names = "fck";
+			bus_freq = <1000000>;
+		};
+	};
 };
diff --git a/arch/arm64/boot/dts/ti/k3-j721e.dtsi b/arch/arm64/boot/dts/ti/k3-j721e.dtsi
index 027bd1febafa..2f9a56d9b114 100644
--- a/arch/arm64/boot/dts/ti/k3-j721e.dtsi
+++ b/arch/arm64/boot/dts/ti/k3-j721e.dtsi
@@ -30,6 +30,7 @@
 		serial9 = &main_uart7;
 		serial10 = &main_uart8;
 		serial11 = &main_uart9;
+		ethernet0 = &cpsw_port1;
 	};
 
 	chosen { };
diff --git a/arch/arm64/configs/defconfig b/arch/arm64/configs/defconfig
index e7573289a66f..a6c0d02d9928 100644
--- a/arch/arm64/configs/defconfig
+++ b/arch/arm64/configs/defconfig
@@ -284,6 +284,7 @@ CONFIG_SMSC911X=y
 CONFIG_SNI_AVE=y
 CONFIG_SNI_NETSEC=y
 CONFIG_STMMAC_ETH=m
+CONFIG_TI_K3_AM65_CPSW_NUSS=y
 CONFIG_MDIO_BUS_MUX_MMIOREG=y
 CONFIG_MARVELL_PHY=m
 CONFIG_MARVELL_10G_PHY=m
@@ -699,6 +700,8 @@ CONFIG_QCOM_HIDMA_MGMT=y
 CONFIG_QCOM_HIDMA=y
 CONFIG_RCAR_DMAC=y
 CONFIG_RENESAS_USB_DMAC=m
+CONFIG_TI_K3_UDMA=y
+CONFIG_TI_K3_UDMA_GLUE_LAYER=y
 CONFIG_VFIO=y
 CONFIG_VFIO_PCI=y
 CONFIG_VIRTIO_PCI=y
diff --git a/arch/powerpc/kernel/vmlinux.lds.S b/arch/powerpc/kernel/vmlinux.lds.S
index a32d478a7f41..b4c89a1acebb 100644
--- a/arch/powerpc/kernel/vmlinux.lds.S
+++ b/arch/powerpc/kernel/vmlinux.lds.S
@@ -303,12 +303,6 @@ SECTIONS
 		*(.branch_lt)
 	}
 
-#ifdef CONFIG_DEBUG_INFO_BTF
-	.BTF : AT(ADDR(.BTF) - LOAD_OFFSET) {
-		*(.BTF)
-	}
-#endif
-
 	.opd : AT(ADDR(.opd) - LOAD_OFFSET) {
 		__start_opd = .;
 		KEEP(*(.opd))
diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig
index cd5db57bfd41..8672e77a5b7a 100644
--- a/arch/riscv/Kconfig
+++ b/arch/riscv/Kconfig
@@ -55,7 +55,7 @@ config RISCV
 	select ARCH_HAS_PTE_SPECIAL
 	select ARCH_HAS_MMIOWB
 	select ARCH_HAS_DEBUG_VIRTUAL
-	select HAVE_EBPF_JIT if 64BIT
+	select HAVE_EBPF_JIT
 	select EDAC_SUPPORT
 	select ARCH_HAS_GIGANTIC_PAGE
 	select ARCH_WANT_HUGE_PMD_SHARE if 64BIT
diff --git a/arch/riscv/net/Makefile b/arch/riscv/net/Makefile
index ec5b14763316..9a1e5f0a94e5 100644
--- a/arch/riscv/net/Makefile
+++ b/arch/riscv/net/Makefile
@@ -1,2 +1,9 @@
 # SPDX-License-Identifier: GPL-2.0-only
-obj-$(CONFIG_BPF_JIT) += bpf_jit_comp.o
+
+obj-$(CONFIG_BPF_JIT) += bpf_jit_core.o
+
+ifeq ($(CONFIG_ARCH_RV64I),y)
+	obj-$(CONFIG_BPF_JIT) += bpf_jit_comp64.o
+else
+	obj-$(CONFIG_BPF_JIT) += bpf_jit_comp32.o
+endif
diff --git a/arch/riscv/net/bpf_jit.h b/arch/riscv/net/bpf_jit.h
new file mode 100644
index 000000000000..20e235d06f66
--- /dev/null
+++ b/arch/riscv/net/bpf_jit.h
@@ -0,0 +1,514 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Common functionality for RV32 and RV64 BPF JIT compilers
+ *
+ * Copyright (c) 2019 Björn Töpel <bjorn.topel@gmail.com>
+ *
+ */
+
+#ifndef _BPF_JIT_H
+#define _BPF_JIT_H
+
+#include <linux/bpf.h>
+#include <linux/filter.h>
+#include <asm/cacheflush.h>
+
+enum {
+	RV_REG_ZERO = 0, /* The constant value 0 */
+	RV_REG_RA = 1, /* Return address */
+	RV_REG_SP = 2, /* Stack pointer */
+	RV_REG_GP = 3, /* Global pointer */
+	RV_REG_TP = 4, /* Thread pointer */
+	RV_REG_T0 = 5, /* Temporaries */
+
RV_REG_T1 = 6, + RV_REG_T2 = 7, + RV_REG_FP = 8, /* Saved register/frame pointer */ + RV_REG_S1 = 9, /* Saved register */ + RV_REG_A0 = 10, /* Function argument/return values */ + RV_REG_A1 = 11, /* Function arguments */ + RV_REG_A2 = 12, + RV_REG_A3 = 13, + RV_REG_A4 = 14, + RV_REG_A5 = 15, + RV_REG_A6 = 16, + RV_REG_A7 = 17, + RV_REG_S2 = 18, /* Saved registers */ + RV_REG_S3 = 19, + RV_REG_S4 = 20, + RV_REG_S5 = 21, + RV_REG_S6 = 22, + RV_REG_S7 = 23, + RV_REG_S8 = 24, + RV_REG_S9 = 25, + RV_REG_S10 = 26, + RV_REG_S11 = 27, + RV_REG_T3 = 28, /* Temporaries */ + RV_REG_T4 = 29, + RV_REG_T5 = 30, + RV_REG_T6 = 31, +}; + +struct rv_jit_context { + struct bpf_prog *prog; + u32 *insns; /* RV insns */ + int ninsns; + int epilogue_offset; + int *offset; /* BPF to RV */ + unsigned long flags; + int stack_size; +}; + +struct rv_jit_data { + struct bpf_binary_header *header; + u8 *image; + struct rv_jit_context ctx; +}; + +static inline void bpf_fill_ill_insns(void *area, unsigned int size) +{ + memset(area, 0, size); +} + +static inline void bpf_flush_icache(void *start, void *end) +{ + flush_icache_range((unsigned long)start, (unsigned long)end); +} + +static inline void emit(const u32 insn, struct rv_jit_context *ctx) +{ + if (ctx->insns) + ctx->insns[ctx->ninsns] = insn; + + ctx->ninsns++; +} + +static inline int epilogue_offset(struct rv_jit_context *ctx) +{ + int to = ctx->epilogue_offset, from = ctx->ninsns; + + return (to - from) << 2; +} + +/* Return -1 or inverted cond. */ +static inline int invert_bpf_cond(u8 cond) +{ + switch (cond) { + case BPF_JEQ: + return BPF_JNE; + case BPF_JGT: + return BPF_JLE; + case BPF_JLT: + return BPF_JGE; + case BPF_JGE: + return BPF_JLT; + case BPF_JLE: + return BPF_JGT; + case BPF_JNE: + return BPF_JEQ; + case BPF_JSGT: + return BPF_JSLE; + case BPF_JSLT: + return BPF_JSGE; + case BPF_JSGE: + return BPF_JSLT; + case BPF_JSLE: + return BPF_JSGT; + } + return -1; +} + +static inline bool is_12b_int(long val) +{ + return -(1L << 11) <= val && val < (1L << 11); +} + +static inline int is_12b_check(int off, int insn) +{ + if (!is_12b_int(off)) { + pr_err("bpf-jit: insn=%d 12b < offset=%d not supported yet!\n", + insn, (int)off); + return -1; + } + return 0; +} + +static inline bool is_13b_int(long val) +{ + return -(1L << 12) <= val && val < (1L << 12); +} + +static inline bool is_21b_int(long val) +{ + return -(1L << 20) <= val && val < (1L << 20); +} + +static inline int rv_offset(int insn, int off, struct rv_jit_context *ctx) +{ + int from, to; + + off++; /* BPF branch is from PC+1, RV is from PC */ + from = (insn > 0) ? ctx->offset[insn - 1] : 0; + to = (insn + off > 0) ? ctx->offset[insn + off - 1] : 0; + return (to - from) << 2; +} + +/* Instruction formats. 
*/ + +static inline u32 rv_r_insn(u8 funct7, u8 rs2, u8 rs1, u8 funct3, u8 rd, + u8 opcode) +{ + return (funct7 << 25) | (rs2 << 20) | (rs1 << 15) | (funct3 << 12) | + (rd << 7) | opcode; +} + +static inline u32 rv_i_insn(u16 imm11_0, u8 rs1, u8 funct3, u8 rd, u8 opcode) +{ + return (imm11_0 << 20) | (rs1 << 15) | (funct3 << 12) | (rd << 7) | + opcode; +} + +static inline u32 rv_s_insn(u16 imm11_0, u8 rs2, u8 rs1, u8 funct3, u8 opcode) +{ + u8 imm11_5 = imm11_0 >> 5, imm4_0 = imm11_0 & 0x1f; + + return (imm11_5 << 25) | (rs2 << 20) | (rs1 << 15) | (funct3 << 12) | + (imm4_0 << 7) | opcode; +} + +static inline u32 rv_b_insn(u16 imm12_1, u8 rs2, u8 rs1, u8 funct3, u8 opcode) +{ + u8 imm12 = ((imm12_1 & 0x800) >> 5) | ((imm12_1 & 0x3f0) >> 4); + u8 imm4_1 = ((imm12_1 & 0xf) << 1) | ((imm12_1 & 0x400) >> 10); + + return (imm12 << 25) | (rs2 << 20) | (rs1 << 15) | (funct3 << 12) | + (imm4_1 << 7) | opcode; +} + +static inline u32 rv_u_insn(u32 imm31_12, u8 rd, u8 opcode) +{ + return (imm31_12 << 12) | (rd << 7) | opcode; +} + +static inline u32 rv_j_insn(u32 imm20_1, u8 rd, u8 opcode) +{ + u32 imm; + + imm = (imm20_1 & 0x80000) | ((imm20_1 & 0x3ff) << 9) | + ((imm20_1 & 0x400) >> 2) | ((imm20_1 & 0x7f800) >> 11); + + return (imm << 12) | (rd << 7) | opcode; +} + +static inline u32 rv_amo_insn(u8 funct5, u8 aq, u8 rl, u8 rs2, u8 rs1, + u8 funct3, u8 rd, u8 opcode) +{ + u8 funct7 = (funct5 << 2) | (aq << 1) | rl; + + return rv_r_insn(funct7, rs2, rs1, funct3, rd, opcode); +} + +/* Instructions shared by both RV32 and RV64. */ + +static inline u32 rv_addi(u8 rd, u8 rs1, u16 imm11_0) +{ + return rv_i_insn(imm11_0, rs1, 0, rd, 0x13); +} + +static inline u32 rv_andi(u8 rd, u8 rs1, u16 imm11_0) +{ + return rv_i_insn(imm11_0, rs1, 7, rd, 0x13); +} + +static inline u32 rv_ori(u8 rd, u8 rs1, u16 imm11_0) +{ + return rv_i_insn(imm11_0, rs1, 6, rd, 0x13); +} + +static inline u32 rv_xori(u8 rd, u8 rs1, u16 imm11_0) +{ + return rv_i_insn(imm11_0, rs1, 4, rd, 0x13); +} + +static inline u32 rv_slli(u8 rd, u8 rs1, u16 imm11_0) +{ + return rv_i_insn(imm11_0, rs1, 1, rd, 0x13); +} + +static inline u32 rv_srli(u8 rd, u8 rs1, u16 imm11_0) +{ + return rv_i_insn(imm11_0, rs1, 5, rd, 0x13); +} + +static inline u32 rv_srai(u8 rd, u8 rs1, u16 imm11_0) +{ + return rv_i_insn(0x400 | imm11_0, rs1, 5, rd, 0x13); +} + +static inline u32 rv_lui(u8 rd, u32 imm31_12) +{ + return rv_u_insn(imm31_12, rd, 0x37); +} + +static inline u32 rv_auipc(u8 rd, u32 imm31_12) +{ + return rv_u_insn(imm31_12, rd, 0x17); +} + +static inline u32 rv_add(u8 rd, u8 rs1, u8 rs2) +{ + return rv_r_insn(0, rs2, rs1, 0, rd, 0x33); +} + +static inline u32 rv_sub(u8 rd, u8 rs1, u8 rs2) +{ + return rv_r_insn(0x20, rs2, rs1, 0, rd, 0x33); +} + +static inline u32 rv_sltu(u8 rd, u8 rs1, u8 rs2) +{ + return rv_r_insn(0, rs2, rs1, 3, rd, 0x33); +} + +static inline u32 rv_and(u8 rd, u8 rs1, u8 rs2) +{ + return rv_r_insn(0, rs2, rs1, 7, rd, 0x33); +} + +static inline u32 rv_or(u8 rd, u8 rs1, u8 rs2) +{ + return rv_r_insn(0, rs2, rs1, 6, rd, 0x33); +} + +static inline u32 rv_xor(u8 rd, u8 rs1, u8 rs2) +{ + return rv_r_insn(0, rs2, rs1, 4, rd, 0x33); +} + +static inline u32 rv_sll(u8 rd, u8 rs1, u8 rs2) +{ + return rv_r_insn(0, rs2, rs1, 1, rd, 0x33); +} + +static inline u32 rv_srl(u8 rd, u8 rs1, u8 rs2) +{ + return rv_r_insn(0, rs2, rs1, 5, rd, 0x33); +} + +static inline u32 rv_sra(u8 rd, u8 rs1, u8 rs2) +{ + return rv_r_insn(0x20, rs2, rs1, 5, rd, 0x33); +} + +static inline u32 rv_mul(u8 rd, u8 rs1, u8 rs2) +{ + return rv_r_insn(1, rs2, rs1, 0, rd, 0x33); +} + 
+static inline u32 rv_mulhu(u8 rd, u8 rs1, u8 rs2) +{ + return rv_r_insn(1, rs2, rs1, 3, rd, 0x33); +} + +static inline u32 rv_divu(u8 rd, u8 rs1, u8 rs2) +{ + return rv_r_insn(1, rs2, rs1, 5, rd, 0x33); +} + +static inline u32 rv_remu(u8 rd, u8 rs1, u8 rs2) +{ + return rv_r_insn(1, rs2, rs1, 7, rd, 0x33); +} + +static inline u32 rv_jal(u8 rd, u32 imm20_1) +{ + return rv_j_insn(imm20_1, rd, 0x6f); +} + +static inline u32 rv_jalr(u8 rd, u8 rs1, u16 imm11_0) +{ + return rv_i_insn(imm11_0, rs1, 0, rd, 0x67); +} + +static inline u32 rv_beq(u8 rs1, u8 rs2, u16 imm12_1) +{ + return rv_b_insn(imm12_1, rs2, rs1, 0, 0x63); +} + +static inline u32 rv_bne(u8 rs1, u8 rs2, u16 imm12_1) +{ + return rv_b_insn(imm12_1, rs2, rs1, 1, 0x63); +} + +static inline u32 rv_bltu(u8 rs1, u8 rs2, u16 imm12_1) +{ + return rv_b_insn(imm12_1, rs2, rs1, 6, 0x63); +} + +static inline u32 rv_bgtu(u8 rs1, u8 rs2, u16 imm12_1) +{ + return rv_bltu(rs2, rs1, imm12_1); +} + +static inline u32 rv_bgeu(u8 rs1, u8 rs2, u16 imm12_1) +{ + return rv_b_insn(imm12_1, rs2, rs1, 7, 0x63); +} + +static inline u32 rv_bleu(u8 rs1, u8 rs2, u16 imm12_1) +{ + return rv_bgeu(rs2, rs1, imm12_1); +} + +static inline u32 rv_blt(u8 rs1, u8 rs2, u16 imm12_1) +{ + return rv_b_insn(imm12_1, rs2, rs1, 4, 0x63); +} + +static inline u32 rv_bgt(u8 rs1, u8 rs2, u16 imm12_1) +{ + return rv_blt(rs2, rs1, imm12_1); +} + +static inline u32 rv_bge(u8 rs1, u8 rs2, u16 imm12_1) +{ + return rv_b_insn(imm12_1, rs2, rs1, 5, 0x63); +} + +static inline u32 rv_ble(u8 rs1, u8 rs2, u16 imm12_1) +{ + return rv_bge(rs2, rs1, imm12_1); +} + +static inline u32 rv_lw(u8 rd, u16 imm11_0, u8 rs1) +{ + return rv_i_insn(imm11_0, rs1, 2, rd, 0x03); +} + +static inline u32 rv_lbu(u8 rd, u16 imm11_0, u8 rs1) +{ + return rv_i_insn(imm11_0, rs1, 4, rd, 0x03); +} + +static inline u32 rv_lhu(u8 rd, u16 imm11_0, u8 rs1) +{ + return rv_i_insn(imm11_0, rs1, 5, rd, 0x03); +} + +static inline u32 rv_sb(u8 rs1, u16 imm11_0, u8 rs2) +{ + return rv_s_insn(imm11_0, rs2, rs1, 0, 0x23); +} + +static inline u32 rv_sh(u8 rs1, u16 imm11_0, u8 rs2) +{ + return rv_s_insn(imm11_0, rs2, rs1, 1, 0x23); +} + +static inline u32 rv_sw(u8 rs1, u16 imm11_0, u8 rs2) +{ + return rv_s_insn(imm11_0, rs2, rs1, 2, 0x23); +} + +static inline u32 rv_amoadd_w(u8 rd, u8 rs2, u8 rs1, u8 aq, u8 rl) +{ + return rv_amo_insn(0, aq, rl, rs2, rs1, 2, rd, 0x2f); +} + +/* + * RV64-only instructions. + * + * These instructions are not available on RV32. Wrap them below a #if to + * ensure that the RV32 JIT doesn't emit any of these instructions. 
+ */ + +#if __riscv_xlen == 64 + +static inline u32 rv_addiw(u8 rd, u8 rs1, u16 imm11_0) +{ + return rv_i_insn(imm11_0, rs1, 0, rd, 0x1b); +} + +static inline u32 rv_slliw(u8 rd, u8 rs1, u16 imm11_0) +{ + return rv_i_insn(imm11_0, rs1, 1, rd, 0x1b); +} + +static inline u32 rv_srliw(u8 rd, u8 rs1, u16 imm11_0) +{ + return rv_i_insn(imm11_0, rs1, 5, rd, 0x1b); +} + +static inline u32 rv_sraiw(u8 rd, u8 rs1, u16 imm11_0) +{ + return rv_i_insn(0x400 | imm11_0, rs1, 5, rd, 0x1b); +} + +static inline u32 rv_addw(u8 rd, u8 rs1, u8 rs2) +{ + return rv_r_insn(0, rs2, rs1, 0, rd, 0x3b); +} + +static inline u32 rv_subw(u8 rd, u8 rs1, u8 rs2) +{ + return rv_r_insn(0x20, rs2, rs1, 0, rd, 0x3b); +} + +static inline u32 rv_sllw(u8 rd, u8 rs1, u8 rs2) +{ + return rv_r_insn(0, rs2, rs1, 1, rd, 0x3b); +} + +static inline u32 rv_srlw(u8 rd, u8 rs1, u8 rs2) +{ + return rv_r_insn(0, rs2, rs1, 5, rd, 0x3b); +} + +static inline u32 rv_sraw(u8 rd, u8 rs1, u8 rs2) +{ + return rv_r_insn(0x20, rs2, rs1, 5, rd, 0x3b); +} + +static inline u32 rv_mulw(u8 rd, u8 rs1, u8 rs2) +{ + return rv_r_insn(1, rs2, rs1, 0, rd, 0x3b); +} + +static inline u32 rv_divuw(u8 rd, u8 rs1, u8 rs2) +{ + return rv_r_insn(1, rs2, rs1, 5, rd, 0x3b); +} + +static inline u32 rv_remuw(u8 rd, u8 rs1, u8 rs2) +{ + return rv_r_insn(1, rs2, rs1, 7, rd, 0x3b); +} + +static inline u32 rv_ld(u8 rd, u16 imm11_0, u8 rs1) +{ + return rv_i_insn(imm11_0, rs1, 3, rd, 0x03); +} + +static inline u32 rv_lwu(u8 rd, u16 imm11_0, u8 rs1) +{ + return rv_i_insn(imm11_0, rs1, 6, rd, 0x03); +} + +static inline u32 rv_sd(u8 rs1, u16 imm11_0, u8 rs2) +{ + return rv_s_insn(imm11_0, rs2, rs1, 3, 0x23); +} + +static inline u32 rv_amoadd_d(u8 rd, u8 rs2, u8 rs1, u8 aq, u8 rl) +{ + return rv_amo_insn(0, aq, rl, rs2, rs1, 3, rd, 0x2f); +} + +#endif /* __riscv_xlen == 64 */ + +void bpf_jit_build_prologue(struct rv_jit_context *ctx); +void bpf_jit_build_epilogue(struct rv_jit_context *ctx); + +int bpf_jit_emit_insn(const struct bpf_insn *insn, struct rv_jit_context *ctx, + bool extra_pass); + +#endif /* _BPF_JIT_H */ diff --git a/arch/riscv/net/bpf_jit_comp32.c b/arch/riscv/net/bpf_jit_comp32.c new file mode 100644 index 000000000000..302934177760 --- /dev/null +++ b/arch/riscv/net/bpf_jit_comp32.c @@ -0,0 +1,1310 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * BPF JIT compiler for RV32G + * + * Copyright (c) 2020 Luke Nelson <luke.r.nels@gmail.com> + * Copyright (c) 2020 Xi Wang <xi.wang@gmail.com> + * + * The code is based on the BPF JIT compiler for RV64G by Björn Töpel and + * the BPF JIT compiler for 32-bit ARM by Shubham Bansal and Mircea Gherzan. + */ + +#include <linux/bpf.h> +#include <linux/filter.h> +#include "bpf_jit.h" + +enum { + /* Stack layout - these are offsets from (top of stack - 4). */ + BPF_R6_HI, + BPF_R6_LO, + BPF_R7_HI, + BPF_R7_LO, + BPF_R8_HI, + BPF_R8_LO, + BPF_R9_HI, + BPF_R9_LO, + BPF_AX_HI, + BPF_AX_LO, + /* Stack space for BPF_REG_6 through BPF_REG_9 and BPF_REG_AX. */ + BPF_JIT_SCRATCH_REGS, +}; + +#define STACK_OFFSET(k) (-4 - ((k) * 4)) + +#define TMP_REG_1 (MAX_BPF_JIT_REG + 0) +#define TMP_REG_2 (MAX_BPF_JIT_REG + 1) + +#define RV_REG_TCC RV_REG_T6 +#define RV_REG_TCC_SAVED RV_REG_S7 + +static const s8 bpf2rv32[][2] = { + /* Return value from in-kernel function, and exit value from eBPF. */ + [BPF_REG_0] = {RV_REG_S2, RV_REG_S1}, + /* Arguments from eBPF program to in-kernel function. 
*/ + [BPF_REG_1] = {RV_REG_A1, RV_REG_A0}, + [BPF_REG_2] = {RV_REG_A3, RV_REG_A2}, + [BPF_REG_3] = {RV_REG_A5, RV_REG_A4}, + [BPF_REG_4] = {RV_REG_A7, RV_REG_A6}, + [BPF_REG_5] = {RV_REG_S4, RV_REG_S3}, + /* + * Callee-saved registers that in-kernel function will preserve. + * Stored on the stack. + */ + [BPF_REG_6] = {STACK_OFFSET(BPF_R6_HI), STACK_OFFSET(BPF_R6_LO)}, + [BPF_REG_7] = {STACK_OFFSET(BPF_R7_HI), STACK_OFFSET(BPF_R7_LO)}, + [BPF_REG_8] = {STACK_OFFSET(BPF_R8_HI), STACK_OFFSET(BPF_R8_LO)}, + [BPF_REG_9] = {STACK_OFFSET(BPF_R9_HI), STACK_OFFSET(BPF_R9_LO)}, + /* Read-only frame pointer to access BPF stack. */ + [BPF_REG_FP] = {RV_REG_S6, RV_REG_S5}, + /* Temporary register for blinding constants. Stored on the stack. */ + [BPF_REG_AX] = {STACK_OFFSET(BPF_AX_HI), STACK_OFFSET(BPF_AX_LO)}, + /* + * Temporary registers used by the JIT to operate on registers stored + * on the stack. Save t0 and t1 to be used as temporaries in generated + * code. + */ + [TMP_REG_1] = {RV_REG_T3, RV_REG_T2}, + [TMP_REG_2] = {RV_REG_T5, RV_REG_T4}, +}; + +static s8 hi(const s8 *r) +{ + return r[0]; +} + +static s8 lo(const s8 *r) +{ + return r[1]; +} + +static void emit_imm(const s8 rd, s32 imm, struct rv_jit_context *ctx) +{ + u32 upper = (imm + (1 << 11)) >> 12; + u32 lower = imm & 0xfff; + + if (upper) { + emit(rv_lui(rd, upper), ctx); + emit(rv_addi(rd, rd, lower), ctx); + } else { + emit(rv_addi(rd, RV_REG_ZERO, lower), ctx); + } +} + +static void emit_imm32(const s8 *rd, s32 imm, struct rv_jit_context *ctx) +{ + /* Emit immediate into lower bits. */ + emit_imm(lo(rd), imm, ctx); + + /* Sign-extend into upper bits. */ + if (imm >= 0) + emit(rv_addi(hi(rd), RV_REG_ZERO, 0), ctx); + else + emit(rv_addi(hi(rd), RV_REG_ZERO, -1), ctx); +} + +static void emit_imm64(const s8 *rd, s32 imm_hi, s32 imm_lo, + struct rv_jit_context *ctx) +{ + emit_imm(lo(rd), imm_lo, ctx); + emit_imm(hi(rd), imm_hi, ctx); +} + +static void __build_epilogue(bool is_tail_call, struct rv_jit_context *ctx) +{ + int stack_adjust = ctx->stack_size, store_offset = stack_adjust - 4; + const s8 *r0 = bpf2rv32[BPF_REG_0]; + + store_offset -= 4 * BPF_JIT_SCRATCH_REGS; + + /* Set return value if not tail call. */ + if (!is_tail_call) { + emit(rv_addi(RV_REG_A0, lo(r0), 0), ctx); + emit(rv_addi(RV_REG_A1, hi(r0), 0), ctx); + } + + /* Restore callee-saved registers. */ + emit(rv_lw(RV_REG_RA, store_offset - 0, RV_REG_SP), ctx); + emit(rv_lw(RV_REG_FP, store_offset - 4, RV_REG_SP), ctx); + emit(rv_lw(RV_REG_S1, store_offset - 8, RV_REG_SP), ctx); + emit(rv_lw(RV_REG_S2, store_offset - 12, RV_REG_SP), ctx); + emit(rv_lw(RV_REG_S3, store_offset - 16, RV_REG_SP), ctx); + emit(rv_lw(RV_REG_S4, store_offset - 20, RV_REG_SP), ctx); + emit(rv_lw(RV_REG_S5, store_offset - 24, RV_REG_SP), ctx); + emit(rv_lw(RV_REG_S6, store_offset - 28, RV_REG_SP), ctx); + emit(rv_lw(RV_REG_S7, store_offset - 32, RV_REG_SP), ctx); + + emit(rv_addi(RV_REG_SP, RV_REG_SP, stack_adjust), ctx); + + if (is_tail_call) { + /* + * goto *(t0 + 4); + * Skips first instruction of prologue which initializes tail + * call counter. Assumes t0 contains address of target program, + * see emit_bpf_tail_call. 
+ */ + emit(rv_jalr(RV_REG_ZERO, RV_REG_T0, 4), ctx); + } else { + emit(rv_jalr(RV_REG_ZERO, RV_REG_RA, 0), ctx); + } +} + +static bool is_stacked(s8 reg) +{ + return reg < 0; +} + +static const s8 *bpf_get_reg64(const s8 *reg, const s8 *tmp, + struct rv_jit_context *ctx) +{ + if (is_stacked(hi(reg))) { + emit(rv_lw(hi(tmp), hi(reg), RV_REG_FP), ctx); + emit(rv_lw(lo(tmp), lo(reg), RV_REG_FP), ctx); + reg = tmp; + } + return reg; +} + +static void bpf_put_reg64(const s8 *reg, const s8 *src, + struct rv_jit_context *ctx) +{ + if (is_stacked(hi(reg))) { + emit(rv_sw(RV_REG_FP, hi(reg), hi(src)), ctx); + emit(rv_sw(RV_REG_FP, lo(reg), lo(src)), ctx); + } +} + +static const s8 *bpf_get_reg32(const s8 *reg, const s8 *tmp, + struct rv_jit_context *ctx) +{ + if (is_stacked(lo(reg))) { + emit(rv_lw(lo(tmp), lo(reg), RV_REG_FP), ctx); + reg = tmp; + } + return reg; +} + +static void bpf_put_reg32(const s8 *reg, const s8 *src, + struct rv_jit_context *ctx) +{ + if (is_stacked(lo(reg))) { + emit(rv_sw(RV_REG_FP, lo(reg), lo(src)), ctx); + if (!ctx->prog->aux->verifier_zext) + emit(rv_sw(RV_REG_FP, hi(reg), RV_REG_ZERO), ctx); + } else if (!ctx->prog->aux->verifier_zext) { + emit(rv_addi(hi(reg), RV_REG_ZERO, 0), ctx); + } +} + +static void emit_jump_and_link(u8 rd, s32 rvoff, bool force_jalr, + struct rv_jit_context *ctx) +{ + s32 upper, lower; + + if (rvoff && is_21b_int(rvoff) && !force_jalr) { + emit(rv_jal(rd, rvoff >> 1), ctx); + return; + } + + upper = (rvoff + (1 << 11)) >> 12; + lower = rvoff & 0xfff; + emit(rv_auipc(RV_REG_T1, upper), ctx); + emit(rv_jalr(rd, RV_REG_T1, lower), ctx); +} + +static void emit_alu_i64(const s8 *dst, s32 imm, + struct rv_jit_context *ctx, const u8 op) +{ + const s8 *tmp1 = bpf2rv32[TMP_REG_1]; + const s8 *rd = bpf_get_reg64(dst, tmp1, ctx); + + switch (op) { + case BPF_MOV: + emit_imm32(rd, imm, ctx); + break; + case BPF_AND: + if (is_12b_int(imm)) { + emit(rv_andi(lo(rd), lo(rd), imm), ctx); + } else { + emit_imm(RV_REG_T0, imm, ctx); + emit(rv_and(lo(rd), lo(rd), RV_REG_T0), ctx); + } + if (imm >= 0) + emit(rv_addi(hi(rd), RV_REG_ZERO, 0), ctx); + break; + case BPF_OR: + if (is_12b_int(imm)) { + emit(rv_ori(lo(rd), lo(rd), imm), ctx); + } else { + emit_imm(RV_REG_T0, imm, ctx); + emit(rv_or(lo(rd), lo(rd), RV_REG_T0), ctx); + } + if (imm < 0) + emit(rv_ori(hi(rd), RV_REG_ZERO, -1), ctx); + break; + case BPF_XOR: + if (is_12b_int(imm)) { + emit(rv_xori(lo(rd), lo(rd), imm), ctx); + } else { + emit_imm(RV_REG_T0, imm, ctx); + emit(rv_xor(lo(rd), lo(rd), RV_REG_T0), ctx); + } + if (imm < 0) + emit(rv_xori(hi(rd), hi(rd), -1), ctx); + break; + case BPF_LSH: + if (imm >= 32) { + emit(rv_slli(hi(rd), lo(rd), imm - 32), ctx); + emit(rv_addi(lo(rd), RV_REG_ZERO, 0), ctx); + } else if (imm == 0) { + /* Do nothing. */ + } else { + emit(rv_srli(RV_REG_T0, lo(rd), 32 - imm), ctx); + emit(rv_slli(hi(rd), hi(rd), imm), ctx); + emit(rv_or(hi(rd), RV_REG_T0, hi(rd)), ctx); + emit(rv_slli(lo(rd), lo(rd), imm), ctx); + } + break; + case BPF_RSH: + if (imm >= 32) { + emit(rv_srli(lo(rd), hi(rd), imm - 32), ctx); + emit(rv_addi(hi(rd), RV_REG_ZERO, 0), ctx); + } else if (imm == 0) { + /* Do nothing. */ + } else { + emit(rv_slli(RV_REG_T0, hi(rd), 32 - imm), ctx); + emit(rv_srli(lo(rd), lo(rd), imm), ctx); + emit(rv_or(lo(rd), RV_REG_T0, lo(rd)), ctx); + emit(rv_srli(hi(rd), hi(rd), imm), ctx); + } + break; + case BPF_ARSH: + if (imm >= 32) { + emit(rv_srai(lo(rd), hi(rd), imm - 32), ctx); + emit(rv_srai(hi(rd), hi(rd), 31), ctx); + } else if (imm == 0) { + /* Do nothing. 
*/ + } else { + emit(rv_slli(RV_REG_T0, hi(rd), 32 - imm), ctx); + emit(rv_srli(lo(rd), lo(rd), imm), ctx); + emit(rv_or(lo(rd), RV_REG_T0, lo(rd)), ctx); + emit(rv_srai(hi(rd), hi(rd), imm), ctx); + } + break; + } + + bpf_put_reg64(dst, rd, ctx); +} + +static void emit_alu_i32(const s8 *dst, s32 imm, + struct rv_jit_context *ctx, const u8 op) +{ + const s8 *tmp1 = bpf2rv32[TMP_REG_1]; + const s8 *rd = bpf_get_reg32(dst, tmp1, ctx); + + switch (op) { + case BPF_MOV: + emit_imm(lo(rd), imm, ctx); + break; + case BPF_ADD: + if (is_12b_int(imm)) { + emit(rv_addi(lo(rd), lo(rd), imm), ctx); + } else { + emit_imm(RV_REG_T0, imm, ctx); + emit(rv_add(lo(rd), lo(rd), RV_REG_T0), ctx); + } + break; + case BPF_SUB: + if (is_12b_int(-imm)) { + emit(rv_addi(lo(rd), lo(rd), -imm), ctx); + } else { + emit_imm(RV_REG_T0, imm, ctx); + emit(rv_sub(lo(rd), lo(rd), RV_REG_T0), ctx); + } + break; + case BPF_AND: + if (is_12b_int(imm)) { + emit(rv_andi(lo(rd), lo(rd), imm), ctx); + } else { + emit_imm(RV_REG_T0, imm, ctx); + emit(rv_and(lo(rd), lo(rd), RV_REG_T0), ctx); + } + break; + case BPF_OR: + if (is_12b_int(imm)) { + emit(rv_ori(lo(rd), lo(rd), imm), ctx); + } else { + emit_imm(RV_REG_T0, imm, ctx); + emit(rv_or(lo(rd), lo(rd), RV_REG_T0), ctx); + } + break; + case BPF_XOR: + if (is_12b_int(imm)) { + emit(rv_xori(lo(rd), lo(rd), imm), ctx); + } else { + emit_imm(RV_REG_T0, imm, ctx); + emit(rv_xor(lo(rd), lo(rd), RV_REG_T0), ctx); + } + break; + case BPF_LSH: + if (is_12b_int(imm)) { + emit(rv_slli(lo(rd), lo(rd), imm), ctx); + } else { + emit_imm(RV_REG_T0, imm, ctx); + emit(rv_sll(lo(rd), lo(rd), RV_REG_T0), ctx); + } + break; + case BPF_RSH: + if (is_12b_int(imm)) { + emit(rv_srli(lo(rd), lo(rd), imm), ctx); + } else { + emit_imm(RV_REG_T0, imm, ctx); + emit(rv_srl(lo(rd), lo(rd), RV_REG_T0), ctx); + } + break; + case BPF_ARSH: + if (is_12b_int(imm)) { + emit(rv_srai(lo(rd), lo(rd), imm), ctx); + } else { + emit_imm(RV_REG_T0, imm, ctx); + emit(rv_sra(lo(rd), lo(rd), RV_REG_T0), ctx); + } + break; + } + + bpf_put_reg32(dst, rd, ctx); +} + +static void emit_alu_r64(const s8 *dst, const s8 *src, + struct rv_jit_context *ctx, const u8 op) +{ + const s8 *tmp1 = bpf2rv32[TMP_REG_1]; + const s8 *tmp2 = bpf2rv32[TMP_REG_2]; + const s8 *rd = bpf_get_reg64(dst, tmp1, ctx); + const s8 *rs = bpf_get_reg64(src, tmp2, ctx); + + switch (op) { + case BPF_MOV: + emit(rv_addi(lo(rd), lo(rs), 0), ctx); + emit(rv_addi(hi(rd), hi(rs), 0), ctx); + break; + case BPF_ADD: + if (rd == rs) { + emit(rv_srli(RV_REG_T0, lo(rd), 31), ctx); + emit(rv_slli(hi(rd), hi(rd), 1), ctx); + emit(rv_or(hi(rd), RV_REG_T0, hi(rd)), ctx); + emit(rv_slli(lo(rd), lo(rd), 1), ctx); + } else { + emit(rv_add(lo(rd), lo(rd), lo(rs)), ctx); + emit(rv_sltu(RV_REG_T0, lo(rd), lo(rs)), ctx); + emit(rv_add(hi(rd), hi(rd), hi(rs)), ctx); + emit(rv_add(hi(rd), hi(rd), RV_REG_T0), ctx); + } + break; + case BPF_SUB: + emit(rv_sub(RV_REG_T1, hi(rd), hi(rs)), ctx); + emit(rv_sltu(RV_REG_T0, lo(rd), lo(rs)), ctx); + emit(rv_sub(hi(rd), RV_REG_T1, RV_REG_T0), ctx); + emit(rv_sub(lo(rd), lo(rd), lo(rs)), ctx); + break; + case BPF_AND: + emit(rv_and(lo(rd), lo(rd), lo(rs)), ctx); + emit(rv_and(hi(rd), hi(rd), hi(rs)), ctx); + break; + case BPF_OR: + emit(rv_or(lo(rd), lo(rd), lo(rs)), ctx); + emit(rv_or(hi(rd), hi(rd), hi(rs)), ctx); + break; + case BPF_XOR: + emit(rv_xor(lo(rd), lo(rd), lo(rs)), ctx); + emit(rv_xor(hi(rd), hi(rd), hi(rs)), ctx); + break; + case BPF_MUL: + emit(rv_mul(RV_REG_T0, hi(rs), lo(rd)), ctx); + emit(rv_mul(hi(rd), hi(rd), lo(rs)), ctx); 
+ emit(rv_mulhu(RV_REG_T1, lo(rd), lo(rs)), ctx); + emit(rv_add(hi(rd), hi(rd), RV_REG_T0), ctx); + emit(rv_mul(lo(rd), lo(rd), lo(rs)), ctx); + emit(rv_add(hi(rd), hi(rd), RV_REG_T1), ctx); + break; + case BPF_LSH: + emit(rv_addi(RV_REG_T0, lo(rs), -32), ctx); + emit(rv_blt(RV_REG_T0, RV_REG_ZERO, 8), ctx); + emit(rv_sll(hi(rd), lo(rd), RV_REG_T0), ctx); + emit(rv_addi(lo(rd), RV_REG_ZERO, 0), ctx); + emit(rv_jal(RV_REG_ZERO, 16), ctx); + emit(rv_addi(RV_REG_T1, RV_REG_ZERO, 31), ctx); + emit(rv_srli(RV_REG_T0, lo(rd), 1), ctx); + emit(rv_sub(RV_REG_T1, RV_REG_T1, lo(rs)), ctx); + emit(rv_srl(RV_REG_T0, RV_REG_T0, RV_REG_T1), ctx); + emit(rv_sll(hi(rd), hi(rd), lo(rs)), ctx); + emit(rv_or(hi(rd), RV_REG_T0, hi(rd)), ctx); + emit(rv_sll(lo(rd), lo(rd), lo(rs)), ctx); + break; + case BPF_RSH: + emit(rv_addi(RV_REG_T0, lo(rs), -32), ctx); + emit(rv_blt(RV_REG_T0, RV_REG_ZERO, 8), ctx); + emit(rv_srl(lo(rd), hi(rd), RV_REG_T0), ctx); + emit(rv_addi(hi(rd), RV_REG_ZERO, 0), ctx); + emit(rv_jal(RV_REG_ZERO, 16), ctx); + emit(rv_addi(RV_REG_T1, RV_REG_ZERO, 31), ctx); + emit(rv_slli(RV_REG_T0, hi(rd), 1), ctx); + emit(rv_sub(RV_REG_T1, RV_REG_T1, lo(rs)), ctx); + emit(rv_sll(RV_REG_T0, RV_REG_T0, RV_REG_T1), ctx); + emit(rv_srl(lo(rd), lo(rd), lo(rs)), ctx); + emit(rv_or(lo(rd), RV_REG_T0, lo(rd)), ctx); + emit(rv_srl(hi(rd), hi(rd), lo(rs)), ctx); + break; + case BPF_ARSH: + emit(rv_addi(RV_REG_T0, lo(rs), -32), ctx); + emit(rv_blt(RV_REG_T0, RV_REG_ZERO, 8), ctx); + emit(rv_sra(lo(rd), hi(rd), RV_REG_T0), ctx); + emit(rv_srai(hi(rd), hi(rd), 31), ctx); + emit(rv_jal(RV_REG_ZERO, 16), ctx); + emit(rv_addi(RV_REG_T1, RV_REG_ZERO, 31), ctx); + emit(rv_slli(RV_REG_T0, hi(rd), 1), ctx); + emit(rv_sub(RV_REG_T1, RV_REG_T1, lo(rs)), ctx); + emit(rv_sll(RV_REG_T0, RV_REG_T0, RV_REG_T1), ctx); + emit(rv_srl(lo(rd), lo(rd), lo(rs)), ctx); + emit(rv_or(lo(rd), RV_REG_T0, lo(rd)), ctx); + emit(rv_sra(hi(rd), hi(rd), lo(rs)), ctx); + break; + case BPF_NEG: + emit(rv_sub(lo(rd), RV_REG_ZERO, lo(rd)), ctx); + emit(rv_sltu(RV_REG_T0, RV_REG_ZERO, lo(rd)), ctx); + emit(rv_sub(hi(rd), RV_REG_ZERO, hi(rd)), ctx); + emit(rv_sub(hi(rd), hi(rd), RV_REG_T0), ctx); + break; + } + + bpf_put_reg64(dst, rd, ctx); +} + +static void emit_alu_r32(const s8 *dst, const s8 *src, + struct rv_jit_context *ctx, const u8 op) +{ + const s8 *tmp1 = bpf2rv32[TMP_REG_1]; + const s8 *tmp2 = bpf2rv32[TMP_REG_2]; + const s8 *rd = bpf_get_reg32(dst, tmp1, ctx); + const s8 *rs = bpf_get_reg32(src, tmp2, ctx); + + switch (op) { + case BPF_MOV: + emit(rv_addi(lo(rd), lo(rs), 0), ctx); + break; + case BPF_ADD: + emit(rv_add(lo(rd), lo(rd), lo(rs)), ctx); + break; + case BPF_SUB: + emit(rv_sub(lo(rd), lo(rd), lo(rs)), ctx); + break; + case BPF_AND: + emit(rv_and(lo(rd), lo(rd), lo(rs)), ctx); + break; + case BPF_OR: + emit(rv_or(lo(rd), lo(rd), lo(rs)), ctx); + break; + case BPF_XOR: + emit(rv_xor(lo(rd), lo(rd), lo(rs)), ctx); + break; + case BPF_MUL: + emit(rv_mul(lo(rd), lo(rd), lo(rs)), ctx); + break; + case BPF_DIV: + emit(rv_divu(lo(rd), lo(rd), lo(rs)), ctx); + break; + case BPF_MOD: + emit(rv_remu(lo(rd), lo(rd), lo(rs)), ctx); + break; + case BPF_LSH: + emit(rv_sll(lo(rd), lo(rd), lo(rs)), ctx); + break; + case BPF_RSH: + emit(rv_srl(lo(rd), lo(rd), lo(rs)), ctx); + break; + case BPF_ARSH: + emit(rv_sra(lo(rd), lo(rd), lo(rs)), ctx); + break; + case BPF_NEG: + emit(rv_sub(lo(rd), RV_REG_ZERO, lo(rd)), ctx); + break; + } + + bpf_put_reg32(dst, rd, ctx); +} + +static int emit_branch_r64(const s8 *src1, const s8 *src2, s32 rvoff, + 
struct rv_jit_context *ctx, const u8 op) +{ + int e, s = ctx->ninsns; + const s8 *tmp1 = bpf2rv32[TMP_REG_1]; + const s8 *tmp2 = bpf2rv32[TMP_REG_2]; + + const s8 *rs1 = bpf_get_reg64(src1, tmp1, ctx); + const s8 *rs2 = bpf_get_reg64(src2, tmp2, ctx); + + /* + * NO_JUMP skips over the rest of the instructions and the + * emit_jump_and_link, meaning the BPF branch is not taken. + * JUMP skips directly to the emit_jump_and_link, meaning + * the BPF branch is taken. + * + * The fallthrough case results in the BPF branch being taken. + */ +#define NO_JUMP(idx) (6 + (2 * (idx))) +#define JUMP(idx) (2 + (2 * (idx))) + + switch (op) { + case BPF_JEQ: + emit(rv_bne(hi(rs1), hi(rs2), NO_JUMP(1)), ctx); + emit(rv_bne(lo(rs1), lo(rs2), NO_JUMP(0)), ctx); + break; + case BPF_JGT: + emit(rv_bgtu(hi(rs1), hi(rs2), JUMP(2)), ctx); + emit(rv_bltu(hi(rs1), hi(rs2), NO_JUMP(1)), ctx); + emit(rv_bleu(lo(rs1), lo(rs2), NO_JUMP(0)), ctx); + break; + case BPF_JLT: + emit(rv_bltu(hi(rs1), hi(rs2), JUMP(2)), ctx); + emit(rv_bgtu(hi(rs1), hi(rs2), NO_JUMP(1)), ctx); + emit(rv_bgeu(lo(rs1), lo(rs2), NO_JUMP(0)), ctx); + break; + case BPF_JGE: + emit(rv_bgtu(hi(rs1), hi(rs2), JUMP(2)), ctx); + emit(rv_bltu(hi(rs1), hi(rs2), NO_JUMP(1)), ctx); + emit(rv_bltu(lo(rs1), lo(rs2), NO_JUMP(0)), ctx); + break; + case BPF_JLE: + emit(rv_bltu(hi(rs1), hi(rs2), JUMP(2)), ctx); + emit(rv_bgtu(hi(rs1), hi(rs2), NO_JUMP(1)), ctx); + emit(rv_bgtu(lo(rs1), lo(rs2), NO_JUMP(0)), ctx); + break; + case BPF_JNE: + emit(rv_bne(hi(rs1), hi(rs2), JUMP(1)), ctx); + emit(rv_beq(lo(rs1), lo(rs2), NO_JUMP(0)), ctx); + break; + case BPF_JSGT: + emit(rv_bgt(hi(rs1), hi(rs2), JUMP(2)), ctx); + emit(rv_blt(hi(rs1), hi(rs2), NO_JUMP(1)), ctx); + emit(rv_bleu(lo(rs1), lo(rs2), NO_JUMP(0)), ctx); + break; + case BPF_JSLT: + emit(rv_blt(hi(rs1), hi(rs2), JUMP(2)), ctx); + emit(rv_bgt(hi(rs1), hi(rs2), NO_JUMP(1)), ctx); + emit(rv_bgeu(lo(rs1), lo(rs2), NO_JUMP(0)), ctx); + break; + case BPF_JSGE: + emit(rv_bgt(hi(rs1), hi(rs2), JUMP(2)), ctx); + emit(rv_blt(hi(rs1), hi(rs2), NO_JUMP(1)), ctx); + emit(rv_bltu(lo(rs1), lo(rs2), NO_JUMP(0)), ctx); + break; + case BPF_JSLE: + emit(rv_blt(hi(rs1), hi(rs2), JUMP(2)), ctx); + emit(rv_bgt(hi(rs1), hi(rs2), NO_JUMP(1)), ctx); + emit(rv_bgtu(lo(rs1), lo(rs2), NO_JUMP(0)), ctx); + break; + case BPF_JSET: + emit(rv_and(RV_REG_T0, hi(rs1), hi(rs2)), ctx); + emit(rv_bne(RV_REG_T0, RV_REG_ZERO, JUMP(2)), ctx); + emit(rv_and(RV_REG_T0, lo(rs1), lo(rs2)), ctx); + emit(rv_beq(RV_REG_T0, RV_REG_ZERO, NO_JUMP(0)), ctx); + break; + } + +#undef NO_JUMP +#undef JUMP + + e = ctx->ninsns; + /* Adjust for extra insns. */ + rvoff -= (e - s) << 2; + emit_jump_and_link(RV_REG_ZERO, rvoff, true, ctx); + return 0; +} + +static int emit_bcc(u8 op, u8 rd, u8 rs, int rvoff, struct rv_jit_context *ctx) +{ + int e, s = ctx->ninsns; + bool far = false; + int off; + + if (op == BPF_JSET) { + /* + * BPF_JSET is a special case: it has no inverse so we always + * treat it as a far branch. + */ + far = true; + } else if (!is_13b_int(rvoff)) { + op = invert_bpf_cond(op); + far = true; + } + + /* + * For a far branch, the condition is negated and we jump over the + * branch itself, and the two instructions from emit_jump_and_link. + * For a near branch, just use rvoff. + */ + off = far ? 
6 : (rvoff >> 1); + + switch (op) { + case BPF_JEQ: + emit(rv_beq(rd, rs, off), ctx); + break; + case BPF_JGT: + emit(rv_bgtu(rd, rs, off), ctx); + break; + case BPF_JLT: + emit(rv_bltu(rd, rs, off), ctx); + break; + case BPF_JGE: + emit(rv_bgeu(rd, rs, off), ctx); + break; + case BPF_JLE: + emit(rv_bleu(rd, rs, off), ctx); + break; + case BPF_JNE: + emit(rv_bne(rd, rs, off), ctx); + break; + case BPF_JSGT: + emit(rv_bgt(rd, rs, off), ctx); + break; + case BPF_JSLT: + emit(rv_blt(rd, rs, off), ctx); + break; + case BPF_JSGE: + emit(rv_bge(rd, rs, off), ctx); + break; + case BPF_JSLE: + emit(rv_ble(rd, rs, off), ctx); + break; + case BPF_JSET: + emit(rv_and(RV_REG_T0, rd, rs), ctx); + emit(rv_beq(RV_REG_T0, RV_REG_ZERO, off), ctx); + break; + } + + if (far) { + e = ctx->ninsns; + /* Adjust for extra insns. */ + rvoff -= (e - s) << 2; + emit_jump_and_link(RV_REG_ZERO, rvoff, true, ctx); + } + return 0; +} + +static int emit_branch_r32(const s8 *src1, const s8 *src2, s32 rvoff, + struct rv_jit_context *ctx, const u8 op) +{ + int e, s = ctx->ninsns; + const s8 *tmp1 = bpf2rv32[TMP_REG_1]; + const s8 *tmp2 = bpf2rv32[TMP_REG_2]; + + const s8 *rs1 = bpf_get_reg32(src1, tmp1, ctx); + const s8 *rs2 = bpf_get_reg32(src2, tmp2, ctx); + + e = ctx->ninsns; + /* Adjust for extra insns. */ + rvoff -= (e - s) << 2; + + if (emit_bcc(op, lo(rs1), lo(rs2), rvoff, ctx)) + return -1; + + return 0; +} + +static void emit_call(bool fixed, u64 addr, struct rv_jit_context *ctx) +{ + const s8 *r0 = bpf2rv32[BPF_REG_0]; + const s8 *r5 = bpf2rv32[BPF_REG_5]; + u32 upper = ((u32)addr + (1 << 11)) >> 12; + u32 lower = addr & 0xfff; + + /* R1-R4 already in correct registers---need to push R5 to stack. */ + emit(rv_addi(RV_REG_SP, RV_REG_SP, -16), ctx); + emit(rv_sw(RV_REG_SP, 0, lo(r5)), ctx); + emit(rv_sw(RV_REG_SP, 4, hi(r5)), ctx); + + /* Backup TCC. */ + emit(rv_addi(RV_REG_TCC_SAVED, RV_REG_TCC, 0), ctx); + + /* + * Use lui/jalr pair to jump to absolute address. Don't use emit_imm as + * the number of emitted instructions should not depend on the value of + * addr. + */ + emit(rv_lui(RV_REG_T1, upper), ctx); + emit(rv_jalr(RV_REG_RA, RV_REG_T1, lower), ctx); + + /* Restore TCC. */ + emit(rv_addi(RV_REG_TCC, RV_REG_TCC_SAVED, 0), ctx); + + /* Set return value and restore stack. */ + emit(rv_addi(lo(r0), RV_REG_A0, 0), ctx); + emit(rv_addi(hi(r0), RV_REG_A1, 0), ctx); + emit(rv_addi(RV_REG_SP, RV_REG_SP, 16), ctx); +} + +static int emit_bpf_tail_call(int insn, struct rv_jit_context *ctx) +{ + /* + * R1 -> &ctx + * R2 -> &array + * R3 -> index + */ + int tc_ninsn, off, start_insn = ctx->ninsns; + const s8 *arr_reg = bpf2rv32[BPF_REG_2]; + const s8 *idx_reg = bpf2rv32[BPF_REG_3]; + + tc_ninsn = insn ? 
ctx->offset[insn] - ctx->offset[insn - 1] : + ctx->offset[0]; + + /* max_entries = array->map.max_entries; */ + off = offsetof(struct bpf_array, map.max_entries); + if (is_12b_check(off, insn)) + return -1; + emit(rv_lw(RV_REG_T1, off, lo(arr_reg)), ctx); + + /* + * if (index >= max_entries) + * goto out; + */ + off = (tc_ninsn - (ctx->ninsns - start_insn)) << 2; + emit_bcc(BPF_JGE, lo(idx_reg), RV_REG_T1, off, ctx); + + /* + * if ((temp_tcc = tcc - 1) < 0) + * goto out; + */ + emit(rv_addi(RV_REG_T1, RV_REG_TCC, -1), ctx); + off = (tc_ninsn - (ctx->ninsns - start_insn)) << 2; + emit_bcc(BPF_JSLT, RV_REG_T1, RV_REG_ZERO, off, ctx); + + /* + * prog = array->ptrs[index]; + * if (!prog) + * goto out; + */ + emit(rv_slli(RV_REG_T0, lo(idx_reg), 2), ctx); + emit(rv_add(RV_REG_T0, RV_REG_T0, lo(arr_reg)), ctx); + off = offsetof(struct bpf_array, ptrs); + if (is_12b_check(off, insn)) + return -1; + emit(rv_lw(RV_REG_T0, off, RV_REG_T0), ctx); + off = (tc_ninsn - (ctx->ninsns - start_insn)) << 2; + emit_bcc(BPF_JEQ, RV_REG_T0, RV_REG_ZERO, off, ctx); + + /* + * tcc = temp_tcc; + * goto *(prog->bpf_func + 4); + */ + off = offsetof(struct bpf_prog, bpf_func); + if (is_12b_check(off, insn)) + return -1; + emit(rv_lw(RV_REG_T0, off, RV_REG_T0), ctx); + emit(rv_addi(RV_REG_TCC, RV_REG_T1, 0), ctx); + /* Epilogue jumps to *(t0 + 4). */ + __build_epilogue(true, ctx); + return 0; +} + +static int emit_load_r64(const s8 *dst, const s8 *src, s16 off, + struct rv_jit_context *ctx, const u8 size) +{ + const s8 *tmp1 = bpf2rv32[TMP_REG_1]; + const s8 *tmp2 = bpf2rv32[TMP_REG_2]; + const s8 *rd = bpf_get_reg64(dst, tmp1, ctx); + const s8 *rs = bpf_get_reg64(src, tmp2, ctx); + + emit_imm(RV_REG_T0, off, ctx); + emit(rv_add(RV_REG_T0, RV_REG_T0, lo(rs)), ctx); + + switch (size) { + case BPF_B: + emit(rv_lbu(lo(rd), 0, RV_REG_T0), ctx); + if (!ctx->prog->aux->verifier_zext) + emit(rv_addi(hi(rd), RV_REG_ZERO, 0), ctx); + break; + case BPF_H: + emit(rv_lhu(lo(rd), 0, RV_REG_T0), ctx); + if (!ctx->prog->aux->verifier_zext) + emit(rv_addi(hi(rd), RV_REG_ZERO, 0), ctx); + break; + case BPF_W: + emit(rv_lw(lo(rd), 0, RV_REG_T0), ctx); + if (!ctx->prog->aux->verifier_zext) + emit(rv_addi(hi(rd), RV_REG_ZERO, 0), ctx); + break; + case BPF_DW: + emit(rv_lw(lo(rd), 0, RV_REG_T0), ctx); + emit(rv_lw(hi(rd), 4, RV_REG_T0), ctx); + break; + } + + bpf_put_reg64(dst, rd, ctx); + return 0; +} + +static int emit_store_r64(const s8 *dst, const s8 *src, s16 off, + struct rv_jit_context *ctx, const u8 size, + const u8 mode) +{ + const s8 *tmp1 = bpf2rv32[TMP_REG_1]; + const s8 *tmp2 = bpf2rv32[TMP_REG_2]; + const s8 *rd = bpf_get_reg64(dst, tmp1, ctx); + const s8 *rs = bpf_get_reg64(src, tmp2, ctx); + + if (mode == BPF_XADD && size != BPF_W) + return -1; + + emit_imm(RV_REG_T0, off, ctx); + emit(rv_add(RV_REG_T0, RV_REG_T0, lo(rd)), ctx); + + switch (size) { + case BPF_B: + emit(rv_sb(RV_REG_T0, 0, lo(rs)), ctx); + break; + case BPF_H: + emit(rv_sh(RV_REG_T0, 0, lo(rs)), ctx); + break; + case BPF_W: + switch (mode) { + case BPF_MEM: + emit(rv_sw(RV_REG_T0, 0, lo(rs)), ctx); + break; + case BPF_XADD: + emit(rv_amoadd_w(RV_REG_ZERO, lo(rs), RV_REG_T0, 0, 0), + ctx); + break; + } + break; + case BPF_DW: + emit(rv_sw(RV_REG_T0, 0, lo(rs)), ctx); + emit(rv_sw(RV_REG_T0, 4, hi(rs)), ctx); + break; + } + + return 0; +} + +static void emit_rev16(const s8 rd, struct rv_jit_context *ctx) +{ + emit(rv_slli(rd, rd, 16), ctx); + emit(rv_slli(RV_REG_T1, rd, 8), ctx); + emit(rv_srli(rd, rd, 8), ctx); + emit(rv_add(RV_REG_T1, rd, RV_REG_T1), ctx); 
+ emit(rv_srli(rd, RV_REG_T1, 16), ctx); +} + +static void emit_rev32(const s8 rd, struct rv_jit_context *ctx) +{ + emit(rv_addi(RV_REG_T1, RV_REG_ZERO, 0), ctx); + emit(rv_andi(RV_REG_T0, rd, 255), ctx); + emit(rv_add(RV_REG_T1, RV_REG_T1, RV_REG_T0), ctx); + emit(rv_slli(RV_REG_T1, RV_REG_T1, 8), ctx); + emit(rv_srli(rd, rd, 8), ctx); + emit(rv_andi(RV_REG_T0, rd, 255), ctx); + emit(rv_add(RV_REG_T1, RV_REG_T1, RV_REG_T0), ctx); + emit(rv_slli(RV_REG_T1, RV_REG_T1, 8), ctx); + emit(rv_srli(rd, rd, 8), ctx); + emit(rv_andi(RV_REG_T0, rd, 255), ctx); + emit(rv_add(RV_REG_T1, RV_REG_T1, RV_REG_T0), ctx); + emit(rv_slli(RV_REG_T1, RV_REG_T1, 8), ctx); + emit(rv_srli(rd, rd, 8), ctx); + emit(rv_andi(RV_REG_T0, rd, 255), ctx); + emit(rv_add(RV_REG_T1, RV_REG_T1, RV_REG_T0), ctx); + emit(rv_addi(rd, RV_REG_T1, 0), ctx); +} + +static void emit_zext64(const s8 *dst, struct rv_jit_context *ctx) +{ + const s8 *rd; + const s8 *tmp1 = bpf2rv32[TMP_REG_1]; + + rd = bpf_get_reg64(dst, tmp1, ctx); + emit(rv_addi(hi(rd), RV_REG_ZERO, 0), ctx); + bpf_put_reg64(dst, rd, ctx); +} + +int bpf_jit_emit_insn(const struct bpf_insn *insn, struct rv_jit_context *ctx, + bool extra_pass) +{ + bool is64 = BPF_CLASS(insn->code) == BPF_ALU64 || + BPF_CLASS(insn->code) == BPF_JMP; + int s, e, rvoff, i = insn - ctx->prog->insnsi; + u8 code = insn->code; + s16 off = insn->off; + s32 imm = insn->imm; + + const s8 *dst = bpf2rv32[insn->dst_reg]; + const s8 *src = bpf2rv32[insn->src_reg]; + const s8 *tmp1 = bpf2rv32[TMP_REG_1]; + const s8 *tmp2 = bpf2rv32[TMP_REG_2]; + + switch (code) { + case BPF_ALU64 | BPF_MOV | BPF_X: + + case BPF_ALU64 | BPF_ADD | BPF_X: + case BPF_ALU64 | BPF_ADD | BPF_K: + + case BPF_ALU64 | BPF_SUB | BPF_X: + case BPF_ALU64 | BPF_SUB | BPF_K: + + case BPF_ALU64 | BPF_AND | BPF_X: + case BPF_ALU64 | BPF_OR | BPF_X: + case BPF_ALU64 | BPF_XOR | BPF_X: + + case BPF_ALU64 | BPF_MUL | BPF_X: + case BPF_ALU64 | BPF_MUL | BPF_K: + + case BPF_ALU64 | BPF_LSH | BPF_X: + case BPF_ALU64 | BPF_RSH | BPF_X: + case BPF_ALU64 | BPF_ARSH | BPF_X: + if (BPF_SRC(code) == BPF_K) { + emit_imm32(tmp2, imm, ctx); + src = tmp2; + } + emit_alu_r64(dst, src, ctx, BPF_OP(code)); + break; + + case BPF_ALU64 | BPF_NEG: + emit_alu_r64(dst, tmp2, ctx, BPF_OP(code)); + break; + + case BPF_ALU64 | BPF_DIV | BPF_X: + case BPF_ALU64 | BPF_DIV | BPF_K: + case BPF_ALU64 | BPF_MOD | BPF_X: + case BPF_ALU64 | BPF_MOD | BPF_K: + goto notsupported; + + case BPF_ALU64 | BPF_MOV | BPF_K: + case BPF_ALU64 | BPF_AND | BPF_K: + case BPF_ALU64 | BPF_OR | BPF_K: + case BPF_ALU64 | BPF_XOR | BPF_K: + case BPF_ALU64 | BPF_LSH | BPF_K: + case BPF_ALU64 | BPF_RSH | BPF_K: + case BPF_ALU64 | BPF_ARSH | BPF_K: + emit_alu_i64(dst, imm, ctx, BPF_OP(code)); + break; + + case BPF_ALU | BPF_MOV | BPF_X: + if (imm == 1) { + /* Special mov32 for zext. */ + emit_zext64(dst, ctx); + break; + } + /* Fallthrough. 
+	 */
+
+	case BPF_ALU | BPF_ADD | BPF_X:
+	case BPF_ALU | BPF_SUB | BPF_X:
+	case BPF_ALU | BPF_AND | BPF_X:
+	case BPF_ALU | BPF_OR | BPF_X:
+	case BPF_ALU | BPF_XOR | BPF_X:
+
+	case BPF_ALU | BPF_MUL | BPF_X:
+	case BPF_ALU | BPF_MUL | BPF_K:
+
+	case BPF_ALU | BPF_DIV | BPF_X:
+	case BPF_ALU | BPF_DIV | BPF_K:
+
+	case BPF_ALU | BPF_MOD | BPF_X:
+	case BPF_ALU | BPF_MOD | BPF_K:
+
+	case BPF_ALU | BPF_LSH | BPF_X:
+	case BPF_ALU | BPF_RSH | BPF_X:
+	case BPF_ALU | BPF_ARSH | BPF_X:
+		if (BPF_SRC(code) == BPF_K) {
+			emit_imm32(tmp2, imm, ctx);
+			src = tmp2;
+		}
+		emit_alu_r32(dst, src, ctx, BPF_OP(code));
+		break;
+
+	case BPF_ALU | BPF_MOV | BPF_K:
+	case BPF_ALU | BPF_ADD | BPF_K:
+	case BPF_ALU | BPF_SUB | BPF_K:
+	case BPF_ALU | BPF_AND | BPF_K:
+	case BPF_ALU | BPF_OR | BPF_K:
+	case BPF_ALU | BPF_XOR | BPF_K:
+	case BPF_ALU | BPF_LSH | BPF_K:
+	case BPF_ALU | BPF_RSH | BPF_K:
+	case BPF_ALU | BPF_ARSH | BPF_K:
+		/*
+		 * mul,div,mod are handled in the BPF_X case since there are
+		 * no RISC-V I-type equivalents.
+		 */
+		emit_alu_i32(dst, imm, ctx, BPF_OP(code));
+		break;
+
+	case BPF_ALU | BPF_NEG:
+		/*
+		 * src is ignored---choose tmp2 as a dummy register since it
+		 * is not on the stack.
+		 */
+		emit_alu_r32(dst, tmp2, ctx, BPF_OP(code));
+		break;
+
+	case BPF_ALU | BPF_END | BPF_FROM_LE:
+	{
+		const s8 *rd = bpf_get_reg64(dst, tmp1, ctx);
+
+		switch (imm) {
+		case 16:
+			emit(rv_slli(lo(rd), lo(rd), 16), ctx);
+			emit(rv_srli(lo(rd), lo(rd), 16), ctx);
+			/* Fallthrough. */
+		case 32:
+			if (!ctx->prog->aux->verifier_zext)
+				emit(rv_addi(hi(rd), RV_REG_ZERO, 0), ctx);
+			break;
+		case 64:
+			/* Do nothing. */
+			break;
+		default:
+			pr_err("bpf-jit: BPF_END imm %d invalid\n", imm);
+			return -1;
+		}
+
+		bpf_put_reg64(dst, rd, ctx);
+		break;
+	}
+
+	case BPF_ALU | BPF_END | BPF_FROM_BE:
+	{
+		const s8 *rd = bpf_get_reg64(dst, tmp1, ctx);
+
+		switch (imm) {
+		case 16:
+			emit_rev16(lo(rd), ctx);
+			if (!ctx->prog->aux->verifier_zext)
+				emit(rv_addi(hi(rd), RV_REG_ZERO, 0), ctx);
+			break;
+		case 32:
+			emit_rev32(lo(rd), ctx);
+			if (!ctx->prog->aux->verifier_zext)
+				emit(rv_addi(hi(rd), RV_REG_ZERO, 0), ctx);
+			break;
+		case 64:
+			/* Swap upper and lower halves. */
+			emit(rv_addi(RV_REG_T0, lo(rd), 0), ctx);
+			emit(rv_addi(lo(rd), hi(rd), 0), ctx);
+			emit(rv_addi(hi(rd), RV_REG_T0, 0), ctx);
+
+			/* Swap each half. */
+			emit_rev32(lo(rd), ctx);
+			emit_rev32(hi(rd), ctx);
+			break;
+		default:
+			pr_err("bpf-jit: BPF_END imm %d invalid\n", imm);
+			return -1;
+		}
+
+		bpf_put_reg64(dst, rd, ctx);
+		break;
+	}
+
+	case BPF_JMP | BPF_JA:
+		rvoff = rv_offset(i, off, ctx);
+		emit_jump_and_link(RV_REG_ZERO, rvoff, false, ctx);
+		break;
+
+	case BPF_JMP | BPF_CALL:
+	{
+		bool fixed;
+		int ret;
+		u64 addr;
+
+		ret = bpf_jit_get_func_addr(ctx->prog, insn, extra_pass, &addr,
+					    &fixed);
+		if (ret < 0)
+			return ret;
+		emit_call(fixed, addr, ctx);
+		break;
+	}
+
+	case BPF_JMP | BPF_TAIL_CALL:
+		if (emit_bpf_tail_call(i, ctx))
+			return -1;
+		break;
+
+	case BPF_JMP | BPF_JEQ | BPF_X:
+	case BPF_JMP | BPF_JEQ | BPF_K:
+	case BPF_JMP32 | BPF_JEQ | BPF_X:
+	case BPF_JMP32 | BPF_JEQ | BPF_K:
+
+	case BPF_JMP | BPF_JNE | BPF_X:
+	case BPF_JMP | BPF_JNE | BPF_K:
+	case BPF_JMP32 | BPF_JNE | BPF_X:
+	case BPF_JMP32 | BPF_JNE | BPF_K:
+
+	case BPF_JMP | BPF_JLE | BPF_X:
+	case BPF_JMP | BPF_JLE | BPF_K:
+	case BPF_JMP32 | BPF_JLE | BPF_X:
+	case BPF_JMP32 | BPF_JLE | BPF_K:
+
+	case BPF_JMP | BPF_JLT | BPF_X:
+	case BPF_JMP | BPF_JLT | BPF_K:
+	case BPF_JMP32 | BPF_JLT | BPF_X:
+	case BPF_JMP32 | BPF_JLT | BPF_K:
+
+	case BPF_JMP | BPF_JGE | BPF_X:
+	case BPF_JMP | BPF_JGE | BPF_K:
+	case BPF_JMP32 | BPF_JGE | BPF_X:
+	case BPF_JMP32 | BPF_JGE | BPF_K:
+
+	case BPF_JMP | BPF_JGT | BPF_X:
+	case BPF_JMP | BPF_JGT | BPF_K:
+	case BPF_JMP32 | BPF_JGT | BPF_X:
+	case BPF_JMP32 | BPF_JGT | BPF_K:
+
+	case BPF_JMP | BPF_JSLE | BPF_X:
+	case BPF_JMP | BPF_JSLE | BPF_K:
+	case BPF_JMP32 | BPF_JSLE | BPF_X:
+	case BPF_JMP32 | BPF_JSLE | BPF_K:
+
+	case BPF_JMP | BPF_JSLT | BPF_X:
+	case BPF_JMP | BPF_JSLT | BPF_K:
+	case BPF_JMP32 | BPF_JSLT | BPF_X:
+	case BPF_JMP32 | BPF_JSLT | BPF_K:
+
+	case BPF_JMP | BPF_JSGE | BPF_X:
+	case BPF_JMP | BPF_JSGE | BPF_K:
+	case BPF_JMP32 | BPF_JSGE | BPF_X:
+	case BPF_JMP32 | BPF_JSGE | BPF_K:
+
+	case BPF_JMP | BPF_JSGT | BPF_X:
+	case BPF_JMP | BPF_JSGT | BPF_K:
+	case BPF_JMP32 | BPF_JSGT | BPF_X:
+	case BPF_JMP32 | BPF_JSGT | BPF_K:
+
+	case BPF_JMP | BPF_JSET | BPF_X:
+	case BPF_JMP | BPF_JSET | BPF_K:
+	case BPF_JMP32 | BPF_JSET | BPF_X:
+	case BPF_JMP32 | BPF_JSET | BPF_K:
+		rvoff = rv_offset(i, off, ctx);
+		if (BPF_SRC(code) == BPF_K) {
+			s = ctx->ninsns;
+			emit_imm32(tmp2, imm, ctx);
+			src = tmp2;
+			e = ctx->ninsns;
+			rvoff -= (e - s) << 2;
+		}
+
+		if (is64)
+			emit_branch_r64(dst, src, rvoff, ctx, BPF_OP(code));
+		else
+			emit_branch_r32(dst, src, rvoff, ctx, BPF_OP(code));
+		break;
+
+	case BPF_JMP | BPF_EXIT:
+		if (i == ctx->prog->len - 1)
+			break;
+
+		rvoff = epilogue_offset(ctx);
+		emit_jump_and_link(RV_REG_ZERO, rvoff, false, ctx);
+		break;
+
+	case BPF_LD | BPF_IMM | BPF_DW:
+	{
+		struct bpf_insn insn1 = insn[1];
+		s32 imm_lo = imm;
+		s32 imm_hi = insn1.imm;
+		const s8 *rd = bpf_get_reg64(dst, tmp1, ctx);
+
+		emit_imm64(rd, imm_hi, imm_lo, ctx);
+		bpf_put_reg64(dst, rd, ctx);
+		return 1;
+	}
+
+	case BPF_LDX | BPF_MEM | BPF_B:
+	case BPF_LDX | BPF_MEM | BPF_H:
+	case BPF_LDX | BPF_MEM | BPF_W:
+	case BPF_LDX | BPF_MEM | BPF_DW:
+		if (emit_load_r64(dst, src, off, ctx, BPF_SIZE(code)))
+			return -1;
+		break;
+
+	case BPF_ST | BPF_MEM | BPF_B:
+	case BPF_ST | BPF_MEM | BPF_H:
+	case BPF_ST | BPF_MEM | BPF_W:
+	case BPF_ST | BPF_MEM | BPF_DW:
+
+	case BPF_STX | BPF_MEM | BPF_B:
+	case BPF_STX | BPF_MEM | BPF_H:
+	case BPF_STX | BPF_MEM | BPF_W:
+	case BPF_STX | BPF_MEM | BPF_DW:
+	case BPF_STX | BPF_XADD | BPF_W:
+		if (BPF_CLASS(code) == BPF_ST) {
+			emit_imm32(tmp2, imm, ctx);
+			src = tmp2;
+		}
+
+		if (emit_store_r64(dst, src, off, ctx, BPF_SIZE(code),
+				   BPF_MODE(code)))
+			return -1;
+		break;
+
+	/* No hardware support for 8-byte atomics in RV32. */
+	case BPF_STX | BPF_XADD | BPF_DW:
+		/* Fallthrough. */
+
+notsupported:
+		pr_info_once("bpf-jit: not supported: opcode %02x ***\n", code);
+		return -EFAULT;
+
+	default:
+		pr_err("bpf-jit: unknown opcode %02x\n", code);
+		return -EINVAL;
+	}
+
+	return 0;
+}
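Editor's note: the BPF_FROM_BE 64-bit case above is the interesting one on RV32. With no 64-bit registers, the JIT swaps the two 32-bit halves via T0 and then byte-swaps each half. The following is a standalone user-space sketch (plain C, not kernel code; the lo(rd)/hi(rd) register halves are modeled as two uint32_t variables) that checks this decomposition against the compiler's bswap64:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t lo = 0x44332211, hi = 0x88776655;	/* models lo(rd)/hi(rd) */
	uint64_t val = ((uint64_t)hi << 32) | lo;

	/* Swap upper and lower halves (the three rv_addi moves via T0). */
	uint32_t t0 = lo;
	lo = hi;
	hi = t0;

	/* Swap each half (emit_rev32 on both registers). */
	lo = __builtin_bswap32(lo);
	hi = __builtin_bswap32(hi);

	uint64_t swapped = ((uint64_t)hi << 32) | lo;
	printf("%016llx -> %016llx (bswap64: %016llx)\n",
	       (unsigned long long)val, (unsigned long long)swapped,
	       (unsigned long long)__builtin_bswap64(val));
	return 0;
}

Both printed results are 0x1122334455667788, confirming that half-swap plus per-half byte-swap equals a full 64-bit byte-swap.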
+
+void bpf_jit_build_prologue(struct rv_jit_context *ctx)
+{
+	/* Make space to save 9 registers: ra, fp, s1--s7. */
+	int stack_adjust = 9 * sizeof(u32), store_offset, bpf_stack_adjust;
+	const s8 *fp = bpf2rv32[BPF_REG_FP];
+	const s8 *r1 = bpf2rv32[BPF_REG_1];
+
+	bpf_stack_adjust = round_up(ctx->prog->aux->stack_depth, 16);
+	stack_adjust += bpf_stack_adjust;
+
+	store_offset = stack_adjust - 4;
+
+	stack_adjust += 4 * BPF_JIT_SCRATCH_REGS;
+
+	/*
+	 * The first instruction sets the tail-call-counter (TCC) register.
+	 * This instruction is skipped by tail calls.
+	 */
+	emit(rv_addi(RV_REG_TCC, RV_REG_ZERO, MAX_TAIL_CALL_CNT), ctx);
+
+	emit(rv_addi(RV_REG_SP, RV_REG_SP, -stack_adjust), ctx);
+
+	/* Save callee-save registers. */
+	emit(rv_sw(RV_REG_SP, store_offset - 0, RV_REG_RA), ctx);
+	emit(rv_sw(RV_REG_SP, store_offset - 4, RV_REG_FP), ctx);
+	emit(rv_sw(RV_REG_SP, store_offset - 8, RV_REG_S1), ctx);
+	emit(rv_sw(RV_REG_SP, store_offset - 12, RV_REG_S2), ctx);
+	emit(rv_sw(RV_REG_SP, store_offset - 16, RV_REG_S3), ctx);
+	emit(rv_sw(RV_REG_SP, store_offset - 20, RV_REG_S4), ctx);
+	emit(rv_sw(RV_REG_SP, store_offset - 24, RV_REG_S5), ctx);
+	emit(rv_sw(RV_REG_SP, store_offset - 28, RV_REG_S6), ctx);
+	emit(rv_sw(RV_REG_SP, store_offset - 32, RV_REG_S7), ctx);
+
+	/* Set fp: used as the base address for stacked BPF registers. */
+	emit(rv_addi(RV_REG_FP, RV_REG_SP, stack_adjust), ctx);
+
+	/* Set up BPF stack pointer. */
+	emit(rv_addi(lo(fp), RV_REG_SP, bpf_stack_adjust), ctx);
+	emit(rv_addi(hi(fp), RV_REG_ZERO, 0), ctx);
+
+	/* Set up context pointer. */
+	emit(rv_addi(lo(r1), RV_REG_A0, 0), ctx);
+	emit(rv_addi(hi(r1), RV_REG_ZERO, 0), ctx);
+
+	ctx->stack_size = stack_adjust;
+}
+
+void bpf_jit_build_epilogue(struct rv_jit_context *ctx)
+{
+	__build_epilogue(false, ctx);
+}
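Editor's note: the frame arithmetic in this prologue can be tried out in isolation. The sketch below is user-space C, with BPF_JIT_SCRATCH_REGS assumed to be 4 slots purely for the demo (the real constant lives in the RV32 JIT sources); it mirrors the stack_adjust/store_offset computation above:

#include <stdio.h>

#define ROUND_UP(x, a)		(((x) + (a) - 1) & ~((a) - 1))
#define BPF_JIT_SCRATCH_REGS	4	/* assumption for the demo */

int main(void)
{
	int stack_depth = 24;			/* example BPF stack usage */
	int stack_adjust = 9 * 4;		/* ra, fp, s1--s7 */
	int bpf_stack_adjust = ROUND_UP(stack_depth, 16);
	int store_offset;

	stack_adjust += bpf_stack_adjust;
	store_offset = stack_adjust - 4;	/* highest save slot (ra) */
	stack_adjust += 4 * BPF_JIT_SCRATCH_REGS;

	printf("frame size %d, ra saved at sp+%d, BPF fp = sp+%d\n",
	       stack_adjust, store_offset, bpf_stack_adjust);
	return 0;
}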
diff --git a/arch/riscv/net/bpf_jit_comp.c b/arch/riscv/net/bpf_jit_comp64.c
index 483f4ad7f4dc..cc1985d8750a 100644
--- a/arch/riscv/net/bpf_jit_comp.c
+++ b/arch/riscv/net/bpf_jit_comp64.c
@@ -7,42 +7,7 @@
 #include <linux/bpf.h>
 #include <linux/filter.h>
-#include <asm/cacheflush.h>
-
-enum {
-	RV_REG_ZERO =	0,	/* The constant value 0 */
-	RV_REG_RA =	1,	/* Return address */
-	RV_REG_SP =	2,	/* Stack pointer */
-	RV_REG_GP =	3,	/* Global pointer */
-	RV_REG_TP =	4,	/* Thread pointer */
-	RV_REG_T0 =	5,	/* Temporaries */
-	RV_REG_T1 =	6,
-	RV_REG_T2 =	7,
-	RV_REG_FP =	8,
-	RV_REG_S1 =	9,	/* Saved registers */
-	RV_REG_A0 =	10,	/* Function argument/return values */
-	RV_REG_A1 =	11,	/* Function arguments */
-	RV_REG_A2 =	12,
-	RV_REG_A3 =	13,
-	RV_REG_A4 =	14,
-	RV_REG_A5 =	15,
-	RV_REG_A6 =	16,
-	RV_REG_A7 =	17,
-	RV_REG_S2 =	18,	/* Saved registers */
-	RV_REG_S3 =	19,
-	RV_REG_S4 =	20,
-	RV_REG_S5 =	21,
-	RV_REG_S6 =	22,
-	RV_REG_S7 =	23,
-	RV_REG_S8 =	24,
-	RV_REG_S9 =	25,
-	RV_REG_S10 =	26,
-	RV_REG_S11 =	27,
-	RV_REG_T3 =	28,	/* Temporaries */
-	RV_REG_T4 =	29,
-	RV_REG_T5 =	30,
-	RV_REG_T6 =	31,
-};
+#include "bpf_jit.h"
 
 #define RV_REG_TCC		RV_REG_A6
 #define RV_REG_TCC_SAVED	RV_REG_S6 /* Store A6 in S6 if program do calls */
@@ -73,22 +38,6 @@ enum {
 	RV_CTX_F_SEEN_S6 =	RV_REG_S6,
 };
 
-struct rv_jit_context {
-	struct bpf_prog *prog;
-	u32 *insns;		/* RV insns */
-	int ninsns;
-	int epilogue_offset;
-	int *offset;		/* BPF to RV */
-	unsigned long flags;
-	int stack_size;
-};
-
-struct rv_jit_data {
-	struct bpf_binary_header *header;
-	u8 *image;
-	struct rv_jit_context ctx;
-};
-
 static u8 bpf_to_rv_reg(int bpf_reg, struct rv_jit_context *ctx)
 {
 	u8 reg = regmap[bpf_reg];
 
@@ -156,346 +105,11 @@ static u8 rv_tail_call_reg(struct rv_jit_context *ctx)
 	return RV_REG_A6;
 }
 
-static void emit(const u32 insn, struct rv_jit_context *ctx)
-{
-	if (ctx->insns)
-		ctx->insns[ctx->ninsns] = insn;
-
-	ctx->ninsns++;
-}
-
-static u32 rv_r_insn(u8 funct7, u8 rs2, u8 rs1, u8 funct3, u8 rd, u8 opcode)
-{
-	return (funct7 << 25) | (rs2 << 20) | (rs1 << 15) | (funct3 << 12) |
-		(rd << 7) | opcode;
-}
-
-static u32 rv_i_insn(u16 imm11_0, u8 rs1, u8 funct3, u8 rd, u8 opcode)
-{
-	return (imm11_0 << 20) | (rs1 << 15) | (funct3 << 12) | (rd << 7) |
-		opcode;
-}
-
-static u32 rv_s_insn(u16 imm11_0, u8 rs2, u8 rs1, u8 funct3, u8 opcode)
-{
-	u8 imm11_5 = imm11_0 >> 5, imm4_0 = imm11_0 & 0x1f;
-
-	return (imm11_5 << 25) | (rs2 << 20) | (rs1 << 15) | (funct3 << 12) |
-		(imm4_0 << 7) | opcode;
-}
-
-static u32 rv_sb_insn(u16 imm12_1, u8 rs2, u8 rs1, u8 funct3, u8 opcode)
-{
-	u8 imm12 = ((imm12_1 & 0x800) >> 5) | ((imm12_1 & 0x3f0) >> 4);
-	u8 imm4_1 = ((imm12_1 & 0xf) << 1) | ((imm12_1 & 0x400) >> 10);
-
-	return (imm12 << 25) | (rs2 << 20) | (rs1 << 15) | (funct3 << 12) |
-		(imm4_1 << 7) | opcode;
-}
-
-static u32 rv_u_insn(u32 imm31_12, u8 rd, u8 opcode)
-{
-	return (imm31_12 << 12) | (rd << 7) | opcode;
-}
-
-static u32 rv_uj_insn(u32 imm20_1, u8 rd, u8 opcode)
-{
-	u32 imm;
-
-	imm = (imm20_1 & 0x80000) | ((imm20_1 & 0x3ff) << 9) |
-		((imm20_1 & 0x400) >> 2) | ((imm20_1 & 0x7f800) >> 11);
-
-	return (imm << 12) | (rd << 7) | opcode;
-}
-
-static u32 rv_amo_insn(u8 funct5, u8 aq, u8 rl, u8 rs2, u8 rs1,
-		       u8 funct3, u8 rd, u8 opcode)
-{
-	u8 funct7 = (funct5 << 2) | (aq << 1) | rl;
-
-	return rv_r_insn(funct7, rs2, rs1, funct3, rd, opcode);
-}
-
-static u32 rv_addiw(u8 rd, u8 rs1, u16 imm11_0)
-{
-	return rv_i_insn(imm11_0, rs1, 0, rd, 0x1b);
-}
-
-static u32 rv_addi(u8 rd, u8 rs1, u16 imm11_0)
-{
-	return rv_i_insn(imm11_0, rs1, 0, rd, 0x13);
-}
-
-static u32 rv_addw(u8 rd, u8 rs1, u8 rs2)
-{
-	return rv_r_insn(0, rs2, rs1, 0, rd, 0x3b);
-}
-
-static u32 rv_add(u8 rd, u8 rs1, u8 rs2)
-{
-	return rv_r_insn(0, rs2, rs1, 0, rd, 0x33);
-}
-
-static u32 rv_subw(u8 rd, u8 rs1, u8 rs2)
-{
-	return rv_r_insn(0x20, rs2, rs1, 0, rd, 0x3b);
-}
-
-static u32 rv_sub(u8 rd, u8 rs1, u8 rs2)
-{
-	return rv_r_insn(0x20, rs2, rs1, 0, rd, 0x33);
-}
-
-static u32 rv_and(u8 rd, u8 rs1, u8 rs2)
-{
-	return rv_r_insn(0, rs2, rs1, 7, rd, 0x33);
-}
-
-static u32 rv_or(u8 rd, u8 rs1, u8 rs2)
-{
-	return rv_r_insn(0, rs2, rs1, 6, rd, 0x33);
-}
-
-static u32 rv_xor(u8 rd, u8 rs1, u8 rs2)
-{
-	return rv_r_insn(0, rs2, rs1, 4, rd, 0x33);
-}
-
-static u32 rv_mulw(u8 rd, u8 rs1, u8 rs2)
-{
-	return rv_r_insn(1, rs2, rs1, 0, rd, 0x3b);
-}
-
-static u32 rv_mul(u8 rd, u8 rs1, u8 rs2)
-{
-	return rv_r_insn(1, rs2, rs1, 0, rd, 0x33);
-}
-
-static u32 rv_divuw(u8 rd, u8 rs1, u8 rs2)
-{
-	return rv_r_insn(1, rs2, rs1, 5, rd, 0x3b);
-}
-
-static u32 rv_divu(u8 rd, u8 rs1, u8 rs2)
-{
-	return rv_r_insn(1, rs2, rs1, 5, rd, 0x33);
-}
-
-static u32 rv_remuw(u8 rd, u8 rs1, u8 rs2)
-{
-	return rv_r_insn(1, rs2, rs1, 7, rd, 0x3b);
-}
-
-static u32 rv_remu(u8 rd, u8 rs1, u8 rs2)
-{
-	return rv_r_insn(1, rs2, rs1, 7, rd, 0x33);
-}
-
-static u32 rv_sllw(u8 rd, u8 rs1, u8 rs2)
-{
-	return rv_r_insn(0, rs2, rs1, 1, rd, 0x3b);
-}
-
-static u32 rv_sll(u8 rd, u8 rs1, u8 rs2)
-{
-	return rv_r_insn(0, rs2, rs1, 1, rd, 0x33);
-}
-
-static u32 rv_srlw(u8 rd, u8 rs1, u8 rs2)
-{
-	return rv_r_insn(0, rs2, rs1, 5, rd, 0x3b);
-}
-
-static u32 rv_srl(u8 rd, u8 rs1, u8 rs2)
-{
-	return rv_r_insn(0, rs2, rs1, 5, rd, 0x33);
-}
-
-static u32 rv_sraw(u8 rd, u8 rs1, u8 rs2)
-{
-	return rv_r_insn(0x20, rs2, rs1, 5, rd, 0x3b);
-}
-
-static u32 rv_sra(u8 rd, u8 rs1, u8 rs2)
-{
-	return rv_r_insn(0x20, rs2, rs1, 5, rd, 0x33);
-}
-
-static u32 rv_lui(u8 rd, u32 imm31_12)
-{
-	return rv_u_insn(imm31_12, rd, 0x37);
-}
-
-static u32 rv_slli(u8 rd, u8 rs1, u16 imm11_0)
-{
-	return rv_i_insn(imm11_0, rs1, 1, rd, 0x13);
-}
-
-static u32 rv_andi(u8 rd, u8 rs1, u16 imm11_0)
-{
-	return rv_i_insn(imm11_0, rs1, 7, rd, 0x13);
-}
-
-static u32 rv_ori(u8 rd, u8 rs1, u16 imm11_0)
-{
-	return rv_i_insn(imm11_0, rs1, 6, rd, 0x13);
-}
-
-static u32 rv_xori(u8 rd, u8 rs1, u16 imm11_0)
-{
-	return rv_i_insn(imm11_0, rs1, 4, rd, 0x13);
-}
-
-static u32 rv_slliw(u8 rd, u8 rs1, u16 imm11_0)
-{
-	return rv_i_insn(imm11_0, rs1, 1, rd, 0x1b);
-}
-
-static u32 rv_srliw(u8 rd, u8 rs1, u16 imm11_0)
-{
-	return rv_i_insn(imm11_0, rs1, 5, rd, 0x1b);
-}
-
-static u32 rv_srli(u8 rd, u8 rs1, u16 imm11_0)
-{
-	return rv_i_insn(imm11_0, rs1, 5, rd, 0x13);
-}
-
-static u32 rv_sraiw(u8 rd, u8 rs1, u16 imm11_0)
-{
-	return rv_i_insn(0x400 | imm11_0, rs1, 5, rd, 0x1b);
-}
-
-static u32 rv_srai(u8 rd, u8 rs1, u16 imm11_0)
-{
-	return rv_i_insn(0x400 | imm11_0, rs1, 5, rd, 0x13);
-}
-
-static u32 rv_jal(u8 rd, u32 imm20_1)
-{
-	return rv_uj_insn(imm20_1, rd, 0x6f);
-}
-
-static u32 rv_jalr(u8 rd, u8 rs1, u16 imm11_0)
-{
-	return rv_i_insn(imm11_0, rs1, 0, rd, 0x67);
-}
-
-static u32 rv_beq(u8 rs1, u8 rs2, u16 imm12_1)
-{
-	return rv_sb_insn(imm12_1, rs2, rs1, 0, 0x63);
-}
-
-static u32 rv_bltu(u8 rs1, u8 rs2, u16 imm12_1)
-{
-	return rv_sb_insn(imm12_1, rs2, rs1, 6, 0x63);
-}
-
-static u32 rv_bgeu(u8 rs1, u8 rs2, u16 imm12_1)
-{
-	return rv_sb_insn(imm12_1, rs2, rs1, 7, 0x63);
-}
-
-static u32 rv_bne(u8 rs1, u8 rs2, u16 imm12_1)
-{
-	return rv_sb_insn(imm12_1, rs2, rs1, 1, 0x63);
-}
-
-static u32 rv_blt(u8 rs1, u8 rs2, u16 imm12_1)
-{
-	return rv_sb_insn(imm12_1, rs2, rs1, 4, 0x63);
-}
-
-static u32 rv_bge(u8 rs1, u8 rs2, u16 imm12_1)
-{
-	return rv_sb_insn(imm12_1, rs2, rs1, 5, 0x63);
-}
-
-static u32 rv_sb(u8 rs1, u16 imm11_0, u8 rs2)
-{
-	return rv_s_insn(imm11_0, rs2, rs1, 0, 0x23);
-}
-
-static u32 rv_sh(u8 rs1, u16 imm11_0, u8 rs2)
-{
-	return rv_s_insn(imm11_0, rs2, rs1, 1, 0x23);
-}
-
-static u32 rv_sw(u8 rs1, u16 imm11_0, u8 rs2)
-{
-	return rv_s_insn(imm11_0, rs2, rs1, 2, 0x23);
-}
-
-static u32 rv_sd(u8 rs1, u16 imm11_0, u8 rs2)
-{
-	return rv_s_insn(imm11_0, rs2, rs1, 3, 0x23);
-}
-
-static u32 rv_lbu(u8 rd, u16 imm11_0, u8 rs1)
-{
-	return rv_i_insn(imm11_0, rs1, 4, rd, 0x03);
-}
-
-static u32 rv_lhu(u8 rd, u16 imm11_0, u8 rs1)
-{
-	return rv_i_insn(imm11_0, rs1, 5, rd, 0x03);
-}
-
-static u32 rv_lwu(u8 rd, u16 imm11_0, u8 rs1)
-{
-	return rv_i_insn(imm11_0, rs1, 6, rd, 0x03);
-}
-
-static u32 rv_ld(u8 rd, u16 imm11_0, u8 rs1)
-{
-	return rv_i_insn(imm11_0, rs1, 3, rd, 0x03);
-}
-
-static u32 rv_amoadd_w(u8 rd, u8 rs2, u8 rs1, u8 aq, u8 rl)
-{
-	return rv_amo_insn(0, aq, rl, rs2, rs1, 2, rd, 0x2f);
-}
-
-static u32 rv_amoadd_d(u8 rd, u8 rs2, u8 rs1, u8 aq, u8 rl)
-{
-	return rv_amo_insn(0, aq, rl, rs2, rs1, 3, rd, 0x2f);
-}
-
-static u32 rv_auipc(u8 rd, u32 imm31_12)
-{
-	return rv_u_insn(imm31_12, rd, 0x17);
-}
-
-static bool is_12b_int(s64 val)
-{
-	return -(1 << 11) <= val && val < (1 << 11);
-}
-
-static bool is_13b_int(s64 val)
-{
-	return -(1 << 12) <= val && val < (1 << 12);
-}
-
-static bool is_21b_int(s64 val)
-{
-	return -(1L << 20) <= val && val < (1L << 20);
-}
-
 static bool is_32b_int(s64 val)
 {
 	return -(1L << 31) <= val && val < (1L << 31);
 }
 
-static int is_12b_check(int off, int insn)
-{
-	if (!is_12b_int(off)) {
-		pr_err("bpf-jit: insn=%d 12b < offset=%d not supported yet!\n",
-		       insn, (int)off);
-		return -1;
-	}
-	return 0;
-}
-
 static void emit_imm(u8 rd, s64 val, struct rv_jit_context *ctx)
 {
 	/* Note that the immediate from the add is sign-extended,
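Editor's note: the encoders deleted above (now shared between the RV32 and RV64 JITs via bpf_jit.h) are pure bit packers. Below is a minimal standalone copy of the I-type packer, included only to illustrate the field layout; it prints 00150513, the canonical encoding of "addi a0, a0, 1":

#include <stdint.h>
#include <stdio.h>

static uint32_t rv_i_insn(uint16_t imm11_0, uint8_t rs1, uint8_t funct3,
			  uint8_t rd, uint8_t opcode)
{
	/* imm[11:0] | rs1 | funct3 | rd | opcode, from bit 31 down to 0 */
	return ((uint32_t)imm11_0 << 20) | (rs1 << 15) | (funct3 << 12) |
		(rd << 7) | opcode;
}

int main(void)
{
	/* rd = rs1 = a0 (x10), funct3 = 0, opcode = OP-IMM (0x13). */
	printf("%08x\n", rv_i_insn(1, 10, 0, 10, 0x13));
	return 0;
}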
@@ -535,23 +149,6 @@ static void emit_imm(u8 rd, s64 val, struct rv_jit_context *ctx)
 	emit(rv_addi(rd, rd, lower), ctx);
 }
 
-static int rv_offset(int insn, int off, struct rv_jit_context *ctx)
-{
-	int from, to;
-
-	off++; /* BPF branch is from PC+1, RV is from PC */
-	from = (insn > 0) ? ctx->offset[insn - 1] : 0;
-	to = (insn + off > 0) ? ctx->offset[insn + off - 1] : 0;
-	return (to - from) << 2;
-}
-
-static int epilogue_offset(struct rv_jit_context *ctx)
-{
-	int to = ctx->epilogue_offset, from = ctx->ninsns;
-
-	return (to - from) << 2;
-}
-
 static void __build_epilogue(bool is_tail_call, struct rv_jit_context *ctx)
 {
 	int stack_adjust = ctx->stack_size, store_offset = stack_adjust - 8;
 
@@ -596,34 +193,6 @@ static void __build_epilogue(bool is_tail_call, struct rv_jit_context *ctx)
 			  ctx);
 }
 
-/* return -1 or inverted cond */
-static int invert_bpf_cond(u8 cond)
-{
-	switch (cond) {
-	case BPF_JEQ:
-		return BPF_JNE;
-	case BPF_JGT:
-		return BPF_JLE;
-	case BPF_JLT:
-		return BPF_JGE;
-	case BPF_JGE:
-		return BPF_JLT;
-	case BPF_JLE:
-		return BPF_JGT;
-	case BPF_JNE:
-		return BPF_JEQ;
-	case BPF_JSGT:
-		return BPF_JSLE;
-	case BPF_JSLT:
-		return BPF_JSGE;
-	case BPF_JSGE:
-		return BPF_JSLT;
-	case BPF_JSLE:
-		return BPF_JSGT;
-	}
-	return -1;
-}
-
 static void emit_bcc(u8 cond, u8 rd, u8 rs, int rvoff,
 		     struct rv_jit_context *ctx)
 {
 
@@ -855,8 +424,8 @@ static int emit_call(bool fixed, u64 addr, struct rv_jit_context *ctx)
 	return 0;
 }
 
-static int emit_insn(const struct bpf_insn *insn, struct rv_jit_context *ctx,
-		     bool extra_pass)
+int bpf_jit_emit_insn(const struct bpf_insn *insn, struct rv_jit_context *ctx,
+		      bool extra_pass)
 {
 	bool is64 = BPF_CLASS(insn->code) == BPF_ALU64 ||
 		    BPF_CLASS(insn->code) == BPF_JMP;
 
@@ -1434,7 +1003,7 @@ out_be:
 	return 0;
 }
 
-static void build_prologue(struct rv_jit_context *ctx)
+void bpf_jit_build_prologue(struct rv_jit_context *ctx)
 {
 	int stack_adjust = 0, store_offset, bpf_stack_adjust;
 
@@ -1515,175 +1084,11 @@ static void build_prologue(struct rv_jit_context *ctx)
 	ctx->stack_size = stack_adjust;
 }
 
-static void build_epilogue(struct rv_jit_context *ctx)
+void bpf_jit_build_epilogue(struct rv_jit_context *ctx)
 {
 	__build_epilogue(false, ctx);
 }
 
-static int build_body(struct rv_jit_context *ctx, bool extra_pass, int *offset)
-{
-	const struct bpf_prog *prog = ctx->prog;
-	int i;
-
-	for (i = 0; i < prog->len; i++) {
-		const struct bpf_insn *insn = &prog->insnsi[i];
-		int ret;
-
-		ret = emit_insn(insn, ctx, extra_pass);
-		if (ret > 0) {
-			i++;
-			if (offset)
-				offset[i] = ctx->ninsns;
-			continue;
-		}
-		if (offset)
-			offset[i] = ctx->ninsns;
-		if (ret)
-			return ret;
-	}
-	return 0;
-}
-
-static void bpf_fill_ill_insns(void *area, unsigned int size)
-{
-	memset(area, 0, size);
-}
-
-static void bpf_flush_icache(void *start, void *end)
-{
-	flush_icache_range((unsigned long)start, (unsigned long)end);
-}
-
-bool bpf_jit_needs_zext(void)
-{
-	return true;
-}
-
-struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
-{
-	bool tmp_blinded = false, extra_pass = false;
-	struct bpf_prog *tmp, *orig_prog = prog;
-	int pass = 0, prev_ninsns = 0, i;
-	struct rv_jit_data *jit_data;
-	unsigned int image_size = 0;
-	struct rv_jit_context *ctx;
-
-	if (!prog->jit_requested)
-		return orig_prog;
-
-	tmp = bpf_jit_blind_constants(prog);
-	if (IS_ERR(tmp))
-		return orig_prog;
-	if (tmp != prog) {
-		tmp_blinded = true;
-		prog = tmp;
-	}
-
-	jit_data = prog->aux->jit_data;
-	if (!jit_data) {
-		jit_data = kzalloc(sizeof(*jit_data), GFP_KERNEL);
-		if (!jit_data) {
-			prog = orig_prog;
-			goto out;
-		}
-		prog->aux->jit_data = jit_data;
-	}
-
-	ctx = &jit_data->ctx;
-
-	if (ctx->offset) {
-		extra_pass = true;
-		image_size = sizeof(u32) * ctx->ninsns;
-		goto skip_init_ctx;
-	}
-
-	ctx->prog = prog;
-	ctx->offset = kcalloc(prog->len, sizeof(int), GFP_KERNEL);
-	if (!ctx->offset) {
-		prog = orig_prog;
-		goto out_offset;
-	}
-	for (i = 0; i < prog->len; i++) {
-		prev_ninsns += 32;
-		ctx->offset[i] = prev_ninsns;
-	}
-
-	for (i = 0; i < 16; i++) {
-		pass++;
-		ctx->ninsns = 0;
-		if (build_body(ctx, extra_pass, ctx->offset)) {
-			prog = orig_prog;
-			goto out_offset;
-		}
-		build_prologue(ctx);
-		ctx->epilogue_offset = ctx->ninsns;
-		build_epilogue(ctx);
-
-		if (ctx->ninsns == prev_ninsns) {
-			if (jit_data->header)
-				break;
-
-			image_size = sizeof(u32) * ctx->ninsns;
-			jit_data->header =
-				bpf_jit_binary_alloc(image_size,
-						     &jit_data->image,
-						     sizeof(u32),
-						     bpf_fill_ill_insns);
-			if (!jit_data->header) {
-				prog = orig_prog;
-				goto out_offset;
-			}
-
-			ctx->insns = (u32 *)jit_data->image;
-			/* Now, when the image is allocated, the image
-			 * can potentially shrink more (auipc/jalr ->
-			 * jal).
-			 */
-		}
-		prev_ninsns = ctx->ninsns;
-	}
-
-	if (i == 16) {
-		pr_err("bpf-jit: image did not converge in <%d passes!\n", i);
-		bpf_jit_binary_free(jit_data->header);
-		prog = orig_prog;
-		goto out_offset;
-	}
-
-skip_init_ctx:
-	pass++;
-	ctx->ninsns = 0;
-
-	build_prologue(ctx);
-	if (build_body(ctx, extra_pass, NULL)) {
-		bpf_jit_binary_free(jit_data->header);
-		prog = orig_prog;
-		goto out_offset;
-	}
-	build_epilogue(ctx);
-
-	if (bpf_jit_enable > 1)
-		bpf_jit_dump(prog->len, image_size, pass, ctx->insns);
-
-	prog->bpf_func = (void *)ctx->insns;
-	prog->jited = 1;
-	prog->jited_len = image_size;
-
-	bpf_flush_icache(jit_data->header, ctx->insns + ctx->ninsns);
-
-	if (!prog->is_func || extra_pass) {
-out_offset:
-		kfree(ctx->offset);
-		kfree(jit_data);
-		prog->aux->jit_data = NULL;
-	}
-out:
-	if (tmp_blinded)
-		bpf_jit_prog_release_other(prog, prog == orig_prog ?
-					   tmp : orig_prog);
-	return prog;
-}
-
 void *bpf_jit_alloc_exec(unsigned long size)
 {
 	return __vmalloc_node_range(size, PAGE_SIZE, BPF_JIT_REGION_START,
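Editor's note: rv_offset(), deleted above and moved to the shared header, converts a BPF branch offset into a byte offset between emitted RISC-V instructions using the ctx->offset[] table, where offset[i] holds the number of RV instructions emitted up to and including BPF instruction i. The following is a user-space sketch of the same arithmetic with a made-up three-entry table:

#include <stdio.h>

static int rv_offset(const int *offset, int insn, int off)
{
	int from = insn > 0 ? offset[insn - 1] : 0;
	int to;

	off++;	/* BPF branches are relative to PC + 1, RV to PC */
	to = insn + off > 0 ? offset[insn + off - 1] : 0;
	return (to - from) << 2;	/* 4 bytes per RV instruction */
}

int main(void)
{
	/* Assume three BPF insns that emitted 2, 3 and 1 RV insns. */
	int offset[] = { 2, 5, 6 };

	/* A branch at BPF insn 0 with off = 1 targets BPF insn 2,
	 * whose RV code starts 5 RV insns (20 bytes) in.
	 */
	printf("rvoff = %d bytes\n", rv_offset(offset, 0, 1));
	return 0;
}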
diff --git a/arch/riscv/net/bpf_jit_core.c b/arch/riscv/net/bpf_jit_core.c
new file mode 100644
index 000000000000..709b94ece3ed
--- /dev/null
+++ b/arch/riscv/net/bpf_jit_core.c
@@ -0,0 +1,166 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Common functionality for RV32 and RV64 BPF JIT compilers
+ *
+ * Copyright (c) 2019 Björn Töpel <bjorn.topel@gmail.com>
+ *
+ */
+
+#include <linux/bpf.h>
+#include <linux/filter.h>
+#include "bpf_jit.h"
+
+/* Number of iterations to try until offsets converge. */
+#define NR_JIT_ITERATIONS	16
+
+static int build_body(struct rv_jit_context *ctx, bool extra_pass, int *offset)
+{
+	const struct bpf_prog *prog = ctx->prog;
+	int i;
+
+	for (i = 0; i < prog->len; i++) {
+		const struct bpf_insn *insn = &prog->insnsi[i];
+		int ret;
+
+		ret = bpf_jit_emit_insn(insn, ctx, extra_pass);
+		/* BPF_LD | BPF_IMM | BPF_DW: skip the next instruction. */
+		if (ret > 0)
+			i++;
+		if (offset)
+			offset[i] = ctx->ninsns;
+		if (ret < 0)
+			return ret;
+	}
+	return 0;
+}
+
+bool bpf_jit_needs_zext(void)
+{
+	return true;
+}
+
+struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
+{
+	bool tmp_blinded = false, extra_pass = false;
+	struct bpf_prog *tmp, *orig_prog = prog;
+	int pass = 0, prev_ninsns = 0, i;
+	struct rv_jit_data *jit_data;
+	struct rv_jit_context *ctx;
+	unsigned int image_size = 0;
+
+	if (!prog->jit_requested)
+		return orig_prog;
+
+	tmp = bpf_jit_blind_constants(prog);
+	if (IS_ERR(tmp))
+		return orig_prog;
+	if (tmp != prog) {
+		tmp_blinded = true;
+		prog = tmp;
+	}
+
+	jit_data = prog->aux->jit_data;
+	if (!jit_data) {
+		jit_data = kzalloc(sizeof(*jit_data), GFP_KERNEL);
+		if (!jit_data) {
+			prog = orig_prog;
+			goto out;
+		}
+		prog->aux->jit_data = jit_data;
+	}
+
+	ctx = &jit_data->ctx;
+
+	if (ctx->offset) {
+		extra_pass = true;
+		image_size = sizeof(u32) * ctx->ninsns;
+		goto skip_init_ctx;
+	}
+
+	ctx->prog = prog;
+	ctx->offset = kcalloc(prog->len, sizeof(int), GFP_KERNEL);
+	if (!ctx->offset) {
+		prog = orig_prog;
+		goto out_offset;
+	}
+	for (i = 0; i < prog->len; i++) {
+		prev_ninsns += 32;
+		ctx->offset[i] = prev_ninsns;
+	}
+
+	for (i = 0; i < NR_JIT_ITERATIONS; i++) {
+		pass++;
+		ctx->ninsns = 0;
+		if (build_body(ctx, extra_pass, ctx->offset)) {
+			prog = orig_prog;
+			goto out_offset;
+		}
+		bpf_jit_build_prologue(ctx);
+		ctx->epilogue_offset = ctx->ninsns;
+		bpf_jit_build_epilogue(ctx);
+
+		if (ctx->ninsns == prev_ninsns) {
+			if (jit_data->header)
+				break;
+
+			image_size = sizeof(u32) * ctx->ninsns;
+			jit_data->header =
+				bpf_jit_binary_alloc(image_size,
+						     &jit_data->image,
+						     sizeof(u32),
+						     bpf_fill_ill_insns);
+			if (!jit_data->header) {
+				prog = orig_prog;
+				goto out_offset;
+			}
+
+			ctx->insns = (u32 *)jit_data->image;
+			/*
+			 * Now, when the image is allocated, the image can
+			 * potentially shrink more (auipc/jalr -> jal).
+			 */
+		}
+		prev_ninsns = ctx->ninsns;
+	}
+
+	if (i == NR_JIT_ITERATIONS) {
+		pr_err("bpf-jit: image did not converge in <%d passes!\n", i);
+		bpf_jit_binary_free(jit_data->header);
+		prog = orig_prog;
+		goto out_offset;
+	}
+
+skip_init_ctx:
+	pass++;
+	ctx->ninsns = 0;
+
+	bpf_jit_build_prologue(ctx);
+	if (build_body(ctx, extra_pass, NULL)) {
+		bpf_jit_binary_free(jit_data->header);
+		prog = orig_prog;
+		goto out_offset;
+	}
+	bpf_jit_build_epilogue(ctx);
+
+	if (bpf_jit_enable > 1)
+		bpf_jit_dump(prog->len, image_size, pass, ctx->insns);
+
+	prog->bpf_func = (void *)ctx->insns;
+	prog->jited = 1;
+	prog->jited_len = image_size;
+
+	bpf_flush_icache(jit_data->header, ctx->insns + ctx->ninsns);
+
+	if (!prog->is_func || extra_pass) {
+out_offset:
+		kfree(ctx->offset);
+		kfree(jit_data);
+		prog->aux->jit_data = NULL;
+	}
+out:
+
+	if (tmp_blinded)
+		bpf_jit_prog_release_other(prog, prog == orig_prog ?
+					   tmp : orig_prog);
+	return prog;
+}
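Editor's note: the NR_JIT_ITERATIONS loop above is a fixed-point iteration. Offsets start with a pessimistic 32-instruction-per-insn guess, jumps shrink (auipc/jalr collapsing to jal) as offsets tighten, and emission repeats until the instruction count stops changing. Below is a toy user-space model of that convergence; the cost rule (2 units for a far jump, 1 for a near one) is invented for the demo and stands in for the real encodings:

#include <stdio.h>

#define N 8

int main(void)
{
	int offset[N], prev_total = 32 * N, total = 0, i, pass;

	for (i = 0; i < N; i++)		/* pessimistic first guess */
		offset[i] = 32 * (i + 1);

	for (pass = 0; pass < 16; pass++) {
		total = 0;
		for (i = 0; i < N; i++) {
			/* Each "insn" jumps to the end of the image. */
			int span = offset[N - 1] - (i ? offset[i - 1] : 0);

			total += span > 100 ? 2 : 1;	/* far vs near jump */
			offset[i] = total;
		}
		if (total == prev_total)
			break;
		prev_total = total;
	}
	printf("converged to %d units after %d passes\n", total, pass + 1);
	return 0;
}

The key property the kernel loop relies on is the same one visible here: code can only shrink between passes, so the size sequence is monotone and must reach a fixed point.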
diff --git a/arch/s390/include/asm/qdio.h b/arch/s390/include/asm/qdio.h
index 1e3517b0518b..e577f8533009 100644
--- a/arch/s390/include/asm/qdio.h
+++ b/arch/s390/include/asm/qdio.h
@@ -338,7 +338,7 @@ typedef void qdio_handler_t(struct ccw_device *, unsigned int, int,
  * @no_output_qs: number of output queues
  * @input_handler: handler to be called for input queues
  * @output_handler: handler to be called for output queues
- * @queue_start_poll_array: polling handlers (one per input queue or NULL)
+ * @irq_poll: Data IRQ polling handler (NULL when not supported)
  * @scan_threshold: # of in-use buffers that triggers scan on output queue
  * @int_parm: interruption parameter
  * @input_sbal_addr_array: address of no_input_qs * 128 pointers
@@ -359,8 +359,7 @@ struct qdio_initialize {
 	unsigned int no_output_qs;
 	qdio_handler_t *input_handler;
 	qdio_handler_t *output_handler;
-	void (**queue_start_poll_array) (struct ccw_device *, int,
-					 unsigned long);
+	void (*irq_poll)(struct ccw_device *cdev, unsigned long data);
 	unsigned int scan_threshold;
 	unsigned long int_parm;
 	struct qdio_buffer **input_sbal_addr_array;
@@ -415,8 +414,8 @@ extern int qdio_activate(struct ccw_device *);
 extern void qdio_release_aob(struct qaob *);
 extern int do_QDIO(struct ccw_device *, unsigned int, int, unsigned int,
 		   unsigned int);
-extern int qdio_start_irq(struct ccw_device *, int);
-extern int qdio_stop_irq(struct ccw_device *, int);
+extern int qdio_start_irq(struct ccw_device *cdev);
+extern int qdio_stop_irq(struct ccw_device *cdev);
 extern int qdio_get_next_buffers(struct ccw_device *, int, int *, int *);
 extern int qdio_inspect_queue(struct ccw_device *cdev, unsigned int nr,
 			      bool is_input, unsigned int *bufnr,
diff --git a/arch/um/drivers/vector_kern.c b/arch/um/drivers/vector_kern.c
index 0ff86391f77d..e98304d0219e 100644
--- a/arch/um/drivers/vector_kern.c
+++ b/arch/um/drivers/vector_kern.c
@@ -1508,6 +1508,7 @@ static int vector_set_coalesce(struct net_device *netdev,
 }
 
 static const struct ethtool_ops vector_net_ethtool_ops = {
+	.supported_coalesce_params = ETHTOOL_COALESCE_TX_USECS,
 	.get_drvinfo	= vector_net_get_drvinfo,
 	.get_link	= ethtool_op_get_link,
 	.get_ts_info	= ethtool_op_get_ts_info,
diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
index 8ae0272c1c51..de73992b8432 100644
--- a/arch/x86/mm/init_32.c
+++ b/arch/x86/mm/init_32.c
@@ -238,7 +238,11 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
 	}
 }
 
-static inline int is_kernel_text(unsigned long addr)
+/*
+ * The <linux/kallsyms.h> already defines is_kernel_text,
+ * using '__' prefix not to get in conflict.
+ */
+static inline int __is_kernel_text(unsigned long addr)
 {
 	if (addr >= (unsigned long)_text && addr <= (unsigned long)__init_end)
 		return 1;
@@ -328,8 +332,8 @@ repeat:
 				addr2 = (pfn + PTRS_PER_PTE-1) * PAGE_SIZE +
 					PAGE_OFFSET + PAGE_SIZE-1;
 
-				if (is_kernel_text(addr) ||
-				    is_kernel_text(addr2))
+				if (__is_kernel_text(addr) ||
+				    __is_kernel_text(addr2))
 					prot = PAGE_KERNEL_LARGE_EXEC;
 
 				pages_2m++;
@@ -354,7 +358,7 @@ repeat:
 				 */
 				pgprot_t init_prot = __pgprot(PTE_IDENT_ATTR);
 
-				if (is_kernel_text(addr))
+				if (__is_kernel_text(addr))
 					prot = PAGE_KERNEL_EXEC;
 
 				pages_4k++;
@@ -843,7 +847,7 @@ static void mark_nxdata_nx(void)
 	 */
 	unsigned long start = PFN_ALIGN(_etext);
 	/*
-	 * This comes from is_kernel_text upper limit. Also HPAGE where used:
+	 * This comes from __is_kernel_text upper limit. Also HPAGE where used:
 	 */
 	unsigned long size = (((unsigned long)__init_end + HPAGE_SIZE) & HPAGE_MASK) - start;
diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
index 9ba08e9abc09..5ea7c2cf7ab4 100644
--- a/arch/x86/net/bpf_jit_comp.c
+++ b/arch/x86/net/bpf_jit_comp.c
@@ -1361,37 +1361,140 @@ static void restore_regs(const struct btf_func_model *m, u8 **prog, int nr_args,
 			 -(stack_size - i * 8));
 }
 
+static int invoke_bpf_prog(const struct btf_func_model *m, u8 **pprog,
+			   struct bpf_prog *p, int stack_size, bool mod_ret)
+{
+	u8 *prog = *pprog;
+	int cnt = 0;
+
+	if (emit_call(&prog, __bpf_prog_enter, prog))
+		return -EINVAL;
+	/* remember prog start time returned by __bpf_prog_enter */
+	emit_mov_reg(&prog, true, BPF_REG_6, BPF_REG_0);
+
+	/* arg1: lea rdi, [rbp - stack_size] */
+	EMIT4(0x48, 0x8D, 0x7D, -stack_size);
+	/* arg2: progs[i]->insnsi for interpreter */
+	if (!p->jited)
+		emit_mov_imm64(&prog, BPF_REG_2,
+			       (long) p->insnsi >> 32,
+			       (u32) (long) p->insnsi);
+	/* call JITed bpf program or interpreter */
+	if (emit_call(&prog, p->bpf_func, prog))
+		return -EINVAL;
+
+	/* BPF_TRAMP_MODIFY_RETURN trampolines can modify the return
+	 * of the previous call which is then passed on the stack to
+	 * the next BPF program.
+	 */
+	if (mod_ret)
+		emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -8);
+
+	/* arg1: mov rdi, progs[i] */
+	emit_mov_imm64(&prog, BPF_REG_1, (long) p >> 32,
+		       (u32) (long) p);
+	/* arg2: mov rsi, rbx <- start time in nsec */
+	emit_mov_reg(&prog, true, BPF_REG_2, BPF_REG_6);
+	if (emit_call(&prog, __bpf_prog_exit, prog))
+		return -EINVAL;
+
+	*pprog = prog;
+	return 0;
+}
+
+static void emit_nops(u8 **pprog, unsigned int len)
+{
+	unsigned int i, noplen;
+	u8 *prog = *pprog;
+	int cnt = 0;
+
+	while (len > 0) {
+		noplen = len;
+
+		if (noplen > ASM_NOP_MAX)
+			noplen = ASM_NOP_MAX;
+
+		for (i = 0; i < noplen; i++)
+			EMIT1(ideal_nops[noplen][i]);
+		len -= noplen;
+	}
+
+	*pprog = prog;
+}
+
+static void emit_align(u8 **pprog, u32 align)
+{
+	u8 *target, *prog = *pprog;
+
+	target = PTR_ALIGN(prog, align);
+	if (target != prog)
+		emit_nops(&prog, target - prog);
+
+	*pprog = prog;
+}
+
+static int emit_cond_near_jump(u8 **pprog, void *func, void *ip, u8 jmp_cond)
+{
+	u8 *prog = *pprog;
+	int cnt = 0;
+	s64 offset;
+
+	offset = func - (ip + 2 + 4);
+	if (!is_simm32(offset)) {
+		pr_err("Target %p is out of range\n", func);
+		return -EINVAL;
+	}
+	EMIT2_off32(0x0F, jmp_cond + 0x10, offset);
+	*pprog = prog;
+	return 0;
+}
+
 static int invoke_bpf(const struct btf_func_model *m, u8 **pprog,
-		      struct bpf_prog **progs, int prog_cnt, int stack_size)
+		      struct bpf_tramp_progs *tp, int stack_size)
 {
+	int i;
 	u8 *prog = *pprog;
-	int cnt = 0, i;
 
-	for (i = 0; i < prog_cnt; i++) {
-		if (emit_call(&prog, __bpf_prog_enter, prog))
-			return -EINVAL;
-		/* remember prog start time returned by __bpf_prog_enter */
-		emit_mov_reg(&prog, true, BPF_REG_6, BPF_REG_0);
-
-		/* arg1: lea rdi, [rbp - stack_size] */
-		EMIT4(0x48, 0x8D, 0x7D, -stack_size);
-		/* arg2: progs[i]->insnsi for interpreter */
-		if (!progs[i]->jited)
-			emit_mov_imm64(&prog, BPF_REG_2,
-				       (long) progs[i]->insnsi >> 32,
-				       (u32) (long) progs[i]->insnsi);
-		/* call JITed bpf program or interpreter */
-		if (emit_call(&prog, progs[i]->bpf_func, prog))
+	for (i = 0; i < tp->nr_progs; i++) {
+		if (invoke_bpf_prog(m, &prog, tp->progs[i], stack_size, false))
 			return -EINVAL;
+	}
+	*pprog = prog;
+	return 0;
+}
+
+static int invoke_bpf_mod_ret(const struct btf_func_model *m, u8 **pprog,
+			      struct bpf_tramp_progs *tp, int stack_size,
+			      u8 **branches)
+{
+	u8 *prog = *pprog;
+	int i, cnt = 0;
 
-		/* arg1: mov rdi, progs[i] */
-		emit_mov_imm64(&prog, BPF_REG_1, (long) progs[i] >> 32,
-			       (u32) (long) progs[i]);
-		/* arg2: mov rsi, rbx <- start time in nsec */
-		emit_mov_reg(&prog, true, BPF_REG_2, BPF_REG_6);
-		if (emit_call(&prog, __bpf_prog_exit, prog))
+	/* The first fmod_ret program will receive a garbage return value.
+	 * Set this to 0 to avoid confusing the program.
+	 */
+	emit_mov_imm32(&prog, false, BPF_REG_0, 0);
+	emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -8);
+	for (i = 0; i < tp->nr_progs; i++) {
+		if (invoke_bpf_prog(m, &prog, tp->progs[i], stack_size, true))
 			return -EINVAL;
+
+		/* mod_ret prog stored return value into [rbp - 8]. Emit:
+		 * if (*(u64 *)(rbp - 8) != 0)
+		 *	goto do_fexit;
+		 */
+		/* cmp QWORD PTR [rbp - 0x8], 0x0 */
+		EMIT4(0x48, 0x83, 0x7d, 0xf8); EMIT1(0x00);
+
+		/* Save the location of the branch and Generate 6 nops
+		 * (4 bytes for an offset and 2 bytes for the jump) These nops
+		 * are replaced with a conditional jump once do_fexit (i.e. the
+		 * start of the fexit invocation) is finalized.
+		 */
+		branches[i] = prog;
+		emit_nops(&prog, 4 + 2);
 	}
+
 	*pprog = prog;
 	return 0;
 }
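Editor's note: invoke_bpf_mod_ret() uses a classic emit-then-backpatch pattern: reserve 6 nop bytes where the jne belongs, record the spot in branches[], and let the trampoline builder patch in the real jump once the do_fexit address is known. Below is a standalone byte-buffer sketch of the same pattern (plain C, not the kernel's EMIT*() machinery; buffer offsets are invented for the demo):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	uint8_t image[64];
	uint8_t *prog = image;
	uint8_t *branch;

	memset(image, 0x90, sizeof(image));	/* 0x90 = 1-byte nop */

	branch = prog;		/* branches[i] = prog */
	prog += 6;		/* emit_nops(&prog, 4 + 2) placeholder */
	prog += 10;		/* ...more emitted code... */

	/* Target known: patch "0F 85 rel32" (jne near) over the nops.
	 * rel32 is relative to the end of the 6-byte instruction,
	 * matching emit_cond_near_jump()'s func - (ip + 2 + 4).
	 */
	int32_t rel = (int32_t)(prog - (branch + 6));
	branch[0] = 0x0f;
	branch[1] = 0x85;	/* X86_JNE (0x75) + 0x10 */
	memcpy(branch + 2, &rel, sizeof(rel));

	printf("jne rel32 = %d\n", rel);
	return 0;
}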
@@ -1458,12 +1561,15 @@ static int invoke_bpf(const struct btf_func_model *m, u8 **pprog,
  */
 int arch_prepare_bpf_trampoline(void *image, void *image_end,
 				const struct btf_func_model *m, u32 flags,
-				struct bpf_prog **fentry_progs, int fentry_cnt,
-				struct bpf_prog **fexit_progs, int fexit_cnt,
+				struct bpf_tramp_progs *tprogs,
 				void *orig_call)
 {
-	int cnt = 0, nr_args = m->nr_args;
+	int ret, i, cnt = 0, nr_args = m->nr_args;
 	int stack_size = nr_args * 8;
+	struct bpf_tramp_progs *fentry = &tprogs[BPF_TRAMP_FENTRY];
+	struct bpf_tramp_progs *fexit = &tprogs[BPF_TRAMP_FEXIT];
+	struct bpf_tramp_progs *fmod_ret = &tprogs[BPF_TRAMP_MODIFY_RETURN];
+	u8 **branches = NULL;
 	u8 *prog;
 
 	/* x86-64 supports up to 6 arguments. 7+ can be added in the future */
@@ -1492,28 +1598,64 @@ int arch_prepare_bpf_trampoline(void *image, void *image_end,
 
 	save_regs(m, &prog, nr_args, stack_size);
 
-	if (fentry_cnt)
-		if (invoke_bpf(m, &prog, fentry_progs, fentry_cnt, stack_size))
+	if (fentry->nr_progs)
+		if (invoke_bpf(m, &prog, fentry, stack_size))
 			return -EINVAL;
 
+	if (fmod_ret->nr_progs) {
+		branches = kcalloc(fmod_ret->nr_progs, sizeof(u8 *),
+				   GFP_KERNEL);
+		if (!branches)
+			return -ENOMEM;
+
+		if (invoke_bpf_mod_ret(m, &prog, fmod_ret, stack_size,
+				       branches)) {
+			ret = -EINVAL;
+			goto cleanup;
+		}
+	}
+
 	if (flags & BPF_TRAMP_F_CALL_ORIG) {
-		if (fentry_cnt)
+		if (fentry->nr_progs || fmod_ret->nr_progs)
 			restore_regs(m, &prog, nr_args, stack_size);
 
 		/* call original function */
-		if (emit_call(&prog, orig_call, prog))
-			return -EINVAL;
+		if (emit_call(&prog, orig_call, prog)) {
+			ret = -EINVAL;
+			goto cleanup;
+		}
 		/* remember return value in a stack for bpf prog to access */
 		emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -8);
 	}
 
-	if (fexit_cnt)
-		if (invoke_bpf(m, &prog, fexit_progs, fexit_cnt, stack_size))
-			return -EINVAL;
+	if (fmod_ret->nr_progs) {
+		/* From Intel 64 and IA-32 Architectures Optimization
+		 * Reference Manual, 3.4.1.4 Code Alignment, Assembly/Compiler
+		 * Coding Rule 11: All branch targets should be 16-byte
+		 * aligned.
+		 */
+		emit_align(&prog, 16);
+		/* Update the branches saved in invoke_bpf_mod_ret with the
+		 * aligned address of do_fexit.
+		 */
+		for (i = 0; i < fmod_ret->nr_progs; i++)
+			emit_cond_near_jump(&branches[i], prog, branches[i],
+					    X86_JNE);
+	}
+
+	if (fexit->nr_progs)
+		if (invoke_bpf(m, &prog, fexit, stack_size)) {
+			ret = -EINVAL;
+			goto cleanup;
+		}
 
 	if (flags & BPF_TRAMP_F_RESTORE_REGS)
 		restore_regs(m, &prog, nr_args, stack_size);
 
+	/* This needs to be done regardless. If there were fmod_ret programs,
+	 * the return value is only updated on the stack and still needs to be
+	 * restored to R0.
+	 */
 	if (flags & BPF_TRAMP_F_CALL_ORIG)
 		/* restore original return value back into RAX */
 		emit_ldx(&prog, BPF_DW, BPF_REG_0, BPF_REG_FP, -8);
@@ -1525,45 +1667,15 @@ int arch_prepare_bpf_trampoline(void *image, void *image_end,
 	EMIT4(0x48, 0x83, 0xC4, 8); /* add rsp, 8 */
 	EMIT1(0xC3); /* ret */
 	/* Make sure the trampoline generation logic doesn't overflow */
-	if (WARN_ON_ONCE(prog > (u8 *)image_end - BPF_INSN_SAFETY))
-		return -EFAULT;
-	return prog - (u8 *)image;
-}
-
-static int emit_cond_near_jump(u8 **pprog, void *func, void *ip, u8 jmp_cond)
-{
-	u8 *prog = *pprog;
-	int cnt = 0;
-	s64 offset;
-
-	offset = func - (ip + 2 + 4);
-	if (!is_simm32(offset)) {
-		pr_err("Target %p is out of range\n", func);
-		return -EINVAL;
-	}
-	EMIT2_off32(0x0F, jmp_cond + 0x10, offset);
-	*pprog = prog;
-	return 0;
-}
-
-static void emit_nops(u8 **pprog, unsigned int len)
-{
-	unsigned int i, noplen;
-	u8 *prog = *pprog;
-	int cnt = 0;
-
-	while (len > 0) {
-		noplen = len;
-
-		if (noplen > ASM_NOP_MAX)
-			noplen = ASM_NOP_MAX;
-
-		for (i = 0; i < noplen; i++)
-			EMIT1(ideal_nops[noplen][i]);
-		len -= noplen;
+	if (WARN_ON_ONCE(prog > (u8 *)image_end - BPF_INSN_SAFETY)) {
+		ret = -EFAULT;
+		goto cleanup;
 	}
+	ret = prog - (u8 *)image;
 
-	*pprog = prog;
+cleanup:
+	kfree(branches);
+	return ret;
 }
 
 static int emit_fallback_jump(u8 **pprog)
@@ -1588,7 +1700,7 @@ static int emit_fallback_jump(u8 **pprog)
 
 static int emit_bpf_dispatcher(u8 **pprog, int a, int b, s64 *progs)
 {
-	u8 *jg_reloc, *jg_target, *prog = *pprog;
+	u8 *jg_reloc, *prog = *pprog;
 	int pivot, err, jg_bytes = 1, cnt = 0;
 	s64 jg_offset;
 
@@ -1643,9 +1755,7 @@ static int emit_bpf_dispatcher(u8 **pprog, int a, int b, s64 *progs)
 		 * Coding Rule 11: All branch targets should be 16-byte
 		 * aligned.
 		 */
-		jg_target = PTR_ALIGN(prog, 16);
-		if (jg_target != prog)
-			emit_nops(&prog, jg_target - prog);
+		emit_align(&prog, 16);
 		jg_offset = prog - jg_reloc;
 		emit_code(jg_reloc - jg_bytes, jg_offset, jg_bytes);
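Editor's note: emit_align() is the one new primitive the dispatcher hunk switches to: pad with nops up to the next 16-byte boundary, per the Intel alignment rule quoted above. A minimal user-space model follows; single-byte 0x90 nops stand in for the multi-byte ideal_nops[] table, and the buffer is 16-byte aligned so the printed offset is deterministic:

#include <stdint.h>
#include <stdio.h>

#define PTR_ALIGN(p, a) \
	((uint8_t *)(((uintptr_t)(p) + (a) - 1) & ~(uintptr_t)((a) - 1)))

static void emit_align(uint8_t **pprog, uint32_t align)
{
	uint8_t *target, *prog = *pprog;

	target = PTR_ALIGN(prog, align);
	while (prog < target)
		*prog++ = 0x90;	/* nop */

	*pprog = prog;
}

int main(void)
{
	static _Alignas(16) uint8_t image[64];
	uint8_t *prog = image + 5;	/* pretend 5 bytes were emitted */

	emit_align(&prog, 16);
	printf("padded %td bytes, now at offset %td\n",
	       (prog - image) - 5, prog - image);
	return 0;
}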