754 files changed, 42812 insertions, 8592 deletions
diff --git a/Documentation/ABI/stable/sysfs-class-rfkill b/Documentation/ABI/stable/sysfs-class-rfkill index e1ba4a104753..80151a409d67 100644 --- a/Documentation/ABI/stable/sysfs-class-rfkill +++ b/Documentation/ABI/stable/sysfs-class-rfkill @@ -11,7 +11,7 @@ KernelVersion: v2.6.22 Contact: linux-wireless@vger.kernel.org, Description: The rfkill class subsystem folder. Each registered rfkill driver is represented by an rfkillX - subfolder (X being an integer > 0). + subfolder (X being an integer >= 0). What: /sys/class/rfkill/rfkill[0-9]+/name @@ -48,8 +48,8 @@ Contact: linux-wireless@vger.kernel.org Description: Current state of the transmitter. This file was scheduled to be removed in 2014, but due to its large number of users it will be sticking around for a bit - longer. Despite it being marked as stabe, the newer "hard" and - "soft" interfaces should be preffered, since it is not possible + longer. Despite it being marked as stable, the newer "hard" and + "soft" interfaces should be preferred, since it is not possible to express the 'soft and hard block' state of the rfkill driver through this interface. There will likely be another attempt to remove it in the future. diff --git a/Documentation/ABI/testing/sysfs-class-net-queues b/Documentation/ABI/testing/sysfs-class-net-queues index 0c0df91b1516..978b76358661 100644 --- a/Documentation/ABI/testing/sysfs-class-net-queues +++ b/Documentation/ABI/testing/sysfs-class-net-queues @@ -42,6 +42,17 @@ Description: network device transmit queue. Possible vaules depend on the number of available CPU(s) in the system. +What: /sys/class/<iface>/queues/tx-<queue>/xps_rxqs +Date: June 2018 +KernelVersion: 4.18.0 +Contact: netdev@vger.kernel.org +Description: + Mask of the receive queue(s) currently enabled to participate + into the Transmit Packet Steering packet processing flow for this + network device transmit queue. Possible values depend on the + number of available receive queue(s) in the network device. + Default is disabled. + What: /sys/class/<iface>/queues/tx-<queue>/byte_queue_limits/hold_time Date: November 2011 KernelVersion: 3.3 diff --git a/Documentation/devicetree/bindings/net/dsa/realtek-smi.txt b/Documentation/devicetree/bindings/net/dsa/realtek-smi.txt new file mode 100644 index 000000000000..b6ae8541bd55 --- /dev/null +++ b/Documentation/devicetree/bindings/net/dsa/realtek-smi.txt @@ -0,0 +1,153 @@ +Realtek SMI-based Switches +========================== + +The SMI "Simple Management Interface" is a two-wire protocol using +bit-banged GPIO that while it reuses the MDIO lines MCK and MDIO does +not use the MDIO protocol. This binding defines how to specify the +SMI-based Realtek devices. + +Required properties: + +- compatible: must be exactly one of: + "realtek,rtl8366" + "realtek,rtl8366rb" (4+1 ports) + "realtek,rtl8366s" (4+1 ports) + "realtek,rtl8367" + "realtek,rtl8367b" + "realtek,rtl8368s" (8 port) + "realtek,rtl8369" + "realtek,rtl8370" (8 port) + +Required properties: +- mdc-gpios: GPIO line for the MDC clock line. +- mdio-gpios: GPIO line for the MDIO data line. +- reset-gpios: GPIO line for the reset signal. + +Optional properties: +- realtek,disable-leds: if the LED drivers are not used in the + hardware design this will disable them so they are not turned on + and wasting power. 
+ +Required subnodes: + +- interrupt-controller + + This defines an interrupt controller with an IRQ line (typically + a GPIO) that will demultiplex and handle the interrupt from the single + interrupt line coming out of one of the SMI-based chips. It most + importantly provides link up/down interrupts to the PHY blocks inside + the ASIC. + +Required properties of interrupt-controller: + +- interrupt: parent interrupt, see interrupt-controller/interrupts.txt +- interrupt-controller: see interrupt-controller/interrupts.txt +- #address-cells: should be <0> +- #interrupt-cells: should be <1> + +- mdio + + This defines the internal MDIO bus of the SMI device, mostly for the + purpose of being able to hook the interrupts to the right PHY and + the right PHY to the corresponding port. + +Required properties of mdio: + +- compatible: should be set to "realtek,smi-mdio" for all SMI devices + +See net/mdio.txt for additional MDIO bus properties. + +See net/dsa/dsa.txt for a list of additional required and optional properties +and subnodes of DSA switches. + +Examples: + +switch { + compatible = "realtek,rtl8366rb"; + /* 22 = MDIO (has input reads), 21 = MDC (clock, output only) */ + mdc-gpios = <&gpio0 21 GPIO_ACTIVE_HIGH>; + mdio-gpios = <&gpio0 22 GPIO_ACTIVE_HIGH>; + reset-gpios = <&gpio0 14 GPIO_ACTIVE_LOW>; + + switch_intc: interrupt-controller { + /* GPIO 15 provides the interrupt */ + interrupt-parent = <&gpio0>; + interrupts = <15 IRQ_TYPE_LEVEL_LOW>; + interrupt-controller; + #address-cells = <0>; + #interrupt-cells = <1>; + }; + + ports { + #address-cells = <1>; + #size-cells = <0>; + reg = <0>; + port@0 { + reg = <0>; + label = "lan0"; + phy-handle = <&phy0>; + }; + port@1 { + reg = <1>; + label = "lan1"; + phy-handle = <&phy1>; + }; + port@2 { + reg = <2>; + label = "lan2"; + phy-handle = <&phy2>; + }; + port@3 { + reg = <3>; + label = "lan3"; + phy-handle = <&phy3>; + }; + port@4 { + reg = <4>; + label = "wan"; + phy-handle = <&phy4>; + }; + port@5 { + reg = <5>; + label = "cpu"; + ethernet = <&gmac0>; + phy-mode = "rgmii"; + fixed-link { + speed = <1000>; + full-duplex; + }; + }; + }; + + mdio { + compatible = "realtek,smi-mdio", "dsa-mdio"; + #address-cells = <1>; + #size-cells = <0>; + + phy0: phy@0 { + reg = <0>; + interrupt-parent = <&switch_intc>; + interrupts = <0>; + }; + phy1: phy@1 { + reg = <1>; + interrupt-parent = <&switch_intc>; + interrupts = <1>; + }; + phy2: phy@2 { + reg = <2>; + interrupt-parent = <&switch_intc>; + interrupts = <2>; + }; + phy3: phy@3 { + reg = <3>; + interrupt-parent = <&switch_intc>; + interrupts = <3>; + }; + phy4: phy@4 { + reg = <4>; + interrupt-parent = <&switch_intc>; + interrupts = <12>; + }; + }; +}; diff --git a/Documentation/devicetree/bindings/net/dsa/vitesse,vsc73xx.txt b/Documentation/devicetree/bindings/net/dsa/vitesse,vsc73xx.txt new file mode 100644 index 000000000000..ed4710c40641 --- /dev/null +++ b/Documentation/devicetree/bindings/net/dsa/vitesse,vsc73xx.txt @@ -0,0 +1,81 @@ +Vitesse VSC73xx Switches +======================== + +This defines device tree bindings for the Vitesse VSC73xx switch chips. +The Vitesse company has been acquired by Microsemi and Microsemi in turn +acquired by Microchip but retains this vendor branding. 
+ +The currently supported switch chips are: +Vitesse VSC7385 SparX-G5 5+1-port Integrated Gigabit Ethernet Switch +Vitesse VSC7388 SparX-G8 8-port Integrated Gigabit Ethernet Switch +Vitesse VSC7395 SparX-G5e 5+1-port Integrated Gigabit Ethernet Switch +Vitesse VSC7398 SparX-G8e 8-port Integrated Gigabit Ethernet Switch + +The device tree node is an SPI device so it must reside inside a SPI bus +device tree node, see spi/spi-bus.txt + +Required properties: + +- compatible: must be exactly one of: + "vitesse,vsc7385" + "vitesse,vsc7388" + "vitesse,vsc7395" + "vitesse,vsc7398" +- gpio-controller: indicates that this switch is also a GPIO controller, + see gpio/gpio.txt +- #gpio-cells: this must be set to <2> and indicates that we are a twocell + GPIO controller, see gpio/gpio.txt + +Optional properties: + +- reset-gpios: a handle to a GPIO line that can issue reset of the chip. + It should be tagged as active low. + +Required subnodes: + +See net/dsa/dsa.txt for a list of additional required and optional properties +and subnodes of DSA switches. + +Examples: + +switch@0 { + compatible = "vitesse,vsc7395"; + reg = <0>; + /* Specified for 2.5 MHz or below */ + spi-max-frequency = <2500000>; + gpio-controller; + #gpio-cells = <2>; + + ports { + #address-cells = <1>; + #size-cells = <0>; + + port@0 { + reg = <0>; + label = "lan1"; + }; + port@1 { + reg = <1>; + label = "lan2"; + }; + port@2 { + reg = <2>; + label = "lan3"; + }; + port@3 { + reg = <3>; + label = "lan4"; + }; + vsc: port@6 { + reg = <6>; + label = "cpu"; + ethernet = <&gmac1>; + phy-mode = "rgmii"; + fixed-link { + speed = <1000>; + full-duplex; + pause; + }; + }; + }; +}; diff --git a/Documentation/devicetree/bindings/net/fsl-fman.txt b/Documentation/devicetree/bindings/net/fsl-fman.txt index f8c33890bc29..299c0dcd67db 100644 --- a/Documentation/devicetree/bindings/net/fsl-fman.txt +++ b/Documentation/devicetree/bindings/net/fsl-fman.txt @@ -356,30 +356,7 @@ ethernet@e0000 { ============================================================================ FMan IEEE 1588 Node -DESCRIPTION - -The FMan interface to support IEEE 1588 - - -PROPERTIES - -- compatible - Usage: required - Value type: <stringlist> - Definition: A standard property. - Must include "fsl,fman-ptp-timer". - -- reg - Usage: required - Value type: <prop-encoded-array> - Definition: A standard property. - -EXAMPLE - -ptp-timer@fe000 { - compatible = "fsl,fman-ptp-timer"; - reg = <0xfe000 0x1000>; -}; +Refer to Documentation/devicetree/bindings/ptp/ptp-qoriq.txt ============================================================================= FMan MDIO Node diff --git a/Documentation/devicetree/bindings/net/rockchip-dwmac.txt b/Documentation/devicetree/bindings/net/rockchip-dwmac.txt index 9c16ee2965a2..3b71da7e8742 100644 --- a/Documentation/devicetree/bindings/net/rockchip-dwmac.txt +++ b/Documentation/devicetree/bindings/net/rockchip-dwmac.txt @@ -4,6 +4,7 @@ The device node has following properties. 
Required properties: - compatible: should be "rockchip,<name>-gamc" + "rockchip,px30-gmac": found on PX30 SoCs "rockchip,rk3128-gmac": found on RK312x SoCs "rockchip,rk3228-gmac": found on RK322x SoCs "rockchip,rk3288-gmac": found on RK3288 SoCs diff --git a/Documentation/devicetree/bindings/ptp/ptp-qoriq.txt b/Documentation/devicetree/bindings/ptp/ptp-qoriq.txt index 0f569d8e73a3..c5d0e7998e2b 100644 --- a/Documentation/devicetree/bindings/ptp/ptp-qoriq.txt +++ b/Documentation/devicetree/bindings/ptp/ptp-qoriq.txt @@ -2,7 +2,8 @@ General Properties: - - compatible Should be "fsl,etsec-ptp" + - compatible Should be "fsl,etsec-ptp" for eTSEC + Should be "fsl,fman-ptp-timer" for DPAA FMan - reg Offset and length of the register set for the device - interrupts There should be at least two interrupts. Some devices have as many as four PTP related interrupts. @@ -43,14 +44,22 @@ Clock Properties: value, which will be directly written in those bits, that is why, according to reference manual, the next clock sources can be used: + For eTSEC, <0> - external high precision timer reference clock (TSEC_TMR_CLK input is used for this purpose); <1> - eTSEC system clock; <2> - eTSEC1 transmit clock; <3> - RTC clock input. - When this attribute is not used, eTSEC system clock will serve as - IEEE 1588 timer reference clock. + For DPAA FMan, + <0> - external high precision timer reference clock (TMR_1588_CLK) + <1> - MAC system clock (1/2 FMan clock) + <2> - reserved + <3> - RTC clock oscillator + + When this attribute is not used, the IEEE 1588 timer reference clock + will use the eTSEC system clock (for Gianfar) or the MAC system + clock (for DPAA). Example: diff --git a/Documentation/devicetree/bindings/vendor-prefixes.txt b/Documentation/devicetree/bindings/vendor-prefixes.txt index 7cad066191ee..3e5398f87eac 100644 --- a/Documentation/devicetree/bindings/vendor-prefixes.txt +++ b/Documentation/devicetree/bindings/vendor-prefixes.txt @@ -395,6 +395,7 @@ v3 V3 Semiconductor variscite Variscite Ltd. via VIA Technologies, Inc. virtio Virtual I/O Device Specification, developed by the OASIS consortium +vitesse Vitesse Semiconductor Corporation vivante Vivante Corporation vocore VoCore Studio voipac Voipac Technologies s.r.o. diff --git a/Documentation/networking/00-INDEX b/Documentation/networking/00-INDEX index 2b89d91b376f..1e5153ed8990 100644 --- a/Documentation/networking/00-INDEX +++ b/Documentation/networking/00-INDEX @@ -18,8 +18,6 @@ README.ipw2200 - README for the Intel PRO/Wireless 2915ABG and 2200BG driver. README.sb1000 - info on General Instrument/NextLevel SURFboard1000 cable modem. -alias.txt - - info on using alias network devices. altera_tse.txt - Altera Triple-Speed Ethernet controller. arcnet-hardware.txt diff --git a/Documentation/networking/alias.rst b/Documentation/networking/alias.rst new file mode 100644 index 000000000000..af7c5ee92014 --- /dev/null +++ b/Documentation/networking/alias.rst @@ -0,0 +1,49 @@ +.. SPDX-License-Identifier: GPL-2.0 + +=========== +IP-Aliasing +=========== + +IP-aliases are an obsolete way to manage multiple IP-addresses/masks +per interface. Newer tools such as iproute2 support multiple +address/prefixes per interface, but aliases are still supported +for backwards compatibility. + +An alias is formed by adding a colon and a string when running ifconfig. +This string is usually numeric, but this is not a must. + + +Alias creation +============== + +Alias creation is done by 'magic' interface naming: eg. to create a +200.1.1.1 alias for eth0 ... 
+:: + + # ifconfig eth0:0 200.1.1.1 etc,etc.... + ~~ -> request alias #0 creation (if not yet exists) for eth0 + +The corresponding route is also set up by this command. Please note: +The route always points to the base interface. + + +Alias deletion +============== + +The alias is removed by shutting the alias down:: + + # ifconfig eth0:0 down + ~~~~~~~~~~ -> will delete alias + + +Alias (re-)configuring +====================== + +Aliases are not real devices, but programs should be able to configure +and refer to them as usual (ifconfig, route, etc). + + +Relationship with main device +============================= + +If the base device is shut down the added aliases will be deleted too. diff --git a/Documentation/networking/alias.txt b/Documentation/networking/alias.txt deleted file mode 100644 index 85046f53fcfc..000000000000 --- a/Documentation/networking/alias.txt +++ /dev/null @@ -1,40 +0,0 @@ - -IP-Aliasing: -============ - -IP-aliases are an obsolete way to manage multiple IP-addresses/masks -per interface. Newer tools such as iproute2 support multiple -address/prefixes per interface, but aliases are still supported -for backwards compatibility. - -An alias is formed by adding a colon and a string when running ifconfig. -This string is usually numeric, but this is not a must. - -o Alias creation. - Alias creation is done by 'magic' interface naming: eg. to create a - 200.1.1.1 alias for eth0 ... - - # ifconfig eth0:0 200.1.1.1 etc,etc.... - ~~ -> request alias #0 creation (if not yet exists) for eth0 - - The corresponding route is also set up by this command. - Please note: The route always points to the base interface. - - -o Alias deletion. - The alias is removed by shutting the alias down: - - # ifconfig eth0:0 down - ~~~~~~~~~~ -> will delete alias - - -o Alias (re-)configuring - - Aliases are not real devices, but programs should be able to configure and - refer to them as usual (ifconfig, route, etc). - - -o Relationship with main device - - If the base device is shut down the added aliases will be deleted - too. diff --git a/Documentation/networking/bridge.txt b/Documentation/networking/bridge.rst index a27cb6214ed7..4aef9cddde2f 100644 --- a/Documentation/networking/bridge.txt +++ b/Documentation/networking/bridge.rst @@ -1,3 +1,9 @@ +.. SPDX-License-Identifier: GPL-2.0 + +================= +Ethernet Bridging +================= + In order to use the Ethernet bridging functionality, you'll need the userspace tools. diff --git a/Documentation/networking/index.rst b/Documentation/networking/index.rst index fec8588a588e..f0ae9b65dfba 100644 --- a/Documentation/networking/index.rst +++ b/Documentation/networking/index.rst @@ -15,6 +15,10 @@ Contents: kapi z8530book msg_zerocopy + failover + net_failover + alias + bridge .. only:: subproject diff --git a/Documentation/networking/ip-sysctl.txt b/Documentation/networking/ip-sysctl.txt index ce8fbf5aa63c..77c37fb0b6a6 100644 --- a/Documentation/networking/ip-sysctl.txt +++ b/Documentation/networking/ip-sysctl.txt @@ -733,11 +733,11 @@ tcp_limit_output_bytes - INTEGER Controls TCP Small Queue limit per tcp socket. TCP bulk sender tends to increase packets in flight until it gets losses notifications. With SNDBUF autotuning, this can - result in a large amount of packets queued in qdisc/device - on the local machine, hurting latency of other flows, for - typical pfifo_fast qdiscs. - tcp_limit_output_bytes limits the number of bytes on qdisc - or device to reduce artificial RTT/cwnd and reduce bufferbloat. 
+ result in a large amount of packets queued on the local machine + (e.g.: qdiscs, CPU backlog, or device) hurting latency of other + flows, for typical pfifo_fast qdiscs. tcp_limit_output_bytes + limits the number of bytes on qdisc or device to reduce artificial + RTT/cwnd and reduce bufferbloat. Default: 262144 tcp_challenge_ack_limit - INTEGER @@ -1834,6 +1834,16 @@ stable_secret - IPv6 address By default the stable secret is unset. +addr_gen_mode - INTEGER + Defines how link-local and autoconf addresses are generated. + + 0: generate address based on EUI64 (default) + 1: do no generate a link-local address, use EUI64 for addresses generated + from autoconf + 2: generate stable privacy addresses, using the secret from + stable_secret (RFC7217) + 3: generate stable privacy addresses, using a random secret if unset + drop_unicast_in_l2_multicast - BOOLEAN Drop any unicast IPv6 packets that are received in link-layer multicast (or broadcast) frames. diff --git a/Documentation/networking/net_failover.rst b/Documentation/networking/net_failover.rst index 70ca2f5800c4..06c97dcb57ca 100644 --- a/Documentation/networking/net_failover.rst +++ b/Documentation/networking/net_failover.rst @@ -36,37 +36,39 @@ feature on the virtio-net interface and assign the same MAC address to both virtio-net and VF interfaces. Here is an example XML snippet that shows such configuration. - - <interface type='network'> - <mac address='52:54:00:00:12:53'/> - <source network='enp66s0f0_br'/> - <target dev='tap01'/> - <model type='virtio'/> - <driver name='vhost' queues='4'/> - <link state='down'/> - <address type='pci' domain='0x0000' bus='0x00' slot='0x0a' function='0x0'/> - </interface> - <interface type='hostdev' managed='yes'> - <mac address='52:54:00:00:12:53'/> - <source> - <address type='pci' domain='0x0000' bus='0x42' slot='0x02' function='0x5'/> - </source> - <address type='pci' domain='0x0000' bus='0x00' slot='0x0b' function='0x0'/> - </interface> +:: + + <interface type='network'> + <mac address='52:54:00:00:12:53'/> + <source network='enp66s0f0_br'/> + <target dev='tap01'/> + <model type='virtio'/> + <driver name='vhost' queues='4'/> + <link state='down'/> + <address type='pci' domain='0x0000' bus='0x00' slot='0x0a' function='0x0'/> + </interface> + <interface type='hostdev' managed='yes'> + <mac address='52:54:00:00:12:53'/> + <source> + <address type='pci' domain='0x0000' bus='0x42' slot='0x02' function='0x5'/> + </source> + <address type='pci' domain='0x0000' bus='0x00' slot='0x0b' function='0x0'/> + </interface> Booting a VM with the above configuration will result in the following 3 netdevs created in the VM. 
- -4: ens10: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc noqueue state UP group default qlen 1000 - link/ether 52:54:00:00:12:53 brd ff:ff:ff:ff:ff:ff - inet 192.168.12.53/24 brd 192.168.12.255 scope global dynamic ens10 - valid_lft 42482sec preferred_lft 42482sec - inet6 fe80::97d8:db2:8c10:b6d6/64 scope link - valid_lft forever preferred_lft forever -5: ens10nsby: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc fq_codel master ens10 state UP group default qlen 1000 - link/ether 52:54:00:00:12:53 brd ff:ff:ff:ff:ff:ff -7: ens11: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc mq master ens10 state UP group default qlen 1000 - link/ether 52:54:00:00:12:53 brd ff:ff:ff:ff:ff:ff +:: + + 4: ens10: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc noqueue state UP group default qlen 1000 + link/ether 52:54:00:00:12:53 brd ff:ff:ff:ff:ff:ff + inet 192.168.12.53/24 brd 192.168.12.255 scope global dynamic ens10 + valid_lft 42482sec preferred_lft 42482sec + inet6 fe80::97d8:db2:8c10:b6d6/64 scope link + valid_lft forever preferred_lft forever + 5: ens10nsby: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc fq_codel master ens10 state UP group default qlen 1000 + link/ether 52:54:00:00:12:53 brd ff:ff:ff:ff:ff:ff + 7: ens11: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc mq master ens10 state UP group default qlen 1000 + link/ether 52:54:00:00:12:53 brd ff:ff:ff:ff:ff:ff ens10 is the 'failover' master netdev, ens10nsby and ens11 are the slave 'standby' and 'primary' netdevs respectively. @@ -80,37 +82,38 @@ the paravirtual datapath when the VF is unplugged. Here is a sample script that shows the steps to initiate live migration on the source hypervisor. +:: -# cat vf_xml -<interface type='hostdev' managed='yes'> - <mac address='52:54:00:00:12:53'/> - <source> - <address type='pci' domain='0x0000' bus='0x42' slot='0x02' function='0x5'/> - </source> - <address type='pci' domain='0x0000' bus='0x00' slot='0x0b' function='0x0'/> -</interface> + # cat vf_xml + <interface type='hostdev' managed='yes'> + <mac address='52:54:00:00:12:53'/> + <source> + <address type='pci' domain='0x0000' bus='0x42' slot='0x02' function='0x5'/> + </source> + <address type='pci' domain='0x0000' bus='0x00' slot='0x0b' function='0x0'/> + </interface> -# Source Hypervisor -#!/bin/bash + # Source Hypervisor + #!/bin/bash -DOMAIN=fedora27-tap01 -PF=enp66s0f0 -VF_NUM=5 -TAP_IF=tap01 -VF_XML= + DOMAIN=fedora27-tap01 + PF=enp66s0f0 + VF_NUM=5 + TAP_IF=tap01 + VF_XML= -MAC=52:54:00:00:12:53 -ZERO_MAC=00:00:00:00:00:00 + MAC=52:54:00:00:12:53 + ZERO_MAC=00:00:00:00:00:00 -virsh domif-setlink $DOMAIN $TAP_IF up -bridge fdb del $MAC dev $PF master -virsh detach-device $DOMAIN $VF_XML -ip link set $PF vf $VF_NUM mac $ZERO_MAC + virsh domif-setlink $DOMAIN $TAP_IF up + bridge fdb del $MAC dev $PF master + virsh detach-device $DOMAIN $VF_XML + ip link set $PF vf $VF_NUM mac $ZERO_MAC -virsh migrate --live $DOMAIN qemu+ssh://$REMOTE_HOST/system + virsh migrate --live $DOMAIN qemu+ssh://$REMOTE_HOST/system -# Destination Hypervisor -#!/bin/bash + # Destination Hypervisor + #!/bin/bash -virsh attach-device $DOMAIN $VF_XML -virsh domif-setlink $DOMAIN $TAP_IF down + virsh attach-device $DOMAIN $VF_XML + virsh domif-setlink $DOMAIN $TAP_IF down diff --git a/Documentation/networking/scaling.txt b/Documentation/networking/scaling.txt index f55639d71d35..b7056a8a0540 100644 --- a/Documentation/networking/scaling.txt +++ b/Documentation/networking/scaling.txt @@ -366,8 +366,13 @@ XPS: Transmit Packet Steering Transmit Packet Steering is a 
mechanism for intelligently selecting which transmit queue to use when transmitting a packet on a multi-queue -device. To accomplish this, a mapping from CPU to hardware queue(s) is -recorded. The goal of this mapping is usually to assign queues +device. This can be accomplished by recording two kinds of maps, either +a mapping of CPU to hardware queue(s) or a mapping of receive queue(s) +to hardware transmit queue(s). + +1. XPS using CPUs map + +The goal of this mapping is usually to assign queues exclusively to a subset of CPUs, where the transmit completions for these queues are processed on a CPU within this set. This choice provides two benefits. First, contention on the device queue lock is @@ -377,15 +382,40 @@ transmit queue). Secondly, cache miss rate on transmit completion is reduced, in particular for data cache lines that hold the sk_buff structures. -XPS is configured per transmit queue by setting a bitmap of CPUs that -may use that queue to transmit. The reverse mapping, from CPUs to -transmit queues, is computed and maintained for each network device. -When transmitting the first packet in a flow, the function -get_xps_queue() is called to select a queue. This function uses the ID -of the running CPU as a key into the CPU-to-queue lookup table. If the +2. XPS using receive queues map + +This mapping is used to pick transmit queue based on the receive +queue(s) map configuration set by the administrator. A set of receive +queues can be mapped to a set of transmit queues (many:many), although +the common use case is a 1:1 mapping. This will enable sending packets +on the same queue associations for transmit and receive. This is useful for +busy polling multi-threaded workloads where there are challenges in +associating a given CPU to a given application thread. The application +threads are not pinned to CPUs and each thread handles packets +received on a single queue. The receive queue number is cached in the +socket for the connection. In this model, sending the packets on the same +transmit queue corresponding to the associated receive queue has benefits +in keeping the CPU overhead low. Transmit completion work is locked into +the same queue-association that a given application is polling on. This +avoids the overhead of triggering an interrupt on another CPU. When the +application cleans up the packets during the busy poll, transmit completion +may be processed along with it in the same thread context and so result in +reduced latency. + +XPS is configured per transmit queue by setting a bitmap of +CPUs/receive-queues that may use that queue to transmit. The reverse +mapping, from CPUs to transmit queues or from receive-queues to transmit +queues, is computed and maintained for each network device. When +transmitting the first packet in a flow, the function get_xps_queue() is +called to select a queue. This function uses the ID of the receive queue +for the socket connection for a match in the receive queue-to-transmit queue +lookup table. Alternatively, this function can also use the ID of the +running CPU as a key into the CPU-to-queue lookup table. If the ID matches a single queue, that is used for transmission. If multiple queues match, one is selected by using the flow hash to compute an index -into the set. +into the set. When selecting the transmit queue based on receive queue(s) +map, the transmit device is not validated against the receive device as it +requires expensive lookup operation in the datapath. 
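As a minimal sketch of the new receive-queue map described above (assuming a hypothetical two-queue device eth0; the interface name and queue numbers are illustrative only), the map is written to the new xps_rxqs sysfs entry as a hexadecimal bitmap of receive queues, in the same style already used for xps_cpus:

    # Map receive queue 0 to transmit queue 0 (bit 0 = rx queue 0)
    echo 1 > /sys/class/net/eth0/queues/tx-0/xps_rxqs
    # Map receive queue 1 to transmit queue 1 (bit 1 = rx queue 1)
    echo 2 > /sys/class/net/eth0/queues/tx-1/xps_rxqs
    # Writing 0 disables the receive-queue map for that transmit queue,
    # so queue selection falls back to the xps_cpus map.
    echo 0 > /sys/class/net/eth0/queues/tx-0/xps_rxqs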
The queue chosen for transmitting a particular flow is saved in the corresponding socket structure for the flow (e.g. a TCP connection). @@ -404,11 +434,15 @@ acknowledged. XPS is only available if the kconfig symbol CONFIG_XPS is enabled (on by default for SMP). The functionality remains disabled until explicitly -configured. To enable XPS, the bitmap of CPUs that may use a transmit -queue is configured using the sysfs file entry: +configured. To enable XPS, the bitmap of CPUs/receive-queues that may +use a transmit queue is configured using the sysfs file entry: +For selection based on CPUs map: /sys/class/net/<dev>/queues/tx-<n>/xps_cpus +For selection based on receive-queues map: +/sys/class/net/<dev>/queues/tx-<n>/xps_rxqs + == Suggested Configuration For a network device with a single transmission queue, XPS configuration @@ -421,6 +455,11 @@ best CPUs to share a given queue are probably those that share the cache with the CPU that processes transmit completions for that queue (transmit interrupts). +For transmit queue selection based on receive queue(s), XPS has to be +explicitly configured mapping receive-queue(s) to transmit queue(s). If the +user configuration for receive-queue map does not apply, then the transmit +queue is selected based on the CPUs map. + Per TX Queue rate limitation: ============================= diff --git a/Documentation/rfkill.txt b/Documentation/rfkill.txt index a289285d2412..7d3684e81df6 100644 --- a/Documentation/rfkill.txt +++ b/Documentation/rfkill.txt @@ -9,7 +9,7 @@ rfkill - RF kill switch support Introduction ============ -The rfkill subsystem provides a generic interface to disabling any radio +The rfkill subsystem provides a generic interface for disabling any radio transmitter in the system. When a transmitter is blocked, it shall not radiate any power. @@ -45,7 +45,7 @@ The rfkill subsystem is composed of three main components: * the rfkill drivers. The rfkill core provides API for kernel drivers to register their radio -transmitter with the kernel, methods for turning it on and off and, letting +transmitter with the kernel, methods for turning it on and off, and letting the system know about hardware-disabled states that may be implemented on the device. @@ -54,7 +54,7 @@ ways for userspace to query the current states. See the "Userspace support" section below. When the device is hard-blocked (either by a call to rfkill_set_hw_state() -or from query_hw_block) set_block() will be invoked for additional software +or from query_hw_block), set_block() will be invoked for additional software block, but drivers can ignore the method call since they can use the return value of the function rfkill_set_hw_state() to sync the software state instead of keeping track of calls to set_block(). In fact, drivers should @@ -65,7 +65,6 @@ keeps track of soft and hard block separately. Kernel API ========== - Drivers for radio transmitters normally implement an rfkill driver. Platform drivers might implement input devices if the rfkill button is just @@ -75,14 +74,14 @@ a way to turn on/off the transmitter(s). For some platforms, it is possible that the hardware state changes during suspend/hibernation, in which case it will be necessary to update the rfkill -core with the current state is at resume time. +core with the current state at resume time. To create an rfkill driver, driver's Kconfig needs to have:: depends on RFKILL || !RFKILL to ensure the driver cannot be built-in when rfkill is modular. 
The !RFKILL -case allows the driver to be built when rfkill is not configured, which +case allows the driver to be built when rfkill is not configured, in which case all rfkill API can still be used but will be provided by static inlines which compile to almost nothing. @@ -91,7 +90,7 @@ rfkill drivers that control devices that can be hard-blocked unless they also assign the poll_hw_block() callback (then the rfkill core will poll the device). Don't do this unless you cannot get the event in any other way. -RFKill provides per-switch LED triggers, which can be used to drive LEDs +rfkill provides per-switch LED triggers, which can be used to drive LEDs according to the switch state (LED_FULL when blocked, LED_OFF otherwise). @@ -114,7 +113,7 @@ a specified type) into a state which also updates the default state for hotplugged devices. After an application opens /dev/rfkill, it can read the current state of all -devices. Changes can be either obtained by either polling the descriptor for +devices. Changes can be obtained by either polling the descriptor for hotplug or state change events or by listening for uevents emitted by the rfkill core framework. @@ -127,8 +126,7 @@ environment variables set:: RFKILL_STATE RFKILL_TYPE -The contents of these variables corresponds to the "name", "state" and +The content of these variables corresponds to the "name", "state" and "type" sysfs files explained above. - For further details consult Documentation/ABI/stable/sysfs-class-rfkill. diff --git a/MAINTAINERS b/MAINTAINERS index 1505c8ea8e7b..fce04d747a1a 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -9158,6 +9158,7 @@ S: Supported W: http://www.mellanox.com Q: http://patchwork.ozlabs.org/project/netdev/list/ F: drivers/net/ethernet/mellanox/mlxsw/ +F: tools/testing/selftests/drivers/net/mlxsw/ MELLANOX FIRMWARE FLASH LIBRARY (mlxfw) M: mlxsw@mellanox.com @@ -12064,6 +12065,13 @@ S: Maintained F: sound/soc/codecs/rt* F: include/sound/rt*.h +REALTEK RTL83xx SMI DSA ROUTER CHIPS +M: Linus Walleij <linus.walleij@linaro.org> +S: Maintained +F: Documentation/devicetree/bindings/net/dsa/realtek-smi.txt +F: drivers/net/dsa/realtek-smi* +F: drivers/net/dsa/rtl83* + REGISTER MAP ABSTRACTION M: Mark Brown <broonie@kernel.org> L: linux-kernel@vger.kernel.org @@ -12171,6 +12179,8 @@ S: Maintained F: Documentation/rfkill.txt F: Documentation/ABI/stable/sysfs-class-rfkill F: net/rfkill/ +F: include/linux/rfkill.h +F: include/uapi/linux/rfkill.h RHASHTABLE M: Thomas Graf <tgraf@suug.ch> @@ -12178,7 +12188,9 @@ M: Herbert Xu <herbert@gondor.apana.org.au> L: netdev@vger.kernel.org S: Maintained F: lib/rhashtable.c +F: lib/test_rhashtable.c F: include/linux/rhashtable.h +F: include/linux/rhashtable-types.h RICOH R5C592 MEMORYSTICK DRIVER M: Maxim Levitsky <maximlevitsky@gmail.com> diff --git a/arch/alpha/include/uapi/asm/socket.h b/arch/alpha/include/uapi/asm/socket.h index be14f16149d5..065fb372e355 100644 --- a/arch/alpha/include/uapi/asm/socket.h +++ b/arch/alpha/include/uapi/asm/socket.h @@ -112,4 +112,7 @@ #define SO_ZEROCOPY 60 +#define SO_TXTIME 61 +#define SCM_TXTIME SO_TXTIME + #endif /* _UAPI_ASM_SOCKET_H */ diff --git a/arch/arm/boot/dts/gemini-dlink-dir-685.dts b/arch/arm/boot/dts/gemini-dlink-dir-685.dts index fb5c954ab95a..6f258b50eb44 100644 --- a/arch/arm/boot/dts/gemini-dlink-dir-685.dts +++ b/arch/arm/boot/dts/gemini-dlink-dir-685.dts @@ -156,6 +156,100 @@ }; }; + /* This is a RealTek RTL8366RB switch and PHY using SMI over GPIO */ + switch { + compatible = "realtek,rtl8366rb"; + /* 22 = MDIO (has input 
reads), 21 = MDC (clock, output only) */ + mdc-gpios = <&gpio0 21 GPIO_ACTIVE_HIGH>; + mdio-gpios = <&gpio0 22 GPIO_ACTIVE_HIGH>; + reset-gpios = <&gpio0 14 GPIO_ACTIVE_LOW>; + realtek,disable-leds; + + switch_intc: interrupt-controller { + /* GPIO 15 provides the interrupt */ + interrupt-parent = <&gpio0>; + interrupts = <15 IRQ_TYPE_LEVEL_LOW>; + interrupt-controller; + #address-cells = <0>; + #interrupt-cells = <1>; + }; + + ports { + #address-cells = <1>; + #size-cells = <0>; + + port@0 { + reg = <0>; + label = "lan0"; + phy-handle = <&phy0>; + }; + port@1 { + reg = <1>; + label = "lan1"; + phy-handle = <&phy1>; + }; + port@2 { + reg = <2>; + label = "lan2"; + phy-handle = <&phy2>; + }; + port@3 { + reg = <3>; + label = "lan3"; + phy-handle = <&phy3>; + }; + port@4 { + reg = <4>; + label = "wan"; + phy-handle = <&phy4>; + }; + rtl8366rb_cpu_port: port@5 { + reg = <5>; + label = "cpu"; + ethernet = <&gmac0>; + phy-mode = "rgmii"; + fixed-link { + speed = <1000>; + full-duplex; + pause; + }; + }; + + }; + + mdio { + compatible = "realtek,smi-mdio"; + #address-cells = <1>; + #size-cells = <0>; + + phy0: phy@0 { + reg = <0>; + interrupt-parent = <&switch_intc>; + interrupts = <0>; + }; + phy1: phy@1 { + reg = <1>; + interrupt-parent = <&switch_intc>; + interrupts = <1>; + }; + phy2: phy@2 { + reg = <2>; + interrupt-parent = <&switch_intc>; + interrupts = <2>; + }; + phy3: phy@3 { + reg = <3>; + interrupt-parent = <&switch_intc>; + interrupts = <3>; + }; + phy4: phy@4 { + reg = <4>; + interrupt-parent = <&switch_intc>; + interrupts = <12>; + }; + }; + }; + soc { flash@30000000 { /* @@ -223,10 +317,12 @@ * gpio0bgrp cover line 7 used by WPS LED * gpio0cgrp cover line 8, 13 used by keys * and 11, 12 used by the HD LEDs + * and line 14, 15 used by RTL8366 + * RESET and phy ready * gpio0egrp cover line 16 used by VDISP * gpio0fgrp cover line 17 used by TK IRQ * gpio0ggrp cover line 20 used by panel CS - * gpio0hgrp cover line 21,22 used by RTL8366RB + * gpio0hgrp cover line 21,22 used by RTL8366RB MDIO */ gpio0_default_pins: pinctrl-gpio0 { mux { @@ -250,6 +346,32 @@ groups = "gpio1bgrp"; }; }; + pinctrl-gmii { + mux { + function = "gmii"; + groups = "gmii_gmac0_grp"; + }; + conf0 { + pins = "V8 GMAC0 RXDV", "T10 GMAC1 RXDV", + "Y7 GMAC0 RXC", "Y11 GMAC1 RXC", + "T8 GMAC0 TXEN", "W11 GMAC1 TXEN", + "U8 GMAC0 TXC", "V11 GMAC1 TXC", + "W8 GMAC0 RXD0", "V9 GMAC0 RXD1", + "Y8 GMAC0 RXD2", "U9 GMAC0 RXD3", + "T7 GMAC0 TXD0", "U6 GMAC0 TXD1", + "V7 GMAC0 TXD2", "U7 GMAC0 TXD3", + "Y12 GMAC1 RXD0", "V12 GMAC1 RXD1", + "T11 GMAC1 RXD2", "W12 GMAC1 RXD3", + "U10 GMAC1 TXD0", "Y10 GMAC1 TXD1", + "W10 GMAC1 TXD2", "T9 GMAC1 TXD3"; + skew-delay = <7>; + }; + /* Set up drive strength on GMAC0 to 16 mA */ + conf1 { + groups = "gmii_gmac0_grp"; + drive-strength = <16>; + }; + }; }; }; @@ -291,6 +413,22 @@ <0x6000 0 0 4 &pci_intc 2>; }; + ethernet@60000000 { + status = "okay"; + + ethernet-port@0 { + phy-mode = "rgmii"; + fixed-link { + speed = <1000>; + full-duplex; + pause; + }; + }; + ethernet-port@1 { + /* Not used in this platform */ + }; + }; + ata@63000000 { status = "okay"; }; diff --git a/arch/arm/net/bpf_jit_32.c b/arch/arm/net/bpf_jit_32.c index f6a62ae44a65..25b3ee85066e 100644 --- a/arch/arm/net/bpf_jit_32.c +++ b/arch/arm/net/bpf_jit_32.c @@ -22,6 +22,7 @@ #include <asm/cacheflush.h> #include <asm/hwcap.h> #include <asm/opcodes.h> +#include <asm/system_info.h> #include "bpf_jit_32.h" @@ -47,32 +48,73 @@ * The callee saved registers depends on whether frame pointers are enabled. 
* With frame pointers (to be compliant with the ABI): * - * high - * original ARM_SP => +------------------+ \ - * | pc | | - * current ARM_FP => +------------------+ } callee saved registers - * |r4-r8,r10,fp,ip,lr| | - * +------------------+ / - * low + * high + * original ARM_SP => +--------------+ \ + * | pc | | + * current ARM_FP => +--------------+ } callee saved registers + * |r4-r9,fp,ip,lr| | + * +--------------+ / + * low * * Without frame pointers: * - * high - * original ARM_SP => +------------------+ - * | r4-r8,r10,fp,lr | callee saved registers - * current ARM_FP => +------------------+ - * low + * high + * original ARM_SP => +--------------+ + * | r4-r9,fp,lr | callee saved registers + * current ARM_FP => +--------------+ + * low * * When popping registers off the stack at the end of a BPF function, we * reference them via the current ARM_FP register. */ #define CALLEE_MASK (1 << ARM_R4 | 1 << ARM_R5 | 1 << ARM_R6 | \ - 1 << ARM_R7 | 1 << ARM_R8 | 1 << ARM_R10 | \ + 1 << ARM_R7 | 1 << ARM_R8 | 1 << ARM_R9 | \ 1 << ARM_FP) #define CALLEE_PUSH_MASK (CALLEE_MASK | 1 << ARM_LR) #define CALLEE_POP_MASK (CALLEE_MASK | 1 << ARM_PC) -#define STACK_OFFSET(k) (k) +enum { + /* Stack layout - these are offsets from (top of stack - 4) */ + BPF_R2_HI, + BPF_R2_LO, + BPF_R3_HI, + BPF_R3_LO, + BPF_R4_HI, + BPF_R4_LO, + BPF_R5_HI, + BPF_R5_LO, + BPF_R7_HI, + BPF_R7_LO, + BPF_R8_HI, + BPF_R8_LO, + BPF_R9_HI, + BPF_R9_LO, + BPF_FP_HI, + BPF_FP_LO, + BPF_TC_HI, + BPF_TC_LO, + BPF_AX_HI, + BPF_AX_LO, + /* Stack space for BPF_REG_2, BPF_REG_3, BPF_REG_4, + * BPF_REG_5, BPF_REG_7, BPF_REG_8, BPF_REG_9, + * BPF_REG_FP and Tail call counts. + */ + BPF_JIT_SCRATCH_REGS, +}; + +/* + * Negative "register" values indicate the register is stored on the stack + * and are the offset from the top of the eBPF JIT scratch space. + */ +#define STACK_OFFSET(k) (-4 - (k) * 4) +#define SCRATCH_SIZE (BPF_JIT_SCRATCH_REGS * 4) + +#ifdef CONFIG_FRAME_POINTER +#define EBPF_SCRATCH_TO_ARM_FP(x) ((x) - 4 * hweight16(CALLEE_PUSH_MASK) - 4) +#else +#define EBPF_SCRATCH_TO_ARM_FP(x) (x) +#endif + #define TMP_REG_1 (MAX_BPF_JIT_REG + 0) /* TEMP Register 1 */ #define TMP_REG_2 (MAX_BPF_JIT_REG + 1) /* TEMP Register 2 */ #define TCALL_CNT (MAX_BPF_JIT_REG + 2) /* Tail Call Count */ @@ -94,35 +136,35 @@ * scratch memory space and we have to build eBPF 64 bit register from those. 
* */ -static const u8 bpf2a32[][2] = { +static const s8 bpf2a32[][2] = { /* return value from in-kernel function, and exit value from eBPF */ [BPF_REG_0] = {ARM_R1, ARM_R0}, /* arguments from eBPF program to in-kernel function */ [BPF_REG_1] = {ARM_R3, ARM_R2}, /* Stored on stack scratch space */ - [BPF_REG_2] = {STACK_OFFSET(0), STACK_OFFSET(4)}, - [BPF_REG_3] = {STACK_OFFSET(8), STACK_OFFSET(12)}, - [BPF_REG_4] = {STACK_OFFSET(16), STACK_OFFSET(20)}, - [BPF_REG_5] = {STACK_OFFSET(24), STACK_OFFSET(28)}, + [BPF_REG_2] = {STACK_OFFSET(BPF_R2_HI), STACK_OFFSET(BPF_R2_LO)}, + [BPF_REG_3] = {STACK_OFFSET(BPF_R3_HI), STACK_OFFSET(BPF_R3_LO)}, + [BPF_REG_4] = {STACK_OFFSET(BPF_R4_HI), STACK_OFFSET(BPF_R4_LO)}, + [BPF_REG_5] = {STACK_OFFSET(BPF_R5_HI), STACK_OFFSET(BPF_R5_LO)}, /* callee saved registers that in-kernel function will preserve */ [BPF_REG_6] = {ARM_R5, ARM_R4}, /* Stored on stack scratch space */ - [BPF_REG_7] = {STACK_OFFSET(32), STACK_OFFSET(36)}, - [BPF_REG_8] = {STACK_OFFSET(40), STACK_OFFSET(44)}, - [BPF_REG_9] = {STACK_OFFSET(48), STACK_OFFSET(52)}, + [BPF_REG_7] = {STACK_OFFSET(BPF_R7_HI), STACK_OFFSET(BPF_R7_LO)}, + [BPF_REG_8] = {STACK_OFFSET(BPF_R8_HI), STACK_OFFSET(BPF_R8_LO)}, + [BPF_REG_9] = {STACK_OFFSET(BPF_R9_HI), STACK_OFFSET(BPF_R9_LO)}, /* Read only Frame Pointer to access Stack */ - [BPF_REG_FP] = {STACK_OFFSET(56), STACK_OFFSET(60)}, + [BPF_REG_FP] = {STACK_OFFSET(BPF_FP_HI), STACK_OFFSET(BPF_FP_LO)}, /* Temporary Register for internal BPF JIT, can be used * for constant blindings and others. */ [TMP_REG_1] = {ARM_R7, ARM_R6}, - [TMP_REG_2] = {ARM_R10, ARM_R8}, + [TMP_REG_2] = {ARM_R9, ARM_R8}, /* Tail call count. Stored on stack scratch space. */ - [TCALL_CNT] = {STACK_OFFSET(64), STACK_OFFSET(68)}, + [TCALL_CNT] = {STACK_OFFSET(BPF_TC_HI), STACK_OFFSET(BPF_TC_LO)}, /* temporary register for blinding constants. * Stored on stack scratch space. */ - [BPF_REG_AX] = {STACK_OFFSET(72), STACK_OFFSET(76)}, + [BPF_REG_AX] = {STACK_OFFSET(BPF_AX_HI), STACK_OFFSET(BPF_AX_LO)}, }; #define dst_lo dst[1] @@ -151,6 +193,7 @@ struct jit_ctx { unsigned int idx; unsigned int prologue_bytes; unsigned int epilogue_offset; + unsigned int cpu_architecture; u32 flags; u32 *offsets; u32 *target; @@ -196,9 +239,55 @@ static inline void emit(u32 inst, struct jit_ctx *ctx) } /* + * This is rather horrid, but necessary to convert an integer constant + * to an immediate operand for the opcodes, and be able to detect at + * build time whether the constant can't be converted (iow, usable in + * BUILD_BUG_ON()). 
+ */ +#define imm12val(v, s) (rol32(v, (s)) | (s) << 7) +#define const_imm8m(x) \ + ({ int r; \ + u32 v = (x); \ + if (!(v & ~0x000000ff)) \ + r = imm12val(v, 0); \ + else if (!(v & ~0xc000003f)) \ + r = imm12val(v, 2); \ + else if (!(v & ~0xf000000f)) \ + r = imm12val(v, 4); \ + else if (!(v & ~0xfc000003)) \ + r = imm12val(v, 6); \ + else if (!(v & ~0xff000000)) \ + r = imm12val(v, 8); \ + else if (!(v & ~0x3fc00000)) \ + r = imm12val(v, 10); \ + else if (!(v & ~0x0ff00000)) \ + r = imm12val(v, 12); \ + else if (!(v & ~0x03fc0000)) \ + r = imm12val(v, 14); \ + else if (!(v & ~0x00ff0000)) \ + r = imm12val(v, 16); \ + else if (!(v & ~0x003fc000)) \ + r = imm12val(v, 18); \ + else if (!(v & ~0x000ff000)) \ + r = imm12val(v, 20); \ + else if (!(v & ~0x0003fc00)) \ + r = imm12val(v, 22); \ + else if (!(v & ~0x0000ff00)) \ + r = imm12val(v, 24); \ + else if (!(v & ~0x00003fc0)) \ + r = imm12val(v, 26); \ + else if (!(v & ~0x00000ff0)) \ + r = imm12val(v, 28); \ + else if (!(v & ~0x000003fc)) \ + r = imm12val(v, 30); \ + else \ + r = -1; \ + r; }) + +/* * Checks if immediate value can be converted to imm12(12 bits) value. */ -static int16_t imm8m(u32 x) +static int imm8m(u32 x) { u32 rot; @@ -208,6 +297,38 @@ static int16_t imm8m(u32 x) return -1; } +#define imm8m(x) (__builtin_constant_p(x) ? const_imm8m(x) : imm8m(x)) + +static u32 arm_bpf_ldst_imm12(u32 op, u8 rt, u8 rn, s16 imm12) +{ + op |= rt << 12 | rn << 16; + if (imm12 >= 0) + op |= ARM_INST_LDST__U; + else + imm12 = -imm12; + return op | (imm12 & ARM_INST_LDST__IMM12); +} + +static u32 arm_bpf_ldst_imm8(u32 op, u8 rt, u8 rn, s16 imm8) +{ + op |= rt << 12 | rn << 16; + if (imm8 >= 0) + op |= ARM_INST_LDST__U; + else + imm8 = -imm8; + return op | (imm8 & 0xf0) << 4 | (imm8 & 0x0f); +} + +#define ARM_LDR_I(rt, rn, off) arm_bpf_ldst_imm12(ARM_INST_LDR_I, rt, rn, off) +#define ARM_LDRB_I(rt, rn, off) arm_bpf_ldst_imm12(ARM_INST_LDRB_I, rt, rn, off) +#define ARM_LDRD_I(rt, rn, off) arm_bpf_ldst_imm8(ARM_INST_LDRD_I, rt, rn, off) +#define ARM_LDRH_I(rt, rn, off) arm_bpf_ldst_imm8(ARM_INST_LDRH_I, rt, rn, off) + +#define ARM_STR_I(rt, rn, off) arm_bpf_ldst_imm12(ARM_INST_STR_I, rt, rn, off) +#define ARM_STRB_I(rt, rn, off) arm_bpf_ldst_imm12(ARM_INST_STRB_I, rt, rn, off) +#define ARM_STRD_I(rt, rn, off) arm_bpf_ldst_imm8(ARM_INST_STRD_I, rt, rn, off) +#define ARM_STRH_I(rt, rn, off) arm_bpf_ldst_imm8(ARM_INST_STRH_I, rt, rn, off) + /* * Initializes the JIT space with undefined instructions. */ @@ -227,19 +348,10 @@ static void jit_fill_hole(void *area, unsigned int size) #define STACK_ALIGNMENT 4 #endif -/* Stack space for BPF_REG_2, BPF_REG_3, BPF_REG_4, - * BPF_REG_5, BPF_REG_7, BPF_REG_8, BPF_REG_9, - * BPF_REG_FP and Tail call counts. - */ -#define SCRATCH_SIZE 80 - /* total stack size used in JITed code */ #define _STACK_SIZE (ctx->prog->aux->stack_depth + SCRATCH_SIZE) #define STACK_SIZE ALIGN(_STACK_SIZE, STACK_ALIGNMENT) -/* Get the offset of eBPF REGISTERs stored on scratch space. 
*/ -#define STACK_VAR(off) (STACK_SIZE - off) - #if __LINUX_ARM_ARCH__ < 7 static u16 imm_offset(u32 k, struct jit_ctx *ctx) @@ -355,7 +467,7 @@ static inline int epilogue_offset(const struct jit_ctx *ctx) static inline void emit_udivmod(u8 rd, u8 rm, u8 rn, struct jit_ctx *ctx, u8 op) { - const u8 *tmp = bpf2a32[TMP_REG_1]; + const s8 *tmp = bpf2a32[TMP_REG_1]; #if __LINUX_ARM_ARCH__ == 7 if (elf_hwcap & HWCAP_IDIVA) { @@ -402,44 +514,110 @@ static inline void emit_udivmod(u8 rd, u8 rm, u8 rn, struct jit_ctx *ctx, u8 op) emit(ARM_MOV_R(ARM_R0, tmp[1]), ctx); } -/* Checks whether BPF register is on scratch stack space or not. */ -static inline bool is_on_stack(u8 bpf_reg) +/* Is the translated BPF register on stack? */ +static bool is_stacked(s8 reg) +{ + return reg < 0; +} + +/* If a BPF register is on the stack (stk is true), load it to the + * supplied temporary register and return the temporary register + * for subsequent operations, otherwise just use the CPU register. + */ +static s8 arm_bpf_get_reg32(s8 reg, s8 tmp, struct jit_ctx *ctx) +{ + if (is_stacked(reg)) { + emit(ARM_LDR_I(tmp, ARM_FP, EBPF_SCRATCH_TO_ARM_FP(reg)), ctx); + reg = tmp; + } + return reg; +} + +static const s8 *arm_bpf_get_reg64(const s8 *reg, const s8 *tmp, + struct jit_ctx *ctx) { - static u8 stack_regs[] = {BPF_REG_AX, BPF_REG_3, BPF_REG_4, BPF_REG_5, - BPF_REG_7, BPF_REG_8, BPF_REG_9, TCALL_CNT, - BPF_REG_2, BPF_REG_FP}; - int i, reg_len = sizeof(stack_regs); - - for (i = 0 ; i < reg_len ; i++) { - if (bpf_reg == stack_regs[i]) - return true; + if (is_stacked(reg[1])) { + if (__LINUX_ARM_ARCH__ >= 6 || + ctx->cpu_architecture >= CPU_ARCH_ARMv5TE) { + emit(ARM_LDRD_I(tmp[1], ARM_FP, + EBPF_SCRATCH_TO_ARM_FP(reg[1])), ctx); + } else { + emit(ARM_LDR_I(tmp[1], ARM_FP, + EBPF_SCRATCH_TO_ARM_FP(reg[1])), ctx); + emit(ARM_LDR_I(tmp[0], ARM_FP, + EBPF_SCRATCH_TO_ARM_FP(reg[0])), ctx); + } + reg = tmp; + } + return reg; +} + +/* If a BPF register is on the stack (stk is true), save the register + * back to the stack. If the source register is not the same, then + * move it into the correct register. + */ +static void arm_bpf_put_reg32(s8 reg, s8 src, struct jit_ctx *ctx) +{ + if (is_stacked(reg)) + emit(ARM_STR_I(src, ARM_FP, EBPF_SCRATCH_TO_ARM_FP(reg)), ctx); + else if (reg != src) + emit(ARM_MOV_R(reg, src), ctx); +} + +static void arm_bpf_put_reg64(const s8 *reg, const s8 *src, + struct jit_ctx *ctx) +{ + if (is_stacked(reg[1])) { + if (__LINUX_ARM_ARCH__ >= 6 || + ctx->cpu_architecture >= CPU_ARCH_ARMv5TE) { + emit(ARM_STRD_I(src[1], ARM_FP, + EBPF_SCRATCH_TO_ARM_FP(reg[1])), ctx); + } else { + emit(ARM_STR_I(src[1], ARM_FP, + EBPF_SCRATCH_TO_ARM_FP(reg[1])), ctx); + emit(ARM_STR_I(src[0], ARM_FP, + EBPF_SCRATCH_TO_ARM_FP(reg[0])), ctx); + } + } else { + if (reg[1] != src[1]) + emit(ARM_MOV_R(reg[1], src[1]), ctx); + if (reg[0] != src[0]) + emit(ARM_MOV_R(reg[0], src[0]), ctx); } - return false; } -static inline void emit_a32_mov_i(const u8 dst, const u32 val, - bool dstk, struct jit_ctx *ctx) +static inline void emit_a32_mov_i(const s8 dst, const u32 val, + struct jit_ctx *ctx) { - const u8 *tmp = bpf2a32[TMP_REG_1]; + const s8 *tmp = bpf2a32[TMP_REG_1]; - if (dstk) { + if (is_stacked(dst)) { emit_mov_i(tmp[1], val, ctx); - emit(ARM_STR_I(tmp[1], ARM_SP, STACK_VAR(dst)), ctx); + arm_bpf_put_reg32(dst, tmp[1], ctx); } else { emit_mov_i(dst, val, ctx); } } +static void emit_a32_mov_i64(const s8 dst[], u64 val, struct jit_ctx *ctx) +{ + const s8 *tmp = bpf2a32[TMP_REG_1]; + const s8 *rd = is_stacked(dst_lo) ? 
tmp : dst; + + emit_mov_i(rd[1], (u32)val, ctx); + emit_mov_i(rd[0], val >> 32, ctx); + + arm_bpf_put_reg64(dst, rd, ctx); +} + /* Sign extended move */ -static inline void emit_a32_mov_i64(const bool is64, const u8 dst[], - const u32 val, bool dstk, - struct jit_ctx *ctx) { - u32 hi = 0; +static inline void emit_a32_mov_se_i64(const bool is64, const s8 dst[], + const u32 val, struct jit_ctx *ctx) { + u64 val64 = val; if (is64 && (val & (1<<31))) - hi = (u32)~0; - emit_a32_mov_i(dst_lo, val, dstk, ctx); - emit_a32_mov_i(dst_hi, hi, dstk, ctx); + val64 |= 0xffffffff00000000ULL; + emit_a32_mov_i64(dst, val64, ctx); } static inline void emit_a32_add_r(const u8 dst, const u8 src, @@ -521,75 +699,94 @@ static inline void emit_alu_r(const u8 dst, const u8 src, const bool is64, /* ALU operation (32 bit) * dst = dst (op) src */ -static inline void emit_a32_alu_r(const u8 dst, const u8 src, - bool dstk, bool sstk, +static inline void emit_a32_alu_r(const s8 dst, const s8 src, struct jit_ctx *ctx, const bool is64, const bool hi, const u8 op) { - const u8 *tmp = bpf2a32[TMP_REG_1]; - u8 rn = sstk ? tmp[1] : src; - - if (sstk) - emit(ARM_LDR_I(rn, ARM_SP, STACK_VAR(src)), ctx); + const s8 *tmp = bpf2a32[TMP_REG_1]; + s8 rn, rd; + rn = arm_bpf_get_reg32(src, tmp[1], ctx); + rd = arm_bpf_get_reg32(dst, tmp[0], ctx); /* ALU operation */ - if (dstk) { - emit(ARM_LDR_I(tmp[0], ARM_SP, STACK_VAR(dst)), ctx); - emit_alu_r(tmp[0], rn, is64, hi, op, ctx); - emit(ARM_STR_I(tmp[0], ARM_SP, STACK_VAR(dst)), ctx); - } else { - emit_alu_r(dst, rn, is64, hi, op, ctx); - } + emit_alu_r(rd, rn, is64, hi, op, ctx); + arm_bpf_put_reg32(dst, rd, ctx); } /* ALU operation (64 bit) */ -static inline void emit_a32_alu_r64(const bool is64, const u8 dst[], - const u8 src[], bool dstk, - bool sstk, struct jit_ctx *ctx, +static inline void emit_a32_alu_r64(const bool is64, const s8 dst[], + const s8 src[], struct jit_ctx *ctx, const u8 op) { - emit_a32_alu_r(dst_lo, src_lo, dstk, sstk, ctx, is64, false, op); - if (is64) - emit_a32_alu_r(dst_hi, src_hi, dstk, sstk, ctx, is64, true, op); - else - emit_a32_mov_i(dst_hi, 0, dstk, ctx); + const s8 *tmp = bpf2a32[TMP_REG_1]; + const s8 *tmp2 = bpf2a32[TMP_REG_2]; + const s8 *rd; + + rd = arm_bpf_get_reg64(dst, tmp, ctx); + if (is64) { + const s8 *rs; + + rs = arm_bpf_get_reg64(src, tmp2, ctx); + + /* ALU operation */ + emit_alu_r(rd[1], rs[1], true, false, op, ctx); + emit_alu_r(rd[0], rs[0], true, true, op, ctx); + } else { + s8 rs; + + rs = arm_bpf_get_reg32(src_lo, tmp2[1], ctx); + + /* ALU operation */ + emit_alu_r(rd[1], rs, true, false, op, ctx); + emit_a32_mov_i(rd[0], 0, ctx); + } + + arm_bpf_put_reg64(dst, rd, ctx); } -/* dst = imm (4 bytes)*/ -static inline void emit_a32_mov_r(const u8 dst, const u8 src, - bool dstk, bool sstk, +/* dst = src (4 bytes)*/ +static inline void emit_a32_mov_r(const s8 dst, const s8 src, struct jit_ctx *ctx) { - const u8 *tmp = bpf2a32[TMP_REG_1]; - u8 rt = sstk ? 
tmp[0] : src; + const s8 *tmp = bpf2a32[TMP_REG_1]; + s8 rt; - if (sstk) - emit(ARM_LDR_I(tmp[0], ARM_SP, STACK_VAR(src)), ctx); - if (dstk) - emit(ARM_STR_I(rt, ARM_SP, STACK_VAR(dst)), ctx); - else - emit(ARM_MOV_R(dst, rt), ctx); + rt = arm_bpf_get_reg32(src, tmp[0], ctx); + arm_bpf_put_reg32(dst, rt, ctx); } /* dst = src */ -static inline void emit_a32_mov_r64(const bool is64, const u8 dst[], - const u8 src[], bool dstk, - bool sstk, struct jit_ctx *ctx) { - emit_a32_mov_r(dst_lo, src_lo, dstk, sstk, ctx); - if (is64) { +static inline void emit_a32_mov_r64(const bool is64, const s8 dst[], + const s8 src[], + struct jit_ctx *ctx) { + if (!is64) { + emit_a32_mov_r(dst_lo, src_lo, ctx); + /* Zero out high 4 bytes */ + emit_a32_mov_i(dst_hi, 0, ctx); + } else if (__LINUX_ARM_ARCH__ < 6 && + ctx->cpu_architecture < CPU_ARCH_ARMv5TE) { /* complete 8 byte move */ - emit_a32_mov_r(dst_hi, src_hi, dstk, sstk, ctx); + emit_a32_mov_r(dst_lo, src_lo, ctx); + emit_a32_mov_r(dst_hi, src_hi, ctx); + } else if (is_stacked(src_lo) && is_stacked(dst_lo)) { + const u8 *tmp = bpf2a32[TMP_REG_1]; + + emit(ARM_LDRD_I(tmp[1], ARM_FP, EBPF_SCRATCH_TO_ARM_FP(src_lo)), ctx); + emit(ARM_STRD_I(tmp[1], ARM_FP, EBPF_SCRATCH_TO_ARM_FP(dst_lo)), ctx); + } else if (is_stacked(src_lo)) { + emit(ARM_LDRD_I(dst[1], ARM_FP, EBPF_SCRATCH_TO_ARM_FP(src_lo)), ctx); + } else if (is_stacked(dst_lo)) { + emit(ARM_STRD_I(src[1], ARM_FP, EBPF_SCRATCH_TO_ARM_FP(dst_lo)), ctx); } else { - /* Zero out high 4 bytes */ - emit_a32_mov_i(dst_hi, 0, dstk, ctx); + emit(ARM_MOV_R(dst[0], src[0]), ctx); + emit(ARM_MOV_R(dst[1], src[1]), ctx); } } /* Shift operations */ -static inline void emit_a32_alu_i(const u8 dst, const u32 val, bool dstk, +static inline void emit_a32_alu_i(const s8 dst, const u32 val, struct jit_ctx *ctx, const u8 op) { - const u8 *tmp = bpf2a32[TMP_REG_1]; - u8 rd = dstk ? tmp[0] : dst; + const s8 *tmp = bpf2a32[TMP_REG_1]; + s8 rd; - if (dstk) - emit(ARM_LDR_I(rd, ARM_SP, STACK_VAR(dst)), ctx); + rd = arm_bpf_get_reg32(dst, tmp[0], ctx); /* Do shift operation */ switch (op) { @@ -604,303 +801,245 @@ static inline void emit_a32_alu_i(const u8 dst, const u32 val, bool dstk, break; } - if (dstk) - emit(ARM_STR_I(rd, ARM_SP, STACK_VAR(dst)), ctx); + arm_bpf_put_reg32(dst, rd, ctx); } /* dst = ~dst (64 bit) */ -static inline void emit_a32_neg64(const u8 dst[], bool dstk, +static inline void emit_a32_neg64(const s8 dst[], struct jit_ctx *ctx){ - const u8 *tmp = bpf2a32[TMP_REG_1]; - u8 rd = dstk ? tmp[1] : dst[1]; - u8 rm = dstk ? 
tmp[0] : dst[0]; + const s8 *tmp = bpf2a32[TMP_REG_1]; + const s8 *rd; /* Setup Operand */ - if (dstk) { - emit(ARM_LDR_I(rd, ARM_SP, STACK_VAR(dst_lo)), ctx); - emit(ARM_LDR_I(rm, ARM_SP, STACK_VAR(dst_hi)), ctx); - } + rd = arm_bpf_get_reg64(dst, tmp, ctx); /* Do Negate Operation */ - emit(ARM_RSBS_I(rd, rd, 0), ctx); - emit(ARM_RSC_I(rm, rm, 0), ctx); + emit(ARM_RSBS_I(rd[1], rd[1], 0), ctx); + emit(ARM_RSC_I(rd[0], rd[0], 0), ctx); - if (dstk) { - emit(ARM_STR_I(rd, ARM_SP, STACK_VAR(dst_lo)), ctx); - emit(ARM_STR_I(rm, ARM_SP, STACK_VAR(dst_hi)), ctx); - } + arm_bpf_put_reg64(dst, rd, ctx); } /* dst = dst << src */ -static inline void emit_a32_lsh_r64(const u8 dst[], const u8 src[], bool dstk, - bool sstk, struct jit_ctx *ctx) { - const u8 *tmp = bpf2a32[TMP_REG_1]; - const u8 *tmp2 = bpf2a32[TMP_REG_2]; +static inline void emit_a32_lsh_r64(const s8 dst[], const s8 src[], + struct jit_ctx *ctx) { + const s8 *tmp = bpf2a32[TMP_REG_1]; + const s8 *tmp2 = bpf2a32[TMP_REG_2]; + const s8 *rd; + s8 rt; /* Setup Operands */ - u8 rt = sstk ? tmp2[1] : src_lo; - u8 rd = dstk ? tmp[1] : dst_lo; - u8 rm = dstk ? tmp[0] : dst_hi; - - if (sstk) - emit(ARM_LDR_I(rt, ARM_SP, STACK_VAR(src_lo)), ctx); - if (dstk) { - emit(ARM_LDR_I(rd, ARM_SP, STACK_VAR(dst_lo)), ctx); - emit(ARM_LDR_I(rm, ARM_SP, STACK_VAR(dst_hi)), ctx); - } + rt = arm_bpf_get_reg32(src_lo, tmp2[1], ctx); + rd = arm_bpf_get_reg64(dst, tmp, ctx); /* Do LSH operation */ emit(ARM_SUB_I(ARM_IP, rt, 32), ctx); emit(ARM_RSB_I(tmp2[0], rt, 32), ctx); - emit(ARM_MOV_SR(ARM_LR, rm, SRTYPE_ASL, rt), ctx); - emit(ARM_ORR_SR(ARM_LR, ARM_LR, rd, SRTYPE_ASL, ARM_IP), ctx); - emit(ARM_ORR_SR(ARM_IP, ARM_LR, rd, SRTYPE_LSR, tmp2[0]), ctx); - emit(ARM_MOV_SR(ARM_LR, rd, SRTYPE_ASL, rt), ctx); - - if (dstk) { - emit(ARM_STR_I(ARM_LR, ARM_SP, STACK_VAR(dst_lo)), ctx); - emit(ARM_STR_I(ARM_IP, ARM_SP, STACK_VAR(dst_hi)), ctx); - } else { - emit(ARM_MOV_R(rd, ARM_LR), ctx); - emit(ARM_MOV_R(rm, ARM_IP), ctx); - } + emit(ARM_MOV_SR(ARM_LR, rd[0], SRTYPE_ASL, rt), ctx); + emit(ARM_ORR_SR(ARM_LR, ARM_LR, rd[1], SRTYPE_ASL, ARM_IP), ctx); + emit(ARM_ORR_SR(ARM_IP, ARM_LR, rd[1], SRTYPE_LSR, tmp2[0]), ctx); + emit(ARM_MOV_SR(ARM_LR, rd[1], SRTYPE_ASL, rt), ctx); + + arm_bpf_put_reg32(dst_lo, ARM_LR, ctx); + arm_bpf_put_reg32(dst_hi, ARM_IP, ctx); } /* dst = dst >> src (signed)*/ -static inline void emit_a32_arsh_r64(const u8 dst[], const u8 src[], bool dstk, - bool sstk, struct jit_ctx *ctx) { - const u8 *tmp = bpf2a32[TMP_REG_1]; - const u8 *tmp2 = bpf2a32[TMP_REG_2]; +static inline void emit_a32_arsh_r64(const s8 dst[], const s8 src[], + struct jit_ctx *ctx) { + const s8 *tmp = bpf2a32[TMP_REG_1]; + const s8 *tmp2 = bpf2a32[TMP_REG_2]; + const s8 *rd; + s8 rt; + /* Setup Operands */ - u8 rt = sstk ? tmp2[1] : src_lo; - u8 rd = dstk ? tmp[1] : dst_lo; - u8 rm = dstk ? 
tmp[0] : dst_hi; - - if (sstk) - emit(ARM_LDR_I(rt, ARM_SP, STACK_VAR(src_lo)), ctx); - if (dstk) { - emit(ARM_LDR_I(rd, ARM_SP, STACK_VAR(dst_lo)), ctx); - emit(ARM_LDR_I(rm, ARM_SP, STACK_VAR(dst_hi)), ctx); - } + rt = arm_bpf_get_reg32(src_lo, tmp2[1], ctx); + rd = arm_bpf_get_reg64(dst, tmp, ctx); /* Do the ARSH operation */ emit(ARM_RSB_I(ARM_IP, rt, 32), ctx); emit(ARM_SUBS_I(tmp2[0], rt, 32), ctx); - emit(ARM_MOV_SR(ARM_LR, rd, SRTYPE_LSR, rt), ctx); - emit(ARM_ORR_SR(ARM_LR, ARM_LR, rm, SRTYPE_ASL, ARM_IP), ctx); + emit(ARM_MOV_SR(ARM_LR, rd[1], SRTYPE_LSR, rt), ctx); + emit(ARM_ORR_SR(ARM_LR, ARM_LR, rd[0], SRTYPE_ASL, ARM_IP), ctx); _emit(ARM_COND_MI, ARM_B(0), ctx); - emit(ARM_ORR_SR(ARM_LR, ARM_LR, rm, SRTYPE_ASR, tmp2[0]), ctx); - emit(ARM_MOV_SR(ARM_IP, rm, SRTYPE_ASR, rt), ctx); - if (dstk) { - emit(ARM_STR_I(ARM_LR, ARM_SP, STACK_VAR(dst_lo)), ctx); - emit(ARM_STR_I(ARM_IP, ARM_SP, STACK_VAR(dst_hi)), ctx); - } else { - emit(ARM_MOV_R(rd, ARM_LR), ctx); - emit(ARM_MOV_R(rm, ARM_IP), ctx); - } + emit(ARM_ORR_SR(ARM_LR, ARM_LR, rd[0], SRTYPE_ASR, tmp2[0]), ctx); + emit(ARM_MOV_SR(ARM_IP, rd[0], SRTYPE_ASR, rt), ctx); + + arm_bpf_put_reg32(dst_lo, ARM_LR, ctx); + arm_bpf_put_reg32(dst_hi, ARM_IP, ctx); } /* dst = dst >> src */ -static inline void emit_a32_rsh_r64(const u8 dst[], const u8 src[], bool dstk, - bool sstk, struct jit_ctx *ctx) { - const u8 *tmp = bpf2a32[TMP_REG_1]; - const u8 *tmp2 = bpf2a32[TMP_REG_2]; +static inline void emit_a32_rsh_r64(const s8 dst[], const s8 src[], + struct jit_ctx *ctx) { + const s8 *tmp = bpf2a32[TMP_REG_1]; + const s8 *tmp2 = bpf2a32[TMP_REG_2]; + const s8 *rd; + s8 rt; + /* Setup Operands */ - u8 rt = sstk ? tmp2[1] : src_lo; - u8 rd = dstk ? tmp[1] : dst_lo; - u8 rm = dstk ? tmp[0] : dst_hi; - - if (sstk) - emit(ARM_LDR_I(rt, ARM_SP, STACK_VAR(src_lo)), ctx); - if (dstk) { - emit(ARM_LDR_I(rd, ARM_SP, STACK_VAR(dst_lo)), ctx); - emit(ARM_LDR_I(rm, ARM_SP, STACK_VAR(dst_hi)), ctx); - } + rt = arm_bpf_get_reg32(src_lo, tmp2[1], ctx); + rd = arm_bpf_get_reg64(dst, tmp, ctx); /* Do RSH operation */ emit(ARM_RSB_I(ARM_IP, rt, 32), ctx); emit(ARM_SUBS_I(tmp2[0], rt, 32), ctx); - emit(ARM_MOV_SR(ARM_LR, rd, SRTYPE_LSR, rt), ctx); - emit(ARM_ORR_SR(ARM_LR, ARM_LR, rm, SRTYPE_ASL, ARM_IP), ctx); - emit(ARM_ORR_SR(ARM_LR, ARM_LR, rm, SRTYPE_LSR, tmp2[0]), ctx); - emit(ARM_MOV_SR(ARM_IP, rm, SRTYPE_LSR, rt), ctx); - if (dstk) { - emit(ARM_STR_I(ARM_LR, ARM_SP, STACK_VAR(dst_lo)), ctx); - emit(ARM_STR_I(ARM_IP, ARM_SP, STACK_VAR(dst_hi)), ctx); - } else { - emit(ARM_MOV_R(rd, ARM_LR), ctx); - emit(ARM_MOV_R(rm, ARM_IP), ctx); - } + emit(ARM_MOV_SR(ARM_LR, rd[1], SRTYPE_LSR, rt), ctx); + emit(ARM_ORR_SR(ARM_LR, ARM_LR, rd[0], SRTYPE_ASL, ARM_IP), ctx); + emit(ARM_ORR_SR(ARM_LR, ARM_LR, rd[0], SRTYPE_LSR, tmp2[0]), ctx); + emit(ARM_MOV_SR(ARM_IP, rd[0], SRTYPE_LSR, rt), ctx); + + arm_bpf_put_reg32(dst_lo, ARM_LR, ctx); + arm_bpf_put_reg32(dst_hi, ARM_IP, ctx); } /* dst = dst << val */ -static inline void emit_a32_lsh_i64(const u8 dst[], bool dstk, - const u32 val, struct jit_ctx *ctx){ - const u8 *tmp = bpf2a32[TMP_REG_1]; - const u8 *tmp2 = bpf2a32[TMP_REG_2]; - /* Setup operands */ - u8 rd = dstk ? tmp[1] : dst_lo; - u8 rm = dstk ? 
tmp[0] : dst_hi; +static inline void emit_a32_lsh_i64(const s8 dst[], + const u32 val, struct jit_ctx *ctx){ + const s8 *tmp = bpf2a32[TMP_REG_1]; + const s8 *tmp2 = bpf2a32[TMP_REG_2]; + const s8 *rd; - if (dstk) { - emit(ARM_LDR_I(rd, ARM_SP, STACK_VAR(dst_lo)), ctx); - emit(ARM_LDR_I(rm, ARM_SP, STACK_VAR(dst_hi)), ctx); - } + /* Setup operands */ + rd = arm_bpf_get_reg64(dst, tmp, ctx); /* Do LSH operation */ if (val < 32) { - emit(ARM_MOV_SI(tmp2[0], rm, SRTYPE_ASL, val), ctx); - emit(ARM_ORR_SI(rm, tmp2[0], rd, SRTYPE_LSR, 32 - val), ctx); - emit(ARM_MOV_SI(rd, rd, SRTYPE_ASL, val), ctx); + emit(ARM_MOV_SI(tmp2[0], rd[0], SRTYPE_ASL, val), ctx); + emit(ARM_ORR_SI(rd[0], tmp2[0], rd[1], SRTYPE_LSR, 32 - val), ctx); + emit(ARM_MOV_SI(rd[1], rd[1], SRTYPE_ASL, val), ctx); } else { if (val == 32) - emit(ARM_MOV_R(rm, rd), ctx); + emit(ARM_MOV_R(rd[0], rd[1]), ctx); else - emit(ARM_MOV_SI(rm, rd, SRTYPE_ASL, val - 32), ctx); - emit(ARM_EOR_R(rd, rd, rd), ctx); + emit(ARM_MOV_SI(rd[0], rd[1], SRTYPE_ASL, val - 32), ctx); + emit(ARM_EOR_R(rd[1], rd[1], rd[1]), ctx); } - if (dstk) { - emit(ARM_STR_I(rd, ARM_SP, STACK_VAR(dst_lo)), ctx); - emit(ARM_STR_I(rm, ARM_SP, STACK_VAR(dst_hi)), ctx); - } + arm_bpf_put_reg64(dst, rd, ctx); } /* dst = dst >> val */ -static inline void emit_a32_rsh_i64(const u8 dst[], bool dstk, +static inline void emit_a32_rsh_i64(const s8 dst[], const u32 val, struct jit_ctx *ctx) { - const u8 *tmp = bpf2a32[TMP_REG_1]; - const u8 *tmp2 = bpf2a32[TMP_REG_2]; - /* Setup operands */ - u8 rd = dstk ? tmp[1] : dst_lo; - u8 rm = dstk ? tmp[0] : dst_hi; + const s8 *tmp = bpf2a32[TMP_REG_1]; + const s8 *tmp2 = bpf2a32[TMP_REG_2]; + const s8 *rd; - if (dstk) { - emit(ARM_LDR_I(rd, ARM_SP, STACK_VAR(dst_lo)), ctx); - emit(ARM_LDR_I(rm, ARM_SP, STACK_VAR(dst_hi)), ctx); - } + /* Setup operands */ + rd = arm_bpf_get_reg64(dst, tmp, ctx); /* Do LSR operation */ if (val < 32) { - emit(ARM_MOV_SI(tmp2[1], rd, SRTYPE_LSR, val), ctx); - emit(ARM_ORR_SI(rd, tmp2[1], rm, SRTYPE_ASL, 32 - val), ctx); - emit(ARM_MOV_SI(rm, rm, SRTYPE_LSR, val), ctx); + emit(ARM_MOV_SI(tmp2[1], rd[1], SRTYPE_LSR, val), ctx); + emit(ARM_ORR_SI(rd[1], tmp2[1], rd[0], SRTYPE_ASL, 32 - val), ctx); + emit(ARM_MOV_SI(rd[0], rd[0], SRTYPE_LSR, val), ctx); } else if (val == 32) { - emit(ARM_MOV_R(rd, rm), ctx); - emit(ARM_MOV_I(rm, 0), ctx); + emit(ARM_MOV_R(rd[1], rd[0]), ctx); + emit(ARM_MOV_I(rd[0], 0), ctx); } else { - emit(ARM_MOV_SI(rd, rm, SRTYPE_LSR, val - 32), ctx); - emit(ARM_MOV_I(rm, 0), ctx); + emit(ARM_MOV_SI(rd[1], rd[0], SRTYPE_LSR, val - 32), ctx); + emit(ARM_MOV_I(rd[0], 0), ctx); } - if (dstk) { - emit(ARM_STR_I(rd, ARM_SP, STACK_VAR(dst_lo)), ctx); - emit(ARM_STR_I(rm, ARM_SP, STACK_VAR(dst_hi)), ctx); - } + arm_bpf_put_reg64(dst, rd, ctx); } /* dst = dst >> val (signed) */ -static inline void emit_a32_arsh_i64(const u8 dst[], bool dstk, +static inline void emit_a32_arsh_i64(const s8 dst[], const u32 val, struct jit_ctx *ctx){ - const u8 *tmp = bpf2a32[TMP_REG_1]; - const u8 *tmp2 = bpf2a32[TMP_REG_2]; - /* Setup operands */ - u8 rd = dstk ? tmp[1] : dst_lo; - u8 rm = dstk ? 
tmp[0] : dst_hi; - - if (dstk) { - emit(ARM_LDR_I(rd, ARM_SP, STACK_VAR(dst_lo)), ctx); - emit(ARM_LDR_I(rm, ARM_SP, STACK_VAR(dst_hi)), ctx); - } + const s8 *tmp = bpf2a32[TMP_REG_1]; + const s8 *tmp2 = bpf2a32[TMP_REG_2]; + const s8 *rd; + + /* Setup operands */ + rd = arm_bpf_get_reg64(dst, tmp, ctx); /* Do ARSH operation */ if (val < 32) { - emit(ARM_MOV_SI(tmp2[1], rd, SRTYPE_LSR, val), ctx); - emit(ARM_ORR_SI(rd, tmp2[1], rm, SRTYPE_ASL, 32 - val), ctx); - emit(ARM_MOV_SI(rm, rm, SRTYPE_ASR, val), ctx); + emit(ARM_MOV_SI(tmp2[1], rd[1], SRTYPE_LSR, val), ctx); + emit(ARM_ORR_SI(rd[1], tmp2[1], rd[0], SRTYPE_ASL, 32 - val), ctx); + emit(ARM_MOV_SI(rd[0], rd[0], SRTYPE_ASR, val), ctx); } else if (val == 32) { - emit(ARM_MOV_R(rd, rm), ctx); - emit(ARM_MOV_SI(rm, rm, SRTYPE_ASR, 31), ctx); + emit(ARM_MOV_R(rd[1], rd[0]), ctx); + emit(ARM_MOV_SI(rd[0], rd[0], SRTYPE_ASR, 31), ctx); } else { - emit(ARM_MOV_SI(rd, rm, SRTYPE_ASR, val - 32), ctx); - emit(ARM_MOV_SI(rm, rm, SRTYPE_ASR, 31), ctx); + emit(ARM_MOV_SI(rd[1], rd[0], SRTYPE_ASR, val - 32), ctx); + emit(ARM_MOV_SI(rd[0], rd[0], SRTYPE_ASR, 31), ctx); } - if (dstk) { - emit(ARM_STR_I(rd, ARM_SP, STACK_VAR(dst_lo)), ctx); - emit(ARM_STR_I(rm, ARM_SP, STACK_VAR(dst_hi)), ctx); - } + arm_bpf_put_reg64(dst, rd, ctx); } -static inline void emit_a32_mul_r64(const u8 dst[], const u8 src[], bool dstk, - bool sstk, struct jit_ctx *ctx) { - const u8 *tmp = bpf2a32[TMP_REG_1]; - const u8 *tmp2 = bpf2a32[TMP_REG_2]; +static inline void emit_a32_mul_r64(const s8 dst[], const s8 src[], + struct jit_ctx *ctx) { + const s8 *tmp = bpf2a32[TMP_REG_1]; + const s8 *tmp2 = bpf2a32[TMP_REG_2]; + const s8 *rd, *rt; + /* Setup operands for multiplication */ - u8 rd = dstk ? tmp[1] : dst_lo; - u8 rm = dstk ? tmp[0] : dst_hi; - u8 rt = sstk ? tmp2[1] : src_lo; - u8 rn = sstk ? tmp2[0] : src_hi; - - if (dstk) { - emit(ARM_LDR_I(rd, ARM_SP, STACK_VAR(dst_lo)), ctx); - emit(ARM_LDR_I(rm, ARM_SP, STACK_VAR(dst_hi)), ctx); - } - if (sstk) { - emit(ARM_LDR_I(rt, ARM_SP, STACK_VAR(src_lo)), ctx); - emit(ARM_LDR_I(rn, ARM_SP, STACK_VAR(src_hi)), ctx); - } + rd = arm_bpf_get_reg64(dst, tmp, ctx); + rt = arm_bpf_get_reg64(src, tmp2, ctx); /* Do Multiplication */ - emit(ARM_MUL(ARM_IP, rd, rn), ctx); - emit(ARM_MUL(ARM_LR, rm, rt), ctx); + emit(ARM_MUL(ARM_IP, rd[1], rt[0]), ctx); + emit(ARM_MUL(ARM_LR, rd[0], rt[1]), ctx); emit(ARM_ADD_R(ARM_LR, ARM_IP, ARM_LR), ctx); - emit(ARM_UMULL(ARM_IP, rm, rd, rt), ctx); - emit(ARM_ADD_R(rm, ARM_LR, rm), ctx); - if (dstk) { - emit(ARM_STR_I(ARM_IP, ARM_SP, STACK_VAR(dst_lo)), ctx); - emit(ARM_STR_I(rm, ARM_SP, STACK_VAR(dst_hi)), ctx); - } else { - emit(ARM_MOV_R(rd, ARM_IP), ctx); - } + emit(ARM_UMULL(ARM_IP, rd[0], rd[1], rt[1]), ctx); + emit(ARM_ADD_R(rd[0], ARM_LR, rd[0]), ctx); + + arm_bpf_put_reg32(dst_lo, ARM_IP, ctx); + arm_bpf_put_reg32(dst_hi, rd[0], ctx); } /* *(size *)(dst + off) = src */ -static inline void emit_str_r(const u8 dst, const u8 src, bool dstk, - const s32 off, struct jit_ctx *ctx, const u8 sz){ - const u8 *tmp = bpf2a32[TMP_REG_1]; - u8 rd = dstk ? 
tmp[1] : dst; - - if (dstk) - emit(ARM_LDR_I(rd, ARM_SP, STACK_VAR(dst)), ctx); - if (off) { - emit_a32_mov_i(tmp[0], off, false, ctx); - emit(ARM_ADD_R(tmp[0], rd, tmp[0]), ctx); +static inline void emit_str_r(const s8 dst, const s8 src[], + s32 off, struct jit_ctx *ctx, const u8 sz){ + const s8 *tmp = bpf2a32[TMP_REG_1]; + s32 off_max; + s8 rd; + + rd = arm_bpf_get_reg32(dst, tmp[1], ctx); + + if (sz == BPF_H) + off_max = 0xff; + else + off_max = 0xfff; + + if (off < 0 || off > off_max) { + emit_a32_mov_i(tmp[0], off, ctx); + emit(ARM_ADD_R(tmp[0], tmp[0], rd), ctx); rd = tmp[0]; + off = 0; } switch (sz) { - case BPF_W: - /* Store a Word */ - emit(ARM_STR_I(src, rd, 0), ctx); + case BPF_B: + /* Store a Byte */ + emit(ARM_STRB_I(src_lo, rd, off), ctx); break; case BPF_H: /* Store a HalfWord */ - emit(ARM_STRH_I(src, rd, 0), ctx); + emit(ARM_STRH_I(src_lo, rd, off), ctx); break; - case BPF_B: - /* Store a Byte */ - emit(ARM_STRB_I(src, rd, 0), ctx); + case BPF_W: + /* Store a Word */ + emit(ARM_STR_I(src_lo, rd, off), ctx); + break; + case BPF_DW: + /* Store a Double Word */ + emit(ARM_STR_I(src_lo, rd, off), ctx); + emit(ARM_STR_I(src_hi, rd, off + 4), ctx); break; } } /* dst = *(size*)(src + off) */ -static inline void emit_ldx_r(const u8 dst[], const u8 src, bool dstk, +static inline void emit_ldx_r(const s8 dst[], const s8 src, s32 off, struct jit_ctx *ctx, const u8 sz){ - const u8 *tmp = bpf2a32[TMP_REG_1]; - const u8 *rd = dstk ? tmp : dst; - u8 rm = src; + const s8 *tmp = bpf2a32[TMP_REG_1]; + const s8 *rd = is_stacked(dst_lo) ? tmp : dst; + s8 rm = src; s32 off_max; if (sz == BPF_H) @@ -909,7 +1048,7 @@ static inline void emit_ldx_r(const u8 dst[], const u8 src, bool dstk, off_max = 0xfff; if (off < 0 || off > off_max) { - emit_a32_mov_i(tmp[0], off, false, ctx); + emit_a32_mov_i(tmp[0], off, ctx); emit(ARM_ADD_R(tmp[0], tmp[0], src), ctx); rm = tmp[0]; off = 0; @@ -921,17 +1060,17 @@ static inline void emit_ldx_r(const u8 dst[], const u8 src, bool dstk, case BPF_B: /* Load a Byte */ emit(ARM_LDRB_I(rd[1], rm, off), ctx); - emit_a32_mov_i(dst[0], 0, dstk, ctx); + emit_a32_mov_i(rd[0], 0, ctx); break; case BPF_H: /* Load a HalfWord */ emit(ARM_LDRH_I(rd[1], rm, off), ctx); - emit_a32_mov_i(dst[0], 0, dstk, ctx); + emit_a32_mov_i(rd[0], 0, ctx); break; case BPF_W: /* Load a Word */ emit(ARM_LDR_I(rd[1], rm, off), ctx); - emit_a32_mov_i(dst[0], 0, dstk, ctx); + emit_a32_mov_i(rd[0], 0, ctx); break; case BPF_DW: /* Load a Double Word */ @@ -939,10 +1078,7 @@ static inline void emit_ldx_r(const u8 dst[], const u8 src, bool dstk, emit(ARM_LDR_I(rd[0], rm, off + 4), ctx); break; } - if (dstk) - emit(ARM_STR_I(rd[1], ARM_SP, STACK_VAR(dst[1])), ctx); - if (dstk && sz == BPF_DW) - emit(ARM_STR_I(rd[0], ARM_SP, STACK_VAR(dst[0])), ctx); + arm_bpf_put_reg64(dst, rd, ctx); } /* Arithmatic Operation */ @@ -981,64 +1117,66 @@ static int emit_bpf_tail_call(struct jit_ctx *ctx) { /* bpf_tail_call(void *prog_ctx, struct bpf_array *array, u64 index) */ - const u8 *r2 = bpf2a32[BPF_REG_2]; - const u8 *r3 = bpf2a32[BPF_REG_3]; - const u8 *tmp = bpf2a32[TMP_REG_1]; - const u8 *tmp2 = bpf2a32[TMP_REG_2]; - const u8 *tcc = bpf2a32[TCALL_CNT]; + const s8 *r2 = bpf2a32[BPF_REG_2]; + const s8 *r3 = bpf2a32[BPF_REG_3]; + const s8 *tmp = bpf2a32[TMP_REG_1]; + const s8 *tmp2 = bpf2a32[TMP_REG_2]; + const s8 *tcc = bpf2a32[TCALL_CNT]; + const s8 *tc; const int idx0 = ctx->idx; #define cur_offset (ctx->idx - idx0) #define jmp_offset (out_offset - (cur_offset) - 2) - u32 off, lo, hi; + u32 lo, hi; + s8 r_array, 
r_index; + int off; /* if (index >= array->map.max_entries) * goto out; */ + BUILD_BUG_ON(offsetof(struct bpf_array, map.max_entries) > + ARM_INST_LDST__IMM12); off = offsetof(struct bpf_array, map.max_entries); - /* array->map.max_entries */ - emit_a32_mov_i(tmp[1], off, false, ctx); - emit(ARM_LDR_I(tmp2[1], ARM_SP, STACK_VAR(r2[1])), ctx); - emit(ARM_LDR_R(tmp[1], tmp2[1], tmp[1]), ctx); + r_array = arm_bpf_get_reg32(r2[1], tmp2[0], ctx); /* index is 32-bit for arrays */ - emit(ARM_LDR_I(tmp2[1], ARM_SP, STACK_VAR(r3[1])), ctx); + r_index = arm_bpf_get_reg32(r3[1], tmp2[1], ctx); + /* array->map.max_entries */ + emit(ARM_LDR_I(tmp[1], r_array, off), ctx); /* index >= array->map.max_entries */ - emit(ARM_CMP_R(tmp2[1], tmp[1]), ctx); + emit(ARM_CMP_R(r_index, tmp[1]), ctx); _emit(ARM_COND_CS, ARM_B(jmp_offset), ctx); + /* tmp2[0] = array, tmp2[1] = index */ + /* if (tail_call_cnt > MAX_TAIL_CALL_CNT) * goto out; * tail_call_cnt++; */ lo = (u32)MAX_TAIL_CALL_CNT; hi = (u32)((u64)MAX_TAIL_CALL_CNT >> 32); - emit(ARM_LDR_I(tmp[1], ARM_SP, STACK_VAR(tcc[1])), ctx); - emit(ARM_LDR_I(tmp[0], ARM_SP, STACK_VAR(tcc[0])), ctx); - emit(ARM_CMP_I(tmp[0], hi), ctx); - _emit(ARM_COND_EQ, ARM_CMP_I(tmp[1], lo), ctx); + tc = arm_bpf_get_reg64(tcc, tmp, ctx); + emit(ARM_CMP_I(tc[0], hi), ctx); + _emit(ARM_COND_EQ, ARM_CMP_I(tc[1], lo), ctx); _emit(ARM_COND_HI, ARM_B(jmp_offset), ctx); - emit(ARM_ADDS_I(tmp[1], tmp[1], 1), ctx); - emit(ARM_ADC_I(tmp[0], tmp[0], 0), ctx); - emit(ARM_STR_I(tmp[1], ARM_SP, STACK_VAR(tcc[1])), ctx); - emit(ARM_STR_I(tmp[0], ARM_SP, STACK_VAR(tcc[0])), ctx); + emit(ARM_ADDS_I(tc[1], tc[1], 1), ctx); + emit(ARM_ADC_I(tc[0], tc[0], 0), ctx); + arm_bpf_put_reg64(tcc, tmp, ctx); /* prog = array->ptrs[index] * if (prog == NULL) * goto out; */ - off = offsetof(struct bpf_array, ptrs); - emit_a32_mov_i(tmp[1], off, false, ctx); - emit(ARM_LDR_I(tmp2[1], ARM_SP, STACK_VAR(r2[1])), ctx); - emit(ARM_ADD_R(tmp[1], tmp2[1], tmp[1]), ctx); - emit(ARM_LDR_I(tmp2[1], ARM_SP, STACK_VAR(r3[1])), ctx); - emit(ARM_MOV_SI(tmp[0], tmp2[1], SRTYPE_ASL, 2), ctx); - emit(ARM_LDR_R(tmp[1], tmp[1], tmp[0]), ctx); + BUILD_BUG_ON(imm8m(offsetof(struct bpf_array, ptrs)) < 0); + off = imm8m(offsetof(struct bpf_array, ptrs)); + emit(ARM_ADD_I(tmp[1], r_array, off), ctx); + emit(ARM_LDR_R_SI(tmp[1], tmp[1], r_index, SRTYPE_ASL, 2), ctx); emit(ARM_CMP_I(tmp[1], 0), ctx); _emit(ARM_COND_EQ, ARM_B(jmp_offset), ctx); /* goto *(prog->bpf_func + prologue_size); */ + BUILD_BUG_ON(offsetof(struct bpf_prog, bpf_func) > + ARM_INST_LDST__IMM12); off = offsetof(struct bpf_prog, bpf_func); - emit_a32_mov_i(tmp2[1], off, false, ctx); - emit(ARM_LDR_R(tmp[1], tmp[1], tmp2[1]), ctx); + emit(ARM_LDR_I(tmp[1], tmp[1], off), ctx); emit(ARM_ADD_I(tmp[1], tmp[1], ctx->prologue_bytes), ctx); emit_bx_r(tmp[1], ctx); @@ -1059,7 +1197,7 @@ static int emit_bpf_tail_call(struct jit_ctx *ctx) static inline void emit_rev16(const u8 rd, const u8 rn, struct jit_ctx *ctx) { #if __LINUX_ARM_ARCH__ < 6 - const u8 *tmp2 = bpf2a32[TMP_REG_2]; + const s8 *tmp2 = bpf2a32[TMP_REG_2]; emit(ARM_AND_I(tmp2[1], rn, 0xff), ctx); emit(ARM_MOV_SI(tmp2[0], rn, SRTYPE_LSR, 8), ctx); @@ -1074,7 +1212,7 @@ static inline void emit_rev16(const u8 rd, const u8 rn, struct jit_ctx *ctx) static inline void emit_rev32(const u8 rd, const u8 rn, struct jit_ctx *ctx) { #if __LINUX_ARM_ARCH__ < 6 - const u8 *tmp2 = bpf2a32[TMP_REG_2]; + const s8 *tmp2 = bpf2a32[TMP_REG_2]; emit(ARM_AND_I(tmp2[1], rn, 0xff), ctx); emit(ARM_MOV_SI(tmp2[0], rn, SRTYPE_LSR, 24), ctx); @@ 
-1094,28 +1232,27 @@ static inline void emit_rev32(const u8 rd, const u8 rn, struct jit_ctx *ctx) } // push the scratch stack register on top of the stack -static inline void emit_push_r64(const u8 src[], const u8 shift, - struct jit_ctx *ctx) +static inline void emit_push_r64(const s8 src[], struct jit_ctx *ctx) { - const u8 *tmp2 = bpf2a32[TMP_REG_2]; + const s8 *tmp2 = bpf2a32[TMP_REG_2]; + const s8 *rt; u16 reg_set = 0; - emit(ARM_LDR_I(tmp2[1], ARM_SP, STACK_VAR(src[1]+shift)), ctx); - emit(ARM_LDR_I(tmp2[0], ARM_SP, STACK_VAR(src[0]+shift)), ctx); + rt = arm_bpf_get_reg64(src, tmp2, ctx); - reg_set = (1 << tmp2[1]) | (1 << tmp2[0]); + reg_set = (1 << rt[1]) | (1 << rt[0]); emit(ARM_PUSH(reg_set), ctx); } static void build_prologue(struct jit_ctx *ctx) { - const u8 r0 = bpf2a32[BPF_REG_0][1]; - const u8 r2 = bpf2a32[BPF_REG_1][1]; - const u8 r3 = bpf2a32[BPF_REG_1][0]; - const u8 r4 = bpf2a32[BPF_REG_6][1]; - const u8 fplo = bpf2a32[BPF_REG_FP][1]; - const u8 fphi = bpf2a32[BPF_REG_FP][0]; - const u8 *tcc = bpf2a32[TCALL_CNT]; + const s8 r0 = bpf2a32[BPF_REG_0][1]; + const s8 r2 = bpf2a32[BPF_REG_1][1]; + const s8 r3 = bpf2a32[BPF_REG_1][0]; + const s8 r4 = bpf2a32[BPF_REG_6][1]; + const s8 fplo = bpf2a32[BPF_REG_FP][1]; + const s8 fphi = bpf2a32[BPF_REG_FP][0]; + const s8 *tcc = bpf2a32[TCALL_CNT]; /* Save callee saved registers. */ #ifdef CONFIG_FRAME_POINTER @@ -1136,8 +1273,8 @@ static void build_prologue(struct jit_ctx *ctx) emit(ARM_SUB_I(ARM_SP, ARM_SP, ctx->stack_size), ctx); /* Set up BPF prog stack base register */ - emit_a32_mov_r(fplo, ARM_IP, true, false, ctx); - emit_a32_mov_i(fphi, 0, true, ctx); + emit_a32_mov_r(fplo, ARM_IP, ctx); + emit_a32_mov_i(fphi, 0, ctx); /* mov r4, 0 */ emit(ARM_MOV_I(r4, 0), ctx); @@ -1146,8 +1283,8 @@ static void build_prologue(struct jit_ctx *ctx) emit(ARM_MOV_R(r3, r4), ctx); emit(ARM_MOV_R(r2, r0), ctx); /* Initialize Tail Count */ - emit(ARM_STR_I(r4, ARM_SP, STACK_VAR(tcc[0])), ctx); - emit(ARM_STR_I(r4, ARM_SP, STACK_VAR(tcc[1])), ctx); + emit(ARM_STR_I(r4, ARM_FP, EBPF_SCRATCH_TO_ARM_FP(tcc[0])), ctx); + emit(ARM_STR_I(r4, ARM_FP, EBPF_SCRATCH_TO_ARM_FP(tcc[1])), ctx); /* end of prologue */ } @@ -1178,17 +1315,16 @@ static void build_epilogue(struct jit_ctx *ctx) static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx) { const u8 code = insn->code; - const u8 *dst = bpf2a32[insn->dst_reg]; - const u8 *src = bpf2a32[insn->src_reg]; - const u8 *tmp = bpf2a32[TMP_REG_1]; - const u8 *tmp2 = bpf2a32[TMP_REG_2]; + const s8 *dst = bpf2a32[insn->dst_reg]; + const s8 *src = bpf2a32[insn->src_reg]; + const s8 *tmp = bpf2a32[TMP_REG_1]; + const s8 *tmp2 = bpf2a32[TMP_REG_2]; const s16 off = insn->off; const s32 imm = insn->imm; const int i = insn - ctx->prog->insnsi; const bool is64 = BPF_CLASS(code) == BPF_ALU64; - const bool dstk = is_on_stack(insn->dst_reg); - const bool sstk = is_on_stack(insn->src_reg); - u8 rd, rt, rm, rn; + const s8 *rd, *rs; + s8 rd_lo, rt, rm, rn; s32 jmp_offset; #define check_imm(bits, imm) do { \ @@ -1211,11 +1347,11 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx) case BPF_ALU64 | BPF_MOV | BPF_X: switch (BPF_SRC(code)) { case BPF_X: - emit_a32_mov_r64(is64, dst, src, dstk, sstk, ctx); + emit_a32_mov_r64(is64, dst, src, ctx); break; case BPF_K: /* Sign-extend immediate value to destination reg */ - emit_a32_mov_i64(is64, dst, imm, dstk, ctx); + emit_a32_mov_se_i64(is64, dst, imm, ctx); break; } break; @@ -1255,8 +1391,7 @@ static int build_insn(const struct bpf_insn *insn, 
struct jit_ctx *ctx) case BPF_ALU64 | BPF_XOR | BPF_X: switch (BPF_SRC(code)) { case BPF_X: - emit_a32_alu_r64(is64, dst, src, dstk, sstk, - ctx, BPF_OP(code)); + emit_a32_alu_r64(is64, dst, src, ctx, BPF_OP(code)); break; case BPF_K: /* Move immediate value to the temporary register @@ -1265,9 +1400,8 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx) * value into temporary reg and then it would be * safe to do the operation on it. */ - emit_a32_mov_i64(is64, tmp2, imm, false, ctx); - emit_a32_alu_r64(is64, dst, tmp2, dstk, false, - ctx, BPF_OP(code)); + emit_a32_mov_se_i64(is64, tmp2, imm, ctx); + emit_a32_alu_r64(is64, dst, tmp2, ctx, BPF_OP(code)); break; } break; @@ -1277,26 +1411,22 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx) case BPF_ALU | BPF_DIV | BPF_X: case BPF_ALU | BPF_MOD | BPF_K: case BPF_ALU | BPF_MOD | BPF_X: - rt = src_lo; - rd = dstk ? tmp2[1] : dst_lo; - if (dstk) - emit(ARM_LDR_I(rd, ARM_SP, STACK_VAR(dst_lo)), ctx); + rd_lo = arm_bpf_get_reg32(dst_lo, tmp2[1], ctx); switch (BPF_SRC(code)) { case BPF_X: - rt = sstk ? tmp2[0] : rt; - if (sstk) - emit(ARM_LDR_I(rt, ARM_SP, STACK_VAR(src_lo)), - ctx); + rt = arm_bpf_get_reg32(src_lo, tmp2[0], ctx); break; case BPF_K: rt = tmp2[0]; - emit_a32_mov_i(rt, imm, false, ctx); + emit_a32_mov_i(rt, imm, ctx); + break; + default: + rt = src_lo; break; } - emit_udivmod(rd, rd, rt, ctx, BPF_OP(code)); - if (dstk) - emit(ARM_STR_I(rd, ARM_SP, STACK_VAR(dst_lo)), ctx); - emit_a32_mov_i(dst_hi, 0, dstk, ctx); + emit_udivmod(rd_lo, rd_lo, rt, ctx, BPF_OP(code)); + arm_bpf_put_reg32(dst_lo, rd_lo, ctx); + emit_a32_mov_i(dst_hi, 0, ctx); break; case BPF_ALU64 | BPF_DIV | BPF_K: case BPF_ALU64 | BPF_DIV | BPF_X: @@ -1310,54 +1440,54 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx) if (unlikely(imm > 31)) return -EINVAL; if (imm) - emit_a32_alu_i(dst_lo, imm, dstk, ctx, BPF_OP(code)); - emit_a32_mov_i(dst_hi, 0, dstk, ctx); + emit_a32_alu_i(dst_lo, imm, ctx, BPF_OP(code)); + emit_a32_mov_i(dst_hi, 0, ctx); break; /* dst = dst << imm */ case BPF_ALU64 | BPF_LSH | BPF_K: if (unlikely(imm > 63)) return -EINVAL; - emit_a32_lsh_i64(dst, dstk, imm, ctx); + emit_a32_lsh_i64(dst, imm, ctx); break; /* dst = dst >> imm */ case BPF_ALU64 | BPF_RSH | BPF_K: if (unlikely(imm > 63)) return -EINVAL; - emit_a32_rsh_i64(dst, dstk, imm, ctx); + emit_a32_rsh_i64(dst, imm, ctx); break; /* dst = dst << src */ case BPF_ALU64 | BPF_LSH | BPF_X: - emit_a32_lsh_r64(dst, src, dstk, sstk, ctx); + emit_a32_lsh_r64(dst, src, ctx); break; /* dst = dst >> src */ case BPF_ALU64 | BPF_RSH | BPF_X: - emit_a32_rsh_r64(dst, src, dstk, sstk, ctx); + emit_a32_rsh_r64(dst, src, ctx); break; /* dst = dst >> src (signed) */ case BPF_ALU64 | BPF_ARSH | BPF_X: - emit_a32_arsh_r64(dst, src, dstk, sstk, ctx); + emit_a32_arsh_r64(dst, src, ctx); break; /* dst = dst >> imm (signed) */ case BPF_ALU64 | BPF_ARSH | BPF_K: if (unlikely(imm > 63)) return -EINVAL; - emit_a32_arsh_i64(dst, dstk, imm, ctx); + emit_a32_arsh_i64(dst, imm, ctx); break; /* dst = ~dst */ case BPF_ALU | BPF_NEG: - emit_a32_alu_i(dst_lo, 0, dstk, ctx, BPF_OP(code)); - emit_a32_mov_i(dst_hi, 0, dstk, ctx); + emit_a32_alu_i(dst_lo, 0, ctx, BPF_OP(code)); + emit_a32_mov_i(dst_hi, 0, ctx); break; /* dst = ~dst (64 bit) */ case BPF_ALU64 | BPF_NEG: - emit_a32_neg64(dst, dstk, ctx); + emit_a32_neg64(dst, ctx); break; /* dst = dst * src/imm */ case BPF_ALU64 | BPF_MUL | BPF_X: case BPF_ALU64 | BPF_MUL | BPF_K: switch (BPF_SRC(code)) { case BPF_X: 
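/*
 * Editorial aside: the pattern running through all of the emit_a32_*()
 * conversions above is the same -- the explicit dstk/sstk spill and reload
 * sequences are replaced by the arm_bpf_get_reg32/64() and
 * arm_bpf_put_reg32/64() accessors.  Their real definitions appear earlier
 * in this patch; the sketch below is only a simplified reconstruction of
 * the 32-bit pair, to illustrate the behaviour the converted callers rely
 * on (the 64-bit variants are analogous and can use LDRD/STRD where the
 * CPU supports it).  Not verbatim kernel code.
 */
static s8 arm_bpf_get_reg32(s8 reg, s8 tmp, struct jit_ctx *ctx)
{
	/* A stacked BPF register is loaded into the caller-supplied temp. */
	if (is_stacked(reg)) {
		emit(ARM_LDR_I(tmp, ARM_FP, EBPF_SCRATCH_TO_ARM_FP(reg)), ctx);
		reg = tmp;
	}
	return reg;
}

static void arm_bpf_put_reg32(s8 reg, s8 src, struct jit_ctx *ctx)
{
	/* Results for stacked registers go back to their stack slot. */
	if (is_stacked(reg))
		emit(ARM_STR_I(src, ARM_FP, EBPF_SCRATCH_TO_ARM_FP(reg)), ctx);
	else if (reg != src)
		emit(ARM_MOV_R(reg, src), ctx);
}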
- emit_a32_mul_r64(dst, src, dstk, sstk, ctx); + emit_a32_mul_r64(dst, src, ctx); break; case BPF_K: /* Move immediate value to the temporary register @@ -1366,8 +1496,8 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx) * reg then it would be safe to do the operation * on it. */ - emit_a32_mov_i64(is64, tmp2, imm, false, ctx); - emit_a32_mul_r64(dst, tmp2, dstk, false, ctx); + emit_a32_mov_se_i64(is64, tmp2, imm, ctx); + emit_a32_mul_r64(dst, tmp2, ctx); break; } break; @@ -1375,25 +1505,20 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx) /* dst = htobe(dst) */ case BPF_ALU | BPF_END | BPF_FROM_LE: case BPF_ALU | BPF_END | BPF_FROM_BE: - rd = dstk ? tmp[0] : dst_hi; - rt = dstk ? tmp[1] : dst_lo; - if (dstk) { - emit(ARM_LDR_I(rt, ARM_SP, STACK_VAR(dst_lo)), ctx); - emit(ARM_LDR_I(rd, ARM_SP, STACK_VAR(dst_hi)), ctx); - } + rd = arm_bpf_get_reg64(dst, tmp, ctx); if (BPF_SRC(code) == BPF_FROM_LE) goto emit_bswap_uxt; switch (imm) { case 16: - emit_rev16(rt, rt, ctx); + emit_rev16(rd[1], rd[1], ctx); goto emit_bswap_uxt; case 32: - emit_rev32(rt, rt, ctx); + emit_rev32(rd[1], rd[1], ctx); goto emit_bswap_uxt; case 64: - emit_rev32(ARM_LR, rt, ctx); - emit_rev32(rt, rd, ctx); - emit(ARM_MOV_R(rd, ARM_LR), ctx); + emit_rev32(ARM_LR, rd[1], ctx); + emit_rev32(rd[1], rd[0], ctx); + emit(ARM_MOV_R(rd[0], ARM_LR), ctx); break; } goto exit; @@ -1402,36 +1527,30 @@ emit_bswap_uxt: case 16: /* zero-extend 16 bits into 64 bits */ #if __LINUX_ARM_ARCH__ < 6 - emit_a32_mov_i(tmp2[1], 0xffff, false, ctx); - emit(ARM_AND_R(rt, rt, tmp2[1]), ctx); + emit_a32_mov_i(tmp2[1], 0xffff, ctx); + emit(ARM_AND_R(rd[1], rd[1], tmp2[1]), ctx); #else /* ARMv6+ */ - emit(ARM_UXTH(rt, rt), ctx); + emit(ARM_UXTH(rd[1], rd[1]), ctx); #endif - emit(ARM_EOR_R(rd, rd, rd), ctx); + emit(ARM_EOR_R(rd[0], rd[0], rd[0]), ctx); break; case 32: /* zero-extend 32 bits into 64 bits */ - emit(ARM_EOR_R(rd, rd, rd), ctx); + emit(ARM_EOR_R(rd[0], rd[0], rd[0]), ctx); break; case 64: /* nop */ break; } exit: - if (dstk) { - emit(ARM_STR_I(rt, ARM_SP, STACK_VAR(dst_lo)), ctx); - emit(ARM_STR_I(rd, ARM_SP, STACK_VAR(dst_hi)), ctx); - } + arm_bpf_put_reg64(dst, rd, ctx); break; /* dst = imm64 */ case BPF_LD | BPF_IMM | BPF_DW: { - const struct bpf_insn insn1 = insn[1]; - u32 hi, lo = imm; + u64 val = (u32)imm | (u64)insn[1].imm << 32; - hi = insn1.imm; - emit_a32_mov_i(dst_lo, lo, dstk, ctx); - emit_a32_mov_i(dst_hi, hi, dstk, ctx); + emit_a32_mov_i64(dst, val, ctx); return 1; } @@ -1440,10 +1559,8 @@ exit: case BPF_LDX | BPF_MEM | BPF_H: case BPF_LDX | BPF_MEM | BPF_B: case BPF_LDX | BPF_MEM | BPF_DW: - rn = sstk ? 
tmp2[1] : src_lo; - if (sstk) - emit(ARM_LDR_I(rn, ARM_SP, STACK_VAR(src_lo)), ctx); - emit_ldx_r(dst, rn, dstk, off, ctx, BPF_SIZE(code)); + rn = arm_bpf_get_reg32(src_lo, tmp2[1], ctx); + emit_ldx_r(dst, rn, off, ctx, BPF_SIZE(code)); break; /* ST: *(size *)(dst + off) = imm */ case BPF_ST | BPF_MEM | BPF_W: @@ -1453,18 +1570,15 @@ exit: switch (BPF_SIZE(code)) { case BPF_DW: /* Sign-extend immediate value into temp reg */ - emit_a32_mov_i64(true, tmp2, imm, false, ctx); - emit_str_r(dst_lo, tmp2[1], dstk, off, ctx, BPF_W); - emit_str_r(dst_lo, tmp2[0], dstk, off+4, ctx, BPF_W); + emit_a32_mov_se_i64(true, tmp2, imm, ctx); break; case BPF_W: case BPF_H: case BPF_B: - emit_a32_mov_i(tmp2[1], imm, false, ctx); - emit_str_r(dst_lo, tmp2[1], dstk, off, ctx, - BPF_SIZE(code)); + emit_a32_mov_i(tmp2[1], imm, ctx); break; } + emit_str_r(dst_lo, tmp2, off, ctx, BPF_SIZE(code)); break; /* STX XADD: lock *(u32 *)(dst + off) += src */ case BPF_STX | BPF_XADD | BPF_W: @@ -1476,25 +1590,9 @@ exit: case BPF_STX | BPF_MEM | BPF_H: case BPF_STX | BPF_MEM | BPF_B: case BPF_STX | BPF_MEM | BPF_DW: - { - u8 sz = BPF_SIZE(code); - - rn = sstk ? tmp2[1] : src_lo; - rm = sstk ? tmp2[0] : src_hi; - if (sstk) { - emit(ARM_LDR_I(rn, ARM_SP, STACK_VAR(src_lo)), ctx); - emit(ARM_LDR_I(rm, ARM_SP, STACK_VAR(src_hi)), ctx); - } - - /* Store the value */ - if (BPF_SIZE(code) == BPF_DW) { - emit_str_r(dst_lo, rn, dstk, off, ctx, BPF_W); - emit_str_r(dst_lo, rm, dstk, off+4, ctx, BPF_W); - } else { - emit_str_r(dst_lo, rn, dstk, off, ctx, sz); - } + rs = arm_bpf_get_reg64(src, tmp2, ctx); + emit_str_r(dst_lo, rs, off, ctx, BPF_SIZE(code)); break; - } /* PC += off if dst == src */ /* PC += off if dst > src */ /* PC += off if dst >= src */ @@ -1518,12 +1616,8 @@ exit: case BPF_JMP | BPF_JSLT | BPF_X: case BPF_JMP | BPF_JSLE | BPF_X: /* Setup source registers */ - rm = sstk ? tmp2[0] : src_hi; - rn = sstk ? tmp2[1] : src_lo; - if (sstk) { - emit(ARM_LDR_I(rn, ARM_SP, STACK_VAR(src_lo)), ctx); - emit(ARM_LDR_I(rm, ARM_SP, STACK_VAR(src_hi)), ctx); - } + rm = arm_bpf_get_reg32(src_hi, tmp2[0], ctx); + rn = arm_bpf_get_reg32(src_lo, tmp2[1], ctx); goto go_jmp; /* PC += off if dst == imm */ /* PC += off if dst > imm */ @@ -1552,18 +1646,13 @@ exit: rm = tmp2[0]; rn = tmp2[1]; /* Sign-extend immediate value */ - emit_a32_mov_i64(true, tmp2, imm, false, ctx); + emit_a32_mov_se_i64(true, tmp2, imm, ctx); go_jmp: /* Setup destination register */ - rd = dstk ? tmp[0] : dst_hi; - rt = dstk ? 
tmp[1] : dst_lo; - if (dstk) { - emit(ARM_LDR_I(rt, ARM_SP, STACK_VAR(dst_lo)), ctx); - emit(ARM_LDR_I(rd, ARM_SP, STACK_VAR(dst_hi)), ctx); - } + rd = arm_bpf_get_reg64(dst, tmp, ctx); /* Check for the condition */ - emit_ar_r(rd, rt, rm, rn, ctx, BPF_OP(code)); + emit_ar_r(rd[0], rd[1], rm, rn, ctx, BPF_OP(code)); /* Setup JUMP instruction */ jmp_offset = bpf2a32_offset(i+off, i, ctx); @@ -1619,21 +1708,21 @@ go_jmp: /* function call */ case BPF_JMP | BPF_CALL: { - const u8 *r0 = bpf2a32[BPF_REG_0]; - const u8 *r1 = bpf2a32[BPF_REG_1]; - const u8 *r2 = bpf2a32[BPF_REG_2]; - const u8 *r3 = bpf2a32[BPF_REG_3]; - const u8 *r4 = bpf2a32[BPF_REG_4]; - const u8 *r5 = bpf2a32[BPF_REG_5]; + const s8 *r0 = bpf2a32[BPF_REG_0]; + const s8 *r1 = bpf2a32[BPF_REG_1]; + const s8 *r2 = bpf2a32[BPF_REG_2]; + const s8 *r3 = bpf2a32[BPF_REG_3]; + const s8 *r4 = bpf2a32[BPF_REG_4]; + const s8 *r5 = bpf2a32[BPF_REG_5]; const u32 func = (u32)__bpf_call_base + (u32)imm; - emit_a32_mov_r64(true, r0, r1, false, false, ctx); - emit_a32_mov_r64(true, r1, r2, false, true, ctx); - emit_push_r64(r5, 0, ctx); - emit_push_r64(r4, 8, ctx); - emit_push_r64(r3, 16, ctx); + emit_a32_mov_r64(true, r0, r1, ctx); + emit_a32_mov_r64(true, r1, r2, ctx); + emit_push_r64(r5, ctx); + emit_push_r64(r4, ctx); + emit_push_r64(r3, ctx); - emit_a32_mov_i(tmp[1], func, false, ctx); + emit_a32_mov_i(tmp[1], func, ctx); emit_blx_r(tmp[1], ctx); emit(ARM_ADD_I(ARM_SP, ARM_SP, imm8m(24)), ctx); // callee clean @@ -1745,6 +1834,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog) memset(&ctx, 0, sizeof(ctx)); ctx.prog = prog; + ctx.cpu_architecture = cpu_architecture(); /* Not able to allocate memory for offsets[] , then * we must fall back to the interpreter diff --git a/arch/arm/net/bpf_jit_32.h b/arch/arm/net/bpf_jit_32.h index d5cf5f6208aa..f4e58bcdaa43 100644 --- a/arch/arm/net/bpf_jit_32.h +++ b/arch/arm/net/bpf_jit_32.h @@ -77,11 +77,14 @@ #define ARM_INST_EOR_R 0x00200000 #define ARM_INST_EOR_I 0x02200000 -#define ARM_INST_LDRB_I 0x05d00000 +#define ARM_INST_LDST__U 0x00800000 +#define ARM_INST_LDST__IMM12 0x00000fff +#define ARM_INST_LDRB_I 0x05500000 #define ARM_INST_LDRB_R 0x07d00000 -#define ARM_INST_LDRH_I 0x01d000b0 +#define ARM_INST_LDRD_I 0x014000d0 +#define ARM_INST_LDRH_I 0x015000b0 #define ARM_INST_LDRH_R 0x019000b0 -#define ARM_INST_LDR_I 0x05900000 +#define ARM_INST_LDR_I 0x05100000 #define ARM_INST_LDR_R 0x07900000 #define ARM_INST_LDM 0x08900000 @@ -124,9 +127,10 @@ #define ARM_INST_SBC_R 0x00c00000 #define ARM_INST_SBCS_R 0x00d00000 -#define ARM_INST_STR_I 0x05800000 -#define ARM_INST_STRB_I 0x05c00000 -#define ARM_INST_STRH_I 0x01c000b0 +#define ARM_INST_STR_I 0x05000000 +#define ARM_INST_STRB_I 0x05400000 +#define ARM_INST_STRD_I 0x014000f0 +#define ARM_INST_STRH_I 0x014000b0 #define ARM_INST_TST_R 0x01100000 #define ARM_INST_TST_I 0x03100000 @@ -183,17 +187,18 @@ #define ARM_EOR_R(rd, rn, rm) _AL3_R(ARM_INST_EOR, rd, rn, rm) #define ARM_EOR_I(rd, rn, imm) _AL3_I(ARM_INST_EOR, rd, rn, imm) -#define ARM_LDR_I(rt, rn, off) (ARM_INST_LDR_I | (rt) << 12 | (rn) << 16 \ - | ((off) & 0xfff)) -#define ARM_LDR_R(rt, rn, rm) (ARM_INST_LDR_R | (rt) << 12 | (rn) << 16 \ +#define ARM_LDR_R(rt, rn, rm) (ARM_INST_LDR_R | ARM_INST_LDST__U \ + | (rt) << 12 | (rn) << 16 \ | (rm)) -#define ARM_LDRB_I(rt, rn, off) (ARM_INST_LDRB_I | (rt) << 12 | (rn) << 16 \ - | (off)) -#define ARM_LDRB_R(rt, rn, rm) (ARM_INST_LDRB_R | (rt) << 12 | (rn) << 16 \ +#define ARM_LDR_R_SI(rt, rn, rm, type, imm) \ + (ARM_INST_LDR_R | 
ARM_INST_LDST__U \ + | (rt) << 12 | (rn) << 16 \ + | (imm) << 7 | (type) << 5 | (rm)) +#define ARM_LDRB_R(rt, rn, rm) (ARM_INST_LDRB_R | ARM_INST_LDST__U \ + | (rt) << 12 | (rn) << 16 \ | (rm)) -#define ARM_LDRH_I(rt, rn, off) (ARM_INST_LDRH_I | (rt) << 12 | (rn) << 16 \ - | (((off) & 0xf0) << 4) | ((off) & 0xf)) -#define ARM_LDRH_R(rt, rn, rm) (ARM_INST_LDRH_R | (rt) << 12 | (rn) << 16 \ +#define ARM_LDRH_R(rt, rn, rm) (ARM_INST_LDRH_R | ARM_INST_LDST__U \ + | (rt) << 12 | (rn) << 16 \ | (rm)) #define ARM_LDM(rn, regs) (ARM_INST_LDM | (rn) << 16 | (regs)) @@ -254,13 +259,6 @@ #define ARM_SUBS_I(rd, rn, imm) _AL3_I(ARM_INST_SUBS, rd, rn, imm) #define ARM_SBC_I(rd, rn, imm) _AL3_I(ARM_INST_SBC, rd, rn, imm) -#define ARM_STR_I(rt, rn, off) (ARM_INST_STR_I | (rt) << 12 | (rn) << 16 \ - | ((off) & 0xfff)) -#define ARM_STRH_I(rt, rn, off) (ARM_INST_STRH_I | (rt) << 12 | (rn) << 16 \ - | (((off) & 0xf0) << 4) | ((off) & 0xf)) -#define ARM_STRB_I(rt, rn, off) (ARM_INST_STRB_I | (rt) << 12 | (rn) << 16 \ - | (((off) & 0xf0) << 4) | ((off) & 0xf)) - #define ARM_TST_R(rn, rm) _AL3_R(ARM_INST_TST, 0, rn, rm) #define ARM_TST_I(rn, imm) _AL3_I(ARM_INST_TST, 0, rn, imm) diff --git a/arch/arm64/boot/dts/freescale/qoriq-fman3-0.dtsi b/arch/arm64/boot/dts/freescale/qoriq-fman3-0.dtsi index 4dd06767f839..a56a408e9bf7 100644 --- a/arch/arm64/boot/dts/freescale/qoriq-fman3-0.dtsi +++ b/arch/arm64/boot/dts/freescale/qoriq-fman3-0.dtsi @@ -11,13 +11,14 @@ fman0: fman@1a00000 { #size-cells = <1>; cell-index = <0>; compatible = "fsl,fman"; - ranges = <0x0 0x0 0x1a00000 0x100000>; - reg = <0x0 0x1a00000 0x0 0x100000>; + ranges = <0x0 0x0 0x1a00000 0xfe000>; + reg = <0x0 0x1a00000 0x0 0xfe000>; interrupts = <GIC_SPI 44 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 45 IRQ_TYPE_LEVEL_HIGH>; clocks = <&clockgen 3 0>; clock-names = "fmanclk"; fsl,qman-channel-range = <0x800 0x10>; + ptimer-handle = <&ptp_timer0>; muram@0 { compatible = "fsl,fman-muram"; @@ -73,9 +74,10 @@ fman0: fman@1a00000 { compatible = "fsl,fman-memac-mdio", "fsl,fman-xmdio"; reg = <0xfd000 0x1000>; }; +}; - ptp_timer0: ptp-timer@fe000 { - compatible = "fsl,fman-ptp-timer"; - reg = <0xfe000 0x1000>; - }; +ptp_timer0: ptp-timer@1afe000 { + compatible = "fsl,fman-ptp-timer"; + reg = <0x0 0x1afe000 0x0 0x1000>; + interrupts = <GIC_SPI 44 IRQ_TYPE_LEVEL_HIGH>; }; diff --git a/arch/ia64/include/uapi/asm/socket.h b/arch/ia64/include/uapi/asm/socket.h index 3efba40adc54..c872c4e6bafb 100644 --- a/arch/ia64/include/uapi/asm/socket.h +++ b/arch/ia64/include/uapi/asm/socket.h @@ -114,4 +114,7 @@ #define SO_ZEROCOPY 60 +#define SO_TXTIME 61 +#define SCM_TXTIME SO_TXTIME + #endif /* _ASM_IA64_SOCKET_H */ diff --git a/arch/mips/include/uapi/asm/socket.h b/arch/mips/include/uapi/asm/socket.h index 49c3d4795963..71370fb3ceef 100644 --- a/arch/mips/include/uapi/asm/socket.h +++ b/arch/mips/include/uapi/asm/socket.h @@ -123,4 +123,7 @@ #define SO_ZEROCOPY 60 +#define SO_TXTIME 61 +#define SCM_TXTIME SO_TXTIME + #endif /* _UAPI_ASM_SOCKET_H */ diff --git a/arch/parisc/include/uapi/asm/socket.h b/arch/parisc/include/uapi/asm/socket.h index 1d0fdc3b5d22..061b9cf2a779 100644 --- a/arch/parisc/include/uapi/asm/socket.h +++ b/arch/parisc/include/uapi/asm/socket.h @@ -104,4 +104,7 @@ #define SO_ZEROCOPY 0x4035 +#define SO_TXTIME 0x4036 +#define SCM_TXTIME SO_TXTIME + #endif /* _UAPI_ASM_SOCKET_H */ diff --git a/arch/powerpc/boot/dts/fsl/qoriq-fman-0.dtsi b/arch/powerpc/boot/dts/fsl/qoriq-fman-0.dtsi index abd01d466de4..6b124f73f67a 100644 --- 
a/arch/powerpc/boot/dts/fsl/qoriq-fman-0.dtsi +++ b/arch/powerpc/boot/dts/fsl/qoriq-fman-0.dtsi @@ -37,12 +37,13 @@ fman0: fman@400000 { #size-cells = <1>; cell-index = <0>; compatible = "fsl,fman"; - ranges = <0 0x400000 0x100000>; - reg = <0x400000 0x100000>; + ranges = <0 0x400000 0xfe000>; + reg = <0x400000 0xfe000>; interrupts = <96 2 0 0>, <16 2 1 1>; clocks = <&clockgen 3 0>; clock-names = "fmanclk"; fsl,qman-channel-range = <0x40 0xc>; + ptimer-handle = <&ptp_timer0>; muram@0 { compatible = "fsl,fman-muram"; @@ -93,9 +94,10 @@ fman0: fman@400000 { reg = <0x87000 0x1000>; status = "disabled"; }; +}; - ptp_timer0: ptp-timer@fe000 { - compatible = "fsl,fman-ptp-timer"; - reg = <0xfe000 0x1000>; - }; +ptp_timer0: ptp-timer@4fe000 { + compatible = "fsl,fman-ptp-timer"; + reg = <0x4fe000 0x1000>; + interrupts = <96 2 0 0>; }; diff --git a/arch/powerpc/boot/dts/fsl/qoriq-fman-1.dtsi b/arch/powerpc/boot/dts/fsl/qoriq-fman-1.dtsi index debea75fd3f0..b80aaf5f00a1 100644 --- a/arch/powerpc/boot/dts/fsl/qoriq-fman-1.dtsi +++ b/arch/powerpc/boot/dts/fsl/qoriq-fman-1.dtsi @@ -37,12 +37,13 @@ fman1: fman@500000 { #size-cells = <1>; cell-index = <1>; compatible = "fsl,fman"; - ranges = <0 0x500000 0x100000>; - reg = <0x500000 0x100000>; + ranges = <0 0x500000 0xfe000>; + reg = <0x500000 0xfe000>; interrupts = <97 2 0 0>, <16 2 1 0>; clocks = <&clockgen 3 1>; clock-names = "fmanclk"; fsl,qman-channel-range = <0x60 0xc>; + ptimer-handle = <&ptp_timer1>; muram@0 { compatible = "fsl,fman-muram"; @@ -93,9 +94,10 @@ fman1: fman@500000 { reg = <0x87000 0x1000>; status = "disabled"; }; +}; - ptp_timer1: ptp-timer@fe000 { - compatible = "fsl,fman-ptp-timer"; - reg = <0xfe000 0x1000>; - }; +ptp_timer1: ptp-timer@5fe000 { + compatible = "fsl,fman-ptp-timer"; + reg = <0x5fe000 0x1000>; + interrupts = <97 2 0 0>; }; diff --git a/arch/powerpc/boot/dts/fsl/qoriq-fman3-0.dtsi b/arch/powerpc/boot/dts/fsl/qoriq-fman3-0.dtsi index 3a20e0d1a6d2..d3720fdde26c 100644 --- a/arch/powerpc/boot/dts/fsl/qoriq-fman3-0.dtsi +++ b/arch/powerpc/boot/dts/fsl/qoriq-fman3-0.dtsi @@ -37,12 +37,13 @@ fman0: fman@400000 { #size-cells = <1>; cell-index = <0>; compatible = "fsl,fman"; - ranges = <0 0x400000 0x100000>; - reg = <0x400000 0x100000>; + ranges = <0 0x400000 0xfe000>; + reg = <0x400000 0xfe000>; interrupts = <96 2 0 0>, <16 2 1 1>; clocks = <&clockgen 3 0>; clock-names = "fmanclk"; fsl,qman-channel-range = <0x800 0x10>; + ptimer-handle = <&ptp_timer0>; muram@0 { compatible = "fsl,fman-muram"; @@ -98,9 +99,10 @@ fman0: fman@400000 { compatible = "fsl,fman-memac-mdio", "fsl,fman-xmdio"; reg = <0xfd000 0x1000>; }; +}; - ptp_timer0: ptp-timer@fe000 { - compatible = "fsl,fman-ptp-timer"; - reg = <0xfe000 0x1000>; - }; +ptp_timer0: ptp-timer@4fe000 { + compatible = "fsl,fman-ptp-timer"; + reg = <0x4fe000 0x1000>; + interrupts = <96 2 0 0>; }; diff --git a/arch/powerpc/boot/dts/fsl/qoriq-fman3-1.dtsi b/arch/powerpc/boot/dts/fsl/qoriq-fman3-1.dtsi index 82750ac944c7..ae34c204a5bc 100644 --- a/arch/powerpc/boot/dts/fsl/qoriq-fman3-1.dtsi +++ b/arch/powerpc/boot/dts/fsl/qoriq-fman3-1.dtsi @@ -37,12 +37,13 @@ fman1: fman@500000 { #size-cells = <1>; cell-index = <1>; compatible = "fsl,fman"; - ranges = <0 0x500000 0x100000>; - reg = <0x500000 0x100000>; + ranges = <0 0x500000 0xfe000>; + reg = <0x500000 0xfe000>; interrupts = <97 2 0 0>, <16 2 1 0>; clocks = <&clockgen 3 1>; clock-names = "fmanclk"; fsl,qman-channel-range = <0x820 0x10>; + ptimer-handle = <&ptp_timer1>; muram@0 { compatible = "fsl,fman-muram"; @@ -98,9 +99,10 @@ fman1: 
fman@500000 { compatible = "fsl,fman-memac-mdio", "fsl,fman-xmdio"; reg = <0xfd000 0x1000>; }; +}; - ptp_timer1: ptp-timer@fe000 { - compatible = "fsl,fman-ptp-timer"; - reg = <0xfe000 0x1000>; - }; +ptp_timer1: ptp-timer@5fe000 { + compatible = "fsl,fman-ptp-timer"; + reg = <0x5fe000 0x1000>; + interrupts = <97 2 0 0>; }; diff --git a/arch/powerpc/boot/dts/fsl/qoriq-fman3l-0.dtsi b/arch/powerpc/boot/dts/fsl/qoriq-fman3l-0.dtsi index 7f60b6060176..02f2755842cc 100644 --- a/arch/powerpc/boot/dts/fsl/qoriq-fman3l-0.dtsi +++ b/arch/powerpc/boot/dts/fsl/qoriq-fman3l-0.dtsi @@ -37,12 +37,13 @@ fman0: fman@400000 { #size-cells = <1>; cell-index = <0>; compatible = "fsl,fman"; - ranges = <0 0x400000 0x100000>; - reg = <0x400000 0x100000>; + ranges = <0 0x400000 0xfe000>; + reg = <0x400000 0xfe000>; interrupts = <96 2 0 0>, <16 2 1 1>; clocks = <&clockgen 3 0>; clock-names = "fmanclk"; fsl,qman-channel-range = <0x800 0x10>; + ptimer-handle = <&ptp_timer0>; muram@0 { compatible = "fsl,fman-muram"; @@ -86,9 +87,10 @@ fman0: fman@400000 { compatible = "fsl,fman-memac-mdio", "fsl,fman-xmdio"; reg = <0xfd000 0x1000>; }; +}; - ptp_timer0: ptp-timer@fe000 { - compatible = "fsl,fman-ptp-timer"; - reg = <0xfe000 0x1000>; - }; +ptp_timer0: ptp-timer@4fe000 { + compatible = "fsl,fman-ptp-timer"; + reg = <0x4fe000 0x1000>; + interrupts = <96 2 0 0>; }; diff --git a/arch/s390/include/uapi/asm/socket.h b/arch/s390/include/uapi/asm/socket.h index 3510c0fd06f4..39d901476ee5 100644 --- a/arch/s390/include/uapi/asm/socket.h +++ b/arch/s390/include/uapi/asm/socket.h @@ -111,4 +111,7 @@ #define SO_ZEROCOPY 60 +#define SO_TXTIME 61 +#define SCM_TXTIME SO_TXTIME + #endif /* _ASM_SOCKET_H */ diff --git a/arch/sparc/include/uapi/asm/socket.h b/arch/sparc/include/uapi/asm/socket.h index d58520c2e6ff..7ea35e5601b6 100644 --- a/arch/sparc/include/uapi/asm/socket.h +++ b/arch/sparc/include/uapi/asm/socket.h @@ -101,6 +101,9 @@ #define SO_ZEROCOPY 0x003e +#define SO_TXTIME 0x003f +#define SCM_TXTIME SO_TXTIME + /* Security levels - as per NRL IPv6 - don't actually do anything */ #define SO_SECURITY_AUTHENTICATION 0x5001 #define SO_SECURITY_ENCRYPTION_TRANSPORT 0x5002 diff --git a/arch/xtensa/include/uapi/asm/socket.h b/arch/xtensa/include/uapi/asm/socket.h index 75a07b8119a9..1de07a7f7680 100644 --- a/arch/xtensa/include/uapi/asm/socket.h +++ b/arch/xtensa/include/uapi/asm/socket.h @@ -116,4 +116,7 @@ #define SO_ZEROCOPY 60 +#define SO_TXTIME 61 +#define SCM_TXTIME SO_TXTIME + #endif /* _XTENSA_SOCKET_H */ diff --git a/drivers/atm/zatm.c b/drivers/atm/zatm.c index 2c288d1f42bb..e89146ddede6 100644 --- a/drivers/atm/zatm.c +++ b/drivers/atm/zatm.c @@ -1385,14 +1385,12 @@ static void zatm_close(struct atm_vcc *vcc) static int zatm_open(struct atm_vcc *vcc) { - struct zatm_dev *zatm_dev; struct zatm_vcc *zatm_vcc; short vpi = vcc->vpi; int vci = vcc->vci; int error; DPRINTK(">zatm_open\n"); - zatm_dev = ZATM_DEV(vcc->dev); if (!test_bit(ATM_VF_PARTIAL,&vcc->flags)) vcc->dev_data = NULL; if (vci != ATM_VPI_UNSPEC && vpi != ATM_VCI_UNSPEC) diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c index 74a05561b620..e07401d3901d 100644 --- a/drivers/block/nbd.c +++ b/drivers/block/nbd.c @@ -1571,7 +1571,7 @@ static int find_free_cb(int id, void *ptr, void *data) } /* Netlink interface. 
*/ -static struct nla_policy nbd_attr_policy[NBD_ATTR_MAX + 1] = { +static const struct nla_policy nbd_attr_policy[NBD_ATTR_MAX + 1] = { [NBD_ATTR_INDEX] = { .type = NLA_U32 }, [NBD_ATTR_SIZE_BYTES] = { .type = NLA_U64 }, [NBD_ATTR_BLOCK_SIZE_BYTES] = { .type = NLA_U64 }, @@ -1583,14 +1583,14 @@ static struct nla_policy nbd_attr_policy[NBD_ATTR_MAX + 1] = { [NBD_ATTR_DEVICE_LIST] = { .type = NLA_NESTED}, }; -static struct nla_policy nbd_sock_policy[NBD_SOCK_MAX + 1] = { +static const struct nla_policy nbd_sock_policy[NBD_SOCK_MAX + 1] = { [NBD_SOCK_FD] = { .type = NLA_U32 }, }; /* We don't use this right now since we don't parse the incoming list, but we * still want it here so userspace knows what to expect. */ -static struct nla_policy __attribute__((unused)) +static const struct nla_policy __attribute__((unused)) nbd_device_policy[NBD_DEVICE_ATTR_MAX + 1] = { [NBD_DEVICE_INDEX] = { .type = NLA_U32 }, [NBD_DEVICE_CONNECTED] = { .type = NLA_U8 }, diff --git a/drivers/connector/connector.c b/drivers/connector/connector.c index e718b8c69a56..eeb7d31cbda5 100644 --- a/drivers/connector/connector.c +++ b/drivers/connector/connector.c @@ -19,6 +19,7 @@ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ +#include <linux/compiler.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/list.h> @@ -239,7 +240,7 @@ void cn_del_callback(struct cb_id *id) } EXPORT_SYMBOL_GPL(cn_del_callback); -static int cn_proc_show(struct seq_file *m, void *v) +static int __maybe_unused cn_proc_show(struct seq_file *m, void *v) { struct cn_queue_dev *dev = cdev.cbdev; struct cn_callback_entry *cbq; diff --git a/drivers/crypto/chelsio/chtls/chtls_cm.c b/drivers/crypto/chelsio/chtls/chtls_cm.c index 2bb6f0380758..0997e166ea57 100644 --- a/drivers/crypto/chelsio/chtls/chtls_cm.c +++ b/drivers/crypto/chelsio/chtls/chtls_cm.c @@ -1673,7 +1673,7 @@ static void chtls_timewait(struct sock *sk) struct tcp_sock *tp = tcp_sk(sk); tp->rcv_nxt++; - tp->rx_opt.ts_recent_stamp = get_seconds(); + tp->rx_opt.ts_recent_stamp = ktime_get_seconds(); tp->srtt_us = 0; tcp_time_wait(sk, TCP_TIME_WAIT, 0); } diff --git a/drivers/hwmon/hwmon.c b/drivers/hwmon/hwmon.c index e88c01961948..33d51281272b 100644 --- a/drivers/hwmon/hwmon.c +++ b/drivers/hwmon/hwmon.c @@ -394,12 +394,16 @@ static const char * const hwmon_power_attr_templates[] = { [hwmon_power_cap_hyst] = "power%d_cap_hyst", [hwmon_power_cap_max] = "power%d_cap_max", [hwmon_power_cap_min] = "power%d_cap_min", + [hwmon_power_min] = "power%d_min", [hwmon_power_max] = "power%d_max", + [hwmon_power_lcrit] = "power%d_lcrit", [hwmon_power_crit] = "power%d_crit", [hwmon_power_label] = "power%d_label", [hwmon_power_alarm] = "power%d_alarm", [hwmon_power_cap_alarm] = "power%d_cap_alarm", + [hwmon_power_min_alarm] = "power%d_min_alarm", [hwmon_power_max_alarm] = "power%d_max_alarm", + [hwmon_power_lcrit_alarm] = "power%d_lcrit_alarm", [hwmon_power_crit_alarm] = "power%d_crit_alarm", }; diff --git a/drivers/infiniband/hw/hfi1/vnic_main.c b/drivers/infiniband/hw/hfi1/vnic_main.c index 5d65582fe4d9..616fc9b6fad8 100644 --- a/drivers/infiniband/hw/hfi1/vnic_main.c +++ b/drivers/infiniband/hw/hfi1/vnic_main.c @@ -423,7 +423,7 @@ tx_finish: static u16 hfi1_vnic_select_queue(struct net_device *netdev, struct sk_buff *skb, - void *accel_priv, + struct net_device *sb_dev, select_queue_fallback_t fallback) { struct hfi1_vnic_vport_info *vinfo = opa_vnic_dev_priv(netdev); diff --git a/drivers/infiniband/ulp/opa_vnic/opa_vnic_netdev.c 
b/drivers/infiniband/ulp/opa_vnic/opa_vnic_netdev.c index 0c8aec62a425..61558788b3fa 100644 --- a/drivers/infiniband/ulp/opa_vnic/opa_vnic_netdev.c +++ b/drivers/infiniband/ulp/opa_vnic/opa_vnic_netdev.c @@ -95,7 +95,7 @@ static netdev_tx_t opa_netdev_start_xmit(struct sk_buff *skb, } static u16 opa_vnic_select_queue(struct net_device *netdev, struct sk_buff *skb, - void *accel_priv, + struct net_device *sb_dev, select_queue_fallback_t fallback) { struct opa_vnic_adapter *adapter = opa_vnic_priv(netdev); @@ -107,7 +107,7 @@ static u16 opa_vnic_select_queue(struct net_device *netdev, struct sk_buff *skb, mdata->entropy = opa_vnic_calc_entropy(skb); mdata->vl = opa_vnic_get_vl(adapter, skb); rc = adapter->rn_ops->ndo_select_queue(netdev, skb, - accel_priv, fallback); + sb_dev, fallback); skb_pull(skb, sizeof(*mdata)); return rc; } diff --git a/drivers/isdn/capi/capi.c b/drivers/isdn/capi/capi.c index 6e0c2814d032..ef5560b848ab 100644 --- a/drivers/isdn/capi/capi.c +++ b/drivers/isdn/capi/capi.c @@ -9,6 +9,7 @@ * */ +#include <linux/compiler.h> #include <linux/module.h> #include <linux/errno.h> #include <linux/kernel.h> @@ -1321,7 +1322,7 @@ static inline void capinc_tty_exit(void) { } * /proc/capi/capi20: * minor applid nrecvctlpkt nrecvdatapkt nsendctlpkt nsenddatapkt */ -static int capi20_proc_show(struct seq_file *m, void *v) +static int __maybe_unused capi20_proc_show(struct seq_file *m, void *v) { struct capidev *cdev; struct list_head *l; @@ -1344,7 +1345,7 @@ static int capi20_proc_show(struct seq_file *m, void *v) * /proc/capi/capi20ncci: * applid ncci */ -static int capi20ncci_proc_show(struct seq_file *m, void *v) +static int __maybe_unused capi20ncci_proc_show(struct seq_file *m, void *v) { struct capidev *cdev; struct capincci *np; diff --git a/drivers/isdn/capi/capidrv.c b/drivers/isdn/capi/capidrv.c index ee510f901720..e8949f3dcae1 100644 --- a/drivers/isdn/capi/capidrv.c +++ b/drivers/isdn/capi/capidrv.c @@ -9,6 +9,7 @@ * */ +#include <linux/compiler.h> #include <linux/module.h> #include <linux/errno.h> #include <linux/kernel.h> @@ -2451,7 +2452,7 @@ lower_callback(struct notifier_block *nb, unsigned long val, void *v) * /proc/capi/capidrv: * nrecvctlpkt nrecvdatapkt nsendctlpkt nsenddatapkt */ -static int capidrv_proc_show(struct seq_file *m, void *v) +static int __maybe_unused capidrv_proc_show(struct seq_file *m, void *v) { seq_printf(m, "%lu %lu %lu %lu\n", global.ap.nrecvctlpkt, diff --git a/drivers/isdn/gigaset/bas-gigaset.c b/drivers/isdn/gigaset/bas-gigaset.c index 20d0a080a2b0..ecdeb89645d0 100644 --- a/drivers/isdn/gigaset/bas-gigaset.c +++ b/drivers/isdn/gigaset/bas-gigaset.c @@ -739,6 +739,7 @@ static void read_int_callback(struct urb *urb) case HD_OPEN_B2CHANNEL_ACK: ++channel; + /* fall through */ case HD_OPEN_B1CHANNEL_ACK: bcs = cs->bcs + channel; update_basstate(ucs, BS_B1OPEN << channel, 0); @@ -752,6 +753,7 @@ static void read_int_callback(struct urb *urb) case HD_CLOSE_B2CHANNEL_ACK: ++channel; + /* fall through */ case HD_CLOSE_B1CHANNEL_ACK: bcs = cs->bcs + channel; update_basstate(ucs, 0, BS_B1OPEN << channel); @@ -765,6 +767,7 @@ static void read_int_callback(struct urb *urb) case HD_B2_FLOW_CONTROL: ++channel; + /* fall through */ case HD_B1_FLOW_CONTROL: bcs = cs->bcs + channel; atomic_add((l - BAS_NORMFRAME) * BAS_CORRFRAMES, @@ -972,16 +975,14 @@ static int starturbs(struct bc_state *bcs) rc = -EFAULT; goto error; } + usb_fill_int_urb(urb, bcs->cs->hw.bas->udev, + usb_rcvisocpipe(urb->dev, 3 + 2 * bcs->channel), + ubc->isoinbuf + k * BAS_INBUFSIZE, 
+ BAS_INBUFSIZE, read_iso_callback, bcs, + BAS_FRAMETIME); - urb->dev = bcs->cs->hw.bas->udev; - urb->pipe = usb_rcvisocpipe(urb->dev, 3 + 2 * bcs->channel); urb->transfer_flags = URB_ISO_ASAP; - urb->transfer_buffer = ubc->isoinbuf + k * BAS_INBUFSIZE; - urb->transfer_buffer_length = BAS_INBUFSIZE; urb->number_of_packets = BAS_NUMFRAMES; - urb->interval = BAS_FRAMETIME; - urb->complete = read_iso_callback; - urb->context = bcs; for (j = 0; j < BAS_NUMFRAMES; j++) { urb->iso_frame_desc[j].offset = j * BAS_MAXFRAME; urb->iso_frame_desc[j].length = BAS_MAXFRAME; @@ -1005,15 +1006,15 @@ static int starturbs(struct bc_state *bcs) rc = -EFAULT; goto error; } - urb->dev = bcs->cs->hw.bas->udev; - urb->pipe = usb_sndisocpipe(urb->dev, 4 + 2 * bcs->channel); + usb_fill_int_urb(urb, bcs->cs->hw.bas->udev, + usb_sndisocpipe(urb->dev, 4 + 2 * bcs->channel), + ubc->isooutbuf->data, + sizeof(ubc->isooutbuf->data), + write_iso_callback, &ubc->isoouturbs[k], + BAS_FRAMETIME); + urb->transfer_flags = URB_ISO_ASAP; - urb->transfer_buffer = ubc->isooutbuf->data; - urb->transfer_buffer_length = sizeof(ubc->isooutbuf->data); urb->number_of_packets = BAS_NUMFRAMES; - urb->interval = BAS_FRAMETIME; - urb->complete = write_iso_callback; - urb->context = &ubc->isoouturbs[k]; for (j = 0; j < BAS_NUMFRAMES; ++j) { urb->iso_frame_desc[j].offset = BAS_OUTBUFSIZE; urb->iso_frame_desc[j].length = BAS_NORMFRAME; diff --git a/drivers/isdn/hardware/mISDN/avmfritz.c b/drivers/isdn/hardware/mISDN/avmfritz.c index ae2b2669af1b..8eb28a83832e 100644 --- a/drivers/isdn/hardware/mISDN/avmfritz.c +++ b/drivers/isdn/hardware/mISDN/avmfritz.c @@ -361,6 +361,7 @@ modehdlc(struct bchannel *bch, int protocol) switch (protocol) { case -1: /* used for init */ bch->state = -1; + /* fall through */ case ISDN_P_NONE: if (bch->state == ISDN_P_NONE) break; diff --git a/drivers/isdn/hardware/mISDN/hfcpci.c b/drivers/isdn/hardware/mISDN/hfcpci.c index 34c93874af23..72a271b98873 100644 --- a/drivers/isdn/hardware/mISDN/hfcpci.c +++ b/drivers/isdn/hardware/mISDN/hfcpci.c @@ -1296,6 +1296,7 @@ mode_hfcpci(struct bchannel *bch, int bc, int protocol) case (-1): /* used for init */ bch->state = -1; bch->nr = bc; + /* fall through */ case (ISDN_P_NONE): if (bch->state == ISDN_P_NONE) return 0; diff --git a/drivers/isdn/hardware/mISDN/hfcsusb.c b/drivers/isdn/hardware/mISDN/hfcsusb.c index 17cc879ad2bb..6d05946b445e 100644 --- a/drivers/isdn/hardware/mISDN/hfcsusb.c +++ b/drivers/isdn/hardware/mISDN/hfcsusb.c @@ -819,6 +819,7 @@ hfcsusb_rx_frame(struct usb_fifo *fifo, __u8 *data, unsigned int len, int fifon = fifo->fifonum; int i; int hdlc = 0; + unsigned long flags; if (debug & DBG_HFC_CALL_TRACE) printk(KERN_DEBUG "%s: %s: fifo(%i) len(%i) " @@ -835,7 +836,7 @@ hfcsusb_rx_frame(struct usb_fifo *fifo, __u8 *data, unsigned int len, return; } - spin_lock(&hw->lock); + spin_lock_irqsave(&hw->lock, flags); if (fifo->dch) { rx_skb = fifo->dch->rx_skb; maxlen = fifo->dch->maxlen; @@ -844,7 +845,7 @@ hfcsusb_rx_frame(struct usb_fifo *fifo, __u8 *data, unsigned int len, if (fifo->bch) { if (test_bit(FLG_RX_OFF, &fifo->bch->Flags)) { fifo->bch->dropcnt += len; - spin_unlock(&hw->lock); + spin_unlock_irqrestore(&hw->lock, flags); return; } maxlen = bchannel_get_rxbuf(fifo->bch, len); @@ -854,7 +855,7 @@ hfcsusb_rx_frame(struct usb_fifo *fifo, __u8 *data, unsigned int len, skb_trim(rx_skb, 0); pr_warning("%s.B%d: No bufferspace for %d bytes\n", hw->name, fifo->bch->nr, len); - spin_unlock(&hw->lock); + spin_unlock_irqrestore(&hw->lock, flags); return; } maxlen 
= fifo->bch->maxlen; @@ -878,7 +879,7 @@ hfcsusb_rx_frame(struct usb_fifo *fifo, __u8 *data, unsigned int len, } else { printk(KERN_DEBUG "%s: %s: No mem for rx_skb\n", hw->name, __func__); - spin_unlock(&hw->lock); + spin_unlock_irqrestore(&hw->lock, flags); return; } } @@ -888,7 +889,7 @@ hfcsusb_rx_frame(struct usb_fifo *fifo, __u8 *data, unsigned int len, "for fifo(%d) HFCUSB_D_RX\n", hw->name, __func__, fifon); skb_trim(rx_skb, 0); - spin_unlock(&hw->lock); + spin_unlock_irqrestore(&hw->lock, flags); return; } } @@ -942,7 +943,7 @@ hfcsusb_rx_frame(struct usb_fifo *fifo, __u8 *data, unsigned int len, /* deliver transparent data to layer2 */ recv_Bchannel(fifo->bch, MISDN_ID_ANY, false); } - spin_unlock(&hw->lock); + spin_unlock_irqrestore(&hw->lock, flags); } static void @@ -979,18 +980,19 @@ rx_iso_complete(struct urb *urb) __u8 *buf; static __u8 eof[8]; __u8 s0_state; + unsigned long flags; fifon = fifo->fifonum; status = urb->status; - spin_lock(&hw->lock); + spin_lock_irqsave(&hw->lock, flags); if (fifo->stop_gracefull) { fifo->stop_gracefull = 0; fifo->active = 0; - spin_unlock(&hw->lock); + spin_unlock_irqrestore(&hw->lock, flags); return; } - spin_unlock(&hw->lock); + spin_unlock_irqrestore(&hw->lock, flags); /* * ISO transfer only partially completed, @@ -1096,15 +1098,16 @@ rx_int_complete(struct urb *urb) struct usb_fifo *fifo = (struct usb_fifo *) urb->context; struct hfcsusb *hw = fifo->hw; static __u8 eof[8]; + unsigned long flags; - spin_lock(&hw->lock); + spin_lock_irqsave(&hw->lock, flags); if (fifo->stop_gracefull) { fifo->stop_gracefull = 0; fifo->active = 0; - spin_unlock(&hw->lock); + spin_unlock_irqrestore(&hw->lock, flags); return; } - spin_unlock(&hw->lock); + spin_unlock_irqrestore(&hw->lock, flags); fifon = fifo->fifonum; if ((!fifo->active) || (urb->status)) { @@ -1172,12 +1175,13 @@ tx_iso_complete(struct urb *urb) int *tx_idx; int frame_complete, fifon, status, fillempty = 0; __u8 threshbit, *p; + unsigned long flags; - spin_lock(&hw->lock); + spin_lock_irqsave(&hw->lock, flags); if (fifo->stop_gracefull) { fifo->stop_gracefull = 0; fifo->active = 0; - spin_unlock(&hw->lock); + spin_unlock_irqrestore(&hw->lock, flags); return; } @@ -1195,7 +1199,7 @@ tx_iso_complete(struct urb *urb) } else { printk(KERN_DEBUG "%s: %s: neither BCH nor DCH\n", hw->name, __func__); - spin_unlock(&hw->lock); + spin_unlock_irqrestore(&hw->lock, flags); return; } @@ -1375,7 +1379,7 @@ tx_iso_complete(struct urb *urb) hw->name, __func__, symbolic(urb_errlist, status), status, fifon); } - spin_unlock(&hw->lock); + spin_unlock_irqrestore(&hw->lock, flags); } /* diff --git a/drivers/isdn/hardware/mISDN/mISDNinfineon.c b/drivers/isdn/hardware/mISDN/mISDNinfineon.c index 1fc290659e94..3e01012be4ab 100644 --- a/drivers/isdn/hardware/mISDN/mISDNinfineon.c +++ b/drivers/isdn/hardware/mISDN/mISDNinfineon.c @@ -887,6 +887,7 @@ release_card(struct inf_hw *card) { release_card(card->sc[i]); card->sc[i] = NULL; } + /* fall through */ default: pci_disable_device(card->pdev); pci_set_drvdata(card->pdev, NULL); diff --git a/drivers/isdn/hardware/mISDN/mISDNisar.c b/drivers/isdn/hardware/mISDN/mISDNisar.c index b791688d0228..386731ec2489 100644 --- a/drivers/isdn/hardware/mISDN/mISDNisar.c +++ b/drivers/isdn/hardware/mISDN/mISDNisar.c @@ -972,6 +972,7 @@ isar_pump_statev_fax(struct isar_ch *ch, u8 devt) { break; case PCTRL_CMD_FTM: p1 = 2; + /* fall through */ case PCTRL_CMD_FTH: send_mbox(ch->is, dps | ISAR_HIS_PUMPCTRL, PCTRL_CMD_SILON, 1, &p1); @@ -1177,6 +1178,7 @@ setup_pump(struct isar_ch 
*ch) { send_mbox(ch->is, dps | ISAR_HIS_PUMPCFG, PMOD_DTMF, 1, param); } + /* fall through */ case ISDN_P_B_MODEM_ASYNC: ctrl = PMOD_DATAMODEM; if (test_bit(FLG_ORIGIN, &ch->bch.Flags)) { @@ -1268,6 +1270,7 @@ setup_iom2(struct isar_ch *ch) { case ISDN_P_B_MODEM_ASYNC: case ISDN_P_B_T30_FAX: cmsb |= IOM_CTRL_RCV; + /* fall through */ case ISDN_P_B_L2DTMF: if (test_bit(FLG_DTMFSEND, &ch->bch.Flags)) cmsb |= IOM_CTRL_RCV; @@ -1560,6 +1563,7 @@ isar_l2l1(struct mISDNchannel *ch, struct sk_buff *skb) ich->is->name, hh->id); ret = -EINVAL; } + /* fall through */ default: pr_info("%s: %s unknown prim(%x,%x)\n", ich->is->name, __func__, hh->prim, hh->id); diff --git a/drivers/isdn/hisax/avm_pci.c b/drivers/isdn/hisax/avm_pci.c index a18b605fb4f2..b161456c942e 100644 --- a/drivers/isdn/hisax/avm_pci.c +++ b/drivers/isdn/hisax/avm_pci.c @@ -207,6 +207,7 @@ modehdlc(struct BCState *bcs, int mode, int bc) bcs->mode = 1; bcs->channel = bc; bc = 0; + /* fall through */ case (L1_MODE_NULL): if (bcs->mode == L1_MODE_NULL) return; diff --git a/drivers/isdn/hisax/callc.c b/drivers/isdn/hisax/callc.c index ddec47a911a0..5f43783039d4 100644 --- a/drivers/isdn/hisax/callc.c +++ b/drivers/isdn/hisax/callc.c @@ -1369,6 +1369,7 @@ leased_l1l2(struct PStack *st, int pr, void *arg) case (PH_ACTIVATE | INDICATION): case (PH_ACTIVATE | CONFIRM): event = EV_LEASED; + /* fall through */ case (PH_DEACTIVATE | INDICATION): case (PH_DEACTIVATE | CONFIRM): if (test_bit(FLG_TWO_DCHAN, &chanp->cs->HW_Flags)) diff --git a/drivers/isdn/hisax/config.c b/drivers/isdn/hisax/config.c index 7108bdb8742e..fcc9c46127b4 100644 --- a/drivers/isdn/hisax/config.c +++ b/drivers/isdn/hisax/config.c @@ -1843,6 +1843,7 @@ static void hisax_b_l2l1(struct PStack *st, int pr, void *arg) case PH_DEACTIVATE | REQUEST: test_and_clear_bit(BC_FLG_BUSY, &bcs->Flag); skb_queue_purge(&bcs->squeue); + /* fall through */ default: B_L2L1(b_if, pr, arg); break; diff --git a/drivers/isdn/hisax/gazel.c b/drivers/isdn/hisax/gazel.c index 35c6df6534ec..a6d8af02354a 100644 --- a/drivers/isdn/hisax/gazel.c +++ b/drivers/isdn/hisax/gazel.c @@ -108,6 +108,7 @@ ReadISAC(struct IsdnCardState *cs, u_char offset) switch (cs->subtyp) { case R647: off2 = ((off2 << 8 & 0xf000) | (off2 & 0xf)); + /* fall through */ case R685: return (readreg(cs->hw.gazel.isac, off2)); case R753: @@ -125,6 +126,7 @@ WriteISAC(struct IsdnCardState *cs, u_char offset, u_char value) switch (cs->subtyp) { case R647: off2 = ((off2 << 8 & 0xf000) | (off2 & 0xf)); + /* fall through */ case R685: writereg(cs->hw.gazel.isac, off2, value); break; @@ -203,6 +205,7 @@ ReadHSCX(struct IsdnCardState *cs, int hscx, u_char offset) switch (cs->subtyp) { case R647: off2 = ((off2 << 8 & 0xf000) | (off2 & 0xf)); + /* fall through */ case R685: return (readreg(cs->hw.gazel.hscx[hscx], off2)); case R753: @@ -220,6 +223,7 @@ WriteHSCX(struct IsdnCardState *cs, int hscx, u_char offset, u_char value) switch (cs->subtyp) { case R647: off2 = ((off2 << 8 & 0xf000) | (off2 & 0xf)); + /* fall through */ case R685: writereg(cs->hw.gazel.hscx[hscx], off2, value); break; diff --git a/drivers/isdn/hisax/hfc_usb.c b/drivers/isdn/hisax/hfc_usb.c index 97ecb3073045..1d4cd01d4685 100644 --- a/drivers/isdn/hisax/hfc_usb.c +++ b/drivers/isdn/hisax/hfc_usb.c @@ -432,16 +432,12 @@ fill_isoc_urb(struct urb *urb, struct usb_device *dev, unsigned int pipe, { int k; - urb->dev = dev; - urb->pipe = pipe; - urb->complete = complete; + usb_fill_int_urb(urb, dev, pipe, buf, packet_size * num_packets, + complete, context, interval); + 
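/*
 * Editorial aside: this hunk, like the matching conversions in
 * bas-gigaset.c above and st5481_usb.c below, replaces open-coded URB
 * field initialisation with the usb_fill_int_urb() helper from
 * <linux/usb.h>.  Roughly, the helper performs the assignments shown in
 * this simplified sketch (the real helper also re-encodes 'interval' for
 * high- and super-speed devices):
 */
static inline void fill_int_urb_sketch(struct urb *urb, struct usb_device *dev,
				       unsigned int pipe, void *buf, int len,
				       usb_complete_t complete_fn, void *context,
				       int interval)
{
	urb->dev = dev;
	urb->pipe = pipe;
	urb->transfer_buffer = buf;
	urb->transfer_buffer_length = len;
	urb->complete = complete_fn;
	urb->context = context;
	urb->interval = interval;	/* adjusted per device speed in the real helper */
	urb->start_frame = -1;
}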
urb->number_of_packets = num_packets; - urb->transfer_buffer_length = packet_size * num_packets; - urb->context = context; - urb->transfer_buffer = buf; urb->transfer_flags = URB_ISO_ASAP; urb->actual_length = 0; - urb->interval = interval; for (k = 0; k < num_packets; k++) { urb->iso_frame_desc[k].offset = packet_size * k; urb->iso_frame_desc[k].length = packet_size; diff --git a/drivers/isdn/hisax/isar.c b/drivers/isdn/hisax/isar.c index d01ff116797b..82c1879f5664 100644 --- a/drivers/isdn/hisax/isar.c +++ b/drivers/isdn/hisax/isar.c @@ -1089,6 +1089,7 @@ isar_pump_statev_fax(struct BCState *bcs, u_char devt) { break; case PCTRL_CMD_FTM: p1 = 2; + /* fall through */ case PCTRL_CMD_FTH: sendmsg(cs, dps | ISAR_HIS_PUMPCTRL, PCTRL_CMD_SILON, 1, &p1); @@ -1097,6 +1098,7 @@ isar_pump_statev_fax(struct BCState *bcs, u_char devt) { case PCTRL_CMD_FRM: if (frm_extra_delay) mdelay(frm_extra_delay); + /* fall through */ case PCTRL_CMD_FRH: p1 = bcs->hw.isar.mod = bcs->hw.isar.newmod; bcs->hw.isar.newmod = 0; diff --git a/drivers/isdn/hisax/l3_1tr6.c b/drivers/isdn/hisax/l3_1tr6.c index da0a1c6aa329..98f60d1523f4 100644 --- a/drivers/isdn/hisax/l3_1tr6.c +++ b/drivers/isdn/hisax/l3_1tr6.c @@ -88,6 +88,7 @@ l3_1tr6_setup_req(struct l3_process *pc, u_char pr, void *arg) break; case 'C': channel = 0x08; + /* fall through */ case 'P': channel |= 0x80; teln++; diff --git a/drivers/isdn/hisax/l3dss1.c b/drivers/isdn/hisax/l3dss1.c index 18a3484b1f7e..368d152a8f1d 100644 --- a/drivers/isdn/hisax/l3dss1.c +++ b/drivers/isdn/hisax/l3dss1.c @@ -1282,6 +1282,7 @@ l3dss1_setup_req(struct l3_process *pc, u_char pr, switch (0x5f & *teln) { case 'C': channel = 0x08; + /* fall through */ case 'P': channel |= 0x80; teln++; diff --git a/drivers/isdn/hisax/st5481_usb.c b/drivers/isdn/hisax/st5481_usb.c index 1cb9930d5e24..f207fda691c7 100644 --- a/drivers/isdn/hisax/st5481_usb.c +++ b/drivers/isdn/hisax/st5481_usb.c @@ -408,15 +408,10 @@ fill_isoc_urb(struct urb *urb, struct usb_device *dev, { int k; - urb->dev = dev; - urb->pipe = pipe; - urb->interval = 1; - urb->transfer_buffer = buf; + usb_fill_int_urb(urb, dev, pipe, buf, num_packets * packet_size, + complete, context, 1); + urb->number_of_packets = num_packets; - urb->transfer_buffer_length = num_packets * packet_size; - urb->actual_length = 0; - urb->complete = complete; - urb->context = context; urb->transfer_flags = URB_ISO_ASAP; for (k = 0; k < num_packets; k++) { urb->iso_frame_desc[k].offset = packet_size * k; diff --git a/drivers/isdn/hysdn/hysdn_boot.c b/drivers/isdn/hysdn/hysdn_boot.c index 4a0425378f37..ba177c3a621b 100644 --- a/drivers/isdn/hysdn/hysdn_boot.c +++ b/drivers/isdn/hysdn/hysdn_boot.c @@ -99,6 +99,7 @@ pof_handle_data(hysdn_card *card, int datlen) case TAG_CBOOTDTA: DecryptBuf(boot, datlen); /* we need to encrypt the buffer */ + /* fall through */ case TAG_BOOTDTA: if (card->debug_flags & LOG_POF_RECORD) hysdn_addlog(card, "POF got %s len=%d offs=0x%lx", @@ -137,6 +138,7 @@ pof_handle_data(hysdn_card *card, int datlen) case TAG_CABSDATA: DecryptBuf(boot, datlen); /* we need to encrypt the buffer */ + /* fall through */ case TAG_ABSDATA: if (card->debug_flags & LOG_POF_RECORD) hysdn_addlog(card, "POF got %s len=%d offs=0x%lx", diff --git a/drivers/isdn/i4l/isdn_tty.c b/drivers/isdn/i4l/isdn_tty.c index 960f26348bb5..b730037a0e2d 100644 --- a/drivers/isdn/i4l/isdn_tty.c +++ b/drivers/isdn/i4l/isdn_tty.c @@ -787,7 +787,7 @@ isdn_tty_suspend(char *id, modem_info *info, atemu *m) cmd.parm.cmsg.para[3] = 4; /* 16 bit 0x0004 Suspend */ 
cmd.parm.cmsg.para[4] = 0; cmd.parm.cmsg.para[5] = l; - strncpy(&cmd.parm.cmsg.para[6], id, l); + memcpy(&cmd.parm.cmsg.para[6], id, l); cmd.command = CAPI_PUT_MESSAGE; cmd.driver = info->isdn_driver; cmd.arg = info->isdn_channel; @@ -877,7 +877,7 @@ isdn_tty_resume(char *id, modem_info *info, atemu *m) cmd.parm.cmsg.para[3] = 5; /* 16 bit 0x0005 Resume */ cmd.parm.cmsg.para[4] = 0; cmd.parm.cmsg.para[5] = l; - strncpy(&cmd.parm.cmsg.para[6], id, l); + memcpy(&cmd.parm.cmsg.para[6], id, l); cmd.command = CAPI_PUT_MESSAGE; info->dialing = 1; // strcpy(dev->num[i], n); diff --git a/drivers/isdn/i4l/isdn_v110.c b/drivers/isdn/i4l/isdn_v110.c index 8b74ce412524..2a5f6668756c 100644 --- a/drivers/isdn/i4l/isdn_v110.c +++ b/drivers/isdn/i4l/isdn_v110.c @@ -354,6 +354,7 @@ EncodeMatrix(unsigned char *buf, int len, unsigned char *m, int mlen) printk(KERN_WARNING "isdn_v110 (EncodeMatrix): buffer full!\n"); return line; } + /* else: fall through */ case 128: m[line] = 128; /* leftmost -> set byte to 1000000 */ mbit = 64; /* current bit in the matrix line */ @@ -386,20 +387,28 @@ EncodeMatrix(unsigned char *buf, int len, unsigned char *m, int mlen) switch (++line % 10) { case 1: m[line++] = 0xfe; + /* fall through */ case 2: m[line++] = 0xfe; + /* fall through */ case 3: m[line++] = 0xfe; + /* fall through */ case 4: m[line++] = 0xfe; + /* fall through */ case 5: m[line++] = 0xbf; + /* fall through */ case 6: m[line++] = 0xfe; + /* fall through */ case 7: m[line++] = 0xfe; + /* fall through */ case 8: m[line++] = 0xfe; + /* fall through */ case 9: m[line++] = 0xfe; } diff --git a/drivers/isdn/mISDN/stack.c b/drivers/isdn/mISDN/stack.c index 422dced7c90a..d97c6dd52223 100644 --- a/drivers/isdn/mISDN/stack.c +++ b/drivers/isdn/mISDN/stack.c @@ -539,6 +539,7 @@ create_l2entity(struct mISDNdevice *dev, struct mISDNchannel *ch, rq.protocol = ISDN_P_NT_S0; if (dev->Dprotocols & (1 << ISDN_P_NT_E1)) rq.protocol = ISDN_P_NT_E1; + /* fall through */ case ISDN_P_LAPD_TE: ch->recv = mISDN_queue_message; ch->peer = &dev->D.st->own; diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index 63e3844c5bec..9a2ea3c1f949 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -4094,7 +4094,8 @@ static inline int bond_slave_override(struct bonding *bond, static u16 bond_select_queue(struct net_device *dev, struct sk_buff *skb, - void *accel_priv, select_queue_fallback_t fallback) + struct net_device *sb_dev, + select_queue_fallback_t fallback) { /* This helper function exists to help dev_pick_tx get the correct * destination queue. Using a helper function skips a call to diff --git a/drivers/net/dsa/Kconfig b/drivers/net/dsa/Kconfig index 2b81b97e994f..d3ce1e4cb4d3 100644 --- a/drivers/net/dsa/Kconfig +++ b/drivers/net/dsa/Kconfig @@ -5,7 +5,7 @@ source "drivers/net/dsa/b53/Kconfig" config NET_DSA_BCM_SF2 tristate "Broadcom Starfighter 2 Ethernet switch support" - depends on HAS_IOMEM && NET_DSA && OF_MDIO + depends on HAS_IOMEM && NET_DSA select NET_DSA_TAG_BRCM select FIXED_PHY select BCM7XXX_PHY @@ -52,6 +52,17 @@ config NET_DSA_QCA8K This enables support for the Qualcomm Atheros QCA8K Ethernet switch chips. +config NET_DSA_REALTEK_SMI + tristate "Realtek SMI Ethernet switch family support" + depends on NET_DSA + select FIXED_PHY + select IRQ_DOMAIN + select REALTEK_PHY + select REGMAP + ---help--- + This enables support for the Realtek SMI-based switch + chips, currently only RTL8366RB. 
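The many "/* fall through */" comments added in the ISDN hunks above are annotations only: they mark the missing break between switch cases as deliberate so that GCC's -Wimplicit-fallthrough warning (and static checkers) stay quiet, and they generate no code. A minimal sketch of the idiom follows; the function and constants are made up for illustration and are not taken from any of the drivers above.

/* Intentional fall-through between switch cases, annotated so the
 * fall-through warning treats the missing break as deliberate.
 */
static int classify(int tag)
{
	int flags = 0;

	switch (tag) {
	case 1:
		flags |= 0x1;	/* extra work only for tag 1 ... */
		/* fall through */
	case 2:
		flags |= 0x2;	/* ... then the handling shared with tag 2 */
		break;
	default:
		break;
	}
	return flags;
}

The strncpy() to memcpy() change in the isdn_tty hunk just above follows the same "silence a new compiler warning without changing behaviour" theme: the copy length l is computed beforehand and no NUL terminator is wanted in the CAPI parameter buffer, so memcpy() expresses the intent exactly and avoids the truncation warnings newer GCC emits for strncpy().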
+ config NET_DSA_SMSC_LAN9303 tristate select NET_DSA_TAG_LAN9303 @@ -76,4 +87,15 @@ config NET_DSA_SMSC_LAN9303_MDIO Enable access functions if the SMSC/Microchip LAN9303 is configured for MDIO managed mode. +config NET_DSA_VITESSE_VSC73XX + tristate "Vitesse VSC7385/7388/7395/7398 support" + depends on OF && SPI + depends on NET_DSA + select FIXED_PHY + select VITESSE_PHY + select GPIOLIB + ---help--- + This enables support for the Vitesse VSC7385, VSC7388, + VSC7395 and VSC7398 SparX integrated ethernet switches. + endmenu diff --git a/drivers/net/dsa/Makefile b/drivers/net/dsa/Makefile index 15c2a831edf1..46c1cba91ffe 100644 --- a/drivers/net/dsa/Makefile +++ b/drivers/net/dsa/Makefile @@ -8,9 +8,12 @@ endif obj-$(CONFIG_NET_DSA_MT7530) += mt7530.o obj-$(CONFIG_NET_DSA_MV88E6060) += mv88e6060.o obj-$(CONFIG_NET_DSA_QCA8K) += qca8k.o +obj-$(CONFIG_NET_DSA_REALTEK_SMI) += realtek.o +realtek-objs := realtek-smi.o rtl8366.o rtl8366rb.o obj-$(CONFIG_NET_DSA_SMSC_LAN9303) += lan9303-core.o obj-$(CONFIG_NET_DSA_SMSC_LAN9303_I2C) += lan9303_i2c.o obj-$(CONFIG_NET_DSA_SMSC_LAN9303_MDIO) += lan9303_mdio.o +obj-$(CONFIG_NET_DSA_VITESSE_VSC73XX) += vitesse-vsc73xx.o obj-y += b53/ obj-y += microchip/ obj-y += mv88e6xxx/ diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c index 02e8982519ce..ac96ff40d37e 100644 --- a/drivers/net/dsa/bcm_sf2.c +++ b/drivers/net/dsa/bcm_sf2.c @@ -220,7 +220,7 @@ static void bcm_sf2_port_disable(struct dsa_switch *ds, int port, struct phy_device *phy) { struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds); - u32 off, reg; + u32 reg; if (priv->wol_ports_mask & (1 << port)) return; @@ -231,11 +231,6 @@ static void bcm_sf2_port_disable(struct dsa_switch *ds, int port, if (priv->int_phy_mask & 1 << port && priv->hw_params.num_gphy == 1) bcm_sf2_gphy_enable_set(ds, false); - if (dsa_is_cpu_port(ds, port)) - off = CORE_IMP_CTL; - else - off = CORE_G_PCTL_PORT(port); - b53_disable_port(ds, port, phy); /* Power down the port memory */ diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c index 437cd6eb4faa..9dd270978064 100644 --- a/drivers/net/dsa/mv88e6xxx/chip.c +++ b/drivers/net/dsa/mv88e6xxx/chip.c @@ -2810,6 +2810,8 @@ static const struct mv88e6xxx_ops mv88e6161_ops = { .reset = mv88e6352_g1_reset, .vtu_getnext = mv88e6352_g1_vtu_getnext, .vtu_loadpurge = mv88e6352_g1_vtu_loadpurge, + .avb_ops = &mv88e6165_avb_ops, + .ptp_ops = &mv88e6165_ptp_ops, }; static const struct mv88e6xxx_ops mv88e6165_ops = { @@ -2838,6 +2840,8 @@ static const struct mv88e6xxx_ops mv88e6165_ops = { .reset = mv88e6352_g1_reset, .vtu_getnext = mv88e6352_g1_vtu_getnext, .vtu_loadpurge = mv88e6352_g1_vtu_loadpurge, + .avb_ops = &mv88e6165_avb_ops, + .ptp_ops = &mv88e6165_ptp_ops, }; static const struct mv88e6xxx_ops mv88e6171_ops = { @@ -3134,6 +3138,8 @@ static const struct mv88e6xxx_ops mv88e6191_ops = { .vtu_getnext = mv88e6390_g1_vtu_getnext, .vtu_loadpurge = mv88e6390_g1_vtu_loadpurge, .serdes_power = mv88e6390_serdes_power, + .avb_ops = &mv88e6390_avb_ops, + .ptp_ops = &mv88e6352_ptp_ops, }; static const struct mv88e6xxx_ops mv88e6240_ops = { @@ -3176,6 +3182,7 @@ static const struct mv88e6xxx_ops mv88e6240_ops = { .serdes_power = mv88e6352_serdes_power, .gpio_ops = &mv88e6352_gpio_ops, .avb_ops = &mv88e6352_avb_ops, + .ptp_ops = &mv88e6352_ptp_ops, }; static const struct mv88e6xxx_ops mv88e6290_ops = { @@ -3215,6 +3222,7 @@ static const struct mv88e6xxx_ops mv88e6290_ops = { .serdes_power = mv88e6390_serdes_power, .gpio_ops = &mv88e6352_gpio_ops, .avb_ops 
= &mv88e6390_avb_ops, + .ptp_ops = &mv88e6352_ptp_ops, }; static const struct mv88e6xxx_ops mv88e6320_ops = { @@ -3253,6 +3261,7 @@ static const struct mv88e6xxx_ops mv88e6320_ops = { .vtu_loadpurge = mv88e6185_g1_vtu_loadpurge, .gpio_ops = &mv88e6352_gpio_ops, .avb_ops = &mv88e6352_avb_ops, + .ptp_ops = &mv88e6352_ptp_ops, }; static const struct mv88e6xxx_ops mv88e6321_ops = { @@ -3289,6 +3298,7 @@ static const struct mv88e6xxx_ops mv88e6321_ops = { .vtu_loadpurge = mv88e6185_g1_vtu_loadpurge, .gpio_ops = &mv88e6352_gpio_ops, .avb_ops = &mv88e6352_avb_ops, + .ptp_ops = &mv88e6352_ptp_ops, }; static const struct mv88e6xxx_ops mv88e6341_ops = { @@ -3329,6 +3339,7 @@ static const struct mv88e6xxx_ops mv88e6341_ops = { .vtu_loadpurge = mv88e6352_g1_vtu_loadpurge, .gpio_ops = &mv88e6352_gpio_ops, .avb_ops = &mv88e6390_avb_ops, + .ptp_ops = &mv88e6352_ptp_ops, }; static const struct mv88e6xxx_ops mv88e6350_ops = { @@ -3402,6 +3413,7 @@ static const struct mv88e6xxx_ops mv88e6351_ops = { .vtu_getnext = mv88e6352_g1_vtu_getnext, .vtu_loadpurge = mv88e6352_g1_vtu_loadpurge, .avb_ops = &mv88e6352_avb_ops, + .ptp_ops = &mv88e6352_ptp_ops, }; static const struct mv88e6xxx_ops mv88e6352_ops = { @@ -3444,6 +3456,7 @@ static const struct mv88e6xxx_ops mv88e6352_ops = { .serdes_power = mv88e6352_serdes_power, .gpio_ops = &mv88e6352_gpio_ops, .avb_ops = &mv88e6352_avb_ops, + .ptp_ops = &mv88e6352_ptp_ops, .serdes_get_sset_count = mv88e6352_serdes_get_sset_count, .serdes_get_strings = mv88e6352_serdes_get_strings, .serdes_get_stats = mv88e6352_serdes_get_stats, @@ -3488,6 +3501,7 @@ static const struct mv88e6xxx_ops mv88e6390_ops = { .serdes_power = mv88e6390_serdes_power, .gpio_ops = &mv88e6352_gpio_ops, .avb_ops = &mv88e6390_avb_ops, + .ptp_ops = &mv88e6352_ptp_ops, }; static const struct mv88e6xxx_ops mv88e6390x_ops = { @@ -3529,6 +3543,7 @@ static const struct mv88e6xxx_ops mv88e6390x_ops = { .serdes_power = mv88e6390_serdes_power, .gpio_ops = &mv88e6352_gpio_ops, .avb_ops = &mv88e6390_avb_ops, + .ptp_ops = &mv88e6352_ptp_ops, }; static const struct mv88e6xxx_info mv88e6xxx_table[] = { @@ -3680,6 +3695,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .pvt = true, .multi_chip = true, .tag_protocol = DSA_TAG_PROTO_EDSA, + .ptp_support = true, .ops = &mv88e6161_ops, }, @@ -3702,6 +3718,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .pvt = true, .multi_chip = true, .tag_protocol = DSA_TAG_PROTO_DSA, + .ptp_support = true, .ops = &mv88e6165_ops, }, diff --git a/drivers/net/dsa/mv88e6xxx/chip.h b/drivers/net/dsa/mv88e6xxx/chip.h index 8ac3fbb15352..6aa6197ddc10 100644 --- a/drivers/net/dsa/mv88e6xxx/chip.h +++ b/drivers/net/dsa/mv88e6xxx/chip.h @@ -155,6 +155,7 @@ struct mv88e6xxx_bus_ops; struct mv88e6xxx_irq_ops; struct mv88e6xxx_gpio_ops; struct mv88e6xxx_avb_ops; +struct mv88e6xxx_ptp_ops; struct mv88e6xxx_irq { u16 masked; @@ -273,6 +274,7 @@ struct mv88e6xxx_chip { struct ptp_pin_desc pin_config[MV88E6XXX_MAX_GPIO]; u16 trig_config; u16 evcap_config; + u16 enable_count; /* Per-port timestamping resources. 
*/ struct mv88e6xxx_port_hwtstamp port_hwtstamp[DSA_MAX_PORTS]; @@ -439,6 +441,9 @@ struct mv88e6xxx_ops { /* Remote Management Unit operations */ int (*rmu_disable)(struct mv88e6xxx_chip *chip); + + /* Precision Time Protocol operations */ + const struct mv88e6xxx_ptp_ops *ptp_ops; }; struct mv88e6xxx_irq_ops { @@ -486,6 +491,24 @@ struct mv88e6xxx_avb_ops { int (*tai_write)(struct mv88e6xxx_chip *chip, int addr, u16 data); }; +struct mv88e6xxx_ptp_ops { + u64 (*clock_read)(const struct cyclecounter *cc); + int (*ptp_enable)(struct ptp_clock_info *ptp, + struct ptp_clock_request *rq, int on); + int (*ptp_verify)(struct ptp_clock_info *ptp, unsigned int pin, + enum ptp_pin_function func, unsigned int chan); + void (*event_work)(struct work_struct *ugly); + int (*port_enable)(struct mv88e6xxx_chip *chip, int port); + int (*port_disable)(struct mv88e6xxx_chip *chip, int port); + int (*global_enable)(struct mv88e6xxx_chip *chip); + int (*global_disable)(struct mv88e6xxx_chip *chip); + int n_ext_ts; + int arr0_sts_reg; + int arr1_sts_reg; + int dep_sts_reg; + u32 rx_filters; +}; + #define STATS_TYPE_PORT BIT(0) #define STATS_TYPE_BANK0 BIT(1) #define STATS_TYPE_BANK1 BIT(2) diff --git a/drivers/net/dsa/mv88e6xxx/global2.h b/drivers/net/dsa/mv88e6xxx/global2.h index 37e8ce2c72a0..194660d8c783 100644 --- a/drivers/net/dsa/mv88e6xxx/global2.h +++ b/drivers/net/dsa/mv88e6xxx/global2.h @@ -160,6 +160,7 @@ #define MV88E6390_G2_AVB_CMD_OP_WRITE 0x6000 #define MV88E6352_G2_AVB_CMD_PORT_MASK 0x0f00 #define MV88E6352_G2_AVB_CMD_PORT_TAIGLOBAL 0xe +#define MV88E6165_G2_AVB_CMD_PORT_PTPGLOBAL 0xf #define MV88E6352_G2_AVB_CMD_PORT_PTPGLOBAL 0xf #define MV88E6390_G2_AVB_CMD_PORT_MASK 0x1f00 #define MV88E6390_G2_AVB_CMD_PORT_TAIGLOBAL 0x1e @@ -335,6 +336,7 @@ int mv88e6xxx_g2_device_mapping_write(struct mv88e6xxx_chip *chip, int target, extern const struct mv88e6xxx_irq_ops mv88e6097_watchdog_ops; extern const struct mv88e6xxx_irq_ops mv88e6390_watchdog_ops; +extern const struct mv88e6xxx_avb_ops mv88e6165_avb_ops; extern const struct mv88e6xxx_avb_ops mv88e6352_avb_ops; extern const struct mv88e6xxx_avb_ops mv88e6390_avb_ops; @@ -484,6 +486,7 @@ static inline int mv88e6xxx_g2_pot_clear(struct mv88e6xxx_chip *chip) static const struct mv88e6xxx_irq_ops mv88e6097_watchdog_ops = {}; static const struct mv88e6xxx_irq_ops mv88e6390_watchdog_ops = {}; +static const struct mv88e6xxx_avb_ops mv88e6165_avb_ops = {}; static const struct mv88e6xxx_avb_ops mv88e6352_avb_ops = {}; static const struct mv88e6xxx_avb_ops mv88e6390_avb_ops = {}; diff --git a/drivers/net/dsa/mv88e6xxx/global2_avb.c b/drivers/net/dsa/mv88e6xxx/global2_avb.c index 2e398ccb88ca..672b503a67e1 100644 --- a/drivers/net/dsa/mv88e6xxx/global2_avb.c +++ b/drivers/net/dsa/mv88e6xxx/global2_avb.c @@ -130,6 +130,31 @@ const struct mv88e6xxx_avb_ops mv88e6352_avb_ops = { .tai_write = mv88e6352_g2_avb_tai_write, }; +static int mv88e6165_g2_avb_tai_read(struct mv88e6xxx_chip *chip, int addr, + u16 *data, int len) +{ + return mv88e6352_g2_avb_port_ptp_read(chip, + MV88E6165_G2_AVB_CMD_PORT_PTPGLOBAL, + addr, data, len); +} + +static int mv88e6165_g2_avb_tai_write(struct mv88e6xxx_chip *chip, int addr, + u16 data) +{ + return mv88e6352_g2_avb_port_ptp_write(chip, + MV88E6165_G2_AVB_CMD_PORT_PTPGLOBAL, + addr, data); +} + +const struct mv88e6xxx_avb_ops mv88e6165_avb_ops = { + .port_ptp_read = mv88e6352_g2_avb_port_ptp_read, + .port_ptp_write = mv88e6352_g2_avb_port_ptp_write, + .ptp_read = mv88e6352_g2_avb_ptp_read, + .ptp_write = mv88e6352_g2_avb_ptp_write, 
+ .tai_read = mv88e6165_g2_avb_tai_read, + .tai_write = mv88e6165_g2_avb_tai_write, +}; + static int mv88e6390_g2_avb_port_ptp_read(struct mv88e6xxx_chip *chip, int port, int addr, u16 *data, int len) diff --git a/drivers/net/dsa/mv88e6xxx/hwtstamp.c b/drivers/net/dsa/mv88e6xxx/hwtstamp.c index a036c490b7ce..a17c16a2ab78 100644 --- a/drivers/net/dsa/mv88e6xxx/hwtstamp.c +++ b/drivers/net/dsa/mv88e6xxx/hwtstamp.c @@ -51,17 +51,30 @@ static int mv88e6xxx_ptp_write(struct mv88e6xxx_chip *chip, int addr, return chip->info->ops->avb_ops->ptp_write(chip, addr, data); } +static int mv88e6xxx_ptp_read(struct mv88e6xxx_chip *chip, int addr, + u16 *data) +{ + if (!chip->info->ops->avb_ops->ptp_read) + return -EOPNOTSUPP; + + return chip->info->ops->avb_ops->ptp_read(chip, addr, data, 1); +} + /* TX_TSTAMP_TIMEOUT: This limits the time spent polling for a TX * timestamp. When working properly, hardware will produce a timestamp * within 1ms. Software may enounter delays due to MDIO contention, so * the timeout is set accordingly. */ -#define TX_TSTAMP_TIMEOUT msecs_to_jiffies(20) +#define TX_TSTAMP_TIMEOUT msecs_to_jiffies(40) int mv88e6xxx_get_ts_info(struct dsa_switch *ds, int port, struct ethtool_ts_info *info) { - struct mv88e6xxx_chip *chip = ds->priv; + const struct mv88e6xxx_ptp_ops *ptp_ops; + struct mv88e6xxx_chip *chip; + + chip = ds->priv; + ptp_ops = chip->info->ops->ptp_ops; if (!chip->info->ptp_support) return -EOPNOTSUPP; @@ -74,17 +87,7 @@ int mv88e6xxx_get_ts_info(struct dsa_switch *ds, int port, info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON); - info->rx_filters = - (1 << HWTSTAMP_FILTER_NONE) | - (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT) | - (1 << HWTSTAMP_FILTER_PTP_V2_L4_SYNC) | - (1 << HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ) | - (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) | - (1 << HWTSTAMP_FILTER_PTP_V2_L2_SYNC) | - (1 << HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ) | - (1 << HWTSTAMP_FILTER_PTP_V2_EVENT) | - (1 << HWTSTAMP_FILTER_PTP_V2_SYNC) | - (1 << HWTSTAMP_FILTER_PTP_V2_DELAY_REQ); + info->rx_filters = ptp_ops->rx_filters; return 0; } @@ -92,10 +95,9 @@ int mv88e6xxx_get_ts_info(struct dsa_switch *ds, int port, static int mv88e6xxx_set_hwtstamp_config(struct mv88e6xxx_chip *chip, int port, struct hwtstamp_config *config) { + const struct mv88e6xxx_ptp_ops *ptp_ops = chip->info->ops->ptp_ops; struct mv88e6xxx_port_hwtstamp *ps = &chip->port_hwtstamp[port]; bool tstamp_enable = false; - u16 port_config0; - int err; /* Prevent the TX/RX paths from trying to interact with the * timestamp hardware while we reconfigure it. @@ -120,6 +122,14 @@ static int mv88e6xxx_set_hwtstamp_config(struct mv88e6xxx_chip *chip, int port, /* The switch supports timestamping both L2 and L4; one cannot be * disabled independently of the other. */ + + if (!(BIT(config->rx_filter) & ptp_ops->rx_filters)) { + config->rx_filter = HWTSTAMP_FILTER_NONE; + dev_dbg(chip->dev, "Unsupported rx_filter %d\n", + config->rx_filter); + return -ERANGE; + } + switch (config->rx_filter) { case HWTSTAMP_FILTER_NONE: tstamp_enable = false; @@ -141,24 +151,22 @@ static int mv88e6xxx_set_hwtstamp_config(struct mv88e6xxx_chip *chip, int port, return -ERANGE; } + mutex_lock(&chip->reg_lock); if (tstamp_enable) { - /* Disable transportSpecific value matching, so that packets - * with either 1588 (0) and 802.1AS (1) will be timestamped. 
- */ - port_config0 = MV88E6XXX_PORT_PTP_CFG0_DISABLE_TSPEC_MATCH; + chip->enable_count += 1; + if (chip->enable_count == 1 && ptp_ops->global_enable) + ptp_ops->global_enable(chip); + if (ptp_ops->port_enable) + ptp_ops->port_enable(chip, port); } else { - /* Disable PTP. This disables both RX and TX timestamping. */ - port_config0 = MV88E6XXX_PORT_PTP_CFG0_DISABLE_PTP; + if (ptp_ops->port_disable) + ptp_ops->port_disable(chip, port); + chip->enable_count -= 1; + if (chip->enable_count == 0 && ptp_ops->global_disable) + ptp_ops->global_disable(chip); } - - mutex_lock(&chip->reg_lock); - err = mv88e6xxx_port_ptp_write(chip, port, MV88E6XXX_PORT_PTP_CFG0, - port_config0); mutex_unlock(&chip->reg_lock); - if (err < 0) - return err; - /* Once hardware has been configured, enable timestamp checks * in the RX/TX paths. */ @@ -338,17 +346,18 @@ static void mv88e6xxx_get_rxts(struct mv88e6xxx_chip *chip, static void mv88e6xxx_rxtstamp_work(struct mv88e6xxx_chip *chip, struct mv88e6xxx_port_hwtstamp *ps) { + const struct mv88e6xxx_ptp_ops *ptp_ops = chip->info->ops->ptp_ops; struct sk_buff *skb; skb = skb_dequeue(&ps->rx_queue); if (skb) - mv88e6xxx_get_rxts(chip, ps, skb, MV88E6XXX_PORT_PTP_ARR0_STS, + mv88e6xxx_get_rxts(chip, ps, skb, ptp_ops->arr0_sts_reg, &ps->rx_queue); skb = skb_dequeue(&ps->rx_queue2); if (skb) - mv88e6xxx_get_rxts(chip, ps, skb, MV88E6XXX_PORT_PTP_ARR1_STS, + mv88e6xxx_get_rxts(chip, ps, skb, ptp_ops->arr1_sts_reg, &ps->rx_queue2); } @@ -389,6 +398,7 @@ bool mv88e6xxx_port_rxtstamp(struct dsa_switch *ds, int port, static int mv88e6xxx_txtstamp_work(struct mv88e6xxx_chip *chip, struct mv88e6xxx_port_hwtstamp *ps) { + const struct mv88e6xxx_ptp_ops *ptp_ops = chip->info->ops->ptp_ops; struct skb_shared_hwtstamps shhwtstamps; u16 departure_block[4], status; struct sk_buff *tmp_skb; @@ -401,7 +411,7 @@ static int mv88e6xxx_txtstamp_work(struct mv88e6xxx_chip *chip, mutex_lock(&chip->reg_lock); err = mv88e6xxx_port_ptp_read(chip, ps->port_id, - MV88E6XXX_PORT_PTP_DEP_STS, + ptp_ops->dep_sts_reg, departure_block, ARRAY_SIZE(departure_block)); mutex_unlock(&chip->reg_lock); @@ -425,8 +435,7 @@ static int mv88e6xxx_txtstamp_work(struct mv88e6xxx_chip *chip, /* We have the timestamp; go ahead and clear valid now */ mutex_lock(&chip->reg_lock); - mv88e6xxx_port_ptp_write(chip, ps->port_id, - MV88E6XXX_PORT_PTP_DEP_STS, 0); + mv88e6xxx_port_ptp_write(chip, ps->port_id, ptp_ops->dep_sts_reg, 0); mutex_unlock(&chip->reg_lock); status = departure_block[0] & MV88E6XXX_PTP_TS_STATUS_MASK; @@ -522,8 +531,48 @@ bool mv88e6xxx_port_txtstamp(struct dsa_switch *ds, int port, return true; } +int mv88e6165_global_disable(struct mv88e6xxx_chip *chip) +{ + u16 val; + int err; + + err = mv88e6xxx_ptp_read(chip, MV88E6165_PTP_CFG, &val); + if (err) + return err; + val |= MV88E6165_PTP_CFG_DISABLE_PTP; + + return mv88e6xxx_ptp_write(chip, MV88E6165_PTP_CFG, val); +} + +int mv88e6165_global_enable(struct mv88e6xxx_chip *chip) +{ + u16 val; + int err; + + err = mv88e6xxx_ptp_read(chip, MV88E6165_PTP_CFG, &val); + if (err) + return err; + + val &= ~(MV88E6165_PTP_CFG_DISABLE_PTP | MV88E6165_PTP_CFG_TSPEC_MASK); + + return mv88e6xxx_ptp_write(chip, MV88E6165_PTP_CFG, val); +} + +int mv88e6352_hwtstamp_port_disable(struct mv88e6xxx_chip *chip, int port) +{ + return mv88e6xxx_port_ptp_write(chip, port, MV88E6XXX_PORT_PTP_CFG0, + MV88E6XXX_PORT_PTP_CFG0_DISABLE_PTP); +} + +int mv88e6352_hwtstamp_port_enable(struct mv88e6xxx_chip *chip, int port) +{ + return mv88e6xxx_port_ptp_write(chip, port, 
MV88E6XXX_PORT_PTP_CFG0, + MV88E6XXX_PORT_PTP_CFG0_DISABLE_TSPEC_MATCH); +} + static int mv88e6xxx_hwtstamp_port_setup(struct mv88e6xxx_chip *chip, int port) { + const struct mv88e6xxx_ptp_ops *ptp_ops = chip->info->ops->ptp_ops; struct mv88e6xxx_port_hwtstamp *ps = &chip->port_hwtstamp[port]; ps->port_id = port; @@ -531,12 +580,15 @@ static int mv88e6xxx_hwtstamp_port_setup(struct mv88e6xxx_chip *chip, int port) skb_queue_head_init(&ps->rx_queue); skb_queue_head_init(&ps->rx_queue2); - return mv88e6xxx_port_ptp_write(chip, port, MV88E6XXX_PORT_PTP_CFG0, - MV88E6XXX_PORT_PTP_CFG0_DISABLE_PTP); + if (ptp_ops->port_disable) + return ptp_ops->port_disable(chip, port); + + return 0; } int mv88e6xxx_hwtstamp_setup(struct mv88e6xxx_chip *chip) { + const struct mv88e6xxx_ptp_ops *ptp_ops = chip->info->ops->ptp_ops; int err; int i; @@ -547,6 +599,18 @@ int mv88e6xxx_hwtstamp_setup(struct mv88e6xxx_chip *chip) return err; } + /* Disable PTP globally */ + if (ptp_ops->global_disable) { + err = ptp_ops->global_disable(chip); + if (err) + return err; + } + + /* Set the ethertype of L2 PTP messages */ + err = mv88e6xxx_ptp_write(chip, MV88E6XXX_PTP_GC_ETYPE, ETH_P_1588); + if (err) + return err; + /* MV88E6XXX_PTP_MSG_TYPE is a mask of PTP message types to * timestamp. This affects all ports that have timestamping enabled, * but the timestamp config is per-port; thus we configure all events diff --git a/drivers/net/dsa/mv88e6xxx/hwtstamp.h b/drivers/net/dsa/mv88e6xxx/hwtstamp.h index bc71c9212a08..b9a72661bcc4 100644 --- a/drivers/net/dsa/mv88e6xxx/hwtstamp.h +++ b/drivers/net/dsa/mv88e6xxx/hwtstamp.h @@ -19,7 +19,7 @@ #include "chip.h" -/* Global PTP registers */ +/* Global 6352 PTP registers */ /* Offset 0x00: PTP EtherType */ #define MV88E6XXX_PTP_ETHERTYPE 0x00 @@ -34,6 +34,12 @@ /* Offset 0x02: Timestamp Arrival Capture Pointers */ #define MV88E6XXX_PTP_TS_ARRIVAL_PTR 0x02 +/* Offset 0x05: PTP Global Configuration */ +#define MV88E6165_PTP_CFG 0x05 +#define MV88E6165_PTP_CFG_TSPEC_MASK 0xf000 +#define MV88E6165_PTP_CFG_DISABLE_TS_OVERWRITE BIT(1) +#define MV88E6165_PTP_CFG_DISABLE_PTP BIT(0) + /* Offset 0x07: PTP Global Configuration */ #define MV88E6341_PTP_CFG 0x07 #define MV88E6341_PTP_CFG_UPDATE 0x8000 @@ -46,7 +52,7 @@ /* Offset 0x08: PTP Interrupt Status */ #define MV88E6XXX_PTP_IRQ_STATUS 0x08 -/* Per-Port PTP Registers */ +/* Per-Port 6352 PTP Registers */ /* Offset 0x00: PTP Configuration 0 */ #define MV88E6XXX_PORT_PTP_CFG0 0x00 #define MV88E6XXX_PORT_PTP_CFG0_TSPEC_SHIFT 12 @@ -123,6 +129,10 @@ int mv88e6xxx_get_ts_info(struct dsa_switch *ds, int port, int mv88e6xxx_hwtstamp_setup(struct mv88e6xxx_chip *chip); void mv88e6xxx_hwtstamp_free(struct mv88e6xxx_chip *chip); +int mv88e6352_hwtstamp_port_enable(struct mv88e6xxx_chip *chip, int port); +int mv88e6352_hwtstamp_port_disable(struct mv88e6xxx_chip *chip, int port); +int mv88e6165_global_enable(struct mv88e6xxx_chip *chip); +int mv88e6165_global_disable(struct mv88e6xxx_chip *chip); #else /* !CONFIG_NET_DSA_MV88E6XXX_PTP */ diff --git a/drivers/net/dsa/mv88e6xxx/ptp.c b/drivers/net/dsa/mv88e6xxx/ptp.c index bd85e2c390e1..4b336d8d4c67 100644 --- a/drivers/net/dsa/mv88e6xxx/ptp.c +++ b/drivers/net/dsa/mv88e6xxx/ptp.c @@ -16,6 +16,7 @@ #include "chip.h" #include "global2.h" +#include "hwtstamp.h" #include "ptp.h" /* Raw timestamps are in units of 8-ns clock periods. 
*/ @@ -50,7 +51,7 @@ static int mv88e6xxx_tai_write(struct mv88e6xxx_chip *chip, int addr, u16 data) } /* TODO: places where this are called should be using pinctrl */ -static int mv88e6xxx_set_gpio_func(struct mv88e6xxx_chip *chip, int pin, +static int mv88e6352_set_gpio_func(struct mv88e6xxx_chip *chip, int pin, int func, int input) { int err; @@ -65,7 +66,7 @@ static int mv88e6xxx_set_gpio_func(struct mv88e6xxx_chip *chip, int pin, return chip->info->ops->gpio_ops->set_pctl(chip, pin, func); } -static u64 mv88e6xxx_ptp_clock_read(const struct cyclecounter *cc) +static u64 mv88e6352_ptp_clock_read(const struct cyclecounter *cc) { struct mv88e6xxx_chip *chip = cc_to_chip(cc); u16 phc_time[2]; @@ -79,13 +80,27 @@ static u64 mv88e6xxx_ptp_clock_read(const struct cyclecounter *cc) return ((u32)phc_time[1] << 16) | phc_time[0]; } -/* mv88e6xxx_config_eventcap - configure TAI event capture +static u64 mv88e6165_ptp_clock_read(const struct cyclecounter *cc) +{ + struct mv88e6xxx_chip *chip = cc_to_chip(cc); + u16 phc_time[2]; + int err; + + err = mv88e6xxx_tai_read(chip, MV88E6XXX_PTP_GC_TIME_LO, phc_time, + ARRAY_SIZE(phc_time)); + if (err) + return 0; + else + return ((u32)phc_time[1] << 16) | phc_time[0]; +} + +/* mv88e6352_config_eventcap - configure TAI event capture * @event: PTP_CLOCK_PPS (internal) or PTP_CLOCK_EXTTS (external) * @rising: zero for falling-edge trigger, else rising-edge trigger * * This will also reset the capture sequence counter. */ -static int mv88e6xxx_config_eventcap(struct mv88e6xxx_chip *chip, int event, +static int mv88e6352_config_eventcap(struct mv88e6xxx_chip *chip, int event, int rising) { u16 global_config; @@ -118,7 +133,7 @@ static int mv88e6xxx_config_eventcap(struct mv88e6xxx_chip *chip, int event, return err; } -static void mv88e6xxx_tai_event_work(struct work_struct *ugly) +static void mv88e6352_tai_event_work(struct work_struct *ugly) { struct delayed_work *dw = to_delayed_work(ugly); struct mv88e6xxx_chip *chip = dw_tai_event_to_chip(dw); @@ -232,7 +247,7 @@ static int mv88e6xxx_ptp_settime(struct ptp_clock_info *ptp, return 0; } -static int mv88e6xxx_ptp_enable_extts(struct mv88e6xxx_chip *chip, +static int mv88e6352_ptp_enable_extts(struct mv88e6xxx_chip *chip, struct ptp_clock_request *rq, int on) { int rising = (rq->extts.flags & PTP_RISING_EDGE); @@ -250,18 +265,18 @@ static int mv88e6xxx_ptp_enable_extts(struct mv88e6xxx_chip *chip, if (on) { func = MV88E6352_G2_SCRATCH_GPIO_PCTL_EVREQ; - err = mv88e6xxx_set_gpio_func(chip, pin, func, true); + err = mv88e6352_set_gpio_func(chip, pin, func, true); if (err) goto out; schedule_delayed_work(&chip->tai_event_work, TAI_EVENT_WORK_INTERVAL); - err = mv88e6xxx_config_eventcap(chip, PTP_CLOCK_EXTTS, rising); + err = mv88e6352_config_eventcap(chip, PTP_CLOCK_EXTTS, rising); } else { func = MV88E6352_G2_SCRATCH_GPIO_PCTL_GPIO; - err = mv88e6xxx_set_gpio_func(chip, pin, func, true); + err = mv88e6352_set_gpio_func(chip, pin, func, true); cancel_delayed_work_sync(&chip->tai_event_work); } @@ -272,20 +287,20 @@ out: return err; } -static int mv88e6xxx_ptp_enable(struct ptp_clock_info *ptp, +static int mv88e6352_ptp_enable(struct ptp_clock_info *ptp, struct ptp_clock_request *rq, int on) { struct mv88e6xxx_chip *chip = ptp_to_chip(ptp); switch (rq->type) { case PTP_CLK_REQ_EXTTS: - return mv88e6xxx_ptp_enable_extts(chip, rq, on); + return mv88e6352_ptp_enable_extts(chip, rq, on); default: return -EOPNOTSUPP; } } -static int mv88e6xxx_ptp_verify(struct ptp_clock_info *ptp, unsigned int pin, +static int 
mv88e6352_ptp_verify(struct ptp_clock_info *ptp, unsigned int pin, enum ptp_pin_function func, unsigned int chan) { switch (func) { @@ -299,6 +314,55 @@ static int mv88e6xxx_ptp_verify(struct ptp_clock_info *ptp, unsigned int pin, return 0; } +const struct mv88e6xxx_ptp_ops mv88e6352_ptp_ops = { + .clock_read = mv88e6352_ptp_clock_read, + .ptp_enable = mv88e6352_ptp_enable, + .ptp_verify = mv88e6352_ptp_verify, + .event_work = mv88e6352_tai_event_work, + .port_enable = mv88e6352_hwtstamp_port_enable, + .port_disable = mv88e6352_hwtstamp_port_disable, + .n_ext_ts = 1, + .arr0_sts_reg = MV88E6XXX_PORT_PTP_ARR0_STS, + .arr1_sts_reg = MV88E6XXX_PORT_PTP_ARR1_STS, + .dep_sts_reg = MV88E6XXX_PORT_PTP_DEP_STS, + .rx_filters = (1 << HWTSTAMP_FILTER_NONE) | + (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT) | + (1 << HWTSTAMP_FILTER_PTP_V2_L4_SYNC) | + (1 << HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ) | + (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) | + (1 << HWTSTAMP_FILTER_PTP_V2_L2_SYNC) | + (1 << HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ) | + (1 << HWTSTAMP_FILTER_PTP_V2_EVENT) | + (1 << HWTSTAMP_FILTER_PTP_V2_SYNC) | + (1 << HWTSTAMP_FILTER_PTP_V2_DELAY_REQ), +}; + +const struct mv88e6xxx_ptp_ops mv88e6165_ptp_ops = { + .clock_read = mv88e6165_ptp_clock_read, + .global_enable = mv88e6165_global_enable, + .global_disable = mv88e6165_global_disable, + .arr0_sts_reg = MV88E6165_PORT_PTP_ARR0_STS, + .arr1_sts_reg = MV88E6165_PORT_PTP_ARR1_STS, + .dep_sts_reg = MV88E6165_PORT_PTP_DEP_STS, + .rx_filters = (1 << HWTSTAMP_FILTER_NONE) | + (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) | + (1 << HWTSTAMP_FILTER_PTP_V2_L2_SYNC) | + (1 << HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ) | + (1 << HWTSTAMP_FILTER_PTP_V2_EVENT) | + (1 << HWTSTAMP_FILTER_PTP_V2_SYNC) | + (1 << HWTSTAMP_FILTER_PTP_V2_DELAY_REQ), +}; + +static u64 mv88e6xxx_ptp_clock_read(const struct cyclecounter *cc) +{ + struct mv88e6xxx_chip *chip = cc_to_chip(cc); + + if (chip->info->ops->ptp_ops->clock_read) + return chip->info->ops->ptp_ops->clock_read(cc); + + return 0; +} + /* With a 125MHz input clock, the 32-bit timestamp counter overflows in ~34.3 * seconds; this task forces periodic reads so that we don't miss any. 
*/ @@ -317,6 +381,7 @@ static void mv88e6xxx_ptp_overflow_check(struct work_struct *work) int mv88e6xxx_ptp_setup(struct mv88e6xxx_chip *chip) { + const struct mv88e6xxx_ptp_ops *ptp_ops = chip->info->ops->ptp_ops; int i; /* Set up the cycle counter */ @@ -330,14 +395,15 @@ int mv88e6xxx_ptp_setup(struct mv88e6xxx_chip *chip) ktime_to_ns(ktime_get_real())); INIT_DELAYED_WORK(&chip->overflow_work, mv88e6xxx_ptp_overflow_check); - INIT_DELAYED_WORK(&chip->tai_event_work, mv88e6xxx_tai_event_work); + if (ptp_ops->event_work) + INIT_DELAYED_WORK(&chip->tai_event_work, ptp_ops->event_work); chip->ptp_clock_info.owner = THIS_MODULE; snprintf(chip->ptp_clock_info.name, sizeof(chip->ptp_clock_info.name), dev_name(chip->dev)); chip->ptp_clock_info.max_adj = 1000000; - chip->ptp_clock_info.n_ext_ts = 1; + chip->ptp_clock_info.n_ext_ts = ptp_ops->n_ext_ts; chip->ptp_clock_info.n_per_out = 0; chip->ptp_clock_info.n_pins = mv88e6xxx_num_gpio(chip); chip->ptp_clock_info.pps = 0; @@ -355,8 +421,8 @@ int mv88e6xxx_ptp_setup(struct mv88e6xxx_chip *chip) chip->ptp_clock_info.adjtime = mv88e6xxx_ptp_adjtime; chip->ptp_clock_info.gettime64 = mv88e6xxx_ptp_gettime; chip->ptp_clock_info.settime64 = mv88e6xxx_ptp_settime; - chip->ptp_clock_info.enable = mv88e6xxx_ptp_enable; - chip->ptp_clock_info.verify = mv88e6xxx_ptp_verify; + chip->ptp_clock_info.enable = ptp_ops->ptp_enable; + chip->ptp_clock_info.verify = ptp_ops->ptp_verify; chip->ptp_clock_info.do_aux_work = mv88e6xxx_hwtstamp_work; chip->ptp_clock = ptp_clock_register(&chip->ptp_clock_info, chip->dev); @@ -373,7 +439,8 @@ void mv88e6xxx_ptp_free(struct mv88e6xxx_chip *chip) { if (chip->ptp_clock) { cancel_delayed_work_sync(&chip->overflow_work); - cancel_delayed_work_sync(&chip->tai_event_work); + if (chip->info->ops->ptp_ops->event_work) + cancel_delayed_work_sync(&chip->tai_event_work); ptp_clock_unregister(chip->ptp_clock); chip->ptp_clock = NULL; diff --git a/drivers/net/dsa/mv88e6xxx/ptp.h b/drivers/net/dsa/mv88e6xxx/ptp.h index 10f271ab650d..28a030840517 100644 --- a/drivers/net/dsa/mv88e6xxx/ptp.h +++ b/drivers/net/dsa/mv88e6xxx/ptp.h @@ -78,6 +78,71 @@ /* Offset 0x12: Lock Status */ #define MV88E6XXX_TAI_LOCK_STATUS 0x12 +/* Offset 0x00: Ether Type */ +#define MV88E6XXX_PTP_GC_ETYPE 0x00 + +/* 6165 Global Control Registers */ +/* Offset 0x00: Ether Type */ +#define MV88E6XXX_PTP_GC_ETYPE 0x00 + +/* Offset 0x01: Message ID */ +#define MV88E6XXX_PTP_GC_MESSAGE_ID 0x01 + +/* Offset 0x02: Time Stamp Arrive Time */ +#define MV88E6XXX_PTP_GC_TS_ARR_PTR 0x02 + +/* Offset 0x03: Port Arrival Interrupt Enable */ +#define MV88E6XXX_PTP_GC_PORT_ARR_INT_EN 0x03 + +/* Offset 0x04: Port Departure Interrupt Enable */ +#define MV88E6XXX_PTP_GC_PORT_DEP_INT_EN 0x04 + +/* Offset 0x05: Configuration */ +#define MV88E6XXX_PTP_GC_CONFIG 0x05 +#define MV88E6XXX_PTP_GC_CONFIG_DIS_OVERWRITE BIT(1) +#define MV88E6XXX_PTP_GC_CONFIG_DIS_TS BIT(0) + +/* Offset 0x8: Interrupt Status */ +#define MV88E6XXX_PTP_GC_INT_STATUS 0x08 + +/* Offset 0x9/0xa: Global Time */ +#define MV88E6XXX_PTP_GC_TIME_LO 0x09 +#define MV88E6XXX_PTP_GC_TIME_HI 0x0A + +/* 6165 Per Port Registers */ +/* Offset 0: Arrival Time 0 Status */ +#define MV88E6165_PORT_PTP_ARR0_STS 0x00 + +/* Offset 0x01/0x02: PTP Arrival 0 Time */ +#define MV88E6165_PORT_PTP_ARR0_TIME_LO 0x01 +#define MV88E6165_PORT_PTP_ARR0_TIME_HI 0x02 + +/* Offset 0x03: PTP Arrival 0 Sequence ID */ +#define MV88E6165_PORT_PTP_ARR0_SEQID 0x03 + +/* Offset 0x04: PTP Arrival 1 Status */ +#define MV88E6165_PORT_PTP_ARR1_STS 0x04 + +/* Offset 
0x05/0x6E: PTP Arrival 1 Time */ +#define MV88E6165_PORT_PTP_ARR1_TIME_LO 0x05 +#define MV88E6165_PORT_PTP_ARR1_TIME_HI 0x06 + +/* Offset 0x07: PTP Arrival 1 Sequence ID */ +#define MV88E6165_PORT_PTP_ARR1_SEQID 0x07 + +/* Offset 0x08: PTP Departure Status */ +#define MV88E6165_PORT_PTP_DEP_STS 0x08 + +/* Offset 0x09/0x0a: PTP Deperture Time */ +#define MV88E6165_PORT_PTP_DEP_TIME_LO 0x09 +#define MV88E6165_PORT_PTP_DEP_TIME_HI 0x0a + +/* Offset 0x0b: PTP Departure Sequence ID */ +#define MV88E6165_PORT_PTP_DEP_SEQID 0x0b + +/* Offset 0x0d: Port Status */ +#define MV88E6164_PORT_STATUS 0x0d + #ifdef CONFIG_NET_DSA_MV88E6XXX_PTP long mv88e6xxx_hwtstamp_work(struct ptp_clock_info *ptp); @@ -87,6 +152,9 @@ void mv88e6xxx_ptp_free(struct mv88e6xxx_chip *chip); #define ptp_to_chip(ptp) container_of(ptp, struct mv88e6xxx_chip, \ ptp_clock_info) +extern const struct mv88e6xxx_ptp_ops mv88e6352_ptp_ops; +extern const struct mv88e6xxx_ptp_ops mv88e6165_ptp_ops; + #else /* !CONFIG_NET_DSA_MV88E6XXX_PTP */ static inline long mv88e6xxx_hwtstamp_work(struct ptp_clock_info *ptp) @@ -103,6 +171,9 @@ static inline void mv88e6xxx_ptp_free(struct mv88e6xxx_chip *chip) { } +static const struct mv88e6xxx_ptp_ops mv88e6352_ptp_ops = {}; +static const struct mv88e6xxx_ptp_ops mv88e6165_ptp_ops = {}; + #endif /* CONFIG_NET_DSA_MV88E6XXX_PTP */ #endif /* _MV88E6XXX_PTP_H */ diff --git a/drivers/net/dsa/realtek-smi.c b/drivers/net/dsa/realtek-smi.c new file mode 100644 index 000000000000..f941f4582825 --- /dev/null +++ b/drivers/net/dsa/realtek-smi.c @@ -0,0 +1,487 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* Realtek Simple Management Interface (SMI) driver + * It can be discussed how "simple" this interface is. + * + * The SMI protocol piggy-backs the MDIO MDC and MDIO signals levels + * but the protocol is not MDIO at all. Instead it is a Realtek + * pecularity that need to bit-bang the lines in a special way to + * communicate with the switch. + * + * ASICs we intend to support with this driver: + * + * RTL8366 - The original version, apparently + * RTL8369 - Similar enough to have the same datsheet as RTL8366 + * RTL8366RB - Probably reads out "RTL8366 revision B", has a quite + * different register layout from the other two + * RTL8366S - Is this "RTL8366 super"? 
+ * RTL8367 - Has an OpenWRT driver as well + * RTL8368S - Seems to be an alternative name for RTL8366RB + * RTL8370 - Also uses SMI + * + * Copyright (C) 2017 Linus Walleij <linus.walleij@linaro.org> + * Copyright (C) 2010 Antti Seppälä <a.seppala@gmail.com> + * Copyright (C) 2010 Roman Yeryomin <roman@advem.lv> + * Copyright (C) 2011 Colin Leitner <colin.leitner@googlemail.com> + * Copyright (C) 2009-2010 Gabor Juhos <juhosg@openwrt.org> + */ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/device.h> +#include <linux/spinlock.h> +#include <linux/skbuff.h> +#include <linux/of.h> +#include <linux/of_device.h> +#include <linux/of_mdio.h> +#include <linux/delay.h> +#include <linux/gpio/consumer.h> +#include <linux/platform_device.h> +#include <linux/regmap.h> +#include <linux/bitops.h> +#include <linux/if_bridge.h> + +#include "realtek-smi.h" + +#define REALTEK_SMI_ACK_RETRY_COUNT 5 +#define REALTEK_SMI_HW_STOP_DELAY 25 /* msecs */ +#define REALTEK_SMI_HW_START_DELAY 100 /* msecs */ + +static inline void realtek_smi_clk_delay(struct realtek_smi *smi) +{ + ndelay(smi->clk_delay); +} + +static void realtek_smi_start(struct realtek_smi *smi) +{ + /* Set GPIO pins to output mode, with initial state: + * SCK = 0, SDA = 1 + */ + gpiod_direction_output(smi->mdc, 0); + gpiod_direction_output(smi->mdio, 1); + realtek_smi_clk_delay(smi); + + /* CLK 1: 0 -> 1, 1 -> 0 */ + gpiod_set_value(smi->mdc, 1); + realtek_smi_clk_delay(smi); + gpiod_set_value(smi->mdc, 0); + realtek_smi_clk_delay(smi); + + /* CLK 2: */ + gpiod_set_value(smi->mdc, 1); + realtek_smi_clk_delay(smi); + gpiod_set_value(smi->mdio, 0); + realtek_smi_clk_delay(smi); + gpiod_set_value(smi->mdc, 0); + realtek_smi_clk_delay(smi); + gpiod_set_value(smi->mdio, 1); +} + +static void realtek_smi_stop(struct realtek_smi *smi) +{ + realtek_smi_clk_delay(smi); + gpiod_set_value(smi->mdio, 0); + gpiod_set_value(smi->mdc, 1); + realtek_smi_clk_delay(smi); + gpiod_set_value(smi->mdio, 1); + realtek_smi_clk_delay(smi); + gpiod_set_value(smi->mdc, 1); + realtek_smi_clk_delay(smi); + gpiod_set_value(smi->mdc, 0); + realtek_smi_clk_delay(smi); + gpiod_set_value(smi->mdc, 1); + + /* Add a click */ + realtek_smi_clk_delay(smi); + gpiod_set_value(smi->mdc, 0); + realtek_smi_clk_delay(smi); + gpiod_set_value(smi->mdc, 1); + + /* Set GPIO pins to input mode */ + gpiod_direction_input(smi->mdio); + gpiod_direction_input(smi->mdc); +} + +static void realtek_smi_write_bits(struct realtek_smi *smi, u32 data, u32 len) +{ + for (; len > 0; len--) { + realtek_smi_clk_delay(smi); + + /* Prepare data */ + gpiod_set_value(smi->mdio, !!(data & (1 << (len - 1)))); + realtek_smi_clk_delay(smi); + + /* Clocking */ + gpiod_set_value(smi->mdc, 1); + realtek_smi_clk_delay(smi); + gpiod_set_value(smi->mdc, 0); + } +} + +static void realtek_smi_read_bits(struct realtek_smi *smi, u32 len, u32 *data) +{ + gpiod_direction_input(smi->mdio); + + for (*data = 0; len > 0; len--) { + u32 u; + + realtek_smi_clk_delay(smi); + + /* Clocking */ + gpiod_set_value(smi->mdc, 1); + realtek_smi_clk_delay(smi); + u = !!gpiod_get_value(smi->mdio); + gpiod_set_value(smi->mdc, 0); + + *data |= (u << (len - 1)); + } + + gpiod_direction_output(smi->mdio, 0); +} + +static int realtek_smi_wait_for_ack(struct realtek_smi *smi) +{ + int retry_cnt; + + retry_cnt = 0; + do { + u32 ack; + + realtek_smi_read_bits(smi, 1, &ack); + if (ack == 0) + break; + + if (++retry_cnt > REALTEK_SMI_ACK_RETRY_COUNT) { + dev_err(smi->dev, "ACK timeout\n"); + return -ETIMEDOUT; + } + } while (1); + + 
return 0; +} + +static int realtek_smi_write_byte(struct realtek_smi *smi, u8 data) +{ + realtek_smi_write_bits(smi, data, 8); + return realtek_smi_wait_for_ack(smi); +} + +static int realtek_smi_write_byte_noack(struct realtek_smi *smi, u8 data) +{ + realtek_smi_write_bits(smi, data, 8); + return 0; +} + +static int realtek_smi_read_byte0(struct realtek_smi *smi, u8 *data) +{ + u32 t; + + /* Read data */ + realtek_smi_read_bits(smi, 8, &t); + *data = (t & 0xff); + + /* Send an ACK */ + realtek_smi_write_bits(smi, 0x00, 1); + + return 0; +} + +static int realtek_smi_read_byte1(struct realtek_smi *smi, u8 *data) +{ + u32 t; + + /* Read data */ + realtek_smi_read_bits(smi, 8, &t); + *data = (t & 0xff); + + /* Send an ACK */ + realtek_smi_write_bits(smi, 0x01, 1); + + return 0; +} + +static int realtek_smi_read_reg(struct realtek_smi *smi, u32 addr, u32 *data) +{ + unsigned long flags; + u8 lo = 0; + u8 hi = 0; + int ret; + + spin_lock_irqsave(&smi->lock, flags); + + realtek_smi_start(smi); + + /* Send READ command */ + ret = realtek_smi_write_byte(smi, smi->cmd_read); + if (ret) + goto out; + + /* Set ADDR[7:0] */ + ret = realtek_smi_write_byte(smi, addr & 0xff); + if (ret) + goto out; + + /* Set ADDR[15:8] */ + ret = realtek_smi_write_byte(smi, addr >> 8); + if (ret) + goto out; + + /* Read DATA[7:0] */ + realtek_smi_read_byte0(smi, &lo); + /* Read DATA[15:8] */ + realtek_smi_read_byte1(smi, &hi); + + *data = ((u32)lo) | (((u32)hi) << 8); + + ret = 0; + + out: + realtek_smi_stop(smi); + spin_unlock_irqrestore(&smi->lock, flags); + + return ret; +} + +static int realtek_smi_write_reg(struct realtek_smi *smi, + u32 addr, u32 data, bool ack) +{ + unsigned long flags; + int ret; + + spin_lock_irqsave(&smi->lock, flags); + + realtek_smi_start(smi); + + /* Send WRITE command */ + ret = realtek_smi_write_byte(smi, smi->cmd_write); + if (ret) + goto out; + + /* Set ADDR[7:0] */ + ret = realtek_smi_write_byte(smi, addr & 0xff); + if (ret) + goto out; + + /* Set ADDR[15:8] */ + ret = realtek_smi_write_byte(smi, addr >> 8); + if (ret) + goto out; + + /* Write DATA[7:0] */ + ret = realtek_smi_write_byte(smi, data & 0xff); + if (ret) + goto out; + + /* Write DATA[15:8] */ + if (ack) + ret = realtek_smi_write_byte(smi, data >> 8); + else + ret = realtek_smi_write_byte_noack(smi, data >> 8); + if (ret) + goto out; + + ret = 0; + + out: + realtek_smi_stop(smi); + spin_unlock_irqrestore(&smi->lock, flags); + + return ret; +} + +/* There is one single case when we need to use this accessor and that + * is when issueing soft reset. Since the device reset as soon as we write + * that bit, no ACK will come back for natural reasons. 
+ */ +int realtek_smi_write_reg_noack(struct realtek_smi *smi, u32 addr, + u32 data) +{ + return realtek_smi_write_reg(smi, addr, data, false); +} +EXPORT_SYMBOL_GPL(realtek_smi_write_reg_noack); + +/* Regmap accessors */ + +static int realtek_smi_write(void *ctx, u32 reg, u32 val) +{ + struct realtek_smi *smi = ctx; + + return realtek_smi_write_reg(smi, reg, val, true); +} + +static int realtek_smi_read(void *ctx, u32 reg, u32 *val) +{ + struct realtek_smi *smi = ctx; + + return realtek_smi_read_reg(smi, reg, val); +} + +static const struct regmap_config realtek_smi_mdio_regmap_config = { + .reg_bits = 10, /* A4..A0 R4..R0 */ + .val_bits = 16, + .reg_stride = 1, + /* PHY regs are at 0x8000 */ + .max_register = 0xffff, + .reg_format_endian = REGMAP_ENDIAN_BIG, + .reg_read = realtek_smi_read, + .reg_write = realtek_smi_write, + .cache_type = REGCACHE_NONE, +}; + +static int realtek_smi_mdio_read(struct mii_bus *bus, int addr, int regnum) +{ + struct realtek_smi *smi = bus->priv; + + return smi->ops->phy_read(smi, addr, regnum); +} + +static int realtek_smi_mdio_write(struct mii_bus *bus, int addr, int regnum, + u16 val) +{ + struct realtek_smi *smi = bus->priv; + + return smi->ops->phy_write(smi, addr, regnum, val); +} + +int realtek_smi_setup_mdio(struct realtek_smi *smi) +{ + struct device_node *mdio_np; + int ret; + + mdio_np = of_find_compatible_node(smi->dev->of_node, NULL, + "realtek,smi-mdio"); + if (!mdio_np) { + dev_err(smi->dev, "no MDIO bus node\n"); + return -ENODEV; + } + + smi->slave_mii_bus = devm_mdiobus_alloc(smi->dev); + if (!smi->slave_mii_bus) + return -ENOMEM; + smi->slave_mii_bus->priv = smi; + smi->slave_mii_bus->name = "SMI slave MII"; + smi->slave_mii_bus->read = realtek_smi_mdio_read; + smi->slave_mii_bus->write = realtek_smi_mdio_write; + snprintf(smi->slave_mii_bus->id, MII_BUS_ID_SIZE, "SMI-%d", + smi->ds->index); + smi->slave_mii_bus->dev.of_node = mdio_np; + smi->slave_mii_bus->parent = smi->dev; + smi->ds->slave_mii_bus = smi->slave_mii_bus; + + ret = of_mdiobus_register(smi->slave_mii_bus, mdio_np); + if (ret) { + dev_err(smi->dev, "unable to register MDIO bus %s\n", + smi->slave_mii_bus->id); + of_node_put(mdio_np); + } + + return 0; +} + +static int realtek_smi_probe(struct platform_device *pdev) +{ + const struct realtek_smi_variant *var; + struct device *dev = &pdev->dev; + struct realtek_smi *smi; + struct device_node *np; + int ret; + + var = of_device_get_match_data(dev); + np = dev->of_node; + + smi = devm_kzalloc(dev, sizeof(*smi), GFP_KERNEL); + if (!smi) + return -ENOMEM; + smi->map = devm_regmap_init(dev, NULL, smi, + &realtek_smi_mdio_regmap_config); + if (IS_ERR(smi->map)) { + ret = PTR_ERR(smi->map); + dev_err(dev, "regmap init failed: %d\n", ret); + return ret; + } + + /* Link forward and backward */ + smi->dev = dev; + smi->clk_delay = var->clk_delay; + smi->cmd_read = var->cmd_read; + smi->cmd_write = var->cmd_write; + smi->ops = var->ops; + + dev_set_drvdata(dev, smi); + spin_lock_init(&smi->lock); + + /* TODO: if power is software controlled, set up any regulators here */ + + /* Assert then deassert RESET */ + smi->reset = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH); + if (IS_ERR(smi->reset)) { + dev_err(dev, "failed to get RESET GPIO\n"); + return PTR_ERR(smi->reset); + } + msleep(REALTEK_SMI_HW_STOP_DELAY); + gpiod_set_value(smi->reset, 0); + msleep(REALTEK_SMI_HW_START_DELAY); + dev_info(dev, "deasserted RESET\n"); + + /* Fetch MDIO pins */ + smi->mdc = devm_gpiod_get_optional(dev, "mdc", GPIOD_OUT_LOW); + if 
(IS_ERR(smi->mdc)) + return PTR_ERR(smi->mdc); + smi->mdio = devm_gpiod_get_optional(dev, "mdio", GPIOD_OUT_LOW); + if (IS_ERR(smi->mdio)) + return PTR_ERR(smi->mdio); + + smi->leds_disabled = of_property_read_bool(np, "realtek,disable-leds"); + + ret = smi->ops->detect(smi); + if (ret) { + dev_err(dev, "unable to detect switch\n"); + return ret; + } + + smi->ds = dsa_switch_alloc(dev, smi->num_ports); + if (!smi->ds) + return -ENOMEM; + smi->ds->priv = smi; + + smi->ds->ops = var->ds_ops; + ret = dsa_register_switch(smi->ds); + if (ret) { + dev_err(dev, "unable to register switch ret = %d\n", ret); + return ret; + } + return 0; +} + +static int realtek_smi_remove(struct platform_device *pdev) +{ + struct realtek_smi *smi = dev_get_drvdata(&pdev->dev); + + dsa_unregister_switch(smi->ds); + gpiod_set_value(smi->reset, 1); + + return 0; +} + +static const struct of_device_id realtek_smi_of_match[] = { + { + .compatible = "realtek,rtl8366rb", + .data = &rtl8366rb_variant, + }, + { + /* FIXME: add support for RTL8366S and more */ + .compatible = "realtek,rtl8366s", + .data = NULL, + }, + { /* sentinel */ }, +}; +MODULE_DEVICE_TABLE(of, realtek_smi_of_match); + +static struct platform_driver realtek_smi_driver = { + .driver = { + .name = "realtek-smi", + .of_match_table = of_match_ptr(realtek_smi_of_match), + }, + .probe = realtek_smi_probe, + .remove = realtek_smi_remove, +}; +module_platform_driver(realtek_smi_driver); diff --git a/drivers/net/dsa/realtek-smi.h b/drivers/net/dsa/realtek-smi.h new file mode 100644 index 000000000000..9a63b51e1d82 --- /dev/null +++ b/drivers/net/dsa/realtek-smi.h @@ -0,0 +1,144 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* Realtek SMI interface driver defines + * + * Copyright (C) 2017 Linus Walleij <linus.walleij@linaro.org> + * Copyright (C) 2009-2010 Gabor Juhos <juhosg@openwrt.org> + */ + +#ifndef _REALTEK_SMI_H +#define _REALTEK_SMI_H + +#include <linux/phy.h> +#include <linux/platform_device.h> +#include <linux/gpio/consumer.h> +#include <net/dsa.h> + +struct realtek_smi_ops; +struct dentry; +struct inode; +struct file; + +struct rtl8366_mib_counter { + unsigned int base; + unsigned int offset; + unsigned int length; + const char *name; +}; + +struct rtl8366_vlan_mc { + u16 vid; + u16 untag; + u16 member; + u8 fid; + u8 priority; +}; + +struct rtl8366_vlan_4k { + u16 vid; + u16 untag; + u16 member; + u8 fid; +}; + +struct realtek_smi { + struct device *dev; + struct gpio_desc *reset; + struct gpio_desc *mdc; + struct gpio_desc *mdio; + struct regmap *map; + struct mii_bus *slave_mii_bus; + + unsigned int clk_delay; + u8 cmd_read; + u8 cmd_write; + spinlock_t lock; /* Locks around command writes */ + struct dsa_switch *ds; + struct irq_domain *irqdomain; + bool leds_disabled; + + unsigned int cpu_port; + unsigned int num_ports; + unsigned int num_vlan_mc; + unsigned int num_mib_counters; + struct rtl8366_mib_counter *mib_counters; + + const struct realtek_smi_ops *ops; + + int vlan_enabled; + int vlan4k_enabled; + + char buf[4096]; +}; + +/** + * struct realtek_smi_ops - vtable for the per-SMI-chiptype operations + * @detect: detects the chiptype + */ +struct realtek_smi_ops { + int (*detect)(struct realtek_smi *smi); + int (*reset_chip)(struct realtek_smi *smi); + int (*setup)(struct realtek_smi *smi); + void (*cleanup)(struct realtek_smi *smi); + int (*get_mib_counter)(struct realtek_smi *smi, + int port, + struct rtl8366_mib_counter *mib, + u64 *mibvalue); + int (*get_vlan_mc)(struct realtek_smi *smi, u32 index, + struct rtl8366_vlan_mc *vlanmc); + 
int (*set_vlan_mc)(struct realtek_smi *smi, u32 index, + const struct rtl8366_vlan_mc *vlanmc); + int (*get_vlan_4k)(struct realtek_smi *smi, u32 vid, + struct rtl8366_vlan_4k *vlan4k); + int (*set_vlan_4k)(struct realtek_smi *smi, + const struct rtl8366_vlan_4k *vlan4k); + int (*get_mc_index)(struct realtek_smi *smi, int port, int *val); + int (*set_mc_index)(struct realtek_smi *smi, int port, int index); + bool (*is_vlan_valid)(struct realtek_smi *smi, unsigned int vlan); + int (*enable_vlan)(struct realtek_smi *smi, bool enable); + int (*enable_vlan4k)(struct realtek_smi *smi, bool enable); + int (*enable_port)(struct realtek_smi *smi, int port, bool enable); + int (*phy_read)(struct realtek_smi *smi, int phy, int regnum); + int (*phy_write)(struct realtek_smi *smi, int phy, int regnum, + u16 val); +}; + +struct realtek_smi_variant { + const struct dsa_switch_ops *ds_ops; + const struct realtek_smi_ops *ops; + unsigned int clk_delay; + u8 cmd_read; + u8 cmd_write; +}; + +/* SMI core calls */ +int realtek_smi_write_reg_noack(struct realtek_smi *smi, u32 addr, + u32 data); +int realtek_smi_setup_mdio(struct realtek_smi *smi); + +/* RTL8366 library helpers */ +int rtl8366_mc_is_used(struct realtek_smi *smi, int mc_index, int *used); +int rtl8366_set_vlan(struct realtek_smi *smi, int vid, u32 member, + u32 untag, u32 fid); +int rtl8366_get_pvid(struct realtek_smi *smi, int port, int *val); +int rtl8366_set_pvid(struct realtek_smi *smi, unsigned int port, + unsigned int vid); +int rtl8366_enable_vlan4k(struct realtek_smi *smi, bool enable); +int rtl8366_enable_vlan(struct realtek_smi *smi, bool enable); +int rtl8366_reset_vlan(struct realtek_smi *smi); +int rtl8366_init_vlan(struct realtek_smi *smi); +int rtl8366_vlan_filtering(struct dsa_switch *ds, int port, + bool vlan_filtering); +int rtl8366_vlan_prepare(struct dsa_switch *ds, int port, + const struct switchdev_obj_port_vlan *vlan); +void rtl8366_vlan_add(struct dsa_switch *ds, int port, + const struct switchdev_obj_port_vlan *vlan); +int rtl8366_vlan_del(struct dsa_switch *ds, int port, + const struct switchdev_obj_port_vlan *vlan); +void rtl8366_get_strings(struct dsa_switch *ds, int port, u32 stringset, + uint8_t *data); +int rtl8366_get_sset_count(struct dsa_switch *ds, int port, int sset); +void rtl8366_get_ethtool_stats(struct dsa_switch *ds, int port, uint64_t *data); + +extern const struct realtek_smi_variant rtl8366rb_variant; + +#endif /* _REALTEK_SMI_H */ diff --git a/drivers/net/dsa/rtl8366.c b/drivers/net/dsa/rtl8366.c new file mode 100644 index 000000000000..6dedd43442cc --- /dev/null +++ b/drivers/net/dsa/rtl8366.c @@ -0,0 +1,515 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Realtek SMI library helpers for the RTL8366x variants + * RTL8366RB and RTL8366S + * + * Copyright (C) 2017 Linus Walleij <linus.walleij@linaro.org> + * Copyright (C) 2009-2010 Gabor Juhos <juhosg@openwrt.org> + * Copyright (C) 2010 Antti Seppälä <a.seppala@gmail.com> + * Copyright (C) 2010 Roman Yeryomin <roman@advem.lv> + * Copyright (C) 2011 Colin Leitner <colin.leitner@googlemail.com> + */ +#include <linux/if_bridge.h> +#include <net/dsa.h> + +#include "realtek-smi.h" + +int rtl8366_mc_is_used(struct realtek_smi *smi, int mc_index, int *used) +{ + int ret; + int i; + + *used = 0; + for (i = 0; i < smi->num_ports; i++) { + int index = 0; + + ret = smi->ops->get_mc_index(smi, i, &index); + if (ret) + return ret; + + if (mc_index == index) { + *used = 1; + break; + } + } + + return 0; +} +EXPORT_SYMBOL_GPL(rtl8366_mc_is_used); + +int 
rtl8366_set_vlan(struct realtek_smi *smi, int vid, u32 member, + u32 untag, u32 fid) +{ + struct rtl8366_vlan_4k vlan4k; + int ret; + int i; + + /* Update the 4K table */ + ret = smi->ops->get_vlan_4k(smi, vid, &vlan4k); + if (ret) + return ret; + + vlan4k.member = member; + vlan4k.untag = untag; + vlan4k.fid = fid; + ret = smi->ops->set_vlan_4k(smi, &vlan4k); + if (ret) + return ret; + + /* Try to find an existing MC entry for this VID */ + for (i = 0; i < smi->num_vlan_mc; i++) { + struct rtl8366_vlan_mc vlanmc; + + ret = smi->ops->get_vlan_mc(smi, i, &vlanmc); + if (ret) + return ret; + + if (vid == vlanmc.vid) { + /* update the MC entry */ + vlanmc.member = member; + vlanmc.untag = untag; + vlanmc.fid = fid; + + ret = smi->ops->set_vlan_mc(smi, i, &vlanmc); + break; + } + } + + return ret; +} +EXPORT_SYMBOL_GPL(rtl8366_set_vlan); + +int rtl8366_get_pvid(struct realtek_smi *smi, int port, int *val) +{ + struct rtl8366_vlan_mc vlanmc; + int ret; + int index; + + ret = smi->ops->get_mc_index(smi, port, &index); + if (ret) + return ret; + + ret = smi->ops->get_vlan_mc(smi, index, &vlanmc); + if (ret) + return ret; + + *val = vlanmc.vid; + return 0; +} +EXPORT_SYMBOL_GPL(rtl8366_get_pvid); + +int rtl8366_set_pvid(struct realtek_smi *smi, unsigned int port, + unsigned int vid) +{ + struct rtl8366_vlan_mc vlanmc; + struct rtl8366_vlan_4k vlan4k; + int ret; + int i; + + /* Try to find an existing MC entry for this VID */ + for (i = 0; i < smi->num_vlan_mc; i++) { + ret = smi->ops->get_vlan_mc(smi, i, &vlanmc); + if (ret) + return ret; + + if (vid == vlanmc.vid) { + ret = smi->ops->set_vlan_mc(smi, i, &vlanmc); + if (ret) + return ret; + + ret = smi->ops->set_mc_index(smi, port, i); + return ret; + } + } + + /* We have no MC entry for this VID, try to find an empty one */ + for (i = 0; i < smi->num_vlan_mc; i++) { + ret = smi->ops->get_vlan_mc(smi, i, &vlanmc); + if (ret) + return ret; + + if (vlanmc.vid == 0 && vlanmc.member == 0) { + /* Update the entry from the 4K table */ + ret = smi->ops->get_vlan_4k(smi, vid, &vlan4k); + if (ret) + return ret; + + vlanmc.vid = vid; + vlanmc.member = vlan4k.member; + vlanmc.untag = vlan4k.untag; + vlanmc.fid = vlan4k.fid; + ret = smi->ops->set_vlan_mc(smi, i, &vlanmc); + if (ret) + return ret; + + ret = smi->ops->set_mc_index(smi, port, i); + return ret; + } + } + + /* MC table is full, try to find an unused entry and replace it */ + for (i = 0; i < smi->num_vlan_mc; i++) { + int used; + + ret = rtl8366_mc_is_used(smi, i, &used); + if (ret) + return ret; + + if (!used) { + /* Update the entry from the 4K table */ + ret = smi->ops->get_vlan_4k(smi, vid, &vlan4k); + if (ret) + return ret; + + vlanmc.vid = vid; + vlanmc.member = vlan4k.member; + vlanmc.untag = vlan4k.untag; + vlanmc.fid = vlan4k.fid; + ret = smi->ops->set_vlan_mc(smi, i, &vlanmc); + if (ret) + return ret; + + ret = smi->ops->set_mc_index(smi, port, i); + return ret; + } + } + + dev_err(smi->dev, + "all VLAN member configurations are in use\n"); + + return -ENOSPC; +} +EXPORT_SYMBOL_GPL(rtl8366_set_pvid); + +int rtl8366_enable_vlan4k(struct realtek_smi *smi, bool enable) +{ + int ret; + + /* To enable 4k VLAN, ordinary VLAN must be enabled first, + * but if we disable 4k VLAN it is fine to leave ordinary + * VLAN enabled. 
+ */ + if (enable) { + /* Make sure VLAN is ON */ + ret = smi->ops->enable_vlan(smi, true); + if (ret) + return ret; + + smi->vlan_enabled = true; + } + + ret = smi->ops->enable_vlan4k(smi, enable); + if (ret) + return ret; + + smi->vlan4k_enabled = enable; + return 0; +} +EXPORT_SYMBOL_GPL(rtl8366_enable_vlan4k); + +int rtl8366_enable_vlan(struct realtek_smi *smi, bool enable) +{ + int ret; + + ret = smi->ops->enable_vlan(smi, enable); + if (ret) + return ret; + + smi->vlan_enabled = enable; + + /* If we turn VLAN off, make sure that we turn off + * 4k VLAN as well, if that happened to be on. + */ + if (!enable) { + smi->vlan4k_enabled = false; + ret = smi->ops->enable_vlan4k(smi, false); + } + + return ret; +} +EXPORT_SYMBOL_GPL(rtl8366_enable_vlan); + +int rtl8366_reset_vlan(struct realtek_smi *smi) +{ + struct rtl8366_vlan_mc vlanmc; + int ret; + int i; + + rtl8366_enable_vlan(smi, false); + rtl8366_enable_vlan4k(smi, false); + + /* Clear the 16 VLAN member configurations */ + vlanmc.vid = 0; + vlanmc.priority = 0; + vlanmc.member = 0; + vlanmc.untag = 0; + vlanmc.fid = 0; + for (i = 0; i < smi->num_vlan_mc; i++) { + ret = smi->ops->set_vlan_mc(smi, i, &vlanmc); + if (ret) + return ret; + } + + return 0; +} +EXPORT_SYMBOL_GPL(rtl8366_reset_vlan); + +int rtl8366_init_vlan(struct realtek_smi *smi) +{ + int port; + int ret; + + ret = rtl8366_reset_vlan(smi); + if (ret) + return ret; + + /* Loop over the available ports, for each port, associate + * it with the VLAN (port+1) + */ + for (port = 0; port < smi->num_ports; port++) { + u32 mask; + + if (port == smi->cpu_port) + /* For the CPU port, make all ports members of this + * VLAN. + */ + mask = GENMASK(smi->num_ports - 1, 0); + else + /* For all other ports, enable itself plus the + * CPU port. + */ + mask = BIT(port) | BIT(smi->cpu_port); + + /* For each port, set the port as member of VLAN (port+1) + * and untagged, except for the CPU port: the CPU port (5) is + * member of VLAN 6 and so are ALL the other ports as well. + * Use filter 0 (no filter). + */ + dev_info(smi->dev, "VLAN%d port mask for port %d, %08x\n", + (port + 1), port, mask); + ret = rtl8366_set_vlan(smi, (port + 1), mask, mask, 0); + if (ret) + return ret; + + dev_info(smi->dev, "VLAN%d port %d, PVID set to %d\n", + (port + 1), port, (port + 1)); + ret = rtl8366_set_pvid(smi, port, (port + 1)); + if (ret) + return ret; + } + + return rtl8366_enable_vlan(smi, true); +} +EXPORT_SYMBOL_GPL(rtl8366_init_vlan); + +int rtl8366_vlan_filtering(struct dsa_switch *ds, int port, bool vlan_filtering) +{ + struct realtek_smi *smi = ds->priv; + struct rtl8366_vlan_4k vlan4k; + int ret; + + if (!smi->ops->is_vlan_valid(smi, port)) + return -EINVAL; + + dev_info(smi->dev, "%s filtering on port %d\n", + vlan_filtering ? "enable" : "disable", + port); + + /* TODO: + * The hardware support filter ID (FID) 0..7, I have no clue how to + * support this in the driver when the callback only says on/off. 
+ */ + ret = smi->ops->get_vlan_4k(smi, port, &vlan4k); + if (ret) + return ret; + + /* Just set the filter to FID 1 for now then */ + ret = rtl8366_set_vlan(smi, port, + vlan4k.member, + vlan4k.untag, + 1); + if (ret) + return ret; + + return 0; +} +EXPORT_SYMBOL_GPL(rtl8366_vlan_filtering); + +int rtl8366_vlan_prepare(struct dsa_switch *ds, int port, + const struct switchdev_obj_port_vlan *vlan) +{ + struct realtek_smi *smi = ds->priv; + int ret; + + if (!smi->ops->is_vlan_valid(smi, port)) + return -EINVAL; + + dev_info(smi->dev, "prepare VLANs %04x..%04x\n", + vlan->vid_begin, vlan->vid_end); + + /* Enable VLAN in the hardware + * FIXME: what's with this 4k business? + * Just rtl8366_enable_vlan() seems inconclusive. + */ + ret = rtl8366_enable_vlan4k(smi, true); + if (ret) + return ret; + + return 0; +} +EXPORT_SYMBOL_GPL(rtl8366_vlan_prepare); + +void rtl8366_vlan_add(struct dsa_switch *ds, int port, + const struct switchdev_obj_port_vlan *vlan) +{ + bool untagged = !!(vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED); + bool pvid = !!(vlan->flags & BRIDGE_VLAN_INFO_PVID); + struct realtek_smi *smi = ds->priv; + u32 member = 0; + u32 untag = 0; + u16 vid; + int ret; + + if (!smi->ops->is_vlan_valid(smi, port)) + return; + + dev_info(smi->dev, "add VLAN on port %d, %s, %s\n", + port, + untagged ? "untagged" : "tagged", + pvid ? " PVID" : "no PVID"); + + if (dsa_is_dsa_port(ds, port) || dsa_is_cpu_port(ds, port)) + dev_err(smi->dev, "port is DSA or CPU port\n"); + + for (vid = vlan->vid_begin; vid <= vlan->vid_end; ++vid) { + int pvid_val = 0; + + dev_info(smi->dev, "add VLAN %04x\n", vid); + member |= BIT(port); + + if (untagged) + untag |= BIT(port); + + /* To ensure that we have a valid MC entry for this VLAN, + * initialize the port VLAN ID here. + */ + ret = rtl8366_get_pvid(smi, port, &pvid_val); + if (ret < 0) { + dev_err(smi->dev, "could not lookup PVID for port %d\n", + port); + return; + } + if (pvid_val == 0) { + ret = rtl8366_set_pvid(smi, port, vid); + if (ret < 0) + return; + } + } + + ret = rtl8366_set_vlan(smi, port, member, untag, 0); + if (ret) + dev_err(smi->dev, + "failed to set up VLAN %04x", + vid); +} +EXPORT_SYMBOL_GPL(rtl8366_vlan_add); + +int rtl8366_vlan_del(struct dsa_switch *ds, int port, + const struct switchdev_obj_port_vlan *vlan) +{ + struct realtek_smi *smi = ds->priv; + u16 vid; + int ret; + + dev_info(smi->dev, "del VLAN on port %d\n", port); + + for (vid = vlan->vid_begin; vid <= vlan->vid_end; ++vid) { + int i; + + dev_info(smi->dev, "del VLAN %04x\n", vid); + + for (i = 0; i < smi->num_vlan_mc; i++) { + struct rtl8366_vlan_mc vlanmc; + + ret = smi->ops->get_vlan_mc(smi, i, &vlanmc); + if (ret) + return ret; + + if (vid == vlanmc.vid) { + /* clear VLAN member configurations */ + vlanmc.vid = 0; + vlanmc.priority = 0; + vlanmc.member = 0; + vlanmc.untag = 0; + vlanmc.fid = 0; + + ret = smi->ops->set_vlan_mc(smi, i, &vlanmc); + if (ret) { + dev_err(smi->dev, + "failed to remove VLAN %04x\n", + vid); + return ret; + } + break; + } + } + } + + return 0; +} +EXPORT_SYMBOL_GPL(rtl8366_vlan_del); + +void rtl8366_get_strings(struct dsa_switch *ds, int port, u32 stringset, + uint8_t *data) +{ + struct realtek_smi *smi = ds->priv; + struct rtl8366_mib_counter *mib; + int i; + + if (port >= smi->num_ports) + return; + + for (i = 0; i < smi->num_mib_counters; i++) { + mib = &smi->mib_counters[i]; + strncpy(data + i * ETH_GSTRING_LEN, + mib->name, ETH_GSTRING_LEN); + } +} +EXPORT_SYMBOL_GPL(rtl8366_get_strings); + +int rtl8366_get_sset_count(struct dsa_switch *ds, int 
port, int sset) +{ + struct realtek_smi *smi = ds->priv; + + /* We only support SS_STATS */ + if (sset != ETH_SS_STATS) + return 0; + if (port >= smi->num_ports) + return -EINVAL; + + return smi->num_mib_counters; +} +EXPORT_SYMBOL_GPL(rtl8366_get_sset_count); + +void rtl8366_get_ethtool_stats(struct dsa_switch *ds, int port, uint64_t *data) +{ + struct realtek_smi *smi = ds->priv; + int i; + int ret; + + if (port >= smi->num_ports) + return; + + for (i = 0; i < smi->num_mib_counters; i++) { + struct rtl8366_mib_counter *mib; + u64 mibvalue = 0; + + mib = &smi->mib_counters[i]; + ret = smi->ops->get_mib_counter(smi, port, mib, &mibvalue); + if (ret) { + dev_err(smi->dev, "error reading MIB counter %s\n", + mib->name); + } + data[i] = mibvalue; + } +} +EXPORT_SYMBOL_GPL(rtl8366_get_ethtool_stats); diff --git a/drivers/net/dsa/rtl8366rb.c b/drivers/net/dsa/rtl8366rb.c new file mode 100644 index 000000000000..1e55b9bf8b56 --- /dev/null +++ b/drivers/net/dsa/rtl8366rb.c @@ -0,0 +1,1424 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Realtek SMI subdriver for the Realtek RTL8366RB ethernet switch + * + * This is a sparsely documented chip, the only viable documentation seems + * to be a patched up code drop from the vendor that appear in various + * GPL source trees. + * + * Copyright (C) 2017 Linus Walleij <linus.walleij@linaro.org> + * Copyright (C) 2009-2010 Gabor Juhos <juhosg@openwrt.org> + * Copyright (C) 2010 Antti Seppälä <a.seppala@gmail.com> + * Copyright (C) 2010 Roman Yeryomin <roman@advem.lv> + * Copyright (C) 2011 Colin Leitner <colin.leitner@googlemail.com> + */ + +#include <linux/bitops.h> +#include <linux/etherdevice.h> +#include <linux/interrupt.h> +#include <linux/irqdomain.h> +#include <linux/irqchip/chained_irq.h> +#include <linux/of_irq.h> +#include <linux/regmap.h> + +#include "realtek-smi.h" + +#define RTL8366RB_PORT_NUM_CPU 5 +#define RTL8366RB_NUM_PORTS 6 +#define RTL8366RB_PHY_NO_MAX 4 +#define RTL8366RB_PHY_ADDR_MAX 31 + +/* Switch Global Configuration register */ +#define RTL8366RB_SGCR 0x0000 +#define RTL8366RB_SGCR_EN_BC_STORM_CTRL BIT(0) +#define RTL8366RB_SGCR_MAX_LENGTH(a) ((a) << 4) +#define RTL8366RB_SGCR_MAX_LENGTH_MASK RTL8366RB_SGCR_MAX_LENGTH(0x3) +#define RTL8366RB_SGCR_MAX_LENGTH_1522 RTL8366RB_SGCR_MAX_LENGTH(0x0) +#define RTL8366RB_SGCR_MAX_LENGTH_1536 RTL8366RB_SGCR_MAX_LENGTH(0x1) +#define RTL8366RB_SGCR_MAX_LENGTH_1552 RTL8366RB_SGCR_MAX_LENGTH(0x2) +#define RTL8366RB_SGCR_MAX_LENGTH_9216 RTL8366RB_SGCR_MAX_LENGTH(0x3) +#define RTL8366RB_SGCR_EN_VLAN BIT(13) +#define RTL8366RB_SGCR_EN_VLAN_4KTB BIT(14) + +/* Port Enable Control register */ +#define RTL8366RB_PECR 0x0001 + +/* Switch Security Control registers */ +#define RTL8366RB_SSCR0 0x0002 +#define RTL8366RB_SSCR1 0x0003 +#define RTL8366RB_SSCR2 0x0004 +#define RTL8366RB_SSCR2_DROP_UNKNOWN_DA BIT(0) + +/* Port Mirror Control Register */ +#define RTL8366RB_PMCR 0x0007 +#define RTL8366RB_PMCR_SOURCE_PORT(a) (a) +#define RTL8366RB_PMCR_SOURCE_PORT_MASK 0x000f +#define RTL8366RB_PMCR_MONITOR_PORT(a) ((a) << 4) +#define RTL8366RB_PMCR_MONITOR_PORT_MASK 0x00f0 +#define RTL8366RB_PMCR_MIRROR_RX BIT(8) +#define RTL8366RB_PMCR_MIRROR_TX BIT(9) +#define RTL8366RB_PMCR_MIRROR_SPC BIT(10) +#define RTL8366RB_PMCR_MIRROR_ISO BIT(11) + +/* bits 0..7 = port 0, bits 8..15 = port 1 */ +#define RTL8366RB_PAACR0 0x0010 +/* bits 0..7 = port 2, bits 8..15 = port 3 */ +#define RTL8366RB_PAACR1 0x0011 +/* bits 0..7 = port 4, bits 8..15 = port 5 */ +#define RTL8366RB_PAACR2 0x0012 +#define RTL8366RB_PAACR_SPEED_10M 0 
+#define RTL8366RB_PAACR_SPEED_100M 1 +#define RTL8366RB_PAACR_SPEED_1000M 2 +#define RTL8366RB_PAACR_FULL_DUPLEX BIT(2) +#define RTL8366RB_PAACR_LINK_UP BIT(4) +#define RTL8366RB_PAACR_TX_PAUSE BIT(5) +#define RTL8366RB_PAACR_RX_PAUSE BIT(6) +#define RTL8366RB_PAACR_AN BIT(7) + +#define RTL8366RB_PAACR_CPU_PORT (RTL8366RB_PAACR_SPEED_1000M | \ + RTL8366RB_PAACR_FULL_DUPLEX | \ + RTL8366RB_PAACR_LINK_UP | \ + RTL8366RB_PAACR_TX_PAUSE | \ + RTL8366RB_PAACR_RX_PAUSE) + +/* bits 0..7 = port 0, bits 8..15 = port 1 */ +#define RTL8366RB_PSTAT0 0x0014 +/* bits 0..7 = port 2, bits 8..15 = port 3 */ +#define RTL8366RB_PSTAT1 0x0015 +/* bits 0..7 = port 4, bits 8..15 = port 5 */ +#define RTL8366RB_PSTAT2 0x0016 + +#define RTL8366RB_POWER_SAVING_REG 0x0021 + +/* CPU port control reg */ +#define RTL8368RB_CPU_CTRL_REG 0x0061 +#define RTL8368RB_CPU_PORTS_MSK 0x00FF +/* Enables inserting custom tag length/type 0x8899 */ +#define RTL8368RB_CPU_INSTAG BIT(15) + +#define RTL8366RB_SMAR0 0x0070 /* bits 0..15 */ +#define RTL8366RB_SMAR1 0x0071 /* bits 16..31 */ +#define RTL8366RB_SMAR2 0x0072 /* bits 32..47 */ + +#define RTL8366RB_RESET_CTRL_REG 0x0100 +#define RTL8366RB_CHIP_CTRL_RESET_HW BIT(0) +#define RTL8366RB_CHIP_CTRL_RESET_SW BIT(1) + +#define RTL8366RB_CHIP_ID_REG 0x0509 +#define RTL8366RB_CHIP_ID_8366 0x5937 +#define RTL8366RB_CHIP_VERSION_CTRL_REG 0x050A +#define RTL8366RB_CHIP_VERSION_MASK 0xf + +/* PHY registers control */ +#define RTL8366RB_PHY_ACCESS_CTRL_REG 0x8000 +#define RTL8366RB_PHY_CTRL_READ BIT(0) +#define RTL8366RB_PHY_CTRL_WRITE 0 +#define RTL8366RB_PHY_ACCESS_BUSY_REG 0x8001 +#define RTL8366RB_PHY_INT_BUSY BIT(0) +#define RTL8366RB_PHY_EXT_BUSY BIT(4) +#define RTL8366RB_PHY_ACCESS_DATA_REG 0x8002 +#define RTL8366RB_PHY_EXT_CTRL_REG 0x8010 +#define RTL8366RB_PHY_EXT_WRDATA_REG 0x8011 +#define RTL8366RB_PHY_EXT_RDDATA_REG 0x8012 + +#define RTL8366RB_PHY_REG_MASK 0x1f +#define RTL8366RB_PHY_PAGE_OFFSET 5 +#define RTL8366RB_PHY_PAGE_MASK (0xf << 5) +#define RTL8366RB_PHY_NO_OFFSET 9 +#define RTL8366RB_PHY_NO_MASK (0x1f << 9) + +#define RTL8366RB_VLAN_INGRESS_CTRL2_REG 0x037f + +/* LED control registers */ +#define RTL8366RB_LED_BLINKRATE_REG 0x0430 +#define RTL8366RB_LED_BLINKRATE_MASK 0x0007 +#define RTL8366RB_LED_BLINKRATE_28MS 0x0000 +#define RTL8366RB_LED_BLINKRATE_56MS 0x0001 +#define RTL8366RB_LED_BLINKRATE_84MS 0x0002 +#define RTL8366RB_LED_BLINKRATE_111MS 0x0003 +#define RTL8366RB_LED_BLINKRATE_222MS 0x0004 +#define RTL8366RB_LED_BLINKRATE_446MS 0x0005 + +#define RTL8366RB_LED_CTRL_REG 0x0431 +#define RTL8366RB_LED_OFF 0x0 +#define RTL8366RB_LED_DUP_COL 0x1 +#define RTL8366RB_LED_LINK_ACT 0x2 +#define RTL8366RB_LED_SPD1000 0x3 +#define RTL8366RB_LED_SPD100 0x4 +#define RTL8366RB_LED_SPD10 0x5 +#define RTL8366RB_LED_SPD1000_ACT 0x6 +#define RTL8366RB_LED_SPD100_ACT 0x7 +#define RTL8366RB_LED_SPD10_ACT 0x8 +#define RTL8366RB_LED_SPD100_10_ACT 0x9 +#define RTL8366RB_LED_FIBER 0xa +#define RTL8366RB_LED_AN_FAULT 0xb +#define RTL8366RB_LED_LINK_RX 0xc +#define RTL8366RB_LED_LINK_TX 0xd +#define RTL8366RB_LED_MASTER 0xe +#define RTL8366RB_LED_FORCE 0xf +#define RTL8366RB_LED_0_1_CTRL_REG 0x0432 +#define RTL8366RB_LED_1_OFFSET 6 +#define RTL8366RB_LED_2_3_CTRL_REG 0x0433 +#define RTL8366RB_LED_3_OFFSET 6 + +#define RTL8366RB_MIB_COUNT 33 +#define RTL8366RB_GLOBAL_MIB_COUNT 1 +#define RTL8366RB_MIB_COUNTER_PORT_OFFSET 0x0050 +#define RTL8366RB_MIB_COUNTER_BASE 0x1000 +#define RTL8366RB_MIB_CTRL_REG 0x13F0 +#define RTL8366RB_MIB_CTRL_USER_MASK 0x0FFC +#define 
RTL8366RB_MIB_CTRL_BUSY_MASK BIT(0) +#define RTL8366RB_MIB_CTRL_RESET_MASK BIT(1) +#define RTL8366RB_MIB_CTRL_PORT_RESET(_p) BIT(2 + (_p)) +#define RTL8366RB_MIB_CTRL_GLOBAL_RESET BIT(11) + +#define RTL8366RB_PORT_VLAN_CTRL_BASE 0x0063 +#define RTL8366RB_PORT_VLAN_CTRL_REG(_p) \ + (RTL8366RB_PORT_VLAN_CTRL_BASE + (_p) / 4) +#define RTL8366RB_PORT_VLAN_CTRL_MASK 0xf +#define RTL8366RB_PORT_VLAN_CTRL_SHIFT(_p) (4 * ((_p) % 4)) + +#define RTL8366RB_VLAN_TABLE_READ_BASE 0x018C +#define RTL8366RB_VLAN_TABLE_WRITE_BASE 0x0185 + +#define RTL8366RB_TABLE_ACCESS_CTRL_REG 0x0180 +#define RTL8366RB_TABLE_VLAN_READ_CTRL 0x0E01 +#define RTL8366RB_TABLE_VLAN_WRITE_CTRL 0x0F01 + +#define RTL8366RB_VLAN_MC_BASE(_x) (0x0020 + (_x) * 3) + +#define RTL8366RB_PORT_LINK_STATUS_BASE 0x0014 +#define RTL8366RB_PORT_STATUS_SPEED_MASK 0x0003 +#define RTL8366RB_PORT_STATUS_DUPLEX_MASK 0x0004 +#define RTL8366RB_PORT_STATUS_LINK_MASK 0x0010 +#define RTL8366RB_PORT_STATUS_TXPAUSE_MASK 0x0020 +#define RTL8366RB_PORT_STATUS_RXPAUSE_MASK 0x0040 +#define RTL8366RB_PORT_STATUS_AN_MASK 0x0080 + +#define RTL8366RB_NUM_VLANS 16 +#define RTL8366RB_NUM_LEDGROUPS 4 +#define RTL8366RB_NUM_VIDS 4096 +#define RTL8366RB_PRIORITYMAX 7 +#define RTL8366RB_FIDMAX 7 + +#define RTL8366RB_PORT_1 BIT(0) /* In userspace port 0 */ +#define RTL8366RB_PORT_2 BIT(1) /* In userspace port 1 */ +#define RTL8366RB_PORT_3 BIT(2) /* In userspace port 2 */ +#define RTL8366RB_PORT_4 BIT(3) /* In userspace port 3 */ +#define RTL8366RB_PORT_5 BIT(4) /* In userspace port 4 */ + +#define RTL8366RB_PORT_CPU BIT(5) /* CPU port */ + +#define RTL8366RB_PORT_ALL (RTL8366RB_PORT_1 | \ + RTL8366RB_PORT_2 | \ + RTL8366RB_PORT_3 | \ + RTL8366RB_PORT_4 | \ + RTL8366RB_PORT_5 | \ + RTL8366RB_PORT_CPU) + +#define RTL8366RB_PORT_ALL_BUT_CPU (RTL8366RB_PORT_1 | \ + RTL8366RB_PORT_2 | \ + RTL8366RB_PORT_3 | \ + RTL8366RB_PORT_4 | \ + RTL8366RB_PORT_5) + +#define RTL8366RB_PORT_ALL_EXTERNAL (RTL8366RB_PORT_1 | \ + RTL8366RB_PORT_2 | \ + RTL8366RB_PORT_3 | \ + RTL8366RB_PORT_4) + +#define RTL8366RB_PORT_ALL_INTERNAL RTL8366RB_PORT_CPU + +/* First configuration word per member config, VID and prio */ +#define RTL8366RB_VLAN_VID_MASK 0xfff +#define RTL8366RB_VLAN_PRIORITY_SHIFT 12 +#define RTL8366RB_VLAN_PRIORITY_MASK 0x7 +/* Second configuration word per member config, member and untagged */ +#define RTL8366RB_VLAN_UNTAG_SHIFT 8 +#define RTL8366RB_VLAN_UNTAG_MASK 0xff +#define RTL8366RB_VLAN_MEMBER_MASK 0xff +/* Third config word per member config, STAG currently unused */ +#define RTL8366RB_VLAN_STAG_MBR_MASK 0xff +#define RTL8366RB_VLAN_STAG_MBR_SHIFT 8 +#define RTL8366RB_VLAN_STAG_IDX_MASK 0x7 +#define RTL8366RB_VLAN_STAG_IDX_SHIFT 5 +#define RTL8366RB_VLAN_FID_MASK 0x7 + +/* Port ingress bandwidth control */ +#define RTL8366RB_IB_BASE 0x0200 +#define RTL8366RB_IB_REG(pnum) (RTL8366RB_IB_BASE + (pnum)) +#define RTL8366RB_IB_BDTH_MASK 0x3fff +#define RTL8366RB_IB_PREIFG BIT(14) + +/* Port egress bandwidth control */ +#define RTL8366RB_EB_BASE 0x02d1 +#define RTL8366RB_EB_REG(pnum) (RTL8366RB_EB_BASE + (pnum)) +#define RTL8366RB_EB_BDTH_MASK 0x3fff +#define RTL8366RB_EB_PREIFG_REG 0x02f8 +#define RTL8366RB_EB_PREIFG BIT(9) + +#define RTL8366RB_BDTH_SW_MAX 1048512 /* 1048576? */ +#define RTL8366RB_BDTH_UNIT 64 +#define RTL8366RB_BDTH_REG_DEFAULT 16383 + +/* QOS */ +#define RTL8366RB_QOS BIT(15) +/* Include/Exclude Preamble and IFG (20 bytes). 0:Exclude, 1:Include. 
*/ +#define RTL8366RB_QOS_DEFAULT_PREIFG 1 + +/* Interrupt handling */ +#define RTL8366RB_INTERRUPT_CONTROL_REG 0x0440 +#define RTL8366RB_INTERRUPT_POLARITY BIT(0) +#define RTL8366RB_P4_RGMII_LED BIT(2) +#define RTL8366RB_INTERRUPT_MASK_REG 0x0441 +#define RTL8366RB_INTERRUPT_LINK_CHGALL GENMASK(11, 0) +#define RTL8366RB_INTERRUPT_ACLEXCEED BIT(8) +#define RTL8366RB_INTERRUPT_STORMEXCEED BIT(9) +#define RTL8366RB_INTERRUPT_P4_FIBER BIT(12) +#define RTL8366RB_INTERRUPT_P4_UTP BIT(13) +#define RTL8366RB_INTERRUPT_VALID (RTL8366RB_INTERRUPT_LINK_CHGALL | \ + RTL8366RB_INTERRUPT_ACLEXCEED | \ + RTL8366RB_INTERRUPT_STORMEXCEED | \ + RTL8366RB_INTERRUPT_P4_FIBER | \ + RTL8366RB_INTERRUPT_P4_UTP) +#define RTL8366RB_INTERRUPT_STATUS_REG 0x0442 +#define RTL8366RB_NUM_INTERRUPT 14 /* 0..13 */ + +/* bits 0..5 enable force when cleared */ +#define RTL8366RB_MAC_FORCE_CTRL_REG 0x0F11 + +#define RTL8366RB_OAM_PARSER_REG 0x0F14 +#define RTL8366RB_OAM_MULTIPLEXER_REG 0x0F15 + +#define RTL8366RB_GREEN_FEATURE_REG 0x0F51 +#define RTL8366RB_GREEN_FEATURE_MSK 0x0007 +#define RTL8366RB_GREEN_FEATURE_TX BIT(0) +#define RTL8366RB_GREEN_FEATURE_RX BIT(2) + +static struct rtl8366_mib_counter rtl8366rb_mib_counters[] = { + { 0, 0, 4, "IfInOctets" }, + { 0, 4, 4, "EtherStatsOctets" }, + { 0, 8, 2, "EtherStatsUnderSizePkts" }, + { 0, 10, 2, "EtherFragments" }, + { 0, 12, 2, "EtherStatsPkts64Octets" }, + { 0, 14, 2, "EtherStatsPkts65to127Octets" }, + { 0, 16, 2, "EtherStatsPkts128to255Octets" }, + { 0, 18, 2, "EtherStatsPkts256to511Octets" }, + { 0, 20, 2, "EtherStatsPkts512to1023Octets" }, + { 0, 22, 2, "EtherStatsPkts1024to1518Octets" }, + { 0, 24, 2, "EtherOversizeStats" }, + { 0, 26, 2, "EtherStatsJabbers" }, + { 0, 28, 2, "IfInUcastPkts" }, + { 0, 30, 2, "EtherStatsMulticastPkts" }, + { 0, 32, 2, "EtherStatsBroadcastPkts" }, + { 0, 34, 2, "EtherStatsDropEvents" }, + { 0, 36, 2, "Dot3StatsFCSErrors" }, + { 0, 38, 2, "Dot3StatsSymbolErrors" }, + { 0, 40, 2, "Dot3InPauseFrames" }, + { 0, 42, 2, "Dot3ControlInUnknownOpcodes" }, + { 0, 44, 4, "IfOutOctets" }, + { 0, 48, 2, "Dot3StatsSingleCollisionFrames" }, + { 0, 50, 2, "Dot3StatMultipleCollisionFrames" }, + { 0, 52, 2, "Dot3sDeferredTransmissions" }, + { 0, 54, 2, "Dot3StatsLateCollisions" }, + { 0, 56, 2, "EtherStatsCollisions" }, + { 0, 58, 2, "Dot3StatsExcessiveCollisions" }, + { 0, 60, 2, "Dot3OutPauseFrames" }, + { 0, 62, 2, "Dot1dBasePortDelayExceededDiscards" }, + { 0, 64, 2, "Dot1dTpPortInDiscards" }, + { 0, 66, 2, "IfOutUcastPkts" }, + { 0, 68, 2, "IfOutMulticastPkts" }, + { 0, 70, 2, "IfOutBroadcastPkts" }, +}; + +static int rtl8366rb_get_mib_counter(struct realtek_smi *smi, + int port, + struct rtl8366_mib_counter *mib, + u64 *mibvalue) +{ + u32 addr, val; + int ret; + int i; + + addr = RTL8366RB_MIB_COUNTER_BASE + + RTL8366RB_MIB_COUNTER_PORT_OFFSET * (port) + + mib->offset; + + /* Writing access counter address first + * then ASIC will prepare 64bits counter wait for being retrived + */ + ret = regmap_write(smi->map, addr, 0); /* Write whatever */ + if (ret) + return ret; + + /* Read MIB control register */ + ret = regmap_read(smi->map, RTL8366RB_MIB_CTRL_REG, &val); + if (ret) + return -EIO; + + if (val & RTL8366RB_MIB_CTRL_BUSY_MASK) + return -EBUSY; + + if (val & RTL8366RB_MIB_CTRL_RESET_MASK) + return -EIO; + + /* Read each individual MIB 16 bits at the time */ + *mibvalue = 0; + for (i = mib->length; i > 0; i--) { + ret = regmap_read(smi->map, addr + (i - 1), &val); + if (ret) + return ret; + *mibvalue = (*mibvalue << 16) | (val & 0xFFFF); + } + 
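	/* Worked example with made-up readings: for port 2 and the
	 * "IfOutOctets" counter (offset 44, length 4), addr above is
	 * 0x1000 + 2 * 0x0050 + 44 = 0x10cc. The loop above then reads
	 * the 16-bit words from 0x10cf down to 0x10cc, so readings of
	 * 0x0000, 0x0000, 0x0001 and 0x86a0 assemble into the 64-bit
	 * value 0x186a0 (100000 octets).
	 */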
return 0; +} + +static u32 rtl8366rb_get_irqmask(struct irq_data *d) +{ + int line = irqd_to_hwirq(d); + u32 val; + + /* For line interrupts we combine link down in bits + * 6..11 with link up in bits 0..5 into one interrupt. + */ + if (line < 12) + val = BIT(line) | BIT(line + 6); + else + val = BIT(line); + return val; +} + +static void rtl8366rb_mask_irq(struct irq_data *d) +{ + struct realtek_smi *smi = irq_data_get_irq_chip_data(d); + int ret; + + ret = regmap_update_bits(smi->map, RTL8366RB_INTERRUPT_MASK_REG, + rtl8366rb_get_irqmask(d), 0); + if (ret) + dev_err(smi->dev, "could not mask IRQ\n"); +} + +static void rtl8366rb_unmask_irq(struct irq_data *d) +{ + struct realtek_smi *smi = irq_data_get_irq_chip_data(d); + int ret; + + ret = regmap_update_bits(smi->map, RTL8366RB_INTERRUPT_MASK_REG, + rtl8366rb_get_irqmask(d), + rtl8366rb_get_irqmask(d)); + if (ret) + dev_err(smi->dev, "could not unmask IRQ\n"); +} + +static irqreturn_t rtl8366rb_irq(int irq, void *data) +{ + struct realtek_smi *smi = data; + u32 stat; + int ret; + + /* This clears the IRQ status register */ + ret = regmap_read(smi->map, RTL8366RB_INTERRUPT_STATUS_REG, + &stat); + if (ret) { + dev_err(smi->dev, "can't read interrupt status\n"); + return IRQ_NONE; + } + stat &= RTL8366RB_INTERRUPT_VALID; + if (!stat) + return IRQ_NONE; + while (stat) { + int line = __ffs(stat); + int child_irq; + + stat &= ~BIT(line); + /* For line interrupts we combine link down in bits + * 6..11 with link up in bits 0..5 into one interrupt. + */ + if (line < 12 && line > 5) + line -= 5; + child_irq = irq_find_mapping(smi->irqdomain, line); + handle_nested_irq(child_irq); + } + return IRQ_HANDLED; +} + +static struct irq_chip rtl8366rb_irq_chip = { + .name = "RTL8366RB", + .irq_mask = rtl8366rb_mask_irq, + .irq_unmask = rtl8366rb_unmask_irq, +}; + +static int rtl8366rb_irq_map(struct irq_domain *domain, unsigned int irq, + irq_hw_number_t hwirq) +{ + irq_set_chip_data(irq, domain->host_data); + irq_set_chip_and_handler(irq, &rtl8366rb_irq_chip, handle_simple_irq); + irq_set_nested_thread(irq, 1); + irq_set_noprobe(irq); + + return 0; +} + +static void rtl8366rb_irq_unmap(struct irq_domain *d, unsigned int irq) +{ + irq_set_nested_thread(irq, 0); + irq_set_chip_and_handler(irq, NULL, NULL); + irq_set_chip_data(irq, NULL); +} + +static const struct irq_domain_ops rtl8366rb_irqdomain_ops = { + .map = rtl8366rb_irq_map, + .unmap = rtl8366rb_irq_unmap, + .xlate = irq_domain_xlate_onecell, +}; + +static int rtl8366rb_setup_cascaded_irq(struct realtek_smi *smi) +{ + struct device_node *intc; + unsigned long irq_trig; + int irq; + int ret; + u32 val; + int i; + + intc = of_get_child_by_name(smi->dev->of_node, "interrupt-controller"); + if (!intc) { + dev_err(smi->dev, "missing child interrupt-controller node\n"); + return -EINVAL; + } + /* RB8366RB IRQs cascade off this one */ + irq = of_irq_get(intc, 0); + if (irq <= 0) { + dev_err(smi->dev, "failed to get parent IRQ\n"); + return irq ? 
irq : -EINVAL; + } + + /* This clears the IRQ status register */ + ret = regmap_read(smi->map, RTL8366RB_INTERRUPT_STATUS_REG, + &val); + if (ret) { + dev_err(smi->dev, "can't read interrupt status\n"); + return ret; + } + + /* Fetch IRQ edge information from the descriptor */ + irq_trig = irqd_get_trigger_type(irq_get_irq_data(irq)); + switch (irq_trig) { + case IRQF_TRIGGER_RISING: + case IRQF_TRIGGER_HIGH: + dev_info(smi->dev, "active high/rising IRQ\n"); + val = 0; + break; + case IRQF_TRIGGER_FALLING: + case IRQF_TRIGGER_LOW: + dev_info(smi->dev, "active low/falling IRQ\n"); + val = RTL8366RB_INTERRUPT_POLARITY; + break; + } + ret = regmap_update_bits(smi->map, RTL8366RB_INTERRUPT_CONTROL_REG, + RTL8366RB_INTERRUPT_POLARITY, + val); + if (ret) { + dev_err(smi->dev, "could not configure IRQ polarity\n"); + return ret; + } + + ret = devm_request_threaded_irq(smi->dev, irq, NULL, + rtl8366rb_irq, IRQF_ONESHOT, + "RTL8366RB", smi); + if (ret) { + dev_err(smi->dev, "unable to request irq: %d\n", ret); + return ret; + } + smi->irqdomain = irq_domain_add_linear(intc, + RTL8366RB_NUM_INTERRUPT, + &rtl8366rb_irqdomain_ops, + smi); + if (!smi->irqdomain) { + dev_err(smi->dev, "failed to create IRQ domain\n"); + return -EINVAL; + } + for (i = 0; i < smi->num_ports; i++) + irq_set_parent(irq_create_mapping(smi->irqdomain, i), irq); + + return 0; +} + +static int rtl8366rb_set_addr(struct realtek_smi *smi) +{ + u8 addr[ETH_ALEN]; + u16 val; + int ret; + + eth_random_addr(addr); + + dev_info(smi->dev, "set MAC: %02X:%02X:%02X:%02X:%02X:%02X\n", + addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]); + val = addr[0] << 8 | addr[1]; + ret = regmap_write(smi->map, RTL8366RB_SMAR0, val); + if (ret) + return ret; + val = addr[2] << 8 | addr[3]; + ret = regmap_write(smi->map, RTL8366RB_SMAR1, val); + if (ret) + return ret; + val = addr[4] << 8 | addr[5]; + ret = regmap_write(smi->map, RTL8366RB_SMAR2, val); + if (ret) + return ret; + + return 0; +} + +/* Found in a vendor driver */ + +/* For the "version 0" early silicon, appear in most source releases */ +static const u16 rtl8366rb_init_jam_ver_0[] = { + 0x000B, 0x0001, 0x03A6, 0x0100, 0x03A7, 0x0001, 0x02D1, 0x3FFF, + 0x02D2, 0x3FFF, 0x02D3, 0x3FFF, 0x02D4, 0x3FFF, 0x02D5, 0x3FFF, + 0x02D6, 0x3FFF, 0x02D7, 0x3FFF, 0x02D8, 0x3FFF, 0x022B, 0x0688, + 0x022C, 0x0FAC, 0x03D0, 0x4688, 0x03D1, 0x01F5, 0x0000, 0x0830, + 0x02F9, 0x0200, 0x02F7, 0x7FFF, 0x02F8, 0x03FF, 0x0080, 0x03E8, + 0x0081, 0x00CE, 0x0082, 0x00DA, 0x0083, 0x0230, 0xBE0F, 0x2000, + 0x0231, 0x422A, 0x0232, 0x422A, 0x0233, 0x422A, 0x0234, 0x422A, + 0x0235, 0x422A, 0x0236, 0x422A, 0x0237, 0x422A, 0x0238, 0x422A, + 0x0239, 0x422A, 0x023A, 0x422A, 0x023B, 0x422A, 0x023C, 0x422A, + 0x023D, 0x422A, 0x023E, 0x422A, 0x023F, 0x422A, 0x0240, 0x422A, + 0x0241, 0x422A, 0x0242, 0x422A, 0x0243, 0x422A, 0x0244, 0x422A, + 0x0245, 0x422A, 0x0246, 0x422A, 0x0247, 0x422A, 0x0248, 0x422A, + 0x0249, 0x0146, 0x024A, 0x0146, 0x024B, 0x0146, 0xBE03, 0xC961, + 0x024D, 0x0146, 0x024E, 0x0146, 0x024F, 0x0146, 0x0250, 0x0146, + 0xBE64, 0x0226, 0x0252, 0x0146, 0x0253, 0x0146, 0x024C, 0x0146, + 0x0251, 0x0146, 0x0254, 0x0146, 0xBE62, 0x3FD0, 0x0084, 0x0320, + 0x0255, 0x0146, 0x0256, 0x0146, 0x0257, 0x0146, 0x0258, 0x0146, + 0x0259, 0x0146, 0x025A, 0x0146, 0x025B, 0x0146, 0x025C, 0x0146, + 0x025D, 0x0146, 0x025E, 0x0146, 0x025F, 0x0146, 0x0260, 0x0146, + 0x0261, 0xA23F, 0x0262, 0x0294, 0x0263, 0xA23F, 0x0264, 0x0294, + 0x0265, 0xA23F, 0x0266, 0x0294, 0x0267, 0xA23F, 0x0268, 0x0294, + 0x0269, 0xA23F, 0x026A, 0x0294, 0x026B, 
0xA23F, 0x026C, 0x0294, + 0x026D, 0xA23F, 0x026E, 0x0294, 0x026F, 0xA23F, 0x0270, 0x0294, + 0x02F5, 0x0048, 0xBE09, 0x0E00, 0xBE1E, 0x0FA0, 0xBE14, 0x8448, + 0xBE15, 0x1007, 0xBE4A, 0xA284, 0xC454, 0x3F0B, 0xC474, 0x3F0B, + 0xBE48, 0x3672, 0xBE4B, 0x17A7, 0xBE4C, 0x0B15, 0xBE52, 0x0EDD, + 0xBE49, 0x8C00, 0xBE5B, 0x785C, 0xBE5C, 0x785C, 0xBE5D, 0x785C, + 0xBE61, 0x368A, 0xBE63, 0x9B84, 0xC456, 0xCC13, 0xC476, 0xCC13, + 0xBE65, 0x307D, 0xBE6D, 0x0005, 0xBE6E, 0xE120, 0xBE2E, 0x7BAF, +}; + +/* This v1 init sequence is from Belkin F5D8235 U-Boot release */ +static const u16 rtl8366rb_init_jam_ver_1[] = { + 0x0000, 0x0830, 0x0001, 0x8000, 0x0400, 0x8130, 0xBE78, 0x3C3C, + 0x0431, 0x5432, 0xBE37, 0x0CE4, 0x02FA, 0xFFDF, 0x02FB, 0xFFE0, + 0xC44C, 0x1585, 0xC44C, 0x1185, 0xC44C, 0x1585, 0xC46C, 0x1585, + 0xC46C, 0x1185, 0xC46C, 0x1585, 0xC451, 0x2135, 0xC471, 0x2135, + 0xBE10, 0x8140, 0xBE15, 0x0007, 0xBE6E, 0xE120, 0xBE69, 0xD20F, + 0xBE6B, 0x0320, 0xBE24, 0xB000, 0xBE23, 0xFF51, 0xBE22, 0xDF20, + 0xBE21, 0x0140, 0xBE20, 0x00BB, 0xBE24, 0xB800, 0xBE24, 0x0000, + 0xBE24, 0x7000, 0xBE23, 0xFF51, 0xBE22, 0xDF60, 0xBE21, 0x0140, + 0xBE20, 0x0077, 0xBE24, 0x7800, 0xBE24, 0x0000, 0xBE2E, 0x7B7A, + 0xBE36, 0x0CE4, 0x02F5, 0x0048, 0xBE77, 0x2940, 0x000A, 0x83E0, + 0xBE79, 0x3C3C, 0xBE00, 0x1340, +}; + +/* This v2 init sequence is from Belkin F5D8235 U-Boot release */ +static const u16 rtl8366rb_init_jam_ver_2[] = { + 0x0450, 0x0000, 0x0400, 0x8130, 0x000A, 0x83ED, 0x0431, 0x5432, + 0xC44F, 0x6250, 0xC46F, 0x6250, 0xC456, 0x0C14, 0xC476, 0x0C14, + 0xC44C, 0x1C85, 0xC44C, 0x1885, 0xC44C, 0x1C85, 0xC46C, 0x1C85, + 0xC46C, 0x1885, 0xC46C, 0x1C85, 0xC44C, 0x0885, 0xC44C, 0x0881, + 0xC44C, 0x0885, 0xC46C, 0x0885, 0xC46C, 0x0881, 0xC46C, 0x0885, + 0xBE2E, 0x7BA7, 0xBE36, 0x1000, 0xBE37, 0x1000, 0x8000, 0x0001, + 0xBE69, 0xD50F, 0x8000, 0x0000, 0xBE69, 0xD50F, 0xBE6E, 0x0320, + 0xBE77, 0x2940, 0xBE78, 0x3C3C, 0xBE79, 0x3C3C, 0xBE6E, 0xE120, + 0x8000, 0x0001, 0xBE15, 0x1007, 0x8000, 0x0000, 0xBE15, 0x1007, + 0xBE14, 0x0448, 0xBE1E, 0x00A0, 0xBE10, 0x8160, 0xBE10, 0x8140, + 0xBE00, 0x1340, 0x0F51, 0x0010, +}; + +/* Appears in a DDWRT code dump */ +static const u16 rtl8366rb_init_jam_ver_3[] = { + 0x0000, 0x0830, 0x0400, 0x8130, 0x000A, 0x83ED, 0x0431, 0x5432, + 0x0F51, 0x0017, 0x02F5, 0x0048, 0x02FA, 0xFFDF, 0x02FB, 0xFFE0, + 0xC456, 0x0C14, 0xC476, 0x0C14, 0xC454, 0x3F8B, 0xC474, 0x3F8B, + 0xC450, 0x2071, 0xC470, 0x2071, 0xC451, 0x226B, 0xC471, 0x226B, + 0xC452, 0xA293, 0xC472, 0xA293, 0xC44C, 0x1585, 0xC44C, 0x1185, + 0xC44C, 0x1585, 0xC46C, 0x1585, 0xC46C, 0x1185, 0xC46C, 0x1585, + 0xC44C, 0x0185, 0xC44C, 0x0181, 0xC44C, 0x0185, 0xC46C, 0x0185, + 0xC46C, 0x0181, 0xC46C, 0x0185, 0xBE24, 0xB000, 0xBE23, 0xFF51, + 0xBE22, 0xDF20, 0xBE21, 0x0140, 0xBE20, 0x00BB, 0xBE24, 0xB800, + 0xBE24, 0x0000, 0xBE24, 0x7000, 0xBE23, 0xFF51, 0xBE22, 0xDF60, + 0xBE21, 0x0140, 0xBE20, 0x0077, 0xBE24, 0x7800, 0xBE24, 0x0000, + 0xBE2E, 0x7BA7, 0xBE36, 0x1000, 0xBE37, 0x1000, 0x8000, 0x0001, + 0xBE69, 0xD50F, 0x8000, 0x0000, 0xBE69, 0xD50F, 0xBE6B, 0x0320, + 0xBE77, 0x2800, 0xBE78, 0x3C3C, 0xBE79, 0x3C3C, 0xBE6E, 0xE120, + 0x8000, 0x0001, 0xBE10, 0x8140, 0x8000, 0x0000, 0xBE10, 0x8140, + 0xBE15, 0x1007, 0xBE14, 0x0448, 0xBE1E, 0x00A0, 0xBE10, 0x8160, + 0xBE10, 0x8140, 0xBE00, 0x1340, 0x0450, 0x0000, 0x0401, 0x0000, +}; + +/* Belkin F5D8235 v1, "belkin,f5d8235-v1" */ +static const u16 rtl8366rb_init_jam_f5d8235[] = { + 0x0242, 0x02BF, 0x0245, 0x02BF, 0x0248, 0x02BF, 0x024B, 0x02BF, + 0x024E, 0x02BF, 0x0251, 0x02BF, 0x0254, 0x0A3F, 
0x0256, 0x0A3F, + 0x0258, 0x0A3F, 0x025A, 0x0A3F, 0x025C, 0x0A3F, 0x025E, 0x0A3F, + 0x0263, 0x007C, 0x0100, 0x0004, 0xBE5B, 0x3500, 0x800E, 0x200F, + 0xBE1D, 0x0F00, 0x8001, 0x5011, 0x800A, 0xA2F4, 0x800B, 0x17A3, + 0xBE4B, 0x17A3, 0xBE41, 0x5011, 0xBE17, 0x2100, 0x8000, 0x8304, + 0xBE40, 0x8304, 0xBE4A, 0xA2F4, 0x800C, 0xA8D5, 0x8014, 0x5500, + 0x8015, 0x0004, 0xBE4C, 0xA8D5, 0xBE59, 0x0008, 0xBE09, 0x0E00, + 0xBE36, 0x1036, 0xBE37, 0x1036, 0x800D, 0x00FF, 0xBE4D, 0x00FF, +}; + +/* DGN3500, "netgear,dgn3500", "netgear,dgn3500b" */ +static const u16 rtl8366rb_init_jam_dgn3500[] = { + 0x0000, 0x0830, 0x0400, 0x8130, 0x000A, 0x83ED, 0x0F51, 0x0017, + 0x02F5, 0x0048, 0x02FA, 0xFFDF, 0x02FB, 0xFFE0, 0x0450, 0x0000, + 0x0401, 0x0000, 0x0431, 0x0960, +}; + +/* This jam table activates "green ethernet", which means low power mode + * and is claimed to detect the cable length and not use more power than + * necessary, and the ports should enter power saving mode 10 seconds after + * a cable is disconnected. Seems to always be the same. + */ +static const u16 rtl8366rb_green_jam[][2] = { + {0xBE78, 0x323C}, {0xBE77, 0x5000}, {0xBE2E, 0x7BA7}, + {0xBE59, 0x3459}, {0xBE5A, 0x745A}, {0xBE5B, 0x785C}, + {0xBE5C, 0x785C}, {0xBE6E, 0xE120}, {0xBE79, 0x323C}, +}; + +static int rtl8366rb_setup(struct dsa_switch *ds) +{ + struct realtek_smi *smi = ds->priv; + const u16 *jam_table; + u32 chip_ver = 0; + u32 chip_id = 0; + int jam_size; + u32 val; + int ret; + int i; + + ret = regmap_read(smi->map, RTL8366RB_CHIP_ID_REG, &chip_id); + if (ret) { + dev_err(smi->dev, "unable to read chip id\n"); + return ret; + } + + switch (chip_id) { + case RTL8366RB_CHIP_ID_8366: + break; + default: + dev_err(smi->dev, "unknown chip id (%04x)\n", chip_id); + return -ENODEV; + } + + ret = regmap_read(smi->map, RTL8366RB_CHIP_VERSION_CTRL_REG, + &chip_ver); + if (ret) { + dev_err(smi->dev, "unable to read chip version\n"); + return ret; + } + + dev_info(smi->dev, "RTL%04x ver %u chip found\n", + chip_id, chip_ver & RTL8366RB_CHIP_VERSION_MASK); + + /* Do the init dance using the right jam table */ + switch (chip_ver) { + case 0: + jam_table = rtl8366rb_init_jam_ver_0; + jam_size = ARRAY_SIZE(rtl8366rb_init_jam_ver_0); + break; + case 1: + jam_table = rtl8366rb_init_jam_ver_1; + jam_size = ARRAY_SIZE(rtl8366rb_init_jam_ver_1); + break; + case 2: + jam_table = rtl8366rb_init_jam_ver_2; + jam_size = ARRAY_SIZE(rtl8366rb_init_jam_ver_2); + break; + default: + jam_table = rtl8366rb_init_jam_ver_3; + jam_size = ARRAY_SIZE(rtl8366rb_init_jam_ver_3); + break; + } + + /* Special jam tables for special routers + * TODO: are these necessary? Maintainers, please test + * without them, using just the off-the-shelf tables. 
+ */ + if (of_machine_is_compatible("belkin,f5d8235-v1")) { + jam_table = rtl8366rb_init_jam_f5d8235; + jam_size = ARRAY_SIZE(rtl8366rb_init_jam_f5d8235); + } + if (of_machine_is_compatible("netgear,dgn3500") || + of_machine_is_compatible("netgear,dgn3500b")) { + jam_table = rtl8366rb_init_jam_dgn3500; + jam_size = ARRAY_SIZE(rtl8366rb_init_jam_dgn3500); + } + + i = 0; + while (i < jam_size) { + if ((jam_table[i] & 0xBE00) == 0xBE00) { + ret = regmap_read(smi->map, + RTL8366RB_PHY_ACCESS_BUSY_REG, + &val); + if (ret) + return ret; + if (!(val & RTL8366RB_PHY_INT_BUSY)) { + ret = regmap_write(smi->map, + RTL8366RB_PHY_ACCESS_CTRL_REG, + RTL8366RB_PHY_CTRL_WRITE); + if (ret) + return ret; + } + } + dev_dbg(smi->dev, "jam %04x into register %04x\n", + jam_table[i + 1], + jam_table[i]); + ret = regmap_write(smi->map, + jam_table[i], + jam_table[i + 1]); + if (ret) + return ret; + i += 2; + } + + /* Set up the "green ethernet" feature */ + i = 0; + while (i < ARRAY_SIZE(rtl8366rb_green_jam)) { + ret = regmap_read(smi->map, RTL8366RB_PHY_ACCESS_BUSY_REG, + &val); + if (ret) + return ret; + if (!(val & RTL8366RB_PHY_INT_BUSY)) { + ret = regmap_write(smi->map, + RTL8366RB_PHY_ACCESS_CTRL_REG, + RTL8366RB_PHY_CTRL_WRITE); + if (ret) + return ret; + ret = regmap_write(smi->map, + rtl8366rb_green_jam[i][0], + rtl8366rb_green_jam[i][1]); + if (ret) + return ret; + i++; + } + } + ret = regmap_write(smi->map, + RTL8366RB_GREEN_FEATURE_REG, + (chip_ver == 1) ? 0x0007 : 0x0003); + if (ret) + return ret; + + /* Vendor driver sets 0x240 in registers 0xc and 0xd (undocumented) */ + ret = regmap_write(smi->map, 0x0c, 0x240); + if (ret) + return ret; + ret = regmap_write(smi->map, 0x0d, 0x240); + if (ret) + return ret; + + /* Set some random MAC address */ + ret = rtl8366rb_set_addr(smi); + if (ret) + return ret; + + /* Enable CPU port and enable inserting CPU tag + * + * Disabling RTL8368RB_CPU_INSTAG here will change the behaviour + * of the switch totally and it will start talking Realtek RRCP + * internally. It is probably possible to experiment with this, + * but then the kernel needs to understand and handle RRCP first. + */ + ret = regmap_update_bits(smi->map, RTL8368RB_CPU_CTRL_REG, + 0xFFFF, + RTL8368RB_CPU_INSTAG | BIT(smi->cpu_port)); + if (ret) + return ret; + + /* Make sure we default-enable the fixed CPU port */ + ret = regmap_update_bits(smi->map, RTL8366RB_PECR, + BIT(smi->cpu_port), + 0); + if (ret) + return ret; + + /* Set maximum packet length to 1536 bytes */ + ret = regmap_update_bits(smi->map, RTL8366RB_SGCR, + RTL8366RB_SGCR_MAX_LENGTH_MASK, + RTL8366RB_SGCR_MAX_LENGTH_1536); + if (ret) + return ret; + + /* Enable learning for all ports */ + ret = regmap_write(smi->map, RTL8366RB_SSCR0, 0); + if (ret) + return ret; + + /* Enable auto ageing for all ports */ + ret = regmap_write(smi->map, RTL8366RB_SSCR1, 0); + if (ret) + return ret; + + /* Discard VLAN tagged packets if the port is not a member of + * the VLAN with which the packets is associated. 
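 * The value written is a port bitmask, so RTL8366RB_PORT_ALL below
 * enables this check on the five user-facing ports and the CPU port
 * alike.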
+ */ + ret = regmap_write(smi->map, RTL8366RB_VLAN_INGRESS_CTRL2_REG, + RTL8366RB_PORT_ALL); + if (ret) + return ret; + + /* Don't drop packets whose DA has not been learned */ + ret = regmap_update_bits(smi->map, RTL8366RB_SSCR2, + RTL8366RB_SSCR2_DROP_UNKNOWN_DA, 0); + if (ret) + return ret; + + /* Set blinking, TODO: make this configurable */ + ret = regmap_update_bits(smi->map, RTL8366RB_LED_BLINKRATE_REG, + RTL8366RB_LED_BLINKRATE_MASK, + RTL8366RB_LED_BLINKRATE_56MS); + if (ret) + return ret; + + /* Set up LED activity: + * Each port has 4 LEDs, we configure all ports to the same + * behaviour (no individual config) but we can set up each + * LED separately. + */ + if (smi->leds_disabled) { + /* Turn everything off */ + regmap_update_bits(smi->map, + RTL8366RB_LED_0_1_CTRL_REG, + 0x0FFF, 0); + regmap_update_bits(smi->map, + RTL8366RB_LED_2_3_CTRL_REG, + 0x0FFF, 0); + regmap_update_bits(smi->map, + RTL8366RB_INTERRUPT_CONTROL_REG, + RTL8366RB_P4_RGMII_LED, + 0); + val = RTL8366RB_LED_OFF; + } else { + /* TODO: make this configurable per LED */ + val = RTL8366RB_LED_FORCE; + } + for (i = 0; i < 4; i++) { + ret = regmap_update_bits(smi->map, + RTL8366RB_LED_CTRL_REG, + 0xf << (i * 4), + val << (i * 4)); + if (ret) + return ret; + } + + ret = rtl8366_init_vlan(smi); + if (ret) + return ret; + + ret = rtl8366rb_setup_cascaded_irq(smi); + if (ret) + dev_info(smi->dev, "no interrupt support\n"); + + ret = realtek_smi_setup_mdio(smi); + if (ret) { + dev_info(smi->dev, "could not set up MDIO bus\n"); + return -ENODEV; + } + + return 0; +} + +static enum dsa_tag_protocol rtl8366_get_tag_protocol(struct dsa_switch *ds, + int port) +{ + /* For now, the RTL switches are handled without any custom tags. + * + * It is possible to turn on "custom tags" by removing the + * RTL8368RB_CPU_INSTAG flag when enabling the port but what it + * does is unfamiliar to DSA: ethernet frames of type 8899, the Realtek + * Remote Control Protocol (RRCP) start to appear on the CPU port of + * the device. So this is not the ordinary few extra bytes in the + * frame. Instead it appears that the switch starts to talk Realtek + * RRCP internally which means a pretty complex RRCP implementation + * decoding and responding the RRCP protocol is needed to exploit this. + * + * The OpenRRCP project (dormant since 2009) have reverse-egineered + * parts of the protocol. + */ + return DSA_TAG_PROTO_NONE; +} + +static void rtl8366rb_adjust_link(struct dsa_switch *ds, int port, + struct phy_device *phydev) +{ + struct realtek_smi *smi = ds->priv; + int ret; + + if (port != smi->cpu_port) + return; + + dev_info(smi->dev, "adjust link on CPU port (%d)\n", port); + + /* Force the fixed CPU port into 1Gbit mode, no autonegotiation */ + ret = regmap_update_bits(smi->map, RTL8366RB_MAC_FORCE_CTRL_REG, + BIT(port), BIT(port)); + if (ret) + return; + + ret = regmap_update_bits(smi->map, RTL8366RB_PAACR2, + 0xFF00U, + RTL8366RB_PAACR_CPU_PORT << 8); + if (ret) + return; + + /* Enable the CPU port */ + ret = regmap_update_bits(smi->map, RTL8366RB_PECR, BIT(port), + 0); + if (ret) + return; +} + +static void rb8366rb_set_port_led(struct realtek_smi *smi, + int port, bool enable) +{ + u16 val = enable ? 
0x3f : 0; + int ret; + + if (smi->leds_disabled) + return; + + switch (port) { + case 0: + ret = regmap_update_bits(smi->map, + RTL8366RB_LED_0_1_CTRL_REG, + 0x3F, val); + break; + case 1: + ret = regmap_update_bits(smi->map, + RTL8366RB_LED_0_1_CTRL_REG, + 0x3F << RTL8366RB_LED_1_OFFSET, + val << RTL8366RB_LED_1_OFFSET); + break; + case 2: + ret = regmap_update_bits(smi->map, + RTL8366RB_LED_2_3_CTRL_REG, + 0x3F, val); + break; + case 3: + ret = regmap_update_bits(smi->map, + RTL8366RB_LED_2_3_CTRL_REG, + 0x3F << RTL8366RB_LED_3_OFFSET, + val << RTL8366RB_LED_3_OFFSET); + break; + case 4: + ret = regmap_update_bits(smi->map, + RTL8366RB_INTERRUPT_CONTROL_REG, + RTL8366RB_P4_RGMII_LED, + enable ? RTL8366RB_P4_RGMII_LED : 0); + break; + default: + dev_err(smi->dev, "no LED for port %d\n", port); + return; + } + if (ret) + dev_err(smi->dev, "error updating LED on port %d\n", port); +} + +static int +rtl8366rb_port_enable(struct dsa_switch *ds, int port, + struct phy_device *phy) +{ + struct realtek_smi *smi = ds->priv; + int ret; + + dev_dbg(smi->dev, "enable port %d\n", port); + ret = regmap_update_bits(smi->map, RTL8366RB_PECR, BIT(port), + 0); + if (ret) + return ret; + + rb8366rb_set_port_led(smi, port, true); + return 0; +} + +static void +rtl8366rb_port_disable(struct dsa_switch *ds, int port, + struct phy_device *phy) +{ + struct realtek_smi *smi = ds->priv; + int ret; + + dev_dbg(smi->dev, "disable port %d\n", port); + ret = regmap_update_bits(smi->map, RTL8366RB_PECR, BIT(port), + BIT(port)); + if (ret) + return; + + rb8366rb_set_port_led(smi, port, false); +} + +static int rtl8366rb_get_vlan_4k(struct realtek_smi *smi, u32 vid, + struct rtl8366_vlan_4k *vlan4k) +{ + u32 data[3]; + int ret; + int i; + + memset(vlan4k, '\0', sizeof(struct rtl8366_vlan_4k)); + + if (vid >= RTL8366RB_NUM_VIDS) + return -EINVAL; + + /* write VID */ + ret = regmap_write(smi->map, RTL8366RB_VLAN_TABLE_WRITE_BASE, + vid & RTL8366RB_VLAN_VID_MASK); + if (ret) + return ret; + + /* write table access control word */ + ret = regmap_write(smi->map, RTL8366RB_TABLE_ACCESS_CTRL_REG, + RTL8366RB_TABLE_VLAN_READ_CTRL); + if (ret) + return ret; + + for (i = 0; i < 3; i++) { + ret = regmap_read(smi->map, + RTL8366RB_VLAN_TABLE_READ_BASE + i, + &data[i]); + if (ret) + return ret; + } + + vlan4k->vid = vid; + vlan4k->untag = (data[1] >> RTL8366RB_VLAN_UNTAG_SHIFT) & + RTL8366RB_VLAN_UNTAG_MASK; + vlan4k->member = data[1] & RTL8366RB_VLAN_MEMBER_MASK; + vlan4k->fid = data[2] & RTL8366RB_VLAN_FID_MASK; + + return 0; +} + +static int rtl8366rb_set_vlan_4k(struct realtek_smi *smi, + const struct rtl8366_vlan_4k *vlan4k) +{ + u32 data[3]; + int ret; + int i; + + if (vlan4k->vid >= RTL8366RB_NUM_VIDS || + vlan4k->member > RTL8366RB_VLAN_MEMBER_MASK || + vlan4k->untag > RTL8366RB_VLAN_UNTAG_MASK || + vlan4k->fid > RTL8366RB_FIDMAX) + return -EINVAL; + + data[0] = vlan4k->vid & RTL8366RB_VLAN_VID_MASK; + data[1] = (vlan4k->member & RTL8366RB_VLAN_MEMBER_MASK) | + ((vlan4k->untag & RTL8366RB_VLAN_UNTAG_MASK) << + RTL8366RB_VLAN_UNTAG_SHIFT); + data[2] = vlan4k->fid & RTL8366RB_VLAN_FID_MASK; + + for (i = 0; i < 3; i++) { + ret = regmap_write(smi->map, + RTL8366RB_VLAN_TABLE_WRITE_BASE + i, + data[i]); + if (ret) + return ret; + } + + /* write table access control word */ + ret = regmap_write(smi->map, RTL8366RB_TABLE_ACCESS_CTRL_REG, + RTL8366RB_TABLE_VLAN_WRITE_CTRL); + + return ret; +} + +static int rtl8366rb_get_vlan_mc(struct realtek_smi *smi, u32 index, + struct rtl8366_vlan_mc *vlanmc) +{ + u32 data[3]; + int ret; + int 
i; + + memset(vlanmc, '\0', sizeof(struct rtl8366_vlan_mc)); + + if (index >= RTL8366RB_NUM_VLANS) + return -EINVAL; + + for (i = 0; i < 3; i++) { + ret = regmap_read(smi->map, + RTL8366RB_VLAN_MC_BASE(index) + i, + &data[i]); + if (ret) + return ret; + } + + vlanmc->vid = data[0] & RTL8366RB_VLAN_VID_MASK; + vlanmc->priority = (data[0] >> RTL8366RB_VLAN_PRIORITY_SHIFT) & + RTL8366RB_VLAN_PRIORITY_MASK; + vlanmc->untag = (data[1] >> RTL8366RB_VLAN_UNTAG_SHIFT) & + RTL8366RB_VLAN_UNTAG_MASK; + vlanmc->member = data[1] & RTL8366RB_VLAN_MEMBER_MASK; + vlanmc->fid = data[2] & RTL8366RB_VLAN_FID_MASK; + + return 0; +} + +static int rtl8366rb_set_vlan_mc(struct realtek_smi *smi, u32 index, + const struct rtl8366_vlan_mc *vlanmc) +{ + u32 data[3]; + int ret; + int i; + + if (index >= RTL8366RB_NUM_VLANS || + vlanmc->vid >= RTL8366RB_NUM_VIDS || + vlanmc->priority > RTL8366RB_PRIORITYMAX || + vlanmc->member > RTL8366RB_VLAN_MEMBER_MASK || + vlanmc->untag > RTL8366RB_VLAN_UNTAG_MASK || + vlanmc->fid > RTL8366RB_FIDMAX) + return -EINVAL; + + data[0] = (vlanmc->vid & RTL8366RB_VLAN_VID_MASK) | + ((vlanmc->priority & RTL8366RB_VLAN_PRIORITY_MASK) << + RTL8366RB_VLAN_PRIORITY_SHIFT); + data[1] = (vlanmc->member & RTL8366RB_VLAN_MEMBER_MASK) | + ((vlanmc->untag & RTL8366RB_VLAN_UNTAG_MASK) << + RTL8366RB_VLAN_UNTAG_SHIFT); + data[2] = vlanmc->fid & RTL8366RB_VLAN_FID_MASK; + + for (i = 0; i < 3; i++) { + ret = regmap_write(smi->map, + RTL8366RB_VLAN_MC_BASE(index) + i, + data[i]); + if (ret) + return ret; + } + + return 0; +} + +static int rtl8366rb_get_mc_index(struct realtek_smi *smi, int port, int *val) +{ + u32 data; + int ret; + + if (port >= smi->num_ports) + return -EINVAL; + + ret = regmap_read(smi->map, RTL8366RB_PORT_VLAN_CTRL_REG(port), + &data); + if (ret) + return ret; + + *val = (data >> RTL8366RB_PORT_VLAN_CTRL_SHIFT(port)) & + RTL8366RB_PORT_VLAN_CTRL_MASK; + + return 0; +} + +static int rtl8366rb_set_mc_index(struct realtek_smi *smi, int port, int index) +{ + if (port >= smi->num_ports || index >= RTL8366RB_NUM_VLANS) + return -EINVAL; + + return regmap_update_bits(smi->map, RTL8366RB_PORT_VLAN_CTRL_REG(port), + RTL8366RB_PORT_VLAN_CTRL_MASK << + RTL8366RB_PORT_VLAN_CTRL_SHIFT(port), + (index & RTL8366RB_PORT_VLAN_CTRL_MASK) << + RTL8366RB_PORT_VLAN_CTRL_SHIFT(port)); +} + +static bool rtl8366rb_is_vlan_valid(struct realtek_smi *smi, unsigned int vlan) +{ + unsigned int max = RTL8366RB_NUM_VLANS; + + if (smi->vlan4k_enabled) + max = RTL8366RB_NUM_VIDS - 1; + + if (vlan == 0 || vlan >= max) + return false; + + return true; +} + +static int rtl8366rb_enable_vlan(struct realtek_smi *smi, bool enable) +{ + dev_dbg(smi->dev, "%s VLAN\n", enable ? "enable" : "disable"); + return regmap_update_bits(smi->map, + RTL8366RB_SGCR, RTL8366RB_SGCR_EN_VLAN, + enable ? RTL8366RB_SGCR_EN_VLAN : 0); +} + +static int rtl8366rb_enable_vlan4k(struct realtek_smi *smi, bool enable) +{ + dev_dbg(smi->dev, "%s VLAN 4k\n", enable ? "enable" : "disable"); + return regmap_update_bits(smi->map, RTL8366RB_SGCR, + RTL8366RB_SGCR_EN_VLAN_4KTB, + enable ? 
RTL8366RB_SGCR_EN_VLAN_4KTB : 0); +} + +static int rtl8366rb_phy_read(struct realtek_smi *smi, int phy, int regnum) +{ + u32 val; + u32 reg; + int ret; + + if (phy > RTL8366RB_PHY_NO_MAX) + return -EINVAL; + + ret = regmap_write(smi->map, RTL8366RB_PHY_ACCESS_CTRL_REG, + RTL8366RB_PHY_CTRL_READ); + if (ret) + return ret; + + reg = 0x8000 | (1 << (phy + RTL8366RB_PHY_NO_OFFSET)) | regnum; + + ret = regmap_write(smi->map, reg, 0); + if (ret) { + dev_err(smi->dev, + "failed to write PHY%d reg %04x @ %04x, ret %d\n", + phy, regnum, reg, ret); + return ret; + } + + ret = regmap_read(smi->map, RTL8366RB_PHY_ACCESS_DATA_REG, &val); + if (ret) + return ret; + + dev_dbg(smi->dev, "read PHY%d register 0x%04x @ %08x, val <- %04x\n", + phy, regnum, reg, val); + + return val; +} + +static int rtl8366rb_phy_write(struct realtek_smi *smi, int phy, int regnum, + u16 val) +{ + u32 reg; + int ret; + + if (phy > RTL8366RB_PHY_NO_MAX) + return -EINVAL; + + ret = regmap_write(smi->map, RTL8366RB_PHY_ACCESS_CTRL_REG, + RTL8366RB_PHY_CTRL_WRITE); + if (ret) + return ret; + + reg = 0x8000 | (1 << (phy + RTL8366RB_PHY_NO_OFFSET)) | regnum; + + dev_dbg(smi->dev, "write PHY%d register 0x%04x @ %04x, val -> %04x\n", + phy, regnum, reg, val); + + ret = regmap_write(smi->map, reg, val); + if (ret) + return ret; + + return 0; +} + +static int rtl8366rb_reset_chip(struct realtek_smi *smi) +{ + int timeout = 10; + u32 val; + int ret; + + realtek_smi_write_reg_noack(smi, RTL8366RB_RESET_CTRL_REG, + RTL8366RB_CHIP_CTRL_RESET_HW); + do { + usleep_range(20000, 25000); + ret = regmap_read(smi->map, RTL8366RB_RESET_CTRL_REG, &val); + if (ret) + return ret; + + if (!(val & RTL8366RB_CHIP_CTRL_RESET_HW)) + break; + } while (--timeout); + + if (!timeout) { + dev_err(smi->dev, "timeout waiting for the switch to reset\n"); + return -EIO; + } + + return 0; +} + +static int rtl8366rb_detect(struct realtek_smi *smi) +{ + struct device *dev = smi->dev; + int ret; + u32 val; + + /* Detect device */ + ret = regmap_read(smi->map, 0x5c, &val); + if (ret) { + dev_err(dev, "can't get chip ID (%d)\n", ret); + return ret; + } + + switch (val) { + case 0x6027: + dev_info(dev, "found an RTL8366S switch\n"); + dev_err(dev, "this switch is not yet supported, submit patches!\n"); + return -ENODEV; + case 0x5937: + dev_info(dev, "found an RTL8366RB switch\n"); + smi->cpu_port = RTL8366RB_PORT_NUM_CPU; + smi->num_ports = RTL8366RB_NUM_PORTS; + smi->num_vlan_mc = RTL8366RB_NUM_VLANS; + smi->mib_counters = rtl8366rb_mib_counters; + smi->num_mib_counters = ARRAY_SIZE(rtl8366rb_mib_counters); + break; + default: + dev_info(dev, "found an Unknown Realtek switch (id=0x%04x)\n", + val); + break; + } + + ret = rtl8366rb_reset_chip(smi); + if (ret) + return ret; + + return 0; +} + +static const struct dsa_switch_ops rtl8366rb_switch_ops = { + .get_tag_protocol = rtl8366_get_tag_protocol, + .setup = rtl8366rb_setup, + .adjust_link = rtl8366rb_adjust_link, + .get_strings = rtl8366_get_strings, + .get_ethtool_stats = rtl8366_get_ethtool_stats, + .get_sset_count = rtl8366_get_sset_count, + .port_vlan_filtering = rtl8366_vlan_filtering, + .port_vlan_prepare = rtl8366_vlan_prepare, + .port_vlan_add = rtl8366_vlan_add, + .port_vlan_del = rtl8366_vlan_del, + .port_enable = rtl8366rb_port_enable, + .port_disable = rtl8366rb_port_disable, +}; + +static const struct realtek_smi_ops rtl8366rb_smi_ops = { + .detect = rtl8366rb_detect, + .get_vlan_mc = rtl8366rb_get_vlan_mc, + .set_vlan_mc = rtl8366rb_set_vlan_mc, + .get_vlan_4k = rtl8366rb_get_vlan_4k, + .set_vlan_4k = 
rtl8366rb_set_vlan_4k, + .get_mc_index = rtl8366rb_get_mc_index, + .set_mc_index = rtl8366rb_set_mc_index, + .get_mib_counter = rtl8366rb_get_mib_counter, + .is_vlan_valid = rtl8366rb_is_vlan_valid, + .enable_vlan = rtl8366rb_enable_vlan, + .enable_vlan4k = rtl8366rb_enable_vlan4k, + .phy_read = rtl8366rb_phy_read, + .phy_write = rtl8366rb_phy_write, +}; + +const struct realtek_smi_variant rtl8366rb_variant = { + .ds_ops = &rtl8366rb_switch_ops, + .ops = &rtl8366rb_smi_ops, + .clk_delay = 10, + .cmd_read = 0xa9, + .cmd_write = 0xa8, +}; +EXPORT_SYMBOL_GPL(rtl8366rb_variant); diff --git a/drivers/net/dsa/vitesse-vsc73xx.c b/drivers/net/dsa/vitesse-vsc73xx.c new file mode 100644 index 000000000000..9f1b5f2e8a64 --- /dev/null +++ b/drivers/net/dsa/vitesse-vsc73xx.c @@ -0,0 +1,1365 @@ +// SPDX-License-Identifier: GPL-2.0 +/* DSA driver for: + * Vitesse VSC7385 SparX-G5 5+1-port Integrated Gigabit Ethernet Switch + * Vitesse VSC7388 SparX-G8 8-port Integrated Gigabit Ethernet Switch + * Vitesse VSC7395 SparX-G5e 5+1-port Integrated Gigabit Ethernet Switch + * Vitesse VSC7398 SparX-G8e 8-port Integrated Gigabit Ethernet Switch + * + * These switches have a built-in 8051 CPU and can download and execute a + * firmware in this CPU. They can also be configured to use an external CPU + * handling the switch in a memory-mapped manner by connecting to that external + * CPU's memory bus. + * + * This driver (currently) only takes control of the switch chip over SPI and + * configures it to route packages around when connected to a CPU port. The + * chip has embedded PHYs and VLAN support so we model it using DSA. + * + * Copyright (C) 2018 Linus Wallej <linus.walleij@linaro.org> + * Includes portions of code from the firmware uploader by: + * Copyright (C) 2009 Gabor Juhos <juhosg@openwrt.org> + */ +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/device.h> +#include <linux/of.h> +#include <linux/of_device.h> +#include <linux/of_mdio.h> +#include <linux/platform_device.h> +#include <linux/spi/spi.h> +#include <linux/bitops.h> +#include <linux/if_bridge.h> +#include <linux/etherdevice.h> +#include <linux/gpio/consumer.h> +#include <linux/gpio/driver.h> +#include <linux/random.h> +#include <net/dsa.h> + +#define VSC73XX_BLOCK_MAC 0x1 /* Subblocks 0-4, 6 (CPU port) */ +#define VSC73XX_BLOCK_ANALYZER 0x2 /* Only subblock 0 */ +#define VSC73XX_BLOCK_MII 0x3 /* Subblocks 0 and 1 */ +#define VSC73XX_BLOCK_MEMINIT 0x3 /* Only subblock 2 */ +#define VSC73XX_BLOCK_CAPTURE 0x4 /* Only subblock 2 */ +#define VSC73XX_BLOCK_ARBITER 0x5 /* Only subblock 0 */ +#define VSC73XX_BLOCK_SYSTEM 0x7 /* Only subblock 0 */ + +#define CPU_PORT 6 /* CPU port */ + +/* MAC Block registers */ +#define VSC73XX_MAC_CFG 0x00 +#define VSC73XX_MACHDXGAP 0x02 +#define VSC73XX_FCCONF 0x04 +#define VSC73XX_FCMACHI 0x08 +#define VSC73XX_FCMACLO 0x0c +#define VSC73XX_MAXLEN 0x10 +#define VSC73XX_ADVPORTM 0x19 +#define VSC73XX_TXUPDCFG 0x24 +#define VSC73XX_TXQ_SELECT_CFG 0x28 +#define VSC73XX_RXOCT 0x50 +#define VSC73XX_TXOCT 0x51 +#define VSC73XX_C_RX0 0x52 +#define VSC73XX_C_RX1 0x53 +#define VSC73XX_C_RX2 0x54 +#define VSC73XX_C_TX0 0x55 +#define VSC73XX_C_TX1 0x56 +#define VSC73XX_C_TX2 0x57 +#define VSC73XX_C_CFG 0x58 +#define VSC73XX_CAT_DROP 0x6e +#define VSC73XX_CAT_PR_MISC_L2 0x6f +#define VSC73XX_CAT_PR_USR_PRIO 0x75 +#define VSC73XX_Q_MISC_CONF 0xdf + +/* MAC_CFG register bits */ +#define VSC73XX_MAC_CFG_WEXC_DIS BIT(31) +#define VSC73XX_MAC_CFG_PORT_RST BIT(29) +#define VSC73XX_MAC_CFG_TX_EN BIT(28) +#define 
VSC73XX_MAC_CFG_SEED_LOAD BIT(27) +#define VSC73XX_MAC_CFG_SEED_MASK GENMASK(26, 19) +#define VSC73XX_MAC_CFG_SEED_OFFSET 19 +#define VSC73XX_MAC_CFG_FDX BIT(18) +#define VSC73XX_MAC_CFG_GIGA_MODE BIT(17) +#define VSC73XX_MAC_CFG_RX_EN BIT(16) +#define VSC73XX_MAC_CFG_VLAN_DBLAWR BIT(15) +#define VSC73XX_MAC_CFG_VLAN_AWR BIT(14) +#define VSC73XX_MAC_CFG_100_BASE_T BIT(13) /* Not in manual */ +#define VSC73XX_MAC_CFG_TX_IPG_MASK GENMASK(10, 6) +#define VSC73XX_MAC_CFG_TX_IPG_OFFSET 6 +#define VSC73XX_MAC_CFG_TX_IPG_1000M (6 << VSC73XX_MAC_CFG_TX_IPG_OFFSET) +#define VSC73XX_MAC_CFG_TX_IPG_100_10M (17 << VSC73XX_MAC_CFG_TX_IPG_OFFSET) +#define VSC73XX_MAC_CFG_MAC_RX_RST BIT(5) +#define VSC73XX_MAC_CFG_MAC_TX_RST BIT(4) +#define VSC73XX_MAC_CFG_CLK_SEL_MASK GENMASK(2, 0) +#define VSC73XX_MAC_CFG_CLK_SEL_OFFSET 0 +#define VSC73XX_MAC_CFG_CLK_SEL_1000M 1 +#define VSC73XX_MAC_CFG_CLK_SEL_100M 2 +#define VSC73XX_MAC_CFG_CLK_SEL_10M 3 +#define VSC73XX_MAC_CFG_CLK_SEL_EXT 4 + +#define VSC73XX_MAC_CFG_1000M_F_PHY (VSC73XX_MAC_CFG_FDX | \ + VSC73XX_MAC_CFG_GIGA_MODE | \ + VSC73XX_MAC_CFG_TX_IPG_1000M | \ + VSC73XX_MAC_CFG_CLK_SEL_EXT) +#define VSC73XX_MAC_CFG_100_10M_F_PHY (VSC73XX_MAC_CFG_FDX | \ + VSC73XX_MAC_CFG_TX_IPG_100_10M | \ + VSC73XX_MAC_CFG_CLK_SEL_EXT) +#define VSC73XX_MAC_CFG_100_10M_H_PHY (VSC73XX_MAC_CFG_TX_IPG_100_10M | \ + VSC73XX_MAC_CFG_CLK_SEL_EXT) +#define VSC73XX_MAC_CFG_1000M_F_RGMII (VSC73XX_MAC_CFG_FDX | \ + VSC73XX_MAC_CFG_GIGA_MODE | \ + VSC73XX_MAC_CFG_TX_IPG_1000M | \ + VSC73XX_MAC_CFG_CLK_SEL_1000M) +#define VSC73XX_MAC_CFG_RESET (VSC73XX_MAC_CFG_PORT_RST | \ + VSC73XX_MAC_CFG_MAC_RX_RST | \ + VSC73XX_MAC_CFG_MAC_TX_RST) + +/* Flow control register bits */ +#define VSC73XX_FCCONF_ZERO_PAUSE_EN BIT(17) +#define VSC73XX_FCCONF_FLOW_CTRL_OBEY BIT(16) +#define VSC73XX_FCCONF_PAUSE_VAL_MASK GENMASK(15, 0) + +/* ADVPORTM advanced port setup register bits */ +#define VSC73XX_ADVPORTM_IFG_PPM BIT(7) +#define VSC73XX_ADVPORTM_EXC_COL_CONT BIT(6) +#define VSC73XX_ADVPORTM_EXT_PORT BIT(5) +#define VSC73XX_ADVPORTM_INV_GTX BIT(4) +#define VSC73XX_ADVPORTM_ENA_GTX BIT(3) +#define VSC73XX_ADVPORTM_DDR_MODE BIT(2) +#define VSC73XX_ADVPORTM_IO_LOOPBACK BIT(1) +#define VSC73XX_ADVPORTM_HOST_LOOPBACK BIT(0) + +/* CAT_DROP categorizer frame dropping register bits */ +#define VSC73XX_CAT_DROP_DROP_MC_SMAC_ENA BIT(6) +#define VSC73XX_CAT_DROP_FWD_CTRL_ENA BIT(4) +#define VSC73XX_CAT_DROP_FWD_PAUSE_ENA BIT(3) +#define VSC73XX_CAT_DROP_UNTAGGED_ENA BIT(2) +#define VSC73XX_CAT_DROP_TAGGED_ENA BIT(1) +#define VSC73XX_CAT_DROP_NULL_MAC_ENA BIT(0) + +#define VSC73XX_Q_MISC_CONF_EXTENT_MEM BIT(31) +#define VSC73XX_Q_MISC_CONF_EARLY_TX_MASK GENMASK(4, 1) +#define VSC73XX_Q_MISC_CONF_EARLY_TX_512 (1 << 1) +#define VSC73XX_Q_MISC_CONF_MAC_PAUSE_MODE BIT(0) + +/* Frame analyzer block 2 registers */ +#define VSC73XX_STORMLIMIT 0x02 +#define VSC73XX_ADVLEARN 0x03 +#define VSC73XX_IFLODMSK 0x04 +#define VSC73XX_VLANMASK 0x05 +#define VSC73XX_MACHDATA 0x06 +#define VSC73XX_MACLDATA 0x07 +#define VSC73XX_ANMOVED 0x08 +#define VSC73XX_ANAGEFIL 0x09 +#define VSC73XX_ANEVENTS 0x0a +#define VSC73XX_ANCNTMASK 0x0b +#define VSC73XX_ANCNTVAL 0x0c +#define VSC73XX_LEARNMASK 0x0d +#define VSC73XX_UFLODMASK 0x0e +#define VSC73XX_MFLODMASK 0x0f +#define VSC73XX_RECVMASK 0x10 +#define VSC73XX_AGGRCTRL 0x20 +#define VSC73XX_AGGRMSKS 0x30 /* Until 0x3f */ +#define VSC73XX_DSTMASKS 0x40 /* Until 0x7f */ +#define VSC73XX_SRCMASKS 0x80 /* Until 0x87 */ +#define VSC73XX_CAPENAB 0xa0 +#define VSC73XX_MACACCESS 0xb0 +#define 
VSC73XX_IPMCACCESS 0xb1 +#define VSC73XX_MACTINDX 0xc0 +#define VSC73XX_VLANACCESS 0xd0 +#define VSC73XX_VLANTIDX 0xe0 +#define VSC73XX_AGENCTRL 0xf0 +#define VSC73XX_CAPRST 0xff + +#define VSC73XX_MACACCESS_CPU_COPY BIT(14) +#define VSC73XX_MACACCESS_FWD_KILL BIT(13) +#define VSC73XX_MACACCESS_IGNORE_VLAN BIT(12) +#define VSC73XX_MACACCESS_AGED_FLAG BIT(11) +#define VSC73XX_MACACCESS_VALID BIT(10) +#define VSC73XX_MACACCESS_LOCKED BIT(9) +#define VSC73XX_MACACCESS_DEST_IDX_MASK GENMASK(8, 3) +#define VSC73XX_MACACCESS_CMD_MASK GENMASK(2, 0) +#define VSC73XX_MACACCESS_CMD_IDLE 0 +#define VSC73XX_MACACCESS_CMD_LEARN 1 +#define VSC73XX_MACACCESS_CMD_FORGET 2 +#define VSC73XX_MACACCESS_CMD_AGE_TABLE 3 +#define VSC73XX_MACACCESS_CMD_FLUSH_TABLE 4 +#define VSC73XX_MACACCESS_CMD_CLEAR_TABLE 5 +#define VSC73XX_MACACCESS_CMD_READ_ENTRY 6 +#define VSC73XX_MACACCESS_CMD_WRITE_ENTRY 7 + +#define VSC73XX_VLANACCESS_LEARN_DISABLED BIT(30) +#define VSC73XX_VLANACCESS_VLAN_MIRROR BIT(29) +#define VSC73XX_VLANACCESS_VLAN_SRC_CHECK BIT(28) +#define VSC73XX_VLANACCESS_VLAN_PORT_MASK GENMASK(9, 2) +#define VSC73XX_VLANACCESS_VLAN_TBL_CMD_MASK GENMASK(2, 0) +#define VSC73XX_VLANACCESS_VLAN_TBL_CMD_IDLE 0 +#define VSC73XX_VLANACCESS_VLAN_TBL_CMD_READ_ENTRY 1 +#define VSC73XX_VLANACCESS_VLAN_TBL_CMD_WRITE_ENTRY 2 +#define VSC73XX_VLANACCESS_VLAN_TBL_CMD_CLEAR_TABLE 3 + +/* MII block 3 registers */ +#define VSC73XX_MII_STAT 0x0 +#define VSC73XX_MII_CMD 0x1 +#define VSC73XX_MII_DATA 0x2 + +/* Arbiter block 5 registers */ +#define VSC73XX_ARBEMPTY 0x0c +#define VSC73XX_ARBDISC 0x0e +#define VSC73XX_SBACKWDROP 0x12 +#define VSC73XX_DBACKWDROP 0x13 +#define VSC73XX_ARBBURSTPROB 0x15 + +/* System block 7 registers */ +#define VSC73XX_ICPU_SIPAD 0x01 +#define VSC73XX_GMIIDELAY 0x05 +#define VSC73XX_ICPU_CTRL 0x10 +#define VSC73XX_ICPU_ADDR 0x11 +#define VSC73XX_ICPU_SRAM 0x12 +#define VSC73XX_HWSEM 0x13 +#define VSC73XX_GLORESET 0x14 +#define VSC73XX_ICPU_MBOX_VAL 0x15 +#define VSC73XX_ICPU_MBOX_SET 0x16 +#define VSC73XX_ICPU_MBOX_CLR 0x17 +#define VSC73XX_CHIPID 0x18 +#define VSC73XX_GPIO 0x34 + +#define VSC73XX_GMIIDELAY_GMII0_GTXDELAY_NONE 0 +#define VSC73XX_GMIIDELAY_GMII0_GTXDELAY_1_4_NS 1 +#define VSC73XX_GMIIDELAY_GMII0_GTXDELAY_1_7_NS 2 +#define VSC73XX_GMIIDELAY_GMII0_GTXDELAY_2_0_NS 3 + +#define VSC73XX_GMIIDELAY_GMII0_RXDELAY_NONE (0 << 4) +#define VSC73XX_GMIIDELAY_GMII0_RXDELAY_1_4_NS (1 << 4) +#define VSC73XX_GMIIDELAY_GMII0_RXDELAY_1_7_NS (2 << 4) +#define VSC73XX_GMIIDELAY_GMII0_RXDELAY_2_0_NS (3 << 4) + +#define VSC73XX_ICPU_CTRL_WATCHDOG_RST BIT(31) +#define VSC73XX_ICPU_CTRL_CLK_DIV_MASK GENMASK(12, 8) +#define VSC73XX_ICPU_CTRL_SRST_HOLD BIT(7) +#define VSC73XX_ICPU_CTRL_ICPU_PI_EN BIT(6) +#define VSC73XX_ICPU_CTRL_BOOT_EN BIT(3) +#define VSC73XX_ICPU_CTRL_EXT_ACC_EN BIT(2) +#define VSC73XX_ICPU_CTRL_CLK_EN BIT(1) +#define VSC73XX_ICPU_CTRL_SRST BIT(0) + +#define VSC73XX_CHIPID_ID_SHIFT 12 +#define VSC73XX_CHIPID_ID_MASK 0xffff +#define VSC73XX_CHIPID_REV_SHIFT 28 +#define VSC73XX_CHIPID_REV_MASK 0xf +#define VSC73XX_CHIPID_ID_7385 0x7385 +#define VSC73XX_CHIPID_ID_7388 0x7388 +#define VSC73XX_CHIPID_ID_7395 0x7395 +#define VSC73XX_CHIPID_ID_7398 0x7398 + +#define VSC73XX_GLORESET_STROBE BIT(4) +#define VSC73XX_GLORESET_ICPU_LOCK BIT(3) +#define VSC73XX_GLORESET_MEM_LOCK BIT(2) +#define VSC73XX_GLORESET_PHY_RESET BIT(1) +#define VSC73XX_GLORESET_MASTER_RESET BIT(0) + +#define VSC73XX_CMD_MODE_READ 0 +#define VSC73XX_CMD_MODE_WRITE 1 +#define VSC73XX_CMD_MODE_SHIFT 4 +#define VSC73XX_CMD_BLOCK_SHIFT 
5 +#define VSC73XX_CMD_BLOCK_MASK 0x7 +#define VSC73XX_CMD_SUBBLOCK_MASK 0xf + +#define VSC7385_CLOCK_DELAY ((3 << 4) | 3) +#define VSC7385_CLOCK_DELAY_MASK ((3 << 4) | 3) + +#define VSC73XX_ICPU_CTRL_STOP (VSC73XX_ICPU_CTRL_SRST_HOLD | \ + VSC73XX_ICPU_CTRL_BOOT_EN | \ + VSC73XX_ICPU_CTRL_EXT_ACC_EN) + +#define VSC73XX_ICPU_CTRL_START (VSC73XX_ICPU_CTRL_CLK_DIV | \ + VSC73XX_ICPU_CTRL_BOOT_EN | \ + VSC73XX_ICPU_CTRL_CLK_EN | \ + VSC73XX_ICPU_CTRL_SRST) + +/** + * struct vsc73xx - VSC73xx state container + */ +struct vsc73xx { + struct device *dev; + struct gpio_desc *reset; + struct spi_device *spi; + struct dsa_switch *ds; + struct gpio_chip gc; + u16 chipid; + u8 addr[ETH_ALEN]; + struct mutex lock; /* Protects SPI traffic */ +}; + +#define IS_7385(a) ((a)->chipid == VSC73XX_CHIPID_ID_7385) +#define IS_7388(a) ((a)->chipid == VSC73XX_CHIPID_ID_7388) +#define IS_7395(a) ((a)->chipid == VSC73XX_CHIPID_ID_7395) +#define IS_7398(a) ((a)->chipid == VSC73XX_CHIPID_ID_7398) +#define IS_739X(a) (IS_7395(a) || IS_7398(a)) + +struct vsc73xx_counter { + u8 counter; + const char *name; +}; + +/* Counters are named according to the MIB standards where applicable. + * Some counters are custom, non-standard. The standard counters are + * named in accordance with RFC2819, RFC2021 and IEEE Std 802.3-2002 Annex + * 30A Counters. + */ +static const struct vsc73xx_counter vsc73xx_rx_counters[] = { + { 0, "RxEtherStatsPkts" }, + { 1, "RxBroadcast+MulticastPkts" }, /* non-standard counter */ + { 2, "RxTotalErrorPackets" }, /* non-standard counter */ + { 3, "RxEtherStatsBroadcastPkts" }, + { 4, "RxEtherStatsMulticastPkts" }, + { 5, "RxEtherStatsPkts64Octets" }, + { 6, "RxEtherStatsPkts65to127Octets" }, + { 7, "RxEtherStatsPkts128to255Octets" }, + { 8, "RxEtherStatsPkts256to511Octets" }, + { 9, "RxEtherStatsPkts512to1023Octets" }, + { 10, "RxEtherStatsPkts1024to1518Octets" }, + { 11, "RxJumboFrames" }, /* non-standard counter */ + { 12, "RxaPauseMACControlFramesTransmitted" }, + { 13, "RxFIFODrops" }, /* non-standard counter */ + { 14, "RxBackwardDrops" }, /* non-standard counter */ + { 15, "RxClassifierDrops" }, /* non-standard counter */ + { 16, "RxEtherStatsCRCAlignErrors" }, + { 17, "RxEtherStatsUndersizePkts" }, + { 18, "RxEtherStatsOversizePkts" }, + { 19, "RxEtherStatsFragments" }, + { 20, "RxEtherStatsJabbers" }, + { 21, "RxaMACControlFramesReceived" }, + /* 22-24 are undefined */ + { 25, "RxaFramesReceivedOK" }, + { 26, "RxQoSClass0" }, /* non-standard counter */ + { 27, "RxQoSClass1" }, /* non-standard counter */ + { 28, "RxQoSClass2" }, /* non-standard counter */ + { 29, "RxQoSClass3" }, /* non-standard counter */ +}; + +static const struct vsc73xx_counter vsc73xx_tx_counters[] = { + { 0, "TxEtherStatsPkts" }, + { 1, "TxBroadcast+MulticastPkts" }, /* non-standard counter */ + { 2, "TxTotalErrorPackets" }, /* non-standard counter */ + { 3, "TxEtherStatsBroadcastPkts" }, + { 4, "TxEtherStatsMulticastPkts" }, + { 5, "TxEtherStatsPkts64Octets" }, + { 6, "TxEtherStatsPkts65to127Octets" }, + { 7, "TxEtherStatsPkts128to255Octets" }, + { 8, "TxEtherStatsPkts256to511Octets" }, + { 9, "TxEtherStatsPkts512to1023Octets" }, + { 10, "TxEtherStatsPkts1024to1518Octets" }, + { 11, "TxJumboFrames" }, /* non-standard counter */ + { 12, "TxaPauseMACControlFramesTransmitted" }, + { 13, "TxFIFODrops" }, /* non-standard counter */ + { 14, "TxDrops" }, /* non-standard counter */ + { 15, "TxEtherStatsCollisions" }, + { 16, "TxEtherStatsCRCAlignErrors" }, + { 17, "TxEtherStatsUndersizePkts" }, + { 18, 
"TxEtherStatsOversizePkts" }, + { 19, "TxEtherStatsFragments" }, + { 20, "TxEtherStatsJabbers" }, + /* 21-24 are undefined */ + { 25, "TxaFramesReceivedOK" }, + { 26, "TxQoSClass0" }, /* non-standard counter */ + { 27, "TxQoSClass1" }, /* non-standard counter */ + { 28, "TxQoSClass2" }, /* non-standard counter */ + { 29, "TxQoSClass3" }, /* non-standard counter */ +}; + +static int vsc73xx_is_addr_valid(u8 block, u8 subblock) +{ + switch (block) { + case VSC73XX_BLOCK_MAC: + switch (subblock) { + case 0 ... 4: + case 6: + return 1; + } + break; + + case VSC73XX_BLOCK_ANALYZER: + case VSC73XX_BLOCK_SYSTEM: + switch (subblock) { + case 0: + return 1; + } + break; + + case VSC73XX_BLOCK_MII: + case VSC73XX_BLOCK_CAPTURE: + case VSC73XX_BLOCK_ARBITER: + switch (subblock) { + case 0 ... 1: + return 1; + } + break; + } + + return 0; +} + +static u8 vsc73xx_make_addr(u8 mode, u8 block, u8 subblock) +{ + u8 ret; + + ret = (block & VSC73XX_CMD_BLOCK_MASK) << VSC73XX_CMD_BLOCK_SHIFT; + ret |= (mode & 1) << VSC73XX_CMD_MODE_SHIFT; + ret |= subblock & VSC73XX_CMD_SUBBLOCK_MASK; + + return ret; +} + +static int vsc73xx_read(struct vsc73xx *vsc, u8 block, u8 subblock, u8 reg, + u32 *val) +{ + struct spi_transfer t[2]; + struct spi_message m; + u8 cmd[4]; + u8 buf[4]; + int ret; + + if (!vsc73xx_is_addr_valid(block, subblock)) + return -EINVAL; + + spi_message_init(&m); + + memset(&t, 0, sizeof(t)); + + t[0].tx_buf = cmd; + t[0].len = sizeof(cmd); + spi_message_add_tail(&t[0], &m); + + t[1].rx_buf = buf; + t[1].len = sizeof(buf); + spi_message_add_tail(&t[1], &m); + + cmd[0] = vsc73xx_make_addr(VSC73XX_CMD_MODE_READ, block, subblock); + cmd[1] = reg; + cmd[2] = 0; + cmd[3] = 0; + + mutex_lock(&vsc->lock); + ret = spi_sync(vsc->spi, &m); + mutex_unlock(&vsc->lock); + + if (ret) + return ret; + + *val = (buf[0] << 24) | (buf[1] << 16) | (buf[2] << 8) | buf[3]; + + return 0; +} + +static int vsc73xx_write(struct vsc73xx *vsc, u8 block, u8 subblock, u8 reg, + u32 val) +{ + struct spi_transfer t[2]; + struct spi_message m; + u8 cmd[2]; + u8 buf[4]; + int ret; + + if (!vsc73xx_is_addr_valid(block, subblock)) + return -EINVAL; + + spi_message_init(&m); + + memset(&t, 0, sizeof(t)); + + t[0].tx_buf = cmd; + t[0].len = sizeof(cmd); + spi_message_add_tail(&t[0], &m); + + t[1].tx_buf = buf; + t[1].len = sizeof(buf); + spi_message_add_tail(&t[1], &m); + + cmd[0] = vsc73xx_make_addr(VSC73XX_CMD_MODE_WRITE, block, subblock); + cmd[1] = reg; + + buf[0] = (val >> 24) & 0xff; + buf[1] = (val >> 16) & 0xff; + buf[2] = (val >> 8) & 0xff; + buf[3] = val & 0xff; + + mutex_lock(&vsc->lock); + ret = spi_sync(vsc->spi, &m); + mutex_unlock(&vsc->lock); + + return ret; +} + +static int vsc73xx_update_bits(struct vsc73xx *vsc, u8 block, u8 subblock, + u8 reg, u32 mask, u32 val) +{ + u32 tmp, orig; + int ret; + + /* Same read-modify-write algorithm as e.g. 
regmap */ + ret = vsc73xx_read(vsc, block, subblock, reg, &orig); + if (ret) + return ret; + tmp = orig & ~mask; + tmp |= val & mask; + return vsc73xx_write(vsc, block, subblock, reg, tmp); +} + +static int vsc73xx_detect(struct vsc73xx *vsc) +{ + bool icpu_si_boot_en; + bool icpu_pi_en; + u32 val; + u32 rev; + int ret; + u32 id; + + ret = vsc73xx_read(vsc, VSC73XX_BLOCK_SYSTEM, 0, + VSC73XX_ICPU_MBOX_VAL, &val); + if (ret) { + dev_err(vsc->dev, "unable to read mailbox (%d)\n", ret); + return ret; + } + + if (val == 0xffffffff) { + dev_info(vsc->dev, "chip seems dead, assert reset\n"); + gpiod_set_value_cansleep(vsc->reset, 1); + /* Reset pulse should be 20ns minimum, according to datasheet + * table 245, so 10us should be fine + */ + usleep_range(10, 100); + gpiod_set_value_cansleep(vsc->reset, 0); + /* Wait 20ms according to datasheet table 245 */ + msleep(20); + + ret = vsc73xx_read(vsc, VSC73XX_BLOCK_SYSTEM, 0, + VSC73XX_ICPU_MBOX_VAL, &val); + if (val == 0xffffffff) { + dev_err(vsc->dev, "seems not to help, giving up\n"); + return -ENODEV; + } + } + + ret = vsc73xx_read(vsc, VSC73XX_BLOCK_SYSTEM, 0, + VSC73XX_CHIPID, &val); + if (ret) { + dev_err(vsc->dev, "unable to read chip id (%d)\n", ret); + return ret; + } + + id = (val >> VSC73XX_CHIPID_ID_SHIFT) & + VSC73XX_CHIPID_ID_MASK; + switch (id) { + case VSC73XX_CHIPID_ID_7385: + case VSC73XX_CHIPID_ID_7388: + case VSC73XX_CHIPID_ID_7395: + case VSC73XX_CHIPID_ID_7398: + break; + default: + dev_err(vsc->dev, "unsupported chip, id=%04x\n", id); + return -ENODEV; + } + + vsc->chipid = id; + rev = (val >> VSC73XX_CHIPID_REV_SHIFT) & + VSC73XX_CHIPID_REV_MASK; + dev_info(vsc->dev, "VSC%04X (rev: %d) switch found\n", id, rev); + + ret = vsc73xx_read(vsc, VSC73XX_BLOCK_SYSTEM, 0, + VSC73XX_ICPU_CTRL, &val); + if (ret) { + dev_err(vsc->dev, "unable to read iCPU control\n"); + return ret; + } + + /* The iCPU can always be used but can boot in different ways. + * If it is initially disabled and has no external memory, + * we are in control and can do whatever we like, else we + * are probably in trouble (we need some way to communicate + * with the running firmware) so we bail out for now. 
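[Editor's aside] The chip detection above unpacks the product ID and revision from the VSC73XX_CHIPID register using the shift/mask constants defined earlier (ID in bits 27:12, revision in bits 31:28). A minimal user-space sketch of the same decode; the raw value 0x17385000 is a made-up sample, not something read from hardware:

#include <stdint.h>
#include <stdio.h>

#define VSC73XX_CHIPID_ID_SHIFT   12
#define VSC73XX_CHIPID_ID_MASK    0xffff
#define VSC73XX_CHIPID_REV_SHIFT  28
#define VSC73XX_CHIPID_REV_MASK   0xf

int main(void)
{
        uint32_t val = 0x17385000; /* hypothetical raw CHIPID readout */
        uint32_t id = (val >> VSC73XX_CHIPID_ID_SHIFT) & VSC73XX_CHIPID_ID_MASK;
        uint32_t rev = (val >> VSC73XX_CHIPID_REV_SHIFT) & VSC73XX_CHIPID_REV_MASK;

        /* Prints "VSC7385 (rev: 1)" for this sample value */
        printf("VSC%04X (rev: %u)\n", (unsigned int)id, (unsigned int)rev);
        return 0;
}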
+ */ + icpu_pi_en = !!(val & VSC73XX_ICPU_CTRL_ICPU_PI_EN); + icpu_si_boot_en = !!(val & VSC73XX_ICPU_CTRL_BOOT_EN); + if (icpu_si_boot_en && icpu_pi_en) { + dev_err(vsc->dev, + "iCPU enabled boots from SI, has external memory\n"); + dev_err(vsc->dev, "no idea how to deal with this\n"); + return -ENODEV; + } + if (icpu_si_boot_en && !icpu_pi_en) { + dev_err(vsc->dev, + "iCPU enabled boots from SI, no external memory\n"); + dev_err(vsc->dev, "no idea how to deal with this\n"); + return -ENODEV; + } + if (!icpu_si_boot_en && icpu_pi_en) { + dev_err(vsc->dev, + "iCPU enabled, boots from PI external memory\n"); + dev_err(vsc->dev, "no idea how to deal with this\n"); + return -ENODEV; + } + /* !icpu_si_boot_en && !cpu_pi_en */ + dev_info(vsc->dev, "iCPU disabled, no external memory\n"); + + return 0; +} + +static int vsc73xx_phy_read(struct dsa_switch *ds, int phy, int regnum) +{ + struct vsc73xx *vsc = ds->priv; + u32 cmd; + u32 val; + int ret; + + /* Setting bit 26 means "read" */ + cmd = BIT(26) | (phy << 21) | (regnum << 16); + ret = vsc73xx_write(vsc, VSC73XX_BLOCK_MII, 0, 1, cmd); + if (ret) + return ret; + msleep(2); + ret = vsc73xx_read(vsc, VSC73XX_BLOCK_MII, 0, 2, &val); + if (ret) + return ret; + if (val & BIT(16)) { + dev_err(vsc->dev, "reading reg %02x from phy%d failed\n", + regnum, phy); + return -EIO; + } + val &= 0xFFFFU; + + dev_dbg(vsc->dev, "read reg %02x from phy%d = %04x\n", + regnum, phy, val); + + return val; +} + +static int vsc73xx_phy_write(struct dsa_switch *ds, int phy, int regnum, + u16 val) +{ + struct vsc73xx *vsc = ds->priv; + u32 cmd; + int ret; + + /* It was found through tedious experiments that this router + * chip really hates to have it's PHYs reset. They + * never recover if that happens: autonegotiation stops + * working after a reset. Just filter out this command. + * (Resetting the whole chip is OK.) + */ + if (regnum == 0 && (val & BIT(15))) { + dev_info(vsc->dev, "reset PHY - disallowed\n"); + return 0; + } + + cmd = (phy << 21) | (regnum << 16); + ret = vsc73xx_write(vsc, VSC73XX_BLOCK_MII, 0, 1, cmd); + if (ret) + return ret; + + dev_dbg(vsc->dev, "write %04x to reg %02x in phy%d\n", + val, regnum, phy); + return 0; +} + +static enum dsa_tag_protocol vsc73xx_get_tag_protocol(struct dsa_switch *ds, + int port) +{ + /* The switch internally uses a 8 byte header with length, + * source port, tag, LPA and priority. This is supposedly + * only accessible when operating the switch using the internal + * CPU or with an external CPU mapping the device in, but not + * when operating the switch over SPI and putting frames in/out + * on port 6 (the CPU port). So far we must assume that we + * cannot access the tag. (See "Internal frame header" section + * 3.9.1 in the manual.) + */ + return DSA_TAG_PROTO_NONE; +} + +static int vsc73xx_setup(struct dsa_switch *ds) +{ + struct vsc73xx *vsc = ds->priv; + int i; + + dev_info(vsc->dev, "set up the switch\n"); + + /* Issue RESET */ + vsc73xx_write(vsc, VSC73XX_BLOCK_SYSTEM, 0, VSC73XX_GLORESET, + VSC73XX_GLORESET_MASTER_RESET); + usleep_range(125, 200); + + /* Initialize memory, initialize RAM bank 0..15 except 6 and 7 + * This sequence appears in the + * VSC7385 SparX-G5 datasheet section 6.6.1 + * VSC7395 SparX-G5e datasheet section 6.6.1 + * "initialization sequence". + * No explanation is given to the 0x1010400 magic number. 
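[Editor's aside] The PHY read path above drives the internal MII block with a single command word: bit 26 selects a read, the PHY address sits in bits 25:21 and the register number in bits 20:16, with the result (and an error flag in bit 16) coming back through the data register. A rough illustration of how that word is composed; vsc73xx_mii_read_cmd is a hypothetical helper, not part of the driver:

#include <stdint.h>
#include <stdio.h>

/* Layout used by vsc73xx_phy_read() above: bit 26 = read, PHY address in
 * bits 25:21, register number in bits 20:16. Illustrative sketch only.
 */
static uint32_t vsc73xx_mii_read_cmd(unsigned int phy, unsigned int regnum)
{
        return (1U << 26) | (phy << 21) | (regnum << 16);
}

int main(void)
{
        /* Read BMSR (register 1) of PHY 2: yields 0x04410000 */
        printf("0x%08x\n", vsc73xx_mii_read_cmd(2, 1));
        return 0;
}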
+ */ + for (i = 0; i <= 15; i++) { + if (i != 6 && i != 7) { + vsc73xx_write(vsc, VSC73XX_BLOCK_MEMINIT, + 2, + 0, 0x1010400 + i); + mdelay(1); + } + } + mdelay(30); + + /* Clear MAC table */ + vsc73xx_write(vsc, VSC73XX_BLOCK_ANALYZER, 0, + VSC73XX_MACACCESS, + VSC73XX_MACACCESS_CMD_CLEAR_TABLE); + + /* Clear VLAN table */ + vsc73xx_write(vsc, VSC73XX_BLOCK_ANALYZER, 0, + VSC73XX_VLANACCESS, + VSC73XX_VLANACCESS_VLAN_TBL_CMD_CLEAR_TABLE); + + msleep(40); + + /* Use 20KiB buffers on all ports on VSC7395 + * The VSC7385 has 16KiB buffers and that is the + * default if we don't set this up explicitly. + * Port "31" is "all ports". + */ + if (IS_739X(vsc)) + vsc73xx_write(vsc, VSC73XX_BLOCK_MAC, 0x1f, + VSC73XX_Q_MISC_CONF, + VSC73XX_Q_MISC_CONF_EXTENT_MEM); + + /* Put all ports into reset until enabled */ + for (i = 0; i < 7; i++) { + if (i == 5) + continue; + vsc73xx_write(vsc, VSC73XX_BLOCK_MAC, 4, + VSC73XX_MAC_CFG, VSC73XX_MAC_CFG_RESET); + } + + /* MII delay, set both GTX and RX delay to 2 ns */ + vsc73xx_write(vsc, VSC73XX_BLOCK_SYSTEM, 0, VSC73XX_GMIIDELAY, + VSC73XX_GMIIDELAY_GMII0_GTXDELAY_2_0_NS | + VSC73XX_GMIIDELAY_GMII0_RXDELAY_2_0_NS); + /* Enable reception of frames on all ports */ + vsc73xx_write(vsc, VSC73XX_BLOCK_ANALYZER, 0, VSC73XX_RECVMASK, + 0x5f); + /* IP multicast flood mask (table 144) */ + vsc73xx_write(vsc, VSC73XX_BLOCK_ANALYZER, 0, VSC73XX_IFLODMSK, + 0xff); + + mdelay(50); + + /* Release reset from the internal PHYs */ + vsc73xx_write(vsc, VSC73XX_BLOCK_SYSTEM, 0, VSC73XX_GLORESET, + VSC73XX_GLORESET_PHY_RESET); + + udelay(4); + + return 0; +} + +static void vsc73xx_init_port(struct vsc73xx *vsc, int port) +{ + u32 val; + + /* MAC configure, first reset the port and then write defaults */ + vsc73xx_write(vsc, VSC73XX_BLOCK_MAC, + port, + VSC73XX_MAC_CFG, + VSC73XX_MAC_CFG_RESET); + + /* Take up the port in 1Gbit mode by default, this will be + * augmented after auto-negotiation on the PHY-facing + * ports. + */ + if (port == CPU_PORT) + val = VSC73XX_MAC_CFG_1000M_F_RGMII; + else + val = VSC73XX_MAC_CFG_1000M_F_PHY; + + vsc73xx_write(vsc, VSC73XX_BLOCK_MAC, + port, + VSC73XX_MAC_CFG, + val | + VSC73XX_MAC_CFG_TX_EN | + VSC73XX_MAC_CFG_RX_EN); + + /* Max length, we can do up to 9.6 KiB, so allow that. + * According to application not "VSC7398 Jumbo Frames" setting + * up the MTU to 9.6 KB does not affect the performance on standard + * frames, so just enable it. It is clear from the application note + * that "9.6 kilobytes" == 9600 bytes. + */ + vsc73xx_write(vsc, VSC73XX_BLOCK_MAC, + port, + VSC73XX_MAXLEN, 9600); + + /* Flow control for the CPU port: + * Use a zero delay pause frame when pause condition is left + * Obey pause control frames + */ + vsc73xx_write(vsc, VSC73XX_BLOCK_MAC, + port, + VSC73XX_FCCONF, + VSC73XX_FCCONF_ZERO_PAUSE_EN | + VSC73XX_FCCONF_FLOW_CTRL_OBEY); + + /* Issue pause control frames on PHY facing ports. + * Allow early initiation of MAC transmission if the amount + * of egress data is below 512 bytes on CPU port. + * FIXME: enable 20KiB buffers? 
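[Editor's aside] The 0x5f written to VSC73XX_RECVMASK above looks like a per-port bitmask covering user ports 0-4 plus the CPU port 6, with the nonexistent port 5 left out; this reading is inferred from the port layout described in the probe function further down, not stated in the patch. A small sketch of how such a mask is built:

#include <stdio.h>

int main(void)
{
        /* Ports 0-4 are the user ports, 6 is the CPU port; port 5 does not
         * exist on these chips (see the probe comment later in this file).
         * Illustration only, based on that inferred port layout.
         */
        static const int ports[] = { 0, 1, 2, 3, 4, 6 };
        unsigned int mask = 0;
        unsigned int i;

        for (i = 0; i < sizeof(ports) / sizeof(ports[0]); i++)
                mask |= 1U << ports[i];

        /* Prints 0x5f, matching the VSC73XX_RECVMASK value written above */
        printf("0x%02x\n", mask);
        return 0;
}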
+ */ + if (port == CPU_PORT) + val = VSC73XX_Q_MISC_CONF_EARLY_TX_512; + else + val = VSC73XX_Q_MISC_CONF_MAC_PAUSE_MODE; + val |= VSC73XX_Q_MISC_CONF_EXTENT_MEM; + vsc73xx_write(vsc, VSC73XX_BLOCK_MAC, + port, + VSC73XX_Q_MISC_CONF, + val); + + /* Flow control MAC: a MAC address used in flow control frames */ + val = (vsc->addr[5] << 16) | (vsc->addr[4] << 8) | (vsc->addr[3]); + vsc73xx_write(vsc, VSC73XX_BLOCK_MAC, + port, + VSC73XX_FCMACHI, + val); + val = (vsc->addr[2] << 16) | (vsc->addr[1] << 8) | (vsc->addr[0]); + vsc73xx_write(vsc, VSC73XX_BLOCK_MAC, + port, + VSC73XX_FCMACLO, + val); + + /* Tell the categorizer to forward pause frames, not control + * frame. Do not drop anything. + */ + vsc73xx_write(vsc, VSC73XX_BLOCK_MAC, + port, + VSC73XX_CAT_DROP, + VSC73XX_CAT_DROP_FWD_PAUSE_ENA); + + /* Clear all counters */ + vsc73xx_write(vsc, VSC73XX_BLOCK_MAC, + port, VSC73XX_C_RX0, 0); +} + +static void vsc73xx_adjust_enable_port(struct vsc73xx *vsc, + int port, struct phy_device *phydev, + u32 initval) +{ + u32 val = initval; + u8 seed; + + /* Reset this port FIXME: break out subroutine */ + val |= VSC73XX_MAC_CFG_RESET; + vsc73xx_write(vsc, VSC73XX_BLOCK_MAC, port, VSC73XX_MAC_CFG, val); + + /* Seed the port randomness with randomness */ + get_random_bytes(&seed, 1); + val |= seed << VSC73XX_MAC_CFG_SEED_OFFSET; + val |= VSC73XX_MAC_CFG_SEED_LOAD; + val |= VSC73XX_MAC_CFG_WEXC_DIS; + vsc73xx_write(vsc, VSC73XX_BLOCK_MAC, port, VSC73XX_MAC_CFG, val); + + /* Flow control for the PHY facing ports: + * Use a zero delay pause frame when pause condition is left + * Obey pause control frames + * When generating pause frames, use 0xff as pause value + */ + vsc73xx_write(vsc, VSC73XX_BLOCK_MAC, port, VSC73XX_FCCONF, + VSC73XX_FCCONF_ZERO_PAUSE_EN | + VSC73XX_FCCONF_FLOW_CTRL_OBEY | + 0xff); + + /* Disallow backward dropping of frames from this port */ + vsc73xx_update_bits(vsc, VSC73XX_BLOCK_ARBITER, 0, + VSC73XX_SBACKWDROP, BIT(port), 0); + + /* Enable TX, RX, deassert reset, stop loading seed */ + vsc73xx_update_bits(vsc, VSC73XX_BLOCK_MAC, port, + VSC73XX_MAC_CFG, + VSC73XX_MAC_CFG_RESET | VSC73XX_MAC_CFG_SEED_LOAD | + VSC73XX_MAC_CFG_TX_EN | VSC73XX_MAC_CFG_RX_EN, + VSC73XX_MAC_CFG_TX_EN | VSC73XX_MAC_CFG_RX_EN); +} + +static void vsc73xx_adjust_link(struct dsa_switch *ds, int port, + struct phy_device *phydev) +{ + struct vsc73xx *vsc = ds->priv; + u32 val; + + /* Special handling of the CPU-facing port */ + if (port == CPU_PORT) { + /* Other ports are already initialized but not this one */ + vsc73xx_init_port(vsc, CPU_PORT); + /* Select the external port for this interface (EXT_PORT) + * Enable the GMII GTX external clock + * Use double data rate (DDR mode) + */ + vsc73xx_write(vsc, VSC73XX_BLOCK_MAC, + CPU_PORT, + VSC73XX_ADVPORTM, + VSC73XX_ADVPORTM_EXT_PORT | + VSC73XX_ADVPORTM_ENA_GTX | + VSC73XX_ADVPORTM_DDR_MODE); + } + + /* This is the MAC confiuration that always need to happen + * after a PHY or the CPU port comes up or down. 
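[Editor's aside] A side note on the flow-control MAC address packing a little further up: the driver splits the six address octets across VSC73XX_FCMACHI (octets 5..3) and VSC73XX_FCMACLO (octets 2..0). A standalone sketch of that exact packing; the address below is an arbitrary example, much like the driver's own eth_random_addr() result:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        /* Example address, randomly chosen for illustration only */
        uint8_t addr[6] = { 0x02, 0x11, 0x22, 0x33, 0x44, 0x55 };
        uint32_t hi, lo;

        /* Same packing as the VSC73XX_FCMACHI/FCMACLO writes above */
        hi = (addr[5] << 16) | (addr[4] << 8) | addr[3];
        lo = (addr[2] << 16) | (addr[1] << 8) | addr[0];

        /* Prints FCMACHI=0x554433 FCMACLO=0x221102 for this address */
        printf("FCMACHI=0x%06x FCMACLO=0x%06x\n", hi, lo);
        return 0;
}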
+ */ + if (!phydev->link) { + int maxloop = 10; + + dev_dbg(vsc->dev, "port %d: went down\n", + port); + + /* Disable RX on this port */ + vsc73xx_update_bits(vsc, VSC73XX_BLOCK_MAC, port, + VSC73XX_MAC_CFG, + VSC73XX_MAC_CFG_RX_EN, 0); + + /* Discard packets */ + vsc73xx_update_bits(vsc, VSC73XX_BLOCK_ARBITER, 0, + VSC73XX_ARBDISC, BIT(port), BIT(port)); + + /* Wait until queue is empty */ + vsc73xx_read(vsc, VSC73XX_BLOCK_ARBITER, 0, + VSC73XX_ARBEMPTY, &val); + while (!(val & BIT(port))) { + msleep(1); + vsc73xx_read(vsc, VSC73XX_BLOCK_ARBITER, 0, + VSC73XX_ARBEMPTY, &val); + if (--maxloop == 0) { + dev_err(vsc->dev, + "timeout waiting for block arbiter\n"); + /* Continue anyway */ + break; + } + } + + /* Put this port into reset */ + vsc73xx_write(vsc, VSC73XX_BLOCK_MAC, port, VSC73XX_MAC_CFG, + VSC73XX_MAC_CFG_RESET); + + /* Accept packets again */ + vsc73xx_update_bits(vsc, VSC73XX_BLOCK_ARBITER, 0, + VSC73XX_ARBDISC, BIT(port), 0); + + /* Allow backward dropping of frames from this port */ + vsc73xx_update_bits(vsc, VSC73XX_BLOCK_ARBITER, 0, + VSC73XX_SBACKWDROP, BIT(port), BIT(port)); + + /* Receive mask (disable forwarding) */ + vsc73xx_update_bits(vsc, VSC73XX_BLOCK_ANALYZER, 0, + VSC73XX_RECVMASK, BIT(port), 0); + + return; + } + + /* Figure out what speed was negotiated */ + if (phydev->speed == SPEED_1000) { + dev_dbg(vsc->dev, "port %d: 1000 Mbit mode full duplex\n", + port); + + /* Set up default for internal port or external RGMII */ + if (phydev->interface == PHY_INTERFACE_MODE_RGMII) + val = VSC73XX_MAC_CFG_1000M_F_RGMII; + else + val = VSC73XX_MAC_CFG_1000M_F_PHY; + vsc73xx_adjust_enable_port(vsc, port, phydev, val); + } else if (phydev->speed == SPEED_100) { + if (phydev->duplex == DUPLEX_FULL) { + val = VSC73XX_MAC_CFG_100_10M_F_PHY; + dev_dbg(vsc->dev, + "port %d: 100 Mbit full duplex mode\n", + port); + } else { + val = VSC73XX_MAC_CFG_100_10M_H_PHY; + dev_dbg(vsc->dev, + "port %d: 100 Mbit half duplex mode\n", + port); + } + vsc73xx_adjust_enable_port(vsc, port, phydev, val); + } else if (phydev->speed == SPEED_10) { + if (phydev->duplex == DUPLEX_FULL) { + val = VSC73XX_MAC_CFG_100_10M_F_PHY; + dev_dbg(vsc->dev, + "port %d: 10 Mbit full duplex mode\n", + port); + } else { + val = VSC73XX_MAC_CFG_100_10M_H_PHY; + dev_dbg(vsc->dev, + "port %d: 10 Mbit half duplex mode\n", + port); + } + vsc73xx_adjust_enable_port(vsc, port, phydev, val); + } else { + dev_err(vsc->dev, + "could not adjust link: unknown speed\n"); + } + + /* Enable port (forwarding) in the receieve mask */ + vsc73xx_update_bits(vsc, VSC73XX_BLOCK_ANALYZER, 0, + VSC73XX_RECVMASK, BIT(port), BIT(port)); +} + +static int vsc73xx_port_enable(struct dsa_switch *ds, int port, + struct phy_device *phy) +{ + struct vsc73xx *vsc = ds->priv; + + dev_info(vsc->dev, "enable port %d\n", port); + vsc73xx_init_port(vsc, port); + + return 0; +} + +static void vsc73xx_port_disable(struct dsa_switch *ds, int port, + struct phy_device *phy) +{ + struct vsc73xx *vsc = ds->priv; + + /* Just put the port into reset */ + vsc73xx_write(vsc, VSC73XX_BLOCK_MAC, port, + VSC73XX_MAC_CFG, VSC73XX_MAC_CFG_RESET); +} + +static const struct vsc73xx_counter * +vsc73xx_find_counter(struct vsc73xx *vsc, + u8 counter, + bool tx) +{ + const struct vsc73xx_counter *cnts; + int num_cnts; + int i; + + if (tx) { + cnts = vsc73xx_tx_counters; + num_cnts = ARRAY_SIZE(vsc73xx_tx_counters); + } else { + cnts = vsc73xx_rx_counters; + num_cnts = ARRAY_SIZE(vsc73xx_rx_counters); + } + + for (i = 0; i < num_cnts; i++) { + const struct vsc73xx_counter 
*cnt; + + cnt = &cnts[i]; + if (cnt->counter == counter) + return cnt; + } + + return NULL; +} + +static void vsc73xx_get_strings(struct dsa_switch *ds, int port, u32 stringset, + uint8_t *data) +{ + const struct vsc73xx_counter *cnt; + struct vsc73xx *vsc = ds->priv; + u8 indices[6]; + int i, j; + u32 val; + int ret; + + if (stringset != ETH_SS_STATS) + return; + + ret = vsc73xx_read(vsc, VSC73XX_BLOCK_MAC, port, + VSC73XX_C_CFG, &val); + if (ret) + return; + + indices[0] = (val & 0x1f); /* RX counter 0 */ + indices[1] = ((val >> 5) & 0x1f); /* RX counter 1 */ + indices[2] = ((val >> 10) & 0x1f); /* RX counter 2 */ + indices[3] = ((val >> 16) & 0x1f); /* TX counter 0 */ + indices[4] = ((val >> 21) & 0x1f); /* TX counter 1 */ + indices[5] = ((val >> 26) & 0x1f); /* TX counter 2 */ + + /* The first counters is the RX octets */ + j = 0; + strncpy(data + j * ETH_GSTRING_LEN, + "RxEtherStatsOctets", ETH_GSTRING_LEN); + j++; + + /* Each port supports recording 3 RX counters and 3 TX counters, + * figure out what counters we use in this set-up and return the + * names of them. The hardware default counters will be number of + * packets on RX/TX, combined broadcast+multicast packets RX/TX and + * total error packets RX/TX. + */ + for (i = 0; i < 3; i++) { + cnt = vsc73xx_find_counter(vsc, indices[i], false); + if (cnt) + strncpy(data + j * ETH_GSTRING_LEN, + cnt->name, ETH_GSTRING_LEN); + j++; + } + + /* TX stats begins with the number of TX octets */ + strncpy(data + j * ETH_GSTRING_LEN, + "TxEtherStatsOctets", ETH_GSTRING_LEN); + j++; + + for (i = 3; i < 6; i++) { + cnt = vsc73xx_find_counter(vsc, indices[i], true); + if (cnt) + strncpy(data + j * ETH_GSTRING_LEN, + cnt->name, ETH_GSTRING_LEN); + j++; + } +} + +static int vsc73xx_get_sset_count(struct dsa_switch *ds, int port, int sset) +{ + /* We only support SS_STATS */ + if (sset != ETH_SS_STATS) + return 0; + /* RX and TX packets, then 3 RX counters, 3 TX counters */ + return 8; +} + +static void vsc73xx_get_ethtool_stats(struct dsa_switch *ds, int port, + uint64_t *data) +{ + struct vsc73xx *vsc = ds->priv; + u8 regs[] = { + VSC73XX_RXOCT, + VSC73XX_C_RX0, + VSC73XX_C_RX1, + VSC73XX_C_RX2, + VSC73XX_TXOCT, + VSC73XX_C_TX0, + VSC73XX_C_TX1, + VSC73XX_C_TX2, + }; + u32 val; + int ret; + int i; + + for (i = 0; i < ARRAY_SIZE(regs); i++) { + ret = vsc73xx_read(vsc, VSC73XX_BLOCK_MAC, port, + regs[i], &val); + if (ret) { + dev_err(vsc->dev, "error reading counter %d\n", i); + return; + } + data[i] = val; + } +} + +static const struct dsa_switch_ops vsc73xx_ds_ops = { + .get_tag_protocol = vsc73xx_get_tag_protocol, + .setup = vsc73xx_setup, + .phy_read = vsc73xx_phy_read, + .phy_write = vsc73xx_phy_write, + .adjust_link = vsc73xx_adjust_link, + .get_strings = vsc73xx_get_strings, + .get_ethtool_stats = vsc73xx_get_ethtool_stats, + .get_sset_count = vsc73xx_get_sset_count, + .port_enable = vsc73xx_port_enable, + .port_disable = vsc73xx_port_disable, +}; + +static int vsc73xx_gpio_get(struct gpio_chip *chip, unsigned int offset) +{ + struct vsc73xx *vsc = gpiochip_get_data(chip); + u32 val; + int ret; + + ret = vsc73xx_read(vsc, VSC73XX_BLOCK_SYSTEM, 0, + VSC73XX_GPIO, &val); + if (ret) + return ret; + + return !!(val & BIT(offset)); +} + +static void vsc73xx_gpio_set(struct gpio_chip *chip, unsigned int offset, + int val) +{ + struct vsc73xx *vsc = gpiochip_get_data(chip); + u32 tmp = val ? 
BIT(offset) : 0; + + vsc73xx_update_bits(vsc, VSC73XX_BLOCK_SYSTEM, 0, + VSC73XX_GPIO, BIT(offset), tmp); +} + +static int vsc73xx_gpio_direction_output(struct gpio_chip *chip, + unsigned int offset, int val) +{ + struct vsc73xx *vsc = gpiochip_get_data(chip); + u32 tmp = val ? BIT(offset) : 0; + + return vsc73xx_update_bits(vsc, VSC73XX_BLOCK_SYSTEM, 0, + VSC73XX_GPIO, BIT(offset + 4) | BIT(offset), + BIT(offset + 4) | tmp); +} + +static int vsc73xx_gpio_direction_input(struct gpio_chip *chip, + unsigned int offset) +{ + struct vsc73xx *vsc = gpiochip_get_data(chip); + + return vsc73xx_update_bits(vsc, VSC73XX_BLOCK_SYSTEM, 0, + VSC73XX_GPIO, BIT(offset + 4), + 0); +} + +static int vsc73xx_gpio_get_direction(struct gpio_chip *chip, + unsigned int offset) +{ + struct vsc73xx *vsc = gpiochip_get_data(chip); + u32 val; + int ret; + + ret = vsc73xx_read(vsc, VSC73XX_BLOCK_SYSTEM, 0, + VSC73XX_GPIO, &val); + if (ret) + return ret; + + return !(val & BIT(offset + 4)); +} + +static int vsc73xx_gpio_probe(struct vsc73xx *vsc) +{ + int ret; + + vsc->gc.label = devm_kasprintf(vsc->dev, GFP_KERNEL, "VSC%04x", + vsc->chipid); + vsc->gc.ngpio = 4; + vsc->gc.owner = THIS_MODULE; + vsc->gc.parent = vsc->dev; + vsc->gc.of_node = vsc->dev->of_node; + vsc->gc.base = -1; + vsc->gc.get = vsc73xx_gpio_get; + vsc->gc.set = vsc73xx_gpio_set; + vsc->gc.direction_input = vsc73xx_gpio_direction_input; + vsc->gc.direction_output = vsc73xx_gpio_direction_output; + vsc->gc.get_direction = vsc73xx_gpio_get_direction; + vsc->gc.can_sleep = true; + ret = devm_gpiochip_add_data(vsc->dev, &vsc->gc, vsc); + if (ret) { + dev_err(vsc->dev, "unable to register GPIO chip\n"); + return ret; + } + return 0; +} + +static int vsc73xx_probe(struct spi_device *spi) +{ + struct device *dev = &spi->dev; + struct vsc73xx *vsc; + int ret; + + vsc = devm_kzalloc(dev, sizeof(*vsc), GFP_KERNEL); + if (!vsc) + return -ENOMEM; + + spi_set_drvdata(spi, vsc); + vsc->spi = spi_dev_get(spi); + vsc->dev = dev; + mutex_init(&vsc->lock); + + /* Release reset, if any */ + vsc->reset = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW); + if (IS_ERR(vsc->reset)) { + dev_err(dev, "failed to get RESET GPIO\n"); + return PTR_ERR(vsc->reset); + } + if (vsc->reset) + /* Wait 20ms according to datasheet table 245 */ + msleep(20); + + spi->mode = SPI_MODE_0; + spi->bits_per_word = 8; + ret = spi_setup(spi); + if (ret < 0) { + dev_err(dev, "spi setup failed.\n"); + return ret; + } + + ret = vsc73xx_detect(vsc); + if (ret) { + dev_err(dev, "no chip found (%d)\n", ret); + return -ENODEV; + } + + eth_random_addr(vsc->addr); + dev_info(vsc->dev, + "MAC for control frames: %02X:%02X:%02X:%02X:%02X:%02X\n", + vsc->addr[0], vsc->addr[1], vsc->addr[2], + vsc->addr[3], vsc->addr[4], vsc->addr[5]); + + /* The VSC7395 switch chips have 5+1 ports which means 5 + * ordinary ports and a sixth CPU port facing the processor + * with an RGMII interface. These ports are numbered 0..4 + * and 6, so they leave a "hole" in the port map for port 5, + * which is invalid. + * + * The VSC7398 has 8 ports, port 7 is again the CPU port. + * + * We allocate 8 ports and avoid access to the nonexistant + * ports. 
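[Editor's aside] The GPIO callbacks above imply a simple layout for the VSC73XX_GPIO register: the low four bits carry the pin values and the next four bits the per-pin direction, with a set direction bit meaning output. That reading is inferred from the code, not taken from a datasheet. A user-space sketch of composing such a register value, using a hypothetical helper:

#include <stdio.h>

/* Inferred layout from the callbacks above: bits 3:0 = pin values,
 * bits 7:4 = direction (1 = output). Illustration, not a datasheet quote.
 */
static unsigned int vsc73xx_gpio_make_output(unsigned int reg,
                                             unsigned int offset, int value)
{
        reg |= 1U << (offset + 4);      /* direction: output */
        if (value)
                reg |= 1U << offset;    /* drive high */
        else
                reg &= ~(1U << offset); /* drive low */
        return reg;
}

int main(void)
{
        /* Starting from an all-input, all-low register, make GPIO 2 an
         * output driving high: expect 0x44 (bit 6 = direction, bit 2 = value).
         */
        printf("0x%02x\n", vsc73xx_gpio_make_output(0x00, 2, 1));
        return 0;
}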
+ */ + vsc->ds = dsa_switch_alloc(dev, 8); + if (!vsc->ds) + return -ENOMEM; + vsc->ds->priv = vsc; + + vsc->ds->ops = &vsc73xx_ds_ops; + ret = dsa_register_switch(vsc->ds); + if (ret) { + dev_err(dev, "unable to register switch (%d)\n", ret); + return ret; + } + + ret = vsc73xx_gpio_probe(vsc); + if (ret) { + dsa_unregister_switch(vsc->ds); + return ret; + } + + return 0; +} + +static int vsc73xx_remove(struct spi_device *spi) +{ + struct vsc73xx *vsc = spi_get_drvdata(spi); + + dsa_unregister_switch(vsc->ds); + gpiod_set_value(vsc->reset, 1); + + return 0; +} + +static const struct of_device_id vsc73xx_of_match[] = { + { + .compatible = "vitesse,vsc7385", + }, + { + .compatible = "vitesse,vsc7388", + }, + { + .compatible = "vitesse,vsc7395", + }, + { + .compatible = "vitesse,vsc7398", + }, + { }, +}; +MODULE_DEVICE_TABLE(of, vsc73xx_of_match); + +static struct spi_driver vsc73xx_driver = { + .probe = vsc73xx_probe, + .remove = vsc73xx_remove, + .driver = { + .name = "vsc73xx", + .of_match_table = vsc73xx_of_match, + }, +}; +module_spi_driver(vsc73xx_driver); + +MODULE_AUTHOR("Linus Walleij <linus.walleij@linaro.org>"); +MODULE_DESCRIPTION("Vitesse VSC7385/7388/7395/7398 driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/net/ethernet/Makefile b/drivers/net/ethernet/Makefile index 8fbfe9ce2fa5..22555e7fa752 100644 --- a/drivers/net/ethernet/Makefile +++ b/drivers/net/ethernet/Makefile @@ -20,7 +20,7 @@ obj-$(CONFIG_NET_VENDOR_AQUANTIA) += aquantia/ obj-$(CONFIG_NET_VENDOR_ARC) += arc/ obj-$(CONFIG_NET_VENDOR_ATHEROS) += atheros/ obj-$(CONFIG_NET_VENDOR_AURORA) += aurora/ -obj-$(CONFIG_NET_CADENCE) += cadence/ +obj-$(CONFIG_NET_VENDOR_CADENCE) += cadence/ obj-$(CONFIG_NET_VENDOR_BROADCOM) += broadcom/ obj-$(CONFIG_NET_VENDOR_BROCADE) += brocade/ obj-$(CONFIG_NET_CALXEDA_XGMAC) += calxeda/ @@ -68,7 +68,7 @@ obj-$(CONFIG_NET_VENDOR_NVIDIA) += nvidia/ obj-$(CONFIG_LPC_ENET) += nxp/ obj-$(CONFIG_NET_VENDOR_OKI) += oki-semi/ obj-$(CONFIG_ETHOC) += ethoc.o -obj-$(CONFIG_NET_PACKET_ENGINE) += packetengines/ +obj-$(CONFIG_NET_VENDOR_PACKET_ENGINES) += packetengines/ obj-$(CONFIG_NET_VENDOR_PASEMI) += pasemi/ obj-$(CONFIG_NET_VENDOR_QLOGIC) += qlogic/ obj-$(CONFIG_NET_VENDOR_QUALCOMM) += qualcomm/ @@ -80,8 +80,7 @@ obj-$(CONFIG_NET_VENDOR_SAMSUNG) += samsung/ obj-$(CONFIG_NET_VENDOR_SEEQ) += seeq/ obj-$(CONFIG_NET_VENDOR_SILAN) += silan/ obj-$(CONFIG_NET_VENDOR_SIS) += sis/ -obj-$(CONFIG_SFC) += sfc/ -obj-$(CONFIG_SFC_FALCON) += sfc/falcon/ +obj-$(CONFIG_NET_VENDOR_SOLARFLARE) += sfc/ obj-$(CONFIG_NET_VENDOR_SGI) += sgi/ obj-$(CONFIG_NET_VENDOR_SMSC) += smsc/ obj-$(CONFIG_NET_VENDOR_SOCIONEXT) += socionext/ diff --git a/drivers/net/ethernet/alteon/acenic.c b/drivers/net/ethernet/alteon/acenic.c index 8f71b79b4949..08945baee48a 100644 --- a/drivers/net/ethernet/alteon/acenic.c +++ b/drivers/net/ethernet/alteon/acenic.c @@ -1933,7 +1933,7 @@ static void ace_rx_int(struct net_device *dev, u32 rxretprd, u32 rxretcsm) while (idx != rxretprd) { struct ring_info *rip; struct sk_buff *skb; - struct rx_desc *rxdesc, *retdesc; + struct rx_desc *retdesc; u32 skbidx; int bd_flags, desc_type, mapsize; u16 csum; @@ -1959,19 +1959,16 @@ static void ace_rx_int(struct net_device *dev, u32 rxretprd, u32 rxretcsm) case 0: rip = &ap->skb->rx_std_skbuff[skbidx]; mapsize = ACE_STD_BUFSIZE; - rxdesc = &ap->rx_std_ring[skbidx]; std_count++; break; case BD_FLG_JUMBO: rip = &ap->skb->rx_jumbo_skbuff[skbidx]; mapsize = ACE_JUMBO_BUFSIZE; - rxdesc = &ap->rx_jumbo_ring[skbidx]; atomic_dec(&ap->cur_jumbo_bufs); break; 
case BD_FLG_MINI: rip = &ap->skb->rx_mini_skbuff[skbidx]; mapsize = ACE_MINI_BUFSIZE; - rxdesc = &ap->rx_mini_ring[skbidx]; mini_count++; break; default: diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c index f2af87d70594..c673ac2df65b 100644 --- a/drivers/net/ethernet/amazon/ena/ena_netdev.c +++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c @@ -2213,7 +2213,8 @@ static void ena_netpoll(struct net_device *netdev) #endif /* CONFIG_NET_POLL_CONTROLLER */ static u16 ena_select_queue(struct net_device *dev, struct sk_buff *skb, - void *accel_priv, select_queue_fallback_t fallback) + struct net_device *sb_dev, + select_queue_fallback_t fallback) { u16 qid; /* we suspect that this is good for in--kernel network services that @@ -2223,7 +2224,7 @@ static u16 ena_select_queue(struct net_device *dev, struct sk_buff *skb, if (skb_rx_queue_recorded(skb)) qid = skb_get_rx_queue(skb); else - qid = fallback(dev, skb); + qid = fallback(dev, skb, NULL); return qid; } diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c b/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c index f2d8063a2cef..08c9fa6ca71f 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c @@ -11,6 +11,7 @@ #include "aq_ethtool.h" #include "aq_nic.h" +#include "aq_vec.h" static void aq_ethtool_get_regs(struct net_device *ndev, struct ethtool_regs *regs, void *p) @@ -284,6 +285,117 @@ static int aq_ethtool_set_coalesce(struct net_device *ndev, return aq_nic_update_interrupt_moderation_settings(aq_nic); } +static int aq_ethtool_nway_reset(struct net_device *ndev) +{ + struct aq_nic_s *aq_nic = netdev_priv(ndev); + + if (unlikely(!aq_nic->aq_fw_ops->renegotiate)) + return -EOPNOTSUPP; + + if (netif_running(ndev)) + return aq_nic->aq_fw_ops->renegotiate(aq_nic->aq_hw); + + return 0; +} + +static void aq_ethtool_get_pauseparam(struct net_device *ndev, + struct ethtool_pauseparam *pause) +{ + struct aq_nic_s *aq_nic = netdev_priv(ndev); + + pause->autoneg = 0; + + if (aq_nic->aq_hw->aq_nic_cfg->flow_control & AQ_NIC_FC_RX) + pause->rx_pause = 1; + if (aq_nic->aq_hw->aq_nic_cfg->flow_control & AQ_NIC_FC_TX) + pause->tx_pause = 1; +} + +static int aq_ethtool_set_pauseparam(struct net_device *ndev, + struct ethtool_pauseparam *pause) +{ + struct aq_nic_s *aq_nic = netdev_priv(ndev); + int err = 0; + + if (!aq_nic->aq_fw_ops->set_flow_control) + return -EOPNOTSUPP; + + if (pause->autoneg == AUTONEG_ENABLE) + return -EOPNOTSUPP; + + if (pause->rx_pause) + aq_nic->aq_hw->aq_nic_cfg->flow_control |= AQ_NIC_FC_RX; + else + aq_nic->aq_hw->aq_nic_cfg->flow_control &= ~AQ_NIC_FC_RX; + + if (pause->tx_pause) + aq_nic->aq_hw->aq_nic_cfg->flow_control |= AQ_NIC_FC_TX; + else + aq_nic->aq_hw->aq_nic_cfg->flow_control &= ~AQ_NIC_FC_TX; + + err = aq_nic->aq_fw_ops->set_flow_control(aq_nic->aq_hw); + + return err; +} + +static void aq_get_ringparam(struct net_device *ndev, + struct ethtool_ringparam *ring) +{ + struct aq_nic_s *aq_nic = netdev_priv(ndev); + struct aq_nic_cfg_s *aq_nic_cfg = aq_nic_get_cfg(aq_nic); + + ring->rx_pending = aq_nic_cfg->rxds; + ring->tx_pending = aq_nic_cfg->txds; + + ring->rx_max_pending = aq_nic_cfg->aq_hw_caps->rxds_max; + ring->tx_max_pending = aq_nic_cfg->aq_hw_caps->txds_max; +} + +static int aq_set_ringparam(struct net_device *ndev, + struct ethtool_ringparam *ring) +{ + int err = 0; + bool ndev_running = false; + struct aq_nic_s *aq_nic = netdev_priv(ndev); + struct aq_nic_cfg_s 
*aq_nic_cfg = aq_nic_get_cfg(aq_nic); + const struct aq_hw_caps_s *hw_caps = aq_nic_cfg->aq_hw_caps; + + if (ring->rx_mini_pending || ring->rx_jumbo_pending) { + err = -EOPNOTSUPP; + goto err_exit; + } + + if (netif_running(ndev)) { + ndev_running = true; + dev_close(ndev); + } + + aq_nic_free_vectors(aq_nic); + + aq_nic_cfg->rxds = max(ring->rx_pending, hw_caps->rxds_min); + aq_nic_cfg->rxds = min(aq_nic_cfg->rxds, hw_caps->rxds_max); + aq_nic_cfg->rxds = ALIGN(aq_nic_cfg->rxds, AQ_HW_RXD_MULTIPLE); + + aq_nic_cfg->txds = max(ring->tx_pending, hw_caps->txds_min); + aq_nic_cfg->txds = min(aq_nic_cfg->txds, hw_caps->txds_max); + aq_nic_cfg->txds = ALIGN(aq_nic_cfg->txds, AQ_HW_TXD_MULTIPLE); + + for (aq_nic->aq_vecs = 0; aq_nic->aq_vecs < aq_nic_cfg->vecs; + aq_nic->aq_vecs++) { + aq_nic->aq_vec[aq_nic->aq_vecs] = + aq_vec_alloc(aq_nic, aq_nic->aq_vecs, aq_nic_cfg); + if (unlikely(!aq_nic->aq_vec[aq_nic->aq_vecs])) { + err = -ENOMEM; + goto err_exit; + } + } + if (ndev_running) + err = dev_open(ndev); + +err_exit: + return err; +} + const struct ethtool_ops aq_ethtool_ops = { .get_link = aq_ethtool_get_link, .get_regs_len = aq_ethtool_get_regs_len, @@ -291,6 +403,11 @@ const struct ethtool_ops aq_ethtool_ops = { .get_drvinfo = aq_ethtool_get_drvinfo, .get_strings = aq_ethtool_get_strings, .get_rxfh_indir_size = aq_ethtool_get_rss_indir_size, + .nway_reset = aq_ethtool_nway_reset, + .get_ringparam = aq_get_ringparam, + .set_ringparam = aq_set_ringparam, + .get_pauseparam = aq_ethtool_get_pauseparam, + .set_pauseparam = aq_ethtool_set_pauseparam, .get_rxfh_key_size = aq_ethtool_get_rss_key_size, .get_rxfh = aq_ethtool_get_rss, .get_rxnfc = aq_ethtool_get_rxnfc, diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_hw.h b/drivers/net/ethernet/aquantia/atlantic/aq_hw.h index 2c6ebd91a9f2..5c00671f248d 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_hw.h +++ b/drivers/net/ethernet/aquantia/atlantic/aq_hw.h @@ -24,8 +24,10 @@ struct aq_hw_caps_s { u64 link_speed_msk; unsigned int hw_priv_flags; u32 media_type; - u32 rxds; - u32 txds; + u32 rxds_max; + u32 txds_max; + u32 rxds_min; + u32 txds_min; u32 txhwb_alignment; u32 irq_mask; u32 vecs; @@ -98,6 +100,9 @@ struct aq_stats_s { #define AQ_HW_MEDIA_TYPE_TP 1U #define AQ_HW_MEDIA_TYPE_FIBRE 2U +#define AQ_HW_TXD_MULTIPLE 8U +#define AQ_HW_RXD_MULTIPLE 8U + #define AQ_HW_MULTICAST_ADDRESS_MAX 32U struct aq_hw_s { @@ -199,25 +204,30 @@ struct aq_hw_ops { int (*hw_get_fw_version)(struct aq_hw_s *self, u32 *fw_version); - int (*hw_deinit)(struct aq_hw_s *self); - int (*hw_set_power)(struct aq_hw_s *self, unsigned int power_state); }; struct aq_fw_ops { int (*init)(struct aq_hw_s *self); + int (*deinit)(struct aq_hw_s *self); + int (*reset)(struct aq_hw_s *self); + int (*renegotiate)(struct aq_hw_s *self); + int (*get_mac_permanent)(struct aq_hw_s *self, u8 *mac); int (*set_link_speed)(struct aq_hw_s *self, u32 speed); - int (*set_state)(struct aq_hw_s *self, enum hal_atl_utils_fw_state_e state); + int (*set_state)(struct aq_hw_s *self, + enum hal_atl_utils_fw_state_e state); int (*update_link_status)(struct aq_hw_s *self); int (*update_stats)(struct aq_hw_s *self); + + int (*set_flow_control)(struct aq_hw_s *self); }; #endif /* AQ_HW_H */ diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c index 7a22d0257e04..26dc6782b475 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c @@ -89,8 +89,8 @@ void aq_nic_cfg_start(struct 
aq_nic_s *self) aq_nic_rss_init(self, cfg->num_rss_queues); /*descriptors */ - cfg->rxds = min(cfg->aq_hw_caps->rxds, AQ_CFG_RXDS_DEF); - cfg->txds = min(cfg->aq_hw_caps->txds, AQ_CFG_TXDS_DEF); + cfg->rxds = min(cfg->aq_hw_caps->rxds_max, AQ_CFG_RXDS_DEF); + cfg->txds = min(cfg->aq_hw_caps->txds_max, AQ_CFG_TXDS_DEF); /*rss rings */ cfg->vecs = min(cfg->aq_hw_caps->vecs, AQ_CFG_VECS_DEF); @@ -768,10 +768,14 @@ void aq_nic_get_link_ksettings(struct aq_nic_s *self, ethtool_link_ksettings_add_link_mode(cmd, advertising, 100baseT_Full); - if (self->aq_nic_cfg.flow_control) + if (self->aq_nic_cfg.flow_control & AQ_NIC_FC_RX) ethtool_link_ksettings_add_link_mode(cmd, advertising, Pause); + if (self->aq_nic_cfg.flow_control & AQ_NIC_FC_TX) + ethtool_link_ksettings_add_link_mode(cmd, advertising, + Asym_Pause); + if (self->aq_nic_cfg.aq_hw_caps->media_type == AQ_HW_MEDIA_TYPE_FIBRE) ethtool_link_ksettings_add_link_mode(cmd, advertising, FIBRE); else @@ -886,7 +890,7 @@ void aq_nic_deinit(struct aq_nic_s *self) aq_vec_deinit(aq_vec); if (self->power_state == AQ_HW_POWER_STATE_D0) { - (void)self->aq_hw_ops->hw_deinit(self->aq_hw); + (void)self->aq_fw_ops->deinit(self->aq_hw); } else { (void)self->aq_hw_ops->hw_set_power(self->aq_hw, self->power_state); diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c index 8cc6abadc03b..97addfa6f895 100644 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c @@ -19,29 +19,31 @@ #include "hw_atl_a0_internal.h" #define DEFAULT_A0_BOARD_BASIC_CAPABILITIES \ - .is_64_dma = true, \ - .msix_irqs = 4U, \ - .irq_mask = ~0U, \ - .vecs = HW_ATL_A0_RSS_MAX, \ - .tcs = HW_ATL_A0_TC_MAX, \ - .rxd_alignment = 1U, \ - .rxd_size = HW_ATL_A0_RXD_SIZE, \ - .rxds = 248U, \ - .txd_alignment = 1U, \ - .txd_size = HW_ATL_A0_TXD_SIZE, \ - .txds = 8U * 1024U, \ - .txhwb_alignment = 4096U, \ - .tx_rings = HW_ATL_A0_TX_RINGS, \ - .rx_rings = HW_ATL_A0_RX_RINGS, \ - .hw_features = NETIF_F_HW_CSUM | \ - NETIF_F_RXHASH | \ - NETIF_F_RXCSUM | \ - NETIF_F_SG | \ - NETIF_F_TSO, \ + .is_64_dma = true, \ + .msix_irqs = 4U, \ + .irq_mask = ~0U, \ + .vecs = HW_ATL_A0_RSS_MAX, \ + .tcs = HW_ATL_A0_TC_MAX, \ + .rxd_alignment = 1U, \ + .rxd_size = HW_ATL_A0_RXD_SIZE, \ + .rxds_max = HW_ATL_A0_MAX_RXD, \ + .rxds_min = HW_ATL_A0_MIN_RXD, \ + .txd_alignment = 1U, \ + .txd_size = HW_ATL_A0_TXD_SIZE, \ + .txds_max = HW_ATL_A0_MAX_TXD, \ + .txds_min = HW_ATL_A0_MIN_RXD, \ + .txhwb_alignment = 4096U, \ + .tx_rings = HW_ATL_A0_TX_RINGS, \ + .rx_rings = HW_ATL_A0_RX_RINGS, \ + .hw_features = NETIF_F_HW_CSUM | \ + NETIF_F_RXHASH | \ + NETIF_F_RXCSUM | \ + NETIF_F_SG | \ + NETIF_F_TSO, \ .hw_priv_flags = IFF_UNICAST_FLT, \ - .flow_control = true, \ - .mtu = HW_ATL_A0_MTU_JUMBO, \ - .mac_regs_count = 88, \ + .flow_control = true, \ + .mtu = HW_ATL_A0_MTU_JUMBO, \ + .mac_regs_count = 88, \ .hw_alive_check_addr = 0x10U const struct aq_hw_caps_s hw_atl_a0_caps_aqc100 = { @@ -875,7 +877,6 @@ static int hw_atl_a0_hw_ring_rx_stop(struct aq_hw_s *self, const struct aq_hw_ops hw_atl_ops_a0 = { .hw_set_mac_address = hw_atl_a0_hw_mac_addr_set, .hw_init = hw_atl_a0_hw_init, - .hw_deinit = hw_atl_utils_hw_deinit, .hw_set_power = hw_atl_utils_hw_set_power, .hw_reset = hw_atl_a0_hw_reset, .hw_start = hw_atl_a0_hw_start, diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0_internal.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0_internal.h index 
1d8855558d74..3c94cff57876 100644 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0_internal.h +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0_internal.h @@ -88,4 +88,12 @@ #define HW_ATL_A0_FW_VER_EXPECTED 0x01050006U +#define HW_ATL_A0_MIN_RXD \ + (ALIGN(AQ_CFG_SKB_FRAGS_MAX + 1U, AQ_HW_RXD_MULTIPLE)) +#define HW_ATL_A0_MIN_TXD \ + (ALIGN(AQ_CFG_SKB_FRAGS_MAX + 1U, AQ_HW_TXD_MULTIPLE)) + +#define HW_ATL_A0_MAX_RXD 8184U +#define HW_ATL_A0_MAX_TXD 8184U + #endif /* HW_ATL_A0_INTERNAL_H */ diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c index 956860a69797..4809bf4baa34 100644 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c @@ -20,30 +20,32 @@ #include "hw_atl_llh_internal.h" #define DEFAULT_B0_BOARD_BASIC_CAPABILITIES \ - .is_64_dma = true, \ - .msix_irqs = 4U, \ - .irq_mask = ~0U, \ - .vecs = HW_ATL_B0_RSS_MAX, \ - .tcs = HW_ATL_B0_TC_MAX, \ - .rxd_alignment = 1U, \ - .rxd_size = HW_ATL_B0_RXD_SIZE, \ - .rxds = 4U * 1024U, \ - .txd_alignment = 1U, \ - .txd_size = HW_ATL_B0_TXD_SIZE, \ - .txds = 8U * 1024U, \ - .txhwb_alignment = 4096U, \ - .tx_rings = HW_ATL_B0_TX_RINGS, \ - .rx_rings = HW_ATL_B0_RX_RINGS, \ - .hw_features = NETIF_F_HW_CSUM | \ - NETIF_F_RXCSUM | \ - NETIF_F_RXHASH | \ - NETIF_F_SG | \ - NETIF_F_TSO | \ - NETIF_F_LRO, \ - .hw_priv_flags = IFF_UNICAST_FLT, \ - .flow_control = true, \ - .mtu = HW_ATL_B0_MTU_JUMBO, \ - .mac_regs_count = 88, \ + .is_64_dma = true, \ + .msix_irqs = 4U, \ + .irq_mask = ~0U, \ + .vecs = HW_ATL_B0_RSS_MAX, \ + .tcs = HW_ATL_B0_TC_MAX, \ + .rxd_alignment = 1U, \ + .rxd_size = HW_ATL_B0_RXD_SIZE, \ + .rxds_max = HW_ATL_B0_MAX_RXD, \ + .rxds_min = HW_ATL_B0_MIN_RXD, \ + .txd_alignment = 1U, \ + .txd_size = HW_ATL_B0_TXD_SIZE, \ + .txds_max = HW_ATL_B0_MAX_TXD, \ + .txds_min = HW_ATL_B0_MIN_TXD, \ + .txhwb_alignment = 4096U, \ + .tx_rings = HW_ATL_B0_TX_RINGS, \ + .rx_rings = HW_ATL_B0_RX_RINGS, \ + .hw_features = NETIF_F_HW_CSUM | \ + NETIF_F_RXCSUM | \ + NETIF_F_RXHASH | \ + NETIF_F_SG | \ + NETIF_F_TSO | \ + NETIF_F_LRO, \ + .hw_priv_flags = IFF_UNICAST_FLT, \ + .flow_control = true, \ + .mtu = HW_ATL_B0_MTU_JUMBO, \ + .mac_regs_count = 88, \ .hw_alive_check_addr = 0x10U const struct aq_hw_caps_s hw_atl_b0_caps_aqc100 = { @@ -933,7 +935,6 @@ static int hw_atl_b0_hw_ring_rx_stop(struct aq_hw_s *self, const struct aq_hw_ops hw_atl_ops_b0 = { .hw_set_mac_address = hw_atl_b0_hw_mac_addr_set, .hw_init = hw_atl_b0_hw_init, - .hw_deinit = hw_atl_utils_hw_deinit, .hw_set_power = hw_atl_utils_hw_set_power, .hw_reset = hw_atl_b0_hw_reset, .hw_start = hw_atl_b0_hw_start, diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0_internal.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0_internal.h index 405d1455c222..28568f5fa74b 100644 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0_internal.h +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0_internal.h @@ -142,6 +142,14 @@ #define HW_ATL_INTR_MODER_MAX 0x1FF #define HW_ATL_INTR_MODER_MIN 0xFF +#define HW_ATL_B0_MIN_RXD \ + (ALIGN(AQ_CFG_SKB_FRAGS_MAX + 1U, AQ_HW_RXD_MULTIPLE)) +#define HW_ATL_B0_MIN_TXD \ + (ALIGN(AQ_CFG_SKB_FRAGS_MAX + 1U, AQ_HW_TXD_MULTIPLE)) + +#define HW_ATL_B0_MAX_RXD 8184U +#define HW_ATL_B0_MAX_TXD 8184U + /* HW layer capabilities */ #endif /* HW_ATL_B0_INTERNAL_H */ diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c 
b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c index e652d86b87d4..c965e65d07db 100644 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c @@ -30,10 +30,11 @@ #define HW_ATL_MPI_CONTROL_ADR 0x0368U #define HW_ATL_MPI_STATE_ADR 0x036CU -#define HW_ATL_MPI_STATE_MSK 0x00FFU -#define HW_ATL_MPI_STATE_SHIFT 0U -#define HW_ATL_MPI_SPEED_MSK 0xFFFF0000U -#define HW_ATL_MPI_SPEED_SHIFT 16U +#define HW_ATL_MPI_STATE_MSK 0x00FFU +#define HW_ATL_MPI_STATE_SHIFT 0U +#define HW_ATL_MPI_SPEED_MSK 0x00FF0000U +#define HW_ATL_MPI_SPEED_SHIFT 16U +#define HW_ATL_MPI_DIRTY_WAKE_MSK 0x02000000U #define HW_ATL_MPI_DAISY_CHAIN_STATUS 0x704 #define HW_ATL_MPI_BOOT_EXIT_CODE 0x388 @@ -525,19 +526,20 @@ static int hw_atl_utils_mpi_set_speed(struct aq_hw_s *self, u32 speed) { u32 val = aq_hw_read_reg(self, HW_ATL_MPI_CONTROL_ADR); - val = (val & HW_ATL_MPI_STATE_MSK) | (speed << HW_ATL_MPI_SPEED_SHIFT); + val = val & ~HW_ATL_MPI_SPEED_MSK; + val |= speed << HW_ATL_MPI_SPEED_SHIFT; aq_hw_write_reg(self, HW_ATL_MPI_CONTROL_ADR, val); return 0; } -void hw_atl_utils_mpi_set(struct aq_hw_s *self, - enum hal_atl_utils_fw_state_e state, - u32 speed) +static int hw_atl_utils_mpi_set_state(struct aq_hw_s *self, + enum hal_atl_utils_fw_state_e state) { int err = 0; u32 transaction_id = 0; struct hw_aq_atl_utils_mbox_header mbox; + u32 val = aq_hw_read_reg(self, HW_ATL_MPI_CONTROL_ADR); if (state == MPI_RESET) { hw_atl_utils_mpi_read_mbox(self, &mbox); @@ -551,21 +553,21 @@ void hw_atl_utils_mpi_set(struct aq_hw_s *self, if (err < 0) goto err_exit; } + /* On interface DEINIT we disable DW (raise bit) + * Otherwise enable DW (clear bit) + */ + if (state == MPI_DEINIT || state == MPI_POWER) + val |= HW_ATL_MPI_DIRTY_WAKE_MSK; + else + val &= ~HW_ATL_MPI_DIRTY_WAKE_MSK; - aq_hw_write_reg(self, HW_ATL_MPI_CONTROL_ADR, - (speed << HW_ATL_MPI_SPEED_SHIFT) | state); - -err_exit:; -} - -static int hw_atl_utils_mpi_set_state(struct aq_hw_s *self, - enum hal_atl_utils_fw_state_e state) -{ - u32 val = aq_hw_read_reg(self, HW_ATL_MPI_CONTROL_ADR); + /* Set new state bits */ + val = val & ~HW_ATL_MPI_STATE_MSK; + val |= state & HW_ATL_MPI_STATE_MSK; - val = state | (val & HW_ATL_MPI_SPEED_MSK); aq_hw_write_reg(self, HW_ATL_MPI_CONTROL_ADR, val); - return 0; +err_exit: + return err; } int hw_atl_utils_mpi_get_link_status(struct aq_hw_s *self) @@ -721,16 +723,18 @@ void hw_atl_utils_hw_chip_features_init(struct aq_hw_s *self, u32 *p) *p = chip_features; } -int hw_atl_utils_hw_deinit(struct aq_hw_s *self) +static int hw_atl_fw1x_deinit(struct aq_hw_s *self) { - hw_atl_utils_mpi_set(self, MPI_DEINIT, 0x0U); + hw_atl_utils_mpi_set_speed(self, 0); + hw_atl_utils_mpi_set_state(self, MPI_DEINIT); return 0; } int hw_atl_utils_hw_set_power(struct aq_hw_s *self, unsigned int power_state) { - hw_atl_utils_mpi_set(self, MPI_POWER, 0x0U); + hw_atl_utils_mpi_set_speed(self, 0); + hw_atl_utils_mpi_set_state(self, MPI_POWER); return 0; } @@ -823,10 +827,12 @@ int hw_atl_utils_get_fw_version(struct aq_hw_s *self, u32 *fw_version) const struct aq_fw_ops aq_fw_1x_ops = { .init = hw_atl_utils_mpi_create, + .deinit = hw_atl_fw1x_deinit, .reset = NULL, .get_mac_permanent = hw_atl_utils_get_mac_permanent, .set_link_speed = hw_atl_utils_mpi_set_speed, .set_state = hw_atl_utils_mpi_set_state, .update_link_status = hw_atl_utils_mpi_get_link_status, .update_stats = hw_atl_utils_update_stats, + .set_flow_control = NULL, }; diff --git 
a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h index cd8f18f39c61..b875590efcbd 100644 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h @@ -239,6 +239,41 @@ enum hw_atl_fw2x_caps_hi { CAPS_HI_TRANSACTION_ID, }; +enum hw_atl_fw2x_ctrl { + CTRL_RESERVED1 = 0x00, + CTRL_RESERVED2, + CTRL_RESERVED3, + CTRL_PAUSE, + CTRL_ASYMMETRIC_PAUSE, + CTRL_RESERVED4, + CTRL_RESERVED5, + CTRL_RESERVED6, + CTRL_1GBASET_FD_EEE, + CTRL_2P5GBASET_FD_EEE, + CTRL_5GBASET_FD_EEE, + CTRL_10GBASET_FD_EEE, + CTRL_THERMAL_SHUTDOWN, + CTRL_PHY_LOGS, + CTRL_EEE_AUTO_DISABLE, + CTRL_PFC, + CTRL_WAKE_ON_LINK, + CTRL_CABLE_DIAG, + CTRL_TEMPERATURE, + CTRL_DOWNSHIFT, + CTRL_PTP_AVB, + CTRL_RESERVED7, + CTRL_LINK_DROP, + CTRL_SLEEP_PROXY, + CTRL_WOL, + CTRL_MAC_STOP, + CTRL_EXT_LOOPBACK, + CTRL_INT_LOOPBACK, + CTRL_RESERVED8, + CTRL_WOL_TIMER, + CTRL_STATISTICS, + CTRL_FORCE_RECONNECT, +}; + struct aq_hw_s; struct aq_fw_ops; struct aq_hw_caps_s; diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c index 39cd3a27fe77..e37943760a58 100644 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c @@ -28,6 +28,10 @@ #define HW_ATL_FW2X_MPI_STATE_ADDR 0x370 #define HW_ATL_FW2X_MPI_STATE2_ADDR 0x374 +static int aq_fw2x_set_link_speed(struct aq_hw_s *self, u32 speed); +static int aq_fw2x_set_state(struct aq_hw_s *self, + enum hal_atl_utils_fw_state_e state); + static int aq_fw2x_init(struct aq_hw_s *self) { int err = 0; @@ -39,6 +43,16 @@ static int aq_fw2x_init(struct aq_hw_s *self) return err; } +static int aq_fw2x_deinit(struct aq_hw_s *self) +{ + int err = aq_fw2x_set_link_speed(self, 0); + + if (!err) + err = aq_fw2x_set_state(self, MPI_DEINIT); + + return err; +} + static enum hw_atl_fw2x_rate link_speed_mask_2fw2x_ratemask(u32 speed) { enum hw_atl_fw2x_rate rate = 0; @@ -73,10 +87,38 @@ static int aq_fw2x_set_link_speed(struct aq_hw_s *self, u32 speed) return 0; } +static void aq_fw2x_set_mpi_flow_control(struct aq_hw_s *self, u32 *mpi_state) +{ + if (self->aq_nic_cfg->flow_control & AQ_NIC_FC_RX) + *mpi_state |= BIT(CAPS_HI_PAUSE); + else + *mpi_state &= ~BIT(CAPS_HI_PAUSE); + + if (self->aq_nic_cfg->flow_control & AQ_NIC_FC_TX) + *mpi_state |= BIT(CAPS_HI_ASYMMETRIC_PAUSE); + else + *mpi_state &= ~BIT(CAPS_HI_ASYMMETRIC_PAUSE); +} + static int aq_fw2x_set_state(struct aq_hw_s *self, enum hal_atl_utils_fw_state_e state) { - /* No explicit state in 2x fw */ + u32 mpi_state = aq_hw_read_reg(self, HW_ATL_FW2X_MPI_CONTROL2_ADDR); + + switch (state) { + case MPI_INIT: + mpi_state &= ~BIT(CAPS_HI_LINK_DROP); + aq_fw2x_set_mpi_flow_control(self, &mpi_state); + break; + case MPI_DEINIT: + mpi_state |= BIT(CAPS_HI_LINK_DROP); + break; + case MPI_RESET: + case MPI_POWER: + /* No actions */ + break; + } + aq_hw_write_reg(self, HW_ATL_FW2X_MPI_CONTROL2_ADDR, mpi_state); return 0; } @@ -173,12 +215,37 @@ static int aq_fw2x_update_stats(struct aq_hw_s *self) return hw_atl_utils_update_stats(self); } +static int aq_fw2x_renegotiate(struct aq_hw_s *self) +{ + u32 mpi_opts = aq_hw_read_reg(self, HW_ATL_FW2X_MPI_CONTROL2_ADDR); + + mpi_opts |= BIT(CTRL_FORCE_RECONNECT); + + aq_hw_write_reg(self, HW_ATL_FW2X_MPI_CONTROL2_ADDR, mpi_opts); + + return 0; +} + +static int aq_fw2x_set_flow_control(struct 
aq_hw_s *self) +{ + u32 mpi_state = aq_hw_read_reg(self, HW_ATL_FW2X_MPI_CONTROL2_ADDR); + + aq_fw2x_set_mpi_flow_control(self, &mpi_state); + + aq_hw_write_reg(self, HW_ATL_FW2X_MPI_CONTROL2_ADDR, mpi_state); + + return 0; +} + const struct aq_fw_ops aq_fw_2x_ops = { .init = aq_fw2x_init, + .deinit = aq_fw2x_deinit, .reset = NULL, + .renegotiate = aq_fw2x_renegotiate, .get_mac_permanent = aq_fw2x_get_mac_permanent, .set_link_speed = aq_fw2x_set_link_speed, .set_state = aq_fw2x_set_state, .update_link_status = aq_fw2x_update_link_status, .update_stats = aq_fw2x_update_stats, + .set_flow_control = aq_fw2x_set_flow_control, }; diff --git a/drivers/net/ethernet/aquantia/atlantic/ver.h b/drivers/net/ethernet/aquantia/atlantic/ver.h index a445de6837a6..94efc6477bdc 100644 --- a/drivers/net/ethernet/aquantia/atlantic/ver.h +++ b/drivers/net/ethernet/aquantia/atlantic/ver.h @@ -12,8 +12,8 @@ #define NIC_MAJOR_DRIVER_VERSION 2 #define NIC_MINOR_DRIVER_VERSION 0 -#define NIC_BUILD_DRIVER_VERSION 2 -#define NIC_REVISION_DRIVER_VERSION 1 +#define NIC_BUILD_DRIVER_VERSION 3 +#define NIC_REVISION_DRIVER_VERSION 0 #define AQ_CFG_DRV_VERSION_SUFFIX "-kern" diff --git a/drivers/net/ethernet/atheros/alx/main.c b/drivers/net/ethernet/atheros/alx/main.c index 5e5022fa1d04..6d3221134927 100644 --- a/drivers/net/ethernet/atheros/alx/main.c +++ b/drivers/net/ethernet/atheros/alx/main.c @@ -1279,7 +1279,6 @@ static void alx_check_link(struct alx_priv *alx) struct alx_hw *hw = &alx->hw; unsigned long flags; int old_speed; - u8 old_duplex; int err; /* clear PHY internal interrupt status, otherwise the main @@ -1288,7 +1287,6 @@ static void alx_check_link(struct alx_priv *alx) alx_clear_phy_intr(hw); old_speed = hw->link_speed; - old_duplex = hw->duplex; err = alx_read_phy_link(hw); if (err < 0) goto reset; diff --git a/drivers/net/ethernet/aurora/Kconfig b/drivers/net/ethernet/aurora/Kconfig index 8ba7f8ff3434..392f564d8fd4 100644 --- a/drivers/net/ethernet/aurora/Kconfig +++ b/drivers/net/ethernet/aurora/Kconfig @@ -1,5 +1,6 @@ config NET_VENDOR_AURORA bool "Aurora VLSI devices" + default y help If you have a network (Ethernet) device belonging to this class, say Y. 
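[Editor's aside] Several hunks in this series (ena above, bcm_sysport and bnx2x below) convert .ndo_select_queue callbacks to the new prototype: the old void *accel_priv argument becomes a struct net_device *sb_dev, and the fallback helper now takes an extra struct net_device * argument (passed as NULL in these hunks). A hedged sketch of a converted callback, modelled directly on the ena change; the foo_ prefix is a placeholder, not a real driver:

#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Minimal sketch of a driver .ndo_select_queue callback after this series.
 * "foo" is a made-up driver name used only for illustration.
 */
static u16 foo_select_queue(struct net_device *dev, struct sk_buff *skb,
                            struct net_device *sb_dev,
                            select_queue_fallback_t fallback)
{
        /* Keep flows on the queue the stack recorded, else ask the core */
        if (skb_rx_queue_recorded(skb))
                return skb_get_rx_queue(skb);

        return fallback(dev, skb, NULL);
}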
diff --git a/drivers/net/ethernet/aurora/nb8800.c b/drivers/net/ethernet/aurora/nb8800.c index e94159507847..c8d1f8fa4713 100644 --- a/drivers/net/ethernet/aurora/nb8800.c +++ b/drivers/net/ethernet/aurora/nb8800.c @@ -304,12 +304,10 @@ static int nb8800_poll(struct napi_struct *napi, int budget) again: do { - struct nb8800_rx_buf *rxb; unsigned int len; next = (last + 1) % RX_DESC_COUNT; - rxb = &priv->rx_bufs[next]; rxd = &priv->rx_descs[next]; if (!rxd->report) diff --git a/drivers/net/ethernet/broadcom/Kconfig b/drivers/net/ethernet/broadcom/Kconfig index 4c3bfde6e8de..b7aa8ad96dfb 100644 --- a/drivers/net/ethernet/broadcom/Kconfig +++ b/drivers/net/ethernet/broadcom/Kconfig @@ -61,7 +61,7 @@ config BCM63XX_ENET config BCMGENET tristate "Broadcom GENET internal MAC support" - depends on OF && HAS_IOMEM + depends on HAS_IOMEM select MII select PHYLIB select FIXED_PHY @@ -181,7 +181,7 @@ config BGMAC_PLATFORM config SYSTEMPORT tristate "Broadcom SYSTEMPORT internal MAC support" - depends on OF + depends on HAS_IOMEM depends on NET_DSA || !NET_DSA select MII select PHYLIB diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c index a1f60f89e059..631617d95769 100644 --- a/drivers/net/ethernet/broadcom/bcmsysport.c +++ b/drivers/net/ethernet/broadcom/bcmsysport.c @@ -2107,7 +2107,7 @@ static const struct ethtool_ops bcm_sysport_ethtool_ops = { }; static u16 bcm_sysport_select_queue(struct net_device *dev, struct sk_buff *skb, - void *accel_priv, + struct net_device *sb_dev, select_queue_fallback_t fallback) { struct bcm_sysport_priv *priv = netdev_priv(dev); @@ -2116,7 +2116,7 @@ static u16 bcm_sysport_select_queue(struct net_device *dev, struct sk_buff *skb, unsigned int q, port; if (!netdev_uses_dsa(dev)) - return fallback(dev, skb); + return fallback(dev, skb, NULL); /* DSA tagging layer will have configured the correct queue */ q = BRCM_TAG_GET_QUEUE(queue); @@ -2124,7 +2124,7 @@ static u16 bcm_sysport_select_queue(struct net_device *dev, struct sk_buff *skb, tx_ring = priv->ring_map[q + port * priv->per_port_num_tx_queues]; if (unlikely(!tx_ring)) - return fallback(dev, skb); + return fallback(dev, skb, NULL); return tx_ring->index; } diff --git a/drivers/net/ethernet/broadcom/bgmac.c b/drivers/net/ethernet/broadcom/bgmac.c index e6ea8e61f96d..4c94d9218bba 100644 --- a/drivers/net/ethernet/broadcom/bgmac.c +++ b/drivers/net/ethernet/broadcom/bgmac.c @@ -236,7 +236,6 @@ static void bgmac_dma_tx_free(struct bgmac *bgmac, struct bgmac_dma_ring *ring) { struct device *dma_dev = bgmac->dma_dev; int empty_slot; - bool freed = false; unsigned bytes_compl = 0, pkts_compl = 0; /* The last slot that hardware didn't consume yet */ @@ -279,7 +278,6 @@ static void bgmac_dma_tx_free(struct bgmac *bgmac, struct bgmac_dma_ring *ring) slot->dma_addr = 0; ring->start++; - freed = true; } if (!pkts_compl) diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c index af7b5a4d8ba0..5a727d4729da 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c @@ -1910,7 +1910,8 @@ void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw) } u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb, - void *accel_priv, select_queue_fallback_t fallback) + struct net_device *sb_dev, + select_queue_fallback_t fallback) { struct bnx2x *bp = netdev_priv(dev); @@ -1932,7 +1933,8 @@ u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb, } /* select 
a non-FCoE queue */ - return fallback(dev, skb) % (BNX2X_NUM_ETH_QUEUES(bp) * bp->max_cos); + return fallback(dev, skb, NULL) % + (BNX2X_NUM_ETH_QUEUES(bp) * bp->max_cos); } void bnx2x_set_num_queues(struct bnx2x *bp) diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h index a8ce5c55bbb0..0e508e5defce 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h @@ -497,7 +497,8 @@ int bnx2x_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos, /* select_queue callback */ u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb, - void *accel_priv, select_queue_fallback_t fallback); + struct net_device *sb_dev, + select_queue_fallback_t fallback); static inline void bnx2x_update_rx_prod(struct bnx2x *bp, struct bnx2x_fastpath *fp, diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c index 22243c480a05..98d4c5a3ff21 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c @@ -6339,6 +6339,7 @@ int bnx2x_set_led(struct link_params *params, */ if (!vars->link_up) break; + /* else: fall through */ case LED_MODE_ON: if (((params->phy[EXT_PHY1].type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727) || @@ -12521,11 +12522,13 @@ static void bnx2x_phy_def_cfg(struct link_params *params, switch (link_config & PORT_FEATURE_LINK_SPEED_MASK) { case PORT_FEATURE_LINK_SPEED_10M_HALF: phy->req_duplex = DUPLEX_HALF; + /* fall through */ case PORT_FEATURE_LINK_SPEED_10M_FULL: phy->req_line_speed = SPEED_10; break; case PORT_FEATURE_LINK_SPEED_100M_HALF: phy->req_duplex = DUPLEX_HALF; + /* fall through */ case PORT_FEATURE_LINK_SPEED_100M_FULL: phy->req_line_speed = SPEED_100; break; diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c index 57348f2b49a3..71362b7f6040 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c @@ -8561,11 +8561,11 @@ int bnx2x_set_int_mode(struct bnx2x *bp) bp->num_queues, 1 + bp->num_cnic_queues); - /* falling through... */ + /* fall through */ case BNX2X_INT_MODE_MSI: bnx2x_enable_msi(bp); - /* falling through... 
*/ + /* fall through */ case BNX2X_INT_MODE_INTX: bp->num_ethernet_queues = 1; bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues; diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c index 8baf9d3eb4b1..3f4d2c8da21a 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c @@ -3258,7 +3258,7 @@ static int bnx2x_mcast_validate_e2(struct bnx2x *bp, /* DEL command deletes all currently configured MACs */ case BNX2X_MCAST_CMD_DEL: o->set_registry_size(o, 0); - /* Don't break */ + /* fall through */ /* RESTORE command will restore the entire multicast configuration */ case BNX2X_MCAST_CMD_RESTORE: @@ -3592,7 +3592,7 @@ static int bnx2x_mcast_validate_e1(struct bnx2x *bp, /* DEL command deletes all currently configured MACs */ case BNX2X_MCAST_CMD_DEL: o->set_registry_size(o, 0); - /* Don't break */ + /* fall through */ /* RESTORE command will restore the entire multicast configuration */ case BNX2X_MCAST_CMD_RESTORE: diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c index dc77bfded865..62da46537734 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c @@ -1827,6 +1827,7 @@ get_vf: DP(BNX2X_MSG_IOV, "got VF [%d:%d] RSS update ramrod\n", vf->abs_vfid, qidx); bnx2x_vf_handle_rss_update_eqe(bp, vf); + /* fall through */ case EVENT_RING_OPCODE_VF_FLR: /* Do nothing for now */ return 0; diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index 4394c1162be4..c612d74451a7 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -1727,7 +1727,7 @@ static int bnxt_async_event_process(struct bnxt *bp, speed); } set_bit(BNXT_LINK_SPEED_CHNG_SP_EVENT, &bp->sp_event); - /* fall thru */ + /* fall through */ } case ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE: set_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event); @@ -3012,13 +3012,6 @@ static void bnxt_free_hwrm_resources(struct bnxt *bp) bp->hwrm_cmd_resp_dma_addr); bp->hwrm_cmd_resp_addr = NULL; - if (bp->hwrm_dbg_resp_addr) { - dma_free_coherent(&pdev->dev, HWRM_DBG_REG_BUF_SIZE, - bp->hwrm_dbg_resp_addr, - bp->hwrm_dbg_resp_dma_addr); - - bp->hwrm_dbg_resp_addr = NULL; - } } static int bnxt_alloc_hwrm_resources(struct bnxt *bp) @@ -3030,12 +3023,6 @@ static int bnxt_alloc_hwrm_resources(struct bnxt *bp) GFP_KERNEL); if (!bp->hwrm_cmd_resp_addr) return -ENOMEM; - bp->hwrm_dbg_resp_addr = dma_alloc_coherent(&pdev->dev, - HWRM_DBG_REG_BUF_SIZE, - &bp->hwrm_dbg_resp_dma_addr, - GFP_KERNEL); - if (!bp->hwrm_dbg_resp_addr) - netdev_warn(bp->dev, "fail to alloc debug register dma mem\n"); return 0; } @@ -7991,7 +7978,7 @@ static int bnxt_setup_tc_block(struct net_device *dev, switch (f->command) { case TC_BLOCK_BIND: return tcf_block_cb_register(f->block, bnxt_setup_tc_block_cb, - bp, bp); + bp, bp, f->extack); case TC_BLOCK_UNBIND: tcf_block_cb_unregister(f->block, bnxt_setup_tc_block_cb, bp); return 0; diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h index 91575ef97c8c..934aa11c82eb 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h @@ -1287,9 +1287,6 @@ struct bnxt { dma_addr_t hwrm_short_cmd_req_dma_addr; void *hwrm_cmd_resp_addr; dma_addr_t hwrm_cmd_resp_dma_addr; - void *hwrm_dbg_resp_addr; - dma_addr_t 
hwrm_dbg_resp_dma_addr; -#define HWRM_DBG_REG_BUF_SIZE 128 struct rx_port_stats *hw_rx_port_stats; struct tx_port_stats *hw_tx_port_stats; diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c index 402fa32f7a88..7bd96ab4f7c5 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c @@ -21,16 +21,99 @@ static const struct devlink_ops bnxt_dl_ops = { #endif /* CONFIG_BNXT_SRIOV */ }; +static const struct bnxt_dl_nvm_param nvm_params[] = { + {DEVLINK_PARAM_GENERIC_ID_ENABLE_SRIOV, NVM_OFF_ENABLE_SRIOV, + BNXT_NVM_SHARED_CFG, 1}, +}; + +static int bnxt_hwrm_nvm_req(struct bnxt *bp, u32 param_id, void *msg, + int msg_len, union devlink_param_value *val) +{ + struct hwrm_nvm_variable_input *req = msg; + void *data_addr = NULL, *buf = NULL; + struct bnxt_dl_nvm_param nvm_param; + int bytesize, idx = 0, rc, i; + dma_addr_t data_dma_addr; + + /* Get/Set NVM CFG parameter is supported only on PFs */ + if (BNXT_VF(bp)) + return -EPERM; + + for (i = 0; i < ARRAY_SIZE(nvm_params); i++) { + if (nvm_params[i].id == param_id) { + nvm_param = nvm_params[i]; + break; + } + } + + if (nvm_param.dir_type == BNXT_NVM_PORT_CFG) + idx = bp->pf.port_id; + else if (nvm_param.dir_type == BNXT_NVM_FUNC_CFG) + idx = bp->pf.fw_fid - BNXT_FIRST_PF_FID; + + bytesize = roundup(nvm_param.num_bits, BITS_PER_BYTE) / BITS_PER_BYTE; + if (nvm_param.num_bits == 1) + buf = &val->vbool; + + data_addr = dma_zalloc_coherent(&bp->pdev->dev, bytesize, + &data_dma_addr, GFP_KERNEL); + if (!data_addr) + return -ENOMEM; + + req->data_addr = cpu_to_le64(data_dma_addr); + req->data_len = cpu_to_le16(nvm_param.num_bits); + req->option_num = cpu_to_le16(nvm_param.offset); + req->index_0 = cpu_to_le16(idx); + if (idx) + req->dimensions = cpu_to_le16(1); + + if (req->req_type == HWRM_NVM_SET_VARIABLE) + memcpy(data_addr, buf, bytesize); + + rc = hwrm_send_message(bp, msg, msg_len, HWRM_CMD_TIMEOUT); + if (!rc && req->req_type == HWRM_NVM_GET_VARIABLE) + memcpy(buf, data_addr, bytesize); + + dma_free_coherent(&bp->pdev->dev, bytesize, data_addr, data_dma_addr); + if (rc) + return -EIO; + return 0; +} + +static int bnxt_dl_nvm_param_get(struct devlink *dl, u32 id, + struct devlink_param_gset_ctx *ctx) +{ + struct hwrm_nvm_get_variable_input req = {0}; + struct bnxt *bp = bnxt_get_bp_from_dl(dl); + + bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_NVM_GET_VARIABLE, -1, -1); + return bnxt_hwrm_nvm_req(bp, id, &req, sizeof(req), &ctx->val); +} + +static int bnxt_dl_nvm_param_set(struct devlink *dl, u32 id, + struct devlink_param_gset_ctx *ctx) +{ + struct hwrm_nvm_set_variable_input req = {0}; + struct bnxt *bp = bnxt_get_bp_from_dl(dl); + + bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_NVM_SET_VARIABLE, -1, -1); + return bnxt_hwrm_nvm_req(bp, id, &req, sizeof(req), &ctx->val); +} + +static const struct devlink_param bnxt_dl_params[] = { + DEVLINK_PARAM_GENERIC(ENABLE_SRIOV, + BIT(DEVLINK_PARAM_CMODE_PERMANENT), + bnxt_dl_nvm_param_get, bnxt_dl_nvm_param_set, + NULL), +}; + int bnxt_dl_register(struct bnxt *bp) { struct devlink *dl; int rc; - if (!pci_find_ext_capability(bp->pdev, PCI_EXT_CAP_ID_SRIOV)) - return 0; - - if (bp->hwrm_spec_code < 0x10803) { - netdev_warn(bp->dev, "Firmware does not support SR-IOV E-Switch SWITCHDEV mode.\n"); + if (bp->hwrm_spec_code < 0x10600) { + netdev_warn(bp->dev, "Firmware does not support NVM params"); return -ENOTSUPP; } @@ -41,16 +124,34 @@ int bnxt_dl_register(struct bnxt *bp) } bnxt_link_bp_to_dl(bp, 
dl); - bp->eswitch_mode = DEVLINK_ESWITCH_MODE_LEGACY; + + /* Add switchdev eswitch mode setting, if SRIOV supported */ + if (pci_find_ext_capability(bp->pdev, PCI_EXT_CAP_ID_SRIOV) && + bp->hwrm_spec_code > 0x10803) + bp->eswitch_mode = DEVLINK_ESWITCH_MODE_LEGACY; + rc = devlink_register(dl, &bp->pdev->dev); if (rc) { - bnxt_link_bp_to_dl(bp, NULL); - devlink_free(dl); netdev_warn(bp->dev, "devlink_register failed. rc=%d", rc); - return rc; + goto err_dl_free; + } + + rc = devlink_params_register(dl, bnxt_dl_params, + ARRAY_SIZE(bnxt_dl_params)); + if (rc) { + netdev_warn(bp->dev, "devlink_params_register failed. rc=%d", + rc); + goto err_dl_unreg; } return 0; + +err_dl_unreg: + devlink_unregister(dl); +err_dl_free: + bnxt_link_bp_to_dl(bp, NULL); + devlink_free(dl); + return rc; } void bnxt_dl_unregister(struct bnxt *bp) @@ -60,6 +161,8 @@ void bnxt_dl_unregister(struct bnxt *bp) if (!dl) return; + devlink_params_unregister(dl, bnxt_dl_params, + ARRAY_SIZE(bnxt_dl_params)); devlink_unregister(dl); devlink_free(dl); } diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h index e92a35d8b642..2f68dc048390 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h @@ -33,6 +33,21 @@ static inline void bnxt_link_bp_to_dl(struct bnxt *bp, struct devlink *dl) } } +#define NVM_OFF_ENABLE_SRIOV 401 + +enum bnxt_nvm_dir_type { + BNXT_NVM_SHARED_CFG = 40, + BNXT_NVM_PORT_CFG, + BNXT_NVM_FUNC_CFG, +}; + +struct bnxt_dl_nvm_param { + u16 id; + u16 offset; + u16 dir_type; + u16 num_bits; +}; + int bnxt_dl_register(struct bnxt *bp); void bnxt_dl_unregister(struct bnxt *bp); diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h index 0fe0ea8dce6c..c75d7fa6dab6 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h @@ -6201,6 +6201,19 @@ struct hwrm_nvm_install_update_cmd_err { u8 unused_0[7]; }; +struct hwrm_nvm_variable_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le64 data_addr; + __le16 data_len; + __le16 option_num; + __le16 dimensions; + __le16 index_0; +}; + /* hwrm_nvm_get_variable_input (size:320b/40B) */ struct hwrm_nvm_get_variable_input { __le16 req_type; diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c index 491bd40a254d..139d96c5a023 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c @@ -1568,22 +1568,16 @@ void bnxt_tc_flow_stats_work(struct bnxt *bp) int bnxt_tc_setup_flower(struct bnxt *bp, u16 src_fid, struct tc_cls_flower_offload *cls_flower) { - int rc = 0; - switch (cls_flower->command) { case TC_CLSFLOWER_REPLACE: - rc = bnxt_tc_add_flow(bp, src_fid, cls_flower); - break; - + return bnxt_tc_add_flow(bp, src_fid, cls_flower); case TC_CLSFLOWER_DESTROY: - rc = bnxt_tc_del_flow(bp, cls_flower); - break; - + return bnxt_tc_del_flow(bp, cls_flower); case TC_CLSFLOWER_STATS: - rc = bnxt_tc_get_flow_stats(bp, cls_flower); - break; + return bnxt_tc_get_flow_stats(bp, cls_flower); + default: + return -EOPNOTSUPP; } - return rc; } static const struct rhashtable_params bnxt_tc_flow_ht_params = { diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c index 05d405905906..e31f5d803c13 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c 
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c @@ -173,7 +173,7 @@ static int bnxt_vf_rep_setup_tc_block(struct net_device *dev, case TC_BLOCK_BIND: return tcf_block_cb_register(f->block, bnxt_vf_rep_setup_tc_block_cb, - vf_rep, vf_rep); + vf_rep, vf_rep, f->extack); case TC_BLOCK_UNBIND: tcf_block_cb_unregister(f->block, bnxt_vf_rep_setup_tc_block_cb, vf_rep); @@ -543,9 +543,14 @@ int bnxt_dl_eswitch_mode_set(struct devlink *devlink, u16 mode) break; case DEVLINK_ESWITCH_MODE_SWITCHDEV: + if (bp->hwrm_spec_code < 0x10803) { + netdev_warn(bp->dev, "FW does not support SRIOV E-Switch SWITCHDEV mode\n"); + rc = -ENOTSUPP; + goto done; + } + if (pci_num_vf(bp->pdev) == 0) { - netdev_info(bp->dev, - "Enable VFs before setting switchdev mode"); + netdev_info(bp->dev, "Enable VFs before setting switchdev mode"); rc = -EPERM; goto done; } diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c index 1f0e872d0667..0584d07c8c33 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c @@ -219,7 +219,6 @@ int bnxt_xdp(struct net_device *dev, struct netdev_bpf *xdp) rc = bnxt_xdp_set(bp, xdp->prog); break; case XDP_QUERY_PROG: - xdp->prog_attached = !!bp->xdp_prog; xdp->prog_id = bp->xdp_prog ? bp->xdp_prog->aux->id : 0; rc = 0; break; diff --git a/drivers/net/ethernet/broadcom/cnic.c b/drivers/net/ethernet/broadcom/cnic.c index 4fd829b5e65d..d83233ae4a15 100644 --- a/drivers/net/ethernet/broadcom/cnic.c +++ b/drivers/net/ethernet/broadcom/cnic.c @@ -2562,7 +2562,6 @@ static void cnic_bnx2x_delete_wait(struct cnic_dev *dev, u32 start_cid) static int cnic_bnx2x_fcoe_fw_destroy(struct cnic_dev *dev, struct kwqe *kwqe) { - struct fcoe_kwqe_destroy *req; union l5cm_specific_data l5_data; struct cnic_local *cp = dev->cnic_priv; struct bnx2x *bp = netdev_priv(dev->netdev); @@ -2571,7 +2570,6 @@ static int cnic_bnx2x_fcoe_fw_destroy(struct cnic_dev *dev, struct kwqe *kwqe) cnic_bnx2x_delete_wait(dev, MAX_ISCSI_TBL_SZ); - req = (struct fcoe_kwqe_destroy *) kwqe; cid = BNX2X_HW_CID(bp, cp->fcoe_init_cid); memset(&l5_data, 0, sizeof(l5_data)); @@ -4090,7 +4088,7 @@ static void cnic_cm_free_mem(struct cnic_dev *dev) { struct cnic_local *cp = dev->cnic_priv; - kfree(cp->csk_tbl); + kvfree(cp->csk_tbl); cp->csk_tbl = NULL; cnic_free_id_tbl(&cp->csk_port_tbl); } @@ -4100,8 +4098,8 @@ static int cnic_cm_alloc_mem(struct cnic_dev *dev) struct cnic_local *cp = dev->cnic_priv; u32 port_id; - cp->csk_tbl = kcalloc(MAX_CM_SK_TBL_SZ, sizeof(struct cnic_sock), - GFP_KERNEL); + cp->csk_tbl = kvcalloc(MAX_CM_SK_TBL_SZ, sizeof(struct cnic_sock), + GFP_KERNEL); if (!cp->csk_tbl) return -ENOMEM; @@ -5091,13 +5089,12 @@ static int cnic_start_bnx2x_hw(struct cnic_dev *dev) struct cnic_local *cp = dev->cnic_priv; struct bnx2x *bp = netdev_priv(dev->netdev); struct cnic_eth_dev *ethdev = cp->ethdev; - int func, ret; + int ret; u32 pfid; dev->stats_addr = ethdev->addr_drv_info_to_mcp; cp->func = bp->pf_num; - func = CNIC_FUNC(cp); pfid = bp->pfid; ret = cnic_init_id_tbl(&cp->cid_tbl, MAX_ISCSI_TBL_SZ, diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c index aa1374d0af93..d8dad07f826a 100644 --- a/drivers/net/ethernet/broadcom/tg3.c +++ b/drivers/net/ethernet/broadcom/tg3.c @@ -725,6 +725,7 @@ static int tg3_ape_lock(struct tg3 *tp, int locknum) case TG3_APE_LOCK_GPIO: if (tg3_asic_rev(tp) == ASIC_REV_5761) return 0; + /* else: fall through */ case TG3_APE_LOCK_GRC: case TG3_APE_LOCK_MEM: 
if (!tp->pci_fn) @@ -785,6 +786,7 @@ static void tg3_ape_unlock(struct tg3 *tp, int locknum) case TG3_APE_LOCK_GPIO: if (tg3_asic_rev(tp) == ASIC_REV_5761) return; + /* else: fall through */ case TG3_APE_LOCK_GRC: case TG3_APE_LOCK_MEM: if (!tp->pci_fn) @@ -10719,28 +10721,40 @@ static int tg3_reset_hw(struct tg3 *tp, bool reset_phy) switch (limit) { case 16: tw32(MAC_RCV_RULE_15, 0); tw32(MAC_RCV_VALUE_15, 0); + /* fall through */ case 15: tw32(MAC_RCV_RULE_14, 0); tw32(MAC_RCV_VALUE_14, 0); + /* fall through */ case 14: tw32(MAC_RCV_RULE_13, 0); tw32(MAC_RCV_VALUE_13, 0); + /* fall through */ case 13: tw32(MAC_RCV_RULE_12, 0); tw32(MAC_RCV_VALUE_12, 0); + /* fall through */ case 12: tw32(MAC_RCV_RULE_11, 0); tw32(MAC_RCV_VALUE_11, 0); + /* fall through */ case 11: tw32(MAC_RCV_RULE_10, 0); tw32(MAC_RCV_VALUE_10, 0); + /* fall through */ case 10: tw32(MAC_RCV_RULE_9, 0); tw32(MAC_RCV_VALUE_9, 0); + /* fall through */ case 9: tw32(MAC_RCV_RULE_8, 0); tw32(MAC_RCV_VALUE_8, 0); + /* fall through */ case 8: tw32(MAC_RCV_RULE_7, 0); tw32(MAC_RCV_VALUE_7, 0); + /* fall through */ case 7: tw32(MAC_RCV_RULE_6, 0); tw32(MAC_RCV_VALUE_6, 0); + /* fall through */ case 6: tw32(MAC_RCV_RULE_5, 0); tw32(MAC_RCV_VALUE_5, 0); + /* fall through */ case 5: tw32(MAC_RCV_RULE_4, 0); tw32(MAC_RCV_VALUE_4, 0); + /* fall through */ case 4: /* tw32(MAC_RCV_RULE_3, 0); tw32(MAC_RCV_VALUE_3, 0); */ case 3: diff --git a/drivers/net/ethernet/cadence/Kconfig b/drivers/net/ethernet/cadence/Kconfig index 427d65a1a126..b9984015ca8c 100644 --- a/drivers/net/ethernet/cadence/Kconfig +++ b/drivers/net/ethernet/cadence/Kconfig @@ -2,7 +2,7 @@ # Atmel device configuration # -config NET_CADENCE +config NET_VENDOR_CADENCE bool "Cadence devices" depends on HAS_IOMEM default y @@ -16,7 +16,7 @@ config NET_CADENCE the remaining Atmel network card questions. If you say Y, you will be asked for your specific card in the following questions. -if NET_CADENCE +if NET_VENDOR_CADENCE config MACB tristate "Cadence MACB/GEM support" @@ -48,4 +48,4 @@ config MACB_PCI To compile this driver as a module, choose M here: the module will be called macb_pci. -endif # NET_CADENCE +endif # NET_VENDOR_CADENCE diff --git a/drivers/net/ethernet/cavium/Kconfig b/drivers/net/ethernet/cavium/Kconfig index 92d88c5f76fb..5f03199a3acf 100644 --- a/drivers/net/ethernet/cavium/Kconfig +++ b/drivers/net/ethernet/cavium/Kconfig @@ -4,7 +4,6 @@ config NET_VENDOR_CAVIUM bool "Cavium ethernet drivers" - depends on PCI default y ---help--- Select this option if you want enable Cavium network support. @@ -36,7 +35,7 @@ config THUNDER_NIC_BGX tristate "Thunder MAC interface driver (BGX)" depends on 64BIT && PCI select PHYLIB - select MDIO_THUNDER + select MDIO_THUNDER if PCI select THUNDER_NIC_RGX ---help--- This driver supports programming and controlling of MAC @@ -46,7 +45,7 @@ config THUNDER_NIC_RGX tristate "Thunder MAC interface driver (RGX)" depends on 64BIT && PCI select PHYLIB - select MDIO_THUNDER + select MDIO_THUNDER if PCI ---help--- This driver supports configuring XCV block of RGX interface present on CN81XX chip. 
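Several hunks in this series (bnx2x_link.c, bnx2x_main.c, bnx2x_sp.c, bnx2x_sriov.c, and tg3.c above) only add or normalize "/* fall through */" comments so that GCC's -Wimplicit-fallthrough checking treats the missing break as intentional. A small self-contained illustration of the convention (example code, not taken from this patch):

/*
 * With -Wimplicit-fallthrough enabled, a comment placed immediately
 * before the next case label documents that the fall-through is
 * deliberate and suppresses the warning.
 */
static void decode_link_cfg(unsigned int cfg, int *speed, int *duplex)
{
	*duplex = 1;		/* assume full duplex */

	switch (cfg) {
	case 0:			/* 10M half */
		*duplex = 0;
		/* fall through */
	case 1:			/* 10M full */
		*speed = 10;
		break;
	case 2:			/* 100M half */
		*duplex = 0;
		/* fall through */
	case 3:			/* 100M full */
		*speed = 100;
		break;
	default:
		*speed = 0;
		break;
	}
}

This mirrors the bnx2x_phy_def_cfg() hunk earlier in the series, where the half-duplex cases set req_duplex and then fall into the matching full-duplex case to set the line speed.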
@@ -67,6 +66,7 @@ config LIQUIDIO tristate "Cavium LiquidIO support" depends on 64BIT && PCI depends on MAY_USE_DEVLINK + depends on PCI imply PTP_1588_CLOCK select FW_LOADER select LIBCRC32C diff --git a/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c b/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c index 929d485a3a2f..e088dedc1747 100644 --- a/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c +++ b/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c @@ -493,6 +493,9 @@ static void cn23xx_pf_setup_global_output_regs(struct octeon_device *oct) for (q_no = srn; q_no < ern; q_no++) { reg_val = octeon_read_csr(oct, CN23XX_SLI_OQ_PKT_CONTROL(q_no)); + /* clear IPTR */ + reg_val &= ~CN23XX_PKT_OUTPUT_CTL_IPTR; + /* set DPTR */ reg_val |= CN23XX_PKT_OUTPUT_CTL_DPTR; diff --git a/drivers/net/ethernet/cavium/liquidio/cn23xx_vf_device.c b/drivers/net/ethernet/cavium/liquidio/cn23xx_vf_device.c index 9338a0008378..91dce8b8ef7e 100644 --- a/drivers/net/ethernet/cavium/liquidio/cn23xx_vf_device.c +++ b/drivers/net/ethernet/cavium/liquidio/cn23xx_vf_device.c @@ -165,6 +165,9 @@ static void cn23xx_vf_setup_global_output_regs(struct octeon_device *oct) reg_val = octeon_read_csr(oct, CN23XX_VF_SLI_OQ_PKT_CONTROL(q_no)); + /* clear IPTR */ + reg_val &= ~CN23XX_PKT_OUTPUT_CTL_IPTR; + /* set DPTR */ reg_val |= CN23XX_PKT_OUTPUT_CTL_DPTR; @@ -379,7 +382,7 @@ void cn23xx_vf_ask_pf_to_do_flr(struct octeon_device *oct) mbox_cmd.recv_len = 0; mbox_cmd.recv_status = 0; mbox_cmd.fn = NULL; - mbox_cmd.fn_arg = 0; + mbox_cmd.fn_arg = NULL; octeon_mbox_write(oct, &mbox_cmd); } diff --git a/drivers/net/ethernet/cavium/liquidio/lio_main.c b/drivers/net/ethernet/cavium/liquidio/lio_main.c index 7e8454d3b1ad..8ef87a76692b 100644 --- a/drivers/net/ethernet/cavium/liquidio/lio_main.c +++ b/drivers/net/ethernet/cavium/liquidio/lio_main.c @@ -687,7 +687,7 @@ static void lio_sync_octeon_time(struct work_struct *work) lt = (struct lio_time *)sc->virtdptr; /* Get time of the day */ - getnstimeofday64(&ts); + ktime_get_real_ts64(&ts); lt->sec = ts.tv_sec; lt->nsec = ts.tv_nsec; octeon_swap_8B_data((u64 *)lt, (sizeof(struct lio_time)) / 8); @@ -2631,7 +2631,7 @@ static int liquidio_vlan_rx_kill_vid(struct net_device *netdev, ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl); if (ret < 0) { - dev_err(&oct->pci_dev->dev, "Add VLAN filter failed in core (ret: 0x%x)\n", + dev_err(&oct->pci_dev->dev, "Del VLAN filter failed in core (ret: 0x%x)\n", ret); } return ret; @@ -2909,7 +2909,7 @@ static int liquidio_set_vf_vlan(struct net_device *netdev, int vfidx, vfidx + 1; /* vfidx is 0 based, but vf_num (param2) is 1 based */ nctrl.ncmd.s.more = 0; nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; - nctrl.cb_fn = 0; + nctrl.cb_fn = NULL; nctrl.wait_time = LIO_CMD_WAIT_TM; octnet_send_nic_ctrl_pkt(oct, &nctrl); @@ -3068,7 +3068,7 @@ static int liquidio_set_vf_link_state(struct net_device *netdev, int vfidx, nctrl.ncmd.s.param2 = linkstate; nctrl.ncmd.s.more = 0; nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; - nctrl.cb_fn = 0; + nctrl.cb_fn = NULL; nctrl.wait_time = LIO_CMD_WAIT_TM; octnet_send_nic_ctrl_pkt(oct, &nctrl); @@ -3302,7 +3302,9 @@ static int setup_nic_devices(struct octeon_device *octeon_dev) { struct lio *lio = NULL; struct net_device *netdev; - u8 mac[6], i, j, *fw_ver; + u8 mac[6], i, j, *fw_ver, *micro_ver; + unsigned long micro; + u32 cur_ver; struct octeon_soft_command *sc; struct liquidio_if_cfg_context *ctx; struct liquidio_if_cfg_resp *resp; @@ -3432,6 +3434,14 @@ static int setup_nic_devices(struct 
octeon_device *octeon_dev) fw_ver); } + /* extract micro version field; point past '<maj>.<min>.' */ + micro_ver = fw_ver + strlen(LIQUIDIO_BASE_VERSION) + 1; + if (kstrtoul(micro_ver, 10, &micro) != 0) + micro = 0; + octeon_dev->fw_info.ver.maj = LIQUIDIO_BASE_MAJOR_VERSION; + octeon_dev->fw_info.ver.min = LIQUIDIO_BASE_MINOR_VERSION; + octeon_dev->fw_info.ver.rev = micro; + octeon_swap_8B_data((u64 *)(&resp->cfg_info), (sizeof(struct liquidio_if_cfg_info)) >> 3); @@ -3572,9 +3582,8 @@ static int setup_nic_devices(struct octeon_device *octeon_dev) for (j = 0; j < octeon_dev->sriov_info.max_vfs; j++) { u8 vfmac[ETH_ALEN]; - random_ether_addr(&vfmac[0]); - if (__liquidio_set_vf_mac(netdev, j, - &vfmac[0], false)) { + eth_random_addr(vfmac); + if (__liquidio_set_vf_mac(netdev, j, vfmac, false)) { dev_err(&octeon_dev->pci_dev->dev, "Error setting VF%d MAC address\n", j); @@ -3675,7 +3684,19 @@ static int setup_nic_devices(struct octeon_device *octeon_dev) OCTEON_CN2350_25GB_SUBSYS_ID || octeon_dev->subsystem_id == OCTEON_CN2360_25GB_SUBSYS_ID) { - liquidio_get_speed(lio); + cur_ver = OCT_FW_VER(octeon_dev->fw_info.ver.maj, + octeon_dev->fw_info.ver.min, + octeon_dev->fw_info.ver.rev); + + /* speed control unsupported in f/w older than 1.7.2 */ + if (cur_ver < OCT_FW_VER(1, 7, 2)) { + dev_info(&octeon_dev->pci_dev->dev, + "speed setting not supported by f/w."); + octeon_dev->speed_setting = 25; + octeon_dev->no_speed_setting = 1; + } else { + liquidio_get_speed(lio); + } if (octeon_dev->speed_setting == 0) { octeon_dev->speed_setting = 25; diff --git a/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c b/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c index 7fa0212873ac..b77835724dc8 100644 --- a/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c +++ b/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c @@ -1693,7 +1693,7 @@ liquidio_vlan_rx_kill_vid(struct net_device *netdev, ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl); if (ret < 0) { - dev_err(&oct->pci_dev->dev, "Add VLAN filter failed in core (ret: 0x%x)\n", + dev_err(&oct->pci_dev->dev, "Del VLAN filter failed in core (ret: 0x%x)\n", ret); } return ret; } diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_console.c b/drivers/net/ethernet/cavium/liquidio/octeon_console.c index 7f97ae48efed..0cc2338d8d2a 100644 --- a/drivers/net/ethernet/cavium/liquidio/octeon_console.c +++ b/drivers/net/ethernet/cavium/liquidio/octeon_console.c @@ -902,7 +902,7 @@ int octeon_download_firmware(struct octeon_device *oct, const u8 *data, * * Octeon always uses UTC time. so timezone information is not sent. 
*/ - getnstimeofday64(&ts); + ktime_get_real_ts64(&ts); ret = snprintf(boottime, MAX_BOOTTIME_SIZE, " time_sec=%lld time_nsec=%ld", (s64)ts.tv_sec, ts.tv_nsec); diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_device.h b/drivers/net/ethernet/cavium/liquidio/octeon_device.h index 94a4ed88d618..d99ca6ba23a4 100644 --- a/drivers/net/ethernet/cavium/liquidio/octeon_device.h +++ b/drivers/net/ethernet/cavium/liquidio/octeon_device.h @@ -288,8 +288,17 @@ struct oct_fw_info { */ u32 app_mode; char liquidio_firmware_version[32]; + /* Fields extracted from legacy string 'liquidio_firmware_version' */ + struct { + u8 maj; + u8 min; + u8 rev; + } ver; }; +#define OCT_FW_VER(maj, min, rev) \ + (((u32)(maj) << 16) | ((u32)(min) << 8) | ((u32)(rev))) + /* wrappers around work structs */ struct cavium_wk { struct delayed_work work; diff --git a/drivers/net/ethernet/cavium/liquidio/request_manager.c b/drivers/net/ethernet/cavium/liquidio/request_manager.c index 1f2e75da28f8..d5d9e47daa4b 100644 --- a/drivers/net/ethernet/cavium/liquidio/request_manager.c +++ b/drivers/net/ethernet/cavium/liquidio/request_manager.c @@ -110,8 +110,8 @@ int octeon_init_instr_queue(struct octeon_device *oct, memset(iq->request_list, 0, sizeof(*iq->request_list) * num_descs); - dev_dbg(&oct->pci_dev->dev, "IQ[%d]: base: %p basedma: %llx count: %d\n", - iq_no, iq->base_addr, iq->base_addr_dma, iq->max_count); + dev_dbg(&oct->pci_dev->dev, "IQ[%d]: base: %p basedma: %pad count: %d\n", + iq_no, iq->base_addr, &iq->base_addr_dma, iq->max_count); iq->txpciq.u64 = txpciq.u64; iq->fill_threshold = (u32)conf->db_min; diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_main.c b/drivers/net/ethernet/cavium/thunder/nicvf_main.c index 135766c4296b..768f584f8392 100644 --- a/drivers/net/ethernet/cavium/thunder/nicvf_main.c +++ b/drivers/net/ethernet/cavium/thunder/nicvf_main.c @@ -1848,7 +1848,6 @@ static int nicvf_xdp(struct net_device *netdev, struct netdev_bpf *xdp) case XDP_SETUP_PROG: return nicvf_xdp_setup(nic, xdp->prog); case XDP_QUERY_PROG: - xdp->prog_attached = !!nic->xdp_prog; xdp->prog_id = nic->xdp_prog ? 
nic->xdp_prog->aux->id : 0; return 0; default: diff --git a/drivers/net/ethernet/chelsio/cxgb4/cudbg_entity.h b/drivers/net/ethernet/chelsio/cxgb4/cudbg_entity.h index 3c5057868ab3..aaf7985aef4c 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cudbg_entity.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cudbg_entity.h @@ -281,12 +281,18 @@ struct cudbg_tid_data { #define CUDBG_NUM_ULPTX 11 #define CUDBG_NUM_ULPTX_READ 512 +#define CUDBG_NUM_ULPTX_ASIC 6 +#define CUDBG_NUM_ULPTX_ASIC_READ 128 + +#define CUDBG_ULPTX_LA_REV 1 struct cudbg_ulptx_la { u32 rdptr[CUDBG_NUM_ULPTX]; u32 wrptr[CUDBG_NUM_ULPTX]; u32 rddata[CUDBG_NUM_ULPTX]; u32 rd_data[CUDBG_NUM_ULPTX][CUDBG_NUM_ULPTX_READ]; + u32 rdptr_asic[CUDBG_NUM_ULPTX_ASIC_READ]; + u32 rddata_asic[CUDBG_NUM_ULPTX_ASIC_READ][CUDBG_NUM_ULPTX_ASIC]; }; #define CUDBG_CHAC_PBT_ADDR 0x2800 diff --git a/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c b/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c index 0afcfe99bff3..b1eb843035ee 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c @@ -2586,15 +2586,24 @@ int cudbg_collect_ulptx_la(struct cudbg_init *pdbg_init, struct adapter *padap = pdbg_init->adap; struct cudbg_buffer temp_buff = { 0 }; struct cudbg_ulptx_la *ulptx_la_buff; + struct cudbg_ver_hdr *ver_hdr; u32 i, j; int rc; - rc = cudbg_get_buff(pdbg_init, dbg_buff, sizeof(struct cudbg_ulptx_la), + rc = cudbg_get_buff(pdbg_init, dbg_buff, + sizeof(struct cudbg_ver_hdr) + + sizeof(struct cudbg_ulptx_la), &temp_buff); if (rc) return rc; - ulptx_la_buff = (struct cudbg_ulptx_la *)temp_buff.data; + ver_hdr = (struct cudbg_ver_hdr *)temp_buff.data; + ver_hdr->signature = CUDBG_ENTITY_SIGNATURE; + ver_hdr->revision = CUDBG_ULPTX_LA_REV; + ver_hdr->size = sizeof(struct cudbg_ulptx_la); + + ulptx_la_buff = (struct cudbg_ulptx_la *)(temp_buff.data + + sizeof(*ver_hdr)); for (i = 0; i < CUDBG_NUM_ULPTX; i++) { ulptx_la_buff->rdptr[i] = t4_read_reg(padap, ULP_TX_LA_RDPTR_0_A + @@ -2610,6 +2619,25 @@ int cudbg_collect_ulptx_la(struct cudbg_init *pdbg_init, t4_read_reg(padap, ULP_TX_LA_RDDATA_0_A + 0x10 * i); } + + for (i = 0; i < CUDBG_NUM_ULPTX_ASIC_READ; i++) { + t4_write_reg(padap, ULP_TX_ASIC_DEBUG_CTRL_A, 0x1); + ulptx_la_buff->rdptr_asic[i] = + t4_read_reg(padap, ULP_TX_ASIC_DEBUG_CTRL_A); + ulptx_la_buff->rddata_asic[i][0] = + t4_read_reg(padap, ULP_TX_ASIC_DEBUG_0_A); + ulptx_la_buff->rddata_asic[i][1] = + t4_read_reg(padap, ULP_TX_ASIC_DEBUG_1_A); + ulptx_la_buff->rddata_asic[i][2] = + t4_read_reg(padap, ULP_TX_ASIC_DEBUG_2_A); + ulptx_la_buff->rddata_asic[i][3] = + t4_read_reg(padap, ULP_TX_ASIC_DEBUG_3_A); + ulptx_la_buff->rddata_asic[i][4] = + t4_read_reg(padap, ULP_TX_ASIC_DEBUG_4_A); + ulptx_la_buff->rddata_asic[i][5] = + t4_read_reg(padap, PM_RX_BASE_ADDR); + } + return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff); } diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h index 0dbe2d9e22d6..3da9299cd786 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h @@ -46,6 +46,7 @@ #include <linux/spinlock.h> #include <linux/timer.h> #include <linux/vmalloc.h> +#include <linux/rhashtable.h> #include <linux/etherdevice.h> #include <linux/net_tstamp.h> #include <linux/ptp_clock_kernel.h> @@ -319,6 +320,21 @@ struct vpd_params { u8 na[MACADDR_LEN + 1]; }; +/* Maximum resources provisioned for a PCI PF. 
+ */ +struct pf_resources { + unsigned int nvi; /* N virtual interfaces */ + unsigned int neq; /* N egress Qs */ + unsigned int nethctrl; /* N egress ETH or CTRL Qs */ + unsigned int niqflint; /* N ingress Qs/w free list(s) & intr */ + unsigned int niq; /* N ingress Qs */ + unsigned int tc; /* PCI-E traffic class */ + unsigned int pmask; /* port access rights mask */ + unsigned int nexactf; /* N exact MPS filters */ + unsigned int r_caps; /* read capabilities */ + unsigned int wx_caps; /* write/execute capabilities */ +}; + struct pci_params { unsigned int vpd_cap_addr; unsigned char speed; @@ -346,6 +362,7 @@ struct adapter_params { struct sge_params sge; struct tp_params tp; struct vpd_params vpd; + struct pf_resources pfres; struct pci_params pci; struct devlog_params devlog; enum pcie_memwin drv_memwin; @@ -521,6 +538,15 @@ enum { MAX_INGQ = MAX_ETH_QSETS + INGQ_EXTRAS, }; +enum { + PRIV_FLAG_PORT_TX_VM_BIT, +}; + +#define PRIV_FLAG_PORT_TX_VM BIT(PRIV_FLAG_PORT_TX_VM_BIT) + +#define PRIV_FLAGS_ADAP 0 +#define PRIV_FLAGS_PORT PRIV_FLAG_PORT_TX_VM + struct adapter; struct sge_rspq; @@ -557,6 +583,7 @@ struct port_info { struct hwtstamp_config tstamp_config; bool ptp_enable; struct sched_table *sched_tbl; + u32 eth_flags; }; struct dentry; @@ -867,6 +894,7 @@ struct adapter { unsigned int flags; unsigned int adap_idx; enum chip_type chip; + u32 eth_flags; int msg_enable; __be16 vxlan_port; @@ -956,6 +984,7 @@ struct adapter { struct chcr_stats_debug chcr_stats; /* TC flower offload */ + bool tc_flower_initialized; struct rhashtable flower_tbl; struct rhashtable_params flower_ht_params; struct timer_list flower_stats_timer; @@ -1333,7 +1362,7 @@ void t4_os_link_changed(struct adapter *adap, int port_id, int link_stat); void t4_free_sge_resources(struct adapter *adap); void t4_free_ofld_rxqs(struct adapter *adap, int n, struct sge_ofld_rxq *q); irq_handler_t t4_intr_handler(struct adapter *adap); -netdev_tx_t t4_eth_xmit(struct sk_buff *skb, struct net_device *dev); +netdev_tx_t t4_start_xmit(struct sk_buff *skb, struct net_device *dev); int t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp, const struct pkt_gl *gl); int t4_mgmt_tx(struct adapter *adap, struct sk_buff *skb); @@ -1555,6 +1584,7 @@ int t4_eeprom_ptov(unsigned int phys_addr, unsigned int fn, unsigned int sz); int t4_seeprom_wp(struct adapter *adapter, bool enable); int t4_get_raw_vpd_params(struct adapter *adapter, struct vpd_params *p); int t4_get_vpd_params(struct adapter *adapter, struct vpd_params *p); +int t4_get_pfres(struct adapter *adapter); int t4_read_flash(struct adapter *adapter, unsigned int addr, unsigned int nwords, u32 *data, int byte_oriented); int t4_load_fw(struct adapter *adapter, const u8 *fw_data, unsigned int size); diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.c index 8d751efcb90e..55b46592af28 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.c @@ -273,7 +273,8 @@ static u32 cxgb4_get_entity_length(struct adapter *adap, u32 entity) } break; case CUDBG_ULPTX_LA: - len = sizeof(struct cudbg_ulptx_la); + len = sizeof(struct cudbg_ver_hdr) + + sizeof(struct cudbg_ulptx_la); break; case CUDBG_UP_CIM_INDIRECT: n = 0; diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c index c301aaf79d64..631b78baedbb 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c +++ 
b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c @@ -2414,6 +2414,44 @@ static const struct file_operations rss_vf_config_debugfs_fops = { .release = seq_release_private }; +static int resources_show(struct seq_file *seq, void *v) +{ + struct adapter *adapter = seq->private; + struct pf_resources *pfres = &adapter->params.pfres; + + #define S(desc, fmt, var) \ + seq_printf(seq, "%-60s " fmt "\n", \ + desc " (" #var "):", pfres->var) + + S("Virtual Interfaces", "%d", nvi); + S("Egress Queues", "%d", neq); + S("Ethernet Control", "%d", nethctrl); + S("Ingress Queues/w Free Lists/Interrupts", "%d", niqflint); + S("Ingress Queues", "%d", niq); + S("Traffic Class", "%d", tc); + S("Port Access Rights Mask", "%#x", pmask); + S("MAC Address Filters", "%d", nexactf); + S("Firmware Command Read Capabilities", "%#x", r_caps); + S("Firmware Command Write/Execute Capabilities", "%#x", wx_caps); + + #undef S + + return 0; +} + +static int resources_open(struct inode *inode, struct file *file) +{ + return single_open(file, resources_show, inode->i_private); +} + +static const struct file_operations resources_debugfs_fops = { + .owner = THIS_MODULE, + .open = resources_open, + .read = seq_read, + .llseek = seq_lseek, + .release = seq_release, +}; + /** * ethqset2pinfo - return port_info of an Ethernet Queue Set * @adap: the adapter @@ -2820,6 +2858,7 @@ static int meminfo_show(struct seq_file *seq, void *v) { static const char * const memory[] = { "EDC0:", "EDC1:", "MC:", "MC0:", "MC1:", "HMA:"}; + unsigned int free_rx_cnt, free_tx_cnt; struct adapter *adap = seq->private; struct cudbg_meminfo meminfo; int i, rc; @@ -2851,13 +2890,20 @@ static int meminfo_show(struct seq_file *seq, void *v) mem_region_show(seq, "uP Extmem2:", meminfo.up_extmem2_lo, meminfo.up_extmem2_hi); - seq_printf(seq, "\n%u Rx pages of size %uKiB for %u channels\n", - meminfo.rx_pages_data[0], meminfo.rx_pages_data[1], - meminfo.rx_pages_data[2]); - - seq_printf(seq, "%u Tx pages of size %u%ciB for %u channels\n", - meminfo.tx_pages_data[0], meminfo.tx_pages_data[1], - meminfo.tx_pages_data[2], meminfo.tx_pages_data[3]); + for (i = 0, free_rx_cnt = 0; i < 2; i++) + free_rx_cnt += FREERXPAGECOUNT_G + (t4_read_reg(adap, TP_FLM_FREE_RX_CNT_A)); + seq_printf(seq, "\n%u Rx pages (%u free) of size %uKiB for %u channels\n", + meminfo.rx_pages_data[0], free_rx_cnt, + meminfo.rx_pages_data[1], meminfo.rx_pages_data[2]); + + for (i = 0, free_tx_cnt = 0; i < 4; i++) + free_tx_cnt += FREETXPAGECOUNT_G + (t4_read_reg(adap, TP_FLM_FREE_TX_CNT_A)); + seq_printf(seq, "%u Tx pages (%u free) of size %u%ciB for %u channels\n", + meminfo.tx_pages_data[0], free_tx_cnt, + meminfo.tx_pages_data[1], meminfo.tx_pages_data[2], + meminfo.tx_pages_data[3]); seq_printf(seq, "%u p-structs\n\n", meminfo.p_structs); @@ -2924,6 +2970,169 @@ static const struct file_operations chcr_stats_debugfs_fops = { .llseek = seq_lseek, .release = single_release, }; + +#define PRINT_ADAP_STATS(string, value) \ + seq_printf(seq, "%-25s %-20llu\n", (string), \ + (unsigned long long)(value)) + +#define PRINT_CH_STATS(string, value) \ +do { \ + seq_printf(seq, "%-25s ", (string)); \ + for (i = 0; i < adap->params.arch.nchan; i++) \ + seq_printf(seq, "%-20llu ", \ + (unsigned long long)stats.value[i]); \ + seq_printf(seq, "\n"); \ +} while (0) + +#define PRINT_CH_STATS2(string, value) \ +do { \ + seq_printf(seq, "%-25s ", (string)); \ + for (i = 0; i < adap->params.arch.nchan; i++) \ + seq_printf(seq, "%-20llu ", \ + (unsigned long long)stats[i].value); \ + seq_printf(seq, "\n"); 
\ +} while (0) + +static void show_tcp_stats(struct seq_file *seq) +{ + struct adapter *adap = seq->private; + struct tp_tcp_stats v4, v6; + + spin_lock(&adap->stats_lock); + t4_tp_get_tcp_stats(adap, &v4, &v6, false); + spin_unlock(&adap->stats_lock); + + PRINT_ADAP_STATS("tcp_ipv4_out_rsts:", v4.tcp_out_rsts); + PRINT_ADAP_STATS("tcp_ipv4_in_segs:", v4.tcp_in_segs); + PRINT_ADAP_STATS("tcp_ipv4_out_segs:", v4.tcp_out_segs); + PRINT_ADAP_STATS("tcp_ipv4_retrans_segs:", v4.tcp_retrans_segs); + PRINT_ADAP_STATS("tcp_ipv6_out_rsts:", v6.tcp_out_rsts); + PRINT_ADAP_STATS("tcp_ipv6_in_segs:", v6.tcp_in_segs); + PRINT_ADAP_STATS("tcp_ipv6_out_segs:", v6.tcp_out_segs); + PRINT_ADAP_STATS("tcp_ipv6_retrans_segs:", v6.tcp_retrans_segs); +} + +static void show_ddp_stats(struct seq_file *seq) +{ + struct adapter *adap = seq->private; + struct tp_usm_stats stats; + + spin_lock(&adap->stats_lock); + t4_get_usm_stats(adap, &stats, false); + spin_unlock(&adap->stats_lock); + + PRINT_ADAP_STATS("usm_ddp_frames:", stats.frames); + PRINT_ADAP_STATS("usm_ddp_octets:", stats.octets); + PRINT_ADAP_STATS("usm_ddp_drops:", stats.drops); +} + +static void show_rdma_stats(struct seq_file *seq) +{ + struct adapter *adap = seq->private; + struct tp_rdma_stats stats; + + spin_lock(&adap->stats_lock); + t4_tp_get_rdma_stats(adap, &stats, false); + spin_unlock(&adap->stats_lock); + + PRINT_ADAP_STATS("rdma_no_rqe_mod_defer:", stats.rqe_dfr_mod); + PRINT_ADAP_STATS("rdma_no_rqe_pkt_defer:", stats.rqe_dfr_pkt); +} + +static void show_tp_err_adapter_stats(struct seq_file *seq) +{ + struct adapter *adap = seq->private; + struct tp_err_stats stats; + + spin_lock(&adap->stats_lock); + t4_tp_get_err_stats(adap, &stats, false); + spin_unlock(&adap->stats_lock); + + PRINT_ADAP_STATS("tp_err_ofld_no_neigh:", stats.ofld_no_neigh); + PRINT_ADAP_STATS("tp_err_ofld_cong_defer:", stats.ofld_cong_defer); +} + +static void show_cpl_stats(struct seq_file *seq) +{ + struct adapter *adap = seq->private; + struct tp_cpl_stats stats; + u8 i; + + spin_lock(&adap->stats_lock); + t4_tp_get_cpl_stats(adap, &stats, false); + spin_unlock(&adap->stats_lock); + + PRINT_CH_STATS("tp_cpl_requests:", req); + PRINT_CH_STATS("tp_cpl_responses:", rsp); +} + +static void show_tp_err_channel_stats(struct seq_file *seq) +{ + struct adapter *adap = seq->private; + struct tp_err_stats stats; + u8 i; + + spin_lock(&adap->stats_lock); + t4_tp_get_err_stats(adap, &stats, false); + spin_unlock(&adap->stats_lock); + + PRINT_CH_STATS("tp_mac_in_errs:", mac_in_errs); + PRINT_CH_STATS("tp_hdr_in_errs:", hdr_in_errs); + PRINT_CH_STATS("tp_tcp_in_errs:", tcp_in_errs); + PRINT_CH_STATS("tp_tcp6_in_errs:", tcp6_in_errs); + PRINT_CH_STATS("tp_tnl_cong_drops:", tnl_cong_drops); + PRINT_CH_STATS("tp_tnl_tx_drops:", tnl_tx_drops); + PRINT_CH_STATS("tp_ofld_vlan_drops:", ofld_vlan_drops); + PRINT_CH_STATS("tp_ofld_chan_drops:", ofld_chan_drops); +} + +static void show_fcoe_stats(struct seq_file *seq) +{ + struct adapter *adap = seq->private; + struct tp_fcoe_stats stats[NCHAN]; + u8 i; + + spin_lock(&adap->stats_lock); + for (i = 0; i < adap->params.arch.nchan; i++) + t4_get_fcoe_stats(adap, i, &stats[i], false); + spin_unlock(&adap->stats_lock); + + PRINT_CH_STATS2("fcoe_octets_ddp", octets_ddp); + PRINT_CH_STATS2("fcoe_frames_ddp", frames_ddp); + PRINT_CH_STATS2("fcoe_frames_drop", frames_drop); +} + +#undef PRINT_CH_STATS2 +#undef PRINT_CH_STATS +#undef PRINT_ADAP_STATS + +static int tp_stats_show(struct seq_file *seq, void *v) +{ + struct adapter *adap = seq->private; + 
+ seq_puts(seq, "\n--------Adapter Stats--------\n"); + show_tcp_stats(seq); + show_ddp_stats(seq); + show_rdma_stats(seq); + show_tp_err_adapter_stats(seq); + + seq_puts(seq, "\n-------- Channel Stats --------\n"); + if (adap->params.arch.nchan == NCHAN) + seq_printf(seq, "%-25s %-20s %-20s %-20s %-20s\n", + " ", "channel 0", "channel 1", + "channel 2", "channel 3"); + else + seq_printf(seq, "%-25s %-20s %-20s\n", + " ", "channel 0", "channel 1"); + show_cpl_stats(seq); + show_tp_err_channel_stats(seq); + show_fcoe_stats(seq); + + return 0; +} + +DEFINE_SIMPLE_DEBUGFS_FILE(tp_stats); + /* Add an array of Debug FS files. */ void add_debugfs_files(struct adapter *adap, @@ -2973,6 +3182,7 @@ int t4_setup_debugfs(struct adapter *adap) { "rss_key", &rss_key_debugfs_fops, 0400, 0 }, { "rss_pf_config", &rss_pf_config_debugfs_fops, 0400, 0 }, { "rss_vf_config", &rss_vf_config_debugfs_fops, 0400, 0 }, + { "resources", &resources_debugfs_fops, 0400, 0 }, { "sge_qinfo", &sge_qinfo_debugfs_fops, 0400, 0 }, { "ibq_tp0", &cim_ibq_fops, 0400, 0 }, { "ibq_tp1", &cim_ibq_fops, 0400, 1 }, @@ -2999,6 +3209,7 @@ int t4_setup_debugfs(struct adapter *adap) { "blocked_fl", &blocked_fl_fops, 0600, 0 }, { "meminfo", &meminfo_fops, 0400, 0 }, { "crypto", &chcr_stats_debugfs_fops, 0400, 0 }, + { "tp_stats", &tp_stats_debugfs_fops, 0400, 0 }, }; /* Debug FS nodes common to all T5 and later adapters. diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c index f7eef93ffc87..d07230c892a5 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c @@ -115,42 +115,10 @@ static char adapter_stats_strings[][ETH_GSTRING_LEN] = { "db_drop ", "db_full ", "db_empty ", - "tcp_ipv4_out_rsts ", - "tcp_ipv4_in_segs ", - "tcp_ipv4_out_segs ", - "tcp_ipv4_retrans_segs ", - "tcp_ipv6_out_rsts ", - "tcp_ipv6_in_segs ", - "tcp_ipv6_out_segs ", - "tcp_ipv6_retrans_segs ", - "usm_ddp_frames ", - "usm_ddp_octets ", - "usm_ddp_drops ", - "rdma_no_rqe_mod_defer ", - "rdma_no_rqe_pkt_defer ", - "tp_err_ofld_no_neigh ", - "tp_err_ofld_cong_defer ", "write_coal_success ", "write_coal_fail ", }; -static char channel_stats_strings[][ETH_GSTRING_LEN] = { - "--------Channel--------- ", - "tp_cpl_requests ", - "tp_cpl_responses ", - "tp_mac_in_errs ", - "tp_hdr_in_errs ", - "tp_tcp_in_errs ", - "tp_tcp6_in_errs ", - "tp_tnl_cong_drops ", - "tp_tnl_tx_drops ", - "tp_ofld_vlan_drops ", - "tp_ofld_chan_drops ", - "fcoe_octets_ddp ", - "fcoe_frames_ddp ", - "fcoe_frames_drop ", -}; - static char loopback_stats_strings[][ETH_GSTRING_LEN] = { "-------Loopback----------- ", "octets_ok ", @@ -177,14 +145,19 @@ static char loopback_stats_strings[][ETH_GSTRING_LEN] = { "bg3_frames_trunc ", }; +static const char cxgb4_priv_flags_strings[][ETH_GSTRING_LEN] = { + [PRIV_FLAG_PORT_TX_VM_BIT] = "port_tx_vm_wr", +}; + static int get_sset_count(struct net_device *dev, int sset) { switch (sset) { case ETH_SS_STATS: return ARRAY_SIZE(stats_strings) + ARRAY_SIZE(adapter_stats_strings) + - ARRAY_SIZE(channel_stats_strings) + ARRAY_SIZE(loopback_stats_strings); + case ETH_SS_PRIV_FLAGS: + return ARRAY_SIZE(cxgb4_priv_flags_strings); default: return -EOPNOTSUPP; } @@ -235,6 +208,7 @@ static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) FW_HDR_FW_VER_MINOR_G(exprom_vers), FW_HDR_FW_VER_MICRO_G(exprom_vers), FW_HDR_FW_VER_BUILD_G(exprom_vers)); + info->n_priv_flags = ARRAY_SIZE(cxgb4_priv_flags_strings); } static void 
get_strings(struct net_device *dev, u32 stringset, u8 *data) @@ -245,11 +219,11 @@ static void get_strings(struct net_device *dev, u32 stringset, u8 *data) memcpy(data, adapter_stats_strings, sizeof(adapter_stats_strings)); data += sizeof(adapter_stats_strings); - memcpy(data, channel_stats_strings, - sizeof(channel_stats_strings)); - data += sizeof(channel_stats_strings); memcpy(data, loopback_stats_strings, sizeof(loopback_stats_strings)); + } else if (stringset == ETH_SS_PRIV_FLAGS) { + memcpy(data, cxgb4_priv_flags_strings, + sizeof(cxgb4_priv_flags_strings)); } } @@ -270,41 +244,10 @@ struct adapter_stats { u64 db_drop; u64 db_full; u64 db_empty; - u64 tcp_v4_out_rsts; - u64 tcp_v4_in_segs; - u64 tcp_v4_out_segs; - u64 tcp_v4_retrans_segs; - u64 tcp_v6_out_rsts; - u64 tcp_v6_in_segs; - u64 tcp_v6_out_segs; - u64 tcp_v6_retrans_segs; - u64 frames; - u64 octets; - u64 drops; - u64 rqe_dfr_mod; - u64 rqe_dfr_pkt; - u64 ofld_no_neigh; - u64 ofld_cong_defer; u64 wc_success; u64 wc_fail; }; -struct channel_stats { - u64 cpl_req; - u64 cpl_rsp; - u64 mac_in_errs; - u64 hdr_in_errs; - u64 tcp_in_errs; - u64 tcp6_in_errs; - u64 tnl_cong_drops; - u64 tnl_tx_drops; - u64 ofld_vlan_drops; - u64 ofld_chan_drops; - u64 octets_ddp; - u64 frames_ddp; - u64 frames_drop; -}; - static void collect_sge_port_stats(const struct adapter *adap, const struct port_info *p, struct queue_port_stats *s) @@ -327,45 +270,14 @@ static void collect_sge_port_stats(const struct adapter *adap, static void collect_adapter_stats(struct adapter *adap, struct adapter_stats *s) { - struct tp_tcp_stats v4, v6; - struct tp_rdma_stats rdma_stats; - struct tp_err_stats err_stats; - struct tp_usm_stats usm_stats; u64 val1, val2; memset(s, 0, sizeof(*s)); - spin_lock(&adap->stats_lock); - t4_tp_get_tcp_stats(adap, &v4, &v6, false); - t4_tp_get_rdma_stats(adap, &rdma_stats, false); - t4_get_usm_stats(adap, &usm_stats, false); - t4_tp_get_err_stats(adap, &err_stats, false); - spin_unlock(&adap->stats_lock); - s->db_drop = adap->db_stats.db_drop; s->db_full = adap->db_stats.db_full; s->db_empty = adap->db_stats.db_empty; - s->tcp_v4_out_rsts = v4.tcp_out_rsts; - s->tcp_v4_in_segs = v4.tcp_in_segs; - s->tcp_v4_out_segs = v4.tcp_out_segs; - s->tcp_v4_retrans_segs = v4.tcp_retrans_segs; - s->tcp_v6_out_rsts = v6.tcp_out_rsts; - s->tcp_v6_in_segs = v6.tcp_in_segs; - s->tcp_v6_out_segs = v6.tcp_out_segs; - s->tcp_v6_retrans_segs = v6.tcp_retrans_segs; - - if (is_offload(adap)) { - s->frames = usm_stats.frames; - s->octets = usm_stats.octets; - s->drops = usm_stats.drops; - s->rqe_dfr_mod = rdma_stats.rqe_dfr_mod; - s->rqe_dfr_pkt = rdma_stats.rqe_dfr_pkt; - } - - s->ofld_no_neigh = err_stats.ofld_no_neigh; - s->ofld_cong_defer = err_stats.ofld_cong_defer; - if (!is_t4(adap->params.chip)) { int v; @@ -379,36 +291,6 @@ static void collect_adapter_stats(struct adapter *adap, struct adapter_stats *s) } } -static void collect_channel_stats(struct adapter *adap, struct channel_stats *s, - u8 i) -{ - struct tp_cpl_stats cpl_stats; - struct tp_err_stats err_stats; - struct tp_fcoe_stats fcoe_stats; - - memset(s, 0, sizeof(*s)); - - spin_lock(&adap->stats_lock); - t4_tp_get_cpl_stats(adap, &cpl_stats, false); - t4_tp_get_err_stats(adap, &err_stats, false); - t4_get_fcoe_stats(adap, i, &fcoe_stats, false); - spin_unlock(&adap->stats_lock); - - s->cpl_req = cpl_stats.req[i]; - s->cpl_rsp = cpl_stats.rsp[i]; - s->mac_in_errs = err_stats.mac_in_errs[i]; - s->hdr_in_errs = err_stats.hdr_in_errs[i]; - s->tcp_in_errs = err_stats.tcp_in_errs[i]; - 
s->tcp6_in_errs = err_stats.tcp6_in_errs[i]; - s->tnl_cong_drops = err_stats.tnl_cong_drops[i]; - s->tnl_tx_drops = err_stats.tnl_tx_drops[i]; - s->ofld_vlan_drops = err_stats.ofld_vlan_drops[i]; - s->ofld_chan_drops = err_stats.ofld_chan_drops[i]; - s->octets_ddp = fcoe_stats.octets_ddp; - s->frames_ddp = fcoe_stats.frames_ddp; - s->frames_drop = fcoe_stats.frames_drop; -} - static void get_stats(struct net_device *dev, struct ethtool_stats *stats, u64 *data) { @@ -429,11 +311,6 @@ static void get_stats(struct net_device *dev, struct ethtool_stats *stats, data += sizeof(struct adapter_stats) / sizeof(u64); *data++ = (u64)pi->port_id; - collect_channel_stats(adapter, (struct channel_stats *)data, - pi->port_id); - data += sizeof(struct channel_stats) / sizeof(u64); - - *data++ = (u64)pi->port_id; memset(&s, 0, sizeof(s)); t4_get_lb_stats(adapter, pi->port_id, &s); @@ -751,13 +628,10 @@ static int get_link_ksettings(struct net_device *dev, fw_caps_to_lmm(pi->port_type, pi->link_cfg.lpacaps, link_ksettings->link_modes.lp_advertising); - if (netif_carrier_ok(dev)) { - base->speed = pi->link_cfg.speed; - base->duplex = DUPLEX_FULL; - } else { - base->speed = SPEED_UNKNOWN; - base->duplex = DUPLEX_UNKNOWN; - } + base->speed = (netif_carrier_ok(dev) + ? pi->link_cfg.speed + : SPEED_UNKNOWN); + base->duplex = DUPLEX_FULL; if (pi->link_cfg.fc & PAUSE_RX) { if (pi->link_cfg.fc & PAUSE_TX) { @@ -1499,6 +1373,36 @@ static int cxgb4_get_module_eeprom(struct net_device *dev, offset, len, &data[eprom->len - len]); } +static u32 cxgb4_get_priv_flags(struct net_device *netdev) +{ + struct port_info *pi = netdev_priv(netdev); + struct adapter *adapter = pi->adapter; + + return (adapter->eth_flags | pi->eth_flags); +} + +/** + * set_flags - set/unset specified flags if passed in new_flags + * @cur_flags: pointer to current flags + * @new_flags: new incoming flags + * @flags: set of flags to set/unset + */ +static inline void set_flags(u32 *cur_flags, u32 new_flags, u32 flags) +{ + *cur_flags = (*cur_flags & ~flags) | (new_flags & flags); +} + +static int cxgb4_set_priv_flags(struct net_device *netdev, u32 flags) +{ + struct port_info *pi = netdev_priv(netdev); + struct adapter *adapter = pi->adapter; + + set_flags(&adapter->eth_flags, flags, PRIV_FLAGS_ADAP); + set_flags(&pi->eth_flags, flags, PRIV_FLAGS_PORT); + + return 0; +} + static const struct ethtool_ops cxgb_ethtool_ops = { .get_link_ksettings = get_link_ksettings, .set_link_ksettings = set_link_ksettings, @@ -1535,6 +1439,8 @@ static const struct ethtool_ops cxgb_ethtool_ops = { .get_dump_data = get_dump_data, .get_module_info = cxgb4_get_module_info, .get_module_eeprom = cxgb4_get_module_eeprom, + .get_priv_flags = cxgb4_get_priv_flags, + .set_priv_flags = cxgb4_set_priv_flags, }; void cxgb4_set_ethtool_ops(struct net_device *netdev) diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c index bc03c175a3cd..40cf8dc9f163 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c @@ -924,12 +924,14 @@ static int setup_sge_queues(struct adapter *adap) QUEUENUMBER_V(s->ethrxq[0].rspq.abs_id)); return 0; freeout: + dev_err(adap->pdev_dev, "Can't allocate queues, err=%d\n", -err); t4_free_sge_resources(adap); return err; } static u16 cxgb_select_queue(struct net_device *dev, struct sk_buff *skb, - void *accel_priv, select_queue_fallback_t fallback) + struct net_device *sb_dev, + select_queue_fallback_t fallback) { int txq; @@ -971,7 +973,7 @@ 
static u16 cxgb_select_queue(struct net_device *dev, struct sk_buff *skb, return txq; } - return fallback(dev, skb) % dev->real_num_tx_queues; + return fallback(dev, skb, NULL) % dev->real_num_tx_queues; } static int closest_timer(const struct sge *s, int time) @@ -3016,7 +3018,7 @@ static int cxgb_setup_tc_block(struct net_device *dev, switch (f->command) { case TC_BLOCK_BIND: return tcf_block_cb_register(f->block, cxgb_setup_tc_block_cb, - pi, dev); + pi, dev, f->extack); case TC_BLOCK_UNBIND: tcf_block_cb_unregister(f->block, cxgb_setup_tc_block_cb, pi); return 0; @@ -3217,7 +3219,7 @@ static netdev_features_t cxgb_fix_features(struct net_device *dev, static const struct net_device_ops cxgb4_netdev_ops = { .ndo_open = cxgb_open, .ndo_stop = cxgb_close, - .ndo_start_xmit = t4_eth_xmit, + .ndo_start_xmit = t4_start_xmit, .ndo_select_queue = cxgb_select_queue, .ndo_get_stats64 = cxgb_get_stats, .ndo_set_rx_mode = cxgb_set_rxmode, @@ -3536,6 +3538,16 @@ static int adap_init1(struct adapter *adap, struct fw_caps_config_cmd *c) u32 v; int ret; + /* Now that we've successfully configured and initialized the adapter + * can ask the Firmware what resources it has provisioned for us. + */ + ret = t4_get_pfres(adap); + if (ret) { + dev_err(adap->pdev_dev, + "Unable to retrieve resource provisioning information\n"); + return ret; + } + /* get device capabilities */ memset(c, 0, sizeof(*c)); c->op_to_write = htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) | @@ -4170,32 +4182,6 @@ static int adap_init0(struct adapter *adap) goto bye; } - /* - * Grab VPD parameters. This should be done after we establish a - * connection to the firmware since some of the VPD parameters - * (notably the Core Clock frequency) are retrieved via requests to - * the firmware. On the other hand, we need these fairly early on - * so we do this right after getting ahold of the firmware. - */ - ret = t4_get_vpd_params(adap, &adap->params.vpd); - if (ret < 0) - goto bye; - - /* - * Find out what ports are available to us. Note that we need to do - * this before calling adap_init0_no_config() since it needs nports - * and portvec ... - */ - v = - FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) | - FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_PORTVEC); - ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1, &v, &port_vec); - if (ret < 0) - goto bye; - - adap->params.nports = hweight32(port_vec); - adap->params.portvec = port_vec; - /* If the firmware is initialized already, emit a simply note to that * effect. Otherwise, it's time to try initializing the adapter. */ @@ -4246,6 +4232,45 @@ static int adap_init0(struct adapter *adap) } } + /* Now that we've successfully configured and initialized the adapter + * (or found it already initialized), we can ask the Firmware what + * resources it has provisioned for us. + */ + ret = t4_get_pfres(adap); + if (ret) { + dev_err(adap->pdev_dev, + "Unable to retrieve resource provisioning information\n"); + goto bye; + } + + /* Grab VPD parameters. This should be done after we establish a + * connection to the firmware since some of the VPD parameters + * (notably the Core Clock frequency) are retrieved via requests to + * the firmware. On the other hand, we need these fairly early on + * so we do this right after getting ahold of the firmware. + * + * We need to do this after initializing the adapter because someone + * could have FLASHed a new VPD which won't be read by the firmware + * until we do the RESET ... 
+ */ + ret = t4_get_vpd_params(adap, &adap->params.vpd); + if (ret < 0) + goto bye; + + /* Find out what ports are available to us. Note that we need to do + * this before calling adap_init0_no_config() since it needs nports + * and portvec ... + */ + v = + FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) | + FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_PORTVEC); + ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1, &v, &port_vec); + if (ret < 0) + goto bye; + + adap->params.nports = hweight32(port_vec); + adap->params.portvec = port_vec; + /* Give the SGE code a chance to pull in anything that it needs ... * Note that this must be called after we retrieve our VPD parameters * in order to know how to convert core ticks to seconds, etc. @@ -4797,10 +4822,12 @@ static inline bool is_x_10g_port(const struct link_config *lc) * of ports we found and the number of available CPUs. Most settings can be * modified by the admin prior to actual use. */ -static void cfg_queues(struct adapter *adap) +static int cfg_queues(struct adapter *adap) { struct sge *s = &adap->sge; - int i = 0, n10g = 0, qidx = 0; + int i, n10g = 0, qidx = 0; + int niqflint, neq, avail_eth_qsets; + int max_eth_qsets = 32; #ifndef CONFIG_CHELSIO_T4_DCB int q10g = 0; #endif @@ -4812,16 +4839,46 @@ static void cfg_queues(struct adapter *adap) adap->params.crypto = 0; } - n10g += is_x_10g_port(&adap2pinfo(adap, i)->link_cfg); + /* Calculate the number of Ethernet Queue Sets available based on + * resources provisioned for us. We always have an Asynchronous + * Firmware Event Ingress Queue. If we're operating in MSI or Legacy + * IRQ Pin Interrupt mode, then we'll also have a Forwarded Interrupt + * Ingress Queue. Meanwhile, we need two Egress Queues for each + * Queue Set: one for the Free List and one for the Ethernet TX Queue. + * + * Note that we should also take into account all of the various + * Offload Queues. But, in any situation where we're operating in + * a Resource Constrained Provisioning environment, doing any Offload + * at all is problematic ... + */ + niqflint = adap->params.pfres.niqflint - 1; + if (!(adap->flags & USING_MSIX)) + niqflint--; + neq = adap->params.pfres.neq / 2; + avail_eth_qsets = min(niqflint, neq); + + if (avail_eth_qsets > max_eth_qsets) + avail_eth_qsets = max_eth_qsets; + + if (avail_eth_qsets < adap->params.nports) { + dev_err(adap->pdev_dev, "avail_eth_qsets=%d < nports=%d\n", + avail_eth_qsets, adap->params.nports); + return -ENOMEM; + } + + /* Count the number of 10Gb/s or better ports */ + for_each_port(adap, i) + n10g += is_x_10g_port(&adap2pinfo(adap, i)->link_cfg); + #ifdef CONFIG_CHELSIO_T4_DCB /* For Data Center Bridging support we need to be able to support up * to 8 Traffic Priorities; each of which will be assigned to its * own TX Queue in order to prevent Head-Of-Line Blocking. */ - if (adap->params.nports * 8 > MAX_ETH_QSETS) { - dev_err(adap->pdev_dev, "MAX_ETH_QSETS=%d < %d!\n", - MAX_ETH_QSETS, adap->params.nports * 8); - BUG_ON(1); + if (adap->params.nports * 8 > avail_eth_qsets) { + dev_err(adap->pdev_dev, "DCB avail_eth_qsets=%d < %d!\n", + avail_eth_qsets, adap->params.nports * 8); + return -ENOMEM; } for_each_port(adap, i) { @@ -4837,7 +4894,7 @@ static void cfg_queues(struct adapter *adap) * per 10G port. 
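As a rough illustration of the queue-set budgeting above, here is a small user-space sketch; the provisioning numbers are invented and only the arithmetic mirrors the driver:

#include <stdio.h>

static int avail_eth_qsets(int niqflint, int neq, int using_msix,
			   int nports, int max_eth_qsets)
{
	int iqs = niqflint - 1;		/* reserve the FW event ingress queue */
	int eqs = neq / 2;		/* one Free List + one TX queue per set */
	int avail;

	if (!using_msix)
		iqs--;			/* also reserve the forwarded-interrupt queue */

	avail = iqs < eqs ? iqs : eqs;
	if (avail > max_eth_qsets)
		avail = max_eth_qsets;

	return avail < nports ? -1 : avail;	/* -1: not even one set per port */
}

int main(void)
{
	/* e.g. firmware provisions 68 ingress and 128 egress queues, MSI-X, 4 ports */
	printf("%d\n", avail_eth_qsets(68, 128, 1, 4, 32));	/* -> 32 (capped) */
	/* a constrained provisioning: 10 ingress, 12 egress, legacy interrupts */
	printf("%d\n", avail_eth_qsets(10, 12, 0, 4, 32));	/* -> 6 */
	return 0;
}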
*/ if (n10g) - q10g = (MAX_ETH_QSETS - (adap->params.nports - n10g)) / n10g; + q10g = (avail_eth_qsets - (adap->params.nports - n10g)) / n10g; if (q10g > netif_get_num_default_rss_queues()) q10g = netif_get_num_default_rss_queues(); @@ -4888,6 +4945,8 @@ static void cfg_queues(struct adapter *adap) init_rspq(adap, &s->fw_evtq, 0, 1, 1024, 64); init_rspq(adap, &s->intrq, 0, 1, 512, 64); + + return 0; } /* @@ -5628,10 +5687,15 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent) } } + if (!(adapter->flags & FW_OK)) + goto fw_attach_fail; + /* Configure queues and allocate tables now, they can be needed as * soon as the first register_netdev completes. */ - cfg_queues(adapter); + err = cfg_queues(adapter); + if (err) + goto out_free_dev; adapter->smt = t4_init_smt(); if (!adapter->smt) { @@ -5703,7 +5767,7 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent) if (t4_read_reg(adapter, LE_DB_CONFIG_A) & HASHEN_F) { u32 hash_base, hash_reg; - if (chip <= CHELSIO_T5) { + if (chip_ver <= CHELSIO_T5) { hash_reg = LE_DB_TID_HASHBASE_A; hash_base = t4_read_reg(adapter, hash_reg); adapter->tids.hash_base = hash_base / 4; @@ -5738,6 +5802,7 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent) goto out_free_dev; } +fw_attach_fail: /* * The card is now ready to go. If any errors occur during device * registration we do not fail the whole card but rather proceed only diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c index 3ddd2c4acf68..623f73dd7738 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c @@ -874,6 +874,9 @@ int cxgb4_init_tc_flower(struct adapter *adap) { int ret; + if (adap->tc_flower_initialized) + return -EEXIST; + adap->flower_ht_params = cxgb4_tc_flower_ht_params; ret = rhashtable_init(&adap->flower_tbl, &adap->flower_ht_params); if (ret) @@ -882,13 +885,18 @@ int cxgb4_init_tc_flower(struct adapter *adap) INIT_WORK(&adap->flower_stats_work, ch_flower_stats_handler); timer_setup(&adap->flower_stats_timer, ch_flower_stats_cb, 0); mod_timer(&adap->flower_stats_timer, jiffies + STATS_CHECK_PERIOD); + adap->tc_flower_initialized = true; return 0; } void cxgb4_cleanup_tc_flower(struct adapter *adap) { + if (!adap->tc_flower_initialized) + return; + if (adap->flower_stats_timer.function) del_timer_sync(&adap->flower_stats_timer); cancel_work_sync(&adap->flower_stats_work); rhashtable_destroy(&adap->flower_tbl); + adap->tc_flower_initialized = false; } diff --git a/drivers/net/ethernet/chelsio/cxgb4/sched.c b/drivers/net/ethernet/chelsio/cxgb4/sched.c index 9148abb7994c..7fc656680299 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/sched.c +++ b/drivers/net/ethernet/chelsio/cxgb4/sched.c @@ -539,6 +539,9 @@ void t4_cleanup_sched(struct adapter *adap) struct port_info *pi = netdev2pinfo(adap->port[j]); s = pi->sched_tbl; + if (!s) + continue; + for (i = 0; i < s->sched_size; i++) { struct sched_class *e; diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c index 395e2a0e8d7f..6807bc3a44fb 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/sge.c +++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c @@ -1288,13 +1288,13 @@ static inline void t6_fill_tnl_lso(struct sk_buff *skb, } /** - * t4_eth_xmit - add a packet to an Ethernet Tx queue + * cxgb4_eth_xmit - add a packet to an Ethernet Tx queue * @skb: the packet * @dev: the egress net device * * Add a 
packet to an SGE Ethernet Tx queue. Runs with softirqs disabled. */ -netdev_tx_t t4_eth_xmit(struct sk_buff *skb, struct net_device *dev) +static netdev_tx_t cxgb4_eth_xmit(struct sk_buff *skb, struct net_device *dev) { u32 wr_mid, ctrl0, op; u64 cntrl, *end, *sgl; @@ -1547,6 +1547,374 @@ out_free: dev_kfree_skb_any(skb); return NETDEV_TX_OK; } +/* Constants ... */ +enum { + /* Egress Queue sizes, producer and consumer indices are all in units + * of Egress Context Units bytes. Note that as far as the hardware is + * concerned, the free list is an Egress Queue (the host produces free + * buffers which the hardware consumes) and free list entries are + * 64-bit PCI DMA addresses. + */ + EQ_UNIT = SGE_EQ_IDXSIZE, + FL_PER_EQ_UNIT = EQ_UNIT / sizeof(__be64), + TXD_PER_EQ_UNIT = EQ_UNIT / sizeof(__be64), + + T4VF_ETHTXQ_MAX_HDR = (sizeof(struct fw_eth_tx_pkt_vm_wr) + + sizeof(struct cpl_tx_pkt_lso_core) + + sizeof(struct cpl_tx_pkt_core)) / sizeof(__be64), +}; + +/** + * t4vf_is_eth_imm - can an Ethernet packet be sent as immediate data? + * @skb: the packet + * + * Returns whether an Ethernet packet is small enough to fit completely as + * immediate data. + */ +static inline int t4vf_is_eth_imm(const struct sk_buff *skb) +{ + /* The VF Driver uses the FW_ETH_TX_PKT_VM_WR firmware Work Request + * which does not accommodate immediate data. We could dike out all + * of the support code for immediate data but that would tie our hands + * too much if we ever want to enhance the firmware. It would also + * create more differences between the PF and VF Drivers. + */ + return false; +} + +/** + * t4vf_calc_tx_flits - calculate the number of flits for a packet TX WR + * @skb: the packet + * + * Returns the number of flits needed for a TX Work Request for the + * given Ethernet packet, including the needed WR and CPL headers. + */ +static inline unsigned int t4vf_calc_tx_flits(const struct sk_buff *skb) +{ + unsigned int flits; + + /* If the skb is small enough, we can pump it out as a work request + * with only immediate data. In that case we just have to have the + * TX Packet header plus the skb data in the Work Request. + */ + if (t4vf_is_eth_imm(skb)) + return DIV_ROUND_UP(skb->len + sizeof(struct cpl_tx_pkt), + sizeof(__be64)); + + /* Otherwise, we're going to have to construct a Scatter gather list + * of the skb body and fragments. We also include the flits necessary + * for the TX Packet Work Request and CPL. We always have a firmware + * Write Header (incorporated as part of the cpl_tx_pkt_lso and + * cpl_tx_pkt structures), followed by either a TX Packet Write CPL + * message or, if we're doing a Large Send Offload, an LSO CPL message + * with an embedded TX Packet Write CPL message. + */ + flits = sgl_len(skb_shinfo(skb)->nr_frags + 1); + if (skb_shinfo(skb)->gso_size) + flits += (sizeof(struct fw_eth_tx_pkt_vm_wr) + + sizeof(struct cpl_tx_pkt_lso_core) + + sizeof(struct cpl_tx_pkt_core)) / sizeof(__be64); + else + flits += (sizeof(struct fw_eth_tx_pkt_vm_wr) + + sizeof(struct cpl_tx_pkt_core)) / sizeof(__be64); + return flits; +} + +/** + * cxgb4_vf_eth_xmit - add a packet to an Ethernet TX queue + * @skb: the packet + * @dev: the egress net device + * + * Add a packet to an SGE Ethernet TX queue. Runs with softirqs disabled.
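To make the flit accounting above concrete, here is a rough user-space sketch; the header sizes and the sgl_len() formula are assumptions standing in for the real structures, so only the shape of the calculation is meaningful:

#include <stdio.h>

#define FLIT	8			/* bytes per flit (one __be64) */
#define WR_HDR	16			/* assumed fw_eth_tx_pkt_vm_wr size */
#define LSO_CPL	24			/* assumed cpl_tx_pkt_lso_core size */
#define PKT_CPL	16			/* assumed cpl_tx_pkt_core size */

/* Assumed SGL sizing: first address fits in 2 flits, each further pair of
 * DMA addresses costs 3 flits.
 */
static unsigned int sgl_len(unsigned int n)
{
	n--;
	return (3 * n) / 2 + (n & 1) + 2;
}

static unsigned int calc_tx_flits(unsigned int nfrags, int gso)
{
	unsigned int flits = sgl_len(nfrags + 1);	/* linear head + fragments */

	flits += (WR_HDR + (gso ? LSO_CPL : 0) + PKT_CPL) / FLIT;
	return flits;
}

int main(void)
{
	printf("2 frags, GSO:    %u flits\n", calc_tx_flits(2, 1));	/* 5 + 7 = 12 */
	printf("0 frags, no GSO: %u flits\n", calc_tx_flits(0, 0));	/* 2 + 4 = 6 */
	return 0;
}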
+ */ +static netdev_tx_t cxgb4_vf_eth_xmit(struct sk_buff *skb, + struct net_device *dev) +{ + dma_addr_t addr[MAX_SKB_FRAGS + 1]; + const struct skb_shared_info *ssi; + struct fw_eth_tx_pkt_vm_wr *wr; + int qidx, credits, max_pkt_len; + struct cpl_tx_pkt_core *cpl; + const struct port_info *pi; + unsigned int flits, ndesc; + struct sge_eth_txq *txq; + struct adapter *adapter; + u64 cntrl, *end; + u32 wr_mid; + const size_t fw_hdr_copy_len = sizeof(wr->ethmacdst) + + sizeof(wr->ethmacsrc) + + sizeof(wr->ethtype) + + sizeof(wr->vlantci); + + /* The chip minimum packet length is 10 octets but the firmware + * command that we are using requires that we copy the Ethernet header + * (including the VLAN tag) into the header so we reject anything + * smaller than that ... + */ + if (unlikely(skb->len < fw_hdr_copy_len)) + goto out_free; + + /* Discard the packet if the length is greater than mtu */ + max_pkt_len = ETH_HLEN + dev->mtu; + if (skb_vlan_tag_present(skb)) + max_pkt_len += VLAN_HLEN; + if (!skb_shinfo(skb)->gso_size && (unlikely(skb->len > max_pkt_len))) + goto out_free; + + /* Figure out which TX Queue we're going to use. */ + pi = netdev_priv(dev); + adapter = pi->adapter; + qidx = skb_get_queue_mapping(skb); + WARN_ON(qidx >= pi->nqsets); + txq = &adapter->sge.ethtxq[pi->first_qset + qidx]; + + /* Take this opportunity to reclaim any TX Descriptors whose DMA + * transfers have completed. + */ + cxgb4_reclaim_completed_tx(adapter, &txq->q, true); + + /* Calculate the number of flits and TX Descriptors we're going to + * need along with how many TX Descriptors will be left over after + * we inject our Work Request. + */ + flits = t4vf_calc_tx_flits(skb); + ndesc = flits_to_desc(flits); + credits = txq_avail(&txq->q) - ndesc; + + if (unlikely(credits < 0)) { + /* Not enough room for this packet's Work Request. Stop the + * TX Queue and return a "busy" condition. The queue will get + * started later on when the firmware informs us that space + * has opened up. + */ + eth_txq_stop(txq); + dev_err(adapter->pdev_dev, + "%s: TX ring %u full while queue awake!\n", + dev->name, qidx); + return NETDEV_TX_BUSY; + } + + if (!t4vf_is_eth_imm(skb) && + unlikely(cxgb4_map_skb(adapter->pdev_dev, skb, addr) < 0)) { + /* We need to map the skb into PCI DMA space (because it can't + * be in-lined directly into the Work Request) and the mapping + * operation failed. Record the error and drop the packet. + */ + txq->mapping_err++; + goto out_free; + } + + wr_mid = FW_WR_LEN16_V(DIV_ROUND_UP(flits, 2)); + if (unlikely(credits < ETHTXQ_STOP_THRES)) { + /* After we're done injecting the Work Request for this + * packet, we'll be below our "stop threshold" so stop the TX + * Queue now and schedule a request for an SGE Egress Queue + * Update message. The queue will get started later on when + * the firmware processes this Work Request and sends us an + * Egress Queue Status Update message indicating that space + * has opened up. + */ + eth_txq_stop(txq); + wr_mid |= FW_WR_EQUEQ_F | FW_WR_EQUIQ_F; + } + + /* Start filling in our Work Request. Note that we do _not_ handle + * the WR Header wrapping around the TX Descriptor Ring. If our + * maximum header size ever exceeds one TX Descriptor, we'll need to + * do something else here. 
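The descriptor-budget handling above follows a common two-level pattern: refuse the packet outright when there is no room, and stop the queue pre-emptively when the remaining credits fall below a threshold so the firmware can wake it later. A minimal sketch with made-up sizes (ETHTXQ_STOP_THRES here is a placeholder, not the driver's value):

#include <stdio.h>
#include <stdbool.h>

#define ETHTXQ_STOP_THRES	4	/* placeholder stop threshold */

struct txq { unsigned int size, in_use; };

static unsigned int txq_avail(const struct txq *q)
{
	return q->size - 1 - q->in_use;
}

/* Returns false when the caller should stop the queue and report TX_BUSY. */
static bool reserve_descs(struct txq *q, unsigned int ndesc, bool *stop_after)
{
	int credits = (int)txq_avail(q) - (int)ndesc;

	if (credits < 0)
		return false;			/* no room: stop queue, NETDEV_TX_BUSY */

	*stop_after = credits < ETHTXQ_STOP_THRES; /* stop + request an EQ update */
	q->in_use += ndesc;
	return true;
}

int main(void)
{
	struct txq q = { .size = 16, .in_use = 10 };
	bool stop;

	if (reserve_descs(&q, 3, &stop))
		printf("queued, stop_after=%d, in_use=%u\n", stop, q.in_use);
	return 0;
}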
+ */ + WARN_ON(DIV_ROUND_UP(T4VF_ETHTXQ_MAX_HDR, TXD_PER_EQ_UNIT) > 1); + wr = (void *)&txq->q.desc[txq->q.pidx]; + wr->equiq_to_len16 = cpu_to_be32(wr_mid); + wr->r3[0] = cpu_to_be32(0); + wr->r3[1] = cpu_to_be32(0); + skb_copy_from_linear_data(skb, (void *)wr->ethmacdst, fw_hdr_copy_len); + end = (u64 *)wr + flits; + + /* If this is a Large Send Offload packet we'll put in an LSO CPL + * message with an encapsulated TX Packet CPL message. Otherwise we + * just use a TX Packet CPL message. + */ + ssi = skb_shinfo(skb); + if (ssi->gso_size) { + struct cpl_tx_pkt_lso_core *lso = (void *)(wr + 1); + bool v6 = (ssi->gso_type & SKB_GSO_TCPV6) != 0; + int l3hdr_len = skb_network_header_len(skb); + int eth_xtra_len = skb_network_offset(skb) - ETH_HLEN; + + wr->op_immdlen = + cpu_to_be32(FW_WR_OP_V(FW_ETH_TX_PKT_VM_WR) | + FW_WR_IMMDLEN_V(sizeof(*lso) + + sizeof(*cpl))); + /* Fill in the LSO CPL message. */ + lso->lso_ctrl = + cpu_to_be32(LSO_OPCODE_V(CPL_TX_PKT_LSO) | + LSO_FIRST_SLICE_F | + LSO_LAST_SLICE_F | + LSO_IPV6_V(v6) | + LSO_ETHHDR_LEN_V(eth_xtra_len / 4) | + LSO_IPHDR_LEN_V(l3hdr_len / 4) | + LSO_TCPHDR_LEN_V(tcp_hdr(skb)->doff)); + lso->ipid_ofst = cpu_to_be16(0); + lso->mss = cpu_to_be16(ssi->gso_size); + lso->seqno_offset = cpu_to_be32(0); + if (is_t4(adapter->params.chip)) + lso->len = cpu_to_be32(skb->len); + else + lso->len = cpu_to_be32(LSO_T5_XFER_SIZE_V(skb->len)); + + /* Set up TX Packet CPL pointer, control word and perform + * accounting. + */ + cpl = (void *)(lso + 1); + + if (CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5) + cntrl = TXPKT_ETHHDR_LEN_V(eth_xtra_len); + else + cntrl = T6_TXPKT_ETHHDR_LEN_V(eth_xtra_len); + + cntrl |= TXPKT_CSUM_TYPE_V(v6 ? + TX_CSUM_TCPIP6 : TX_CSUM_TCPIP) | + TXPKT_IPHDR_LEN_V(l3hdr_len); + txq->tso++; + txq->tx_cso += ssi->gso_segs; + } else { + int len; + + len = (t4vf_is_eth_imm(skb) + ? skb->len + sizeof(*cpl) + : sizeof(*cpl)); + wr->op_immdlen = + cpu_to_be32(FW_WR_OP_V(FW_ETH_TX_PKT_VM_WR) | + FW_WR_IMMDLEN_V(len)); + + /* Set up TX Packet CPL pointer, control word and perform + * accounting. + */ + cpl = (void *)(wr + 1); + if (skb->ip_summed == CHECKSUM_PARTIAL) { + cntrl = hwcsum(adapter->params.chip, skb) | + TXPKT_IPCSUM_DIS_F; + txq->tx_cso++; + } else { + cntrl = TXPKT_L4CSUM_DIS_F | TXPKT_IPCSUM_DIS_F; + } + } + + /* If there's a VLAN tag present, add that to the list of things to + * do in this Work Request. + */ + if (skb_vlan_tag_present(skb)) { + txq->vlan_ins++; + cntrl |= TXPKT_VLAN_VLD_F | TXPKT_VLAN_V(skb_vlan_tag_get(skb)); + } + + /* Fill in the TX Packet CPL message header. */ + cpl->ctrl0 = cpu_to_be32(TXPKT_OPCODE_V(CPL_TX_PKT_XT) | + TXPKT_INTF_V(pi->port_id) | + TXPKT_PF_V(0)); + cpl->pack = cpu_to_be16(0); + cpl->len = cpu_to_be16(skb->len); + cpl->ctrl1 = cpu_to_be64(cntrl); + + /* Fill in the body of the TX Packet CPL message with either in-lined + * data or a Scatter/Gather List. + */ + if (t4vf_is_eth_imm(skb)) { + /* In-line the packet's data and free the skb since we don't + * need it any longer. + */ + cxgb4_inline_tx_skb(skb, &txq->q, cpl + 1); + dev_consume_skb_any(skb); + } else { + /* Write the skb's Scatter/Gather list into the TX Packet CPL + * message and retain a pointer to the skb so we can free it + * later when its DMA completes. (We store the skb pointer + * in the Software Descriptor corresponding to the last TX + * Descriptor used by the Work Request.) + * + * The retained skb will be freed when the corresponding TX + * Descriptors are reclaimed after their DMAs complete. 
+ * However, this could take quite a while since, in general, + * the hardware is set up to be lazy about sending DMA + * completion notifications to us and we mostly perform TX + * reclaims in the transmit routine. + * + * This is good for performance but means that we rely on new + * TX packets arriving to run the destructors of completed + * packets, which open up space in their sockets' send queues. + * Sometimes we do not get such new packets causing TX to + * stall. A single UDP transmitter is a good example of this + * situation. We have a clean up timer that periodically + * reclaims completed packets but it doesn't run often enough + * (nor do we want it to) to prevent lengthy stalls. A + * solution to this problem is to run the destructor early, + * after the packet is queued but before it's DMAd. A con is + * that we lie to socket memory accounting, but the amount of + * extra memory is reasonable (limited by the number of TX + * descriptors), the packets do actually get freed quickly by + * new packets almost always, and for protocols like TCP that + * wait for acks to really free up the data the extra memory + * is even less. On the positive side we run the destructors + * on the sending CPU rather than on a potentially different + * completing CPU, usually a good thing. + * + * Run the destructor before telling the DMA engine about the + * packet to make sure it doesn't complete and get freed + * prematurely. + */ + struct ulptx_sgl *sgl = (struct ulptx_sgl *)(cpl + 1); + struct sge_txq *tq = &txq->q; + int last_desc; + + /* If the Work Request header was an exact multiple of our TX + * Descriptor length, then it's possible that the starting SGL + * pointer lines up exactly with the end of our TX Descriptor + * ring. If that's the case, wrap around to the beginning + * here ... + */ + if (unlikely((void *)sgl == (void *)tq->stat)) { + sgl = (void *)tq->desc; + end = (void *)((void *)tq->desc + + ((void *)end - (void *)tq->stat)); + } + + cxgb4_write_sgl(skb, tq, sgl, end, 0, addr); + skb_orphan(skb); + + last_desc = tq->pidx + ndesc - 1; + if (last_desc >= tq->size) + last_desc -= tq->size; + tq->sdesc[last_desc].skb = skb; + tq->sdesc[last_desc].sgl = sgl; + } + + /* Advance our internal TX Queue state, tell the hardware about + * the new TX descriptors and return success. + */ + txq_advance(&txq->q, ndesc); + + cxgb4_ring_tx_db(adapter, &txq->q, ndesc); + return NETDEV_TX_OK; + +out_free: + /* An error of some sort happened. Free the TX skb and tell the + * OS that we've "dealt" with the packet ... + */ + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; +} + +netdev_tx_t t4_start_xmit(struct sk_buff *skb, struct net_device *dev) +{ + struct port_info *pi = netdev_priv(dev); + + if (unlikely(pi->eth_flags & PRIV_FLAG_PORT_TX_VM)) + return cxgb4_vf_eth_xmit(skb, dev); + + return cxgb4_eth_xmit(skb, dev); +} + /** * reclaim_completed_tx_imm - reclaim completed control-queue Tx descs * @q: the SGE control Tx queue @@ -3044,7 +3412,9 @@ int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq, c.iqsize = htons(iq->size); c.iqaddr = cpu_to_be64(iq->phys_addr); if (cong >= 0) - c.iqns_to_fl0congen = htonl(FW_IQ_CMD_IQFLINTCONGEN_F); + c.iqns_to_fl0congen = htonl(FW_IQ_CMD_IQFLINTCONGEN_F | + FW_IQ_CMD_IQTYPE_V(cong ?
FW_IQ_IQTYPE_NIC + : FW_IQ_IQTYPE_OFLD)); if (fl) { enum chip_type chip = CHELSIO_CHIP_VERSION(adap->params.chip); diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c index 3720c3e11ebb..2d9943f90a75 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c @@ -2882,6 +2882,57 @@ int t4_get_vpd_params(struct adapter *adapter, struct vpd_params *p) return 0; } +/** + * t4_get_pfres - retrieve VF resource limits + * @adapter: the adapter + * + * Retrieves configured resource limits and capabilities for a physical + * function. The results are stored in @adapter->pfres. + */ +int t4_get_pfres(struct adapter *adapter) +{ + struct pf_resources *pfres = &adapter->params.pfres; + struct fw_pfvf_cmd cmd, rpl; + int v; + u32 word; + + /* Execute PFVF Read command to get VF resource limits; bail out early + * with error on command failure. + */ + memset(&cmd, 0, sizeof(cmd)); + cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_PFVF_CMD) | + FW_CMD_REQUEST_F | + FW_CMD_READ_F | + FW_PFVF_CMD_PFN_V(adapter->pf) | + FW_PFVF_CMD_VFN_V(0)); + cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd)); + v = t4_wr_mbox(adapter, adapter->mbox, &cmd, sizeof(cmd), &rpl); + if (v != FW_SUCCESS) + return v; + + /* Extract PF resource limits and return success. + */ + word = be32_to_cpu(rpl.niqflint_niq); + pfres->niqflint = FW_PFVF_CMD_NIQFLINT_G(word); + pfres->niq = FW_PFVF_CMD_NIQ_G(word); + + word = be32_to_cpu(rpl.type_to_neq); + pfres->neq = FW_PFVF_CMD_NEQ_G(word); + pfres->pmask = FW_PFVF_CMD_PMASK_G(word); + + word = be32_to_cpu(rpl.tc_to_nexactf); + pfres->tc = FW_PFVF_CMD_TC_G(word); + pfres->nvi = FW_PFVF_CMD_NVI_G(word); + pfres->nexactf = FW_PFVF_CMD_NEXACTF_G(word); + + word = be32_to_cpu(rpl.r_caps_to_nethctrl); + pfres->r_caps = FW_PFVF_CMD_R_CAPS_G(word); + pfres->wx_caps = FW_PFVF_CMD_WX_CAPS_G(word); + pfres->nethctrl = FW_PFVF_CMD_NETHCTRL_G(word); + + return 0; +} + /* serial flash and firmware constants */ enum { SF_ATTEMPTS = 10, /* max retries for SF operations */ diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h b/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h index c7f8d0441278..e3adf435913e 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h @@ -188,6 +188,7 @@ CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN CH_PCI_ID_TABLE_FENTRY(0x50ab), /* Custom T520-CR */ CH_PCI_ID_TABLE_FENTRY(0x50ac), /* Custom T540-BT */ CH_PCI_ID_TABLE_FENTRY(0x50ad), /* Custom T520-CR */ + CH_PCI_ID_TABLE_FENTRY(0x50ae), /* Custom T540-XL-SO */ /* T6 adapters: */ diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h index 6b55aa2eb2a5..da885881c02f 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h @@ -1502,6 +1502,20 @@ #define TP_MIB_DATA_A 0x7e54 #define TP_INT_CAUSE_A 0x7e74 +#define TP_FLM_FREE_RX_CNT_A 0x7e84 + +#define FREERXPAGECOUNT_S 0 +#define FREERXPAGECOUNT_M 0x1fffffU +#define FREERXPAGECOUNT_V(x) ((x) << FREERXPAGECOUNT_S) +#define FREERXPAGECOUNT_G(x) (((x) >> FREERXPAGECOUNT_S) & FREERXPAGECOUNT_M) + +#define TP_FLM_FREE_TX_CNT_A 0x7e88 + +#define FREETXPAGECOUNT_S 0 +#define FREETXPAGECOUNT_M 0x1fffffU +#define FREETXPAGECOUNT_V(x) ((x) << FREETXPAGECOUNT_S) +#define FREETXPAGECOUNT_G(x) (((x) >> FREETXPAGECOUNT_S) & FREETXPAGECOUNT_M) + #define FLMTXFLSTEMPTY_S 30 #define FLMTXFLSTEMPTY_V(x) ((x) << FLMTXFLSTEMPTY_S) #define 
FLMTXFLSTEMPTY_F FLMTXFLSTEMPTY_V(1U) @@ -1683,6 +1697,16 @@ #define ULP_TX_LA_RDPTR_0_A 0x8ec0 #define ULP_TX_LA_RDDATA_0_A 0x8ec4 #define ULP_TX_LA_WRPTR_0_A 0x8ec8 +#define ULP_TX_ASIC_DEBUG_CTRL_A 0x8f70 + +#define ULP_TX_ASIC_DEBUG_0_A 0x8f74 +#define ULP_TX_ASIC_DEBUG_1_A 0x8f78 +#define ULP_TX_ASIC_DEBUG_2_A 0x8f7c +#define ULP_TX_ASIC_DEBUG_3_A 0x8f80 +#define ULP_TX_ASIC_DEBUG_4_A 0x8f84 + +/* registers for module PM_RX */ +#define PM_RX_BASE_ADDR 0x8fc0 #define PMRX_E_PCMD_PAR_ERROR_S 0 #define PMRX_E_PCMD_PAR_ERROR_V(x) ((x) << PMRX_E_PCMD_PAR_ERROR_S) diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h index f1967cf6d43c..5dc6c4154af8 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h +++ b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h @@ -1472,6 +1472,12 @@ enum fw_iq_type { FW_IQ_TYPE_NO_FL_INT_CAP }; +enum fw_iq_iqtype { + FW_IQ_IQTYPE_OTHER, + FW_IQ_IQTYPE_NIC, + FW_IQ_IQTYPE_OFLD, +}; + struct fw_iq_cmd { __be32 op_to_vfn; __be32 alloc_to_len16; @@ -1586,6 +1592,12 @@ struct fw_iq_cmd { #define FW_IQ_CMD_IQFLINTISCSIC_S 26 #define FW_IQ_CMD_IQFLINTISCSIC_V(x) ((x) << FW_IQ_CMD_IQFLINTISCSIC_S) +#define FW_IQ_CMD_IQTYPE_S 24 +#define FW_IQ_CMD_IQTYPE_M 0x3 +#define FW_IQ_CMD_IQTYPE_V(x) ((x) << FW_IQ_CMD_IQTYPE_S) +#define FW_IQ_CMD_IQTYPE_G(x) \ + (((x) >> FW_IQ_CMD_IQTYPE_S) & FW_IQ_CMD_IQTYPE_M) + #define FW_IQ_CMD_FL0CNGCHMAP_S 20 #define FW_IQ_CMD_FL0CNGCHMAP_V(x) ((x) << FW_IQ_CMD_FL0CNGCHMAP_S) diff --git a/drivers/net/ethernet/cortina/gemini.c b/drivers/net/ethernet/cortina/gemini.c index 6d7404f66f84..1c9ad3630c77 100644 --- a/drivers/net/ethernet/cortina/gemini.c +++ b/drivers/net/ethernet/cortina/gemini.c @@ -46,6 +46,11 @@ #define DRV_NAME "gmac-gemini" #define DRV_VERSION "1.0" +#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK) +static int debug = -1; +module_param(debug, int, 0); +MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)"); + #define HSIZE_8 0x00 #define HSIZE_16 0x01 #define HSIZE_32 0x02 @@ -146,6 +151,7 @@ struct gemini_ethernet { void __iomem *base; struct gemini_ethernet_port *port0; struct gemini_ethernet_port *port1; + bool initialized; spinlock_t irq_lock; /* Locks IRQ-related registers */ unsigned int freeq_order; @@ -300,23 +306,26 @@ static void gmac_speed_set(struct net_device *netdev) status.bits.speed = GMAC_SPEED_1000; if (phydev->interface == PHY_INTERFACE_MODE_RGMII) status.bits.mii_rmii = GMAC_PHY_RGMII_1000; - netdev_info(netdev, "connect to RGMII @ 1Gbit\n"); + netdev_dbg(netdev, "connect %s to RGMII @ 1Gbit\n", + phydev_name(phydev)); break; case 100: status.bits.speed = GMAC_SPEED_100; if (phydev->interface == PHY_INTERFACE_MODE_RGMII) status.bits.mii_rmii = GMAC_PHY_RGMII_100_10; - netdev_info(netdev, "connect to RGMII @ 100 Mbit\n"); + netdev_dbg(netdev, "connect %s to RGMII @ 100 Mbit\n", + phydev_name(phydev)); break; case 10: status.bits.speed = GMAC_SPEED_10; if (phydev->interface == PHY_INTERFACE_MODE_RGMII) status.bits.mii_rmii = GMAC_PHY_RGMII_100_10; - netdev_info(netdev, "connect to RGMII @ 10 Mbit\n"); + netdev_dbg(netdev, "connect %s to RGMII @ 10 Mbit\n", + phydev_name(phydev)); break; default: - netdev_warn(netdev, "Not supported PHY speed (%d)\n", - phydev->speed); + netdev_warn(netdev, "Unsupported PHY speed (%d) on %s\n", + phydev->speed, phydev_name(phydev)); } if (phydev->duplex == DUPLEX_FULL) { @@ -363,12 +372,6 @@ static int gmac_setup_phy(struct net_device *netdev) return -ENODEV; netdev->phydev = phy; - 
netdev_info(netdev, "connected to PHY \"%s\"\n", - phydev_name(phy)); - phy_attached_print(phy, "phy_id=0x%.8lx, phy_mode=%s\n", - (unsigned long)phy->phy_id, - phy_modes(phy->interface)); - phy->supported &= PHY_GBIT_FEATURES; phy->supported |= SUPPORTED_Asym_Pause | SUPPORTED_Pause; phy->advertising = phy->supported; @@ -376,19 +379,19 @@ static int gmac_setup_phy(struct net_device *netdev) /* set PHY interface type */ switch (phy->interface) { case PHY_INTERFACE_MODE_MII: - netdev_info(netdev, "set GMAC0 to GMII mode, GMAC1 disabled\n"); + netdev_dbg(netdev, + "MII: set GMAC0 to GMII mode, GMAC1 disabled\n"); status.bits.mii_rmii = GMAC_PHY_MII; - netdev_info(netdev, "connect to MII\n"); break; case PHY_INTERFACE_MODE_GMII: - netdev_info(netdev, "set GMAC0 to GMII mode, GMAC1 disabled\n"); + netdev_dbg(netdev, + "GMII: set GMAC0 to GMII mode, GMAC1 disabled\n"); status.bits.mii_rmii = GMAC_PHY_GMII; - netdev_info(netdev, "connect to GMII\n"); break; case PHY_INTERFACE_MODE_RGMII: - dev_info(dev, "set GMAC0 and GMAC1 to MII/RGMII mode\n"); + netdev_dbg(netdev, + "RGMII: set GMAC0 and GMAC1 to MII/RGMII mode\n"); status.bits.mii_rmii = GMAC_PHY_RGMII_100_10; - netdev_info(netdev, "connect to RGMII\n"); break; default: netdev_err(netdev, "Unsupported MII interface\n"); @@ -398,29 +401,63 @@ static int gmac_setup_phy(struct net_device *netdev) } writel(status.bits32, port->gmac_base + GMAC_STATUS); + if (netif_msg_link(port)) + phy_attached_info(phy); + return 0; } -static int gmac_pick_rx_max_len(int max_l3_len) -{ - /* index = CONFIG_MAXLEN_XXX values */ - static const int max_len[8] = { - 1536, 1518, 1522, 1542, - 9212, 10236, 1518, 1518 - }; - int i, n = 5; +/* The maximum frame length is not logically enumerated in the + * hardware, so we do a table lookup to find the applicable max + * frame length. + */ +struct gmac_max_framelen { + unsigned int max_l3_len; + u8 val; +}; + +static const struct gmac_max_framelen gmac_maxlens[] = { + { + .max_l3_len = 1518, + .val = CONFIG0_MAXLEN_1518, + }, + { + .max_l3_len = 1522, + .val = CONFIG0_MAXLEN_1522, + }, + { + .max_l3_len = 1536, + .val = CONFIG0_MAXLEN_1536, + }, + { + .max_l3_len = 1542, + .val = CONFIG0_MAXLEN_1542, + }, + { + .max_l3_len = 9212, + .val = CONFIG0_MAXLEN_9k, + }, + { + .max_l3_len = 10236, + .val = CONFIG0_MAXLEN_10k, + }, +}; - max_l3_len += ETH_HLEN + VLAN_HLEN; +static int gmac_pick_rx_max_len(unsigned int max_l3_len) +{ + const struct gmac_max_framelen *maxlen; + int maxtot; + int i; - if (max_l3_len > max_len[n]) - return -1; + maxtot = max_l3_len + ETH_HLEN + VLAN_HLEN; - for (i = 0; i < 5; i++) { - if (max_len[i] >= max_l3_len && max_len[i] < max_len[n]) - n = i; + for (i = 0; i < ARRAY_SIZE(gmac_maxlens); i++) { + maxlen = &gmac_maxlens[i]; + if (maxtot <= maxlen->max_l3_len) + return maxlen->val; } - return n; + return -1; } static int gmac_init(struct net_device *netdev) @@ -1276,8 +1313,8 @@ static void gmac_enable_irq(struct net_device *netdev, int enable) unsigned long flags; u32 val, mask; - netdev_info(netdev, "%s device %d %s\n", __func__, - netdev->dev_id, enable ? "enable" : "disable"); + netdev_dbg(netdev, "%s device %d %s\n", __func__, + netdev->dev_id, enable ? "enable" : "disable"); spin_lock_irqsave(&geth->irq_lock, flags); mask = GMAC0_IRQ0_2 << (netdev->dev_id * 2); @@ -1753,7 +1790,10 @@ static int gmac_open(struct net_device *netdev) phy_start(netdev->phydev); err = geth_resize_freeq(port); - if (err) { + /* It's fine if it's just busy, the other port has set up + * the freeq in that case. 
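The reworked gmac_pick_rx_max_len() above simply walks the table for the first bucket that fits the requested payload plus Ethernet and VLAN headers. A stand-alone sketch (the register values returned here are placeholders, not the CONFIG0_MAXLEN_* encodings):

#include <stdio.h>

#define ETH_HLEN	14
#define VLAN_HLEN	4

struct max_framelen { unsigned int max_len; int val; };

static const struct max_framelen maxlens[] = {
	{ 1518, 1 }, { 1522, 2 }, { 1536, 0 },
	{ 1542, 3 }, { 9212, 4 }, { 10236, 5 },
};

static int pick_rx_max_len(unsigned int max_l3_len)
{
	unsigned int maxtot = max_l3_len + ETH_HLEN + VLAN_HLEN;
	unsigned int i;

	for (i = 0; i < sizeof(maxlens) / sizeof(maxlens[0]); i++)
		if (maxtot <= maxlens[i].max_len)
			return maxlens[i].val;	/* smallest bucket that fits */
	return -1;				/* MTU too large for the hardware */
}

int main(void)
{
	printf("MTU 1500  -> %d\n", pick_rx_max_len(1500));	/* 1518 bucket */
	printf("MTU 9000  -> %d\n", pick_rx_max_len(9000));	/* 9212 bucket */
	printf("MTU 10300 -> %d\n", pick_rx_max_len(10300));	/* -1 */
	return 0;
}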
+ */ + if (err && (err != -EBUSY)) { netdev_err(netdev, "could not resize freeq\n"); goto err_stop_phy; } @@ -1782,7 +1822,7 @@ static int gmac_open(struct net_device *netdev) HRTIMER_MODE_REL); port->rx_coalesce_timer.function = &gmac_coalesce_delay_expired; - netdev_info(netdev, "opened\n"); + netdev_dbg(netdev, "opened\n"); return 0; @@ -2264,6 +2304,14 @@ static void gemini_port_remove(struct gemini_ethernet_port *port) static void gemini_ethernet_init(struct gemini_ethernet *geth) { + /* Only do this once both ports are online */ + if (geth->initialized) + return; + if (geth->port0 && geth->port1) + geth->initialized = true; + else + return; + writel(0, geth->base + GLOBAL_INTERRUPT_ENABLE_0_REG); writel(0, geth->base + GLOBAL_INTERRUPT_ENABLE_1_REG); writel(0, geth->base + GLOBAL_INTERRUPT_ENABLE_2_REG); @@ -2354,6 +2402,7 @@ static int gemini_ethernet_port_probe(struct platform_device *pdev) port->id = id; port->geth = geth; port->dev = dev; + port->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE); /* DMA memory */ dmares = platform_get_resource(pdev, IORESOURCE_MEM, 0); @@ -2410,6 +2459,10 @@ static int gemini_ethernet_port_probe(struct platform_device *pdev) geth->port0 = port; else geth->port1 = port; + + /* This will just be done once both ports are up and reset */ + gemini_ethernet_init(geth); + platform_set_drvdata(pdev, port); /* Set up and register the netdev */ @@ -2423,6 +2476,11 @@ static int gemini_ethernet_port_probe(struct platform_device *pdev) netdev->hw_features = GMAC_OFFLOAD_FEATURES; netdev->features |= GMAC_OFFLOAD_FEATURES | NETIF_F_GRO; + /* We can handle jumbo frames up to 10236 bytes so, let's accept + * payloads of 10236 bytes minus VLAN and ethernet header + */ + netdev->min_mtu = ETH_MIN_MTU; + netdev->max_mtu = 10236 - VLAN_ETH_HLEN; port->freeq_refill = 0; netif_napi_add(netdev, &port->napi, gmac_napi_poll, @@ -2435,7 +2493,7 @@ static int gemini_ethernet_port_probe(struct platform_device *pdev) port->mac_addr[0], port->mac_addr[1], port->mac_addr[2]); dev_info(dev, "using a random ethernet address\n"); - random_ether_addr(netdev->dev_addr); + eth_random_addr(netdev->dev_addr); } gmac_write_mac_address(netdev); @@ -2527,7 +2585,6 @@ static int gemini_ethernet_probe(struct platform_device *pdev) spin_lock_init(&geth->irq_lock); spin_lock_init(&geth->freeq_lock); - gemini_ethernet_init(geth); /* The children will use this */ platform_set_drvdata(pdev, geth); @@ -2540,8 +2597,8 @@ static int gemini_ethernet_remove(struct platform_device *pdev) { struct gemini_ethernet *geth = platform_get_drvdata(pdev); - gemini_ethernet_init(geth); geth_cleanup_freeq(geth); + geth->initialized = false; return 0; } diff --git a/drivers/net/ethernet/emulex/benet/be.h b/drivers/net/ethernet/emulex/benet/be.h index 382891f81e09..7005949dc17b 100644 --- a/drivers/net/ethernet/emulex/benet/be.h +++ b/drivers/net/ethernet/emulex/benet/be.h @@ -185,34 +185,13 @@ static inline void queue_tail_inc(struct be_queue_info *q) struct be_eq_obj { struct be_queue_info q; - char desc[32]; - - /* Adaptive interrupt coalescing (AIC) info */ - bool enable_aic; - u32 min_eqd; /* in usecs */ - u32 max_eqd; /* in usecs */ - u32 eqd; /* configured val when aic is off */ - u32 cur_eqd; /* in usecs */ + struct be_adapter *adapter; + struct napi_struct napi; u8 idx; /* array index */ u8 msix_idx; u16 spurious_intr; - struct napi_struct napi; - struct be_adapter *adapter; cpumask_var_t affinity_mask; - -#ifdef CONFIG_NET_RX_BUSY_POLL -#define BE_EQ_IDLE 0 -#define BE_EQ_NAPI 1 /* napi owns this 
EQ */ -#define BE_EQ_POLL 2 /* poll owns this EQ */ -#define BE_EQ_LOCKED (BE_EQ_NAPI | BE_EQ_POLL) -#define BE_EQ_NAPI_YIELD 4 /* napi yielded this EQ */ -#define BE_EQ_POLL_YIELD 8 /* poll yielded this EQ */ -#define BE_EQ_YIELD (BE_EQ_NAPI_YIELD | BE_EQ_POLL_YIELD) -#define BE_EQ_USER_PEND (BE_EQ_POLL | BE_EQ_POLL_YIELD) - unsigned int state; - spinlock_t lock; /* lock to serialize napi and busy-poll */ -#endif /* CONFIG_NET_RX_BUSY_POLL */ } ____cacheline_aligned_in_smp; struct be_aic_obj { /* Adaptive interrupt coalescing (AIC) info */ @@ -238,7 +217,6 @@ struct be_tx_stats { u64 tx_vxlan_offload_pkts; u64 tx_reqs; u64 tx_compl; - ulong tx_jiffies; u32 tx_stops; u32 tx_drv_drops; /* pkts dropped by driver */ /* the error counters are described in be_ethtool.c */ @@ -261,9 +239,9 @@ struct be_tx_compl_info { struct be_tx_obj { u32 db_offset; + struct be_tx_compl_info txcp; struct be_queue_info q; struct be_queue_info cq; - struct be_tx_compl_info txcp; /* Remember the skbs that were transmitted */ struct sk_buff *sent_skb_list[TX_Q_LEN]; struct be_tx_stats stats; @@ -458,10 +436,10 @@ struct be_port_resources { #define be_is_os2bmc_enabled(adapter) (adapter->flags & BE_FLAGS_OS2BMC) struct rss_info { - u64 rss_flags; u8 rsstable[RSS_INDIR_TABLE_LEN]; u8 rss_queue[RSS_INDIR_TABLE_LEN]; u8 rss_hkey[RSS_HASH_KEY_LEN]; + u64 rss_flags; }; #define BE_INVALID_DIE_TEMP 0xFF @@ -544,11 +522,13 @@ enum { }; struct be_error_recovery { - /* Lancer error recovery variables */ - u8 recovery_retries; + union { + u8 recovery_retries; /* used for Lancer */ + u8 recovery_state; /* used for BEx and Skyhawk */ + }; /* BEx/Skyhawk error recovery variables */ - u8 recovery_state; + bool recovery_supported; u16 ue_to_reset_time; /* Time after UE, to soft reset * the chip - PF0 only */ @@ -556,7 +536,6 @@ struct be_error_recovery { * of SLIPORT_SEMAPHORE reg */ u16 last_err_code; - bool recovery_supported; unsigned long probe_time; unsigned long last_recovery_time; diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c index 8f755009ff38..05e4c0bb25f4 100644 --- a/drivers/net/ethernet/emulex/benet/be_main.c +++ b/drivers/net/ethernet/emulex/benet/be_main.c @@ -3403,9 +3403,11 @@ static int be_msix_register(struct be_adapter *adapter) int status, i, vec; for_all_evt_queues(adapter, eqo, i) { - sprintf(eqo->desc, "%s-q%d", netdev->name, i); + char irq_name[IFNAMSIZ+4]; + + snprintf(irq_name, sizeof(irq_name), "%s-q%d", netdev->name, i); vec = be_msix_vec_get(adapter, eqo); - status = request_irq(vec, be_msix, 0, eqo->desc, eqo); + status = request_irq(vec, be_msix, 0, irq_name, eqo); if (status) goto err_msix; diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c index ab02057ac730..65a22cd9aef2 100644 --- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c +++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c @@ -1171,7 +1171,7 @@ static int dpaa_eth_init_tx_port(struct fman_port *port, struct dpaa_fq *errq, buf_prefix_content.priv_data_size = buf_layout->priv_data_size; buf_prefix_content.pass_prs_result = true; buf_prefix_content.pass_hash_result = true; - buf_prefix_content.pass_time_stamp = false; + buf_prefix_content.pass_time_stamp = true; buf_prefix_content.data_align = DPAA_FD_DATA_ALIGNMENT; params.specific_params.non_rx_params.err_fqid = errq->fqid; @@ -1213,7 +1213,7 @@ static int dpaa_eth_init_rx_port(struct fman_port *port, struct dpaa_bp **bps, buf_prefix_content.priv_data_size = 
buf_layout->priv_data_size; buf_prefix_content.pass_prs_result = true; buf_prefix_content.pass_hash_result = true; - buf_prefix_content.pass_time_stamp = false; + buf_prefix_content.pass_time_stamp = true; buf_prefix_content.data_align = DPAA_FD_DATA_ALIGNMENT; rx_p = &params.specific_params.rx_params; @@ -1610,14 +1610,28 @@ static struct sk_buff *dpaa_cleanup_tx_fd(const struct dpaa_priv *priv, { const enum dma_data_direction dma_dir = DMA_TO_DEVICE; struct device *dev = priv->net_dev->dev.parent; + struct skb_shared_hwtstamps shhwtstamps; dma_addr_t addr = qm_fd_addr(fd); const struct qm_sg_entry *sgt; struct sk_buff **skbh, *skb; int nr_frags, i; + u64 ns; skbh = (struct sk_buff **)phys_to_virt(addr); skb = *skbh; + if (priv->tx_tstamp && skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) { + memset(&shhwtstamps, 0, sizeof(shhwtstamps)); + + if (!fman_port_get_tstamp(priv->mac_dev->port[TX], (void *)skbh, + &ns)) { + shhwtstamps.hwtstamp = ns_to_ktime(ns); + skb_tstamp_tx(skb, &shhwtstamps); + } else { + dev_warn(dev, "fman_port_get_tstamp failed!\n"); + } + } + if (unlikely(qm_fd_get_format(fd) == qm_fd_sg)) { nr_frags = skb_shinfo(skb)->nr_frags; dma_unmap_single(dev, addr, @@ -2087,6 +2101,11 @@ static int dpaa_start_xmit(struct sk_buff *skb, struct net_device *net_dev) if (unlikely(err < 0)) goto skb_to_fd_failed; + if (priv->tx_tstamp && skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) { + fd.cmd |= cpu_to_be32(FM_FD_CMD_UPD); + skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; + } + if (likely(dpaa_xmit(priv, percpu_stats, queue_mapping, &fd) == 0)) return NETDEV_TX_OK; @@ -2228,6 +2247,7 @@ static enum qman_cb_dqrr_result rx_default_dqrr(struct qman_portal *portal, struct qman_fq *fq, const struct qm_dqrr_entry *dq) { + struct skb_shared_hwtstamps *shhwtstamps; struct rtnl_link_stats64 *percpu_stats; struct dpaa_percpu_priv *percpu_priv; const struct qm_fd *fd = &dq->fd; @@ -2241,6 +2261,7 @@ static enum qman_cb_dqrr_result rx_default_dqrr(struct qman_portal *portal, struct sk_buff *skb; int *count_ptr; void *vaddr; + u64 ns; fd_status = be32_to_cpu(fd->status); fd_format = qm_fd_get_format(fd); @@ -2305,6 +2326,16 @@ static enum qman_cb_dqrr_result rx_default_dqrr(struct qman_portal *portal, if (!skb) return qman_cb_dqrr_consume; + if (priv->rx_tstamp) { + shhwtstamps = skb_hwtstamps(skb); + memset(shhwtstamps, 0, sizeof(*shhwtstamps)); + + if (!fman_port_get_tstamp(priv->mac_dev->port[RX], vaddr, &ns)) + shhwtstamps->hwtstamp = ns_to_ktime(ns); + else + dev_warn(net_dev->dev.parent, "fman_port_get_tstamp failed!\n"); + } + skb->protocol = eth_type_trans(skb, net_dev); if (net_dev->features & NETIF_F_RXHASH && priv->keygen_in_use && @@ -2524,11 +2555,58 @@ static int dpaa_eth_stop(struct net_device *net_dev) return err; } +static int dpaa_ts_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) +{ + struct dpaa_priv *priv = netdev_priv(dev); + struct hwtstamp_config config; + + if (copy_from_user(&config, rq->ifr_data, sizeof(config))) + return -EFAULT; + + switch (config.tx_type) { + case HWTSTAMP_TX_OFF: + /* Couldn't disable rx/tx timestamping separately. + * Do nothing here. + */ + priv->tx_tstamp = false; + break; + case HWTSTAMP_TX_ON: + priv->mac_dev->set_tstamp(priv->mac_dev->fman_mac, true); + priv->tx_tstamp = true; + break; + default: + return -ERANGE; + } + + if (config.rx_filter == HWTSTAMP_FILTER_NONE) { + /* Couldn't disable rx/tx timestamping separately. + * Do nothing here.
+ */ + priv->rx_tstamp = false; + } else { + priv->mac_dev->set_tstamp(priv->mac_dev->fman_mac, true); + priv->rx_tstamp = true; + /* TS is set for all frame types, not only those requested */ + config.rx_filter = HWTSTAMP_FILTER_ALL; + } + + return copy_to_user(rq->ifr_data, &config, sizeof(config)) ? + -EFAULT : 0; +} + static int dpaa_ioctl(struct net_device *net_dev, struct ifreq *rq, int cmd) { - if (!net_dev->phydev) - return -EINVAL; - return phy_mii_ioctl(net_dev->phydev, rq, cmd); + int ret = -EINVAL; + + if (cmd == SIOCGMIIREG) { + if (net_dev->phydev) + return phy_mii_ioctl(net_dev->phydev, rq, cmd); + } + + if (cmd == SIOCSHWTSTAMP) + return dpaa_ts_ioctl(net_dev, rq, cmd); + + return ret; } static const struct net_device_ops dpaa_ops = { diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.h b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.h index bd9422082f83..af320f83c742 100644 --- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.h +++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.h @@ -182,6 +182,9 @@ struct dpaa_priv { struct dpaa_buffer_layout buf_layout[2]; u16 rx_headroom; + + bool tx_tstamp; /* Tx timestamping enabled */ + bool rx_tstamp; /* Rx timestamping enabled */ }; /* from dpaa_ethtool.c */ diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c b/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c index 2f933b6b2f4e..3184c8f7cdd0 100644 --- a/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c +++ b/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c @@ -32,6 +32,9 @@ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/string.h> +#include <linux/of_platform.h> +#include <linux/net_tstamp.h> +#include <linux/fsl/ptp_qoriq.h> #include "dpaa_eth.h" #include "mac.h" @@ -515,6 +518,41 @@ static int dpaa_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd) return ret; } +static int dpaa_get_ts_info(struct net_device *net_dev, + struct ethtool_ts_info *info) +{ + struct device *dev = net_dev->dev.parent; + struct device_node *mac_node = dev->of_node; + struct device_node *fman_node = NULL, *ptp_node = NULL; + struct platform_device *ptp_dev = NULL; + struct qoriq_ptp *ptp = NULL; + + info->phc_index = -1; + + fman_node = of_get_parent(mac_node); + if (fman_node) + ptp_node = of_parse_phandle(fman_node, "ptimer-handle", 0); + + if (ptp_node) + ptp_dev = of_find_device_by_node(ptp_node); + + if (ptp_dev) + ptp = platform_get_drvdata(ptp_dev); + + if (ptp) + info->phc_index = ptp->phc_index; + + info->so_timestamping = SOF_TIMESTAMPING_TX_HARDWARE | + SOF_TIMESTAMPING_RX_HARDWARE | + SOF_TIMESTAMPING_RAW_HARDWARE; + info->tx_types = (1 << HWTSTAMP_TX_OFF) | + (1 << HWTSTAMP_TX_ON); + info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) | + (1 << HWTSTAMP_FILTER_ALL); + + return 0; +} + const struct ethtool_ops dpaa_ethtool_ops = { .get_drvinfo = dpaa_get_drvinfo, .get_msglevel = dpaa_get_msglevel, @@ -530,4 +568,5 @@ const struct ethtool_ops dpaa_ethtool_ops = { .set_link_ksettings = dpaa_set_link_ksettings, .get_rxnfc = dpaa_get_rxnfc, .set_rxnfc = dpaa_set_rxnfc, + .get_ts_info = dpaa_get_ts_info, }; diff --git a/drivers/net/ethernet/freescale/fec_ptp.c b/drivers/net/ethernet/freescale/fec_ptp.c index 36c2d7d6ee1b..7e892b1cbd3d 100644 --- a/drivers/net/ethernet/freescale/fec_ptp.c +++ b/drivers/net/ethernet/freescale/fec_ptp.c @@ -99,7 +99,6 @@ static int fec_ptp_enable_pps(struct fec_enet_private *fep, uint enable) { unsigned long flags; u32 val, tempval; - int inc; struct timespec64 ts; u64 ns; val = 0; @@ -114,7 +113,6 @@ static int 
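The new dpaa_ts_ioctl() handler implements the standard SIOCSHWTSTAMP interface, so enabling the timestamping path from user space looks the same as for any other driver. A minimal example ("eth0" is a placeholder interface name):

#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <unistd.h>
#include <net/if.h>
#include <linux/net_tstamp.h>
#include <linux/sockios.h>

int main(void)
{
	struct hwtstamp_config cfg = {
		.tx_type   = HWTSTAMP_TX_ON,
		.rx_filter = HWTSTAMP_FILTER_ALL,
	};
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0)
		return 1;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
	ifr.ifr_data = (char *)&cfg;

	if (ioctl(fd, SIOCSHWTSTAMP, &ifr) < 0)
		perror("SIOCSHWTSTAMP");
	else
		/* The driver may widen rx_filter (dpaa reports HWTSTAMP_FILTER_ALL). */
		printf("tx_type=%d rx_filter=%d\n", cfg.tx_type, cfg.rx_filter);

	close(fd);
	return 0;
}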
fec_ptp_enable_pps(struct fec_enet_private *fep, uint enable) fep->pps_channel = DEFAULT_PPS_CHANNEL; fep->reload_period = PPS_OUPUT_RELOAD_PERIOD; - inc = fep->ptp_inc; spin_lock_irqsave(&fep->tmreg_lock, flags); diff --git a/drivers/net/ethernet/freescale/fman/fman.c b/drivers/net/ethernet/freescale/fman/fman.c index 9530405030a7..c415ac67cb7b 100644 --- a/drivers/net/ethernet/freescale/fman/fman.c +++ b/drivers/net/ethernet/freescale/fman/fman.c @@ -2801,7 +2801,8 @@ static struct fman *read_dts_node(struct platform_device *of_dev) of_node_put(muram_node); of_node_put(fm_node); - err = devm_request_irq(&of_dev->dev, irq, fman_irq, 0, "fman", fman); + err = devm_request_irq(&of_dev->dev, irq, fman_irq, IRQF_SHARED, + "fman", fman); if (err < 0) { dev_err(&of_dev->dev, "%s: irq %d allocation failed (error = %d)\n", __func__, irq, err); diff --git a/drivers/net/ethernet/freescale/fman/fman.h b/drivers/net/ethernet/freescale/fman/fman.h index bfa02e0014ae..935c317fa696 100644 --- a/drivers/net/ethernet/freescale/fman/fman.h +++ b/drivers/net/ethernet/freescale/fman/fman.h @@ -41,6 +41,7 @@ /* Frame queue Context Override */ #define FM_FD_CMD_FCO 0x80000000 #define FM_FD_CMD_RPD 0x40000000 /* Read Prepended Data */ +#define FM_FD_CMD_UPD 0x20000000 /* Update Prepended Data */ #define FM_FD_CMD_DTC 0x10000000 /* Do L4 Checksum */ /* TX-Port: Unsupported Format */ diff --git a/drivers/net/ethernet/freescale/fman/fman_dtsec.c b/drivers/net/ethernet/freescale/fman/fman_dtsec.c index 57b1e2b47c0a..1ca543ac8f2c 100644 --- a/drivers/net/ethernet/freescale/fman/fman_dtsec.c +++ b/drivers/net/ethernet/freescale/fman/fman_dtsec.c @@ -123,11 +123,13 @@ #define DTSEC_ECNTRL_R100M 0x00000008 #define DTSEC_ECNTRL_QSGMIIM 0x00000001 +#define TCTRL_TTSE 0x00000040 #define TCTRL_GTS 0x00000020 #define RCTRL_PAL_MASK 0x001f0000 #define RCTRL_PAL_SHIFT 16 #define RCTRL_GHTX 0x00000400 +#define RCTRL_RTSE 0x00000040 #define RCTRL_GRS 0x00000020 #define RCTRL_MPROM 0x00000008 #define RCTRL_RSF 0x00000004 @@ -1136,6 +1138,31 @@ int dtsec_set_allmulti(struct fman_mac *dtsec, bool enable) return 0; } +int dtsec_set_tstamp(struct fman_mac *dtsec, bool enable) +{ + struct dtsec_regs __iomem *regs = dtsec->regs; + u32 rctrl, tctrl; + + if (!is_init_done(dtsec->dtsec_drv_param)) + return -EINVAL; + + rctrl = ioread32be(&regs->rctrl); + tctrl = ioread32be(&regs->tctrl); + + if (enable) { + rctrl |= RCTRL_RTSE; + tctrl |= TCTRL_TTSE; + } else { + rctrl &= ~RCTRL_RTSE; + tctrl &= ~TCTRL_TTSE; + } + + iowrite32be(rctrl, &regs->rctrl); + iowrite32be(tctrl, &regs->tctrl); + + return 0; +} + int dtsec_del_hash_mac_address(struct fman_mac *dtsec, enet_addr_t *eth_addr) { struct dtsec_regs __iomem *regs = dtsec->regs; diff --git a/drivers/net/ethernet/freescale/fman/fman_dtsec.h b/drivers/net/ethernet/freescale/fman/fman_dtsec.h index 1a689adf5a22..5149d96ec2c1 100644 --- a/drivers/net/ethernet/freescale/fman/fman_dtsec.h +++ b/drivers/net/ethernet/freescale/fman/fman_dtsec.h @@ -56,5 +56,6 @@ int dtsec_add_hash_mac_address(struct fman_mac *dtsec, enet_addr_t *eth_addr); int dtsec_del_hash_mac_address(struct fman_mac *dtsec, enet_addr_t *eth_addr); int dtsec_get_version(struct fman_mac *dtsec, u32 *mac_version); int dtsec_set_allmulti(struct fman_mac *dtsec, bool enable); +int dtsec_set_tstamp(struct fman_mac *dtsec, bool enable); #endif /* __DTSEC_H */ diff --git a/drivers/net/ethernet/freescale/fman/fman_memac.c b/drivers/net/ethernet/freescale/fman/fman_memac.c index 446a97b792e3..bc6eb30aa20f 100644 ---
a/drivers/net/ethernet/freescale/fman/fman_memac.c +++ b/drivers/net/ethernet/freescale/fman/fman_memac.c @@ -964,6 +964,11 @@ int memac_set_allmulti(struct fman_mac *memac, bool enable) return 0; } +int memac_set_tstamp(struct fman_mac *memac, bool enable) +{ + return 0; /* Always enabled. */ +} + int memac_del_hash_mac_address(struct fman_mac *memac, enet_addr_t *eth_addr) { struct memac_regs __iomem *regs = memac->regs; diff --git a/drivers/net/ethernet/freescale/fman/fman_memac.h b/drivers/net/ethernet/freescale/fman/fman_memac.h index b5a50338ed9a..b2c671ec0ce7 100644 --- a/drivers/net/ethernet/freescale/fman/fman_memac.h +++ b/drivers/net/ethernet/freescale/fman/fman_memac.h @@ -58,5 +58,6 @@ int memac_set_exception(struct fman_mac *memac, int memac_add_hash_mac_address(struct fman_mac *memac, enet_addr_t *eth_addr); int memac_del_hash_mac_address(struct fman_mac *memac, enet_addr_t *eth_addr); int memac_set_allmulti(struct fman_mac *memac, bool enable); +int memac_set_tstamp(struct fman_mac *memac, bool enable); #endif /* __MEMAC_H */ diff --git a/drivers/net/ethernet/freescale/fman/fman_port.c b/drivers/net/ethernet/freescale/fman/fman_port.c index ecbf6187e13a..ee82ee1384eb 100644 --- a/drivers/net/ethernet/freescale/fman/fman_port.c +++ b/drivers/net/ethernet/freescale/fman/fman_port.c @@ -1739,6 +1739,18 @@ int fman_port_get_hash_result_offset(struct fman_port *port, u32 *offset) } EXPORT_SYMBOL(fman_port_get_hash_result_offset); +int fman_port_get_tstamp(struct fman_port *port, const void *data, u64 *tstamp) +{ + if (port->buffer_offsets.time_stamp_offset == ILLEGAL_BASE) + return -EINVAL; + + *tstamp = be64_to_cpu(*(__be64 *)(data + + port->buffer_offsets.time_stamp_offset)); + + return 0; +} +EXPORT_SYMBOL(fman_port_get_tstamp); + static int fman_port_probe(struct platform_device *of_dev) { struct fman_port *port; diff --git a/drivers/net/ethernet/freescale/fman/fman_port.h b/drivers/net/ethernet/freescale/fman/fman_port.h index e86ca6a34e4e..9dbb69f40121 100644 --- a/drivers/net/ethernet/freescale/fman/fman_port.h +++ b/drivers/net/ethernet/freescale/fman/fman_port.h @@ -153,6 +153,8 @@ u32 fman_port_get_qman_channel_id(struct fman_port *port); int fman_port_get_hash_result_offset(struct fman_port *port, u32 *offset); +int fman_port_get_tstamp(struct fman_port *port, const void *data, u64 *tstamp); + struct fman_port *fman_port_bind(struct device *dev); #endif /* __FMAN_PORT_H */ diff --git a/drivers/net/ethernet/freescale/fman/fman_tgec.c b/drivers/net/ethernet/freescale/fman/fman_tgec.c index 284735d4ebe9..40705938eecc 100644 --- a/drivers/net/ethernet/freescale/fman/fman_tgec.c +++ b/drivers/net/ethernet/freescale/fman/fman_tgec.c @@ -44,6 +44,7 @@ #define TGEC_TX_IPG_LENGTH_MASK 0x000003ff /* Command and Configuration Register (COMMAND_CONFIG) */ +#define CMD_CFG_EN_TIMESTAMP 0x00100000 #define CMD_CFG_NO_LEN_CHK 0x00020000 #define CMD_CFG_PAUSE_IGNORE 0x00000100 #define CMF_CFG_CRC_FWD 0x00000040 @@ -588,6 +589,26 @@ int tgec_set_allmulti(struct fman_mac *tgec, bool enable) return 0; } +int tgec_set_tstamp(struct fman_mac *tgec, bool enable) +{ + struct tgec_regs __iomem *regs = tgec->regs; + u32 tmp; + + if (!is_init_done(tgec->cfg)) + return -EINVAL; + + tmp = ioread32be(&regs->command_config); + + if (enable) + tmp |= CMD_CFG_EN_TIMESTAMP; + else + tmp &= ~CMD_CFG_EN_TIMESTAMP; + + iowrite32be(tmp, &regs->command_config); + + return 0; +} + int tgec_del_hash_mac_address(struct fman_mac *tgec, enet_addr_t *eth_addr) { struct tgec_regs __iomem *regs = tgec->regs; diff --git
a/drivers/net/ethernet/freescale/fman/fman_tgec.h b/drivers/net/ethernet/freescale/fman/fman_tgec.h index cbbd3b422a98..3bfd1062b386 100644 --- a/drivers/net/ethernet/freescale/fman/fman_tgec.h +++ b/drivers/net/ethernet/freescale/fman/fman_tgec.h @@ -52,5 +52,6 @@ int tgec_add_hash_mac_address(struct fman_mac *tgec, enet_addr_t *eth_addr); int tgec_del_hash_mac_address(struct fman_mac *tgec, enet_addr_t *eth_addr); int tgec_get_version(struct fman_mac *tgec, u32 *mac_version); int tgec_set_allmulti(struct fman_mac *tgec, bool enable); +int tgec_set_tstamp(struct fman_mac *tgec, bool enable); #endif /* __TGEC_H */ diff --git a/drivers/net/ethernet/freescale/fman/mac.c b/drivers/net/ethernet/freescale/fman/mac.c index 7b5b95f52c09..a847b9c3b31a 100644 --- a/drivers/net/ethernet/freescale/fman/mac.c +++ b/drivers/net/ethernet/freescale/fman/mac.c @@ -471,6 +471,7 @@ static void setup_dtsec(struct mac_device *mac_dev) mac_dev->set_rx_pause = dtsec_accept_rx_pause_frames; mac_dev->set_exception = dtsec_set_exception; mac_dev->set_allmulti = dtsec_set_allmulti; + mac_dev->set_tstamp = dtsec_set_tstamp; mac_dev->set_multi = set_multi; mac_dev->start = start; mac_dev->stop = stop; @@ -490,6 +491,7 @@ static void setup_tgec(struct mac_device *mac_dev) mac_dev->set_rx_pause = tgec_accept_rx_pause_frames; mac_dev->set_exception = tgec_set_exception; mac_dev->set_allmulti = tgec_set_allmulti; + mac_dev->set_tstamp = tgec_set_tstamp; mac_dev->set_multi = set_multi; mac_dev->start = start; mac_dev->stop = stop; @@ -509,6 +511,7 @@ static void setup_memac(struct mac_device *mac_dev) mac_dev->set_rx_pause = memac_accept_rx_pause_frames; mac_dev->set_exception = memac_set_exception; mac_dev->set_allmulti = memac_set_allmulti; + mac_dev->set_tstamp = memac_set_tstamp; mac_dev->set_multi = set_multi; mac_dev->start = start; mac_dev->stop = stop; diff --git a/drivers/net/ethernet/freescale/fman/mac.h b/drivers/net/ethernet/freescale/fman/mac.h index b520cec120ee..824a81a9f350 100644 --- a/drivers/net/ethernet/freescale/fman/mac.h +++ b/drivers/net/ethernet/freescale/fman/mac.h @@ -68,6 +68,7 @@ struct mac_device { int (*set_promisc)(struct fman_mac *mac_dev, bool enable); int (*change_addr)(struct fman_mac *mac_dev, enet_addr_t *enet_addr); int (*set_allmulti)(struct fman_mac *mac_dev, bool enable); + int (*set_tstamp)(struct fman_mac *mac_dev, bool enable); int (*set_multi)(struct net_device *net_dev, struct mac_device *mac_dev); int (*set_rx_pause)(struct fman_mac *mac_dev, bool en); diff --git a/drivers/net/ethernet/freescale/gianfar_ethtool.c b/drivers/net/ethernet/freescale/gianfar_ethtool.c index 8cb98cae0a6f..395a5266ea30 100644 --- a/drivers/net/ethernet/freescale/gianfar_ethtool.c +++ b/drivers/net/ethernet/freescale/gianfar_ethtool.c @@ -740,7 +740,6 @@ static void ethflow_to_filer_rules (struct gfar_private *priv, u64 ethflow) static int gfar_ethflow_to_filer_table(struct gfar_private *priv, u64 ethflow, u64 class) { - unsigned int last_rule_idx = priv->cur_filer_idx; unsigned int cmp_rqfpr; unsigned int *local_rqfpr; unsigned int *local_rqfcr; @@ -819,7 +818,6 @@ static int gfar_ethflow_to_filer_table(struct gfar_private *priv, u64 ethflow, } priv->cur_filer_idx = l - 1; - last_rule_idx = l; /* hash rules */ ethflow_to_filer_rules(priv, ethflow); diff --git a/drivers/net/ethernet/freescale/ucc_geth.c b/drivers/net/ethernet/freescale/ucc_geth.c index 42fca3208c0b..22a817da861e 100644 --- a/drivers/net/ethernet/freescale/ucc_geth.c +++ b/drivers/net/ethernet/freescale/ucc_geth.c @@ -3096,6 +3096,7 
@@ static int ucc_geth_start_xmit(struct sk_buff *skb, struct net_device *dev) ugeth_vdbg("%s: IN", __func__); + netdev_sent_queue(dev, skb->len); spin_lock_irqsave(&ugeth->lock, flags); dev->stats.tx_bytes += skb->len; @@ -3240,6 +3241,8 @@ static int ucc_geth_tx(struct net_device *dev, u8 txQ) { /* Start from the next BD that should be filled */ struct ucc_geth_private *ugeth = netdev_priv(dev); + unsigned int bytes_sent = 0; + int howmany = 0; u8 __iomem *bd; /* BD pointer */ u32 bd_status; @@ -3257,7 +3260,8 @@ static int ucc_geth_tx(struct net_device *dev, u8 txQ) skb = ugeth->tx_skbuff[txQ][ugeth->skb_dirtytx[txQ]]; if (!skb) break; - + howmany++; + bytes_sent += skb->len; dev->stats.tx_packets++; dev_consume_skb_any(skb); @@ -3279,6 +3283,7 @@ static int ucc_geth_tx(struct net_device *dev, u8 txQ) bd_status = in_be32((u32 __iomem *)bd); } ugeth->confBd[txQ] = bd; + netdev_completed_queue(dev, howmany, bytes_sent); return 0; } @@ -3479,6 +3484,7 @@ static int ucc_geth_open(struct net_device *dev) phy_start(ugeth->phydev); napi_enable(&ugeth->napi); + netdev_reset_queue(dev); netif_start_queue(dev); device_set_wakeup_capable(&dev->dev, @@ -3509,6 +3515,7 @@ static int ucc_geth_close(struct net_device *dev) free_irq(ugeth->ug_info->uf_info.irq, ugeth->ndev); netif_stop_queue(dev); + netdev_reset_queue(dev); return 0; } diff --git a/drivers/net/ethernet/hisilicon/Kconfig b/drivers/net/ethernet/hisilicon/Kconfig index fb1a7251f45d..25152715396b 100644 --- a/drivers/net/ethernet/hisilicon/Kconfig +++ b/drivers/net/ethernet/hisilicon/Kconfig @@ -85,10 +85,12 @@ config HNS3 drivers(like ODP)to register with HNAE devices and their associated operations. +if HNS3 + config HNS3_HCLGE tristate "Hisilicon HNS3 HCLGE Acceleration Engine & Compatibility Layer Support" + default m depends on PCI_MSI - depends on HNS3 ---help--- This selects the HNS3_HCLGE network acceleration engine & its hardware compatibility layer. The engine would be used in Hisilicon hip08 family of @@ -97,16 +99,15 @@ config HNS3_HCLGE config HNS3_DCB bool "Hisilicon HNS3 Data Center Bridge Support" default n - depends on HNS3 && HNS3_HCLGE && DCB + depends on HNS3_HCLGE && DCB ---help--- Say Y here if you want to use Data Center Bridging (DCB) in the HNS3 driver. If unsure, say N. config HNS3_HCLGEVF - tristate "Hisilicon HNS3VF Acceleration Engine & Compatibility Layer Support" - depends on PCI_MSI - depends on HNS3 + tristate "Hisilicon HNS3VF Acceleration Engine & Compatibility Layer Support" + depends on PCI_MSI depends on HNS3_HCLGE ---help--- This selects the HNS3 VF drivers network acceleration engine & its hardware @@ -115,11 +116,13 @@ config HNS3_HCLGEVF config HNS3_ENET tristate "Hisilicon HNS3 Ethernet Device Support" + default m depends on 64BIT && PCI - depends on HNS3 ---help--- This selects the Ethernet Driver for Hisilicon Network Subsystem 3 for hip08 family of SoCs. This module depends upon HNAE3 driver to access the HNAE3 devices and their associated operations. 
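The ucc_geth hunks above wire the driver into byte queue limits: bytes are accounted as packets are queued, released as TX completions are reaped, and the state is reset across open/close. A minimal kernel-side sketch of that pattern (the my_* hooks are hypothetical driver callbacks, not functions from this patch):

#include <linux/netdevice.h>
#include <linux/skbuff.h>

static netdev_tx_t my_xmit(struct sk_buff *skb, struct net_device *dev)
{
	netdev_sent_queue(dev, skb->len);	/* account bytes as they are queued */
	/* ... hand the skb to the hardware ring here ... */
	return NETDEV_TX_OK;
}

static void my_tx_clean(struct net_device *dev)
{
	unsigned int pkts = 0, bytes = 0;

	/* ... walk completed descriptors, summing pkts and bytes ... */
	netdev_completed_queue(dev, pkts, bytes);	/* release the BQL budget */
}

static int my_open(struct net_device *dev)
{
	netdev_reset_queue(dev);	/* start from a clean BQL state */
	netif_start_queue(dev);
	return 0;
}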
+endif #HNS3 + endif # NET_VENDOR_HISILICON diff --git a/drivers/net/ethernet/hisilicon/hip04_eth.c b/drivers/net/ethernet/hisilicon/hip04_eth.c index 340e28211135..14374a856d30 100644 --- a/drivers/net/ethernet/hisilicon/hip04_eth.c +++ b/drivers/net/ethernet/hisilicon/hip04_eth.c @@ -904,7 +904,7 @@ static int hip04_mac_probe(struct platform_device *pdev) hip04_config_port(ndev, SPEED_100, DUPLEX_FULL); hip04_config_fifo(priv); - random_ether_addr(ndev->dev_addr); + eth_random_addr(ndev->dev_addr); hip04_update_mac_address(ndev); ret = hip04_alloc_ring(ndev, d); diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.c b/drivers/net/ethernet/hisilicon/hns/hns_enet.c index ef9ef703d13a..948b3e0d18f4 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_enet.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.c @@ -2022,7 +2022,8 @@ static void hns_nic_get_stats64(struct net_device *ndev, static u16 hns_nic_select_queue(struct net_device *ndev, struct sk_buff *skb, - void *accel_priv, select_queue_fallback_t fallback) + struct net_device *sb_dev, + select_queue_fallback_t fallback) { struct ethhdr *eth_hdr = (struct ethhdr *)skb->data; struct hns_nic_priv *priv = netdev_priv(ndev); @@ -2032,7 +2033,7 @@ hns_nic_select_queue(struct net_device *ndev, struct sk_buff *skb, is_multicast_ether_addr(eth_hdr->h_dest)) return 0; else - return fallback(ndev, skb); + return fallback(ndev, skb, NULL); } static const struct net_device_ops hns_nic_netdev_ops = { diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.c b/drivers/net/ethernet/hisilicon/hns3/hnae3.c index 9d79dad2c6aa..0762ad18fdcc 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hnae3.c +++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.c @@ -8,7 +8,6 @@ */ #include <linux/list.h> -#include <linux/slab.h> #include <linux/spinlock.h> #include "hnae3.h" @@ -41,13 +40,13 @@ static void hnae3_set_client_init_flag(struct hnae3_client *client, { switch (client->type) { case HNAE3_CLIENT_KNIC: - hnae_set_bit(ae_dev->flag, HNAE3_KNIC_CLIENT_INITED_B, inited); + hnae3_set_bit(ae_dev->flag, HNAE3_KNIC_CLIENT_INITED_B, inited); break; case HNAE3_CLIENT_UNIC: - hnae_set_bit(ae_dev->flag, HNAE3_UNIC_CLIENT_INITED_B, inited); + hnae3_set_bit(ae_dev->flag, HNAE3_UNIC_CLIENT_INITED_B, inited); break; case HNAE3_CLIENT_ROCE: - hnae_set_bit(ae_dev->flag, HNAE3_ROCE_CLIENT_INITED_B, inited); + hnae3_set_bit(ae_dev->flag, HNAE3_ROCE_CLIENT_INITED_B, inited); break; default: break; @@ -61,16 +60,16 @@ static int hnae3_get_client_init_flag(struct hnae3_client *client, switch (client->type) { case HNAE3_CLIENT_KNIC: - inited = hnae_get_bit(ae_dev->flag, + inited = hnae3_get_bit(ae_dev->flag, HNAE3_KNIC_CLIENT_INITED_B); break; case HNAE3_CLIENT_UNIC: - inited = hnae_get_bit(ae_dev->flag, + inited = hnae3_get_bit(ae_dev->flag, HNAE3_UNIC_CLIENT_INITED_B); break; case HNAE3_CLIENT_ROCE: - inited = hnae_get_bit(ae_dev->flag, - HNAE3_ROCE_CLIENT_INITED_B); + inited = hnae3_get_bit(ae_dev->flag, + HNAE3_ROCE_CLIENT_INITED_B); break; default: break; @@ -86,7 +85,7 @@ static int hnae3_match_n_instantiate(struct hnae3_client *client, /* check if this client matches the type of ae_dev */ if (!(hnae3_client_match(client->type, ae_dev->dev_type) && - hnae_get_bit(ae_dev->flag, HNAE3_DEV_INITED_B))) { + hnae3_get_bit(ae_dev->flag, HNAE3_DEV_INITED_B))) { return 0; } @@ -95,7 +94,7 @@ static int hnae3_match_n_instantiate(struct hnae3_client *client, ret = ae_dev->ops->init_client_instance(client, ae_dev); if (ret) { dev_err(&ae_dev->pdev->dev, - "fail to instantiate client\n"); 
+ "fail to instantiate client, ret = %d\n", ret); return ret; } @@ -135,7 +134,8 @@ int hnae3_register_client(struct hnae3_client *client) ret = hnae3_match_n_instantiate(client, ae_dev, true); if (ret) dev_err(&ae_dev->pdev->dev, - "match and instantiation failed for port\n"); + "match and instantiation failed for port, ret = %d\n", + ret); } exit: @@ -185,11 +185,12 @@ void hnae3_register_ae_algo(struct hnae3_ae_algo *ae_algo) ae_dev->ops = ae_algo->ops; ret = ae_algo->ops->init_ae_dev(ae_dev); if (ret) { - dev_err(&ae_dev->pdev->dev, "init ae_dev error.\n"); + dev_err(&ae_dev->pdev->dev, + "init ae_dev error, ret = %d\n", ret); continue; } - hnae_set_bit(ae_dev->flag, HNAE3_DEV_INITED_B, 1); + hnae3_set_bit(ae_dev->flag, HNAE3_DEV_INITED_B, 1); /* check the client list for the match with this ae_dev type and * initialize the figure out client instance @@ -198,7 +199,8 @@ void hnae3_register_ae_algo(struct hnae3_ae_algo *ae_algo) ret = hnae3_match_n_instantiate(client, ae_dev, true); if (ret) dev_err(&ae_dev->pdev->dev, - "match and instantiation failed\n"); + "match and instantiation failed, ret = %d\n", + ret); } } @@ -218,7 +220,7 @@ void hnae3_unregister_ae_algo(struct hnae3_ae_algo *ae_algo) mutex_lock(&hnae3_common_lock); /* Check if there are matched ae_dev */ list_for_each_entry(ae_dev, &hnae3_ae_dev_list, node) { - if (!hnae_get_bit(ae_dev->flag, HNAE3_DEV_INITED_B)) + if (!hnae3_get_bit(ae_dev->flag, HNAE3_DEV_INITED_B)) continue; id = pci_match_id(ae_algo->pdev_id_table, ae_dev->pdev); @@ -232,7 +234,7 @@ void hnae3_unregister_ae_algo(struct hnae3_ae_algo *ae_algo) hnae3_match_n_instantiate(client, ae_dev, false); ae_algo->ops->uninit_ae_dev(ae_dev); - hnae_set_bit(ae_dev->flag, HNAE3_DEV_INITED_B, 0); + hnae3_set_bit(ae_dev->flag, HNAE3_DEV_INITED_B, 0); } list_del(&ae_algo->node); @@ -271,11 +273,12 @@ void hnae3_register_ae_dev(struct hnae3_ae_dev *ae_dev) /* ae_dev init should set flag */ ret = ae_dev->ops->init_ae_dev(ae_dev); if (ret) { - dev_err(&ae_dev->pdev->dev, "init ae_dev error\n"); + dev_err(&ae_dev->pdev->dev, + "init ae_dev error, ret = %d\n", ret); goto out_err; } - hnae_set_bit(ae_dev->flag, HNAE3_DEV_INITED_B, 1); + hnae3_set_bit(ae_dev->flag, HNAE3_DEV_INITED_B, 1); break; } @@ -286,7 +289,8 @@ void hnae3_register_ae_dev(struct hnae3_ae_dev *ae_dev) ret = hnae3_match_n_instantiate(client, ae_dev, true); if (ret) dev_err(&ae_dev->pdev->dev, - "match and instantiation failed\n"); + "match and instantiation failed, ret = %d\n", + ret); } out_err: @@ -306,7 +310,7 @@ void hnae3_unregister_ae_dev(struct hnae3_ae_dev *ae_dev) mutex_lock(&hnae3_common_lock); /* Check if there are matched ae_algo */ list_for_each_entry(ae_algo, &hnae3_ae_algo_list, node) { - if (!hnae_get_bit(ae_dev->flag, HNAE3_DEV_INITED_B)) + if (!hnae3_get_bit(ae_dev->flag, HNAE3_DEV_INITED_B)) continue; id = pci_match_id(ae_algo->pdev_id_table, ae_dev->pdev); @@ -317,7 +321,7 @@ void hnae3_unregister_ae_dev(struct hnae3_ae_dev *ae_dev) hnae3_match_n_instantiate(client, ae_dev, false); ae_algo->ops->uninit_ae_dev(ae_dev); - hnae_set_bit(ae_dev->flag, HNAE3_DEV_INITED_B, 0); + hnae3_set_bit(ae_dev->flag, HNAE3_DEV_INITED_B, 0); } list_del(&ae_dev->node); diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.h b/drivers/net/ethernet/hisilicon/hns3/hnae3.h index 8acb1d116a02..da806fdfbbe6 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hnae3.h +++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.h @@ -62,10 +62,10 @@ BIT(HNAE3_DEV_SUPPORT_ROCE_B)) #define hnae3_dev_roce_supported(hdev) \ - 
hnae_get_bit(hdev->ae_dev->flag, HNAE3_DEV_SUPPORT_ROCE_B) + hnae3_get_bit(hdev->ae_dev->flag, HNAE3_DEV_SUPPORT_ROCE_B) #define hnae3_dev_dcb_supported(hdev) \ - hnae_get_bit(hdev->ae_dev->flag, HNAE3_DEV_SUPPORT_DCB_B) + hnae3_get_bit(hdev->ae_dev->flag, HNAE3_DEV_SUPPORT_DCB_B) #define ring_ptr_move_fw(ring, p) \ ((ring)->p = ((ring)->p + 1) % (ring)->desc_num) @@ -167,7 +167,6 @@ struct hnae3_client_ops { #define HNAE3_CLIENT_NAME_LENGTH 16 struct hnae3_client { char name[HNAE3_CLIENT_NAME_LENGTH]; - u16 version; unsigned long state; enum hnae3_client_type type; const struct hnae3_client_ops *ops; @@ -436,7 +435,6 @@ struct hnae3_dcb_ops { struct hnae3_ae_algo { const struct hnae3_ae_ops *ops; struct list_head node; - char name[HNAE3_CLASS_NAME_SIZE]; const struct pci_device_id *pdev_id_table; }; @@ -509,17 +507,17 @@ struct hnae3_handle { u32 numa_node_mask; /* for multi-chip support */ }; -#define hnae_set_field(origin, mask, shift, val) \ +#define hnae3_set_field(origin, mask, shift, val) \ do { \ (origin) &= (~(mask)); \ (origin) |= ((val) << (shift)) & (mask); \ } while (0) -#define hnae_get_field(origin, mask, shift) (((origin) & (mask)) >> (shift)) +#define hnae3_get_field(origin, mask, shift) (((origin) & (mask)) >> (shift)) -#define hnae_set_bit(origin, shift, val) \ - hnae_set_field((origin), (0x1 << (shift)), (shift), (val)) -#define hnae_get_bit(origin, shift) \ - hnae_get_field((origin), (0x1 << (shift)), (shift)) +#define hnae3_set_bit(origin, shift, val) \ + hnae3_set_field((origin), (0x1 << (shift)), (shift), (val)) +#define hnae3_get_bit(origin, shift) \ + hnae3_get_field((origin), (0x1 << (shift)), (shift)) void hnae3_register_ae_dev(struct hnae3_ae_dev *ae_dev); void hnae3_unregister_ae_dev(struct hnae3_ae_dev *ae_dev); diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c index 25a73bb2e642..29be96e5cc47 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c @@ -239,7 +239,28 @@ static int hns3_nic_set_real_num_queue(struct net_device *netdev) struct hnae3_handle *h = hns3_get_handle(netdev); struct hnae3_knic_private_info *kinfo = &h->kinfo; unsigned int queue_size = kinfo->rss_size * kinfo->num_tc; - int ret; + int i, ret; + + if (kinfo->num_tc <= 1) { + netdev_reset_tc(netdev); + } else { + ret = netdev_set_num_tc(netdev, kinfo->num_tc); + if (ret) { + netdev_err(netdev, + "netdev_set_num_tc fail, ret=%d!\n", ret); + return ret; + } + + for (i = 0; i < HNAE3_MAX_TC; i++) { + if (!kinfo->tc_info[i].enable) + continue; + + netdev_set_tc_queue(netdev, + kinfo->tc_info[i].tc, + kinfo->tc_info[i].tqp_count, + kinfo->tc_info[i].tqp_offset); + } + } ret = netif_set_real_num_tx_queues(netdev, queue_size); if (ret) { @@ -312,7 +333,9 @@ out_start_err: static int hns3_nic_net_open(struct net_device *netdev) { struct hns3_nic_priv *priv = netdev_priv(netdev); - int ret; + struct hnae3_handle *h = hns3_get_handle(netdev); + struct hnae3_knic_private_info *kinfo; + int i, ret; netif_carrier_off(netdev); @@ -327,6 +350,12 @@ static int hns3_nic_net_open(struct net_device *netdev) return ret; } + kinfo = &h->kinfo; + for (i = 0; i < HNAE3_MAX_USER_PRIO; i++) { + netdev_set_prio_tc_map(netdev, i, + kinfo->prio_tc[i]); + } + priv->ae_handle->last_reset_time = jiffies; return 0; } @@ -493,8 +522,8 @@ static int hns3_set_tso(struct sk_buff *skb, u32 *paylen, /* find the txbd field values */ *paylen = skb->len - hdr_len; - hnae_set_bit(*type_cs_vlan_tso, - 
HNS3_TXD_TSO_B, 1); + hnae3_set_bit(*type_cs_vlan_tso, + HNS3_TXD_TSO_B, 1); /* get MSS for TSO */ *mss = skb_shinfo(skb)->gso_size; @@ -586,21 +615,21 @@ static void hns3_set_l2l3l4_len(struct sk_buff *skb, u8 ol4_proto, /* compute L2 header size for normal packet, defined in 2 Bytes */ l2_len = l3.hdr - skb->data; - hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L2LEN_M, - HNS3_TXD_L2LEN_S, l2_len >> 1); + hnae3_set_field(*type_cs_vlan_tso, HNS3_TXD_L2LEN_M, + HNS3_TXD_L2LEN_S, l2_len >> 1); /* tunnel packet*/ if (skb->encapsulation) { /* compute OL2 header size, defined in 2 Bytes */ ol2_len = l2_len; - hnae_set_field(*ol_type_vlan_len_msec, - HNS3_TXD_L2LEN_M, - HNS3_TXD_L2LEN_S, ol2_len >> 1); + hnae3_set_field(*ol_type_vlan_len_msec, + HNS3_TXD_L2LEN_M, + HNS3_TXD_L2LEN_S, ol2_len >> 1); /* compute OL3 header size, defined in 4 Bytes */ ol3_len = l4.hdr - l3.hdr; - hnae_set_field(*ol_type_vlan_len_msec, HNS3_TXD_L3LEN_M, - HNS3_TXD_L3LEN_S, ol3_len >> 2); + hnae3_set_field(*ol_type_vlan_len_msec, HNS3_TXD_L3LEN_M, + HNS3_TXD_L3LEN_S, ol3_len >> 2); /* MAC in UDP, MAC in GRE (0x6558)*/ if ((ol4_proto == IPPROTO_UDP) || (ol4_proto == IPPROTO_GRE)) { @@ -609,16 +638,17 @@ static void hns3_set_l2l3l4_len(struct sk_buff *skb, u8 ol4_proto, /* compute OL4 header size, defined in 4 Bytes. */ ol4_len = l2_hdr - l4.hdr; - hnae_set_field(*ol_type_vlan_len_msec, HNS3_TXD_L4LEN_M, - HNS3_TXD_L4LEN_S, ol4_len >> 2); + hnae3_set_field(*ol_type_vlan_len_msec, + HNS3_TXD_L4LEN_M, HNS3_TXD_L4LEN_S, + ol4_len >> 2); /* switch IP header ptr from outer to inner header */ l3.hdr = skb_inner_network_header(skb); /* compute inner l2 header size, defined in 2 Bytes. */ l2_len = l3.hdr - l2_hdr; - hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L2LEN_M, - HNS3_TXD_L2LEN_S, l2_len >> 1); + hnae3_set_field(*type_cs_vlan_tso, HNS3_TXD_L2LEN_M, + HNS3_TXD_L2LEN_S, l2_len >> 1); } else { /* skb packet types not supported by hardware, * txbd len fild doesn't be filled. 
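The hnae_* to hnae3_* conversions throughout this file are mechanical renames of the bit-field helpers declared in hnae3.h above. For reference, a self-contained sketch of how those helpers compose; the macro bodies are taken from the hnae3.h hunk, while the mask/shift values below are made up purely for illustration:

	#include <stdio.h>

	#define hnae3_set_field(origin, mask, shift, val) \
	do { \
		(origin) &= (~(mask)); \
		(origin) |= ((val) << (shift)) & (mask); \
	} while (0)
	#define hnae3_get_field(origin, mask, shift) (((origin) & (mask)) >> (shift))
	#define hnae3_set_bit(origin, shift, val) \
		hnae3_set_field((origin), (0x1 << (shift)), (shift), (val))
	#define hnae3_get_bit(origin, shift) \
		hnae3_get_field((origin), (0x1 << (shift)), (shift))

	int main(void)
	{
		unsigned int desc = 0;

		/* illustrative 4-bit field at bits 11:8 and a flag at bit 0 */
		hnae3_set_field(desc, 0xf00, 8, 5);
		hnae3_set_bit(desc, 0, 1);
		printf("field=%u bit=%u raw=0x%x\n",
		       hnae3_get_field(desc, 0xf00, 8),
		       hnae3_get_bit(desc, 0), desc);	/* field=5 bit=1 raw=0x501 */
		return 0;
	}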
@@ -634,22 +664,24 @@ static void hns3_set_l2l3l4_len(struct sk_buff *skb, u8 ol4_proto, /* compute inner(/normal) L3 header size, defined in 4 Bytes */ l3_len = l4.hdr - l3.hdr; - hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L3LEN_M, - HNS3_TXD_L3LEN_S, l3_len >> 2); + hnae3_set_field(*type_cs_vlan_tso, HNS3_TXD_L3LEN_M, + HNS3_TXD_L3LEN_S, l3_len >> 2); /* compute inner(/normal) L4 header size, defined in 4 Bytes */ switch (l4_proto) { case IPPROTO_TCP: - hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_M, - HNS3_TXD_L4LEN_S, l4.tcp->doff); + hnae3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_M, + HNS3_TXD_L4LEN_S, l4.tcp->doff); break; case IPPROTO_SCTP: - hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_M, - HNS3_TXD_L4LEN_S, (sizeof(struct sctphdr) >> 2)); + hnae3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_M, + HNS3_TXD_L4LEN_S, + (sizeof(struct sctphdr) >> 2)); break; case IPPROTO_UDP: - hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_M, - HNS3_TXD_L4LEN_S, (sizeof(struct udphdr) >> 2)); + hnae3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_M, + HNS3_TXD_L4LEN_S, + (sizeof(struct udphdr) >> 2)); break; default: /* skb packet types not supported by hardware, @@ -703,32 +735,34 @@ static int hns3_set_l3l4_type_csum(struct sk_buff *skb, u8 ol4_proto, /* define outer network header type.*/ if (skb->protocol == htons(ETH_P_IP)) { if (skb_is_gso(skb)) - hnae_set_field(*ol_type_vlan_len_msec, - HNS3_TXD_OL3T_M, HNS3_TXD_OL3T_S, - HNS3_OL3T_IPV4_CSUM); + hnae3_set_field(*ol_type_vlan_len_msec, + HNS3_TXD_OL3T_M, + HNS3_TXD_OL3T_S, + HNS3_OL3T_IPV4_CSUM); else - hnae_set_field(*ol_type_vlan_len_msec, - HNS3_TXD_OL3T_M, HNS3_TXD_OL3T_S, - HNS3_OL3T_IPV4_NO_CSUM); + hnae3_set_field(*ol_type_vlan_len_msec, + HNS3_TXD_OL3T_M, + HNS3_TXD_OL3T_S, + HNS3_OL3T_IPV4_NO_CSUM); } else if (skb->protocol == htons(ETH_P_IPV6)) { - hnae_set_field(*ol_type_vlan_len_msec, HNS3_TXD_OL3T_M, - HNS3_TXD_OL3T_S, HNS3_OL3T_IPV6); + hnae3_set_field(*ol_type_vlan_len_msec, HNS3_TXD_OL3T_M, + HNS3_TXD_OL3T_S, HNS3_OL3T_IPV6); } /* define tunnel type(OL4).*/ switch (l4_proto) { case IPPROTO_UDP: - hnae_set_field(*ol_type_vlan_len_msec, - HNS3_TXD_TUNTYPE_M, - HNS3_TXD_TUNTYPE_S, - HNS3_TUN_MAC_IN_UDP); + hnae3_set_field(*ol_type_vlan_len_msec, + HNS3_TXD_TUNTYPE_M, + HNS3_TXD_TUNTYPE_S, + HNS3_TUN_MAC_IN_UDP); break; case IPPROTO_GRE: - hnae_set_field(*ol_type_vlan_len_msec, - HNS3_TXD_TUNTYPE_M, - HNS3_TXD_TUNTYPE_S, - HNS3_TUN_NVGRE); + hnae3_set_field(*ol_type_vlan_len_msec, + HNS3_TXD_TUNTYPE_M, + HNS3_TXD_TUNTYPE_S, + HNS3_TUN_NVGRE); break; default: /* drop the skb tunnel packet if hardware don't support, @@ -749,43 +783,43 @@ static int hns3_set_l3l4_type_csum(struct sk_buff *skb, u8 ol4_proto, } if (l3.v4->version == 4) { - hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L3T_M, - HNS3_TXD_L3T_S, HNS3_L3T_IPV4); + hnae3_set_field(*type_cs_vlan_tso, HNS3_TXD_L3T_M, + HNS3_TXD_L3T_S, HNS3_L3T_IPV4); /* the stack computes the IP header already, the only time we * need the hardware to recompute it is in the case of TSO. 
*/ if (skb_is_gso(skb)) - hnae_set_bit(*type_cs_vlan_tso, HNS3_TXD_L3CS_B, 1); - - hnae_set_bit(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1); + hnae3_set_bit(*type_cs_vlan_tso, HNS3_TXD_L3CS_B, 1); } else if (l3.v6->version == 6) { - hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L3T_M, - HNS3_TXD_L3T_S, HNS3_L3T_IPV6); - hnae_set_bit(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1); + hnae3_set_field(*type_cs_vlan_tso, HNS3_TXD_L3T_M, + HNS3_TXD_L3T_S, HNS3_L3T_IPV6); } switch (l4_proto) { case IPPROTO_TCP: - hnae_set_field(*type_cs_vlan_tso, - HNS3_TXD_L4T_M, - HNS3_TXD_L4T_S, - HNS3_L4T_TCP); + hnae3_set_bit(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1); + hnae3_set_field(*type_cs_vlan_tso, + HNS3_TXD_L4T_M, + HNS3_TXD_L4T_S, + HNS3_L4T_TCP); break; case IPPROTO_UDP: if (hns3_tunnel_csum_bug(skb)) break; - hnae_set_field(*type_cs_vlan_tso, - HNS3_TXD_L4T_M, - HNS3_TXD_L4T_S, - HNS3_L4T_UDP); + hnae3_set_bit(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1); + hnae3_set_field(*type_cs_vlan_tso, + HNS3_TXD_L4T_M, + HNS3_TXD_L4T_S, + HNS3_L4T_UDP); break; case IPPROTO_SCTP: - hnae_set_field(*type_cs_vlan_tso, - HNS3_TXD_L4T_M, - HNS3_TXD_L4T_S, - HNS3_L4T_SCTP); + hnae3_set_bit(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1); + hnae3_set_field(*type_cs_vlan_tso, + HNS3_TXD_L4T_M, + HNS3_TXD_L4T_S, + HNS3_L4T_SCTP); break; default: /* drop the skb tunnel packet if hardware don't support, @@ -807,11 +841,11 @@ static int hns3_set_l3l4_type_csum(struct sk_buff *skb, u8 ol4_proto, static void hns3_set_txbd_baseinfo(u16 *bdtp_fe_sc_vld_ra_ri, int frag_end) { /* Config bd buffer end */ - hnae_set_field(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_BDTYPE_M, - HNS3_TXD_BDTYPE_S, 0); - hnae_set_bit(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_FE_B, !!frag_end); - hnae_set_bit(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_VLD_B, 1); - hnae_set_field(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_SC_M, HNS3_TXD_SC_S, 0); + hnae3_set_field(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_BDTYPE_M, + HNS3_TXD_BDTYPE_S, 0); + hnae3_set_bit(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_FE_B, !!frag_end); + hnae3_set_bit(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_VLD_B, 1); + hnae3_set_field(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_SC_M, HNS3_TXD_SC_S, 0); } static int hns3_fill_desc_vtags(struct sk_buff *skb, @@ -844,10 +878,10 @@ static int hns3_fill_desc_vtags(struct sk_buff *skb, * and use inner_vtag in one tag case. 
*/ if (skb->protocol == htons(ETH_P_8021Q)) { - hnae_set_bit(*out_vlan_flag, HNS3_TXD_OVLAN_B, 1); + hnae3_set_bit(*out_vlan_flag, HNS3_TXD_OVLAN_B, 1); *out_vtag = vlan_tag; } else { - hnae_set_bit(*inner_vlan_flag, HNS3_TXD_VLAN_B, 1); + hnae3_set_bit(*inner_vlan_flag, HNS3_TXD_VLAN_B, 1); *inner_vtag = vlan_tag; } } else if (skb->protocol == htons(ETH_P_8021Q)) { @@ -880,7 +914,6 @@ static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv, u16 out_vtag = 0; u32 paylen = 0; u16 mss = 0; - __be16 protocol; u8 ol4_proto; u8 il4_proto; int ret; @@ -909,7 +942,6 @@ static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv, if (skb->ip_summed == CHECKSUM_PARTIAL) { skb_reset_mac_len(skb); - protocol = skb->protocol; ret = hns3_get_l4_protocol(skb, &ol4_proto, &il4_proto); if (ret) @@ -1135,7 +1167,7 @@ netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev) wmb(); /* Commit all data before submit */ - hnae_queue_xmit(ring->tqp, buf_num); + hnae3_queue_xmit(ring->tqp, buf_num); return NETDEV_TX_OK; @@ -1304,7 +1336,6 @@ static int hns3_setup_tc(struct net_device *netdev, void *type_data) u16 mode = mqprio_qopt->mode; u8 hw = mqprio_qopt->qopt.hw; bool if_running; - unsigned int i; int ret; if (!((hw == TC_MQPRIO_HW_OFFLOAD_TCS && @@ -1328,24 +1359,6 @@ static int hns3_setup_tc(struct net_device *netdev, void *type_data) if (ret) goto out; - if (tc <= 1) { - netdev_reset_tc(netdev); - } else { - ret = netdev_set_num_tc(netdev, tc); - if (ret) - goto out; - - for (i = 0; i < HNAE3_MAX_TC; i++) { - if (!kinfo->tc_info[i].enable) - continue; - - netdev_set_tc_queue(netdev, - kinfo->tc_info[i].tc, - kinfo->tc_info[i].tqp_count, - kinfo->tc_info[i].tqp_offset); - } - } - ret = hns3_nic_set_real_num_queue(netdev); out: @@ -1703,7 +1716,7 @@ static void hns3_set_default_feature(struct net_device *netdev) static int hns3_alloc_buffer(struct hns3_enet_ring *ring, struct hns3_desc_cb *cb) { - unsigned int order = hnae_page_order(ring); + unsigned int order = hnae3_page_order(ring); struct page *p; p = dev_alloc_pages(order); @@ -1714,7 +1727,7 @@ static int hns3_alloc_buffer(struct hns3_enet_ring *ring, cb->page_offset = 0; cb->reuse_flag = 0; cb->buf = page_address(p); - cb->length = hnae_page_size(ring); + cb->length = hnae3_page_size(ring); cb->type = DESC_TYPE_PAGE; return 0; @@ -1780,33 +1793,27 @@ static void hns3_free_buffers(struct hns3_enet_ring *ring) /* free desc along with its attached buffer */ static void hns3_free_desc(struct hns3_enet_ring *ring) { + int size = ring->desc_num * sizeof(ring->desc[0]); + hns3_free_buffers(ring); - dma_unmap_single(ring_to_dev(ring), ring->desc_dma_addr, - ring->desc_num * sizeof(ring->desc[0]), - DMA_BIDIRECTIONAL); - ring->desc_dma_addr = 0; - kfree(ring->desc); - ring->desc = NULL; + if (ring->desc) { + dma_free_coherent(ring_to_dev(ring), size, + ring->desc, ring->desc_dma_addr); + ring->desc = NULL; + } } static int hns3_alloc_desc(struct hns3_enet_ring *ring) { int size = ring->desc_num * sizeof(ring->desc[0]); - ring->desc = kzalloc(size, GFP_KERNEL); + ring->desc = dma_zalloc_coherent(ring_to_dev(ring), size, + &ring->desc_dma_addr, + GFP_KERNEL); if (!ring->desc) return -ENOMEM; - ring->desc_dma_addr = dma_map_single(ring_to_dev(ring), ring->desc, - size, DMA_BIDIRECTIONAL); - if (dma_mapping_error(ring_to_dev(ring), ring->desc_dma_addr)) { - ring->desc_dma_addr = 0; - kfree(ring->desc); - ring->desc = NULL; - return -ENOMEM; - } - return 0; } @@ -1887,7 +1894,7 @@ static void hns3_nic_reclaim_one_desc(struct 
hns3_enet_ring *ring, int *bytes, (*pkts) += (desc_cb->type == DESC_TYPE_SKB); (*bytes) += desc_cb->length; - /* desc_cb will be cleaned, after hnae_free_buffer_detach*/ + /* desc_cb will be cleaned, after hnae3_free_buffer_detach*/ hns3_free_buffer_detach(ring, ring->next_to_clean); ring_ptr_move_fw(ring, next_to_clean); @@ -1917,7 +1924,7 @@ bool hns3_clean_tx_ring(struct hns3_enet_ring *ring, int budget) if (is_ring_empty(ring) || head == ring->next_to_clean) return true; /* no data to poll */ - if (!is_valid_clean_head(ring, head)) { + if (unlikely(!is_valid_clean_head(ring, head))) { netdev_err(netdev, "wrong head (%d, %d-%d)\n", head, ring->next_to_use, ring->next_to_clean); @@ -2016,15 +2023,15 @@ static void hns3_nic_reuse_page(struct sk_buff *skb, int i, bool twobufs; twobufs = ((PAGE_SIZE < 8192) && - hnae_buf_size(ring) == HNS3_BUFFER_SIZE_2048); + hnae3_buf_size(ring) == HNS3_BUFFER_SIZE_2048); desc = &ring->desc[ring->next_to_clean]; size = le16_to_cpu(desc->rx.size); - truesize = hnae_buf_size(ring); + truesize = hnae3_buf_size(ring); if (!twobufs) - last_offset = hnae_page_size(ring) - hnae_buf_size(ring); + last_offset = hnae3_page_size(ring) - hnae3_buf_size(ring); skb_add_rx_frag(skb, i, desc_cb->priv, desc_cb->page_offset + pull_len, size - pull_len, truesize); @@ -2076,13 +2083,13 @@ static void hns3_rx_checksum(struct hns3_enet_ring *ring, struct sk_buff *skb, return; /* check if hardware has done checksum */ - if (!hnae_get_bit(bd_base_info, HNS3_RXD_L3L4P_B)) + if (!hnae3_get_bit(bd_base_info, HNS3_RXD_L3L4P_B)) return; - if (unlikely(hnae_get_bit(l234info, HNS3_RXD_L3E_B) || - hnae_get_bit(l234info, HNS3_RXD_L4E_B) || - hnae_get_bit(l234info, HNS3_RXD_OL3E_B) || - hnae_get_bit(l234info, HNS3_RXD_OL4E_B))) { + if (unlikely(hnae3_get_bit(l234info, HNS3_RXD_L3E_B) || + hnae3_get_bit(l234info, HNS3_RXD_L4E_B) || + hnae3_get_bit(l234info, HNS3_RXD_OL3E_B) || + hnae3_get_bit(l234info, HNS3_RXD_OL4E_B))) { netdev_err(netdev, "L3/L4 error pkt\n"); u64_stats_update_begin(&ring->syncp); ring->stats.l3l4_csum_err++; @@ -2091,23 +2098,24 @@ static void hns3_rx_checksum(struct hns3_enet_ring *ring, struct sk_buff *skb, return; } - l3_type = hnae_get_field(l234info, HNS3_RXD_L3ID_M, - HNS3_RXD_L3ID_S); - l4_type = hnae_get_field(l234info, HNS3_RXD_L4ID_M, - HNS3_RXD_L4ID_S); + l3_type = hnae3_get_field(l234info, HNS3_RXD_L3ID_M, + HNS3_RXD_L3ID_S); + l4_type = hnae3_get_field(l234info, HNS3_RXD_L4ID_M, + HNS3_RXD_L4ID_S); - ol4_type = hnae_get_field(l234info, HNS3_RXD_OL4ID_M, HNS3_RXD_OL4ID_S); + ol4_type = hnae3_get_field(l234info, HNS3_RXD_OL4ID_M, + HNS3_RXD_OL4ID_S); switch (ol4_type) { case HNS3_OL4_TYPE_MAC_IN_UDP: case HNS3_OL4_TYPE_NVGRE: skb->csum_level = 1; case HNS3_OL4_TYPE_NO_TUN: /* Can checksum ipv4 or ipv6 + UDP/TCP/SCTP packets */ - if (l3_type == HNS3_L3_TYPE_IPV4 || - (l3_type == HNS3_L3_TYPE_IPV6 && - (l4_type == HNS3_L4_TYPE_UDP || - l4_type == HNS3_L4_TYPE_TCP || - l4_type == HNS3_L4_TYPE_SCTP))) + if ((l3_type == HNS3_L3_TYPE_IPV4 || + l3_type == HNS3_L3_TYPE_IPV6) && + (l4_type == HNS3_L4_TYPE_UDP || + l4_type == HNS3_L4_TYPE_TCP || + l4_type == HNS3_L4_TYPE_SCTP)) skb->ip_summed = CHECKSUM_UNNECESSARY; break; } @@ -2135,8 +2143,8 @@ static u16 hns3_parse_vlan_tag(struct hns3_enet_ring *ring, #define HNS3_STRP_OUTER_VLAN 0x1 #define HNS3_STRP_INNER_VLAN 0x2 - switch (hnae_get_field(l234info, HNS3_RXD_STRP_TAGP_M, - HNS3_RXD_STRP_TAGP_S)) { + switch (hnae3_get_field(l234info, HNS3_RXD_STRP_TAGP_M, + HNS3_RXD_STRP_TAGP_S)) { case HNS3_STRP_OUTER_VLAN: 
vlan_tag = le16_to_cpu(desc->rx.ot_vlan_tag); break; @@ -2174,7 +2182,7 @@ static int hns3_handle_rx_bd(struct hns3_enet_ring *ring, bd_base_info = le32_to_cpu(desc->rx.bd_base_info); /* Check valid BD */ - if (!hnae_get_bit(bd_base_info, HNS3_RXD_VLD_B)) + if (unlikely(!hnae3_get_bit(bd_base_info, HNS3_RXD_VLD_B))) return -EFAULT; va = (unsigned char *)desc_cb->buf + desc_cb->page_offset; @@ -2229,7 +2237,7 @@ static int hns3_handle_rx_bd(struct hns3_enet_ring *ring, hns3_nic_reuse_page(skb, 0, ring, pull_len, desc_cb); ring_ptr_move_fw(ring, next_to_clean); - while (!hnae_get_bit(bd_base_info, HNS3_RXD_FE_B)) { + while (!hnae3_get_bit(bd_base_info, HNS3_RXD_FE_B)) { desc = &ring->desc[ring->next_to_clean]; desc_cb = &ring->desc_cb[ring->next_to_clean]; bd_base_info = le32_to_cpu(desc->rx.bd_base_info); @@ -2257,7 +2265,7 @@ static int hns3_handle_rx_bd(struct hns3_enet_ring *ring, vlan_tag); } - if (unlikely(!hnae_get_bit(bd_base_info, HNS3_RXD_VLD_B))) { + if (unlikely(!hnae3_get_bit(bd_base_info, HNS3_RXD_VLD_B))) { netdev_err(netdev, "no valid bd,%016llx,%016llx\n", ((u64 *)desc)[0], ((u64 *)desc)[1]); u64_stats_update_begin(&ring->syncp); @@ -2269,7 +2277,7 @@ static int hns3_handle_rx_bd(struct hns3_enet_ring *ring, } if (unlikely((!desc->rx.pkt_len) || - hnae_get_bit(l234info, HNS3_RXD_TRUNCAT_B))) { + hnae3_get_bit(l234info, HNS3_RXD_TRUNCAT_B))) { netdev_err(netdev, "truncated pkt\n"); u64_stats_update_begin(&ring->syncp); ring->stats.err_pkt_len++; @@ -2279,7 +2287,7 @@ static int hns3_handle_rx_bd(struct hns3_enet_ring *ring, return -EFAULT; } - if (unlikely(hnae_get_bit(l234info, HNS3_RXD_L2E_B))) { + if (unlikely(hnae3_get_bit(l234info, HNS3_RXD_L2E_B))) { netdev_err(netdev, "L2 error pkt\n"); u64_stats_update_begin(&ring->syncp); ring->stats.l2_err++; @@ -2532,10 +2540,10 @@ static int hns3_get_vector_ring_chain(struct hns3_enet_tqp_vector *tqp_vector, tx_ring = tqp_vector->tx_group.ring; if (tx_ring) { cur_chain->tqp_index = tx_ring->tqp->tqp_index; - hnae_set_bit(cur_chain->flag, HNAE3_RING_TYPE_B, - HNAE3_RING_TYPE_TX); - hnae_set_field(cur_chain->int_gl_idx, HNAE3_RING_GL_IDX_M, - HNAE3_RING_GL_IDX_S, HNAE3_RING_GL_TX); + hnae3_set_bit(cur_chain->flag, HNAE3_RING_TYPE_B, + HNAE3_RING_TYPE_TX); + hnae3_set_field(cur_chain->int_gl_idx, HNAE3_RING_GL_IDX_M, + HNAE3_RING_GL_IDX_S, HNAE3_RING_GL_TX); cur_chain->next = NULL; @@ -2549,12 +2557,12 @@ static int hns3_get_vector_ring_chain(struct hns3_enet_tqp_vector *tqp_vector, cur_chain->next = chain; chain->tqp_index = tx_ring->tqp->tqp_index; - hnae_set_bit(chain->flag, HNAE3_RING_TYPE_B, - HNAE3_RING_TYPE_TX); - hnae_set_field(chain->int_gl_idx, - HNAE3_RING_GL_IDX_M, - HNAE3_RING_GL_IDX_S, - HNAE3_RING_GL_TX); + hnae3_set_bit(chain->flag, HNAE3_RING_TYPE_B, + HNAE3_RING_TYPE_TX); + hnae3_set_field(chain->int_gl_idx, + HNAE3_RING_GL_IDX_M, + HNAE3_RING_GL_IDX_S, + HNAE3_RING_GL_TX); cur_chain = chain; } @@ -2564,10 +2572,10 @@ static int hns3_get_vector_ring_chain(struct hns3_enet_tqp_vector *tqp_vector, if (!tx_ring && rx_ring) { cur_chain->next = NULL; cur_chain->tqp_index = rx_ring->tqp->tqp_index; - hnae_set_bit(cur_chain->flag, HNAE3_RING_TYPE_B, - HNAE3_RING_TYPE_RX); - hnae_set_field(cur_chain->int_gl_idx, HNAE3_RING_GL_IDX_M, - HNAE3_RING_GL_IDX_S, HNAE3_RING_GL_RX); + hnae3_set_bit(cur_chain->flag, HNAE3_RING_TYPE_B, + HNAE3_RING_TYPE_RX); + hnae3_set_field(cur_chain->int_gl_idx, HNAE3_RING_GL_IDX_M, + HNAE3_RING_GL_IDX_S, HNAE3_RING_GL_RX); rx_ring = rx_ring->next; } @@ -2579,10 +2587,10 @@ static int 
hns3_get_vector_ring_chain(struct hns3_enet_tqp_vector *tqp_vector, cur_chain->next = chain; chain->tqp_index = rx_ring->tqp->tqp_index; - hnae_set_bit(chain->flag, HNAE3_RING_TYPE_B, - HNAE3_RING_TYPE_RX); - hnae_set_field(chain->int_gl_idx, HNAE3_RING_GL_IDX_M, - HNAE3_RING_GL_IDX_S, HNAE3_RING_GL_RX); + hnae3_set_bit(chain->flag, HNAE3_RING_TYPE_B, + HNAE3_RING_TYPE_RX); + hnae3_set_field(chain->int_gl_idx, HNAE3_RING_GL_IDX_M, + HNAE3_RING_GL_IDX_S, HNAE3_RING_GL_RX); cur_chain = chain; @@ -2745,10 +2753,6 @@ static int hns3_nic_uninit_vector_data(struct hns3_nic_priv *priv) if (ret) return ret; - ret = h->ae_algo->ops->put_vector(h, tqp_vector->vector_irq); - if (ret) - return ret; - hns3_free_vector_ring_chain(tqp_vector, &vector_ring_chain); if (priv->tqp_vector[i].irq_init_flag == HNS3_VECTOR_INITED) { @@ -2809,7 +2813,7 @@ static int hns3_ring_get_cfg(struct hnae3_queue *q, struct hns3_nic_priv *priv, ring->io_base = q->io_base; } - hnae_set_bit(ring->flag, HNAE3_RING_TYPE_B, ring_type); + hnae3_set_bit(ring->flag, HNAE3_RING_TYPE_B, ring_type); ring->tqp = q; ring->desc = NULL; @@ -3081,7 +3085,6 @@ static int hns3_client_init(struct hnae3_handle *handle) priv->dev = &pdev->dev; priv->netdev = netdev; priv->ae_handle = handle; - priv->ae_handle->reset_level = HNAE3_NONE_RESET; priv->ae_handle->last_reset_time = jiffies; priv->tx_timeout_count = 0; @@ -3102,6 +3105,11 @@ static int hns3_client_init(struct hnae3_handle *handle) /* Carrier off reporting is important to ethtool even BEFORE open */ netif_carrier_off(netdev); + if (handle->flags & HNAE3_SUPPORT_VF) + handle->reset_level = HNAE3_VF_RESET; + else + handle->reset_level = HNAE3_FUNC_RESET; + ret = hns3_get_ring_config(priv); if (ret) { ret = -ENOMEM; @@ -3208,7 +3216,6 @@ static int hns3_client_setup_tc(struct hnae3_handle *handle, u8 tc) struct net_device *ndev = kinfo->netdev; bool if_running; int ret; - u8 i; if (tc > HNAE3_MAX_TC) return -EINVAL; @@ -3218,10 +3225,6 @@ static int hns3_client_setup_tc(struct hnae3_handle *handle, u8 tc) if_running = netif_running(ndev); - ret = netdev_set_num_tc(ndev, tc); - if (ret) - return ret; - if (if_running) { (void)hns3_nic_net_stop(ndev); msleep(100); @@ -3232,27 +3235,6 @@ static int hns3_client_setup_tc(struct hnae3_handle *handle, u8 tc) if (ret) goto err_out; - if (tc <= 1) { - netdev_reset_tc(ndev); - goto out; - } - - for (i = 0; i < HNAE3_MAX_TC; i++) { - struct hnae3_tc_info *tc_info = &kinfo->tc_info[i]; - - if (tc_info->enable) - netdev_set_tc_queue(ndev, - tc_info->tc, - tc_info->tqp_count, - tc_info->tqp_offset); - } - - for (i = 0; i < HNAE3_MAX_USER_PRIO; i++) { - netdev_set_prio_tc_map(ndev, i, - kinfo->prio_tc[i]); - } - -out: ret = hns3_nic_set_real_num_queue(ndev); err_out: @@ -3418,7 +3400,7 @@ static int hns3_reset_notify_down_enet(struct hnae3_handle *handle) struct net_device *ndev = kinfo->netdev; if (!netif_running(ndev)) - return -EIO; + return 0; return hns3_nic_net_stop(ndev); } @@ -3458,10 +3440,6 @@ static int hns3_reset_notify_init_enet(struct hnae3_handle *handle) /* Carrier off reporting is important to ethtool even BEFORE open */ netif_carrier_off(netdev); - ret = hns3_get_ring_config(priv); - if (ret) - return ret; - ret = hns3_nic_init_vector_data(priv); if (ret) return ret; @@ -3493,10 +3471,6 @@ static int hns3_reset_notify_uninit_enet(struct hnae3_handle *handle) if (ret) netdev_err(netdev, "uninit ring error\n"); - hns3_put_ring_config(priv); - - priv->ring_data = NULL; - hns3_uninit_mac_addr(netdev); return ret; diff --git 
a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h index 3b083d5ae9ce..bf9aa02be994 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h @@ -499,7 +499,6 @@ struct hns3_enet_tqp_vector { u16 num_tqps; /* total number of tqps in TQP vector */ - cpumask_t affinity_mask; char name[HNAE3_INT_NAME_LEN]; /* when 0 should adjust interrupt coalesce parameter */ @@ -591,7 +590,7 @@ static inline void hns3_write_reg(void __iomem *base, u32 reg, u32 value) #define hns3_write_dev(a, reg, value) \ hns3_write_reg((a)->io_base, (reg), (value)) -#define hnae_queue_xmit(tqp, buf_num) writel_relaxed(buf_num, \ +#define hnae3_queue_xmit(tqp, buf_num) writel_relaxed(buf_num, \ (tqp)->io_base + HNS3_RING_TX_RING_TAIL_REG) #define ring_to_dev(ring) (&(ring)->tqp->handle->pdev->dev) @@ -601,9 +600,9 @@ static inline void hns3_write_reg(void __iomem *base, u32 reg, u32 value) #define tx_ring_data(priv, idx) ((priv)->ring_data[idx]) -#define hnae_buf_size(_ring) ((_ring)->buf_size) -#define hnae_page_order(_ring) (get_order(hnae_buf_size(_ring))) -#define hnae_page_size(_ring) (PAGE_SIZE << hnae_page_order(_ring)) +#define hnae3_buf_size(_ring) ((_ring)->buf_size) +#define hnae3_page_order(_ring) (get_order(hnae3_buf_size(_ring))) +#define hnae3_page_size(_ring) (PAGE_SIZE << hnae3_page_order(_ring)) /* iterator for handling rings in ring group */ #define hns3_for_each_ring(pos, head) \ diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c index 40c0425b4023..11620e003a8e 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c @@ -201,7 +201,9 @@ static u32 hns3_lb_check_rx_ring(struct hns3_nic_priv *priv, u32 budget) rx_group = &ring->tqp_vector->rx_group; pre_rx_pkt = rx_group->total_packets; + preempt_disable(); hns3_clean_rx_ring(ring, budget, hns3_lb_check_skb_data); + preempt_enable(); rcv_good_pkt_total += (rx_group->total_packets - pre_rx_pkt); rx_group->total_packets = pre_rx_pkt; diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c index c36d64710fa6..cf40afca66db 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c @@ -18,8 +18,7 @@ #include "hclge_main.h" #define hclge_is_csq(ring) ((ring)->flag & HCLGE_TYPE_CSQ) -#define hclge_ring_to_dma_dir(ring) (hclge_is_csq(ring) ? 
\ - DMA_TO_DEVICE : DMA_FROM_DEVICE) + #define cmq_ring_to_dev(ring) (&(ring)->dev->pdev->dev) static int hclge_ring_space(struct hclge_cmq_ring *ring) @@ -46,31 +45,24 @@ static int hclge_alloc_cmd_desc(struct hclge_cmq_ring *ring) { int size = ring->desc_num * sizeof(struct hclge_desc); - ring->desc = kzalloc(size, GFP_KERNEL); + ring->desc = dma_zalloc_coherent(cmq_ring_to_dev(ring), + size, &ring->desc_dma_addr, + GFP_KERNEL); if (!ring->desc) return -ENOMEM; - ring->desc_dma_addr = dma_map_single(cmq_ring_to_dev(ring), ring->desc, - size, DMA_BIDIRECTIONAL); - if (dma_mapping_error(cmq_ring_to_dev(ring), ring->desc_dma_addr)) { - ring->desc_dma_addr = 0; - kfree(ring->desc); - ring->desc = NULL; - return -ENOMEM; - } - return 0; } static void hclge_free_cmd_desc(struct hclge_cmq_ring *ring) { - dma_unmap_single(cmq_ring_to_dev(ring), ring->desc_dma_addr, - ring->desc_num * sizeof(ring->desc[0]), - DMA_BIDIRECTIONAL); + int size = ring->desc_num * sizeof(struct hclge_desc); - ring->desc_dma_addr = 0; - kfree(ring->desc); - ring->desc = NULL; + if (ring->desc) { + dma_free_coherent(cmq_ring_to_dev(ring), size, + ring->desc, ring->desc_dma_addr); + ring->desc = NULL; + } } static int hclge_alloc_cmd_queue(struct hclge_dev *hdev, int ring_type) @@ -111,8 +103,6 @@ void hclge_cmd_setup_basic_desc(struct hclge_desc *desc, if (is_read) desc->flag |= cpu_to_le16(HCLGE_CMD_FLAG_WR); - else - desc->flag &= cpu_to_le16(~HCLGE_CMD_FLAG_WR); } static void hclge_cmd_config_regs(struct hclge_cmq_ring *ring) @@ -123,24 +113,24 @@ static void hclge_cmd_config_regs(struct hclge_cmq_ring *ring) if (ring->flag == HCLGE_TYPE_CSQ) { hclge_write_dev(hw, HCLGE_NIC_CSQ_BASEADDR_L_REG, - (u32)dma); + lower_32_bits(dma)); hclge_write_dev(hw, HCLGE_NIC_CSQ_BASEADDR_H_REG, - (u32)((dma >> 31) >> 1)); + upper_32_bits(dma)); hclge_write_dev(hw, HCLGE_NIC_CSQ_DEPTH_REG, (ring->desc_num >> HCLGE_NIC_CMQ_DESC_NUM_S) | HCLGE_NIC_CMQ_ENABLE); - hclge_write_dev(hw, HCLGE_NIC_CSQ_TAIL_REG, 0); hclge_write_dev(hw, HCLGE_NIC_CSQ_HEAD_REG, 0); + hclge_write_dev(hw, HCLGE_NIC_CSQ_TAIL_REG, 0); } else { hclge_write_dev(hw, HCLGE_NIC_CRQ_BASEADDR_L_REG, - (u32)dma); + lower_32_bits(dma)); hclge_write_dev(hw, HCLGE_NIC_CRQ_BASEADDR_H_REG, - (u32)((dma >> 31) >> 1)); + upper_32_bits(dma)); hclge_write_dev(hw, HCLGE_NIC_CRQ_DEPTH_REG, (ring->desc_num >> HCLGE_NIC_CMQ_DESC_NUM_S) | HCLGE_NIC_CMQ_ENABLE); - hclge_write_dev(hw, HCLGE_NIC_CRQ_TAIL_REG, 0); hclge_write_dev(hw, HCLGE_NIC_CRQ_HEAD_REG, 0); + hclge_write_dev(hw, HCLGE_NIC_CRQ_TAIL_REG, 0); } } @@ -152,33 +142,22 @@ static void hclge_cmd_init_regs(struct hclge_hw *hw) static int hclge_cmd_csq_clean(struct hclge_hw *hw) { - struct hclge_dev *hdev = (struct hclge_dev *)hw->back; + struct hclge_dev *hdev = container_of(hw, struct hclge_dev, hw); struct hclge_cmq_ring *csq = &hw->cmq.csq; - u16 ntc = csq->next_to_clean; - struct hclge_desc *desc; - int clean = 0; u32 head; + int clean; - desc = &csq->desc[ntc]; head = hclge_read_dev(hw, HCLGE_NIC_CSQ_HEAD_REG); rmb(); /* Make sure head is ready before touch any data */ if (!is_valid_csq_clean_head(csq, head)) { - dev_warn(&hdev->pdev->dev, "wrong head (%d, %d-%d)\n", head, - csq->next_to_use, csq->next_to_clean); + dev_warn(&hdev->pdev->dev, "wrong cmd head (%d, %d-%d)\n", head, + csq->next_to_use, csq->next_to_clean); return 0; } - while (head != ntc) { - memset(desc, 0, sizeof(*desc)); - ntc++; - if (ntc == csq->desc_num) - ntc = 0; - desc = &csq->desc[ntc]; - clean++; - } - csq->next_to_clean = ntc; - + clean = (head - 
csq->next_to_clean + csq->desc_num) % csq->desc_num; + csq->next_to_clean = head; return clean; } @@ -216,7 +195,7 @@ static bool hclge_is_special_opcode(u16 opcode) **/ int hclge_cmd_send(struct hclge_hw *hw, struct hclge_desc *desc, int num) { - struct hclge_dev *hdev = (struct hclge_dev *)hw->back; + struct hclge_dev *hdev = container_of(hw, struct hclge_dev, hw); struct hclge_desc *desc_to_use; bool complete = false; u32 timeout = 0; @@ -227,7 +206,8 @@ int hclge_cmd_send(struct hclge_hw *hw, struct hclge_desc *desc, int num) spin_lock_bh(&hw->cmq.csq.lock); - if (num > hclge_ring_space(&hw->cmq.csq)) { + if (num > hclge_ring_space(&hw->cmq.csq) || + test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state)) { spin_unlock_bh(&hw->cmq.csq.lock); return -EBUSY; } @@ -256,33 +236,34 @@ int hclge_cmd_send(struct hclge_hw *hw, struct hclge_desc *desc, int num) */ if (HCLGE_SEND_SYNC(le16_to_cpu(desc->flag))) { do { - if (hclge_cmd_csq_done(hw)) + if (hclge_cmd_csq_done(hw)) { + complete = true; break; + } udelay(1); timeout++; } while (timeout < hw->cmq.tx_timeout); } - if (hclge_cmd_csq_done(hw)) { - complete = true; + if (!complete) { + retval = -EAGAIN; + } else { handle = 0; while (handle < num) { /* Get the result of hardware write back */ desc_to_use = &hw->cmq.csq.desc[ntc]; desc[handle] = *desc_to_use; - pr_debug("Get cmd desc:\n"); if (likely(!hclge_is_special_opcode(opcode))) desc_ret = le16_to_cpu(desc[handle].retval); else desc_ret = le16_to_cpu(desc[0].retval); - if ((enum hclge_cmd_return_status)desc_ret == - HCLGE_CMD_EXEC_SUCCESS) + if (desc_ret == HCLGE_CMD_EXEC_SUCCESS) retval = 0; else retval = -EIO; - hw->cmq.last_status = (enum hclge_cmd_status)desc_ret; + hw->cmq.last_status = desc_ret; ntc++; handle++; if (ntc == hw->cmq.csq.desc_num) @@ -290,9 +271,6 @@ int hclge_cmd_send(struct hclge_hw *hw, struct hclge_desc *desc, int num) } } - if (!complete) - retval = -EAGAIN; - /* Clean the command send queue */ handle = hclge_cmd_csq_clean(hw); if (handle != num) { @@ -369,6 +347,7 @@ int hclge_cmd_init(struct hclge_dev *hdev) spin_lock_init(&hdev->hw.cmq.crq.lock); hclge_cmd_init_regs(&hdev->hw); + clear_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state); ret = hclge_cmd_query_firmware_version(&hdev->hw, &version); if (ret) { diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h index d9aaa76c76eb..656c3e622ec8 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h @@ -571,7 +571,8 @@ struct hclge_config_auto_neg_cmd { struct hclge_config_max_frm_size_cmd { __le16 max_frm_size; - u8 rsv[22]; + u8 min_frm_size; + u8 rsv[21]; }; enum hclge_mac_vlan_tbl_opcode { diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c index d318d35e598f..266c68607e53 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c @@ -939,8 +939,8 @@ static int hclge_query_pf_resource(struct hclge_dev *hdev) if (hnae3_dev_roce_supported(hdev)) { hdev->num_roce_msi = - hnae_get_field(__le16_to_cpu(req->pf_intr_vector_number), - HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S); + hnae3_get_field(__le16_to_cpu(req->pf_intr_vector_number), + HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S); /* PF should have NIC vectors and Roce vectors, * NIC vectors are queued before Roce vectors. 
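The hclge_cmd_csq_clean() rework in hclge_cmd.c above replaces the descriptor-by-descriptor loop with a single modular computation of how many command descriptors lie between the software next_to_clean index and the hardware head pointer on the circular CSQ. A small sketch of that arithmetic, with desc_num assumed to be the ring size as in the driver:

	/* number of completed descriptors on a ring of desc_num entries */
	static unsigned int csq_clean_count(unsigned int head,
					    unsigned int next_to_clean,
					    unsigned int desc_num)
	{
		return (head - next_to_clean + desc_num) % desc_num;
	}

	/* e.g. desc_num = 1024, next_to_clean = 1020, head = 4:
	 * (4 - 1020 + 1024) % 1024 = 8 descriptors, wrapping past the ring end.
	 */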
@@ -948,8 +948,8 @@ static int hclge_query_pf_resource(struct hclge_dev *hdev) hdev->num_msi = hdev->num_roce_msi + HCLGE_ROCE_VECTOR_OFFSET; } else { hdev->num_msi = - hnae_get_field(__le16_to_cpu(req->pf_intr_vector_number), - HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S); + hnae3_get_field(__le16_to_cpu(req->pf_intr_vector_number), + HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S); } return 0; @@ -1038,38 +1038,38 @@ static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc) req = (struct hclge_cfg_param_cmd *)desc[0].data; /* get the configuration */ - cfg->vmdq_vport_num = hnae_get_field(__le32_to_cpu(req->param[0]), - HCLGE_CFG_VMDQ_M, - HCLGE_CFG_VMDQ_S); - cfg->tc_num = hnae_get_field(__le32_to_cpu(req->param[0]), - HCLGE_CFG_TC_NUM_M, HCLGE_CFG_TC_NUM_S); - cfg->tqp_desc_num = hnae_get_field(__le32_to_cpu(req->param[0]), - HCLGE_CFG_TQP_DESC_N_M, - HCLGE_CFG_TQP_DESC_N_S); - - cfg->phy_addr = hnae_get_field(__le32_to_cpu(req->param[1]), - HCLGE_CFG_PHY_ADDR_M, - HCLGE_CFG_PHY_ADDR_S); - cfg->media_type = hnae_get_field(__le32_to_cpu(req->param[1]), - HCLGE_CFG_MEDIA_TP_M, - HCLGE_CFG_MEDIA_TP_S); - cfg->rx_buf_len = hnae_get_field(__le32_to_cpu(req->param[1]), - HCLGE_CFG_RX_BUF_LEN_M, - HCLGE_CFG_RX_BUF_LEN_S); + cfg->vmdq_vport_num = hnae3_get_field(__le32_to_cpu(req->param[0]), + HCLGE_CFG_VMDQ_M, + HCLGE_CFG_VMDQ_S); + cfg->tc_num = hnae3_get_field(__le32_to_cpu(req->param[0]), + HCLGE_CFG_TC_NUM_M, HCLGE_CFG_TC_NUM_S); + cfg->tqp_desc_num = hnae3_get_field(__le32_to_cpu(req->param[0]), + HCLGE_CFG_TQP_DESC_N_M, + HCLGE_CFG_TQP_DESC_N_S); + + cfg->phy_addr = hnae3_get_field(__le32_to_cpu(req->param[1]), + HCLGE_CFG_PHY_ADDR_M, + HCLGE_CFG_PHY_ADDR_S); + cfg->media_type = hnae3_get_field(__le32_to_cpu(req->param[1]), + HCLGE_CFG_MEDIA_TP_M, + HCLGE_CFG_MEDIA_TP_S); + cfg->rx_buf_len = hnae3_get_field(__le32_to_cpu(req->param[1]), + HCLGE_CFG_RX_BUF_LEN_M, + HCLGE_CFG_RX_BUF_LEN_S); /* get mac_address */ mac_addr_tmp = __le32_to_cpu(req->param[2]); - mac_addr_tmp_high = hnae_get_field(__le32_to_cpu(req->param[3]), - HCLGE_CFG_MAC_ADDR_H_M, - HCLGE_CFG_MAC_ADDR_H_S); + mac_addr_tmp_high = hnae3_get_field(__le32_to_cpu(req->param[3]), + HCLGE_CFG_MAC_ADDR_H_M, + HCLGE_CFG_MAC_ADDR_H_S); mac_addr_tmp |= (mac_addr_tmp_high << 31) << 1; - cfg->default_speed = hnae_get_field(__le32_to_cpu(req->param[3]), - HCLGE_CFG_DEFAULT_SPEED_M, - HCLGE_CFG_DEFAULT_SPEED_S); - cfg->rss_size_max = hnae_get_field(__le32_to_cpu(req->param[3]), - HCLGE_CFG_RSS_SIZE_M, - HCLGE_CFG_RSS_SIZE_S); + cfg->default_speed = hnae3_get_field(__le32_to_cpu(req->param[3]), + HCLGE_CFG_DEFAULT_SPEED_M, + HCLGE_CFG_DEFAULT_SPEED_S); + cfg->rss_size_max = hnae3_get_field(__le32_to_cpu(req->param[3]), + HCLGE_CFG_RSS_SIZE_M, + HCLGE_CFG_RSS_SIZE_S); for (i = 0; i < ETH_ALEN; i++) cfg->mac_addr[i] = (mac_addr_tmp >> (8 * i)) & 0xff; @@ -1077,9 +1077,9 @@ static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc) req = (struct hclge_cfg_param_cmd *)desc[1].data; cfg->numa_node_map = __le32_to_cpu(req->param[0]); - cfg->speed_ability = hnae_get_field(__le32_to_cpu(req->param[1]), - HCLGE_CFG_SPEED_ABILITY_M, - HCLGE_CFG_SPEED_ABILITY_S); + cfg->speed_ability = hnae3_get_field(__le32_to_cpu(req->param[1]), + HCLGE_CFG_SPEED_ABILITY_M, + HCLGE_CFG_SPEED_ABILITY_S); } /* hclge_get_cfg: query the static parameter from flash @@ -1098,11 +1098,11 @@ static int hclge_get_cfg(struct hclge_dev *hdev, struct hclge_cfg *hcfg) req = (struct hclge_cfg_param_cmd *)desc[i].data; hclge_cmd_setup_basic_desc(&desc[i], 
HCLGE_OPC_GET_CFG_PARAM, true); - hnae_set_field(offset, HCLGE_CFG_OFFSET_M, - HCLGE_CFG_OFFSET_S, i * HCLGE_CFG_RD_LEN_BYTES); + hnae3_set_field(offset, HCLGE_CFG_OFFSET_M, + HCLGE_CFG_OFFSET_S, i * HCLGE_CFG_RD_LEN_BYTES); /* Len should be united by 4 bytes when send to hardware */ - hnae_set_field(offset, HCLGE_CFG_RD_LEN_M, HCLGE_CFG_RD_LEN_S, - HCLGE_CFG_RD_LEN_BYTES / HCLGE_CFG_RD_LEN_UNIT); + hnae3_set_field(offset, HCLGE_CFG_RD_LEN_M, HCLGE_CFG_RD_LEN_S, + HCLGE_CFG_RD_LEN_BYTES / HCLGE_CFG_RD_LEN_UNIT); req->offset = cpu_to_le32(offset); } @@ -1189,7 +1189,7 @@ static int hclge_configure(struct hclge_dev *hdev) /* Currently not support uncontiuous tc */ for (i = 0; i < hdev->tm_info.num_tc; i++) - hnae_set_bit(hdev->hw_tc_map, i, 1); + hnae3_set_bit(hdev->hw_tc_map, i, 1); hdev->tx_sch_mode = HCLGE_FLAG_TC_BASE_SCH_MODE; @@ -1208,13 +1208,13 @@ static int hclge_config_tso(struct hclge_dev *hdev, int tso_mss_min, req = (struct hclge_cfg_tso_status_cmd *)desc.data; tso_mss = 0; - hnae_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M, - HCLGE_TSO_MSS_MIN_S, tso_mss_min); + hnae3_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M, + HCLGE_TSO_MSS_MIN_S, tso_mss_min); req->tso_mss_min = cpu_to_le16(tso_mss); tso_mss = 0; - hnae_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M, - HCLGE_TSO_MSS_MIN_S, tso_mss_max); + hnae3_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M, + HCLGE_TSO_MSS_MIN_S, tso_mss_max); req->tso_mss_max = cpu_to_le16(tso_mss); return hclge_cmd_send(&hdev->hw, &desc, 1); @@ -1834,8 +1834,6 @@ static int hclge_rx_priv_buf_alloc(struct hclge_dev *hdev, return 0; } -#define HCLGE_PRIV_ENABLE(a) ((a) > 0 ? 1 : 0) - static int hclge_rx_priv_wl_config(struct hclge_dev *hdev, struct hclge_pkt_buf_alloc *buf_alloc) { @@ -1863,13 +1861,11 @@ static int hclge_rx_priv_wl_config(struct hclge_dev *hdev, req->tc_wl[j].high = cpu_to_le16(priv->wl.high >> HCLGE_BUF_UNIT_S); req->tc_wl[j].high |= - cpu_to_le16(HCLGE_PRIV_ENABLE(priv->wl.high) << - HCLGE_RX_PRIV_EN_B); + cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B)); req->tc_wl[j].low = cpu_to_le16(priv->wl.low >> HCLGE_BUF_UNIT_S); req->tc_wl[j].low |= - cpu_to_le16(HCLGE_PRIV_ENABLE(priv->wl.low) << - HCLGE_RX_PRIV_EN_B); + cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B)); } } @@ -1911,13 +1907,11 @@ static int hclge_common_thrd_config(struct hclge_dev *hdev, req->com_thrd[j].high = cpu_to_le16(tc->high >> HCLGE_BUF_UNIT_S); req->com_thrd[j].high |= - cpu_to_le16(HCLGE_PRIV_ENABLE(tc->high) << - HCLGE_RX_PRIV_EN_B); + cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B)); req->com_thrd[j].low = cpu_to_le16(tc->low >> HCLGE_BUF_UNIT_S); req->com_thrd[j].low |= - cpu_to_le16(HCLGE_PRIV_ENABLE(tc->low) << - HCLGE_RX_PRIV_EN_B); + cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B)); } } @@ -1943,14 +1937,10 @@ static int hclge_common_wl_config(struct hclge_dev *hdev, req = (struct hclge_rx_com_wl *)desc.data; req->com_wl.high = cpu_to_le16(buf->self.high >> HCLGE_BUF_UNIT_S); - req->com_wl.high |= - cpu_to_le16(HCLGE_PRIV_ENABLE(buf->self.high) << - HCLGE_RX_PRIV_EN_B); + req->com_wl.high |= cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B)); req->com_wl.low = cpu_to_le16(buf->self.low >> HCLGE_BUF_UNIT_S); - req->com_wl.low |= - cpu_to_le16(HCLGE_PRIV_ENABLE(buf->self.low) << - HCLGE_RX_PRIV_EN_B); + req->com_wl.low |= cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B)); ret = hclge_cmd_send(&hdev->hw, &desc, 1); if (ret) { @@ -2118,48 +2108,48 @@ int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex) hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_SPEED_DUP, false); - hnae_set_bit(req->speed_dup, HCLGE_CFG_DUPLEX_B, !!duplex); + 
hnae3_set_bit(req->speed_dup, HCLGE_CFG_DUPLEX_B, !!duplex); switch (speed) { case HCLGE_MAC_SPEED_10M: - hnae_set_field(req->speed_dup, HCLGE_CFG_SPEED_M, - HCLGE_CFG_SPEED_S, 6); + hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M, + HCLGE_CFG_SPEED_S, 6); break; case HCLGE_MAC_SPEED_100M: - hnae_set_field(req->speed_dup, HCLGE_CFG_SPEED_M, - HCLGE_CFG_SPEED_S, 7); + hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M, + HCLGE_CFG_SPEED_S, 7); break; case HCLGE_MAC_SPEED_1G: - hnae_set_field(req->speed_dup, HCLGE_CFG_SPEED_M, - HCLGE_CFG_SPEED_S, 0); + hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M, + HCLGE_CFG_SPEED_S, 0); break; case HCLGE_MAC_SPEED_10G: - hnae_set_field(req->speed_dup, HCLGE_CFG_SPEED_M, - HCLGE_CFG_SPEED_S, 1); + hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M, + HCLGE_CFG_SPEED_S, 1); break; case HCLGE_MAC_SPEED_25G: - hnae_set_field(req->speed_dup, HCLGE_CFG_SPEED_M, - HCLGE_CFG_SPEED_S, 2); + hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M, + HCLGE_CFG_SPEED_S, 2); break; case HCLGE_MAC_SPEED_40G: - hnae_set_field(req->speed_dup, HCLGE_CFG_SPEED_M, - HCLGE_CFG_SPEED_S, 3); + hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M, + HCLGE_CFG_SPEED_S, 3); break; case HCLGE_MAC_SPEED_50G: - hnae_set_field(req->speed_dup, HCLGE_CFG_SPEED_M, - HCLGE_CFG_SPEED_S, 4); + hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M, + HCLGE_CFG_SPEED_S, 4); break; case HCLGE_MAC_SPEED_100G: - hnae_set_field(req->speed_dup, HCLGE_CFG_SPEED_M, - HCLGE_CFG_SPEED_S, 5); + hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M, + HCLGE_CFG_SPEED_S, 5); break; default: dev_err(&hdev->pdev->dev, "invalid speed (%d)\n", speed); return -EINVAL; } - hnae_set_bit(req->mac_change_fec_en, HCLGE_CFG_MAC_SPEED_CHANGE_EN_B, - 1); + hnae3_set_bit(req->mac_change_fec_en, HCLGE_CFG_MAC_SPEED_CHANGE_EN_B, + 1); ret = hclge_cmd_send(&hdev->hw, &desc, 1); if (ret) { @@ -2201,9 +2191,9 @@ static int hclge_query_mac_an_speed_dup(struct hclge_dev *hdev, int *speed, return ret; } - *duplex = hnae_get_bit(req->an_syn_dup_speed, HCLGE_QUERY_DUPLEX_B); - speed_tmp = hnae_get_field(req->an_syn_dup_speed, HCLGE_QUERY_SPEED_M, - HCLGE_QUERY_SPEED_S); + *duplex = hnae3_get_bit(req->an_syn_dup_speed, HCLGE_QUERY_DUPLEX_B); + speed_tmp = hnae3_get_field(req->an_syn_dup_speed, HCLGE_QUERY_SPEED_M, + HCLGE_QUERY_SPEED_S); ret = hclge_parse_speed(speed_tmp, speed); if (ret) { @@ -2225,7 +2215,7 @@ static int hclge_set_autoneg_en(struct hclge_dev *hdev, bool enable) hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_AN_MODE, false); req = (struct hclge_config_auto_neg_cmd *)desc.data; - hnae_set_bit(flag, HCLGE_MAC_CFG_AN_EN_B, !!enable); + hnae3_set_bit(flag, HCLGE_MAC_CFG_AN_EN_B, !!enable); req->cfg_an_cmd_flag = cpu_to_le32(flag); ret = hclge_cmd_send(&hdev->hw, &desc, 1); @@ -2269,8 +2259,8 @@ static int hclge_set_default_mac_vlan_mask(struct hclge_dev *hdev, req = (struct hclge_mac_vlan_mask_entry_cmd *)desc.data; hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_MASK_SET, false); - hnae_set_bit(req->vlan_mask, HCLGE_VLAN_MASK_EN_B, - mask_vlan ? 1 : 0); + hnae3_set_bit(req->vlan_mask, HCLGE_VLAN_MASK_EN_B, + mask_vlan ? 
1 : 0); ether_addr_copy(req->mac_mask, mac_mask); status = hclge_cmd_send(&hdev->hw, &desc, 1); @@ -2505,7 +2495,7 @@ static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval) u32 cmdq_src_reg; /* fetch the events from their corresponding regs */ - rst_src_reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG); + rst_src_reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS); cmdq_src_reg = hclge_read_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG); /* Assumption: If by any chance reset and mailbox events are reported @@ -2517,12 +2507,14 @@ static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval) /* check for vector0 reset event sources */ if (BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) & rst_src_reg) { + set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state); set_bit(HNAE3_GLOBAL_RESET, &hdev->reset_pending); *clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B); return HCLGE_VECTOR0_EVENT_RST; } if (BIT(HCLGE_VECTOR0_CORERESET_INT_B) & rst_src_reg) { + set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state); set_bit(HNAE3_CORE_RESET, &hdev->reset_pending); *clearval = BIT(HCLGE_VECTOR0_CORERESET_INT_B); return HCLGE_VECTOR0_EVENT_RST; @@ -2614,6 +2606,12 @@ static irqreturn_t hclge_misc_irq_handle(int irq, void *data) static void hclge_free_vector(struct hclge_dev *hdev, int vector_id) { + if (hdev->vector_status[vector_id] == HCLGE_INVALID_VPORT) { + dev_warn(&hdev->pdev->dev, + "vector(vector_id %d) has been freed.\n", vector_id); + return; + } + hdev->vector_status[vector_id] = HCLGE_INVALID_VPORT; hdev->num_msi_left += 1; hdev->num_msi_used -= 1; @@ -2705,7 +2703,7 @@ static int hclge_reset_wait(struct hclge_dev *hdev) } val = hclge_read_dev(&hdev->hw, reg); - while (hnae_get_bit(val, reg_bit) && cnt < HCLGE_RESET_WAIT_CNT) { + while (hnae3_get_bit(val, reg_bit) && cnt < HCLGE_RESET_WAIT_CNT) { msleep(HCLGE_RESET_WATI_MS); val = hclge_read_dev(&hdev->hw, reg); cnt++; @@ -2727,8 +2725,7 @@ int hclge_func_reset_cmd(struct hclge_dev *hdev, int func_id) int ret; hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_RST_TRIGGER, false); - hnae_set_bit(req->mac_func_reset, HCLGE_CFG_RESET_MAC_B, 0); - hnae_set_bit(req->mac_func_reset, HCLGE_CFG_RESET_FUNC_B, 1); + hnae3_set_bit(req->mac_func_reset, HCLGE_CFG_RESET_FUNC_B, 1); req->fun_reset_vfid = func_id; ret = hclge_cmd_send(&hdev->hw, &desc, 1); @@ -2747,13 +2744,13 @@ static void hclge_do_reset(struct hclge_dev *hdev) switch (hdev->reset_type) { case HNAE3_GLOBAL_RESET: val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG); - hnae_set_bit(val, HCLGE_GLOBAL_RESET_BIT, 1); + hnae3_set_bit(val, HCLGE_GLOBAL_RESET_BIT, 1); hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val); dev_info(&pdev->dev, "Global Reset requested\n"); break; case HNAE3_CORE_RESET: val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG); - hnae_set_bit(val, HCLGE_CORE_RESET_BIT, 1); + hnae3_set_bit(val, HCLGE_CORE_RESET_BIT, 1); hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val); dev_info(&pdev->dev, "Core Reset requested\n"); break; @@ -2810,8 +2807,6 @@ static void hclge_clear_reset_cause(struct hclge_dev *hdev) clearval = BIT(HCLGE_VECTOR0_CORERESET_INT_B); break; default: - dev_warn(&hdev->pdev->dev, "Unsupported reset event to clear:%d", - hdev->reset_type); break; } @@ -2824,16 +2819,17 @@ static void hclge_clear_reset_cause(struct hclge_dev *hdev) static void hclge_reset(struct hclge_dev *hdev) { - /* perform reset of the stack & ae device for a client */ + struct hnae3_handle *handle; + /* perform reset of the stack & ae device for a client */ + 
handle = &hdev->vport[0].nic; + rtnl_lock(); hclge_notify_client(hdev, HNAE3_DOWN_CLIENT); if (!hclge_reset_wait(hdev)) { - rtnl_lock(); hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT); hclge_reset_ae_dev(hdev->ae_dev); hclge_notify_client(hdev, HNAE3_INIT_CLIENT); - rtnl_unlock(); hclge_clear_reset_cause(hdev); } else { @@ -2843,6 +2839,8 @@ static void hclge_reset(struct hclge_dev *hdev) } hclge_notify_client(hdev, HNAE3_UP_CLIENT); + handle->last_reset_time = jiffies; + rtnl_unlock(); } static void hclge_reset_event(struct hnae3_handle *handle) @@ -2855,8 +2853,13 @@ static void hclge_reset_event(struct hnae3_handle *handle) * know this if last reset request did not occur very recently (watchdog * timer = 5*HZ, let us check after sufficiently large time, say 4*5*Hz) * In case of new request we reset the "reset level" to PF reset. + * And if it is a repeat reset request of the most recent one then we + * want to make sure we throttle the reset request. Therefore, we will + * not allow it again before 3*HZ times. */ - if (time_after(jiffies, (handle->last_reset_time + 4 * 5 * HZ))) + if (time_before(jiffies, (handle->last_reset_time + 3 * HZ))) + return; + else if (time_after(jiffies, (handle->last_reset_time + 4 * 5 * HZ))) handle->reset_level = HNAE3_FUNC_RESET; dev_info(&hdev->pdev->dev, "received reset event , reset type is %d", @@ -2868,8 +2871,6 @@ static void hclge_reset_event(struct hnae3_handle *handle) if (handle->reset_level < HNAE3_GLOBAL_RESET) handle->reset_level++; - - handle->last_reset_time = jiffies; } static void hclge_reset_subtask(struct hclge_dev *hdev) @@ -3110,11 +3111,11 @@ static int hclge_set_rss_tc_mode(struct hclge_dev *hdev, u16 *tc_valid, for (i = 0; i < HCLGE_MAX_TC_NUM; i++) { u16 mode = 0; - hnae_set_bit(mode, HCLGE_RSS_TC_VALID_B, (tc_valid[i] & 0x1)); - hnae_set_field(mode, HCLGE_RSS_TC_SIZE_M, - HCLGE_RSS_TC_SIZE_S, tc_size[i]); - hnae_set_field(mode, HCLGE_RSS_TC_OFFSET_M, - HCLGE_RSS_TC_OFFSET_S, tc_offset[i]); + hnae3_set_bit(mode, HCLGE_RSS_TC_VALID_B, (tc_valid[i] & 0x1)); + hnae3_set_field(mode, HCLGE_RSS_TC_SIZE_M, + HCLGE_RSS_TC_SIZE_S, tc_size[i]); + hnae3_set_field(mode, HCLGE_RSS_TC_OFFSET_M, + HCLGE_RSS_TC_OFFSET_S, tc_offset[i]); req->rss_tc_mode[i] = cpu_to_le16(mode); } @@ -3491,16 +3492,16 @@ int hclge_bind_ring_with_vector(struct hclge_vport *vport, i = 0; for (node = ring_chain; node; node = node->next) { tqp_type_and_id = le16_to_cpu(req->tqp_type_and_id[i]); - hnae_set_field(tqp_type_and_id, HCLGE_INT_TYPE_M, - HCLGE_INT_TYPE_S, - hnae_get_bit(node->flag, HNAE3_RING_TYPE_B)); - hnae_set_field(tqp_type_and_id, HCLGE_TQP_ID_M, - HCLGE_TQP_ID_S, node->tqp_index); - hnae_set_field(tqp_type_and_id, HCLGE_INT_GL_IDX_M, - HCLGE_INT_GL_IDX_S, - hnae_get_field(node->int_gl_idx, - HNAE3_RING_GL_IDX_M, - HNAE3_RING_GL_IDX_S)); + hnae3_set_field(tqp_type_and_id, HCLGE_INT_TYPE_M, + HCLGE_INT_TYPE_S, + hnae3_get_bit(node->flag, HNAE3_RING_TYPE_B)); + hnae3_set_field(tqp_type_and_id, HCLGE_TQP_ID_M, + HCLGE_TQP_ID_S, node->tqp_index); + hnae3_set_field(tqp_type_and_id, HCLGE_INT_GL_IDX_M, + HCLGE_INT_GL_IDX_S, + hnae3_get_field(node->int_gl_idx, + HNAE3_RING_GL_IDX_M, + HNAE3_RING_GL_IDX_S)); req->tqp_type_and_id[i] = cpu_to_le16(tqp_type_and_id); if (++i >= HCLGE_VECTOR_ELEMENTS_PER_CMD) { req->int_cause_num = HCLGE_VECTOR_ELEMENTS_PER_CMD; @@ -3648,20 +3649,20 @@ static void hclge_cfg_mac_mode(struct hclge_dev *hdev, bool enable) int ret; hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, false); - hnae_set_bit(loop_en, 
HCLGE_MAC_TX_EN_B, enable); - hnae_set_bit(loop_en, HCLGE_MAC_RX_EN_B, enable); - hnae_set_bit(loop_en, HCLGE_MAC_PAD_TX_B, enable); - hnae_set_bit(loop_en, HCLGE_MAC_PAD_RX_B, enable); - hnae_set_bit(loop_en, HCLGE_MAC_1588_TX_B, 0); - hnae_set_bit(loop_en, HCLGE_MAC_1588_RX_B, 0); - hnae_set_bit(loop_en, HCLGE_MAC_APP_LP_B, 0); - hnae_set_bit(loop_en, HCLGE_MAC_LINE_LP_B, 0); - hnae_set_bit(loop_en, HCLGE_MAC_FCS_TX_B, enable); - hnae_set_bit(loop_en, HCLGE_MAC_RX_FCS_B, enable); - hnae_set_bit(loop_en, HCLGE_MAC_RX_FCS_STRIP_B, enable); - hnae_set_bit(loop_en, HCLGE_MAC_TX_OVERSIZE_TRUNCATE_B, enable); - hnae_set_bit(loop_en, HCLGE_MAC_RX_OVERSIZE_TRUNCATE_B, enable); - hnae_set_bit(loop_en, HCLGE_MAC_TX_UNDER_MIN_ERR_B, enable); + hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, enable); + hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, enable); + hnae3_set_bit(loop_en, HCLGE_MAC_PAD_TX_B, enable); + hnae3_set_bit(loop_en, HCLGE_MAC_PAD_RX_B, enable); + hnae3_set_bit(loop_en, HCLGE_MAC_1588_TX_B, 0); + hnae3_set_bit(loop_en, HCLGE_MAC_1588_RX_B, 0); + hnae3_set_bit(loop_en, HCLGE_MAC_APP_LP_B, 0); + hnae3_set_bit(loop_en, HCLGE_MAC_LINE_LP_B, 0); + hnae3_set_bit(loop_en, HCLGE_MAC_FCS_TX_B, enable); + hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_B, enable); + hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_STRIP_B, enable); + hnae3_set_bit(loop_en, HCLGE_MAC_TX_OVERSIZE_TRUNCATE_B, enable); + hnae3_set_bit(loop_en, HCLGE_MAC_RX_OVERSIZE_TRUNCATE_B, enable); + hnae3_set_bit(loop_en, HCLGE_MAC_TX_UNDER_MIN_ERR_B, enable); req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en); ret = hclge_cmd_send(&hdev->hw, &desc, 1); @@ -3689,7 +3690,7 @@ static int hclge_set_mac_loopback(struct hclge_dev *hdev, bool en) /* 2 Then setup the loopback flag */ loop_en = le32_to_cpu(req->txrx_pad_fcs_loop_en); - hnae_set_bit(loop_en, HCLGE_MAC_APP_LP_B, en ? 1 : 0); + hnae3_set_bit(loop_en, HCLGE_MAC_APP_LP_B, en ? 
1 : 0); req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en); @@ -3953,10 +3954,10 @@ static int hclge_set_mta_filter_mode(struct hclge_dev *hdev, req = (struct hclge_mta_filter_mode_cmd *)desc.data; hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MTA_MAC_MODE_CFG, false); - hnae_set_bit(req->dmac_sel_en, HCLGE_CFG_MTA_MAC_EN_B, - enable); - hnae_set_field(req->dmac_sel_en, HCLGE_CFG_MTA_MAC_SEL_M, - HCLGE_CFG_MTA_MAC_SEL_S, mta_mac_sel); + hnae3_set_bit(req->dmac_sel_en, HCLGE_CFG_MTA_MAC_EN_B, + enable); + hnae3_set_field(req->dmac_sel_en, HCLGE_CFG_MTA_MAC_SEL_M, + HCLGE_CFG_MTA_MAC_SEL_S, mta_mac_sel); ret = hclge_cmd_send(&hdev->hw, &desc, 1); if (ret) { @@ -3980,8 +3981,8 @@ int hclge_cfg_func_mta_filter(struct hclge_dev *hdev, req = (struct hclge_cfg_func_mta_filter_cmd *)desc.data; hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MTA_MAC_FUNC_CFG, false); - hnae_set_bit(req->accept, HCLGE_CFG_FUNC_MTA_ACCEPT_B, - enable); + hnae3_set_bit(req->accept, HCLGE_CFG_FUNC_MTA_ACCEPT_B, + enable); req->function_id = func_id; ret = hclge_cmd_send(&hdev->hw, &desc, 1); @@ -4007,10 +4008,10 @@ static int hclge_set_mta_table_item(struct hclge_vport *vport, req = (struct hclge_cfg_func_mta_item_cmd *)desc.data; hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MTA_TBL_ITEM_CFG, false); - hnae_set_bit(req->accept, HCLGE_CFG_MTA_ITEM_ACCEPT_B, enable); + hnae3_set_bit(req->accept, HCLGE_CFG_MTA_ITEM_ACCEPT_B, enable); - hnae_set_field(item_idx, HCLGE_CFG_MTA_ITEM_IDX_M, - HCLGE_CFG_MTA_ITEM_IDX_S, idx); + hnae3_set_field(item_idx, HCLGE_CFG_MTA_ITEM_IDX_M, + HCLGE_CFG_MTA_ITEM_IDX_S, idx); req->item_idx = cpu_to_le16(item_idx); ret = hclge_cmd_send(&hdev->hw, &desc, 1); @@ -4257,17 +4258,10 @@ int hclge_add_uc_addr_common(struct hclge_vport *vport, } memset(&req, 0, sizeof(req)); - hnae_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1); - hnae_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0); - hnae_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 0); - hnae_set_bit(req.mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 0); - - hnae_set_bit(egress_port, HCLGE_MAC_EPORT_SW_EN_B, 0); - hnae_set_bit(egress_port, HCLGE_MAC_EPORT_TYPE_B, 0); - hnae_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M, - HCLGE_MAC_EPORT_VFID_S, vport->vport_id); - hnae_set_field(egress_port, HCLGE_MAC_EPORT_PFID_M, - HCLGE_MAC_EPORT_PFID_S, 0); + hnae3_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1); + + hnae3_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M, + HCLGE_MAC_EPORT_VFID_S, vport->vport_id); req.egress_port = cpu_to_le16(egress_port); @@ -4318,8 +4312,8 @@ int hclge_rm_uc_addr_common(struct hclge_vport *vport, } memset(&req, 0, sizeof(req)); - hnae_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1); - hnae_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0); + hnae3_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1); + hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0); hclge_prepare_mac_addr(&req, addr); ret = hclge_remove_mac_vlan_tbl(vport, &req); @@ -4351,10 +4345,10 @@ int hclge_add_mc_addr_common(struct hclge_vport *vport, return -EINVAL; } memset(&req, 0, sizeof(req)); - hnae_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1); - hnae_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0); - hnae_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1); - hnae_set_bit(req.mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 0); + hnae3_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1); + hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0); + hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1); + hnae3_set_bit(req.mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 
0); hclge_prepare_mac_addr(&req, addr); status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true); if (!status) { @@ -4418,10 +4412,10 @@ int hclge_rm_mc_addr_common(struct hclge_vport *vport, } memset(&req, 0, sizeof(req)); - hnae_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1); - hnae_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0); - hnae_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1); - hnae_set_bit(req.mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 0); + hnae3_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1); + hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0); + hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1); + hnae3_set_bit(req.mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 0); hclge_prepare_mac_addr(&req, addr); status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true); if (!status) { @@ -4802,19 +4796,19 @@ static int hclge_set_vlan_tx_offload_cfg(struct hclge_vport *vport) req = (struct hclge_vport_vtag_tx_cfg_cmd *)desc.data; req->def_vlan_tag1 = cpu_to_le16(vcfg->default_tag1); req->def_vlan_tag2 = cpu_to_le16(vcfg->default_tag2); - hnae_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG1_B, - vcfg->accept_tag1 ? 1 : 0); - hnae_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG1_B, - vcfg->accept_untag1 ? 1 : 0); - hnae_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG2_B, - vcfg->accept_tag2 ? 1 : 0); - hnae_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG2_B, - vcfg->accept_untag2 ? 1 : 0); - hnae_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG1_EN_B, - vcfg->insert_tag1_en ? 1 : 0); - hnae_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG2_EN_B, - vcfg->insert_tag2_en ? 1 : 0); - hnae_set_bit(req->vport_vlan_cfg, HCLGE_CFG_NIC_ROCE_SEL_B, 0); + hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG1_B, + vcfg->accept_tag1 ? 1 : 0); + hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG1_B, + vcfg->accept_untag1 ? 1 : 0); + hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG2_B, + vcfg->accept_tag2 ? 1 : 0); + hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG2_B, + vcfg->accept_untag2 ? 1 : 0); + hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG1_EN_B, + vcfg->insert_tag1_en ? 1 : 0); + hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG2_EN_B, + vcfg->insert_tag2_en ? 1 : 0); + hnae3_set_bit(req->vport_vlan_cfg, HCLGE_CFG_NIC_ROCE_SEL_B, 0); req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD; req->vf_bitmap[req->vf_offset] = @@ -4840,14 +4834,14 @@ static int hclge_set_vlan_rx_offload_cfg(struct hclge_vport *vport) hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_RX_CFG, false); req = (struct hclge_vport_vtag_rx_cfg_cmd *)desc.data; - hnae_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG1_EN_B, - vcfg->strip_tag1_en ? 1 : 0); - hnae_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG2_EN_B, - vcfg->strip_tag2_en ? 1 : 0); - hnae_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG1_EN_B, - vcfg->vlan1_vlan_prionly ? 1 : 0); - hnae_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG2_EN_B, - vcfg->vlan2_vlan_prionly ? 1 : 0); + hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG1_EN_B, + vcfg->strip_tag1_en ? 1 : 0); + hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG2_EN_B, + vcfg->strip_tag2_en ? 1 : 0); + hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG1_EN_B, + vcfg->vlan1_vlan_prionly ? 1 : 0); + hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG2_EN_B, + vcfg->vlan2_vlan_prionly ? 
1 : 0); req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD; req->vf_bitmap[req->vf_offset] = @@ -4999,6 +4993,7 @@ static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mtu) req = (struct hclge_config_max_frm_size_cmd *)desc.data; req->max_frm_size = cpu_to_le16(max_frm_size); + req->min_frm_size = HCLGE_MAC_MIN_FRAME; ret = hclge_cmd_send(&hdev->hw, &desc, 1); if (ret) { @@ -5043,7 +5038,7 @@ static int hclge_send_reset_tqp_cmd(struct hclge_dev *hdev, u16 queue_id, req = (struct hclge_reset_tqp_queue_cmd *)desc.data; req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK); - hnae_set_bit(req->reset_req, HCLGE_TQP_RESET_B, enable); + hnae3_set_bit(req->reset_req, HCLGE_TQP_RESET_B, enable); ret = hclge_cmd_send(&hdev->hw, &desc, 1); if (ret) { @@ -5073,7 +5068,7 @@ static int hclge_get_reset_status(struct hclge_dev *hdev, u16 queue_id) return ret; } - return hnae_get_bit(req->ready_to_reset, HCLGE_TQP_RESET_B); + return hnae3_get_bit(req->ready_to_reset, HCLGE_TQP_RESET_B); } static u16 hclge_covert_handle_qid_global(struct hnae3_handle *handle, @@ -5380,12 +5375,12 @@ static void hclge_get_mdix_mode(struct hnae3_handle *handle, phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_MDIX); retval = phy_read(phydev, HCLGE_PHY_CSC_REG); - mdix_ctrl = hnae_get_field(retval, HCLGE_PHY_MDIX_CTRL_M, - HCLGE_PHY_MDIX_CTRL_S); + mdix_ctrl = hnae3_get_field(retval, HCLGE_PHY_MDIX_CTRL_M, + HCLGE_PHY_MDIX_CTRL_S); retval = phy_read(phydev, HCLGE_PHY_CSS_REG); - mdix = hnae_get_bit(retval, HCLGE_PHY_MDIX_STATUS_B); - is_resolved = hnae_get_bit(retval, HCLGE_PHY_SPEED_DUP_RESOLVE_B); + mdix = hnae3_get_bit(retval, HCLGE_PHY_MDIX_STATUS_B); + is_resolved = hnae3_get_bit(retval, HCLGE_PHY_SPEED_DUP_RESOLVE_B); phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_COPPER); @@ -5531,7 +5526,6 @@ static int hclge_pci_init(struct hclge_dev *hdev) pci_set_master(pdev); hw = &hdev->hw; - hw->back = hdev; hw->io_base = pcim_iomap(pdev, 2, 0); if (!hw->io_base) { dev_err(&pdev->dev, "Can't map configuration register space\n"); @@ -5562,6 +5556,30 @@ static void hclge_pci_uninit(struct hclge_dev *hdev) pci_disable_device(pdev); } +static void hclge_state_init(struct hclge_dev *hdev) +{ + set_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state); + set_bit(HCLGE_STATE_DOWN, &hdev->state); + clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state); + clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state); + clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state); + clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state); +} + +static void hclge_state_uninit(struct hclge_dev *hdev) +{ + set_bit(HCLGE_STATE_DOWN, &hdev->state); + + if (hdev->service_timer.function) + del_timer_sync(&hdev->service_timer); + if (hdev->service_task.func) + cancel_work_sync(&hdev->service_task); + if (hdev->rst_service_task.func) + cancel_work_sync(&hdev->rst_service_task); + if (hdev->mbx_service_task.func) + cancel_work_sync(&hdev->mbx_service_task); +} + static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev) { struct pci_dev *pdev = ae_dev->pdev; @@ -5702,12 +5720,7 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev) /* Enable MISC vector(vector0) */ hclge_enable_vector(&hdev->misc_vector, true); - set_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state); - set_bit(HCLGE_STATE_DOWN, &hdev->state); - clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state); - clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state); - clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state); - clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state); + hclge_state_init(hdev); 
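The new hclge_state_init()/hclge_state_uninit() helpers above pull the state-bit bookkeeping out of probe and remove. The part worth noting is the uninit ordering: the DOWN bit is set before the timer and work items are flushed, so nothing re-arms itself during teardown. A hedged sketch of that ordering with a made-up foo_dev follows; the timer and workqueue calls are the standard kernel APIs.

#include <linux/bitops.h>
#include <linux/timer.h>
#include <linux/workqueue.h>

#define FOO_STATE_DOWN 0

struct foo_dev {
	unsigned long state;
	struct timer_list service_timer;
	struct work_struct service_task;
};

static void foo_state_uninit(struct foo_dev *fdev)
{
	/* 1) Mark the device down so callbacks stop rescheduling work. */
	set_bit(FOO_STATE_DOWN, &fdev->state);

	/* 2) Wait for a running timer handler to finish before teardown. */
	del_timer_sync(&fdev->service_timer);

	/* 3) Flush the service task; it is expected to test FOO_STATE_DOWN. */
	cancel_work_sync(&fdev->service_task);
}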
pr_info("%s driver initialization finished.\n", HCLGE_DRIVER_NAME); return 0; @@ -5812,16 +5825,7 @@ static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev) struct hclge_dev *hdev = ae_dev->priv; struct hclge_mac *mac = &hdev->hw.mac; - set_bit(HCLGE_STATE_DOWN, &hdev->state); - - if (hdev->service_timer.function) - del_timer_sync(&hdev->service_timer); - if (hdev->service_task.func) - cancel_work_sync(&hdev->service_task); - if (hdev->rst_service_task.func) - cancel_work_sync(&hdev->rst_service_task); - if (hdev->mbx_service_task.func) - cancel_work_sync(&hdev->mbx_service_task); + hclge_state_uninit(hdev); if (mac->phydev) mdiobus_unregister(mac->mdio_bus); @@ -6149,8 +6153,8 @@ static int hclge_set_led_status(struct hclge_dev *hdev, u8 locate_led_status) hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_LED_STATUS_CFG, false); req = (struct hclge_set_led_state_cmd *)desc.data; - hnae_set_field(req->locate_led_config, HCLGE_LED_LOCATE_STATE_M, - HCLGE_LED_LOCATE_STATE_S, locate_led_status); + hnae3_set_field(req->locate_led_config, HCLGE_LED_LOCATE_STATE_M, + HCLGE_LED_LOCATE_STATE_S, locate_led_status); ret = hclge_cmd_send(&hdev->hw, &desc, 1); if (ret) @@ -6280,7 +6284,6 @@ static const struct hnae3_ae_ops hclge_ops = { static struct hnae3_ae_algo ae_algo = { .ops = &hclge_ops, - .name = HCLGE_NAME, .pdev_id_table = ae_algo_pci_tbl, }; diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h index 7488534528cd..a5abf8ee9b96 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h @@ -89,6 +89,7 @@ /* Reset related Registers */ #define HCLGE_MISC_RESET_STS_REG 0x20700 +#define HCLGE_MISC_VECTOR_INT_STS 0x20800 #define HCLGE_GLOBAL_RESET_REG 0x20A00 #define HCLGE_GLOBAL_RESET_BIT 0x0 #define HCLGE_CORE_RESET_BIT 0x1 @@ -128,6 +129,7 @@ enum HCLGE_DEV_STATE { HCLGE_STATE_MBX_SERVICE_SCHED, HCLGE_STATE_MBX_HANDLING, HCLGE_STATE_STATISTICS_UPDATING, + HCLGE_STATE_CMD_DISABLE, HCLGE_STATE_MAX }; @@ -190,7 +192,6 @@ struct hclge_hw { int num_vec; struct hclge_cmq cmq; struct hclge_caps caps; - void *back; }; /* TQP stats */ diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c index 7541cb9b71ce..f34851c91eb3 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c @@ -104,13 +104,15 @@ static void hclge_free_vector_ring_chain(struct hnae3_ring_chain_node *head) } } -/* hclge_get_ring_chain_from_mbx: get ring type & tqpid from mailbox message +/* hclge_get_ring_chain_from_mbx: get ring type & tqp id & int_gl idx + * from mailbox message * msg[0]: opcode * msg[1]: <not relevant to this function> * msg[2]: ring_num * msg[3]: first ring type (TX|RX) * msg[4]: first tqp id - * msg[5] ~ msg[14]: other ring type and tqp id + * msg[5]: first int_gl idx + * msg[6] ~ msg[14]: other ring type, tqp id and int_gl idx */ static int hclge_get_ring_chain_from_mbx( struct hclge_mbx_vf_to_pf_cmd *req, @@ -128,12 +130,12 @@ static int hclge_get_ring_chain_from_mbx( HCLGE_MBX_RING_NODE_VARIABLE_NUM)) return -ENOMEM; - hnae_set_bit(ring_chain->flag, HNAE3_RING_TYPE_B, req->msg[3]); + hnae3_set_bit(ring_chain->flag, HNAE3_RING_TYPE_B, req->msg[3]); ring_chain->tqp_index = hclge_get_queue_id(vport->nic.kinfo.tqp[req->msg[4]]); - hnae_set_field(ring_chain->int_gl_idx, HCLGE_INT_GL_IDX_M, - HCLGE_INT_GL_IDX_S, - req->msg[5]); + 
hnae3_set_field(ring_chain->int_gl_idx, HNAE3_RING_GL_IDX_M, + HNAE3_RING_GL_IDX_S, + req->msg[5]); cur_chain = ring_chain; @@ -142,19 +144,19 @@ static int hclge_get_ring_chain_from_mbx( if (!new_chain) goto err; - hnae_set_bit(new_chain->flag, HNAE3_RING_TYPE_B, - req->msg[HCLGE_MBX_RING_NODE_VARIABLE_NUM * i + - HCLGE_MBX_RING_MAP_BASIC_MSG_NUM]); + hnae3_set_bit(new_chain->flag, HNAE3_RING_TYPE_B, + req->msg[HCLGE_MBX_RING_NODE_VARIABLE_NUM * i + + HCLGE_MBX_RING_MAP_BASIC_MSG_NUM]); new_chain->tqp_index = hclge_get_queue_id(vport->nic.kinfo.tqp [req->msg[HCLGE_MBX_RING_NODE_VARIABLE_NUM * i + HCLGE_MBX_RING_MAP_BASIC_MSG_NUM + 1]]); - hnae_set_field(new_chain->int_gl_idx, HCLGE_INT_GL_IDX_M, - HCLGE_INT_GL_IDX_S, - req->msg[HCLGE_MBX_RING_NODE_VARIABLE_NUM * i + - HCLGE_MBX_RING_MAP_BASIC_MSG_NUM + 2]); + hnae3_set_field(new_chain->int_gl_idx, HNAE3_RING_GL_IDX_M, + HNAE3_RING_GL_IDX_S, + req->msg[HCLGE_MBX_RING_NODE_VARIABLE_NUM * i + + HCLGE_MBX_RING_MAP_BASIC_MSG_NUM + 2]); cur_chain->next = new_chain; cur_chain = new_chain; @@ -460,7 +462,7 @@ void hclge_mbx_handler(struct hclge_dev *hdev) req = (struct hclge_mbx_vf_to_pf_cmd *)desc->data; flag = le16_to_cpu(crq->desc[crq->next_to_use].flag); - if (unlikely(!hnae_get_bit(flag, HCLGE_CMDQ_RX_OUTVLD_B))) { + if (unlikely(!hnae3_get_bit(flag, HCLGE_CMDQ_RX_OUTVLD_B))) { dev_warn(&hdev->pdev->dev, "dropped invalid mailbox message, code = %d\n", req->msg[0]); diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c index 9f7932e423b5..b6cfe6ff988d 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c @@ -67,16 +67,16 @@ static int hclge_mdio_write(struct mii_bus *bus, int phyid, int regnum, mdio_cmd = (struct hclge_mdio_cfg_cmd *)desc.data; - hnae_set_field(mdio_cmd->phyid, HCLGE_MDIO_PHYID_M, - HCLGE_MDIO_PHYID_S, phyid); - hnae_set_field(mdio_cmd->phyad, HCLGE_MDIO_PHYREG_M, - HCLGE_MDIO_PHYREG_S, regnum); + hnae3_set_field(mdio_cmd->phyid, HCLGE_MDIO_PHYID_M, + HCLGE_MDIO_PHYID_S, phyid); + hnae3_set_field(mdio_cmd->phyad, HCLGE_MDIO_PHYREG_M, + HCLGE_MDIO_PHYREG_S, regnum); - hnae_set_bit(mdio_cmd->ctrl_bit, HCLGE_MDIO_CTRL_START_B, 1); - hnae_set_field(mdio_cmd->ctrl_bit, HCLGE_MDIO_CTRL_ST_M, - HCLGE_MDIO_CTRL_ST_S, 1); - hnae_set_field(mdio_cmd->ctrl_bit, HCLGE_MDIO_CTRL_OP_M, - HCLGE_MDIO_CTRL_OP_S, HCLGE_MDIO_C22_WRITE); + hnae3_set_bit(mdio_cmd->ctrl_bit, HCLGE_MDIO_CTRL_START_B, 1); + hnae3_set_field(mdio_cmd->ctrl_bit, HCLGE_MDIO_CTRL_ST_M, + HCLGE_MDIO_CTRL_ST_S, 1); + hnae3_set_field(mdio_cmd->ctrl_bit, HCLGE_MDIO_CTRL_OP_M, + HCLGE_MDIO_CTRL_OP_S, HCLGE_MDIO_C22_WRITE); mdio_cmd->data_wr = cpu_to_le16(data); @@ -105,16 +105,16 @@ static int hclge_mdio_read(struct mii_bus *bus, int phyid, int regnum) mdio_cmd = (struct hclge_mdio_cfg_cmd *)desc.data; - hnae_set_field(mdio_cmd->phyid, HCLGE_MDIO_PHYID_M, - HCLGE_MDIO_PHYID_S, phyid); - hnae_set_field(mdio_cmd->phyad, HCLGE_MDIO_PHYREG_M, - HCLGE_MDIO_PHYREG_S, regnum); + hnae3_set_field(mdio_cmd->phyid, HCLGE_MDIO_PHYID_M, + HCLGE_MDIO_PHYID_S, phyid); + hnae3_set_field(mdio_cmd->phyad, HCLGE_MDIO_PHYREG_M, + HCLGE_MDIO_PHYREG_S, regnum); - hnae_set_bit(mdio_cmd->ctrl_bit, HCLGE_MDIO_CTRL_START_B, 1); - hnae_set_field(mdio_cmd->ctrl_bit, HCLGE_MDIO_CTRL_ST_M, - HCLGE_MDIO_CTRL_ST_S, 1); - hnae_set_field(mdio_cmd->ctrl_bit, HCLGE_MDIO_CTRL_OP_M, - HCLGE_MDIO_CTRL_OP_S, HCLGE_MDIO_C22_READ); + hnae3_set_bit(mdio_cmd->ctrl_bit, 
HCLGE_MDIO_CTRL_START_B, 1); + hnae3_set_field(mdio_cmd->ctrl_bit, HCLGE_MDIO_CTRL_ST_M, + HCLGE_MDIO_CTRL_ST_S, 1); + hnae3_set_field(mdio_cmd->ctrl_bit, HCLGE_MDIO_CTRL_OP_M, + HCLGE_MDIO_CTRL_OP_S, HCLGE_MDIO_C22_READ); /* Read out phy data */ ret = hclge_cmd_send(&hdev->hw, &desc, 1); @@ -125,7 +125,7 @@ static int hclge_mdio_read(struct mii_bus *bus, int phyid, int regnum) return ret; } - if (hnae_get_bit(le16_to_cpu(mdio_cmd->sta), HCLGE_MDIO_STA_B)) { + if (hnae3_get_bit(le16_to_cpu(mdio_cmd->sta), HCLGE_MDIO_STA_B)) { dev_err(&hdev->pdev->dev, "mdio read data error\n"); return -EIO; } diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c index 262c125f8137..e2acf3bd6ba3 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c @@ -1184,10 +1184,10 @@ static int hclge_bp_setup_hw(struct hclge_dev *hdev, u8 tc) u16 qs_id = vport->qs_offset + tc; u8 grp, sub_grp; - grp = hnae_get_field(qs_id, HCLGE_BP_GRP_ID_M, - HCLGE_BP_GRP_ID_S); - sub_grp = hnae_get_field(qs_id, HCLGE_BP_SUB_GRP_ID_M, - HCLGE_BP_SUB_GRP_ID_S); + grp = hnae3_get_field(qs_id, HCLGE_BP_GRP_ID_M, + HCLGE_BP_GRP_ID_S); + sub_grp = hnae3_get_field(qs_id, HCLGE_BP_SUB_GRP_ID_M, + HCLGE_BP_SUB_GRP_ID_S); if (i == grp) qs_bitmap |= (1 << sub_grp); @@ -1223,6 +1223,10 @@ static int hclge_mac_pause_setup_hw(struct hclge_dev *hdev) tx_en = true; rx_en = true; break; + case HCLGE_FC_PFC: + tx_en = false; + rx_en = false; + break; default: tx_en = true; rx_en = true; @@ -1240,8 +1244,9 @@ int hclge_pause_setup_hw(struct hclge_dev *hdev) if (ret) return ret; - if (hdev->tm_info.fc_mode != HCLGE_FC_PFC) - return hclge_mac_pause_setup_hw(hdev); + ret = hclge_mac_pause_setup_hw(hdev); + if (ret) + return ret; /* Only DCB-supported dev supports qset back pressure and pfc cmd */ if (!hnae3_dev_dcb_supported(hdev)) diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h index c2b6e8a6700f..c82d49ebd5bf 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h @@ -123,10 +123,11 @@ struct hclge_port_shapping_cmd { }; #define hclge_tm_set_field(dest, string, val) \ - hnae_set_field((dest), (HCLGE_TM_SHAP_##string##_MSK), \ - (HCLGE_TM_SHAP_##string##_LSH), val) + hnae3_set_field((dest), \ + (HCLGE_TM_SHAP_##string##_MSK), \ + (HCLGE_TM_SHAP_##string##_LSH), val) #define hclge_tm_get_field(src, string) \ - hnae_get_field((src), (HCLGE_TM_SHAP_##string##_MSK), \ + hnae3_get_field((src), (HCLGE_TM_SHAP_##string##_MSK), \ (HCLGE_TM_SHAP_##string##_LSH)) int hclge_tm_schd_init(struct hclge_dev *hdev); diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c index 1bbfe131b596..fb471fe2c494 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c @@ -76,32 +76,24 @@ static int hclgevf_alloc_cmd_desc(struct hclgevf_cmq_ring *ring) { int size = ring->desc_num * sizeof(struct hclgevf_desc); - ring->desc = kzalloc(size, GFP_KERNEL); + ring->desc = dma_zalloc_coherent(cmq_ring_to_dev(ring), + size, &ring->desc_dma_addr, + GFP_KERNEL); if (!ring->desc) return -ENOMEM; - ring->desc_dma_addr = dma_map_single(cmq_ring_to_dev(ring), ring->desc, - size, DMA_BIDIRECTIONAL); - - if (dma_mapping_error(cmq_ring_to_dev(ring), 
ring->desc_dma_addr)) { - ring->desc_dma_addr = 0; - kfree(ring->desc); - ring->desc = NULL; - return -ENOMEM; - } - return 0; } static void hclgevf_free_cmd_desc(struct hclgevf_cmq_ring *ring) { - dma_unmap_single(cmq_ring_to_dev(ring), ring->desc_dma_addr, - ring->desc_num * sizeof(ring->desc[0]), - hclgevf_ring_to_dma_dir(ring)); + int size = ring->desc_num * sizeof(struct hclgevf_desc); - ring->desc_dma_addr = 0; - kfree(ring->desc); - ring->desc = NULL; + if (ring->desc) { + dma_free_coherent(cmq_ring_to_dev(ring), size, + ring->desc, ring->desc_dma_addr); + ring->desc = NULL; + } } static int hclgevf_init_cmd_queue(struct hclgevf_dev *hdev, diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c index a17872aab168..d1f16f0c1646 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c @@ -330,6 +330,12 @@ static int hclgevf_set_handle_info(struct hclgevf_dev *hdev) static void hclgevf_free_vector(struct hclgevf_dev *hdev, int vector_id) { + if (hdev->vector_status[vector_id] == HCLGEVF_INVALID_VPORT) { + dev_warn(&hdev->pdev->dev, + "vector(vector_id %d) has been freed.\n", vector_id); + return; + } + hdev->vector_status[vector_id] = HCLGEVF_INVALID_VPORT; hdev->num_msi_left += 1; hdev->num_msi_used -= 1; @@ -444,12 +450,12 @@ static int hclgevf_set_rss_tc_mode(struct hclgevf_dev *hdev, u16 rss_size) hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_TC_MODE, false); for (i = 0; i < HCLGEVF_MAX_TC_NUM; i++) { - hnae_set_bit(req->rss_tc_mode[i], HCLGEVF_RSS_TC_VALID_B, - (tc_valid[i] & 0x1)); - hnae_set_field(req->rss_tc_mode[i], HCLGEVF_RSS_TC_SIZE_M, - HCLGEVF_RSS_TC_SIZE_S, tc_size[i]); - hnae_set_field(req->rss_tc_mode[i], HCLGEVF_RSS_TC_OFFSET_M, - HCLGEVF_RSS_TC_OFFSET_S, tc_offset[i]); + hnae3_set_bit(req->rss_tc_mode[i], HCLGEVF_RSS_TC_VALID_B, + (tc_valid[i] & 0x1)); + hnae3_set_field(req->rss_tc_mode[i], HCLGEVF_RSS_TC_SIZE_M, + HCLGEVF_RSS_TC_SIZE_S, tc_size[i]); + hnae3_set_field(req->rss_tc_mode[i], HCLGEVF_RSS_TC_OFFSET_M, + HCLGEVF_RSS_TC_OFFSET_S, tc_offset[i]); } status = hclgevf_cmd_send(&hdev->hw, &desc, 1); if (status) @@ -547,24 +553,18 @@ static int hclgevf_get_tc_size(struct hnae3_handle *handle) } static int hclgevf_bind_ring_to_vector(struct hnae3_handle *handle, bool en, - int vector, + int vector_id, struct hnae3_ring_chain_node *ring_chain) { struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); struct hnae3_ring_chain_node *node; struct hclge_mbx_vf_to_pf_cmd *req; struct hclgevf_desc desc; - int i = 0, vector_id; + int i = 0; int status; u8 type; req = (struct hclge_mbx_vf_to_pf_cmd *)desc.data; - vector_id = hclgevf_get_vector_index(hdev, vector); - if (vector_id < 0) { - dev_err(&handle->pdev->dev, - "Get vector index fail. 
ret =%d\n", vector_id); - return vector_id; - } for (node = ring_chain; node; node = node->next) { int idx_offset = HCLGE_MBX_RING_MAP_BASIC_MSG_NUM + @@ -582,11 +582,11 @@ static int hclgevf_bind_ring_to_vector(struct hnae3_handle *handle, bool en, } req->msg[idx_offset] = - hnae_get_bit(node->flag, HNAE3_RING_TYPE_B); + hnae3_get_bit(node->flag, HNAE3_RING_TYPE_B); req->msg[idx_offset + 1] = node->tqp_index; - req->msg[idx_offset + 2] = hnae_get_field(node->int_gl_idx, - HNAE3_RING_GL_IDX_M, - HNAE3_RING_GL_IDX_S); + req->msg[idx_offset + 2] = hnae3_get_field(node->int_gl_idx, + HNAE3_RING_GL_IDX_M, + HNAE3_RING_GL_IDX_S); i++; if ((i == (HCLGE_MBX_VF_MSG_DATA_NUM - @@ -617,7 +617,17 @@ static int hclgevf_bind_ring_to_vector(struct hnae3_handle *handle, bool en, static int hclgevf_map_ring_to_vector(struct hnae3_handle *handle, int vector, struct hnae3_ring_chain_node *ring_chain) { - return hclgevf_bind_ring_to_vector(handle, true, vector, ring_chain); + struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); + int vector_id; + + vector_id = hclgevf_get_vector_index(hdev, vector); + if (vector_id < 0) { + dev_err(&handle->pdev->dev, + "Get vector index fail. ret =%d\n", vector_id); + return vector_id; + } + + return hclgevf_bind_ring_to_vector(handle, true, vector_id, ring_chain); } static int hclgevf_unmap_ring_from_vector( @@ -635,7 +645,7 @@ static int hclgevf_unmap_ring_from_vector( return vector_id; } - ret = hclgevf_bind_ring_to_vector(handle, false, vector, ring_chain); + ret = hclgevf_bind_ring_to_vector(handle, false, vector_id, ring_chain); if (ret) dev_err(&handle->pdev->dev, "Unmap ring from vector fail. vector=%d, ret =%d\n", @@ -648,8 +658,17 @@ static int hclgevf_unmap_ring_from_vector( static int hclgevf_put_vector(struct hnae3_handle *handle, int vector) { struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); + int vector_id; - hclgevf_free_vector(hdev, vector); + vector_id = hclgevf_get_vector_index(hdev, vector); + if (vector_id < 0) { + dev_err(&handle->pdev->dev, + "hclgevf_put_vector get vector index fail. 
ret =%d\n", + vector_id); + return vector_id; + } + + hclgevf_free_vector(hdev, vector_id); return 0; } @@ -990,8 +1009,8 @@ static int hclgevf_reset_wait(struct hclgevf_dev *hdev) /* wait to check the hardware reset completion status */ val = hclgevf_read_dev(&hdev->hw, HCLGEVF_FUN_RST_ING); - while (hnae_get_bit(val, HCLGEVF_FUN_RST_ING_B) && - (cnt < HCLGEVF_RESET_WAIT_CNT)) { + while (hnae3_get_bit(val, HCLGEVF_FUN_RST_ING_B) && + (cnt < HCLGEVF_RESET_WAIT_CNT)) { msleep(HCLGEVF_RESET_WAIT_MS); val = hclgevf_read_dev(&hdev->hw, HCLGEVF_FUN_RST_ING); cnt++; @@ -1582,9 +1601,10 @@ static void hclgevf_misc_irq_uninit(struct hclgevf_dev *hdev) hclgevf_free_vector(hdev, 0); } -static int hclgevf_init_instance(struct hclgevf_dev *hdev, - struct hnae3_client *client) +static int hclgevf_init_client_instance(struct hnae3_client *client, + struct hnae3_ae_dev *ae_dev) { + struct hclgevf_dev *hdev = ae_dev->priv; int ret; switch (client->type) { @@ -1635,9 +1655,11 @@ static int hclgevf_init_instance(struct hclgevf_dev *hdev, return 0; } -static void hclgevf_uninit_instance(struct hclgevf_dev *hdev, - struct hnae3_client *client) +static void hclgevf_uninit_client_instance(struct hnae3_client *client, + struct hnae3_ae_dev *ae_dev) { + struct hclgevf_dev *hdev = ae_dev->priv; + /* un-init roce, if it exists */ if (hdev->roce_client) hdev->roce_client->ops->uninit_instance(&hdev->roce, 0); @@ -1648,22 +1670,6 @@ static void hclgevf_uninit_instance(struct hclgevf_dev *hdev, client->ops->uninit_instance(&hdev->nic, 0); } -static int hclgevf_register_client(struct hnae3_client *client, - struct hnae3_ae_dev *ae_dev) -{ - struct hclgevf_dev *hdev = ae_dev->priv; - - return hclgevf_init_instance(hdev, client); -} - -static void hclgevf_unregister_client(struct hnae3_client *client, - struct hnae3_ae_dev *ae_dev) -{ - struct hclgevf_dev *hdev = ae_dev->priv; - - hclgevf_uninit_instance(hdev, client); -} - static int hclgevf_pci_init(struct hclgevf_dev *hdev) { struct pci_dev *pdev = hdev->pdev; @@ -1924,8 +1930,8 @@ void hclgevf_update_speed_duplex(struct hclgevf_dev *hdev, u32 speed, static const struct hnae3_ae_ops hclgevf_ops = { .init_ae_dev = hclgevf_init_ae_dev, .uninit_ae_dev = hclgevf_uninit_ae_dev, - .init_client_instance = hclgevf_register_client, - .uninit_client_instance = hclgevf_unregister_client, + .init_client_instance = hclgevf_init_client_instance, + .uninit_client_instance = hclgevf_uninit_client_instance, .start = hclgevf_ae_start, .stop = hclgevf_ae_stop, .map_ring_to_vector = hclgevf_map_ring_to_vector, @@ -1962,7 +1968,6 @@ static const struct hnae3_ae_ops hclgevf_ops = { static struct hnae3_ae_algo ae_algovf = { .ops = &hclgevf_ops, - .name = HCLGEVF_NAME, .pdev_id_table = ae_algovf_pci_tbl, }; diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c index b598c06af8e0..e9d5a4f96304 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c @@ -152,7 +152,7 @@ void hclgevf_mbx_handler(struct hclgevf_dev *hdev) req = (struct hclge_mbx_pf_to_vf_cmd *)desc->data; flag = le16_to_cpu(crq->desc[crq->next_to_use].flag); - if (unlikely(!hnae_get_bit(flag, HCLGEVF_CMDQ_RX_OUTVLD_B))) { + if (unlikely(!hnae3_get_bit(flag, HCLGEVF_CMDQ_RX_OUTVLD_B))) { dev_warn(&hdev->pdev->dev, "dropped invalid mailbox message, code = %d\n", req->msg[0]); @@ -208,7 +208,8 @@ void hclgevf_mbx_handler(struct hclgevf_dev *hdev) /* tail the async message in arq */ 
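hclgevf_reset_wait() above is the VF side of the same bounded polling idiom the PF uses in hclge_reset_wait(): read a status register, test one bit, sleep, and give up after a fixed retry budget. A simplified sketch under those assumptions follows; foo_read_reg(), FOO_RST_ING and FOO_RST_ING_B are hypothetical stand-ins, not registers from the patch.

#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/types.h>

#define FOO_RST_WAIT_MS   100
#define FOO_RST_WAIT_CNT  5
#define FOO_RST_ING       0x20900   /* made-up status register offset */
#define FOO_RST_ING_B     0         /* reset-in-progress bit */

struct foo_hw;
u32 foo_read_reg(struct foo_hw *hw, u32 reg);   /* assumed register accessor */

/* Returns 0 once the reset-in-progress bit clears, -EBUSY on timeout. */
static int foo_reset_wait(struct foo_hw *hw)
{
	u32 val = foo_read_reg(hw, FOO_RST_ING);
	int cnt = 0;

	while ((val & BIT(FOO_RST_ING_B)) && cnt < FOO_RST_WAIT_CNT) {
		msleep(FOO_RST_WAIT_MS);
		val = foo_read_reg(hw, FOO_RST_ING);
		cnt++;
	}

	if (val & BIT(FOO_RST_ING_B))
		return -EBUSY;

	return 0;
}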
msg_q = hdev->arq.msg_q[hdev->arq.tail]; - memcpy(&msg_q[0], req->msg, HCLGE_MBX_MAX_ARQ_MSG_SIZE); + memcpy(&msg_q[0], req->msg, + HCLGE_MBX_MAX_ARQ_MSG_SIZE * sizeof(u16)); hclge_mbx_tail_ptr_move_arq(hdev->arq); hdev->arq.count++; diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c index 79b567447084..6b19607a4caa 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c +++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c @@ -264,7 +264,6 @@ static int init_fw_ctxt(struct hinic_hwdev *hwdev) struct hinic_hwif *hwif = hwdev->hwif; struct pci_dev *pdev = hwif->pdev; struct hinic_cmd_fw_ctxt fw_ctxt; - struct hinic_pfhwdev *pfhwdev; u16 out_size; int err; @@ -276,8 +275,6 @@ static int init_fw_ctxt(struct hinic_hwdev *hwdev) fw_ctxt.func_idx = HINIC_HWIF_FUNC_IDX(hwif); fw_ctxt.rx_buf_sz = HINIC_RX_BUF_SZ; - pfhwdev = container_of(hwdev, struct hinic_pfhwdev, hwdev); - err = hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_FWCTXT_INIT, &fw_ctxt, sizeof(fw_ctxt), &fw_ctxt, &out_size); diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index c944bd10b03d..51762428b40e 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -7522,7 +7522,7 @@ static int i40e_setup_tc_cls_flower(struct i40e_netdev_priv *np, case TC_CLSFLOWER_STATS: return -EOPNOTSUPP; default: - return -EINVAL; + return -EOPNOTSUPP; } } @@ -7554,7 +7554,7 @@ static int i40e_setup_tc_block(struct net_device *dev, switch (f->command) { case TC_BLOCK_BIND: return tcf_block_cb_register(f->block, i40e_setup_tc_block_cb, - np, np); + np, np, f->extack); case TC_BLOCK_UNBIND: tcf_block_cb_unregister(f->block, i40e_setup_tc_block_cb, np); return 0; @@ -11841,7 +11841,6 @@ static int i40e_xdp(struct net_device *dev, case XDP_SETUP_PROG: return i40e_xdp_setup(vsi, xdp->prog); case XDP_QUERY_PROG: - xdp->prog_attached = i40e_enabled_xdp_vsi(vsi); xdp->prog_id = vsi->xdp_prog ? 
vsi->xdp_prog->aux->id : 0; return 0; default: @@ -11978,7 +11977,7 @@ static int i40e_config_netdev(struct i40e_vsi *vsi) snprintf(netdev->name, IFNAMSIZ, "%.*sv%%d", IFNAMSIZ - 4, pf->vsi[pf->lan_vsi]->netdev->name); - random_ether_addr(mac_addr); + eth_random_addr(mac_addr); spin_lock_bh(&vsi->mac_filter_hash_lock); i40e_add_mac_filter(vsi, mac_addr); diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_main.c b/drivers/net/ethernet/intel/i40evf/i40evf_main.c index a7b87f935411..5906c1c1d19d 100644 --- a/drivers/net/ethernet/intel/i40evf/i40evf_main.c +++ b/drivers/net/ethernet/intel/i40evf/i40evf_main.c @@ -2884,7 +2884,7 @@ static int i40evf_setup_tc_cls_flower(struct i40evf_adapter *adapter, case TC_CLSFLOWER_STATS: return -EOPNOTSUPP; default: - return -EINVAL; + return -EOPNOTSUPP; } } @@ -2926,7 +2926,7 @@ static int i40evf_setup_tc_block(struct net_device *dev, switch (f->command) { case TC_BLOCK_BIND: return tcf_block_cb_register(f->block, i40evf_setup_tc_block_cb, - adapter, adapter); + adapter, adapter, f->extack); case TC_BLOCK_UNBIND: tcf_block_cb_unregister(f->block, i40evf_setup_tc_block_cb, adapter); diff --git a/drivers/net/ethernet/intel/igb/e1000_defines.h b/drivers/net/ethernet/intel/igb/e1000_defines.h index 252440a418dc..8a28f3388f69 100644 --- a/drivers/net/ethernet/intel/igb/e1000_defines.h +++ b/drivers/net/ethernet/intel/igb/e1000_defines.h @@ -1048,6 +1048,22 @@ #define E1000_TQAVCTRL_XMIT_MODE BIT(0) #define E1000_TQAVCTRL_DATAFETCHARB BIT(4) #define E1000_TQAVCTRL_DATATRANARB BIT(8) +#define E1000_TQAVCTRL_DATATRANTIM BIT(9) +#define E1000_TQAVCTRL_SP_WAIT_SR BIT(10) +/* Fetch Time Delta - bits 31:16 + * + * This field holds the value to be reduced from the launch time for + * fetch time decision. The FetchTimeDelta value is defined in 32 ns + * granularity. + * + * This field is 16 bits wide, and so the maximum value is: + * + * 65535 * 32 = 2097120 ~= 2.1 msec + * + * XXX: We are configuring the max value here since we couldn't come up + * with a reason for not doing so. + */ +#define E1000_TQAVCTRL_FETCHTIME_DELTA (0xFFFF << 16) /* TX Qav Credit Control fields */ #define E1000_TQAVCC_IDLESLOPE_MASK 0xFFFF diff --git a/drivers/net/ethernet/intel/igb/igb.h b/drivers/net/ethernet/intel/igb/igb.h index 9643b5b3d444..ca54e268d157 100644 --- a/drivers/net/ethernet/intel/igb/igb.h +++ b/drivers/net/ethernet/intel/igb/igb.h @@ -262,6 +262,7 @@ struct igb_ring { u16 count; /* number of desc. 
in the ring */ u8 queue_index; /* logical index of the ring*/ u8 reg_idx; /* physical index of the ring */ + bool launchtime_enable; /* true if LaunchTime is enabled */ bool cbs_enable; /* indicates if CBS is enabled */ s32 idleslope; /* idleSlope in kbps */ s32 sendslope; /* sendSlope in kbps */ diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c index f707709969ac..e3a0c02721c9 100644 --- a/drivers/net/ethernet/intel/igb/igb_main.c +++ b/drivers/net/ethernet/intel/igb/igb_main.c @@ -1654,33 +1654,65 @@ static void set_queue_mode(struct e1000_hw *hw, int queue, enum queue_mode mode) wr32(E1000_I210_TQAVCC(queue), val); } +static bool is_any_cbs_enabled(struct igb_adapter *adapter) +{ + int i; + + for (i = 0; i < adapter->num_tx_queues; i++) { + if (adapter->tx_ring[i]->cbs_enable) + return true; + } + + return false; +} + +static bool is_any_txtime_enabled(struct igb_adapter *adapter) +{ + int i; + + for (i = 0; i < adapter->num_tx_queues; i++) { + if (adapter->tx_ring[i]->launchtime_enable) + return true; + } + + return false; +} + /** - * igb_configure_cbs - Configure Credit-Based Shaper (CBS) + * igb_config_tx_modes - Configure "Qav Tx mode" features on igb * @adapter: pointer to adapter struct * @queue: queue number - * @enable: true = enable CBS, false = disable CBS - * @idleslope: idleSlope in kbps - * @sendslope: sendSlope in kbps - * @hicredit: hiCredit in bytes - * @locredit: loCredit in bytes * - * Configure CBS for a given hardware queue. When disabling, idleslope, - * sendslope, hicredit, locredit arguments are ignored. Returns 0 if - * success. Negative otherwise. + * Configure CBS and Launchtime for a given hardware queue. + * Parameters are retrieved from the correct Tx ring, so + * igb_save_cbs_params() and igb_save_txtime_params() should be used + * for setting those correctly prior to this function being called. **/ -static void igb_configure_cbs(struct igb_adapter *adapter, int queue, - bool enable, int idleslope, int sendslope, - int hicredit, int locredit) +static void igb_config_tx_modes(struct igb_adapter *adapter, int queue) { + struct igb_ring *ring = adapter->tx_ring[queue]; struct net_device *netdev = adapter->netdev; struct e1000_hw *hw = &adapter->hw; - u32 tqavcc; + u32 tqavcc, tqavctrl; u16 value; WARN_ON(hw->mac.type != e1000_i210); WARN_ON(queue < 0 || queue > 1); - if (enable || queue == 0) { + /* If any of the Qav features is enabled, configure queues as SR and + * with HIGH PRIO. If none is, then configure them with LOW PRIO and + * as SP. + */ + if (ring->cbs_enable || ring->launchtime_enable) { + set_tx_desc_fetch_prio(hw, queue, TX_QUEUE_PRIO_HIGH); + set_queue_mode(hw, queue, QUEUE_MODE_STREAM_RESERVATION); + } else { + set_tx_desc_fetch_prio(hw, queue, TX_QUEUE_PRIO_LOW); + set_queue_mode(hw, queue, QUEUE_MODE_STRICT_PRIORITY); + } + + /* If CBS is enabled, set DataTranARB and config its parameters. */ + if (ring->cbs_enable || queue == 0) { /* i210 does not allow the queue 0 to be in the Strict * Priority mode while the Qav mode is enabled, so, * instead of disabling strict priority mode, we give @@ -1690,14 +1722,19 @@ static void igb_configure_cbs(struct igb_adapter *adapter, int queue, * Queue0 QueueMode must be set to 1b when * TransmitMode is set to Qav." 
*/ - if (queue == 0 && !enable) { + if (queue == 0 && !ring->cbs_enable) { /* max "linkspeed" idleslope in kbps */ - idleslope = 1000000; - hicredit = ETH_FRAME_LEN; + ring->idleslope = 1000000; + ring->hicredit = ETH_FRAME_LEN; } - set_tx_desc_fetch_prio(hw, queue, TX_QUEUE_PRIO_HIGH); - set_queue_mode(hw, queue, QUEUE_MODE_STREAM_RESERVATION); + /* Always set data transfer arbitration to credit-based + * shaper algorithm on TQAVCTRL if CBS is enabled for any of + * the queues. + */ + tqavctrl = rd32(E1000_I210_TQAVCTRL); + tqavctrl |= E1000_TQAVCTRL_DATATRANARB; + wr32(E1000_I210_TQAVCTRL, tqavctrl); /* According to i210 datasheet section 7.2.7.7, we should set * the 'idleSlope' field from TQAVCC register following the @@ -1756,17 +1793,16 @@ static void igb_configure_cbs(struct igb_adapter *adapter, int queue, * calculated value, so the resulting bandwidth might * be slightly higher for some configurations. */ - value = DIV_ROUND_UP_ULL(idleslope * 61034ULL, 1000000); + value = DIV_ROUND_UP_ULL(ring->idleslope * 61034ULL, 1000000); tqavcc = rd32(E1000_I210_TQAVCC(queue)); tqavcc &= ~E1000_TQAVCC_IDLESLOPE_MASK; tqavcc |= value; wr32(E1000_I210_TQAVCC(queue), tqavcc); - wr32(E1000_I210_TQAVHC(queue), 0x80000000 + hicredit * 0x7735); + wr32(E1000_I210_TQAVHC(queue), + 0x80000000 + ring->hicredit * 0x7735); } else { - set_tx_desc_fetch_prio(hw, queue, TX_QUEUE_PRIO_LOW); - set_queue_mode(hw, queue, QUEUE_MODE_STRICT_PRIORITY); /* Set idleSlope to zero. */ tqavcc = rd32(E1000_I210_TQAVCC(queue)); @@ -1775,6 +1811,43 @@ static void igb_configure_cbs(struct igb_adapter *adapter, int queue, /* Set hiCredit to zero. */ wr32(E1000_I210_TQAVHC(queue), 0); + + /* If CBS is not enabled for any queues anymore, then return to + * the default state of Data Transmission Arbitration on + * TQAVCTRL. + */ + if (!is_any_cbs_enabled(adapter)) { + tqavctrl = rd32(E1000_I210_TQAVCTRL); + tqavctrl &= ~E1000_TQAVCTRL_DATATRANARB; + wr32(E1000_I210_TQAVCTRL, tqavctrl); + } + } + + /* If LaunchTime is enabled, set DataTranTIM. */ + if (ring->launchtime_enable) { + /* Always set DataTranTIM on TQAVCTRL if LaunchTime is enabled + * for any of the SR queues, and configure fetchtime delta. + * XXX NOTE: + * - LaunchTime will be enabled for all SR queues. + * - A fixed offset can be added relative to the launch + * time of all packets if configured at reg LAUNCH_OS0. + * We are keeping it as 0 for now (default value). + */ + tqavctrl = rd32(E1000_I210_TQAVCTRL); + tqavctrl |= E1000_TQAVCTRL_DATATRANTIM | + E1000_TQAVCTRL_FETCHTIME_DELTA; + wr32(E1000_I210_TQAVCTRL, tqavctrl); + } else { + /* If Launchtime is not enabled for any SR queues anymore, + * then clear DataTranTIM on TQAVCTRL and clear fetchtime delta, + * effectively disabling Launchtime. + */ + if (!is_any_txtime_enabled(adapter)) { + tqavctrl = rd32(E1000_I210_TQAVCTRL); + tqavctrl &= ~E1000_TQAVCTRL_DATATRANTIM; + tqavctrl &= ~E1000_TQAVCTRL_FETCHTIME_DELTA; + wr32(E1000_I210_TQAVCTRL, tqavctrl); + } } /* XXX: In i210 controller the sendSlope and loCredit parameters from @@ -1782,9 +1855,27 @@ static void igb_configure_cbs(struct igb_adapter *adapter, int queue, * configuration' in respect to these parameters. */ - netdev_dbg(netdev, "CBS %s: queue %d idleslope %d sendslope %d hiCredit %d locredit %d\n", - (enable) ? "enabled" : "disabled", queue, - idleslope, sendslope, hicredit, locredit); + netdev_dbg(netdev, "Qav Tx mode: cbs %s, launchtime %s, queue %d \ + idleslope %d sendslope %d hiCredit %d \ + locredit %d\n", + (ring->cbs_enable) ? 
"enabled" : "disabled", + (ring->launchtime_enable) ? "enabled" : "disabled", queue, + ring->idleslope, ring->sendslope, ring->hicredit, + ring->locredit); +} + +static int igb_save_txtime_params(struct igb_adapter *adapter, int queue, + bool enable) +{ + struct igb_ring *ring; + + if (queue < 0 || queue > adapter->num_tx_queues) + return -EINVAL; + + ring = adapter->tx_ring[queue]; + ring->launchtime_enable = enable; + + return 0; } static int igb_save_cbs_params(struct igb_adapter *adapter, int queue, @@ -1807,21 +1898,15 @@ static int igb_save_cbs_params(struct igb_adapter *adapter, int queue, return 0; } -static bool is_any_cbs_enabled(struct igb_adapter *adapter) -{ - struct igb_ring *ring; - int i; - - for (i = 0; i < adapter->num_tx_queues; i++) { - ring = adapter->tx_ring[i]; - - if (ring->cbs_enable) - return true; - } - - return false; -} - +/** + * igb_setup_tx_mode - Switch to/from Qav Tx mode when applicable + * @adapter: pointer to adapter struct + * + * Configure TQAVCTRL register switching the controller's Tx mode + * if FQTSS mode is enabled or disabled. Additionally, will issue + * a call to igb_config_tx_modes() per queue so any previously saved + * Tx parameters are applied. + **/ static void igb_setup_tx_mode(struct igb_adapter *adapter) { struct net_device *netdev = adapter->netdev; @@ -1836,11 +1921,11 @@ static void igb_setup_tx_mode(struct igb_adapter *adapter) int i, max_queue; /* Configure TQAVCTRL register: set transmit mode to 'Qav', - * set data fetch arbitration to 'round robin' and set data - * transfer arbitration to 'credit shaper algorithm. + * set data fetch arbitration to 'round robin', set SP_WAIT_SR + * so SP queues wait for SR ones. */ val = rd32(E1000_I210_TQAVCTRL); - val |= E1000_TQAVCTRL_XMIT_MODE | E1000_TQAVCTRL_DATATRANARB; + val |= E1000_TQAVCTRL_XMIT_MODE | E1000_TQAVCTRL_SP_WAIT_SR; val &= ~E1000_TQAVCTRL_DATAFETCHARB; wr32(E1000_I210_TQAVCTRL, val); @@ -1881,11 +1966,7 @@ static void igb_setup_tx_mode(struct igb_adapter *adapter) adapter->num_tx_queues : I210_SR_QUEUES_NUM; for (i = 0; i < max_queue; i++) { - struct igb_ring *ring = adapter->tx_ring[i]; - - igb_configure_cbs(adapter, i, ring->cbs_enable, - ring->idleslope, ring->sendslope, - ring->hicredit, ring->locredit); + igb_config_tx_modes(adapter, i); } } else { wr32(E1000_RXPBS, I210_RXPBSIZE_DEFAULT); @@ -2459,6 +2540,19 @@ igb_features_check(struct sk_buff *skb, struct net_device *dev, return features; } +static void igb_offload_apply(struct igb_adapter *adapter, s32 queue) +{ + if (!is_fqtss_enabled(adapter)) { + enable_fqtss(adapter, true); + return; + } + + igb_config_tx_modes(adapter, queue); + + if (!is_any_cbs_enabled(adapter) && !is_any_txtime_enabled(adapter)) + enable_fqtss(adapter, false); +} + static int igb_offload_cbs(struct igb_adapter *adapter, struct tc_cbs_qopt_offload *qopt) { @@ -2479,17 +2573,7 @@ static int igb_offload_cbs(struct igb_adapter *adapter, if (err) return err; - if (is_fqtss_enabled(adapter)) { - igb_configure_cbs(adapter, qopt->queue, qopt->enable, - qopt->idleslope, qopt->sendslope, - qopt->hicredit, qopt->locredit); - - if (!is_any_cbs_enabled(adapter)) - enable_fqtss(adapter, false); - - } else { - enable_fqtss(adapter, true); - } + igb_offload_apply(adapter, qopt->queue); return 0; } @@ -2698,7 +2782,7 @@ static int igb_setup_tc_cls_flower(struct igb_adapter *adapter, case TC_CLSFLOWER_STATS: return -EOPNOTSUPP; default: - return -EINVAL; + return -EOPNOTSUPP; } } @@ -2728,7 +2812,7 @@ static int igb_setup_tc_block(struct igb_adapter 
*adapter, switch (f->command) { case TC_BLOCK_BIND: return tcf_block_cb_register(f->block, igb_setup_tc_block_cb, - adapter, adapter); + adapter, adapter, f->extack); case TC_BLOCK_UNBIND: tcf_block_cb_unregister(f->block, igb_setup_tc_block_cb, adapter); @@ -2738,6 +2822,29 @@ static int igb_setup_tc_block(struct igb_adapter *adapter, } } +static int igb_offload_txtime(struct igb_adapter *adapter, + struct tc_etf_qopt_offload *qopt) +{ + struct e1000_hw *hw = &adapter->hw; + int err; + + /* Launchtime offloading is only supported by i210 controller. */ + if (hw->mac.type != e1000_i210) + return -EOPNOTSUPP; + + /* Launchtime offloading is only supported by queues 0 and 1. */ + if (qopt->queue < 0 || qopt->queue > 1) + return -EINVAL; + + err = igb_save_txtime_params(adapter, qopt->queue, qopt->enable); + if (err) + return err; + + igb_offload_apply(adapter, qopt->queue); + + return 0; +} + static int igb_setup_tc(struct net_device *dev, enum tc_setup_type type, void *type_data) { @@ -2748,6 +2855,8 @@ static int igb_setup_tc(struct net_device *dev, enum tc_setup_type type, return igb_offload_cbs(adapter, type_data); case TC_SETUP_BLOCK: return igb_setup_tc_block(adapter, type_data); + case TC_SETUP_QDISC_ETF: + return igb_offload_txtime(adapter, type_data); default: return -EOPNOTSUPP; @@ -5568,11 +5677,14 @@ set_itr_now: } } -static void igb_tx_ctxtdesc(struct igb_ring *tx_ring, u32 vlan_macip_lens, - u32 type_tucmd, u32 mss_l4len_idx) +static void igb_tx_ctxtdesc(struct igb_ring *tx_ring, + struct igb_tx_buffer *first, + u32 vlan_macip_lens, u32 type_tucmd, + u32 mss_l4len_idx) { struct e1000_adv_tx_context_desc *context_desc; u16 i = tx_ring->next_to_use; + struct timespec64 ts; context_desc = IGB_TX_CTXTDESC(tx_ring, i); @@ -5587,9 +5699,18 @@ static void igb_tx_ctxtdesc(struct igb_ring *tx_ring, u32 vlan_macip_lens, mss_l4len_idx |= tx_ring->reg_idx << 4; context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens); - context_desc->seqnum_seed = 0; context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd); context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx); + + /* We assume there is always a valid tx time available. Invalid times + * should have been handled by the upper layers. 
+ */ + if (tx_ring->launchtime_enable) { + ts = ns_to_timespec64(first->skb->tstamp); + context_desc->seqnum_seed = cpu_to_le32(ts.tv_nsec / 32); + } else { + context_desc->seqnum_seed = 0; + } } static int igb_tso(struct igb_ring *tx_ring, @@ -5672,7 +5793,8 @@ static int igb_tso(struct igb_ring *tx_ring, vlan_macip_lens |= (ip.hdr - skb->data) << E1000_ADVTXD_MACLEN_SHIFT; vlan_macip_lens |= first->tx_flags & IGB_TX_FLAGS_VLAN_MASK; - igb_tx_ctxtdesc(tx_ring, vlan_macip_lens, type_tucmd, mss_l4len_idx); + igb_tx_ctxtdesc(tx_ring, first, vlan_macip_lens, + type_tucmd, mss_l4len_idx); return 1; } @@ -5727,7 +5849,7 @@ no_csum: vlan_macip_lens |= skb_network_offset(skb) << E1000_ADVTXD_MACLEN_SHIFT; vlan_macip_lens |= first->tx_flags & IGB_TX_FLAGS_VLAN_MASK; - igb_tx_ctxtdesc(tx_ring, vlan_macip_lens, type_tucmd, 0); + igb_tx_ctxtdesc(tx_ring, first, vlan_macip_lens, type_tucmd, 0); } #define IGB_SET_FLAG(_input, _flag, _result) \ @@ -6015,8 +6137,6 @@ netdev_tx_t igb_xmit_frame_ring(struct sk_buff *skb, } } - skb_tx_timestamp(skb); - if (skb_vlan_tag_present(skb)) { tx_flags |= IGB_TX_FLAGS_VLAN; tx_flags |= (skb_vlan_tag_get(skb) << IGB_TX_FLAGS_VLAN_SHIFT); @@ -6032,6 +6152,8 @@ netdev_tx_t igb_xmit_frame_ring(struct sk_buff *skb, else if (!tso) igb_tx_csum(tx_ring, first); + skb_tx_timestamp(skb); + if (igb_tx_map(tx_ring, first, hdr_len)) goto cleanup_tx_tstamp; diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index 62e57b05a0ae..5a6600f7b382 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -5275,6 +5275,8 @@ static void ixgbe_clean_rx_ring(struct ixgbe_ring *rx_ring) static int ixgbe_fwd_ring_up(struct ixgbe_adapter *adapter, struct ixgbe_fwd_adapter *accel) { + u16 rss_i = adapter->ring_feature[RING_F_RSS].indices; + int num_tc = netdev_get_num_tc(adapter->netdev); struct net_device *vdev = accel->netdev; int i, baseq, err; @@ -5286,6 +5288,11 @@ static int ixgbe_fwd_ring_up(struct ixgbe_adapter *adapter, accel->rx_base_queue = baseq; accel->tx_base_queue = baseq; + /* record configuration for macvlan interface in vdev */ + for (i = 0; i < num_tc; i++) + netdev_bind_sb_channel_queue(adapter->netdev, vdev, + i, rss_i, baseq + (rss_i * i)); + for (i = 0; i < adapter->num_rx_queues_per_pool; i++) adapter->rx_ring[baseq + i]->netdev = vdev; @@ -5310,6 +5317,10 @@ static int ixgbe_fwd_ring_up(struct ixgbe_adapter *adapter, netdev_err(vdev, "L2FW offload disabled due to L2 filter error\n"); + /* unbind the queues and drop the subordinate channel config */ + netdev_unbind_sb_channel(adapter->netdev, vdev); + netdev_set_sb_channel(vdev, 0); + clear_bit(accel->pool, adapter->fwd_bitmask); kfree(accel); @@ -8197,25 +8208,25 @@ static void ixgbe_atr(struct ixgbe_ring *ring, input, common, ring->queue_index); } +#ifdef IXGBE_FCOE static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb, - void *accel_priv, select_queue_fallback_t fallback) + struct net_device *sb_dev, + select_queue_fallback_t fallback) { - struct ixgbe_fwd_adapter *fwd_adapter = accel_priv; struct ixgbe_adapter *adapter; - int txq; -#ifdef IXGBE_FCOE struct ixgbe_ring_feature *f; -#endif + int txq; - if (fwd_adapter) { - adapter = netdev_priv(dev); - txq = reciprocal_scale(skb_get_hash(skb), - adapter->num_rx_queues_per_pool); + if (sb_dev) { + u8 tc = netdev_get_prio_tc_map(dev, skb->priority); + struct net_device *vdev = sb_dev; - return txq + fwd_adapter->tx_base_queue; - } + txq = 
vdev->tc_to_txq[tc].offset; + txq += reciprocal_scale(skb_get_hash(skb), + vdev->tc_to_txq[tc].count); -#ifdef IXGBE_FCOE + return txq; + } /* * only execute the code below if protocol is FCoE @@ -8226,11 +8237,11 @@ static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb, case htons(ETH_P_FIP): adapter = netdev_priv(dev); - if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) + if (!sb_dev && (adapter->flags & IXGBE_FLAG_FCOE_ENABLED)) break; /* fall through */ default: - return fallback(dev, skb); + return fallback(dev, skb, sb_dev); } f = &adapter->ring_feature[RING_F_FCOE]; @@ -8242,11 +8253,9 @@ static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb, txq -= f->indices; return txq + f->offset; -#else - return fallback(dev, skb); -#endif } +#endif static int ixgbe_xmit_xdp_ring(struct ixgbe_adapter *adapter, struct xdp_frame *xdpf) { @@ -8766,6 +8775,11 @@ static int ixgbe_reassign_macvlan_pool(struct net_device *vdev, void *data) /* if we cannot find a free pool then disable the offload */ netdev_err(vdev, "L2FW offload disabled due to lack of queue resources\n"); macvlan_release_l2fw_offload(vdev); + + /* unbind the queues and drop the subordinate channel config */ + netdev_unbind_sb_channel(adapter->netdev, vdev); + netdev_set_sb_channel(vdev, 0); + kfree(accel); return 0; @@ -9329,7 +9343,7 @@ static int ixgbe_setup_tc_block(struct net_device *dev, switch (f->command) { case TC_BLOCK_BIND: return tcf_block_cb_register(f->block, ixgbe_setup_tc_block_cb, - adapter, adapter); + adapter, adapter, f->extack); case TC_BLOCK_UNBIND: tcf_block_cb_unregister(f->block, ixgbe_setup_tc_block_cb, adapter); @@ -9769,6 +9783,13 @@ static void *ixgbe_fwd_add(struct net_device *pdev, struct net_device *vdev) if (!macvlan_supports_dest_filter(vdev)) return ERR_PTR(-EMEDIUMTYPE); + /* We need to lock down the macvlan to be a single queue device so that + * we can reuse the tc_to_txq field in the macvlan netdev to represent + * the queue mapping to our netdev. + */ + if (netif_is_multiqueue(vdev)) + return ERR_PTR(-ERANGE); + pool = find_first_zero_bit(adapter->fwd_bitmask, adapter->num_rx_pools); if (pool == adapter->num_rx_pools) { u16 used_pools = adapter->num_vfs + adapter->num_rx_pools; @@ -9825,6 +9846,7 @@ static void *ixgbe_fwd_add(struct net_device *pdev, struct net_device *vdev) return ERR_PTR(-ENOMEM); set_bit(pool, adapter->fwd_bitmask); + netdev_set_sb_channel(vdev, pool); accel->pool = pool; accel->netdev = vdev; @@ -9866,6 +9888,10 @@ static void ixgbe_fwd_del(struct net_device *pdev, void *priv) ring->netdev = NULL; } + /* unbind the queues and drop the subordinate channel config */ + netdev_unbind_sb_channel(pdev, accel->netdev); + netdev_set_sb_channel(accel->netdev, 0); + clear_bit(accel->pool, adapter->fwd_bitmask); kfree(accel); } @@ -9966,7 +9992,6 @@ static int ixgbe_xdp(struct net_device *dev, struct netdev_bpf *xdp) case XDP_SETUP_PROG: return ixgbe_xdp_setup(dev, xdp->prog); case XDP_QUERY_PROG: - xdp->prog_attached = !!(adapter->xdp_prog); xdp->prog_id = adapter->xdp_prog ? 
adapter->xdp_prog->aux->id : 0; return 0; @@ -10026,7 +10051,6 @@ static const struct net_device_ops ixgbe_netdev_ops = { .ndo_open = ixgbe_open, .ndo_stop = ixgbe_close, .ndo_start_xmit = ixgbe_xmit_frame, - .ndo_select_queue = ixgbe_select_queue, .ndo_set_rx_mode = ixgbe_set_rx_mode, .ndo_validate_addr = eth_validate_addr, .ndo_set_mac_address = ixgbe_set_mac, @@ -10049,6 +10073,7 @@ static const struct net_device_ops ixgbe_netdev_ops = { .ndo_poll_controller = ixgbe_netpoll, #endif #ifdef IXGBE_FCOE + .ndo_select_queue = ixgbe_select_queue, .ndo_fcoe_ddp_setup = ixgbe_fcoe_ddp_get, .ndo_fcoe_ddp_target = ixgbe_fcoe_ddp_target, .ndo_fcoe_ddp_done = ixgbe_fcoe_ddp_put, diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c index 59416eddd840..d86446d202d5 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c +++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c @@ -4462,7 +4462,6 @@ static int ixgbevf_xdp(struct net_device *dev, struct netdev_bpf *xdp) case XDP_SETUP_PROG: return ixgbevf_xdp_setup(dev, xdp->prog); case XDP_QUERY_PROG: - xdp->prog_attached = !!(adapter->xdp_prog); xdp->prog_id = adapter->xdp_prog ? adapter->xdp_prog->aux->id : 0; return 0; diff --git a/drivers/net/ethernet/lantiq_etop.c b/drivers/net/ethernet/lantiq_etop.c index afc810069440..7a637b51c7d2 100644 --- a/drivers/net/ethernet/lantiq_etop.c +++ b/drivers/net/ethernet/lantiq_etop.c @@ -563,14 +563,6 @@ ltq_etop_set_multicast_list(struct net_device *dev) spin_unlock_irqrestore(&priv->lock, flags); } -static u16 -ltq_etop_select_queue(struct net_device *dev, struct sk_buff *skb, - void *accel_priv, select_queue_fallback_t fallback) -{ - /* we are currently only using the first queue */ - return 0; -} - static int ltq_etop_init(struct net_device *dev) { @@ -641,7 +633,7 @@ static const struct net_device_ops ltq_eth_netdev_ops = { .ndo_set_mac_address = ltq_etop_set_mac_address, .ndo_validate_addr = eth_validate_addr, .ndo_set_rx_mode = ltq_etop_set_multicast_list, - .ndo_select_queue = ltq_etop_select_queue, + .ndo_select_queue = dev_pick_tx_zero, .ndo_init = ltq_etop_init, .ndo_tx_timeout = ltq_etop_tx_timeout, }; diff --git a/drivers/net/ethernet/marvell/mvpp2/Makefile b/drivers/net/ethernet/marvell/mvpp2/Makefile index 4d11dd9e3246..51f65a202c6e 100644 --- a/drivers/net/ethernet/marvell/mvpp2/Makefile +++ b/drivers/net/ethernet/marvell/mvpp2/Makefile @@ -4,4 +4,4 @@ # obj-$(CONFIG_MVPP2) := mvpp2.o -mvpp2-objs := mvpp2_main.o mvpp2_prs.o mvpp2_cls.o +mvpp2-objs := mvpp2_main.o mvpp2_prs.o mvpp2_cls.o mvpp2_debugfs.o diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2.h b/drivers/net/ethernet/marvell/mvpp2/mvpp2.h index def00dc3eb4e..67b9e81b7c02 100644 --- a/drivers/net/ethernet/marvell/mvpp2/mvpp2.h +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2.h @@ -1,17 +1,15 @@ +/* SPDX-License-Identifier: GPL-2.0 */ /* * Definitions for Marvell PPv2 network controller for Armada 375 SoC. * * Copyright (C) 2014 Marvell * * Marcin Wojtas <mw@semihalf.com> - * - * This file is licensed under the terms of the GNU General Public - * License version 2. This program is licensed "as is" without any - * warranty of any kind, whether express or implied. 
*/ #ifndef _MVPP2_H_ #define _MVPP2_H_ +#include <linux/interrupt.h> #include <linux/kernel.h> #include <linux/netdevice.h> #include <linux/phy.h> @@ -66,15 +64,18 @@ #define MVPP2_PRS_SRAM_DATA_REG(idx) (0x1204 + (idx) * 4) #define MVPP2_PRS_TCAM_CTRL_REG 0x1230 #define MVPP2_PRS_TCAM_EN_MASK BIT(0) +#define MVPP2_PRS_TCAM_HIT_IDX_REG 0x1240 +#define MVPP2_PRS_TCAM_HIT_CNT_REG 0x1244 +#define MVPP2_PRS_TCAM_HIT_CNT_MASK GENMASK(15, 0) /* RSS Registers */ #define MVPP22_RSS_INDEX 0x1500 #define MVPP22_RSS_INDEX_TABLE_ENTRY(idx) (idx) #define MVPP22_RSS_INDEX_TABLE(idx) ((idx) << 8) #define MVPP22_RSS_INDEX_QUEUE(idx) ((idx) << 16) -#define MVPP22_RSS_TABLE_ENTRY 0x1508 -#define MVPP22_RSS_TABLE 0x1510 +#define MVPP22_RXQ2RSS_TABLE 0x1504 #define MVPP22_RSS_TABLE_POINTER(p) (p) +#define MVPP22_RSS_TABLE_ENTRY 0x1508 #define MVPP22_RSS_WIDTH 0x150c /* Classifier Registers */ @@ -86,11 +87,28 @@ #define MVPP2_CLS_LKP_INDEX_WAY_OFFS 6 #define MVPP2_CLS_LKP_TBL_REG 0x1818 #define MVPP2_CLS_LKP_TBL_RXQ_MASK 0xff +#define MVPP2_CLS_LKP_FLOW_PTR(flow) ((flow) << 16) #define MVPP2_CLS_LKP_TBL_LOOKUP_EN_MASK BIT(25) #define MVPP2_CLS_FLOW_INDEX_REG 0x1820 #define MVPP2_CLS_FLOW_TBL0_REG 0x1824 +#define MVPP2_CLS_FLOW_TBL0_LAST BIT(0) +#define MVPP2_CLS_FLOW_TBL0_ENG_MASK 0x7 +#define MVPP2_CLS_FLOW_TBL0_OFFS 1 +#define MVPP2_CLS_FLOW_TBL0_ENG(x) ((x) << 1) +#define MVPP2_CLS_FLOW_TBL0_PORT_ID_MASK 0xff +#define MVPP2_CLS_FLOW_TBL0_PORT_ID(port) ((port) << 4) +#define MVPP2_CLS_FLOW_TBL0_PORT_ID_SEL BIT(23) #define MVPP2_CLS_FLOW_TBL1_REG 0x1828 +#define MVPP2_CLS_FLOW_TBL1_N_FIELDS_MASK 0x7 +#define MVPP2_CLS_FLOW_TBL1_N_FIELDS(x) (x) +#define MVPP2_CLS_FLOW_TBL1_PRIO_MASK 0x3f +#define MVPP2_CLS_FLOW_TBL1_PRIO(x) ((x) << 9) +#define MVPP2_CLS_FLOW_TBL1_SEQ_MASK 0x7 +#define MVPP2_CLS_FLOW_TBL1_SEQ(x) ((x) << 15) #define MVPP2_CLS_FLOW_TBL2_REG 0x182c +#define MVPP2_CLS_FLOW_TBL2_FLD_MASK 0x3f +#define MVPP2_CLS_FLOW_TBL2_FLD_OFFS(n) ((n) * 6) +#define MVPP2_CLS_FLOW_TBL2_FLD(n, x) ((x) << ((n) * 6)) #define MVPP2_CLS_OVERSIZE_RXQ_LOW_REG(port) (0x1980 + ((port) * 4)) #define MVPP2_CLS_OVERSIZE_RXQ_LOW_BITS 3 #define MVPP2_CLS_OVERSIZE_RXQ_LOW_MASK 0x7 @@ -98,6 +116,32 @@ #define MVPP2_CLS_SWFWD_PCTRL_REG 0x19d0 #define MVPP2_CLS_SWFWD_PCTRL_MASK(port) (1 << (port)) +/* Classifier C2 engine Registers */ +#define MVPP22_CLS_C2_TCAM_IDX 0x1b00 +#define MVPP22_CLS_C2_TCAM_DATA0 0x1b10 +#define MVPP22_CLS_C2_TCAM_DATA1 0x1b14 +#define MVPP22_CLS_C2_TCAM_DATA2 0x1b18 +#define MVPP22_CLS_C2_TCAM_DATA3 0x1b1c +#define MVPP22_CLS_C2_TCAM_DATA4 0x1b20 +#define MVPP22_CLS_C2_PORT_ID(port) ((port) << 8) +#define MVPP22_CLS_C2_HIT_CTR 0x1b50 +#define MVPP22_CLS_C2_ACT 0x1b60 +#define MVPP22_CLS_C2_ACT_RSS_EN(act) (((act) & 0x3) << 19) +#define MVPP22_CLS_C2_ACT_FWD(act) (((act) & 0x7) << 13) +#define MVPP22_CLS_C2_ACT_QHIGH(act) (((act) & 0x3) << 11) +#define MVPP22_CLS_C2_ACT_QLOW(act) (((act) & 0x3) << 9) +#define MVPP22_CLS_C2_ATTR0 0x1b64 +#define MVPP22_CLS_C2_ATTR0_QHIGH(qh) (((qh) & 0x1f) << 24) +#define MVPP22_CLS_C2_ATTR0_QHIGH_MASK 0x1f +#define MVPP22_CLS_C2_ATTR0_QHIGH_OFFS 24 +#define MVPP22_CLS_C2_ATTR0_QLOW(ql) (((ql) & 0x7) << 21) +#define MVPP22_CLS_C2_ATTR0_QLOW_MASK 0x7 +#define MVPP22_CLS_C2_ATTR0_QLOW_OFFS 21 +#define MVPP22_CLS_C2_ATTR1 0x1b68 +#define MVPP22_CLS_C2_ATTR2 0x1b6c +#define MVPP22_CLS_C2_ATTR2_RSS_EN BIT(30) +#define MVPP22_CLS_C2_ATTR3 0x1b70 + /* Descriptor Manager Top Registers */ #define MVPP2_RXQ_NUM_REG 0x2040 #define MVPP2_RXQ_DESC_ADDR_REG 0x2044 @@ -275,6 +319,11 @@ 
#define MVPP22_BM_ADDR_HIGH_VIRT_RLS_MASK 0xff00 #define MVPP22_BM_ADDR_HIGH_VIRT_RLS_SHIFT 8 +/* Hit counters registers */ +#define MVPP2_CTRS_IDX 0x7040 +#define MVPP2_CLS_DEC_TBL_HIT_CTR 0x7700 +#define MVPP2_CLS_FLOW_TBL_HIT_CTR 0x7704 + /* TX Scheduler registers */ #define MVPP2_TXP_SCHED_PORT_INDEX_REG 0x8000 #define MVPP2_TXP_SCHED_Q_CMD_REG 0x8004 @@ -499,7 +548,7 @@ #define MVPP2_MAX_SKB_DESCS (MVPP2_MAX_TSO_SEGS * 2 + MAX_SKB_FRAGS) /* Dfault number of RXQs in use */ -#define MVPP2_DEFAULT_RXQ 4 +#define MVPP2_DEFAULT_RXQ 1 /* Max number of Rx descriptors */ #define MVPP2_MAX_RXD_MAX 1024 @@ -553,6 +602,11 @@ ((total_size) - NET_SKB_PAD - MVPP2_SKB_SHINFO_SIZE) #define MVPP2_BIT_TO_BYTE(bit) ((bit) / 8) +#define MVPP2_BIT_TO_WORD(bit) ((bit) / 32) +#define MVPP2_BIT_IN_WORD(bit) ((bit) % 32) + +/* RSS constants */ +#define MVPP22_RSS_TABLE_ENTRIES 32 /* IPv6 max L3 address size */ #define MVPP2_MAX_L3_ADDR_SIZE 16 @@ -703,6 +757,9 @@ struct mvpp2 { /* Workqueue to gather hardware statistics */ char queue_name[30]; struct workqueue_struct *stats_queue; + + /* Debugfs root entry */ + struct dentry *dbgfs_dir; }; struct mvpp2_pcpu_stats { @@ -795,6 +852,9 @@ struct mvpp2_port { bool has_tx_irqs; u32 tx_time_coal; + + /* RSS indirection table */ + u32 indir[MVPP22_RSS_TABLE_ENTRIES]; }; /* The mvpp2_tx_desc and mvpp2_rx_desc structures describe the @@ -831,52 +891,52 @@ struct mvpp2_port { /* HW TX descriptor for PPv2.1 */ struct mvpp21_tx_desc { - u32 command; /* Options used by HW for packet transmitting.*/ + __le32 command; /* Options used by HW for packet transmitting.*/ u8 packet_offset; /* the offset from the buffer beginning */ u8 phys_txq; /* destination queue ID */ - u16 data_size; /* data size of transmitted packet in bytes */ - u32 buf_dma_addr; /* physical addr of transmitted buffer */ - u32 buf_cookie; /* cookie for access to TX buffer in tx path */ - u32 reserved1[3]; /* hw_cmd (for future use, BM, PON, PNC) */ - u32 reserved2; /* reserved (for future use) */ + __le16 data_size; /* data size of transmitted packet in bytes */ + __le32 buf_dma_addr; /* physical addr of transmitted buffer */ + __le32 buf_cookie; /* cookie for access to TX buffer in tx path */ + __le32 reserved1[3]; /* hw_cmd (for future use, BM, PON, PNC) */ + __le32 reserved2; /* reserved (for future use) */ }; /* HW RX descriptor for PPv2.1 */ struct mvpp21_rx_desc { - u32 status; /* info about received packet */ - u16 reserved1; /* parser_info (for future use, PnC) */ - u16 data_size; /* size of received packet in bytes */ - u32 buf_dma_addr; /* physical address of the buffer */ - u32 buf_cookie; /* cookie for access to RX buffer in rx path */ - u16 reserved2; /* gem_port_id (for future use, PON) */ - u16 reserved3; /* csum_l4 (for future use, PnC) */ + __le32 status; /* info about received packet */ + __le16 reserved1; /* parser_info (for future use, PnC) */ + __le16 data_size; /* size of received packet in bytes */ + __le32 buf_dma_addr; /* physical address of the buffer */ + __le32 buf_cookie; /* cookie for access to RX buffer in rx path */ + __le16 reserved2; /* gem_port_id (for future use, PON) */ + __le16 reserved3; /* csum_l4 (for future use, PnC) */ u8 reserved4; /* bm_qset (for future use, BM) */ u8 reserved5; - u16 reserved6; /* classify_info (for future use, PnC) */ - u32 reserved7; /* flow_id (for future use, PnC) */ - u32 reserved8; + __le16 reserved6; /* classify_info (for future use, PnC) */ + __le32 reserved7; /* flow_id (for future use, PnC) */ + __le32 reserved8; }; /* HW TX descriptor 
for PPv2.2 */ struct mvpp22_tx_desc { - u32 command; + __le32 command; u8 packet_offset; u8 phys_txq; - u16 data_size; - u64 reserved1; - u64 buf_dma_addr_ptp; - u64 buf_cookie_misc; + __le16 data_size; + __le64 reserved1; + __le64 buf_dma_addr_ptp; + __le64 buf_cookie_misc; }; /* HW RX descriptor for PPv2.2 */ struct mvpp22_rx_desc { - u32 status; - u16 reserved1; - u16 data_size; - u32 reserved2; - u32 reserved3; - u64 buf_dma_addr_key_hash; - u64 buf_cookie_misc; + __le32 status; + __le16 reserved1; + __le16 data_size; + __le32 reserved2; + __le32 reserved3; + __le64 buf_dma_addr_key_hash; + __le64 buf_cookie_misc; }; /* Opaque type used by the driver to manipulate the HW TX and RX @@ -1043,4 +1103,8 @@ u32 mvpp2_percpu_read(struct mvpp2 *priv, int cpu, u32 offset); void mvpp2_percpu_write_relaxed(struct mvpp2 *priv, int cpu, u32 offset, u32 data); +void mvpp2_dbgfs_init(struct mvpp2 *priv, const char *name); + +void mvpp2_dbgfs_cleanup(struct mvpp2 *priv); + #endif diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c index 8581d5b17dd5..efdb7a656835 100644 --- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c @@ -1,17 +1,343 @@ +// SPDX-License-Identifier: GPL-2.0 /* * RSS and Classifier helpers for Marvell PPv2 Network Controller * * Copyright (C) 2014 Marvell * * Marcin Wojtas <mw@semihalf.com> - * - * This file is licensed under the terms of the GNU General Public - * License version 2. This program is licensed "as is" without any - * warranty of any kind, whether express or implied. */ #include "mvpp2.h" #include "mvpp2_cls.h" +#include "mvpp2_prs.h" + +#define MVPP2_DEF_FLOW(_type, _id, _opts, _ri, _ri_mask) \ +{ \ + .flow_type = _type, \ + .flow_id = _id, \ + .supported_hash_opts = _opts, \ + .prs_ri = { \ + .ri = _ri, \ + .ri_mask = _ri_mask \ + } \ +} + +static struct mvpp2_cls_flow cls_flows[MVPP2_N_FLOWS] = { + /* TCP over IPv4 flows, Not fragmented, no vlan tag */ + MVPP2_DEF_FLOW(TCP_V4_FLOW, MVPP2_FL_IP4_TCP_NF_UNTAG, + MVPP22_CLS_HEK_IP4_5T, + MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4 | + MVPP2_PRS_RI_L4_TCP, + MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK), + + MVPP2_DEF_FLOW(TCP_V4_FLOW, MVPP2_FL_IP4_TCP_NF_UNTAG, + MVPP22_CLS_HEK_IP4_5T, + MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4_OPT | + MVPP2_PRS_RI_L4_TCP, + MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK), + + MVPP2_DEF_FLOW(TCP_V4_FLOW, MVPP2_FL_IP4_TCP_NF_UNTAG, + MVPP22_CLS_HEK_IP4_5T, + MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4_OTHER | + MVPP2_PRS_RI_L4_TCP, + MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK), + + /* TCP over IPv4 flows, Not fragmented, with vlan tag */ + MVPP2_DEF_FLOW(TCP_V4_FLOW, MVPP2_FL_IP4_TCP_NF_TAG, + MVPP22_CLS_HEK_IP4_5T | MVPP22_CLS_HEK_OPT_VLAN, + MVPP2_PRS_RI_L3_IP4 | MVPP2_PRS_RI_L4_TCP, + MVPP2_PRS_IP_MASK), + + MVPP2_DEF_FLOW(TCP_V4_FLOW, MVPP2_FL_IP4_TCP_NF_TAG, + MVPP22_CLS_HEK_IP4_5T | MVPP22_CLS_HEK_OPT_VLAN, + MVPP2_PRS_RI_L3_IP4_OPT | MVPP2_PRS_RI_L4_TCP, + MVPP2_PRS_IP_MASK), + + MVPP2_DEF_FLOW(TCP_V4_FLOW, MVPP2_FL_IP4_TCP_NF_TAG, + MVPP22_CLS_HEK_IP4_5T | MVPP22_CLS_HEK_OPT_VLAN, + MVPP2_PRS_RI_L3_IP4_OTHER | MVPP2_PRS_RI_L4_TCP, + MVPP2_PRS_IP_MASK), + + /* TCP over IPv4 flows, fragmented, no vlan tag */ + MVPP2_DEF_FLOW(TCP_V4_FLOW, MVPP2_FL_IP4_TCP_FRAG_UNTAG, + MVPP22_CLS_HEK_IP4_2T, + MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4 | + MVPP2_PRS_RI_L4_TCP, + MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK), + + MVPP2_DEF_FLOW(TCP_V4_FLOW, MVPP2_FL_IP4_TCP_FRAG_UNTAG, + 
MVPP22_CLS_HEK_IP4_2T, + MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4_OPT | + MVPP2_PRS_RI_L4_TCP, + MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK), + + MVPP2_DEF_FLOW(TCP_V4_FLOW, MVPP2_FL_IP4_TCP_FRAG_UNTAG, + MVPP22_CLS_HEK_IP4_2T, + MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4_OTHER | + MVPP2_PRS_RI_L4_TCP, + MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK), + + /* TCP over IPv4 flows, fragmented, with vlan tag */ + MVPP2_DEF_FLOW(TCP_V4_FLOW, MVPP2_FL_IP4_TCP_FRAG_TAG, + MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_OPT_VLAN, + MVPP2_PRS_RI_L3_IP4 | MVPP2_PRS_RI_L4_TCP, + MVPP2_PRS_IP_MASK), + + MVPP2_DEF_FLOW(TCP_V4_FLOW, MVPP2_FL_IP4_TCP_FRAG_TAG, + MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_OPT_VLAN, + MVPP2_PRS_RI_L3_IP4_OPT | MVPP2_PRS_RI_L4_TCP, + MVPP2_PRS_IP_MASK), + + MVPP2_DEF_FLOW(TCP_V4_FLOW, MVPP2_FL_IP4_TCP_FRAG_TAG, + MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_OPT_VLAN, + MVPP2_PRS_RI_L3_IP4_OTHER | MVPP2_PRS_RI_L4_TCP, + MVPP2_PRS_IP_MASK), + + /* UDP over IPv4 flows, Not fragmented, no vlan tag */ + MVPP2_DEF_FLOW(UDP_V4_FLOW, MVPP2_FL_IP4_UDP_NF_UNTAG, + MVPP22_CLS_HEK_IP4_5T, + MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4 | + MVPP2_PRS_RI_L4_UDP, + MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK), + + MVPP2_DEF_FLOW(UDP_V4_FLOW, MVPP2_FL_IP4_UDP_NF_UNTAG, + MVPP22_CLS_HEK_IP4_5T, + MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4_OPT | + MVPP2_PRS_RI_L4_UDP, + MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK), + + MVPP2_DEF_FLOW(UDP_V4_FLOW, MVPP2_FL_IP4_UDP_NF_UNTAG, + MVPP22_CLS_HEK_IP4_5T, + MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4_OTHER | + MVPP2_PRS_RI_L4_UDP, + MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK), + + /* UDP over IPv4 flows, Not fragmented, with vlan tag */ + MVPP2_DEF_FLOW(UDP_V4_FLOW, MVPP2_FL_IP4_UDP_NF_TAG, + MVPP22_CLS_HEK_IP4_5T | MVPP22_CLS_HEK_OPT_VLAN, + MVPP2_PRS_RI_L3_IP4 | MVPP2_PRS_RI_L4_UDP, + MVPP2_PRS_IP_MASK), + + MVPP2_DEF_FLOW(UDP_V4_FLOW, MVPP2_FL_IP4_UDP_NF_TAG, + MVPP22_CLS_HEK_IP4_5T | MVPP22_CLS_HEK_OPT_VLAN, + MVPP2_PRS_RI_L3_IP4_OPT | MVPP2_PRS_RI_L4_UDP, + MVPP2_PRS_IP_MASK), + + MVPP2_DEF_FLOW(UDP_V4_FLOW, MVPP2_FL_IP4_UDP_NF_TAG, + MVPP22_CLS_HEK_IP4_5T | MVPP22_CLS_HEK_OPT_VLAN, + MVPP2_PRS_RI_L3_IP4_OTHER | MVPP2_PRS_RI_L4_UDP, + MVPP2_PRS_IP_MASK), + + /* UDP over IPv4 flows, fragmented, no vlan tag */ + MVPP2_DEF_FLOW(UDP_V4_FLOW, MVPP2_FL_IP4_UDP_FRAG_UNTAG, + MVPP22_CLS_HEK_IP4_2T, + MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4 | + MVPP2_PRS_RI_L4_UDP, + MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK), + + MVPP2_DEF_FLOW(UDP_V4_FLOW, MVPP2_FL_IP4_UDP_FRAG_UNTAG, + MVPP22_CLS_HEK_IP4_2T, + MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4_OPT | + MVPP2_PRS_RI_L4_UDP, + MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK), + + MVPP2_DEF_FLOW(UDP_V4_FLOW, MVPP2_FL_IP4_UDP_FRAG_UNTAG, + MVPP22_CLS_HEK_IP4_2T, + MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4_OTHER | + MVPP2_PRS_RI_L4_UDP, + MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK), + + /* UDP over IPv4 flows, fragmented, with vlan tag */ + MVPP2_DEF_FLOW(UDP_V4_FLOW, MVPP2_FL_IP4_UDP_FRAG_TAG, + MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_OPT_VLAN, + MVPP2_PRS_RI_L3_IP4 | MVPP2_PRS_RI_L4_UDP, + MVPP2_PRS_IP_MASK), + + MVPP2_DEF_FLOW(UDP_V4_FLOW, MVPP2_FL_IP4_UDP_FRAG_TAG, + MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_OPT_VLAN, + MVPP2_PRS_RI_L3_IP4_OPT | MVPP2_PRS_RI_L4_UDP, + MVPP2_PRS_IP_MASK), + + MVPP2_DEF_FLOW(UDP_V4_FLOW, MVPP2_FL_IP4_UDP_FRAG_TAG, + MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_OPT_VLAN, + MVPP2_PRS_RI_L3_IP4_OTHER | MVPP2_PRS_RI_L4_UDP, + MVPP2_PRS_IP_MASK), + + /* TCP over IPv6 flows, not fragmented, no vlan tag 
*/ + MVPP2_DEF_FLOW(TCP_V6_FLOW, MVPP2_FL_IP6_TCP_NF_UNTAG, + MVPP22_CLS_HEK_IP6_5T, + MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP6 | + MVPP2_PRS_RI_L4_TCP, + MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK), + + MVPP2_DEF_FLOW(TCP_V6_FLOW, MVPP2_FL_IP6_TCP_NF_UNTAG, + MVPP22_CLS_HEK_IP6_5T, + MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP6_EXT | + MVPP2_PRS_RI_L4_TCP, + MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK), + + /* TCP over IPv6 flows, not fragmented, with vlan tag */ + MVPP2_DEF_FLOW(TCP_V6_FLOW, MVPP2_FL_IP6_TCP_NF_TAG, + MVPP22_CLS_HEK_IP6_5T | MVPP22_CLS_HEK_OPT_VLAN, + MVPP2_PRS_RI_L3_IP6 | MVPP2_PRS_RI_L4_TCP, + MVPP2_PRS_IP_MASK), + + MVPP2_DEF_FLOW(TCP_V6_FLOW, MVPP2_FL_IP6_TCP_NF_TAG, + MVPP22_CLS_HEK_IP6_5T | MVPP22_CLS_HEK_OPT_VLAN, + MVPP2_PRS_RI_L3_IP6_EXT | MVPP2_PRS_RI_L4_TCP, + MVPP2_PRS_IP_MASK), + + /* TCP over IPv6 flows, fragmented, no vlan tag */ + MVPP2_DEF_FLOW(TCP_V6_FLOW, MVPP2_FL_IP6_TCP_FRAG_UNTAG, + MVPP22_CLS_HEK_IP6_2T, + MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP6 | + MVPP2_PRS_RI_IP_FRAG_TRUE | MVPP2_PRS_RI_L4_TCP, + MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK), + + MVPP2_DEF_FLOW(TCP_V6_FLOW, MVPP2_FL_IP6_TCP_FRAG_UNTAG, + MVPP22_CLS_HEK_IP6_2T, + MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP6_EXT | + MVPP2_PRS_RI_IP_FRAG_TRUE | MVPP2_PRS_RI_L4_TCP, + MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK), + + /* TCP over IPv6 flows, fragmented, with vlan tag */ + MVPP2_DEF_FLOW(TCP_V6_FLOW, MVPP2_FL_IP6_TCP_FRAG_TAG, + MVPP22_CLS_HEK_IP6_2T | MVPP22_CLS_HEK_OPT_VLAN, + MVPP2_PRS_RI_L3_IP6 | MVPP2_PRS_RI_IP_FRAG_TRUE | + MVPP2_PRS_RI_L4_TCP, + MVPP2_PRS_IP_MASK), + + MVPP2_DEF_FLOW(TCP_V6_FLOW, MVPP2_FL_IP6_TCP_FRAG_TAG, + MVPP22_CLS_HEK_IP6_2T | MVPP22_CLS_HEK_OPT_VLAN, + MVPP2_PRS_RI_L3_IP6_EXT | MVPP2_PRS_RI_IP_FRAG_TRUE | + MVPP2_PRS_RI_L4_TCP, + MVPP2_PRS_IP_MASK), + + /* UDP over IPv6 flows, not fragmented, no vlan tag */ + MVPP2_DEF_FLOW(UDP_V6_FLOW, MVPP2_FL_IP6_UDP_NF_UNTAG, + MVPP22_CLS_HEK_IP6_5T, + MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP6 | + MVPP2_PRS_RI_L4_UDP, + MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK), + + MVPP2_DEF_FLOW(UDP_V6_FLOW, MVPP2_FL_IP6_UDP_NF_UNTAG, + MVPP22_CLS_HEK_IP6_5T, + MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP6_EXT | + MVPP2_PRS_RI_L4_UDP, + MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK), + + /* UDP over IPv6 flows, not fragmented, with vlan tag */ + MVPP2_DEF_FLOW(UDP_V6_FLOW, MVPP2_FL_IP6_UDP_NF_TAG, + MVPP22_CLS_HEK_IP6_5T | MVPP22_CLS_HEK_OPT_VLAN, + MVPP2_PRS_RI_L3_IP6 | MVPP2_PRS_RI_L4_UDP, + MVPP2_PRS_IP_MASK), + + MVPP2_DEF_FLOW(UDP_V6_FLOW, MVPP2_FL_IP6_UDP_NF_TAG, + MVPP22_CLS_HEK_IP6_5T | MVPP22_CLS_HEK_OPT_VLAN, + MVPP2_PRS_RI_L3_IP6_EXT | MVPP2_PRS_RI_L4_UDP, + MVPP2_PRS_IP_MASK), + + /* UDP over IPv6 flows, fragmented, no vlan tag */ + MVPP2_DEF_FLOW(UDP_V6_FLOW, MVPP2_FL_IP6_UDP_FRAG_UNTAG, + MVPP22_CLS_HEK_IP6_2T, + MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP6 | + MVPP2_PRS_RI_IP_FRAG_TRUE | MVPP2_PRS_RI_L4_UDP, + MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK), + + MVPP2_DEF_FLOW(UDP_V6_FLOW, MVPP2_FL_IP6_UDP_FRAG_UNTAG, + MVPP22_CLS_HEK_IP6_2T, + MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP6_EXT | + MVPP2_PRS_RI_IP_FRAG_TRUE | MVPP2_PRS_RI_L4_UDP, + MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK), + + /* UDP over IPv6 flows, fragmented, with vlan tag */ + MVPP2_DEF_FLOW(UDP_V6_FLOW, MVPP2_FL_IP6_UDP_FRAG_TAG, + MVPP22_CLS_HEK_IP6_2T | MVPP22_CLS_HEK_OPT_VLAN, + MVPP2_PRS_RI_L3_IP6 | MVPP2_PRS_RI_IP_FRAG_TRUE | + MVPP2_PRS_RI_L4_UDP, + MVPP2_PRS_IP_MASK), + + MVPP2_DEF_FLOW(UDP_V6_FLOW, MVPP2_FL_IP6_UDP_FRAG_TAG, + 
MVPP22_CLS_HEK_IP6_2T | MVPP22_CLS_HEK_OPT_VLAN, + MVPP2_PRS_RI_L3_IP6_EXT | MVPP2_PRS_RI_IP_FRAG_TRUE | + MVPP2_PRS_RI_L4_UDP, + MVPP2_PRS_IP_MASK), + + /* IPv4 flows, no vlan tag */ + MVPP2_DEF_FLOW(IPV4_FLOW, MVPP2_FL_IP4_UNTAG, + MVPP22_CLS_HEK_IP4_2T, + MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4, + MVPP2_PRS_RI_VLAN_MASK | MVPP2_PRS_RI_L3_PROTO_MASK), + MVPP2_DEF_FLOW(IPV4_FLOW, MVPP2_FL_IP4_UNTAG, + MVPP22_CLS_HEK_IP4_2T, + MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4_OPT, + MVPP2_PRS_RI_VLAN_MASK | MVPP2_PRS_RI_L3_PROTO_MASK), + MVPP2_DEF_FLOW(IPV4_FLOW, MVPP2_FL_IP4_UNTAG, + MVPP22_CLS_HEK_IP4_2T, + MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4_OTHER, + MVPP2_PRS_RI_VLAN_MASK | MVPP2_PRS_RI_L3_PROTO_MASK), + + /* IPv4 flows, with vlan tag */ + MVPP2_DEF_FLOW(IPV4_FLOW, MVPP2_FL_IP4_TAG, + MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_OPT_VLAN, + MVPP2_PRS_RI_L3_IP4, + MVPP2_PRS_RI_L3_PROTO_MASK), + MVPP2_DEF_FLOW(IPV4_FLOW, MVPP2_FL_IP4_TAG, + MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_OPT_VLAN, + MVPP2_PRS_RI_L3_IP4_OPT, + MVPP2_PRS_RI_L3_PROTO_MASK), + MVPP2_DEF_FLOW(IPV4_FLOW, MVPP2_FL_IP4_TAG, + MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_OPT_VLAN, + MVPP2_PRS_RI_L3_IP4_OTHER, + MVPP2_PRS_RI_L3_PROTO_MASK), + + /* IPv6 flows, no vlan tag */ + MVPP2_DEF_FLOW(IPV6_FLOW, MVPP2_FL_IP6_UNTAG, + MVPP22_CLS_HEK_IP6_2T, + MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP6, + MVPP2_PRS_RI_VLAN_MASK | MVPP2_PRS_RI_L3_PROTO_MASK), + MVPP2_DEF_FLOW(IPV6_FLOW, MVPP2_FL_IP6_UNTAG, + MVPP22_CLS_HEK_IP6_2T, + MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP6, + MVPP2_PRS_RI_VLAN_MASK | MVPP2_PRS_RI_L3_PROTO_MASK), + + /* IPv6 flows, with vlan tag */ + MVPP2_DEF_FLOW(IPV6_FLOW, MVPP2_FL_IP6_TAG, + MVPP22_CLS_HEK_IP6_2T | MVPP22_CLS_HEK_OPT_VLAN, + MVPP2_PRS_RI_L3_IP6, + MVPP2_PRS_RI_L3_PROTO_MASK), + MVPP2_DEF_FLOW(IPV6_FLOW, MVPP2_FL_IP6_TAG, + MVPP22_CLS_HEK_IP6_2T | MVPP22_CLS_HEK_OPT_VLAN, + MVPP2_PRS_RI_L3_IP6, + MVPP2_PRS_RI_L3_PROTO_MASK), + + /* Non IP flow, no vlan tag */ + MVPP2_DEF_FLOW(ETHER_FLOW, MVPP2_FL_NON_IP_UNTAG, + 0, + MVPP2_PRS_RI_VLAN_NONE, + MVPP2_PRS_RI_VLAN_MASK), + /* Non IP flow, with vlan tag */ + MVPP2_DEF_FLOW(ETHER_FLOW, MVPP2_FL_NON_IP_TAG, + MVPP22_CLS_HEK_OPT_VLAN, + 0, 0), +}; + +u32 mvpp2_cls_flow_hits(struct mvpp2 *priv, int index) +{ + mvpp2_write(priv, MVPP2_CTRS_IDX, index); + + return mvpp2_read(priv, MVPP2_CLS_FLOW_TBL_HIT_CTR); +} + +void mvpp2_cls_flow_read(struct mvpp2 *priv, int index, + struct mvpp2_cls_flow_entry *fe) +{ + fe->index = index; + mvpp2_write(priv, MVPP2_CLS_FLOW_INDEX_REG, index); + fe->data[0] = mvpp2_read(priv, MVPP2_CLS_FLOW_TBL0_REG); + fe->data[1] = mvpp2_read(priv, MVPP2_CLS_FLOW_TBL1_REG); + fe->data[2] = mvpp2_read(priv, MVPP2_CLS_FLOW_TBL2_REG); +} /* Update classification flow table registers */ static void mvpp2_cls_flow_write(struct mvpp2 *priv, @@ -23,6 +349,25 @@ static void mvpp2_cls_flow_write(struct mvpp2 *priv, mvpp2_write(priv, MVPP2_CLS_FLOW_TBL2_REG, fe->data[2]); } +u32 mvpp2_cls_lookup_hits(struct mvpp2 *priv, int index) +{ + mvpp2_write(priv, MVPP2_CTRS_IDX, index); + + return mvpp2_read(priv, MVPP2_CLS_DEC_TBL_HIT_CTR); +} + +void mvpp2_cls_lookup_read(struct mvpp2 *priv, int lkpid, int way, + struct mvpp2_cls_lookup_entry *le) +{ + u32 val; + + val = (way << MVPP2_CLS_LKP_INDEX_WAY_OFFS) | lkpid; + mvpp2_write(priv, MVPP2_CLS_LKP_INDEX_REG, val); + le->way = way; + le->lkpid = lkpid; + le->data = mvpp2_read(priv, MVPP2_CLS_LKP_TBL_REG); +} + /* Update classification lookup table register */ static void mvpp2_cls_lookup_write(struct 
mvpp2 *priv, struct mvpp2_cls_lookup_entry *le) @@ -34,6 +379,439 @@ static void mvpp2_cls_lookup_write(struct mvpp2 *priv, mvpp2_write(priv, MVPP2_CLS_LKP_TBL_REG, le->data); } +/* Operations on flow entry */ +static int mvpp2_cls_flow_hek_num_get(struct mvpp2_cls_flow_entry *fe) +{ + return fe->data[1] & MVPP2_CLS_FLOW_TBL1_N_FIELDS_MASK; +} + +static void mvpp2_cls_flow_hek_num_set(struct mvpp2_cls_flow_entry *fe, + int num_of_fields) +{ + fe->data[1] &= ~MVPP2_CLS_FLOW_TBL1_N_FIELDS_MASK; + fe->data[1] |= MVPP2_CLS_FLOW_TBL1_N_FIELDS(num_of_fields); +} + +static int mvpp2_cls_flow_hek_get(struct mvpp2_cls_flow_entry *fe, + int field_index) +{ + return (fe->data[2] >> MVPP2_CLS_FLOW_TBL2_FLD_OFFS(field_index)) & + MVPP2_CLS_FLOW_TBL2_FLD_MASK; +} + +static void mvpp2_cls_flow_hek_set(struct mvpp2_cls_flow_entry *fe, + int field_index, int field_id) +{ + fe->data[2] &= ~MVPP2_CLS_FLOW_TBL2_FLD(field_index, + MVPP2_CLS_FLOW_TBL2_FLD_MASK); + fe->data[2] |= MVPP2_CLS_FLOW_TBL2_FLD(field_index, field_id); +} + +static void mvpp2_cls_flow_eng_set(struct mvpp2_cls_flow_entry *fe, + int engine) +{ + fe->data[0] &= ~MVPP2_CLS_FLOW_TBL0_ENG(MVPP2_CLS_FLOW_TBL0_ENG_MASK); + fe->data[0] |= MVPP2_CLS_FLOW_TBL0_ENG(engine); +} + +int mvpp2_cls_flow_eng_get(struct mvpp2_cls_flow_entry *fe) +{ + return (fe->data[0] >> MVPP2_CLS_FLOW_TBL0_OFFS) & + MVPP2_CLS_FLOW_TBL0_ENG_MASK; +} + +static void mvpp2_cls_flow_port_id_sel(struct mvpp2_cls_flow_entry *fe, + bool from_packet) +{ + if (from_packet) + fe->data[0] |= MVPP2_CLS_FLOW_TBL0_PORT_ID_SEL; + else + fe->data[0] &= ~MVPP2_CLS_FLOW_TBL0_PORT_ID_SEL; +} + +static void mvpp2_cls_flow_seq_set(struct mvpp2_cls_flow_entry *fe, u32 seq) +{ + fe->data[1] &= ~MVPP2_CLS_FLOW_TBL1_SEQ(MVPP2_CLS_FLOW_TBL1_SEQ_MASK); + fe->data[1] |= MVPP2_CLS_FLOW_TBL1_SEQ(seq); +} + +static void mvpp2_cls_flow_last_set(struct mvpp2_cls_flow_entry *fe, + bool is_last) +{ + fe->data[0] &= ~MVPP2_CLS_FLOW_TBL0_LAST; + fe->data[0] |= !!is_last; +} + +static void mvpp2_cls_flow_pri_set(struct mvpp2_cls_flow_entry *fe, int prio) +{ + fe->data[1] &= ~MVPP2_CLS_FLOW_TBL1_PRIO(MVPP2_CLS_FLOW_TBL1_PRIO_MASK); + fe->data[1] |= MVPP2_CLS_FLOW_TBL1_PRIO(prio); +} + +static void mvpp2_cls_flow_port_add(struct mvpp2_cls_flow_entry *fe, + u32 port) +{ + fe->data[0] |= MVPP2_CLS_FLOW_TBL0_PORT_ID(port); +} + +/* Initialize the parser entry for the given flow */ +static void mvpp2_cls_flow_prs_init(struct mvpp2 *priv, + struct mvpp2_cls_flow *flow) +{ + mvpp2_prs_add_flow(priv, flow->flow_id, flow->prs_ri.ri, + flow->prs_ri.ri_mask); +} + +/* Initialize the Lookup Id table entry for the given flow */ +static void mvpp2_cls_flow_lkp_init(struct mvpp2 *priv, + struct mvpp2_cls_flow *flow) +{ + struct mvpp2_cls_lookup_entry le; + + le.way = 0; + le.lkpid = flow->flow_id; + + /* The default RxQ for this port is set in the C2 lookup */ + le.data = 0; + + /* We point on the first lookup in the sequence for the flow, that is + * the C2 lookup. 
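 *
 * Editor's note (not part of the patch): the indexing macros used here
 * and in mvpp2_cls_flow_init() below are defined in mvpp2_cls.h further
 * down in this diff; each flow_id owns MVPP2_MAX_PORTS + 1 consecutive
 * flow-table entries:
 *
 *   MVPP2_FLOW_C2_ENTRY(id)           == id * (MVPP2_MAX_PORTS + 1)
 *   MVPP2_PORT_FLOW_HASH_ENTRY(p, id) == id * (MVPP2_MAX_PORTS + 1) + p + 1
 *
 * so the lookup entry programmed here lands on the flow's C2 entry, and
 * the per-port C3Hx hash entries follow right after it.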
+ */ + le.data |= MVPP2_CLS_LKP_FLOW_PTR(MVPP2_FLOW_C2_ENTRY(flow->flow_id)); + + /* CLS is always enabled, RSS is enabled/disabled in C2 lookup */ + le.data |= MVPP2_CLS_LKP_TBL_LOOKUP_EN_MASK; + + mvpp2_cls_lookup_write(priv, &le); +} + +/* Initialize the flow table entries for the given flow */ +static void mvpp2_cls_flow_init(struct mvpp2 *priv, struct mvpp2_cls_flow *flow) +{ + struct mvpp2_cls_flow_entry fe; + int i; + + /* C2 lookup */ + memset(&fe, 0, sizeof(fe)); + fe.index = MVPP2_FLOW_C2_ENTRY(flow->flow_id); + + mvpp2_cls_flow_eng_set(&fe, MVPP22_CLS_ENGINE_C2); + mvpp2_cls_flow_port_id_sel(&fe, true); + mvpp2_cls_flow_last_set(&fe, 0); + mvpp2_cls_flow_pri_set(&fe, 0); + mvpp2_cls_flow_seq_set(&fe, MVPP2_CLS_FLOW_SEQ_FIRST1); + + /* Add all ports */ + for (i = 0; i < MVPP2_MAX_PORTS; i++) + mvpp2_cls_flow_port_add(&fe, BIT(i)); + + mvpp2_cls_flow_write(priv, &fe); + + /* C3Hx lookups */ + for (i = 0; i < MVPP2_MAX_PORTS; i++) { + memset(&fe, 0, sizeof(fe)); + fe.index = MVPP2_PORT_FLOW_HASH_ENTRY(i, flow->flow_id); + + mvpp2_cls_flow_port_id_sel(&fe, true); + mvpp2_cls_flow_pri_set(&fe, i + 1); + mvpp2_cls_flow_seq_set(&fe, MVPP2_CLS_FLOW_SEQ_MIDDLE); + mvpp2_cls_flow_port_add(&fe, BIT(i)); + + mvpp2_cls_flow_write(priv, &fe); + } + + /* Update the last entry */ + mvpp2_cls_flow_last_set(&fe, 1); + mvpp2_cls_flow_seq_set(&fe, MVPP2_CLS_FLOW_SEQ_LAST); + + mvpp2_cls_flow_write(priv, &fe); +} + +/* Adds a field to the Header Extracted Key generation parameters*/ +static int mvpp2_flow_add_hek_field(struct mvpp2_cls_flow_entry *fe, + u32 field_id) +{ + int nb_fields = mvpp2_cls_flow_hek_num_get(fe); + + if (nb_fields == MVPP2_FLOW_N_FIELDS) + return -EINVAL; + + mvpp2_cls_flow_hek_set(fe, nb_fields, field_id); + + mvpp2_cls_flow_hek_num_set(fe, nb_fields + 1); + + return 0; +} + +static int mvpp2_flow_set_hek_fields(struct mvpp2_cls_flow_entry *fe, + unsigned long hash_opts) +{ + u32 field_id; + int i; + + /* Clear old fields */ + mvpp2_cls_flow_hek_num_set(fe, 0); + fe->data[2] = 0; + + for_each_set_bit(i, &hash_opts, MVPP22_CLS_HEK_N_FIELDS) { + switch (BIT(i)) { + case MVPP22_CLS_HEK_OPT_VLAN: + field_id = MVPP22_CLS_FIELD_VLAN; + break; + case MVPP22_CLS_HEK_OPT_IP4SA: + field_id = MVPP22_CLS_FIELD_IP4SA; + break; + case MVPP22_CLS_HEK_OPT_IP4DA: + field_id = MVPP22_CLS_FIELD_IP4DA; + break; + case MVPP22_CLS_HEK_OPT_IP6SA: + field_id = MVPP22_CLS_FIELD_IP6SA; + break; + case MVPP22_CLS_HEK_OPT_IP6DA: + field_id = MVPP22_CLS_FIELD_IP6DA; + break; + case MVPP22_CLS_HEK_OPT_L4SIP: + field_id = MVPP22_CLS_FIELD_L4SIP; + break; + case MVPP22_CLS_HEK_OPT_L4DIP: + field_id = MVPP22_CLS_FIELD_L4DIP; + break; + default: + return -EINVAL; + } + if (mvpp2_flow_add_hek_field(fe, field_id)) + return -EINVAL; + } + + return 0; +} + +struct mvpp2_cls_flow *mvpp2_cls_flow_get(int flow) +{ + if (flow >= MVPP2_N_FLOWS) + return NULL; + + return &cls_flows[flow]; +} + +/* Set the hash generation options for the given traffic flow. + * One traffic flow (in the ethtool sense) has multiple classification flows, + * to handle specific cases such as fragmentation, or the presence of a + * VLAN / DSA Tag. + * + * Each of these individual flows has different constraints, for example we + * can't hash fragmented packets on L4 data (else we would risk having packet + * re-ordering), so each classification flows masks the options with their + * supported ones. 
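 *
 * Editor's worked example (not part of the patch): a 5-tuple hash
 * request on tcp4 only sticks where the classification flow allows it.
 * The fragmented-TCP entries in cls_flows[] above advertise only the
 * 2-tuple, so for those entries
 *
 *   hash_opts = MVPP22_CLS_HEK_IP4_2T & MVPP22_CLS_HEK_IP4_5T
 *             = MVPP22_CLS_HEK_IP4_2T
 *
 * i.e. fragments fall back to src/dst IP hashing and cannot be
 * reordered by an L4-based hash.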
+ * + */ +static int mvpp2_port_rss_hash_opts_set(struct mvpp2_port *port, int flow_type, + u16 requested_opts) +{ + struct mvpp2_cls_flow_entry fe; + struct mvpp2_cls_flow *flow; + int i, engine, flow_index; + u16 hash_opts; + + for (i = 0; i < MVPP2_N_FLOWS; i++) { + flow = mvpp2_cls_flow_get(i); + if (!flow) + return -EINVAL; + + if (flow->flow_type != flow_type) + continue; + + flow_index = MVPP2_PORT_FLOW_HASH_ENTRY(port->id, + flow->flow_id); + + mvpp2_cls_flow_read(port->priv, flow_index, &fe); + + hash_opts = flow->supported_hash_opts & requested_opts; + + /* Use C3HB engine to access L4 infos. This adds L4 infos to the + * hash parameters + */ + if (hash_opts & MVPP22_CLS_HEK_L4_OPTS) + engine = MVPP22_CLS_ENGINE_C3HB; + else + engine = MVPP22_CLS_ENGINE_C3HA; + + if (mvpp2_flow_set_hek_fields(&fe, hash_opts)) + return -EINVAL; + + mvpp2_cls_flow_eng_set(&fe, engine); + + mvpp2_cls_flow_write(port->priv, &fe); + } + + return 0; +} + +u16 mvpp2_flow_get_hek_fields(struct mvpp2_cls_flow_entry *fe) +{ + u16 hash_opts = 0; + int n_fields, i, field; + + n_fields = mvpp2_cls_flow_hek_num_get(fe); + + for (i = 0; i < n_fields; i++) { + field = mvpp2_cls_flow_hek_get(fe, i); + + switch (field) { + case MVPP22_CLS_FIELD_MAC_DA: + hash_opts |= MVPP22_CLS_HEK_OPT_MAC_DA; + break; + case MVPP22_CLS_FIELD_VLAN: + hash_opts |= MVPP22_CLS_HEK_OPT_VLAN; + break; + case MVPP22_CLS_FIELD_L3_PROTO: + hash_opts |= MVPP22_CLS_HEK_OPT_L3_PROTO; + break; + case MVPP22_CLS_FIELD_IP4SA: + hash_opts |= MVPP22_CLS_HEK_OPT_IP4SA; + break; + case MVPP22_CLS_FIELD_IP4DA: + hash_opts |= MVPP22_CLS_HEK_OPT_IP4DA; + break; + case MVPP22_CLS_FIELD_IP6SA: + hash_opts |= MVPP22_CLS_HEK_OPT_IP6SA; + break; + case MVPP22_CLS_FIELD_IP6DA: + hash_opts |= MVPP22_CLS_HEK_OPT_IP6DA; + break; + case MVPP22_CLS_FIELD_L4SIP: + hash_opts |= MVPP22_CLS_HEK_OPT_L4SIP; + break; + case MVPP22_CLS_FIELD_L4DIP: + hash_opts |= MVPP22_CLS_HEK_OPT_L4DIP; + break; + default: + break; + } + } + return hash_opts; +} + +/* Returns the hash opts for this flow. There are several classifier flows + * for one traffic flow, this returns an aggregation of all configurations. 
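 *
 * Editor's note (not part of the patch): this aggregated view is what
 * mvpp2_ethtool_rxfh_get() further down translates back into RXH_* bits,
 * presumably serving the ETHTOOL_GRXFH query ("ethtool -n <dev>
 * rx-flow-hash ..."), just as mvpp2_ethtool_rxfh_set() feeds the setter
 * above from the corresponding ETHTOOL_SRXFH command.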
+ */ +static u16 mvpp2_port_rss_hash_opts_get(struct mvpp2_port *port, int flow_type) +{ + struct mvpp2_cls_flow_entry fe; + struct mvpp2_cls_flow *flow; + int i, flow_index; + u16 hash_opts = 0; + + for (i = 0; i < MVPP2_N_FLOWS; i++) { + flow = mvpp2_cls_flow_get(i); + if (!flow) + return 0; + + if (flow->flow_type != flow_type) + continue; + + flow_index = MVPP2_PORT_FLOW_HASH_ENTRY(port->id, + flow->flow_id); + + mvpp2_cls_flow_read(port->priv, flow_index, &fe); + + hash_opts |= mvpp2_flow_get_hek_fields(&fe); + } + + return hash_opts; +} + +static void mvpp2_cls_port_init_flows(struct mvpp2 *priv) +{ + struct mvpp2_cls_flow *flow; + int i; + + for (i = 0; i < MVPP2_N_FLOWS; i++) { + flow = mvpp2_cls_flow_get(i); + if (!flow) + break; + + mvpp2_cls_flow_prs_init(priv, flow); + mvpp2_cls_flow_lkp_init(priv, flow); + mvpp2_cls_flow_init(priv, flow); + } +} + +static void mvpp2_cls_c2_write(struct mvpp2 *priv, + struct mvpp2_cls_c2_entry *c2) +{ + mvpp2_write(priv, MVPP22_CLS_C2_TCAM_IDX, c2->index); + + /* Write TCAM */ + mvpp2_write(priv, MVPP22_CLS_C2_TCAM_DATA0, c2->tcam[0]); + mvpp2_write(priv, MVPP22_CLS_C2_TCAM_DATA1, c2->tcam[1]); + mvpp2_write(priv, MVPP22_CLS_C2_TCAM_DATA2, c2->tcam[2]); + mvpp2_write(priv, MVPP22_CLS_C2_TCAM_DATA3, c2->tcam[3]); + mvpp2_write(priv, MVPP22_CLS_C2_TCAM_DATA4, c2->tcam[4]); + + mvpp2_write(priv, MVPP22_CLS_C2_ACT, c2->act); + + mvpp2_write(priv, MVPP22_CLS_C2_ATTR0, c2->attr[0]); + mvpp2_write(priv, MVPP22_CLS_C2_ATTR1, c2->attr[1]); + mvpp2_write(priv, MVPP22_CLS_C2_ATTR2, c2->attr[2]); + mvpp2_write(priv, MVPP22_CLS_C2_ATTR3, c2->attr[3]); +} + +void mvpp2_cls_c2_read(struct mvpp2 *priv, int index, + struct mvpp2_cls_c2_entry *c2) +{ + mvpp2_write(priv, MVPP22_CLS_C2_TCAM_IDX, index); + + c2->index = index; + + c2->tcam[0] = mvpp2_read(priv, MVPP22_CLS_C2_TCAM_DATA0); + c2->tcam[1] = mvpp2_read(priv, MVPP22_CLS_C2_TCAM_DATA1); + c2->tcam[2] = mvpp2_read(priv, MVPP22_CLS_C2_TCAM_DATA2); + c2->tcam[3] = mvpp2_read(priv, MVPP22_CLS_C2_TCAM_DATA3); + c2->tcam[4] = mvpp2_read(priv, MVPP22_CLS_C2_TCAM_DATA4); + + c2->act = mvpp2_read(priv, MVPP22_CLS_C2_ACT); + + c2->attr[0] = mvpp2_read(priv, MVPP22_CLS_C2_ATTR0); + c2->attr[1] = mvpp2_read(priv, MVPP22_CLS_C2_ATTR1); + c2->attr[2] = mvpp2_read(priv, MVPP22_CLS_C2_ATTR2); + c2->attr[3] = mvpp2_read(priv, MVPP22_CLS_C2_ATTR3); +} + +static void mvpp2_port_c2_cls_init(struct mvpp2_port *port) +{ + struct mvpp2_cls_c2_entry c2; + u8 qh, ql, pmap; + + memset(&c2, 0, sizeof(c2)); + + c2.index = MVPP22_CLS_C2_RSS_ENTRY(port->id); + + pmap = BIT(port->id); + c2.tcam[4] = MVPP22_CLS_C2_PORT_ID(pmap); + c2.tcam[4] |= MVPP22_CLS_C2_TCAM_EN(MVPP22_CLS_C2_PORT_ID(pmap)); + + /* Update RSS status after matching this entry */ + c2.act = MVPP22_CLS_C2_ACT_RSS_EN(MVPP22_C2_UPD_LOCK); + + /* Mark packet as "forwarded to software", needed for RSS */ + c2.act |= MVPP22_CLS_C2_ACT_FWD(MVPP22_C2_FWD_SW_LOCK); + + /* Configure the default rx queue : Update Queue Low and Queue High, but + * don't lock, since the rx queue selection might be overridden by RSS + */ + c2.act |= MVPP22_CLS_C2_ACT_QHIGH(MVPP22_C2_UPD) | + MVPP22_CLS_C2_ACT_QLOW(MVPP22_C2_UPD); + + qh = (port->first_rxq >> 3) & MVPP22_CLS_C2_ATTR0_QHIGH_MASK; + ql = port->first_rxq & MVPP22_CLS_C2_ATTR0_QLOW_MASK; + + c2.attr[0] = MVPP22_CLS_C2_ATTR0_QHIGH(qh) | + MVPP22_CLS_C2_ATTR0_QLOW(ql); + + mvpp2_cls_c2_write(port->priv, &c2); +} + /* Classifier default initialization */ void mvpp2_cls_init(struct mvpp2 *priv) { @@ -61,6 +839,8 @@ void 
mvpp2_cls_init(struct mvpp2 *priv) le.way = 1; mvpp2_cls_lookup_write(priv, &le); } + + mvpp2_cls_port_init_flows(priv); } void mvpp2_cls_port_config(struct mvpp2_port *port) @@ -89,6 +869,47 @@ void mvpp2_cls_port_config(struct mvpp2_port *port) /* Update lookup ID table entry */ mvpp2_cls_lookup_write(port->priv, &le); + + mvpp2_port_c2_cls_init(port); +} + +u32 mvpp2_cls_c2_hit_count(struct mvpp2 *priv, int c2_index) +{ + mvpp2_write(priv, MVPP22_CLS_C2_TCAM_IDX, c2_index); + + return mvpp2_read(priv, MVPP22_CLS_C2_HIT_CTR); +} + +static void mvpp2_rss_port_c2_enable(struct mvpp2_port *port) +{ + struct mvpp2_cls_c2_entry c2; + + mvpp2_cls_c2_read(port->priv, MVPP22_CLS_C2_RSS_ENTRY(port->id), &c2); + + c2.attr[2] |= MVPP22_CLS_C2_ATTR2_RSS_EN; + + mvpp2_cls_c2_write(port->priv, &c2); +} + +static void mvpp2_rss_port_c2_disable(struct mvpp2_port *port) +{ + struct mvpp2_cls_c2_entry c2; + + mvpp2_cls_c2_read(port->priv, MVPP22_CLS_C2_RSS_ENTRY(port->id), &c2); + + c2.attr[2] &= ~MVPP22_CLS_C2_ATTR2_RSS_EN; + + mvpp2_cls_c2_write(port->priv, &c2); +} + +void mvpp22_rss_enable(struct mvpp2_port *port) +{ + mvpp2_rss_port_c2_enable(port); +} + +void mvpp22_rss_disable(struct mvpp2_port *port) +{ + mvpp2_rss_port_c2_disable(port); } /* Set CPU queue number for oversize packets */ @@ -107,7 +928,116 @@ void mvpp2_cls_oversize_rxq_set(struct mvpp2_port *port) mvpp2_write(port->priv, MVPP2_CLS_SWFWD_PCTRL_REG, val); } -void mvpp22_init_rss(struct mvpp2_port *port) +static inline u32 mvpp22_rxfh_indir(struct mvpp2_port *port, u32 rxq) +{ + int nrxqs, cpu, cpus = num_possible_cpus(); + + /* Number of RXQs per CPU */ + nrxqs = port->nrxqs / cpus; + + /* CPU that will handle this rx queue */ + cpu = rxq / nrxqs; + + if (!cpu_online(cpu)) + return port->first_rxq; + + /* Indirection to better distribute the paquets on the CPUs when + * configuring the RSS queues. 
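 *
 * Editor's worked example (not part of the patch): with 4 possible CPUs
 * and port->nrxqs = 8, nrxqs above evaluates to 2 and queue q is handled
 * by CPU q / 2. The expression that follows then maps indirection
 * entries 0..7 to first_rxq + {0, 2, 4, 6, 1, 3, 5, 7}, so consecutive
 * hash buckets round-robin across CPU0..CPU3 instead of piling onto one
 * CPU's queues first.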
+ */ + return port->first_rxq + ((rxq * nrxqs + rxq / cpus) % port->nrxqs); +} + +void mvpp22_rss_fill_table(struct mvpp2_port *port, u32 table) +{ + struct mvpp2 *priv = port->priv; + int i; + + for (i = 0; i < MVPP22_RSS_TABLE_ENTRIES; i++) { + u32 sel = MVPP22_RSS_INDEX_TABLE(table) | + MVPP22_RSS_INDEX_TABLE_ENTRY(i); + mvpp2_write(priv, MVPP22_RSS_INDEX, sel); + + mvpp2_write(priv, MVPP22_RSS_TABLE_ENTRY, + mvpp22_rxfh_indir(port, port->indir[i])); + } +} + +int mvpp2_ethtool_rxfh_set(struct mvpp2_port *port, struct ethtool_rxnfc *info) +{ + u16 hash_opts = 0; + + switch (info->flow_type) { + case TCP_V4_FLOW: + case UDP_V4_FLOW: + case TCP_V6_FLOW: + case UDP_V6_FLOW: + if (info->data & RXH_L4_B_0_1) + hash_opts |= MVPP22_CLS_HEK_OPT_L4SIP; + if (info->data & RXH_L4_B_2_3) + hash_opts |= MVPP22_CLS_HEK_OPT_L4DIP; + /* Fallthrough */ + case IPV4_FLOW: + case IPV6_FLOW: + if (info->data & RXH_L2DA) + hash_opts |= MVPP22_CLS_HEK_OPT_MAC_DA; + if (info->data & RXH_VLAN) + hash_opts |= MVPP22_CLS_HEK_OPT_VLAN; + if (info->data & RXH_L3_PROTO) + hash_opts |= MVPP22_CLS_HEK_OPT_L3_PROTO; + if (info->data & RXH_IP_SRC) + hash_opts |= (MVPP22_CLS_HEK_OPT_IP4SA | + MVPP22_CLS_HEK_OPT_IP6SA); + if (info->data & RXH_IP_DST) + hash_opts |= (MVPP22_CLS_HEK_OPT_IP4DA | + MVPP22_CLS_HEK_OPT_IP6DA); + break; + default: return -EOPNOTSUPP; + } + + return mvpp2_port_rss_hash_opts_set(port, info->flow_type, hash_opts); +} + +int mvpp2_ethtool_rxfh_get(struct mvpp2_port *port, struct ethtool_rxnfc *info) +{ + unsigned long hash_opts; + int i; + + hash_opts = mvpp2_port_rss_hash_opts_get(port, info->flow_type); + info->data = 0; + + for_each_set_bit(i, &hash_opts, MVPP22_CLS_HEK_N_FIELDS) { + switch (BIT(i)) { + case MVPP22_CLS_HEK_OPT_MAC_DA: + info->data |= RXH_L2DA; + break; + case MVPP22_CLS_HEK_OPT_VLAN: + info->data |= RXH_VLAN; + break; + case MVPP22_CLS_HEK_OPT_L3_PROTO: + info->data |= RXH_L3_PROTO; + break; + case MVPP22_CLS_HEK_OPT_IP4SA: + case MVPP22_CLS_HEK_OPT_IP6SA: + info->data |= RXH_IP_SRC; + break; + case MVPP22_CLS_HEK_OPT_IP4DA: + case MVPP22_CLS_HEK_OPT_IP6DA: + info->data |= RXH_IP_DST; + break; + case MVPP22_CLS_HEK_OPT_L4SIP: + info->data |= RXH_L4_B_0_1; + break; + case MVPP22_CLS_HEK_OPT_L4DIP: + info->data |= RXH_L4_B_2_3; + break; + default: + return -EINVAL; + } + } + return 0; +} + +void mvpp22_rss_port_init(struct mvpp2_port *port) { struct mvpp2 *priv = port->priv; int i; @@ -115,27 +1045,30 @@ void mvpp22_init_rss(struct mvpp2_port *port) /* Set the table width: replace the whole classifier Rx queue number * with the ones configured in RSS table entries. */ - mvpp2_write(priv, MVPP22_RSS_INDEX, MVPP22_RSS_INDEX_TABLE(0)); + mvpp2_write(priv, MVPP22_RSS_INDEX, MVPP22_RSS_INDEX_TABLE(port->id)); mvpp2_write(priv, MVPP22_RSS_WIDTH, 8); - /* Loop through the classifier Rx Queues and map them to a RSS table. - * Map them all to the first table (0) by default. + /* The default RxQ is used as a key to select the RSS table to use. + * We use one RSS table per port. */ - for (i = 0; i < MVPP2_CLS_RX_QUEUES; i++) { - mvpp2_write(priv, MVPP22_RSS_INDEX, MVPP22_RSS_INDEX_QUEUE(i)); - mvpp2_write(priv, MVPP22_RSS_TABLE, - MVPP22_RSS_TABLE_POINTER(0)); - } + mvpp2_write(priv, MVPP22_RSS_INDEX, + MVPP22_RSS_INDEX_QUEUE(port->first_rxq)); + mvpp2_write(priv, MVPP22_RXQ2RSS_TABLE, + MVPP22_RSS_TABLE_POINTER(port->id)); /* Configure the first table to evenly distribute the packets across - * real Rx Queues. The table entries map a hash to an port Rx Queue. + * real Rx Queues. 
The table entries map a hash to a port Rx Queue. */ - for (i = 0; i < MVPP22_RSS_TABLE_ENTRIES; i++) { - u32 sel = MVPP22_RSS_INDEX_TABLE(0) | - MVPP22_RSS_INDEX_TABLE_ENTRY(i); - mvpp2_write(priv, MVPP22_RSS_INDEX, sel); + for (i = 0; i < MVPP22_RSS_TABLE_ENTRIES; i++) + port->indir[i] = ethtool_rxfh_indir_default(i, port->nrxqs); - mvpp2_write(priv, MVPP22_RSS_TABLE_ENTRY, i % port->nrxqs); - } + mvpp22_rss_fill_table(port, port->id); + /* Configure default flows */ + mvpp2_port_rss_hash_opts_set(port, IPV4_FLOW, MVPP22_CLS_HEK_IP4_2T); + mvpp2_port_rss_hash_opts_set(port, IPV6_FLOW, MVPP22_CLS_HEK_IP6_2T); + mvpp2_port_rss_hash_opts_set(port, TCP_V4_FLOW, MVPP22_CLS_HEK_IP4_5T); + mvpp2_port_rss_hash_opts_set(port, TCP_V6_FLOW, MVPP22_CLS_HEK_IP6_5T); + mvpp2_port_rss_hash_opts_set(port, UDP_V4_FLOW, MVPP22_CLS_HEK_IP4_5T); + mvpp2_port_rss_hash_opts_set(port, UDP_V6_FLOW, MVPP22_CLS_HEK_IP6_5T); } diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.h b/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.h index 8e1d7f9ffa0b..089f05f29891 100644 --- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.h +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.h @@ -1,27 +1,187 @@ +/* SPDX-License-Identifier: GPL-2.0 */ /* * RSS and Classifier definitions for Marvell PPv2 Network Controller * * Copyright (C) 2014 Marvell * * Marcin Wojtas <mw@semihalf.com> - * - * This file is licensed under the terms of the GNU General Public - * License version 2. This program is licensed "as is" without any - * warranty of any kind, whether express or implied. */ #ifndef _MVPP2_CLS_H_ #define _MVPP2_CLS_H_ +#include "mvpp2.h" +#include "mvpp2_prs.h" + /* Classifier constants */ #define MVPP2_CLS_FLOWS_TBL_SIZE 512 #define MVPP2_CLS_FLOWS_TBL_DATA_WORDS 3 #define MVPP2_CLS_LKP_TBL_SIZE 64 #define MVPP2_CLS_RX_QUEUES 256 -/* RSS constants */ -#define MVPP22_RSS_TABLE_ENTRIES 32 +/* Classifier flow constants */ + +#define MVPP2_FLOW_N_FIELDS 4 + +enum mvpp2_cls_engine { + MVPP22_CLS_ENGINE_C2 = 1, + MVPP22_CLS_ENGINE_C3A, + MVPP22_CLS_ENGINE_C3B, + MVPP22_CLS_ENGINE_C4, + MVPP22_CLS_ENGINE_C3HA = 6, + MVPP22_CLS_ENGINE_C3HB = 7, +}; + +#define MVPP22_CLS_HEK_OPT_MAC_DA BIT(0) +#define MVPP22_CLS_HEK_OPT_VLAN BIT(1) +#define MVPP22_CLS_HEK_OPT_L3_PROTO BIT(2) +#define MVPP22_CLS_HEK_OPT_IP4SA BIT(3) +#define MVPP22_CLS_HEK_OPT_IP4DA BIT(4) +#define MVPP22_CLS_HEK_OPT_IP6SA BIT(5) +#define MVPP22_CLS_HEK_OPT_IP6DA BIT(6) +#define MVPP22_CLS_HEK_OPT_L4SIP BIT(7) +#define MVPP22_CLS_HEK_OPT_L4DIP BIT(8) +#define MVPP22_CLS_HEK_N_FIELDS 9 + +#define MVPP22_CLS_HEK_L4_OPTS (MVPP22_CLS_HEK_OPT_L4SIP | \ + MVPP22_CLS_HEK_OPT_L4DIP) + +#define MVPP22_CLS_HEK_IP4_2T (MVPP22_CLS_HEK_OPT_IP4SA | \ + MVPP22_CLS_HEK_OPT_IP4DA) + +#define MVPP22_CLS_HEK_IP6_2T (MVPP22_CLS_HEK_OPT_IP6SA | \ + MVPP22_CLS_HEK_OPT_IP6DA) + +/* The fifth tuple in "5T" is the L4_Info field */ +#define MVPP22_CLS_HEK_IP4_5T (MVPP22_CLS_HEK_IP4_2T | \ + MVPP22_CLS_HEK_L4_OPTS) + +#define MVPP22_CLS_HEK_IP6_5T (MVPP22_CLS_HEK_IP6_2T | \ + MVPP22_CLS_HEK_L4_OPTS) + +enum mvpp2_cls_field_id { + MVPP22_CLS_FIELD_MAC_DA = 0x03, + MVPP22_CLS_FIELD_VLAN = 0x06, + MVPP22_CLS_FIELD_L3_PROTO = 0x0f, + MVPP22_CLS_FIELD_IP4SA = 0x10, + MVPP22_CLS_FIELD_IP4DA = 0x11, + MVPP22_CLS_FIELD_IP6SA = 0x17, + MVPP22_CLS_FIELD_IP6DA = 0x1a, + MVPP22_CLS_FIELD_L4SIP = 0x1d, + MVPP22_CLS_FIELD_L4DIP = 0x1e, +}; + +enum mvpp2_cls_flow_seq { + MVPP2_CLS_FLOW_SEQ_NORMAL = 0, + MVPP2_CLS_FLOW_SEQ_FIRST1, + MVPP2_CLS_FLOW_SEQ_FIRST2, + MVPP2_CLS_FLOW_SEQ_LAST, + 
MVPP2_CLS_FLOW_SEQ_MIDDLE +}; + +/* Classifier C2 engine constants */ +#define MVPP22_CLS_C2_TCAM_EN(data) ((data) << 16) + +enum mvpp22_cls_c2_action { + MVPP22_C2_NO_UPD = 0, + MVPP22_C2_NO_UPD_LOCK, + MVPP22_C2_UPD, + MVPP22_C2_UPD_LOCK, +}; + +enum mvpp22_cls_c2_fwd_action { + MVPP22_C2_FWD_NO_UPD = 0, + MVPP22_C2_FWD_NO_UPD_LOCK, + MVPP22_C2_FWD_SW, + MVPP22_C2_FWD_SW_LOCK, + MVPP22_C2_FWD_HW, + MVPP22_C2_FWD_HW_LOCK, + MVPP22_C2_FWD_HW_LOW_LAT, + MVPP22_C2_FWD_HW_LOW_LAT_LOCK, +}; + +#define MVPP2_CLS_C2_TCAM_WORDS 5 +#define MVPP2_CLS_C2_ATTR_WORDS 5 + +struct mvpp2_cls_c2_entry { + u32 index; + u32 tcam[MVPP2_CLS_C2_TCAM_WORDS]; + u32 act; + u32 attr[MVPP2_CLS_C2_ATTR_WORDS]; +}; + +/* Classifier C2 engine entries */ +#define MVPP22_CLS_C2_RSS_ENTRY(port) (port) +#define MVPP22_CLS_C2_N_ENTRIES MVPP2_MAX_PORTS +/* RSS flow entries in the flow table. We have 2 entries per port for RSS. + * + * The first performs a lookup using the C2 TCAM engine, to tag the + * packet for software forwarding (needed for RSS), enable or disable RSS, and + * assign the default rx queue. + * + * The second configures the hash generation, by specifying which fields of the + * packet header are used to generate the hash, and specifies the relevant hash + * engine to use. + */ +#define MVPP22_RSS_FLOW_C2_OFFS 0 +#define MVPP22_RSS_FLOW_HASH_OFFS 1 +#define MVPP22_RSS_FLOW_SIZE (MVPP22_RSS_FLOW_HASH_OFFS + 1) + +#define MVPP22_RSS_FLOW_C2(port) ((port) * MVPP22_RSS_FLOW_SIZE + \ + MVPP22_RSS_FLOW_C2_OFFS) +#define MVPP22_RSS_FLOW_HASH(port) ((port) * MVPP22_RSS_FLOW_SIZE + \ + MVPP22_RSS_FLOW_HASH_OFFS) +#define MVPP22_RSS_FLOW_FIRST(port) MVPP22_RSS_FLOW_C2(port) + +/* Packet flow ID */ +enum mvpp2_prs_flow { + MVPP2_FL_START = 8, + MVPP2_FL_IP4_TCP_NF_UNTAG = MVPP2_FL_START, + MVPP2_FL_IP4_UDP_NF_UNTAG, + MVPP2_FL_IP4_TCP_NF_TAG, + MVPP2_FL_IP4_UDP_NF_TAG, + MVPP2_FL_IP6_TCP_NF_UNTAG, + MVPP2_FL_IP6_UDP_NF_UNTAG, + MVPP2_FL_IP6_TCP_NF_TAG, + MVPP2_FL_IP6_UDP_NF_TAG, + MVPP2_FL_IP4_TCP_FRAG_UNTAG, + MVPP2_FL_IP4_UDP_FRAG_UNTAG, + MVPP2_FL_IP4_TCP_FRAG_TAG, + MVPP2_FL_IP4_UDP_FRAG_TAG, + MVPP2_FL_IP6_TCP_FRAG_UNTAG, + MVPP2_FL_IP6_UDP_FRAG_UNTAG, + MVPP2_FL_IP6_TCP_FRAG_TAG, + MVPP2_FL_IP6_UDP_FRAG_TAG, + MVPP2_FL_IP4_UNTAG, /* non-TCP, non-UDP, same for below */ + MVPP2_FL_IP4_TAG, + MVPP2_FL_IP6_UNTAG, + MVPP2_FL_IP6_TAG, + MVPP2_FL_NON_IP_UNTAG, + MVPP2_FL_NON_IP_TAG, + MVPP2_FL_LAST, +}; + +struct mvpp2_cls_flow { + /* The L2-L4 traffic flow type */ + int flow_type; + + /* The first id in the flow table for this flow */ + u16 flow_id; + + /* The supported HEK fields for this flow */ + u16 supported_hash_opts; + + /* The Header Parser result_info that matches this flow */ + struct mvpp2_prs_result_info prs_ri; +}; + +#define MVPP2_N_FLOWS 52 + +#define MVPP2_ENTRIES_PER_FLOW (MVPP2_MAX_PORTS + 1) +#define MVPP2_FLOW_C2_ENTRY(id) ((id) * MVPP2_ENTRIES_PER_FLOW) +#define MVPP2_PORT_FLOW_HASH_ENTRY(port, id) ((id) * MVPP2_ENTRIES_PER_FLOW + \ + (port) + 1) struct mvpp2_cls_flow_entry { u32 index; u32 data[MVPP2_CLS_FLOWS_TBL_DATA_WORDS]; @@ -33,7 +193,15 @@ struct mvpp2_cls_lookup_entry { u32 data; }; -void mvpp22_init_rss(struct mvpp2_port *port); +void mvpp22_rss_fill_table(struct mvpp2_port *port, u32 table); + +void mvpp22_rss_port_init(struct mvpp2_port *port); + +void mvpp22_rss_enable(struct mvpp2_port *port); +void mvpp22_rss_disable(struct mvpp2_port *port); + +int mvpp2_ethtool_rxfh_get(struct mvpp2_port *port, struct ethtool_rxnfc *info); +int mvpp2_ethtool_rxfh_set(struct mvpp2_port *port, 
struct ethtool_rxnfc *info); void mvpp2_cls_init(struct mvpp2 *priv); @@ -41,4 +209,25 @@ void mvpp2_cls_port_config(struct mvpp2_port *port); void mvpp2_cls_oversize_rxq_set(struct mvpp2_port *port); +int mvpp2_cls_flow_eng_get(struct mvpp2_cls_flow_entry *fe); + +u16 mvpp2_flow_get_hek_fields(struct mvpp2_cls_flow_entry *fe); + +struct mvpp2_cls_flow *mvpp2_cls_flow_get(int flow); + +u32 mvpp2_cls_flow_hits(struct mvpp2 *priv, int index); + +void mvpp2_cls_flow_read(struct mvpp2 *priv, int index, + struct mvpp2_cls_flow_entry *fe); + +u32 mvpp2_cls_lookup_hits(struct mvpp2 *priv, int index); + +void mvpp2_cls_lookup_read(struct mvpp2 *priv, int lkpid, int way, + struct mvpp2_cls_lookup_entry *le); + +u32 mvpp2_cls_c2_hit_count(struct mvpp2 *priv, int c2_index); + +void mvpp2_cls_c2_read(struct mvpp2 *priv, int index, + struct mvpp2_cls_c2_entry *c2); + #endif diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_debugfs.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_debugfs.c new file mode 100644 index 000000000000..f9744a61e5dd --- /dev/null +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_debugfs.c @@ -0,0 +1,703 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Driver for Marvell PPv2 network controller for Armada 375 SoC. + * + * Copyright (C) 2018 Marvell + */ + +#include <linux/kernel.h> +#include <linux/slab.h> +#include <linux/debugfs.h> + +#include "mvpp2.h" +#include "mvpp2_prs.h" +#include "mvpp2_cls.h" + +struct mvpp2_dbgfs_prs_entry { + int tid; + struct mvpp2 *priv; +}; + +struct mvpp2_dbgfs_flow_entry { + int flow; + struct mvpp2 *priv; +}; + +struct mvpp2_dbgfs_port_flow_entry { + struct mvpp2_port *port; + struct mvpp2_dbgfs_flow_entry *dbg_fe; +}; + +static int mvpp2_dbgfs_flow_flt_hits_show(struct seq_file *s, void *unused) +{ + struct mvpp2_dbgfs_flow_entry *entry = s->private; + int id = MVPP2_FLOW_C2_ENTRY(entry->flow); + + u32 hits = mvpp2_cls_flow_hits(entry->priv, id); + + seq_printf(s, "%u\n", hits); + + return 0; +} + +DEFINE_SHOW_ATTRIBUTE(mvpp2_dbgfs_flow_flt_hits); + +static int mvpp2_dbgfs_flow_dec_hits_show(struct seq_file *s, void *unused) +{ + struct mvpp2_dbgfs_flow_entry *entry = s->private; + + u32 hits = mvpp2_cls_lookup_hits(entry->priv, entry->flow); + + seq_printf(s, "%u\n", hits); + + return 0; +} + +DEFINE_SHOW_ATTRIBUTE(mvpp2_dbgfs_flow_dec_hits); + +static int mvpp2_dbgfs_flow_type_show(struct seq_file *s, void *unused) +{ + struct mvpp2_dbgfs_flow_entry *entry = s->private; + struct mvpp2_cls_flow *f; + const char *flow_name; + + f = mvpp2_cls_flow_get(entry->flow); + if (!f) + return -EINVAL; + + switch (f->flow_type) { + case IPV4_FLOW: + flow_name = "ipv4"; + break; + case IPV6_FLOW: + flow_name = "ipv6"; + break; + case TCP_V4_FLOW: + flow_name = "tcp4"; + break; + case TCP_V6_FLOW: + flow_name = "tcp6"; + break; + case UDP_V4_FLOW: + flow_name = "udp4"; + break; + case UDP_V6_FLOW: + flow_name = "udp6"; + break; + default: + flow_name = "other"; + } + + seq_printf(s, "%s\n", flow_name); + + return 0; +} + +static int mvpp2_dbgfs_flow_type_open(struct inode *inode, struct file *file) +{ + return single_open(file, mvpp2_dbgfs_flow_type_show, inode->i_private); +} + +static int mvpp2_dbgfs_flow_type_release(struct inode *inode, struct file *file) +{ + struct seq_file *seq = file->private_data; + struct mvpp2_dbgfs_flow_entry *flow_entry = seq->private; + + kfree(flow_entry); + return single_release(inode, file); +} + +static const struct file_operations mvpp2_dbgfs_flow_type_fops = { + .open = mvpp2_dbgfs_flow_type_open, + .read = seq_read, + 
.release = mvpp2_dbgfs_flow_type_release, +}; + +static int mvpp2_dbgfs_flow_id_show(struct seq_file *s, void *unused) +{ + struct mvpp2_dbgfs_flow_entry *entry = s->private; + struct mvpp2_cls_flow *f; + + f = mvpp2_cls_flow_get(entry->flow); + if (!f) + return -EINVAL; + + seq_printf(s, "%d\n", f->flow_id); + + return 0; +} + +DEFINE_SHOW_ATTRIBUTE(mvpp2_dbgfs_flow_id); + +static int mvpp2_dbgfs_port_flow_hash_opt_show(struct seq_file *s, void *unused) +{ + struct mvpp2_dbgfs_port_flow_entry *entry = s->private; + struct mvpp2_port *port = entry->port; + struct mvpp2_cls_flow_entry fe; + struct mvpp2_cls_flow *f; + int flow_index; + u16 hash_opts; + + f = mvpp2_cls_flow_get(entry->dbg_fe->flow); + if (!f) + return -EINVAL; + + flow_index = MVPP2_PORT_FLOW_HASH_ENTRY(entry->port->id, f->flow_id); + + mvpp2_cls_flow_read(port->priv, flow_index, &fe); + + hash_opts = mvpp2_flow_get_hek_fields(&fe); + + seq_printf(s, "0x%04x\n", hash_opts); + + return 0; +} + +static int mvpp2_dbgfs_port_flow_hash_opt_open(struct inode *inode, + struct file *file) +{ + return single_open(file, mvpp2_dbgfs_port_flow_hash_opt_show, + inode->i_private); +} + +static int mvpp2_dbgfs_port_flow_hash_opt_release(struct inode *inode, + struct file *file) +{ + struct seq_file *seq = file->private_data; + struct mvpp2_dbgfs_port_flow_entry *flow_entry = seq->private; + + kfree(flow_entry); + return single_release(inode, file); +} + +static const struct file_operations mvpp2_dbgfs_port_flow_hash_opt_fops = { + .open = mvpp2_dbgfs_port_flow_hash_opt_open, + .read = seq_read, + .release = mvpp2_dbgfs_port_flow_hash_opt_release, +}; + +static int mvpp2_dbgfs_port_flow_engine_show(struct seq_file *s, void *unused) +{ + struct mvpp2_dbgfs_port_flow_entry *entry = s->private; + struct mvpp2_port *port = entry->port; + struct mvpp2_cls_flow_entry fe; + struct mvpp2_cls_flow *f; + int flow_index, engine; + + f = mvpp2_cls_flow_get(entry->dbg_fe->flow); + if (!f) + return -EINVAL; + + flow_index = MVPP2_PORT_FLOW_HASH_ENTRY(entry->port->id, f->flow_id); + + mvpp2_cls_flow_read(port->priv, flow_index, &fe); + + engine = mvpp2_cls_flow_eng_get(&fe); + + seq_printf(s, "%d\n", engine); + + return 0; +} + +DEFINE_SHOW_ATTRIBUTE(mvpp2_dbgfs_port_flow_engine); + +static int mvpp2_dbgfs_flow_c2_hits_show(struct seq_file *s, void *unused) +{ + struct mvpp2_port *port = s->private; + u32 hits; + + hits = mvpp2_cls_c2_hit_count(port->priv, + MVPP22_CLS_C2_RSS_ENTRY(port->id)); + + seq_printf(s, "%u\n", hits); + + return 0; +} + +DEFINE_SHOW_ATTRIBUTE(mvpp2_dbgfs_flow_c2_hits); + +static int mvpp2_dbgfs_flow_c2_rxq_show(struct seq_file *s, void *unused) +{ + struct mvpp2_port *port = s->private; + struct mvpp2_cls_c2_entry c2; + u8 qh, ql; + + mvpp2_cls_c2_read(port->priv, MVPP22_CLS_C2_RSS_ENTRY(port->id), &c2); + + qh = (c2.attr[0] >> MVPP22_CLS_C2_ATTR0_QHIGH_OFFS) & + MVPP22_CLS_C2_ATTR0_QHIGH_MASK; + + ql = (c2.attr[0] >> MVPP22_CLS_C2_ATTR0_QLOW_OFFS) & + MVPP22_CLS_C2_ATTR0_QLOW_MASK; + + seq_printf(s, "%d\n", (qh << 3 | ql)); + + return 0; +} + +DEFINE_SHOW_ATTRIBUTE(mvpp2_dbgfs_flow_c2_rxq); + +static int mvpp2_dbgfs_flow_c2_enable_show(struct seq_file *s, void *unused) +{ + struct mvpp2_port *port = s->private; + struct mvpp2_cls_c2_entry c2; + int enabled; + + mvpp2_cls_c2_read(port->priv, MVPP22_CLS_C2_RSS_ENTRY(port->id), &c2); + + enabled = !!(c2.attr[2] & MVPP22_CLS_C2_ATTR2_RSS_EN); + + seq_printf(s, "%d\n", enabled); + + return 0; +} + +DEFINE_SHOW_ATTRIBUTE(mvpp2_dbgfs_flow_c2_enable); + +static int 
mvpp2_dbgfs_port_vid_show(struct seq_file *s, void *unused) +{ + struct mvpp2_port *port = s->private; + unsigned char byte[2], enable[2]; + struct mvpp2 *priv = port->priv; + struct mvpp2_prs_entry pe; + unsigned long pmap; + u16 rvid; + int tid; + + for (tid = MVPP2_PRS_VID_PORT_FIRST(port->id); + tid <= MVPP2_PRS_VID_PORT_LAST(port->id); tid++) { + mvpp2_prs_init_from_hw(priv, &pe, tid); + + pmap = mvpp2_prs_tcam_port_map_get(&pe); + + if (!priv->prs_shadow[tid].valid) + continue; + + if (!test_bit(port->id, &pmap)) + continue; + + mvpp2_prs_tcam_data_byte_get(&pe, 2, &byte[0], &enable[0]); + mvpp2_prs_tcam_data_byte_get(&pe, 3, &byte[1], &enable[1]); + + rvid = ((byte[0] & 0xf) << 8) + byte[1]; + + seq_printf(s, "%u\n", rvid); + } + + return 0; +} + +DEFINE_SHOW_ATTRIBUTE(mvpp2_dbgfs_port_vid); + +static int mvpp2_dbgfs_port_parser_show(struct seq_file *s, void *unused) +{ + struct mvpp2_port *port = s->private; + struct mvpp2 *priv = port->priv; + struct mvpp2_prs_entry pe; + unsigned long pmap; + int i; + + for (i = 0; i < MVPP2_PRS_TCAM_SRAM_SIZE; i++) { + mvpp2_prs_init_from_hw(port->priv, &pe, i); + + pmap = mvpp2_prs_tcam_port_map_get(&pe); + if (priv->prs_shadow[i].valid && test_bit(port->id, &pmap)) + seq_printf(s, "%03d\n", i); + } + + return 0; +} + +DEFINE_SHOW_ATTRIBUTE(mvpp2_dbgfs_port_parser); + +static int mvpp2_dbgfs_filter_show(struct seq_file *s, void *unused) +{ + struct mvpp2_port *port = s->private; + struct mvpp2 *priv = port->priv; + struct mvpp2_prs_entry pe; + unsigned long pmap; + int index, tid; + + for (tid = MVPP2_PE_MAC_RANGE_START; + tid <= MVPP2_PE_MAC_RANGE_END; tid++) { + unsigned char da[ETH_ALEN], da_mask[ETH_ALEN]; + + if (!priv->prs_shadow[tid].valid || + priv->prs_shadow[tid].lu != MVPP2_PRS_LU_MAC || + priv->prs_shadow[tid].udf != MVPP2_PRS_UDF_MAC_DEF) + continue; + + mvpp2_prs_init_from_hw(priv, &pe, tid); + + pmap = mvpp2_prs_tcam_port_map_get(&pe); + + /* We only want entries active on this port */ + if (!test_bit(port->id, &pmap)) + continue; + + /* Read mac addr from entry */ + for (index = 0; index < ETH_ALEN; index++) + mvpp2_prs_tcam_data_byte_get(&pe, index, &da[index], + &da_mask[index]); + + seq_printf(s, "%pM\n", da); + } + + return 0; +} + +DEFINE_SHOW_ATTRIBUTE(mvpp2_dbgfs_filter); + +static int mvpp2_dbgfs_prs_lu_show(struct seq_file *s, void *unused) +{ + struct mvpp2_dbgfs_prs_entry *entry = s->private; + struct mvpp2 *priv = entry->priv; + + seq_printf(s, "%x\n", priv->prs_shadow[entry->tid].lu); + + return 0; +} + +DEFINE_SHOW_ATTRIBUTE(mvpp2_dbgfs_prs_lu); + +static int mvpp2_dbgfs_prs_pmap_show(struct seq_file *s, void *unused) +{ + struct mvpp2_dbgfs_prs_entry *entry = s->private; + struct mvpp2_prs_entry pe; + unsigned int pmap; + + mvpp2_prs_init_from_hw(entry->priv, &pe, entry->tid); + + pmap = mvpp2_prs_tcam_port_map_get(&pe); + pmap &= MVPP2_PRS_PORT_MASK; + + seq_printf(s, "%02x\n", pmap); + + return 0; +} + +DEFINE_SHOW_ATTRIBUTE(mvpp2_dbgfs_prs_pmap); + +static int mvpp2_dbgfs_prs_ai_show(struct seq_file *s, void *unused) +{ + struct mvpp2_dbgfs_prs_entry *entry = s->private; + struct mvpp2_prs_entry pe; + unsigned char ai, ai_mask; + + mvpp2_prs_init_from_hw(entry->priv, &pe, entry->tid); + + ai = pe.tcam[MVPP2_PRS_TCAM_AI_WORD] & MVPP2_PRS_AI_MASK; + ai_mask = (pe.tcam[MVPP2_PRS_TCAM_AI_WORD] >> 16) & MVPP2_PRS_AI_MASK; + + seq_printf(s, "%02x %02x\n", ai, ai_mask); + + return 0; +} + +DEFINE_SHOW_ATTRIBUTE(mvpp2_dbgfs_prs_ai); + +static int mvpp2_dbgfs_prs_hdata_show(struct seq_file *s, void *unused) +{ + struct 
mvpp2_dbgfs_prs_entry *entry = s->private; + struct mvpp2_prs_entry pe; + unsigned char data[8], mask[8]; + int i; + + mvpp2_prs_init_from_hw(entry->priv, &pe, entry->tid); + + for (i = 0; i < 8; i++) + mvpp2_prs_tcam_data_byte_get(&pe, i, &data[i], &mask[i]); + + seq_printf(s, "%*phN %*phN\n", 8, data, 8, mask); + + return 0; +} + +DEFINE_SHOW_ATTRIBUTE(mvpp2_dbgfs_prs_hdata); + +static int mvpp2_dbgfs_prs_sram_show(struct seq_file *s, void *unused) +{ + struct mvpp2_dbgfs_prs_entry *entry = s->private; + struct mvpp2_prs_entry pe; + + mvpp2_prs_init_from_hw(entry->priv, &pe, entry->tid); + + seq_printf(s, "%*phN\n", 14, pe.sram); + + return 0; +} + +DEFINE_SHOW_ATTRIBUTE(mvpp2_dbgfs_prs_sram); + +static int mvpp2_dbgfs_prs_hits_show(struct seq_file *s, void *unused) +{ + struct mvpp2_dbgfs_prs_entry *entry = s->private; + int val; + + val = mvpp2_prs_hits(entry->priv, entry->tid); + if (val < 0) + return val; + + seq_printf(s, "%d\n", val); + + return 0; +} + +DEFINE_SHOW_ATTRIBUTE(mvpp2_dbgfs_prs_hits); + +static int mvpp2_dbgfs_prs_valid_show(struct seq_file *s, void *unused) +{ + struct mvpp2_dbgfs_prs_entry *entry = s->private; + struct mvpp2 *priv = entry->priv; + int tid = entry->tid; + + seq_printf(s, "%d\n", priv->prs_shadow[tid].valid ? 1 : 0); + + return 0; +} + +static int mvpp2_dbgfs_prs_valid_open(struct inode *inode, struct file *file) +{ + return single_open(file, mvpp2_dbgfs_prs_valid_show, inode->i_private); +} + +static int mvpp2_dbgfs_prs_valid_release(struct inode *inode, struct file *file) +{ + struct seq_file *seq = file->private_data; + struct mvpp2_dbgfs_prs_entry *entry = seq->private; + + kfree(entry); + return single_release(inode, file); +} + +static const struct file_operations mvpp2_dbgfs_prs_valid_fops = { + .open = mvpp2_dbgfs_prs_valid_open, + .read = seq_read, + .release = mvpp2_dbgfs_prs_valid_release, +}; + +static int mvpp2_dbgfs_flow_port_init(struct dentry *parent, + struct mvpp2_port *port, + struct mvpp2_dbgfs_flow_entry *entry) +{ + struct mvpp2_dbgfs_port_flow_entry *port_entry; + struct dentry *port_dir; + + port_dir = debugfs_create_dir(port->dev->name, parent); + if (IS_ERR(port_dir)) + return PTR_ERR(port_dir); + + /* This will be freed by 'hash_opts' release op */ + port_entry = kmalloc(sizeof(*port_entry), GFP_KERNEL); + if (!port_entry) + return -ENOMEM; + + port_entry->port = port; + port_entry->dbg_fe = entry; + + debugfs_create_file("hash_opts", 0444, port_dir, port_entry, + &mvpp2_dbgfs_port_flow_hash_opt_fops); + + debugfs_create_file("engine", 0444, port_dir, port_entry, + &mvpp2_dbgfs_port_flow_engine_fops); + + return 0; +} + +static int mvpp2_dbgfs_flow_entry_init(struct dentry *parent, + struct mvpp2 *priv, int flow) +{ + struct mvpp2_dbgfs_flow_entry *entry; + struct dentry *flow_entry_dir; + char flow_entry_name[10]; + int i, ret; + + sprintf(flow_entry_name, "%02d", flow); + + flow_entry_dir = debugfs_create_dir(flow_entry_name, parent); + if (!flow_entry_dir) + return -ENOMEM; + + /* This will be freed by 'type' release op */ + entry = kmalloc(sizeof(*entry), GFP_KERNEL); + if (!entry) + return -ENOMEM; + + entry->flow = flow; + entry->priv = priv; + + debugfs_create_file("flow_hits", 0444, flow_entry_dir, entry, + &mvpp2_dbgfs_flow_flt_hits_fops); + + debugfs_create_file("dec_hits", 0444, flow_entry_dir, entry, + &mvpp2_dbgfs_flow_dec_hits_fops); + + debugfs_create_file("type", 0444, flow_entry_dir, entry, + &mvpp2_dbgfs_flow_type_fops); + + debugfs_create_file("id", 0444, flow_entry_dir, entry, + 
&mvpp2_dbgfs_flow_id_fops); + + /* Create entry for each port */ + for (i = 0; i < priv->port_count; i++) { + ret = mvpp2_dbgfs_flow_port_init(flow_entry_dir, + priv->port_list[i], entry); + if (ret) + return ret; + } + return 0; +} + +static int mvpp2_dbgfs_flow_init(struct dentry *parent, struct mvpp2 *priv) +{ + struct dentry *flow_dir; + int i, ret; + + flow_dir = debugfs_create_dir("flows", parent); + if (!flow_dir) + return -ENOMEM; + + for (i = 0; i < MVPP2_N_FLOWS; i++) { + ret = mvpp2_dbgfs_flow_entry_init(flow_dir, priv, i); + if (ret) + return ret; + } + + return 0; +} + +static int mvpp2_dbgfs_prs_entry_init(struct dentry *parent, + struct mvpp2 *priv, int tid) +{ + struct mvpp2_dbgfs_prs_entry *entry; + struct dentry *prs_entry_dir; + char prs_entry_name[10]; + + if (tid >= MVPP2_PRS_TCAM_SRAM_SIZE) + return -EINVAL; + + sprintf(prs_entry_name, "%03d", tid); + + prs_entry_dir = debugfs_create_dir(prs_entry_name, parent); + if (!prs_entry_dir) + return -ENOMEM; + + /* The 'valid' entry's ops will free that */ + entry = kmalloc(sizeof(*entry), GFP_KERNEL); + if (!entry) + return -ENOMEM; + + entry->tid = tid; + entry->priv = priv; + + /* Create each attr */ + debugfs_create_file("sram", 0444, prs_entry_dir, entry, + &mvpp2_dbgfs_prs_sram_fops); + + debugfs_create_file("valid", 0644, prs_entry_dir, entry, + &mvpp2_dbgfs_prs_valid_fops); + + debugfs_create_file("lookup_id", 0644, prs_entry_dir, entry, + &mvpp2_dbgfs_prs_lu_fops); + + debugfs_create_file("ai", 0644, prs_entry_dir, entry, + &mvpp2_dbgfs_prs_ai_fops); + + debugfs_create_file("header_data", 0644, prs_entry_dir, entry, + &mvpp2_dbgfs_prs_hdata_fops); + + debugfs_create_file("hits", 0444, prs_entry_dir, entry, + &mvpp2_dbgfs_prs_hits_fops); + + return 0; +} + +static int mvpp2_dbgfs_prs_init(struct dentry *parent, struct mvpp2 *priv) +{ + struct dentry *prs_dir; + int i, ret; + + prs_dir = debugfs_create_dir("parser", parent); + if (!prs_dir) + return -ENOMEM; + + for (i = 0; i < MVPP2_PRS_TCAM_SRAM_SIZE; i++) { + ret = mvpp2_dbgfs_prs_entry_init(prs_dir, priv, i); + if (ret) + return ret; + } + + return 0; +} + +static int mvpp2_dbgfs_port_init(struct dentry *parent, + struct mvpp2_port *port) +{ + struct dentry *port_dir; + + port_dir = debugfs_create_dir(port->dev->name, parent); + if (IS_ERR(port_dir)) + return PTR_ERR(port_dir); + + debugfs_create_file("parser_entries", 0444, port_dir, port, + &mvpp2_dbgfs_port_parser_fops); + + debugfs_create_file("mac_filter", 0444, port_dir, port, + &mvpp2_dbgfs_filter_fops); + + debugfs_create_file("vid_filter", 0444, port_dir, port, + &mvpp2_dbgfs_port_vid_fops); + + debugfs_create_file("c2_hits", 0444, port_dir, port, + &mvpp2_dbgfs_flow_c2_hits_fops); + + debugfs_create_file("default_rxq", 0444, port_dir, port, + &mvpp2_dbgfs_flow_c2_rxq_fops); + + debugfs_create_file("rss_enable", 0444, port_dir, port, + &mvpp2_dbgfs_flow_c2_enable_fops); + + return 0; +} + +void mvpp2_dbgfs_cleanup(struct mvpp2 *priv) +{ + debugfs_remove_recursive(priv->dbgfs_dir); +} + +void mvpp2_dbgfs_init(struct mvpp2 *priv, const char *name) +{ + struct dentry *mvpp2_dir, *mvpp2_root; + int ret, i; + + mvpp2_root = debugfs_lookup(MVPP2_DRIVER_NAME, NULL); + if (!mvpp2_root) { + mvpp2_root = debugfs_create_dir(MVPP2_DRIVER_NAME, NULL); + if (IS_ERR(mvpp2_root)) + return; + } + + mvpp2_dir = debugfs_create_dir(name, mvpp2_root); + if (IS_ERR(mvpp2_dir)) + return; + + priv->dbgfs_dir = mvpp2_dir; + + ret = mvpp2_dbgfs_prs_init(mvpp2_dir, priv); + if (ret) + goto err; + + for (i = 0; i < priv->port_count; 
i++) { + ret = mvpp2_dbgfs_port_init(mvpp2_dir, priv->port_list[i]); + if (ret) + goto err; + } + + ret = mvpp2_dbgfs_flow_init(mvpp2_dir, priv); + if (ret) + goto err; + + return; +err: + mvpp2_dbgfs_cleanup(priv); +} diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c index 0319ed9ef8b8..32d785b616e1 100644 --- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c @@ -1,13 +1,10 @@ +// SPDX-License-Identifier: GPL-2.0 /* * Driver for Marvell PPv2 network controller for Armada 375 SoC. * * Copyright (C) 2014 Marvell * * Marcin Wojtas <mw@semihalf.com> - * - * This file is licensed under the terms of the GNU General Public - * License version 2. This program is licensed "as is" without any - * warranty of any kind, whether express or implied. */ #include <linux/acpi.h> @@ -66,7 +63,7 @@ static void mvpp2_mac_config(struct net_device *dev, unsigned int mode, #define MVPP2_QDIST_SINGLE_MODE 0 #define MVPP2_QDIST_MULTI_MODE 1 -static int queue_mode = MVPP2_QDIST_SINGLE_MODE; +static int queue_mode = MVPP2_QDIST_MULTI_MODE; module_param(queue_mode, int, 0444); MODULE_PARM_DESC(queue_mode, "Set queue_mode (single=0, multi=1)"); @@ -151,9 +148,10 @@ static dma_addr_t mvpp2_txdesc_dma_addr_get(struct mvpp2_port *port, struct mvpp2_tx_desc *tx_desc) { if (port->priv->hw_version == MVPP21) - return tx_desc->pp21.buf_dma_addr; + return le32_to_cpu(tx_desc->pp21.buf_dma_addr); else - return tx_desc->pp22.buf_dma_addr_ptp & MVPP2_DESC_DMA_MASK; + return le64_to_cpu(tx_desc->pp22.buf_dma_addr_ptp) & + MVPP2_DESC_DMA_MASK; } static void mvpp2_txdesc_dma_addr_set(struct mvpp2_port *port, @@ -166,12 +164,12 @@ static void mvpp2_txdesc_dma_addr_set(struct mvpp2_port *port, offset = dma_addr & MVPP2_TX_DESC_ALIGN; if (port->priv->hw_version == MVPP21) { - tx_desc->pp21.buf_dma_addr = addr; + tx_desc->pp21.buf_dma_addr = cpu_to_le32(addr); tx_desc->pp21.packet_offset = offset; } else { - u64 val = (u64)addr; + __le64 val = cpu_to_le64(addr); - tx_desc->pp22.buf_dma_addr_ptp &= ~MVPP2_DESC_DMA_MASK; + tx_desc->pp22.buf_dma_addr_ptp &= ~cpu_to_le64(MVPP2_DESC_DMA_MASK); tx_desc->pp22.buf_dma_addr_ptp |= val; tx_desc->pp22.packet_offset = offset; } @@ -181,9 +179,9 @@ static size_t mvpp2_txdesc_size_get(struct mvpp2_port *port, struct mvpp2_tx_desc *tx_desc) { if (port->priv->hw_version == MVPP21) - return tx_desc->pp21.data_size; + return le16_to_cpu(tx_desc->pp21.data_size); else - return tx_desc->pp22.data_size; + return le16_to_cpu(tx_desc->pp22.data_size); } static void mvpp2_txdesc_size_set(struct mvpp2_port *port, @@ -191,9 +189,9 @@ static void mvpp2_txdesc_size_set(struct mvpp2_port *port, size_t size) { if (port->priv->hw_version == MVPP21) - tx_desc->pp21.data_size = size; + tx_desc->pp21.data_size = cpu_to_le16(size); else - tx_desc->pp22.data_size = size; + tx_desc->pp22.data_size = cpu_to_le16(size); } static void mvpp2_txdesc_txq_set(struct mvpp2_port *port, @@ -211,9 +209,9 @@ static void mvpp2_txdesc_cmd_set(struct mvpp2_port *port, unsigned int command) { if (port->priv->hw_version == MVPP21) - tx_desc->pp21.command = command; + tx_desc->pp21.command = cpu_to_le32(command); else - tx_desc->pp22.command = command; + tx_desc->pp22.command = cpu_to_le32(command); } static unsigned int mvpp2_txdesc_offset_get(struct mvpp2_port *port, @@ -229,36 +227,38 @@ static dma_addr_t mvpp2_rxdesc_dma_addr_get(struct mvpp2_port *port, struct mvpp2_rx_desc *rx_desc) { if (port->priv->hw_version == 
MVPP21) - return rx_desc->pp21.buf_dma_addr; + return le32_to_cpu(rx_desc->pp21.buf_dma_addr); else - return rx_desc->pp22.buf_dma_addr_key_hash & MVPP2_DESC_DMA_MASK; + return le64_to_cpu(rx_desc->pp22.buf_dma_addr_key_hash) & + MVPP2_DESC_DMA_MASK; } static unsigned long mvpp2_rxdesc_cookie_get(struct mvpp2_port *port, struct mvpp2_rx_desc *rx_desc) { if (port->priv->hw_version == MVPP21) - return rx_desc->pp21.buf_cookie; + return le32_to_cpu(rx_desc->pp21.buf_cookie); else - return rx_desc->pp22.buf_cookie_misc & MVPP2_DESC_DMA_MASK; + return le64_to_cpu(rx_desc->pp22.buf_cookie_misc) & + MVPP2_DESC_DMA_MASK; } static size_t mvpp2_rxdesc_size_get(struct mvpp2_port *port, struct mvpp2_rx_desc *rx_desc) { if (port->priv->hw_version == MVPP21) - return rx_desc->pp21.data_size; + return le16_to_cpu(rx_desc->pp21.data_size); else - return rx_desc->pp22.data_size; + return le16_to_cpu(rx_desc->pp22.data_size); } static u32 mvpp2_rxdesc_status_get(struct mvpp2_port *port, struct mvpp2_rx_desc *rx_desc) { if (port->priv->hw_version == MVPP21) - return rx_desc->pp21.status; + return le32_to_cpu(rx_desc->pp21.status); else - return rx_desc->pp22.status; + return le32_to_cpu(rx_desc->pp22.status); } static void mvpp2_txq_inc_get(struct mvpp2_txq_pcpu *txq_pcpu) @@ -1735,7 +1735,7 @@ static u32 mvpp2_txq_desc_csum(int l3_offs, int l3_proto, command |= (ip_hdr_len << MVPP2_TXD_IP_HLEN_SHIFT); command |= MVPP2_TXD_IP_CSUM_DISABLE; - if (l3_proto == swab16(ETH_P_IP)) { + if (l3_proto == htons(ETH_P_IP)) { command &= ~MVPP2_TXD_IP_CSUM_DISABLE; /* enable IPv4 csum */ command &= ~MVPP2_TXD_L3_IP6; /* enable IPv4 */ } else { @@ -3273,6 +3273,11 @@ static void mvpp2_irqs_deinit(struct mvpp2_port *port) } } +static bool mvpp22_rss_is_supported(void) +{ + return queue_mode == MVPP2_QDIST_MULTI_MODE; +} + static int mvpp2_open(struct net_device *dev) { struct mvpp2_port *port = netdev_priv(dev); @@ -3365,9 +3370,6 @@ static int mvpp2_open(struct net_device *dev) mvpp2_start_dev(port); - if (priv->hw_version == MVPP22) - mvpp22_init_rss(port); - /* Start hardware statistics gathering */ queue_delayed_work(priv->stats_queue, &port->stats_work, MVPP2_MIB_COUNTERS_STATS_DELAY); @@ -3626,6 +3628,13 @@ static int mvpp2_set_features(struct net_device *dev, } } + if (changed & NETIF_F_RXHASH) { + if (features & NETIF_F_RXHASH) + mvpp22_rss_enable(port); + else + mvpp22_rss_disable(port); + } + return 0; } @@ -3813,6 +3822,94 @@ static int mvpp2_ethtool_set_link_ksettings(struct net_device *dev, return phylink_ethtool_ksettings_set(port->phylink, cmd); } +static int mvpp2_ethtool_get_rxnfc(struct net_device *dev, + struct ethtool_rxnfc *info, u32 *rules) +{ + struct mvpp2_port *port = netdev_priv(dev); + int ret = 0; + + if (!mvpp22_rss_is_supported()) + return -EOPNOTSUPP; + + switch (info->cmd) { + case ETHTOOL_GRXFH: + ret = mvpp2_ethtool_rxfh_get(port, info); + break; + case ETHTOOL_GRXRINGS: + info->data = port->nrxqs; + break; + default: + return -ENOTSUPP; + } + + return ret; +} + +static int mvpp2_ethtool_set_rxnfc(struct net_device *dev, + struct ethtool_rxnfc *info) +{ + struct mvpp2_port *port = netdev_priv(dev); + int ret = 0; + + if (!mvpp22_rss_is_supported()) + return -EOPNOTSUPP; + + switch (info->cmd) { + case ETHTOOL_SRXFH: + ret = mvpp2_ethtool_rxfh_set(port, info); + break; + default: + return -EOPNOTSUPP; + } + return ret; +} + +static u32 mvpp2_ethtool_get_rxfh_indir_size(struct net_device *dev) +{ + return mvpp22_rss_is_supported() ? 
MVPP22_RSS_TABLE_ENTRIES : 0; +} + +static int mvpp2_ethtool_get_rxfh(struct net_device *dev, u32 *indir, u8 *key, + u8 *hfunc) +{ + struct mvpp2_port *port = netdev_priv(dev); + + if (!mvpp22_rss_is_supported()) + return -EOPNOTSUPP; + + if (indir) + memcpy(indir, port->indir, + ARRAY_SIZE(port->indir) * sizeof(port->indir[0])); + + if (hfunc) + *hfunc = ETH_RSS_HASH_CRC32; + + return 0; +} + +static int mvpp2_ethtool_set_rxfh(struct net_device *dev, const u32 *indir, + const u8 *key, const u8 hfunc) +{ + struct mvpp2_port *port = netdev_priv(dev); + + if (!mvpp22_rss_is_supported()) + return -EOPNOTSUPP; + + if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_CRC32) + return -EOPNOTSUPP; + + if (key) + return -EOPNOTSUPP; + + if (indir) { + memcpy(port->indir, indir, + ARRAY_SIZE(port->indir) * sizeof(port->indir[0])); + mvpp22_rss_fill_table(port, port->id); + } + + return 0; +} + /* Device ops */ static const struct net_device_ops mvpp2_netdev_ops = { @@ -3844,6 +3941,12 @@ static const struct ethtool_ops mvpp2_eth_tool_ops = { .set_pauseparam = mvpp2_ethtool_set_pause_param, .get_link_ksettings = mvpp2_ethtool_get_link_ksettings, .set_link_ksettings = mvpp2_ethtool_set_link_ksettings, + .get_rxnfc = mvpp2_ethtool_get_rxnfc, + .set_rxnfc = mvpp2_ethtool_set_rxnfc, + .get_rxfh_indir_size = mvpp2_ethtool_get_rxfh_indir_size, + .get_rxfh = mvpp2_ethtool_get_rxfh, + .set_rxfh = mvpp2_ethtool_set_rxfh, + }; /* Used for PPv2.1, or PPv2.2 with the old Device Tree binding that @@ -3985,8 +4088,8 @@ static int mvpp2_port_init(struct mvpp2_port *port) MVPP2_MAX_PORTS * priv->max_port_rxqs) return -EINVAL; - if (port->nrxqs % 4 || (port->nrxqs > priv->max_port_rxqs) || - (port->ntxqs > MVPP2_MAX_TXQ)) + if (port->nrxqs % MVPP2_DEFAULT_RXQ || + port->nrxqs > priv->max_port_rxqs || port->ntxqs > MVPP2_MAX_TXQ) return -EINVAL; /* Disable port */ @@ -4075,6 +4178,9 @@ static int mvpp2_port_init(struct mvpp2_port *port) mvpp2_cls_oversize_rxq_set(port); mvpp2_cls_port_config(port); + if (mvpp22_rss_is_supported()) + mvpp22_rss_port_init(port); + /* Provide an initial Rx packet size */ port->pkt_size = MVPP2_RX_PKT_SIZE(port->dev->mtu); @@ -4681,6 +4787,9 @@ static int mvpp2_port_probe(struct platform_device *pdev, dev->hw_features |= features | NETIF_F_RXCSUM | NETIF_F_GRO | NETIF_F_HW_VLAN_CTAG_FILTER; + if (mvpp22_rss_is_supported()) + dev->hw_features |= NETIF_F_RXHASH; + if (port->pool_long->id == MVPP2_BM_JUMBO && port->id != 0) { dev->features &= ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM); dev->hw_features &= ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM); @@ -5011,6 +5120,12 @@ static int mvpp2_probe(struct platform_device *pdev) (unsigned long)of_device_get_match_data(&pdev->dev); } + /* multi queue mode isn't supported on PPV2.1, fallback to single + * mode + */ + if (priv->hw_version == MVPP21) + queue_mode = MVPP2_QDIST_SINGLE_MODE; + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); base = devm_ioremap_resource(&pdev->dev, res); if (IS_ERR(base)) @@ -5174,6 +5289,8 @@ static int mvpp2_probe(struct platform_device *pdev) goto err_port_probe; } + mvpp2_dbgfs_init(priv, pdev->name); + platform_set_drvdata(pdev, priv); return 0; @@ -5207,6 +5324,8 @@ static int mvpp2_remove(struct platform_device *pdev) struct fwnode_handle *port_fwnode; int i = 0; + mvpp2_dbgfs_cleanup(priv); + flush_workqueue(priv->stats_queue); destroy_workqueue(priv->stats_queue); diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c index 6bb69f086794..392fd895f278 
100644 --- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c @@ -1,13 +1,10 @@ +// SPDX-License-Identifier: GPL-2.0 /* * Header Parser helpers for Marvell PPv2 Network Controller * * Copyright (C) 2014 Marvell * * Marcin Wojtas <mw@semihalf.com> - * - * This file is licensed under the terms of the GNU General Public - * License version 2. This program is licensed "as is" without any - * warranty of any kind, whether express or implied. */ #include <linux/kernel.h> @@ -30,24 +27,24 @@ static int mvpp2_prs_hw_write(struct mvpp2 *priv, struct mvpp2_prs_entry *pe) return -EINVAL; /* Clear entry invalidation bit */ - pe->tcam.word[MVPP2_PRS_TCAM_INV_WORD] &= ~MVPP2_PRS_TCAM_INV_MASK; + pe->tcam[MVPP2_PRS_TCAM_INV_WORD] &= ~MVPP2_PRS_TCAM_INV_MASK; /* Write tcam index - indirect access */ mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, pe->index); for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++) - mvpp2_write(priv, MVPP2_PRS_TCAM_DATA_REG(i), pe->tcam.word[i]); + mvpp2_write(priv, MVPP2_PRS_TCAM_DATA_REG(i), pe->tcam[i]); /* Write sram index - indirect access */ mvpp2_write(priv, MVPP2_PRS_SRAM_IDX_REG, pe->index); for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++) - mvpp2_write(priv, MVPP2_PRS_SRAM_DATA_REG(i), pe->sram.word[i]); + mvpp2_write(priv, MVPP2_PRS_SRAM_DATA_REG(i), pe->sram[i]); return 0; } /* Initialize tcam entry from hw */ -static int mvpp2_prs_init_from_hw(struct mvpp2 *priv, - struct mvpp2_prs_entry *pe, int tid) +int mvpp2_prs_init_from_hw(struct mvpp2 *priv, struct mvpp2_prs_entry *pe, + int tid) { int i; @@ -60,18 +57,18 @@ static int mvpp2_prs_init_from_hw(struct mvpp2 *priv, /* Write tcam index - indirect access */ mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, pe->index); - pe->tcam.word[MVPP2_PRS_TCAM_INV_WORD] = mvpp2_read(priv, + pe->tcam[MVPP2_PRS_TCAM_INV_WORD] = mvpp2_read(priv, MVPP2_PRS_TCAM_DATA_REG(MVPP2_PRS_TCAM_INV_WORD)); - if (pe->tcam.word[MVPP2_PRS_TCAM_INV_WORD] & MVPP2_PRS_TCAM_INV_MASK) + if (pe->tcam[MVPP2_PRS_TCAM_INV_WORD] & MVPP2_PRS_TCAM_INV_MASK) return MVPP2_PRS_TCAM_ENTRY_INVALID; for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++) - pe->tcam.word[i] = mvpp2_read(priv, MVPP2_PRS_TCAM_DATA_REG(i)); + pe->tcam[i] = mvpp2_read(priv, MVPP2_PRS_TCAM_DATA_REG(i)); /* Write sram index - indirect access */ mvpp2_write(priv, MVPP2_PRS_SRAM_IDX_REG, pe->index); for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++) - pe->sram.word[i] = mvpp2_read(priv, MVPP2_PRS_SRAM_DATA_REG(i)); + pe->sram[i] = mvpp2_read(priv, MVPP2_PRS_SRAM_DATA_REG(i)); return 0; } @@ -103,42 +100,35 @@ static void mvpp2_prs_shadow_ri_set(struct mvpp2 *priv, int index, /* Update lookup field in tcam sw entry */ static void mvpp2_prs_tcam_lu_set(struct mvpp2_prs_entry *pe, unsigned int lu) { - int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_LU_BYTE); - - pe->tcam.byte[MVPP2_PRS_TCAM_LU_BYTE] = lu; - pe->tcam.byte[enable_off] = MVPP2_PRS_LU_MASK; + pe->tcam[MVPP2_PRS_TCAM_LU_WORD] &= ~MVPP2_PRS_TCAM_LU(MVPP2_PRS_LU_MASK); + pe->tcam[MVPP2_PRS_TCAM_LU_WORD] &= ~MVPP2_PRS_TCAM_LU_EN(MVPP2_PRS_LU_MASK); + pe->tcam[MVPP2_PRS_TCAM_LU_WORD] |= MVPP2_PRS_TCAM_LU(lu & MVPP2_PRS_LU_MASK); + pe->tcam[MVPP2_PRS_TCAM_LU_WORD] |= MVPP2_PRS_TCAM_LU_EN(MVPP2_PRS_LU_MASK); } /* Update mask for single port in tcam sw entry */ static void mvpp2_prs_tcam_port_set(struct mvpp2_prs_entry *pe, unsigned int port, bool add) { - int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_PORT_BYTE); - if (add) - pe->tcam.byte[enable_off] &= ~(1 << port); + pe->tcam[MVPP2_PRS_TCAM_PORT_WORD] &= 
~MVPP2_PRS_TCAM_PORT_EN(BIT(port)); else - pe->tcam.byte[enable_off] |= 1 << port; + pe->tcam[MVPP2_PRS_TCAM_PORT_WORD] |= MVPP2_PRS_TCAM_PORT_EN(BIT(port)); } /* Update port map in tcam sw entry */ static void mvpp2_prs_tcam_port_map_set(struct mvpp2_prs_entry *pe, unsigned int ports) { - unsigned char port_mask = MVPP2_PRS_PORT_MASK; - int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_PORT_BYTE); - - pe->tcam.byte[MVPP2_PRS_TCAM_PORT_BYTE] = 0; - pe->tcam.byte[enable_off] &= ~port_mask; - pe->tcam.byte[enable_off] |= ~ports & MVPP2_PRS_PORT_MASK; + pe->tcam[MVPP2_PRS_TCAM_PORT_WORD] &= ~MVPP2_PRS_TCAM_PORT(MVPP2_PRS_PORT_MASK); + pe->tcam[MVPP2_PRS_TCAM_PORT_WORD] &= ~MVPP2_PRS_TCAM_PORT_EN(MVPP2_PRS_PORT_MASK); + pe->tcam[MVPP2_PRS_TCAM_PORT_WORD] |= MVPP2_PRS_TCAM_PORT_EN(~ports & MVPP2_PRS_PORT_MASK); } /* Obtain port map from tcam sw entry */ -static unsigned int mvpp2_prs_tcam_port_map_get(struct mvpp2_prs_entry *pe) +unsigned int mvpp2_prs_tcam_port_map_get(struct mvpp2_prs_entry *pe) { - int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_PORT_BYTE); - - return ~(pe->tcam.byte[enable_off]) & MVPP2_PRS_PORT_MASK; + return (~pe->tcam[MVPP2_PRS_TCAM_PORT_WORD] >> 24) & MVPP2_PRS_PORT_MASK; } /* Set byte of data and its enable bits in tcam sw entry */ @@ -146,55 +136,58 @@ static void mvpp2_prs_tcam_data_byte_set(struct mvpp2_prs_entry *pe, unsigned int offs, unsigned char byte, unsigned char enable) { - pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE(offs)] = byte; - pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE_EN(offs)] = enable; + int pos = MVPP2_PRS_BYTE_IN_WORD(offs) * BITS_PER_BYTE; + + pe->tcam[MVPP2_PRS_BYTE_TO_WORD(offs)] &= ~(0xff << pos); + pe->tcam[MVPP2_PRS_BYTE_TO_WORD(offs)] &= ~(MVPP2_PRS_TCAM_EN(0xff) << pos); + pe->tcam[MVPP2_PRS_BYTE_TO_WORD(offs)] |= byte << pos; + pe->tcam[MVPP2_PRS_BYTE_TO_WORD(offs)] |= MVPP2_PRS_TCAM_EN(enable << pos); } /* Get byte of data and its enable bits from tcam sw entry */ -static void mvpp2_prs_tcam_data_byte_get(struct mvpp2_prs_entry *pe, - unsigned int offs, unsigned char *byte, - unsigned char *enable) +void mvpp2_prs_tcam_data_byte_get(struct mvpp2_prs_entry *pe, + unsigned int offs, unsigned char *byte, + unsigned char *enable) { - *byte = pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE(offs)]; - *enable = pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE_EN(offs)]; + int pos = MVPP2_PRS_BYTE_IN_WORD(offs) * BITS_PER_BYTE; + + *byte = (pe->tcam[MVPP2_PRS_BYTE_TO_WORD(offs)] >> pos) & 0xff; + *enable = (pe->tcam[MVPP2_PRS_BYTE_TO_WORD(offs)] >> (pos + 16)) & 0xff; } /* Compare tcam data bytes with a pattern */ static bool mvpp2_prs_tcam_data_cmp(struct mvpp2_prs_entry *pe, int offs, u16 data) { - int off = MVPP2_PRS_TCAM_DATA_BYTE(offs); u16 tcam_data; - tcam_data = (pe->tcam.byte[off + 1] << 8) | pe->tcam.byte[off]; - if (tcam_data != data) - return false; - return true; + tcam_data = pe->tcam[MVPP2_PRS_BYTE_TO_WORD(offs)] & 0xffff; + return tcam_data == data; } /* Update ai bits in tcam sw entry */ static void mvpp2_prs_tcam_ai_update(struct mvpp2_prs_entry *pe, unsigned int bits, unsigned int enable) { - int i, ai_idx = MVPP2_PRS_TCAM_AI_BYTE; + int i; for (i = 0; i < MVPP2_PRS_AI_BITS; i++) { if (!(enable & BIT(i))) continue; if (bits & BIT(i)) - pe->tcam.byte[ai_idx] |= 1 << i; + pe->tcam[MVPP2_PRS_TCAM_AI_WORD] |= BIT(i); else - pe->tcam.byte[ai_idx] &= ~(1 << i); + pe->tcam[MVPP2_PRS_TCAM_AI_WORD] &= ~BIT(i); } - pe->tcam.byte[MVPP2_PRS_TCAM_EN_OFFS(ai_idx)] |= enable; + pe->tcam[MVPP2_PRS_TCAM_AI_WORD] |= MVPP2_PRS_TCAM_AI_EN(enable); } /* Get ai 
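The word-indexed TCAM helpers rewritten above (mvpp2_prs_tcam_data_byte_set()/..._get()) pack each pair of header bytes into one 32-bit word: 16 data bits in the low half, their 16 enable bits in the high half, as the mvpp2_prs.h comment later in this series states. A minimal standalone sketch of that packing, using hypothetical user-space macros that mirror MVPP2_PRS_BYTE_TO_WORD(), MVPP2_PRS_BYTE_IN_WORD() and MVPP2_PRS_TCAM_EN() — an illustration only, not the driver code:

#include <stdint.h>
#include <stdio.h>

#define PRS_BYTE_TO_WORD(b)	((b) / 2)	/* which 32-bit TCAM word holds header byte b */
#define PRS_BYTE_IN_WORD(b)	((b) % 2)	/* low (0) or high (1) data byte within that word */
#define PRS_TCAM_EN(data)	((data) << 16)	/* enable bits sit 16 bits above the data bits */

int main(void)
{
	uint32_t tcam[6] = { 0 };	/* MVPP2_PRS_TCAM_WORDS is 6 */
	unsigned int offs = 3;		/* header byte 3 */
	unsigned int pos = PRS_BYTE_IN_WORD(offs) * 8;

	/* set data byte 0xAA at offset 3 with enable mask 0xFF, the way
	 * mvpp2_prs_tcam_data_byte_set() does */
	tcam[PRS_BYTE_TO_WORD(offs)] |= 0xAAu << pos;
	tcam[PRS_BYTE_TO_WORD(offs)] |= PRS_TCAM_EN(0xFFu << pos);

	/* word 1 now reads 0xff00aa00: data in bits 15:8, enable in bits 31:24 */
	printf("tcam[%u] = 0x%08x\n", PRS_BYTE_TO_WORD(offs),
	       (unsigned int)tcam[PRS_BYTE_TO_WORD(offs)]);
	return 0;
}

Running this prints tcam[1] = 0xff00aa00, the layout that the in-driver getter mvpp2_prs_tcam_data_byte_get() reverses by shifting the same word right by pos and pos + 16.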
bits from tcam sw entry */ static int mvpp2_prs_tcam_ai_get(struct mvpp2_prs_entry *pe) { - return pe->tcam.byte[MVPP2_PRS_TCAM_AI_BYTE]; + return pe->tcam[MVPP2_PRS_TCAM_AI_WORD] & MVPP2_PRS_AI_MASK; } /* Set ethertype in tcam sw entry */ @@ -215,16 +208,16 @@ static void mvpp2_prs_match_vid(struct mvpp2_prs_entry *pe, int offset, /* Set bits in sram sw entry */ static void mvpp2_prs_sram_bits_set(struct mvpp2_prs_entry *pe, int bit_num, - int val) + u32 val) { - pe->sram.byte[MVPP2_BIT_TO_BYTE(bit_num)] |= (val << (bit_num % 8)); + pe->sram[MVPP2_BIT_TO_WORD(bit_num)] |= (val << (MVPP2_BIT_IN_WORD(bit_num))); } /* Clear bits in sram sw entry */ static void mvpp2_prs_sram_bits_clear(struct mvpp2_prs_entry *pe, int bit_num, - int val) + u32 val) { - pe->sram.byte[MVPP2_BIT_TO_BYTE(bit_num)] &= ~(val << (bit_num % 8)); + pe->sram[MVPP2_BIT_TO_WORD(bit_num)] &= ~(val << (MVPP2_BIT_IN_WORD(bit_num))); } /* Update ri bits in sram sw entry */ @@ -234,15 +227,16 @@ static void mvpp2_prs_sram_ri_update(struct mvpp2_prs_entry *pe, unsigned int i; for (i = 0; i < MVPP2_PRS_SRAM_RI_CTRL_BITS; i++) { - int ri_off = MVPP2_PRS_SRAM_RI_OFFS; - if (!(mask & BIT(i))) continue; if (bits & BIT(i)) - mvpp2_prs_sram_bits_set(pe, ri_off + i, 1); + mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_RI_OFFS + i, + 1); else - mvpp2_prs_sram_bits_clear(pe, ri_off + i, 1); + mvpp2_prs_sram_bits_clear(pe, + MVPP2_PRS_SRAM_RI_OFFS + i, + 1); mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_RI_CTRL_OFFS + i, 1); } @@ -251,7 +245,7 @@ static void mvpp2_prs_sram_ri_update(struct mvpp2_prs_entry *pe, /* Obtain ri bits from sram sw entry */ static int mvpp2_prs_sram_ri_get(struct mvpp2_prs_entry *pe) { - return pe->sram.word[MVPP2_PRS_SRAM_RI_WORD]; + return pe->sram[MVPP2_PRS_SRAM_RI_WORD]; } /* Update ai bits in sram sw entry */ @@ -259,16 +253,18 @@ static void mvpp2_prs_sram_ai_update(struct mvpp2_prs_entry *pe, unsigned int bits, unsigned int mask) { unsigned int i; - int ai_off = MVPP2_PRS_SRAM_AI_OFFS; for (i = 0; i < MVPP2_PRS_SRAM_AI_CTRL_BITS; i++) { if (!(mask & BIT(i))) continue; if (bits & BIT(i)) - mvpp2_prs_sram_bits_set(pe, ai_off + i, 1); + mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_AI_OFFS + i, + 1); else - mvpp2_prs_sram_bits_clear(pe, ai_off + i, 1); + mvpp2_prs_sram_bits_clear(pe, + MVPP2_PRS_SRAM_AI_OFFS + i, + 1); mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_AI_CTRL_OFFS + i, 1); } @@ -278,12 +274,12 @@ static void mvpp2_prs_sram_ai_update(struct mvpp2_prs_entry *pe, static int mvpp2_prs_sram_ai_get(struct mvpp2_prs_entry *pe) { u8 bits; - int ai_off = MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_AI_OFFS); - int ai_en_off = ai_off + 1; - int ai_shift = MVPP2_PRS_SRAM_AI_OFFS % 8; + /* ai is stored on bits 90->97; so it spreads across two u32 */ + int ai_off = MVPP2_BIT_TO_WORD(MVPP2_PRS_SRAM_AI_OFFS); + int ai_shift = MVPP2_BIT_IN_WORD(MVPP2_PRS_SRAM_AI_OFFS); - bits = (pe->sram.byte[ai_off] >> ai_shift) | - (pe->sram.byte[ai_en_off] << (8 - ai_shift)); + bits = (pe->sram[ai_off] >> ai_shift) | + (pe->sram[ai_off + 1] << (32 - ai_shift)); return bits; } @@ -316,8 +312,7 @@ static void mvpp2_prs_sram_shift_set(struct mvpp2_prs_entry *pe, int shift, } /* Set value */ - pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_SHIFT_OFFS)] = - (unsigned char)shift; + pe->sram[MVPP2_BIT_TO_WORD(MVPP2_PRS_SRAM_SHIFT_OFFS)] = shift & MVPP2_PRS_SRAM_SHIFT_MASK; /* Reset and set operation */ mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_SHIFT_OFFS, @@ -346,13 +341,8 @@ static void mvpp2_prs_sram_offset_set(struct mvpp2_prs_entry *pe, /* 
Set value */ mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_UDF_OFFS, MVPP2_PRS_SRAM_UDF_MASK); - mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_UDF_OFFS, offset); - pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_UDF_OFFS + - MVPP2_PRS_SRAM_UDF_BITS)] &= - ~(MVPP2_PRS_SRAM_UDF_MASK >> (8 - (MVPP2_PRS_SRAM_UDF_OFFS % 8))); - pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_UDF_OFFS + - MVPP2_PRS_SRAM_UDF_BITS)] |= - (offset >> (8 - (MVPP2_PRS_SRAM_UDF_OFFS % 8))); + mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_UDF_OFFS, + offset & MVPP2_PRS_SRAM_UDF_MASK); /* Set offset type */ mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_UDF_TYPE_OFFS, @@ -362,16 +352,8 @@ static void mvpp2_prs_sram_offset_set(struct mvpp2_prs_entry *pe, /* Set offset operation */ mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS, MVPP2_PRS_SRAM_OP_SEL_UDF_MASK); - mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS, op); - - pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS + - MVPP2_PRS_SRAM_OP_SEL_UDF_BITS)] &= - ~(MVPP2_PRS_SRAM_OP_SEL_UDF_MASK >> - (8 - (MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS % 8))); - - pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS + - MVPP2_PRS_SRAM_OP_SEL_UDF_BITS)] |= - (op >> (8 - (MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS % 8))); + mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS, + op & MVPP2_PRS_SRAM_OP_SEL_UDF_MASK); /* Set base offset as current */ mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_BASE_OFFS, 1); @@ -662,7 +644,7 @@ static int mvpp2_prs_vlan_find(struct mvpp2 *priv, unsigned short tpid, int ai) continue; mvpp2_prs_init_from_hw(priv, &pe, tid); - match = mvpp2_prs_tcam_data_cmp(&pe, 0, swab16(tpid)); + match = mvpp2_prs_tcam_data_cmp(&pe, 0, tpid); if (!match) continue; @@ -790,8 +772,8 @@ static int mvpp2_prs_double_vlan_find(struct mvpp2 *priv, unsigned short tpid1, mvpp2_prs_init_from_hw(priv, &pe, tid); - match = mvpp2_prs_tcam_data_cmp(&pe, 0, swab16(tpid1)) && - mvpp2_prs_tcam_data_cmp(&pe, 4, swab16(tpid2)); + match = mvpp2_prs_tcam_data_cmp(&pe, 0, tpid1) && + mvpp2_prs_tcam_data_cmp(&pe, 4, tpid2); if (!match) continue; @@ -932,8 +914,8 @@ static int mvpp2_prs_ip4_proto(struct mvpp2 *priv, unsigned short proto, pe.index = tid; /* Clear ri before updating */ - pe.sram.word[MVPP2_PRS_SRAM_RI_WORD] = 0x0; - pe.sram.word[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0; + pe.sram[MVPP2_PRS_SRAM_RI_WORD] = 0x0; + pe.sram[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0; mvpp2_prs_sram_ri_update(&pe, ri, ri_mask); mvpp2_prs_sram_ri_update(&pe, ri | MVPP2_PRS_RI_IP_FRAG_TRUE, @@ -1433,17 +1415,13 @@ static int mvpp2_prs_etype_init(struct mvpp2 *priv) pe.index = tid; - /* Clear tcam data before updating */ - pe.tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE(MVPP2_ETH_TYPE_LEN)] = 0x0; - pe.tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE_EN(MVPP2_ETH_TYPE_LEN)] = 0x0; - mvpp2_prs_tcam_data_byte_set(&pe, MVPP2_ETH_TYPE_LEN, MVPP2_PRS_IPV4_HEAD, MVPP2_PRS_IPV4_HEAD_MASK); /* Clear ri before updating */ - pe.sram.word[MVPP2_PRS_SRAM_RI_WORD] = 0x0; - pe.sram.word[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0; + pe.sram[MVPP2_PRS_SRAM_RI_WORD] = 0x0; + pe.sram[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0; mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4_OPT, MVPP2_PRS_RI_L3_PROTO_MASK); @@ -1644,8 +1622,8 @@ static int mvpp2_prs_pppoe_init(struct mvpp2 *priv) MVPP2_PRS_IPV4_IHL_MASK); /* Clear ri before updating */ - pe.sram.word[MVPP2_PRS_SRAM_RI_WORD] = 0x0; - pe.sram.word[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0; + pe.sram[MVPP2_PRS_SRAM_RI_WORD] = 0x0; + pe.sram[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0; 
mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4, MVPP2_PRS_RI_L3_PROTO_MASK); @@ -2428,6 +2406,41 @@ int mvpp2_prs_tag_mode_set(struct mvpp2 *priv, int port, int type) return 0; } +int mvpp2_prs_add_flow(struct mvpp2 *priv, int flow, u32 ri, u32 ri_mask) +{ + struct mvpp2_prs_entry pe; + u8 *ri_byte, *ri_byte_mask; + int tid, i; + + memset(&pe, 0, sizeof(pe)); + + tid = mvpp2_prs_tcam_first_free(priv, + MVPP2_PE_LAST_FREE_TID, + MVPP2_PE_FIRST_FREE_TID); + if (tid < 0) + return tid; + + pe.index = tid; + + ri_byte = (u8 *)&ri; + ri_byte_mask = (u8 *)&ri_mask; + + mvpp2_prs_sram_ai_update(&pe, flow, MVPP2_PRS_FLOW_ID_MASK); + mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_DONE_BIT, 1); + + for (i = 0; i < 4; i++) { + mvpp2_prs_tcam_data_byte_set(&pe, i, ri_byte[i], + ri_byte_mask[i]); + } + + mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_FLOWS); + mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_FLOWS); + mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK); + mvpp2_prs_hw_write(priv, &pe); + + return 0; +} + /* Set prs flow for the port */ int mvpp2_prs_def_flow(struct mvpp2_port *port) { @@ -2465,3 +2478,19 @@ int mvpp2_prs_def_flow(struct mvpp2_port *port) return 0; } + +int mvpp2_prs_hits(struct mvpp2 *priv, int index) +{ + u32 val; + + if (index > MVPP2_PRS_TCAM_SRAM_SIZE) + return -EINVAL; + + mvpp2_write(priv, MVPP2_PRS_TCAM_HIT_IDX_REG, index); + + val = mvpp2_read(priv, MVPP2_PRS_TCAM_HIT_CNT_REG); + + val &= MVPP2_PRS_TCAM_HIT_CNT_MASK; + + return val; +} diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.h b/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.h index 22fbbc4c8b28..e22f6c85d380 100644 --- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.h +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.h @@ -1,22 +1,20 @@ +/* SPDX-License-Identifier: GPL-2.0 */ /* * Header Parser definitions for Marvell PPv2 Network Controller * * Copyright (C) 2014 Marvell * * Marcin Wojtas <mw@semihalf.com> - * - * This file is licensed under the terms of the GNU General Public - * License version 2. This program is licensed "as is" without any - * warranty of any kind, whether express or implied. */ +#ifndef _MVPP2_PRS_H_ +#define _MVPP2_PRS_H_ + #include <linux/kernel.h> #include <linux/netdevice.h> +#include <linux/platform_device.h> #include "mvpp2.h" -#ifndef _MVPP2_PRS_H_ -#define _MVPP2_PRS_H_ - /* Parser constants */ #define MVPP2_PRS_TCAM_SRAM_SIZE 256 #define MVPP2_PRS_TCAM_WORDS 6 @@ -50,17 +48,25 @@ * The fields are represented by MVPP2_PRS_TCAM_DATA_REG(5)->(0). 
*/ #define MVPP2_PRS_AI_BITS 8 +#define MVPP2_PRS_AI_MASK 0xff #define MVPP2_PRS_PORT_MASK 0xff #define MVPP2_PRS_LU_MASK 0xf -#define MVPP2_PRS_TCAM_DATA_BYTE(offs) \ - (((offs) - ((offs) % 2)) * 2 + ((offs) % 2)) -#define MVPP2_PRS_TCAM_DATA_BYTE_EN(offs) \ - (((offs) * 2) - ((offs) % 2) + 2) -#define MVPP2_PRS_TCAM_AI_BYTE 16 -#define MVPP2_PRS_TCAM_PORT_BYTE 17 -#define MVPP2_PRS_TCAM_LU_BYTE 20 -#define MVPP2_PRS_TCAM_EN_OFFS(offs) ((offs) + 2) -#define MVPP2_PRS_TCAM_INV_WORD 5 + +/* TCAM entries in registers are accessed using 16 data bits + 16 enable bits */ +#define MVPP2_PRS_BYTE_TO_WORD(byte) ((byte) / 2) +#define MVPP2_PRS_BYTE_IN_WORD(byte) ((byte) % 2) + +#define MVPP2_PRS_TCAM_EN(data) ((data) << 16) +#define MVPP2_PRS_TCAM_AI_WORD 4 +#define MVPP2_PRS_TCAM_AI(ai) (ai) +#define MVPP2_PRS_TCAM_AI_EN(ai) MVPP2_PRS_TCAM_EN(MVPP2_PRS_TCAM_AI(ai)) +#define MVPP2_PRS_TCAM_PORT_WORD 4 +#define MVPP2_PRS_TCAM_PORT(p) ((p) << 8) +#define MVPP2_PRS_TCAM_PORT_EN(p) MVPP2_PRS_TCAM_EN(MVPP2_PRS_TCAM_PORT(p)) +#define MVPP2_PRS_TCAM_LU_WORD 5 +#define MVPP2_PRS_TCAM_LU(lu) (lu) +#define MVPP2_PRS_TCAM_LU_EN(lu) MVPP2_PRS_TCAM_EN(MVPP2_PRS_TCAM_LU(lu)) +#define MVPP2_PRS_TCAM_INV_WORD 5 #define MVPP2_PRS_VID_TCAM_BYTE 2 @@ -146,6 +152,7 @@ #define MVPP2_PRS_SRAM_RI_CTRL_BITS 32 #define MVPP2_PRS_SRAM_SHIFT_OFFS 64 #define MVPP2_PRS_SRAM_SHIFT_SIGN_BIT 72 +#define MVPP2_PRS_SRAM_SHIFT_MASK 0xff #define MVPP2_PRS_SRAM_UDF_OFFS 73 #define MVPP2_PRS_SRAM_UDF_BITS 8 #define MVPP2_PRS_SRAM_UDF_MASK 0xff @@ -214,6 +221,10 @@ #define MVPP2_PRS_RI_UDF7_IP6_LITE BIT(29) #define MVPP2_PRS_RI_DROP_MASK 0x80000000 +#define MVPP2_PRS_IP_MASK (MVPP2_PRS_RI_L3_PROTO_MASK | \ + MVPP2_PRS_RI_IP_FRAG_MASK | \ + MVPP2_PRS_RI_L4_PROTO_MASK) + /* Sram additional info bits assignment */ #define MVPP2_PRS_IPV4_DIP_AI_BIT BIT(0) #define MVPP2_PRS_IPV6_NO_EXT_AI_BIT BIT(0) @@ -255,20 +266,15 @@ enum mvpp2_prs_lookup { MVPP2_PRS_LU_LAST, }; -union mvpp2_prs_tcam_entry { - u32 word[MVPP2_PRS_TCAM_WORDS]; - u8 byte[MVPP2_PRS_TCAM_WORDS * 4]; -}; - -union mvpp2_prs_sram_entry { - u32 word[MVPP2_PRS_SRAM_WORDS]; - u8 byte[MVPP2_PRS_SRAM_WORDS * 4]; -}; - struct mvpp2_prs_entry { u32 index; - union mvpp2_prs_tcam_entry tcam; - union mvpp2_prs_sram_entry sram; + u32 tcam[MVPP2_PRS_TCAM_WORDS]; + u32 sram[MVPP2_PRS_SRAM_WORDS]; +}; + +struct mvpp2_prs_result_info { + u32 ri; + u32 ri_mask; }; struct mvpp2_prs_shadow { @@ -288,10 +294,21 @@ struct mvpp2_prs_shadow { int mvpp2_prs_default_init(struct platform_device *pdev, struct mvpp2 *priv); +int mvpp2_prs_init_from_hw(struct mvpp2 *priv, struct mvpp2_prs_entry *pe, + int tid); + +unsigned int mvpp2_prs_tcam_port_map_get(struct mvpp2_prs_entry *pe); + +void mvpp2_prs_tcam_data_byte_get(struct mvpp2_prs_entry *pe, + unsigned int offs, unsigned char *byte, + unsigned char *enable); + int mvpp2_prs_mac_da_accept(struct mvpp2_port *port, const u8 *da, bool add); int mvpp2_prs_tag_mode_set(struct mvpp2 *priv, int port, int type); +int mvpp2_prs_add_flow(struct mvpp2 *priv, int flow, u32 ri, u32 ri_mask); + int mvpp2_prs_def_flow(struct mvpp2_port *port); void mvpp2_prs_vid_enable_filtering(struct mvpp2_port *port); @@ -311,4 +328,6 @@ void mvpp2_prs_mac_del_all(struct mvpp2_port *port); int mvpp2_prs_update_mac_da(struct net_device *dev, const u8 *da); +int mvpp2_prs_hits(struct mvpp2 *priv, int index); + #endif diff --git a/drivers/net/ethernet/mellanox/mlx4/Makefile b/drivers/net/ethernet/mellanox/mlx4/Makefile index 16b10d01fcf4..3f400770fcd8 100644 --- 
a/drivers/net/ethernet/mellanox/mlx4/Makefile +++ b/drivers/net/ethernet/mellanox/mlx4/Makefile @@ -3,7 +3,7 @@ obj-$(CONFIG_MLX4_CORE) += mlx4_core.o mlx4_core-y := alloc.o catas.o cmd.o cq.o eq.o fw.o fw_qos.o icm.o intf.o \ main.o mcg.o mr.o pd.o port.o profile.o qp.o reset.o sense.o \ - srq.o resource_tracker.o + srq.o resource_tracker.o crdump.o obj-$(CONFIG_MLX4_EN) += mlx4_en.o diff --git a/drivers/net/ethernet/mellanox/mlx4/catas.c b/drivers/net/ethernet/mellanox/mlx4/catas.c index e2b6b0cac1ac..c81d15bf259c 100644 --- a/drivers/net/ethernet/mellanox/mlx4/catas.c +++ b/drivers/net/ethernet/mellanox/mlx4/catas.c @@ -178,10 +178,12 @@ void mlx4_enter_error_state(struct mlx4_dev_persistent *persist) dev = persist->dev; mlx4_err(dev, "device is going to be reset\n"); - if (mlx4_is_slave(dev)) + if (mlx4_is_slave(dev)) { err = mlx4_reset_slave(dev); - else + } else { + mlx4_crdump_collect(dev); err = mlx4_reset_master(dev); + } if (!err) { mlx4_err(dev, "device was reset successfully\n"); @@ -212,7 +214,7 @@ static void mlx4_handle_error_state(struct mlx4_dev_persistent *persist) mutex_lock(&persist->interface_state_mutex); if (persist->interface_state & MLX4_INTERFACE_STATE_UP && !(persist->interface_state & MLX4_INTERFACE_STATE_DELETION)) { - err = mlx4_restart_one(persist->pdev); + err = mlx4_restart_one(persist->pdev, false, NULL); mlx4_info(persist->dev, "mlx4_restart_one was ended, ret=%d\n", err); } diff --git a/drivers/net/ethernet/mellanox/mlx4/crdump.c b/drivers/net/ethernet/mellanox/mlx4/crdump.c new file mode 100644 index 000000000000..88316c743820 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx4/crdump.c @@ -0,0 +1,239 @@ +/* + * Copyright (c) 2018, Mellanox Technologies. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +#include "mlx4.h" + +#define BAD_ACCESS 0xBADACCE5 +#define HEALTH_BUFFER_SIZE 0x40 +#define CR_ENABLE_BIT swab32(BIT(6)) +#define CR_ENABLE_BIT_OFFSET 0xF3F04 +#define MAX_NUM_OF_DUMPS_TO_STORE (8) + +static const char *region_cr_space_str = "cr-space"; +static const char *region_fw_health_str = "fw-health"; + +/* Set to true in case cr enable bit was set to true before crdump */ +static bool crdump_enbale_bit_set; + +static void crdump_enable_crspace_access(struct mlx4_dev *dev, + u8 __iomem *cr_space) +{ + /* Get current enable bit value */ + crdump_enbale_bit_set = + readl(cr_space + CR_ENABLE_BIT_OFFSET) & CR_ENABLE_BIT; + + /* Enable FW CR filter (set bit6 to 0) */ + if (crdump_enbale_bit_set) + writel(readl(cr_space + CR_ENABLE_BIT_OFFSET) & ~CR_ENABLE_BIT, + cr_space + CR_ENABLE_BIT_OFFSET); + + /* Enable block volatile crspace accesses */ + writel(swab32(1), cr_space + dev->caps.health_buffer_addrs + + HEALTH_BUFFER_SIZE); +} + +static void crdump_disable_crspace_access(struct mlx4_dev *dev, + u8 __iomem *cr_space) +{ + /* Disable block volatile crspace accesses */ + writel(0, cr_space + dev->caps.health_buffer_addrs + + HEALTH_BUFFER_SIZE); + + /* Restore FW CR filter value (set bit6 to original value) */ + if (crdump_enbale_bit_set) + writel(readl(cr_space + CR_ENABLE_BIT_OFFSET) | CR_ENABLE_BIT, + cr_space + CR_ENABLE_BIT_OFFSET); +} + +static void mlx4_crdump_collect_crspace(struct mlx4_dev *dev, + u8 __iomem *cr_space, + u32 id) +{ + struct mlx4_fw_crdump *crdump = &dev->persist->crdump; + struct pci_dev *pdev = dev->persist->pdev; + unsigned long cr_res_size; + u8 *crspace_data; + int offset; + int err; + + if (!crdump->region_crspace) { + mlx4_err(dev, "crdump: cr-space region is NULL\n"); + return; + } + + /* Try to collect CR space */ + cr_res_size = pci_resource_len(pdev, 0); + crspace_data = kvmalloc(cr_res_size, GFP_KERNEL); + if (crspace_data) { + for (offset = 0; offset < cr_res_size; offset += 4) + *(u32 *)(crspace_data + offset) = + readl(cr_space + offset); + + err = devlink_region_snapshot_create(crdump->region_crspace, + cr_res_size, crspace_data, + id, &kvfree); + if (err) { + kvfree(crspace_data); + mlx4_warn(dev, "crdump: devlink create %s snapshot id %d err %d\n", + region_cr_space_str, id, err); + } else { + mlx4_info(dev, "crdump: added snapshot %d to devlink region %s\n", + id, region_cr_space_str); + } + } else { + mlx4_err(dev, "crdump: Failed to allocate crspace buffer\n"); + } +} + +static void mlx4_crdump_collect_fw_health(struct mlx4_dev *dev, + u8 __iomem *cr_space, + u32 id) +{ + struct mlx4_fw_crdump *crdump = &dev->persist->crdump; + u8 *health_data; + int offset; + int err; + + if (!crdump->region_fw_health) { + mlx4_err(dev, "crdump: fw-health region is NULL\n"); + return; + } + + /* Try to collect health buffer */ + health_data = kvmalloc(HEALTH_BUFFER_SIZE, GFP_KERNEL); + if (health_data) { + u8 __iomem *health_buf_start = + cr_space + dev->caps.health_buffer_addrs; + + for (offset = 0; offset < HEALTH_BUFFER_SIZE; offset += 4) + *(u32 *)(health_data + offset) = + readl(health_buf_start + offset); + + err = devlink_region_snapshot_create(crdump->region_fw_health, + HEALTH_BUFFER_SIZE, + health_data, + id, &kvfree); + if (err) { + kvfree(health_data); + mlx4_warn(dev, "crdump: devlink create %s snapshot id %d err %d\n", + region_fw_health_str, id, err); + } else { + mlx4_info(dev, "crdump: added snapshot %d to devlink region %s\n", + id, region_fw_health_str); + } + } else { + mlx4_err(dev, "crdump: Failed to allocate health 
buffer\n"); + } +} + +int mlx4_crdump_collect(struct mlx4_dev *dev) +{ + struct devlink *devlink = priv_to_devlink(mlx4_priv(dev)); + struct mlx4_fw_crdump *crdump = &dev->persist->crdump; + struct pci_dev *pdev = dev->persist->pdev; + unsigned long cr_res_size; + u8 __iomem *cr_space; + u32 id; + + if (!dev->caps.health_buffer_addrs) { + mlx4_info(dev, "crdump: FW doesn't support health buffer access, skipping\n"); + return 0; + } + + if (!crdump->snapshot_enable) { + mlx4_info(dev, "crdump: devlink snapshot disabled, skipping\n"); + return 0; + } + + cr_res_size = pci_resource_len(pdev, 0); + + cr_space = ioremap(pci_resource_start(pdev, 0), cr_res_size); + if (!cr_space) { + mlx4_err(dev, "crdump: Failed to map pci cr region\n"); + return -ENODEV; + } + + crdump_enable_crspace_access(dev, cr_space); + + /* Get the available snapshot ID for the dumps */ + id = devlink_region_shapshot_id_get(devlink); + + /* Try to capture dumps */ + mlx4_crdump_collect_crspace(dev, cr_space, id); + mlx4_crdump_collect_fw_health(dev, cr_space, id); + + crdump_disable_crspace_access(dev, cr_space); + + iounmap(cr_space); + return 0; +} + +int mlx4_crdump_init(struct mlx4_dev *dev) +{ + struct devlink *devlink = priv_to_devlink(mlx4_priv(dev)); + struct mlx4_fw_crdump *crdump = &dev->persist->crdump; + struct pci_dev *pdev = dev->persist->pdev; + + crdump->snapshot_enable = false; + + /* Create cr-space region */ + crdump->region_crspace = + devlink_region_create(devlink, + region_cr_space_str, + MAX_NUM_OF_DUMPS_TO_STORE, + pci_resource_len(pdev, 0)); + if (IS_ERR(crdump->region_crspace)) + mlx4_warn(dev, "crdump: create devlink region %s err %ld\n", + region_cr_space_str, + PTR_ERR(crdump->region_crspace)); + + /* Create fw-health region */ + crdump->region_fw_health = + devlink_region_create(devlink, + region_fw_health_str, + MAX_NUM_OF_DUMPS_TO_STORE, + HEALTH_BUFFER_SIZE); + if (IS_ERR(crdump->region_fw_health)) + mlx4_warn(dev, "crdump: create devlink region %s err %ld\n", + region_fw_health_str, + PTR_ERR(crdump->region_fw_health)); + + return 0; +} + +void mlx4_crdump_end(struct mlx4_dev *dev) +{ + struct mlx4_fw_crdump *crdump = &dev->persist->crdump; + + devlink_region_destroy(crdump->region_fw_health); + devlink_region_destroy(crdump->region_crspace); +} diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c index 65eb06e017e4..6785661d1a72 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c @@ -2926,7 +2926,6 @@ static int mlx4_xdp(struct net_device *dev, struct netdev_bpf *xdp) return mlx4_xdp_set(dev, xdp->prog); case XDP_QUERY_PROG: xdp->prog_id = mlx4_xdp_query(dev); - xdp->prog_attached = !!xdp->prog_id; return 0; default: return -EINVAL; diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c index 0227786308af..1857ee0f0871 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c @@ -688,15 +688,16 @@ static void build_inline_wqe(struct mlx4_en_tx_desc *tx_desc, } u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb, - void *accel_priv, select_queue_fallback_t fallback) + struct net_device *sb_dev, + select_queue_fallback_t fallback) { struct mlx4_en_priv *priv = netdev_priv(dev); u16 rings_p_up = priv->num_tx_rings_p_up; if (netdev_get_num_tc(dev)) - return fallback(dev, skb); + return fallback(dev, skb, NULL); - return fallback(dev, skb) % rings_p_up; + return 
fallback(dev, skb, NULL) % rings_p_up; } static void mlx4_bf_copy(void __iomem *dst, const void *src, diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.c b/drivers/net/ethernet/mellanox/mlx4/fw.c index 46dcbfbe4c5e..babcfd9c0571 100644 --- a/drivers/net/ethernet/mellanox/mlx4/fw.c +++ b/drivers/net/ethernet/mellanox/mlx4/fw.c @@ -825,7 +825,7 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap) #define QUERY_DEV_CAP_QP_RATE_LIMIT_NUM_OFFSET 0xcc #define QUERY_DEV_CAP_QP_RATE_LIMIT_MAX_OFFSET 0xd0 #define QUERY_DEV_CAP_QP_RATE_LIMIT_MIN_OFFSET 0xd2 - +#define QUERY_DEV_CAP_HEALTH_BUFFER_ADDRESS_OFFSET 0xe4 dev_cap->flags2 = 0; mailbox = mlx4_alloc_cmd_mailbox(dev); @@ -1082,6 +1082,9 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap) dev_cap->rl_caps.min_unit = size >> 14; } + MLX4_GET(dev_cap->health_buffer_addrs, outbox, + QUERY_DEV_CAP_HEALTH_BUFFER_ADDRESS_OFFSET); + MLX4_GET(field32, outbox, QUERY_DEV_CAP_EXT_2_FLAGS_OFFSET); if (field32 & (1 << 16)) dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_UPDATE_QP; diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.h b/drivers/net/ethernet/mellanox/mlx4/fw.h index cd6399c76bfd..650ae08c71de 100644 --- a/drivers/net/ethernet/mellanox/mlx4/fw.h +++ b/drivers/net/ethernet/mellanox/mlx4/fw.h @@ -128,6 +128,7 @@ struct mlx4_dev_cap { u32 dmfs_high_rate_qpn_base; u32 dmfs_high_rate_qpn_range; struct mlx4_rate_limit_caps rl_caps; + u32 health_buffer_addrs; struct mlx4_port_cap port_cap[MLX4_MAX_PORTS + 1]; bool wol_port[MLX4_MAX_PORTS + 1]; }; diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c index 872014702fc1..2d979a652b7b 100644 --- a/drivers/net/ethernet/mellanox/mlx4/main.c +++ b/drivers/net/ethernet/mellanox/mlx4/main.c @@ -177,6 +177,131 @@ struct mlx4_port_config { static atomic_t pf_loading = ATOMIC_INIT(0); +static int mlx4_devlink_ierr_reset_get(struct devlink *devlink, u32 id, + struct devlink_param_gset_ctx *ctx) +{ + ctx->val.vbool = !!mlx4_internal_err_reset; + return 0; +} + +static int mlx4_devlink_ierr_reset_set(struct devlink *devlink, u32 id, + struct devlink_param_gset_ctx *ctx) +{ + mlx4_internal_err_reset = ctx->val.vbool; + return 0; +} + +static int mlx4_devlink_crdump_snapshot_get(struct devlink *devlink, u32 id, + struct devlink_param_gset_ctx *ctx) +{ + struct mlx4_priv *priv = devlink_priv(devlink); + struct mlx4_dev *dev = &priv->dev; + + ctx->val.vbool = dev->persist->crdump.snapshot_enable; + return 0; +} + +static int mlx4_devlink_crdump_snapshot_set(struct devlink *devlink, u32 id, + struct devlink_param_gset_ctx *ctx) +{ + struct mlx4_priv *priv = devlink_priv(devlink); + struct mlx4_dev *dev = &priv->dev; + + dev->persist->crdump.snapshot_enable = ctx->val.vbool; + return 0; +} + +static int +mlx4_devlink_max_macs_validate(struct devlink *devlink, u32 id, + union devlink_param_value val, + struct netlink_ext_ack *extack) +{ + u32 value = val.vu32; + + if (value < 1 || value > 128) + return -ERANGE; + + if (!is_power_of_2(value)) { + NL_SET_ERR_MSG_MOD(extack, "max_macs supported must be power of 2"); + return -EINVAL; + } + + return 0; +} + +enum mlx4_devlink_param_id { + MLX4_DEVLINK_PARAM_ID_BASE = DEVLINK_PARAM_GENERIC_ID_MAX, + MLX4_DEVLINK_PARAM_ID_ENABLE_64B_CQE_EQE, + MLX4_DEVLINK_PARAM_ID_ENABLE_4K_UAR, +}; + +static const struct devlink_param mlx4_devlink_params[] = { + DEVLINK_PARAM_GENERIC(INT_ERR_RESET, + BIT(DEVLINK_PARAM_CMODE_RUNTIME) | + BIT(DEVLINK_PARAM_CMODE_DRIVERINIT), + mlx4_devlink_ierr_reset_get, + 
mlx4_devlink_ierr_reset_set, NULL), + DEVLINK_PARAM_GENERIC(MAX_MACS, + BIT(DEVLINK_PARAM_CMODE_DRIVERINIT), + NULL, NULL, mlx4_devlink_max_macs_validate), + DEVLINK_PARAM_GENERIC(REGION_SNAPSHOT, + BIT(DEVLINK_PARAM_CMODE_RUNTIME) | + BIT(DEVLINK_PARAM_CMODE_DRIVERINIT), + mlx4_devlink_crdump_snapshot_get, + mlx4_devlink_crdump_snapshot_set, NULL), + DEVLINK_PARAM_DRIVER(MLX4_DEVLINK_PARAM_ID_ENABLE_64B_CQE_EQE, + "enable_64b_cqe_eqe", DEVLINK_PARAM_TYPE_BOOL, + BIT(DEVLINK_PARAM_CMODE_DRIVERINIT), + NULL, NULL, NULL), + DEVLINK_PARAM_DRIVER(MLX4_DEVLINK_PARAM_ID_ENABLE_4K_UAR, + "enable_4k_uar", DEVLINK_PARAM_TYPE_BOOL, + BIT(DEVLINK_PARAM_CMODE_DRIVERINIT), + NULL, NULL, NULL), +}; + +static void mlx4_devlink_set_init_value(struct devlink *devlink, u32 param_id, + union devlink_param_value init_val) +{ + struct mlx4_priv *priv = devlink_priv(devlink); + struct mlx4_dev *dev = &priv->dev; + int err; + + err = devlink_param_driverinit_value_set(devlink, param_id, init_val); + if (err) + mlx4_warn(dev, + "devlink set parameter %u value failed (err = %d)", + param_id, err); +} + +static void mlx4_devlink_set_params_init_values(struct devlink *devlink) +{ + union devlink_param_value value; + + value.vbool = !!mlx4_internal_err_reset; + mlx4_devlink_set_init_value(devlink, + DEVLINK_PARAM_GENERIC_ID_INT_ERR_RESET, + value); + + value.vu32 = 1UL << log_num_mac; + mlx4_devlink_set_init_value(devlink, + DEVLINK_PARAM_GENERIC_ID_MAX_MACS, value); + + value.vbool = enable_64b_cqe_eqe; + mlx4_devlink_set_init_value(devlink, + MLX4_DEVLINK_PARAM_ID_ENABLE_64B_CQE_EQE, + value); + + value.vbool = enable_4k_uar; + mlx4_devlink_set_init_value(devlink, + MLX4_DEVLINK_PARAM_ID_ENABLE_4K_UAR, + value); + + value.vbool = false; + mlx4_devlink_set_init_value(devlink, + DEVLINK_PARAM_GENERIC_ID_REGION_SNAPSHOT, + value); +} + static inline void mlx4_set_num_reserved_uars(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap) { @@ -428,6 +553,7 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap) dev->caps.max_rss_tbl_sz = dev_cap->max_rss_tbl_sz; dev->caps.wol_port[1] = dev_cap->wol_port[1]; dev->caps.wol_port[2] = dev_cap->wol_port[2]; + dev->caps.health_buffer_addrs = dev_cap->health_buffer_addrs; /* Save uar page shift */ if (!mlx4_is_slave(dev)) { @@ -3711,10 +3837,14 @@ static int __mlx4_init_one(struct pci_dev *pdev, int pci_dev_data, } } - err = mlx4_catas_init(&priv->dev); + err = mlx4_crdump_init(&priv->dev); if (err) goto err_release_regions; + err = mlx4_catas_init(&priv->dev); + if (err) + goto err_crdump; + err = mlx4_load_one(pdev, pci_dev_data, total_vfs, nvfs, priv, 0); if (err) goto err_catas; @@ -3724,6 +3854,9 @@ static int __mlx4_init_one(struct pci_dev *pdev, int pci_dev_data, err_catas: mlx4_catas_end(&priv->dev); +err_crdump: + mlx4_crdump_end(&priv->dev); + err_release_regions: pci_release_regions(pdev); @@ -3757,8 +3890,68 @@ static int mlx4_devlink_port_type_set(struct devlink_port *devlink_port, return __set_port_type(info, mlx4_port_type); } +static void mlx4_devlink_param_load_driverinit_values(struct devlink *devlink) +{ + struct mlx4_priv *priv = devlink_priv(devlink); + struct mlx4_dev *dev = &priv->dev; + struct mlx4_fw_crdump *crdump = &dev->persist->crdump; + union devlink_param_value saved_value; + int err; + + err = devlink_param_driverinit_value_get(devlink, + DEVLINK_PARAM_GENERIC_ID_INT_ERR_RESET, + &saved_value); + if (!err && mlx4_internal_err_reset != saved_value.vbool) { + mlx4_internal_err_reset = saved_value.vbool; + /* Notify on value 
changed on runtime configuration mode */ + devlink_param_value_changed(devlink, + DEVLINK_PARAM_GENERIC_ID_INT_ERR_RESET); + } + err = devlink_param_driverinit_value_get(devlink, + DEVLINK_PARAM_GENERIC_ID_MAX_MACS, + &saved_value); + if (!err) + log_num_mac = order_base_2(saved_value.vu32); + err = devlink_param_driverinit_value_get(devlink, + MLX4_DEVLINK_PARAM_ID_ENABLE_64B_CQE_EQE, + &saved_value); + if (!err) + enable_64b_cqe_eqe = saved_value.vbool; + err = devlink_param_driverinit_value_get(devlink, + MLX4_DEVLINK_PARAM_ID_ENABLE_4K_UAR, + &saved_value); + if (!err) + enable_4k_uar = saved_value.vbool; + err = devlink_param_driverinit_value_get(devlink, + DEVLINK_PARAM_GENERIC_ID_REGION_SNAPSHOT, + &saved_value); + if (!err && crdump->snapshot_enable != saved_value.vbool) { + crdump->snapshot_enable = saved_value.vbool; + devlink_param_value_changed(devlink, + DEVLINK_PARAM_GENERIC_ID_REGION_SNAPSHOT); + } +} + +static int mlx4_devlink_reload(struct devlink *devlink, + struct netlink_ext_ack *extack) +{ + struct mlx4_priv *priv = devlink_priv(devlink); + struct mlx4_dev *dev = &priv->dev; + struct mlx4_dev_persistent *persist = dev->persist; + int err; + + if (persist->num_vfs) + mlx4_warn(persist->dev, "Reload performed on PF, will cause reset on operating Virtual Functions\n"); + err = mlx4_restart_one(persist->pdev, true, devlink); + if (err) + mlx4_err(persist->dev, "mlx4_restart_one failed, ret=%d\n", err); + + return err; +} + static const struct devlink_ops mlx4_devlink_ops = { .port_type_set = mlx4_devlink_port_type_set, + .reload = mlx4_devlink_reload, }; static int mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id) @@ -3792,14 +3985,21 @@ static int mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id) ret = devlink_register(devlink, &pdev->dev); if (ret) goto err_persist_free; - - ret = __mlx4_init_one(pdev, id->driver_data, priv); + ret = devlink_params_register(devlink, mlx4_devlink_params, + ARRAY_SIZE(mlx4_devlink_params)); if (ret) goto err_devlink_unregister; + mlx4_devlink_set_params_init_values(devlink); + ret = __mlx4_init_one(pdev, id->driver_data, priv); + if (ret) + goto err_params_unregister; pci_save_state(pdev); return 0; +err_params_unregister: + devlink_params_unregister(devlink, mlx4_devlink_params, + ARRAY_SIZE(mlx4_devlink_params)); err_devlink_unregister: devlink_unregister(devlink); err_persist_free: @@ -3929,6 +4129,7 @@ static void mlx4_remove_one(struct pci_dev *pdev) else mlx4_info(dev, "%s: interface is down\n", __func__); mlx4_catas_end(dev); + mlx4_crdump_end(dev); if (dev->flags & MLX4_FLAG_SRIOV && !active_vfs) { mlx4_warn(dev, "Disabling SR-IOV\n"); pci_disable_sriov(pdev); @@ -3936,6 +4137,8 @@ static void mlx4_remove_one(struct pci_dev *pdev) pci_release_regions(pdev); mlx4_pci_disable_device(dev); + devlink_params_unregister(devlink, mlx4_devlink_params, + ARRAY_SIZE(mlx4_devlink_params)); devlink_unregister(devlink); kfree(dev->persist); devlink_free(devlink); @@ -3960,7 +4163,7 @@ static int restore_current_port_types(struct mlx4_dev *dev, return err; } -int mlx4_restart_one(struct pci_dev *pdev) +int mlx4_restart_one(struct pci_dev *pdev, bool reload, struct devlink *devlink) { struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev); struct mlx4_dev *dev = persist->dev; @@ -3973,6 +4176,8 @@ int mlx4_restart_one(struct pci_dev *pdev) memcpy(nvfs, dev->persist->nvfs, sizeof(dev->persist->nvfs)); mlx4_unload_one(pdev); + if (reload) + mlx4_devlink_param_load_driverinit_values(devlink); err = 
mlx4_load_one(pdev, pci_dev_data, total_vfs, nvfs, priv, 1); if (err) { mlx4_err(dev, "%s: ERROR: mlx4_load_one failed, pci_name=%s, err=%d\n", diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h index cb9e923e8399..6e016092a6f1 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h +++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h @@ -1042,7 +1042,10 @@ void mlx4_start_catas_poll(struct mlx4_dev *dev); void mlx4_stop_catas_poll(struct mlx4_dev *dev); int mlx4_catas_init(struct mlx4_dev *dev); void mlx4_catas_end(struct mlx4_dev *dev); -int mlx4_restart_one(struct pci_dev *pdev); +int mlx4_crdump_init(struct mlx4_dev *dev); +void mlx4_crdump_end(struct mlx4_dev *dev); +int mlx4_restart_one(struct pci_dev *pdev, bool reload, + struct devlink *devlink); int mlx4_register_device(struct mlx4_dev *dev); void mlx4_unregister_device(struct mlx4_dev *dev); void mlx4_dispatch_event(struct mlx4_dev *dev, enum mlx4_dev_event type, @@ -1227,6 +1230,8 @@ void mlx4_srq_event(struct mlx4_dev *dev, u32 srqn, int event_type); void mlx4_enter_error_state(struct mlx4_dev_persistent *persist); int mlx4_comm_internal_err(u32 slave_read); +int mlx4_crdump_collect(struct mlx4_dev *dev); + int mlx4_SENSE_PORT(struct mlx4_dev *dev, int port, enum mlx4_port_type *type); void mlx4_do_sense_ports(struct mlx4_dev *dev, diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h index ace6545f82e6..c3228b89df46 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h +++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h @@ -699,7 +699,8 @@ void mlx4_en_arm_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq); void mlx4_en_tx_irq(struct mlx4_cq *mcq); u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb, - void *accel_priv, select_queue_fallback_t fallback); + struct net_device *sb_dev, + select_queue_fallback_t fallback); netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev); netdev_tx_t mlx4_en_xmit_frame(struct mlx4_en_rx_ring *rx_ring, struct mlx4_en_rx_alloc *frame, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Makefile b/drivers/net/ethernet/mellanox/mlx5/core/Makefile index 9efbf193ad5a..d923f2f58608 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/Makefile +++ b/drivers/net/ethernet/mellanox/mlx5/core/Makefile @@ -14,8 +14,8 @@ mlx5_core-$(CONFIG_MLX5_FPGA) += fpga/cmd.o fpga/core.o fpga/conn.o fpga/sdk.o \ fpga/ipsec.o fpga/tls.o mlx5_core-$(CONFIG_MLX5_CORE_EN) += en_main.o en_common.o en_fs.o en_ethtool.o \ - en_tx.o en_rx.o en_dim.o en_txrx.o en_stats.o vxlan.o \ - en_arfs.o en_fs_ethtool.o en_selftest.o en/port.o + en_tx.o en_rx.o en_dim.o en_txrx.o en_accel/rxtx.o en_stats.o \ + vxlan.o en_arfs.o en_fs_ethtool.o en_selftest.o en/port.o mlx5_core-$(CONFIG_MLX5_MPFS) += lib/mpfs.o diff --git a/drivers/net/ethernet/mellanox/mlx5/core/accel/accel.h b/drivers/net/ethernet/mellanox/mlx5/core/accel/accel.h new file mode 100644 index 000000000000..c13260467750 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/accel/accel.h @@ -0,0 +1,37 @@ +#ifndef __MLX5E_ACCEL_H__ +#define __MLX5E_ACCEL_H__ + +#ifdef CONFIG_MLX5_ACCEL + +#include <linux/skbuff.h> +#include <linux/netdevice.h> +#include "en.h" + +static inline bool is_metadata_hdr_valid(struct sk_buff *skb) +{ + __be16 *ethtype; + + if (unlikely(skb->len < ETH_HLEN + MLX5E_METADATA_ETHER_LEN)) + return false; + ethtype = (__be16 *)(skb->data + ETH_ALEN * 2); + if (*ethtype != cpu_to_be16(MLX5E_METADATA_ETHER_TYPE)) + return 
false; + return true; +} + +static inline void remove_metadata_hdr(struct sk_buff *skb) +{ + struct ethhdr *old_eth; + struct ethhdr *new_eth; + + /* Remove the metadata from the buffer */ + old_eth = (struct ethhdr *)skb->data; + new_eth = (struct ethhdr *)(skb->data + MLX5E_METADATA_ETHER_LEN); + memmove(new_eth, old_eth, 2 * ETH_ALEN); + /* Ethertype is already in its new place */ + skb_pull_inline(skb, MLX5E_METADATA_ETHER_LEN); +} + +#endif /* CONFIG_MLX5_ACCEL */ + +#endif /* __MLX5E_EN_ACCEL_H__ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/accel/tls.c b/drivers/net/ethernet/mellanox/mlx5/core/accel/tls.c index 77ac19f38cbe..da7bd26368f9 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/accel/tls.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/accel/tls.c @@ -37,17 +37,26 @@ #include "mlx5_core.h" #include "fpga/tls.h" -int mlx5_accel_tls_add_tx_flow(struct mlx5_core_dev *mdev, void *flow, - struct tls_crypto_info *crypto_info, - u32 start_offload_tcp_sn, u32 *p_swid) +int mlx5_accel_tls_add_flow(struct mlx5_core_dev *mdev, void *flow, + struct tls_crypto_info *crypto_info, + u32 start_offload_tcp_sn, u32 *p_swid, + bool direction_sx) { - return mlx5_fpga_tls_add_tx_flow(mdev, flow, crypto_info, - start_offload_tcp_sn, p_swid); + return mlx5_fpga_tls_add_flow(mdev, flow, crypto_info, + start_offload_tcp_sn, p_swid, + direction_sx); } -void mlx5_accel_tls_del_tx_flow(struct mlx5_core_dev *mdev, u32 swid) +void mlx5_accel_tls_del_flow(struct mlx5_core_dev *mdev, u32 swid, + bool direction_sx) { - mlx5_fpga_tls_del_tx_flow(mdev, swid, GFP_KERNEL); + mlx5_fpga_tls_del_flow(mdev, swid, GFP_KERNEL, direction_sx); +} + +int mlx5_accel_tls_resync_rx(struct mlx5_core_dev *mdev, u32 handle, u32 seq, + u64 rcd_sn) +{ + return mlx5_fpga_tls_resync_rx(mdev, handle, seq, rcd_sn); } bool mlx5_accel_is_tls_device(struct mlx5_core_dev *mdev) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/accel/tls.h b/drivers/net/ethernet/mellanox/mlx5/core/accel/tls.h index 6f9c9f446ecc..def4093ebfae 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/accel/tls.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/accel/tls.h @@ -60,10 +60,14 @@ struct mlx5_ifc_tls_flow_bits { u8 reserved_at_2[0x1e]; }; -int mlx5_accel_tls_add_tx_flow(struct mlx5_core_dev *mdev, void *flow, - struct tls_crypto_info *crypto_info, - u32 start_offload_tcp_sn, u32 *p_swid); -void mlx5_accel_tls_del_tx_flow(struct mlx5_core_dev *mdev, u32 swid); +int mlx5_accel_tls_add_flow(struct mlx5_core_dev *mdev, void *flow, + struct tls_crypto_info *crypto_info, + u32 start_offload_tcp_sn, u32 *p_swid, + bool direction_sx); +void mlx5_accel_tls_del_flow(struct mlx5_core_dev *mdev, u32 swid, + bool direction_sx); +int mlx5_accel_tls_resync_rx(struct mlx5_core_dev *mdev, u32 handle, u32 seq, + u64 rcd_sn); bool mlx5_accel_is_tls_device(struct mlx5_core_dev *mdev); u32 mlx5_accel_tls_device_caps(struct mlx5_core_dev *mdev); int mlx5_accel_tls_init(struct mlx5_core_dev *mdev); @@ -72,10 +76,14 @@ void mlx5_accel_tls_cleanup(struct mlx5_core_dev *mdev); #else static inline int -mlx5_accel_tls_add_tx_flow(struct mlx5_core_dev *mdev, void *flow, - struct tls_crypto_info *crypto_info, - u32 start_offload_tcp_sn, u32 *p_swid) { return 0; } -static inline void mlx5_accel_tls_del_tx_flow(struct mlx5_core_dev *mdev, u32 swid) { } +mlx5_accel_tls_add_flow(struct mlx5_core_dev *mdev, void *flow, + struct tls_crypto_info *crypto_info, + u32 start_offload_tcp_sn, u32 *p_swid, + bool direction_sx) { return -ENOTSUPP; } +static inline void 
mlx5_accel_tls_del_flow(struct mlx5_core_dev *mdev, u32 swid, + bool direction_sx) { } +static inline int mlx5_accel_tls_resync_rx(struct mlx5_core_dev *mdev, u32 handle, + u32 seq, u64 rcd_sn) { return 0; } static inline bool mlx5_accel_is_tls_device(struct mlx5_core_dev *mdev) { return false; } static inline u32 mlx5_accel_tls_device_caps(struct mlx5_core_dev *mdev) { return 0; } static inline int mlx5_accel_tls_init(struct mlx5_core_dev *mdev) { return 0; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h index eb9eb7aa953a..e1b237ccdf56 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h @@ -137,7 +137,6 @@ struct page_pool; #define MLX5E_MAX_NUM_CHANNELS (MLX5E_INDIR_RQT_SIZE >> 1) #define MLX5E_MAX_NUM_SQS (MLX5E_MAX_NUM_CHANNELS * MLX5E_MAX_NUM_TC) #define MLX5E_TX_CQ_POLL_BUDGET 128 -#define MLX5E_UPDATE_STATS_INTERVAL 200 /* msecs */ #define MLX5E_SQ_RECOVER_MIN_INTERVAL 500 /* msecs */ #define MLX5E_UMR_WQE_INLINE_SZ \ @@ -866,7 +865,8 @@ struct mlx5e_profile { void mlx5e_build_ptys2ethtool_map(void); u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb, - void *accel_priv, select_queue_fallback_t fallback); + struct net_device *sb_dev, + select_queue_fallback_t fallback); netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev); netdev_tx_t mlx5e_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb, struct mlx5e_tx_wqe *wqe, u16 pi); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h index f20074dbef32..39a5d13ba459 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h @@ -34,12 +34,11 @@ #ifndef __MLX5E_EN_ACCEL_H__ #define __MLX5E_EN_ACCEL_H__ -#ifdef CONFIG_MLX5_ACCEL - #include <linux/skbuff.h> #include <linux/netdevice.h> #include "en_accel/ipsec_rxtx.h" #include "en_accel/tls_rxtx.h" +#include "en_accel/rxtx.h" #include "en.h" static inline struct sk_buff *mlx5e_accel_handle_tx(struct sk_buff *skb, @@ -64,9 +63,13 @@ static inline struct sk_buff *mlx5e_accel_handle_tx(struct sk_buff *skb, } #endif + if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) { + skb = mlx5e_udp_gso_handle_tx_skb(dev, sq, skb, wqe, pi); + if (unlikely(!skb)) + return NULL; + } + return skb; } -#endif /* CONFIG_MLX5_ACCEL */ - #endif /* __MLX5E_EN_ACCEL_H__ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c index c245d8e78509..128a82b1dbfc 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c @@ -37,6 +37,7 @@ #include "en_accel/ipsec_rxtx.h" #include "en_accel/ipsec.h" +#include "accel/accel.h" #include "en.h" enum { @@ -346,19 +347,12 @@ mlx5e_ipsec_build_sp(struct net_device *netdev, struct sk_buff *skb, } struct sk_buff *mlx5e_ipsec_handle_rx_skb(struct net_device *netdev, - struct sk_buff *skb) + struct sk_buff *skb, u32 *cqe_bcnt) { struct mlx5e_ipsec_metadata *mdata; - struct ethhdr *old_eth; - struct ethhdr *new_eth; struct xfrm_state *xs; - __be16 *ethtype; - /* Detect inline metadata */ - if (skb->len < ETH_HLEN + MLX5E_METADATA_ETHER_LEN) - return skb; - ethtype = (__be16 *)(skb->data + ETH_ALEN * 2); - if (*ethtype != cpu_to_be16(MLX5E_METADATA_ETHER_TYPE)) + if (!is_metadata_hdr_valid(skb)) return skb; /* Use the 
metadata */ @@ -369,12 +363,8 @@ struct sk_buff *mlx5e_ipsec_handle_rx_skb(struct net_device *netdev, return NULL; } - /* Remove the metadata from the buffer */ - old_eth = (struct ethhdr *)skb->data; - new_eth = (struct ethhdr *)(skb->data + MLX5E_METADATA_ETHER_LEN); - memmove(new_eth, old_eth, 2 * ETH_ALEN); - /* Ethertype is already in its new place */ - skb_pull_inline(skb, MLX5E_METADATA_ETHER_LEN); + remove_metadata_hdr(skb); + *cqe_bcnt -= MLX5E_METADATA_ETHER_LEN; return skb; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h index 2bfbbef1b054..ca47c0540904 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h @@ -41,7 +41,7 @@ #include "en.h" struct sk_buff *mlx5e_ipsec_handle_rx_skb(struct net_device *netdev, - struct sk_buff *skb); + struct sk_buff *skb, u32 *cqe_bcnt); void mlx5e_ipsec_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe); void mlx5e_ipsec_inverse_table_init(void); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/rxtx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/rxtx.c new file mode 100644 index 000000000000..7b7ec3998e84 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/rxtx.c @@ -0,0 +1,109 @@ +#include "en_accel/rxtx.h" + +static void mlx5e_udp_gso_prepare_last_skb(struct sk_buff *skb, + struct sk_buff *nskb, + int remaining) +{ + int bytes_needed = remaining, remaining_headlen, remaining_page_offset; + int headlen = skb_transport_offset(skb) + sizeof(struct udphdr); + int payload_len = remaining + sizeof(struct udphdr); + int k = 0, i, j; + + skb_copy_bits(skb, 0, nskb->data, headlen); + nskb->dev = skb->dev; + skb_reset_mac_header(nskb); + skb_set_network_header(nskb, skb_network_offset(skb)); + skb_set_transport_header(nskb, skb_transport_offset(skb)); + skb_set_tail_pointer(nskb, headlen); + + /* How many frags do we need? */ + for (i = skb_shinfo(skb)->nr_frags - 1; i >= 0; i--) { + bytes_needed -= skb_frag_size(&skb_shinfo(skb)->frags[i]); + k++; + if (bytes_needed <= 0) + break; + } + + /* Fill the first frag and split it if necessary */ + j = skb_shinfo(skb)->nr_frags - k; + remaining_page_offset = -bytes_needed; + skb_fill_page_desc(nskb, 0, + skb_shinfo(skb)->frags[j].page.p, + skb_shinfo(skb)->frags[j].page_offset + remaining_page_offset, + skb_shinfo(skb)->frags[j].size - remaining_page_offset); + + skb_frag_ref(skb, j); + + /* Fill the rest of the frags */ + for (i = 1; i < k; i++) { + j = skb_shinfo(skb)->nr_frags - k + i; + + skb_fill_page_desc(nskb, i, + skb_shinfo(skb)->frags[j].page.p, + skb_shinfo(skb)->frags[j].page_offset, + skb_shinfo(skb)->frags[j].size); + skb_frag_ref(skb, j); + } + skb_shinfo(nskb)->nr_frags = k; + + remaining_headlen = remaining - skb->data_len; + + /* headlen contains remaining data? 
*/ + if (remaining_headlen > 0) + skb_copy_bits(skb, skb->len - remaining, nskb->data + headlen, + remaining_headlen); + nskb->len = remaining + headlen; + nskb->data_len = payload_len - sizeof(struct udphdr) + + max_t(int, 0, remaining_headlen); + nskb->protocol = skb->protocol; + if (nskb->protocol == htons(ETH_P_IP)) { + ip_hdr(nskb)->id = htons(ntohs(ip_hdr(nskb)->id) + + skb_shinfo(skb)->gso_segs); + ip_hdr(nskb)->tot_len = + htons(payload_len + sizeof(struct iphdr)); + } else { + ipv6_hdr(nskb)->payload_len = htons(payload_len); + } + udp_hdr(nskb)->len = htons(payload_len); + skb_shinfo(nskb)->gso_size = 0; + nskb->ip_summed = skb->ip_summed; + nskb->csum_start = skb->csum_start; + nskb->csum_offset = skb->csum_offset; + nskb->queue_mapping = skb->queue_mapping; +} + +/* might send skbs and update wqe and pi */ +struct sk_buff *mlx5e_udp_gso_handle_tx_skb(struct net_device *netdev, + struct mlx5e_txqsq *sq, + struct sk_buff *skb, + struct mlx5e_tx_wqe **wqe, + u16 *pi) +{ + int payload_len = skb_shinfo(skb)->gso_size + sizeof(struct udphdr); + int headlen = skb_transport_offset(skb) + sizeof(struct udphdr); + int remaining = (skb->len - headlen) % skb_shinfo(skb)->gso_size; + struct sk_buff *nskb; + + if (skb->protocol == htons(ETH_P_IP)) + ip_hdr(skb)->tot_len = htons(payload_len + sizeof(struct iphdr)); + else + ipv6_hdr(skb)->payload_len = htons(payload_len); + udp_hdr(skb)->len = htons(payload_len); + if (!remaining) + return skb; + + sq->stats->udp_seg_rem++; + nskb = alloc_skb(max_t(int, headlen, headlen + remaining - skb->data_len), GFP_ATOMIC); + if (unlikely(!nskb)) { + sq->stats->dropped++; + return NULL; + } + + mlx5e_udp_gso_prepare_last_skb(skb, nskb, remaining); + + skb_shinfo(skb)->gso_segs--; + pskb_trim(skb, skb->len - remaining); + mlx5e_sq_xmit(sq, skb, *wqe, *pi); + mlx5e_sq_fetch_wqe(sq, wqe, pi); + return nskb; +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/rxtx.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/rxtx.h new file mode 100644 index 000000000000..ed42699a78b3 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/rxtx.h @@ -0,0 +1,14 @@ + +#ifndef __MLX5E_EN_ACCEL_RX_TX_H__ +#define __MLX5E_EN_ACCEL_RX_TX_H__ + +#include <linux/skbuff.h> +#include "en.h" + +struct sk_buff *mlx5e_udp_gso_handle_tx_skb(struct net_device *netdev, + struct mlx5e_txqsq *sq, + struct sk_buff *skb, + struct mlx5e_tx_wqe **wqe, + u16 *pi); + +#endif diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.c index d167845271c3..eddd7702680b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.c @@ -110,9 +110,7 @@ static int mlx5e_tls_add(struct net_device *netdev, struct sock *sk, u32 caps = mlx5_accel_tls_device_caps(mdev); int ret = -ENOMEM; void *flow; - - if (direction != TLS_OFFLOAD_CTX_DIR_TX) - return -EINVAL; + u32 swid; flow = kzalloc(MLX5_ST_SZ_BYTES(tls_flow), GFP_KERNEL); if (!flow) @@ -122,18 +120,23 @@ static int mlx5e_tls_add(struct net_device *netdev, struct sock *sk, if (ret) goto free_flow; + ret = mlx5_accel_tls_add_flow(mdev, flow, crypto_info, + start_offload_tcp_sn, &swid, + direction == TLS_OFFLOAD_CTX_DIR_TX); + if (ret < 0) + goto free_flow; + if (direction == TLS_OFFLOAD_CTX_DIR_TX) { - struct mlx5e_tls_offload_context *tx_ctx = + struct mlx5e_tls_offload_context_tx *tx_ctx = mlx5e_get_tls_tx_context(tls_ctx); - u32 swid; - - ret = mlx5_accel_tls_add_tx_flow(mdev, flow, 
crypto_info, - start_offload_tcp_sn, &swid); - if (ret < 0) - goto free_flow; tx_ctx->swid = htonl(swid); tx_ctx->expected_seq = start_offload_tcp_sn; + } else { + struct mlx5e_tls_offload_context_rx *rx_ctx = + mlx5e_get_tls_rx_context(tls_ctx); + + rx_ctx->handle = htonl(swid); } return 0; @@ -147,30 +150,60 @@ static void mlx5e_tls_del(struct net_device *netdev, enum tls_offload_ctx_dir direction) { struct mlx5e_priv *priv = netdev_priv(netdev); + unsigned int handle; - if (direction == TLS_OFFLOAD_CTX_DIR_TX) { - u32 swid = ntohl(mlx5e_get_tls_tx_context(tls_ctx)->swid); + handle = ntohl((direction == TLS_OFFLOAD_CTX_DIR_TX) ? + mlx5e_get_tls_tx_context(tls_ctx)->swid : + mlx5e_get_tls_rx_context(tls_ctx)->handle); - mlx5_accel_tls_del_tx_flow(priv->mdev, swid); - } else { - netdev_err(netdev, "unsupported direction %d\n", direction); - } + mlx5_accel_tls_del_flow(priv->mdev, handle, + direction == TLS_OFFLOAD_CTX_DIR_TX); +} + +static void mlx5e_tls_resync_rx(struct net_device *netdev, struct sock *sk, + u32 seq, u64 rcd_sn) +{ + struct tls_context *tls_ctx = tls_get_ctx(sk); + struct mlx5e_priv *priv = netdev_priv(netdev); + struct mlx5e_tls_offload_context_rx *rx_ctx; + + rx_ctx = mlx5e_get_tls_rx_context(tls_ctx); + + netdev_info(netdev, "resyncing seq %d rcd %lld\n", seq, + be64_to_cpu(rcd_sn)); + mlx5_accel_tls_resync_rx(priv->mdev, rx_ctx->handle, seq, rcd_sn); + atomic64_inc(&priv->tls->sw_stats.rx_tls_resync_reply); } static const struct tlsdev_ops mlx5e_tls_ops = { .tls_dev_add = mlx5e_tls_add, .tls_dev_del = mlx5e_tls_del, + .tls_dev_resync_rx = mlx5e_tls_resync_rx, }; void mlx5e_tls_build_netdev(struct mlx5e_priv *priv) { + u32 caps = mlx5_accel_tls_device_caps(priv->mdev); struct net_device *netdev = priv->netdev; if (!mlx5_accel_is_tls_device(priv->mdev)) return; - netdev->features |= NETIF_F_HW_TLS_TX; - netdev->hw_features |= NETIF_F_HW_TLS_TX; + if (caps & MLX5_ACCEL_TLS_TX) { + netdev->features |= NETIF_F_HW_TLS_TX; + netdev->hw_features |= NETIF_F_HW_TLS_TX; + } + + if (caps & MLX5_ACCEL_TLS_RX) { + netdev->features |= NETIF_F_HW_TLS_RX; + netdev->hw_features |= NETIF_F_HW_TLS_RX; + } + + if (!(caps & MLX5_ACCEL_TLS_LRO)) { + netdev->features &= ~NETIF_F_LRO; + netdev->hw_features &= ~NETIF_F_LRO; + } + netdev->tlsdev_ops = &mlx5e_tls_ops; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.h index b6162178f621..3f5d72163b56 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.h @@ -43,25 +43,44 @@ struct mlx5e_tls_sw_stats { atomic64_t tx_tls_drop_resync_alloc; atomic64_t tx_tls_drop_no_sync_data; atomic64_t tx_tls_drop_bypass_required; + atomic64_t rx_tls_drop_resync_request; + atomic64_t rx_tls_resync_request; + atomic64_t rx_tls_resync_reply; + atomic64_t rx_tls_auth_fail; }; struct mlx5e_tls { struct mlx5e_tls_sw_stats sw_stats; }; -struct mlx5e_tls_offload_context { - struct tls_offload_context base; +struct mlx5e_tls_offload_context_tx { + struct tls_offload_context_tx base; u32 expected_seq; __be32 swid; }; -static inline struct mlx5e_tls_offload_context * +static inline struct mlx5e_tls_offload_context_tx * mlx5e_get_tls_tx_context(struct tls_context *tls_ctx) { - BUILD_BUG_ON(sizeof(struct mlx5e_tls_offload_context) > - TLS_OFFLOAD_CONTEXT_SIZE); - return container_of(tls_offload_ctx(tls_ctx), - struct mlx5e_tls_offload_context, + BUILD_BUG_ON(sizeof(struct mlx5e_tls_offload_context_tx) > + 
TLS_OFFLOAD_CONTEXT_SIZE_TX); + return container_of(tls_offload_ctx_tx(tls_ctx), + struct mlx5e_tls_offload_context_tx, + base); +} + +struct mlx5e_tls_offload_context_rx { + struct tls_offload_context_rx base; + __be32 handle; +}; + +static inline struct mlx5e_tls_offload_context_rx * +mlx5e_get_tls_rx_context(struct tls_context *tls_ctx) +{ + BUILD_BUG_ON(sizeof(struct mlx5e_tls_offload_context_rx) > + TLS_OFFLOAD_CONTEXT_SIZE_RX); + return container_of(tls_offload_ctx_rx(tls_ctx), + struct mlx5e_tls_offload_context_rx, base); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c index 15aef71d1957..92d37459850e 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c @@ -33,6 +33,14 @@ #include "en_accel/tls.h" #include "en_accel/tls_rxtx.h" +#include "accel/accel.h" + +#include <net/inet6_hashtables.h> +#include <linux/ipv6.h> + +#define SYNDROM_DECRYPTED 0x30 +#define SYNDROM_RESYNC_REQUEST 0x31 +#define SYNDROM_AUTH_FAILED 0x32 #define SYNDROME_OFFLOAD_REQUIRED 32 #define SYNDROME_SYNC 33 @@ -44,10 +52,26 @@ struct sync_info { skb_frag_t frags[MAX_SKB_FRAGS]; }; -struct mlx5e_tls_metadata { +struct recv_metadata_content { + u8 syndrome; + u8 reserved; + __be32 sync_seq; +} __packed; + +struct send_metadata_content { /* One byte of syndrome followed by 3 bytes of swid */ __be32 syndrome_swid; __be16 first_seq; +} __packed; + +struct mlx5e_tls_metadata { + union { + /* from fpga to host */ + struct recv_metadata_content recv; + /* from host to fpga */ + struct send_metadata_content send; + unsigned char raw[6]; + } __packed content; /* packet type ID field */ __be16 ethertype; } __packed; @@ -68,12 +92,13 @@ static int mlx5e_tls_add_metadata(struct sk_buff *skb, __be32 swid) 2 * ETH_ALEN); eth->h_proto = cpu_to_be16(MLX5E_METADATA_ETHER_TYPE); - pet->syndrome_swid = htonl(SYNDROME_OFFLOAD_REQUIRED << 24) | swid; + pet->content.send.syndrome_swid = + htonl(SYNDROME_OFFLOAD_REQUIRED << 24) | swid; return 0; } -static int mlx5e_tls_get_sync_data(struct mlx5e_tls_offload_context *context, +static int mlx5e_tls_get_sync_data(struct mlx5e_tls_offload_context_tx *context, u32 tcp_seq, struct sync_info *info) { int remaining, i = 0, ret = -EINVAL; @@ -149,7 +174,7 @@ static void mlx5e_tls_complete_sync_skb(struct sk_buff *skb, pet = (struct mlx5e_tls_metadata *)(nskb->data + sizeof(struct ethhdr)); memcpy(pet, &syndrome, sizeof(syndrome)); - pet->first_seq = htons(tcp_seq); + pet->content.send.first_seq = htons(tcp_seq); /* MLX5 devices don't care about the checksum partial start, offset * and pseudo header @@ -161,7 +186,7 @@ static void mlx5e_tls_complete_sync_skb(struct sk_buff *skb, } static struct sk_buff * -mlx5e_tls_handle_ooo(struct mlx5e_tls_offload_context *context, +mlx5e_tls_handle_ooo(struct mlx5e_tls_offload_context_tx *context, struct mlx5e_txqsq *sq, struct sk_buff *skb, struct mlx5e_tx_wqe **wqe, u16 *pi, @@ -239,7 +264,7 @@ struct sk_buff *mlx5e_tls_handle_tx_skb(struct net_device *netdev, u16 *pi) { struct mlx5e_priv *priv = netdev_priv(netdev); - struct mlx5e_tls_offload_context *context; + struct mlx5e_tls_offload_context_tx *context; struct tls_context *tls_ctx; u32 expected_seq; int datalen; @@ -276,3 +301,83 @@ struct sk_buff *mlx5e_tls_handle_tx_skb(struct net_device *netdev, out: return skb; } + +static int tls_update_resync_sn(struct net_device *netdev, + struct sk_buff *skb, + struct mlx5e_tls_metadata 
*mdata) +{ + struct sock *sk = NULL; + struct iphdr *iph; + struct tcphdr *th; + __be32 seq; + + if (mdata->ethertype != htons(ETH_P_IP)) + return -EINVAL; + + iph = (struct iphdr *)(mdata + 1); + + th = ((void *)iph) + iph->ihl * 4; + + if (iph->version == 4) { + sk = inet_lookup_established(dev_net(netdev), &tcp_hashinfo, + iph->saddr, th->source, iph->daddr, + th->dest, netdev->ifindex); +#if IS_ENABLED(CONFIG_IPV6) + } else { + struct ipv6hdr *ipv6h = (struct ipv6hdr *)iph; + + sk = __inet6_lookup_established(dev_net(netdev), &tcp_hashinfo, + &ipv6h->saddr, th->source, + &ipv6h->daddr, th->dest, + netdev->ifindex, 0); +#endif + } + if (!sk || sk->sk_state == TCP_TIME_WAIT) { + struct mlx5e_priv *priv = netdev_priv(netdev); + + atomic64_inc(&priv->tls->sw_stats.rx_tls_drop_resync_request); + goto out; + } + + skb->sk = sk; + skb->destructor = sock_edemux; + + memcpy(&seq, &mdata->content.recv.sync_seq, sizeof(seq)); + tls_offload_rx_resync_request(sk, seq); +out: + return 0; +} + +void mlx5e_tls_handle_rx_skb(struct net_device *netdev, struct sk_buff *skb, + u32 *cqe_bcnt) +{ + struct mlx5e_tls_metadata *mdata; + struct mlx5e_priv *priv; + + if (!is_metadata_hdr_valid(skb)) + return; + + /* Use the metadata */ + mdata = (struct mlx5e_tls_metadata *)(skb->data + ETH_HLEN); + switch (mdata->content.recv.syndrome) { + case SYNDROM_DECRYPTED: + skb->decrypted = 1; + break; + case SYNDROM_RESYNC_REQUEST: + tls_update_resync_sn(netdev, skb, mdata); + priv = netdev_priv(netdev); + atomic64_inc(&priv->tls->sw_stats.rx_tls_resync_request); + break; + case SYNDROM_AUTH_FAILED: + /* Authentication failure will be observed and verified by kTLS */ + priv = netdev_priv(netdev); + atomic64_inc(&priv->tls->sw_stats.rx_tls_auth_fail); + break; + default: + /* Bypass the metadata header to others */ + return; + } + + remove_metadata_hdr(skb); + *cqe_bcnt -= MLX5E_METADATA_ETHER_LEN; +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.h index 405dfd302225..311667ec71b8 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.h @@ -45,6 +45,9 @@ struct sk_buff *mlx5e_tls_handle_tx_skb(struct net_device *netdev, struct mlx5e_tx_wqe **wqe, u16 *pi); +void mlx5e_tls_handle_rx_skb(struct net_device *netdev, struct sk_buff *skb, + u32 *cqe_bcnt); + #endif /* CONFIG_MLX5_EN_TLS */ #endif /* __MLX5E_TLS_RXTX_H__ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index dae4156a710d..712b9766485f 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -270,12 +270,9 @@ void mlx5e_update_stats_work(struct work_struct *work) struct delayed_work *dwork = to_delayed_work(work); struct mlx5e_priv *priv = container_of(dwork, struct mlx5e_priv, update_stats_work); + mutex_lock(&priv->state_lock); - if (test_bit(MLX5E_STATE_OPENED, &priv->state)) { - priv->profile->update_stats(priv); - queue_delayed_work(priv->wq, dwork, - msecs_to_jiffies(MLX5E_UPDATE_STATS_INTERVAL)); - } + priv->profile->update_stats(priv); mutex_unlock(&priv->state_lock); } @@ -352,8 +349,9 @@ static int mlx5e_rq_alloc_mpwqe_info(struct mlx5e_rq *rq, { int wq_sz = mlx5_wq_ll_get_size(&rq->mpwqe.wq); - rq->mpwqe.info = kcalloc_node(wq_sz, sizeof(*rq->mpwqe.info), - GFP_KERNEL, cpu_to_node(c->cpu)); + rq->mpwqe.info = kvzalloc_node(array_size(wq_sz, + 
sizeof(*rq->mpwqe.info)), + GFP_KERNEL, cpu_to_node(c->cpu)); if (!rq->mpwqe.info) return -ENOMEM; @@ -670,7 +668,7 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c, err_free: switch (rq->wq_type) { case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ: - kfree(rq->mpwqe.info); + kvfree(rq->mpwqe.info); mlx5_core_destroy_mkey(mdev, &rq->umr_mkey); break; default: /* MLX5_WQ_TYPE_CYCLIC */ @@ -702,7 +700,7 @@ static void mlx5e_free_rq(struct mlx5e_rq *rq) switch (rq->wq_type) { case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ: - kfree(rq->mpwqe.info); + kvfree(rq->mpwqe.info); mlx5_core_destroy_mkey(rq->mdev, &rq->umr_mkey); break; default: /* MLX5_WQ_TYPE_CYCLIC */ @@ -965,15 +963,15 @@ static void mlx5e_close_rq(struct mlx5e_rq *rq) static void mlx5e_free_xdpsq_db(struct mlx5e_xdpsq *sq) { - kfree(sq->db.di); + kvfree(sq->db.di); } static int mlx5e_alloc_xdpsq_db(struct mlx5e_xdpsq *sq, int numa) { int wq_sz = mlx5_wq_cyc_get_size(&sq->wq); - sq->db.di = kcalloc_node(wq_sz, sizeof(*sq->db.di), - GFP_KERNEL, numa); + sq->db.di = kvzalloc_node(array_size(wq_sz, sizeof(*sq->db.di)), + GFP_KERNEL, numa); if (!sq->db.di) { mlx5e_free_xdpsq_db(sq); return -ENOMEM; @@ -1024,15 +1022,16 @@ static void mlx5e_free_xdpsq(struct mlx5e_xdpsq *sq) static void mlx5e_free_icosq_db(struct mlx5e_icosq *sq) { - kfree(sq->db.ico_wqe); + kvfree(sq->db.ico_wqe); } static int mlx5e_alloc_icosq_db(struct mlx5e_icosq *sq, int numa) { u8 wq_sz = mlx5_wq_cyc_get_size(&sq->wq); - sq->db.ico_wqe = kcalloc_node(wq_sz, sizeof(*sq->db.ico_wqe), - GFP_KERNEL, numa); + sq->db.ico_wqe = kvzalloc_node(array_size(wq_sz, + sizeof(*sq->db.ico_wqe)), + GFP_KERNEL, numa); if (!sq->db.ico_wqe) return -ENOMEM; @@ -1077,8 +1076,8 @@ static void mlx5e_free_icosq(struct mlx5e_icosq *sq) static void mlx5e_free_txqsq_db(struct mlx5e_txqsq *sq) { - kfree(sq->db.wqe_info); - kfree(sq->db.dma_fifo); + kvfree(sq->db.wqe_info); + kvfree(sq->db.dma_fifo); } static int mlx5e_alloc_txqsq_db(struct mlx5e_txqsq *sq, int numa) @@ -1086,10 +1085,12 @@ static int mlx5e_alloc_txqsq_db(struct mlx5e_txqsq *sq, int numa) int wq_sz = mlx5_wq_cyc_get_size(&sq->wq); int df_sz = wq_sz * MLX5_SEND_WQEBB_NUM_DS; - sq->db.dma_fifo = kcalloc_node(df_sz, sizeof(*sq->db.dma_fifo), - GFP_KERNEL, numa); - sq->db.wqe_info = kcalloc_node(wq_sz, sizeof(*sq->db.wqe_info), - GFP_KERNEL, numa); + sq->db.dma_fifo = kvzalloc_node(array_size(df_sz, + sizeof(*sq->db.dma_fifo)), + GFP_KERNEL, numa); + sq->db.wqe_info = kvzalloc_node(array_size(wq_sz, + sizeof(*sq->db.wqe_info)), + GFP_KERNEL, numa); if (!sq->db.dma_fifo || !sq->db.wqe_info) { mlx5e_free_txqsq_db(sq); return -ENOMEM; @@ -1893,7 +1894,7 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix, int err; int eqn; - c = kzalloc_node(sizeof(*c), GFP_KERNEL, cpu_to_node(cpu)); + c = kvzalloc_node(sizeof(*c), GFP_KERNEL, cpu_to_node(cpu)); if (!c) return -ENOMEM; @@ -1979,7 +1980,7 @@ err_close_icosq_cq: err_napi_del: netif_napi_del(&c->napi); - kfree(c); + kvfree(c); return err; } @@ -2018,7 +2019,7 @@ static void mlx5e_close_channel(struct mlx5e_channel *c) mlx5e_close_cq(&c->icosq.cq); netif_napi_del(&c->napi); - kfree(c); + kvfree(c); } #define DEFAULT_FRAG_SIZE (2048) @@ -2276,7 +2277,7 @@ int mlx5e_open_channels(struct mlx5e_priv *priv, chs->num = chs->params.num_channels; chs->c = kcalloc(chs->num, sizeof(struct mlx5e_channel *), GFP_KERNEL); - cparam = kzalloc(sizeof(struct mlx5e_channel_param), GFP_KERNEL); + cparam = kvzalloc(sizeof(struct mlx5e_channel_param), GFP_KERNEL); if (!chs->c || !cparam) goto err_free; 
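[Editor's note: the en_main.c hunks above and below switch the ring-sized databases from kcalloc_node()/kzalloc() to kvzalloc_node()/kvzalloc() with array_size(), and the matching frees to kvfree(). A minimal sketch of that allocation pattern follows, assuming a hypothetical example_entry struct in place of the driver's real per-WQE bookkeeping structures; it is an illustration of the technique, not code from the patch.]

#include <linux/mm.h>        /* kvzalloc_node(), kvfree() */
#include <linux/overflow.h>  /* array_size() */

struct example_entry {       /* hypothetical per-WQE bookkeeping entry */
	u64 addr;
	u32 len;
};

static struct example_entry *example_db_alloc(int wq_sz, int node)
{
	/* array_size() saturates on multiplication overflow instead of
	 * wrapping, and kvzalloc_node() transparently falls back to
	 * vmalloc when a large ring array cannot be served by kmalloc.
	 */
	return kvzalloc_node(array_size(wq_sz, sizeof(struct example_entry)),
			     GFP_KERNEL, node);
}

static void example_db_free(struct example_entry *db)
{
	kvfree(db);	/* correct for both kmalloc- and vmalloc-backed memory */
}

[The same reasoning applies to the per-channel mlx5e_channel and mlx5e_channel_param allocations converted in these hunks: they are sized by ring/queue parameters and need not be physically contiguous, so the kvzalloc/kvfree pair is the safer choice under memory fragmentation.]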
@@ -2287,7 +2288,7 @@ int mlx5e_open_channels(struct mlx5e_priv *priv, goto err_close_channels; } - kfree(cparam); + kvfree(cparam); return 0; err_close_channels: @@ -2296,7 +2297,7 @@ err_close_channels: err_free: kfree(chs->c); - kfree(cparam); + kvfree(cparam); chs->num = 0; return err; } @@ -3371,7 +3372,7 @@ static int mlx5e_setup_tc_block(struct net_device *dev, switch (f->command) { case TC_BLOCK_BIND: return tcf_block_cb_register(f->block, mlx5e_setup_tc_block_cb, - priv, priv); + priv, priv, f->extack); case TC_BLOCK_UNBIND: tcf_block_cb_unregister(f->block, mlx5e_setup_tc_block_cb, priv); @@ -3405,6 +3406,9 @@ mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats) struct mlx5e_vport_stats *vstats = &priv->stats.vport; struct mlx5e_pport_stats *pstats = &priv->stats.pport; + /* update HW stats in background for next time */ + queue_delayed_work(priv->wq, &priv->update_stats_work, 0); + if (mlx5e_is_uplink_rep(priv)) { stats->rx_packets = PPORT_802_3_GET(pstats, a_frames_received_ok); stats->rx_bytes = PPORT_802_3_GET(pstats, a_octets_received_ok); @@ -4192,7 +4196,6 @@ static int mlx5e_xdp(struct net_device *dev, struct netdev_bpf *xdp) return mlx5e_xdp_set(dev, xdp->prog); case XDP_QUERY_PROG: xdp->prog_id = mlx5e_xdp_query(dev); - xdp->prog_attached = !!xdp->prog_id; return 0; default: return -EINVAL; @@ -4592,6 +4595,9 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev) netdev->features |= NETIF_F_HIGHDMA; netdev->features |= NETIF_F_HW_VLAN_STAG_FILTER; + netdev->features |= NETIF_F_GSO_UDP_L4; + netdev->hw_features |= NETIF_F_GSO_UDP_L4; + netdev->priv_flags |= IFF_UNICAST_FLT; mlx5e_set_netdev_dev_addr(netdev); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c index 2b8040a3cdbd..8e3c5b4b90ab 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c @@ -797,7 +797,7 @@ static int mlx5e_rep_setup_tc_block(struct net_device *dev, switch (f->command) { case TC_BLOCK_BIND: return tcf_block_cb_register(f->block, mlx5e_rep_setup_tc_cb, - priv, priv); + priv, priv, f->extack); case TC_BLOCK_UNBIND: tcf_block_cb_unregister(f->block, mlx5e_rep_setup_tc_cb, priv); return 0; @@ -897,6 +897,9 @@ mlx5e_rep_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats) { struct mlx5e_priv *priv = netdev_priv(dev); + /* update HW stats in background for next time */ + queue_delayed_work(priv->wq, &priv->update_stats_work, 0); + memcpy(stats, &priv->stats.vf_vport, sizeof(*stats)); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c index d3a1dd20e41d..1d5295ee863c 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c @@ -44,6 +44,7 @@ #include "en_rep.h" #include "ipoib/ipoib.h" #include "en_accel/ipsec_rxtx.h" +#include "en_accel/tls_rxtx.h" #include "lib/clock.h" static inline bool mlx5e_rx_hw_stamp(struct hwtstamp_config *config) @@ -487,7 +488,7 @@ static int mlx5e_alloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix) sq->db.ico_wqe[pi].opcode = MLX5_OPCODE_UMR; sq->pc += MLX5E_UMR_WQEBBS; - mlx5e_notify_hw(&sq->wq, sq->pc, sq->uar_map, &umr_wqe->ctrl); + mlx5e_notify_hw(wq, sq->pc, sq->uar_map, &umr_wqe->ctrl); return 0; @@ -601,6 +602,8 @@ bool mlx5e_post_rx_mpwqes(struct mlx5e_rq *rq) if (!rq->mpwqe.umr_in_progress) mlx5e_alloc_rx_mpwqe(rq, wq->head); + else + rq->stats->congst_umr += mlx5_wq_ll_missing(wq) > 2; return 
false; } @@ -795,6 +798,11 @@ static inline void mlx5e_build_rx_skb(struct mlx5_cqe64 *cqe, struct net_device *netdev = rq->netdev; skb->mac_len = ETH_HLEN; + +#ifdef CONFIG_MLX5_EN_TLS + mlx5e_tls_handle_rx_skb(netdev, skb, &cqe_bcnt); +#endif + if (lro_num_seg > 1) { mlx5e_lro_update_hdr(skb, cqe, cqe_bcnt); skb_shinfo(skb)->gso_size = DIV_ROUND_UP(cqe_bcnt, lro_num_seg); @@ -1261,7 +1269,10 @@ void mlx5e_handle_rx_cqe_mpwrq(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe) } if (unlikely(mpwrq_is_filler_cqe(cqe))) { - rq->stats->mpwqe_filler++; + struct mlx5e_rq_stats *stats = rq->stats; + + stats->mpwqe_filler_cqes++; + stats->mpwqe_filler_strides += cstrides; goto mpwrq_cqe_out; } @@ -1383,6 +1394,8 @@ bool mlx5e_poll_xdpsq_cq(struct mlx5e_cq *cq) } while (!last_wqe); } while ((++i < MLX5E_TX_CQ_POLL_BUDGET) && (cqe = mlx5_cqwq_get_cqe(&cq->wq))); + rq->stats->xdp_tx_cqe += i; + mlx5_cqwq_update_db_record(&cq->wq); /* ensure cq space is freed before enabling more cqes */ @@ -1534,7 +1547,7 @@ void mlx5e_ipsec_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe) mlx5e_free_rx_wqe(rq, wi); goto wq_cyc_pop; } - skb = mlx5e_ipsec_handle_rx_skb(rq->netdev, skb); + skb = mlx5e_ipsec_handle_rx_skb(rq->netdev, skb, &cqe_bcnt); if (unlikely(!skb)) { mlx5e_free_rx_wqe(rq, wi); goto wq_cyc_pop; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c index 1646859974ce..c0507fada0be 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c @@ -44,6 +44,7 @@ static const struct counter_desc sw_stats_desc[] = { { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tso_inner_packets) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tso_inner_bytes) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_added_vlan_packets) }, + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_nop) }, #ifdef CONFIG_MLX5_EN_TLS { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_ooo) }, @@ -59,6 +60,7 @@ static const struct counter_desc sw_stats_desc[] = { { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_unnecessary_inner) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_drop) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx) }, + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_cqe) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_full) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_csum_none) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_csum_partial) }, @@ -67,10 +69,13 @@ static const struct counter_desc sw_stats_desc[] = { { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_queue_dropped) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xmit_more) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_recover) }, + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_cqes) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_queue_wake) }, + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_udp_seg_rem) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_cqe_err) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_wqe_err) }, - { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_mpwqe_filler) }, + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_mpwqe_filler_cqes) }, + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_mpwqe_filler_strides) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_buff_alloc_err) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cqe_compress_blks) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cqe_compress_pkts) }, @@ -80,6 +85,11 @@ static const struct counter_desc 
sw_stats_desc[] = { { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_empty) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_busy) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_waive) }, + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_congst_umr) }, + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, ch_events) }, + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, ch_poll) }, + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, ch_arm) }, + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, ch_aff_change) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, ch_eq_rearm) }, }; @@ -133,9 +143,11 @@ void mlx5e_grp_sw_update_stats(struct mlx5e_priv *priv) s->rx_csum_unnecessary_inner += rq_stats->csum_unnecessary_inner; s->rx_xdp_drop += rq_stats->xdp_drop; s->rx_xdp_tx += rq_stats->xdp_tx; + s->rx_xdp_tx_cqe += rq_stats->xdp_tx_cqe; s->rx_xdp_tx_full += rq_stats->xdp_tx_full; s->rx_wqe_err += rq_stats->wqe_err; - s->rx_mpwqe_filler += rq_stats->mpwqe_filler; + s->rx_mpwqe_filler_cqes += rq_stats->mpwqe_filler_cqes; + s->rx_mpwqe_filler_strides += rq_stats->mpwqe_filler_strides; s->rx_buff_alloc_err += rq_stats->buff_alloc_err; s->rx_cqe_compress_blks += rq_stats->cqe_compress_blks; s->rx_cqe_compress_pkts += rq_stats->cqe_compress_pkts; @@ -145,6 +157,11 @@ void mlx5e_grp_sw_update_stats(struct mlx5e_priv *priv) s->rx_cache_empty += rq_stats->cache_empty; s->rx_cache_busy += rq_stats->cache_busy; s->rx_cache_waive += rq_stats->cache_waive; + s->rx_congst_umr += rq_stats->congst_umr; + s->ch_events += ch_stats->events; + s->ch_poll += ch_stats->poll; + s->ch_arm += ch_stats->arm; + s->ch_aff_change += ch_stats->aff_change; s->ch_eq_rearm += ch_stats->eq_rearm; for (j = 0; j < priv->max_opened_tc; j++) { @@ -157,8 +174,10 @@ void mlx5e_grp_sw_update_stats(struct mlx5e_priv *priv) s->tx_tso_inner_packets += sq_stats->tso_inner_packets; s->tx_tso_inner_bytes += sq_stats->tso_inner_bytes; s->tx_added_vlan_packets += sq_stats->added_vlan_packets; + s->tx_nop += sq_stats->nop; s->tx_queue_stopped += sq_stats->stopped; s->tx_queue_wake += sq_stats->wake; + s->tx_udp_seg_rem += sq_stats->udp_seg_rem; s->tx_queue_dropped += sq_stats->dropped; s->tx_cqe_err += sq_stats->cqe_err; s->tx_recover += sq_stats->recover; @@ -170,6 +189,7 @@ void mlx5e_grp_sw_update_stats(struct mlx5e_priv *priv) s->tx_tls_ooo += sq_stats->tls_ooo; s->tx_tls_resync_bytes += sq_stats->tls_resync_bytes; #endif + s->tx_cqes += sq_stats->cqes; } } @@ -1107,12 +1127,14 @@ static const struct counter_desc rq_stats_desc[] = { { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_none) }, { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, xdp_drop) }, { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, xdp_tx) }, + { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, xdp_tx_cqe) }, { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, xdp_tx_full) }, { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, lro_packets) }, { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, lro_bytes) }, { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, removed_vlan_packets) }, { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, wqe_err) }, - { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, mpwqe_filler) }, + { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, mpwqe_filler_cqes) }, + { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, mpwqe_filler_strides) }, { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, buff_alloc_err) }, { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cqe_compress_blks) }, { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cqe_compress_pkts) }, @@ -1122,6 +1144,7 @@ static const struct counter_desc 
rq_stats_desc[] = { { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cache_empty) }, { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cache_busy) }, { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cache_waive) }, + { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, congst_umr) }, }; static const struct counter_desc sq_stats_desc[] = { @@ -1140,11 +1163,16 @@ static const struct counter_desc sq_stats_desc[] = { { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, dropped) }, { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, xmit_more) }, { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, recover) }, + { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, cqes) }, { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, wake) }, { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, cqe_err) }, }; static const struct counter_desc ch_stats_desc[] = { + { MLX5E_DECLARE_CH_STAT(struct mlx5e_ch_stats, events) }, + { MLX5E_DECLARE_CH_STAT(struct mlx5e_ch_stats, poll) }, + { MLX5E_DECLARE_CH_STAT(struct mlx5e_ch_stats, arm) }, + { MLX5E_DECLARE_CH_STAT(struct mlx5e_ch_stats, aff_change) }, { MLX5E_DECLARE_CH_STAT(struct mlx5e_ch_stats, eq_rearm) }, }; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h index 643153bb3607..fc3f66003edd 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h @@ -61,6 +61,7 @@ struct mlx5e_sw_stats { u64 tx_tso_inner_packets; u64 tx_tso_inner_bytes; u64 tx_added_vlan_packets; + u64 tx_nop; u64 rx_lro_packets; u64 rx_lro_bytes; u64 rx_removed_vlan_packets; @@ -70,6 +71,7 @@ struct mlx5e_sw_stats { u64 rx_csum_unnecessary_inner; u64 rx_xdp_drop; u64 rx_xdp_tx; + u64 rx_xdp_tx_cqe; u64 rx_xdp_tx_full; u64 tx_csum_none; u64 tx_csum_partial; @@ -78,10 +80,13 @@ struct mlx5e_sw_stats { u64 tx_queue_dropped; u64 tx_xmit_more; u64 tx_recover; + u64 tx_cqes; u64 tx_queue_wake; + u64 tx_udp_seg_rem; u64 tx_cqe_err; u64 rx_wqe_err; - u64 rx_mpwqe_filler; + u64 rx_mpwqe_filler_cqes; + u64 rx_mpwqe_filler_strides; u64 rx_buff_alloc_err; u64 rx_cqe_compress_blks; u64 rx_cqe_compress_pkts; @@ -91,6 +96,11 @@ struct mlx5e_sw_stats { u64 rx_cache_empty; u64 rx_cache_busy; u64 rx_cache_waive; + u64 rx_congst_umr; + u64 ch_events; + u64 ch_poll; + u64 ch_arm; + u64 ch_aff_change; u64 ch_eq_rearm; #ifdef CONFIG_MLX5_EN_TLS @@ -169,9 +179,11 @@ struct mlx5e_rq_stats { u64 removed_vlan_packets; u64 xdp_drop; u64 xdp_tx; + u64 xdp_tx_cqe; u64 xdp_tx_full; u64 wqe_err; - u64 mpwqe_filler; + u64 mpwqe_filler_cqes; + u64 mpwqe_filler_strides; u64 buff_alloc_err; u64 cqe_compress_blks; u64 cqe_compress_pkts; @@ -181,6 +193,7 @@ struct mlx5e_rq_stats { u64 cache_empty; u64 cache_busy; u64 cache_waive; + u64 congst_umr; }; struct mlx5e_sq_stats { @@ -196,6 +209,7 @@ struct mlx5e_sq_stats { u64 csum_partial_inner; u64 added_vlan_packets; u64 nop; + u64 udp_seg_rem; #ifdef CONFIG_MLX5_EN_TLS u64 tls_ooo; u64 tls_resync_bytes; @@ -206,11 +220,16 @@ struct mlx5e_sq_stats { u64 dropped; u64 recover; /* dirtied @completion */ - u64 wake ____cacheline_aligned_in_smp; + u64 cqes ____cacheline_aligned_in_smp; + u64 wake; u64 cqe_err; }; struct mlx5e_ch_stats { + u64 events; + u64 poll; + u64 arm; + u64 aff_change; u64 eq_rearm; }; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c index f29deb44bf3b..9106ea45e3cb 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c @@ -111,10 +111,11 @@ static inline 
int mlx5e_get_dscp_up(struct mlx5e_priv *priv, struct sk_buff *skb #endif u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb, - void *accel_priv, select_queue_fallback_t fallback) + struct net_device *sb_dev, + select_queue_fallback_t fallback) { struct mlx5e_priv *priv = netdev_priv(dev); - int channel_ix = fallback(dev, skb); + int channel_ix = fallback(dev, skb, NULL); u16 num_channels; int up = 0; @@ -228,7 +229,10 @@ mlx5e_tx_get_gso_ihs(struct mlx5e_txqsq *sq, struct sk_buff *skb) stats->tso_inner_packets++; stats->tso_inner_bytes += skb->len - ihs; } else { - ihs = skb_transport_offset(skb) + tcp_hdrlen(skb); + if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) + ihs = skb_transport_offset(skb) + sizeof(struct udphdr); + else + ihs = skb_transport_offset(skb) + tcp_hdrlen(skb); stats->tso_packets++; stats->tso_bytes += skb->len - ihs; } @@ -443,12 +447,11 @@ netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev) sq = priv->txq2sq[skb_get_queue_mapping(skb)]; mlx5e_sq_fetch_wqe(sq, &wqe, &pi); -#ifdef CONFIG_MLX5_ACCEL /* might send skbs and update wqe and pi */ skb = mlx5e_accel_handle_tx(skb, sq, dev, &wqe, &pi); if (unlikely(!skb)) return NETDEV_TX_OK; -#endif + return mlx5e_sq_xmit(sq, skb, wqe, pi); } @@ -466,6 +469,7 @@ static void mlx5e_dump_error_cqe(struct mlx5e_txqsq *sq, bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget) { + struct mlx5e_sq_stats *stats; struct mlx5e_txqsq *sq; struct mlx5_cqe64 *cqe; u32 dma_fifo_cc; @@ -483,6 +487,8 @@ bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget) if (!cqe) return false; + stats = sq->stats; + npkts = 0; nbytes = 0; @@ -511,7 +517,7 @@ bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget) queue_work(cq->channel->priv->wq, &sq->recover.recover_work); } - sq->stats->cqe_err++; + stats->cqe_err++; } do { @@ -556,6 +562,8 @@ bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget) } while ((++i < MLX5E_TX_CQ_POLL_BUDGET) && (cqe = mlx5_cqwq_get_cqe(&cq->wq))); + stats->cqes += i; + mlx5_cqwq_update_db_record(&cq->wq); /* ensure cq space is freed before enabling more cqes */ @@ -571,7 +579,7 @@ bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget) MLX5E_SQ_STOP_ROOM) && !test_bit(MLX5E_SQ_STATE_RECOVERING, &sq->state)) { netif_tx_wake_queue(sq->txq); - sq->stats->wake++; + stats->wake++; } return (i == MLX5E_TX_CQ_POLL_BUDGET); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c index 1b17f682693b..4e1f99a98d5d 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c @@ -74,10 +74,13 @@ int mlx5e_napi_poll(struct napi_struct *napi, int budget) { struct mlx5e_channel *c = container_of(napi, struct mlx5e_channel, napi); + struct mlx5e_ch_stats *ch_stats = c->stats; bool busy = false; int work_done = 0; int i; + ch_stats->poll++; + for (i = 0; i < c->num_tc; i++) busy |= mlx5e_poll_tx_cq(&c->sq[i].cq, budget); @@ -94,6 +97,7 @@ int mlx5e_napi_poll(struct napi_struct *napi, int budget) if (busy) { if (likely(mlx5e_channel_no_affinity_change(c))) return budget; + ch_stats->aff_change++; if (budget && work_done == budget) work_done--; } @@ -101,6 +105,8 @@ int mlx5e_napi_poll(struct napi_struct *napi, int budget) if (unlikely(!napi_complete_done(napi, work_done))) return work_done; + ch_stats->arm++; + for (i = 0; i < c->num_tc; i++) { mlx5e_handle_tx_dim(&c->sq[i]); mlx5e_cq_arm(&c->sq[i].cq); @@ -118,8 +124,9 @@ void mlx5e_completion_event(struct mlx5_core_cq 
*mcq) { struct mlx5e_cq *cq = container_of(mcq, struct mlx5e_cq, mcq); - cq->event_ctr++; napi_schedule(cq->napi); + cq->event_ctr++; + cq->channel->stats->events++; } void mlx5e_cq_error_event(struct mlx5_core_cq *mcq, enum mlx5_event event) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.c b/drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.c index c9736238604a..5cf5f2a9d51f 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.c @@ -129,6 +129,7 @@ static void mlx5_fpga_tls_cmd_send(struct mlx5_fpga_device *fdev, static int mlx5_fpga_tls_alloc_swid(struct idr *idr, spinlock_t *idr_spinlock, void *ptr) { + unsigned long flags; int ret; /* TLS metadata format is 1 byte for syndrome followed @@ -139,9 +140,9 @@ static int mlx5_fpga_tls_alloc_swid(struct idr *idr, spinlock_t *idr_spinlock, BUILD_BUG_ON((SWID_END - 1) & 0xFF000000); idr_preload(GFP_KERNEL); - spin_lock_irq(idr_spinlock); + spin_lock_irqsave(idr_spinlock, flags); ret = idr_alloc(idr, ptr, SWID_START, SWID_END, GFP_ATOMIC); - spin_unlock_irq(idr_spinlock); + spin_unlock_irqrestore(idr_spinlock, flags); idr_preload_end(); return ret; @@ -157,6 +158,13 @@ static void mlx5_fpga_tls_release_swid(struct idr *idr, spin_unlock_irqrestore(idr_spinlock, flags); } +static void mlx_tls_kfree_complete(struct mlx5_fpga_conn *conn, + struct mlx5_fpga_device *fdev, + struct mlx5_fpga_dma_buf *buf, u8 status) +{ + kfree(buf); +} + struct mlx5_teardown_stream_context { struct mlx5_fpga_tls_command_context cmd; u32 swid; @@ -178,9 +186,13 @@ mlx5_fpga_tls_teardown_completion(struct mlx5_fpga_conn *conn, mlx5_fpga_err(fdev, "Teardown stream failed with syndrome = %d", syndrome); - else + else if (MLX5_GET(tls_cmd, cmd->buf.sg[0].data, direction_sx)) mlx5_fpga_tls_release_swid(&fdev->tls->tx_idr, - &fdev->tls->idr_spinlock, + &fdev->tls->tx_idr_spinlock, + ctx->swid); + else + mlx5_fpga_tls_release_swid(&fdev->tls->rx_idr, + &fdev->tls->rx_idr_spinlock, ctx->swid); } mlx5_fpga_tls_put_command_ctx(cmd); @@ -196,6 +208,40 @@ static void mlx5_fpga_tls_flow_to_cmd(void *flow, void *cmd) MLX5_GET(tls_flow, flow, direction_sx)); } +int mlx5_fpga_tls_resync_rx(struct mlx5_core_dev *mdev, u32 handle, u32 seq, + u64 rcd_sn) +{ + struct mlx5_fpga_dma_buf *buf; + int size = sizeof(*buf) + MLX5_TLS_COMMAND_SIZE; + void *flow; + void *cmd; + int ret; + + buf = kzalloc(size, GFP_ATOMIC); + if (!buf) + return -ENOMEM; + + cmd = (buf + 1); + + rcu_read_lock(); + flow = idr_find(&mdev->fpga->tls->rx_idr, ntohl(handle)); + rcu_read_unlock(); + mlx5_fpga_tls_flow_to_cmd(flow, cmd); + + MLX5_SET(tls_cmd, cmd, swid, ntohl(handle)); + MLX5_SET64(tls_cmd, cmd, tls_rcd_sn, be64_to_cpu(rcd_sn)); + MLX5_SET(tls_cmd, cmd, tcp_sn, seq); + MLX5_SET(tls_cmd, cmd, command_type, CMD_RESYNC_RX); + + buf->sg[0].data = cmd; + buf->sg[0].size = MLX5_TLS_COMMAND_SIZE; + buf->complete = mlx_tls_kfree_complete; + + ret = mlx5_fpga_sbu_conn_sendmsg(mdev->fpga->tls->conn, buf); + + return ret; +} + static void mlx5_fpga_tls_send_teardown_cmd(struct mlx5_core_dev *mdev, void *flow, u32 swid, gfp_t flags) { @@ -223,14 +269,18 @@ static void mlx5_fpga_tls_send_teardown_cmd(struct mlx5_core_dev *mdev, mlx5_fpga_tls_teardown_completion); } -void mlx5_fpga_tls_del_tx_flow(struct mlx5_core_dev *mdev, u32 swid, - gfp_t flags) +void mlx5_fpga_tls_del_flow(struct mlx5_core_dev *mdev, u32 swid, + gfp_t flags, bool direction_sx) { struct mlx5_fpga_tls *tls = mdev->fpga->tls; void *flow; rcu_read_lock(); - flow = 
idr_find(&tls->tx_idr, swid); + if (direction_sx) + flow = idr_find(&tls->tx_idr, swid); + else + flow = idr_find(&tls->rx_idr, swid); + rcu_read_unlock(); if (!flow) { @@ -289,9 +339,11 @@ mlx5_fpga_tls_setup_completion(struct mlx5_fpga_conn *conn, * the command context because we might not have received * the tx completion yet. */ - mlx5_fpga_tls_del_tx_flow(fdev->mdev, - MLX5_GET(tls_cmd, tls_cmd, swid), - GFP_ATOMIC); + mlx5_fpga_tls_del_flow(fdev->mdev, + MLX5_GET(tls_cmd, tls_cmd, swid), + GFP_ATOMIC, + MLX5_GET(tls_cmd, tls_cmd, + direction_sx)); } mlx5_fpga_tls_put_command_ctx(cmd); @@ -415,8 +467,7 @@ int mlx5_fpga_tls_init(struct mlx5_core_dev *mdev) if (err) goto error; - if (!(tls->caps & (MLX5_ACCEL_TLS_TX | MLX5_ACCEL_TLS_V12 | - MLX5_ACCEL_TLS_AES_GCM128))) { + if (!(tls->caps & (MLX5_ACCEL_TLS_V12 | MLX5_ACCEL_TLS_AES_GCM128))) { err = -ENOTSUPP; goto error; } @@ -438,7 +489,9 @@ int mlx5_fpga_tls_init(struct mlx5_core_dev *mdev) INIT_LIST_HEAD(&tls->pending_cmds); idr_init(&tls->tx_idr); - spin_lock_init(&tls->idr_spinlock); + idr_init(&tls->rx_idr); + spin_lock_init(&tls->tx_idr_spinlock); + spin_lock_init(&tls->rx_idr_spinlock); fdev->tls = tls; return 0; @@ -500,9 +553,9 @@ static int mlx5_fpga_tls_set_key_material(void *cmd, u32 caps, return 0; } -static int mlx5_fpga_tls_add_flow(struct mlx5_core_dev *mdev, void *flow, - struct tls_crypto_info *crypto_info, u32 swid, - u32 tcp_sn) +static int _mlx5_fpga_tls_add_flow(struct mlx5_core_dev *mdev, void *flow, + struct tls_crypto_info *crypto_info, + u32 swid, u32 tcp_sn) { u32 caps = mlx5_fpga_tls_device_caps(mdev); struct mlx5_setup_stream_context *ctx; @@ -533,30 +586,42 @@ out: return ret; } -int mlx5_fpga_tls_add_tx_flow(struct mlx5_core_dev *mdev, void *flow, - struct tls_crypto_info *crypto_info, - u32 start_offload_tcp_sn, u32 *p_swid) +int mlx5_fpga_tls_add_flow(struct mlx5_core_dev *mdev, void *flow, + struct tls_crypto_info *crypto_info, + u32 start_offload_tcp_sn, u32 *p_swid, + bool direction_sx) { struct mlx5_fpga_tls *tls = mdev->fpga->tls; int ret = -ENOMEM; u32 swid; - ret = mlx5_fpga_tls_alloc_swid(&tls->tx_idr, &tls->idr_spinlock, flow); + if (direction_sx) + ret = mlx5_fpga_tls_alloc_swid(&tls->tx_idr, + &tls->tx_idr_spinlock, flow); + else + ret = mlx5_fpga_tls_alloc_swid(&tls->rx_idr, + &tls->rx_idr_spinlock, flow); + if (ret < 0) return ret; swid = ret; - MLX5_SET(tls_flow, flow, direction_sx, 1); + MLX5_SET(tls_flow, flow, direction_sx, direction_sx ? 
1 : 0); - ret = mlx5_fpga_tls_add_flow(mdev, flow, crypto_info, swid, - start_offload_tcp_sn); + ret = _mlx5_fpga_tls_add_flow(mdev, flow, crypto_info, swid, + start_offload_tcp_sn); if (ret && ret != -EINTR) goto free_swid; *p_swid = swid; return 0; free_swid: - mlx5_fpga_tls_release_swid(&tls->tx_idr, &tls->idr_spinlock, swid); + if (direction_sx) + mlx5_fpga_tls_release_swid(&tls->tx_idr, + &tls->tx_idr_spinlock, swid); + else + mlx5_fpga_tls_release_swid(&tls->rx_idr, + &tls->rx_idr_spinlock, swid); return ret; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.h b/drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.h index 800a214e4e49..3b2e37bf76fe 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.h @@ -46,15 +46,18 @@ struct mlx5_fpga_tls { struct mlx5_fpga_conn *conn; struct idr tx_idr; - spinlock_t idr_spinlock; /* protects the IDR */ + struct idr rx_idr; + spinlock_t tx_idr_spinlock; /* protects the IDR */ + spinlock_t rx_idr_spinlock; /* protects the IDR */ }; -int mlx5_fpga_tls_add_tx_flow(struct mlx5_core_dev *mdev, void *flow, - struct tls_crypto_info *crypto_info, - u32 start_offload_tcp_sn, u32 *p_swid); +int mlx5_fpga_tls_add_flow(struct mlx5_core_dev *mdev, void *flow, + struct tls_crypto_info *crypto_info, + u32 start_offload_tcp_sn, u32 *p_swid, + bool direction_sx); -void mlx5_fpga_tls_del_tx_flow(struct mlx5_core_dev *mdev, u32 swid, - gfp_t flags); +void mlx5_fpga_tls_del_flow(struct mlx5_core_dev *mdev, u32 swid, + gfp_t flags, bool direction_sx); bool mlx5_fpga_is_tls_device(struct mlx5_core_dev *mdev); int mlx5_fpga_tls_init(struct mlx5_core_dev *mdev); @@ -65,4 +68,7 @@ static inline u32 mlx5_fpga_tls_device_caps(struct mlx5_core_dev *mdev) return mdev->fpga->tls->caps; } +int mlx5_fpga_tls_resync_rx(struct mlx5_core_dev *mdev, u32 handle, u32 seq, + u64 rcd_sn); + #endif /* __MLX5_FPGA_TLS_H__ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/wq.h b/drivers/net/ethernet/mellanox/mlx5/core/wq.h index 0b47126815b6..2bd4c3184eba 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/wq.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/wq.h @@ -229,6 +229,11 @@ static inline int mlx5_wq_ll_is_empty(struct mlx5_wq_ll *wq) return !wq->cur_sz; } +static inline int mlx5_wq_ll_missing(struct mlx5_wq_ll *wq) +{ + return wq->fbc.sz_m1 - wq->cur_sz; +} + static inline void *mlx5_wq_ll_get_wqe(struct mlx5_wq_ll *wq, u16 ix) { return mlx5_frag_buf_get_wqe(&wq->fbc, ix); diff --git a/drivers/net/ethernet/mellanox/mlxsw/Makefile b/drivers/net/ethernet/mellanox/mlxsw/Makefile index 0cadcabfe86f..4a1069166119 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/Makefile +++ b/drivers/net/ethernet/mellanox/mlxsw/Makefile @@ -15,11 +15,18 @@ mlxsw_switchx2-objs := switchx2.o obj-$(CONFIG_MLXSW_SPECTRUM) += mlxsw_spectrum.o mlxsw_spectrum-objs := spectrum.o spectrum_buffers.o \ spectrum_switchdev.o spectrum_router.o \ - spectrum_kvdl.o spectrum_acl_tcam.o \ - spectrum_acl.o spectrum_flower.o \ - spectrum_cnt.o spectrum_fid.o \ - spectrum_ipip.o spectrum_acl_flex_actions.o \ - spectrum_mr.o spectrum_mr_tcam.o \ + spectrum1_kvdl.o spectrum2_kvdl.o \ + spectrum_kvdl.o \ + spectrum_acl_tcam.o spectrum_acl_ctcam.o \ + spectrum_acl_atcam.o \ + spectrum1_acl_tcam.o spectrum2_acl_tcam.o \ + spectrum_acl.o \ + spectrum_flower.o spectrum_cnt.o \ + spectrum_fid.o spectrum_ipip.o \ + spectrum_acl_flex_actions.o \ + spectrum_acl_flex_keys.o \ + spectrum1_mr_tcam.o spectrum2_mr_tcam.o \ + spectrum_mr_tcam.o spectrum_mr.o 
\ spectrum_qdisc.o spectrum_span.o mlxsw_spectrum-$(CONFIG_MLXSW_SPECTRUM_DCB) += spectrum_dcb.o mlxsw_spectrum-$(CONFIG_NET_DEVLINK) += spectrum_dpipe.o diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c index 3c0d882ba183..9a473628831e 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c +++ b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c @@ -351,9 +351,24 @@ struct mlxsw_afa_block *mlxsw_afa_block_create(struct mlxsw_afa *mlxsw_afa) block->first_set = mlxsw_afa_set_create(true); if (!block->first_set) goto err_first_set_create; - block->cur_set = block->first_set; + + /* In case user instructs to have dummy first set, we leave it + * empty here and create another, real, set right away. + */ + if (mlxsw_afa->ops->dummy_first_set) { + block->cur_set = mlxsw_afa_set_create(false); + if (!block->cur_set) + goto err_second_set_create; + block->cur_set->prev = block->first_set; + block->first_set->next = block->cur_set; + } else { + block->cur_set = block->first_set; + } + return block; +err_second_set_create: + mlxsw_afa_set_destroy(block->first_set); err_first_set_create: kfree(block); return NULL; @@ -415,11 +430,31 @@ char *mlxsw_afa_block_first_set(struct mlxsw_afa_block *block) } EXPORT_SYMBOL(mlxsw_afa_block_first_set); -u32 mlxsw_afa_block_first_set_kvdl_index(struct mlxsw_afa_block *block) +char *mlxsw_afa_block_cur_set(struct mlxsw_afa_block *block) { - return block->first_set->kvdl_index; + return block->cur_set->ht_key.enc_actions; +} +EXPORT_SYMBOL(mlxsw_afa_block_cur_set); + +u32 mlxsw_afa_block_first_kvdl_index(struct mlxsw_afa_block *block) +{ + /* First set is never in KVD linear. So the first set + * with valid KVD linear index is always the second one. 
+ */ + if (WARN_ON(!block->first_set->next)) + return 0; + return block->first_set->next->kvdl_index; +} +EXPORT_SYMBOL(mlxsw_afa_block_first_kvdl_index); + +int mlxsw_afa_block_activity_get(struct mlxsw_afa_block *block, bool *activity) +{ + u32 kvdl_index = mlxsw_afa_block_first_kvdl_index(block); + + return block->afa->ops->kvdl_set_activity_get(block->afa->ops_priv, + kvdl_index, activity); } -EXPORT_SYMBOL(mlxsw_afa_block_first_set_kvdl_index); +EXPORT_SYMBOL(mlxsw_afa_block_activity_get); int mlxsw_afa_block_continue(struct mlxsw_afa_block *block) { diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h index 3a155d104384..69628582fb4b 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h +++ b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h @@ -45,6 +45,8 @@ struct mlxsw_afa_ops { int (*kvdl_set_add)(void *priv, u32 *p_kvdl_index, char *enc_actions, bool is_first); void (*kvdl_set_del)(void *priv, u32 kvdl_index, bool is_first); + int (*kvdl_set_activity_get)(void *priv, u32 kvdl_index, + bool *activity); int (*kvdl_fwd_entry_add)(void *priv, u32 *p_kvdl_index, u8 local_port); void (*kvdl_fwd_entry_del)(void *priv, u32 kvdl_index); int (*counter_index_get)(void *priv, unsigned int *p_counter_index); @@ -54,6 +56,7 @@ struct mlxsw_afa_ops { bool ingress, int *p_span_id); void (*mirror_del)(void *priv, u8 local_in_port, int span_id, bool ingress); + bool dummy_first_set; }; struct mlxsw_afa *mlxsw_afa_create(unsigned int max_acts_per_set, @@ -64,7 +67,9 @@ struct mlxsw_afa_block *mlxsw_afa_block_create(struct mlxsw_afa *mlxsw_afa); void mlxsw_afa_block_destroy(struct mlxsw_afa_block *block); int mlxsw_afa_block_commit(struct mlxsw_afa_block *block); char *mlxsw_afa_block_first_set(struct mlxsw_afa_block *block); -u32 mlxsw_afa_block_first_set_kvdl_index(struct mlxsw_afa_block *block); +char *mlxsw_afa_block_cur_set(struct mlxsw_afa_block *block); +u32 mlxsw_afa_block_first_kvdl_index(struct mlxsw_afa_block *block); +int mlxsw_afa_block_activity_get(struct mlxsw_afa_block *block, bool *activity); int mlxsw_afa_block_continue(struct mlxsw_afa_block *block); int mlxsw_afa_block_jump(struct mlxsw_afa_block *block, u16 group_id); int mlxsw_afa_block_terminate(struct mlxsw_afa_block *block); diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.c b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.c index b32a00972e83..5f8485c7640e 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.c +++ b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.c @@ -43,6 +43,7 @@ struct mlxsw_afk { struct list_head key_info_list; unsigned int max_blocks; + const struct mlxsw_afk_ops *ops; const struct mlxsw_afk_block *blocks; unsigned int blocks_count; }; @@ -69,8 +70,7 @@ static bool mlxsw_afk_blocks_check(struct mlxsw_afk *mlxsw_afk) } struct mlxsw_afk *mlxsw_afk_create(unsigned int max_blocks, - const struct mlxsw_afk_block *blocks, - unsigned int blocks_count) + const struct mlxsw_afk_ops *ops) { struct mlxsw_afk *mlxsw_afk; @@ -79,8 +79,9 @@ struct mlxsw_afk *mlxsw_afk_create(unsigned int max_blocks, return NULL; INIT_LIST_HEAD(&mlxsw_afk->key_info_list); mlxsw_afk->max_blocks = max_blocks; - mlxsw_afk->blocks = blocks; - mlxsw_afk->blocks_count = blocks_count; + mlxsw_afk->ops = ops; + mlxsw_afk->blocks = ops->blocks; + mlxsw_afk->blocks_count = ops->blocks_count; WARN_ON(!mlxsw_afk_blocks_check(mlxsw_afk)); return mlxsw_afk; } @@ -415,61 +416,74 @@ void 
mlxsw_afk_values_add_buf(struct mlxsw_afk_element_values *values, } EXPORT_SYMBOL(mlxsw_afk_values_add_buf); -static void mlxsw_afk_encode_u32(const struct mlxsw_item *storage_item, - const struct mlxsw_item *output_item, - char *storage, char *output_indexed) +static void mlxsw_sp_afk_encode_u32(const struct mlxsw_item *storage_item, + const struct mlxsw_item *output_item, + char *storage, char *output) { u32 value; value = __mlxsw_item_get32(storage, storage_item, 0); - __mlxsw_item_set32(output_indexed, output_item, 0, value); + __mlxsw_item_set32(output, output_item, 0, value); } -static void mlxsw_afk_encode_buf(const struct mlxsw_item *storage_item, - const struct mlxsw_item *output_item, - char *storage, char *output_indexed) +static void mlxsw_sp_afk_encode_buf(const struct mlxsw_item *storage_item, + const struct mlxsw_item *output_item, + char *storage, char *output) { char *storage_data = __mlxsw_item_data(storage, storage_item, 0); - char *output_data = __mlxsw_item_data(output_indexed, output_item, 0); + char *output_data = __mlxsw_item_data(output, output_item, 0); size_t len = output_item->size.bytes; memcpy(output_data, storage_data, len); } -#define MLXSW_AFK_KEY_BLOCK_SIZE 16 - -static void mlxsw_afk_encode_one(const struct mlxsw_afk_element_inst *elinst, - int block_index, char *storage, char *output) +static void +mlxsw_sp_afk_encode_one(const struct mlxsw_afk_element_inst *elinst, + char *output, char *storage) { - char *output_indexed = output + block_index * MLXSW_AFK_KEY_BLOCK_SIZE; const struct mlxsw_item *storage_item = &elinst->info->item; const struct mlxsw_item *output_item = &elinst->item; if (elinst->type == MLXSW_AFK_ELEMENT_TYPE_U32) - mlxsw_afk_encode_u32(storage_item, output_item, - storage, output_indexed); + mlxsw_sp_afk_encode_u32(storage_item, output_item, + storage, output); else if (elinst->type == MLXSW_AFK_ELEMENT_TYPE_BUF) - mlxsw_afk_encode_buf(storage_item, output_item, - storage, output_indexed); + mlxsw_sp_afk_encode_buf(storage_item, output_item, + storage, output); } -void mlxsw_afk_encode(struct mlxsw_afk_key_info *key_info, +#define MLXSW_SP_AFK_KEY_BLOCK_MAX_SIZE 16 + +void mlxsw_afk_encode(struct mlxsw_afk *mlxsw_afk, + struct mlxsw_afk_key_info *key_info, struct mlxsw_afk_element_values *values, char *key, char *mask) { + char block_mask[MLXSW_SP_AFK_KEY_BLOCK_MAX_SIZE]; + char block_key[MLXSW_SP_AFK_KEY_BLOCK_MAX_SIZE]; const struct mlxsw_afk_element_inst *elinst; enum mlxsw_afk_element element; - int block_index; + int block_index, i; + + for (i = 0; i < key_info->blocks_count; i++) { + memset(block_key, 0, MLXSW_SP_AFK_KEY_BLOCK_MAX_SIZE); + memset(block_mask, 0, MLXSW_SP_AFK_KEY_BLOCK_MAX_SIZE); + + mlxsw_afk_element_usage_for_each(element, &values->elusage) { + elinst = mlxsw_afk_key_info_elinst_get(key_info, + element, + &block_index); + if (!elinst || block_index != i) + continue; + + mlxsw_sp_afk_encode_one(elinst, block_key, + values->storage.key); + mlxsw_sp_afk_encode_one(elinst, block_mask, + values->storage.mask); + } - mlxsw_afk_element_usage_for_each(element, &values->elusage) { - elinst = mlxsw_afk_key_info_elinst_get(key_info, element, - &block_index); - if (!elinst) - continue; - mlxsw_afk_encode_one(elinst, block_index, - values->storage.key, key); - mlxsw_afk_encode_one(elinst, block_index, - values->storage.mask, mask); + mlxsw_afk->ops->encode_block(block_key, i, key); + mlxsw_afk->ops->encode_block(block_mask, i, mask); } } EXPORT_SYMBOL(mlxsw_afk_encode); diff --git 
a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h index 122506daa586..2ffde915349b 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h +++ b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h @@ -42,16 +42,20 @@ enum mlxsw_afk_element { MLXSW_AFK_ELEMENT_SRC_SYS_PORT, - MLXSW_AFK_ELEMENT_DMAC, - MLXSW_AFK_ELEMENT_SMAC, + MLXSW_AFK_ELEMENT_DMAC_32_47, + MLXSW_AFK_ELEMENT_DMAC_0_31, + MLXSW_AFK_ELEMENT_SMAC_32_47, + MLXSW_AFK_ELEMENT_SMAC_0_31, MLXSW_AFK_ELEMENT_ETHERTYPE, MLXSW_AFK_ELEMENT_IP_PROTO, - MLXSW_AFK_ELEMENT_SRC_IP4, - MLXSW_AFK_ELEMENT_DST_IP4, - MLXSW_AFK_ELEMENT_SRC_IP6_HI, - MLXSW_AFK_ELEMENT_SRC_IP6_LO, - MLXSW_AFK_ELEMENT_DST_IP6_HI, - MLXSW_AFK_ELEMENT_DST_IP6_LO, + MLXSW_AFK_ELEMENT_SRC_IP_96_127, + MLXSW_AFK_ELEMENT_SRC_IP_64_95, + MLXSW_AFK_ELEMENT_SRC_IP_32_63, + MLXSW_AFK_ELEMENT_SRC_IP_0_31, + MLXSW_AFK_ELEMENT_DST_IP_96_127, + MLXSW_AFK_ELEMENT_DST_IP_64_95, + MLXSW_AFK_ELEMENT_DST_IP_32_63, + MLXSW_AFK_ELEMENT_DST_IP_0_31, MLXSW_AFK_ELEMENT_DST_L4_PORT, MLXSW_AFK_ELEMENT_SRC_L4_PORT, MLXSW_AFK_ELEMENT_VID, @@ -99,9 +103,11 @@ struct mlxsw_afk_element_info { * define an internal storage geometry. */ static const struct mlxsw_afk_element_info mlxsw_afk_element_infos[] = { - MLXSW_AFK_ELEMENT_INFO_U32(SRC_SYS_PORT, 0x00, 16, 16), - MLXSW_AFK_ELEMENT_INFO_BUF(DMAC, 0x04, 6), - MLXSW_AFK_ELEMENT_INFO_BUF(SMAC, 0x0A, 6), + MLXSW_AFK_ELEMENT_INFO_U32(SRC_SYS_PORT, 0x00, 16, 8), + MLXSW_AFK_ELEMENT_INFO_BUF(DMAC_32_47, 0x04, 2), + MLXSW_AFK_ELEMENT_INFO_BUF(DMAC_0_31, 0x06, 4), + MLXSW_AFK_ELEMENT_INFO_BUF(SMAC_32_47, 0x0A, 2), + MLXSW_AFK_ELEMENT_INFO_BUF(SMAC_0_31, 0x0C, 4), MLXSW_AFK_ELEMENT_INFO_U32(ETHERTYPE, 0x00, 0, 16), MLXSW_AFK_ELEMENT_INFO_U32(IP_PROTO, 0x10, 0, 8), MLXSW_AFK_ELEMENT_INFO_U32(VID, 0x10, 8, 12), @@ -112,12 +118,14 @@ static const struct mlxsw_afk_element_info mlxsw_afk_element_infos[] = { MLXSW_AFK_ELEMENT_INFO_U32(IP_TTL_, 0x18, 0, 8), MLXSW_AFK_ELEMENT_INFO_U32(IP_ECN, 0x18, 9, 2), MLXSW_AFK_ELEMENT_INFO_U32(IP_DSCP, 0x18, 11, 6), - MLXSW_AFK_ELEMENT_INFO_U32(SRC_IP4, 0x20, 0, 32), - MLXSW_AFK_ELEMENT_INFO_U32(DST_IP4, 0x24, 0, 32), - MLXSW_AFK_ELEMENT_INFO_BUF(SRC_IP6_HI, 0x20, 8), - MLXSW_AFK_ELEMENT_INFO_BUF(SRC_IP6_LO, 0x28, 8), - MLXSW_AFK_ELEMENT_INFO_BUF(DST_IP6_HI, 0x30, 8), - MLXSW_AFK_ELEMENT_INFO_BUF(DST_IP6_LO, 0x38, 8), + MLXSW_AFK_ELEMENT_INFO_BUF(SRC_IP_96_127, 0x20, 4), + MLXSW_AFK_ELEMENT_INFO_BUF(SRC_IP_64_95, 0x24, 4), + MLXSW_AFK_ELEMENT_INFO_BUF(SRC_IP_32_63, 0x28, 4), + MLXSW_AFK_ELEMENT_INFO_BUF(SRC_IP_0_31, 0x2C, 4), + MLXSW_AFK_ELEMENT_INFO_BUF(DST_IP_96_127, 0x30, 4), + MLXSW_AFK_ELEMENT_INFO_BUF(DST_IP_64_95, 0x34, 4), + MLXSW_AFK_ELEMENT_INFO_BUF(DST_IP_32_63, 0x38, 4), + MLXSW_AFK_ELEMENT_INFO_BUF(DST_IP_0_31, 0x3C, 4), }; #define MLXSW_AFK_ELEMENT_STORAGE_SIZE 0x40 @@ -208,9 +216,14 @@ mlxsw_afk_element_usage_subset(struct mlxsw_afk_element_usage *elusage_small, struct mlxsw_afk; +struct mlxsw_afk_ops { + const struct mlxsw_afk_block *blocks; + unsigned int blocks_count; + void (*encode_block)(char *block, int block_index, char *output); +}; + struct mlxsw_afk *mlxsw_afk_create(unsigned int max_blocks, - const struct mlxsw_afk_block *blocks, - unsigned int blocks_count); + const struct mlxsw_afk_ops *ops); void mlxsw_afk_destroy(struct mlxsw_afk *mlxsw_afk); struct mlxsw_afk_key_info; @@ -243,7 +256,8 @@ void mlxsw_afk_values_add_buf(struct mlxsw_afk_element_values *values, enum mlxsw_afk_element element, const char 
*key_value, const char *mask_value, unsigned int len); -void mlxsw_afk_encode(struct mlxsw_afk_key_info *key_info, +void mlxsw_afk_encode(struct mlxsw_afk *mlxsw_afk, + struct mlxsw_afk_key_info *key_info, struct mlxsw_afk_element_values *values, char *key, char *mask); diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci.h b/drivers/net/ethernet/mellanox/mlxsw/pci.h index d65582325cd5..7461f8fe1133 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/pci.h +++ b/drivers/net/ethernet/mellanox/mlxsw/pci.h @@ -39,6 +39,7 @@ #define PCI_DEVICE_ID_MELLANOX_SWITCHX2 0xc738 #define PCI_DEVICE_ID_MELLANOX_SPECTRUM 0xcb84 +#define PCI_DEVICE_ID_MELLANOX_SPECTRUM2 0xcf6c #define PCI_DEVICE_ID_MELLANOX_SWITCHIB 0xcb20 #define PCI_DEVICE_ID_MELLANOX_SWITCHIB2 0xcf08 diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h index 1877d9f8a11a..596fddfb3850 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/reg.h +++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h @@ -39,6 +39,7 @@ #ifndef _MLXSW_REG_H #define _MLXSW_REG_H +#include <linux/kernel.h> #include <linux/string.h> #include <linux/bitops.h> #include <linux/if_vlan.h> @@ -1943,6 +1944,28 @@ static inline void mlxsw_reg_cwtpm_pack(char *payload, u8 local_port, mlxsw_reg_cwtpm_ntcp_r_set(payload, profile); } +/* PGCR - Policy-Engine General Configuration Register + * --------------------------------------------------- + * This register configures general Policy-Engine settings. + */ +#define MLXSW_REG_PGCR_ID 0x3001 +#define MLXSW_REG_PGCR_LEN 0x20 + +MLXSW_REG_DEFINE(pgcr, MLXSW_REG_PGCR_ID, MLXSW_REG_PGCR_LEN); + +/* reg_pgcr_default_action_pointer_base + * Default action pointer base. Each region has a default action pointer + * which is equal to default_action_pointer_base + region_id. + * Access: RW + */ +MLXSW_ITEM32(reg, pgcr, default_action_pointer_base, 0x1C, 0, 24); + +static inline void mlxsw_reg_pgcr_pack(char *payload, u32 pointer_base) +{ + MLXSW_REG_ZERO(pgcr, payload); + mlxsw_reg_pgcr_default_action_pointer_base_set(payload, pointer_base); +} + /* PPBT - Policy-Engine Port Binding Table * --------------------------------------- * This register is used for configuration of the Port Binding Table. @@ -2132,14 +2155,18 @@ MLXSW_ITEM32(reg, ptar, op, 0x00, 28, 4); /* reg_ptar_action_set_type * Type of action set to be used on this region. - * For Spectrum, this is always type 2 - "flexible" + * For Spectrum and Spectrum-2, this is always type 2 - "flexible" * Access: WO */ MLXSW_ITEM32(reg, ptar, action_set_type, 0x00, 16, 8); +enum mlxsw_reg_ptar_key_type { + MLXSW_REG_PTAR_KEY_TYPE_FLEX = 0x50, /* Spetrum */ + MLXSW_REG_PTAR_KEY_TYPE_FLEX2 = 0x51, /* Spectrum-2 */ +}; + /* reg_ptar_key_type * TCAM key type for the region. 
- * For Spectrum, this is always type 0x50 - "FLEX_KEY" * Access: WO */ MLXSW_ITEM32(reg, ptar, key_type, 0x00, 0, 8); @@ -2182,13 +2209,14 @@ MLXSW_ITEM8_INDEXED(reg, ptar, flexible_key_id, 0x20, 0, 8, MLXSW_REG_PTAR_KEY_ID_LEN, 0x00, false); static inline void mlxsw_reg_ptar_pack(char *payload, enum mlxsw_reg_ptar_op op, + enum mlxsw_reg_ptar_key_type key_type, u16 region_size, u16 region_id, const char *tcam_region_info) { MLXSW_REG_ZERO(ptar, payload); mlxsw_reg_ptar_op_set(payload, op); mlxsw_reg_ptar_action_set_type_set(payload, 2); /* "flexible" */ - mlxsw_reg_ptar_key_type_set(payload, 0x50); /* "FLEX_KEY" */ + mlxsw_reg_ptar_key_type_set(payload, key_type); mlxsw_reg_ptar_region_size_set(payload, region_size); mlxsw_reg_ptar_region_id_set(payload, region_id); mlxsw_reg_ptar_tcam_region_info_memcpy_to(payload, tcam_region_info); @@ -2327,6 +2355,23 @@ MLXSW_REG_DEFINE(pefa, MLXSW_REG_PEFA_ID, MLXSW_REG_PEFA_LEN); */ MLXSW_ITEM32(reg, pefa, index, 0x00, 0, 24); +/* reg_pefa_a + * Index in the KVD Linear Centralized Database. + * Activity + * For a new entry: set if ca=0, clear if ca=1 + * Set if a packet lookup has hit on the specific entry + * Access: RO + */ +MLXSW_ITEM32(reg, pefa, a, 0x04, 29, 1); + +/* reg_pefa_ca + * Clear activity + * When write: activity is according to this field + * When read: after reading the activity is cleared according to ca + * Access: OP + */ +MLXSW_ITEM32(reg, pefa, ca, 0x04, 24, 1); + #define MLXSW_REG_FLEX_ACTION_SET_LEN 0xA8 /* reg_pefa_flex_action_set @@ -2336,12 +2381,20 @@ MLXSW_ITEM32(reg, pefa, index, 0x00, 0, 24); */ MLXSW_ITEM_BUF(reg, pefa, flex_action_set, 0x08, MLXSW_REG_FLEX_ACTION_SET_LEN); -static inline void mlxsw_reg_pefa_pack(char *payload, u32 index, +static inline void mlxsw_reg_pefa_pack(char *payload, u32 index, bool ca, const char *flex_action_set) { MLXSW_REG_ZERO(pefa, payload); mlxsw_reg_pefa_index_set(payload, index); - mlxsw_reg_pefa_flex_action_set_memcpy_to(payload, flex_action_set); + mlxsw_reg_pefa_ca_set(payload, ca); + if (flex_action_set) + mlxsw_reg_pefa_flex_action_set_memcpy_to(payload, + flex_action_set); +} + +static inline void mlxsw_reg_pefa_unpack(char *payload, bool *p_a) +{ + *p_a = mlxsw_reg_pefa_a_get(payload); } /* PTCE-V2 - Policy-Engine TCAM Entry Register Version 2 @@ -2397,6 +2450,15 @@ MLXSW_ITEM32(reg, ptce2, op, 0x00, 20, 3); */ MLXSW_ITEM32(reg, ptce2, offset, 0x00, 0, 16); +/* reg_ptce2_priority + * Priority of the rule, higher values win. The range is 1..cap_kvd_size-1. + * Note: priority does not have to be unique per rule. + * Within a region, higher priority should have lower offset (no limitation + * between regions in a multi-region). + * Access: RW + */ +MLXSW_ITEM32(reg, ptce2, priority, 0x04, 0, 24); + /* reg_ptce2_tcam_region_info * Opaque object that represents the TCAM region. * Access: Index @@ -2432,15 +2494,244 @@ MLXSW_ITEM_BUF(reg, ptce2, flex_action_set, 0xE0, static inline void mlxsw_reg_ptce2_pack(char *payload, bool valid, enum mlxsw_reg_ptce2_op op, const char *tcam_region_info, - u16 offset) + u16 offset, u32 priority) { MLXSW_REG_ZERO(ptce2, payload); mlxsw_reg_ptce2_v_set(payload, valid); mlxsw_reg_ptce2_op_set(payload, op); mlxsw_reg_ptce2_offset_set(payload, offset); + mlxsw_reg_ptce2_priority_set(payload, priority); mlxsw_reg_ptce2_tcam_region_info_memcpy_to(payload, tcam_region_info); } +/* PERAR - Policy-Engine Region Association Register + * ------------------------------------------------- + * This register associates a hw region for region_id's. 
Changing on the fly + * is supported by the device. + */ +#define MLXSW_REG_PERAR_ID 0x3026 +#define MLXSW_REG_PERAR_LEN 0x08 + +MLXSW_REG_DEFINE(perar, MLXSW_REG_PERAR_ID, MLXSW_REG_PERAR_LEN); + +/* reg_perar_region_id + * Region identifier + * Range 0 .. cap_max_regions-1 + * Access: Index + */ +MLXSW_ITEM32(reg, perar, region_id, 0x00, 0, 16); + +static inline unsigned int +mlxsw_reg_perar_hw_regions_needed(unsigned int block_num) +{ + return DIV_ROUND_UP(block_num, 4); +} + +/* reg_perar_hw_region + * HW Region + * Range 0 .. cap_max_regions-1 + * Default: hw_region = region_id + * For a 8 key block region, 2 consecutive regions are used + * For a 12 key block region, 3 consecutive regions are used + * Access: RW + */ +MLXSW_ITEM32(reg, perar, hw_region, 0x04, 0, 16); + +static inline void mlxsw_reg_perar_pack(char *payload, u16 region_id, + u16 hw_region) +{ + MLXSW_REG_ZERO(perar, payload); + mlxsw_reg_perar_region_id_set(payload, region_id); + mlxsw_reg_perar_hw_region_set(payload, hw_region); +} + +/* PERCR - Policy-Engine Region Configuration Register + * --------------------------------------------------- + * This register configures the region parameters. The region_id must be + * allocated. + */ +#define MLXSW_REG_PERCR_ID 0x302A +#define MLXSW_REG_PERCR_LEN 0x80 + +MLXSW_REG_DEFINE(percr, MLXSW_REG_PERCR_ID, MLXSW_REG_PERCR_LEN); + +/* reg_percr_region_id + * Region identifier. + * Range 0..cap_max_regions-1 + * Access: Index + */ +MLXSW_ITEM32(reg, percr, region_id, 0x00, 0, 16); + +/* reg_percr_atcam_ignore_prune + * Ignore prune_vector by other A-TCAM rules. Used e.g., for a new rule. + * Access: RW + */ +MLXSW_ITEM32(reg, percr, atcam_ignore_prune, 0x04, 25, 1); + +/* reg_percr_ctcam_ignore_prune + * Ignore prune_ctcam by other A-TCAM rules. Used e.g., for a new rule. + * Access: RW + */ +MLXSW_ITEM32(reg, percr, ctcam_ignore_prune, 0x04, 24, 1); + +/* reg_percr_bf_bypass + * Bloom filter bypass. + * 0 - Bloom filter is used (default) + * 1 - Bloom filter is bypassed. The bypass is an OR condition of + * region_id or eRP. See PERPT.bf_bypass + * Access: RW + */ +MLXSW_ITEM32(reg, percr, bf_bypass, 0x04, 16, 1); + +/* reg_percr_master_mask + * Master mask. Logical OR mask of all masks of all rules of a region + * (both A-TCAM and C-TCAM). When there are no eRPs + * (erpt_pointer_valid = 0), then this provides the mask. + * Access: RW + */ +MLXSW_ITEM_BUF(reg, percr, master_mask, 0x20, 96); + +static inline void mlxsw_reg_percr_pack(char *payload, u16 region_id) +{ + MLXSW_REG_ZERO(percr, payload); + mlxsw_reg_percr_region_id_set(payload, region_id); + mlxsw_reg_percr_atcam_ignore_prune_set(payload, false); + mlxsw_reg_percr_ctcam_ignore_prune_set(payload, false); + mlxsw_reg_percr_bf_bypass_set(payload, true); + memset(payload + 0x20, 0xff, 96); +} + +/* PERERP - Policy-Engine Region eRP Register + * ------------------------------------------ + * This register configures the region eRP. The region_id must be + * allocated. + */ +#define MLXSW_REG_PERERP_ID 0x302B +#define MLXSW_REG_PERERP_LEN 0x1C + +MLXSW_REG_DEFINE(pererp, MLXSW_REG_PERERP_ID, MLXSW_REG_PERERP_LEN); + +/* reg_pererp_region_id + * Region identifier. + * Range 0..cap_max_regions-1 + * Access: Index + */ +MLXSW_ITEM32(reg, pererp, region_id, 0x00, 0, 16); + +/* reg_pererp_ctcam_le + * C-TCAM lookup enable. Reserved when erpt_pointer_valid = 0. + * Access: RW + */ +MLXSW_ITEM32(reg, pererp, ctcam_le, 0x04, 28, 1); + +/* reg_pererp_erpt_pointer_valid + * erpt_pointer is valid. 
+ * Access: RW + */ +MLXSW_ITEM32(reg, pererp, erpt_pointer_valid, 0x10, 31, 1); + +/* reg_pererp_erpt_bank_pointer + * Pointer to eRP table bank. May be modified at any time. + * Range 0..cap_max_erp_table_banks-1 + * Reserved when erpt_pointer_valid = 0 + */ +MLXSW_ITEM32(reg, pererp, erpt_bank_pointer, 0x10, 16, 4); + +/* reg_pererp_erpt_pointer + * Pointer to eRP table within the eRP bank. Can be changed for an + * existing region. + * Range 0..cap_max_erp_table_size-1 + * Reserved when erpt_pointer_valid = 0 + * Access: RW + */ +MLXSW_ITEM32(reg, pererp, erpt_pointer, 0x10, 0, 8); + +/* reg_pererp_erpt_vector + * Vector of allowed eRP indexes starting from erpt_pointer within the + * erpt_bank_pointer. Next entries will be in next bank. + * Note that eRP index is used and not eRP ID. + * Reserved when erpt_pointer_valid = 0 + * Access: RW + */ +MLXSW_ITEM_BIT_ARRAY(reg, pererp, erpt_vector, 0x14, 4, 1); + +/* reg_pererp_master_rp_id + * Master RP ID. When there are no eRPs, then this provides the eRP ID + * for the lookup. Can be changed for an existing region. + * Reserved when erpt_pointer_valid = 1 + * Access: RW + */ +MLXSW_ITEM32(reg, pererp, master_rp_id, 0x18, 0, 4); + +static inline void mlxsw_reg_pererp_pack(char *payload, u16 region_id) +{ + MLXSW_REG_ZERO(pererp, payload); + mlxsw_reg_pererp_region_id_set(payload, region_id); + mlxsw_reg_pererp_ctcam_le_set(payload, true); + mlxsw_reg_pererp_erpt_pointer_valid_set(payload, true); +} + +/* IEDR - Infrastructure Entry Delete Register + * ---------------------------------------------------- + * This register is used for deleting entries from the entry tables. + * It is legitimate to attempt to delete a nonexisting entry (the device will + * respond as a good flow). + */ +#define MLXSW_REG_IEDR_ID 0x3804 +#define MLXSW_REG_IEDR_BASE_LEN 0x10 /* base length, without records */ +#define MLXSW_REG_IEDR_REC_LEN 0x8 /* record length */ +#define MLXSW_REG_IEDR_REC_MAX_COUNT 64 +#define MLXSW_REG_IEDR_LEN (MLXSW_REG_IEDR_BASE_LEN + \ + MLXSW_REG_IEDR_REC_LEN * \ + MLXSW_REG_IEDR_REC_MAX_COUNT) + +MLXSW_REG_DEFINE(iedr, MLXSW_REG_IEDR_ID, MLXSW_REG_IEDR_LEN); + +/* reg_iedr_num_rec + * Number of records. + * Access: OP + */ +MLXSW_ITEM32(reg, iedr, num_rec, 0x00, 0, 8); + +/* reg_iedr_rec_type + * Resource type. + * Access: OP + */ +MLXSW_ITEM32_INDEXED(reg, iedr, rec_type, MLXSW_REG_IEDR_BASE_LEN, 24, 8, + MLXSW_REG_IEDR_REC_LEN, 0x00, false); + +/* reg_iedr_rec_size + * Size of entries do be deleted. The unit is 1 entry, regardless of entry type. + * Access: OP + */ +MLXSW_ITEM32_INDEXED(reg, iedr, rec_size, MLXSW_REG_IEDR_BASE_LEN, 0, 11, + MLXSW_REG_IEDR_REC_LEN, 0x00, false); + +/* reg_iedr_rec_index_start + * Resource index start. 
+ * Access: OP + */ +MLXSW_ITEM32_INDEXED(reg, iedr, rec_index_start, MLXSW_REG_IEDR_BASE_LEN, 0, 24, + MLXSW_REG_IEDR_REC_LEN, 0x04, false); + +static inline void mlxsw_reg_iedr_pack(char *payload) +{ + MLXSW_REG_ZERO(iedr, payload); +} + +static inline void mlxsw_reg_iedr_rec_pack(char *payload, int rec_index, + u8 rec_type, u16 rec_size, + u32 rec_index_start) +{ + u8 num_rec = mlxsw_reg_iedr_num_rec_get(payload); + + if (rec_index >= num_rec) + mlxsw_reg_iedr_num_rec_set(payload, rec_index + 1); + mlxsw_reg_iedr_rec_type_set(payload, rec_index, rec_type); + mlxsw_reg_iedr_rec_size_set(payload, rec_index, rec_size); + mlxsw_reg_iedr_rec_index_start_set(payload, rec_index, rec_index_start); +} + /* QPCR - QoS Policer Configuration Register * ----------------------------------------- * The QPCR register is used to create policers - that limit @@ -3350,6 +3641,7 @@ MLXSW_ITEM32(reg, ppcnt, pnat, 0x00, 14, 2); enum mlxsw_reg_ppcnt_grp { MLXSW_REG_PPCNT_IEEE_8023_CNT = 0x0, + MLXSW_REG_PPCNT_RFC_2819_CNT = 0x2, MLXSW_REG_PPCNT_EXT_CNT = 0x5, MLXSW_REG_PPCNT_PRIO_CNT = 0x10, MLXSW_REG_PPCNT_TC_CNT = 0x11, @@ -3508,6 +3800,68 @@ MLXSW_ITEM64(reg, ppcnt, a_pause_mac_ctrl_frames_received, MLXSW_ITEM64(reg, ppcnt, a_pause_mac_ctrl_frames_transmitted, MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x90, 0, 64); +/* Ethernet RFC 2819 Counter Group */ + +/* reg_ppcnt_ether_stats_pkts64octets + * Access: RO + */ +MLXSW_ITEM64(reg, ppcnt, ether_stats_pkts64octets, + MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x58, 0, 64); + +/* reg_ppcnt_ether_stats_pkts65to127octets + * Access: RO + */ +MLXSW_ITEM64(reg, ppcnt, ether_stats_pkts65to127octets, + MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x60, 0, 64); + +/* reg_ppcnt_ether_stats_pkts128to255octets + * Access: RO + */ +MLXSW_ITEM64(reg, ppcnt, ether_stats_pkts128to255octets, + MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x68, 0, 64); + +/* reg_ppcnt_ether_stats_pkts256to511octets + * Access: RO + */ +MLXSW_ITEM64(reg, ppcnt, ether_stats_pkts256to511octets, + MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x70, 0, 64); + +/* reg_ppcnt_ether_stats_pkts512to1023octets + * Access: RO + */ +MLXSW_ITEM64(reg, ppcnt, ether_stats_pkts512to1023octets, + MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x78, 0, 64); + +/* reg_ppcnt_ether_stats_pkts1024to1518octets + * Access: RO + */ +MLXSW_ITEM64(reg, ppcnt, ether_stats_pkts1024to1518octets, + MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x80, 0, 64); + +/* reg_ppcnt_ether_stats_pkts1519to2047octets + * Access: RO + */ +MLXSW_ITEM64(reg, ppcnt, ether_stats_pkts1519to2047octets, + MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x88, 0, 64); + +/* reg_ppcnt_ether_stats_pkts2048to4095octets + * Access: RO + */ +MLXSW_ITEM64(reg, ppcnt, ether_stats_pkts2048to4095octets, + MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x90, 0, 64); + +/* reg_ppcnt_ether_stats_pkts4096to8191octets + * Access: RO + */ +MLXSW_ITEM64(reg, ppcnt, ether_stats_pkts4096to8191octets, + MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x98, 0, 64); + +/* reg_ppcnt_ether_stats_pkts8192to10239octets + * Access: RO + */ +MLXSW_ITEM64(reg, ppcnt, ether_stats_pkts8192to10239octets, + MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0xA0, 0, 64); + /* Ethernet Extended Counter Group Counters */ /* reg_ppcnt_ecn_marked @@ -4338,6 +4692,20 @@ MLXSW_ITEM32(reg, ritr, if_swid, 0x08, 24, 8); */ MLXSW_ITEM_BUF(reg, ritr, if_mac, 0x12, 6); +/* reg_ritr_if_vrrp_id_ipv6 + * VRRP ID for IPv6 + * Note: Reserved for RIF types other than VLAN, FID and Sub-port. 
+ * Access: RW + */ +MLXSW_ITEM32(reg, ritr, if_vrrp_id_ipv6, 0x1C, 8, 8); + +/* reg_ritr_if_vrrp_id_ipv4 + * VRRP ID for IPv4 + * Note: Reserved for RIF types other than VLAN, FID and Sub-port. + * Access: RW + */ +MLXSW_ITEM32(reg, ritr, if_vrrp_id_ipv4, 0x1C, 0, 8); + /* VLAN Interface */ /* reg_ritr_vlan_if_vid @@ -7871,6 +8239,7 @@ static const struct mlxsw_reg_info *mlxsw_reg_infos[] = { MLXSW_REG(spvmlr), MLXSW_REG(cwtp), MLXSW_REG(cwtpm), + MLXSW_REG(pgcr), MLXSW_REG(ppbt), MLXSW_REG(pacl), MLXSW_REG(pagt), @@ -7879,6 +8248,10 @@ static const struct mlxsw_reg_info *mlxsw_reg_infos[] = { MLXSW_REG(prcr), MLXSW_REG(pefa), MLXSW_REG(ptce2), + MLXSW_REG(perar), + MLXSW_REG(percr), + MLXSW_REG(pererp), + MLXSW_REG(iedr), MLXSW_REG(qpcr), MLXSW_REG(qtct), MLXSW_REG(qeec), diff --git a/drivers/net/ethernet/mellanox/mlxsw/resources.h b/drivers/net/ethernet/mellanox/mlxsw/resources.h index fd9299ccec72..f672a7b71de7 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/resources.h +++ b/drivers/net/ethernet/mellanox/mlxsw/resources.h @@ -42,6 +42,8 @@ enum mlxsw_res_id { MLXSW_RES_ID_KVD_SIZE, MLXSW_RES_ID_KVD_SINGLE_MIN_SIZE, MLXSW_RES_ID_KVD_DOUBLE_MIN_SIZE, + MLXSW_RES_ID_MAX_KVD_LINEAR_RANGE, + MLXSW_RES_ID_MAX_KVD_ACTION_SETS, MLXSW_RES_ID_MAX_TRAP_GROUPS, MLXSW_RES_ID_CQE_V0, MLXSW_RES_ID_CQE_V1, @@ -83,6 +85,8 @@ static u16 mlxsw_res_ids[] = { [MLXSW_RES_ID_KVD_SIZE] = 0x1001, [MLXSW_RES_ID_KVD_SINGLE_MIN_SIZE] = 0x1002, [MLXSW_RES_ID_KVD_DOUBLE_MIN_SIZE] = 0x1003, + [MLXSW_RES_ID_MAX_KVD_LINEAR_RANGE] = 0x1005, + [MLXSW_RES_ID_MAX_KVD_ACTION_SETS] = 0x1007, [MLXSW_RES_ID_MAX_TRAP_GROUPS] = 0x2201, [MLXSW_RES_ID_CQE_V0] = 0x2210, [MLXSW_RES_ID_CQE_V1] = 0x2211, diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c index 968b88af2ef5..317c92d6496e 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c @@ -74,17 +74,25 @@ #include "spectrum_span.h" #include "../mlxfw/mlxfw.h" -#define MLXSW_FWREV_MAJOR 13 -#define MLXSW_FWREV_MINOR 1620 -#define MLXSW_FWREV_SUBMINOR 192 -#define MLXSW_FWREV_MINOR_TO_BRANCH(minor) ((minor) / 100) +#define MLXSW_SP_FWREV_MINOR_TO_BRANCH(minor) ((minor) / 100) -#define MLXSW_SP_FW_FILENAME \ - "mellanox/mlxsw_spectrum-" __stringify(MLXSW_FWREV_MAJOR) \ - "." __stringify(MLXSW_FWREV_MINOR) \ - "." __stringify(MLXSW_FWREV_SUBMINOR) ".mfa2" +#define MLXSW_SP1_FWREV_MAJOR 13 +#define MLXSW_SP1_FWREV_MINOR 1620 +#define MLXSW_SP1_FWREV_SUBMINOR 192 -static const char mlxsw_sp_driver_name[] = "mlxsw_spectrum"; +static const struct mlxsw_fw_rev mlxsw_sp1_fw_rev = { + .major = MLXSW_SP1_FWREV_MAJOR, + .minor = MLXSW_SP1_FWREV_MINOR, + .subminor = MLXSW_SP1_FWREV_SUBMINOR, +}; + +#define MLXSW_SP1_FW_FILENAME \ + "mellanox/mlxsw_spectrum-" __stringify(MLXSW_SP1_FWREV_MAJOR) \ + "." __stringify(MLXSW_SP1_FWREV_MINOR) \ + "." 
__stringify(MLXSW_SP1_FWREV_SUBMINOR) ".mfa2" + +static const char mlxsw_sp1_driver_name[] = "mlxsw_spectrum"; +static const char mlxsw_sp2_driver_name[] = "mlxsw_spectrum2"; static const char mlxsw_sp_driver_version[] = "1.0"; /* tx_hdr_version @@ -338,29 +346,35 @@ static int mlxsw_sp_firmware_flash(struct mlxsw_sp *mlxsw_sp, static int mlxsw_sp_fw_rev_validate(struct mlxsw_sp *mlxsw_sp) { const struct mlxsw_fw_rev *rev = &mlxsw_sp->bus_info->fw_rev; + const struct mlxsw_fw_rev *req_rev = mlxsw_sp->req_rev; + const char *fw_filename = mlxsw_sp->fw_filename; const struct firmware *firmware; int err; + /* Don't check if driver does not require it */ + if (!req_rev || !fw_filename) + return 0; + /* Validate driver & FW are compatible */ - if (rev->major != MLXSW_FWREV_MAJOR) { + if (rev->major != req_rev->major) { WARN(1, "Mismatch in major FW version [%d:%d] is never expected; Please contact support\n", - rev->major, MLXSW_FWREV_MAJOR); + rev->major, req_rev->major); return -EINVAL; } - if (MLXSW_FWREV_MINOR_TO_BRANCH(rev->minor) == - MLXSW_FWREV_MINOR_TO_BRANCH(MLXSW_FWREV_MINOR)) + if (MLXSW_SP_FWREV_MINOR_TO_BRANCH(rev->minor) == + MLXSW_SP_FWREV_MINOR_TO_BRANCH(req_rev->minor)) return 0; dev_info(mlxsw_sp->bus_info->dev, "The firmware version %d.%d.%d is incompatible with the driver\n", rev->major, rev->minor, rev->subminor); dev_info(mlxsw_sp->bus_info->dev, "Flashing firmware using file %s\n", - MLXSW_SP_FW_FILENAME); + fw_filename); - err = request_firmware_direct(&firmware, MLXSW_SP_FW_FILENAME, + err = request_firmware_direct(&firmware, fw_filename, mlxsw_sp->bus_info->dev); if (err) { dev_err(mlxsw_sp->bus_info->dev, "Could not request firmware file %s\n", - MLXSW_SP_FW_FILENAME); + fw_filename); return err; } @@ -1503,7 +1517,8 @@ static int mlxsw_sp_setup_tc_block_cb_flower(enum tc_setup_type type, static int mlxsw_sp_setup_tc_block_flower_bind(struct mlxsw_sp_port *mlxsw_sp_port, - struct tcf_block *block, bool ingress) + struct tcf_block *block, bool ingress, + struct netlink_ext_ack *extack) { struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; struct mlxsw_sp_acl_block *acl_block; @@ -1518,7 +1533,7 @@ mlxsw_sp_setup_tc_block_flower_bind(struct mlxsw_sp_port *mlxsw_sp_port, return -ENOMEM; block_cb = __tcf_block_cb_register(block, mlxsw_sp_setup_tc_block_cb_flower, - mlxsw_sp, acl_block); + mlxsw_sp, acl_block, extack); if (IS_ERR(block_cb)) { err = PTR_ERR(block_cb); goto err_cb_register; @@ -1541,7 +1556,7 @@ mlxsw_sp_setup_tc_block_flower_bind(struct mlxsw_sp_port *mlxsw_sp_port, err_block_bind: if (!tcf_block_cb_decref(block_cb)) { - __tcf_block_cb_unregister(block_cb); + __tcf_block_cb_unregister(block, block_cb); err_cb_register: mlxsw_sp_acl_block_destroy(acl_block); } @@ -1571,7 +1586,7 @@ mlxsw_sp_setup_tc_block_flower_unbind(struct mlxsw_sp_port *mlxsw_sp_port, err = mlxsw_sp_acl_block_unbind(mlxsw_sp, acl_block, mlxsw_sp_port, ingress); if (!err && !tcf_block_cb_decref(block_cb)) { - __tcf_block_cb_unregister(block_cb); + __tcf_block_cb_unregister(block, block_cb); mlxsw_sp_acl_block_destroy(acl_block); } } @@ -1596,11 +1611,12 @@ static int mlxsw_sp_setup_tc_block(struct mlxsw_sp_port *mlxsw_sp_port, switch (f->command) { case TC_BLOCK_BIND: err = tcf_block_cb_register(f->block, cb, mlxsw_sp_port, - mlxsw_sp_port); + mlxsw_sp_port, f->extack); if (err) return err; err = mlxsw_sp_setup_tc_block_flower_bind(mlxsw_sp_port, - f->block, ingress); + f->block, ingress, + f->extack); if (err) { tcf_block_cb_unregister(f->block, cb, mlxsw_sp_port); return err; @@ 
-1712,7 +1728,8 @@ static void mlxsw_sp_port_get_drvinfo(struct net_device *dev, struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; - strlcpy(drvinfo->driver, mlxsw_sp_driver_name, sizeof(drvinfo->driver)); + strlcpy(drvinfo->driver, mlxsw_sp->bus_info->device_kind, + sizeof(drvinfo->driver)); strlcpy(drvinfo->version, mlxsw_sp_driver_version, sizeof(drvinfo->version)); snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version), @@ -1873,6 +1890,52 @@ static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_stats[] = { #define MLXSW_SP_PORT_HW_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_stats) +static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_rfc_2819_stats[] = { + { + .str = "ether_pkts64octets", + .getter = mlxsw_reg_ppcnt_ether_stats_pkts64octets_get, + }, + { + .str = "ether_pkts65to127octets", + .getter = mlxsw_reg_ppcnt_ether_stats_pkts65to127octets_get, + }, + { + .str = "ether_pkts128to255octets", + .getter = mlxsw_reg_ppcnt_ether_stats_pkts128to255octets_get, + }, + { + .str = "ether_pkts256to511octets", + .getter = mlxsw_reg_ppcnt_ether_stats_pkts256to511octets_get, + }, + { + .str = "ether_pkts512to1023octets", + .getter = mlxsw_reg_ppcnt_ether_stats_pkts512to1023octets_get, + }, + { + .str = "ether_pkts1024to1518octets", + .getter = mlxsw_reg_ppcnt_ether_stats_pkts1024to1518octets_get, + }, + { + .str = "ether_pkts1519to2047octets", + .getter = mlxsw_reg_ppcnt_ether_stats_pkts1519to2047octets_get, + }, + { + .str = "ether_pkts2048to4095octets", + .getter = mlxsw_reg_ppcnt_ether_stats_pkts2048to4095octets_get, + }, + { + .str = "ether_pkts4096to8191octets", + .getter = mlxsw_reg_ppcnt_ether_stats_pkts4096to8191octets_get, + }, + { + .str = "ether_pkts8192to10239octets", + .getter = mlxsw_reg_ppcnt_ether_stats_pkts8192to10239octets_get, + }, +}; + +#define MLXSW_SP_PORT_HW_RFC_2819_STATS_LEN \ + ARRAY_SIZE(mlxsw_sp_port_hw_rfc_2819_stats) + static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_prio_stats[] = { { .str = "rx_octets_prio", @@ -1964,6 +2027,11 @@ static void mlxsw_sp_port_get_strings(struct net_device *dev, ETH_GSTRING_LEN); p += ETH_GSTRING_LEN; } + for (i = 0; i < MLXSW_SP_PORT_HW_RFC_2819_STATS_LEN; i++) { + memcpy(p, mlxsw_sp_port_hw_rfc_2819_stats[i].str, + ETH_GSTRING_LEN); + p += ETH_GSTRING_LEN; + } for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) mlxsw_sp_port_get_prio_strings(&p, i); @@ -2003,10 +2071,14 @@ mlxsw_sp_get_hw_stats_by_group(struct mlxsw_sp_port_hw_stats **p_hw_stats, int *p_len, enum mlxsw_reg_ppcnt_grp grp) { switch (grp) { - case MLXSW_REG_PPCNT_IEEE_8023_CNT: + case MLXSW_REG_PPCNT_IEEE_8023_CNT: *p_hw_stats = mlxsw_sp_port_hw_stats; *p_len = MLXSW_SP_PORT_HW_STATS_LEN; break; + case MLXSW_REG_PPCNT_RFC_2819_CNT: + *p_hw_stats = mlxsw_sp_port_hw_rfc_2819_stats; + *p_len = MLXSW_SP_PORT_HW_RFC_2819_STATS_LEN; + break; case MLXSW_REG_PPCNT_PRIO_CNT: *p_hw_stats = mlxsw_sp_port_hw_prio_stats; *p_len = MLXSW_SP_PORT_HW_PRIO_STATS_LEN; @@ -2056,6 +2128,11 @@ static void mlxsw_sp_port_get_stats(struct net_device *dev, data, data_index); data_index = MLXSW_SP_PORT_HW_STATS_LEN; + /* RFC 2819 Counters */ + __mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_RFC_2819_CNT, 0, + data, data_index); + data_index += MLXSW_SP_PORT_HW_RFC_2819_STATS_LEN; + /* Per-Priority Counters */ for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { __mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_PRIO_CNT, i, @@ -3371,6 +3448,8 @@ static const struct mlxsw_listener mlxsw_sp_listener[] = { MLXSW_SP_RXL_MARK(ROUTER_ALERT_IPV4, TRAP_TO_CPU, 
ROUTER_EXP, false), MLXSW_SP_RXL_MARK(ROUTER_ALERT_IPV6, TRAP_TO_CPU, ROUTER_EXP, false), MLXSW_SP_RXL_MARK(IPIP_DECAP_ERROR, TRAP_TO_CPU, ROUTER_EXP, false), + MLXSW_SP_RXL_MARK(IPV4_VRRP, TRAP_TO_CPU, ROUTER_EXP, false), + MLXSW_SP_RXL_MARK(IPV6_VRRP, TRAP_TO_CPU, ROUTER_EXP, false), /* PKT Sample trap */ MLXSW_RXL(mlxsw_sp_rx_listener_sample_func, PKT_SAMPLE, MIRROR_TO_CPU, false, SP_IP2ME, DISCARD), @@ -3757,6 +3836,36 @@ err_fids_init: return err; } +static int mlxsw_sp1_init(struct mlxsw_core *mlxsw_core, + const struct mlxsw_bus_info *mlxsw_bus_info) +{ + struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); + + mlxsw_sp->req_rev = &mlxsw_sp1_fw_rev; + mlxsw_sp->fw_filename = MLXSW_SP1_FW_FILENAME; + mlxsw_sp->kvdl_ops = &mlxsw_sp1_kvdl_ops; + mlxsw_sp->afa_ops = &mlxsw_sp1_act_afa_ops; + mlxsw_sp->afk_ops = &mlxsw_sp1_afk_ops; + mlxsw_sp->mr_tcam_ops = &mlxsw_sp1_mr_tcam_ops; + mlxsw_sp->acl_tcam_ops = &mlxsw_sp1_acl_tcam_ops; + + return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info); +} + +static int mlxsw_sp2_init(struct mlxsw_core *mlxsw_core, + const struct mlxsw_bus_info *mlxsw_bus_info) +{ + struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); + + mlxsw_sp->kvdl_ops = &mlxsw_sp2_kvdl_ops; + mlxsw_sp->afa_ops = &mlxsw_sp2_act_afa_ops; + mlxsw_sp->afk_ops = &mlxsw_sp2_afk_ops; + mlxsw_sp->mr_tcam_ops = &mlxsw_sp2_mr_tcam_ops; + mlxsw_sp->acl_tcam_ops = &mlxsw_sp2_acl_tcam_ops; + + return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info); +} + static void mlxsw_sp_fini(struct mlxsw_core *mlxsw_core) { struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); @@ -3777,7 +3886,7 @@ static void mlxsw_sp_fini(struct mlxsw_core *mlxsw_core) mlxsw_sp_kvdl_fini(mlxsw_sp); } -static const struct mlxsw_config_profile mlxsw_sp_config_profile = { +static const struct mlxsw_config_profile mlxsw_sp1_config_profile = { .used_max_mid = 1, .max_mid = MLXSW_SP_MID_MAX, .used_flood_tables = 1, @@ -3803,6 +3912,28 @@ static const struct mlxsw_config_profile mlxsw_sp_config_profile = { }, }; +static const struct mlxsw_config_profile mlxsw_sp2_config_profile = { + .used_max_mid = 1, + .max_mid = MLXSW_SP_MID_MAX, + .used_flood_tables = 1, + .used_flood_mode = 1, + .flood_mode = 3, + .max_fid_offset_flood_tables = 3, + .fid_offset_flood_table_size = VLAN_N_VID - 1, + .max_fid_flood_tables = 3, + .fid_flood_table_size = MLXSW_SP_FID_8021D_MAX, + .used_max_ib_mc = 1, + .max_ib_mc = 0, + .used_max_pkey = 1, + .max_pkey = 0, + .swid_config = { + { + .used_type = 1, + .type = MLXSW_PORT_SWID_TYPE_ETH, + } + }, +}; + static void mlxsw_sp_resource_size_params_prepare(struct mlxsw_core *mlxsw_core, struct devlink_resource_size_params *kvd_size_params, @@ -3839,7 +3970,7 @@ mlxsw_sp_resource_size_params_prepare(struct mlxsw_core *mlxsw_core, DEVLINK_RESOURCE_UNIT_ENTRY); } -static int mlxsw_sp_resources_register(struct mlxsw_core *mlxsw_core) +static int mlxsw_sp1_resources_kvd_register(struct mlxsw_core *mlxsw_core) { struct devlink *devlink = priv_to_devlink(mlxsw_core); struct devlink_resource_size_params hash_single_size_params; @@ -3850,7 +3981,7 @@ static int mlxsw_sp_resources_register(struct mlxsw_core *mlxsw_core) const struct mlxsw_config_profile *profile; int err; - profile = &mlxsw_sp_config_profile; + profile = &mlxsw_sp1_config_profile; if (!MLXSW_CORE_RES_VALID(mlxsw_core, KVD_SIZE)) return -EIO; @@ -3876,7 +4007,7 @@ static int mlxsw_sp_resources_register(struct mlxsw_core *mlxsw_core) if (err) return err; - err = mlxsw_sp_kvdl_resources_register(mlxsw_core); + err = 
mlxsw_sp1_kvdl_resources_register(mlxsw_core); if (err) return err; @@ -3905,6 +4036,16 @@ static int mlxsw_sp_resources_register(struct mlxsw_core *mlxsw_core) return 0; } +static int mlxsw_sp1_resources_register(struct mlxsw_core *mlxsw_core) +{ + return mlxsw_sp1_resources_kvd_register(mlxsw_core); +} + +static int mlxsw_sp2_resources_register(struct mlxsw_core *mlxsw_core) +{ + return 0; +} + static int mlxsw_sp_kvd_sizes_get(struct mlxsw_core *mlxsw_core, const struct mlxsw_config_profile *profile, u64 *p_single_size, u64 *p_double_size, @@ -3960,10 +4101,10 @@ static int mlxsw_sp_kvd_sizes_get(struct mlxsw_core *mlxsw_core, return 0; } -static struct mlxsw_driver mlxsw_sp_driver = { - .kind = mlxsw_sp_driver_name, +static struct mlxsw_driver mlxsw_sp1_driver = { + .kind = mlxsw_sp1_driver_name, .priv_size = sizeof(struct mlxsw_sp), - .init = mlxsw_sp_init, + .init = mlxsw_sp1_init, .fini = mlxsw_sp_fini, .basic_trap_groups_set = mlxsw_sp_basic_trap_groups_set, .port_split = mlxsw_sp_port_split, @@ -3979,10 +4120,35 @@ static struct mlxsw_driver mlxsw_sp_driver = { .sb_occ_port_pool_get = mlxsw_sp_sb_occ_port_pool_get, .sb_occ_tc_port_bind_get = mlxsw_sp_sb_occ_tc_port_bind_get, .txhdr_construct = mlxsw_sp_txhdr_construct, - .resources_register = mlxsw_sp_resources_register, + .resources_register = mlxsw_sp1_resources_register, .kvd_sizes_get = mlxsw_sp_kvd_sizes_get, .txhdr_len = MLXSW_TXHDR_LEN, - .profile = &mlxsw_sp_config_profile, + .profile = &mlxsw_sp1_config_profile, + .res_query_enabled = true, +}; + +static struct mlxsw_driver mlxsw_sp2_driver = { + .kind = mlxsw_sp2_driver_name, + .priv_size = sizeof(struct mlxsw_sp), + .init = mlxsw_sp2_init, + .fini = mlxsw_sp_fini, + .basic_trap_groups_set = mlxsw_sp_basic_trap_groups_set, + .port_split = mlxsw_sp_port_split, + .port_unsplit = mlxsw_sp_port_unsplit, + .sb_pool_get = mlxsw_sp_sb_pool_get, + .sb_pool_set = mlxsw_sp_sb_pool_set, + .sb_port_pool_get = mlxsw_sp_sb_port_pool_get, + .sb_port_pool_set = mlxsw_sp_sb_port_pool_set, + .sb_tc_pool_bind_get = mlxsw_sp_sb_tc_pool_bind_get, + .sb_tc_pool_bind_set = mlxsw_sp_sb_tc_pool_bind_set, + .sb_occ_snapshot = mlxsw_sp_sb_occ_snapshot, + .sb_occ_max_clear = mlxsw_sp_sb_occ_max_clear, + .sb_occ_port_pool_get = mlxsw_sp_sb_occ_port_pool_get, + .sb_occ_tc_port_bind_get = mlxsw_sp_sb_occ_tc_port_bind_get, + .txhdr_construct = mlxsw_sp_txhdr_construct, + .resources_register = mlxsw_sp2_resources_register, + .txhdr_len = MLXSW_TXHDR_LEN, + .profile = &mlxsw_sp2_config_profile, .res_query_enabled = true, }; @@ -4397,7 +4563,8 @@ static int mlxsw_sp_netdevice_port_upper_event(struct net_device *lower_dev, if (!is_vlan_dev(upper_dev) && !netif_is_lag_master(upper_dev) && !netif_is_bridge_master(upper_dev) && - !netif_is_ovs_master(upper_dev)) { + !netif_is_ovs_master(upper_dev) && + !netif_is_macvlan(upper_dev)) { NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type"); return -EINVAL; } @@ -4423,6 +4590,11 @@ static int mlxsw_sp_netdevice_port_upper_event(struct net_device *lower_dev, NL_SET_ERR_MSG_MOD(extack, "Can not put a VLAN on a LAG port"); return -EINVAL; } + if (netif_is_macvlan(upper_dev) && + !mlxsw_sp_rif_find_by_dev(mlxsw_sp, lower_dev)) { + NL_SET_ERR_MSG_MOD(extack, "macvlan is only supported on top of router interfaces"); + return -EOPNOTSUPP; + } if (netif_is_ovs_master(upper_dev) && vlan_uses_dev(dev)) { NL_SET_ERR_MSG_MOD(extack, "Master device is an OVS master and this device has a VLAN"); return -EINVAL; @@ -4461,6 +4633,9 @@ static int 
mlxsw_sp_netdevice_port_upper_event(struct net_device *lower_dev, err = mlxsw_sp_port_ovs_join(mlxsw_sp_port); else mlxsw_sp_port_ovs_leave(mlxsw_sp_port); + } else if (netif_is_macvlan(upper_dev)) { + if (!info->linking) + mlxsw_sp_rif_macvlan_del(mlxsw_sp, upper_dev); } break; } @@ -4545,8 +4720,9 @@ static int mlxsw_sp_netdevice_port_vlan_event(struct net_device *vlan_dev, switch (event) { case NETDEV_PRECHANGEUPPER: upper_dev = info->upper_dev; - if (!netif_is_bridge_master(upper_dev)) { - NL_SET_ERR_MSG_MOD(extack, "VLAN devices only support bridge and VRF uppers"); + if (!netif_is_bridge_master(upper_dev) && + !netif_is_macvlan(upper_dev)) { + NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type"); return -EINVAL; } if (!info->linking) @@ -4558,6 +4734,11 @@ static int mlxsw_sp_netdevice_port_vlan_event(struct net_device *vlan_dev, NL_SET_ERR_MSG_MOD(extack, "Enslaving a port to a device that already has an upper device is not supported"); return -EINVAL; } + if (netif_is_macvlan(upper_dev) && + !mlxsw_sp_rif_find_by_dev(mlxsw_sp, vlan_dev)) { + NL_SET_ERR_MSG_MOD(extack, "macvlan is only supported on top of router interfaces"); + return -EOPNOTSUPP; + } break; case NETDEV_CHANGEUPPER: upper_dev = info->upper_dev; @@ -4571,6 +4752,9 @@ static int mlxsw_sp_netdevice_port_vlan_event(struct net_device *vlan_dev, mlxsw_sp_port_bridge_leave(mlxsw_sp_port, vlan_dev, upper_dev); + } else if (netif_is_macvlan(upper_dev)) { + if (!info->linking) + mlxsw_sp_rif_macvlan_del(mlxsw_sp, upper_dev); } else { err = -EINVAL; WARN_ON(1); @@ -4620,6 +4804,64 @@ static int mlxsw_sp_netdevice_vlan_event(struct net_device *vlan_dev, return 0; } +static int mlxsw_sp_netdevice_bridge_event(struct net_device *br_dev, + unsigned long event, void *ptr) +{ + struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(br_dev); + struct netdev_notifier_changeupper_info *info = ptr; + struct netlink_ext_ack *extack; + struct net_device *upper_dev; + + if (!mlxsw_sp) + return 0; + + extack = netdev_notifier_info_to_extack(&info->info); + + switch (event) { + case NETDEV_PRECHANGEUPPER: + upper_dev = info->upper_dev; + if (!is_vlan_dev(upper_dev) && !netif_is_macvlan(upper_dev)) { + NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type"); + return -EOPNOTSUPP; + } + if (!info->linking) + break; + if (netif_is_macvlan(upper_dev) && + !mlxsw_sp_rif_find_by_dev(mlxsw_sp, br_dev)) { + NL_SET_ERR_MSG_MOD(extack, "macvlan is only supported on top of router interfaces"); + return -EOPNOTSUPP; + } + break; + case NETDEV_CHANGEUPPER: + upper_dev = info->upper_dev; + if (info->linking) + break; + if (netif_is_macvlan(upper_dev)) + mlxsw_sp_rif_macvlan_del(mlxsw_sp, upper_dev); + break; + } + + return 0; +} + +static int mlxsw_sp_netdevice_macvlan_event(struct net_device *macvlan_dev, + unsigned long event, void *ptr) +{ + struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(macvlan_dev); + struct netdev_notifier_changeupper_info *info = ptr; + struct netlink_ext_ack *extack; + + if (!mlxsw_sp || event != NETDEV_PRECHANGEUPPER) + return 0; + + extack = netdev_notifier_info_to_extack(&info->info); + + /* VRF enslavement is handled in mlxsw_sp_netdevice_vrf_event() */ + NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type"); + + return -EOPNOTSUPP; +} + static bool mlxsw_sp_is_vrf_event(unsigned long event, void *ptr) { struct netdev_notifier_changeupper_info *info = ptr; @@ -4661,6 +4903,10 @@ static int mlxsw_sp_netdevice_event(struct notifier_block *nb, err = mlxsw_sp_netdevice_lag_event(dev, event, ptr); else if (is_vlan_dev(dev)) err = 
mlxsw_sp_netdevice_vlan_event(dev, event, ptr); + else if (netif_is_bridge_master(dev)) + err = mlxsw_sp_netdevice_bridge_event(dev, event, ptr); + else if (netif_is_macvlan(dev)) + err = mlxsw_sp_netdevice_macvlan_event(dev, event, ptr); return notifier_from_errno(err); } @@ -4681,14 +4927,24 @@ static struct notifier_block mlxsw_sp_inet6addr_nb __read_mostly = { .notifier_call = mlxsw_sp_inet6addr_event, }; -static const struct pci_device_id mlxsw_sp_pci_id_table[] = { +static const struct pci_device_id mlxsw_sp1_pci_id_table[] = { {PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_SPECTRUM), 0}, {0, }, }; -static struct pci_driver mlxsw_sp_pci_driver = { - .name = mlxsw_sp_driver_name, - .id_table = mlxsw_sp_pci_id_table, +static struct pci_driver mlxsw_sp1_pci_driver = { + .name = mlxsw_sp1_driver_name, + .id_table = mlxsw_sp1_pci_id_table, +}; + +static const struct pci_device_id mlxsw_sp2_pci_id_table[] = { + {PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_SPECTRUM2), 0}, + {0, }, +}; + +static struct pci_driver mlxsw_sp2_pci_driver = { + .name = mlxsw_sp2_driver_name, + .id_table = mlxsw_sp2_pci_id_table, }; static int __init mlxsw_sp_module_init(void) @@ -4700,19 +4956,31 @@ static int __init mlxsw_sp_module_init(void) register_inet6addr_validator_notifier(&mlxsw_sp_inet6addr_valid_nb); register_inet6addr_notifier(&mlxsw_sp_inet6addr_nb); - err = mlxsw_core_driver_register(&mlxsw_sp_driver); + err = mlxsw_core_driver_register(&mlxsw_sp1_driver); + if (err) + goto err_sp1_core_driver_register; + + err = mlxsw_core_driver_register(&mlxsw_sp2_driver); + if (err) + goto err_sp2_core_driver_register; + + err = mlxsw_pci_driver_register(&mlxsw_sp1_pci_driver); if (err) - goto err_core_driver_register; + goto err_sp1_pci_driver_register; - err = mlxsw_pci_driver_register(&mlxsw_sp_pci_driver); + err = mlxsw_pci_driver_register(&mlxsw_sp2_pci_driver); if (err) - goto err_pci_driver_register; + goto err_sp2_pci_driver_register; return 0; -err_pci_driver_register: - mlxsw_core_driver_unregister(&mlxsw_sp_driver); -err_core_driver_register: +err_sp2_pci_driver_register: + mlxsw_pci_driver_unregister(&mlxsw_sp2_pci_driver); +err_sp1_pci_driver_register: + mlxsw_core_driver_unregister(&mlxsw_sp2_driver); +err_sp2_core_driver_register: + mlxsw_core_driver_unregister(&mlxsw_sp1_driver); +err_sp1_core_driver_register: unregister_inet6addr_notifier(&mlxsw_sp_inet6addr_nb); unregister_inet6addr_validator_notifier(&mlxsw_sp_inet6addr_valid_nb); unregister_inetaddr_notifier(&mlxsw_sp_inetaddr_nb); @@ -4722,8 +4990,10 @@ err_core_driver_register: static void __exit mlxsw_sp_module_exit(void) { - mlxsw_pci_driver_unregister(&mlxsw_sp_pci_driver); - mlxsw_core_driver_unregister(&mlxsw_sp_driver); + mlxsw_pci_driver_unregister(&mlxsw_sp2_pci_driver); + mlxsw_pci_driver_unregister(&mlxsw_sp1_pci_driver); + mlxsw_core_driver_unregister(&mlxsw_sp2_driver); + mlxsw_core_driver_unregister(&mlxsw_sp1_driver); unregister_inet6addr_notifier(&mlxsw_sp_inet6addr_nb); unregister_inet6addr_validator_notifier(&mlxsw_sp_inet6addr_valid_nb); unregister_inetaddr_notifier(&mlxsw_sp_inetaddr_nb); @@ -4736,5 +5006,6 @@ module_exit(mlxsw_sp_module_exit); MODULE_LICENSE("Dual BSD/GPL"); MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>"); MODULE_DESCRIPTION("Mellanox Spectrum driver"); -MODULE_DEVICE_TABLE(pci, mlxsw_sp_pci_id_table); -MODULE_FIRMWARE(MLXSW_SP_FW_FILENAME); +MODULE_DEVICE_TABLE(pci, mlxsw_sp1_pci_id_table); +MODULE_DEVICE_TABLE(pci, mlxsw_sp2_pci_id_table); +MODULE_FIRMWARE(MLXSW_SP1_FW_FILENAME); diff --git 
a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h index 4a519d8edec8..016058961542 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h @@ -145,6 +145,9 @@ struct mlxsw_sp_acl; struct mlxsw_sp_counter_pool; struct mlxsw_sp_fid_core; struct mlxsw_sp_kvdl; +struct mlxsw_sp_kvdl_ops; +struct mlxsw_sp_mr_tcam_ops; +struct mlxsw_sp_acl_tcam_ops; struct mlxsw_sp { struct mlxsw_sp_port **ports; @@ -168,6 +171,13 @@ struct mlxsw_sp { struct mlxsw_sp_span_entry *entries; int entries_count; } span; + const struct mlxsw_fw_rev *req_rev; + const char *fw_filename; + const struct mlxsw_sp_kvdl_ops *kvdl_ops; + const struct mlxsw_afa_ops *afa_ops; + const struct mlxsw_afk_ops *afk_ops; + const struct mlxsw_sp_mr_tcam_ops *mr_tcam_ops; + const struct mlxsw_sp_acl_tcam_ops *acl_tcam_ops; }; static inline struct mlxsw_sp_upper * @@ -407,6 +417,8 @@ static inline void mlxsw_sp_port_dcb_fini(struct mlxsw_sp_port *mlxsw_sp_port) int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp); void mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp); int mlxsw_sp_netdevice_router_port_event(struct net_device *dev); +void mlxsw_sp_rif_macvlan_del(struct mlxsw_sp *mlxsw_sp, + const struct net_device *macvlan_dev); int mlxsw_sp_inetaddr_event(struct notifier_block *unused, unsigned long event, void *ptr); int mlxsw_sp_inetaddr_valid_event(struct notifier_block *unused, @@ -435,15 +447,62 @@ mlxsw_sp_port_vlan_router_leave(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan); void mlxsw_sp_rif_destroy(struct mlxsw_sp_rif *rif); /* spectrum_kvdl.c */ +enum mlxsw_sp_kvdl_entry_type { + MLXSW_SP_KVDL_ENTRY_TYPE_ADJ, + MLXSW_SP_KVDL_ENTRY_TYPE_ACTSET, + MLXSW_SP_KVDL_ENTRY_TYPE_PBS, + MLXSW_SP_KVDL_ENTRY_TYPE_MCRIGR, +}; + +static inline unsigned int +mlxsw_sp_kvdl_entry_size(enum mlxsw_sp_kvdl_entry_type type) +{ + switch (type) { + case MLXSW_SP_KVDL_ENTRY_TYPE_ADJ: /* fall through */ + case MLXSW_SP_KVDL_ENTRY_TYPE_ACTSET: /* fall through */ + case MLXSW_SP_KVDL_ENTRY_TYPE_PBS: /* fall through */ + case MLXSW_SP_KVDL_ENTRY_TYPE_MCRIGR: /* fall through */ + default: + return 1; + } +} + +struct mlxsw_sp_kvdl_ops { + size_t priv_size; + int (*init)(struct mlxsw_sp *mlxsw_sp, void *priv); + void (*fini)(struct mlxsw_sp *mlxsw_sp, void *priv); + int (*alloc)(struct mlxsw_sp *mlxsw_sp, void *priv, + enum mlxsw_sp_kvdl_entry_type type, + unsigned int entry_count, u32 *p_entry_index); + void (*free)(struct mlxsw_sp *mlxsw_sp, void *priv, + enum mlxsw_sp_kvdl_entry_type type, + unsigned int entry_count, int entry_index); + int (*alloc_size_query)(struct mlxsw_sp *mlxsw_sp, void *priv, + enum mlxsw_sp_kvdl_entry_type type, + unsigned int entry_count, + unsigned int *p_alloc_count); + int (*resources_register)(struct mlxsw_sp *mlxsw_sp, void *priv); +}; + int mlxsw_sp_kvdl_init(struct mlxsw_sp *mlxsw_sp); void mlxsw_sp_kvdl_fini(struct mlxsw_sp *mlxsw_sp); -int mlxsw_sp_kvdl_alloc(struct mlxsw_sp *mlxsw_sp, unsigned int entry_count, - u32 *p_entry_index); -void mlxsw_sp_kvdl_free(struct mlxsw_sp *mlxsw_sp, int entry_index); -int mlxsw_sp_kvdl_alloc_size_query(struct mlxsw_sp *mlxsw_sp, - unsigned int entry_count, - unsigned int *p_alloc_size); -int mlxsw_sp_kvdl_resources_register(struct mlxsw_core *mlxsw_core); +int mlxsw_sp_kvdl_alloc(struct mlxsw_sp *mlxsw_sp, + enum mlxsw_sp_kvdl_entry_type type, + unsigned int entry_count, u32 *p_entry_index); +void mlxsw_sp_kvdl_free(struct mlxsw_sp *mlxsw_sp, + enum mlxsw_sp_kvdl_entry_type type, + 
unsigned int entry_count, int entry_index); +int mlxsw_sp_kvdl_alloc_count_query(struct mlxsw_sp *mlxsw_sp, + enum mlxsw_sp_kvdl_entry_type type, + unsigned int entry_count, + unsigned int *p_alloc_count); + +/* spectrum1_kvdl.c */ +extern const struct mlxsw_sp_kvdl_ops mlxsw_sp1_kvdl_ops; +int mlxsw_sp1_kvdl_resources_register(struct mlxsw_core *mlxsw_core); + +/* spectrum2_kvdl.c */ +extern const struct mlxsw_sp_kvdl_ops mlxsw_sp2_kvdl_ops; struct mlxsw_sp_acl_rule_info { unsigned int priority; @@ -452,44 +511,14 @@ struct mlxsw_sp_acl_rule_info { unsigned int counter_index; }; -enum mlxsw_sp_acl_profile { - MLXSW_SP_ACL_PROFILE_FLOWER, -}; - -struct mlxsw_sp_acl_profile_ops { - size_t ruleset_priv_size; - int (*ruleset_add)(struct mlxsw_sp *mlxsw_sp, - void *priv, void *ruleset_priv); - void (*ruleset_del)(struct mlxsw_sp *mlxsw_sp, void *ruleset_priv); - int (*ruleset_bind)(struct mlxsw_sp *mlxsw_sp, void *ruleset_priv, - struct mlxsw_sp_port *mlxsw_sp_port, - bool ingress); - void (*ruleset_unbind)(struct mlxsw_sp *mlxsw_sp, void *ruleset_priv, - struct mlxsw_sp_port *mlxsw_sp_port, - bool ingress); - u16 (*ruleset_group_id)(void *ruleset_priv); - size_t rule_priv_size; - int (*rule_add)(struct mlxsw_sp *mlxsw_sp, - void *ruleset_priv, void *rule_priv, - struct mlxsw_sp_acl_rule_info *rulei); - void (*rule_del)(struct mlxsw_sp *mlxsw_sp, void *rule_priv); - int (*rule_activity_get)(struct mlxsw_sp *mlxsw_sp, void *rule_priv, - bool *activity); -}; - -struct mlxsw_sp_acl_ops { - size_t priv_size; - int (*init)(struct mlxsw_sp *mlxsw_sp, void *priv); - void (*fini)(struct mlxsw_sp *mlxsw_sp, void *priv); - const struct mlxsw_sp_acl_profile_ops * - (*profile_ops)(struct mlxsw_sp *mlxsw_sp, - enum mlxsw_sp_acl_profile profile); -}; - struct mlxsw_sp_acl_block; struct mlxsw_sp_acl_ruleset; /* spectrum_acl.c */ +enum mlxsw_sp_acl_profile { + MLXSW_SP_ACL_PROFILE_FLOWER, +}; + struct mlxsw_afk *mlxsw_sp_acl_afk(struct mlxsw_sp_acl *acl); struct mlxsw_sp *mlxsw_sp_acl_block_mlxsw_sp(struct mlxsw_sp_acl_block *block); unsigned int mlxsw_sp_acl_block_rule_count(struct mlxsw_sp_acl_block *block); @@ -582,7 +611,51 @@ int mlxsw_sp_acl_init(struct mlxsw_sp *mlxsw_sp); void mlxsw_sp_acl_fini(struct mlxsw_sp *mlxsw_sp); /* spectrum_acl_tcam.c */ -extern const struct mlxsw_sp_acl_ops mlxsw_sp_acl_tcam_ops; +struct mlxsw_sp_acl_tcam; +struct mlxsw_sp_acl_tcam_region; + +struct mlxsw_sp_acl_tcam_ops { + enum mlxsw_reg_ptar_key_type key_type; + size_t priv_size; + int (*init)(struct mlxsw_sp *mlxsw_sp, void *priv, + struct mlxsw_sp_acl_tcam *tcam); + void (*fini)(struct mlxsw_sp *mlxsw_sp, void *priv); + size_t region_priv_size; + int (*region_init)(struct mlxsw_sp *mlxsw_sp, void *region_priv, + struct mlxsw_sp_acl_tcam_region *region); + void (*region_fini)(struct mlxsw_sp *mlxsw_sp, void *region_priv); + int (*region_associate)(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_tcam_region *region); + size_t chunk_priv_size; + void (*chunk_init)(void *region_priv, void *chunk_priv, + unsigned int priority); + void (*chunk_fini)(void *chunk_priv); + size_t entry_priv_size; + int (*entry_add)(struct mlxsw_sp *mlxsw_sp, + void *region_priv, void *chunk_priv, + void *entry_priv, + struct mlxsw_sp_acl_rule_info *rulei); + void (*entry_del)(struct mlxsw_sp *mlxsw_sp, + void *region_priv, void *chunk_priv, + void *entry_priv); + int (*entry_activity_get)(struct mlxsw_sp *mlxsw_sp, + void *region_priv, void *entry_priv, + bool *activity); +}; + +/* spectrum1_acl_tcam.c */ +extern const struct 
mlxsw_sp_acl_tcam_ops mlxsw_sp1_acl_tcam_ops; + +/* spectrum2_acl_tcam.c */ +extern const struct mlxsw_sp_acl_tcam_ops mlxsw_sp2_acl_tcam_ops; + +/* spectrum_acl_flex_actions.c */ +extern const struct mlxsw_afa_ops mlxsw_sp1_act_afa_ops; +extern const struct mlxsw_afa_ops mlxsw_sp2_act_afa_ops; + +/* spectrum_acl_flex_keys.c */ +extern const struct mlxsw_afk_ops mlxsw_sp1_afk_ops; +extern const struct mlxsw_afk_ops mlxsw_sp2_afk_ops; /* spectrum_flower.c */ int mlxsw_sp_flower_replace(struct mlxsw_sp *mlxsw_sp, @@ -631,4 +704,40 @@ void mlxsw_sp_port_fids_fini(struct mlxsw_sp_port *mlxsw_sp_port); int mlxsw_sp_fids_init(struct mlxsw_sp *mlxsw_sp); void mlxsw_sp_fids_fini(struct mlxsw_sp *mlxsw_sp); +/* spectrum_mr.c */ +enum mlxsw_sp_mr_route_prio { + MLXSW_SP_MR_ROUTE_PRIO_SG, + MLXSW_SP_MR_ROUTE_PRIO_STARG, + MLXSW_SP_MR_ROUTE_PRIO_CATCHALL, + __MLXSW_SP_MR_ROUTE_PRIO_MAX +}; + +#define MLXSW_SP_MR_ROUTE_PRIO_MAX (__MLXSW_SP_MR_ROUTE_PRIO_MAX - 1) + +struct mlxsw_sp_mr_route_key; + +struct mlxsw_sp_mr_tcam_ops { + size_t priv_size; + int (*init)(struct mlxsw_sp *mlxsw_sp, void *priv); + void (*fini)(void *priv); + size_t route_priv_size; + int (*route_create)(struct mlxsw_sp *mlxsw_sp, void *priv, + void *route_priv, + struct mlxsw_sp_mr_route_key *key, + struct mlxsw_afa_block *afa_block, + enum mlxsw_sp_mr_route_prio prio); + void (*route_destroy)(struct mlxsw_sp *mlxsw_sp, void *priv, + void *route_priv, + struct mlxsw_sp_mr_route_key *key); + int (*route_update)(struct mlxsw_sp *mlxsw_sp, void *route_priv, + struct mlxsw_sp_mr_route_key *key, + struct mlxsw_afa_block *afa_block); +}; + +/* spectrum1_mr_tcam.c */ +extern const struct mlxsw_sp_mr_tcam_ops mlxsw_sp1_mr_tcam_ops; + +/* spectrum2_mr_tcam.c */ +extern const struct mlxsw_sp_mr_tcam_ops mlxsw_sp2_mr_tcam_ops; + #endif diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum1_acl_tcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum1_acl_tcam.c new file mode 100644 index 000000000000..d339ec43d79c --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum1_acl_tcam.c @@ -0,0 +1,253 @@ +/* + * drivers/net/ethernet/mellanox/mlxsw/spectrum1_acl_tcam.c + * Copyright (c) 2017-2018 Mellanox Technologies. All rights reserved. + * Copyright (c) 2017-2018 Jiri Pirko <jiri@mellanox.com> + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the names of the copyright holders nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#include <linux/kernel.h> +#include <linux/slab.h> + +#include "reg.h" +#include "core.h" +#include "spectrum.h" +#include "spectrum_acl_tcam.h" + +struct mlxsw_sp1_acl_tcam_region { + struct mlxsw_sp_acl_ctcam_region cregion; + struct mlxsw_sp_acl_tcam_region *region; + struct { + struct mlxsw_sp_acl_ctcam_chunk cchunk; + struct mlxsw_sp_acl_ctcam_entry centry; + struct mlxsw_sp_acl_rule_info *rulei; + } catchall; +}; + +struct mlxsw_sp1_acl_tcam_chunk { + struct mlxsw_sp_acl_ctcam_chunk cchunk; +}; + +struct mlxsw_sp1_acl_tcam_entry { + struct mlxsw_sp_acl_ctcam_entry centry; +}; + +static int mlxsw_sp1_acl_tcam_init(struct mlxsw_sp *mlxsw_sp, void *priv, + struct mlxsw_sp_acl_tcam *tcam) +{ + return 0; +} + +static void mlxsw_sp1_acl_tcam_fini(struct mlxsw_sp *mlxsw_sp, void *priv) +{ +} + +static int +mlxsw_sp1_acl_ctcam_region_catchall_add(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp1_acl_tcam_region *region) +{ + struct mlxsw_sp_acl_rule_info *rulei; + int err; + + mlxsw_sp_acl_ctcam_chunk_init(&region->cregion, + &region->catchall.cchunk, + MLXSW_SP_ACL_TCAM_CATCHALL_PRIO); + rulei = mlxsw_sp_acl_rulei_create(mlxsw_sp->acl); + if (IS_ERR(rulei)) { + err = PTR_ERR(rulei); + goto err_rulei_create; + } + err = mlxsw_sp_acl_rulei_act_continue(rulei); + if (WARN_ON(err)) + goto err_rulei_act_continue; + err = mlxsw_sp_acl_rulei_commit(rulei); + if (err) + goto err_rulei_commit; + err = mlxsw_sp_acl_ctcam_entry_add(mlxsw_sp, &region->cregion, + &region->catchall.cchunk, + &region->catchall.centry, + rulei, false); + if (err) + goto err_entry_add; + region->catchall.rulei = rulei; + return 0; + +err_entry_add: +err_rulei_commit: +err_rulei_act_continue: + mlxsw_sp_acl_rulei_destroy(rulei); +err_rulei_create: + mlxsw_sp_acl_ctcam_chunk_fini(&region->catchall.cchunk); + return err; +} + +static void +mlxsw_sp1_acl_ctcam_region_catchall_del(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp1_acl_tcam_region *region) +{ + struct mlxsw_sp_acl_rule_info *rulei = region->catchall.rulei; + + mlxsw_sp_acl_ctcam_entry_del(mlxsw_sp, &region->cregion, + &region->catchall.cchunk, + &region->catchall.centry); + mlxsw_sp_acl_rulei_destroy(rulei); + mlxsw_sp_acl_ctcam_chunk_fini(&region->catchall.cchunk); +} + +static int +mlxsw_sp1_acl_tcam_region_init(struct mlxsw_sp *mlxsw_sp, void *region_priv, + struct mlxsw_sp_acl_tcam_region *_region) +{ + struct mlxsw_sp1_acl_tcam_region *region = region_priv; + int err; + + err = mlxsw_sp_acl_ctcam_region_init(mlxsw_sp, &region->cregion, + _region); + if (err) + return err; + err = mlxsw_sp1_acl_ctcam_region_catchall_add(mlxsw_sp, region); + if (err) + goto err_catchall_add; + region->region = _region; + return 0; + +err_catchall_add: + mlxsw_sp_acl_ctcam_region_fini(&region->cregion); + return err; +} + +static void +mlxsw_sp1_acl_tcam_region_fini(struct mlxsw_sp *mlxsw_sp, void *region_priv) +{ + struct mlxsw_sp1_acl_tcam_region *region = region_priv; + + mlxsw_sp1_acl_ctcam_region_catchall_del(mlxsw_sp, region); + mlxsw_sp_acl_ctcam_region_fini(&region->cregion);
+} + +static int +mlxsw_sp1_acl_tcam_region_associate(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_tcam_region *region) +{ + return 0; +} + +static void mlxsw_sp1_acl_tcam_chunk_init(void *region_priv, void *chunk_priv, + unsigned int priority) +{ + struct mlxsw_sp1_acl_tcam_region *region = region_priv; + struct mlxsw_sp1_acl_tcam_chunk *chunk = chunk_priv; + + mlxsw_sp_acl_ctcam_chunk_init(&region->cregion, &chunk->cchunk, + priority); +} + +static void mlxsw_sp1_acl_tcam_chunk_fini(void *chunk_priv) +{ + struct mlxsw_sp1_acl_tcam_chunk *chunk = chunk_priv; + + mlxsw_sp_acl_ctcam_chunk_fini(&chunk->cchunk); +} + +static int mlxsw_sp1_acl_tcam_entry_add(struct mlxsw_sp *mlxsw_sp, + void *region_priv, void *chunk_priv, + void *entry_priv, + struct mlxsw_sp_acl_rule_info *rulei) +{ + struct mlxsw_sp1_acl_tcam_region *region = region_priv; + struct mlxsw_sp1_acl_tcam_chunk *chunk = chunk_priv; + struct mlxsw_sp1_acl_tcam_entry *entry = entry_priv; + + return mlxsw_sp_acl_ctcam_entry_add(mlxsw_sp, &region->cregion, + &chunk->cchunk, &entry->centry, + rulei, false); +} + +static void mlxsw_sp1_acl_tcam_entry_del(struct mlxsw_sp *mlxsw_sp, + void *region_priv, void *chunk_priv, + void *entry_priv) +{ + struct mlxsw_sp1_acl_tcam_region *region = region_priv; + struct mlxsw_sp1_acl_tcam_chunk *chunk = chunk_priv; + struct mlxsw_sp1_acl_tcam_entry *entry = entry_priv; + + mlxsw_sp_acl_ctcam_entry_del(mlxsw_sp, &region->cregion, + &chunk->cchunk, &entry->centry); +} + +static int +mlxsw_sp1_acl_tcam_region_entry_activity_get(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_tcam_region *_region, + unsigned int offset, + bool *activity) +{ + char ptce2_pl[MLXSW_REG_PTCE2_LEN]; + int err; + + mlxsw_reg_ptce2_pack(ptce2_pl, true, MLXSW_REG_PTCE2_OP_QUERY_CLEAR_ON_READ, + _region->tcam_region_info, offset, 0); + err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptce2), ptce2_pl); + if (err) + return err; + *activity = mlxsw_reg_ptce2_a_get(ptce2_pl); + return 0; +} + +static int +mlxsw_sp1_acl_tcam_entry_activity_get(struct mlxsw_sp *mlxsw_sp, + void *region_priv, void *entry_priv, + bool *activity) +{ + struct mlxsw_sp1_acl_tcam_region *region = region_priv; + struct mlxsw_sp1_acl_tcam_entry *entry = entry_priv; + unsigned int offset; + + offset = mlxsw_sp_acl_ctcam_entry_offset(&entry->centry); + return mlxsw_sp1_acl_tcam_region_entry_activity_get(mlxsw_sp, + region->region, + offset, activity); +} + +const struct mlxsw_sp_acl_tcam_ops mlxsw_sp1_acl_tcam_ops = { + .key_type = MLXSW_REG_PTAR_KEY_TYPE_FLEX, + .priv_size = 0, + .init = mlxsw_sp1_acl_tcam_init, + .fini = mlxsw_sp1_acl_tcam_fini, + .region_priv_size = sizeof(struct mlxsw_sp1_acl_tcam_region), + .region_init = mlxsw_sp1_acl_tcam_region_init, + .region_fini = mlxsw_sp1_acl_tcam_region_fini, + .region_associate = mlxsw_sp1_acl_tcam_region_associate, + .chunk_priv_size = sizeof(struct mlxsw_sp1_acl_tcam_chunk), + .chunk_init = mlxsw_sp1_acl_tcam_chunk_init, + .chunk_fini = mlxsw_sp1_acl_tcam_chunk_fini, + .entry_priv_size = sizeof(struct mlxsw_sp1_acl_tcam_entry), + .entry_add = mlxsw_sp1_acl_tcam_entry_add, + .entry_del = mlxsw_sp1_acl_tcam_entry_del, + .entry_activity_get = mlxsw_sp1_acl_tcam_entry_activity_get, +}; diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum1_kvdl.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum1_kvdl.c new file mode 100644 index 000000000000..0d4583894a97 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum1_kvdl.c @@ -0,0 +1,459 @@ +/* + * drivers/net/ethernet/mellanox/mlxsw/spectrum1_kvdl.c + *
Copyright (c) 2018 Mellanox Technologies. All rights reserved. + * Copyright (c) 2018 Jiri Pirko <jiri@mellanox.com> + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the names of the copyright holders nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#include <linux/kernel.h> +#include <linux/bitops.h> + +#include "spectrum.h" + +#define MLXSW_SP1_KVDL_SINGLE_BASE 0 +#define MLXSW_SP1_KVDL_SINGLE_SIZE 16384 +#define MLXSW_SP1_KVDL_SINGLE_END \ + (MLXSW_SP1_KVDL_SINGLE_SIZE + MLXSW_SP1_KVDL_SINGLE_BASE - 1) + +#define MLXSW_SP1_KVDL_CHUNKS_BASE \ + (MLXSW_SP1_KVDL_SINGLE_BASE + MLXSW_SP1_KVDL_SINGLE_SIZE) +#define MLXSW_SP1_KVDL_CHUNKS_SIZE 49152 +#define MLXSW_SP1_KVDL_CHUNKS_END \ + (MLXSW_SP1_KVDL_CHUNKS_SIZE + MLXSW_SP1_KVDL_CHUNKS_BASE - 1) + +#define MLXSW_SP1_KVDL_LARGE_CHUNKS_BASE \ + (MLXSW_SP1_KVDL_CHUNKS_BASE + MLXSW_SP1_KVDL_CHUNKS_SIZE) +#define MLXSW_SP1_KVDL_LARGE_CHUNKS_SIZE \ + (MLXSW_SP_KVD_LINEAR_SIZE - MLXSW_SP1_KVDL_LARGE_CHUNKS_BASE) +#define MLXSW_SP1_KVDL_LARGE_CHUNKS_END \ + (MLXSW_SP1_KVDL_LARGE_CHUNKS_SIZE + MLXSW_SP1_KVDL_LARGE_CHUNKS_BASE - 1) + +#define MLXSW_SP1_KVDL_SINGLE_ALLOC_SIZE 1 +#define MLXSW_SP1_KVDL_CHUNKS_ALLOC_SIZE 32 +#define MLXSW_SP1_KVDL_LARGE_CHUNKS_ALLOC_SIZE 512 + +struct mlxsw_sp1_kvdl_part_info { + unsigned int part_index; + unsigned int start_index; + unsigned int end_index; + unsigned int alloc_size; + enum mlxsw_sp_resource_id resource_id; +}; + +enum mlxsw_sp1_kvdl_part_id { + MLXSW_SP1_KVDL_PART_ID_SINGLE, + MLXSW_SP1_KVDL_PART_ID_CHUNKS, + MLXSW_SP1_KVDL_PART_ID_LARGE_CHUNKS, +}; + +#define MLXSW_SP1_KVDL_PART_INFO(id) \ +[MLXSW_SP1_KVDL_PART_ID_##id] = { \ + .start_index = MLXSW_SP1_KVDL_##id##_BASE, \ + .end_index = MLXSW_SP1_KVDL_##id##_END, \ + .alloc_size = MLXSW_SP1_KVDL_##id##_ALLOC_SIZE, \ + .resource_id = MLXSW_SP_RESOURCE_KVD_LINEAR_##id, \ +} + +static const struct mlxsw_sp1_kvdl_part_info mlxsw_sp1_kvdl_parts_info[] = { + MLXSW_SP1_KVDL_PART_INFO(SINGLE), + MLXSW_SP1_KVDL_PART_INFO(CHUNKS), + MLXSW_SP1_KVDL_PART_INFO(LARGE_CHUNKS), +}; + +#define MLXSW_SP1_KVDL_PARTS_INFO_LEN ARRAY_SIZE(mlxsw_sp1_kvdl_parts_info) + +struct mlxsw_sp1_kvdl_part { + struct mlxsw_sp1_kvdl_part_info info; + unsigned long usage[0]; /* Entries */ +}; + +struct mlxsw_sp1_kvdl { + struct mlxsw_sp1_kvdl_part *parts[MLXSW_SP1_KVDL_PARTS_INFO_LEN]; +}; + +static struct mlxsw_sp1_kvdl_part * +mlxsw_sp1_kvdl_alloc_size_part(struct mlxsw_sp1_kvdl *kvdl, + unsigned int alloc_size) +{ + struct mlxsw_sp1_kvdl_part *part, *min_part = NULL; + int i; + + for (i = 0; i < MLXSW_SP1_KVDL_PARTS_INFO_LEN; i++) { + part = kvdl->parts[i]; + if (alloc_size <= part->info.alloc_size && + (!min_part || + part->info.alloc_size <= min_part->info.alloc_size)) + min_part = part; + } + + return min_part ?: ERR_PTR(-ENOBUFS); +} + +static struct mlxsw_sp1_kvdl_part * +mlxsw_sp1_kvdl_index_part(struct mlxsw_sp1_kvdl *kvdl, u32 kvdl_index) +{ + struct mlxsw_sp1_kvdl_part *part; + int i; + + for (i = 0; i < MLXSW_SP1_KVDL_PARTS_INFO_LEN; i++) { + part = kvdl->parts[i]; + if (kvdl_index >= part->info.start_index && + kvdl_index <= part->info.end_index) + return part; + } + + return ERR_PTR(-EINVAL); +} + +static u32 +mlxsw_sp1_kvdl_to_kvdl_index(const struct mlxsw_sp1_kvdl_part_info *info, + unsigned int entry_index) +{ + return info->start_index + entry_index * info->alloc_size; +} + +static unsigned int +mlxsw_sp1_kvdl_to_entry_index(const struct mlxsw_sp1_kvdl_part_info *info, + u32 kvdl_index) +{ + return (kvdl_index - info->start_index) / info->alloc_size; +} + +static int mlxsw_sp1_kvdl_part_alloc(struct mlxsw_sp1_kvdl_part *part, + u32 *p_kvdl_index) +{ + const struct mlxsw_sp1_kvdl_part_info *info = &part->info; + unsigned int entry_index, nr_entries; + + nr_entries = (info->end_index - info->start_index + 1) 
/ + info->alloc_size; + entry_index = find_first_zero_bit(part->usage, nr_entries); + if (entry_index == nr_entries) + return -ENOBUFS; + __set_bit(entry_index, part->usage); + + *p_kvdl_index = mlxsw_sp1_kvdl_to_kvdl_index(info, entry_index); + + return 0; +} + +static void mlxsw_sp1_kvdl_part_free(struct mlxsw_sp1_kvdl_part *part, + u32 kvdl_index) +{ + const struct mlxsw_sp1_kvdl_part_info *info = &part->info; + unsigned int entry_index; + + entry_index = mlxsw_sp1_kvdl_to_entry_index(info, kvdl_index); + __clear_bit(entry_index, part->usage); +} + +static int mlxsw_sp1_kvdl_alloc(struct mlxsw_sp *mlxsw_sp, void *priv, + enum mlxsw_sp_kvdl_entry_type type, + unsigned int entry_count, + u32 *p_entry_index) +{ + struct mlxsw_sp1_kvdl *kvdl = priv; + struct mlxsw_sp1_kvdl_part *part; + + /* Find partition with smallest allocation size satisfying the + * requested size. + */ + part = mlxsw_sp1_kvdl_alloc_size_part(kvdl, entry_count); + if (IS_ERR(part)) + return PTR_ERR(part); + + return mlxsw_sp1_kvdl_part_alloc(part, p_entry_index); +} + +static void mlxsw_sp1_kvdl_free(struct mlxsw_sp *mlxsw_sp, void *priv, + enum mlxsw_sp_kvdl_entry_type type, + unsigned int entry_count, int entry_index) +{ + struct mlxsw_sp1_kvdl *kvdl = priv; + struct mlxsw_sp1_kvdl_part *part; + + part = mlxsw_sp1_kvdl_index_part(kvdl, entry_index); + if (IS_ERR(part)) + return; + mlxsw_sp1_kvdl_part_free(part, entry_index); +} + +static int mlxsw_sp1_kvdl_alloc_size_query(struct mlxsw_sp *mlxsw_sp, + void *priv, + enum mlxsw_sp_kvdl_entry_type type, + unsigned int entry_count, + unsigned int *p_alloc_size) +{ + struct mlxsw_sp1_kvdl *kvdl = priv; + struct mlxsw_sp1_kvdl_part *part; + + part = mlxsw_sp1_kvdl_alloc_size_part(kvdl, entry_count); + if (IS_ERR(part)) + return PTR_ERR(part); + + *p_alloc_size = part->info.alloc_size; + + return 0; +} + +static void mlxsw_sp1_kvdl_part_update(struct mlxsw_sp1_kvdl_part *part, + struct mlxsw_sp1_kvdl_part *part_prev, + unsigned int size) +{ + if (!part_prev) { + part->info.end_index = size - 1; + } else { + part->info.start_index = part_prev->info.end_index + 1; + part->info.end_index = part->info.start_index + size - 1; + } +} + +static struct mlxsw_sp1_kvdl_part * +mlxsw_sp1_kvdl_part_init(struct mlxsw_sp *mlxsw_sp, + const struct mlxsw_sp1_kvdl_part_info *info, + struct mlxsw_sp1_kvdl_part *part_prev) +{ + struct devlink *devlink = priv_to_devlink(mlxsw_sp->core); + struct mlxsw_sp1_kvdl_part *part; + bool need_update = true; + unsigned int nr_entries; + size_t usage_size; + u64 resource_size; + int err; + + err = devlink_resource_size_get(devlink, info->resource_id, + &resource_size); + if (err) { + need_update = false; + resource_size = info->end_index - info->start_index + 1; + } + + nr_entries = div_u64(resource_size, info->alloc_size); + usage_size = BITS_TO_LONGS(nr_entries) * sizeof(unsigned long); + part = kzalloc(sizeof(*part) + usage_size, GFP_KERNEL); + if (!part) + return ERR_PTR(-ENOMEM); + + memcpy(&part->info, info, sizeof(part->info)); + + if (need_update) + mlxsw_sp1_kvdl_part_update(part, part_prev, resource_size); + return part; +} + +static void mlxsw_sp1_kvdl_part_fini(struct mlxsw_sp1_kvdl_part *part) +{ + kfree(part); +} + +static int mlxsw_sp1_kvdl_parts_init(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp1_kvdl *kvdl) +{ + const struct mlxsw_sp1_kvdl_part_info *info; + struct mlxsw_sp1_kvdl_part *part_prev = NULL; + int err, i; + + for (i = 0; i < MLXSW_SP1_KVDL_PARTS_INFO_LEN; i++) { + info = &mlxsw_sp1_kvdl_parts_info[i]; + kvdl->parts[i] = 
mlxsw_sp1_kvdl_part_init(mlxsw_sp, info, + part_prev); + if (IS_ERR(kvdl->parts[i])) { + err = PTR_ERR(kvdl->parts[i]); + goto err_kvdl_part_init; + } + part_prev = kvdl->parts[i]; + } + return 0; + +err_kvdl_part_init: + for (i--; i >= 0; i--) + mlxsw_sp1_kvdl_part_fini(kvdl->parts[i]); + return err; +} + +static void mlxsw_sp1_kvdl_parts_fini(struct mlxsw_sp1_kvdl *kvdl) +{ + int i; + + for (i = 0; i < MLXSW_SP1_KVDL_PARTS_INFO_LEN; i++) + mlxsw_sp1_kvdl_part_fini(kvdl->parts[i]); +} + +static u64 mlxsw_sp1_kvdl_part_occ(struct mlxsw_sp1_kvdl_part *part) +{ + const struct mlxsw_sp1_kvdl_part_info *info = &part->info; + unsigned int nr_entries; + int bit = -1; + u64 occ = 0; + + nr_entries = (info->end_index - + info->start_index + 1) / + info->alloc_size; + while ((bit = find_next_bit(part->usage, nr_entries, bit + 1)) + < nr_entries) + occ += info->alloc_size; + return occ; +} + +static u64 mlxsw_sp1_kvdl_occ_get(void *priv) +{ + const struct mlxsw_sp1_kvdl *kvdl = priv; + u64 occ = 0; + int i; + + for (i = 0; i < MLXSW_SP1_KVDL_PARTS_INFO_LEN; i++) + occ += mlxsw_sp1_kvdl_part_occ(kvdl->parts[i]); + + return occ; +} + +static u64 mlxsw_sp1_kvdl_single_occ_get(void *priv) +{ + const struct mlxsw_sp1_kvdl *kvdl = priv; + struct mlxsw_sp1_kvdl_part *part; + + part = kvdl->parts[MLXSW_SP1_KVDL_PART_ID_SINGLE]; + return mlxsw_sp1_kvdl_part_occ(part); +} + +static u64 mlxsw_sp1_kvdl_chunks_occ_get(void *priv) +{ + const struct mlxsw_sp1_kvdl *kvdl = priv; + struct mlxsw_sp1_kvdl_part *part; + + part = kvdl->parts[MLXSW_SP1_KVDL_PART_ID_CHUNKS]; + return mlxsw_sp1_kvdl_part_occ(part); +} + +static u64 mlxsw_sp1_kvdl_large_chunks_occ_get(void *priv) +{ + const struct mlxsw_sp1_kvdl *kvdl = priv; + struct mlxsw_sp1_kvdl_part *part; + + part = kvdl->parts[MLXSW_SP1_KVDL_PART_ID_LARGE_CHUNKS]; + return mlxsw_sp1_kvdl_part_occ(part); +} + +static int mlxsw_sp1_kvdl_init(struct mlxsw_sp *mlxsw_sp, void *priv) +{ + struct devlink *devlink = priv_to_devlink(mlxsw_sp->core); + struct mlxsw_sp1_kvdl *kvdl = priv; + int err; + + err = mlxsw_sp1_kvdl_parts_init(mlxsw_sp, kvdl); + if (err) + return err; + devlink_resource_occ_get_register(devlink, + MLXSW_SP_RESOURCE_KVD_LINEAR, + mlxsw_sp1_kvdl_occ_get, + kvdl); + devlink_resource_occ_get_register(devlink, + MLXSW_SP_RESOURCE_KVD_LINEAR_SINGLE, + mlxsw_sp1_kvdl_single_occ_get, + kvdl); + devlink_resource_occ_get_register(devlink, + MLXSW_SP_RESOURCE_KVD_LINEAR_CHUNKS, + mlxsw_sp1_kvdl_chunks_occ_get, + kvdl); + devlink_resource_occ_get_register(devlink, + MLXSW_SP_RESOURCE_KVD_LINEAR_LARGE_CHUNKS, + mlxsw_sp1_kvdl_large_chunks_occ_get, + kvdl); + return 0; +} + +static void mlxsw_sp1_kvdl_fini(struct mlxsw_sp *mlxsw_sp, void *priv) +{ + struct devlink *devlink = priv_to_devlink(mlxsw_sp->core); + struct mlxsw_sp1_kvdl *kvdl = priv; + + devlink_resource_occ_get_unregister(devlink, + MLXSW_SP_RESOURCE_KVD_LINEAR_LARGE_CHUNKS); + devlink_resource_occ_get_unregister(devlink, + MLXSW_SP_RESOURCE_KVD_LINEAR_CHUNKS); + devlink_resource_occ_get_unregister(devlink, + MLXSW_SP_RESOURCE_KVD_LINEAR_SINGLE); + devlink_resource_occ_get_unregister(devlink, + MLXSW_SP_RESOURCE_KVD_LINEAR); + mlxsw_sp1_kvdl_parts_fini(kvdl); +} + +const struct mlxsw_sp_kvdl_ops mlxsw_sp1_kvdl_ops = { + .priv_size = sizeof(struct mlxsw_sp1_kvdl), + .init = mlxsw_sp1_kvdl_init, + .fini = mlxsw_sp1_kvdl_fini, + .alloc = mlxsw_sp1_kvdl_alloc, + .free = mlxsw_sp1_kvdl_free, + .alloc_size_query = mlxsw_sp1_kvdl_alloc_size_query, +}; + +int mlxsw_sp1_kvdl_resources_register(struct mlxsw_core 
*mlxsw_core) +{ + struct devlink *devlink = priv_to_devlink(mlxsw_core); + static struct devlink_resource_size_params size_params; + u32 kvdl_max_size; + int err; + + kvdl_max_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE) - + MLXSW_CORE_RES_GET(mlxsw_core, KVD_SINGLE_MIN_SIZE) - + MLXSW_CORE_RES_GET(mlxsw_core, KVD_DOUBLE_MIN_SIZE); + + devlink_resource_size_params_init(&size_params, 0, kvdl_max_size, + MLXSW_SP1_KVDL_SINGLE_ALLOC_SIZE, + DEVLINK_RESOURCE_UNIT_ENTRY); + err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_LINEAR_SINGLES, + MLXSW_SP1_KVDL_SINGLE_SIZE, + MLXSW_SP_RESOURCE_KVD_LINEAR_SINGLE, + MLXSW_SP_RESOURCE_KVD_LINEAR, + &size_params); + if (err) + return err; + + devlink_resource_size_params_init(&size_params, 0, kvdl_max_size, + MLXSW_SP1_KVDL_CHUNKS_ALLOC_SIZE, + DEVLINK_RESOURCE_UNIT_ENTRY); + err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_LINEAR_CHUNKS, + MLXSW_SP1_KVDL_CHUNKS_SIZE, + MLXSW_SP_RESOURCE_KVD_LINEAR_CHUNKS, + MLXSW_SP_RESOURCE_KVD_LINEAR, + &size_params); + if (err) + return err; + + devlink_resource_size_params_init(&size_params, 0, kvdl_max_size, + MLXSW_SP1_KVDL_LARGE_CHUNKS_ALLOC_SIZE, + DEVLINK_RESOURCE_UNIT_ENTRY); + err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_LINEAR_LARGE_CHUNKS, + MLXSW_SP1_KVDL_LARGE_CHUNKS_SIZE, + MLXSW_SP_RESOURCE_KVD_LINEAR_LARGE_CHUNKS, + MLXSW_SP_RESOURCE_KVD_LINEAR, + &size_params); + return err; +} diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum1_mr_tcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum1_mr_tcam.c new file mode 100644 index 000000000000..fc649fe7134e --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum1_mr_tcam.c @@ -0,0 +1,374 @@ +/* + * drivers/net/ethernet/mellanox/mlxsw/spectrum1_mr_tcam.c + * Copyright (c) 2017-2018 Mellanox Technologies. All rights reserved. + * Copyright (c) 2017 Yotam Gigi <yotamg@mellanox.com> + * Copyright (c) 2018 Jiri Pirko <jiri@mellanox.com> + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the names of the copyright holders nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#include <linux/kernel.h> +#include <linux/parman.h> + +#include "reg.h" +#include "spectrum.h" +#include "core_acl_flex_actions.h" +#include "spectrum_mr.h" + +struct mlxsw_sp1_mr_tcam_region { + struct mlxsw_sp *mlxsw_sp; + enum mlxsw_reg_rtar_key_type rtar_key_type; + struct parman *parman; + struct parman_prio *parman_prios; +}; + +struct mlxsw_sp1_mr_tcam { + struct mlxsw_sp1_mr_tcam_region tcam_regions[MLXSW_SP_L3_PROTO_MAX]; +}; + +struct mlxsw_sp1_mr_tcam_route { + struct parman_item parman_item; + struct parman_prio *parman_prio; +}; + +static int mlxsw_sp1_mr_tcam_route_replace(struct mlxsw_sp *mlxsw_sp, + struct parman_item *parman_item, + struct mlxsw_sp_mr_route_key *key, + struct mlxsw_afa_block *afa_block) +{ + char rmft2_pl[MLXSW_REG_RMFT2_LEN]; + + switch (key->proto) { + case MLXSW_SP_L3_PROTO_IPV4: + mlxsw_reg_rmft2_ipv4_pack(rmft2_pl, true, parman_item->index, + key->vrid, + MLXSW_REG_RMFT2_IRIF_MASK_IGNORE, 0, + ntohl(key->group.addr4), + ntohl(key->group_mask.addr4), + ntohl(key->source.addr4), + ntohl(key->source_mask.addr4), + mlxsw_afa_block_first_set(afa_block)); + break; + case MLXSW_SP_L3_PROTO_IPV6: + mlxsw_reg_rmft2_ipv6_pack(rmft2_pl, true, parman_item->index, + key->vrid, + MLXSW_REG_RMFT2_IRIF_MASK_IGNORE, 0, + key->group.addr6, + key->group_mask.addr6, + key->source.addr6, + key->source_mask.addr6, + mlxsw_afa_block_first_set(afa_block)); + } + + return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rmft2), rmft2_pl); +} + +static int mlxsw_sp1_mr_tcam_route_remove(struct mlxsw_sp *mlxsw_sp, + struct parman_item *parman_item, + struct mlxsw_sp_mr_route_key *key) +{ + struct in6_addr zero_addr = IN6ADDR_ANY_INIT; + char rmft2_pl[MLXSW_REG_RMFT2_LEN]; + + switch (key->proto) { + case MLXSW_SP_L3_PROTO_IPV4: + mlxsw_reg_rmft2_ipv4_pack(rmft2_pl, false, parman_item->index, + key->vrid, 0, 0, 0, 0, 0, 0, NULL); + break; + case MLXSW_SP_L3_PROTO_IPV6: + mlxsw_reg_rmft2_ipv6_pack(rmft2_pl, false, parman_item->index, + key->vrid, 0, 0, zero_addr, zero_addr, + zero_addr, zero_addr, NULL); + break; + } + + return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rmft2), rmft2_pl); +} + +static struct mlxsw_sp1_mr_tcam_region * +mlxsw_sp1_mr_tcam_protocol_region(struct mlxsw_sp1_mr_tcam *mr_tcam, + enum mlxsw_sp_l3proto proto) +{ + return &mr_tcam->tcam_regions[proto]; +} + +static int +mlxsw_sp1_mr_tcam_route_parman_item_add(struct mlxsw_sp1_mr_tcam *mr_tcam, + struct mlxsw_sp1_mr_tcam_route *route, + struct mlxsw_sp_mr_route_key *key, + enum mlxsw_sp_mr_route_prio prio) +{ + struct mlxsw_sp1_mr_tcam_region *tcam_region; + int err; + + tcam_region = mlxsw_sp1_mr_tcam_protocol_region(mr_tcam, key->proto); + err = parman_item_add(tcam_region->parman, + &tcam_region->parman_prios[prio], + &route->parman_item); + if (err) + return err; + + route->parman_prio = &tcam_region->parman_prios[prio]; + return 0; +} + +static void +mlxsw_sp1_mr_tcam_route_parman_item_remove(struct mlxsw_sp1_mr_tcam *mr_tcam, + struct mlxsw_sp1_mr_tcam_route *route, + 
struct mlxsw_sp_mr_route_key *key) +{ + struct mlxsw_sp1_mr_tcam_region *tcam_region; + + tcam_region = mlxsw_sp1_mr_tcam_protocol_region(mr_tcam, key->proto); + parman_item_remove(tcam_region->parman, + route->parman_prio, &route->parman_item); +} + +static int +mlxsw_sp1_mr_tcam_route_create(struct mlxsw_sp *mlxsw_sp, void *priv, + void *route_priv, + struct mlxsw_sp_mr_route_key *key, + struct mlxsw_afa_block *afa_block, + enum mlxsw_sp_mr_route_prio prio) +{ + struct mlxsw_sp1_mr_tcam_route *route = route_priv; + struct mlxsw_sp1_mr_tcam *mr_tcam = priv; + int err; + + err = mlxsw_sp1_mr_tcam_route_parman_item_add(mr_tcam, route, + key, prio); + if (err) + return err; + + err = mlxsw_sp1_mr_tcam_route_replace(mlxsw_sp, &route->parman_item, + key, afa_block); + if (err) + goto err_route_replace; + return 0; + +err_route_replace: + mlxsw_sp1_mr_tcam_route_parman_item_remove(mr_tcam, route, key); + return err; +} + +static void +mlxsw_sp1_mr_tcam_route_destroy(struct mlxsw_sp *mlxsw_sp, void *priv, + void *route_priv, + struct mlxsw_sp_mr_route_key *key) +{ + struct mlxsw_sp1_mr_tcam_route *route = route_priv; + struct mlxsw_sp1_mr_tcam *mr_tcam = priv; + + mlxsw_sp1_mr_tcam_route_remove(mlxsw_sp, &route->parman_item, key); + mlxsw_sp1_mr_tcam_route_parman_item_remove(mr_tcam, route, key); +} + +static int +mlxsw_sp1_mr_tcam_route_update(struct mlxsw_sp *mlxsw_sp, + void *route_priv, + struct mlxsw_sp_mr_route_key *key, + struct mlxsw_afa_block *afa_block) +{ + struct mlxsw_sp1_mr_tcam_route *route = route_priv; + + return mlxsw_sp1_mr_tcam_route_replace(mlxsw_sp, &route->parman_item, + key, afa_block); +} + +#define MLXSW_SP1_MR_TCAM_REGION_BASE_COUNT 16 +#define MLXSW_SP1_MR_TCAM_REGION_RESIZE_STEP 16 + +static int +mlxsw_sp1_mr_tcam_region_alloc(struct mlxsw_sp1_mr_tcam_region *mr_tcam_region) +{ + struct mlxsw_sp *mlxsw_sp = mr_tcam_region->mlxsw_sp; + char rtar_pl[MLXSW_REG_RTAR_LEN]; + + mlxsw_reg_rtar_pack(rtar_pl, MLXSW_REG_RTAR_OP_ALLOCATE, + mr_tcam_region->rtar_key_type, + MLXSW_SP1_MR_TCAM_REGION_BASE_COUNT); + return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rtar), rtar_pl); +} + +static void +mlxsw_sp1_mr_tcam_region_free(struct mlxsw_sp1_mr_tcam_region *mr_tcam_region) +{ + struct mlxsw_sp *mlxsw_sp = mr_tcam_region->mlxsw_sp; + char rtar_pl[MLXSW_REG_RTAR_LEN]; + + mlxsw_reg_rtar_pack(rtar_pl, MLXSW_REG_RTAR_OP_DEALLOCATE, + mr_tcam_region->rtar_key_type, 0); + mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rtar), rtar_pl); +} + +static int mlxsw_sp1_mr_tcam_region_parman_resize(void *priv, + unsigned long new_count) +{ + struct mlxsw_sp1_mr_tcam_region *mr_tcam_region = priv; + struct mlxsw_sp *mlxsw_sp = mr_tcam_region->mlxsw_sp; + char rtar_pl[MLXSW_REG_RTAR_LEN]; + u64 max_tcam_rules; + + max_tcam_rules = MLXSW_CORE_RES_GET(mlxsw_sp->core, ACL_MAX_TCAM_RULES); + if (new_count > max_tcam_rules) + return -EINVAL; + mlxsw_reg_rtar_pack(rtar_pl, MLXSW_REG_RTAR_OP_RESIZE, + mr_tcam_region->rtar_key_type, new_count); + return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rtar), rtar_pl); +} + +static void mlxsw_sp1_mr_tcam_region_parman_move(void *priv, + unsigned long from_index, + unsigned long to_index, + unsigned long count) +{ + struct mlxsw_sp1_mr_tcam_region *mr_tcam_region = priv; + struct mlxsw_sp *mlxsw_sp = mr_tcam_region->mlxsw_sp; + char rrcr_pl[MLXSW_REG_RRCR_LEN]; + + mlxsw_reg_rrcr_pack(rrcr_pl, MLXSW_REG_RRCR_OP_MOVE, + from_index, count, + mr_tcam_region->rtar_key_type, to_index); + mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rrcr), rrcr_pl); +} + +static const struct 
parman_ops mlxsw_sp1_mr_tcam_region_parman_ops = { + .base_count = MLXSW_SP1_MR_TCAM_REGION_BASE_COUNT, + .resize_step = MLXSW_SP1_MR_TCAM_REGION_RESIZE_STEP, + .resize = mlxsw_sp1_mr_tcam_region_parman_resize, + .move = mlxsw_sp1_mr_tcam_region_parman_move, + .algo = PARMAN_ALGO_TYPE_LSORT, +}; + +static int +mlxsw_sp1_mr_tcam_region_init(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp1_mr_tcam_region *mr_tcam_region, + enum mlxsw_reg_rtar_key_type rtar_key_type) +{ + struct parman_prio *parman_prios; + struct parman *parman; + int err; + int i; + + mr_tcam_region->rtar_key_type = rtar_key_type; + mr_tcam_region->mlxsw_sp = mlxsw_sp; + + err = mlxsw_sp1_mr_tcam_region_alloc(mr_tcam_region); + if (err) + return err; + + parman = parman_create(&mlxsw_sp1_mr_tcam_region_parman_ops, + mr_tcam_region); + if (!parman) { + err = -ENOMEM; + goto err_parman_create; + } + mr_tcam_region->parman = parman; + + parman_prios = kmalloc_array(MLXSW_SP_MR_ROUTE_PRIO_MAX + 1, + sizeof(*parman_prios), GFP_KERNEL); + if (!parman_prios) { + err = -ENOMEM; + goto err_parman_prios_alloc; + } + mr_tcam_region->parman_prios = parman_prios; + + for (i = 0; i < MLXSW_SP_MR_ROUTE_PRIO_MAX + 1; i++) + parman_prio_init(mr_tcam_region->parman, + &mr_tcam_region->parman_prios[i], i); + return 0; + +err_parman_prios_alloc: + parman_destroy(parman); +err_parman_create: + mlxsw_sp1_mr_tcam_region_free(mr_tcam_region); + return err; +} + +static void +mlxsw_sp1_mr_tcam_region_fini(struct mlxsw_sp1_mr_tcam_region *mr_tcam_region) +{ + int i; + + for (i = 0; i < MLXSW_SP_MR_ROUTE_PRIO_MAX + 1; i++) + parman_prio_fini(&mr_tcam_region->parman_prios[i]); + kfree(mr_tcam_region->parman_prios); + parman_destroy(mr_tcam_region->parman); + mlxsw_sp1_mr_tcam_region_free(mr_tcam_region); +} + +static int mlxsw_sp1_mr_tcam_init(struct mlxsw_sp *mlxsw_sp, void *priv) +{ + struct mlxsw_sp1_mr_tcam *mr_tcam = priv; + struct mlxsw_sp1_mr_tcam_region *region = &mr_tcam->tcam_regions[0]; + u32 rtar_key; + int err; + + if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, ACL_MAX_TCAM_RULES)) + return -EIO; + + rtar_key = MLXSW_REG_RTAR_KEY_TYPE_IPV4_MULTICAST; + err = mlxsw_sp1_mr_tcam_region_init(mlxsw_sp, + &region[MLXSW_SP_L3_PROTO_IPV4], + rtar_key); + if (err) + return err; + + rtar_key = MLXSW_REG_RTAR_KEY_TYPE_IPV6_MULTICAST; + err = mlxsw_sp1_mr_tcam_region_init(mlxsw_sp, + &region[MLXSW_SP_L3_PROTO_IPV6], + rtar_key); + if (err) + goto err_ipv6_region_init; + + return 0; + +err_ipv6_region_init: + mlxsw_sp1_mr_tcam_region_fini(&region[MLXSW_SP_L3_PROTO_IPV4]); + return err; +} + +static void mlxsw_sp1_mr_tcam_fini(void *priv) +{ + struct mlxsw_sp1_mr_tcam *mr_tcam = priv; + struct mlxsw_sp1_mr_tcam_region *region = &mr_tcam->tcam_regions[0]; + + mlxsw_sp1_mr_tcam_region_fini(&region[MLXSW_SP_L3_PROTO_IPV6]); + mlxsw_sp1_mr_tcam_region_fini(&region[MLXSW_SP_L3_PROTO_IPV4]); +} + +const struct mlxsw_sp_mr_tcam_ops mlxsw_sp1_mr_tcam_ops = { + .priv_size = sizeof(struct mlxsw_sp1_mr_tcam), + .init = mlxsw_sp1_mr_tcam_init, + .fini = mlxsw_sp1_mr_tcam_fini, + .route_priv_size = sizeof(struct mlxsw_sp1_mr_tcam_route), + .route_create = mlxsw_sp1_mr_tcam_route_create, + .route_destroy = mlxsw_sp1_mr_tcam_route_destroy, + .route_update = mlxsw_sp1_mr_tcam_route_update, +}; diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum2_acl_tcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum2_acl_tcam.c new file mode 100644 index 000000000000..d7f1fb35ea2a --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum2_acl_tcam.c @@ -0,0 +1,222 @@ +/* + *
drivers/net/ethernet/mellanox/mlxsw/spectrum2_acl_tcam.c + * Copyright (c) 2018 Mellanox Technologies. All rights reserved. + * Copyright (c) 2018 Jiri Pirko <jiri@mellanox.com> + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the names of the copyright holders nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#include <linux/kernel.h> + +#include "spectrum.h" +#include "spectrum_acl_tcam.h" +#include "core_acl_flex_actions.h" + +struct mlxsw_sp2_acl_tcam { + u32 kvdl_index; + unsigned int kvdl_count; +}; + +struct mlxsw_sp2_acl_tcam_region { + struct mlxsw_sp_acl_ctcam_region cregion; +}; + +struct mlxsw_sp2_acl_tcam_chunk { + struct mlxsw_sp_acl_ctcam_chunk cchunk; +}; + +struct mlxsw_sp2_acl_tcam_entry { + struct mlxsw_sp_acl_ctcam_entry centry; + struct mlxsw_afa_block *act_block; +}; + +static int mlxsw_sp2_acl_tcam_init(struct mlxsw_sp *mlxsw_sp, void *priv, + struct mlxsw_sp_acl_tcam *_tcam) +{ + struct mlxsw_sp2_acl_tcam *tcam = priv; + struct mlxsw_afa_block *afa_block; + char pefa_pl[MLXSW_REG_PEFA_LEN]; + char pgcr_pl[MLXSW_REG_PGCR_LEN]; + char *enc_actions; + int i; + int err; + + tcam->kvdl_count = _tcam->max_regions; + err = mlxsw_sp_kvdl_alloc(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ACTSET, + tcam->kvdl_count, &tcam->kvdl_index); + if (err) + return err; + + /* Create flex action block, set default action (continue) + * but don't commit. We need just the current set encoding + * to be written using PEFA register to all indexes for all regions. 
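+ * The loop below performs that write: one PEFA access per allocated + * action-set entry, at KVD linear index tcam->kvdl_index + i for each + * of the tcam->kvdl_count regions.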
+ */ + afa_block = mlxsw_afa_block_create(mlxsw_sp->afa); + if (!afa_block) { + err = -ENOMEM; + goto err_afa_block; + } + err = mlxsw_afa_block_continue(afa_block); + if (WARN_ON(err)) + goto err_afa_block_continue; + enc_actions = mlxsw_afa_block_cur_set(afa_block); + + for (i = 0; i < tcam->kvdl_count; i++) { + mlxsw_reg_pefa_pack(pefa_pl, tcam->kvdl_index + i, + true, enc_actions); + err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pefa), pefa_pl); + if (err) + goto err_pefa_write; + } + mlxsw_reg_pgcr_pack(pgcr_pl, tcam->kvdl_index); + err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pgcr), pgcr_pl); + if (err) + goto err_pgcr_write; + + mlxsw_afa_block_destroy(afa_block); + return 0; + +err_pgcr_write: +err_pefa_write: +err_afa_block_continue: + mlxsw_afa_block_destroy(afa_block); +err_afa_block: + mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ACTSET, + tcam->kvdl_count, tcam->kvdl_index); + return err; +} + +static void mlxsw_sp2_acl_tcam_fini(struct mlxsw_sp *mlxsw_sp, void *priv) +{ + struct mlxsw_sp2_acl_tcam *tcam = priv; + + mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ACTSET, + tcam->kvdl_count, tcam->kvdl_index); +} + +static int +mlxsw_sp2_acl_tcam_region_init(struct mlxsw_sp *mlxsw_sp, void *region_priv, + struct mlxsw_sp_acl_tcam_region *_region) +{ + struct mlxsw_sp2_acl_tcam_region *region = region_priv; + int err; + + err = mlxsw_sp_acl_atcam_region_init(mlxsw_sp, _region); + if (err) + return err; + return mlxsw_sp_acl_ctcam_region_init(mlxsw_sp, &region->cregion, + _region); +} + +static void +mlxsw_sp2_acl_tcam_region_fini(struct mlxsw_sp *mlxsw_sp, void *region_priv) +{ + struct mlxsw_sp2_acl_tcam_region *region = region_priv; + + mlxsw_sp_acl_ctcam_region_fini(&region->cregion); +} + +static int +mlxsw_sp2_acl_tcam_region_associate(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_tcam_region *region) +{ + return mlxsw_sp_acl_atcam_region_associate(mlxsw_sp, region->id); +} + +static void mlxsw_sp2_acl_tcam_chunk_init(void *region_priv, void *chunk_priv, + unsigned int priority) +{ + struct mlxsw_sp2_acl_tcam_region *region = region_priv; + struct mlxsw_sp2_acl_tcam_chunk *chunk = chunk_priv; + + mlxsw_sp_acl_ctcam_chunk_init(&region->cregion, &chunk->cchunk, + priority); +} + +static void mlxsw_sp2_acl_tcam_chunk_fini(void *chunk_priv) +{ + struct mlxsw_sp2_acl_tcam_chunk *chunk = chunk_priv; + + mlxsw_sp_acl_ctcam_chunk_fini(&chunk->cchunk); +} + +static int mlxsw_sp2_acl_tcam_entry_add(struct mlxsw_sp *mlxsw_sp, + void *region_priv, void *chunk_priv, + void *entry_priv, + struct mlxsw_sp_acl_rule_info *rulei) +{ + struct mlxsw_sp2_acl_tcam_region *region = region_priv; + struct mlxsw_sp2_acl_tcam_chunk *chunk = chunk_priv; + struct mlxsw_sp2_acl_tcam_entry *entry = entry_priv; + + entry->act_block = rulei->act_block; + return mlxsw_sp_acl_ctcam_entry_add(mlxsw_sp, &region->cregion, + &chunk->cchunk, &entry->centry, + rulei, true); +} + +static void mlxsw_sp2_acl_tcam_entry_del(struct mlxsw_sp *mlxsw_sp, + void *region_priv, void *chunk_priv, + void *entry_priv) +{ + struct mlxsw_sp2_acl_tcam_region *region = region_priv; + struct mlxsw_sp2_acl_tcam_chunk *chunk = chunk_priv; + struct mlxsw_sp2_acl_tcam_entry *entry = entry_priv; + + mlxsw_sp_acl_ctcam_entry_del(mlxsw_sp, &region->cregion, + &chunk->cchunk, &entry->centry); +} + +static int +mlxsw_sp2_acl_tcam_entry_activity_get(struct mlxsw_sp *mlxsw_sp, + void *region_priv, void *entry_priv, + bool *activity) +{ + struct mlxsw_sp2_acl_tcam_entry *entry = entry_priv; + + return
mlxsw_afa_block_activity_get(entry->act_block, activity); +} + +const struct mlxsw_sp_acl_tcam_ops mlxsw_sp2_acl_tcam_ops = { + .key_type = MLXSW_REG_PTAR_KEY_TYPE_FLEX2, + .priv_size = sizeof(struct mlxsw_sp2_acl_tcam), + .init = mlxsw_sp2_acl_tcam_init, + .fini = mlxsw_sp2_acl_tcam_fini, + .region_priv_size = sizeof(struct mlxsw_sp2_acl_tcam_region), + .region_init = mlxsw_sp2_acl_tcam_region_init, + .region_fini = mlxsw_sp2_acl_tcam_region_fini, + .region_associate = mlxsw_sp2_acl_tcam_region_associate, + .chunk_priv_size = sizeof(struct mlxsw_sp2_acl_tcam_chunk), + .chunk_init = mlxsw_sp2_acl_tcam_chunk_init, + .chunk_fini = mlxsw_sp2_acl_tcam_chunk_fini, + .entry_priv_size = sizeof(struct mlxsw_sp2_acl_tcam_entry), + .entry_add = mlxsw_sp2_acl_tcam_entry_add, + .entry_del = mlxsw_sp2_acl_tcam_entry_del, + .entry_activity_get = mlxsw_sp2_acl_tcam_entry_activity_get, +}; diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum2_kvdl.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum2_kvdl.c new file mode 100644 index 000000000000..bacf7483c3fb --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum2_kvdl.c @@ -0,0 +1,302 @@ +/* + * drivers/net/ethernet/mellanox/mlxsw/spectrum2_kvdl.c + * Copyright (c) 2018 Mellanox Technologies. All rights reserved. + * Copyright (c) 2018 Jiri Pirko <jiri@mellanox.com> + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the names of the copyright holders nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#include <linux/kernel.h> +#include <linux/bitops.h> + +#include "spectrum.h" +#include "core.h" +#include "reg.h" +#include "resources.h" + +struct mlxsw_sp2_kvdl_part_info { + u8 res_type; + /* For each defined partition we need to know how many + * usage bits we need and how many indexes there are + * represented by a single bit. This can be obtained from FW + * by querying the appropriate resources. So have the resource + * ids for this purpose in the partition definition.
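+ * As an illustration only (not queried values): if the index range + * resource reported 0x100000 entries and the usage bit count resource + * reported 0x8000 bits, each usage bit would track 32 consecutive + * KVDL indexes.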
+ */ + enum mlxsw_res_id usage_bit_count_res_id; + enum mlxsw_res_id index_range_res_id; +}; + +#define MLXSW_SP2_KVDL_PART_INFO(_entry_type, _res_type, \ + _usage_bit_count_res_id, _index_range_res_id) \ +[MLXSW_SP_KVDL_ENTRY_TYPE_##_entry_type] = { \ + .res_type = _res_type, \ + .usage_bit_count_res_id = MLXSW_RES_ID_##_usage_bit_count_res_id, \ + .index_range_res_id = MLXSW_RES_ID_##_index_range_res_id, \ +} + +static const struct mlxsw_sp2_kvdl_part_info mlxsw_sp2_kvdl_parts_info[] = { + MLXSW_SP2_KVDL_PART_INFO(ADJ, 0x21, KVD_SIZE, MAX_KVD_LINEAR_RANGE), + MLXSW_SP2_KVDL_PART_INFO(ACTSET, 0x23, MAX_KVD_ACTION_SETS, + MAX_KVD_ACTION_SETS), + MLXSW_SP2_KVDL_PART_INFO(PBS, 0x24, KVD_SIZE, KVD_SIZE), + MLXSW_SP2_KVDL_PART_INFO(MCRIGR, 0x26, KVD_SIZE, KVD_SIZE), +}; + +#define MLXSW_SP2_KVDL_PARTS_INFO_LEN ARRAY_SIZE(mlxsw_sp2_kvdl_parts_info) + +struct mlxsw_sp2_kvdl_part { + const struct mlxsw_sp2_kvdl_part_info *info; + unsigned int usage_bit_count; + unsigned int indexes_per_usage_bit; + unsigned int last_allocated_bit; + unsigned long usage[0]; /* Usage bits */ +}; + +struct mlxsw_sp2_kvdl { + struct mlxsw_sp2_kvdl_part *parts[MLXSW_SP2_KVDL_PARTS_INFO_LEN]; +}; + +static int mlxsw_sp2_kvdl_part_find_zero_bits(struct mlxsw_sp2_kvdl_part *part, + unsigned int bit_count, + unsigned int *p_bit) +{ + unsigned int start_bit; + unsigned int bit; + unsigned int i; + bool wrap = false; + + start_bit = part->last_allocated_bit + 1; + if (start_bit == part->usage_bit_count) + start_bit = 0; + bit = start_bit; +again: + bit = find_next_zero_bit(part->usage, part->usage_bit_count, bit); + if (!wrap && bit + bit_count >= part->usage_bit_count) { + wrap = true; + bit = 0; + goto again; + } + if (wrap && bit + bit_count >= start_bit) + return -ENOBUFS; + for (i = 0; i < bit_count; i++) { + if (test_bit(bit + i, part->usage)) { + bit += bit_count; + goto again; + } + } + *p_bit = bit; + return 0; +} + +static int mlxsw_sp2_kvdl_part_alloc(struct mlxsw_sp2_kvdl_part *part, + unsigned int size, + u32 *p_kvdl_index) +{ + unsigned int bit_count; + unsigned int bit; + unsigned int i; + int err; + + bit_count = DIV_ROUND_UP(size, part->indexes_per_usage_bit); + err = mlxsw_sp2_kvdl_part_find_zero_bits(part, bit_count, &bit); + if (err) + return err; + for (i = 0; i < bit_count; i++) + __set_bit(bit + i, part->usage); + *p_kvdl_index = bit * part->indexes_per_usage_bit; + return 0; +} + +static int mlxsw_sp2_kvdl_rec_del(struct mlxsw_sp *mlxsw_sp, u8 res_type, + u16 size, u32 kvdl_index) +{ + char *iedr_pl; + int err; + + iedr_pl = kmalloc(MLXSW_REG_IEDR_LEN, GFP_KERNEL); + if (!iedr_pl) + return -ENOMEM; + + mlxsw_reg_iedr_pack(iedr_pl); + mlxsw_reg_iedr_rec_pack(iedr_pl, 0, res_type, size, kvdl_index); + err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(iedr), iedr_pl); + kfree(iedr_pl); + return err; +} + +static void mlxsw_sp2_kvdl_part_free(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp2_kvdl_part *part, + unsigned int size, u32 kvdl_index) +{ + unsigned int bit_count; + unsigned int bit; + unsigned int i; + int err; + + /* We need to ask FW to delete previously used KVD linear index */ + err = mlxsw_sp2_kvdl_rec_del(mlxsw_sp, part->info->res_type, + size, kvdl_index); + if (err) + return; + + bit_count = DIV_ROUND_UP(size, part->indexes_per_usage_bit); + bit = kvdl_index / part->indexes_per_usage_bit; + for (i = 0; i < bit_count; i++) + __clear_bit(bit + i, part->usage); +} + +static int mlxsw_sp2_kvdl_alloc(struct mlxsw_sp *mlxsw_sp, void *priv, + enum mlxsw_sp_kvdl_entry_type type, + unsigned int 
entry_count, + u32 *p_entry_index) +{ + unsigned int size = entry_count * mlxsw_sp_kvdl_entry_size(type); + struct mlxsw_sp2_kvdl *kvdl = priv; + struct mlxsw_sp2_kvdl_part *part = kvdl->parts[type]; + + return mlxsw_sp2_kvdl_part_alloc(part, size, p_entry_index); +} + +static void mlxsw_sp2_kvdl_free(struct mlxsw_sp *mlxsw_sp, void *priv, + enum mlxsw_sp_kvdl_entry_type type, + unsigned int entry_count, + int entry_index) +{ + unsigned int size = entry_count * mlxsw_sp_kvdl_entry_size(type); + struct mlxsw_sp2_kvdl *kvdl = priv; + struct mlxsw_sp2_kvdl_part *part = kvdl->parts[type]; + + return mlxsw_sp2_kvdl_part_free(mlxsw_sp, part, size, entry_index); +} + +static int mlxsw_sp2_kvdl_alloc_size_query(struct mlxsw_sp *mlxsw_sp, + void *priv, + enum mlxsw_sp_kvdl_entry_type type, + unsigned int entry_count, + unsigned int *p_alloc_count) +{ + *p_alloc_count = entry_count; + return 0; +} + +static struct mlxsw_sp2_kvdl_part * +mlxsw_sp2_kvdl_part_init(struct mlxsw_sp *mlxsw_sp, + const struct mlxsw_sp2_kvdl_part_info *info) +{ + unsigned int indexes_per_usage_bit; + struct mlxsw_sp2_kvdl_part *part; + unsigned int index_range; + unsigned int usage_bit_count; + size_t usage_size; + + if (!mlxsw_core_res_valid(mlxsw_sp->core, + info->usage_bit_count_res_id) || + !mlxsw_core_res_valid(mlxsw_sp->core, + info->index_range_res_id)) + return ERR_PTR(-EIO); + usage_bit_count = mlxsw_core_res_get(mlxsw_sp->core, + info->usage_bit_count_res_id); + index_range = mlxsw_core_res_get(mlxsw_sp->core, + info->index_range_res_id); + + /* For some partitions, one usage bit represents a group of indexes. + * That's why we compute the number of indexes per usage bit here, + * according to queried resources. + */ + indexes_per_usage_bit = index_range / usage_bit_count; + + usage_size = BITS_TO_LONGS(usage_bit_count) * sizeof(unsigned long); + part = kzalloc(sizeof(*part) + usage_size, GFP_KERNEL); + if (!part) + return ERR_PTR(-ENOMEM); + part->info = info; + part->usage_bit_count = usage_bit_count; + part->indexes_per_usage_bit = indexes_per_usage_bit; + part->last_allocated_bit = usage_bit_count - 1; + return part; +} + +static void mlxsw_sp2_kvdl_part_fini(struct mlxsw_sp2_kvdl_part *part) +{ + kfree(part); +} + +static int mlxsw_sp2_kvdl_parts_init(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp2_kvdl *kvdl) +{ + const struct mlxsw_sp2_kvdl_part_info *info; + int i; + int err; + + for (i = 0; i < MLXSW_SP2_KVDL_PARTS_INFO_LEN; i++) { + info = &mlxsw_sp2_kvdl_parts_info[i]; + kvdl->parts[i] = mlxsw_sp2_kvdl_part_init(mlxsw_sp, info); + if (IS_ERR(kvdl->parts[i])) { + err = PTR_ERR(kvdl->parts[i]); + goto err_kvdl_part_init; + } + } + return 0; + +err_kvdl_part_init: + for (i--; i >= 0; i--) + mlxsw_sp2_kvdl_part_fini(kvdl->parts[i]); + return err; +} + +static void mlxsw_sp2_kvdl_parts_fini(struct mlxsw_sp2_kvdl *kvdl) +{ + int i; + + for (i = 0; i < MLXSW_SP2_KVDL_PARTS_INFO_LEN; i++) + mlxsw_sp2_kvdl_part_fini(kvdl->parts[i]); +} + +static int mlxsw_sp2_kvdl_init(struct mlxsw_sp *mlxsw_sp, void *priv) +{ + struct mlxsw_sp2_kvdl *kvdl = priv; + + return mlxsw_sp2_kvdl_parts_init(mlxsw_sp, kvdl); +} + +static void mlxsw_sp2_kvdl_fini(struct mlxsw_sp *mlxsw_sp, void *priv) +{ + struct mlxsw_sp2_kvdl *kvdl = priv; + + mlxsw_sp2_kvdl_parts_fini(kvdl); +} + +const struct mlxsw_sp_kvdl_ops mlxsw_sp2_kvdl_ops = { + .priv_size = sizeof(struct mlxsw_sp2_kvdl), + .init = mlxsw_sp2_kvdl_init, + .fini = mlxsw_sp2_kvdl_fini, + .alloc = mlxsw_sp2_kvdl_alloc, + .free = mlxsw_sp2_kvdl_free, + .alloc_size_query = 
mlxsw_sp2_kvdl_alloc_size_query, +}; diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum2_mr_tcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum2_mr_tcam.c new file mode 100644 index 000000000000..53d4ab70da95 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum2_mr_tcam.c @@ -0,0 +1,82 @@ +/* + * drivers/net/ethernet/mellanox/mlxsw/spectrum2_mr_tcam.c + * Copyright (c) 2018 Mellanox Technologies. All rights reserved. + * Copyright (c) 2018 Jiri Pirko <jiri@mellanox.com> + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the names of the copyright holders nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#include <linux/kernel.h> + +#include "core_acl_flex_actions.h" +#include "spectrum.h" +#include "spectrum_mr.h" + +static int +mlxsw_sp2_mr_tcam_route_create(struct mlxsw_sp *mlxsw_sp, void *priv, + void *route_priv, + struct mlxsw_sp_mr_route_key *key, + struct mlxsw_afa_block *afa_block, + enum mlxsw_sp_mr_route_prio prio) +{ + return 0; +} + +static void +mlxsw_sp2_mr_tcam_route_destroy(struct mlxsw_sp *mlxsw_sp, void *priv, + void *route_priv, + struct mlxsw_sp_mr_route_key *key) +{ +} + +static int +mlxsw_sp2_mr_tcam_route_update(struct mlxsw_sp *mlxsw_sp, + void *route_priv, + struct mlxsw_sp_mr_route_key *key, + struct mlxsw_afa_block *afa_block) +{ + return 0; +} + +static int mlxsw_sp2_mr_tcam_init(struct mlxsw_sp *mlxsw_sp, void *priv) +{ + return 0; +} + +static void mlxsw_sp2_mr_tcam_fini(void *priv) +{ +} + +const struct mlxsw_sp_mr_tcam_ops mlxsw_sp2_mr_tcam_ops = { + .init = mlxsw_sp2_mr_tcam_init, + .fini = mlxsw_sp2_mr_tcam_fini, + .route_create = mlxsw_sp2_mr_tcam_route_create, + .route_destroy = mlxsw_sp2_mr_tcam_route_destroy, + .route_update = mlxsw_sp2_mr_tcam_route_update, +}; diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c index 79b1fa27a9a4..217621d79e26 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c @@ -48,13 +48,12 @@ #include "spectrum.h" #include "core_acl_flex_keys.h" #include "core_acl_flex_actions.h" -#include "spectrum_acl_flex_keys.h" +#include "spectrum_acl_tcam.h" struct mlxsw_sp_acl { struct mlxsw_sp *mlxsw_sp; struct mlxsw_afk *afk; struct mlxsw_sp_fid *dummy_fid; - const struct mlxsw_sp_acl_ops *ops; struct rhashtable ruleset_ht; struct list_head rules; struct { @@ -62,8 +61,7 @@ struct mlxsw_sp_acl { unsigned long interval; /* ms */ #define MLXSW_SP_ACL_RULE_ACTIVITY_UPDATE_PERIOD_MS 1000 } rule_activity_update; - unsigned long priv[0]; - /* priv has to be always the last item */ + struct mlxsw_sp_acl_tcam tcam; }; struct mlxsw_afk *mlxsw_sp_acl_afk(struct mlxsw_sp_acl *acl) @@ -339,7 +337,7 @@ mlxsw_sp_acl_ruleset_create(struct mlxsw_sp *mlxsw_sp, if (err) goto err_rhashtable_init; - err = ops->ruleset_add(mlxsw_sp, acl->priv, ruleset->priv); + err = ops->ruleset_add(mlxsw_sp, &acl->tcam, ruleset->priv); if (err) goto err_ops_ruleset_add; @@ -409,7 +407,7 @@ mlxsw_sp_acl_ruleset_lookup(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_acl *acl = mlxsw_sp->acl; struct mlxsw_sp_acl_ruleset *ruleset; - ops = acl->ops->profile_ops(mlxsw_sp, profile); + ops = mlxsw_sp_acl_tcam_profile_ops(mlxsw_sp, profile); if (!ops) return ERR_PTR(-EINVAL); ruleset = __mlxsw_sp_acl_ruleset_lookup(acl, block, chain_index, ops); @@ -427,7 +425,7 @@ mlxsw_sp_acl_ruleset_get(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_acl *acl = mlxsw_sp->acl; struct mlxsw_sp_acl_ruleset *ruleset; - ops = acl->ops->profile_ops(mlxsw_sp, profile); + ops = mlxsw_sp_acl_tcam_profile_ops(mlxsw_sp, profile); if (!ops) return ERR_PTR(-EINVAL); @@ -487,7 +485,7 @@ int mlxsw_sp_acl_rulei_commit(struct mlxsw_sp_acl_rule_info *rulei) void mlxsw_sp_acl_rulei_priority(struct mlxsw_sp_acl_rule_info *rulei, unsigned int priority) { - rulei->priority = priority; + rulei->priority = priority >> 16; } void mlxsw_sp_acl_rulei_keymask_u32(struct mlxsw_sp_acl_rule_info *rulei, @@ -634,7 +632,8 @@ mlxsw_sp_acl_rule_create(struct mlxsw_sp *mlxsw_sp, int err; mlxsw_sp_acl_ruleset_ref_inc(ruleset); - rule = kzalloc(sizeof(*rule) + ops->rule_priv_size, GFP_KERNEL); + rule 
= kzalloc(sizeof(*rule) + ops->rule_priv_size(mlxsw_sp), + GFP_KERNEL); if (!rule) { err = -ENOMEM; goto err_alloc; @@ -825,20 +824,20 @@ int mlxsw_sp_acl_rule_get_stats(struct mlxsw_sp *mlxsw_sp, int mlxsw_sp_acl_init(struct mlxsw_sp *mlxsw_sp) { - const struct mlxsw_sp_acl_ops *acl_ops = &mlxsw_sp_acl_tcam_ops; struct mlxsw_sp_fid *fid; struct mlxsw_sp_acl *acl; + size_t alloc_size; int err; - acl = kzalloc(sizeof(*acl) + acl_ops->priv_size, GFP_KERNEL); + alloc_size = sizeof(*acl) + mlxsw_sp_acl_tcam_priv_size(mlxsw_sp); + acl = kzalloc(alloc_size, GFP_KERNEL); if (!acl) return -ENOMEM; mlxsw_sp->acl = acl; acl->mlxsw_sp = mlxsw_sp; acl->afk = mlxsw_afk_create(MLXSW_CORE_RES_GET(mlxsw_sp->core, ACL_FLEX_KEYS), - mlxsw_sp_afk_blocks, - MLXSW_SP_AFK_BLOCKS_COUNT); + mlxsw_sp->afk_ops); if (!acl->afk) { err = -ENOMEM; goto err_afk_create; @@ -857,12 +856,10 @@ int mlxsw_sp_acl_init(struct mlxsw_sp *mlxsw_sp) acl->dummy_fid = fid; INIT_LIST_HEAD(&acl->rules); - err = acl_ops->init(mlxsw_sp, acl->priv); + err = mlxsw_sp_acl_tcam_init(mlxsw_sp, &acl->tcam); if (err) goto err_acl_ops_init; - acl->ops = acl_ops; - /* Create the delayed work for the rule activity_update */ INIT_DELAYED_WORK(&acl->rule_activity_update.dw, mlxsw_sp_acl_rul_activity_update_work); @@ -884,10 +881,9 @@ err_afk_create: void mlxsw_sp_acl_fini(struct mlxsw_sp *mlxsw_sp) { struct mlxsw_sp_acl *acl = mlxsw_sp->acl; - const struct mlxsw_sp_acl_ops *acl_ops = acl->ops; cancel_delayed_work_sync(&mlxsw_sp->acl->rule_activity_update.dw); - acl_ops->fini(mlxsw_sp, acl->priv); + mlxsw_sp_acl_tcam_fini(mlxsw_sp, &acl->tcam); WARN_ON(!list_empty(&acl->rules)); mlxsw_sp_fid_put(acl->dummy_fid); rhashtable_destroy(&acl->ruleset_ht); diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_atcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_atcam.c new file mode 100644 index 000000000000..b1b3b0e0c00e --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_atcam.c @@ -0,0 +1,95 @@ +/* + * drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_atcam.c + * Copyright (c) 2018 Mellanox Technologies. All rights reserved. + * Copyright (c) 2018 Jiri Pirko <jiri@mellanox.com> + * Copyright (c) 2018 Ido Schimmel <idosch@mellanox.com> + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the names of the copyright holders nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#include <linux/kernel.h> +#include <linux/errno.h> + +#include "reg.h" +#include "core.h" +#include "spectrum.h" +#include "spectrum_acl_tcam.h" + +int mlxsw_sp_acl_atcam_region_associate(struct mlxsw_sp *mlxsw_sp, + u16 region_id) +{ + char perar_pl[MLXSW_REG_PERAR_LEN]; + /* For now, just assume that every region has 12 key blocks */ + u16 hw_region = region_id * 3; + u64 max_regions; + + max_regions = MLXSW_CORE_RES_GET(mlxsw_sp->core, ACL_MAX_REGIONS); + if (hw_region >= max_regions) + return -ENOBUFS; + + mlxsw_reg_perar_pack(perar_pl, region_id, hw_region); + return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(perar), perar_pl); +} + +static int mlxsw_sp_acl_atcam_region_param_init(struct mlxsw_sp *mlxsw_sp, + u16 region_id) +{ + char percr_pl[MLXSW_REG_PERCR_LEN]; + + mlxsw_reg_percr_pack(percr_pl, region_id); + return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(percr), percr_pl); +} + +static int +mlxsw_sp_acl_atcam_region_erp_init(struct mlxsw_sp *mlxsw_sp, + u16 region_id) +{ + char pererp_pl[MLXSW_REG_PERERP_LEN]; + + mlxsw_reg_pererp_pack(pererp_pl, region_id); + return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pererp), pererp_pl); +} + +int mlxsw_sp_acl_atcam_region_init(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_tcam_region *region) +{ + int err; + + err = mlxsw_sp_acl_atcam_region_associate(mlxsw_sp, region->id); + if (err) + return err; + err = mlxsw_sp_acl_atcam_region_param_init(mlxsw_sp, region->id); + if (err) + return err; + err = mlxsw_sp_acl_atcam_region_erp_init(mlxsw_sp, region->id); + if (err) + return err; + + return 0; +} diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_ctcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_ctcam.c new file mode 100644 index 000000000000..ef0d4c0a5a1f --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_ctcam.c @@ -0,0 +1,215 @@ +/* + * drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_ctcam.c + * Copyright (c) 2017-2018 Mellanox Technologies. All rights reserved. + * Copyright (c) 2017-2018 Jiri Pirko <jiri@mellanox.com> + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the names of the copyright holders nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#include <linux/kernel.h> +#include <linux/errno.h> +#include <linux/parman.h> + +#include "reg.h" +#include "core.h" +#include "spectrum.h" +#include "spectrum_acl_tcam.h" + +static int +mlxsw_sp_acl_ctcam_region_resize(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_tcam_region *region, + u16 new_size) +{ + char ptar_pl[MLXSW_REG_PTAR_LEN]; + + mlxsw_reg_ptar_pack(ptar_pl, MLXSW_REG_PTAR_OP_RESIZE, + region->key_type, new_size, region->id, + region->tcam_region_info); + return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptar), ptar_pl); +} + +static void +mlxsw_sp_acl_ctcam_region_move(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_tcam_region *region, + u16 src_offset, u16 dst_offset, u16 size) +{ + char prcr_pl[MLXSW_REG_PRCR_LEN]; + + mlxsw_reg_prcr_pack(prcr_pl, MLXSW_REG_PRCR_OP_MOVE, + region->tcam_region_info, src_offset, + region->tcam_region_info, dst_offset, size); + mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(prcr), prcr_pl); +} + +static int +mlxsw_sp_acl_ctcam_region_entry_insert(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_tcam_region *region, + unsigned int offset, + struct mlxsw_sp_acl_rule_info *rulei, + bool fillup_priority) +{ + struct mlxsw_afk *afk = mlxsw_sp_acl_afk(mlxsw_sp->acl); + char ptce2_pl[MLXSW_REG_PTCE2_LEN]; + char *act_set; + u32 priority; + char *mask; + char *key; + int err; + + err = mlxsw_sp_acl_tcam_priority_get(mlxsw_sp, rulei, &priority, + fillup_priority); + if (err) + return err; + + mlxsw_reg_ptce2_pack(ptce2_pl, true, MLXSW_REG_PTCE2_OP_WRITE_WRITE, + region->tcam_region_info, offset, priority); + key = mlxsw_reg_ptce2_flex_key_blocks_data(ptce2_pl); + mask = mlxsw_reg_ptce2_mask_data(ptce2_pl); + mlxsw_afk_encode(afk, region->key_info, &rulei->values, key, mask); + + /* Only the first action set belongs here, the rest is in KVD */ + act_set = mlxsw_afa_block_first_set(rulei->act_block); + mlxsw_reg_ptce2_flex_action_set_memcpy_to(ptce2_pl, act_set); + + return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptce2), ptce2_pl); +} + +static void +mlxsw_sp_acl_ctcam_region_entry_remove(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_tcam_region *region, + unsigned int offset) +{ + char ptce2_pl[MLXSW_REG_PTCE2_LEN]; + + mlxsw_reg_ptce2_pack(ptce2_pl, false, MLXSW_REG_PTCE2_OP_WRITE_WRITE, + region->tcam_region_info, offset, 0); + mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptce2), ptce2_pl); +} + +static int mlxsw_sp_acl_ctcam_region_parman_resize(void *priv, + unsigned long new_count) +{ + struct mlxsw_sp_acl_ctcam_region *cregion = priv; + struct mlxsw_sp_acl_tcam_region *region = cregion->region; + struct mlxsw_sp *mlxsw_sp = region->mlxsw_sp; + u64 max_tcam_rules; + + max_tcam_rules = MLXSW_CORE_RES_GET(mlxsw_sp->core, ACL_MAX_TCAM_RULES); + if 
(new_count > max_tcam_rules) + return -EINVAL; + return mlxsw_sp_acl_ctcam_region_resize(mlxsw_sp, region, new_count); +} + +static void mlxsw_sp_acl_ctcam_region_parman_move(void *priv, + unsigned long from_index, + unsigned long to_index, + unsigned long count) +{ + struct mlxsw_sp_acl_ctcam_region *cregion = priv; + struct mlxsw_sp_acl_tcam_region *region = cregion->region; + struct mlxsw_sp *mlxsw_sp = region->mlxsw_sp; + + mlxsw_sp_acl_ctcam_region_move(mlxsw_sp, region, + from_index, to_index, count); +} + +static const struct parman_ops mlxsw_sp_acl_ctcam_region_parman_ops = { + .base_count = MLXSW_SP_ACL_TCAM_REGION_BASE_COUNT, + .resize_step = MLXSW_SP_ACL_TCAM_REGION_RESIZE_STEP, + .resize = mlxsw_sp_acl_ctcam_region_parman_resize, + .move = mlxsw_sp_acl_ctcam_region_parman_move, + .algo = PARMAN_ALGO_TYPE_LSORT, +}; + +int mlxsw_sp_acl_ctcam_region_init(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_ctcam_region *cregion, + struct mlxsw_sp_acl_tcam_region *region) +{ + cregion->region = region; + cregion->parman = parman_create(&mlxsw_sp_acl_ctcam_region_parman_ops, + cregion); + if (!cregion->parman) + return -ENOMEM; + return 0; +} + +void mlxsw_sp_acl_ctcam_region_fini(struct mlxsw_sp_acl_ctcam_region *cregion) +{ + parman_destroy(cregion->parman); +} + +void mlxsw_sp_acl_ctcam_chunk_init(struct mlxsw_sp_acl_ctcam_region *cregion, + struct mlxsw_sp_acl_ctcam_chunk *cchunk, + unsigned int priority) +{ + parman_prio_init(cregion->parman, &cchunk->parman_prio, priority); +} + +void mlxsw_sp_acl_ctcam_chunk_fini(struct mlxsw_sp_acl_ctcam_chunk *cchunk) +{ + parman_prio_fini(&cchunk->parman_prio); +} + +int mlxsw_sp_acl_ctcam_entry_add(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_ctcam_region *cregion, + struct mlxsw_sp_acl_ctcam_chunk *cchunk, + struct mlxsw_sp_acl_ctcam_entry *centry, + struct mlxsw_sp_acl_rule_info *rulei, + bool fillup_priority) +{ + int err; + + err = parman_item_add(cregion->parman, &cchunk->parman_prio, + ¢ry->parman_item); + if (err) + return err; + + err = mlxsw_sp_acl_ctcam_region_entry_insert(mlxsw_sp, cregion->region, + centry->parman_item.index, + rulei, fillup_priority); + if (err) + goto err_rule_insert; + return 0; + +err_rule_insert: + parman_item_remove(cregion->parman, &cchunk->parman_prio, + ¢ry->parman_item); + return err; +} + +void mlxsw_sp_acl_ctcam_entry_del(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_ctcam_region *cregion, + struct mlxsw_sp_acl_ctcam_chunk *cchunk, + struct mlxsw_sp_acl_ctcam_entry *centry) +{ + mlxsw_sp_acl_ctcam_region_entry_remove(mlxsw_sp, cregion->region, + centry->parman_item.index); + parman_item_remove(cregion->parman, &cchunk->parman_prio, + ¢ry->parman_item); +} diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_actions.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_actions.c index 510ce48d87f7..bca0def756cd 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_actions.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_actions.c @@ -37,10 +37,8 @@ #include "core_acl_flex_actions.h" #include "spectrum_span.h" -#define MLXSW_SP_KVDL_ACT_EXT_SIZE 1 - static int mlxsw_sp_act_kvdl_set_add(void *priv, u32 *p_kvdl_index, - char *enc_actions, bool is_first) + char *enc_actions, bool is_first, bool ca) { struct mlxsw_sp *mlxsw_sp = priv; char pefa_pl[MLXSW_REG_PEFA_LEN]; @@ -53,11 +51,11 @@ static int mlxsw_sp_act_kvdl_set_add(void *priv, u32 *p_kvdl_index, if (is_first) return 0; - err = mlxsw_sp_kvdl_alloc(mlxsw_sp, MLXSW_SP_KVDL_ACT_EXT_SIZE, - 
&kvdl_index); + err = mlxsw_sp_kvdl_alloc(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ACTSET, + 1, &kvdl_index); if (err) return err; - mlxsw_reg_pefa_pack(pefa_pl, kvdl_index, enc_actions); + mlxsw_reg_pefa_pack(pefa_pl, kvdl_index, ca, enc_actions); err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pefa), pefa_pl); if (err) goto err_pefa_write; @@ -65,10 +63,25 @@ static int mlxsw_sp_act_kvdl_set_add(void *priv, u32 *p_kvdl_index, return 0; err_pefa_write: - mlxsw_sp_kvdl_free(mlxsw_sp, kvdl_index); + mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ACTSET, + 1, kvdl_index); return err; } +static int mlxsw_sp1_act_kvdl_set_add(void *priv, u32 *p_kvdl_index, + char *enc_actions, bool is_first) +{ + return mlxsw_sp_act_kvdl_set_add(priv, p_kvdl_index, enc_actions, + is_first, false); +} + +static int mlxsw_sp2_act_kvdl_set_add(void *priv, u32 *p_kvdl_index, + char *enc_actions, bool is_first) +{ + return mlxsw_sp_act_kvdl_set_add(priv, p_kvdl_index, enc_actions, + is_first, true); +} + static void mlxsw_sp_act_kvdl_set_del(void *priv, u32 kvdl_index, bool is_first) { @@ -76,7 +89,29 @@ static void mlxsw_sp_act_kvdl_set_del(void *priv, u32 kvdl_index, if (is_first) return; - mlxsw_sp_kvdl_free(mlxsw_sp, kvdl_index); + mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ACTSET, + 1, kvdl_index); +} + +static int mlxsw_sp1_act_kvdl_set_activity_get(void *priv, u32 kvdl_index, + bool *activity) +{ + return -EOPNOTSUPP; +} + +static int mlxsw_sp2_act_kvdl_set_activity_get(void *priv, u32 kvdl_index, + bool *activity) +{ + struct mlxsw_sp *mlxsw_sp = priv; + char pefa_pl[MLXSW_REG_PEFA_LEN]; + int err; + + mlxsw_reg_pefa_pack(pefa_pl, kvdl_index, true, NULL); + err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pefa), pefa_pl); + if (err) + return err; + mlxsw_reg_pefa_unpack(pefa_pl, activity); + return 0; } static int mlxsw_sp_act_kvdl_fwd_entry_add(void *priv, u32 *p_kvdl_index, @@ -87,7 +122,8 @@ static int mlxsw_sp_act_kvdl_fwd_entry_add(void *priv, u32 *p_kvdl_index, u32 kvdl_index; int err; - err = mlxsw_sp_kvdl_alloc(mlxsw_sp, 1, &kvdl_index); + err = mlxsw_sp_kvdl_alloc(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_PBS, + 1, &kvdl_index); if (err) return err; mlxsw_reg_ppbs_pack(ppbs_pl, kvdl_index, local_port); @@ -98,7 +134,8 @@ static int mlxsw_sp_act_kvdl_fwd_entry_add(void *priv, u32 *p_kvdl_index, return 0; err_ppbs_write: - mlxsw_sp_kvdl_free(mlxsw_sp, kvdl_index); + mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_PBS, + 1, kvdl_index); return err; } @@ -106,7 +143,8 @@ static void mlxsw_sp_act_kvdl_fwd_entry_del(void *priv, u32 kvdl_index) { struct mlxsw_sp *mlxsw_sp = priv; - mlxsw_sp_kvdl_free(mlxsw_sp, kvdl_index); + mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_PBS, + 1, kvdl_index); } static int @@ -154,22 +192,36 @@ mlxsw_sp_act_mirror_del(void *priv, u8 local_in_port, int span_id, bool ingress) mlxsw_sp_span_mirror_del(in_port, span_id, type, false); } -static const struct mlxsw_afa_ops mlxsw_sp_act_afa_ops = { - .kvdl_set_add = mlxsw_sp_act_kvdl_set_add, +const struct mlxsw_afa_ops mlxsw_sp1_act_afa_ops = { + .kvdl_set_add = mlxsw_sp1_act_kvdl_set_add, + .kvdl_set_del = mlxsw_sp_act_kvdl_set_del, + .kvdl_set_activity_get = mlxsw_sp1_act_kvdl_set_activity_get, + .kvdl_fwd_entry_add = mlxsw_sp_act_kvdl_fwd_entry_add, + .kvdl_fwd_entry_del = mlxsw_sp_act_kvdl_fwd_entry_del, + .counter_index_get = mlxsw_sp_act_counter_index_get, + .counter_index_put = mlxsw_sp_act_counter_index_put, + .mirror_add = mlxsw_sp_act_mirror_add, + .mirror_del = mlxsw_sp_act_mirror_del, +}; + +const 
struct mlxsw_afa_ops mlxsw_sp2_act_afa_ops = { + .kvdl_set_add = mlxsw_sp2_act_kvdl_set_add, .kvdl_set_del = mlxsw_sp_act_kvdl_set_del, + .kvdl_set_activity_get = mlxsw_sp2_act_kvdl_set_activity_get, .kvdl_fwd_entry_add = mlxsw_sp_act_kvdl_fwd_entry_add, .kvdl_fwd_entry_del = mlxsw_sp_act_kvdl_fwd_entry_del, .counter_index_get = mlxsw_sp_act_counter_index_get, .counter_index_put = mlxsw_sp_act_counter_index_put, .mirror_add = mlxsw_sp_act_mirror_add, .mirror_del = mlxsw_sp_act_mirror_del, + .dummy_first_set = true, }; int mlxsw_sp_afa_init(struct mlxsw_sp *mlxsw_sp) { mlxsw_sp->afa = mlxsw_afa_create(MLXSW_CORE_RES_GET(mlxsw_sp->core, ACL_ACTIONS_PER_SET), - &mlxsw_sp_act_afa_ops, mlxsw_sp); + mlxsw_sp->afa_ops, mlxsw_sp); return PTR_ERR_OR_ZERO(mlxsw_sp->afa); } diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.c new file mode 100644 index 000000000000..aa8927cee376 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.c @@ -0,0 +1,316 @@ +/* + * drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.c + * Copyright (c) 2017-2018 Mellanox Technologies. All rights reserved. + * Copyright (c) 2017-2018 Jiri Pirko <jiri@mellanox.com> + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the names of the copyright holders nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#include <linux/kernel.h> +#include <linux/module.h> +#include "spectrum.h" +#include "item.h" +#include "core_acl_flex_keys.h" + +static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_l2_dmac[] = { + MLXSW_AFK_ELEMENT_INST_BUF(DMAC_32_47, 0x00, 2), + MLXSW_AFK_ELEMENT_INST_BUF(DMAC_0_31, 0x02, 4), + MLXSW_AFK_ELEMENT_INST_U32(PCP, 0x08, 13, 3), + MLXSW_AFK_ELEMENT_INST_U32(VID, 0x08, 0, 12), + MLXSW_AFK_ELEMENT_INST_U32(SRC_SYS_PORT, 0x0C, 0, 8), +}; + +static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_l2_smac[] = { + MLXSW_AFK_ELEMENT_INST_BUF(SMAC_32_47, 0x00, 2), + MLXSW_AFK_ELEMENT_INST_BUF(SMAC_0_31, 0x02, 4), + MLXSW_AFK_ELEMENT_INST_U32(PCP, 0x08, 13, 3), + MLXSW_AFK_ELEMENT_INST_U32(VID, 0x08, 0, 12), + MLXSW_AFK_ELEMENT_INST_U32(SRC_SYS_PORT, 0x0C, 0, 8), +}; + +static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_l2_smac_ex[] = { + MLXSW_AFK_ELEMENT_INST_BUF(SMAC_32_47, 0x02, 2), + MLXSW_AFK_ELEMENT_INST_BUF(SMAC_0_31, 0x04, 4), + MLXSW_AFK_ELEMENT_INST_U32(ETHERTYPE, 0x0C, 0, 16), +}; + +static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv4_sip[] = { + MLXSW_AFK_ELEMENT_INST_BUF(SRC_IP_0_31, 0x00, 4), + MLXSW_AFK_ELEMENT_INST_U32(IP_PROTO, 0x08, 0, 8), + MLXSW_AFK_ELEMENT_INST_U32(SRC_SYS_PORT, 0x0C, 0, 8), +}; + +static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv4_dip[] = { + MLXSW_AFK_ELEMENT_INST_BUF(DST_IP_0_31, 0x00, 4), + MLXSW_AFK_ELEMENT_INST_U32(IP_PROTO, 0x08, 0, 8), + MLXSW_AFK_ELEMENT_INST_U32(SRC_SYS_PORT, 0x0C, 0, 8), +}; + +static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv4[] = { + MLXSW_AFK_ELEMENT_INST_BUF(SRC_IP_0_31, 0x00, 4), + MLXSW_AFK_ELEMENT_INST_U32(IP_ECN, 0x04, 4, 2), + MLXSW_AFK_ELEMENT_INST_U32(IP_TTL_, 0x04, 24, 8), + MLXSW_AFK_ELEMENT_INST_U32(IP_DSCP, 0x08, 0, 6), + MLXSW_AFK_ELEMENT_INST_U32(TCP_FLAGS, 0x08, 8, 9), /* TCP_CONTROL+TCP_ECN */ +}; + +static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv4_ex[] = { + MLXSW_AFK_ELEMENT_INST_U32(VID, 0x00, 0, 12), + MLXSW_AFK_ELEMENT_INST_U32(PCP, 0x08, 29, 3), + MLXSW_AFK_ELEMENT_INST_U32(SRC_L4_PORT, 0x08, 0, 16), + MLXSW_AFK_ELEMENT_INST_U32(DST_L4_PORT, 0x0C, 0, 16), +}; + +static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv6_dip[] = { + MLXSW_AFK_ELEMENT_INST_BUF(DST_IP_32_63, 0x00, 4), + MLXSW_AFK_ELEMENT_INST_BUF(DST_IP_0_31, 0x04, 4), +}; + +static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv6_ex1[] = { + MLXSW_AFK_ELEMENT_INST_BUF(DST_IP_96_127, 0x00, 4), + MLXSW_AFK_ELEMENT_INST_BUF(DST_IP_64_95, 0x04, 4), + MLXSW_AFK_ELEMENT_INST_U32(IP_PROTO, 0x08, 0, 8), +}; + +static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv6_sip[] = { + MLXSW_AFK_ELEMENT_INST_BUF(SRC_IP_32_63, 0x00, 4), + MLXSW_AFK_ELEMENT_INST_BUF(SRC_IP_0_31, 0x04, 4), +}; + +static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv6_sip_ex[] = { + MLXSW_AFK_ELEMENT_INST_BUF(SRC_IP_96_127, 0x00, 4), + MLXSW_AFK_ELEMENT_INST_BUF(SRC_IP_64_95, 0x04, 4), +}; + +static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_packet_type[] = { + MLXSW_AFK_ELEMENT_INST_U32(ETHERTYPE, 0x00, 0, 16), +}; + +static const struct mlxsw_afk_block mlxsw_sp1_afk_blocks[] = { + MLXSW_AFK_BLOCK(0x10, mlxsw_sp_afk_element_info_l2_dmac), + MLXSW_AFK_BLOCK(0x11, mlxsw_sp_afk_element_info_l2_smac), + MLXSW_AFK_BLOCK(0x12, mlxsw_sp_afk_element_info_l2_smac_ex), + MLXSW_AFK_BLOCK(0x30, mlxsw_sp_afk_element_info_ipv4_sip), + MLXSW_AFK_BLOCK(0x31, mlxsw_sp_afk_element_info_ipv4_dip), + MLXSW_AFK_BLOCK(0x32, 
mlxsw_sp_afk_element_info_ipv4), + MLXSW_AFK_BLOCK(0x33, mlxsw_sp_afk_element_info_ipv4_ex), + MLXSW_AFK_BLOCK(0x60, mlxsw_sp_afk_element_info_ipv6_dip), + MLXSW_AFK_BLOCK(0x65, mlxsw_sp_afk_element_info_ipv6_ex1), + MLXSW_AFK_BLOCK(0x62, mlxsw_sp_afk_element_info_ipv6_sip), + MLXSW_AFK_BLOCK(0x63, mlxsw_sp_afk_element_info_ipv6_sip_ex), + MLXSW_AFK_BLOCK(0xB0, mlxsw_sp_afk_element_info_packet_type), +}; + +#define MLXSW_SP1_AFK_KEY_BLOCK_SIZE 16 + +static void mlxsw_sp1_afk_encode_block(char *block, int block_index, + char *output) +{ + unsigned int offset = block_index * MLXSW_SP1_AFK_KEY_BLOCK_SIZE; + char *output_indexed = output + offset; + + memcpy(output_indexed, block, MLXSW_SP1_AFK_KEY_BLOCK_SIZE); +} + +const struct mlxsw_afk_ops mlxsw_sp1_afk_ops = { + .blocks = mlxsw_sp1_afk_blocks, + .blocks_count = ARRAY_SIZE(mlxsw_sp1_afk_blocks), + .encode_block = mlxsw_sp1_afk_encode_block, +}; + +static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_mac_0[] = { + MLXSW_AFK_ELEMENT_INST_BUF(DMAC_0_31, 0x04, 4), +}; + +static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_mac_1[] = { + MLXSW_AFK_ELEMENT_INST_BUF(SMAC_0_31, 0x04, 4), +}; + +static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_mac_2[] = { + MLXSW_AFK_ELEMENT_INST_BUF(SMAC_32_47, 0x04, 2), + MLXSW_AFK_ELEMENT_INST_BUF(DMAC_32_47, 0x06, 2), +}; + +static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_mac_3[] = { + MLXSW_AFK_ELEMENT_INST_U32(PCP, 0x00, 0, 3), + MLXSW_AFK_ELEMENT_INST_U32(VID, 0x04, 16, 12), + MLXSW_AFK_ELEMENT_INST_BUF(DMAC_32_47, 0x06, 2), +}; + +static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_mac_4[] = { + MLXSW_AFK_ELEMENT_INST_U32(PCP, 0x00, 0, 3), + MLXSW_AFK_ELEMENT_INST_U32(VID, 0x04, 16, 12), + MLXSW_AFK_ELEMENT_INST_U32(ETHERTYPE, 0x04, 0, 16), +}; + +static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_mac_5[] = { + MLXSW_AFK_ELEMENT_INST_U32(VID, 0x04, 16, 12), + MLXSW_AFK_ELEMENT_INST_U32(SRC_SYS_PORT, 0x04, 0, 8), /* RX_ACL_SYSTEM_PORT */ +}; + +static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv4_0[] = { + MLXSW_AFK_ELEMENT_INST_BUF(DST_IP_0_31, 0x04, 4), +}; + +static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv4_1[] = { + MLXSW_AFK_ELEMENT_INST_BUF(SRC_IP_0_31, 0x04, 4), +}; + +static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv4_2[] = { + MLXSW_AFK_ELEMENT_INST_U32(IP_DSCP, 0x04, 0, 6), + MLXSW_AFK_ELEMENT_INST_U32(IP_ECN, 0x04, 6, 2), + MLXSW_AFK_ELEMENT_INST_U32(IP_TTL_, 0x04, 8, 8), + MLXSW_AFK_ELEMENT_INST_U32(IP_PROTO, 0x04, 16, 8), +}; + +static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv6_0[] = { + MLXSW_AFK_ELEMENT_INST_BUF(DST_IP_32_63, 0x04, 4), +}; + +static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv6_1[] = { + MLXSW_AFK_ELEMENT_INST_BUF(DST_IP_64_95, 0x04, 4), +}; + +static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv6_2[] = { + MLXSW_AFK_ELEMENT_INST_BUF(DST_IP_96_127, 0x04, 4), +}; + +static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv6_3[] = { + MLXSW_AFK_ELEMENT_INST_BUF(SRC_IP_32_63, 0x04, 4), +}; + +static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv6_4[] = { + MLXSW_AFK_ELEMENT_INST_BUF(SRC_IP_64_95, 0x04, 4), +}; + +static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv6_5[] = { + MLXSW_AFK_ELEMENT_INST_BUF(SRC_IP_96_127, 0x04, 4), +}; + +static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_l4_0[] = { + MLXSW_AFK_ELEMENT_INST_U32(SRC_L4_PORT, 0x04, 16, 16), + 
MLXSW_AFK_ELEMENT_INST_U32(DST_L4_PORT, 0x04, 0, 16), +}; + +static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_l4_2[] = { + MLXSW_AFK_ELEMENT_INST_U32(TCP_FLAGS, 0x04, 16, 9), /* TCP_CONTROL + TCP_ECN */ +}; + +static const struct mlxsw_afk_block mlxsw_sp2_afk_blocks[] = { + MLXSW_AFK_BLOCK(0x10, mlxsw_sp_afk_element_info_mac_0), + MLXSW_AFK_BLOCK(0x11, mlxsw_sp_afk_element_info_mac_1), + MLXSW_AFK_BLOCK(0x12, mlxsw_sp_afk_element_info_mac_2), + MLXSW_AFK_BLOCK(0x13, mlxsw_sp_afk_element_info_mac_3), + MLXSW_AFK_BLOCK(0x14, mlxsw_sp_afk_element_info_mac_4), + MLXSW_AFK_BLOCK(0x15, mlxsw_sp_afk_element_info_mac_5), + MLXSW_AFK_BLOCK(0x38, mlxsw_sp_afk_element_info_ipv4_0), + MLXSW_AFK_BLOCK(0x39, mlxsw_sp_afk_element_info_ipv4_1), + MLXSW_AFK_BLOCK(0x3A, mlxsw_sp_afk_element_info_ipv4_2), + MLXSW_AFK_BLOCK(0x40, mlxsw_sp_afk_element_info_ipv6_0), + MLXSW_AFK_BLOCK(0x41, mlxsw_sp_afk_element_info_ipv6_1), + MLXSW_AFK_BLOCK(0x42, mlxsw_sp_afk_element_info_ipv6_2), + MLXSW_AFK_BLOCK(0x43, mlxsw_sp_afk_element_info_ipv6_3), + MLXSW_AFK_BLOCK(0x44, mlxsw_sp_afk_element_info_ipv6_4), + MLXSW_AFK_BLOCK(0x45, mlxsw_sp_afk_element_info_ipv6_5), + MLXSW_AFK_BLOCK(0x90, mlxsw_sp_afk_element_info_l4_0), + MLXSW_AFK_BLOCK(0x92, mlxsw_sp_afk_element_info_l4_2), +}; + +#define MLXSW_SP2_AFK_BITS_PER_BLOCK 36 + +/* A block in Spectrum-2 is of the following form: + * + * +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ + * | | | | | | | | | | | | | | | | | | | | | | | | | | | | |35|34|33|32| + * +-----------------------------------------------------------------------------------------------+ + * |31|30|29|28|27|26|25|24|23|22|21|20|19|18|17|16|15|14|13|12|11|10| 9| 8| 7| 6| 5| 4| 3| 2| 1| 0| + * +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ + */ +MLXSW_ITEM64(sp2_afk, block, value, 0x00, 0, MLXSW_SP2_AFK_BITS_PER_BLOCK); + +/* The key / mask block layout in Spectrum-2 is of the following form: + * + * +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ + * | | | | | | | | | | | | | | | | | block11_high | + * +-----------------------------------------------------------------------------------------------+ + * | block11_low | block10_high | + * +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ + * ... 
+ */ + +struct mlxsw_sp2_afk_block_layout { + unsigned short offset; + struct mlxsw_item item; +}; + +#define MLXSW_SP2_AFK_BLOCK_LAYOUT(_block, _offset, _shift) \ + { \ + .offset = _offset, \ + { \ + .shift = _shift, \ + .size = {.bits = MLXSW_SP2_AFK_BITS_PER_BLOCK}, \ + .name = #_block, \ + } \ + } \ + +static const struct mlxsw_sp2_afk_block_layout mlxsw_sp2_afk_blocks_layout[] = { + MLXSW_SP2_AFK_BLOCK_LAYOUT(block0, 0x30, 0), + MLXSW_SP2_AFK_BLOCK_LAYOUT(block1, 0x2C, 4), + MLXSW_SP2_AFK_BLOCK_LAYOUT(block2, 0x28, 8), + MLXSW_SP2_AFK_BLOCK_LAYOUT(block3, 0x24, 12), + MLXSW_SP2_AFK_BLOCK_LAYOUT(block4, 0x20, 16), + MLXSW_SP2_AFK_BLOCK_LAYOUT(block5, 0x1C, 20), + MLXSW_SP2_AFK_BLOCK_LAYOUT(block6, 0x18, 24), + MLXSW_SP2_AFK_BLOCK_LAYOUT(block7, 0x14, 28), + MLXSW_SP2_AFK_BLOCK_LAYOUT(block8, 0x0C, 0), + MLXSW_SP2_AFK_BLOCK_LAYOUT(block9, 0x08, 4), + MLXSW_SP2_AFK_BLOCK_LAYOUT(block10, 0x04, 8), + MLXSW_SP2_AFK_BLOCK_LAYOUT(block11, 0x00, 12), +}; + +static void mlxsw_sp2_afk_encode_block(char *block, int block_index, + char *output) +{ + u64 block_value = mlxsw_sp2_afk_block_value_get(block); + const struct mlxsw_sp2_afk_block_layout *block_layout; + + if (WARN_ON(block_index < 0 || + block_index >= ARRAY_SIZE(mlxsw_sp2_afk_blocks_layout))) + return; + + block_layout = &mlxsw_sp2_afk_blocks_layout[block_index]; + __mlxsw_item_set64(output + block_layout->offset, + &block_layout->item, 0, block_value); +} + +const struct mlxsw_afk_ops mlxsw_sp2_afk_ops = { + .blocks = mlxsw_sp2_afk_blocks, + .blocks_count = ARRAY_SIZE(mlxsw_sp2_afk_blocks), + .encode_block = mlxsw_sp2_afk_encode_block, +}; diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.h deleted file mode 100644 index fb8031828454..000000000000 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.h +++ /dev/null @@ -1,124 +0,0 @@ -/* - * drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.h - * Copyright (c) 2017 Mellanox Technologies. All rights reserved. - * Copyright (c) 2017 Jiri Pirko <jiri@mellanox.com> - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. Neither the names of the copyright holders nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. - * - * Alternatively, this software may be distributed under the terms of the - * GNU General Public License ("GPL") version 2 as published by the Free - * Software Foundation. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE - * POSSIBILITY OF SUCH DAMAGE. - */ - -#ifndef _MLXSW_SPECTRUM_ACL_FLEX_KEYS_H -#define _MLXSW_SPECTRUM_ACL_FLEX_KEYS_H - -#include "core_acl_flex_keys.h" - -static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_l2_dmac[] = { - MLXSW_AFK_ELEMENT_INST_BUF(DMAC, 0x00, 6), - MLXSW_AFK_ELEMENT_INST_U32(PCP, 0x08, 13, 3), - MLXSW_AFK_ELEMENT_INST_U32(VID, 0x08, 0, 12), - MLXSW_AFK_ELEMENT_INST_U32(SRC_SYS_PORT, 0x0C, 0, 16), -}; - -static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_l2_smac[] = { - MLXSW_AFK_ELEMENT_INST_BUF(SMAC, 0x00, 6), - MLXSW_AFK_ELEMENT_INST_U32(PCP, 0x08, 13, 3), - MLXSW_AFK_ELEMENT_INST_U32(VID, 0x08, 0, 12), - MLXSW_AFK_ELEMENT_INST_U32(SRC_SYS_PORT, 0x0C, 0, 16), -}; - -static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_l2_smac_ex[] = { - MLXSW_AFK_ELEMENT_INST_BUF(SMAC, 0x02, 6), - MLXSW_AFK_ELEMENT_INST_U32(ETHERTYPE, 0x0C, 0, 16), -}; - -static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv4_sip[] = { - MLXSW_AFK_ELEMENT_INST_U32(SRC_IP4, 0x00, 0, 32), - MLXSW_AFK_ELEMENT_INST_U32(IP_PROTO, 0x08, 0, 8), - MLXSW_AFK_ELEMENT_INST_U32(SRC_SYS_PORT, 0x0C, 0, 16), -}; - -static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv4_dip[] = { - MLXSW_AFK_ELEMENT_INST_U32(DST_IP4, 0x00, 0, 32), - MLXSW_AFK_ELEMENT_INST_U32(IP_PROTO, 0x08, 0, 8), - MLXSW_AFK_ELEMENT_INST_U32(SRC_SYS_PORT, 0x0C, 0, 16), -}; - -static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv4[] = { - MLXSW_AFK_ELEMENT_INST_U32(SRC_IP4, 0x00, 0, 32), - MLXSW_AFK_ELEMENT_INST_U32(IP_ECN, 0x04, 4, 2), - MLXSW_AFK_ELEMENT_INST_U32(IP_TTL_, 0x04, 24, 8), - MLXSW_AFK_ELEMENT_INST_U32(IP_DSCP, 0x08, 0, 6), - MLXSW_AFK_ELEMENT_INST_U32(TCP_FLAGS, 0x08, 8, 9), /* TCP_CONTROL+TCP_ECN */ -}; - -static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv4_ex[] = { - MLXSW_AFK_ELEMENT_INST_U32(VID, 0x00, 0, 12), - MLXSW_AFK_ELEMENT_INST_U32(PCP, 0x08, 29, 3), - MLXSW_AFK_ELEMENT_INST_U32(SRC_L4_PORT, 0x08, 0, 16), - MLXSW_AFK_ELEMENT_INST_U32(DST_L4_PORT, 0x0C, 0, 16), -}; - -static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv6_dip[] = { - MLXSW_AFK_ELEMENT_INST_BUF(DST_IP6_LO, 0x00, 8), -}; - -static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv6_ex1[] = { - MLXSW_AFK_ELEMENT_INST_BUF(DST_IP6_HI, 0x00, 8), - MLXSW_AFK_ELEMENT_INST_U32(IP_PROTO, 0x08, 0, 8), -}; - -static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv6_sip[] = { - MLXSW_AFK_ELEMENT_INST_BUF(SRC_IP6_LO, 0x00, 8), -}; - -static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv6_sip_ex[] = { - MLXSW_AFK_ELEMENT_INST_BUF(SRC_IP6_HI, 0x00, 8), -}; - -static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_packet_type[] = { - MLXSW_AFK_ELEMENT_INST_U32(ETHERTYPE, 0x00, 0, 16), -}; - -static const struct mlxsw_afk_block mlxsw_sp_afk_blocks[] = { - MLXSW_AFK_BLOCK(0x10, mlxsw_sp_afk_element_info_l2_dmac), - MLXSW_AFK_BLOCK(0x11, mlxsw_sp_afk_element_info_l2_smac), - MLXSW_AFK_BLOCK(0x12, mlxsw_sp_afk_element_info_l2_smac_ex), 
- MLXSW_AFK_BLOCK(0x30, mlxsw_sp_afk_element_info_ipv4_sip), - MLXSW_AFK_BLOCK(0x31, mlxsw_sp_afk_element_info_ipv4_dip), - MLXSW_AFK_BLOCK(0x32, mlxsw_sp_afk_element_info_ipv4), - MLXSW_AFK_BLOCK(0x33, mlxsw_sp_afk_element_info_ipv4_ex), - MLXSW_AFK_BLOCK(0x60, mlxsw_sp_afk_element_info_ipv6_dip), - MLXSW_AFK_BLOCK(0x65, mlxsw_sp_afk_element_info_ipv6_ex1), - MLXSW_AFK_BLOCK(0x62, mlxsw_sp_afk_element_info_ipv6_sip), - MLXSW_AFK_BLOCK(0x63, mlxsw_sp_afk_element_info_ipv6_sip_ex), - MLXSW_AFK_BLOCK(0xB0, mlxsw_sp_afk_element_info_packet_type), -}; - -#define MLXSW_SP_AFK_BLOCKS_COUNT ARRAY_SIZE(mlxsw_sp_afk_blocks) - -#endif diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c index ad1b548e3cac..e06d7d9e5b7f 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c @@ -1,7 +1,7 @@ /* * drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c - * Copyright (c) 2017 Mellanox Technologies. All rights reserved. - * Copyright (c) 2017 Jiri Pirko <jiri@mellanox.com> + * Copyright (c) 2017-2018 Mellanox Technologies. All rights reserved. + * Copyright (c) 2017-2018 Jiri Pirko <jiri@mellanox.com> * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: @@ -39,25 +39,25 @@ #include <linux/list.h> #include <linux/rhashtable.h> #include <linux/netdevice.h> -#include <linux/parman.h> #include "reg.h" #include "core.h" #include "resources.h" #include "spectrum.h" +#include "spectrum_acl_tcam.h" #include "core_acl_flex_keys.h" -struct mlxsw_sp_acl_tcam { - unsigned long *used_regions; /* bit array */ - unsigned int max_regions; - unsigned long *used_groups; /* bit array */ - unsigned int max_groups; - unsigned int max_group_size; -}; +size_t mlxsw_sp_acl_tcam_priv_size(struct mlxsw_sp *mlxsw_sp) +{ + const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops; + + return ops->priv_size; +} -static int mlxsw_sp_acl_tcam_init(struct mlxsw_sp *mlxsw_sp, void *priv) +int mlxsw_sp_acl_tcam_init(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_tcam *tcam) { - struct mlxsw_sp_acl_tcam *tcam = priv; + const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops; u64 max_tcam_regions; u64 max_regions; u64 max_groups; @@ -88,21 +88,53 @@ static int mlxsw_sp_acl_tcam_init(struct mlxsw_sp *mlxsw_sp, void *priv) tcam->max_groups = max_groups; tcam->max_group_size = MLXSW_CORE_RES_GET(mlxsw_sp->core, ACL_MAX_GROUP_SIZE); + + err = ops->init(mlxsw_sp, tcam->priv, tcam); + if (err) + goto err_tcam_init; + return 0; +err_tcam_init: + kfree(tcam->used_groups); err_alloc_used_groups: kfree(tcam->used_regions); return err; } -static void mlxsw_sp_acl_tcam_fini(struct mlxsw_sp *mlxsw_sp, void *priv) +void mlxsw_sp_acl_tcam_fini(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_tcam *tcam) { - struct mlxsw_sp_acl_tcam *tcam = priv; + const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops; + ops->fini(mlxsw_sp, tcam->priv); kfree(tcam->used_groups); kfree(tcam->used_regions); } +int mlxsw_sp_acl_tcam_priority_get(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_rule_info *rulei, + u32 *priority, bool fillup_priority) +{ + u64 max_priority; + + if (!fillup_priority) { + *priority = 0; + return 0; + } + + if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, KVD_SIZE)) + return -EIO; + + max_priority = MLXSW_CORE_RES_GET(mlxsw_sp->core, KVD_SIZE); + if (rulei->priority > max_priority) + 
return -EINVAL; + + /* Unlike in TC, in HW, higher number means higher priority. */ + *priority = max_priority - rulei->priority; + return 0; +} + static int mlxsw_sp_acl_tcam_region_id_get(struct mlxsw_sp_acl_tcam *tcam, u16 *p_id) { @@ -159,35 +191,21 @@ struct mlxsw_sp_acl_tcam_group { unsigned int patterns_count; }; -struct mlxsw_sp_acl_tcam_region { - struct list_head list; /* Member of a TCAM group */ - struct list_head chunk_list; /* List of chunks under this region */ - struct parman *parman; - struct mlxsw_sp *mlxsw_sp; - struct mlxsw_sp_acl_tcam_group *group; - u16 id; /* ACL ID and region ID - they are same */ - char tcam_region_info[MLXSW_REG_PXXX_TCAM_REGION_INFO_LEN]; - struct mlxsw_afk_key_info *key_info; - struct { - struct parman_prio parman_prio; - struct parman_item parman_item; - struct mlxsw_sp_acl_rule_info *rulei; - } catchall; -}; - struct mlxsw_sp_acl_tcam_chunk { struct list_head list; /* Member of a TCAM region */ struct rhash_head ht_node; /* Member of a chunk HT */ unsigned int priority; /* Priority within the region and group */ - struct parman_prio parman_prio; struct mlxsw_sp_acl_tcam_group *group; struct mlxsw_sp_acl_tcam_region *region; unsigned int ref_count; + unsigned long priv[0]; + /* priv has to be always the last item */ }; struct mlxsw_sp_acl_tcam_entry { - struct parman_item parman_item; struct mlxsw_sp_acl_tcam_chunk *chunk; + unsigned long priv[0]; + /* priv has to be always the last item */ }; static const struct rhashtable_params mlxsw_sp_acl_tcam_chunk_ht_params = { @@ -441,9 +459,6 @@ mlxsw_sp_acl_tcam_group_use_patterns(struct mlxsw_sp_acl_tcam_group *group, memcpy(out, elusage, sizeof(*out)); } -#define MLXSW_SP_ACL_TCAM_REGION_BASE_COUNT 16 -#define MLXSW_SP_ACL_TCAM_REGION_RESIZE_STEP 16 - static int mlxsw_sp_acl_tcam_region_alloc(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_acl_tcam_region *region) @@ -455,6 +470,7 @@ mlxsw_sp_acl_tcam_region_alloc(struct mlxsw_sp *mlxsw_sp, int err; mlxsw_reg_ptar_pack(ptar_pl, MLXSW_REG_PTAR_OP_ALLOC, + region->key_type, MLXSW_SP_ACL_TCAM_REGION_BASE_COUNT, region->id, region->tcam_region_info); encodings_count = mlxsw_afk_key_info_blocks_count_get(key_info); @@ -477,24 +493,13 @@ mlxsw_sp_acl_tcam_region_free(struct mlxsw_sp *mlxsw_sp, { char ptar_pl[MLXSW_REG_PTAR_LEN]; - mlxsw_reg_ptar_pack(ptar_pl, MLXSW_REG_PTAR_OP_FREE, 0, region->id, + mlxsw_reg_ptar_pack(ptar_pl, MLXSW_REG_PTAR_OP_FREE, + region->key_type, 0, region->id, region->tcam_region_info); mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptar), ptar_pl); } static int -mlxsw_sp_acl_tcam_region_resize(struct mlxsw_sp *mlxsw_sp, - struct mlxsw_sp_acl_tcam_region *region, - u16 new_size) -{ - char ptar_pl[MLXSW_REG_PTAR_LEN]; - - mlxsw_reg_ptar_pack(ptar_pl, MLXSW_REG_PTAR_OP_RESIZE, - new_size, region->id, region->tcam_region_info); - return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptar), ptar_pl); -} - -static int mlxsw_sp_acl_tcam_region_enable(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_acl_tcam_region *region) { @@ -516,193 +521,22 @@ mlxsw_sp_acl_tcam_region_disable(struct mlxsw_sp *mlxsw_sp, mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pacl), pacl_pl); } -static int -mlxsw_sp_acl_tcam_region_entry_insert(struct mlxsw_sp *mlxsw_sp, - struct mlxsw_sp_acl_tcam_region *region, - unsigned int offset, - struct mlxsw_sp_acl_rule_info *rulei) -{ - char ptce2_pl[MLXSW_REG_PTCE2_LEN]; - char *act_set; - char *mask; - char *key; - - mlxsw_reg_ptce2_pack(ptce2_pl, true, MLXSW_REG_PTCE2_OP_WRITE_WRITE, - region->tcam_region_info, offset); - key = 
mlxsw_reg_ptce2_flex_key_blocks_data(ptce2_pl); - mask = mlxsw_reg_ptce2_mask_data(ptce2_pl); - mlxsw_afk_encode(region->key_info, &rulei->values, key, mask); - - /* Only the first action set belongs here, the rest is in KVD */ - act_set = mlxsw_afa_block_first_set(rulei->act_block); - mlxsw_reg_ptce2_flex_action_set_memcpy_to(ptce2_pl, act_set); - - return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptce2), ptce2_pl); -} - -static void -mlxsw_sp_acl_tcam_region_entry_remove(struct mlxsw_sp *mlxsw_sp, - struct mlxsw_sp_acl_tcam_region *region, - unsigned int offset) -{ - char ptce2_pl[MLXSW_REG_PTCE2_LEN]; - - mlxsw_reg_ptce2_pack(ptce2_pl, false, MLXSW_REG_PTCE2_OP_WRITE_WRITE, - region->tcam_region_info, offset); - mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptce2), ptce2_pl); -} - -static int -mlxsw_sp_acl_tcam_region_entry_activity_get(struct mlxsw_sp *mlxsw_sp, - struct mlxsw_sp_acl_tcam_region *region, - unsigned int offset, - bool *activity) -{ - char ptce2_pl[MLXSW_REG_PTCE2_LEN]; - int err; - - mlxsw_reg_ptce2_pack(ptce2_pl, true, MLXSW_REG_PTCE2_OP_QUERY_CLEAR_ON_READ, - region->tcam_region_info, offset); - err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptce2), ptce2_pl); - if (err) - return err; - *activity = mlxsw_reg_ptce2_a_get(ptce2_pl); - return 0; -} - -#define MLXSW_SP_ACL_TCAM_CATCHALL_PRIO (~0U) - -static int -mlxsw_sp_acl_tcam_region_catchall_add(struct mlxsw_sp *mlxsw_sp, - struct mlxsw_sp_acl_tcam_region *region) -{ - struct parman_prio *parman_prio = ®ion->catchall.parman_prio; - struct parman_item *parman_item = ®ion->catchall.parman_item; - struct mlxsw_sp_acl_rule_info *rulei; - int err; - - parman_prio_init(region->parman, parman_prio, - MLXSW_SP_ACL_TCAM_CATCHALL_PRIO); - err = parman_item_add(region->parman, parman_prio, parman_item); - if (err) - goto err_parman_item_add; - - rulei = mlxsw_sp_acl_rulei_create(mlxsw_sp->acl); - if (IS_ERR(rulei)) { - err = PTR_ERR(rulei); - goto err_rulei_create; - } - - err = mlxsw_sp_acl_rulei_act_continue(rulei); - if (WARN_ON(err)) - goto err_rulei_act_continue; - - err = mlxsw_sp_acl_rulei_commit(rulei); - if (err) - goto err_rulei_commit; - - err = mlxsw_sp_acl_tcam_region_entry_insert(mlxsw_sp, region, - parman_item->index, rulei); - region->catchall.rulei = rulei; - if (err) - goto err_rule_insert; - - return 0; - -err_rule_insert: -err_rulei_commit: -err_rulei_act_continue: - mlxsw_sp_acl_rulei_destroy(rulei); -err_rulei_create: - parman_item_remove(region->parman, parman_prio, parman_item); -err_parman_item_add: - parman_prio_fini(parman_prio); - return err; -} - -static void -mlxsw_sp_acl_tcam_region_catchall_del(struct mlxsw_sp *mlxsw_sp, - struct mlxsw_sp_acl_tcam_region *region) -{ - struct parman_prio *parman_prio = ®ion->catchall.parman_prio; - struct parman_item *parman_item = ®ion->catchall.parman_item; - struct mlxsw_sp_acl_rule_info *rulei = region->catchall.rulei; - - mlxsw_sp_acl_tcam_region_entry_remove(mlxsw_sp, region, - parman_item->index); - mlxsw_sp_acl_rulei_destroy(rulei); - parman_item_remove(region->parman, parman_prio, parman_item); - parman_prio_fini(parman_prio); -} - -static void -mlxsw_sp_acl_tcam_region_move(struct mlxsw_sp *mlxsw_sp, - struct mlxsw_sp_acl_tcam_region *region, - u16 src_offset, u16 dst_offset, u16 size) -{ - char prcr_pl[MLXSW_REG_PRCR_LEN]; - - mlxsw_reg_prcr_pack(prcr_pl, MLXSW_REG_PRCR_OP_MOVE, - region->tcam_region_info, src_offset, - region->tcam_region_info, dst_offset, size); - mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(prcr), prcr_pl); -} - -static int 
mlxsw_sp_acl_tcam_region_parman_resize(void *priv, - unsigned long new_count) -{ - struct mlxsw_sp_acl_tcam_region *region = priv; - struct mlxsw_sp *mlxsw_sp = region->mlxsw_sp; - u64 max_tcam_rules; - - max_tcam_rules = MLXSW_CORE_RES_GET(mlxsw_sp->core, ACL_MAX_TCAM_RULES); - if (new_count > max_tcam_rules) - return -EINVAL; - return mlxsw_sp_acl_tcam_region_resize(mlxsw_sp, region, new_count); -} - -static void mlxsw_sp_acl_tcam_region_parman_move(void *priv, - unsigned long from_index, - unsigned long to_index, - unsigned long count) -{ - struct mlxsw_sp_acl_tcam_region *region = priv; - struct mlxsw_sp *mlxsw_sp = region->mlxsw_sp; - - mlxsw_sp_acl_tcam_region_move(mlxsw_sp, region, - from_index, to_index, count); -} - -static const struct parman_ops mlxsw_sp_acl_tcam_region_parman_ops = { - .base_count = MLXSW_SP_ACL_TCAM_REGION_BASE_COUNT, - .resize_step = MLXSW_SP_ACL_TCAM_REGION_RESIZE_STEP, - .resize = mlxsw_sp_acl_tcam_region_parman_resize, - .move = mlxsw_sp_acl_tcam_region_parman_move, - .algo = PARMAN_ALGO_TYPE_LSORT, -}; - static struct mlxsw_sp_acl_tcam_region * mlxsw_sp_acl_tcam_region_create(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_acl_tcam *tcam, struct mlxsw_afk_element_usage *elusage) { + const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops; struct mlxsw_afk *afk = mlxsw_sp_acl_afk(mlxsw_sp->acl); struct mlxsw_sp_acl_tcam_region *region; int err; - region = kzalloc(sizeof(*region), GFP_KERNEL); + region = kzalloc(sizeof(*region) + ops->region_priv_size, GFP_KERNEL); if (!region) return ERR_PTR(-ENOMEM); INIT_LIST_HEAD(®ion->chunk_list); region->mlxsw_sp = mlxsw_sp; - region->parman = parman_create(&mlxsw_sp_acl_tcam_region_parman_ops, - region); - if (!region->parman) { - err = -ENOMEM; - goto err_parman_create; - } - region->key_info = mlxsw_afk_key_info_get(afk, elusage); if (IS_ERR(region->key_info)) { err = PTR_ERR(region->key_info); @@ -713,6 +547,11 @@ mlxsw_sp_acl_tcam_region_create(struct mlxsw_sp *mlxsw_sp, if (err) goto err_region_id_get; + err = ops->region_associate(mlxsw_sp, region); + if (err) + goto err_tcam_region_associate; + + region->key_type = ops->key_type; err = mlxsw_sp_acl_tcam_region_alloc(mlxsw_sp, region); if (err) goto err_tcam_region_alloc; @@ -721,23 +560,22 @@ mlxsw_sp_acl_tcam_region_create(struct mlxsw_sp *mlxsw_sp, if (err) goto err_tcam_region_enable; - err = mlxsw_sp_acl_tcam_region_catchall_add(mlxsw_sp, region); + err = ops->region_init(mlxsw_sp, region->priv, region); if (err) - goto err_tcam_region_catchall_add; + goto err_tcam_region_init; return region; -err_tcam_region_catchall_add: +err_tcam_region_init: mlxsw_sp_acl_tcam_region_disable(mlxsw_sp, region); err_tcam_region_enable: mlxsw_sp_acl_tcam_region_free(mlxsw_sp, region); err_tcam_region_alloc: +err_tcam_region_associate: mlxsw_sp_acl_tcam_region_id_put(tcam, region->id); err_region_id_get: mlxsw_afk_key_info_put(region->key_info); err_key_info_get: - parman_destroy(region->parman); -err_parman_create: kfree(region); return ERR_PTR(err); } @@ -746,12 +584,13 @@ static void mlxsw_sp_acl_tcam_region_destroy(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_acl_tcam_region *region) { - mlxsw_sp_acl_tcam_region_catchall_del(mlxsw_sp, region); + const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops; + + ops->region_fini(mlxsw_sp, region->priv); mlxsw_sp_acl_tcam_region_disable(mlxsw_sp, region); mlxsw_sp_acl_tcam_region_free(mlxsw_sp, region); mlxsw_sp_acl_tcam_region_id_put(region->group->tcam, region->id); mlxsw_afk_key_info_put(region->key_info); - 
parman_destroy(region->parman); kfree(region); } @@ -826,13 +665,14 @@ mlxsw_sp_acl_tcam_chunk_create(struct mlxsw_sp *mlxsw_sp, unsigned int priority, struct mlxsw_afk_element_usage *elusage) { + const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops; struct mlxsw_sp_acl_tcam_chunk *chunk; int err; if (priority == MLXSW_SP_ACL_TCAM_CATCHALL_PRIO) return ERR_PTR(-EINVAL); - chunk = kzalloc(sizeof(*chunk), GFP_KERNEL); + chunk = kzalloc(sizeof(*chunk) + ops->chunk_priv_size, GFP_KERNEL); if (!chunk) return ERR_PTR(-ENOMEM); chunk->priority = priority; @@ -844,7 +684,7 @@ mlxsw_sp_acl_tcam_chunk_create(struct mlxsw_sp *mlxsw_sp, if (err) goto err_chunk_assoc; - parman_prio_init(chunk->region->parman, &chunk->parman_prio, priority); + ops->chunk_init(chunk->region->priv, chunk->priv, priority); err = rhashtable_insert_fast(&group->chunk_ht, &chunk->ht_node, mlxsw_sp_acl_tcam_chunk_ht_params); @@ -854,7 +694,7 @@ mlxsw_sp_acl_tcam_chunk_create(struct mlxsw_sp *mlxsw_sp, return chunk; err_rhashtable_insert: - parman_prio_fini(&chunk->parman_prio); + ops->chunk_fini(chunk->priv); mlxsw_sp_acl_tcam_chunk_deassoc(mlxsw_sp, chunk); err_chunk_assoc: kfree(chunk); @@ -865,11 +705,12 @@ static void mlxsw_sp_acl_tcam_chunk_destroy(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_acl_tcam_chunk *chunk) { + const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops; struct mlxsw_sp_acl_tcam_group *group = chunk->group; rhashtable_remove_fast(&group->chunk_ht, &chunk->ht_node, mlxsw_sp_acl_tcam_chunk_ht_params); - parman_prio_fini(&chunk->parman_prio); + ops->chunk_fini(chunk->priv); mlxsw_sp_acl_tcam_chunk_deassoc(mlxsw_sp, chunk); kfree(chunk); } @@ -903,11 +744,19 @@ static void mlxsw_sp_acl_tcam_chunk_put(struct mlxsw_sp *mlxsw_sp, mlxsw_sp_acl_tcam_chunk_destroy(mlxsw_sp, chunk); } +static size_t mlxsw_sp_acl_tcam_entry_priv_size(struct mlxsw_sp *mlxsw_sp) +{ + const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops; + + return ops->entry_priv_size; +} + static int mlxsw_sp_acl_tcam_entry_add(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_acl_tcam_group *group, struct mlxsw_sp_acl_tcam_entry *entry, struct mlxsw_sp_acl_rule_info *rulei) { + const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops; struct mlxsw_sp_acl_tcam_chunk *chunk; struct mlxsw_sp_acl_tcam_region *region; int err; @@ -918,24 +767,16 @@ static int mlxsw_sp_acl_tcam_entry_add(struct mlxsw_sp *mlxsw_sp, return PTR_ERR(chunk); region = chunk->region; - err = parman_item_add(region->parman, &chunk->parman_prio, - &entry->parman_item); - if (err) - goto err_parman_item_add; - err = mlxsw_sp_acl_tcam_region_entry_insert(mlxsw_sp, region, - entry->parman_item.index, - rulei); + err = ops->entry_add(mlxsw_sp, region->priv, chunk->priv, + entry->priv, rulei); if (err) - goto err_rule_insert; + goto err_entry_add; entry->chunk = chunk; return 0; -err_rule_insert: - parman_item_remove(region->parman, &chunk->parman_prio, - &entry->parman_item); -err_parman_item_add: +err_entry_add: mlxsw_sp_acl_tcam_chunk_put(mlxsw_sp, chunk); return err; } @@ -943,13 +784,11 @@ err_parman_item_add: static void mlxsw_sp_acl_tcam_entry_del(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_acl_tcam_entry *entry) { + const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops; struct mlxsw_sp_acl_tcam_chunk *chunk = entry->chunk; struct mlxsw_sp_acl_tcam_region *region = chunk->region; - mlxsw_sp_acl_tcam_region_entry_remove(mlxsw_sp, region, - entry->parman_item.index); - parman_item_remove(region->parman, &chunk->parman_prio, - 
&entry->parman_item); + ops->entry_del(mlxsw_sp, region->priv, chunk->priv, entry->priv); mlxsw_sp_acl_tcam_chunk_put(mlxsw_sp, chunk); } @@ -958,22 +797,24 @@ mlxsw_sp_acl_tcam_entry_activity_get(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_acl_tcam_entry *entry, bool *activity) { + const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops; struct mlxsw_sp_acl_tcam_chunk *chunk = entry->chunk; struct mlxsw_sp_acl_tcam_region *region = chunk->region; - return mlxsw_sp_acl_tcam_region_entry_activity_get(mlxsw_sp, region, - entry->parman_item.index, - activity); + return ops->entry_activity_get(mlxsw_sp, region->priv, + entry->priv, activity); } static const enum mlxsw_afk_element mlxsw_sp_acl_tcam_pattern_ipv4[] = { MLXSW_AFK_ELEMENT_SRC_SYS_PORT, - MLXSW_AFK_ELEMENT_DMAC, - MLXSW_AFK_ELEMENT_SMAC, + MLXSW_AFK_ELEMENT_DMAC_32_47, + MLXSW_AFK_ELEMENT_DMAC_0_31, + MLXSW_AFK_ELEMENT_SMAC_32_47, + MLXSW_AFK_ELEMENT_SMAC_0_31, MLXSW_AFK_ELEMENT_ETHERTYPE, MLXSW_AFK_ELEMENT_IP_PROTO, - MLXSW_AFK_ELEMENT_SRC_IP4, - MLXSW_AFK_ELEMENT_DST_IP4, + MLXSW_AFK_ELEMENT_SRC_IP_0_31, + MLXSW_AFK_ELEMENT_DST_IP_0_31, MLXSW_AFK_ELEMENT_DST_L4_PORT, MLXSW_AFK_ELEMENT_SRC_L4_PORT, MLXSW_AFK_ELEMENT_VID, @@ -987,10 +828,14 @@ static const enum mlxsw_afk_element mlxsw_sp_acl_tcam_pattern_ipv4[] = { static const enum mlxsw_afk_element mlxsw_sp_acl_tcam_pattern_ipv6[] = { MLXSW_AFK_ELEMENT_ETHERTYPE, MLXSW_AFK_ELEMENT_IP_PROTO, - MLXSW_AFK_ELEMENT_SRC_IP6_HI, - MLXSW_AFK_ELEMENT_SRC_IP6_LO, - MLXSW_AFK_ELEMENT_DST_IP6_HI, - MLXSW_AFK_ELEMENT_DST_IP6_LO, + MLXSW_AFK_ELEMENT_SRC_IP_96_127, + MLXSW_AFK_ELEMENT_SRC_IP_64_95, + MLXSW_AFK_ELEMENT_SRC_IP_32_63, + MLXSW_AFK_ELEMENT_SRC_IP_0_31, + MLXSW_AFK_ELEMENT_DST_IP_96_127, + MLXSW_AFK_ELEMENT_DST_IP_64_95, + MLXSW_AFK_ELEMENT_DST_IP_32_63, + MLXSW_AFK_ELEMENT_DST_IP_0_31, MLXSW_AFK_ELEMENT_DST_L4_PORT, MLXSW_AFK_ELEMENT_SRC_L4_PORT, }; @@ -1019,10 +864,10 @@ struct mlxsw_sp_acl_tcam_flower_rule { static int mlxsw_sp_acl_tcam_flower_ruleset_add(struct mlxsw_sp *mlxsw_sp, - void *priv, void *ruleset_priv) + struct mlxsw_sp_acl_tcam *tcam, + void *ruleset_priv) { struct mlxsw_sp_acl_tcam_flower_ruleset *ruleset = ruleset_priv; - struct mlxsw_sp_acl_tcam *tcam = priv; return mlxsw_sp_acl_tcam_group_add(mlxsw_sp, tcam, &ruleset->group, mlxsw_sp_acl_tcam_patterns, @@ -1070,6 +915,12 @@ mlxsw_sp_acl_tcam_flower_ruleset_group_id(void *ruleset_priv) return mlxsw_sp_acl_tcam_group_id(&ruleset->group); } +static size_t mlxsw_sp_acl_tcam_flower_rule_priv_size(struct mlxsw_sp *mlxsw_sp) +{ + return sizeof(struct mlxsw_sp_acl_tcam_flower_rule) + + mlxsw_sp_acl_tcam_entry_priv_size(mlxsw_sp); +} + static int mlxsw_sp_acl_tcam_flower_rule_add(struct mlxsw_sp *mlxsw_sp, void *ruleset_priv, void *rule_priv, @@ -1107,7 +958,7 @@ static const struct mlxsw_sp_acl_profile_ops mlxsw_sp_acl_tcam_flower_ops = { .ruleset_bind = mlxsw_sp_acl_tcam_flower_ruleset_bind, .ruleset_unbind = mlxsw_sp_acl_tcam_flower_ruleset_unbind, .ruleset_group_id = mlxsw_sp_acl_tcam_flower_ruleset_group_id, - .rule_priv_size = sizeof(struct mlxsw_sp_acl_tcam_flower_rule), + .rule_priv_size = mlxsw_sp_acl_tcam_flower_rule_priv_size, .rule_add = mlxsw_sp_acl_tcam_flower_rule_add, .rule_del = mlxsw_sp_acl_tcam_flower_rule_del, .rule_activity_get = mlxsw_sp_acl_tcam_flower_rule_activity_get, @@ -1118,7 +969,7 @@ mlxsw_sp_acl_tcam_profile_ops_arr[] = { [MLXSW_SP_ACL_PROFILE_FLOWER] = &mlxsw_sp_acl_tcam_flower_ops, }; -static const struct mlxsw_sp_acl_profile_ops * +const struct mlxsw_sp_acl_profile_ops * 
mlxsw_sp_acl_tcam_profile_ops(struct mlxsw_sp *mlxsw_sp, enum mlxsw_sp_acl_profile profile) { @@ -1131,10 +982,3 @@ mlxsw_sp_acl_tcam_profile_ops(struct mlxsw_sp *mlxsw_sp, return NULL; return ops; } - -const struct mlxsw_sp_acl_ops mlxsw_sp_acl_tcam_ops = { - .priv_size = sizeof(struct mlxsw_sp_acl_tcam), - .init = mlxsw_sp_acl_tcam_init, - .fini = mlxsw_sp_acl_tcam_fini, - .profile_ops = mlxsw_sp_acl_tcam_profile_ops, -}; diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.h new file mode 100644 index 000000000000..68551da2221d --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.h @@ -0,0 +1,151 @@ +/* + * drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.h + * Copyright (c) 2017-2018 Mellanox Technologies. All rights reserved. + * Copyright (c) 2017-2018 Jiri Pirko <jiri@mellanox.com> + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the names of the copyright holders nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#ifndef _MLXSW_SPECTRUM_ACL_TCAM_H +#define _MLXSW_SPECTRUM_ACL_TCAM_H + +#include <linux/list.h> +#include <linux/parman.h> + +#include "reg.h" +#include "spectrum.h" +#include "core_acl_flex_keys.h" + +struct mlxsw_sp_acl_tcam { + unsigned long *used_regions; /* bit array */ + unsigned int max_regions; + unsigned long *used_groups; /* bit array */ + unsigned int max_groups; + unsigned int max_group_size; + unsigned long priv[0]; + /* priv has to be always the last item */ +}; + +size_t mlxsw_sp_acl_tcam_priv_size(struct mlxsw_sp *mlxsw_sp); +int mlxsw_sp_acl_tcam_init(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_tcam *tcam); +void mlxsw_sp_acl_tcam_fini(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_tcam *tcam); +int mlxsw_sp_acl_tcam_priority_get(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_rule_info *rulei, + u32 *priority, bool fillup_priority); + +struct mlxsw_sp_acl_profile_ops { + size_t ruleset_priv_size; + int (*ruleset_add)(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_tcam *tcam, void *ruleset_priv); + void (*ruleset_del)(struct mlxsw_sp *mlxsw_sp, void *ruleset_priv); + int (*ruleset_bind)(struct mlxsw_sp *mlxsw_sp, void *ruleset_priv, + struct mlxsw_sp_port *mlxsw_sp_port, + bool ingress); + void (*ruleset_unbind)(struct mlxsw_sp *mlxsw_sp, void *ruleset_priv, + struct mlxsw_sp_port *mlxsw_sp_port, + bool ingress); + u16 (*ruleset_group_id)(void *ruleset_priv); + size_t (*rule_priv_size)(struct mlxsw_sp *mlxsw_sp); + int (*rule_add)(struct mlxsw_sp *mlxsw_sp, + void *ruleset_priv, void *rule_priv, + struct mlxsw_sp_acl_rule_info *rulei); + void (*rule_del)(struct mlxsw_sp *mlxsw_sp, void *rule_priv); + int (*rule_activity_get)(struct mlxsw_sp *mlxsw_sp, void *rule_priv, + bool *activity); +}; + +const struct mlxsw_sp_acl_profile_ops * +mlxsw_sp_acl_tcam_profile_ops(struct mlxsw_sp *mlxsw_sp, + enum mlxsw_sp_acl_profile profile); + +#define MLXSW_SP_ACL_TCAM_REGION_BASE_COUNT 16 +#define MLXSW_SP_ACL_TCAM_REGION_RESIZE_STEP 16 + +#define MLXSW_SP_ACL_TCAM_CATCHALL_PRIO (~0U) + +struct mlxsw_sp_acl_tcam_group; + +struct mlxsw_sp_acl_tcam_region { + struct list_head list; /* Member of a TCAM group */ + struct list_head chunk_list; /* List of chunks under this region */ + struct mlxsw_sp_acl_tcam_group *group; + enum mlxsw_reg_ptar_key_type key_type; + u16 id; /* ACL ID and region ID - they are same */ + char tcam_region_info[MLXSW_REG_PXXX_TCAM_REGION_INFO_LEN]; + struct mlxsw_afk_key_info *key_info; + struct mlxsw_sp *mlxsw_sp; + unsigned long priv[0]; + /* priv has to be always the last item */ +}; + +struct mlxsw_sp_acl_ctcam_region { + struct parman *parman; + struct mlxsw_sp_acl_tcam_region *region; +}; + +struct mlxsw_sp_acl_ctcam_chunk { + struct parman_prio parman_prio; +}; + +struct mlxsw_sp_acl_ctcam_entry { + struct parman_item parman_item; +}; + +int mlxsw_sp_acl_ctcam_region_init(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_ctcam_region *cregion, + struct mlxsw_sp_acl_tcam_region *region); +void mlxsw_sp_acl_ctcam_region_fini(struct mlxsw_sp_acl_ctcam_region *cregion); +void mlxsw_sp_acl_ctcam_chunk_init(struct mlxsw_sp_acl_ctcam_region *cregion, + struct mlxsw_sp_acl_ctcam_chunk *cchunk, + unsigned int priority); +void mlxsw_sp_acl_ctcam_chunk_fini(struct mlxsw_sp_acl_ctcam_chunk *cchunk); +int mlxsw_sp_acl_ctcam_entry_add(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_ctcam_region *cregion, + struct mlxsw_sp_acl_ctcam_chunk *cchunk, + struct mlxsw_sp_acl_ctcam_entry *centry, + struct mlxsw_sp_acl_rule_info *rulei, + bool 
fillup_priority); +void mlxsw_sp_acl_ctcam_entry_del(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_ctcam_region *cregion, + struct mlxsw_sp_acl_ctcam_chunk *cchunk, + struct mlxsw_sp_acl_ctcam_entry *centry); +static inline unsigned int +mlxsw_sp_acl_ctcam_entry_offset(struct mlxsw_sp_acl_ctcam_entry *centry) +{ + return centry->parman_item.index; +} + +int mlxsw_sp_acl_atcam_region_associate(struct mlxsw_sp *mlxsw_sp, + u16 region_id); +int mlxsw_sp_acl_atcam_region_init(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_tcam_region *region); + +#endif diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c index 89dbf569dff5..201761a3539e 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c @@ -144,10 +144,12 @@ static void mlxsw_sp_flower_parse_ipv4(struct mlxsw_sp_acl_rule_info *rulei, FLOW_DISSECTOR_KEY_IPV4_ADDRS, f->mask); - mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_SRC_IP4, - ntohl(key->src), ntohl(mask->src)); - mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_DST_IP4, - ntohl(key->dst), ntohl(mask->dst)); + mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_SRC_IP_0_31, + (char *) &key->src, + (char *) &mask->src, 4); + mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_DST_IP_0_31, + (char *) &key->dst, + (char *) &mask->dst, 4); } static void mlxsw_sp_flower_parse_ipv6(struct mlxsw_sp_acl_rule_info *rulei, @@ -161,24 +163,31 @@ static void mlxsw_sp_flower_parse_ipv6(struct mlxsw_sp_acl_rule_info *rulei, skb_flow_dissector_target(f->dissector, FLOW_DISSECTOR_KEY_IPV6_ADDRS, f->mask); - size_t addr_half_size = sizeof(key->src) / 2; - - mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_SRC_IP6_HI, - &key->src.s6_addr[0], - &mask->src.s6_addr[0], - addr_half_size); - mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_SRC_IP6_LO, - &key->src.s6_addr[addr_half_size], - &mask->src.s6_addr[addr_half_size], - addr_half_size); - mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_DST_IP6_HI, - &key->dst.s6_addr[0], - &mask->dst.s6_addr[0], - addr_half_size); - mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_DST_IP6_LO, - &key->dst.s6_addr[addr_half_size], - &mask->dst.s6_addr[addr_half_size], - addr_half_size); + + mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_SRC_IP_96_127, + &key->src.s6_addr[0x0], + &mask->src.s6_addr[0x0], 4); + mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_SRC_IP_64_95, + &key->src.s6_addr[0x4], + &mask->src.s6_addr[0x4], 4); + mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_SRC_IP_32_63, + &key->src.s6_addr[0x8], + &mask->src.s6_addr[0x8], 4); + mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_SRC_IP_0_31, + &key->src.s6_addr[0xC], + &mask->src.s6_addr[0xC], 4); + mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_DST_IP_96_127, + &key->dst.s6_addr[0x0], + &mask->dst.s6_addr[0x0], 4); + mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_DST_IP_64_95, + &key->dst.s6_addr[0x4], + &mask->dst.s6_addr[0x4], 4); + mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_DST_IP_32_63, + &key->dst.s6_addr[0x8], + &mask->dst.s6_addr[0x8], 4); + mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_DST_IP_0_31, + &key->dst.s6_addr[0xC], + &mask->dst.s6_addr[0xC], 4); } static int mlxsw_sp_flower_parse_ports(struct mlxsw_sp *mlxsw_sp, @@ -340,13 +349,17 @@ static int mlxsw_sp_flower_parse(struct mlxsw_sp *mlxsw_sp, f->mask); 
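The flower hunks here stop feeding MAC and IP addresses to the flex-key code as single wide fields: a MAC becomes a 16-bit plus a 32-bit block (DMAC_32_47/DMAC_0_31), and each IPv6 address becomes four 32-bit blocks from bits 96-127 down to 0-31, taken at byte offsets 0x0, 0x4, 0x8 and 0xC of s6_addr. A condensed sketch of the IPv6 mapping follows; only the mlxsw identifiers come from this diff, the helper itself is hypothetical (the driver writes the four calls out explicitly):

/* Hypothetical helper, not part of the driver: shows how the four
 * 32-bit key blocks line up with the bytes of a 128-bit address.
 */
static void
mlxsw_sp_flower_keymask_ip6(struct mlxsw_sp_acl_rule_info *rulei,
			    const enum mlxsw_afk_element blocks[4],
			    struct in6_addr *key, struct in6_addr *mask)
{
	int i;

	/* blocks[0] is the *_IP_96_127 element and covers s6_addr[0x0..0x3];
	 * blocks[3] is *_IP_0_31 and covers s6_addr[0xC..0xF].
	 */
	for (i = 0; i < 4; i++)
		mlxsw_sp_acl_rulei_keymask_buf(rulei, blocks[i],
					       &key->s6_addr[4 * i],
					       &mask->s6_addr[4 * i], 4);
}

The IPv4 case reuses the low 32-bit elements, which is why the dedicated SRC_IP4/DST_IP4 elements disappear in the same series of hunks.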
mlxsw_sp_acl_rulei_keymask_buf(rulei, - MLXSW_AFK_ELEMENT_DMAC, - key->dst, mask->dst, - sizeof(key->dst)); + MLXSW_AFK_ELEMENT_DMAC_32_47, + key->dst, mask->dst, 2); + mlxsw_sp_acl_rulei_keymask_buf(rulei, + MLXSW_AFK_ELEMENT_DMAC_0_31, + key->dst + 2, mask->dst + 2, 4); + mlxsw_sp_acl_rulei_keymask_buf(rulei, + MLXSW_AFK_ELEMENT_SMAC_32_47, + key->src, mask->src, 2); mlxsw_sp_acl_rulei_keymask_buf(rulei, - MLXSW_AFK_ELEMENT_SMAC, - key->src, mask->src, - sizeof(key->src)); + MLXSW_AFK_ELEMENT_SMAC_0_31, + key->src + 2, mask->src + 2, 4); } if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_VLAN)) { diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_kvdl.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_kvdl.c index fe4327f547d2..fd557585514d 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_kvdl.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_kvdl.c @@ -1,7 +1,7 @@ /* * drivers/net/ethernet/mellanox/mlxsw/spectrum_kvdl.c - * Copyright (c) 2016 Mellanox Technologies. All rights reserved. - * Copyright (c) 2016 Jiri Pirko <jiri@mellanox.com> + * Copyright (c) 2016-2018 Mellanox Technologies. All rights reserved. + * Copyright (c) 2016-2018 Jiri Pirko <jiri@mellanox.com> * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: @@ -33,422 +33,74 @@ */ #include <linux/kernel.h> -#include <linux/bitops.h> +#include <linux/slab.h> #include "spectrum.h" -#define MLXSW_SP_KVDL_SINGLE_BASE 0 -#define MLXSW_SP_KVDL_SINGLE_SIZE 16384 -#define MLXSW_SP_KVDL_SINGLE_END \ - (MLXSW_SP_KVDL_SINGLE_SIZE + MLXSW_SP_KVDL_SINGLE_BASE - 1) - -#define MLXSW_SP_KVDL_CHUNKS_BASE \ - (MLXSW_SP_KVDL_SINGLE_BASE + MLXSW_SP_KVDL_SINGLE_SIZE) -#define MLXSW_SP_KVDL_CHUNKS_SIZE 49152 -#define MLXSW_SP_KVDL_CHUNKS_END \ - (MLXSW_SP_KVDL_CHUNKS_SIZE + MLXSW_SP_KVDL_CHUNKS_BASE - 1) - -#define MLXSW_SP_KVDL_LARGE_CHUNKS_BASE \ - (MLXSW_SP_KVDL_CHUNKS_BASE + MLXSW_SP_KVDL_CHUNKS_SIZE) -#define MLXSW_SP_KVDL_LARGE_CHUNKS_SIZE \ - (MLXSW_SP_KVD_LINEAR_SIZE - MLXSW_SP_KVDL_LARGE_CHUNKS_BASE) -#define MLXSW_SP_KVDL_LARGE_CHUNKS_END \ - (MLXSW_SP_KVDL_LARGE_CHUNKS_SIZE + MLXSW_SP_KVDL_LARGE_CHUNKS_BASE - 1) - -#define MLXSW_SP_KVDL_SINGLE_ALLOC_SIZE 1 -#define MLXSW_SP_KVDL_CHUNKS_ALLOC_SIZE 32 -#define MLXSW_SP_KVDL_LARGE_CHUNKS_ALLOC_SIZE 512 - -struct mlxsw_sp_kvdl_part_info { - unsigned int part_index; - unsigned int start_index; - unsigned int end_index; - unsigned int alloc_size; - enum mlxsw_sp_resource_id resource_id; -}; - -enum mlxsw_sp_kvdl_part_id { - MLXSW_SP_KVDL_PART_ID_SINGLE, - MLXSW_SP_KVDL_PART_ID_CHUNKS, - MLXSW_SP_KVDL_PART_ID_LARGE_CHUNKS, -}; - -#define MLXSW_SP_KVDL_PART_INFO(id) \ -[MLXSW_SP_KVDL_PART_ID_##id] = { \ - .start_index = MLXSW_SP_KVDL_##id##_BASE, \ - .end_index = MLXSW_SP_KVDL_##id##_END, \ - .alloc_size = MLXSW_SP_KVDL_##id##_ALLOC_SIZE, \ - .resource_id = MLXSW_SP_RESOURCE_KVD_LINEAR_##id, \ -} - -static const struct mlxsw_sp_kvdl_part_info mlxsw_sp_kvdl_parts_info[] = { - MLXSW_SP_KVDL_PART_INFO(SINGLE), - MLXSW_SP_KVDL_PART_INFO(CHUNKS), - MLXSW_SP_KVDL_PART_INFO(LARGE_CHUNKS), -}; - -#define MLXSW_SP_KVDL_PARTS_INFO_LEN ARRAY_SIZE(mlxsw_sp_kvdl_parts_info) - -struct mlxsw_sp_kvdl_part { - struct mlxsw_sp_kvdl_part_info info; - unsigned long usage[0]; /* Entries */ -}; - struct mlxsw_sp_kvdl { - struct mlxsw_sp_kvdl_part *parts[MLXSW_SP_KVDL_PARTS_INFO_LEN]; + const struct mlxsw_sp_kvdl_ops *kvdl_ops; + unsigned long priv[0]; + /* priv has to be always the last item */ }; 
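The rewritten spectrum_kvdl.c above shrinks struct mlxsw_sp_kvdl to an ops pointer plus a trailing, implementation-sized priv[] area, and every exported call then dispatches through kvdl_ops; the ACL TCAM and MR TCAM changes in this diff follow the same shape. Below is a minimal, generic sketch of that pattern under illustrative example_* names (only the mlxsw identifiers mentioned above are from the diff):

/* Generic "ops + trailing priv[]" pattern: one allocation holds both the
 * core bookkeeping and the back-end-private area, and the core only ever
 * touches the back end through the ops table. example_* names are
 * illustrative, not driver code.
 */
struct example_ops {
	size_t priv_size;			/* like kvdl_ops->priv_size */
	int (*init)(void *priv);
	void (*fini)(void *priv);
};

struct example_obj {
	const struct example_ops *ops;
	unsigned long priv[0];			/* must stay the last member */
};

static struct example_obj *example_obj_create(const struct example_ops *ops)
{
	struct example_obj *obj;
	int err;

	/* Single allocation covering the core struct and the private area. */
	obj = kzalloc(sizeof(*obj) + ops->priv_size, GFP_KERNEL);
	if (!obj)
		return ERR_PTR(-ENOMEM);
	obj->ops = ops;

	err = ops->init(obj->priv);
	if (err) {
		kfree(obj);
		return ERR_PTR(err);
	}
	return obj;
}

static void example_obj_destroy(struct example_obj *obj)
{
	obj->ops->fini(obj->priv);
	kfree(obj);
}

Keeping priv[] as the last member avoids a second allocation and pointer chase, and presumably lets different ASIC back ends size their private state without the core knowing its layout.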
-static struct mlxsw_sp_kvdl_part * -mlxsw_sp_kvdl_alloc_size_part(struct mlxsw_sp_kvdl *kvdl, - unsigned int alloc_size) -{ - struct mlxsw_sp_kvdl_part *part, *min_part = NULL; - int i; - - for (i = 0; i < MLXSW_SP_KVDL_PARTS_INFO_LEN; i++) { - part = kvdl->parts[i]; - if (alloc_size <= part->info.alloc_size && - (!min_part || - part->info.alloc_size <= min_part->info.alloc_size)) - min_part = part; - } - - return min_part ?: ERR_PTR(-ENOBUFS); -} - -static struct mlxsw_sp_kvdl_part * -mlxsw_sp_kvdl_index_part(struct mlxsw_sp_kvdl *kvdl, u32 kvdl_index) -{ - struct mlxsw_sp_kvdl_part *part; - int i; - - for (i = 0; i < MLXSW_SP_KVDL_PARTS_INFO_LEN; i++) { - part = kvdl->parts[i]; - if (kvdl_index >= part->info.start_index && - kvdl_index <= part->info.end_index) - return part; - } - - return ERR_PTR(-EINVAL); -} - -static u32 -mlxsw_sp_entry_index_kvdl_index(const struct mlxsw_sp_kvdl_part_info *info, - unsigned int entry_index) -{ - return info->start_index + entry_index * info->alloc_size; -} - -static unsigned int -mlxsw_sp_kvdl_index_entry_index(const struct mlxsw_sp_kvdl_part_info *info, - u32 kvdl_index) -{ - return (kvdl_index - info->start_index) / info->alloc_size; -} - -static int mlxsw_sp_kvdl_part_alloc(struct mlxsw_sp_kvdl_part *part, - u32 *p_kvdl_index) -{ - const struct mlxsw_sp_kvdl_part_info *info = &part->info; - unsigned int entry_index, nr_entries; - - nr_entries = (info->end_index - info->start_index + 1) / - info->alloc_size; - entry_index = find_first_zero_bit(part->usage, nr_entries); - if (entry_index == nr_entries) - return -ENOBUFS; - __set_bit(entry_index, part->usage); - - *p_kvdl_index = mlxsw_sp_entry_index_kvdl_index(info, entry_index); - - return 0; -} - -static void mlxsw_sp_kvdl_part_free(struct mlxsw_sp_kvdl_part *part, - u32 kvdl_index) -{ - const struct mlxsw_sp_kvdl_part_info *info = &part->info; - unsigned int entry_index; - - entry_index = mlxsw_sp_kvdl_index_entry_index(info, kvdl_index); - __clear_bit(entry_index, part->usage); -} - -int mlxsw_sp_kvdl_alloc(struct mlxsw_sp *mlxsw_sp, unsigned int entry_count, - u32 *p_entry_index) -{ - struct mlxsw_sp_kvdl_part *part; - - /* Find partition with smallest allocation size satisfying the - * requested size. 
- */ - part = mlxsw_sp_kvdl_alloc_size_part(mlxsw_sp->kvdl, entry_count); - if (IS_ERR(part)) - return PTR_ERR(part); - - return mlxsw_sp_kvdl_part_alloc(part, p_entry_index); -} - -void mlxsw_sp_kvdl_free(struct mlxsw_sp *mlxsw_sp, int entry_index) -{ - struct mlxsw_sp_kvdl_part *part; - - part = mlxsw_sp_kvdl_index_part(mlxsw_sp->kvdl, entry_index); - if (IS_ERR(part)) - return; - mlxsw_sp_kvdl_part_free(part, entry_index); -} - -int mlxsw_sp_kvdl_alloc_size_query(struct mlxsw_sp *mlxsw_sp, - unsigned int entry_count, - unsigned int *p_alloc_size) -{ - struct mlxsw_sp_kvdl_part *part; - - part = mlxsw_sp_kvdl_alloc_size_part(mlxsw_sp->kvdl, entry_count); - if (IS_ERR(part)) - return PTR_ERR(part); - - *p_alloc_size = part->info.alloc_size; - - return 0; -} - -static void mlxsw_sp_kvdl_part_update(struct mlxsw_sp_kvdl_part *part, - struct mlxsw_sp_kvdl_part *part_prev, - unsigned int size) -{ - - if (!part_prev) { - part->info.end_index = size - 1; - } else { - part->info.start_index = part_prev->info.end_index + 1; - part->info.end_index = part->info.start_index + size - 1; - } -} - -static struct mlxsw_sp_kvdl_part * -mlxsw_sp_kvdl_part_init(struct mlxsw_sp *mlxsw_sp, - const struct mlxsw_sp_kvdl_part_info *info, - struct mlxsw_sp_kvdl_part *part_prev) +int mlxsw_sp_kvdl_init(struct mlxsw_sp *mlxsw_sp) { - struct devlink *devlink = priv_to_devlink(mlxsw_sp->core); - struct mlxsw_sp_kvdl_part *part; - bool need_update = true; - unsigned int nr_entries; - size_t usage_size; - u64 resource_size; + const struct mlxsw_sp_kvdl_ops *kvdl_ops = mlxsw_sp->kvdl_ops; + struct mlxsw_sp_kvdl *kvdl; int err; - err = devlink_resource_size_get(devlink, info->resource_id, - &resource_size); - if (err) { - need_update = false; - resource_size = info->end_index - info->start_index + 1; - } - - nr_entries = div_u64(resource_size, info->alloc_size); - usage_size = BITS_TO_LONGS(nr_entries) * sizeof(unsigned long); - part = kzalloc(sizeof(*part) + usage_size, GFP_KERNEL); - if (!part) - return ERR_PTR(-ENOMEM); - - memcpy(&part->info, info, sizeof(part->info)); - - if (need_update) - mlxsw_sp_kvdl_part_update(part, part_prev, resource_size); - return part; -} - -static void mlxsw_sp_kvdl_part_fini(struct mlxsw_sp_kvdl_part *part) -{ - kfree(part); -} - -static int mlxsw_sp_kvdl_parts_init(struct mlxsw_sp *mlxsw_sp) -{ - struct mlxsw_sp_kvdl *kvdl = mlxsw_sp->kvdl; - const struct mlxsw_sp_kvdl_part_info *info; - struct mlxsw_sp_kvdl_part *part_prev = NULL; - int err, i; + kvdl = kzalloc(sizeof(*mlxsw_sp->kvdl) + kvdl_ops->priv_size, + GFP_KERNEL); + if (!kvdl) + return -ENOMEM; + kvdl->kvdl_ops = kvdl_ops; + mlxsw_sp->kvdl = kvdl; - for (i = 0; i < MLXSW_SP_KVDL_PARTS_INFO_LEN; i++) { - info = &mlxsw_sp_kvdl_parts_info[i]; - kvdl->parts[i] = mlxsw_sp_kvdl_part_init(mlxsw_sp, info, - part_prev); - if (IS_ERR(kvdl->parts[i])) { - err = PTR_ERR(kvdl->parts[i]); - goto err_kvdl_part_init; - } - part_prev = kvdl->parts[i]; - } + err = kvdl_ops->init(mlxsw_sp, kvdl->priv); + if (err) + goto err_init; return 0; -err_kvdl_part_init: - for (i--; i >= 0; i--) - mlxsw_sp_kvdl_part_fini(kvdl->parts[i]); +err_init: + kfree(kvdl); return err; } -static void mlxsw_sp_kvdl_parts_fini(struct mlxsw_sp *mlxsw_sp) +void mlxsw_sp_kvdl_fini(struct mlxsw_sp *mlxsw_sp) { struct mlxsw_sp_kvdl *kvdl = mlxsw_sp->kvdl; - int i; - - for (i = 0; i < MLXSW_SP_KVDL_PARTS_INFO_LEN; i++) - mlxsw_sp_kvdl_part_fini(kvdl->parts[i]); -} - -static u64 mlxsw_sp_kvdl_part_occ(struct mlxsw_sp_kvdl_part *part) -{ - const struct mlxsw_sp_kvdl_part_info 
*info = &part->info; - unsigned int nr_entries; - int bit = -1; - u64 occ = 0; - - nr_entries = (info->end_index - - info->start_index + 1) / - info->alloc_size; - while ((bit = find_next_bit(part->usage, nr_entries, bit + 1)) - < nr_entries) - occ += info->alloc_size; - return occ; -} - -static u64 mlxsw_sp_kvdl_occ_get(void *priv) -{ - const struct mlxsw_sp *mlxsw_sp = priv; - u64 occ = 0; - int i; - - for (i = 0; i < MLXSW_SP_KVDL_PARTS_INFO_LEN; i++) - occ += mlxsw_sp_kvdl_part_occ(mlxsw_sp->kvdl->parts[i]); - - return occ; -} - -static u64 mlxsw_sp_kvdl_single_occ_get(void *priv) -{ - const struct mlxsw_sp *mlxsw_sp = priv; - struct mlxsw_sp_kvdl_part *part; - - part = mlxsw_sp->kvdl->parts[MLXSW_SP_KVDL_PART_ID_SINGLE]; - return mlxsw_sp_kvdl_part_occ(part); -} - -static u64 mlxsw_sp_kvdl_chunks_occ_get(void *priv) -{ - const struct mlxsw_sp *mlxsw_sp = priv; - struct mlxsw_sp_kvdl_part *part; - - part = mlxsw_sp->kvdl->parts[MLXSW_SP_KVDL_PART_ID_CHUNKS]; - return mlxsw_sp_kvdl_part_occ(part); -} - -static u64 mlxsw_sp_kvdl_large_chunks_occ_get(void *priv) -{ - const struct mlxsw_sp *mlxsw_sp = priv; - struct mlxsw_sp_kvdl_part *part; - part = mlxsw_sp->kvdl->parts[MLXSW_SP_KVDL_PART_ID_LARGE_CHUNKS]; - return mlxsw_sp_kvdl_part_occ(part); + kvdl->kvdl_ops->fini(mlxsw_sp, kvdl->priv); + kfree(kvdl); } -int mlxsw_sp_kvdl_resources_register(struct mlxsw_core *mlxsw_core) +int mlxsw_sp_kvdl_alloc(struct mlxsw_sp *mlxsw_sp, + enum mlxsw_sp_kvdl_entry_type type, + unsigned int entry_count, u32 *p_entry_index) { - struct devlink *devlink = priv_to_devlink(mlxsw_core); - static struct devlink_resource_size_params size_params; - u32 kvdl_max_size; - int err; - - kvdl_max_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE) - - MLXSW_CORE_RES_GET(mlxsw_core, KVD_SINGLE_MIN_SIZE) - - MLXSW_CORE_RES_GET(mlxsw_core, KVD_DOUBLE_MIN_SIZE); - - devlink_resource_size_params_init(&size_params, 0, kvdl_max_size, - MLXSW_SP_KVDL_SINGLE_ALLOC_SIZE, - DEVLINK_RESOURCE_UNIT_ENTRY); - err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_LINEAR_SINGLES, - MLXSW_SP_KVDL_SINGLE_SIZE, - MLXSW_SP_RESOURCE_KVD_LINEAR_SINGLE, - MLXSW_SP_RESOURCE_KVD_LINEAR, - &size_params); - if (err) - return err; - - devlink_resource_size_params_init(&size_params, 0, kvdl_max_size, - MLXSW_SP_KVDL_CHUNKS_ALLOC_SIZE, - DEVLINK_RESOURCE_UNIT_ENTRY); - err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_LINEAR_CHUNKS, - MLXSW_SP_KVDL_CHUNKS_SIZE, - MLXSW_SP_RESOURCE_KVD_LINEAR_CHUNKS, - MLXSW_SP_RESOURCE_KVD_LINEAR, - &size_params); - if (err) - return err; + struct mlxsw_sp_kvdl *kvdl = mlxsw_sp->kvdl; - devlink_resource_size_params_init(&size_params, 0, kvdl_max_size, - MLXSW_SP_KVDL_LARGE_CHUNKS_ALLOC_SIZE, - DEVLINK_RESOURCE_UNIT_ENTRY); - err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_LINEAR_LARGE_CHUNKS, - MLXSW_SP_KVDL_LARGE_CHUNKS_SIZE, - MLXSW_SP_RESOURCE_KVD_LINEAR_LARGE_CHUNKS, - MLXSW_SP_RESOURCE_KVD_LINEAR, - &size_params); - return err; + return kvdl->kvdl_ops->alloc(mlxsw_sp, kvdl->priv, type, + entry_count, p_entry_index); } -int mlxsw_sp_kvdl_init(struct mlxsw_sp *mlxsw_sp) +void mlxsw_sp_kvdl_free(struct mlxsw_sp *mlxsw_sp, + enum mlxsw_sp_kvdl_entry_type type, + unsigned int entry_count, int entry_index) { - struct devlink *devlink = priv_to_devlink(mlxsw_sp->core); - struct mlxsw_sp_kvdl *kvdl; - int err; - - kvdl = kzalloc(sizeof(*mlxsw_sp->kvdl), GFP_KERNEL); - if (!kvdl) - return -ENOMEM; - mlxsw_sp->kvdl = kvdl; - - err = mlxsw_sp_kvdl_parts_init(mlxsw_sp); - 
if (err) - goto err_kvdl_parts_init; - - devlink_resource_occ_get_register(devlink, - MLXSW_SP_RESOURCE_KVD_LINEAR, - mlxsw_sp_kvdl_occ_get, - mlxsw_sp); - devlink_resource_occ_get_register(devlink, - MLXSW_SP_RESOURCE_KVD_LINEAR_SINGLE, - mlxsw_sp_kvdl_single_occ_get, - mlxsw_sp); - devlink_resource_occ_get_register(devlink, - MLXSW_SP_RESOURCE_KVD_LINEAR_CHUNKS, - mlxsw_sp_kvdl_chunks_occ_get, - mlxsw_sp); - devlink_resource_occ_get_register(devlink, - MLXSW_SP_RESOURCE_KVD_LINEAR_LARGE_CHUNKS, - mlxsw_sp_kvdl_large_chunks_occ_get, - mlxsw_sp); - - return 0; + struct mlxsw_sp_kvdl *kvdl = mlxsw_sp->kvdl; -err_kvdl_parts_init: - kfree(mlxsw_sp->kvdl); - return err; + kvdl->kvdl_ops->free(mlxsw_sp, kvdl->priv, type, + entry_count, entry_index); } -void mlxsw_sp_kvdl_fini(struct mlxsw_sp *mlxsw_sp) +int mlxsw_sp_kvdl_alloc_count_query(struct mlxsw_sp *mlxsw_sp, + enum mlxsw_sp_kvdl_entry_type type, + unsigned int entry_count, + unsigned int *p_alloc_count) { - struct devlink *devlink = priv_to_devlink(mlxsw_sp->core); + struct mlxsw_sp_kvdl *kvdl = mlxsw_sp->kvdl; - devlink_resource_occ_get_unregister(devlink, - MLXSW_SP_RESOURCE_KVD_LINEAR_LARGE_CHUNKS); - devlink_resource_occ_get_unregister(devlink, - MLXSW_SP_RESOURCE_KVD_LINEAR_CHUNKS); - devlink_resource_occ_get_unregister(devlink, - MLXSW_SP_RESOURCE_KVD_LINEAR_SINGLE); - devlink_resource_occ_get_unregister(devlink, - MLXSW_SP_RESOURCE_KVD_LINEAR); - mlxsw_sp_kvdl_parts_fini(mlxsw_sp); - kfree(mlxsw_sp->kvdl); + return kvdl->kvdl_ops->alloc_size_query(mlxsw_sp, kvdl->priv, type, + entry_count, p_alloc_count); } diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c index a82539609d49..98dcaf78365c 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c @@ -1075,6 +1075,6 @@ void mlxsw_sp_mr_fini(struct mlxsw_sp *mlxsw_sp) struct mlxsw_sp_mr *mr = mlxsw_sp->mr; cancel_delayed_work_sync(&mr->stats_update_dw); - mr->mr_ops->fini(mr->priv); + mr->mr_ops->fini(mlxsw_sp, mr->priv); kfree(mr); } diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.h index 7c864a86811d..c92fa90dca31 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.h @@ -46,15 +46,6 @@ enum mlxsw_sp_mr_route_action { MLXSW_SP_MR_ROUTE_ACTION_TRAP_AND_FORWARD, }; -enum mlxsw_sp_mr_route_prio { - MLXSW_SP_MR_ROUTE_PRIO_SG, - MLXSW_SP_MR_ROUTE_PRIO_STARG, - MLXSW_SP_MR_ROUTE_PRIO_CATCHALL, - __MLXSW_SP_MR_ROUTE_PRIO_MAX -}; - -#define MLXSW_SP_MR_ROUTE_PRIO_MAX (__MLXSW_SP_MR_ROUTE_PRIO_MAX - 1) - struct mlxsw_sp_mr_route_key { int vrid; enum mlxsw_sp_l3proto proto; @@ -101,7 +92,7 @@ struct mlxsw_sp_mr_ops { u16 erif_index); void (*route_destroy)(struct mlxsw_sp *mlxsw_sp, void *priv, void *route_priv); - void (*fini)(void *priv); + void (*fini)(struct mlxsw_sp *mlxsw_sp, void *priv); }; struct mlxsw_sp_mr; diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr_tcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr_tcam.c index 4f4c0d311883..e9c9f1f45b9d 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr_tcam.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr_tcam.c @@ -1,7 +1,8 @@ /* * drivers/net/ethernet/mellanox/mlxsw/spectrum_mr_tcam.c - * Copyright (c) 2017 Mellanox Technologies. All rights reserved. + * Copyright (c) 2017-2018 Mellanox Technologies. All rights reserved. 
* Copyright (c) 2017 Yotam Gigi <yotamg@mellanox.com> + * Copyright (c) 2018 Jiri Pirko <jiri@mellanox.com> * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: @@ -35,7 +36,6 @@ #include <linux/kernel.h> #include <linux/list.h> #include <linux/netdevice.h> -#include <linux/parman.h> #include "spectrum_mr_tcam.h" #include "reg.h" @@ -43,15 +43,8 @@ #include "core_acl_flex_actions.h" #include "spectrum_mr.h" -struct mlxsw_sp_mr_tcam_region { - struct mlxsw_sp *mlxsw_sp; - enum mlxsw_reg_rtar_key_type rtar_key_type; - struct parman *parman; - struct parman_prio *parman_prios; -}; - struct mlxsw_sp_mr_tcam { - struct mlxsw_sp_mr_tcam_region tcam_regions[MLXSW_SP_L3_PROTO_MAX]; + void *priv; }; /* This struct maps to one RIGR2 register entry */ @@ -84,8 +77,6 @@ mlxsw_sp_mr_erif_list_init(struct mlxsw_sp_mr_tcam_erif_list *erif_list) INIT_LIST_HEAD(&erif_list->erif_sublists); } -#define MLXSW_SP_KVDL_RIGR2_SIZE 1 - static struct mlxsw_sp_mr_erif_sublist * mlxsw_sp_mr_erif_sublist_create(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_mr_tcam_erif_list *erif_list) @@ -96,8 +87,8 @@ mlxsw_sp_mr_erif_sublist_create(struct mlxsw_sp *mlxsw_sp, erif_sublist = kzalloc(sizeof(*erif_sublist), GFP_KERNEL); if (!erif_sublist) return ERR_PTR(-ENOMEM); - err = mlxsw_sp_kvdl_alloc(mlxsw_sp, MLXSW_SP_KVDL_RIGR2_SIZE, - &erif_sublist->rigr2_kvdl_index); + err = mlxsw_sp_kvdl_alloc(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_MCRIGR, + 1, &erif_sublist->rigr2_kvdl_index); if (err) { kfree(erif_sublist); return ERR_PTR(err); @@ -112,7 +103,8 @@ mlxsw_sp_mr_erif_sublist_destroy(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_mr_erif_sublist *erif_sublist) { list_del(&erif_sublist->list); - mlxsw_sp_kvdl_free(mlxsw_sp, erif_sublist->rigr2_kvdl_index); + mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_MCRIGR, + 1, erif_sublist->rigr2_kvdl_index); kfree(erif_sublist); } @@ -221,12 +213,11 @@ struct mlxsw_sp_mr_tcam_route { struct mlxsw_sp_mr_tcam_erif_list erif_list; struct mlxsw_afa_block *afa_block; u32 counter_index; - struct parman_item parman_item; - struct parman_prio *parman_prio; enum mlxsw_sp_mr_route_action action; struct mlxsw_sp_mr_route_key key; u16 irif_index; u16 min_mtu; + void *priv; }; static struct mlxsw_afa_block * @@ -297,60 +288,6 @@ mlxsw_sp_mr_tcam_afa_block_destroy(struct mlxsw_afa_block *afa_block) mlxsw_afa_block_destroy(afa_block); } -static int mlxsw_sp_mr_tcam_route_replace(struct mlxsw_sp *mlxsw_sp, - struct parman_item *parman_item, - struct mlxsw_sp_mr_route_key *key, - struct mlxsw_afa_block *afa_block) -{ - char rmft2_pl[MLXSW_REG_RMFT2_LEN]; - - switch (key->proto) { - case MLXSW_SP_L3_PROTO_IPV4: - mlxsw_reg_rmft2_ipv4_pack(rmft2_pl, true, parman_item->index, - key->vrid, - MLXSW_REG_RMFT2_IRIF_MASK_IGNORE, 0, - ntohl(key->group.addr4), - ntohl(key->group_mask.addr4), - ntohl(key->source.addr4), - ntohl(key->source_mask.addr4), - mlxsw_afa_block_first_set(afa_block)); - break; - case MLXSW_SP_L3_PROTO_IPV6: - mlxsw_reg_rmft2_ipv6_pack(rmft2_pl, true, parman_item->index, - key->vrid, - MLXSW_REG_RMFT2_IRIF_MASK_IGNORE, 0, - key->group.addr6, - key->group_mask.addr6, - key->source.addr6, - key->source_mask.addr6, - mlxsw_afa_block_first_set(afa_block)); - } - - return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rmft2), rmft2_pl); -} - -static int mlxsw_sp_mr_tcam_route_remove(struct mlxsw_sp *mlxsw_sp, int vrid, - struct mlxsw_sp_mr_route_key *key, - struct parman_item *parman_item) -{ - struct 
in6_addr zero_addr = IN6ADDR_ANY_INIT; - char rmft2_pl[MLXSW_REG_RMFT2_LEN]; - - switch (key->proto) { - case MLXSW_SP_L3_PROTO_IPV4: - mlxsw_reg_rmft2_ipv4_pack(rmft2_pl, false, parman_item->index, - vrid, 0, 0, 0, 0, 0, 0, NULL); - break; - case MLXSW_SP_L3_PROTO_IPV6: - mlxsw_reg_rmft2_ipv6_pack(rmft2_pl, false, parman_item->index, - vrid, 0, 0, zero_addr, zero_addr, - zero_addr, zero_addr, NULL); - break; - } - - return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rmft2), rmft2_pl); -} - static int mlxsw_sp_mr_tcam_erif_populate(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_mr_tcam_erif_list *erif_list, @@ -370,51 +307,12 @@ mlxsw_sp_mr_tcam_erif_populate(struct mlxsw_sp *mlxsw_sp, return 0; } -static struct mlxsw_sp_mr_tcam_region * -mlxsw_sp_mr_tcam_protocol_region(struct mlxsw_sp_mr_tcam *mr_tcam, - enum mlxsw_sp_l3proto proto) -{ - return &mr_tcam->tcam_regions[proto]; -} - -static int -mlxsw_sp_mr_tcam_route_parman_item_add(struct mlxsw_sp_mr_tcam *mr_tcam, - struct mlxsw_sp_mr_tcam_route *route, - enum mlxsw_sp_mr_route_prio prio) -{ - struct mlxsw_sp_mr_tcam_region *tcam_region; - int err; - - tcam_region = mlxsw_sp_mr_tcam_protocol_region(mr_tcam, - route->key.proto); - err = parman_item_add(tcam_region->parman, - &tcam_region->parman_prios[prio], - &route->parman_item); - if (err) - return err; - - route->parman_prio = &tcam_region->parman_prios[prio]; - return 0; -} - -static void -mlxsw_sp_mr_tcam_route_parman_item_remove(struct mlxsw_sp_mr_tcam *mr_tcam, - struct mlxsw_sp_mr_tcam_route *route) -{ - struct mlxsw_sp_mr_tcam_region *tcam_region; - - tcam_region = mlxsw_sp_mr_tcam_protocol_region(mr_tcam, - route->key.proto); - - parman_item_remove(tcam_region->parman, - route->parman_prio, &route->parman_item); -} - static int mlxsw_sp_mr_tcam_route_create(struct mlxsw_sp *mlxsw_sp, void *priv, void *route_priv, struct mlxsw_sp_mr_route_params *route_params) { + const struct mlxsw_sp_mr_tcam_ops *ops = mlxsw_sp->mr_tcam_ops; struct mlxsw_sp_mr_tcam_route *route = route_priv; struct mlxsw_sp_mr_tcam *mr_tcam = priv; int err; @@ -448,22 +346,23 @@ mlxsw_sp_mr_tcam_route_create(struct mlxsw_sp *mlxsw_sp, void *priv, goto err_afa_block_create; } - /* Allocate place in the TCAM */ - err = mlxsw_sp_mr_tcam_route_parman_item_add(mr_tcam, route, - route_params->prio); - if (err) - goto err_parman_item_add; + route->priv = kzalloc(ops->route_priv_size, GFP_KERNEL); + if (!route->priv) { + err = -ENOMEM; + goto err_route_priv_alloc; + } /* Write the route to the TCAM */ - err = mlxsw_sp_mr_tcam_route_replace(mlxsw_sp, &route->parman_item, - &route->key, route->afa_block); + err = ops->route_create(mlxsw_sp, mr_tcam->priv, route->priv, + &route->key, route->afa_block, + route_params->prio); if (err) - goto err_route_replace; + goto err_route_create; return 0; -err_route_replace: - mlxsw_sp_mr_tcam_route_parman_item_remove(mr_tcam, route); -err_parman_item_add: +err_route_create: + kfree(route->priv); +err_route_priv_alloc: mlxsw_sp_mr_tcam_afa_block_destroy(route->afa_block); err_afa_block_create: mlxsw_sp_flow_counter_free(mlxsw_sp, route->counter_index); @@ -476,12 +375,12 @@ err_counter_alloc: static void mlxsw_sp_mr_tcam_route_destroy(struct mlxsw_sp *mlxsw_sp, void *priv, void *route_priv) { + const struct mlxsw_sp_mr_tcam_ops *ops = mlxsw_sp->mr_tcam_ops; struct mlxsw_sp_mr_tcam_route *route = route_priv; struct mlxsw_sp_mr_tcam *mr_tcam = priv; - mlxsw_sp_mr_tcam_route_remove(mlxsw_sp, route->key.vrid, - &route->key, &route->parman_item); - 
mlxsw_sp_mr_tcam_route_parman_item_remove(mr_tcam, route); + ops->route_destroy(mlxsw_sp, mr_tcam->priv, route->priv, &route->key); + kfree(route->priv); mlxsw_sp_mr_tcam_afa_block_destroy(route->afa_block); mlxsw_sp_flow_counter_free(mlxsw_sp, route->counter_index); mlxsw_sp_mr_erif_list_flush(mlxsw_sp, &route->erif_list); @@ -502,6 +401,7 @@ mlxsw_sp_mr_tcam_route_action_update(struct mlxsw_sp *mlxsw_sp, void *route_priv, enum mlxsw_sp_mr_route_action route_action) { + const struct mlxsw_sp_mr_tcam_ops *ops = mlxsw_sp->mr_tcam_ops; struct mlxsw_sp_mr_tcam_route *route = route_priv; struct mlxsw_afa_block *afa_block; int err; @@ -516,8 +416,7 @@ mlxsw_sp_mr_tcam_route_action_update(struct mlxsw_sp *mlxsw_sp, return PTR_ERR(afa_block); /* Update the TCAM route entry */ - err = mlxsw_sp_mr_tcam_route_replace(mlxsw_sp, &route->parman_item, - &route->key, afa_block); + err = ops->route_update(mlxsw_sp, route->priv, &route->key, afa_block); if (err) goto err; @@ -534,6 +433,7 @@ err: static int mlxsw_sp_mr_tcam_route_min_mtu_update(struct mlxsw_sp *mlxsw_sp, void *route_priv, u16 min_mtu) { + const struct mlxsw_sp_mr_tcam_ops *ops = mlxsw_sp->mr_tcam_ops; struct mlxsw_sp_mr_tcam_route *route = route_priv; struct mlxsw_afa_block *afa_block; int err; @@ -549,8 +449,7 @@ static int mlxsw_sp_mr_tcam_route_min_mtu_update(struct mlxsw_sp *mlxsw_sp, return PTR_ERR(afa_block); /* Update the TCAM route entry */ - err = mlxsw_sp_mr_tcam_route_replace(mlxsw_sp, &route->parman_item, - &route->key, afa_block); + err = ops->route_update(mlxsw_sp, route->priv, &route->key, afa_block); if (err) goto err; @@ -596,6 +495,7 @@ static int mlxsw_sp_mr_tcam_route_erif_add(struct mlxsw_sp *mlxsw_sp, static int mlxsw_sp_mr_tcam_route_erif_del(struct mlxsw_sp *mlxsw_sp, void *route_priv, u16 erif_index) { + const struct mlxsw_sp_mr_tcam_ops *ops = mlxsw_sp->mr_tcam_ops; struct mlxsw_sp_mr_tcam_route *route = route_priv; struct mlxsw_sp_mr_erif_sublist *erif_sublist; struct mlxsw_sp_mr_tcam_erif_list erif_list; @@ -630,8 +530,7 @@ static int mlxsw_sp_mr_tcam_route_erif_del(struct mlxsw_sp *mlxsw_sp, } /* Update the TCAM route entry */ - err = mlxsw_sp_mr_tcam_route_replace(mlxsw_sp, &route->parman_item, - &route->key, afa_block); + err = ops->route_update(mlxsw_sp, route->priv, &route->key, afa_block); if (err) goto err_route_write; @@ -653,6 +552,7 @@ static int mlxsw_sp_mr_tcam_route_update(struct mlxsw_sp *mlxsw_sp, void *route_priv, struct mlxsw_sp_mr_route_info *route_info) { + const struct mlxsw_sp_mr_tcam_ops *ops = mlxsw_sp->mr_tcam_ops; struct mlxsw_sp_mr_tcam_route *route = route_priv; struct mlxsw_sp_mr_tcam_erif_list erif_list; struct mlxsw_afa_block *afa_block; @@ -677,8 +577,7 @@ mlxsw_sp_mr_tcam_route_update(struct mlxsw_sp *mlxsw_sp, void *route_priv, } /* Update the TCAM route entry */ - err = mlxsw_sp_mr_tcam_route_replace(mlxsw_sp, &route->parman_item, - &route->key, afa_block); + err = ops->route_update(mlxsw_sp, route->priv, &route->key, afa_block); if (err) goto err_route_write; @@ -699,167 +598,36 @@ err_erif_populate: return err; } -#define MLXSW_SP_MR_TCAM_REGION_BASE_COUNT 16 -#define MLXSW_SP_MR_TCAM_REGION_RESIZE_STEP 16 - -static int -mlxsw_sp_mr_tcam_region_alloc(struct mlxsw_sp_mr_tcam_region *mr_tcam_region) -{ - struct mlxsw_sp *mlxsw_sp = mr_tcam_region->mlxsw_sp; - char rtar_pl[MLXSW_REG_RTAR_LEN]; - - mlxsw_reg_rtar_pack(rtar_pl, MLXSW_REG_RTAR_OP_ALLOCATE, - mr_tcam_region->rtar_key_type, - MLXSW_SP_MR_TCAM_REGION_BASE_COUNT); - return mlxsw_reg_write(mlxsw_sp->core, 
MLXSW_REG(rtar), rtar_pl); -} - -static void -mlxsw_sp_mr_tcam_region_free(struct mlxsw_sp_mr_tcam_region *mr_tcam_region) -{ - struct mlxsw_sp *mlxsw_sp = mr_tcam_region->mlxsw_sp; - char rtar_pl[MLXSW_REG_RTAR_LEN]; - - mlxsw_reg_rtar_pack(rtar_pl, MLXSW_REG_RTAR_OP_DEALLOCATE, - mr_tcam_region->rtar_key_type, 0); - mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rtar), rtar_pl); -} - -static int mlxsw_sp_mr_tcam_region_parman_resize(void *priv, - unsigned long new_count) -{ - struct mlxsw_sp_mr_tcam_region *mr_tcam_region = priv; - struct mlxsw_sp *mlxsw_sp = mr_tcam_region->mlxsw_sp; - char rtar_pl[MLXSW_REG_RTAR_LEN]; - u64 max_tcam_rules; - - max_tcam_rules = MLXSW_CORE_RES_GET(mlxsw_sp->core, ACL_MAX_TCAM_RULES); - if (new_count > max_tcam_rules) - return -EINVAL; - mlxsw_reg_rtar_pack(rtar_pl, MLXSW_REG_RTAR_OP_RESIZE, - mr_tcam_region->rtar_key_type, new_count); - return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rtar), rtar_pl); -} - -static void mlxsw_sp_mr_tcam_region_parman_move(void *priv, - unsigned long from_index, - unsigned long to_index, - unsigned long count) -{ - struct mlxsw_sp_mr_tcam_region *mr_tcam_region = priv; - struct mlxsw_sp *mlxsw_sp = mr_tcam_region->mlxsw_sp; - char rrcr_pl[MLXSW_REG_RRCR_LEN]; - - mlxsw_reg_rrcr_pack(rrcr_pl, MLXSW_REG_RRCR_OP_MOVE, - from_index, count, - mr_tcam_region->rtar_key_type, to_index); - mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rrcr), rrcr_pl); -} - -static const struct parman_ops mlxsw_sp_mr_tcam_region_parman_ops = { - .base_count = MLXSW_SP_MR_TCAM_REGION_BASE_COUNT, - .resize_step = MLXSW_SP_MR_TCAM_REGION_RESIZE_STEP, - .resize = mlxsw_sp_mr_tcam_region_parman_resize, - .move = mlxsw_sp_mr_tcam_region_parman_move, - .algo = PARMAN_ALGO_TYPE_LSORT, -}; - -static int -mlxsw_sp_mr_tcam_region_init(struct mlxsw_sp *mlxsw_sp, - struct mlxsw_sp_mr_tcam_region *mr_tcam_region, - enum mlxsw_reg_rtar_key_type rtar_key_type) -{ - struct parman_prio *parman_prios; - struct parman *parman; - int err; - int i; - - mr_tcam_region->rtar_key_type = rtar_key_type; - mr_tcam_region->mlxsw_sp = mlxsw_sp; - - err = mlxsw_sp_mr_tcam_region_alloc(mr_tcam_region); - if (err) - return err; - - parman = parman_create(&mlxsw_sp_mr_tcam_region_parman_ops, - mr_tcam_region); - if (!parman) { - err = -ENOMEM; - goto err_parman_create; - } - mr_tcam_region->parman = parman; - - parman_prios = kmalloc_array(MLXSW_SP_MR_ROUTE_PRIO_MAX + 1, - sizeof(*parman_prios), GFP_KERNEL); - if (!parman_prios) { - err = -ENOMEM; - goto err_parman_prios_alloc; - } - mr_tcam_region->parman_prios = parman_prios; - - for (i = 0; i < MLXSW_SP_MR_ROUTE_PRIO_MAX + 1; i++) - parman_prio_init(mr_tcam_region->parman, - &mr_tcam_region->parman_prios[i], i); - return 0; - -err_parman_prios_alloc: - parman_destroy(parman); -err_parman_create: - mlxsw_sp_mr_tcam_region_free(mr_tcam_region); - return err; -} - -static void -mlxsw_sp_mr_tcam_region_fini(struct mlxsw_sp_mr_tcam_region *mr_tcam_region) -{ - int i; - - for (i = 0; i < MLXSW_SP_MR_ROUTE_PRIO_MAX + 1; i++) - parman_prio_fini(&mr_tcam_region->parman_prios[i]); - kfree(mr_tcam_region->parman_prios); - parman_destroy(mr_tcam_region->parman); - mlxsw_sp_mr_tcam_region_free(mr_tcam_region); -} - static int mlxsw_sp_mr_tcam_init(struct mlxsw_sp *mlxsw_sp, void *priv) { + const struct mlxsw_sp_mr_tcam_ops *ops = mlxsw_sp->mr_tcam_ops; struct mlxsw_sp_mr_tcam *mr_tcam = priv; - struct mlxsw_sp_mr_tcam_region *region = &mr_tcam->tcam_regions[0]; - u32 rtar_key; int err; - if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MC_ERIF_LIST_ENTRIES) 
|| - !MLXSW_CORE_RES_VALID(mlxsw_sp->core, ACL_MAX_TCAM_RULES)) + if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MC_ERIF_LIST_ENTRIES)) return -EIO; - rtar_key = MLXSW_REG_RTAR_KEY_TYPE_IPV4_MULTICAST; - err = mlxsw_sp_mr_tcam_region_init(mlxsw_sp, - ®ion[MLXSW_SP_L3_PROTO_IPV4], - rtar_key); - if (err) - return err; + mr_tcam->priv = kzalloc(ops->priv_size, GFP_KERNEL); + if (!mr_tcam->priv) + return -ENOMEM; - rtar_key = MLXSW_REG_RTAR_KEY_TYPE_IPV6_MULTICAST; - err = mlxsw_sp_mr_tcam_region_init(mlxsw_sp, - ®ion[MLXSW_SP_L3_PROTO_IPV6], - rtar_key); + err = ops->init(mlxsw_sp, mr_tcam->priv); if (err) - goto err_ipv6_region_init; - + goto err_init; return 0; -err_ipv6_region_init: - mlxsw_sp_mr_tcam_region_fini(®ion[MLXSW_SP_L3_PROTO_IPV4]); +err_init: + kfree(mr_tcam->priv); return err; } -static void mlxsw_sp_mr_tcam_fini(void *priv) +static void mlxsw_sp_mr_tcam_fini(struct mlxsw_sp *mlxsw_sp, void *priv) { + const struct mlxsw_sp_mr_tcam_ops *ops = mlxsw_sp->mr_tcam_ops; struct mlxsw_sp_mr_tcam *mr_tcam = priv; - struct mlxsw_sp_mr_tcam_region *region = &mr_tcam->tcam_regions[0]; - mlxsw_sp_mr_tcam_region_fini(®ion[MLXSW_SP_L3_PROTO_IPV6]); - mlxsw_sp_mr_tcam_region_fini(®ion[MLXSW_SP_L3_PROTO_IPV4]); + ops->fini(mr_tcam->priv); + kfree(mr_tcam->priv); } const struct mlxsw_sp_mr_ops mlxsw_sp_mr_tcam_ops = { diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c index 77b2adb29341..8d67f0123699 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c @@ -48,6 +48,7 @@ #include <linux/route.h> #include <linux/gcd.h> #include <linux/random.h> +#include <linux/if_macvlan.h> #include <net/netevent.h> #include <net/neighbour.h> #include <net/arp.h> @@ -60,6 +61,7 @@ #include <net/ndisc.h> #include <net/ipv6.h> #include <net/fib_notifier.h> +#include <net/switchdev.h> #include "spectrum.h" #include "core.h" @@ -163,7 +165,9 @@ struct mlxsw_sp_rif_ops { const struct mlxsw_sp_rif_params *params); int (*configure)(struct mlxsw_sp_rif *rif); void (*deconfigure)(struct mlxsw_sp_rif *rif); - struct mlxsw_sp_fid * (*fid_get)(struct mlxsw_sp_rif *rif); + struct mlxsw_sp_fid * (*fid_get)(struct mlxsw_sp_rif *rif, + struct netlink_ext_ack *extack); + void (*fdb_del)(struct mlxsw_sp_rif *rif, const char *mac); }; static void mlxsw_sp_lpm_tree_hold(struct mlxsw_sp_lpm_tree *lpm_tree); @@ -342,10 +346,6 @@ static void mlxsw_sp_rif_counters_free(struct mlxsw_sp_rif *rif) mlxsw_sp_rif_counter_free(mlxsw_sp, rif, MLXSW_SP_RIF_COUNTER_EGRESS); } -static struct mlxsw_sp_rif * -mlxsw_sp_rif_find_by_dev(const struct mlxsw_sp *mlxsw_sp, - const struct net_device *dev); - #define MLXSW_SP_PREFIX_COUNT (sizeof(struct in6_addr) * BITS_PER_BYTE + 1) struct mlxsw_sp_prefix_usage { @@ -1109,7 +1109,8 @@ mlxsw_sp_fib_entry_decap_init(struct mlxsw_sp *mlxsw_sp, u32 tunnel_index; int err; - err = mlxsw_sp_kvdl_alloc(mlxsw_sp, 1, &tunnel_index); + err = mlxsw_sp_kvdl_alloc(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ, + 1, &tunnel_index); if (err) return err; @@ -1125,7 +1126,8 @@ static void mlxsw_sp_fib_entry_decap_fini(struct mlxsw_sp *mlxsw_sp, /* Unlink this node from the IPIP entry that it's the decap entry of. 
*/ fib_entry->decap.ipip_entry->decap_fib_entry = NULL; fib_entry->decap.ipip_entry = NULL; - mlxsw_sp_kvdl_free(mlxsw_sp, fib_entry->decap.tunnel_index); + mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ, + 1, fib_entry->decap.tunnel_index); } static struct mlxsw_sp_fib_node * @@ -3165,8 +3167,9 @@ static int mlxsw_sp_fix_adj_grp_size(struct mlxsw_sp *mlxsw_sp, * by the device and make sure the request can be satisfied. */ mlxsw_sp_adj_grp_size_round_up(p_adj_grp_size); - err = mlxsw_sp_kvdl_alloc_size_query(mlxsw_sp, *p_adj_grp_size, - &alloc_size); + err = mlxsw_sp_kvdl_alloc_count_query(mlxsw_sp, + MLXSW_SP_KVDL_ENTRY_TYPE_ADJ, + *p_adj_grp_size, &alloc_size); if (err) return err; /* It is possible the allocation results in more allocated @@ -3278,7 +3281,8 @@ mlxsw_sp_nexthop_group_refresh(struct mlxsw_sp *mlxsw_sp, /* No valid allocation size available. */ goto set_trap; - err = mlxsw_sp_kvdl_alloc(mlxsw_sp, ecmp_size, &adj_index); + err = mlxsw_sp_kvdl_alloc(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ, + ecmp_size, &adj_index); if (err) { /* We ran out of KVD linear space, just set the * trap and let everything flow through kernel. @@ -3313,7 +3317,8 @@ mlxsw_sp_nexthop_group_refresh(struct mlxsw_sp *mlxsw_sp, err = mlxsw_sp_adj_index_mass_update(mlxsw_sp, nh_grp, old_adj_index, old_ecmp_size); - mlxsw_sp_kvdl_free(mlxsw_sp, old_adj_index); + mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ, + old_ecmp_size, old_adj_index); if (err) { dev_warn(mlxsw_sp->bus_info->dev, "Failed to mass-update adjacency index for nexthop group.\n"); goto set_trap; @@ -3335,7 +3340,8 @@ set_trap: if (err) dev_warn(mlxsw_sp->bus_info->dev, "Failed to set traps for fib entries.\n"); if (old_adj_index_valid) - mlxsw_sp_kvdl_free(mlxsw_sp, nh_grp->adj_index); + mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ, + nh_grp->ecmp_size, nh_grp->adj_index); } static void __mlxsw_sp_nexthop_neigh_update(struct mlxsw_sp_nexthop *nh, @@ -5967,7 +5973,7 @@ static int mlxsw_sp_router_fib_event(struct notifier_block *nb, return NOTIFY_DONE; } -static struct mlxsw_sp_rif * +struct mlxsw_sp_rif * mlxsw_sp_rif_find_by_dev(const struct mlxsw_sp *mlxsw_sp, const struct net_device *dev) { @@ -6024,6 +6030,12 @@ mlxsw_sp_rif_should_config(struct mlxsw_sp_rif *rif, struct net_device *dev, !list_empty(&inet6_dev->addr_list)) addr_list_empty = false; + /* macvlans do not have a RIF, but rather piggy back on the + * RIF of their lower device. 
+ */ + if (netif_is_macvlan(dev) && addr_list_empty) + return true; + if (rif && addr_list_empty && !netif_is_l3_slave(rif->dev)) return true; @@ -6125,6 +6137,11 @@ const struct net_device *mlxsw_sp_rif_dev(const struct mlxsw_sp_rif *rif) return rif->dev; } +struct mlxsw_sp_fid *mlxsw_sp_rif_fid(const struct mlxsw_sp_rif *rif) +{ + return rif->fid; +} + static struct mlxsw_sp_rif * mlxsw_sp_rif_create(struct mlxsw_sp *mlxsw_sp, const struct mlxsw_sp_rif_params *params, @@ -6162,7 +6179,7 @@ mlxsw_sp_rif_create(struct mlxsw_sp *mlxsw_sp, rif->ops = ops; if (ops->fid_get) { - fid = ops->fid_get(rif); + fid = ops->fid_get(rif, extack); if (IS_ERR(fid)) { err = PTR_ERR(fid); goto err_fid_get; @@ -6267,7 +6284,7 @@ mlxsw_sp_port_vlan_router_join(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan, } /* FID was already created, just take a reference */ - fid = rif->ops->fid_get(rif); + fid = rif->ops->fid_get(rif, extack); err = mlxsw_sp_fid_port_vid_map(fid, mlxsw_sp_port, vid); if (err) goto err_fid_port_vid_map; @@ -6432,6 +6449,123 @@ static int mlxsw_sp_inetaddr_vlan_event(struct net_device *vlan_dev, return 0; } +static bool mlxsw_sp_rif_macvlan_is_vrrp4(const u8 *mac) +{ + u8 vrrp4[ETH_ALEN] = { 0x00, 0x00, 0x5e, 0x00, 0x01, 0x00 }; + u8 mask[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0x00 }; + + return ether_addr_equal_masked(mac, vrrp4, mask); +} + +static bool mlxsw_sp_rif_macvlan_is_vrrp6(const u8 *mac) +{ + u8 vrrp6[ETH_ALEN] = { 0x00, 0x00, 0x5e, 0x00, 0x02, 0x00 }; + u8 mask[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0x00 }; + + return ether_addr_equal_masked(mac, vrrp6, mask); +} + +static int mlxsw_sp_rif_vrrp_op(struct mlxsw_sp *mlxsw_sp, u16 rif_index, + const u8 *mac, bool adding) +{ + char ritr_pl[MLXSW_REG_RITR_LEN]; + u8 vrrp_id = adding ? mac[5] : 0; + int err; + + if (!mlxsw_sp_rif_macvlan_is_vrrp4(mac) && + !mlxsw_sp_rif_macvlan_is_vrrp6(mac)) + return 0; + + mlxsw_reg_ritr_rif_pack(ritr_pl, rif_index); + err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl); + if (err) + return err; + + if (mlxsw_sp_rif_macvlan_is_vrrp4(mac)) + mlxsw_reg_ritr_if_vrrp_id_ipv4_set(ritr_pl, vrrp_id); + else + mlxsw_reg_ritr_if_vrrp_id_ipv6_set(ritr_pl, vrrp_id); + + return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl); +} + +static int mlxsw_sp_rif_macvlan_add(struct mlxsw_sp *mlxsw_sp, + const struct net_device *macvlan_dev, + struct netlink_ext_ack *extack) +{ + struct macvlan_dev *vlan = netdev_priv(macvlan_dev); + struct mlxsw_sp_rif *rif; + int err; + + rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, vlan->lowerdev); + if (!rif) { + NL_SET_ERR_MSG_MOD(extack, "macvlan is only supported on top of router interfaces"); + return -EOPNOTSUPP; + } + + err = mlxsw_sp_rif_fdb_op(mlxsw_sp, macvlan_dev->dev_addr, + mlxsw_sp_fid_index(rif->fid), true); + if (err) + return err; + + err = mlxsw_sp_rif_vrrp_op(mlxsw_sp, rif->rif_index, + macvlan_dev->dev_addr, true); + if (err) + goto err_rif_vrrp_add; + + /* Make sure the bridge driver does not have this MAC pointing at + * some other port. 
+ */ + if (rif->ops->fdb_del) + rif->ops->fdb_del(rif, macvlan_dev->dev_addr); + + return 0; + +err_rif_vrrp_add: + mlxsw_sp_rif_fdb_op(mlxsw_sp, macvlan_dev->dev_addr, + mlxsw_sp_fid_index(rif->fid), false); + return err; +} + +void mlxsw_sp_rif_macvlan_del(struct mlxsw_sp *mlxsw_sp, + const struct net_device *macvlan_dev) +{ + struct macvlan_dev *vlan = netdev_priv(macvlan_dev); + struct mlxsw_sp_rif *rif; + + rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, vlan->lowerdev); + /* If we do not have a RIF, then we already took care of + * removing the macvlan's MAC during RIF deletion. + */ + if (!rif) + return; + mlxsw_sp_rif_vrrp_op(mlxsw_sp, rif->rif_index, macvlan_dev->dev_addr, + false); + mlxsw_sp_rif_fdb_op(mlxsw_sp, macvlan_dev->dev_addr, + mlxsw_sp_fid_index(rif->fid), false); +} + +static int mlxsw_sp_inetaddr_macvlan_event(struct net_device *macvlan_dev, + unsigned long event, + struct netlink_ext_ack *extack) +{ + struct mlxsw_sp *mlxsw_sp; + + mlxsw_sp = mlxsw_sp_lower_get(macvlan_dev); + if (!mlxsw_sp) + return 0; + + switch (event) { + case NETDEV_UP: + return mlxsw_sp_rif_macvlan_add(mlxsw_sp, macvlan_dev, extack); + case NETDEV_DOWN: + mlxsw_sp_rif_macvlan_del(mlxsw_sp, macvlan_dev); + break; + } + + return 0; +} + static int __mlxsw_sp_inetaddr_event(struct net_device *dev, unsigned long event, struct netlink_ext_ack *extack) @@ -6444,6 +6578,8 @@ static int __mlxsw_sp_inetaddr_event(struct net_device *dev, return mlxsw_sp_inetaddr_bridge_event(dev, event, extack); else if (is_vlan_dev(dev)) return mlxsw_sp_inetaddr_vlan_event(dev, event, extack); + else if (netif_is_macvlan(dev)) + return mlxsw_sp_inetaddr_macvlan_event(dev, event, extack); else return 0; } @@ -6684,7 +6820,10 @@ int mlxsw_sp_netdevice_vrf_event(struct net_device *l3_dev, unsigned long event, struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(l3_dev); int err = 0; - if (!mlxsw_sp) + /* We do not create a RIF for a macvlan, but only use it to + * direct more MAC addresses to the router. + */ + if (!mlxsw_sp || netif_is_macvlan(l3_dev)) return 0; switch (event) { @@ -6705,6 +6844,27 @@ int mlxsw_sp_netdevice_vrf_event(struct net_device *l3_dev, unsigned long event, return err; } +static int __mlxsw_sp_rif_macvlan_flush(struct net_device *dev, void *data) +{ + struct mlxsw_sp_rif *rif = data; + + if (!netif_is_macvlan(dev)) + return 0; + + return mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, dev->dev_addr, + mlxsw_sp_fid_index(rif->fid), false); +} + +static int mlxsw_sp_rif_macvlan_flush(struct mlxsw_sp_rif *rif) +{ + if (!netif_is_macvlan_port(rif->dev)) + return 0; + + netdev_warn(rif->dev, "Router interface is deleted. 
Upper macvlans will not work\n"); + return netdev_walk_all_upper_dev_rcu(rif->dev, + __mlxsw_sp_rif_macvlan_flush, rif); +} + static struct mlxsw_sp_rif_subport * mlxsw_sp_rif_subport_rif(const struct mlxsw_sp_rif *rif) { @@ -6771,11 +6931,13 @@ static void mlxsw_sp_rif_subport_deconfigure(struct mlxsw_sp_rif *rif) mlxsw_sp_fid_rif_set(fid, NULL); mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr, mlxsw_sp_fid_index(fid), false); + mlxsw_sp_rif_macvlan_flush(rif); mlxsw_sp_rif_subport_op(rif, false); } static struct mlxsw_sp_fid * -mlxsw_sp_rif_subport_fid_get(struct mlxsw_sp_rif *rif) +mlxsw_sp_rif_subport_fid_get(struct mlxsw_sp_rif *rif, + struct netlink_ext_ack *extack) { return mlxsw_sp_fid_rfid_get(rif->mlxsw_sp, rif->rif_index); } @@ -6857,6 +7019,7 @@ static void mlxsw_sp_rif_vlan_deconfigure(struct mlxsw_sp_rif *rif) mlxsw_sp_fid_rif_set(fid, NULL); mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr, mlxsw_sp_fid_index(fid), false); + mlxsw_sp_rif_macvlan_flush(rif); mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC, mlxsw_sp_router_port(mlxsw_sp), false); mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC, @@ -6865,19 +7028,49 @@ static void mlxsw_sp_rif_vlan_deconfigure(struct mlxsw_sp_rif *rif) } static struct mlxsw_sp_fid * -mlxsw_sp_rif_vlan_fid_get(struct mlxsw_sp_rif *rif) +mlxsw_sp_rif_vlan_fid_get(struct mlxsw_sp_rif *rif, + struct netlink_ext_ack *extack) { - u16 vid = is_vlan_dev(rif->dev) ? vlan_dev_vlan_id(rif->dev) : 1; + u16 vid; + int err; + + if (is_vlan_dev(rif->dev)) { + vid = vlan_dev_vlan_id(rif->dev); + } else { + err = br_vlan_get_pvid(rif->dev, &vid); + if (err < 0 || !vid) { + NL_SET_ERR_MSG_MOD(extack, "Couldn't determine bridge PVID"); + return ERR_PTR(-EINVAL); + } + } return mlxsw_sp_fid_8021q_get(rif->mlxsw_sp, vid); } +static void mlxsw_sp_rif_vlan_fdb_del(struct mlxsw_sp_rif *rif, const char *mac) +{ + u16 vid = mlxsw_sp_fid_8021q_vid(rif->fid); + struct switchdev_notifier_fdb_info info; + struct net_device *br_dev; + struct net_device *dev; + + br_dev = is_vlan_dev(rif->dev) ? 
vlan_dev_real_dev(rif->dev) : rif->dev; + dev = br_fdb_find_port(br_dev, mac, vid); + if (!dev) + return; + + info.addr = mac; + info.vid = vid; + call_switchdev_notifiers(SWITCHDEV_FDB_DEL_TO_BRIDGE, dev, &info.info); +} + static const struct mlxsw_sp_rif_ops mlxsw_sp_rif_vlan_ops = { .type = MLXSW_SP_RIF_TYPE_VLAN, .rif_size = sizeof(struct mlxsw_sp_rif), .configure = mlxsw_sp_rif_vlan_configure, .deconfigure = mlxsw_sp_rif_vlan_deconfigure, .fid_get = mlxsw_sp_rif_vlan_fid_get, + .fdb_del = mlxsw_sp_rif_vlan_fdb_del, }; static int mlxsw_sp_rif_fid_configure(struct mlxsw_sp_rif *rif) @@ -6929,6 +7122,7 @@ static void mlxsw_sp_rif_fid_deconfigure(struct mlxsw_sp_rif *rif) mlxsw_sp_fid_rif_set(fid, NULL); mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr, mlxsw_sp_fid_index(fid), false); + mlxsw_sp_rif_macvlan_flush(rif); mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC, mlxsw_sp_router_port(mlxsw_sp), false); mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC, @@ -6937,17 +7131,33 @@ static void mlxsw_sp_rif_fid_deconfigure(struct mlxsw_sp_rif *rif) } static struct mlxsw_sp_fid * -mlxsw_sp_rif_fid_fid_get(struct mlxsw_sp_rif *rif) +mlxsw_sp_rif_fid_fid_get(struct mlxsw_sp_rif *rif, + struct netlink_ext_ack *extack) { return mlxsw_sp_fid_8021d_get(rif->mlxsw_sp, rif->dev->ifindex); } +static void mlxsw_sp_rif_fid_fdb_del(struct mlxsw_sp_rif *rif, const char *mac) +{ + struct switchdev_notifier_fdb_info info; + struct net_device *dev; + + dev = br_fdb_find_port(rif->dev, mac, 0); + if (!dev) + return; + + info.addr = mac; + info.vid = 0; + call_switchdev_notifiers(SWITCHDEV_FDB_DEL_TO_BRIDGE, dev, &info.info); +} + static const struct mlxsw_sp_rif_ops mlxsw_sp_rif_fid_ops = { .type = MLXSW_SP_RIF_TYPE_FID, .rif_size = sizeof(struct mlxsw_sp_rif), .configure = mlxsw_sp_rif_fid_configure, .deconfigure = mlxsw_sp_rif_fid_deconfigure, .fid_get = mlxsw_sp_rif_fid_fid_get, + .fdb_del = mlxsw_sp_rif_fid_fdb_del, }; static struct mlxsw_sp_rif_ipip_lb * diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h index a01edcf56797..52e25695625c 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h @@ -66,6 +66,8 @@ struct mlxsw_sp_neigh_entry; struct mlxsw_sp_nexthop; struct mlxsw_sp_ipip_entry; +struct mlxsw_sp_rif *mlxsw_sp_rif_find_by_dev(const struct mlxsw_sp *mlxsw_sp, + const struct net_device *dev); struct mlxsw_sp_rif *mlxsw_sp_rif_by_index(const struct mlxsw_sp *mlxsw_sp, u16 rif_index); u16 mlxsw_sp_rif_index(const struct mlxsw_sp_rif *rif); @@ -75,6 +77,7 @@ u32 mlxsw_sp_ipip_dev_ul_tb_id(const struct net_device *ol_dev); int mlxsw_sp_rif_dev_ifindex(const struct mlxsw_sp_rif *rif); u8 mlxsw_sp_router_port(const struct mlxsw_sp *mlxsw_sp); const struct net_device *mlxsw_sp_rif_dev(const struct mlxsw_sp_rif *rif); +struct mlxsw_sp_fid *mlxsw_sp_rif_fid(const struct mlxsw_sp_rif *rif); int mlxsw_sp_rif_counter_value_get(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_rif *rif, enum mlxsw_sp_rif_counter_dir dir, diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c index 3d187d88cc7c..e42d640cddab 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c @@ -36,6 +36,7 @@ #include <linux/list.h> #include <net/arp.h> #include <net/gre.h> +#include <net/lag.h> #include <net/ndisc.h> #include <net/ip6_tunnel.h> @@ -254,7 
+255,9 @@ mlxsw_sp_span_entry_lag(struct net_device *lag_dev) struct list_head *iter; netdev_for_each_lower_dev(lag_dev, dev, iter) - if ((dev->flags & IFF_UP) && mlxsw_sp_port_dev_check(dev)) + if (netif_carrier_ok(dev) && + net_lag_port_dev_txable(dev) && + mlxsw_sp_port_dev_check(dev)) return dev; return NULL; diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c index eea5666a86b2..da94e1eb9e16 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c @@ -1135,6 +1135,39 @@ err_port_vlan_set: return err; } +static int +mlxsw_sp_br_ban_rif_pvid_change(struct mlxsw_sp *mlxsw_sp, + const struct net_device *br_dev, + const struct switchdev_obj_port_vlan *vlan) +{ + struct mlxsw_sp_rif *rif; + struct mlxsw_sp_fid *fid; + u16 pvid; + u16 vid; + + rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, br_dev); + if (!rif) + return 0; + fid = mlxsw_sp_rif_fid(rif); + pvid = mlxsw_sp_fid_8021q_vid(fid); + + for (vid = vlan->vid_begin; vid <= vlan->vid_end; ++vid) { + if (vlan->flags & BRIDGE_VLAN_INFO_PVID) { + if (vid != pvid) { + netdev_err(br_dev, "Can't change PVID, it's used by router interface\n"); + return -EBUSY; + } + } else { + if (vid == pvid) { + netdev_err(br_dev, "Can't remove PVID, it's used by router interface\n"); + return -EBUSY; + } + } + } + + return 0; +} + static int mlxsw_sp_port_vlans_add(struct mlxsw_sp_port *mlxsw_sp_port, const struct switchdev_obj_port_vlan *vlan, struct switchdev_trans *trans) @@ -1146,8 +1179,18 @@ static int mlxsw_sp_port_vlans_add(struct mlxsw_sp_port *mlxsw_sp_port, struct mlxsw_sp_bridge_port *bridge_port; u16 vid; - if (netif_is_bridge_master(orig_dev)) - return -EOPNOTSUPP; + if (netif_is_bridge_master(orig_dev)) { + int err = 0; + + if ((vlan->flags & BRIDGE_VLAN_INFO_BRENTRY) && + br_vlan_enabled(orig_dev) && + switchdev_trans_ph_prepare(trans)) + err = mlxsw_sp_br_ban_rif_pvid_change(mlxsw_sp, + orig_dev, vlan); + if (!err) + err = -EOPNOTSUPP; + return err; + } if (switchdev_trans_ph_prepare(trans)) return 0; diff --git a/drivers/net/ethernet/mellanox/mlxsw/trap.h b/drivers/net/ethernet/mellanox/mlxsw/trap.h index 399e9d6993f7..eb437f59640d 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/trap.h +++ b/drivers/net/ethernet/mellanox/mlxsw/trap.h @@ -63,6 +63,7 @@ enum { MLXSW_TRAP_ID_LBERROR = 0x54, MLXSW_TRAP_ID_IPV4_OSPF = 0x55, MLXSW_TRAP_ID_IPV4_PIM = 0x58, + MLXSW_TRAP_ID_IPV4_VRRP = 0x59, MLXSW_TRAP_ID_RPF = 0x5C, MLXSW_TRAP_ID_IP2ME = 0x5F, MLXSW_TRAP_ID_IPV6_UNSPECIFIED_ADDRESS = 0x60, @@ -78,6 +79,7 @@ enum { MLXSW_TRAP_ID_IPV6_ALL_ROUTERS_LINK = 0x6F, MLXSW_TRAP_ID_RTR_INGRESS0 = 0x70, MLXSW_TRAP_ID_IPV6_PIM = 0x79, + MLXSW_TRAP_ID_IPV6_VRRP = 0x7A, MLXSW_TRAP_ID_IPV4_BGP = 0x88, MLXSW_TRAP_ID_IPV6_BGP = 0x89, MLXSW_TRAP_ID_L3_IPV6_ROUTER_SOLICITATION = 0x8A, diff --git a/drivers/net/ethernet/micrel/ksz884x.c b/drivers/net/ethernet/micrel/ksz884x.c index b72d1bd11296..ebbdfb908745 100644 --- a/drivers/net/ethernet/micrel/ksz884x.c +++ b/drivers/net/ethernet/micrel/ksz884x.c @@ -3373,7 +3373,6 @@ static void port_get_link_speed(struct ksz_port *port) */ static void port_set_link_speed(struct ksz_port *port) { - struct ksz_port_info *info; struct ksz_hw *hw = port->hw; u16 data; u16 cfg; @@ -3382,8 +3381,6 @@ static void port_set_link_speed(struct ksz_port *port) int p; for (i = 0, p = port->first_port; i < port->port_cnt; i++, p++) { - info = &hw->port_info[p]; - port_r16(hw, p, 
KS884X_PORT_CTRL_4_OFFSET, &data); port_r8(hw, p, KS884X_PORT_STATUS_OFFSET, &status); diff --git a/drivers/net/ethernet/microchip/lan743x_main.c b/drivers/net/ethernet/microchip/lan743x_main.c index dd947e4dd3ce..e1747a490066 100644 --- a/drivers/net/ethernet/microchip/lan743x_main.c +++ b/drivers/net/ethernet/microchip/lan743x_main.c @@ -828,7 +828,7 @@ static int lan743x_mac_init(struct lan743x_adapter *adapter) } if (!mac_address_valid) - random_ether_addr(adapter->mac_address); + eth_random_addr(adapter->mac_address); lan743x_mac_set_address(adapter, adapter->mac_address); ether_addr_copy(netdev->dev_addr, adapter->mac_address); return 0; diff --git a/drivers/net/ethernet/mscc/ocelot.c b/drivers/net/ethernet/mscc/ocelot.c index 776a8a9be8e3..1a4f2bb48ead 100644 --- a/drivers/net/ethernet/mscc/ocelot.c +++ b/drivers/net/ethernet/mscc/ocelot.c @@ -148,12 +148,191 @@ static inline int ocelot_vlant_wait_for_completion(struct ocelot *ocelot) return 0; } +static int ocelot_vlant_set_mask(struct ocelot *ocelot, u16 vid, u32 mask) +{ + /* Select the VID to configure */ + ocelot_write(ocelot, ANA_TABLES_VLANTIDX_V_INDEX(vid), + ANA_TABLES_VLANTIDX); + /* Set the vlan port members mask and issue a write command */ + ocelot_write(ocelot, ANA_TABLES_VLANACCESS_VLAN_PORT_MASK(mask) | + ANA_TABLES_VLANACCESS_CMD_WRITE, + ANA_TABLES_VLANACCESS); + + return ocelot_vlant_wait_for_completion(ocelot); +} + +static void ocelot_vlan_mode(struct ocelot_port *port, + netdev_features_t features) +{ + struct ocelot *ocelot = port->ocelot; + u8 p = port->chip_port; + u32 val; + + /* Filtering */ + val = ocelot_read(ocelot, ANA_VLANMASK); + if (features & NETIF_F_HW_VLAN_CTAG_FILTER) + val |= BIT(p); + else + val &= ~BIT(p); + ocelot_write(ocelot, val, ANA_VLANMASK); +} + +static void ocelot_vlan_port_apply(struct ocelot *ocelot, + struct ocelot_port *port) +{ + u32 val; + + /* Ingress clasification (ANA_PORT_VLAN_CFG) */ + /* Default vlan to clasify for untagged frames (may be zero) */ + val = ANA_PORT_VLAN_CFG_VLAN_VID(port->pvid); + if (port->vlan_aware) + val |= ANA_PORT_VLAN_CFG_VLAN_AWARE_ENA | + ANA_PORT_VLAN_CFG_VLAN_POP_CNT(1); + + ocelot_rmw_gix(ocelot, val, + ANA_PORT_VLAN_CFG_VLAN_VID_M | + ANA_PORT_VLAN_CFG_VLAN_AWARE_ENA | + ANA_PORT_VLAN_CFG_VLAN_POP_CNT_M, + ANA_PORT_VLAN_CFG, port->chip_port); + + /* Drop frames with multicast source address */ + val = ANA_PORT_DROP_CFG_DROP_MC_SMAC_ENA; + if (port->vlan_aware && !port->vid) + /* If port is vlan-aware and tagged, drop untagged and priority + * tagged frames. + */ + val |= ANA_PORT_DROP_CFG_DROP_UNTAGGED_ENA | + ANA_PORT_DROP_CFG_DROP_PRIO_S_TAGGED_ENA | + ANA_PORT_DROP_CFG_DROP_PRIO_C_TAGGED_ENA; + ocelot_write_gix(ocelot, val, ANA_PORT_DROP_CFG, port->chip_port); + + /* Egress configuration (REW_TAG_CFG): VLAN tag type to 8021Q. */ + val = REW_TAG_CFG_TAG_TPID_CFG(0); + + if (port->vlan_aware) { + if (port->vid) + /* Tag all frames except when VID == DEFAULT_VLAN */ + val |= REW_TAG_CFG_TAG_CFG(1); + else + /* Tag all frames */ + val |= REW_TAG_CFG_TAG_CFG(3); + } + ocelot_rmw_gix(ocelot, val, + REW_TAG_CFG_TAG_TPID_CFG_M | + REW_TAG_CFG_TAG_CFG_M, + REW_TAG_CFG, port->chip_port); + + /* Set default VLAN and tag type to 8021Q. 
*/ + val = REW_PORT_VLAN_CFG_PORT_TPID(ETH_P_8021Q) | + REW_PORT_VLAN_CFG_PORT_VID(port->vid); + ocelot_rmw_gix(ocelot, val, + REW_PORT_VLAN_CFG_PORT_TPID_M | + REW_PORT_VLAN_CFG_PORT_VID_M, + REW_PORT_VLAN_CFG, port->chip_port); +} + +static int ocelot_vlan_vid_add(struct net_device *dev, u16 vid, bool pvid, + bool untagged) +{ + struct ocelot_port *port = netdev_priv(dev); + struct ocelot *ocelot = port->ocelot; + int ret; + + /* Add the port MAC address to with the right VLAN information */ + ocelot_mact_learn(ocelot, PGID_CPU, dev->dev_addr, vid, + ENTRYTYPE_LOCKED); + + /* Make the port a member of the VLAN */ + ocelot->vlan_mask[vid] |= BIT(port->chip_port); + ret = ocelot_vlant_set_mask(ocelot, vid, ocelot->vlan_mask[vid]); + if (ret) + return ret; + + /* Default ingress vlan classification */ + if (pvid) + port->pvid = vid; + + /* Untagged egress vlan clasification */ + if (untagged) + port->vid = vid; + + ocelot_vlan_port_apply(ocelot, port); + + return 0; +} + +static int ocelot_vlan_vid_del(struct net_device *dev, u16 vid) +{ + struct ocelot_port *port = netdev_priv(dev); + struct ocelot *ocelot = port->ocelot; + int ret; + + /* 8021q removes VID 0 on module unload for all interfaces + * with VLAN filtering feature. We need to keep it to receive + * untagged traffic. + */ + if (vid == 0) + return 0; + + /* Del the port MAC address to with the right VLAN information */ + ocelot_mact_forget(ocelot, dev->dev_addr, vid); + + /* Stop the port from being a member of the vlan */ + ocelot->vlan_mask[vid] &= ~BIT(port->chip_port); + ret = ocelot_vlant_set_mask(ocelot, vid, ocelot->vlan_mask[vid]); + if (ret) + return ret; + + /* Ingress */ + if (port->pvid == vid) + port->pvid = 0; + + /* Egress */ + if (port->vid == vid) + port->vid = 0; + + ocelot_vlan_port_apply(ocelot, port); + + return 0; +} + static void ocelot_vlan_init(struct ocelot *ocelot) { + u16 port, vid; + /* Clear VLAN table, by default all ports are members of all VLANs */ ocelot_write(ocelot, ANA_TABLES_VLANACCESS_CMD_INIT, ANA_TABLES_VLANACCESS); ocelot_vlant_wait_for_completion(ocelot); + + /* Configure the port VLAN memberships */ + for (vid = 1; vid < VLAN_N_VID; vid++) { + ocelot->vlan_mask[vid] = 0; + ocelot_vlant_set_mask(ocelot, vid, ocelot->vlan_mask[vid]); + } + + /* Because VLAN filtering is enabled, we need VID 0 to get untagged + * traffic. It is added automatically if 8021q module is loaded, but + * we can't rely on it since module may be not loaded. + */ + ocelot->vlan_mask[0] = GENMASK(ocelot->num_phys_ports - 1, 0); + ocelot_vlant_set_mask(ocelot, 0, ocelot->vlan_mask[0]); + + /* Configure the CPU port to be VLAN aware */ + ocelot_write_gix(ocelot, ANA_PORT_VLAN_CFG_VLAN_VID(0) | + ANA_PORT_VLAN_CFG_VLAN_AWARE_ENA | + ANA_PORT_VLAN_CFG_VLAN_POP_CNT(1), + ANA_PORT_VLAN_CFG, ocelot->num_phys_ports); + + /* Set vlan ingress filter mask to all ports but the CPU port by + * default. 
+ */ + ocelot_write(ocelot, GENMASK(9, 0), ANA_VLANMASK); + + for (port = 0; port < ocelot->num_phys_ports; port++) { + ocelot_write_gix(ocelot, 0, REW_PORT_VLAN_CFG, port); + ocelot_write_gix(ocelot, 0, REW_TAG_CFG, port); + } } /* Watermark encode @@ -539,6 +718,20 @@ static int ocelot_fdb_add(struct ndmsg *ndm, struct nlattr *tb[], struct ocelot_port *port = netdev_priv(dev); struct ocelot *ocelot = port->ocelot; + if (!vid) { + if (!port->vlan_aware) + /* If the bridge is not VLAN aware and no VID was + * provided, set it to pvid to ensure the MAC entry + * matches incoming untagged packets + */ + vid = port->pvid; + else + /* If the bridge is VLAN aware a VID must be provided as + * otherwise the learnt entry wouldn't match any frame. + */ + return -EINVAL; + } + return ocelot_mact_learn(ocelot, port->chip_port, addr, vid, ENTRYTYPE_NORMAL); } @@ -690,6 +883,30 @@ end: return ret; } +static int ocelot_vlan_rx_add_vid(struct net_device *dev, __be16 proto, + u16 vid) +{ + return ocelot_vlan_vid_add(dev, vid, false, true); +} + +static int ocelot_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, + u16 vid) +{ + return ocelot_vlan_vid_del(dev, vid); +} + +static int ocelot_set_features(struct net_device *dev, + netdev_features_t features) +{ + struct ocelot_port *port = netdev_priv(dev); + netdev_features_t changed = dev->features ^ features; + + if (changed & NETIF_F_HW_VLAN_CTAG_FILTER) + ocelot_vlan_mode(port, features); + + return 0; +} + static const struct net_device_ops ocelot_port_netdev_ops = { .ndo_open = ocelot_port_open, .ndo_stop = ocelot_port_stop, @@ -701,6 +918,9 @@ static const struct net_device_ops ocelot_port_netdev_ops = { .ndo_fdb_add = ocelot_fdb_add, .ndo_fdb_del = ocelot_fdb_del, .ndo_fdb_dump = ocelot_fdb_dump, + .ndo_vlan_rx_add_vid = ocelot_vlan_rx_add_vid, + .ndo_vlan_rx_kill_vid = ocelot_vlan_rx_kill_vid, + .ndo_set_features = ocelot_set_features, }; static void ocelot_get_strings(struct net_device *netdev, u32 sset, u8 *data) @@ -780,6 +1000,8 @@ static const struct ethtool_ops ocelot_ethtool_ops = { .get_strings = ocelot_get_strings, .get_ethtool_stats = ocelot_get_ethtool_stats, .get_sset_count = ocelot_get_sset_count, + .get_link_ksettings = phy_ethtool_get_link_ksettings, + .set_link_ksettings = phy_ethtool_set_link_ksettings, }; static int ocelot_port_attr_get(struct net_device *dev, @@ -914,6 +1136,10 @@ static int ocelot_port_attr_set(struct net_device *dev, case SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME: ocelot_port_attr_ageing_set(ocelot_port, attr->u.ageing_time); break; + case SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING: + ocelot_port->vlan_aware = attr->u.vlan_filtering; + ocelot_vlan_port_apply(ocelot_port->ocelot, ocelot_port); + break; case SWITCHDEV_ATTR_ID_BRIDGE_MC_DISABLED: ocelot_port_attr_mc_set(ocelot_port, !attr->u.mc_disabled); break; @@ -925,6 +1151,40 @@ static int ocelot_port_attr_set(struct net_device *dev, return err; } +static int ocelot_port_obj_add_vlan(struct net_device *dev, + const struct switchdev_obj_port_vlan *vlan, + struct switchdev_trans *trans) +{ + int ret; + u16 vid; + + for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) { + ret = ocelot_vlan_vid_add(dev, vid, + vlan->flags & BRIDGE_VLAN_INFO_PVID, + vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED); + if (ret) + return ret; + } + + return 0; +} + +static int ocelot_port_vlan_del_vlan(struct net_device *dev, + const struct switchdev_obj_port_vlan *vlan) +{ + int ret; + u16 vid; + + for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) { + ret = ocelot_vlan_vid_del(dev, 
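/* Worked example for the switchdev VLAN add path above (the userspace
 * command and port name are assumed for illustration, not taken from the
 * patch):
 *
 *   bridge vlan add dev swp0 vid 100 pvid untagged
 *
 * reaches ocelot_port_obj_add_vlan() as a SWITCHDEV_OBJ_ID_PORT_VLAN with
 * vid_begin == vid_end == 100 and BRIDGE_VLAN_INFO_PVID |
 * BRIDGE_VLAN_INFO_UNTAGGED set, so ocelot_vlan_vid_add(dev, 100, true, true)
 * learns the port MAC for VID 100, adds the port to VLAN 100 via
 * ocelot_vlant_set_mask(), records pvid = vid = 100 and re-applies the
 * classification and rewriter registers through ocelot_vlan_port_apply().
 */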
vid); + + if (ret) + return ret; + } + + return 0; +} + static struct ocelot_multicast *ocelot_multicast_get(struct ocelot *ocelot, const unsigned char *addr, u16 vid) @@ -951,7 +1211,7 @@ static int ocelot_port_obj_add_mdb(struct net_device *dev, bool new = false; if (!vid) - vid = 1; + vid = port->pvid; mc = ocelot_multicast_get(ocelot, mdb->addr, vid); if (!mc) { @@ -992,7 +1252,7 @@ static int ocelot_port_obj_del_mdb(struct net_device *dev, u16 vid = mdb->vid; if (!vid) - vid = 1; + vid = port->pvid; mc = ocelot_multicast_get(ocelot, mdb->addr, vid); if (!mc) @@ -1024,6 +1284,11 @@ static int ocelot_port_obj_add(struct net_device *dev, int ret = 0; switch (obj->id) { + case SWITCHDEV_OBJ_ID_PORT_VLAN: + ret = ocelot_port_obj_add_vlan(dev, + SWITCHDEV_OBJ_PORT_VLAN(obj), + trans); + break; case SWITCHDEV_OBJ_ID_PORT_MDB: ret = ocelot_port_obj_add_mdb(dev, SWITCHDEV_OBJ_PORT_MDB(obj), trans); @@ -1041,6 +1306,10 @@ static int ocelot_port_obj_del(struct net_device *dev, int ret = 0; switch (obj->id) { + case SWITCHDEV_OBJ_ID_PORT_VLAN: + ret = ocelot_port_vlan_del_vlan(dev, + SWITCHDEV_OBJ_PORT_VLAN(obj)); + break; case SWITCHDEV_OBJ_ID_PORT_MDB: ret = ocelot_port_obj_del_mdb(dev, SWITCHDEV_OBJ_PORT_MDB(obj)); break; @@ -1086,6 +1355,142 @@ static void ocelot_port_bridge_leave(struct ocelot_port *ocelot_port, if (!ocelot->bridge_mask) ocelot->hw_bridge_dev = NULL; + + /* Clear bridge vlan settings before calling ocelot_vlan_port_apply */ + ocelot_port->vlan_aware = 0; + ocelot_port->pvid = 0; + ocelot_port->vid = 0; +} + +static void ocelot_set_aggr_pgids(struct ocelot *ocelot) +{ + int i, port, lag; + + /* Reset destination and aggregation PGIDS */ + for (port = 0; port < ocelot->num_phys_ports; port++) + ocelot_write_rix(ocelot, BIT(port), ANA_PGID_PGID, port); + + for (i = PGID_AGGR; i < PGID_SRC; i++) + ocelot_write_rix(ocelot, GENMASK(ocelot->num_phys_ports - 1, 0), + ANA_PGID_PGID, i); + + /* Now, set PGIDs for each LAG */ + for (lag = 0; lag < ocelot->num_phys_ports; lag++) { + unsigned long bond_mask; + int aggr_count = 0; + u8 aggr_idx[16]; + + bond_mask = ocelot->lags[lag]; + if (!bond_mask) + continue; + + for_each_set_bit(port, &bond_mask, ocelot->num_phys_ports) { + // Destination mask + ocelot_write_rix(ocelot, bond_mask, + ANA_PGID_PGID, port); + aggr_idx[aggr_count] = port; + aggr_count++; + } + + for (i = PGID_AGGR; i < PGID_SRC; i++) { + u32 ac; + + ac = ocelot_read_rix(ocelot, ANA_PGID_PGID, i); + ac &= ~bond_mask; + ac |= BIT(aggr_idx[i % aggr_count]); + ocelot_write_rix(ocelot, ac, ANA_PGID_PGID, i); + } + } +} + +static void ocelot_setup_lag(struct ocelot *ocelot, int lag) +{ + unsigned long bond_mask = ocelot->lags[lag]; + unsigned int p; + + for_each_set_bit(p, &bond_mask, ocelot->num_phys_ports) { + u32 port_cfg = ocelot_read_gix(ocelot, ANA_PORT_PORT_CFG, p); + + port_cfg &= ~ANA_PORT_PORT_CFG_PORTID_VAL_M; + + /* Use lag port as logical port for port i */ + ocelot_write_gix(ocelot, port_cfg | + ANA_PORT_PORT_CFG_PORTID_VAL(lag), + ANA_PORT_PORT_CFG, p); + } +} + +static int ocelot_port_lag_join(struct ocelot_port *ocelot_port, + struct net_device *bond) +{ + struct ocelot *ocelot = ocelot_port->ocelot; + int p = ocelot_port->chip_port; + int lag, lp; + struct net_device *ndev; + u32 bond_mask = 0; + + rcu_read_lock(); + for_each_netdev_in_bond_rcu(bond, ndev) { + struct ocelot_port *port = netdev_priv(ndev); + + bond_mask |= BIT(port->chip_port); + } + rcu_read_unlock(); + + lp = __ffs(bond_mask); + + /* If the new port is the lowest one, use it as the logical 
port from + * now on + */ + if (p == lp) { + lag = p; + ocelot->lags[p] = bond_mask; + bond_mask &= ~BIT(p); + if (bond_mask) { + lp = __ffs(bond_mask); + ocelot->lags[lp] = 0; + } + } else { + lag = lp; + ocelot->lags[lp] |= BIT(p); + } + + ocelot_setup_lag(ocelot, lag); + ocelot_set_aggr_pgids(ocelot); + + return 0; +} + +static void ocelot_port_lag_leave(struct ocelot_port *ocelot_port, + struct net_device *bond) +{ + struct ocelot *ocelot = ocelot_port->ocelot; + int p = ocelot_port->chip_port; + u32 port_cfg; + int i; + + /* Remove port from any lag */ + for (i = 0; i < ocelot->num_phys_ports; i++) + ocelot->lags[i] &= ~BIT(ocelot_port->chip_port); + + /* if it was the logical port of the lag, move the lag config to the + * next port + */ + if (ocelot->lags[p]) { + int n = __ffs(ocelot->lags[p]); + + ocelot->lags[n] = ocelot->lags[p]; + ocelot->lags[p] = 0; + + ocelot_setup_lag(ocelot, n); + } + + port_cfg = ocelot_read_gix(ocelot, ANA_PORT_PORT_CFG, p); + port_cfg &= ~ANA_PORT_PORT_CFG_PORTID_VAL_M; + ocelot_write_gix(ocelot, port_cfg | ANA_PORT_PORT_CFG_PORTID_VAL(p), + ANA_PORT_PORT_CFG, p); + + ocelot_set_aggr_pgids(ocelot); } /* Checks if the net_device instance given to us originate from our driver. */ @@ -1113,6 +1518,17 @@ static int ocelot_netdevice_port_event(struct net_device *dev, else ocelot_port_bridge_leave(ocelot_port, info->upper_dev); + + ocelot_vlan_port_apply(ocelot_port->ocelot, + ocelot_port); + } + if (netif_is_lag_master(info->upper_dev)) { + if (info->linking) + err = ocelot_port_lag_join(ocelot_port, + info->upper_dev); + else + ocelot_port_lag_leave(ocelot_port, + info->upper_dev); } break; default: @@ -1129,6 +1545,20 @@ static int ocelot_netdevice_event(struct notifier_block *unused, struct net_device *dev = netdev_notifier_info_to_dev(ptr); int ret = 0; + if (event == NETDEV_PRECHANGEUPPER && + netif_is_lag_master(info->upper_dev)) { + struct netdev_lag_upper_info *lag_upper_info = info->upper_info; + struct netlink_ext_ack *extack; + + if (lag_upper_info->tx_type != NETDEV_LAG_TX_TYPE_HASH) { + extack = netdev_notifier_info_to_extack(&info->info); + NL_SET_ERR_MSG_MOD(extack, "LAG device using unsupported Tx type"); + + ret = -EINVAL; + goto notify; + } + } + if (netif_is_lag_master(dev)) { struct net_device *slave; struct list_head *iter; @@ -1176,6 +1606,9 @@ int ocelot_probe_port(struct ocelot *ocelot, u8 port, dev->ethtool_ops = &ocelot_ethtool_ops; dev->switchdev_ops = &ocelot_port_switchdev_ops; + dev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER; + dev->features |= NETIF_F_HW_VLAN_CTAG_FILTER; + memcpy(dev->dev_addr, ocelot->base_mac, ETH_ALEN); dev->dev_addr[ETH_ALEN - 1] += port; ocelot_mact_learn(ocelot, PGID_CPU, dev->dev_addr, ocelot_port->pvid, @@ -1187,6 +1620,9 @@ int ocelot_probe_port(struct ocelot *ocelot, u8 port, goto err_register_netdev; } + /* Basic L2 initialization */ + ocelot_vlan_port_apply(ocelot, ocelot_port); + return 0; err_register_netdev: @@ -1201,6 +1637,11 @@ int ocelot_init(struct ocelot *ocelot) int i, cpu = ocelot->num_phys_ports; char queue_name[32]; + ocelot->lags = devm_kcalloc(ocelot->dev, ocelot->num_phys_ports, + sizeof(u32), GFP_KERNEL); + if (!ocelot->lags) + return -ENOMEM; + ocelot->stats = devm_kcalloc(ocelot->dev, ocelot->num_phys_ports * ocelot->num_stats, sizeof(u64), GFP_KERNEL); diff --git a/drivers/net/ethernet/mscc/ocelot.h b/drivers/net/ethernet/mscc/ocelot.h index 097bd12a10d4..616bec30dfa3 100644 --- a/drivers/net/ethernet/mscc/ocelot.h +++ b/drivers/net/ethernet/mscc/ocelot.h @@ -493,7 +493,7 @@ 
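/* Illustration of the LAG balancing programmed by ocelot_set_aggr_pgids()
 * above (the two-port bond is an assumed example, not from the patch):
 * bonding chip ports 1 and 3 gives bond_mask = 0xa.  Both member ports get
 * destination PGID 0xa, and the aggregation PGIDs (PGID_AGGR .. PGID_SRC - 1)
 * are rewritten to alternate BIT(1), BIT(3), BIT(1), ... so the per-frame
 * aggregation code spreads flows across the two members.  Only hash-type
 * uppers (NETDEV_LAG_TX_TYPE_HASH) are offloaded; any other bond mode is
 * refused at NETDEV_PRECHANGEUPPER time with an extack message.
 */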
struct ocelot { u8 num_cpu_ports; struct ocelot_port **ports; - u16 lags[16]; + u32 *lags; /* Keep track of the vlan port masks */ u32 vlan_mask[VLAN_N_VID]; diff --git a/drivers/net/ethernet/mscc/ocelot_board.c b/drivers/net/ethernet/mscc/ocelot_board.c index 18df7d934e81..26bb3b18f3be 100644 --- a/drivers/net/ethernet/mscc/ocelot_board.c +++ b/drivers/net/ethernet/mscc/ocelot_board.c @@ -29,7 +29,7 @@ static int ocelot_parse_ifh(u32 *ifh, struct frame_info *info) info->port = (ifh[2] & GENMASK(14, 11)) >> 11; info->cpuq = (ifh[3] & GENMASK(27, 20)) >> 20; - info->tag_type = (ifh[3] & GENMASK(16, 16)) >> 16; + info->tag_type = (ifh[3] & BIT(16)) >> 16; info->vid = ifh[3] & GENMASK(11, 0); return 0; diff --git a/drivers/net/ethernet/netronome/nfp/bpf/jit.c b/drivers/net/ethernet/netronome/nfp/bpf/jit.c index 8a92088df0d7..1d9e36835404 100644 --- a/drivers/net/ethernet/netronome/nfp/bpf/jit.c +++ b/drivers/net/ethernet/netronome/nfp/bpf/jit.c @@ -34,10 +34,11 @@ #define pr_fmt(fmt) "NFP net bpf: " fmt #include <linux/bug.h> -#include <linux/kernel.h> #include <linux/bpf.h> #include <linux/filter.h> +#include <linux/kernel.h> #include <linux/pkt_cls.h> +#include <linux/reciprocal_div.h> #include <linux/unistd.h> #include "main.h" @@ -416,6 +417,60 @@ emit_alu(struct nfp_prog *nfp_prog, swreg dst, } static void +__emit_mul(struct nfp_prog *nfp_prog, enum alu_dst_ab dst_ab, u16 areg, + enum mul_type type, enum mul_step step, u16 breg, bool swap, + bool wr_both, bool dst_lmextn, bool src_lmextn) +{ + u64 insn; + + insn = OP_MUL_BASE | + FIELD_PREP(OP_MUL_A_SRC, areg) | + FIELD_PREP(OP_MUL_B_SRC, breg) | + FIELD_PREP(OP_MUL_STEP, step) | + FIELD_PREP(OP_MUL_DST_AB, dst_ab) | + FIELD_PREP(OP_MUL_SW, swap) | + FIELD_PREP(OP_MUL_TYPE, type) | + FIELD_PREP(OP_MUL_WR_AB, wr_both) | + FIELD_PREP(OP_MUL_SRC_LMEXTN, src_lmextn) | + FIELD_PREP(OP_MUL_DST_LMEXTN, dst_lmextn); + + nfp_prog_push(nfp_prog, insn); +} + +static void +emit_mul(struct nfp_prog *nfp_prog, swreg lreg, enum mul_type type, + enum mul_step step, swreg rreg) +{ + struct nfp_insn_ur_regs reg; + u16 areg; + int err; + + if (type == MUL_TYPE_START && step != MUL_STEP_NONE) { + nfp_prog->error = -EINVAL; + return; + } + + if (step == MUL_LAST || step == MUL_LAST_2) { + /* When type is step and step Number is LAST or LAST2, left + * source is used as destination. + */ + err = swreg_to_unrestricted(lreg, reg_none(), rreg, &reg); + areg = reg.dst; + } else { + err = swreg_to_unrestricted(reg_none(), lreg, rreg, &reg); + areg = reg.areg; + } + + if (err) { + nfp_prog->error = err; + return; + } + + __emit_mul(nfp_prog, reg.dst_ab, areg, type, step, reg.breg, reg.swap, + reg.wr_both, reg.dst_lmextn, reg.src_lmextn); +} + +static void __emit_ld_field(struct nfp_prog *nfp_prog, enum shf_sc sc, u8 areg, u8 bmask, u8 breg, u8 shift, bool imm8, bool zero, bool swap, bool wr_both, @@ -670,7 +725,7 @@ static int nfp_cpp_memcpy(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) xfer_num = round_up(len, 4) / 4; if (src_40bit_addr) - addr40_offset(nfp_prog, meta->insn.src_reg, off, &src_base, + addr40_offset(nfp_prog, meta->insn.src_reg * 2, off, &src_base, &off); /* Setup PREV_ALU fields to override memory read length. 
*/ @@ -1380,6 +1435,133 @@ static void wrp_end32(struct nfp_prog *nfp_prog, swreg reg_in, u8 gpr_out) SHF_SC_R_ROT, 16); } +static void +wrp_mul_u32(struct nfp_prog *nfp_prog, swreg dst_hi, swreg dst_lo, swreg lreg, + swreg rreg, bool gen_high_half) +{ + emit_mul(nfp_prog, lreg, MUL_TYPE_START, MUL_STEP_NONE, rreg); + emit_mul(nfp_prog, lreg, MUL_TYPE_STEP_32x32, MUL_STEP_1, rreg); + emit_mul(nfp_prog, lreg, MUL_TYPE_STEP_32x32, MUL_STEP_2, rreg); + emit_mul(nfp_prog, lreg, MUL_TYPE_STEP_32x32, MUL_STEP_3, rreg); + emit_mul(nfp_prog, lreg, MUL_TYPE_STEP_32x32, MUL_STEP_4, rreg); + emit_mul(nfp_prog, dst_lo, MUL_TYPE_STEP_32x32, MUL_LAST, reg_none()); + if (gen_high_half) + emit_mul(nfp_prog, dst_hi, MUL_TYPE_STEP_32x32, MUL_LAST_2, + reg_none()); + else + wrp_immed(nfp_prog, dst_hi, 0); +} + +static void +wrp_mul_u16(struct nfp_prog *nfp_prog, swreg dst_hi, swreg dst_lo, swreg lreg, + swreg rreg) +{ + emit_mul(nfp_prog, lreg, MUL_TYPE_START, MUL_STEP_NONE, rreg); + emit_mul(nfp_prog, lreg, MUL_TYPE_STEP_16x16, MUL_STEP_1, rreg); + emit_mul(nfp_prog, lreg, MUL_TYPE_STEP_16x16, MUL_STEP_2, rreg); + emit_mul(nfp_prog, dst_lo, MUL_TYPE_STEP_16x16, MUL_LAST, reg_none()); +} + +static int +wrp_mul(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, + bool gen_high_half, bool ropnd_from_reg) +{ + swreg multiplier, multiplicand, dst_hi, dst_lo; + const struct bpf_insn *insn = &meta->insn; + u32 lopnd_max, ropnd_max; + u8 dst_reg; + + dst_reg = insn->dst_reg; + multiplicand = reg_a(dst_reg * 2); + dst_hi = reg_both(dst_reg * 2 + 1); + dst_lo = reg_both(dst_reg * 2); + lopnd_max = meta->umax_dst; + if (ropnd_from_reg) { + multiplier = reg_b(insn->src_reg * 2); + ropnd_max = meta->umax_src; + } else { + u32 imm = insn->imm; + + multiplier = ur_load_imm_any(nfp_prog, imm, imm_b(nfp_prog)); + ropnd_max = imm; + } + if (lopnd_max > U16_MAX || ropnd_max > U16_MAX) + wrp_mul_u32(nfp_prog, dst_hi, dst_lo, multiplicand, multiplier, + gen_high_half); + else + wrp_mul_u16(nfp_prog, dst_hi, dst_lo, multiplicand, multiplier); + + return 0; +} + +static int wrp_div_imm(struct nfp_prog *nfp_prog, u8 dst, u64 imm) +{ + swreg dst_both = reg_both(dst), dst_a = reg_a(dst), dst_b = reg_a(dst); + struct reciprocal_value_adv rvalue; + u8 pre_shift, exp; + swreg magic; + + if (imm > U32_MAX) { + wrp_immed(nfp_prog, dst_both, 0); + return 0; + } + + /* NOTE: because we are using "reciprocal_value_adv" which doesn't + * support "divisor > (1u << 31)", we need to JIT separate NFP sequence + * to handle such case which actually equals to the result of unsigned + * comparison "dst >= imm" which could be calculated using the following + * NFP sequence: + * + * alu[--, dst, -, imm] + * immed[imm, 0] + * alu[dst, imm, +carry, 0] + * + */ + if (imm > 1U << 31) { + swreg tmp_b = ur_load_imm_any(nfp_prog, imm, imm_b(nfp_prog)); + + emit_alu(nfp_prog, reg_none(), dst_a, ALU_OP_SUB, tmp_b); + wrp_immed(nfp_prog, imm_a(nfp_prog), 0); + emit_alu(nfp_prog, dst_both, imm_a(nfp_prog), ALU_OP_ADD_C, + reg_imm(0)); + return 0; + } + + rvalue = reciprocal_value_adv(imm, 32); + exp = rvalue.exp; + if (rvalue.is_wide_m && !(imm & 1)) { + pre_shift = fls(imm & -imm) - 1; + rvalue = reciprocal_value_adv(imm >> pre_shift, 32 - pre_shift); + } else { + pre_shift = 0; + } + magic = ur_load_imm_any(nfp_prog, rvalue.m, imm_b(nfp_prog)); + if (imm == 1U << exp) { + emit_shf(nfp_prog, dst_both, reg_none(), SHF_OP_NONE, dst_b, + SHF_SC_R_SHF, exp); + } else if (rvalue.is_wide_m) { + wrp_mul_u32(nfp_prog, imm_both(nfp_prog), reg_none(), dst_a, + 
magic, true); + emit_alu(nfp_prog, dst_both, dst_a, ALU_OP_SUB, + imm_b(nfp_prog)); + emit_shf(nfp_prog, dst_both, reg_none(), SHF_OP_NONE, dst_b, + SHF_SC_R_SHF, 1); + emit_alu(nfp_prog, dst_both, dst_a, ALU_OP_ADD, + imm_b(nfp_prog)); + emit_shf(nfp_prog, dst_both, reg_none(), SHF_OP_NONE, dst_b, + SHF_SC_R_SHF, rvalue.sh - 1); + } else { + if (pre_shift) + emit_shf(nfp_prog, dst_both, reg_none(), SHF_OP_NONE, + dst_b, SHF_SC_R_SHF, pre_shift); + wrp_mul_u32(nfp_prog, dst_both, reg_none(), dst_a, magic, true); + emit_shf(nfp_prog, dst_both, reg_none(), SHF_OP_NONE, + dst_b, SHF_SC_R_SHF, rvalue.sh); + } + + return 0; +} + static int adjust_head(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) { swreg tmp = imm_a(nfp_prog), tmp_len = imm_b(nfp_prog); @@ -1684,6 +1866,31 @@ static int sub_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) return 0; } +static int mul_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) +{ + return wrp_mul(nfp_prog, meta, true, true); +} + +static int mul_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) +{ + return wrp_mul(nfp_prog, meta, true, false); +} + +static int div_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) +{ + const struct bpf_insn *insn = &meta->insn; + + return wrp_div_imm(nfp_prog, insn->dst_reg * 2, insn->imm); +} + +static int div_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) +{ + /* NOTE: verifier hook has rejected cases for which verifier doesn't + * know whether the source operand is constant or not. + */ + return wrp_div_imm(nfp_prog, meta->insn.dst_reg * 2, meta->umin_src); +} + static int neg_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) { const struct bpf_insn *insn = &meta->insn; @@ -1772,8 +1979,8 @@ static int shl_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) u8 dst, src; dst = insn->dst_reg * 2; - umin = meta->umin; - umax = meta->umax; + umin = meta->umin_src; + umax = meta->umax_src; if (umin == umax) return __shl_imm64(nfp_prog, dst, umin); @@ -1881,8 +2088,8 @@ static int shr_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) u8 dst, src; dst = insn->dst_reg * 2; - umin = meta->umin; - umax = meta->umax; + umin = meta->umin_src; + umax = meta->umax_src; if (umin == umax) return __shr_imm64(nfp_prog, dst, umin); @@ -1995,8 +2202,8 @@ static int ashr_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) u8 dst, src; dst = insn->dst_reg * 2; - umin = meta->umin; - umax = meta->umax; + umin = meta->umin_src; + umax = meta->umax_src; if (umin == umax) return __ashr_imm64(nfp_prog, dst, umin); @@ -2097,6 +2304,26 @@ static int sub_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) return wrp_alu32_imm(nfp_prog, meta, ALU_OP_SUB, !meta->insn.imm); } +static int mul_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) +{ + return wrp_mul(nfp_prog, meta, false, true); +} + +static int mul_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) +{ + return wrp_mul(nfp_prog, meta, false, false); +} + +static int div_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) +{ + return div_reg64(nfp_prog, meta); +} + +static int div_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) +{ + return div_imm64(nfp_prog, meta); +} + static int neg_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) { u8 dst = meta->insn.dst_reg * 2; @@ -2848,6 +3075,10 @@ static const instr_cb_t instr_cb[256] = { [BPF_ALU64 | BPF_ADD | BPF_K] = add_imm64, [BPF_ALU64 | BPF_SUB | BPF_X] = sub_reg64, [BPF_ALU64 | BPF_SUB 
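/* Host-side sketch of what the multiply/divide sequences above compute,
 * assuming the reciprocal_value_adv() semantics of
 * include/linux/reciprocal_div.h (which the patch includes); the helper
 * name below is made up for illustration and is not part of the driver.
 *
 * wrp_mul_u32() yields dst_hi:dst_lo = (u64)lreg * rreg: the
 * MUL_STEP_1..MUL_STEP_4 sequence builds the 64-bit product and
 * MUL_LAST / MUL_LAST_2 read back its low and high halves.
 * wrp_div_imm() is then equivalent to:
 */
static u32 nfp_div_by_const_sketch(u32 n, u32 d)
{
	struct reciprocal_value_adv r;
	u8 pre_shift = 0, exp;
	u32 t;

	if (d > (1U << 31))			/* JITed as unsigned "n >= d" */
		return n >= d;

	r = reciprocal_value_adv(d, 32);
	exp = r.exp;
	if (r.is_wide_m && !(d & 1)) {
		/* fold powers of two out of an even divisor so that the
		 * magic multiplier fits into 32 bits
		 */
		pre_shift = fls(d & -d) - 1;
		r = reciprocal_value_adv(d >> pre_shift, 32 - pre_shift);
	}

	if (d == 1U << exp)			/* power of two: plain shift */
		return n >> exp;

	if (r.is_wide_m) {			/* magic wider than 32 bits */
		t = ((u64)n * r.m) >> 32;
		return (((n - t) >> 1) + t) >> (r.sh - 1);
	}

	return (u32)(((u64)(n >> pre_shift) * r.m) >> 32) >> r.sh;
}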
| BPF_K] = sub_imm64, + [BPF_ALU64 | BPF_MUL | BPF_X] = mul_reg64, + [BPF_ALU64 | BPF_MUL | BPF_K] = mul_imm64, + [BPF_ALU64 | BPF_DIV | BPF_X] = div_reg64, + [BPF_ALU64 | BPF_DIV | BPF_K] = div_imm64, [BPF_ALU64 | BPF_NEG] = neg_reg64, [BPF_ALU64 | BPF_LSH | BPF_X] = shl_reg64, [BPF_ALU64 | BPF_LSH | BPF_K] = shl_imm64, @@ -2867,6 +3098,10 @@ static const instr_cb_t instr_cb[256] = { [BPF_ALU | BPF_ADD | BPF_K] = add_imm, [BPF_ALU | BPF_SUB | BPF_X] = sub_reg, [BPF_ALU | BPF_SUB | BPF_K] = sub_imm, + [BPF_ALU | BPF_MUL | BPF_X] = mul_reg, + [BPF_ALU | BPF_MUL | BPF_K] = mul_imm, + [BPF_ALU | BPF_DIV | BPF_X] = div_reg, + [BPF_ALU | BPF_DIV | BPF_K] = div_imm, [BPF_ALU | BPF_NEG] = neg_reg, [BPF_ALU | BPF_LSH | BPF_K] = shl_imm, [BPF_ALU | BPF_END | BPF_X] = end_reg32, @@ -3299,7 +3534,8 @@ curr_pair_is_memcpy(struct nfp_insn_meta *ld_meta, if (!is_mbpf_load(ld_meta) || !is_mbpf_store(st_meta)) return false; - if (ld_meta->ptr.type != PTR_TO_PACKET) + if (ld_meta->ptr.type != PTR_TO_PACKET && + ld_meta->ptr.type != PTR_TO_MAP_VALUE) return false; if (st_meta->ptr.type != PTR_TO_PACKET) diff --git a/drivers/net/ethernet/netronome/nfp/bpf/main.c b/drivers/net/ethernet/netronome/nfp/bpf/main.c index 40216d56dddc..b95b94d008cf 100644 --- a/drivers/net/ethernet/netronome/nfp/bpf/main.c +++ b/drivers/net/ethernet/netronome/nfp/bpf/main.c @@ -66,26 +66,19 @@ nfp_bpf_xdp_offload(struct nfp_app *app, struct nfp_net *nn, struct bpf_prog *prog, struct netlink_ext_ack *extack) { bool running, xdp_running; - int ret; if (!nfp_net_ebpf_capable(nn)) return -EINVAL; running = nn->dp.ctrl & NFP_NET_CFG_CTRL_BPF; - xdp_running = running && nn->dp.bpf_offload_xdp; + xdp_running = running && nn->xdp_hw.prog; if (!prog && !xdp_running) return 0; if (prog && running && !xdp_running) return -EBUSY; - ret = nfp_net_bpf_offload(nn, prog, running, extack); - /* Stop offload if replace not possible */ - if (ret) - return ret; - - nn->dp.bpf_offload_xdp = !!prog; - return ret; + return nfp_net_bpf_offload(nn, prog, running, extack); } static const char *nfp_bpf_extra_cap(struct nfp_app *app, struct nfp_net *nn) @@ -209,7 +202,7 @@ static int nfp_bpf_setup_tc_block(struct net_device *netdev, case TC_BLOCK_BIND: return tcf_block_cb_register(f->block, nfp_bpf_setup_tc_block_cb, - nn, nn); + nn, nn, f->extack); case TC_BLOCK_UNBIND: tcf_block_cb_unregister(f->block, nfp_bpf_setup_tc_block_cb, diff --git a/drivers/net/ethernet/netronome/nfp/bpf/main.h b/drivers/net/ethernet/netronome/nfp/bpf/main.h index 654fe7823e5e..9845c1a2d4c2 100644 --- a/drivers/net/ethernet/netronome/nfp/bpf/main.h +++ b/drivers/net/ethernet/netronome/nfp/bpf/main.h @@ -263,8 +263,10 @@ struct nfp_bpf_reg_state { * @func_id: function id for call instructions * @arg1: arg1 for call instructions * @arg2: arg2 for call instructions - * @umin: copy of core verifier umin_value. - * @umax: copy of core verifier umax_value. + * @umin_src: copy of core verifier umin_value for src opearnd. + * @umax_src: copy of core verifier umax_value for src operand. + * @umin_dst: copy of core verifier umin_value for dst opearnd. + * @umax_dst: copy of core verifier umax_value for dst operand. * @off: index of first generated machine instruction (in nfp_prog.prog) * @n: eBPF instruction number * @flags: eBPF instruction extra optimization flags @@ -300,12 +302,15 @@ struct nfp_insn_meta { struct bpf_reg_state arg1; struct nfp_bpf_reg_state arg2; }; - /* We are interested in range info for some operands, - * for example, the shift amount. 
+ /* We are interested in range info for operands of ALU + * operations. For example, shift amount, multiplicand and + * multiplier etc. */ struct { - u64 umin; - u64 umax; + u64 umin_src; + u64 umax_src; + u64 umin_dst; + u64 umax_dst; }; }; unsigned int off; @@ -339,6 +344,11 @@ static inline u8 mbpf_mode(const struct nfp_insn_meta *meta) return BPF_MODE(meta->insn.code); } +static inline bool is_mbpf_alu(const struct nfp_insn_meta *meta) +{ + return mbpf_class(meta) == BPF_ALU64 || mbpf_class(meta) == BPF_ALU; +} + static inline bool is_mbpf_load(const struct nfp_insn_meta *meta) { return (meta->insn.code & ~BPF_SIZE_MASK) == (BPF_LDX | BPF_MEM); @@ -384,23 +394,14 @@ static inline bool is_mbpf_xadd(const struct nfp_insn_meta *meta) return (meta->insn.code & ~BPF_SIZE_MASK) == (BPF_STX | BPF_XADD); } -static inline bool is_mbpf_indir_shift(const struct nfp_insn_meta *meta) +static inline bool is_mbpf_mul(const struct nfp_insn_meta *meta) { - u8 code = meta->insn.code; - bool is_alu, is_shift; - u8 opclass, opcode; - - opclass = BPF_CLASS(code); - is_alu = opclass == BPF_ALU64 || opclass == BPF_ALU; - if (!is_alu) - return false; - - opcode = BPF_OP(code); - is_shift = opcode == BPF_LSH || opcode == BPF_RSH || opcode == BPF_ARSH; - if (!is_shift) - return false; + return is_mbpf_alu(meta) && mbpf_op(meta) == BPF_MUL; +} - return BPF_SRC(code) == BPF_X; +static inline bool is_mbpf_div(const struct nfp_insn_meta *meta) +{ + return is_mbpf_alu(meta) && mbpf_op(meta) == BPF_DIV; } /** diff --git a/drivers/net/ethernet/netronome/nfp/bpf/offload.c b/drivers/net/ethernet/netronome/nfp/bpf/offload.c index 7eae4c0266f8..78f44c4d95b4 100644 --- a/drivers/net/ethernet/netronome/nfp/bpf/offload.c +++ b/drivers/net/ethernet/netronome/nfp/bpf/offload.c @@ -190,8 +190,10 @@ nfp_prog_prepare(struct nfp_prog *nfp_prog, const struct bpf_insn *prog, meta->insn = prog[i]; meta->n = i; - if (is_mbpf_indir_shift(meta)) - meta->umin = U64_MAX; + if (is_mbpf_alu(meta)) { + meta->umin_src = U64_MAX; + meta->umin_dst = U64_MAX; + } list_add_tail(&meta->l, &nfp_prog->insns); } diff --git a/drivers/net/ethernet/netronome/nfp/bpf/verifier.c b/drivers/net/ethernet/netronome/nfp/bpf/verifier.c index 4bfeba7b21b2..49ba0d645d36 100644 --- a/drivers/net/ethernet/netronome/nfp/bpf/verifier.c +++ b/drivers/net/ethernet/netronome/nfp/bpf/verifier.c @@ -517,6 +517,82 @@ nfp_bpf_check_xadd(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, } static int +nfp_bpf_check_alu(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, + struct bpf_verifier_env *env) +{ + const struct bpf_reg_state *sreg = + cur_regs(env) + meta->insn.src_reg; + const struct bpf_reg_state *dreg = + cur_regs(env) + meta->insn.dst_reg; + + meta->umin_src = min(meta->umin_src, sreg->umin_value); + meta->umax_src = max(meta->umax_src, sreg->umax_value); + meta->umin_dst = min(meta->umin_dst, dreg->umin_value); + meta->umax_dst = max(meta->umax_dst, dreg->umax_value); + + /* NFP supports u16 and u32 multiplication. + * + * For ALU64, if either operand is beyond u32's value range, we reject + * it. One thing to note, if the source operand is BPF_K, then we need + * to check "imm" field directly, and we'd reject it if it is negative. + * Because for ALU64, "imm" (with s32 type) is expected to be sign + * extended to s64 which NFP mul doesn't support. 
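 *
 * Worked illustration of that restriction (the numbers are example values
 * only): with dst = 10 and imm = -2, BPF semantics sign-extend the
 * immediate, so the required result is
 *   10 * 0xfffffffffffffffe = 0xffffffffffffffec,
 * whereas a 32x32 multiply by the truncated 0xfffffffe would yield
 *   10 * 0xfffffffe = 0x9ffffffec,
 * which is why negative BPF_K multiplicands are rejected for ALU64.
 *
 * The divide checks just below have a similar flavour: r1 /= r2 is only
 * offloaded when the verifier has proven r2 to be a single known constant
 * (umin_src == umax_src), in which case the JIT reuses the reciprocal
 * sequence with that constant taken from meta->umin_src.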
+ * + * For ALU32, it is fine for "imm" be negative though, because the + * result is 32-bits and there is no difference on the low halve of + * the result for signed/unsigned mul, so we will get correct result. + */ + if (is_mbpf_mul(meta)) { + if (meta->umax_dst > U32_MAX) { + pr_vlog(env, "multiplier is not within u32 value range\n"); + return -EINVAL; + } + if (mbpf_src(meta) == BPF_X && meta->umax_src > U32_MAX) { + pr_vlog(env, "multiplicand is not within u32 value range\n"); + return -EINVAL; + } + if (mbpf_class(meta) == BPF_ALU64 && + mbpf_src(meta) == BPF_K && meta->insn.imm < 0) { + pr_vlog(env, "sign extended multiplicand won't be within u32 value range\n"); + return -EINVAL; + } + } + + /* NFP doesn't have divide instructions, we support divide by constant + * through reciprocal multiplication. Given NFP support multiplication + * no bigger than u32, we'd require divisor and dividend no bigger than + * that as well. + * + * Also eBPF doesn't support signed divide and has enforced this on C + * language level by failing compilation. However LLVM assembler hasn't + * enforced this, so it is possible for negative constant to leak in as + * a BPF_K operand through assembly code, we reject such cases as well. + */ + if (is_mbpf_div(meta)) { + if (meta->umax_dst > U32_MAX) { + pr_vlog(env, "dividend is not within u32 value range\n"); + return -EINVAL; + } + if (mbpf_src(meta) == BPF_X) { + if (meta->umin_src != meta->umax_src) { + pr_vlog(env, "divisor is not constant\n"); + return -EINVAL; + } + if (meta->umax_src > U32_MAX) { + pr_vlog(env, "divisor is not within u32 value range\n"); + return -EINVAL; + } + } + if (mbpf_src(meta) == BPF_K && meta->insn.imm < 0) { + pr_vlog(env, "divide by negative constant is not supported\n"); + return -EINVAL; + } + } + + return 0; +} + +static int nfp_verify_insn(struct bpf_verifier_env *env, int insn_idx, int prev_insn_idx) { struct nfp_prog *nfp_prog = env->prog->aux->offload->dev_priv; @@ -551,13 +627,8 @@ nfp_verify_insn(struct bpf_verifier_env *env, int insn_idx, int prev_insn_idx) if (is_mbpf_xadd(meta)) return nfp_bpf_check_xadd(nfp_prog, meta, env); - if (is_mbpf_indir_shift(meta)) { - const struct bpf_reg_state *sreg = - cur_regs(env) + meta->insn.src_reg; - - meta->umin = min(meta->umin, sreg->umin_value); - meta->umax = max(meta->umax, sreg->umax_value); - } + if (is_mbpf_alu(meta)) + return nfp_bpf_check_alu(nfp_prog, meta, env); return 0; } diff --git a/drivers/net/ethernet/netronome/nfp/flower/action.c b/drivers/net/ethernet/netronome/nfp/flower/action.c index 4a6d2db75071..e56b815a8dc6 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/action.c +++ b/drivers/net/ethernet/netronome/nfp/flower/action.c @@ -34,6 +34,7 @@ #include <linux/bitfield.h> #include <net/pkt_cls.h> #include <net/switchdev.h> +#include <net/tc_act/tc_csum.h> #include <net/tc_act/tc_gact.h> #include <net/tc_act/tc_mirred.h> #include <net/tc_act/tc_pedit.h> @@ -44,6 +45,8 @@ #include "main.h" #include "../nfp_net_repr.h" +#define NFP_FL_SUPPORTED_IPV4_UDP_TUN_FLAGS (TUNNEL_CSUM | TUNNEL_KEY) + static void nfp_fl_pop_vlan(struct nfp_fl_pop_vlan *pop_vlan) { size_t act_size = sizeof(struct nfp_fl_pop_vlan); @@ -235,9 +238,12 @@ nfp_fl_set_ipv4_udp_tun(struct nfp_fl_set_ipv4_udp_tun *set_tun, size_t act_size = sizeof(struct nfp_fl_set_ipv4_udp_tun); struct ip_tunnel_info *ip_tun = tcf_tunnel_info(action); u32 tmp_set_ip_tun_type_index = 0; + struct flowi4 flow = {}; /* Currently support one pre-tunnel so index is always 0. 
*/ int pretun_idx = 0; + struct rtable *rt; struct net *net; + int err; if (ip_tun->options_len) return -EOPNOTSUPP; @@ -254,7 +260,28 @@ nfp_fl_set_ipv4_udp_tun(struct nfp_fl_set_ipv4_udp_tun *set_tun, set_tun->tun_type_index = cpu_to_be32(tmp_set_ip_tun_type_index); set_tun->tun_id = ip_tun->key.tun_id; - set_tun->ttl = net->ipv4.sysctl_ip_default_ttl; + + /* Do a route lookup to determine ttl - if fails then use default. + * Note that CONFIG_INET is a requirement of CONFIG_NET_SWITCHDEV so + * must be defined here. + */ + flow.daddr = ip_tun->key.u.ipv4.dst; + flow.flowi4_proto = IPPROTO_UDP; + rt = ip_route_output_key(net, &flow); + err = PTR_ERR_OR_ZERO(rt); + if (!err) { + set_tun->ttl = ip4_dst_hoplimit(&rt->dst); + ip_rt_put(rt); + } else { + set_tun->ttl = net->ipv4.sysctl_ip_default_ttl; + } + + set_tun->tos = ip_tun->key.tos; + + if (!(ip_tun->key.tun_flags & TUNNEL_KEY) || + ip_tun->key.tun_flags & ~NFP_FL_SUPPORTED_IPV4_UDP_TUN_FLAGS) + return -EOPNOTSUPP; + set_tun->tun_flags = ip_tun->key.tun_flags; /* Complete pre_tunnel action. */ pre_tun->ipv4_dst = ip_tun->key.u.ipv4.dst; @@ -398,8 +425,27 @@ nfp_fl_set_tport(const struct tc_action *action, int idx, u32 off, return 0; } +static u32 nfp_fl_csum_l4_to_flag(u8 ip_proto) +{ + switch (ip_proto) { + case 0: + /* Filter doesn't force proto match, + * both TCP and UDP will be updated if encountered + */ + return TCA_CSUM_UPDATE_FLAG_TCP | TCA_CSUM_UPDATE_FLAG_UDP; + case IPPROTO_TCP: + return TCA_CSUM_UPDATE_FLAG_TCP; + case IPPROTO_UDP: + return TCA_CSUM_UPDATE_FLAG_UDP; + default: + /* All other protocols will be ignored by FW */ + return 0; + } +} + static int -nfp_fl_pedit(const struct tc_action *action, char *nfp_action, int *a_len) +nfp_fl_pedit(const struct tc_action *action, struct tc_cls_flower_offload *flow, + char *nfp_action, int *a_len, u32 *csum_updated) { struct nfp_fl_set_ipv6_addr set_ip6_dst, set_ip6_src; struct nfp_fl_set_ip4_addrs set_ip_addr; @@ -409,6 +455,7 @@ nfp_fl_pedit(const struct tc_action *action, char *nfp_action, int *a_len) int idx, nkeys, err; size_t act_size; u32 offset, cmd; + u8 ip_proto = 0; memset(&set_ip6_dst, 0, sizeof(set_ip6_dst)); memset(&set_ip6_src, 0, sizeof(set_ip6_src)); @@ -451,6 +498,15 @@ nfp_fl_pedit(const struct tc_action *action, char *nfp_action, int *a_len) return err; } + if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_BASIC)) { + struct flow_dissector_key_basic *basic; + + basic = skb_flow_dissector_target(flow->dissector, + FLOW_DISSECTOR_KEY_BASIC, + flow->key); + ip_proto = basic->ip_proto; + } + if (set_eth.head.len_lw) { act_size = sizeof(set_eth); memcpy(nfp_action, &set_eth, act_size); @@ -459,6 +515,10 @@ nfp_fl_pedit(const struct tc_action *action, char *nfp_action, int *a_len) act_size = sizeof(set_ip_addr); memcpy(nfp_action, &set_ip_addr, act_size); *a_len += act_size; + + /* Hardware will automatically fix IPv4 and TCP/UDP checksum. */ + *csum_updated |= TCA_CSUM_UPDATE_FLAG_IPV4HDR | + nfp_fl_csum_l4_to_flag(ip_proto); } else if (set_ip6_dst.head.len_lw && set_ip6_src.head.len_lw) { /* TC compiles set src and dst IPv6 address as a single action, * the hardware requires this to be 2 separate actions. @@ -471,18 +531,30 @@ nfp_fl_pedit(const struct tc_action *action, char *nfp_action, int *a_len) memcpy(&nfp_action[sizeof(set_ip6_src)], &set_ip6_dst, act_size); *a_len += act_size; + + /* Hardware will automatically fix TCP/UDP checksum. 
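 *
 * Example of the csum_updated bookkeeping (flag values assumed for
 * illustration): a pedit of IPv4 addresses on a TCP flow leaves
 * csum_updated = TCA_CSUM_UPDATE_FLAG_IPV4HDR | TCA_CSUM_UPDATE_FLAG_TCP.
 * A later csum action requesting exactly those fields clears the bits and
 * the flow can be offloaded; a csum action requesting something the
 * hardware will not fix (e.g. ICMP) is rejected on the spot, and if no
 * csum action consumes the bits at all, the output action below refuses
 * the flow so checksums are never updated behind the user's back.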
*/ + *csum_updated |= nfp_fl_csum_l4_to_flag(ip_proto); } else if (set_ip6_dst.head.len_lw) { act_size = sizeof(set_ip6_dst); memcpy(nfp_action, &set_ip6_dst, act_size); *a_len += act_size; + + /* Hardware will automatically fix TCP/UDP checksum. */ + *csum_updated |= nfp_fl_csum_l4_to_flag(ip_proto); } else if (set_ip6_src.head.len_lw) { act_size = sizeof(set_ip6_src); memcpy(nfp_action, &set_ip6_src, act_size); *a_len += act_size; + + /* Hardware will automatically fix TCP/UDP checksum. */ + *csum_updated |= nfp_fl_csum_l4_to_flag(ip_proto); } else if (set_tport.head.len_lw) { act_size = sizeof(set_tport); memcpy(nfp_action, &set_tport, act_size); *a_len += act_size; + + /* Hardware will automatically fix TCP/UDP checksum. */ + *csum_updated |= nfp_fl_csum_l4_to_flag(ip_proto); } return 0; @@ -493,12 +565,18 @@ nfp_flower_output_action(struct nfp_app *app, const struct tc_action *a, struct nfp_fl_payload *nfp_fl, int *a_len, struct net_device *netdev, bool last, enum nfp_flower_tun_type *tun_type, int *tun_out_cnt, - int *out_cnt) + int *out_cnt, u32 *csum_updated) { struct nfp_flower_priv *priv = app->priv; struct nfp_fl_output *output; int err, prelag_size; + /* If csum_updated has not been reset by now, it means HW will + * incorrectly update csums when they are not requested. + */ + if (*csum_updated) + return -EOPNOTSUPP; + if (*a_len + sizeof(struct nfp_fl_output) > NFP_FL_MAX_A_SIZ) return -EOPNOTSUPP; @@ -529,10 +607,11 @@ nfp_flower_output_action(struct nfp_app *app, const struct tc_action *a, static int nfp_flower_loop_action(struct nfp_app *app, const struct tc_action *a, + struct tc_cls_flower_offload *flow, struct nfp_fl_payload *nfp_fl, int *a_len, struct net_device *netdev, enum nfp_flower_tun_type *tun_type, int *tun_out_cnt, - int *out_cnt) + int *out_cnt, u32 *csum_updated) { struct nfp_fl_set_ipv4_udp_tun *set_tun; struct nfp_fl_pre_tunnel *pre_tun; @@ -545,14 +624,14 @@ nfp_flower_loop_action(struct nfp_app *app, const struct tc_action *a, } else if (is_tcf_mirred_egress_redirect(a)) { err = nfp_flower_output_action(app, a, nfp_fl, a_len, netdev, true, tun_type, tun_out_cnt, - out_cnt); + out_cnt, csum_updated); if (err) return err; } else if (is_tcf_mirred_egress_mirror(a)) { err = nfp_flower_output_action(app, a, nfp_fl, a_len, netdev, false, tun_type, tun_out_cnt, - out_cnt); + out_cnt, csum_updated); if (err) return err; @@ -602,8 +681,17 @@ nfp_flower_loop_action(struct nfp_app *app, const struct tc_action *a, /* Tunnel decap is handled by default so accept action. */ return 0; } else if (is_tcf_pedit(a)) { - if (nfp_fl_pedit(a, &nfp_fl->action_data[*a_len], a_len)) + if (nfp_fl_pedit(a, flow, &nfp_fl->action_data[*a_len], + a_len, csum_updated)) + return -EOPNOTSUPP; + } else if (is_tcf_csum(a)) { + /* csum action requests recalc of something we have not fixed */ + if (tcf_csum_update_flags(a) & ~*csum_updated) return -EOPNOTSUPP; + /* If we will correctly fix the csum we can remove it from the + * csum update list. Which will later be used to check support. + */ + *csum_updated &= ~tcf_csum_update_flags(a); } else { /* Currently we do not handle any other actions. 
*/ return -EOPNOTSUPP; @@ -620,6 +708,7 @@ int nfp_flower_compile_action(struct nfp_app *app, int act_len, act_cnt, err, tun_out_cnt, out_cnt; enum nfp_flower_tun_type tun_type; const struct tc_action *a; + u32 csum_updated = 0; LIST_HEAD(actions); memset(nfp_flow->action_data, 0, NFP_FL_MAX_A_SIZ); @@ -632,8 +721,9 @@ int nfp_flower_compile_action(struct nfp_app *app, tcf_exts_to_list(flow->exts, &actions); list_for_each_entry(a, &actions, list) { - err = nfp_flower_loop_action(app, a, nfp_flow, &act_len, netdev, - &tun_type, &tun_out_cnt, &out_cnt); + err = nfp_flower_loop_action(app, a, flow, nfp_flow, &act_len, + netdev, &tun_type, &tun_out_cnt, + &out_cnt, &csum_updated); if (err) return err; act_cnt++; diff --git a/drivers/net/ethernet/netronome/nfp/flower/cmsg.h b/drivers/net/ethernet/netronome/nfp/flower/cmsg.h index 4a7f3510a296..15f1eacd76b6 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/cmsg.h +++ b/drivers/net/ethernet/netronome/nfp/flower/cmsg.h @@ -203,9 +203,9 @@ struct nfp_fl_set_ipv4_udp_tun { __be16 reserved; __be64 tun_id __packed; __be32 tun_type_index; - __be16 reserved2; + __be16 tun_flags; u8 ttl; - u8 reserved3; + u8 tos; __be32 extra[2]; }; diff --git a/drivers/net/ethernet/netronome/nfp/flower/lag_conf.c b/drivers/net/ethernet/netronome/nfp/flower/lag_conf.c index 0c4c957717ea..bf10598f66ae 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/lag_conf.c +++ b/drivers/net/ethernet/netronome/nfp/flower/lag_conf.c @@ -564,8 +564,9 @@ nfp_fl_lag_changeupper_event(struct nfp_fl_lag *lag, if (lag_upper_info && lag_upper_info->tx_type != NETDEV_LAG_TX_TYPE_ACTIVEBACKUP && (lag_upper_info->tx_type != NETDEV_LAG_TX_TYPE_HASH || - (lag_upper_info->hash_type != NETDEV_LAG_HASH_L34 && - lag_upper_info->hash_type != NETDEV_LAG_HASH_E34))) { + (lag_upper_info->hash_type != NETDEV_LAG_HASH_L34 && + lag_upper_info->hash_type != NETDEV_LAG_HASH_E34 && + lag_upper_info->hash_type != NETDEV_LAG_HASH_UNKNOWN))) { can_offload = false; nfp_flower_cmsg_warn(priv->app, "Unable to offload tx_type %u hash %u\n", diff --git a/drivers/net/ethernet/netronome/nfp/flower/main.h b/drivers/net/ethernet/netronome/nfp/flower/main.h index bbe5764d26cb..ef2114d13387 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/main.h +++ b/drivers/net/ethernet/netronome/nfp/flower/main.h @@ -73,7 +73,7 @@ struct nfp_app; struct nfp_fl_mask_id { struct circ_buf mask_id_free_list; - struct timespec64 *last_used; + ktime_t *last_used; u8 init_unallocated; }; diff --git a/drivers/net/ethernet/netronome/nfp/flower/metadata.c b/drivers/net/ethernet/netronome/nfp/flower/metadata.c index 93fb809f50d1..c098730544b7 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/metadata.c +++ b/drivers/net/ethernet/netronome/nfp/flower/metadata.c @@ -158,7 +158,6 @@ static int nfp_release_mask_id(struct nfp_app *app, u8 mask_id) { struct nfp_flower_priv *priv = app->priv; struct circ_buf *ring; - struct timespec64 now; ring = &priv->mask_ids.mask_id_free_list; /* Checking if buffer is full. 
*/ @@ -169,8 +168,7 @@ static int nfp_release_mask_id(struct nfp_app *app, u8 mask_id) ring->head = (ring->head + NFP_FLOWER_MASK_ELEMENT_RS) % (NFP_FLOWER_MASK_ENTRY_RS * NFP_FLOWER_MASK_ELEMENT_RS); - getnstimeofday64(&now); - priv->mask_ids.last_used[mask_id] = now; + priv->mask_ids.last_used[mask_id] = ktime_get(); return 0; } @@ -178,7 +176,7 @@ static int nfp_release_mask_id(struct nfp_app *app, u8 mask_id) static int nfp_mask_alloc(struct nfp_app *app, u8 *mask_id) { struct nfp_flower_priv *priv = app->priv; - struct timespec64 delta, now; + ktime_t reuse_timeout; struct circ_buf *ring; u8 temp_id, freed_id; @@ -198,10 +196,10 @@ static int nfp_mask_alloc(struct nfp_app *app, u8 *mask_id) memcpy(&temp_id, &ring->buf[ring->tail], NFP_FLOWER_MASK_ELEMENT_RS); *mask_id = temp_id; - getnstimeofday64(&now); - delta = timespec64_sub(now, priv->mask_ids.last_used[*mask_id]); + reuse_timeout = ktime_add_ns(priv->mask_ids.last_used[*mask_id], + NFP_FL_MASK_REUSE_TIME_NS); - if (timespec64_to_ns(&delta) < NFP_FL_MASK_REUSE_TIME_NS) + if (ktime_before(ktime_get(), reuse_timeout)) goto err_not_found; memcpy(&ring->buf[ring->tail], &freed_id, NFP_FLOWER_MASK_ELEMENT_RS); diff --git a/drivers/net/ethernet/netronome/nfp/flower/offload.c b/drivers/net/ethernet/netronome/nfp/flower/offload.c index 525057bee0ed..43b9bf12b174 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/offload.c +++ b/drivers/net/ethernet/netronome/nfp/flower/offload.c @@ -584,9 +584,9 @@ nfp_flower_repr_offload(struct nfp_app *app, struct net_device *netdev, return nfp_flower_del_offload(app, netdev, flower, egress); case TC_CLSFLOWER_STATS: return nfp_flower_get_stats(app, netdev, flower, egress); + default: + return -EOPNOTSUPP; } - - return -EOPNOTSUPP; } int nfp_flower_setup_tc_egress_cb(enum tc_setup_type type, void *type_data, @@ -638,7 +638,7 @@ static int nfp_flower_setup_tc_block(struct net_device *netdev, case TC_BLOCK_BIND: return tcf_block_cb_register(f->block, nfp_flower_setup_tc_block_cb, - repr, repr); + repr, repr, f->extack); case TC_BLOCK_UNBIND: tcf_block_cb_unregister(f->block, nfp_flower_setup_tc_block_cb, diff --git a/drivers/net/ethernet/netronome/nfp/nfp_asm.h b/drivers/net/ethernet/netronome/nfp/nfp_asm.h index f6677bc9875a..cdc4e065f6f5 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_asm.h +++ b/drivers/net/ethernet/netronome/nfp/nfp_asm.h @@ -426,4 +426,32 @@ static inline u32 nfp_get_ind_csr_ctx_ptr_offs(u32 read_offset) return (read_offset & ~NFP_IND_ME_CTX_PTR_BASE_MASK) | NFP_CSR_CTX_PTR; } +enum mul_type { + MUL_TYPE_START = 0x00, + MUL_TYPE_STEP_24x8 = 0x01, + MUL_TYPE_STEP_16x16 = 0x02, + MUL_TYPE_STEP_32x32 = 0x03, +}; + +enum mul_step { + MUL_STEP_1 = 0x00, + MUL_STEP_NONE = MUL_STEP_1, + MUL_STEP_2 = 0x01, + MUL_STEP_3 = 0x02, + MUL_STEP_4 = 0x03, + MUL_LAST = 0x04, + MUL_LAST_2 = 0x05, +}; + +#define OP_MUL_BASE 0x0f800000000ULL +#define OP_MUL_A_SRC 0x000000003ffULL +#define OP_MUL_B_SRC 0x000000ffc00ULL +#define OP_MUL_STEP 0x00000700000ULL +#define OP_MUL_DST_AB 0x00000800000ULL +#define OP_MUL_SW 0x00040000000ULL +#define OP_MUL_TYPE 0x00180000000ULL +#define OP_MUL_WR_AB 0x20000000000ULL +#define OP_MUL_SRC_LMEXTN 0x40000000000ULL +#define OP_MUL_DST_LMEXTN 0x80000000000ULL + #endif diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net.h b/drivers/net/ethernet/netronome/nfp/nfp_net.h index 2a71a9ffd095..8970ec981e11 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net.h +++ b/drivers/net/ethernet/netronome/nfp/nfp_net.h @@ -485,7 +485,6 @@ struct nfp_stat_pair { * @dev: 
Backpointer to struct device * @netdev: Backpointer to net_device structure * @is_vf: Is the driver attached to a VF? - * @bpf_offload_xdp: Offloaded BPF program is XDP * @chained_metadata_format: Firemware will use new metadata format * @rx_dma_dir: Mapping direction for RX buffers * @rx_dma_off: Offset at which DMA packets (for XDP headroom) @@ -510,7 +509,6 @@ struct nfp_net_dp { struct net_device *netdev; u8 is_vf:1; - u8 bpf_offload_xdp:1; u8 chained_metadata_format:1; u8 rx_dma_dir; @@ -553,8 +551,8 @@ struct nfp_net_dp { * @rss_cfg: RSS configuration * @rss_key: RSS secret key * @rss_itbl: RSS indirection table - * @xdp_flags: Flags with which XDP prog was loaded - * @xdp_prog: XDP prog (for ctrl path, both DRV and HW modes) + * @xdp: Information about the driver XDP program + * @xdp_hw: Information about the HW XDP program * @max_r_vecs: Number of allocated interrupt vectors for RX/TX * @max_tx_rings: Maximum number of TX rings supported by the Firmware * @max_rx_rings: Maximum number of RX rings supported by the Firmware @@ -610,8 +608,8 @@ struct nfp_net { u8 rss_key[NFP_NET_CFG_RSS_KEY_SZ]; u8 rss_itbl[NFP_NET_CFG_RSS_ITBL_SZ]; - u32 xdp_flags; - struct bpf_prog *xdp_prog; + struct xdp_attachment_info xdp; + struct xdp_attachment_info xdp_hw; unsigned int max_tx_rings; unsigned int max_rx_rings; diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c index d4c27f849f9b..a712e83c3f0f 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c @@ -945,11 +945,12 @@ err_free: /** * nfp_net_tx_complete() - Handled completed TX packets - * @tx_ring: TX ring structure + * @tx_ring: TX ring structure + * @budget: NAPI budget (only used as bool to determine if in NAPI context) * * Return: Number of completed TX descriptors */ -static void nfp_net_tx_complete(struct nfp_net_tx_ring *tx_ring) +static void nfp_net_tx_complete(struct nfp_net_tx_ring *tx_ring, int budget) { struct nfp_net_r_vector *r_vec = tx_ring->r_vec; struct nfp_net_dp *dp = &r_vec->nfp_net->dp; @@ -999,7 +1000,7 @@ static void nfp_net_tx_complete(struct nfp_net_tx_ring *tx_ring) /* check for last gather fragment */ if (fidx == nr_frags - 1) - dev_consume_skb_any(skb); + napi_consume_skb(skb, budget); tx_ring->txbufs[idx].dma_addr = 0; tx_ring->txbufs[idx].skb = NULL; @@ -1709,8 +1710,7 @@ static int nfp_net_rx(struct nfp_net_rx_ring *rx_ring, int budget) } } - if (xdp_prog && !(rxd->rxd.flags & PCIE_DESC_RX_BPF && - dp->bpf_offload_xdp) && !meta.portid) { + if (xdp_prog && !meta.portid) { void *orig_data = rxbuf->frag + pkt_off; unsigned int dma_off; int act; @@ -1828,7 +1828,7 @@ static int nfp_net_poll(struct napi_struct *napi, int budget) unsigned int pkts_polled = 0; if (r_vec->tx_ring) - nfp_net_tx_complete(r_vec->tx_ring); + nfp_net_tx_complete(r_vec->tx_ring, budget); if (r_vec->rx_ring) pkts_polled = nfp_net_rx(r_vec->rx_ring, budget); @@ -2062,7 +2062,7 @@ static void nfp_ctrl_poll(unsigned long arg) struct nfp_net_r_vector *r_vec = (void *)arg; spin_lock_bh(&r_vec->lock); - nfp_net_tx_complete(r_vec->tx_ring); + nfp_net_tx_complete(r_vec->tx_ring, 0); __nfp_ctrl_tx_queued(r_vec); spin_unlock_bh(&r_vec->lock); @@ -3115,6 +3115,21 @@ nfp_net_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid) return nfp_net_reconfig_mbox(nn, NFP_NET_CFG_MBOX_CMD_CTAG_FILTER_KILL); } +#ifdef CONFIG_NET_POLL_CONTROLLER +static void nfp_net_netpoll(struct net_device *netdev) +{ + struct 
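/* Note on the budget parameter added to nfp_net_tx_complete() above
 * (general kernel API behaviour, not specific to this driver):
 * napi_consume_skb() only defers the skb to the per-CPU NAPI free list when
 * budget is non-zero; nfp_ctrl_poll() runs from a tasklet rather than NAPI
 * context, so it passes 0 and the call degrades to dev_consume_skb_any().
 */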
nfp_net *nn = netdev_priv(netdev); + int i; + + /* nfp_net's NAPIs are statically allocated so even if there is a race + * with reconfig path this will simply try to schedule some disabled + * NAPI instances. + */ + for (i = 0; i < nn->dp.num_stack_tx_rings; i++) + napi_schedule_irqoff(&nn->r_vecs[i].napi); +} +#endif + static void nfp_net_stat64(struct net_device *netdev, struct rtnl_link_stats64 *stats) { @@ -3377,14 +3392,18 @@ static void nfp_net_del_vxlan_port(struct net_device *netdev, nfp_net_set_vxlan_port(nn, idx, 0); } -static int -nfp_net_xdp_setup_drv(struct nfp_net *nn, struct bpf_prog *prog, - struct netlink_ext_ack *extack) +static int nfp_net_xdp_setup_drv(struct nfp_net *nn, struct netdev_bpf *bpf) { + struct bpf_prog *prog = bpf->prog; struct nfp_net_dp *dp; + int err; + + if (!xdp_attachment_flags_ok(&nn->xdp, bpf)) + return -EBUSY; if (!prog == !nn->dp.xdp_prog) { WRITE_ONCE(nn->dp.xdp_prog, prog); + xdp_attachment_setup(&nn->xdp, bpf); return 0; } @@ -3398,38 +3417,26 @@ nfp_net_xdp_setup_drv(struct nfp_net *nn, struct bpf_prog *prog, dp->rx_dma_off = prog ? XDP_PACKET_HEADROOM - nn->dp.rx_offset : 0; /* We need RX reconfig to remap the buffers (BIDIR vs FROM_DEV) */ - return nfp_net_ring_reconfig(nn, dp, extack); + err = nfp_net_ring_reconfig(nn, dp, bpf->extack); + if (err) + return err; + + xdp_attachment_setup(&nn->xdp, bpf); + return 0; } -static int -nfp_net_xdp_setup(struct nfp_net *nn, struct bpf_prog *prog, u32 flags, - struct netlink_ext_ack *extack) +static int nfp_net_xdp_setup_hw(struct nfp_net *nn, struct netdev_bpf *bpf) { - struct bpf_prog *drv_prog, *offload_prog; int err; - if (nn->xdp_prog && (flags ^ nn->xdp_flags) & XDP_FLAGS_MODES) + if (!xdp_attachment_flags_ok(&nn->xdp_hw, bpf)) return -EBUSY; - /* Load both when no flags set to allow easy activation of driver path - * when program is replaced by one which can't be offloaded. - */ - drv_prog = flags & XDP_FLAGS_HW_MODE ? NULL : prog; - offload_prog = flags & XDP_FLAGS_DRV_MODE ? NULL : prog; - - err = nfp_net_xdp_setup_drv(nn, drv_prog, extack); + err = nfp_app_xdp_offload(nn->app, nn, bpf->prog, bpf->extack); if (err) return err; - err = nfp_app_xdp_offload(nn->app, nn, offload_prog, extack); - if (err && flags & XDP_FLAGS_HW_MODE) - return err; - - if (nn->xdp_prog) - bpf_prog_put(nn->xdp_prog); - nn->xdp_prog = prog; - nn->xdp_flags = flags; - + xdp_attachment_setup(&nn->xdp_hw, bpf); return 0; } @@ -3439,16 +3446,13 @@ static int nfp_net_xdp(struct net_device *netdev, struct netdev_bpf *xdp) switch (xdp->command) { case XDP_SETUP_PROG: + return nfp_net_xdp_setup_drv(nn, xdp); case XDP_SETUP_PROG_HW: - return nfp_net_xdp_setup(nn, xdp->prog, xdp->flags, - xdp->extack); + return nfp_net_xdp_setup_hw(nn, xdp); case XDP_QUERY_PROG: - xdp->prog_attached = !!nn->xdp_prog; - if (nn->dp.bpf_offload_xdp) - xdp->prog_attached = XDP_ATTACHED_HW; - xdp->prog_id = nn->xdp_prog ? nn->xdp_prog->aux->id : 0; - xdp->prog_flags = nn->xdp_prog ? 
nn->xdp_flags : 0; - return 0; + return xdp_attachment_query(&nn->xdp, xdp); + case XDP_QUERY_PROG_HW: + return xdp_attachment_query(&nn->xdp_hw, xdp); default: return nfp_app_bpf(nn->app, nn, xdp); } @@ -3482,6 +3486,9 @@ const struct net_device_ops nfp_net_netdev_ops = { .ndo_get_stats64 = nfp_net_stat64, .ndo_vlan_rx_add_vid = nfp_net_vlan_rx_add_vid, .ndo_vlan_rx_kill_vid = nfp_net_vlan_rx_kill_vid, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = nfp_net_netpoll, +#endif .ndo_set_vf_mac = nfp_app_set_vf_mac, .ndo_set_vf_vlan = nfp_app_set_vf_vlan, .ndo_set_vf_spoofchk = nfp_app_set_vf_spoofchk, diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c index 26d1cc4e2906..6a79c8e4a7a4 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c @@ -233,12 +233,10 @@ nfp_net_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo) static void nfp_app_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo) { - struct nfp_app *app; - - app = nfp_app_from_netdev(netdev); - if (!app) - return; + struct nfp_app *app = nfp_app_from_netdev(netdev); + strlcpy(drvinfo->bus_info, pci_name(app->pdev), + sizeof(drvinfo->bus_info)); nfp_get_drvinfo(app, app->pdev, "*", drvinfo); } @@ -452,7 +450,7 @@ static unsigned int nfp_vnic_get_sw_stats_count(struct net_device *netdev) { struct nfp_net *nn = netdev_priv(netdev); - return NN_RVEC_GATHER_STATS + nn->dp.num_r_vecs * NN_RVEC_PER_Q_STATS; + return NN_RVEC_GATHER_STATS + nn->max_r_vecs * NN_RVEC_PER_Q_STATS; } static u8 *nfp_vnic_get_sw_stats_strings(struct net_device *netdev, u8 *data) @@ -460,7 +458,7 @@ static u8 *nfp_vnic_get_sw_stats_strings(struct net_device *netdev, u8 *data) struct nfp_net *nn = netdev_priv(netdev); int i; - for (i = 0; i < nn->dp.num_r_vecs; i++) { + for (i = 0; i < nn->max_r_vecs; i++) { data = nfp_pr_et(data, "rvec_%u_rx_pkts", i); data = nfp_pr_et(data, "rvec_%u_tx_pkts", i); data = nfp_pr_et(data, "rvec_%u_tx_busy", i); @@ -486,7 +484,7 @@ static u64 *nfp_vnic_get_sw_stats(struct net_device *netdev, u64 *data) u64 tmp[NN_RVEC_GATHER_STATS]; unsigned int i, j; - for (i = 0; i < nn->dp.num_r_vecs; i++) { + for (i = 0; i < nn->max_r_vecs; i++) { unsigned int start; do { @@ -521,15 +519,13 @@ static u64 *nfp_vnic_get_sw_stats(struct net_device *netdev, u64 *data) return data; } -static unsigned int -nfp_vnic_get_hw_stats_count(unsigned int rx_rings, unsigned int tx_rings) +static unsigned int nfp_vnic_get_hw_stats_count(unsigned int num_vecs) { - return NN_ET_GLOBAL_STATS_LEN + (rx_rings + tx_rings) * 2; + return NN_ET_GLOBAL_STATS_LEN + num_vecs * 4; } static u8 * -nfp_vnic_get_hw_stats_strings(u8 *data, unsigned int rx_rings, - unsigned int tx_rings, bool repr) +nfp_vnic_get_hw_stats_strings(u8 *data, unsigned int num_vecs, bool repr) { int swap_off, i; @@ -549,36 +545,29 @@ nfp_vnic_get_hw_stats_strings(u8 *data, unsigned int rx_rings, for (i = NN_ET_SWITCH_STATS_LEN * 2; i < NN_ET_GLOBAL_STATS_LEN; i++) data = nfp_pr_et(data, nfp_net_et_stats[i].name); - for (i = 0; i < tx_rings; i++) { - data = nfp_pr_et(data, "txq_%u_pkts", i); - data = nfp_pr_et(data, "txq_%u_bytes", i); - } - - for (i = 0; i < rx_rings; i++) { + for (i = 0; i < num_vecs; i++) { data = nfp_pr_et(data, "rxq_%u_pkts", i); data = nfp_pr_et(data, "rxq_%u_bytes", i); + data = nfp_pr_et(data, "txq_%u_pkts", i); + data = nfp_pr_et(data, "txq_%u_bytes", i); } return data; } static u64 * 
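The nfp XDP rework above replaces the driver's hand-rolled xdp_prog/xdp_flags tracking with the generic xdp_attachment helpers. As a rough sketch of that pattern (assuming the 4.18-era xdp_attachment_flags_ok/xdp_attachment_setup/xdp_attachment_query signatures; the foo_* names and foo_reconfig() are hypothetical, this is not the nfp code):

/* priv->xdp is a struct xdp_attachment_info embedded in the driver's
 * private data; it remembers the attached prog and the XDP_FLAGS_* it
 * was attached with.
 */
static int foo_xdp_setup(struct foo_priv *priv, struct netdev_bpf *bpf)
{
	int err;

	/* Refuse attach requests whose flags conflict with the program
	 * already installed (e.g. mixing DRV and HW mode).
	 */
	if (!xdp_attachment_flags_ok(&priv->xdp, bpf))
		return -EBUSY;

	err = foo_reconfig(priv, bpf->prog);	/* driver-specific work */
	if (err)
		return err;

	/* Record prog + flags so later queries and attaches see them. */
	xdp_attachment_setup(&priv->xdp, bpf);
	return 0;
}

static int foo_bpf(struct net_device *dev, struct netdev_bpf *bpf)
{
	struct foo_priv *priv = netdev_priv(dev);

	switch (bpf->command) {
	case XDP_SETUP_PROG:
		return foo_xdp_setup(priv, bpf);
	case XDP_QUERY_PROG:
		return xdp_attachment_query(&priv->xdp, bpf);
	default:
		return -EINVAL;
	}
}

Keeping one xdp_attachment_info per attachment point is what lets XDP_QUERY_PROG and the new XDP_QUERY_PROG_HW report independent programs, which is exactly how the nfp patch uses nn->xdp and nn->xdp_hw.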
-nfp_vnic_get_hw_stats(u64 *data, u8 __iomem *mem, - unsigned int rx_rings, unsigned int tx_rings) +nfp_vnic_get_hw_stats(u64 *data, u8 __iomem *mem, unsigned int num_vecs) { unsigned int i; for (i = 0; i < NN_ET_GLOBAL_STATS_LEN; i++) *data++ = readq(mem + nfp_net_et_stats[i].off); - for (i = 0; i < tx_rings; i++) { - *data++ = readq(mem + NFP_NET_CFG_TXR_STATS(i)); - *data++ = readq(mem + NFP_NET_CFG_TXR_STATS(i) + 8); - } - - for (i = 0; i < rx_rings; i++) { + for (i = 0; i < num_vecs; i++) { *data++ = readq(mem + NFP_NET_CFG_RXR_STATS(i)); *data++ = readq(mem + NFP_NET_CFG_RXR_STATS(i) + 8); + *data++ = readq(mem + NFP_NET_CFG_TXR_STATS(i)); + *data++ = readq(mem + NFP_NET_CFG_TXR_STATS(i) + 8); } return data; @@ -633,8 +622,7 @@ static void nfp_net_get_strings(struct net_device *netdev, switch (stringset) { case ETH_SS_STATS: data = nfp_vnic_get_sw_stats_strings(netdev, data); - data = nfp_vnic_get_hw_stats_strings(data, nn->dp.num_rx_rings, - nn->dp.num_tx_rings, + data = nfp_vnic_get_hw_stats_strings(data, nn->max_r_vecs, false); data = nfp_mac_get_stats_strings(netdev, data); data = nfp_app_port_get_stats_strings(nn->port, data); @@ -649,8 +637,7 @@ nfp_net_get_stats(struct net_device *netdev, struct ethtool_stats *stats, struct nfp_net *nn = netdev_priv(netdev); data = nfp_vnic_get_sw_stats(netdev, data); - data = nfp_vnic_get_hw_stats(data, nn->dp.ctrl_bar, - nn->dp.num_rx_rings, nn->dp.num_tx_rings); + data = nfp_vnic_get_hw_stats(data, nn->dp.ctrl_bar, nn->max_r_vecs); data = nfp_mac_get_stats(netdev, data); data = nfp_app_port_get_stats(nn->port, data); } @@ -662,8 +649,7 @@ static int nfp_net_get_sset_count(struct net_device *netdev, int sset) switch (sset) { case ETH_SS_STATS: return nfp_vnic_get_sw_stats_count(netdev) + - nfp_vnic_get_hw_stats_count(nn->dp.num_rx_rings, - nn->dp.num_tx_rings) + + nfp_vnic_get_hw_stats_count(nn->max_r_vecs) + nfp_mac_get_stats_count(netdev) + nfp_app_port_get_stats_count(nn->port); default: @@ -679,7 +665,7 @@ static void nfp_port_get_strings(struct net_device *netdev, switch (stringset) { case ETH_SS_STATS: if (nfp_port_is_vnic(port)) - data = nfp_vnic_get_hw_stats_strings(data, 0, 0, true); + data = nfp_vnic_get_hw_stats_strings(data, 0, true); else data = nfp_mac_get_stats_strings(netdev, data); data = nfp_app_port_get_stats_strings(port, data); @@ -694,7 +680,7 @@ nfp_port_get_stats(struct net_device *netdev, struct ethtool_stats *stats, struct nfp_port *port = nfp_port_from_netdev(netdev); if (nfp_port_is_vnic(port)) - data = nfp_vnic_get_hw_stats(data, port->vnic, 0, 0); + data = nfp_vnic_get_hw_stats(data, port->vnic, 0); else data = nfp_mac_get_stats(netdev, data); data = nfp_app_port_get_stats(port, data); @@ -708,7 +694,7 @@ static int nfp_port_get_sset_count(struct net_device *netdev, int sset) switch (sset) { case ETH_SS_STATS: if (nfp_port_is_vnic(port)) - count = nfp_vnic_get_hw_stats_count(0, 0); + count = nfp_vnic_get_hw_stats_count(0); else count = nfp_mac_get_stats_count(netdev); count += nfp_app_port_get_stats_count(port); diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c index 749655c329b2..c8d0b1016a64 100644 --- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c @@ -1248,7 +1248,7 @@ static void nfp6000_free(struct nfp_cpp *cpp) kfree(nfp); } -static void nfp6000_read_serial(struct device *dev, u8 *serial) +static int nfp6000_read_serial(struct device *dev, u8 *serial) 
{ struct pci_dev *pdev = to_pci_dev(dev); int pos; @@ -1256,25 +1256,29 @@ static void nfp6000_read_serial(struct device *dev, u8 *serial) pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_DSN); if (!pos) { - memset(serial, 0, NFP_SERIAL_LEN); - return; + dev_err(dev, "can't find PCIe Serial Number Capability\n"); + return -EINVAL; } pci_read_config_dword(pdev, pos + 4, &reg); put_unaligned_be16(reg >> 16, serial + 4); pci_read_config_dword(pdev, pos + 8, &reg); put_unaligned_be32(reg, serial); + + return 0; } -static u16 nfp6000_get_interface(struct device *dev) +static int nfp6000_get_interface(struct device *dev) { struct pci_dev *pdev = to_pci_dev(dev); int pos; u32 reg; pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_DSN); - if (!pos) - return NFP_CPP_INTERFACE(NFP_CPP_INTERFACE_TYPE_PCI, 0, 0xff); + if (!pos) { + dev_err(dev, "can't find PCIe Serial Number Capability\n"); + return -EINVAL; + } pci_read_config_dword(pdev, pos + 4, &reg); diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpp.h b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpp.h index b0da3d436850..c338d539fa96 100644 --- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpp.h +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpp.h @@ -364,8 +364,8 @@ struct nfp_cpp_operations { int (*init)(struct nfp_cpp *cpp); void (*free)(struct nfp_cpp *cpp); - void (*read_serial)(struct device *dev, u8 *serial); - u16 (*get_interface)(struct device *dev); + int (*read_serial)(struct device *dev, u8 *serial); + int (*get_interface)(struct device *dev); int (*area_init)(struct nfp_cpp_area *area, u32 dest, unsigned long long address, diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c index ef30597aa319..73de57a09800 100644 --- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c @@ -1163,10 +1163,10 @@ nfp_cpp_from_operations(const struct nfp_cpp_operations *ops, { const u32 arm = NFP_CPP_ID(NFP_CPP_TARGET_ARM, NFP_CPP_ACTION_RW, 0); struct nfp_cpp *cpp; + int ifc, err; u32 mask[2]; u32 xpbaddr; size_t tgt; - int err; cpp = kzalloc(sizeof(*cpp), GFP_KERNEL); if (!cpp) { @@ -1176,9 +1176,19 @@ nfp_cpp_from_operations(const struct nfp_cpp_operations *ops, cpp->op = ops; cpp->priv = priv; - cpp->interface = ops->get_interface(parent); - if (ops->read_serial) - ops->read_serial(parent, cpp->serial); + + ifc = ops->get_interface(parent); + if (ifc < 0) { + err = ifc; + goto err_free_cpp; + } + cpp->interface = ifc; + if (ops->read_serial) { + err = ops->read_serial(parent, cpp->serial); + if (err) + goto err_free_cpp; + } + rwlock_init(&cpp->resource_lock); init_waitqueue_head(&cpp->waitq); lockdep_set_class(&cpp->resource_lock, &nfp_cpp_resource_lock_key); @@ -1191,7 +1201,7 @@ nfp_cpp_from_operations(const struct nfp_cpp_operations *ops, err = device_register(&cpp->dev); if (err < 0) { put_device(&cpp->dev); - goto err_dev; + goto err_free_cpp; } dev_set_drvdata(&cpp->dev, cpp); @@ -1238,7 +1248,7 @@ nfp_cpp_from_operations(const struct nfp_cpp_operations *ops, err_out: device_unregister(&cpp->dev); -err_dev: +err_free_cpp: kfree(cpp); err_malloc: return ERR_PTR(err); diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/Makefile b/drivers/net/ethernet/oki-semi/pch_gbe/Makefile index 31288d4ad248..862de0f3bc41 100644 --- a/drivers/net/ethernet/oki-semi/pch_gbe/Makefile +++ b/drivers/net/ethernet/oki-semi/pch_gbe/Makefile @@ -1,4 +1,4 @@ obj-$(CONFIG_PCH_GBE) += pch_gbe.o pch_gbe-y := 
pch_gbe_phy.o pch_gbe_ethtool.o pch_gbe_param.o -pch_gbe-y += pch_gbe_api.o pch_gbe_main.o +pch_gbe-y += pch_gbe_main.o diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h index 697e29dd4bd3..44c2f291e766 100644 --- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h +++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h @@ -326,32 +326,6 @@ struct pch_gbe_regs { #define PCH_GBE_FC_FULL 3 #define PCH_GBE_FC_DEFAULT PCH_GBE_FC_FULL - -struct pch_gbe_hw; -/** - * struct pch_gbe_functions - HAL APi function pointer - * @get_bus_info: for pch_gbe_hal_get_bus_info - * @init_hw: for pch_gbe_hal_init_hw - * @read_phy_reg: for pch_gbe_hal_read_phy_reg - * @write_phy_reg: for pch_gbe_hal_write_phy_reg - * @reset_phy: for pch_gbe_hal_phy_hw_reset - * @sw_reset_phy: for pch_gbe_hal_phy_sw_reset - * @power_up_phy: for pch_gbe_hal_power_up_phy - * @power_down_phy: for pch_gbe_hal_power_down_phy - * @read_mac_addr: for pch_gbe_hal_read_mac_addr - */ -struct pch_gbe_functions { - void (*get_bus_info) (struct pch_gbe_hw *); - s32 (*init_hw) (struct pch_gbe_hw *); - s32 (*read_phy_reg) (struct pch_gbe_hw *, u32, u16 *); - s32 (*write_phy_reg) (struct pch_gbe_hw *, u32, u16); - void (*reset_phy) (struct pch_gbe_hw *); - void (*sw_reset_phy) (struct pch_gbe_hw *); - void (*power_up_phy) (struct pch_gbe_hw *hw); - void (*power_down_phy) (struct pch_gbe_hw *hw); - s32 (*read_mac_addr) (struct pch_gbe_hw *); -}; - /** * struct pch_gbe_mac_info - MAC information * @addr[6]: Store the MAC address @@ -394,17 +368,6 @@ struct pch_gbe_phy_info { /*! * @ingroup Gigabit Ether driver Layer - * @struct pch_gbe_bus_info - * @brief Bus information - */ -struct pch_gbe_bus_info { - u8 type; - u8 speed; - u8 width; -}; - -/*! - * @ingroup Gigabit Ether driver Layer * @struct pch_gbe_hw * @brief Hardware information */ @@ -414,10 +377,8 @@ struct pch_gbe_hw { struct pch_gbe_regs __iomem *reg; spinlock_t miim_lock; - const struct pch_gbe_functions *func; struct pch_gbe_mac_info mac; struct pch_gbe_phy_info phy; - struct pch_gbe_bus_info bus; }; /** @@ -680,7 +641,6 @@ void pch_gbe_set_ethtool_ops(struct net_device *netdev); /* pch_gbe_mac.c */ s32 pch_gbe_mac_force_mac_fc(struct pch_gbe_hw *hw); -s32 pch_gbe_mac_read_mac_addr(struct pch_gbe_hw *hw); u16 pch_gbe_mac_ctrl_miim(struct pch_gbe_hw *hw, u32 addr, u32 dir, u32 reg, u16 data); #endif /* _PCH_GBE_H_ */ diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_api.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_api.c deleted file mode 100644 index 51250363566b..000000000000 --- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_api.c +++ /dev/null @@ -1,262 +0,0 @@ -/* - * Copyright (C) 1999 - 2010 Intel Corporation. - * Copyright (C) 2010 OKI SEMICONDUCTOR Co., LTD. - * - * This code was derived from the Intel e1000e Linux driver. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; version 2 of the License. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, see <http://www.gnu.org/licenses/>. 
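The pch_gbe changes that follow remove the pch_gbe_functions operations table shown above. Only one implementation ever existed, so every indirect hw->func->...() access collapses into a direct call to the corresponding pch_gbe_phy_* helper. A minimal before/after sketch of the pattern (the two wrapper names here are illustrative, not from the driver):

/* Before: every PHY access paid for a single-entry indirection. */
static s32 reset_phy_via_hal(struct pch_gbe_hw *hw)
{
	return hw->func->write_phy_reg(hw, MII_BMCR, BMCR_RESET);
}

/* After: call the only implementation directly. */
static s32 reset_phy_direct(struct pch_gbe_hw *hw)
{
	return pch_gbe_phy_write_reg_miic(hw, MII_BMCR, BMCR_RESET);
}

The same substitution shows up throughout the ethtool and main files below (pch_gbe_hal_read_phy_reg becomes pch_gbe_phy_read_reg_miic, pch_gbe_hal_power_up_phy becomes pch_gbe_phy_power_up, and so on).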
- */ -#include "pch_gbe.h" -#include "pch_gbe_phy.h" -#include "pch_gbe_api.h" - -/* bus type values */ -#define pch_gbe_bus_type_unknown 0 -#define pch_gbe_bus_type_pci 1 -#define pch_gbe_bus_type_pcix 2 -#define pch_gbe_bus_type_pci_express 3 -#define pch_gbe_bus_type_reserved 4 - -/* bus speed values */ -#define pch_gbe_bus_speed_unknown 0 -#define pch_gbe_bus_speed_33 1 -#define pch_gbe_bus_speed_66 2 -#define pch_gbe_bus_speed_100 3 -#define pch_gbe_bus_speed_120 4 -#define pch_gbe_bus_speed_133 5 -#define pch_gbe_bus_speed_2500 6 -#define pch_gbe_bus_speed_reserved 7 - -/* bus width values */ -#define pch_gbe_bus_width_unknown 0 -#define pch_gbe_bus_width_pcie_x1 1 -#define pch_gbe_bus_width_pcie_x2 2 -#define pch_gbe_bus_width_pcie_x4 4 -#define pch_gbe_bus_width_32 5 -#define pch_gbe_bus_width_64 6 -#define pch_gbe_bus_width_reserved 7 - -/** - * pch_gbe_plat_get_bus_info - Obtain bus information for adapter - * @hw: Pointer to the HW structure - */ -static void pch_gbe_plat_get_bus_info(struct pch_gbe_hw *hw) -{ - hw->bus.type = pch_gbe_bus_type_pci_express; - hw->bus.speed = pch_gbe_bus_speed_2500; - hw->bus.width = pch_gbe_bus_width_pcie_x1; -} - -/** - * pch_gbe_plat_init_hw - Initialize hardware - * @hw: Pointer to the HW structure - * Returns: - * 0: Successfully - * Negative value: Failed-EBUSY - */ -static s32 pch_gbe_plat_init_hw(struct pch_gbe_hw *hw) -{ - s32 ret_val; - - ret_val = pch_gbe_phy_get_id(hw); - if (ret_val) { - struct pch_gbe_adapter *adapter = pch_gbe_hw_to_adapter(hw); - - netdev_err(adapter->netdev, "pch_gbe_phy_get_id error\n"); - return ret_val; - } - pch_gbe_phy_init_setting(hw); - /* Setup Mac interface option RGMII */ -#ifdef PCH_GBE_MAC_IFOP_RGMII - pch_gbe_phy_set_rgmii(hw); -#endif - return ret_val; -} - -static const struct pch_gbe_functions pch_gbe_ops = { - .get_bus_info = pch_gbe_plat_get_bus_info, - .init_hw = pch_gbe_plat_init_hw, - .read_phy_reg = pch_gbe_phy_read_reg_miic, - .write_phy_reg = pch_gbe_phy_write_reg_miic, - .reset_phy = pch_gbe_phy_hw_reset, - .sw_reset_phy = pch_gbe_phy_sw_reset, - .power_up_phy = pch_gbe_phy_power_up, - .power_down_phy = pch_gbe_phy_power_down, - .read_mac_addr = pch_gbe_mac_read_mac_addr -}; - -/** - * pch_gbe_plat_init_function_pointers - Init func ptrs - * @hw: Pointer to the HW structure - */ -static void pch_gbe_plat_init_function_pointers(struct pch_gbe_hw *hw) -{ - /* Set PHY parameter */ - hw->phy.reset_delay_us = PCH_GBE_PHY_RESET_DELAY_US; - /* Set function pointers */ - hw->func = &pch_gbe_ops; -} - -/** - * pch_gbe_hal_setup_init_funcs - Initializes function pointers - * @hw: Pointer to the HW structure - * Returns: - * 0: Successfully - * ENOSYS: Function is not registered - */ -s32 pch_gbe_hal_setup_init_funcs(struct pch_gbe_hw *hw) -{ - if (!hw->reg) { - struct pch_gbe_adapter *adapter = pch_gbe_hw_to_adapter(hw); - - netdev_err(adapter->netdev, "ERROR: Registers not mapped\n"); - return -ENOSYS; - } - pch_gbe_plat_init_function_pointers(hw); - return 0; -} - -/** - * pch_gbe_hal_get_bus_info - Obtain bus information for adapter - * @hw: Pointer to the HW structure - */ -void pch_gbe_hal_get_bus_info(struct pch_gbe_hw *hw) -{ - if (!hw->func->get_bus_info) { - struct pch_gbe_adapter *adapter = pch_gbe_hw_to_adapter(hw); - - netdev_err(adapter->netdev, "ERROR: configuration\n"); - return; - } - hw->func->get_bus_info(hw); -} - -/** - * pch_gbe_hal_init_hw - Initialize hardware - * @hw: Pointer to the HW structure - * Returns: - * 0: Successfully - * ENOSYS: Function is not registered - */ -s32 
pch_gbe_hal_init_hw(struct pch_gbe_hw *hw) -{ - if (!hw->func->init_hw) { - struct pch_gbe_adapter *adapter = pch_gbe_hw_to_adapter(hw); - - netdev_err(adapter->netdev, "ERROR: configuration\n"); - return -ENOSYS; - } - return hw->func->init_hw(hw); -} - -/** - * pch_gbe_hal_read_phy_reg - Reads PHY register - * @hw: Pointer to the HW structure - * @offset: The register to read - * @data: The buffer to store the 16-bit read. - * Returns: - * 0: Successfully - * Negative value: Failed - */ -s32 pch_gbe_hal_read_phy_reg(struct pch_gbe_hw *hw, u32 offset, - u16 *data) -{ - if (!hw->func->read_phy_reg) - return 0; - return hw->func->read_phy_reg(hw, offset, data); -} - -/** - * pch_gbe_hal_write_phy_reg - Writes PHY register - * @hw: Pointer to the HW structure - * @offset: The register to read - * @data: The value to write. - * Returns: - * 0: Successfully - * Negative value: Failed - */ -s32 pch_gbe_hal_write_phy_reg(struct pch_gbe_hw *hw, u32 offset, - u16 data) -{ - if (!hw->func->write_phy_reg) - return 0; - return hw->func->write_phy_reg(hw, offset, data); -} - -/** - * pch_gbe_hal_phy_hw_reset - Hard PHY reset - * @hw: Pointer to the HW structure - */ -void pch_gbe_hal_phy_hw_reset(struct pch_gbe_hw *hw) -{ - if (!hw->func->reset_phy) { - struct pch_gbe_adapter *adapter = pch_gbe_hw_to_adapter(hw); - - netdev_err(adapter->netdev, "ERROR: configuration\n"); - return; - } - hw->func->reset_phy(hw); -} - -/** - * pch_gbe_hal_phy_sw_reset - Soft PHY reset - * @hw: Pointer to the HW structure - */ -void pch_gbe_hal_phy_sw_reset(struct pch_gbe_hw *hw) -{ - if (!hw->func->sw_reset_phy) { - struct pch_gbe_adapter *adapter = pch_gbe_hw_to_adapter(hw); - - netdev_err(adapter->netdev, "ERROR: configuration\n"); - return; - } - hw->func->sw_reset_phy(hw); -} - -/** - * pch_gbe_hal_read_mac_addr - Reads MAC address - * @hw: Pointer to the HW structure - * Returns: - * 0: Successfully - * ENOSYS: Function is not registered - */ -s32 pch_gbe_hal_read_mac_addr(struct pch_gbe_hw *hw) -{ - if (!hw->func->read_mac_addr) { - struct pch_gbe_adapter *adapter = pch_gbe_hw_to_adapter(hw); - - netdev_err(adapter->netdev, "ERROR: configuration\n"); - return -ENOSYS; - } - return hw->func->read_mac_addr(hw); -} - -/** - * pch_gbe_hal_power_up_phy - Power up PHY - * @hw: Pointer to the HW structure - */ -void pch_gbe_hal_power_up_phy(struct pch_gbe_hw *hw) -{ - if (hw->func->power_up_phy) - hw->func->power_up_phy(hw); -} - -/** - * pch_gbe_hal_power_down_phy - Power down PHY - * @hw: Pointer to the HW structure - */ -void pch_gbe_hal_power_down_phy(struct pch_gbe_hw *hw) -{ - if (hw->func->power_down_phy) - hw->func->power_down_phy(hw); -} diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_api.h b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_api.h deleted file mode 100644 index 91ce07c8306c..000000000000 --- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_api.h +++ /dev/null @@ -1,35 +0,0 @@ -/* - * Copyright (C) 1999 - 2010 Intel Corporation. - * Copyright (C) 2010 OKI SEMICONDUCTOR Co., LTD. - * - * This code was derived from the Intel e1000e Linux driver. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; version 2 of the License. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, see <http://www.gnu.org/licenses/>. - */ -#ifndef _PCH_GBE_API_H_ -#define _PCH_GBE_API_H_ - -#include "pch_gbe_phy.h" - -s32 pch_gbe_hal_setup_init_funcs(struct pch_gbe_hw *hw); -void pch_gbe_hal_get_bus_info(struct pch_gbe_hw *hw); -s32 pch_gbe_hal_init_hw(struct pch_gbe_hw *hw); -s32 pch_gbe_hal_read_phy_reg(struct pch_gbe_hw *hw, u32 offset, u16 *data); -s32 pch_gbe_hal_write_phy_reg(struct pch_gbe_hw *hw, u32 offset, u16 data); -void pch_gbe_hal_phy_hw_reset(struct pch_gbe_hw *hw); -void pch_gbe_hal_phy_sw_reset(struct pch_gbe_hw *hw); -s32 pch_gbe_hal_read_mac_addr(struct pch_gbe_hw *hw); -void pch_gbe_hal_power_up_phy(struct pch_gbe_hw *hw); -void pch_gbe_hal_power_down_phy(struct pch_gbe_hw *hw); - -#endif diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c index 731ce1e419e4..adaa0024adfe 100644 --- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c +++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c @@ -17,7 +17,7 @@ * along with this program; if not, see <http://www.gnu.org/licenses/>. */ #include "pch_gbe.h" -#include "pch_gbe_api.h" +#include "pch_gbe_phy.h" /** * pch_gbe_stats - Stats item information @@ -125,7 +125,7 @@ static int pch_gbe_set_link_ksettings(struct net_device *netdev, u32 advertising; int ret; - pch_gbe_hal_write_phy_reg(hw, MII_BMCR, BMCR_RESET); + pch_gbe_phy_write_reg_miic(hw, MII_BMCR, BMCR_RESET); memcpy(&copy_ecmd, ecmd, sizeof(*ecmd)); @@ -204,7 +204,7 @@ static void pch_gbe_get_regs(struct net_device *netdev, *regs_buff++ = ioread32(&hw->reg->INT_ST + i); /* PHY register */ for (i = 0; i < PCH_GBE_PHY_REGS_LEN; i++) { - pch_gbe_hal_read_phy_reg(&adapter->hw, i, &tmp); + pch_gbe_phy_read_reg_miic(&adapter->hw, i, &tmp); *regs_buff++ = tmp; } } @@ -349,25 +349,12 @@ static int pch_gbe_set_ringparam(struct net_device *netdev, err = pch_gbe_setup_tx_resources(adapter, adapter->tx_ring); if (err) goto err_setup_tx; - /* save the new, restore the old in order to free it, * then restore the new back again */ -#ifdef RINGFREE - adapter->rx_ring = rx_old; - adapter->tx_ring = tx_old; - pch_gbe_free_rx_resources(adapter, adapter->rx_ring); - pch_gbe_free_tx_resources(adapter, adapter->tx_ring); - kfree(tx_old); - kfree(rx_old); - adapter->rx_ring = rxdr; - adapter->tx_ring = txdr; -#else pch_gbe_free_rx_resources(adapter, rx_old); pch_gbe_free_tx_resources(adapter, tx_old); kfree(tx_old); kfree(rx_old); adapter->rx_ring = rxdr; adapter->tx_ring = txdr; -#endif err = pch_gbe_up(adapter); } return err; diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c index 34a1581eda95..43c0c10dfeb7 100644 --- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c +++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c @@ -18,7 +18,7 @@ */ #include "pch_gbe.h" -#include "pch_gbe_api.h" +#include "pch_gbe_phy.h" #include <linux/module.h> #include <linux/net_tstamp.h> #include <linux/ptp_classify.h> @@ -34,7 +34,6 @@ const char pch_driver_version[] = DRV_VERSION; #define PCH_GBE_DMA_ALIGN 0 #define PCH_GBE_DMA_PADDING 2 #define PCH_GBE_WATCHDOG_PERIOD (5 * HZ) /* watchdog time */ -#define PCH_GBE_COPYBREAK_DEFAULT 256 #define PCH_GBE_PCI_BAR 1 #define PCH_GBE_RESERVE_MEMORY 0x200000 /* 2MB */ @@ -113,8 +112,6 @@ const char pch_driver_version[] = 
DRV_VERSION; #define MINNOW_PHY_RESET_GPIO 13 -static unsigned int copybreak __read_mostly = PCH_GBE_COPYBREAK_DEFAULT; - static int pch_gbe_mdio_read(struct net_device *netdev, int addr, int reg); static void pch_gbe_mdio_write(struct net_device *netdev, int addr, int reg, int data); @@ -290,7 +287,7 @@ static inline void pch_gbe_mac_load_mac_addr(struct pch_gbe_hw *hw) * Returns: * 0: Successful. */ -s32 pch_gbe_mac_read_mac_addr(struct pch_gbe_hw *hw) +static s32 pch_gbe_mac_read_mac_addr(struct pch_gbe_hw *hw) { struct pch_gbe_adapter *adapter = pch_gbe_hw_to_adapter(hw); u32 adr1a, adr1b; @@ -369,9 +366,7 @@ static void pch_gbe_mac_reset_hw(struct pch_gbe_hw *hw) /* Read the MAC address. and store to the private data */ pch_gbe_mac_read_mac_addr(hw); iowrite32(PCH_GBE_ALL_RST, &hw->reg->RESET); -#ifdef PCH_GBE_MAC_IFOP_RGMII iowrite32(PCH_GBE_MODE_GMII_ETHER, &hw->reg->MODE); -#endif pch_gbe_wait_clr_bit(&hw->reg->RESET, PCH_GBE_ALL_RST); /* Setup the receive addresses */ pch_gbe_mac_mar_set(hw, hw->mac.addr, 0); @@ -416,44 +411,6 @@ static void pch_gbe_mac_init_rx_addrs(struct pch_gbe_hw *hw, u16 mar_count) pch_gbe_wait_clr_bit(&hw->reg->ADDR_MASK, PCH_GBE_BUSY); } - -/** - * pch_gbe_mac_mc_addr_list_update - Update Multicast addresses - * @hw: Pointer to the HW structure - * @mc_addr_list: Array of multicast addresses to program - * @mc_addr_count: Number of multicast addresses to program - * @mar_used_count: The first MAC Address register free to program - * @mar_total_num: Total number of supported MAC Address Registers - */ -static void pch_gbe_mac_mc_addr_list_update(struct pch_gbe_hw *hw, - u8 *mc_addr_list, u32 mc_addr_count, - u32 mar_used_count, u32 mar_total_num) -{ - u32 i, adrmask; - - /* Load the first set of multicast addresses into the exact - * filters (RAR). If there are not enough to fill the RAR - * array, clear the filters. - */ - for (i = mar_used_count; i < mar_total_num; i++) { - if (mc_addr_count) { - pch_gbe_mac_mar_set(hw, mc_addr_list, i); - mc_addr_count--; - mc_addr_list += ETH_ALEN; - } else { - /* Clear MAC address mask */ - adrmask = ioread32(&hw->reg->ADDR_MASK); - iowrite32((adrmask | (0x0001 << i)), - &hw->reg->ADDR_MASK); - /* wait busy */ - pch_gbe_wait_clr_bit(&hw->reg->ADDR_MASK, PCH_GBE_BUSY); - /* Clear MAC address */ - iowrite32(0, &hw->reg->mac_adr[i].high); - iowrite32(0, &hw->reg->mac_adr[i].low); - } - } -} - /** * pch_gbe_mac_force_mac_fc - Force the MAC's flow control settings * @hw: Pointer to the HW structure @@ -763,14 +720,23 @@ void pch_gbe_reinit_locked(struct pch_gbe_adapter *adapter) void pch_gbe_reset(struct pch_gbe_adapter *adapter) { struct net_device *netdev = adapter->netdev; + struct pch_gbe_hw *hw = &adapter->hw; + s32 ret_val; - pch_gbe_mac_reset_hw(&adapter->hw); + pch_gbe_mac_reset_hw(hw); /* reprogram multicast address register after reset */ pch_gbe_set_multi(netdev); /* Setup the receive address. */ - pch_gbe_mac_init_rx_addrs(&adapter->hw, PCH_GBE_MAR_ENTRIES); - if (pch_gbe_hal_init_hw(&adapter->hw)) - netdev_err(netdev, "Hardware Error\n"); + pch_gbe_mac_init_rx_addrs(hw, PCH_GBE_MAR_ENTRIES); + + ret_val = pch_gbe_phy_get_id(hw); + if (ret_val) { + netdev_err(adapter->netdev, "pch_gbe_phy_get_id error\n"); + return; + } + pch_gbe_phy_init_setting(hw); + /* Setup Mac interface option RGMII */ + pch_gbe_phy_set_rgmii(hw); } /** @@ -1036,7 +1002,6 @@ static void pch_gbe_set_rgmii_ctrl(struct pch_gbe_adapter *adapter, u16 speed, unsigned long rgmii = 0; /* Set the RGMII control. 
*/ -#ifdef PCH_GBE_MAC_IFOP_RGMII switch (speed) { case SPEED_10: rgmii = (PCH_GBE_RGMII_RATE_2_5M | @@ -1052,10 +1017,6 @@ static void pch_gbe_set_rgmii_ctrl(struct pch_gbe_adapter *adapter, u16 speed, break; } iowrite32(rgmii, &hw->reg->RGMII_CTRL); -#else /* GMII */ - rgmii = 0; - iowrite32(rgmii, &hw->reg->RGMII_CTRL); -#endif } static void pch_gbe_set_mode(struct pch_gbe_adapter *adapter, u16 speed, u16 duplex) @@ -2029,12 +1990,8 @@ static int pch_gbe_sw_init(struct pch_gbe_adapter *adapter) adapter->rx_buffer_len = PCH_GBE_FRAME_SIZE_2048; hw->mac.max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN; hw->mac.min_frame_size = ETH_ZLEN + ETH_FCS_LEN; + hw->phy.reset_delay_us = PCH_GBE_PHY_RESET_DELAY_US; - /* Initialize the hardware-specific values */ - if (pch_gbe_hal_setup_init_funcs(hw)) { - netdev_err(netdev, "Hardware Initialization Failure\n"); - return -EIO; - } if (pch_gbe_alloc_queues(adapter)) { netdev_err(netdev, "Unable to allocate memory for queues\n"); return -ENOMEM; @@ -2075,7 +2032,7 @@ static int pch_gbe_open(struct net_device *netdev) err = pch_gbe_setup_rx_resources(adapter, adapter->rx_ring); if (err) goto err_setup_rx; - pch_gbe_hal_power_up_phy(hw); + pch_gbe_phy_power_up(hw); err = pch_gbe_up(adapter); if (err) goto err_up; @@ -2084,7 +2041,7 @@ static int pch_gbe_open(struct net_device *netdev) err_up: if (!adapter->wake_up_evt) - pch_gbe_hal_power_down_phy(hw); + pch_gbe_phy_power_down(hw); pch_gbe_free_rx_resources(adapter, adapter->rx_ring); err_setup_rx: pch_gbe_free_tx_resources(adapter, adapter->tx_ring); @@ -2107,7 +2064,7 @@ static int pch_gbe_stop(struct net_device *netdev) pch_gbe_down(adapter); if (!adapter->wake_up_evt) - pch_gbe_hal_power_down_phy(hw); + pch_gbe_phy_power_down(hw); pch_gbe_free_tx_resources(adapter, adapter->tx_ring); pch_gbe_free_rx_resources(adapter, adapter->rx_ring); return 0; @@ -2148,50 +2105,52 @@ static void pch_gbe_set_multi(struct net_device *netdev) struct pch_gbe_adapter *adapter = netdev_priv(netdev); struct pch_gbe_hw *hw = &adapter->hw; struct netdev_hw_addr *ha; - u8 *mta_list; - u32 rctl; - int i; - int mc_count; + u32 rctl, adrmask; + int mc_count, i; netdev_dbg(netdev, "netdev->flags : 0x%08x\n", netdev->flags); - /* Check for Promiscuous and All Multicast modes */ + /* By default enable address & multicast filtering */ rctl = ioread32(&hw->reg->RX_MODE); + rctl |= PCH_GBE_ADD_FIL_EN | PCH_GBE_MLT_FIL_EN; + + /* Promiscuous mode disables all hardware address filtering */ + if (netdev->flags & IFF_PROMISC) + rctl &= ~(PCH_GBE_ADD_FIL_EN | PCH_GBE_MLT_FIL_EN); + + /* If we want to monitor more multicast addresses than the hardware can + * support then disable hardware multicast filtering. 
+ */ mc_count = netdev_mc_count(netdev); - if ((netdev->flags & IFF_PROMISC)) { - rctl &= ~PCH_GBE_ADD_FIL_EN; - rctl &= ~PCH_GBE_MLT_FIL_EN; - } else if ((netdev->flags & IFF_ALLMULTI)) { - /* all the multicasting receive permissions */ - rctl |= PCH_GBE_ADD_FIL_EN; + if ((netdev->flags & IFF_ALLMULTI) || mc_count >= PCH_GBE_MAR_ENTRIES) rctl &= ~PCH_GBE_MLT_FIL_EN; - } else { - if (mc_count >= PCH_GBE_MAR_ENTRIES) { - /* all the multicasting receive permissions */ - rctl |= PCH_GBE_ADD_FIL_EN; - rctl &= ~PCH_GBE_MLT_FIL_EN; - } else { - rctl |= (PCH_GBE_ADD_FIL_EN | PCH_GBE_MLT_FIL_EN); - } - } + iowrite32(rctl, &hw->reg->RX_MODE); - if (mc_count >= PCH_GBE_MAR_ENTRIES) - return; - mta_list = kmalloc_array(ETH_ALEN, mc_count, GFP_ATOMIC); - if (!mta_list) + /* If we're not using multicast filtering then there's no point + * configuring the unused MAC address registers. + */ + if (!(rctl & PCH_GBE_MLT_FIL_EN)) return; - /* The shared function expects a packed array of only addresses. */ - i = 0; - netdev_for_each_mc_addr(ha, netdev) { - if (i == mc_count) - break; - memcpy(mta_list + (i++ * ETH_ALEN), &ha->addr, ETH_ALEN); + /* Load the first set of multicast addresses into MAC address registers + * for use by hardware filtering. + */ + i = 1; + netdev_for_each_mc_addr(ha, netdev) + pch_gbe_mac_mar_set(hw, ha->addr, i++); + + /* If there are spare MAC registers, mask & clear them */ + for (; i < PCH_GBE_MAR_ENTRIES; i++) { + /* Clear MAC address mask */ + adrmask = ioread32(&hw->reg->ADDR_MASK); + iowrite32(adrmask | BIT(i), &hw->reg->ADDR_MASK); + /* wait busy */ + pch_gbe_wait_clr_bit(&hw->reg->ADDR_MASK, PCH_GBE_BUSY); + /* Clear MAC address */ + iowrite32(0, &hw->reg->mac_adr[i].high); + iowrite32(0, &hw->reg->mac_adr[i].low); } - pch_gbe_mac_mc_addr_list_update(hw, mta_list, i, 1, - PCH_GBE_MAR_ENTRIES); - kfree(mta_list); netdev_dbg(netdev, "RX_MODE reg(check bit31,30 ADD,MLT) : 0x%08x netdev->mc_count : 0x%08x\n", @@ -2437,7 +2396,7 @@ static pci_ers_result_t pch_gbe_io_slot_reset(struct pci_dev *pdev) } pci_set_master(pdev); pci_enable_wake(pdev, PCI_D0, 0); - pch_gbe_hal_power_up_phy(hw); + pch_gbe_phy_power_up(hw); pch_gbe_reset(adapter); /* Clear wake up status */ pch_gbe_mac_set_wol_event(hw, 0); @@ -2482,7 +2441,7 @@ static int __pch_gbe_suspend(struct pci_dev *pdev) pch_gbe_mac_set_wol_event(hw, wufc); pci_disable_device(pdev); } else { - pch_gbe_hal_power_down_phy(hw); + pch_gbe_phy_power_down(hw); pch_gbe_mac_set_wol_event(hw, wufc); pci_disable_device(pdev); } @@ -2511,7 +2470,7 @@ static int pch_gbe_resume(struct device *device) return err; } pci_set_master(pdev); - pch_gbe_hal_power_up_phy(hw); + pch_gbe_phy_power_up(hw); pch_gbe_reset(adapter); /* Clear wake on lan control and status */ pch_gbe_mac_set_wol_event(hw, 0); @@ -2541,7 +2500,7 @@ static void pch_gbe_remove(struct pci_dev *pdev) cancel_work_sync(&adapter->reset_task); unregister_netdev(netdev); - pch_gbe_hal_phy_hw_reset(&adapter->hw); + pch_gbe_phy_hw_reset(&adapter->hw); free_netdev(netdev); } @@ -2627,10 +2586,9 @@ static int pch_gbe_probe(struct pci_dev *pdev, dev_err(&pdev->dev, "PHY initialize error\n"); goto err_free_adapter; } - pch_gbe_hal_get_bus_info(&adapter->hw); /* Read the MAC address. 
and store to the private data */ - ret = pch_gbe_hal_read_mac_addr(&adapter->hw); + ret = pch_gbe_mac_read_mac_addr(&adapter->hw); if (ret) { dev_err(&pdev->dev, "MAC address Read Error\n"); goto err_free_adapter; @@ -2677,7 +2635,7 @@ static int pch_gbe_probe(struct pci_dev *pdev, return 0; err_free_adapter: - pch_gbe_hal_phy_hw_reset(&adapter->hw); + pch_gbe_phy_hw_reset(&adapter->hw); err_free_netdev: free_netdev(netdev); return ret; @@ -2776,32 +2734,7 @@ static struct pci_driver pch_gbe_driver = { .shutdown = pch_gbe_shutdown, .err_handler = &pch_gbe_err_handler }; - - -static int __init pch_gbe_init_module(void) -{ - int ret; - - pr_info("EG20T PCH Gigabit Ethernet Driver - version %s\n",DRV_VERSION); - ret = pci_register_driver(&pch_gbe_driver); - if (copybreak != PCH_GBE_COPYBREAK_DEFAULT) { - if (copybreak == 0) { - pr_info("copybreak disabled\n"); - } else { - pr_info("copybreak enabled for packets <= %u bytes\n", - copybreak); - } - } - return ret; -} - -static void __exit pch_gbe_exit_module(void) -{ - pci_unregister_driver(&pch_gbe_driver); -} - -module_init(pch_gbe_init_module); -module_exit(pch_gbe_exit_module); +module_pci_driver(pch_gbe_driver); MODULE_DESCRIPTION("EG20T PCH Gigabit ethernet Driver"); MODULE_AUTHOR("LAPIS SEMICONDUCTOR, <tshimizu818@gmail.com>"); @@ -2809,8 +2742,4 @@ MODULE_LICENSE("GPL"); MODULE_VERSION(DRV_VERSION); MODULE_DEVICE_TABLE(pci, pch_gbe_pcidev_id); -module_param(copybreak, uint, 0644); -MODULE_PARM_DESC(copybreak, - "Maximum size of packet that is copied to a new buffer on receive"); - /* pch_gbe_main.c */ diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_phy.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_phy.c index a5cad5ea9436..6b35b573beef 100644 --- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_phy.c +++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_phy.c @@ -184,7 +184,7 @@ s32 pch_gbe_phy_write_reg_miic(struct pch_gbe_hw *hw, u32 offset, u16 data) * pch_gbe_phy_sw_reset - PHY software reset * @hw: Pointer to the HW structure */ -void pch_gbe_phy_sw_reset(struct pch_gbe_hw *hw) +static void pch_gbe_phy_sw_reset(struct pch_gbe_hw *hw) { u16 phy_ctrl; diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_phy.h b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_phy.h index 95ad0151ad02..23ac38711619 100644 --- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_phy.h +++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_phy.h @@ -21,12 +21,10 @@ #define PCH_GBE_PHY_REGS_LEN 32 #define PCH_GBE_PHY_RESET_DELAY_US 10 -#define PCH_GBE_MAC_IFOP_RGMII s32 pch_gbe_phy_get_id(struct pch_gbe_hw *hw); s32 pch_gbe_phy_read_reg_miic(struct pch_gbe_hw *hw, u32 offset, u16 *data); s32 pch_gbe_phy_write_reg_miic(struct pch_gbe_hw *hw, u32 offset, u16 data); -void pch_gbe_phy_sw_reset(struct pch_gbe_hw *hw); void pch_gbe_phy_hw_reset(struct pch_gbe_hw *hw); void pch_gbe_phy_power_up(struct pch_gbe_hw *hw); void pch_gbe_phy_power_down(struct pch_gbe_hw *hw); diff --git a/drivers/net/ethernet/packetengines/Kconfig b/drivers/net/ethernet/packetengines/Kconfig index b5ea2a56106e..1df28f2edd1f 100644 --- a/drivers/net/ethernet/packetengines/Kconfig +++ b/drivers/net/ethernet/packetengines/Kconfig @@ -2,7 +2,7 @@ # Packet engine device configuration # -config NET_PACKET_ENGINE +config NET_VENDOR_PACKET_ENGINES bool "Packet Engine devices" default y depends on PCI @@ -14,7 +14,7 @@ config NET_PACKET_ENGINE the questions about packet engine devices. If you say Y, you will be asked for your specific card in the following questions. 
-if NET_PACKET_ENGINE +if NET_VENDOR_PACKET_ENGINES config HAMACHI tristate "Packet Engines Hamachi GNIC-II support" @@ -40,4 +40,4 @@ config YELLOWFIN To compile this driver as a module, choose M here: the module will be called yellowfin. This is recommended. -endif # NET_PACKET_ENGINE +endif # NET_VENDOR_PACKET_ENGINES diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c index 1cd39c9a0345..52ad80621335 100644 --- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c +++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c @@ -566,9 +566,8 @@ static int netxen_send_cmd_descs(struct netxen_adapter *adapter, struct cmd_desc_type0 *cmd_desc_arr, int nr_desc) { - u32 i, producer, consumer; + u32 i, producer; struct netxen_cmd_buffer *pbuf; - struct cmd_desc_type0 *cmd_desc; struct nx_host_tx_ring *tx_ring; i = 0; @@ -580,7 +579,6 @@ netxen_send_cmd_descs(struct netxen_adapter *adapter, __netif_tx_lock_bh(tx_ring->txq); producer = tx_ring->producer; - consumer = tx_ring->sw_consumer; if (nr_desc >= netxen_tx_avail(tx_ring)) { netif_tx_stop_queue(tx_ring->txq); @@ -595,8 +593,6 @@ netxen_send_cmd_descs(struct netxen_adapter *adapter, } do { - cmd_desc = &cmd_desc_arr[i]; - pbuf = &tx_ring->cmd_buf_arr[producer]; pbuf->skb = NULL; pbuf->frag_count = 0; @@ -2350,7 +2346,7 @@ static int netxen_md_entry_err_chk(struct netxen_adapter *adapter, static int netxen_parse_md_template(struct netxen_adapter *adapter) { int num_of_entries, buff_level, e_cnt, esize; - int end_cnt = 0, rv = 0, sane_start = 0, sane_end = 0; + int rv = 0, sane_start = 0, sane_end = 0; char *dbuff; void *template_buff = adapter->mdump.md_template; char *dump_buff = adapter->mdump.md_capture_buff; @@ -2386,8 +2382,6 @@ static int netxen_parse_md_template(struct netxen_adapter *adapter) break; case RDEND: entry->hdr.driver_flags |= NX_DUMP_SKIP; - if (!sane_end) - end_cnt = e_cnt; sane_end += 1; break; case CNTRL: diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c index 8259e8309320..69aa7fc392c5 100644 --- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c +++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c @@ -2073,7 +2073,7 @@ netxen_nic_xmit_frame(struct sk_buff *skb, struct net_device *netdev) struct skb_frag_struct *frag; u32 producer; - int frag_count, no_of_desc; + int frag_count; u32 num_txd = tx_ring->num_desc; frag_count = skb_shinfo(skb)->nr_frags + 1; @@ -2093,8 +2093,6 @@ netxen_nic_xmit_frame(struct sk_buff *skb, struct net_device *netdev) frag_count = 1 + skb_shinfo(skb)->nr_frags; } - /* 4 fragments per cmd des */ - no_of_desc = (frag_count + 3) >> 2; if (unlikely(netxen_tx_avail(tx_ring) <= TX_STOP_THRESH)) { netif_stop_queue(netdev); diff --git a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c index e0680ce91328..12b4c2ab5796 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c +++ b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c @@ -221,7 +221,6 @@ qed_dcbx_update_app_info(struct qed_dcbx_results *p_data, struct qed_hw_info *p_info = &p_hwfn->hw_info; enum qed_pci_personality personality; enum dcbx_protocol_type id; - char *name; int i; for (i = 0; i < ARRAY_SIZE(qed_dcbx_app_update); i++) { @@ -231,7 +230,6 @@ qed_dcbx_update_app_info(struct qed_dcbx_results *p_data, continue; personality = qed_dcbx_app_update[i].personality; - name = qed_dcbx_app_update[i].name; qed_dcbx_set_params(p_data, p_info, enable, prio, tc, type, 
personality); diff --git a/drivers/net/ethernet/qlogic/qed/qed_hsi.h b/drivers/net/ethernet/qlogic/qed/qed_hsi.h index bee10c1781fb..8faceb691657 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_hsi.h +++ b/drivers/net/ethernet/qlogic/qed/qed_hsi.h @@ -12444,6 +12444,8 @@ struct public_drv_mb { #define DRV_MSG_CODE_STATS_TYPE_ISCSI 3 #define DRV_MSG_CODE_STATS_TYPE_RDMA 4 +#define DRV_MSG_CODE_TRANSCEIVER_READ 0x00160000 + #define DRV_MSG_CODE_MASK_PARITIES 0x001a0000 #define DRV_MSG_CODE_BIST_TEST 0x001e0000 @@ -12543,6 +12545,15 @@ struct public_drv_mb { #define DRV_MB_PARAM_SET_LED_MODE_ON 0x1 #define DRV_MB_PARAM_SET_LED_MODE_OFF 0x2 +#define DRV_MB_PARAM_TRANSCEIVER_PORT_OFFSET 0 +#define DRV_MB_PARAM_TRANSCEIVER_PORT_MASK 0x00000003 +#define DRV_MB_PARAM_TRANSCEIVER_SIZE_OFFSET 2 +#define DRV_MB_PARAM_TRANSCEIVER_SIZE_MASK 0x000000FC +#define DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_OFFSET 8 +#define DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_MASK 0x0000FF00 +#define DRV_MB_PARAM_TRANSCEIVER_OFFSET_OFFSET 16 +#define DRV_MB_PARAM_TRANSCEIVER_OFFSET_MASK 0xFFFF0000 + /* Resource Allocation params - Driver version support */ #define DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR_MASK 0xFFFF0000 #define DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR_SHIFT 16 @@ -12596,6 +12607,9 @@ struct public_drv_mb { #define FW_MSG_CODE_PHY_OK 0x00110000 #define FW_MSG_CODE_OK 0x00160000 #define FW_MSG_CODE_ERROR 0x00170000 +#define FW_MSG_CODE_TRANSCEIVER_DIAG_OK 0x00160000 +#define FW_MSG_CODE_TRANSCEIVER_DIAG_ERROR 0x00170000 +#define FW_MSG_CODE_TRANSCEIVER_NOT_PRESENT 0x00020000 #define FW_MSG_CODE_OS_WOL_SUPPORTED 0x00800000 #define FW_MSG_CODE_OS_WOL_NOT_SUPPORTED 0x00810000 @@ -12687,6 +12701,8 @@ struct mcp_public_data { struct public_func func[MCP_GLOB_FUNC_MAX]; }; +#define MAX_I2C_TRANSACTION_SIZE 16 + /* OCBB definitions */ enum tlvs { /* Category 1: Device Properties */ diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c index 758a9a5127fa..dbe81310c0b6 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_main.c +++ b/drivers/net/ethernet/qlogic/qed/qed_main.c @@ -2102,6 +2102,28 @@ out: return status; } +static int qed_read_module_eeprom(struct qed_dev *cdev, char *buf, + u8 dev_addr, u32 offset, u32 len) +{ + struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); + struct qed_ptt *ptt; + int rc = 0; + + if (IS_VF(cdev)) + return 0; + + ptt = qed_ptt_acquire(hwfn); + if (!ptt) + return -EAGAIN; + + rc = qed_mcp_phy_sfp_read(hwfn, ptt, MFW_PORT(hwfn), dev_addr, + offset, len, buf); + + qed_ptt_release(hwfn, ptt); + + return rc; +} + static struct qed_selftest_ops qed_selftest_ops_pass = { .selftest_memory = &qed_selftest_memory, .selftest_interrupt = &qed_selftest_interrupt, @@ -2144,6 +2166,7 @@ const struct qed_common_ops qed_common_ops_pass = { .update_mac = &qed_update_mac, .update_mtu = &qed_update_mtu, .update_wol = &qed_update_wol, + .read_module_eeprom = &qed_read_module_eeprom, }; void qed_get_protocol_stats(struct qed_dev *cdev, diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.c b/drivers/net/ethernet/qlogic/qed/qed_mcp.c index 9d9e533bccdc..15f1b3294289 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_mcp.c +++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.c @@ -2466,6 +2466,55 @@ out: return rc; } +int qed_mcp_phy_sfp_read(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, + u32 port, u32 addr, u32 offset, u32 len, u8 *p_buf) +{ + u32 bytes_left, bytes_to_copy, buf_size, nvm_offset = 0; + u32 resp, param; + int rc; + + nvm_offset |= (port << 
DRV_MB_PARAM_TRANSCEIVER_PORT_OFFSET) & + DRV_MB_PARAM_TRANSCEIVER_PORT_MASK; + nvm_offset |= (addr << DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_OFFSET) & + DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_MASK; + + addr = offset; + offset = 0; + bytes_left = len; + while (bytes_left > 0) { + bytes_to_copy = min_t(u32, bytes_left, + MAX_I2C_TRANSACTION_SIZE); + nvm_offset &= (DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_MASK | + DRV_MB_PARAM_TRANSCEIVER_PORT_MASK); + nvm_offset |= ((addr + offset) << + DRV_MB_PARAM_TRANSCEIVER_OFFSET_OFFSET) & + DRV_MB_PARAM_TRANSCEIVER_OFFSET_MASK; + nvm_offset |= (bytes_to_copy << + DRV_MB_PARAM_TRANSCEIVER_SIZE_OFFSET) & + DRV_MB_PARAM_TRANSCEIVER_SIZE_MASK; + rc = qed_mcp_nvm_rd_cmd(p_hwfn, p_ptt, + DRV_MSG_CODE_TRANSCEIVER_READ, + nvm_offset, &resp, &param, &buf_size, + (u32 *)(p_buf + offset)); + if (rc) { + DP_NOTICE(p_hwfn, + "Failed to send a transceiver read command to the MFW. rc = %d.\n", + rc); + return rc; + } + + if (resp == FW_MSG_CODE_TRANSCEIVER_NOT_PRESENT) + return -ENODEV; + else if (resp != FW_MSG_CODE_TRANSCEIVER_DIAG_OK) + return -EINVAL; + + offset += buf_size; + bytes_left -= buf_size; + } + + return 0; +} + int qed_mcp_bist_register_test(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) { u32 drv_mb_param = 0, rsp, param; diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.h b/drivers/net/ethernet/qlogic/qed/qed_mcp.h index 632a838f1fe3..047976d5c6e9 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_mcp.h +++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.h @@ -840,6 +840,22 @@ int qed_mcp_nvm_rd_cmd(struct qed_hwfn *p_hwfn, u32 *o_mcp_param, u32 *o_txn_size, u32 *o_buf); /** + * @brief Read from sfp + * + * @param p_hwfn - hw function + * @param p_ptt - PTT required for register access + * @param port - transceiver port + * @param addr - I2C address + * @param offset - offset in sfp + * @param len - buffer length + * @param p_buf - buffer to read into + * + * @return int - 0 - operation was successful. 
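The parameter word that qed_mcp_phy_sfp_read() sends with each DRV_MSG_CODE_TRANSCEIVER_READ command is simply the four DRV_MB_PARAM_TRANSCEIVER_* fields defined earlier packed into one u32, with at most MAX_I2C_TRANSACTION_SIZE (16) bytes transferred per mailbox command. A standalone sketch of that packing, mirroring the shifts and masks used in the function body (the helper name is hypothetical):

static u32 sfp_pack_mb_param(u32 port, u32 i2c_addr, u32 offset, u32 size)
{
	u32 p = 0;

	/* port: 2 bits, transfer size: 6 bits, I2C device address: 8 bits,
	 * byte offset within the EEPROM: 16 bits.
	 */
	p |= (port << DRV_MB_PARAM_TRANSCEIVER_PORT_OFFSET) &
	     DRV_MB_PARAM_TRANSCEIVER_PORT_MASK;
	p |= (size << DRV_MB_PARAM_TRANSCEIVER_SIZE_OFFSET) &
	     DRV_MB_PARAM_TRANSCEIVER_SIZE_MASK;
	p |= (i2c_addr << DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_OFFSET) &
	     DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_MASK;
	p |= (offset << DRV_MB_PARAM_TRANSCEIVER_OFFSET_OFFSET) &
	     DRV_MB_PARAM_TRANSCEIVER_OFFSET_MASK;

	return p;
}

For example, a 16-byte read from I2C address 0xa0 at offset 0 on port 0 packs to 0x0000a040 (size 16 in bits 2-7, address 0xa0 in bits 8-15).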
+ */ +int qed_mcp_phy_sfp_read(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, + u32 port, u32 addr, u32 offset, u32 len, u8 *p_buf); + +/** * @brief indicates whether the MFW objects [under mcp_info] are accessible * * @param p_hwfn diff --git a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c index f4a0f8ff8261..b37857f3f950 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c +++ b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c @@ -1780,6 +1780,92 @@ static int qede_set_eee(struct net_device *dev, struct ethtool_eee *edata) return 0; } +static int qede_get_module_info(struct net_device *dev, + struct ethtool_modinfo *modinfo) +{ + struct qede_dev *edev = netdev_priv(dev); + u8 buf[4]; + int rc; + + /* Read first 4 bytes to find the sfp type */ + rc = edev->ops->common->read_module_eeprom(edev->cdev, buf, + QED_I2C_DEV_ADDR_A0, 0, 4); + if (rc) { + DP_ERR(edev, "Failed reading EEPROM data %d\n", rc); + return rc; + } + + switch (buf[0]) { + case 0x3: /* SFP, SFP+, SFP-28 */ + modinfo->type = ETH_MODULE_SFF_8472; + modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN; + break; + case 0xc: /* QSFP */ + case 0xd: /* QSFP+ */ + modinfo->type = ETH_MODULE_SFF_8436; + modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN; + break; + case 0x11: /* QSFP-28 */ + modinfo->type = ETH_MODULE_SFF_8636; + modinfo->eeprom_len = ETH_MODULE_SFF_8636_LEN; + break; + default: + DP_ERR(edev, "Unknown transceiver type 0x%x\n", buf[0]); + return -EINVAL; + } + + return 0; +} + +static int qede_get_module_eeprom(struct net_device *dev, + struct ethtool_eeprom *ee, u8 *data) +{ + struct qede_dev *edev = netdev_priv(dev); + u32 start_addr = ee->offset, size = 0; + u8 *buf = data; + int rc = 0; + + /* Read A0 section */ + if (ee->offset < ETH_MODULE_SFF_8079_LEN) { + /* Limit transfer size to the A0 section boundary */ + if (ee->offset + ee->len > ETH_MODULE_SFF_8079_LEN) + size = ETH_MODULE_SFF_8079_LEN - ee->offset; + else + size = ee->len; + + rc = edev->ops->common->read_module_eeprom(edev->cdev, buf, + QED_I2C_DEV_ADDR_A0, + start_addr, size); + if (rc) { + DP_ERR(edev, "Failed reading A0 section %d\n", rc); + return rc; + } + + buf += size; + start_addr += size; + } + + /* Read A2 section */ + if (start_addr >= ETH_MODULE_SFF_8079_LEN && + start_addr < ETH_MODULE_SFF_8472_LEN) { + size = ee->len - size; + /* Limit transfer size to the A2 section boundary */ + if (start_addr + size > ETH_MODULE_SFF_8472_LEN) + size = ETH_MODULE_SFF_8472_LEN - start_addr; + start_addr -= ETH_MODULE_SFF_8079_LEN; + rc = edev->ops->common->read_module_eeprom(edev->cdev, buf, + QED_I2C_DEV_ADDR_A2, + start_addr, size); + if (rc) { + DP_VERBOSE(edev, QED_MSG_DEBUG, + "Failed reading A2 section %d\n", rc); + return 0; + } + } + + return rc; +} + static const struct ethtool_ops qede_ethtool_ops = { .get_link_ksettings = qede_get_link_ksettings, .set_link_ksettings = qede_set_link_ksettings, @@ -1813,6 +1899,8 @@ static const struct ethtool_ops qede_ethtool_ops = { .get_channels = qede_get_channels, .set_channels = qede_set_channels, .self_test = qede_self_test, + .get_module_info = qede_get_module_info, + .get_module_eeprom = qede_get_module_eeprom, .get_eee = qede_get_eee, .set_eee = qede_set_eee, diff --git a/drivers/net/ethernet/qlogic/qede/qede_filter.c b/drivers/net/ethernet/qlogic/qede/qede_filter.c index b823bfe2ea4d..f9a327c821eb 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_filter.c +++ b/drivers/net/ethernet/qlogic/qede/qede_filter.c @@ -1116,7 +1116,6 @@ int qede_xdp(struct 
net_device *dev, struct netdev_bpf *xdp) case XDP_SETUP_PROG: return qede_xdp_set(edev, xdp->prog); case XDP_QUERY_PROG: - xdp->prog_attached = !!edev->xdp_prog; xdp->prog_id = edev->xdp_prog ? edev->xdp_prog->aux->id : 0; return 0; default: diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c index 0c744b9c6e0a..77e386ebff09 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c @@ -212,7 +212,7 @@ int qlcnic_sriov_init(struct qlcnic_adapter *adapter, int num_vfs) vp->max_tx_bw = MAX_BW; vp->min_tx_bw = MIN_BW; vp->spoofchk = false; - random_ether_addr(vp->mac); + eth_random_addr(vp->mac); dev_info(&adapter->pdev->dev, "MAC Address %pM is configured for VF %d\n", vp->mac, i); diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c index b9a7548ec6a0..0afc3d335d56 100644 --- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c +++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c @@ -210,7 +210,7 @@ void rmnet_vnd_setup(struct net_device *rmnet_dev) rmnet_dev->netdev_ops = &rmnet_vnd_ops; rmnet_dev->mtu = RMNET_DFLT_PACKET_SIZE; rmnet_dev->needed_headroom = RMNET_NEEDED_HEADROOM; - random_ether_addr(rmnet_dev->dev_addr); + eth_random_addr(rmnet_dev->dev_addr); rmnet_dev->tx_queue_len = RMNET_TX_QUEUE_LEN; /* Raw IP mode */ diff --git a/drivers/net/ethernet/realtek/Kconfig b/drivers/net/ethernet/realtek/Kconfig index 7c69f4c8134d..e1cd934c2e4f 100644 --- a/drivers/net/ethernet/realtek/Kconfig +++ b/drivers/net/ethernet/realtek/Kconfig @@ -99,7 +99,7 @@ config R8169 depends on PCI select FW_LOADER select CRC32 - select MII + select PHYLIB ---help--- Say Y here if you have a Realtek 8169 PCI Gigabit Ethernet adapter. diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c index a3f69901ac87..49a6e25ddc2b 100644 --- a/drivers/net/ethernet/realtek/r8169.c +++ b/drivers/net/ethernet/realtek/r8169.c @@ -15,7 +15,7 @@ #include <linux/etherdevice.h> #include <linux/delay.h> #include <linux/ethtool.h> -#include <linux/mii.h> +#include <linux/phy.h> #include <linux/if_vlan.h> #include <linux/crc32.h> #include <linux/in.h> @@ -25,7 +25,6 @@ #include <linux/dma-mapping.h> #include <linux/pm_runtime.h> #include <linux/firmware.h> -#include <linux/pci-aspm.h> #include <linux/prefetch.h> #include <linux/ipv6.h> #include <net/ip6_checksum.h> @@ -35,7 +34,6 @@ #define RTL8169_VERSION "2.3LK-NAPI" #define MODULENAME "r8169" -#define PFX MODULENAME ": " #define FIRMWARE_8168D_1 "rtl_nic/rtl8168d-1.fw" #define FIRMWARE_8168D_2 "rtl_nic/rtl8168d-2.fw" @@ -57,19 +55,6 @@ #define FIRMWARE_8107E_1 "rtl_nic/rtl8107e-1.fw" #define FIRMWARE_8107E_2 "rtl_nic/rtl8107e-2.fw" -#ifdef RTL8169_DEBUG -#define assert(expr) \ - if (!(expr)) { \ - printk( "Assertion failed! %s,%s,%s,line=%d\n", \ - #expr,__FILE__,__func__,__LINE__); \ - } -#define dprintk(fmt, args...) \ - do { printk(KERN_DEBUG PFX fmt, ## args); } while (0) -#else -#define assert(expr) do {} while (0) -#define dprintk(fmt, args...) 
do {} while (0) -#endif /* RTL8169_DEBUG */ - #define R8169_MSG_DEFAULT \ (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_IFUP | NETIF_MSG_IFDOWN) @@ -95,7 +80,6 @@ static const int multicast_filter_limit = 32; #define R8169_RX_RING_BYTES (NUM_RX_DESC * sizeof(struct RxDesc)) #define RTL8169_TX_TIMEOUT (6*HZ) -#define RTL8169_PHY_TIMEOUT (10*HZ) /* write/read MMIO register */ #define RTL_W8(tp, reg, val8) writeb((val8), tp->mmio_addr + (reg)) @@ -399,12 +383,6 @@ enum rtl_registers { FuncForceEvent = 0xfc, }; -enum rtl8110_registers { - TBICSR = 0x64, - TBI_ANAR = 0x68, - TBI_LPAR = 0x6a, -}; - enum rtl8168_8101_registers { CSIDR = 0x64, CSIAR = 0x68, @@ -571,14 +549,6 @@ enum rtl_register_content { PMEStatus = (1 << 0), /* PME status can be reset by PCI RST# */ ASPM_en = (1 << 0), /* ASPM enable */ - /* TBICSR p.28 */ - TBIReset = 0x80000000, - TBILoopback = 0x40000000, - TBINwEnable = 0x20000000, - TBINwRestart = 0x10000000, - TBILinkOk = 0x02000000, - TBINwComplete = 0x01000000, - /* CPlusCmd p.31 */ EnableBist = (1 << 15), // 8168 8101 Mac_dbgo_oe = (1 << 14), // 8168 8101 @@ -732,7 +702,6 @@ enum rtl_flag { RTL_FLAG_TASK_ENABLED, RTL_FLAG_TASK_SLOW_PENDING, RTL_FLAG_TASK_RESET_PENDING, - RTL_FLAG_TASK_PHY_PENDING, RTL_FLAG_MAX }; @@ -760,7 +729,6 @@ struct rtl8169_private { dma_addr_t RxPhyAddr; void *Rx_databuff[NUM_RX_DESC]; /* Rx data buffers */ struct ring_info tx_skb[NUM_TX_DESC]; /* Tx data buffers */ - struct timer_list timer; u16 cp_cmd; u16 event_slow; @@ -776,14 +744,7 @@ struct rtl8169_private { void (*disable)(struct rtl8169_private *); } jumbo_ops; - int (*set_speed)(struct net_device *, u8 aneg, u16 sp, u8 dpx, u32 adv); - int (*get_link_ksettings)(struct net_device *, - struct ethtool_link_ksettings *); - void (*phy_reset_enable)(struct rtl8169_private *tp); void (*hw_start)(struct rtl8169_private *tp); - unsigned int (*phy_reset_pending)(struct rtl8169_private *tp); - unsigned int (*link_ok)(struct rtl8169_private *tp); - int (*do_ioctl)(struct rtl8169_private *tp, struct mii_ioctl_data *data, int cmd); bool (*tso_csum)(struct rtl8169_private *, struct sk_buff *, u32 *); struct { @@ -792,7 +753,8 @@ struct rtl8169_private { struct work_struct work; } wk; - struct mii_if_info mii; + unsigned supports_gmii:1; + struct mii_bus *mii_bus; dma_addr_t counters_phys_addr; struct rtl8169_counters *counters; struct rtl8169_tc_offsets tc_offset; @@ -1143,21 +1105,6 @@ static void rtl_w0w1_phy(struct rtl8169_private *tp, int reg_addr, int p, int m) rtl_writephy(tp, reg_addr, (val & ~m) | p); } -static void rtl_mdio_write(struct net_device *dev, int phy_id, int location, - int val) -{ - struct rtl8169_private *tp = netdev_priv(dev); - - rtl_writephy(tp, location, val); -} - -static int rtl_mdio_read(struct net_device *dev, int phy_id, int location) -{ - struct rtl8169_private *tp = netdev_priv(dev); - - return rtl_readphy(tp, location); -} - DECLARE_RTL_COND(rtl_ephyar_cond) { return RTL_R32(tp, EPHYAR) & EPHYAR_FLAG; @@ -1478,54 +1425,22 @@ static void rtl8169_irq_mask_and_ack(struct rtl8169_private *tp) RTL_R8(tp, ChipCmd); } -static unsigned int rtl8169_tbi_reset_pending(struct rtl8169_private *tp) -{ - return RTL_R32(tp, TBICSR) & TBIReset; -} - -static unsigned int rtl8169_xmii_reset_pending(struct rtl8169_private *tp) -{ - return rtl_readphy(tp, MII_BMCR) & BMCR_RESET; -} - -static unsigned int rtl8169_tbi_link_ok(struct rtl8169_private *tp) -{ - return RTL_R32(tp, TBICSR) & TBILinkOk; -} - -static unsigned int rtl8169_xmii_link_ok(struct rtl8169_private *tp) -{ - return 
RTL_R8(tp, PHYstatus) & LinkStatus; -} - -static void rtl8169_tbi_reset_enable(struct rtl8169_private *tp) -{ - RTL_W32(tp, TBICSR, RTL_R32(tp, TBICSR) | TBIReset); -} - -static void rtl8169_xmii_reset_enable(struct rtl8169_private *tp) -{ - unsigned int val; - - val = rtl_readphy(tp, MII_BMCR) | BMCR_RESET; - rtl_writephy(tp, MII_BMCR, val & 0xffff); -} - static void rtl_link_chg_patch(struct rtl8169_private *tp) { struct net_device *dev = tp->dev; + struct phy_device *phydev = dev->phydev; if (!netif_running(dev)) return; if (tp->mac_version == RTL_GIGA_MAC_VER_34 || tp->mac_version == RTL_GIGA_MAC_VER_38) { - if (RTL_R8(tp, PHYstatus) & _1000bpsF) { + if (phydev->speed == SPEED_1000) { rtl_eri_write(tp, 0x1bc, ERIAR_MASK_1111, 0x00000011, ERIAR_EXGMAC); rtl_eri_write(tp, 0x1dc, ERIAR_MASK_1111, 0x00000005, ERIAR_EXGMAC); - } else if (RTL_R8(tp, PHYstatus) & _100bps) { + } else if (phydev->speed == SPEED_100) { rtl_eri_write(tp, 0x1bc, ERIAR_MASK_1111, 0x0000001f, ERIAR_EXGMAC); rtl_eri_write(tp, 0x1dc, ERIAR_MASK_1111, 0x00000005, @@ -1543,7 +1458,7 @@ static void rtl_link_chg_patch(struct rtl8169_private *tp) ERIAR_EXGMAC); } else if (tp->mac_version == RTL_GIGA_MAC_VER_35 || tp->mac_version == RTL_GIGA_MAC_VER_36) { - if (RTL_R8(tp, PHYstatus) & _1000bpsF) { + if (phydev->speed == SPEED_1000) { rtl_eri_write(tp, 0x1bc, ERIAR_MASK_1111, 0x00000011, ERIAR_EXGMAC); rtl_eri_write(tp, 0x1dc, ERIAR_MASK_1111, 0x00000005, @@ -1555,7 +1470,7 @@ static void rtl_link_chg_patch(struct rtl8169_private *tp) ERIAR_EXGMAC); } } else if (tp->mac_version == RTL_GIGA_MAC_VER_37) { - if (RTL_R8(tp, PHYstatus) & _10bps) { + if (phydev->speed == SPEED_10) { rtl_eri_write(tp, 0x1d0, ERIAR_MASK_0011, 0x4d02, ERIAR_EXGMAC); rtl_eri_write(tp, 0x1dc, ERIAR_MASK_0011, 0x0060, @@ -1567,25 +1482,6 @@ static void rtl_link_chg_patch(struct rtl8169_private *tp) } } -static void rtl8169_check_link_status(struct net_device *dev, - struct rtl8169_private *tp) -{ - struct device *d = tp_to_dev(tp); - - if (tp->link_ok(tp)) { - rtl_link_chg_patch(tp); - /* This is to cancel a scheduled suspend if there's one. 
*/ - pm_request_resume(d); - netif_carrier_on(dev); - if (net_ratelimit()) - netif_info(tp, ifup, dev, "link up\n"); - } else { - netif_carrier_off(dev); - netif_info(tp, ifdown, dev, "link down\n"); - pm_runtime_idle(d); - } -} - #define WAKE_ANY (WAKE_PHY | WAKE_MAGIC | WAKE_UCAST | WAKE_BCAST | WAKE_MCAST) static u32 __rtl8169_get_wol(struct rtl8169_private *tp) @@ -1626,21 +1522,11 @@ static u32 __rtl8169_get_wol(struct rtl8169_private *tp) static void rtl8169_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) { struct rtl8169_private *tp = netdev_priv(dev); - struct device *d = tp_to_dev(tp); - - pm_runtime_get_noresume(d); rtl_lock_work(tp); - wol->supported = WAKE_ANY; - if (pm_runtime_active(d)) - wol->wolopts = __rtl8169_get_wol(tp); - else - wol->wolopts = tp->saved_wolopts; - + wol->wolopts = tp->saved_wolopts; rtl_unlock_work(tp); - - pm_runtime_put_noidle(d); } static void __rtl8169_set_wol(struct rtl8169_private *tp, u32 wolopts) @@ -1716,18 +1602,21 @@ static int rtl8169_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) struct rtl8169_private *tp = netdev_priv(dev); struct device *d = tp_to_dev(tp); + if (wol->wolopts & ~WAKE_ANY) + return -EINVAL; + pm_runtime_get_noresume(d); rtl_lock_work(tp); + tp->saved_wolopts = wol->wolopts; + if (pm_runtime_active(d)) - __rtl8169_set_wol(tp, wol->wolopts); - else - tp->saved_wolopts = wol->wolopts; + __rtl8169_set_wol(tp, tp->saved_wolopts); rtl_unlock_work(tp); - device_set_wakeup_enable(d, wol->wolopts); + device_set_wakeup_enable(d, tp->saved_wolopts); pm_runtime_put_noidle(d); @@ -1759,124 +1648,6 @@ static int rtl8169_get_regs_len(struct net_device *dev) return R8169_REGS_SIZE; } -static int rtl8169_set_speed_tbi(struct net_device *dev, - u8 autoneg, u16 speed, u8 duplex, u32 ignored) -{ - struct rtl8169_private *tp = netdev_priv(dev); - int ret = 0; - u32 reg; - - reg = RTL_R32(tp, TBICSR); - if ((autoneg == AUTONEG_DISABLE) && (speed == SPEED_1000) && - (duplex == DUPLEX_FULL)) { - RTL_W32(tp, TBICSR, reg & ~(TBINwEnable | TBINwRestart)); - } else if (autoneg == AUTONEG_ENABLE) - RTL_W32(tp, TBICSR, reg | TBINwEnable | TBINwRestart); - else { - netif_warn(tp, link, dev, - "incorrect speed setting refused in TBI mode\n"); - ret = -EOPNOTSUPP; - } - - return ret; -} - -static int rtl8169_set_speed_xmii(struct net_device *dev, - u8 autoneg, u16 speed, u8 duplex, u32 adv) -{ - struct rtl8169_private *tp = netdev_priv(dev); - int giga_ctrl, bmcr; - int rc = -EINVAL; - - rtl_writephy(tp, 0x1f, 0x0000); - - if (autoneg == AUTONEG_ENABLE) { - int auto_nego; - - auto_nego = rtl_readphy(tp, MII_ADVERTISE); - auto_nego &= ~(ADVERTISE_10HALF | ADVERTISE_10FULL | - ADVERTISE_100HALF | ADVERTISE_100FULL); - - if (adv & ADVERTISED_10baseT_Half) - auto_nego |= ADVERTISE_10HALF; - if (adv & ADVERTISED_10baseT_Full) - auto_nego |= ADVERTISE_10FULL; - if (adv & ADVERTISED_100baseT_Half) - auto_nego |= ADVERTISE_100HALF; - if (adv & ADVERTISED_100baseT_Full) - auto_nego |= ADVERTISE_100FULL; - - auto_nego |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM; - - giga_ctrl = rtl_readphy(tp, MII_CTRL1000); - giga_ctrl &= ~(ADVERTISE_1000FULL | ADVERTISE_1000HALF); - - /* The 8100e/8101e/8102e do Fast Ethernet only. 
*/ - if (tp->mii.supports_gmii) { - if (adv & ADVERTISED_1000baseT_Half) - giga_ctrl |= ADVERTISE_1000HALF; - if (adv & ADVERTISED_1000baseT_Full) - giga_ctrl |= ADVERTISE_1000FULL; - } else if (adv & (ADVERTISED_1000baseT_Half | - ADVERTISED_1000baseT_Full)) { - netif_info(tp, link, dev, - "PHY does not support 1000Mbps\n"); - goto out; - } - - bmcr = BMCR_ANENABLE | BMCR_ANRESTART; - - rtl_writephy(tp, MII_ADVERTISE, auto_nego); - rtl_writephy(tp, MII_CTRL1000, giga_ctrl); - } else { - if (speed == SPEED_10) - bmcr = 0; - else if (speed == SPEED_100) - bmcr = BMCR_SPEED100; - else - goto out; - - if (duplex == DUPLEX_FULL) - bmcr |= BMCR_FULLDPLX; - } - - rtl_writephy(tp, MII_BMCR, bmcr); - - if (tp->mac_version == RTL_GIGA_MAC_VER_02 || - tp->mac_version == RTL_GIGA_MAC_VER_03) { - if ((speed == SPEED_100) && (autoneg != AUTONEG_ENABLE)) { - rtl_writephy(tp, 0x17, 0x2138); - rtl_writephy(tp, 0x0e, 0x0260); - } else { - rtl_writephy(tp, 0x17, 0x2108); - rtl_writephy(tp, 0x0e, 0x0000); - } - } - - rc = 0; -out: - return rc; -} - -static int rtl8169_set_speed(struct net_device *dev, - u8 autoneg, u16 speed, u8 duplex, u32 advertising) -{ - struct rtl8169_private *tp = netdev_priv(dev); - int ret; - - ret = tp->set_speed(dev, autoneg, speed, duplex, advertising); - if (ret < 0) - goto out; - - if (netif_running(dev) && (autoneg == AUTONEG_ENABLE) && - (advertising & ADVERTISED_1000baseT_Full) && - !pci_is_pcie(tp->pci_dev)) { - mod_timer(&tp->timer, jiffies + RTL8169_PHY_TIMEOUT); - } -out: - return ret; -} - static netdev_features_t rtl8169_fix_features(struct net_device *dev, netdev_features_t features) { @@ -1940,76 +1711,6 @@ static void rtl8169_rx_vlan_tag(struct RxDesc *desc, struct sk_buff *skb) __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), swab16(opts2 & 0xffff)); } -static int rtl8169_get_link_ksettings_tbi(struct net_device *dev, - struct ethtool_link_ksettings *cmd) -{ - struct rtl8169_private *tp = netdev_priv(dev); - u32 status; - u32 supported, advertising; - - supported = - SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg | SUPPORTED_FIBRE; - cmd->base.port = PORT_FIBRE; - - status = RTL_R32(tp, TBICSR); - advertising = (status & TBINwEnable) ? 
ADVERTISED_Autoneg : 0; - cmd->base.autoneg = !!(status & TBINwEnable); - - cmd->base.speed = SPEED_1000; - cmd->base.duplex = DUPLEX_FULL; /* Always set */ - - ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported, - supported); - ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising, - advertising); - - return 0; -} - -static int rtl8169_get_link_ksettings_xmii(struct net_device *dev, - struct ethtool_link_ksettings *cmd) -{ - struct rtl8169_private *tp = netdev_priv(dev); - - mii_ethtool_get_link_ksettings(&tp->mii, cmd); - - return 0; -} - -static int rtl8169_get_link_ksettings(struct net_device *dev, - struct ethtool_link_ksettings *cmd) -{ - struct rtl8169_private *tp = netdev_priv(dev); - int rc; - - rtl_lock_work(tp); - rc = tp->get_link_ksettings(dev, cmd); - rtl_unlock_work(tp); - - return rc; -} - -static int rtl8169_set_link_ksettings(struct net_device *dev, - const struct ethtool_link_ksettings *cmd) -{ - struct rtl8169_private *tp = netdev_priv(dev); - int rc; - u32 advertising; - - if (!ethtool_convert_link_mode_to_legacy_u32(&advertising, - cmd->link_modes.advertising)) - return -EINVAL; - - del_timer_sync(&tp->timer); - - rtl_lock_work(tp); - rc = rtl8169_set_speed(dev, cmd->base.autoneg, cmd->base.speed, - cmd->base.duplex, advertising); - rtl_unlock_work(tp); - - return rc; -} - static void rtl8169_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *p) { @@ -2185,13 +1886,6 @@ static void rtl8169_get_strings(struct net_device *dev, u32 stringset, u8 *data) } } -static int rtl8169_nway_reset(struct net_device *dev) -{ - struct rtl8169_private *tp = netdev_priv(dev); - - return mii_nway_restart(&tp->mii); -} - /* * Interrupt coalescing * @@ -2264,7 +1958,7 @@ static const struct rtl_coalesce_info *rtl_coalesce_info(struct net_device *dev) const struct rtl_coalesce_info *ci; int rc; - rc = rtl8169_get_link_ksettings(dev, &ecmd); + rc = phy_ethtool_get_link_ksettings(dev, &ecmd); if (rc < 0) return ERR_PTR(rc); @@ -2422,9 +2116,9 @@ static const struct ethtool_ops rtl8169_ethtool_ops = { .get_sset_count = rtl8169_get_sset_count, .get_ethtool_stats = rtl8169_get_ethtool_stats, .get_ts_info = ethtool_op_get_ts_info, - .nway_reset = rtl8169_nway_reset, - .get_link_ksettings = rtl8169_get_link_ksettings, - .set_link_ksettings = rtl8169_set_link_ksettings, + .nway_reset = phy_ethtool_nway_reset, + .get_link_ksettings = phy_ethtool_get_link_ksettings, + .set_link_ksettings = phy_ethtool_set_link_ksettings, }; static void rtl8169_get_mac_version(struct rtl8169_private *tp, @@ -2537,15 +2231,15 @@ static void rtl8169_get_mac_version(struct rtl8169_private *tp, "unknown MAC, using family default\n"); tp->mac_version = default_version; } else if (tp->mac_version == RTL_GIGA_MAC_VER_42) { - tp->mac_version = tp->mii.supports_gmii ? + tp->mac_version = tp->supports_gmii ? RTL_GIGA_MAC_VER_42 : RTL_GIGA_MAC_VER_43; } else if (tp->mac_version == RTL_GIGA_MAC_VER_45) { - tp->mac_version = tp->mii.supports_gmii ? + tp->mac_version = tp->supports_gmii ? RTL_GIGA_MAC_VER_45 : RTL_GIGA_MAC_VER_47; } else if (tp->mac_version == RTL_GIGA_MAC_VER_46) { - tp->mac_version = tp->mii.supports_gmii ? + tp->mac_version = tp->supports_gmii ? 
RTL_GIGA_MAC_VER_46 : RTL_GIGA_MAC_VER_48; } @@ -2553,7 +2247,7 @@ static void rtl8169_get_mac_version(struct rtl8169_private *tp, static void rtl8169_print_mac_version(struct rtl8169_private *tp) { - dprintk("mac_version = 0x%02x\n", tp->mac_version); + netif_dbg(tp, drv, tp->dev, "mac_version = 0x%02x\n", tp->mac_version); } struct phy_reg { @@ -4405,62 +4099,16 @@ static void rtl_hw_phy_config(struct net_device *dev) } } -static void rtl_phy_work(struct rtl8169_private *tp) -{ - struct timer_list *timer = &tp->timer; - unsigned long timeout = RTL8169_PHY_TIMEOUT; - - assert(tp->mac_version > RTL_GIGA_MAC_VER_01); - - if (tp->phy_reset_pending(tp)) { - /* - * A busy loop could burn quite a few cycles on nowadays CPU. - * Let's delay the execution of the timer for a few ticks. - */ - timeout = HZ/10; - goto out_mod_timer; - } - - if (tp->link_ok(tp)) - return; - - netif_dbg(tp, link, tp->dev, "PHY reset until link up\n"); - - tp->phy_reset_enable(tp); - -out_mod_timer: - mod_timer(timer, jiffies + timeout); -} - static void rtl_schedule_task(struct rtl8169_private *tp, enum rtl_flag flag) { if (!test_and_set_bit(flag, tp->wk.flags)) schedule_work(&tp->wk.work); } -static void rtl8169_phy_timer(struct timer_list *t) -{ - struct rtl8169_private *tp = from_timer(tp, t, timer); - - rtl_schedule_task(tp, RTL_FLAG_TASK_PHY_PENDING); -} - -DECLARE_RTL_COND(rtl_phy_reset_cond) -{ - return tp->phy_reset_pending(tp); -} - -static void rtl8169_phy_reset(struct net_device *dev, - struct rtl8169_private *tp) -{ - tp->phy_reset_enable(tp); - rtl_msleep_loop_wait_low(tp, &rtl_phy_reset_cond, 1, 100); -} - static bool rtl_tbi_enabled(struct rtl8169_private *tp) { return (tp->mac_version == RTL_GIGA_MAC_VER_01) && - (RTL_R8(tp, PHYstatus) & TBI_Enable); + (RTL_R8(tp, PHYstatus) & TBI_Enable); } static void rtl8169_init_phy(struct net_device *dev, struct rtl8169_private *tp) @@ -4468,7 +4116,8 @@ static void rtl8169_init_phy(struct net_device *dev, struct rtl8169_private *tp) rtl_hw_phy_config(dev); if (tp->mac_version <= RTL_GIGA_MAC_VER_06) { - dprintk("Set MAC Reg C+CR Offset 0x82h = 0x01h\n"); + netif_dbg(tp, drv, dev, + "Set MAC Reg C+CR Offset 0x82h = 0x01h\n"); RTL_W8(tp, 0x82, 0x01); } @@ -4478,23 +4127,18 @@ static void rtl8169_init_phy(struct net_device *dev, struct rtl8169_private *tp) pci_write_config_byte(tp->pci_dev, PCI_CACHE_LINE_SIZE, 0x08); if (tp->mac_version == RTL_GIGA_MAC_VER_02) { - dprintk("Set MAC Reg C+CR Offset 0x82h = 0x01h\n"); + netif_dbg(tp, drv, dev, + "Set MAC Reg C+CR Offset 0x82h = 0x01h\n"); RTL_W8(tp, 0x82, 0x01); - dprintk("Set PHY Reg 0x0bh = 0x00h\n"); + netif_dbg(tp, drv, dev, + "Set PHY Reg 0x0bh = 0x00h\n"); rtl_writephy(tp, 0x0b, 0x0000); //w 0x0b 15 0 0 } - rtl8169_phy_reset(dev, tp); + /* We may have called phy_speed_down before */ + phy_speed_up(dev->phydev); - rtl8169_set_speed(dev, AUTONEG_ENABLE, SPEED_1000, DUPLEX_FULL, - ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | - ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | - (tp->mii.supports_gmii ? 
- ADVERTISED_1000baseT_Half | - ADVERTISED_1000baseT_Full : 0)); - - if (rtl_tbi_enabled(tp)) - netif_info(tp, link, dev, "TBI auto-negotiating\n"); + genphy_soft_reset(dev->phydev); } static void rtl_rar_set(struct rtl8169_private *tp, u8 *addr) @@ -4539,34 +4183,10 @@ static int rtl_set_mac_address(struct net_device *dev, void *p) static int rtl8169_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) { - struct rtl8169_private *tp = netdev_priv(dev); - struct mii_ioctl_data *data = if_mii(ifr); - - return netif_running(dev) ? tp->do_ioctl(tp, data, cmd) : -ENODEV; -} - -static int rtl_xmii_ioctl(struct rtl8169_private *tp, - struct mii_ioctl_data *data, int cmd) -{ - switch (cmd) { - case SIOCGMIIPHY: - data->phy_id = 32; /* Internal PHY */ - return 0; - - case SIOCGMIIREG: - data->val_out = rtl_readphy(tp, data->reg_num & 0x1f); - return 0; - - case SIOCSMIIREG: - rtl_writephy(tp, data->reg_num & 0x1f, data->val_in); - return 0; - } - return -EOPNOTSUPP; -} + if (!netif_running(dev)) + return -ENODEV; -static int rtl_tbi_ioctl(struct rtl8169_private *tp, struct mii_ioctl_data *data, int cmd) -{ - return -EOPNOTSUPP; + return phy_mii_ioctl(dev->phydev, ifr, cmd); } static void rtl_init_mdio_ops(struct rtl8169_private *tp) @@ -4594,30 +4214,6 @@ static void rtl_init_mdio_ops(struct rtl8169_private *tp) } } -static void rtl_speed_down(struct rtl8169_private *tp) -{ - u32 adv; - int lpa; - - rtl_writephy(tp, 0x1f, 0x0000); - lpa = rtl_readphy(tp, MII_LPA); - - if (lpa & (LPA_10HALF | LPA_10FULL)) - adv = ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full; - else if (lpa & (LPA_100HALF | LPA_100FULL)) - adv = ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | - ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full; - else - adv = ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | - ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | - (tp->mii.supports_gmii ? - ADVERTISED_1000baseT_Half | - ADVERTISED_1000baseT_Full : 0); - - rtl8169_set_speed(tp->dev, AUTONEG_ENABLE, SPEED_1000, DUPLEX_FULL, - adv); -} - static void rtl_wol_suspend_quirk(struct rtl8169_private *tp) { switch (tp->mac_version) { @@ -4639,56 +4235,15 @@ static void rtl_wol_suspend_quirk(struct rtl8169_private *tp) static bool rtl_wol_pll_power_down(struct rtl8169_private *tp) { - if (!(__rtl8169_get_wol(tp) & WAKE_ANY)) + if (!netif_running(tp->dev) || !__rtl8169_get_wol(tp)) return false; - rtl_speed_down(tp); + phy_speed_down(tp->dev->phydev, false); rtl_wol_suspend_quirk(tp); return true; } -static void r8168_phy_power_up(struct rtl8169_private *tp) -{ - rtl_writephy(tp, 0x1f, 0x0000); - switch (tp->mac_version) { - case RTL_GIGA_MAC_VER_11: - case RTL_GIGA_MAC_VER_12: - case RTL_GIGA_MAC_VER_17 ... RTL_GIGA_MAC_VER_28: - case RTL_GIGA_MAC_VER_31: - rtl_writephy(tp, 0x0e, 0x0000); - break; - default: - break; - } - rtl_writephy(tp, MII_BMCR, BMCR_ANENABLE); - - /* give MAC/PHY some time to resume */ - msleep(20); -} - -static void r8168_phy_power_down(struct rtl8169_private *tp) -{ - rtl_writephy(tp, 0x1f, 0x0000); - switch (tp->mac_version) { - case RTL_GIGA_MAC_VER_32: - case RTL_GIGA_MAC_VER_33: - case RTL_GIGA_MAC_VER_40: - case RTL_GIGA_MAC_VER_41: - rtl_writephy(tp, MII_BMCR, BMCR_ANENABLE | BMCR_PDOWN); - break; - - case RTL_GIGA_MAC_VER_11: - case RTL_GIGA_MAC_VER_12: - case RTL_GIGA_MAC_VER_17 ... 
RTL_GIGA_MAC_VER_28: - case RTL_GIGA_MAC_VER_31: - rtl_writephy(tp, 0x0e, 0x0200); - default: - rtl_writephy(tp, MII_BMCR, BMCR_PDOWN); - break; - } -} - static void r8168_pll_power_down(struct rtl8169_private *tp) { if (r8168_check_dash(tp)) @@ -4701,8 +4256,6 @@ static void r8168_pll_power_down(struct rtl8169_private *tp) if (rtl_wol_pll_power_down(tp)) return; - r8168_phy_power_down(tp); - switch (tp->mac_version) { case RTL_GIGA_MAC_VER_25 ... RTL_GIGA_MAC_VER_33: case RTL_GIGA_MAC_VER_37: @@ -4754,7 +4307,9 @@ static void r8168_pll_power_up(struct rtl8169_private *tp) break; } - r8168_phy_power_up(tp); + phy_resume(tp->dev->phydev); + /* give MAC/PHY some time to resume */ + msleep(20); } static void rtl_pll_power_down(struct rtl8169_private *tp) @@ -5172,8 +4727,8 @@ static void rtl_hw_start_8169(struct rtl8169_private *tp) if (tp->mac_version == RTL_GIGA_MAC_VER_02 || tp->mac_version == RTL_GIGA_MAC_VER_03) { - dprintk("Set MAC Reg C+CR Offset 0xe0. " - "Bit-3 and bit-14 MUST be 1\n"); + netif_dbg(tp, drv, tp->dev, + "Set MAC Reg C+CR Offset 0xe0. Bit 3 and Bit 14 MUST be 1\n"); tp->cp_cmd |= (1 << 14); } @@ -5236,12 +4791,7 @@ static void rtl_csi_access_enable(struct rtl8169_private *tp, u8 val) rtl_csi_write(tp, 0x070c, csi | val << 24); } -static void rtl_csi_access_enable_1(struct rtl8169_private *tp) -{ - rtl_csi_access_enable(tp, 0x17); -} - -static void rtl_csi_access_enable_2(struct rtl8169_private *tp) +static void rtl_set_def_aspm_entry_latency(struct rtl8169_private *tp) { rtl_csi_access_enable(tp, 0x27); } @@ -5290,6 +4840,17 @@ static void rtl_pcie_state_l2l3_enable(struct rtl8169_private *tp, bool enable) RTL_W8(tp, Config3, data); } +static void rtl_hw_aspm_clkreq_enable(struct rtl8169_private *tp, bool enable) +{ + if (enable) { + RTL_W8(tp, Config2, RTL_R8(tp, Config2) | ClkReqEn); + RTL_W8(tp, Config5, RTL_R8(tp, Config5) | ASPM_en); + } else { + RTL_W8(tp, Config2, RTL_R8(tp, Config2) & ~ClkReqEn); + RTL_W8(tp, Config5, RTL_R8(tp, Config5) & ~ASPM_en); + } +} + static void rtl_hw_start_8168bb(struct rtl8169_private *tp) { RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Beacon_en); @@ -5337,7 +4898,7 @@ static void rtl_hw_start_8168cp_1(struct rtl8169_private *tp) { 0x07, 0, 0x2000 } }; - rtl_csi_access_enable_2(tp); + rtl_set_def_aspm_entry_latency(tp); rtl_ephy_init(tp, e_info_8168cp, ARRAY_SIZE(e_info_8168cp)); @@ -5346,7 +4907,7 @@ static void rtl_hw_start_8168cp_1(struct rtl8169_private *tp) static void rtl_hw_start_8168cp_2(struct rtl8169_private *tp) { - rtl_csi_access_enable_2(tp); + rtl_set_def_aspm_entry_latency(tp); RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Beacon_en); @@ -5359,7 +4920,7 @@ static void rtl_hw_start_8168cp_2(struct rtl8169_private *tp) static void rtl_hw_start_8168cp_3(struct rtl8169_private *tp) { - rtl_csi_access_enable_2(tp); + rtl_set_def_aspm_entry_latency(tp); RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Beacon_en); @@ -5383,7 +4944,7 @@ static void rtl_hw_start_8168c_1(struct rtl8169_private *tp) { 0x06, 0x0080, 0x0000 } }; - rtl_csi_access_enable_2(tp); + rtl_set_def_aspm_entry_latency(tp); RTL_W8(tp, DBG_REG, 0x06 | FIX_NAK_1 | FIX_NAK_2); @@ -5399,7 +4960,7 @@ static void rtl_hw_start_8168c_2(struct rtl8169_private *tp) { 0x03, 0x0400, 0x0220 } }; - rtl_csi_access_enable_2(tp); + rtl_set_def_aspm_entry_latency(tp); rtl_ephy_init(tp, e_info_8168c_2, ARRAY_SIZE(e_info_8168c_2)); @@ -5413,14 +4974,14 @@ static void rtl_hw_start_8168c_3(struct rtl8169_private *tp) static void rtl_hw_start_8168c_4(struct rtl8169_private *tp) { - 
rtl_csi_access_enable_2(tp); + rtl_set_def_aspm_entry_latency(tp); __rtl_hw_start_8168cp(tp); } static void rtl_hw_start_8168d(struct rtl8169_private *tp) { - rtl_csi_access_enable_2(tp); + rtl_set_def_aspm_entry_latency(tp); rtl_disable_clock_request(tp); @@ -5435,7 +4996,7 @@ static void rtl_hw_start_8168d(struct rtl8169_private *tp) static void rtl_hw_start_8168dp(struct rtl8169_private *tp) { - rtl_csi_access_enable_1(tp); + rtl_set_def_aspm_entry_latency(tp); if (tp->dev->mtu <= ETH_DATA_LEN) rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B); @@ -5453,7 +5014,7 @@ static void rtl_hw_start_8168d_4(struct rtl8169_private *tp) { 0x0c, 0x0100, 0x0020 } }; - rtl_csi_access_enable_1(tp); + rtl_set_def_aspm_entry_latency(tp); rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B); @@ -5482,7 +5043,7 @@ static void rtl_hw_start_8168e_1(struct rtl8169_private *tp) { 0x0a, 0x0000, 0x0040 } }; - rtl_csi_access_enable_2(tp); + rtl_set_def_aspm_entry_latency(tp); rtl_ephy_init(tp, e_info_8168e_1, ARRAY_SIZE(e_info_8168e_1)); @@ -5507,7 +5068,7 @@ static void rtl_hw_start_8168e_2(struct rtl8169_private *tp) { 0x19, 0x0000, 0x0224 } }; - rtl_csi_access_enable_1(tp); + rtl_set_def_aspm_entry_latency(tp); rtl_ephy_init(tp, e_info_8168e_2, ARRAY_SIZE(e_info_8168e_2)); @@ -5536,11 +5097,13 @@ static void rtl_hw_start_8168e_2(struct rtl8169_private *tp) RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) | PFM_EN); RTL_W32(tp, MISC, RTL_R32(tp, MISC) | PWM_EN); RTL_W8(tp, Config5, RTL_R8(tp, Config5) & ~Spi_en); + + rtl_hw_aspm_clkreq_enable(tp, true); } static void rtl_hw_start_8168f(struct rtl8169_private *tp) { - rtl_csi_access_enable_2(tp); + rtl_set_def_aspm_entry_latency(tp); rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B); @@ -5611,7 +5174,7 @@ static void rtl_hw_start_8168g(struct rtl8169_private *tp) rtl_eri_write(tp, 0xd0, ERIAR_MASK_0001, 0x48, ERIAR_EXGMAC); rtl_eri_write(tp, 0xe8, ERIAR_MASK_1111, 0x00100006, ERIAR_EXGMAC); - rtl_csi_access_enable_1(tp); + rtl_set_def_aspm_entry_latency(tp); rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B); @@ -5646,9 +5209,9 @@ static void rtl_hw_start_8168g_1(struct rtl8169_private *tp) rtl_hw_start_8168g(tp); /* disable aspm and clock request before access ephy */ - RTL_W8(tp, Config2, RTL_R8(tp, Config2) & ~ClkReqEn); - RTL_W8(tp, Config5, RTL_R8(tp, Config5) & ~ASPM_en); + rtl_hw_aspm_clkreq_enable(tp, false); rtl_ephy_init(tp, e_info_8168g_1, ARRAY_SIZE(e_info_8168g_1)); + rtl_hw_aspm_clkreq_enable(tp, true); } static void rtl_hw_start_8168g_2(struct rtl8169_private *tp) @@ -5681,9 +5244,9 @@ static void rtl_hw_start_8411_2(struct rtl8169_private *tp) rtl_hw_start_8168g(tp); /* disable aspm and clock request before access ephy */ - RTL_W8(tp, Config2, RTL_R8(tp, Config2) & ~ClkReqEn); - RTL_W8(tp, Config5, RTL_R8(tp, Config5) & ~ASPM_en); + rtl_hw_aspm_clkreq_enable(tp, false); rtl_ephy_init(tp, e_info_8411_2, ARRAY_SIZE(e_info_8411_2)); + rtl_hw_aspm_clkreq_enable(tp, true); } static void rtl_hw_start_8168h_1(struct rtl8169_private *tp) @@ -5700,8 +5263,7 @@ static void rtl_hw_start_8168h_1(struct rtl8169_private *tp) }; /* disable aspm and clock request before access ephy */ - RTL_W8(tp, Config2, RTL_R8(tp, Config2) & ~ClkReqEn); - RTL_W8(tp, Config5, RTL_R8(tp, Config5) & ~ASPM_en); + rtl_hw_aspm_clkreq_enable(tp, false); rtl_ephy_init(tp, e_info_8168h_1, ARRAY_SIZE(e_info_8168h_1)); RTL_W32(tp, TxConfig, RTL_R32(tp, TxConfig) | TXCFG_AUTO_FIFO); @@ -5711,7 +5273,7 @@ static void rtl_hw_start_8168h_1(struct rtl8169_private *tp) 
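/*
 * Illustrative aside (editorial sketch, not lines of the patch): several
 * r8169 hunks in this series replace open-coded Config2/Config5 bit
 * twiddling with the rtl_hw_aspm_clkreq_enable() helper added above. The
 * recurring shape of those hunks, as the 8168g/8411/8168h/8168ep changes
 * here show, is to gate extended PHY (ephy) register access:
 *
 *	rtl_hw_aspm_clkreq_enable(tp, false);	// ASPM + ClkReq off before ephy access
 *	rtl_ephy_init(tp, e_info, ARRAY_SIZE(e_info));
 *	// ...chip-specific MAC/ERI setup...
 *	rtl_hw_aspm_clkreq_enable(tp, true);	// re-enable once setup is complete
 */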
rtl_eri_write(tp, 0xd0, ERIAR_MASK_0001, 0x48, ERIAR_EXGMAC); rtl_eri_write(tp, 0xe8, ERIAR_MASK_1111, 0x00100006, ERIAR_EXGMAC); - rtl_csi_access_enable_1(tp); + rtl_set_def_aspm_entry_latency(tp); rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B); @@ -5780,6 +5342,8 @@ static void rtl_hw_start_8168h_1(struct rtl8169_private *tp) r8168_mac_ocp_write(tp, 0xe63e, 0x0000); r8168_mac_ocp_write(tp, 0xc094, 0x0000); r8168_mac_ocp_write(tp, 0xc09e, 0x0000); + + rtl_hw_aspm_clkreq_enable(tp, true); } static void rtl_hw_start_8168ep(struct rtl8169_private *tp) @@ -5793,7 +5357,7 @@ static void rtl_hw_start_8168ep(struct rtl8169_private *tp) rtl_eri_write(tp, 0xd0, ERIAR_MASK_0001, 0x5f, ERIAR_EXGMAC); rtl_eri_write(tp, 0xe8, ERIAR_MASK_1111, 0x00100006, ERIAR_EXGMAC); - rtl_csi_access_enable_1(tp); + rtl_set_def_aspm_entry_latency(tp); rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B); @@ -5831,11 +5395,12 @@ static void rtl_hw_start_8168ep_1(struct rtl8169_private *tp) }; /* disable aspm and clock request before access ephy */ - RTL_W8(tp, Config2, RTL_R8(tp, Config2) & ~ClkReqEn); - RTL_W8(tp, Config5, RTL_R8(tp, Config5) & ~ASPM_en); + rtl_hw_aspm_clkreq_enable(tp, false); rtl_ephy_init(tp, e_info_8168ep_1, ARRAY_SIZE(e_info_8168ep_1)); rtl_hw_start_8168ep(tp); + + rtl_hw_aspm_clkreq_enable(tp, true); } static void rtl_hw_start_8168ep_2(struct rtl8169_private *tp) @@ -5847,14 +5412,15 @@ static void rtl_hw_start_8168ep_2(struct rtl8169_private *tp) }; /* disable aspm and clock request before access ephy */ - RTL_W8(tp, Config2, RTL_R8(tp, Config2) & ~ClkReqEn); - RTL_W8(tp, Config5, RTL_R8(tp, Config5) & ~ASPM_en); + rtl_hw_aspm_clkreq_enable(tp, false); rtl_ephy_init(tp, e_info_8168ep_2, ARRAY_SIZE(e_info_8168ep_2)); rtl_hw_start_8168ep(tp); RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) & ~PFM_EN); RTL_W8(tp, MISC_1, RTL_R8(tp, MISC_1) & ~PFM_D3COLD_EN); + + rtl_hw_aspm_clkreq_enable(tp, true); } static void rtl_hw_start_8168ep_3(struct rtl8169_private *tp) @@ -5868,8 +5434,7 @@ static void rtl_hw_start_8168ep_3(struct rtl8169_private *tp) }; /* disable aspm and clock request before access ephy */ - RTL_W8(tp, Config2, RTL_R8(tp, Config2) & ~ClkReqEn); - RTL_W8(tp, Config5, RTL_R8(tp, Config5) & ~ASPM_en); + rtl_hw_aspm_clkreq_enable(tp, false); rtl_ephy_init(tp, e_info_8168ep_3, ARRAY_SIZE(e_info_8168ep_3)); rtl_hw_start_8168ep(tp); @@ -5889,6 +5454,8 @@ static void rtl_hw_start_8168ep_3(struct rtl8169_private *tp) data = r8168_mac_ocp_read(tp, 0xe860); data |= 0x0080; r8168_mac_ocp_write(tp, 0xe860, data); + + rtl_hw_aspm_clkreq_enable(tp, true); } static void rtl_hw_start_8168(struct rtl8169_private *tp) @@ -6006,8 +5573,9 @@ static void rtl_hw_start_8168(struct rtl8169_private *tp) break; default: - printk(KERN_ERR PFX "%s: unknown chipset (mac_version = %d).\n", - tp->dev->name, tp->mac_version); + netif_err(tp, drv, tp->dev, + "unknown chipset (mac_version = %d)\n", + tp->mac_version); break; } } @@ -6026,7 +5594,7 @@ static void rtl_hw_start_8102e_1(struct rtl8169_private *tp) }; u8 cfg1; - rtl_csi_access_enable_2(tp); + rtl_set_def_aspm_entry_latency(tp); RTL_W8(tp, DBG_REG, FIX_NAK_1); @@ -6045,7 +5613,7 @@ static void rtl_hw_start_8102e_1(struct rtl8169_private *tp) static void rtl_hw_start_8102e_2(struct rtl8169_private *tp) { - rtl_csi_access_enable_2(tp); + rtl_set_def_aspm_entry_latency(tp); rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B); @@ -6100,7 +5668,7 @@ static void rtl_hw_start_8402(struct rtl8169_private *tp) { 0x1e, 0, 0x4000 } }; - 
rtl_csi_access_enable_2(tp); + rtl_set_def_aspm_entry_latency(tp); /* Force LAN exit from ASPM if Rx/Tx are not idle */ RTL_W32(tp, FuncEvent, RTL_R32(tp, FuncEvent) | 0x002800); @@ -6384,7 +5952,6 @@ static void rtl_reset_work(struct rtl8169_private *tp) napi_enable(&tp->napi); rtl_hw_start(tp); netif_wake_queue(dev); - rtl8169_check_link_status(dev, tp); } static void rtl8169_tx_timeout(struct net_device *dev) @@ -7001,7 +6568,7 @@ static void rtl_slow_event_work(struct rtl8169_private *tp) rtl8169_pcierr_interrupt(dev); if (status & LinkChg) - rtl8169_check_link_status(dev, tp); + phy_mac_interrupt(dev->phydev); rtl_irq_enable_all(tp); } @@ -7015,7 +6582,6 @@ static void rtl_task(struct work_struct *work) /* XXX - keep rtl_slow_event_work() as first element. */ { RTL_FLAG_TASK_SLOW_PENDING, rtl_slow_event_work }, { RTL_FLAG_TASK_RESET_PENDING, rtl_reset_work }, - { RTL_FLAG_TASK_PHY_PENDING, rtl_phy_work } }; struct rtl8169_private *tp = container_of(work, struct rtl8169_private, wk.work); @@ -7084,11 +6650,51 @@ static void rtl8169_rx_missed(struct net_device *dev) RTL_W32(tp, RxMissed, 0); } +static void r8169_phylink_handler(struct net_device *ndev) +{ + struct rtl8169_private *tp = netdev_priv(ndev); + + if (netif_carrier_ok(ndev)) { + rtl_link_chg_patch(tp); + pm_request_resume(&tp->pci_dev->dev); + } else { + pm_runtime_idle(&tp->pci_dev->dev); + } + + if (net_ratelimit()) + phy_print_status(ndev->phydev); +} + +static int r8169_phy_connect(struct rtl8169_private *tp) +{ + struct phy_device *phydev = mdiobus_get_phy(tp->mii_bus, 0); + phy_interface_t phy_mode; + int ret; + + phy_mode = tp->supports_gmii ? PHY_INTERFACE_MODE_GMII : + PHY_INTERFACE_MODE_MII; + + ret = phy_connect_direct(tp->dev, phydev, r8169_phylink_handler, + phy_mode); + if (ret) + return ret; + + if (!tp->supports_gmii) + phy_set_max_speed(phydev, SPEED_100); + + /* Ensure to advertise everything, incl. 
pause */ + phydev->advertising = phydev->supported; + + phy_attached_info(phydev); + + return 0; +} + static void rtl8169_down(struct net_device *dev) { struct rtl8169_private *tp = netdev_priv(dev); - del_timer_sync(&tp->timer); + phy_stop(dev->phydev); napi_disable(&tp->napi); netif_stop_queue(dev); @@ -7129,6 +6735,8 @@ static int rtl8169_close(struct net_device *dev) cancel_work_sync(&tp->wk.work); + phy_disconnect(dev->phydev); + pci_free_irq(pdev, 0, tp); dma_free_coherent(&pdev->dev, R8169_RX_RING_BYTES, tp->RxDescArray, @@ -7189,6 +6797,10 @@ static int rtl_open(struct net_device *dev) if (retval < 0) goto err_release_fw_2; + retval = r8169_phy_connect(tp); + if (retval) + goto err_free_irq; + rtl_lock_work(tp); set_bit(RTL_FLAG_TASK_ENABLED, tp->wk.flags); @@ -7204,17 +6816,17 @@ static int rtl_open(struct net_device *dev) if (!rtl8169_init_counter_offsets(tp)) netif_warn(tp, hw, dev, "counter reset/update failed\n"); + phy_start(dev->phydev); netif_start_queue(dev); rtl_unlock_work(tp); - tp->saved_wolopts = 0; pm_runtime_put_sync(&pdev->dev); - - rtl8169_check_link_status(dev, tp); out: return retval; +err_free_irq: + pci_free_irq(pdev, 0, tp); err_release_fw_2: rtl_release_firmware(tp); rtl8169_rx_clear(tp); @@ -7293,6 +6905,7 @@ static void rtl8169_net_suspend(struct net_device *dev) if (!netif_running(dev)) return; + phy_stop(dev->phydev); netif_device_detach(dev); netif_stop_queue(dev); @@ -7323,6 +6936,9 @@ static void __rtl8169_resume(struct net_device *dev) netif_device_attach(dev); rtl_pll_power_up(tp); + rtl8169_init_phy(dev, tp); + + phy_start(tp->dev->phydev); rtl_lock_work(tp); napi_enable(&tp->napi); @@ -7336,9 +6952,6 @@ static int rtl8169_resume(struct device *device) { struct pci_dev *pdev = to_pci_dev(device); struct net_device *dev = pci_get_drvdata(pdev); - struct rtl8169_private *tp = netdev_priv(dev); - - rtl8169_init_phy(dev, tp); if (netif_running(dev)) __rtl8169_resume(dev); @@ -7352,13 +6965,10 @@ static int rtl8169_runtime_suspend(struct device *device) struct net_device *dev = pci_get_drvdata(pdev); struct rtl8169_private *tp = netdev_priv(dev); - if (!tp->TxDescArray) { - rtl_pll_power_down(tp); + if (!tp->TxDescArray) return 0; - } rtl_lock_work(tp); - tp->saved_wolopts = __rtl8169_get_wol(tp); __rtl8169_set_wol(tp, WAKE_ANY); rtl_unlock_work(tp); @@ -7383,11 +6993,8 @@ static int rtl8169_runtime_resume(struct device *device) rtl_lock_work(tp); __rtl8169_set_wol(tp, tp->saved_wolopts); - tp->saved_wolopts = 0; rtl_unlock_work(tp); - rtl8169_init_phy(dev, tp); - __rtl8169_resume(dev); return 0; @@ -7455,7 +7062,7 @@ static void rtl_shutdown(struct pci_dev *pdev) rtl8169_hw_reset(tp); if (system_state == SYSTEM_POWER_OFF) { - if (__rtl8169_get_wol(tp) & WAKE_ANY) { + if (tp->saved_wolopts) { rtl_wol_suspend_quirk(tp); rtl_wol_shutdown_quirk(tp); } @@ -7476,6 +7083,7 @@ static void rtl_remove_one(struct pci_dev *pdev) netif_napi_del(&tp->napi); unregister_netdev(dev); + mdiobus_unregister(tp->mii_bus); rtl_release_firmware(tp); @@ -7561,6 +7169,68 @@ DECLARE_RTL_COND(rtl_rxtx_empty_cond) return (RTL_R8(tp, MCU) & RXTX_EMPTY) == RXTX_EMPTY; } +static int r8169_mdio_read_reg(struct mii_bus *mii_bus, int phyaddr, int phyreg) +{ + struct rtl8169_private *tp = mii_bus->priv; + + if (phyaddr > 0) + return -ENODEV; + + return rtl_readphy(tp, phyreg); +} + +static int r8169_mdio_write_reg(struct mii_bus *mii_bus, int phyaddr, + int phyreg, u16 val) +{ + struct rtl8169_private *tp = mii_bus->priv; + + if (phyaddr > 0) + return -ENODEV; + + rtl_writephy(tp, 
phyreg, val); + + return 0; +} + +static int r8169_mdio_register(struct rtl8169_private *tp) +{ + struct pci_dev *pdev = tp->pci_dev; + struct phy_device *phydev; + struct mii_bus *new_bus; + int ret; + + new_bus = devm_mdiobus_alloc(&pdev->dev); + if (!new_bus) + return -ENOMEM; + + new_bus->name = "r8169"; + new_bus->priv = tp; + new_bus->parent = &pdev->dev; + new_bus->irq[0] = PHY_IGNORE_INTERRUPT; + snprintf(new_bus->id, MII_BUS_ID_SIZE, "r8169-%x", + PCI_DEVID(pdev->bus->number, pdev->devfn)); + + new_bus->read = r8169_mdio_read_reg; + new_bus->write = r8169_mdio_write_reg; + + ret = mdiobus_register(new_bus); + if (ret) + return ret; + + phydev = mdiobus_get_phy(new_bus, 0); + if (!phydev) { + mdiobus_unregister(new_bus); + return -ENODEV; + } + + /* PHY will be woken up in rtl_open() */ + phy_suspend(phydev); + + tp->mii_bus = new_bus; + + return 0; +} + static void rtl_hw_init_8168g(struct rtl8169_private *tp) { u32 data; @@ -7618,7 +7288,6 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) { const struct rtl_cfg_info *cfg = rtl_cfg_infos + ent->driver_data; struct rtl8169_private *tp; - struct mii_if_info *mii; struct net_device *dev; int chipset, region, i; int rc; @@ -7638,19 +7307,7 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) tp->dev = dev; tp->pci_dev = pdev; tp->msg_enable = netif_msg_init(debug.msg_enable, R8169_MSG_DEFAULT); - - mii = &tp->mii; - mii->dev = dev; - mii->mdio_read = rtl_mdio_read; - mii->mdio_write = rtl_mdio_write; - mii->phy_id_mask = 0x1f; - mii->reg_num_mask = 0x1f; - mii->supports_gmii = cfg->has_gmii; - - /* disable ASPM completely as that cause random device stop working - * problems as well as full system hangs for some PCIe devices users */ - pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1 | - PCIE_LINK_STATE_CLKPM); + tp->supports_gmii = cfg->has_gmii; /* enable device (incl. 
PCI PM wakeup and hotplug setup) */ rc = pcim_enable_device(pdev); @@ -7689,6 +7346,11 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) /* Identify chip attached to board */ rtl8169_get_mac_version(tp, cfg->default_ver); + if (rtl_tbi_enabled(tp)) { + dev_err(&pdev->dev, "TBI fiber mode not supported\n"); + return -ENODEV; + } + tp->cp_cmd = RTL_R16(tp, CPlusCmd); if ((sizeof(dma_addr_t) > 4) && @@ -7737,22 +7399,6 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) /* override BIOS settings, use userspace tools to enable WOL */ __rtl8169_set_wol(tp, 0); - if (rtl_tbi_enabled(tp)) { - tp->set_speed = rtl8169_set_speed_tbi; - tp->get_link_ksettings = rtl8169_get_link_ksettings_tbi; - tp->phy_reset_enable = rtl8169_tbi_reset_enable; - tp->phy_reset_pending = rtl8169_tbi_reset_pending; - tp->link_ok = rtl8169_tbi_link_ok; - tp->do_ioctl = rtl_tbi_ioctl; - } else { - tp->set_speed = rtl8169_set_speed_xmii; - tp->get_link_ksettings = rtl8169_get_link_ksettings_xmii; - tp->phy_reset_enable = rtl8169_xmii_reset_enable; - tp->phy_reset_pending = rtl8169_xmii_reset_pending; - tp->link_ok = rtl8169_xmii_link_ok; - tp->do_ioctl = rtl_xmii_ioctl; - } - mutex_init(&tp->wk.mutex); u64_stats_init(&tp->rx_stats.syncp); u64_stats_init(&tp->tx_stats.syncp); @@ -7824,8 +7470,6 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) tp->event_slow = cfg->event_slow; tp->coalesce_info = cfg->coalesce_info; - timer_setup(&tp->timer, rtl8169_phy_timer, 0); - tp->rtl_fw = RTL_FIRMWARE_UNKNOWN; tp->counters = dmam_alloc_coherent (&pdev->dev, sizeof(*tp->counters), @@ -7836,10 +7480,17 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) pci_set_drvdata(pdev, dev); - rc = register_netdev(dev); - if (rc < 0) + rc = r8169_mdio_register(tp); + if (rc) return rc; + /* chip gets powered up in rtl_open() */ + rtl_pll_power_down(tp); + + rc = register_netdev(dev); + if (rc) + goto err_mdio_unregister; + netif_info(tp, probe, dev, "%s, %pM, XID %08x, IRQ %d\n", rtl_chip_infos[chipset].name, dev->dev_addr, (u32)(RTL_R32(tp, TxConfig) & 0xfcf0f8ff), @@ -7854,12 +7505,14 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) if (r8168_check_dash(tp)) rtl8168_driver_start(tp); - netif_carrier_off(dev); - if (pci_dev_run_wake(pdev)) pm_runtime_put_sync(&pdev->dev); return 0; + +err_mdio_unregister: + mdiobus_unregister(tp->mii_bus); + return rc; } static struct pci_driver rtl8169_pci_driver = { diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c index 0d811c02ff34..c06f2df895c2 100644 --- a/drivers/net/ethernet/renesas/ravb_main.c +++ b/drivers/net/ethernet/renesas/ravb_main.c @@ -1167,7 +1167,7 @@ static int ravb_get_sset_count(struct net_device *netdev, int sset) } static void ravb_get_ethtool_stats(struct net_device *ndev, - struct ethtool_stats *stats, u64 *data) + struct ethtool_stats *estats, u64 *data) { struct ravb_private *priv = netdev_priv(ndev); int i = 0; @@ -1199,7 +1199,7 @@ static void ravb_get_strings(struct net_device *ndev, u32 stringset, u8 *data) { switch (stringset) { case ETH_SS_STATS: - memcpy(data, *ravb_gstrings_stats, sizeof(ravb_gstrings_stats)); + memcpy(data, ravb_gstrings_stats, sizeof(ravb_gstrings_stats)); break; } } @@ -1564,7 +1564,7 @@ static netdev_tx_t ravb_start_xmit(struct sk_buff *skb, struct net_device *ndev) /* TAG and timestamp required flag */ skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; 
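/*
 * Illustrative aside (editorial sketch, not lines of the patch): the ravb
 * change just below swaps le16_to_cpu() for cpu_to_le16() when building
 * ds_tagl. For a 16-bit value both helpers perform the same byte swap on
 * big-endian hosts, so the fix is about conversion direction and sparse
 * typing: ds_tagl is a little-endian descriptor field, while the timestamp
 * tag is in CPU byte order, so the correctly typed conversion is CPU -> LE:
 *
 *	u16 tag = ts_skb->tag;				// CPU byte order
 *	desc->ds_tagl |= cpu_to_le16(tag << 12);	// store into the LE field
 */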
desc->tagh_tsr = (ts_skb->tag >> 4) | TX_TSR; - desc->ds_tagl |= le16_to_cpu(ts_skb->tag << 12); + desc->ds_tagl |= cpu_to_le16(ts_skb->tag << 12); } skb_tx_timestamp(skb); @@ -1597,7 +1597,8 @@ drop: } static u16 ravb_select_queue(struct net_device *ndev, struct sk_buff *skb, - void *accel_priv, select_queue_fallback_t fallback) + struct net_device *sb_dev, + select_queue_fallback_t fallback) { /* If skb needs TX timestamp, it is handled in network control queue */ return (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) ? RAVB_NC : diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c index 5614fd231bbe..314aeb5dd921 100644 --- a/drivers/net/ethernet/renesas/sh_eth.c +++ b/drivers/net/ethernet/renesas/sh_eth.c @@ -622,7 +622,6 @@ static struct sh_eth_cpu_data r7s72100_data = { .tpauser = 1, .hw_swap = 1, .rpadir = 1, - .rpadir_value = 2 << 16, .no_trimd = 1, .no_ade = 1, .xdfar_rw = 1, @@ -672,7 +671,6 @@ static struct sh_eth_cpu_data r8a7740_data = { .bculr = 1, .hw_swap = 1, .rpadir = 1, - .rpadir_value = 2 << 16, .no_trimd = 1, .no_ade = 1, .xdfar_rw = 1, @@ -798,7 +796,6 @@ static struct sh_eth_cpu_data r8a77980_data = { .hw_swap = 1, .nbst = 1, .rpadir = 1, - .rpadir_value = 2 << 16, .no_trimd = 1, .no_ade = 1, .xdfar_rw = 1, @@ -851,7 +848,6 @@ static struct sh_eth_cpu_data sh7724_data = { .tpauser = 1, .hw_swap = 1, .rpadir = 1, - .rpadir_value = 0x00020000, /* NET_IP_ALIGN assumed to be 2 */ }; static void sh_eth_set_rate_sh7757(struct net_device *ndev) @@ -898,7 +894,6 @@ static struct sh_eth_cpu_data sh7757_data = { .hw_swap = 1, .no_ade = 1, .rpadir = 1, - .rpadir_value = 2 << 16, .rtrate = 1, .dual_port = 1, }; @@ -978,7 +973,6 @@ static struct sh_eth_cpu_data sh7757_data_giga = { .bculr = 1, .hw_swap = 1, .rpadir = 1, - .rpadir_value = 2 << 16, .no_trimd = 1, .no_ade = 1, .xdfar_rw = 1, @@ -1467,7 +1461,7 @@ static int sh_eth_dev_init(struct net_device *ndev) /* Descriptor format */ sh_eth_ring_format(ndev); if (mdp->cd->rpadir) - sh_eth_write(ndev, mdp->cd->rpadir_value, RPADIR); + sh_eth_write(ndev, NET_IP_ALIGN << 16, RPADIR); /* all sh_eth int mask */ sh_eth_write(ndev, 0, EESIPR); @@ -1527,9 +1521,9 @@ static int sh_eth_dev_init(struct net_device *ndev) /* mask reset */ if (mdp->cd->apr) - sh_eth_write(ndev, APR_AP, APR); + sh_eth_write(ndev, 1, APR); if (mdp->cd->mpr) - sh_eth_write(ndev, MPR_MP, MPR); + sh_eth_write(ndev, 1, MPR); if (mdp->cd->tpauser) sh_eth_write(ndev, TPAUSER_UNLIMITED, TPAUSER); diff --git a/drivers/net/ethernet/renesas/sh_eth.h b/drivers/net/ethernet/renesas/sh_eth.h index 726c55a82dd7..140ad2c57095 100644 --- a/drivers/net/ethernet/renesas/sh_eth.h +++ b/drivers/net/ethernet/renesas/sh_eth.h @@ -383,12 +383,12 @@ enum ECSIPR_STATUS_MASK_BIT { /* APR */ enum APR_BIT { - APR_AP = 0x00000001, + APR_AP = 0x0000ffff, }; /* MPR */ enum MPR_BIT { - MPR_MP = 0x00000001, + MPR_MP = 0x0000ffff, }; /* TRSCER */ @@ -403,8 +403,7 @@ enum DESC_I_BIT { /* RPADIR */ enum RPADIR_BIT { - RPADIR_PADS1 = 0x20000, RPADIR_PADS0 = 0x10000, - RPADIR_PADR = 0x0003f, + RPADIR_PADS = 0x1f0000, RPADIR_PADR = 0xffff, }; /* FDR */ @@ -488,7 +487,6 @@ struct sh_eth_cpu_data { u32 ecsipr_value; u32 fdr_value; u32 fcftr_value; - u32 rpadir_value; /* interrupt checking mask */ u32 tx_check; diff --git a/drivers/net/ethernet/sfc/Makefile b/drivers/net/ethernet/sfc/Makefile index 3bac58d0f88b..c5c297e78d06 100644 --- a/drivers/net/ethernet/sfc/Makefile +++ b/drivers/net/ethernet/sfc/Makefile @@ -6,3 +6,5 @@ sfc-$(CONFIG_SFC_MTD) += mtd.o 
sfc-$(CONFIG_SFC_SRIOV) += sriov.o siena_sriov.o ef10_sriov.o obj-$(CONFIG_SFC) += sfc.o + +obj-$(CONFIG_SFC_FALCON) += falcon/ diff --git a/drivers/net/ethernet/sfc/ef10_sriov.c b/drivers/net/ethernet/sfc/ef10_sriov.c index 019cef1d3cf7..3d76fd1504c2 100644 --- a/drivers/net/ethernet/sfc/ef10_sriov.c +++ b/drivers/net/ethernet/sfc/ef10_sriov.c @@ -199,7 +199,7 @@ static int efx_ef10_sriov_alloc_vf_vswitching(struct efx_nic *efx) return -ENOMEM; for (i = 0; i < efx->vf_count; i++) { - random_ether_addr(nic_data->vf[i].mac); + eth_random_addr(nic_data->vf[i].mac); nic_data->vf[i].efx = NULL; nic_data->vf[i].vlan = EFX_EF10_NO_VLAN; @@ -564,7 +564,7 @@ int efx_ef10_sriov_set_vf_vlan(struct efx_nic *efx, int vf_i, u16 vlan, { struct efx_ef10_nic_data *nic_data = efx->nic_data; struct ef10_vf *vf; - u16 old_vlan, new_vlan; + u16 new_vlan; int rc = 0, rc2 = 0; if (vf_i >= efx->vf_count) @@ -619,7 +619,6 @@ int efx_ef10_sriov_set_vf_vlan(struct efx_nic *efx, int vf_i, u16 vlan, } /* Do the actual vlan change */ - old_vlan = vf->vlan; vf->vlan = new_vlan; /* Restore everything in reverse order */ diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c index ce3a177081a8..330233286e78 100644 --- a/drivers/net/ethernet/sfc/efx.c +++ b/drivers/net/ethernet/sfc/efx.c @@ -264,11 +264,17 @@ static int efx_check_disabled(struct efx_nic *efx) static int efx_process_channel(struct efx_channel *channel, int budget) { struct efx_tx_queue *tx_queue; + struct list_head rx_list; int spent; if (unlikely(!channel->enabled)) return 0; + /* Prepare the batch receive list */ + EFX_WARN_ON_PARANOID(channel->rx_list != NULL); + INIT_LIST_HEAD(&rx_list); + channel->rx_list = &rx_list; + efx_for_each_channel_tx_queue(tx_queue, channel) { tx_queue->pkts_compl = 0; tx_queue->bytes_compl = 0; @@ -291,6 +297,10 @@ static int efx_process_channel(struct efx_channel *channel, int budget) } } + /* Receive any packets we queued up */ + netif_receive_skb_list(channel->rx_list); + channel->rx_list = NULL; + return spent; } @@ -555,6 +565,8 @@ static int efx_probe_channel(struct efx_channel *channel) goto fail; } + channel->rx_list = NULL; + return 0; fail: diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h index 65568925c3ef..961b92979640 100644 --- a/drivers/net/ethernet/sfc/net_driver.h +++ b/drivers/net/ethernet/sfc/net_driver.h @@ -448,6 +448,7 @@ enum efx_sync_events_state { * __efx_rx_packet(), or zero if there is none * @rx_pkt_index: Ring index of first buffer for next packet to be delivered * by __efx_rx_packet(), if @rx_pkt_n_frags != 0 + * @rx_list: list of SKBs from current RX, awaiting processing * @rx_queue: RX queue for this channel * @tx_queue: TX queues for this channel * @sync_events_state: Current state of sync events on this channel @@ -500,6 +501,8 @@ struct efx_channel { unsigned int rx_pkt_n_frags; unsigned int rx_pkt_index; + struct list_head *rx_list; + struct efx_rx_queue rx_queue; struct efx_tx_queue tx_queue[EFX_TXQ_TYPES]; diff --git a/drivers/net/ethernet/sfc/rx.c b/drivers/net/ethernet/sfc/rx.c index d2e254f2f72b..396ff01298cd 100644 --- a/drivers/net/ethernet/sfc/rx.c +++ b/drivers/net/ethernet/sfc/rx.c @@ -634,7 +634,12 @@ static void efx_rx_deliver(struct efx_channel *channel, u8 *eh, return; /* Pass the packet up */ - netif_receive_skb(skb); + if (channel->rx_list != NULL) + /* Add to list, will pass up later */ + list_add_tail(&skb->list, channel->rx_list); + else + /* No list, so pass it up now */ + netif_receive_skb(skb); } /* 
Handle a received packet. Second half: Touches packet payload. */ diff --git a/drivers/net/ethernet/smsc/epic100.c b/drivers/net/ethernet/smsc/epic100.c index 949aaef390b6..15c62c160953 100644 --- a/drivers/net/ethernet/smsc/epic100.c +++ b/drivers/net/ethernet/smsc/epic100.c @@ -321,7 +321,6 @@ static int epic_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) static int card_idx = -1; void __iomem *ioaddr; int chip_idx = (int) ent->driver_data; - int irq; struct net_device *dev; struct epic_private *ep; int i, ret, option = 0, duplex = 0; @@ -338,7 +337,6 @@ static int epic_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) ret = pci_enable_device(pdev); if (ret) goto out; - irq = pdev->irq; if (pci_resource_len(pdev, 0) < EPIC_TOTAL_SIZE) { dev_err(&pdev->dev, "no PCI region space\n"); diff --git a/drivers/net/ethernet/socionext/netsec.c b/drivers/net/ethernet/socionext/netsec.c index e080d3e7c582..01589b6982e4 100644 --- a/drivers/net/ethernet/socionext/netsec.c +++ b/drivers/net/ethernet/socionext/netsec.c @@ -780,11 +780,9 @@ static int netsec_process_rx(struct netsec_priv *priv, int budget) static int netsec_napi_poll(struct napi_struct *napi, int budget) { struct netsec_priv *priv; - struct net_device *ndev; int tx, rx, done, todo; priv = container_of(napi, struct netsec_priv, napi); - ndev = priv->ndev; todo = budget; do { diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c index f08625a02cea..7b923362ee55 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c @@ -61,6 +61,7 @@ struct rk_priv_data { struct clk *mac_clk_tx; struct clk *clk_mac_ref; struct clk *clk_mac_refout; + struct clk *clk_mac_speed; struct clk *aclk_mac; struct clk *pclk_mac; struct clk *clk_phy; @@ -83,6 +84,64 @@ struct rk_priv_data { (((tx) ? soc##_GMAC_TXCLK_DLY_ENABLE : soc##_GMAC_TXCLK_DLY_DISABLE) | \ ((rx) ? soc##_GMAC_RXCLK_DLY_ENABLE : soc##_GMAC_RXCLK_DLY_DISABLE)) +#define PX30_GRF_GMAC_CON1 0x0904 + +/* PX30_GRF_GMAC_CON1 */ +#define PX30_GMAC_PHY_INTF_SEL_RMII (GRF_CLR_BIT(4) | GRF_CLR_BIT(5) | \ + GRF_BIT(6)) +#define PX30_GMAC_SPEED_10M GRF_CLR_BIT(2) +#define PX30_GMAC_SPEED_100M GRF_BIT(2) + +static void px30_set_to_rmii(struct rk_priv_data *bsp_priv) +{ + struct device *dev = &bsp_priv->pdev->dev; + + if (IS_ERR(bsp_priv->grf)) { + dev_err(dev, "%s: Missing rockchip,grf property\n", __func__); + return; + } + + regmap_write(bsp_priv->grf, PX30_GRF_GMAC_CON1, + PX30_GMAC_PHY_INTF_SEL_RMII); +} + +static void px30_set_rmii_speed(struct rk_priv_data *bsp_priv, int speed) +{ + struct device *dev = &bsp_priv->pdev->dev; + int ret; + + if (IS_ERR(bsp_priv->clk_mac_speed)) { + dev_err(dev, "%s: Missing clk_mac_speed clock\n", __func__); + return; + } + + if (speed == 10) { + regmap_write(bsp_priv->grf, PX30_GRF_GMAC_CON1, + PX30_GMAC_SPEED_10M); + + ret = clk_set_rate(bsp_priv->clk_mac_speed, 2500000); + if (ret) + dev_err(dev, "%s: set clk_mac_speed rate 2500000 failed: %d\n", + __func__, ret); + } else if (speed == 100) { + regmap_write(bsp_priv->grf, PX30_GRF_GMAC_CON1, + PX30_GMAC_SPEED_100M); + + ret = clk_set_rate(bsp_priv->clk_mac_speed, 25000000); + if (ret) + dev_err(dev, "%s: set clk_mac_speed rate 25000000 failed: %d\n", + __func__, ret); + + } else { + dev_err(dev, "unknown speed value for RMII! 
speed=%d", speed); + } +} + +static const struct rk_gmac_ops px30_ops = { + .set_to_rmii = px30_set_to_rmii, + .set_rmii_speed = px30_set_rmii_speed, +}; + #define RK3128_GRF_MAC_CON0 0x0168 #define RK3128_GRF_MAC_CON1 0x016c @@ -1042,6 +1101,10 @@ static int rk_gmac_clk_init(struct plat_stmmacenet_data *plat) } } + bsp_priv->clk_mac_speed = devm_clk_get(dev, "clk_mac_speed"); + if (IS_ERR(bsp_priv->clk_mac_speed)) + dev_err(dev, "cannot get clock %s\n", "clk_mac_speed"); + if (bsp_priv->clock_input) { dev_info(dev, "clock input from PHY\n"); } else { @@ -1094,6 +1157,9 @@ static int gmac_clk_enable(struct rk_priv_data *bsp_priv, bool enable) if (!IS_ERR(bsp_priv->mac_clk_tx)) clk_prepare_enable(bsp_priv->mac_clk_tx); + if (!IS_ERR(bsp_priv->clk_mac_speed)) + clk_prepare_enable(bsp_priv->clk_mac_speed); + /** * if (!IS_ERR(bsp_priv->clk_mac)) * clk_prepare_enable(bsp_priv->clk_mac); @@ -1118,6 +1184,8 @@ static int gmac_clk_enable(struct rk_priv_data *bsp_priv, bool enable) clk_disable_unprepare(bsp_priv->pclk_mac); clk_disable_unprepare(bsp_priv->mac_clk_tx); + + clk_disable_unprepare(bsp_priv->clk_mac_speed); /** * if (!IS_ERR(bsp_priv->clk_mac)) * clk_disable_unprepare(bsp_priv->clk_mac); @@ -1414,6 +1482,7 @@ static int rk_gmac_resume(struct device *dev) static SIMPLE_DEV_PM_OPS(rk_gmac_pm_ops, rk_gmac_suspend, rk_gmac_resume); static const struct of_device_id rk_gmac_dwmac_match[] = { + { .compatible = "rockchip,px30-gmac", .data = &px30_ops }, { .compatible = "rockchip,rk3128-gmac", .data = &rk3128_ops }, { .compatible = "rockchip,rk3228-gmac", .data = &rk3228_ops }, { .compatible = "rockchip,rk3288-gmac", .data = &rk3288_ops }, diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c index 65bc3556bd8f..edb6053bd980 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c @@ -407,6 +407,19 @@ static void dwmac4_enable_tso(void __iomem *ioaddr, bool en, u32 chan) } } +static void dwmac4_qmode(void __iomem *ioaddr, u32 channel, u8 qmode) +{ + u32 mtl_tx_op = readl(ioaddr + MTL_CHAN_TX_OP_MODE(channel)); + + mtl_tx_op &= ~MTL_OP_MODE_TXQEN_MASK; + if (qmode != MTL_QUEUE_AVB) + mtl_tx_op |= MTL_OP_MODE_TXQEN; + else + mtl_tx_op |= MTL_OP_MODE_TXQEN_AV; + + writel(mtl_tx_op, ioaddr + MTL_CHAN_TX_OP_MODE(channel)); +} + static void dwmac4_set_bfsize(void __iomem *ioaddr, int bfsize, u32 chan) { u32 value = readl(ioaddr + DMA_CHAN_RX_CONTROL(chan)); @@ -441,6 +454,7 @@ const struct stmmac_dma_ops dwmac4_dma_ops = { .set_rx_tail_ptr = dwmac4_set_rx_tail_ptr, .set_tx_tail_ptr = dwmac4_set_tx_tail_ptr, .enable_tso = dwmac4_enable_tso, + .qmode = dwmac4_qmode, .set_bfsize = dwmac4_set_bfsize, }; @@ -468,5 +482,6 @@ const struct stmmac_dma_ops dwmac410_dma_ops = { .set_rx_tail_ptr = dwmac4_set_rx_tail_ptr, .set_tx_tail_ptr = dwmac4_set_tx_tail_ptr, .enable_tso = dwmac4_enable_tso, + .qmode = dwmac4_qmode, .set_bfsize = dwmac4_set_bfsize, }; diff --git a/drivers/net/ethernet/stmicro/stmmac/hwif.h b/drivers/net/ethernet/stmicro/stmmac/hwif.h index fe8b536b13f8..79911eefc2a7 100644 --- a/drivers/net/ethernet/stmicro/stmmac/hwif.h +++ b/drivers/net/ethernet/stmicro/stmmac/hwif.h @@ -183,6 +183,7 @@ struct stmmac_dma_ops { void (*set_rx_tail_ptr)(void __iomem *ioaddr, u32 tail_ptr, u32 chan); void (*set_tx_tail_ptr)(void __iomem *ioaddr, u32 tail_ptr, u32 chan); void (*enable_tso)(void __iomem *ioaddr, bool en, u32 chan); + void (*qmode)(void __iomem *ioaddr, u32 channel, u8 qmode); 
void (*set_bfsize)(void __iomem *ioaddr, int bfsize, u32 chan); }; @@ -236,6 +237,8 @@ struct stmmac_dma_ops { stmmac_do_void_callback(__priv, dma, set_tx_tail_ptr, __args) #define stmmac_enable_tso(__priv, __args...) \ stmmac_do_void_callback(__priv, dma, enable_tso, __args) +#define stmmac_dma_qmode(__priv, __args...) \ + stmmac_do_void_callback(__priv, dma, qmode, __args) #define stmmac_set_dma_bfsize(__priv, __args...) \ stmmac_do_void_callback(__priv, dma, set_bfsize, __args) @@ -444,17 +447,22 @@ struct stmmac_mode_ops { struct stmmac_priv; struct tc_cls_u32_offload; +struct tc_cbs_qopt_offload; struct stmmac_tc_ops { int (*init)(struct stmmac_priv *priv); int (*setup_cls_u32)(struct stmmac_priv *priv, struct tc_cls_u32_offload *cls); + int (*setup_cbs)(struct stmmac_priv *priv, + struct tc_cbs_qopt_offload *qopt); }; #define stmmac_tc_init(__priv, __args...) \ stmmac_do_callback(__priv, tc, init, __args) #define stmmac_tc_setup_cls_u32(__priv, __args...) \ stmmac_do_callback(__priv, tc, setup_cls_u32, __args) +#define stmmac_tc_setup_cbs(__priv, __args...) \ + stmmac_do_callback(__priv, tc, setup_cbs, __args) struct stmmac_regs_off { u32 ptp_off; diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index 60f59abab009..d9e60cfd8a85 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -3778,7 +3778,7 @@ static int stmmac_setup_tc_block(struct stmmac_priv *priv, switch (f->command) { case TC_BLOCK_BIND: return tcf_block_cb_register(f->block, stmmac_setup_tc_block_cb, - priv, priv); + priv, priv, f->extack); case TC_BLOCK_UNBIND: tcf_block_cb_unregister(f->block, stmmac_setup_tc_block_cb, priv); return 0; @@ -3795,6 +3795,8 @@ static int stmmac_setup_tc(struct net_device *ndev, enum tc_setup_type type, switch (type) { case TC_SETUP_BLOCK: return stmmac_setup_tc_block(priv, type_data); + case TC_SETUP_QDISC_CBS: + return stmmac_tc_setup_cbs(priv, priv, type_data); default: return -EOPNOTSUPP; } diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c index 2258cd8cc844..1a96dd9c1091 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c @@ -289,7 +289,67 @@ static int tc_init(struct stmmac_priv *priv) return 0; } +static int tc_setup_cbs(struct stmmac_priv *priv, + struct tc_cbs_qopt_offload *qopt) +{ + u32 tx_queues_count = priv->plat->tx_queues_to_use; + u32 queue = qopt->queue; + u32 ptr, speed_div; + u32 mode_to_use; + u64 value; + int ret; + + /* Queue 0 is not AVB capable */ + if (queue <= 0 || queue >= tx_queues_count) + return -EINVAL; + if (priv->speed != SPEED_100 && priv->speed != SPEED_1000) + return -EOPNOTSUPP; + + mode_to_use = priv->plat->tx_queues_cfg[queue].mode_to_use; + if (mode_to_use == MTL_QUEUE_DCB && qopt->enable) { + ret = stmmac_dma_qmode(priv, priv->ioaddr, queue, MTL_QUEUE_AVB); + if (ret) + return ret; + + priv->plat->tx_queues_cfg[queue].mode_to_use = MTL_QUEUE_AVB; + } else if (!qopt->enable) { + return stmmac_dma_qmode(priv, priv->ioaddr, queue, MTL_QUEUE_DCB); + } + + /* Port Transmit Rate and Speed Divider */ + ptr = (priv->speed == SPEED_100) ? 4 : 8; + speed_div = (priv->speed == SPEED_100) ? 
100000 : 1000000; + + /* Final adjustments for HW */ + value = div_s64(qopt->idleslope * 1024ll * ptr, speed_div); + priv->plat->tx_queues_cfg[queue].idle_slope = value & GENMASK(31, 0); + + value = div_s64(-qopt->sendslope * 1024ll * ptr, speed_div); + priv->plat->tx_queues_cfg[queue].send_slope = value & GENMASK(31, 0); + + value = qopt->hicredit * 1024ll * 8; + priv->plat->tx_queues_cfg[queue].high_credit = value & GENMASK(31, 0); + + value = qopt->locredit * 1024ll * 8; + priv->plat->tx_queues_cfg[queue].low_credit = value & GENMASK(31, 0); + + ret = stmmac_config_cbs(priv, priv->hw, + priv->plat->tx_queues_cfg[queue].send_slope, + priv->plat->tx_queues_cfg[queue].idle_slope, + priv->plat->tx_queues_cfg[queue].high_credit, + priv->plat->tx_queues_cfg[queue].low_credit, + queue); + if (ret) + return ret; + + dev_info(priv->device, "CBS queue %d: send %d, idle %d, hi %d, lo %d\n", + queue, qopt->sendslope, qopt->idleslope, + qopt->hicredit, qopt->locredit); + return 0; +} + const struct stmmac_tc_ops dwmac510_tc_ops = { .init = tc_init, .setup_cls_u32 = tc_setup_cls_u32, + .setup_cbs = tc_setup_cbs, }; diff --git a/drivers/net/ethernet/sun/ldmvsw.c b/drivers/net/ethernet/sun/ldmvsw.c index a5dd627fe2f9..d42f47f6c632 100644 --- a/drivers/net/ethernet/sun/ldmvsw.c +++ b/drivers/net/ethernet/sun/ldmvsw.c @@ -101,7 +101,8 @@ static struct vnet_port *vsw_tx_port_find(struct sk_buff *skb, } static u16 vsw_select_queue(struct net_device *dev, struct sk_buff *skb, - void *accel_priv, select_queue_fallback_t fallback) + struct net_device *sb_dev, + select_queue_fallback_t fallback) { struct vnet_port *port = netdev_priv(dev); diff --git a/drivers/net/ethernet/sun/niu.c b/drivers/net/ethernet/sun/niu.c index 88c12474a0c3..9319d84bf49f 100644 --- a/drivers/net/ethernet/sun/niu.c +++ b/drivers/net/ethernet/sun/niu.c @@ -1225,25 +1225,9 @@ static int link_status_1g_rgmii(struct niu *np, int *link_up_p) bmsr = err; if (bmsr & BMSR_LSTATUS) { - u16 adv, lpa; - - err = mii_read(np, np->phy_addr, MII_ADVERTISE); - if (err < 0) - goto out; - adv = err; - - err = mii_read(np, np->phy_addr, MII_LPA); - if (err < 0) - goto out; - lpa = err; - - err = mii_read(np, np->phy_addr, MII_ESTATUS); - if (err < 0) - goto out; link_up = 1; current_speed = SPEED_1000; current_duplex = DUPLEX_FULL; - } lp->active_speed = current_speed; lp->active_duplex = current_duplex; diff --git a/drivers/net/ethernet/sun/sunvnet.c b/drivers/net/ethernet/sun/sunvnet.c index a94f50442613..12539b357a78 100644 --- a/drivers/net/ethernet/sun/sunvnet.c +++ b/drivers/net/ethernet/sun/sunvnet.c @@ -234,7 +234,8 @@ static struct vnet_port *vnet_tx_port_find(struct sk_buff *skb, } static u16 vnet_select_queue(struct net_device *dev, struct sk_buff *skb, - void *accel_priv, select_queue_fallback_t fallback) + struct net_device *sb_dev, + select_queue_fallback_t fallback) { struct vnet *vp = netdev_priv(dev); struct vnet_port *port = __tx_port_find(vp, skb); diff --git a/drivers/net/ethernet/tehuti/tehuti.c b/drivers/net/ethernet/tehuti/tehuti.c index 163d8d16bc24..dc966ddb6d81 100644 --- a/drivers/net/ethernet/tehuti/tehuti.c +++ b/drivers/net/ethernet/tehuti/tehuti.c @@ -1151,7 +1151,6 @@ static void bdx_recycle_skb(struct bdx_priv *priv, struct rxd_desc *rxdd) struct rx_map *dm; struct rxf_fifo *f; struct rxdb *db; - struct sk_buff *skb; int delta; ENTER; @@ -1161,7 +1160,6 @@ static void bdx_recycle_skb(struct bdx_priv *priv, struct rxd_desc *rxdd) DBG("db=%p f=%p\n", db, f); dm = bdx_rxdb_addr_elem(db, rxdd->va_lo); DBG("dm=%p\n", dm); - 
skb = dm->skb; rxfd = (struct rxf_desc *)(f->m.va + f->m.wptr); rxfd->info = CPU_CHIP_SWAP32(0x10003); /* INFO=1 BC=3 */ rxfd->va_lo = rxdd->va_lo; diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c index 358edab9e72e..00761fe59848 100644 --- a/drivers/net/ethernet/ti/cpsw.c +++ b/drivers/net/ethernet/ti/cpsw.c @@ -253,23 +253,24 @@ struct cpsw_ss_regs { #define RX_DSCP_PRI_MAP7 0x4c /* Rx DSCP Priority to Rx Packet Mapping */ /* Bit definitions for the CPSW2_CONTROL register */ -#define PASS_PRI_TAGGED (1<<24) /* Pass Priority Tagged */ -#define VLAN_LTYPE2_EN (1<<21) /* VLAN LTYPE 2 enable */ -#define VLAN_LTYPE1_EN (1<<20) /* VLAN LTYPE 1 enable */ -#define DSCP_PRI_EN (1<<16) /* DSCP Priority Enable */ -#define TS_320 (1<<14) /* Time Sync Dest Port 320 enable */ -#define TS_319 (1<<13) /* Time Sync Dest Port 319 enable */ -#define TS_132 (1<<12) /* Time Sync Dest IP Addr 132 enable */ -#define TS_131 (1<<11) /* Time Sync Dest IP Addr 131 enable */ -#define TS_130 (1<<10) /* Time Sync Dest IP Addr 130 enable */ -#define TS_129 (1<<9) /* Time Sync Dest IP Addr 129 enable */ -#define TS_TTL_NONZERO (1<<8) /* Time Sync Time To Live Non-zero enable */ -#define TS_ANNEX_F_EN (1<<6) /* Time Sync Annex F enable */ -#define TS_ANNEX_D_EN (1<<4) /* Time Sync Annex D enable */ -#define TS_LTYPE2_EN (1<<3) /* Time Sync LTYPE 2 enable */ -#define TS_LTYPE1_EN (1<<2) /* Time Sync LTYPE 1 enable */ -#define TS_TX_EN (1<<1) /* Time Sync Transmit Enable */ -#define TS_RX_EN (1<<0) /* Time Sync Receive Enable */ +#define PASS_PRI_TAGGED BIT(24) /* Pass Priority Tagged */ +#define VLAN_LTYPE2_EN BIT(21) /* VLAN LTYPE 2 enable */ +#define VLAN_LTYPE1_EN BIT(20) /* VLAN LTYPE 1 enable */ +#define DSCP_PRI_EN BIT(16) /* DSCP Priority Enable */ +#define TS_107 BIT(15) /* Tyme Sync Dest IP Address 107 */ +#define TS_320 BIT(14) /* Time Sync Dest Port 320 enable */ +#define TS_319 BIT(13) /* Time Sync Dest Port 319 enable */ +#define TS_132 BIT(12) /* Time Sync Dest IP Addr 132 enable */ +#define TS_131 BIT(11) /* Time Sync Dest IP Addr 131 enable */ +#define TS_130 BIT(10) /* Time Sync Dest IP Addr 130 enable */ +#define TS_129 BIT(9) /* Time Sync Dest IP Addr 129 enable */ +#define TS_TTL_NONZERO BIT(8) /* Time Sync Time To Live Non-zero enable */ +#define TS_ANNEX_F_EN BIT(6) /* Time Sync Annex F enable */ +#define TS_ANNEX_D_EN BIT(4) /* Time Sync Annex D enable */ +#define TS_LTYPE2_EN BIT(3) /* Time Sync LTYPE 2 enable */ +#define TS_LTYPE1_EN BIT(2) /* Time Sync LTYPE 1 enable */ +#define TS_TX_EN BIT(1) /* Time Sync Transmit Enable */ +#define TS_RX_EN BIT(0) /* Time Sync Receive Enable */ #define CTRL_V2_TS_BITS \ (TS_320 | TS_319 | TS_132 | TS_131 | TS_130 | TS_129 |\ @@ -281,7 +282,7 @@ struct cpsw_ss_regs { #define CTRL_V3_TS_BITS \ - (TS_320 | TS_319 | TS_132 | TS_131 | TS_130 | TS_129 |\ + (TS_107 | TS_320 | TS_319 | TS_132 | TS_131 | TS_130 | TS_129 |\ TS_TTL_NONZERO | TS_ANNEX_F_EN | TS_ANNEX_D_EN |\ TS_LTYPE1_EN) @@ -2927,7 +2928,7 @@ static int cpsw_probe_dual_emac(struct cpsw_priv *priv) dev_info(cpsw->dev, "cpsw: Detected MACID = %pM\n", priv_sl2->mac_addr); } else { - random_ether_addr(priv_sl2->mac_addr); + eth_random_addr(priv_sl2->mac_addr); dev_info(cpsw->dev, "cpsw: Random MACID = %pM\n", priv_sl2->mac_addr); } diff --git a/drivers/net/ethernet/ti/cpts.c b/drivers/net/ethernet/ti/cpts.c index 6f63c8729afc..b4ea58dc8caf 100644 --- a/drivers/net/ethernet/ti/cpts.c +++ b/drivers/net/ethernet/ti/cpts.c @@ -114,7 +114,10 @@ static bool cpts_match_tx_ts(struct cpts 
*cpts, struct cpts_event *event) dev_consume_skb_any(skb); dev_dbg(cpts->dev, "match tx timestamp mtype %u seqid %04x\n", mtype, seqid); - } else if (time_after(jiffies, skb_cb->tmo)) { + break; + } + + if (time_after(jiffies, skb_cb->tmo)) { /* timeout any expired skbs over 1s */ dev_dbg(cpts->dev, "expiring tx timestamp mtype %u seqid %04x\n", diff --git a/drivers/net/ethernet/ti/netcp_core.c b/drivers/net/ethernet/ti/netcp_core.c index e40aa3e31af2..a1d335a3c5e4 100644 --- a/drivers/net/ethernet/ti/netcp_core.c +++ b/drivers/net/ethernet/ti/netcp_core.c @@ -1889,13 +1889,6 @@ static int netcp_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vid) return err; } -static u16 netcp_select_queue(struct net_device *dev, struct sk_buff *skb, - void *accel_priv, - select_queue_fallback_t fallback) -{ - return 0; -} - static int netcp_setup_tc(struct net_device *dev, enum tc_setup_type type, void *type_data) { @@ -1972,7 +1965,7 @@ static const struct net_device_ops netcp_netdev_ops = { .ndo_vlan_rx_add_vid = netcp_rx_add_vid, .ndo_vlan_rx_kill_vid = netcp_rx_kill_vid, .ndo_tx_timeout = netcp_ndo_tx_timeout, - .ndo_select_queue = netcp_select_queue, + .ndo_select_queue = dev_pick_tx_zero, .ndo_setup_tc = netcp_setup_tc, }; @@ -2052,7 +2045,7 @@ static int netcp_create_interface(struct netcp_device *netcp_device, if (is_valid_ether_addr(efuse_mac_addr)) ether_addr_copy(ndev->dev_addr, efuse_mac_addr); else - random_ether_addr(ndev->dev_addr); + eth_random_addr(ndev->dev_addr); devm_iounmap(dev, efuse); devm_release_mem_region(dev, res.start, size); @@ -2061,7 +2054,7 @@ static int netcp_create_interface(struct netcp_device *netcp_device, if (mac_addr) ether_addr_copy(ndev->dev_addr, mac_addr); else - random_ether_addr(ndev->dev_addr); + eth_random_addr(ndev->dev_addr); } ret = of_property_read_string(node_interface, "rx-channel", diff --git a/drivers/net/ethernet/xilinx/xilinx_emaclite.c b/drivers/net/ethernet/xilinx/xilinx_emaclite.c index 2a0c06e0f730..42f1f518dad6 100644 --- a/drivers/net/ethernet/xilinx/xilinx_emaclite.c +++ b/drivers/net/ethernet/xilinx/xilinx_emaclite.c @@ -70,7 +70,8 @@ #define XEL_TSR_XMIT_IE_MASK 0x00000008 /* Tx interrupt enable bit */ #define XEL_TSR_XMIT_ACTIVE_MASK 0x80000000 /* Buffer is active, SW bit * only. This is not documented - * in the HW spec */ + * in the HW spec + */ /* Define for programming the MAC address into the EmacLite */ #define XEL_TSR_PROG_MAC_ADDR (XEL_TSR_XMIT_BUSY_MASK | XEL_TSR_PROGRAM_MASK) @@ -94,11 +95,11 @@ -#define TX_TIMEOUT (60*HZ) /* Tx timeout is 60 seconds. */ +#define TX_TIMEOUT (60 * HZ) /* Tx timeout is 60 seconds. */ #define ALIGNMENT 4 /* BUFFER_ALIGN(adr) calculates the number of bytes to the next alignment. 
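
The ldmvsw, sunvnet and netcp hunks above are all part of the same tree-wide ndo_select_queue() change: the opaque accel_priv argument becomes a subordinate struct net_device *sb_dev, and the fallback helper now takes it as a third argument. A minimal sketch of the resulting callback shape (the foo_* names are made up for illustration):

        static u16 foo_select_queue(struct net_device *dev, struct sk_buff *skb,
                                    struct net_device *sb_dev,
                                    select_queue_fallback_t fallback)
        {
                /* No driver-specific policy: defer to the core queue
                 * selection and pass the subordinate device through.
                 */
                return fallback(dev, skb, sb_dev);
        }

Drivers that only ever returned queue 0, such as netcp above, can drop their callback entirely and point .ndo_select_queue at dev_pick_tx_zero.
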
*/ -#define BUFFER_ALIGN(adr) ((ALIGNMENT - ((u32) adr)) % ALIGNMENT) +#define BUFFER_ALIGN(adr) ((ALIGNMENT - ((u32)adr)) % ALIGNMENT) #ifdef __BIG_ENDIAN #define xemaclite_readl ioread32be @@ -238,8 +239,8 @@ static void xemaclite_aligned_write(void *src_ptr, u32 *dest_ptr, /* Set up to output the remaining data */ align_buffer = 0; - to_u8_ptr = (u8 *) &align_buffer; - from_u8_ptr = (u8 *) from_u16_ptr; + to_u8_ptr = (u8 *)&align_buffer; + from_u8_ptr = (u8 *)from_u16_ptr; /* Output the remaining data */ for (; length > 0; length--) @@ -272,7 +273,7 @@ static void xemaclite_aligned_read(u32 *src_ptr, u8 *dest_ptr, u32 align_buffer; from_u32_ptr = src_ptr; - to_u16_ptr = (u16 *) dest_ptr; + to_u16_ptr = (u16 *)dest_ptr; for (; length > 3; length -= 4) { /* Copy each word into the temporary buffer */ @@ -288,9 +289,9 @@ static void xemaclite_aligned_read(u32 *src_ptr, u8 *dest_ptr, u8 *to_u8_ptr, *from_u8_ptr; /* Set up to read the remaining data */ - to_u8_ptr = (u8 *) to_u16_ptr; + to_u8_ptr = (u8 *)to_u16_ptr; align_buffer = *from_u32_ptr++; - from_u8_ptr = (u8 *) &align_buffer; + from_u8_ptr = (u8 *)&align_buffer; /* Read the remaining data */ for (; length > 0; length--) @@ -336,7 +337,8 @@ static int xemaclite_send_data(struct net_local *drvdata, u8 *data, drvdata->next_tx_buf_to_use ^= XEL_BUFFER_OFFSET; } else if (drvdata->tx_ping_pong != 0) { /* If the expected buffer is full, try the other buffer, - * if it is configured in HW */ + * if it is configured in HW + */ addr = (void __iomem __force *)((u32 __force)addr ^ XEL_BUFFER_OFFSET); @@ -349,7 +351,7 @@ static int xemaclite_send_data(struct net_local *drvdata, u8 *data, return -1; /* Buffer was full, return failure */ /* Write the frame to the buffer */ - xemaclite_aligned_write(data, (u32 __force *) addr, byte_count); + xemaclite_aligned_write(data, (u32 __force *)addr, byte_count); xemaclite_writel((byte_count & XEL_TPLR_LENGTH_MASK), addr + XEL_TPLR_OFFSET); @@ -357,7 +359,8 @@ static int xemaclite_send_data(struct net_local *drvdata, u8 *data, /* Update the Tx Status Register to indicate that there is a * frame to send. Set the XEL_TSR_XMIT_ACTIVE_MASK flag which * is used by the interrupt handler to check whether a frame - * has been transmitted */ + * has been transmitted + */ reg_data = xemaclite_readl(addr + XEL_TSR_OFFSET); reg_data |= (XEL_TSR_XMIT_BUSY_MASK | XEL_TSR_XMIT_ACTIVE_MASK); xemaclite_writel(reg_data, addr + XEL_TSR_OFFSET); @@ -369,6 +372,7 @@ static int xemaclite_send_data(struct net_local *drvdata, u8 *data, * xemaclite_recv_data - Receive a frame * @drvdata: Pointer to the Emaclite device private data * @data: Address where the data is to be received + * @maxlen: Maximum supported ethernet packet length * * This function is intended to be called from the interrupt context or * with a wrapper which waits for the receive frame to be available. @@ -394,7 +398,8 @@ static u16 xemaclite_recv_data(struct net_local *drvdata, u8 *data, int maxlen) /* The instance is out of sync, try other buffer if other * buffer is configured, return 0 otherwise. 
If the instance is * out of sync, do not update the 'next_rx_buf_to_use' since it - * will correct on subsequent calls */ + * will correct on subsequent calls + */ if (drvdata->rx_ping_pong != 0) addr = (void __iomem __force *)((u32 __force)addr ^ XEL_BUFFER_OFFSET); @@ -408,13 +413,15 @@ static u16 xemaclite_recv_data(struct net_local *drvdata, u8 *data, int maxlen) return 0; /* No data was available */ } - /* Get the protocol type of the ethernet frame that arrived */ + /* Get the protocol type of the ethernet frame that arrived + */ proto_type = ((ntohl(xemaclite_readl(addr + XEL_HEADER_OFFSET + XEL_RXBUFF_OFFSET)) >> XEL_HEADER_SHIFT) & XEL_RPLR_LENGTH_MASK); /* Check if received ethernet frame is a raw ethernet frame - * or an IP packet or an ARP packet */ + * or an IP packet or an ARP packet + */ if (proto_type > ETH_DATA_LEN) { if (proto_type == ETH_P_IP) { @@ -430,7 +437,8 @@ static u16 xemaclite_recv_data(struct net_local *drvdata, u8 *data, int maxlen) length = XEL_ARP_PACKET_SIZE + ETH_HLEN + ETH_FCS_LEN; else /* Field contains type other than IP or ARP, use max - * frame size and let user parse it */ + * frame size and let user parse it + */ length = ETH_FRAME_LEN + ETH_FCS_LEN; } else /* Use the length in the frame, plus the header and trailer */ @@ -440,7 +448,7 @@ static u16 xemaclite_recv_data(struct net_local *drvdata, u8 *data, int maxlen) length = maxlen; /* Read from the EmacLite device */ - xemaclite_aligned_read((u32 __force *) (addr + XEL_RXBUFF_OFFSET), + xemaclite_aligned_read((u32 __force *)(addr + XEL_RXBUFF_OFFSET), data, length); /* Acknowledge the frame */ @@ -471,7 +479,7 @@ static void xemaclite_update_address(struct net_local *drvdata, /* Determine the expected Tx buffer address */ addr = drvdata->base_addr + drvdata->next_tx_buf_to_use; - xemaclite_aligned_write(address_ptr, (u32 __force *) addr, ETH_ALEN); + xemaclite_aligned_write(address_ptr, (u32 __force *)addr, ETH_ALEN); xemaclite_writel(ETH_ALEN, addr + XEL_TPLR_OFFSET); @@ -488,7 +496,7 @@ static void xemaclite_update_address(struct net_local *drvdata, /** * xemaclite_set_mac_address - Set the MAC address for this device * @dev: Pointer to the network device instance - * @addr: Void pointer to the sockaddr structure + * @address: Void pointer to the sockaddr structure * * This function copies the HW address from the sockaddr strucutre to the * net_device structure and updates the address in HW. @@ -564,19 +572,19 @@ static void xemaclite_tx_handler(struct net_device *dev) struct net_local *lp = netdev_priv(dev); dev->stats.tx_packets++; - if (lp->deferred_skb) { - if (xemaclite_send_data(lp, - (u8 *) lp->deferred_skb->data, - lp->deferred_skb->len) != 0) - return; - else { - dev->stats.tx_bytes += lp->deferred_skb->len; - dev_kfree_skb_irq(lp->deferred_skb); - lp->deferred_skb = NULL; - netif_trans_update(dev); /* prevent tx timeout */ - netif_wake_queue(dev); - } - } + + if (!lp->deferred_skb) + return; + + if (xemaclite_send_data(lp, (u8 *)lp->deferred_skb->data, + lp->deferred_skb->len)) + return; + + dev->stats.tx_bytes += lp->deferred_skb->len; + dev_kfree_skb_irq(lp->deferred_skb); + lp->deferred_skb = NULL; + netif_trans_update(dev); /* prevent tx timeout */ + netif_wake_queue(dev); } /** @@ -602,18 +610,18 @@ static void xemaclite_rx_handler(struct net_device *dev) return; } - /* - * A new skb should have the data halfword aligned, but this code is + /* A new skb should have the data halfword aligned, but this code is * here just in case that isn't true. 
Calculate how many * bytes we should reserve to get the data to start on a word - * boundary */ + * boundary + */ align = BUFFER_ALIGN(skb->data); if (align) skb_reserve(skb, align); skb_reserve(skb, 2); - len = xemaclite_recv_data(lp, (u8 *) skb->data, len); + len = xemaclite_recv_data(lp, (u8 *)skb->data, len); if (!len) { dev->stats.rx_errors++; @@ -639,6 +647,8 @@ static void xemaclite_rx_handler(struct net_device *dev) * @dev_id: Void pointer to the network device instance used as callback * reference * + * Return: IRQ_HANDLED + * * This function handles the Tx and Rx interrupts of the EmacLite device. */ static irqreturn_t xemaclite_interrupt(int irq, void *dev_id) @@ -706,8 +716,8 @@ static int xemaclite_mdio_wait(struct net_local *lp) unsigned long end = jiffies + 2; /* wait for the MDIO interface to not be busy or timeout - after some time. - */ + * after some time. + */ while (xemaclite_readl(lp->base_addr + XEL_MDIOCTRL_OFFSET) & XEL_MDIOCTRL_MDIOSTS_MASK) { if (time_before_eq(end, jiffies)) { @@ -757,7 +767,7 @@ static int xemaclite_mdio_read(struct mii_bus *bus, int phy_id, int reg) rc = xemaclite_readl(lp->base_addr + XEL_MDIORD_OFFSET); dev_dbg(&lp->ndev->dev, - "xemaclite_mdio_read(phy_id=%i, reg=%x) == %x\n", + "%s(phy_id=%i, reg=%x) == %x\n", __func__, phy_id, reg, rc); return rc; @@ -772,6 +782,8 @@ static int xemaclite_mdio_read(struct mii_bus *bus, int phy_id, int reg) * * This function waits till the device is ready to accept a new MDIO * request and then writes the val to the MDIO Write Data register. + * + * Return: 0 upon success or a negative error upon failure */ static int xemaclite_mdio_write(struct mii_bus *bus, int phy_id, int reg, u16 val) @@ -780,7 +792,7 @@ static int xemaclite_mdio_write(struct mii_bus *bus, int phy_id, int reg, u32 ctrl_reg; dev_dbg(&lp->ndev->dev, - "xemaclite_mdio_write(phy_id=%i, reg=%x, val=%x)\n", + "%s(phy_id=%i, reg=%x, val=%x)\n", __func__, phy_id, reg, val); if (xemaclite_mdio_wait(lp)) @@ -805,7 +817,7 @@ static int xemaclite_mdio_write(struct mii_bus *bus, int phy_id, int reg, /** * xemaclite_mdio_setup - Register mii_bus for the Emaclite device * @lp: Pointer to the Emaclite device private data - * @ofdev: Pointer to OF device structure + * @dev: Pointer to OF device structure * * This function enables MDIO bus in the Emaclite device and registers a * mii_bus. @@ -905,6 +917,9 @@ static void xemaclite_adjust_link(struct net_device *ndev) * This function sets the MAC address, requests an IRQ and enables interrupts * for the Emaclite device and starts the Tx queue. * It also connects to the phy device, if MDIO is included in Emaclite device. + * + * Return: 0 on success. -ENODEV, if PHY cannot be connected. + * Non-zero error value on failure. */ static int xemaclite_open(struct net_device *dev) { @@ -975,6 +990,8 @@ static int xemaclite_open(struct net_device *dev) * This function stops the Tx queue, disables interrupts and frees the IRQ for * the Emaclite device. * It also disconnects the phy device associated with the Emaclite device. + * + * Return: 0, always. 
*/ static int xemaclite_close(struct net_device *dev) { @@ -1017,10 +1034,11 @@ static int xemaclite_send(struct sk_buff *orig_skb, struct net_device *dev) new_skb = orig_skb; spin_lock_irqsave(&lp->reset_lock, flags); - if (xemaclite_send_data(lp, (u8 *) new_skb->data, len) != 0) { + if (xemaclite_send_data(lp, (u8 *)new_skb->data, len) != 0) { /* If the Emaclite Tx buffer is busy, stop the Tx queue and * defer the skb for transmission during the ISR, after the - * current transmission is complete */ + * current transmission is complete + */ netif_stop_queue(dev); lp->deferred_skb = new_skb; /* Take the time stamp now, since we can't do this in an ISR. */ @@ -1052,13 +1070,12 @@ static bool get_bool(struct platform_device *ofdev, const char *s) { u32 *p = (u32 *)of_get_property(ofdev->dev.of_node, s, NULL); - if (p) { - return (bool)*p; - } else { - dev_warn(&ofdev->dev, "Parameter %s not found," - "defaulting to false\n", s); + if (!p) { + dev_warn(&ofdev->dev, "Parameter %s not found, defaulting to false\n", s); return false; } + + return (bool)*p; } static const struct net_device_ops xemaclite_netdev_ops; @@ -1066,7 +1083,6 @@ static const struct net_device_ops xemaclite_netdev_ops; /** * xemaclite_of_probe - Probe method for the Emaclite device. * @ofdev: Pointer to OF device structure - * @match: Pointer to the structure used for matching a device * * This function probes for the Emaclite device in the device tree. * It initializes the driver data structure and the hardware, sets the MAC diff --git a/drivers/net/fjes/fjes_main.c b/drivers/net/fjes/fjes_main.c index 750954be5a74..d3eae1239045 100644 --- a/drivers/net/fjes/fjes_main.c +++ b/drivers/net/fjes/fjes_main.c @@ -1395,8 +1395,8 @@ static void fjes_watch_unshare_task(struct work_struct *work) while ((unshare_watch_bitmask || hw->txrx_stop_req_bit) && (wait_time < 3000)) { - for (epidx = 0; epidx < hw->max_epid; epidx++) { - if (epidx == hw->my_epid) + for (epidx = 0; epidx < max_epid; epidx++) { + if (epidx == my_epid) continue; is_shared = fjes_hw_epid_is_shared(hw->hw_info.share, @@ -1453,8 +1453,8 @@ static void fjes_watch_unshare_task(struct work_struct *work) } if (hw->hw_info.buffer_unshare_reserve_bit) { - for (epidx = 0; epidx < hw->max_epid; epidx++) { - if (epidx == hw->my_epid) + for (epidx = 0; epidx < max_epid; epidx++) { + if (epidx == my_epid) continue; if (test_bit(epidx, diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c index ada33c2d9ac2..6acb6b5718b9 100644 --- a/drivers/net/geneve.c +++ b/drivers/net/geneve.c @@ -236,7 +236,8 @@ static void geneve_rx(struct geneve_dev *geneve, struct geneve_sock *gs, } /* Update tunnel dst according to Geneve options. */ ip_tunnel_info_opts_set(&tun_dst->u.tun_info, - gnvh->options, gnvh->opt_len * 4); + gnvh->options, gnvh->opt_len * 4, + TUNNEL_GENEVE_OPT); } else { /* Drop packets w/ critical options, * since we don't support any... 
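
The ip_tunnel_info_opts_set() call in the geneve_rx() hunk above gains a tunnel-flags argument, and the matching transmit-side hunk below makes geneve_build_header() copy options out only when that flag is present. Condensed, the receive/transmit pairing from these hunks reads:

        /* rx: record the option bytes and mark them as Geneve options */
        ip_tunnel_info_opts_set(&tun_dst->u.tun_info,
                                gnvh->options, gnvh->opt_len * 4,
                                TUNNEL_GENEVE_OPT);

        /* tx: only copy metadata options into the Geneve header when they
         * really are Geneve options, rather than metadata attached by a
         * different tunnel type (such as ERSPAN) sharing the tun_info.
         */
        if (info->key.tun_flags & TUNNEL_GENEVE_OPT)
                ip_tunnel_info_opts_get(geneveh->options, info);
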
@@ -418,11 +419,12 @@ static int geneve_hlen(struct genevehdr *gh) return sizeof(*gh) + gh->opt_len * 4; } -static struct sk_buff **geneve_gro_receive(struct sock *sk, - struct sk_buff **head, - struct sk_buff *skb) +static struct sk_buff *geneve_gro_receive(struct sock *sk, + struct list_head *head, + struct sk_buff *skb) { - struct sk_buff *p, **pp = NULL; + struct sk_buff *pp = NULL; + struct sk_buff *p; struct genevehdr *gh, *gh2; unsigned int hlen, gh_len, off_gnv; const struct packet_offload *ptype; @@ -449,7 +451,7 @@ static struct sk_buff **geneve_gro_receive(struct sock *sk, goto out; } - for (p = *head; p; p = p->next) { + list_for_each_entry(p, head, list) { if (!NAPI_GRO_CB(p)->same_flow) continue; @@ -674,7 +676,8 @@ static void geneve_build_header(struct genevehdr *geneveh, geneveh->proto_type = htons(ETH_P_TEB); geneveh->rsvd2 = 0; - ip_tunnel_info_opts_get(geneveh->options, info); + if (info->key.tun_flags & TUNNEL_GENEVE_OPT) + ip_tunnel_info_opts_get(geneveh->options, info); } static int geneve_build_skb(struct dst_entry *dst, struct sk_buff *skb, diff --git a/drivers/net/gtp.c b/drivers/net/gtp.c index ec629a730005..7a145172d503 100644 --- a/drivers/net/gtp.c +++ b/drivers/net/gtp.c @@ -1255,7 +1255,7 @@ out: return skb->len; } -static struct nla_policy gtp_genl_policy[GTPA_MAX + 1] = { +static const struct nla_policy gtp_genl_policy[GTPA_MAX + 1] = { [GTPA_LINK] = { .type = NLA_U32, }, [GTPA_VERSION] = { .type = NLA_U32, }, [GTPA_TID] = { .type = NLA_U64, }, diff --git a/drivers/net/hamradio/6pack.c b/drivers/net/hamradio/6pack.c index 32f49c4ce457..d79a69dd2146 100644 --- a/drivers/net/hamradio/6pack.c +++ b/drivers/net/hamradio/6pack.c @@ -878,10 +878,8 @@ static void decode_data(struct sixpack *sp, unsigned char inbyte) static void decode_prio_command(struct sixpack *sp, unsigned char cmd) { - unsigned char channel; int actual; - channel = cmd & SIXP_CHN_MASK; if ((cmd & SIXP_PRIO_DATA_MASK) != 0) { /* idle ? 
*/ /* RX and DCD flags can only be set in the same prio command, @@ -933,10 +931,9 @@ static void decode_prio_command(struct sixpack *sp, unsigned char cmd) static void decode_std_command(struct sixpack *sp, unsigned char cmd) { - unsigned char checksum = 0, rest = 0, channel; + unsigned char checksum = 0, rest = 0; short i; - channel = cmd & SIXP_CHN_MASK; switch (cmd & SIXP_CMD_MASK) { /* normal command */ case SIXP_SEOF: if ((sp->rx_count == 0) && (sp->rx_count_cooked == 0)) { diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c index dd1d6e115145..cf4f40a04194 100644 --- a/drivers/net/hyperv/netvsc_drv.c +++ b/drivers/net/hyperv/netvsc_drv.c @@ -329,7 +329,7 @@ static u16 netvsc_pick_tx(struct net_device *ndev, struct sk_buff *skb) } static u16 netvsc_select_queue(struct net_device *ndev, struct sk_buff *skb, - void *accel_priv, + struct net_device *sb_dev, select_queue_fallback_t fallback) { struct net_device_context *ndc = netdev_priv(ndev); @@ -343,9 +343,9 @@ static u16 netvsc_select_queue(struct net_device *ndev, struct sk_buff *skb, if (vf_ops->ndo_select_queue) txq = vf_ops->ndo_select_queue(vf_netdev, skb, - accel_priv, fallback); + sb_dev, fallback); else - txq = fallback(vf_netdev, skb); + txq = fallback(vf_netdev, skb, NULL); /* Record the queue selected by VF so that it can be * used for common case where VF has more queues than diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c index adde8fc45588..cfda146f3b3b 100644 --- a/drivers/net/macvlan.c +++ b/drivers/net/macvlan.c @@ -514,7 +514,6 @@ static int macvlan_queue_xmit(struct sk_buff *skb, struct net_device *dev) const struct macvlan_dev *vlan = netdev_priv(dev); const struct macvlan_port *port = vlan->port; const struct macvlan_dev *dest; - void *accel_priv = NULL; if (vlan->mode == MACVLAN_MODE_BRIDGE) { const struct ethhdr *eth = (void *)skb->data; @@ -533,15 +532,10 @@ static int macvlan_queue_xmit(struct sk_buff *skb, struct net_device *dev) return NET_XMIT_SUCCESS; } } - - /* For packets that are non-multicast and not bridged we will pass - * the necessary information so that the lowerdev can distinguish - * the source of the packets via the accel_priv value. - */ - accel_priv = vlan->accel_priv; xmit_world: skb->dev = vlan->lowerdev; - return dev_queue_xmit_accel(skb, accel_priv); + return dev_queue_xmit_accel(skb, + netdev_get_sb_channel(dev) ? 
dev : NULL); } static inline netdev_tx_t macvlan_netpoll_send_skb(struct macvlan_dev *vlan, struct sk_buff *skb) @@ -1647,6 +1641,7 @@ static int macvlan_device_event(struct notifier_block *unused, switch (event) { case NETDEV_UP: + case NETDEV_DOWN: case NETDEV_CHANGE: list_for_each_entry(vlan, &port->vlans, list) netif_stacked_transfer_operstate(vlan->lowerdev, diff --git a/drivers/net/net_failover.c b/drivers/net/net_failover.c index 4f390fa557e4..d00d42c845b7 100644 --- a/drivers/net/net_failover.c +++ b/drivers/net/net_failover.c @@ -115,7 +115,8 @@ static netdev_tx_t net_failover_start_xmit(struct sk_buff *skb, } static u16 net_failover_select_queue(struct net_device *dev, - struct sk_buff *skb, void *accel_priv, + struct sk_buff *skb, + struct net_device *sb_dev, select_queue_fallback_t fallback) { struct net_failover_info *nfo_info = netdev_priv(dev); @@ -128,9 +129,9 @@ static u16 net_failover_select_queue(struct net_device *dev, if (ops->ndo_select_queue) txq = ops->ndo_select_queue(primary_dev, skb, - accel_priv, fallback); + sb_dev, fallback); else - txq = fallback(primary_dev, skb); + txq = fallback(primary_dev, skb, NULL); qdisc_skb_cb(skb)->slave_dev_queue_mapping = skb->queue_mapping; diff --git a/drivers/net/netdevsim/Makefile b/drivers/net/netdevsim/Makefile index 449b2a1a1800..0fee1d06c084 100644 --- a/drivers/net/netdevsim/Makefile +++ b/drivers/net/netdevsim/Makefile @@ -13,3 +13,7 @@ endif ifneq ($(CONFIG_NET_DEVLINK),) netdevsim-objs += devlink.o fib.o endif + +ifneq ($(CONFIG_XFRM_OFFLOAD),) +netdevsim-objs += ipsec.o +endif diff --git a/drivers/net/netdevsim/bpf.c b/drivers/net/netdevsim/bpf.c index 75c25306d234..c36d2a768202 100644 --- a/drivers/net/netdevsim/bpf.c +++ b/drivers/net/netdevsim/bpf.c @@ -92,7 +92,7 @@ static const struct bpf_prog_offload_ops nsim_bpf_analyzer_ops = { static bool nsim_xdp_offload_active(struct netdevsim *ns) { - return ns->xdp_prog_mode == XDP_ATTACHED_HW; + return ns->xdp_hw.prog; } static void nsim_prog_set_loaded(struct bpf_prog *prog, bool loaded) @@ -195,14 +195,14 @@ static int nsim_xdp_offload_prog(struct netdevsim *ns, struct netdev_bpf *bpf) return nsim_bpf_offload(ns, bpf->prog, nsim_xdp_offload_active(ns)); } -static int nsim_xdp_set_prog(struct netdevsim *ns, struct netdev_bpf *bpf) +static int +nsim_xdp_set_prog(struct netdevsim *ns, struct netdev_bpf *bpf, + struct xdp_attachment_info *xdp) { int err; - if (ns->xdp_prog && (bpf->flags ^ ns->xdp_flags) & XDP_FLAGS_MODES) { - NSIM_EA(bpf->extack, "program loaded with different flags"); + if (!xdp_attachment_flags_ok(xdp, bpf)) return -EBUSY; - } if (bpf->command == XDP_SETUP_PROG && !ns->bpf_xdpdrv_accept) { NSIM_EA(bpf->extack, "driver XDP disabled in DebugFS"); @@ -219,18 +219,7 @@ static int nsim_xdp_set_prog(struct netdevsim *ns, struct netdev_bpf *bpf) return err; } - if (ns->xdp_prog) - bpf_prog_put(ns->xdp_prog); - - ns->xdp_prog = bpf->prog; - ns->xdp_flags = bpf->flags; - - if (!bpf->prog) - ns->xdp_prog_mode = XDP_ATTACHED_NONE; - else if (bpf->command == XDP_SETUP_PROG) - ns->xdp_prog_mode = XDP_ATTACHED_DRV; - else - ns->xdp_prog_mode = XDP_ATTACHED_HW; + xdp_attachment_setup(xdp, bpf); return 0; } @@ -290,10 +279,6 @@ static int nsim_setup_prog_checks(struct netdevsim *ns, struct netdev_bpf *bpf) NSIM_EA(bpf->extack, "MTU too large w/ XDP enabled"); return -EINVAL; } - if (nsim_xdp_offload_active(ns)) { - NSIM_EA(bpf->extack, "xdp offload active, can't load drv prog"); - return -EBUSY; - } return 0; } @@ -567,22 +552,21 @@ int nsim_bpf(struct net_device *dev, 
struct netdev_bpf *bpf) nsim_bpf_destroy_prog(bpf->offload.prog); return 0; case XDP_QUERY_PROG: - bpf->prog_attached = ns->xdp_prog_mode; - bpf->prog_id = ns->xdp_prog ? ns->xdp_prog->aux->id : 0; - bpf->prog_flags = ns->xdp_prog ? ns->xdp_flags : 0; - return 0; + return xdp_attachment_query(&ns->xdp, bpf); + case XDP_QUERY_PROG_HW: + return xdp_attachment_query(&ns->xdp_hw, bpf); case XDP_SETUP_PROG: err = nsim_setup_prog_checks(ns, bpf); if (err) return err; - return nsim_xdp_set_prog(ns, bpf); + return nsim_xdp_set_prog(ns, bpf, &ns->xdp); case XDP_SETUP_PROG_HW: err = nsim_setup_prog_hw_checks(ns, bpf); if (err) return err; - return nsim_xdp_set_prog(ns, bpf); + return nsim_xdp_set_prog(ns, bpf, &ns->xdp_hw); case BPF_OFFLOAD_MAP_ALLOC: if (!ns->bpf_map_accept) return -EOPNOTSUPP; @@ -637,6 +621,7 @@ void nsim_bpf_uninit(struct netdevsim *ns) { WARN_ON(!list_empty(&ns->bpf_bound_progs)); WARN_ON(!list_empty(&ns->bpf_bound_maps)); - WARN_ON(ns->xdp_prog); + WARN_ON(ns->xdp.prog); + WARN_ON(ns->xdp_hw.prog); WARN_ON(ns->bpf_offloaded); } diff --git a/drivers/net/netdevsim/ipsec.c b/drivers/net/netdevsim/ipsec.c new file mode 100644 index 000000000000..2dcf6cc269d0 --- /dev/null +++ b/drivers/net/netdevsim/ipsec.c @@ -0,0 +1,297 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2018 Oracle and/or its affiliates. All rights reserved. */ + +#include <crypto/aead.h> +#include <linux/debugfs.h> +#include <net/xfrm.h> + +#include "netdevsim.h" + +#define NSIM_IPSEC_AUTH_BITS 128 + +static ssize_t nsim_dbg_netdev_ops_read(struct file *filp, + char __user *buffer, + size_t count, loff_t *ppos) +{ + struct netdevsim *ns = filp->private_data; + struct nsim_ipsec *ipsec = &ns->ipsec; + size_t bufsize; + char *buf, *p; + int len; + int i; + + /* the buffer needed is + * (num SAs * 3 lines each * ~60 bytes per line) + one more line + */ + bufsize = (ipsec->count * 4 * 60) + 60; + buf = kzalloc(bufsize, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + p = buf; + p += snprintf(p, bufsize - (p - buf), + "SA count=%u tx=%u\n", + ipsec->count, ipsec->tx); + + for (i = 0; i < NSIM_IPSEC_MAX_SA_COUNT; i++) { + struct nsim_sa *sap = &ipsec->sa[i]; + + if (!sap->used) + continue; + + p += snprintf(p, bufsize - (p - buf), + "sa[%i] %cx ipaddr=0x%08x %08x %08x %08x\n", + i, (sap->rx ? 
'r' : 't'), sap->ipaddr[0], + sap->ipaddr[1], sap->ipaddr[2], sap->ipaddr[3]); + p += snprintf(p, bufsize - (p - buf), + "sa[%i] spi=0x%08x proto=0x%x salt=0x%08x crypt=%d\n", + i, be32_to_cpu(sap->xs->id.spi), + sap->xs->id.proto, sap->salt, sap->crypt); + p += snprintf(p, bufsize - (p - buf), + "sa[%i] key=0x%08x %08x %08x %08x\n", + i, sap->key[0], sap->key[1], + sap->key[2], sap->key[3]); + } + + len = simple_read_from_buffer(buffer, count, ppos, buf, p - buf); + + kfree(buf); + return len; +} + +static const struct file_operations ipsec_dbg_fops = { + .owner = THIS_MODULE, + .open = simple_open, + .read = nsim_dbg_netdev_ops_read, +}; + +static int nsim_ipsec_find_empty_idx(struct nsim_ipsec *ipsec) +{ + u32 i; + + if (ipsec->count == NSIM_IPSEC_MAX_SA_COUNT) + return -ENOSPC; + + /* search sa table */ + for (i = 0; i < NSIM_IPSEC_MAX_SA_COUNT; i++) { + if (!ipsec->sa[i].used) + return i; + } + + return -ENOSPC; +} + +static int nsim_ipsec_parse_proto_keys(struct xfrm_state *xs, + u32 *mykey, u32 *mysalt) +{ + const char aes_gcm_name[] = "rfc4106(gcm(aes))"; + struct net_device *dev = xs->xso.dev; + unsigned char *key_data; + char *alg_name = NULL; + int key_len; + + if (!xs->aead) { + netdev_err(dev, "Unsupported IPsec algorithm\n"); + return -EINVAL; + } + + if (xs->aead->alg_icv_len != NSIM_IPSEC_AUTH_BITS) { + netdev_err(dev, "IPsec offload requires %d bit authentication\n", + NSIM_IPSEC_AUTH_BITS); + return -EINVAL; + } + + key_data = &xs->aead->alg_key[0]; + key_len = xs->aead->alg_key_len; + alg_name = xs->aead->alg_name; + + if (strcmp(alg_name, aes_gcm_name)) { + netdev_err(dev, "Unsupported IPsec algorithm - please use %s\n", + aes_gcm_name); + return -EINVAL; + } + + /* 160 accounts for 16 byte key and 4 byte salt */ + if (key_len > NSIM_IPSEC_AUTH_BITS) { + *mysalt = ((u32 *)key_data)[4]; + } else if (key_len == NSIM_IPSEC_AUTH_BITS) { + *mysalt = 0; + } else { + netdev_err(dev, "IPsec hw offload only supports 128 bit keys with optional 32 bit salt\n"); + return -EINVAL; + } + memcpy(mykey, key_data, 16); + + return 0; +} + +static int nsim_ipsec_add_sa(struct xfrm_state *xs) +{ + struct nsim_ipsec *ipsec; + struct net_device *dev; + struct netdevsim *ns; + struct nsim_sa sa; + u16 sa_idx; + int ret; + + dev = xs->xso.dev; + ns = netdev_priv(dev); + ipsec = &ns->ipsec; + + if (xs->id.proto != IPPROTO_ESP && xs->id.proto != IPPROTO_AH) { + netdev_err(dev, "Unsupported protocol 0x%04x for ipsec offload\n", + xs->id.proto); + return -EINVAL; + } + + if (xs->calg) { + netdev_err(dev, "Compression offload not supported\n"); + return -EINVAL; + } + + /* find the first unused index */ + ret = nsim_ipsec_find_empty_idx(ipsec); + if (ret < 0) { + netdev_err(dev, "No space for SA in Rx table!\n"); + return ret; + } + sa_idx = (u16)ret; + + memset(&sa, 0, sizeof(sa)); + sa.used = true; + sa.xs = xs; + + if (sa.xs->id.proto & IPPROTO_ESP) + sa.crypt = xs->ealg || xs->aead; + + /* get the key and salt */ + ret = nsim_ipsec_parse_proto_keys(xs, sa.key, &sa.salt); + if (ret) { + netdev_err(dev, "Failed to get key data for SA table\n"); + return ret; + } + + if (xs->xso.flags & XFRM_OFFLOAD_INBOUND) { + sa.rx = true; + + if (xs->props.family == AF_INET6) + memcpy(sa.ipaddr, &xs->id.daddr.a6, 16); + else + memcpy(&sa.ipaddr[3], &xs->id.daddr.a4, 4); + } + + /* the preparations worked, so save the info */ + memcpy(&ipsec->sa[sa_idx], &sa, sizeof(sa)); + + /* the XFRM stack doesn't like offload_handle == 0, + * so add a bitflag in case our array index is 0 + */ + xs->xso.offload_handle = 
sa_idx | NSIM_IPSEC_VALID; + ipsec->count++; + + return 0; +} + +static void nsim_ipsec_del_sa(struct xfrm_state *xs) +{ + struct netdevsim *ns = netdev_priv(xs->xso.dev); + struct nsim_ipsec *ipsec = &ns->ipsec; + u16 sa_idx; + + sa_idx = xs->xso.offload_handle & ~NSIM_IPSEC_VALID; + if (!ipsec->sa[sa_idx].used) { + netdev_err(ns->netdev, "Invalid SA for delete sa_idx=%d\n", + sa_idx); + return; + } + + memset(&ipsec->sa[sa_idx], 0, sizeof(struct nsim_sa)); + ipsec->count--; +} + +static bool nsim_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *xs) +{ + struct netdevsim *ns = netdev_priv(xs->xso.dev); + struct nsim_ipsec *ipsec = &ns->ipsec; + + ipsec->ok++; + + return true; +} + +static const struct xfrmdev_ops nsim_xfrmdev_ops = { + .xdo_dev_state_add = nsim_ipsec_add_sa, + .xdo_dev_state_delete = nsim_ipsec_del_sa, + .xdo_dev_offload_ok = nsim_ipsec_offload_ok, +}; + +bool nsim_ipsec_tx(struct netdevsim *ns, struct sk_buff *skb) +{ + struct nsim_ipsec *ipsec = &ns->ipsec; + struct xfrm_state *xs; + struct nsim_sa *tsa; + u32 sa_idx; + + /* do we even need to check this packet? */ + if (!skb->sp) + return true; + + if (unlikely(!skb->sp->len)) { + netdev_err(ns->netdev, "no xfrm state len = %d\n", + skb->sp->len); + return false; + } + + xs = xfrm_input_state(skb); + if (unlikely(!xs)) { + netdev_err(ns->netdev, "no xfrm_input_state() xs = %p\n", xs); + return false; + } + + sa_idx = xs->xso.offload_handle & ~NSIM_IPSEC_VALID; + if (unlikely(sa_idx >= NSIM_IPSEC_MAX_SA_COUNT)) { + netdev_err(ns->netdev, "bad sa_idx=%d max=%d\n", + sa_idx, NSIM_IPSEC_MAX_SA_COUNT); + return false; + } + + tsa = &ipsec->sa[sa_idx]; + if (unlikely(!tsa->used)) { + netdev_err(ns->netdev, "unused sa_idx=%d\n", sa_idx); + return false; + } + + if (xs->id.proto != IPPROTO_ESP && xs->id.proto != IPPROTO_AH) { + netdev_err(ns->netdev, "unexpected proto=%d\n", xs->id.proto); + return false; + } + + ipsec->tx++; + + return true; +} + +void nsim_ipsec_init(struct netdevsim *ns) +{ + ns->netdev->xfrmdev_ops = &nsim_xfrmdev_ops; + +#define NSIM_ESP_FEATURES (NETIF_F_HW_ESP | \ + NETIF_F_HW_ESP_TX_CSUM | \ + NETIF_F_GSO_ESP) + + ns->netdev->features |= NSIM_ESP_FEATURES; + ns->netdev->hw_enc_features |= NSIM_ESP_FEATURES; + + ns->ipsec.pfile = debugfs_create_file("ipsec", 0400, ns->ddir, ns, + &ipsec_dbg_fops); +} + +void nsim_ipsec_teardown(struct netdevsim *ns) +{ + struct nsim_ipsec *ipsec = &ns->ipsec; + + if (ipsec->count) + netdev_err(ns->netdev, "tearing down IPsec offload with %d SAs left\n", + ipsec->count); + debugfs_remove_recursive(ipsec->pfile); +} diff --git a/drivers/net/netdevsim/netdev.c b/drivers/net/netdevsim/netdev.c index ec68f38213d9..a7b179f0d954 100644 --- a/drivers/net/netdevsim/netdev.c +++ b/drivers/net/netdevsim/netdev.c @@ -171,6 +171,8 @@ static int nsim_init(struct net_device *dev) if (err) goto err_unreg_dev; + nsim_ipsec_init(ns); + return 0; err_unreg_dev: @@ -186,6 +188,7 @@ static void nsim_uninit(struct net_device *dev) { struct netdevsim *ns = netdev_priv(dev); + nsim_ipsec_teardown(ns); nsim_devlink_teardown(ns); debugfs_remove_recursive(ns->ddir); nsim_bpf_uninit(ns); @@ -203,11 +206,15 @@ static netdev_tx_t nsim_start_xmit(struct sk_buff *skb, struct net_device *dev) { struct netdevsim *ns = netdev_priv(dev); + if (!nsim_ipsec_tx(ns, skb)) + goto out; + u64_stats_update_begin(&ns->syncp); ns->tx_packets++; ns->tx_bytes += skb->len; u64_stats_update_end(&ns->syncp); +out: dev_kfree_skb(skb); return NETDEV_TX_OK; @@ -221,8 +228,7 @@ static int nsim_change_mtu(struct 
net_device *dev, int new_mtu) { struct netdevsim *ns = netdev_priv(dev); - if (ns->xdp_prog_mode == XDP_ATTACHED_DRV && - new_mtu > NSIM_XDP_MAX_MTU) + if (ns->xdp.prog && new_mtu > NSIM_XDP_MAX_MTU) return -EBUSY; dev->mtu = new_mtu; @@ -260,7 +266,7 @@ nsim_setup_tc_block(struct net_device *dev, struct tc_block_offload *f) switch (f->command) { case TC_BLOCK_BIND: return tcf_block_cb_register(f->block, nsim_setup_tc_block_cb, - ns, ns); + ns, ns, f->extack); case TC_BLOCK_UNBIND: tcf_block_cb_unregister(f->block, nsim_setup_tc_block_cb, ns); return 0; diff --git a/drivers/net/netdevsim/netdevsim.h b/drivers/net/netdevsim/netdevsim.h index 8ca50b72c328..0aeabbe81cc6 100644 --- a/drivers/net/netdevsim/netdevsim.h +++ b/drivers/net/netdevsim/netdevsim.h @@ -18,6 +18,7 @@ #include <linux/list.h> #include <linux/netdevice.h> #include <linux/u64_stats_sync.h> +#include <net/xdp.h> #define DRV_NAME "netdevsim" @@ -29,6 +30,27 @@ struct bpf_prog; struct dentry; struct nsim_vf_config; +#define NSIM_IPSEC_MAX_SA_COUNT 33 +#define NSIM_IPSEC_VALID BIT(31) + +struct nsim_sa { + struct xfrm_state *xs; + __be32 ipaddr[4]; + u32 key[4]; + u32 salt; + bool used; + bool crypt; + bool rx; +}; + +struct nsim_ipsec { + struct nsim_sa sa[NSIM_IPSEC_MAX_SA_COUNT]; + struct dentry *pfile; + u32 count; + u32 tx; + u32 ok; +}; + struct netdevsim { struct net_device *netdev; @@ -46,9 +68,8 @@ struct netdevsim { struct bpf_prog *bpf_offloaded; u32 bpf_offloaded_id; - u32 xdp_flags; - int xdp_prog_mode; - struct bpf_prog *xdp_prog; + struct xdp_attachment_info xdp; + struct xdp_attachment_info xdp_hw; u32 prog_id_gen; @@ -67,6 +88,7 @@ struct netdevsim { #if IS_ENABLED(CONFIG_NET_DEVLINK) struct devlink *devlink; #endif + struct nsim_ipsec ipsec; }; extern struct dentry *nsim_ddir; @@ -148,6 +170,25 @@ static inline void nsim_devlink_exit(void) } #endif +#if IS_ENABLED(CONFIG_XFRM_OFFLOAD) +void nsim_ipsec_init(struct netdevsim *ns); +void nsim_ipsec_teardown(struct netdevsim *ns); +bool nsim_ipsec_tx(struct netdevsim *ns, struct sk_buff *skb); +#else +static inline void nsim_ipsec_init(struct netdevsim *ns) +{ +} + +static inline void nsim_ipsec_teardown(struct netdevsim *ns) +{ +} + +static inline bool nsim_ipsec_tx(struct netdevsim *ns, struct sk_buff *skb) +{ + return true; +} +#endif + static inline struct netdevsim *to_nsim(struct device *ptr) { return container_of(ptr, struct netdevsim, dev); diff --git a/drivers/net/ntb_netdev.c b/drivers/net/ntb_netdev.c index 9f6f7ccd44f7..b12023bc2cab 100644 --- a/drivers/net/ntb_netdev.c +++ b/drivers/net/ntb_netdev.c @@ -430,7 +430,7 @@ static int ntb_netdev_probe(struct device *client_dev) ndev->hw_features = ndev->features; ndev->watchdog_timeo = msecs_to_jiffies(NTB_TX_TIMEOUT_MS); - random_ether_addr(ndev->perm_addr); + eth_random_addr(ndev->perm_addr); memcpy(ndev->dev_addr, ndev->perm_addr, ndev->addr_len); ndev->netdev_ops = &ntb_netdev_ops; diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig index 343989f9f9d9..b7ff0af419b4 100644 --- a/drivers/net/phy/Kconfig +++ b/drivers/net/phy/Kconfig @@ -28,7 +28,7 @@ config MDIO_BCM_IPROC config MDIO_BCM_UNIMAC tristate "Broadcom UniMAC MDIO bus controller" - depends on HAS_IOMEM && OF_MDIO + depends on HAS_IOMEM help This module provides a driver for the Broadcom UniMAC MDIO busses. 
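
Looking back at the netdevsim XDP hunks above: the driver-private xdp_flags/xdp_prog_mode/xdp_prog triple is replaced with two struct xdp_attachment_info instances (one for the driver path, one for the offloaded "hw" path) plus the shared helpers already used in the diff. The general pattern those helpers give a driver looks roughly like this (the foo_* names are hypothetical):

        static int foo_xdp_set_prog(struct foo_priv *priv, struct netdev_bpf *bpf,
                                    struct xdp_attachment_info *info)
        {
                /* Reject attempts to replace a program that was loaded
                 * with different XDP_FLAGS_* attachment flags.
                 */
                if (!xdp_attachment_flags_ok(info, bpf))
                        return -EBUSY;

                /* ... driver-specific setup of the new program here ... */

                /* Swap prog references and remember the flags so later
                 * XDP_QUERY_PROG / XDP_QUERY_PROG_HW requests can be
                 * answered with xdp_attachment_query(info, bpf).
                 */
                xdp_attachment_setup(info, bpf);
                return 0;
        }
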
This hardware can be found in the Broadcom GENET Ethernet MAC @@ -92,7 +92,8 @@ config MDIO_CAVIUM config MDIO_GPIO tristate "GPIO lib-based bitbanged MDIO buses" - depends on MDIO_BITBANG && GPIOLIB + depends on MDIO_BITBANG + depends on GPIOLIB || COMPILE_TEST ---help--- Supports GPIO lib-based MDIO busses. @@ -214,6 +215,7 @@ config SFP tristate "SFP cage support" depends on I2C && PHYLINK select MDIO_I2C + imply HWMON config AMD_PHY tristate "AMD PHYs" diff --git a/drivers/net/phy/dp83tc811.c b/drivers/net/phy/dp83tc811.c index 49ac678eb2dc..78cad134a79e 100644 --- a/drivers/net/phy/dp83tc811.c +++ b/drivers/net/phy/dp83tc811.c @@ -21,6 +21,7 @@ #define MII_DP83811_SGMII_CTRL 0x09 #define MII_DP83811_INT_STAT1 0x12 #define MII_DP83811_INT_STAT2 0x13 +#define MII_DP83811_INT_STAT3 0x18 #define MII_DP83811_RESET_CTRL 0x1f #define DP83811_HW_RESET BIT(15) @@ -44,6 +45,11 @@ #define DP83811_OVERVOLTAGE_INT_EN BIT(6) #define DP83811_UNDERVOLTAGE_INT_EN BIT(7) +/* INT_STAT3 bits */ +#define DP83811_LPS_INT_EN BIT(0) +#define DP83811_NO_FRAME_INT_EN BIT(3) +#define DP83811_POR_DONE_INT_EN BIT(4) + #define MII_DP83811_RXSOP1 0x04a5 #define MII_DP83811_RXSOP2 0x04a6 #define MII_DP83811_RXSOP3 0x04a7 @@ -81,6 +87,10 @@ static int dp83811_ack_interrupt(struct phy_device *phydev) if (err < 0) return err; + err = phy_read(phydev, MII_DP83811_INT_STAT3); + if (err < 0) + return err; + return 0; } @@ -216,6 +226,18 @@ static int dp83811_config_intr(struct phy_device *phydev) DP83811_UNDERVOLTAGE_INT_EN); err = phy_write(phydev, MII_DP83811_INT_STAT2, misr_status); + if (err < 0) + return err; + + misr_status = phy_read(phydev, MII_DP83811_INT_STAT3); + if (misr_status < 0) + return misr_status; + + misr_status |= (DP83811_LPS_INT_EN | + DP83811_NO_FRAME_INT_EN | + DP83811_POR_DONE_INT_EN); + + err = phy_write(phydev, MII_DP83811_INT_STAT3, misr_status); } else { err = phy_write(phydev, MII_DP83811_INT_STAT1, 0); @@ -223,6 +245,10 @@ static int dp83811_config_intr(struct phy_device *phydev) return err; err = phy_write(phydev, MII_DP83811_INT_STAT2, 0); + if (err < 0) + return err; + + err = phy_write(phydev, MII_DP83811_INT_STAT3, 0); } return err; @@ -258,21 +284,19 @@ static int dp83811_config_init(struct phy_device *phydev) if (err < 0) return err; + value = phy_read(phydev, MII_DP83811_SGMII_CTRL); if (phydev->interface == PHY_INTERFACE_MODE_SGMII) { - value = phy_read(phydev, MII_DP83811_SGMII_CTRL); - if (!(value & DP83811_SGMII_EN)) { - err = phy_write(phydev, MII_DP83811_SGMII_CTRL, + err = phy_write(phydev, MII_DP83811_SGMII_CTRL, (DP83811_SGMII_EN | value)); - if (err < 0) - return err; - } else { - err = phy_write(phydev, MII_DP83811_SGMII_CTRL, - (~DP83811_SGMII_EN & value)); - if (err < 0) - return err; - } + } else { + err = phy_write(phydev, MII_DP83811_SGMII_CTRL, + (~DP83811_SGMII_EN & value)); } + if (err < 0) + + return err; + value = DP83811_WOL_MAGIC_EN | DP83811_WOL_SECURE_ON | DP83811_WOL_EN; return phy_write_mmd(phydev, DP83811_DEVADDR, MII_DP83811_WOL_CFG, diff --git a/drivers/net/phy/fixed_phy.c b/drivers/net/phy/fixed_phy.c index 001fe1df7557..67b260877f30 100644 --- a/drivers/net/phy/fixed_phy.c +++ b/drivers/net/phy/fixed_phy.c @@ -259,10 +259,8 @@ static int __init fixed_mdio_bus_init(void) int ret; pdev = platform_device_register_simple("Fixed MDIO bus", 0, NULL, 0); - if (IS_ERR(pdev)) { - ret = PTR_ERR(pdev); - goto err_pdev; - } + if (IS_ERR(pdev)) + return PTR_ERR(pdev); fmb->mii_bus = mdiobus_alloc(); if (fmb->mii_bus == NULL) { @@ -287,7 +285,6 @@ 
err_mdiobus_alloc: mdiobus_free(fmb->mii_bus); err_mdiobus_reg: platform_device_unregister(pdev); -err_pdev: return ret; } module_init(fixed_mdio_bus_init); diff --git a/drivers/net/phy/mdio-mux-gpio.c b/drivers/net/phy/mdio-mux-gpio.c index 082ffef0dec4..bc90764a8b8d 100644 --- a/drivers/net/phy/mdio-mux-gpio.c +++ b/drivers/net/phy/mdio-mux-gpio.c @@ -20,23 +20,23 @@ struct mdio_mux_gpio_state { struct gpio_descs *gpios; void *mux_handle; + int values[]; }; static int mdio_mux_gpio_switch_fn(int current_child, int desired_child, void *data) { struct mdio_mux_gpio_state *s = data; - int values[s->gpios->ndescs]; unsigned int n; if (current_child == desired_child) return 0; for (n = 0; n < s->gpios->ndescs; n++) - values[n] = (desired_child >> n) & 1; + s->values[n] = (desired_child >> n) & 1; gpiod_set_array_value_cansleep(s->gpios->ndescs, s->gpios->desc, - values); + s->values); return 0; } @@ -44,15 +44,21 @@ static int mdio_mux_gpio_switch_fn(int current_child, int desired_child, static int mdio_mux_gpio_probe(struct platform_device *pdev) { struct mdio_mux_gpio_state *s; + struct gpio_descs *gpios; int r; - s = devm_kzalloc(&pdev->dev, sizeof(*s), GFP_KERNEL); - if (!s) + gpios = gpiod_get_array(&pdev->dev, NULL, GPIOD_OUT_LOW); + if (IS_ERR(gpios)) + return PTR_ERR(gpios); + + s = devm_kzalloc(&pdev->dev, struct_size(s, values, gpios->ndescs), + GFP_KERNEL); + if (!s) { + gpiod_put_array(gpios); return -ENOMEM; + } - s->gpios = gpiod_get_array(&pdev->dev, NULL, GPIOD_OUT_LOW); - if (IS_ERR(s->gpios)) - return PTR_ERR(s->gpios); + s->gpios = gpios; r = mdio_mux_init(&pdev->dev, pdev->dev.of_node, mdio_mux_gpio_switch_fn, &s->mux_handle, s, NULL); diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c index 537297d2b4b4..d2baedc4ea91 100644 --- a/drivers/net/phy/phy.c +++ b/drivers/net/phy/phy.c @@ -467,6 +467,14 @@ int phy_mii_ioctl(struct phy_device *phydev, struct ifreq *ifr, int cmd) } EXPORT_SYMBOL(phy_mii_ioctl); +static int phy_config_aneg(struct phy_device *phydev) +{ + if (phydev->drv->config_aneg) + return phydev->drv->config_aneg(phydev); + else + return genphy_config_aneg(phydev); +} + /** * phy_start_aneg_priv - start auto-negotiation for this PHY device * @phydev: the phy_device struct @@ -493,10 +501,7 @@ static int phy_start_aneg_priv(struct phy_device *phydev, bool sync) /* Invalidate LP advertising flags */ phydev->lp_advertising = 0; - if (phydev->drv->config_aneg) - err = phydev->drv->config_aneg(phydev); - else - err = genphy_config_aneg(phydev); + err = phy_config_aneg(phydev); if (err < 0) goto out_unlock; @@ -546,6 +551,84 @@ int phy_start_aneg(struct phy_device *phydev) } EXPORT_SYMBOL(phy_start_aneg); +static int phy_poll_aneg_done(struct phy_device *phydev) +{ + unsigned int retries = 100; + int ret; + + do { + msleep(100); + ret = phy_aneg_done(phydev); + } while (!ret && --retries); + + if (!ret) + return -ETIMEDOUT; + + return ret < 0 ? ret : 0; +} + +/** + * phy_speed_down - set speed to lowest speed supported by both link partners + * @phydev: the phy_device struct + * @sync: perform action synchronously + * + * Description: Typically used to save energy when waiting for a WoL packet + * + * WARNING: Setting sync to false may cause the system being unable to suspend + * in case the PHY generates an interrupt when finishing the autonegotiation. + * This interrupt may wake up the system immediately after suspend. + * Therefore use sync = false only if you're sure it's safe with the respective + * network chip. 
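
The kernel-doc above already spells out the main caveat around sync = false. The expected users are MAC drivers that keep the PHY powered for Wake-on-LAN: drop to the lowest common speed on suspend, restore the full advertisement on resume. A rough sketch, with the foo_* driver names hypothetical:

        static int foo_suspend(struct device *dev)
        {
                struct net_device *ndev = dev_get_drvdata(dev);

                if (device_may_wakeup(dev))
                        /* Save energy while waiting for the WoL packet;
                         * wait for autoneg so the link has settled before
                         * the system sleeps.
                         */
                        phy_speed_down(ndev->phydev, true);
                return 0;
        }

        static int foo_resume(struct device *dev)
        {
                struct net_device *ndev = dev_get_drvdata(dev);

                if (device_may_wakeup(dev))
                        /* Re-advertise all supported speeds and restart
                         * autonegotiation.
                         */
                        phy_speed_up(ndev->phydev);
                return 0;
        }
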
+ */ +int phy_speed_down(struct phy_device *phydev, bool sync) +{ + u32 adv = phydev->lp_advertising & phydev->supported; + u32 adv_old = phydev->advertising; + int ret; + + if (phydev->autoneg != AUTONEG_ENABLE) + return 0; + + if (adv & PHY_10BT_FEATURES) + phydev->advertising &= ~(PHY_100BT_FEATURES | + PHY_1000BT_FEATURES); + else if (adv & PHY_100BT_FEATURES) + phydev->advertising &= ~PHY_1000BT_FEATURES; + + if (phydev->advertising == adv_old) + return 0; + + ret = phy_config_aneg(phydev); + if (ret) + return ret; + + return sync ? phy_poll_aneg_done(phydev) : 0; +} +EXPORT_SYMBOL_GPL(phy_speed_down); + +/** + * phy_speed_up - (re)set advertised speeds to all supported speeds + * @phydev: the phy_device struct + * + * Description: Used to revert the effect of phy_speed_down + */ +int phy_speed_up(struct phy_device *phydev) +{ + u32 mask = PHY_10BT_FEATURES | PHY_100BT_FEATURES | PHY_1000BT_FEATURES; + u32 adv_old = phydev->advertising; + + if (phydev->autoneg != AUTONEG_ENABLE) + return 0; + + phydev->advertising = (adv_old & ~mask) | (phydev->supported & mask); + + if (phydev->advertising == adv_old) + return 0; + + return phy_config_aneg(phydev); +} +EXPORT_SYMBOL_GPL(phy_speed_up); + /** * phy_start_machine - start PHY state machine tracking * @phydev: the phy_device struct diff --git a/drivers/net/phy/realtek.c b/drivers/net/phy/realtek.c index 082fb40c656d..7fc8508b5231 100644 --- a/drivers/net/phy/realtek.c +++ b/drivers/net/phy/realtek.c @@ -37,6 +37,9 @@ #define RTL8201F_ISR 0x1e #define RTL8201F_IER 0x13 +#define RTL8366RB_POWER_SAVE 0x15 +#define RTL8366RB_POWER_SAVE_ON BIT(12) + MODULE_DESCRIPTION("Realtek PHY driver"); MODULE_AUTHOR("Johnson Leung"); MODULE_LICENSE("GPL"); @@ -128,6 +131,37 @@ static int rtl8211f_config_intr(struct phy_device *phydev) return phy_write_paged(phydev, 0xa42, RTL821x_INER, val); } +static int rtl8211_config_aneg(struct phy_device *phydev) +{ + int ret; + + ret = genphy_config_aneg(phydev); + if (ret < 0) + return ret; + + /* Quirk was copied from vendor driver. Unfortunately it includes no + * description of the magic numbers. 
+ */ + if (phydev->speed == SPEED_100 && phydev->autoneg == AUTONEG_DISABLE) { + phy_write(phydev, 0x17, 0x2138); + phy_write(phydev, 0x0e, 0x0260); + } else { + phy_write(phydev, 0x17, 0x2108); + phy_write(phydev, 0x0e, 0x0000); + } + + return 0; +} + +static int rtl8211c_config_init(struct phy_device *phydev) +{ + /* RTL8211C has an issue when operating in Gigabit slave mode */ + phy_set_bits(phydev, MII_CTRL1000, + CTL1000_ENABLE_MASTER | CTL1000_AS_MASTER); + + return genphy_config_init(phydev); +} + static int rtl8211f_config_init(struct phy_device *phydev) { int ret; @@ -159,6 +193,24 @@ static int rtl8211b_resume(struct phy_device *phydev) return genphy_resume(phydev); } +static int rtl8366rb_config_init(struct phy_device *phydev) +{ + int ret; + + ret = genphy_config_init(phydev); + if (ret < 0) + return ret; + + ret = phy_set_bits(phydev, RTL8366RB_POWER_SAVE, + RTL8366RB_POWER_SAVE_ON); + if (ret) { + dev_err(&phydev->mdio.dev, + "error enabling power management\n"); + } + + return ret; +} + static struct phy_driver realtek_drvs[] = { { .phy_id = 0x00008201, @@ -179,6 +231,14 @@ static struct phy_driver realtek_drvs[] = { .read_page = rtl821x_read_page, .write_page = rtl821x_write_page, }, { + .phy_id = 0x001cc910, + .name = "RTL8211 Gigabit Ethernet", + .phy_id_mask = 0x001fffff, + .features = PHY_GBIT_FEATURES, + .config_aneg = rtl8211_config_aneg, + .read_mmd = &genphy_read_mmd_unsupported, + .write_mmd = &genphy_write_mmd_unsupported, + }, { .phy_id = 0x001cc912, .name = "RTL8211B Gigabit Ethernet", .phy_id_mask = 0x001fffff, @@ -191,6 +251,14 @@ static struct phy_driver realtek_drvs[] = { .suspend = rtl8211b_suspend, .resume = rtl8211b_resume, }, { + .phy_id = 0x001cc913, + .name = "RTL8211C Gigabit Ethernet", + .phy_id_mask = 0x001fffff, + .features = PHY_GBIT_FEATURES, + .config_init = rtl8211c_config_init, + .read_mmd = &genphy_read_mmd_unsupported, + .write_mmd = &genphy_write_mmd_unsupported, + }, { .phy_id = 0x001cc914, .name = "RTL8211DN Gigabit Ethernet", .phy_id_mask = 0x001fffff, @@ -223,6 +291,15 @@ static struct phy_driver realtek_drvs[] = { .resume = genphy_resume, .read_page = rtl821x_read_page, .write_page = rtl821x_write_page, + }, { + .phy_id = 0x001cc961, + .name = "RTL8366RB Gigabit Ethernet", + .phy_id_mask = 0x001fffff, + .features = PHY_GBIT_FEATURES, + .flags = PHY_HAS_INTERRUPT, + .config_init = &rtl8366rb_config_init, + .suspend = genphy_suspend, + .resume = genphy_resume, }, }; @@ -230,10 +307,13 @@ module_phy_driver(realtek_drvs); static struct mdio_device_id __maybe_unused realtek_tbl[] = { { 0x001cc816, 0x001fffff }, + { 0x001cc910, 0x001fffff }, { 0x001cc912, 0x001fffff }, + { 0x001cc913, 0x001fffff }, { 0x001cc914, 0x001fffff }, { 0x001cc915, 0x001fffff }, { 0x001cc916, 0x001fffff }, + { 0x001cc961, 0x001fffff }, { } }; diff --git a/drivers/net/phy/sfp.c b/drivers/net/phy/sfp.c index c4c92db86dfa..5661226cf75b 100644 --- a/drivers/net/phy/sfp.c +++ b/drivers/net/phy/sfp.c @@ -1,5 +1,7 @@ +#include <linux/ctype.h> #include <linux/delay.h> #include <linux/gpio/consumer.h> +#include <linux/hwmon.h> #include <linux/i2c.h> #include <linux/interrupt.h> #include <linux/jiffies.h> @@ -131,6 +133,12 @@ struct sfp { unsigned int sm_retries; struct sfp_eeprom_id id; +#if IS_ENABLED(CONFIG_HWMON) + struct sfp_diag diag; + struct device *hwmon_dev; + char *hwmon_name; +#endif + }; static bool sff_module_supported(const struct sfp_eeprom_id *id) @@ -316,6 +324,719 @@ static unsigned int sfp_check(void *buf, size_t len) return check; } +/* hwmon */ +#if 
IS_ENABLED(CONFIG_HWMON) +static umode_t sfp_hwmon_is_visible(const void *data, + enum hwmon_sensor_types type, + u32 attr, int channel) +{ + const struct sfp *sfp = data; + + switch (type) { + case hwmon_temp: + switch (attr) { + case hwmon_temp_input: + case hwmon_temp_min_alarm: + case hwmon_temp_max_alarm: + case hwmon_temp_lcrit_alarm: + case hwmon_temp_crit_alarm: + case hwmon_temp_min: + case hwmon_temp_max: + case hwmon_temp_lcrit: + case hwmon_temp_crit: + return 0444; + default: + return 0; + } + case hwmon_in: + switch (attr) { + case hwmon_in_input: + case hwmon_in_min_alarm: + case hwmon_in_max_alarm: + case hwmon_in_lcrit_alarm: + case hwmon_in_crit_alarm: + case hwmon_in_min: + case hwmon_in_max: + case hwmon_in_lcrit: + case hwmon_in_crit: + return 0444; + default: + return 0; + } + case hwmon_curr: + switch (attr) { + case hwmon_curr_input: + case hwmon_curr_min_alarm: + case hwmon_curr_max_alarm: + case hwmon_curr_lcrit_alarm: + case hwmon_curr_crit_alarm: + case hwmon_curr_min: + case hwmon_curr_max: + case hwmon_curr_lcrit: + case hwmon_curr_crit: + return 0444; + default: + return 0; + } + case hwmon_power: + /* External calibration of receive power requires + * floating point arithmetic. Doing that in the kernel + * is not easy, so just skip it. If the module does + * not require external calibration, we can however + * show receiver power, since FP is then not needed. + */ + if (sfp->id.ext.diagmon & SFP_DIAGMON_EXT_CAL && + channel == 1) + return 0; + switch (attr) { + case hwmon_power_input: + case hwmon_power_min_alarm: + case hwmon_power_max_alarm: + case hwmon_power_lcrit_alarm: + case hwmon_power_crit_alarm: + case hwmon_power_min: + case hwmon_power_max: + case hwmon_power_lcrit: + case hwmon_power_crit: + return 0444; + default: + return 0; + } + default: + return 0; + } +} + +static int sfp_hwmon_read_sensor(struct sfp *sfp, int reg, long *value) +{ + __be16 val; + int err; + + err = sfp_read(sfp, true, reg, &val, sizeof(val)); + if (err < 0) + return err; + + *value = be16_to_cpu(val); + + return 0; +} + +static void sfp_hwmon_to_rx_power(long *value) +{ + *value = DIV_ROUND_CLOSEST(*value, 100); +} + +static void sfp_hwmon_calibrate(struct sfp *sfp, unsigned int slope, int offset, + long *value) +{ + if (sfp->id.ext.diagmon & SFP_DIAGMON_EXT_CAL) + *value = DIV_ROUND_CLOSEST(*value * slope, 256) + offset; +} + +static void sfp_hwmon_calibrate_temp(struct sfp *sfp, long *value) +{ + sfp_hwmon_calibrate(sfp, be16_to_cpu(sfp->diag.cal_t_slope), + be16_to_cpu(sfp->diag.cal_t_offset), value); + + if (*value >= 0x8000) + *value -= 0x10000; + + *value = DIV_ROUND_CLOSEST(*value * 1000, 256); +} + +static void sfp_hwmon_calibrate_vcc(struct sfp *sfp, long *value) +{ + sfp_hwmon_calibrate(sfp, be16_to_cpu(sfp->diag.cal_v_slope), + be16_to_cpu(sfp->diag.cal_v_offset), value); + + *value = DIV_ROUND_CLOSEST(*value, 10); +} + +static void sfp_hwmon_calibrate_bias(struct sfp *sfp, long *value) +{ + sfp_hwmon_calibrate(sfp, be16_to_cpu(sfp->diag.cal_txi_slope), + be16_to_cpu(sfp->diag.cal_txi_offset), value); + + *value = DIV_ROUND_CLOSEST(*value, 500); +} + +static void sfp_hwmon_calibrate_tx_power(struct sfp *sfp, long *value) +{ + sfp_hwmon_calibrate(sfp, be16_to_cpu(sfp->diag.cal_txpwr_slope), + be16_to_cpu(sfp->diag.cal_txpwr_offset), value); + + *value = DIV_ROUND_CLOSEST(*value, 10); +} + +static int sfp_hwmon_read_temp(struct sfp *sfp, int reg, long *value) +{ + int err; + + err = sfp_hwmon_read_sensor(sfp, reg, value); + if (err < 0) + return err; + + 
sfp_hwmon_calibrate_temp(sfp, value); + + return 0; +} + +static int sfp_hwmon_read_vcc(struct sfp *sfp, int reg, long *value) +{ + int err; + + err = sfp_hwmon_read_sensor(sfp, reg, value); + if (err < 0) + return err; + + sfp_hwmon_calibrate_vcc(sfp, value); + + return 0; +} + +static int sfp_hwmon_read_bias(struct sfp *sfp, int reg, long *value) +{ + int err; + + err = sfp_hwmon_read_sensor(sfp, reg, value); + if (err < 0) + return err; + + sfp_hwmon_calibrate_bias(sfp, value); + + return 0; +} + +static int sfp_hwmon_read_tx_power(struct sfp *sfp, int reg, long *value) +{ + int err; + + err = sfp_hwmon_read_sensor(sfp, reg, value); + if (err < 0) + return err; + + sfp_hwmon_calibrate_tx_power(sfp, value); + + return 0; +} + +static int sfp_hwmon_read_rx_power(struct sfp *sfp, int reg, long *value) +{ + int err; + + err = sfp_hwmon_read_sensor(sfp, reg, value); + if (err < 0) + return err; + + sfp_hwmon_to_rx_power(value); + + return 0; +} + +static int sfp_hwmon_temp(struct sfp *sfp, u32 attr, long *value) +{ + u8 status; + int err; + + switch (attr) { + case hwmon_temp_input: + return sfp_hwmon_read_temp(sfp, SFP_TEMP, value); + + case hwmon_temp_lcrit: + *value = be16_to_cpu(sfp->diag.temp_low_alarm); + sfp_hwmon_calibrate_temp(sfp, value); + return 0; + + case hwmon_temp_min: + *value = be16_to_cpu(sfp->diag.temp_low_warn); + sfp_hwmon_calibrate_temp(sfp, value); + return 0; + case hwmon_temp_max: + *value = be16_to_cpu(sfp->diag.temp_high_warn); + sfp_hwmon_calibrate_temp(sfp, value); + return 0; + + case hwmon_temp_crit: + *value = be16_to_cpu(sfp->diag.temp_high_alarm); + sfp_hwmon_calibrate_temp(sfp, value); + return 0; + + case hwmon_temp_lcrit_alarm: + err = sfp_read(sfp, true, SFP_ALARM0, &status, sizeof(status)); + if (err < 0) + return err; + + *value = !!(status & SFP_ALARM0_TEMP_LOW); + return 0; + + case hwmon_temp_min_alarm: + err = sfp_read(sfp, true, SFP_WARN0, &status, sizeof(status)); + if (err < 0) + return err; + + *value = !!(status & SFP_WARN0_TEMP_LOW); + return 0; + + case hwmon_temp_max_alarm: + err = sfp_read(sfp, true, SFP_WARN0, &status, sizeof(status)); + if (err < 0) + return err; + + *value = !!(status & SFP_WARN0_TEMP_HIGH); + return 0; + + case hwmon_temp_crit_alarm: + err = sfp_read(sfp, true, SFP_ALARM0, &status, sizeof(status)); + if (err < 0) + return err; + + *value = !!(status & SFP_ALARM0_TEMP_HIGH); + return 0; + default: + return -EOPNOTSUPP; + } + + return -EOPNOTSUPP; +} + +static int sfp_hwmon_vcc(struct sfp *sfp, u32 attr, long *value) +{ + u8 status; + int err; + + switch (attr) { + case hwmon_in_input: + return sfp_hwmon_read_vcc(sfp, SFP_VCC, value); + + case hwmon_in_lcrit: + *value = be16_to_cpu(sfp->diag.volt_low_alarm); + sfp_hwmon_calibrate_vcc(sfp, value); + return 0; + + case hwmon_in_min: + *value = be16_to_cpu(sfp->diag.volt_low_warn); + sfp_hwmon_calibrate_vcc(sfp, value); + return 0; + + case hwmon_in_max: + *value = be16_to_cpu(sfp->diag.volt_high_warn); + sfp_hwmon_calibrate_vcc(sfp, value); + return 0; + + case hwmon_in_crit: + *value = be16_to_cpu(sfp->diag.volt_high_alarm); + sfp_hwmon_calibrate_vcc(sfp, value); + return 0; + + case hwmon_in_lcrit_alarm: + err = sfp_read(sfp, true, SFP_ALARM0, &status, sizeof(status)); + if (err < 0) + return err; + + *value = !!(status & SFP_ALARM0_VCC_LOW); + return 0; + + case hwmon_in_min_alarm: + err = sfp_read(sfp, true, SFP_WARN0, &status, sizeof(status)); + if (err < 0) + return err; + + *value = !!(status & SFP_WARN0_VCC_LOW); + return 0; + + case hwmon_in_max_alarm: + err = 
sfp_read(sfp, true, SFP_WARN0, &status, sizeof(status)); + if (err < 0) + return err; + + *value = !!(status & SFP_WARN0_VCC_HIGH); + return 0; + + case hwmon_in_crit_alarm: + err = sfp_read(sfp, true, SFP_ALARM0, &status, sizeof(status)); + if (err < 0) + return err; + + *value = !!(status & SFP_ALARM0_VCC_HIGH); + return 0; + default: + return -EOPNOTSUPP; + } + + return -EOPNOTSUPP; +} + +static int sfp_hwmon_bias(struct sfp *sfp, u32 attr, long *value) +{ + u8 status; + int err; + + switch (attr) { + case hwmon_curr_input: + return sfp_hwmon_read_bias(sfp, SFP_TX_BIAS, value); + + case hwmon_curr_lcrit: + *value = be16_to_cpu(sfp->diag.bias_low_alarm); + sfp_hwmon_calibrate_bias(sfp, value); + return 0; + + case hwmon_curr_min: + *value = be16_to_cpu(sfp->diag.bias_low_warn); + sfp_hwmon_calibrate_bias(sfp, value); + return 0; + + case hwmon_curr_max: + *value = be16_to_cpu(sfp->diag.bias_high_warn); + sfp_hwmon_calibrate_bias(sfp, value); + return 0; + + case hwmon_curr_crit: + *value = be16_to_cpu(sfp->diag.bias_high_alarm); + sfp_hwmon_calibrate_bias(sfp, value); + return 0; + + case hwmon_curr_lcrit_alarm: + err = sfp_read(sfp, true, SFP_ALARM0, &status, sizeof(status)); + if (err < 0) + return err; + + *value = !!(status & SFP_ALARM0_TX_BIAS_LOW); + return 0; + + case hwmon_curr_min_alarm: + err = sfp_read(sfp, true, SFP_WARN0, &status, sizeof(status)); + if (err < 0) + return err; + + *value = !!(status & SFP_WARN0_TX_BIAS_LOW); + return 0; + + case hwmon_curr_max_alarm: + err = sfp_read(sfp, true, SFP_WARN0, &status, sizeof(status)); + if (err < 0) + return err; + + *value = !!(status & SFP_WARN0_TX_BIAS_HIGH); + return 0; + + case hwmon_curr_crit_alarm: + err = sfp_read(sfp, true, SFP_ALARM0, &status, sizeof(status)); + if (err < 0) + return err; + + *value = !!(status & SFP_ALARM0_TX_BIAS_HIGH); + return 0; + default: + return -EOPNOTSUPP; + } + + return -EOPNOTSUPP; +} + +static int sfp_hwmon_tx_power(struct sfp *sfp, u32 attr, long *value) +{ + u8 status; + int err; + + switch (attr) { + case hwmon_power_input: + return sfp_hwmon_read_tx_power(sfp, SFP_TX_POWER, value); + + case hwmon_power_lcrit: + *value = be16_to_cpu(sfp->diag.txpwr_low_alarm); + sfp_hwmon_calibrate_tx_power(sfp, value); + return 0; + + case hwmon_power_min: + *value = be16_to_cpu(sfp->diag.txpwr_low_warn); + sfp_hwmon_calibrate_tx_power(sfp, value); + return 0; + + case hwmon_power_max: + *value = be16_to_cpu(sfp->diag.txpwr_high_warn); + sfp_hwmon_calibrate_tx_power(sfp, value); + return 0; + + case hwmon_power_crit: + *value = be16_to_cpu(sfp->diag.txpwr_high_alarm); + sfp_hwmon_calibrate_tx_power(sfp, value); + return 0; + + case hwmon_power_lcrit_alarm: + err = sfp_read(sfp, true, SFP_ALARM0, &status, sizeof(status)); + if (err < 0) + return err; + + *value = !!(status & SFP_ALARM0_TXPWR_LOW); + return 0; + + case hwmon_power_min_alarm: + err = sfp_read(sfp, true, SFP_WARN0, &status, sizeof(status)); + if (err < 0) + return err; + + *value = !!(status & SFP_WARN0_TXPWR_LOW); + return 0; + + case hwmon_power_max_alarm: + err = sfp_read(sfp, true, SFP_WARN0, &status, sizeof(status)); + if (err < 0) + return err; + + *value = !!(status & SFP_WARN0_TXPWR_HIGH); + return 0; + + case hwmon_power_crit_alarm: + err = sfp_read(sfp, true, SFP_ALARM0, &status, sizeof(status)); + if (err < 0) + return err; + + *value = !!(status & SFP_ALARM0_TXPWR_HIGH); + return 0; + default: + return -EOPNOTSUPP; + } + + return -EOPNOTSUPP; +} + +static int sfp_hwmon_rx_power(struct sfp *sfp, u32 attr, long *value) +{ + u8 
status; + int err; + + switch (attr) { + case hwmon_power_input: + return sfp_hwmon_read_rx_power(sfp, SFP_RX_POWER, value); + + case hwmon_power_lcrit: + *value = be16_to_cpu(sfp->diag.rxpwr_low_alarm); + sfp_hwmon_to_rx_power(value); + return 0; + + case hwmon_power_min: + *value = be16_to_cpu(sfp->diag.rxpwr_low_warn); + sfp_hwmon_to_rx_power(value); + return 0; + + case hwmon_power_max: + *value = be16_to_cpu(sfp->diag.rxpwr_high_warn); + sfp_hwmon_to_rx_power(value); + return 0; + + case hwmon_power_crit: + *value = be16_to_cpu(sfp->diag.rxpwr_high_alarm); + sfp_hwmon_to_rx_power(value); + return 0; + + case hwmon_power_lcrit_alarm: + err = sfp_read(sfp, true, SFP_ALARM1, &status, sizeof(status)); + if (err < 0) + return err; + + *value = !!(status & SFP_ALARM1_RXPWR_LOW); + return 0; + + case hwmon_power_min_alarm: + err = sfp_read(sfp, true, SFP_WARN1, &status, sizeof(status)); + if (err < 0) + return err; + + *value = !!(status & SFP_WARN1_RXPWR_LOW); + return 0; + + case hwmon_power_max_alarm: + err = sfp_read(sfp, true, SFP_WARN1, &status, sizeof(status)); + if (err < 0) + return err; + + *value = !!(status & SFP_WARN1_RXPWR_HIGH); + return 0; + + case hwmon_power_crit_alarm: + err = sfp_read(sfp, true, SFP_ALARM1, &status, sizeof(status)); + if (err < 0) + return err; + + *value = !!(status & SFP_ALARM1_RXPWR_HIGH); + return 0; + default: + return -EOPNOTSUPP; + } + + return -EOPNOTSUPP; +} + +static int sfp_hwmon_read(struct device *dev, enum hwmon_sensor_types type, + u32 attr, int channel, long *value) +{ + struct sfp *sfp = dev_get_drvdata(dev); + + switch (type) { + case hwmon_temp: + return sfp_hwmon_temp(sfp, attr, value); + case hwmon_in: + return sfp_hwmon_vcc(sfp, attr, value); + case hwmon_curr: + return sfp_hwmon_bias(sfp, attr, value); + case hwmon_power: + switch (channel) { + case 0: + return sfp_hwmon_tx_power(sfp, attr, value); + case 1: + return sfp_hwmon_rx_power(sfp, attr, value); + default: + return -EOPNOTSUPP; + } + default: + return -EOPNOTSUPP; + } +} + +static const struct hwmon_ops sfp_hwmon_ops = { + .is_visible = sfp_hwmon_is_visible, + .read = sfp_hwmon_read, +}; + +static u32 sfp_hwmon_chip_config[] = { + HWMON_C_REGISTER_TZ, + 0, +}; + +static const struct hwmon_channel_info sfp_hwmon_chip = { + .type = hwmon_chip, + .config = sfp_hwmon_chip_config, +}; + +static u32 sfp_hwmon_temp_config[] = { + HWMON_T_INPUT | + HWMON_T_MAX | HWMON_T_MIN | + HWMON_T_MAX_ALARM | HWMON_T_MIN_ALARM | + HWMON_T_CRIT | HWMON_T_LCRIT | + HWMON_T_CRIT_ALARM | HWMON_T_LCRIT_ALARM, + 0, +}; + +static const struct hwmon_channel_info sfp_hwmon_temp_channel_info = { + .type = hwmon_temp, + .config = sfp_hwmon_temp_config, +}; + +static u32 sfp_hwmon_vcc_config[] = { + HWMON_I_INPUT | + HWMON_I_MAX | HWMON_I_MIN | + HWMON_I_MAX_ALARM | HWMON_I_MIN_ALARM | + HWMON_I_CRIT | HWMON_I_LCRIT | + HWMON_I_CRIT_ALARM | HWMON_I_LCRIT_ALARM, + 0, +}; + +static const struct hwmon_channel_info sfp_hwmon_vcc_channel_info = { + .type = hwmon_in, + .config = sfp_hwmon_vcc_config, +}; + +static u32 sfp_hwmon_bias_config[] = { + HWMON_C_INPUT | + HWMON_C_MAX | HWMON_C_MIN | + HWMON_C_MAX_ALARM | HWMON_C_MIN_ALARM | + HWMON_C_CRIT | HWMON_C_LCRIT | + HWMON_C_CRIT_ALARM | HWMON_C_LCRIT_ALARM, + 0, +}; + +static const struct hwmon_channel_info sfp_hwmon_bias_channel_info = { + .type = hwmon_curr, + .config = sfp_hwmon_bias_config, +}; + +static u32 sfp_hwmon_power_config[] = { + /* Transmit power */ + HWMON_P_INPUT | + HWMON_P_MAX | HWMON_P_MIN | + HWMON_P_MAX_ALARM | HWMON_P_MIN_ALARM | + 
HWMON_P_CRIT | HWMON_P_LCRIT | + HWMON_P_CRIT_ALARM | HWMON_P_LCRIT_ALARM, + /* Receive power */ + HWMON_P_INPUT | + HWMON_P_MAX | HWMON_P_MIN | + HWMON_P_MAX_ALARM | HWMON_P_MIN_ALARM | + HWMON_P_CRIT | HWMON_P_LCRIT | + HWMON_P_CRIT_ALARM | HWMON_P_LCRIT_ALARM, + 0, +}; + +static const struct hwmon_channel_info sfp_hwmon_power_channel_info = { + .type = hwmon_power, + .config = sfp_hwmon_power_config, +}; + +static const struct hwmon_channel_info *sfp_hwmon_info[] = { + &sfp_hwmon_chip, + &sfp_hwmon_vcc_channel_info, + &sfp_hwmon_temp_channel_info, + &sfp_hwmon_bias_channel_info, + &sfp_hwmon_power_channel_info, + NULL, +}; + +static const struct hwmon_chip_info sfp_hwmon_chip_info = { + .ops = &sfp_hwmon_ops, + .info = sfp_hwmon_info, +}; + +static int sfp_hwmon_insert(struct sfp *sfp) +{ + int err, i; + + if (sfp->id.ext.sff8472_compliance == SFP_SFF8472_COMPLIANCE_NONE) + return 0; + + if (!(sfp->id.ext.diagmon & SFP_DIAGMON_DDM)) + return 0; + + if (sfp->id.ext.diagmon & SFP_DIAGMON_ADDRMODE) + /* This driver in general does not support address + * change. + */ + return 0; + + err = sfp_read(sfp, true, 0, &sfp->diag, sizeof(sfp->diag)); + if (err < 0) + return err; + + sfp->hwmon_name = kstrdup(dev_name(sfp->dev), GFP_KERNEL); + if (!sfp->hwmon_name) + return -ENODEV; + + for (i = 0; sfp->hwmon_name[i]; i++) + if (hwmon_is_bad_char(sfp->hwmon_name[i])) + sfp->hwmon_name[i] = '_'; + + sfp->hwmon_dev = hwmon_device_register_with_info(sfp->dev, + sfp->hwmon_name, sfp, + &sfp_hwmon_chip_info, + NULL); + + return PTR_ERR_OR_ZERO(sfp->hwmon_dev); +} + +static void sfp_hwmon_remove(struct sfp *sfp) +{ + hwmon_device_unregister(sfp->hwmon_dev); + kfree(sfp->hwmon_name); +} +#else +static int sfp_hwmon_insert(struct sfp *sfp) +{ + return 0; +} + +static void sfp_hwmon_remove(struct sfp *sfp) +{ +} +#endif + /* Helpers */ static void sfp_module_tx_disable(struct sfp *sfp) { @@ -636,6 +1357,10 @@ static int sfp_sm_mod_probe(struct sfp *sfp) dev_warn(sfp->dev, "module address swap to access page 0xA2 is not supported.\n"); + ret = sfp_hwmon_insert(sfp); + if (ret < 0) + return ret; + ret = sfp_module_insert(sfp->sfp_bus, &sfp->id); if (ret < 0) return ret; @@ -647,6 +1372,8 @@ static void sfp_sm_mod_remove(struct sfp *sfp) { sfp_module_remove(sfp->sfp_bus); + sfp_hwmon_remove(sfp); + if (sfp->mod_phy) sfp_sm_phy_detach(sfp); diff --git a/drivers/net/phy/vitesse.c b/drivers/net/phy/vitesse.c index d9dd8fbfffc7..fbf9ad429593 100644 --- a/drivers/net/phy/vitesse.c +++ b/drivers/net/phy/vitesse.c @@ -72,6 +72,10 @@ #define PHY_ID_VSC8572 0x000704d0 #define PHY_ID_VSC8574 0x000704a0 #define PHY_ID_VSC8601 0x00070420 +#define PHY_ID_VSC7385 0x00070450 +#define PHY_ID_VSC7388 0x00070480 +#define PHY_ID_VSC7395 0x00070550 +#define PHY_ID_VSC7398 0x00070580 #define PHY_ID_VSC8662 0x00070660 #define PHY_ID_VSC8221 0x000fc550 #define PHY_ID_VSC8211 0x000fc4b0 @@ -116,6 +120,137 @@ static int vsc824x_config_init(struct phy_device *phydev) return err; } +#define VSC73XX_EXT_PAGE_ACCESS 0x1f + +static int vsc73xx_read_page(struct phy_device *phydev) +{ + return __phy_read(phydev, VSC73XX_EXT_PAGE_ACCESS); +} + +static int vsc73xx_write_page(struct phy_device *phydev, int page) +{ + return __phy_write(phydev, VSC73XX_EXT_PAGE_ACCESS, page); +} + +static void vsc73xx_config_init(struct phy_device *phydev) +{ + /* Receiver init */ + phy_write(phydev, 0x1f, 0x2a30); + phy_modify(phydev, 0x0c, 0x0300, 0x0200); + phy_write(phydev, 0x1f, 0x0000); + + /* Config LEDs 0x61 */ + phy_modify(phydev, MII_TPISTATUS, 0xff00, 
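The vsc73xx_read_page()/vsc73xx_write_page() helpers above expose register 0x1f (VSC73XX_EXT_PAGE_ACCESS) as the page selector, which lets phylib's paged accessors do select/access/restore atomically under the MDIO bus lock. A minimal sketch of a consumer, assuming the phy_modify_paged() helper from the same paged-access series is available; the page number, register and bit values below are purely illustrative and not taken from any datasheet:

#include <linux/phy.h>

#define EXAMPLE_PAGE            0x0001  /* illustrative page */
#define EXAMPLE_PAGED_REG       0x14    /* illustrative register on that page */

static int example_paged_tweak(struct phy_device *phydev)
{
        /* Select EXAMPLE_PAGE via the .write_page hook, read-modify-write
         * the register, then restore the previous page - all while holding
         * the MDIO bus lock.
         */
        return phy_modify_paged(phydev, EXAMPLE_PAGE, EXAMPLE_PAGED_REG,
                                0xe000, 0x6000);
}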
0x0061); +} + +static int vsc738x_config_init(struct phy_device *phydev) +{ + u16 rev; + /* This magic sequence appear in the application note + * "VSC7385/7388 PHY Configuration". + * + * Maybe one day we will get to know what it all means. + */ + phy_write(phydev, 0x1f, 0x2a30); + phy_modify(phydev, 0x08, 0x0200, 0x0200); + phy_write(phydev, 0x1f, 0x52b5); + phy_write(phydev, 0x10, 0xb68a); + phy_modify(phydev, 0x12, 0xff07, 0x0003); + phy_modify(phydev, 0x11, 0x00ff, 0x00a2); + phy_write(phydev, 0x10, 0x968a); + phy_write(phydev, 0x1f, 0x2a30); + phy_modify(phydev, 0x08, 0x0200, 0x0000); + phy_write(phydev, 0x1f, 0x0000); + + /* Read revision */ + rev = phy_read(phydev, MII_PHYSID2); + rev &= 0x0f; + + /* Special quirk for revision 0 */ + if (rev == 0) { + phy_write(phydev, 0x1f, 0x2a30); + phy_modify(phydev, 0x08, 0x0200, 0x0200); + phy_write(phydev, 0x1f, 0x52b5); + phy_write(phydev, 0x12, 0x0000); + phy_write(phydev, 0x11, 0x0689); + phy_write(phydev, 0x10, 0x8f92); + phy_write(phydev, 0x1f, 0x52b5); + phy_write(phydev, 0x12, 0x0000); + phy_write(phydev, 0x11, 0x0e35); + phy_write(phydev, 0x10, 0x9786); + phy_write(phydev, 0x1f, 0x2a30); + phy_modify(phydev, 0x08, 0x0200, 0x0000); + phy_write(phydev, 0x17, 0xff80); + phy_write(phydev, 0x17, 0x0000); + } + + phy_write(phydev, 0x1f, 0x0000); + phy_write(phydev, 0x12, 0x0048); + + if (rev == 0) { + phy_write(phydev, 0x1f, 0x2a30); + phy_write(phydev, 0x14, 0x6600); + phy_write(phydev, 0x1f, 0x0000); + phy_write(phydev, 0x18, 0xa24e); + } else { + phy_write(phydev, 0x1f, 0x2a30); + phy_modify(phydev, 0x16, 0x0fc0, 0x0240); + phy_modify(phydev, 0x14, 0x6000, 0x4000); + /* bits 14-15 in extended register 0x14 controls DACG amplitude + * 6 = -8%, 2 is hardware default + */ + phy_write(phydev, 0x1f, 0x0001); + phy_modify(phydev, 0x14, 0xe000, 0x6000); + phy_write(phydev, 0x1f, 0x0000); + } + + vsc73xx_config_init(phydev); + + return genphy_config_init(phydev); +} + +static int vsc739x_config_init(struct phy_device *phydev) +{ + /* This magic sequence appears in the VSC7395 SparX-G5e application + * note "VSC7395/VSC7398 PHY Configuration" + * + * Maybe one day we will get to know what it all means. + */ + phy_write(phydev, 0x1f, 0x2a30); + phy_modify(phydev, 0x08, 0x0200, 0x0200); + phy_write(phydev, 0x1f, 0x52b5); + phy_write(phydev, 0x10, 0xb68a); + phy_modify(phydev, 0x12, 0xff07, 0x0003); + phy_modify(phydev, 0x11, 0x00ff, 0x00a2); + phy_write(phydev, 0x10, 0x968a); + phy_write(phydev, 0x1f, 0x2a30); + phy_modify(phydev, 0x08, 0x0200, 0x0000); + phy_write(phydev, 0x1f, 0x0000); + + phy_write(phydev, 0x1f, 0x0000); + phy_write(phydev, 0x12, 0x0048); + phy_write(phydev, 0x1f, 0x2a30); + phy_modify(phydev, 0x16, 0x0fc0, 0x0240); + phy_modify(phydev, 0x14, 0x6000, 0x4000); + phy_write(phydev, 0x1f, 0x0001); + phy_modify(phydev, 0x14, 0xe000, 0x6000); + phy_write(phydev, 0x1f, 0x0000); + + vsc73xx_config_init(phydev); + + return genphy_config_init(phydev); +} + +static int vsc73xx_config_aneg(struct phy_device *phydev) +{ + /* The VSC73xx switches does not like to be instructed to + * do autonegotiation in any way, it prefers that you just go + * with the power-on/reset defaults. Writing some registers will + * just make autonegotiation permanently fail. + */ + return 0; +} + /* This adds a skew for both TX and RX clocks, so the skew should only be * applied to "rgmii-id" interfaces. It may not work as expected * on "rgmii-txid", "rgmii-rxid" or "rgmii" interfaces. 
*/ @@ -319,6 +454,42 @@ static struct phy_driver vsc82xx_driver[] = { .ack_interrupt = &vsc824x_ack_interrupt, .config_intr = &vsc82xx_config_intr, }, { + .phy_id = PHY_ID_VSC7385, + .name = "Vitesse VSC7385", + .phy_id_mask = 0x000ffff0, + .features = PHY_GBIT_FEATURES, + .config_init = vsc738x_config_init, + .config_aneg = vsc73xx_config_aneg, + .read_page = vsc73xx_read_page, + .write_page = vsc73xx_write_page, +}, { + .phy_id = PHY_ID_VSC7388, + .name = "Vitesse VSC7388", + .phy_id_mask = 0x000ffff0, + .features = PHY_GBIT_FEATURES, + .config_init = vsc738x_config_init, + .config_aneg = vsc73xx_config_aneg, + .read_page = vsc73xx_read_page, + .write_page = vsc73xx_write_page, +}, { + .phy_id = PHY_ID_VSC7395, + .name = "Vitesse VSC7395", + .phy_id_mask = 0x000ffff0, + .features = PHY_GBIT_FEATURES, + .config_init = vsc739x_config_init, + .config_aneg = vsc73xx_config_aneg, + .read_page = vsc73xx_read_page, + .write_page = vsc73xx_write_page, +}, { + .phy_id = PHY_ID_VSC7398, + .name = "Vitesse VSC7398", + .phy_id_mask = 0x000ffff0, + .features = PHY_GBIT_FEATURES, + .config_init = vsc739x_config_init, + .config_aneg = vsc73xx_config_aneg, + .read_page = vsc73xx_read_page, + .write_page = vsc73xx_write_page, +}, { .phy_id = PHY_ID_VSC8662, .name = "Vitesse VSC8662", .phy_id_mask = 0x000ffff0, @@ -358,6 +529,10 @@ static struct mdio_device_id __maybe_unused vitesse_tbl[] = { { PHY_ID_VSC8514, 0x000ffff0 }, { PHY_ID_VSC8572, 0x000ffff0 }, { PHY_ID_VSC8574, 0x000ffff0 }, + { PHY_ID_VSC7385, 0x000ffff0 }, + { PHY_ID_VSC7388, 0x000ffff0 }, + { PHY_ID_VSC7395, 0x000ffff0 }, + { PHY_ID_VSC7398, 0x000ffff0 }, { PHY_ID_VSC8662, 0x000ffff0 }, { PHY_ID_VSC8221, 0x000ffff0 }, { PHY_ID_VSC8211, 0x000ffff0 }, diff --git a/drivers/net/phy/xilinx_gmii2rgmii.c b/drivers/net/phy/xilinx_gmii2rgmii.c index 2e5150b0b8d5..74a8782313cf 100644 --- a/drivers/net/phy/xilinx_gmii2rgmii.c +++ b/drivers/net/phy/xilinx_gmii2rgmii.c @@ -33,17 +33,22 @@ struct gmii2rgmii { struct phy_device *phy_dev; struct phy_driver *phy_drv; struct phy_driver conv_phy_drv; - int addr; + struct mdio_device *mdio; }; static int xgmiitorgmii_read_status(struct phy_device *phydev) { struct gmii2rgmii *priv = phydev->priv; + struct mii_bus *bus = priv->mdio->bus; + int addr = priv->mdio->addr; u16 val = 0; + int err; - priv->phy_drv->read_status(phydev); + err = priv->phy_drv->read_status(phydev); + if (err < 0) + return err; - val = mdiobus_read(phydev->mdio.bus, priv->addr, XILINX_GMII2RGMII_REG); + val = mdiobus_read(bus, addr, XILINX_GMII2RGMII_REG); val &= ~XILINX_GMII2RGMII_SPEED_MASK; if (phydev->speed == SPEED_1000) @@ -53,7 +58,7 @@ static int xgmiitorgmii_read_status(struct phy_device *phydev) else val |= BMCR_SPEED10; - mdiobus_write(phydev->mdio.bus, priv->addr, XILINX_GMII2RGMII_REG, val); + mdiobus_write(bus, addr, XILINX_GMII2RGMII_REG, val); return 0; } @@ -81,7 +86,12 @@ static int xgmiitorgmii_probe(struct mdio_device *mdiodev) return -EPROBE_DEFER; } - priv->addr = mdiodev->addr; + if (!priv->phy_dev->drv) { + dev_info(dev, "Attached phy not ready\n"); + return -EPROBE_DEFER; + } + + priv->mdio = mdiodev; priv->phy_drv = priv->phy_dev->drv; memcpy(&priv->conv_phy_drv, priv->phy_dev->drv, sizeof(struct phy_driver)); diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c index b070959737ff..6a047d30e8c6 100644 --- a/drivers/net/team/team.c +++ b/drivers/net/team/team.c @@ -41,11 +41,6 @@ #define team_port_exists(dev) (dev->priv_flags & IFF_TEAM_PORT) -static struct team_port *team_port_get_rcu(const struct 
net_device *dev) -{ - return rcu_dereference(dev->rx_handler_data); -} - static struct team_port *team_port_get_rtnl(const struct net_device *dev) { struct team_port *port = rtnl_dereference(dev->rx_handler_data); @@ -1707,7 +1702,8 @@ static netdev_tx_t team_xmit(struct sk_buff *skb, struct net_device *dev) } static u16 team_select_queue(struct net_device *dev, struct sk_buff *skb, - void *accel_priv, select_queue_fallback_t fallback) + struct net_device *sb_dev, + select_queue_fallback_t fallback) { /* * This helper function exists to help dev_pick_tx get the correct diff --git a/drivers/net/tun.c b/drivers/net/tun.c index f5727baac84a..0a3134712652 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c @@ -607,7 +607,8 @@ static u16 tun_ebpf_select_queue(struct tun_struct *tun, struct sk_buff *skb) } static u16 tun_select_queue(struct net_device *dev, struct sk_buff *skb, - void *accel_priv, select_queue_fallback_t fallback) + struct net_device *sb_dev, + select_queue_fallback_t fallback) { struct tun_struct *tun = netdev_priv(dev); u16 ret; @@ -1268,7 +1269,6 @@ static int tun_xdp(struct net_device *dev, struct netdev_bpf *xdp) return tun_xdp_set(dev, xdp->prog, xdp->extack); case XDP_QUERY_PROG: xdp->prog_id = tun_xdp_query(dev); - xdp->prog_attached = !!xdp->prog_id; return 0; default: return -EINVAL; diff --git a/drivers/net/usb/asix_devices.c b/drivers/net/usb/asix_devices.c index b1b3d8f7e67d..b654f05b2ccd 100644 --- a/drivers/net/usb/asix_devices.c +++ b/drivers/net/usb/asix_devices.c @@ -693,24 +693,32 @@ static int ax88772_bind(struct usbnet *dev, struct usb_interface *intf) u32 phyid; struct asix_common_private *priv; - usbnet_get_endpoints(dev,intf); + usbnet_get_endpoints(dev, intf); - /* Get the MAC address */ - if (dev->driver_info->data & FLAG_EEPROM_MAC) { - for (i = 0; i < (ETH_ALEN >> 1); i++) { - ret = asix_read_cmd(dev, AX_CMD_READ_EEPROM, 0x04 + i, - 0, 2, buf + i * 2, 0); - if (ret < 0) - break; - } + /* Maybe the boot loader passed the MAC address via device tree */ + if (!eth_platform_get_mac_address(&dev->udev->dev, buf)) { + netif_dbg(dev, ifup, dev->net, + "MAC address read from device tree"); } else { - ret = asix_read_cmd(dev, AX_CMD_READ_NODE_ID, - 0, 0, ETH_ALEN, buf, 0); - } + /* Try getting the MAC address from EEPROM */ + if (dev->driver_info->data & FLAG_EEPROM_MAC) { + for (i = 0; i < (ETH_ALEN >> 1); i++) { + ret = asix_read_cmd(dev, AX_CMD_READ_EEPROM, + 0x04 + i, 0, 2, buf + i * 2, + 0); + if (ret < 0) + break; + } + } else { + ret = asix_read_cmd(dev, AX_CMD_READ_NODE_ID, + 0, 0, ETH_ALEN, buf, 0); + } - if (ret < 0) { - netdev_dbg(dev->net, "Failed to read MAC address: %d\n", ret); - return ret; + if (ret < 0) { + netdev_dbg(dev->net, "Failed to read MAC address: %d\n", + ret); + return ret; + } } asix_set_netdev_dev_addr(dev, buf); diff --git a/drivers/net/usb/catc.c b/drivers/net/usb/catc.c index 18d36dff97ea..424053bd8b21 100644 --- a/drivers/net/usb/catc.c +++ b/drivers/net/usb/catc.c @@ -869,6 +869,7 @@ static int catc_probe(struct usb_interface *intf, const struct usb_device_id *id default: dev_warn(&intf->dev, "Couldn't detect memory size, assuming 32k\n"); + /* fall through */ case 0x87654321: catc_set_reg(catc, TxBufCount, 4); catc_set_reg(catc, RxBufCount, 16); diff --git a/drivers/net/usb/cdc-phonet.c b/drivers/net/usb/cdc-phonet.c index 288ecd999171..78b16eb9e58c 100644 --- a/drivers/net/usb/cdc-phonet.c +++ b/drivers/net/usb/cdc-phonet.c @@ -99,6 +99,7 @@ static void tx_complete(struct urb *req) struct net_device *dev = skb->dev; 
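The team_select_queue() and tun_select_queue() hunks above (and the mwifiex, xen-netback and xen-netfront ones later in this series) all follow the same interface change: the opaque accel_priv argument of .ndo_select_queue is replaced by a struct net_device *sb_dev, and the fallback helper now takes it as a third argument. A minimal sketch of a driver callback on the new prototype; the queue-selection policy itself is purely illustrative:

#include <linux/netdevice.h>
#include <linux/skbuff.h>

static u16 example_select_queue(struct net_device *dev, struct sk_buff *skb,
                                struct net_device *sb_dev,
                                select_queue_fallback_t fallback)
{
        /* Illustrative policy: steer priority traffic to the last queue,
         * otherwise defer to the core fallback, which now also receives
         * the subordinate device pointer.
         */
        if (skb->priority && dev->real_num_tx_queues > 1)
                return dev->real_num_tx_queues - 1;

        return fallback(dev, skb, sb_dev) % dev->real_num_tx_queues;
}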
struct usbpn_dev *pnd = netdev_priv(dev); int status = req->status; + unsigned long flags; switch (status) { case 0: @@ -109,16 +110,17 @@ static void tx_complete(struct urb *req) case -ECONNRESET: case -ESHUTDOWN: dev->stats.tx_aborted_errors++; + /* fall through */ default: dev->stats.tx_errors++; dev_dbg(&dev->dev, "TX error (%d)\n", status); } dev->stats.tx_packets++; - spin_lock(&pnd->tx_lock); + spin_lock_irqsave(&pnd->tx_lock, flags); pnd->tx_queue--; netif_wake_queue(dev); - spin_unlock(&pnd->tx_lock); + spin_unlock_irqrestore(&pnd->tx_lock, flags); dev_kfree_skb_any(skb); usb_free_urb(req); diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c index e53883ad6107..184c24baca15 100644 --- a/drivers/net/usb/hso.c +++ b/drivers/net/usb/hso.c @@ -999,6 +999,7 @@ static void read_bulk_callback(struct urb *urb) struct hso_net *odev = urb->context; struct net_device *net; int result; + unsigned long flags; int status = urb->status; /* is al ok? (Filip: Who's Al ?) */ @@ -1028,11 +1029,11 @@ static void read_bulk_callback(struct urb *urb) if (urb->actual_length) { /* Handle the IP stream, add header and push it onto network * stack if the packet is complete. */ - spin_lock(&odev->net_lock); + spin_lock_irqsave(&odev->net_lock, flags); packetizeRx(odev, urb->transfer_buffer, urb->actual_length, (urb->transfer_buffer_length > urb->actual_length) ? 1 : 0); - spin_unlock(&odev->net_lock); + spin_unlock_irqrestore(&odev->net_lock, flags); } /* We are done with this URB, resubmit it. Prep the USB to wait for @@ -1193,6 +1194,7 @@ static void hso_std_serial_read_bulk_callback(struct urb *urb) { struct hso_serial *serial = urb->context; int status = urb->status; + unsigned long flags; hso_dbg(0x8, "--- Got serial_read_bulk callback %02x ---\n", status); @@ -1216,10 +1218,10 @@ static void hso_std_serial_read_bulk_callback(struct urb *urb) if (serial->parent->port_spec & HSO_INFO_CRC_BUG) fix_crc_bug(urb, serial->in_endp->wMaxPacketSize); /* Valid data, handle RX data */ - spin_lock(&serial->serial_lock); + spin_lock_irqsave(&serial->serial_lock, flags); serial->rx_urb_filled[hso_urb_to_index(serial, urb)] = 1; put_rxbuf_data_and_resubmit_bulk_urb(serial); - spin_unlock(&serial->serial_lock); + spin_unlock_irqrestore(&serial->serial_lock, flags); } /* @@ -1502,12 +1504,13 @@ static void tiocmget_intr_callback(struct urb *urb) DUMP(serial_state_notification, sizeof(struct hso_serial_state_notification)); } else { + unsigned long flags; UART_state_bitmap = le16_to_cpu(serial_state_notification-> UART_state_bitmap); prev_UART_state_bitmap = tiocmget->prev_UART_state_bitmap; icount = &tiocmget->icount; - spin_lock(&serial->serial_lock); + spin_lock_irqsave(&serial->serial_lock, flags); if ((UART_state_bitmap & B_OVERRUN) != (prev_UART_state_bitmap & B_OVERRUN)) icount->parity++; @@ -1530,7 +1533,7 @@ static void tiocmget_intr_callback(struct urb *urb) (prev_UART_state_bitmap & B_RX_CARRIER)) icount->dcd++; tiocmget->prev_UART_state_bitmap = UART_state_bitmap; - spin_unlock(&serial->serial_lock); + spin_unlock_irqrestore(&serial->serial_lock, flags); tiocmget->intr_completed = 1; wake_up_interruptible(&tiocmget->waitq); } @@ -1729,7 +1732,6 @@ static int hso_serial_ioctl(struct tty_struct *tty, /* starts a transmit */ static void hso_kick_transmit(struct hso_serial *serial) { - u8 *temp; unsigned long flags; int res; @@ -1745,14 +1747,12 @@ static void hso_kick_transmit(struct hso_serial *serial) goto out; /* Switch pointers around to avoid memcpy */ - temp = serial->tx_buffer; - serial->tx_buffer 
= serial->tx_data; - serial->tx_data = temp; + swap(serial->tx_buffer, serial->tx_data); serial->tx_data_count = serial->tx_buffer_count; serial->tx_buffer_count = 0; - /* If temp is set, it means we switched buffers */ - if (temp && serial->write_data) { + /* If serial->tx_data is set, it means we switched buffers */ + if (serial->tx_data && serial->write_data) { res = serial->write_data(serial); if (res >= 0) serial->tx_urb_used = 1; @@ -1852,6 +1852,7 @@ static void intr_callback(struct urb *urb) struct hso_serial *serial; unsigned char *port_req; int status = urb->status; + unsigned long flags; int i; usb_mark_last_busy(urb->dev); @@ -1879,7 +1880,7 @@ static void intr_callback(struct urb *urb) if (serial != NULL) { hso_dbg(0x1, "Pending read interrupt on port %d\n", i); - spin_lock(&serial->serial_lock); + spin_lock_irqsave(&serial->serial_lock, flags); if (serial->rx_state == RX_IDLE && serial->port.count > 0) { /* Setup and send a ctrl req read on @@ -1893,7 +1894,8 @@ static void intr_callback(struct urb *urb) hso_dbg(0x1, "Already a read pending on port %d or port not open\n", i); } - spin_unlock(&serial->serial_lock); + spin_unlock_irqrestore(&serial->serial_lock, + flags); } } } @@ -1920,6 +1922,7 @@ static void hso_std_serial_write_bulk_callback(struct urb *urb) { struct hso_serial *serial = urb->context; int status = urb->status; + unsigned long flags; /* sanity check */ if (!serial) { @@ -1927,9 +1930,9 @@ static void hso_std_serial_write_bulk_callback(struct urb *urb) return; } - spin_lock(&serial->serial_lock); + spin_lock_irqsave(&serial->serial_lock, flags); serial->tx_urb_used = 0; - spin_unlock(&serial->serial_lock); + spin_unlock_irqrestore(&serial->serial_lock, flags); if (status) { handle_usb_error(status, __func__, serial->parent); return; @@ -1971,14 +1974,15 @@ static void ctrl_callback(struct urb *urb) struct hso_serial *serial = urb->context; struct usb_ctrlrequest *req; int status = urb->status; + unsigned long flags; /* sanity check */ if (!serial) return; - spin_lock(&serial->serial_lock); + spin_lock_irqsave(&serial->serial_lock, flags); serial->tx_urb_used = 0; - spin_unlock(&serial->serial_lock); + spin_unlock_irqrestore(&serial->serial_lock, flags); if (status) { handle_usb_error(status, __func__, serial->parent); return; @@ -1994,9 +1998,9 @@ static void ctrl_callback(struct urb *urb) (USB_DIR_IN | USB_TYPE_OPTION_VENDOR | USB_RECIP_INTERFACE)) { /* response to a read command */ serial->rx_urb_filled[0] = 1; - spin_lock(&serial->serial_lock); + spin_lock_irqsave(&serial->serial_lock, flags); put_rxbuf_data_and_resubmit_ctrl_urb(serial); - spin_unlock(&serial->serial_lock); + spin_unlock_irqrestore(&serial->serial_lock, flags); } else { hso_put_activity(serial->parent); tty_port_tty_wakeup(&serial->port); diff --git a/drivers/net/usb/kaweth.c b/drivers/net/usb/kaweth.c index f1605833c5cf..913e50bab0a2 100644 --- a/drivers/net/usb/kaweth.c +++ b/drivers/net/usb/kaweth.c @@ -587,7 +587,7 @@ static void kaweth_usb_receive(struct urb *urb) struct kaweth_device *kaweth = urb->context; struct net_device *net = kaweth->net; int status = urb->status; - + unsigned long flags; int count = urb->actual_length; int count2 = urb->transfer_buffer_length; @@ -619,12 +619,12 @@ static void kaweth_usb_receive(struct urb *urb) net->stats.rx_errors++; dev_dbg(dev, "Status was -EOVERFLOW.\n"); } - spin_lock(&kaweth->device_lock); + spin_lock_irqsave(&kaweth->device_lock, flags); if (IS_BLOCKED(kaweth->status)) { - spin_unlock(&kaweth->device_lock); + 
spin_unlock_irqrestore(&kaweth->device_lock, flags); return; } - spin_unlock(&kaweth->device_lock); + spin_unlock_irqrestore(&kaweth->device_lock, flags); if(status && status != -EREMOTEIO && count != 1) { dev_err(&kaweth->intf->dev, diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c index ed10d49eb5e0..ac25981ee4d5 100644 --- a/drivers/net/usb/lan78xx.c +++ b/drivers/net/usb/lan78xx.c @@ -1721,7 +1721,7 @@ static void lan78xx_init_mac_address(struct lan78xx_net *dev) "MAC address read from EEPROM"); } else { /* generate random MAC */ - random_ether_addr(addr); + eth_random_addr(addr); netif_dbg(dev, ifup, dev->net, "MAC address set to random addr"); } diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c index 2a58607a6aea..124211afb023 100644 --- a/drivers/net/usb/r8152.c +++ b/drivers/net/usb/r8152.c @@ -1252,6 +1252,7 @@ static void read_bulk_callback(struct urb *urb) int status = urb->status; struct rx_agg *agg; struct r8152 *tp; + unsigned long flags; agg = urb->context; if (!agg) @@ -1281,9 +1282,9 @@ static void read_bulk_callback(struct urb *urb) if (urb->actual_length < ETH_ZLEN) break; - spin_lock(&tp->rx_lock); + spin_lock_irqsave(&tp->rx_lock, flags); list_add_tail(&agg->list, &tp->rx_done); - spin_unlock(&tp->rx_lock); + spin_unlock_irqrestore(&tp->rx_lock, flags); napi_schedule(&tp->napi); return; case -ESHUTDOWN: @@ -1311,6 +1312,7 @@ static void write_bulk_callback(struct urb *urb) struct net_device *netdev; struct tx_agg *agg; struct r8152 *tp; + unsigned long flags; int status = urb->status; agg = urb->context; @@ -1332,9 +1334,9 @@ static void write_bulk_callback(struct urb *urb) stats->tx_bytes += agg->skb_len; } - spin_lock(&tp->tx_lock); + spin_lock_irqsave(&tp->tx_lock, flags); list_add_tail(&agg->list, &tp->tx_free); - spin_unlock(&tp->tx_lock); + spin_unlock_irqrestore(&tp->tx_lock, flags); usb_autopm_put_interface_async(tp->intf); @@ -1374,6 +1376,7 @@ static void intr_callback(struct urb *urb) case -ECONNRESET: /* unlink */ case -ESHUTDOWN: netif_device_detach(tp->netdev); + /* fall through */ case -ENOENT: case -EPROTO: netif_info(tp, intr, tp->netdev, @@ -2739,6 +2742,7 @@ static void r8153b_ups_en(struct r8152 *tp, bool enable) r8152_mdio_write(tp, MII_BMCR, data); data = r8153_phy_status(tp, PHY_STAT_LAN_ON); + /* fall through */ default: if (data != PHY_STAT_LAN_ON) diff --git a/drivers/net/usb/rtl8150.c b/drivers/net/usb/rtl8150.c index 48ba80a8ca5c..80373a9171dd 100644 --- a/drivers/net/usb/rtl8150.c +++ b/drivers/net/usb/rtl8150.c @@ -391,6 +391,7 @@ static void read_bulk_callback(struct urb *urb) u16 rx_stat; int status = urb->status; int result; + unsigned long flags; dev = urb->context; if (!dev) @@ -432,9 +433,9 @@ static void read_bulk_callback(struct urb *urb) netdev->stats.rx_packets++; netdev->stats.rx_bytes += pkt_len; - spin_lock(&dev->rx_pool_lock); + spin_lock_irqsave(&dev->rx_pool_lock, flags); skb = pull_skb(dev); - spin_unlock(&dev->rx_pool_lock); + spin_unlock_irqrestore(&dev->rx_pool_lock, flags); if (!skb) goto resched; diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index 53085c63277b..2ff08bc103a9 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c @@ -2343,7 +2343,6 @@ static int virtnet_xdp(struct net_device *dev, struct netdev_bpf *xdp) return virtnet_xdp_set(dev, xdp->prog, xdp->extack); case XDP_QUERY_PROG: xdp->prog_id = virtnet_xdp_query(dev); - xdp->prog_attached = !!xdp->prog_id; return 0; default: return -EINVAL; diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c 
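The hso, cdc-phonet, kaweth, r8152 and rtl8150 hunks above all make the same conversion: URB completion handlers switch from spin_lock()/spin_unlock() to the irqsave variants, so the handler no longer relies on the USB core having disabled interrupts before the callback runs. A minimal sketch of the resulting pattern; the driver structure and field names are hypothetical:

#include <linux/spinlock.h>
#include <linux/usb.h>

struct example_priv {                   /* hypothetical private data */
        spinlock_t lock;
        unsigned int tx_pending;
};

static void example_tx_complete(struct urb *urb)
{
        struct example_priv *priv = urb->context;
        unsigned long flags;

        /* The callback may run with interrupts enabled, so save and
         * restore them around a lock that is also taken from
         * interrupt-capable paths.
         */
        spin_lock_irqsave(&priv->lock, flags);
        priv->tx_pending--;
        spin_unlock_irqrestore(&priv->lock, flags);

        usb_free_urb(urb);              /* one-shot TX URB, as in cdc-phonet */
}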
index f6bb1d54d4bd..ababba37d735 100644 --- a/drivers/net/vxlan.c +++ b/drivers/net/vxlan.c @@ -568,11 +568,12 @@ static struct vxlanhdr *vxlan_gro_remcsum(struct sk_buff *skb, return vh; } -static struct sk_buff **vxlan_gro_receive(struct sock *sk, - struct sk_buff **head, - struct sk_buff *skb) +static struct sk_buff *vxlan_gro_receive(struct sock *sk, + struct list_head *head, + struct sk_buff *skb) { - struct sk_buff *p, **pp = NULL; + struct sk_buff *pp = NULL; + struct sk_buff *p; struct vxlanhdr *vh, *vh2; unsigned int hlen, off_vx; int flush = 1; @@ -607,7 +608,7 @@ static struct sk_buff **vxlan_gro_receive(struct sock *sk, skb_gro_pull(skb, sizeof(struct vxlanhdr)); /* pull vxlan header */ - for (p = *head; p; p = p->next) { + list_for_each_entry(p, head, list) { if (!NAPI_GRO_CB(p)->same_flow) continue; @@ -636,9 +637,62 @@ static int vxlan_gro_complete(struct sock *sk, struct sk_buff *skb, int nhoff) return eth_gro_complete(skb, nhoff + sizeof(struct vxlanhdr)); } -/* Add new entry to forwarding table -- assumes lock held */ +static struct vxlan_fdb *vxlan_fdb_alloc(struct vxlan_dev *vxlan, + const u8 *mac, __u16 state, + __be32 src_vni, __u8 ndm_flags) +{ + struct vxlan_fdb *f; + + f = kmalloc(sizeof(*f), GFP_ATOMIC); + if (!f) + return NULL; + f->state = state; + f->flags = ndm_flags; + f->updated = f->used = jiffies; + f->vni = src_vni; + INIT_LIST_HEAD(&f->remotes); + memcpy(f->eth_addr, mac, ETH_ALEN); + + return f; +} + static int vxlan_fdb_create(struct vxlan_dev *vxlan, const u8 *mac, union vxlan_addr *ip, + __u16 state, __be16 port, __be32 src_vni, + __be32 vni, __u32 ifindex, __u8 ndm_flags, + struct vxlan_fdb **fdb) +{ + struct vxlan_rdst *rd = NULL; + struct vxlan_fdb *f; + int rc; + + if (vxlan->cfg.addrmax && + vxlan->addrcnt >= vxlan->cfg.addrmax) + return -ENOSPC; + + netdev_dbg(vxlan->dev, "add %pM -> %pIS\n", mac, ip); + f = vxlan_fdb_alloc(vxlan, mac, state, src_vni, ndm_flags); + if (!f) + return -ENOMEM; + + rc = vxlan_fdb_append(f, ip, port, vni, ifindex, &rd); + if (rc < 0) { + kfree(f); + return rc; + } + + ++vxlan->addrcnt; + hlist_add_head_rcu(&f->hlist, + vxlan_fdb_head(vxlan, mac, src_vni)); + + *fdb = f; + + return 0; +} + +/* Add new entry to forwarding table -- assumes lock held */ +static int vxlan_fdb_update(struct vxlan_dev *vxlan, + const u8 *mac, union vxlan_addr *ip, __u16 state, __u16 flags, __be16 port, __be32 src_vni, __be32 vni, __u32 ifindex, __u8 ndm_flags) @@ -687,37 +741,17 @@ static int vxlan_fdb_create(struct vxlan_dev *vxlan, if (!(flags & NLM_F_CREATE)) return -ENOENT; - if (vxlan->cfg.addrmax && - vxlan->addrcnt >= vxlan->cfg.addrmax) - return -ENOSPC; - /* Disallow replace to add a multicast entry */ if ((flags & NLM_F_REPLACE) && (is_multicast_ether_addr(mac) || is_zero_ether_addr(mac))) return -EOPNOTSUPP; netdev_dbg(vxlan->dev, "add %pM -> %pIS\n", mac, ip); - f = kmalloc(sizeof(*f), GFP_ATOMIC); - if (!f) - return -ENOMEM; - - notify = 1; - f->state = state; - f->flags = ndm_flags; - f->updated = f->used = jiffies; - f->vni = src_vni; - INIT_LIST_HEAD(&f->remotes); - memcpy(f->eth_addr, mac, ETH_ALEN); - - rc = vxlan_fdb_append(f, ip, port, vni, ifindex, &rd); - if (rc < 0) { - kfree(f); + rc = vxlan_fdb_create(vxlan, mac, ip, state, port, src_vni, + vni, ifindex, ndm_flags, &f); + if (rc < 0) return rc; - } - - ++vxlan->addrcnt; - hlist_add_head_rcu(&f->hlist, - vxlan_fdb_head(vxlan, mac, src_vni)); + notify = 1; } if (notify) { @@ -741,13 +775,15 @@ static void vxlan_fdb_free(struct rcu_head *head) kfree(f); } -static void 
vxlan_fdb_destroy(struct vxlan_dev *vxlan, struct vxlan_fdb *f) +static void vxlan_fdb_destroy(struct vxlan_dev *vxlan, struct vxlan_fdb *f, + bool do_notify) { netdev_dbg(vxlan->dev, "delete %pM\n", f->eth_addr); --vxlan->addrcnt; - vxlan_fdb_notify(vxlan, f, first_remote_rtnl(f), RTM_DELNEIGH); + if (do_notify) + vxlan_fdb_notify(vxlan, f, first_remote_rtnl(f), RTM_DELNEIGH); hlist_del_rcu(&f->hlist); call_rcu(&f->rcu, vxlan_fdb_free); @@ -863,7 +899,7 @@ static int vxlan_fdb_add(struct ndmsg *ndm, struct nlattr *tb[], return -EAFNOSUPPORT; spin_lock_bh(&vxlan->hash_lock); - err = vxlan_fdb_create(vxlan, addr, &ip, ndm->ndm_state, flags, + err = vxlan_fdb_update(vxlan, addr, &ip, ndm->ndm_state, flags, port, src_vni, vni, ifindex, ndm->ndm_flags); spin_unlock_bh(&vxlan->hash_lock); @@ -897,7 +933,7 @@ static int __vxlan_fdb_delete(struct vxlan_dev *vxlan, goto out; } - vxlan_fdb_destroy(vxlan, f); + vxlan_fdb_destroy(vxlan, f, true); out: return 0; @@ -1006,7 +1042,7 @@ static bool vxlan_snoop(struct net_device *dev, /* close off race between vxlan_flush and incoming packets */ if (netif_running(dev)) - vxlan_fdb_create(vxlan, src_mac, src_ip, + vxlan_fdb_update(vxlan, src_mac, src_ip, NUD_REACHABLE, NLM_F_EXCL|NLM_F_CREATE, vxlan->cfg.dst_port, @@ -2119,7 +2155,8 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev, vni = tunnel_id_to_key32(info->key.tun_id); ifindex = 0; dst_cache = &info->dst_cache; - if (info->options_len) + if (info->options_len && + info->key.tun_flags & TUNNEL_VXLAN_OPT) md = ip_tunnel_info_opts(info); ttl = info->key.ttl; tos = info->key.tos; @@ -2364,7 +2401,7 @@ static void vxlan_cleanup(struct timer_list *t) "garbage collect %pM\n", f->eth_addr); f->state = NUD_STALE; - vxlan_fdb_destroy(vxlan, f); + vxlan_fdb_destroy(vxlan, f, true); } else if (time_before(timeout, next_timer)) next_timer = timeout; } @@ -2415,7 +2452,7 @@ static void vxlan_fdb_delete_default(struct vxlan_dev *vxlan, __be32 vni) spin_lock_bh(&vxlan->hash_lock); f = __vxlan_find_mac(vxlan, all_zeros_mac, vni); if (f) - vxlan_fdb_destroy(vxlan, f); + vxlan_fdb_destroy(vxlan, f, true); spin_unlock_bh(&vxlan->hash_lock); } @@ -2469,7 +2506,7 @@ static void vxlan_flush(struct vxlan_dev *vxlan, bool do_all) continue; /* the all_zeros_mac entry is deleted at vxlan_uninit */ if (!is_zero_ether_addr(f->eth_addr)) - vxlan_fdb_destroy(vxlan, f); + vxlan_fdb_destroy(vxlan, f, true); } } spin_unlock_bh(&vxlan->hash_lock); @@ -3160,6 +3197,7 @@ static int __vxlan_dev_create(struct net *net, struct net_device *dev, { struct vxlan_net *vn = net_generic(net, vxlan_net_id); struct vxlan_dev *vxlan = netdev_priv(dev); + struct vxlan_fdb *f = NULL; int err; err = vxlan_dev_configure(net, dev, conf, false, extack); @@ -3173,24 +3211,35 @@ static int __vxlan_dev_create(struct net *net, struct net_device *dev, err = vxlan_fdb_create(vxlan, all_zeros_mac, &vxlan->default_dst.remote_ip, NUD_REACHABLE | NUD_PERMANENT, - NLM_F_EXCL | NLM_F_CREATE, vxlan->cfg.dst_port, vxlan->default_dst.remote_vni, vxlan->default_dst.remote_vni, vxlan->default_dst.remote_ifindex, - NTF_SELF); + NTF_SELF, &f); if (err) return err; } err = register_netdevice(dev); + if (err) + goto errout; + + err = rtnl_configure_link(dev, NULL); if (err) { - vxlan_fdb_delete_default(vxlan, vxlan->default_dst.remote_vni); - return err; + unregister_netdevice(dev); + goto errout; } + /* notify default fdb entry */ + if (f) + vxlan_fdb_notify(vxlan, f, first_remote_rtnl(f), RTM_NEWNEIGH); + list_add(&vxlan->next, &vn->vxlan_list); 
return 0; +errout: + if (f) + vxlan_fdb_destroy(vxlan, f, false); + return err; } static int vxlan_nl2conf(struct nlattr *tb[], struct nlattr *data[], @@ -3425,6 +3474,7 @@ static int vxlan_changelink(struct net_device *dev, struct nlattr *tb[], struct vxlan_rdst *dst = &vxlan->default_dst; struct vxlan_rdst old_dst; struct vxlan_config conf; + struct vxlan_fdb *f = NULL; int err; err = vxlan_nl2conf(tb, data, @@ -3453,16 +3503,16 @@ static int vxlan_changelink(struct net_device *dev, struct nlattr *tb[], err = vxlan_fdb_create(vxlan, all_zeros_mac, &dst->remote_ip, NUD_REACHABLE | NUD_PERMANENT, - NLM_F_CREATE | NLM_F_APPEND, vxlan->cfg.dst_port, dst->remote_vni, dst->remote_vni, dst->remote_ifindex, - NTF_SELF); + NTF_SELF, &f); if (err) { spin_unlock_bh(&vxlan->hash_lock); return err; } + vxlan_fdb_notify(vxlan, f, first_remote_rtnl(f), RTM_NEWNEIGH); } spin_unlock_bh(&vxlan->hash_lock); } diff --git a/drivers/net/wan/farsync.c b/drivers/net/wan/farsync.c index bd46b2552980..2a3f0f1a2b0a 100644 --- a/drivers/net/wan/farsync.c +++ b/drivers/net/wan/farsync.c @@ -2134,7 +2134,6 @@ static void fst_openport(struct fst_port_info *port) { int signals; - int txq_length; /* Only init things if card is actually running. This allows open to * succeed for downloads etc. @@ -2161,7 +2160,6 @@ fst_openport(struct fst_port_info *port) else netif_carrier_off(port_to_dev(port)); - txq_length = port->txqe - port->txqs; port->txqe = 0; port->txqs = 0; } diff --git a/drivers/net/wan/lmc/lmc_main.c b/drivers/net/wan/lmc/lmc_main.c index 90a4ad9a2d08..093bd21f574d 100644 --- a/drivers/net/wan/lmc/lmc_main.c +++ b/drivers/net/wan/lmc/lmc_main.c @@ -1491,7 +1491,6 @@ static int lmc_rx(struct net_device *dev) lmc_softc_t *sc = dev_to_sc(dev); int i; int rx_work_limit = LMC_RXDESCS; - unsigned int next_rx; int rxIntLoopCnt; /* debug -baz */ int localLengthErrCnt = 0; long stat; @@ -1505,7 +1504,6 @@ static int lmc_rx(struct net_device *dev) rxIntLoopCnt = 0; /* debug -baz */ i = sc->lmc_next_rx % LMC_RXDESCS; - next_rx = sc->lmc_next_rx; while (((stat = sc->lmc_rxring[i].status) & LMC_RDES_OWN_BIT) != DESC_OWNED_BY_DC21X4) { diff --git a/drivers/net/wimax/i2400m/control.c b/drivers/net/wimax/i2400m/control.c index 4c417903e9be..094cea775d0c 100644 --- a/drivers/net/wimax/i2400m/control.c +++ b/drivers/net/wimax/i2400m/control.c @@ -566,13 +566,12 @@ static void i2400m_msg_ack_hook(struct i2400m *i2400m, { int result; struct device *dev = i2400m_dev(i2400m); - unsigned ack_type, ack_status; + unsigned int ack_type; char strerr[32]; /* Chew on the message, we might need some information from * here */ ack_type = le16_to_cpu(l3l4_hdr->type); - ack_status = le16_to_cpu(l3l4_hdr->status); switch (ack_type) { case I2400M_MT_CMD_ENTER_POWERSAVE: /* This is just left here for the sake of example, as diff --git a/drivers/net/wimax/i2400m/fw.c b/drivers/net/wimax/i2400m/fw.c index a89b5685e68b..e9fc168bb734 100644 --- a/drivers/net/wimax/i2400m/fw.c +++ b/drivers/net/wimax/i2400m/fw.c @@ -1552,7 +1552,6 @@ int i2400m_dev_bootstrap(struct i2400m *i2400m, enum i2400m_bri flags) int ret, itr; struct device *dev = i2400m_dev(i2400m); struct i2400m_fw *i2400m_fw; - const struct i2400m_bcf_hdr *bcf; /* Firmware data */ const struct firmware *fw; const char *fw_name; @@ -1574,7 +1573,7 @@ int i2400m_dev_bootstrap(struct i2400m *i2400m, enum i2400m_bri flags) } /* Load firmware files to memory. 
*/ - for (itr = 0, bcf = NULL, ret = -ENOENT; ; itr++) { + for (itr = 0, ret = -ENOENT; ; itr++) { fw_name = i2400m->bus_fw_names[itr]; if (fw_name == NULL) { dev_err(dev, "Could not find a usable firmware image\n"); diff --git a/drivers/net/wimax/i2400m/netdev.c b/drivers/net/wimax/i2400m/netdev.c index a654687b5fa2..9ab3f0fdfea4 100644 --- a/drivers/net/wimax/i2400m/netdev.c +++ b/drivers/net/wimax/i2400m/netdev.c @@ -535,14 +535,12 @@ void i2400m_net_erx(struct i2400m *i2400m, struct sk_buff *skb, { struct net_device *net_dev = i2400m->wimax_dev.net_dev; struct device *dev = i2400m_dev(i2400m); - int protocol; d_fnstart(2, dev, "(i2400m %p skb %p [%u] cs %d)\n", i2400m, skb, skb->len, cs); switch(cs) { case I2400M_CS_IPV4_0: case I2400M_CS_IPV4: - protocol = ETH_P_IP; i2400m_rx_fake_eth_header(i2400m->wimax_dev.net_dev, skb->data - ETH_HLEN, cpu_to_be16(ETH_P_IP)); diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c index e60bea4604e4..1665066f4e24 100644 --- a/drivers/net/wireless/ath/ath9k/hw.c +++ b/drivers/net/wireless/ath/ath9k/hw.c @@ -496,7 +496,7 @@ static void ath9k_hw_init_macaddr(struct ath_hw *ah) ath_err(common, "eeprom contains invalid mac address: %pM\n", common->macaddr); - random_ether_addr(common->macaddr); + eth_random_addr(common->macaddr); ath_err(common, "random mac address will be used: %pM\n", common->macaddr); diff --git a/drivers/net/wireless/marvell/mwifiex/main.c b/drivers/net/wireless/marvell/mwifiex/main.c index 510f6b8e717d..fa3e8ddfe9a9 100644 --- a/drivers/net/wireless/marvell/mwifiex/main.c +++ b/drivers/net/wireless/marvell/mwifiex/main.c @@ -1279,7 +1279,8 @@ static struct net_device_stats *mwifiex_get_stats(struct net_device *dev) static u16 mwifiex_netdev_select_wmm_queue(struct net_device *dev, struct sk_buff *skb, - void *accel_priv, select_queue_fallback_t fallback) + struct net_device *sb_dev, + select_queue_fallback_t fallback) { skb->priority = cfg80211_classify8021d(skb, NULL); return mwifiex_1d_to_wmm_queue[skb->priority]; diff --git a/drivers/net/wireless/realtek/rtlwifi/base.c b/drivers/net/wireless/realtek/rtlwifi/base.c index 54c9f6ab0c8c..f4122c8fdd97 100644 --- a/drivers/net/wireless/realtek/rtlwifi/base.c +++ b/drivers/net/wireless/realtek/rtlwifi/base.c @@ -1907,7 +1907,7 @@ void rtl_rx_ampdu_apply(struct rtl_priv *rtlpriv) reject_agg, ctrl_agg_size, agg_size); rtlpriv->hw->max_rx_aggregation_subframes = - (ctrl_agg_size ? agg_size : IEEE80211_MAX_AMPDU_BUF); + (ctrl_agg_size ? 
agg_size : IEEE80211_MAX_AMPDU_BUF_HT); } EXPORT_SYMBOL(rtl_rx_ampdu_apply); diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c index 78ebe494fef0..92274c237200 100644 --- a/drivers/net/xen-netback/interface.c +++ b/drivers/net/xen-netback/interface.c @@ -148,14 +148,14 @@ void xenvif_wake_queue(struct xenvif_queue *queue) } static u16 xenvif_select_queue(struct net_device *dev, struct sk_buff *skb, - void *accel_priv, + struct net_device *sb_dev, select_queue_fallback_t fallback) { struct xenvif *vif = netdev_priv(dev); unsigned int size = vif->hash.size; if (vif->hash.alg == XEN_NETIF_CTRL_HASH_ALGORITHM_NONE) - return fallback(dev, skb) % dev->real_num_tx_queues; + return fallback(dev, skb, NULL) % dev->real_num_tx_queues; xenvif_set_skb_hash(vif, skb); diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c index a57daecf1d57..d67cd379d156 100644 --- a/drivers/net/xen-netfront.c +++ b/drivers/net/xen-netfront.c @@ -545,7 +545,8 @@ static int xennet_count_skb_slots(struct sk_buff *skb) } static u16 xennet_select_queue(struct net_device *dev, struct sk_buff *skb, - void *accel_priv, select_queue_fallback_t fallback) + struct net_device *sb_dev, + select_queue_fallback_t fallback) { unsigned int num_queues = dev->real_num_tx_queues; u32 hash; diff --git a/drivers/of/of_mdio.c b/drivers/of/of_mdio.c index d963baf8e53a..e92391d6d1bd 100644 --- a/drivers/of/of_mdio.c +++ b/drivers/of/of_mdio.c @@ -367,14 +367,23 @@ struct phy_device *of_phy_get_and_connect(struct net_device *dev, phy_interface_t iface; struct device_node *phy_np; struct phy_device *phy; + int ret; iface = of_get_phy_mode(np); if (iface < 0) return NULL; - - phy_np = of_parse_phandle(np, "phy-handle", 0); - if (!phy_np) - return NULL; + if (of_phy_is_fixed_link(np)) { + ret = of_phy_register_fixed_link(np); + if (ret < 0) { + netdev_err(dev, "broken fixed-link specification\n"); + return NULL; + } + phy_np = of_node_get(np); + } else { + phy_np = of_parse_phandle(np, "phy-handle", 0); + if (!phy_np) + return NULL; + } phy = of_phy_connect(dev, phy_np, hndlr, 0, iface); diff --git a/drivers/ptp/Kconfig b/drivers/ptp/Kconfig index 474c988d2e95..d137c480db46 100644 --- a/drivers/ptp/Kconfig +++ b/drivers/ptp/Kconfig @@ -43,7 +43,7 @@ config PTP_1588_CLOCK_DTE config PTP_1588_CLOCK_QORIQ tristate "Freescale QorIQ 1588 timer as PTP clock" - depends on GIANFAR + depends on GIANFAR || FSL_DPAA_ETH depends on PTP_1588_CLOCK default y help diff --git a/drivers/ptp/ptp_qoriq.c b/drivers/ptp/ptp_qoriq.c index e8652c148c52..a14c317b5a38 100644 --- a/drivers/ptp/ptp_qoriq.c +++ b/drivers/ptp/ptp_qoriq.c @@ -39,11 +39,12 @@ /* Caller must hold qoriq_ptp->lock. */ static u64 tmr_cnt_read(struct qoriq_ptp *qoriq_ptp) { + struct qoriq_ptp_registers *regs = &qoriq_ptp->regs; u64 ns; u32 lo, hi; - lo = qoriq_read(&qoriq_ptp->regs->tmr_cnt_l); - hi = qoriq_read(&qoriq_ptp->regs->tmr_cnt_h); + lo = qoriq_read(®s->ctrl_regs->tmr_cnt_l); + hi = qoriq_read(®s->ctrl_regs->tmr_cnt_h); ns = ((u64) hi) << 32; ns |= lo; return ns; @@ -52,16 +53,18 @@ static u64 tmr_cnt_read(struct qoriq_ptp *qoriq_ptp) /* Caller must hold qoriq_ptp->lock. 
*/ static void tmr_cnt_write(struct qoriq_ptp *qoriq_ptp, u64 ns) { + struct qoriq_ptp_registers *regs = &qoriq_ptp->regs; u32 hi = ns >> 32; u32 lo = ns & 0xffffffff; - qoriq_write(&qoriq_ptp->regs->tmr_cnt_l, lo); - qoriq_write(&qoriq_ptp->regs->tmr_cnt_h, hi); + qoriq_write(®s->ctrl_regs->tmr_cnt_l, lo); + qoriq_write(®s->ctrl_regs->tmr_cnt_h, hi); } /* Caller must hold qoriq_ptp->lock. */ static void set_alarm(struct qoriq_ptp *qoriq_ptp) { + struct qoriq_ptp_registers *regs = &qoriq_ptp->regs; u64 ns; u32 lo, hi; @@ -70,16 +73,18 @@ static void set_alarm(struct qoriq_ptp *qoriq_ptp) ns -= qoriq_ptp->tclk_period; hi = ns >> 32; lo = ns & 0xffffffff; - qoriq_write(&qoriq_ptp->regs->tmr_alarm1_l, lo); - qoriq_write(&qoriq_ptp->regs->tmr_alarm1_h, hi); + qoriq_write(®s->alarm_regs->tmr_alarm1_l, lo); + qoriq_write(®s->alarm_regs->tmr_alarm1_h, hi); } /* Caller must hold qoriq_ptp->lock. */ static void set_fipers(struct qoriq_ptp *qoriq_ptp) { + struct qoriq_ptp_registers *regs = &qoriq_ptp->regs; + set_alarm(qoriq_ptp); - qoriq_write(&qoriq_ptp->regs->tmr_fiper1, qoriq_ptp->tmr_fiper1); - qoriq_write(&qoriq_ptp->regs->tmr_fiper2, qoriq_ptp->tmr_fiper2); + qoriq_write(®s->fiper_regs->tmr_fiper1, qoriq_ptp->tmr_fiper1); + qoriq_write(®s->fiper_regs->tmr_fiper2, qoriq_ptp->tmr_fiper2); } /* @@ -89,16 +94,17 @@ static void set_fipers(struct qoriq_ptp *qoriq_ptp) static irqreturn_t isr(int irq, void *priv) { struct qoriq_ptp *qoriq_ptp = priv; + struct qoriq_ptp_registers *regs = &qoriq_ptp->regs; struct ptp_clock_event event; u64 ns; u32 ack = 0, lo, hi, mask, val; - val = qoriq_read(&qoriq_ptp->regs->tmr_tevent); + val = qoriq_read(®s->ctrl_regs->tmr_tevent); if (val & ETS1) { ack |= ETS1; - hi = qoriq_read(&qoriq_ptp->regs->tmr_etts1_h); - lo = qoriq_read(&qoriq_ptp->regs->tmr_etts1_l); + hi = qoriq_read(®s->etts_regs->tmr_etts1_h); + lo = qoriq_read(®s->etts_regs->tmr_etts1_l); event.type = PTP_CLOCK_EXTTS; event.index = 0; event.timestamp = ((u64) hi) << 32; @@ -108,8 +114,8 @@ static irqreturn_t isr(int irq, void *priv) if (val & ETS2) { ack |= ETS2; - hi = qoriq_read(&qoriq_ptp->regs->tmr_etts2_h); - lo = qoriq_read(&qoriq_ptp->regs->tmr_etts2_l); + hi = qoriq_read(®s->etts_regs->tmr_etts2_h); + lo = qoriq_read(®s->etts_regs->tmr_etts2_l); event.type = PTP_CLOCK_EXTTS; event.index = 1; event.timestamp = ((u64) hi) << 32; @@ -130,16 +136,16 @@ static irqreturn_t isr(int irq, void *priv) hi = ns >> 32; lo = ns & 0xffffffff; spin_lock(&qoriq_ptp->lock); - qoriq_write(&qoriq_ptp->regs->tmr_alarm2_l, lo); - qoriq_write(&qoriq_ptp->regs->tmr_alarm2_h, hi); + qoriq_write(®s->alarm_regs->tmr_alarm2_l, lo); + qoriq_write(®s->alarm_regs->tmr_alarm2_h, hi); spin_unlock(&qoriq_ptp->lock); qoriq_ptp->alarm_value = ns; } else { - qoriq_write(&qoriq_ptp->regs->tmr_tevent, ALM2); + qoriq_write(®s->ctrl_regs->tmr_tevent, ALM2); spin_lock(&qoriq_ptp->lock); - mask = qoriq_read(&qoriq_ptp->regs->tmr_temask); + mask = qoriq_read(®s->ctrl_regs->tmr_temask); mask &= ~ALM2EN; - qoriq_write(&qoriq_ptp->regs->tmr_temask, mask); + qoriq_write(®s->ctrl_regs->tmr_temask, mask); spin_unlock(&qoriq_ptp->lock); qoriq_ptp->alarm_value = 0; qoriq_ptp->alarm_interval = 0; @@ -153,7 +159,7 @@ static irqreturn_t isr(int irq, void *priv) } if (ack) { - qoriq_write(&qoriq_ptp->regs->tmr_tevent, ack); + qoriq_write(®s->ctrl_regs->tmr_tevent, ack); return IRQ_HANDLED; } else return IRQ_NONE; @@ -169,6 +175,7 @@ static int ptp_qoriq_adjfine(struct ptp_clock_info *ptp, long scaled_ppm) u32 tmr_add; int neg_adj = 0; struct 
qoriq_ptp *qoriq_ptp = container_of(ptp, struct qoriq_ptp, caps); + struct qoriq_ptp_registers *regs = &qoriq_ptp->regs; if (scaled_ppm < 0) { neg_adj = 1; @@ -186,7 +193,7 @@ static int ptp_qoriq_adjfine(struct ptp_clock_info *ptp, long scaled_ppm) tmr_add = neg_adj ? tmr_add - diff : tmr_add + diff; - qoriq_write(&qoriq_ptp->regs->tmr_add, tmr_add); + qoriq_write(®s->ctrl_regs->tmr_add, tmr_add); return 0; } @@ -250,6 +257,7 @@ static int ptp_qoriq_enable(struct ptp_clock_info *ptp, struct ptp_clock_request *rq, int on) { struct qoriq_ptp *qoriq_ptp = container_of(ptp, struct qoriq_ptp, caps); + struct qoriq_ptp_registers *regs = &qoriq_ptp->regs; unsigned long flags; u32 bit, mask; @@ -266,23 +274,23 @@ static int ptp_qoriq_enable(struct ptp_clock_info *ptp, return -EINVAL; } spin_lock_irqsave(&qoriq_ptp->lock, flags); - mask = qoriq_read(&qoriq_ptp->regs->tmr_temask); + mask = qoriq_read(®s->ctrl_regs->tmr_temask); if (on) mask |= bit; else mask &= ~bit; - qoriq_write(&qoriq_ptp->regs->tmr_temask, mask); + qoriq_write(®s->ctrl_regs->tmr_temask, mask); spin_unlock_irqrestore(&qoriq_ptp->lock, flags); return 0; case PTP_CLK_REQ_PPS: spin_lock_irqsave(&qoriq_ptp->lock, flags); - mask = qoriq_read(&qoriq_ptp->regs->tmr_temask); + mask = qoriq_read(®s->ctrl_regs->tmr_temask); if (on) mask |= PP1EN; else mask &= ~PP1EN; - qoriq_write(&qoriq_ptp->regs->tmr_temask, mask); + qoriq_write(®s->ctrl_regs->tmr_temask, mask); spin_unlock_irqrestore(&qoriq_ptp->lock, flags); return 0; @@ -313,10 +321,12 @@ static int qoriq_ptp_probe(struct platform_device *dev) { struct device_node *node = dev->dev.of_node; struct qoriq_ptp *qoriq_ptp; + struct qoriq_ptp_registers *regs; struct timespec64 now; int err = -ENOMEM; u32 tmr_ctrl; unsigned long flags; + void __iomem *base; qoriq_ptp = kzalloc(sizeof(*qoriq_ptp), GFP_KERNEL); if (!qoriq_ptp) @@ -351,7 +361,7 @@ static int qoriq_ptp_probe(struct platform_device *dev) pr_err("irq not in device tree\n"); goto no_node; } - if (request_irq(qoriq_ptp->irq, isr, 0, DRIVER, qoriq_ptp)) { + if (request_irq(qoriq_ptp->irq, isr, IRQF_SHARED, DRIVER, qoriq_ptp)) { pr_err("request_irq failed\n"); goto no_node; } @@ -368,12 +378,27 @@ static int qoriq_ptp_probe(struct platform_device *dev) spin_lock_init(&qoriq_ptp->lock); - qoriq_ptp->regs = ioremap(qoriq_ptp->rsrc->start, - resource_size(qoriq_ptp->rsrc)); - if (!qoriq_ptp->regs) { + base = ioremap(qoriq_ptp->rsrc->start, + resource_size(qoriq_ptp->rsrc)); + if (!base) { pr_err("ioremap ptp registers failed\n"); goto no_ioremap; } + + qoriq_ptp->base = base; + + if (of_device_is_compatible(node, "fsl,fman-ptp-timer")) { + qoriq_ptp->regs.ctrl_regs = base + FMAN_CTRL_REGS_OFFSET; + qoriq_ptp->regs.alarm_regs = base + FMAN_ALARM_REGS_OFFSET; + qoriq_ptp->regs.fiper_regs = base + FMAN_FIPER_REGS_OFFSET; + qoriq_ptp->regs.etts_regs = base + FMAN_ETTS_REGS_OFFSET; + } else { + qoriq_ptp->regs.ctrl_regs = base + CTRL_REGS_OFFSET; + qoriq_ptp->regs.alarm_regs = base + ALARM_REGS_OFFSET; + qoriq_ptp->regs.fiper_regs = base + FIPER_REGS_OFFSET; + qoriq_ptp->regs.etts_regs = base + ETTS_REGS_OFFSET; + } + ktime_get_real_ts64(&now); ptp_qoriq_settime(&qoriq_ptp->caps, &now); @@ -383,13 +408,14 @@ static int qoriq_ptp_probe(struct platform_device *dev) spin_lock_irqsave(&qoriq_ptp->lock, flags); - qoriq_write(&qoriq_ptp->regs->tmr_ctrl, tmr_ctrl); - qoriq_write(&qoriq_ptp->regs->tmr_add, qoriq_ptp->tmr_add); - qoriq_write(&qoriq_ptp->regs->tmr_prsc, qoriq_ptp->tmr_prsc); - qoriq_write(&qoriq_ptp->regs->tmr_fiper1, 
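The probe hunk above now passes IRQF_SHARED to request_irq(), which is only safe because isr() already behaves as a shared handler: it reads its own event register and returns IRQ_NONE when it acknowledged nothing, so other devices on the same line still get a chance to claim the interrupt. A generic sketch of that contract, with made-up register offsets and bits:

#include <linux/interrupt.h>
#include <linux/io.h>

struct example_dev {                    /* hypothetical device context */
        void __iomem *regs;
};

#define EXAMPLE_EVENT_REG       0x00    /* illustrative offset */
#define EXAMPLE_EVENT_MASK      0x3     /* illustrative event bits */

static irqreturn_t example_shared_isr(int irq, void *priv)
{
        struct example_dev *edev = priv;
        u32 events;

        events = ioread32(edev->regs + EXAMPLE_EVENT_REG) & EXAMPLE_EVENT_MASK;
        if (!events)
                return IRQ_NONE;        /* not ours; the line is shared */

        /* Acknowledge (write-one-to-clear assumed) and report handled */
        iowrite32(events, edev->regs + EXAMPLE_EVENT_REG);
        return IRQ_HANDLED;
}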
qoriq_ptp->tmr_fiper1); - qoriq_write(&qoriq_ptp->regs->tmr_fiper2, qoriq_ptp->tmr_fiper2); + regs = &qoriq_ptp->regs; + qoriq_write(®s->ctrl_regs->tmr_ctrl, tmr_ctrl); + qoriq_write(®s->ctrl_regs->tmr_add, qoriq_ptp->tmr_add); + qoriq_write(®s->ctrl_regs->tmr_prsc, qoriq_ptp->tmr_prsc); + qoriq_write(®s->fiper_regs->tmr_fiper1, qoriq_ptp->tmr_fiper1); + qoriq_write(®s->fiper_regs->tmr_fiper2, qoriq_ptp->tmr_fiper2); set_alarm(qoriq_ptp); - qoriq_write(&qoriq_ptp->regs->tmr_ctrl, tmr_ctrl|FIPERST|RTPE|TE|FRD); + qoriq_write(®s->ctrl_regs->tmr_ctrl, tmr_ctrl|FIPERST|RTPE|TE|FRD); spin_unlock_irqrestore(&qoriq_ptp->lock, flags); @@ -405,7 +431,7 @@ static int qoriq_ptp_probe(struct platform_device *dev) return 0; no_clock: - iounmap(qoriq_ptp->regs); + iounmap(qoriq_ptp->base); no_ioremap: release_resource(qoriq_ptp->rsrc); no_resource: @@ -419,12 +445,13 @@ no_memory: static int qoriq_ptp_remove(struct platform_device *dev) { struct qoriq_ptp *qoriq_ptp = platform_get_drvdata(dev); + struct qoriq_ptp_registers *regs = &qoriq_ptp->regs; - qoriq_write(&qoriq_ptp->regs->tmr_temask, 0); - qoriq_write(&qoriq_ptp->regs->tmr_ctrl, 0); + qoriq_write(®s->ctrl_regs->tmr_temask, 0); + qoriq_write(®s->ctrl_regs->tmr_ctrl, 0); ptp_clock_unregister(qoriq_ptp->clock); - iounmap(qoriq_ptp->regs); + iounmap(qoriq_ptp->base); release_resource(qoriq_ptp->rsrc); free_irq(qoriq_ptp->irq, qoriq_ptp); kfree(qoriq_ptp); @@ -434,6 +461,7 @@ static int qoriq_ptp_remove(struct platform_device *dev) static const struct of_device_id match_table[] = { { .compatible = "fsl,etsec-ptp" }, + { .compatible = "fsl,fman-ptp-timer" }, {}, }; MODULE_DEVICE_TABLE(of, match_table); diff --git a/drivers/s390/net/Kconfig b/drivers/s390/net/Kconfig index c7e484f70654..7c5a25ddf832 100644 --- a/drivers/s390/net/Kconfig +++ b/drivers/s390/net/Kconfig @@ -95,4 +95,14 @@ config CCWGROUP tristate default (LCS || CTCM || QETH) +config ISM + tristate "Support for ISM vPCI Adapter" + depends on PCI && SMC + default n + help + Select this option if you want to use the Internal Shared Memory + vPCI Adapter. + + To compile as a module choose M. The module name is ism. + If unsure, choose N. endmenu diff --git a/drivers/s390/net/Makefile b/drivers/s390/net/Makefile index 513b7ae64980..f2d6bbe57a6f 100644 --- a/drivers/s390/net/Makefile +++ b/drivers/s390/net/Makefile @@ -15,3 +15,6 @@ qeth_l2-y += qeth_l2_main.o qeth_l2_sys.o obj-$(CONFIG_QETH_L2) += qeth_l2.o qeth_l3-y += qeth_l3_main.o qeth_l3_sys.o obj-$(CONFIG_QETH_L3) += qeth_l3.o + +ism-y := ism_drv.o +obj-$(CONFIG_ISM) += ism.o diff --git a/drivers/s390/net/ism.h b/drivers/s390/net/ism.h new file mode 100644 index 000000000000..0aab90817326 --- /dev/null +++ b/drivers/s390/net/ism.h @@ -0,0 +1,221 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef S390_ISM_H +#define S390_ISM_H + +#include <linux/spinlock.h> +#include <linux/types.h> +#include <linux/pci.h> +#include <net/smc.h> + +#define UTIL_STR_LEN 16 + +/* + * Do not use the first word of the DMB bits to ensure 8 byte aligned access. 
+ */ +#define ISM_DMB_WORD_OFFSET 1 +#define ISM_DMB_BIT_OFFSET (ISM_DMB_WORD_OFFSET * 32) +#define ISM_NR_DMBS 1920 + +#define ISM_REG_SBA 0x1 +#define ISM_REG_IEQ 0x2 +#define ISM_READ_GID 0x3 +#define ISM_ADD_VLAN_ID 0x4 +#define ISM_DEL_VLAN_ID 0x5 +#define ISM_SET_VLAN 0x6 +#define ISM_RESET_VLAN 0x7 +#define ISM_QUERY_INFO 0x8 +#define ISM_QUERY_RGID 0x9 +#define ISM_REG_DMB 0xA +#define ISM_UNREG_DMB 0xB +#define ISM_SIGNAL_IEQ 0xE +#define ISM_UNREG_SBA 0x11 +#define ISM_UNREG_IEQ 0x12 + +#define ISM_ERROR 0xFFFF + +struct ism_req_hdr { + u32 cmd; + u16 : 16; + u16 len; +}; + +struct ism_resp_hdr { + u32 cmd; + u16 ret; + u16 len; +}; + +union ism_reg_sba { + struct { + struct ism_req_hdr hdr; + u64 sba; + } request; + struct { + struct ism_resp_hdr hdr; + } response; +} __aligned(16); + +union ism_reg_ieq { + struct { + struct ism_req_hdr hdr; + u64 ieq; + u64 len; + } request; + struct { + struct ism_resp_hdr hdr; + } response; +} __aligned(16); + +union ism_read_gid { + struct { + struct ism_req_hdr hdr; + } request; + struct { + struct ism_resp_hdr hdr; + u64 gid; + } response; +} __aligned(16); + +union ism_qi { + struct { + struct ism_req_hdr hdr; + } request; + struct { + struct ism_resp_hdr hdr; + u32 version; + u32 max_len; + u64 ism_state; + u64 my_gid; + u64 sba; + u64 ieq; + u32 ieq_len; + u32 : 32; + u32 dmbs_owned; + u32 dmbs_used; + u32 vlan_required; + u32 vlan_nr_ids; + u16 vlan_id[64]; + } response; +} __aligned(64); + +union ism_query_rgid { + struct { + struct ism_req_hdr hdr; + u64 rgid; + u32 vlan_valid; + u32 vlan_id; + } request; + struct { + struct ism_resp_hdr hdr; + } response; +} __aligned(16); + +union ism_reg_dmb { + struct { + struct ism_req_hdr hdr; + u64 dmb; + u32 dmb_len; + u32 sba_idx; + u32 vlan_valid; + u32 vlan_id; + u64 rgid; + } request; + struct { + struct ism_resp_hdr hdr; + u64 dmb_tok; + } response; +} __aligned(32); + +union ism_sig_ieq { + struct { + struct ism_req_hdr hdr; + u64 rgid; + u32 trigger_irq; + u32 event_code; + u64 info; + } request; + struct { + struct ism_resp_hdr hdr; + } response; +} __aligned(32); + +union ism_unreg_dmb { + struct { + struct ism_req_hdr hdr; + u64 dmb_tok; + } request; + struct { + struct ism_resp_hdr hdr; + } response; +} __aligned(16); + +union ism_cmd_simple { + struct { + struct ism_req_hdr hdr; + } request; + struct { + struct ism_resp_hdr hdr; + } response; +} __aligned(8); + +union ism_set_vlan_id { + struct { + struct ism_req_hdr hdr; + u64 vlan_id; + } request; + struct { + struct ism_resp_hdr hdr; + } response; +} __aligned(16); + +struct ism_eq_header { + u64 idx; + u64 ieq_len; + u64 entry_len; + u64 : 64; +}; + +struct ism_eq { + struct ism_eq_header header; + struct smcd_event entry[15]; +}; + +struct ism_sba { + u32 s : 1; /* summary bit */ + u32 e : 1; /* event bit */ + u32 : 30; + u32 dmb_bits[ISM_NR_DMBS / 32]; + u32 reserved[3]; + u16 dmbe_mask[ISM_NR_DMBS]; +}; + +struct ism_dev { + spinlock_t lock; + struct pci_dev *pdev; + struct smcd_dev *smcd; + + void __iomem *ctl; + + struct ism_sba *sba; + dma_addr_t sba_dma_addr; + DECLARE_BITMAP(sba_bitmap, ISM_NR_DMBS); + + struct ism_eq *ieq; + dma_addr_t ieq_dma_addr; + + int ieq_idx; +}; + +#define ISM_CREATE_REQ(dmb, idx, sf, offset) \ + ((dmb) | (idx) << 24 | (sf) << 23 | (offset)) + +static inline int __ism_move(struct ism_dev *ism, u64 dmb_req, void *data, + unsigned int size) +{ + struct zpci_dev *zdev = to_zpci(ism->pdev); + u64 req = ZPCI_CREATE_REQ(zdev->fh, 0, size); + + return zpci_write_block(req, data, dmb_req); +} + 
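For orientation, a minimal sketch of how the request word consumed by __ism_move() above is put together; the numeric values are hypothetical and nothing beyond the arithmetic of ISM_CREATE_REQ itself is assumed:

	/* hypothetical values, for illustration only */
	u64 dmb_tok = 0x1200000000ULL;	/* token from the ISM_REG_DMB response */
	u64 dmb_req;

	/* offset in the low bits, signal flag in bit 23, DMB entry index from bit 24 */
	dmb_req = ISM_CREATE_REQ(dmb_tok, 5, 1, 0x10);
	/* dmb_req == dmb_tok | (5 << 24) | (1 << 23) | 0x10 */

__ism_move() then passes this value, together with the payload, to zpci_write_block().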
+#endif /* S390_ISM_H */ diff --git a/drivers/s390/net/ism_drv.c b/drivers/s390/net/ism_drv.c new file mode 100644 index 000000000000..c0631895154e --- /dev/null +++ b/drivers/s390/net/ism_drv.c @@ -0,0 +1,623 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * ISM driver for s390. + * + * Copyright IBM Corp. 2018 + */ +#define KMSG_COMPONENT "ism" +#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt + +#include <linux/module.h> +#include <linux/types.h> +#include <linux/interrupt.h> +#include <linux/device.h> +#include <linux/pci.h> +#include <linux/err.h> +#include <net/smc.h> + +#include <asm/debug.h> + +#include "ism.h" + +MODULE_DESCRIPTION("ISM driver for s390"); +MODULE_LICENSE("GPL"); + +#define PCI_DEVICE_ID_IBM_ISM 0x04ED +#define DRV_NAME "ism" + +static const struct pci_device_id ism_device_table[] = { + { PCI_VDEVICE(IBM, PCI_DEVICE_ID_IBM_ISM), 0 }, + { 0, } +}; +MODULE_DEVICE_TABLE(pci, ism_device_table); + +static debug_info_t *ism_debug_info; + +static int ism_cmd(struct ism_dev *ism, void *cmd) +{ + struct ism_req_hdr *req = cmd; + struct ism_resp_hdr *resp = cmd; + + memcpy_toio(ism->ctl + sizeof(*req), req + 1, req->len - sizeof(*req)); + memcpy_toio(ism->ctl, req, sizeof(*req)); + + WRITE_ONCE(resp->ret, ISM_ERROR); + + memcpy_fromio(resp, ism->ctl, sizeof(*resp)); + if (resp->ret) { + debug_text_event(ism_debug_info, 0, "cmd failure"); + debug_event(ism_debug_info, 0, resp, sizeof(*resp)); + goto out; + } + memcpy_fromio(resp + 1, ism->ctl + sizeof(*resp), + resp->len - sizeof(*resp)); +out: + return resp->ret; +} + +static int ism_cmd_simple(struct ism_dev *ism, u32 cmd_code) +{ + union ism_cmd_simple cmd; + + memset(&cmd, 0, sizeof(cmd)); + cmd.request.hdr.cmd = cmd_code; + cmd.request.hdr.len = sizeof(cmd.request); + + return ism_cmd(ism, &cmd); +} + +static int query_info(struct ism_dev *ism) +{ + union ism_qi cmd; + + memset(&cmd, 0, sizeof(cmd)); + cmd.request.hdr.cmd = ISM_QUERY_INFO; + cmd.request.hdr.len = sizeof(cmd.request); + + if (ism_cmd(ism, &cmd)) + goto out; + + debug_text_event(ism_debug_info, 3, "query info"); + debug_event(ism_debug_info, 3, &cmd.response, sizeof(cmd.response)); +out: + return 0; +} + +static int register_sba(struct ism_dev *ism) +{ + union ism_reg_sba cmd; + dma_addr_t dma_handle; + struct ism_sba *sba; + + sba = dma_zalloc_coherent(&ism->pdev->dev, PAGE_SIZE, + &dma_handle, GFP_KERNEL); + if (!sba) + return -ENOMEM; + + memset(&cmd, 0, sizeof(cmd)); + cmd.request.hdr.cmd = ISM_REG_SBA; + cmd.request.hdr.len = sizeof(cmd.request); + cmd.request.sba = dma_handle; + + if (ism_cmd(ism, &cmd)) { + dma_free_coherent(&ism->pdev->dev, PAGE_SIZE, sba, dma_handle); + return -EIO; + } + + ism->sba = sba; + ism->sba_dma_addr = dma_handle; + + return 0; +} + +static int register_ieq(struct ism_dev *ism) +{ + union ism_reg_ieq cmd; + dma_addr_t dma_handle; + struct ism_eq *ieq; + + ieq = dma_zalloc_coherent(&ism->pdev->dev, PAGE_SIZE, + &dma_handle, GFP_KERNEL); + if (!ieq) + return -ENOMEM; + + memset(&cmd, 0, sizeof(cmd)); + cmd.request.hdr.cmd = ISM_REG_IEQ; + cmd.request.hdr.len = sizeof(cmd.request); + cmd.request.ieq = dma_handle; + cmd.request.len = sizeof(*ieq); + + if (ism_cmd(ism, &cmd)) { + dma_free_coherent(&ism->pdev->dev, PAGE_SIZE, ieq, dma_handle); + return -EIO; + } + + ism->ieq = ieq; + ism->ieq_idx = -1; + ism->ieq_dma_addr = dma_handle; + + return 0; +} + +static int unregister_sba(struct ism_dev *ism) +{ + if (!ism->sba) + return 0; + + if (ism_cmd_simple(ism, ISM_UNREG_SBA)) + return -EIO; + + dma_free_coherent(&ism->pdev->dev, 
PAGE_SIZE, + ism->sba, ism->sba_dma_addr); + + ism->sba = NULL; + ism->sba_dma_addr = 0; + + return 0; +} + +static int unregister_ieq(struct ism_dev *ism) +{ + if (!ism->ieq) + return 0; + + if (ism_cmd_simple(ism, ISM_UNREG_IEQ)) + return -EIO; + + dma_free_coherent(&ism->pdev->dev, PAGE_SIZE, + ism->ieq, ism->ieq_dma_addr); + + ism->ieq = NULL; + ism->ieq_dma_addr = 0; + + return 0; +} + +static int ism_read_local_gid(struct ism_dev *ism) +{ + union ism_read_gid cmd; + int ret; + + memset(&cmd, 0, sizeof(cmd)); + cmd.request.hdr.cmd = ISM_READ_GID; + cmd.request.hdr.len = sizeof(cmd.request); + + ret = ism_cmd(ism, &cmd); + if (ret) + goto out; + + ism->smcd->local_gid = cmd.response.gid; +out: + return ret; +} + +static int ism_query_rgid(struct smcd_dev *smcd, u64 rgid, u32 vid_valid, + u32 vid) +{ + struct ism_dev *ism = smcd->priv; + union ism_query_rgid cmd; + + memset(&cmd, 0, sizeof(cmd)); + cmd.request.hdr.cmd = ISM_QUERY_RGID; + cmd.request.hdr.len = sizeof(cmd.request); + + cmd.request.rgid = rgid; + cmd.request.vlan_valid = vid_valid; + cmd.request.vlan_id = vid; + + return ism_cmd(ism, &cmd); +} + +static void ism_free_dmb(struct ism_dev *ism, struct smcd_dmb *dmb) +{ + clear_bit(dmb->sba_idx, ism->sba_bitmap); + dma_free_coherent(&ism->pdev->dev, dmb->dmb_len, + dmb->cpu_addr, dmb->dma_addr); +} + +static int ism_alloc_dmb(struct ism_dev *ism, struct smcd_dmb *dmb) +{ + unsigned long bit; + + if (PAGE_ALIGN(dmb->dmb_len) > dma_get_max_seg_size(&ism->pdev->dev)) + return -EINVAL; + + if (!dmb->sba_idx) { + bit = find_next_zero_bit(ism->sba_bitmap, ISM_NR_DMBS, + ISM_DMB_BIT_OFFSET); + if (bit == ISM_NR_DMBS) + return -ENOMEM; + + dmb->sba_idx = bit; + } + if (dmb->sba_idx < ISM_DMB_BIT_OFFSET || + test_and_set_bit(dmb->sba_idx, ism->sba_bitmap)) + return -EINVAL; + + dmb->cpu_addr = dma_zalloc_coherent(&ism->pdev->dev, dmb->dmb_len, + &dmb->dma_addr, GFP_KERNEL | + __GFP_NOWARN | __GFP_NOMEMALLOC | + __GFP_COMP | __GFP_NORETRY); + if (!dmb->cpu_addr) + clear_bit(dmb->sba_idx, ism->sba_bitmap); + + return dmb->cpu_addr ? 
0 : -ENOMEM; +} + +static int ism_register_dmb(struct smcd_dev *smcd, struct smcd_dmb *dmb) +{ + struct ism_dev *ism = smcd->priv; + union ism_reg_dmb cmd; + int ret; + + ret = ism_alloc_dmb(ism, dmb); + if (ret) + goto out; + + memset(&cmd, 0, sizeof(cmd)); + cmd.request.hdr.cmd = ISM_REG_DMB; + cmd.request.hdr.len = sizeof(cmd.request); + + cmd.request.dmb = dmb->dma_addr; + cmd.request.dmb_len = dmb->dmb_len; + cmd.request.sba_idx = dmb->sba_idx; + cmd.request.vlan_valid = dmb->vlan_valid; + cmd.request.vlan_id = dmb->vlan_id; + cmd.request.rgid = dmb->rgid; + + ret = ism_cmd(ism, &cmd); + if (ret) { + ism_free_dmb(ism, dmb); + goto out; + } + dmb->dmb_tok = cmd.response.dmb_tok; +out: + return ret; +} + +static int ism_unregister_dmb(struct smcd_dev *smcd, struct smcd_dmb *dmb) +{ + struct ism_dev *ism = smcd->priv; + union ism_unreg_dmb cmd; + int ret; + + memset(&cmd, 0, sizeof(cmd)); + cmd.request.hdr.cmd = ISM_UNREG_DMB; + cmd.request.hdr.len = sizeof(cmd.request); + + cmd.request.dmb_tok = dmb->dmb_tok; + + ret = ism_cmd(ism, &cmd); + if (ret) + goto out; + + ism_free_dmb(ism, dmb); +out: + return ret; +} + +static int ism_add_vlan_id(struct smcd_dev *smcd, u64 vlan_id) +{ + struct ism_dev *ism = smcd->priv; + union ism_set_vlan_id cmd; + + memset(&cmd, 0, sizeof(cmd)); + cmd.request.hdr.cmd = ISM_ADD_VLAN_ID; + cmd.request.hdr.len = sizeof(cmd.request); + + cmd.request.vlan_id = vlan_id; + + return ism_cmd(ism, &cmd); +} + +static int ism_del_vlan_id(struct smcd_dev *smcd, u64 vlan_id) +{ + struct ism_dev *ism = smcd->priv; + union ism_set_vlan_id cmd; + + memset(&cmd, 0, sizeof(cmd)); + cmd.request.hdr.cmd = ISM_DEL_VLAN_ID; + cmd.request.hdr.len = sizeof(cmd.request); + + cmd.request.vlan_id = vlan_id; + + return ism_cmd(ism, &cmd); +} + +static int ism_set_vlan_required(struct smcd_dev *smcd) +{ + return ism_cmd_simple(smcd->priv, ISM_SET_VLAN); +} + +static int ism_reset_vlan_required(struct smcd_dev *smcd) +{ + return ism_cmd_simple(smcd->priv, ISM_RESET_VLAN); +} + +static int ism_signal_ieq(struct smcd_dev *smcd, u64 rgid, u32 trigger_irq, + u32 event_code, u64 info) +{ + struct ism_dev *ism = smcd->priv; + union ism_sig_ieq cmd; + + memset(&cmd, 0, sizeof(cmd)); + cmd.request.hdr.cmd = ISM_SIGNAL_IEQ; + cmd.request.hdr.len = sizeof(cmd.request); + + cmd.request.rgid = rgid; + cmd.request.trigger_irq = trigger_irq; + cmd.request.event_code = event_code; + cmd.request.info = info; + + return ism_cmd(ism, &cmd); +} + +static unsigned int max_bytes(unsigned int start, unsigned int len, + unsigned int boundary) +{ + return min(boundary - (start & (boundary - 1)), len); +} + +static int ism_move(struct smcd_dev *smcd, u64 dmb_tok, unsigned int idx, + bool sf, unsigned int offset, void *data, unsigned int size) +{ + struct ism_dev *ism = smcd->priv; + unsigned int bytes; + u64 dmb_req; + int ret; + + while (size) { + bytes = max_bytes(offset, size, PAGE_SIZE); + dmb_req = ISM_CREATE_REQ(dmb_tok, idx, size == bytes ? 
sf : 0, + offset); + + ret = __ism_move(ism, dmb_req, data, bytes); + if (ret) + return ret; + + size -= bytes; + data += bytes; + offset += bytes; + } + + return 0; +} + +static void ism_handle_event(struct ism_dev *ism) +{ + struct smcd_event *entry; + + while ((ism->ieq_idx + 1) != READ_ONCE(ism->ieq->header.idx)) { + if (++(ism->ieq_idx) == ARRAY_SIZE(ism->ieq->entry)) + ism->ieq_idx = 0; + + entry = &ism->ieq->entry[ism->ieq_idx]; + debug_event(ism_debug_info, 2, entry, sizeof(*entry)); + smcd_handle_event(ism->smcd, entry); + } +} + +static irqreturn_t ism_handle_irq(int irq, void *data) +{ + struct ism_dev *ism = data; + unsigned long bit, end; + unsigned long *bv; + + bv = (void *) &ism->sba->dmb_bits[ISM_DMB_WORD_OFFSET]; + end = sizeof(ism->sba->dmb_bits) * BITS_PER_BYTE - ISM_DMB_BIT_OFFSET; + + spin_lock(&ism->lock); + ism->sba->s = 0; + barrier(); + for (bit = 0;;) { + bit = find_next_bit_inv(bv, end, bit); + if (bit >= end) + break; + + clear_bit_inv(bit, bv); + barrier(); + smcd_handle_irq(ism->smcd, bit + ISM_DMB_BIT_OFFSET); + ism->sba->dmbe_mask[bit + ISM_DMB_BIT_OFFSET] = 0; + } + + if (ism->sba->e) { + ism->sba->e = 0; + barrier(); + ism_handle_event(ism); + } + spin_unlock(&ism->lock); + return IRQ_HANDLED; +} + +static const struct smcd_ops ism_ops = { + .query_remote_gid = ism_query_rgid, + .register_dmb = ism_register_dmb, + .unregister_dmb = ism_unregister_dmb, + .add_vlan_id = ism_add_vlan_id, + .del_vlan_id = ism_del_vlan_id, + .set_vlan_required = ism_set_vlan_required, + .reset_vlan_required = ism_reset_vlan_required, + .signal_event = ism_signal_ieq, + .move_data = ism_move, +}; + +static int ism_dev_init(struct ism_dev *ism) +{ + struct pci_dev *pdev = ism->pdev; + int ret; + + ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSI); + if (ret <= 0) + goto out; + + ret = request_irq(pci_irq_vector(pdev, 0), ism_handle_irq, 0, + pci_name(pdev), ism); + if (ret) + goto free_vectors; + + ret = register_sba(ism); + if (ret) + goto free_irq; + + ret = register_ieq(ism); + if (ret) + goto unreg_sba; + + ret = ism_read_local_gid(ism); + if (ret) + goto unreg_ieq; + + ret = smcd_register_dev(ism->smcd); + if (ret) + goto unreg_ieq; + + query_info(ism); + return 0; + +unreg_ieq: + unregister_ieq(ism); +unreg_sba: + unregister_sba(ism); +free_irq: + free_irq(pci_irq_vector(pdev, 0), ism); +free_vectors: + pci_free_irq_vectors(pdev); +out: + return ret; +} + +static int ism_probe(struct pci_dev *pdev, const struct pci_device_id *id) +{ + struct ism_dev *ism; + int ret; + + ism = kzalloc(sizeof(*ism), GFP_KERNEL); + if (!ism) + return -ENOMEM; + + spin_lock_init(&ism->lock); + dev_set_drvdata(&pdev->dev, ism); + ism->pdev = pdev; + + ret = pci_enable_device_mem(pdev); + if (ret) + goto err; + + ret = pci_request_mem_regions(pdev, DRV_NAME); + if (ret) + goto err_disable; + + ism->ctl = pci_iomap(pdev, 2, 0); + if (!ism->ctl) + goto err_resource; + + ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(64)); + if (ret) + goto err_unmap; + + pci_set_dma_seg_boundary(pdev, SZ_1M - 1); + pci_set_dma_max_seg_size(pdev, SZ_1M); + pci_set_master(pdev); + + ism->smcd = smcd_alloc_dev(&pdev->dev, dev_name(&pdev->dev), &ism_ops, + ISM_NR_DMBS); + if (!ism->smcd) + goto err_unmap; + + ism->smcd->priv = ism; + ret = ism_dev_init(ism); + if (ret) + goto err_free; + + return 0; + +err_free: + smcd_free_dev(ism->smcd); +err_unmap: + pci_iounmap(pdev, ism->ctl); +err_resource: + pci_release_mem_regions(pdev); +err_disable: + pci_disable_device(pdev); +err: + kfree(ism); + dev_set_drvdata(&pdev->dev, 
NULL); + return ret; +} + +static void ism_dev_exit(struct ism_dev *ism) +{ + struct pci_dev *pdev = ism->pdev; + + smcd_unregister_dev(ism->smcd); + unregister_ieq(ism); + unregister_sba(ism); + free_irq(pci_irq_vector(pdev, 0), ism); + pci_free_irq_vectors(pdev); +} + +static void ism_remove(struct pci_dev *pdev) +{ + struct ism_dev *ism = dev_get_drvdata(&pdev->dev); + + ism_dev_exit(ism); + + smcd_free_dev(ism->smcd); + pci_iounmap(pdev, ism->ctl); + pci_release_mem_regions(pdev); + pci_disable_device(pdev); + dev_set_drvdata(&pdev->dev, NULL); + kfree(ism); +} + +static int ism_suspend(struct device *dev) +{ + struct ism_dev *ism = dev_get_drvdata(dev); + + ism_dev_exit(ism); + return 0; +} + +static int ism_resume(struct device *dev) +{ + struct ism_dev *ism = dev_get_drvdata(dev); + + return ism_dev_init(ism); +} + +static SIMPLE_DEV_PM_OPS(ism_pm_ops, ism_suspend, ism_resume); + +static struct pci_driver ism_driver = { + .name = DRV_NAME, + .id_table = ism_device_table, + .probe = ism_probe, + .remove = ism_remove, + .driver = { + .pm = &ism_pm_ops, + }, +}; + +static int __init ism_init(void) +{ + int ret; + + ism_debug_info = debug_register("ism", 2, 1, 16); + if (!ism_debug_info) + return -ENODEV; + + debug_register_view(ism_debug_info, &debug_hex_ascii_view); + ret = pci_register_driver(&ism_driver); + if (ret) + debug_unregister(ism_debug_info); + + return ret; +} + +static void __exit ism_exit(void) +{ + pci_unregister_driver(&ism_driver); + debug_unregister(ism_debug_info); +} + +module_init(ism_init); +module_exit(ism_exit); diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h index a246a618f9a4..a932aac62d0e 100644 --- a/drivers/s390/net/qeth_core.h +++ b/drivers/s390/net/qeth_core.h @@ -465,7 +465,6 @@ struct qeth_qdio_out_buffer { struct sk_buff_head skb_list; int is_header[QDIO_MAX_ELEMENTS_PER_BUFFER]; - struct qaob *aob; struct qeth_qdio_out_q *q; struct qeth_qdio_out_buffer *next_pending; }; @@ -662,7 +661,6 @@ struct qeth_card_info { int portno; enum qeth_card_types type; enum qeth_link_types link_type; - int is_multicast_different; int initial_mtu; int max_mtu; int broadcast_capable; @@ -935,6 +933,19 @@ static inline int qeth_send_simple_setassparms_v6(struct qeth_card *card, data, QETH_PROT_IPV6); } +int qeth_get_priority_queue(struct qeth_card *card, struct sk_buff *skb, + int ipv); +static inline struct qeth_qdio_out_q *qeth_get_tx_queue(struct qeth_card *card, + struct sk_buff *skb, + int ipv, int cast_type) +{ + if (IS_IQD(card) && cast_type != RTN_UNICAST) + return card->qdio.out_qs[card->qdio.no_out_queues - 1]; + if (!card->qdio.do_prio_queueing) + return card->qdio.out_qs[card->qdio.default_out_queue]; + return card->qdio.out_qs[qeth_get_priority_queue(card, skb, ipv)]; +} + extern struct qeth_discipline qeth_l2_discipline; extern struct qeth_discipline qeth_l3_discipline; extern const struct attribute_group *qeth_generic_attr_groups[]; @@ -972,7 +983,6 @@ int qeth_send_ipa_cmd(struct qeth_card *, struct qeth_cmd_buffer *, void *); struct qeth_cmd_buffer *qeth_get_ipacmd_buffer(struct qeth_card *, enum qeth_ipa_cmds, enum qeth_prot_versions); -int qeth_query_setadapterparms(struct qeth_card *); struct sk_buff *qeth_core_get_next_skb(struct qeth_card *, struct qeth_qdio_buffer *, struct qdio_buffer_element **, int *, struct qeth_hdr **); @@ -998,11 +1008,6 @@ int qeth_query_switch_attributes(struct qeth_card *card, int qeth_send_control_data(struct qeth_card *, int, struct qeth_cmd_buffer *, int (*reply_cb)(struct qeth_card *, struct 
qeth_reply*, unsigned long), void *reply_param); -int qeth_bridgeport_query_ports(struct qeth_card *card, - enum qeth_sbp_roles *role, enum qeth_sbp_states *state); -int qeth_bridgeport_setrole(struct qeth_card *card, enum qeth_sbp_roles role); -int qeth_bridgeport_an_set(struct qeth_card *card, int enable); -int qeth_get_priority_queue(struct qeth_card *, struct sk_buff *, int, int); int qeth_get_elements_no(struct qeth_card *card, struct sk_buff *skb, int extra_elems, int data_offset); int qeth_get_elements_for_frags(struct sk_buff *); @@ -1026,7 +1031,6 @@ int qeth_set_access_ctrl_online(struct qeth_card *card, int fallback); int qeth_hdr_chk_and_bounce(struct sk_buff *, struct qeth_hdr **, int); int qeth_configure_cq(struct qeth_card *, enum qeth_cq); int qeth_hw_trap(struct qeth_card *, enum qeth_diags_trap_action); -int qeth_query_ipassists(struct qeth_card *, enum qeth_prot_versions prot); void qeth_trace_features(struct qeth_card *); void qeth_close_dev(struct qeth_card *); int qeth_send_setassparms(struct qeth_card *, struct qeth_cmd_buffer *, __u16, diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c index d01ac29fd986..d80972b9bfc7 100644 --- a/drivers/s390/net/qeth_core_main.c +++ b/drivers/s390/net/qeth_core_main.c @@ -473,7 +473,6 @@ static void qeth_cleanup_handled_pending(struct qeth_qdio_out_q *q, int bidx, if (forced_cleanup && (atomic_read(&(q->bufs[bidx]->state)) == QETH_QDIO_BUF_HANDLED_DELAYED)) { /* for recovery situations */ - q->bufs[bidx]->aob = q->bufstates[bidx].aob; qeth_init_qdio_out_buf(q, bidx); QETH_CARD_TEXT(q->card, 2, "clprecov"); } @@ -510,7 +509,6 @@ static void qeth_qdio_handle_aob(struct qeth_card *card, } qeth_notify_skbs(buffer->q, buffer, notification); - buffer->aob = NULL; /* Free dangling allocations. The attached skbs are handled by * qeth_cleanup_handled_pending(). 
*/ @@ -1267,8 +1265,7 @@ static void qeth_release_skbs(struct qeth_qdio_out_buffer *buf) } static void qeth_clear_output_buffer(struct qeth_qdio_out_q *queue, - struct qeth_qdio_out_buffer *buf, - enum qeth_qdio_buffer_states newbufstate) + struct qeth_qdio_out_buffer *buf) { int i; @@ -1276,23 +1273,19 @@ static void qeth_clear_output_buffer(struct qeth_qdio_out_q *queue, if (buf->buffer->element[0].sflags & SBAL_SFLAGS0_PCI_REQ) atomic_dec(&queue->set_pci_flags_count); - if (newbufstate == QETH_QDIO_BUF_EMPTY) { - qeth_release_skbs(buf); - } + qeth_release_skbs(buf); + for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(queue->card); ++i) { if (buf->buffer->element[i].addr && buf->is_header[i]) kmem_cache_free(qeth_core_header_cache, buf->buffer->element[i].addr); buf->is_header[i] = 0; - buf->buffer->element[i].length = 0; - buf->buffer->element[i].addr = NULL; - buf->buffer->element[i].eflags = 0; - buf->buffer->element[i].sflags = 0; } - buf->buffer->element[15].eflags = 0; - buf->buffer->element[15].sflags = 0; + + qeth_scrub_qdio_buffer(buf->buffer, + QETH_MAX_BUFFER_ELEMENTS(queue->card)); buf->next_element_to_fill = 0; - atomic_set(&buf->state, newbufstate); + atomic_set(&buf->state, QETH_QDIO_BUF_EMPTY); } static void qeth_clear_outq_buffers(struct qeth_qdio_out_q *q, int free) @@ -1303,7 +1296,7 @@ static void qeth_clear_outq_buffers(struct qeth_qdio_out_q *q, int free) if (!q->bufs[j]) continue; qeth_cleanup_handled_pending(q, j, 1); - qeth_clear_output_buffer(q, q->bufs[j], QETH_QDIO_BUF_EMPTY); + qeth_clear_output_buffer(q, q->bufs[j]); if (free) { kmem_cache_free(qeth_qdio_outbuf_cache, q->bufs[j]); q->bufs[j] = NULL; @@ -1544,8 +1537,6 @@ static void qeth_determine_card_type(struct qeth_card *card) card->qdio.default_out_queue = QETH_DEFAULT_QUEUE; card->info.type = CARD_RDEV(card)->id.driver_info; card->qdio.no_out_queues = QETH_MAX_QUEUES; - if (card->info.type == QETH_CARD_TYPE_IQD) - card->info.is_multicast_different = 0x0103; qeth_update_from_chp_desc(card); } @@ -2473,32 +2464,20 @@ static int qeth_ulp_setup(struct qeth_card *card) static int qeth_init_qdio_out_buf(struct qeth_qdio_out_q *q, int bidx) { - int rc; struct qeth_qdio_out_buffer *newbuf; - rc = 0; newbuf = kmem_cache_zalloc(qeth_qdio_outbuf_cache, GFP_ATOMIC); - if (!newbuf) { - rc = -ENOMEM; - goto out; - } + if (!newbuf) + return -ENOMEM; + newbuf->buffer = q->qdio_bufs[bidx]; skb_queue_head_init(&newbuf->skb_list); lockdep_set_class(&newbuf->skb_list.lock, &qdio_out_skb_queue_key); newbuf->q = q; - newbuf->aob = NULL; newbuf->next_pending = q->bufs[bidx]; atomic_set(&newbuf->state, QETH_QDIO_BUF_EMPTY); q->bufs[bidx] = newbuf; - if (q->bufstates) { - q->bufstates[bidx].user = newbuf; - QETH_CARD_TEXT_(q->card, 2, "nbs%d", bidx); - QETH_CARD_TEXT_(q->card, 2, "%lx", (long) newbuf); - QETH_CARD_TEXT_(q->card, 2, "%lx", - (long) newbuf->next_pending); - } -out: - return rc; + return 0; } static void qeth_free_qdio_out_buf(struct qeth_qdio_out_q *q) @@ -2908,8 +2887,7 @@ int qeth_init_qdio_queues(struct qeth_card *card) QDIO_MAX_BUFFERS_PER_Q); for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j) { qeth_clear_output_buffer(card->qdio.out_qs[i], - card->qdio.out_qs[i]->bufs[j], - QETH_QDIO_BUF_EMPTY); + card->qdio.out_qs[i]->bufs[j]); } card->qdio.out_qs[i]->card = card; card->qdio.out_qs[i]->next_buf_to_fill = 0; @@ -3076,7 +3054,7 @@ static struct qeth_cmd_buffer *qeth_get_adapter_cmd(struct qeth_card *card, return iob; } -int qeth_query_setadapterparms(struct qeth_card *card) +static int qeth_query_setadapterparms(struct 
qeth_card *card) { int rc; struct qeth_cmd_buffer *iob; @@ -3089,7 +3067,6 @@ int qeth_query_setadapterparms(struct qeth_card *card) rc = qeth_send_ipa_cmd(card, iob, qeth_query_setadapterparms_cb, NULL); return rc; } -EXPORT_SYMBOL_GPL(qeth_query_setadapterparms); static int qeth_query_ipassists_cb(struct qeth_card *card, struct qeth_reply *reply, unsigned long data) @@ -3129,7 +3106,8 @@ static int qeth_query_ipassists_cb(struct qeth_card *card, return 0; } -int qeth_query_ipassists(struct qeth_card *card, enum qeth_prot_versions prot) +static int qeth_query_ipassists(struct qeth_card *card, + enum qeth_prot_versions prot) { int rc; struct qeth_cmd_buffer *iob; @@ -3141,7 +3119,6 @@ int qeth_query_ipassists(struct qeth_card *card, enum qeth_prot_versions prot) rc = qeth_send_ipa_cmd(card, iob, qeth_query_ipassists_cb, NULL); return rc; } -EXPORT_SYMBOL_GPL(qeth_query_ipassists); static int qeth_query_switch_attributes_cb(struct qeth_card *card, struct qeth_reply *reply, unsigned long data) @@ -3180,7 +3157,6 @@ int qeth_query_switch_attributes(struct qeth_card *card, return qeth_send_ipa_cmd(card, iob, qeth_query_switch_attributes_cb, sw_info); } -EXPORT_SYMBOL_GPL(qeth_query_switch_attributes); static int qeth_query_setdiagass_cb(struct qeth_card *card, struct qeth_reply *reply, unsigned long data) @@ -3634,10 +3610,10 @@ out: } EXPORT_SYMBOL_GPL(qeth_configure_cq); - -static void qeth_qdio_cq_handler(struct qeth_card *card, - unsigned int qdio_err, - unsigned int queue, int first_element, int count) { +static void qeth_qdio_cq_handler(struct qeth_card *card, unsigned int qdio_err, + unsigned int queue, int first_element, + int count) +{ struct qeth_qdio_q *cq = card->qdio.c_q; int i; int rc; @@ -3663,25 +3639,17 @@ static void qeth_qdio_cq_handler(struct qeth_card *card, for (i = first_element; i < first_element + count; ++i) { int bidx = i % QDIO_MAX_BUFFERS_PER_Q; struct qdio_buffer *buffer = cq->qdio_bufs[bidx]; - int e; + int e = 0; - e = 0; while ((e < QDIO_MAX_ELEMENTS_PER_BUFFER) && buffer->element[e].addr) { unsigned long phys_aob_addr; phys_aob_addr = (unsigned long) buffer->element[e].addr; qeth_qdio_handle_aob(card, phys_aob_addr); - buffer->element[e].addr = NULL; - buffer->element[e].eflags = 0; - buffer->element[e].sflags = 0; - buffer->element[e].length = 0; - ++e; } - - buffer->element[15].eflags = 0; - buffer->element[15].sflags = 0; + qeth_scrub_qdio_buffer(buffer, QDIO_MAX_ELEMENTS_PER_BUFFER); } rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT, queue, card->qdio.c_q->next_buf_to_init, @@ -3760,11 +3728,7 @@ static void qeth_qdio_output_handler(struct ccw_device *ccwdev, qeth_notify_skbs(queue, buffer, TX_NOTIFY_PENDING); } - buffer->aob = queue->bufstates[bidx].aob; QETH_CARD_TEXT_(queue->card, 5, "pel%d", bidx); - QETH_CARD_TEXT(queue->card, 5, "aob"); - QETH_CARD_TEXT_(queue->card, 5, "%lx", - virt_to_phys(buffer->aob)); /* prepare the queue slot for re-use: */ qeth_scrub_qdio_buffer(buffer->buffer, @@ -3782,8 +3746,7 @@ static void qeth_qdio_output_handler(struct ccw_device *ccwdev, qeth_notify_skbs(queue, buffer, n); } - qeth_clear_output_buffer(queue, buffer, - QETH_QDIO_BUF_EMPTY); + qeth_clear_output_buffer(queue, buffer); } qeth_cleanup_handled_pending(queue, bidx, 0); } @@ -3810,15 +3773,11 @@ static inline int qeth_cut_iqd_prio(struct qeth_card *card, int queue_num) * Note: Function assumes that we have 4 outbound queues. 
*/ int qeth_get_priority_queue(struct qeth_card *card, struct sk_buff *skb, - int ipv, int cast_type) + int ipv) { __be16 *tci; u8 tos; - if (cast_type && card->info.is_multicast_different) - return card->info.is_multicast_different & - (card->qdio.no_out_queues - 1); - switch (card->qdio.do_prio_queueing) { case QETH_PRIO_Q_ING_TOS: case QETH_PRIO_Q_ING_PREC: @@ -5887,31 +5846,13 @@ static int qeth_core_restore(struct ccwgroup_device *gdev) return 0; } -static struct ccwgroup_driver qeth_core_ccwgroup_driver = { - .driver = { - .owner = THIS_MODULE, - .name = "qeth", - }, - .ccw_driver = &qeth_ccw_driver, - .setup = qeth_core_probe_device, - .remove = qeth_core_remove_device, - .set_online = qeth_core_set_online, - .set_offline = qeth_core_set_offline, - .shutdown = qeth_core_shutdown, - .prepare = NULL, - .complete = NULL, - .freeze = qeth_core_freeze, - .thaw = qeth_core_thaw, - .restore = qeth_core_restore, -}; - static ssize_t group_store(struct device_driver *ddrv, const char *buf, size_t count) { int err; - err = ccwgroup_create_dev(qeth_core_root_dev, - &qeth_core_ccwgroup_driver, 3, buf); + err = ccwgroup_create_dev(qeth_core_root_dev, to_ccwgroupdrv(ddrv), 3, + buf); return err ? err : count; } @@ -5929,6 +5870,25 @@ static const struct attribute_group *qeth_drv_attr_groups[] = { NULL, }; +static struct ccwgroup_driver qeth_core_ccwgroup_driver = { + .driver = { + .groups = qeth_drv_attr_groups, + .owner = THIS_MODULE, + .name = "qeth", + }, + .ccw_driver = &qeth_ccw_driver, + .setup = qeth_core_probe_device, + .remove = qeth_core_remove_device, + .set_online = qeth_core_set_online, + .set_offline = qeth_core_set_offline, + .shutdown = qeth_core_shutdown, + .prepare = NULL, + .complete = NULL, + .freeze = qeth_core_freeze, + .thaw = qeth_core_thaw, + .restore = qeth_core_restore, +}; + int qeth_do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) { struct qeth_card *card = dev->ml_priv; @@ -6620,7 +6580,6 @@ static int __init qeth_core_init(void) rc = ccw_driver_register(&qeth_ccw_driver); if (rc) goto ccw_err; - qeth_core_ccwgroup_driver.driver.groups = qeth_drv_attr_groups; rc = ccwgroup_driver_register(&qeth_core_ccwgroup_driver); if (rc) goto ccwgroup_err; diff --git a/drivers/s390/net/qeth_core_mpc.h b/drivers/s390/net/qeth_core_mpc.h index 878e62f35169..54c35224262a 100644 --- a/drivers/s390/net/qeth_core_mpc.h +++ b/drivers/s390/net/qeth_core_mpc.h @@ -64,6 +64,8 @@ enum qeth_card_types { QETH_CARD_TYPE_OSX = 2, }; +#define IS_IQD(card) ((card)->info.type == QETH_CARD_TYPE_IQD) + #define QETH_MPC_DIFINFO_LEN_INDICATES_LINK_TYPE 0x18 /* only the first two bytes are looked at in qeth_get_cardname_short */ enum qeth_link_types { diff --git a/drivers/s390/net/qeth_l2.h b/drivers/s390/net/qeth_l2.h index f2130051ca11..ddc615b431a8 100644 --- a/drivers/s390/net/qeth_l2.h +++ b/drivers/s390/net/qeth_l2.h @@ -14,6 +14,11 @@ extern const struct attribute_group *qeth_l2_attr_groups[]; int qeth_l2_create_device_attributes(struct device *); void qeth_l2_remove_device_attributes(struct device *); void qeth_l2_setup_bridgeport_attrs(struct qeth_card *card); +int qeth_bridgeport_query_ports(struct qeth_card *card, + enum qeth_sbp_roles *role, + enum qeth_sbp_states *state); +int qeth_bridgeport_setrole(struct qeth_card *card, enum qeth_sbp_roles role); +int qeth_bridgeport_an_set(struct qeth_card *card, int enable); int qeth_l2_vnicc_set_state(struct qeth_card *card, u32 vnicc, bool state); int qeth_l2_vnicc_get_state(struct qeth_card *card, u32 vnicc, bool *state); diff --git 
a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c index 2487f0aeb165..8ac243de7a9e 100644 --- a/drivers/s390/net/qeth_l2_main.c +++ b/drivers/s390/net/qeth_l2_main.c @@ -26,7 +26,6 @@ static int qeth_l2_set_offline(struct ccwgroup_device *); static int qeth_l2_stop(struct net_device *); -static void qeth_l2_set_rx_mode(struct net_device *); static void qeth_bridgeport_query_support(struct qeth_card *card); static void qeth_bridge_state_change(struct qeth_card *card, struct qeth_ipa_cmd *cmd); @@ -186,12 +185,12 @@ static void qeth_l2_del_all_macs(struct qeth_card *card) static int qeth_l2_get_cast_type(struct qeth_card *card, struct sk_buff *skb) { if (card->info.type == QETH_CARD_TYPE_OSN) - return RTN_UNSPEC; + return RTN_UNICAST; if (is_broadcast_ether_addr(skb->data)) return RTN_BROADCAST; if (is_multicast_ether_addr(skb->data)) return RTN_MULTICAST; - return RTN_UNSPEC; + return RTN_UNICAST; } static void qeth_l2_fill_header(struct qeth_hdr *hdr, struct sk_buff *skb, @@ -344,7 +343,6 @@ static int qeth_l2_vlan_rx_kill_vid(struct net_device *dev, rc = qeth_l2_send_setdelvlan(card, vid, IPA_CMD_DELVLAN); kfree(tmpid); } - qeth_l2_set_rx_mode(card->dev); return rc; } @@ -770,18 +768,13 @@ static netdev_tx_t qeth_l2_hard_start_xmit(struct sk_buff *skb, int tx_bytes = skb->len; int rc; - if (card->qdio.do_prio_queueing || (cast_type && - card->info.is_multicast_different)) - queue = card->qdio.out_qs[qeth_get_priority_queue(card, skb, - ipv, cast_type)]; - else - queue = card->qdio.out_qs[card->qdio.default_out_queue]; - if ((card->state != CARD_STATE_UP) || !card->lan_online) { card->stats.tx_carrier_errors++; goto tx_drop; } + queue = qeth_get_tx_queue(card, skb, ipv, cast_type); + if (card->options.performance_stats) { card->perf_stats.outbound_cnt++; card->perf_stats.outbound_start_time = qeth_get_micros(); @@ -1125,13 +1118,12 @@ static int __qeth_l2_set_online(struct ccwgroup_device *gdev, int recovery_mode) if (recovery_mode && card->info.type != QETH_CARD_TYPE_OSN) { __qeth_l2_open(card->dev); + qeth_l2_set_rx_mode(card->dev); } else { rtnl_lock(); dev_open(card->dev); rtnl_unlock(); } - /* this also sets saved unicast addresses */ - qeth_l2_set_rx_mode(card->dev); } /* let user_space know that device is online */ kobject_uevent(&gdev->dev.kobj, KOBJ_CHANGE); @@ -1877,7 +1869,6 @@ int qeth_bridgeport_query_ports(struct qeth_card *card, return rc; return qeth_bridgeport_makerc(card, &cbctl, IPA_SBP_QUERY_BRIDGE_PORTS); } -EXPORT_SYMBOL_GPL(qeth_bridgeport_query_ports); static int qeth_bridgeport_set_cb(struct qeth_card *card, struct qeth_reply *reply, unsigned long data) @@ -2025,7 +2016,6 @@ int qeth_bridgeport_an_set(struct qeth_card *card, int enable) rc = qdio_pnso_brinfo(schid, 0, &response, NULL, NULL); return qeth_anset_makerc(card, rc, response); } -EXPORT_SYMBOL_GPL(qeth_bridgeport_an_set); static bool qeth_bridgeport_is_in_use(struct qeth_card *card) { diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c index 5905dc63e256..062f62b49294 100644 --- a/drivers/s390/net/qeth_l3_main.c +++ b/drivers/s390/net/qeth_l3_main.c @@ -1978,17 +1978,17 @@ static int qeth_l3_get_cast_type(struct sk_buff *skb) (cast_type == RTN_MULTICAST) || (cast_type == RTN_ANYCAST)) return cast_type; - return RTN_UNSPEC; + return RTN_UNICAST; } rcu_read_unlock(); /* no neighbour (eg AF_PACKET), fall back to target's IP address ... */ if (be16_to_cpu(skb->protocol) == ETH_P_IPV6) return ipv6_addr_is_multicast(&ipv6_hdr(skb)->daddr) ? 
- RTN_MULTICAST : RTN_UNSPEC; + RTN_MULTICAST : RTN_UNICAST; else if (be16_to_cpu(skb->protocol) == ETH_P_IP) return ipv4_is_multicast(ip_hdr(skb)->daddr) ? - RTN_MULTICAST : RTN_UNSPEC; + RTN_MULTICAST : RTN_UNICAST; /* ... and MAC address */ if (ether_addr_equal_64bits(eth_hdr(skb)->h_dest, skb->dev->broadcast)) @@ -1997,22 +1997,21 @@ static int qeth_l3_get_cast_type(struct sk_buff *skb) return RTN_MULTICAST; /* default to unicast */ - return RTN_UNSPEC; + return RTN_UNICAST; } -static void qeth_l3_fill_af_iucv_hdr(struct qeth_card *card, - struct qeth_hdr *hdr, struct sk_buff *skb) +static void qeth_l3_fill_af_iucv_hdr(struct qeth_hdr *hdr, struct sk_buff *skb, + unsigned int data_len) { char daddr[16]; struct af_iucv_trans_hdr *iucv_hdr; memset(hdr, 0, sizeof(struct qeth_hdr)); hdr->hdr.l3.id = QETH_HEADER_TYPE_LAYER3; - hdr->hdr.l3.ext_flags = 0; - hdr->hdr.l3.length = skb->len - ETH_HLEN; + hdr->hdr.l3.length = data_len; hdr->hdr.l3.flags = QETH_HDR_IPV6 | QETH_CAST_UNICAST; - iucv_hdr = (struct af_iucv_trans_hdr *) (skb->data + ETH_HLEN); + iucv_hdr = (struct af_iucv_trans_hdr *)(skb_mac_header(skb) + ETH_HLEN); memset(daddr, 0, sizeof(daddr)); daddr[0] = 0xfe; daddr[1] = 0x80; @@ -2051,6 +2050,12 @@ static void qeth_l3_fill_header(struct qeth_card *card, struct qeth_hdr *hdr, hdr->hdr.l3.vlan_id = skb_vlan_tag_get(skb); } + if (!skb_is_gso(skb) && skb->ip_summed == CHECKSUM_PARTIAL) { + qeth_tx_csum(skb, &hdr->hdr.l3.ext_flags, ipv); + if (card->options.performance_stats) + card->perf_stats.tx_csum++; + } + /* OSA only: */ if (!ipv) { hdr->hdr.l3.flags = QETH_HDR_PASSTHRU; @@ -2156,104 +2161,141 @@ static int qeth_l3_get_elements_no_tso(struct qeth_card *card, return elements; } -static netdev_tx_t qeth_l3_hard_start_xmit(struct sk_buff *skb, - struct net_device *dev) +static int qeth_l3_xmit_offload(struct qeth_card *card, struct sk_buff *skb, + struct qeth_qdio_out_q *queue, int ipv, + int cast_type) { - int rc; - __be16 *tag; + const unsigned int hw_hdr_len = sizeof(struct qeth_hdr); + unsigned int frame_len, nr_frags; + unsigned char eth_hdr[ETH_HLEN]; + unsigned int hdr_elements = 0; struct qeth_hdr *hdr = NULL; - int hdr_elements = 0; - int elements; - struct qeth_card *card = dev->ml_priv; - struct sk_buff *new_skb = NULL; - int ipv = qeth_get_ip_version(skb); - int cast_type = qeth_l3_get_cast_type(skb); - struct qeth_qdio_out_q *queue = - card->qdio.out_qs[card->qdio.do_prio_queueing - || (cast_type && card->info.is_multicast_different) ? 
- qeth_get_priority_queue(card, skb, ipv, cast_type) : - card->qdio.default_out_queue]; - int tx_bytes = skb->len; + int elements, push_len, rc; unsigned int hd_len = 0; - bool use_tso; - int data_offset = -1; - unsigned int nr_frags; - - if (((card->info.type == QETH_CARD_TYPE_IQD) && - (((card->options.cq != QETH_CQ_ENABLED) && !ipv) || - ((card->options.cq == QETH_CQ_ENABLED) && - (be16_to_cpu(skb->protocol) != ETH_P_AF_IUCV)))) || - card->options.sniffer) - goto tx_drop; - if ((card->state != CARD_STATE_UP) || !card->lan_online) { - card->stats.tx_carrier_errors++; - goto tx_drop; + /* compress skb to fit into one IO buffer: */ + if (!qeth_get_elements_no(card, skb, 0, 0)) { + rc = skb_linearize(skb); + + if (card->options.performance_stats) { + if (rc) + card->perf_stats.tx_linfail++; + else + card->perf_stats.tx_lin++; + } + if (rc) + return rc; } - if ((cast_type == RTN_BROADCAST) && - (card->info.broadcast_capable == 0)) - goto tx_drop; + /* re-use the L2 header area for the HW header: */ + rc = skb_cow_head(skb, hw_hdr_len - ETH_HLEN); + if (rc) + return rc; + skb_copy_from_linear_data(skb, eth_hdr, ETH_HLEN); + skb_pull(skb, ETH_HLEN); + frame_len = skb->len; + nr_frags = skb_shinfo(skb)->nr_frags; - if (card->options.performance_stats) { - card->perf_stats.outbound_cnt++; - card->perf_stats.outbound_start_time = qeth_get_micros(); + push_len = qeth_push_hdr(skb, &hdr, hw_hdr_len); + if (push_len < 0) + return push_len; + if (!push_len) { + /* hdr was added discontiguous from skb->data */ + hd_len = hw_hdr_len; + hdr_elements = 1; } - /* Ignore segment size from skb_is_gso(), 1 page is always used. */ - use_tso = skb_is_gso(skb) && - (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4); + elements = qeth_get_elements_no(card, skb, hdr_elements, 0); + if (!elements) { + rc = -E2BIG; + goto out; + } + elements += hdr_elements; - if (card->info.type == QETH_CARD_TYPE_IQD) { - new_skb = skb; - data_offset = ETH_HLEN; - hd_len = sizeof(*hdr); - hdr = kmem_cache_alloc(qeth_core_header_cache, GFP_ATOMIC); - if (!hdr) - goto tx_drop; - hdr_elements++; - } else { - /* create a clone with writeable headroom */ - new_skb = skb_realloc_headroom(skb, sizeof(struct qeth_hdr_tso) - + VLAN_HLEN); - if (!new_skb) - goto tx_drop; + if (skb->protocol == htons(ETH_P_AF_IUCV)) + qeth_l3_fill_af_iucv_hdr(hdr, skb, frame_len); + else + qeth_l3_fill_header(card, hdr, skb, ipv, cast_type, frame_len); - if (ipv == 4) { - skb_pull(new_skb, ETH_HLEN); + if (IS_IQD(card)) { + rc = qeth_do_send_packet_fast(queue, skb, hdr, 0, hd_len); + } else { + /* TODO: drop skb_orphan() once TX completion is fast enough */ + skb_orphan(skb); + rc = qeth_do_send_packet(card, queue, skb, hdr, 0, hd_len, + elements); + } +out: + if (!rc) { + if (card->options.performance_stats && nr_frags) { + card->perf_stats.sg_skbs_sent++; + /* nr_frags + skb->data */ + card->perf_stats.sg_frags_sent += nr_frags + 1; } - - if (ipv != 4 && skb_vlan_tag_present(new_skb)) { - skb_push(new_skb, VLAN_HLEN); - skb_copy_to_linear_data(new_skb, new_skb->data + 4, 4); - skb_copy_to_linear_data_offset(new_skb, 4, - new_skb->data + 8, 4); - skb_copy_to_linear_data_offset(new_skb, 8, - new_skb->data + 12, 4); - tag = (__be16 *)(new_skb->data + 12); - *tag = cpu_to_be16(ETH_P_8021Q); - *(tag + 1) = cpu_to_be16(skb_vlan_tag_get(new_skb)); + } else { + if (!push_len) + kmem_cache_free(qeth_core_header_cache, hdr); + if (rc == -EBUSY) { + /* roll back to ETH header */ + skb_pull(skb, push_len); + skb_push(skb, ETH_HLEN); + skb_copy_to_linear_data(skb, eth_hdr, 
ETH_HLEN); } } + return rc; +} - netif_stop_queue(dev); +static int qeth_l3_xmit(struct qeth_card *card, struct sk_buff *skb, + struct qeth_qdio_out_q *queue, int ipv, int cast_type) +{ + unsigned int hd_len, nr_frags; + int elements, len, rc; + __be16 *tag; + struct qeth_hdr *hdr = NULL; + int hdr_elements = 0; + struct sk_buff *new_skb = NULL; + int tx_bytes = skb->len; + bool use_tso; + + /* Ignore segment size from skb_is_gso(), 1 page is always used. */ + use_tso = skb_is_gso(skb) && + (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4); + + /* create a clone with writeable headroom */ + new_skb = skb_realloc_headroom(skb, sizeof(struct qeth_hdr_tso) + + VLAN_HLEN); + if (!new_skb) + return -ENOMEM; + + if (ipv == 4) { + skb_pull(new_skb, ETH_HLEN); + } else if (skb_vlan_tag_present(new_skb)) { + skb_push(new_skb, VLAN_HLEN); + skb_copy_to_linear_data(new_skb, new_skb->data + 4, 4); + skb_copy_to_linear_data_offset(new_skb, 4, + new_skb->data + 8, 4); + skb_copy_to_linear_data_offset(new_skb, 8, + new_skb->data + 12, 4); + tag = (__be16 *)(new_skb->data + 12); + *tag = cpu_to_be16(ETH_P_8021Q); + *(tag + 1) = cpu_to_be16(skb_vlan_tag_get(new_skb)); + } /* fix hardware limitation: as long as we do not have sbal * chaining we can not send long frag lists */ - if ((card->info.type != QETH_CARD_TYPE_IQD) && - ((use_tso && !qeth_l3_get_elements_no_tso(card, new_skb, 1)) || - (!use_tso && !qeth_get_elements_no(card, new_skb, 0, 0)))) { - int lin_rc = skb_linearize(new_skb); + if ((use_tso && !qeth_l3_get_elements_no_tso(card, new_skb, 1)) || + (!use_tso && !qeth_get_elements_no(card, new_skb, 0, 0))) { + rc = skb_linearize(new_skb); if (card->options.performance_stats) { - if (lin_rc) + if (rc) card->perf_stats.tx_linfail++; else card->perf_stats.tx_lin++; } - if (lin_rc) - goto tx_drop; + if (rc) + goto out; } nr_frags = skb_shinfo(new_skb)->nr_frags; @@ -2265,60 +2307,37 @@ static netdev_tx_t qeth_l3_hard_start_xmit(struct sk_buff *skb, qeth_tso_fill_header(card, hdr, new_skb); hdr_elements++; } else { - if (data_offset < 0) { - hdr = skb_push(new_skb, sizeof(struct qeth_hdr)); - qeth_l3_fill_header(card, hdr, new_skb, ipv, cast_type, - new_skb->len - - sizeof(struct qeth_hdr)); - } else { - if (be16_to_cpu(new_skb->protocol) == ETH_P_AF_IUCV) - qeth_l3_fill_af_iucv_hdr(card, hdr, new_skb); - else { - qeth_l3_fill_header(card, hdr, new_skb, ipv, - cast_type, - new_skb->len - data_offset); - } - } - - if (new_skb->ip_summed == CHECKSUM_PARTIAL) { - qeth_tx_csum(new_skb, &hdr->hdr.l3.ext_flags, ipv); - if (card->options.performance_stats) - card->perf_stats.tx_csum++; - } + hdr = skb_push(new_skb, sizeof(struct qeth_hdr)); + qeth_l3_fill_header(card, hdr, new_skb, ipv, cast_type, + new_skb->len - sizeof(struct qeth_hdr)); } elements = use_tso ? qeth_l3_get_elements_no_tso(card, new_skb, hdr_elements) : - qeth_get_elements_no(card, new_skb, hdr_elements, - (data_offset > 0) ? 
data_offset : 0); + qeth_get_elements_no(card, new_skb, hdr_elements, 0); if (!elements) { - if (data_offset >= 0) - kmem_cache_free(qeth_core_header_cache, hdr); - goto tx_drop; + rc = -E2BIG; + goto out; } elements += hdr_elements; - if (card->info.type != QETH_CARD_TYPE_IQD) { - int len; - if (use_tso) { - hd_len = sizeof(struct qeth_hdr_tso) + - ip_hdrlen(new_skb) + tcp_hdrlen(new_skb); - len = hd_len; - } else { - len = sizeof(struct qeth_hdr_layer3); - } - - if (qeth_hdr_chk_and_bounce(new_skb, &hdr, len)) - goto tx_drop; - rc = qeth_do_send_packet(card, queue, new_skb, hdr, hd_len, - hd_len, elements); - } else - rc = qeth_do_send_packet_fast(queue, new_skb, hdr, data_offset, - hd_len); + if (use_tso) { + hd_len = sizeof(struct qeth_hdr_tso) + + ip_hdrlen(new_skb) + tcp_hdrlen(new_skb); + len = hd_len; + } else { + hd_len = 0; + len = sizeof(struct qeth_hdr_layer3); + } + if (qeth_hdr_chk_and_bounce(new_skb, &hdr, len)) { + rc = -EINVAL; + goto out; + } + rc = qeth_do_send_packet(card, queue, new_skb, hdr, hd_len, hd_len, + elements); +out: if (!rc) { - card->stats.tx_packets++; - card->stats.tx_bytes += tx_bytes; if (new_skb != skb) dev_kfree_skb_any(skb); if (card->options.performance_stats) { @@ -2332,30 +2351,68 @@ static netdev_tx_t qeth_l3_hard_start_xmit(struct sk_buff *skb, card->perf_stats.sg_frags_sent += nr_frags + 1; } } - rc = NETDEV_TX_OK; } else { - if (data_offset >= 0) - kmem_cache_free(qeth_core_header_cache, hdr); + if (new_skb != skb) + dev_kfree_skb_any(new_skb); + } + return rc; +} - if (rc == -EBUSY) { - if (new_skb != skb) - dev_kfree_skb_any(new_skb); - return NETDEV_TX_BUSY; - } else +static netdev_tx_t qeth_l3_hard_start_xmit(struct sk_buff *skb, + struct net_device *dev) +{ + int cast_type = qeth_l3_get_cast_type(skb); + struct qeth_card *card = dev->ml_priv; + int ipv = qeth_get_ip_version(skb); + struct qeth_qdio_out_q *queue; + int tx_bytes = skb->len; + int rc; + + if (IS_IQD(card)) { + if (card->options.sniffer) + goto tx_drop; + if ((card->options.cq != QETH_CQ_ENABLED && !ipv) || + (card->options.cq == QETH_CQ_ENABLED && + skb->protocol != htons(ETH_P_AF_IUCV))) goto tx_drop; } - netif_wake_queue(dev); - if (card->options.performance_stats) - card->perf_stats.outbound_time += qeth_get_micros() - - card->perf_stats.outbound_start_time; - return rc; + if (card->state != CARD_STATE_UP || !card->lan_online) { + card->stats.tx_carrier_errors++; + goto tx_drop; + } + + if (cast_type == RTN_BROADCAST && !card->info.broadcast_capable) + goto tx_drop; + + queue = qeth_get_tx_queue(card, skb, ipv, cast_type); + + if (card->options.performance_stats) { + card->perf_stats.outbound_cnt++; + card->perf_stats.outbound_start_time = qeth_get_micros(); + } + netif_stop_queue(dev); + + if (IS_IQD(card) || (!skb_is_gso(skb) && ipv == 4)) + rc = qeth_l3_xmit_offload(card, skb, queue, ipv, cast_type); + else + rc = qeth_l3_xmit(card, skb, queue, ipv, cast_type); + + if (!rc) { + card->stats.tx_packets++; + card->stats.tx_bytes += tx_bytes; + if (card->options.performance_stats) + card->perf_stats.outbound_time += qeth_get_micros() - + card->perf_stats.outbound_start_time; + netif_wake_queue(dev); + return NETDEV_TX_OK; + } else if (rc == -EBUSY) { + return NETDEV_TX_BUSY; + } /* else fall through */ tx_drop: card->stats.tx_dropped++; card->stats.tx_errors++; - if ((new_skb != skb) && new_skb) - dev_kfree_skb_any(new_skb); dev_kfree_skb_any(skb); netif_wake_queue(dev); return NETDEV_TX_OK; @@ -2497,9 +2554,6 @@ static int qeth_l3_setup_netdev(struct qeth_card *card) if 
(!(card->info.unique_id & UNIQUE_ID_NOT_BY_CARD)) card->dev->dev_id = card->info.unique_id & 0xffff; - card->dev->hw_features |= NETIF_F_SG; - card->dev->vlan_features |= NETIF_F_SG; - if (!card->info.guestlan) { card->dev->features |= NETIF_F_SG; card->dev->hw_features |= NETIF_F_TSO | @@ -2519,6 +2573,7 @@ static int qeth_l3_setup_netdev(struct qeth_card *card) return -ENODEV; card->dev->flags |= IFF_NOARP; card->dev->netdev_ops = &qeth_l3_netdev_ops; + rc = qeth_l3_iqd_read_initial_mac(card); if (rc) return rc; @@ -2534,12 +2589,18 @@ static int qeth_l3_setup_netdev(struct qeth_card *card) card->dev->max_mtu = ETH_MAX_MTU; card->dev->dev_port = card->info.portno; card->dev->ethtool_ops = &qeth_l3_ethtool_ops; + card->dev->priv_flags &= ~IFF_TX_SKB_SHARING; + card->dev->needed_headroom = sizeof(struct qeth_hdr) - ETH_HLEN; card->dev->features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER; + card->dev->hw_features |= NETIF_F_SG; + card->dev->vlan_features |= NETIF_F_SG; + netif_keep_dst(card->dev); - netif_set_gso_max_size(card->dev, (QETH_MAX_BUFFER_ELEMENTS(card) - 1) * - PAGE_SIZE); + if (card->dev->hw_features & NETIF_F_TSO) + netif_set_gso_max_size(card->dev, + PAGE_SIZE * (QETH_MAX_BUFFER_ELEMENTS(card) - 1)); SET_NETDEV_DEV(card->dev, &card->gdev->dev); netif_napi_add(card->dev, &card->napi, qeth_poll, QETH_NAPI_WEIGHT); @@ -2666,11 +2727,12 @@ static int __qeth_l3_set_online(struct ccwgroup_device *gdev, int recovery_mode) qeth_enable_hw_features(card->dev); if (recover_flag == CARD_STATE_RECOVER) { rtnl_lock(); - if (recovery_mode) + if (recovery_mode) { __qeth_l3_open(card->dev); - else + qeth_l3_set_rx_mode(card->dev); + } else { dev_open(card->dev); - qeth_l3_set_rx_mode(card->dev); + } rtnl_unlock(); } qeth_trace_features(card); diff --git a/drivers/staging/netlogic/xlr_net.c b/drivers/staging/netlogic/xlr_net.c index e461168313bf..4e6611e4c59b 100644 --- a/drivers/staging/netlogic/xlr_net.c +++ b/drivers/staging/netlogic/xlr_net.c @@ -290,13 +290,6 @@ static netdev_tx_t xlr_net_start_xmit(struct sk_buff *skb, return NETDEV_TX_OK; } -static u16 xlr_net_select_queue(struct net_device *ndev, struct sk_buff *skb, - void *accel_priv, - select_queue_fallback_t fallback) -{ - return (u16)smp_processor_id(); -} - static void xlr_hw_set_mac_addr(struct net_device *ndev) { struct xlr_net_priv *priv = netdev_priv(ndev); @@ -403,7 +396,7 @@ static const struct net_device_ops xlr_netdev_ops = { .ndo_open = xlr_net_open, .ndo_stop = xlr_net_stop, .ndo_start_xmit = xlr_net_start_xmit, - .ndo_select_queue = xlr_net_select_queue, + .ndo_select_queue = dev_pick_tx_cpu_id, .ndo_set_mac_address = xlr_net_set_mac_addr, .ndo_set_rx_mode = xlr_set_rx_mode, .ndo_get_stats64 = xlr_stats, diff --git a/drivers/staging/rtl8188eu/include/wifi.h b/drivers/staging/rtl8188eu/include/wifi.h index 084a246eec19..6790b7c8cfb1 100644 --- a/drivers/staging/rtl8188eu/include/wifi.h +++ b/drivers/staging/rtl8188eu/include/wifi.h @@ -575,7 +575,6 @@ enum ht_cap_ampdu_factor { * According to IEEE802.11n spec size varies from 8K to 64K (in powers of 2) */ #define IEEE80211_MIN_AMPDU_BUF 0x8 -#define IEEE80211_MAX_AMPDU_BUF 0x40 #define OP_MODE_PURE 0 diff --git a/drivers/staging/rtl8188eu/os_dep/os_intfs.c b/drivers/staging/rtl8188eu/os_dep/os_intfs.c index add1ba00f3e9..38e85c8a85c8 100644 --- a/drivers/staging/rtl8188eu/os_dep/os_intfs.c +++ b/drivers/staging/rtl8188eu/os_dep/os_intfs.c @@ -253,7 +253,8 @@ static unsigned int rtw_classify8021d(struct sk_buff *skb) } static u16 
rtw_select_queue(struct net_device *dev, struct sk_buff *skb, - void *accel_priv, select_queue_fallback_t fallback) + struct net_device *sb_dev, + select_queue_fallback_t fallback) { struct adapter *padapter = rtw_netdev_priv(dev); struct mlme_priv *pmlmepriv = &padapter->mlmepriv; diff --git a/drivers/staging/rtl8712/wifi.h b/drivers/staging/rtl8712/wifi.h index 0ed2f44ab4e9..00a4302e9983 100644 --- a/drivers/staging/rtl8712/wifi.h +++ b/drivers/staging/rtl8712/wifi.h @@ -574,7 +574,6 @@ struct ieee80211_ht_addt_info { * According to IEEE802.11n spec size varies from 8K to 64K (in powers of 2) */ #define IEEE80211_MIN_AMPDU_BUF 0x8 -#define IEEE80211_MAX_AMPDU_BUF 0x40 /* Spatial Multiplexing Power Save Modes */ diff --git a/drivers/staging/rtl8723bs/include/wifi.h b/drivers/staging/rtl8723bs/include/wifi.h index 08bc79840b23..559bf2606fb7 100644 --- a/drivers/staging/rtl8723bs/include/wifi.h +++ b/drivers/staging/rtl8723bs/include/wifi.h @@ -799,7 +799,6 @@ enum HT_CAP_AMPDU_FACTOR { * According to IEEE802.11n spec size varies from 8K to 64K (in powers of 2) */ #define IEEE80211_MIN_AMPDU_BUF 0x8 -#define IEEE80211_MAX_AMPDU_BUF 0x40 /* Spatial Multiplexing Power Save Modes */ diff --git a/drivers/staging/rtl8723bs/os_dep/os_intfs.c b/drivers/staging/rtl8723bs/os_dep/os_intfs.c index ace68f023b49..181642358e3f 100644 --- a/drivers/staging/rtl8723bs/os_dep/os_intfs.c +++ b/drivers/staging/rtl8723bs/os_dep/os_intfs.c @@ -403,10 +403,9 @@ static unsigned int rtw_classify8021d(struct sk_buff *skb) } -static u16 rtw_select_queue(struct net_device *dev, struct sk_buff *skb - , void *accel_priv - , select_queue_fallback_t fallback -) +static u16 rtw_select_queue(struct net_device *dev, struct sk_buff *skb, + struct net_device *sb_dev, + select_queue_fallback_t fallback) { struct adapter *padapter = rtw_netdev_priv(dev); struct mlme_priv *pmlmepriv = &padapter->mlmepriv; diff --git a/drivers/staging/rtlwifi/base.c b/drivers/staging/rtlwifi/base.c index e46e47d93d7d..094827c1879a 100644 --- a/drivers/staging/rtlwifi/base.c +++ b/drivers/staging/rtlwifi/base.c @@ -1838,7 +1838,7 @@ void rtl_rx_ampdu_apply(struct rtl_priv *rtlpriv) reject_agg, ctrl_agg_size, agg_size); rtlpriv->hw->max_rx_aggregation_subframes = - (ctrl_agg_size ? agg_size : IEEE80211_MAX_AMPDU_BUF); + (ctrl_agg_size ? 
agg_size : IEEE80211_MAX_AMPDU_BUF_HT); } /********************************************************* diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c index 29756d88799b..b2240361f1a1 100644 --- a/drivers/vhost/net.c +++ b/drivers/vhost/net.c @@ -396,13 +396,10 @@ static inline unsigned long busy_clock(void) return local_clock() >> 10; } -static bool vhost_can_busy_poll(struct vhost_dev *dev, - unsigned long endtime) +static bool vhost_can_busy_poll(unsigned long endtime) { - return likely(!need_resched()) && - likely(!time_after(busy_clock(), endtime)) && - likely(!signal_pending(current)) && - !vhost_has_work(dev); + return likely(!need_resched() && !time_after(busy_clock(), endtime) && + !signal_pending(current)); } static void vhost_net_disable_vq(struct vhost_net *n, @@ -434,7 +431,8 @@ static int vhost_net_enable_vq(struct vhost_net *n, static int vhost_net_tx_get_vq_desc(struct vhost_net *net, struct vhost_virtqueue *vq, struct iovec iov[], unsigned int iov_size, - unsigned int *out_num, unsigned int *in_num) + unsigned int *out_num, unsigned int *in_num, + bool *busyloop_intr) { unsigned long uninitialized_var(endtime); int r = vhost_get_vq_desc(vq, vq->iov, ARRAY_SIZE(vq->iov), @@ -443,9 +441,15 @@ static int vhost_net_tx_get_vq_desc(struct vhost_net *net, if (r == vq->num && vq->busyloop_timeout) { preempt_disable(); endtime = busy_clock() + vq->busyloop_timeout; - while (vhost_can_busy_poll(vq->dev, endtime) && - vhost_vq_avail_empty(vq->dev, vq)) + while (vhost_can_busy_poll(endtime)) { + if (vhost_has_work(vq->dev)) { + *busyloop_intr = true; + break; + } + if (!vhost_vq_avail_empty(vq->dev, vq)) + break; cpu_relax(); + } preempt_enable(); r = vhost_get_vq_desc(vq, vq->iov, ARRAY_SIZE(vq->iov), out_num, in_num, NULL, NULL); @@ -501,20 +505,24 @@ static void handle_tx(struct vhost_net *net) zcopy = nvq->ubufs; for (;;) { + bool busyloop_intr; + /* Release DMAs done buffers first */ if (zcopy) vhost_zerocopy_signal_used(net, vq); - + busyloop_intr = false; head = vhost_net_tx_get_vq_desc(net, vq, vq->iov, ARRAY_SIZE(vq->iov), - &out, &in); + &out, &in, &busyloop_intr); /* On error, stop handling until the next kick. */ if (unlikely(head < 0)) break; /* Nothing new? Wait for eventfd to tell us they refilled. 
*/ if (head == vq->num) { - if (unlikely(vhost_enable_notify(&net->dev, vq))) { + if (unlikely(busyloop_intr)) { + vhost_poll_queue(&vq->poll); + } else if (unlikely(vhost_enable_notify(&net->dev, vq))) { vhost_disable_notify(&net->dev, vq); continue; } @@ -645,41 +653,50 @@ static void vhost_rx_signal_used(struct vhost_net_virtqueue *nvq) nvq->done_idx = 0; } -static int vhost_net_rx_peek_head_len(struct vhost_net *net, struct sock *sk) +static int vhost_net_rx_peek_head_len(struct vhost_net *net, struct sock *sk, + bool *busyloop_intr) { - struct vhost_net_virtqueue *rvq = &net->vqs[VHOST_NET_VQ_RX]; - struct vhost_net_virtqueue *nvq = &net->vqs[VHOST_NET_VQ_TX]; - struct vhost_virtqueue *vq = &nvq->vq; + struct vhost_net_virtqueue *rnvq = &net->vqs[VHOST_NET_VQ_RX]; + struct vhost_net_virtqueue *tnvq = &net->vqs[VHOST_NET_VQ_TX]; + struct vhost_virtqueue *rvq = &rnvq->vq; + struct vhost_virtqueue *tvq = &tnvq->vq; unsigned long uninitialized_var(endtime); - int len = peek_head_len(rvq, sk); + int len = peek_head_len(rnvq, sk); - if (!len && vq->busyloop_timeout) { + if (!len && tvq->busyloop_timeout) { /* Flush batched heads first */ - vhost_rx_signal_used(rvq); + vhost_rx_signal_used(rnvq); /* Both tx vq and rx socket were polled here */ - mutex_lock_nested(&vq->mutex, 1); - vhost_disable_notify(&net->dev, vq); + mutex_lock_nested(&tvq->mutex, 1); + vhost_disable_notify(&net->dev, tvq); preempt_disable(); - endtime = busy_clock() + vq->busyloop_timeout; + endtime = busy_clock() + tvq->busyloop_timeout; - while (vhost_can_busy_poll(&net->dev, endtime) && - !sk_has_rx_data(sk) && - vhost_vq_avail_empty(&net->dev, vq)) + while (vhost_can_busy_poll(endtime)) { + if (vhost_has_work(&net->dev)) { + *busyloop_intr = true; + break; + } + if ((sk_has_rx_data(sk) && + !vhost_vq_avail_empty(&net->dev, rvq)) || + !vhost_vq_avail_empty(&net->dev, tvq)) + break; cpu_relax(); + } preempt_enable(); - if (!vhost_vq_avail_empty(&net->dev, vq)) - vhost_poll_queue(&vq->poll); - else if (unlikely(vhost_enable_notify(&net->dev, vq))) { - vhost_disable_notify(&net->dev, vq); - vhost_poll_queue(&vq->poll); + if (!vhost_vq_avail_empty(&net->dev, tvq)) { + vhost_poll_queue(&tvq->poll); + } else if (unlikely(vhost_enable_notify(&net->dev, tvq))) { + vhost_disable_notify(&net->dev, tvq); + vhost_poll_queue(&tvq->poll); } - mutex_unlock(&vq->mutex); + mutex_unlock(&tvq->mutex); - len = peek_head_len(rvq, sk); + len = peek_head_len(rnvq, sk); } return len; @@ -786,6 +803,7 @@ static void handle_rx(struct vhost_net *net) s16 headcount; size_t vhost_hlen, sock_hlen; size_t vhost_len, sock_len; + bool busyloop_intr = false; struct socket *sock; struct iov_iter fixup; __virtio16 num_buffers; @@ -809,7 +827,8 @@ static void handle_rx(struct vhost_net *net) vq->log : NULL; mergeable = vhost_has_feature(vq, VIRTIO_NET_F_MRG_RXBUF); - while ((sock_len = vhost_net_rx_peek_head_len(net, sock->sk))) { + while ((sock_len = vhost_net_rx_peek_head_len(net, sock->sk, + &busyloop_intr))) { sock_len += sock_hlen; vhost_len = sock_len + vhost_hlen; headcount = get_rx_bufs(vq, vq->heads + nvq->done_idx, @@ -820,7 +839,9 @@ static void handle_rx(struct vhost_net *net) goto out; /* OK, now we need to know about added descriptors. */ if (!headcount) { - if (unlikely(vhost_enable_notify(&net->dev, vq))) { + if (unlikely(busyloop_intr)) { + vhost_poll_queue(&vq->poll); + } else if (unlikely(vhost_enable_notify(&net->dev, vq))) { /* They have slipped one in as we were * doing that: check again. 
*/ vhost_disable_notify(&net->dev, vq); @@ -830,6 +851,7 @@ static void handle_rx(struct vhost_net *net) * they refilled. */ goto out; } + busyloop_intr = false; if (nvq->rx_ring) msg.msg_control = vhost_net_buf_consume(&nvq->rxq); /* On overrun, truncate and discard */ @@ -896,7 +918,10 @@ static void handle_rx(struct vhost_net *net) goto out; } } - vhost_net_enable_vq(net, vq); + if (unlikely(busyloop_intr)) + vhost_poll_queue(&vq->poll); + else + vhost_net_enable_vq(net, vq); out: vhost_rx_signal_used(nvq); mutex_unlock(&vq->mutex); diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h index bf53d893ad02..57f20a0a7794 100644 --- a/include/linux/cpumask.h +++ b/include/linux/cpumask.h @@ -115,12 +115,17 @@ extern struct cpumask __cpu_active_mask; #define cpu_active(cpu) ((cpu) == 0) #endif -/* verify cpu argument to cpumask_* operators */ -static inline unsigned int cpumask_check(unsigned int cpu) +static inline void cpu_max_bits_warn(unsigned int cpu, unsigned int bits) { #ifdef CONFIG_DEBUG_PER_CPU_MAPS - WARN_ON_ONCE(cpu >= nr_cpumask_bits); + WARN_ON_ONCE(cpu >= bits); #endif /* CONFIG_DEBUG_PER_CPU_MAPS */ +} + +/* verify cpu argument to cpumask_* operators */ +static inline unsigned int cpumask_check(unsigned int cpu) +{ + cpu_max_bits_warn(cpu, nr_cpumask_bits); return cpu; } diff --git a/include/linux/etherdevice.h b/include/linux/etherdevice.h index 79563840c295..572e11bb8696 100644 --- a/include/linux/etherdevice.h +++ b/include/linux/etherdevice.h @@ -59,8 +59,7 @@ struct net_device *devm_alloc_etherdev_mqs(struct device *dev, int sizeof_priv, unsigned int rxqs); #define devm_alloc_etherdev(dev, sizeof_priv) devm_alloc_etherdev_mqs(dev, sizeof_priv, 1, 1) -struct sk_buff **eth_gro_receive(struct sk_buff **head, - struct sk_buff *skb); +struct sk_buff *eth_gro_receive(struct list_head *head, struct sk_buff *skb); int eth_gro_complete(struct sk_buff *skb, int nhoff); /* Reserved Ethernet Addresses per IEEE 802.1Q */ diff --git a/include/linux/fsl/ptp_qoriq.h b/include/linux/fsl/ptp_qoriq.h index b462d9ea8007..dc3dac40f069 100644 --- a/include/linux/fsl/ptp_qoriq.h +++ b/include/linux/fsl/ptp_qoriq.h @@ -11,9 +11,8 @@ /* * qoriq ptp registers - * Generated by regen.tcl on Thu May 13 01:38:57 PM CEST 2010 */ -struct qoriq_ptp_registers { +struct ctrl_regs { u32 tmr_ctrl; /* Timer control register */ u32 tmr_tevent; /* Timestamp event register */ u32 tmr_temask; /* Timer event mask register */ @@ -28,22 +27,47 @@ struct qoriq_ptp_registers { u8 res1[4]; u32 tmroff_h; /* Timer offset high */ u32 tmroff_l; /* Timer offset low */ - u8 res2[8]; +}; + +struct alarm_regs { u32 tmr_alarm1_h; /* Timer alarm 1 high register */ u32 tmr_alarm1_l; /* Timer alarm 1 high register */ u32 tmr_alarm2_h; /* Timer alarm 2 high register */ u32 tmr_alarm2_l; /* Timer alarm 2 high register */ - u8 res3[48]; +}; + +struct fiper_regs { u32 tmr_fiper1; /* Timer fixed period interval */ u32 tmr_fiper2; /* Timer fixed period interval */ u32 tmr_fiper3; /* Timer fixed period interval */ - u8 res4[20]; +}; + +struct etts_regs { u32 tmr_etts1_h; /* Timestamp of general purpose external trigger */ u32 tmr_etts1_l; /* Timestamp of general purpose external trigger */ u32 tmr_etts2_h; /* Timestamp of general purpose external trigger */ u32 tmr_etts2_l; /* Timestamp of general purpose external trigger */ }; +struct qoriq_ptp_registers { + struct ctrl_regs __iomem *ctrl_regs; + struct alarm_regs __iomem *alarm_regs; + struct fiper_regs __iomem *fiper_regs; + struct etts_regs __iomem *etts_regs; +}; + +/* 
Offset definitions for the four register groups */ +#define CTRL_REGS_OFFSET 0x0 +#define ALARM_REGS_OFFSET 0x40 +#define FIPER_REGS_OFFSET 0x80 +#define ETTS_REGS_OFFSET 0xa0 + +#define FMAN_CTRL_REGS_OFFSET 0x80 +#define FMAN_ALARM_REGS_OFFSET 0xb8 +#define FMAN_FIPER_REGS_OFFSET 0xd0 +#define FMAN_ETTS_REGS_OFFSET 0xe0 + + /* Bit definitions for the TMR_CTRL register */ #define ALM1P (1<<31) /* Alarm1 output polarity */ #define ALM2P (1<<30) /* Alarm2 output polarity */ @@ -105,10 +129,10 @@ struct qoriq_ptp_registers { #define DRIVER "ptp_qoriq" #define DEFAULT_CKSEL 1 #define N_EXT_TS 2 -#define REG_SIZE sizeof(struct qoriq_ptp_registers) struct qoriq_ptp { - struct qoriq_ptp_registers __iomem *regs; + void __iomem *base; + struct qoriq_ptp_registers regs; spinlock_t lock; /* protects regs */ struct ptp_clock *clock; struct ptp_clock_info caps; diff --git a/include/linux/hwmon.h b/include/linux/hwmon.h index e5fd2707b6df..9493d4a388db 100644 --- a/include/linux/hwmon.h +++ b/include/linux/hwmon.h @@ -93,6 +93,7 @@ enum hwmon_temp_attributes { #define HWMON_T_MIN_ALARM BIT(hwmon_temp_min_alarm) #define HWMON_T_MAX_ALARM BIT(hwmon_temp_max_alarm) #define HWMON_T_CRIT_ALARM BIT(hwmon_temp_crit_alarm) +#define HWMON_T_LCRIT_ALARM BIT(hwmon_temp_lcrit_alarm) #define HWMON_T_EMERGENCY_ALARM BIT(hwmon_temp_emergency_alarm) #define HWMON_T_FAULT BIT(hwmon_temp_fault) #define HWMON_T_OFFSET BIT(hwmon_temp_offset) @@ -187,12 +188,16 @@ enum hwmon_power_attributes { hwmon_power_cap_hyst, hwmon_power_cap_max, hwmon_power_cap_min, + hwmon_power_min, hwmon_power_max, hwmon_power_crit, + hwmon_power_lcrit, hwmon_power_label, hwmon_power_alarm, hwmon_power_cap_alarm, + hwmon_power_min_alarm, hwmon_power_max_alarm, + hwmon_power_lcrit_alarm, hwmon_power_crit_alarm, }; @@ -213,12 +218,16 @@ enum hwmon_power_attributes { #define HWMON_P_CAP_HYST BIT(hwmon_power_cap_hyst) #define HWMON_P_CAP_MAX BIT(hwmon_power_cap_max) #define HWMON_P_CAP_MIN BIT(hwmon_power_cap_min) +#define HWMON_P_MIN BIT(hwmon_power_min) #define HWMON_P_MAX BIT(hwmon_power_max) +#define HWMON_P_LCRIT BIT(hwmon_power_lcrit) #define HWMON_P_CRIT BIT(hwmon_power_crit) #define HWMON_P_LABEL BIT(hwmon_power_label) #define HWMON_P_ALARM BIT(hwmon_power_alarm) #define HWMON_P_CAP_ALARM BIT(hwmon_power_cap_alarm) +#define HWMON_P_MIN_ALARM BIT(hwmon_power_min_alarm) #define HWMON_P_MAX_ALARM BIT(hwmon_power_max_alarm) +#define HWMON_P_LCRIT_ALARM BIT(hwmon_power_lcrit_alarm) #define HWMON_P_CRIT_ALARM BIT(hwmon_power_crit_alarm) enum hwmon_energy_attributes { @@ -389,4 +398,27 @@ devm_hwmon_device_register_with_info(struct device *dev, void hwmon_device_unregister(struct device *dev); void devm_hwmon_device_unregister(struct device *dev); +/** + * hwmon_is_bad_char - Is the char invalid in a hwmon name + * @ch: the char to be considered + * + * hwmon_is_bad_char() can be used to determine if the given character + * may not be used in a hwmon name. + * + * Returns true if the char is invalid, false otherwise. 
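The ptp_qoriq register block is no longer one flat struct: each of the four groups is located from a single ioremapped window using the plain or FMAN offsets above. A plausible probe-time mapping under that assumption (the helper name and the fman flag are illustrative, not taken from the driver):

static void qoriq_ptp_map_regs(struct qoriq_ptp *ptp, bool fman)
{
        struct qoriq_ptp_registers *regs = &ptp->regs;
        void __iomem *base = ptp->base;

        regs->ctrl_regs  = base + (fman ? FMAN_CTRL_REGS_OFFSET  : CTRL_REGS_OFFSET);
        regs->alarm_regs = base + (fman ? FMAN_ALARM_REGS_OFFSET : ALARM_REGS_OFFSET);
        regs->fiper_regs = base + (fman ? FMAN_FIPER_REGS_OFFSET : FIPER_REGS_OFFSET);
        regs->etts_regs  = base + (fman ? FMAN_ETTS_REGS_OFFSET  : ETTS_REGS_OFFSET);
}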
+ */ +static inline bool hwmon_is_bad_char(const char ch) +{ + switch (ch) { + case '-': + case '*': + case ' ': + case '\t': + case '\n': + return true; + default: + return false; + } +} + #endif diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h index 8fe7e4306816..9c03a7d5e400 100644 --- a/include/linux/ieee80211.h +++ b/include/linux/ieee80211.h @@ -1433,11 +1433,13 @@ struct ieee80211_ht_operation { #define IEEE80211_DELBA_PARAM_INITIATOR_MASK 0x0800 /* - * A-PMDU buffer sizes - * According to IEEE802.11n spec size varies from 8K to 64K (in powers of 2) + * A-MPDU buffer sizes + * According to HT size varies from 8 to 64 frames + * HE adds the ability to have up to 256 frames. */ -#define IEEE80211_MIN_AMPDU_BUF 0x8 -#define IEEE80211_MAX_AMPDU_BUF 0x40 +#define IEEE80211_MIN_AMPDU_BUF 0x8 +#define IEEE80211_MAX_AMPDU_BUF_HT 0x40 +#define IEEE80211_MAX_AMPDU_BUF 0x100 /* Spatial Multiplexing Power Save Modes (for capability) */ @@ -1539,6 +1541,106 @@ struct ieee80211_vht_operation { __le16 basic_mcs_set; } __packed; +/** + * struct ieee80211_he_cap_elem - HE capabilities element + * + * This structure is the "HE capabilities element" fixed fields as + * described in P802.11ax_D2.0 section 9.4.2.237.2 and 9.4.2.237.3 + */ +struct ieee80211_he_cap_elem { + u8 mac_cap_info[5]; + u8 phy_cap_info[9]; +} __packed; + +#define IEEE80211_TX_RX_MCS_NSS_DESC_MAX_LEN 5 + +/** + * enum ieee80211_he_mcs_support - HE MCS support definitions + * @IEEE80211_HE_MCS_SUPPORT_0_7: MCSes 0-7 are supported for the + * number of streams + * @IEEE80211_HE_MCS_SUPPORT_0_9: MCSes 0-9 are supported + * @IEEE80211_HE_MCS_SUPPORT_0_11: MCSes 0-11 are supported + * @IEEE80211_HE_MCS_NOT_SUPPORTED: This number of streams isn't supported + * + * These definitions are used in each 2-bit subfield of the rx_mcs_* + * and tx_mcs_* fields of &struct ieee80211_he_mcs_nss_supp, which are + * both split into 8 subfields by number of streams. These values indicate + * which MCSes are supported for the number of streams the value appears + * for. + */ +enum ieee80211_he_mcs_support { + IEEE80211_HE_MCS_SUPPORT_0_7 = 0, + IEEE80211_HE_MCS_SUPPORT_0_9 = 1, + IEEE80211_HE_MCS_SUPPORT_0_11 = 2, + IEEE80211_HE_MCS_NOT_SUPPORTED = 3, +}; + +/** + * struct ieee80211_he_mcs_nss_supp - HE Tx/Rx HE MCS NSS Support Field + * + * This structure holds the data required for the Tx/Rx HE MCS NSS Support Field + * described in P802.11ax_D2.0 section 9.4.2.237.4 + * + * @rx_mcs_80: Rx MCS map 2 bits for each stream, total 8 streams, for channel + * widths less than 80MHz. + * @tx_mcs_80: Tx MCS map 2 bits for each stream, total 8 streams, for channel + * widths less than 80MHz. + * @rx_mcs_160: Rx MCS map 2 bits for each stream, total 8 streams, for channel + * width 160MHz. + * @tx_mcs_160: Tx MCS map 2 bits for each stream, total 8 streams, for channel + * width 160MHz. + * @rx_mcs_80p80: Rx MCS map 2 bits for each stream, total 8 streams, for + * channel width 80p80MHz. + * @tx_mcs_80p80: Tx MCS map 2 bits for each stream, total 8 streams, for + * channel width 80p80MHz. 
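Returning to the hwmon_is_bad_char() helper added in the hwmon.h hunk: a typical (purely hypothetical) caller sanitizes a candidate name before registration instead of rejecting it outright, for example:

static void sanitize_hwmon_name(char *name)
{
        char *p;

        for (p = name; *p; p++)
                if (hwmon_is_bad_char(*p))
                        *p = '_';        /* replace '-', '*' and whitespace */
}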
+ */ +struct ieee80211_he_mcs_nss_supp { + __le16 rx_mcs_80; + __le16 tx_mcs_80; + __le16 rx_mcs_160; + __le16 tx_mcs_160; + __le16 rx_mcs_80p80; + __le16 tx_mcs_80p80; +} __packed; + +/** + * struct ieee80211_he_operation - HE capabilities element + * + * This structure is the "HE operation element" fields as + * described in P802.11ax_D2.0 section 9.4.2.238 + */ +struct ieee80211_he_operation { + __le32 he_oper_params; + __le16 he_mcs_nss_set; + /* Optional 0,1,3 or 4 bytes: depends on @he_oper_params */ + u8 optional[0]; +} __packed; + +/** + * struct ieee80211_he_mu_edca_param_ac_rec - MU AC Parameter Record field + * + * This structure is the "MU AC Parameter Record" fields as + * described in P802.11ax_D2.0 section 9.4.2.240 + */ +struct ieee80211_he_mu_edca_param_ac_rec { + u8 aifsn; + u8 ecw_min_max; + u8 mu_edca_timer; +} __packed; + +/** + * struct ieee80211_mu_edca_param_set - MU EDCA Parameter Set element + * + * This structure is the "MU EDCA Parameter Set element" fields as + * described in P802.11ax_D2.0 section 9.4.2.240 + */ +struct ieee80211_mu_edca_param_set { + u8 mu_qos_info; + struct ieee80211_he_mu_edca_param_ac_rec ac_be; + struct ieee80211_he_mu_edca_param_ac_rec ac_bk; + struct ieee80211_he_mu_edca_param_ac_rec ac_vi; + struct ieee80211_he_mu_edca_param_ac_rec ac_vo; +} __packed; /* 802.11ac VHT Capabilities */ #define IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_3895 0x00000000 @@ -1577,6 +1679,328 @@ struct ieee80211_vht_operation { #define IEEE80211_VHT_CAP_RX_ANTENNA_PATTERN 0x10000000 #define IEEE80211_VHT_CAP_TX_ANTENNA_PATTERN 0x20000000 +/* 802.11ax HE MAC capabilities */ +#define IEEE80211_HE_MAC_CAP0_HTC_HE 0x01 +#define IEEE80211_HE_MAC_CAP0_TWT_REQ 0x02 +#define IEEE80211_HE_MAC_CAP0_TWT_RES 0x04 +#define IEEE80211_HE_MAC_CAP0_DYNAMIC_FRAG_NOT_SUPP 0x00 +#define IEEE80211_HE_MAC_CAP0_DYNAMIC_FRAG_LEVEL_1 0x08 +#define IEEE80211_HE_MAC_CAP0_DYNAMIC_FRAG_LEVEL_2 0x10 +#define IEEE80211_HE_MAC_CAP0_DYNAMIC_FRAG_LEVEL_3 0x18 +#define IEEE80211_HE_MAC_CAP0_DYNAMIC_FRAG_MASK 0x18 +#define IEEE80211_HE_MAC_CAP0_MAX_NUM_FRAG_MSDU_1 0x00 +#define IEEE80211_HE_MAC_CAP0_MAX_NUM_FRAG_MSDU_2 0x20 +#define IEEE80211_HE_MAC_CAP0_MAX_NUM_FRAG_MSDU_4 0x40 +#define IEEE80211_HE_MAC_CAP0_MAX_NUM_FRAG_MSDU_8 0x60 +#define IEEE80211_HE_MAC_CAP0_MAX_NUM_FRAG_MSDU_16 0x80 +#define IEEE80211_HE_MAC_CAP0_MAX_NUM_FRAG_MSDU_32 0xa0 +#define IEEE80211_HE_MAC_CAP0_MAX_NUM_FRAG_MSDU_64 0xc0 +#define IEEE80211_HE_MAC_CAP0_MAX_NUM_FRAG_MSDU_UNLIMITED 0xe0 +#define IEEE80211_HE_MAC_CAP0_MAX_NUM_FRAG_MSDU_MASK 0xe0 + +#define IEEE80211_HE_MAC_CAP1_MIN_FRAG_SIZE_UNLIMITED 0x00 +#define IEEE80211_HE_MAC_CAP1_MIN_FRAG_SIZE_128 0x01 +#define IEEE80211_HE_MAC_CAP1_MIN_FRAG_SIZE_256 0x02 +#define IEEE80211_HE_MAC_CAP1_MIN_FRAG_SIZE_512 0x03 +#define IEEE80211_HE_MAC_CAP1_MIN_FRAG_SIZE_MASK 0x03 +#define IEEE80211_HE_MAC_CAP1_TF_MAC_PAD_DUR_0US 0x00 +#define IEEE80211_HE_MAC_CAP1_TF_MAC_PAD_DUR_8US 0x04 +#define IEEE80211_HE_MAC_CAP1_TF_MAC_PAD_DUR_16US 0x08 +#define IEEE80211_HE_MAC_CAP1_TF_MAC_PAD_DUR_MASK 0x0c +#define IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_QOS_1 0x00 +#define IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_QOS_2 0x10 +#define IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_QOS_3 0x20 +#define IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_QOS_4 0x30 +#define IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_QOS_5 0x40 +#define IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_QOS_6 0x50 +#define IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_QOS_7 0x60 +#define IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_QOS_8 0x70 +#define 
IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_QOS_MASK 0x70 + +/* Link adaptation is split between byte HE_MAC_CAP1 and + * HE_MAC_CAP2. It should be set only if IEEE80211_HE_MAC_CAP0_HTC_HE + * in which case the following values apply: + * 0 = No feedback. + * 1 = reserved. + * 2 = Unsolicited feedback. + * 3 = both + */ +#define IEEE80211_HE_MAC_CAP1_LINK_ADAPTATION 0x80 + +#define IEEE80211_HE_MAC_CAP2_LINK_ADAPTATION 0x01 +#define IEEE80211_HE_MAC_CAP2_ALL_ACK 0x02 +#define IEEE80211_HE_MAC_CAP2_UL_MU_RESP_SCHED 0x04 +#define IEEE80211_HE_MAC_CAP2_BSR 0x08 +#define IEEE80211_HE_MAC_CAP2_BCAST_TWT 0x10 +#define IEEE80211_HE_MAC_CAP2_32BIT_BA_BITMAP 0x20 +#define IEEE80211_HE_MAC_CAP2_MU_CASCADING 0x40 +#define IEEE80211_HE_MAC_CAP2_ACK_EN 0x80 + +#define IEEE80211_HE_MAC_CAP3_GRP_ADDR_MULTI_STA_BA_DL_MU 0x01 +#define IEEE80211_HE_MAC_CAP3_OMI_CONTROL 0x02 +#define IEEE80211_HE_MAC_CAP3_OFDMA_RA 0x04 + +/* The maximum length of an A-MDPU is defined by the combination of the Maximum + * A-MDPU Length Exponent field in the HT capabilities, VHT capabilities and the + * same field in the HE capabilities. + */ +#define IEEE80211_HE_MAC_CAP3_MAX_A_AMPDU_LEN_EXP_USE_VHT 0x00 +#define IEEE80211_HE_MAC_CAP3_MAX_A_AMPDU_LEN_EXP_VHT_1 0x08 +#define IEEE80211_HE_MAC_CAP3_MAX_A_AMPDU_LEN_EXP_VHT_2 0x10 +#define IEEE80211_HE_MAC_CAP3_MAX_A_AMPDU_LEN_EXP_RESERVED 0x18 +#define IEEE80211_HE_MAC_CAP3_MAX_A_AMPDU_LEN_EXP_MASK 0x18 +#define IEEE80211_HE_MAC_CAP3_A_AMSDU_FRAG 0x20 +#define IEEE80211_HE_MAC_CAP3_FLEX_TWT_SCHED 0x40 +#define IEEE80211_HE_MAC_CAP3_RX_CTRL_FRAME_TO_MULTIBSS 0x80 + +#define IEEE80211_HE_MAC_CAP4_BSRP_BQRP_A_MPDU_AGG 0x01 +#define IEEE80211_HE_MAC_CAP4_QTP 0x02 +#define IEEE80211_HE_MAC_CAP4_BQR 0x04 +#define IEEE80211_HE_MAC_CAP4_SR_RESP 0x08 +#define IEEE80211_HE_MAC_CAP4_NDP_FB_REP 0x10 +#define IEEE80211_HE_MAC_CAP4_OPS 0x20 +#define IEEE80211_HE_MAC_CAP4_AMDSU_IN_AMPDU 0x40 + +/* 802.11ax HE PHY capabilities */ +#define IEEE80211_HE_PHY_CAP0_DUAL_BAND 0x01 +#define IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_IN_2G 0x02 +#define IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_80MHZ_IN_5G 0x04 +#define IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G 0x08 +#define IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_80PLUS80_MHZ_IN_5G 0x10 +#define IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_RU_MAPPING_IN_2G 0x20 +#define IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_RU_MAPPING_IN_5G 0x40 +#define IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_MASK 0xfe + +#define IEEE80211_HE_PHY_CAP1_PREAMBLE_PUNC_RX_80MHZ_ONLY_SECOND_20MHZ 0x01 +#define IEEE80211_HE_PHY_CAP1_PREAMBLE_PUNC_RX_80MHZ_ONLY_SECOND_40MHZ 0x02 +#define IEEE80211_HE_PHY_CAP1_PREAMBLE_PUNC_RX_160MHZ_ONLY_SECOND_20MHZ 0x04 +#define IEEE80211_HE_PHY_CAP1_PREAMBLE_PUNC_RX_160MHZ_ONLY_SECOND_40MHZ 0x08 +#define IEEE80211_HE_PHY_CAP1_PREAMBLE_PUNC_RX_MASK 0x0f +#define IEEE80211_HE_PHY_CAP1_DEVICE_CLASS_A 0x10 +#define IEEE80211_HE_PHY_CAP1_LDPC_CODING_IN_PAYLOAD 0x20 +#define IEEE80211_HE_PHY_CAP1_HE_LTF_AND_GI_FOR_HE_PPDUS_0_8US 0x40 +/* Midamble RX Max NSTS is split between byte #2 and byte #3 */ +#define IEEE80211_HE_PHY_CAP1_MIDAMBLE_RX_MAX_NSTS 0x80 + +#define IEEE80211_HE_PHY_CAP2_MIDAMBLE_RX_MAX_NSTS 0x01 +#define IEEE80211_HE_PHY_CAP2_NDP_4x_LTF_AND_3_2US 0x02 +#define IEEE80211_HE_PHY_CAP2_STBC_TX_UNDER_80MHZ 0x04 +#define IEEE80211_HE_PHY_CAP2_STBC_RX_UNDER_80MHZ 0x08 +#define IEEE80211_HE_PHY_CAP2_DOPPLER_TX 0x10 +#define IEEE80211_HE_PHY_CAP2_DOPPLER_RX 0x20 + +/* Note that the meaning of UL MU below is different between an AP and a non-AP + * 
sta, where in the AP case it indicates support for Rx and in the non-AP sta + * case it indicates support for Tx. + */ +#define IEEE80211_HE_PHY_CAP2_UL_MU_FULL_MU_MIMO 0x40 +#define IEEE80211_HE_PHY_CAP2_UL_MU_PARTIAL_MU_MIMO 0x80 + +#define IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_TX_NO_DCM 0x00 +#define IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_TX_BPSK 0x01 +#define IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_TX_QPSK 0x02 +#define IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_TX_16_QAM 0x03 +#define IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_TX_MASK 0x03 +#define IEEE80211_HE_PHY_CAP3_DCM_MAX_TX_NSS_1 0x00 +#define IEEE80211_HE_PHY_CAP3_DCM_MAX_TX_NSS_2 0x04 +#define IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_RX_NO_DCM 0x00 +#define IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_RX_BPSK 0x08 +#define IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_RX_QPSK 0x10 +#define IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_RX_16_QAM 0x18 +#define IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_RX_MASK 0x18 +#define IEEE80211_HE_PHY_CAP3_DCM_MAX_RX_NSS_1 0x00 +#define IEEE80211_HE_PHY_CAP3_DCM_MAX_RX_NSS_2 0x20 +#define IEEE80211_HE_PHY_CAP3_RX_HE_MU_PPDU_FROM_NON_AP_STA 0x40 +#define IEEE80211_HE_PHY_CAP3_SU_BEAMFORMER 0x80 + +#define IEEE80211_HE_PHY_CAP4_SU_BEAMFORMEE 0x01 +#define IEEE80211_HE_PHY_CAP4_MU_BEAMFORMER 0x02 + +/* Minimal allowed value of Max STS under 80MHz is 3 */ +#define IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_UNDER_80MHZ_4 0x0c +#define IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_UNDER_80MHZ_5 0x10 +#define IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_UNDER_80MHZ_6 0x14 +#define IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_UNDER_80MHZ_7 0x18 +#define IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_UNDER_80MHZ_8 0x1c +#define IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_UNDER_80MHZ_MASK 0x1c + +/* Minimal allowed value of Max STS above 80MHz is 3 */ +#define IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_ABOVE_80MHZ_4 0x60 +#define IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_ABOVE_80MHZ_5 0x80 +#define IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_ABOVE_80MHZ_6 0xa0 +#define IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_ABOVE_80MHZ_7 0xc0 +#define IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_ABOVE_80MHZ_8 0xe0 +#define IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_ABOVE_80MHZ_MASK 0xe0 + +#define IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_UNDER_80MHZ_1 0x00 +#define IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_UNDER_80MHZ_2 0x01 +#define IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_UNDER_80MHZ_3 0x02 +#define IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_UNDER_80MHZ_4 0x03 +#define IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_UNDER_80MHZ_5 0x04 +#define IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_UNDER_80MHZ_6 0x05 +#define IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_UNDER_80MHZ_7 0x06 +#define IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_UNDER_80MHZ_8 0x07 +#define IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_UNDER_80MHZ_MASK 0x07 + +#define IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_ABOVE_80MHZ_1 0x00 +#define IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_ABOVE_80MHZ_2 0x08 +#define IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_ABOVE_80MHZ_3 0x10 +#define IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_ABOVE_80MHZ_4 0x18 +#define IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_ABOVE_80MHZ_5 0x20 +#define IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_ABOVE_80MHZ_6 0x28 +#define IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_ABOVE_80MHZ_7 0x30 +#define IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_ABOVE_80MHZ_8 0x38 +#define IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_ABOVE_80MHZ_MASK 0x38 + +#define 
IEEE80211_HE_PHY_CAP5_NG16_SU_FEEDBACK 0x40 +#define IEEE80211_HE_PHY_CAP5_NG16_MU_FEEDBACK 0x80 + +#define IEEE80211_HE_PHY_CAP6_CODEBOOK_SIZE_42_SU 0x01 +#define IEEE80211_HE_PHY_CAP6_CODEBOOK_SIZE_75_MU 0x02 +#define IEEE80211_HE_PHY_CAP6_TRIG_SU_BEAMFORMER_FB 0x04 +#define IEEE80211_HE_PHY_CAP6_TRIG_MU_BEAMFORMER_FB 0x08 +#define IEEE80211_HE_PHY_CAP6_TRIG_CQI_FB 0x10 +#define IEEE80211_HE_PHY_CAP6_PARTIAL_BW_EXT_RANGE 0x20 +#define IEEE80211_HE_PHY_CAP6_PARTIAL_BANDWIDTH_DL_MUMIMO 0x40 +#define IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT 0x80 + +#define IEEE80211_HE_PHY_CAP7_SRP_BASED_SR 0x01 +#define IEEE80211_HE_PHY_CAP7_POWER_BOOST_FACTOR_AR 0x02 +#define IEEE80211_HE_PHY_CAP7_HE_SU_MU_PPDU_4XLTF_AND_08_US_GI 0x04 +#define IEEE80211_HE_PHY_CAP7_MAX_NC_1 0x08 +#define IEEE80211_HE_PHY_CAP7_MAX_NC_2 0x10 +#define IEEE80211_HE_PHY_CAP7_MAX_NC_3 0x18 +#define IEEE80211_HE_PHY_CAP7_MAX_NC_4 0x20 +#define IEEE80211_HE_PHY_CAP7_MAX_NC_5 0x28 +#define IEEE80211_HE_PHY_CAP7_MAX_NC_6 0x30 +#define IEEE80211_HE_PHY_CAP7_MAX_NC_7 0x38 +#define IEEE80211_HE_PHY_CAP7_MAX_NC_MASK 0x38 +#define IEEE80211_HE_PHY_CAP7_STBC_TX_ABOVE_80MHZ 0x40 +#define IEEE80211_HE_PHY_CAP7_STBC_RX_ABOVE_80MHZ 0x80 + +#define IEEE80211_HE_PHY_CAP8_HE_ER_SU_PPDU_4XLTF_AND_08_US_GI 0x01 +#define IEEE80211_HE_PHY_CAP8_20MHZ_IN_40MHZ_HE_PPDU_IN_2G 0x02 +#define IEEE80211_HE_PHY_CAP8_20MHZ_IN_160MHZ_HE_PPDU 0x04 +#define IEEE80211_HE_PHY_CAP8_80MHZ_IN_160MHZ_HE_PPDU 0x08 +#define IEEE80211_HE_PHY_CAP8_HE_ER_SU_1XLTF_AND_08_US_GI 0x10 +#define IEEE80211_HE_PHY_CAP8_MIDAMBLE_RX_2X_AND_1XLTF 0x20 + +/* 802.11ax HE TX/RX MCS NSS Support */ +#define IEEE80211_TX_RX_MCS_NSS_SUPP_HIGHEST_MCS_POS (3) +#define IEEE80211_TX_RX_MCS_NSS_SUPP_TX_BITMAP_POS (6) +#define IEEE80211_TX_RX_MCS_NSS_SUPP_RX_BITMAP_POS (11) +#define IEEE80211_TX_RX_MCS_NSS_SUPP_TX_BITMAP_MASK 0x07c0 +#define IEEE80211_TX_RX_MCS_NSS_SUPP_RX_BITMAP_MASK 0xf800 + +/* TX/RX HE MCS Support field Highest MCS subfield encoding */ +enum ieee80211_he_highest_mcs_supported_subfield_enc { + HIGHEST_MCS_SUPPORTED_MCS7 = 0, + HIGHEST_MCS_SUPPORTED_MCS8, + HIGHEST_MCS_SUPPORTED_MCS9, + HIGHEST_MCS_SUPPORTED_MCS10, + HIGHEST_MCS_SUPPORTED_MCS11, +}; + +/* Calculate 802.11ax HE capabilities IE Tx/Rx HE MCS NSS Support Field size */ +static inline u8 +ieee80211_he_mcs_nss_size(const struct ieee80211_he_cap_elem *he_cap) +{ + u8 count = 4; + + if (he_cap->phy_cap_info[0] & + IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G) + count += 4; + + if (he_cap->phy_cap_info[0] & + IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_80PLUS80_MHZ_IN_5G) + count += 4; + + return count; +} + +/* 802.11ax HE PPE Thresholds */ +#define IEEE80211_PPE_THRES_NSS_SUPPORT_2NSS (1) +#define IEEE80211_PPE_THRES_NSS_POS (0) +#define IEEE80211_PPE_THRES_NSS_MASK (7) +#define IEEE80211_PPE_THRES_RU_INDEX_BITMASK_2x966_AND_966_RU \ + (BIT(5) | BIT(6)) +#define IEEE80211_PPE_THRES_RU_INDEX_BITMASK_MASK 0x78 +#define IEEE80211_PPE_THRES_RU_INDEX_BITMASK_POS (3) +#define IEEE80211_PPE_THRES_INFO_PPET_SIZE (3) + +/* + * Calculate 802.11ax HE capabilities IE PPE field size + * Input: Header byte of ppe_thres (first byte), and HE capa IE's PHY cap u8* + */ +static inline u8 +ieee80211_he_ppe_size(u8 ppe_thres_hdr, const u8 *phy_cap_info) +{ + u8 n; + + if ((phy_cap_info[6] & + IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT) == 0) + return 0; + + n = hweight8(ppe_thres_hdr & + IEEE80211_PPE_THRES_RU_INDEX_BITMASK_MASK); + n *= (1 + ((ppe_thres_hdr & IEEE80211_PPE_THRES_NSS_MASK) >> + IEEE80211_PPE_THRES_NSS_POS)); + + 
/* + * Each pair is 6 bits, and we need to add the 7 "header" bits to the + * total size. + */ + n = (n * IEEE80211_PPE_THRES_INFO_PPET_SIZE * 2) + 7; + n = DIV_ROUND_UP(n, 8); + + return n; +} + +/* HE Operation defines */ +#define IEEE80211_HE_OPERATION_BSS_COLOR_MASK 0x0000003f +#define IEEE80211_HE_OPERATION_DFLT_PE_DURATION_MASK 0x000001c0 +#define IEEE80211_HE_OPERATION_DFLT_PE_DURATION_OFFSET 6 +#define IEEE80211_HE_OPERATION_TWT_REQUIRED 0x00000200 +#define IEEE80211_HE_OPERATION_RTS_THRESHOLD_MASK 0x000ffc00 +#define IEEE80211_HE_OPERATION_RTS_THRESHOLD_OFFSET 10 +#define IEEE80211_HE_OPERATION_PARTIAL_BSS_COLOR 0x000100000 +#define IEEE80211_HE_OPERATION_VHT_OPER_INFO 0x000200000 +#define IEEE80211_HE_OPERATION_MULTI_BSSID_AP 0x10000000 +#define IEEE80211_HE_OPERATION_TX_BSSID_INDICATOR 0x20000000 +#define IEEE80211_HE_OPERATION_BSS_COLOR_DISABLED 0x40000000 + +/* + * ieee80211_he_oper_size - calculate 802.11ax HE Operations IE size + * @he_oper_ie: byte data of the He Operations IE, stating from the the byte + * after the ext ID byte. It is assumed that he_oper_ie has at least + * sizeof(struct ieee80211_he_operation) bytes, checked already in + * ieee802_11_parse_elems_crc() + * @return the actual size of the IE data (not including header), or 0 on error + */ +static inline u8 +ieee80211_he_oper_size(const u8 *he_oper_ie) +{ + struct ieee80211_he_operation *he_oper = (void *)he_oper_ie; + u8 oper_len = sizeof(struct ieee80211_he_operation); + u32 he_oper_params; + + /* Make sure the input is not NULL */ + if (!he_oper_ie) + return 0; + + /* Calc required length */ + he_oper_params = le32_to_cpu(he_oper->he_oper_params); + if (he_oper_params & IEEE80211_HE_OPERATION_VHT_OPER_INFO) + oper_len += 3; + if (he_oper_params & IEEE80211_HE_OPERATION_MULTI_BSSID_AP) + oper_len++; + + /* Add the first byte (extension ID) to the total length */ + oper_len++; + + return oper_len; +} + /* Authentication algorithms */ #define WLAN_AUTH_OPEN 0 #define WLAN_AUTH_SHARED_KEY 1 @@ -1992,6 +2416,11 @@ enum ieee80211_eid_ext { WLAN_EID_EXT_FILS_WRAPPED_DATA = 8, WLAN_EID_EXT_FILS_PUBLIC_KEY = 12, WLAN_EID_EXT_FILS_NONCE = 13, + WLAN_EID_EXT_FUTURE_CHAN_GUIDANCE = 14, + WLAN_EID_EXT_HE_CAPABILITY = 35, + WLAN_EID_EXT_HE_OPERATION = 36, + WLAN_EID_EXT_UORA = 37, + WLAN_EID_EXT_HE_MU_EDCA = 38, }; /* Action category code */ diff --git a/include/linux/if_team.h b/include/linux/if_team.h index d95cae09dea0..ac42da56f7a2 100644 --- a/include/linux/if_team.h +++ b/include/linux/if_team.h @@ -74,6 +74,11 @@ struct team_port { long mode_priv[0]; }; +static inline struct team_port *team_port_get_rcu(const struct net_device *dev) +{ + return rcu_dereference(dev->rx_handler_data); +} + static inline bool team_port_enabled(struct team_port *port) { return port->index != -1; @@ -84,6 +89,19 @@ static inline bool team_port_txable(struct team_port *port) return port->linkup && team_port_enabled(port); } +static inline bool team_port_dev_txable(const struct net_device *port_dev) +{ + struct team_port *port; + bool txable; + + rcu_read_lock(); + port = team_port_get_rcu(port_dev); + txable = port ? 
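Taken together, ieee80211_he_mcs_nss_size() and ieee80211_he_ppe_size() above give the total length of an HE capabilities element: the fixed MAC/PHY fields, the variable Tx/Rx MCS NSS part, and the optional PPE thresholds. A hedged sketch of that calculation, assuming data points just past the extension ID byte and len has already been bounds-checked by the element parser:

static int he_cap_total_len(const u8 *data, int len)
{
        const struct ieee80211_he_cap_elem *he_cap = (const void *)data;
        int needed = sizeof(*he_cap);

        if (len < needed)
                return -EINVAL;

        needed += ieee80211_he_mcs_nss_size(he_cap);
        if (len > needed)        /* PPE thresholds header byte available */
                needed += ieee80211_he_ppe_size(data[needed],
                                                he_cap->phy_cap_info);

        return needed;
}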
team_port_txable(port) : false; + rcu_read_unlock(); + + return txable; +} + #ifdef CONFIG_NET_POLL_CONTROLLER static inline void team_netpoll_send_skb(struct team_port *port, struct sk_buff *skb) diff --git a/include/linux/ipc.h b/include/linux/ipc.h index 6cc2df7f7ac9..e1c9eea6015b 100644 --- a/include/linux/ipc.h +++ b/include/linux/ipc.h @@ -4,7 +4,7 @@ #include <linux/spinlock.h> #include <linux/uidgid.h> -#include <linux/rhashtable.h> +#include <linux/rhashtable-types.h> #include <uapi/linux/ipc.h> #include <linux/refcount.h> diff --git a/include/linux/ipc_namespace.h b/include/linux/ipc_namespace.h index b5630c8eb2f3..6cea726612b7 100644 --- a/include/linux/ipc_namespace.h +++ b/include/linux/ipc_namespace.h @@ -9,7 +9,7 @@ #include <linux/nsproxy.h> #include <linux/ns_common.h> #include <linux/refcount.h> -#include <linux/rhashtable.h> +#include <linux/rhashtable-types.h> struct user_namespace; diff --git a/include/linux/list.h b/include/linux/list.h index 4b129df4d46b..de04cc5ed536 100644 --- a/include/linux/list.h +++ b/include/linux/list.h @@ -285,6 +285,36 @@ static inline void list_cut_position(struct list_head *list, __list_cut_position(list, head, entry); } +/** + * list_cut_before - cut a list into two, before given entry + * @list: a new list to add all removed entries + * @head: a list with entries + * @entry: an entry within head, could be the head itself + * + * This helper moves the initial part of @head, up to but + * excluding @entry, from @head to @list. You should pass + * in @entry an element you know is on @head. @list should + * be an empty list or a list you do not care about losing + * its data. + * If @entry == @head, all entries on @head are moved to + * @list. + */ +static inline void list_cut_before(struct list_head *list, + struct list_head *head, + struct list_head *entry) +{ + if (head->next == entry) { + INIT_LIST_HEAD(list); + return; + } + list->next = head->next; + list->next->prev = list; + list->prev = entry->prev; + list->prev->next = list; + head->next = entry; + entry->prev = head; +} + static inline void __list_splice(const struct list_head *list, struct list_head *prev, struct list_head *next) diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h index 122e7e9d3091..dca6ab4eaa99 100644 --- a/include/linux/mlx4/device.h +++ b/include/linux/mlx4/device.h @@ -630,6 +630,7 @@ struct mlx4_caps { u32 vf_caps; bool wol_port[MLX4_MAX_PORTS + 1]; struct mlx4_rate_limit_caps rl_caps; + u32 health_buffer_addrs; }; struct mlx4_buf_list { @@ -851,6 +852,12 @@ struct mlx4_vf_dev { u8 n_ports; }; +struct mlx4_fw_crdump { + bool snapshot_enable; + struct devlink_region *region_crspace; + struct devlink_region *region_fw_health; +}; + enum mlx4_pci_status { MLX4_PCI_STATUS_DISABLED, MLX4_PCI_STATUS_ENABLED, @@ -871,6 +878,7 @@ struct mlx4_dev_persistent { u8 interface_state; struct mutex pci_status_mutex; /* sync pci state */ enum mlx4_pci_status pci_status; + struct mlx4_fw_crdump crdump; }; struct mlx4_dev { diff --git a/include/linux/mlx5/mlx5_ifc_fpga.h b/include/linux/mlx5/mlx5_ifc_fpga.h index 64d0f40d4cc3..37e065a80a43 100644 --- a/include/linux/mlx5/mlx5_ifc_fpga.h +++ b/include/linux/mlx5/mlx5_ifc_fpga.h @@ -576,6 +576,7 @@ struct mlx5_ifc_fpga_ipsec_sa { enum fpga_tls_cmds { CMD_SETUP_STREAM = 0x1001, CMD_TEARDOWN_STREAM = 0x1002, + CMD_RESYNC_RX = 0x1003, }; #define MLX5_TLS_1_2 (0) diff --git a/include/linux/mroute_base.h b/include/linux/mroute_base.h index d633f737b3c6..6675b9f81979 100644 --- a/include/linux/mroute_base.h +++ 
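The list_cut_before() helper added in the list.h hunk is the counterpart of list_cut_position(): everything *before* @entry is moved onto the new list, leaving @entry as the first remaining element. A short usage sketch (process_entries() is a hypothetical consumer):

static void drain_up_to(struct list_head *pending, struct list_head *marker)
{
        LIST_HEAD(done);

        /* 'done' receives every node ahead of 'marker'; 'marker' stays on 'pending' */
        list_cut_before(&done, pending, marker);
        process_entries(&done);
}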
b/include/linux/mroute_base.h @@ -2,7 +2,7 @@ #define __LINUX_MROUTE_BASE_H #include <linux/netdevice.h> -#include <linux/rhashtable.h> +#include <linux/rhashtable-types.h> #include <linux/spinlock.h> #include <net/net_namespace.h> #include <net/sock.h> @@ -254,6 +254,7 @@ struct mr_table { atomic_t cache_resolve_queue_len; bool mroute_do_assert; bool mroute_do_pim; + bool mroute_do_wrvifwhole; int mroute_reg_vif_num; }; diff --git a/include/linux/netdev_features.h b/include/linux/netdev_features.h index 623bb8ced060..2b2a6dce1630 100644 --- a/include/linux/netdev_features.h +++ b/include/linux/netdev_features.h @@ -79,6 +79,7 @@ enum { NETIF_F_HW_ESP_TX_CSUM_BIT, /* ESP with TX checksum offload */ NETIF_F_RX_UDP_TUNNEL_PORT_BIT, /* Offload of RX port for UDP tunnels */ NETIF_F_HW_TLS_TX_BIT, /* Hardware TLS TX offload */ + NETIF_F_HW_TLS_RX_BIT, /* Hardware TLS RX offload */ NETIF_F_GRO_HW_BIT, /* Hardware Generic receive offload */ NETIF_F_HW_TLS_RECORD_BIT, /* Offload TLS record */ @@ -151,6 +152,7 @@ enum { #define NETIF_F_HW_TLS_RECORD __NETIF_F(HW_TLS_RECORD) #define NETIF_F_GSO_UDP_L4 __NETIF_F(GSO_UDP_L4) #define NETIF_F_HW_TLS_TX __NETIF_F(HW_TLS_TX) +#define NETIF_F_HW_TLS_RX __NETIF_F(HW_TLS_RX) #define for_each_netdev_feature(mask_addr, bit) \ for_each_set_bit(bit, (unsigned long *)mask_addr, NETDEV_FEATURE_COUNT) diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 3d0cc0b5cec2..c1295c7a452e 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -302,6 +302,17 @@ struct netdev_boot_setup { int __init netdev_boot_setup(char *str); +struct gro_list { + struct list_head list; + int count; +}; + +/* + * size of gro hash buckets, must less than bit number of + * napi_struct::gro_bitmask + */ +#define GRO_HASH_BUCKETS 8 + /* * Structure for NAPI scheduling similar to tasklet but with weighting */ @@ -316,13 +327,13 @@ struct napi_struct { unsigned long state; int weight; - unsigned int gro_count; + unsigned long gro_bitmask; int (*poll)(struct napi_struct *, int); #ifdef CONFIG_NETPOLL int poll_owner; #endif struct net_device *dev; - struct sk_buff *gro_list; + struct gro_list gro_hash[GRO_HASH_BUCKETS]; struct sk_buff *skb; struct hrtimer timer; struct list_head dev_list; @@ -569,6 +580,9 @@ struct netdev_queue { * (/sys/class/net/DEV/Q/trans_timeout) */ unsigned long trans_timeout; + + /* Subordinate device that the queue has been assigned to */ + struct net_device *sb_dev; /* * write-mostly part */ @@ -730,10 +744,15 @@ struct xps_map { */ struct xps_dev_maps { struct rcu_head rcu; - struct xps_map __rcu *cpu_map[0]; + struct xps_map __rcu *attr_map[0]; /* Either CPUs map or RXQs map */ }; -#define XPS_DEV_MAPS_SIZE(_tcs) (sizeof(struct xps_dev_maps) + \ + +#define XPS_CPU_DEV_MAPS_SIZE(_tcs) (sizeof(struct xps_dev_maps) + \ (nr_cpu_ids * (_tcs) * sizeof(struct xps_map *))) + +#define XPS_RXQ_DEV_MAPS_SIZE(_tcs, _rxqs) (sizeof(struct xps_dev_maps) +\ + (_rxqs * (_tcs) * sizeof(struct xps_map *))) + #endif /* CONFIG_XPS */ #define TC_MAX_QUEUE 16 @@ -779,7 +798,8 @@ static inline bool netdev_phys_item_id_same(struct netdev_phys_item_id *a, } typedef u16 (*select_queue_fallback_t)(struct net_device *dev, - struct sk_buff *skb); + struct sk_buff *skb, + struct net_device *sb_dev); enum tc_setup_type { TC_SETUP_QDISC_MQPRIO, @@ -792,6 +812,7 @@ enum tc_setup_type { TC_SETUP_QDISC_RED, TC_SETUP_QDISC_PRIO, TC_SETUP_QDISC_MQ, + TC_SETUP_QDISC_ETF, }; /* These structures hold the attributes of bpf state that are being passed @@ -807,11 +828,8 @@ 
enum bpf_netdev_command { */ XDP_SETUP_PROG, XDP_SETUP_PROG_HW, - /* Check if a bpf program is set on the device. The callee should - * set @prog_attached to one of XDP_ATTACHED_* values, note that "true" - * is equivalent to XDP_ATTACHED_DRV. - */ XDP_QUERY_PROG, + XDP_QUERY_PROG_HW, /* BPF program for offload callbacks, invoked at program load time. */ BPF_OFFLOAD_VERIFIER_PREP, BPF_OFFLOAD_TRANSLATE, @@ -835,9 +853,8 @@ struct netdev_bpf { struct bpf_prog *prog; struct netlink_ext_ack *extack; }; - /* XDP_QUERY_PROG */ + /* XDP_QUERY_PROG, XDP_QUERY_PROG_HW */ struct { - u8 prog_attached; u32 prog_id; /* flags with which program was installed */ u32 prog_flags; @@ -891,6 +908,8 @@ struct tlsdev_ops { void (*tls_dev_del)(struct net_device *netdev, struct tls_context *ctx, enum tls_offload_ctx_dir direction); + void (*tls_dev_resync_rx)(struct net_device *netdev, + struct sock *sk, u32 seq, u64 rcd_sn); }; #endif @@ -942,7 +961,8 @@ struct dev_ifalias { * those the driver believes to be appropriate. * * u16 (*ndo_select_queue)(struct net_device *dev, struct sk_buff *skb, - * void *accel_priv, select_queue_fallback_t fallback); + * struct net_device *sb_dev, + * select_queue_fallback_t fallback); * Called to decide which queue to use when device supports multiple * transmit queues. * @@ -1214,7 +1234,7 @@ struct net_device_ops { netdev_features_t features); u16 (*ndo_select_queue)(struct net_device *dev, struct sk_buff *skb, - void *accel_priv, + struct net_device *sb_dev, select_queue_fallback_t fallback); void (*ndo_change_rx_flags)(struct net_device *dev, int flags); @@ -1909,7 +1929,8 @@ struct net_device { int watchdog_timeo; #ifdef CONFIG_XPS - struct xps_dev_maps __rcu *xps_maps; + struct xps_dev_maps __rcu *xps_cpus_map; + struct xps_dev_maps __rcu *xps_rxqs_map; #endif #ifdef CONFIG_NET_CLS_ACT struct mini_Qdisc __rcu *miniq_egress; @@ -1978,7 +1999,7 @@ struct net_device { #ifdef CONFIG_DCB const struct dcbnl_rtnl_ops *dcbnl_ops; #endif - u8 num_tc; + s16 num_tc; struct netdev_tc_txq tc_to_txq[TC_MAX_QUEUE]; u8 prio_tc_map[TC_BITMASK + 1]; @@ -2032,6 +2053,17 @@ int netdev_get_num_tc(struct net_device *dev) return dev->num_tc; } +void netdev_unbind_sb_channel(struct net_device *dev, + struct net_device *sb_dev); +int netdev_bind_sb_channel_queue(struct net_device *dev, + struct net_device *sb_dev, + u8 tc, u16 count, u16 offset); +int netdev_set_sb_channel(struct net_device *dev, u16 channel); +static inline int netdev_get_sb_channel(struct net_device *dev) +{ + return max_t(int, -dev->num_tc, 0); +} + static inline struct netdev_queue *netdev_get_tx_queue(const struct net_device *dev, unsigned int index) @@ -2076,7 +2108,7 @@ static inline void netdev_for_each_tx_queue(struct net_device *dev, struct netdev_queue *netdev_pick_tx(struct net_device *dev, struct sk_buff *skb, - void *accel_priv); + struct net_device *sb_dev); /* returns the headroom that the master device needs to take in account * when forwarding to this dev @@ -2255,10 +2287,10 @@ static inline int gro_recursion_inc_test(struct sk_buff *skb) return ++NAPI_GRO_CB(skb)->recursion_counter == GRO_RECURSION_LIMIT; } -typedef struct sk_buff **(*gro_receive_t)(struct sk_buff **, struct sk_buff *); -static inline struct sk_buff **call_gro_receive(gro_receive_t cb, - struct sk_buff **head, - struct sk_buff *skb) +typedef struct sk_buff *(*gro_receive_t)(struct list_head *, struct sk_buff *); +static inline struct sk_buff *call_gro_receive(gro_receive_t cb, + struct list_head *head, + struct sk_buff *skb) { if 
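With accel_priv replaced by a subordinate-device pointer, a driver's queue selection callback now has roughly the shape below (mydrv_select_queue is illustrative, not an existing driver):

static u16 mydrv_select_queue(struct net_device *dev, struct sk_buff *skb,
                              struct net_device *sb_dev,
                              select_queue_fallback_t fallback)
{
        /* Pin control traffic to queue 0, let the core pick for everything else */
        if (skb->priority == TC_PRIO_CONTROL)
                return 0;

        return fallback(dev, skb, sb_dev);
}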
(unlikely(gro_recursion_inc_test(skb))) { NAPI_GRO_CB(skb)->flush |= 1; @@ -2268,12 +2300,12 @@ static inline struct sk_buff **call_gro_receive(gro_receive_t cb, return cb(head, skb); } -typedef struct sk_buff **(*gro_receive_sk_t)(struct sock *, struct sk_buff **, - struct sk_buff *); -static inline struct sk_buff **call_gro_receive_sk(gro_receive_sk_t cb, - struct sock *sk, - struct sk_buff **head, - struct sk_buff *skb) +typedef struct sk_buff *(*gro_receive_sk_t)(struct sock *, struct list_head *, + struct sk_buff *); +static inline struct sk_buff *call_gro_receive_sk(gro_receive_sk_t cb, + struct sock *sk, + struct list_head *head, + struct sk_buff *skb) { if (unlikely(gro_recursion_inc_test(skb))) { NAPI_GRO_CB(skb)->flush |= 1; @@ -2290,6 +2322,9 @@ struct packet_type { struct net_device *, struct packet_type *, struct net_device *); + void (*list_func) (struct list_head *, + struct packet_type *, + struct net_device *); bool (*id_match)(struct packet_type *ptype, struct sock *sk); void *af_packet_priv; @@ -2299,8 +2334,8 @@ struct packet_type { struct offload_callbacks { struct sk_buff *(*gso_segment)(struct sk_buff *skb, netdev_features_t features); - struct sk_buff **(*gro_receive)(struct sk_buff **head, - struct sk_buff *skb); + struct sk_buff *(*gro_receive)(struct list_head *head, + struct sk_buff *skb); int (*gro_complete)(struct sk_buff *skb, int nhoff); }; @@ -2537,8 +2572,14 @@ void dev_close(struct net_device *dev); void dev_close_many(struct list_head *head, bool unlink); void dev_disable_lro(struct net_device *dev); int dev_loopback_xmit(struct net *net, struct sock *sk, struct sk_buff *newskb); +u16 dev_pick_tx_zero(struct net_device *dev, struct sk_buff *skb, + struct net_device *sb_dev, + select_queue_fallback_t fallback); +u16 dev_pick_tx_cpu_id(struct net_device *dev, struct sk_buff *skb, + struct net_device *sb_dev, + select_queue_fallback_t fallback); int dev_queue_xmit(struct sk_buff *skb); -int dev_queue_xmit_accel(struct sk_buff *skb, void *accel_priv); +int dev_queue_xmit_accel(struct sk_buff *skb, struct net_device *sb_dev); int dev_direct_xmit(struct sk_buff *skb, u16 queue_id); int register_netdevice(struct net_device *dev); void unregister_netdevice_queue(struct net_device *dev, struct list_head *head); @@ -2568,7 +2609,7 @@ struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex); struct net_device *dev_get_by_napi_id(unsigned int napi_id); int netdev_get_name(struct net *net, char *name, int ifindex); int dev_restart(struct net_device *dev); -int skb_gro_receive(struct sk_buff **head, struct sk_buff *skb); +int skb_gro_receive(struct sk_buff *p, struct sk_buff *skb); static inline unsigned int skb_gro_offset(const struct sk_buff *skb) { @@ -2784,13 +2825,13 @@ static inline void skb_gro_remcsum_cleanup(struct sk_buff *skb, } #ifdef CONFIG_XFRM_OFFLOAD -static inline void skb_gro_flush_final(struct sk_buff *skb, struct sk_buff **pp, int flush) +static inline void skb_gro_flush_final(struct sk_buff *skb, struct sk_buff *pp, int flush) { if (PTR_ERR(pp) != -EINPROGRESS) NAPI_GRO_CB(skb)->flush |= flush; } static inline void skb_gro_flush_final_remcsum(struct sk_buff *skb, - struct sk_buff **pp, + struct sk_buff *pp, int flush, struct gro_remcsum *grc) { @@ -2801,12 +2842,12 @@ static inline void skb_gro_flush_final_remcsum(struct sk_buff *skb, } } #else -static inline void skb_gro_flush_final(struct sk_buff *skb, struct sk_buff **pp, int flush) +static inline void skb_gro_flush_final(struct sk_buff *skb, struct sk_buff *pp, int flush) { 
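Since gro_receive callbacks now walk a list_head of held packets rather than an sk_buff chain, a protocol callback follows this shape; headers_match() and inner_gro_receive() are placeholders for the real header comparison and the next-layer callback:

static struct sk_buff *myproto_gro_receive(struct list_head *head,
                                           struct sk_buff *skb)
{
        struct sk_buff *p;

        list_for_each_entry(p, head, list) {
                if (!NAPI_GRO_CB(p)->same_flow)
                        continue;
                if (!headers_match(p, skb))        /* placeholder comparison */
                        NAPI_GRO_CB(p)->same_flow = 0;
        }

        /* hand the packet to the next layer via the recursion-limited helper */
        return call_gro_receive(inner_gro_receive, head, skb);
}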
NAPI_GRO_CB(skb)->flush |= flush; } static inline void skb_gro_flush_final_remcsum(struct sk_buff *skb, - struct sk_buff **pp, + struct sk_buff *pp, int flush, struct gro_remcsum *grc) { @@ -3278,6 +3319,92 @@ static inline void netif_wake_subqueue(struct net_device *dev, u16 queue_index) #ifdef CONFIG_XPS int netif_set_xps_queue(struct net_device *dev, const struct cpumask *mask, u16 index); +int __netif_set_xps_queue(struct net_device *dev, const unsigned long *mask, + u16 index, bool is_rxqs_map); + +/** + * netif_attr_test_mask - Test a CPU or Rx queue set in a mask + * @j: CPU/Rx queue index + * @mask: bitmask of all cpus/rx queues + * @nr_bits: number of bits in the bitmask + * + * Test if a CPU or Rx queue index is set in a mask of all CPU/Rx queues. + */ +static inline bool netif_attr_test_mask(unsigned long j, + const unsigned long *mask, + unsigned int nr_bits) +{ + cpu_max_bits_warn(j, nr_bits); + return test_bit(j, mask); +} + +/** + * netif_attr_test_online - Test for online CPU/Rx queue + * @j: CPU/Rx queue index + * @online_mask: bitmask for CPUs/Rx queues that are online + * @nr_bits: number of bits in the bitmask + * + * Returns true if a CPU/Rx queue is online. + */ +static inline bool netif_attr_test_online(unsigned long j, + const unsigned long *online_mask, + unsigned int nr_bits) +{ + cpu_max_bits_warn(j, nr_bits); + + if (online_mask) + return test_bit(j, online_mask); + + return (j < nr_bits); +} + +/** + * netif_attrmask_next - get the next CPU/Rx queue in a cpu/Rx queues mask + * @n: CPU/Rx queue index + * @srcp: the cpumask/Rx queue mask pointer + * @nr_bits: number of bits in the bitmask + * + * Returns >= nr_bits if no further CPUs/Rx queues set. + */ +static inline unsigned int netif_attrmask_next(int n, const unsigned long *srcp, + unsigned int nr_bits) +{ + /* -1 is a legal arg here. */ + if (n != -1) + cpu_max_bits_warn(n, nr_bits); + + if (srcp) + return find_next_bit(srcp, nr_bits, n + 1); + + return n + 1; +} + +/** + * netif_attrmask_next_and - get the next CPU/Rx queue in *src1p & *src2p + * @n: CPU/Rx queue index + * @src1p: the first CPUs/Rx queues mask pointer + * @src2p: the second CPUs/Rx queues mask pointer + * @nr_bits: number of bits in the bitmask + * + * Returns >= nr_bits if no further CPUs/Rx queues set in both. + */ +static inline int netif_attrmask_next_and(int n, const unsigned long *src1p, + const unsigned long *src2p, + unsigned int nr_bits) +{ + /* -1 is a legal arg here. 
*/ + if (n != -1) + cpu_max_bits_warn(n, nr_bits); + + if (src1p && src2p) + return find_next_and_bit(src1p, src2p, nr_bits, n + 1); + else if (src1p) + return find_next_bit(src1p, nr_bits, n + 1); + else if (src2p) + return find_next_bit(src2p, nr_bits, n + 1); + + return n + 1; +} #else static inline int netif_set_xps_queue(struct net_device *dev, const struct cpumask *mask, @@ -3384,6 +3511,7 @@ int netif_rx(struct sk_buff *skb); int netif_rx_ni(struct sk_buff *skb); int netif_receive_skb(struct sk_buff *skb); int netif_receive_skb_core(struct sk_buff *skb); +void netif_receive_skb_list(struct list_head *head); gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb); void napi_gro_flush(struct napi_struct *napi, bool flush_old); struct sk_buff *napi_get_frags(struct napi_struct *napi); @@ -3435,8 +3563,8 @@ struct sk_buff *dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev, typedef int (*bpf_op_t)(struct net_device *dev, struct netdev_bpf *bpf); int dev_change_xdp_fd(struct net_device *dev, struct netlink_ext_ack *extack, int fd, u32 flags); -void __dev_xdp_query(struct net_device *dev, bpf_op_t xdp_op, - struct netdev_bpf *xdp); +u32 __dev_xdp_query(struct net_device *dev, bpf_op_t xdp_op, + enum bpf_netdev_command cmd); int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb); int dev_forward_skb(struct net_device *dev, struct sk_buff *skb); diff --git a/include/linux/netfilter.h b/include/linux/netfilter.h index dd2052f0efb7..07efffd0c759 100644 --- a/include/linux/netfilter.h +++ b/include/linux/netfilter.h @@ -288,6 +288,24 @@ NF_HOOK(uint8_t pf, unsigned int hook, struct net *net, struct sock *sk, struct return ret; } +static inline void +NF_HOOK_LIST(uint8_t pf, unsigned int hook, struct net *net, struct sock *sk, + struct list_head *head, struct net_device *in, struct net_device *out, + int (*okfn)(struct net *, struct sock *, struct sk_buff *)) +{ + struct sk_buff *skb, *next; + struct list_head sublist; + + INIT_LIST_HEAD(&sublist); + list_for_each_entry_safe(skb, next, head, list) { + list_del(&skb->list); + if (nf_hook(pf, hook, net, sk, skb, in, out, okfn) == 1) + list_add_tail(&skb->list, &sublist); + } + /* Put passed packets back on main list */ + list_splice(&sublist, head); +} + /* Call setsockopt() */ int nf_setsockopt(struct sock *sk, u_int8_t pf, int optval, char __user *opt, unsigned int len); @@ -369,6 +387,14 @@ NF_HOOK(uint8_t pf, unsigned int hook, struct net *net, struct sock *sk, return okfn(net, sk, skb); } +static inline void +NF_HOOK_LIST(uint8_t pf, unsigned int hook, struct net *net, struct sock *sk, + struct list_head *head, struct net_device *in, struct net_device *out, + int (*okfn)(struct net *, struct sock *, struct sk_buff *)) +{ + /* nothing to do */ +} + static inline int nf_hook(u_int8_t pf, unsigned int hook, struct net *net, struct sock *sk, struct sk_buff *skb, struct net_device *indev, struct net_device *outdev, @@ -388,8 +414,17 @@ nf_nat_decode_session(struct sk_buff *skb, struct flowi *fl, u_int8_t family) extern void (*ip_ct_attach)(struct sk_buff *, const struct sk_buff *) __rcu; void nf_ct_attach(struct sk_buff *, const struct sk_buff *); +struct nf_conntrack_tuple; +bool nf_ct_get_tuple_skb(struct nf_conntrack_tuple *dst_tuple, + const struct sk_buff *skb); #else static inline void nf_ct_attach(struct sk_buff *new, struct sk_buff *skb) {} +struct nf_conntrack_tuple; +static inline bool nf_ct_get_tuple_skb(struct nf_conntrack_tuple *dst_tuple, + const struct sk_buff *skb) +{ + return false; 
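The netif_attrmask_* helpers generalize the cpumask iterators so one loop can walk either a CPU mask or an Rx-queue mask, which is what __netif_set_xps_queue() needs. Roughly, visiting every index set in two masks at once looks like this:

static void log_common_attrs(const unsigned long *mask1,
                             const unsigned long *mask2,
                             unsigned int nr_bits)
{
        int j = -1;

        /* netif_attrmask_next_and() returns >= nr_bits when no bits remain */
        while ((j = netif_attrmask_next_and(j, mask1, mask2, nr_bits)) < nr_bits)
                pr_info("index %d set in both masks\n", j);
}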
+} #endif struct nf_conn; @@ -398,6 +433,8 @@ enum ip_conntrack_info; struct nf_ct_hook { int (*update)(struct net *net, struct sk_buff *skb); void (*destroy)(struct nf_conntrack *); + bool (*get_tuple_skb)(struct nf_conntrack_tuple *, + const struct sk_buff *); }; extern struct nf_ct_hook __rcu *nf_ct_hook; diff --git a/include/linux/openvswitch.h b/include/linux/openvswitch.h index e6b240b6196c..379affc63e24 100644 --- a/include/linux/openvswitch.h +++ b/include/linux/openvswitch.h @@ -21,4 +21,9 @@ #include <uapi/linux/openvswitch.h> +#define OVS_CLONE_ATTR_EXEC 0 /* Specify an u32 value. When nonzero, + * actions in clone will not change flow + * keys. False otherwise. + */ + #endif /* _LINUX_OPENVSWITCH_H */ diff --git a/include/linux/phy.h b/include/linux/phy.h index 6cd09098427c..075c2f770d3e 100644 --- a/include/linux/phy.h +++ b/include/linux/phy.h @@ -942,6 +942,8 @@ void phy_start(struct phy_device *phydev); void phy_stop(struct phy_device *phydev); int phy_start_aneg(struct phy_device *phydev); int phy_aneg_done(struct phy_device *phydev); +int phy_speed_down(struct phy_device *phydev, bool sync); +int phy_speed_up(struct phy_device *phydev); int phy_stop_interrupts(struct phy_device *phydev); int phy_restart_aneg(struct phy_device *phydev); diff --git a/include/linux/qed/qed_if.h b/include/linux/qed/qed_if.h index b4040023cbfb..8cd34645e892 100644 --- a/include/linux/qed/qed_if.h +++ b/include/linux/qed/qed_if.h @@ -759,6 +759,9 @@ struct qed_generic_tlvs { u8 mac[QED_TLV_MAC_COUNT][ETH_ALEN]; }; +#define QED_I2C_DEV_ADDR_A0 0xA0 +#define QED_I2C_DEV_ADDR_A2 0xA2 + #define QED_NVM_SIGNATURE 0x12435687 enum qed_nvm_flash_cmd { @@ -1026,6 +1029,18 @@ struct qed_common_ops { * @param enabled - true iff WoL should be enabled. */ int (*update_wol) (struct qed_dev *cdev, bool enabled); + +/** + * @brief read_module_eeprom + * + * @param cdev + * @param buf - buffer + * @param dev_addr - PHY device memory region + * @param offset - offset into eeprom contents to be read + * @param len - buffer length, i.e., max bytes to be read + */ + int (*read_module_eeprom)(struct qed_dev *cdev, + char *buf, u8 dev_addr, u32 offset, u32 len); }; #define MASK_FIELD(_name, _value) \ diff --git a/include/linux/reciprocal_div.h b/include/linux/reciprocal_div.h index e031e9f2f9d8..585ce89c0f33 100644 --- a/include/linux/reciprocal_div.h +++ b/include/linux/reciprocal_div.h @@ -25,6 +25,9 @@ struct reciprocal_value { u8 sh1, sh2; }; +/* "reciprocal_value" and "reciprocal_divide" together implement the basic + * version of the algorithm described in Figure 4.1 of the paper. + */ struct reciprocal_value reciprocal_value(u32 d); static inline u32 reciprocal_divide(u32 a, struct reciprocal_value R) @@ -33,4 +36,69 @@ static inline u32 reciprocal_divide(u32 a, struct reciprocal_value R) return (t + ((a - t) >> R.sh1)) >> R.sh2; } +struct reciprocal_value_adv { + u32 m; + u8 sh, exp; + bool is_wide_m; +}; + +/* "reciprocal_value_adv" implements the advanced version of the algorithm + * described in Figure 4.2 of the paper except when "divisor > (1U << 31)" whose + * ceil(log2(d)) result will be 32 which then requires u128 divide on host. The + * exception case could be easily handled before calling "reciprocal_value_adv". + * + * The advanced version requires more complex calculation to get the reciprocal + * multiplier and other control variables, but then could reduce the required + * emulation operations. 
+ * + * It makes no sense to use this advanced version for host divide emulation, + * those extra complexities for calculating multiplier etc could completely + * waive our saving on emulation operations. + * + * However, it makes sense to use it for JIT divide code generation for which + * we are willing to trade performance of JITed code with that of host. As shown + * by the following pseudo code, the required emulation operations could go down + * from 6 (the basic version) to 3 or 4. + * + * To use the result of "reciprocal_value_adv", suppose we want to calculate + * n/d, the pseudo C code will be: + * + * struct reciprocal_value_adv rvalue; + * u8 pre_shift, exp; + * + * // handle exception case. + * if (d >= (1U << 31)) { + * result = n >= d; + * return; + * } + * + * rvalue = reciprocal_value_adv(d, 32) + * exp = rvalue.exp; + * if (rvalue.is_wide_m && !(d & 1)) { + * // floor(log2(d & (2^32 -d))) + * pre_shift = fls(d & -d) - 1; + * rvalue = reciprocal_value_adv(d >> pre_shift, 32 - pre_shift); + * } else { + * pre_shift = 0; + * } + * + * // code generation starts. + * if (imm == 1U << exp) { + * result = n >> exp; + * } else if (rvalue.is_wide_m) { + * // pre_shift must be zero when reached here. + * t = (n * rvalue.m) >> 32; + * result = n - t; + * result >>= 1; + * result += t; + * result >>= rvalue.sh - 1; + * } else { + * if (pre_shift) + * result = n >> pre_shift; + * result = ((u64)result * rvalue.m) >> 32; + * result >>= rvalue.sh; + * } + */ +struct reciprocal_value_adv reciprocal_value_adv(u32 d, u8 prec); + #endif /* _LINUX_RECIPROCAL_DIV_H */ diff --git a/include/linux/rfkill.h b/include/linux/rfkill.h index e6a0031d1b1f..8ad2487a86d5 100644 --- a/include/linux/rfkill.h +++ b/include/linux/rfkill.h @@ -66,7 +66,7 @@ struct rfkill_ops { #if defined(CONFIG_RFKILL) || defined(CONFIG_RFKILL_MODULE) /** - * rfkill_alloc - allocate rfkill structure + * rfkill_alloc - Allocate rfkill structure * @name: name of the struct -- the string is not copied internally * @parent: device that has rf switch on it * @type: type of the switch (RFKILL_TYPE_*) @@ -112,7 +112,7 @@ void rfkill_pause_polling(struct rfkill *rfkill); /** * rfkill_resume_polling(struct rfkill *rfkill) * - * Pause polling -- say transmitter is off for other reasons. + * Resume polling * NOTE: not necessary for suspend/resume -- in that case the * core stops polling anyway */ @@ -130,7 +130,7 @@ void rfkill_resume_polling(struct rfkill *rfkill); void rfkill_unregister(struct rfkill *rfkill); /** - * rfkill_destroy - free rfkill structure + * rfkill_destroy - Free rfkill structure * @rfkill: rfkill structure to be destroyed * * Destroys the rfkill structure. @@ -140,7 +140,7 @@ void rfkill_destroy(struct rfkill *rfkill); /** * rfkill_set_hw_state - Set the internal rfkill hardware block state * @rfkill: pointer to the rfkill class to modify. - * @state: the current hardware block state to set + * @blocked: the current hardware block state to set * * rfkill drivers that get events when the hard-blocked state changes * use this function to notify the rfkill core (and through that also @@ -161,7 +161,7 @@ bool rfkill_set_hw_state(struct rfkill *rfkill, bool blocked); /** * rfkill_set_sw_state - Set the internal rfkill software block state * @rfkill: pointer to the rfkill class to modify. 
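The pseudo code in the comment above translates directly into C; the sketch below only restates it for readability (the comment itself argues against using this on the host path, so treat it as documentation of the generated-code shape rather than an API):

/* Emulate n / d for 1 <= d < 2^31, per the reciprocal_value_adv comment */
static u32 div_by_reciprocal_adv(u32 n, u32 d)
{
        struct reciprocal_value_adv rvalue;
        u8 pre_shift = 0, exp;
        u32 t, result;

        if (d >= (1U << 31))
                return n >= d;

        rvalue = reciprocal_value_adv(d, 32);
        exp = rvalue.exp;
        if (rvalue.is_wide_m && !(d & 1)) {
                pre_shift = fls(d & -d) - 1;        /* floor(log2(d & -d)) */
                rvalue = reciprocal_value_adv(d >> pre_shift, 32 - pre_shift);
        }

        if (d == 1U << exp) {
                result = n >> exp;
        } else if (rvalue.is_wide_m) {
                /* pre_shift is necessarily 0 on this path */
                t = ((u64)n * rvalue.m) >> 32;
                result = ((n - t) >> 1) + t;
                result >>= rvalue.sh - 1;
        } else {
                result = n >> pre_shift;
                result = ((u64)result * rvalue.m) >> 32;
                result >>= rvalue.sh;
        }
        return result;
}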
- * @state: the current software block state to set + * @blocked: the current software block state to set * * rfkill drivers that get events when the soft-blocked state changes * (yes, some platforms directly act on input but allow changing again) @@ -183,7 +183,7 @@ bool rfkill_set_sw_state(struct rfkill *rfkill, bool blocked); /** * rfkill_init_sw_state - Initialize persistent software block state * @rfkill: pointer to the rfkill class to modify. - * @state: the current software block state to set + * @blocked: the current software block state to set * * rfkill drivers that preserve their software block state over power off * use this function to notify the rfkill core (and through that also @@ -208,17 +208,17 @@ void rfkill_init_sw_state(struct rfkill *rfkill, bool blocked); void rfkill_set_states(struct rfkill *rfkill, bool sw, bool hw); /** - * rfkill_blocked - query rfkill block + * rfkill_blocked - Query rfkill block state * * @rfkill: rfkill struct to query */ bool rfkill_blocked(struct rfkill *rfkill); /** - * rfkill_find_type - Helpper for finding rfkill type by name + * rfkill_find_type - Helper for finding rfkill type by name * @name: the name of the type * - * Returns enum rfkill_type that conrresponds the name. + * Returns enum rfkill_type that corresponds to the name. */ enum rfkill_type rfkill_find_type(const char *name); @@ -296,7 +296,7 @@ static inline enum rfkill_type rfkill_find_type(const char *name) const char *rfkill_get_led_trigger_name(struct rfkill *rfkill); /** - * rfkill_set_led_trigger_name -- set the LED trigger name + * rfkill_set_led_trigger_name - Set the LED trigger name * @rfkill: rfkill struct * @name: LED trigger name * diff --git a/include/linux/rhashtable-types.h b/include/linux/rhashtable-types.h new file mode 100644 index 000000000000..763d613ce2c2 --- /dev/null +++ b/include/linux/rhashtable-types.h @@ -0,0 +1,137 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Resizable, Scalable, Concurrent Hash Table + * + * Simple structures that might be needed in include + * files. 
+ */ + +#ifndef _LINUX_RHASHTABLE_TYPES_H +#define _LINUX_RHASHTABLE_TYPES_H + +#include <linux/atomic.h> +#include <linux/compiler.h> +#include <linux/mutex.h> +#include <linux/workqueue.h> + +struct rhash_head { + struct rhash_head __rcu *next; +}; + +struct rhlist_head { + struct rhash_head rhead; + struct rhlist_head __rcu *next; +}; + +struct bucket_table; + +/** + * struct rhashtable_compare_arg - Key for the function rhashtable_compare + * @ht: Hash table + * @key: Key to compare against + */ +struct rhashtable_compare_arg { + struct rhashtable *ht; + const void *key; +}; + +typedef u32 (*rht_hashfn_t)(const void *data, u32 len, u32 seed); +typedef u32 (*rht_obj_hashfn_t)(const void *data, u32 len, u32 seed); +typedef int (*rht_obj_cmpfn_t)(struct rhashtable_compare_arg *arg, + const void *obj); + +/** + * struct rhashtable_params - Hash table construction parameters + * @nelem_hint: Hint on number of elements, should be 75% of desired size + * @key_len: Length of key + * @key_offset: Offset of key in struct to be hashed + * @head_offset: Offset of rhash_head in struct to be hashed + * @max_size: Maximum size while expanding + * @min_size: Minimum size while shrinking + * @locks_mul: Number of bucket locks to allocate per cpu (default: 32) + * @automatic_shrinking: Enable automatic shrinking of tables + * @hashfn: Hash function (default: jhash2 if !(key_len % 4), or jhash) + * @obj_hashfn: Function to hash object + * @obj_cmpfn: Function to compare key with object + */ +struct rhashtable_params { + u16 nelem_hint; + u16 key_len; + u16 key_offset; + u16 head_offset; + unsigned int max_size; + u16 min_size; + bool automatic_shrinking; + u8 locks_mul; + rht_hashfn_t hashfn; + rht_obj_hashfn_t obj_hashfn; + rht_obj_cmpfn_t obj_cmpfn; +}; + +/** + * struct rhashtable - Hash table handle + * @tbl: Bucket table + * @key_len: Key length for hashfn + * @max_elems: Maximum number of elements in table + * @p: Configuration parameters + * @rhlist: True if this is an rhltable + * @run_work: Deferred worker to expand/shrink asynchronously + * @mutex: Mutex to protect current/future table swapping + * @lock: Spin lock to protect walker list + * @nelems: Number of elements in table + */ +struct rhashtable { + struct bucket_table __rcu *tbl; + unsigned int key_len; + unsigned int max_elems; + struct rhashtable_params p; + bool rhlist; + struct work_struct run_work; + struct mutex mutex; + spinlock_t lock; + atomic_t nelems; +}; + +/** + * struct rhltable - Hash table with duplicate objects in a list + * @ht: Underlying rhtable + */ +struct rhltable { + struct rhashtable ht; +}; + +/** + * struct rhashtable_walker - Hash table walker + * @list: List entry on list of walkers + * @tbl: The table that we were walking over + */ +struct rhashtable_walker { + struct list_head list; + struct bucket_table *tbl; +}; + +/** + * struct rhashtable_iter - Hash table iterator + * @ht: Table to iterate through + * @p: Current pointer + * @list: Current hash list pointer + * @walker: Associated rhashtable walker + * @slot: Current slot + * @skip: Number of entries to skip in slot + */ +struct rhashtable_iter { + struct rhashtable *ht; + struct rhash_head *p; + struct rhlist_head *list; + struct rhashtable_walker walker; + unsigned int slot; + unsigned int skip; + bool end_of_table; +}; + +int rhashtable_init(struct rhashtable *ht, + const struct rhashtable_params *params); +int rhltable_init(struct rhltable *hlt, + const struct rhashtable_params *params); + +#endif /* _LINUX_RHASHTABLE_TYPES_H */ diff --git 
a/include/linux/rhashtable.h b/include/linux/rhashtable.h index 4e1f535c2034..eb7111039247 100644 --- a/include/linux/rhashtable.h +++ b/include/linux/rhashtable.h @@ -1,3 +1,4 @@ +/* SPDX-License-Identifier: GPL-2.0 */ /* * Resizable, Scalable, Concurrent Hash Table * @@ -17,37 +18,18 @@ #ifndef _LINUX_RHASHTABLE_H #define _LINUX_RHASHTABLE_H -#include <linux/atomic.h> -#include <linux/compiler.h> #include <linux/err.h> #include <linux/errno.h> #include <linux/jhash.h> #include <linux/list_nulls.h> #include <linux/workqueue.h> -#include <linux/mutex.h> #include <linux/rculist.h> +#include <linux/rhashtable-types.h> /* * The end of the chain is marked with a special nulls marks which has - * the following format: - * - * +-------+-----------------------------------------------------+-+ - * | Base | Hash |1| - * +-------+-----------------------------------------------------+-+ - * - * Base (4 bits) : Reserved to distinguish between multiple tables. - * Specified via &struct rhashtable_params.nulls_base. - * Hash (27 bits): Full hash (unmasked) of first element added to bucket - * 1 (1 bit) : Nulls marker (always set) - * - * The remaining bits of the next pointer remain unused for now. + * the least significant bit set. */ -#define RHT_BASE_BITS 4 -#define RHT_HASH_BITS 27 -#define RHT_BASE_SHIFT RHT_HASH_BITS - -/* Base bits plus 1 bit for nulls marker */ -#define RHT_HASH_RESERVED_SPACE (RHT_BASE_BITS + 1) /* Maximum chain length before rehash * @@ -64,15 +46,6 @@ */ #define RHT_ELASTICITY 16u -struct rhash_head { - struct rhash_head __rcu *next; -}; - -struct rhlist_head { - struct rhash_head rhead; - struct rhlist_head __rcu *next; -}; - /** * struct bucket_table - Table of hash buckets * @size: Number of hash buckets @@ -102,132 +75,14 @@ struct bucket_table { struct rhash_head __rcu *buckets[] ____cacheline_aligned_in_smp; }; -/** - * struct rhashtable_compare_arg - Key for the function rhashtable_compare - * @ht: Hash table - * @key: Key to compare against - */ -struct rhashtable_compare_arg { - struct rhashtable *ht; - const void *key; -}; - -typedef u32 (*rht_hashfn_t)(const void *data, u32 len, u32 seed); -typedef u32 (*rht_obj_hashfn_t)(const void *data, u32 len, u32 seed); -typedef int (*rht_obj_cmpfn_t)(struct rhashtable_compare_arg *arg, - const void *obj); - -struct rhashtable; - -/** - * struct rhashtable_params - Hash table construction parameters - * @nelem_hint: Hint on number of elements, should be 75% of desired size - * @key_len: Length of key - * @key_offset: Offset of key in struct to be hashed - * @head_offset: Offset of rhash_head in struct to be hashed - * @max_size: Maximum size while expanding - * @min_size: Minimum size while shrinking - * @locks_mul: Number of bucket locks to allocate per cpu (default: 32) - * @automatic_shrinking: Enable automatic shrinking of tables - * @nulls_base: Base value to generate nulls marker - * @hashfn: Hash function (default: jhash2 if !(key_len % 4), or jhash) - * @obj_hashfn: Function to hash object - * @obj_cmpfn: Function to compare key with object - */ -struct rhashtable_params { - u16 nelem_hint; - u16 key_len; - u16 key_offset; - u16 head_offset; - unsigned int max_size; - u16 min_size; - bool automatic_shrinking; - u8 locks_mul; - u32 nulls_base; - rht_hashfn_t hashfn; - rht_obj_hashfn_t obj_hashfn; - rht_obj_cmpfn_t obj_cmpfn; -}; - -/** - * struct rhashtable - Hash table handle - * @tbl: Bucket table - * @key_len: Key length for hashfn - * @max_elems: Maximum number of elements in table - * @p: Configuration parameters 
- * @rhlist: True if this is an rhltable - * @run_work: Deferred worker to expand/shrink asynchronously - * @mutex: Mutex to protect current/future table swapping - * @lock: Spin lock to protect walker list - * @nelems: Number of elements in table - */ -struct rhashtable { - struct bucket_table __rcu *tbl; - unsigned int key_len; - unsigned int max_elems; - struct rhashtable_params p; - bool rhlist; - struct work_struct run_work; - struct mutex mutex; - spinlock_t lock; - atomic_t nelems; -}; - -/** - * struct rhltable - Hash table with duplicate objects in a list - * @ht: Underlying rhtable - */ -struct rhltable { - struct rhashtable ht; -}; - -/** - * struct rhashtable_walker - Hash table walker - * @list: List entry on list of walkers - * @tbl: The table that we were walking over - */ -struct rhashtable_walker { - struct list_head list; - struct bucket_table *tbl; -}; - -/** - * struct rhashtable_iter - Hash table iterator - * @ht: Table to iterate through - * @p: Current pointer - * @list: Current hash list pointer - * @walker: Associated rhashtable walker - * @slot: Current slot - * @skip: Number of entries to skip in slot - */ -struct rhashtable_iter { - struct rhashtable *ht; - struct rhash_head *p; - struct rhlist_head *list; - struct rhashtable_walker walker; - unsigned int slot; - unsigned int skip; - bool end_of_table; -}; - -static inline unsigned long rht_marker(const struct rhashtable *ht, u32 hash) -{ - return NULLS_MARKER(ht->p.nulls_base + hash); -} - -#define INIT_RHT_NULLS_HEAD(ptr, ht, hash) \ - ((ptr) = (typeof(ptr)) rht_marker(ht, hash)) +#define INIT_RHT_NULLS_HEAD(ptr) \ + ((ptr) = (typeof(ptr)) NULLS_MARKER(0)) static inline bool rht_is_a_nulls(const struct rhash_head *ptr) { return ((unsigned long) ptr & 1); } -static inline unsigned long rht_get_nulls_value(const struct rhash_head *ptr) -{ - return ((unsigned long) ptr) >> 1; -} - static inline void *rht_obj(const struct rhashtable *ht, const struct rhash_head *he) { @@ -237,7 +92,7 @@ static inline void *rht_obj(const struct rhashtable *ht, static inline unsigned int rht_bucket_index(const struct bucket_table *tbl, unsigned int hash) { - return (hash >> RHT_HASH_RESERVED_SPACE) & (tbl->size - 1); + return hash & (tbl->size - 1); } static inline unsigned int rht_key_get_hash(struct rhashtable *ht, @@ -376,11 +231,6 @@ static inline int lockdep_rht_bucket_is_held(const struct bucket_table *tbl, } #endif /* CONFIG_PROVE_LOCKING */ -int rhashtable_init(struct rhashtable *ht, - const struct rhashtable_params *params); -int rhltable_init(struct rhltable *hlt, - const struct rhashtable_params *params); - void *rhashtable_insert_slow(struct rhashtable *ht, const void *key, struct rhash_head *obj); @@ -745,7 +595,7 @@ static inline void *__rhashtable_insert_fast( lock = rht_bucket_lock(tbl, hash); spin_lock_bh(lock); - if (unlikely(rht_dereference_bucket(tbl->future_tbl, tbl, hash))) { + if (unlikely(rcu_access_pointer(tbl->future_tbl))) { slow_path: spin_unlock_bh(lock); rcu_read_unlock(); diff --git a/include/linux/sctp.h b/include/linux/sctp.h index b36c76635f18..83d94341e003 100644 --- a/include/linux/sctp.h +++ b/include/linux/sctp.h @@ -801,4 +801,11 @@ struct sctp_strreset_resptsn { __be32 receivers_next_tsn; }; +enum { + SCTP_DSCP_SET_MASK = 0x1, + SCTP_DSCP_VAL_MASK = 0xfc, + SCTP_FLOWLABEL_SET_MASK = 0x100000, + SCTP_FLOWLABEL_VAL_MASK = 0xfffff +}; + #endif /* __LINUX_SCTP_H__ */ diff --git a/include/linux/sfp.h b/include/linux/sfp.h index ebce9e24906a..d37518e89db2 100644 --- a/include/linux/sfp.h +++ 
b/include/linux/sfp.h @@ -231,6 +231,50 @@ struct sfp_eeprom_id { struct sfp_eeprom_ext ext; } __packed; +struct sfp_diag { + __be16 temp_high_alarm; + __be16 temp_low_alarm; + __be16 temp_high_warn; + __be16 temp_low_warn; + __be16 volt_high_alarm; + __be16 volt_low_alarm; + __be16 volt_high_warn; + __be16 volt_low_warn; + __be16 bias_high_alarm; + __be16 bias_low_alarm; + __be16 bias_high_warn; + __be16 bias_low_warn; + __be16 txpwr_high_alarm; + __be16 txpwr_low_alarm; + __be16 txpwr_high_warn; + __be16 txpwr_low_warn; + __be16 rxpwr_high_alarm; + __be16 rxpwr_low_alarm; + __be16 rxpwr_high_warn; + __be16 rxpwr_low_warn; + __be16 laser_temp_high_alarm; + __be16 laser_temp_low_alarm; + __be16 laser_temp_high_warn; + __be16 laser_temp_low_warn; + __be16 tec_cur_high_alarm; + __be16 tec_cur_low_alarm; + __be16 tec_cur_high_warn; + __be16 tec_cur_low_warn; + __be32 cal_rxpwr4; + __be32 cal_rxpwr3; + __be32 cal_rxpwr2; + __be32 cal_rxpwr1; + __be32 cal_rxpwr0; + __be16 cal_txi_slope; + __be16 cal_txi_offset; + __be16 cal_txpwr_slope; + __be16 cal_txpwr_offset; + __be16 cal_t_slope; + __be16 cal_t_offset; + __be16 cal_v_slope; + __be16 cal_v_offset; +} __packed; + /* SFP EEPROM registers */ enum { SFP_PHYS_ID = 0x00, @@ -384,7 +428,33 @@ enum { SFP_TEC_CUR = 0x6c, SFP_STATUS = 0x6e, - SFP_ALARM = 0x70, + SFP_ALARM0 = 0x70, + SFP_ALARM0_TEMP_HIGH = BIT(7), + SFP_ALARM0_TEMP_LOW = BIT(6), + SFP_ALARM0_VCC_HIGH = BIT(5), + SFP_ALARM0_VCC_LOW = BIT(4), + SFP_ALARM0_TX_BIAS_HIGH = BIT(3), + SFP_ALARM0_TX_BIAS_LOW = BIT(2), + SFP_ALARM0_TXPWR_HIGH = BIT(1), + SFP_ALARM0_TXPWR_LOW = BIT(0), + + SFP_ALARM1 = 0x71, + SFP_ALARM1_RXPWR_HIGH = BIT(7), + SFP_ALARM1_RXPWR_LOW = BIT(6), + + SFP_WARN0 = 0x74, + SFP_WARN0_TEMP_HIGH = BIT(7), + SFP_WARN0_TEMP_LOW = BIT(6), + SFP_WARN0_VCC_HIGH = BIT(5), + SFP_WARN0_VCC_LOW = BIT(4), + SFP_WARN0_TX_BIAS_HIGH = BIT(3), + SFP_WARN0_TX_BIAS_LOW = BIT(2), + SFP_WARN0_TXPWR_HIGH = BIT(1), + SFP_WARN0_TXPWR_LOW = BIT(0), + + SFP_WARN1 = 0x75, + SFP_WARN1_RXPWR_HIGH = BIT(7), + SFP_WARN1_RXPWR_LOW = BIT(6), SFP_EXT_STATUS = 0x76, SFP_VSL = 0x78, diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 610a201126ee..fd3cb1b247df 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -641,6 +641,7 @@ typedef unsigned char *sk_buff_data_t; * @no_fcs: Request NIC to treat last 4 bytes as Ethernet FCS * @csum_not_inet: use CRC32c to resolve CHECKSUM_PARTIAL * @dst_pending_confirm: need to confirm neighbour + * @decrypted: Decrypted SKB * @napi_id: id of the NAPI struct this skb came from * @secmark: security marking * @mark: Generic packet mark @@ -678,7 +679,8 @@ struct sk_buff { int ip_defrag_offset; }; }; - struct rb_node rbnode; /* used in netem & tcp stack */ + struct rb_node rbnode; /* used in netem & tcp stack */ + struct list_head list; }; struct sock *sk; @@ -791,6 +793,9 @@ struct sk_buff { __u8 tc_redirected:1; __u8 tc_from_ingress:1; #endif +#ifdef CONFIG_TLS_DEVICE + __u8 decrypted:1; +#endif #ifdef CONFIG_NET_SCHED __u16 tc_index; /* traffic control index */ diff --git a/include/linux/tcp.h b/include/linux/tcp.h index 72705eaf4b84..58a8d7d71354 100644 --- a/include/linux/tcp.h +++ b/include/linux/tcp.h @@ -89,7 +89,7 @@ struct tcp_sack_block { struct tcp_options_received { /* PAWS/RTTM data */ - long ts_recent_stamp;/* Time we stored ts_recent (for aging) */ + int ts_recent_stamp;/* Time we stored ts_recent (for aging) */ u32 ts_recent; /* Time stamp to echo next */ u32 rcv_tsval; /* Time stamp value */ u32 rcv_tsecr; /* Time stamp echo 
reply */ @@ -350,6 +350,7 @@ struct tcp_sock { #endif /* Receiver side RTT estimation */ + u32 rcv_rtt_last_tsecr; struct { u32 rtt_us; u32 seq; @@ -425,7 +426,7 @@ struct tcp_timewait_sock { /* The time we sent the last out-of-window ACK: */ u32 tw_last_oow_ack_time; - long tw_ts_recent_stamp; + int tw_ts_recent_stamp; #ifdef CONFIG_TCP_MD5SIG struct tcp_md5sig_key *tw_md5_key; #endif diff --git a/include/linux/udp.h b/include/linux/udp.h index ca840345571b..320d49d85484 100644 --- a/include/linux/udp.h +++ b/include/linux/udp.h @@ -74,8 +74,8 @@ struct udp_sock { void (*encap_destroy)(struct sock *sk); /* GRO functions for UDP socket */ - struct sk_buff ** (*gro_receive)(struct sock *sk, - struct sk_buff **head, + struct sk_buff * (*gro_receive)(struct sock *sk, + struct list_head *head, struct sk_buff *skb); int (*gro_complete)(struct sock *sk, struct sk_buff *skb, diff --git a/include/net/act_api.h b/include/net/act_api.h index 9e59ebfded62..683ce41053d9 100644 --- a/include/net/act_api.h +++ b/include/net/act_api.h @@ -6,6 +6,7 @@ * Public action API for classifiers/qdiscs */ +#include <linux/refcount.h> #include <net/sch_generic.h> #include <net/pkt_sched.h> #include <net/net_namespace.h> @@ -26,8 +27,8 @@ struct tc_action { struct tcf_idrinfo *idrinfo; u32 tcfa_index; - int tcfa_refcnt; - int tcfa_bindcnt; + refcount_t tcfa_refcnt; + atomic_t tcfa_bindcnt; u32 tcfa_capab; int tcfa_action; struct tcf_t tcfa_tm; @@ -37,7 +38,7 @@ struct tc_action { spinlock_t tcfa_lock; struct gnet_stats_basic_cpu __percpu *cpu_bstats; struct gnet_stats_queue __percpu *cpu_qstats; - struct tc_cookie *act_cookie; + struct tc_cookie __rcu *act_cookie; struct tcf_chain *goto_chain; }; #define tcf_index common.tcfa_index @@ -91,7 +92,8 @@ struct tc_action_ops { struct netlink_ext_ack *extack); int (*init)(struct net *net, struct nlattr *nla, struct nlattr *est, struct tc_action **act, int ovr, - int bind, struct netlink_ext_ack *extack); + int bind, bool rtnl_held, + struct netlink_ext_ack *extack); int (*walk)(struct net *, struct sk_buff *, struct netlink_callback *, int, const struct tc_action_ops *, @@ -99,6 +101,7 @@ struct tc_action_ops { void (*stats_update)(struct tc_action *, u64, u32, u64); size_t (*get_fill_size)(const struct tc_action *act); struct net_device *(*get_dev)(const struct tc_action *a); + int (*delete)(struct net *net, u32 index); }; struct tc_action_net { @@ -151,6 +154,10 @@ int tcf_idr_create(struct tc_action_net *tn, u32 index, struct nlattr *est, int bind, bool cpustats); void tcf_idr_insert(struct tc_action_net *tn, struct tc_action *a); +void tcf_idr_cleanup(struct tc_action_net *tn, u32 index); +int tcf_idr_check_alloc(struct tc_action_net *tn, u32 *index, + struct tc_action **a, int bind); +int tcf_idr_delete_index(struct tc_action_net *tn, u32 index); int __tcf_idr_release(struct tc_action *a, bool bind, bool strict); static inline int tcf_idr_release(struct tc_action *a, bool bind) @@ -161,18 +168,20 @@ static inline int tcf_idr_release(struct tc_action *a, bool bind) int tcf_register_action(struct tc_action_ops *a, struct pernet_operations *ops); int tcf_unregister_action(struct tc_action_ops *a, struct pernet_operations *ops); -int tcf_action_destroy(struct list_head *actions, int bind); +int tcf_action_destroy(struct tc_action *actions[], int bind); int tcf_action_exec(struct sk_buff *skb, struct tc_action **actions, int nr_actions, struct tcf_result *res); int tcf_action_init(struct net *net, struct tcf_proto *tp, struct nlattr *nla, struct nlattr *est, char *name, 
int ovr, int bind, - struct list_head *actions, size_t *attr_size, - struct netlink_ext_ack *extack); + struct tc_action *actions[], size_t *attr_size, + bool rtnl_held, struct netlink_ext_ack *extack); struct tc_action *tcf_action_init_1(struct net *net, struct tcf_proto *tp, struct nlattr *nla, struct nlattr *est, char *name, int ovr, int bind, + bool rtnl_held, struct netlink_ext_ack *extack); -int tcf_action_dump(struct sk_buff *skb, struct list_head *, int, int); +int tcf_action_dump(struct sk_buff *skb, struct tc_action *actions[], int bind, + int ref); int tcf_action_dump_old(struct sk_buff *skb, struct tc_action *a, int, int); int tcf_action_dump_1(struct sk_buff *skb, struct tc_action *a, int, int); int tcf_action_copy_stats(struct sk_buff *, struct tc_action *, int); @@ -190,9 +199,6 @@ static inline void tcf_action_stats_update(struct tc_action *a, u64 bytes, #endif } -typedef int tc_setup_cb_t(enum tc_setup_type type, - void *type_data, void *cb_priv); - #ifdef CONFIG_NET_CLS_ACT int tc_setup_cb_egdev_register(const struct net_device *dev, tc_setup_cb_t *cb, void *cb_priv); diff --git a/include/net/bonding.h b/include/net/bonding.h index 808f1d167349..a2d058170ea3 100644 --- a/include/net/bonding.h +++ b/include/net/bonding.h @@ -411,6 +411,19 @@ static inline bool bond_slave_can_tx(struct slave *slave) bond_is_active_slave(slave); } +static inline bool bond_is_active_slave_dev(const struct net_device *slave_dev) +{ + struct slave *slave; + bool active; + + rcu_read_lock(); + slave = bond_slave_get_rcu(slave_dev); + active = bond_is_active_slave(slave); + rcu_read_unlock(); + + return active; +} + static inline void bond_hw_addr_copy(u8 *dst, const u8 *src, unsigned int len) { if (len == ETH_ALEN) { diff --git a/include/net/busy_poll.h b/include/net/busy_poll.h index c5187438af38..9e36fda652b7 100644 --- a/include/net/busy_poll.h +++ b/include/net/busy_poll.h @@ -151,6 +151,7 @@ static inline void sk_mark_napi_id(struct sock *sk, const struct sk_buff *skb) #ifdef CONFIG_NET_RX_BUSY_POLL sk->sk_napi_id = skb->napi_id; #endif + sk_rx_queue_set(sk, skb); } /* variant used for unconnected sockets */ diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h index 5fbfe61f41c6..9ba1f289c439 100644 --- a/include/net/cfg80211.h +++ b/include/net/cfg80211.h @@ -285,6 +285,41 @@ struct ieee80211_sta_vht_cap { struct ieee80211_vht_mcs_info vht_mcs; }; +#define IEEE80211_HE_PPE_THRES_MAX_LEN 25 + +/** + * struct ieee80211_sta_he_cap - STA's HE capabilities + * + * This structure describes most essential parameters needed + * to describe 802.11ax HE capabilities for a STA. + * + * @has_he: true iff HE data is valid. + * @he_cap_elem: Fixed portion of the HE capabilities element. + * @he_mcs_nss_supp: The supported NSS/MCS combinations. + * @ppe_thres: Holds the PPE Thresholds data. + */ +struct ieee80211_sta_he_cap { + bool has_he; + struct ieee80211_he_cap_elem he_cap_elem; + struct ieee80211_he_mcs_nss_supp he_mcs_nss_supp; + u8 ppe_thres[IEEE80211_HE_PPE_THRES_MAX_LEN]; +}; + +/** + * struct ieee80211_sband_iftype_data + * + * This structure encapsulates sband data that is relevant for the + * interface types defined in @types_mask. Each type in the + * @types_mask must be unique across all instances of iftype_data. 
+ * + * @types_mask: interface types mask + * @he_cap: holds the HE capabilities + */ +struct ieee80211_sband_iftype_data { + u16 types_mask; + struct ieee80211_sta_he_cap he_cap; +}; + /** * struct ieee80211_supported_band - frequency band definition * @@ -301,6 +336,11 @@ struct ieee80211_sta_vht_cap { * @n_bitrates: Number of bitrates in @bitrates * @ht_cap: HT capabilities in this band * @vht_cap: VHT capabilities in this band + * @n_iftype_data: number of iftype data entries + * @iftype_data: interface type data entries. Note that the bits in + * @types_mask inside this structure cannot overlap (i.e. only + * one occurrence of each type is allowed across all instances of + * iftype_data). */ struct ieee80211_supported_band { struct ieee80211_channel *channels; @@ -310,9 +350,56 @@ struct ieee80211_supported_band { int n_bitrates; struct ieee80211_sta_ht_cap ht_cap; struct ieee80211_sta_vht_cap vht_cap; + u16 n_iftype_data; + const struct ieee80211_sband_iftype_data *iftype_data; }; /** + * ieee80211_get_sband_iftype_data - return sband data for a given iftype + * @sband: the sband to search for the STA on + * @iftype: enum nl80211_iftype + * + * Return: pointer to struct ieee80211_sband_iftype_data, or NULL is none found + */ +static inline const struct ieee80211_sband_iftype_data * +ieee80211_get_sband_iftype_data(const struct ieee80211_supported_band *sband, + u8 iftype) +{ + int i; + + if (WARN_ON(iftype >= NL80211_IFTYPE_MAX)) + return NULL; + + for (i = 0; i < sband->n_iftype_data; i++) { + const struct ieee80211_sband_iftype_data *data = + &sband->iftype_data[i]; + + if (data->types_mask & BIT(iftype)) + return data; + } + + return NULL; +} + +/** + * ieee80211_get_he_sta_cap - return HE capabilities for an sband's STA + * @sband: the sband to search for the STA on + * + * Return: pointer to the struct ieee80211_sta_he_cap, or NULL is none found + */ +static inline const struct ieee80211_sta_he_cap * +ieee80211_get_he_sta_cap(const struct ieee80211_supported_band *sband) +{ + const struct ieee80211_sband_iftype_data *data = + ieee80211_get_sband_iftype_data(sband, NL80211_IFTYPE_STATION); + + if (data && data->he_cap.has_he) + return &data->he_cap; + + return NULL; +} + +/** * wiphy_read_of_freq_limits - read frequency limits from device tree * * @wiphy: the wireless device to get extra limits for @@ -899,6 +986,8 @@ enum station_parameters_apply_mask { * @opmode_notif: operating mode field from Operating Mode Notification * @opmode_notif_used: information if operating mode field is used * @support_p2p_ps: information if station supports P2P PS mechanism + * @he_capa: HE capabilities of station + * @he_capa_len: the length of the HE capabilities */ struct station_parameters { const u8 *supported_rates; @@ -926,6 +1015,8 @@ struct station_parameters { u8 opmode_notif; bool opmode_notif_used; int support_p2p_ps; + const struct ieee80211_he_cap_elem *he_capa; + u8 he_capa_len; }; /** @@ -1000,12 +1091,14 @@ int cfg80211_check_station_change(struct wiphy *wiphy, * @RATE_INFO_FLAGS_VHT_MCS: mcs field filled with VHT MCS * @RATE_INFO_FLAGS_SHORT_GI: 400ns guard interval * @RATE_INFO_FLAGS_60G: 60GHz MCS + * @RATE_INFO_FLAGS_HE_MCS: HE MCS information */ enum rate_info_flags { RATE_INFO_FLAGS_MCS = BIT(0), RATE_INFO_FLAGS_VHT_MCS = BIT(1), RATE_INFO_FLAGS_SHORT_GI = BIT(2), RATE_INFO_FLAGS_60G = BIT(3), + RATE_INFO_FLAGS_HE_MCS = BIT(4), }; /** @@ -1019,6 +1112,7 @@ enum rate_info_flags { * @RATE_INFO_BW_40: 40 MHz bandwidth * @RATE_INFO_BW_80: 80 MHz bandwidth * @RATE_INFO_BW_160: 160 
MHz bandwidth + * @RATE_INFO_BW_HE_RU: bandwidth determined by HE RU allocation */ enum rate_info_bw { RATE_INFO_BW_20 = 0, @@ -1027,6 +1121,7 @@ enum rate_info_bw { RATE_INFO_BW_40, RATE_INFO_BW_80, RATE_INFO_BW_160, + RATE_INFO_BW_HE_RU, }; /** @@ -1035,10 +1130,14 @@ enum rate_info_bw { * Information about a receiving or transmitting bitrate * * @flags: bitflag of flags from &enum rate_info_flags - * @mcs: mcs index if struct describes a 802.11n bitrate + * @mcs: mcs index if struct describes an HT/VHT/HE rate * @legacy: bitrate in 100kbit/s for 802.11abg - * @nss: number of streams (VHT only) + * @nss: number of streams (VHT & HE only) * @bw: bandwidth (from &enum rate_info_bw) + * @he_gi: HE guard interval (from &enum nl80211_he_gi) + * @he_dcm: HE DCM value + * @he_ru_alloc: HE RU allocation (from &enum nl80211_he_ru_alloc, + * only valid if bw is %RATE_INFO_BW_HE_RU) */ struct rate_info { u8 flags; @@ -1046,6 +1145,9 @@ struct rate_info { u16 legacy; u8 nss; u8 bw; + u8 he_gi; + u8 he_dcm; + u8 he_ru_alloc; }; /** diff --git a/include/net/devlink.h b/include/net/devlink.h index e336ea9c73df..b9b89d6604d4 100644 --- a/include/net/devlink.h +++ b/include/net/devlink.h @@ -27,6 +27,9 @@ struct devlink { struct list_head sb_list; struct list_head dpipe_table_list; struct list_head resource_list; + struct list_head param_list; + struct list_head region_list; + u32 snapshot_id; struct devlink_dpipe_headers *dpipe_headers; const struct devlink_ops *ops; struct device *dev; @@ -295,6 +298,115 @@ struct devlink_resource { #define DEVLINK_RESOURCE_ID_PARENT_TOP 0 +#define DEVLINK_PARAM_MAX_STRING_VALUE 32 +enum devlink_param_type { + DEVLINK_PARAM_TYPE_U8, + DEVLINK_PARAM_TYPE_U16, + DEVLINK_PARAM_TYPE_U32, + DEVLINK_PARAM_TYPE_STRING, + DEVLINK_PARAM_TYPE_BOOL, +}; + +union devlink_param_value { + u8 vu8; + u16 vu16; + u32 vu32; + const char *vstr; + bool vbool; +}; + +struct devlink_param_gset_ctx { + union devlink_param_value val; + enum devlink_param_cmode cmode; +}; + +/** + * struct devlink_param - devlink configuration parameter data + * @name: name of the parameter + * @generic: indicates if the parameter is generic or driver specific + * @type: parameter type + * @supported_cmodes: bitmap of supported configuration modes + * @get: get parameter value, used for runtime and permanent + * configuration modes + * @set: set parameter value, used for runtime and permanent + * configuration modes + * @validate: validate input value is applicable (within value range, etc.) + * + * This struct should be used by the driver to fill the data for + * a parameter it registers. 
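(Editorial aside, not part of the patch: a driver wiring up the devlink parameter support added in this hunk would fill an array of struct devlink_param - the structure and the DEVLINK_PARAM_GENERIC() helper appear just below - and register it after devlink_register(). Sketch only; the foo_* names, the 128-MAC limit and the use of the DEVLINK_PARAM_CMODE_DRIVERINIT mode from the matching uapi change are assumptions:)

static int foo_devlink_max_macs_validate(struct devlink *devlink, u32 id,
					 union devlink_param_value val,
					 struct netlink_ext_ack *extack)
{
	/* hypothetical hardware limit */
	return val.vu32 <= 128 ? 0 : -ERANGE;
}

static const struct devlink_param foo_devlink_params[] = {
	DEVLINK_PARAM_GENERIC(MAX_MACS, BIT(DEVLINK_PARAM_CMODE_DRIVERINIT),
			      NULL, NULL, foo_devlink_max_macs_validate),
};

/* typically called from probe, after devlink_register() */
static int foo_devlink_params_init(struct devlink *devlink)
{
	return devlink_params_register(devlink, foo_devlink_params,
				       ARRAY_SIZE(foo_devlink_params));
}
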
+ */ +struct devlink_param { + u32 id; + const char *name; + bool generic; + enum devlink_param_type type; + unsigned long supported_cmodes; + int (*get)(struct devlink *devlink, u32 id, + struct devlink_param_gset_ctx *ctx); + int (*set)(struct devlink *devlink, u32 id, + struct devlink_param_gset_ctx *ctx); + int (*validate)(struct devlink *devlink, u32 id, + union devlink_param_value val, + struct netlink_ext_ack *extack); +}; + +struct devlink_param_item { + struct list_head list; + const struct devlink_param *param; + union devlink_param_value driverinit_value; + bool driverinit_value_valid; +}; + +enum devlink_param_generic_id { + DEVLINK_PARAM_GENERIC_ID_INT_ERR_RESET, + DEVLINK_PARAM_GENERIC_ID_MAX_MACS, + DEVLINK_PARAM_GENERIC_ID_ENABLE_SRIOV, + DEVLINK_PARAM_GENERIC_ID_REGION_SNAPSHOT, + + /* add new param generic ids above here*/ + __DEVLINK_PARAM_GENERIC_ID_MAX, + DEVLINK_PARAM_GENERIC_ID_MAX = __DEVLINK_PARAM_GENERIC_ID_MAX - 1, +}; + +#define DEVLINK_PARAM_GENERIC_INT_ERR_RESET_NAME "internal_error_reset" +#define DEVLINK_PARAM_GENERIC_INT_ERR_RESET_TYPE DEVLINK_PARAM_TYPE_BOOL + +#define DEVLINK_PARAM_GENERIC_MAX_MACS_NAME "max_macs" +#define DEVLINK_PARAM_GENERIC_MAX_MACS_TYPE DEVLINK_PARAM_TYPE_U32 + +#define DEVLINK_PARAM_GENERIC_ENABLE_SRIOV_NAME "enable_sriov" +#define DEVLINK_PARAM_GENERIC_ENABLE_SRIOV_TYPE DEVLINK_PARAM_TYPE_BOOL + +#define DEVLINK_PARAM_GENERIC_REGION_SNAPSHOT_NAME "region_snapshot_enable" +#define DEVLINK_PARAM_GENERIC_REGION_SNAPSHOT_TYPE DEVLINK_PARAM_TYPE_BOOL + +#define DEVLINK_PARAM_GENERIC(_id, _cmodes, _get, _set, _validate) \ +{ \ + .id = DEVLINK_PARAM_GENERIC_ID_##_id, \ + .name = DEVLINK_PARAM_GENERIC_##_id##_NAME, \ + .type = DEVLINK_PARAM_GENERIC_##_id##_TYPE, \ + .generic = true, \ + .supported_cmodes = _cmodes, \ + .get = _get, \ + .set = _set, \ + .validate = _validate, \ +} + +#define DEVLINK_PARAM_DRIVER(_id, _name, _type, _cmodes, _get, _set, _validate) \ +{ \ + .id = _id, \ + .name = _name, \ + .type = _type, \ + .supported_cmodes = _cmodes, \ + .get = _get, \ + .set = _set, \ + .validate = _validate, \ +} + +struct devlink_region; + +typedef void devlink_snapshot_data_dest_t(const void *data); + struct devlink_ops { int (*reload)(struct devlink *devlink, struct netlink_ext_ack *extack); int (*port_type_set)(struct devlink_port *devlink_port, @@ -430,6 +542,26 @@ void devlink_resource_occ_get_register(struct devlink *devlink, void *occ_get_priv); void devlink_resource_occ_get_unregister(struct devlink *devlink, u64 resource_id); +int devlink_params_register(struct devlink *devlink, + const struct devlink_param *params, + size_t params_count); +void devlink_params_unregister(struct devlink *devlink, + const struct devlink_param *params, + size_t params_count); +int devlink_param_driverinit_value_get(struct devlink *devlink, u32 param_id, + union devlink_param_value *init_val); +int devlink_param_driverinit_value_set(struct devlink *devlink, u32 param_id, + union devlink_param_value init_val); +void devlink_param_value_changed(struct devlink *devlink, u32 param_id); +struct devlink_region *devlink_region_create(struct devlink *devlink, + const char *region_name, + u32 region_max_snapshots, + u64 region_size); +void devlink_region_destroy(struct devlink_region *region); +u32 devlink_region_shapshot_id_get(struct devlink *devlink); +int devlink_region_snapshot_create(struct devlink_region *region, u64 data_len, + u8 *data, u32 snapshot_id, + devlink_snapshot_data_dest_t *data_destructor); #else @@ -622,6 +754,69 @@ 
devlink_resource_occ_get_unregister(struct devlink *devlink, { } +static inline int +devlink_params_register(struct devlink *devlink, + const struct devlink_param *params, + size_t params_count) +{ + return 0; +} + +static inline void +devlink_params_unregister(struct devlink *devlink, + const struct devlink_param *params, + size_t params_count) +{ + +} + +static inline int +devlink_param_driverinit_value_get(struct devlink *devlink, u32 param_id, + union devlink_param_value *init_val) +{ + return -EOPNOTSUPP; +} + +static inline int +devlink_param_driverinit_value_set(struct devlink *devlink, u32 param_id, + union devlink_param_value init_val) +{ + return -EOPNOTSUPP; +} + +static inline void +devlink_param_value_changed(struct devlink *devlink, u32 param_id) +{ +} + +static inline struct devlink_region * +devlink_region_create(struct devlink *devlink, + const char *region_name, + u32 region_max_snapshots, + u64 region_size) +{ + return NULL; +} + +static inline void +devlink_region_destroy(struct devlink_region *region) +{ +} + +static inline u32 +devlink_region_shapshot_id_get(struct devlink *devlink) +{ + return 0; +} + +static inline int +devlink_region_snapshot_create(struct devlink_region *region, u64 data_len, + u8 *data, u32 snapshot_id, + devlink_snapshot_data_dest_t *data_destructor) +{ + return 0; +} + #endif #endif /* _NET_DEVLINK_H_ */ diff --git a/include/net/dsa.h b/include/net/dsa.h index fdbd6082945d..461e8a7661b7 100644 --- a/include/net/dsa.h +++ b/include/net/dsa.h @@ -259,6 +259,9 @@ struct dsa_switch { /* Number of switch port queues */ unsigned int num_tx_queues; + unsigned long *bitmap; + unsigned long _bitmap; + /* Dynamically allocated ports, keep last */ size_t num_ports; struct dsa_port ports[]; diff --git a/include/net/flow_dissector.h b/include/net/flow_dissector.h index adc24df56b90..2a17f041f7a1 100644 --- a/include/net/flow_dissector.h +++ b/include/net/flow_dissector.h @@ -47,7 +47,7 @@ struct flow_dissector_key_tags { struct flow_dissector_key_vlan { u16 vlan_id:12, vlan_priority:3; - u16 padding; + __be16 vlan_tpid; }; struct flow_dissector_key_mpls { @@ -206,7 +206,8 @@ enum flow_dissector_key_id { FLOW_DISSECTOR_KEY_MPLS, /* struct flow_dissector_key_mpls */ FLOW_DISSECTOR_KEY_TCP, /* struct flow_dissector_key_tcp */ FLOW_DISSECTOR_KEY_IP, /* struct flow_dissector_key_ip */ - + FLOW_DISSECTOR_KEY_CVLAN, /* struct flow_dissector_key_flow_vlan */ + FLOW_DISSECTOR_KEY_ENC_IP, /* struct flow_dissector_key_ip */ FLOW_DISSECTOR_KEY_MAX, }; @@ -237,6 +238,7 @@ struct flow_keys { struct flow_dissector_key_basic basic; struct flow_dissector_key_tags tags; struct flow_dissector_key_vlan vlan; + struct flow_dissector_key_vlan cvlan; struct flow_dissector_key_keyid keyid; struct flow_dissector_key_ports ports; struct flow_dissector_key_addrs addrs; diff --git a/include/net/ieee80211_radiotap.h b/include/net/ieee80211_radiotap.h index 960236fb1681..feef706e1158 100644 --- a/include/net/ieee80211_radiotap.h +++ b/include/net/ieee80211_radiotap.h @@ -1,5 +1,6 @@ /* * Copyright (c) 2017 Intel Deutschland GmbH + * Copyright (c) 2018 Intel Corporation * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above @@ -72,6 +73,8 @@ enum ieee80211_radiotap_presence { IEEE80211_RADIOTAP_AMPDU_STATUS = 20, IEEE80211_RADIOTAP_VHT = 21, IEEE80211_RADIOTAP_TIMESTAMP = 22, + IEEE80211_RADIOTAP_HE = 23, + IEEE80211_RADIOTAP_HE_MU = 24, /* valid in every it_present bitmap, even vendor 
namespaces */ IEEE80211_RADIOTAP_RADIOTAP_NAMESPACE = 29, @@ -202,6 +205,126 @@ enum ieee80211_radiotap_timestamp_flags { IEEE80211_RADIOTAP_TIMESTAMP_FLAG_ACCURACY = 0x02, }; +struct ieee80211_radiotap_he { + __le16 data1, data2, data3, data4, data5, data6; +}; + +enum ieee80211_radiotap_he_bits { + IEEE80211_RADIOTAP_HE_DATA1_FORMAT_MASK = 3, + IEEE80211_RADIOTAP_HE_DATA1_FORMAT_SU = 0, + IEEE80211_RADIOTAP_HE_DATA1_FORMAT_EXT_SU = 1, + IEEE80211_RADIOTAP_HE_DATA1_FORMAT_MU = 2, + IEEE80211_RADIOTAP_HE_DATA1_FORMAT_TRIG = 3, + + IEEE80211_RADIOTAP_HE_DATA1_BSS_COLOR_KNOWN = 0x0004, + IEEE80211_RADIOTAP_HE_DATA1_BEAM_CHANGE_KNOWN = 0x0008, + IEEE80211_RADIOTAP_HE_DATA1_UL_DL_KNOWN = 0x0010, + IEEE80211_RADIOTAP_HE_DATA1_DATA_MCS_KNOWN = 0x0020, + IEEE80211_RADIOTAP_HE_DATA1_DATA_DCM_KNOWN = 0x0040, + IEEE80211_RADIOTAP_HE_DATA1_CODING_KNOWN = 0x0080, + IEEE80211_RADIOTAP_HE_DATA1_LDPC_XSYMSEG_KNOWN = 0x0100, + IEEE80211_RADIOTAP_HE_DATA1_STBC_KNOWN = 0x0200, + IEEE80211_RADIOTAP_HE_DATA1_SPTL_REUSE_KNOWN = 0x0400, + IEEE80211_RADIOTAP_HE_DATA1_SPTL_REUSE2_KNOWN = 0x0800, + IEEE80211_RADIOTAP_HE_DATA1_SPTL_REUSE3_KNOWN = 0x1000, + IEEE80211_RADIOTAP_HE_DATA1_SPTL_REUSE4_KNOWN = 0x2000, + IEEE80211_RADIOTAP_HE_DATA1_BW_RU_ALLOC_KNOWN = 0x4000, + IEEE80211_RADIOTAP_HE_DATA1_DOPPLER_KNOWN = 0x8000, + + IEEE80211_RADIOTAP_HE_DATA2_PRISEC_80_KNOWN = 0x0001, + IEEE80211_RADIOTAP_HE_DATA2_GI_KNOWN = 0x0002, + IEEE80211_RADIOTAP_HE_DATA2_NUM_LTF_SYMS_KNOWN = 0x0004, + IEEE80211_RADIOTAP_HE_DATA2_PRE_FEC_PAD_KNOWN = 0x0008, + IEEE80211_RADIOTAP_HE_DATA2_TXBF_KNOWN = 0x0010, + IEEE80211_RADIOTAP_HE_DATA2_PE_DISAMBIG_KNOWN = 0x0020, + IEEE80211_RADIOTAP_HE_DATA2_TXOP_KNOWN = 0x0040, + IEEE80211_RADIOTAP_HE_DATA2_MIDAMBLE_KNOWN = 0x0080, + IEEE80211_RADIOTAP_HE_DATA2_RU_OFFSET = 0x3f00, + IEEE80211_RADIOTAP_HE_DATA2_RU_OFFSET_KNOWN = 0x4000, + IEEE80211_RADIOTAP_HE_DATA2_PRISEC_80_SEC = 0x8000, + + IEEE80211_RADIOTAP_HE_DATA3_BSS_COLOR = 0x003f, + IEEE80211_RADIOTAP_HE_DATA3_BEAM_CHANGE = 0x0040, + IEEE80211_RADIOTAP_HE_DATA3_UL_DL = 0x0080, + IEEE80211_RADIOTAP_HE_DATA3_DATA_MCS = 0x0f00, + IEEE80211_RADIOTAP_HE_DATA3_DATA_DCM = 0x1000, + IEEE80211_RADIOTAP_HE_DATA3_CODING = 0x2000, + IEEE80211_RADIOTAP_HE_DATA3_LDPC_XSYMSEG = 0x4000, + IEEE80211_RADIOTAP_HE_DATA3_STBC = 0x8000, + + IEEE80211_RADIOTAP_HE_DATA4_SU_MU_SPTL_REUSE = 0x000f, + IEEE80211_RADIOTAP_HE_DATA4_MU_STA_ID = 0x7ff0, + IEEE80211_RADIOTAP_HE_DATA4_TB_SPTL_REUSE1 = 0x000f, + IEEE80211_RADIOTAP_HE_DATA4_TB_SPTL_REUSE2 = 0x00f0, + IEEE80211_RADIOTAP_HE_DATA4_TB_SPTL_REUSE3 = 0x0f00, + IEEE80211_RADIOTAP_HE_DATA4_TB_SPTL_REUSE4 = 0xf000, + + IEEE80211_RADIOTAP_HE_DATA5_DATA_BW_RU_ALLOC = 0x000f, + IEEE80211_RADIOTAP_HE_DATA5_DATA_BW_RU_ALLOC_20MHZ = 0, + IEEE80211_RADIOTAP_HE_DATA5_DATA_BW_RU_ALLOC_40MHZ = 1, + IEEE80211_RADIOTAP_HE_DATA5_DATA_BW_RU_ALLOC_80MHZ = 2, + IEEE80211_RADIOTAP_HE_DATA5_DATA_BW_RU_ALLOC_160MHZ = 3, + IEEE80211_RADIOTAP_HE_DATA5_DATA_BW_RU_ALLOC_26T = 4, + IEEE80211_RADIOTAP_HE_DATA5_DATA_BW_RU_ALLOC_52T = 5, + IEEE80211_RADIOTAP_HE_DATA5_DATA_BW_RU_ALLOC_106T = 6, + IEEE80211_RADIOTAP_HE_DATA5_DATA_BW_RU_ALLOC_242T = 7, + IEEE80211_RADIOTAP_HE_DATA5_DATA_BW_RU_ALLOC_484T = 8, + IEEE80211_RADIOTAP_HE_DATA5_DATA_BW_RU_ALLOC_996T = 9, + IEEE80211_RADIOTAP_HE_DATA5_DATA_BW_RU_ALLOC_2x996T = 10, + + IEEE80211_RADIOTAP_HE_DATA5_GI = 0x0030, + IEEE80211_RADIOTAP_HE_DATA5_GI_0_8 = 0, + IEEE80211_RADIOTAP_HE_DATA5_GI_1_6 = 1, + IEEE80211_RADIOTAP_HE_DATA5_GI_3_2 = 2, + + IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE = 0x00c0, + 
IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE_UNKNOWN = 0, + IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE_1X = 1, + IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE_2X = 2, + IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE_4X = 3, + IEEE80211_RADIOTAP_HE_DATA5_NUM_LTF_SYMS = 0x0700, + IEEE80211_RADIOTAP_HE_DATA5_PRE_FEC_PAD = 0x3000, + IEEE80211_RADIOTAP_HE_DATA5_TXBF = 0x4000, + IEEE80211_RADIOTAP_HE_DATA5_PE_DISAMBIG = 0x8000, + + IEEE80211_RADIOTAP_HE_DATA6_NSTS = 0x000f, + IEEE80211_RADIOTAP_HE_DATA6_DOPPLER = 0x0010, + IEEE80211_RADIOTAP_HE_DATA6_TXOP = 0x7f00, + IEEE80211_RADIOTAP_HE_DATA6_MIDAMBLE_PDCTY = 0x8000, +}; + +struct ieee80211_radiotap_he_mu { + __le16 flags1, flags2; + u8 ru_ch1[4]; + u8 ru_ch2[4]; +}; + +enum ieee80211_radiotap_he_mu_bits { + IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_MCS = 0x000f, + IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_MCS_KNOWN = 0x0010, + IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_DCM = 0x0020, + IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_DCM_KNOWN = 0x0040, + IEEE80211_RADIOTAP_HE_MU_FLAGS1_CH2_CTR_26T_RU_KNOWN = 0x0080, + IEEE80211_RADIOTAP_HE_MU_FLAGS1_CH1_RU_KNOWN = 0x0100, + IEEE80211_RADIOTAP_HE_MU_FLAGS1_CH2_RU_KNOWN = 0x0200, + IEEE80211_RADIOTAP_HE_MU_FLAGS1_CH1_CTR_26T_RU_KNOWN = 0x1000, + IEEE80211_RADIOTAP_HE_MU_FLAGS1_CH1_CTR_26T_RU = 0x2000, + IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_COMP_KNOWN = 0x4000, + IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_SYMS_USERS_KNOWN = 0x8000, + + IEEE80211_RADIOTAP_HE_MU_FLAGS2_BW_FROM_SIG_A_BW = 0x0003, + IEEE80211_RADIOTAP_HE_MU_FLAGS2_BW_FROM_SIG_A_BW_20MHZ = 0x0000, + IEEE80211_RADIOTAP_HE_MU_FLAGS2_BW_FROM_SIG_A_BW_40MHZ = 0x0001, + IEEE80211_RADIOTAP_HE_MU_FLAGS2_BW_FROM_SIG_A_BW_80MHZ = 0x0002, + IEEE80211_RADIOTAP_HE_MU_FLAGS2_BW_FROM_SIG_A_BW_160MHZ = 0x0003, + IEEE80211_RADIOTAP_HE_MU_FLAGS2_BW_FROM_SIG_A_BW_KNOWN = 0x0004, + IEEE80211_RADIOTAP_HE_MU_FLAGS2_SIG_B_COMP = 0x0008, + IEEE80211_RADIOTAP_HE_MU_FLAGS2_SIG_B_SYMS_USERS = 0x00f0, + IEEE80211_RADIOTAP_HE_MU_FLAGS2_PUNC_FROM_SIG_A_BW = 0x0300, + IEEE80211_RADIOTAP_HE_MU_FLAGS2_PUNC_FROM_SIG_A_BW_KNOWN= 0x0400, + IEEE80211_RADIOTAP_HE_MU_FLAGS2_CH2_CTR_26T_RU = 0x0800, +}; + /** * ieee80211_get_radiotap_len - get radiotap header length */ diff --git a/include/net/inet_common.h b/include/net/inet_common.h index 384b90c62c0b..3ca969cbd161 100644 --- a/include/net/inet_common.h +++ b/include/net/inet_common.h @@ -43,7 +43,7 @@ int inet_ctl_sock_create(struct sock **sk, unsigned short family, int inet_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len); -struct sk_buff **inet_gro_receive(struct sk_buff **head, struct sk_buff *skb); +struct sk_buff *inet_gro_receive(struct list_head *head, struct sk_buff *skb); int inet_gro_complete(struct sk_buff *skb, int nhoff); struct sk_buff *inet_gso_segment(struct sk_buff *skb, netdev_features_t features); diff --git a/include/net/inet_frag.h b/include/net/inet_frag.h index ed07e3786d98..f4272a29dc44 100644 --- a/include/net/inet_frag.h +++ b/include/net/inet_frag.h @@ -2,7 +2,7 @@ #ifndef __NET_FRAG_H__ #define __NET_FRAG_H__ -#include <linux/rhashtable.h> +#include <linux/rhashtable-types.h> struct netns_frags { /* sysctls */ diff --git a/include/net/inet_sock.h b/include/net/inet_sock.h index 83d5b3c2ac42..314be484c696 100644 --- a/include/net/inet_sock.h +++ b/include/net/inet_sock.h @@ -148,6 +148,7 @@ struct inet_cork { __s16 tos; char priority; __u16 gso_size; + u64 transmit_time; }; struct inet_cork_full { diff --git a/include/net/ip.h b/include/net/ip.h index 0d2281b4b27a..e44b1a44f67a 100644 --- a/include/net/ip.h +++ b/include/net/ip.h 
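(Editorial aside on the ip.h hunk that follows, not part of the patch: the new ipcm_init()/ipcm_init_sk() helpers replace the open-coded ".tos = -1" plus per-field socket defaults that each IPv4 sendmsg() path used to set up by hand. A hypothetical caller, sketched as a kernel-context fragment with error handling trimmed:)

static int example_sendmsg_prepare(struct sock *sk, struct msghdr *msg,
				   struct ipcm_cookie *ipc)
{
	/* tos = -1; oif, saddr and tsflags taken from the socket */
	ipcm_init_sk(ipc, inet_sk(sk));

	/* cmsgs (IP_PKTINFO, IP_TOS, ...) may then override the defaults */
	if (msg->msg_controllen)
		return ip_cmsg_send(sk, msg, ipc, false);

	return 0;
}
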
@@ -72,13 +72,27 @@ struct ipcm_cookie { __be32 addr; int oif; struct ip_options_rcu *opt; - __u8 tx_flags; __u8 ttl; __s16 tos; char priority; __u16 gso_size; }; +static inline void ipcm_init(struct ipcm_cookie *ipcm) +{ + *ipcm = (struct ipcm_cookie) { .tos = -1 }; +} + +static inline void ipcm_init_sk(struct ipcm_cookie *ipcm, + const struct inet_sock *inet) +{ + ipcm_init(ipcm); + + ipcm->sockc.tsflags = inet->sk.sk_tsflags; + ipcm->oif = inet->sk.sk_bound_dev_if; + ipcm->addr = inet->inet_saddr; +} + #define IPCB(skb) ((struct inet_skb_parm*)((skb)->cb)) #define PKTINFO_SKB_CB(skb) ((struct in_pktinfo *)((skb)->cb)) @@ -138,6 +152,8 @@ int ip_build_and_send_pkt(struct sk_buff *skb, const struct sock *sk, struct ip_options_rcu *opt); int ip_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt, struct net_device *orig_dev); +void ip_list_rcv(struct list_head *head, struct packet_type *pt, + struct net_device *orig_dev); int ip_local_deliver(struct sk_buff *skb); int ip_mr_input(struct sk_buff *skb); int ip_output(struct net *net, struct sock *sk, struct sk_buff *skb); @@ -148,7 +164,8 @@ void ip_send_check(struct iphdr *ip); int __ip_local_out(struct net *net, struct sock *sk, struct sk_buff *skb); int ip_local_out(struct net *net, struct sock *sk, struct sk_buff *skb); -int ip_queue_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl); +int __ip_queue_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl, + __u8 tos); void ip_init(void); int ip_append_data(struct sock *sk, struct flowi4 *fl4, int getfrag(void *from, char *to, int offset, int len, @@ -174,6 +191,12 @@ struct sk_buff *ip_make_skb(struct sock *sk, struct flowi4 *fl4, struct ipcm_cookie *ipc, struct rtable **rtp, struct inet_cork *cork, unsigned int flags); +static inline int ip_queue_xmit(struct sock *sk, struct sk_buff *skb, + struct flowi *fl) +{ + return __ip_queue_xmit(sk, skb, fl, inet_sk(sk)->tos); +} + static inline struct sk_buff *ip_finish_skb(struct sock *sk, struct flowi4 *fl4) { return __ip_make_skb(sk, fl4, &sk->sk_write_queue, &inet_sk(sk)->cork.base); diff --git a/include/net/ip_tunnels.h b/include/net/ip_tunnels.h index 90ff430f5e9d..b0d022ff6ea1 100644 --- a/include/net/ip_tunnels.h +++ b/include/net/ip_tunnels.h @@ -466,10 +466,12 @@ static inline void ip_tunnel_info_opts_get(void *to, } static inline void ip_tunnel_info_opts_set(struct ip_tunnel_info *info, - const void *from, int len) + const void *from, int len, + __be16 flags) { memcpy(ip_tunnel_info_opts(info), from, len); info->options_len = len; + info->key.tun_flags |= flags; } static inline struct ip_tunnel_info *lwt_tun_info(struct lwtunnel_state *lwtstate) @@ -511,9 +513,11 @@ static inline void ip_tunnel_info_opts_get(void *to, } static inline void ip_tunnel_info_opts_set(struct ip_tunnel_info *info, - const void *from, int len) + const void *from, int len, + __be16 flags) { info->options_len = 0; + info->key.tun_flags |= flags; } #endif /* CONFIG_INET */ diff --git a/include/net/ipv6.h b/include/net/ipv6.h index 8f73be494503..190015652168 100644 --- a/include/net/ipv6.h +++ b/include/net/ipv6.h @@ -294,6 +294,7 @@ struct ipv6_fl_socklist { }; struct ipcm6_cookie { + struct sockcm_cookie sockc; __s16 hlimit; __s16 tclass; __s8 dontfrag; @@ -301,6 +302,25 @@ struct ipcm6_cookie { __u16 gso_size; }; +static inline void ipcm6_init(struct ipcm6_cookie *ipc6) +{ + *ipc6 = (struct ipcm6_cookie) { + .hlimit = -1, + .tclass = -1, + .dontfrag = -1, + }; +} + +static inline void ipcm6_init_sk(struct ipcm6_cookie *ipc6, 
+ const struct ipv6_pinfo *np) +{ + *ipc6 = (struct ipcm6_cookie) { + .hlimit = -1, + .tclass = np->tclass, + .dontfrag = np->dontfrag, + }; +} + static inline struct ipv6_txoptions *txopt_get(const struct ipv6_pinfo *np) { struct ipv6_txoptions *opt; @@ -915,6 +935,8 @@ static inline __be32 flowi6_get_flowlabel(const struct flowi6 *fl6) int ipv6_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt, struct net_device *orig_dev); +void ipv6_list_rcv(struct list_head *head, struct packet_type *pt, + struct net_device *orig_dev); int ip6_rcv_finish(struct net *net, struct sock *sk, struct sk_buff *skb); @@ -931,8 +953,7 @@ int ip6_append_data(struct sock *sk, int odd, struct sk_buff *skb), void *from, int length, int transhdrlen, struct ipcm6_cookie *ipc6, struct flowi6 *fl6, - struct rt6_info *rt, unsigned int flags, - const struct sockcm_cookie *sockc); + struct rt6_info *rt, unsigned int flags); int ip6_push_pending_frames(struct sock *sk); @@ -949,8 +970,7 @@ struct sk_buff *ip6_make_skb(struct sock *sk, void *from, int length, int transhdrlen, struct ipcm6_cookie *ipc6, struct flowi6 *fl6, struct rt6_info *rt, unsigned int flags, - struct inet_cork_full *cork, - const struct sockcm_cookie *sockc); + struct inet_cork_full *cork); static inline struct sk_buff *ip6_finish_skb(struct sock *sk) { diff --git a/include/net/lag.h b/include/net/lag.h new file mode 100644 index 000000000000..95b880e6fdde --- /dev/null +++ b/include/net/lag.h @@ -0,0 +1,17 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_IF_LAG_H +#define _LINUX_IF_LAG_H + +#include <linux/netdevice.h> +#include <linux/if_team.h> +#include <net/bonding.h> + +static inline bool net_lag_port_dev_txable(const struct net_device *port_dev) +{ + if (netif_is_team_port(port_dev)) + return team_port_dev_txable(port_dev); + else + return bond_is_active_slave_dev(port_dev); +} + +#endif /* _LINUX_IF_LAG_H */ diff --git a/include/net/mac80211.h b/include/net/mac80211.h index 851a5e19ae32..5790f55c241d 100644 --- a/include/net/mac80211.h +++ b/include/net/mac80211.h @@ -23,6 +23,7 @@ #include <linux/ieee80211.h> #include <net/cfg80211.h> #include <net/codel.h> +#include <net/ieee80211_radiotap.h> #include <asm/unaligned.h> /** @@ -162,6 +163,8 @@ enum ieee80211_ac_numbers { * @txop: maximum burst time in units of 32 usecs, 0 meaning disabled * @acm: is mandatory admission control required for the access category * @uapsd: is U-APSD mode enabled for the queue + * @mu_edca: is the MU EDCA configured + * @mu_edca_param_rec: MU EDCA Parameter Record for HE */ struct ieee80211_tx_queue_params { u16 txop; @@ -170,6 +173,8 @@ struct ieee80211_tx_queue_params { u8 aifs; bool acm; bool uapsd; + bool mu_edca; + struct ieee80211_he_mu_edca_param_ac_rec mu_edca_param_rec; }; struct ieee80211_low_level_stats { @@ -463,6 +468,15 @@ struct ieee80211_mu_group_data { * This structure keeps information about a BSS (and an association * to that BSS) that can change during the lifetime of the BSS. 
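(Editorial aside, not part of the patch: the new net/lag.h helper shown above lets a driver ask in one call whether a team or bonding port may currently transmit, hiding the difference between the two stacking drivers. A sketch with a hypothetical caller; it assumes the caller holds rtnl so walking the lower devices is safe:)

#include <net/lag.h>

/* Hypothetical: count the lower devices of a LAG master that may
 * currently carry traffic for it.
 */
static unsigned int example_count_txable_ports(struct net_device *lag_dev)
{
	struct net_device *lower;
	struct list_head *iter;
	unsigned int n = 0;

	netdev_for_each_lower_dev(lag_dev, lower, iter)
		if (net_lag_port_dev_txable(lower))
			n++;

	return n;
}
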
* + * @bss_color: 6-bit value to mark inter-BSS frame, if BSS supports HE + * @htc_trig_based_pkt_ext: default PE in 4us units, if BSS supports HE + * @multi_sta_back_32bit: supports BA bitmap of 32-bits in Multi-STA BACK + * @uora_exists: is the UORA element advertised by AP + * @ack_enabled: indicates support to receive a multi-TID that solicits either + * ACK, BACK or both + * @uora_ocw_range: UORA element's OCW Range field + * @frame_time_rts_th: HE duration RTS threshold, in units of 32us + * @he_support: does this BSS support HE * @assoc: association status * @ibss_joined: indicates whether this station is part of an IBSS * or not @@ -550,6 +564,14 @@ struct ieee80211_mu_group_data { */ struct ieee80211_bss_conf { const u8 *bssid; + u8 bss_color; + u8 htc_trig_based_pkt_ext; + bool multi_sta_back_32bit; + bool uora_exists; + bool ack_enabled; + u8 uora_ocw_range; + u16 frame_time_rts_th; + bool he_support; /* association related data */ bool assoc, ibss_joined; bool ibss_creator; @@ -1106,6 +1128,18 @@ ieee80211_tx_info_clear_status(struct ieee80211_tx_info *info) * @RX_FLAG_AMPDU_EOF_BIT: Value of the EOF bit in the A-MPDU delimiter for this * frame * @RX_FLAG_AMPDU_EOF_BIT_KNOWN: The EOF value is known + * @RX_FLAG_RADIOTAP_HE: HE radiotap data is present + * (&struct ieee80211_radiotap_he, mac80211 will fill in + * - DATA3_DATA_MCS + * - DATA3_DATA_DCM + * - DATA3_CODING + * - DATA5_GI + * - DATA5_DATA_BW_RU_ALLOC + * - DATA6_NSTS + * - DATA3_STBC + * from the RX info data, so leave those zeroed when building this data) + * @RX_FLAG_RADIOTAP_HE_MU: HE MU radiotap data is present + * (&struct ieee80211_radiotap_he_mu) */ enum mac80211_rx_flags { RX_FLAG_MMIC_ERROR = BIT(0), @@ -1134,6 +1168,8 @@ enum mac80211_rx_flags { RX_FLAG_ICV_STRIPPED = BIT(23), RX_FLAG_AMPDU_EOF_BIT = BIT(24), RX_FLAG_AMPDU_EOF_BIT_KNOWN = BIT(25), + RX_FLAG_RADIOTAP_HE = BIT(26), + RX_FLAG_RADIOTAP_HE_MU = BIT(27), }; /** @@ -1164,6 +1200,7 @@ enum mac80211_rx_encoding { RX_ENC_LEGACY = 0, RX_ENC_HT, RX_ENC_VHT, + RX_ENC_HE, }; /** @@ -1198,6 +1235,9 @@ enum mac80211_rx_encoding { * @encoding: &enum mac80211_rx_encoding * @bw: &enum rate_info_bw * @enc_flags: uses bits from &enum mac80211_rx_encoding_flags + * @he_ru: HE RU, from &enum nl80211_he_ru_alloc + * @he_gi: HE GI, from &enum nl80211_he_gi + * @he_dcm: HE DCM value * @rx_flags: internal RX flags for mac80211 * @ampdu_reference: A-MPDU reference number, must be a different value for * each A-MPDU but the same for each subframe within one A-MPDU @@ -1211,7 +1251,8 @@ struct ieee80211_rx_status { u32 flag; u16 freq; u8 enc_flags; - u8 encoding:2, bw:3; + u8 encoding:2, bw:3, he_ru:3; + u8 he_gi:2, he_dcm:1; u8 rate_idx; u8 nss; u8 rx_flags; @@ -1770,6 +1811,7 @@ struct ieee80211_sta_rates { * @supp_rates: Bitmap of supported rates (per band) * @ht_cap: HT capabilities of this STA; restricted to our own capabilities * @vht_cap: VHT capabilities of this STA; restricted to our own capabilities + * @he_cap: HE capabilities of this STA * @max_rx_aggregation_subframes: maximal amount of frames in a single AMPDU * that this station is allowed to transmit to us. * Can be modified by driver. @@ -1805,7 +1847,8 @@ struct ieee80211_sta { u16 aid; struct ieee80211_sta_ht_cap ht_cap; struct ieee80211_sta_vht_cap vht_cap; - u8 max_rx_aggregation_subframes; + struct ieee80211_sta_he_cap he_cap; + u16 max_rx_aggregation_subframes; bool wme; u8 uapsd_queues; u8 max_sp; @@ -2196,10 +2239,11 @@ enum ieee80211_hw_flags { * it shouldn't be set. 
* * @max_tx_aggregation_subframes: maximum number of subframes in an - * aggregate an HT driver will transmit. Though ADDBA will advertise - * a constant value of 64 as some older APs can crash if the window - * size is smaller (an example is LinkSys WRT120N with FW v1.0.07 - * build 002 Jun 18 2012). + * aggregate an HT/HE device will transmit. In HT AddBA we'll + * advertise a constant value of 64 as some older APs crash if + * the window size is smaller (an example is LinkSys WRT120N + * with FW v1.0.07 build 002 Jun 18 2012). + * For AddBA to HE capable peers this value will be used. * * @max_tx_fragments: maximum number of tx buffers per (A)-MSDU, sum * of 1 + skb_shinfo(skb)->nr_frags for each skb in the frag_list. @@ -2216,6 +2260,8 @@ enum ieee80211_hw_flags { * the default is _GI | _BANDWIDTH. * Use the %IEEE80211_RADIOTAP_VHT_KNOWN_\* values. * + * @radiotap_he: HE radiotap validity flags + * * @radiotap_timestamp: Information for the radiotap timestamp field; if the * 'units_pos' member is set to a non-negative value it must be set to * a combination of a IEEE80211_RADIOTAP_TIMESTAMP_UNIT_* and a @@ -2263,8 +2309,8 @@ struct ieee80211_hw { u8 max_rates; u8 max_report_rates; u8 max_rate_tries; - u8 max_rx_aggregation_subframes; - u8 max_tx_aggregation_subframes; + u16 max_rx_aggregation_subframes; + u16 max_tx_aggregation_subframes; u8 max_tx_fragments; u8 offchannel_tx_hw_queue; u8 radiotap_mcs_details; @@ -2904,7 +2950,7 @@ struct ieee80211_ampdu_params { struct ieee80211_sta *sta; u16 tid; u16 ssn; - u8 buf_size; + u16 buf_size; bool amsdu; u16 timeout; }; diff --git a/include/net/netfilter/nf_flow_table.h b/include/net/netfilter/nf_flow_table.h index ba9fa4592f2b..0e355f4a3d76 100644 --- a/include/net/netfilter/nf_flow_table.h +++ b/include/net/netfilter/nf_flow_table.h @@ -4,7 +4,7 @@ #include <linux/in.h> #include <linux/in6.h> #include <linux/netdevice.h> -#include <linux/rhashtable.h> +#include <linux/rhashtable-types.h> #include <linux/rcupdate.h> #include <linux/netfilter/nf_conntrack_tuple_common.h> #include <net/dst.h> diff --git a/include/net/netfilter/nf_log.h b/include/net/netfilter/nf_log.h index e811ac07ea94..0d3920896d50 100644 --- a/include/net/netfilter/nf_log.h +++ b/include/net/netfilter/nf_log.h @@ -106,7 +106,8 @@ int nf_log_dump_udp_header(struct nf_log_buf *m, const struct sk_buff *skb, int nf_log_dump_tcp_header(struct nf_log_buf *m, const struct sk_buff *skb, u8 proto, int fragment, unsigned int offset, unsigned int logflags); -void nf_log_dump_sk_uid_gid(struct nf_log_buf *m, struct sock *sk); +void nf_log_dump_sk_uid_gid(struct net *net, struct nf_log_buf *m, + struct sock *sk); void nf_log_dump_packet_common(struct nf_log_buf *m, u_int8_t pf, unsigned int hooknum, const struct sk_buff *skb, const struct net_device *in, diff --git a/include/net/netns/hash.h b/include/net/netns/hash.h index 24c78183a4c2..16a842456189 100644 --- a/include/net/netns/hash.h +++ b/include/net/netns/hash.h @@ -9,12 +9,7 @@ struct net; static inline u32 net_hash_mix(const struct net *net) { #ifdef CONFIG_NET_NS - /* - * shift this right to eliminate bits, that are - * always zeroed - */ - - return (u32)(((unsigned long)net) >> L1_CACHE_SHIFT); + return (u32)(((unsigned long)net) >> ilog2(sizeof(*net))); #else return 0; #endif diff --git a/include/net/pkt_cls.h b/include/net/pkt_cls.h index 20b059574e60..e4252a176eec 100644 --- a/include/net/pkt_cls.h +++ b/include/net/pkt_cls.h @@ -13,6 +13,7 @@ struct tcf_walker { int stop; int skip; int count; + unsigned long cookie; int 
(*fn)(struct tcf_proto *, void *node, struct tcf_walker *); }; @@ -73,11 +74,13 @@ void tcf_block_cb_incref(struct tcf_block_cb *block_cb); unsigned int tcf_block_cb_decref(struct tcf_block_cb *block_cb); struct tcf_block_cb *__tcf_block_cb_register(struct tcf_block *block, tc_setup_cb_t *cb, void *cb_ident, - void *cb_priv); + void *cb_priv, + struct netlink_ext_ack *extack); int tcf_block_cb_register(struct tcf_block *block, tc_setup_cb_t *cb, void *cb_ident, - void *cb_priv); -void __tcf_block_cb_unregister(struct tcf_block_cb *block_cb); + void *cb_priv, struct netlink_ext_ack *extack); +void __tcf_block_cb_unregister(struct tcf_block *block, + struct tcf_block_cb *block_cb); void tcf_block_cb_unregister(struct tcf_block *block, tc_setup_cb_t *cb, void *cb_ident); @@ -166,7 +169,8 @@ unsigned int tcf_block_cb_decref(struct tcf_block_cb *block_cb) static inline struct tcf_block_cb *__tcf_block_cb_register(struct tcf_block *block, tc_setup_cb_t *cb, void *cb_ident, - void *cb_priv) + void *cb_priv, + struct netlink_ext_ack *extack) { return NULL; } @@ -174,13 +178,14 @@ struct tcf_block_cb *__tcf_block_cb_register(struct tcf_block *block, static inline int tcf_block_cb_register(struct tcf_block *block, tc_setup_cb_t *cb, void *cb_ident, - void *cb_priv) + void *cb_priv, struct netlink_ext_ack *extack) { return 0; } static inline -void __tcf_block_cb_unregister(struct tcf_block_cb *block_cb) +void __tcf_block_cb_unregister(struct tcf_block *block, + struct tcf_block_cb *block_cb) { } @@ -601,6 +606,7 @@ struct tc_block_offload { enum tc_block_command command; enum tcf_block_binder_type binder_type; struct tcf_block *block; + struct netlink_ext_ack *extack; }; struct tc_cls_common_offload { @@ -776,6 +782,7 @@ struct tc_mqprio_qopt_offload { struct tc_cookie { u8 *data; u32 len; + struct rcu_head rcu; }; struct tc_qopt_offload_stats { diff --git a/include/net/pkt_sched.h b/include/net/pkt_sched.h index 815b92a23936..7dc769e5452b 100644 --- a/include/net/pkt_sched.h +++ b/include/net/pkt_sched.h @@ -72,6 +72,8 @@ struct qdisc_watchdog { struct Qdisc *qdisc; }; +void qdisc_watchdog_init_clockid(struct qdisc_watchdog *wd, struct Qdisc *qdisc, + clockid_t clockid); void qdisc_watchdog_init(struct qdisc_watchdog *wd, struct Qdisc *qdisc); void qdisc_watchdog_schedule_ns(struct qdisc_watchdog *wd, u64 expires); @@ -153,4 +155,9 @@ struct tc_cbs_qopt_offload { s32 sendslope; }; +struct tc_etf_qopt_offload { + u8 enable; + s32 queue; +}; + #endif diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h index 6488daa32f82..7432100027b7 100644 --- a/include/net/sch_generic.h +++ b/include/net/sch_generic.h @@ -20,6 +20,9 @@ struct qdisc_walker; struct tcf_walker; struct module; +typedef int tc_setup_cb_t(enum tc_setup_type type, + void *type_data, void *cb_priv); + struct qdisc_rate_table { struct tc_ratespec rate; u32 data[256]; @@ -256,6 +259,9 @@ struct tcf_proto_ops { bool *last, struct netlink_ext_ack *); void (*walk)(struct tcf_proto*, struct tcf_walker *arg); + int (*reoffload)(struct tcf_proto *tp, bool add, + tc_setup_cb_t *cb, void *cb_priv, + struct netlink_ext_ack *extack); void (*bind_class)(void *, u32, unsigned long); /* rtnetlink specific */ @@ -330,6 +336,21 @@ static inline void tcf_block_offload_dec(struct tcf_block *block, u32 *flags) block->offloadcnt--; } +static inline void +tc_cls_offload_cnt_update(struct tcf_block *block, unsigned int *cnt, + u32 *flags, bool add) +{ + if (add) { + if (!*cnt) + tcf_block_offload_inc(block, flags); + (*cnt)++; + } else { + 
(*cnt)--; + if (!*cnt) + tcf_block_offload_dec(block, flags); + } +} + static inline void qdisc_cb_private_validate(const struct sk_buff *skb, int sz) { struct qdisc_skb_cb *qcb; diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h index dbe1b911a24d..ab869e0d8326 100644 --- a/include/net/sctp/structs.h +++ b/include/net/sctp/structs.h @@ -48,7 +48,7 @@ #define __sctp_structs_h__ #include <linux/ktime.h> -#include <linux/rhashtable.h> +#include <linux/rhashtable-types.h> #include <linux/socket.h> /* linux/in.h needs this!! */ #include <linux/in.h> /* We get struct sockaddr_in. */ #include <linux/in6.h> /* We get struct in6_addr */ @@ -193,6 +193,9 @@ struct sctp_sock { /* This is the max_retrans value for new associations. */ __u16 pathmaxrxt; + __u32 flowlabel; + __u8 dscp; + /* The initial Path MTU to use for new associations. */ __u32 pathmtu; @@ -220,6 +223,7 @@ struct sctp_sock { __u32 adaptation_ind; __u32 pd_point; __u16 nodelay:1, + reuse:1, disable_fragments:1, v4mapped:1, frag_interleave:1, @@ -894,6 +898,9 @@ struct sctp_transport { */ __u16 pathmaxrxt; + __u32 flowlabel; + __u8 dscp; + /* This is the partially failed retrans value for the transport * and will be initialized from the assocs value. This can be changed * using the SCTP_PEER_ADDR_THLDS socket option @@ -1771,6 +1778,9 @@ struct sctp_association { */ __u16 pathmaxrxt; + __u32 flowlabel; + __u8 dscp; + /* Flag that path mtu update is pending */ __u8 pmtu_pending; diff --git a/include/net/seg6.h b/include/net/seg6.h index e029e301faa5..2567941a2f32 100644 --- a/include/net/seg6.h +++ b/include/net/seg6.h @@ -18,7 +18,7 @@ #include <linux/ipv6.h> #include <net/lwtunnel.h> #include <linux/seg6.h> -#include <linux/rhashtable.h> +#include <linux/rhashtable-types.h> static inline void update_csum_diff4(struct sk_buff *skb, __be32 from, __be32 to) diff --git a/include/net/seg6_hmac.h b/include/net/seg6_hmac.h index 69c3a106056b..7fda469e2758 100644 --- a/include/net/seg6_hmac.h +++ b/include/net/seg6_hmac.h @@ -22,7 +22,7 @@ #include <linux/route.h> #include <net/seg6.h> #include <linux/seg6_hmac.h> -#include <linux/rhashtable.h> +#include <linux/rhashtable-types.h> #define SEG6_HMAC_MAX_DIGESTSIZE 160 #define SEG6_HMAC_RING_SIZE 256 diff --git a/include/net/smc.h b/include/net/smc.h index 8381d163fefa..9ef49f8b1002 100644 --- a/include/net/smc.h +++ b/include/net/smc.h @@ -11,6 +11,8 @@ #ifndef _SMC_H #define _SMC_H +#define SMC_MAX_PNETID_LEN 16 /* Max. 
length of PNET id */ + struct smc_hashinfo { rwlock_t lock; struct hlist_head ht; @@ -18,4 +20,67 @@ struct smc_hashinfo { int smc_hash_sk(struct sock *sk); void smc_unhash_sk(struct sock *sk); + +/* SMCD/ISM device driver interface */ +struct smcd_dmb { + u64 dmb_tok; + u64 rgid; + u32 dmb_len; + u32 sba_idx; + u32 vlan_valid; + u32 vlan_id; + void *cpu_addr; + dma_addr_t dma_addr; +}; + +#define ISM_EVENT_DMB 0 +#define ISM_EVENT_GID 1 +#define ISM_EVENT_SWR 2 + +struct smcd_event { + u32 type; + u32 code; + u64 tok; + u64 time; + u64 info; +}; + +struct smcd_dev; + +struct smcd_ops { + int (*query_remote_gid)(struct smcd_dev *dev, u64 rgid, u32 vid_valid, + u32 vid); + int (*register_dmb)(struct smcd_dev *dev, struct smcd_dmb *dmb); + int (*unregister_dmb)(struct smcd_dev *dev, struct smcd_dmb *dmb); + int (*add_vlan_id)(struct smcd_dev *dev, u64 vlan_id); + int (*del_vlan_id)(struct smcd_dev *dev, u64 vlan_id); + int (*set_vlan_required)(struct smcd_dev *dev); + int (*reset_vlan_required)(struct smcd_dev *dev); + int (*signal_event)(struct smcd_dev *dev, u64 rgid, u32 trigger_irq, + u32 event_code, u64 info); + int (*move_data)(struct smcd_dev *dev, u64 dmb_tok, unsigned int idx, + bool sf, unsigned int offset, void *data, + unsigned int size); +}; + +struct smcd_dev { + const struct smcd_ops *ops; + struct device dev; + void *priv; + u64 local_gid; + struct list_head list; + spinlock_t lock; + struct smc_connection **conn; + struct list_head vlan; + struct workqueue_struct *event_wq; + u8 pnetid[SMC_MAX_PNETID_LEN]; +}; + +struct smcd_dev *smcd_alloc_dev(struct device *parent, const char *name, + const struct smcd_ops *ops, int max_dmbs); +int smcd_register_dev(struct smcd_dev *smcd); +void smcd_unregister_dev(struct smcd_dev *smcd); +void smcd_free_dev(struct smcd_dev *smcd); +void smcd_handle_event(struct smcd_dev *dev, struct smcd_event *event); +void smcd_handle_irq(struct smcd_dev *dev, unsigned int bit); #endif /* _SMC_H */ diff --git a/include/net/sock.h b/include/net/sock.h index b3b75419eafe..83b747538bd0 100644 --- a/include/net/sock.h +++ b/include/net/sock.h @@ -139,6 +139,7 @@ typedef __u64 __bitwise __addrpair; * @skc_node: main hash linkage for various protocol lookup tables * @skc_nulls_node: main hash linkage for TCP/UDP/UDP-Lite protocol * @skc_tx_queue_mapping: tx queue number for this connection + * @skc_rx_queue_mapping: rx queue number for this connection * @skc_flags: place holder for sk_flags * %SO_LINGER (l_onoff), %SO_BROADCAST, %SO_KEEPALIVE, * %SO_OOBINLINE settings, %SO_TIMESTAMPING settings @@ -214,7 +215,10 @@ struct sock_common { struct hlist_node skc_node; struct hlist_nulls_node skc_nulls_node; }; - int skc_tx_queue_mapping; + unsigned short skc_tx_queue_mapping; +#ifdef CONFIG_XPS + unsigned short skc_rx_queue_mapping; +#endif union { int skc_incoming_cpu; u32 skc_rcv_wnd; @@ -315,6 +319,9 @@ struct sock_common { * @sk_destruct: called at sock freeing time, i.e. 
when all refcnt == 0 * @sk_reuseport_cb: reuseport group container * @sk_rcu: used during RCU grace period + * @sk_clockid: clockid used by time-based scheduling (SO_TXTIME) + * @sk_txtime_deadline_mode: set deadline mode for SO_TXTIME + * @sk_txtime_unused: unused txtime flags */ struct sock { /* @@ -326,6 +333,9 @@ struct sock { #define sk_nulls_node __sk_common.skc_nulls_node #define sk_refcnt __sk_common.skc_refcnt #define sk_tx_queue_mapping __sk_common.skc_tx_queue_mapping +#ifdef CONFIG_XPS +#define sk_rx_queue_mapping __sk_common.skc_rx_queue_mapping +#endif #define sk_dontcopy_begin __sk_common.skc_dontcopy_begin #define sk_dontcopy_end __sk_common.skc_dontcopy_end @@ -468,6 +478,12 @@ struct sock { u8 sk_shutdown; u32 sk_tskey; atomic_t sk_zckey; + + u8 sk_clockid; + u8 sk_txtime_deadline_mode : 1, + sk_txtime_report_errors : 1, + sk_txtime_unused : 6; + struct socket *sk_socket; void *sk_user_data; #ifdef CONFIG_SECURITY @@ -783,6 +799,7 @@ enum sock_flags { SOCK_FILTER_LOCKED, /* Filter cannot be changed anymore */ SOCK_SELECT_ERR_QUEUE, /* Wake select on error queue */ SOCK_RCU_FREE, /* wait rcu grace period in sk_destruct() */ + SOCK_TXTIME, }; #define SK_FLAGS_TIMESTAMP ((1UL << SOCK_TIMESTAMP) | (1UL << SOCK_TIMESTAMPING_RX_SOFTWARE)) @@ -1578,10 +1595,17 @@ void sock_kzfree_s(struct sock *sk, void *mem, int size); void sk_send_sigurg(struct sock *sk); struct sockcm_cookie { + u64 transmit_time; u32 mark; u16 tsflags; }; +static inline void sockcm_init(struct sockcm_cookie *sockc, + const struct sock *sk) +{ + *sockc = (struct sockcm_cookie) { .tsflags = sk->sk_tsflags }; +} + int __sock_cmsg_send(struct sock *sk, struct msghdr *msg, struct cmsghdr *cmsg, struct sockcm_cookie *sockc); int sock_cmsg_send(struct sock *sk, struct msghdr *msg, @@ -1681,19 +1705,58 @@ static inline int sk_receive_skb(struct sock *sk, struct sk_buff *skb, static inline void sk_tx_queue_set(struct sock *sk, int tx_queue) { + /* sk_tx_queue_mapping accept only upto a 16-bit value */ + if (WARN_ON_ONCE((unsigned short)tx_queue >= USHRT_MAX)) + return; sk->sk_tx_queue_mapping = tx_queue; } +#define NO_QUEUE_MAPPING USHRT_MAX + static inline void sk_tx_queue_clear(struct sock *sk) { - sk->sk_tx_queue_mapping = -1; + sk->sk_tx_queue_mapping = NO_QUEUE_MAPPING; } static inline int sk_tx_queue_get(const struct sock *sk) { - return sk ? 
sk->sk_tx_queue_mapping : -1; + if (sk && sk->sk_tx_queue_mapping != NO_QUEUE_MAPPING) + return sk->sk_tx_queue_mapping; + + return -1; +} + +static inline void sk_rx_queue_set(struct sock *sk, const struct sk_buff *skb) +{ +#ifdef CONFIG_XPS + if (skb_rx_queue_recorded(skb)) { + u16 rx_queue = skb_get_rx_queue(skb); + + if (WARN_ON_ONCE(rx_queue == NO_QUEUE_MAPPING)) + return; + + sk->sk_rx_queue_mapping = rx_queue; + } +#endif } +static inline void sk_rx_queue_clear(struct sock *sk) +{ +#ifdef CONFIG_XPS + sk->sk_rx_queue_mapping = NO_QUEUE_MAPPING; +#endif +} + +#ifdef CONFIG_XPS +static inline int sk_rx_queue_get(const struct sock *sk) +{ + if (sk && sk->sk_rx_queue_mapping != NO_QUEUE_MAPPING) + return sk->sk_rx_queue_mapping; + + return -1; +} +#endif + static inline void sk_set_socket(struct sock *sk, struct socket *sock) { sk_tx_queue_clear(sk); diff --git a/include/net/tc_act/tc_pedit.h b/include/net/tc_act/tc_pedit.h index 227a6f1d02f4..fac3ad4a86de 100644 --- a/include/net/tc_act/tc_pedit.h +++ b/include/net/tc_act/tc_pedit.h @@ -17,6 +17,7 @@ struct tcf_pedit { struct tc_pedit_key *tcfp_keys; struct tcf_pedit_key_ex *tcfp_keys_ex; }; + #define to_pedit(a) ((struct tcf_pedit *)a) static inline bool is_tcf_pedit(const struct tc_action *a) diff --git a/include/net/tc_act/tc_skbedit.h b/include/net/tc_act/tc_skbedit.h index 19cd3d345804..911bbac838a2 100644 --- a/include/net/tc_act/tc_skbedit.h +++ b/include/net/tc_act/tc_skbedit.h @@ -22,14 +22,19 @@ #include <net/act_api.h> #include <linux/tc_act/tc_skbedit.h> +struct tcf_skbedit_params { + u32 flags; + u32 priority; + u32 mark; + u32 mask; + u16 queue_mapping; + u16 ptype; + struct rcu_head rcu; +}; + struct tcf_skbedit { - struct tc_action common; - u32 flags; - u32 priority; - u32 mark; - u32 mask; - u16 queue_mapping; - u16 ptype; + struct tc_action common; + struct tcf_skbedit_params __rcu *params; }; #define to_skbedit(a) ((struct tcf_skbedit *)a) @@ -37,15 +42,27 @@ struct tcf_skbedit { static inline bool is_tcf_skbedit_mark(const struct tc_action *a) { #ifdef CONFIG_NET_CLS_ACT - if (a->ops && a->ops->type == TCA_ACT_SKBEDIT) - return to_skbedit(a)->flags == SKBEDIT_F_MARK; + u32 flags; + + if (a->ops && a->ops->type == TCA_ACT_SKBEDIT) { + rcu_read_lock(); + flags = rcu_dereference(to_skbedit(a)->params)->flags; + rcu_read_unlock(); + return flags == SKBEDIT_F_MARK; + } #endif return false; } static inline u32 tcf_skbedit_mark(const struct tc_action *a) { - return to_skbedit(a)->mark; + u32 mark; + + rcu_read_lock(); + mark = rcu_dereference(to_skbedit(a)->params)->mark; + rcu_read_unlock(); + + return mark; } #endif /* __NET_TC_SKBEDIT_H */ diff --git a/include/net/tcp.h b/include/net/tcp.h index 3482d13d655b..f6e0a9b1dff3 100644 --- a/include/net/tcp.h +++ b/include/net/tcp.h @@ -472,19 +472,20 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb); */ static inline void tcp_synq_overflow(const struct sock *sk) { - unsigned long last_overflow = tcp_sk(sk)->rx_opt.ts_recent_stamp; - unsigned long now = jiffies; + unsigned int last_overflow = tcp_sk(sk)->rx_opt.ts_recent_stamp; + unsigned int now = jiffies; - if (time_after(now, last_overflow + HZ)) + if (time_after32(now, last_overflow + HZ)) tcp_sk(sk)->rx_opt.ts_recent_stamp = now; } /* syncookies: no recent synqueue overflow on this listening socket? 
*/ static inline bool tcp_synq_no_recent_overflow(const struct sock *sk) { - unsigned long last_overflow = tcp_sk(sk)->rx_opt.ts_recent_stamp; + unsigned int last_overflow = tcp_sk(sk)->rx_opt.ts_recent_stamp; + unsigned int now = jiffies; - return time_after(jiffies, last_overflow + TCP_SYNCOOKIE_VALID); + return time_after32(now, last_overflow + TCP_SYNCOOKIE_VALID); } static inline u32 tcp_cookie_time(void) @@ -956,6 +957,8 @@ struct rate_sample { u32 prior_delivered; /* tp->delivered at "prior_mstamp" */ s32 delivered; /* number of packets delivered over interval */ long interval_us; /* time for tp->delivered to incr "delivered" */ + u32 snd_interval_us; /* snd interval for delivered packets */ + u32 rcv_interval_us; /* rcv interval for delivered packets */ long rtt_us; /* RTT of last (S)ACKed packet (or -1) */ int losses; /* number of packets marked lost upon ACK */ u32 acked_sacked; /* number of packets newly (S)ACKed upon ACK */ @@ -1187,6 +1190,17 @@ static inline bool tcp_is_cwnd_limited(const struct sock *sk) return tp->is_cwnd_limited; } +/* BBR congestion control needs pacing. + * Same remark for SO_MAX_PACING_RATE. + * sch_fq packet scheduler is efficiently handling pacing, + * but is not always installed/used. + * Return true if TCP stack should pace packets itself. + */ +static inline bool tcp_needs_internal_pacing(const struct sock *sk) +{ + return smp_load_acquire(&sk->sk_pacing_status) == SK_PACING_NEEDED; +} + /* Something is really bad, we could not queue an additional packet, * because qdisc is full or receiver sent a 0 window. * We do not want to add fuel to the fire, or abort too early, @@ -1364,7 +1378,8 @@ static inline bool tcp_paws_check(const struct tcp_options_received *rx_opt, { if ((s32)(rx_opt->ts_recent - rx_opt->rcv_tsval) <= paws_win) return true; - if (unlikely(get_seconds() >= rx_opt->ts_recent_stamp + TCP_PAWS_24DAYS)) + if (unlikely(!time_before32(ktime_get_seconds(), + rx_opt->ts_recent_stamp + TCP_PAWS_24DAYS))) return true; /* * Some OSes send SYN and SYNACK messages with tsval=0 tsecr=0, @@ -1394,7 +1409,8 @@ static inline bool tcp_paws_reject(const struct tcp_options_received *rx_opt, However, we can relax time bounds for RST segments to MSL. 
*/ - if (rst && get_seconds() >= rx_opt->ts_recent_stamp + TCP_PAWS_MSL) + if (rst && !time_before32(ktime_get_seconds(), + rx_opt->ts_recent_stamp + TCP_PAWS_MSL)) return false; return true; } @@ -1780,7 +1796,7 @@ void tcp_v4_destroy_sock(struct sock *sk); struct sk_buff *tcp_gso_segment(struct sk_buff *skb, netdev_features_t features); -struct sk_buff **tcp_gro_receive(struct sk_buff **head, struct sk_buff *skb); +struct sk_buff *tcp_gro_receive(struct list_head *head, struct sk_buff *skb); int tcp_gro_complete(struct sk_buff *skb); void __tcp_v4_send_check(struct sk_buff *skb, __be32 saddr, __be32 daddr); diff --git a/include/net/tls.h b/include/net/tls.h index 70c273777fe9..d8b3b6578c01 100644 --- a/include/net/tls.h +++ b/include/net/tls.h @@ -83,6 +83,16 @@ struct tls_device { void (*unhash)(struct tls_device *device, struct sock *sk); }; +enum { + TLS_BASE, + TLS_SW, +#ifdef CONFIG_TLS_DEVICE + TLS_HW, +#endif + TLS_HW_RECORD, + TLS_NUM_CONFIG, +}; + struct tls_sw_context_tx { struct crypto_aead *aead_send; struct crypto_wait async_wait; @@ -128,7 +138,7 @@ struct tls_record_info { skb_frag_t frags[MAX_SKB_FRAGS]; }; -struct tls_offload_context { +struct tls_offload_context_tx { struct crypto_aead *aead_send; spinlock_t lock; /* protects records list */ struct list_head records_list; @@ -147,8 +157,8 @@ struct tls_offload_context { #define TLS_DRIVER_STATE_SIZE (max_t(size_t, 8, sizeof(void *))) }; -#define TLS_OFFLOAD_CONTEXT_SIZE \ - (ALIGN(sizeof(struct tls_offload_context), sizeof(void *)) + \ +#define TLS_OFFLOAD_CONTEXT_SIZE_TX \ + (ALIGN(sizeof(struct tls_offload_context_tx), sizeof(void *)) + \ TLS_DRIVER_STATE_SIZE) enum { @@ -197,6 +207,7 @@ struct tls_context { int (*push_pending_record)(struct sock *sk, int flags); void (*sk_write_space)(struct sock *sk); + void (*sk_destruct)(struct sock *sk); void (*sk_proto_close)(struct sock *sk, long timeout); int (*setsockopt)(struct sock *sk, int level, @@ -209,13 +220,27 @@ struct tls_context { void (*unhash)(struct sock *sk); }; +struct tls_offload_context_rx { + /* sw must be the first member of tls_offload_context_rx */ + struct tls_sw_context_rx sw; + atomic64_t resync_req; + u8 driver_state[]; + /* The TLS layer reserves room for driver specific state + * Currently the belief is that there is not enough + * driver specific state to justify another layer of indirection + */ +}; + +#define TLS_OFFLOAD_CONTEXT_SIZE_RX \ + (ALIGN(sizeof(struct tls_offload_context_rx), sizeof(void *)) + \ + TLS_DRIVER_STATE_SIZE) + int wait_on_pending_writer(struct sock *sk, long *timeo); int tls_sk_query(struct sock *sk, int optname, char __user *optval, int __user *optlen); int tls_sk_attach(struct sock *sk, int optname, char __user *optval, unsigned int optlen); - int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx, int tx); int tls_sw_sendmsg(struct sock *sk, struct msghdr *msg, size_t size); int tls_sw_sendpage(struct sock *sk, struct page *page, @@ -223,6 +248,7 @@ int tls_sw_sendpage(struct sock *sk, struct page *page, void tls_sw_close(struct sock *sk, long timeout); void tls_sw_free_resources_tx(struct sock *sk); void tls_sw_free_resources_rx(struct sock *sk); +void tls_sw_release_resources_rx(struct sock *sk); int tls_sw_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int nonblock, int flags, int *addr_len); unsigned int tls_sw_poll(struct file *file, struct socket *sock, @@ -239,7 +265,7 @@ void tls_device_sk_destruct(struct sock *sk); void tls_device_init(void); void tls_device_cleanup(void); -struct 
tls_record_info *tls_get_record(struct tls_offload_context *context, +struct tls_record_info *tls_get_record(struct tls_offload_context_tx *context, u32 seq, u64 *p_record_sn); static inline bool tls_record_is_start_marker(struct tls_record_info *rec) @@ -289,11 +315,19 @@ static inline bool tls_is_pending_open_record(struct tls_context *tls_ctx) return tls_ctx->pending_open_record_frags; } +struct sk_buff * +tls_validate_xmit_skb(struct sock *sk, struct net_device *dev, + struct sk_buff *skb); + static inline bool tls_is_sk_tx_device_offloaded(struct sock *sk) { - return sk_fullsock(sk) && - /* matches smp_store_release in tls_set_device_offload */ - smp_load_acquire(&sk->sk_destruct) == &tls_device_sk_destruct; +#ifdef CONFIG_SOCK_VALIDATE_XMIT + return sk_fullsock(sk) & + (smp_load_acquire(&sk->sk_validate_xmit_skb) == + &tls_validate_xmit_skb); +#else + return false; +#endif } static inline void tls_err_abort(struct sock *sk, int err) @@ -380,23 +414,47 @@ static inline struct tls_sw_context_tx *tls_sw_ctx_tx( return (struct tls_sw_context_tx *)tls_ctx->priv_ctx_tx; } -static inline struct tls_offload_context *tls_offload_ctx( - const struct tls_context *tls_ctx) +static inline struct tls_offload_context_tx * +tls_offload_ctx_tx(const struct tls_context *tls_ctx) { - return (struct tls_offload_context *)tls_ctx->priv_ctx_tx; + return (struct tls_offload_context_tx *)tls_ctx->priv_ctx_tx; } +static inline struct tls_offload_context_rx * +tls_offload_ctx_rx(const struct tls_context *tls_ctx) +{ + return (struct tls_offload_context_rx *)tls_ctx->priv_ctx_rx; +} + +/* The TLS context is valid until sk_destruct is called */ +static inline void tls_offload_rx_resync_request(struct sock *sk, __be32 seq) +{ + struct tls_context *tls_ctx = tls_get_ctx(sk); + struct tls_offload_context_rx *rx_ctx = tls_offload_ctx_rx(tls_ctx); + + atomic64_set(&rx_ctx->resync_req, ((((uint64_t)seq) << 32) | 1)); +} + + int tls_proccess_cmsg(struct sock *sk, struct msghdr *msg, unsigned char *record_type); void tls_register_device(struct tls_device *device); void tls_unregister_device(struct tls_device *device); +int tls_device_decrypted(struct sock *sk, struct sk_buff *skb); +int decrypt_skb(struct sock *sk, struct sk_buff *skb, + struct scatterlist *sgout); struct sk_buff *tls_validate_xmit_skb(struct sock *sk, struct net_device *dev, struct sk_buff *skb); int tls_sw_fallback_init(struct sock *sk, - struct tls_offload_context *offload_ctx, + struct tls_offload_context_tx *offload_ctx, struct tls_crypto_info *crypto_info); +int tls_set_device_offload_rx(struct sock *sk, struct tls_context *ctx); + +void tls_device_offload_cleanup_rx(struct sock *sk); +void handle_device_resync(struct sock *sk, u32 seq, u64 rcd_sn); + #endif /* _TLS_OFFLOAD_H */ diff --git a/include/net/transp_v6.h b/include/net/transp_v6.h index f6a3543e5247..a8f6020f1196 100644 --- a/include/net/transp_v6.h +++ b/include/net/transp_v6.h @@ -42,8 +42,7 @@ void ip6_datagram_recv_specific_ctl(struct sock *sk, struct msghdr *msg, struct sk_buff *skb); int ip6_datagram_send_ctl(struct net *net, struct sock *sk, struct msghdr *msg, - struct flowi6 *fl6, struct ipcm6_cookie *ipc6, - struct sockcm_cookie *sockc); + struct flowi6 *fl6, struct ipcm6_cookie *ipc6); void __ip6_dgram_sock_seq_show(struct seq_file *seq, struct sock *sp, __u16 srcp, __u16 destp, int rqueue, int bucket); diff --git a/include/net/udp.h b/include/net/udp.h index 81afdacd4fff..8482a990b0bb 100644 --- a/include/net/udp.h +++ b/include/net/udp.h @@ -170,8 +170,8 @@ static inline 
void udp_csum_pull_header(struct sk_buff *skb) typedef struct sock *(*udp_lookup_t)(struct sk_buff *skb, __be16 sport, __be16 dport); -struct sk_buff **udp_gro_receive(struct sk_buff **head, struct sk_buff *skb, - struct udphdr *uh, udp_lookup_t lookup); +struct sk_buff *udp_gro_receive(struct list_head *head, struct sk_buff *skb, + struct udphdr *uh, udp_lookup_t lookup); int udp_gro_complete(struct sk_buff *skb, int nhoff, udp_lookup_t lookup); struct sk_buff *__udp_gso_segment(struct sk_buff *gso_skb, diff --git a/include/net/udp_tunnel.h b/include/net/udp_tunnel.h index b95a6927c718..fe680ab6b15a 100644 --- a/include/net/udp_tunnel.h +++ b/include/net/udp_tunnel.h @@ -65,9 +65,9 @@ static inline int udp_sock_create(struct net *net, typedef int (*udp_tunnel_encap_rcv_t)(struct sock *sk, struct sk_buff *skb); typedef void (*udp_tunnel_encap_destroy_t)(struct sock *sk); -typedef struct sk_buff **(*udp_tunnel_gro_receive_t)(struct sock *sk, - struct sk_buff **head, - struct sk_buff *skb); +typedef struct sk_buff *(*udp_tunnel_gro_receive_t)(struct sock *sk, + struct list_head *head, + struct sk_buff *skb); typedef int (*udp_tunnel_gro_complete_t)(struct sock *sk, struct sk_buff *skb, int nhoff); diff --git a/include/net/xdp.h b/include/net/xdp.h index 2deea7166a34..fcb033f51d8c 100644 --- a/include/net/xdp.h +++ b/include/net/xdp.h @@ -144,4 +144,17 @@ xdp_data_meta_unsupported(const struct xdp_buff *xdp) return unlikely(xdp->data_meta > xdp->data); } +struct xdp_attachment_info { + struct bpf_prog *prog; + u32 flags; +}; + +struct netdev_bpf; +int xdp_attachment_query(struct xdp_attachment_info *info, + struct netdev_bpf *bpf); +bool xdp_attachment_flags_ok(struct xdp_attachment_info *info, + struct netdev_bpf *bpf); +void xdp_attachment_setup(struct xdp_attachment_info *info, + struct netdev_bpf *bpf); + #endif /* __LINUX_NET_XDP_H__ */ diff --git a/include/trace/events/net.h b/include/trace/events/net.h index 9c886739246a..00aa72ce0e7c 100644 --- a/include/trace/events/net.h +++ b/include/trace/events/net.h @@ -223,6 +223,13 @@ DEFINE_EVENT(net_dev_rx_verbose_template, netif_receive_skb_entry, TP_ARGS(skb) ); +DEFINE_EVENT(net_dev_rx_verbose_template, netif_receive_skb_list_entry, + + TP_PROTO(const struct sk_buff *skb), + + TP_ARGS(skb) +); + DEFINE_EVENT(net_dev_rx_verbose_template, netif_rx_entry, TP_PROTO(const struct sk_buff *skb), diff --git a/include/trace/events/sock.h b/include/trace/events/sock.h index 3176a3931107..a0c4b8a30966 100644 --- a/include/trace/events/sock.h +++ b/include/trace/events/sock.h @@ -35,6 +35,10 @@ EM(TCP_CLOSING) \ EMe(TCP_NEW_SYN_RECV) +#define skmem_kind_names \ + EM(SK_MEM_SEND) \ + EMe(SK_MEM_RECV) + /* enums need to be exported to user space */ #undef EM #undef EMe @@ -44,6 +48,7 @@ family_names inet_protocol_names tcp_state_names +skmem_kind_names #undef EM #undef EMe @@ -59,6 +64,9 @@ tcp_state_names #define show_tcp_state_name(val) \ __print_symbolic(val, tcp_state_names) +#define show_skmem_kind_names(val) \ + __print_symbolic(val, skmem_kind_names) + TRACE_EVENT(sock_rcvqueue_full, TP_PROTO(struct sock *sk, struct sk_buff *skb), @@ -83,9 +91,9 @@ TRACE_EVENT(sock_rcvqueue_full, TRACE_EVENT(sock_exceed_buf_limit, - TP_PROTO(struct sock *sk, struct proto *prot, long allocated), + TP_PROTO(struct sock *sk, struct proto *prot, long allocated, int kind), - TP_ARGS(sk, prot, allocated), + TP_ARGS(sk, prot, allocated, kind), TP_STRUCT__entry( __array(char, name, 32) @@ -93,6 +101,10 @@ TRACE_EVENT(sock_exceed_buf_limit, __field(long, allocated) 
__field(int, sysctl_rmem) __field(int, rmem_alloc) + __field(int, sysctl_wmem) + __field(int, wmem_alloc) + __field(int, wmem_queued) + __field(int, kind) ), TP_fast_assign( @@ -101,17 +113,25 @@ TRACE_EVENT(sock_exceed_buf_limit, __entry->allocated = allocated; __entry->sysctl_rmem = sk_get_rmem0(sk, prot); __entry->rmem_alloc = atomic_read(&sk->sk_rmem_alloc); + __entry->sysctl_wmem = sk_get_wmem0(sk, prot); + __entry->wmem_alloc = refcount_read(&sk->sk_wmem_alloc); + __entry->wmem_queued = sk->sk_wmem_queued; + __entry->kind = kind; ), - TP_printk("proto:%s sysctl_mem=%ld,%ld,%ld allocated=%ld " - "sysctl_rmem=%d rmem_alloc=%d", + TP_printk("proto:%s sysctl_mem=%ld,%ld,%ld allocated=%ld sysctl_rmem=%d rmem_alloc=%d sysctl_wmem=%d wmem_alloc=%d wmem_queued=%d kind=%s", __entry->name, __entry->sysctl_mem[0], __entry->sysctl_mem[1], __entry->sysctl_mem[2], __entry->allocated, __entry->sysctl_rmem, - __entry->rmem_alloc) + __entry->rmem_alloc, + __entry->sysctl_wmem, + __entry->wmem_alloc, + __entry->wmem_queued, + show_skmem_kind_names(__entry->kind) + ) ); TRACE_EVENT(inet_sock_set_state, diff --git a/include/uapi/asm-generic/socket.h b/include/uapi/asm-generic/socket.h index 0ae758c90e54..a12692e5f7a8 100644 --- a/include/uapi/asm-generic/socket.h +++ b/include/uapi/asm-generic/socket.h @@ -107,4 +107,7 @@ #define SO_ZEROCOPY 60 +#define SO_TXTIME 61 +#define SCM_TXTIME SO_TXTIME + #endif /* __ASM_GENERIC_SOCKET_H */ diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h index b7db3261c62d..870113916cac 100644 --- a/include/uapi/linux/bpf.h +++ b/include/uapi/linux/bpf.h @@ -1826,7 +1826,7 @@ union bpf_attr { * A non-negative value equal to or less than *size* on success, * or a negative error in case of failure. * - * int skb_load_bytes_relative(const struct sk_buff *skb, u32 offset, void *to, u32 len, u32 start_header) + * int bpf_skb_load_bytes_relative(const struct sk_buff *skb, u32 offset, void *to, u32 len, u32 start_header) * Description * This helper is similar to **bpf_skb_load_bytes**\ () in that * it provides an easy way to load *len* bytes from *offset* @@ -1877,7 +1877,7 @@ union bpf_attr { * * < 0 if any input argument is invalid * * 0 on success (packet is forwarded, nexthop neighbor exists) * * > 0 one of **BPF_FIB_LKUP_RET_** codes explaining why the - * * packet is not forwarded or needs assist from full stack + * packet is not forwarded or needs assist from full stack * * int bpf_sock_hash_update(struct bpf_sock_ops_kern *skops, struct bpf_map *map, void *key, u64 flags) * Description @@ -2033,7 +2033,6 @@ union bpf_attr { * This helper is only available is the kernel was compiled with * the **CONFIG_BPF_LIRC_MODE2** configuration option set to * "**y**". - * * Return * 0 * @@ -2053,7 +2052,6 @@ union bpf_attr { * This helper is only available is the kernel was compiled with * the **CONFIG_BPF_LIRC_MODE2** configuration option set to * "**y**". - * * Return * 0 * @@ -2557,6 +2555,9 @@ enum { * Arg1: old_state * Arg2: new_state */ + BPF_SOCK_OPS_TCP_LISTEN_CB, /* Called on listen(2), right after + * socket transition to LISTEN state. + */ }; /* List of TCP states. 
There is a build check in net/ipv4/tcp.c to detect diff --git a/include/uapi/linux/devlink.h b/include/uapi/linux/devlink.h index 75cb5450c851..79407bbd296d 100644 --- a/include/uapi/linux/devlink.h +++ b/include/uapi/linux/devlink.h @@ -78,6 +78,17 @@ enum devlink_command { */ DEVLINK_CMD_RELOAD, + DEVLINK_CMD_PARAM_GET, /* can dump */ + DEVLINK_CMD_PARAM_SET, + DEVLINK_CMD_PARAM_NEW, + DEVLINK_CMD_PARAM_DEL, + + DEVLINK_CMD_REGION_GET, + DEVLINK_CMD_REGION_SET, + DEVLINK_CMD_REGION_NEW, + DEVLINK_CMD_REGION_DEL, + DEVLINK_CMD_REGION_READ, + /* add new commands above here */ __DEVLINK_CMD_MAX, DEVLINK_CMD_MAX = __DEVLINK_CMD_MAX - 1 @@ -142,6 +153,16 @@ enum devlink_port_flavour { */ }; +enum devlink_param_cmode { + DEVLINK_PARAM_CMODE_RUNTIME, + DEVLINK_PARAM_CMODE_DRIVERINIT, + DEVLINK_PARAM_CMODE_PERMANENT, + + /* Add new configuration modes above */ + __DEVLINK_PARAM_CMODE_MAX, + DEVLINK_PARAM_CMODE_MAX = __DEVLINK_PARAM_CMODE_MAX - 1 +}; + enum devlink_attr { /* don't change the order or add anything between, this is ABI! */ DEVLINK_ATTR_UNSPEC, @@ -238,6 +259,27 @@ enum devlink_attr { DEVLINK_ATTR_PORT_NUMBER, /* u32 */ DEVLINK_ATTR_PORT_SPLIT_SUBPORT_NUMBER, /* u32 */ + DEVLINK_ATTR_PARAM, /* nested */ + DEVLINK_ATTR_PARAM_NAME, /* string */ + DEVLINK_ATTR_PARAM_GENERIC, /* flag */ + DEVLINK_ATTR_PARAM_TYPE, /* u8 */ + DEVLINK_ATTR_PARAM_VALUES_LIST, /* nested */ + DEVLINK_ATTR_PARAM_VALUE, /* nested */ + DEVLINK_ATTR_PARAM_VALUE_DATA, /* dynamic */ + DEVLINK_ATTR_PARAM_VALUE_CMODE, /* u8 */ + + DEVLINK_ATTR_REGION_NAME, /* string */ + DEVLINK_ATTR_REGION_SIZE, /* u64 */ + DEVLINK_ATTR_REGION_SNAPSHOTS, /* nested */ + DEVLINK_ATTR_REGION_SNAPSHOT, /* nested */ + DEVLINK_ATTR_REGION_SNAPSHOT_ID, /* u32 */ + + DEVLINK_ATTR_REGION_CHUNKS, /* nested */ + DEVLINK_ATTR_REGION_CHUNK, /* nested */ + DEVLINK_ATTR_REGION_CHUNK_DATA, /* binary */ + DEVLINK_ATTR_REGION_CHUNK_ADDR, /* u64 */ + DEVLINK_ATTR_REGION_CHUNK_LEN, /* u64 */ + /* add new attributes above here, update the policy in devlink.c */ __DEVLINK_ATTR_MAX, diff --git a/include/uapi/linux/errqueue.h b/include/uapi/linux/errqueue.h index dc64cfaf13da..c0151200f7d1 100644 --- a/include/uapi/linux/errqueue.h +++ b/include/uapi/linux/errqueue.h @@ -20,12 +20,16 @@ struct sock_extended_err { #define SO_EE_ORIGIN_ICMP6 3 #define SO_EE_ORIGIN_TXSTATUS 4 #define SO_EE_ORIGIN_ZEROCOPY 5 +#define SO_EE_ORIGIN_TXTIME 6 #define SO_EE_ORIGIN_TIMESTAMPING SO_EE_ORIGIN_TXSTATUS #define SO_EE_OFFENDER(ee) ((struct sockaddr*)((ee)+1)) #define SO_EE_CODE_ZEROCOPY_COPIED 1 +#define SO_EE_CODE_TXTIME_INVALID_PARAM 1 +#define SO_EE_CODE_TXTIME_MISSED 2 + /** * struct scm_timestamping - timestamps exposed through cmsg * diff --git a/include/uapi/linux/if_link.h b/include/uapi/linux/if_link.h index cf01b6824244..8759cfb8aa2e 100644 --- a/include/uapi/linux/if_link.h +++ b/include/uapi/linux/if_link.h @@ -920,6 +920,7 @@ enum { XDP_ATTACHED_DRV, XDP_ATTACHED_SKB, XDP_ATTACHED_HW, + XDP_ATTACHED_MULTI, }; enum { @@ -928,6 +929,9 @@ enum { IFLA_XDP_ATTACHED, IFLA_XDP_FLAGS, IFLA_XDP_PROG_ID, + IFLA_XDP_DRV_PROG_ID, + IFLA_XDP_SKB_PROG_ID, + IFLA_XDP_HW_PROG_ID, __IFLA_XDP_MAX, }; diff --git a/include/uapi/linux/ila.h b/include/uapi/linux/ila.h index 483b77af4eb8..db45d3e49a12 100644 --- a/include/uapi/linux/ila.h +++ b/include/uapi/linux/ila.h @@ -30,6 +30,7 @@ enum { ILA_CMD_ADD, ILA_CMD_DEL, ILA_CMD_GET, + ILA_CMD_FLUSH, __ILA_CMD_MAX, }; diff --git a/include/uapi/linux/mroute.h b/include/uapi/linux/mroute.h index 10f9ff9426a2..5d37a9ccce63 100644 --- 
a/include/uapi/linux/mroute.h +++ b/include/uapi/linux/mroute.h @@ -120,6 +120,7 @@ enum { IPMRA_TABLE_MROUTE_DO_ASSERT, IPMRA_TABLE_MROUTE_DO_PIM, IPMRA_TABLE_VIFS, + IPMRA_TABLE_MROUTE_DO_WRVIFWHOLE, __IPMRA_TABLE_MAX }; #define IPMRA_TABLE_MAX (__IPMRA_TABLE_MAX - 1) @@ -173,5 +174,6 @@ enum { #define IGMPMSG_NOCACHE 1 /* Kern cache fill request to mrouted */ #define IGMPMSG_WRONGVIF 2 /* For PIM assert processing (unused) */ #define IGMPMSG_WHOLEPKT 3 /* For PIM Register processing */ +#define IGMPMSG_WRVIFWHOLE 4 /* For PIM Register and assert processing */ #endif /* _UAPI__LINUX_MROUTE_H */ diff --git a/include/uapi/linux/net_tstamp.h b/include/uapi/linux/net_tstamp.h index 4fe104b2411f..97ff3c17ec4d 100644 --- a/include/uapi/linux/net_tstamp.h +++ b/include/uapi/linux/net_tstamp.h @@ -141,4 +141,22 @@ struct scm_ts_pktinfo { __u32 reserved[2]; }; +/* + * SO_TXTIME gets a struct sock_txtime with flags being an integer bit + * field comprised of these values. + */ +enum txtime_flags { + SOF_TXTIME_DEADLINE_MODE = (1 << 0), + SOF_TXTIME_REPORT_ERRORS = (1 << 1), + + SOF_TXTIME_FLAGS_LAST = SOF_TXTIME_REPORT_ERRORS, + SOF_TXTIME_FLAGS_MASK = (SOF_TXTIME_FLAGS_LAST - 1) | + SOF_TXTIME_FLAGS_LAST +}; + +struct sock_txtime { + clockid_t clockid; /* reference clockid */ + __u32 flags; /* as defined by enum txtime_flags */ +}; + #endif /* _NET_TIMESTAMPING_H */ diff --git a/include/uapi/linux/nl80211.h b/include/uapi/linux/nl80211.h index 27e4e441caac..7acc16f34942 100644 --- a/include/uapi/linux/nl80211.h +++ b/include/uapi/linux/nl80211.h @@ -2237,6 +2237,9 @@ enum nl80211_commands { * enforced. * @NL80211_ATTR_TXQ_QUANTUM: TXQ scheduler quantum (bytes). Number of bytes * a flow is assigned on each round of the DRR scheduler. + * @NL80211_ATTR_HE_CAPABILITY: HE Capability information element (from + * association request when used with NL80211_CMD_NEW_STATION). Can be set + * only if %NL80211_STA_FLAG_WME is set. 
* * @NUM_NL80211_ATTR: total number of nl80211_attrs available * @NL80211_ATTR_MAX: highest attribute number currently defined @@ -2677,6 +2680,8 @@ enum nl80211_attrs { NL80211_ATTR_TXQ_MEMORY_LIMIT, NL80211_ATTR_TXQ_QUANTUM, + NL80211_ATTR_HE_CAPABILITY, + /* add attributes here, update the policy in nl80211.c */ __NL80211_ATTR_AFTER_LAST, @@ -2726,7 +2731,8 @@ enum nl80211_attrs { #define NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY 24 #define NL80211_HT_CAPABILITY_LEN 26 #define NL80211_VHT_CAPABILITY_LEN 12 - +#define NL80211_HE_MIN_CAPABILITY_LEN 16 +#define NL80211_HE_MAX_CAPABILITY_LEN 51 #define NL80211_MAX_NR_CIPHER_SUITES 5 #define NL80211_MAX_NR_AKM_SUITES 2 @@ -2854,6 +2860,38 @@ struct nl80211_sta_flag_update { } __attribute__((packed)); /** + * enum nl80211_he_gi - HE guard interval + * @NL80211_RATE_INFO_HE_GI_0_8: 0.8 usec + * @NL80211_RATE_INFO_HE_GI_1_6: 1.6 usec + * @NL80211_RATE_INFO_HE_GI_3_2: 3.2 usec + */ +enum nl80211_he_gi { + NL80211_RATE_INFO_HE_GI_0_8, + NL80211_RATE_INFO_HE_GI_1_6, + NL80211_RATE_INFO_HE_GI_3_2, +}; + +/** + * enum nl80211_he_ru_alloc - HE RU allocation values + * @NL80211_RATE_INFO_HE_RU_ALLOC_26: 26-tone RU allocation + * @NL80211_RATE_INFO_HE_RU_ALLOC_52: 52-tone RU allocation + * @NL80211_RATE_INFO_HE_RU_ALLOC_106: 106-tone RU allocation + * @NL80211_RATE_INFO_HE_RU_ALLOC_242: 242-tone RU allocation + * @NL80211_RATE_INFO_HE_RU_ALLOC_484: 484-tone RU allocation + * @NL80211_RATE_INFO_HE_RU_ALLOC_996: 996-tone RU allocation + * @NL80211_RATE_INFO_HE_RU_ALLOC_2x996: 2x996-tone RU allocation + */ +enum nl80211_he_ru_alloc { + NL80211_RATE_INFO_HE_RU_ALLOC_26, + NL80211_RATE_INFO_HE_RU_ALLOC_52, + NL80211_RATE_INFO_HE_RU_ALLOC_106, + NL80211_RATE_INFO_HE_RU_ALLOC_242, + NL80211_RATE_INFO_HE_RU_ALLOC_484, + NL80211_RATE_INFO_HE_RU_ALLOC_996, + NL80211_RATE_INFO_HE_RU_ALLOC_2x996, +}; + +/** * enum nl80211_rate_info - bitrate information * * These attribute types are used with %NL80211_STA_INFO_TXRATE @@ -2885,6 +2923,13 @@ struct nl80211_sta_flag_update { * @NL80211_RATE_INFO_5_MHZ_WIDTH: 5 MHz width - note that this is * a legacy rate and will be reported as the actual bitrate, i.e. 
* a quarter of the base (20 MHz) rate + * @NL80211_RATE_INFO_HE_MCS: HE MCS index (u8, 0-11) + * @NL80211_RATE_INFO_HE_NSS: HE NSS value (u8, 1-8) + * @NL80211_RATE_INFO_HE_GI: HE guard interval identifier + * (u8, see &enum nl80211_he_gi) + * @NL80211_RATE_INFO_HE_DCM: HE DCM value (u8, 0/1) + * @NL80211_RATE_INFO_RU_ALLOC: HE RU allocation, if not present then + * non-OFDMA was used (u8, see &enum nl80211_he_ru_alloc) * @__NL80211_RATE_INFO_AFTER_LAST: internal use */ enum nl80211_rate_info { @@ -2901,6 +2946,11 @@ enum nl80211_rate_info { NL80211_RATE_INFO_160_MHZ_WIDTH, NL80211_RATE_INFO_10_MHZ_WIDTH, NL80211_RATE_INFO_5_MHZ_WIDTH, + NL80211_RATE_INFO_HE_MCS, + NL80211_RATE_INFO_HE_NSS, + NL80211_RATE_INFO_HE_GI, + NL80211_RATE_INFO_HE_DCM, + NL80211_RATE_INFO_HE_RU_ALLOC, /* keep last */ __NL80211_RATE_INFO_AFTER_LAST, @@ -3167,6 +3217,38 @@ enum nl80211_mpath_info { }; /** + * enum nl80211_band_iftype_attr - Interface type data attributes + * + * @__NL80211_BAND_IFTYPE_ATTR_INVALID: attribute number 0 is reserved + * @NL80211_BAND_IFTYPE_ATTR_IFTYPES: nested attribute containing a flag attribute + * for each interface type that supports the band data + * @NL80211_BAND_IFTYPE_ATTR_HE_CAP_MAC: HE MAC capabilities as in HE + * capabilities IE + * @NL80211_BAND_IFTYPE_ATTR_HE_CAP_PHY: HE PHY capabilities as in HE + * capabilities IE + * @NL80211_BAND_IFTYPE_ATTR_HE_CAP_MCS_SET: HE supported NSS/MCS as in HE + * capabilities IE + * @NL80211_BAND_IFTYPE_ATTR_HE_CAP_PPE: HE PPE thresholds information as + * defined in HE capabilities IE + * @NL80211_BAND_IFTYPE_ATTR_MAX: highest band HE capability attribute currently + * defined + * @__NL80211_BAND_IFTYPE_ATTR_AFTER_LAST: internal use + */ +enum nl80211_band_iftype_attr { + __NL80211_BAND_IFTYPE_ATTR_INVALID, + + NL80211_BAND_IFTYPE_ATTR_IFTYPES, + NL80211_BAND_IFTYPE_ATTR_HE_CAP_MAC, + NL80211_BAND_IFTYPE_ATTR_HE_CAP_PHY, + NL80211_BAND_IFTYPE_ATTR_HE_CAP_MCS_SET, + NL80211_BAND_IFTYPE_ATTR_HE_CAP_PPE, + + /* keep last */ + __NL80211_BAND_IFTYPE_ATTR_AFTER_LAST, + NL80211_BAND_IFTYPE_ATTR_MAX = __NL80211_BAND_IFTYPE_ATTR_AFTER_LAST - 1 +}; + +/** * enum nl80211_band_attr - band attributes * @__NL80211_BAND_ATTR_INVALID: attribute number 0 is reserved * @NL80211_BAND_ATTR_FREQS: supported frequencies in this band, @@ -3181,6 +3263,8 @@ enum nl80211_mpath_info { * @NL80211_BAND_ATTR_VHT_MCS_SET: 32-byte attribute containing the MCS set as * defined in 802.11ac * @NL80211_BAND_ATTR_VHT_CAPA: VHT capabilities, as in the HT information IE + * @NL80211_BAND_ATTR_IFTYPE_DATA: nested array attribute, with each entry using + * attributes from &enum nl80211_band_iftype_attr * @NL80211_BAND_ATTR_MAX: highest band attribute currently defined * @__NL80211_BAND_ATTR_AFTER_LAST: internal use */ @@ -3196,6 +3280,7 @@ enum nl80211_band_attr { NL80211_BAND_ATTR_VHT_MCS_SET, NL80211_BAND_ATTR_VHT_CAPA, + NL80211_BAND_ATTR_IFTYPE_DATA, /* keep last */ __NL80211_BAND_ATTR_AFTER_LAST, @@ -5133,6 +5218,11 @@ enum nl80211_feature_flags { * support to nl80211. * @NL80211_EXT_FEATURE_TXQS: Driver supports FQ-CoDel-enabled intermediate * TXQs. + * @NL80211_EXT_FEATURE_SCAN_RANDOM_SN: Driver/device supports randomizing the + * SN in probe request frames if requested by %NL80211_SCAN_FLAG_RANDOM_SN. + * @NL80211_EXT_FEATURE_SCAN_MIN_PREQ_CONTENT: Driver/device can omit all data + * except for supported rates from the probe request content if requested + * by the %NL80211_SCAN_FLAG_MIN_PREQ_CONTENT flag. * * @NUM_NL80211_EXT_FEATURES: number of extended features. 
* @MAX_NL80211_EXT_FEATURES: highest extended feature index. @@ -5167,6 +5257,8 @@ enum nl80211_ext_feature_index { NL80211_EXT_FEATURE_CONTROL_PORT_OVER_NL80211, NL80211_EXT_FEATURE_DATA_ACK_SIGNAL_SUPPORT, NL80211_EXT_FEATURE_TXQS, + NL80211_EXT_FEATURE_SCAN_RANDOM_SN, + NL80211_EXT_FEATURE_SCAN_MIN_PREQ_CONTENT, /* add new features before the definition below */ NUM_NL80211_EXT_FEATURES, @@ -5272,6 +5364,12 @@ enum nl80211_timeout_reason { * possible scan results. This flag hints the driver to use the best * possible scan configuration to improve the accuracy in scanning. * Latency and power use may get impacted with this flag. + * @NL80211_SCAN_FLAG_RANDOM_SN: randomize the sequence number in probe + * request frames from this scan to avoid correlation/tracking being + * possible. + * @NL80211_SCAN_FLAG_MIN_PREQ_CONTENT: minimize probe request content to + * only have supported rates and no additional capabilities (unless + * added by userspace explicitly.) */ enum nl80211_scan_flags { NL80211_SCAN_FLAG_LOW_PRIORITY = 1<<0, @@ -5285,6 +5383,8 @@ enum nl80211_scan_flags { NL80211_SCAN_FLAG_LOW_SPAN = 1<<8, NL80211_SCAN_FLAG_LOW_POWER = 1<<9, NL80211_SCAN_FLAG_HIGH_ACCURACY = 1<<10, + NL80211_SCAN_FLAG_RANDOM_SN = 1<<11, + NL80211_SCAN_FLAG_MIN_PREQ_CONTENT = 1<<12, }; /** diff --git a/include/uapi/linux/openvswitch.h b/include/uapi/linux/openvswitch.h index 863aabaa5cc9..dbe0cbe4f1b7 100644 --- a/include/uapi/linux/openvswitch.h +++ b/include/uapi/linux/openvswitch.h @@ -840,6 +840,8 @@ struct ovs_action_push_eth { * @OVS_ACTION_ATTR_POP_NSH: pop the outermost NSH header off the packet. * @OVS_ACTION_ATTR_METER: Run packet through a meter, which may drop the * packet, or modify the packet (e.g., change the DSCP field). + * @OVS_ACTION_ATTR_CLONE: make a copy of the packet and execute a list of + * actions without affecting the original packet and key. * * Only a single header can be set with a single %OVS_ACTION_ATTR_SET. Not all * fields within a header are modifiable, e.g. the IPv4 protocol and fragment @@ -873,6 +875,7 @@ enum ovs_action_attr { OVS_ACTION_ATTR_PUSH_NSH, /* Nested OVS_NSH_KEY_ATTR_*. */ OVS_ACTION_ATTR_POP_NSH, /* No argument. */ OVS_ACTION_ATTR_METER, /* u32 meter ID. */ + OVS_ACTION_ATTR_CLONE, /* Nested OVS_CLONE_ATTR_*. */ __OVS_ACTION_ATTR_MAX, /* Nothing past this will be accepted * from userspace. 
*/ diff --git a/include/uapi/linux/pkt_cls.h b/include/uapi/linux/pkt_cls.h index 84e4c1d0f874..b4512254036b 100644 --- a/include/uapi/linux/pkt_cls.h +++ b/include/uapi/linux/pkt_cls.h @@ -469,6 +469,15 @@ enum { TCA_FLOWER_KEY_IP_TTL, /* u8 */ TCA_FLOWER_KEY_IP_TTL_MASK, /* u8 */ + TCA_FLOWER_KEY_CVLAN_ID, /* be16 */ + TCA_FLOWER_KEY_CVLAN_PRIO, /* u8 */ + TCA_FLOWER_KEY_CVLAN_ETH_TYPE, /* be16 */ + + TCA_FLOWER_KEY_ENC_IP_TOS, /* u8 */ + TCA_FLOWER_KEY_ENC_IP_TOS_MASK, /* u8 */ + TCA_FLOWER_KEY_ENC_IP_TTL, /* u8 */ + TCA_FLOWER_KEY_ENC_IP_TTL_MASK, /* u8 */ + __TCA_FLOWER_MAX, }; diff --git a/include/uapi/linux/pkt_sched.h b/include/uapi/linux/pkt_sched.h index 37b5096ae97b..d9cc9dc4f547 100644 --- a/include/uapi/linux/pkt_sched.h +++ b/include/uapi/linux/pkt_sched.h @@ -539,6 +539,7 @@ enum { TCA_NETEM_LATENCY64, TCA_NETEM_JITTER64, TCA_NETEM_SLOT, + TCA_NETEM_SLOT_DIST, __TCA_NETEM_MAX, }; @@ -581,6 +582,8 @@ struct tc_netem_slot { __s64 max_delay; __s32 max_packets; __s32 max_bytes; + __s64 dist_delay; /* nsec */ + __s64 dist_jitter; /* nsec */ }; enum { @@ -934,4 +937,136 @@ enum { #define TCA_CBS_MAX (__TCA_CBS_MAX - 1) + +/* ETF */ +struct tc_etf_qopt { + __s32 delta; + __s32 clockid; + __u32 flags; +#define TC_ETF_DEADLINE_MODE_ON BIT(0) +#define TC_ETF_OFFLOAD_ON BIT(1) +}; + +enum { + TCA_ETF_UNSPEC, + TCA_ETF_PARMS, + __TCA_ETF_MAX, +}; + +#define TCA_ETF_MAX (__TCA_ETF_MAX - 1) + + +/* CAKE */ +enum { + TCA_CAKE_UNSPEC, + TCA_CAKE_PAD, + TCA_CAKE_BASE_RATE64, + TCA_CAKE_DIFFSERV_MODE, + TCA_CAKE_ATM, + TCA_CAKE_FLOW_MODE, + TCA_CAKE_OVERHEAD, + TCA_CAKE_RTT, + TCA_CAKE_TARGET, + TCA_CAKE_AUTORATE, + TCA_CAKE_MEMORY, + TCA_CAKE_NAT, + TCA_CAKE_RAW, + TCA_CAKE_WASH, + TCA_CAKE_MPU, + TCA_CAKE_INGRESS, + TCA_CAKE_ACK_FILTER, + TCA_CAKE_SPLIT_GSO, + __TCA_CAKE_MAX +}; +#define TCA_CAKE_MAX (__TCA_CAKE_MAX - 1) + +enum { + __TCA_CAKE_STATS_INVALID, + TCA_CAKE_STATS_PAD, + TCA_CAKE_STATS_CAPACITY_ESTIMATE64, + TCA_CAKE_STATS_MEMORY_LIMIT, + TCA_CAKE_STATS_MEMORY_USED, + TCA_CAKE_STATS_AVG_NETOFF, + TCA_CAKE_STATS_MIN_NETLEN, + TCA_CAKE_STATS_MAX_NETLEN, + TCA_CAKE_STATS_MIN_ADJLEN, + TCA_CAKE_STATS_MAX_ADJLEN, + TCA_CAKE_STATS_TIN_STATS, + TCA_CAKE_STATS_DEFICIT, + TCA_CAKE_STATS_COBALT_COUNT, + TCA_CAKE_STATS_DROPPING, + TCA_CAKE_STATS_DROP_NEXT_US, + TCA_CAKE_STATS_P_DROP, + TCA_CAKE_STATS_BLUE_TIMER_US, + __TCA_CAKE_STATS_MAX +}; +#define TCA_CAKE_STATS_MAX (__TCA_CAKE_STATS_MAX - 1) + +enum { + __TCA_CAKE_TIN_STATS_INVALID, + TCA_CAKE_TIN_STATS_PAD, + TCA_CAKE_TIN_STATS_SENT_PACKETS, + TCA_CAKE_TIN_STATS_SENT_BYTES64, + TCA_CAKE_TIN_STATS_DROPPED_PACKETS, + TCA_CAKE_TIN_STATS_DROPPED_BYTES64, + TCA_CAKE_TIN_STATS_ACKS_DROPPED_PACKETS, + TCA_CAKE_TIN_STATS_ACKS_DROPPED_BYTES64, + TCA_CAKE_TIN_STATS_ECN_MARKED_PACKETS, + TCA_CAKE_TIN_STATS_ECN_MARKED_BYTES64, + TCA_CAKE_TIN_STATS_BACKLOG_PACKETS, + TCA_CAKE_TIN_STATS_BACKLOG_BYTES, + TCA_CAKE_TIN_STATS_THRESHOLD_RATE64, + TCA_CAKE_TIN_STATS_TARGET_US, + TCA_CAKE_TIN_STATS_INTERVAL_US, + TCA_CAKE_TIN_STATS_WAY_INDIRECT_HITS, + TCA_CAKE_TIN_STATS_WAY_MISSES, + TCA_CAKE_TIN_STATS_WAY_COLLISIONS, + TCA_CAKE_TIN_STATS_PEAK_DELAY_US, + TCA_CAKE_TIN_STATS_AVG_DELAY_US, + TCA_CAKE_TIN_STATS_BASE_DELAY_US, + TCA_CAKE_TIN_STATS_SPARSE_FLOWS, + TCA_CAKE_TIN_STATS_BULK_FLOWS, + TCA_CAKE_TIN_STATS_UNRESPONSIVE_FLOWS, + TCA_CAKE_TIN_STATS_MAX_SKBLEN, + TCA_CAKE_TIN_STATS_FLOW_QUANTUM, + __TCA_CAKE_TIN_STATS_MAX +}; +#define TCA_CAKE_TIN_STATS_MAX (__TCA_CAKE_TIN_STATS_MAX - 1) +#define TC_CAKE_MAX_TINS (8) + +enum { + CAKE_FLOW_NONE = 0, + 
CAKE_FLOW_SRC_IP, + CAKE_FLOW_DST_IP, + CAKE_FLOW_HOSTS, /* = CAKE_FLOW_SRC_IP | CAKE_FLOW_DST_IP */ + CAKE_FLOW_FLOWS, + CAKE_FLOW_DUAL_SRC, /* = CAKE_FLOW_SRC_IP | CAKE_FLOW_FLOWS */ + CAKE_FLOW_DUAL_DST, /* = CAKE_FLOW_DST_IP | CAKE_FLOW_FLOWS */ + CAKE_FLOW_TRIPLE, /* = CAKE_FLOW_HOSTS | CAKE_FLOW_FLOWS */ + CAKE_FLOW_MAX, +}; + +enum { + CAKE_DIFFSERV_DIFFSERV3 = 0, + CAKE_DIFFSERV_DIFFSERV4, + CAKE_DIFFSERV_DIFFSERV8, + CAKE_DIFFSERV_BESTEFFORT, + CAKE_DIFFSERV_PRECEDENCE, + CAKE_DIFFSERV_MAX +}; + +enum { + CAKE_ACK_NONE = 0, + CAKE_ACK_FILTER, + CAKE_ACK_AGGRESSIVE, + CAKE_ACK_MAX +}; + +enum { + CAKE_ATM_NONE = 0, + CAKE_ATM_ATM, + CAKE_ATM_PTM, + CAKE_ATM_MAX +}; + #endif diff --git a/include/uapi/linux/sctp.h b/include/uapi/linux/sctp.h index b64d583bf053..b479db5c71d9 100644 --- a/include/uapi/linux/sctp.h +++ b/include/uapi/linux/sctp.h @@ -100,6 +100,7 @@ typedef __s32 sctp_assoc_t; #define SCTP_RECVNXTINFO 33 #define SCTP_DEFAULT_SNDINFO 34 #define SCTP_AUTH_DEACTIVATE_KEY 35 +#define SCTP_REUSE_PORT 36 /* Internal Socket Options. Some of the sctp library functions are * implemented using these socket options. @@ -762,6 +763,8 @@ enum sctp_spp_flags { SPP_SACKDELAY_DISABLE = 1<<6, /*Disable SACK*/ SPP_SACKDELAY = SPP_SACKDELAY_ENABLE | SPP_SACKDELAY_DISABLE, SPP_HB_TIME_IS_ZERO = 1<<7, /* Set HB delay to 0 */ + SPP_IPV6_FLOWLABEL = 1<<8, + SPP_DSCP = 1<<9, }; struct sctp_paddrparams { @@ -772,6 +775,8 @@ struct sctp_paddrparams { __u32 spp_pathmtu; __u32 spp_sackdelay; __u32 spp_flags; + __u32 spp_ipv6_flowlabel; + __u8 spp_dscp; } __attribute__((packed, aligned(4))); /* diff --git a/include/uapi/linux/smc_diag.h b/include/uapi/linux/smc_diag.h index 0ae5d4685ba3..92be255e534c 100644 --- a/include/uapi/linux/smc_diag.h +++ b/include/uapi/linux/smc_diag.h @@ -35,6 +35,7 @@ enum { SMC_DIAG_CONNINFO, SMC_DIAG_LGRINFO, SMC_DIAG_SHUTDOWN, + SMC_DIAG_DMBINFO, __SMC_DIAG_MAX, }; @@ -83,4 +84,13 @@ struct smc_diag_lgrinfo { struct smc_diag_linkinfo lnk[1]; __u8 role; }; + +struct smcd_diag_dmbinfo { /* SMC-D Socket internals */ + __u32 linkid; /* Link identifier */ + __u64 peer_gid; /* Peer GID */ + __u64 my_gid; /* My GID */ + __u64 token; /* Token of DMB */ + __u64 peer_token; /* Token of remote DMBE */ +}; + #endif /* _UAPI_SMC_DIAG_H_ */ diff --git a/include/uapi/linux/snmp.h b/include/uapi/linux/snmp.h index 750d89120335..e5ebc83827ab 100644 --- a/include/uapi/linux/snmp.h +++ b/include/uapi/linux/snmp.h @@ -279,6 +279,8 @@ enum LINUX_MIB_TCPDELIVERED, /* TCPDelivered */ LINUX_MIB_TCPDELIVEREDCE, /* TCPDeliveredCE */ LINUX_MIB_TCPACKCOMPRESSED, /* TCPAckCompressed */ + LINUX_MIB_TCPZEROWINDOWDROP, /* TCPZeroWindowDrop */ + LINUX_MIB_TCPRCVQDROP, /* TCPRcvQDrop */ __LINUX_MIB_MAX }; diff --git a/include/uapi/linux/tc_act/tc_pedit.h b/include/uapi/linux/tc_act/tc_pedit.h index 162d1094c41c..24ec792dacc1 100644 --- a/include/uapi/linux/tc_act/tc_pedit.h +++ b/include/uapi/linux/tc_act/tc_pedit.h @@ -17,13 +17,15 @@ enum { TCA_PEDIT_KEY_EX, __TCA_PEDIT_MAX }; + #define TCA_PEDIT_MAX (__TCA_PEDIT_MAX - 1) - + enum { TCA_PEDIT_KEY_EX_HTYPE = 1, TCA_PEDIT_KEY_EX_CMD = 2, __TCA_PEDIT_KEY_EX_MAX }; + #define TCA_PEDIT_KEY_EX_MAX (__TCA_PEDIT_KEY_EX_MAX - 1) /* TCA_PEDIT_KEY_EX_HDR_TYPE_NETWROK is a special case for legacy users. 
It @@ -38,6 +40,7 @@ enum pedit_header_type { TCA_PEDIT_KEY_EX_HDR_TYPE_UDP = 5, __PEDIT_HDR_TYPE_MAX, }; + #define TCA_PEDIT_HDR_TYPE_MAX (__PEDIT_HDR_TYPE_MAX - 1) enum pedit_cmd { @@ -45,6 +48,7 @@ enum pedit_cmd { TCA_PEDIT_KEY_EX_CMD_ADD = 1, __PEDIT_CMD_MAX, }; + #define TCA_PEDIT_CMD_MAX (__PEDIT_CMD_MAX - 1) struct tc_pedit_key { @@ -55,13 +59,14 @@ struct tc_pedit_key { __u32 offmask; __u32 shift; }; - + struct tc_pedit_sel { tc_gen; unsigned char nkeys; unsigned char flags; struct tc_pedit_key keys[0]; }; + #define tc_pedit tc_pedit_sel #endif diff --git a/include/uapi/linux/tc_act/tc_skbedit.h b/include/uapi/linux/tc_act/tc_skbedit.h index fbcfe27a4e6c..6de6071ebed6 100644 --- a/include/uapi/linux/tc_act/tc_skbedit.h +++ b/include/uapi/linux/tc_act/tc_skbedit.h @@ -30,6 +30,7 @@ #define SKBEDIT_F_MARK 0x4 #define SKBEDIT_F_PTYPE 0x8 #define SKBEDIT_F_MASK 0x10 +#define SKBEDIT_F_INHERITDSFIELD 0x20 struct tc_skbedit { tc_gen; @@ -45,6 +46,7 @@ enum { TCA_SKBEDIT_PAD, TCA_SKBEDIT_PTYPE, TCA_SKBEDIT_MASK, + TCA_SKBEDIT_FLAGS, __TCA_SKBEDIT_MAX }; #define TCA_SKBEDIT_MAX (__TCA_SKBEDIT_MAX - 1) diff --git a/include/uapi/linux/tc_act/tc_tunnel_key.h b/include/uapi/linux/tc_act/tc_tunnel_key.h index 72bbefe5d1d1..be384d63e1b5 100644 --- a/include/uapi/linux/tc_act/tc_tunnel_key.h +++ b/include/uapi/linux/tc_act/tc_tunnel_key.h @@ -36,9 +36,37 @@ enum { TCA_TUNNEL_KEY_PAD, TCA_TUNNEL_KEY_ENC_DST_PORT, /* be16 */ TCA_TUNNEL_KEY_NO_CSUM, /* u8 */ + TCA_TUNNEL_KEY_ENC_OPTS, /* Nested TCA_TUNNEL_KEY_ENC_OPTS_ + * attributes + */ + TCA_TUNNEL_KEY_ENC_TOS, /* u8 */ + TCA_TUNNEL_KEY_ENC_TTL, /* u8 */ __TCA_TUNNEL_KEY_MAX, }; #define TCA_TUNNEL_KEY_MAX (__TCA_TUNNEL_KEY_MAX - 1) +enum { + TCA_TUNNEL_KEY_ENC_OPTS_UNSPEC, + TCA_TUNNEL_KEY_ENC_OPTS_GENEVE, /* Nested + * TCA_TUNNEL_KEY_ENC_OPTS_ + * attributes + */ + __TCA_TUNNEL_KEY_ENC_OPTS_MAX, +}; + +#define TCA_TUNNEL_KEY_ENC_OPTS_MAX (__TCA_TUNNEL_KEY_ENC_OPTS_MAX - 1) + +enum { + TCA_TUNNEL_KEY_ENC_OPT_GENEVE_UNSPEC, + TCA_TUNNEL_KEY_ENC_OPT_GENEVE_CLASS, /* be16 */ + TCA_TUNNEL_KEY_ENC_OPT_GENEVE_TYPE, /* u8 */ + TCA_TUNNEL_KEY_ENC_OPT_GENEVE_DATA, /* 4 to 128 bytes */ + + __TCA_TUNNEL_KEY_ENC_OPT_GENEVE_MAX, +}; + +#define TCA_TUNNEL_KEY_ENC_OPT_GENEVE_MAX \ + (__TCA_TUNNEL_KEY_ENC_OPT_GENEVE_MAX - 1) + #endif diff --git a/include/uapi/linux/tipc_netlink.h b/include/uapi/linux/tipc_netlink.h index 85c11982c89b..0ebe02ef1a86 100644 --- a/include/uapi/linux/tipc_netlink.h +++ b/include/uapi/linux/tipc_netlink.h @@ -121,6 +121,7 @@ enum { TIPC_NLA_SOCK_TIPC_STATE, /* u32 */ TIPC_NLA_SOCK_COOKIE, /* u64 */ TIPC_NLA_SOCK_PAD, /* flag */ + TIPC_NLA_SOCK_GROUP, /* nest */ __TIPC_NLA_SOCK_MAX, TIPC_NLA_SOCK_MAX = __TIPC_NLA_SOCK_MAX - 1 @@ -233,6 +234,19 @@ enum { TIPC_NLA_MON_PEER_MAX = __TIPC_NLA_MON_PEER_MAX - 1 }; +/* Nest, socket group info */ +enum { + TIPC_NLA_SOCK_GROUP_ID, /* u32 */ + TIPC_NLA_SOCK_GROUP_OPEN, /* flag */ + TIPC_NLA_SOCK_GROUP_NODE_SCOPE, /* flag */ + TIPC_NLA_SOCK_GROUP_CLUSTER_SCOPE, /* flag */ + TIPC_NLA_SOCK_GROUP_INSTANCE, /* u32 */ + TIPC_NLA_SOCK_GROUP_BC_SEND_NEXT, /* u32 */ + + __TIPC_NLA_SOCK_GROUP_MAX, + TIPC_NLA_SOCK_GROUP_MAX = __TIPC_NLA_SOCK_GROUP_MAX - 1 +}; + /* Nest, connection info */ enum { TIPC_NLA_CON_UNSPEC, diff --git a/ipc/msg.c b/ipc/msg.c index 3b6545302598..203281198079 100644 --- a/ipc/msg.c +++ b/ipc/msg.c @@ -38,6 +38,7 @@ #include <linux/rwsem.h> #include <linux/nsproxy.h> #include <linux/ipc_namespace.h> +#include <linux/rhashtable.h> #include <asm/current.h> #include <linux/uaccess.h> 
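
Editor's note: the rhashtable include changes in this series follow one pattern. Headers that only embed a struct rhashtable (sctp/structs.h, seg6.h, seg6_hmac.h above) now pull in <linux/rhashtable-types.h>, which carries just the type and parameter definitions, while every translation unit that actually calls the inline insert/lookup helpers (the ipc/ files in this and the following hunks, and lib/rhashtable.c itself) must include the full <linux/rhashtable.h>. The sketch below illustrates that split under assumed names; the foo_* identifiers and key layout are illustrative only and do not come from the patch, and .nulls_base is deliberately absent since this series removes it.

/* foo.h - embeds the table; only the type definitions are needed here. */
#include <linux/rhashtable-types.h>

struct foo_table {
	struct rhashtable ht;		/* definition from rhashtable-types.h */
};

int foo_table_init(struct foo_table *t);

/* foo.c - uses the inline API, so it needs the full header. */
#include <linux/rhashtable.h>
#include <linux/types.h>

struct foo_entry {
	struct rhash_head node;		/* hash linkage */
	u32 key;			/* lookup key */
};

static const struct rhashtable_params foo_params = {
	.head_offset		= offsetof(struct foo_entry, node),
	.key_offset		= offsetof(struct foo_entry, key),
	.key_len		= sizeof(u32),
	.automatic_shrinking	= true,
};

int foo_table_init(struct foo_table *t)
{
	return rhashtable_init(&t->ht, &foo_params);
}

static int foo_add(struct foo_table *t, struct foo_entry *e)
{
	/* params are passed by value to the fast helpers */
	return rhashtable_insert_fast(&t->ht, &e->node, foo_params);
}

A header that kept including <linux/rhashtable.h> would still build, but the -types split keeps heavy inline code out of widely included headers, which is why the ipc/ sources gain the explicit include here.
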
diff --git a/ipc/sem.c b/ipc/sem.c index 5af1943ad782..29c0347ef11d 100644 --- a/ipc/sem.c +++ b/ipc/sem.c @@ -86,6 +86,7 @@ #include <linux/ipc_namespace.h> #include <linux/sched/wake_q.h> #include <linux/nospec.h> +#include <linux/rhashtable.h> #include <linux/uaccess.h> #include "util.h" diff --git a/ipc/shm.c b/ipc/shm.c index 051a3e1fb8df..d4daf78df6da 100644 --- a/ipc/shm.c +++ b/ipc/shm.c @@ -43,6 +43,7 @@ #include <linux/nsproxy.h> #include <linux/mount.h> #include <linux/ipc_namespace.h> +#include <linux/rhashtable.h> #include <linux/uaccess.h> diff --git a/ipc/util.c b/ipc/util.c index 4e81182fa0ac..fdffff41f65b 100644 --- a/ipc/util.c +++ b/ipc/util.c @@ -63,6 +63,7 @@ #include <linux/rwsem.h> #include <linux/memory.h> #include <linux/ipc_namespace.h> +#include <linux/rhashtable.h> #include <asm/unistd.h> diff --git a/lib/nlattr.c b/lib/nlattr.c index dfa55c873c13..e335bcafa9e4 100644 --- a/lib/nlattr.c +++ b/lib/nlattr.c @@ -253,8 +253,8 @@ int nla_parse(struct nlattr **tb, int maxtype, const struct nlattr *head, if (policy) { err = validate_nla(nla, maxtype, policy); if (err < 0) { - if (extack) - extack->bad_attr = nla; + NL_SET_ERR_MSG_ATTR(extack, nla, + "Attribute failed policy validation"); goto errout; } } diff --git a/lib/reciprocal_div.c b/lib/reciprocal_div.c index fcb4ce682c6f..bf043258fa00 100644 --- a/lib/reciprocal_div.c +++ b/lib/reciprocal_div.c @@ -1,4 +1,5 @@ // SPDX-License-Identifier: GPL-2.0 +#include <linux/bug.h> #include <linux/kernel.h> #include <asm/div64.h> #include <linux/reciprocal_div.h> @@ -26,3 +27,43 @@ struct reciprocal_value reciprocal_value(u32 d) return R; } EXPORT_SYMBOL(reciprocal_value); + +struct reciprocal_value_adv reciprocal_value_adv(u32 d, u8 prec) +{ + struct reciprocal_value_adv R; + u32 l, post_shift; + u64 mhigh, mlow; + + /* ceil(log2(d)) */ + l = fls(d - 1); + /* NOTE: mlow/mhigh could overflow u64 when l == 32. This case needs to + * be handled before calling "reciprocal_value_adv", please see the + * comment at include/linux/reciprocal_div.h. 
+ */ + WARN(l == 32, + "ceil(log2(0x%08x)) == 32, %s doesn't support such divisor", + d, __func__); + post_shift = l; + mlow = 1ULL << (32 + l); + do_div(mlow, d); + mhigh = (1ULL << (32 + l)) + (1ULL << (32 + l - prec)); + do_div(mhigh, d); + + for (; post_shift > 0; post_shift--) { + u64 lo = mlow >> 1, hi = mhigh >> 1; + + if (lo >= hi) + break; + + mlow = lo; + mhigh = hi; + } + + R.m = (u32)mhigh; + R.sh = post_shift; + R.exp = l; + R.is_wide_m = mhigh > U32_MAX; + + return R; +} +EXPORT_SYMBOL(reciprocal_value_adv); diff --git a/lib/rhashtable.c b/lib/rhashtable.c index e5c8586cf717..ae4223e0f5bc 100644 --- a/lib/rhashtable.c +++ b/lib/rhashtable.c @@ -28,6 +28,7 @@ #include <linux/rhashtable.h> #include <linux/err.h> #include <linux/export.h> +#include <linux/rhashtable.h> #define HASH_DEFAULT_SIZE 64UL #define HASH_MIN_SIZE 4U @@ -115,8 +116,7 @@ static void bucket_table_free_rcu(struct rcu_head *head) static union nested_table *nested_table_alloc(struct rhashtable *ht, union nested_table __rcu **prev, - unsigned int shifted, - unsigned int nhash) + bool leaf) { union nested_table *ntbl; int i; @@ -127,10 +127,9 @@ static union nested_table *nested_table_alloc(struct rhashtable *ht, ntbl = kzalloc(PAGE_SIZE, GFP_ATOMIC); - if (ntbl && shifted) { - for (i = 0; i < PAGE_SIZE / sizeof(ntbl[0].bucket); i++) - INIT_RHT_NULLS_HEAD(ntbl[i].bucket, ht, - (i << shifted) | nhash); + if (ntbl && leaf) { + for (i = 0; i < PAGE_SIZE / sizeof(ntbl[0]); i++) + INIT_RHT_NULLS_HEAD(ntbl[i].bucket); } rcu_assign_pointer(*prev, ntbl); @@ -156,7 +155,7 @@ static struct bucket_table *nested_bucket_table_alloc(struct rhashtable *ht, return NULL; if (!nested_table_alloc(ht, (union nested_table __rcu **)tbl->buckets, - 0, 0)) { + false)) { kfree(tbl); return NULL; } @@ -206,7 +205,7 @@ static struct bucket_table *bucket_table_alloc(struct rhashtable *ht, tbl->hash_rnd = get_random_u32(); for (i = 0; i < nbuckets; i++) - INIT_RHT_NULLS_HEAD(tbl->buckets[i], ht, i); + INIT_RHT_NULLS_HEAD(tbl->buckets[i]); return tbl; } @@ -227,8 +226,7 @@ static struct bucket_table *rhashtable_last_table(struct rhashtable *ht, static int rhashtable_rehash_one(struct rhashtable *ht, unsigned int old_hash) { struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht); - struct bucket_table *new_tbl = rhashtable_last_table(ht, - rht_dereference_rcu(old_tbl->future_tbl, ht)); + struct bucket_table *new_tbl = rhashtable_last_table(ht, old_tbl); struct rhash_head __rcu **pprev = rht_bucket_var(old_tbl, old_hash); int err = -EAGAIN; struct rhash_head *head, *next, *entry; @@ -298,21 +296,14 @@ static int rhashtable_rehash_attach(struct rhashtable *ht, struct bucket_table *old_tbl, struct bucket_table *new_tbl) { - /* Protect future_tbl using the first bucket lock. */ - spin_lock_bh(old_tbl->locks); - - /* Did somebody beat us to it? */ - if (rcu_access_pointer(old_tbl->future_tbl)) { - spin_unlock_bh(old_tbl->locks); - return -EEXIST; - } - /* Make insertions go into the new, empty table right away. Deletions * and lookups will be attempted in both tables until we synchronize. + * As cmpxchg() provides strong barriers, we do not need + * rcu_assign_pointer(). */ - rcu_assign_pointer(old_tbl->future_tbl, new_tbl); - spin_unlock_bh(old_tbl->locks); + if (cmpxchg(&old_tbl->future_tbl, NULL, new_tbl) != NULL) + return -EEXIST; return 0; } @@ -475,7 +466,7 @@ static int rhashtable_insert_rehash(struct rhashtable *ht, fail: /* Do not fail the insert if someone else did a rehash. 
*/ - if (likely(rcu_dereference_raw(tbl->future_tbl))) + if (likely(rcu_access_pointer(tbl->future_tbl))) return 0; /* Schedule async rehash to retry allocation in process context. */ @@ -548,7 +539,7 @@ static struct bucket_table *rhashtable_insert_one(struct rhashtable *ht, if (PTR_ERR(data) != -EAGAIN && PTR_ERR(data) != -ENOENT) return ERR_CAST(data); - new_tbl = rcu_dereference(tbl->future_tbl); + new_tbl = rht_dereference_rcu(tbl->future_tbl, ht); if (new_tbl) return new_tbl; @@ -607,7 +598,7 @@ static void *rhashtable_try_insert(struct rhashtable *ht, const void *key, break; spin_unlock_bh(lock); - tbl = rcu_dereference(tbl->future_tbl); + tbl = rht_dereference_rcu(tbl->future_tbl, ht); } data = rhashtable_lookup_one(ht, tbl, hash, key, obj); @@ -1002,7 +993,6 @@ static u32 rhashtable_jhash2(const void *key, u32 length, u32 seed) * .key_offset = offsetof(struct test_obj, key), * .key_len = sizeof(int), * .hashfn = jhash, - * .nulls_base = (1U << RHT_BASE_SHIFT), * }; * * Configuration Example 2: Variable length keys @@ -1034,9 +1024,6 @@ int rhashtable_init(struct rhashtable *ht, (params->obj_hashfn && !params->obj_cmpfn)) return -EINVAL; - if (params->nulls_base && params->nulls_base < (1U << RHT_BASE_SHIFT)) - return -EINVAL; - memset(ht, 0, sizeof(*ht)); mutex_init(&ht->mutex); spin_lock_init(&ht->lock); @@ -1100,10 +1087,6 @@ int rhltable_init(struct rhltable *hlt, const struct rhashtable_params *params) { int err; - /* No rhlist NULLs marking for now. */ - if (params->nulls_base) - return -EINVAL; - err = rhashtable_init(&hlt->ht, params); hlt->ht.rhlist = true; return err; @@ -1227,25 +1210,18 @@ struct rhash_head __rcu **rht_bucket_nested_insert(struct rhashtable *ht, unsigned int index = hash & ((1 << tbl->nest) - 1); unsigned int size = tbl->size >> tbl->nest; union nested_table *ntbl; - unsigned int shifted; - unsigned int nhash; ntbl = (union nested_table *)rcu_dereference_raw(tbl->buckets[0]); hash >>= tbl->nest; - nhash = index; - shifted = tbl->nest; ntbl = nested_table_alloc(ht, &ntbl[index].table, - size <= (1 << shift) ? shifted : 0, nhash); + size <= (1 << shift)); while (ntbl && size > (1 << shift)) { index = hash & ((1 << shift) - 1); size >>= shift; hash >>= shift; - nhash |= index << shifted; - shifted += shift; ntbl = nested_table_alloc(ht, &ntbl[index].table, - size <= (1 << shift) ? 
shifted : 0, - nhash); + size <= (1 << shift)); } if (!ntbl) diff --git a/lib/test_rhashtable.c b/lib/test_rhashtable.c index fb6968109113..82ac39ce5310 100644 --- a/lib/test_rhashtable.c +++ b/lib/test_rhashtable.c @@ -83,7 +83,7 @@ static u32 my_hashfn(const void *data, u32 len, u32 seed) { const struct test_obj_rhl *obj = data; - return (obj->value.id % 10) << RHT_HASH_RESERVED_SPACE; + return (obj->value.id % 10); } static int my_cmpfn(struct rhashtable_compare_arg *arg, const void *obj) @@ -99,7 +99,6 @@ static struct rhashtable_params test_rht_params = { .key_offset = offsetof(struct test_obj, value), .key_len = sizeof(struct test_obj_val), .hashfn = jhash, - .nulls_base = (3U << RHT_BASE_SHIFT), }; static struct rhashtable_params test_rht_params_dup = { @@ -296,8 +295,6 @@ static int __init test_rhltable(unsigned int entries) if (!obj_in_table) goto out_free; - /* nulls_base not supported in rhlist interface */ - test_rht_params.nulls_base = 0; err = rhltable_init(&rhlt, &test_rht_params); if (WARN_ON(err)) goto out_free; @@ -501,6 +498,8 @@ static unsigned int __init print_ht(struct rhltable *rhlt) unsigned int i, cnt = 0; ht = &rhlt->ht; + /* Take the mutex to avoid RCU warning */ + mutex_lock(&ht->mutex); tbl = rht_dereference(ht->tbl, ht); for (i = 0; i < tbl->size; i++) { struct rhash_head *pos, *next; @@ -534,6 +533,7 @@ static unsigned int __init print_ht(struct rhltable *rhlt) } } printk(KERN_ERR "\n---- ht: ----%s\n-------------\n", buff); + mutex_unlock(&ht->mutex); return cnt; } diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c index 8ccee3d01822..5e9950453955 100644 --- a/net/8021q/vlan.c +++ b/net/8021q/vlan.c @@ -647,13 +647,14 @@ out: return err; } -static struct sk_buff **vlan_gro_receive(struct sk_buff **head, - struct sk_buff *skb) +static struct sk_buff *vlan_gro_receive(struct list_head *head, + struct sk_buff *skb) { - struct sk_buff *p, **pp = NULL; - struct vlan_hdr *vhdr; - unsigned int hlen, off_vlan; const struct packet_offload *ptype; + unsigned int hlen, off_vlan; + struct sk_buff *pp = NULL; + struct vlan_hdr *vhdr; + struct sk_buff *p; __be16 type; int flush = 1; @@ -675,7 +676,7 @@ static struct sk_buff **vlan_gro_receive(struct sk_buff **head, flush = 0; - for (p = *head; p; p = p->next) { + list_for_each_entry(p, head, list) { struct vlan_hdr *vhdr2; if (!NAPI_GRO_CB(p)->same_flow) diff --git a/net/batman-adv/Kconfig b/net/batman-adv/Kconfig index de8034d80623..361116f77cb9 100644 --- a/net/batman-adv/Kconfig +++ b/net/batman-adv/Kconfig @@ -24,7 +24,6 @@ config BATMAN_ADV depends on NET select CRC16 select LIBCRC32C - default n help B.A.T.M.A.N. (better approach to mobile ad-hoc networking) is a routing protocol for multi-hop ad-hoc mesh networks. The @@ -33,7 +32,7 @@ config BATMAN_ADV tools. config BATMAN_ADV_BATMAN_V - bool "B.A.T.M.A.N. V protocol (experimental)" + bool "B.A.T.M.A.N. 
V protocol" depends on BATMAN_ADV && !(CFG80211=m && BATMAN_ADV=y) default y help @@ -60,7 +59,7 @@ config BATMAN_ADV_BLA config BATMAN_ADV_DAT bool "Distributed ARP Table" depends on BATMAN_ADV && INET - default n + default y help This option enables DAT (Distributed ARP Table), a DHT based mechanism that increases ARP reliability on sparse wireless @@ -70,7 +69,6 @@ config BATMAN_ADV_DAT config BATMAN_ADV_NC bool "Network Coding" depends on BATMAN_ADV - default n help This option enables network coding, a mechanism that aims to increase the overall network throughput by fusing multiple @@ -84,7 +82,6 @@ config BATMAN_ADV_NC config BATMAN_ADV_MCAST bool "Multicast optimisation" depends on BATMAN_ADV && INET && !(BRIDGE=m && BATMAN_ADV=y) - default n help This option enables the multicast optimisation which aims to reduce the air overhead while improving the reliability of @@ -94,7 +91,6 @@ config BATMAN_ADV_DEBUGFS bool "batman-adv debugfs entries" depends on BATMAN_ADV depends on DEBUG_FS - default n help Enable this to export routing related debug tables via debugfs. The information for each soft-interface and used hard-interface can be diff --git a/net/batman-adv/bat_iv_ogm.h b/net/batman-adv/bat_iv_ogm.h index 317cafd302cf..3dc6a7a43eb7 100644 --- a/net/batman-adv/bat_iv_ogm.h +++ b/net/batman-adv/bat_iv_ogm.h @@ -16,11 +16,11 @@ * along with this program; if not, see <http://www.gnu.org/licenses/>. */ -#ifndef _BATMAN_ADV_BATADV_IV_OGM_H_ -#define _BATMAN_ADV_BATADV_IV_OGM_H_ +#ifndef _NET_BATMAN_ADV_BAT_IV_OGM_H_ +#define _NET_BATMAN_ADV_BAT_IV_OGM_H_ #include "main.h" int batadv_iv_init(void); -#endif /* _BATMAN_ADV_BATADV_IV_OGM_H_ */ +#endif /* _NET_BATMAN_ADV_BAT_IV_OGM_H_ */ diff --git a/net/batman-adv/bat_v_ogm.h b/net/batman-adv/bat_v_ogm.h index ed36c5e79fde..e5be14c908c6 100644 --- a/net/batman-adv/bat_v_ogm.h +++ b/net/batman-adv/bat_v_ogm.h @@ -16,8 +16,8 @@ * along with this program; if not, see <http://www.gnu.org/licenses/>. */ -#ifndef _BATMAN_ADV_BATADV_V_OGM_H_ -#define _BATMAN_ADV_BATADV_V_OGM_H_ +#ifndef _NET_BATMAN_ADV_BAT_V_OGM_H_ +#define _NET_BATMAN_ADV_BAT_V_OGM_H_ #include "main.h" @@ -34,4 +34,4 @@ void batadv_v_ogm_primary_iface_set(struct batadv_hard_iface *primary_iface); int batadv_v_ogm_packet_recv(struct sk_buff *skb, struct batadv_hard_iface *if_incoming); -#endif /* _BATMAN_ADV_BATADV_V_OGM_H_ */ +#endif /* _NET_BATMAN_ADV_BAT_V_OGM_H_ */ diff --git a/net/batman-adv/bridge_loop_avoidance.c b/net/batman-adv/bridge_loop_avoidance.c index a2de5a44bd41..ff9659af6b91 100644 --- a/net/batman-adv/bridge_loop_avoidance.c +++ b/net/batman-adv/bridge_loop_avoidance.c @@ -1449,7 +1449,7 @@ static void batadv_bla_periodic_work(struct work_struct *work) * detection frames. Set the locally administered bit to avoid * collisions with users mac addresses. 
*/ - random_ether_addr(bat_priv->bla.loopdetect_addr); + eth_random_addr(bat_priv->bla.loopdetect_addr); bat_priv->bla.loopdetect_addr[0] = 0xba; bat_priv->bla.loopdetect_addr[1] = 0xbe; bat_priv->bla.loopdetect_lasttime = jiffies; diff --git a/net/batman-adv/debugfs.c b/net/batman-adv/debugfs.c index 87479c60670e..3cb82378300b 100644 --- a/net/batman-adv/debugfs.c +++ b/net/batman-adv/debugfs.c @@ -118,7 +118,7 @@ static int batadv_bla_backbone_table_open(struct inode *inode, #ifdef CONFIG_BATMAN_ADV_DAT /** - * batadv_dat_cache_open() - Prepare file handler for reads from dat_chache + * batadv_dat_cache_open() - Prepare file handler for reads from dat_cache * @inode: inode which was opened * @file: file handle to be initialized * diff --git a/net/batman-adv/originator.c b/net/batman-adv/originator.c index 716e5b43acfa..1d295da3e342 100644 --- a/net/batman-adv/originator.c +++ b/net/batman-adv/originator.c @@ -1339,7 +1339,11 @@ static bool batadv_purge_orig_node(struct batadv_priv *bat_priv, return false; } -static void _batadv_purge_orig(struct batadv_priv *bat_priv) +/** + * batadv_purge_orig_ref() - Purge all outdated originators + * @bat_priv: the bat priv with all the soft interface information + */ +void batadv_purge_orig_ref(struct batadv_priv *bat_priv) { struct batadv_hashtable *hash = bat_priv->orig_hash; struct hlist_node *node_tmp; @@ -1385,21 +1389,12 @@ static void batadv_purge_orig(struct work_struct *work) delayed_work = to_delayed_work(work); bat_priv = container_of(delayed_work, struct batadv_priv, orig_work); - _batadv_purge_orig(bat_priv); + batadv_purge_orig_ref(bat_priv); queue_delayed_work(batadv_event_workqueue, &bat_priv->orig_work, msecs_to_jiffies(BATADV_ORIG_WORK_PERIOD)); } -/** - * batadv_purge_orig_ref() - Purge all outdated originators - * @bat_priv: the bat priv with all the soft interface information - */ -void batadv_purge_orig_ref(struct batadv_priv *bat_priv) -{ - _batadv_purge_orig(bat_priv); -} - #ifdef CONFIG_BATMAN_ADV_DEBUGFS /** diff --git a/net/batman-adv/types.h b/net/batman-adv/types.h index 360357f83f20..343d304851a5 100644 --- a/net/batman-adv/types.h +++ b/net/batman-adv/types.h @@ -43,12 +43,13 @@ struct seq_file; #ifdef CONFIG_BATMAN_ADV_DAT /** - * batadv_dat_addr_t - it is the type used for all DHT addresses. If it is - * changed, BATADV_DAT_ADDR_MAX is changed as well. + * typedef batadv_dat_addr_t - type used for all DHT addresses + * + * If it is changed, BATADV_DAT_ADDR_MAX is changed as well. * * *Please be careful: batadv_dat_addr_t must be UNSIGNED* */ -#define batadv_dat_addr_t u16 +typedef u16 batadv_dat_addr_t; #endif /* CONFIG_BATMAN_ADV_DAT */ diff --git a/net/core/dev.c b/net/core/dev.c index a5aa1c7444e6..4f8b92d81d10 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -149,7 +149,6 @@ #include "net-sysfs.h" -/* Instead of increasing this, you should create a hash table. */ #define MAX_GRO_SKBS 8 /* This should be increased if a protocol with a bigger head is added. 
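[Editorial aside] The XPS hunks below repeatedly use the pattern for (j = -1; j = netif_attrmask_next(j, mask, nr_ids), j < nr_ids;) to walk either every index (a NULL mask standing for "all") or only the indices whose bit is set, so that the same code serves both the per-CPU map and the new per-rx-queue map. The standalone sketch below models that loop shape with an invented next_attr() helper; it illustrates the iteration idiom only and is not the kernel helper.

/* Userspace model of the attribute-mask iteration used by the XPS rework:
 * with a NULL mask every index 0..nr_ids-1 is visited, otherwise only the
 * indices whose bit is set in the mask.
 */
#include <stdio.h>

#define BITS_PER_LONG (8 * sizeof(unsigned long))

/* hypothetical helper: return the next enabled id greater than prev */
static unsigned int next_attr(int prev, const unsigned long *mask,
			      unsigned int nr_ids)
{
	unsigned int j;

	for (j = (unsigned int)(prev + 1); j < nr_ids; j++) {
		if (!mask)
			return j;
		if (mask[j / BITS_PER_LONG] & (1UL << (j % BITS_PER_LONG)))
			return j;
	}
	return nr_ids;	/* past the end terminates the loop */
}

int main(void)
{
	unsigned long mask[1] = { 0x2d };	/* ids 0, 2, 3, 5 */
	unsigned int nr_ids = 8;
	int j;

	/* same loop shape as the patch: j starts at -1 and the "next"
	 * helper is evaluated as part of the loop condition */
	for (j = -1; j = next_attr(j, mask, nr_ids), j < (int)nr_ids;)
		printf("enabled id %d\n", j);

	for (j = -1; j = next_attr(j, NULL, nr_ids), j < (int)nr_ids;)
		printf("all ids %d\n", j);

	return 0;
}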
*/ @@ -2068,11 +2067,13 @@ int netdev_txq_to_tc(struct net_device *dev, unsigned int txq) struct netdev_tc_txq *tc = &dev->tc_to_txq[0]; int i; + /* walk through the TCs and see if it falls into any of them */ for (i = 0; i < TC_MAX_QUEUE; i++, tc++) { if ((txq - tc->offset) < tc->count) return i; } + /* didn't find it, just return -1 to indicate no match */ return -1; } @@ -2081,6 +2082,10 @@ int netdev_txq_to_tc(struct net_device *dev, unsigned int txq) EXPORT_SYMBOL(netdev_txq_to_tc); #ifdef CONFIG_XPS +struct static_key xps_needed __read_mostly; +EXPORT_SYMBOL(xps_needed); +struct static_key xps_rxqs_needed __read_mostly; +EXPORT_SYMBOL(xps_rxqs_needed); static DEFINE_MUTEX(xps_map_mutex); #define xmap_dereference(P) \ rcu_dereference_protected((P), lockdep_is_held(&xps_map_mutex)) @@ -2092,7 +2097,7 @@ static bool remove_xps_queue(struct xps_dev_maps *dev_maps, int pos; if (dev_maps) - map = xmap_dereference(dev_maps->cpu_map[tci]); + map = xmap_dereference(dev_maps->attr_map[tci]); if (!map) return false; @@ -2105,7 +2110,7 @@ static bool remove_xps_queue(struct xps_dev_maps *dev_maps, break; } - RCU_INIT_POINTER(dev_maps->cpu_map[tci], NULL); + RCU_INIT_POINTER(dev_maps->attr_map[tci], NULL); kfree_rcu(map, rcu); return false; } @@ -2135,33 +2140,68 @@ static bool remove_xps_queue_cpu(struct net_device *dev, return active; } +static void clean_xps_maps(struct net_device *dev, const unsigned long *mask, + struct xps_dev_maps *dev_maps, unsigned int nr_ids, + u16 offset, u16 count, bool is_rxqs_map) +{ + bool active = false; + int i, j; + + for (j = -1; j = netif_attrmask_next(j, mask, nr_ids), + j < nr_ids;) + active |= remove_xps_queue_cpu(dev, dev_maps, j, offset, + count); + if (!active) { + if (is_rxqs_map) { + RCU_INIT_POINTER(dev->xps_rxqs_map, NULL); + } else { + RCU_INIT_POINTER(dev->xps_cpus_map, NULL); + + for (i = offset + (count - 1); count--; i--) + netdev_queue_numa_node_write( + netdev_get_tx_queue(dev, i), + NUMA_NO_NODE); + } + kfree_rcu(dev_maps, rcu); + } +} + static void netif_reset_xps_queues(struct net_device *dev, u16 offset, u16 count) { + const unsigned long *possible_mask = NULL; struct xps_dev_maps *dev_maps; - int cpu, i; - bool active = false; + unsigned int nr_ids; + + if (!static_key_false(&xps_needed)) + return; mutex_lock(&xps_map_mutex); - dev_maps = xmap_dereference(dev->xps_maps); + if (static_key_false(&xps_rxqs_needed)) { + dev_maps = xmap_dereference(dev->xps_rxqs_map); + if (dev_maps) { + nr_ids = dev->num_rx_queues; + clean_xps_maps(dev, possible_mask, dev_maps, nr_ids, + offset, count, true); + } + } + + dev_maps = xmap_dereference(dev->xps_cpus_map); if (!dev_maps) goto out_no_maps; - for_each_possible_cpu(cpu) - active |= remove_xps_queue_cpu(dev, dev_maps, cpu, - offset, count); - - if (!active) { - RCU_INIT_POINTER(dev->xps_maps, NULL); - kfree_rcu(dev_maps, rcu); - } - - for (i = offset + (count - 1); count--; i--) - netdev_queue_numa_node_write(netdev_get_tx_queue(dev, i), - NUMA_NO_NODE); + if (num_possible_cpus() > 1) + possible_mask = cpumask_bits(cpu_possible_mask); + nr_ids = nr_cpu_ids; + clean_xps_maps(dev, possible_mask, dev_maps, nr_ids, offset, count, + false); out_no_maps: + if (static_key_enabled(&xps_rxqs_needed)) + static_key_slow_dec(&xps_rxqs_needed); + + static_key_slow_dec(&xps_needed); mutex_unlock(&xps_map_mutex); } @@ -2170,8 +2210,8 @@ static void netif_reset_xps_queues_gt(struct net_device *dev, u16 index) netif_reset_xps_queues(dev, index, dev->num_tx_queues - index); } -static struct xps_map 
*expand_xps_map(struct xps_map *map, - int cpu, u16 index) +static struct xps_map *expand_xps_map(struct xps_map *map, int attr_index, + u16 index, bool is_rxqs_map) { struct xps_map *new_map; int alloc_len = XPS_MIN_MAP_ALLOC; @@ -2183,7 +2223,7 @@ static struct xps_map *expand_xps_map(struct xps_map *map, return map; } - /* Need to add queue to this CPU's existing map */ + /* Need to add tx-queue to this CPU's/rx-queue's existing map */ if (map) { if (pos < map->alloc_len) return map; @@ -2191,9 +2231,14 @@ static struct xps_map *expand_xps_map(struct xps_map *map, alloc_len = map->alloc_len * 2; } - /* Need to allocate new map to store queue on this CPU's map */ - new_map = kzalloc_node(XPS_MAP_SIZE(alloc_len), GFP_KERNEL, - cpu_to_node(cpu)); + /* Need to allocate new map to store tx-queue on this CPU's/rx-queue's + * map + */ + if (is_rxqs_map) + new_map = kzalloc(XPS_MAP_SIZE(alloc_len), GFP_KERNEL); + else + new_map = kzalloc_node(XPS_MAP_SIZE(alloc_len), GFP_KERNEL, + cpu_to_node(attr_index)); if (!new_map) return NULL; @@ -2205,32 +2250,52 @@ static struct xps_map *expand_xps_map(struct xps_map *map, return new_map; } -int netif_set_xps_queue(struct net_device *dev, const struct cpumask *mask, - u16 index) +int __netif_set_xps_queue(struct net_device *dev, const unsigned long *mask, + u16 index, bool is_rxqs_map) { + const unsigned long *online_mask = NULL, *possible_mask = NULL; struct xps_dev_maps *dev_maps, *new_dev_maps = NULL; - int i, cpu, tci, numa_node_id = -2; + int i, j, tci, numa_node_id = -2; int maps_sz, num_tc = 1, tc = 0; struct xps_map *map, *new_map; bool active = false; + unsigned int nr_ids; if (dev->num_tc) { + /* Do not allow XPS on subordinate device directly */ num_tc = dev->num_tc; + if (num_tc < 0) + return -EINVAL; + + /* If queue belongs to subordinate dev use its map */ + dev = netdev_get_tx_queue(dev, index)->sb_dev ? : dev; + tc = netdev_txq_to_tc(dev, index); if (tc < 0) return -EINVAL; } - maps_sz = XPS_DEV_MAPS_SIZE(num_tc); - if (maps_sz < L1_CACHE_BYTES) - maps_sz = L1_CACHE_BYTES; - mutex_lock(&xps_map_mutex); + if (is_rxqs_map) { + maps_sz = XPS_RXQ_DEV_MAPS_SIZE(num_tc, dev->num_rx_queues); + dev_maps = xmap_dereference(dev->xps_rxqs_map); + nr_ids = dev->num_rx_queues; + } else { + maps_sz = XPS_CPU_DEV_MAPS_SIZE(num_tc); + if (num_possible_cpus() > 1) { + online_mask = cpumask_bits(cpu_online_mask); + possible_mask = cpumask_bits(cpu_possible_mask); + } + dev_maps = xmap_dereference(dev->xps_cpus_map); + nr_ids = nr_cpu_ids; + } - dev_maps = xmap_dereference(dev->xps_maps); + if (maps_sz < L1_CACHE_BYTES) + maps_sz = L1_CACHE_BYTES; /* allocate memory for queue storage */ - for_each_cpu_and(cpu, cpu_online_mask, mask) { + for (j = -1; j = netif_attrmask_next_and(j, online_mask, mask, nr_ids), + j < nr_ids;) { if (!new_dev_maps) new_dev_maps = kzalloc(maps_sz, GFP_KERNEL); if (!new_dev_maps) { @@ -2238,73 +2303,85 @@ int netif_set_xps_queue(struct net_device *dev, const struct cpumask *mask, return -ENOMEM; } - tci = cpu * num_tc + tc; - map = dev_maps ? xmap_dereference(dev_maps->cpu_map[tci]) : + tci = j * num_tc + tc; + map = dev_maps ? 
xmap_dereference(dev_maps->attr_map[tci]) : NULL; - map = expand_xps_map(map, cpu, index); + map = expand_xps_map(map, j, index, is_rxqs_map); if (!map) goto error; - RCU_INIT_POINTER(new_dev_maps->cpu_map[tci], map); + RCU_INIT_POINTER(new_dev_maps->attr_map[tci], map); } if (!new_dev_maps) goto out_no_new_maps; - for_each_possible_cpu(cpu) { + static_key_slow_inc(&xps_needed); + if (is_rxqs_map) + static_key_slow_inc(&xps_rxqs_needed); + + for (j = -1; j = netif_attrmask_next(j, possible_mask, nr_ids), + j < nr_ids;) { /* copy maps belonging to foreign traffic classes */ - for (i = tc, tci = cpu * num_tc; dev_maps && i--; tci++) { + for (i = tc, tci = j * num_tc; dev_maps && i--; tci++) { /* fill in the new device map from the old device map */ - map = xmap_dereference(dev_maps->cpu_map[tci]); - RCU_INIT_POINTER(new_dev_maps->cpu_map[tci], map); + map = xmap_dereference(dev_maps->attr_map[tci]); + RCU_INIT_POINTER(new_dev_maps->attr_map[tci], map); } /* We need to explicitly update tci as prevous loop * could break out early if dev_maps is NULL. */ - tci = cpu * num_tc + tc; + tci = j * num_tc + tc; - if (cpumask_test_cpu(cpu, mask) && cpu_online(cpu)) { - /* add queue to CPU maps */ + if (netif_attr_test_mask(j, mask, nr_ids) && + netif_attr_test_online(j, online_mask, nr_ids)) { + /* add tx-queue to CPU/rx-queue maps */ int pos = 0; - map = xmap_dereference(new_dev_maps->cpu_map[tci]); + map = xmap_dereference(new_dev_maps->attr_map[tci]); while ((pos < map->len) && (map->queues[pos] != index)) pos++; if (pos == map->len) map->queues[map->len++] = index; #ifdef CONFIG_NUMA - if (numa_node_id == -2) - numa_node_id = cpu_to_node(cpu); - else if (numa_node_id != cpu_to_node(cpu)) - numa_node_id = -1; + if (!is_rxqs_map) { + if (numa_node_id == -2) + numa_node_id = cpu_to_node(j); + else if (numa_node_id != cpu_to_node(j)) + numa_node_id = -1; + } #endif } else if (dev_maps) { /* fill in the new device map from the old device map */ - map = xmap_dereference(dev_maps->cpu_map[tci]); - RCU_INIT_POINTER(new_dev_maps->cpu_map[tci], map); + map = xmap_dereference(dev_maps->attr_map[tci]); + RCU_INIT_POINTER(new_dev_maps->attr_map[tci], map); } /* copy maps belonging to foreign traffic classes */ for (i = num_tc - tc, tci++; dev_maps && --i; tci++) { /* fill in the new device map from the old device map */ - map = xmap_dereference(dev_maps->cpu_map[tci]); - RCU_INIT_POINTER(new_dev_maps->cpu_map[tci], map); + map = xmap_dereference(dev_maps->attr_map[tci]); + RCU_INIT_POINTER(new_dev_maps->attr_map[tci], map); } } - rcu_assign_pointer(dev->xps_maps, new_dev_maps); + if (is_rxqs_map) + rcu_assign_pointer(dev->xps_rxqs_map, new_dev_maps); + else + rcu_assign_pointer(dev->xps_cpus_map, new_dev_maps); /* Cleanup old maps */ if (!dev_maps) goto out_no_old_maps; - for_each_possible_cpu(cpu) { - for (i = num_tc, tci = cpu * num_tc; i--; tci++) { - new_map = xmap_dereference(new_dev_maps->cpu_map[tci]); - map = xmap_dereference(dev_maps->cpu_map[tci]); + for (j = -1; j = netif_attrmask_next(j, possible_mask, nr_ids), + j < nr_ids;) { + for (i = num_tc, tci = j * num_tc; i--; tci++) { + new_map = xmap_dereference(new_dev_maps->attr_map[tci]); + map = xmap_dereference(dev_maps->attr_map[tci]); if (map && map != new_map) kfree_rcu(map, rcu); } @@ -2317,19 +2394,23 @@ out_no_old_maps: active = true; out_no_new_maps: - /* update Tx queue numa node */ - netdev_queue_numa_node_write(netdev_get_tx_queue(dev, index), - (numa_node_id >= 0) ? 
numa_node_id : - NUMA_NO_NODE); + if (!is_rxqs_map) { + /* update Tx queue numa node */ + netdev_queue_numa_node_write(netdev_get_tx_queue(dev, index), + (numa_node_id >= 0) ? + numa_node_id : NUMA_NO_NODE); + } if (!dev_maps) goto out_no_maps; - /* removes queue from unused CPUs */ - for_each_possible_cpu(cpu) { - for (i = tc, tci = cpu * num_tc; i--; tci++) + /* removes tx-queue from unused CPUs/rx-queues */ + for (j = -1; j = netif_attrmask_next(j, possible_mask, nr_ids), + j < nr_ids;) { + for (i = tc, tci = j * num_tc; i--; tci++) active |= remove_xps_queue(dev_maps, tci, index); - if (!cpumask_test_cpu(cpu, mask) || !cpu_online(cpu)) + if (!netif_attr_test_mask(j, mask, nr_ids) || + !netif_attr_test_online(j, online_mask, nr_ids)) active |= remove_xps_queue(dev_maps, tci, index); for (i = num_tc - tc, tci++; --i; tci++) active |= remove_xps_queue(dev_maps, tci, index); @@ -2337,7 +2418,10 @@ out_no_new_maps: /* free map if not active */ if (!active) { - RCU_INIT_POINTER(dev->xps_maps, NULL); + if (is_rxqs_map) + RCU_INIT_POINTER(dev->xps_rxqs_map, NULL); + else + RCU_INIT_POINTER(dev->xps_cpus_map, NULL); kfree_rcu(dev_maps, rcu); } @@ -2347,11 +2431,12 @@ out_no_maps: return 0; error: /* remove any maps that we added */ - for_each_possible_cpu(cpu) { - for (i = num_tc, tci = cpu * num_tc; i--; tci++) { - new_map = xmap_dereference(new_dev_maps->cpu_map[tci]); + for (j = -1; j = netif_attrmask_next(j, possible_mask, nr_ids), + j < nr_ids;) { + for (i = num_tc, tci = j * num_tc; i--; tci++) { + new_map = xmap_dereference(new_dev_maps->attr_map[tci]); map = dev_maps ? - xmap_dereference(dev_maps->cpu_map[tci]) : + xmap_dereference(dev_maps->attr_map[tci]) : NULL; if (new_map && new_map != map) kfree(new_map); @@ -2363,14 +2448,34 @@ error: kfree(new_dev_maps); return -ENOMEM; } + +int netif_set_xps_queue(struct net_device *dev, const struct cpumask *mask, + u16 index) +{ + return __netif_set_xps_queue(dev, cpumask_bits(mask), index, false); +} EXPORT_SYMBOL(netif_set_xps_queue); #endif +static void netdev_unbind_all_sb_channels(struct net_device *dev) +{ + struct netdev_queue *txq = &dev->_tx[dev->num_tx_queues]; + + /* Unbind any subordinate channels */ + while (txq-- != &dev->_tx[0]) { + if (txq->sb_dev) + netdev_unbind_sb_channel(dev, txq->sb_dev); + } +} + void netdev_reset_tc(struct net_device *dev) { #ifdef CONFIG_XPS netif_reset_xps_queues_gt(dev, 0); #endif + netdev_unbind_all_sb_channels(dev); + + /* Reset TC configuration of device */ dev->num_tc = 0; memset(dev->tc_to_txq, 0, sizeof(dev->tc_to_txq)); memset(dev->prio_tc_map, 0, sizeof(dev->prio_tc_map)); @@ -2399,11 +2504,77 @@ int netdev_set_num_tc(struct net_device *dev, u8 num_tc) #ifdef CONFIG_XPS netif_reset_xps_queues_gt(dev, 0); #endif + netdev_unbind_all_sb_channels(dev); + dev->num_tc = num_tc; return 0; } EXPORT_SYMBOL(netdev_set_num_tc); +void netdev_unbind_sb_channel(struct net_device *dev, + struct net_device *sb_dev) +{ + struct netdev_queue *txq = &dev->_tx[dev->num_tx_queues]; + +#ifdef CONFIG_XPS + netif_reset_xps_queues_gt(sb_dev, 0); +#endif + memset(sb_dev->tc_to_txq, 0, sizeof(sb_dev->tc_to_txq)); + memset(sb_dev->prio_tc_map, 0, sizeof(sb_dev->prio_tc_map)); + + while (txq-- != &dev->_tx[0]) { + if (txq->sb_dev == sb_dev) + txq->sb_dev = NULL; + } +} +EXPORT_SYMBOL(netdev_unbind_sb_channel); + +int netdev_bind_sb_channel_queue(struct net_device *dev, + struct net_device *sb_dev, + u8 tc, u16 count, u16 offset) +{ + /* Make certain the sb_dev and dev are already configured */ + if (sb_dev->num_tc >= 0 || 
tc >= dev->num_tc) + return -EINVAL; + + /* We cannot hand out queues we don't have */ + if ((offset + count) > dev->real_num_tx_queues) + return -EINVAL; + + /* Record the mapping */ + sb_dev->tc_to_txq[tc].count = count; + sb_dev->tc_to_txq[tc].offset = offset; + + /* Provide a way for Tx queue to find the tc_to_txq map or + * XPS map for itself. + */ + while (count--) + netdev_get_tx_queue(dev, count + offset)->sb_dev = sb_dev; + + return 0; +} +EXPORT_SYMBOL(netdev_bind_sb_channel_queue); + +int netdev_set_sb_channel(struct net_device *dev, u16 channel) +{ + /* Do not use a multiqueue device to represent a subordinate channel */ + if (netif_is_multiqueue(dev)) + return -ENODEV; + + /* We allow channels 1 - 32767 to be used for subordinate channels. + * Channel 0 is meant to be "native" mode and used only to represent + * the main root device. We allow writing 0 to reset the device back + * to normal mode after being used as a subordinate channel. + */ + if (channel > S16_MAX) + return -EINVAL; + + dev->num_tc = -channel; + + return 0; +} +EXPORT_SYMBOL(netdev_set_sb_channel); + /* * Routine to help set real_num_tx_queues. To avoid skbs mapped to queues * greater than real_num_tx_queues stale skbs on the qdisc must be flushed. @@ -2615,24 +2786,26 @@ EXPORT_SYMBOL(netif_device_attach); * Returns a Tx hash based on the given packet descriptor a Tx queues' number * to be used as a distribution range. */ -static u16 skb_tx_hash(const struct net_device *dev, struct sk_buff *skb) +static u16 skb_tx_hash(const struct net_device *dev, + const struct net_device *sb_dev, + struct sk_buff *skb) { u32 hash; u16 qoffset = 0; u16 qcount = dev->real_num_tx_queues; + if (dev->num_tc) { + u8 tc = netdev_get_prio_tc_map(dev, skb->priority); + + qoffset = sb_dev->tc_to_txq[tc].offset; + qcount = sb_dev->tc_to_txq[tc].count; + } + if (skb_rx_queue_recorded(skb)) { hash = skb_get_rx_queue(skb); while (unlikely(hash >= qcount)) hash -= qcount; - return hash; - } - - if (dev->num_tc) { - u8 tc = netdev_get_prio_tc_map(dev, skb->priority); - - qoffset = dev->tc_to_txq[tc].offset; - qcount = dev->tc_to_txq[tc].count; + return hash + qoffset; } return (u16) reciprocal_scale(skb_get_hash(skb), qcount) + qoffset; @@ -3376,32 +3549,64 @@ sch_handle_egress(struct sk_buff *skb, int *ret, struct net_device *dev) } #endif /* CONFIG_NET_EGRESS */ -static inline int get_xps_queue(struct net_device *dev, struct sk_buff *skb) +#ifdef CONFIG_XPS +static int __get_xps_queue_idx(struct net_device *dev, struct sk_buff *skb, + struct xps_dev_maps *dev_maps, unsigned int tci) +{ + struct xps_map *map; + int queue_index = -1; + + if (dev->num_tc) { + tci *= dev->num_tc; + tci += netdev_get_prio_tc_map(dev, skb->priority); + } + + map = rcu_dereference(dev_maps->attr_map[tci]); + if (map) { + if (map->len == 1) + queue_index = map->queues[0]; + else + queue_index = map->queues[reciprocal_scale( + skb_get_hash(skb), map->len)]; + if (unlikely(queue_index >= dev->real_num_tx_queues)) + queue_index = -1; + } + return queue_index; +} +#endif + +static int get_xps_queue(struct net_device *dev, struct net_device *sb_dev, + struct sk_buff *skb) { #ifdef CONFIG_XPS struct xps_dev_maps *dev_maps; - struct xps_map *map; + struct sock *sk = skb->sk; int queue_index = -1; + if (!static_key_false(&xps_needed)) + return -1; + rcu_read_lock(); - dev_maps = rcu_dereference(dev->xps_maps); + if (!static_key_false(&xps_rxqs_needed)) + goto get_cpus_map; + + dev_maps = rcu_dereference(sb_dev->xps_rxqs_map); if (dev_maps) { - unsigned int tci = 
skb->sender_cpu - 1; + int tci = sk_rx_queue_get(sk); - if (dev->num_tc) { - tci *= dev->num_tc; - tci += netdev_get_prio_tc_map(dev, skb->priority); - } + if (tci >= 0 && tci < dev->num_rx_queues) + queue_index = __get_xps_queue_idx(dev, skb, dev_maps, + tci); + } - map = rcu_dereference(dev_maps->cpu_map[tci]); - if (map) { - if (map->len == 1) - queue_index = map->queues[0]; - else - queue_index = map->queues[reciprocal_scale(skb_get_hash(skb), - map->len)]; - if (unlikely(queue_index >= dev->real_num_tx_queues)) - queue_index = -1; +get_cpus_map: + if (queue_index < 0) { + dev_maps = rcu_dereference(sb_dev->xps_cpus_map); + if (dev_maps) { + unsigned int tci = skb->sender_cpu - 1; + + queue_index = __get_xps_queue_idx(dev, skb, dev_maps, + tci); } } rcu_read_unlock(); @@ -3412,17 +3617,36 @@ static inline int get_xps_queue(struct net_device *dev, struct sk_buff *skb) #endif } -static u16 __netdev_pick_tx(struct net_device *dev, struct sk_buff *skb) +u16 dev_pick_tx_zero(struct net_device *dev, struct sk_buff *skb, + struct net_device *sb_dev, + select_queue_fallback_t fallback) +{ + return 0; +} +EXPORT_SYMBOL(dev_pick_tx_zero); + +u16 dev_pick_tx_cpu_id(struct net_device *dev, struct sk_buff *skb, + struct net_device *sb_dev, + select_queue_fallback_t fallback) +{ + return (u16)raw_smp_processor_id() % dev->real_num_tx_queues; +} +EXPORT_SYMBOL(dev_pick_tx_cpu_id); + +static u16 __netdev_pick_tx(struct net_device *dev, struct sk_buff *skb, + struct net_device *sb_dev) { struct sock *sk = skb->sk; int queue_index = sk_tx_queue_get(sk); + sb_dev = sb_dev ? : dev; + if (queue_index < 0 || skb->ooo_okay || queue_index >= dev->real_num_tx_queues) { - int new_index = get_xps_queue(dev, skb); + int new_index = get_xps_queue(dev, sb_dev, skb); if (new_index < 0) - new_index = skb_tx_hash(dev, skb); + new_index = skb_tx_hash(dev, sb_dev, skb); if (queue_index != new_index && sk && sk_fullsock(sk) && @@ -3437,7 +3661,7 @@ static u16 __netdev_pick_tx(struct net_device *dev, struct sk_buff *skb) struct netdev_queue *netdev_pick_tx(struct net_device *dev, struct sk_buff *skb, - void *accel_priv) + struct net_device *sb_dev) { int queue_index = 0; @@ -3452,10 +3676,10 @@ struct netdev_queue *netdev_pick_tx(struct net_device *dev, const struct net_device_ops *ops = dev->netdev_ops; if (ops->ndo_select_queue) - queue_index = ops->ndo_select_queue(dev, skb, accel_priv, + queue_index = ops->ndo_select_queue(dev, skb, sb_dev, __netdev_pick_tx); else - queue_index = __netdev_pick_tx(dev, skb); + queue_index = __netdev_pick_tx(dev, skb, sb_dev); queue_index = netdev_cap_txqueue(dev, queue_index); } @@ -3467,7 +3691,7 @@ struct netdev_queue *netdev_pick_tx(struct net_device *dev, /** * __dev_queue_xmit - transmit a buffer * @skb: buffer to transmit - * @accel_priv: private data used for L2 forwarding offload + * @sb_dev: suboordinate device used for L2 forwarding offload * * Queue a buffer for transmission to a network device. The caller must * have set the device and priority and built the buffer before calling @@ -3490,7 +3714,7 @@ struct netdev_queue *netdev_pick_tx(struct net_device *dev, * the BH enable code must have IRQs enabled so that it will not deadlock. 
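[Editorial aside] get_xps_queue() above now consults the rx-queue map first (indexed via sk_rx_queue_get()) and falls back to the CPU map (indexed via sender_cpu - 1); in both cases __get_xps_queue_idx() folds the traffic class into the lookup as tci = attr_index * num_tc + tc and, for maps with more than one entry, spreads flows with reciprocal_scale(). A self-contained sketch of that selection step, with made-up map contents, could look like this:

/* Userspace model of the XPS queue pick in __get_xps_queue_idx():
 * one map per (attribute index, traffic class) pair, and
 * reciprocal_scale() to spread flow hashes across the map entries.
 */
#include <stdio.h>
#include <stdint.h>

/* same multiply-and-shift definition as the kernel helper: maps a
 * 32-bit value into [0, ep_ro) without a division */
static uint32_t reciprocal_scale(uint32_t val, uint32_t ep_ro)
{
	return (uint32_t)(((uint64_t)val * ep_ro) >> 32);
}

struct xps_map_model {
	unsigned int len;
	uint16_t queues[4];
};

static int pick_queue(const struct xps_map_model *maps,
		      unsigned int num_tc, unsigned int attr_index,
		      unsigned int tc, uint32_t flow_hash)
{
	unsigned int tci = attr_index * num_tc + tc;
	const struct xps_map_model *map = &maps[tci];

	if (!map->len)
		return -1;	/* no XPS mapping, caller falls back */
	if (map->len == 1)
		return map->queues[0];
	return map->queues[reciprocal_scale(flow_hash, map->len)];
}

int main(void)
{
	/* hypothetical layout: 2 traffic classes, 2 attribute indices
	 * (CPUs or rx queues), so 4 (index, tc) slots in total */
	struct xps_map_model maps[4] = {
		{ .len = 1, .queues = { 0 } },		/* idx 0, tc 0 */
		{ .len = 2, .queues = { 2, 3 } },	/* idx 0, tc 1 */
		{ .len = 1, .queues = { 1 } },		/* idx 1, tc 0 */
		{ .len = 0 },				/* idx 1, tc 1 */
	};

	printf("queue = %d\n", pick_queue(maps, 2, 0, 1, 0xdeadbeef));
	printf("queue = %d\n", pick_queue(maps, 2, 1, 1, 0xdeadbeef));
	return 0;
}

The multiply-and-shift reduction is the reason a map can be resized without changing the hash function: only the length fed to reciprocal_scale() changes.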
* --BLG */ -static int __dev_queue_xmit(struct sk_buff *skb, void *accel_priv) +static int __dev_queue_xmit(struct sk_buff *skb, struct net_device *sb_dev) { struct net_device *dev = skb->dev; struct netdev_queue *txq; @@ -3529,7 +3753,7 @@ static int __dev_queue_xmit(struct sk_buff *skb, void *accel_priv) else skb_dst_force(skb); - txq = netdev_pick_tx(dev, skb, accel_priv); + txq = netdev_pick_tx(dev, skb, sb_dev); q = rcu_dereference_bh(txq->qdisc); trace_net_dev_queue(skb); @@ -3603,9 +3827,9 @@ int dev_queue_xmit(struct sk_buff *skb) } EXPORT_SYMBOL(dev_queue_xmit); -int dev_queue_xmit_accel(struct sk_buff *skb, void *accel_priv) +int dev_queue_xmit_accel(struct sk_buff *skb, struct net_device *sb_dev) { - return __dev_queue_xmit(skb, accel_priv); + return __dev_queue_xmit(skb, sb_dev); } EXPORT_SYMBOL(dev_queue_xmit_accel); @@ -4494,7 +4718,8 @@ static inline int nf_ingress(struct sk_buff *skb, struct packet_type **pt_prev, return 0; } -static int __netif_receive_skb_core(struct sk_buff *skb, bool pfmemalloc) +static int __netif_receive_skb_core(struct sk_buff *skb, bool pfmemalloc, + struct packet_type **ppt_prev) { struct packet_type *ptype, *pt_prev; rx_handler_func_t *rx_handler; @@ -4624,8 +4849,7 @@ skip_classify: if (pt_prev) { if (unlikely(skb_orphan_frags_rx(skb, GFP_ATOMIC))) goto drop; - else - ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev); + *ppt_prev = pt_prev; } else { drop: if (!deliver_exact) @@ -4643,6 +4867,18 @@ out: return ret; } +static int __netif_receive_skb_one_core(struct sk_buff *skb, bool pfmemalloc) +{ + struct net_device *orig_dev = skb->dev; + struct packet_type *pt_prev = NULL; + int ret; + + ret = __netif_receive_skb_core(skb, pfmemalloc, &pt_prev); + if (pt_prev) + ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev); + return ret; +} + /** * netif_receive_skb_core - special purpose version of netif_receive_skb * @skb: buffer to process @@ -4663,13 +4899,72 @@ int netif_receive_skb_core(struct sk_buff *skb) int ret; rcu_read_lock(); - ret = __netif_receive_skb_core(skb, false); + ret = __netif_receive_skb_one_core(skb, false); rcu_read_unlock(); return ret; } EXPORT_SYMBOL(netif_receive_skb_core); +static inline void __netif_receive_skb_list_ptype(struct list_head *head, + struct packet_type *pt_prev, + struct net_device *orig_dev) +{ + struct sk_buff *skb, *next; + + if (!pt_prev) + return; + if (list_empty(head)) + return; + if (pt_prev->list_func != NULL) + pt_prev->list_func(head, pt_prev, orig_dev); + else + list_for_each_entry_safe(skb, next, head, list) + pt_prev->func(skb, skb->dev, pt_prev, orig_dev); +} + +static void __netif_receive_skb_list_core(struct list_head *head, bool pfmemalloc) +{ + /* Fast-path assumptions: + * - There is no RX handler. + * - Only one packet_type matches. + * If either of these fails, we will end up doing some per-packet + * processing in-line, then handling the 'last ptype' for the whole + * sublist. This can't cause out-of-order delivery to any single ptype, + * because the 'last ptype' must be constant across the sublist, and all + * other ptypes are handled per-packet. 
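[Editorial aside] The comment above describes the batching done by __netif_receive_skb_list_core(), whose body appears in the next hunk: packets are queued on a sublist while they keep resolving to the same (packet_type, orig_dev) pair, and the sublist is delivered in one call whenever that pair changes or the input runs out. A small userspace model of that group-consecutive-by-key pattern, with integers standing in for packets and handlers, is:

/* Userspace model of the sublist batching in __netif_receive_skb_list_core():
 * walk the input in order, keep extending the current batch while the key
 * (standing in for the (packet_type, orig_dev) pair) stays the same, and
 * dispatch the whole batch whenever the key changes or the input ends.
 */
#include <stdio.h>

static void dispatch(int key, const int *batch, int n)
{
	if (n == 0)
		return;		/* nothing accumulated yet */
	printf("deliver %d packet(s) to handler %d\n", n, key);
	(void)batch;
}

int main(void)
{
	/* keys of consecutive "packets"; runs of equal keys form one batch */
	int keys[] = { 7, 7, 7, 3, 3, 7, 7 };
	int nkeys = (int)(sizeof(keys) / sizeof(keys[0]));
	int batch[16];
	int cur_key = -1, n = 0;

	for (int i = 0; i < nkeys; i++) {
		if (keys[i] != cur_key) {
			dispatch(cur_key, batch, n);	/* flush old batch */
			cur_key = keys[i];
			n = 0;				/* start a new one */
		}
		batch[n++] = i;
	}
	dispatch(cur_key, batch, n);			/* final batch */
	return 0;
}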
+ */ + /* Current (common) ptype of sublist */ + struct packet_type *pt_curr = NULL; + /* Current (common) orig_dev of sublist */ + struct net_device *od_curr = NULL; + struct list_head sublist; + struct sk_buff *skb, *next; + + INIT_LIST_HEAD(&sublist); + list_for_each_entry_safe(skb, next, head, list) { + struct net_device *orig_dev = skb->dev; + struct packet_type *pt_prev = NULL; + + list_del(&skb->list); + __netif_receive_skb_core(skb, pfmemalloc, &pt_prev); + if (!pt_prev) + continue; + if (pt_curr != pt_prev || od_curr != orig_dev) { + /* dispatch old sublist */ + __netif_receive_skb_list_ptype(&sublist, pt_curr, od_curr); + /* start new sublist */ + INIT_LIST_HEAD(&sublist); + pt_curr = pt_prev; + od_curr = orig_dev; + } + list_add_tail(&skb->list, &sublist); + } + + /* dispatch final sublist */ + __netif_receive_skb_list_ptype(&sublist, pt_curr, od_curr); +} + static int __netif_receive_skb(struct sk_buff *skb) { int ret; @@ -4687,14 +4982,44 @@ static int __netif_receive_skb(struct sk_buff *skb) * context down to all allocation sites. */ noreclaim_flag = memalloc_noreclaim_save(); - ret = __netif_receive_skb_core(skb, true); + ret = __netif_receive_skb_one_core(skb, true); memalloc_noreclaim_restore(noreclaim_flag); } else - ret = __netif_receive_skb_core(skb, false); + ret = __netif_receive_skb_one_core(skb, false); return ret; } +static void __netif_receive_skb_list(struct list_head *head) +{ + unsigned long noreclaim_flag = 0; + struct sk_buff *skb, *next; + bool pfmemalloc = false; /* Is current sublist PF_MEMALLOC? */ + + list_for_each_entry_safe(skb, next, head, list) { + if ((sk_memalloc_socks() && skb_pfmemalloc(skb)) != pfmemalloc) { + struct list_head sublist; + + /* Handle the previous sublist */ + list_cut_before(&sublist, head, &skb->list); + if (!list_empty(&sublist)) + __netif_receive_skb_list_core(&sublist, pfmemalloc); + pfmemalloc = !pfmemalloc; + /* See comments in __netif_receive_skb */ + if (pfmemalloc) + noreclaim_flag = memalloc_noreclaim_save(); + else + memalloc_noreclaim_restore(noreclaim_flag); + } + } + /* Handle the remaining sublist */ + if (!list_empty(head)) + __netif_receive_skb_list_core(head, pfmemalloc); + /* Restore pflags */ + if (pfmemalloc) + memalloc_noreclaim_restore(noreclaim_flag); +} + static int generic_xdp_install(struct net_device *dev, struct netdev_bpf *xdp) { struct bpf_prog *old = rtnl_dereference(dev->xdp_prog); @@ -4717,7 +5042,6 @@ static int generic_xdp_install(struct net_device *dev, struct netdev_bpf *xdp) break; case XDP_QUERY_PROG: - xdp->prog_attached = !!old; xdp->prog_id = old ? 
old->aux->id : 0; break; @@ -4769,6 +5093,55 @@ static int netif_receive_skb_internal(struct sk_buff *skb) return ret; } +static void netif_receive_skb_list_internal(struct list_head *head) +{ + struct bpf_prog *xdp_prog = NULL; + struct sk_buff *skb, *next; + struct list_head sublist; + + INIT_LIST_HEAD(&sublist); + list_for_each_entry_safe(skb, next, head, list) { + net_timestamp_check(netdev_tstamp_prequeue, skb); + list_del(&skb->list); + if (!skb_defer_rx_timestamp(skb)) + list_add_tail(&skb->list, &sublist); + } + list_splice_init(&sublist, head); + + if (static_branch_unlikely(&generic_xdp_needed_key)) { + preempt_disable(); + rcu_read_lock(); + list_for_each_entry_safe(skb, next, head, list) { + xdp_prog = rcu_dereference(skb->dev->xdp_prog); + list_del(&skb->list); + if (do_xdp_generic(xdp_prog, skb) == XDP_PASS) + list_add_tail(&skb->list, &sublist); + } + rcu_read_unlock(); + preempt_enable(); + /* Put passed packets back on main list */ + list_splice_init(&sublist, head); + } + + rcu_read_lock(); +#ifdef CONFIG_RPS + if (static_key_false(&rps_needed)) { + list_for_each_entry_safe(skb, next, head, list) { + struct rps_dev_flow voidflow, *rflow = &voidflow; + int cpu = get_rps_cpu(skb->dev, skb, &rflow); + + if (cpu >= 0) { + /* Will be handled, remove from list */ + list_del(&skb->list); + enqueue_to_backlog(skb, cpu, &rflow->last_qtail); + } + } + } +#endif + __netif_receive_skb_list(head); + rcu_read_unlock(); +} + /** * netif_receive_skb - process receive buffer from network * @skb: buffer to process @@ -4792,6 +5165,28 @@ int netif_receive_skb(struct sk_buff *skb) } EXPORT_SYMBOL(netif_receive_skb); +/** + * netif_receive_skb_list - process many receive buffers from network + * @head: list of skbs to process. + * + * Since return value of netif_receive_skb() is normally ignored, and + * wouldn't be meaningful for a list, this function returns void. + * + * This function may only be called from softirq context and interrupts + * should be enabled. + */ +void netif_receive_skb_list(struct list_head *head) +{ + struct sk_buff *skb; + + if (list_empty(head)) + return; + list_for_each_entry(skb, head, list) + trace_netif_receive_skb_list_entry(skb); + netif_receive_skb_list_internal(head); +} +EXPORT_SYMBOL(netif_receive_skb_list); + DEFINE_PER_CPU(struct work_struct, flush_works); /* Network device is going away, flush any packets still pending */ @@ -4875,42 +5270,50 @@ out: return netif_receive_skb_internal(skb); } -/* napi->gro_list contains packets ordered by age. - * youngest packets at the head of it. - * Complete skbs in reverse order to reduce latencies. - */ -void napi_gro_flush(struct napi_struct *napi, bool flush_old) +static void __napi_gro_flush_chain(struct napi_struct *napi, u32 index, + bool flush_old) { - struct sk_buff *skb, *prev = NULL; - - /* scan list and build reverse chain */ - for (skb = napi->gro_list; skb != NULL; skb = skb->next) { - skb->prev = prev; - prev = skb; - } - - for (skb = prev; skb; skb = prev) { - skb->next = NULL; + struct list_head *head = &napi->gro_hash[index].list; + struct sk_buff *skb, *p; + list_for_each_entry_safe_reverse(skb, p, head, list) { if (flush_old && NAPI_GRO_CB(skb)->age == jiffies) return; - - prev = skb->prev; + list_del(&skb->list); + skb->next = NULL; napi_gro_complete(skb); - napi->gro_count--; + napi->gro_hash[index].count--; } - napi->gro_list = NULL; + if (!napi->gro_hash[index].count) + __clear_bit(index, &napi->gro_bitmask); +} + +/* napi->gro_hash[].list contains packets ordered by age. 
+ * youngest packets at the head of it. + * Complete skbs in reverse order to reduce latencies. + */ +void napi_gro_flush(struct napi_struct *napi, bool flush_old) +{ + u32 i; + + for (i = 0; i < GRO_HASH_BUCKETS; i++) { + if (test_bit(i, &napi->gro_bitmask)) + __napi_gro_flush_chain(napi, i, flush_old); + } } EXPORT_SYMBOL(napi_gro_flush); -static void gro_list_prepare(struct napi_struct *napi, struct sk_buff *skb) +static struct list_head *gro_list_prepare(struct napi_struct *napi, + struct sk_buff *skb) { - struct sk_buff *p; unsigned int maclen = skb->dev->hard_header_len; u32 hash = skb_get_hash_raw(skb); + struct list_head *head; + struct sk_buff *p; - for (p = napi->gro_list; p; p = p->next) { + head = &napi->gro_hash[hash & (GRO_HASH_BUCKETS - 1)].list; + list_for_each_entry(p, head, list) { unsigned long diffs; NAPI_GRO_CB(p)->flush = 0; @@ -4933,6 +5336,8 @@ static void gro_list_prepare(struct napi_struct *napi, struct sk_buff *skb) maclen); NAPI_GRO_CB(p)->same_flow = !diffs; } + + return head; } static void skb_gro_reset_offset(struct sk_buff *skb) @@ -4975,20 +5380,41 @@ static void gro_pull_from_frag0(struct sk_buff *skb, int grow) } } +static void gro_flush_oldest(struct list_head *head) +{ + struct sk_buff *oldest; + + oldest = list_last_entry(head, struct sk_buff, list); + + /* We are called with head length >= MAX_GRO_SKBS, so this is + * impossible. + */ + if (WARN_ON_ONCE(!oldest)) + return; + + /* Do not adjust napi->gro_hash[].count, caller is adding a new + * SKB to the chain. + */ + list_del(&oldest->list); + napi_gro_complete(oldest); +} + static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb) { - struct sk_buff **pp = NULL; + u32 hash = skb_get_hash_raw(skb) & (GRO_HASH_BUCKETS - 1); + struct list_head *head = &offload_base; struct packet_offload *ptype; __be16 type = skb->protocol; - struct list_head *head = &offload_base; - int same_flow; + struct list_head *gro_head; + struct sk_buff *pp = NULL; enum gro_result ret; + int same_flow; int grow; if (netif_elide_gro(skb->dev)) goto normal; - gro_list_prepare(napi, skb); + gro_head = gro_list_prepare(napi, skb); rcu_read_lock(); list_for_each_entry_rcu(ptype, head, list) { @@ -5022,7 +5448,7 @@ static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff NAPI_GRO_CB(skb)->csum_valid = 0; } - pp = ptype->callbacks.gro_receive(&napi->gro_list, skb); + pp = ptype->callbacks.gro_receive(gro_head, skb); break; } rcu_read_unlock(); @@ -5039,12 +5465,10 @@ static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff ret = NAPI_GRO_CB(skb)->free ? 
GRO_MERGED_FREE : GRO_MERGED; if (pp) { - struct sk_buff *nskb = *pp; - - *pp = nskb->next; - nskb->next = NULL; - napi_gro_complete(nskb); - napi->gro_count--; + list_del(&pp->list); + pp->next = NULL; + napi_gro_complete(pp); + napi->gro_hash[hash].count--; } if (same_flow) @@ -5053,26 +5477,16 @@ static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff if (NAPI_GRO_CB(skb)->flush) goto normal; - if (unlikely(napi->gro_count >= MAX_GRO_SKBS)) { - struct sk_buff *nskb = napi->gro_list; - - /* locate the end of the list to select the 'oldest' flow */ - while (nskb->next) { - pp = &nskb->next; - nskb = *pp; - } - *pp = NULL; - nskb->next = NULL; - napi_gro_complete(nskb); + if (unlikely(napi->gro_hash[hash].count >= MAX_GRO_SKBS)) { + gro_flush_oldest(gro_head); } else { - napi->gro_count++; + napi->gro_hash[hash].count++; } NAPI_GRO_CB(skb)->count = 1; NAPI_GRO_CB(skb)->age = jiffies; NAPI_GRO_CB(skb)->last = skb; skb_shinfo(skb)->gso_size = skb_gro_len(skb); - skb->next = napi->gro_list; - napi->gro_list = skb; + list_add(&skb->list, gro_head); ret = GRO_HELD; pull: @@ -5080,6 +5494,13 @@ pull: if (grow > 0) gro_pull_from_frag0(skb, grow); ok: + if (napi->gro_hash[hash].count) { + if (!test_bit(hash, &napi->gro_bitmask)) + __set_bit(hash, &napi->gro_bitmask); + } else if (test_bit(hash, &napi->gro_bitmask)) { + __clear_bit(hash, &napi->gro_bitmask); + } + return ret; normal: @@ -5478,7 +5899,7 @@ bool napi_complete_done(struct napi_struct *n, int work_done) NAPIF_STATE_IN_BUSY_POLL))) return false; - if (n->gro_list) { + if (n->gro_bitmask) { unsigned long timeout = 0; if (work_done) @@ -5687,7 +6108,7 @@ static enum hrtimer_restart napi_watchdog(struct hrtimer *timer) /* Note : we use a relaxed variant of napi_schedule_prep() not setting * NAPI_STATE_MISSED, since we do not react to a device IRQ. 
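[Editorial aside] The GRO hunks above replace the single napi->gro_list with GRO_HASH_BUCKETS per-hash lists plus a gro_bitmask that records which buckets are non-empty: a bit is set when a bucket gains its first held skb and cleared once the bucket drains, so the flush paths can skip empty buckets cheaply. The sketch below models only that bucket/bitmask invariant in plain C; it leaves out the actual GRO merging.

/* Userspace model of the per-NAPI GRO hash buckets: the flow hash picks
 * a bucket, each bucket keeps its own count, and a bitmask mirrors which
 * buckets currently hold packets so flushing can skip empty ones.
 */
#include <stdio.h>

#define GRO_HASH_BUCKETS 8	/* same bucket count as the patch */

struct gro_model {
	unsigned int count[GRO_HASH_BUCKETS];
	unsigned long bitmask;
};

static void add_skb(struct gro_model *g, unsigned int flow_hash)
{
	unsigned int hash = flow_hash & (GRO_HASH_BUCKETS - 1);

	if (!g->count[hash]++)
		g->bitmask |= 1UL << hash;	/* bucket became non-empty */
}

static void flush_bucket(struct gro_model *g, unsigned int hash)
{
	g->count[hash] = 0;
	g->bitmask &= ~(1UL << hash);		/* bucket is empty again */
}

static void flush_all(struct gro_model *g)
{
	for (unsigned int i = 0; i < GRO_HASH_BUCKETS; i++) {
		if (g->bitmask & (1UL << i)) {
			printf("flushing bucket %u (%u skbs)\n", i, g->count[i]);
			flush_bucket(g, i);
		}
	}
}

int main(void)
{
	struct gro_model g = { { 0 }, 0 };

	add_skb(&g, 0x1234);	/* lands in bucket 4 */
	add_skb(&g, 0xabcd);	/* lands in bucket 5 */
	add_skb(&g, 0x0004);	/* bucket 4 again */
	flush_all(&g);
	printf("bitmask now %#lx\n", g.bitmask);
	return 0;
}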
*/ - if (napi->gro_list && !napi_disable_pending(napi) && + if (napi->gro_bitmask && !napi_disable_pending(napi) && !test_and_set_bit(NAPI_STATE_SCHED, &napi->state)) __napi_schedule_irqoff(napi); @@ -5697,11 +6118,16 @@ static enum hrtimer_restart napi_watchdog(struct hrtimer *timer) void netif_napi_add(struct net_device *dev, struct napi_struct *napi, int (*poll)(struct napi_struct *, int), int weight) { + int i; + INIT_LIST_HEAD(&napi->poll_list); hrtimer_init(&napi->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_PINNED); napi->timer.function = napi_watchdog; - napi->gro_count = 0; - napi->gro_list = NULL; + napi->gro_bitmask = 0; + for (i = 0; i < GRO_HASH_BUCKETS; i++) { + INIT_LIST_HEAD(&napi->gro_hash[i].list); + napi->gro_hash[i].count = 0; + } napi->skb = NULL; napi->poll = poll; if (weight > NAPI_POLL_WEIGHT) @@ -5734,6 +6160,19 @@ void napi_disable(struct napi_struct *n) } EXPORT_SYMBOL(napi_disable); +static void flush_gro_hash(struct napi_struct *napi) +{ + int i; + + for (i = 0; i < GRO_HASH_BUCKETS; i++) { + struct sk_buff *skb, *n; + + list_for_each_entry_safe(skb, n, &napi->gro_hash[i].list, list) + kfree_skb(skb); + napi->gro_hash[i].count = 0; + } +} + /* Must be called in process context */ void netif_napi_del(struct napi_struct *napi) { @@ -5743,9 +6182,8 @@ void netif_napi_del(struct napi_struct *napi) list_del_init(&napi->dev_list); napi_free_frags(napi); - kfree_skb_list(napi->gro_list); - napi->gro_list = NULL; - napi->gro_count = 0; + flush_gro_hash(napi); + napi->gro_bitmask = 0; } EXPORT_SYMBOL(netif_napi_del); @@ -5787,7 +6225,7 @@ static int napi_poll(struct napi_struct *n, struct list_head *repoll) goto out_unlock; } - if (n->gro_list) { + if (n->gro_bitmask) { /* flush too old packets * If HZ < 1000, flush all packets. */ @@ -7276,23 +7714,21 @@ int dev_change_proto_down(struct net_device *dev, bool proto_down) } EXPORT_SYMBOL(dev_change_proto_down); -void __dev_xdp_query(struct net_device *dev, bpf_op_t bpf_op, - struct netdev_bpf *xdp) +u32 __dev_xdp_query(struct net_device *dev, bpf_op_t bpf_op, + enum bpf_netdev_command cmd) { - memset(xdp, 0, sizeof(*xdp)); - xdp->command = XDP_QUERY_PROG; + struct netdev_bpf xdp; - /* Query must always succeed. */ - WARN_ON(bpf_op(dev, xdp) < 0); -} + if (!bpf_op) + return 0; -static u8 __dev_xdp_attached(struct net_device *dev, bpf_op_t bpf_op) -{ - struct netdev_bpf xdp; + memset(&xdp, 0, sizeof(xdp)); + xdp.command = cmd; - __dev_xdp_query(dev, bpf_op, &xdp); + /* Query must always succeed. 
*/ + WARN_ON(bpf_op(dev, &xdp) < 0 && cmd == XDP_QUERY_PROG); - return xdp.prog_attached; + return xdp.prog_id; } static int dev_xdp_install(struct net_device *dev, bpf_op_t bpf_op, @@ -7326,12 +7762,19 @@ static void dev_xdp_uninstall(struct net_device *dev) if (!ndo_bpf) return; - __dev_xdp_query(dev, ndo_bpf, &xdp); - if (xdp.prog_attached == XDP_ATTACHED_NONE) - return; + memset(&xdp, 0, sizeof(xdp)); + xdp.command = XDP_QUERY_PROG; + WARN_ON(ndo_bpf(dev, &xdp)); + if (xdp.prog_id) + WARN_ON(dev_xdp_install(dev, ndo_bpf, NULL, xdp.prog_flags, + NULL)); - /* Program removal should always succeed */ - WARN_ON(dev_xdp_install(dev, ndo_bpf, NULL, xdp.prog_flags, NULL)); + /* Remove HW offload */ + memset(&xdp, 0, sizeof(xdp)); + xdp.command = XDP_QUERY_PROG_HW; + if (!ndo_bpf(dev, &xdp) && xdp.prog_id) + WARN_ON(dev_xdp_install(dev, ndo_bpf, NULL, xdp.prog_flags, + NULL)); } /** @@ -7347,12 +7790,15 @@ int dev_change_xdp_fd(struct net_device *dev, struct netlink_ext_ack *extack, int fd, u32 flags) { const struct net_device_ops *ops = dev->netdev_ops; + enum bpf_netdev_command query; struct bpf_prog *prog = NULL; bpf_op_t bpf_op, bpf_chk; int err; ASSERT_RTNL(); + query = flags & XDP_FLAGS_HW_MODE ? XDP_QUERY_PROG_HW : XDP_QUERY_PROG; + bpf_op = bpf_chk = ops->ndo_bpf; if (!bpf_op && (flags & (XDP_FLAGS_DRV_MODE | XDP_FLAGS_HW_MODE))) return -EOPNOTSUPP; @@ -7362,10 +7808,11 @@ int dev_change_xdp_fd(struct net_device *dev, struct netlink_ext_ack *extack, bpf_chk = generic_xdp_install; if (fd >= 0) { - if (bpf_chk && __dev_xdp_attached(dev, bpf_chk)) + if (__dev_xdp_query(dev, bpf_chk, XDP_QUERY_PROG) || + __dev_xdp_query(dev, bpf_chk, XDP_QUERY_PROG_HW)) return -EEXIST; if ((flags & XDP_FLAGS_UPDATE_IF_NOEXIST) && - __dev_xdp_attached(dev, bpf_op)) + __dev_xdp_query(dev, bpf_op, query)) return -EBUSY; prog = bpf_prog_get_type_dev(fd, BPF_PROG_TYPE_XDP, @@ -8834,6 +9281,9 @@ static struct hlist_head * __net_init netdev_create_hash(void) /* Initialize per network namespace state */ static int __net_init netdev_init(struct net *net) { + BUILD_BUG_ON(GRO_HASH_BUCKETS > + 8 * FIELD_SIZEOF(struct napi_struct, gro_bitmask)); + if (net != &init_net) INIT_LIST_HEAD(&net->dev_base_head); diff --git a/net/core/devlink.c b/net/core/devlink.c index 22099705cc41..65fc366a78a4 100644 --- a/net/core/devlink.c +++ b/net/core/devlink.c @@ -326,6 +326,57 @@ devlink_sb_tc_index_get_from_info(struct devlink_sb *devlink_sb, pool_type, p_tc_index); } +struct devlink_region { + struct devlink *devlink; + struct list_head list; + const char *name; + struct list_head snapshot_list; + u32 max_snapshots; + u32 cur_snapshots; + u64 size; +}; + +struct devlink_snapshot { + struct list_head list; + struct devlink_region *region; + devlink_snapshot_data_dest_t *data_destructor; + u64 data_len; + u8 *data; + u32 id; +}; + +static struct devlink_region * +devlink_region_get_by_name(struct devlink *devlink, const char *region_name) +{ + struct devlink_region *region; + + list_for_each_entry(region, &devlink->region_list, list) + if (!strcmp(region->name, region_name)) + return region; + + return NULL; +} + +static struct devlink_snapshot * +devlink_region_snapshot_get_by_id(struct devlink_region *region, u32 id) +{ + struct devlink_snapshot *snapshot; + + list_for_each_entry(snapshot, ®ion->snapshot_list, list) + if (snapshot->id == id) + return snapshot; + + return NULL; +} + +static void devlink_region_snapshot_del(struct devlink_snapshot *snapshot) +{ + snapshot->region->cur_snapshots--; + list_del(&snapshot->list); + 
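[Editorial aside] The devlink code above introduces regions that own a list of snapshots, located by region name and snapshot id, where deleting a snapshot unlinks it, decrements cur_snapshots and invokes the caller-supplied data destructor. The following userspace model mirrors that bookkeeping; struct names such as region_model, the "fw-health" region name and the helper names are invented for the sketch.

/* Userspace model of the devlink region/snapshot bookkeeping added above:
 * a region owns a list of snapshots, snapshots are found by id, and
 * deleting one invokes the caller-supplied data destructor.
 */
#include <stdio.h>
#include <stdlib.h>

typedef void (data_dest_t)(void *data);

struct snapshot_model {
	struct snapshot_model *next;
	unsigned int id;
	void *data;
	data_dest_t *data_destructor;
};

struct region_model {
	const char *name;
	unsigned int cur_snapshots;
	struct snapshot_model *snapshots;
};

static struct snapshot_model *snapshot_get_by_id(struct region_model *r,
						 unsigned int id)
{
	for (struct snapshot_model *s = r->snapshots; s; s = s->next)
		if (s->id == id)
			return s;
	return NULL;
}

static void snapshot_del(struct region_model *r, struct snapshot_model *snap)
{
	struct snapshot_model **pp = &r->snapshots;

	while (*pp && *pp != snap)
		pp = &(*pp)->next;
	if (!*pp)
		return;
	*pp = snap->next;			/* unlink from the region */
	r->cur_snapshots--;
	snap->data_destructor(snap->data);	/* free the captured data */
	free(snap);
}

int main(void)
{
	struct region_model region = { .name = "fw-health" };
	struct snapshot_model *snap = calloc(1, sizeof(*snap));

	if (!snap)
		return 1;
	snap->id = 1;
	snap->data = malloc(64);	/* stands in for the snapshot bytes */
	snap->data_destructor = free;
	snap->next = region.snapshots;
	region.snapshots = snap;
	region.cur_snapshots = 1;

	if (snapshot_get_by_id(&region, 1))
		printf("snapshot 1 found in region %s\n", region.name);
	snapshot_del(&region, snap);
	printf("%u snapshot(s) left\n", region.cur_snapshots);
	return 0;
}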
(*snapshot->data_destructor)(snapshot->data); + kfree(snapshot); +} + #define DEVLINK_NL_FLAG_NEED_DEVLINK BIT(0) #define DEVLINK_NL_FLAG_NEED_PORT BIT(1) #define DEVLINK_NL_FLAG_NEED_SB BIT(2) @@ -2604,6 +2655,919 @@ static int devlink_nl_cmd_reload(struct sk_buff *skb, struct genl_info *info) return devlink->ops->reload(devlink, info->extack); } +static const struct devlink_param devlink_param_generic[] = { + { + .id = DEVLINK_PARAM_GENERIC_ID_INT_ERR_RESET, + .name = DEVLINK_PARAM_GENERIC_INT_ERR_RESET_NAME, + .type = DEVLINK_PARAM_GENERIC_INT_ERR_RESET_TYPE, + }, + { + .id = DEVLINK_PARAM_GENERIC_ID_MAX_MACS, + .name = DEVLINK_PARAM_GENERIC_MAX_MACS_NAME, + .type = DEVLINK_PARAM_GENERIC_MAX_MACS_TYPE, + }, + { + .id = DEVLINK_PARAM_GENERIC_ID_ENABLE_SRIOV, + .name = DEVLINK_PARAM_GENERIC_ENABLE_SRIOV_NAME, + .type = DEVLINK_PARAM_GENERIC_ENABLE_SRIOV_TYPE, + }, + { + .id = DEVLINK_PARAM_GENERIC_ID_REGION_SNAPSHOT, + .name = DEVLINK_PARAM_GENERIC_REGION_SNAPSHOT_NAME, + .type = DEVLINK_PARAM_GENERIC_REGION_SNAPSHOT_TYPE, + }, +}; + +static int devlink_param_generic_verify(const struct devlink_param *param) +{ + /* verify it match generic parameter by id and name */ + if (param->id > DEVLINK_PARAM_GENERIC_ID_MAX) + return -EINVAL; + if (strcmp(param->name, devlink_param_generic[param->id].name)) + return -ENOENT; + + WARN_ON(param->type != devlink_param_generic[param->id].type); + + return 0; +} + +static int devlink_param_driver_verify(const struct devlink_param *param) +{ + int i; + + if (param->id <= DEVLINK_PARAM_GENERIC_ID_MAX) + return -EINVAL; + /* verify no such name in generic params */ + for (i = 0; i <= DEVLINK_PARAM_GENERIC_ID_MAX; i++) + if (!strcmp(param->name, devlink_param_generic[i].name)) + return -EEXIST; + + return 0; +} + +static struct devlink_param_item * +devlink_param_find_by_name(struct list_head *param_list, + const char *param_name) +{ + struct devlink_param_item *param_item; + + list_for_each_entry(param_item, param_list, list) + if (!strcmp(param_item->param->name, param_name)) + return param_item; + return NULL; +} + +static struct devlink_param_item * +devlink_param_find_by_id(struct list_head *param_list, u32 param_id) +{ + struct devlink_param_item *param_item; + + list_for_each_entry(param_item, param_list, list) + if (param_item->param->id == param_id) + return param_item; + return NULL; +} + +static bool +devlink_param_cmode_is_supported(const struct devlink_param *param, + enum devlink_param_cmode cmode) +{ + return test_bit(cmode, ¶m->supported_cmodes); +} + +static int devlink_param_get(struct devlink *devlink, + const struct devlink_param *param, + struct devlink_param_gset_ctx *ctx) +{ + if (!param->get) + return -EOPNOTSUPP; + return param->get(devlink, param->id, ctx); +} + +static int devlink_param_set(struct devlink *devlink, + const struct devlink_param *param, + struct devlink_param_gset_ctx *ctx) +{ + if (!param->set) + return -EOPNOTSUPP; + return param->set(devlink, param->id, ctx); +} + +static int +devlink_param_type_to_nla_type(enum devlink_param_type param_type) +{ + switch (param_type) { + case DEVLINK_PARAM_TYPE_U8: + return NLA_U8; + case DEVLINK_PARAM_TYPE_U16: + return NLA_U16; + case DEVLINK_PARAM_TYPE_U32: + return NLA_U32; + case DEVLINK_PARAM_TYPE_STRING: + return NLA_STRING; + case DEVLINK_PARAM_TYPE_BOOL: + return NLA_FLAG; + default: + return -EINVAL; + } +} + +static int +devlink_nl_param_value_fill_one(struct sk_buff *msg, + enum devlink_param_type type, + enum devlink_param_cmode cmode, + union devlink_param_value val) 
+{ + struct nlattr *param_value_attr; + + param_value_attr = nla_nest_start(msg, DEVLINK_ATTR_PARAM_VALUE); + if (!param_value_attr) + goto nla_put_failure; + + if (nla_put_u8(msg, DEVLINK_ATTR_PARAM_VALUE_CMODE, cmode)) + goto value_nest_cancel; + + switch (type) { + case DEVLINK_PARAM_TYPE_U8: + if (nla_put_u8(msg, DEVLINK_ATTR_PARAM_VALUE_DATA, val.vu8)) + goto value_nest_cancel; + break; + case DEVLINK_PARAM_TYPE_U16: + if (nla_put_u16(msg, DEVLINK_ATTR_PARAM_VALUE_DATA, val.vu16)) + goto value_nest_cancel; + break; + case DEVLINK_PARAM_TYPE_U32: + if (nla_put_u32(msg, DEVLINK_ATTR_PARAM_VALUE_DATA, val.vu32)) + goto value_nest_cancel; + break; + case DEVLINK_PARAM_TYPE_STRING: + if (nla_put_string(msg, DEVLINK_ATTR_PARAM_VALUE_DATA, + val.vstr)) + goto value_nest_cancel; + break; + case DEVLINK_PARAM_TYPE_BOOL: + if (val.vbool && + nla_put_flag(msg, DEVLINK_ATTR_PARAM_VALUE_DATA)) + goto value_nest_cancel; + break; + } + + nla_nest_end(msg, param_value_attr); + return 0; + +value_nest_cancel: + nla_nest_cancel(msg, param_value_attr); +nla_put_failure: + return -EMSGSIZE; +} + +static int devlink_nl_param_fill(struct sk_buff *msg, struct devlink *devlink, + struct devlink_param_item *param_item, + enum devlink_command cmd, + u32 portid, u32 seq, int flags) +{ + union devlink_param_value param_value[DEVLINK_PARAM_CMODE_MAX + 1]; + const struct devlink_param *param = param_item->param; + struct devlink_param_gset_ctx ctx; + struct nlattr *param_values_list; + struct nlattr *param_attr; + int nla_type; + void *hdr; + int err; + int i; + + /* Get value from driver part to driverinit configuration mode */ + for (i = 0; i <= DEVLINK_PARAM_CMODE_MAX; i++) { + if (!devlink_param_cmode_is_supported(param, i)) + continue; + if (i == DEVLINK_PARAM_CMODE_DRIVERINIT) { + if (!param_item->driverinit_value_valid) + return -EOPNOTSUPP; + param_value[i] = param_item->driverinit_value; + } else { + ctx.cmode = i; + err = devlink_param_get(devlink, param, &ctx); + if (err) + return err; + param_value[i] = ctx.val; + } + } + + hdr = genlmsg_put(msg, portid, seq, &devlink_nl_family, flags, cmd); + if (!hdr) + return -EMSGSIZE; + + if (devlink_nl_put_handle(msg, devlink)) + goto genlmsg_cancel; + param_attr = nla_nest_start(msg, DEVLINK_ATTR_PARAM); + if (!param_attr) + goto genlmsg_cancel; + if (nla_put_string(msg, DEVLINK_ATTR_PARAM_NAME, param->name)) + goto param_nest_cancel; + if (param->generic && nla_put_flag(msg, DEVLINK_ATTR_PARAM_GENERIC)) + goto param_nest_cancel; + + nla_type = devlink_param_type_to_nla_type(param->type); + if (nla_type < 0) + goto param_nest_cancel; + if (nla_put_u8(msg, DEVLINK_ATTR_PARAM_TYPE, nla_type)) + goto param_nest_cancel; + + param_values_list = nla_nest_start(msg, DEVLINK_ATTR_PARAM_VALUES_LIST); + if (!param_values_list) + goto param_nest_cancel; + + for (i = 0; i <= DEVLINK_PARAM_CMODE_MAX; i++) { + if (!devlink_param_cmode_is_supported(param, i)) + continue; + err = devlink_nl_param_value_fill_one(msg, param->type, + i, param_value[i]); + if (err) + goto values_list_nest_cancel; + } + + nla_nest_end(msg, param_values_list); + nla_nest_end(msg, param_attr); + genlmsg_end(msg, hdr); + return 0; + +values_list_nest_cancel: + nla_nest_end(msg, param_values_list); +param_nest_cancel: + nla_nest_cancel(msg, param_attr); +genlmsg_cancel: + genlmsg_cancel(msg, hdr); + return -EMSGSIZE; +} + +static void devlink_param_notify(struct devlink *devlink, + struct devlink_param_item *param_item, + enum devlink_command cmd) +{ + struct sk_buff *msg; + int err; + + WARN_ON(cmd != 
DEVLINK_CMD_PARAM_NEW && cmd != DEVLINK_CMD_PARAM_DEL); + + msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); + if (!msg) + return; + err = devlink_nl_param_fill(msg, devlink, param_item, cmd, 0, 0, 0); + if (err) { + nlmsg_free(msg); + return; + } + + genlmsg_multicast_netns(&devlink_nl_family, devlink_net(devlink), + msg, 0, DEVLINK_MCGRP_CONFIG, GFP_KERNEL); +} + +static int devlink_nl_cmd_param_get_dumpit(struct sk_buff *msg, + struct netlink_callback *cb) +{ + struct devlink_param_item *param_item; + struct devlink *devlink; + int start = cb->args[0]; + int idx = 0; + int err; + + mutex_lock(&devlink_mutex); + list_for_each_entry(devlink, &devlink_list, list) { + if (!net_eq(devlink_net(devlink), sock_net(msg->sk))) + continue; + mutex_lock(&devlink->lock); + list_for_each_entry(param_item, &devlink->param_list, list) { + if (idx < start) { + idx++; + continue; + } + err = devlink_nl_param_fill(msg, devlink, param_item, + DEVLINK_CMD_PARAM_GET, + NETLINK_CB(cb->skb).portid, + cb->nlh->nlmsg_seq, + NLM_F_MULTI); + if (err) { + mutex_unlock(&devlink->lock); + goto out; + } + idx++; + } + mutex_unlock(&devlink->lock); + } +out: + mutex_unlock(&devlink_mutex); + + cb->args[0] = idx; + return msg->len; +} + +static int +devlink_param_type_get_from_info(struct genl_info *info, + enum devlink_param_type *param_type) +{ + if (!info->attrs[DEVLINK_ATTR_PARAM_TYPE]) + return -EINVAL; + + switch (nla_get_u8(info->attrs[DEVLINK_ATTR_PARAM_TYPE])) { + case NLA_U8: + *param_type = DEVLINK_PARAM_TYPE_U8; + break; + case NLA_U16: + *param_type = DEVLINK_PARAM_TYPE_U16; + break; + case NLA_U32: + *param_type = DEVLINK_PARAM_TYPE_U32; + break; + case NLA_STRING: + *param_type = DEVLINK_PARAM_TYPE_STRING; + break; + case NLA_FLAG: + *param_type = DEVLINK_PARAM_TYPE_BOOL; + break; + default: + return -EINVAL; + } + + return 0; +} + +static int +devlink_param_value_get_from_info(const struct devlink_param *param, + struct genl_info *info, + union devlink_param_value *value) +{ + if (param->type != DEVLINK_PARAM_TYPE_BOOL && + !info->attrs[DEVLINK_ATTR_PARAM_VALUE_DATA]) + return -EINVAL; + + switch (param->type) { + case DEVLINK_PARAM_TYPE_U8: + value->vu8 = nla_get_u8(info->attrs[DEVLINK_ATTR_PARAM_VALUE_DATA]); + break; + case DEVLINK_PARAM_TYPE_U16: + value->vu16 = nla_get_u16(info->attrs[DEVLINK_ATTR_PARAM_VALUE_DATA]); + break; + case DEVLINK_PARAM_TYPE_U32: + value->vu32 = nla_get_u32(info->attrs[DEVLINK_ATTR_PARAM_VALUE_DATA]); + break; + case DEVLINK_PARAM_TYPE_STRING: + if (nla_len(info->attrs[DEVLINK_ATTR_PARAM_VALUE_DATA]) > + DEVLINK_PARAM_MAX_STRING_VALUE) + return -EINVAL; + value->vstr = nla_data(info->attrs[DEVLINK_ATTR_PARAM_VALUE_DATA]); + break; + case DEVLINK_PARAM_TYPE_BOOL: + value->vbool = info->attrs[DEVLINK_ATTR_PARAM_VALUE_DATA] ? 
+ true : false; + break; + } + return 0; +} + +static struct devlink_param_item * +devlink_param_get_from_info(struct devlink *devlink, + struct genl_info *info) +{ + char *param_name; + + if (!info->attrs[DEVLINK_ATTR_PARAM_NAME]) + return NULL; + + param_name = nla_data(info->attrs[DEVLINK_ATTR_PARAM_NAME]); + return devlink_param_find_by_name(&devlink->param_list, param_name); +} + +static int devlink_nl_cmd_param_get_doit(struct sk_buff *skb, + struct genl_info *info) +{ + struct devlink *devlink = info->user_ptr[0]; + struct devlink_param_item *param_item; + struct sk_buff *msg; + int err; + + param_item = devlink_param_get_from_info(devlink, info); + if (!param_item) + return -EINVAL; + + msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); + if (!msg) + return -ENOMEM; + + err = devlink_nl_param_fill(msg, devlink, param_item, + DEVLINK_CMD_PARAM_GET, + info->snd_portid, info->snd_seq, 0); + if (err) { + nlmsg_free(msg); + return err; + } + + return genlmsg_reply(msg, info); +} + +static int devlink_nl_cmd_param_set_doit(struct sk_buff *skb, + struct genl_info *info) +{ + struct devlink *devlink = info->user_ptr[0]; + enum devlink_param_type param_type; + struct devlink_param_gset_ctx ctx; + enum devlink_param_cmode cmode; + struct devlink_param_item *param_item; + const struct devlink_param *param; + union devlink_param_value value; + int err = 0; + + param_item = devlink_param_get_from_info(devlink, info); + if (!param_item) + return -EINVAL; + param = param_item->param; + err = devlink_param_type_get_from_info(info, ¶m_type); + if (err) + return err; + if (param_type != param->type) + return -EINVAL; + err = devlink_param_value_get_from_info(param, info, &value); + if (err) + return err; + if (param->validate) { + err = param->validate(devlink, param->id, value, info->extack); + if (err) + return err; + } + + if (!info->attrs[DEVLINK_ATTR_PARAM_VALUE_CMODE]) + return -EINVAL; + cmode = nla_get_u8(info->attrs[DEVLINK_ATTR_PARAM_VALUE_CMODE]); + if (!devlink_param_cmode_is_supported(param, cmode)) + return -EOPNOTSUPP; + + if (cmode == DEVLINK_PARAM_CMODE_DRIVERINIT) { + param_item->driverinit_value = value; + param_item->driverinit_value_valid = true; + } else { + if (!param->set) + return -EOPNOTSUPP; + ctx.val = value; + ctx.cmode = cmode; + err = devlink_param_set(devlink, param, &ctx); + if (err) + return err; + } + + devlink_param_notify(devlink, param_item, DEVLINK_CMD_PARAM_NEW); + return 0; +} + +static int devlink_param_register_one(struct devlink *devlink, + const struct devlink_param *param) +{ + struct devlink_param_item *param_item; + + if (devlink_param_find_by_name(&devlink->param_list, + param->name)) + return -EEXIST; + + if (param->supported_cmodes == BIT(DEVLINK_PARAM_CMODE_DRIVERINIT)) + WARN_ON(param->get || param->set); + else + WARN_ON(!param->get || !param->set); + + param_item = kzalloc(sizeof(*param_item), GFP_KERNEL); + if (!param_item) + return -ENOMEM; + param_item->param = param; + + list_add_tail(¶m_item->list, &devlink->param_list); + devlink_param_notify(devlink, param_item, DEVLINK_CMD_PARAM_NEW); + return 0; +} + +static void devlink_param_unregister_one(struct devlink *devlink, + const struct devlink_param *param) +{ + struct devlink_param_item *param_item; + + param_item = devlink_param_find_by_name(&devlink->param_list, + param->name); + WARN_ON(!param_item); + devlink_param_notify(devlink, param_item, DEVLINK_CMD_PARAM_DEL); + list_del(¶m_item->list); + kfree(param_item); +} + +static int devlink_nl_region_snapshot_id_put(struct sk_buff *msg, + 
struct devlink *devlink, + struct devlink_snapshot *snapshot) +{ + struct nlattr *snap_attr; + int err; + + snap_attr = nla_nest_start(msg, DEVLINK_ATTR_REGION_SNAPSHOT); + if (!snap_attr) + return -EINVAL; + + err = nla_put_u32(msg, DEVLINK_ATTR_REGION_SNAPSHOT_ID, snapshot->id); + if (err) + goto nla_put_failure; + + nla_nest_end(msg, snap_attr); + return 0; + +nla_put_failure: + nla_nest_cancel(msg, snap_attr); + return err; +} + +static int devlink_nl_region_snapshots_id_put(struct sk_buff *msg, + struct devlink *devlink, + struct devlink_region *region) +{ + struct devlink_snapshot *snapshot; + struct nlattr *snapshots_attr; + int err; + + snapshots_attr = nla_nest_start(msg, DEVLINK_ATTR_REGION_SNAPSHOTS); + if (!snapshots_attr) + return -EINVAL; + + list_for_each_entry(snapshot, ®ion->snapshot_list, list) { + err = devlink_nl_region_snapshot_id_put(msg, devlink, snapshot); + if (err) + goto nla_put_failure; + } + + nla_nest_end(msg, snapshots_attr); + return 0; + +nla_put_failure: + nla_nest_cancel(msg, snapshots_attr); + return err; +} + +static int devlink_nl_region_fill(struct sk_buff *msg, struct devlink *devlink, + enum devlink_command cmd, u32 portid, + u32 seq, int flags, + struct devlink_region *region) +{ + void *hdr; + int err; + + hdr = genlmsg_put(msg, portid, seq, &devlink_nl_family, flags, cmd); + if (!hdr) + return -EMSGSIZE; + + err = devlink_nl_put_handle(msg, devlink); + if (err) + goto nla_put_failure; + + err = nla_put_string(msg, DEVLINK_ATTR_REGION_NAME, region->name); + if (err) + goto nla_put_failure; + + err = nla_put_u64_64bit(msg, DEVLINK_ATTR_REGION_SIZE, + region->size, + DEVLINK_ATTR_PAD); + if (err) + goto nla_put_failure; + + err = devlink_nl_region_snapshots_id_put(msg, devlink, region); + if (err) + goto nla_put_failure; + + genlmsg_end(msg, hdr); + return 0; + +nla_put_failure: + genlmsg_cancel(msg, hdr); + return err; +} + +static void devlink_nl_region_notify(struct devlink_region *region, + struct devlink_snapshot *snapshot, + enum devlink_command cmd) +{ + struct devlink *devlink = region->devlink; + struct sk_buff *msg; + void *hdr; + int err; + + WARN_ON(cmd != DEVLINK_CMD_REGION_NEW && cmd != DEVLINK_CMD_REGION_DEL); + + msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); + if (!msg) + return; + + hdr = genlmsg_put(msg, 0, 0, &devlink_nl_family, 0, cmd); + if (!hdr) + goto out_free_msg; + + err = devlink_nl_put_handle(msg, devlink); + if (err) + goto out_cancel_msg; + + err = nla_put_string(msg, DEVLINK_ATTR_REGION_NAME, + region->name); + if (err) + goto out_cancel_msg; + + if (snapshot) { + err = nla_put_u32(msg, DEVLINK_ATTR_REGION_SNAPSHOT_ID, + snapshot->id); + if (err) + goto out_cancel_msg; + } else { + err = nla_put_u64_64bit(msg, DEVLINK_ATTR_REGION_SIZE, + region->size, DEVLINK_ATTR_PAD); + if (err) + goto out_cancel_msg; + } + genlmsg_end(msg, hdr); + + genlmsg_multicast_netns(&devlink_nl_family, devlink_net(devlink), + msg, 0, DEVLINK_MCGRP_CONFIG, GFP_KERNEL); + + return; + +out_cancel_msg: + genlmsg_cancel(msg, hdr); +out_free_msg: + nlmsg_free(msg); +} + +static int devlink_nl_cmd_region_get_doit(struct sk_buff *skb, + struct genl_info *info) +{ + struct devlink *devlink = info->user_ptr[0]; + struct devlink_region *region; + const char *region_name; + struct sk_buff *msg; + int err; + + if (!info->attrs[DEVLINK_ATTR_REGION_NAME]) + return -EINVAL; + + region_name = nla_data(info->attrs[DEVLINK_ATTR_REGION_NAME]); + region = devlink_region_get_by_name(devlink, region_name); + if (!region) + return -EINVAL; + + msg = 
nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); + if (!msg) + return -ENOMEM; + + err = devlink_nl_region_fill(msg, devlink, DEVLINK_CMD_REGION_GET, + info->snd_portid, info->snd_seq, 0, + region); + if (err) { + nlmsg_free(msg); + return err; + } + + return genlmsg_reply(msg, info); +} + +static int devlink_nl_cmd_region_get_dumpit(struct sk_buff *msg, + struct netlink_callback *cb) +{ + struct devlink_region *region; + struct devlink *devlink; + int start = cb->args[0]; + int idx = 0; + int err; + + mutex_lock(&devlink_mutex); + list_for_each_entry(devlink, &devlink_list, list) { + if (!net_eq(devlink_net(devlink), sock_net(msg->sk))) + continue; + + mutex_lock(&devlink->lock); + list_for_each_entry(region, &devlink->region_list, list) { + if (idx < start) { + idx++; + continue; + } + err = devlink_nl_region_fill(msg, devlink, + DEVLINK_CMD_REGION_GET, + NETLINK_CB(cb->skb).portid, + cb->nlh->nlmsg_seq, + NLM_F_MULTI, region); + if (err) { + mutex_unlock(&devlink->lock); + goto out; + } + idx++; + } + mutex_unlock(&devlink->lock); + } +out: + mutex_unlock(&devlink_mutex); + cb->args[0] = idx; + return msg->len; +} + +static int devlink_nl_cmd_region_del(struct sk_buff *skb, + struct genl_info *info) +{ + struct devlink *devlink = info->user_ptr[0]; + struct devlink_snapshot *snapshot; + struct devlink_region *region; + const char *region_name; + u32 snapshot_id; + + if (!info->attrs[DEVLINK_ATTR_REGION_NAME] || + !info->attrs[DEVLINK_ATTR_REGION_SNAPSHOT_ID]) + return -EINVAL; + + region_name = nla_data(info->attrs[DEVLINK_ATTR_REGION_NAME]); + snapshot_id = nla_get_u32(info->attrs[DEVLINK_ATTR_REGION_SNAPSHOT_ID]); + + region = devlink_region_get_by_name(devlink, region_name); + if (!region) + return -EINVAL; + + snapshot = devlink_region_snapshot_get_by_id(region, snapshot_id); + if (!snapshot) + return -EINVAL; + + devlink_nl_region_notify(region, snapshot, DEVLINK_CMD_REGION_DEL); + devlink_region_snapshot_del(snapshot); + return 0; +} + +static int devlink_nl_cmd_region_read_chunk_fill(struct sk_buff *msg, + struct devlink *devlink, + u8 *chunk, u32 chunk_size, + u64 addr) +{ + struct nlattr *chunk_attr; + int err; + + chunk_attr = nla_nest_start(msg, DEVLINK_ATTR_REGION_CHUNK); + if (!chunk_attr) + return -EINVAL; + + err = nla_put(msg, DEVLINK_ATTR_REGION_CHUNK_DATA, chunk_size, chunk); + if (err) + goto nla_put_failure; + + err = nla_put_u64_64bit(msg, DEVLINK_ATTR_REGION_CHUNK_ADDR, addr, + DEVLINK_ATTR_PAD); + if (err) + goto nla_put_failure; + + nla_nest_end(msg, chunk_attr); + return 0; + +nla_put_failure: + nla_nest_cancel(msg, chunk_attr); + return err; +} + +#define DEVLINK_REGION_READ_CHUNK_SIZE 256 + +static int devlink_nl_region_read_snapshot_fill(struct sk_buff *skb, + struct devlink *devlink, + struct devlink_region *region, + struct nlattr **attrs, + u64 start_offset, + u64 end_offset, + bool dump, + u64 *new_offset) +{ + struct devlink_snapshot *snapshot; + u64 curr_offset = start_offset; + u32 snapshot_id; + int err = 0; + + *new_offset = start_offset; + + snapshot_id = nla_get_u32(attrs[DEVLINK_ATTR_REGION_SNAPSHOT_ID]); + snapshot = devlink_region_snapshot_get_by_id(region, snapshot_id); + if (!snapshot) + return -EINVAL; + + if (end_offset > snapshot->data_len || dump) + end_offset = snapshot->data_len; + + while (curr_offset < end_offset) { + u32 data_size; + u8 *data; + + if (end_offset - curr_offset < DEVLINK_REGION_READ_CHUNK_SIZE) + data_size = end_offset - curr_offset; + else + data_size = DEVLINK_REGION_READ_CHUNK_SIZE; + + data = &snapshot->data[curr_offset]; 
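+		/* Each chunk is limited to DEVLINK_REGION_READ_CHUNK_SIZE
+		 * bytes of snapshot data and is emitted together with its
+		 * address within the region, so userspace can reassemble
+		 * partial or windowed reads.
+		 */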
+ err = devlink_nl_cmd_region_read_chunk_fill(skb, devlink, + data, data_size, + curr_offset); + if (err) + break; + + curr_offset += data_size; + } + *new_offset = curr_offset; + + return err; +} + +static int devlink_nl_cmd_region_read_dumpit(struct sk_buff *skb, + struct netlink_callback *cb) +{ + u64 ret_offset, start_offset, end_offset = 0; + struct nlattr *attrs[DEVLINK_ATTR_MAX + 1]; + const struct genl_ops *ops = cb->data; + struct devlink_region *region; + struct nlattr *chunks_attr; + const char *region_name; + struct devlink *devlink; + bool dump = true; + void *hdr; + int err; + + start_offset = *((u64 *)&cb->args[0]); + + err = nlmsg_parse(cb->nlh, GENL_HDRLEN + devlink_nl_family.hdrsize, + attrs, DEVLINK_ATTR_MAX, ops->policy, NULL); + if (err) + goto out; + + devlink = devlink_get_from_attrs(sock_net(cb->skb->sk), attrs); + if (IS_ERR(devlink)) + goto out; + + mutex_lock(&devlink_mutex); + mutex_lock(&devlink->lock); + + if (!attrs[DEVLINK_ATTR_REGION_NAME] || + !attrs[DEVLINK_ATTR_REGION_SNAPSHOT_ID]) + goto out_unlock; + + region_name = nla_data(attrs[DEVLINK_ATTR_REGION_NAME]); + region = devlink_region_get_by_name(devlink, region_name); + if (!region) + goto out_unlock; + + hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq, + &devlink_nl_family, NLM_F_ACK | NLM_F_MULTI, + DEVLINK_CMD_REGION_READ); + if (!hdr) + goto out_unlock; + + err = devlink_nl_put_handle(skb, devlink); + if (err) + goto nla_put_failure; + + err = nla_put_string(skb, DEVLINK_ATTR_REGION_NAME, region_name); + if (err) + goto nla_put_failure; + + chunks_attr = nla_nest_start(skb, DEVLINK_ATTR_REGION_CHUNKS); + if (!chunks_attr) + goto nla_put_failure; + + if (attrs[DEVLINK_ATTR_REGION_CHUNK_ADDR] && + attrs[DEVLINK_ATTR_REGION_CHUNK_LEN]) { + if (!start_offset) + start_offset = + nla_get_u64(attrs[DEVLINK_ATTR_REGION_CHUNK_ADDR]); + + end_offset = nla_get_u64(attrs[DEVLINK_ATTR_REGION_CHUNK_ADDR]); + end_offset += nla_get_u64(attrs[DEVLINK_ATTR_REGION_CHUNK_LEN]); + dump = false; + } + + err = devlink_nl_region_read_snapshot_fill(skb, devlink, + region, attrs, + start_offset, + end_offset, dump, + &ret_offset); + + if (err && err != -EMSGSIZE) + goto nla_put_failure; + + /* Check if there was any progress done to prevent infinite loop */ + if (ret_offset == start_offset) + goto nla_put_failure; + + *((u64 *)&cb->args[0]) = ret_offset; + + nla_nest_end(skb, chunks_attr); + genlmsg_end(skb, hdr); + mutex_unlock(&devlink->lock); + mutex_unlock(&devlink_mutex); + + return skb->len; + +nla_put_failure: + genlmsg_cancel(skb, hdr); +out_unlock: + mutex_unlock(&devlink->lock); + mutex_unlock(&devlink_mutex); +out: + return 0; +} + static const struct nla_policy devlink_nl_policy[DEVLINK_ATTR_MAX + 1] = { [DEVLINK_ATTR_BUS_NAME] = { .type = NLA_NUL_STRING }, [DEVLINK_ATTR_DEV_NAME] = { .type = NLA_NUL_STRING }, @@ -2624,6 +3588,11 @@ static const struct nla_policy devlink_nl_policy[DEVLINK_ATTR_MAX + 1] = { [DEVLINK_ATTR_DPIPE_TABLE_COUNTERS_ENABLED] = { .type = NLA_U8 }, [DEVLINK_ATTR_RESOURCE_ID] = { .type = NLA_U64}, [DEVLINK_ATTR_RESOURCE_SIZE] = { .type = NLA_U64}, + [DEVLINK_ATTR_PARAM_NAME] = { .type = NLA_NUL_STRING }, + [DEVLINK_ATTR_PARAM_TYPE] = { .type = NLA_U8 }, + [DEVLINK_ATTR_PARAM_VALUE_CMODE] = { .type = NLA_U8 }, + [DEVLINK_ATTR_REGION_NAME] = { .type = NLA_NUL_STRING }, + [DEVLINK_ATTR_REGION_SNAPSHOT_ID] = { .type = NLA_U32 }, }; static const struct genl_ops devlink_nl_ops[] = { @@ -2807,6 +3776,43 @@ static const struct genl_ops devlink_nl_ops[] = { .internal_flags = 
DEVLINK_NL_FLAG_NEED_DEVLINK | DEVLINK_NL_FLAG_NO_LOCK, }, + { + .cmd = DEVLINK_CMD_PARAM_GET, + .doit = devlink_nl_cmd_param_get_doit, + .dumpit = devlink_nl_cmd_param_get_dumpit, + .policy = devlink_nl_policy, + .internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK, + /* can be retrieved by unprivileged users */ + }, + { + .cmd = DEVLINK_CMD_PARAM_SET, + .doit = devlink_nl_cmd_param_set_doit, + .policy = devlink_nl_policy, + .flags = GENL_ADMIN_PERM, + .internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK, + }, + { + .cmd = DEVLINK_CMD_REGION_GET, + .doit = devlink_nl_cmd_region_get_doit, + .dumpit = devlink_nl_cmd_region_get_dumpit, + .policy = devlink_nl_policy, + .flags = GENL_ADMIN_PERM, + .internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK, + }, + { + .cmd = DEVLINK_CMD_REGION_DEL, + .doit = devlink_nl_cmd_region_del, + .policy = devlink_nl_policy, + .flags = GENL_ADMIN_PERM, + .internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK, + }, + { + .cmd = DEVLINK_CMD_REGION_READ, + .dumpit = devlink_nl_cmd_region_read_dumpit, + .policy = devlink_nl_policy, + .flags = GENL_ADMIN_PERM, + .internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK, + }, }; static struct genl_family devlink_nl_family __ro_after_init = { @@ -2845,6 +3851,8 @@ struct devlink *devlink_alloc(const struct devlink_ops *ops, size_t priv_size) INIT_LIST_HEAD(&devlink->sb_list); INIT_LIST_HEAD_RCU(&devlink->dpipe_table_list); INIT_LIST_HEAD(&devlink->resource_list); + INIT_LIST_HEAD(&devlink->param_list); + INIT_LIST_HEAD(&devlink->region_list); mutex_init(&devlink->lock); return devlink; } @@ -3434,6 +4442,320 @@ out: } EXPORT_SYMBOL_GPL(devlink_resource_occ_get_unregister); +/** + * devlink_params_register - register configuration parameters + * + * @devlink: devlink + * @params: configuration parameters array + * @params_count: number of parameters provided + * + * Register the configuration parameters supported by the driver. 
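+ *
+ * A minimal illustrative sketch of a driver-side registration; the id,
+ * name and array below are hypothetical, and the parameter is
+ * DRIVERINIT-only so no get/set callbacks are supplied:
+ *
+ *	static const struct devlink_param foo_devlink_params[] = {
+ *		{
+ *			.id = FOO_DEVLINK_PARAM_ID_RESET,
+ *			.name = "foo_reset_on_error",
+ *			.type = DEVLINK_PARAM_TYPE_BOOL,
+ *			.supported_cmodes =
+ *				BIT(DEVLINK_PARAM_CMODE_DRIVERINIT),
+ *		},
+ *	};
+ *
+ *	err = devlink_params_register(devlink, foo_devlink_params,
+ *				      ARRAY_SIZE(foo_devlink_params));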
+ */ +int devlink_params_register(struct devlink *devlink, + const struct devlink_param *params, + size_t params_count) +{ + const struct devlink_param *param = params; + int i; + int err; + + mutex_lock(&devlink->lock); + for (i = 0; i < params_count; i++, param++) { + if (!param || !param->name || !param->supported_cmodes) { + err = -EINVAL; + goto rollback; + } + if (param->generic) { + err = devlink_param_generic_verify(param); + if (err) + goto rollback; + } else { + err = devlink_param_driver_verify(param); + if (err) + goto rollback; + } + err = devlink_param_register_one(devlink, param); + if (err) + goto rollback; + } + + mutex_unlock(&devlink->lock); + return 0; + +rollback: + if (!i) + goto unlock; + for (param--; i > 0; i--, param--) + devlink_param_unregister_one(devlink, param); +unlock: + mutex_unlock(&devlink->lock); + return err; +} +EXPORT_SYMBOL_GPL(devlink_params_register); + +/** + * devlink_params_unregister - unregister configuration parameters + * @devlink: devlink + * @params: configuration parameters to unregister + * @params_count: number of parameters provided + */ +void devlink_params_unregister(struct devlink *devlink, + const struct devlink_param *params, + size_t params_count) +{ + const struct devlink_param *param = params; + int i; + + mutex_lock(&devlink->lock); + for (i = 0; i < params_count; i++, param++) + devlink_param_unregister_one(devlink, param); + mutex_unlock(&devlink->lock); +} +EXPORT_SYMBOL_GPL(devlink_params_unregister); + +/** + * devlink_param_driverinit_value_get - get configuration parameter + * value for driver initializing + * + * @devlink: devlink + * @param_id: parameter ID + * @init_val: value of parameter in driverinit configuration mode + * + * This function should be used by the driver to get driverinit + * configuration for initialization after reload command. + */ +int devlink_param_driverinit_value_get(struct devlink *devlink, u32 param_id, + union devlink_param_value *init_val) +{ + struct devlink_param_item *param_item; + + if (!devlink->ops || !devlink->ops->reload) + return -EOPNOTSUPP; + + param_item = devlink_param_find_by_id(&devlink->param_list, param_id); + if (!param_item) + return -EINVAL; + + if (!param_item->driverinit_value_valid || + !devlink_param_cmode_is_supported(param_item->param, + DEVLINK_PARAM_CMODE_DRIVERINIT)) + return -EOPNOTSUPP; + + *init_val = param_item->driverinit_value; + + return 0; +} +EXPORT_SYMBOL_GPL(devlink_param_driverinit_value_get); + +/** + * devlink_param_driverinit_value_set - set value of configuration + * parameter for driverinit + * configuration mode + * + * @devlink: devlink + * @param_id: parameter ID + * @init_val: value of parameter to set for driverinit configuration mode + * + * This function should be used by the driver to set driverinit + * configuration mode default value. 
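+ *
+ * Illustrative sketch (FOO_PARAM_ID and the foo driver state are
+ * placeholders): publish the default at probe time, then read back
+ * whatever value was configured when ->reload() re-initializes the
+ * device:
+ *
+ *	union devlink_param_value value;
+ *
+ *	value.vbool = true;
+ *	devlink_param_driverinit_value_set(devlink, FOO_PARAM_ID, value);
+ *	...
+ *	if (!devlink_param_driverinit_value_get(devlink, FOO_PARAM_ID,
+ *						&value))
+ *		foo->reset_on_error = value.vbool;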
+ */ +int devlink_param_driverinit_value_set(struct devlink *devlink, u32 param_id, + union devlink_param_value init_val) +{ + struct devlink_param_item *param_item; + + param_item = devlink_param_find_by_id(&devlink->param_list, param_id); + if (!param_item) + return -EINVAL; + + if (!devlink_param_cmode_is_supported(param_item->param, + DEVLINK_PARAM_CMODE_DRIVERINIT)) + return -EOPNOTSUPP; + + param_item->driverinit_value = init_val; + param_item->driverinit_value_valid = true; + + devlink_param_notify(devlink, param_item, DEVLINK_CMD_PARAM_NEW); + return 0; +} +EXPORT_SYMBOL_GPL(devlink_param_driverinit_value_set); + +/** + * devlink_param_value_changed - notify devlink on a parameter's value + * change. Should be called by the driver + * right after the change. + * + * @devlink: devlink + * @param_id: parameter ID + * + * This function should be used by the driver to notify devlink on value + * change, excluding driverinit configuration mode. + * For driverinit configuration mode driver should use the function + * devlink_param_driverinit_value_set() instead. + */ +void devlink_param_value_changed(struct devlink *devlink, u32 param_id) +{ + struct devlink_param_item *param_item; + + param_item = devlink_param_find_by_id(&devlink->param_list, param_id); + WARN_ON(!param_item); + + devlink_param_notify(devlink, param_item, DEVLINK_CMD_PARAM_NEW); +} +EXPORT_SYMBOL_GPL(devlink_param_value_changed); + +/** + * devlink_region_create - create a new address region + * + * @devlink: devlink + * @region_name: region name + * @region_max_snapshots: Maximum supported number of snapshots for region + * @region_size: size of region + */ +struct devlink_region *devlink_region_create(struct devlink *devlink, + const char *region_name, + u32 region_max_snapshots, + u64 region_size) +{ + struct devlink_region *region; + int err = 0; + + mutex_lock(&devlink->lock); + + if (devlink_region_get_by_name(devlink, region_name)) { + err = -EEXIST; + goto unlock; + } + + region = kzalloc(sizeof(*region), GFP_KERNEL); + if (!region) { + err = -ENOMEM; + goto unlock; + } + + region->devlink = devlink; + region->max_snapshots = region_max_snapshots; + region->name = region_name; + region->size = region_size; + INIT_LIST_HEAD(®ion->snapshot_list); + list_add_tail(®ion->list, &devlink->region_list); + devlink_nl_region_notify(region, NULL, DEVLINK_CMD_REGION_NEW); + + mutex_unlock(&devlink->lock); + return region; + +unlock: + mutex_unlock(&devlink->lock); + return ERR_PTR(err); +} +EXPORT_SYMBOL_GPL(devlink_region_create); + +/** + * devlink_region_destroy - destroy address region + * + * @region: devlink region to destroy + */ +void devlink_region_destroy(struct devlink_region *region) +{ + struct devlink *devlink = region->devlink; + struct devlink_snapshot *snapshot, *ts; + + mutex_lock(&devlink->lock); + + /* Free all snapshots of region */ + list_for_each_entry_safe(snapshot, ts, ®ion->snapshot_list, list) + devlink_region_snapshot_del(snapshot); + + list_del(®ion->list); + + devlink_nl_region_notify(region, NULL, DEVLINK_CMD_REGION_DEL); + mutex_unlock(&devlink->lock); + kfree(region); +} +EXPORT_SYMBOL_GPL(devlink_region_destroy); + +/** + * devlink_region_shapshot_id_get - get snapshot ID + * + * This callback should be called when adding a new snapshot, + * Driver should use the same id for multiple snapshots taken + * on multiple regions at the same time/by the same trigger. 
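+ *
+ * Illustrative sketch of the overall snapshot flow; the region name,
+ * sizes and destructor are placeholders:
+ *
+ *	region = devlink_region_create(devlink, "fw-health", 8, region_size);
+ *	...
+ *	snapshot_id = devlink_region_shapshot_id_get(devlink);
+ *	err = devlink_region_snapshot_create(region, data_len, data,
+ *					     snapshot_id, foo_destructor);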
+ * + * @devlink: devlink + */ +u32 devlink_region_shapshot_id_get(struct devlink *devlink) +{ + u32 id; + + mutex_lock(&devlink->lock); + id = ++devlink->snapshot_id; + mutex_unlock(&devlink->lock); + + return id; +} +EXPORT_SYMBOL_GPL(devlink_region_shapshot_id_get); + +/** + * devlink_region_snapshot_create - create a new snapshot + * This will add a new snapshot of a region. The snapshot + * will be stored on the region struct and can be accessed + * from devlink. This is useful for future analyses of snapshots. + * Multiple snapshots can be created on a region. + * The @snapshot_id should be obtained using the getter function. + * + * @devlink_region: devlink region of the snapshot + * @data_len: size of snapshot data + * @data: snapshot data + * @snapshot_id: snapshot id to be created + * @data_destructor: pointer to destructor function to free data + */ +int devlink_region_snapshot_create(struct devlink_region *region, u64 data_len, + u8 *data, u32 snapshot_id, + devlink_snapshot_data_dest_t *data_destructor) +{ + struct devlink *devlink = region->devlink; + struct devlink_snapshot *snapshot; + int err; + + mutex_lock(&devlink->lock); + + /* check if region can hold one more snapshot */ + if (region->cur_snapshots == region->max_snapshots) { + err = -ENOMEM; + goto unlock; + } + + if (devlink_region_snapshot_get_by_id(region, snapshot_id)) { + err = -EEXIST; + goto unlock; + } + + snapshot = kzalloc(sizeof(*snapshot), GFP_KERNEL); + if (!snapshot) { + err = -ENOMEM; + goto unlock; + } + + snapshot->id = snapshot_id; + snapshot->region = region; + snapshot->data = data; + snapshot->data_len = data_len; + snapshot->data_destructor = data_destructor; + + list_add_tail(&snapshot->list, ®ion->snapshot_list); + + region->cur_snapshots++; + + devlink_nl_region_notify(region, snapshot, DEVLINK_CMD_REGION_NEW); + mutex_unlock(&devlink->lock); + return 0; + +unlock: + mutex_unlock(&devlink->lock); + return err; +} +EXPORT_SYMBOL_GPL(devlink_region_snapshot_create); + static int __init devlink_module_init(void) { return genl_register_family(&devlink_nl_family); diff --git a/net/core/ethtool.c b/net/core/ethtool.c index e677a20180cf..c9993c6c2fd4 100644 --- a/net/core/ethtool.c +++ b/net/core/ethtool.c @@ -111,6 +111,7 @@ static const char netdev_features_strings[NETDEV_FEATURE_COUNT][ETH_GSTRING_LEN] [NETIF_F_RX_UDP_TUNNEL_PORT_BIT] = "rx-udp_tunnel-port-offload", [NETIF_F_HW_TLS_RECORD_BIT] = "tls-hw-record", [NETIF_F_HW_TLS_TX_BIT] = "tls-hw-tx-offload", + [NETIF_F_HW_TLS_RX_BIT] = "tls-hw-rx-offload", }; static const char diff --git a/net/core/filter.c b/net/core/filter.c index 06da770f543f..104d560946da 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -3679,7 +3679,7 @@ BPF_CALL_3(bpf_skb_set_tunnel_opt, struct sk_buff *, skb, if (unlikely(size > IP_TUNNEL_OPTS_MAX)) return -ENOMEM; - ip_tunnel_info_opts_set(info, from, size); + ip_tunnel_info_opts_set(info, from, size, TUNNEL_OPTIONS_PRESENT); return 0; } @@ -4751,6 +4751,7 @@ bpf_base_func_proto(enum bpf_func_id func_id) case BPF_FUNC_trace_printk: if (capable(CAP_SYS_ADMIN)) return bpf_get_trace_printk_proto(); + /* else: fall through */ default: return NULL; } diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c index 53f96e4f7bf5..08a5184f4b34 100644 --- a/net/core/flow_dissector.c +++ b/net/core/flow_dissector.c @@ -152,7 +152,9 @@ skb_flow_dissect_tunnel_info(const struct sk_buff *skb, !dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_ENC_CONTROL) && !dissector_uses_key(flow_dissector, - 
FLOW_DISSECTOR_KEY_ENC_PORTS)) + FLOW_DISSECTOR_KEY_ENC_PORTS) && + !dissector_uses_key(flow_dissector, + FLOW_DISSECTOR_KEY_ENC_IP)) return; info = skb_tunnel_info(skb); @@ -212,6 +214,16 @@ skb_flow_dissect_tunnel_info(const struct sk_buff *skb, tp->src = key->tp_src; tp->dst = key->tp_dst; } + + if (dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_ENC_IP)) { + struct flow_dissector_key_ip *ip; + + ip = skb_flow_dissector_target(flow_dissector, + FLOW_DISSECTOR_KEY_ENC_IP, + target_container); + ip->tos = key->tos; + ip->ttl = key->ttl; + } } EXPORT_SYMBOL(skb_flow_dissect_tunnel_info); @@ -589,7 +601,7 @@ bool __skb_flow_dissect(const struct sk_buff *skb, struct flow_dissector_key_tags *key_tags; struct flow_dissector_key_vlan *key_vlan; enum flow_dissect_ret fdret; - bool skip_vlan = false; + enum flow_dissector_key_id dissector_vlan = FLOW_DISSECTOR_KEY_MAX; int num_hdrs = 0; u8 ip_proto = 0; bool ret; @@ -748,14 +760,14 @@ proto_again: } case htons(ETH_P_8021AD): case htons(ETH_P_8021Q): { - const struct vlan_hdr *vlan; + const struct vlan_hdr *vlan = NULL; struct vlan_hdr _vlan; - bool vlan_tag_present = skb && skb_vlan_tag_present(skb); + __be16 saved_vlan_tpid = proto; - if (vlan_tag_present) + if (dissector_vlan == FLOW_DISSECTOR_KEY_MAX && + skb && skb_vlan_tag_present(skb)) { proto = skb->protocol; - - if (!vlan_tag_present || eth_type_vlan(skb->protocol)) { + } else { vlan = __skb_header_pointer(skb, nhoff, sizeof(_vlan), data, hlen, &_vlan); if (!vlan) { @@ -765,20 +777,23 @@ proto_again: proto = vlan->h_vlan_encapsulated_proto; nhoff += sizeof(*vlan); - if (skip_vlan) { - fdret = FLOW_DISSECT_RET_PROTO_AGAIN; - break; - } } - skip_vlan = true; - if (dissector_uses_key(flow_dissector, - FLOW_DISSECTOR_KEY_VLAN)) { + if (dissector_vlan == FLOW_DISSECTOR_KEY_MAX) { + dissector_vlan = FLOW_DISSECTOR_KEY_VLAN; + } else if (dissector_vlan == FLOW_DISSECTOR_KEY_VLAN) { + dissector_vlan = FLOW_DISSECTOR_KEY_CVLAN; + } else { + fdret = FLOW_DISSECT_RET_PROTO_AGAIN; + break; + } + + if (dissector_uses_key(flow_dissector, dissector_vlan)) { key_vlan = skb_flow_dissector_target(flow_dissector, - FLOW_DISSECTOR_KEY_VLAN, + dissector_vlan, target_container); - if (vlan_tag_present) { + if (!vlan) { key_vlan->vlan_id = skb_vlan_tag_get_id(skb); key_vlan->vlan_priority = (skb_vlan_tag_get_prio(skb) >> VLAN_PRIO_SHIFT); @@ -789,6 +804,7 @@ proto_again: (ntohs(vlan->h_vlan_TCI) & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT; } + key_vlan->vlan_tpid = saved_vlan_tpid; } fdret = FLOW_DISSECT_RET_PROTO_AGAIN; diff --git a/net/core/neighbour.c b/net/core/neighbour.c index 8e3fda9e725c..cbe85d8d4cc2 100644 --- a/net/core/neighbour.c +++ b/net/core/neighbour.c @@ -1148,7 +1148,8 @@ int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new, neigh->nud_state = new; err = 0; notify = old & NUD_VALID; - if ((old & (NUD_INCOMPLETE | NUD_PROBE)) && + if (((old & (NUD_INCOMPLETE | NUD_PROBE)) || + (flags & NEIGH_UPDATE_F_ADMIN)) && (new & NUD_FAILED)) { neigh_invalidate(neigh); notify = 1; diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c index bb7e80f4ced3..ffa1d18f2c2c 100644 --- a/net/core/net-sysfs.c +++ b/net/core/net-sysfs.c @@ -1047,13 +1047,30 @@ static ssize_t traffic_class_show(struct netdev_queue *queue, char *buf) { struct net_device *dev = queue->dev; - int index = get_netdev_queue_index(queue); - int tc = netdev_txq_to_tc(dev, index); + int index; + int tc; + if (!netif_is_multiqueue(dev)) + return -ENOENT; + + index = get_netdev_queue_index(queue); + + /* If queue belongs to 
subordinate dev use its TC mapping */ + dev = netdev_get_tx_queue(dev, index)->sb_dev ? : dev; + + tc = netdev_txq_to_tc(dev, index); if (tc < 0) return -EINVAL; - return sprintf(buf, "%u\n", tc); + /* We can report the traffic class one of two ways: + * Subordinate device traffic classes are reported with the traffic + * class first, and then the subordinate class so for example TC0 on + * subordinate device 2 will be reported as "0-2". If the queue + * belongs to the root device it will be reported with just the + * traffic class, so just "0" for TC 0 for example. + */ + return dev->num_tc < 0 ? sprintf(buf, "%u%d\n", tc, dev->num_tc) : + sprintf(buf, "%u\n", tc); } #ifdef CONFIG_XPS @@ -1214,10 +1231,20 @@ static ssize_t xps_cpus_show(struct netdev_queue *queue, cpumask_var_t mask; unsigned long index; + if (!netif_is_multiqueue(dev)) + return -ENOENT; + index = get_netdev_queue_index(queue); if (dev->num_tc) { + /* Do not allow XPS on subordinate device directly */ num_tc = dev->num_tc; + if (num_tc < 0) + return -EINVAL; + + /* If queue belongs to subordinate dev use its map */ + dev = netdev_get_tx_queue(dev, index)->sb_dev ? : dev; + tc = netdev_txq_to_tc(dev, index); if (tc < 0) return -EINVAL; @@ -1227,13 +1254,13 @@ static ssize_t xps_cpus_show(struct netdev_queue *queue, return -ENOMEM; rcu_read_lock(); - dev_maps = rcu_dereference(dev->xps_maps); + dev_maps = rcu_dereference(dev->xps_cpus_map); if (dev_maps) { for_each_possible_cpu(cpu) { int i, tci = cpu * num_tc + tc; struct xps_map *map; - map = rcu_dereference(dev_maps->cpu_map[tci]); + map = rcu_dereference(dev_maps->attr_map[tci]); if (!map) continue; @@ -1260,6 +1287,9 @@ static ssize_t xps_cpus_store(struct netdev_queue *queue, cpumask_var_t mask; int err; + if (!netif_is_multiqueue(dev)) + return -ENOENT; + if (!capable(CAP_NET_ADMIN)) return -EPERM; @@ -1283,6 +1313,88 @@ static ssize_t xps_cpus_store(struct netdev_queue *queue, static struct netdev_queue_attribute xps_cpus_attribute __ro_after_init = __ATTR_RW(xps_cpus); + +static ssize_t xps_rxqs_show(struct netdev_queue *queue, char *buf) +{ + struct net_device *dev = queue->dev; + struct xps_dev_maps *dev_maps; + unsigned long *mask, index; + int j, len, num_tc = 1, tc = 0; + + index = get_netdev_queue_index(queue); + + if (dev->num_tc) { + num_tc = dev->num_tc; + tc = netdev_txq_to_tc(dev, index); + if (tc < 0) + return -EINVAL; + } + mask = kcalloc(BITS_TO_LONGS(dev->num_rx_queues), sizeof(long), + GFP_KERNEL); + if (!mask) + return -ENOMEM; + + rcu_read_lock(); + dev_maps = rcu_dereference(dev->xps_rxqs_map); + if (!dev_maps) + goto out_no_maps; + + for (j = -1; j = netif_attrmask_next(j, NULL, dev->num_rx_queues), + j < dev->num_rx_queues;) { + int i, tci = j * num_tc + tc; + struct xps_map *map; + + map = rcu_dereference(dev_maps->attr_map[tci]); + if (!map) + continue; + + for (i = map->len; i--;) { + if (map->queues[i] == index) { + set_bit(j, mask); + break; + } + } + } +out_no_maps: + rcu_read_unlock(); + + len = bitmap_print_to_pagebuf(false, buf, mask, dev->num_rx_queues); + kfree(mask); + + return len < PAGE_SIZE ? 
len : -EINVAL; +} + +static ssize_t xps_rxqs_store(struct netdev_queue *queue, const char *buf, + size_t len) +{ + struct net_device *dev = queue->dev; + struct net *net = dev_net(dev); + unsigned long *mask, index; + int err; + + if (!ns_capable(net->user_ns, CAP_NET_ADMIN)) + return -EPERM; + + mask = kcalloc(BITS_TO_LONGS(dev->num_rx_queues), sizeof(long), + GFP_KERNEL); + if (!mask) + return -ENOMEM; + + index = get_netdev_queue_index(queue); + + err = bitmap_parse(buf, len, mask, dev->num_rx_queues); + if (err) { + kfree(mask); + return err; + } + + err = __netif_set_xps_queue(dev, mask, index, true); + kfree(mask); + return err ? : len; +} + +static struct netdev_queue_attribute xps_rxqs_attribute __ro_after_init + = __ATTR_RW(xps_rxqs); #endif /* CONFIG_XPS */ static struct attribute *netdev_queue_default_attrs[] __ro_after_init = { @@ -1290,6 +1402,7 @@ static struct attribute *netdev_queue_default_attrs[] __ro_after_init = { &queue_traffic_class.attr, #ifdef CONFIG_XPS &xps_cpus_attribute.attr, + &xps_rxqs_attribute.attr, &queue_tx_maxrate.attr, #endif NULL diff --git a/net/core/pktgen.c b/net/core/pktgen.c index 49368e21d228..308ed04984de 100644 --- a/net/core/pktgen.c +++ b/net/core/pktgen.c @@ -1265,7 +1265,7 @@ static ssize_t pktgen_if_write(struct file *file, buf[len] = 0; if (strcmp(buf, pkt_dev->dst_min) != 0) { memset(pkt_dev->dst_min, 0, sizeof(pkt_dev->dst_min)); - strncpy(pkt_dev->dst_min, buf, len); + strcpy(pkt_dev->dst_min, buf); pkt_dev->daddr_min = in_aton(pkt_dev->dst_min); pkt_dev->cur_daddr = pkt_dev->daddr_min; } @@ -1280,14 +1280,12 @@ static ssize_t pktgen_if_write(struct file *file, if (len < 0) return len; - if (copy_from_user(buf, &user_buffer[i], len)) return -EFAULT; - buf[len] = 0; if (strcmp(buf, pkt_dev->dst_max) != 0) { memset(pkt_dev->dst_max, 0, sizeof(pkt_dev->dst_max)); - strncpy(pkt_dev->dst_max, buf, len); + strcpy(pkt_dev->dst_max, buf); pkt_dev->daddr_max = in_aton(pkt_dev->dst_max); pkt_dev->cur_daddr = pkt_dev->daddr_max; } @@ -1396,7 +1394,7 @@ static ssize_t pktgen_if_write(struct file *file, buf[len] = 0; if (strcmp(buf, pkt_dev->src_min) != 0) { memset(pkt_dev->src_min, 0, sizeof(pkt_dev->src_min)); - strncpy(pkt_dev->src_min, buf, len); + strcpy(pkt_dev->src_min, buf); pkt_dev->saddr_min = in_aton(pkt_dev->src_min); pkt_dev->cur_saddr = pkt_dev->saddr_min; } @@ -1416,7 +1414,7 @@ static ssize_t pktgen_if_write(struct file *file, buf[len] = 0; if (strcmp(buf, pkt_dev->src_max) != 0) { memset(pkt_dev->src_max, 0, sizeof(pkt_dev->src_max)); - strncpy(pkt_dev->src_max, buf, len); + strcpy(pkt_dev->src_max, buf); pkt_dev->saddr_max = in_aton(pkt_dev->src_max); pkt_dev->cur_saddr = pkt_dev->saddr_max; } diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c index 5ef61222fdef..92b6fa5d5f6e 100644 --- a/net/core/rtnetlink.c +++ b/net/core/rtnetlink.c @@ -964,7 +964,8 @@ static size_t rtnl_xdp_size(void) { size_t xdp_size = nla_total_size(0) + /* nest IFLA_XDP */ nla_total_size(1) + /* XDP_ATTACHED */ - nla_total_size(4); /* XDP_PROG_ID */ + nla_total_size(4) + /* XDP_PROG_ID (or 1st mode) */ + nla_total_size(4); /* XDP_<mode>_PROG_ID */ return xdp_size; } @@ -1353,27 +1354,51 @@ static int rtnl_fill_link_ifmap(struct sk_buff *skb, struct net_device *dev) return 0; } -static u8 rtnl_xdp_attached_mode(struct net_device *dev, u32 *prog_id) +static u32 rtnl_xdp_prog_skb(struct net_device *dev) { - const struct net_device_ops *ops = dev->netdev_ops; const struct bpf_prog *generic_xdp_prog; - struct netdev_bpf xdp; ASSERT_RTNL(); - *prog_id = 0; 
generic_xdp_prog = rtnl_dereference(dev->xdp_prog); - if (generic_xdp_prog) { - *prog_id = generic_xdp_prog->aux->id; - return XDP_ATTACHED_SKB; - } - if (!ops->ndo_bpf) - return XDP_ATTACHED_NONE; + if (!generic_xdp_prog) + return 0; + return generic_xdp_prog->aux->id; +} + +static u32 rtnl_xdp_prog_drv(struct net_device *dev) +{ + return __dev_xdp_query(dev, dev->netdev_ops->ndo_bpf, XDP_QUERY_PROG); +} + +static u32 rtnl_xdp_prog_hw(struct net_device *dev) +{ + return __dev_xdp_query(dev, dev->netdev_ops->ndo_bpf, + XDP_QUERY_PROG_HW); +} + +static int rtnl_xdp_report_one(struct sk_buff *skb, struct net_device *dev, + u32 *prog_id, u8 *mode, u8 tgt_mode, u32 attr, + u32 (*get_prog_id)(struct net_device *dev)) +{ + u32 curr_id; + int err; + + curr_id = get_prog_id(dev); + if (!curr_id) + return 0; - __dev_xdp_query(dev, ops->ndo_bpf, &xdp); - *prog_id = xdp.prog_id; + *prog_id = curr_id; + err = nla_put_u32(skb, attr, curr_id); + if (err) + return err; + + if (*mode != XDP_ATTACHED_NONE) + *mode = XDP_ATTACHED_MULTI; + else + *mode = tgt_mode; - return xdp.prog_attached; + return 0; } static int rtnl_xdp_fill(struct sk_buff *skb, struct net_device *dev) @@ -1381,17 +1406,32 @@ static int rtnl_xdp_fill(struct sk_buff *skb, struct net_device *dev) struct nlattr *xdp; u32 prog_id; int err; + u8 mode; xdp = nla_nest_start(skb, IFLA_XDP); if (!xdp) return -EMSGSIZE; - err = nla_put_u8(skb, IFLA_XDP_ATTACHED, - rtnl_xdp_attached_mode(dev, &prog_id)); + prog_id = 0; + mode = XDP_ATTACHED_NONE; + err = rtnl_xdp_report_one(skb, dev, &prog_id, &mode, XDP_ATTACHED_SKB, + IFLA_XDP_SKB_PROG_ID, rtnl_xdp_prog_skb); + if (err) + goto err_cancel; + err = rtnl_xdp_report_one(skb, dev, &prog_id, &mode, XDP_ATTACHED_DRV, + IFLA_XDP_DRV_PROG_ID, rtnl_xdp_prog_drv); + if (err) + goto err_cancel; + err = rtnl_xdp_report_one(skb, dev, &prog_id, &mode, XDP_ATTACHED_HW, + IFLA_XDP_HW_PROG_ID, rtnl_xdp_prog_hw); + if (err) + goto err_cancel; + + err = nla_put_u8(skb, IFLA_XDP_ATTACHED, mode); if (err) goto err_cancel; - if (prog_id) { + if (prog_id && mode != XDP_ATTACHED_MULTI) { err = nla_put_u32(skb, IFLA_XDP_PROG_ID, prog_id); if (err) goto err_cancel; @@ -2759,9 +2799,12 @@ int rtnl_configure_link(struct net_device *dev, const struct ifinfomsg *ifm) return err; } - dev->rtnl_link_state = RTNL_LINK_INITIALIZED; - - __dev_notify_flags(dev, old_flags, ~0U); + if (dev->rtnl_link_state == RTNL_LINK_INITIALIZED) { + __dev_notify_flags(dev, old_flags, 0U); + } else { + dev->rtnl_link_state = RTNL_LINK_INITIALIZED; + __dev_notify_flags(dev, old_flags, ~0U); + } return 0; } EXPORT_SYMBOL(rtnl_configure_link); diff --git a/net/core/skbuff.c b/net/core/skbuff.c index 8e51f8555e11..0c1a00672ba9 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c @@ -3816,14 +3816,14 @@ err: } EXPORT_SYMBOL_GPL(skb_segment); -int skb_gro_receive(struct sk_buff **head, struct sk_buff *skb) +int skb_gro_receive(struct sk_buff *p, struct sk_buff *skb) { struct skb_shared_info *pinfo, *skbinfo = skb_shinfo(skb); unsigned int offset = skb_gro_offset(skb); unsigned int headlen = skb_headlen(skb); unsigned int len = skb_gro_len(skb); - struct sk_buff *lp, *p = *head; unsigned int delta_truesize; + struct sk_buff *lp; if (unlikely(p->len + len >= 65536)) return -E2BIG; @@ -4899,7 +4899,6 @@ EXPORT_SYMBOL(skb_try_coalesce); */ void skb_scrub_packet(struct sk_buff *skb, bool xnet) { - skb->tstamp = 0; skb->pkt_type = PACKET_HOST; skb->skb_iif = 0; skb->ignore_df = 0; @@ -4912,8 +4911,8 @@ void skb_scrub_packet(struct sk_buff *skb, bool xnet) 
return; ipvs_reset(skb); - skb_orphan(skb); skb->mark = 0; + skb->tstamp = 0; } EXPORT_SYMBOL_GPL(skb_scrub_packet); diff --git a/net/core/sock.c b/net/core/sock.c index 9e8f65585b81..03fdea5b0f57 100644 --- a/net/core/sock.c +++ b/net/core/sock.c @@ -91,6 +91,7 @@ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt +#include <asm/unaligned.h> #include <linux/capability.h> #include <linux/errno.h> #include <linux/errqueue.h> @@ -697,6 +698,7 @@ EXPORT_SYMBOL(sk_mc_loop); int sock_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen) { + struct sock_txtime sk_txtime; struct sock *sk = sock->sk; int val; int valbool; @@ -1070,6 +1072,26 @@ set_rcvbuf: } break; + case SO_TXTIME: + if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN)) { + ret = -EPERM; + } else if (optlen != sizeof(struct sock_txtime)) { + ret = -EINVAL; + } else if (copy_from_user(&sk_txtime, optval, + sizeof(struct sock_txtime))) { + ret = -EFAULT; + } else if (sk_txtime.flags & ~SOF_TXTIME_FLAGS_MASK) { + ret = -EINVAL; + } else { + sock_valbool_flag(sk, SOCK_TXTIME, true); + sk->sk_clockid = sk_txtime.clockid; + sk->sk_txtime_deadline_mode = + !!(sk_txtime.flags & SOF_TXTIME_DEADLINE_MODE); + sk->sk_txtime_report_errors = + !!(sk_txtime.flags & SOF_TXTIME_REPORT_ERRORS); + } + break; + default: ret = -ENOPROTOOPT; break; @@ -1115,6 +1137,7 @@ int sock_getsockopt(struct socket *sock, int level, int optname, u64 val64; struct linger ling; struct timeval tm; + struct sock_txtime txtime; } v; int lv = sizeof(int); @@ -1403,6 +1426,15 @@ int sock_getsockopt(struct socket *sock, int level, int optname, v.val = sock_flag(sk, SOCK_ZEROCOPY); break; + case SO_TXTIME: + lv = sizeof(v.txtime); + v.txtime.clockid = sk->sk_clockid; + v.txtime.flags |= sk->sk_txtime_deadline_mode ? + SOF_TXTIME_DEADLINE_MODE : 0; + v.txtime.flags |= sk->sk_txtime_report_errors ? + SOF_TXTIME_REPORT_ERRORS : 0; + break; + default: /* We implement the SO_SNDLOWAT etc to not be settable * (1003.1g 7). @@ -2137,6 +2169,13 @@ int __sock_cmsg_send(struct sock *sk, struct msghdr *msg, struct cmsghdr *cmsg, sockc->tsflags &= ~SOF_TIMESTAMPING_TX_RECORD_MASK; sockc->tsflags |= tsflags; break; + case SCM_TXTIME: + if (!sock_flag(sk, SOCK_TXTIME)) + return -EINVAL; + if (cmsg->cmsg_len != CMSG_LEN(sizeof(u64))) + return -EINVAL; + sockc->transmit_time = get_unaligned((u64 *)CMSG_DATA(cmsg)); + break; /* SCM_RIGHTS and SCM_CREDENTIALS are semantically in SOL_UNIX. */ case SCM_RIGHTS: case SCM_CREDENTIALS: @@ -2401,9 +2440,10 @@ int __sk_mem_raise_allocated(struct sock *sk, int size, int amt, int kind) { struct proto *prot = sk->sk_prot; long allocated = sk_memory_allocated_add(sk, amt); + bool charged = true; if (mem_cgroup_sockets_enabled && sk->sk_memcg && - !mem_cgroup_charge_skmem(sk->sk_memcg, amt)) + !(charged = mem_cgroup_charge_skmem(sk->sk_memcg, amt))) goto suppress_allocation; /* Under limit. 
*/ @@ -2461,7 +2501,8 @@ suppress_allocation: return 1; } - trace_sock_exceed_buf_limit(sk, prot, allocated); + if (kind == SK_MEM_SEND || (kind == SK_MEM_RECV && charged)) + trace_sock_exceed_buf_limit(sk, prot, allocated, kind); sk_memory_allocated_sub(sk, amt); @@ -2818,6 +2859,8 @@ void sock_init_data(struct socket *sock, struct sock *sk) sk->sk_pacing_rate = ~0U; sk->sk_pacing_shift = 10; sk->sk_incoming_cpu = -1; + + sk_rx_queue_clear(sk); /* * Before updating sk_refcnt, we must commit prior changes to memory * (Documentation/RCU/rculist_nulls.txt for details) diff --git a/net/core/xdp.c b/net/core/xdp.c index 9d1f22072d5d..57285383ed00 100644 --- a/net/core/xdp.c +++ b/net/core/xdp.c @@ -3,8 +3,11 @@ * Copyright (c) 2017 Jesper Dangaard Brouer, Red Hat Inc. * Released under terms in GPL version 2. See COPYING. */ +#include <linux/bpf.h> +#include <linux/filter.h> #include <linux/types.h> #include <linux/mm.h> +#include <linux/netdevice.h> #include <linux/slab.h> #include <linux/idr.h> #include <linux/rhashtable.h> @@ -45,8 +48,8 @@ static u32 xdp_mem_id_hashfn(const void *data, u32 len, u32 seed) BUILD_BUG_ON(FIELD_SIZEOF(struct xdp_mem_allocator, mem.id) != sizeof(u32)); - /* Use cyclic increasing ID as direct hash key, see rht_bucket_index */ - return key << RHT_HASH_RESERVED_SPACE; + /* Use cyclic increasing ID as direct hash key */ + return key; } static int xdp_mem_id_cmp(struct rhashtable_compare_arg *arg, @@ -370,3 +373,34 @@ void xdp_return_buff(struct xdp_buff *xdp) __xdp_return(xdp->data, &xdp->rxq->mem, true, xdp->handle); } EXPORT_SYMBOL_GPL(xdp_return_buff); + +int xdp_attachment_query(struct xdp_attachment_info *info, + struct netdev_bpf *bpf) +{ + bpf->prog_id = info->prog ? info->prog->aux->id : 0; + bpf->prog_flags = info->prog ? info->flags : 0; + return 0; +} +EXPORT_SYMBOL_GPL(xdp_attachment_query); + +bool xdp_attachment_flags_ok(struct xdp_attachment_info *info, + struct netdev_bpf *bpf) +{ + if (info->prog && (bpf->flags ^ info->flags) & XDP_FLAGS_MODES) { + NL_SET_ERR_MSG(bpf->extack, + "program loaded with different flags"); + return false; + } + return true; +} +EXPORT_SYMBOL_GPL(xdp_attachment_flags_ok); + +void xdp_attachment_setup(struct xdp_attachment_info *info, + struct netdev_bpf *bpf) +{ + if (info->prog) + bpf_prog_put(info->prog); + info->prog = bpf->prog; + info->flags = bpf->flags; +} +EXPORT_SYMBOL_GPL(xdp_attachment_setup); diff --git a/net/decnet/dn_nsp_in.c b/net/decnet/dn_nsp_in.c index 1b2120645730..34aba55ed573 100644 --- a/net/decnet/dn_nsp_in.c +++ b/net/decnet/dn_nsp_in.c @@ -491,6 +491,7 @@ static void dn_nsp_disc_conf(struct sock *sk, struct sk_buff *skb) break; case DN_RUN: sk->sk_shutdown |= SHUTDOWN_MASK; + /* fall through */ case DN_CC: scp->state = DN_CN; } diff --git a/net/dsa/dsa2.c b/net/dsa/dsa2.c index dc5d9af3dc80..a1917025e155 100644 --- a/net/dsa/dsa2.c +++ b/net/dsa/dsa2.c @@ -775,6 +775,20 @@ struct dsa_switch *dsa_switch_alloc(struct device *dev, size_t n) if (!ds) return NULL; + /* We avoid allocating memory outside dsa_switch + * if it is not needed. 
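+	 * The embedded word is enough for one bit per port as long as
+	 * n <= 8 * sizeof(ds->_bitmap), i.e. up to 64 ports when _bitmap
+	 * is a single 64-bit long; bigger switches fall back to the
+	 * devm-allocated bitmap below.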
+ */ + if (n <= sizeof(ds->_bitmap) * 8) { + ds->bitmap = &ds->_bitmap; + } else { + ds->bitmap = devm_kcalloc(dev, + BITS_TO_LONGS(n), + sizeof(unsigned long), + GFP_KERNEL); + if (unlikely(!ds->bitmap)) + return NULL; + } + ds->dev = dev; ds->num_ports = n; diff --git a/net/dsa/slave.c b/net/dsa/slave.c index 1e3b6a6d8a40..71536c435132 100644 --- a/net/dsa/slave.c +++ b/net/dsa/slave.c @@ -900,7 +900,7 @@ static int dsa_slave_setup_tc_block(struct net_device *dev, switch (f->command) { case TC_BLOCK_BIND: - return tcf_block_cb_register(f->block, cb, dev, dev); + return tcf_block_cb_register(f->block, cb, dev, dev, f->extack); case TC_BLOCK_UNBIND: tcf_block_cb_unregister(f->block, cb, dev); return 0; diff --git a/net/dsa/switch.c b/net/dsa/switch.c index b93511726069..142b294d3446 100644 --- a/net/dsa/switch.c +++ b/net/dsa/switch.c @@ -136,21 +136,20 @@ static int dsa_switch_mdb_add(struct dsa_switch *ds, { const struct switchdev_obj_port_mdb *mdb = info->mdb; struct switchdev_trans *trans = info->trans; - DECLARE_BITMAP(group, ds->num_ports); int port; /* Build a mask of Multicast group members */ - bitmap_zero(group, ds->num_ports); + bitmap_zero(ds->bitmap, ds->num_ports); if (ds->index == info->sw_index) - set_bit(info->port, group); + set_bit(info->port, ds->bitmap); for (port = 0; port < ds->num_ports; port++) if (dsa_is_dsa_port(ds, port)) - set_bit(port, group); + set_bit(port, ds->bitmap); if (switchdev_trans_ph_prepare(trans)) - return dsa_switch_mdb_prepare_bitmap(ds, mdb, group); + return dsa_switch_mdb_prepare_bitmap(ds, mdb, ds->bitmap); - dsa_switch_mdb_add_bitmap(ds, mdb, group); + dsa_switch_mdb_add_bitmap(ds, mdb, ds->bitmap); return 0; } @@ -204,21 +203,20 @@ static int dsa_switch_vlan_add(struct dsa_switch *ds, { const struct switchdev_obj_port_vlan *vlan = info->vlan; struct switchdev_trans *trans = info->trans; - DECLARE_BITMAP(members, ds->num_ports); int port; /* Build a mask of VLAN members */ - bitmap_zero(members, ds->num_ports); + bitmap_zero(ds->bitmap, ds->num_ports); if (ds->index == info->sw_index) - set_bit(info->port, members); + set_bit(info->port, ds->bitmap); for (port = 0; port < ds->num_ports; port++) if (dsa_is_cpu_port(ds, port) || dsa_is_dsa_port(ds, port)) - set_bit(port, members); + set_bit(port, ds->bitmap); if (switchdev_trans_ph_prepare(trans)) - return dsa_switch_vlan_prepare_bitmap(ds, vlan, members); + return dsa_switch_vlan_prepare_bitmap(ds, vlan, ds->bitmap); - dsa_switch_vlan_add_bitmap(ds, vlan, members); + dsa_switch_vlan_add_bitmap(ds, vlan, ds->bitmap); return 0; } diff --git a/net/ethernet/eth.c b/net/ethernet/eth.c index ee28440f57c5..fd8faa0dfa61 100644 --- a/net/ethernet/eth.c +++ b/net/ethernet/eth.c @@ -427,13 +427,13 @@ ssize_t sysfs_format_mac(char *buf, const unsigned char *addr, int len) } EXPORT_SYMBOL(sysfs_format_mac); -struct sk_buff **eth_gro_receive(struct sk_buff **head, - struct sk_buff *skb) +struct sk_buff *eth_gro_receive(struct list_head *head, struct sk_buff *skb) { - struct sk_buff *p, **pp = NULL; - struct ethhdr *eh, *eh2; - unsigned int hlen, off_eth; const struct packet_offload *ptype; + unsigned int hlen, off_eth; + struct sk_buff *pp = NULL; + struct ethhdr *eh, *eh2; + struct sk_buff *p; __be16 type; int flush = 1; @@ -448,7 +448,7 @@ struct sk_buff **eth_gro_receive(struct sk_buff **head, flush = 0; - for (p = *head; p; p = p->next) { + list_for_each_entry(p, head, list) { if (!NAPI_GRO_CB(p)->same_flow) continue; diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c index b403499fdabe..f2a0a3bab6b5 
100644 --- a/net/ipv4/af_inet.c +++ b/net/ipv4/af_inet.c @@ -229,6 +229,7 @@ int inet_listen(struct socket *sock, int backlog) err = inet_csk_listen_start(sk, backlog); if (err) goto out; + tcp_call_bpf(sk, BPF_SOCK_OPS_TCP_LISTEN_CB, 0, NULL); } sk->sk_max_ack_backlog = backlog; err = 0; @@ -1384,12 +1385,12 @@ out: } EXPORT_SYMBOL(inet_gso_segment); -struct sk_buff **inet_gro_receive(struct sk_buff **head, struct sk_buff *skb) +struct sk_buff *inet_gro_receive(struct list_head *head, struct sk_buff *skb) { const struct net_offload *ops; - struct sk_buff **pp = NULL; - struct sk_buff *p; + struct sk_buff *pp = NULL; const struct iphdr *iph; + struct sk_buff *p; unsigned int hlen; unsigned int off; unsigned int id; @@ -1425,7 +1426,7 @@ struct sk_buff **inet_gro_receive(struct sk_buff **head, struct sk_buff *skb) flush = (u16)((ntohl(*(__be32 *)iph) ^ skb_gro_len(skb)) | (id & ~IP_DF)); id >>= 16; - for (p = *head; p; p = p->next) { + list_for_each_entry(p, head, list) { struct iphdr *iph2; u16 flush_id; @@ -1505,8 +1506,8 @@ out: } EXPORT_SYMBOL(inet_gro_receive); -static struct sk_buff **ipip_gro_receive(struct sk_buff **head, - struct sk_buff *skb) +static struct sk_buff *ipip_gro_receive(struct list_head *head, + struct sk_buff *skb) { if (NAPI_GRO_CB(skb)->encap_mark) { NAPI_GRO_CB(skb)->flush = 1; @@ -1882,6 +1883,7 @@ fs_initcall(ipv4_offload_init); static struct packet_type ip_packet_type __read_mostly = { .type = cpu_to_be16(ETH_P_IP), .func = ip_rcv, + .list_func = ip_list_rcv, }; static int __init inet_init(void) diff --git a/net/ipv4/esp4_offload.c b/net/ipv4/esp4_offload.c index 7cf755ef9efb..bbeecd13e534 100644 --- a/net/ipv4/esp4_offload.c +++ b/net/ipv4/esp4_offload.c @@ -28,8 +28,8 @@ #include <linux/spinlock.h> #include <net/udp.h> -static struct sk_buff **esp4_gro_receive(struct sk_buff **head, - struct sk_buff *skb) +static struct sk_buff *esp4_gro_receive(struct list_head *head, + struct sk_buff *skb) { int offset = skb_gro_offset(skb); struct xfrm_offload *xo; diff --git a/net/ipv4/fou.c b/net/ipv4/fou.c index c9ec1603666b..500a59906b87 100644 --- a/net/ipv4/fou.c +++ b/net/ipv4/fou.c @@ -224,14 +224,14 @@ drop: return 0; } -static struct sk_buff **fou_gro_receive(struct sock *sk, - struct sk_buff **head, - struct sk_buff *skb) +static struct sk_buff *fou_gro_receive(struct sock *sk, + struct list_head *head, + struct sk_buff *skb) { - const struct net_offload *ops; - struct sk_buff **pp = NULL; u8 proto = fou_from_sock(sk)->protocol; const struct net_offload **offloads; + const struct net_offload *ops; + struct sk_buff *pp = NULL; /* We can clear the encap_mark for FOU as we are essentially doing * one of two possible things. 
We are either adding an L4 tunnel @@ -305,13 +305,13 @@ static struct guehdr *gue_gro_remcsum(struct sk_buff *skb, unsigned int off, return guehdr; } -static struct sk_buff **gue_gro_receive(struct sock *sk, - struct sk_buff **head, - struct sk_buff *skb) +static struct sk_buff *gue_gro_receive(struct sock *sk, + struct list_head *head, + struct sk_buff *skb) { const struct net_offload **offloads; const struct net_offload *ops; - struct sk_buff **pp = NULL; + struct sk_buff *pp = NULL; struct sk_buff *p; struct guehdr *guehdr; size_t len, optlen, hdrlen, off; @@ -397,7 +397,7 @@ static struct sk_buff **gue_gro_receive(struct sock *sk, skb_gro_pull(skb, hdrlen); - for (p = *head; p; p = p->next) { + list_for_each_entry(p, head, list) { const struct guehdr *guehdr2; if (!NAPI_GRO_CB(p)->same_flow) diff --git a/net/ipv4/gre_offload.c b/net/ipv4/gre_offload.c index 6a7d980105f6..6c63524f598a 100644 --- a/net/ipv4/gre_offload.c +++ b/net/ipv4/gre_offload.c @@ -108,10 +108,10 @@ out: return segs; } -static struct sk_buff **gre_gro_receive(struct sk_buff **head, - struct sk_buff *skb) +static struct sk_buff *gre_gro_receive(struct list_head *head, + struct sk_buff *skb) { - struct sk_buff **pp = NULL; + struct sk_buff *pp = NULL; struct sk_buff *p; const struct gre_base_hdr *greh; unsigned int hlen, grehlen; @@ -182,7 +182,7 @@ static struct sk_buff **gre_gro_receive(struct sk_buff **head, null_compute_pseudo); } - for (p = *head; p; p = p->next) { + list_for_each_entry(p, head, list) { const struct gre_base_hdr *greh2; if (!NAPI_GRO_CB(p)->same_flow) diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c index 1617604c9284..695979b7ef6d 100644 --- a/net/ipv4/icmp.c +++ b/net/ipv4/icmp.c @@ -429,14 +429,11 @@ static void icmp_reply(struct icmp_bxm *icmp_param, struct sk_buff *skb) icmp_param->data.icmph.checksum = 0; + ipcm_init(&ipc); inet->tos = ip_hdr(skb)->tos; sk->sk_mark = mark; daddr = ipc.addr = ip_hdr(skb)->saddr; saddr = fib_compute_spec_dst(skb); - ipc.opt = NULL; - ipc.tx_flags = 0; - ipc.ttl = 0; - ipc.tos = -1; if (icmp_param->replyopts.opt.opt.optlen) { ipc.opt = &icmp_param->replyopts.opt; @@ -710,11 +707,9 @@ void icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info) icmp_param.offset = skb_network_offset(skb_in); inet_sk(sk)->tos = tos; sk->sk_mark = mark; + ipcm_init(&ipc); ipc.addr = iph->saddr; ipc.opt = &icmp_param.replyopts.opt; - ipc.tx_flags = 0; - ipc.ttl = 0; - ipc.tos = -1; rt = icmp_route_lookup(net, &fl4, skb_in, iph, saddr, tos, mark, type, code, &icmp_param); diff --git a/net/ipv4/inet_fragment.c b/net/ipv4/inet_fragment.c index 1e4cf3ab560f..d3162baca9f1 100644 --- a/net/ipv4/inet_fragment.c +++ b/net/ipv4/inet_fragment.c @@ -20,6 +20,7 @@ #include <linux/skbuff.h> #include <linux/rtnetlink.h> #include <linux/slab.h> +#include <linux/rhashtable.h> #include <net/sock.h> #include <net/inet_frag.h> diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c index 2d8efeecf619..c8ca5d8f0f75 100644 --- a/net/ipv4/ip_gre.c +++ b/net/ipv4/ip_gre.c @@ -587,6 +587,8 @@ static void erspan_fb_xmit(struct sk_buff *skb, struct net_device *dev, goto err_free_skb; key = &tun_info->key; + if (!(tun_info->key.tun_flags & TUNNEL_ERSPAN_OPT)) + goto err_free_rt; md = ip_tunnel_info_opts(tun_info); if (!md) goto err_free_rt; diff --git a/net/ipv4/ip_input.c b/net/ipv4/ip_input.c index 7582713dd18f..3196cf58f418 100644 --- a/net/ipv4/ip_input.c +++ b/net/ipv4/ip_input.c @@ -307,7 +307,8 @@ drop: return true; } -static int ip_rcv_finish(struct net *net, struct sock *sk, struct sk_buff 
*skb) +static int ip_rcv_finish_core(struct net *net, struct sock *sk, + struct sk_buff *skb) { const struct iphdr *iph = ip_hdr(skb); int (*edemux)(struct sk_buff *skb); @@ -315,13 +316,6 @@ static int ip_rcv_finish(struct net *net, struct sock *sk, struct sk_buff *skb) struct rtable *rt; int err; - /* if ingress device is enslaved to an L3 master device pass the - * skb to its handler for processing - */ - skb = l3mdev_ip_rcv(skb); - if (!skb) - return NET_RX_SUCCESS; - if (net->ipv4.sysctl_ip_early_demux && !skb_dst(skb) && !skb->sk && @@ -393,7 +387,7 @@ static int ip_rcv_finish(struct net *net, struct sock *sk, struct sk_buff *skb) goto drop; } - return dst_input(skb); + return NET_RX_SUCCESS; drop: kfree_skb(skb); @@ -405,13 +399,29 @@ drop_error: goto drop; } +static int ip_rcv_finish(struct net *net, struct sock *sk, struct sk_buff *skb) +{ + int ret; + + /* if ingress device is enslaved to an L3 master device pass the + * skb to its handler for processing + */ + skb = l3mdev_ip_rcv(skb); + if (!skb) + return NET_RX_SUCCESS; + + ret = ip_rcv_finish_core(net, sk, skb); + if (ret != NET_RX_DROP) + ret = dst_input(skb); + return ret; +} + /* * Main IP Receive routine. */ -int ip_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt, struct net_device *orig_dev) +static struct sk_buff *ip_rcv_core(struct sk_buff *skb, struct net *net) { const struct iphdr *iph; - struct net *net; u32 len; /* When the interface is in promisc. mode, drop all the crap @@ -421,7 +431,6 @@ int ip_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt, goto drop; - net = dev_net(dev); __IP_UPD_PO_STATS(net, IPSTATS_MIB_IN, skb->len); skb = skb_share_check(skb, GFP_ATOMIC); @@ -489,9 +498,7 @@ int ip_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt, /* Must drop socket now because of tproxy. 
*/ skb_orphan(skb); - return NF_HOOK(NFPROTO_IPV4, NF_INET_PRE_ROUTING, - net, NULL, skb, dev, NULL, - ip_rcv_finish); + return skb; csum_error: __IP_INC_STATS(net, IPSTATS_MIB_CSUMERRORS); @@ -500,5 +507,113 @@ inhdr_error: drop: kfree_skb(skb); out: - return NET_RX_DROP; + return NULL; +} + +/* + * IP receive entry point + */ +int ip_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt, + struct net_device *orig_dev) +{ + struct net *net = dev_net(dev); + + skb = ip_rcv_core(skb, net); + if (skb == NULL) + return NET_RX_DROP; + return NF_HOOK(NFPROTO_IPV4, NF_INET_PRE_ROUTING, + net, NULL, skb, dev, NULL, + ip_rcv_finish); +} + +static void ip_sublist_rcv_finish(struct list_head *head) +{ + struct sk_buff *skb, *next; + + list_for_each_entry_safe(skb, next, head, list) { + list_del(&skb->list); + /* Handle ip{6}_forward case, as sch_direct_xmit have + * another kind of SKB-list usage (see validate_xmit_skb_list) + */ + skb->next = NULL; + dst_input(skb); + } +} + +static void ip_list_rcv_finish(struct net *net, struct sock *sk, + struct list_head *head) +{ + struct dst_entry *curr_dst = NULL; + struct sk_buff *skb, *next; + struct list_head sublist; + + INIT_LIST_HEAD(&sublist); + list_for_each_entry_safe(skb, next, head, list) { + struct dst_entry *dst; + + list_del(&skb->list); + /* if ingress device is enslaved to an L3 master device pass the + * skb to its handler for processing + */ + skb = l3mdev_ip_rcv(skb); + if (!skb) + continue; + if (ip_rcv_finish_core(net, sk, skb) == NET_RX_DROP) + continue; + + dst = skb_dst(skb); + if (curr_dst != dst) { + /* dispatch old sublist */ + if (!list_empty(&sublist)) + ip_sublist_rcv_finish(&sublist); + /* start new sublist */ + INIT_LIST_HEAD(&sublist); + curr_dst = dst; + } + list_add_tail(&skb->list, &sublist); + } + /* dispatch final sublist */ + ip_sublist_rcv_finish(&sublist); +} + +static void ip_sublist_rcv(struct list_head *head, struct net_device *dev, + struct net *net) +{ + NF_HOOK_LIST(NFPROTO_IPV4, NF_INET_PRE_ROUTING, net, NULL, + head, dev, NULL, ip_rcv_finish); + ip_list_rcv_finish(net, NULL, head); +} + +/* Receive a list of IP packets */ +void ip_list_rcv(struct list_head *head, struct packet_type *pt, + struct net_device *orig_dev) +{ + struct net_device *curr_dev = NULL; + struct net *curr_net = NULL; + struct sk_buff *skb, *next; + struct list_head sublist; + + INIT_LIST_HEAD(&sublist); + list_for_each_entry_safe(skb, next, head, list) { + struct net_device *dev = skb->dev; + struct net *net = dev_net(dev); + + list_del(&skb->list); + skb = ip_rcv_core(skb, net); + if (skb == NULL) + continue; + + if (curr_dev != dev || curr_net != net) { + /* dispatch old sublist */ + if (!list_empty(&sublist)) + ip_sublist_rcv(&sublist, curr_dev, curr_net); + /* start new sublist */ + INIT_LIST_HEAD(&sublist); + curr_dev = dev; + curr_net = net; + } + list_add_tail(&skb->list, &sublist); + } + /* dispatch final sublist */ + ip_sublist_rcv(&sublist, curr_dev, curr_net); } diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c index b3308e9d9762..e2b6bd478afb 100644 --- a/net/ipv4/ip_output.c +++ b/net/ipv4/ip_output.c @@ -423,7 +423,8 @@ static void ip_copy_addrs(struct iphdr *iph, const struct flowi4 *fl4) } /* Note: skb->sk can be different from sk, in case of tunnels */ -int ip_queue_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl) +int __ip_queue_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl, + __u8 tos) { struct inet_sock *inet = inet_sk(sk); struct net *net = sock_net(sk); @@ -462,7 
+463,7 @@ int ip_queue_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl) inet->inet_dport, inet->inet_sport, sk->sk_protocol, - RT_CONN_FLAGS(sk), + RT_CONN_FLAGS_TOS(sk, tos), sk->sk_bound_dev_if); if (IS_ERR(rt)) goto no_route; @@ -478,7 +479,7 @@ packet_routed: skb_push(skb, sizeof(struct iphdr) + (inet_opt ? inet_opt->opt.optlen : 0)); skb_reset_network_header(skb); iph = ip_hdr(skb); - *((__be16 *)iph) = htons((4 << 12) | (5 << 8) | (inet->tos & 0xff)); + *((__be16 *)iph) = htons((4 << 12) | (5 << 8) | (tos & 0xff)); if (ip_dont_fragment(sk, &rt->dst) && !skb->ignore_df) iph->frag_off = htons(IP_DF); else @@ -511,7 +512,7 @@ no_route: kfree_skb(skb); return -EHOSTUNREACH; } -EXPORT_SYMBOL(ip_queue_xmit); +EXPORT_SYMBOL(__ip_queue_xmit); static void ip_copy_metadata(struct sk_buff *to, struct sk_buff *from) { @@ -1145,14 +1146,15 @@ static int ip_setup_cork(struct sock *sk, struct inet_cork *cork, cork->fragsize = ip_sk_use_pmtu(sk) ? dst_mtu(&rt->dst) : rt->dst.dev->mtu; - cork->gso_size = sk->sk_type == SOCK_DGRAM && - sk->sk_protocol == IPPROTO_UDP ? ipc->gso_size : 0; + cork->gso_size = ipc->gso_size; cork->dst = &rt->dst; cork->length = 0; cork->ttl = ipc->ttl; cork->tos = ipc->tos; cork->priority = ipc->priority; - cork->tx_flags = ipc->tx_flags; + cork->transmit_time = ipc->sockc.transmit_time; + cork->tx_flags = 0; + sock_tx_timestamp(sk, ipc->sockc.tsflags, &cork->tx_flags); return 0; } @@ -1413,6 +1415,7 @@ struct sk_buff *__ip_make_skb(struct sock *sk, skb->priority = (cork->tos != -1) ? cork->priority: sk->sk_priority; skb->mark = sk->sk_mark; + skb->tstamp = cork->transmit_time; /* * Steal rt from cork.dst to avoid a pair of atomic_inc/atomic_dec * on dst refcount @@ -1545,11 +1548,8 @@ void ip_send_unicast_reply(struct sock *sk, struct sk_buff *skb, if (__ip_options_echo(net, &replyopts.opt.opt, skb, sopt)) return; + ipcm_init(&ipc); ipc.addr = daddr; - ipc.opt = NULL; - ipc.tx_flags = 0; - ipc.ttl = 0; - ipc.tos = -1; if (replyopts.opt.opt.optlen) { ipc.opt = &replyopts.opt; diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c index 9f79b9803a16..5660adcf7a04 100644 --- a/net/ipv4/ipmr.c +++ b/net/ipv4/ipmr.c @@ -60,6 +60,7 @@ #include <linux/netfilter_ipv4.h> #include <linux/compat.h> #include <linux/export.h> +#include <linux/rhashtable.h> #include <net/ip_tunnels.h> #include <net/checksum.h> #include <net/netlink.h> @@ -1051,7 +1052,7 @@ static int ipmr_cache_report(struct mr_table *mrt, struct sk_buff *skb; int ret; - if (assert == IGMPMSG_WHOLEPKT) + if (assert == IGMPMSG_WHOLEPKT || assert == IGMPMSG_WRVIFWHOLE) skb = skb_realloc_headroom(pkt, sizeof(struct iphdr)); else skb = alloc_skb(128, GFP_ATOMIC); @@ -1059,7 +1060,7 @@ static int ipmr_cache_report(struct mr_table *mrt, if (!skb) return -ENOBUFS; - if (assert == IGMPMSG_WHOLEPKT) { + if (assert == IGMPMSG_WHOLEPKT || assert == IGMPMSG_WRVIFWHOLE) { /* Ugly, but we have no choice with this interface. * Duplicate old header, fix ihl, length etc. 
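The ip_output.c hunk above turns the ToS into an explicit argument of __ip_queue_xmit(), feeding it both to the route lookup (RT_CONN_FLAGS_TOS) and into the first 16 bits of the IPv4 header instead of always reading inet->tos; presumably the existing ip_queue_xmit() callers keep their old behaviour through a thin wrapper that passes inet_sk(sk)->tos, though that wrapper is not part of this hunk. A quick runnable check of how that leading header word is laid out (version 4, IHL 5, then the ToS byte), mirroring the expression in the hunk:

#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>   /* htons(), ntohs() */

int main(void)
{
	uint8_t tos = 0x10;  /* example ToS byte */
	/* same expression as in __ip_queue_xmit(): <version:4><ihl:4><tos:8> */
	uint16_t word = htons((4 << 12) | (5 << 8) | (tos & 0xff));

	printf("first 16 bits of the IPv4 header (network order): 0x%04x\n", word);
	printf("host order: 0x%04x -> version=%u ihl=%u tos=0x%02x\n",
	       ntohs(word), ntohs(word) >> 12, (ntohs(word) >> 8) & 0xf,
	       ntohs(word) & 0xff);
	return 0;
}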
* And all this only to mangle msg->im_msgtype and @@ -1070,9 +1071,12 @@ static int ipmr_cache_report(struct mr_table *mrt, skb_reset_transport_header(skb); msg = (struct igmpmsg *)skb_network_header(skb); memcpy(msg, skb_network_header(pkt), sizeof(struct iphdr)); - msg->im_msgtype = IGMPMSG_WHOLEPKT; + msg->im_msgtype = assert; msg->im_mbz = 0; - msg->im_vif = mrt->mroute_reg_vif_num; + if (assert == IGMPMSG_WRVIFWHOLE) + msg->im_vif = vifi; + else + msg->im_vif = mrt->mroute_reg_vif_num; ip_hdr(skb)->ihl = sizeof(struct iphdr) >> 2; ip_hdr(skb)->tot_len = htons(ntohs(ip_hdr(pkt)->tot_len) + sizeof(struct iphdr)); @@ -1371,6 +1375,7 @@ int ip_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, struct mr_table *mrt; struct vifctl vif; struct mfcctl mfc; + bool do_wrvifwhole; u32 uval; /* There's one exception to the lock - MRT_DONE which needs to unlock */ @@ -1501,10 +1506,12 @@ int ip_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, break; } + do_wrvifwhole = (val == IGMPMSG_WRVIFWHOLE); val = !!val; if (val != mrt->mroute_do_pim) { mrt->mroute_do_pim = val; mrt->mroute_do_assert = val; + mrt->mroute_do_wrvifwhole = do_wrvifwhole; } break; case MRT_TABLE: @@ -1982,6 +1989,9 @@ static void ip_mr_forward(struct net *net, struct mr_table *mrt, MFC_ASSERT_THRESH)) { c->_c.mfc_un.res.last_assert = jiffies; ipmr_cache_report(mrt, skb, true_vifi, IGMPMSG_WRONGVIF); + if (mrt->mroute_do_wrvifwhole) + ipmr_cache_report(mrt, skb, true_vifi, + IGMPMSG_WRVIFWHOLE); } goto dont_forward; } @@ -2658,7 +2668,9 @@ static bool ipmr_fill_table(struct mr_table *mrt, struct sk_buff *skb) mrt->mroute_reg_vif_num) || nla_put_u8(skb, IPMRA_TABLE_MROUTE_DO_ASSERT, mrt->mroute_do_assert) || - nla_put_u8(skb, IPMRA_TABLE_MROUTE_DO_PIM, mrt->mroute_do_pim)) + nla_put_u8(skb, IPMRA_TABLE_MROUTE_DO_PIM, mrt->mroute_do_pim) || + nla_put_u8(skb, IPMRA_TABLE_MROUTE_DO_WRVIFWHOLE, + mrt->mroute_do_wrvifwhole)) return false; return true; diff --git a/net/ipv4/ipmr_base.c b/net/ipv4/ipmr_base.c index cafb0506c8c9..1ad9aa62a97b 100644 --- a/net/ipv4/ipmr_base.c +++ b/net/ipv4/ipmr_base.c @@ -2,6 +2,7 @@ * Common logic shared by IPv4 [ipmr] and IPv6 [ip6mr] implementation */ +#include <linux/rhashtable.h> #include <linux/mroute_base.h> /* Sets everything common except 'dev', since that is done under locking */ diff --git a/net/ipv4/netfilter/nf_log_ipv4.c b/net/ipv4/netfilter/nf_log_ipv4.c index 4388de0e5380..1e6f28c97d3a 100644 --- a/net/ipv4/netfilter/nf_log_ipv4.c +++ b/net/ipv4/netfilter/nf_log_ipv4.c @@ -35,7 +35,7 @@ static const struct nf_loginfo default_loginfo = { }; /* One level of recursion won't kill us */ -static void dump_ipv4_packet(struct nf_log_buf *m, +static void dump_ipv4_packet(struct net *net, struct nf_log_buf *m, const struct nf_loginfo *info, const struct sk_buff *skb, unsigned int iphoff) { @@ -183,7 +183,7 @@ static void dump_ipv4_packet(struct nf_log_buf *m, /* Max length: 3+maxlen */ if (!iphoff) { /* Only recurse once. 
*/ nf_log_buf_add(m, "["); - dump_ipv4_packet(m, info, skb, + dump_ipv4_packet(net, m, info, skb, iphoff + ih->ihl*4+sizeof(_icmph)); nf_log_buf_add(m, "] "); } @@ -251,7 +251,7 @@ static void dump_ipv4_packet(struct nf_log_buf *m, /* Max length: 15 "UID=4294967295 " */ if ((logflags & NF_LOG_UID) && !iphoff) - nf_log_dump_sk_uid_gid(m, skb->sk); + nf_log_dump_sk_uid_gid(net, m, skb->sk); /* Max length: 16 "MARK=0xFFFFFFFF " */ if (!iphoff && skb->mark) @@ -333,7 +333,7 @@ static void nf_log_ip_packet(struct net *net, u_int8_t pf, if (in != NULL) dump_ipv4_mac_header(m, loginfo, skb); - dump_ipv4_packet(m, loginfo, skb, 0); + dump_ipv4_packet(net, m, loginfo, skb, 0); nf_log_buf_close(m); } diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c index 2ed64bca54e3..b54c964ad925 100644 --- a/net/ipv4/ping.c +++ b/net/ipv4/ping.c @@ -739,13 +739,7 @@ static int ping_v4_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) /* no remote port */ } - ipc.sockc.tsflags = sk->sk_tsflags; - ipc.addr = inet->inet_saddr; - ipc.opt = NULL; - ipc.oif = sk->sk_bound_dev_if; - ipc.tx_flags = 0; - ipc.ttl = 0; - ipc.tos = -1; + ipcm_init_sk(&ipc, inet); if (msg->msg_controllen) { err = ip_cmsg_send(sk, msg, &ipc, false); @@ -769,8 +763,6 @@ static int ping_v4_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) rcu_read_unlock(); } - sock_tx_timestamp(sk, ipc.sockc.tsflags, &ipc.tx_flags); - saddr = ipc.addr; ipc.addr = faddr = daddr; diff --git a/net/ipv4/proc.c b/net/ipv4/proc.c index 77350c1256ce..b46e4cf9a55a 100644 --- a/net/ipv4/proc.c +++ b/net/ipv4/proc.c @@ -287,6 +287,8 @@ static const struct snmp_mib snmp4_net_list[] = { SNMP_MIB_ITEM("TCPDelivered", LINUX_MIB_TCPDELIVERED), SNMP_MIB_ITEM("TCPDeliveredCE", LINUX_MIB_TCPDELIVEREDCE), SNMP_MIB_ITEM("TCPAckCompressed", LINUX_MIB_TCPACKCOMPRESSED), + SNMP_MIB_ITEM("TCPZeroWindowDrop", LINUX_MIB_TCPZEROWINDOWDROP), + SNMP_MIB_ITEM("TCPRcvQDrop", LINUX_MIB_TCPRCVQDROP), SNMP_MIB_SENTINEL }; diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c index abb3c9490c55..33df4d76db2d 100644 --- a/net/ipv4/raw.c +++ b/net/ipv4/raw.c @@ -381,6 +381,7 @@ static int raw_send_hdrinc(struct sock *sk, struct flowi4 *fl4, skb->priority = sk->sk_priority; skb->mark = sk->sk_mark; + skb->tstamp = sockc->transmit_time; skb_dst_set(skb, &rt->dst); *rtp = NULL; @@ -561,13 +562,7 @@ static int raw_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) daddr = inet->inet_daddr; } - ipc.sockc.tsflags = sk->sk_tsflags; - ipc.addr = inet->inet_saddr; - ipc.opt = NULL; - ipc.tx_flags = 0; - ipc.ttl = 0; - ipc.tos = -1; - ipc.oif = sk->sk_bound_dev_if; + ipcm_init_sk(&ipc, inet); if (msg->msg_controllen) { err = ip_cmsg_send(sk, msg, &ipc, false); @@ -670,8 +665,6 @@ back_from_confirm: &rt, msg->msg_flags, &ipc.sockc); else { - sock_tx_timestamp(sk, ipc.sockc.tsflags, &ipc.tx_flags); - if (!ipc.addr) ipc.addr = fl4.daddr; lock_sock(sk); diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index 4491faf83f4f..bce53b1728a6 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c @@ -817,8 +817,7 @@ ssize_t tcp_splice_read(struct socket *sock, loff_t *ppos, * This occurs when user tries to read * from never connected socket. 
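Several sendmsg paths in this series (ping above, raw, and later udp) stop open-coding the ipcm_cookie setup and call ipcm_init_sk() or ipcm_init() instead; the explicit ipc.tx_flags = 0 and the sock_tx_timestamp() call also disappear from the callers because ip_setup_cork() now handles them, as the ip_output.c hunk earlier shows. The helpers themselves live in a header that is not part of this diff; judging only from the fields the removed code used to set, they presumably look roughly like the following kernel-style sketch, which is a hypothetical reconstruction rather than the actual definition.

/* Hypothetical reconstruction, inferred from the removed per-caller initializers. */
static inline void ipcm_init(struct ipcm_cookie *ipcm)
{
	*ipcm = (struct ipcm_cookie) { .tos = -1 };	/* opt, ttl, gso_size, ... zeroed */
}

static inline void ipcm_init_sk(struct ipcm_cookie *ipcm,
				const struct inet_sock *inet)
{
	ipcm_init(ipcm);

	ipcm->sockc.tsflags = inet->sk.sk_tsflags;
	ipcm->oif = inet->sk.sk_bound_dev_if;
	ipcm->addr = inet->inet_saddr;
}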
*/ - if (!sock_flag(sk, SOCK_DONE)) - ret = -ENOTCONN; + ret = -ENOTCONN; break; } if (!timeo) { @@ -1241,7 +1240,7 @@ int tcp_sendmsg_locked(struct sock *sk, struct msghdr *msg, size_t size) /* 'common' sending to sendq */ } - sockc.tsflags = sk->sk_tsflags; + sockcm_init(&sockc, sk); if (msg->msg_controllen) { err = sock_cmsg_send(sk, msg, &sockc); if (unlikely(err)) { @@ -1275,9 +1274,6 @@ restart: int linear; new_segment: - /* Allocate new segment. If the interface is SG, - * allocate skb fitting to single page. - */ if (!sk_stream_memory_free(sk)) goto wait_for_sndbuf; @@ -2042,13 +2038,10 @@ int tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int nonblock, break; if (sk->sk_state == TCP_CLOSE) { - if (!sock_flag(sk, SOCK_DONE)) { - /* This occurs when user tries to read - * from never connected socket. - */ - copied = -ENOTCONN; - break; - } + /* This occurs when user tries to read + * from never connected socket. + */ + copied = -ENOTCONN; break; } @@ -2576,6 +2569,7 @@ int tcp_disconnect(struct sock *sk, int flags) sk->sk_shutdown = 0; sock_reset_flag(sk, SOCK_DONE); tp->srtt_us = 0; + tp->rcv_rtt_last_tsecr = 0; tp->write_seq += tp->max_window + 2; if (tp->write_seq == 0) tp->write_seq = 1; diff --git a/net/ipv4/tcp_bbr.c b/net/ipv4/tcp_bbr.c index 58e2f479ffb4..3b5f45b9e81e 100644 --- a/net/ipv4/tcp_bbr.c +++ b/net/ipv4/tcp_bbr.c @@ -205,7 +205,11 @@ static u32 bbr_bw(const struct sock *sk) */ static u64 bbr_rate_bytes_per_sec(struct sock *sk, u64 rate, int gain) { - rate *= tcp_mss_to_mtu(sk, tcp_sk(sk)->mss_cache); + unsigned int mss = tcp_sk(sk)->mss_cache; + + if (!tcp_needs_internal_pacing(sk)) + mss = tcp_mss_to_mtu(sk, mss); + rate *= mss; rate *= gain; rate >>= BBR_SCALE; rate *= USEC_PER_SEC; diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index 8e5522c6833a..91dbb9afb950 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@ -78,6 +78,7 @@ #include <linux/errqueue.h> #include <trace/events/tcp.h> #include <linux/static_key.h> +#include <net/busy_poll.h> int sysctl_tcp_max_orphans __read_mostly = NR_FILE; @@ -582,9 +583,12 @@ static inline void tcp_rcv_rtt_measure_ts(struct sock *sk, { struct tcp_sock *tp = tcp_sk(sk); - if (tp->rx_opt.rcv_tsecr && - (TCP_SKB_CB(skb)->end_seq - - TCP_SKB_CB(skb)->seq >= inet_csk(sk)->icsk_ack.rcv_mss)) { + if (tp->rx_opt.rcv_tsecr == tp->rcv_rtt_last_tsecr) + return; + tp->rcv_rtt_last_tsecr = tp->rx_opt.rcv_tsecr; + + if (TCP_SKB_CB(skb)->end_seq - + TCP_SKB_CB(skb)->seq >= inet_csk(sk)->icsk_ack.rcv_mss) { u32 delta = tcp_time_stamp(tp) - tp->rx_opt.rcv_tsecr; u32 delta_us; @@ -3458,7 +3462,7 @@ static void tcp_send_challenge_ack(struct sock *sk, const struct sk_buff *skb) static void tcp_store_ts_recent(struct tcp_sock *tp) { tp->rx_opt.ts_recent = tp->rx_opt.rcv_tsval; - tp->rx_opt.ts_recent_stamp = get_seconds(); + tp->rx_opt.ts_recent_stamp = ktime_get_seconds(); } static void tcp_replace_ts_recent(struct tcp_sock *tp, u32 seq) @@ -4339,6 +4343,11 @@ static bool tcp_try_coalesce(struct sock *sk, if (TCP_SKB_CB(from)->seq != TCP_SKB_CB(to)->end_seq) return false; +#ifdef CONFIG_TLS_DEVICE + if (from->decrypted != to->decrypted) + return false; +#endif + if (!skb_try_coalesce(to, from, fragstolen, &delta)) return false; @@ -4617,8 +4626,10 @@ int tcp_send_rcvq(struct sock *sk, struct msghdr *msg, size_t size) skb->data_len = data_len; skb->len = size; - if (tcp_try_rmem_schedule(sk, skb, skb->truesize)) + if (tcp_try_rmem_schedule(sk, skb, skb->truesize)) { + NET_INC_STATS(sock_net(sk), 
LINUX_MIB_TCPRCVQDROP); goto err_free; + } err = skb_copy_datagram_from_iter(skb, 0, &msg->msg_iter, size); if (err) @@ -4674,18 +4685,21 @@ static void tcp_data_queue(struct sock *sk, struct sk_buff *skb) * Out of sequence packets to the out_of_order_queue. */ if (TCP_SKB_CB(skb)->seq == tp->rcv_nxt) { - if (tcp_receive_window(tp) == 0) + if (tcp_receive_window(tp) == 0) { + NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPZEROWINDOWDROP); goto out_of_window; + } /* Ok. In sequence. In window. */ queue_and_out: if (skb_queue_len(&sk->sk_receive_queue) == 0) sk_forced_mem_schedule(sk, skb->truesize); - else if (tcp_try_rmem_schedule(sk, skb, skb->truesize)) + else if (tcp_try_rmem_schedule(sk, skb, skb->truesize)) { + NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPRCVQDROP); goto drop; + } eaten = tcp_queue_rcv(sk, skb, 0, &fragstolen); - tcp_rcv_nxt_update(tp, TCP_SKB_CB(skb)->end_seq); if (skb->len) tcp_event_data_recv(sk, skb); if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) @@ -4741,8 +4755,10 @@ drop: /* If window is closed, drop tail of packet. But after * remembering D-SACK for its head made in previous line. */ - if (!tcp_receive_window(tp)) + if (!tcp_receive_window(tp)) { + NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPZEROWINDOWDROP); goto out_of_window; + } goto queue_and_out; } @@ -4860,6 +4876,9 @@ restart: break; memcpy(nskb->cb, skb->cb, sizeof(skb->cb)); +#ifdef CONFIG_TLS_DEVICE + nskb->decrypted = skb->decrypted; +#endif TCP_SKB_CB(nskb)->seq = TCP_SKB_CB(nskb)->end_seq = start; if (list) __skb_queue_before(list, skb, nskb); @@ -4887,6 +4906,10 @@ restart: skb == tail || (TCP_SKB_CB(skb)->tcp_flags & (TCPHDR_SYN | TCPHDR_FIN))) goto end; +#ifdef CONFIG_TLS_DEVICE + if (skb->decrypted != nskb->decrypted) + goto end; +#endif } } } @@ -5484,6 +5507,11 @@ void tcp_rcv_established(struct sock *sk, struct sk_buff *skb) tcp_ack(sk, skb, 0); __kfree_skb(skb); tcp_data_snd_check(sk); + /* When receiving pure ack in fast path, update + * last ts ecr directly instead of calling + * tcp_rcv_rtt_measure_ts() + */ + tp->rcv_rtt_last_tsecr = tp->rx_opt.rcv_tsecr; return; } else { /* Header too small */ TCP_INC_STATS(sock_net(sk), TCP_MIB_INERRS); @@ -5585,6 +5613,7 @@ void tcp_finish_connect(struct sock *sk, struct sk_buff *skb) if (skb) { icsk->icsk_af_ops->sk_rx_dst_set(sk, skb); security_inet_conn_established(sk, skb); + sk_mark_napi_id(sk, skb); } tcp_init_transfer(sk, BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB); @@ -6413,6 +6442,7 @@ int tcp_conn_request(struct request_sock_ops *rsk_ops, tcp_rsk(req)->snt_isn = isn; tcp_rsk(req)->txhash = net_tx_rndhash(); tcp_openreq_init_rwin(req, sk, dst); + sk_rx_queue_set(req_to_sk(req), skb); if (!want_cookie) { tcp_reqsk_record_syn(sk, req, skb); fastopen_sk = tcp_try_fastopen(sk, skb, req, &foc, dst); diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index 3b2711e33e4c..9e041fa5c545 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c @@ -155,7 +155,8 @@ int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp) and use initial timestamp retrieved from peer table. 
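The two MIB entries added to snmp4_net_list earlier (TCPZeroWindowDrop and TCPRcvQDrop) are bumped at the drop sites in the tcp_input.c hunks above and are exported through /proc/net/netstat like any other TcpExt counter. A small userspace reader is sketched below; it assumes a kernel that already carries these patches (older kernels simply will not list the names) and relies on the usual netstat layout of a "TcpExt:" header line followed by a "TcpExt:" values line.

#include <stdio.h>
#include <string.h>

int main(void)
{
	char names[4096], values[4096];
	FILE *f = fopen("/proc/net/netstat", "r");

	if (!f) {
		perror("/proc/net/netstat");
		return 1;
	}
	/* the file comes in pairs: "TcpExt: Name1 Name2 ..." then "TcpExt: v1 v2 ..." */
	while (fgets(names, sizeof(names), f) && fgets(values, sizeof(values), f)) {
		char *sn, *sv;
		char *n, *v;

		if (strncmp(names, "TcpExt:", 7) != 0)
			continue;
		n = strtok_r(names + 7, " \n", &sn);
		v = strtok_r(values + 7, " \n", &sv);
		while (n && v) {
			if (strstr(n, "Drop"))	/* matches TCPZeroWindowDrop, TCPRcvQDrop, ... */
				printf("%s = %s\n", n, v);
			n = strtok_r(NULL, " \n", &sn);
			v = strtok_r(NULL, " \n", &sv);
		}
		break;
	}
	fclose(f);
	return 0;
}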
*/ if (tcptw->tw_ts_recent_stamp && - (!twp || (reuse && get_seconds() - tcptw->tw_ts_recent_stamp > 1))) { + (!twp || (reuse && time_after32(ktime_get_seconds(), + tcptw->tw_ts_recent_stamp)))) { /* In case of repair and re-using TIME-WAIT sockets we still * want to be sure that it is safe as above but honor the * sequence numbers and time stamps set as part of the repair diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c index 1dda1341a223..75ef332a7caf 100644 --- a/net/ipv4/tcp_minisocks.c +++ b/net/ipv4/tcp_minisocks.c @@ -144,7 +144,7 @@ tcp_timewait_state_process(struct inet_timewait_sock *tw, struct sk_buff *skb, tw->tw_substate = TCP_TIME_WAIT; tcptw->tw_rcv_nxt = TCP_SKB_CB(skb)->end_seq; if (tmp_opt.saw_tstamp) { - tcptw->tw_ts_recent_stamp = get_seconds(); + tcptw->tw_ts_recent_stamp = ktime_get_seconds(); tcptw->tw_ts_recent = tmp_opt.rcv_tsval; } @@ -189,7 +189,7 @@ kill: if (tmp_opt.saw_tstamp) { tcptw->tw_ts_recent = tmp_opt.rcv_tsval; - tcptw->tw_ts_recent_stamp = get_seconds(); + tcptw->tw_ts_recent_stamp = ktime_get_seconds(); } inet_twsk_put(tw); @@ -449,119 +449,122 @@ struct sock *tcp_create_openreq_child(const struct sock *sk, struct sk_buff *skb) { struct sock *newsk = inet_csk_clone_lock(sk, req, GFP_ATOMIC); + const struct inet_request_sock *ireq = inet_rsk(req); + struct tcp_request_sock *treq = tcp_rsk(req); + struct inet_connection_sock *newicsk; + struct tcp_sock *oldtp, *newtp; - if (newsk) { - const struct inet_request_sock *ireq = inet_rsk(req); - struct tcp_request_sock *treq = tcp_rsk(req); - struct inet_connection_sock *newicsk = inet_csk(newsk); - struct tcp_sock *newtp = tcp_sk(newsk); - struct tcp_sock *oldtp = tcp_sk(sk); - - smc_check_reset_syn_req(oldtp, req, newtp); - - /* Now setup tcp_sock */ - newtp->pred_flags = 0; - - newtp->rcv_wup = newtp->copied_seq = - newtp->rcv_nxt = treq->rcv_isn + 1; - newtp->segs_in = 1; - - newtp->snd_sml = newtp->snd_una = - newtp->snd_nxt = newtp->snd_up = treq->snt_isn + 1; - - INIT_LIST_HEAD(&newtp->tsq_node); - INIT_LIST_HEAD(&newtp->tsorted_sent_queue); - - tcp_init_wl(newtp, treq->rcv_isn); - - newtp->srtt_us = 0; - newtp->mdev_us = jiffies_to_usecs(TCP_TIMEOUT_INIT); - minmax_reset(&newtp->rtt_min, tcp_jiffies32, ~0U); - newicsk->icsk_rto = TCP_TIMEOUT_INIT; - newicsk->icsk_ack.lrcvtime = tcp_jiffies32; - - newtp->packets_out = 0; - newtp->retrans_out = 0; - newtp->sacked_out = 0; - newtp->snd_ssthresh = TCP_INFINITE_SSTHRESH; - newtp->tlp_high_seq = 0; - newtp->lsndtime = tcp_jiffies32; - newsk->sk_txhash = treq->txhash; - newtp->last_oow_ack_time = 0; - newtp->total_retrans = req->num_retrans; - - /* So many TCP implementations out there (incorrectly) count the - * initial SYN frame in their delayed-ACK and congestion control - * algorithms that we must have the following bandaid to talk - * efficiently to them. -DaveM - */ - newtp->snd_cwnd = TCP_INIT_CWND; - newtp->snd_cwnd_cnt = 0; - - /* There's a bubble in the pipe until at least the first ACK. 
*/ - newtp->app_limited = ~0U; - - tcp_init_xmit_timers(newsk); - newtp->write_seq = newtp->pushed_seq = treq->snt_isn + 1; - - newtp->rx_opt.saw_tstamp = 0; - - newtp->rx_opt.dsack = 0; - newtp->rx_opt.num_sacks = 0; - - newtp->urg_data = 0; - - if (sock_flag(newsk, SOCK_KEEPOPEN)) - inet_csk_reset_keepalive_timer(newsk, - keepalive_time_when(newtp)); - - newtp->rx_opt.tstamp_ok = ireq->tstamp_ok; - newtp->rx_opt.sack_ok = ireq->sack_ok; - newtp->window_clamp = req->rsk_window_clamp; - newtp->rcv_ssthresh = req->rsk_rcv_wnd; - newtp->rcv_wnd = req->rsk_rcv_wnd; - newtp->rx_opt.wscale_ok = ireq->wscale_ok; - if (newtp->rx_opt.wscale_ok) { - newtp->rx_opt.snd_wscale = ireq->snd_wscale; - newtp->rx_opt.rcv_wscale = ireq->rcv_wscale; - } else { - newtp->rx_opt.snd_wscale = newtp->rx_opt.rcv_wscale = 0; - newtp->window_clamp = min(newtp->window_clamp, 65535U); - } - newtp->snd_wnd = (ntohs(tcp_hdr(skb)->window) << - newtp->rx_opt.snd_wscale); - newtp->max_window = newtp->snd_wnd; - - if (newtp->rx_opt.tstamp_ok) { - newtp->rx_opt.ts_recent = req->ts_recent; - newtp->rx_opt.ts_recent_stamp = get_seconds(); - newtp->tcp_header_len = sizeof(struct tcphdr) + TCPOLEN_TSTAMP_ALIGNED; - } else { - newtp->rx_opt.ts_recent_stamp = 0; - newtp->tcp_header_len = sizeof(struct tcphdr); - } - newtp->tsoffset = treq->ts_off; + if (!newsk) + return NULL; + + newicsk = inet_csk(newsk); + newtp = tcp_sk(newsk); + oldtp = tcp_sk(sk); + + smc_check_reset_syn_req(oldtp, req, newtp); + + /* Now setup tcp_sock */ + newtp->pred_flags = 0; + + newtp->rcv_wup = newtp->copied_seq = + newtp->rcv_nxt = treq->rcv_isn + 1; + newtp->segs_in = 1; + + newtp->snd_sml = newtp->snd_una = + newtp->snd_nxt = newtp->snd_up = treq->snt_isn + 1; + + INIT_LIST_HEAD(&newtp->tsq_node); + INIT_LIST_HEAD(&newtp->tsorted_sent_queue); + + tcp_init_wl(newtp, treq->rcv_isn); + + newtp->srtt_us = 0; + newtp->mdev_us = jiffies_to_usecs(TCP_TIMEOUT_INIT); + minmax_reset(&newtp->rtt_min, tcp_jiffies32, ~0U); + newicsk->icsk_rto = TCP_TIMEOUT_INIT; + newicsk->icsk_ack.lrcvtime = tcp_jiffies32; + + newtp->packets_out = 0; + newtp->retrans_out = 0; + newtp->sacked_out = 0; + newtp->snd_ssthresh = TCP_INFINITE_SSTHRESH; + newtp->tlp_high_seq = 0; + newtp->lsndtime = tcp_jiffies32; + newsk->sk_txhash = treq->txhash; + newtp->last_oow_ack_time = 0; + newtp->total_retrans = req->num_retrans; + + /* So many TCP implementations out there (incorrectly) count the + * initial SYN frame in their delayed-ACK and congestion control + * algorithms that we must have the following bandaid to talk + * efficiently to them. -DaveM + */ + newtp->snd_cwnd = TCP_INIT_CWND; + newtp->snd_cwnd_cnt = 0; + + /* There's a bubble in the pipe until at least the first ACK. 
*/ + newtp->app_limited = ~0U; + + tcp_init_xmit_timers(newsk); + newtp->write_seq = newtp->pushed_seq = treq->snt_isn + 1; + + newtp->rx_opt.saw_tstamp = 0; + + newtp->rx_opt.dsack = 0; + newtp->rx_opt.num_sacks = 0; + + newtp->urg_data = 0; + + if (sock_flag(newsk, SOCK_KEEPOPEN)) + inet_csk_reset_keepalive_timer(newsk, + keepalive_time_when(newtp)); + + newtp->rx_opt.tstamp_ok = ireq->tstamp_ok; + newtp->rx_opt.sack_ok = ireq->sack_ok; + newtp->window_clamp = req->rsk_window_clamp; + newtp->rcv_ssthresh = req->rsk_rcv_wnd; + newtp->rcv_wnd = req->rsk_rcv_wnd; + newtp->rx_opt.wscale_ok = ireq->wscale_ok; + if (newtp->rx_opt.wscale_ok) { + newtp->rx_opt.snd_wscale = ireq->snd_wscale; + newtp->rx_opt.rcv_wscale = ireq->rcv_wscale; + } else { + newtp->rx_opt.snd_wscale = newtp->rx_opt.rcv_wscale = 0; + newtp->window_clamp = min(newtp->window_clamp, 65535U); + } + newtp->snd_wnd = ntohs(tcp_hdr(skb)->window) << newtp->rx_opt.snd_wscale; + newtp->max_window = newtp->snd_wnd; + + if (newtp->rx_opt.tstamp_ok) { + newtp->rx_opt.ts_recent = req->ts_recent; + newtp->rx_opt.ts_recent_stamp = ktime_get_seconds(); + newtp->tcp_header_len = sizeof(struct tcphdr) + TCPOLEN_TSTAMP_ALIGNED; + } else { + newtp->rx_opt.ts_recent_stamp = 0; + newtp->tcp_header_len = sizeof(struct tcphdr); + } + newtp->tsoffset = treq->ts_off; #ifdef CONFIG_TCP_MD5SIG - newtp->md5sig_info = NULL; /*XXX*/ - if (newtp->af_specific->md5_lookup(sk, newsk)) - newtp->tcp_header_len += TCPOLEN_MD5SIG_ALIGNED; + newtp->md5sig_info = NULL; /*XXX*/ + if (newtp->af_specific->md5_lookup(sk, newsk)) + newtp->tcp_header_len += TCPOLEN_MD5SIG_ALIGNED; #endif - if (skb->len >= TCP_MSS_DEFAULT + newtp->tcp_header_len) - newicsk->icsk_ack.last_seg_size = skb->len - newtp->tcp_header_len; - newtp->rx_opt.mss_clamp = req->mss; - tcp_ecn_openreq_child(newtp, req); - newtp->fastopen_req = NULL; - newtp->fastopen_rsk = NULL; - newtp->syn_data_acked = 0; - newtp->rack.mstamp = 0; - newtp->rack.advanced = 0; - newtp->rack.reo_wnd_steps = 1; - newtp->rack.last_delivered = 0; - newtp->rack.reo_wnd_persist = 0; - newtp->rack.dsack_seen = 0; - - __TCP_INC_STATS(sock_net(sk), TCP_MIB_PASSIVEOPENS); - } + if (skb->len >= TCP_MSS_DEFAULT + newtp->tcp_header_len) + newicsk->icsk_ack.last_seg_size = skb->len - newtp->tcp_header_len; + newtp->rx_opt.mss_clamp = req->mss; + tcp_ecn_openreq_child(newtp, req); + newtp->fastopen_req = NULL; + newtp->fastopen_rsk = NULL; + newtp->syn_data_acked = 0; + newtp->rack.mstamp = 0; + newtp->rack.advanced = 0; + newtp->rack.reo_wnd_steps = 1; + newtp->rack.last_delivered = 0; + newtp->rack.reo_wnd_persist = 0; + newtp->rack.dsack_seen = 0; + + __TCP_INC_STATS(sock_net(sk), TCP_MIB_PASSIVEOPENS); + return newsk; } EXPORT_SYMBOL(tcp_create_openreq_child); @@ -600,7 +603,7 @@ struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb, * it can be estimated (approximately) * from another data. 
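The get_seconds() to ktime_get_seconds() conversions in these TCP hunks move the PAWS-related timestamps onto the monotonic clock, and the tcp_twsk_unique() check is rewritten with time_after32() so the comparison stays correct even if the stored 32-bit seconds value wraps. time_after32(a, b) is true when a is "after" b in wraparound arithmetic; a runnable userspace model of that macro:

#include <stdio.h>
#include <stdint.h>

/* Same idea as the kernel's time_after32(a, b): true when a is later than b,
 * even across a 32-bit wraparound of the counter.
 */
static int time_after32(uint32_t a, uint32_t b)
{
	return (int32_t)(b - a) < 0;
}

int main(void)
{
	uint32_t before_wrap = 0xfffffff0u;	/* shortly before the counter wraps */
	uint32_t after_wrap  = 0x00000010u;	/* shortly after it wraps */

	printf("naive after > before : %d\n", after_wrap > before_wrap);		/* 0, wrong */
	printf("time_after32(a, b)   : %d\n", time_after32(after_wrap, before_wrap));	/* 1 */
	return 0;
}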
*/ - tmp_opt.ts_recent_stamp = get_seconds() - ((TCP_TIMEOUT_INIT/HZ)<<req->num_timeout); + tmp_opt.ts_recent_stamp = ktime_get_seconds() - ((TCP_TIMEOUT_INIT/HZ)<<req->num_timeout); paws_reject = tcp_paws_reject(&tmp_opt, th->rst); } } diff --git a/net/ipv4/tcp_offload.c b/net/ipv4/tcp_offload.c index 8cc7c3487330..870b0a335061 100644 --- a/net/ipv4/tcp_offload.c +++ b/net/ipv4/tcp_offload.c @@ -180,9 +180,9 @@ out: return segs; } -struct sk_buff **tcp_gro_receive(struct sk_buff **head, struct sk_buff *skb) +struct sk_buff *tcp_gro_receive(struct list_head *head, struct sk_buff *skb) { - struct sk_buff **pp = NULL; + struct sk_buff *pp = NULL; struct sk_buff *p; struct tcphdr *th; struct tcphdr *th2; @@ -220,7 +220,7 @@ struct sk_buff **tcp_gro_receive(struct sk_buff **head, struct sk_buff *skb) len = skb_gro_len(skb); flags = tcp_flag_word(th); - for (; (p = *head); head = &p->next) { + list_for_each_entry(p, head, list) { if (!NAPI_GRO_CB(p)->same_flow) continue; @@ -233,7 +233,7 @@ struct sk_buff **tcp_gro_receive(struct sk_buff **head, struct sk_buff *skb) goto found; } - + p = NULL; goto out_check_final; found: @@ -262,8 +262,11 @@ found: flush |= (len - 1) >= mss; flush |= (ntohl(th2->seq) + skb_gro_len(p)) ^ ntohl(th->seq); +#ifdef CONFIG_TLS_DEVICE + flush |= p->decrypted ^ skb->decrypted; +#endif - if (flush || skb_gro_receive(head, skb)) { + if (flush || skb_gro_receive(p, skb)) { mss = 1; goto out_check_final; } @@ -277,7 +280,7 @@ out_check_final: TCP_FLAG_FIN)); if (p && (!NAPI_GRO_CB(skb)->same_flow || flush)) - pp = head; + pp = p; out: NAPI_GRO_CB(skb)->flush |= (flush != 0); @@ -302,7 +305,7 @@ int tcp_gro_complete(struct sk_buff *skb) } EXPORT_SYMBOL(tcp_gro_complete); -static struct sk_buff **tcp4_gro_receive(struct sk_buff **head, struct sk_buff *skb) +static struct sk_buff *tcp4_gro_receive(struct list_head *head, struct sk_buff *skb) { /* Don't bother verifying checksum if we're going to flush anyway. */ if (!NAPI_GRO_CB(skb)->flush && diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c index 00e5a300ddb9..6cbab56e7407 100644 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c @@ -973,17 +973,6 @@ enum hrtimer_restart tcp_pace_kick(struct hrtimer *timer) return HRTIMER_NORESTART; } -/* BBR congestion control needs pacing. - * Same remark for SO_MAX_PACING_RATE. - * sch_fq packet scheduler is efficiently handling pacing, - * but is not always installed/used. - * Return true if TCP stack should pace packets itself. - */ -static bool tcp_needs_internal_pacing(const struct sock *sk) -{ - return smp_load_acquire(&sk->sk_pacing_status) == SK_PACING_NEEDED; -} - static void tcp_internal_pacing(struct sock *sk, const struct sk_buff *skb) { u64 len_ns; @@ -995,9 +984,6 @@ static void tcp_internal_pacing(struct sock *sk, const struct sk_buff *skb) if (!rate || rate == ~0U) return; - /* Should account for header sizes as sch_fq does, - * but lets make things simple. - */ len_ns = (u64)skb->len * NSEC_PER_SEC; do_div(len_ns, rate); hrtimer_start(&tcp_sk(sk)->pacing_timer, diff --git a/net/ipv4/tcp_rate.c b/net/ipv4/tcp_rate.c index c61240e43923..4dff40dad4dc 100644 --- a/net/ipv4/tcp_rate.c +++ b/net/ipv4/tcp_rate.c @@ -146,6 +146,10 @@ void tcp_rate_gen(struct sock *sk, u32 delivered, u32 lost, rs->prior_mstamp); /* ack phase */ rs->interval_us = max(snd_us, ack_us); + /* Record both segment send and ack receive intervals */ + rs->snd_interval_us = snd_us; + rs->rcv_interval_us = ack_us; + /* Normally we expect interval_us >= min-rtt. 
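The tcp_rate.c hunk just above records the send and ACK phases of a rate sample separately (snd_interval_us and rcv_interval_us), while interval_us stays the max of the two, which is what the delivery-rate estimate divides by. A tiny worked example of why taking the max matters; the numbers are made up for illustration.

#include <stdio.h>

int main(void)
{
	unsigned long delivered_bytes = 30000;	/* e.g. 20 segments of 1500 bytes */
	unsigned long snd_us = 2000;		/* time the segments took to send */
	unsigned long ack_us = 5000;		/* time their ACKs took to arrive */
	unsigned long interval_us = snd_us > ack_us ? snd_us : ack_us;

	/* dividing by the send interval alone would overestimate the path's rate */
	printf("rate over snd interval: %.1f MB/s\n", delivered_bytes / (double)snd_us);
	printf("rate over max interval: %.1f MB/s\n", delivered_bytes / (double)interval_us);
	return 0;
}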
* Note that rate may still be over-estimated when a spuriously * retransmistted skb was first (s)acked because "interval_us" diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c index 24e116ddae79..060e841dde40 100644 --- a/net/ipv4/udp.c +++ b/net/ipv4/udp.c @@ -926,11 +926,6 @@ int udp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) if (msg->msg_flags & MSG_OOB) /* Mirror BSD error message compatibility */ return -EOPNOTSUPP; - ipc.opt = NULL; - ipc.tx_flags = 0; - ipc.ttl = 0; - ipc.tos = -1; - getfrag = is_udplite ? udplite_getfrag : ip_generic_getfrag; fl4 = &inet->cork.fl.u.ip4; @@ -977,9 +972,7 @@ int udp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) connected = 1; } - ipc.sockc.tsflags = sk->sk_tsflags; - ipc.addr = inet->inet_saddr; - ipc.oif = sk->sk_bound_dev_if; + ipcm_init_sk(&ipc, inet); ipc.gso_size = up->gso_size; if (msg->msg_controllen) { @@ -1027,8 +1020,6 @@ int udp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) saddr = ipc.addr; ipc.addr = faddr = daddr; - sock_tx_timestamp(sk, ipc.sockc.tsflags, &ipc.tx_flags); - if (ipc.opt && ipc.opt->opt.srr) { if (!daddr) { err = -EINVAL; diff --git a/net/ipv4/udp_offload.c b/net/ipv4/udp_offload.c index 69c54540d5b4..0c0522b79b43 100644 --- a/net/ipv4/udp_offload.c +++ b/net/ipv4/udp_offload.c @@ -343,10 +343,11 @@ out: return segs; } -struct sk_buff **udp_gro_receive(struct sk_buff **head, struct sk_buff *skb, - struct udphdr *uh, udp_lookup_t lookup) +struct sk_buff *udp_gro_receive(struct list_head *head, struct sk_buff *skb, + struct udphdr *uh, udp_lookup_t lookup) { - struct sk_buff *p, **pp = NULL; + struct sk_buff *pp = NULL; + struct sk_buff *p; struct udphdr *uh2; unsigned int off = skb_gro_offset(skb); int flush = 1; @@ -371,7 +372,7 @@ struct sk_buff **udp_gro_receive(struct sk_buff **head, struct sk_buff *skb, unflush: flush = 0; - for (p = *head; p; p = p->next) { + list_for_each_entry(p, head, list) { if (!NAPI_GRO_CB(p)->same_flow) continue; @@ -399,8 +400,8 @@ out: } EXPORT_SYMBOL(udp_gro_receive); -static struct sk_buff **udp4_gro_receive(struct sk_buff **head, - struct sk_buff *skb) +static struct sk_buff *udp4_gro_receive(struct list_head *head, + struct sk_buff *skb) { struct udphdr *uh = udp_gro_udphdr(skb); diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c index 91580c62bb86..1659a6b3cf42 100644 --- a/net/ipv6/addrconf.c +++ b/net/ipv6/addrconf.c @@ -385,8 +385,6 @@ static struct inet6_dev *ipv6_add_dev(struct net_device *dev) if (ndev->cnf.stable_secret.initialized) ndev->cnf.addr_gen_mode = IN6_ADDR_GEN_MODE_STABLE_PRIVACY; - else - ndev->cnf.addr_gen_mode = ipv6_devconf_dflt.addr_gen_mode; ndev->cnf.mtu6 = dev->mtu; ndev->nd_parms = neigh_parms_alloc(dev, &nd_tbl); @@ -5210,7 +5208,9 @@ static inline size_t inet6_ifla6_size(void) + nla_total_size(DEVCONF_MAX * 4) /* IFLA_INET6_CONF */ + nla_total_size(IPSTATS_MIB_MAX * 8) /* IFLA_INET6_STATS */ + nla_total_size(ICMP6_MIB_MAX * 8) /* IFLA_INET6_ICMP6STATS */ - + nla_total_size(sizeof(struct in6_addr)); /* IFLA_INET6_TOKEN */ + + nla_total_size(sizeof(struct in6_addr)) /* IFLA_INET6_TOKEN */ + + nla_total_size(1) /* IFLA_INET6_ADDR_GEN_MODE */ + + 0; } static inline size_t inet6_if_nlmsg_size(void) @@ -5892,32 +5892,31 @@ static int addrconf_sysctl_addr_gen_mode(struct ctl_table *ctl, int write, loff_t *ppos) { int ret = 0; - int new_val; + u32 new_val; struct inet6_dev *idev = (struct inet6_dev *)ctl->extra1; struct net *net = (struct net *)ctl->extra2; + struct ctl_table tmp = { + .data = &new_val, + .maxlen = sizeof(new_val), 
+ .mode = ctl->mode, + }; if (!rtnl_trylock()) return restart_syscall(); - ret = proc_dointvec(ctl, write, buffer, lenp, ppos); + new_val = *((u32 *)ctl->data); - if (write) { - new_val = *((int *)ctl->data); + ret = proc_douintvec(&tmp, write, buffer, lenp, ppos); + if (ret != 0) + goto out; + if (write) { if (check_addr_gen_mode(new_val) < 0) { ret = -EINVAL; goto out; } - /* request for default */ - if (&net->ipv6.devconf_dflt->addr_gen_mode == ctl->data) { - ipv6_devconf_dflt.addr_gen_mode = new_val; - - /* request for individual net device */ - } else { - if (!idev) - goto out; - + if (idev) { if (check_stable_privacy(idev, net, new_val) < 0) { ret = -EINVAL; goto out; @@ -5927,7 +5926,21 @@ static int addrconf_sysctl_addr_gen_mode(struct ctl_table *ctl, int write, idev->cnf.addr_gen_mode = new_val; addrconf_dev_config(idev->dev); } + } else if (&net->ipv6.devconf_all->addr_gen_mode == ctl->data) { + struct net_device *dev; + + net->ipv6.devconf_dflt->addr_gen_mode = new_val; + for_each_netdev(net, dev) { + idev = __in6_dev_get(dev); + if (idev && + idev->cnf.addr_gen_mode != new_val) { + idev->cnf.addr_gen_mode = new_val; + addrconf_dev_config(idev->dev); + } + } } + + *((u32 *)ctl->data) = new_val; } out: diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c index 9ed0eae91758..c9535354149f 100644 --- a/net/ipv6/af_inet6.c +++ b/net/ipv6/af_inet6.c @@ -764,6 +764,7 @@ EXPORT_SYMBOL_GPL(ipv6_opt_accepted); static struct packet_type ipv6_packet_type __read_mostly = { .type = cpu_to_be16(ETH_P_IPV6), .func = ipv6_rcv, + .list_func = ipv6_list_rcv, }; static int __init ipv6_packet_init(void) diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c index 2ee08b6a86a4..201306b9b5ea 100644 --- a/net/ipv6/datagram.c +++ b/net/ipv6/datagram.c @@ -736,7 +736,7 @@ EXPORT_SYMBOL_GPL(ip6_datagram_recv_ctl); int ip6_datagram_send_ctl(struct net *net, struct sock *sk, struct msghdr *msg, struct flowi6 *fl6, - struct ipcm6_cookie *ipc6, struct sockcm_cookie *sockc) + struct ipcm6_cookie *ipc6) { struct in6_pktinfo *src_info; struct cmsghdr *cmsg; @@ -755,7 +755,7 @@ int ip6_datagram_send_ctl(struct net *net, struct sock *sk, } if (cmsg->cmsg_level == SOL_SOCKET) { - err = __sock_cmsg_send(sk, msg, cmsg, sockc); + err = __sock_cmsg_send(sk, msg, cmsg, &ipc6->sockc); if (err) return err; continue; diff --git a/net/ipv6/esp6_offload.c b/net/ipv6/esp6_offload.c index 27f59b61f70f..ddfa533a84e5 100644 --- a/net/ipv6/esp6_offload.c +++ b/net/ipv6/esp6_offload.c @@ -49,8 +49,8 @@ static __u16 esp6_nexthdr_esp_offset(struct ipv6hdr *ipv6_hdr, int nhlen) return 0; } -static struct sk_buff **esp6_gro_receive(struct sk_buff **head, - struct sk_buff *skb) +static struct sk_buff *esp6_gro_receive(struct list_head *head, + struct sk_buff *skb) { int offset = skb_gro_offset(skb); struct xfrm_offload *xo; diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c index be491bf6ab6e..24611c8b0562 100644 --- a/net/ipv6/icmp.c +++ b/net/ipv6/icmp.c @@ -430,7 +430,6 @@ static void icmp6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info, struct icmp6hdr tmp_hdr; struct flowi6 fl6; struct icmpv6_msg msg; - struct sockcm_cookie sockc_unused = {0}; struct ipcm6_cookie ipc6; int iif = 0; int addr_type = 0; @@ -545,7 +544,7 @@ static void icmp6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info, else if (!fl6.flowi6_oif) fl6.flowi6_oif = np->ucast_oif; - ipc6.tclass = np->tclass; + ipcm6_init_sk(&ipc6, np); fl6.flowlabel = ip6_make_flowinfo(ipc6.tclass, fl6.flowlabel); dst = icmpv6_route_lookup(net, skb, sk, &fl6); @@ -553,8 
+552,6 @@ static void icmp6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info, goto out; ipc6.hlimit = ip6_sk_dst_hoplimit(np, &fl6, dst); - ipc6.dontfrag = np->dontfrag; - ipc6.opt = NULL; msg.skb = skb; msg.offset = skb_network_offset(skb); @@ -575,7 +572,7 @@ static void icmp6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info, len + sizeof(struct icmp6hdr), sizeof(struct icmp6hdr), &ipc6, &fl6, (struct rt6_info *)dst, - MSG_DONTWAIT, &sockc_unused)) { + MSG_DONTWAIT)) { ICMP6_INC_STATS(net, idev, ICMP6_MIB_OUTERRORS); ip6_flush_pending_frames(sk); } else { @@ -679,7 +676,6 @@ static void icmpv6_echo_reply(struct sk_buff *skb) struct dst_entry *dst; struct ipcm6_cookie ipc6; u32 mark = IP6_REPLY_MARK(net, skb->mark); - struct sockcm_cookie sockc_unused = {0}; saddr = &ipv6_hdr(skb)->daddr; @@ -726,16 +722,14 @@ static void icmpv6_echo_reply(struct sk_buff *skb) msg.offset = 0; msg.type = ICMPV6_ECHO_REPLY; + ipcm6_init_sk(&ipc6, np); ipc6.hlimit = ip6_sk_dst_hoplimit(np, &fl6, dst); ipc6.tclass = ipv6_get_dsfield(ipv6_hdr(skb)); - ipc6.dontfrag = np->dontfrag; - ipc6.opt = NULL; if (ip6_append_data(sk, icmpv6_getfrag, &msg, skb->len + sizeof(struct icmp6hdr), sizeof(struct icmp6hdr), &ipc6, &fl6, - (struct rt6_info *)dst, MSG_DONTWAIT, - &sockc_unused)) { + (struct rt6_info *)dst, MSG_DONTWAIT)) { __ICMP6_INC_STATS(net, idev, ICMP6_MIB_OUTERRORS); ip6_flush_pending_frames(sk); } else { diff --git a/net/ipv6/ila/Makefile b/net/ipv6/ila/Makefile index 4b32e5921e5c..b7739aba6e68 100644 --- a/net/ipv6/ila/Makefile +++ b/net/ipv6/ila/Makefile @@ -4,4 +4,4 @@ obj-$(CONFIG_IPV6_ILA) += ila.o -ila-objs := ila_common.o ila_lwt.o ila_xlat.o +ila-objs := ila_main.o ila_common.o ila_lwt.o ila_xlat.o diff --git a/net/ipv6/ila/ila.h b/net/ipv6/ila/ila.h index 3c7a11b62334..1f747bcbec29 100644 --- a/net/ipv6/ila/ila.h +++ b/net/ipv6/ila/ila.h @@ -19,6 +19,7 @@ #include <linux/skbuff.h> #include <linux/types.h> #include <net/checksum.h> +#include <net/genetlink.h> #include <net/ip.h> #include <net/protocol.h> #include <uapi/linux/ila.h> @@ -104,9 +105,31 @@ void ila_update_ipv6_locator(struct sk_buff *skb, struct ila_params *p, void ila_init_saved_csum(struct ila_params *p); +struct ila_net { + struct { + struct rhashtable rhash_table; + spinlock_t *locks; /* Bucket locks for entry manipulation */ + unsigned int locks_mask; + bool hooks_registered; + } xlat; +}; + int ila_lwt_init(void); void ila_lwt_fini(void); -int ila_xlat_init(void); -void ila_xlat_fini(void); + +int ila_xlat_init_net(struct net *net); +void ila_xlat_exit_net(struct net *net); + +int ila_xlat_nl_cmd_add_mapping(struct sk_buff *skb, struct genl_info *info); +int ila_xlat_nl_cmd_del_mapping(struct sk_buff *skb, struct genl_info *info); +int ila_xlat_nl_cmd_get_mapping(struct sk_buff *skb, struct genl_info *info); +int ila_xlat_nl_cmd_flush(struct sk_buff *skb, struct genl_info *info); +int ila_xlat_nl_dump_start(struct netlink_callback *cb); +int ila_xlat_nl_dump_done(struct netlink_callback *cb); +int ila_xlat_nl_dump(struct sk_buff *skb, struct netlink_callback *cb); + +extern unsigned int ila_net_id; + +extern struct genl_family ila_nl_family; #endif /* __ILA_H */ diff --git a/net/ipv6/ila/ila_common.c b/net/ipv6/ila/ila_common.c index 8c88ecf29b93..579310466eac 100644 --- a/net/ipv6/ila/ila_common.c +++ b/net/ipv6/ila/ila_common.c @@ -154,33 +154,3 @@ void ila_update_ipv6_locator(struct sk_buff *skb, struct ila_params *p, iaddr->loc = p->locator; } -static int __init ila_init(void) -{ - int ret; - - ret = ila_lwt_init(); 
- - if (ret) - goto fail_lwt; - - ret = ila_xlat_init(); - if (ret) - goto fail_xlat; - - return 0; -fail_xlat: - ila_lwt_fini(); -fail_lwt: - return ret; -} - -static void __exit ila_fini(void) -{ - ila_xlat_fini(); - ila_lwt_fini(); -} - -module_init(ila_init); -module_exit(ila_fini); -MODULE_AUTHOR("Tom Herbert <tom@herbertland.com>"); -MODULE_LICENSE("GPL"); diff --git a/net/ipv6/ila/ila_main.c b/net/ipv6/ila/ila_main.c new file mode 100644 index 000000000000..18fac76b9520 --- /dev/null +++ b/net/ipv6/ila/ila_main.c @@ -0,0 +1,121 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <net/genetlink.h> +#include <net/ila.h> +#include <net/netns/generic.h> +#include <uapi/linux/genetlink.h> +#include "ila.h" + +static const struct nla_policy ila_nl_policy[ILA_ATTR_MAX + 1] = { + [ILA_ATTR_LOCATOR] = { .type = NLA_U64, }, + [ILA_ATTR_LOCATOR_MATCH] = { .type = NLA_U64, }, + [ILA_ATTR_IFINDEX] = { .type = NLA_U32, }, + [ILA_ATTR_CSUM_MODE] = { .type = NLA_U8, }, + [ILA_ATTR_IDENT_TYPE] = { .type = NLA_U8, }, +}; + +static const struct genl_ops ila_nl_ops[] = { + { + .cmd = ILA_CMD_ADD, + .doit = ila_xlat_nl_cmd_add_mapping, + .policy = ila_nl_policy, + .flags = GENL_ADMIN_PERM, + }, + { + .cmd = ILA_CMD_DEL, + .doit = ila_xlat_nl_cmd_del_mapping, + .policy = ila_nl_policy, + .flags = GENL_ADMIN_PERM, + }, + { + .cmd = ILA_CMD_FLUSH, + .doit = ila_xlat_nl_cmd_flush, + .policy = ila_nl_policy, + .flags = GENL_ADMIN_PERM, + }, + { + .cmd = ILA_CMD_GET, + .doit = ila_xlat_nl_cmd_get_mapping, + .start = ila_xlat_nl_dump_start, + .dumpit = ila_xlat_nl_dump, + .done = ila_xlat_nl_dump_done, + .policy = ila_nl_policy, + }, +}; + +unsigned int ila_net_id; + +struct genl_family ila_nl_family __ro_after_init = { + .hdrsize = 0, + .name = ILA_GENL_NAME, + .version = ILA_GENL_VERSION, + .maxattr = ILA_ATTR_MAX, + .netnsok = true, + .parallel_ops = true, + .module = THIS_MODULE, + .ops = ila_nl_ops, + .n_ops = ARRAY_SIZE(ila_nl_ops), +}; + +static __net_init int ila_init_net(struct net *net) +{ + int err; + + err = ila_xlat_init_net(net); + if (err) + goto ila_xlat_init_fail; + + return 0; + +ila_xlat_init_fail: + return err; +} + +static __net_exit void ila_exit_net(struct net *net) +{ + ila_xlat_exit_net(net); +} + +static struct pernet_operations ila_net_ops = { + .init = ila_init_net, + .exit = ila_exit_net, + .id = &ila_net_id, + .size = sizeof(struct ila_net), +}; + +static int __init ila_init(void) +{ + int ret; + + ret = register_pernet_device(&ila_net_ops); + if (ret) + goto register_device_fail; + + ret = genl_register_family(&ila_nl_family); + if (ret) + goto register_family_fail; + + ret = ila_lwt_init(); + if (ret) + goto fail_lwt; + + return 0; + +fail_lwt: + genl_unregister_family(&ila_nl_family); +register_family_fail: + unregister_pernet_device(&ila_net_ops); +register_device_fail: + return ret; +} + +static void __exit ila_fini(void) +{ + ila_lwt_fini(); + genl_unregister_family(&ila_nl_family); + unregister_pernet_device(&ila_net_ops); +} + +module_init(ila_init); +module_exit(ila_fini); +MODULE_AUTHOR("Tom Herbert <tom@herbertland.com>"); +MODULE_LICENSE("GPL"); diff --git a/net/ipv6/ila/ila_xlat.c b/net/ipv6/ila/ila_xlat.c index 10ae13560b40..51a15ce50a64 100644 --- a/net/ipv6/ila/ila_xlat.c +++ b/net/ipv6/ila/ila_xlat.c @@ -22,36 +22,14 @@ struct ila_map { struct rcu_head rcu; }; -static unsigned int ila_net_id; - -struct ila_net { - struct rhashtable rhash_table; - spinlock_t *locks; /* Bucket locks for entry manipulation */ - unsigned int locks_mask; - bool hooks_registered; -}; - 
+#define MAX_LOCKS 1024 #define LOCKS_PER_CPU 10 static int alloc_ila_locks(struct ila_net *ilan) { - unsigned int i, size; - unsigned int nr_pcpus = num_possible_cpus(); - - nr_pcpus = min_t(unsigned int, nr_pcpus, 32UL); - size = roundup_pow_of_two(nr_pcpus * LOCKS_PER_CPU); - - if (sizeof(spinlock_t) != 0) { - ilan->locks = kvmalloc_array(size, sizeof(spinlock_t), - GFP_KERNEL); - if (!ilan->locks) - return -ENOMEM; - for (i = 0; i < size; i++) - spin_lock_init(&ilan->locks[i]); - } - ilan->locks_mask = size - 1; - - return 0; + return alloc_bucket_spinlocks(&ilan->xlat.locks, &ilan->xlat.locks_mask, + MAX_LOCKS, LOCKS_PER_CPU, + GFP_KERNEL); } static u32 hashrnd __read_mostly; @@ -71,7 +49,7 @@ static inline u32 ila_locator_hash(struct ila_locator loc) static inline spinlock_t *ila_get_lock(struct ila_net *ilan, struct ila_locator loc) { - return &ilan->locks[ila_locator_hash(loc) & ilan->locks_mask]; + return &ilan->xlat.locks[ila_locator_hash(loc) & ilan->xlat.locks_mask]; } static inline int ila_cmp_wildcards(struct ila_map *ila, @@ -115,16 +93,6 @@ static const struct rhashtable_params rht_params = { .obj_cmpfn = ila_cmpfn, }; -static struct genl_family ila_nl_family; - -static const struct nla_policy ila_nl_policy[ILA_ATTR_MAX + 1] = { - [ILA_ATTR_LOCATOR] = { .type = NLA_U64, }, - [ILA_ATTR_LOCATOR_MATCH] = { .type = NLA_U64, }, - [ILA_ATTR_IFINDEX] = { .type = NLA_U32, }, - [ILA_ATTR_CSUM_MODE] = { .type = NLA_U8, }, - [ILA_ATTR_IDENT_TYPE] = { .type = NLA_U8, }, -}; - static int parse_nl_config(struct genl_info *info, struct ila_xlat_params *xp) { @@ -162,7 +130,7 @@ static inline struct ila_map *ila_lookup_wildcards(struct ila_addr *iaddr, { struct ila_map *ila; - ila = rhashtable_lookup_fast(&ilan->rhash_table, &iaddr->loc, + ila = rhashtable_lookup_fast(&ilan->xlat.rhash_table, &iaddr->loc, rht_params); while (ila) { if (!ila_cmp_wildcards(ila, iaddr, ifindex)) @@ -179,7 +147,7 @@ static inline struct ila_map *ila_lookup_by_params(struct ila_xlat_params *xp, { struct ila_map *ila; - ila = rhashtable_lookup_fast(&ilan->rhash_table, + ila = rhashtable_lookup_fast(&ilan->xlat.rhash_table, &xp->ip.locator_match, rht_params); while (ila) { @@ -196,9 +164,9 @@ static inline void ila_release(struct ila_map *ila) kfree_rcu(ila, rcu); } -static void ila_free_cb(void *ptr, void *arg) +static void ila_free_node(struct ila_map *ila) { - struct ila_map *ila = (struct ila_map *)ptr, *next; + struct ila_map *next; /* Assume rcu_readlock held */ while (ila) { @@ -208,6 +176,11 @@ static void ila_free_cb(void *ptr, void *arg) } } +static void ila_free_cb(void *ptr, void *arg) +{ + ila_free_node((struct ila_map *)ptr); +} + static int ila_xlat_addr(struct sk_buff *skb, bool sir2ila); static unsigned int @@ -235,7 +208,7 @@ static int ila_add_mapping(struct net *net, struct ila_xlat_params *xp) spinlock_t *lock = ila_get_lock(ilan, xp->ip.locator_match); int err = 0, order; - if (!ilan->hooks_registered) { + if (!ilan->xlat.hooks_registered) { /* We defer registering net hooks in the namespace until the * first mapping is added. 
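alloc_ila_locks() now delegates to the generic alloc_bucket_spinlocks() helper instead of open-coding the sizing, but the scheme is unchanged: a power-of-two array of spinlocks indexed by a hash of the locator, as ila_get_lock() shows with its (hash & locks_mask) lookup. A small userspace model of that bucket-lock selection follows; the lock count and the hash function are illustrative only.

#include <stdio.h>
#include <stdint.h>

#define NUM_LOCKS 64u	/* must be a power of two so masking works as an index */

static unsigned int pick_lock(uint64_t locator)
{
	/* toy hash standing in for ila_locator_hash() */
	uint32_t hash = (uint32_t)(locator ^ (locator >> 32)) * 2654435761u;

	return hash & (NUM_LOCKS - 1);	/* same masking as ila_get_lock() */
}

int main(void)
{
	uint64_t locators[] = { 0x1111, 0x2222, 0x0123456789abcdefULL };

	for (unsigned i = 0; i < sizeof(locators) / sizeof(locators[0]); i++)
		printf("locator %#llx -> lock bucket %u\n",
		       (unsigned long long)locators[i], pick_lock(locators[i]));
	return 0;
}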
*/ @@ -244,7 +217,7 @@ static int ila_add_mapping(struct net *net, struct ila_xlat_params *xp) if (err) return err; - ilan->hooks_registered = true; + ilan->xlat.hooks_registered = true; } ila = kzalloc(sizeof(*ila), GFP_KERNEL); @@ -259,12 +232,12 @@ static int ila_add_mapping(struct net *net, struct ila_xlat_params *xp) spin_lock(lock); - head = rhashtable_lookup_fast(&ilan->rhash_table, + head = rhashtable_lookup_fast(&ilan->xlat.rhash_table, &xp->ip.locator_match, rht_params); if (!head) { /* New entry for the rhash_table */ - err = rhashtable_lookup_insert_fast(&ilan->rhash_table, + err = rhashtable_lookup_insert_fast(&ilan->xlat.rhash_table, &ila->node, rht_params); } else { struct ila_map *tila = head, *prev = NULL; @@ -290,7 +263,7 @@ static int ila_add_mapping(struct net *net, struct ila_xlat_params *xp) } else { /* Make this ila new head */ RCU_INIT_POINTER(ila->next, head); - err = rhashtable_replace_fast(&ilan->rhash_table, + err = rhashtable_replace_fast(&ilan->xlat.rhash_table, &head->node, &ila->node, rht_params); if (err) @@ -316,7 +289,7 @@ static int ila_del_mapping(struct net *net, struct ila_xlat_params *xp) spin_lock(lock); - head = rhashtable_lookup_fast(&ilan->rhash_table, + head = rhashtable_lookup_fast(&ilan->xlat.rhash_table, &xp->ip.locator_match, rht_params); ila = head; @@ -346,15 +319,15 @@ static int ila_del_mapping(struct net *net, struct ila_xlat_params *xp) * table */ err = rhashtable_replace_fast( - &ilan->rhash_table, &ila->node, + &ilan->xlat.rhash_table, &ila->node, &head->node, rht_params); if (err) goto out; } else { /* Entry no longer used */ - err = rhashtable_remove_fast(&ilan->rhash_table, - &ila->node, - rht_params); + err = rhashtable_remove_fast( + &ilan->xlat.rhash_table, + &ila->node, rht_params); } } @@ -369,7 +342,7 @@ out: return err; } -static int ila_nl_cmd_add_mapping(struct sk_buff *skb, struct genl_info *info) +int ila_xlat_nl_cmd_add_mapping(struct sk_buff *skb, struct genl_info *info) { struct net *net = genl_info_net(info); struct ila_xlat_params p; @@ -382,7 +355,7 @@ static int ila_nl_cmd_add_mapping(struct sk_buff *skb, struct genl_info *info) return ila_add_mapping(net, &p); } -static int ila_nl_cmd_del_mapping(struct sk_buff *skb, struct genl_info *info) +int ila_xlat_nl_cmd_del_mapping(struct sk_buff *skb, struct genl_info *info) { struct net *net = genl_info_net(info); struct ila_xlat_params xp; @@ -397,6 +370,59 @@ static int ila_nl_cmd_del_mapping(struct sk_buff *skb, struct genl_info *info) return 0; } +static inline spinlock_t *lock_from_ila_map(struct ila_net *ilan, + struct ila_map *ila) +{ + return ila_get_lock(ilan, ila->xp.ip.locator_match); +} + +int ila_xlat_nl_cmd_flush(struct sk_buff *skb, struct genl_info *info) +{ + struct net *net = genl_info_net(info); + struct ila_net *ilan = net_generic(net, ila_net_id); + struct rhashtable_iter iter; + struct ila_map *ila; + spinlock_t *lock; + int ret; + + ret = rhashtable_walk_init(&ilan->xlat.rhash_table, &iter, GFP_KERNEL); + if (ret) + goto done; + + rhashtable_walk_start(&iter); + + for (;;) { + ila = rhashtable_walk_next(&iter); + + if (IS_ERR(ila)) { + if (PTR_ERR(ila) == -EAGAIN) + continue; + ret = PTR_ERR(ila); + goto done; + } else if (!ila) { + break; + } + + lock = lock_from_ila_map(ilan, ila); + + spin_lock(lock); + + ret = rhashtable_remove_fast(&ilan->xlat.rhash_table, + &ila->node, rht_params); + if (!ret) + ila_free_node(ila); + + spin_unlock(lock); + + if (ret) + break; + } + +done: + rhashtable_walk_stop(&iter); + return ret; +} + static int 
ila_fill_info(struct ila_map *ila, struct sk_buff *msg) { if (nla_put_u64_64bit(msg, ILA_ATTR_LOCATOR, @@ -434,7 +460,7 @@ nla_put_failure: return -EMSGSIZE; } -static int ila_nl_cmd_get_mapping(struct sk_buff *skb, struct genl_info *info) +int ila_xlat_nl_cmd_get_mapping(struct sk_buff *skb, struct genl_info *info) { struct net *net = genl_info_net(info); struct ila_net *ilan = net_generic(net, ila_net_id); @@ -475,27 +501,34 @@ out_free: struct ila_dump_iter { struct rhashtable_iter rhiter; + int skip; }; -static int ila_nl_dump_start(struct netlink_callback *cb) +int ila_xlat_nl_dump_start(struct netlink_callback *cb) { struct net *net = sock_net(cb->skb->sk); struct ila_net *ilan = net_generic(net, ila_net_id); - struct ila_dump_iter *iter = (struct ila_dump_iter *)cb->args[0]; + struct ila_dump_iter *iter; + int ret; - if (!iter) { - iter = kmalloc(sizeof(*iter), GFP_KERNEL); - if (!iter) - return -ENOMEM; + iter = kmalloc(sizeof(*iter), GFP_KERNEL); + if (!iter) + return -ENOMEM; - cb->args[0] = (long)iter; + ret = rhashtable_walk_init(&ilan->xlat.rhash_table, &iter->rhiter, + GFP_KERNEL); + if (ret) { + kfree(iter); + return ret; } - return rhashtable_walk_init(&ilan->rhash_table, &iter->rhiter, - GFP_KERNEL); + iter->skip = 0; + cb->args[0] = (long)iter; + + return ret; } -static int ila_nl_dump_done(struct netlink_callback *cb) +int ila_xlat_nl_dump_done(struct netlink_callback *cb) { struct ila_dump_iter *iter = (struct ila_dump_iter *)cb->args[0]; @@ -506,24 +539,49 @@ static int ila_nl_dump_done(struct netlink_callback *cb) return 0; } -static int ila_nl_dump(struct sk_buff *skb, struct netlink_callback *cb) +int ila_xlat_nl_dump(struct sk_buff *skb, struct netlink_callback *cb) { struct ila_dump_iter *iter = (struct ila_dump_iter *)cb->args[0]; struct rhashtable_iter *rhiter = &iter->rhiter; + int skip = iter->skip; struct ila_map *ila; int ret; rhashtable_walk_start(rhiter); - for (;;) { - ila = rhashtable_walk_next(rhiter); + /* Get first entry */ + ila = rhashtable_walk_peek(rhiter); + + if (ila && !IS_ERR(ila) && skip) { + /* Skip over visited entries */ + while (ila && skip) { + /* Skip over any ila entries in this list that we + * have already dumped. + */ + ila = rcu_access_pointer(ila->next); + skip--; + } + } + + skip = 0; + + for (;;) { if (IS_ERR(ila)) { - if (PTR_ERR(ila) == -EAGAIN) - continue; ret = PTR_ERR(ila); - goto done; + if (ret == -EAGAIN) { + /* Table has changed and iter has reset. Return + * -EAGAIN to the application even if we have + * written data to the skb. The application + * needs to deal with this. + */ + + goto out_ret; + } else { + break; + } } else if (!ila) { + ret = 0; break; } @@ -532,90 +590,54 @@ static int ila_nl_dump(struct sk_buff *skb, struct netlink_callback *cb) cb->nlh->nlmsg_seq, NLM_F_MULTI, skb, ILA_CMD_GET); if (ret) - goto done; + goto out; + skip++; ila = rcu_access_pointer(ila->next); } + + skip = 0; + ila = rhashtable_walk_next(rhiter); } - ret = skb->len; +out: + iter->skip = skip; + ret = (skb->len ? 
: ret); -done: +out_ret: rhashtable_walk_stop(rhiter); return ret; } -static const struct genl_ops ila_nl_ops[] = { - { - .cmd = ILA_CMD_ADD, - .doit = ila_nl_cmd_add_mapping, - .policy = ila_nl_policy, - .flags = GENL_ADMIN_PERM, - }, - { - .cmd = ILA_CMD_DEL, - .doit = ila_nl_cmd_del_mapping, - .policy = ila_nl_policy, - .flags = GENL_ADMIN_PERM, - }, - { - .cmd = ILA_CMD_GET, - .doit = ila_nl_cmd_get_mapping, - .start = ila_nl_dump_start, - .dumpit = ila_nl_dump, - .done = ila_nl_dump_done, - .policy = ila_nl_policy, - }, -}; - -static struct genl_family ila_nl_family __ro_after_init = { - .hdrsize = 0, - .name = ILA_GENL_NAME, - .version = ILA_GENL_VERSION, - .maxattr = ILA_ATTR_MAX, - .netnsok = true, - .parallel_ops = true, - .module = THIS_MODULE, - .ops = ila_nl_ops, - .n_ops = ARRAY_SIZE(ila_nl_ops), -}; - #define ILA_HASH_TABLE_SIZE 1024 -static __net_init int ila_init_net(struct net *net) +int ila_xlat_init_net(struct net *net) { - int err; struct ila_net *ilan = net_generic(net, ila_net_id); + int err; err = alloc_ila_locks(ilan); if (err) return err; - rhashtable_init(&ilan->rhash_table, &rht_params); + rhashtable_init(&ilan->xlat.rhash_table, &rht_params); return 0; } -static __net_exit void ila_exit_net(struct net *net) +void ila_xlat_exit_net(struct net *net) { struct ila_net *ilan = net_generic(net, ila_net_id); - rhashtable_free_and_destroy(&ilan->rhash_table, ila_free_cb, NULL); + rhashtable_free_and_destroy(&ilan->xlat.rhash_table, ila_free_cb, NULL); - kvfree(ilan->locks); + free_bucket_spinlocks(ilan->xlat.locks); - if (ilan->hooks_registered) + if (ilan->xlat.hooks_registered) nf_unregister_net_hooks(net, ila_nf_hook_ops, ARRAY_SIZE(ila_nf_hook_ops)); } -static struct pernet_operations ila_net_ops = { - .init = ila_init_net, - .exit = ila_exit_net, - .id = &ila_net_id, - .size = sizeof(struct ila_net), -}; - static int ila_xlat_addr(struct sk_buff *skb, bool sir2ila) { struct ila_map *ila; @@ -642,28 +664,3 @@ static int ila_xlat_addr(struct sk_buff *skb, bool sir2ila) return 0; } -int __init ila_xlat_init(void) -{ - int ret; - - ret = register_pernet_device(&ila_net_ops); - if (ret) - goto exit; - - ret = genl_register_family(&ila_nl_family); - if (ret < 0) - goto unregister; - - return 0; - -unregister: - unregister_pernet_device(&ila_net_ops); -exit: - return ret; -} - -void ila_xlat_fini(void) -{ - genl_unregister_family(&ila_nl_family); - unregister_pernet_device(&ila_net_ops); -} diff --git a/net/ipv6/ip6_flowlabel.c b/net/ipv6/ip6_flowlabel.c index 3eee7637bdfe..cb54a8a3c273 100644 --- a/net/ipv6/ip6_flowlabel.c +++ b/net/ipv6/ip6_flowlabel.c @@ -373,7 +373,6 @@ fl_create(struct net *net, struct sock *sk, struct in6_flowlabel_req *freq, if (olen > 0) { struct msghdr msg; struct flowi6 flowi6; - struct sockcm_cookie sockc_junk; struct ipcm6_cookie ipc6; err = -ENOMEM; @@ -392,7 +391,7 @@ fl_create(struct net *net, struct sock *sk, struct in6_flowlabel_req *freq, memset(&flowi6, 0, sizeof(flowi6)); ipc6.opt = fl->opt; - err = ip6_datagram_send_ctl(net, sk, &msg, &flowi6, &ipc6, &sockc_junk); + err = ip6_datagram_send_ctl(net, sk, &msg, &flowi6, &ipc6); if (err) goto done; err = -EINVAL; diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c index cd2cfb04e5d8..fc7dd3a04360 100644 --- a/net/ipv6/ip6_gre.c +++ b/net/ipv6/ip6_gre.c @@ -989,6 +989,8 @@ static netdev_tx_t ip6erspan_tunnel_xmit(struct sk_buff *skb, fl6.flowi6_uid = sock_net_uid(dev_net(dev), NULL); dsfield = key->tos; + if (!(tun_info->key.tun_flags & TUNNEL_ERSPAN_OPT)) + goto tx_err; md = 
ip_tunnel_info_opts(tun_info); if (!md) goto tx_err; diff --git a/net/ipv6/ip6_input.c b/net/ipv6/ip6_input.c index f08d34491ece..6242682be876 100644 --- a/net/ipv6/ip6_input.c +++ b/net/ipv6/ip6_input.c @@ -47,17 +47,11 @@ #include <net/inet_ecn.h> #include <net/dst_metadata.h> -int ip6_rcv_finish(struct net *net, struct sock *sk, struct sk_buff *skb) +static void ip6_rcv_finish_core(struct net *net, struct sock *sk, + struct sk_buff *skb) { void (*edemux)(struct sk_buff *skb); - /* if ingress device is enslaved to an L3 master device pass the - * skb to its handler for processing - */ - skb = l3mdev_ip6_rcv(skb); - if (!skb) - return NET_RX_SUCCESS; - if (net->ipv4.sysctl_ip_early_demux && !skb_dst(skb) && skb->sk == NULL) { const struct inet6_protocol *ipprot; @@ -67,20 +61,73 @@ int ip6_rcv_finish(struct net *net, struct sock *sk, struct sk_buff *skb) } if (!skb_valid_dst(skb)) ip6_route_input(skb); +} + +int ip6_rcv_finish(struct net *net, struct sock *sk, struct sk_buff *skb) +{ + /* if ingress device is enslaved to an L3 master device pass the + * skb to its handler for processing + */ + skb = l3mdev_ip6_rcv(skb); + if (!skb) + return NET_RX_SUCCESS; + ip6_rcv_finish_core(net, sk, skb); return dst_input(skb); } -int ipv6_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt, struct net_device *orig_dev) +static void ip6_sublist_rcv_finish(struct list_head *head) +{ + struct sk_buff *skb, *next; + + list_for_each_entry_safe(skb, next, head, list) + dst_input(skb); +} + +static void ip6_list_rcv_finish(struct net *net, struct sock *sk, + struct list_head *head) +{ + struct dst_entry *curr_dst = NULL; + struct sk_buff *skb, *next; + struct list_head sublist; + + INIT_LIST_HEAD(&sublist); + list_for_each_entry_safe(skb, next, head, list) { + struct dst_entry *dst; + + list_del(&skb->list); + /* if ingress device is enslaved to an L3 master device pass the + * skb to its handler for processing + */ + skb = l3mdev_ip6_rcv(skb); + if (!skb) + continue; + ip6_rcv_finish_core(net, sk, skb); + dst = skb_dst(skb); + if (curr_dst != dst) { + /* dispatch old sublist */ + if (!list_empty(&sublist)) + ip6_sublist_rcv_finish(&sublist); + /* start new sublist */ + INIT_LIST_HEAD(&sublist); + curr_dst = dst; + } + list_add_tail(&skb->list, &sublist); + } + /* dispatch final sublist */ + ip6_sublist_rcv_finish(&sublist); +} + +static struct sk_buff *ip6_rcv_core(struct sk_buff *skb, struct net_device *dev, + struct net *net) { const struct ipv6hdr *hdr; u32 pkt_len; struct inet6_dev *idev; - struct net *net = dev_net(skb->dev); if (skb->pkt_type == PACKET_OTHERHOST) { kfree_skb(skb); - return NET_RX_DROP; + return NULL; } rcu_read_lock(); @@ -196,7 +243,7 @@ int ipv6_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt if (ipv6_parse_hopopts(skb) < 0) { __IP6_INC_STATS(net, idev, IPSTATS_MIB_INHDRERRORS); rcu_read_unlock(); - return NET_RX_DROP; + return NULL; } } @@ -205,15 +252,67 @@ int ipv6_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt /* Must drop socket now because of tproxy. 
*/ skb_orphan(skb); - return NF_HOOK(NFPROTO_IPV6, NF_INET_PRE_ROUTING, - net, NULL, skb, dev, NULL, - ip6_rcv_finish); + return skb; err: __IP6_INC_STATS(net, idev, IPSTATS_MIB_INHDRERRORS); drop: rcu_read_unlock(); kfree_skb(skb); - return NET_RX_DROP; + return NULL; +} + +int ipv6_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt, struct net_device *orig_dev) +{ + struct net *net = dev_net(skb->dev); + + skb = ip6_rcv_core(skb, dev, net); + if (skb == NULL) + return NET_RX_DROP; + return NF_HOOK(NFPROTO_IPV6, NF_INET_PRE_ROUTING, + net, NULL, skb, dev, NULL, + ip6_rcv_finish); +} + +static void ip6_sublist_rcv(struct list_head *head, struct net_device *dev, + struct net *net) +{ + NF_HOOK_LIST(NFPROTO_IPV6, NF_INET_PRE_ROUTING, net, NULL, + head, dev, NULL, ip6_rcv_finish); + ip6_list_rcv_finish(net, NULL, head); +} + +/* Receive a list of IPv6 packets */ +void ipv6_list_rcv(struct list_head *head, struct packet_type *pt, + struct net_device *orig_dev) +{ + struct net_device *curr_dev = NULL; + struct net *curr_net = NULL; + struct sk_buff *skb, *next; + struct list_head sublist; + + INIT_LIST_HEAD(&sublist); + list_for_each_entry_safe(skb, next, head, list) { + struct net_device *dev = skb->dev; + struct net *net = dev_net(dev); + + list_del(&skb->list); + skb = ip6_rcv_core(skb, dev, net); + if (skb == NULL) + continue; + + if (curr_dev != dev || curr_net != net) { + /* dispatch old sublist */ + if (!list_empty(&sublist)) + ip6_sublist_rcv(&sublist, curr_dev, curr_net); + /* start new sublist */ + INIT_LIST_HEAD(&sublist); + curr_dev = dev; + curr_net = net; + } + list_add_tail(&skb->list, &sublist); + } + /* dispatch final sublist */ + ip6_sublist_rcv(&sublist, curr_dev, curr_net); } /* diff --git a/net/ipv6/ip6_offload.c b/net/ipv6/ip6_offload.c index 5b3f2f89ef41..37ff4805b20c 100644 --- a/net/ipv6/ip6_offload.c +++ b/net/ipv6/ip6_offload.c @@ -163,11 +163,11 @@ static int ipv6_exthdrs_len(struct ipv6hdr *iph, return len; } -static struct sk_buff **ipv6_gro_receive(struct sk_buff **head, - struct sk_buff *skb) +static struct sk_buff *ipv6_gro_receive(struct list_head *head, + struct sk_buff *skb) { const struct net_offload *ops; - struct sk_buff **pp = NULL; + struct sk_buff *pp = NULL; struct sk_buff *p; struct ipv6hdr *iph; unsigned int nlen; @@ -214,7 +214,7 @@ static struct sk_buff **ipv6_gro_receive(struct sk_buff **head, flush--; nlen = skb_network_header_len(skb); - for (p = *head; p; p = p->next) { + list_for_each_entry(p, head, list) { const struct ipv6hdr *iph2; __be32 first_word; /* <Version:4><Traffic_Class:8><Flow_Label:20> */ @@ -263,8 +263,8 @@ out: return pp; } -static struct sk_buff **sit_ip6ip6_gro_receive(struct sk_buff **head, - struct sk_buff *skb) +static struct sk_buff *sit_ip6ip6_gro_receive(struct list_head *head, + struct sk_buff *skb) { /* Common GRO receive for SIT and IP6IP6 */ @@ -278,8 +278,8 @@ static struct sk_buff **sit_ip6ip6_gro_receive(struct sk_buff **head, return ipv6_gro_receive(head, skb); } -static struct sk_buff **ip4ip6_gro_receive(struct sk_buff **head, - struct sk_buff *skb) +static struct sk_buff *ip4ip6_gro_receive(struct list_head *head, + struct sk_buff *skb) { /* Common GRO receive for SIT and IP6IP6 */ diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c index a14fb4fcdf18..8047fd41ba88 100644 --- a/net/ipv6/ip6_output.c +++ b/net/ipv6/ip6_output.c @@ -1219,13 +1219,16 @@ static int ip6_setup_cork(struct sock *sk, struct inet_cork_full *cork, if (mtu < IPV6_MIN_MTU) return -EINVAL; cork->base.fragsize = 
mtu; - cork->base.gso_size = sk->sk_type == SOCK_DGRAM && - sk->sk_protocol == IPPROTO_UDP ? ipc6->gso_size : 0; + cork->base.gso_size = ipc6->gso_size; + cork->base.tx_flags = 0; + sock_tx_timestamp(sk, ipc6->sockc.tsflags, &cork->base.tx_flags); if (dst_allfrag(xfrm_dst_path(&rt->dst))) cork->base.flags |= IPCORK_ALLFRAG; cork->base.length = 0; + cork->base.transmit_time = ipc6->sockc.transmit_time; + return 0; } @@ -1238,8 +1241,7 @@ static int __ip6_append_data(struct sock *sk, int getfrag(void *from, char *to, int offset, int len, int odd, struct sk_buff *skb), void *from, int length, int transhdrlen, - unsigned int flags, struct ipcm6_cookie *ipc6, - const struct sockcm_cookie *sockc) + unsigned int flags, struct ipcm6_cookie *ipc6) { struct sk_buff *skb, *skb_prev = NULL; unsigned int maxfraglen, fragheaderlen, mtu, orig_mtu, pmtu; @@ -1249,7 +1251,6 @@ static int __ip6_append_data(struct sock *sk, int copy; int err; int offset = 0; - __u8 tx_flags = 0; u32 tskey = 0; struct rt6_info *rt = (struct rt6_info *)cork->dst; struct ipv6_txoptions *opt = v6_cork->opt; @@ -1268,6 +1269,10 @@ static int __ip6_append_data(struct sock *sk, mtu = cork->gso_size ? IP6_MAX_MTU : cork->fragsize; orig_mtu = mtu; + if (cork->tx_flags & SKBTX_ANY_SW_TSTAMP && + sk->sk_tsflags & SOF_TIMESTAMPING_OPT_ID) + tskey = sk->sk_tskey++; + hh_len = LL_RESERVED_SPACE(rt->dst.dev); fragheaderlen = sizeof(struct ipv6hdr) + rt->rt6i_nfheader_len + @@ -1317,13 +1322,6 @@ emsgsize: rt->dst.dev->features & (NETIF_F_IPV6_CSUM | NETIF_F_HW_CSUM)) csummode = CHECKSUM_PARTIAL; - if (sk->sk_type == SOCK_DGRAM || sk->sk_type == SOCK_RAW) { - sock_tx_timestamp(sk, sockc->tsflags, &tx_flags); - if (tx_flags & SKBTX_ANY_SW_TSTAMP && - sk->sk_tsflags & SOF_TIMESTAMPING_OPT_ID) - tskey = sk->sk_tskey++; - } - /* * Let's try using as much space as possible. * Use MTU if total length of the message fits into the MTU. 
@@ -1442,8 +1440,8 @@ alloc_new_skb: dst_exthdrlen); /* Only the initial fragment is time stamped */ - skb_shinfo(skb)->tx_flags = tx_flags; - tx_flags = 0; + skb_shinfo(skb)->tx_flags = cork->tx_flags; + cork->tx_flags = 0; skb_shinfo(skb)->tskey = tskey; tskey = 0; @@ -1560,8 +1558,7 @@ int ip6_append_data(struct sock *sk, int odd, struct sk_buff *skb), void *from, int length, int transhdrlen, struct ipcm6_cookie *ipc6, struct flowi6 *fl6, - struct rt6_info *rt, unsigned int flags, - const struct sockcm_cookie *sockc) + struct rt6_info *rt, unsigned int flags) { struct inet_sock *inet = inet_sk(sk); struct ipv6_pinfo *np = inet6_sk(sk); @@ -1589,7 +1586,7 @@ int ip6_append_data(struct sock *sk, return __ip6_append_data(sk, fl6, &sk->sk_write_queue, &inet->cork.base, &np->cork, sk_page_frag(sk), getfrag, - from, length, transhdrlen, flags, ipc6, sockc); + from, length, transhdrlen, flags, ipc6); } EXPORT_SYMBOL_GPL(ip6_append_data); @@ -1673,6 +1670,8 @@ struct sk_buff *__ip6_make_skb(struct sock *sk, skb->priority = sk->sk_priority; skb->mark = sk->sk_mark; + skb->tstamp = cork->base.transmit_time; + skb_dst_set(skb, dst_clone(&rt->dst)); IP6_UPD_PO_STATS(net, rt->rt6i_idev, IPSTATS_MIB_OUT, skb->len); if (proto == IPPROTO_ICMPV6) { @@ -1747,8 +1746,7 @@ struct sk_buff *ip6_make_skb(struct sock *sk, void *from, int length, int transhdrlen, struct ipcm6_cookie *ipc6, struct flowi6 *fl6, struct rt6_info *rt, unsigned int flags, - struct inet_cork_full *cork, - const struct sockcm_cookie *sockc) + struct inet_cork_full *cork) { struct inet6_cork v6_cork; struct sk_buff_head queue; @@ -1776,7 +1774,7 @@ struct sk_buff *ip6_make_skb(struct sock *sk, err = __ip6_append_data(sk, fl6, &queue, &cork->base, &v6_cork, &current->task_frag, getfrag, from, length + exthdrlen, transhdrlen + exthdrlen, - flags, ipc6, sockc); + flags, ipc6); if (err) { __ip6_flush_pending_frames(sk, &queue, cork, &v6_cork); return ERR_PTR(err); diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c index 0d0f0053bb11..d0b7e0249c13 100644 --- a/net/ipv6/ip6mr.c +++ b/net/ipv6/ip6mr.c @@ -32,6 +32,7 @@ #include <linux/seq_file.h> #include <linux/init.h> #include <linux/compat.h> +#include <linux/rhashtable.h> #include <net/protocol.h> #include <linux/skbuff.h> #include <net/raw.h> diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c index 568ca4187cd1..c0cac9cc3a28 100644 --- a/net/ipv6/ipv6_sockglue.c +++ b/net/ipv6/ipv6_sockglue.c @@ -500,7 +500,6 @@ sticky_done: struct ipv6_txoptions *opt = NULL; struct msghdr msg; struct flowi6 fl6; - struct sockcm_cookie sockc_junk; struct ipcm6_cookie ipc6; memset(&fl6, 0, sizeof(fl6)); @@ -533,7 +532,7 @@ sticky_done: msg.msg_control = (void *)(opt+1); ipc6.opt = opt; - retv = ip6_datagram_send_ctl(net, sk, &msg, &fl6, &ipc6, &sockc_junk); + retv = ip6_datagram_send_ctl(net, sk, &msg, &fl6, &ipc6); if (retv) goto done; update: diff --git a/net/ipv6/netfilter/nf_log_ipv6.c b/net/ipv6/netfilter/nf_log_ipv6.c index b397a8fe88b9..c6bf580d0f33 100644 --- a/net/ipv6/netfilter/nf_log_ipv6.c +++ b/net/ipv6/netfilter/nf_log_ipv6.c @@ -36,7 +36,7 @@ static const struct nf_loginfo default_loginfo = { }; /* One level of recursion won't kill us */ -static void dump_ipv6_packet(struct nf_log_buf *m, +static void dump_ipv6_packet(struct net *net, struct nf_log_buf *m, const struct nf_loginfo *info, const struct sk_buff *skb, unsigned int ip6hoff, int recurse) @@ -258,7 +258,7 @@ static void dump_ipv6_packet(struct nf_log_buf *m, /* Max length: 3+maxlen */ if (recurse) { nf_log_buf_add(m, "["); -
dump_ipv6_packet(m, info, skb, + dump_ipv6_packet(net, m, info, skb, ptr + sizeof(_icmp6h), 0); nf_log_buf_add(m, "] "); } @@ -278,7 +278,7 @@ static void dump_ipv6_packet(struct nf_log_buf *m, /* Max length: 15 "UID=4294967295 " */ if ((logflags & NF_LOG_UID) && recurse) - nf_log_dump_sk_uid_gid(m, skb->sk); + nf_log_dump_sk_uid_gid(net, m, skb->sk); /* Max length: 16 "MARK=0xFFFFFFFF " */ if (recurse && skb->mark) @@ -365,7 +365,7 @@ static void nf_log_ip6_packet(struct net *net, u_int8_t pf, if (in != NULL) dump_ipv6_mac_header(m, loginfo, skb); - dump_ipv6_packet(m, loginfo, skb, skb_network_offset(skb), 1); + dump_ipv6_packet(net, m, loginfo, skb, skb_network_offset(skb), 1); nf_log_buf_close(m); } diff --git a/net/ipv6/ping.c b/net/ipv6/ping.c index 96f56bf49a30..4c04bccc7417 100644 --- a/net/ipv6/ping.c +++ b/net/ipv6/ping.c @@ -62,7 +62,6 @@ static int ping_v6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) struct dst_entry *dst; struct rt6_info *rt; struct pingfakehdr pfh; - struct sockcm_cookie junk = {0}; struct ipcm6_cookie ipc6; pr_debug("ping_v6_sendmsg(sk=%p,sk->num=%u)\n", inet, inet->inet_num); @@ -119,7 +118,7 @@ static int ping_v6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) fl6.fl6_icmp_code = user_icmph.icmp6_code; security_sk_classify_flow(sk, flowi6_to_flowi(&fl6)); - ipc6.tclass = np->tclass; + ipcm6_init_sk(&ipc6, np); fl6.flowlabel = ip6_make_flowinfo(ipc6.tclass, fl6.flowlabel); dst = ip6_sk_dst_lookup_flow(sk, &fl6, daddr, false); @@ -142,13 +141,11 @@ static int ping_v6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) pfh.family = AF_INET6; ipc6.hlimit = ip6_sk_dst_hoplimit(np, &fl6, dst); - ipc6.dontfrag = np->dontfrag; - ipc6.opt = NULL; lock_sock(sk); err = ip6_append_data(sk, ping_getfrag, &pfh, len, 0, &ipc6, &fl6, rt, - MSG_DONTWAIT, &junk); + MSG_DONTWAIT); if (err) { ICMP6_INC_STATS(sock_net(sk), rt->rt6i_idev, diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c index afc307c89d1a..413d98bf24f4 100644 --- a/net/ipv6/raw.c +++ b/net/ipv6/raw.c @@ -620,7 +620,7 @@ out: static int rawv6_send_hdrinc(struct sock *sk, struct msghdr *msg, int length, struct flowi6 *fl6, struct dst_entry **dstp, - unsigned int flags) + unsigned int flags, const struct sockcm_cookie *sockc) { struct ipv6_pinfo *np = inet6_sk(sk); struct net *net = sock_net(sk); @@ -650,6 +650,7 @@ static int rawv6_send_hdrinc(struct sock *sk, struct msghdr *msg, int length, skb->protocol = htons(ETH_P_IPV6); skb->priority = sk->sk_priority; skb->mark = sk->sk_mark; + skb->tstamp = sockc->transmit_time; skb_dst_set(skb, &rt->dst); *dstp = NULL; @@ -766,7 +767,6 @@ static int rawv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) struct dst_entry *dst = NULL; struct raw6_frag_vec rfv; struct flowi6 fl6; - struct sockcm_cookie sockc; struct ipcm6_cookie ipc6; int addr_len = msg->msg_namelen; u16 proto; @@ -790,10 +790,8 @@ static int rawv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) fl6.flowi6_mark = sk->sk_mark; fl6.flowi6_uid = sk->sk_uid; - ipc6.hlimit = -1; - ipc6.tclass = -1; - ipc6.dontfrag = -1; - ipc6.opt = NULL; + ipcm6_init(&ipc6); + ipc6.sockc.tsflags = sk->sk_tsflags; if (sin6) { if (addr_len < SIN6_LEN_RFC2133) @@ -847,14 +845,13 @@ static int rawv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) if (fl6.flowi6_oif == 0) fl6.flowi6_oif = sk->sk_bound_dev_if; - sockc.tsflags = sk->sk_tsflags; if (msg->msg_controllen) { opt = &opt_space; memset(opt, 0, sizeof(struct ipv6_txoptions)); opt->tot_len = sizeof(struct ipv6_txoptions); 
ipc6.opt = opt; - err = ip6_datagram_send_ctl(sock_net(sk), sk, msg, &fl6, &ipc6, &sockc); + err = ip6_datagram_send_ctl(sock_net(sk), sk, msg, &fl6, &ipc6); if (err < 0) { fl6_sock_release(flowlabel); return err; @@ -921,13 +918,14 @@ static int rawv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) back_from_confirm: if (inet->hdrincl) - err = rawv6_send_hdrinc(sk, msg, len, &fl6, &dst, msg->msg_flags); + err = rawv6_send_hdrinc(sk, msg, len, &fl6, &dst, + msg->msg_flags, &ipc6.sockc); else { ipc6.opt = opt; lock_sock(sk); err = ip6_append_data(sk, raw6_getfrag, &rfv, len, 0, &ipc6, &fl6, (struct rt6_info *)dst, - msg->msg_flags, &sockc); + msg->msg_flags); if (err) ip6_flush_pending_frames(sk); diff --git a/net/ipv6/seg6.c b/net/ipv6/seg6.c index 0fdf2a55e746..8d0ba757a46c 100644 --- a/net/ipv6/seg6.c +++ b/net/ipv6/seg6.c @@ -17,6 +17,7 @@ #include <linux/net.h> #include <linux/in6.h> #include <linux/slab.h> +#include <linux/rhashtable.h> #include <net/ipv6.h> #include <net/protocol.h> diff --git a/net/ipv6/seg6_hmac.c b/net/ipv6/seg6_hmac.c index 558fe8cc6d43..8546f94f30d4 100644 --- a/net/ipv6/seg6_hmac.c +++ b/net/ipv6/seg6_hmac.c @@ -22,6 +22,7 @@ #include <linux/icmpv6.h> #include <linux/mroute6.h> #include <linux/slab.h> +#include <linux/rhashtable.h> #include <linux/netfilter.h> #include <linux/netfilter_ipv6.h> diff --git a/net/ipv6/tcpv6_offload.c b/net/ipv6/tcpv6_offload.c index 278e49cd67d4..e72947c99454 100644 --- a/net/ipv6/tcpv6_offload.c +++ b/net/ipv6/tcpv6_offload.c @@ -15,8 +15,8 @@ #include <net/ip6_checksum.h> #include "ip6_offload.h" -static struct sk_buff **tcp6_gro_receive(struct sk_buff **head, - struct sk_buff *skb) +static struct sk_buff *tcp6_gro_receive(struct list_head *head, + struct sk_buff *skb) { /* Don't bother verifying checksum if we're going to flush anyway. */ if (!NAPI_GRO_CB(skb)->flush && diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c index e6645cae403e..f6b96956a8ed 100644 --- a/net/ipv6/udp.c +++ b/net/ipv6/udp.c @@ -1141,13 +1141,10 @@ int udpv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) int err; int is_udplite = IS_UDPLITE(sk); int (*getfrag)(void *, char *, int, int, int, struct sk_buff *); - struct sockcm_cookie sockc; - ipc6.hlimit = -1; - ipc6.tclass = -1; - ipc6.dontfrag = -1; + ipcm6_init(&ipc6); ipc6.gso_size = up->gso_size; - sockc.tsflags = sk->sk_tsflags; + ipc6.sockc.tsflags = sk->sk_tsflags; /* destination address check */ if (sin6) { @@ -1282,7 +1279,7 @@ do_udp_sendmsg: err = udp_cmsg_send(sk, msg, &ipc6.gso_size); if (err > 0) err = ip6_datagram_send_ctl(sock_net(sk), sk, msg, &fl6, - &ipc6, &sockc); + &ipc6); if (err < 0) { fl6_sock_release(flowlabel); return err; @@ -1376,7 +1373,7 @@ back_from_confirm: skb = ip6_make_skb(sk, getfrag, msg, ulen, sizeof(struct udphdr), &ipc6, &fl6, (struct rt6_info *)dst, - msg->msg_flags, &cork, &sockc); + msg->msg_flags, &cork); err = PTR_ERR(skb); if (!IS_ERR_OR_NULL(skb)) err = udp_v6_send_skb(skb, &fl6, &cork.base); @@ -1402,7 +1399,7 @@ do_append_data: up->len += ulen; err = ip6_append_data(sk, getfrag, msg, ulen, sizeof(struct udphdr), &ipc6, &fl6, (struct rt6_info *)dst, - corkreq ? msg->msg_flags|MSG_MORE : msg->msg_flags, &sockc); + corkreq ? 
msg->msg_flags|MSG_MORE : msg->msg_flags); if (err) udp_v6_flush_pending_frames(sk); else if (!corkreq) diff --git a/net/ipv6/udp_offload.c b/net/ipv6/udp_offload.c index 03a2ff3fe1e6..95dee9ca8d22 100644 --- a/net/ipv6/udp_offload.c +++ b/net/ipv6/udp_offload.c @@ -114,8 +114,8 @@ out: return segs; } -static struct sk_buff **udp6_gro_receive(struct sk_buff **head, - struct sk_buff *skb) +static struct sk_buff *udp6_gro_receive(struct list_head *head, + struct sk_buff *skb) { struct udphdr *uh = udp_gro_udphdr(skb); diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c index 40261cb68e83..1ea285bad84b 100644 --- a/net/l2tp/l2tp_core.c +++ b/net/l2tp/l2tp_core.c @@ -322,8 +322,7 @@ int l2tp_session_register(struct l2tp_session *session, if (tunnel->version == L2TP_HDR_VER_3) { pn = l2tp_pernet(tunnel->l2tp_net); - g_head = l2tp_session_id_hash_2(l2tp_pernet(tunnel->l2tp_net), - session->session_id); + g_head = l2tp_session_id_hash_2(pn, session->session_id); spin_lock_bh(&pn->l2tp_session_hlist_lock); @@ -783,7 +782,7 @@ EXPORT_SYMBOL(l2tp_recv_common); /* Drop skbs from the session's reorder_q */ -int l2tp_session_queue_purge(struct l2tp_session *session) +static int l2tp_session_queue_purge(struct l2tp_session *session) { struct sk_buff *skb = NULL; BUG_ON(!session); @@ -794,7 +793,6 @@ int l2tp_session_queue_purge(struct l2tp_session *session) } return 0; } -EXPORT_SYMBOL_GPL(l2tp_session_queue_purge); /* Internal UDP receive frame. Do the real work of receiving an L2TP data frame * here. The skb is not on a list when we get here. @@ -1009,8 +1007,8 @@ static int l2tp_build_l2tpv3_header(struct l2tp_session *session, void *buf) return bufp - optr; } -static int l2tp_xmit_core(struct l2tp_session *session, struct sk_buff *skb, - struct flowi *fl, size_t data_len) +static void l2tp_xmit_core(struct l2tp_session *session, struct sk_buff *skb, + struct flowi *fl, size_t data_len) { struct l2tp_tunnel *tunnel = session->tunnel; unsigned int len = skb->len; @@ -1052,8 +1050,6 @@ static int l2tp_xmit_core(struct l2tp_session *session, struct sk_buff *skb, atomic_long_inc(&tunnel->stats.tx_errors); atomic_long_inc(&session->stats.tx_errors); } - - return 0; } /* If caller requires the skb to have a ppp header, the header must be @@ -1193,7 +1189,7 @@ end: /* When the tunnel is closed, all the attached sessions need to go too. 
*/ -void l2tp_tunnel_closeall(struct l2tp_tunnel *tunnel) +static void l2tp_tunnel_closeall(struct l2tp_tunnel *tunnel) { int hash; struct hlist_node *walk; @@ -1242,7 +1238,6 @@ again: } write_unlock_bh(&tunnel->hlist_lock); } -EXPORT_SYMBOL_GPL(l2tp_tunnel_closeall); /* Tunnel socket destroy hook for UDP encapsulation */ static void l2tp_udp_encap_destroy(struct sock *sk) diff --git a/net/l2tp/l2tp_core.h b/net/l2tp/l2tp_core.h index c199020f8a8a..a5c09d3a5698 100644 --- a/net/l2tp/l2tp_core.h +++ b/net/l2tp/l2tp_core.h @@ -180,9 +180,6 @@ struct l2tp_tunnel { struct net *l2tp_net; /* the net we belong to */ refcount_t ref_count; -#ifdef CONFIG_DEBUG_FS - void (*show)(struct seq_file *m, void *arg); -#endif int (*recv_payload_hook)(struct sk_buff *skb); void (*old_sk_destruct)(struct sock *); struct sock *sock; /* Parent socket */ @@ -190,8 +187,6 @@ struct l2tp_tunnel { * was created by userspace */ struct work_struct del_work; - - uint8_t priv[0]; /* private data */ }; struct l2tp_nl_cmd_ops { @@ -201,11 +196,6 @@ struct l2tp_nl_cmd_ops { int (*session_delete)(struct l2tp_session *session); }; -static inline void *l2tp_tunnel_priv(struct l2tp_tunnel *tunnel) -{ - return &tunnel->priv[0]; -} - static inline void *l2tp_session_priv(struct l2tp_session *session) { return &session->priv[0]; @@ -229,7 +219,6 @@ int l2tp_tunnel_create(struct net *net, int fd, int version, u32 tunnel_id, int l2tp_tunnel_register(struct l2tp_tunnel *tunnel, struct net *net, struct l2tp_tunnel_cfg *cfg); -void l2tp_tunnel_closeall(struct l2tp_tunnel *tunnel); void l2tp_tunnel_delete(struct l2tp_tunnel *tunnel); struct l2tp_session *l2tp_session_create(int priv_size, struct l2tp_tunnel *tunnel, @@ -244,7 +233,6 @@ void l2tp_session_free(struct l2tp_session *session); void l2tp_recv_common(struct l2tp_session *session, struct sk_buff *skb, unsigned char *ptr, unsigned char *optr, u16 hdrflags, int length, int (*payload_hook)(struct sk_buff *skb)); -int l2tp_session_queue_purge(struct l2tp_session *session); int l2tp_udp_encap_recv(struct sock *sk, struct sk_buff *skb); void l2tp_session_set_header_len(struct l2tp_session *session, int version); diff --git a/net/l2tp/l2tp_debugfs.c b/net/l2tp/l2tp_debugfs.c index e87686f7d63c..b5d7dde003ef 100644 --- a/net/l2tp/l2tp_debugfs.c +++ b/net/l2tp/l2tp_debugfs.c @@ -177,9 +177,6 @@ static void l2tp_dfs_seq_tunnel_show(struct seq_file *m, void *v) atomic_long_read(&tunnel->stats.rx_packets), atomic_long_read(&tunnel->stats.rx_bytes), atomic_long_read(&tunnel->stats.rx_errors)); - - if (tunnel->show != NULL) - tunnel->show(m, tunnel); } static void l2tp_dfs_seq_session_show(struct seq_file *m, void *v) diff --git a/net/l2tp/l2tp_ip6.c b/net/l2tp/l2tp_ip6.c index 957369192ca1..672e5b753738 100644 --- a/net/l2tp/l2tp_ip6.c +++ b/net/l2tp/l2tp_ip6.c @@ -500,7 +500,6 @@ static int l2tp_ip6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) struct ip6_flowlabel *flowlabel = NULL; struct dst_entry *dst = NULL; struct flowi6 fl6; - struct sockcm_cookie sockc_unused = {0}; struct ipcm6_cookie ipc6; int addr_len = msg->msg_namelen; int transhdrlen = 4; /* zero session-id */ @@ -525,9 +524,7 @@ static int l2tp_ip6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) fl6.flowi6_mark = sk->sk_mark; fl6.flowi6_uid = sk->sk_uid; - ipc6.hlimit = -1; - ipc6.tclass = -1; - ipc6.dontfrag = -1; + ipcm6_init(&ipc6); if (lsa) { if (addr_len < SIN6_LEN_RFC2133) @@ -575,8 +572,7 @@ static int l2tp_ip6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) opt->tot_len = sizeof(struct 
ipv6_txoptions); ipc6.opt = opt; - err = ip6_datagram_send_ctl(sock_net(sk), sk, msg, &fl6, &ipc6, - &sockc_unused); + err = ip6_datagram_send_ctl(sock_net(sk), sk, msg, &fl6, &ipc6); if (err < 0) { fl6_sock_release(flowlabel); return err; @@ -641,7 +637,7 @@ back_from_confirm: err = ip6_append_data(sk, ip_generic_getfrag, msg, ulen, transhdrlen, &ipc6, &fl6, (struct rt6_info *)dst, - msg->msg_flags, &sockc_unused); + msg->msg_flags); if (err) ip6_flush_pending_frames(sk); else if (!(msg->msg_flags & MSG_MORE)) diff --git a/net/l2tp/l2tp_ppp.c b/net/l2tp/l2tp_ppp.c index e398797878a9..9ac02c93df98 100644 --- a/net/l2tp/l2tp_ppp.c +++ b/net/l2tp/l2tp_ppp.c @@ -424,12 +424,6 @@ static void pppol2tp_put_sk(struct rcu_head *head) sock_put(ps->__sk); } -/* Called by l2tp_core when a session socket is being closed. - */ -static void pppol2tp_session_close(struct l2tp_session *session) -{ -} - /* Really kill the session socket. (Called from sock_put() if * refcnt == 0.) */ @@ -573,7 +567,6 @@ static void pppol2tp_session_init(struct l2tp_session *session) struct dst_entry *dst; session->recv_skb = pppol2tp_recv; - session->session_close = pppol2tp_session_close; #if IS_ENABLED(CONFIG_L2TP_DEBUGFS) session->show = pppol2tp_show; #endif @@ -595,40 +588,113 @@ static void pppol2tp_session_init(struct l2tp_session *session) } } +struct l2tp_connect_info { + u8 version; + int fd; + u32 tunnel_id; + u32 peer_tunnel_id; + u32 session_id; + u32 peer_session_id; +}; + +static int pppol2tp_sockaddr_get_info(const void *sa, int sa_len, + struct l2tp_connect_info *info) +{ + switch (sa_len) { + case sizeof(struct sockaddr_pppol2tp): + { + const struct sockaddr_pppol2tp *sa_v2in4 = sa; + + if (sa_v2in4->sa_protocol != PX_PROTO_OL2TP) + return -EINVAL; + + info->version = 2; + info->fd = sa_v2in4->pppol2tp.fd; + info->tunnel_id = sa_v2in4->pppol2tp.s_tunnel; + info->peer_tunnel_id = sa_v2in4->pppol2tp.d_tunnel; + info->session_id = sa_v2in4->pppol2tp.s_session; + info->peer_session_id = sa_v2in4->pppol2tp.d_session; + + break; + } + case sizeof(struct sockaddr_pppol2tpv3): + { + const struct sockaddr_pppol2tpv3 *sa_v3in4 = sa; + + if (sa_v3in4->sa_protocol != PX_PROTO_OL2TP) + return -EINVAL; + + info->version = 3; + info->fd = sa_v3in4->pppol2tp.fd; + info->tunnel_id = sa_v3in4->pppol2tp.s_tunnel; + info->peer_tunnel_id = sa_v3in4->pppol2tp.d_tunnel; + info->session_id = sa_v3in4->pppol2tp.s_session; + info->peer_session_id = sa_v3in4->pppol2tp.d_session; + + break; + } + case sizeof(struct sockaddr_pppol2tpin6): + { + const struct sockaddr_pppol2tpin6 *sa_v2in6 = sa; + + if (sa_v2in6->sa_protocol != PX_PROTO_OL2TP) + return -EINVAL; + + info->version = 2; + info->fd = sa_v2in6->pppol2tp.fd; + info->tunnel_id = sa_v2in6->pppol2tp.s_tunnel; + info->peer_tunnel_id = sa_v2in6->pppol2tp.d_tunnel; + info->session_id = sa_v2in6->pppol2tp.s_session; + info->peer_session_id = sa_v2in6->pppol2tp.d_session; + + break; + } + case sizeof(struct sockaddr_pppol2tpv3in6): + { + const struct sockaddr_pppol2tpv3in6 *sa_v3in6 = sa; + + if (sa_v3in6->sa_protocol != PX_PROTO_OL2TP) + return -EINVAL; + + info->version = 3; + info->fd = sa_v3in6->pppol2tp.fd; + info->tunnel_id = sa_v3in6->pppol2tp.s_tunnel; + info->peer_tunnel_id = sa_v3in6->pppol2tp.d_tunnel; + info->session_id = sa_v3in6->pppol2tp.s_session; + info->peer_session_id = sa_v3in6->pppol2tp.d_session; + + break; + } + default: + return -EINVAL; + } + + return 0; +} + /* connect() handler. 
Attach a PPPoX socket to a tunnel UDP socket */ static int pppol2tp_connect(struct socket *sock, struct sockaddr *uservaddr, int sockaddr_len, int flags) { struct sock *sk = sock->sk; - struct sockaddr_pppol2tp *sp = (struct sockaddr_pppol2tp *) uservaddr; struct pppox_sock *po = pppox_sk(sk); struct l2tp_session *session = NULL; + struct l2tp_connect_info info; struct l2tp_tunnel *tunnel; struct pppol2tp_session *ps; struct l2tp_session_cfg cfg = { 0, }; - int error = 0; - u32 tunnel_id, peer_tunnel_id; - u32 session_id, peer_session_id; bool drop_refcnt = false; bool drop_tunnel = false; bool new_session = false; bool new_tunnel = false; - int ver = 2; - int fd; - - lock_sock(sk); - - error = -EINVAL; + int error; - if (sockaddr_len != sizeof(struct sockaddr_pppol2tp) && - sockaddr_len != sizeof(struct sockaddr_pppol2tpv3) && - sockaddr_len != sizeof(struct sockaddr_pppol2tpin6) && - sockaddr_len != sizeof(struct sockaddr_pppol2tpv3in6)) - goto end; + error = pppol2tp_sockaddr_get_info(uservaddr, sockaddr_len, &info); + if (error < 0) + return error; - if (sp->sa_protocol != PX_PROTO_OL2TP) - goto end; + lock_sock(sk); /* Check for already bound sockets */ error = -EBUSY; @@ -640,56 +706,12 @@ static int pppol2tp_connect(struct socket *sock, struct sockaddr *uservaddr, if (sk->sk_user_data) goto end; /* socket is already attached */ - /* Get params from socket address. Handle L2TPv2 and L2TPv3. - * This is nasty because there are different sockaddr_pppol2tp - * structs for L2TPv2, L2TPv3, over IPv4 and IPv6. We use - * the sockaddr size to determine which structure the caller - * is using. - */ - peer_tunnel_id = 0; - if (sockaddr_len == sizeof(struct sockaddr_pppol2tp)) { - fd = sp->pppol2tp.fd; - tunnel_id = sp->pppol2tp.s_tunnel; - peer_tunnel_id = sp->pppol2tp.d_tunnel; - session_id = sp->pppol2tp.s_session; - peer_session_id = sp->pppol2tp.d_session; - } else if (sockaddr_len == sizeof(struct sockaddr_pppol2tpv3)) { - struct sockaddr_pppol2tpv3 *sp3 = - (struct sockaddr_pppol2tpv3 *) sp; - ver = 3; - fd = sp3->pppol2tp.fd; - tunnel_id = sp3->pppol2tp.s_tunnel; - peer_tunnel_id = sp3->pppol2tp.d_tunnel; - session_id = sp3->pppol2tp.s_session; - peer_session_id = sp3->pppol2tp.d_session; - } else if (sockaddr_len == sizeof(struct sockaddr_pppol2tpin6)) { - struct sockaddr_pppol2tpin6 *sp6 = - (struct sockaddr_pppol2tpin6 *) sp; - fd = sp6->pppol2tp.fd; - tunnel_id = sp6->pppol2tp.s_tunnel; - peer_tunnel_id = sp6->pppol2tp.d_tunnel; - session_id = sp6->pppol2tp.s_session; - peer_session_id = sp6->pppol2tp.d_session; - } else if (sockaddr_len == sizeof(struct sockaddr_pppol2tpv3in6)) { - struct sockaddr_pppol2tpv3in6 *sp6 = - (struct sockaddr_pppol2tpv3in6 *) sp; - ver = 3; - fd = sp6->pppol2tp.fd; - tunnel_id = sp6->pppol2tp.s_tunnel; - peer_tunnel_id = sp6->pppol2tp.d_tunnel; - session_id = sp6->pppol2tp.s_session; - peer_session_id = sp6->pppol2tp.d_session; - } else { - error = -EINVAL; - goto end; /* bad socket address */ - } - /* Don't bind if tunnel_id is 0 */ error = -EINVAL; - if (tunnel_id == 0) + if (!info.tunnel_id) goto end; - tunnel = l2tp_tunnel_get(sock_net(sk), tunnel_id); + tunnel = l2tp_tunnel_get(sock_net(sk), info.tunnel_id); if (tunnel) drop_tunnel = true; @@ -697,7 +719,7 @@ static int pppol2tp_connect(struct socket *sock, struct sockaddr *uservaddr, * peer_session_id is 0. Otherwise look up tunnel using supplied * tunnel id. 
*/ - if ((session_id == 0) && (peer_session_id == 0)) { + if (!info.session_id && !info.peer_session_id) { if (tunnel == NULL) { struct l2tp_tunnel_cfg tcfg = { .encap = L2TP_ENCAPTYPE_UDP, @@ -707,12 +729,16 @@ static int pppol2tp_connect(struct socket *sock, struct sockaddr *uservaddr, /* Prevent l2tp_tunnel_register() from trying to set up * a kernel socket. */ - if (fd < 0) { + if (info.fd < 0) { error = -EBADF; goto end; } - error = l2tp_tunnel_create(sock_net(sk), fd, ver, tunnel_id, peer_tunnel_id, &tcfg, &tunnel); + error = l2tp_tunnel_create(sock_net(sk), info.fd, + info.version, + info.tunnel_id, + info.peer_tunnel_id, &tcfg, + &tunnel); if (error < 0) goto end; @@ -741,9 +767,9 @@ static int pppol2tp_connect(struct socket *sock, struct sockaddr *uservaddr, tunnel->recv_payload_hook = pppol2tp_recv_payload_hook; if (tunnel->peer_tunnel_id == 0) - tunnel->peer_tunnel_id = peer_tunnel_id; + tunnel->peer_tunnel_id = info.peer_tunnel_id; - session = l2tp_session_get(sock_net(sk), tunnel, session_id); + session = l2tp_session_get(sock_net(sk), tunnel, info.session_id); if (session) { drop_refcnt = true; @@ -772,8 +798,8 @@ static int pppol2tp_connect(struct socket *sock, struct sockaddr *uservaddr, cfg.pw_type = L2TP_PWTYPE_PPP; session = l2tp_session_create(sizeof(struct pppol2tp_session), - tunnel, session_id, - peer_session_id, &cfg); + tunnel, info.session_id, + info.peer_session_id, &cfg); if (IS_ERR(session)) { error = PTR_ERR(session); goto end; diff --git a/net/mac80211/Makefile b/net/mac80211/Makefile index e3589ade62e0..bb707789ef2b 100644 --- a/net/mac80211/Makefile +++ b/net/mac80211/Makefile @@ -12,6 +12,7 @@ mac80211-y := \ scan.o offchannel.o \ ht.o agg-tx.o agg-rx.o \ vht.o \ + he.o \ ibss.o \ iface.o \ rate.o \ diff --git a/net/mac80211/agg-rx.c b/net/mac80211/agg-rx.c index e83c19d4c292..6a4f154c99f6 100644 --- a/net/mac80211/agg-rx.c +++ b/net/mac80211/agg-rx.c @@ -245,6 +245,7 @@ void ___ieee80211_start_rx_ba_session(struct sta_info *sta, }; int i, ret = -EOPNOTSUPP; u16 status = WLAN_STATUS_REQUEST_DECLINED; + u16 max_buf_size; if (tid >= IEEE80211_FIRST_TSPEC_TSID) { ht_dbg(sta->sdata, @@ -268,13 +269,18 @@ void ___ieee80211_start_rx_ba_session(struct sta_info *sta, goto end; } + if (sta->sta.he_cap.has_he) + max_buf_size = IEEE80211_MAX_AMPDU_BUF; + else + max_buf_size = IEEE80211_MAX_AMPDU_BUF_HT; + /* sanity check for incoming parameters: * check if configuration can support the BA policy * and if buffer size does not exceeds max value */ /* XXX: check own ht delayed BA capability?? */ if (((ba_policy != 1) && (!(sta->sta.ht_cap.cap & IEEE80211_HT_CAP_DELAY_BA))) || - (buf_size > IEEE80211_MAX_AMPDU_BUF)) { + (buf_size > max_buf_size)) { status = WLAN_STATUS_INVALID_QOS_PARAM; ht_dbg_ratelimited(sta->sdata, "AddBA Req with bad params from %pM on tid %u. 
policy %d, buffer size %d\n", @@ -283,7 +289,7 @@ void ___ieee80211_start_rx_ba_session(struct sta_info *sta, } /* determine default buffer size */ if (buf_size == 0) - buf_size = IEEE80211_MAX_AMPDU_BUF; + buf_size = max_buf_size; /* make sure the size doesn't exceed the maximum supported by the hw */ if (buf_size > sta->sta.max_rx_aggregation_subframes) diff --git a/net/mac80211/agg-tx.c b/net/mac80211/agg-tx.c index ac4295296514..69e831bc317b 100644 --- a/net/mac80211/agg-tx.c +++ b/net/mac80211/agg-tx.c @@ -463,6 +463,7 @@ void ieee80211_tx_ba_session_handle_start(struct sta_info *sta, int tid) .timeout = 0, }; int ret; + u16 buf_size; tid_tx = rcu_dereference_protected_tid_tx(sta, tid); @@ -511,11 +512,22 @@ void ieee80211_tx_ba_session_handle_start(struct sta_info *sta, int tid) sta->ampdu_mlme.addba_req_num[tid]++; spin_unlock_bh(&sta->lock); + if (sta->sta.he_cap.has_he) { + buf_size = local->hw.max_tx_aggregation_subframes; + } else { + /* + * We really should use what the driver told us it will + * transmit as the maximum, but certain APs (e.g. the + * LinkSys WRT120N with FW v1.0.07 build 002 Jun 18 2012) + * will crash when we use a lower number. + */ + buf_size = IEEE80211_MAX_AMPDU_BUF_HT; + } + /* send AddBA request */ ieee80211_send_addba_request(sdata, sta->sta.addr, tid, tid_tx->dialog_token, params.ssn, - IEEE80211_MAX_AMPDU_BUF, - tid_tx->timeout); + buf_size, tid_tx->timeout); } /* @@ -905,8 +917,7 @@ void ieee80211_process_addba_resp(struct ieee80211_local *local, { struct tid_ampdu_tx *tid_tx; struct ieee80211_txq *txq; - u16 capab, tid; - u8 buf_size; + u16 capab, tid, buf_size; bool amsdu; capab = le16_to_cpu(mgmt->u.action.u.addba_resp.capab); diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c index bdf6fa78d0d2..02f3672e7b5e 100644 --- a/net/mac80211/cfg.c +++ b/net/mac80211/cfg.c @@ -1412,6 +1412,11 @@ static int sta_apply_parameters(struct ieee80211_local *local, ieee80211_vht_cap_ie_to_sta_vht_cap(sdata, sband, params->vht_capa, sta); + if (params->he_capa) + ieee80211_he_cap_ie_to_sta_he_cap(sdata, sband, + (void *)params->he_capa, + params->he_capa_len, sta); + if (params->opmode_notif_used) { /* returned value is only needed for rc update, but the * rc isn't initialized here yet, so ignore it @@ -3486,7 +3491,7 @@ static int ieee80211_probe_client(struct wiphy *wiphy, struct net_device *dev, } local_bh_disable(); - ieee80211_xmit(sdata, sta, skb); + ieee80211_xmit(sdata, sta, skb, 0); local_bh_enable(); ret = 0; diff --git a/net/mac80211/ethtool.c b/net/mac80211/ethtool.c index 690c142a7a44..5ac743816b59 100644 --- a/net/mac80211/ethtool.c +++ b/net/mac80211/ethtool.c @@ -116,16 +116,16 @@ static void ieee80211_get_stats(struct net_device *dev, data[i++] = sta->sta_state; - if (sinfo.filled & BIT(NL80211_STA_INFO_TX_BITRATE)) + if (sinfo.filled & BIT_ULL(NL80211_STA_INFO_TX_BITRATE)) data[i] = 100000ULL * cfg80211_calculate_bitrate(&sinfo.txrate); i++; - if (sinfo.filled & BIT(NL80211_STA_INFO_RX_BITRATE)) + if (sinfo.filled & BIT_ULL(NL80211_STA_INFO_RX_BITRATE)) data[i] = 100000ULL * cfg80211_calculate_bitrate(&sinfo.rxrate); i++; - if (sinfo.filled & BIT(NL80211_STA_INFO_SIGNAL_AVG)) + if (sinfo.filled & BIT_ULL(NL80211_STA_INFO_SIGNAL_AVG)) data[i] = (u8)sinfo.signal_avg; i++; } else { diff --git a/net/mac80211/he.c b/net/mac80211/he.c new file mode 100644 index 000000000000..769078ed5a12 --- /dev/null +++ b/net/mac80211/he.c @@ -0,0 +1,55 @@ +/* + * HE handling + * + * Copyright(c) 2017 Intel Deutschland GmbH + * + * This program is free software; 
you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include "ieee80211_i.h" + +void +ieee80211_he_cap_ie_to_sta_he_cap(struct ieee80211_sub_if_data *sdata, + struct ieee80211_supported_band *sband, + const u8 *he_cap_ie, u8 he_cap_len, + struct sta_info *sta) +{ + struct ieee80211_sta_he_cap *he_cap = &sta->sta.he_cap; + struct ieee80211_he_cap_elem *he_cap_ie_elem = (void *)he_cap_ie; + u8 he_ppe_size; + u8 mcs_nss_size; + u8 he_total_size; + + memset(he_cap, 0, sizeof(*he_cap)); + + if (!he_cap_ie || !ieee80211_get_he_sta_cap(sband)) + return; + + /* Make sure size is OK */ + mcs_nss_size = ieee80211_he_mcs_nss_size(he_cap_ie_elem); + he_ppe_size = + ieee80211_he_ppe_size(he_cap_ie[sizeof(he_cap->he_cap_elem) + + mcs_nss_size], + he_cap_ie_elem->phy_cap_info); + he_total_size = sizeof(he_cap->he_cap_elem) + mcs_nss_size + + he_ppe_size; + if (he_cap_len < he_total_size) + return; + + memcpy(&he_cap->he_cap_elem, he_cap_ie, sizeof(he_cap->he_cap_elem)); + + /* HE Tx/Rx HE MCS NSS Support Field */ + memcpy(&he_cap->he_mcs_nss_supp, + &he_cap_ie[sizeof(he_cap->he_cap_elem)], mcs_nss_size); + + /* Check if there are (optional) PPE Thresholds */ + if (he_cap->he_cap_elem.phy_cap_info[6] & + IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT) + memcpy(he_cap->ppe_thres, + &he_cap_ie[sizeof(he_cap->he_cap_elem) + mcs_nss_size], + he_ppe_size); + + he_cap->has_he = true; +} diff --git a/net/mac80211/ht.c b/net/mac80211/ht.c index 26a7ba3b698f..f849ea814993 100644 --- a/net/mac80211/ht.c +++ b/net/mac80211/ht.c @@ -352,7 +352,7 @@ void ieee80211_ba_session_work(struct work_struct *work) test_and_clear_bit(tid, sta->ampdu_mlme.tid_rx_manage_offl)) ___ieee80211_start_rx_ba_session(sta, 0, 0, 0, 1, tid, - IEEE80211_MAX_AMPDU_BUF, + IEEE80211_MAX_AMPDU_BUF_HT, false, true); if (test_and_clear_bit(tid + IEEE80211_NUM_TIDS, diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h index d1978aa1c15d..172aeae21ae9 100644 --- a/net/mac80211/ieee80211_i.h +++ b/net/mac80211/ieee80211_i.h @@ -165,6 +165,7 @@ typedef unsigned __bitwise ieee80211_tx_result; #define TX_DROP ((__force ieee80211_tx_result) 1u) #define TX_QUEUED ((__force ieee80211_tx_result) 2u) +#define IEEE80211_TX_NO_SEQNO BIT(0) #define IEEE80211_TX_UNICAST BIT(1) #define IEEE80211_TX_PS_BUFFERED BIT(2) @@ -364,6 +365,7 @@ enum ieee80211_sta_flags { IEEE80211_STA_DISABLE_160MHZ = BIT(13), IEEE80211_STA_DISABLE_WMM = BIT(14), IEEE80211_STA_ENABLE_RRM = BIT(15), + IEEE80211_STA_DISABLE_HE = BIT(16), }; struct ieee80211_mgd_auth_data { @@ -1453,6 +1455,10 @@ struct ieee802_11_elems { const struct ieee80211_vht_cap *vht_cap_elem; const struct ieee80211_vht_operation *vht_operation; const struct ieee80211_meshconf_ie *mesh_config; + const u8 *he_cap; + const struct ieee80211_he_operation *he_operation; + const struct ieee80211_mu_edca_param_set *mu_edca_param_set; + const u8 *uora_element; const u8 *mesh_id; const u8 *peering; const __le16 *awake_window; @@ -1482,6 +1488,7 @@ struct ieee802_11_elems { u8 ext_supp_rates_len; u8 wmm_info_len; u8 wmm_param_len; + u8 he_cap_len; u8 mesh_id_len; u8 peering_len; u8 preq_len; @@ -1824,6 +1831,13 @@ void ieee80211_get_vht_mask_from_cap(__le16 vht_cap, enum nl80211_chan_width ieee80211_sta_rx_bw_to_chan_width(struct sta_info *sta); +/* HE */ +void +ieee80211_he_cap_ie_to_sta_he_cap(struct ieee80211_sub_if_data *sdata, + struct ieee80211_supported_band *sband, + const u8 *he_cap_ie, u8 
he_cap_len, + struct sta_info *sta); + /* Spectrum management */ void ieee80211_process_measurement_req(struct ieee80211_sub_if_data *sdata, struct ieee80211_mgmt *mgmt, @@ -1880,19 +1894,20 @@ void ieee80211_regulatory_limit_wmm_params(struct ieee80211_sub_if_data *sdata, void ieee80211_set_wmm_default(struct ieee80211_sub_if_data *sdata, bool bss_notify, bool enable_qos); void ieee80211_xmit(struct ieee80211_sub_if_data *sdata, - struct sta_info *sta, struct sk_buff *skb); + struct sta_info *sta, struct sk_buff *skb, + u32 txdata_flags); void __ieee80211_tx_skb_tid_band(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb, int tid, - enum nl80211_band band); + enum nl80211_band band, u32 txdata_flags); static inline void ieee80211_tx_skb_tid_band(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb, int tid, - enum nl80211_band band) + enum nl80211_band band, u32 txdata_flags) { rcu_read_lock(); - __ieee80211_tx_skb_tid_band(sdata, skb, tid, band); + __ieee80211_tx_skb_tid_band(sdata, skb, tid, band, txdata_flags); rcu_read_unlock(); } @@ -1910,7 +1925,7 @@ static inline void ieee80211_tx_skb_tid(struct ieee80211_sub_if_data *sdata, } __ieee80211_tx_skb_tid_band(sdata, skb, tid, - chanctx_conf->def.chan->band); + chanctx_conf->def.chan->band, 0); rcu_read_unlock(); } @@ -2031,26 +2046,27 @@ void ieee80211_send_auth(struct ieee80211_sub_if_data *sdata, void ieee80211_send_deauth_disassoc(struct ieee80211_sub_if_data *sdata, const u8 *bssid, u16 stype, u16 reason, bool send_frame, u8 *frame_buf); + +enum { + IEEE80211_PROBE_FLAG_DIRECTED = BIT(0), + IEEE80211_PROBE_FLAG_MIN_CONTENT = BIT(1), + IEEE80211_PROBE_FLAG_RANDOM_SN = BIT(2), +}; + int ieee80211_build_preq_ies(struct ieee80211_local *local, u8 *buffer, size_t buffer_len, struct ieee80211_scan_ies *ie_desc, const u8 *ie, size_t ie_len, u8 bands_used, u32 *rate_masks, - struct cfg80211_chan_def *chandef); + struct cfg80211_chan_def *chandef, + u32 flags); struct sk_buff *ieee80211_build_probe_req(struct ieee80211_sub_if_data *sdata, const u8 *src, const u8 *dst, u32 ratemask, struct ieee80211_channel *chan, const u8 *ssid, size_t ssid_len, const u8 *ie, size_t ie_len, - bool directed); -void ieee80211_send_probe_req(struct ieee80211_sub_if_data *sdata, - const u8 *src, const u8 *dst, - const u8 *ssid, size_t ssid_len, - const u8 *ie, size_t ie_len, - u32 ratemask, bool directed, u32 tx_flags, - struct ieee80211_channel *channel, bool scan); - + u32 flags); u32 ieee80211_sta_get_rates(struct ieee80211_sub_if_data *sdata, struct ieee802_11_elems *elems, enum nl80211_band band, u32 *basic_rates); @@ -2073,6 +2089,9 @@ u8 *ieee80211_ie_build_vht_cap(u8 *pos, struct ieee80211_sta_vht_cap *vht_cap, u32 cap); u8 *ieee80211_ie_build_vht_oper(u8 *pos, struct ieee80211_sta_vht_cap *vht_cap, const struct cfg80211_chan_def *chandef); +u8 *ieee80211_ie_build_he_cap(u8 *pos, + const struct ieee80211_sta_he_cap *he_cap, + u8 *end); int ieee80211_parse_bitrates(struct cfg80211_chan_def *chandef, const struct ieee80211_supported_band *sband, const u8 *srates, int srates_len, u32 *rates); diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c index 555e389b7dfa..5e6cf2cee965 100644 --- a/net/mac80211/iface.c +++ b/net/mac80211/iface.c @@ -1130,7 +1130,7 @@ static void ieee80211_uninit(struct net_device *dev) static u16 ieee80211_netdev_select_queue(struct net_device *dev, struct sk_buff *skb, - void *accel_priv, + struct net_device *sb_dev, select_queue_fallback_t fallback) { return ieee80211_select_queue(IEEE80211_DEV_TO_SUB_IF(dev), skb); 
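The two iface.c hunks above track the ndo_select_queue prototype change in which the third argument becomes a struct net_device *sb_dev instead of the old void *accel_priv. As a minimal sketch of a driver callback under the new prototype (illustrative only, not part of this patch; the function name and the trivial queue-0 policy are hypothetical):

static u16 example_ndo_select_queue(struct net_device *dev,
				    struct sk_buff *skb,
				    struct net_device *sb_dev,
				    select_queue_fallback_t fallback)
{
	/* trivial policy for illustration: put everything on queue 0;
	 * real drivers typically hash the flow or, as mac80211 does
	 * above, map the skb's QoS class to a hardware queue
	 */
	return 0;
}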
@@ -1176,7 +1176,7 @@ static const struct net_device_ops ieee80211_dataif_ops = { static u16 ieee80211_monitor_select_queue(struct net_device *dev, struct sk_buff *skb, - void *accel_priv, + struct net_device *sb_dev, select_queue_fallback_t fallback) { struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); diff --git a/net/mac80211/main.c b/net/mac80211/main.c index fb73451ed85e..4fb2709cb527 100644 --- a/net/mac80211/main.c +++ b/net/mac80211/main.c @@ -3,6 +3,7 @@ * Copyright 2005-2006, Devicescape Software, Inc. * Copyright 2006-2007 Jiri Benc <jbenc@suse.cz> * Copyright 2013-2014 Intel Mobile Communications GmbH + * Copyright (C) 2017 Intel Deutschland GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as @@ -557,10 +558,19 @@ struct ieee80211_hw *ieee80211_alloc_hw_nm(size_t priv_data_len, wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_CONTROL_PORT_OVER_NL80211); - if (!ops->hw_scan) + if (!ops->hw_scan) { wiphy->features |= NL80211_FEATURE_LOW_PRIORITY_SCAN | NL80211_FEATURE_AP_SCAN; - + /* + * if the driver behaves correctly using the probe request + * (template) from mac80211, then both of these should be + * supported even with hw scan - but let drivers opt in. + */ + wiphy_ext_feature_set(wiphy, + NL80211_EXT_FEATURE_SCAN_RANDOM_SN); + wiphy_ext_feature_set(wiphy, + NL80211_EXT_FEATURE_SCAN_MIN_PREQ_CONTENT); + } if (!ops->set_key) wiphy->flags |= WIPHY_FLAG_IBSS_RSN; @@ -588,8 +598,8 @@ struct ieee80211_hw *ieee80211_alloc_hw_nm(size_t priv_data_len, local->hw.queues = 1; local->hw.max_rates = 1; local->hw.max_report_rates = 0; - local->hw.max_rx_aggregation_subframes = IEEE80211_MAX_AMPDU_BUF; - local->hw.max_tx_aggregation_subframes = IEEE80211_MAX_AMPDU_BUF; + local->hw.max_rx_aggregation_subframes = IEEE80211_MAX_AMPDU_BUF_HT; + local->hw.max_tx_aggregation_subframes = IEEE80211_MAX_AMPDU_BUF_HT; local->hw.offchannel_tx_hw_queue = IEEE80211_INVAL_HW_QUEUE; local->hw.conf.long_frame_max_tx_count = wiphy->retry_long; local->hw.conf.short_frame_max_tx_count = wiphy->retry_short; @@ -816,7 +826,7 @@ int ieee80211_register_hw(struct ieee80211_hw *hw) int result, i; enum nl80211_band band; int channels, max_bitrates; - bool supp_ht, supp_vht; + bool supp_ht, supp_vht, supp_he; netdev_features_t feature_whitelist; struct cfg80211_chan_def dflt_chandef = {}; @@ -896,6 +906,7 @@ int ieee80211_register_hw(struct ieee80211_hw *hw) max_bitrates = 0; supp_ht = false; supp_vht = false; + supp_he = false; for (band = 0; band < NUM_NL80211_BANDS; band++) { struct ieee80211_supported_band *sband; @@ -922,6 +933,9 @@ int ieee80211_register_hw(struct ieee80211_hw *hw) supp_ht = supp_ht || sband->ht_cap.ht_supported; supp_vht = supp_vht || sband->vht_cap.vht_supported; + if (!supp_he) + supp_he = !!ieee80211_get_he_sta_cap(sband); + if (!sband->ht_cap.ht_supported) continue; @@ -1011,6 +1025,18 @@ int ieee80211_register_hw(struct ieee80211_hw *hw) local->scan_ies_len += 2 + sizeof(struct ieee80211_vht_cap); + /* HE cap element is variable in size - set len to allow max size */ + /* + * TODO: 1 is added at the end of the calculation to accommodate for + * the temporary placing of the HE capabilities IE under EXT. + * Remove it once it is placed in the final place. 
+ */ + if (supp_he) + local->scan_ies_len += + 2 + sizeof(struct ieee80211_he_cap_elem) + + sizeof(struct ieee80211_he_mcs_nss_supp) + + IEEE80211_HE_PPE_THRES_MAX_LEN + 1; + if (!local->ops->hw_scan) { /* For hw_scan, driver needs to set these up. */ local->hw.wiphy->max_scan_ssids = 4; diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c index a59187c016e0..7fb9957359a3 100644 --- a/net/mac80211/mlme.c +++ b/net/mac80211/mlme.c @@ -149,6 +149,7 @@ ieee80211_determine_chantype(struct ieee80211_sub_if_data *sdata, struct ieee80211_channel *channel, const struct ieee80211_ht_operation *ht_oper, const struct ieee80211_vht_operation *vht_oper, + const struct ieee80211_he_operation *he_oper, struct cfg80211_chan_def *chandef, bool tracking) { struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; @@ -207,7 +208,27 @@ ieee80211_determine_chantype(struct ieee80211_sub_if_data *sdata, } vht_chandef = *chandef; - if (!ieee80211_chandef_vht_oper(vht_oper, &vht_chandef)) { + if (!(ifmgd->flags & IEEE80211_STA_DISABLE_HE) && he_oper && + (le32_to_cpu(he_oper->he_oper_params) & + IEEE80211_HE_OPERATION_VHT_OPER_INFO)) { + struct ieee80211_vht_operation he_oper_vht_cap; + + /* + * Set only first 3 bytes (other 2 aren't used in + * ieee80211_chandef_vht_oper() anyway) + */ + memcpy(&he_oper_vht_cap, he_oper->optional, 3); + he_oper_vht_cap.basic_mcs_set = cpu_to_le16(0); + + if (!ieee80211_chandef_vht_oper(&he_oper_vht_cap, + &vht_chandef)) { + if (!(ifmgd->flags & IEEE80211_STA_DISABLE_HE)) + sdata_info(sdata, + "HE AP VHT information is invalid, disable HE\n"); + ret = IEEE80211_STA_DISABLE_HE; + goto out; + } + } else if (!ieee80211_chandef_vht_oper(vht_oper, &vht_chandef)) { if (!(ifmgd->flags & IEEE80211_STA_DISABLE_VHT)) sdata_info(sdata, "AP VHT information is invalid, disable VHT\n"); @@ -300,12 +321,14 @@ static int ieee80211_config_bw(struct ieee80211_sub_if_data *sdata, const struct ieee80211_ht_cap *ht_cap, const struct ieee80211_ht_operation *ht_oper, const struct ieee80211_vht_operation *vht_oper, + const struct ieee80211_he_operation *he_oper, const u8 *bssid, u32 *changed) { struct ieee80211_local *local = sdata->local; struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; - struct ieee80211_supported_band *sband; - struct ieee80211_channel *chan; + struct ieee80211_channel *chan = sdata->vif.bss_conf.chandef.chan; + struct ieee80211_supported_band *sband = + local->hw.wiphy->bands[chan->band]; struct cfg80211_chan_def chandef; u16 ht_opmode; u32 flags; @@ -320,6 +343,11 @@ static int ieee80211_config_bw(struct ieee80211_sub_if_data *sdata, if (ifmgd->flags & IEEE80211_STA_DISABLE_VHT) vht_oper = NULL; + /* don't check HE if we associated as non-HE station */ + if (ifmgd->flags & IEEE80211_STA_DISABLE_HE || + !ieee80211_get_he_sta_cap(sband)) + he_oper = NULL; + if (WARN_ON_ONCE(!sta)) return -EINVAL; @@ -333,12 +361,9 @@ static int ieee80211_config_bw(struct ieee80211_sub_if_data *sdata, sdata->vif.bss_conf.ht_operation_mode = ht_opmode; } - chan = sdata->vif.bss_conf.chandef.chan; - sband = local->hw.wiphy->bands[chan->band]; - - /* calculate new channel (type) based on HT/VHT operation IEs */ + /* calculate new channel (type) based on HT/VHT/HE operation IEs */ flags = ieee80211_determine_chantype(sdata, sband, chan, - ht_oper, vht_oper, + ht_oper, vht_oper, he_oper, &chandef, true); /* @@ -582,6 +607,34 @@ static void ieee80211_add_vht_ie(struct ieee80211_sub_if_data *sdata, ieee80211_ie_build_vht_cap(pos, &vht_cap, cap); } +/* This function determines HE capability flags for the 
association + * and builds the IE. + */ +static void ieee80211_add_he_ie(struct ieee80211_sub_if_data *sdata, + struct sk_buff *skb, + struct ieee80211_supported_band *sband) +{ + u8 *pos; + const struct ieee80211_sta_he_cap *he_cap = NULL; + u8 he_cap_size; + + he_cap = ieee80211_get_he_sta_cap(sband); + if (!he_cap) + return; + + /* + * TODO: the 1 added is because this temporarily is under the EXTENSION + * IE. Get rid of it when it moves. + */ + he_cap_size = + 2 + 1 + sizeof(he_cap->he_cap_elem) + + ieee80211_he_mcs_nss_size(&he_cap->he_cap_elem) + + ieee80211_he_ppe_size(he_cap->ppe_thres[0], + he_cap->he_cap_elem.phy_cap_info); + pos = skb_put(skb, he_cap_size); + ieee80211_ie_build_he_cap(pos, he_cap, pos + he_cap_size); +} + static void ieee80211_send_assoc(struct ieee80211_sub_if_data *sdata) { struct ieee80211_local *local = sdata->local; @@ -643,6 +696,9 @@ static void ieee80211_send_assoc(struct ieee80211_sub_if_data *sdata) 2 + 2 * sband->n_channels + /* supported channels */ 2 + sizeof(struct ieee80211_ht_cap) + /* HT */ 2 + sizeof(struct ieee80211_vht_cap) + /* VHT */ + 2 + 1 + sizeof(struct ieee80211_he_cap_elem) + /* HE */ + sizeof(struct ieee80211_he_mcs_nss_supp) + + IEEE80211_HE_PPE_THRES_MAX_LEN + assoc_data->ie_len + /* extra IEs */ (assoc_data->fils_kek_len ? 16 /* AES-SIV */ : 0) + 9, /* WMM */ @@ -827,11 +883,41 @@ static void ieee80211_send_assoc(struct ieee80211_sub_if_data *sdata) offset = noffset; } + /* if present, add any custom IEs that go before HE */ + if (assoc_data->ie_len) { + static const u8 before_he[] = { + /* + * no need to list the ones split off before VHT + * or generated here + */ + WLAN_EID_OPMODE_NOTIF, + WLAN_EID_EXTENSION, WLAN_EID_EXT_FUTURE_CHAN_GUIDANCE, + /* 11ai elements */ + WLAN_EID_EXTENSION, WLAN_EID_EXT_FILS_SESSION, + WLAN_EID_EXTENSION, WLAN_EID_EXT_FILS_PUBLIC_KEY, + WLAN_EID_EXTENSION, WLAN_EID_EXT_FILS_KEY_CONFIRM, + WLAN_EID_EXTENSION, WLAN_EID_EXT_FILS_HLP_CONTAINER, + WLAN_EID_EXTENSION, WLAN_EID_EXT_FILS_IP_ADDR_ASSIGN, + /* TODO: add 11ah/11aj/11ak elements */ + }; + + /* RIC already taken above, so no need to handle here anymore */ + noffset = ieee80211_ie_split(assoc_data->ie, assoc_data->ie_len, + before_he, ARRAY_SIZE(before_he), + offset); + pos = skb_put(skb, noffset - offset); + memcpy(pos, assoc_data->ie + offset, noffset - offset); + offset = noffset; + } + if (!(ifmgd->flags & IEEE80211_STA_DISABLE_VHT)) ieee80211_add_vht_ie(sdata, skb, sband, &assoc_data->ap_vht_cap); - /* if present, add any custom non-vendor IEs that go after HT */ + if (!(ifmgd->flags & IEEE80211_STA_DISABLE_HE)) + ieee80211_add_he_ie(sdata, skb, sband); + + /* if present, add any custom non-vendor IEs that go after HE */ if (assoc_data->ie_len) { noffset = ieee80211_ie_split_vendor(assoc_data->ie, assoc_data->ie_len, @@ -898,6 +984,11 @@ void ieee80211_send_nullfunc(struct ieee80211_local *local, struct ieee80211_hdr_3addr *nullfunc; struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; + /* Don't send NDPs when STA is connected HE */ + if (sdata->vif.type == NL80211_IFTYPE_STATION && + !(ifmgd->flags & IEEE80211_STA_DISABLE_HE)) + return; + skb = ieee80211_nullfunc_get(&local->hw, &sdata->vif, !ieee80211_hw_check(&local->hw, DOESNT_SUPPORT_QOS_NDP)); if (!skb) @@ -929,6 +1020,10 @@ static void ieee80211_send_4addr_nullfunc(struct ieee80211_local *local, if (WARN_ON(sdata->vif.type != NL80211_IFTYPE_STATION)) return; + /* Don't send NDPs when connected HE */ + if (!(sdata->u.mgd.flags & IEEE80211_STA_DISABLE_HE)) + return; + skb = 
dev_alloc_skb(local->hw.extra_tx_headroom + 30); if (!skb) return; @@ -1700,9 +1795,11 @@ static void ieee80211_sta_handle_tspec_ac_params_wk(struct work_struct *work) } /* MLME */ -static bool ieee80211_sta_wmm_params(struct ieee80211_local *local, - struct ieee80211_sub_if_data *sdata, - const u8 *wmm_param, size_t wmm_param_len) +static bool +ieee80211_sta_wmm_params(struct ieee80211_local *local, + struct ieee80211_sub_if_data *sdata, + const u8 *wmm_param, size_t wmm_param_len, + const struct ieee80211_mu_edca_param_set *mu_edca) { struct ieee80211_tx_queue_params params[IEEE80211_NUM_ACS]; struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; @@ -1749,6 +1846,9 @@ static bool ieee80211_sta_wmm_params(struct ieee80211_local *local, sdata->wmm_acm |= BIT(1) | BIT(2); /* BK/- */ if (uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BK) uapsd = true; + params[ac].mu_edca = !!mu_edca; + if (mu_edca) + params[ac].mu_edca_param_rec = mu_edca->ac_bk; break; case 2: /* AC_VI */ ac = IEEE80211_AC_VI; @@ -1756,6 +1856,9 @@ static bool ieee80211_sta_wmm_params(struct ieee80211_local *local, sdata->wmm_acm |= BIT(4) | BIT(5); /* CL/VI */ if (uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VI) uapsd = true; + params[ac].mu_edca = !!mu_edca; + if (mu_edca) + params[ac].mu_edca_param_rec = mu_edca->ac_vi; break; case 3: /* AC_VO */ ac = IEEE80211_AC_VO; @@ -1763,6 +1866,9 @@ static bool ieee80211_sta_wmm_params(struct ieee80211_local *local, sdata->wmm_acm |= BIT(6) | BIT(7); /* VO/NC */ if (uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VO) uapsd = true; + params[ac].mu_edca = !!mu_edca; + if (mu_edca) + params[ac].mu_edca_param_rec = mu_edca->ac_vo; break; case 0: /* AC_BE */ default: @@ -1771,6 +1877,9 @@ static bool ieee80211_sta_wmm_params(struct ieee80211_local *local, sdata->wmm_acm |= BIT(0) | BIT(3); /* BE/EE */ if (uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BE) uapsd = true; + params[ac].mu_edca = !!mu_edca; + if (mu_edca) + params[ac].mu_edca_param_rec = mu_edca->ac_be; break; } @@ -2219,6 +2328,20 @@ void ieee80211_sta_tx_notify(struct ieee80211_sub_if_data *sdata, ieee80211_sta_reset_conn_monitor(sdata); } +static void ieee80211_mlme_send_probe_req(struct ieee80211_sub_if_data *sdata, + const u8 *src, const u8 *dst, + const u8 *ssid, size_t ssid_len, + struct ieee80211_channel *channel) +{ + struct sk_buff *skb; + + skb = ieee80211_build_probe_req(sdata, src, dst, (u32)-1, channel, + ssid, ssid_len, NULL, 0, + IEEE80211_PROBE_FLAG_DIRECTED); + if (skb) + ieee80211_tx_skb(sdata, skb); +} + static void ieee80211_mgd_probe_ap_send(struct ieee80211_sub_if_data *sdata) { struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; @@ -2265,10 +2388,9 @@ static void ieee80211_mgd_probe_ap_send(struct ieee80211_sub_if_data *sdata) else ssid_len = ssid[1]; - ieee80211_send_probe_req(sdata, sdata->vif.addr, dst, - ssid + 2, ssid_len, NULL, - 0, (u32) -1, true, 0, - ifmgd->associated->channel, false); + ieee80211_mlme_send_probe_req(sdata, sdata->vif.addr, dst, + ssid + 2, ssid_len, + ifmgd->associated->channel); rcu_read_unlock(); } @@ -2370,7 +2492,7 @@ struct sk_buff *ieee80211_ap_probereq_get(struct ieee80211_hw *hw, skb = ieee80211_build_probe_req(sdata, sdata->vif.addr, cbss->bssid, (u32) -1, cbss->channel, ssid + 2, ssid_len, - NULL, 0, true); + NULL, 0, IEEE80211_PROBE_FLAG_DIRECTED); rcu_read_unlock(); return skb; @@ -3008,6 +3130,25 @@ static bool ieee80211_assoc_success(struct ieee80211_sub_if_data *sdata, goto out; } + /* + * If AP doesn't support HT, or it doesn't have HE mandatory IEs, mark + 
* HE as disabled. If on the 5GHz band, make sure it supports VHT. + */ + if (ifmgd->flags & IEEE80211_STA_DISABLE_HT || + (sband->band == NL80211_BAND_5GHZ && + ifmgd->flags & IEEE80211_STA_DISABLE_VHT) || + (!elems.he_cap && !elems.he_operation)) + ifmgd->flags |= IEEE80211_STA_DISABLE_HE; + + if (!(ifmgd->flags & IEEE80211_STA_DISABLE_HE) && + (!elems.he_cap || !elems.he_operation)) { + mutex_unlock(&sdata->local->sta_mtx); + sdata_info(sdata, + "HE AP is missing HE capability/operation\n"); + ret = false; + goto out; + } + /* Set up internal HT/VHT capabilities */ if (elems.ht_cap_elem && !(ifmgd->flags & IEEE80211_STA_DISABLE_HT)) ieee80211_ht_cap_ie_to_sta_ht_cap(sdata, sband, @@ -3017,6 +3158,48 @@ static bool ieee80211_assoc_success(struct ieee80211_sub_if_data *sdata, ieee80211_vht_cap_ie_to_sta_vht_cap(sdata, sband, elems.vht_cap_elem, sta); + if (elems.he_operation && !(ifmgd->flags & IEEE80211_STA_DISABLE_HE) && + elems.he_cap) { + ieee80211_he_cap_ie_to_sta_he_cap(sdata, sband, + elems.he_cap, + elems.he_cap_len, + sta); + + bss_conf->he_support = sta->sta.he_cap.has_he; + } else { + bss_conf->he_support = false; + } + + if (bss_conf->he_support) { + u32 he_oper_params = + le32_to_cpu(elems.he_operation->he_oper_params); + + bss_conf->bss_color = he_oper_params & + IEEE80211_HE_OPERATION_BSS_COLOR_MASK; + bss_conf->htc_trig_based_pkt_ext = + (he_oper_params & + IEEE80211_HE_OPERATION_DFLT_PE_DURATION_MASK) << + IEEE80211_HE_OPERATION_DFLT_PE_DURATION_OFFSET; + bss_conf->frame_time_rts_th = + (he_oper_params & + IEEE80211_HE_OPERATION_RTS_THRESHOLD_MASK) << + IEEE80211_HE_OPERATION_RTS_THRESHOLD_OFFSET; + + bss_conf->multi_sta_back_32bit = + sta->sta.he_cap.he_cap_elem.mac_cap_info[2] & + IEEE80211_HE_MAC_CAP2_32BIT_BA_BITMAP; + + bss_conf->ack_enabled = + sta->sta.he_cap.he_cap_elem.mac_cap_info[2] & + IEEE80211_HE_MAC_CAP2_ACK_EN; + + bss_conf->uora_exists = !!elems.uora_element; + if (elems.uora_element) + bss_conf->uora_ocw_range = elems.uora_element[0]; + + /* TODO: OPEN: what happens if BSS color disable is set? */ + } + /* * Some APs, e.g. 
Netgear WNDR3700, report invalid HT operation data * in their association response, so ignore that data for our own @@ -3076,7 +3259,8 @@ static bool ieee80211_assoc_success(struct ieee80211_sub_if_data *sdata, if (ifmgd->flags & IEEE80211_STA_DISABLE_WMM) { ieee80211_set_wmm_default(sdata, false, false); } else if (!ieee80211_sta_wmm_params(local, sdata, elems.wmm_param, - elems.wmm_param_len)) { + elems.wmm_param_len, + elems.mu_edca_param_set)) { /* still enable QoS since we might have HT/VHT */ ieee80211_set_wmm_default(sdata, false, true); /* set the disable-WMM flag in this case to disable @@ -3590,7 +3774,8 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata, if (!(ifmgd->flags & IEEE80211_STA_DISABLE_WMM) && ieee80211_sta_wmm_params(local, sdata, elems.wmm_param, - elems.wmm_param_len)) + elems.wmm_param_len, + elems.mu_edca_param_set)) changed |= BSS_CHANGED_QOS; /* @@ -3629,7 +3814,8 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata, if (ieee80211_config_bw(sdata, sta, elems.ht_cap_elem, elems.ht_operation, - elems.vht_operation, bssid, &changed)) { + elems.vht_operation, elems.he_operation, + bssid, &changed)) { mutex_unlock(&local->sta_mtx); sdata_info(sdata, "failed to follow AP %pM bandwidth change, disconnect\n", @@ -4266,6 +4452,68 @@ static u8 ieee80211_ht_vht_rx_chains(struct ieee80211_sub_if_data *sdata, return chains; } +static bool +ieee80211_verify_sta_he_mcs_support(struct ieee80211_supported_band *sband, + const struct ieee80211_he_operation *he_op) +{ + const struct ieee80211_sta_he_cap *sta_he_cap = + ieee80211_get_he_sta_cap(sband); + u16 ap_min_req_set; + int i; + + if (!sta_he_cap || !he_op) + return false; + + ap_min_req_set = le16_to_cpu(he_op->he_mcs_nss_set); + + /* Need to go over for 80MHz, 160MHz and for 80+80 */ + for (i = 0; i < 3; i++) { + const struct ieee80211_he_mcs_nss_supp *sta_mcs_nss_supp = + &sta_he_cap->he_mcs_nss_supp; + u16 sta_mcs_map_rx = + le16_to_cpu(((__le16 *)sta_mcs_nss_supp)[2 * i]); + u16 sta_mcs_map_tx = + le16_to_cpu(((__le16 *)sta_mcs_nss_supp)[2 * i + 1]); + u8 nss; + bool verified = true; + + /* + * For each band there is a maximum of 8 spatial streams + * possible. Each of the sta_mcs_map_* is a 16-bit struct built + * of 2 bits per NSS (1-8), with the values defined in enum + * ieee80211_he_mcs_support. Need to make sure STA TX and RX + * capabilities aren't less than the AP's minimum requirements + * for this HE BSS per SS. + * It is enough to find one such band that meets the reqs. 
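An illustrative decode of the per-NSS encoding described above (a sketch only, not part of the patch; the constants come from enum ieee80211_he_mcs_support):

	/*
	 * Example: an MCS map of 0xfffa holds two bits per NSS starting at
	 * bit 0, so NSS 1 and NSS 2 advertise MCS 0-11
	 * (IEEE80211_HE_MCS_SUPPORT_0_11 == 2) while NSS 3-8 are not
	 * supported (IEEE80211_HE_MCS_NOT_SUPPORTED == 3).  Each per-NSS
	 * value is extracted as (map >> (2 * (nss - 1))) & 3, which is what
	 * the loop below does for the STA RX/TX maps and for the AP's
	 * minimum requirement set.
	 */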
+ */ + for (nss = 8; nss > 0; nss--) { + u8 sta_rx_val = (sta_mcs_map_rx >> (2 * (nss - 1))) & 3; + u8 sta_tx_val = (sta_mcs_map_tx >> (2 * (nss - 1))) & 3; + u8 ap_val = (ap_min_req_set >> (2 * (nss - 1))) & 3; + + if (ap_val == IEEE80211_HE_MCS_NOT_SUPPORTED) + continue; + + /* + * Make sure the HE AP doesn't require MCSs that aren't + * supported by the client + */ + if (sta_rx_val == IEEE80211_HE_MCS_NOT_SUPPORTED || + sta_tx_val == IEEE80211_HE_MCS_NOT_SUPPORTED || + (ap_val > sta_rx_val) || (ap_val > sta_tx_val)) { + verified = false; + break; + } + } + + if (verified) + return true; + } + + /* If here, STA doesn't meet AP's HE min requirements */ + return false; +} + static int ieee80211_prep_channel(struct ieee80211_sub_if_data *sdata, struct cfg80211_bss *cbss) { @@ -4274,6 +4522,7 @@ static int ieee80211_prep_channel(struct ieee80211_sub_if_data *sdata, const struct ieee80211_ht_cap *ht_cap = NULL; const struct ieee80211_ht_operation *ht_oper = NULL; const struct ieee80211_vht_operation *vht_oper = NULL; + const struct ieee80211_he_operation *he_oper = NULL; struct ieee80211_supported_band *sband; struct cfg80211_chan_def chandef; int ret; @@ -4329,6 +4578,24 @@ static int ieee80211_prep_channel(struct ieee80211_sub_if_data *sdata, } } + if (!(ifmgd->flags & IEEE80211_STA_DISABLE_HE) && + ieee80211_get_he_sta_cap(sband)) { + const struct cfg80211_bss_ies *ies; + const u8 *he_oper_ie; + + ies = rcu_dereference(cbss->ies); + he_oper_ie = cfg80211_find_ext_ie(WLAN_EID_EXT_HE_OPERATION, + ies->data, ies->len); + if (he_oper_ie && + he_oper_ie[1] == ieee80211_he_oper_size(&he_oper_ie[3])) + he_oper = (void *)(he_oper_ie + 3); + else + he_oper = NULL; + + if (!ieee80211_verify_sta_he_mcs_support(sband, he_oper)) + ifmgd->flags |= IEEE80211_STA_DISABLE_HE; + } + /* Allow VHT if at least one channel on the sband supports 80 MHz */ have_80mhz = false; for (i = 0; i < sband->n_channels; i++) { @@ -4345,7 +4612,7 @@ static int ieee80211_prep_channel(struct ieee80211_sub_if_data *sdata, ifmgd->flags |= ieee80211_determine_chantype(sdata, sband, cbss->channel, - ht_oper, vht_oper, + ht_oper, vht_oper, he_oper, &chandef, false); sdata->needed_rx_chains = min(ieee80211_ht_vht_rx_chains(sdata, cbss), @@ -4751,8 +5018,9 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata, req->crypto.ciphers_pairwise[i] == WLAN_CIPHER_SUITE_WEP104) { ifmgd->flags |= IEEE80211_STA_DISABLE_HT; ifmgd->flags |= IEEE80211_STA_DISABLE_VHT; + ifmgd->flags |= IEEE80211_STA_DISABLE_HE; netdev_info(sdata->dev, - "disabling HT/VHT due to WEP/TKIP use\n"); + "disabling HE/HT/VHT due to WEP/TKIP use\n"); } } diff --git a/net/mac80211/offchannel.c b/net/mac80211/offchannel.c index f1d40b6645ff..8ef4153cd299 100644 --- a/net/mac80211/offchannel.c +++ b/net/mac80211/offchannel.c @@ -262,7 +262,7 @@ static void ieee80211_handle_roc_started(struct ieee80211_roc_work *roc, if (roc->mgmt_tx_cookie) { if (!WARN_ON(!roc->frame)) { ieee80211_tx_skb_tid_band(roc->sdata, roc->frame, 7, - roc->chan->band); + roc->chan->band, 0); roc->frame = NULL; } } else { diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c index 0a38cc1cbebc..a16ba568e2a3 100644 --- a/net/mac80211/rx.c +++ b/net/mac80211/rx.c @@ -175,6 +175,20 @@ ieee80211_rx_radiotap_hdrlen(struct ieee80211_local *local, len += 12; } + if (status->encoding == RX_ENC_HE && + status->flag & RX_FLAG_RADIOTAP_HE) { + len = ALIGN(len, 2); + len += 12; + BUILD_BUG_ON(sizeof(struct ieee80211_radiotap_he) != 12); + } + + if (status->encoding == RX_ENC_HE && + status->flag & 
RX_FLAG_RADIOTAP_HE_MU) { + len = ALIGN(len, 2); + len += 12; + BUILD_BUG_ON(sizeof(struct ieee80211_radiotap_he_mu) != 12); + } + if (status->chains) { /* antenna and antenna signal fields */ len += 2 * hweight8(status->chains); @@ -263,6 +277,19 @@ ieee80211_add_rx_radiotap_header(struct ieee80211_local *local, int mpdulen, chain; unsigned long chains = status->chains; struct ieee80211_vendor_radiotap rtap = {}; + struct ieee80211_radiotap_he he = {}; + struct ieee80211_radiotap_he_mu he_mu = {}; + + if (status->flag & RX_FLAG_RADIOTAP_HE) { + he = *(struct ieee80211_radiotap_he *)skb->data; + skb_pull(skb, sizeof(he)); + WARN_ON_ONCE(status->encoding != RX_ENC_HE); + } + + if (status->flag & RX_FLAG_RADIOTAP_HE_MU) { + he_mu = *(struct ieee80211_radiotap_he_mu *)skb->data; + skb_pull(skb, sizeof(he_mu)); + } if (status->flag & RX_FLAG_RADIOTAP_VENDOR_DATA) { rtap = *(struct ieee80211_vendor_radiotap *)skb->data; @@ -520,6 +547,89 @@ ieee80211_add_rx_radiotap_header(struct ieee80211_local *local, *pos++ = flags; } + if (status->encoding == RX_ENC_HE && + status->flag & RX_FLAG_RADIOTAP_HE) { +#define HE_PREP(f, val) cpu_to_le16(FIELD_PREP(IEEE80211_RADIOTAP_HE_##f, val)) + + if (status->enc_flags & RX_ENC_FLAG_STBC_MASK) { + he.data6 |= HE_PREP(DATA6_NSTS, + FIELD_GET(RX_ENC_FLAG_STBC_MASK, + status->enc_flags)); + he.data3 |= HE_PREP(DATA3_STBC, 1); + } else { + he.data6 |= HE_PREP(DATA6_NSTS, status->nss); + } + +#define CHECK_GI(s) \ + BUILD_BUG_ON(IEEE80211_RADIOTAP_HE_DATA5_GI_##s != \ + (int)NL80211_RATE_INFO_HE_GI_##s) + + CHECK_GI(0_8); + CHECK_GI(1_6); + CHECK_GI(3_2); + + he.data3 |= HE_PREP(DATA3_DATA_MCS, status->rate_idx); + he.data3 |= HE_PREP(DATA3_DATA_DCM, status->he_dcm); + he.data3 |= HE_PREP(DATA3_CODING, + !!(status->enc_flags & RX_ENC_FLAG_LDPC)); + + he.data5 |= HE_PREP(DATA5_GI, status->he_gi); + + switch (status->bw) { + case RATE_INFO_BW_20: + he.data5 |= HE_PREP(DATA5_DATA_BW_RU_ALLOC, + IEEE80211_RADIOTAP_HE_DATA5_DATA_BW_RU_ALLOC_20MHZ); + break; + case RATE_INFO_BW_40: + he.data5 |= HE_PREP(DATA5_DATA_BW_RU_ALLOC, + IEEE80211_RADIOTAP_HE_DATA5_DATA_BW_RU_ALLOC_40MHZ); + break; + case RATE_INFO_BW_80: + he.data5 |= HE_PREP(DATA5_DATA_BW_RU_ALLOC, + IEEE80211_RADIOTAP_HE_DATA5_DATA_BW_RU_ALLOC_80MHZ); + break; + case RATE_INFO_BW_160: + he.data5 |= HE_PREP(DATA5_DATA_BW_RU_ALLOC, + IEEE80211_RADIOTAP_HE_DATA5_DATA_BW_RU_ALLOC_160MHZ); + break; + case RATE_INFO_BW_HE_RU: +#define CHECK_RU_ALLOC(s) \ + BUILD_BUG_ON(IEEE80211_RADIOTAP_HE_DATA5_DATA_BW_RU_ALLOC_##s##T != \ + NL80211_RATE_INFO_HE_RU_ALLOC_##s + 4) + + CHECK_RU_ALLOC(26); + CHECK_RU_ALLOC(52); + CHECK_RU_ALLOC(106); + CHECK_RU_ALLOC(242); + CHECK_RU_ALLOC(484); + CHECK_RU_ALLOC(996); + CHECK_RU_ALLOC(2x996); + + he.data5 |= HE_PREP(DATA5_DATA_BW_RU_ALLOC, + status->he_ru + 4); + break; + default: + WARN_ONCE(1, "Invalid SU BW %d\n", status->bw); + } + + /* ensure 2 byte alignment */ + while ((pos - (u8 *)rthdr) & 1) + pos++; + rthdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_HE); + memcpy(pos, &he, sizeof(he)); + pos += sizeof(he); + } + + if (status->encoding == RX_ENC_HE && + status->flag & RX_FLAG_RADIOTAP_HE_MU) { + /* ensure 2 byte alignment */ + while ((pos - (u8 *)rthdr) & 1) + pos++; + rthdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_HE_MU); + memcpy(pos, &he_mu, sizeof(he_mu)); + pos += sizeof(he_mu); + } + for_each_set_bit(chain, &chains, IEEE80211_MAX_CHAINS) { *pos++ = status->chain_signal[chain]; *pos++ = chain; @@ -613,6 +723,12 @@ ieee80211_rx_monitor(struct 
ieee80211_local *local, struct sk_buff *origskb, rcu_dereference(local->monitor_sdata); bool only_monitor = false; + if (status->flag & RX_FLAG_RADIOTAP_HE) + rtap_space += sizeof(struct ieee80211_radiotap_he); + + if (status->flag & RX_FLAG_RADIOTAP_HE_MU) + rtap_space += sizeof(struct ieee80211_radiotap_he_mu); + if (unlikely(status->flag & RX_FLAG_RADIOTAP_VENDOR_DATA)) { struct ieee80211_vendor_radiotap *rtap = (void *)origskb->data; @@ -3241,7 +3357,7 @@ ieee80211_rx_h_action_return(struct ieee80211_rx_data *rx) } __ieee80211_tx_skb_tid_band(rx->sdata, nskb, 7, - status->band); + status->band, 0); } dev_kfree_skb(rx->skb); return RX_QUEUED; @@ -3386,8 +3502,7 @@ static void ieee80211_rx_handlers_result(struct ieee80211_rx_data *rx, status = IEEE80211_SKB_RXCB((rx->skb)); sband = rx->local->hw.wiphy->bands[status->band]; - if (!(status->encoding == RX_ENC_HT) && - !(status->encoding == RX_ENC_VHT)) + if (status->encoding == RX_ENC_LEGACY) rate = &sband->bitrates[status->rate_idx]; ieee80211_rx_cooked_monitor(rx, rate); @@ -4386,6 +4501,14 @@ void ieee80211_rx_napi(struct ieee80211_hw *hw, struct ieee80211_sta *pubsta, status->rate_idx, status->nss)) goto drop; break; + case RX_ENC_HE: + if (WARN_ONCE(status->rate_idx > 11 || + !status->nss || + status->nss > 8, + "Rate marked as an HE rate but data is invalid: MCS: %d, NSS: %d\n", + status->rate_idx, status->nss)) + goto drop; + break; default: WARN_ON_ONCE(1); /* fall through */ diff --git a/net/mac80211/scan.c b/net/mac80211/scan.c index 2e917a6d239d..5d2a11777718 100644 --- a/net/mac80211/scan.c +++ b/net/mac80211/scan.c @@ -20,6 +20,7 @@ #include <net/sch_generic.h> #include <linux/slab.h> #include <linux/export.h> +#include <linux/random.h> #include <net/mac80211.h> #include "ieee80211_i.h" @@ -293,6 +294,7 @@ static bool ieee80211_prep_hw_scan(struct ieee80211_local *local) struct cfg80211_chan_def chandef; u8 bands_used = 0; int i, ielen, n_chans; + u32 flags = 0; req = rcu_dereference_protected(local->scan_req, lockdep_is_held(&local->mtx)); @@ -331,12 +333,16 @@ static bool ieee80211_prep_hw_scan(struct ieee80211_local *local) local->hw_scan_req->req.n_channels = n_chans; ieee80211_prepare_scan_chandef(&chandef, req->scan_width); + if (req->flags & NL80211_SCAN_FLAG_MIN_PREQ_CONTENT) + flags |= IEEE80211_PROBE_FLAG_MIN_CONTENT; + ielen = ieee80211_build_preq_ies(local, (u8 *)local->hw_scan_req->req.ie, local->hw_scan_ies_bufsize, &local->hw_scan_req->ies, req->ie, req->ie_len, - bands_used, req->rates, &chandef); + bands_used, req->rates, &chandef, + flags); local->hw_scan_req->req.ie_len = ielen; local->hw_scan_req->req.no_cck = req->no_cck; ether_addr_copy(local->hw_scan_req->req.mac_addr, req->mac_addr); @@ -528,6 +534,35 @@ void ieee80211_run_deferred_scan(struct ieee80211_local *local) round_jiffies_relative(0)); } +static void ieee80211_send_scan_probe_req(struct ieee80211_sub_if_data *sdata, + const u8 *src, const u8 *dst, + const u8 *ssid, size_t ssid_len, + const u8 *ie, size_t ie_len, + u32 ratemask, u32 flags, u32 tx_flags, + struct ieee80211_channel *channel) +{ + struct sk_buff *skb; + u32 txdata_flags = 0; + + skb = ieee80211_build_probe_req(sdata, src, dst, ratemask, channel, + ssid, ssid_len, + ie, ie_len, flags); + + if (skb) { + if (flags & IEEE80211_PROBE_FLAG_RANDOM_SN) { + struct ieee80211_hdr *hdr = (void *)skb->data; + u16 sn = get_random_u32(); + + txdata_flags |= IEEE80211_TX_NO_SEQNO; + hdr->seq_ctrl = + cpu_to_le16(IEEE80211_SN_TO_SEQ(sn)); + } + IEEE80211_SKB_CB(skb)->flags |= tx_flags; + 
ieee80211_tx_skb_tid_band(sdata, skb, 7, channel->band, + txdata_flags); + } +} + static void ieee80211_scan_state_send_probe(struct ieee80211_local *local, unsigned long *next_delay) { @@ -535,7 +570,7 @@ static void ieee80211_scan_state_send_probe(struct ieee80211_local *local, struct ieee80211_sub_if_data *sdata; struct cfg80211_scan_request *scan_req; enum nl80211_band band = local->hw.conf.chandef.chan->band; - u32 tx_flags; + u32 flags = 0, tx_flags; scan_req = rcu_dereference_protected(local->scan_req, lockdep_is_held(&local->mtx)); @@ -543,17 +578,21 @@ static void ieee80211_scan_state_send_probe(struct ieee80211_local *local, tx_flags = IEEE80211_TX_INTFL_OFFCHAN_TX_OK; if (scan_req->no_cck) tx_flags |= IEEE80211_TX_CTL_NO_CCK_RATE; + if (scan_req->flags & NL80211_SCAN_FLAG_MIN_PREQ_CONTENT) + flags |= IEEE80211_PROBE_FLAG_MIN_CONTENT; + if (scan_req->flags & NL80211_SCAN_FLAG_RANDOM_SN) + flags |= IEEE80211_PROBE_FLAG_RANDOM_SN; sdata = rcu_dereference_protected(local->scan_sdata, lockdep_is_held(&local->mtx)); for (i = 0; i < scan_req->n_ssids; i++) - ieee80211_send_probe_req( + ieee80211_send_scan_probe_req( sdata, local->scan_addr, scan_req->bssid, scan_req->ssids[i].ssid, scan_req->ssids[i].ssid_len, scan_req->ie, scan_req->ie_len, - scan_req->rates[band], false, - tx_flags, local->hw.conf.chandef.chan, true); + scan_req->rates[band], flags, + tx_flags, local->hw.conf.chandef.chan); /* * After sending probe requests, wait for probe responses @@ -1141,6 +1180,7 @@ int __ieee80211_request_sched_scan_start(struct ieee80211_sub_if_data *sdata, u32 rate_masks[NUM_NL80211_BANDS] = {}; u8 bands_used = 0; u8 *ie; + u32 flags = 0; iebufsz = local->scan_ies_len + req->ie_len; @@ -1157,6 +1197,9 @@ int __ieee80211_request_sched_scan_start(struct ieee80211_sub_if_data *sdata, } } + if (req->flags & NL80211_SCAN_FLAG_MIN_PREQ_CONTENT) + flags |= IEEE80211_PROBE_FLAG_MIN_CONTENT; + ie = kcalloc(iebufsz, num_bands, GFP_KERNEL); if (!ie) { ret = -ENOMEM; @@ -1167,7 +1210,8 @@ int __ieee80211_request_sched_scan_start(struct ieee80211_sub_if_data *sdata, ieee80211_build_preq_ies(local, ie, num_bands * iebufsz, &sched_scan_ies, req->ie, - req->ie_len, bands_used, rate_masks, &chandef); + req->ie_len, bands_used, rate_masks, &chandef, + flags); ret = drv_sched_scan_start(local, sdata, req, &sched_scan_ies); if (ret == 0) { diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c index 6428f1ac37b6..f34202242d24 100644 --- a/net/mac80211/sta_info.c +++ b/net/mac80211/sta_info.c @@ -1323,6 +1323,11 @@ static void ieee80211_send_null_response(struct sta_info *sta, int tid, struct ieee80211_tx_info *info; struct ieee80211_chanctx_conf *chanctx_conf; + /* Don't send NDPs when STA is connected HE */ + if (sdata->vif.type == NL80211_IFTYPE_STATION && + !(sdata->u.mgd.flags & IEEE80211_STA_DISABLE_HE)) + return; + if (qos) { fc = cpu_to_le16(IEEE80211_FTYPE_DATA | IEEE80211_STYPE_QOS_NULLFUNC | @@ -1391,7 +1396,7 @@ static void ieee80211_send_null_response(struct sta_info *sta, int tid, } info->band = chanctx_conf->def.chan->band; - ieee80211_xmit(sdata, sta, skb); + ieee80211_xmit(sdata, sta, skb, 0); rcu_read_unlock(); } @@ -1968,7 +1973,7 @@ sta_get_last_rx_stats(struct sta_info *sta) return stats; } -static void sta_stats_decode_rate(struct ieee80211_local *local, u16 rate, +static void sta_stats_decode_rate(struct ieee80211_local *local, u32 rate, struct rate_info *rinfo) { rinfo->bw = STA_STATS_GET(BW, rate); @@ -2005,6 +2010,14 @@ static void sta_stats_decode_rate(struct ieee80211_local 
*local, u16 rate, rinfo->legacy = DIV_ROUND_UP(brate, 1 << shift); break; } + case STA_STATS_RATE_TYPE_HE: + rinfo->flags = RATE_INFO_FLAGS_HE_MCS; + rinfo->mcs = STA_STATS_GET(HE_MCS, rate); + rinfo->nss = STA_STATS_GET(HE_NSS, rate); + rinfo->he_gi = STA_STATS_GET(HE_GI, rate); + rinfo->he_ru_alloc = STA_STATS_GET(HE_RU, rate); + rinfo->he_dcm = STA_STATS_GET(HE_DCM, rate); + break; } } @@ -2101,38 +2114,38 @@ void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo, drv_sta_statistics(local, sdata, &sta->sta, sinfo); - sinfo->filled |= BIT(NL80211_STA_INFO_INACTIVE_TIME) | - BIT(NL80211_STA_INFO_STA_FLAGS) | - BIT(NL80211_STA_INFO_BSS_PARAM) | - BIT(NL80211_STA_INFO_CONNECTED_TIME) | - BIT(NL80211_STA_INFO_RX_DROP_MISC); + sinfo->filled |= BIT_ULL(NL80211_STA_INFO_INACTIVE_TIME) | + BIT_ULL(NL80211_STA_INFO_STA_FLAGS) | + BIT_ULL(NL80211_STA_INFO_BSS_PARAM) | + BIT_ULL(NL80211_STA_INFO_CONNECTED_TIME) | + BIT_ULL(NL80211_STA_INFO_RX_DROP_MISC); if (sdata->vif.type == NL80211_IFTYPE_STATION) { sinfo->beacon_loss_count = sdata->u.mgd.beacon_loss_count; - sinfo->filled |= BIT(NL80211_STA_INFO_BEACON_LOSS); + sinfo->filled |= BIT_ULL(NL80211_STA_INFO_BEACON_LOSS); } sinfo->connected_time = ktime_get_seconds() - sta->last_connected; sinfo->inactive_time = jiffies_to_msecs(jiffies - ieee80211_sta_last_active(sta)); - if (!(sinfo->filled & (BIT(NL80211_STA_INFO_TX_BYTES64) | - BIT(NL80211_STA_INFO_TX_BYTES)))) { + if (!(sinfo->filled & (BIT_ULL(NL80211_STA_INFO_TX_BYTES64) | + BIT_ULL(NL80211_STA_INFO_TX_BYTES)))) { sinfo->tx_bytes = 0; for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) sinfo->tx_bytes += sta->tx_stats.bytes[ac]; - sinfo->filled |= BIT(NL80211_STA_INFO_TX_BYTES64); + sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_BYTES64); } - if (!(sinfo->filled & BIT(NL80211_STA_INFO_TX_PACKETS))) { + if (!(sinfo->filled & BIT_ULL(NL80211_STA_INFO_TX_PACKETS))) { sinfo->tx_packets = 0; for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) sinfo->tx_packets += sta->tx_stats.packets[ac]; - sinfo->filled |= BIT(NL80211_STA_INFO_TX_PACKETS); + sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_PACKETS); } - if (!(sinfo->filled & (BIT(NL80211_STA_INFO_RX_BYTES64) | - BIT(NL80211_STA_INFO_RX_BYTES)))) { + if (!(sinfo->filled & (BIT_ULL(NL80211_STA_INFO_RX_BYTES64) | + BIT_ULL(NL80211_STA_INFO_RX_BYTES)))) { sinfo->rx_bytes += sta_get_stats_bytes(&sta->rx_stats); if (sta->pcpu_rx_stats) { @@ -2144,10 +2157,10 @@ void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo, } } - sinfo->filled |= BIT(NL80211_STA_INFO_RX_BYTES64); + sinfo->filled |= BIT_ULL(NL80211_STA_INFO_RX_BYTES64); } - if (!(sinfo->filled & BIT(NL80211_STA_INFO_RX_PACKETS))) { + if (!(sinfo->filled & BIT_ULL(NL80211_STA_INFO_RX_PACKETS))) { sinfo->rx_packets = sta->rx_stats.packets; if (sta->pcpu_rx_stats) { for_each_possible_cpu(cpu) { @@ -2157,17 +2170,17 @@ void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo, sinfo->rx_packets += cpurxs->packets; } } - sinfo->filled |= BIT(NL80211_STA_INFO_RX_PACKETS); + sinfo->filled |= BIT_ULL(NL80211_STA_INFO_RX_PACKETS); } - if (!(sinfo->filled & BIT(NL80211_STA_INFO_TX_RETRIES))) { + if (!(sinfo->filled & BIT_ULL(NL80211_STA_INFO_TX_RETRIES))) { sinfo->tx_retries = sta->status_stats.retry_count; - sinfo->filled |= BIT(NL80211_STA_INFO_TX_RETRIES); + sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_RETRIES); } - if (!(sinfo->filled & BIT(NL80211_STA_INFO_TX_FAILED))) { + if (!(sinfo->filled & BIT_ULL(NL80211_STA_INFO_TX_FAILED))) { sinfo->tx_failed = sta->status_stats.retry_failed; - 
sinfo->filled |= BIT(NL80211_STA_INFO_TX_FAILED); + sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_FAILED); } sinfo->rx_dropped_misc = sta->rx_stats.dropped; @@ -2182,23 +2195,23 @@ void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo, if (sdata->vif.type == NL80211_IFTYPE_STATION && !(sdata->vif.driver_flags & IEEE80211_VIF_BEACON_FILTER)) { - sinfo->filled |= BIT(NL80211_STA_INFO_BEACON_RX) | - BIT(NL80211_STA_INFO_BEACON_SIGNAL_AVG); + sinfo->filled |= BIT_ULL(NL80211_STA_INFO_BEACON_RX) | + BIT_ULL(NL80211_STA_INFO_BEACON_SIGNAL_AVG); sinfo->rx_beacon_signal_avg = ieee80211_ave_rssi(&sdata->vif); } if (ieee80211_hw_check(&sta->local->hw, SIGNAL_DBM) || ieee80211_hw_check(&sta->local->hw, SIGNAL_UNSPEC)) { - if (!(sinfo->filled & BIT(NL80211_STA_INFO_SIGNAL))) { + if (!(sinfo->filled & BIT_ULL(NL80211_STA_INFO_SIGNAL))) { sinfo->signal = (s8)last_rxstats->last_signal; - sinfo->filled |= BIT(NL80211_STA_INFO_SIGNAL); + sinfo->filled |= BIT_ULL(NL80211_STA_INFO_SIGNAL); } if (!sta->pcpu_rx_stats && - !(sinfo->filled & BIT(NL80211_STA_INFO_SIGNAL_AVG))) { + !(sinfo->filled & BIT_ULL(NL80211_STA_INFO_SIGNAL_AVG))) { sinfo->signal_avg = -ewma_signal_read(&sta->rx_stats_avg.signal); - sinfo->filled |= BIT(NL80211_STA_INFO_SIGNAL_AVG); + sinfo->filled |= BIT_ULL(NL80211_STA_INFO_SIGNAL_AVG); } } @@ -2207,11 +2220,11 @@ void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo, * pcpu statistics */ if (last_rxstats->chains && - !(sinfo->filled & (BIT(NL80211_STA_INFO_CHAIN_SIGNAL) | - BIT(NL80211_STA_INFO_CHAIN_SIGNAL_AVG)))) { - sinfo->filled |= BIT(NL80211_STA_INFO_CHAIN_SIGNAL); + !(sinfo->filled & (BIT_ULL(NL80211_STA_INFO_CHAIN_SIGNAL) | + BIT_ULL(NL80211_STA_INFO_CHAIN_SIGNAL_AVG)))) { + sinfo->filled |= BIT_ULL(NL80211_STA_INFO_CHAIN_SIGNAL); if (!sta->pcpu_rx_stats) - sinfo->filled |= BIT(NL80211_STA_INFO_CHAIN_SIGNAL_AVG); + sinfo->filled |= BIT_ULL(NL80211_STA_INFO_CHAIN_SIGNAL_AVG); sinfo->chains = last_rxstats->chains; @@ -2223,15 +2236,15 @@ void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo, } } - if (!(sinfo->filled & BIT(NL80211_STA_INFO_TX_BITRATE))) { + if (!(sinfo->filled & BIT_ULL(NL80211_STA_INFO_TX_BITRATE))) { sta_set_rate_info_tx(sta, &sta->tx_stats.last_rate, &sinfo->txrate); - sinfo->filled |= BIT(NL80211_STA_INFO_TX_BITRATE); + sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_BITRATE); } - if (!(sinfo->filled & BIT(NL80211_STA_INFO_RX_BITRATE))) { + if (!(sinfo->filled & BIT_ULL(NL80211_STA_INFO_RX_BITRATE))) { if (sta_set_rate_info_rx(sta, &sinfo->rxrate) == 0) - sinfo->filled |= BIT(NL80211_STA_INFO_RX_BITRATE); + sinfo->filled |= BIT_ULL(NL80211_STA_INFO_RX_BITRATE); } if (tidstats && !cfg80211_sinfo_alloc_tid_stats(sinfo, GFP_KERNEL)) { @@ -2244,18 +2257,18 @@ void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo, if (ieee80211_vif_is_mesh(&sdata->vif)) { #ifdef CONFIG_MAC80211_MESH - sinfo->filled |= BIT(NL80211_STA_INFO_LLID) | - BIT(NL80211_STA_INFO_PLID) | - BIT(NL80211_STA_INFO_PLINK_STATE) | - BIT(NL80211_STA_INFO_LOCAL_PM) | - BIT(NL80211_STA_INFO_PEER_PM) | - BIT(NL80211_STA_INFO_NONPEER_PM); + sinfo->filled |= BIT_ULL(NL80211_STA_INFO_LLID) | + BIT_ULL(NL80211_STA_INFO_PLID) | + BIT_ULL(NL80211_STA_INFO_PLINK_STATE) | + BIT_ULL(NL80211_STA_INFO_LOCAL_PM) | + BIT_ULL(NL80211_STA_INFO_PEER_PM) | + BIT_ULL(NL80211_STA_INFO_NONPEER_PM); sinfo->llid = sta->mesh->llid; sinfo->plid = sta->mesh->plid; sinfo->plink_state = sta->mesh->plink_state; if (test_sta_flag(sta, WLAN_STA_TOFFSET_KNOWN)) { - sinfo->filled |= 
BIT(NL80211_STA_INFO_T_OFFSET); + sinfo->filled |= BIT_ULL(NL80211_STA_INFO_T_OFFSET); sinfo->t_offset = sta->mesh->t_offset; } sinfo->local_pm = sta->mesh->local_pm; @@ -2300,7 +2313,7 @@ void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo, thr = sta_get_expected_throughput(sta); if (thr != 0) { - sinfo->filled |= BIT(NL80211_STA_INFO_EXPECTED_THROUGHPUT); + sinfo->filled |= BIT_ULL(NL80211_STA_INFO_EXPECTED_THROUGHPUT); sinfo->expected_throughput = thr; } diff --git a/net/mac80211/sta_info.h b/net/mac80211/sta_info.h index 81b35f623792..9a04327d71d1 100644 --- a/net/mac80211/sta_info.h +++ b/net/mac80211/sta_info.h @@ -170,7 +170,7 @@ struct tid_ampdu_tx { u8 dialog_token; u8 stop_initiator; bool tx_stop; - u8 buf_size; + u16 buf_size; u16 failed_bar_ssn; bool bar_pending; @@ -405,7 +405,7 @@ struct ieee80211_sta_rx_stats { int last_signal; u8 chains; s8 chain_signal_last[IEEE80211_MAX_CHAINS]; - u16 last_rate; + u32 last_rate; struct u64_stats_sync syncp; u64 bytes; u64 msdu[IEEE80211_NUM_TIDS + 1]; @@ -764,6 +764,7 @@ enum sta_stats_type { STA_STATS_RATE_TYPE_LEGACY, STA_STATS_RATE_TYPE_HT, STA_STATS_RATE_TYPE_VHT, + STA_STATS_RATE_TYPE_HE, }; #define STA_STATS_FIELD_HT_MCS GENMASK( 7, 0) @@ -771,9 +772,14 @@ enum sta_stats_type { #define STA_STATS_FIELD_LEGACY_BAND GENMASK( 7, 4) #define STA_STATS_FIELD_VHT_MCS GENMASK( 3, 0) #define STA_STATS_FIELD_VHT_NSS GENMASK( 7, 4) +#define STA_STATS_FIELD_HE_MCS GENMASK( 3, 0) +#define STA_STATS_FIELD_HE_NSS GENMASK( 7, 4) #define STA_STATS_FIELD_BW GENMASK(11, 8) #define STA_STATS_FIELD_SGI GENMASK(12, 12) #define STA_STATS_FIELD_TYPE GENMASK(15, 13) +#define STA_STATS_FIELD_HE_RU GENMASK(18, 16) +#define STA_STATS_FIELD_HE_GI GENMASK(20, 19) +#define STA_STATS_FIELD_HE_DCM GENMASK(21, 21) #define STA_STATS_FIELD(_n, _v) FIELD_PREP(STA_STATS_FIELD_ ## _n, _v) #define STA_STATS_GET(_n, _v) FIELD_GET(STA_STATS_FIELD_ ## _n, _v) @@ -782,7 +788,7 @@ enum sta_stats_type { static inline u32 sta_stats_encode_rate(struct ieee80211_rx_status *s) { - u16 r; + u32 r; r = STA_STATS_FIELD(BW, s->bw); @@ -804,6 +810,14 @@ static inline u32 sta_stats_encode_rate(struct ieee80211_rx_status *s) r |= STA_STATS_FIELD(LEGACY_BAND, s->band); r |= STA_STATS_FIELD(LEGACY_IDX, s->rate_idx); break; + case RX_ENC_HE: + r |= STA_STATS_FIELD(TYPE, STA_STATS_RATE_TYPE_HE); + r |= STA_STATS_FIELD(HE_NSS, s->nss); + r |= STA_STATS_FIELD(HE_MCS, s->rate_idx); + r |= STA_STATS_FIELD(HE_GI, s->he_gi); + r |= STA_STATS_FIELD(HE_RU, s->he_ru); + r |= STA_STATS_FIELD(HE_DCM, s->he_dcm); + break; default: WARN_ON(1); return STA_STATS_RATE_INVALID; diff --git a/net/mac80211/trace.h b/net/mac80211/trace.h index 80a7edf8d314..0ab69a1964f8 100644 --- a/net/mac80211/trace.h +++ b/net/mac80211/trace.h @@ -92,7 +92,7 @@ STA_ENTRY \ __field(u16, tid) \ __field(u16, ssn) \ - __field(u8, buf_size) \ + __field(u16, buf_size) \ __field(bool, amsdu) \ __field(u16, timeout) \ __field(u16, action) diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c index fa1f1e63a264..6a79d564de35 100644 --- a/net/mac80211/tx.c +++ b/net/mac80211/tx.c @@ -825,6 +825,8 @@ ieee80211_tx_h_sequence(struct ieee80211_tx_data *tx) */ if (!ieee80211_is_data_qos(hdr->frame_control) || is_multicast_ether_addr(hdr->addr1)) { + if (tx->flags & IEEE80211_TX_NO_SEQNO) + return TX_CONTINUE; /* driver should assign sequence number */ info->flags |= IEEE80211_TX_CTL_ASSIGN_SEQ; /* for pure STA mode without beacons, we can do it */ @@ -1854,7 +1856,7 @@ EXPORT_SYMBOL(ieee80211_tx_prepare_skb); */ static bool 
ieee80211_tx(struct ieee80211_sub_if_data *sdata, struct sta_info *sta, struct sk_buff *skb, - bool txpending) + bool txpending, u32 txdata_flags) { struct ieee80211_local *local = sdata->local; struct ieee80211_tx_data tx; @@ -1872,6 +1874,8 @@ static bool ieee80211_tx(struct ieee80211_sub_if_data *sdata, led_len = skb->len; res_prepare = ieee80211_tx_prepare(sdata, &tx, sta, skb); + tx.flags |= txdata_flags; + if (unlikely(res_prepare == TX_DROP)) { ieee80211_free_txskb(&local->hw, skb); return true; @@ -1933,7 +1937,8 @@ static int ieee80211_skb_resize(struct ieee80211_sub_if_data *sdata, } void ieee80211_xmit(struct ieee80211_sub_if_data *sdata, - struct sta_info *sta, struct sk_buff *skb) + struct sta_info *sta, struct sk_buff *skb, + u32 txdata_flags) { struct ieee80211_local *local = sdata->local; struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); @@ -1968,7 +1973,7 @@ void ieee80211_xmit(struct ieee80211_sub_if_data *sdata, } ieee80211_set_qos_hdr(sdata, skb); - ieee80211_tx(sdata, sta, skb, false); + ieee80211_tx(sdata, sta, skb, false, txdata_flags); } static bool ieee80211_parse_tx_radiotap(struct ieee80211_local *local, @@ -2289,7 +2294,7 @@ netdev_tx_t ieee80211_monitor_start_xmit(struct sk_buff *skb, if (!ieee80211_parse_tx_radiotap(local, skb)) goto fail_rcu; - ieee80211_xmit(sdata, NULL, skb); + ieee80211_xmit(sdata, NULL, skb, 0); rcu_read_unlock(); return NETDEV_TX_OK; @@ -3648,7 +3653,7 @@ void __ieee80211_subif_start_xmit(struct sk_buff *skb, ieee80211_tx_stats(dev, skb->len); - ieee80211_xmit(sdata, sta, skb); + ieee80211_xmit(sdata, sta, skb, 0); } goto out; out_free: @@ -3867,7 +3872,7 @@ static bool ieee80211_tx_pending_skb(struct ieee80211_local *local, return true; } info->band = chanctx_conf->def.chan->band; - result = ieee80211_tx(sdata, NULL, skb, true); + result = ieee80211_tx(sdata, NULL, skb, true, 0); } else { struct sk_buff_head skbs; @@ -4783,7 +4788,7 @@ EXPORT_SYMBOL(ieee80211_unreserve_tid); void __ieee80211_tx_skb_tid_band(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb, int tid, - enum nl80211_band band) + enum nl80211_band band, u32 txdata_flags) { int ac = ieee80211_ac_from_tid(tid); @@ -4800,7 +4805,7 @@ void __ieee80211_tx_skb_tid_band(struct ieee80211_sub_if_data *sdata, */ local_bh_disable(); IEEE80211_SKB_CB(skb)->band = band; - ieee80211_xmit(sdata, NULL, skb); + ieee80211_xmit(sdata, NULL, skb, txdata_flags); local_bh_enable(); } diff --git a/net/mac80211/util.c b/net/mac80211/util.c index 5e2e511c4a6f..3e68132a41fa 100644 --- a/net/mac80211/util.c +++ b/net/mac80211/util.c @@ -1095,6 +1095,21 @@ u32 ieee802_11_parse_elems_crc(const u8 *start, size_t len, bool action, if (elen >= sizeof(*elems->max_idle_period_ie)) elems->max_idle_period_ie = (void *)pos; break; + case WLAN_EID_EXTENSION: + if (pos[0] == WLAN_EID_EXT_HE_MU_EDCA && + elen >= (sizeof(*elems->mu_edca_param_set) + 1)) { + elems->mu_edca_param_set = (void *)&pos[1]; + } else if (pos[0] == WLAN_EID_EXT_HE_CAPABILITY) { + elems->he_cap = (void *)&pos[1]; + elems->he_cap_len = elen - 1; + } else if (pos[0] == WLAN_EID_EXT_HE_OPERATION && + elen >= sizeof(*elems->he_operation) && + elen >= ieee80211_he_oper_size(&pos[1])) { + elems->he_operation = (void *)&pos[1]; + } else if (pos[0] == WLAN_EID_EXT_UORA && elen >= 1) { + elems->uora_element = (void *)&pos[1]; + } + break; default: break; } @@ -1353,9 +1368,10 @@ static int ieee80211_build_preq_ies_band(struct ieee80211_local *local, enum nl80211_band band, u32 rate_mask, struct cfg80211_chan_def *chandef, - size_t 
*offset) + size_t *offset, u32 flags) { struct ieee80211_supported_band *sband; + const struct ieee80211_sta_he_cap *he_cap; u8 *pos = buffer, *end = buffer + buffer_len; size_t noffset; int supp_rates_len, i; @@ -1433,6 +1449,9 @@ static int ieee80211_build_preq_ies_band(struct ieee80211_local *local, chandef->chan->center_freq); } + if (flags & IEEE80211_PROBE_FLAG_MIN_CONTENT) + goto done; + /* insert custom IEs that go before HT */ if (ie && ie_len) { static const u8 before_ht[] = { @@ -1460,11 +1479,6 @@ static int ieee80211_build_preq_ies_band(struct ieee80211_local *local, sband->ht_cap.cap); } - /* - * If adding more here, adjust code in main.c - * that calculates local->scan_ies_len. - */ - /* insert custom IEs that go before VHT */ if (ie && ie_len) { static const u8 before_vht[] = { @@ -1507,9 +1521,43 @@ static int ieee80211_build_preq_ies_band(struct ieee80211_local *local, sband->vht_cap.cap); } + /* insert custom IEs that go before HE */ + if (ie && ie_len) { + static const u8 before_he[] = { + /* + * no need to list the ones split off before VHT + * or generated here + */ + WLAN_EID_EXTENSION, WLAN_EID_EXT_FILS_REQ_PARAMS, + WLAN_EID_AP_CSN, + /* TODO: add 11ah/11aj/11ak elements */ + }; + noffset = ieee80211_ie_split(ie, ie_len, + before_he, ARRAY_SIZE(before_he), + *offset); + if (end - pos < noffset - *offset) + goto out_err; + memcpy(pos, ie + *offset, noffset - *offset); + pos += noffset - *offset; + *offset = noffset; + } + + he_cap = ieee80211_get_he_sta_cap(sband); + if (he_cap) { + pos = ieee80211_ie_build_he_cap(pos, he_cap, end); + if (!pos) + goto out_err; + } + + /* + * If adding more here, adjust code in main.c + * that calculates local->scan_ies_len. + */ + return pos - buffer; out_err: WARN_ONCE(1, "not enough space for preq IEs\n"); + done: return pos - buffer; } @@ -1518,7 +1566,8 @@ int ieee80211_build_preq_ies(struct ieee80211_local *local, u8 *buffer, struct ieee80211_scan_ies *ie_desc, const u8 *ie, size_t ie_len, u8 bands_used, u32 *rate_masks, - struct cfg80211_chan_def *chandef) + struct cfg80211_chan_def *chandef, + u32 flags) { size_t pos = 0, old_pos = 0, custom_ie_offset = 0; int i; @@ -1533,7 +1582,8 @@ int ieee80211_build_preq_ies(struct ieee80211_local *local, u8 *buffer, ie, ie_len, i, rate_masks[i], chandef, - &custom_ie_offset); + &custom_ie_offset, + flags); ie_desc->ies[i] = buffer + old_pos; ie_desc->len[i] = pos - old_pos; old_pos = pos; @@ -1561,7 +1611,7 @@ struct sk_buff *ieee80211_build_probe_req(struct ieee80211_sub_if_data *sdata, struct ieee80211_channel *chan, const u8 *ssid, size_t ssid_len, const u8 *ie, size_t ie_len, - bool directed) + u32 flags) { struct ieee80211_local *local = sdata->local; struct cfg80211_chan_def chandef; @@ -1577,7 +1627,7 @@ struct sk_buff *ieee80211_build_probe_req(struct ieee80211_sub_if_data *sdata, * badly-behaved APs don't respond when this parameter is included. 
*/ chandef.width = sdata->vif.bss_conf.chandef.width; - if (directed) + if (flags & IEEE80211_PROBE_FLAG_DIRECTED) chandef.chan = NULL; else chandef.chan = chan; @@ -1591,7 +1641,7 @@ struct sk_buff *ieee80211_build_probe_req(struct ieee80211_sub_if_data *sdata, ies_len = ieee80211_build_preq_ies(local, skb_tail_pointer(skb), skb_tailroom(skb), &dummy_ie_desc, ie, ie_len, BIT(chan->band), - rate_masks, &chandef); + rate_masks, &chandef, flags); skb_put(skb, ies_len); if (dst) { @@ -1605,27 +1655,6 @@ struct sk_buff *ieee80211_build_probe_req(struct ieee80211_sub_if_data *sdata, return skb; } -void ieee80211_send_probe_req(struct ieee80211_sub_if_data *sdata, - const u8 *src, const u8 *dst, - const u8 *ssid, size_t ssid_len, - const u8 *ie, size_t ie_len, - u32 ratemask, bool directed, u32 tx_flags, - struct ieee80211_channel *channel, bool scan) -{ - struct sk_buff *skb; - - skb = ieee80211_build_probe_req(sdata, src, dst, ratemask, channel, - ssid, ssid_len, - ie, ie_len, directed); - if (skb) { - IEEE80211_SKB_CB(skb)->flags |= tx_flags; - if (scan) - ieee80211_tx_skb_tid_band(sdata, skb, 7, channel->band); - else - ieee80211_tx_skb(sdata, skb); - } -} - u32 ieee80211_sta_get_rates(struct ieee80211_sub_if_data *sdata, struct ieee802_11_elems *elems, enum nl80211_band band, u32 *basic_rates) @@ -2412,6 +2441,72 @@ u8 *ieee80211_ie_build_vht_cap(u8 *pos, struct ieee80211_sta_vht_cap *vht_cap, return pos; } +u8 *ieee80211_ie_build_he_cap(u8 *pos, + const struct ieee80211_sta_he_cap *he_cap, + u8 *end) +{ + u8 n; + u8 ie_len; + u8 *orig_pos = pos; + + /* Make sure we have place for the IE */ + /* + * TODO: the 1 added is because this temporarily is under the EXTENSION + * IE. Get rid of it when it moves. + */ + if (!he_cap) + return orig_pos; + + n = ieee80211_he_mcs_nss_size(&he_cap->he_cap_elem); + ie_len = 2 + 1 + + sizeof(he_cap->he_cap_elem) + n + + ieee80211_he_ppe_size(he_cap->ppe_thres[0], + he_cap->he_cap_elem.phy_cap_info); + + if ((end - pos) < ie_len) + return orig_pos; + + *pos++ = WLAN_EID_EXTENSION; + pos++; /* We'll set the size later below */ + *pos++ = WLAN_EID_EXT_HE_CAPABILITY; + + /* Fixed data */ + memcpy(pos, &he_cap->he_cap_elem, sizeof(he_cap->he_cap_elem)); + pos += sizeof(he_cap->he_cap_elem); + + memcpy(pos, &he_cap->he_mcs_nss_supp, n); + pos += n; + + /* Check if PPE Threshold should be present */ + if ((he_cap->he_cap_elem.phy_cap_info[6] & + IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT) == 0) + goto end; + + /* + * Calculate how many PPET16/PPET8 pairs are to come. Algorithm: + * (NSS_M1 + 1) x (num of 1 bits in RU_INDEX_BITMASK) + */ + n = hweight8(he_cap->ppe_thres[0] & + IEEE80211_PPE_THRES_RU_INDEX_BITMASK_MASK); + n *= (1 + ((he_cap->ppe_thres[0] & IEEE80211_PPE_THRES_NSS_MASK) >> + IEEE80211_PPE_THRES_NSS_POS)); + + /* + * Each pair is 6 bits, and we need to add the 7 "header" bits to the + * total size. 
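A worked instance of the size calculation spelled out above (illustrative only, using the same masks and constants as the code that follows):

	/*
	 * Example: two bits set in RU_INDEX_BITMASK and an NSS field of 1
	 * (i.e. two spatial streams) give 2 * (1 + 1) = 4 PPET16/PPET8
	 * pairs.  Each pair is 2 * IEEE80211_PPE_THRES_INFO_PPET_SIZE =
	 * 6 bits, so 4 * 6 + 7 = 31 bits in total, and
	 * DIV_ROUND_UP(31, 8) = 4 bytes of PPE thresholds are copied.
	 */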
+ */ + n = (n * IEEE80211_PPE_THRES_INFO_PPET_SIZE * 2) + 7; + n = DIV_ROUND_UP(n, 8); + + /* Copy PPE Thresholds */ + memcpy(pos, &he_cap->ppe_thres, n); + pos += n; + +end: + orig_pos[1] = (pos - orig_pos) - 2; + return pos; +} + u8 *ieee80211_ie_build_ht_oper(u8 *pos, struct ieee80211_sta_ht_cap *ht_cap, const struct cfg80211_chan_def *chandef, u16 prot_mode, bool rifs_mode) diff --git a/net/netfilter/core.c b/net/netfilter/core.c index 168af54db975..dc240cb47ddf 100644 --- a/net/netfilter/core.c +++ b/net/netfilter/core.c @@ -603,6 +603,21 @@ void nf_conntrack_destroy(struct nf_conntrack *nfct) } EXPORT_SYMBOL(nf_conntrack_destroy); +bool nf_ct_get_tuple_skb(struct nf_conntrack_tuple *dst_tuple, + const struct sk_buff *skb) +{ + struct nf_ct_hook *ct_hook; + bool ret = false; + + rcu_read_lock(); + ct_hook = rcu_dereference(nf_ct_hook); + if (ct_hook) + ret = ct_hook->get_tuple_skb(dst_tuple, skb); + rcu_read_unlock(); + return ret; +} +EXPORT_SYMBOL(nf_ct_get_tuple_skb); + /* Built-in default zone used e.g. by modules. */ const struct nf_conntrack_zone nf_ct_zone_dflt = { .id = NF_CT_DEFAULT_ZONE_ID, diff --git a/net/netfilter/nf_conntrack_broadcast.c b/net/netfilter/nf_conntrack_broadcast.c index a1086bdec242..5423b197d98a 100644 --- a/net/netfilter/nf_conntrack_broadcast.c +++ b/net/netfilter/nf_conntrack_broadcast.c @@ -32,7 +32,7 @@ int nf_conntrack_broadcast_help(struct sk_buff *skb, __be32 mask = 0; /* we're only interested in locally generated packets */ - if (skb->sk == NULL) + if (skb->sk == NULL || !net_eq(nf_ct_net(ct), sock_net(skb->sk))) goto out; if (rt == NULL || !(rt->rt_flags & RTCF_BROADCAST)) goto out; diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c index 3d5280425027..805500197c22 100644 --- a/net/netfilter/nf_conntrack_core.c +++ b/net/netfilter/nf_conntrack_core.c @@ -1683,6 +1683,41 @@ static int nf_conntrack_update(struct net *net, struct sk_buff *skb) return 0; } +static bool nf_conntrack_get_tuple_skb(struct nf_conntrack_tuple *dst_tuple, + const struct sk_buff *skb) +{ + const struct nf_conntrack_tuple *src_tuple; + const struct nf_conntrack_tuple_hash *hash; + struct nf_conntrack_tuple srctuple; + enum ip_conntrack_info ctinfo; + struct nf_conn *ct; + + ct = nf_ct_get(skb, &ctinfo); + if (ct) { + src_tuple = nf_ct_tuple(ct, CTINFO2DIR(ctinfo)); + memcpy(dst_tuple, src_tuple, sizeof(*dst_tuple)); + return true; + } + + if (!nf_ct_get_tuplepr(skb, skb_network_offset(skb), + NFPROTO_IPV4, dev_net(skb->dev), + &srctuple)) + return false; + + hash = nf_conntrack_find_get(dev_net(skb->dev), + &nf_ct_zone_dflt, + &srctuple); + if (!hash) + return false; + + ct = nf_ct_tuplehash_to_ctrack(hash); + src_tuple = nf_ct_tuple(ct, !hash->tuple.dst.dir); + memcpy(dst_tuple, src_tuple, sizeof(*dst_tuple)); + nf_ct_put(ct); + + return true; +} + /* Bring out ya dead! 
*/ static struct nf_conn * get_next_corpse(int (*iter)(struct nf_conn *i, void *data), @@ -2204,6 +2239,7 @@ err_cachep: static struct nf_ct_hook nf_conntrack_hook = { .update = nf_conntrack_update, .destroy = destroy_conntrack, + .get_tuple_skb = nf_conntrack_get_tuple_skb, }; void nf_conntrack_init_end(void) diff --git a/net/netfilter/nf_log_common.c b/net/netfilter/nf_log_common.c index dc61399e30be..a8c5c846aec1 100644 --- a/net/netfilter/nf_log_common.c +++ b/net/netfilter/nf_log_common.c @@ -132,9 +132,10 @@ int nf_log_dump_tcp_header(struct nf_log_buf *m, const struct sk_buff *skb, } EXPORT_SYMBOL_GPL(nf_log_dump_tcp_header); -void nf_log_dump_sk_uid_gid(struct nf_log_buf *m, struct sock *sk) +void nf_log_dump_sk_uid_gid(struct net *net, struct nf_log_buf *m, + struct sock *sk) { - if (!sk || !sk_fullsock(sk)) + if (!sk || !sk_fullsock(sk) || !net_eq(net, sock_net(sk))) return; read_lock_bh(&sk->sk_callback_lock); diff --git a/net/netfilter/nf_nat_core.c b/net/netfilter/nf_nat_core.c index 46f9df99d276..86df2a1666fd 100644 --- a/net/netfilter/nf_nat_core.c +++ b/net/netfilter/nf_nat_core.c @@ -108,6 +108,7 @@ int nf_xfrm_me_harder(struct net *net, struct sk_buff *skb, unsigned int family) struct flowi fl; unsigned int hh_len; struct dst_entry *dst; + struct sock *sk = skb->sk; int err; err = xfrm_decode_session(skb, &fl, family); @@ -119,7 +120,10 @@ int nf_xfrm_me_harder(struct net *net, struct sk_buff *skb, unsigned int family) dst = ((struct xfrm_dst *)dst)->route; dst_hold(dst); - dst = xfrm_lookup(net, dst, &fl, skb->sk, 0); + if (sk && !net_eq(net, sock_net(sk))) + sk = NULL; + + dst = xfrm_lookup(net, dst, &fl, sk, 0); if (IS_ERR(dst)) return PTR_ERR(dst); diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c index 896d4a36081d..3f211e1025c1 100644 --- a/net/netfilter/nf_tables_api.c +++ b/net/netfilter/nf_tables_api.c @@ -14,6 +14,7 @@ #include <linux/skbuff.h> #include <linux/netlink.h> #include <linux/vmalloc.h> +#include <linux/rhashtable.h> #include <linux/netfilter.h> #include <linux/netfilter/nfnetlink.h> #include <linux/netfilter/nf_tables.h> diff --git a/net/netfilter/nft_meta.c b/net/netfilter/nft_meta.c index 1105a23bda5e..2b94dcc43456 100644 --- a/net/netfilter/nft_meta.c +++ b/net/netfilter/nft_meta.c @@ -107,7 +107,8 @@ static void nft_meta_get_eval(const struct nft_expr *expr, break; case NFT_META_SKUID: sk = skb_to_full_sk(skb); - if (!sk || !sk_fullsock(sk)) + if (!sk || !sk_fullsock(sk) || + !net_eq(nft_net(pkt), sock_net(sk))) goto err; read_lock_bh(&sk->sk_callback_lock); @@ -123,7 +124,8 @@ static void nft_meta_get_eval(const struct nft_expr *expr, break; case NFT_META_SKGID: sk = skb_to_full_sk(skb); - if (!sk || !sk_fullsock(sk)) + if (!sk || !sk_fullsock(sk) || + !net_eq(nft_net(pkt), sock_net(sk))) goto err; read_lock_bh(&sk->sk_callback_lock); @@ -214,7 +216,8 @@ static void nft_meta_get_eval(const struct nft_expr *expr, #ifdef CONFIG_CGROUP_NET_CLASSID case NFT_META_CGROUP: sk = skb_to_full_sk(skb); - if (!sk || !sk_fullsock(sk)) + if (!sk || !sk_fullsock(sk) || + !net_eq(nft_net(pkt), sock_net(sk))) goto err; *dest = sock_cgroup_classid(&sk->sk_cgrp_data); break; diff --git a/net/netfilter/nft_socket.c b/net/netfilter/nft_socket.c index 74e1b3bd6954..998c2b546f6d 100644 --- a/net/netfilter/nft_socket.c +++ b/net/netfilter/nft_socket.c @@ -23,6 +23,9 @@ static void nft_socket_eval(const struct nft_expr *expr, struct sock *sk = skb->sk; u32 *dest = ®s->data[priv->dreg]; + if (sk && !net_eq(nft_net(pkt), sock_net(sk))) + sk = 
NULL; + if (!sk) switch(nft_pf(pkt)) { case NFPROTO_IPV4: @@ -39,7 +42,7 @@ static void nft_socket_eval(const struct nft_expr *expr, return; } - if(!sk) { + if (!sk) { nft_reg_store8(dest, 0); return; } diff --git a/net/netfilter/xt_cgroup.c b/net/netfilter/xt_cgroup.c index 7df2dece57d3..5d92e1781980 100644 --- a/net/netfilter/xt_cgroup.c +++ b/net/netfilter/xt_cgroup.c @@ -72,8 +72,9 @@ static bool cgroup_mt_v0(const struct sk_buff *skb, struct xt_action_param *par) { const struct xt_cgroup_info_v0 *info = par->matchinfo; + struct sock *sk = skb->sk; - if (skb->sk == NULL || !sk_fullsock(skb->sk)) + if (!sk || !sk_fullsock(sk) || !net_eq(xt_net(par), sock_net(sk))) return false; return (info->id == sock_cgroup_classid(&skb->sk->sk_cgrp_data)) ^ @@ -85,8 +86,9 @@ static bool cgroup_mt_v1(const struct sk_buff *skb, struct xt_action_param *par) const struct xt_cgroup_info_v1 *info = par->matchinfo; struct sock_cgroup_data *skcd = &skb->sk->sk_cgrp_data; struct cgroup *ancestor = info->priv; + struct sock *sk = skb->sk; - if (!skb->sk || !sk_fullsock(skb->sk)) + if (!sk || !sk_fullsock(sk) || !net_eq(xt_net(par), sock_net(sk))) return false; if (ancestor) diff --git a/net/netfilter/xt_owner.c b/net/netfilter/xt_owner.c index 3d705c688a27..46686fb73784 100644 --- a/net/netfilter/xt_owner.c +++ b/net/netfilter/xt_owner.c @@ -67,7 +67,7 @@ owner_mt(const struct sk_buff *skb, struct xt_action_param *par) struct sock *sk = skb_to_full_sk(skb); struct net *net = xt_net(par); - if (sk == NULL || sk->sk_socket == NULL) + if (!sk || !sk->sk_socket || !net_eq(net, sock_net(sk))) return (info->match ^ info->invert) == 0; else if (info->match & info->invert & XT_OWNER_SOCKET) /* diff --git a/net/netfilter/xt_recent.c b/net/netfilter/xt_recent.c index 07085c22b19c..f44de4bc2100 100644 --- a/net/netfilter/xt_recent.c +++ b/net/netfilter/xt_recent.c @@ -265,7 +265,8 @@ recent_mt(const struct sk_buff *skb, struct xt_action_param *par) } /* use TTL as seen before forwarding */ - if (xt_out(par) != NULL && skb->sk == NULL) + if (xt_out(par) != NULL && + (!skb->sk || !net_eq(net, sock_net(skb->sk)))) ttl++; spin_lock_bh(&recent_lock); diff --git a/net/netfilter/xt_socket.c b/net/netfilter/xt_socket.c index 5c0779c4fa3c..0472f3472842 100644 --- a/net/netfilter/xt_socket.c +++ b/net/netfilter/xt_socket.c @@ -56,8 +56,12 @@ socket_match(const struct sk_buff *skb, struct xt_action_param *par, struct sk_buff *pskb = (struct sk_buff *)skb; struct sock *sk = skb->sk; + if (!net_eq(xt_net(par), sock_net(sk))) + sk = NULL; + if (!sk) sk = nf_sk_lookup_slow_v4(xt_net(par), skb, xt_in(par)); + if (sk) { bool wildcard; bool transparent = true; @@ -113,8 +117,12 @@ socket_mt6_v1_v2_v3(const struct sk_buff *skb, struct xt_action_param *par) struct sk_buff *pskb = (struct sk_buff *)skb; struct sock *sk = skb->sk; + if (!net_eq(xt_net(par), sock_net(sk))) + sk = NULL; + if (!sk) sk = nf_sk_lookup_slow_v6(xt_net(par), skb, xt_in(par)); + if (sk) { bool wildcard; bool transparent = true; diff --git a/net/openvswitch/actions.c b/net/openvswitch/actions.c index 30a5df27116e..85ae53d8fd09 100644 --- a/net/openvswitch/actions.c +++ b/net/openvswitch/actions.c @@ -1057,6 +1057,28 @@ static int sample(struct datapath *dp, struct sk_buff *skb, clone_flow_key); } +/* When 'last' is true, clone() should always consume the 'skb'. + * Otherwise, clone() should keep 'skb' intact regardless what + * actions are executed within clone(). 
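A brief illustration of these clone() semantics (not part of the patch, just restating the rule above with a concrete case):

	/*
	 * Example: when a clone action is followed by further actions, it
	 * is not "last", so the original skb (and, if the nested actions
	 * may modify it, a copy of the flow key) has to survive for the
	 * actions that follow.  Only a clone that is the final action may
	 * consume the skb itself.
	 */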
+ */ +static int clone(struct datapath *dp, struct sk_buff *skb, + struct sw_flow_key *key, const struct nlattr *attr, + bool last) +{ + struct nlattr *actions; + struct nlattr *clone_arg; + int rem = nla_len(attr); + bool dont_clone_flow_key; + + /* The first action is always 'OVS_CLONE_ATTR_ARG'. */ + clone_arg = nla_data(attr); + dont_clone_flow_key = nla_get_u32(clone_arg); + actions = nla_next(clone_arg, &rem); + + return clone_execute(dp, skb, key, 0, actions, rem, last, + !dont_clone_flow_key); +} + static void execute_hash(struct sk_buff *skb, struct sw_flow_key *key, const struct nlattr *attr) { @@ -1336,6 +1358,17 @@ static int do_execute_actions(struct datapath *dp, struct sk_buff *skb, consume_skb(skb); return 0; } + break; + + case OVS_ACTION_ATTR_CLONE: { + bool last = nla_is_last(a, rem); + + err = clone(dp, skb, key, a, last); + if (last) + return err; + + break; + } } if (unlikely(err)) { diff --git a/net/openvswitch/flow_netlink.c b/net/openvswitch/flow_netlink.c index 492ab0c36f7c..a70097ecf33c 100644 --- a/net/openvswitch/flow_netlink.c +++ b/net/openvswitch/flow_netlink.c @@ -2460,6 +2460,40 @@ static int validate_and_copy_sample(struct net *net, const struct nlattr *attr, return 0; } +static int validate_and_copy_clone(struct net *net, + const struct nlattr *attr, + const struct sw_flow_key *key, + struct sw_flow_actions **sfa, + __be16 eth_type, __be16 vlan_tci, + bool log, bool last) +{ + int start, err; + u32 exec; + + if (nla_len(attr) && nla_len(attr) < NLA_HDRLEN) + return -EINVAL; + + start = add_nested_action_start(sfa, OVS_ACTION_ATTR_CLONE, log); + if (start < 0) + return start; + + exec = last || !actions_may_change_flow(attr); + + err = ovs_nla_add_action(sfa, OVS_CLONE_ATTR_EXEC, &exec, + sizeof(exec), log); + if (err) + return err; + + err = __ovs_nla_copy_actions(net, attr, key, sfa, + eth_type, vlan_tci, log); + if (err) + return err; + + add_nested_action_end(*sfa, start); + + return 0; +} + void ovs_match_init(struct sw_flow_match *match, struct sw_flow_key *key, bool reset_key, @@ -2516,7 +2550,9 @@ static int validate_and_copy_set_tun(const struct nlattr *attr, struct ovs_tunnel_info *ovs_tun; struct nlattr *a; int err = 0, start, opts_type; + __be16 dst_opt_type; + dst_opt_type = 0; ovs_match_init(&match, &key, true, NULL); opts_type = ip_tun_from_nlattr(nla_data(attr), &match, false, log); if (opts_type < 0) @@ -2528,10 +2564,13 @@ static int validate_and_copy_set_tun(const struct nlattr *attr, err = validate_geneve_opts(&key); if (err < 0) return err; + dst_opt_type = TUNNEL_GENEVE_OPT; break; case OVS_TUNNEL_KEY_ATTR_VXLAN_OPTS: + dst_opt_type = TUNNEL_VXLAN_OPT; break; case OVS_TUNNEL_KEY_ATTR_ERSPAN_OPTS: + dst_opt_type = TUNNEL_ERSPAN_OPT; break; } } @@ -2574,7 +2613,7 @@ static int validate_and_copy_set_tun(const struct nlattr *attr, */ ip_tunnel_info_opts_set(tun_info, TUN_METADATA_OPTS(&key, key.tun_opts_len), - key.tun_opts_len); + key.tun_opts_len, dst_opt_type); add_nested_action_end(*sfa, start); return err; @@ -2844,6 +2883,7 @@ static int __ovs_nla_copy_actions(struct net *net, const struct nlattr *attr, [OVS_ACTION_ATTR_PUSH_NSH] = (u32)-1, [OVS_ACTION_ATTR_POP_NSH] = 0, [OVS_ACTION_ATTR_METER] = sizeof(u32), + [OVS_ACTION_ATTR_CLONE] = (u32)-1, }; const struct ovs_action_push_vlan *vlan; int type = nla_type(a); @@ -3033,6 +3073,18 @@ static int __ovs_nla_copy_actions(struct net *net, const struct nlattr *attr, /* Non-existent meters are simply ignored. 
*/ break; + case OVS_ACTION_ATTR_CLONE: { + bool last = nla_is_last(a, rem); + + err = validate_and_copy_clone(net, a, key, sfa, + eth_type, vlan_tci, + log, last); + if (err) + return err; + skip_copy = true; + break; + } + default: OVS_NLERR(log, "Unknown Action type %d", type); return -EINVAL; @@ -3111,6 +3163,26 @@ out: return err; } +static int clone_action_to_attr(const struct nlattr *attr, + struct sk_buff *skb) +{ + struct nlattr *start; + int err = 0, rem = nla_len(attr); + + start = nla_nest_start(skb, OVS_ACTION_ATTR_CLONE); + if (!start) + return -EMSGSIZE; + + err = ovs_nla_put_actions(nla_data(attr), rem, skb); + + if (err) + nla_nest_cancel(skb, start); + else + nla_nest_end(skb, start); + + return err; +} + static int set_action_to_attr(const struct nlattr *a, struct sk_buff *skb) { const struct nlattr *ovs_key = nla_data(a); @@ -3199,6 +3271,12 @@ int ovs_nla_put_actions(const struct nlattr *attr, int len, struct sk_buff *skb) return err; break; + case OVS_ACTION_ATTR_CLONE: + err = clone_action_to_attr(a, skb); + if (err) + return err; + break; + default: if (nla_put(skb, type, nla_len(a), nla_data(a))) return -EMSGSIZE; diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c index 9b27d0cd766d..e3e00d3a972e 100644 --- a/net/packet/af_packet.c +++ b/net/packet/af_packet.c @@ -275,9 +275,10 @@ static bool packet_use_direct_xmit(const struct packet_sock *po) return po->xmit == packet_direct_xmit; } -static u16 __packet_pick_tx_queue(struct net_device *dev, struct sk_buff *skb) +static u16 __packet_pick_tx_queue(struct net_device *dev, struct sk_buff *skb, + struct net_device *sb_dev) { - return (u16) raw_smp_processor_id() % dev->real_num_tx_queues; + return dev_pick_tx_cpu_id(dev, skb, sb_dev, NULL); } static u16 packet_pick_tx_queue(struct sk_buff *skb) @@ -291,7 +292,7 @@ static u16 packet_pick_tx_queue(struct sk_buff *skb) __packet_pick_tx_queue); queue_index = netdev_cap_txqueue(dev, queue_index); } else { - queue_index = __packet_pick_tx_queue(dev, skb); + queue_index = __packet_pick_tx_queue(dev, skb, NULL); } return queue_index; @@ -1951,7 +1952,7 @@ retry: goto out_unlock; } - sockc.tsflags = sk->sk_tsflags; + sockcm_init(&sockc, sk); if (msg->msg_controllen) { err = sock_cmsg_send(sk, msg, &sockc); if (unlikely(err)) @@ -1962,6 +1963,7 @@ retry: skb->dev = dev; skb->priority = sk->sk_priority; skb->mark = sk->sk_mark; + skb->tstamp = sockc.transmit_time; sock_tx_timestamp(sk, sockc.tsflags, &skb_shinfo(skb)->tx_flags); @@ -2457,6 +2459,7 @@ static int tpacket_fill_skb(struct packet_sock *po, struct sk_buff *skb, skb->dev = dev; skb->priority = po->sk.sk_priority; skb->mark = po->sk.sk_mark; + skb->tstamp = sockc->transmit_time; sock_tx_timestamp(&po->sk, sockc->tsflags, &skb_shinfo(skb)->tx_flags); skb_shinfo(skb)->destructor_arg = ph.raw; @@ -2633,7 +2636,7 @@ static int tpacket_snd(struct packet_sock *po, struct msghdr *msg) if (unlikely(!(dev->flags & IFF_UP))) goto out_put; - sockc.tsflags = po->sk.sk_tsflags; + sockcm_init(&sockc, &po->sk); if (msg->msg_controllen) { err = sock_cmsg_send(&po->sk, msg, &sockc); if (unlikely(err)) @@ -2829,7 +2832,7 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len) if (unlikely(!(dev->flags & IFF_UP))) goto out_unlock; - sockc.tsflags = sk->sk_tsflags; + sockcm_init(&sockc, sk); sockc.mark = sk->sk_mark; if (msg->msg_controllen) { err = sock_cmsg_send(sk, msg, &sockc); @@ -2905,6 +2908,7 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len) skb->dev = dev; skb->priority = 
sk->sk_priority; skb->mark = sockc.mark; + skb->tstamp = sockc.transmit_time; if (has_vnet_hdr) { err = virtio_net_hdr_to_skb(skb, &vnet_hdr, vio_le()); diff --git a/net/rds/ib_recv.c b/net/rds/ib_recv.c index b4e421aa9727..1eaf2550a9f8 100644 --- a/net/rds/ib_recv.c +++ b/net/rds/ib_recv.c @@ -376,8 +376,6 @@ static void release_refill(struct rds_connection *conn) * This tries to allocate and post unused work requests after making sure that * they have all the allocations they need to queue received fragments into * sockets. - * - * -1 is returned if posting fails due to temporary resource exhaustion. */ void rds_ib_recv_refill(struct rds_connection *conn, int prefill, gfp_t gfp) { @@ -1025,7 +1023,6 @@ int rds_ib_recv_path(struct rds_conn_path *cp) { struct rds_connection *conn = cp->cp_conn; struct rds_ib_connection *ic = conn->c_transport_data; - int ret = 0; rdsdebug("conn %p\n", conn); if (rds_conn_up(conn)) { @@ -1034,7 +1031,7 @@ int rds_ib_recv_path(struct rds_conn_path *cp) rds_ib_stats_inc(s_ib_rx_refill_from_thread); } - return ret; + return 0; } int rds_ib_recv_init(void) diff --git a/net/sched/Kconfig b/net/sched/Kconfig index a01169fb5325..7af246764a35 100644 --- a/net/sched/Kconfig +++ b/net/sched/Kconfig @@ -183,6 +183,17 @@ config NET_SCH_CBS To compile this code as a module, choose M here: the module will be called sch_cbs. +config NET_SCH_ETF + tristate "Earliest TxTime First (ETF)" + help + Say Y here if you want to use the Earliest TxTime First (ETF) packet + scheduling algorithm. + + See the top of <file:net/sched/sch_etf.c> for more details. + + To compile this code as a module, choose M here: the + module will be called sch_etf. + config NET_SCH_GRED tristate "Generic Random Early Detection (GRED)" ---help--- @@ -284,6 +295,17 @@ config NET_SCH_FQ_CODEL If unsure, say N. +config NET_SCH_CAKE + tristate "Common Applications Kept Enhanced (CAKE)" + help + Say Y here if you want to use the Common Applications Kept Enhanced + (CAKE) queue management algorithm. + + To compile this driver as a module, choose M here: the module + will be called sch_cake. + + If unsure, say N. 
+ config NET_SCH_FQ tristate "Fair Queue" help diff --git a/net/sched/Makefile b/net/sched/Makefile index 8811d3804878..673ee7d26ff2 100644 --- a/net/sched/Makefile +++ b/net/sched/Makefile @@ -50,10 +50,12 @@ obj-$(CONFIG_NET_SCH_CHOKE) += sch_choke.o obj-$(CONFIG_NET_SCH_QFQ) += sch_qfq.o obj-$(CONFIG_NET_SCH_CODEL) += sch_codel.o obj-$(CONFIG_NET_SCH_FQ_CODEL) += sch_fq_codel.o +obj-$(CONFIG_NET_SCH_CAKE) += sch_cake.o obj-$(CONFIG_NET_SCH_FQ) += sch_fq.o obj-$(CONFIG_NET_SCH_HHF) += sch_hhf.o obj-$(CONFIG_NET_SCH_PIE) += sch_pie.o obj-$(CONFIG_NET_SCH_CBS) += sch_cbs.o +obj-$(CONFIG_NET_SCH_ETF) += sch_etf.o obj-$(CONFIG_NET_CLS_U32) += cls_u32.o obj-$(CONFIG_NET_CLS_ROUTE4) += cls_route.o diff --git a/net/sched/act_api.c b/net/sched/act_api.c index 3f4cf930f809..148a89ab789b 100644 --- a/net/sched/act_api.c +++ b/net/sched/act_api.c @@ -55,6 +55,24 @@ static void tcf_action_goto_chain_exec(const struct tc_action *a, res->goto_tp = rcu_dereference_bh(chain->filter_chain); } +static void tcf_free_cookie_rcu(struct rcu_head *p) +{ + struct tc_cookie *cookie = container_of(p, struct tc_cookie, rcu); + + kfree(cookie->data); + kfree(cookie); +} + +static void tcf_set_action_cookie(struct tc_cookie __rcu **old_cookie, + struct tc_cookie *new_cookie) +{ + struct tc_cookie *old; + + old = xchg((__force struct tc_cookie **)old_cookie, new_cookie); + if (old) + call_rcu(&old->rcu, tcf_free_cookie_rcu); +} + /* XXX: For standalone actions, we don't need a RCU grace period either, because * actions are always connected to filters and filters are already destroyed in * RCU callbacks, so after a RCU grace period actions are already disconnected @@ -65,44 +83,64 @@ static void free_tcf(struct tc_action *p) free_percpu(p->cpu_bstats); free_percpu(p->cpu_qstats); - if (p->act_cookie) { - kfree(p->act_cookie->data); - kfree(p->act_cookie); - } + tcf_set_action_cookie(&p->act_cookie, NULL); if (p->goto_chain) tcf_action_goto_chain_fini(p); kfree(p); } -static void tcf_idr_remove(struct tcf_idrinfo *idrinfo, struct tc_action *p) +static void tcf_action_cleanup(struct tc_action *p) { - spin_lock(&idrinfo->lock); - idr_remove(&idrinfo->action_idr, p->tcfa_index); - spin_unlock(&idrinfo->lock); + if (p->ops->cleanup) + p->ops->cleanup(p); + gen_kill_estimator(&p->tcfa_rate_est); free_tcf(p); } +static int __tcf_action_put(struct tc_action *p, bool bind) +{ + struct tcf_idrinfo *idrinfo = p->idrinfo; + + if (refcount_dec_and_lock(&p->tcfa_refcnt, &idrinfo->lock)) { + if (bind) + atomic_dec(&p->tcfa_bindcnt); + idr_remove(&idrinfo->action_idr, p->tcfa_index); + spin_unlock(&idrinfo->lock); + + tcf_action_cleanup(p); + return 1; + } + + if (bind) + atomic_dec(&p->tcfa_bindcnt); + + return 0; +} + int __tcf_idr_release(struct tc_action *p, bool bind, bool strict) { int ret = 0; - ASSERT_RTNL(); - + /* Release with strict==1 and bind==0 is only called through act API + * interface (classifiers always bind). Only case when action with + * positive reference count and zero bind count can exist is when it was + * also created with act API (unbinding last classifier will destroy the + * action if it was created by classifier). So only case when bind count + * can be changed after initial check is when unbound action is + * destroyed by act API while classifier binds to action with same id + * concurrently. This result either creation of new action(same behavior + * as before), or reusing existing action if concurrent process + * increments reference count before action is deleted. Both scenarios + * are acceptable. 
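One possible interleaving of the concurrent delete/bind scenario described above, as an illustration rather than a statement of the only behaviour:

	/*
	 * Example: the act API drops the last reference of an unbound
	 * action while a classifier binds to the same index.  Both paths
	 * serialize on idrinfo->lock: if the classifier's refcount_inc()
	 * wins, the existing action is reused; if the delete wins, the
	 * index becomes free and the classifier simply creates a new
	 * action with that index.
	 */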
+ */ if (p) { - if (bind) - p->tcfa_bindcnt--; - else if (strict && p->tcfa_bindcnt > 0) + if (!bind && strict && atomic_read(&p->tcfa_bindcnt) > 0) return -EPERM; - p->tcfa_refcnt--; - if (p->tcfa_bindcnt <= 0 && p->tcfa_refcnt <= 0) { - if (p->ops->cleanup) - p->ops->cleanup(p); - tcf_idr_remove(p->idrinfo, p); + if (__tcf_action_put(p, bind)) ret = ACT_P_DELETED; - } } return ret; @@ -111,10 +149,15 @@ EXPORT_SYMBOL(__tcf_idr_release); static size_t tcf_action_shared_attrs_size(const struct tc_action *act) { + struct tc_cookie *act_cookie; u32 cookie_len = 0; - if (act->act_cookie) - cookie_len = nla_total_size(act->act_cookie->len); + rcu_read_lock(); + act_cookie = rcu_dereference(act->act_cookie); + + if (act_cookie) + cookie_len = nla_total_size(act_cookie->len); + rcu_read_unlock(); return nla_total_size(0) /* action number nested */ + nla_total_size(IFNAMSIZ) /* TCA_ACT_KIND */ @@ -257,46 +300,77 @@ int tcf_generic_walker(struct tc_action_net *tn, struct sk_buff *skb, } EXPORT_SYMBOL(tcf_generic_walker); -static struct tc_action *tcf_idr_lookup(u32 index, struct tcf_idrinfo *idrinfo) +static bool __tcf_idr_check(struct tc_action_net *tn, u32 index, + struct tc_action **a, int bind) { - struct tc_action *p = NULL; + struct tcf_idrinfo *idrinfo = tn->idrinfo; + struct tc_action *p; spin_lock(&idrinfo->lock); p = idr_find(&idrinfo->action_idr, index); + if (IS_ERR(p)) { + p = NULL; + } else if (p) { + refcount_inc(&p->tcfa_refcnt); + if (bind) + atomic_inc(&p->tcfa_bindcnt); + } spin_unlock(&idrinfo->lock); - return p; + if (p) { + *a = p; + return true; + } + return false; } int tcf_idr_search(struct tc_action_net *tn, struct tc_action **a, u32 index) { - struct tcf_idrinfo *idrinfo = tn->idrinfo; - struct tc_action *p = tcf_idr_lookup(index, idrinfo); - - if (p) { - *a = p; - return 1; - } - return 0; + return __tcf_idr_check(tn, index, a, 0); } EXPORT_SYMBOL(tcf_idr_search); bool tcf_idr_check(struct tc_action_net *tn, u32 index, struct tc_action **a, int bind) { + return __tcf_idr_check(tn, index, a, bind); +} +EXPORT_SYMBOL(tcf_idr_check); + +int tcf_idr_delete_index(struct tc_action_net *tn, u32 index) +{ struct tcf_idrinfo *idrinfo = tn->idrinfo; - struct tc_action *p = tcf_idr_lookup(index, idrinfo); + struct tc_action *p; + int ret = 0; - if (index && p) { - if (bind) - p->tcfa_bindcnt++; - p->tcfa_refcnt++; - *a = p; - return true; + spin_lock(&idrinfo->lock); + p = idr_find(&idrinfo->action_idr, index); + if (!p) { + spin_unlock(&idrinfo->lock); + return -ENOENT; } - return false; + + if (!atomic_read(&p->tcfa_bindcnt)) { + if (refcount_dec_and_test(&p->tcfa_refcnt)) { + struct module *owner = p->ops->owner; + + WARN_ON(p != idr_remove(&idrinfo->action_idr, + p->tcfa_index)); + spin_unlock(&idrinfo->lock); + + tcf_action_cleanup(p); + module_put(owner); + return 0; + } + ret = 0; + } else { + ret = -EPERM; + } + + spin_unlock(&idrinfo->lock); + return ret; } -EXPORT_SYMBOL(tcf_idr_check); +EXPORT_SYMBOL(tcf_idr_delete_index); int tcf_idr_create(struct tc_action_net *tn, u32 index, struct nlattr *est, struct tc_action **a, const struct tc_action_ops *ops, @@ -304,14 +378,13 @@ int tcf_idr_create(struct tc_action_net *tn, u32 index, struct nlattr *est, { struct tc_action *p = kzalloc(ops->size, GFP_KERNEL); struct tcf_idrinfo *idrinfo = tn->idrinfo; - struct idr *idr = &idrinfo->action_idr; int err = -ENOMEM; if (unlikely(!p)) return -ENOMEM; - p->tcfa_refcnt = 1; + refcount_set(&p->tcfa_refcnt, 1); if (bind) - p->tcfa_bindcnt = 1; + atomic_set(&p->tcfa_bindcnt, 1); if 
(cpustats) { p->cpu_bstats = netdev_alloc_pcpu_stats(struct gnet_stats_basic_cpu); @@ -322,20 +395,6 @@ int tcf_idr_create(struct tc_action_net *tn, u32 index, struct nlattr *est, goto err2; } spin_lock_init(&p->tcfa_lock); - idr_preload(GFP_KERNEL); - spin_lock(&idrinfo->lock); - /* user doesn't specify an index */ - if (!index) { - index = 1; - err = idr_alloc_u32(idr, NULL, &index, UINT_MAX, GFP_ATOMIC); - } else { - err = idr_alloc_u32(idr, NULL, &index, index, GFP_ATOMIC); - } - spin_unlock(&idrinfo->lock); - idr_preload_end(); - if (err) - goto err3; - p->tcfa_index = index; p->tcfa_tm.install = jiffies; p->tcfa_tm.lastuse = jiffies; @@ -345,7 +404,7 @@ int tcf_idr_create(struct tc_action_net *tn, u32 index, struct nlattr *est, &p->tcfa_rate_est, &p->tcfa_lock, NULL, est); if (err) - goto err4; + goto err3; } p->idrinfo = idrinfo; @@ -353,8 +412,6 @@ int tcf_idr_create(struct tc_action_net *tn, u32 index, struct nlattr *est, INIT_LIST_HEAD(&p->list); *a = p; return 0; -err4: - idr_remove(idr, index); err3: free_percpu(p->cpu_qstats); err2: @@ -370,11 +427,78 @@ void tcf_idr_insert(struct tc_action_net *tn, struct tc_action *a) struct tcf_idrinfo *idrinfo = tn->idrinfo; spin_lock(&idrinfo->lock); - idr_replace(&idrinfo->action_idr, a, a->tcfa_index); + /* Replace ERR_PTR(-EBUSY) allocated by tcf_idr_check_alloc */ + WARN_ON(!IS_ERR(idr_replace(&idrinfo->action_idr, a, a->tcfa_index))); spin_unlock(&idrinfo->lock); } EXPORT_SYMBOL(tcf_idr_insert); +/* Cleanup idr index that was allocated but not initialized. */ + +void tcf_idr_cleanup(struct tc_action_net *tn, u32 index) +{ + struct tcf_idrinfo *idrinfo = tn->idrinfo; + + spin_lock(&idrinfo->lock); + /* Remove ERR_PTR(-EBUSY) allocated by tcf_idr_check_alloc */ + WARN_ON(!IS_ERR(idr_remove(&idrinfo->action_idr, index))); + spin_unlock(&idrinfo->lock); +} +EXPORT_SYMBOL(tcf_idr_cleanup); + +/* Check if action with specified index exists. If actions is found, increments + * its reference and bind counters, and return 1. Otherwise insert temporary + * error pointer (to prevent concurrent users from inserting actions with same + * index) and return 0. + */ + +int tcf_idr_check_alloc(struct tc_action_net *tn, u32 *index, + struct tc_action **a, int bind) +{ + struct tcf_idrinfo *idrinfo = tn->idrinfo; + struct tc_action *p; + int ret; + +again: + spin_lock(&idrinfo->lock); + if (*index) { + p = idr_find(&idrinfo->action_idr, *index); + if (IS_ERR(p)) { + /* This means that another process allocated + * index but did not assign the pointer yet. 
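+ * Drop the lock and retry: the ERR_PTR(-EBUSY) reservation goes away
+ * once the concurrent initializer either publishes its action through
+ * tcf_idr_insert() or releases the index through tcf_idr_cleanup().
+ *
+ * Callers are expected to handle the three possible results of this
+ * function roughly as follows (a sketch only; the exact error paths
+ * differ between actions, and "ops" stands for the action's
+ * tc_action_ops):
+ *
+ *	err = tcf_idr_check_alloc(tn, &parm->index, a, bind);
+ *	if (err < 0)			// idr allocation failed
+ *		return err;
+ *	if (!err) {			// index reserved for this caller
+ *		ret = tcf_idr_create(tn, parm->index, est, a, ops, bind, false);
+ *		if (ret)
+ *			tcf_idr_cleanup(tn, parm->index);
+ *	} else {			// existing action, reference already taken
+ *		if (bind)
+ *			return 0;
+ *		if (!ovr)
+ *			tcf_idr_release(*a, bind);
+ *	}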
+ */ + spin_unlock(&idrinfo->lock); + goto again; + } + + if (p) { + refcount_inc(&p->tcfa_refcnt); + if (bind) + atomic_inc(&p->tcfa_bindcnt); + *a = p; + ret = 1; + } else { + *a = NULL; + ret = idr_alloc_u32(&idrinfo->action_idr, NULL, index, + *index, GFP_ATOMIC); + if (!ret) + idr_replace(&idrinfo->action_idr, + ERR_PTR(-EBUSY), *index); + } + } else { + *index = 1; + *a = NULL; + ret = idr_alloc_u32(&idrinfo->action_idr, NULL, index, + UINT_MAX, GFP_ATOMIC); + if (!ret) + idr_replace(&idrinfo->action_idr, ERR_PTR(-EBUSY), + *index); + } + spin_unlock(&idrinfo->lock); + return ret; +} +EXPORT_SYMBOL(tcf_idr_check_alloc); + void tcf_idrinfo_destroy(const struct tc_action_ops *ops, struct tcf_idrinfo *idrinfo) { @@ -538,13 +662,15 @@ repeat: } EXPORT_SYMBOL(tcf_action_exec); -int tcf_action_destroy(struct list_head *actions, int bind) +int tcf_action_destroy(struct tc_action *actions[], int bind) { const struct tc_action_ops *ops; - struct tc_action *a, *tmp; - int ret = 0; + struct tc_action *a; + int ret = 0, i; - list_for_each_entry_safe(a, tmp, actions, list) { + for (i = 0; i < TCA_ACT_MAX_PRIO && actions[i]; i++) { + a = actions[i]; + actions[i] = NULL; ops = a->ops; ret = __tcf_idr_release(a, bind, true); if (ret == ACT_P_DELETED) @@ -555,6 +681,24 @@ int tcf_action_destroy(struct list_head *actions, int bind) return ret; } +static int tcf_action_put(struct tc_action *p) +{ + return __tcf_action_put(p, false); +} + +static void tcf_action_put_many(struct tc_action *actions[]) +{ + int i; + + for (i = 0; i < TCA_ACT_MAX_PRIO && actions[i]; i++) { + struct tc_action *a = actions[i]; + const struct tc_action_ops *ops = a->ops; + + if (tcf_action_put(a)) + module_put(ops->owner); + } +} + int tcf_action_dump_old(struct sk_buff *skb, struct tc_action *a, int bind, int ref) { @@ -567,16 +711,22 @@ tcf_action_dump_1(struct sk_buff *skb, struct tc_action *a, int bind, int ref) int err = -EINVAL; unsigned char *b = skb_tail_pointer(skb); struct nlattr *nest; + struct tc_cookie *cookie; if (nla_put_string(skb, TCA_KIND, a->ops->kind)) goto nla_put_failure; if (tcf_action_copy_stats(skb, a, 0)) goto nla_put_failure; - if (a->act_cookie) { - if (nla_put(skb, TCA_ACT_COOKIE, a->act_cookie->len, - a->act_cookie->data)) + + rcu_read_lock(); + cookie = rcu_dereference(a->act_cookie); + if (cookie) { + if (nla_put(skb, TCA_ACT_COOKIE, cookie->len, cookie->data)) { + rcu_read_unlock(); goto nla_put_failure; + } } + rcu_read_unlock(); nest = nla_nest_start(skb, TCA_OPTIONS); if (nest == NULL) @@ -593,14 +743,15 @@ nla_put_failure: } EXPORT_SYMBOL(tcf_action_dump_1); -int tcf_action_dump(struct sk_buff *skb, struct list_head *actions, +int tcf_action_dump(struct sk_buff *skb, struct tc_action *actions[], int bind, int ref) { struct tc_action *a; - int err = -EINVAL; + int err = -EINVAL, i; struct nlattr *nest; - list_for_each_entry(a, actions, list) { + for (i = 0; i < TCA_ACT_MAX_PRIO && actions[i]; i++) { + a = actions[i]; nest = nla_nest_start(skb, a->order); if (nest == NULL) goto nla_put_failure; @@ -638,6 +789,7 @@ static struct tc_cookie *nla_memdup_cookie(struct nlattr **tb) struct tc_action *tcf_action_init_1(struct net *net, struct tcf_proto *tp, struct nlattr *nla, struct nlattr *est, char *name, int ovr, int bind, + bool rtnl_held, struct netlink_ext_ack *extack) { struct tc_action *a; @@ -688,9 +840,11 @@ struct tc_action *tcf_action_init_1(struct net *net, struct tcf_proto *tp, a_o = tc_lookup_action_n(act_name); if (a_o == NULL) { #ifdef CONFIG_MODULES - rtnl_unlock(); + if 
(rtnl_held) + rtnl_unlock(); request_module("act_%s", act_name); - rtnl_lock(); + if (rtnl_held) + rtnl_lock(); a_o = tc_lookup_action_n(act_name); @@ -713,19 +867,15 @@ struct tc_action *tcf_action_init_1(struct net *net, struct tcf_proto *tp, /* backward compatibility for policer */ if (name == NULL) err = a_o->init(net, tb[TCA_ACT_OPTIONS], est, &a, ovr, bind, - extack); + rtnl_held, extack); else - err = a_o->init(net, nla, est, &a, ovr, bind, extack); + err = a_o->init(net, nla, est, &a, ovr, bind, rtnl_held, + extack); if (err < 0) goto err_mod; - if (name == NULL && tb[TCA_ACT_COOKIE]) { - if (a->act_cookie) { - kfree(a->act_cookie->data); - kfree(a->act_cookie); - } - a->act_cookie = cookie; - } + if (!name && tb[TCA_ACT_COOKIE]) + tcf_set_action_cookie(&a->act_cookie, cookie); /* module count goes up only when brand new policy is created * if it exists and is only bound to in a_o->init() then @@ -737,10 +887,9 @@ struct tc_action *tcf_action_init_1(struct net *net, struct tcf_proto *tp, if (TC_ACT_EXT_CMP(a->tcfa_action, TC_ACT_GOTO_CHAIN)) { err = tcf_action_goto_chain_init(a, tp); if (err) { - LIST_HEAD(actions); + struct tc_action *actions[] = { a, NULL }; - list_add_tail(&a->list, &actions); - tcf_action_destroy(&actions, bind); + tcf_action_destroy(actions, bind); NL_SET_ERR_MSG(extack, "Failed to init TC action chain"); return ERR_PTR(err); } @@ -758,21 +907,12 @@ err_out: return ERR_PTR(err); } -static void cleanup_a(struct list_head *actions, int ovr) -{ - struct tc_action *a; - - if (!ovr) - return; - - list_for_each_entry(a, actions, list) - a->tcfa_refcnt--; -} +/* Returns numbers of initialized actions or negative error. */ int tcf_action_init(struct net *net, struct tcf_proto *tp, struct nlattr *nla, struct nlattr *est, char *name, int ovr, int bind, - struct list_head *actions, size_t *attr_size, - struct netlink_ext_ack *extack) + struct tc_action *actions[], size_t *attr_size, + bool rtnl_held, struct netlink_ext_ack *extack) { struct nlattr *tb[TCA_ACT_MAX_PRIO + 1]; struct tc_action *act; @@ -786,25 +926,19 @@ int tcf_action_init(struct net *net, struct tcf_proto *tp, struct nlattr *nla, for (i = 1; i <= TCA_ACT_MAX_PRIO && tb[i]; i++) { act = tcf_action_init_1(net, tp, tb[i], est, name, ovr, bind, - extack); + rtnl_held, extack); if (IS_ERR(act)) { err = PTR_ERR(act); goto err; } act->order = i; sz += tcf_action_fill_size(act); - if (ovr) - act->tcfa_refcnt++; - list_add_tail(&act->list, actions); + /* Start from index 0 */ + actions[i - 1] = act; } *attr_size = tcf_action_full_attrs_size(sz); - - /* Remove the temp refcnt which was necessary to protect against - * destroying an existing action which was being replaced - */ - cleanup_a(actions, ovr); - return 0; + return i - 1; err: tcf_action_destroy(actions, bind); @@ -855,7 +989,7 @@ errout: return -1; } -static int tca_get_fill(struct sk_buff *skb, struct list_head *actions, +static int tca_get_fill(struct sk_buff *skb, struct tc_action *actions[], u32 portid, u32 seq, u16 flags, int event, int bind, int ref) { @@ -891,7 +1025,7 @@ out_nlmsg_trim: static int tcf_get_notify(struct net *net, u32 portid, struct nlmsghdr *n, - struct list_head *actions, int event, + struct tc_action *actions[], int event, struct netlink_ext_ack *extack) { struct sk_buff *skb; @@ -900,7 +1034,7 @@ tcf_get_notify(struct net *net, u32 portid, struct nlmsghdr *n, if (!skb) return -ENOBUFS; if (tca_get_fill(skb, actions, portid, n->nlmsg_seq, 0, event, - 0, 0) <= 0) { + 0, 1) <= 0) { NL_SET_ERR_MSG(extack, "Failed to fill netlink 
attributes while adding TC action"); kfree_skb(skb); return -EINVAL; @@ -1027,9 +1161,41 @@ err_out: return err; } +static int tcf_action_delete(struct net *net, struct tc_action *actions[], + int *acts_deleted, struct netlink_ext_ack *extack) +{ + u32 act_index; + int ret, i; + + for (i = 0; i < TCA_ACT_MAX_PRIO && actions[i]; i++) { + struct tc_action *a = actions[i]; + const struct tc_action_ops *ops = a->ops; + + /* Actions can be deleted concurrently so we must save their + * type and id to search again after reference is released. + */ + act_index = a->tcfa_index; + + if (tcf_action_put(a)) { + /* last reference, action was deleted concurrently */ + module_put(ops->owner); + } else { + /* now do the delete */ + ret = ops->delete(net, act_index); + if (ret < 0) { + *acts_deleted = i + 1; + return ret; + } + } + } + *acts_deleted = i; + return 0; +} + static int -tcf_del_notify(struct net *net, struct nlmsghdr *n, struct list_head *actions, - u32 portid, size_t attr_size, struct netlink_ext_ack *extack) +tcf_del_notify(struct net *net, struct nlmsghdr *n, struct tc_action *actions[], + int *acts_deleted, u32 portid, size_t attr_size, + struct netlink_ext_ack *extack) { int ret; struct sk_buff *skb; @@ -1040,14 +1206,14 @@ tcf_del_notify(struct net *net, struct nlmsghdr *n, struct list_head *actions, return -ENOBUFS; if (tca_get_fill(skb, actions, portid, n->nlmsg_seq, 0, RTM_DELACTION, - 0, 1) <= 0) { + 0, 2) <= 0) { NL_SET_ERR_MSG(extack, "Failed to fill netlink TC action attributes"); kfree_skb(skb); return -EINVAL; } /* now do the delete */ - ret = tcf_action_destroy(actions, 0); + ret = tcf_action_delete(net, actions, acts_deleted, extack); if (ret < 0) { NL_SET_ERR_MSG(extack, "Failed to delete TC action"); kfree_skb(skb); @@ -1069,7 +1235,8 @@ tca_action_gd(struct net *net, struct nlattr *nla, struct nlmsghdr *n, struct nlattr *tb[TCA_ACT_MAX_PRIO + 1]; struct tc_action *act; size_t attr_size = 0; - LIST_HEAD(actions); + struct tc_action *actions[TCA_ACT_MAX_PRIO + 1] = {}; + int acts_deleted = 0; ret = nla_parse_nested(tb, TCA_ACT_MAX_PRIO, nla, NULL, extack); if (ret < 0) @@ -1091,27 +1258,27 @@ tca_action_gd(struct net *net, struct nlattr *nla, struct nlmsghdr *n, } act->order = i; attr_size += tcf_action_fill_size(act); - list_add_tail(&act->list, &actions); + actions[i - 1] = act; } attr_size = tcf_action_full_attrs_size(attr_size); if (event == RTM_GETACTION) - ret = tcf_get_notify(net, portid, n, &actions, event, extack); + ret = tcf_get_notify(net, portid, n, actions, event, extack); else { /* delete */ - ret = tcf_del_notify(net, n, &actions, portid, attr_size, extack); + ret = tcf_del_notify(net, n, actions, &acts_deleted, portid, + attr_size, extack); if (ret) goto err; return ret; } err: - if (event != RTM_GETACTION) - tcf_action_destroy(&actions, 0); + tcf_action_put_many(&actions[acts_deleted]); return ret; } static int -tcf_add_notify(struct net *net, struct nlmsghdr *n, struct list_head *actions, +tcf_add_notify(struct net *net, struct nlmsghdr *n, struct tc_action *actions[], u32 portid, size_t attr_size, struct netlink_ext_ack *extack) { struct sk_buff *skb; @@ -1142,14 +1309,17 @@ static int tcf_action_add(struct net *net, struct nlattr *nla, { size_t attr_size = 0; int ret = 0; - LIST_HEAD(actions); + struct tc_action *actions[TCA_ACT_MAX_PRIO] = {}; - ret = tcf_action_init(net, NULL, nla, NULL, NULL, ovr, 0, &actions, - &attr_size, extack); - if (ret) + ret = tcf_action_init(net, NULL, nla, NULL, NULL, ovr, 0, actions, + &attr_size, true, extack); + if (ret < 
0) return ret; + ret = tcf_add_notify(net, n, actions, portid, attr_size, extack); + if (ovr) + tcf_action_put_many(actions); - return tcf_add_notify(net, n, &actions, portid, attr_size, extack); + return ret; } static u32 tcaa_root_flags_allowed = TCA_FLAG_LARGE_DUMP_ON; diff --git a/net/sched/act_bpf.c b/net/sched/act_bpf.c index 18089c02e557..06f743d8ed41 100644 --- a/net/sched/act_bpf.c +++ b/net/sched/act_bpf.c @@ -141,8 +141,8 @@ static int tcf_bpf_dump(struct sk_buff *skb, struct tc_action *act, struct tcf_bpf *prog = to_bpf(act); struct tc_act_bpf opt = { .index = prog->tcf_index, - .refcnt = prog->tcf_refcnt - ref, - .bindcnt = prog->tcf_bindcnt - bind, + .refcnt = refcount_read(&prog->tcf_refcnt) - ref, + .bindcnt = atomic_read(&prog->tcf_bindcnt) - bind, .action = prog->tcf_action, }; struct tcf_t tm; @@ -276,7 +276,8 @@ static void tcf_bpf_prog_fill_cfg(const struct tcf_bpf *prog, static int tcf_bpf_init(struct net *net, struct nlattr *nla, struct nlattr *est, struct tc_action **act, - int replace, int bind, struct netlink_ext_ack *extack) + int replace, int bind, bool rtnl_held, + struct netlink_ext_ack *extack) { struct tc_action_net *tn = net_generic(net, bpf_net_id); struct nlattr *tb[TCA_ACT_BPF_MAX + 1]; @@ -298,21 +299,27 @@ static int tcf_bpf_init(struct net *net, struct nlattr *nla, parm = nla_data(tb[TCA_ACT_BPF_PARMS]); - if (!tcf_idr_check(tn, parm->index, act, bind)) { + ret = tcf_idr_check_alloc(tn, &parm->index, act, bind); + if (!ret) { ret = tcf_idr_create(tn, parm->index, est, act, &act_bpf_ops, bind, true); - if (ret < 0) + if (ret < 0) { + tcf_idr_cleanup(tn, parm->index); return ret; + } res = ACT_P_CREATED; - } else { + } else if (ret > 0) { /* Don't override defaults. */ if (bind) return 0; - tcf_idr_release(*act, bind); - if (!replace) + if (!replace) { + tcf_idr_release(*act, bind); return -EEXIST; + } + } else { + return ret; } is_bpf = tb[TCA_ACT_BPF_OPS_LEN] && tb[TCA_ACT_BPF_OPS]; @@ -355,8 +362,7 @@ static int tcf_bpf_init(struct net *net, struct nlattr *nla, return res; out: - if (res == ACT_P_CREATED) - tcf_idr_release(*act, bind); + tcf_idr_release(*act, bind); return ret; } @@ -387,6 +393,13 @@ static int tcf_bpf_search(struct net *net, struct tc_action **a, u32 index, return tcf_idr_search(tn, a, index); } +static int tcf_bpf_delete(struct net *net, u32 index) +{ + struct tc_action_net *tn = net_generic(net, bpf_net_id); + + return tcf_idr_delete_index(tn, index); +} + static struct tc_action_ops act_bpf_ops __read_mostly = { .kind = "bpf", .type = TCA_ACT_BPF, @@ -397,6 +410,7 @@ static struct tc_action_ops act_bpf_ops __read_mostly = { .init = tcf_bpf_init, .walk = tcf_bpf_walker, .lookup = tcf_bpf_search, + .delete = tcf_bpf_delete, .size = sizeof(struct tcf_bpf), }; diff --git a/net/sched/act_connmark.c b/net/sched/act_connmark.c index e4b880fa51fe..1e31f0e448e2 100644 --- a/net/sched/act_connmark.c +++ b/net/sched/act_connmark.c @@ -96,7 +96,7 @@ static const struct nla_policy connmark_policy[TCA_CONNMARK_MAX + 1] = { static int tcf_connmark_init(struct net *net, struct nlattr *nla, struct nlattr *est, struct tc_action **a, - int ovr, int bind, + int ovr, int bind, bool rtnl_held, struct netlink_ext_ack *extack) { struct tc_action_net *tn = net_generic(net, connmark_net_id); @@ -118,11 +118,14 @@ static int tcf_connmark_init(struct net *net, struct nlattr *nla, parm = nla_data(tb[TCA_CONNMARK_PARMS]); - if (!tcf_idr_check(tn, parm->index, a, bind)) { + ret = tcf_idr_check_alloc(tn, &parm->index, a, bind); + if (!ret) { ret = 
tcf_idr_create(tn, parm->index, est, a, &act_connmark_ops, bind, false); - if (ret) + if (ret) { + tcf_idr_cleanup(tn, parm->index); return ret; + } ci = to_connmark(*a); ci->tcf_action = parm->action; @@ -131,16 +134,18 @@ static int tcf_connmark_init(struct net *net, struct nlattr *nla, tcf_idr_insert(tn, *a); ret = ACT_P_CREATED; - } else { + } else if (ret > 0) { ci = to_connmark(*a); if (bind) return 0; - tcf_idr_release(*a, bind); - if (!ovr) + if (!ovr) { + tcf_idr_release(*a, bind); return -EEXIST; + } /* replacing action and zone */ ci->tcf_action = parm->action; ci->zone = parm->zone; + ret = 0; } return ret; @@ -154,8 +159,8 @@ static inline int tcf_connmark_dump(struct sk_buff *skb, struct tc_action *a, struct tc_connmark opt = { .index = ci->tcf_index, - .refcnt = ci->tcf_refcnt - ref, - .bindcnt = ci->tcf_bindcnt - bind, + .refcnt = refcount_read(&ci->tcf_refcnt) - ref, + .bindcnt = atomic_read(&ci->tcf_bindcnt) - bind, .action = ci->tcf_action, .zone = ci->zone, }; @@ -193,6 +198,13 @@ static int tcf_connmark_search(struct net *net, struct tc_action **a, u32 index, return tcf_idr_search(tn, a, index); } +static int tcf_connmark_delete(struct net *net, u32 index) +{ + struct tc_action_net *tn = net_generic(net, connmark_net_id); + + return tcf_idr_delete_index(tn, index); +} + static struct tc_action_ops act_connmark_ops = { .kind = "connmark", .type = TCA_ACT_CONNMARK, @@ -202,6 +214,7 @@ static struct tc_action_ops act_connmark_ops = { .init = tcf_connmark_init, .walk = tcf_connmark_walker, .lookup = tcf_connmark_search, + .delete = tcf_connmark_delete, .size = sizeof(struct tcf_connmark_info), }; diff --git a/net/sched/act_csum.c b/net/sched/act_csum.c index 6e7124e57918..4e8c383f379e 100644 --- a/net/sched/act_csum.c +++ b/net/sched/act_csum.c @@ -46,7 +46,8 @@ static struct tc_action_ops act_csum_ops; static int tcf_csum_init(struct net *net, struct nlattr *nla, struct nlattr *est, struct tc_action **a, int ovr, - int bind, struct netlink_ext_ack *extack) + int bind, bool rtnl_held, + struct netlink_ext_ack *extack) { struct tc_action_net *tn = net_generic(net, csum_net_id); struct tcf_csum_params *params_old, *params_new; @@ -66,18 +67,24 @@ static int tcf_csum_init(struct net *net, struct nlattr *nla, return -EINVAL; parm = nla_data(tb[TCA_CSUM_PARMS]); - if (!tcf_idr_check(tn, parm->index, a, bind)) { + err = tcf_idr_check_alloc(tn, &parm->index, a, bind); + if (!err) { ret = tcf_idr_create(tn, parm->index, est, a, &act_csum_ops, bind, true); - if (ret) + if (ret) { + tcf_idr_cleanup(tn, parm->index); return ret; + } ret = ACT_P_CREATED; - } else { + } else if (err > 0) { if (bind)/* dont override defaults */ return 0; - tcf_idr_release(*a, bind); - if (!ovr) + if (!ovr) { + tcf_idr_release(*a, bind); return -EEXIST; + } + } else { + return err; } p = to_tcf_csum(*a); @@ -85,8 +92,7 @@ static int tcf_csum_init(struct net *net, struct nlattr *nla, params_new = kzalloc(sizeof(*params_new), GFP_KERNEL); if (unlikely(!params_new)) { - if (ret == ACT_P_CREATED) - tcf_idr_release(*a, bind); + tcf_idr_release(*a, bind); return -ENOMEM; } params_old = rtnl_dereference(p->params); @@ -597,8 +603,8 @@ static int tcf_csum_dump(struct sk_buff *skb, struct tc_action *a, int bind, struct tcf_csum_params *params; struct tc_csum opt = { .index = p->tcf_index, - .refcnt = p->tcf_refcnt - ref, - .bindcnt = p->tcf_bindcnt - bind, + .refcnt = refcount_read(&p->tcf_refcnt) - ref, + .bindcnt = atomic_read(&p->tcf_bindcnt) - bind, .action = p->tcf_action, }; struct tcf_t t; @@ -653,6 
+659,13 @@ static size_t tcf_csum_get_fill_size(const struct tc_action *act) return nla_total_size(sizeof(struct tc_csum)); } +static int tcf_csum_delete(struct net *net, u32 index) +{ + struct tc_action_net *tn = net_generic(net, csum_net_id); + + return tcf_idr_delete_index(tn, index); +} + static struct tc_action_ops act_csum_ops = { .kind = "csum", .type = TCA_ACT_CSUM, @@ -664,6 +677,7 @@ static struct tc_action_ops act_csum_ops = { .walk = tcf_csum_walker, .lookup = tcf_csum_search, .get_fill_size = tcf_csum_get_fill_size, + .delete = tcf_csum_delete, .size = sizeof(struct tcf_csum), }; diff --git a/net/sched/act_gact.c b/net/sched/act_gact.c index 4dc4f153cad8..661b72b9147d 100644 --- a/net/sched/act_gact.c +++ b/net/sched/act_gact.c @@ -56,7 +56,8 @@ static const struct nla_policy gact_policy[TCA_GACT_MAX + 1] = { static int tcf_gact_init(struct net *net, struct nlattr *nla, struct nlattr *est, struct tc_action **a, - int ovr, int bind, struct netlink_ext_ack *extack) + int ovr, int bind, bool rtnl_held, + struct netlink_ext_ack *extack) { struct tc_action_net *tn = net_generic(net, gact_net_id); struct nlattr *tb[TCA_GACT_MAX + 1]; @@ -90,18 +91,24 @@ static int tcf_gact_init(struct net *net, struct nlattr *nla, } #endif - if (!tcf_idr_check(tn, parm->index, a, bind)) { + err = tcf_idr_check_alloc(tn, &parm->index, a, bind); + if (!err) { ret = tcf_idr_create(tn, parm->index, est, a, &act_gact_ops, bind, true); - if (ret) + if (ret) { + tcf_idr_cleanup(tn, parm->index); return ret; + } ret = ACT_P_CREATED; - } else { + } else if (err > 0) { if (bind)/* dont override defaults */ return 0; - tcf_idr_release(*a, bind); - if (!ovr) + if (!ovr) { + tcf_idr_release(*a, bind); return -EEXIST; + } + } else { + return err; } gact = to_gact(*a); @@ -169,8 +176,8 @@ static int tcf_gact_dump(struct sk_buff *skb, struct tc_action *a, struct tcf_gact *gact = to_gact(a); struct tc_gact opt = { .index = gact->tcf_index, - .refcnt = gact->tcf_refcnt - ref, - .bindcnt = gact->tcf_bindcnt - bind, + .refcnt = refcount_read(&gact->tcf_refcnt) - ref, + .bindcnt = atomic_read(&gact->tcf_bindcnt) - bind, .action = gact->tcf_action, }; struct tcf_t t; @@ -230,6 +237,13 @@ static size_t tcf_gact_get_fill_size(const struct tc_action *act) return sz; } +static int tcf_gact_delete(struct net *net, u32 index) +{ + struct tc_action_net *tn = net_generic(net, gact_net_id); + + return tcf_idr_delete_index(tn, index); +} + static struct tc_action_ops act_gact_ops = { .kind = "gact", .type = TCA_ACT_GACT, @@ -241,6 +255,7 @@ static struct tc_action_ops act_gact_ops = { .walk = tcf_gact_walker, .lookup = tcf_gact_search, .get_fill_size = tcf_gact_get_fill_size, + .delete = tcf_gact_delete, .size = sizeof(struct tcf_gact), }; diff --git a/net/sched/act_ife.c b/net/sched/act_ife.c index 20d7d36b2fc9..3d6e265758c0 100644 --- a/net/sched/act_ife.c +++ b/net/sched/act_ife.c @@ -448,7 +448,8 @@ static int populate_metalist(struct tcf_ife_info *ife, struct nlattr **tb, static int tcf_ife_init(struct net *net, struct nlattr *nla, struct nlattr *est, struct tc_action **a, - int ovr, int bind, struct netlink_ext_ack *extack) + int ovr, int bind, bool rtnl_held, + struct netlink_ext_ack *extack) { struct tc_action_net *tn = net_generic(net, ife_net_id); struct nlattr *tb[TCA_IFE_MAX + 1]; @@ -483,7 +484,12 @@ static int tcf_ife_init(struct net *net, struct nlattr *nla, if (!p) return -ENOMEM; - exists = tcf_idr_check(tn, parm->index, a, bind); + err = tcf_idr_check_alloc(tn, &parm->index, a, bind); + if (err < 0) { + kfree(p); 
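+		/* err < 0 means no index was reserved, so only the
+		 * preallocated parameter block needs to be freed
+		 */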
+ return err; + } + exists = err; if (exists && bind) { kfree(p); return 0; @@ -493,16 +499,15 @@ static int tcf_ife_init(struct net *net, struct nlattr *nla, ret = tcf_idr_create(tn, parm->index, est, a, &act_ife_ops, bind, true); if (ret) { + tcf_idr_cleanup(tn, parm->index); kfree(p); return ret; } ret = ACT_P_CREATED; - } else { + } else if (!ovr) { tcf_idr_release(*a, bind); - if (!ovr) { - kfree(p); - return -EEXIST; - } + kfree(p); + return -EEXIST; } ife = to_ife(*a); @@ -547,6 +552,8 @@ metadata_parse_err: if (exists) spin_unlock_bh(&ife->tcf_lock); + tcf_idr_release(*a, bind); + kfree(p); return err; } @@ -596,8 +603,8 @@ static int tcf_ife_dump(struct sk_buff *skb, struct tc_action *a, int bind, struct tcf_ife_params *p = rtnl_dereference(ife->params); struct tc_ife opt = { .index = ife->tcf_index, - .refcnt = ife->tcf_refcnt - ref, - .bindcnt = ife->tcf_bindcnt - bind, + .refcnt = refcount_read(&ife->tcf_refcnt) - ref, + .bindcnt = atomic_read(&ife->tcf_bindcnt) - bind, .action = ife->tcf_action, .flags = p->flags, }; @@ -843,6 +850,13 @@ static int tcf_ife_search(struct net *net, struct tc_action **a, u32 index, return tcf_idr_search(tn, a, index); } +static int tcf_ife_delete(struct net *net, u32 index) +{ + struct tc_action_net *tn = net_generic(net, ife_net_id); + + return tcf_idr_delete_index(tn, index); +} + static struct tc_action_ops act_ife_ops = { .kind = "ife", .type = TCA_ACT_IFE, @@ -853,6 +867,7 @@ static struct tc_action_ops act_ife_ops = { .init = tcf_ife_init, .walk = tcf_ife_walker, .lookup = tcf_ife_search, + .delete = tcf_ife_delete, .size = sizeof(struct tcf_ife_info), }; diff --git a/net/sched/act_ipt.c b/net/sched/act_ipt.c index 14c312d7908f..0dc787a57798 100644 --- a/net/sched/act_ipt.c +++ b/net/sched/act_ipt.c @@ -119,13 +119,18 @@ static int __tcf_ipt_init(struct net *net, unsigned int id, struct nlattr *nla, if (tb[TCA_IPT_INDEX] != NULL) index = nla_get_u32(tb[TCA_IPT_INDEX]); - exists = tcf_idr_check(tn, index, a, bind); + err = tcf_idr_check_alloc(tn, &index, a, bind); + if (err < 0) + return err; + exists = err; if (exists && bind) return 0; if (tb[TCA_IPT_HOOK] == NULL || tb[TCA_IPT_TARG] == NULL) { if (exists) tcf_idr_release(*a, bind); + else + tcf_idr_cleanup(tn, index); return -EINVAL; } @@ -133,22 +138,27 @@ static int __tcf_ipt_init(struct net *net, unsigned int id, struct nlattr *nla, if (nla_len(tb[TCA_IPT_TARG]) < td->u.target_size) { if (exists) tcf_idr_release(*a, bind); + else + tcf_idr_cleanup(tn, index); return -EINVAL; } if (!exists) { ret = tcf_idr_create(tn, index, est, a, ops, bind, false); - if (ret) + if (ret) { + tcf_idr_cleanup(tn, index); return ret; + } ret = ACT_P_CREATED; } else { if (bind)/* dont override defaults */ return 0; - tcf_idr_release(*a, bind); - if (!ovr) + if (!ovr) { + tcf_idr_release(*a, bind); return -EEXIST; + } } hook = nla_get_u32(tb[TCA_IPT_HOOK]); @@ -196,7 +206,8 @@ err1: static int tcf_ipt_init(struct net *net, struct nlattr *nla, struct nlattr *est, struct tc_action **a, int ovr, - int bind, struct netlink_ext_ack *extack) + int bind, bool rtnl_held, + struct netlink_ext_ack *extack) { return __tcf_ipt_init(net, ipt_net_id, nla, est, a, &act_ipt_ops, ovr, bind); @@ -204,7 +215,8 @@ static int tcf_ipt_init(struct net *net, struct nlattr *nla, static int tcf_xt_init(struct net *net, struct nlattr *nla, struct nlattr *est, struct tc_action **a, int ovr, - int bind, struct netlink_ext_ack *extack) + int bind, bool unlocked, + struct netlink_ext_ack *extack) { return __tcf_ipt_init(net, xt_net_id, 
nla, est, a, &act_xt_ops, ovr, bind); @@ -280,8 +292,8 @@ static int tcf_ipt_dump(struct sk_buff *skb, struct tc_action *a, int bind, if (unlikely(!t)) goto nla_put_failure; - c.bindcnt = ipt->tcf_bindcnt - bind; - c.refcnt = ipt->tcf_refcnt - ref; + c.bindcnt = atomic_read(&ipt->tcf_bindcnt) - bind; + c.refcnt = refcount_read(&ipt->tcf_refcnt) - ref; strcpy(t->u.user.name, ipt->tcfi_t->u.kernel.target->name); if (nla_put(skb, TCA_IPT_TARG, ipt->tcfi_t->u.user.target_size, t) || @@ -322,6 +334,13 @@ static int tcf_ipt_search(struct net *net, struct tc_action **a, u32 index, return tcf_idr_search(tn, a, index); } +static int tcf_ipt_delete(struct net *net, u32 index) +{ + struct tc_action_net *tn = net_generic(net, ipt_net_id); + + return tcf_idr_delete_index(tn, index); +} + static struct tc_action_ops act_ipt_ops = { .kind = "ipt", .type = TCA_ACT_IPT, @@ -332,6 +351,7 @@ static struct tc_action_ops act_ipt_ops = { .init = tcf_ipt_init, .walk = tcf_ipt_walker, .lookup = tcf_ipt_search, + .delete = tcf_ipt_delete, .size = sizeof(struct tcf_ipt), }; @@ -372,6 +392,13 @@ static int tcf_xt_search(struct net *net, struct tc_action **a, u32 index, return tcf_idr_search(tn, a, index); } +static int tcf_xt_delete(struct net *net, u32 index) +{ + struct tc_action_net *tn = net_generic(net, xt_net_id); + + return tcf_idr_delete_index(tn, index); +} + static struct tc_action_ops act_xt_ops = { .kind = "xt", .type = TCA_ACT_XT, @@ -382,6 +409,7 @@ static struct tc_action_ops act_xt_ops = { .init = tcf_xt_init, .walk = tcf_xt_walker, .lookup = tcf_xt_search, + .delete = tcf_xt_delete, .size = sizeof(struct tcf_ipt), }; diff --git a/net/sched/act_mirred.c b/net/sched/act_mirred.c index fd34015331ab..6afd89a36c69 100644 --- a/net/sched/act_mirred.c +++ b/net/sched/act_mirred.c @@ -68,8 +68,9 @@ static unsigned int mirred_net_id; static struct tc_action_ops act_mirred_ops; static int tcf_mirred_init(struct net *net, struct nlattr *nla, - struct nlattr *est, struct tc_action **a, int ovr, - int bind, struct netlink_ext_ack *extack) + struct nlattr *est, struct tc_action **a, + int ovr, int bind, bool rtnl_held, + struct netlink_ext_ack *extack) { struct tc_action_net *tn = net_generic(net, mirred_net_id); struct nlattr *tb[TCA_MIRRED_MAX + 1]; @@ -78,7 +79,7 @@ static int tcf_mirred_init(struct net *net, struct nlattr *nla, struct tcf_mirred *m; struct net_device *dev; bool exists = false; - int ret; + int ret, err; if (!nla) { NL_SET_ERR_MSG_MOD(extack, "Mirred requires attributes to be passed"); @@ -93,7 +94,10 @@ static int tcf_mirred_init(struct net *net, struct nlattr *nla, } parm = nla_data(tb[TCA_MIRRED_PARMS]); - exists = tcf_idr_check(tn, parm->index, a, bind); + err = tcf_idr_check_alloc(tn, &parm->index, a, bind); + if (err < 0) + return err; + exists = err; if (exists && bind) return 0; @@ -106,6 +110,8 @@ static int tcf_mirred_init(struct net *net, struct nlattr *nla, default: if (exists) tcf_idr_release(*a, bind); + else + tcf_idr_cleanup(tn, parm->index); NL_SET_ERR_MSG_MOD(extack, "Unknown mirred option"); return -EINVAL; } @@ -114,6 +120,8 @@ static int tcf_mirred_init(struct net *net, struct nlattr *nla, if (dev == NULL) { if (exists) tcf_idr_release(*a, bind); + else + tcf_idr_cleanup(tn, parm->index); return -ENODEV; } mac_header_xmit = dev_is_mac_header_xmit(dev); @@ -123,18 +131,20 @@ static int tcf_mirred_init(struct net *net, struct nlattr *nla, if (!exists) { if (!dev) { + tcf_idr_cleanup(tn, parm->index); NL_SET_ERR_MSG_MOD(extack, "Specified device does not exist"); return 
-EINVAL; } ret = tcf_idr_create(tn, parm->index, est, a, &act_mirred_ops, bind, true); - if (ret) + if (ret) { + tcf_idr_cleanup(tn, parm->index); return ret; + } ret = ACT_P_CREATED; - } else { + } else if (!ovr) { tcf_idr_release(*a, bind); - if (!ovr) - return -EEXIST; + return -EEXIST; } m = to_mirred(*a); @@ -250,8 +260,8 @@ static int tcf_mirred_dump(struct sk_buff *skb, struct tc_action *a, int bind, struct tc_mirred opt = { .index = m->tcf_index, .action = m->tcf_action, - .refcnt = m->tcf_refcnt - ref, - .bindcnt = m->tcf_bindcnt - bind, + .refcnt = refcount_read(&m->tcf_refcnt) - ref, + .bindcnt = atomic_read(&m->tcf_bindcnt) - bind, .eaction = m->tcfm_eaction, .ifindex = dev ? dev->ifindex : 0, }; @@ -321,6 +331,13 @@ static struct net_device *tcf_mirred_get_dev(const struct tc_action *a) return rtnl_dereference(m->tcfm_dev); } +static int tcf_mirred_delete(struct net *net, u32 index) +{ + struct tc_action_net *tn = net_generic(net, mirred_net_id); + + return tcf_idr_delete_index(tn, index); +} + static struct tc_action_ops act_mirred_ops = { .kind = "mirred", .type = TCA_ACT_MIRRED, @@ -334,6 +351,7 @@ static struct tc_action_ops act_mirred_ops = { .lookup = tcf_mirred_search, .size = sizeof(struct tcf_mirred), .get_dev = tcf_mirred_get_dev, + .delete = tcf_mirred_delete, }; static __net_init int mirred_init_net(struct net *net) diff --git a/net/sched/act_nat.c b/net/sched/act_nat.c index 4b5848b6c252..4dd9188a72fd 100644 --- a/net/sched/act_nat.c +++ b/net/sched/act_nat.c @@ -38,7 +38,7 @@ static const struct nla_policy nat_policy[TCA_NAT_MAX + 1] = { static int tcf_nat_init(struct net *net, struct nlattr *nla, struct nlattr *est, struct tc_action **a, int ovr, int bind, - struct netlink_ext_ack *extack) + bool rtnl_held, struct netlink_ext_ack *extack) { struct tc_action_net *tn = net_generic(net, nat_net_id); struct nlattr *tb[TCA_NAT_MAX + 1]; @@ -57,18 +57,24 @@ static int tcf_nat_init(struct net *net, struct nlattr *nla, struct nlattr *est, return -EINVAL; parm = nla_data(tb[TCA_NAT_PARMS]); - if (!tcf_idr_check(tn, parm->index, a, bind)) { + err = tcf_idr_check_alloc(tn, &parm->index, a, bind); + if (!err) { ret = tcf_idr_create(tn, parm->index, est, a, &act_nat_ops, bind, false); - if (ret) + if (ret) { + tcf_idr_cleanup(tn, parm->index); return ret; + } ret = ACT_P_CREATED; - } else { + } else if (err > 0) { if (bind) return 0; - tcf_idr_release(*a, bind); - if (!ovr) + if (!ovr) { + tcf_idr_release(*a, bind); return -EEXIST; + } + } else { + return err; } p = to_tcf_nat(*a); @@ -257,8 +263,8 @@ static int tcf_nat_dump(struct sk_buff *skb, struct tc_action *a, .index = p->tcf_index, .action = p->tcf_action, - .refcnt = p->tcf_refcnt - ref, - .bindcnt = p->tcf_bindcnt - bind, + .refcnt = refcount_read(&p->tcf_refcnt) - ref, + .bindcnt = atomic_read(&p->tcf_bindcnt) - bind, }; struct tcf_t t; @@ -294,6 +300,13 @@ static int tcf_nat_search(struct net *net, struct tc_action **a, u32 index, return tcf_idr_search(tn, a, index); } +static int tcf_nat_delete(struct net *net, u32 index) +{ + struct tc_action_net *tn = net_generic(net, nat_net_id); + + return tcf_idr_delete_index(tn, index); +} + static struct tc_action_ops act_nat_ops = { .kind = "nat", .type = TCA_ACT_NAT, @@ -303,6 +316,7 @@ static struct tc_action_ops act_nat_ops = { .init = tcf_nat_init, .walk = tcf_nat_walker, .lookup = tcf_nat_search, + .delete = tcf_nat_delete, .size = sizeof(struct tcf_nat), }; diff --git a/net/sched/act_pedit.c b/net/sched/act_pedit.c index 8a925c72db5f..cc8ffcd1ddb5 100644 --- 
a/net/sched/act_pedit.c +++ b/net/sched/act_pedit.c @@ -132,20 +132,23 @@ static int tcf_pedit_key_ex_dump(struct sk_buff *skb, static int tcf_pedit_init(struct net *net, struct nlattr *nla, struct nlattr *est, struct tc_action **a, - int ovr, int bind, struct netlink_ext_ack *extack) + int ovr, int bind, bool rtnl_held, + struct netlink_ext_ack *extack) { struct tc_action_net *tn = net_generic(net, pedit_net_id); struct nlattr *tb[TCA_PEDIT_MAX + 1]; - struct nlattr *pattr; - struct tc_pedit *parm; - int ret = 0, err; - struct tcf_pedit *p; struct tc_pedit_key *keys = NULL; struct tcf_pedit_key_ex *keys_ex; + struct tc_pedit *parm; + struct nlattr *pattr; + struct tcf_pedit *p; + int ret = 0, err; int ksize; - if (nla == NULL) + if (!nla) { + NL_SET_ERR_MSG_MOD(extack, "Pedit requires attributes to be passed"); return -EINVAL; + } err = nla_parse_nested(tb, TCA_PEDIT_MAX, nla, pedit_policy, NULL); if (err < 0) @@ -154,47 +157,62 @@ static int tcf_pedit_init(struct net *net, struct nlattr *nla, pattr = tb[TCA_PEDIT_PARMS]; if (!pattr) pattr = tb[TCA_PEDIT_PARMS_EX]; - if (!pattr) + if (!pattr) { + NL_SET_ERR_MSG_MOD(extack, "Missing required TCA_PEDIT_PARMS or TCA_PEDIT_PARMS_EX pedit attribute"); return -EINVAL; + } parm = nla_data(pattr); ksize = parm->nkeys * sizeof(struct tc_pedit_key); - if (nla_len(pattr) < sizeof(*parm) + ksize) + if (nla_len(pattr) < sizeof(*parm) + ksize) { + NL_SET_ERR_MSG_ATTR(extack, pattr, "Length of TCA_PEDIT_PARMS or TCA_PEDIT_PARMS_EX pedit attribute is invalid"); return -EINVAL; + } keys_ex = tcf_pedit_keys_ex_parse(tb[TCA_PEDIT_KEYS_EX], parm->nkeys); if (IS_ERR(keys_ex)) return PTR_ERR(keys_ex); - if (!tcf_idr_check(tn, parm->index, a, bind)) { - if (!parm->nkeys) - return -EINVAL; + err = tcf_idr_check_alloc(tn, &parm->index, a, bind); + if (!err) { + if (!parm->nkeys) { + tcf_idr_cleanup(tn, parm->index); + NL_SET_ERR_MSG_MOD(extack, "Pedit requires keys to be passed"); + ret = -EINVAL; + goto out_free; + } ret = tcf_idr_create(tn, parm->index, est, a, &act_pedit_ops, bind, false); - if (ret) - return ret; + if (ret) { + tcf_idr_cleanup(tn, parm->index); + goto out_free; + } p = to_pedit(*a); keys = kmalloc(ksize, GFP_KERNEL); - if (keys == NULL) { + if (!keys) { tcf_idr_release(*a, bind); - kfree(keys_ex); - return -ENOMEM; + ret = -ENOMEM; + goto out_free; } ret = ACT_P_CREATED; - } else { + } else if (err > 0) { if (bind) - return 0; - tcf_idr_release(*a, bind); - if (!ovr) - return -EEXIST; + goto out_free; + if (!ovr) { + tcf_idr_release(*a, bind); + ret = -EEXIST; + goto out_free; + } p = to_pedit(*a); if (p->tcfp_nkeys && p->tcfp_nkeys != parm->nkeys) { keys = kmalloc(ksize, GFP_KERNEL); if (!keys) { - kfree(keys_ex); - return -ENOMEM; + ret = -ENOMEM; + goto out_free; } } + } else { + return err; } spin_lock_bh(&p->tcf_lock); @@ -214,12 +232,17 @@ static int tcf_pedit_init(struct net *net, struct nlattr *nla, if (ret == ACT_P_CREATED) tcf_idr_insert(tn, *a); return ret; +out_free: + kfree(keys_ex); + return ret; + } static void tcf_pedit_cleanup(struct tc_action *a) { struct tcf_pedit *p = to_pedit(a); struct tc_pedit_key *keys = p->tcfp_keys; + kfree(keys); kfree(p->tcfp_keys_ex); } @@ -284,11 +307,12 @@ static int tcf_pedit(struct sk_buff *skb, const struct tc_action *a, if (p->tcfp_nkeys > 0) { struct tc_pedit_key *tkey = p->tcfp_keys; struct tcf_pedit_key_ex *tkey_ex = p->tcfp_keys_ex; - enum pedit_header_type htype = TCA_PEDIT_KEY_EX_HDR_TYPE_NETWORK; + enum pedit_header_type htype = + TCA_PEDIT_KEY_EX_HDR_TYPE_NETWORK; enum pedit_cmd cmd 
= TCA_PEDIT_KEY_EX_CMD_SET; for (i = p->tcfp_nkeys; i > 0; i--, tkey++) { - u32 *ptr, _data; + u32 *ptr, hdata; int offset = tkey->off; int hoffset; u32 val; @@ -303,39 +327,39 @@ static int tcf_pedit(struct sk_buff *skb, const struct tc_action *a, rc = pedit_skb_hdr_offset(skb, htype, &hoffset); if (rc) { - pr_info("tc filter pedit bad header type specified (0x%x)\n", + pr_info("tc action pedit bad header type specified (0x%x)\n", htype); goto bad; } if (tkey->offmask) { - char *d, _d; + u8 *d, _d; if (!offset_valid(skb, hoffset + tkey->at)) { - pr_info("tc filter pedit 'at' offset %d out of bounds\n", + pr_info("tc action pedit 'at' offset %d out of bounds\n", hoffset + tkey->at); goto bad; } - d = skb_header_pointer(skb, hoffset + tkey->at, 1, - &_d); + d = skb_header_pointer(skb, hoffset + tkey->at, + sizeof(_d), &_d); if (!d) goto bad; offset += (*d & tkey->offmask) >> tkey->shift; } if (offset % 4) { - pr_info("tc filter pedit" - " offset must be on 32 bit boundaries\n"); + pr_info("tc action pedit offset must be on 32 bit boundaries\n"); goto bad; } if (!offset_valid(skb, hoffset + offset)) { - pr_info("tc filter pedit offset %d out of bounds\n", + pr_info("tc action pedit offset %d out of bounds\n", hoffset + offset); goto bad; } - ptr = skb_header_pointer(skb, hoffset + offset, 4, &_data); + ptr = skb_header_pointer(skb, hoffset + offset, + sizeof(hdata), &hdata); if (!ptr) goto bad; /* just do it, baby */ @@ -347,19 +371,20 @@ static int tcf_pedit(struct sk_buff *skb, const struct tc_action *a, val = (*ptr + tkey->val) & ~tkey->mask; break; default: - pr_info("tc filter pedit bad command (%d)\n", + pr_info("tc action pedit bad command (%d)\n", cmd); goto bad; } *ptr = ((*ptr & tkey->mask) ^ val); - if (ptr == &_data) + if (ptr == &hdata) skb_store_bits(skb, hoffset + offset, ptr, 4); } goto done; - } else + } else { WARN(1, "pedit BUG: index %d\n", p->tcf_index); + } bad: p->tcf_qstats.overlimits++; @@ -391,8 +416,8 @@ static int tcf_pedit_dump(struct sk_buff *skb, struct tc_action *a, opt->nkeys = p->tcfp_nkeys; opt->flags = p->tcfp_flags; opt->action = p->tcf_action; - opt->refcnt = p->tcf_refcnt - ref; - opt->bindcnt = p->tcf_bindcnt - bind; + opt->refcnt = refcount_read(&p->tcf_refcnt) - ref; + opt->bindcnt = atomic_read(&p->tcf_bindcnt) - bind; if (p->tcfp_keys_ex) { tcf_pedit_key_ex_dump(skb, p->tcfp_keys_ex, p->tcfp_nkeys); @@ -435,6 +460,13 @@ static int tcf_pedit_search(struct net *net, struct tc_action **a, u32 index, return tcf_idr_search(tn, a, index); } +static int tcf_pedit_delete(struct net *net, u32 index) +{ + struct tc_action_net *tn = net_generic(net, pedit_net_id); + + return tcf_idr_delete_index(tn, index); +} + static struct tc_action_ops act_pedit_ops = { .kind = "pedit", .type = TCA_ACT_PEDIT, @@ -445,6 +477,7 @@ static struct tc_action_ops act_pedit_ops = { .init = tcf_pedit_init, .walk = tcf_pedit_walker, .lookup = tcf_pedit_search, + .delete = tcf_pedit_delete, .size = sizeof(struct tcf_pedit), }; diff --git a/net/sched/act_police.c b/net/sched/act_police.c index 4e72bc2a0dfb..1f3192ea8df7 100644 --- a/net/sched/act_police.c +++ b/net/sched/act_police.c @@ -75,7 +75,7 @@ static const struct nla_policy police_policy[TCA_POLICE_MAX + 1] = { static int tcf_act_police_init(struct net *net, struct nlattr *nla, struct nlattr *est, struct tc_action **a, - int ovr, int bind, + int ovr, int bind, bool rtnl_held, struct netlink_ext_ack *extack) { int ret = 0, err; @@ -101,20 +101,24 @@ static int tcf_act_police_init(struct net *net, struct nlattr *nla, return 
-EINVAL; parm = nla_data(tb[TCA_POLICE_TBF]); - exists = tcf_idr_check(tn, parm->index, a, bind); + err = tcf_idr_check_alloc(tn, &parm->index, a, bind); + if (err < 0) + return err; + exists = err; if (exists && bind) return 0; if (!exists) { ret = tcf_idr_create(tn, parm->index, NULL, a, &act_police_ops, bind, false); - if (ret) + if (ret) { + tcf_idr_cleanup(tn, parm->index); return ret; + } ret = ACT_P_CREATED; - } else { + } else if (!ovr) { tcf_idr_release(*a, bind); - if (!ovr) - return -EEXIST; + return -EEXIST; } police = to_police(*a); @@ -195,8 +199,7 @@ static int tcf_act_police_init(struct net *net, struct nlattr *nla, failure: qdisc_put_rtab(P_tab); qdisc_put_rtab(R_tab); - if (ret == ACT_P_CREATED) - tcf_idr_release(*a, bind); + tcf_idr_release(*a, bind); return err; } @@ -274,8 +277,8 @@ static int tcf_act_police_dump(struct sk_buff *skb, struct tc_action *a, .action = police->tcf_action, .mtu = police->tcfp_mtu, .burst = PSCHED_NS2TICKS(police->tcfp_burst), - .refcnt = police->tcf_refcnt - ref, - .bindcnt = police->tcf_bindcnt - bind, + .refcnt = refcount_read(&police->tcf_refcnt) - ref, + .bindcnt = atomic_read(&police->tcf_bindcnt) - bind, }; struct tcf_t t; @@ -314,6 +317,13 @@ static int tcf_police_search(struct net *net, struct tc_action **a, u32 index, return tcf_idr_search(tn, a, index); } +static int tcf_police_delete(struct net *net, u32 index) +{ + struct tc_action_net *tn = net_generic(net, police_net_id); + + return tcf_idr_delete_index(tn, index); +} + MODULE_AUTHOR("Alexey Kuznetsov"); MODULE_DESCRIPTION("Policing actions"); MODULE_LICENSE("GPL"); @@ -327,6 +337,7 @@ static struct tc_action_ops act_police_ops = { .init = tcf_act_police_init, .walk = tcf_act_police_walker, .lookup = tcf_police_search, + .delete = tcf_police_delete, .size = sizeof(struct tcf_police), }; diff --git a/net/sched/act_sample.c b/net/sched/act_sample.c index 5db358497c9e..3079e7be5bde 100644 --- a/net/sched/act_sample.c +++ b/net/sched/act_sample.c @@ -37,7 +37,8 @@ static const struct nla_policy sample_policy[TCA_SAMPLE_MAX + 1] = { static int tcf_sample_init(struct net *net, struct nlattr *nla, struct nlattr *est, struct tc_action **a, int ovr, - int bind, struct netlink_ext_ack *extack) + int bind, bool rtnl_held, + struct netlink_ext_ack *extack) { struct tc_action_net *tn = net_generic(net, sample_net_id); struct nlattr *tb[TCA_SAMPLE_MAX + 1]; @@ -45,7 +46,7 @@ static int tcf_sample_init(struct net *net, struct nlattr *nla, struct tc_sample *parm; struct tcf_sample *s; bool exists = false; - int ret; + int ret, err; if (!nla) return -EINVAL; @@ -58,20 +59,24 @@ static int tcf_sample_init(struct net *net, struct nlattr *nla, parm = nla_data(tb[TCA_SAMPLE_PARMS]); - exists = tcf_idr_check(tn, parm->index, a, bind); + err = tcf_idr_check_alloc(tn, &parm->index, a, bind); + if (err < 0) + return err; + exists = err; if (exists && bind) return 0; if (!exists) { ret = tcf_idr_create(tn, parm->index, est, a, &act_sample_ops, bind, false); - if (ret) + if (ret) { + tcf_idr_cleanup(tn, parm->index); return ret; + } ret = ACT_P_CREATED; - } else { + } else if (!ovr) { tcf_idr_release(*a, bind); - if (!ovr) - return -EEXIST; + return -EEXIST; } s = to_sample(*a); @@ -80,8 +85,7 @@ static int tcf_sample_init(struct net *net, struct nlattr *nla, s->psample_group_num = nla_get_u32(tb[TCA_SAMPLE_PSAMPLE_GROUP]); psample_group = psample_group_get(net, s->psample_group_num); if (!psample_group) { - if (ret == ACT_P_CREATED) - tcf_idr_release(*a, bind); + tcf_idr_release(*a, bind); return 
-ENOMEM; } RCU_INIT_POINTER(s->psample_group, psample_group); @@ -173,8 +177,8 @@ static int tcf_sample_dump(struct sk_buff *skb, struct tc_action *a, struct tc_sample opt = { .index = s->tcf_index, .action = s->tcf_action, - .refcnt = s->tcf_refcnt - ref, - .bindcnt = s->tcf_bindcnt - bind, + .refcnt = refcount_read(&s->tcf_refcnt) - ref, + .bindcnt = atomic_read(&s->tcf_bindcnt) - bind, }; struct tcf_t t; @@ -219,6 +223,13 @@ static int tcf_sample_search(struct net *net, struct tc_action **a, u32 index, return tcf_idr_search(tn, a, index); } +static int tcf_sample_delete(struct net *net, u32 index) +{ + struct tc_action_net *tn = net_generic(net, sample_net_id); + + return tcf_idr_delete_index(tn, index); +} + static struct tc_action_ops act_sample_ops = { .kind = "sample", .type = TCA_ACT_SAMPLE, @@ -229,6 +240,7 @@ static struct tc_action_ops act_sample_ops = { .cleanup = tcf_sample_cleanup, .walk = tcf_sample_walker, .lookup = tcf_sample_search, + .delete = tcf_sample_delete, .size = sizeof(struct tcf_sample), }; diff --git a/net/sched/act_simple.c b/net/sched/act_simple.c index 98c4afe7c15b..aa51152e0066 100644 --- a/net/sched/act_simple.c +++ b/net/sched/act_simple.c @@ -79,7 +79,8 @@ static const struct nla_policy simple_policy[TCA_DEF_MAX + 1] = { static int tcf_simp_init(struct net *net, struct nlattr *nla, struct nlattr *est, struct tc_action **a, - int ovr, int bind, struct netlink_ext_ack *extack) + int ovr, int bind, bool rtnl_held, + struct netlink_ext_ack *extack) { struct tc_action_net *tn = net_generic(net, simp_net_id); struct nlattr *tb[TCA_DEF_MAX + 1]; @@ -99,21 +100,28 @@ static int tcf_simp_init(struct net *net, struct nlattr *nla, return -EINVAL; parm = nla_data(tb[TCA_DEF_PARMS]); - exists = tcf_idr_check(tn, parm->index, a, bind); + err = tcf_idr_check_alloc(tn, &parm->index, a, bind); + if (err < 0) + return err; + exists = err; if (exists && bind) return 0; if (tb[TCA_DEF_DATA] == NULL) { if (exists) tcf_idr_release(*a, bind); + else + tcf_idr_cleanup(tn, parm->index); return -EINVAL; } if (!exists) { ret = tcf_idr_create(tn, parm->index, est, a, &act_simp_ops, bind, false); - if (ret) + if (ret) { + tcf_idr_cleanup(tn, parm->index); return ret; + } d = to_defact(*a); ret = alloc_defdata(d, tb[TCA_DEF_DATA]); @@ -126,9 +134,10 @@ static int tcf_simp_init(struct net *net, struct nlattr *nla, } else { d = to_defact(*a); - tcf_idr_release(*a, bind); - if (!ovr) + if (!ovr) { + tcf_idr_release(*a, bind); return -EEXIST; + } reset_policy(d, tb[TCA_DEF_DATA], parm); } @@ -145,8 +154,8 @@ static int tcf_simp_dump(struct sk_buff *skb, struct tc_action *a, struct tcf_defact *d = to_defact(a); struct tc_defact opt = { .index = d->tcf_index, - .refcnt = d->tcf_refcnt - ref, - .bindcnt = d->tcf_bindcnt - bind, + .refcnt = refcount_read(&d->tcf_refcnt) - ref, + .bindcnt = atomic_read(&d->tcf_bindcnt) - bind, .action = d->tcf_action, }; struct tcf_t t; @@ -183,6 +192,13 @@ static int tcf_simp_search(struct net *net, struct tc_action **a, u32 index, return tcf_idr_search(tn, a, index); } +static int tcf_simp_delete(struct net *net, u32 index) +{ + struct tc_action_net *tn = net_generic(net, simp_net_id); + + return tcf_idr_delete_index(tn, index); +} + static struct tc_action_ops act_simp_ops = { .kind = "simple", .type = TCA_ACT_SIMP, @@ -193,6 +209,7 @@ static struct tc_action_ops act_simp_ops = { .init = tcf_simp_init, .walk = tcf_simp_walker, .lookup = tcf_simp_search, + .delete = tcf_simp_delete, .size = sizeof(struct tcf_defact), }; diff --git a/net/sched/act_skbedit.c 
b/net/sched/act_skbedit.c index 6138d1d71900..da56e6938c9e 100644 --- a/net/sched/act_skbedit.c +++ b/net/sched/act_skbedit.c @@ -23,6 +23,9 @@ #include <linux/rtnetlink.h> #include <net/netlink.h> #include <net/pkt_sched.h> +#include <net/ip.h> +#include <net/ipv6.h> +#include <net/dsfield.h> #include <linux/tc_act/tc_skbedit.h> #include <net/tc_act/tc_skbedit.h> @@ -34,25 +37,54 @@ static int tcf_skbedit(struct sk_buff *skb, const struct tc_action *a, struct tcf_result *res) { struct tcf_skbedit *d = to_skbedit(a); + struct tcf_skbedit_params *params; + int action; - spin_lock(&d->tcf_lock); tcf_lastuse_update(&d->tcf_tm); - bstats_update(&d->tcf_bstats, skb); - - if (d->flags & SKBEDIT_F_PRIORITY) - skb->priority = d->priority; - if (d->flags & SKBEDIT_F_QUEUE_MAPPING && - skb->dev->real_num_tx_queues > d->queue_mapping) - skb_set_queue_mapping(skb, d->queue_mapping); - if (d->flags & SKBEDIT_F_MARK) { - skb->mark &= ~d->mask; - skb->mark |= d->mark & d->mask; + bstats_cpu_update(this_cpu_ptr(d->common.cpu_bstats), skb); + + rcu_read_lock(); + params = rcu_dereference(d->params); + action = READ_ONCE(d->tcf_action); + + if (params->flags & SKBEDIT_F_PRIORITY) + skb->priority = params->priority; + if (params->flags & SKBEDIT_F_INHERITDSFIELD) { + int wlen = skb_network_offset(skb); + + switch (tc_skb_protocol(skb)) { + case htons(ETH_P_IP): + wlen += sizeof(struct iphdr); + if (!pskb_may_pull(skb, wlen)) + goto err; + skb->priority = ipv4_get_dsfield(ip_hdr(skb)) >> 2; + break; + + case htons(ETH_P_IPV6): + wlen += sizeof(struct ipv6hdr); + if (!pskb_may_pull(skb, wlen)) + goto err; + skb->priority = ipv6_get_dsfield(ipv6_hdr(skb)) >> 2; + break; + } } - if (d->flags & SKBEDIT_F_PTYPE) - skb->pkt_type = d->ptype; - - spin_unlock(&d->tcf_lock); - return d->tcf_action; + if (params->flags & SKBEDIT_F_QUEUE_MAPPING && + skb->dev->real_num_tx_queues > params->queue_mapping) + skb_set_queue_mapping(skb, params->queue_mapping); + if (params->flags & SKBEDIT_F_MARK) { + skb->mark &= ~params->mask; + skb->mark |= params->mark & params->mask; + } + if (params->flags & SKBEDIT_F_PTYPE) + skb->pkt_type = params->ptype; + +unlock: + rcu_read_unlock(); + return action; +err: + qstats_drop_inc(this_cpu_ptr(d->common.cpu_qstats)); + action = TC_ACT_SHOT; + goto unlock; } static const struct nla_policy skbedit_policy[TCA_SKBEDIT_MAX + 1] = { @@ -62,13 +94,16 @@ static const struct nla_policy skbedit_policy[TCA_SKBEDIT_MAX + 1] = { [TCA_SKBEDIT_MARK] = { .len = sizeof(u32) }, [TCA_SKBEDIT_PTYPE] = { .len = sizeof(u16) }, [TCA_SKBEDIT_MASK] = { .len = sizeof(u32) }, + [TCA_SKBEDIT_FLAGS] = { .len = sizeof(u64) }, }; static int tcf_skbedit_init(struct net *net, struct nlattr *nla, struct nlattr *est, struct tc_action **a, - int ovr, int bind, struct netlink_ext_ack *extack) + int ovr, int bind, bool rtnl_held, + struct netlink_ext_ack *extack) { struct tc_action_net *tn = net_generic(net, skbedit_net_id); + struct tcf_skbedit_params *params_old, *params_new; struct nlattr *tb[TCA_SKBEDIT_MAX + 1]; struct tc_skbedit *parm; struct tcf_skbedit *d; @@ -114,52 +149,76 @@ static int tcf_skbedit_init(struct net *net, struct nlattr *nla, mask = nla_data(tb[TCA_SKBEDIT_MASK]); } + if (tb[TCA_SKBEDIT_FLAGS] != NULL) { + u64 *pure_flags = nla_data(tb[TCA_SKBEDIT_FLAGS]); + + if (*pure_flags & SKBEDIT_F_INHERITDSFIELD) + flags |= SKBEDIT_F_INHERITDSFIELD; + } + parm = nla_data(tb[TCA_SKBEDIT_PARMS]); - exists = tcf_idr_check(tn, parm->index, a, bind); + err = tcf_idr_check_alloc(tn, &parm->index, a, bind); + if (err < 
0) + return err; + exists = err; if (exists && bind) return 0; if (!flags) { if (exists) tcf_idr_release(*a, bind); + else + tcf_idr_cleanup(tn, parm->index); return -EINVAL; } if (!exists) { ret = tcf_idr_create(tn, parm->index, est, a, - &act_skbedit_ops, bind, false); - if (ret) + &act_skbedit_ops, bind, true); + if (ret) { + tcf_idr_cleanup(tn, parm->index); return ret; + } d = to_skbedit(*a); ret = ACT_P_CREATED; } else { d = to_skbedit(*a); - tcf_idr_release(*a, bind); - if (!ovr) + if (!ovr) { + tcf_idr_release(*a, bind); return -EEXIST; + } } - spin_lock_bh(&d->tcf_lock); + ASSERT_RTNL(); + + params_new = kzalloc(sizeof(*params_new), GFP_KERNEL); + if (unlikely(!params_new)) { + if (ret == ACT_P_CREATED) + tcf_idr_release(*a, bind); + return -ENOMEM; + } - d->flags = flags; + params_new->flags = flags; if (flags & SKBEDIT_F_PRIORITY) - d->priority = *priority; + params_new->priority = *priority; if (flags & SKBEDIT_F_QUEUE_MAPPING) - d->queue_mapping = *queue_mapping; + params_new->queue_mapping = *queue_mapping; if (flags & SKBEDIT_F_MARK) - d->mark = *mark; + params_new->mark = *mark; if (flags & SKBEDIT_F_PTYPE) - d->ptype = *ptype; + params_new->ptype = *ptype; /* default behaviour is to use all the bits */ - d->mask = 0xffffffff; + params_new->mask = 0xffffffff; if (flags & SKBEDIT_F_MASK) - d->mask = *mask; + params_new->mask = *mask; d->tcf_action = parm->action; - - spin_unlock_bh(&d->tcf_lock); + params_old = rtnl_dereference(d->params); + rcu_assign_pointer(d->params, params_new); + if (params_old) + kfree_rcu(params_old, rcu); if (ret == ACT_P_CREATED) tcf_idr_insert(tn, *a); @@ -171,30 +230,39 @@ static int tcf_skbedit_dump(struct sk_buff *skb, struct tc_action *a, { unsigned char *b = skb_tail_pointer(skb); struct tcf_skbedit *d = to_skbedit(a); + struct tcf_skbedit_params *params; struct tc_skbedit opt = { .index = d->tcf_index, - .refcnt = d->tcf_refcnt - ref, - .bindcnt = d->tcf_bindcnt - bind, + .refcnt = refcount_read(&d->tcf_refcnt) - ref, + .bindcnt = atomic_read(&d->tcf_bindcnt) - bind, .action = d->tcf_action, }; + u64 pure_flags = 0; struct tcf_t t; + params = rtnl_dereference(d->params); + if (nla_put(skb, TCA_SKBEDIT_PARMS, sizeof(opt), &opt)) goto nla_put_failure; - if ((d->flags & SKBEDIT_F_PRIORITY) && - nla_put_u32(skb, TCA_SKBEDIT_PRIORITY, d->priority)) + if ((params->flags & SKBEDIT_F_PRIORITY) && + nla_put_u32(skb, TCA_SKBEDIT_PRIORITY, params->priority)) + goto nla_put_failure; + if ((params->flags & SKBEDIT_F_QUEUE_MAPPING) && + nla_put_u16(skb, TCA_SKBEDIT_QUEUE_MAPPING, params->queue_mapping)) goto nla_put_failure; - if ((d->flags & SKBEDIT_F_QUEUE_MAPPING) && - nla_put_u16(skb, TCA_SKBEDIT_QUEUE_MAPPING, d->queue_mapping)) + if ((params->flags & SKBEDIT_F_MARK) && + nla_put_u32(skb, TCA_SKBEDIT_MARK, params->mark)) goto nla_put_failure; - if ((d->flags & SKBEDIT_F_MARK) && - nla_put_u32(skb, TCA_SKBEDIT_MARK, d->mark)) + if ((params->flags & SKBEDIT_F_PTYPE) && + nla_put_u16(skb, TCA_SKBEDIT_PTYPE, params->ptype)) goto nla_put_failure; - if ((d->flags & SKBEDIT_F_PTYPE) && - nla_put_u16(skb, TCA_SKBEDIT_PTYPE, d->ptype)) + if ((params->flags & SKBEDIT_F_MASK) && + nla_put_u32(skb, TCA_SKBEDIT_MASK, params->mask)) goto nla_put_failure; - if ((d->flags & SKBEDIT_F_MASK) && - nla_put_u32(skb, TCA_SKBEDIT_MASK, d->mask)) + if (params->flags & SKBEDIT_F_INHERITDSFIELD) + pure_flags |= SKBEDIT_F_INHERITDSFIELD; + if (pure_flags != 0 && + nla_put(skb, TCA_SKBEDIT_FLAGS, sizeof(pure_flags), &pure_flags)) goto nla_put_failure; tcf_tm_dump(&t, 
&d->tcf_tm); @@ -207,6 +275,16 @@ nla_put_failure: return -1; } +static void tcf_skbedit_cleanup(struct tc_action *a) +{ + struct tcf_skbedit *d = to_skbedit(a); + struct tcf_skbedit_params *params; + + params = rcu_dereference_protected(d->params, 1); + if (params) + kfree_rcu(params, rcu); +} + static int tcf_skbedit_walker(struct net *net, struct sk_buff *skb, struct netlink_callback *cb, int type, const struct tc_action_ops *ops, @@ -225,6 +303,13 @@ static int tcf_skbedit_search(struct net *net, struct tc_action **a, u32 index, return tcf_idr_search(tn, a, index); } +static int tcf_skbedit_delete(struct net *net, u32 index) +{ + struct tc_action_net *tn = net_generic(net, skbedit_net_id); + + return tcf_idr_delete_index(tn, index); +} + static struct tc_action_ops act_skbedit_ops = { .kind = "skbedit", .type = TCA_ACT_SKBEDIT, @@ -232,8 +317,10 @@ static struct tc_action_ops act_skbedit_ops = { .act = tcf_skbedit, .dump = tcf_skbedit_dump, .init = tcf_skbedit_init, + .cleanup = tcf_skbedit_cleanup, .walk = tcf_skbedit_walker, .lookup = tcf_skbedit_search, + .delete = tcf_skbedit_delete, .size = sizeof(struct tcf_skbedit), }; diff --git a/net/sched/act_skbmod.c b/net/sched/act_skbmod.c index ad050d7d4b46..cdc6bacfb190 100644 --- a/net/sched/act_skbmod.c +++ b/net/sched/act_skbmod.c @@ -84,7 +84,8 @@ static const struct nla_policy skbmod_policy[TCA_SKBMOD_MAX + 1] = { static int tcf_skbmod_init(struct net *net, struct nlattr *nla, struct nlattr *est, struct tc_action **a, - int ovr, int bind, struct netlink_ext_ack *extack) + int ovr, int bind, bool rtnl_held, + struct netlink_ext_ack *extack) { struct tc_action_net *tn = net_generic(net, skbmod_net_id); struct nlattr *tb[TCA_SKBMOD_MAX + 1]; @@ -127,27 +128,33 @@ static int tcf_skbmod_init(struct net *net, struct nlattr *nla, if (parm->flags & SKBMOD_F_SWAPMAC) lflags = SKBMOD_F_SWAPMAC; - exists = tcf_idr_check(tn, parm->index, a, bind); + err = tcf_idr_check_alloc(tn, &parm->index, a, bind); + if (err < 0) + return err; + exists = err; if (exists && bind) return 0; if (!lflags) { if (exists) tcf_idr_release(*a, bind); + else + tcf_idr_cleanup(tn, parm->index); return -EINVAL; } if (!exists) { ret = tcf_idr_create(tn, parm->index, est, a, &act_skbmod_ops, bind, true); - if (ret) + if (ret) { + tcf_idr_cleanup(tn, parm->index); return ret; + } ret = ACT_P_CREATED; - } else { + } else if (!ovr) { tcf_idr_release(*a, bind); - if (!ovr) - return -EEXIST; + return -EEXIST; } d = to_skbmod(*a); @@ -155,8 +162,7 @@ static int tcf_skbmod_init(struct net *net, struct nlattr *nla, ASSERT_RTNL(); p = kzalloc(sizeof(struct tcf_skbmod_params), GFP_KERNEL); if (unlikely(!p)) { - if (ret == ACT_P_CREATED) - tcf_idr_release(*a, bind); + tcf_idr_release(*a, bind); return -ENOMEM; } @@ -205,8 +211,8 @@ static int tcf_skbmod_dump(struct sk_buff *skb, struct tc_action *a, struct tcf_skbmod_params *p = rtnl_dereference(d->skbmod_p); struct tc_skbmod opt = { .index = d->tcf_index, - .refcnt = d->tcf_refcnt - ref, - .bindcnt = d->tcf_bindcnt - bind, + .refcnt = refcount_read(&d->tcf_refcnt) - ref, + .bindcnt = atomic_read(&d->tcf_bindcnt) - bind, .action = d->tcf_action, }; struct tcf_t t; @@ -252,6 +258,13 @@ static int tcf_skbmod_search(struct net *net, struct tc_action **a, u32 index, return tcf_idr_search(tn, a, index); } +static int tcf_skbmod_delete(struct net *net, u32 index) +{ + struct tc_action_net *tn = net_generic(net, skbmod_net_id); + + return tcf_idr_delete_index(tn, index); +} + static struct tc_action_ops act_skbmod_ops = { .kind = 
"skbmod", .type = TCA_ACT_SKBMOD, @@ -262,6 +275,7 @@ static struct tc_action_ops act_skbmod_ops = { .cleanup = tcf_skbmod_cleanup, .walk = tcf_skbmod_walker, .lookup = tcf_skbmod_search, + .delete = tcf_skbmod_delete, .size = sizeof(struct tcf_skbmod), }; diff --git a/net/sched/act_tunnel_key.c b/net/sched/act_tunnel_key.c index 9bc6c2ae98a5..f811850fd1d0 100644 --- a/net/sched/act_tunnel_key.c +++ b/net/sched/act_tunnel_key.c @@ -13,6 +13,7 @@ #include <linux/kernel.h> #include <linux/skbuff.h> #include <linux/rtnetlink.h> +#include <net/geneve.h> #include <net/netlink.h> #include <net/pkt_sched.h> #include <net/dst.h> @@ -57,6 +58,135 @@ static int tunnel_key_act(struct sk_buff *skb, const struct tc_action *a, return action; } +static const struct nla_policy +enc_opts_policy[TCA_TUNNEL_KEY_ENC_OPTS_MAX + 1] = { + [TCA_TUNNEL_KEY_ENC_OPTS_GENEVE] = { .type = NLA_NESTED }, +}; + +static const struct nla_policy +geneve_opt_policy[TCA_TUNNEL_KEY_ENC_OPT_GENEVE_MAX + 1] = { + [TCA_TUNNEL_KEY_ENC_OPT_GENEVE_CLASS] = { .type = NLA_U16 }, + [TCA_TUNNEL_KEY_ENC_OPT_GENEVE_TYPE] = { .type = NLA_U8 }, + [TCA_TUNNEL_KEY_ENC_OPT_GENEVE_DATA] = { .type = NLA_BINARY, + .len = 128 }, +}; + +static int +tunnel_key_copy_geneve_opt(const struct nlattr *nla, void *dst, int dst_len, + struct netlink_ext_ack *extack) +{ + struct nlattr *tb[TCA_TUNNEL_KEY_ENC_OPT_GENEVE_MAX + 1]; + int err, data_len, opt_len; + u8 *data; + + err = nla_parse_nested(tb, TCA_TUNNEL_KEY_ENC_OPT_GENEVE_MAX, + nla, geneve_opt_policy, extack); + if (err < 0) + return err; + + if (!tb[TCA_TUNNEL_KEY_ENC_OPT_GENEVE_CLASS] || + !tb[TCA_TUNNEL_KEY_ENC_OPT_GENEVE_TYPE] || + !tb[TCA_TUNNEL_KEY_ENC_OPT_GENEVE_DATA]) { + NL_SET_ERR_MSG(extack, "Missing tunnel key geneve option class, type or data"); + return -EINVAL; + } + + data = nla_data(tb[TCA_TUNNEL_KEY_ENC_OPT_GENEVE_DATA]); + data_len = nla_len(tb[TCA_TUNNEL_KEY_ENC_OPT_GENEVE_DATA]); + if (data_len < 4) { + NL_SET_ERR_MSG(extack, "Tunnel key geneve option data is less than 4 bytes long"); + return -ERANGE; + } + if (data_len % 4) { + NL_SET_ERR_MSG(extack, "Tunnel key geneve option data is not a multiple of 4 bytes long"); + return -ERANGE; + } + + opt_len = sizeof(struct geneve_opt) + data_len; + if (dst) { + struct geneve_opt *opt = dst; + + WARN_ON(dst_len < opt_len); + + opt->opt_class = + nla_get_be16(tb[TCA_TUNNEL_KEY_ENC_OPT_GENEVE_CLASS]); + opt->type = nla_get_u8(tb[TCA_TUNNEL_KEY_ENC_OPT_GENEVE_TYPE]); + opt->length = data_len / 4; /* length is in units of 4 bytes */ + opt->r1 = 0; + opt->r2 = 0; + opt->r3 = 0; + + memcpy(opt + 1, data, data_len); + } + + return opt_len; +} + +static int tunnel_key_copy_opts(const struct nlattr *nla, u8 *dst, + int dst_len, struct netlink_ext_ack *extack) +{ + int err, rem, opt_len, len = nla_len(nla), opts_len = 0; + const struct nlattr *attr, *head = nla_data(nla); + + err = nla_validate(head, len, TCA_TUNNEL_KEY_ENC_OPTS_MAX, + enc_opts_policy, extack); + if (err) + return err; + + nla_for_each_attr(attr, head, len, rem) { + switch (nla_type(attr)) { + case TCA_TUNNEL_KEY_ENC_OPTS_GENEVE: + opt_len = tunnel_key_copy_geneve_opt(attr, dst, + dst_len, extack); + if (opt_len < 0) + return opt_len; + opts_len += opt_len; + if (dst) { + dst_len -= opt_len; + dst += opt_len; + } + break; + } + } + + if (!opts_len) { + NL_SET_ERR_MSG(extack, "Empty list of tunnel options"); + return -EINVAL; + } + + if (rem > 0) { + NL_SET_ERR_MSG(extack, "Trailing data after parsing tunnel key options attributes"); + return -EINVAL; + } + + return opts_len; +} 
+ +static int tunnel_key_get_opts_len(struct nlattr *nla, + struct netlink_ext_ack *extack) +{ + return tunnel_key_copy_opts(nla, NULL, 0, extack); +} + +static int tunnel_key_opts_set(struct nlattr *nla, struct ip_tunnel_info *info, + int opts_len, struct netlink_ext_ack *extack) +{ + info->options_len = opts_len; + switch (nla_type(nla_data(nla))) { + case TCA_TUNNEL_KEY_ENC_OPTS_GENEVE: +#if IS_ENABLED(CONFIG_INET) + info->key.tun_flags |= TUNNEL_GENEVE_OPT; + return tunnel_key_copy_opts(nla, ip_tunnel_info_opts(info), + opts_len, extack); +#else + return -EAFNOSUPPORT; +#endif + default: + NL_SET_ERR_MSG(extack, "Cannot set tunnel options for unknown tunnel type"); + return -EINVAL; + } +} + static const struct nla_policy tunnel_key_policy[TCA_TUNNEL_KEY_MAX + 1] = { [TCA_TUNNEL_KEY_PARMS] = { .len = sizeof(struct tc_tunnel_key) }, [TCA_TUNNEL_KEY_ENC_IPV4_SRC] = { .type = NLA_U32 }, @@ -66,11 +196,15 @@ static const struct nla_policy tunnel_key_policy[TCA_TUNNEL_KEY_MAX + 1] = { [TCA_TUNNEL_KEY_ENC_KEY_ID] = { .type = NLA_U32 }, [TCA_TUNNEL_KEY_ENC_DST_PORT] = {.type = NLA_U16}, [TCA_TUNNEL_KEY_NO_CSUM] = { .type = NLA_U8 }, + [TCA_TUNNEL_KEY_ENC_OPTS] = { .type = NLA_NESTED }, + [TCA_TUNNEL_KEY_ENC_TOS] = { .type = NLA_U8 }, + [TCA_TUNNEL_KEY_ENC_TTL] = { .type = NLA_U8 }, }; static int tunnel_key_init(struct net *net, struct nlattr *nla, struct nlattr *est, struct tc_action **a, - int ovr, int bind, struct netlink_ext_ack *extack) + int ovr, int bind, bool rtnl_held, + struct netlink_ext_ack *extack) { struct tc_action_net *tn = net_generic(net, tunnel_key_net_id); struct nlattr *tb[TCA_TUNNEL_KEY_MAX + 1]; @@ -81,24 +215,35 @@ static int tunnel_key_init(struct net *net, struct nlattr *nla, struct tcf_tunnel_key *t; bool exists = false; __be16 dst_port = 0; + int opts_len = 0; __be64 key_id; __be16 flags; + u8 tos, ttl; int ret = 0; int err; - if (!nla) + if (!nla) { + NL_SET_ERR_MSG(extack, "Tunnel requires attributes to be passed"); return -EINVAL; + } err = nla_parse_nested(tb, TCA_TUNNEL_KEY_MAX, nla, tunnel_key_policy, - NULL); - if (err < 0) + extack); + if (err < 0) { + NL_SET_ERR_MSG(extack, "Failed to parse nested tunnel key attributes"); return err; + } - if (!tb[TCA_TUNNEL_KEY_PARMS]) + if (!tb[TCA_TUNNEL_KEY_PARMS]) { + NL_SET_ERR_MSG(extack, "Missing tunnel key parameters"); return -EINVAL; + } parm = nla_data(tb[TCA_TUNNEL_KEY_PARMS]); - exists = tcf_idr_check(tn, parm->index, a, bind); + err = tcf_idr_check_alloc(tn, &parm->index, a, bind); + if (err < 0) + return err; + exists = err; if (exists && bind) return 0; @@ -107,6 +252,7 @@ static int tunnel_key_init(struct net *net, struct nlattr *nla, break; case TCA_TUNNEL_KEY_ACT_SET: if (!tb[TCA_TUNNEL_KEY_ENC_KEY_ID]) { + NL_SET_ERR_MSG(extack, "Missing tunnel key id"); ret = -EINVAL; goto err_out; } @@ -121,6 +267,22 @@ static int tunnel_key_init(struct net *net, struct nlattr *nla, if (tb[TCA_TUNNEL_KEY_ENC_DST_PORT]) dst_port = nla_get_be16(tb[TCA_TUNNEL_KEY_ENC_DST_PORT]); + if (tb[TCA_TUNNEL_KEY_ENC_OPTS]) { + opts_len = tunnel_key_get_opts_len(tb[TCA_TUNNEL_KEY_ENC_OPTS], + extack); + if (opts_len < 0) { + ret = opts_len; + goto err_out; + } + } + + tos = 0; + if (tb[TCA_TUNNEL_KEY_ENC_TOS]) + tos = nla_get_u8(tb[TCA_TUNNEL_KEY_ENC_TOS]); + ttl = 0; + if (tb[TCA_TUNNEL_KEY_ENC_TTL]) + ttl = nla_get_u8(tb[TCA_TUNNEL_KEY_ENC_TTL]); + if (tb[TCA_TUNNEL_KEY_ENC_IPV4_SRC] && tb[TCA_TUNNEL_KEY_ENC_IPV4_DST]) { __be32 saddr; @@ -129,9 +291,9 @@ static int tunnel_key_init(struct net *net, struct nlattr *nla, saddr = 
nla_get_in_addr(tb[TCA_TUNNEL_KEY_ENC_IPV4_SRC]); daddr = nla_get_in_addr(tb[TCA_TUNNEL_KEY_ENC_IPV4_DST]); - metadata = __ip_tun_set_dst(saddr, daddr, 0, 0, + metadata = __ip_tun_set_dst(saddr, daddr, tos, ttl, dst_port, flags, - key_id, 0); + key_id, opts_len); } else if (tb[TCA_TUNNEL_KEY_ENC_IPV6_SRC] && tb[TCA_TUNNEL_KEY_ENC_IPV6_DST]) { struct in6_addr saddr; @@ -140,19 +302,33 @@ static int tunnel_key_init(struct net *net, struct nlattr *nla, saddr = nla_get_in6_addr(tb[TCA_TUNNEL_KEY_ENC_IPV6_SRC]); daddr = nla_get_in6_addr(tb[TCA_TUNNEL_KEY_ENC_IPV6_DST]); - metadata = __ipv6_tun_set_dst(&saddr, &daddr, 0, 0, dst_port, + metadata = __ipv6_tun_set_dst(&saddr, &daddr, tos, ttl, dst_port, 0, flags, key_id, 0); + } else { + NL_SET_ERR_MSG(extack, "Missing either ipv4 or ipv6 src and dst"); + ret = -EINVAL; + goto err_out; } if (!metadata) { - ret = -EINVAL; + NL_SET_ERR_MSG(extack, "Cannot allocate tunnel metadata dst"); + ret = -ENOMEM; goto err_out; } + if (opts_len) { + ret = tunnel_key_opts_set(tb[TCA_TUNNEL_KEY_ENC_OPTS], + &metadata->u.tun_info, + opts_len, extack); + if (ret < 0) + goto err_out; + } + metadata->u.tun_info.mode |= IP_TUNNEL_INFO_TX; break; default: + NL_SET_ERR_MSG(extack, "Unknown tunnel key action"); ret = -EINVAL; goto err_out; } @@ -160,14 +336,16 @@ static int tunnel_key_init(struct net *net, struct nlattr *nla, if (!exists) { ret = tcf_idr_create(tn, parm->index, est, a, &act_tunnel_key_ops, bind, true); - if (ret) - return ret; + if (ret) { + NL_SET_ERR_MSG(extack, "Cannot create TC IDR"); + goto err_out; + } ret = ACT_P_CREATED; - } else { + } else if (!ovr) { tcf_idr_release(*a, bind); - if (!ovr) - return -EEXIST; + NL_SET_ERR_MSG(extack, "TC IDR already exists"); + return -EEXIST; } t = to_tunnel_key(*a); @@ -175,8 +353,8 @@ static int tunnel_key_init(struct net *net, struct nlattr *nla, ASSERT_RTNL(); params_new = kzalloc(sizeof(*params_new), GFP_KERNEL); if (unlikely(!params_new)) { - if (ret == ACT_P_CREATED) - tcf_idr_release(*a, bind); + tcf_idr_release(*a, bind); + NL_SET_ERR_MSG(extack, "Cannot allocate tunnel key parameters"); return -ENOMEM; } @@ -199,6 +377,8 @@ static int tunnel_key_init(struct net *net, struct nlattr *nla, err_out: if (exists) tcf_idr_release(*a, bind); + else + tcf_idr_cleanup(tn, parm->index); return ret; } @@ -216,6 +396,61 @@ static void tunnel_key_release(struct tc_action *a) } } +static int tunnel_key_geneve_opts_dump(struct sk_buff *skb, + const struct ip_tunnel_info *info) +{ + int len = info->options_len; + u8 *src = (u8 *)(info + 1); + struct nlattr *start; + + start = nla_nest_start(skb, TCA_TUNNEL_KEY_ENC_OPTS_GENEVE); + if (!start) + return -EMSGSIZE; + + while (len > 0) { + struct geneve_opt *opt = (struct geneve_opt *)src; + + if (nla_put_be16(skb, TCA_TUNNEL_KEY_ENC_OPT_GENEVE_CLASS, + opt->opt_class) || + nla_put_u8(skb, TCA_TUNNEL_KEY_ENC_OPT_GENEVE_TYPE, + opt->type) || + nla_put(skb, TCA_TUNNEL_KEY_ENC_OPT_GENEVE_DATA, + opt->length * 4, opt + 1)) + return -EMSGSIZE; + + len -= sizeof(struct geneve_opt) + opt->length * 4; + src += sizeof(struct geneve_opt) + opt->length * 4; + } + + nla_nest_end(skb, start); + return 0; +} + +static int tunnel_key_opts_dump(struct sk_buff *skb, + const struct ip_tunnel_info *info) +{ + struct nlattr *start; + int err; + + if (!info->options_len) + return 0; + + start = nla_nest_start(skb, TCA_TUNNEL_KEY_ENC_OPTS); + if (!start) + return -EMSGSIZE; + + if (info->key.tun_flags & TUNNEL_GENEVE_OPT) { + err = tunnel_key_geneve_opts_dump(skb, info); + if (err) + return err; 
+ } else { + return -EINVAL; + } + + nla_nest_end(skb, start); + return 0; +} + static int tunnel_key_dump_addresses(struct sk_buff *skb, const struct ip_tunnel_info *info) { @@ -252,8 +487,8 @@ static int tunnel_key_dump(struct sk_buff *skb, struct tc_action *a, struct tcf_tunnel_key_params *params; struct tc_tunnel_key opt = { .index = t->tcf_index, - .refcnt = t->tcf_refcnt - ref, - .bindcnt = t->tcf_bindcnt - bind, + .refcnt = refcount_read(&t->tcf_refcnt) - ref, + .bindcnt = atomic_read(&t->tcf_bindcnt) - bind, .action = t->tcf_action, }; struct tcf_t tm; @@ -266,8 +501,9 @@ static int tunnel_key_dump(struct sk_buff *skb, struct tc_action *a, goto nla_put_failure; if (params->tcft_action == TCA_TUNNEL_KEY_ACT_SET) { - struct ip_tunnel_key *key = - &params->tcft_enc_metadata->u.tun_info.key; + struct ip_tunnel_info *info = + &params->tcft_enc_metadata->u.tun_info; + struct ip_tunnel_key *key = &info->key; __be32 key_id = tunnel_id_to_key32(key->tun_id); if (nla_put_be32(skb, TCA_TUNNEL_KEY_ENC_KEY_ID, key_id) || @@ -275,7 +511,14 @@ static int tunnel_key_dump(struct sk_buff *skb, struct tc_action *a, &params->tcft_enc_metadata->u.tun_info) || nla_put_be16(skb, TCA_TUNNEL_KEY_ENC_DST_PORT, key->tp_dst) || nla_put_u8(skb, TCA_TUNNEL_KEY_NO_CSUM, - !(key->tun_flags & TUNNEL_CSUM))) + !(key->tun_flags & TUNNEL_CSUM)) || + tunnel_key_opts_dump(skb, info)) + goto nla_put_failure; + + if (key->tos && nla_put_u8(skb, TCA_TUNNEL_KEY_ENC_TOS, key->tos)) + goto nla_put_failure; + + if (key->ttl && nla_put_u8(skb, TCA_TUNNEL_KEY_ENC_TTL, key->ttl)) goto nla_put_failure; } @@ -309,6 +552,13 @@ static int tunnel_key_search(struct net *net, struct tc_action **a, u32 index, return tcf_idr_search(tn, a, index); } +static int tunnel_key_delete(struct net *net, u32 index) +{ + struct tc_action_net *tn = net_generic(net, tunnel_key_net_id); + + return tcf_idr_delete_index(tn, index); +} + static struct tc_action_ops act_tunnel_key_ops = { .kind = "tunnel_key", .type = TCA_ACT_TUNNEL_KEY, @@ -319,6 +569,7 @@ static struct tc_action_ops act_tunnel_key_ops = { .cleanup = tunnel_key_release, .walk = tunnel_key_walker, .lookup = tunnel_key_search, + .delete = tunnel_key_delete, .size = sizeof(struct tcf_tunnel_key), }; diff --git a/net/sched/act_vlan.c b/net/sched/act_vlan.c index 1fb39e1f9d07..ad37f308175a 100644 --- a/net/sched/act_vlan.c +++ b/net/sched/act_vlan.c @@ -109,7 +109,8 @@ static const struct nla_policy vlan_policy[TCA_VLAN_MAX + 1] = { static int tcf_vlan_init(struct net *net, struct nlattr *nla, struct nlattr *est, struct tc_action **a, - int ovr, int bind, struct netlink_ext_ack *extack) + int ovr, int bind, bool rtnl_held, + struct netlink_ext_ack *extack) { struct tc_action_net *tn = net_generic(net, vlan_net_id); struct nlattr *tb[TCA_VLAN_MAX + 1]; @@ -133,7 +134,10 @@ static int tcf_vlan_init(struct net *net, struct nlattr *nla, if (!tb[TCA_VLAN_PARMS]) return -EINVAL; parm = nla_data(tb[TCA_VLAN_PARMS]); - exists = tcf_idr_check(tn, parm->index, a, bind); + err = tcf_idr_check_alloc(tn, &parm->index, a, bind); + if (err < 0) + return err; + exists = err; if (exists && bind) return 0; @@ -145,12 +149,16 @@ static int tcf_vlan_init(struct net *net, struct nlattr *nla, if (!tb[TCA_VLAN_PUSH_VLAN_ID]) { if (exists) tcf_idr_release(*a, bind); + else + tcf_idr_cleanup(tn, parm->index); return -EINVAL; } push_vid = nla_get_u16(tb[TCA_VLAN_PUSH_VLAN_ID]); if (push_vid >= VLAN_VID_MASK) { if (exists) tcf_idr_release(*a, bind); + else + tcf_idr_cleanup(tn, parm->index); return -ERANGE; } @@ -163,6 +171,8 @@
static int tcf_vlan_init(struct net *net, struct nlattr *nla, default: if (exists) tcf_idr_release(*a, bind); + else + tcf_idr_cleanup(tn, parm->index); return -EPROTONOSUPPORT; } } else { @@ -175,6 +185,8 @@ static int tcf_vlan_init(struct net *net, struct nlattr *nla, default: if (exists) tcf_idr_release(*a, bind); + else + tcf_idr_cleanup(tn, parm->index); return -EINVAL; } action = parm->v_action; @@ -182,14 +194,15 @@ static int tcf_vlan_init(struct net *net, struct nlattr *nla, if (!exists) { ret = tcf_idr_create(tn, parm->index, est, a, &act_vlan_ops, bind, true); - if (ret) + if (ret) { + tcf_idr_cleanup(tn, parm->index); return ret; + } ret = ACT_P_CREATED; - } else { + } else if (!ovr) { tcf_idr_release(*a, bind); - if (!ovr) - return -EEXIST; + return -EEXIST; } v = to_vlan(*a); @@ -197,8 +210,7 @@ static int tcf_vlan_init(struct net *net, struct nlattr *nla, ASSERT_RTNL(); p = kzalloc(sizeof(*p), GFP_KERNEL); if (!p) { - if (ret == ACT_P_CREATED) - tcf_idr_release(*a, bind); + tcf_idr_release(*a, bind); return -ENOMEM; } @@ -239,8 +251,8 @@ static int tcf_vlan_dump(struct sk_buff *skb, struct tc_action *a, struct tcf_vlan_params *p = rtnl_dereference(v->vlan_p); struct tc_vlan opt = { .index = v->tcf_index, - .refcnt = v->tcf_refcnt - ref, - .bindcnt = v->tcf_bindcnt - bind, + .refcnt = refcount_read(&v->tcf_refcnt) - ref, + .bindcnt = atomic_read(&v->tcf_bindcnt) - bind, .action = v->tcf_action, .v_action = p->tcfv_action, }; @@ -286,6 +298,13 @@ static int tcf_vlan_search(struct net *net, struct tc_action **a, u32 index, return tcf_idr_search(tn, a, index); } +static int tcf_vlan_delete(struct net *net, u32 index) +{ + struct tc_action_net *tn = net_generic(net, vlan_net_id); + + return tcf_idr_delete_index(tn, index); +} + static struct tc_action_ops act_vlan_ops = { .kind = "vlan", .type = TCA_ACT_VLAN, @@ -296,6 +315,7 @@ static struct tc_action_ops act_vlan_ops = { .cleanup = tcf_vlan_cleanup, .walk = tcf_vlan_walker, .lookup = tcf_vlan_search, + .delete = tcf_vlan_delete, .size = sizeof(struct tcf_vlan), }; diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c index f74513a7c7a8..623fe2cfe529 100644 --- a/net/sched/cls_api.c +++ b/net/sched/cls_api.c @@ -277,18 +277,21 @@ static bool tcf_block_offload_in_use(struct tcf_block *block) static int tcf_block_offload_cmd(struct tcf_block *block, struct net_device *dev, struct tcf_block_ext_info *ei, - enum tc_block_command command) + enum tc_block_command command, + struct netlink_ext_ack *extack) { struct tc_block_offload bo = {}; bo.command = command; bo.binder_type = ei->binder_type; bo.block = block; + bo.extack = extack; return dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_BLOCK, &bo); } static int tcf_block_offload_bind(struct tcf_block *block, struct Qdisc *q, - struct tcf_block_ext_info *ei) + struct tcf_block_ext_info *ei, + struct netlink_ext_ack *extack) { struct net_device *dev = q->dev_queue->dev; int err; @@ -299,10 +302,12 @@ static int tcf_block_offload_bind(struct tcf_block *block, struct Qdisc *q, /* If tc offload feature is disabled and the block we try to bind * to already has some offloaded filters, forbid to bind. 
*/ - if (!tc_can_offload(dev) && tcf_block_offload_in_use(block)) + if (!tc_can_offload(dev) && tcf_block_offload_in_use(block)) { + NL_SET_ERR_MSG(extack, "Bind to offloaded block failed as dev has offload disabled"); return -EOPNOTSUPP; + } - err = tcf_block_offload_cmd(block, dev, ei, TC_BLOCK_BIND); + err = tcf_block_offload_cmd(block, dev, ei, TC_BLOCK_BIND, extack); if (err == -EOPNOTSUPP) goto no_offload_dev_inc; return err; @@ -322,7 +327,7 @@ static void tcf_block_offload_unbind(struct tcf_block *block, struct Qdisc *q, if (!dev->netdev_ops->ndo_setup_tc) goto no_offload_dev_dec; - err = tcf_block_offload_cmd(block, dev, ei, TC_BLOCK_UNBIND); + err = tcf_block_offload_cmd(block, dev, ei, TC_BLOCK_UNBIND, NULL); if (err == -EOPNOTSUPP) goto no_offload_dev_dec; return; @@ -612,7 +617,7 @@ int tcf_block_get_ext(struct tcf_block **p_block, struct Qdisc *q, if (err) goto err_chain_head_change_cb_add; - err = tcf_block_offload_bind(block, q, ei); + err = tcf_block_offload_bind(block, q, ei, extack); if (err) goto err_block_offload_bind; @@ -746,18 +751,53 @@ unsigned int tcf_block_cb_decref(struct tcf_block_cb *block_cb) } EXPORT_SYMBOL(tcf_block_cb_decref); +static int +tcf_block_playback_offloads(struct tcf_block *block, tc_setup_cb_t *cb, + void *cb_priv, bool add, bool offload_in_use, + struct netlink_ext_ack *extack) +{ + struct tcf_chain *chain; + struct tcf_proto *tp; + int err; + + list_for_each_entry(chain, &block->chain_list, list) { + for (tp = rtnl_dereference(chain->filter_chain); tp; + tp = rtnl_dereference(tp->next)) { + if (tp->ops->reoffload) { + err = tp->ops->reoffload(tp, add, cb, cb_priv, + extack); + if (err && add) + goto err_playback_remove; + } else if (add && offload_in_use) { + err = -EOPNOTSUPP; + NL_SET_ERR_MSG(extack, "Filter HW offload failed - classifier without re-offloading support"); + goto err_playback_remove; + } + } + } + + return 0; + +err_playback_remove: + tcf_block_playback_offloads(block, cb, cb_priv, false, offload_in_use, + extack); + return err; +} + struct tcf_block_cb *__tcf_block_cb_register(struct tcf_block *block, tc_setup_cb_t *cb, void *cb_ident, - void *cb_priv) + void *cb_priv, + struct netlink_ext_ack *extack) { struct tcf_block_cb *block_cb; + int err; - /* At this point, playback of previous block cb calls is not supported, - * so forbid to register to block which already has some offloaded - * filters present. - */ - if (tcf_block_offload_in_use(block)) - return ERR_PTR(-EOPNOTSUPP); + /* Replay any already present rules */ + err = tcf_block_playback_offloads(block, cb, cb_priv, true, + tcf_block_offload_in_use(block), + extack); + if (err) + return ERR_PTR(err); block_cb = kzalloc(sizeof(*block_cb), GFP_KERNEL); if (!block_cb) @@ -772,17 +812,22 @@ EXPORT_SYMBOL(__tcf_block_cb_register); int tcf_block_cb_register(struct tcf_block *block, tc_setup_cb_t *cb, void *cb_ident, - void *cb_priv) + void *cb_priv, struct netlink_ext_ack *extack) { struct tcf_block_cb *block_cb; - block_cb = __tcf_block_cb_register(block, cb, cb_ident, cb_priv); + block_cb = __tcf_block_cb_register(block, cb, cb_ident, cb_priv, + extack); return IS_ERR(block_cb) ? 
PTR_ERR(block_cb) : 0; } EXPORT_SYMBOL(tcf_block_cb_register); -void __tcf_block_cb_unregister(struct tcf_block_cb *block_cb) +void __tcf_block_cb_unregister(struct tcf_block *block, + struct tcf_block_cb *block_cb) { + tcf_block_playback_offloads(block, block_cb->cb, block_cb->cb_priv, + false, tcf_block_offload_in_use(block), + NULL); list_del(&block_cb->list); kfree(block_cb); } @@ -796,7 +841,7 @@ void tcf_block_cb_unregister(struct tcf_block *block, block_cb = tcf_block_cb_lookup(block, cb, cb_ident); if (!block_cb) return; - __tcf_block_cb_unregister(block_cb); + __tcf_block_cb_unregister(block, block_cb); } EXPORT_SYMBOL(tcf_block_cb_unregister); @@ -1463,7 +1508,9 @@ static bool tcf_chain_dump(struct tcf_chain *chain, struct Qdisc *q, u32 parent, arg.w.stop = 0; arg.w.skip = cb->args[1] - 1; arg.w.count = 0; + arg.w.cookie = cb->args[2]; tp->ops->walk(tp, &arg.w); + cb->args[2] = arg.w.cookie; cb->args[1] = arg.w.count + 1; if (arg.w.stop) return false; @@ -1564,11 +1611,7 @@ out: void tcf_exts_destroy(struct tcf_exts *exts) { #ifdef CONFIG_NET_CLS_ACT - LIST_HEAD(actions); - - ASSERT_RTNL(); - tcf_exts_to_list(exts, &actions); - tcf_action_destroy(&actions, TCA_ACT_UNBIND); + tcf_action_destroy(exts->actions, TCA_ACT_UNBIND); kfree(exts->actions); exts->nr_actions = 0; #endif @@ -1587,7 +1630,7 @@ int tcf_exts_validate(struct net *net, struct tcf_proto *tp, struct nlattr **tb, if (exts->police && tb[exts->police]) { act = tcf_action_init_1(net, tp, tb[exts->police], rate_tlv, "police", ovr, - TCA_ACT_BIND, extack); + TCA_ACT_BIND, true, extack); if (IS_ERR(act)) return PTR_ERR(act); @@ -1595,17 +1638,15 @@ int tcf_exts_validate(struct net *net, struct tcf_proto *tp, struct nlattr **tb, exts->actions[0] = act; exts->nr_actions = 1; } else if (exts->action && tb[exts->action]) { - LIST_HEAD(actions); - int err, i = 0; + int err; err = tcf_action_init(net, tp, tb[exts->action], rate_tlv, NULL, ovr, TCA_ACT_BIND, - &actions, &attr_size, extack); - if (err) + exts->actions, &attr_size, true, + extack); + if (err < 0) return err; - list_for_each_entry(act, &actions, list) - exts->actions[i++] = act; - exts->nr_actions = i; + exts->nr_actions = err; } exts->net = net; } @@ -1654,14 +1695,11 @@ int tcf_exts_dump(struct sk_buff *skb, struct tcf_exts *exts) * tc data even if iproute2 was newer - jhs */ if (exts->type != TCA_OLD_COMPAT) { - LIST_HEAD(actions); - nest = nla_nest_start(skb, exts->action); if (nest == NULL) goto nla_put_failure; - tcf_exts_to_list(exts, &actions); - if (tcf_action_dump(skb, &actions, 0, 0) < 0) + if (tcf_action_dump(skb, exts->actions, 0, 0) < 0) goto nla_put_failure; nla_nest_end(skb, nest); } else if (exts->police) { diff --git a/net/sched/cls_bpf.c b/net/sched/cls_bpf.c index 1aa7f6511065..66e0ac9811f9 100644 --- a/net/sched/cls_bpf.c +++ b/net/sched/cls_bpf.c @@ -43,6 +43,7 @@ struct cls_bpf_prog { struct tcf_result res; bool exts_integrated; u32 gen_flags; + unsigned int in_hw_count; struct tcf_exts exts; u32 handle; u16 bpf_num_ops; @@ -174,6 +175,7 @@ static int cls_bpf_offload_cmd(struct tcf_proto *tp, struct cls_bpf_prog *prog, cls_bpf_offload_cmd(tp, oldprog, prog, extack); return err; } else if (err > 0) { + prog->in_hw_count = err; tcf_block_offload_inc(block, &prog->gen_flags); } } @@ -652,6 +654,42 @@ skip: } } +static int cls_bpf_reoffload(struct tcf_proto *tp, bool add, tc_setup_cb_t *cb, + void *cb_priv, struct netlink_ext_ack *extack) +{ + struct cls_bpf_head *head = rtnl_dereference(tp->root); + struct tcf_block *block = tp->chain->block; + 
struct tc_cls_bpf_offload cls_bpf = {}; + struct cls_bpf_prog *prog; + int err; + + list_for_each_entry(prog, &head->plist, link) { + if (tc_skip_hw(prog->gen_flags)) + continue; + + tc_cls_common_offload_init(&cls_bpf.common, tp, prog->gen_flags, + extack); + cls_bpf.command = TC_CLSBPF_OFFLOAD; + cls_bpf.exts = &prog->exts; + cls_bpf.prog = add ? prog->filter : NULL; + cls_bpf.oldprog = add ? NULL : prog->filter; + cls_bpf.name = prog->bpf_name; + cls_bpf.exts_integrated = prog->exts_integrated; + + err = cb(TC_SETUP_CLSBPF, &cls_bpf, cb_priv); + if (err) { + if (add && tc_skip_sw(prog->gen_flags)) + return err; + continue; + } + + tc_cls_offload_cnt_update(block, &prog->in_hw_count, + &prog->gen_flags, add); + } + + return 0; +} + static struct tcf_proto_ops cls_bpf_ops __read_mostly = { .kind = "bpf", .owner = THIS_MODULE, @@ -662,6 +700,7 @@ static struct tcf_proto_ops cls_bpf_ops __read_mostly = { .change = cls_bpf_change, .delete = cls_bpf_delete, .walk = cls_bpf_walk, + .reoffload = cls_bpf_reoffload, .dump = cls_bpf_dump, .bind_class = cls_bpf_bind_class, }; diff --git a/net/sched/cls_flower.c b/net/sched/cls_flower.c index 9e8b26a80fb3..38d74803e2df 100644 --- a/net/sched/cls_flower.c +++ b/net/sched/cls_flower.c @@ -35,6 +35,7 @@ struct fl_flow_key { struct flow_dissector_key_basic basic; struct flow_dissector_key_eth_addrs eth; struct flow_dissector_key_vlan vlan; + struct flow_dissector_key_vlan cvlan; union { struct flow_dissector_key_ipv4_addrs ipv4; struct flow_dissector_key_ipv6_addrs ipv6; @@ -51,6 +52,7 @@ struct fl_flow_key { struct flow_dissector_key_mpls mpls; struct flow_dissector_key_tcp tcp; struct flow_dissector_key_ip ip; + struct flow_dissector_key_ip enc_ip; } __aligned(BITS_PER_LONG / 8); /* Ensure that we can do comparisons as longs. 
*/ struct fl_flow_mask_range { @@ -87,6 +89,7 @@ struct cls_fl_filter { struct list_head list; u32 handle; u32 flags; + unsigned int in_hw_count; struct rcu_work rwork; struct net_device *hw_dev; }; @@ -289,6 +292,7 @@ static int fl_hw_replace_filter(struct tcf_proto *tp, fl_hw_destroy_filter(tp, f, NULL); return err; } else if (err > 0) { + f->in_hw_count = err; tcf_block_offload_inc(block, &f->flags); } @@ -447,6 +451,13 @@ static const struct nla_policy fl_policy[TCA_FLOWER_MAX + 1] = { [TCA_FLOWER_KEY_IP_TOS_MASK] = { .type = NLA_U8 }, [TCA_FLOWER_KEY_IP_TTL] = { .type = NLA_U8 }, [TCA_FLOWER_KEY_IP_TTL_MASK] = { .type = NLA_U8 }, + [TCA_FLOWER_KEY_CVLAN_ID] = { .type = NLA_U16 }, + [TCA_FLOWER_KEY_CVLAN_PRIO] = { .type = NLA_U8 }, + [TCA_FLOWER_KEY_CVLAN_ETH_TYPE] = { .type = NLA_U16 }, + [TCA_FLOWER_KEY_ENC_IP_TOS] = { .type = NLA_U8 }, + [TCA_FLOWER_KEY_ENC_IP_TOS_MASK] = { .type = NLA_U8 }, + [TCA_FLOWER_KEY_ENC_IP_TTL] = { .type = NLA_U8 }, + [TCA_FLOWER_KEY_ENC_IP_TTL_MASK] = { .type = NLA_U8 }, }; static void fl_set_key_val(struct nlattr **tb, @@ -498,22 +509,26 @@ static int fl_set_key_mpls(struct nlattr **tb, } static void fl_set_key_vlan(struct nlattr **tb, + __be16 ethertype, + int vlan_id_key, int vlan_prio_key, struct flow_dissector_key_vlan *key_val, struct flow_dissector_key_vlan *key_mask) { #define VLAN_PRIORITY_MASK 0x7 - if (tb[TCA_FLOWER_KEY_VLAN_ID]) { + if (tb[vlan_id_key]) { key_val->vlan_id = - nla_get_u16(tb[TCA_FLOWER_KEY_VLAN_ID]) & VLAN_VID_MASK; + nla_get_u16(tb[vlan_id_key]) & VLAN_VID_MASK; key_mask->vlan_id = VLAN_VID_MASK; } - if (tb[TCA_FLOWER_KEY_VLAN_PRIO]) { + if (tb[vlan_prio_key]) { key_val->vlan_priority = - nla_get_u8(tb[TCA_FLOWER_KEY_VLAN_PRIO]) & + nla_get_u8(tb[vlan_prio_key]) & VLAN_PRIORITY_MASK; key_mask->vlan_priority = VLAN_PRIORITY_MASK; } + key_val->vlan_tpid = ethertype; + key_mask->vlan_tpid = cpu_to_be16(~0); } static void fl_set_key_flag(u32 flower_key, u32 flower_mask, @@ -551,17 +566,17 @@ static int fl_set_key_flags(struct nlattr **tb, return 0; } -static void fl_set_key_ip(struct nlattr **tb, +static void fl_set_key_ip(struct nlattr **tb, bool encap, struct flow_dissector_key_ip *key, struct flow_dissector_key_ip *mask) { - fl_set_key_val(tb, &key->tos, TCA_FLOWER_KEY_IP_TOS, - &mask->tos, TCA_FLOWER_KEY_IP_TOS_MASK, - sizeof(key->tos)); + int tos_key = encap ? TCA_FLOWER_KEY_ENC_IP_TOS : TCA_FLOWER_KEY_IP_TOS; + int ttl_key = encap ? TCA_FLOWER_KEY_ENC_IP_TTL : TCA_FLOWER_KEY_IP_TTL; + int tos_mask = encap ? TCA_FLOWER_KEY_ENC_IP_TOS_MASK : TCA_FLOWER_KEY_IP_TOS_MASK; + int ttl_mask = encap ? 
TCA_FLOWER_KEY_ENC_IP_TTL_MASK : TCA_FLOWER_KEY_IP_TTL_MASK; - fl_set_key_val(tb, &key->ttl, TCA_FLOWER_KEY_IP_TTL, - &mask->ttl, TCA_FLOWER_KEY_IP_TTL_MASK, - sizeof(key->ttl)); + fl_set_key_val(tb, &key->tos, tos_key, &mask->tos, tos_mask, sizeof(key->tos)); + fl_set_key_val(tb, &key->ttl, ttl_key, &mask->ttl, ttl_mask, sizeof(key->ttl)); } static int fl_set_key(struct net *net, struct nlattr **tb, @@ -590,12 +605,28 @@ static int fl_set_key(struct net *net, struct nlattr **tb, if (tb[TCA_FLOWER_KEY_ETH_TYPE]) { ethertype = nla_get_be16(tb[TCA_FLOWER_KEY_ETH_TYPE]); - if (ethertype == htons(ETH_P_8021Q)) { - fl_set_key_vlan(tb, &key->vlan, &mask->vlan); - fl_set_key_val(tb, &key->basic.n_proto, - TCA_FLOWER_KEY_VLAN_ETH_TYPE, - &mask->basic.n_proto, TCA_FLOWER_UNSPEC, - sizeof(key->basic.n_proto)); + if (eth_type_vlan(ethertype)) { + fl_set_key_vlan(tb, ethertype, TCA_FLOWER_KEY_VLAN_ID, + TCA_FLOWER_KEY_VLAN_PRIO, &key->vlan, + &mask->vlan); + + if (tb[TCA_FLOWER_KEY_VLAN_ETH_TYPE]) { + ethertype = nla_get_be16(tb[TCA_FLOWER_KEY_VLAN_ETH_TYPE]); + if (eth_type_vlan(ethertype)) { + fl_set_key_vlan(tb, ethertype, + TCA_FLOWER_KEY_CVLAN_ID, + TCA_FLOWER_KEY_CVLAN_PRIO, + &key->cvlan, &mask->cvlan); + fl_set_key_val(tb, &key->basic.n_proto, + TCA_FLOWER_KEY_CVLAN_ETH_TYPE, + &mask->basic.n_proto, + TCA_FLOWER_UNSPEC, + sizeof(key->basic.n_proto)); + } else { + key->basic.n_proto = ethertype; + mask->basic.n_proto = cpu_to_be16(~0); + } + } } else { key->basic.n_proto = ethertype; mask->basic.n_proto = cpu_to_be16(~0); @@ -607,7 +638,7 @@ static int fl_set_key(struct net *net, struct nlattr **tb, fl_set_key_val(tb, &key->basic.ip_proto, TCA_FLOWER_KEY_IP_PROTO, &mask->basic.ip_proto, TCA_FLOWER_UNSPEC, sizeof(key->basic.ip_proto)); - fl_set_key_ip(tb, &key->ip, &mask->ip); + fl_set_key_ip(tb, false, &key->ip, &mask->ip); } if (tb[TCA_FLOWER_KEY_IPV4_SRC] || tb[TCA_FLOWER_KEY_IPV4_DST]) { @@ -742,6 +773,8 @@ static int fl_set_key(struct net *net, struct nlattr **tb, &mask->enc_tp.dst, TCA_FLOWER_KEY_ENC_UDP_DST_PORT_MASK, sizeof(key->enc_tp.dst)); + fl_set_key_ip(tb, true, &key->enc_ip, &mask->enc_ip); + if (tb[TCA_FLOWER_KEY_FLAGS]) ret = fl_set_key_flags(tb, &key->control.flags, &mask->control.flags); @@ -821,6 +854,8 @@ static void fl_init_dissector(struct fl_flow_mask *mask) FL_KEY_SET_IF_MASKED(&mask->key, keys, cnt, FLOW_DISSECTOR_KEY_VLAN, vlan); FL_KEY_SET_IF_MASKED(&mask->key, keys, cnt, + FLOW_DISSECTOR_KEY_CVLAN, cvlan); + FL_KEY_SET_IF_MASKED(&mask->key, keys, cnt, FLOW_DISSECTOR_KEY_ENC_KEYID, enc_key_id); FL_KEY_SET_IF_MASKED(&mask->key, keys, cnt, FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS, enc_ipv4); @@ -832,6 +867,8 @@ static void fl_init_dissector(struct fl_flow_mask *mask) enc_control); FL_KEY_SET_IF_MASKED(&mask->key, keys, cnt, FLOW_DISSECTOR_KEY_ENC_PORTS, enc_tp); + FL_KEY_SET_IF_MASKED(&mask->key, keys, cnt, + FLOW_DISSECTOR_KEY_ENC_IP, enc_ip); skb_flow_dissector_init(&mask->dissector, keys, cnt); } @@ -1071,20 +1108,59 @@ static void fl_walk(struct tcf_proto *tp, struct tcf_walker *arg) { struct cls_fl_head *head = rtnl_dereference(tp->root); struct cls_fl_filter *f; + + arg->count = arg->skip; + + while ((f = idr_get_next_ul(&head->handle_idr, + &arg->cookie)) != NULL) { + if (arg->fn(tp, f, arg) < 0) { + arg->stop = 1; + break; + } + arg->cookie = f->handle + 1; + arg->count++; + } +} + +static int fl_reoffload(struct tcf_proto *tp, bool add, tc_setup_cb_t *cb, + void *cb_priv, struct netlink_ext_ack *extack) +{ + struct cls_fl_head *head = rtnl_dereference(tp->root); + 
struct tc_cls_flower_offload cls_flower = {}; + struct tcf_block *block = tp->chain->block; struct fl_flow_mask *mask; + struct cls_fl_filter *f; + int err; - list_for_each_entry_rcu(mask, &head->masks, list) { - list_for_each_entry_rcu(f, &mask->filters, list) { - if (arg->count < arg->skip) - goto skip; - if (arg->fn(tp, f, arg) < 0) { - arg->stop = 1; - break; + list_for_each_entry(mask, &head->masks, list) { + list_for_each_entry(f, &mask->filters, list) { + if (tc_skip_hw(f->flags)) + continue; + + tc_cls_common_offload_init(&cls_flower.common, tp, + f->flags, extack); + cls_flower.command = add ? + TC_CLSFLOWER_REPLACE : TC_CLSFLOWER_DESTROY; + cls_flower.cookie = (unsigned long)f; + cls_flower.dissector = &mask->dissector; + cls_flower.mask = &f->mkey; + cls_flower.key = &f->key; + cls_flower.exts = &f->exts; + cls_flower.classid = f->res.classid; + + err = cb(TC_SETUP_CLSFLOWER, &cls_flower, cb_priv); + if (err) { + if (add && tc_skip_sw(f->flags)) + return err; + continue; } -skip: - arg->count++; + + tc_cls_offload_cnt_update(block, &f->in_hw_count, + &f->flags, add); } } + + return 0; } static int fl_dump_key_val(struct sk_buff *skb, @@ -1141,20 +1217,24 @@ static int fl_dump_key_mpls(struct sk_buff *skb, return 0; } -static int fl_dump_key_ip(struct sk_buff *skb, +static int fl_dump_key_ip(struct sk_buff *skb, bool encap, struct flow_dissector_key_ip *key, struct flow_dissector_key_ip *mask) { - if (fl_dump_key_val(skb, &key->tos, TCA_FLOWER_KEY_IP_TOS, &mask->tos, - TCA_FLOWER_KEY_IP_TOS_MASK, sizeof(key->tos)) || - fl_dump_key_val(skb, &key->ttl, TCA_FLOWER_KEY_IP_TTL, &mask->ttl, - TCA_FLOWER_KEY_IP_TTL_MASK, sizeof(key->ttl))) + int tos_key = encap ? TCA_FLOWER_KEY_ENC_IP_TOS : TCA_FLOWER_KEY_IP_TOS; + int ttl_key = encap ? TCA_FLOWER_KEY_ENC_IP_TTL : TCA_FLOWER_KEY_IP_TTL; + int tos_mask = encap ? TCA_FLOWER_KEY_ENC_IP_TOS_MASK : TCA_FLOWER_KEY_IP_TOS_MASK; + int ttl_mask = encap ? 
TCA_FLOWER_KEY_ENC_IP_TTL_MASK : TCA_FLOWER_KEY_IP_TTL_MASK; + + if (fl_dump_key_val(skb, &key->tos, tos_key, &mask->tos, tos_mask, sizeof(key->tos)) || + fl_dump_key_val(skb, &key->ttl, ttl_key, &mask->ttl, ttl_mask, sizeof(key->ttl))) return -1; return 0; } static int fl_dump_key_vlan(struct sk_buff *skb, + int vlan_id_key, int vlan_prio_key, struct flow_dissector_key_vlan *vlan_key, struct flow_dissector_key_vlan *vlan_mask) { @@ -1163,13 +1243,13 @@ static int fl_dump_key_vlan(struct sk_buff *skb, if (!memchr_inv(vlan_mask, 0, sizeof(*vlan_mask))) return 0; if (vlan_mask->vlan_id) { - err = nla_put_u16(skb, TCA_FLOWER_KEY_VLAN_ID, + err = nla_put_u16(skb, vlan_id_key, vlan_key->vlan_id); if (err) return err; } if (vlan_mask->vlan_priority) { - err = nla_put_u8(skb, TCA_FLOWER_KEY_VLAN_PRIO, + err = nla_put_u8(skb, vlan_prio_key, vlan_key->vlan_priority); if (err) return err; @@ -1264,15 +1344,36 @@ static int fl_dump(struct net *net, struct tcf_proto *tp, void *fh, if (fl_dump_key_mpls(skb, &key->mpls, &mask->mpls)) goto nla_put_failure; - if (fl_dump_key_vlan(skb, &key->vlan, &mask->vlan)) + if (fl_dump_key_vlan(skb, TCA_FLOWER_KEY_VLAN_ID, + TCA_FLOWER_KEY_VLAN_PRIO, &key->vlan, &mask->vlan)) + goto nla_put_failure; + + if (fl_dump_key_vlan(skb, TCA_FLOWER_KEY_CVLAN_ID, + TCA_FLOWER_KEY_CVLAN_PRIO, + &key->cvlan, &mask->cvlan) || + (mask->cvlan.vlan_tpid && + nla_put_u16(skb, TCA_FLOWER_KEY_VLAN_ETH_TYPE, + key->cvlan.vlan_tpid))) goto nla_put_failure; + if (mask->basic.n_proto) { + if (mask->cvlan.vlan_tpid) { + if (nla_put_be16(skb, TCA_FLOWER_KEY_CVLAN_ETH_TYPE, + key->basic.n_proto)) + goto nla_put_failure; + } else if (mask->vlan.vlan_tpid) { + if (nla_put_be16(skb, TCA_FLOWER_KEY_VLAN_ETH_TYPE, + key->basic.n_proto)) + goto nla_put_failure; + } + } + if ((key->basic.n_proto == htons(ETH_P_IP) || key->basic.n_proto == htons(ETH_P_IPV6)) && (fl_dump_key_val(skb, &key->basic.ip_proto, TCA_FLOWER_KEY_IP_PROTO, &mask->basic.ip_proto, TCA_FLOWER_UNSPEC, sizeof(key->basic.ip_proto)) || - fl_dump_key_ip(skb, &key->ip, &mask->ip))) + fl_dump_key_ip(skb, false, &key->ip, &mask->ip))) goto nla_put_failure; if (key->control.addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS && @@ -1397,7 +1498,8 @@ static int fl_dump(struct net *net, struct tcf_proto *tp, void *fh, TCA_FLOWER_KEY_ENC_UDP_DST_PORT, &mask->enc_tp.dst, TCA_FLOWER_KEY_ENC_UDP_DST_PORT_MASK, - sizeof(key->enc_tp.dst))) + sizeof(key->enc_tp.dst)) || + fl_dump_key_ip(skb, true, &key->enc_ip, &mask->enc_ip)) goto nla_put_failure; if (fl_dump_key_flags(skb, key->control.flags, mask->control.flags)) @@ -1438,6 +1540,7 @@ static struct tcf_proto_ops cls_fl_ops __read_mostly = { .change = fl_change, .delete = fl_delete, .walk = fl_walk, + .reoffload = fl_reoffload, .dump = fl_dump, .bind_class = fl_bind_class, .owner = THIS_MODULE, diff --git a/net/sched/cls_matchall.c b/net/sched/cls_matchall.c index 47b207ef7762..af16f36ed578 100644 --- a/net/sched/cls_matchall.c +++ b/net/sched/cls_matchall.c @@ -21,6 +21,7 @@ struct cls_mall_head { struct tcf_result res; u32 handle; u32 flags; + unsigned int in_hw_count; struct rcu_work rwork; }; @@ -95,6 +96,7 @@ static int mall_replace_hw_filter(struct tcf_proto *tp, mall_destroy_hw_filter(tp, head, cookie, NULL); return err; } else if (err > 0) { + head->in_hw_count = err; tcf_block_offload_inc(block, &head->flags); } @@ -235,6 +237,35 @@ skip: arg->count++; } +static int mall_reoffload(struct tcf_proto *tp, bool add, tc_setup_cb_t *cb, + void *cb_priv, struct netlink_ext_ack *extack) +{ + struct 
cls_mall_head *head = rtnl_dereference(tp->root); + struct tc_cls_matchall_offload cls_mall = {}; + struct tcf_block *block = tp->chain->block; + int err; + + if (tc_skip_hw(head->flags)) + return 0; + + tc_cls_common_offload_init(&cls_mall.common, tp, head->flags, extack); + cls_mall.command = add ? + TC_CLSMATCHALL_REPLACE : TC_CLSMATCHALL_DESTROY; + cls_mall.exts = &head->exts; + cls_mall.cookie = (unsigned long)head; + + err = cb(TC_SETUP_CLSMATCHALL, &cls_mall, cb_priv); + if (err) { + if (add && tc_skip_sw(head->flags)) + return err; + return 0; + } + + tc_cls_offload_cnt_update(block, &head->in_hw_count, &head->flags, add); + + return 0; +} + static int mall_dump(struct net *net, struct tcf_proto *tp, void *fh, struct sk_buff *skb, struct tcmsg *t) { @@ -289,6 +320,7 @@ static struct tcf_proto_ops cls_mall_ops __read_mostly = { .change = mall_change, .delete = mall_delete, .walk = mall_walk, + .reoffload = mall_reoffload, .dump = mall_dump, .bind_class = mall_bind_class, .owner = THIS_MODULE, diff --git a/net/sched/cls_u32.c b/net/sched/cls_u32.c index fb861f90fde6..d5d2a6dc3921 100644 --- a/net/sched/cls_u32.c +++ b/net/sched/cls_u32.c @@ -62,6 +62,7 @@ struct tc_u_knode { struct tc_u32_pcnt __percpu *pf; #endif u32 flags; + unsigned int in_hw_count; #ifdef CONFIG_CLS_U32_MARK u32 val; u32 mask; @@ -571,6 +572,7 @@ static int u32_replace_hw_knode(struct tcf_proto *tp, struct tc_u_knode *n, u32_remove_hw_knode(tp, n, NULL); return err; } else if (err > 0) { + n->in_hw_count = err; tcf_block_offload_inc(block, &n->flags); } @@ -1199,6 +1201,114 @@ static void u32_walk(struct tcf_proto *tp, struct tcf_walker *arg) } } +static int u32_reoffload_hnode(struct tcf_proto *tp, struct tc_u_hnode *ht, + bool add, tc_setup_cb_t *cb, void *cb_priv, + struct netlink_ext_ack *extack) +{ + struct tc_cls_u32_offload cls_u32 = {}; + int err; + + tc_cls_common_offload_init(&cls_u32.common, tp, ht->flags, extack); + cls_u32.command = add ? TC_CLSU32_NEW_HNODE : TC_CLSU32_DELETE_HNODE; + cls_u32.hnode.divisor = ht->divisor; + cls_u32.hnode.handle = ht->handle; + cls_u32.hnode.prio = ht->prio; + + err = cb(TC_SETUP_CLSU32, &cls_u32, cb_priv); + if (err && add && tc_skip_sw(ht->flags)) + return err; + + return 0; +} + +static int u32_reoffload_knode(struct tcf_proto *tp, struct tc_u_knode *n, + bool add, tc_setup_cb_t *cb, void *cb_priv, + struct netlink_ext_ack *extack) +{ + struct tc_u_hnode *ht = rtnl_dereference(n->ht_down); + struct tcf_block *block = tp->chain->block; + struct tc_cls_u32_offload cls_u32 = {}; + int err; + + tc_cls_common_offload_init(&cls_u32.common, tp, n->flags, extack); + cls_u32.command = add ? 
+ TC_CLSU32_REPLACE_KNODE : TC_CLSU32_DELETE_KNODE; + cls_u32.knode.handle = n->handle; + + if (add) { + cls_u32.knode.fshift = n->fshift; +#ifdef CONFIG_CLS_U32_MARK + cls_u32.knode.val = n->val; + cls_u32.knode.mask = n->mask; +#else + cls_u32.knode.val = 0; + cls_u32.knode.mask = 0; +#endif + cls_u32.knode.sel = &n->sel; + cls_u32.knode.exts = &n->exts; + if (n->ht_down) + cls_u32.knode.link_handle = ht->handle; + } + + err = cb(TC_SETUP_CLSU32, &cls_u32, cb_priv); + if (err) { + if (add && tc_skip_sw(n->flags)) + return err; + return 0; + } + + tc_cls_offload_cnt_update(block, &n->in_hw_count, &n->flags, add); + + return 0; +} + +static int u32_reoffload(struct tcf_proto *tp, bool add, tc_setup_cb_t *cb, + void *cb_priv, struct netlink_ext_ack *extack) +{ + struct tc_u_common *tp_c = tp->data; + struct tc_u_hnode *ht; + struct tc_u_knode *n; + unsigned int h; + int err; + + for (ht = rtnl_dereference(tp_c->hlist); + ht; + ht = rtnl_dereference(ht->next)) { + if (ht->prio != tp->prio) + continue; + + /* When adding filters to a new dev, try to offload the + * hashtable first. When removing, do the filters before the + * hashtable. + */ + if (add && !tc_skip_hw(ht->flags)) { + err = u32_reoffload_hnode(tp, ht, add, cb, cb_priv, + extack); + if (err) + return err; + } + + for (h = 0; h <= ht->divisor; h++) { + for (n = rtnl_dereference(ht->ht[h]); + n; + n = rtnl_dereference(n->next)) { + if (tc_skip_hw(n->flags)) + continue; + + err = u32_reoffload_knode(tp, n, add, cb, + cb_priv, extack); + if (err) + return err; + } + } + + if (!add && !tc_skip_hw(ht->flags)) + u32_reoffload_hnode(tp, ht, add, cb, cb_priv, extack); + } + + return 0; +} + static void u32_bind_class(void *fh, u32 classid, unsigned long cl) { struct tc_u_knode *n = fh; @@ -1336,6 +1446,7 @@ static struct tcf_proto_ops cls_u32_ops __read_mostly = { .change = u32_change, .delete = u32_delete, .walk = u32_walk, + .reoffload = u32_reoffload, .dump = u32_dump, .bind_class = u32_bind_class, .owner = THIS_MODULE, diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c index 54eca685420f..98541c6399db 100644 --- a/net/sched/sch_api.c +++ b/net/sched/sch_api.c @@ -596,12 +596,19 @@ static enum hrtimer_restart qdisc_watchdog(struct hrtimer *timer) return HRTIMER_NORESTART; } -void qdisc_watchdog_init(struct qdisc_watchdog *wd, struct Qdisc *qdisc) +void qdisc_watchdog_init_clockid(struct qdisc_watchdog *wd, struct Qdisc *qdisc, + clockid_t clockid) { - hrtimer_init(&wd->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED); + hrtimer_init(&wd->timer, clockid, HRTIMER_MODE_ABS_PINNED); wd->timer.function = qdisc_watchdog; wd->qdisc = qdisc; } +EXPORT_SYMBOL(qdisc_watchdog_init_clockid); + +void qdisc_watchdog_init(struct qdisc_watchdog *wd, struct Qdisc *qdisc) +{ + qdisc_watchdog_init_clockid(wd, qdisc, CLOCK_MONOTONIC); +} EXPORT_SYMBOL(qdisc_watchdog_init); void qdisc_watchdog_schedule_ns(struct qdisc_watchdog *wd, u64 expires) diff --git a/net/sched/sch_cake.c b/net/sched/sch_cake.c new file mode 100644 index 000000000000..539c9490c308 --- /dev/null +++ b/net/sched/sch_cake.c @@ -0,0 +1,3019 @@ +// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause + +/* COMMON Applications Kept Enhanced (CAKE) discipline + * + * Copyright (C) 2014-2018 Jonathan Morton <chromatix99@gmail.com> + * Copyright (C) 2015-2018 Toke Høiland-Jørgensen <toke@toke.dk> + * Copyright (C) 2014-2018 Dave Täht <dave.taht@gmail.com> + * Copyright (C) 2015-2018 Sebastian Moeller <moeller0@gmx.de> + * (C) 2015-2018 Kevin Darbyshire-Bryant <kevin@darbyshire-bryant.me.uk> + 
* Copyright (C) 2017-2018 Ryan Mounce <ryan@mounce.com.au> + * + * The CAKE Principles: + * (or, how to have your cake and eat it too) + * + * This is a combination of several shaping, AQM and FQ techniques into one + * easy-to-use package: + * + * - An overall bandwidth shaper, to move the bottleneck away from dumb CPE + * equipment and bloated MACs. This operates in deficit mode (as in sch_fq), + * eliminating the need for any sort of burst parameter (eg. token bucket + * depth). Burst support is limited to that necessary to overcome scheduling + * latency. + * + * - A Diffserv-aware priority queue, giving more priority to certain classes, + * up to a specified fraction of bandwidth. Above that bandwidth threshold, + * the priority is reduced to avoid starving other tins. + * + * - Each priority tin has a separate Flow Queue system, to isolate traffic + * flows from each other. This prevents a burst on one flow from increasing + * the delay to another. Flows are distributed to queues using a + * set-associative hash function. + * + * - Each queue is actively managed by Cobalt, which is a combination of the + * Codel and Blue AQM algorithms. This serves flows fairly, and signals + * congestion early via ECN (if available) and/or packet drops, to keep + * latency low. The codel parameters are auto-tuned based on the bandwidth + * setting, as is necessary at low bandwidths. + * + * The configuration parameters are kept deliberately simple for ease of use. + * Everything has sane defaults. Complete generality of configuration is *not* + * a goal. + * + * The priority queue operates according to a weighted DRR scheme, combined with + * a bandwidth tracker which reuses the shaper logic to detect which side of the + * bandwidth sharing threshold the tin is operating. This determines whether a + * priority-based weight (high) or a bandwidth-based weight (low) is used for + * that tin in the current pass. + * + * This qdisc was inspired by Eric Dumazet's fq_codel code, which he kindly + * granted us permission to leverage. 
+ */ + +#include <linux/module.h> +#include <linux/types.h> +#include <linux/kernel.h> +#include <linux/jiffies.h> +#include <linux/string.h> +#include <linux/in.h> +#include <linux/errno.h> +#include <linux/init.h> +#include <linux/skbuff.h> +#include <linux/jhash.h> +#include <linux/slab.h> +#include <linux/vmalloc.h> +#include <linux/reciprocal_div.h> +#include <net/netlink.h> +#include <linux/version.h> +#include <linux/if_vlan.h> +#include <net/pkt_sched.h> +#include <net/pkt_cls.h> +#include <net/tcp.h> +#include <net/flow_dissector.h> + +#if IS_ENABLED(CONFIG_NF_CONNTRACK) +#include <net/netfilter/nf_conntrack_core.h> +#endif + +#define CAKE_SET_WAYS (8) +#define CAKE_MAX_TINS (8) +#define CAKE_QUEUES (1024) +#define CAKE_FLOW_MASK 63 +#define CAKE_FLOW_NAT_FLAG 64 +#define CAKE_SPLIT_GSO_THRESHOLD (125000000) /* 1Gbps */ + +/* struct cobalt_params - contains codel and blue parameters + * @interval: codel initial drop rate + * @target: maximum persistent sojourn time & blue update rate + * @mtu_time: serialisation delay of maximum-size packet + * @p_inc: increment of blue drop probability (0.32 fxp) + * @p_dec: decrement of blue drop probability (0.32 fxp) + */ +struct cobalt_params { + u64 interval; + u64 target; + u64 mtu_time; + u32 p_inc; + u32 p_dec; +}; + +/* struct cobalt_vars - contains codel and blue variables + * @count: codel dropping frequency + * @rec_inv_sqrt: reciprocal value of sqrt(count) >> 1 + * @drop_next: time to drop next packet, or when we dropped last + * @blue_timer: Blue time to next drop + * @p_drop: BLUE drop probability (0.32 fxp) + * @dropping: set if in dropping state + * @ecn_marked: set if marked + */ +struct cobalt_vars { + u32 count; + u32 rec_inv_sqrt; + ktime_t drop_next; + ktime_t blue_timer; + u32 p_drop; + bool dropping; + bool ecn_marked; +}; + +enum { + CAKE_SET_NONE = 0, + CAKE_SET_SPARSE, + CAKE_SET_SPARSE_WAIT, /* counted in SPARSE, actually in BULK */ + CAKE_SET_BULK, + CAKE_SET_DECAYING +}; + +struct cake_flow { + /* this stuff is all needed per-flow at dequeue time */ + struct sk_buff *head; + struct sk_buff *tail; + struct list_head flowchain; + s32 deficit; + u32 dropped; + struct cobalt_vars cvars; + u16 srchost; /* index into cake_host table */ + u16 dsthost; + u8 set; +}; /* please try to keep this structure <= 64 bytes */ + +struct cake_host { + u32 srchost_tag; + u32 dsthost_tag; + u16 srchost_refcnt; + u16 dsthost_refcnt; +}; + +struct cake_heap_entry { + u16 t:3, b:10; +}; + +struct cake_tin_data { + struct cake_flow flows[CAKE_QUEUES]; + u32 backlogs[CAKE_QUEUES]; + u32 tags[CAKE_QUEUES]; /* for set association */ + u16 overflow_idx[CAKE_QUEUES]; + struct cake_host hosts[CAKE_QUEUES]; /* for triple isolation */ + u16 flow_quantum; + + struct cobalt_params cparams; + u32 drop_overlimit; + u16 bulk_flow_count; + u16 sparse_flow_count; + u16 decaying_flow_count; + u16 unresponsive_flow_count; + + u32 max_skblen; + + struct list_head new_flows; + struct list_head old_flows; + struct list_head decaying_flows; + + /* time_next = time_this + ((len * rate_ns) >> rate_shft) */ + ktime_t time_next_packet; + u64 tin_rate_ns; + u64 tin_rate_bps; + u16 tin_rate_shft; + + u16 tin_quantum_prio; + u16 tin_quantum_band; + s32 tin_deficit; + u32 tin_backlog; + u32 tin_dropped; + u32 tin_ecn_mark; + + u32 packets; + u64 bytes; + + u32 ack_drops; + + /* moving averages */ + u64 avge_delay; + u64 peak_delay; + u64 base_delay; + + /* hash function stats */ + u32 way_directs; + u32 way_hits; + u32 way_misses; + u32 way_collisions; +}; /* number of 
tins is small, so size of this struct doesn't matter much */ + +struct cake_sched_data { + struct tcf_proto __rcu *filter_list; /* optional external classifier */ + struct tcf_block *block; + struct cake_tin_data *tins; + + struct cake_heap_entry overflow_heap[CAKE_QUEUES * CAKE_MAX_TINS]; + u16 overflow_timeout; + + u16 tin_cnt; + u8 tin_mode; + u8 flow_mode; + u8 ack_filter; + u8 atm_mode; + + /* time_next = time_this + ((len * rate_ns) >> rate_shft) */ + u16 rate_shft; + ktime_t time_next_packet; + ktime_t failsafe_next_packet; + u64 rate_ns; + u64 rate_bps; + u16 rate_flags; + s16 rate_overhead; + u16 rate_mpu; + u64 interval; + u64 target; + + /* resource tracking */ + u32 buffer_used; + u32 buffer_max_used; + u32 buffer_limit; + u32 buffer_config_limit; + + /* indices for dequeue */ + u16 cur_tin; + u16 cur_flow; + + struct qdisc_watchdog watchdog; + const u8 *tin_index; + const u8 *tin_order; + + /* bandwidth capacity estimate */ + ktime_t last_packet_time; + ktime_t avg_window_begin; + u64 avg_packet_interval; + u64 avg_window_bytes; + u64 avg_peak_bandwidth; + ktime_t last_reconfig_time; + + /* packet length stats */ + u32 avg_netoff; + u16 max_netlen; + u16 max_adjlen; + u16 min_netlen; + u16 min_adjlen; +}; + +enum { + CAKE_FLAG_OVERHEAD = BIT(0), + CAKE_FLAG_AUTORATE_INGRESS = BIT(1), + CAKE_FLAG_INGRESS = BIT(2), + CAKE_FLAG_WASH = BIT(3), + CAKE_FLAG_SPLIT_GSO = BIT(4) +}; + +/* COBALT operates the Codel and BLUE algorithms in parallel, in order to + * obtain the best features of each. Codel is excellent on flows which + * respond to congestion signals in a TCP-like way. BLUE is more effective on + * unresponsive flows. + */ + +struct cobalt_skb_cb { + ktime_t enqueue_time; + u32 adjusted_len; +}; + +static u64 us_to_ns(u64 us) +{ + return us * NSEC_PER_USEC; +} + +static struct cobalt_skb_cb *get_cobalt_cb(const struct sk_buff *skb) +{ + qdisc_cb_private_validate(skb, sizeof(struct cobalt_skb_cb)); + return (struct cobalt_skb_cb *)qdisc_skb_cb(skb)->data; +} + +static ktime_t cobalt_get_enqueue_time(const struct sk_buff *skb) +{ + return get_cobalt_cb(skb)->enqueue_time; +} + +static void cobalt_set_enqueue_time(struct sk_buff *skb, + ktime_t now) +{ + get_cobalt_cb(skb)->enqueue_time = now; +} + +static u16 quantum_div[CAKE_QUEUES + 1] = {0}; + +/* Diffserv lookup tables */ + +static const u8 precedence[] = { + 0, 0, 0, 0, 0, 0, 0, 0, + 1, 1, 1, 1, 1, 1, 1, 1, + 2, 2, 2, 2, 2, 2, 2, 2, + 3, 3, 3, 3, 3, 3, 3, 3, + 4, 4, 4, 4, 4, 4, 4, 4, + 5, 5, 5, 5, 5, 5, 5, 5, + 6, 6, 6, 6, 6, 6, 6, 6, + 7, 7, 7, 7, 7, 7, 7, 7, +}; + +static const u8 diffserv8[] = { + 2, 5, 1, 2, 4, 2, 2, 2, + 0, 2, 1, 2, 1, 2, 1, 2, + 5, 2, 4, 2, 4, 2, 4, 2, + 3, 2, 3, 2, 3, 2, 3, 2, + 6, 2, 3, 2, 3, 2, 3, 2, + 6, 2, 2, 2, 6, 2, 6, 2, + 7, 2, 2, 2, 2, 2, 2, 2, + 7, 2, 2, 2, 2, 2, 2, 2, +}; + +static const u8 diffserv4[] = { + 0, 2, 0, 0, 2, 0, 0, 0, + 1, 0, 0, 0, 0, 0, 0, 0, + 2, 0, 2, 0, 2, 0, 2, 0, + 2, 0, 2, 0, 2, 0, 2, 0, + 3, 0, 2, 0, 2, 0, 2, 0, + 3, 0, 0, 0, 3, 0, 3, 0, + 3, 0, 0, 0, 0, 0, 0, 0, + 3, 0, 0, 0, 0, 0, 0, 0, +}; + +static const u8 diffserv3[] = { + 0, 0, 0, 0, 2, 0, 0, 0, + 1, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 2, 0, 2, 0, + 2, 0, 0, 0, 0, 0, 0, 0, + 2, 0, 0, 0, 0, 0, 0, 0, +}; + +static const u8 besteffort[] = { + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 
0, 0, 0, +}; + +/* tin priority order for stats dumping */ + +static const u8 normal_order[] = {0, 1, 2, 3, 4, 5, 6, 7}; +static const u8 bulk_order[] = {1, 0, 2, 3}; + +#define REC_INV_SQRT_CACHE (16) +static u32 cobalt_rec_inv_sqrt_cache[REC_INV_SQRT_CACHE] = {0}; + +/* http://en.wikipedia.org/wiki/Methods_of_computing_square_roots + * new_invsqrt = (invsqrt / 2) * (3 - count * invsqrt^2) + * + * Here, invsqrt is a fixed point number (< 1.0), 32bit mantissa, aka Q0.32 + */ + +static void cobalt_newton_step(struct cobalt_vars *vars) +{ + u32 invsqrt, invsqrt2; + u64 val; + + invsqrt = vars->rec_inv_sqrt; + invsqrt2 = ((u64)invsqrt * invsqrt) >> 32; + val = (3LL << 32) - ((u64)vars->count * invsqrt2); + + val >>= 2; /* avoid overflow in following multiply */ + val = (val * invsqrt) >> (32 - 2 + 1); + + vars->rec_inv_sqrt = val; +} + +static void cobalt_invsqrt(struct cobalt_vars *vars) +{ + if (vars->count < REC_INV_SQRT_CACHE) + vars->rec_inv_sqrt = cobalt_rec_inv_sqrt_cache[vars->count]; + else + cobalt_newton_step(vars); +} + +/* There is a big difference in timing between the accurate values placed in + * the cache and the approximations given by a single Newton step for small + * count values, particularly when stepping from count 1 to 2 or vice versa. + * Above 16, a single Newton step gives sufficient accuracy in either + * direction, given the precision stored. + * + * The magnitude of the error when stepping up to count 2 is such as to give + * the value that *should* have been produced at count 4. + */ + +static void cobalt_cache_init(void) +{ + struct cobalt_vars v; + + memset(&v, 0, sizeof(v)); + v.rec_inv_sqrt = ~0U; + cobalt_rec_inv_sqrt_cache[0] = v.rec_inv_sqrt; + + for (v.count = 1; v.count < REC_INV_SQRT_CACHE; v.count++) { + cobalt_newton_step(&v); + cobalt_newton_step(&v); + cobalt_newton_step(&v); + cobalt_newton_step(&v); + + cobalt_rec_inv_sqrt_cache[v.count] = v.rec_inv_sqrt; + } +} + +static void cobalt_vars_init(struct cobalt_vars *vars) +{ + memset(vars, 0, sizeof(*vars)); + + if (!cobalt_rec_inv_sqrt_cache[0]) { + cobalt_cache_init(); + cobalt_rec_inv_sqrt_cache[0] = ~0; + } +} + +/* CoDel control_law is t + interval/sqrt(count) + * We maintain in rec_inv_sqrt the reciprocal value of sqrt(count) to avoid + * both sqrt() and divide operation. + */ +static ktime_t cobalt_control(ktime_t t, + u64 interval, + u32 rec_inv_sqrt) +{ + return ktime_add_ns(t, reciprocal_scale(interval, + rec_inv_sqrt)); +} + +/* Call this when a packet had to be dropped due to queue overflow. Returns + * true if the BLUE state was quiescent before but active after this call. + */ +static bool cobalt_queue_full(struct cobalt_vars *vars, + struct cobalt_params *p, + ktime_t now) +{ + bool up = false; + + if (ktime_to_ns(ktime_sub(now, vars->blue_timer)) > p->target) { + up = !vars->p_drop; + vars->p_drop += p->p_inc; + if (vars->p_drop < p->p_inc) + vars->p_drop = ~0; + vars->blue_timer = now; + } + vars->dropping = true; + vars->drop_next = now; + if (!vars->count) + vars->count = 1; + + return up; +} + +/* Call this when the queue was serviced but turned out to be empty. Returns + * true if the BLUE state was active before but quiescent after this call. 
+ */ +static bool cobalt_queue_empty(struct cobalt_vars *vars, + struct cobalt_params *p, + ktime_t now) +{ + bool down = false; + + if (vars->p_drop && + ktime_to_ns(ktime_sub(now, vars->blue_timer)) > p->target) { + if (vars->p_drop < p->p_dec) + vars->p_drop = 0; + else + vars->p_drop -= p->p_dec; + vars->blue_timer = now; + down = !vars->p_drop; + } + vars->dropping = false; + + if (vars->count && ktime_to_ns(ktime_sub(now, vars->drop_next)) >= 0) { + vars->count--; + cobalt_invsqrt(vars); + vars->drop_next = cobalt_control(vars->drop_next, + p->interval, + vars->rec_inv_sqrt); + } + + return down; +} + +/* Call this with a freshly dequeued packet for possible congestion marking. + * Returns true as an instruction to drop the packet, false for delivery. + */ +static bool cobalt_should_drop(struct cobalt_vars *vars, + struct cobalt_params *p, + ktime_t now, + struct sk_buff *skb, + u32 bulk_flows) +{ + bool next_due, over_target, drop = false; + ktime_t schedule; + u64 sojourn; + +/* The 'schedule' variable records, in its sign, whether 'now' is before or + * after 'drop_next'. This allows 'drop_next' to be updated before the next + * scheduling decision is actually branched, without destroying that + * information. Similarly, the first 'schedule' value calculated is preserved + * in the boolean 'next_due'. + * + * As for 'drop_next', we take advantage of the fact that 'interval' is both + * the delay between first exceeding 'target' and the first signalling event, + * *and* the scaling factor for the signalling frequency. It's therefore very + * natural to use a single mechanism for both purposes, and eliminates a + * significant amount of reference Codel's spaghetti code. To help with this, + * both the '0' and '1' entries in the invsqrt cache are 0xFFFFFFFF, as close + * as possible to 1.0 in fixed-point. + */ + + sojourn = ktime_to_ns(ktime_sub(now, cobalt_get_enqueue_time(skb))); + schedule = ktime_sub(now, vars->drop_next); + over_target = sojourn > p->target && + sojourn > p->mtu_time * bulk_flows * 2 && + sojourn > p->mtu_time * 4; + next_due = vars->count && ktime_to_ns(schedule) >= 0; + + vars->ecn_marked = false; + + if (over_target) { + if (!vars->dropping) { + vars->dropping = true; + vars->drop_next = cobalt_control(now, + p->interval, + vars->rec_inv_sqrt); + } + if (!vars->count) + vars->count = 1; + } else if (vars->dropping) { + vars->dropping = false; + } + + if (next_due && vars->dropping) { + /* Use ECN mark if possible, otherwise drop */ + drop = !(vars->ecn_marked = INET_ECN_set_ce(skb)); + + vars->count++; + if (!vars->count) + vars->count--; + cobalt_invsqrt(vars); + vars->drop_next = cobalt_control(vars->drop_next, + p->interval, + vars->rec_inv_sqrt); + schedule = ktime_sub(now, vars->drop_next); + } else { + while (next_due) { + vars->count--; + cobalt_invsqrt(vars); + vars->drop_next = cobalt_control(vars->drop_next, + p->interval, + vars->rec_inv_sqrt); + schedule = ktime_sub(now, vars->drop_next); + next_due = vars->count && ktime_to_ns(schedule) >= 0; + } + } + + /* Simple BLUE implementation. Lack of ECN is deliberate. 
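Editorial note, not part of the patch: p_drop is a probability expressed in units of 1/2^32, so the comparison against prandom_u32() just below drops with probability p_drop / 2^32. With the constants set in cake_set_rate() further down (p_inc = 1 << 24, p_dec = 1 << 20), each congested interval raises that probability by roughly 1/256 and each quiet interval lowers it by roughly 1/4096. A minimal userspace model of the check and of the saturating increment used in cobalt_queue_full():

#include <stdbool.h>
#include <stdint.h>

// rnd stands in for prandom_u32() and is assumed uniform over [0, 2^32).
static bool blue_should_drop(uint32_t rnd, uint32_t p_drop)
{
	return rnd < p_drop;	// drop with probability p_drop / 2^32
}

// Saturating increase, as in cobalt_queue_full(): if the addition wraps,
// pin the probability at its maximum instead of wrapping back towards zero.
static uint32_t blue_increase(uint32_t p_drop, uint32_t p_inc)
{
	p_drop += p_inc;
	if (p_drop < p_inc)
		p_drop = ~0U;
	return p_drop;
}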
*/ + if (vars->p_drop) + drop |= (prandom_u32() < vars->p_drop); + + /* Overload the drop_next field as an activity timeout */ + if (!vars->count) + vars->drop_next = ktime_add_ns(now, p->interval); + else if (ktime_to_ns(schedule) > 0 && !drop) + vars->drop_next = now; + + return drop; +} + +static void cake_update_flowkeys(struct flow_keys *keys, + const struct sk_buff *skb) +{ +#if IS_ENABLED(CONFIG_NF_CONNTRACK) + struct nf_conntrack_tuple tuple = {}; + bool rev = !skb->_nfct; + + if (tc_skb_protocol(skb) != htons(ETH_P_IP)) + return; + + if (!nf_ct_get_tuple_skb(&tuple, skb)) + return; + + keys->addrs.v4addrs.src = rev ? tuple.dst.u3.ip : tuple.src.u3.ip; + keys->addrs.v4addrs.dst = rev ? tuple.src.u3.ip : tuple.dst.u3.ip; + + if (keys->ports.ports) { + keys->ports.src = rev ? tuple.dst.u.all : tuple.src.u.all; + keys->ports.dst = rev ? tuple.src.u.all : tuple.dst.u.all; + } +#endif +} + +/* Cake has several subtle multiple bit settings. In these cases you + * would be matching triple isolate mode as well. + */ + +static bool cake_dsrc(int flow_mode) +{ + return (flow_mode & CAKE_FLOW_DUAL_SRC) == CAKE_FLOW_DUAL_SRC; +} + +static bool cake_ddst(int flow_mode) +{ + return (flow_mode & CAKE_FLOW_DUAL_DST) == CAKE_FLOW_DUAL_DST; +} + +static u32 cake_hash(struct cake_tin_data *q, const struct sk_buff *skb, + int flow_mode) +{ + u32 flow_hash = 0, srchost_hash, dsthost_hash; + u16 reduced_hash, srchost_idx, dsthost_idx; + struct flow_keys keys, host_keys; + + if (unlikely(flow_mode == CAKE_FLOW_NONE)) + return 0; + + skb_flow_dissect_flow_keys(skb, &keys, + FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL); + + if (flow_mode & CAKE_FLOW_NAT_FLAG) + cake_update_flowkeys(&keys, skb); + + /* flow_hash_from_keys() sorts the addresses by value, so we have + * to preserve their order in a separate data structure to treat + * src and dst host addresses as independently selectable. + */ + host_keys = keys; + host_keys.ports.ports = 0; + host_keys.basic.ip_proto = 0; + host_keys.keyid.keyid = 0; + host_keys.tags.flow_label = 0; + + switch (host_keys.control.addr_type) { + case FLOW_DISSECTOR_KEY_IPV4_ADDRS: + host_keys.addrs.v4addrs.src = 0; + dsthost_hash = flow_hash_from_keys(&host_keys); + host_keys.addrs.v4addrs.src = keys.addrs.v4addrs.src; + host_keys.addrs.v4addrs.dst = 0; + srchost_hash = flow_hash_from_keys(&host_keys); + break; + + case FLOW_DISSECTOR_KEY_IPV6_ADDRS: + memset(&host_keys.addrs.v6addrs.src, 0, + sizeof(host_keys.addrs.v6addrs.src)); + dsthost_hash = flow_hash_from_keys(&host_keys); + host_keys.addrs.v6addrs.src = keys.addrs.v6addrs.src; + memset(&host_keys.addrs.v6addrs.dst, 0, + sizeof(host_keys.addrs.v6addrs.dst)); + srchost_hash = flow_hash_from_keys(&host_keys); + break; + + default: + dsthost_hash = 0; + srchost_hash = 0; + } + + /* This *must* be after the above switch, since as a + * side-effect it sorts the src and dst addresses. 
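Editorial sketch, not part of the patch: the flow-mode tests above (cake_dsrc()/cake_ddst()) and below are easier to follow with the uapi values written out. The values in this snippet are an assumption based on the flag names, since the tc_cake uapi header is not part of this hunk; the point is that the dual modes are multi-bit patterns, so a full-pattern compare is needed (and deliberately also matches triple-isolation mode), while a plain bit test would fire for the wrong dual mode as well.

#include <assert.h>

// Assumed uapi values (not shown in this hunk):
enum {
	CAKE_FLOW_NONE     = 0,
	CAKE_FLOW_SRC_IP   = 1,
	CAKE_FLOW_DST_IP   = 2,
	CAKE_FLOW_HOSTS    = 3,	// SRC_IP | DST_IP
	CAKE_FLOW_FLOWS    = 4,
	CAKE_FLOW_DUAL_SRC = 5,	// FLOWS | SRC_IP
	CAKE_FLOW_DUAL_DST = 6,	// FLOWS | DST_IP
	CAKE_FLOW_TRIPLE   = 7,	// FLOWS | HOSTS
};

int main(void)
{
	// Full-pattern compare: true for dual-srchost and triple isolation...
	assert((CAKE_FLOW_DUAL_SRC & CAKE_FLOW_DUAL_SRC) == CAKE_FLOW_DUAL_SRC);
	assert((CAKE_FLOW_TRIPLE   & CAKE_FLOW_DUAL_SRC) == CAKE_FLOW_DUAL_SRC);
	// ...but not for dual-dsthost:
	assert((CAKE_FLOW_DUAL_DST & CAKE_FLOW_DUAL_SRC) != CAKE_FLOW_DUAL_SRC);
	// A plain bit test would also fire for dual-dsthost, hence the
	// full-pattern compare in cake_dsrc()/cake_ddst():
	assert((CAKE_FLOW_DUAL_DST & CAKE_FLOW_DUAL_SRC) != 0);
	return 0;
}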
+ */ + if (flow_mode & CAKE_FLOW_FLOWS) + flow_hash = flow_hash_from_keys(&keys); + + if (!(flow_mode & CAKE_FLOW_FLOWS)) { + if (flow_mode & CAKE_FLOW_SRC_IP) + flow_hash ^= srchost_hash; + + if (flow_mode & CAKE_FLOW_DST_IP) + flow_hash ^= dsthost_hash; + } + + reduced_hash = flow_hash % CAKE_QUEUES; + + /* set-associative hashing */ + /* fast path if no hash collision (direct lookup succeeds) */ + if (likely(q->tags[reduced_hash] == flow_hash && + q->flows[reduced_hash].set)) { + q->way_directs++; + } else { + u32 inner_hash = reduced_hash % CAKE_SET_WAYS; + u32 outer_hash = reduced_hash - inner_hash; + bool allocate_src = false; + bool allocate_dst = false; + u32 i, k; + + /* check if any active queue in the set is reserved for + * this flow. + */ + for (i = 0, k = inner_hash; i < CAKE_SET_WAYS; + i++, k = (k + 1) % CAKE_SET_WAYS) { + if (q->tags[outer_hash + k] == flow_hash) { + if (i) + q->way_hits++; + + if (!q->flows[outer_hash + k].set) { + /* need to increment host refcnts */ + allocate_src = cake_dsrc(flow_mode); + allocate_dst = cake_ddst(flow_mode); + } + + goto found; + } + } + + /* no queue is reserved for this flow, look for an + * empty one. + */ + for (i = 0; i < CAKE_SET_WAYS; + i++, k = (k + 1) % CAKE_SET_WAYS) { + if (!q->flows[outer_hash + k].set) { + q->way_misses++; + allocate_src = cake_dsrc(flow_mode); + allocate_dst = cake_ddst(flow_mode); + goto found; + } + } + + /* With no empty queues, default to the original + * queue, accept the collision, update the host tags. + */ + q->way_collisions++; + q->hosts[q->flows[reduced_hash].srchost].srchost_refcnt--; + q->hosts[q->flows[reduced_hash].dsthost].dsthost_refcnt--; + allocate_src = cake_dsrc(flow_mode); + allocate_dst = cake_ddst(flow_mode); +found: + /* reserve queue for future packets in same flow */ + reduced_hash = outer_hash + k; + q->tags[reduced_hash] = flow_hash; + + if (allocate_src) { + srchost_idx = srchost_hash % CAKE_QUEUES; + inner_hash = srchost_idx % CAKE_SET_WAYS; + outer_hash = srchost_idx - inner_hash; + for (i = 0, k = inner_hash; i < CAKE_SET_WAYS; + i++, k = (k + 1) % CAKE_SET_WAYS) { + if (q->hosts[outer_hash + k].srchost_tag == + srchost_hash) + goto found_src; + } + for (i = 0; i < CAKE_SET_WAYS; + i++, k = (k + 1) % CAKE_SET_WAYS) { + if (!q->hosts[outer_hash + k].srchost_refcnt) + break; + } + q->hosts[outer_hash + k].srchost_tag = srchost_hash; +found_src: + srchost_idx = outer_hash + k; + q->hosts[srchost_idx].srchost_refcnt++; + q->flows[reduced_hash].srchost = srchost_idx; + } + + if (allocate_dst) { + dsthost_idx = dsthost_hash % CAKE_QUEUES; + inner_hash = dsthost_idx % CAKE_SET_WAYS; + outer_hash = dsthost_idx - inner_hash; + for (i = 0, k = inner_hash; i < CAKE_SET_WAYS; + i++, k = (k + 1) % CAKE_SET_WAYS) { + if (q->hosts[outer_hash + k].dsthost_tag == + dsthost_hash) + goto found_dst; + } + for (i = 0; i < CAKE_SET_WAYS; + i++, k = (k + 1) % CAKE_SET_WAYS) { + if (!q->hosts[outer_hash + k].dsthost_refcnt) + break; + } + q->hosts[outer_hash + k].dsthost_tag = dsthost_hash; +found_dst: + dsthost_idx = outer_hash + k; + q->hosts[dsthost_idx].dsthost_refcnt++; + q->flows[reduced_hash].dsthost = dsthost_idx; + } + } + + return reduced_hash; +} + +/* helper functions : might be changed when/if skb use a standard list_head */ +/* remove one skb from head of slot queue */ + +static struct sk_buff *dequeue_head(struct cake_flow *flow) +{ + struct sk_buff *skb = flow->head; + + if (skb) { + flow->head = skb->next; + skb->next = NULL; + } + + return skb; +} + +/* add skb to flow queue 
(tail add) */ + +static void flow_queue_add(struct cake_flow *flow, struct sk_buff *skb) +{ + if (!flow->head) + flow->head = skb; + else + flow->tail->next = skb; + flow->tail = skb; + skb->next = NULL; +} + +static struct iphdr *cake_get_iphdr(const struct sk_buff *skb, + struct ipv6hdr *buf) +{ + unsigned int offset = skb_network_offset(skb); + struct iphdr *iph; + + iph = skb_header_pointer(skb, offset, sizeof(struct iphdr), buf); + + if (!iph) + return NULL; + + if (iph->version == 4 && iph->protocol == IPPROTO_IPV6) + return skb_header_pointer(skb, offset + iph->ihl * 4, + sizeof(struct ipv6hdr), buf); + + else if (iph->version == 4) + return iph; + + else if (iph->version == 6) + return skb_header_pointer(skb, offset, sizeof(struct ipv6hdr), + buf); + + return NULL; +} + +static struct tcphdr *cake_get_tcphdr(const struct sk_buff *skb, + void *buf, unsigned int bufsize) +{ + unsigned int offset = skb_network_offset(skb); + const struct ipv6hdr *ipv6h; + const struct tcphdr *tcph; + const struct iphdr *iph; + struct ipv6hdr _ipv6h; + struct tcphdr _tcph; + + ipv6h = skb_header_pointer(skb, offset, sizeof(_ipv6h), &_ipv6h); + + if (!ipv6h) + return NULL; + + if (ipv6h->version == 4) { + iph = (struct iphdr *)ipv6h; + offset += iph->ihl * 4; + + /* special-case 6in4 tunnelling, as that is a common way to get + * v6 connectivity in the home + */ + if (iph->protocol == IPPROTO_IPV6) { + ipv6h = skb_header_pointer(skb, offset, + sizeof(_ipv6h), &_ipv6h); + + if (!ipv6h || ipv6h->nexthdr != IPPROTO_TCP) + return NULL; + + offset += sizeof(struct ipv6hdr); + + } else if (iph->protocol != IPPROTO_TCP) { + return NULL; + } + + } else if (ipv6h->version == 6) { + if (ipv6h->nexthdr != IPPROTO_TCP) + return NULL; + + offset += sizeof(struct ipv6hdr); + } else { + return NULL; + } + + tcph = skb_header_pointer(skb, offset, sizeof(_tcph), &_tcph); + if (!tcph) + return NULL; + + return skb_header_pointer(skb, offset, + min(__tcp_hdrlen(tcph), bufsize), buf); +} + +static const void *cake_get_tcpopt(const struct tcphdr *tcph, + int code, int *oplen) +{ + /* inspired by tcp_parse_options in tcp_input.c */ + int length = __tcp_hdrlen(tcph) - sizeof(struct tcphdr); + const u8 *ptr = (const u8 *)(tcph + 1); + + while (length > 0) { + int opcode = *ptr++; + int opsize; + + if (opcode == TCPOPT_EOL) + break; + if (opcode == TCPOPT_NOP) { + length--; + continue; + } + opsize = *ptr++; + if (opsize < 2 || opsize > length) + break; + + if (opcode == code) { + *oplen = opsize; + return ptr; + } + + ptr += opsize - 2; + length -= opsize; + } + + return NULL; +} + +/* Compare two SACK sequences. A sequence is considered greater if it SACKs more + * bytes than the other. In the case where both sequences ACKs bytes that the + * other doesn't, A is considered greater. DSACKs in A also makes A be + * considered greater. 
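Editorial sketch, not part of the patch: the comparison rule described above can be modelled in a few lines of userspace C. This is a deliberately simplified, hypothetical model (no DSACK detection, no sequence-number wrap handling, and it assumes both option blocks are present); it only illustrates the "A wins if it SACKs anything B does not cover, otherwise B wins only if it covers strictly more bytes" rule.

#include <stdbool.h>
#include <stdint.h>

struct sack_range { uint32_t start, end; };

// Returns -1 if a "wins", 1 if b covers everything in a and more bytes,
// 0 if they are equivalent - the same convention as the real function.
static int sack_compare_model(const struct sack_range *a, int na,
			      const struct sack_range *b, int nb)
{
	uint64_t bytes_a = 0, bytes_b = 0;
	int i, j;

	for (j = 0; j < nb; j++)
		bytes_b += b[j].end - b[j].start;

	for (i = 0; i < na; i++) {
		bool covered = false;

		bytes_a += a[i].end - a[i].start;
		for (j = 0; j < nb; j++)
			if (b[j].start <= a[i].start && b[j].end >= a[i].end)
				covered = true;
		if (!covered)
			return -1;	// a SACKs bytes that b does not
	}
	return bytes_b > bytes_a ? 1 : 0;
}

For example, a = {[100,200)} against b = {[100,400)} yields 1, while a = {[100,200), [500,600)} against the same b yields -1.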
+ * + * @return -1, 0 or 1 as normal compare functions + */ +static int cake_tcph_sack_compare(const struct tcphdr *tcph_a, + const struct tcphdr *tcph_b) +{ + const struct tcp_sack_block_wire *sack_a, *sack_b; + u32 ack_seq_a = ntohl(tcph_a->ack_seq); + u32 bytes_a = 0, bytes_b = 0; + int oplen_a, oplen_b; + bool first = true; + + sack_a = cake_get_tcpopt(tcph_a, TCPOPT_SACK, &oplen_a); + sack_b = cake_get_tcpopt(tcph_b, TCPOPT_SACK, &oplen_b); + + /* pointers point to option contents */ + oplen_a -= TCPOLEN_SACK_BASE; + oplen_b -= TCPOLEN_SACK_BASE; + + if (sack_a && oplen_a >= sizeof(*sack_a) && + (!sack_b || oplen_b < sizeof(*sack_b))) + return -1; + else if (sack_b && oplen_b >= sizeof(*sack_b) && + (!sack_a || oplen_a < sizeof(*sack_a))) + return 1; + else if ((!sack_a || oplen_a < sizeof(*sack_a)) && + (!sack_b || oplen_b < sizeof(*sack_b))) + return 0; + + while (oplen_a >= sizeof(*sack_a)) { + const struct tcp_sack_block_wire *sack_tmp = sack_b; + u32 start_a = get_unaligned_be32(&sack_a->start_seq); + u32 end_a = get_unaligned_be32(&sack_a->end_seq); + int oplen_tmp = oplen_b; + bool found = false; + + /* DSACK; always considered greater to prevent dropping */ + if (before(start_a, ack_seq_a)) + return -1; + + bytes_a += end_a - start_a; + + while (oplen_tmp >= sizeof(*sack_tmp)) { + u32 start_b = get_unaligned_be32(&sack_tmp->start_seq); + u32 end_b = get_unaligned_be32(&sack_tmp->end_seq); + + /* first time through we count the total size */ + if (first) + bytes_b += end_b - start_b; + + if (!after(start_b, start_a) && !before(end_b, end_a)) { + found = true; + if (!first) + break; + } + oplen_tmp -= sizeof(*sack_tmp); + sack_tmp++; + } + + if (!found) + return -1; + + oplen_a -= sizeof(*sack_a); + sack_a++; + first = false; + } + + /* If we made it this far, all ranges SACKed by A are covered by B, so + * either the SACKs are equal, or B SACKs more bytes. + */ + return bytes_b > bytes_a ? 
1 : 0; +} + +static void cake_tcph_get_tstamp(const struct tcphdr *tcph, + u32 *tsval, u32 *tsecr) +{ + const u8 *ptr; + int opsize; + + ptr = cake_get_tcpopt(tcph, TCPOPT_TIMESTAMP, &opsize); + + if (ptr && opsize == TCPOLEN_TIMESTAMP) { + *tsval = get_unaligned_be32(ptr); + *tsecr = get_unaligned_be32(ptr + 4); + } +} + +static bool cake_tcph_may_drop(const struct tcphdr *tcph, + u32 tstamp_new, u32 tsecr_new) +{ + /* inspired by tcp_parse_options in tcp_input.c */ + int length = __tcp_hdrlen(tcph) - sizeof(struct tcphdr); + const u8 *ptr = (const u8 *)(tcph + 1); + u32 tstamp, tsecr; + + /* 3 reserved flags must be unset to avoid future breakage + * ACK must be set + * ECE/CWR are handled separately + * All other flags URG/PSH/RST/SYN/FIN must be unset + * 0x0FFF0000 = all TCP flags (confirm ACK=1, others zero) + * 0x00C00000 = CWR/ECE (handled separately) + * 0x0F3F0000 = 0x0FFF0000 & ~0x00C00000 + */ + if (((tcp_flag_word(tcph) & + cpu_to_be32(0x0F3F0000)) != TCP_FLAG_ACK)) + return false; + + while (length > 0) { + int opcode = *ptr++; + int opsize; + + if (opcode == TCPOPT_EOL) + break; + if (opcode == TCPOPT_NOP) { + length--; + continue; + } + opsize = *ptr++; + if (opsize < 2 || opsize > length) + break; + + switch (opcode) { + case TCPOPT_MD5SIG: /* doesn't influence state */ + break; + + case TCPOPT_SACK: /* stricter checking performed later */ + if (opsize % 8 != 2) + return false; + break; + + case TCPOPT_TIMESTAMP: + /* only drop timestamps lower than new */ + if (opsize != TCPOLEN_TIMESTAMP) + return false; + tstamp = get_unaligned_be32(ptr); + tsecr = get_unaligned_be32(ptr + 4); + if (after(tstamp, tstamp_new) || + after(tsecr, tsecr_new)) + return false; + break; + + case TCPOPT_MSS: /* these should only be set on SYN */ + case TCPOPT_WINDOW: + case TCPOPT_SACK_PERM: + case TCPOPT_FASTOPEN: + case TCPOPT_EXP: + default: /* don't drop if any unknown options are present */ + return false; + } + + ptr += opsize - 2; + length -= opsize; + } + + return true; +} + +static struct sk_buff *cake_ack_filter(struct cake_sched_data *q, + struct cake_flow *flow) +{ + bool aggressive = q->ack_filter == CAKE_ACK_AGGRESSIVE; + struct sk_buff *elig_ack = NULL, *elig_ack_prev = NULL; + struct sk_buff *skb_check, *skb_prev = NULL; + const struct ipv6hdr *ipv6h, *ipv6h_check; + unsigned char _tcph[64], _tcph_check[64]; + const struct tcphdr *tcph, *tcph_check; + const struct iphdr *iph, *iph_check; + struct ipv6hdr _iph, _iph_check; + const struct sk_buff *skb; + int seglen, num_found = 0; + u32 tstamp = 0, tsecr = 0; + __be32 elig_flags = 0; + int sack_comp; + + /* no other possible ACKs to filter */ + if (flow->head == flow->tail) + return NULL; + + skb = flow->tail; + tcph = cake_get_tcphdr(skb, _tcph, sizeof(_tcph)); + iph = cake_get_iphdr(skb, &_iph); + if (!tcph) + return NULL; + + cake_tcph_get_tstamp(tcph, &tstamp, &tsecr); + + /* the 'triggering' packet need only have the ACK flag set. + * also check that SYN is not set, as there won't be any previous ACKs. + */ + if ((tcp_flag_word(tcph) & + (TCP_FLAG_ACK | TCP_FLAG_SYN)) != TCP_FLAG_ACK) + return NULL; + + /* the 'triggering' ACK is at the tail of the queue, we have already + * returned if it is the only packet in the flow. loop through the rest + * of the queue looking for pure ACKs with the same 5-tuple as the + * triggering one. 
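Editorial note, not part of the patch: the flag mask used in cake_tcph_may_drop() above folds the whole "pure ACK" test into one compare against the 32-bit word at TCP header offset 12 (data offset, reserved bits, flags, window). The sketch below uses the host-order values behind the kernel's TCP_FLAG_* constants to show why masking with 0x0F3F0000 and comparing against the ACK bit means "ACK set, everything except CWR/ECE clear".

#include <assert.h>
#include <stdint.h>

#define FLAG_CWR 0x00800000u
#define FLAG_ECE 0x00400000u
#define FLAG_URG 0x00200000u
#define FLAG_ACK 0x00100000u
#define FLAG_PSH 0x00080000u
#define FLAG_RST 0x00040000u
#define FLAG_SYN 0x00020000u
#define FLAG_FIN 0x00010000u

int main(void)
{
	uint32_t all_flags = 0x0FFF0000u;	// reserved + NS + the 8 flag bits
	uint32_t mask = all_flags & ~(FLAG_CWR | FLAG_ECE);

	assert(mask == 0x0F3F0000u);
	// A pure ACK passes, with or without the ECN signalling bits:
	assert((FLAG_ACK & mask) == FLAG_ACK);
	assert(((FLAG_ACK | FLAG_ECE) & mask) == FLAG_ACK);
	// Any other flag set (here PSH) makes the test fail:
	assert(((FLAG_ACK | FLAG_PSH) & mask) != FLAG_ACK);
	return 0;
}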
+ */ + for (skb_check = flow->head; + skb_check && skb_check != skb; + skb_prev = skb_check, skb_check = skb_check->next) { + iph_check = cake_get_iphdr(skb_check, &_iph_check); + tcph_check = cake_get_tcphdr(skb_check, &_tcph_check, + sizeof(_tcph_check)); + + /* only TCP packets with matching 5-tuple are eligible, and only + * drop safe headers + */ + if (!tcph_check || iph->version != iph_check->version || + tcph_check->source != tcph->source || + tcph_check->dest != tcph->dest) + continue; + + if (iph_check->version == 4) { + if (iph_check->saddr != iph->saddr || + iph_check->daddr != iph->daddr) + continue; + + seglen = ntohs(iph_check->tot_len) - + (4 * iph_check->ihl); + } else if (iph_check->version == 6) { + ipv6h = (struct ipv6hdr *)iph; + ipv6h_check = (struct ipv6hdr *)iph_check; + + if (ipv6_addr_cmp(&ipv6h_check->saddr, &ipv6h->saddr) || + ipv6_addr_cmp(&ipv6h_check->daddr, &ipv6h->daddr)) + continue; + + seglen = ntohs(ipv6h_check->payload_len); + } else { + WARN_ON(1); /* shouldn't happen */ + continue; + } + + /* If the ECE/CWR flags changed from the previous eligible + * packet in the same flow, we should no longer be dropping that + * previous packet as this would lose information. + */ + if (elig_ack && (tcp_flag_word(tcph_check) & + (TCP_FLAG_ECE | TCP_FLAG_CWR)) != elig_flags) { + elig_ack = NULL; + elig_ack_prev = NULL; + num_found--; + } + + /* Check TCP options and flags, don't drop ACKs with segment + * data, and don't drop ACKs with a higher cumulative ACK + * counter than the triggering packet. Check ACK seqno here to + * avoid parsing SACK options of packets we are going to exclude + * anyway. + */ + if (!cake_tcph_may_drop(tcph_check, tstamp, tsecr) || + (seglen - __tcp_hdrlen(tcph_check)) != 0 || + after(ntohl(tcph_check->ack_seq), ntohl(tcph->ack_seq))) + continue; + + /* Check SACK options. The triggering packet must SACK more data + * than the ACK under consideration, or SACK the same range but + * have a larger cumulative ACK counter. The latter is a + * pathological case, but is contained in the following check + * anyway, just to be safe. + */ + sack_comp = cake_tcph_sack_compare(tcph_check, tcph); + + if (sack_comp < 0 || + (ntohl(tcph_check->ack_seq) == ntohl(tcph->ack_seq) && + sack_comp == 0)) + continue; + + /* At this point we have found an eligible pure ACK to drop; if + * we are in aggressive mode, we are done. Otherwise, keep + * searching unless this is the second eligible ACK we + * found. + * + * Since we want to drop ACK closest to the head of the queue, + * save the first eligible ACK we find, even if we need to loop + * again. + */ + if (!elig_ack) { + elig_ack = skb_check; + elig_ack_prev = skb_prev; + elig_flags = (tcp_flag_word(tcph_check) + & (TCP_FLAG_ECE | TCP_FLAG_CWR)); + } + + if (num_found++ > 0) + goto found; + } + + /* We made it through the queue without finding two eligible ACKs . If + * we found a single eligible ACK we can drop it in aggressive mode if + * we can guarantee that this does not interfere with ECN flag + * information. We ensure this by dropping it only if the enqueued + * packet is consecutive with the eligible ACK, and their flags match. 
+ */ + if (elig_ack && aggressive && elig_ack->next == skb && + (elig_flags == (tcp_flag_word(tcph) & + (TCP_FLAG_ECE | TCP_FLAG_CWR)))) + goto found; + + return NULL; + +found: + if (elig_ack_prev) + elig_ack_prev->next = elig_ack->next; + else + flow->head = elig_ack->next; + + elig_ack->next = NULL; + + return elig_ack; +} + +static u64 cake_ewma(u64 avg, u64 sample, u32 shift) +{ + avg -= avg >> shift; + avg += sample >> shift; + return avg; +} + +static u32 cake_calc_overhead(struct cake_sched_data *q, u32 len, u32 off) +{ + if (q->rate_flags & CAKE_FLAG_OVERHEAD) + len -= off; + + if (q->max_netlen < len) + q->max_netlen = len; + if (q->min_netlen > len) + q->min_netlen = len; + + len += q->rate_overhead; + + if (len < q->rate_mpu) + len = q->rate_mpu; + + if (q->atm_mode == CAKE_ATM_ATM) { + len += 47; + len /= 48; + len *= 53; + } else if (q->atm_mode == CAKE_ATM_PTM) { + /* Add one byte per 64 bytes or part thereof. + * This is conservative and easier to calculate than the + * precise value. + */ + len += (len + 63) / 64; + } + + if (q->max_adjlen < len) + q->max_adjlen = len; + if (q->min_adjlen > len) + q->min_adjlen = len; + + return len; +} + +static u32 cake_overhead(struct cake_sched_data *q, const struct sk_buff *skb) +{ + const struct skb_shared_info *shinfo = skb_shinfo(skb); + unsigned int hdr_len, last_len = 0; + u32 off = skb_network_offset(skb); + u32 len = qdisc_pkt_len(skb); + u16 segs = 1; + + q->avg_netoff = cake_ewma(q->avg_netoff, off << 16, 8); + + if (!shinfo->gso_size) + return cake_calc_overhead(q, len, off); + + /* borrowed from qdisc_pkt_len_init() */ + hdr_len = skb_transport_header(skb) - skb_mac_header(skb); + + /* + transport layer */ + if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | + SKB_GSO_TCPV6))) { + const struct tcphdr *th; + struct tcphdr _tcphdr; + + th = skb_header_pointer(skb, skb_transport_offset(skb), + sizeof(_tcphdr), &_tcphdr); + if (likely(th)) + hdr_len += __tcp_hdrlen(th); + } else { + struct udphdr _udphdr; + + if (skb_header_pointer(skb, skb_transport_offset(skb), + sizeof(_udphdr), &_udphdr)) + hdr_len += sizeof(struct udphdr); + } + + if (unlikely(shinfo->gso_type & SKB_GSO_DODGY)) + segs = DIV_ROUND_UP(skb->len - hdr_len, + shinfo->gso_size); + else + segs = shinfo->gso_segs; + + len = shinfo->gso_size + hdr_len; + last_len = skb->len - shinfo->gso_size * (segs - 1); + + return (cake_calc_overhead(q, len, off) * (segs - 1) + + cake_calc_overhead(q, last_len, off)); +} + +static void cake_heap_swap(struct cake_sched_data *q, u16 i, u16 j) +{ + struct cake_heap_entry ii = q->overflow_heap[i]; + struct cake_heap_entry jj = q->overflow_heap[j]; + + q->overflow_heap[i] = jj; + q->overflow_heap[j] = ii; + + q->tins[ii.t].overflow_idx[ii.b] = j; + q->tins[jj.t].overflow_idx[jj.b] = i; +} + +static u32 cake_heap_get_backlog(const struct cake_sched_data *q, u16 i) +{ + struct cake_heap_entry ii = q->overflow_heap[i]; + + return q->tins[ii.t].backlogs[ii.b]; +} + +static void cake_heapify(struct cake_sched_data *q, u16 i) +{ + static const u32 a = CAKE_MAX_TINS * CAKE_QUEUES; + u32 mb = cake_heap_get_backlog(q, i); + u32 m = i; + + while (m < a) { + u32 l = m + m + 1; + u32 r = l + 1; + + if (l < a) { + u32 lb = cake_heap_get_backlog(q, l); + + if (lb > mb) { + m = l; + mb = lb; + } + } + + if (r < a) { + u32 rb = cake_heap_get_backlog(q, r); + + if (rb > mb) { + m = r; + mb = rb; + } + } + + if (m != i) { + cake_heap_swap(q, i, m); + i = m; + } else { + break; + } + } +} + +static void cake_heapify_up(struct cake_sched_data *q, u16 i) +{ 
+ while (i > 0 && i < CAKE_MAX_TINS * CAKE_QUEUES) { + u16 p = (i - 1) >> 1; + u32 ib = cake_heap_get_backlog(q, i); + u32 pb = cake_heap_get_backlog(q, p); + + if (ib > pb) { + cake_heap_swap(q, i, p); + i = p; + } else { + break; + } + } +} + +static int cake_advance_shaper(struct cake_sched_data *q, + struct cake_tin_data *b, + struct sk_buff *skb, + ktime_t now, bool drop) +{ + u32 len = get_cobalt_cb(skb)->adjusted_len; + + /* charge packet bandwidth to this tin + * and to the global shaper. + */ + if (q->rate_ns) { + u64 tin_dur = (len * b->tin_rate_ns) >> b->tin_rate_shft; + u64 global_dur = (len * q->rate_ns) >> q->rate_shft; + u64 failsafe_dur = global_dur + (global_dur >> 1); + + if (ktime_before(b->time_next_packet, now)) + b->time_next_packet = ktime_add_ns(b->time_next_packet, + tin_dur); + + else if (ktime_before(b->time_next_packet, + ktime_add_ns(now, tin_dur))) + b->time_next_packet = ktime_add_ns(now, tin_dur); + + q->time_next_packet = ktime_add_ns(q->time_next_packet, + global_dur); + if (!drop) + q->failsafe_next_packet = \ + ktime_add_ns(q->failsafe_next_packet, + failsafe_dur); + } + return len; +} + +static unsigned int cake_drop(struct Qdisc *sch, struct sk_buff **to_free) +{ + struct cake_sched_data *q = qdisc_priv(sch); + ktime_t now = ktime_get(); + u32 idx = 0, tin = 0, len; + struct cake_heap_entry qq; + struct cake_tin_data *b; + struct cake_flow *flow; + struct sk_buff *skb; + + if (!q->overflow_timeout) { + int i; + /* Build fresh max-heap */ + for (i = CAKE_MAX_TINS * CAKE_QUEUES / 2; i >= 0; i--) + cake_heapify(q, i); + } + q->overflow_timeout = 65535; + + /* select longest queue for pruning */ + qq = q->overflow_heap[0]; + tin = qq.t; + idx = qq.b; + + b = &q->tins[tin]; + flow = &b->flows[idx]; + skb = dequeue_head(flow); + if (unlikely(!skb)) { + /* heap has gone wrong, rebuild it next time */ + q->overflow_timeout = 0; + return idx + (tin << 16); + } + + if (cobalt_queue_full(&flow->cvars, &b->cparams, now)) + b->unresponsive_flow_count++; + + len = qdisc_pkt_len(skb); + q->buffer_used -= skb->truesize; + b->backlogs[idx] -= len; + b->tin_backlog -= len; + sch->qstats.backlog -= len; + qdisc_tree_reduce_backlog(sch, 1, len); + + flow->dropped++; + b->tin_dropped++; + sch->qstats.drops++; + + if (q->rate_flags & CAKE_FLAG_INGRESS) + cake_advance_shaper(q, b, skb, now, true); + + __qdisc_drop(skb, to_free); + sch->q.qlen--; + + cake_heapify(q, 0); + + return idx + (tin << 16); +} + +static void cake_wash_diffserv(struct sk_buff *skb) +{ + switch (skb->protocol) { + case htons(ETH_P_IP): + ipv4_change_dsfield(ip_hdr(skb), INET_ECN_MASK, 0); + break; + case htons(ETH_P_IPV6): + ipv6_change_dsfield(ipv6_hdr(skb), INET_ECN_MASK, 0); + break; + default: + break; + } +} + +static u8 cake_handle_diffserv(struct sk_buff *skb, u16 wash) +{ + u8 dscp; + + switch (skb->protocol) { + case htons(ETH_P_IP): + dscp = ipv4_get_dsfield(ip_hdr(skb)) >> 2; + if (wash && dscp) + ipv4_change_dsfield(ip_hdr(skb), INET_ECN_MASK, 0); + return dscp; + + case htons(ETH_P_IPV6): + dscp = ipv6_get_dsfield(ipv6_hdr(skb)) >> 2; + if (wash && dscp) + ipv6_change_dsfield(ipv6_hdr(skb), INET_ECN_MASK, 0); + return dscp; + + case htons(ETH_P_ARP): + return 0x38; /* CS7 - Net Control */ + + default: + /* If there is no Diffserv field, treat as best-effort */ + return 0; + } +} + +static struct cake_tin_data *cake_select_tin(struct Qdisc *sch, + struct sk_buff *skb) +{ + struct cake_sched_data *q = qdisc_priv(sch); + u32 tin; + + if (TC_H_MAJ(skb->priority) == sch->handle && + 
TC_H_MIN(skb->priority) > 0 && + TC_H_MIN(skb->priority) <= q->tin_cnt) { + tin = q->tin_order[TC_H_MIN(skb->priority) - 1]; + + if (q->rate_flags & CAKE_FLAG_WASH) + cake_wash_diffserv(skb); + } else if (q->tin_mode != CAKE_DIFFSERV_BESTEFFORT) { + /* extract the Diffserv Precedence field, if it exists */ + /* and clear DSCP bits if washing */ + tin = q->tin_index[cake_handle_diffserv(skb, + q->rate_flags & CAKE_FLAG_WASH)]; + if (unlikely(tin >= q->tin_cnt)) + tin = 0; + } else { + tin = 0; + if (q->rate_flags & CAKE_FLAG_WASH) + cake_wash_diffserv(skb); + } + + return &q->tins[tin]; +} + +static u32 cake_classify(struct Qdisc *sch, struct cake_tin_data **t, + struct sk_buff *skb, int flow_mode, int *qerr) +{ + struct cake_sched_data *q = qdisc_priv(sch); + struct tcf_proto *filter; + struct tcf_result res; + u32 flow = 0; + int result; + + filter = rcu_dereference_bh(q->filter_list); + if (!filter) + goto hash; + + *qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS; + result = tcf_classify(skb, filter, &res, false); + + if (result >= 0) { +#ifdef CONFIG_NET_CLS_ACT + switch (result) { + case TC_ACT_STOLEN: + case TC_ACT_QUEUED: + case TC_ACT_TRAP: + *qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN; + /* fall through */ + case TC_ACT_SHOT: + return 0; + } +#endif + if (TC_H_MIN(res.classid) <= CAKE_QUEUES) + flow = TC_H_MIN(res.classid); + } +hash: + *t = cake_select_tin(sch, skb); + return flow ?: cake_hash(*t, skb, flow_mode) + 1; +} + +static void cake_reconfigure(struct Qdisc *sch); + +static s32 cake_enqueue(struct sk_buff *skb, struct Qdisc *sch, + struct sk_buff **to_free) +{ + struct cake_sched_data *q = qdisc_priv(sch); + int len = qdisc_pkt_len(skb); + int uninitialized_var(ret); + struct sk_buff *ack = NULL; + ktime_t now = ktime_get(); + struct cake_tin_data *b; + struct cake_flow *flow; + u32 idx; + + /* choose flow to insert into */ + idx = cake_classify(sch, &b, skb, q->flow_mode, &ret); + if (idx == 0) { + if (ret & __NET_XMIT_BYPASS) + qdisc_qstats_drop(sch); + __qdisc_drop(skb, to_free); + return ret; + } + idx--; + flow = &b->flows[idx]; + + /* ensure shaper state isn't stale */ + if (!b->tin_backlog) { + if (ktime_before(b->time_next_packet, now)) + b->time_next_packet = now; + + if (!sch->q.qlen) { + if (ktime_before(q->time_next_packet, now)) { + q->failsafe_next_packet = now; + q->time_next_packet = now; + } else if (ktime_after(q->time_next_packet, now) && + ktime_after(q->failsafe_next_packet, now)) { + u64 next = \ + min(ktime_to_ns(q->time_next_packet), + ktime_to_ns( + q->failsafe_next_packet)); + sch->qstats.overlimits++; + qdisc_watchdog_schedule_ns(&q->watchdog, next); + } + } + } + + if (unlikely(len > b->max_skblen)) + b->max_skblen = len; + + if (skb_is_gso(skb) && q->rate_flags & CAKE_FLAG_SPLIT_GSO) { + struct sk_buff *segs, *nskb; + netdev_features_t features = netif_skb_features(skb); + unsigned int slen = 0; + + segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK); + if (IS_ERR_OR_NULL(segs)) + return qdisc_drop(skb, sch, to_free); + + while (segs) { + nskb = segs->next; + segs->next = NULL; + qdisc_skb_cb(segs)->pkt_len = segs->len; + cobalt_set_enqueue_time(segs, now); + get_cobalt_cb(segs)->adjusted_len = cake_overhead(q, + segs); + flow_queue_add(flow, segs); + + sch->q.qlen++; + slen += segs->len; + q->buffer_used += segs->truesize; + b->packets++; + segs = nskb; + } + + /* stats */ + b->bytes += slen; + b->backlogs[idx] += slen; + b->tin_backlog += slen; + sch->qstats.backlog += slen; + q->avg_window_bytes += slen; + + 
qdisc_tree_reduce_backlog(sch, 1, len); + consume_skb(skb); + } else { + /* not splitting */ + cobalt_set_enqueue_time(skb, now); + get_cobalt_cb(skb)->adjusted_len = cake_overhead(q, skb); + flow_queue_add(flow, skb); + + if (q->ack_filter) + ack = cake_ack_filter(q, flow); + + if (ack) { + b->ack_drops++; + sch->qstats.drops++; + b->bytes += qdisc_pkt_len(ack); + len -= qdisc_pkt_len(ack); + q->buffer_used += skb->truesize - ack->truesize; + if (q->rate_flags & CAKE_FLAG_INGRESS) + cake_advance_shaper(q, b, ack, now, true); + + qdisc_tree_reduce_backlog(sch, 1, qdisc_pkt_len(ack)); + consume_skb(ack); + } else { + sch->q.qlen++; + q->buffer_used += skb->truesize; + } + + /* stats */ + b->packets++; + b->bytes += len; + b->backlogs[idx] += len; + b->tin_backlog += len; + sch->qstats.backlog += len; + q->avg_window_bytes += len; + } + + if (q->overflow_timeout) + cake_heapify_up(q, b->overflow_idx[idx]); + + /* incoming bandwidth capacity estimate */ + if (q->rate_flags & CAKE_FLAG_AUTORATE_INGRESS) { + u64 packet_interval = \ + ktime_to_ns(ktime_sub(now, q->last_packet_time)); + + if (packet_interval > NSEC_PER_SEC) + packet_interval = NSEC_PER_SEC; + + /* filter out short-term bursts, eg. wifi aggregation */ + q->avg_packet_interval = \ + cake_ewma(q->avg_packet_interval, + packet_interval, + (packet_interval > q->avg_packet_interval ? + 2 : 8)); + + q->last_packet_time = now; + + if (packet_interval > q->avg_packet_interval) { + u64 window_interval = \ + ktime_to_ns(ktime_sub(now, + q->avg_window_begin)); + u64 b = q->avg_window_bytes * (u64)NSEC_PER_SEC; + + do_div(b, window_interval); + q->avg_peak_bandwidth = + cake_ewma(q->avg_peak_bandwidth, b, + b > q->avg_peak_bandwidth ? 2 : 8); + q->avg_window_bytes = 0; + q->avg_window_begin = now; + + if (ktime_after(now, + ktime_add_ms(q->last_reconfig_time, + 250))) { + q->rate_bps = (q->avg_peak_bandwidth * 15) >> 4; + cake_reconfigure(sch); + } + } + } else { + q->avg_window_bytes = 0; + q->last_packet_time = now; + } + + /* flowchain */ + if (!flow->set || flow->set == CAKE_SET_DECAYING) { + struct cake_host *srchost = &b->hosts[flow->srchost]; + struct cake_host *dsthost = &b->hosts[flow->dsthost]; + u16 host_load = 1; + + if (!flow->set) { + list_add_tail(&flow->flowchain, &b->new_flows); + } else { + b->decaying_flow_count--; + list_move_tail(&flow->flowchain, &b->new_flows); + } + flow->set = CAKE_SET_SPARSE; + b->sparse_flow_count++; + + if (cake_dsrc(q->flow_mode)) + host_load = max(host_load, srchost->srchost_refcnt); + + if (cake_ddst(q->flow_mode)) + host_load = max(host_load, dsthost->dsthost_refcnt); + + flow->deficit = (b->flow_quantum * + quantum_div[host_load]) >> 16; + } else if (flow->set == CAKE_SET_SPARSE_WAIT) { + /* this flow was empty, accounted as a sparse flow, but actually + * in the bulk rotation. 
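Editorial sketch, not part of the patch: the deficit assignment above uses quantum_div[], which cake_init() below fills with 65535 / i, so the per-host fair share becomes a multiply and a shift instead of a division on the fast path. A standalone illustration of the approximation:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t quantum = 1514;	// flow_quantum for an unshaped tin
	uint32_t load;

	for (load = 1; load <= 4; load++) {
		uint32_t qdiv = 65535 / load;		// quantum_div[load]
		uint32_t share = (quantum * qdiv) >> 16;

		printf("host_load=%u: quantum %u (exact %u)\n",
		       load, share, quantum / load);
	}
	return 0;
}

The result stays within a byte of the exact quotient for quanta of this size, and the dithering added in cake_dequeue() keeps the rounding error from accumulating.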
+ */ + flow->set = CAKE_SET_BULK; + b->sparse_flow_count--; + b->bulk_flow_count++; + } + + if (q->buffer_used > q->buffer_max_used) + q->buffer_max_used = q->buffer_used; + + if (q->buffer_used > q->buffer_limit) { + u32 dropped = 0; + + while (q->buffer_used > q->buffer_limit) { + dropped++; + cake_drop(sch, to_free); + } + b->drop_overlimit += dropped; + } + return NET_XMIT_SUCCESS; +} + +static struct sk_buff *cake_dequeue_one(struct Qdisc *sch) +{ + struct cake_sched_data *q = qdisc_priv(sch); + struct cake_tin_data *b = &q->tins[q->cur_tin]; + struct cake_flow *flow = &b->flows[q->cur_flow]; + struct sk_buff *skb = NULL; + u32 len; + + if (flow->head) { + skb = dequeue_head(flow); + len = qdisc_pkt_len(skb); + b->backlogs[q->cur_flow] -= len; + b->tin_backlog -= len; + sch->qstats.backlog -= len; + q->buffer_used -= skb->truesize; + sch->q.qlen--; + + if (q->overflow_timeout) + cake_heapify(q, b->overflow_idx[q->cur_flow]); + } + return skb; +} + +/* Discard leftover packets from a tin no longer in use. */ +static void cake_clear_tin(struct Qdisc *sch, u16 tin) +{ + struct cake_sched_data *q = qdisc_priv(sch); + struct sk_buff *skb; + + q->cur_tin = tin; + for (q->cur_flow = 0; q->cur_flow < CAKE_QUEUES; q->cur_flow++) + while (!!(skb = cake_dequeue_one(sch))) + kfree_skb(skb); +} + +static struct sk_buff *cake_dequeue(struct Qdisc *sch) +{ + struct cake_sched_data *q = qdisc_priv(sch); + struct cake_tin_data *b = &q->tins[q->cur_tin]; + struct cake_host *srchost, *dsthost; + ktime_t now = ktime_get(); + struct cake_flow *flow; + struct list_head *head; + bool first_flow = true; + struct sk_buff *skb; + u16 host_load; + u64 delay; + u32 len; + +begin: + if (!sch->q.qlen) + return NULL; + + /* global hard shaper */ + if (ktime_after(q->time_next_packet, now) && + ktime_after(q->failsafe_next_packet, now)) { + u64 next = min(ktime_to_ns(q->time_next_packet), + ktime_to_ns(q->failsafe_next_packet)); + + sch->qstats.overlimits++; + qdisc_watchdog_schedule_ns(&q->watchdog, next); + return NULL; + } + + /* Choose a class to work on. */ + if (!q->rate_ns) { + /* In unlimited mode, can't rely on shaper timings, just balance + * with DRR + */ + bool wrapped = false, empty = true; + + while (b->tin_deficit < 0 || + !(b->sparse_flow_count + b->bulk_flow_count)) { + if (b->tin_deficit <= 0) + b->tin_deficit += b->tin_quantum_band; + if (b->sparse_flow_count + b->bulk_flow_count) + empty = false; + + q->cur_tin++; + b++; + if (q->cur_tin >= q->tin_cnt) { + q->cur_tin = 0; + b = q->tins; + + if (wrapped) { + /* It's possible for q->qlen to be + * nonzero when we actually have no + * packets anywhere. + */ + if (empty) + return NULL; + } else { + wrapped = true; + } + } + } + } else { + /* In shaped mode, choose: + * - Highest-priority tin with queue and meeting schedule, or + * - The earliest-scheduled tin with queue. + */ + ktime_t best_time = KTIME_MAX; + int tin, best_tin = 0; + + for (tin = 0; tin < q->tin_cnt; tin++) { + b = q->tins + tin; + if ((b->sparse_flow_count + b->bulk_flow_count) > 0) { + ktime_t time_to_pkt = \ + ktime_sub(b->time_next_packet, now); + + if (ktime_to_ns(time_to_pkt) <= 0 || + ktime_compare(time_to_pkt, + best_time) <= 0) { + best_time = time_to_pkt; + best_tin = tin; + } + } + } + + q->cur_tin = best_tin; + b = q->tins + best_tin; + + /* No point in going further if no packets to deliver. 
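Editorial worked example, not part of the patch: every schedule consulted here advances by (len * rate_ns) >> rate_shft, with cake_set_rate() further down storing rate_ns as nanoseconds-per-byte in fixed point. Assuming the configured rate is in bytes per second (which is what the autorate estimator in cake_enqueue() produces), a quick sanity check of the arithmetic for a 1514-byte frame at 12,500,000 bytes/s (100 Mbit/s):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t rate = 12500000;	// assumed bytes/s, i.e. 100 Mbit/s
	uint64_t rate_ns = (1000000000ULL << 34) / rate;
	uint32_t rate_shft = 34;
	uint64_t len = 1514;

	// cake_set_rate() shifts rate_ns down (and lowers rate_shft) until
	// it fits in 34 bits; emulate that normalisation here.
	while (rate_ns >> 34) {
		rate_ns >>= 1;
		rate_shft--;
	}

	// Serialisation time charged to the shaper for one frame:
	printf("%llu ns\n", (unsigned long long)((len * rate_ns) >> rate_shft));
	// Prints 121120: 1514 bytes * 80 ns/byte.
	return 0;
}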
*/ + if (unlikely(!(b->sparse_flow_count + b->bulk_flow_count))) + return NULL; + } + +retry: + /* service this class */ + head = &b->decaying_flows; + if (!first_flow || list_empty(head)) { + head = &b->new_flows; + if (list_empty(head)) { + head = &b->old_flows; + if (unlikely(list_empty(head))) { + head = &b->decaying_flows; + if (unlikely(list_empty(head))) + goto begin; + } + } + } + flow = list_first_entry(head, struct cake_flow, flowchain); + q->cur_flow = flow - b->flows; + first_flow = false; + + /* triple isolation (modified DRR++) */ + srchost = &b->hosts[flow->srchost]; + dsthost = &b->hosts[flow->dsthost]; + host_load = 1; + + if (cake_dsrc(q->flow_mode)) + host_load = max(host_load, srchost->srchost_refcnt); + + if (cake_ddst(q->flow_mode)) + host_load = max(host_load, dsthost->dsthost_refcnt); + + WARN_ON(host_load > CAKE_QUEUES); + + /* flow isolation (DRR++) */ + if (flow->deficit <= 0) { + /* The shifted prandom_u32() is a way to apply dithering to + * avoid accumulating roundoff errors + */ + flow->deficit += (b->flow_quantum * quantum_div[host_load] + + (prandom_u32() >> 16)) >> 16; + list_move_tail(&flow->flowchain, &b->old_flows); + + /* Keep all flows with deficits out of the sparse and decaying + * rotations. No non-empty flow can go into the decaying + * rotation, so they can't get deficits + */ + if (flow->set == CAKE_SET_SPARSE) { + if (flow->head) { + b->sparse_flow_count--; + b->bulk_flow_count++; + flow->set = CAKE_SET_BULK; + } else { + /* we've moved it to the bulk rotation for + * correct deficit accounting but we still want + * to count it as a sparse flow, not a bulk one. + */ + flow->set = CAKE_SET_SPARSE_WAIT; + } + } + goto retry; + } + + /* Retrieve a packet via the AQM */ + while (1) { + skb = cake_dequeue_one(sch); + if (!skb) { + /* this queue was actually empty */ + if (cobalt_queue_empty(&flow->cvars, &b->cparams, now)) + b->unresponsive_flow_count--; + + if (flow->cvars.p_drop || flow->cvars.count || + ktime_before(now, flow->cvars.drop_next)) { + /* keep in the flowchain until the state has + * decayed to rest + */ + list_move_tail(&flow->flowchain, + &b->decaying_flows); + if (flow->set == CAKE_SET_BULK) { + b->bulk_flow_count--; + b->decaying_flow_count++; + } else if (flow->set == CAKE_SET_SPARSE || + flow->set == CAKE_SET_SPARSE_WAIT) { + b->sparse_flow_count--; + b->decaying_flow_count++; + } + flow->set = CAKE_SET_DECAYING; + } else { + /* remove empty queue from the flowchain */ + list_del_init(&flow->flowchain); + if (flow->set == CAKE_SET_SPARSE || + flow->set == CAKE_SET_SPARSE_WAIT) + b->sparse_flow_count--; + else if (flow->set == CAKE_SET_BULK) + b->bulk_flow_count--; + else + b->decaying_flow_count--; + + flow->set = CAKE_SET_NONE; + srchost->srchost_refcnt--; + dsthost->dsthost_refcnt--; + } + goto begin; + } + + /* Last packet in queue may be marked, shouldn't be dropped */ + if (!cobalt_should_drop(&flow->cvars, &b->cparams, now, skb, + (b->bulk_flow_count * + !!(q->rate_flags & + CAKE_FLAG_INGRESS))) || + !flow->head) + break; + + /* drop this packet, get another one */ + if (q->rate_flags & CAKE_FLAG_INGRESS) { + len = cake_advance_shaper(q, b, skb, + now, true); + flow->deficit -= len; + b->tin_deficit -= len; + } + flow->dropped++; + b->tin_dropped++; + qdisc_tree_reduce_backlog(sch, 1, qdisc_pkt_len(skb)); + qdisc_qstats_drop(sch); + kfree_skb(skb); + if (q->rate_flags & CAKE_FLAG_INGRESS) + goto retry; + } + + b->tin_ecn_mark += !!flow->cvars.ecn_marked; + qdisc_bstats_update(sch, skb); + + /* collect delay stats */ + 
delay = ktime_to_ns(ktime_sub(now, cobalt_get_enqueue_time(skb))); + b->avge_delay = cake_ewma(b->avge_delay, delay, 8); + b->peak_delay = cake_ewma(b->peak_delay, delay, + delay > b->peak_delay ? 2 : 8); + b->base_delay = cake_ewma(b->base_delay, delay, + delay < b->base_delay ? 2 : 8); + + len = cake_advance_shaper(q, b, skb, now, false); + flow->deficit -= len; + b->tin_deficit -= len; + + if (ktime_after(q->time_next_packet, now) && sch->q.qlen) { + u64 next = min(ktime_to_ns(q->time_next_packet), + ktime_to_ns(q->failsafe_next_packet)); + + qdisc_watchdog_schedule_ns(&q->watchdog, next); + } else if (!sch->q.qlen) { + int i; + + for (i = 0; i < q->tin_cnt; i++) { + if (q->tins[i].decaying_flow_count) { + ktime_t next = \ + ktime_add_ns(now, + q->tins[i].cparams.target); + + qdisc_watchdog_schedule_ns(&q->watchdog, + ktime_to_ns(next)); + break; + } + } + } + + if (q->overflow_timeout) + q->overflow_timeout--; + + return skb; +} + +static void cake_reset(struct Qdisc *sch) +{ + u32 c; + + for (c = 0; c < CAKE_MAX_TINS; c++) + cake_clear_tin(sch, c); +} + +static const struct nla_policy cake_policy[TCA_CAKE_MAX + 1] = { + [TCA_CAKE_BASE_RATE64] = { .type = NLA_U64 }, + [TCA_CAKE_DIFFSERV_MODE] = { .type = NLA_U32 }, + [TCA_CAKE_ATM] = { .type = NLA_U32 }, + [TCA_CAKE_FLOW_MODE] = { .type = NLA_U32 }, + [TCA_CAKE_OVERHEAD] = { .type = NLA_S32 }, + [TCA_CAKE_RTT] = { .type = NLA_U32 }, + [TCA_CAKE_TARGET] = { .type = NLA_U32 }, + [TCA_CAKE_AUTORATE] = { .type = NLA_U32 }, + [TCA_CAKE_MEMORY] = { .type = NLA_U32 }, + [TCA_CAKE_NAT] = { .type = NLA_U32 }, + [TCA_CAKE_RAW] = { .type = NLA_U32 }, + [TCA_CAKE_WASH] = { .type = NLA_U32 }, + [TCA_CAKE_MPU] = { .type = NLA_U32 }, + [TCA_CAKE_INGRESS] = { .type = NLA_U32 }, + [TCA_CAKE_ACK_FILTER] = { .type = NLA_U32 }, +}; + +static void cake_set_rate(struct cake_tin_data *b, u64 rate, u32 mtu, + u64 target_ns, u64 rtt_est_ns) +{ + /* convert byte-rate into time-per-byte + * so it will always unwedge in reasonable time. + */ + static const u64 MIN_RATE = 64; + u32 byte_target = mtu; + u64 byte_target_ns; + u8 rate_shft = 0; + u64 rate_ns = 0; + + b->flow_quantum = 1514; + if (rate) { + b->flow_quantum = max(min(rate >> 12, 1514ULL), 300ULL); + rate_shft = 34; + rate_ns = ((u64)NSEC_PER_SEC) << rate_shft; + rate_ns = div64_u64(rate_ns, max(MIN_RATE, rate)); + while (!!(rate_ns >> 34)) { + rate_ns >>= 1; + rate_shft--; + } + } /* else unlimited, ie. 
zero delay */ + + b->tin_rate_bps = rate; + b->tin_rate_ns = rate_ns; + b->tin_rate_shft = rate_shft; + + byte_target_ns = (byte_target * rate_ns) >> rate_shft; + + b->cparams.target = max((byte_target_ns * 3) / 2, target_ns); + b->cparams.interval = max(rtt_est_ns + + b->cparams.target - target_ns, + b->cparams.target * 2); + b->cparams.mtu_time = byte_target_ns; + b->cparams.p_inc = 1 << 24; /* 1/256 */ + b->cparams.p_dec = 1 << 20; /* 1/4096 */ +} + +static int cake_config_besteffort(struct Qdisc *sch) +{ + struct cake_sched_data *q = qdisc_priv(sch); + struct cake_tin_data *b = &q->tins[0]; + u32 mtu = psched_mtu(qdisc_dev(sch)); + u64 rate = q->rate_bps; + + q->tin_cnt = 1; + + q->tin_index = besteffort; + q->tin_order = normal_order; + + cake_set_rate(b, rate, mtu, + us_to_ns(q->target), us_to_ns(q->interval)); + b->tin_quantum_band = 65535; + b->tin_quantum_prio = 65535; + + return 0; +} + +static int cake_config_precedence(struct Qdisc *sch) +{ + /* convert high-level (user visible) parameters into internal format */ + struct cake_sched_data *q = qdisc_priv(sch); + u32 mtu = psched_mtu(qdisc_dev(sch)); + u64 rate = q->rate_bps; + u32 quantum1 = 256; + u32 quantum2 = 256; + u32 i; + + q->tin_cnt = 8; + q->tin_index = precedence; + q->tin_order = normal_order; + + for (i = 0; i < q->tin_cnt; i++) { + struct cake_tin_data *b = &q->tins[i]; + + cake_set_rate(b, rate, mtu, us_to_ns(q->target), + us_to_ns(q->interval)); + + b->tin_quantum_prio = max_t(u16, 1U, quantum1); + b->tin_quantum_band = max_t(u16, 1U, quantum2); + + /* calculate next class's parameters */ + rate *= 7; + rate >>= 3; + + quantum1 *= 3; + quantum1 >>= 1; + + quantum2 *= 7; + quantum2 >>= 3; + } + + return 0; +} + +/* List of known Diffserv codepoints: + * + * Least Effort (CS1) + * Best Effort (CS0) + * Max Reliability & LLT "Lo" (TOS1) + * Max Throughput (TOS2) + * Min Delay (TOS4) + * LLT "La" (TOS5) + * Assured Forwarding 1 (AF1x) - x3 + * Assured Forwarding 2 (AF2x) - x3 + * Assured Forwarding 3 (AF3x) - x3 + * Assured Forwarding 4 (AF4x) - x3 + * Precedence Class 2 (CS2) + * Precedence Class 3 (CS3) + * Precedence Class 4 (CS4) + * Precedence Class 5 (CS5) + * Precedence Class 6 (CS6) + * Precedence Class 7 (CS7) + * Voice Admit (VA) + * Expedited Forwarding (EF) + + * Total 25 codepoints. + */ + +/* List of traffic classes in RFC 4594: + * (roughly descending order of contended priority) + * (roughly ascending order of uncontended throughput) + * + * Network Control (CS6,CS7) - routing traffic + * Telephony (EF,VA) - aka. VoIP streams + * Signalling (CS5) - VoIP setup + * Multimedia Conferencing (AF4x) - aka. video calls + * Realtime Interactive (CS4) - eg. games + * Multimedia Streaming (AF3x) - eg. YouTube, NetFlix, Twitch + * Broadcast Video (CS3) + * Low Latency Data (AF2x,TOS4) - eg. database + * Ops, Admin, Management (CS2,TOS1) - eg. ssh + * Standard Service (CS0 & unrecognised codepoints) + * High Throughput Data (AF1x,TOS2) - eg. web traffic + * Low Priority Data (CS1) - eg. BitTorrent + + * Total 12 traffic classes. + */ + +static int cake_config_diffserv8(struct Qdisc *sch) +{ +/* Pruned list of traffic classes for typical applications: + * + * Network Control (CS6, CS7) + * Minimum Latency (EF, VA, CS5, CS4) + * Interactive Shell (CS2, TOS1) + * Low Latency Transactions (AF2x, TOS4) + * Video Streaming (AF4x, AF3x, CS3) + * Bog Standard (CS0 etc.) + * High Throughput (AF1x, TOS2) + * Background Traffic (CS1) + * + * Total 8 traffic classes. 
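Editorial worked example, not part of the patch: cake_handle_diffserv() earlier extracts the DSCP by shifting the DS field right by two (discarding the ECN bits), and that value indexes diffserv8[] directly. Reading the pruned class list above top-down as tins 7 down to 0 (highest priority first, matching the per-tin quantum progression below), a few spot checks against the table:

#include <assert.h>
#include <stdint.h>

// Local copy of the diffserv8[] codepoint-to-tin table from above.
static const uint8_t diffserv8[] = {
	2, 5, 1, 2, 4, 2, 2, 2,
	0, 2, 1, 2, 1, 2, 1, 2,
	5, 2, 4, 2, 4, 2, 4, 2,
	3, 2, 3, 2, 3, 2, 3, 2,
	6, 2, 3, 2, 3, 2, 3, 2,
	6, 2, 2, 2, 6, 2, 6, 2,
	7, 2, 2, 2, 2, 2, 2, 2,
	7, 2, 2, 2, 2, 2, 2, 2,
};

int main(void)
{
	// EF:  DS field 0xb8 -> DSCP 46 -> tin 6 ("Minimum Latency")
	assert(diffserv8[0xb8 >> 2] == 6);
	// CS6: DS field 0xc0 -> DSCP 48 -> tin 7 ("Network Control")
	assert(diffserv8[0xc0 >> 2] == 7);
	// CS1: DS field 0x20 -> DSCP 8  -> tin 0 ("Background Traffic")
	assert(diffserv8[0x20 >> 2] == 0);
	return 0;
}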
+ */ + + struct cake_sched_data *q = qdisc_priv(sch); + u32 mtu = psched_mtu(qdisc_dev(sch)); + u64 rate = q->rate_bps; + u32 quantum1 = 256; + u32 quantum2 = 256; + u32 i; + + q->tin_cnt = 8; + + /* codepoint to class mapping */ + q->tin_index = diffserv8; + q->tin_order = normal_order; + + /* class characteristics */ + for (i = 0; i < q->tin_cnt; i++) { + struct cake_tin_data *b = &q->tins[i]; + + cake_set_rate(b, rate, mtu, us_to_ns(q->target), + us_to_ns(q->interval)); + + b->tin_quantum_prio = max_t(u16, 1U, quantum1); + b->tin_quantum_band = max_t(u16, 1U, quantum2); + + /* calculate next class's parameters */ + rate *= 7; + rate >>= 3; + + quantum1 *= 3; + quantum1 >>= 1; + + quantum2 *= 7; + quantum2 >>= 3; + } + + return 0; +} + +static int cake_config_diffserv4(struct Qdisc *sch) +{ +/* Further pruned list of traffic classes for four-class system: + * + * Latency Sensitive (CS7, CS6, EF, VA, CS5, CS4) + * Streaming Media (AF4x, AF3x, CS3, AF2x, TOS4, CS2, TOS1) + * Best Effort (CS0, AF1x, TOS2, and those not specified) + * Background Traffic (CS1) + * + * Total 4 traffic classes. + */ + + struct cake_sched_data *q = qdisc_priv(sch); + u32 mtu = psched_mtu(qdisc_dev(sch)); + u64 rate = q->rate_bps; + u32 quantum = 1024; + + q->tin_cnt = 4; + + /* codepoint to class mapping */ + q->tin_index = diffserv4; + q->tin_order = bulk_order; + + /* class characteristics */ + cake_set_rate(&q->tins[0], rate, mtu, + us_to_ns(q->target), us_to_ns(q->interval)); + cake_set_rate(&q->tins[1], rate >> 4, mtu, + us_to_ns(q->target), us_to_ns(q->interval)); + cake_set_rate(&q->tins[2], rate >> 1, mtu, + us_to_ns(q->target), us_to_ns(q->interval)); + cake_set_rate(&q->tins[3], rate >> 2, mtu, + us_to_ns(q->target), us_to_ns(q->interval)); + + /* priority weights */ + q->tins[0].tin_quantum_prio = quantum; + q->tins[1].tin_quantum_prio = quantum >> 4; + q->tins[2].tin_quantum_prio = quantum << 2; + q->tins[3].tin_quantum_prio = quantum << 4; + + /* bandwidth-sharing weights */ + q->tins[0].tin_quantum_band = quantum; + q->tins[1].tin_quantum_band = quantum >> 4; + q->tins[2].tin_quantum_band = quantum >> 1; + q->tins[3].tin_quantum_band = quantum >> 2; + + return 0; +} + +static int cake_config_diffserv3(struct Qdisc *sch) +{ +/* Simplified Diffserv structure with 3 tins. 
+ * Low Priority (CS1) + * Best Effort + * Latency Sensitive (TOS4, VA, EF, CS6, CS7) + */ + struct cake_sched_data *q = qdisc_priv(sch); + u32 mtu = psched_mtu(qdisc_dev(sch)); + u64 rate = q->rate_bps; + u32 quantum = 1024; + + q->tin_cnt = 3; + + /* codepoint to class mapping */ + q->tin_index = diffserv3; + q->tin_order = bulk_order; + + /* class characteristics */ + cake_set_rate(&q->tins[0], rate, mtu, + us_to_ns(q->target), us_to_ns(q->interval)); + cake_set_rate(&q->tins[1], rate >> 4, mtu, + us_to_ns(q->target), us_to_ns(q->interval)); + cake_set_rate(&q->tins[2], rate >> 2, mtu, + us_to_ns(q->target), us_to_ns(q->interval)); + + /* priority weights */ + q->tins[0].tin_quantum_prio = quantum; + q->tins[1].tin_quantum_prio = quantum >> 4; + q->tins[2].tin_quantum_prio = quantum << 4; + + /* bandwidth-sharing weights */ + q->tins[0].tin_quantum_band = quantum; + q->tins[1].tin_quantum_band = quantum >> 4; + q->tins[2].tin_quantum_band = quantum >> 2; + + return 0; +} + +static void cake_reconfigure(struct Qdisc *sch) +{ + struct cake_sched_data *q = qdisc_priv(sch); + int c, ft; + + switch (q->tin_mode) { + case CAKE_DIFFSERV_BESTEFFORT: + ft = cake_config_besteffort(sch); + break; + + case CAKE_DIFFSERV_PRECEDENCE: + ft = cake_config_precedence(sch); + break; + + case CAKE_DIFFSERV_DIFFSERV8: + ft = cake_config_diffserv8(sch); + break; + + case CAKE_DIFFSERV_DIFFSERV4: + ft = cake_config_diffserv4(sch); + break; + + case CAKE_DIFFSERV_DIFFSERV3: + default: + ft = cake_config_diffserv3(sch); + break; + } + + for (c = q->tin_cnt; c < CAKE_MAX_TINS; c++) { + cake_clear_tin(sch, c); + q->tins[c].cparams.mtu_time = q->tins[ft].cparams.mtu_time; + } + + q->rate_ns = q->tins[ft].tin_rate_ns; + q->rate_shft = q->tins[ft].tin_rate_shft; + + if (q->buffer_config_limit) { + q->buffer_limit = q->buffer_config_limit; + } else if (q->rate_bps) { + u64 t = q->rate_bps * q->interval; + + do_div(t, USEC_PER_SEC / 4); + q->buffer_limit = max_t(u32, t, 4U << 20); + } else { + q->buffer_limit = ~0; + } + + sch->flags &= ~TCQ_F_CAN_BYPASS; + + q->buffer_limit = min(q->buffer_limit, + max(sch->limit * psched_mtu(qdisc_dev(sch)), + q->buffer_config_limit)); +} + +static int cake_change(struct Qdisc *sch, struct nlattr *opt, + struct netlink_ext_ack *extack) +{ + struct cake_sched_data *q = qdisc_priv(sch); + struct nlattr *tb[TCA_CAKE_MAX + 1]; + int err; + + if (!opt) + return -EINVAL; + + err = nla_parse_nested(tb, TCA_CAKE_MAX, opt, cake_policy, extack); + if (err < 0) + return err; + + if (tb[TCA_CAKE_NAT]) { +#if IS_ENABLED(CONFIG_NF_CONNTRACK) + q->flow_mode &= ~CAKE_FLOW_NAT_FLAG; + q->flow_mode |= CAKE_FLOW_NAT_FLAG * + !!nla_get_u32(tb[TCA_CAKE_NAT]); +#else + NL_SET_ERR_MSG_ATTR(extack, tb[TCA_CAKE_NAT], + "No conntrack support in kernel"); + return -EOPNOTSUPP; +#endif + } + + if (tb[TCA_CAKE_BASE_RATE64]) + q->rate_bps = nla_get_u64(tb[TCA_CAKE_BASE_RATE64]); + + if (tb[TCA_CAKE_DIFFSERV_MODE]) + q->tin_mode = nla_get_u32(tb[TCA_CAKE_DIFFSERV_MODE]); + + if (tb[TCA_CAKE_WASH]) { + if (!!nla_get_u32(tb[TCA_CAKE_WASH])) + q->rate_flags |= CAKE_FLAG_WASH; + else + q->rate_flags &= ~CAKE_FLAG_WASH; + } + + if (tb[TCA_CAKE_FLOW_MODE]) + q->flow_mode = ((q->flow_mode & CAKE_FLOW_NAT_FLAG) | + (nla_get_u32(tb[TCA_CAKE_FLOW_MODE]) & + CAKE_FLOW_MASK)); + + if (tb[TCA_CAKE_ATM]) + q->atm_mode = nla_get_u32(tb[TCA_CAKE_ATM]); + + if (tb[TCA_CAKE_OVERHEAD]) { + q->rate_overhead = nla_get_s32(tb[TCA_CAKE_OVERHEAD]); + q->rate_flags |= CAKE_FLAG_OVERHEAD; + + q->max_netlen = 0; + q->max_adjlen = 0; + 
q->min_netlen = ~0; + q->min_adjlen = ~0; + } + + if (tb[TCA_CAKE_RAW]) { + q->rate_flags &= ~CAKE_FLAG_OVERHEAD; + + q->max_netlen = 0; + q->max_adjlen = 0; + q->min_netlen = ~0; + q->min_adjlen = ~0; + } + + if (tb[TCA_CAKE_MPU]) + q->rate_mpu = nla_get_u32(tb[TCA_CAKE_MPU]); + + if (tb[TCA_CAKE_RTT]) { + q->interval = nla_get_u32(tb[TCA_CAKE_RTT]); + + if (!q->interval) + q->interval = 1; + } + + if (tb[TCA_CAKE_TARGET]) { + q->target = nla_get_u32(tb[TCA_CAKE_TARGET]); + + if (!q->target) + q->target = 1; + } + + if (tb[TCA_CAKE_AUTORATE]) { + if (!!nla_get_u32(tb[TCA_CAKE_AUTORATE])) + q->rate_flags |= CAKE_FLAG_AUTORATE_INGRESS; + else + q->rate_flags &= ~CAKE_FLAG_AUTORATE_INGRESS; + } + + if (tb[TCA_CAKE_INGRESS]) { + if (!!nla_get_u32(tb[TCA_CAKE_INGRESS])) + q->rate_flags |= CAKE_FLAG_INGRESS; + else + q->rate_flags &= ~CAKE_FLAG_INGRESS; + } + + if (tb[TCA_CAKE_ACK_FILTER]) + q->ack_filter = nla_get_u32(tb[TCA_CAKE_ACK_FILTER]); + + if (tb[TCA_CAKE_MEMORY]) + q->buffer_config_limit = nla_get_u32(tb[TCA_CAKE_MEMORY]); + + if (q->rate_bps && q->rate_bps <= CAKE_SPLIT_GSO_THRESHOLD) + q->rate_flags |= CAKE_FLAG_SPLIT_GSO; + else + q->rate_flags &= ~CAKE_FLAG_SPLIT_GSO; + + if (q->tins) { + sch_tree_lock(sch); + cake_reconfigure(sch); + sch_tree_unlock(sch); + } + + return 0; +} + +static void cake_destroy(struct Qdisc *sch) +{ + struct cake_sched_data *q = qdisc_priv(sch); + + qdisc_watchdog_cancel(&q->watchdog); + tcf_block_put(q->block); + kvfree(q->tins); +} + +static int cake_init(struct Qdisc *sch, struct nlattr *opt, + struct netlink_ext_ack *extack) +{ + struct cake_sched_data *q = qdisc_priv(sch); + int i, j, err; + + sch->limit = 10240; + q->tin_mode = CAKE_DIFFSERV_DIFFSERV3; + q->flow_mode = CAKE_FLOW_TRIPLE; + + q->rate_bps = 0; /* unlimited by default */ + + q->interval = 100000; /* 100ms default */ + q->target = 5000; /* 5ms: codel RFC argues + * for 5 to 10% of interval + */ + + q->cur_tin = 0; + q->cur_flow = 0; + + qdisc_watchdog_init(&q->watchdog, sch); + + if (opt) { + int err = cake_change(sch, opt, extack); + + if (err) + return err; + } + + err = tcf_block_get(&q->block, &q->filter_list, sch, extack); + if (err) + return err; + + quantum_div[0] = ~0; + for (i = 1; i <= CAKE_QUEUES; i++) + quantum_div[i] = 65535 / i; + + q->tins = kvzalloc(CAKE_MAX_TINS * sizeof(struct cake_tin_data), + GFP_KERNEL); + if (!q->tins) + goto nomem; + + for (i = 0; i < CAKE_MAX_TINS; i++) { + struct cake_tin_data *b = q->tins + i; + + INIT_LIST_HEAD(&b->new_flows); + INIT_LIST_HEAD(&b->old_flows); + INIT_LIST_HEAD(&b->decaying_flows); + b->sparse_flow_count = 0; + b->bulk_flow_count = 0; + b->decaying_flow_count = 0; + + for (j = 0; j < CAKE_QUEUES; j++) { + struct cake_flow *flow = b->flows + j; + u32 k = j * CAKE_MAX_TINS + i; + + INIT_LIST_HEAD(&flow->flowchain); + cobalt_vars_init(&flow->cvars); + + q->overflow_heap[k].t = i; + q->overflow_heap[k].b = j; + b->overflow_idx[j] = k; + } + } + + cake_reconfigure(sch); + q->avg_peak_bandwidth = q->rate_bps; + q->min_netlen = ~0; + q->min_adjlen = ~0; + return 0; + +nomem: + cake_destroy(sch); + return -ENOMEM; +} + +static int cake_dump(struct Qdisc *sch, struct sk_buff *skb) +{ + struct cake_sched_data *q = qdisc_priv(sch); + struct nlattr *opts; + + opts = nla_nest_start(skb, TCA_OPTIONS); + if (!opts) + goto nla_put_failure; + + if (nla_put_u64_64bit(skb, TCA_CAKE_BASE_RATE64, q->rate_bps, + TCA_CAKE_PAD)) + goto nla_put_failure; + + if (nla_put_u32(skb, TCA_CAKE_FLOW_MODE, + q->flow_mode & CAKE_FLOW_MASK)) + goto 
nla_put_failure; + + if (nla_put_u32(skb, TCA_CAKE_RTT, q->interval)) + goto nla_put_failure; + + if (nla_put_u32(skb, TCA_CAKE_TARGET, q->target)) + goto nla_put_failure; + + if (nla_put_u32(skb, TCA_CAKE_MEMORY, q->buffer_config_limit)) + goto nla_put_failure; + + if (nla_put_u32(skb, TCA_CAKE_AUTORATE, + !!(q->rate_flags & CAKE_FLAG_AUTORATE_INGRESS))) + goto nla_put_failure; + + if (nla_put_u32(skb, TCA_CAKE_INGRESS, + !!(q->rate_flags & CAKE_FLAG_INGRESS))) + goto nla_put_failure; + + if (nla_put_u32(skb, TCA_CAKE_ACK_FILTER, q->ack_filter)) + goto nla_put_failure; + + if (nla_put_u32(skb, TCA_CAKE_NAT, + !!(q->flow_mode & CAKE_FLOW_NAT_FLAG))) + goto nla_put_failure; + + if (nla_put_u32(skb, TCA_CAKE_DIFFSERV_MODE, q->tin_mode)) + goto nla_put_failure; + + if (nla_put_u32(skb, TCA_CAKE_WASH, + !!(q->rate_flags & CAKE_FLAG_WASH))) + goto nla_put_failure; + + if (nla_put_u32(skb, TCA_CAKE_OVERHEAD, q->rate_overhead)) + goto nla_put_failure; + + if (!(q->rate_flags & CAKE_FLAG_OVERHEAD)) + if (nla_put_u32(skb, TCA_CAKE_RAW, 0)) + goto nla_put_failure; + + if (nla_put_u32(skb, TCA_CAKE_ATM, q->atm_mode)) + goto nla_put_failure; + + if (nla_put_u32(skb, TCA_CAKE_MPU, q->rate_mpu)) + goto nla_put_failure; + + if (nla_put_u32(skb, TCA_CAKE_SPLIT_GSO, + !!(q->rate_flags & CAKE_FLAG_SPLIT_GSO))) + goto nla_put_failure; + + return nla_nest_end(skb, opts); + +nla_put_failure: + return -1; +} + +static int cake_dump_stats(struct Qdisc *sch, struct gnet_dump *d) +{ + struct nlattr *stats = nla_nest_start(d->skb, TCA_STATS_APP); + struct cake_sched_data *q = qdisc_priv(sch); + struct nlattr *tstats, *ts; + int i; + + if (!stats) + return -1; + +#define PUT_STAT_U32(attr, data) do { \ + if (nla_put_u32(d->skb, TCA_CAKE_STATS_ ## attr, data)) \ + goto nla_put_failure; \ + } while (0) +#define PUT_STAT_U64(attr, data) do { \ + if (nla_put_u64_64bit(d->skb, TCA_CAKE_STATS_ ## attr, \ + data, TCA_CAKE_STATS_PAD)) \ + goto nla_put_failure; \ + } while (0) + + PUT_STAT_U64(CAPACITY_ESTIMATE64, q->avg_peak_bandwidth); + PUT_STAT_U32(MEMORY_LIMIT, q->buffer_limit); + PUT_STAT_U32(MEMORY_USED, q->buffer_max_used); + PUT_STAT_U32(AVG_NETOFF, ((q->avg_netoff + 0x8000) >> 16)); + PUT_STAT_U32(MAX_NETLEN, q->max_netlen); + PUT_STAT_U32(MAX_ADJLEN, q->max_adjlen); + PUT_STAT_U32(MIN_NETLEN, q->min_netlen); + PUT_STAT_U32(MIN_ADJLEN, q->min_adjlen); + +#undef PUT_STAT_U32 +#undef PUT_STAT_U64 + + tstats = nla_nest_start(d->skb, TCA_CAKE_STATS_TIN_STATS); + if (!tstats) + goto nla_put_failure; + +#define PUT_TSTAT_U32(attr, data) do { \ + if (nla_put_u32(d->skb, TCA_CAKE_TIN_STATS_ ## attr, data)) \ + goto nla_put_failure; \ + } while (0) +#define PUT_TSTAT_U64(attr, data) do { \ + if (nla_put_u64_64bit(d->skb, TCA_CAKE_TIN_STATS_ ## attr, \ + data, TCA_CAKE_TIN_STATS_PAD)) \ + goto nla_put_failure; \ + } while (0) + + for (i = 0; i < q->tin_cnt; i++) { + struct cake_tin_data *b = &q->tins[q->tin_order[i]]; + + ts = nla_nest_start(d->skb, i + 1); + if (!ts) + goto nla_put_failure; + + PUT_TSTAT_U64(THRESHOLD_RATE64, b->tin_rate_bps); + PUT_TSTAT_U64(SENT_BYTES64, b->bytes); + PUT_TSTAT_U32(BACKLOG_BYTES, b->tin_backlog); + + PUT_TSTAT_U32(TARGET_US, + ktime_to_us(ns_to_ktime(b->cparams.target))); + PUT_TSTAT_U32(INTERVAL_US, + ktime_to_us(ns_to_ktime(b->cparams.interval))); + + PUT_TSTAT_U32(SENT_PACKETS, b->packets); + PUT_TSTAT_U32(DROPPED_PACKETS, b->tin_dropped); + PUT_TSTAT_U32(ECN_MARKED_PACKETS, b->tin_ecn_mark); + PUT_TSTAT_U32(ACKS_DROPPED_PACKETS, b->ack_drops); + + PUT_TSTAT_U32(PEAK_DELAY_US, + 
ktime_to_us(ns_to_ktime(b->peak_delay))); + PUT_TSTAT_U32(AVG_DELAY_US, + ktime_to_us(ns_to_ktime(b->avge_delay))); + PUT_TSTAT_U32(BASE_DELAY_US, + ktime_to_us(ns_to_ktime(b->base_delay))); + + PUT_TSTAT_U32(WAY_INDIRECT_HITS, b->way_hits); + PUT_TSTAT_U32(WAY_MISSES, b->way_misses); + PUT_TSTAT_U32(WAY_COLLISIONS, b->way_collisions); + + PUT_TSTAT_U32(SPARSE_FLOWS, b->sparse_flow_count + + b->decaying_flow_count); + PUT_TSTAT_U32(BULK_FLOWS, b->bulk_flow_count); + PUT_TSTAT_U32(UNRESPONSIVE_FLOWS, b->unresponsive_flow_count); + PUT_TSTAT_U32(MAX_SKBLEN, b->max_skblen); + + PUT_TSTAT_U32(FLOW_QUANTUM, b->flow_quantum); + nla_nest_end(d->skb, ts); + } + +#undef PUT_TSTAT_U32 +#undef PUT_TSTAT_U64 + + nla_nest_end(d->skb, tstats); + return nla_nest_end(d->skb, stats); + +nla_put_failure: + nla_nest_cancel(d->skb, stats); + return -1; +} + +static struct Qdisc *cake_leaf(struct Qdisc *sch, unsigned long arg) +{ + return NULL; +} + +static unsigned long cake_find(struct Qdisc *sch, u32 classid) +{ + return 0; +} + +static unsigned long cake_bind(struct Qdisc *sch, unsigned long parent, + u32 classid) +{ + return 0; +} + +static void cake_unbind(struct Qdisc *q, unsigned long cl) +{ +} + +static struct tcf_block *cake_tcf_block(struct Qdisc *sch, unsigned long cl, + struct netlink_ext_ack *extack) +{ + struct cake_sched_data *q = qdisc_priv(sch); + + if (cl) + return NULL; + return q->block; +} + +static int cake_dump_class(struct Qdisc *sch, unsigned long cl, + struct sk_buff *skb, struct tcmsg *tcm) +{ + tcm->tcm_handle |= TC_H_MIN(cl); + return 0; +} + +static int cake_dump_class_stats(struct Qdisc *sch, unsigned long cl, + struct gnet_dump *d) +{ + struct cake_sched_data *q = qdisc_priv(sch); + const struct cake_flow *flow = NULL; + struct gnet_stats_queue qs = { 0 }; + struct nlattr *stats; + u32 idx = cl - 1; + + if (idx < CAKE_QUEUES * q->tin_cnt) { + const struct cake_tin_data *b = \ + &q->tins[q->tin_order[idx / CAKE_QUEUES]]; + const struct sk_buff *skb; + + flow = &b->flows[idx % CAKE_QUEUES]; + + if (flow->head) { + sch_tree_lock(sch); + skb = flow->head; + while (skb) { + qs.qlen++; + skb = skb->next; + } + sch_tree_unlock(sch); + } + qs.backlog = b->backlogs[idx % CAKE_QUEUES]; + qs.drops = flow->dropped; + } + if (gnet_stats_copy_queue(d, NULL, &qs, qs.qlen) < 0) + return -1; + if (flow) { + ktime_t now = ktime_get(); + + stats = nla_nest_start(d->skb, TCA_STATS_APP); + if (!stats) + return -1; + +#define PUT_STAT_U32(attr, data) do { \ + if (nla_put_u32(d->skb, TCA_CAKE_STATS_ ## attr, data)) \ + goto nla_put_failure; \ + } while (0) +#define PUT_STAT_S32(attr, data) do { \ + if (nla_put_s32(d->skb, TCA_CAKE_STATS_ ## attr, data)) \ + goto nla_put_failure; \ + } while (0) + + PUT_STAT_S32(DEFICIT, flow->deficit); + PUT_STAT_U32(DROPPING, flow->cvars.dropping); + PUT_STAT_U32(COBALT_COUNT, flow->cvars.count); + PUT_STAT_U32(P_DROP, flow->cvars.p_drop); + if (flow->cvars.p_drop) { + PUT_STAT_S32(BLUE_TIMER_US, + ktime_to_us( + ktime_sub(now, + flow->cvars.blue_timer))); + } + if (flow->cvars.dropping) { + PUT_STAT_S32(DROP_NEXT_US, + ktime_to_us( + ktime_sub(now, + flow->cvars.drop_next))); + } + + if (nla_nest_end(d->skb, stats) < 0) + return -1; + } + + return 0; + +nla_put_failure: + nla_nest_cancel(d->skb, stats); + return -1; +} + +static void cake_walk(struct Qdisc *sch, struct qdisc_walker *arg) +{ + struct cake_sched_data *q = qdisc_priv(sch); + unsigned int i, j; + + if (arg->stop) + return; + + for (i = 0; i < q->tin_cnt; i++) { + struct cake_tin_data *b = 
&q->tins[q->tin_order[i]]; + + for (j = 0; j < CAKE_QUEUES; j++) { + if (list_empty(&b->flows[j].flowchain) || + arg->count < arg->skip) { + arg->count++; + continue; + } + if (arg->fn(sch, i * CAKE_QUEUES + j + 1, arg) < 0) { + arg->stop = 1; + break; + } + arg->count++; + } + } +} + +static const struct Qdisc_class_ops cake_class_ops = { + .leaf = cake_leaf, + .find = cake_find, + .tcf_block = cake_tcf_block, + .bind_tcf = cake_bind, + .unbind_tcf = cake_unbind, + .dump = cake_dump_class, + .dump_stats = cake_dump_class_stats, + .walk = cake_walk, +}; + +static struct Qdisc_ops cake_qdisc_ops __read_mostly = { + .cl_ops = &cake_class_ops, + .id = "cake", + .priv_size = sizeof(struct cake_sched_data), + .enqueue = cake_enqueue, + .dequeue = cake_dequeue, + .peek = qdisc_peek_dequeued, + .init = cake_init, + .reset = cake_reset, + .destroy = cake_destroy, + .change = cake_change, + .dump = cake_dump, + .dump_stats = cake_dump_stats, + .owner = THIS_MODULE, +}; + +static int __init cake_module_init(void) +{ + return register_qdisc(&cake_qdisc_ops); +} + +static void __exit cake_module_exit(void) +{ + unregister_qdisc(&cake_qdisc_ops); +} + +module_init(cake_module_init) +module_exit(cake_module_exit) +MODULE_AUTHOR("Jonathan Morton"); +MODULE_LICENSE("Dual BSD/GPL"); +MODULE_DESCRIPTION("The CAKE shaper."); diff --git a/net/sched/sch_etf.c b/net/sched/sch_etf.c new file mode 100644 index 000000000000..1538d6fa8165 --- /dev/null +++ b/net/sched/sch_etf.c @@ -0,0 +1,484 @@ +// SPDX-License-Identifier: GPL-2.0 + +/* net/sched/sch_etf.c Earliest TxTime First queueing discipline. + * + * Authors: Jesus Sanchez-Palencia <jesus.sanchez-palencia@intel.com> + * Vinicius Costa Gomes <vinicius.gomes@intel.com> + */ + +#include <linux/module.h> +#include <linux/types.h> +#include <linux/kernel.h> +#include <linux/string.h> +#include <linux/errno.h> +#include <linux/errqueue.h> +#include <linux/rbtree.h> +#include <linux/skbuff.h> +#include <linux/posix-timers.h> +#include <net/netlink.h> +#include <net/sch_generic.h> +#include <net/pkt_sched.h> +#include <net/sock.h> + +#define DEADLINE_MODE_IS_ON(x) ((x)->flags & TC_ETF_DEADLINE_MODE_ON) +#define OFFLOAD_IS_ON(x) ((x)->flags & TC_ETF_OFFLOAD_ON) + +struct etf_sched_data { + bool offload; + bool deadline_mode; + int clockid; + int queue; + s32 delta; /* in ns */ + ktime_t last; /* The txtime of the last skb sent to the netdevice. */ + struct rb_root head; + struct qdisc_watchdog watchdog; + ktime_t (*get_time)(void); +}; + +static const struct nla_policy etf_policy[TCA_ETF_MAX + 1] = { + [TCA_ETF_PARMS] = { .len = sizeof(struct tc_etf_qopt) }, +}; + +static inline int validate_input_params(struct tc_etf_qopt *qopt, + struct netlink_ext_ack *extack) +{ + /* Check if params comply to the following rules: + * * Clockid and delta must be valid. + * + * * Dynamic clockids are not supported. + * + * * Delta must be a positive integer. + * + * Also note that for the HW offload case, we must + * expect that system clocks have been synchronized to PHC. + */ + if (qopt->clockid < 0) { + NL_SET_ERR_MSG(extack, "Dynamic clockids are not supported"); + return -ENOTSUPP; + } + + if (qopt->clockid != CLOCK_TAI) { + NL_SET_ERR_MSG(extack, "Invalid clockid. 
CLOCK_TAI must be used"); + return -EINVAL; + } + + if (qopt->delta < 0) { + NL_SET_ERR_MSG(extack, "Delta must be positive"); + return -EINVAL; + } + + return 0; +} + +static bool is_packet_valid(struct Qdisc *sch, struct sk_buff *nskb) +{ + struct etf_sched_data *q = qdisc_priv(sch); + ktime_t txtime = nskb->tstamp; + struct sock *sk = nskb->sk; + ktime_t now; + + if (!sk) + return false; + + if (!sock_flag(sk, SOCK_TXTIME)) + return false; + + /* We don't perform crosstimestamping. + * Drop if packet's clockid differs from qdisc's. + */ + if (sk->sk_clockid != q->clockid) + return false; + + if (sk->sk_txtime_deadline_mode != q->deadline_mode) + return false; + + now = q->get_time(); + if (ktime_before(txtime, now) || ktime_before(txtime, q->last)) + return false; + + return true; +} + +static struct sk_buff *etf_peek_timesortedlist(struct Qdisc *sch) +{ + struct etf_sched_data *q = qdisc_priv(sch); + struct rb_node *p; + + p = rb_first(&q->head); + if (!p) + return NULL; + + return rb_to_skb(p); +} + +static void reset_watchdog(struct Qdisc *sch) +{ + struct etf_sched_data *q = qdisc_priv(sch); + struct sk_buff *skb = etf_peek_timesortedlist(sch); + ktime_t next; + + if (!skb) + return; + + next = ktime_sub_ns(skb->tstamp, q->delta); + qdisc_watchdog_schedule_ns(&q->watchdog, ktime_to_ns(next)); +} + +static void report_sock_error(struct sk_buff *skb, u32 err, u8 code) +{ + struct sock_exterr_skb *serr; + struct sk_buff *clone; + ktime_t txtime = skb->tstamp; + + if (!skb->sk || !(skb->sk->sk_txtime_report_errors)) + return; + + clone = skb_clone(skb, GFP_ATOMIC); + if (!clone) + return; + + serr = SKB_EXT_ERR(clone); + serr->ee.ee_errno = err; + serr->ee.ee_origin = SO_EE_ORIGIN_TXTIME; + serr->ee.ee_type = 0; + serr->ee.ee_code = code; + serr->ee.ee_pad = 0; + serr->ee.ee_data = (txtime >> 32); /* high part of tstamp */ + serr->ee.ee_info = txtime; /* low part of tstamp */ + + if (sock_queue_err_skb(skb->sk, clone)) + kfree_skb(clone); +} + +static int etf_enqueue_timesortedlist(struct sk_buff *nskb, struct Qdisc *sch, + struct sk_buff **to_free) +{ + struct etf_sched_data *q = qdisc_priv(sch); + struct rb_node **p = &q->head.rb_node, *parent = NULL; + ktime_t txtime = nskb->tstamp; + + if (!is_packet_valid(sch, nskb)) { + report_sock_error(nskb, EINVAL, + SO_EE_CODE_TXTIME_INVALID_PARAM); + return qdisc_drop(nskb, sch, to_free); + } + + while (*p) { + struct sk_buff *skb; + + parent = *p; + skb = rb_to_skb(parent); + if (ktime_after(txtime, skb->tstamp)) + p = &parent->rb_right; + else + p = &parent->rb_left; + } + rb_link_node(&nskb->rbnode, parent, p); + rb_insert_color(&nskb->rbnode, &q->head); + + qdisc_qstats_backlog_inc(sch, nskb); + sch->q.qlen++; + + /* Now we may need to re-arm the qdisc watchdog for the next packet. */ + reset_watchdog(sch); + + return NET_XMIT_SUCCESS; +} + +static void timesortedlist_erase(struct Qdisc *sch, struct sk_buff *skb, + bool drop) +{ + struct etf_sched_data *q = qdisc_priv(sch); + + rb_erase(&skb->rbnode, &q->head); + + /* The rbnode field in the skb re-uses these fields, now that + * we are done with the rbnode, reset them. 
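For reference, the validity checks above tie etf to the SO_TXTIME socket API: a packet is only accepted if its socket enabled SO_TXTIME with the same clockid as the qdisc and carries an absolute per-packet transmit time. A minimal, hedged userspace sketch follows; the header names and the SO_TXTIME/SCM_TXTIME uapi are assumed to come from the same kernel series as this patch.

#include <stdint.h>
#include <string.h>
#include <time.h>
#include <sys/types.h>
#include <sys/uio.h>
#include <sys/socket.h>
#include <linux/net_tstamp.h>	/* struct sock_txtime (assumed available) */

/* Enable SO_TXTIME on an already-connected UDP socket; the clockid must
 * match the one configured on the etf qdisc (CLOCK_TAI in this series).
 */
static int enable_txtime(int fd)
{
	struct sock_txtime cfg = { .clockid = CLOCK_TAI, .flags = 0 };

	return setsockopt(fd, SOL_SOCKET, SO_TXTIME, &cfg, sizeof(cfg));
}

/* Send one datagram tagged with an absolute transmit time (nanoseconds,
 * CLOCK_TAI) carried in an SCM_TXTIME control message.
 */
static ssize_t send_at(int fd, const void *buf, size_t len, uint64_t txtime_ns)
{
	char control[CMSG_SPACE(sizeof(txtime_ns))] = {};
	struct iovec iov = { .iov_base = (void *)buf, .iov_len = len };
	struct msghdr msg = {
		.msg_iov = &iov,
		.msg_iovlen = 1,
		.msg_control = control,
		.msg_controllen = sizeof(control),
	};
	struct cmsghdr *cmsg = CMSG_FIRSTHDR(&msg);

	cmsg->cmsg_level = SOL_SOCKET;
	cmsg->cmsg_type = SCM_TXTIME;
	cmsg->cmsg_len = CMSG_LEN(sizeof(txtime_ns));
	memcpy(CMSG_DATA(cmsg), &txtime_ns, sizeof(txtime_ns));

	return sendmsg(fd, &msg, 0);
}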
+ */ + skb->next = NULL; + skb->prev = NULL; + skb->dev = qdisc_dev(sch); + + qdisc_qstats_backlog_dec(sch, skb); + + if (drop) { + struct sk_buff *to_free = NULL; + + report_sock_error(skb, ECANCELED, SO_EE_CODE_TXTIME_MISSED); + + qdisc_drop(skb, sch, &to_free); + kfree_skb_list(to_free); + qdisc_qstats_overlimit(sch); + } else { + qdisc_bstats_update(sch, skb); + + q->last = skb->tstamp; + } + + sch->q.qlen--; +} + +static struct sk_buff *etf_dequeue_timesortedlist(struct Qdisc *sch) +{ + struct etf_sched_data *q = qdisc_priv(sch); + struct sk_buff *skb; + ktime_t now, next; + + skb = etf_peek_timesortedlist(sch); + if (!skb) + return NULL; + + now = q->get_time(); + + /* Drop if packet has expired while in queue. */ + if (ktime_before(skb->tstamp, now)) { + timesortedlist_erase(sch, skb, true); + skb = NULL; + goto out; + } + + /* When in deadline mode, dequeue as soon as possible and change the + * txtime from deadline to (now + delta). + */ + if (q->deadline_mode) { + timesortedlist_erase(sch, skb, false); + skb->tstamp = now; + goto out; + } + + next = ktime_sub_ns(skb->tstamp, q->delta); + + /* Dequeue only if now is within the [txtime - delta, txtime] range. */ + if (ktime_after(now, next)) + timesortedlist_erase(sch, skb, false); + else + skb = NULL; + +out: + /* Now we may need to re-arm the qdisc watchdog for the next packet. */ + reset_watchdog(sch); + + return skb; +} + +static void etf_disable_offload(struct net_device *dev, + struct etf_sched_data *q) +{ + struct tc_etf_qopt_offload etf = { }; + const struct net_device_ops *ops; + int err; + + if (!q->offload) + return; + + ops = dev->netdev_ops; + if (!ops->ndo_setup_tc) + return; + + etf.queue = q->queue; + etf.enable = 0; + + err = ops->ndo_setup_tc(dev, TC_SETUP_QDISC_ETF, &etf); + if (err < 0) + pr_warn("Couldn't disable ETF offload for queue %d\n", + etf.queue); +} + +static int etf_enable_offload(struct net_device *dev, struct etf_sched_data *q, + struct netlink_ext_ack *extack) +{ + const struct net_device_ops *ops = dev->netdev_ops; + struct tc_etf_qopt_offload etf = { }; + int err; + + if (q->offload) + return 0; + + if (!ops->ndo_setup_tc) { + NL_SET_ERR_MSG(extack, "Specified device does not support ETF offload"); + return -EOPNOTSUPP; + } + + etf.queue = q->queue; + etf.enable = 1; + + err = ops->ndo_setup_tc(dev, TC_SETUP_QDISC_ETF, &etf); + if (err < 0) { + NL_SET_ERR_MSG(extack, "Specified device failed to setup ETF hardware offload"); + return err; + } + + return 0; +} + +static int etf_init(struct Qdisc *sch, struct nlattr *opt, + struct netlink_ext_ack *extack) +{ + struct etf_sched_data *q = qdisc_priv(sch); + struct net_device *dev = qdisc_dev(sch); + struct nlattr *tb[TCA_ETF_MAX + 1]; + struct tc_etf_qopt *qopt; + int err; + + if (!opt) { + NL_SET_ERR_MSG(extack, + "Missing ETF qdisc options which are mandatory"); + return -EINVAL; + } + + err = nla_parse_nested(tb, TCA_ETF_MAX, opt, etf_policy, extack); + if (err < 0) + return err; + + if (!tb[TCA_ETF_PARMS]) { + NL_SET_ERR_MSG(extack, "Missing mandatory ETF parameters"); + return -EINVAL; + } + + qopt = nla_data(tb[TCA_ETF_PARMS]); + + pr_debug("delta %d clockid %d offload %s deadline %s\n", + qopt->delta, qopt->clockid, + OFFLOAD_IS_ON(qopt) ? "on" : "off", + DEADLINE_MODE_IS_ON(qopt) ? 
"on" : "off"); + + err = validate_input_params(qopt, extack); + if (err < 0) + return err; + + q->queue = sch->dev_queue - netdev_get_tx_queue(dev, 0); + + if (OFFLOAD_IS_ON(qopt)) { + err = etf_enable_offload(dev, q, extack); + if (err < 0) + return err; + } + + /* Everything went OK, save the parameters used. */ + q->delta = qopt->delta; + q->clockid = qopt->clockid; + q->offload = OFFLOAD_IS_ON(qopt); + q->deadline_mode = DEADLINE_MODE_IS_ON(qopt); + + switch (q->clockid) { + case CLOCK_REALTIME: + q->get_time = ktime_get_real; + break; + case CLOCK_MONOTONIC: + q->get_time = ktime_get; + break; + case CLOCK_BOOTTIME: + q->get_time = ktime_get_boottime; + break; + case CLOCK_TAI: + q->get_time = ktime_get_clocktai; + break; + default: + NL_SET_ERR_MSG(extack, "Clockid is not supported"); + return -ENOTSUPP; + } + + qdisc_watchdog_init_clockid(&q->watchdog, sch, q->clockid); + + return 0; +} + +static void timesortedlist_clear(struct Qdisc *sch) +{ + struct etf_sched_data *q = qdisc_priv(sch); + struct rb_node *p = rb_first(&q->head); + + while (p) { + struct sk_buff *skb = rb_to_skb(p); + + p = rb_next(p); + + rb_erase(&skb->rbnode, &q->head); + rtnl_kfree_skbs(skb, skb); + sch->q.qlen--; + } +} + +static void etf_reset(struct Qdisc *sch) +{ + struct etf_sched_data *q = qdisc_priv(sch); + + /* Only cancel watchdog if it's been initialized. */ + if (q->watchdog.qdisc == sch) + qdisc_watchdog_cancel(&q->watchdog); + + /* No matter which mode we are on, it's safe to clear both lists. */ + timesortedlist_clear(sch); + __qdisc_reset_queue(&sch->q); + + sch->qstats.backlog = 0; + sch->q.qlen = 0; + + q->last = 0; +} + +static void etf_destroy(struct Qdisc *sch) +{ + struct etf_sched_data *q = qdisc_priv(sch); + struct net_device *dev = qdisc_dev(sch); + + /* Only cancel watchdog if it's been initialized. 
*/ + if (q->watchdog.qdisc == sch) + qdisc_watchdog_cancel(&q->watchdog); + + etf_disable_offload(dev, q); +} + +static int etf_dump(struct Qdisc *sch, struct sk_buff *skb) +{ + struct etf_sched_data *q = qdisc_priv(sch); + struct tc_etf_qopt opt = { }; + struct nlattr *nest; + + nest = nla_nest_start(skb, TCA_OPTIONS); + if (!nest) + goto nla_put_failure; + + opt.delta = q->delta; + opt.clockid = q->clockid; + if (q->offload) + opt.flags |= TC_ETF_OFFLOAD_ON; + + if (q->deadline_mode) + opt.flags |= TC_ETF_DEADLINE_MODE_ON; + + if (nla_put(skb, TCA_ETF_PARMS, sizeof(opt), &opt)) + goto nla_put_failure; + + return nla_nest_end(skb, nest); + +nla_put_failure: + nla_nest_cancel(skb, nest); + return -1; +} + +static struct Qdisc_ops etf_qdisc_ops __read_mostly = { + .id = "etf", + .priv_size = sizeof(struct etf_sched_data), + .enqueue = etf_enqueue_timesortedlist, + .dequeue = etf_dequeue_timesortedlist, + .peek = etf_peek_timesortedlist, + .init = etf_init, + .reset = etf_reset, + .destroy = etf_destroy, + .dump = etf_dump, + .owner = THIS_MODULE, +}; + +static int __init etf_module_init(void) +{ + return register_qdisc(&etf_qdisc_ops); +} + +static void __exit etf_module_exit(void) +{ + unregister_qdisc(&etf_qdisc_ops); +} +module_init(etf_module_init) +module_exit(etf_module_exit) +MODULE_LICENSE("GPL"); diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c index 2a4ab7caf553..43c4bfe625a9 100644 --- a/net/sched/sch_htb.c +++ b/net/sched/sch_htb.c @@ -126,7 +126,6 @@ struct htb_class { union { struct htb_class_leaf { - struct list_head drop_list; int deficit[TC_HTB_MAXDEPTH]; struct Qdisc *q; } leaf; @@ -171,7 +170,6 @@ struct htb_sched { struct qdisc_watchdog watchdog; s64 now; /* cached dequeue time */ - struct list_head drops[TC_HTB_NUMPRIO];/* active leaves (for drops) */ /* time of nearest event per level (row) */ s64 near_ev_cache[TC_HTB_MAXDEPTH]; @@ -562,8 +560,6 @@ static inline void htb_activate(struct htb_sched *q, struct htb_class *cl) if (!cl->prio_activity) { cl->prio_activity = 1 << cl->prio; htb_activate_prios(q, cl); - list_add_tail(&cl->un.leaf.drop_list, - q->drops + cl->prio); } } @@ -579,7 +575,6 @@ static inline void htb_deactivate(struct htb_sched *q, struct htb_class *cl) htb_deactivate_prios(q, cl); cl->prio_activity = 0; - list_del_init(&cl->un.leaf.drop_list); } static void htb_enqueue_tail(struct sk_buff *skb, struct Qdisc *sch, @@ -981,7 +976,6 @@ static void htb_reset(struct Qdisc *sch) else { if (cl->un.leaf.q) qdisc_reset(cl->un.leaf.q); - INIT_LIST_HEAD(&cl->un.leaf.drop_list); } cl->prio_activity = 0; cl->cmode = HTB_CAN_SEND; @@ -993,8 +987,6 @@ static void htb_reset(struct Qdisc *sch) sch->qstats.backlog = 0; memset(q->hlevel, 0, sizeof(q->hlevel)); memset(q->row_mask, 0, sizeof(q->row_mask)); - for (i = 0; i < TC_HTB_NUMPRIO; i++) - INIT_LIST_HEAD(q->drops + i); } static const struct nla_policy htb_policy[TCA_HTB_MAX + 1] = { @@ -1024,7 +1016,6 @@ static int htb_init(struct Qdisc *sch, struct nlattr *opt, struct nlattr *tb[TCA_HTB_MAX + 1]; struct tc_htb_glob *gopt; int err; - int i; qdisc_watchdog_init(&q->watchdog, sch); INIT_WORK(&q->work, htb_work_func); @@ -1050,8 +1041,6 @@ static int htb_init(struct Qdisc *sch, struct nlattr *opt, err = qdisc_class_hash_init(&q->clhash); if (err < 0) return err; - for (i = 0; i < TC_HTB_NUMPRIO; i++) - INIT_LIST_HEAD(q->drops + i); qdisc_skb_head_init(&q->direct_queue); @@ -1224,7 +1213,6 @@ static void htb_parent_to_leaf(struct htb_sched *q, struct htb_class *cl, parent->level = 0; memset(&parent->un.inner, 0, 
sizeof(parent->un.inner)); - INIT_LIST_HEAD(&parent->un.leaf.drop_list); parent->un.leaf.q = new_q ? new_q : &noop_qdisc; parent->tokens = parent->buffer; parent->ctokens = parent->cbuffer; @@ -1418,7 +1406,6 @@ static int htb_change_class(struct Qdisc *sch, u32 classid, } cl->children = 0; - INIT_LIST_HEAD(&cl->un.leaf.drop_list); RB_CLEAR_NODE(&cl->pq_node); for (prio = 0; prio < TC_HTB_NUMPRIO; prio++) diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c index 7d6801fc5340..ad18a2052416 100644 --- a/net/sched/sch_netem.c +++ b/net/sched/sch_netem.c @@ -68,6 +68,11 @@ Fabio Ludovici <fabio.ludovici at yahoo.it> */ +struct disttable { + u32 size; + s16 table[0]; +}; + struct netem_sched_data { /* internal t(ime)fifo qdisc uses t_root and sch->limit */ struct rb_root t_root; @@ -99,10 +104,7 @@ struct netem_sched_data { u32 rho; } delay_cor, loss_cor, dup_cor, reorder_cor, corrupt_cor; - struct disttable { - u32 size; - s16 table[0]; - } *delay_dist; + struct disttable *delay_dist; enum { CLG_RANDOM, @@ -142,6 +144,7 @@ struct netem_sched_data { s32 bytes_left; } slot; + struct disttable *slot_dist; }; /* Time stamp put into socket buffer control block @@ -180,7 +183,7 @@ static u32 get_crandom(struct crndstate *state) u64 value, rho; unsigned long answer; - if (state->rho == 0) /* no correlation */ + if (!state || state->rho == 0) /* no correlation */ return prandom_u32(); value = prandom_u32(); @@ -601,10 +604,19 @@ finish_segs: static void get_slot_next(struct netem_sched_data *q, u64 now) { - q->slot.slot_next = now + q->slot_config.min_delay + - (prandom_u32() * - (q->slot_config.max_delay - - q->slot_config.min_delay) >> 32); + s64 next_delay; + + if (!q->slot_dist) + next_delay = q->slot_config.min_delay + + (prandom_u32() * + (q->slot_config.max_delay - + q->slot_config.min_delay) >> 32); + else + next_delay = tabledist(q->slot_config.dist_delay, + (s32)(q->slot_config.dist_jitter), + NULL, q->slot_dist); + + q->slot.slot_next = now + next_delay; q->slot.packets_left = q->slot_config.max_packets; q->slot.bytes_left = q->slot_config.max_bytes; } @@ -721,9 +733,9 @@ static void dist_free(struct disttable *d) * signed 16 bit values. 
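The loader below is now generic over which table it fills (the existing delay_dist or the new slot_dist), and both are consumed the same way by tabledist(). A simplified model of that lookup is sketched here; NETEM_DIST_SCALE is 8192 in the uapi header, but the arithmetic is deliberately reduced and is not the exact in-tree formula.

#include <stdint.h>
#include <stdlib.h>

struct disttable_model {
	uint32_t size;
	int16_t table[];	/* normalized, signed 16 bit samples */
};

#define DIST_SCALE 8192		/* NETEM_DIST_SCALE in the kernel uapi */

/* Pick a random sample from the table, scale it by sigma and offset by mu;
 * with no table, fall back to a uniform spread around mu. This mirrors how
 * get_slot_next() derives the next slot gap when TCA_NETEM_SLOT_DIST is set.
 */
static int64_t tabledist_model(int64_t mu, int32_t sigma,
			       const struct disttable_model *d)
{
	int16_t t;

	if (!sigma)
		return mu;
	if (!d)
		return mu + (rand() % (2 * sigma)) - sigma;

	t = d->table[rand() % d->size];
	return mu + ((int64_t)sigma * t) / DIST_SCALE;
}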
*/ -static int get_dist_table(struct Qdisc *sch, const struct nlattr *attr) +static int get_dist_table(struct Qdisc *sch, struct disttable **tbl, + const struct nlattr *attr) { - struct netem_sched_data *q = qdisc_priv(sch); size_t n = nla_len(attr)/sizeof(__s16); const __s16 *data = nla_data(attr); spinlock_t *root_lock; @@ -744,7 +756,7 @@ static int get_dist_table(struct Qdisc *sch, const struct nlattr *attr) root_lock = qdisc_root_sleeping_lock(sch); spin_lock_bh(root_lock); - swap(q->delay_dist, d); + swap(*tbl, d); spin_unlock_bh(root_lock); dist_free(d); @@ -762,7 +774,8 @@ static void get_slot(struct netem_sched_data *q, const struct nlattr *attr) q->slot_config.max_bytes = INT_MAX; q->slot.packets_left = q->slot_config.max_packets; q->slot.bytes_left = q->slot_config.max_bytes; - if (q->slot_config.min_delay | q->slot_config.max_delay) + if (q->slot_config.min_delay | q->slot_config.max_delay | + q->slot_config.dist_jitter) q->slot.slot_next = ktime_get_ns(); else q->slot.slot_next = 0; @@ -926,16 +939,17 @@ static int netem_change(struct Qdisc *sch, struct nlattr *opt, } if (tb[TCA_NETEM_DELAY_DIST]) { - ret = get_dist_table(sch, tb[TCA_NETEM_DELAY_DIST]); - if (ret) { - /* recover clg and loss_model, in case of - * q->clg and q->loss_model were modified - * in get_loss_clg() - */ - q->clg = old_clg; - q->loss_model = old_loss_model; - return ret; - } + ret = get_dist_table(sch, &q->delay_dist, + tb[TCA_NETEM_DELAY_DIST]); + if (ret) + goto get_table_failure; + } + + if (tb[TCA_NETEM_SLOT_DIST]) { + ret = get_dist_table(sch, &q->slot_dist, + tb[TCA_NETEM_SLOT_DIST]); + if (ret) + goto get_table_failure; } sch->limit = qopt->limit; @@ -983,6 +997,15 @@ static int netem_change(struct Qdisc *sch, struct nlattr *opt, get_slot(q, tb[TCA_NETEM_SLOT]); return ret; + +get_table_failure: + /* recover clg and loss_model, in case of + * q->clg and q->loss_model were modified + * in get_loss_clg() + */ + q->clg = old_clg; + q->loss_model = old_loss_model; + return ret; } static int netem_init(struct Qdisc *sch, struct nlattr *opt, @@ -1011,6 +1034,7 @@ static void netem_destroy(struct Qdisc *sch) if (q->qdisc) qdisc_destroy(q->qdisc); dist_free(q->delay_dist); + dist_free(q->slot_dist); } static int dump_loss_model(const struct netem_sched_data *q, @@ -1127,7 +1151,8 @@ static int netem_dump(struct Qdisc *sch, struct sk_buff *skb) if (dump_loss_model(q, skb) != 0) goto nla_put_failure; - if (q->slot_config.min_delay | q->slot_config.max_delay) { + if (q->slot_config.min_delay | q->slot_config.max_delay | + q->slot_config.dist_jitter) { slot = q->slot_config; if (slot.max_packets == INT_MAX) slot.max_packets = 0; diff --git a/net/sctp/associola.c b/net/sctp/associola.c index 5d5a16204d50..297d9cf960b9 100644 --- a/net/sctp/associola.c +++ b/net/sctp/associola.c @@ -115,6 +115,9 @@ static struct sctp_association *sctp_association_init( /* Initialize path max retrans value. */ asoc->pathmaxrxt = sp->pathmaxrxt; + asoc->flowlabel = sp->flowlabel; + asoc->dscp = sp->dscp; + /* Initialize default path MTU. 
*/ asoc->pathmtu = sp->pathmtu; @@ -647,6 +650,18 @@ struct sctp_transport *sctp_assoc_add_peer(struct sctp_association *asoc, peer->sackdelay = asoc->sackdelay; peer->sackfreq = asoc->sackfreq; + if (addr->sa.sa_family == AF_INET6) { + __be32 info = addr->v6.sin6_flowinfo; + + if (info) { + peer->flowlabel = ntohl(info & IPV6_FLOWLABEL_MASK); + peer->flowlabel |= SCTP_FLOWLABEL_SET_MASK; + } else { + peer->flowlabel = asoc->flowlabel; + } + } + peer->dscp = asoc->dscp; + /* Enable/disable heartbeat, SACK delay, and path MTU discovery * based on association setting. */ diff --git a/net/sctp/input.c b/net/sctp/input.c index ba8a6e6c36fa..9bbc5f92c941 100644 --- a/net/sctp/input.c +++ b/net/sctp/input.c @@ -56,6 +56,7 @@ #include <net/sctp/sm.h> #include <net/sctp/checksum.h> #include <net/net_namespace.h> +#include <linux/rhashtable.h> /* Forward declarations for internal helpers. */ static int sctp_rcv_ootb(struct sk_buff *); diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c index 0cd2e764f47f..fc6c5e4bffa5 100644 --- a/net/sctp/ipv6.c +++ b/net/sctp/ipv6.c @@ -209,12 +209,17 @@ static int sctp_v6_xmit(struct sk_buff *skb, struct sctp_transport *transport) struct sock *sk = skb->sk; struct ipv6_pinfo *np = inet6_sk(sk); struct flowi6 *fl6 = &transport->fl.u.ip6; + __u8 tclass = np->tclass; int res; pr_debug("%s: skb:%p, len:%d, src:%pI6 dst:%pI6\n", __func__, skb, skb->len, &fl6->saddr, &fl6->daddr); - IP6_ECN_flow_xmit(sk, fl6->flowlabel); + if (transport->dscp & SCTP_DSCP_SET_MASK) + tclass = transport->dscp & SCTP_DSCP_VAL_MASK; + + if (INET_ECN_is_capable(tclass)) + IP6_ECN_flow_xmit(sk, fl6->flowlabel); if (!(transport->param_flags & SPP_PMTUD_ENABLE)) skb->ignore_df = 1; @@ -223,7 +228,7 @@ static int sctp_v6_xmit(struct sk_buff *skb, struct sctp_transport *transport) rcu_read_lock(); res = ip6_xmit(sk, skb, fl6, sk->sk_mark, rcu_dereference(np->opt), - np->tclass); + tclass); rcu_read_unlock(); return res; } @@ -254,6 +259,17 @@ static void sctp_v6_get_dst(struct sctp_transport *t, union sctp_addr *saddr, fl6->flowi6_oif = daddr->v6.sin6_scope_id; else if (asoc) fl6->flowi6_oif = asoc->base.sk->sk_bound_dev_if; + if (t->flowlabel & SCTP_FLOWLABEL_SET_MASK) + fl6->flowlabel = htonl(t->flowlabel & SCTP_FLOWLABEL_VAL_MASK); + + if (np->sndflow && (fl6->flowlabel & IPV6_FLOWLABEL_MASK)) { + struct ip6_flowlabel *flowlabel; + + flowlabel = fl6_sock_lookup(sk, fl6->flowlabel); + if (!flowlabel) + goto out; + fl6_sock_release(flowlabel); + } pr_debug("%s: dst=%pI6 ", __func__, &fl6->daddr); diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c index 67f73d3a1356..e948db29ab53 100644 --- a/net/sctp/protocol.c +++ b/net/sctp/protocol.c @@ -426,13 +426,16 @@ static void sctp_v4_get_dst(struct sctp_transport *t, union sctp_addr *saddr, struct dst_entry *dst = NULL; union sctp_addr *daddr = &t->ipaddr; union sctp_addr dst_saddr; + __u8 tos = inet_sk(sk)->tos; + if (t->dscp & SCTP_DSCP_SET_MASK) + tos = t->dscp & SCTP_DSCP_VAL_MASK; memset(fl4, 0x0, sizeof(struct flowi4)); fl4->daddr = daddr->v4.sin_addr.s_addr; fl4->fl4_dport = daddr->v4.sin_port; fl4->flowi4_proto = IPPROTO_SCTP; if (asoc) { - fl4->flowi4_tos = RT_CONN_FLAGS(asoc->base.sk); + fl4->flowi4_tos = RT_CONN_FLAGS_TOS(asoc->base.sk, tos); fl4->flowi4_oif = asoc->base.sk->sk_bound_dev_if; fl4->fl4_sport = htons(asoc->base.bind_addr.port); } @@ -495,7 +498,7 @@ static void sctp_v4_get_dst(struct sctp_transport *t, union sctp_addr *saddr, fl4->fl4_sport = laddr->a.v4.sin_port; flowi4_update_output(fl4, asoc->base.sk->sk_bound_dev_if, - 
RT_CONN_FLAGS(asoc->base.sk), + RT_CONN_FLAGS_TOS(asoc->base.sk, tos), daddr->v4.sin_addr.s_addr, laddr->a.v4.sin_addr.s_addr); @@ -971,16 +974,21 @@ static inline int sctp_v4_xmit(struct sk_buff *skb, struct sctp_transport *transport) { struct inet_sock *inet = inet_sk(skb->sk); + __u8 dscp = inet->tos; pr_debug("%s: skb:%p, len:%d, src:%pI4, dst:%pI4\n", __func__, skb, - skb->len, &transport->fl.u.ip4.saddr, &transport->fl.u.ip4.daddr); + skb->len, &transport->fl.u.ip4.saddr, + &transport->fl.u.ip4.daddr); + + if (transport->dscp & SCTP_DSCP_SET_MASK) + dscp = transport->dscp & SCTP_DSCP_VAL_MASK; inet->pmtudisc = transport->param_flags & SPP_PMTUD_ENABLE ? IP_PMTUDISC_DO : IP_PMTUDISC_DONT; SCTP_INC_STATS(sock_net(&inet->sk), SCTP_MIB_OUTSCTPPACKS); - return ip_queue_xmit(&inet->sk, skb, &transport->fl); + return __ip_queue_xmit(&inet->sk, skb, &transport->fl, dscp); } static struct sctp_af sctp_af_inet; diff --git a/net/sctp/socket.c b/net/sctp/socket.c index ce620e878538..502c0d7cb105 100644 --- a/net/sctp/socket.c +++ b/net/sctp/socket.c @@ -66,6 +66,7 @@ #include <linux/slab.h> #include <linux/file.h> #include <linux/compat.h> +#include <linux/rhashtable.h> #include <net/ip.h> #include <net/icmp.h> @@ -1696,6 +1697,7 @@ static int sctp_sendmsg_new_asoc(struct sock *sk, __u16 sflags, struct sctp_association *asoc; enum sctp_scope scope; struct cmsghdr *cmsg; + __be32 flowinfo = 0; struct sctp_af *af; int err; @@ -1780,6 +1782,9 @@ static int sctp_sendmsg_new_asoc(struct sock *sk, __u16 sflags, if (!cmsgs->addrs_msg) return 0; + if (daddr->sa.sa_family == AF_INET6) + flowinfo = daddr->v6.sin6_flowinfo; + /* sendv addr list parse */ for_each_cmsghdr(cmsg, cmsgs->addrs_msg) { struct sctp_transport *transport; @@ -1812,6 +1817,7 @@ static int sctp_sendmsg_new_asoc(struct sock *sk, __u16 sflags, } dlen = sizeof(struct in6_addr); + daddr->v6.sin6_flowinfo = flowinfo; daddr->v6.sin6_family = AF_INET6; daddr->v6.sin6_port = htons(asoc->peer.port); memcpy(&daddr->v6.sin6_addr, CMSG_DATA(cmsg), dlen); @@ -2392,6 +2398,8 @@ static int sctp_setsockopt_autoclose(struct sock *sk, char __user *optval, * uint32_t spp_pathmtu; * uint32_t spp_sackdelay; * uint32_t spp_flags; + * uint32_t spp_ipv6_flowlabel; + * uint8_t spp_dscp; * }; * * spp_assoc_id - (one-to-many style socket) This is filled in the @@ -2471,6 +2479,45 @@ static int sctp_setsockopt_autoclose(struct sock *sk, char __user *optval, * also that this field is mutually exclusive to * SPP_SACKDELAY_ENABLE, setting both will have undefined * results. + * + * SPP_IPV6_FLOWLABEL: Setting this flag enables the + * setting of the IPV6 flow label value. The value is + * contained in the spp_ipv6_flowlabel field. + * Upon retrieval, this flag will be set to indicate that + * the spp_ipv6_flowlabel field has a valid value returned. + * If a specific destination address is set (in the + * spp_address field), then the value returned is that of + * the address. If just an association is specified (and + * no address), then the association's default flow label + * is returned. If neither an association nor a destination + * is specified, then the socket's default flow label is + * returned. For non-IPv6 sockets, this flag will be left + * cleared. + * + * SPP_DSCP: Setting this flag enables the setting of the + * Differentiated Services Code Point (DSCP) value + * associated with either the association or a specific + * address. The value is obtained in the spp_dscp field. 
+ * Upon retrieval, this flag will be set to indicate that + * the spp_dscp field has a valid value returned. If a + * specific destination address is set when called (in the + * spp_address field), then that specific destination + * address's DSCP value is returned. If just an association + * is specified, then the association's default DSCP is + * returned. If neither an association nor a destination is + * specified, then the socket's default DSCP is returned. + * + * spp_ipv6_flowlabel + * - This field is used in conjunction with the + * SPP_IPV6_FLOWLABEL flag and contains the IPv6 flow label. + * The 20 least significant bits are used for the flow + * label. This setting has precedence over any IPv6-layer + * setting. + * + * spp_dscp - This field is used in conjunction with the SPP_DSCP flag + * and contains the DSCP. The 6 most significant bits are + * used for the DSCP. This setting has precedence over any + * IPv4- or IPv6- layer setting. */ static int sctp_apply_peer_addr_params(struct sctp_paddrparams *params, struct sctp_transport *trans, @@ -2610,6 +2657,51 @@ static int sctp_apply_peer_addr_params(struct sctp_paddrparams *params, } } + if (params->spp_flags & SPP_IPV6_FLOWLABEL) { + if (trans && trans->ipaddr.sa.sa_family == AF_INET6) { + trans->flowlabel = params->spp_ipv6_flowlabel & + SCTP_FLOWLABEL_VAL_MASK; + trans->flowlabel |= SCTP_FLOWLABEL_SET_MASK; + } else if (asoc) { + list_for_each_entry(trans, + &asoc->peer.transport_addr_list, + transports) { + if (trans->ipaddr.sa.sa_family != AF_INET6) + continue; + trans->flowlabel = params->spp_ipv6_flowlabel & + SCTP_FLOWLABEL_VAL_MASK; + trans->flowlabel |= SCTP_FLOWLABEL_SET_MASK; + } + asoc->flowlabel = params->spp_ipv6_flowlabel & + SCTP_FLOWLABEL_VAL_MASK; + asoc->flowlabel |= SCTP_FLOWLABEL_SET_MASK; + } else if (sctp_opt2sk(sp)->sk_family == AF_INET6) { + sp->flowlabel = params->spp_ipv6_flowlabel & + SCTP_FLOWLABEL_VAL_MASK; + sp->flowlabel |= SCTP_FLOWLABEL_SET_MASK; + } + } + + if (params->spp_flags & SPP_DSCP) { + if (trans) { + trans->dscp = params->spp_dscp & SCTP_DSCP_VAL_MASK; + trans->dscp |= SCTP_DSCP_SET_MASK; + } else if (asoc) { + list_for_each_entry(trans, + &asoc->peer.transport_addr_list, + transports) { + trans->dscp = params->spp_dscp & + SCTP_DSCP_VAL_MASK; + trans->dscp |= SCTP_DSCP_SET_MASK; + } + asoc->dscp = params->spp_dscp & SCTP_DSCP_VAL_MASK; + asoc->dscp |= SCTP_DSCP_SET_MASK; + } else { + sp->dscp = params->spp_dscp & SCTP_DSCP_VAL_MASK; + sp->dscp |= SCTP_DSCP_SET_MASK; + } + } + return 0; } @@ -2624,11 +2716,18 @@ static int sctp_setsockopt_peer_addr_params(struct sock *sk, int error; int hb_change, pmtud_change, sackdelay_change; - if (optlen != sizeof(struct sctp_paddrparams)) + if (optlen == sizeof(params)) { + if (copy_from_user(¶ms, optval, optlen)) + return -EFAULT; + } else if (optlen == ALIGN(offsetof(struct sctp_paddrparams, + spp_ipv6_flowlabel), 4)) { + if (copy_from_user(¶ms, optval, optlen)) + return -EFAULT; + if (params.spp_flags & (SPP_DSCP | SPP_IPV6_FLOWLABEL)) + return -EINVAL; + } else { return -EINVAL; - - if (copy_from_user(¶ms, optval, optlen)) - return -EFAULT; + } /* Validate flags and value parameters. 
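For binary compatibility, the shorter accepted length is exactly the old struct sctp_paddrparams, which ended just before spp_ipv6_flowlabel; callers built against that layout may still pass it, but since they cannot have initialised the new fields, asking for SPP_DSCP or SPP_IPV6_FLOWLABEL with the short form is rejected with -EINVAL as shown above.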
*/ hb_change = params.spp_flags & SPP_HB; @@ -4169,6 +4268,28 @@ out: return retval; } +static int sctp_setsockopt_reuse_port(struct sock *sk, char __user *optval, + unsigned int optlen) +{ + int val; + + if (!sctp_style(sk, TCP)) + return -EOPNOTSUPP; + + if (sctp_sk(sk)->ep->base.bind_addr.port) + return -EFAULT; + + if (optlen < sizeof(int)) + return -EINVAL; + + if (get_user(val, (int __user *)optval)) + return -EFAULT; + + sctp_sk(sk)->reuse = !!val; + + return 0; +} + /* API 6.2 setsockopt(), getsockopt() * * Applications use setsockopt() and getsockopt() to set or retrieve @@ -4363,6 +4484,9 @@ static int sctp_setsockopt(struct sock *sk, int level, int optname, retval = sctp_setsockopt_interleaving_supported(sk, optval, optlen); break; + case SCTP_REUSE_PORT: + retval = sctp_setsockopt_reuse_port(sk, optval, optlen); + break; default: retval = -ENOPROTOOPT; break; @@ -5427,6 +5551,45 @@ out: * also that this field is mutually exclusive to * SPP_SACKDELAY_ENABLE, setting both will have undefined * results. + * + * SPP_IPV6_FLOWLABEL: Setting this flag enables the + * setting of the IPV6 flow label value. The value is + * contained in the spp_ipv6_flowlabel field. + * Upon retrieval, this flag will be set to indicate that + * the spp_ipv6_flowlabel field has a valid value returned. + * If a specific destination address is set (in the + * spp_address field), then the value returned is that of + * the address. If just an association is specified (and + * no address), then the association's default flow label + * is returned. If neither an association nor a destination + * is specified, then the socket's default flow label is + * returned. For non-IPv6 sockets, this flag will be left + * cleared. + * + * SPP_DSCP: Setting this flag enables the setting of the + * Differentiated Services Code Point (DSCP) value + * associated with either the association or a specific + * address. The value is obtained in the spp_dscp field. + * Upon retrieval, this flag will be set to indicate that + * the spp_dscp field has a valid value returned. If a + * specific destination address is set when called (in the + * spp_address field), then that specific destination + * address's DSCP value is returned. If just an association + * is specified, then the association's default DSCP is + * returned. If neither an association nor a destination is + * specified, then the socket's default DSCP is returned. + * + * spp_ipv6_flowlabel + * - This field is used in conjunction with the + * SPP_IPV6_FLOWLABEL flag and contains the IPv6 flow label. + * The 20 least significant bits are used for the flow + * label. This setting has precedence over any IPv6-layer + * setting. + * + * spp_dscp - This field is used in conjunction with the SPP_DSCP flag + * and contains the DSCP. The 6 most significant bits are + * used for the DSCP. This setting has precedence over any + * IPv4- or IPv6- layer setting. 
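A hedged userspace sketch of the API documented above; it assumes headers generated from a kernel carrying this change (for SPP_DSCP, SPP_IPV6_FLOWLABEL and the widened struct sctp_paddrparams), and the DSCP and flow-label values are purely illustrative.

#include <string.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/sctp.h>

/* Set a default DSCP of EF (46) and an IPv6 flow label for the association;
 * leaving spp_address zeroed applies the values association-wide rather than
 * to a single peer address, per the description above.
 */
static int set_assoc_dscp_flowlabel(int fd, sctp_assoc_t assoc_id)
{
	struct sctp_paddrparams p;

	memset(&p, 0, sizeof(p));
	p.spp_assoc_id = assoc_id;
	p.spp_flags = SPP_DSCP | SPP_IPV6_FLOWLABEL;
	p.spp_dscp = 46 << 2;		/* DSCP lives in the 6 most significant bits */
	p.spp_ipv6_flowlabel = 0x12345;	/* 20 least significant bits */

	return setsockopt(fd, IPPROTO_SCTP, SCTP_PEER_ADDR_PARAMS,
			  &p, sizeof(p));
}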
*/ static int sctp_getsockopt_peer_addr_params(struct sock *sk, int len, char __user *optval, int __user *optlen) @@ -5436,9 +5599,15 @@ static int sctp_getsockopt_peer_addr_params(struct sock *sk, int len, struct sctp_association *asoc = NULL; struct sctp_sock *sp = sctp_sk(sk); - if (len < sizeof(struct sctp_paddrparams)) + if (len >= sizeof(params)) + len = sizeof(params); + else if (len >= ALIGN(offsetof(struct sctp_paddrparams, + spp_ipv6_flowlabel), 4)) + len = ALIGN(offsetof(struct sctp_paddrparams, + spp_ipv6_flowlabel), 4); + else return -EINVAL; - len = sizeof(struct sctp_paddrparams); + if (copy_from_user(¶ms, optval, len)) return -EFAULT; @@ -5473,6 +5642,15 @@ static int sctp_getsockopt_peer_addr_params(struct sock *sk, int len, /*draft-11 doesn't say what to return in spp_flags*/ params.spp_flags = trans->param_flags; + if (trans->flowlabel & SCTP_FLOWLABEL_SET_MASK) { + params.spp_ipv6_flowlabel = trans->flowlabel & + SCTP_FLOWLABEL_VAL_MASK; + params.spp_flags |= SPP_IPV6_FLOWLABEL; + } + if (trans->dscp & SCTP_DSCP_SET_MASK) { + params.spp_dscp = trans->dscp & SCTP_DSCP_VAL_MASK; + params.spp_flags |= SPP_DSCP; + } } else if (asoc) { /* Fetch association values. */ params.spp_hbinterval = jiffies_to_msecs(asoc->hbinterval); @@ -5482,6 +5660,15 @@ static int sctp_getsockopt_peer_addr_params(struct sock *sk, int len, /*draft-11 doesn't say what to return in spp_flags*/ params.spp_flags = asoc->param_flags; + if (asoc->flowlabel & SCTP_FLOWLABEL_SET_MASK) { + params.spp_ipv6_flowlabel = asoc->flowlabel & + SCTP_FLOWLABEL_VAL_MASK; + params.spp_flags |= SPP_IPV6_FLOWLABEL; + } + if (asoc->dscp & SCTP_DSCP_SET_MASK) { + params.spp_dscp = asoc->dscp & SCTP_DSCP_VAL_MASK; + params.spp_flags |= SPP_DSCP; + } } else { /* Fetch socket values. 
*/ params.spp_hbinterval = sp->hbinterval; @@ -5491,6 +5678,15 @@ static int sctp_getsockopt_peer_addr_params(struct sock *sk, int len, /*draft-11 doesn't say what to return in spp_flags*/ params.spp_flags = sp->param_flags; + if (sp->flowlabel & SCTP_FLOWLABEL_SET_MASK) { + params.spp_ipv6_flowlabel = sp->flowlabel & + SCTP_FLOWLABEL_VAL_MASK; + params.spp_flags |= SPP_IPV6_FLOWLABEL; + } + if (sp->dscp & SCTP_DSCP_SET_MASK) { + params.spp_dscp = sp->dscp & SCTP_DSCP_VAL_MASK; + params.spp_flags |= SPP_DSCP; + } } if (copy_to_user(optval, ¶ms, len)) @@ -7196,6 +7392,26 @@ out: return retval; } +static int sctp_getsockopt_reuse_port(struct sock *sk, int len, + char __user *optval, + int __user *optlen) +{ + int val; + + if (len < sizeof(int)) + return -EINVAL; + + len = sizeof(int); + val = sctp_sk(sk)->reuse; + if (put_user(len, optlen)) + return -EFAULT; + + if (copy_to_user(optval, &val, len)) + return -EFAULT; + + return 0; +} + static int sctp_getsockopt(struct sock *sk, int level, int optname, char __user *optval, int __user *optlen) { @@ -7391,6 +7607,9 @@ static int sctp_getsockopt(struct sock *sk, int level, int optname, retval = sctp_getsockopt_interleaving_supported(sk, len, optval, optlen); break; + case SCTP_REUSE_PORT: + retval = sctp_getsockopt_reuse_port(sk, len, optval, optlen); + break; default: retval = -ENOPROTOOPT; break; @@ -7428,6 +7647,7 @@ static struct sctp_bind_bucket *sctp_bucket_create( static long sctp_get_port_local(struct sock *sk, union sctp_addr *addr) { + bool reuse = (sk->sk_reuse || sctp_sk(sk)->reuse); struct sctp_bind_hashbucket *head; /* hash list */ struct sctp_bind_bucket *pp; unsigned short snum; @@ -7500,13 +7720,11 @@ pp_found: * used by other socket (pp->owner not empty); that other * socket is going to be sk2. */ - int reuse = sk->sk_reuse; struct sock *sk2; pr_debug("%s: found a possible match\n", __func__); - if (pp->fastreuse && sk->sk_reuse && - sk->sk_state != SCTP_SS_LISTENING) + if (pp->fastreuse && reuse && sk->sk_state != SCTP_SS_LISTENING) goto success; /* Run through the list of sockets bound to the port @@ -7524,7 +7742,7 @@ pp_found: ep2 = sctp_sk(sk2)->ep; if (sk == sk2 || - (reuse && sk2->sk_reuse && + (reuse && (sk2->sk_reuse || sctp_sk(sk2)->reuse) && sk2->sk_state != SCTP_SS_LISTENING)) continue; @@ -7548,12 +7766,12 @@ pp_not_found: * SO_REUSEADDR on this socket -sk-). 
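The port-sharing logic here keys off the new per-socket reuse flag rather than plain SO_REUSEADDR. A hedged sketch of how an application would opt in follows; SCTP_REUSE_PORT is the option added by this patch (its presence in installed headers is an assumption), it is only honoured on TCP-style sockets, and it must be set before the port is bound.

#include <string.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/sctp.h>

static int bind_shared_sctp_port(void)
{
	struct sockaddr_in addr;
	int fd, one = 1;

	fd = socket(AF_INET, SOCK_STREAM, IPPROTO_SCTP);
	if (fd < 0)
		return -1;

	/* Must precede bind(); rejected with EOPNOTSUPP on one-to-many
	 * (SOCK_SEQPACKET) sockets, per sctp_setsockopt_reuse_port() above.
	 */
	if (setsockopt(fd, IPPROTO_SCTP, SCTP_REUSE_PORT, &one, sizeof(one)) < 0)
		return -1;

	memset(&addr, 0, sizeof(addr));
	addr.sin_family = AF_INET;
	addr.sin_port = htons(5000);		/* illustrative port */
	addr.sin_addr.s_addr = htonl(INADDR_ANY);

	return bind(fd, (struct sockaddr *)&addr, sizeof(addr));
}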
*/ if (hlist_empty(&pp->owner)) { - if (sk->sk_reuse && sk->sk_state != SCTP_SS_LISTENING) + if (reuse && sk->sk_state != SCTP_SS_LISTENING) pp->fastreuse = 1; else pp->fastreuse = 0; } else if (pp->fastreuse && - (!sk->sk_reuse || sk->sk_state == SCTP_SS_LISTENING)) + (!reuse || sk->sk_state == SCTP_SS_LISTENING)) pp->fastreuse = 0; /* We are set, so fill up all the data in the hash table @@ -7684,7 +7902,7 @@ int sctp_inet_listen(struct socket *sock, int backlog) err = 0; sctp_unhash_endpoint(ep); sk->sk_state = SCTP_SS_CLOSED; - if (sk->sk_reuse) + if (sk->sk_reuse || sctp_sk(sk)->reuse) sctp_sk(sk)->bind_hash->fastreuse = 1; goto out; } @@ -8551,6 +8769,7 @@ void sctp_copy_sock(struct sock *newsk, struct sock *sk, newsk->sk_no_check_tx = sk->sk_no_check_tx; newsk->sk_no_check_rx = sk->sk_no_check_rx; newsk->sk_reuse = sk->sk_reuse; + sctp_sk(newsk)->reuse = sp->reuse; newsk->sk_shutdown = sk->sk_shutdown; newsk->sk_destruct = sctp_destruct_sock; diff --git a/net/smc/Makefile b/net/smc/Makefile index 188104654b54..4df96b4b8130 100644 --- a/net/smc/Makefile +++ b/net/smc/Makefile @@ -1,4 +1,4 @@ obj-$(CONFIG_SMC) += smc.o obj-$(CONFIG_SMC_DIAG) += smc_diag.o smc-y := af_smc.o smc_pnet.o smc_ib.o smc_clc.o smc_core.o smc_wr.o smc_llc.o -smc-y += smc_cdc.o smc_tx.o smc_rx.o smc_close.o +smc-y += smc_cdc.o smc_tx.o smc_rx.o smc_close.o smc_ism.o diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c index 05e4ffe5aabd..143b2220c0c8 100644 --- a/net/smc/af_smc.c +++ b/net/smc/af_smc.c @@ -23,6 +23,7 @@ #include <linux/workqueue.h> #include <linux/in.h> #include <linux/sched/signal.h> +#include <linux/if_vlan.h> #include <net/sock.h> #include <net/tcp.h> @@ -35,6 +36,7 @@ #include "smc_cdc.h" #include "smc_core.h" #include "smc_ib.h" +#include "smc_ism.h" #include "smc_pnet.h" #include "smc_tx.h" #include "smc_rx.h" @@ -381,8 +383,8 @@ static int smc_clnt_conf_first_link(struct smc_sock *smc) return 0; } -static void smc_conn_save_peer_info(struct smc_sock *smc, - struct smc_clc_msg_accept_confirm *clc) +static void smcr_conn_save_peer_info(struct smc_sock *smc, + struct smc_clc_msg_accept_confirm *clc) { int bufsize = smc_uncompress_bufsize(clc->rmbe_size); @@ -393,6 +395,28 @@ static void smc_conn_save_peer_info(struct smc_sock *smc, smc->conn.tx_off = bufsize * (smc->conn.peer_rmbe_idx - 1); } +static void smcd_conn_save_peer_info(struct smc_sock *smc, + struct smc_clc_msg_accept_confirm *clc) +{ + int bufsize = smc_uncompress_bufsize(clc->dmbe_size); + + smc->conn.peer_rmbe_idx = clc->dmbe_idx; + smc->conn.peer_token = clc->token; + /* msg header takes up space in the buffer */ + smc->conn.peer_rmbe_size = bufsize - sizeof(struct smcd_cdc_msg); + atomic_set(&smc->conn.peer_rmbe_space, smc->conn.peer_rmbe_size); + smc->conn.tx_off = bufsize * smc->conn.peer_rmbe_idx; +} + +static void smc_conn_save_peer_info(struct smc_sock *smc, + struct smc_clc_msg_accept_confirm *clc) +{ + if (smc->conn.lgr->is_smcd) + smcd_conn_save_peer_info(smc, clc); + else + smcr_conn_save_peer_info(smc, clc); +} + static void smc_link_save_peer_info(struct smc_link *link, struct smc_clc_msg_accept_confirm *clc) { @@ -463,15 +487,51 @@ static int smc_check_rdma(struct smc_sock *smc, struct smc_ib_device **ibdev, return reason_code; } +/* check if there is an ISM device available for this connection. 
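As with smc_check_rdma(), the lookup matches the PNET ID of the CLC TCP interface against registered ISM devices; a miss only means SMC-D is not offered for this connection. __smc_connect() further below still probes for an RDMA device and advertises SMC_TYPE_D, SMC_TYPE_R or SMC_TYPE_B (both) accordingly, falling back to plain TCP only when neither path is available.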
*/ +/* called for connect and listen */ +static int smc_check_ism(struct smc_sock *smc, struct smcd_dev **ismdev) +{ + /* Find ISM device with same PNETID as connecting interface */ + smc_pnet_find_ism_resource(smc->clcsock->sk, ismdev); + if (!(*ismdev)) + return SMC_CLC_DECL_CNFERR; /* configuration error */ + return 0; +} + +/* Check for VLAN ID and register it on ISM device just for CLC handshake */ +static int smc_connect_ism_vlan_setup(struct smc_sock *smc, + struct smcd_dev *ismdev, + unsigned short vlan_id) +{ + if (vlan_id && smc_ism_get_vlan(ismdev, vlan_id)) + return SMC_CLC_DECL_CNFERR; + return 0; +} + +/* cleanup temporary VLAN ID registration used for CLC handshake. If ISM is + * used, the VLAN ID will be registered again during the connection setup. + */ +static int smc_connect_ism_vlan_cleanup(struct smc_sock *smc, bool is_smcd, + struct smcd_dev *ismdev, + unsigned short vlan_id) +{ + if (!is_smcd) + return 0; + if (vlan_id && smc_ism_put_vlan(ismdev, vlan_id)) + return SMC_CLC_DECL_CNFERR; + return 0; +} + /* CLC handshake during connect */ -static int smc_connect_clc(struct smc_sock *smc, +static int smc_connect_clc(struct smc_sock *smc, int smc_type, struct smc_clc_msg_accept_confirm *aclc, - struct smc_ib_device *ibdev, u8 ibport) + struct smc_ib_device *ibdev, u8 ibport, + struct smcd_dev *ismdev) { int rc = 0; /* do inband token exchange */ - rc = smc_clc_send_proposal(smc, ibdev, ibport); + rc = smc_clc_send_proposal(smc, smc_type, ibdev, ibport, ismdev); if (rc) return rc; /* receive SMC Accept CLC message */ @@ -488,8 +548,8 @@ static int smc_connect_rdma(struct smc_sock *smc, int reason_code = 0; mutex_lock(&smc_create_lgr_pending); - local_contact = smc_conn_create(smc, ibdev, ibport, &aclc->lcl, - aclc->hdr.flag); + local_contact = smc_conn_create(smc, false, aclc->hdr.flag, ibdev, + ibport, &aclc->lcl, NULL, 0); if (local_contact < 0) { if (local_contact == -ENOMEM) reason_code = SMC_CLC_DECL_MEM;/* insufficient memory*/ @@ -504,7 +564,7 @@ static int smc_connect_rdma(struct smc_sock *smc, smc_conn_save_peer_info(smc, aclc); /* create send buffer and rmb */ - if (smc_buf_create(smc)) + if (smc_buf_create(smc, false)) return smc_connect_abort(smc, SMC_CLC_DECL_MEM, local_contact); if (local_contact == SMC_FIRST_CONTACT) @@ -551,11 +611,50 @@ static int smc_connect_rdma(struct smc_sock *smc, return 0; } +/* setup for ISM connection of client */ +static int smc_connect_ism(struct smc_sock *smc, + struct smc_clc_msg_accept_confirm *aclc, + struct smcd_dev *ismdev) +{ + int local_contact = SMC_FIRST_CONTACT; + int rc = 0; + + mutex_lock(&smc_create_lgr_pending); + local_contact = smc_conn_create(smc, true, aclc->hdr.flag, NULL, 0, + NULL, ismdev, aclc->gid); + if (local_contact < 0) + return smc_connect_abort(smc, SMC_CLC_DECL_MEM, 0); + + /* Create send and receive buffers */ + if (smc_buf_create(smc, true)) + return smc_connect_abort(smc, SMC_CLC_DECL_MEM, local_contact); + + smc_conn_save_peer_info(smc, aclc); + smc_close_init(smc); + smc_rx_init(smc); + smc_tx_init(smc); + + rc = smc_clc_send_confirm(smc); + if (rc) + return smc_connect_abort(smc, rc, local_contact); + mutex_unlock(&smc_create_lgr_pending); + + smc_copy_sock_settings_to_clc(smc); + if (smc->sk.sk_state == SMC_INIT) + smc->sk.sk_state = SMC_ACTIVE; + + return 0; +} + /* perform steps before actually connecting */ static int __smc_connect(struct smc_sock *smc) { + bool ism_supported = false, rdma_supported = false; struct smc_clc_msg_accept_confirm aclc; struct smc_ib_device *ibdev; + struct 
smcd_dev *ismdev; + unsigned short vlan; + int smc_type; int rc = 0; u8 ibport; @@ -572,20 +671,52 @@ static int __smc_connect(struct smc_sock *smc) if (using_ipsec(smc)) return smc_connect_decline_fallback(smc, SMC_CLC_DECL_IPSEC); - /* check if a RDMA device is available; if not, fall back */ - if (smc_check_rdma(smc, &ibdev, &ibport)) + /* check for VLAN ID */ + if (smc_vlan_by_tcpsk(smc->clcsock, &vlan)) + return smc_connect_decline_fallback(smc, SMC_CLC_DECL_CNFERR); + + /* check if there is an ism device available */ + if (!smc_check_ism(smc, &ismdev) && + !smc_connect_ism_vlan_setup(smc, ismdev, vlan)) { + /* ISM is supported for this connection */ + ism_supported = true; + smc_type = SMC_TYPE_D; + } + + /* check if there is a rdma device available */ + if (!smc_check_rdma(smc, &ibdev, &ibport)) { + /* RDMA is supported for this connection */ + rdma_supported = true; + if (ism_supported) + smc_type = SMC_TYPE_B; /* both */ + else + smc_type = SMC_TYPE_R; /* only RDMA */ + } + + /* if neither ISM nor RDMA are supported, fallback */ + if (!rdma_supported && !ism_supported) return smc_connect_decline_fallback(smc, SMC_CLC_DECL_CNFERR); /* perform CLC handshake */ - rc = smc_connect_clc(smc, &aclc, ibdev, ibport); - if (rc) + rc = smc_connect_clc(smc, smc_type, &aclc, ibdev, ibport, ismdev); + if (rc) { + smc_connect_ism_vlan_cleanup(smc, ism_supported, ismdev, vlan); return smc_connect_decline_fallback(smc, rc); + } - /* connect using rdma */ - rc = smc_connect_rdma(smc, &aclc, ibdev, ibport); - if (rc) + /* depending on previous steps, connect using rdma or ism */ + if (rdma_supported && aclc.hdr.path == SMC_TYPE_R) + rc = smc_connect_rdma(smc, &aclc, ibdev, ibport); + else if (ism_supported && aclc.hdr.path == SMC_TYPE_D) + rc = smc_connect_ism(smc, &aclc, ismdev); + else + rc = SMC_CLC_DECL_CNFERR; + if (rc) { + smc_connect_ism_vlan_cleanup(smc, ism_supported, ismdev, vlan); return smc_connect_decline_fallback(smc, rc); + } + smc_connect_ism_vlan_cleanup(smc, ism_supported, ismdev, vlan); return 0; } @@ -953,7 +1084,8 @@ static int smc_listen_rdma_init(struct smc_sock *new_smc, int *local_contact) { /* allocate connection / link group */ - *local_contact = smc_conn_create(new_smc, ibdev, ibport, &pclc->lcl, 0); + *local_contact = smc_conn_create(new_smc, false, 0, ibdev, ibport, + &pclc->lcl, NULL, 0); if (*local_contact < 0) { if (*local_contact == -ENOMEM) return SMC_CLC_DECL_MEM;/* insufficient memory*/ @@ -961,12 +1093,50 @@ static int smc_listen_rdma_init(struct smc_sock *new_smc, } /* create send buffer and rmb */ - if (smc_buf_create(new_smc)) + if (smc_buf_create(new_smc, false)) return SMC_CLC_DECL_MEM; return 0; } +/* listen worker: initialize connection and buffers for SMC-D */ +static int smc_listen_ism_init(struct smc_sock *new_smc, + struct smc_clc_msg_proposal *pclc, + struct smcd_dev *ismdev, + int *local_contact) +{ + struct smc_clc_msg_smcd *pclc_smcd; + + pclc_smcd = smc_get_clc_msg_smcd(pclc); + *local_contact = smc_conn_create(new_smc, true, 0, NULL, 0, NULL, + ismdev, pclc_smcd->gid); + if (*local_contact < 0) { + if (*local_contact == -ENOMEM) + return SMC_CLC_DECL_MEM;/* insufficient memory*/ + return SMC_CLC_DECL_INTERR; /* other error */ + } + + /* Check if peer can be reached via ISM device */ + if (smc_ism_cantalk(new_smc->conn.lgr->peer_gid, + new_smc->conn.lgr->vlan_id, + new_smc->conn.lgr->smcd)) { + if (*local_contact == SMC_FIRST_CONTACT) + smc_lgr_forget(new_smc->conn.lgr); + smc_conn_free(&new_smc->conn); + return SMC_CLC_DECL_CNFERR; + } + + /* 
Create send and receive buffers */ + if (smc_buf_create(new_smc, true)) { + if (*local_contact == SMC_FIRST_CONTACT) + smc_lgr_forget(new_smc->conn.lgr); + smc_conn_free(&new_smc->conn); + return SMC_CLC_DECL_MEM; + } + + return 0; +} + /* listen worker: register buffers */ static int smc_listen_rdma_reg(struct smc_sock *new_smc, int local_contact) { @@ -1025,6 +1195,8 @@ static void smc_listen_work(struct work_struct *work) struct smc_clc_msg_accept_confirm cclc; struct smc_clc_msg_proposal *pclc; struct smc_ib_device *ibdev; + bool ism_supported = false; + struct smcd_dev *ismdev; u8 buf[SMC_CLC_MAX_LEN]; int local_contact = 0; int reason_code = 0; @@ -1065,12 +1237,21 @@ static void smc_listen_work(struct work_struct *work) smc_rx_init(new_smc); smc_tx_init(new_smc); + /* check if ISM is available */ + if ((pclc->hdr.path == SMC_TYPE_D || pclc->hdr.path == SMC_TYPE_B) && + !smc_check_ism(new_smc, &ismdev) && + !smc_listen_ism_init(new_smc, pclc, ismdev, &local_contact)) { + ism_supported = true; + } + /* check if RDMA is available */ - if (smc_check_rdma(new_smc, &ibdev, &ibport) || - smc_listen_rdma_check(new_smc, pclc) || - smc_listen_rdma_init(new_smc, pclc, ibdev, ibport, - &local_contact) || - smc_listen_rdma_reg(new_smc, local_contact)) { + if (!ism_supported && + ((pclc->hdr.path != SMC_TYPE_R && pclc->hdr.path != SMC_TYPE_B) || + smc_check_rdma(new_smc, &ibdev, &ibport) || + smc_listen_rdma_check(new_smc, pclc) || + smc_listen_rdma_init(new_smc, pclc, ibdev, ibport, + &local_contact) || + smc_listen_rdma_reg(new_smc, local_contact))) { /* SMC not supported, decline */ mutex_unlock(&smc_create_lgr_pending); smc_listen_decline(new_smc, SMC_CLC_DECL_CNFERR, local_contact); @@ -1095,7 +1276,8 @@ static void smc_listen_work(struct work_struct *work) } /* finish worker */ - smc_listen_rdma_finish(new_smc, &cclc, local_contact); + if (!ism_supported) + smc_listen_rdma_finish(new_smc, &cclc, local_contact); smc_conn_save_peer_info(new_smc, &cclc); mutex_unlock(&smc_create_lgr_pending); smc_listen_out_connected(new_smc); diff --git a/net/smc/smc.h b/net/smc/smc.h index d7ca26570482..be20acd7b5ab 100644 --- a/net/smc/smc.h +++ b/net/smc/smc.h @@ -21,8 +21,6 @@ #define SMCPROTO_SMC 0 /* SMC protocol, IPv4 */ #define SMCPROTO_SMC6 1 /* SMC protocol, IPv6 */ -#define SMC_MAX_PORTS 2 /* Max # of ports */ - extern struct proto smc_proto; extern struct proto smc_proto6; @@ -185,6 +183,11 @@ struct smc_connection { spinlock_t acurs_lock; /* protect cursors */ #endif struct work_struct close_work; /* peer sent some closing */ + struct tasklet_struct rx_tsklet; /* Receiver tasklet for SMC-D */ + u8 rx_off; /* receive offset: + * 0 for SMC-R, 32 for SMC-D + */ + u64 peer_token; /* SMC-D token of peer */ }; struct smc_connect_info { diff --git a/net/smc/smc_cdc.c b/net/smc/smc_cdc.c index a7e8d63fc8ae..621d8cca570b 100644 --- a/net/smc/smc_cdc.c +++ b/net/smc/smc_cdc.c @@ -117,7 +117,7 @@ int smc_cdc_msg_send(struct smc_connection *conn, return rc; } -int smc_cdc_get_slot_and_msg_send(struct smc_connection *conn) +static int smcr_cdc_get_slot_and_msg_send(struct smc_connection *conn) { struct smc_cdc_tx_pend *pend; struct smc_wr_buf *wr_buf; @@ -130,6 +130,21 @@ int smc_cdc_get_slot_and_msg_send(struct smc_connection *conn) return smc_cdc_msg_send(conn, wr_buf, pend); } +int smc_cdc_get_slot_and_msg_send(struct smc_connection *conn) +{ + int rc; + + if (conn->lgr->is_smcd) { + spin_lock_bh(&conn->send_lock); + rc = smcd_cdc_msg_send(conn); + spin_unlock_bh(&conn->send_lock); + } else { + rc = 
smcr_cdc_get_slot_and_msg_send(conn); + } + + return rc; +} + static bool smc_cdc_tx_filter(struct smc_wr_tx_pend_priv *tx_pend, unsigned long data) { @@ -157,6 +172,45 @@ void smc_cdc_tx_dismiss_slots(struct smc_connection *conn) (unsigned long)conn); } +/* Send a SMC-D CDC header. + * This increments the free space available in our send buffer. + * Also update the confirmed receive buffer with what was sent to the peer. + */ +int smcd_cdc_msg_send(struct smc_connection *conn) +{ + struct smc_sock *smc = container_of(conn, struct smc_sock, conn); + struct smcd_cdc_msg cdc; + int rc, diff; + + memset(&cdc, 0, sizeof(cdc)); + cdc.common.type = SMC_CDC_MSG_TYPE; + cdc.prod_wrap = conn->local_tx_ctrl.prod.wrap; + cdc.prod_count = conn->local_tx_ctrl.prod.count; + + cdc.cons_wrap = conn->local_tx_ctrl.cons.wrap; + cdc.cons_count = conn->local_tx_ctrl.cons.count; + cdc.prod_flags = conn->local_tx_ctrl.prod_flags; + cdc.conn_state_flags = conn->local_tx_ctrl.conn_state_flags; + rc = smcd_tx_ism_write(conn, &cdc, sizeof(cdc), 0, 1); + if (rc) + return rc; + smc_curs_write(&conn->rx_curs_confirmed, + smc_curs_read(&conn->local_tx_ctrl.cons, conn), conn); + /* Calculate transmitted data and increment free send buffer space */ + diff = smc_curs_diff(conn->sndbuf_desc->len, &conn->tx_curs_fin, + &conn->tx_curs_sent); + /* increased by confirmed number of bytes */ + smp_mb__before_atomic(); + atomic_add(diff, &conn->sndbuf_space); + /* guarantee 0 <= sndbuf_space <= sndbuf_desc->len */ + smp_mb__after_atomic(); + smc_curs_write(&conn->tx_curs_fin, + smc_curs_read(&conn->tx_curs_sent, conn), conn); + + smc_tx_sndbuf_nonfull(smc); + return rc; +} + /********************************* receive ***********************************/ static inline bool smc_cdc_before(u16 seq1, u16 seq2) @@ -178,7 +232,7 @@ static void smc_cdc_handle_urg_data_arrival(struct smc_sock *smc, if (!sock_flag(&smc->sk, SOCK_URGINLINE)) /* we'll skip the urgent byte, so don't account for it */ (*diff_prod)--; - base = (char *)conn->rmb_desc->cpu_addr; + base = (char *)conn->rmb_desc->cpu_addr + conn->rx_off; if (conn->urg_curs.count) conn->urg_rx_byte = *(base + conn->urg_curs.count - 1); else @@ -276,6 +330,34 @@ static void smc_cdc_msg_recv(struct smc_sock *smc, struct smc_cdc_msg *cdc) sock_put(&smc->sk); /* no free sk in softirq-context */ } +/* Schedule a tasklet for this connection. Triggered from the ISM device IRQ + * handler to indicate update in the DMBE. + * + * Context: + * - tasklet context + */ +static void smcd_cdc_rx_tsklet(unsigned long data) +{ + struct smc_connection *conn = (struct smc_connection *)data; + struct smcd_cdc_msg cdc; + struct smc_sock *smc; + + if (!conn) + return; + + memcpy(&cdc, conn->rmb_desc->cpu_addr, sizeof(cdc)); + smc = container_of(conn, struct smc_sock, conn); + smc_cdc_msg_recv(smc, (struct smc_cdc_msg *)&cdc); +} + +/* Initialize receive tasklet. Called from ISM device IRQ handler to start + * receiver side. 
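Illustratively (the driver side is not part of this hunk), the ISM device's interrupt handler is expected to drive this path by scheduling the per-connection tasklet, roughly tasklet_schedule(&conn->rx_tsklet); smcd_cdc_rx_tsklet() above then copies the CDC message out of the DMB and hands it to smc_cdc_msg_recv().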
+ */ +void smcd_cdc_rx_init(struct smc_connection *conn) +{ + tasklet_init(&conn->rx_tsklet, smcd_cdc_rx_tsklet, (unsigned long)conn); +} + /***************************** init, exit, misc ******************************/ static void smc_cdc_rx_handler(struct ib_wc *wc, void *buf) diff --git a/net/smc/smc_cdc.h b/net/smc/smc_cdc.h index f60082fee5b8..8fbce4fee3e4 100644 --- a/net/smc/smc_cdc.h +++ b/net/smc/smc_cdc.h @@ -50,6 +50,20 @@ struct smc_cdc_msg { u8 reserved[18]; } __packed; /* format defined in RFC7609 */ +/* CDC message for SMC-D */ +struct smcd_cdc_msg { + struct smc_wr_rx_hdr common; /* Type = 0xFE */ + u8 res1[7]; + u16 prod_wrap; + u32 prod_count; + u8 res2[2]; + u16 cons_wrap; + u32 cons_count; + struct smc_cdc_producer_flags prod_flags; + struct smc_cdc_conn_state_flags conn_state_flags; + u8 res3[8]; +} __packed; + static inline bool smc_cdc_rxed_any_close(struct smc_connection *conn) { return conn->local_rx_ctrl.conn_state_flags.peer_conn_abort || @@ -204,9 +218,9 @@ static inline void smc_cdc_cursor_to_host(union smc_host_cursor *local, smc_curs_write(local, smc_curs_read(&temp, conn), conn); } -static inline void smc_cdc_msg_to_host(struct smc_host_cdc_msg *local, - struct smc_cdc_msg *peer, - struct smc_connection *conn) +static inline void smcr_cdc_msg_to_host(struct smc_host_cdc_msg *local, + struct smc_cdc_msg *peer, + struct smc_connection *conn) { local->common.type = peer->common.type; local->len = peer->len; @@ -218,6 +232,27 @@ static inline void smc_cdc_msg_to_host(struct smc_host_cdc_msg *local, local->conn_state_flags = peer->conn_state_flags; } +static inline void smcd_cdc_msg_to_host(struct smc_host_cdc_msg *local, + struct smcd_cdc_msg *peer) +{ + local->prod.wrap = peer->prod_wrap; + local->prod.count = peer->prod_count; + local->cons.wrap = peer->cons_wrap; + local->cons.count = peer->cons_count; + local->prod_flags = peer->prod_flags; + local->conn_state_flags = peer->conn_state_flags; +} + +static inline void smc_cdc_msg_to_host(struct smc_host_cdc_msg *local, + struct smc_cdc_msg *peer, + struct smc_connection *conn) +{ + if (conn->lgr->is_smcd) + smcd_cdc_msg_to_host(local, (struct smcd_cdc_msg *)peer); + else + smcr_cdc_msg_to_host(local, peer, conn); +} + struct smc_cdc_tx_pend; int smc_cdc_get_free_slot(struct smc_connection *conn, @@ -227,6 +262,8 @@ void smc_cdc_tx_dismiss_slots(struct smc_connection *conn); int smc_cdc_msg_send(struct smc_connection *conn, struct smc_wr_buf *wr_buf, struct smc_cdc_tx_pend *pend); int smc_cdc_get_slot_and_msg_send(struct smc_connection *conn); +int smcd_cdc_msg_send(struct smc_connection *conn); int smc_cdc_init(void) __init; +void smcd_cdc_rx_init(struct smc_connection *conn); #endif /* SMC_CDC_H */ diff --git a/net/smc/smc_clc.c b/net/smc/smc_clc.c index ae5d168653ce..ad39efdb4f1c 100644 --- a/net/smc/smc_clc.c +++ b/net/smc/smc_clc.c @@ -23,9 +23,15 @@ #include "smc_core.h" #include "smc_clc.h" #include "smc_ib.h" +#include "smc_ism.h" + +#define SMCR_CLC_ACCEPT_CONFIRM_LEN 68 +#define SMCD_CLC_ACCEPT_CONFIRM_LEN 48 /* eye catcher "SMCR" EBCDIC for CLC messages */ static const char SMC_EYECATCHER[4] = {'\xe2', '\xd4', '\xc3', '\xd9'}; +/* eye catcher "SMCD" EBCDIC for CLC messages */ +static const char SMCD_EYECATCHER[4] = {'\xe2', '\xd4', '\xc3', '\xc4'}; /* check if received message has a correct header length and contains valid * heading and trailing eyecatchers @@ -38,10 +44,14 @@ static bool smc_clc_msg_hdr_valid(struct smc_clc_msg_hdr *clcm) struct smc_clc_msg_decline *dclc; struct smc_clc_msg_trail 
*trl; - if (memcmp(clcm->eyecatcher, SMC_EYECATCHER, sizeof(SMC_EYECATCHER))) + if (memcmp(clcm->eyecatcher, SMC_EYECATCHER, sizeof(SMC_EYECATCHER)) && + memcmp(clcm->eyecatcher, SMCD_EYECATCHER, sizeof(SMCD_EYECATCHER))) return false; switch (clcm->type) { case SMC_CLC_PROPOSAL: + if (clcm->path != SMC_TYPE_R && clcm->path != SMC_TYPE_D && + clcm->path != SMC_TYPE_B) + return false; pclc = (struct smc_clc_msg_proposal *)clcm; pclc_prfx = smc_clc_proposal_get_prefix(pclc); if (ntohs(pclc->hdr.length) != @@ -56,10 +66,16 @@ static bool smc_clc_msg_hdr_valid(struct smc_clc_msg_hdr *clcm) break; case SMC_CLC_ACCEPT: case SMC_CLC_CONFIRM: + if (clcm->path != SMC_TYPE_R && clcm->path != SMC_TYPE_D) + return false; clc = (struct smc_clc_msg_accept_confirm *)clcm; - if (ntohs(clc->hdr.length) != sizeof(*clc)) + if ((clcm->path == SMC_TYPE_R && + ntohs(clc->hdr.length) != SMCR_CLC_ACCEPT_CONFIRM_LEN) || + (clcm->path == SMC_TYPE_D && + ntohs(clc->hdr.length) != SMCD_CLC_ACCEPT_CONFIRM_LEN)) return false; - trl = &clc->trl; + trl = (struct smc_clc_msg_trail *) + ((u8 *)clc + ntohs(clc->hdr.length) - sizeof(*trl)); break; case SMC_CLC_DECLINE: dclc = (struct smc_clc_msg_decline *)clcm; @@ -70,7 +86,8 @@ static bool smc_clc_msg_hdr_valid(struct smc_clc_msg_hdr *clcm) default: return false; } - if (memcmp(trl->eyecatcher, SMC_EYECATCHER, sizeof(SMC_EYECATCHER))) + if (memcmp(trl->eyecatcher, SMC_EYECATCHER, sizeof(SMC_EYECATCHER)) && + memcmp(trl->eyecatcher, SMCD_EYECATCHER, sizeof(SMCD_EYECATCHER))) return false; return true; } @@ -296,6 +313,9 @@ int smc_clc_wait_msg(struct smc_sock *smc, void *buf, int buflen, datlen = ntohs(clcm->length); if ((len < sizeof(struct smc_clc_msg_hdr)) || (datlen > buflen) || + (clcm->version != SMC_CLC_V1) || + (clcm->path != SMC_TYPE_R && clcm->path != SMC_TYPE_D && + clcm->path != SMC_TYPE_B) || ((clcm->type != SMC_CLC_DECLINE) && (clcm->type != expected_type))) { smc->sk.sk_err = EPROTO; @@ -357,17 +377,18 @@ int smc_clc_send_decline(struct smc_sock *smc, u32 peer_diag_info) } /* send CLC PROPOSAL message across internal TCP socket */ -int smc_clc_send_proposal(struct smc_sock *smc, - struct smc_ib_device *smcibdev, - u8 ibport) +int smc_clc_send_proposal(struct smc_sock *smc, int smc_type, + struct smc_ib_device *ibdev, u8 ibport, + struct smcd_dev *ismdev) { struct smc_clc_ipv6_prefix ipv6_prfx[SMC_CLC_MAX_V6_PREFIX]; struct smc_clc_msg_proposal_prefix pclc_prfx; + struct smc_clc_msg_smcd pclc_smcd; struct smc_clc_msg_proposal pclc; struct smc_clc_msg_trail trl; int len, i, plen, rc; int reason_code = 0; - struct kvec vec[4]; + struct kvec vec[5]; struct msghdr msg; /* retrieve ip prefixes for CLC proposal msg */ @@ -382,18 +403,34 @@ int smc_clc_send_proposal(struct smc_sock *smc, memset(&pclc, 0, sizeof(pclc)); memcpy(pclc.hdr.eyecatcher, SMC_EYECATCHER, sizeof(SMC_EYECATCHER)); pclc.hdr.type = SMC_CLC_PROPOSAL; - pclc.hdr.length = htons(plen); pclc.hdr.version = SMC_CLC_V1; /* SMC version */ - memcpy(pclc.lcl.id_for_peer, local_systemid, sizeof(local_systemid)); - memcpy(&pclc.lcl.gid, &smcibdev->gid[ibport - 1], SMC_GID_SIZE); - memcpy(&pclc.lcl.mac, &smcibdev->mac[ibport - 1], ETH_ALEN); - pclc.iparea_offset = htons(0); + pclc.hdr.path = smc_type; + if (smc_type == SMC_TYPE_R || smc_type == SMC_TYPE_B) { + /* add SMC-R specifics */ + memcpy(pclc.lcl.id_for_peer, local_systemid, + sizeof(local_systemid)); + memcpy(&pclc.lcl.gid, &ibdev->gid[ibport - 1], SMC_GID_SIZE); + memcpy(&pclc.lcl.mac, &ibdev->mac[ibport - 1], ETH_ALEN); + pclc.iparea_offset = 
htons(0); + } + if (smc_type == SMC_TYPE_D || smc_type == SMC_TYPE_B) { + /* add SMC-D specifics */ + memset(&pclc_smcd, 0, sizeof(pclc_smcd)); + plen += sizeof(pclc_smcd); + pclc.iparea_offset = htons(SMC_CLC_PROPOSAL_MAX_OFFSET); + pclc_smcd.gid = ismdev->local_gid; + } + pclc.hdr.length = htons(plen); memcpy(trl.eyecatcher, SMC_EYECATCHER, sizeof(SMC_EYECATCHER)); memset(&msg, 0, sizeof(msg)); i = 0; vec[i].iov_base = &pclc; vec[i++].iov_len = sizeof(pclc); + if (smc_type == SMC_TYPE_D || smc_type == SMC_TYPE_B) { + vec[i].iov_base = &pclc_smcd; + vec[i++].iov_len = sizeof(pclc_smcd); + } vec[i].iov_base = &pclc_prfx; vec[i++].iov_len = sizeof(pclc_prfx); if (pclc_prfx.ipv6_prefixes_cnt > 0) { @@ -429,35 +466,56 @@ int smc_clc_send_confirm(struct smc_sock *smc) struct kvec vec; int len; - link = &conn->lgr->lnk[SMC_SINGLE_LINK]; /* send SMC Confirm CLC msg */ memset(&cclc, 0, sizeof(cclc)); - memcpy(cclc.hdr.eyecatcher, SMC_EYECATCHER, sizeof(SMC_EYECATCHER)); cclc.hdr.type = SMC_CLC_CONFIRM; - cclc.hdr.length = htons(sizeof(cclc)); cclc.hdr.version = SMC_CLC_V1; /* SMC version */ - memcpy(cclc.lcl.id_for_peer, local_systemid, sizeof(local_systemid)); - memcpy(&cclc.lcl.gid, &link->smcibdev->gid[link->ibport - 1], - SMC_GID_SIZE); - memcpy(&cclc.lcl.mac, &link->smcibdev->mac[link->ibport - 1], ETH_ALEN); - hton24(cclc.qpn, link->roce_qp->qp_num); - cclc.rmb_rkey = - htonl(conn->rmb_desc->mr_rx[SMC_SINGLE_LINK]->rkey); - cclc.rmbe_idx = 1; /* for now: 1 RMB = 1 RMBE */ - cclc.rmbe_alert_token = htonl(conn->alert_token_local); - cclc.qp_mtu = min(link->path_mtu, link->peer_mtu); - cclc.rmbe_size = conn->rmbe_size_short; - cclc.rmb_dma_addr = cpu_to_be64( - (u64)sg_dma_address(conn->rmb_desc->sgt[SMC_SINGLE_LINK].sgl)); - hton24(cclc.psn, link->psn_initial); - - memcpy(cclc.trl.eyecatcher, SMC_EYECATCHER, sizeof(SMC_EYECATCHER)); + if (smc->conn.lgr->is_smcd) { + /* SMC-D specific settings */ + memcpy(cclc.hdr.eyecatcher, SMCD_EYECATCHER, + sizeof(SMCD_EYECATCHER)); + cclc.hdr.path = SMC_TYPE_D; + cclc.hdr.length = htons(SMCD_CLC_ACCEPT_CONFIRM_LEN); + cclc.gid = conn->lgr->smcd->local_gid; + cclc.token = conn->rmb_desc->token; + cclc.dmbe_size = conn->rmbe_size_short; + cclc.dmbe_idx = 0; + memcpy(&cclc.linkid, conn->lgr->id, SMC_LGR_ID_SIZE); + memcpy(cclc.smcd_trl.eyecatcher, SMCD_EYECATCHER, + sizeof(SMCD_EYECATCHER)); + } else { + /* SMC-R specific settings */ + link = &conn->lgr->lnk[SMC_SINGLE_LINK]; + memcpy(cclc.hdr.eyecatcher, SMC_EYECATCHER, + sizeof(SMC_EYECATCHER)); + cclc.hdr.path = SMC_TYPE_R; + cclc.hdr.length = htons(SMCR_CLC_ACCEPT_CONFIRM_LEN); + memcpy(cclc.lcl.id_for_peer, local_systemid, + sizeof(local_systemid)); + memcpy(&cclc.lcl.gid, &link->smcibdev->gid[link->ibport - 1], + SMC_GID_SIZE); + memcpy(&cclc.lcl.mac, &link->smcibdev->mac[link->ibport - 1], + ETH_ALEN); + hton24(cclc.qpn, link->roce_qp->qp_num); + cclc.rmb_rkey = + htonl(conn->rmb_desc->mr_rx[SMC_SINGLE_LINK]->rkey); + cclc.rmbe_idx = 1; /* for now: 1 RMB = 1 RMBE */ + cclc.rmbe_alert_token = htonl(conn->alert_token_local); + cclc.qp_mtu = min(link->path_mtu, link->peer_mtu); + cclc.rmbe_size = conn->rmbe_size_short; + cclc.rmb_dma_addr = cpu_to_be64((u64)sg_dma_address + (conn->rmb_desc->sgt[SMC_SINGLE_LINK].sgl)); + hton24(cclc.psn, link->psn_initial); + memcpy(cclc.smcr_trl.eyecatcher, SMC_EYECATCHER, + sizeof(SMC_EYECATCHER)); + } memset(&msg, 0, sizeof(msg)); vec.iov_base = &cclc; - vec.iov_len = sizeof(cclc); - len = kernel_sendmsg(smc->clcsock, &msg, &vec, 1, sizeof(cclc)); - if (len < 
sizeof(cclc)) { + vec.iov_len = ntohs(cclc.hdr.length); + len = kernel_sendmsg(smc->clcsock, &msg, &vec, 1, + ntohs(cclc.hdr.length)); + if (len < ntohs(cclc.hdr.length)) { if (len >= 0) { reason_code = -ENETUNREACH; smc->sk.sk_err = -reason_code; @@ -480,35 +538,58 @@ int smc_clc_send_accept(struct smc_sock *new_smc, int srv_first_contact) int rc = 0; int len; - link = &conn->lgr->lnk[SMC_SINGLE_LINK]; memset(&aclc, 0, sizeof(aclc)); - memcpy(aclc.hdr.eyecatcher, SMC_EYECATCHER, sizeof(SMC_EYECATCHER)); aclc.hdr.type = SMC_CLC_ACCEPT; - aclc.hdr.length = htons(sizeof(aclc)); aclc.hdr.version = SMC_CLC_V1; /* SMC version */ if (srv_first_contact) aclc.hdr.flag = 1; - memcpy(aclc.lcl.id_for_peer, local_systemid, sizeof(local_systemid)); - memcpy(&aclc.lcl.gid, &link->smcibdev->gid[link->ibport - 1], - SMC_GID_SIZE); - memcpy(&aclc.lcl.mac, link->smcibdev->mac[link->ibport - 1], ETH_ALEN); - hton24(aclc.qpn, link->roce_qp->qp_num); - aclc.rmb_rkey = - htonl(conn->rmb_desc->mr_rx[SMC_SINGLE_LINK]->rkey); - aclc.rmbe_idx = 1; /* as long as 1 RMB = 1 RMBE */ - aclc.rmbe_alert_token = htonl(conn->alert_token_local); - aclc.qp_mtu = link->path_mtu; - aclc.rmbe_size = conn->rmbe_size_short, - aclc.rmb_dma_addr = cpu_to_be64( - (u64)sg_dma_address(conn->rmb_desc->sgt[SMC_SINGLE_LINK].sgl)); - hton24(aclc.psn, link->psn_initial); - memcpy(aclc.trl.eyecatcher, SMC_EYECATCHER, sizeof(SMC_EYECATCHER)); + + if (new_smc->conn.lgr->is_smcd) { + /* SMC-D specific settings */ + aclc.hdr.length = htons(SMCD_CLC_ACCEPT_CONFIRM_LEN); + memcpy(aclc.hdr.eyecatcher, SMCD_EYECATCHER, + sizeof(SMCD_EYECATCHER)); + aclc.hdr.path = SMC_TYPE_D; + aclc.gid = conn->lgr->smcd->local_gid; + aclc.token = conn->rmb_desc->token; + aclc.dmbe_size = conn->rmbe_size_short; + aclc.dmbe_idx = 0; + memcpy(&aclc.linkid, conn->lgr->id, SMC_LGR_ID_SIZE); + memcpy(aclc.smcd_trl.eyecatcher, SMCD_EYECATCHER, + sizeof(SMCD_EYECATCHER)); + } else { + /* SMC-R specific settings */ + aclc.hdr.length = htons(SMCR_CLC_ACCEPT_CONFIRM_LEN); + memcpy(aclc.hdr.eyecatcher, SMC_EYECATCHER, + sizeof(SMC_EYECATCHER)); + aclc.hdr.path = SMC_TYPE_R; + link = &conn->lgr->lnk[SMC_SINGLE_LINK]; + memcpy(aclc.lcl.id_for_peer, local_systemid, + sizeof(local_systemid)); + memcpy(&aclc.lcl.gid, &link->smcibdev->gid[link->ibport - 1], + SMC_GID_SIZE); + memcpy(&aclc.lcl.mac, link->smcibdev->mac[link->ibport - 1], + ETH_ALEN); + hton24(aclc.qpn, link->roce_qp->qp_num); + aclc.rmb_rkey = + htonl(conn->rmb_desc->mr_rx[SMC_SINGLE_LINK]->rkey); + aclc.rmbe_idx = 1; /* as long as 1 RMB = 1 RMBE */ + aclc.rmbe_alert_token = htonl(conn->alert_token_local); + aclc.qp_mtu = link->path_mtu; + aclc.rmbe_size = conn->rmbe_size_short, + aclc.rmb_dma_addr = cpu_to_be64((u64)sg_dma_address + (conn->rmb_desc->sgt[SMC_SINGLE_LINK].sgl)); + hton24(aclc.psn, link->psn_initial); + memcpy(aclc.smcr_trl.eyecatcher, SMC_EYECATCHER, + sizeof(SMC_EYECATCHER)); + } memset(&msg, 0, sizeof(msg)); vec.iov_base = &aclc; - vec.iov_len = sizeof(aclc); - len = kernel_sendmsg(new_smc->clcsock, &msg, &vec, 1, sizeof(aclc)); - if (len < sizeof(aclc)) { + vec.iov_len = ntohs(aclc.hdr.length); + len = kernel_sendmsg(new_smc->clcsock, &msg, &vec, 1, + ntohs(aclc.hdr.length)); + if (len < ntohs(aclc.hdr.length)) { if (len >= 0) new_smc->sk.sk_err = EPROTO; else diff --git a/net/smc/smc_clc.h b/net/smc/smc_clc.h index 41ff9ea96139..100e988ad1a8 100644 --- a/net/smc/smc_clc.h +++ b/net/smc/smc_clc.h @@ -23,6 +23,9 @@ #define SMC_CLC_DECLINE 0x04 #define SMC_CLC_V1 0x1 /* SMC version */ +#define 
SMC_TYPE_R 0 /* SMC-R only */ +#define SMC_TYPE_D 1 /* SMC-D only */ +#define SMC_TYPE_B 3 /* SMC-R and SMC-D */ #define CLC_WAIT_TIME (6 * HZ) /* max. wait time on clcsock */ #define SMC_CLC_DECL_MEM 0x01010000 /* insufficient memory resources */ #define SMC_CLC_DECL_TIMEOUT 0x02000000 /* timeout */ @@ -42,9 +45,11 @@ struct smc_clc_msg_hdr { /* header1 of clc messages */ #if defined(__BIG_ENDIAN_BITFIELD) u8 version : 4, flag : 1, - rsvd : 3; + rsvd : 1, + path : 2; #elif defined(__LITTLE_ENDIAN_BITFIELD) - u8 rsvd : 3, + u8 path : 2, + rsvd : 1, flag : 1, version : 4; #endif @@ -77,6 +82,11 @@ struct smc_clc_msg_proposal_prefix { /* prefix part of clc proposal message*/ u8 ipv6_prefixes_cnt; /* number of IPv6 prefixes in prefix array */ } __aligned(4); +struct smc_clc_msg_smcd { /* SMC-D GID information */ + u64 gid; /* ISM GID of requestor */ + u8 res[32]; +}; + struct smc_clc_msg_proposal { /* clc proposal message sent by Linux */ struct smc_clc_msg_hdr hdr; struct smc_clc_msg_local lcl; @@ -94,23 +104,45 @@ struct smc_clc_msg_proposal { /* clc proposal message sent by Linux */ struct smc_clc_msg_accept_confirm { /* clc accept / confirm message */ struct smc_clc_msg_hdr hdr; - struct smc_clc_msg_local lcl; - u8 qpn[3]; /* QP number */ - __be32 rmb_rkey; /* RMB rkey */ - u8 rmbe_idx; /* Index of RMBE in RMB */ - __be32 rmbe_alert_token;/* unique connection id */ + union { + struct { /* SMC-R */ + struct smc_clc_msg_local lcl; + u8 qpn[3]; /* QP number */ + __be32 rmb_rkey; /* RMB rkey */ + u8 rmbe_idx; /* Index of RMBE in RMB */ + __be32 rmbe_alert_token;/* unique connection id */ #if defined(__BIG_ENDIAN_BITFIELD) - u8 rmbe_size : 4, /* RMBE buf size (compressed notation) */ - qp_mtu : 4; /* QP mtu */ + u8 rmbe_size : 4, /* buf size (compressed) */ + qp_mtu : 4; /* QP mtu */ #elif defined(__LITTLE_ENDIAN_BITFIELD) - u8 qp_mtu : 4, - rmbe_size : 4; + u8 qp_mtu : 4, + rmbe_size : 4; #endif - u8 reserved; - __be64 rmb_dma_addr; /* RMB virtual address */ - u8 reserved2; - u8 psn[3]; /* initial packet sequence number */ - struct smc_clc_msg_trail trl; /* eye catcher "SMCR" EBCDIC */ + u8 reserved; + __be64 rmb_dma_addr; /* RMB virtual address */ + u8 reserved2; + u8 psn[3]; /* packet sequence number */ + struct smc_clc_msg_trail smcr_trl; + /* eye catcher "SMCR" EBCDIC */ + } __packed; + struct { /* SMC-D */ + u64 gid; /* Sender GID */ + u64 token; /* DMB token */ + u8 dmbe_idx; /* DMBE index */ +#if defined(__BIG_ENDIAN_BITFIELD) + u8 dmbe_size : 4, /* buf size (compressed) */ + reserved3 : 4; +#elif defined(__LITTLE_ENDIAN_BITFIELD) + u8 reserved3 : 4, + dmbe_size : 4; +#endif + u16 reserved4; + u32 linkid; /* Link identifier */ + u32 reserved5[3]; + struct smc_clc_msg_trail smcd_trl; + /* eye catcher "SMCD" EBCDIC */ + } __packed; + }; } __packed; /* format defined in RFC7609 */ struct smc_clc_msg_decline { /* clc decline message */ @@ -129,13 +161,26 @@ smc_clc_proposal_get_prefix(struct smc_clc_msg_proposal *pclc) ((u8 *)pclc + sizeof(*pclc) + ntohs(pclc->iparea_offset)); } +/* get SMC-D info from proposal message */ +static inline struct smc_clc_msg_smcd * +smc_get_clc_msg_smcd(struct smc_clc_msg_proposal *prop) +{ + if (ntohs(prop->iparea_offset) != sizeof(struct smc_clc_msg_smcd)) + return NULL; + + return (struct smc_clc_msg_smcd *)(prop + 1); +} + +struct smcd_dev; + int smc_clc_prfx_match(struct socket *clcsock, struct smc_clc_msg_proposal_prefix *prop); int smc_clc_wait_msg(struct smc_sock *smc, void *buf, int buflen, u8 expected_type); int smc_clc_send_decline(struct smc_sock 
*smc, u32 peer_diag_info); -int smc_clc_send_proposal(struct smc_sock *smc, struct smc_ib_device *smcibdev, - u8 ibport); +int smc_clc_send_proposal(struct smc_sock *smc, int smc_type, + struct smc_ib_device *smcibdev, u8 ibport, + struct smcd_dev *ismdev); int smc_clc_send_confirm(struct smc_sock *smc); int smc_clc_send_accept(struct smc_sock *smc, int srv_first_contact); diff --git a/net/smc/smc_core.c b/net/smc/smc_core.c index add82b0266f3..66741e61a3b0 100644 --- a/net/smc/smc_core.c +++ b/net/smc/smc_core.c @@ -25,6 +25,7 @@ #include "smc_llc.h" #include "smc_cdc.h" #include "smc_close.h" +#include "smc_ism.h" #define SMC_LGR_NUM_INCR 256 #define SMC_LGR_FREE_DELAY_SERV (600 * HZ) @@ -46,8 +47,8 @@ static void smc_lgr_schedule_free_work(struct smc_link_group *lgr) * otherwise there is a risk of out-of-sync link groups. */ mod_delayed_work(system_wq, &lgr->free_work, - lgr->role == SMC_CLNT ? SMC_LGR_FREE_DELAY_CLNT : - SMC_LGR_FREE_DELAY_SERV); + (!lgr->is_smcd && lgr->role == SMC_CLNT) ? + SMC_LGR_FREE_DELAY_CLNT : SMC_LGR_FREE_DELAY_SERV); } /* Register connection's alert token in our lookup structure. @@ -153,16 +154,18 @@ static void smc_lgr_free_work(struct work_struct *work) free: spin_unlock_bh(&smc_lgr_list.lock); if (!delayed_work_pending(&lgr->free_work)) { - if (lgr->lnk[SMC_SINGLE_LINK].state != SMC_LNK_INACTIVE) + if (!lgr->is_smcd && + lgr->lnk[SMC_SINGLE_LINK].state != SMC_LNK_INACTIVE) smc_llc_link_inactive(&lgr->lnk[SMC_SINGLE_LINK]); smc_lgr_free(lgr); } } /* create a new SMC link group */ -static int smc_lgr_create(struct smc_sock *smc, +static int smc_lgr_create(struct smc_sock *smc, bool is_smcd, struct smc_ib_device *smcibdev, u8 ibport, - char *peer_systemid, unsigned short vlan_id) + char *peer_systemid, unsigned short vlan_id, + struct smcd_dev *smcismdev, u64 peer_gid) { struct smc_link_group *lgr; struct smc_link *lnk; @@ -170,17 +173,23 @@ static int smc_lgr_create(struct smc_sock *smc, int rc = 0; int i; + if (is_smcd && vlan_id) { + rc = smc_ism_get_vlan(smcismdev, vlan_id); + if (rc) + goto out; + } + lgr = kzalloc(sizeof(*lgr), GFP_KERNEL); if (!lgr) { rc = -ENOMEM; goto out; } - lgr->role = smc->listen_smc ? 
SMC_SERV : SMC_CLNT; + lgr->is_smcd = is_smcd; lgr->sync_err = 0; - memcpy(lgr->peer_systemid, peer_systemid, SMC_SYSTEMID_LEN); lgr->vlan_id = vlan_id; rwlock_init(&lgr->sndbufs_lock); rwlock_init(&lgr->rmbs_lock); + rwlock_init(&lgr->conns_lock); for (i = 0; i < SMC_RMBE_SIZES; i++) { INIT_LIST_HEAD(&lgr->sndbufs[i]); INIT_LIST_HEAD(&lgr->rmbs[i]); @@ -189,36 +198,44 @@ static int smc_lgr_create(struct smc_sock *smc, memcpy(&lgr->id, (u8 *)&smc_lgr_list.num, SMC_LGR_ID_SIZE); INIT_DELAYED_WORK(&lgr->free_work, smc_lgr_free_work); lgr->conns_all = RB_ROOT; - - lnk = &lgr->lnk[SMC_SINGLE_LINK]; - /* initialize link */ - lnk->state = SMC_LNK_ACTIVATING; - lnk->link_id = SMC_SINGLE_LINK; - lnk->smcibdev = smcibdev; - lnk->ibport = ibport; - lnk->path_mtu = smcibdev->pattr[ibport - 1].active_mtu; - if (!smcibdev->initialized) - smc_ib_setup_per_ibdev(smcibdev); - get_random_bytes(rndvec, sizeof(rndvec)); - lnk->psn_initial = rndvec[0] + (rndvec[1] << 8) + (rndvec[2] << 16); - rc = smc_llc_link_init(lnk); - if (rc) - goto free_lgr; - rc = smc_wr_alloc_link_mem(lnk); - if (rc) - goto clear_llc_lnk; - rc = smc_ib_create_protection_domain(lnk); - if (rc) - goto free_link_mem; - rc = smc_ib_create_queue_pair(lnk); - if (rc) - goto dealloc_pd; - rc = smc_wr_create_link(lnk); - if (rc) - goto destroy_qp; - + if (is_smcd) { + /* SMC-D specific settings */ + lgr->peer_gid = peer_gid; + lgr->smcd = smcismdev; + } else { + /* SMC-R specific settings */ + lgr->role = smc->listen_smc ? SMC_SERV : SMC_CLNT; + memcpy(lgr->peer_systemid, peer_systemid, SMC_SYSTEMID_LEN); + + lnk = &lgr->lnk[SMC_SINGLE_LINK]; + /* initialize link */ + lnk->state = SMC_LNK_ACTIVATING; + lnk->link_id = SMC_SINGLE_LINK; + lnk->smcibdev = smcibdev; + lnk->ibport = ibport; + lnk->path_mtu = smcibdev->pattr[ibport - 1].active_mtu; + if (!smcibdev->initialized) + smc_ib_setup_per_ibdev(smcibdev); + get_random_bytes(rndvec, sizeof(rndvec)); + lnk->psn_initial = rndvec[0] + (rndvec[1] << 8) + + (rndvec[2] << 16); + rc = smc_llc_link_init(lnk); + if (rc) + goto free_lgr; + rc = smc_wr_alloc_link_mem(lnk); + if (rc) + goto clear_llc_lnk; + rc = smc_ib_create_protection_domain(lnk); + if (rc) + goto free_link_mem; + rc = smc_ib_create_queue_pair(lnk); + if (rc) + goto dealloc_pd; + rc = smc_wr_create_link(lnk); + if (rc) + goto destroy_qp; + } smc->conn.lgr = lgr; - rwlock_init(&lgr->conns_lock); spin_lock_bh(&smc_lgr_list.lock); list_add(&lgr->list, &smc_lgr_list.list); spin_unlock_bh(&smc_lgr_list.lock); @@ -264,7 +281,12 @@ void smc_conn_free(struct smc_connection *conn) { if (!conn->lgr) return; - smc_cdc_tx_dismiss_slots(conn); + if (conn->lgr->is_smcd) { + smc_ism_unset_conn(conn); + tasklet_kill(&conn->rx_tsklet); + } else { + smc_cdc_tx_dismiss_slots(conn); + } smc_lgr_unregister_conn(conn); smc_buf_unuse(conn); } @@ -280,8 +302,8 @@ static void smc_link_clear(struct smc_link *lnk) smc_wr_free_link_mem(lnk); } -static void smc_buf_free(struct smc_link_group *lgr, bool is_rmb, - struct smc_buf_desc *buf_desc) +static void smcr_buf_free(struct smc_link_group *lgr, bool is_rmb, + struct smc_buf_desc *buf_desc) { struct smc_link *lnk = &lgr->lnk[SMC_SINGLE_LINK]; @@ -301,6 +323,28 @@ static void smc_buf_free(struct smc_link_group *lgr, bool is_rmb, kfree(buf_desc); } +static void smcd_buf_free(struct smc_link_group *lgr, bool is_dmb, + struct smc_buf_desc *buf_desc) +{ + if (is_dmb) { + /* restore original buf len */ + buf_desc->len += sizeof(struct smcd_cdc_msg); + smc_ism_unregister_dmb(lgr->smcd, buf_desc); + } else { + 
kfree(buf_desc->cpu_addr); + } + kfree(buf_desc); +} + +static void smc_buf_free(struct smc_link_group *lgr, bool is_rmb, + struct smc_buf_desc *buf_desc) +{ + if (lgr->is_smcd) + smcd_buf_free(lgr, is_rmb, buf_desc); + else + smcr_buf_free(lgr, is_rmb, buf_desc); +} + static void __smc_lgr_free_bufs(struct smc_link_group *lgr, bool is_rmb) { struct smc_buf_desc *buf_desc, *bf_desc; @@ -332,7 +376,10 @@ static void smc_lgr_free_bufs(struct smc_link_group *lgr) void smc_lgr_free(struct smc_link_group *lgr) { smc_lgr_free_bufs(lgr); - smc_link_clear(&lgr->lnk[SMC_SINGLE_LINK]); + if (lgr->is_smcd) + smc_ism_put_vlan(lgr->smcd, lgr->vlan_id); + else + smc_link_clear(&lgr->lnk[SMC_SINGLE_LINK]); kfree(lgr); } @@ -357,7 +404,8 @@ static void __smc_lgr_terminate(struct smc_link_group *lgr) lgr->terminating = 1; if (!list_empty(&lgr->list)) /* forget lgr */ list_del_init(&lgr->list); - smc_llc_link_inactive(&lgr->lnk[SMC_SINGLE_LINK]); + if (!lgr->is_smcd) + smc_llc_link_inactive(&lgr->lnk[SMC_SINGLE_LINK]); write_lock_bh(&lgr->conns_lock); node = rb_first(&lgr->conns_all); @@ -374,7 +422,8 @@ static void __smc_lgr_terminate(struct smc_link_group *lgr) node = rb_first(&lgr->conns_all); } write_unlock_bh(&lgr->conns_lock); - wake_up(&lgr->lnk[SMC_SINGLE_LINK].wr_reg_wait); + if (!lgr->is_smcd) + wake_up(&lgr->lnk[SMC_SINGLE_LINK].wr_reg_wait); smc_lgr_schedule_free_work(lgr); } @@ -392,17 +441,44 @@ void smc_port_terminate(struct smc_ib_device *smcibdev, u8 ibport) spin_lock_bh(&smc_lgr_list.lock); list_for_each_entry_safe(lgr, l, &smc_lgr_list.list, list) { - if (lgr->lnk[SMC_SINGLE_LINK].smcibdev == smcibdev && + if (!lgr->is_smcd && + lgr->lnk[SMC_SINGLE_LINK].smcibdev == smcibdev && lgr->lnk[SMC_SINGLE_LINK].ibport == ibport) __smc_lgr_terminate(lgr); } spin_unlock_bh(&smc_lgr_list.lock); } +/* Called when SMC-D device is terminated or peer is lost */ +void smc_smcd_terminate(struct smcd_dev *dev, u64 peer_gid) +{ + struct smc_link_group *lgr, *l; + LIST_HEAD(lgr_free_list); + + /* run common cleanup function and build free list */ + spin_lock_bh(&smc_lgr_list.lock); + list_for_each_entry_safe(lgr, l, &smc_lgr_list.list, list) { + if (lgr->is_smcd && lgr->smcd == dev && + (!peer_gid || lgr->peer_gid == peer_gid) && + !list_empty(&lgr->list)) { + __smc_lgr_terminate(lgr); + list_move(&lgr->list, &lgr_free_list); + } + } + spin_unlock_bh(&smc_lgr_list.lock); + + /* cancel the regular free workers and actually free lgrs */ + list_for_each_entry_safe(lgr, l, &lgr_free_list, list) { + list_del_init(&lgr->list); + cancel_delayed_work_sync(&lgr->free_work); + smc_lgr_free(lgr); + } +} + /* Determine vlan of internal TCP socket. 
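 *
 * Illustrative caller-side sketch (simplified; the SMC-D callers live in
 * af_smc.c and are only partly visible in this patch):
 *
 *   unsigned short vlan_id = 0;
 *
 *   smc_vlan_by_tcpsk(smc->clcsock, &vlan_id);   // stays 0 if no VLAN is involved
 *   rc = smc_ism_cantalk(peer_gid, vlan_id, ismdev); // probe peer reachability
 *   // on first contact, smc_lgr_create() registers vlan_id via smc_ism_get_vlan()
 *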
* @vlan_id: address to store the determined vlan id into */ -static int smc_vlan_by_tcpsk(struct socket *clcsock, unsigned short *vlan_id) +int smc_vlan_by_tcpsk(struct socket *clcsock, unsigned short *vlan_id) { struct dst_entry *dst = sk_dst_get(clcsock->sk); struct net_device *ndev; @@ -477,10 +553,30 @@ static int smc_link_determine_gid(struct smc_link_group *lgr) return -ENODEV; } +static bool smcr_lgr_match(struct smc_link_group *lgr, + struct smc_clc_msg_local *lcl, + enum smc_lgr_role role) +{ + return !memcmp(lgr->peer_systemid, lcl->id_for_peer, + SMC_SYSTEMID_LEN) && + !memcmp(lgr->lnk[SMC_SINGLE_LINK].peer_gid, &lcl->gid, + SMC_GID_SIZE) && + !memcmp(lgr->lnk[SMC_SINGLE_LINK].peer_mac, lcl->mac, + sizeof(lcl->mac)) && + lgr->role == role; +} + +static bool smcd_lgr_match(struct smc_link_group *lgr, + struct smcd_dev *smcismdev, u64 peer_gid) +{ + return lgr->peer_gid == peer_gid && lgr->smcd == smcismdev; +} + /* create a new SMC connection (and a new link group if necessary) */ -int smc_conn_create(struct smc_sock *smc, +int smc_conn_create(struct smc_sock *smc, bool is_smcd, int srv_first_contact, struct smc_ib_device *smcibdev, u8 ibport, - struct smc_clc_msg_local *lcl, int srv_first_contact) + struct smc_clc_msg_local *lcl, struct smcd_dev *smcd, + u64 peer_gid) { struct smc_connection *conn = &smc->conn; int local_contact = SMC_FIRST_CONTACT; @@ -502,17 +598,12 @@ int smc_conn_create(struct smc_sock *smc, spin_lock_bh(&smc_lgr_list.lock); list_for_each_entry(lgr, &smc_lgr_list.list, list) { write_lock_bh(&lgr->conns_lock); - if (!memcmp(lgr->peer_systemid, lcl->id_for_peer, - SMC_SYSTEMID_LEN) && - !memcmp(lgr->lnk[SMC_SINGLE_LINK].peer_gid, &lcl->gid, - SMC_GID_SIZE) && - !memcmp(lgr->lnk[SMC_SINGLE_LINK].peer_mac, lcl->mac, - sizeof(lcl->mac)) && + if ((is_smcd ? 
smcd_lgr_match(lgr, smcd, peer_gid) : + smcr_lgr_match(lgr, lcl, role)) && !lgr->sync_err && - (lgr->role == role) && - (lgr->vlan_id == vlan_id) && - ((role == SMC_CLNT) || - (lgr->conns_num < SMC_RMBS_PER_LGR_MAX))) { + lgr->vlan_id == vlan_id && + (role == SMC_CLNT || + lgr->conns_num < SMC_RMBS_PER_LGR_MAX)) { /* link group found */ local_contact = SMC_REUSE_CONTACT; conn->lgr = lgr; @@ -535,16 +626,21 @@ int smc_conn_create(struct smc_sock *smc, create: if (local_contact == SMC_FIRST_CONTACT) { - rc = smc_lgr_create(smc, smcibdev, ibport, - lcl->id_for_peer, vlan_id); + rc = smc_lgr_create(smc, is_smcd, smcibdev, ibport, + lcl->id_for_peer, vlan_id, smcd, peer_gid); if (rc) goto out; smc_lgr_register_conn(conn); /* add smc conn to lgr */ - rc = smc_link_determine_gid(conn->lgr); + if (!is_smcd) + rc = smc_link_determine_gid(conn->lgr); } conn->local_tx_ctrl.common.type = SMC_CDC_MSG_TYPE; conn->local_tx_ctrl.len = SMC_WR_TX_SIZE; conn->urg_state = SMC_URG_READ; + if (is_smcd) { + conn->rx_off = sizeof(struct smcd_cdc_msg); + smcd_cdc_rx_init(conn); /* init tasklet for this conn */ + } #ifndef KERNEL_HAS_ATOMIC64 spin_lock_init(&conn->acurs_lock); #endif @@ -609,8 +705,8 @@ static inline int smc_rmb_wnd_update_limit(int rmbe_size) return min_t(int, rmbe_size / 10, SOCK_MIN_SNDBUF / 2); } -static struct smc_buf_desc *smc_new_buf_create(struct smc_link_group *lgr, - bool is_rmb, int bufsize) +static struct smc_buf_desc *smcr_new_buf_create(struct smc_link_group *lgr, + bool is_rmb, int bufsize) { struct smc_buf_desc *buf_desc; struct smc_link *lnk; @@ -668,7 +764,44 @@ static struct smc_buf_desc *smc_new_buf_create(struct smc_link_group *lgr, return buf_desc; } -static int __smc_buf_create(struct smc_sock *smc, bool is_rmb) +#define SMCD_DMBE_SIZES 7 /* 0 -> 16KB, 1 -> 32KB, .. 6 -> 1MB */ + +static struct smc_buf_desc *smcd_new_buf_create(struct smc_link_group *lgr, + bool is_dmb, int bufsize) +{ + struct smc_buf_desc *buf_desc; + int rc; + + if (smc_compress_bufsize(bufsize) > SMCD_DMBE_SIZES) + return ERR_PTR(-EAGAIN); + + /* try to alloc a new DMB */ + buf_desc = kzalloc(sizeof(*buf_desc), GFP_KERNEL); + if (!buf_desc) + return ERR_PTR(-ENOMEM); + if (is_dmb) { + rc = smc_ism_register_dmb(lgr, bufsize, buf_desc); + if (rc) { + kfree(buf_desc); + return ERR_PTR(-EAGAIN); + } + buf_desc->pages = virt_to_page(buf_desc->cpu_addr); + /* CDC header stored in buf. 
So, pretend it was smaller */ + buf_desc->len = bufsize - sizeof(struct smcd_cdc_msg); + } else { + buf_desc->cpu_addr = kzalloc(bufsize, GFP_KERNEL | + __GFP_NOWARN | __GFP_NORETRY | + __GFP_NOMEMALLOC); + if (!buf_desc->cpu_addr) { + kfree(buf_desc); + return ERR_PTR(-EAGAIN); + } + buf_desc->len = bufsize; + } + return buf_desc; +} + +static int __smc_buf_create(struct smc_sock *smc, bool is_smcd, bool is_rmb) { struct smc_buf_desc *buf_desc = ERR_PTR(-ENOMEM); struct smc_connection *conn = &smc->conn; @@ -706,7 +839,11 @@ static int __smc_buf_create(struct smc_sock *smc, bool is_rmb) break; /* found reusable slot */ } - buf_desc = smc_new_buf_create(lgr, is_rmb, bufsize); + if (is_smcd) + buf_desc = smcd_new_buf_create(lgr, is_rmb, bufsize); + else + buf_desc = smcr_new_buf_create(lgr, is_rmb, bufsize); + if (PTR_ERR(buf_desc) == -ENOMEM) break; if (IS_ERR(buf_desc)) @@ -727,7 +864,10 @@ static int __smc_buf_create(struct smc_sock *smc, bool is_rmb) conn->rmbe_size_short = bufsize_short; smc->sk.sk_rcvbuf = bufsize * 2; atomic_set(&conn->bytes_to_rcv, 0); - conn->rmbe_update_limit = smc_rmb_wnd_update_limit(bufsize); + conn->rmbe_update_limit = + smc_rmb_wnd_update_limit(buf_desc->len); + if (is_smcd) + smc_ism_set_conn(conn); /* map RMB/smcd_dev to conn */ } else { conn->sndbuf_desc = buf_desc; smc->sk.sk_sndbuf = bufsize * 2; @@ -740,6 +880,8 @@ void smc_sndbuf_sync_sg_for_cpu(struct smc_connection *conn) { struct smc_link_group *lgr = conn->lgr; + if (!conn->lgr || conn->lgr->is_smcd) + return; smc_ib_sync_sg_for_cpu(lgr->lnk[SMC_SINGLE_LINK].smcibdev, conn->sndbuf_desc, DMA_TO_DEVICE); } @@ -748,6 +890,8 @@ void smc_sndbuf_sync_sg_for_device(struct smc_connection *conn) { struct smc_link_group *lgr = conn->lgr; + if (!conn->lgr || conn->lgr->is_smcd) + return; smc_ib_sync_sg_for_device(lgr->lnk[SMC_SINGLE_LINK].smcibdev, conn->sndbuf_desc, DMA_TO_DEVICE); } @@ -756,6 +900,8 @@ void smc_rmb_sync_sg_for_cpu(struct smc_connection *conn) { struct smc_link_group *lgr = conn->lgr; + if (!conn->lgr || conn->lgr->is_smcd) + return; smc_ib_sync_sg_for_cpu(lgr->lnk[SMC_SINGLE_LINK].smcibdev, conn->rmb_desc, DMA_FROM_DEVICE); } @@ -764,6 +910,8 @@ void smc_rmb_sync_sg_for_device(struct smc_connection *conn) { struct smc_link_group *lgr = conn->lgr; + if (!conn->lgr || conn->lgr->is_smcd) + return; smc_ib_sync_sg_for_device(lgr->lnk[SMC_SINGLE_LINK].smcibdev, conn->rmb_desc, DMA_FROM_DEVICE); } @@ -774,16 +922,16 @@ void smc_rmb_sync_sg_for_device(struct smc_connection *conn) * the Linux implementation uses just one RMB-element per RMB, i.e. 
uses an * extra RMB for every connection in a link group */ -int smc_buf_create(struct smc_sock *smc) +int smc_buf_create(struct smc_sock *smc, bool is_smcd) { int rc; /* create send buffer */ - rc = __smc_buf_create(smc, false); + rc = __smc_buf_create(smc, is_smcd, false); if (rc) return rc; /* create rmb */ - rc = __smc_buf_create(smc, true); + rc = __smc_buf_create(smc, is_smcd, true); if (rc) smc_buf_free(smc->conn.lgr, false, smc->conn.sndbuf_desc); return rc; @@ -865,7 +1013,8 @@ void smc_core_exit(void) spin_unlock_bh(&smc_lgr_list.lock); list_for_each_entry_safe(lgr, lg, &lgr_freeing_list, list) { list_del_init(&lgr->list); - smc_llc_link_inactive(&lgr->lnk[SMC_SINGLE_LINK]); + if (!lgr->is_smcd) + smc_llc_link_inactive(&lgr->lnk[SMC_SINGLE_LINK]); cancel_delayed_work_sync(&lgr->free_work); smc_lgr_free(lgr); /* free link group */ } diff --git a/net/smc/smc_core.h b/net/smc/smc_core.h index 93cb3523bf50..8b47e0168fc3 100644 --- a/net/smc/smc_core.h +++ b/net/smc/smc_core.h @@ -124,15 +124,28 @@ struct smc_buf_desc { void *cpu_addr; /* virtual address of buffer */ struct page *pages; int len; /* length of buffer */ - struct sg_table sgt[SMC_LINKS_PER_LGR_MAX];/* virtual buffer */ - struct ib_mr *mr_rx[SMC_LINKS_PER_LGR_MAX]; - /* for rmb only: memory region - * incl. rkey provided to peer - */ - u32 order; /* allocation order */ u32 used; /* currently used / unused */ u8 reused : 1; /* new created / reused */ u8 regerr : 1; /* err during registration */ + union { + struct { /* SMC-R */ + struct sg_table sgt[SMC_LINKS_PER_LGR_MAX]; + /* virtual buffer */ + struct ib_mr *mr_rx[SMC_LINKS_PER_LGR_MAX]; + /* for rmb only: memory region + * incl. rkey provided to peer + */ + u32 order; /* allocation order */ + }; + struct { /* SMC-D */ + unsigned short sba_idx; + /* SBA index number */ + u64 token; + /* DMB token number */ + dma_addr_t dma_addr; + /* DMA address */ + }; + }; }; struct smc_rtoken { /* address/key of remote RMB */ @@ -148,12 +161,10 @@ struct smc_rtoken { /* address/key of remote RMB */ * struct smc_clc_msg_accept_confirm.rmbe_size being a 4 bit value (0..15) */ +struct smcd_dev; + struct smc_link_group { struct list_head list; - enum smc_lgr_role role; /* client or server */ - struct smc_link lnk[SMC_LINKS_PER_LGR_MAX]; /* smc link */ - char peer_systemid[SMC_SYSTEMID_LEN]; - /* unique system_id of peer */ struct rb_root conns_all; /* connection tree */ rwlock_t conns_lock; /* protects conns_all */ unsigned int conns_num; /* current # of connections */ @@ -163,17 +174,35 @@ struct smc_link_group { rwlock_t sndbufs_lock; /* protects tx buffers */ struct list_head rmbs[SMC_RMBE_SIZES]; /* rx buffers */ rwlock_t rmbs_lock; /* protects rx buffers */ - struct smc_rtoken rtokens[SMC_RMBS_PER_LGR_MAX] - [SMC_LINKS_PER_LGR_MAX]; - /* remote addr/key pairs */ - unsigned long rtokens_used_mask[BITS_TO_LONGS( - SMC_RMBS_PER_LGR_MAX)]; - /* used rtoken elements */ u8 id[SMC_LGR_ID_SIZE]; /* unique lgr id */ struct delayed_work free_work; /* delayed freeing of an lgr */ u8 sync_err : 1; /* lgr no longer fits to peer */ u8 terminating : 1;/* lgr is terminating */ + + bool is_smcd; /* SMC-R or SMC-D */ + union { + struct { /* SMC-R */ + enum smc_lgr_role role; + /* client or server */ + struct smc_link lnk[SMC_LINKS_PER_LGR_MAX]; + /* smc link */ + char peer_systemid[SMC_SYSTEMID_LEN]; + /* unique system_id of peer */ + struct smc_rtoken rtokens[SMC_RMBS_PER_LGR_MAX] + [SMC_LINKS_PER_LGR_MAX]; + /* remote addr/key pairs */ + unsigned long rtokens_used_mask[BITS_TO_LONGS + 
(SMC_RMBS_PER_LGR_MAX)]; + /* used rtoken elements */ + }; + struct { /* SMC-D */ + u64 peer_gid; + /* Peer GID (remote) */ + struct smcd_dev *smcd; + /* ISM device for VLAN reg. */ + }; + }; }; /* Find the connection associated with the given alert token in the link group. @@ -217,7 +246,8 @@ void smc_lgr_free(struct smc_link_group *lgr); void smc_lgr_forget(struct smc_link_group *lgr); void smc_lgr_terminate(struct smc_link_group *lgr); void smc_port_terminate(struct smc_ib_device *smcibdev, u8 ibport); -int smc_buf_create(struct smc_sock *smc); +void smc_smcd_terminate(struct smcd_dev *dev, u64 peer_gid); +int smc_buf_create(struct smc_sock *smc, bool is_smcd); int smc_uncompress_bufsize(u8 compressed); int smc_rmb_rtoken_handling(struct smc_connection *conn, struct smc_clc_msg_accept_confirm *clc); @@ -227,9 +257,13 @@ void smc_sndbuf_sync_sg_for_cpu(struct smc_connection *conn); void smc_sndbuf_sync_sg_for_device(struct smc_connection *conn); void smc_rmb_sync_sg_for_cpu(struct smc_connection *conn); void smc_rmb_sync_sg_for_device(struct smc_connection *conn); +int smc_vlan_by_tcpsk(struct socket *clcsock, unsigned short *vlan_id); + void smc_conn_free(struct smc_connection *conn); -int smc_conn_create(struct smc_sock *smc, +int smc_conn_create(struct smc_sock *smc, bool is_smcd, int srv_first_contact, struct smc_ib_device *smcibdev, u8 ibport, - struct smc_clc_msg_local *lcl, int srv_first_contact); + struct smc_clc_msg_local *lcl, struct smcd_dev *smcd, + u64 peer_gid); +void smcd_conn_free(struct smc_connection *conn); void smc_core_exit(void); #endif diff --git a/net/smc/smc_diag.c b/net/smc/smc_diag.c index 839354402215..6d83eef1b743 100644 --- a/net/smc/smc_diag.c +++ b/net/smc/smc_diag.c @@ -136,7 +136,8 @@ static int __smc_diag_dump(struct sock *sk, struct sk_buff *skb, goto errout; } - if ((req->diag_ext & (1 << (SMC_DIAG_LGRINFO - 1))) && smc->conn.lgr && + if (smc->conn.lgr && !smc->conn.lgr->is_smcd && + (req->diag_ext & (1 << (SMC_DIAG_LGRINFO - 1))) && !list_empty(&smc->conn.lgr->list)) { struct smc_diag_lgrinfo linfo = { .role = smc->conn.lgr->role, @@ -155,6 +156,21 @@ static int __smc_diag_dump(struct sock *sk, struct sk_buff *skb, if (nla_put(skb, SMC_DIAG_LGRINFO, sizeof(linfo), &linfo) < 0) goto errout; } + if (smc->conn.lgr && smc->conn.lgr->is_smcd && + (req->diag_ext & (1 << (SMC_DIAG_DMBINFO - 1))) && + !list_empty(&smc->conn.lgr->list)) { + struct smc_connection *conn = &smc->conn; + struct smcd_diag_dmbinfo dinfo = { + .linkid = *((u32 *)conn->lgr->id), + .peer_gid = conn->lgr->peer_gid, + .my_gid = conn->lgr->smcd->local_gid, + .token = conn->rmb_desc->token, + .peer_token = conn->peer_token + }; + + if (nla_put(skb, SMC_DIAG_DMBINFO, sizeof(dinfo), &dinfo) < 0) + goto errout; + } nlmsg_end(skb, nlh); return 0; diff --git a/net/smc/smc_ib.c b/net/smc/smc_ib.c index 0eed7ab9f28b..36de2fd76170 100644 --- a/net/smc/smc_ib.c +++ b/net/smc/smc_ib.c @@ -143,6 +143,62 @@ out: return rc; } +static int smc_ib_fill_gid_and_mac(struct smc_ib_device *smcibdev, u8 ibport) +{ + struct ib_gid_attr gattr; + int rc; + + rc = ib_query_gid(smcibdev->ibdev, ibport, 0, + &smcibdev->gid[ibport - 1], &gattr); + if (rc || !gattr.ndev) + return -ENODEV; + + memcpy(smcibdev->mac[ibport - 1], gattr.ndev->dev_addr, ETH_ALEN); + dev_put(gattr.ndev); + return 0; +} + +/* Create an identifier unique for this instance of SMC-R. + * The MAC-address of the first active registered IB device + * plus a random 2-byte number is used to create this identifier. 
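+ *
+ * Layout illustration (sketch only, derived from the code below):
+ *   local_systemid[0..1] = two random bytes
+ *   local_systemid[2..7] = MAC address of that port
+ *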
+ * This name is delivered to the peer during connection initialization. + */ +static inline void smc_ib_define_local_systemid(struct smc_ib_device *smcibdev, + u8 ibport) +{ + memcpy(&local_systemid[2], &smcibdev->mac[ibport - 1], + sizeof(smcibdev->mac[ibport - 1])); + get_random_bytes(&local_systemid[0], 2); +} + +bool smc_ib_port_active(struct smc_ib_device *smcibdev, u8 ibport) +{ + return smcibdev->pattr[ibport - 1].state == IB_PORT_ACTIVE; +} + +static int smc_ib_remember_port_attr(struct smc_ib_device *smcibdev, u8 ibport) +{ + int rc; + + memset(&smcibdev->pattr[ibport - 1], 0, + sizeof(smcibdev->pattr[ibport - 1])); + rc = ib_query_port(smcibdev->ibdev, ibport, + &smcibdev->pattr[ibport - 1]); + if (rc) + goto out; + /* the SMC protocol requires specification of the RoCE MAC address */ + rc = smc_ib_fill_gid_and_mac(smcibdev, ibport); + if (rc) + goto out; + if (!strncmp(local_systemid, SMC_LOCAL_SYSTEMID_RESET, + sizeof(local_systemid)) && + smc_ib_port_active(smcibdev, ibport)) + /* create unique system identifier */ + smc_ib_define_local_systemid(smcibdev, ibport); +out: + return rc; +} + /* process context wrapper for might_sleep smc_ib_remember_port_attr */ static void smc_ib_port_event_work(struct work_struct *work) { @@ -370,62 +426,6 @@ void smc_ib_buf_unmap_sg(struct smc_ib_device *smcibdev, buf_slot->sgt[SMC_SINGLE_LINK].sgl->dma_address = 0; } -static int smc_ib_fill_gid_and_mac(struct smc_ib_device *smcibdev, u8 ibport) -{ - struct ib_gid_attr gattr; - int rc; - - rc = ib_query_gid(smcibdev->ibdev, ibport, 0, - &smcibdev->gid[ibport - 1], &gattr); - if (rc || !gattr.ndev) - return -ENODEV; - - memcpy(smcibdev->mac[ibport - 1], gattr.ndev->dev_addr, ETH_ALEN); - dev_put(gattr.ndev); - return 0; -} - -/* Create an identifier unique for this instance of SMC-R. - * The MAC-address of the first active registered IB device - * plus a random 2-byte number is used to create this identifier. - * This name is delivered to the peer during connection initialization. 
- */ -static inline void smc_ib_define_local_systemid(struct smc_ib_device *smcibdev, - u8 ibport) -{ - memcpy(&local_systemid[2], &smcibdev->mac[ibport - 1], - sizeof(smcibdev->mac[ibport - 1])); - get_random_bytes(&local_systemid[0], 2); -} - -bool smc_ib_port_active(struct smc_ib_device *smcibdev, u8 ibport) -{ - return smcibdev->pattr[ibport - 1].state == IB_PORT_ACTIVE; -} - -int smc_ib_remember_port_attr(struct smc_ib_device *smcibdev, u8 ibport) -{ - int rc; - - memset(&smcibdev->pattr[ibport - 1], 0, - sizeof(smcibdev->pattr[ibport - 1])); - rc = ib_query_port(smcibdev->ibdev, ibport, - &smcibdev->pattr[ibport - 1]); - if (rc) - goto out; - /* the SMC protocol requires specification of the RoCE MAC address */ - rc = smc_ib_fill_gid_and_mac(smcibdev, ibport); - if (rc) - goto out; - if (!strncmp(local_systemid, SMC_LOCAL_SYSTEMID_RESET, - sizeof(local_systemid)) && - smc_ib_port_active(smcibdev, ibport)) - /* create unique system identifier */ - smc_ib_define_local_systemid(smcibdev, ibport); -out: - return rc; -} - long smc_ib_setup_per_ibdev(struct smc_ib_device *smcibdev) { struct ib_cq_init_attr cqattr = { @@ -454,9 +454,6 @@ long smc_ib_setup_per_ibdev(struct smc_ib_device *smcibdev) smcibdev->roce_cq_recv = NULL; goto err; } - INIT_IB_EVENT_HANDLER(&smcibdev->event_handler, smcibdev->ibdev, - smc_ib_global_event_handler); - ib_register_event_handler(&smcibdev->event_handler); smc_wr_add_dev(smcibdev); smcibdev->initialized = 1; return rc; @@ -472,7 +469,6 @@ static void smc_ib_cleanup_per_ibdev(struct smc_ib_device *smcibdev) return; smcibdev->initialized = 0; smc_wr_remove_dev(smcibdev); - ib_unregister_event_handler(&smcibdev->event_handler); ib_destroy_cq(smcibdev->roce_cq_recv); ib_destroy_cq(smcibdev->roce_cq_send); } @@ -483,6 +479,8 @@ static struct ib_client smc_ib_client; static void smc_ib_add_dev(struct ib_device *ibdev) { struct smc_ib_device *smcibdev; + u8 port_cnt; + int i; if (ibdev->node_type != RDMA_NODE_IB_CA) return; @@ -498,6 +496,21 @@ static void smc_ib_add_dev(struct ib_device *ibdev) list_add_tail(&smcibdev->list, &smc_ib_devices.list); spin_unlock(&smc_ib_devices.lock); ib_set_client_data(ibdev, &smc_ib_client, smcibdev); + INIT_IB_EVENT_HANDLER(&smcibdev->event_handler, smcibdev->ibdev, + smc_ib_global_event_handler); + ib_register_event_handler(&smcibdev->event_handler); + + /* trigger reading of the port attributes */ + port_cnt = smcibdev->ibdev->phys_port_cnt; + for (i = 0; + i < min_t(size_t, port_cnt, SMC_MAX_PORTS); + i++) { + set_bit(i, &smcibdev->port_event_mask); + /* determine pnetids of the port */ + smc_pnetid_by_dev_port(ibdev->dev.parent, i, + smcibdev->pnetid[i]); + } + schedule_work(&smcibdev->port_event_work); } /* callback function for ib_register_client() */ @@ -512,6 +525,7 @@ static void smc_ib_remove_dev(struct ib_device *ibdev, void *client_data) spin_unlock(&smc_ib_devices.lock); smc_pnet_remove_by_ibdev(smcibdev); smc_ib_cleanup_per_ibdev(smcibdev); + ib_unregister_event_handler(&smcibdev->event_handler); kfree(smcibdev); } diff --git a/net/smc/smc_ib.h b/net/smc/smc_ib.h index e90630dadf8e..7c1223c91229 100644 --- a/net/smc/smc_ib.h +++ b/net/smc/smc_ib.h @@ -15,6 +15,7 @@ #include <linux/interrupt.h> #include <linux/if_ether.h> #include <rdma/ib_verbs.h> +#include <net/smc.h> #define SMC_MAX_PORTS 2 /* Max # of ports */ #define SMC_GID_SIZE sizeof(union ib_gid) @@ -40,6 +41,8 @@ struct smc_ib_device { /* ib-device infos for smc */ char mac[SMC_MAX_PORTS][ETH_ALEN]; /* mac address per port*/ union ib_gid gid[SMC_MAX_PORTS]; 
/* gid per port */ + u8 pnetid[SMC_MAX_PORTS][SMC_MAX_PNETID_LEN]; + /* pnetid per port */ u8 initialized : 1; /* ib dev CQ, evthdl done */ struct work_struct port_event_work; unsigned long port_event_mask; @@ -51,7 +54,6 @@ struct smc_link; int smc_ib_register_client(void) __init; void smc_ib_unregister_client(void); bool smc_ib_port_active(struct smc_ib_device *smcibdev, u8 ibport); -int smc_ib_remember_port_attr(struct smc_ib_device *smcibdev, u8 ibport); int smc_ib_buf_map_sg(struct smc_ib_device *smcibdev, struct smc_buf_desc *buf_slot, enum dma_data_direction data_direction); diff --git a/net/smc/smc_ism.c b/net/smc/smc_ism.c new file mode 100644 index 000000000000..cfade7fdcc6d --- /dev/null +++ b/net/smc/smc_ism.c @@ -0,0 +1,314 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Shared Memory Communications Direct over ISM devices (SMC-D) + * + * Functions for ISM device. + * + * Copyright IBM Corp. 2018 + */ + +#include <linux/spinlock.h> +#include <linux/slab.h> +#include <asm/page.h> + +#include "smc.h" +#include "smc_core.h" +#include "smc_ism.h" +#include "smc_pnet.h" + +struct smcd_dev_list smcd_dev_list = { + .list = LIST_HEAD_INIT(smcd_dev_list.list), + .lock = __SPIN_LOCK_UNLOCKED(smcd_dev_list.lock) +}; + +/* Test if an ISM communication is possible. */ +int smc_ism_cantalk(u64 peer_gid, unsigned short vlan_id, struct smcd_dev *smcd) +{ + return smcd->ops->query_remote_gid(smcd, peer_gid, vlan_id ? 1 : 0, + vlan_id); +} + +int smc_ism_write(struct smcd_dev *smcd, const struct smc_ism_position *pos, + void *data, size_t len) +{ + int rc; + + rc = smcd->ops->move_data(smcd, pos->token, pos->index, pos->signal, + pos->offset, data, len); + + return rc < 0 ? rc : 0; +} + +/* Set a connection using this DMBE. */ +void smc_ism_set_conn(struct smc_connection *conn) +{ + unsigned long flags; + + spin_lock_irqsave(&conn->lgr->smcd->lock, flags); + conn->lgr->smcd->conn[conn->rmb_desc->sba_idx] = conn; + spin_unlock_irqrestore(&conn->lgr->smcd->lock, flags); +} + +/* Unset a connection using this DMBE. */ +void smc_ism_unset_conn(struct smc_connection *conn) +{ + unsigned long flags; + + if (!conn->rmb_desc) + return; + + spin_lock_irqsave(&conn->lgr->smcd->lock, flags); + conn->lgr->smcd->conn[conn->rmb_desc->sba_idx] = NULL; + spin_unlock_irqrestore(&conn->lgr->smcd->lock, flags); +} + +/* Register a VLAN identifier with the ISM device. Use a reference count + * and add a VLAN identifier only when the first DMB using this VLAN is + * registered. + */ +int smc_ism_get_vlan(struct smcd_dev *smcd, unsigned short vlanid) +{ + struct smc_ism_vlanid *new_vlan, *vlan; + unsigned long flags; + int rc = 0; + + if (!vlanid) /* No valid vlan id */ + return -EINVAL; + + /* create new vlan entry, in case we need it */ + new_vlan = kzalloc(sizeof(*new_vlan), GFP_KERNEL); + if (!new_vlan) + return -ENOMEM; + new_vlan->vlanid = vlanid; + refcount_set(&new_vlan->refcnt, 1); + + /* if there is an existing entry, increase count and return */ + spin_lock_irqsave(&smcd->lock, flags); + list_for_each_entry(vlan, &smcd->vlan, list) { + if (vlan->vlanid == vlanid) { + refcount_inc(&vlan->refcnt); + kfree(new_vlan); + goto out; + } + } + + /* no existing entry found. + * add new entry to device; might fail, e.g., if HW limit reached + */ + if (smcd->ops->add_vlan_id(smcd, vlanid)) { + kfree(new_vlan); + rc = -EIO; + goto out; + } + list_add_tail(&new_vlan->list, &smcd->vlan); +out: + spin_unlock_irqrestore(&smcd->lock, flags); + return rc; +} + +/* Unregister a VLAN identifier with the ISM device. 
Use a reference count + * and remove a VLAN identifier only when the last DMB using this VLAN is + * unregistered. + */ +int smc_ism_put_vlan(struct smcd_dev *smcd, unsigned short vlanid) +{ + struct smc_ism_vlanid *vlan; + unsigned long flags; + bool found = false; + int rc = 0; + + if (!vlanid) /* No valid vlan id */ + return -EINVAL; + + spin_lock_irqsave(&smcd->lock, flags); + list_for_each_entry(vlan, &smcd->vlan, list) { + if (vlan->vlanid == vlanid) { + if (!refcount_dec_and_test(&vlan->refcnt)) + goto out; + found = true; + break; + } + } + if (!found) { + rc = -ENOENT; + goto out; /* VLAN id not in table */ + } + + /* Found and the last reference just gone */ + if (smcd->ops->del_vlan_id(smcd, vlanid)) + rc = -EIO; + list_del(&vlan->list); + kfree(vlan); +out: + spin_unlock_irqrestore(&smcd->lock, flags); + return rc; +} + +int smc_ism_unregister_dmb(struct smcd_dev *smcd, struct smc_buf_desc *dmb_desc) +{ + struct smcd_dmb dmb; + + memset(&dmb, 0, sizeof(dmb)); + dmb.dmb_tok = dmb_desc->token; + dmb.sba_idx = dmb_desc->sba_idx; + dmb.cpu_addr = dmb_desc->cpu_addr; + dmb.dma_addr = dmb_desc->dma_addr; + dmb.dmb_len = dmb_desc->len; + return smcd->ops->unregister_dmb(smcd, &dmb); +} + +int smc_ism_register_dmb(struct smc_link_group *lgr, int dmb_len, + struct smc_buf_desc *dmb_desc) +{ + struct smcd_dmb dmb; + int rc; + + memset(&dmb, 0, sizeof(dmb)); + dmb.dmb_len = dmb_len; + dmb.sba_idx = dmb_desc->sba_idx; + dmb.vlan_id = lgr->vlan_id; + dmb.rgid = lgr->peer_gid; + rc = lgr->smcd->ops->register_dmb(lgr->smcd, &dmb); + if (!rc) { + dmb_desc->sba_idx = dmb.sba_idx; + dmb_desc->token = dmb.dmb_tok; + dmb_desc->cpu_addr = dmb.cpu_addr; + dmb_desc->dma_addr = dmb.dma_addr; + dmb_desc->len = dmb.dmb_len; + } + return rc; +} + +struct smc_ism_event_work { + struct work_struct work; + struct smcd_dev *smcd; + struct smcd_event event; +}; + +/* worker for SMC-D events */ +static void smc_ism_event_work(struct work_struct *work) +{ + struct smc_ism_event_work *wrk = + container_of(work, struct smc_ism_event_work, work); + + switch (wrk->event.type) { + case ISM_EVENT_GID: /* GID event, token is peer GID */ + smc_smcd_terminate(wrk->smcd, wrk->event.tok); + break; + case ISM_EVENT_DMB: + break; + } + kfree(wrk); +} + +static void smcd_release(struct device *dev) +{ + struct smcd_dev *smcd = container_of(dev, struct smcd_dev, dev); + + kfree(smcd->conn); + kfree(smcd); +} + +struct smcd_dev *smcd_alloc_dev(struct device *parent, const char *name, + const struct smcd_ops *ops, int max_dmbs) +{ + struct smcd_dev *smcd; + + smcd = kzalloc(sizeof(*smcd), GFP_KERNEL); + if (!smcd) + return NULL; + smcd->conn = kcalloc(max_dmbs, sizeof(struct smc_connection *), + GFP_KERNEL); + if (!smcd->conn) { + kfree(smcd); + return NULL; + } + + smcd->dev.parent = parent; + smcd->dev.release = smcd_release; + device_initialize(&smcd->dev); + dev_set_name(&smcd->dev, name); + smcd->ops = ops; + smc_pnetid_by_dev_port(parent, 0, smcd->pnetid); + + spin_lock_init(&smcd->lock); + INIT_LIST_HEAD(&smcd->vlan); + smcd->event_wq = alloc_ordered_workqueue("ism_evt_wq-%s)", + WQ_MEM_RECLAIM, name); + return smcd; +} +EXPORT_SYMBOL_GPL(smcd_alloc_dev); + +int smcd_register_dev(struct smcd_dev *smcd) +{ + spin_lock(&smcd_dev_list.lock); + list_add_tail(&smcd->list, &smcd_dev_list.list); + spin_unlock(&smcd_dev_list.lock); + + return device_add(&smcd->dev); +} +EXPORT_SYMBOL_GPL(smcd_register_dev); + +void smcd_unregister_dev(struct smcd_dev *smcd) +{ + spin_lock(&smcd_dev_list.lock); + list_del(&smcd->list); + 
spin_unlock(&smcd_dev_list.lock); + flush_workqueue(smcd->event_wq); + destroy_workqueue(smcd->event_wq); + smc_smcd_terminate(smcd, 0); + + device_del(&smcd->dev); +} +EXPORT_SYMBOL_GPL(smcd_unregister_dev); + +void smcd_free_dev(struct smcd_dev *smcd) +{ + put_device(&smcd->dev); +} +EXPORT_SYMBOL_GPL(smcd_free_dev); + +/* SMCD Device event handler. Called from ISM device interrupt handler. + * Parameters are smcd device pointer, + * - event->type (0 --> DMB, 1 --> GID), + * - event->code (event code), + * - event->tok (either DMB token when event type 0, or GID when event type 1) + * - event->time (time of day) + * - event->info (debug info). + * + * Context: + * - Function called in IRQ context from ISM device driver event handler. + */ +void smcd_handle_event(struct smcd_dev *smcd, struct smcd_event *event) +{ + struct smc_ism_event_work *wrk; + + /* copy event to event work queue, and let it be handled there */ + wrk = kmalloc(sizeof(*wrk), GFP_ATOMIC); + if (!wrk) + return; + INIT_WORK(&wrk->work, smc_ism_event_work); + wrk->smcd = smcd; + wrk->event = *event; + queue_work(smcd->event_wq, &wrk->work); +} +EXPORT_SYMBOL_GPL(smcd_handle_event); + +/* SMCD Device interrupt handler. Called from ISM device interrupt handler. + * Parameters are smcd device pointer and DMB number. Find the connection and + * schedule the tasklet for this connection. + * + * Context: + * - Function called in IRQ context from ISM device driver IRQ handler. + */ +void smcd_handle_irq(struct smcd_dev *smcd, unsigned int dmbno) +{ + struct smc_connection *conn = NULL; + unsigned long flags; + + spin_lock_irqsave(&smcd->lock, flags); + conn = smcd->conn[dmbno]; + if (conn) + tasklet_schedule(&conn->rx_tsklet); + spin_unlock_irqrestore(&smcd->lock, flags); +} +EXPORT_SYMBOL_GPL(smcd_handle_irq); diff --git a/net/smc/smc_ism.h b/net/smc/smc_ism.h new file mode 100644 index 000000000000..aee45b860b79 --- /dev/null +++ b/net/smc/smc_ism.h @@ -0,0 +1,48 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Shared Memory Communications Direct over ISM devices (SMC-D) + * + * SMC-D ISM device structure definitions. + * + * Copyright IBM Corp. 
2018 + */ + +#ifndef SMCD_ISM_H +#define SMCD_ISM_H + +#include <linux/uio.h> + +#include "smc.h" + +struct smcd_dev_list { /* List of SMCD devices */ + struct list_head list; + spinlock_t lock; /* Protects list of devices */ +}; + +extern struct smcd_dev_list smcd_dev_list; /* list of smcd devices */ + +struct smc_ism_vlanid { /* VLAN id set on ISM device */ + struct list_head list; + unsigned short vlanid; /* Vlan id */ + refcount_t refcnt; /* Reference count */ +}; + +struct smc_ism_position { /* ISM device position to write to */ + u64 token; /* Token of DMB */ + u32 offset; /* Offset into DMBE */ + u8 index; /* Index of DMBE */ + u8 signal; /* Generate interrupt on owner side */ +}; + +struct smcd_dev; + +int smc_ism_cantalk(u64 peer_gid, unsigned short vlan_id, struct smcd_dev *dev); +void smc_ism_set_conn(struct smc_connection *conn); +void smc_ism_unset_conn(struct smc_connection *conn); +int smc_ism_get_vlan(struct smcd_dev *dev, unsigned short vlan_id); +int smc_ism_put_vlan(struct smcd_dev *dev, unsigned short vlan_id); +int smc_ism_register_dmb(struct smc_link_group *lgr, int buf_size, + struct smc_buf_desc *dmb_desc); +int smc_ism_unregister_dmb(struct smcd_dev *dev, struct smc_buf_desc *dmb_desc); +int smc_ism_write(struct smcd_dev *dev, const struct smc_ism_position *pos, + void *data, size_t len); +#endif diff --git a/net/smc/smc_pnet.c b/net/smc/smc_pnet.c index d7b88b2d1b22..1b6c066d3495 100644 --- a/net/smc/smc_pnet.c +++ b/net/smc/smc_pnet.c @@ -22,13 +22,12 @@ #include "smc_pnet.h" #include "smc_ib.h" - -#define SMC_MAX_PNET_ID_LEN 16 /* Max. length of PNET id */ +#include "smc_ism.h" static struct nla_policy smc_pnet_policy[SMC_PNETID_MAX + 1] = { [SMC_PNETID_NAME] = { .type = NLA_NUL_STRING, - .len = SMC_MAX_PNET_ID_LEN - 1 + .len = SMC_MAX_PNETID_LEN - 1 }, [SMC_PNETID_ETHNAME] = { .type = NLA_NUL_STRING, @@ -65,7 +64,7 @@ static struct smc_pnettable { */ struct smc_pnetentry { struct list_head list; - char pnet_name[SMC_MAX_PNET_ID_LEN + 1]; + char pnet_name[SMC_MAX_PNETID_LEN + 1]; struct net_device *ndev; struct smc_ib_device *smcibdev; u8 ib_port; @@ -209,7 +208,7 @@ static bool smc_pnetid_valid(const char *pnet_name, char *pnetid) return false; while (--end >= bf && isspace(*end)) ; - if (end - bf >= SMC_MAX_PNET_ID_LEN) + if (end - bf >= SMC_MAX_PNETID_LEN) return false; while (bf <= end) { if (!isalnum(*bf)) @@ -358,9 +357,6 @@ static int smc_pnet_add(struct sk_buff *skb, struct genl_info *info) kfree(pnetelem); return rc; } - rc = smc_ib_remember_port_attr(pnetelem->smcibdev, pnetelem->ib_port); - if (rc) - smc_pnet_remove_by_pnetid(pnetelem->pnet_name); return rc; } @@ -485,10 +481,10 @@ static int smc_pnet_netdev_event(struct notifier_block *this, case NETDEV_REBOOT: case NETDEV_UNREGISTER: smc_pnet_remove_by_ndev(event_dev); + return NOTIFY_OK; default: - break; + return NOTIFY_DONE; } - return NOTIFY_DONE; } static struct notifier_block smc_netdev_notifier = { @@ -515,26 +511,91 @@ void smc_pnet_exit(void) genl_unregister_family(&smc_pnet_nl_family); } -/* PNET table analysis for a given sock: - * determine ib_device and port belonging to used internal TCP socket - * ethernet interface. +/* Determine one base device for stacked net devices. + * If the lower device level contains more than one devices + * (for instance with bonding slaves), just the first device + * is used to reach a base device. 
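+ *
+ * Example of such a stack (illustration only):
+ *
+ *   bond0.100 (VLAN)  ->  bond0 (bond master)  ->  eth0 (first slave)
+ *
+ * The walk below ends at eth0; its parent device and dev_port are then used
+ * for the hardware PNETID lookup (see smc_pnetid_by_dev_port()).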
*/ -void smc_pnet_find_roce_resource(struct sock *sk, - struct smc_ib_device **smcibdev, u8 *ibport) +static struct net_device *pnet_find_base_ndev(struct net_device *ndev) { - struct dst_entry *dst = sk_dst_get(sk); - struct smc_pnetentry *pnetelem; + int i, nest_lvl; - *smcibdev = NULL; - *ibport = 0; + rtnl_lock(); + nest_lvl = dev_get_nest_level(ndev); + for (i = 0; i < nest_lvl; i++) { + struct list_head *lower = &ndev->adj_list.lower; + + if (list_empty(lower)) + break; + lower = lower->next; + ndev = netdev_lower_get_next(ndev, &lower); + } + rtnl_unlock(); + return ndev; +} + +/* Determine the corresponding IB device port based on the hardware PNETID. + * Searching stops at the first matching active IB device port. + */ +static void smc_pnet_find_roce_by_pnetid(struct net_device *ndev, + struct smc_ib_device **smcibdev, + u8 *ibport) +{ + u8 ndev_pnetid[SMC_MAX_PNETID_LEN]; + struct smc_ib_device *ibdev; + int i; + + ndev = pnet_find_base_ndev(ndev); + if (smc_pnetid_by_dev_port(ndev->dev.parent, ndev->dev_port, + ndev_pnetid)) + return; /* pnetid could not be determined */ + + spin_lock(&smc_ib_devices.lock); + list_for_each_entry(ibdev, &smc_ib_devices.list, list) { + for (i = 1; i <= SMC_MAX_PORTS; i++) { + if (!memcmp(ibdev->pnetid[i - 1], ndev_pnetid, + SMC_MAX_PNETID_LEN) && + smc_ib_port_active(ibdev, i)) { + *smcibdev = ibdev; + *ibport = i; + break; + } + } + } + spin_unlock(&smc_ib_devices.lock); +} + +static void smc_pnet_find_ism_by_pnetid(struct net_device *ndev, + struct smcd_dev **smcismdev) +{ + u8 ndev_pnetid[SMC_MAX_PNETID_LEN]; + struct smcd_dev *ismdev; + + ndev = pnet_find_base_ndev(ndev); + if (smc_pnetid_by_dev_port(ndev->dev.parent, ndev->dev_port, + ndev_pnetid)) + return; /* pnetid could not be determined */ + + spin_lock(&smcd_dev_list.lock); + list_for_each_entry(ismdev, &smcd_dev_list.list, list) { + if (!memcmp(ismdev->pnetid, ndev_pnetid, SMC_MAX_PNETID_LEN)) { + *smcismdev = ismdev; + break; + } + } + spin_unlock(&smcd_dev_list.lock); +} + +/* Lookup of coupled ib_device via SMC pnet table */ +static void smc_pnet_find_roce_by_table(struct net_device *netdev, + struct smc_ib_device **smcibdev, + u8 *ibport) +{ + struct smc_pnetentry *pnetelem; - if (!dst) - return; - if (!dst->dev) - goto out_rel; read_lock(&smc_pnettable.lock); list_for_each_entry(pnetelem, &smc_pnettable.pnetlist, list) { - if (dst->dev == pnetelem->ndev) { + if (netdev == pnetelem->ndev) { if (smc_ib_port_active(pnetelem->smcibdev, pnetelem->ib_port)) { *smcibdev = pnetelem->smcibdev; @@ -544,6 +605,54 @@ void smc_pnet_find_roce_resource(struct sock *sk, } } read_unlock(&smc_pnettable.lock); +} + +/* PNET table analysis for a given sock: + * determine ib_device and port belonging to used internal TCP socket + * ethernet interface. 
+ */ +void smc_pnet_find_roce_resource(struct sock *sk, + struct smc_ib_device **smcibdev, u8 *ibport) +{ + struct dst_entry *dst = sk_dst_get(sk); + + *smcibdev = NULL; + *ibport = 0; + + if (!dst) + goto out; + if (!dst->dev) + goto out_rel; + + /* if possible, lookup via hardware-defined pnetid */ + smc_pnet_find_roce_by_pnetid(dst->dev, smcibdev, ibport); + if (*smcibdev) + goto out_rel; + + /* lookup via SMC PNET table */ + smc_pnet_find_roce_by_table(dst->dev, smcibdev, ibport); + +out_rel: + dst_release(dst); +out: + return; +} + +void smc_pnet_find_ism_resource(struct sock *sk, struct smcd_dev **smcismdev) +{ + struct dst_entry *dst = sk_dst_get(sk); + + *smcismdev = NULL; + if (!dst) + goto out; + if (!dst->dev) + goto out_rel; + + /* if possible, lookup via hardware-defined pnetid */ + smc_pnet_find_ism_by_pnetid(dst->dev, smcismdev); + out_rel: dst_release(dst); +out: + return; } diff --git a/net/smc/smc_pnet.h b/net/smc/smc_pnet.h index 5a29519db976..1e94fd4df7bc 100644 --- a/net/smc/smc_pnet.h +++ b/net/smc/smc_pnet.h @@ -12,12 +12,28 @@ #ifndef _SMC_PNET_H #define _SMC_PNET_H +#if IS_ENABLED(CONFIG_HAVE_PNETID) +#include <asm/pnet.h> +#endif + struct smc_ib_device; +struct smcd_dev; + +static inline int smc_pnetid_by_dev_port(struct device *dev, + unsigned short port, u8 *pnetid) +{ +#if IS_ENABLED(CONFIG_HAVE_PNETID) + return pnet_id_by_dev_port(dev, port, pnetid); +#else + return -ENOENT; +#endif +} int smc_pnet_init(void) __init; void smc_pnet_exit(void); int smc_pnet_remove_by_ibdev(struct smc_ib_device *ibdev); void smc_pnet_find_roce_resource(struct sock *sk, struct smc_ib_device **smcibdev, u8 *ibport); +void smc_pnet_find_ism_resource(struct sock *sk, struct smcd_dev **smcismdev); #endif diff --git a/net/smc/smc_rx.c b/net/smc/smc_rx.c index 3d77b383cccd..b329803c8339 100644 --- a/net/smc/smc_rx.c +++ b/net/smc/smc_rx.c @@ -305,7 +305,7 @@ int smc_rx_recvmsg(struct smc_sock *smc, struct msghdr *msg, target = sock_rcvlowat(sk, flags & MSG_WAITALL, len); /* we currently use 1 RMBE per RMB, so RMBE == RMB base addr */ - rcvbuf_base = conn->rmb_desc->cpu_addr; + rcvbuf_base = conn->rx_off + conn->rmb_desc->cpu_addr; do { /* while (read_remaining) */ if (read_done >= target || (pipe && read_done)) diff --git a/net/smc/smc_tx.c b/net/smc/smc_tx.c index f82886b7d1d8..142bcb134dd6 100644 --- a/net/smc/smc_tx.c +++ b/net/smc/smc_tx.c @@ -24,6 +24,7 @@ #include "smc.h" #include "smc_wr.h" #include "smc_cdc.h" +#include "smc_ism.h" #include "smc_tx.h" #define SMC_TX_WORK_DELAY HZ @@ -250,6 +251,24 @@ out_err: /***************************** sndbuf consumer *******************************/ +/* sndbuf consumer: actual data transfer of one target chunk with ISM write */ +int smcd_tx_ism_write(struct smc_connection *conn, void *data, size_t len, + u32 offset, int signal) +{ + struct smc_ism_position pos; + int rc; + + memset(&pos, 0, sizeof(pos)); + pos.token = conn->peer_token; + pos.index = conn->peer_rmbe_idx; + pos.offset = conn->tx_off + offset; + pos.signal = signal; + rc = smc_ism_write(conn->lgr->smcd, &pos, data, len); + if (rc) + conn->local_tx_ctrl.conn_state_flags.peer_conn_abort = 1; + return rc; +} + /* sndbuf consumer: actual data transfer of one target chunk with RDMA write */ static int smc_tx_rdma_write(struct smc_connection *conn, int peer_rmbe_offset, int num_sges, struct ib_sge sges[]) @@ -297,21 +316,104 @@ static inline void smc_tx_advance_cursors(struct smc_connection *conn, smc_curs_add(conn->sndbuf_desc->len, sent, len); } +/* SMC-R helper for 
smc_tx_rdma_writes() */ +static int smcr_tx_rdma_writes(struct smc_connection *conn, size_t len, + size_t src_off, size_t src_len, + size_t dst_off, size_t dst_len) +{ + dma_addr_t dma_addr = + sg_dma_address(conn->sndbuf_desc->sgt[SMC_SINGLE_LINK].sgl); + struct smc_link *link = &conn->lgr->lnk[SMC_SINGLE_LINK]; + int src_len_sum = src_len, dst_len_sum = dst_len; + struct ib_sge sges[SMC_IB_MAX_SEND_SGE]; + int sent_count = src_off; + int srcchunk, dstchunk; + int num_sges; + int rc; + + for (dstchunk = 0; dstchunk < 2; dstchunk++) { + num_sges = 0; + for (srcchunk = 0; srcchunk < 2; srcchunk++) { + sges[srcchunk].addr = dma_addr + src_off; + sges[srcchunk].length = src_len; + sges[srcchunk].lkey = link->roce_pd->local_dma_lkey; + num_sges++; + + src_off += src_len; + if (src_off >= conn->sndbuf_desc->len) + src_off -= conn->sndbuf_desc->len; + /* modulo in send ring */ + if (src_len_sum == dst_len) + break; /* either on 1st or 2nd iteration */ + /* prepare next (== 2nd) iteration */ + src_len = dst_len - src_len; /* remainder */ + src_len_sum += src_len; + } + rc = smc_tx_rdma_write(conn, dst_off, num_sges, sges); + if (rc) + return rc; + if (dst_len_sum == len) + break; /* either on 1st or 2nd iteration */ + /* prepare next (== 2nd) iteration */ + dst_off = 0; /* modulo offset in RMBE ring buffer */ + dst_len = len - dst_len; /* remainder */ + dst_len_sum += dst_len; + src_len = min_t(int, dst_len, conn->sndbuf_desc->len - + sent_count); + src_len_sum = src_len; + } + return 0; +} + +/* SMC-D helper for smc_tx_rdma_writes() */ +static int smcd_tx_rdma_writes(struct smc_connection *conn, size_t len, + size_t src_off, size_t src_len, + size_t dst_off, size_t dst_len) +{ + int src_len_sum = src_len, dst_len_sum = dst_len; + int srcchunk, dstchunk; + int rc; + + for (dstchunk = 0; dstchunk < 2; dstchunk++) { + for (srcchunk = 0; srcchunk < 2; srcchunk++) { + void *data = conn->sndbuf_desc->cpu_addr + src_off; + + rc = smcd_tx_ism_write(conn, data, src_len, dst_off + + sizeof(struct smcd_cdc_msg), 0); + if (rc) + return rc; + dst_off += src_len; + src_off += src_len; + if (src_off >= conn->sndbuf_desc->len) + src_off -= conn->sndbuf_desc->len; + /* modulo in send ring */ + if (src_len_sum == dst_len) + break; /* either on 1st or 2nd iteration */ + /* prepare next (== 2nd) iteration */ + src_len = dst_len - src_len; /* remainder */ + src_len_sum += src_len; + } + if (dst_len_sum == len) + break; /* either on 1st or 2nd iteration */ + /* prepare next (== 2nd) iteration */ + dst_off = 0; /* modulo offset in RMBE ring buffer */ + dst_len = len - dst_len; /* remainder */ + dst_len_sum += dst_len; + src_len = min_t(int, dst_len, conn->sndbuf_desc->len - src_off); + src_len_sum = src_len; + } + return 0; +} + /* sndbuf consumer: prepare all necessary (src&dst) chunks of data transmit; * usable snd_wnd as max transmit */ static int smc_tx_rdma_writes(struct smc_connection *conn) { - size_t src_off, src_len, dst_off, dst_len; /* current chunk values */ - size_t len, dst_len_sum, src_len_sum, dstchunk, srcchunk; + size_t len, src_len, dst_off, dst_len; /* current chunk values */ union smc_host_cursor sent, prep, prod, cons; - struct ib_sge sges[SMC_IB_MAX_SEND_SGE]; - struct smc_link_group *lgr = conn->lgr; struct smc_cdc_producer_flags *pflags; int to_send, rmbespace; - struct smc_link *link; - dma_addr_t dma_addr; - int num_sges; int rc; /* source: sndbuf */ @@ -341,7 +443,6 @@ static int smc_tx_rdma_writes(struct smc_connection *conn) len = min(to_send, rmbespace); /* initialize variables for first 
iteration of subsequent nested loop */ - link = &lgr->lnk[SMC_SINGLE_LINK]; dst_off = prod.count; if (prod.wrap == cons.wrap) { /* the filled destination area is unwrapped, @@ -358,8 +459,6 @@ static int smc_tx_rdma_writes(struct smc_connection *conn) */ dst_len = len; } - dst_len_sum = dst_len; - src_off = sent.count; /* dst_len determines the maximum src_len */ if (sent.count + dst_len <= conn->sndbuf_desc->len) { /* unwrapped src case: single chunk of entire dst_len */ @@ -368,38 +467,15 @@ static int smc_tx_rdma_writes(struct smc_connection *conn) /* wrapped src case: 2 chunks of sum dst_len; start with 1st: */ src_len = conn->sndbuf_desc->len - sent.count; } - src_len_sum = src_len; - dma_addr = sg_dma_address(conn->sndbuf_desc->sgt[SMC_SINGLE_LINK].sgl); - for (dstchunk = 0; dstchunk < 2; dstchunk++) { - num_sges = 0; - for (srcchunk = 0; srcchunk < 2; srcchunk++) { - sges[srcchunk].addr = dma_addr + src_off; - sges[srcchunk].length = src_len; - sges[srcchunk].lkey = link->roce_pd->local_dma_lkey; - num_sges++; - src_off += src_len; - if (src_off >= conn->sndbuf_desc->len) - src_off -= conn->sndbuf_desc->len; - /* modulo in send ring */ - if (src_len_sum == dst_len) - break; /* either on 1st or 2nd iteration */ - /* prepare next (== 2nd) iteration */ - src_len = dst_len - src_len; /* remainder */ - src_len_sum += src_len; - } - rc = smc_tx_rdma_write(conn, dst_off, num_sges, sges); - if (rc) - return rc; - if (dst_len_sum == len) - break; /* either on 1st or 2nd iteration */ - /* prepare next (== 2nd) iteration */ - dst_off = 0; /* modulo offset in RMBE ring buffer */ - dst_len = len - dst_len; /* remainder */ - dst_len_sum += dst_len; - src_len = min_t(int, - dst_len, conn->sndbuf_desc->len - sent.count); - src_len_sum = src_len; - } + + if (conn->lgr->is_smcd) + rc = smcd_tx_rdma_writes(conn, len, sent.count, src_len, + dst_off, dst_len); + else + rc = smcr_tx_rdma_writes(conn, len, sent.count, src_len, + dst_off, dst_len); + if (rc) + return rc; if (conn->urg_tx_pend && len == to_send) pflags->urg_data_present = 1; @@ -420,7 +496,7 @@ static int smc_tx_rdma_writes(struct smc_connection *conn) /* Wakeup sndbuf consumers from any context (IRQ or process) * since there is more data to transmit; usable snd_wnd as max transmit */ -int smc_tx_sndbuf_nonempty(struct smc_connection *conn) +static int smcr_tx_sndbuf_nonempty(struct smc_connection *conn) { struct smc_cdc_producer_flags *pflags; struct smc_cdc_tx_pend *pend; @@ -467,6 +543,37 @@ out_unlock: return rc; } +static int smcd_tx_sndbuf_nonempty(struct smc_connection *conn) +{ + struct smc_cdc_producer_flags *pflags = &conn->local_tx_ctrl.prod_flags; + int rc = 0; + + spin_lock_bh(&conn->send_lock); + if (!pflags->urg_data_present) + rc = smc_tx_rdma_writes(conn); + if (!rc) + rc = smcd_cdc_msg_send(conn); + + if (!rc && pflags->urg_data_present) { + pflags->urg_data_pending = 0; + pflags->urg_data_present = 0; + } + spin_unlock_bh(&conn->send_lock); + return rc; +} + +int smc_tx_sndbuf_nonempty(struct smc_connection *conn) +{ + int rc; + + if (conn->lgr->is_smcd) + rc = smcd_tx_sndbuf_nonempty(conn); + else + rc = smcr_tx_sndbuf_nonempty(conn); + + return rc; +} + /* Wakeup sndbuf consumers from process context * since there is more data to transmit */ diff --git a/net/smc/smc_tx.h b/net/smc/smc_tx.h index 9d2238909fa0..b22bdc5694c4 100644 --- a/net/smc/smc_tx.h +++ b/net/smc/smc_tx.h @@ -33,5 +33,7 @@ int smc_tx_sendmsg(struct smc_sock *smc, struct msghdr *msg, size_t len); int smc_tx_sndbuf_nonempty(struct smc_connection *conn); 
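Illustrative sketch, not from the patch or the kernel tree: both smcr_tx_rdma_writes() and smcd_tx_rdma_writes() above perform the same wrap-around decomposition, moving "len" bytes out of a circular send buffer (starting at src_off) into a circular RMBE (starting at dst_off), split into at most two chunks per ring. The hypothetical helper below restates that decomposition as a plain loop over contiguous runs, assuming the usual linux/kernel.h and linux/string.h helpers.

static void tx_ring_copy(char *src, size_t src_size, size_t src_off,
			 char *dst, size_t dst_size, size_t dst_off,
			 size_t len)
{
	while (len) {
		/* largest run that is contiguous in both rings */
		size_t n = min3(len, src_size - src_off, dst_size - dst_off);

		memcpy(dst + dst_off, src + src_off, n);
		src_off = (src_off + n) % src_size;	/* modulo in send ring */
		dst_off = (dst_off + n) % dst_size;	/* modulo in RMBE ring */
		len -= n;
	}
}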
void smc_tx_sndbuf_nonfull(struct smc_sock *smc); void smc_tx_consumer_update(struct smc_connection *conn, bool force); +int smcd_tx_ism_write(struct smc_connection *conn, void *data, size_t len, + u32 offset, int signal); #endif /* SMC_TX_H */ diff --git a/net/strparser/strparser.c b/net/strparser/strparser.c index 625acb27efcc..3a512936eea9 100644 --- a/net/strparser/strparser.c +++ b/net/strparser/strparser.c @@ -140,11 +140,13 @@ static int __strp_recv(read_descriptor_t *desc, struct sk_buff *orig_skb, /* We are going to append to the frags_list of head. * Need to unshare the frag_list. */ - err = skb_unclone(head, GFP_ATOMIC); - if (err) { - STRP_STATS_INCR(strp->stats.mem_fail); - desc->error = err; - return 0; + if (skb_has_frag_list(head)) { + err = skb_unclone(head, GFP_ATOMIC); + if (err) { + STRP_STATS_INCR(strp->stats.mem_fail); + desc->error = err; + return 0; + } } if (unlikely(skb_shinfo(head)->frag_list)) { @@ -201,14 +203,16 @@ static int __strp_recv(read_descriptor_t *desc, struct sk_buff *orig_skb, memset(stm, 0, sizeof(*stm)); stm->strp.offset = orig_offset + eaten; } else { - /* Unclone since we may be appending to an skb that we + /* Unclone if we are appending to an skb that we * already share a frag_list with. */ - err = skb_unclone(skb, GFP_ATOMIC); - if (err) { - STRP_STATS_INCR(strp->stats.mem_fail); - desc->error = err; - break; + if (skb_has_frag_list(skb)) { + err = skb_unclone(skb, GFP_ATOMIC); + if (err) { + STRP_STATS_INCR(strp->stats.mem_fail); + desc->error = err; + break; + } } stm = _strp_msg(head); diff --git a/net/tipc/bearer.c b/net/tipc/bearer.c index 2dfb492a7c94..fd6d8f18955c 100644 --- a/net/tipc/bearer.c +++ b/net/tipc/bearer.c @@ -610,6 +610,7 @@ static int tipc_l2_device_event(struct notifier_block *nb, unsigned long evt, case NETDEV_CHANGE: if (netif_carrier_ok(dev)) break; + /* else: fall through */ case NETDEV_UP: test_and_set_bit_lock(0, &b->up); break; diff --git a/net/tipc/group.c b/net/tipc/group.c index d7a7befeddd4..8f43e7d6046b 100644 --- a/net/tipc/group.c +++ b/net/tipc/group.c @@ -159,11 +159,6 @@ u32 tipc_group_exclude(struct tipc_group *grp) return 0; } -int tipc_group_size(struct tipc_group *grp) -{ - return grp->member_cnt; -} - struct tipc_group *tipc_group_create(struct net *net, u32 portid, struct tipc_group_req *mreq, bool *group_is_open) @@ -918,3 +913,35 @@ void tipc_group_member_evt(struct tipc_group *grp, } *sk_rcvbuf = tipc_group_rcvbuf_limit(grp); } + +int tipc_group_fill_sock_diag(struct tipc_group *grp, struct sk_buff *skb) +{ + struct nlattr *group = nla_nest_start(skb, TIPC_NLA_SOCK_GROUP); + + if (nla_put_u32(skb, TIPC_NLA_SOCK_GROUP_ID, + grp->type) || + nla_put_u32(skb, TIPC_NLA_SOCK_GROUP_INSTANCE, + grp->instance) || + nla_put_u32(skb, TIPC_NLA_SOCK_GROUP_BC_SEND_NEXT, + grp->bc_snd_nxt)) + goto group_msg_cancel; + + if (grp->scope == TIPC_NODE_SCOPE) + if (nla_put_flag(skb, TIPC_NLA_SOCK_GROUP_NODE_SCOPE)) + goto group_msg_cancel; + + if (grp->scope == TIPC_CLUSTER_SCOPE) + if (nla_put_flag(skb, TIPC_NLA_SOCK_GROUP_CLUSTER_SCOPE)) + goto group_msg_cancel; + + if (*grp->open) + if (nla_put_flag(skb, TIPC_NLA_SOCK_GROUP_OPEN)) + goto group_msg_cancel; + + nla_nest_end(skb, group); + return 0; + +group_msg_cancel: + nla_nest_cancel(skb, group); + return -1; +} diff --git a/net/tipc/group.h b/net/tipc/group.h index 5996af6e9f1d..76b4e5a7b39d 100644 --- a/net/tipc/group.h +++ b/net/tipc/group.h @@ -72,4 +72,5 @@ void tipc_group_update_rcv_win(struct tipc_group *grp, int blks, u32 node, u32 port, struct 
sk_buff_head *xmitq); u16 tipc_group_bc_snd_nxt(struct tipc_group *grp); void tipc_group_update_member(struct tipc_member *m, int len); +int tipc_group_fill_sock_diag(struct tipc_group *grp, struct sk_buff *skb); #endif diff --git a/net/tipc/link.c b/net/tipc/link.c index 695acb783969..6987ffc8e7a1 100644 --- a/net/tipc/link.c +++ b/net/tipc/link.c @@ -106,7 +106,8 @@ struct tipc_stats { * @backlogq: queue for messages waiting to be sent * @snt_nxt: next sequence number to use for outbound messages * @last_retransmitted: sequence number of most recently retransmitted message - * @stale_count: # of identical retransmit requests made by peer + * @stale_cnt: counter for number of identical retransmit attempts + * @stale_limit: time when repeated identical retransmits must force link reset * @ackers: # of peers that needs to ack each packet before it can be released * @acked: # last packet acked by a certain peer. Used for broadcast. * @rcv_nxt: next sequence number to expect for inbound messages @@ -127,14 +128,17 @@ struct tipc_link { struct net *net; /* Management and link supervision data */ - u32 peer_session; - u32 session; + u16 peer_session; + u16 session; + u16 snd_nxt_state; + u16 rcv_nxt_state; u32 peer_bearer_id; u32 bearer_id; u32 tolerance; u32 abort_limit; u32 state; u16 peer_caps; + bool in_session; bool active; u32 silent_intv_cnt; char if_name[TIPC_MAX_IF_NAME]; @@ -161,7 +165,8 @@ struct tipc_link { u16 snd_nxt; u16 last_retransm; u16 window; - u32 stale_count; + u16 stale_cnt; + unsigned long stale_limit; /* Reception */ u16 rcv_nxt; @@ -212,11 +217,6 @@ enum { */ #define TIPC_NACK_INTV (TIPC_MIN_LINK_WIN * 2) -/* Wildcard value for link session numbers. When it is known that - * peer endpoint is down, any session number must be accepted. 
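Clarifying note, not part of the patch: the old ANY_SESSION wildcard (0x10000) only worked as an out-of-band marker because peer_session was a u32 carrying 16-bit values; with the session fields narrowed to u16 above there is no spare bit pattern left, which is why an explicit "in_session" flag is introduced instead. Session and state sequence numbers keep using TIPC's modulo-2^16 ordering, restated here with a hypothetical name:

static inline bool u16_less(u16 left, u16 right)
{
	/* "left" precedes "right" in serial-number (mod 2^16) order */
	return left != right && (u16)(right - left) < 32768;
}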
- */ -#define ANY_SESSION 0x10000 - /* Link FSM states: */ enum { @@ -297,11 +297,6 @@ static bool link_is_bc_rcvlink(struct tipc_link *l) return ((l->bc_rcvlink == l) && !link_is_bc_sndlink(l)); } -int tipc_link_is_active(struct tipc_link *l) -{ - return l->active; -} - void tipc_link_set_active(struct tipc_link *l, bool active) { l->active = active; @@ -337,6 +332,11 @@ char tipc_link_plane(struct tipc_link *l) return l->net_plane; } +void tipc_link_update_caps(struct tipc_link *l, u16 capabilities) +{ + l->peer_caps = capabilities; +} + void tipc_link_add_bc_peer(struct tipc_link *snd_l, struct tipc_link *uc_l, struct sk_buff_head *xmitq) @@ -469,7 +469,7 @@ bool tipc_link_create(struct net *net, char *if_name, int bearer_id, l->addr = peer; l->peer_caps = peer_caps; l->net = net; - l->peer_session = ANY_SESSION; + l->in_session = false; l->bearer_id = bearer_id; l->tolerance = tolerance; l->net_plane = net_plane; @@ -838,7 +838,7 @@ void link_prepare_wakeup(struct tipc_link *l) void tipc_link_reset(struct tipc_link *l) { - l->peer_session = ANY_SESSION; + l->in_session = false; l->session++; l->mtu = l->advertised_mtu; __skb_queue_purge(&l->transmq); @@ -857,10 +857,12 @@ void tipc_link_reset(struct tipc_link *l) l->rcv_unacked = 0; l->snd_nxt = 1; l->rcv_nxt = 1; + l->snd_nxt_state = 1; + l->rcv_nxt_state = 1; l->acked = 0; l->silent_intv_cnt = 0; l->rst_cnt = 0; - l->stale_count = 0; + l->stale_cnt = 0; l->bc_peer_is_up = false; memset(&l->mon_state, 0, sizeof(l->mon_state)); tipc_link_reset_stats(l); @@ -997,39 +999,41 @@ static void link_retransmit_failure(struct tipc_link *l, struct sk_buff *skb) msg_seqno(hdr), msg_prevnode(hdr), msg_orignode(hdr)); } -int tipc_link_retrans(struct tipc_link *l, struct tipc_link *nacker, +/* tipc_link_retrans() - retransmit one or more packets + * @l: the link to transmit on + * @r: the receiving link ordering the retransmit. 
Same as l if unicast + * @from: retransmit from (inclusive) this sequence number + * @to: retransmit to (inclusive) this sequence number + * xmitq: queue for accumulating the retransmitted packets + */ +int tipc_link_retrans(struct tipc_link *l, struct tipc_link *r, u16 from, u16 to, struct sk_buff_head *xmitq) { struct sk_buff *_skb, *skb = skb_peek(&l->transmq); - struct tipc_msg *hdr; - u16 ack = l->rcv_nxt - 1; u16 bc_ack = l->bc_rcvlink->rcv_nxt - 1; + u16 ack = l->rcv_nxt - 1; + struct tipc_msg *hdr; if (!skb) return 0; /* Detect repeated retransmit failures on same packet */ - if (nacker->last_retransm != buf_seqno(skb)) { - nacker->last_retransm = buf_seqno(skb); - nacker->stale_count = 1; - } else if (++nacker->stale_count > 100) { + if (r->last_retransm != buf_seqno(skb)) { + r->last_retransm = buf_seqno(skb); + r->stale_limit = jiffies + msecs_to_jiffies(l->tolerance); + } else if (++r->stale_cnt > 99 && time_after(jiffies, r->stale_limit)) { link_retransmit_failure(l, skb); - nacker->stale_count = 0; if (link_is_bc_sndlink(l)) return TIPC_LINK_DOWN_EVT; return tipc_link_fsm_evt(l, LINK_FAILURE_EVT); } - /* Move forward to where retransmission should start */ skb_queue_walk(&l->transmq, skb) { - if (!less(buf_seqno(skb), from)) - break; - } - - skb_queue_walk_from(&l->transmq, skb) { - if (more(buf_seqno(skb), to)) - break; hdr = buf_msg(skb); + if (less(msg_seqno(hdr), from)) + continue; + if (more(msg_seqno(hdr), to)) + break; _skb = __pskb_copy(skb, MIN_H_SIZE, GFP_ATOMIC); if (!_skb) return 0; @@ -1063,6 +1067,7 @@ static bool tipc_data_input(struct tipc_link *l, struct sk_buff *skb, skb_queue_tail(mc_inputq, skb); return true; } + /* else: fall through */ case CONN_MANAGER: skb_queue_tail(inputq, skb); return true; @@ -1271,6 +1276,7 @@ int tipc_link_rcv(struct tipc_link *l, struct sk_buff *skb, /* Forward queues and wake up waiting users */ if (likely(tipc_link_release_pkts(l, msg_ack(hdr)))) { + l->stale_cnt = 0; tipc_link_advance_backlog(l, xmitq); if (unlikely(!skb_queue_empty(&l->wakeupq))) link_prepare_wakeup(l); @@ -1347,6 +1353,8 @@ static void tipc_link_build_proto_msg(struct tipc_link *l, int mtyp, bool probe, msg_set_seqno(hdr, l->snd_nxt + U16_MAX / 2); if (mtyp == STATE_MSG) { + if (l->peer_caps & TIPC_LINK_PROTO_SEQNO) + msg_set_seqno(hdr, l->snd_nxt_state++); msg_set_seq_gap(hdr, rcvgap); msg_set_bc_gap(hdr, link_bc_rcv_gap(bcl)); msg_set_probe(hdr, probe); @@ -1438,6 +1446,44 @@ tnl: } } +/* tipc_link_validate_msg(): validate message against current link state + * Returns true if message should be accepted, otherwise false + */ +bool tipc_link_validate_msg(struct tipc_link *l, struct tipc_msg *hdr) +{ + u16 curr_session = l->peer_session; + u16 session = msg_session(hdr); + int mtyp = msg_type(hdr); + + if (msg_user(hdr) != LINK_PROTOCOL) + return true; + + switch (mtyp) { + case RESET_MSG: + if (!l->in_session) + return true; + /* Accept only RESET with new session number */ + return more(session, curr_session); + case ACTIVATE_MSG: + if (!l->in_session) + return true; + /* Accept only ACTIVATE with new or current session number */ + return !less(session, curr_session); + case STATE_MSG: + /* Accept only STATE with current session number */ + if (!l->in_session) + return false; + if (session != curr_session) + return false; + if (!(l->peer_caps & TIPC_LINK_PROTO_SEQNO)) + return true; + /* Accept only STATE with new sequence number */ + return !less(msg_seqno(hdr), l->rcv_nxt_state); + default: + return false; + } +} + /* tipc_link_proto_rcv(): receive link 
level protocol message : * Note that network plane id propagates through the network, and may * change at any time. The node with lowest numerical id determines @@ -1471,17 +1517,12 @@ static int tipc_link_proto_rcv(struct tipc_link *l, struct sk_buff *skb, hdr = buf_msg(skb); data = msg_data(hdr); + if (!tipc_link_validate_msg(l, hdr)) + goto exit; + switch (mtyp) { case RESET_MSG: - - /* Ignore duplicate RESET with old session number */ - if ((less_eq(msg_session(hdr), l->peer_session)) && - (l->peer_session != ANY_SESSION)) - break; - /* fall thru' */ - case ACTIVATE_MSG: - /* Complete own link name with peer's interface name */ if_name = strrchr(l->name, ':') + 1; if (sizeof(l->name) - (if_name - l->name) <= TIPC_MAX_IF_NAME) @@ -1509,12 +1550,14 @@ static int tipc_link_proto_rcv(struct tipc_link *l, struct sk_buff *skb, rc = TIPC_LINK_UP_EVT; l->peer_session = msg_session(hdr); + l->in_session = true; l->peer_bearer_id = msg_bearer_id(hdr); if (l->mtu > msg_max_pkt(hdr)) l->mtu = msg_max_pkt(hdr); break; case STATE_MSG: + l->rcv_nxt_state = msg_seqno(hdr) + 1; /* Update own tolerance if peer indicates a non-zero value */ if (in_range(peers_tol, TIPC_MIN_LINK_TOL, TIPC_MAX_LINK_TOL)) diff --git a/net/tipc/link.h b/net/tipc/link.h index ec59348a81e8..7bc494a33fdf 100644 --- a/net/tipc/link.h +++ b/net/tipc/link.h @@ -110,6 +110,8 @@ char *tipc_link_name(struct tipc_link *l); char tipc_link_plane(struct tipc_link *l); int tipc_link_prio(struct tipc_link *l); int tipc_link_window(struct tipc_link *l); +void tipc_link_update_caps(struct tipc_link *l, u16 capabilities); +bool tipc_link_validate_msg(struct tipc_link *l, struct tipc_msg *hdr); unsigned long tipc_link_tolerance(struct tipc_link *l); void tipc_link_set_tolerance(struct tipc_link *l, u32 tol, struct sk_buff_head *xmitq); diff --git a/net/tipc/msg.c b/net/tipc/msg.c index b6c45dccba3d..b61891054709 100644 --- a/net/tipc/msg.c +++ b/net/tipc/msg.c @@ -416,26 +416,31 @@ bool tipc_msg_bundle(struct sk_buff *skb, struct tipc_msg *msg, u32 mtu) */ bool tipc_msg_extract(struct sk_buff *skb, struct sk_buff **iskb, int *pos) { - struct tipc_msg *msg; - int imsz, offset; + struct tipc_msg *hdr, *ihdr; + int imsz; *iskb = NULL; if (unlikely(skb_linearize(skb))) goto none; - msg = buf_msg(skb); - offset = msg_hdr_sz(msg) + *pos; - if (unlikely(offset > (msg_size(msg) - MIN_H_SIZE))) + hdr = buf_msg(skb); + if (unlikely(*pos > (msg_data_sz(hdr) - MIN_H_SIZE))) goto none; - *iskb = skb_clone(skb, GFP_ATOMIC); - if (unlikely(!*iskb)) + ihdr = (struct tipc_msg *)(msg_data(hdr) + *pos); + imsz = msg_size(ihdr); + + if ((*pos + imsz) > msg_data_sz(hdr)) goto none; - skb_pull(*iskb, offset); - imsz = msg_size(buf_msg(*iskb)); - skb_trim(*iskb, imsz); + + *iskb = tipc_buf_acquire(imsz, GFP_ATOMIC); + if (!*iskb) + goto none; + + skb_copy_to_linear_data(*iskb, ihdr, imsz); if (unlikely(!tipc_msg_validate(iskb))) goto none; + *pos += align(imsz); return true; none: @@ -531,12 +536,6 @@ bool tipc_msg_reverse(u32 own_node, struct sk_buff **skb, int err) msg_set_hdr_sz(hdr, BASIC_H_SIZE); } - if (skb_cloned(_skb) && - pskb_expand_head(_skb, BUF_HEADROOM, BUF_TAILROOM, GFP_ATOMIC)) - goto exit; - - /* reassign after skb header modifications */ - hdr = buf_msg(_skb); /* Now reverse the concerned fields */ msg_set_errcode(hdr, err); msg_set_non_seq(hdr, 0); @@ -595,10 +594,6 @@ bool tipc_msg_lookup_dest(struct net *net, struct sk_buff *skb, int *err) if (!skb_cloned(skb)) return true; - /* Unclone buffer in case it was bundled */ - if 
(pskb_expand_head(skb, BUF_HEADROOM, BUF_TAILROOM, GFP_ATOMIC)) - return false; - return true; } diff --git a/net/tipc/node.c b/net/tipc/node.c index 0453bd451ce8..68014f1b6976 100644 --- a/net/tipc/node.c +++ b/net/tipc/node.c @@ -45,6 +45,7 @@ #include "netlink.h" #define INVALID_NODE_SIG 0x10000 +#define NODE_CLEANUP_AFTER 300000 /* Flags used to take different actions according to flag type * TIPC_NOTIFY_NODE_DOWN: notify node is down @@ -96,6 +97,7 @@ struct tipc_bclink_entry { * @link_id: local and remote bearer ids of changing link, if any * @publ_list: list of publications * @rcu: rcu struct for tipc_node + * @delete_at: indicates the time for deleting a down node */ struct tipc_node { u32 addr; @@ -121,6 +123,7 @@ struct tipc_node { unsigned long keepalive_intv; struct timer_list timer; struct rcu_head rcu; + unsigned long delete_at; }; /* Node FSM states and events: @@ -160,6 +163,7 @@ static struct tipc_node *tipc_node_find(struct net *net, u32 addr); static struct tipc_node *tipc_node_find_by_id(struct net *net, u8 *id); static void tipc_node_put(struct tipc_node *node); static bool node_is_up(struct tipc_node *n); +static void tipc_node_delete_from_list(struct tipc_node *node); struct tipc_sock_conn { u32 port; @@ -359,13 +363,24 @@ static struct tipc_node *tipc_node_create(struct net *net, u32 addr, { struct tipc_net *tn = net_generic(net, tipc_net_id); struct tipc_node *n, *temp_node; + struct tipc_link *l; + int bearer_id; int i; spin_lock_bh(&tn->node_list_lock); n = tipc_node_find(net, addr); if (n) { + if (n->capabilities == capabilities) + goto exit; /* Same node may come back with new capabilities */ + write_lock_bh(&n->lock); n->capabilities = capabilities; + for (bearer_id = 0; bearer_id < MAX_BEARERS; bearer_id++) { + l = n->links[bearer_id].link; + if (l) + tipc_link_update_caps(l, capabilities); + } + write_unlock_bh(&n->lock); goto exit; } n = kzalloc(sizeof(*n), GFP_ATOMIC); @@ -390,6 +405,7 @@ static struct tipc_node *tipc_node_create(struct net *net, u32 addr, for (i = 0; i < MAX_BEARERS; i++) spin_lock_init(&n->links[i].lock); n->state = SELF_DOWN_PEER_LEAVING; + n->delete_at = jiffies + msecs_to_jiffies(NODE_CLEANUP_AFTER); n->signature = INVALID_NODE_SIG; n->active_links[0] = INVALID_BEARER_ID; n->active_links[1] = INVALID_BEARER_ID; @@ -433,11 +449,16 @@ static void tipc_node_calculate_timer(struct tipc_node *n, struct tipc_link *l) tipc_link_set_abort_limit(l, tol / n->keepalive_intv); } -static void tipc_node_delete(struct tipc_node *node) +static void tipc_node_delete_from_list(struct tipc_node *node) { list_del_rcu(&node->list); hlist_del_rcu(&node->hash); tipc_node_put(node); +} + +static void tipc_node_delete(struct tipc_node *node) +{ + tipc_node_delete_from_list(node); del_timer_sync(&node->timer); tipc_node_put(node); @@ -544,6 +565,42 @@ void tipc_node_remove_conn(struct net *net, u32 dnode, u32 port) tipc_node_put(node); } +static void tipc_node_clear_links(struct tipc_node *node) +{ + int i; + + for (i = 0; i < MAX_BEARERS; i++) { + struct tipc_link_entry *le = &node->links[i]; + + if (le->link) { + kfree(le->link); + le->link = NULL; + node->link_cnt--; + } + } +} + +/* tipc_node_cleanup - delete nodes that does not + * have active links for NODE_CLEANUP_AFTER time + */ +static int tipc_node_cleanup(struct tipc_node *peer) +{ + struct tipc_net *tn = tipc_net(peer->net); + bool deleted = false; + + spin_lock_bh(&tn->node_list_lock); + tipc_node_write_lock(peer); + + if (!node_is_up(peer) && time_after(jiffies, peer->delete_at)) { + 
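	/* Clarifying note, not part of the patch: together with
	 * tipc_node_create() and node_lost_contact() re-arming
	 * n->delete_at = jiffies + msecs_to_jiffies(NODE_CLEANUP_AFTER),
	 * the check above means a peer with no active links is dropped from
	 * the node list roughly NODE_CLEANUP_AFTER (300000 ms, i.e. 5 min)
	 * after it was created or last lost contact, and tipc_node_timeout()
	 * then releases the timer's reference instead of re-arming the timer.
	 */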
tipc_node_clear_links(peer); + tipc_node_delete_from_list(peer); + deleted = true; + } + tipc_node_write_unlock(peer); + spin_unlock_bh(&tn->node_list_lock); + return deleted; +} + /* tipc_node_timeout - handle expiration of node timer */ static void tipc_node_timeout(struct timer_list *t) @@ -551,21 +608,29 @@ static void tipc_node_timeout(struct timer_list *t) struct tipc_node *n = from_timer(n, t, timer); struct tipc_link_entry *le; struct sk_buff_head xmitq; + int remains = n->link_cnt; int bearer_id; int rc = 0; + if (!node_is_up(n) && tipc_node_cleanup(n)) { + /*Removing the reference of Timer*/ + tipc_node_put(n); + return; + } + __skb_queue_head_init(&xmitq); - for (bearer_id = 0; bearer_id < MAX_BEARERS; bearer_id++) { + for (bearer_id = 0; remains && (bearer_id < MAX_BEARERS); bearer_id++) { tipc_node_read_lock(n); le = &n->links[bearer_id]; - spin_lock_bh(&le->lock); if (le->link) { + spin_lock_bh(&le->lock); /* Link tolerance may change asynchronously: */ tipc_node_calculate_timer(n, le->link); rc = tipc_link_timeout(le->link, &xmitq); + spin_unlock_bh(&le->lock); + remains--; } - spin_unlock_bh(&le->lock); tipc_node_read_unlock(n); tipc_bearer_xmit(n->net, bearer_id, &xmitq, &le->maddr); if (rc & TIPC_LINK_DOWN_EVT) @@ -1174,6 +1239,7 @@ static void node_lost_contact(struct tipc_node *n, uint i; pr_debug("Lost contact with %x\n", n->addr); + n->delete_at = jiffies + msecs_to_jiffies(NODE_CLEANUP_AFTER); /* Clean up broadcast state */ tipc_bcast_remove_peer(n->net, n->bc_entry.link); @@ -1481,7 +1547,7 @@ static void tipc_node_bc_rcv(struct net *net, struct sk_buff *skb, int bearer_id * tipc_node_check_state - check and if necessary update node state * @skb: TIPC packet * @bearer_id: identity of bearer delivering the packet - * Returns true if state is ok, otherwise consumes buffer and returns false + * Returns true if state and msg are ok, otherwise false */ static bool tipc_node_check_state(struct tipc_node *n, struct sk_buff *skb, int bearer_id, struct sk_buff_head *xmitq) @@ -1515,6 +1581,9 @@ static bool tipc_node_check_state(struct tipc_node *n, struct sk_buff *skb, } } + if (!tipc_link_validate_msg(l, hdr)) + return false; + /* Check and update node accesibility if applicable */ if (state == SELF_UP_PEER_COMING) { if (!tipc_link_is_up(l)) @@ -1743,7 +1812,6 @@ int tipc_nl_peer_rm(struct sk_buff *skb, struct genl_info *info) struct tipc_node *peer; u32 addr; int err; - int i; /* We identify the peer by its net */ if (!info->attrs[TIPC_NLA_NET]) @@ -1778,15 +1846,7 @@ int tipc_nl_peer_rm(struct sk_buff *skb, struct genl_info *info) goto err_out; } - for (i = 0; i < MAX_BEARERS; i++) { - struct tipc_link_entry *le = &peer->links[i]; - - if (le->link) { - kfree(le->link); - le->link = NULL; - peer->link_cnt--; - } - } + tipc_node_clear_links(peer); tipc_node_write_unlock(peer); tipc_node_delete(peer); diff --git a/net/tipc/node.h b/net/tipc/node.h index 846c8f240872..48b3298a248d 100644 --- a/net/tipc/node.h +++ b/net/tipc/node.h @@ -49,14 +49,16 @@ enum { TIPC_BCAST_STATE_NACK = (1 << 2), TIPC_BLOCK_FLOWCTL = (1 << 3), TIPC_BCAST_RCAST = (1 << 4), - TIPC_NODE_ID128 = (1 << 5) + TIPC_NODE_ID128 = (1 << 5), + TIPC_LINK_PROTO_SEQNO = (1 << 6) }; -#define TIPC_NODE_CAPABILITIES (TIPC_BCAST_SYNCH | \ - TIPC_BCAST_STATE_NACK | \ - TIPC_BCAST_RCAST | \ - TIPC_BLOCK_FLOWCTL | \ - TIPC_NODE_ID128) +#define TIPC_NODE_CAPABILITIES (TIPC_BCAST_SYNCH | \ + TIPC_BCAST_STATE_NACK | \ + TIPC_BCAST_RCAST | \ + TIPC_BLOCK_FLOWCTL | \ + TIPC_NODE_ID128 | \ + TIPC_LINK_PROTO_SEQNO) #define 
INVALID_BEARER_ID -1 void tipc_node_stop(struct net *net); diff --git a/net/tipc/socket.c b/net/tipc/socket.c index 930852c54d7a..3d21414ba357 100644 --- a/net/tipc/socket.c +++ b/net/tipc/socket.c @@ -3320,6 +3320,11 @@ int tipc_sk_fill_sock_diag(struct sk_buff *skb, struct netlink_callback *cb, goto stat_msg_cancel; nla_nest_end(skb, stat); + + if (tsk->group) + if (tipc_group_fill_sock_diag(tsk->group, skb)) + goto stat_msg_cancel; + nla_nest_end(skb, attrs); return 0; diff --git a/net/tls/tls_device.c b/net/tls/tls_device.c index a7a8f8e20ff3..1e968d238adf 100644 --- a/net/tls/tls_device.c +++ b/net/tls/tls_device.c @@ -52,9 +52,12 @@ static DEFINE_SPINLOCK(tls_device_lock); static void tls_device_free_ctx(struct tls_context *ctx) { - struct tls_offload_context *offload_ctx = tls_offload_ctx(ctx); + if (ctx->tx_conf == TLS_HW) + kfree(tls_offload_ctx_tx(ctx)); + + if (ctx->rx_conf == TLS_HW) + kfree(tls_offload_ctx_rx(ctx)); - kfree(offload_ctx); kfree(ctx); } @@ -71,10 +74,11 @@ static void tls_device_gc_task(struct work_struct *work) list_for_each_entry_safe(ctx, tmp, &gc_list, list) { struct net_device *netdev = ctx->netdev; - if (netdev) { + if (netdev && ctx->tx_conf == TLS_HW) { netdev->tlsdev_ops->tls_dev_del(netdev, ctx, TLS_OFFLOAD_CTX_DIR_TX); dev_put(netdev); + ctx->netdev = NULL; } list_del(&ctx->list); @@ -82,6 +86,22 @@ static void tls_device_gc_task(struct work_struct *work) } } +static void tls_device_attach(struct tls_context *ctx, struct sock *sk, + struct net_device *netdev) +{ + if (sk->sk_destruct != tls_device_sk_destruct) { + refcount_set(&ctx->refcount, 1); + dev_hold(netdev); + ctx->netdev = netdev; + spin_lock_irq(&tls_device_lock); + list_add_tail(&ctx->list, &tls_device_list); + spin_unlock_irq(&tls_device_lock); + + ctx->sk_destruct = sk->sk_destruct; + sk->sk_destruct = tls_device_sk_destruct; + } +} + static void tls_device_queue_ctx_destruction(struct tls_context *ctx) { unsigned long flags; @@ -125,7 +145,7 @@ static void destroy_record(struct tls_record_info *record) kfree(record); } -static void delete_all_records(struct tls_offload_context *offload_ctx) +static void delete_all_records(struct tls_offload_context_tx *offload_ctx) { struct tls_record_info *info, *temp; @@ -141,14 +161,14 @@ static void tls_icsk_clean_acked(struct sock *sk, u32 acked_seq) { struct tls_context *tls_ctx = tls_get_ctx(sk); struct tls_record_info *info, *temp; - struct tls_offload_context *ctx; + struct tls_offload_context_tx *ctx; u64 deleted_records = 0; unsigned long flags; if (!tls_ctx) return; - ctx = tls_offload_ctx(tls_ctx); + ctx = tls_offload_ctx_tx(tls_ctx); spin_lock_irqsave(&ctx->lock, flags); info = ctx->retransmit_hint; @@ -179,15 +199,17 @@ static void tls_icsk_clean_acked(struct sock *sk, u32 acked_seq) void tls_device_sk_destruct(struct sock *sk) { struct tls_context *tls_ctx = tls_get_ctx(sk); - struct tls_offload_context *ctx = tls_offload_ctx(tls_ctx); + struct tls_offload_context_tx *ctx = tls_offload_ctx_tx(tls_ctx); - if (ctx->open_record) - destroy_record(ctx->open_record); + tls_ctx->sk_destruct(sk); - delete_all_records(ctx); - crypto_free_aead(ctx->aead_send); - ctx->sk_destruct(sk); - clean_acked_data_disable(inet_csk(sk)); + if (tls_ctx->tx_conf == TLS_HW) { + if (ctx->open_record) + destroy_record(ctx->open_record); + delete_all_records(ctx); + crypto_free_aead(ctx->aead_send); + clean_acked_data_disable(inet_csk(sk)); + } if (refcount_dec_and_test(&tls_ctx->refcount)) tls_device_queue_ctx_destruction(tls_ctx); @@ -219,7 +241,7 @@ static void 
tls_append_frag(struct tls_record_info *record, static int tls_push_record(struct sock *sk, struct tls_context *ctx, - struct tls_offload_context *offload_ctx, + struct tls_offload_context_tx *offload_ctx, struct tls_record_info *record, struct page_frag *pfrag, int flags, @@ -264,7 +286,7 @@ static int tls_push_record(struct sock *sk, return tls_push_sg(sk, ctx, offload_ctx->sg_tx_data, 0, flags); } -static int tls_create_new_record(struct tls_offload_context *offload_ctx, +static int tls_create_new_record(struct tls_offload_context_tx *offload_ctx, struct page_frag *pfrag, size_t prepend_size) { @@ -290,7 +312,7 @@ static int tls_create_new_record(struct tls_offload_context *offload_ctx, } static int tls_do_allocation(struct sock *sk, - struct tls_offload_context *offload_ctx, + struct tls_offload_context_tx *offload_ctx, struct page_frag *pfrag, size_t prepend_size) { @@ -324,7 +346,7 @@ static int tls_push_data(struct sock *sk, unsigned char record_type) { struct tls_context *tls_ctx = tls_get_ctx(sk); - struct tls_offload_context *ctx = tls_offload_ctx(tls_ctx); + struct tls_offload_context_tx *ctx = tls_offload_ctx_tx(tls_ctx); int tls_push_record_flags = flags | MSG_SENDPAGE_NOTLAST; int more = flags & (MSG_SENDPAGE_NOTLAST | MSG_MORE); struct tls_record_info *record = ctx->open_record; @@ -477,7 +499,7 @@ out: return rc; } -struct tls_record_info *tls_get_record(struct tls_offload_context *context, +struct tls_record_info *tls_get_record(struct tls_offload_context_tx *context, u32 seq, u64 *p_record_sn) { u64 record_sn = context->hint_record_sn; @@ -520,11 +542,123 @@ static int tls_device_push_pending_record(struct sock *sk, int flags) return tls_push_data(sk, &msg_iter, 0, flags, TLS_RECORD_TYPE_DATA); } +void handle_device_resync(struct sock *sk, u32 seq, u64 rcd_sn) +{ + struct tls_context *tls_ctx = tls_get_ctx(sk); + struct net_device *netdev = tls_ctx->netdev; + struct tls_offload_context_rx *rx_ctx; + u32 is_req_pending; + s64 resync_req; + u32 req_seq; + + if (tls_ctx->rx_conf != TLS_HW) + return; + + rx_ctx = tls_offload_ctx_rx(tls_ctx); + resync_req = atomic64_read(&rx_ctx->resync_req); + req_seq = ntohl(resync_req >> 32) - ((u32)TLS_HEADER_SIZE - 1); + is_req_pending = resync_req; + + if (unlikely(is_req_pending) && req_seq == seq && + atomic64_try_cmpxchg(&rx_ctx->resync_req, &resync_req, 0)) + netdev->tlsdev_ops->tls_dev_resync_rx(netdev, sk, + seq + TLS_HEADER_SIZE - 1, + rcd_sn); +} + +static int tls_device_reencrypt(struct sock *sk, struct sk_buff *skb) +{ + struct strp_msg *rxm = strp_msg(skb); + int err = 0, offset = rxm->offset, copy, nsg; + struct sk_buff *skb_iter, *unused; + struct scatterlist sg[1]; + char *orig_buf, *buf; + + orig_buf = kmalloc(rxm->full_len + TLS_HEADER_SIZE + + TLS_CIPHER_AES_GCM_128_IV_SIZE, sk->sk_allocation); + if (!orig_buf) + return -ENOMEM; + buf = orig_buf; + + nsg = skb_cow_data(skb, 0, &unused); + if (unlikely(nsg < 0)) { + err = nsg; + goto free_buf; + } + + sg_init_table(sg, 1); + sg_set_buf(&sg[0], buf, + rxm->full_len + TLS_HEADER_SIZE + + TLS_CIPHER_AES_GCM_128_IV_SIZE); + skb_copy_bits(skb, offset, buf, + TLS_HEADER_SIZE + TLS_CIPHER_AES_GCM_128_IV_SIZE); + + /* We are interested only in the decrypted data not the auth */ + err = decrypt_skb(sk, skb, sg); + if (err != -EBADMSG) + goto free_buf; + else + err = 0; + + copy = min_t(int, skb_pagelen(skb) - offset, + rxm->full_len - TLS_CIPHER_AES_GCM_128_TAG_SIZE); + + if (skb->decrypted) + skb_store_bits(skb, offset, buf, copy); + + offset += copy; + buf += copy; + + 
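	/* Clarifying note, not part of the patch: the fragment walk below
	 * continues the linear-area copy above. "buf" advances through the
	 * freshly decrypted copy of the record while "offset" advances with
	 * each chunk written back; only fragments the device already marked
	 * ->decrypted are overwritten, and the copy length is capped so the
	 * trailing TLS_CIPHER_AES_GCM_128_TAG_SIZE auth-tag bytes are never
	 * written back into the skb.
	 */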
skb_walk_frags(skb, skb_iter) { + copy = min_t(int, skb_iter->len, + rxm->full_len - offset + rxm->offset - + TLS_CIPHER_AES_GCM_128_TAG_SIZE); + + if (skb_iter->decrypted) + skb_store_bits(skb_iter, offset, buf, copy); + + offset += copy; + buf += copy; + } + +free_buf: + kfree(orig_buf); + return err; +} + +int tls_device_decrypted(struct sock *sk, struct sk_buff *skb) +{ + struct tls_context *tls_ctx = tls_get_ctx(sk); + struct tls_offload_context_rx *ctx = tls_offload_ctx_rx(tls_ctx); + int is_decrypted = skb->decrypted; + int is_encrypted = !is_decrypted; + struct sk_buff *skb_iter; + + /* Skip if it is already decrypted */ + if (ctx->sw.decrypted) + return 0; + + /* Check if all the data is decrypted already */ + skb_walk_frags(skb, skb_iter) { + is_decrypted &= skb_iter->decrypted; + is_encrypted &= !skb_iter->decrypted; + } + + ctx->sw.decrypted |= is_decrypted; + + /* Return immedeatly if the record is either entirely plaintext or + * entirely ciphertext. Otherwise handle reencrypt partially decrypted + * record. + */ + return (is_encrypted || is_decrypted) ? 0 : + tls_device_reencrypt(sk, skb); +} + int tls_set_device_offload(struct sock *sk, struct tls_context *ctx) { u16 nonce_size, tag_size, iv_size, rec_seq_size; struct tls_record_info *start_marker_record; - struct tls_offload_context *offload_ctx; + struct tls_offload_context_tx *offload_ctx; struct tls_crypto_info *crypto_info; struct net_device *netdev; char *iv, *rec_seq; @@ -546,7 +680,7 @@ int tls_set_device_offload(struct sock *sk, struct tls_context *ctx) goto out; } - offload_ctx = kzalloc(TLS_OFFLOAD_CONTEXT_SIZE, GFP_KERNEL); + offload_ctx = kzalloc(TLS_OFFLOAD_CONTEXT_SIZE_TX, GFP_KERNEL); if (!offload_ctx) { rc = -ENOMEM; goto free_marker_record; @@ -609,7 +743,6 @@ int tls_set_device_offload(struct sock *sk, struct tls_context *ctx) clean_acked_data_enable(inet_csk(sk), &tls_icsk_clean_acked); ctx->push_pending_record = tls_device_push_pending_record; - offload_ctx->sk_destruct = sk->sk_destruct; /* TLS offload is greatly simplified if we don't send * SKBs where only part of the payload needs to be encrypted. @@ -619,8 +752,6 @@ int tls_set_device_offload(struct sock *sk, struct tls_context *ctx) if (skb) TCP_SKB_CB(skb)->eor = 1; - refcount_set(&ctx->refcount, 1); - /* We support starting offload on multiple sockets * concurrently, so we only need a read lock here. * This lock must precede get_netdev_for_sock to prevent races between @@ -655,19 +786,14 @@ int tls_set_device_offload(struct sock *sk, struct tls_context *ctx) if (rc) goto release_netdev; - ctx->netdev = netdev; - - spin_lock_irq(&tls_device_lock); - list_add_tail(&ctx->list, &tls_device_list); - spin_unlock_irq(&tls_device_lock); + tls_device_attach(ctx, sk, netdev); - sk->sk_validate_xmit_skb = tls_validate_xmit_skb; /* following this assignment tls_is_sk_tx_device_offloaded * will return true and the context might be accessed * by the netdev's xmit function. */ - smp_store_release(&sk->sk_destruct, - &tls_device_sk_destruct); + smp_store_release(&sk->sk_validate_xmit_skb, tls_validate_xmit_skb); + dev_put(netdev); up_read(&device_offload_lock); goto out; @@ -690,6 +816,105 @@ out: return rc; } +int tls_set_device_offload_rx(struct sock *sk, struct tls_context *ctx) +{ + struct tls_offload_context_rx *context; + struct net_device *netdev; + int rc = 0; + + /* We support starting offload on multiple sockets + * concurrently, so we only need a read lock here. 
+ * This lock must precede get_netdev_for_sock to prevent races between + * NETDEV_DOWN and setsockopt. + */ + down_read(&device_offload_lock); + netdev = get_netdev_for_sock(sk); + if (!netdev) { + pr_err_ratelimited("%s: netdev not found\n", __func__); + rc = -EINVAL; + goto release_lock; + } + + if (!(netdev->features & NETIF_F_HW_TLS_RX)) { + pr_err_ratelimited("%s: netdev %s with no TLS offload\n", + __func__, netdev->name); + rc = -ENOTSUPP; + goto release_netdev; + } + + /* Avoid offloading if the device is down + * We don't want to offload new flows after + * the NETDEV_DOWN event + */ + if (!(netdev->flags & IFF_UP)) { + rc = -EINVAL; + goto release_netdev; + } + + context = kzalloc(TLS_OFFLOAD_CONTEXT_SIZE_RX, GFP_KERNEL); + if (!context) { + rc = -ENOMEM; + goto release_netdev; + } + + ctx->priv_ctx_rx = context; + rc = tls_set_sw_offload(sk, ctx, 0); + if (rc) + goto release_ctx; + + rc = netdev->tlsdev_ops->tls_dev_add(netdev, sk, TLS_OFFLOAD_CTX_DIR_RX, + &ctx->crypto_recv, + tcp_sk(sk)->copied_seq); + if (rc) { + pr_err_ratelimited("%s: The netdev has refused to offload this socket\n", + __func__); + goto free_sw_resources; + } + + tls_device_attach(ctx, sk, netdev); + goto release_netdev; + +free_sw_resources: + tls_sw_free_resources_rx(sk); +release_ctx: + ctx->priv_ctx_rx = NULL; +release_netdev: + dev_put(netdev); +release_lock: + up_read(&device_offload_lock); + return rc; +} + +void tls_device_offload_cleanup_rx(struct sock *sk) +{ + struct tls_context *tls_ctx = tls_get_ctx(sk); + struct net_device *netdev; + + down_read(&device_offload_lock); + netdev = tls_ctx->netdev; + if (!netdev) + goto out; + + if (!(netdev->features & NETIF_F_HW_TLS_RX)) { + pr_err_ratelimited("%s: device is missing NETIF_F_HW_TLS_RX cap\n", + __func__); + goto out; + } + + netdev->tlsdev_ops->tls_dev_del(netdev, tls_ctx, + TLS_OFFLOAD_CTX_DIR_RX); + + if (tls_ctx->tx_conf != TLS_HW) { + dev_put(netdev); + tls_ctx->netdev = NULL; + } +out: + up_read(&device_offload_lock); + kfree(tls_ctx->rx.rec_seq); + kfree(tls_ctx->rx.iv); + tls_sw_release_resources_rx(sk); +} + static int tls_device_down(struct net_device *netdev) { struct tls_context *ctx, *tmp; @@ -710,8 +935,12 @@ static int tls_device_down(struct net_device *netdev) spin_unlock_irqrestore(&tls_device_lock, flags); list_for_each_entry_safe(ctx, tmp, &list, list) { - netdev->tlsdev_ops->tls_dev_del(netdev, ctx, - TLS_OFFLOAD_CTX_DIR_TX); + if (ctx->tx_conf == TLS_HW) + netdev->tlsdev_ops->tls_dev_del(netdev, ctx, + TLS_OFFLOAD_CTX_DIR_TX); + if (ctx->rx_conf == TLS_HW) + netdev->tlsdev_ops->tls_dev_del(netdev, ctx, + TLS_OFFLOAD_CTX_DIR_RX); ctx->netdev = NULL; dev_put(netdev); list_del_init(&ctx->list); @@ -732,12 +961,16 @@ static int tls_dev_event(struct notifier_block *this, unsigned long event, { struct net_device *dev = netdev_notifier_info_to_dev(ptr); - if (!(dev->features & NETIF_F_HW_TLS_TX)) + if (!(dev->features & (NETIF_F_HW_TLS_RX | NETIF_F_HW_TLS_TX))) return NOTIFY_DONE; switch (event) { case NETDEV_REGISTER: case NETDEV_FEAT_CHANGE: + if ((dev->features & NETIF_F_HW_TLS_RX) && + !dev->tlsdev_ops->tls_dev_resync_rx) + return NOTIFY_BAD; + if (dev->tlsdev_ops && dev->tlsdev_ops->tls_dev_add && dev->tlsdev_ops->tls_dev_del) diff --git a/net/tls/tls_device_fallback.c b/net/tls/tls_device_fallback.c index 748914abdb60..e3313c45663f 100644 --- a/net/tls/tls_device_fallback.c +++ b/net/tls/tls_device_fallback.c @@ -214,7 +214,7 @@ static void complete_skb(struct sk_buff *nskb, struct sk_buff *skb, int headln) static int 
fill_sg_in(struct scatterlist *sg_in, struct sk_buff *skb, - struct tls_offload_context *ctx, + struct tls_offload_context_tx *ctx, u64 *rcd_sn, s32 *sync_size, int *resync_sgs) @@ -299,7 +299,7 @@ static struct sk_buff *tls_enc_skb(struct tls_context *tls_ctx, s32 sync_size, u64 rcd_sn) { int tcp_payload_offset = skb_transport_offset(skb) + tcp_hdrlen(skb); - struct tls_offload_context *ctx = tls_offload_ctx(tls_ctx); + struct tls_offload_context_tx *ctx = tls_offload_ctx_tx(tls_ctx); int payload_len = skb->len - tcp_payload_offset; void *buf, *iv, *aad, *dummy_buf; struct aead_request *aead_req; @@ -361,7 +361,7 @@ static struct sk_buff *tls_sw_fallback(struct sock *sk, struct sk_buff *skb) { int tcp_payload_offset = skb_transport_offset(skb) + tcp_hdrlen(skb); struct tls_context *tls_ctx = tls_get_ctx(sk); - struct tls_offload_context *ctx = tls_offload_ctx(tls_ctx); + struct tls_offload_context_tx *ctx = tls_offload_ctx_tx(tls_ctx); int payload_len = skb->len - tcp_payload_offset; struct scatterlist *sg_in, sg_out[3]; struct sk_buff *nskb = NULL; @@ -413,9 +413,10 @@ struct sk_buff *tls_validate_xmit_skb(struct sock *sk, return tls_sw_fallback(sk, skb); } +EXPORT_SYMBOL_GPL(tls_validate_xmit_skb); int tls_sw_fallback_init(struct sock *sk, - struct tls_offload_context *offload_ctx, + struct tls_offload_context_tx *offload_ctx, struct tls_crypto_info *crypto_info) { const u8 *key; diff --git a/net/tls/tls_main.c b/net/tls/tls_main.c index 301f22430469..b09867c8b817 100644 --- a/net/tls/tls_main.c +++ b/net/tls/tls_main.c @@ -51,15 +51,6 @@ enum { TLSV6, TLS_NUM_PROTS, }; -enum { - TLS_BASE, - TLS_SW, -#ifdef CONFIG_TLS_DEVICE - TLS_HW, -#endif - TLS_HW_RECORD, - TLS_NUM_CONFIG, -}; static struct proto *saved_tcpv6_prot; static DEFINE_MUTEX(tcpv6_prot_mutex); @@ -290,7 +281,10 @@ static void tls_sk_proto_close(struct sock *sk, long timeout) } #ifdef CONFIG_TLS_DEVICE - if (ctx->tx_conf != TLS_HW) { + if (ctx->rx_conf == TLS_HW) + tls_device_offload_cleanup_rx(sk); + + if (ctx->tx_conf != TLS_HW && ctx->rx_conf != TLS_HW) { #else { #endif @@ -470,8 +464,16 @@ static int do_tls_setsockopt_conf(struct sock *sk, char __user *optval, conf = TLS_SW; } } else { - rc = tls_set_sw_offload(sk, ctx, 0); - conf = TLS_SW; +#ifdef CONFIG_TLS_DEVICE + rc = tls_set_device_offload_rx(sk, ctx); + conf = TLS_HW; + if (rc) { +#else + { +#endif + rc = tls_set_sw_offload(sk, ctx, 0); + conf = TLS_SW; + } } if (rc) @@ -629,6 +631,12 @@ static void build_protos(struct proto prot[TLS_NUM_CONFIG][TLS_NUM_CONFIG], prot[TLS_HW][TLS_SW] = prot[TLS_BASE][TLS_SW]; prot[TLS_HW][TLS_SW].sendmsg = tls_device_sendmsg; prot[TLS_HW][TLS_SW].sendpage = tls_device_sendpage; + + prot[TLS_BASE][TLS_HW] = prot[TLS_BASE][TLS_SW]; + + prot[TLS_SW][TLS_HW] = prot[TLS_SW][TLS_SW]; + + prot[TLS_HW][TLS_HW] = prot[TLS_HW][TLS_SW]; #endif prot[TLS_HW_RECORD][TLS_HW_RECORD] = *base; diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c index 4618f1c31137..0c2d029c9d4c 100644 --- a/net/tls/tls_sw.c +++ b/net/tls/tls_sw.c @@ -53,18 +53,14 @@ static int tls_do_decryption(struct sock *sk, { struct tls_context *tls_ctx = tls_get_ctx(sk); struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx); - struct strp_msg *rxm = strp_msg(skb); struct aead_request *aead_req; int ret; - unsigned int req_size = sizeof(struct aead_request) + - crypto_aead_reqsize(ctx->aead_recv); - aead_req = kzalloc(req_size, flags); + aead_req = aead_request_alloc(ctx->aead_recv, flags); if (!aead_req) return -ENOMEM; - aead_request_set_tfm(aead_req, ctx->aead_recv); 
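	/* Background note, not part of the patch: aead_request_alloc() is the
	 * crypto API helper for the open-coded pattern removed here, roughly
	 *
	 *	req = kmalloc(sizeof(*req) + crypto_aead_reqsize(tfm), gfp);
	 *	if (req)
	 *		aead_request_set_tfm(req, tfm);
	 *
	 * so the explicit aead_request_set_tfm() call can be dropped and the
	 * later kfree() becomes aead_request_free().
	 */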
aead_request_set_ad(aead_req, TLS_AAD_SPACE_SIZE); aead_request_set_crypt(aead_req, sgin, sgout, data_len + tls_ctx->rx.tag_size, @@ -74,19 +70,7 @@ static int tls_do_decryption(struct sock *sk, ret = crypto_wait_req(crypto_aead_decrypt(aead_req), &ctx->async_wait); - if (ret < 0) - goto out; - - rxm->offset += tls_ctx->rx.prepend_size; - rxm->full_len -= tls_ctx->rx.overhead_size; - tls_advance_record_sn(sk, &tls_ctx->rx); - - ctx->decrypted = true; - - ctx->saved_data_ready(sk); - -out: - kfree(aead_req); + aead_request_free(aead_req); return ret; } @@ -224,8 +208,7 @@ static int tls_push_record(struct sock *sk, int flags, struct aead_request *req; int rc; - req = kzalloc(sizeof(struct aead_request) + - crypto_aead_reqsize(ctx->aead_send), sk->sk_allocation); + req = aead_request_alloc(ctx->aead_send, sk->sk_allocation); if (!req) return -ENOMEM; @@ -267,7 +250,7 @@ static int tls_push_record(struct sock *sk, int flags, tls_advance_record_sn(sk, &tls_ctx->tx); out_req: - kfree(req); + aead_request_free(req); return rc; } @@ -280,7 +263,7 @@ static int zerocopy_from_iter(struct sock *sk, struct iov_iter *from, int length, int *pages_used, unsigned int *size_used, struct scatterlist *to, int to_max_pages, - bool charge) + bool charge, bool revert) { struct page *pages[MAX_SKB_FRAGS]; @@ -331,6 +314,8 @@ static int zerocopy_from_iter(struct sock *sk, struct iov_iter *from, out: *size_used = size; *pages_used = num_elem; + if (revert) + iov_iter_revert(from, size); return rc; } @@ -432,7 +417,7 @@ alloc_encrypted: &ctx->sg_plaintext_size, ctx->sg_plaintext_data, ARRAY_SIZE(ctx->sg_plaintext_data), - true); + true, false); if (ret) goto fallback_to_reg_send; @@ -670,8 +655,38 @@ static struct sk_buff *tls_wait_data(struct sock *sk, int flags, return skb; } -static int decrypt_skb(struct sock *sk, struct sk_buff *skb, - struct scatterlist *sgout) +static int decrypt_skb_update(struct sock *sk, struct sk_buff *skb, + struct scatterlist *sgout, bool *zc) +{ + struct tls_context *tls_ctx = tls_get_ctx(sk); + struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx); + struct strp_msg *rxm = strp_msg(skb); + int err = 0; + +#ifdef CONFIG_TLS_DEVICE + err = tls_device_decrypted(sk, skb); + if (err < 0) + return err; +#endif + if (!ctx->decrypted) { + err = decrypt_skb(sk, skb, sgout); + if (err < 0) + return err; + } else { + *zc = false; + } + + rxm->offset += tls_ctx->rx.prepend_size; + rxm->full_len -= tls_ctx->rx.overhead_size; + tls_advance_record_sn(sk, &tls_ctx->rx); + ctx->decrypted = true; + ctx->saved_data_ready(sk); + + return err; +} + +int decrypt_skb(struct sock *sk, struct sk_buff *skb, + struct scatterlist *sgout) { struct tls_context *tls_ctx = tls_get_ctx(sk); struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx); @@ -817,11 +832,11 @@ int tls_sw_recvmsg(struct sock *sk, err = zerocopy_from_iter(sk, &msg->msg_iter, to_copy, &pages, &chunk, &sgin[1], - MAX_SKB_FRAGS, false); + MAX_SKB_FRAGS, false, true); if (err < 0) goto fallback_to_reg_recv; - err = decrypt_skb(sk, skb, sgin); + err = decrypt_skb_update(sk, skb, sgin, &zc); for (; pages > 0; pages--) put_page(sg_page(&sgin[pages])); if (err < 0) { @@ -830,7 +845,7 @@ int tls_sw_recvmsg(struct sock *sk, } } else { fallback_to_reg_recv: - err = decrypt_skb(sk, skb, NULL); + err = decrypt_skb_update(sk, skb, NULL, &zc); if (err < 0) { tls_err_abort(sk, EBADMSG); goto recv_end; @@ -885,6 +900,7 @@ ssize_t tls_sw_splice_read(struct socket *sock, loff_t *ppos, int err = 0; long timeo; int chunk; + bool zc; lock_sock(sk); @@ -901,7 
+917,7 @@ ssize_t tls_sw_splice_read(struct socket *sock, loff_t *ppos, } if (!ctx->decrypted) { - err = decrypt_skb(sk, skb, NULL); + err = decrypt_skb_update(sk, skb, NULL, &zc); if (err < 0) { tls_err_abort(sk, EBADMSG); @@ -947,7 +963,7 @@ static int tls_read_size(struct strparser *strp, struct sk_buff *skb) { struct tls_context *tls_ctx = tls_get_ctx(strp->sk); struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx); - char header[tls_ctx->rx.prepend_size]; + char header[TLS_HEADER_SIZE + MAX_IV_SIZE]; struct strp_msg *rxm = strp_msg(skb); size_t cipher_overhead; size_t data_len = 0; @@ -957,6 +973,12 @@ static int tls_read_size(struct strparser *strp, struct sk_buff *skb) if (rxm->offset + tls_ctx->rx.prepend_size > skb->len) return 0; + /* Sanity-check size of on-stack buffer. */ + if (WARN_ON(tls_ctx->rx.prepend_size > sizeof(header))) { + ret = -EINVAL; + goto read_failure; + } + /* Linearize header to local buffer */ ret = skb_copy_bits(skb, rxm->offset, header, tls_ctx->rx.prepend_size); @@ -984,6 +1006,10 @@ static int tls_read_size(struct strparser *strp, struct sk_buff *skb) goto read_failure; } +#ifdef CONFIG_TLS_DEVICE + handle_device_resync(strp->sk, TCP_SKB_CB(skb)->seq + rxm->offset, + *(u64*)tls_ctx->rx.rec_seq); +#endif return data_len + TLS_HEADER_SIZE; read_failure: @@ -996,9 +1022,6 @@ static void tls_queue(struct strparser *strp, struct sk_buff *skb) { struct tls_context *tls_ctx = tls_get_ctx(strp->sk); struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx); - struct strp_msg *rxm; - - rxm = strp_msg(skb); ctx->decrypted = false; @@ -1028,7 +1051,7 @@ void tls_sw_free_resources_tx(struct sock *sk) kfree(ctx); } -void tls_sw_free_resources_rx(struct sock *sk) +void tls_sw_release_resources_rx(struct sock *sk) { struct tls_context *tls_ctx = tls_get_ctx(sk); struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx); @@ -1047,6 +1070,14 @@ void tls_sw_free_resources_rx(struct sock *sk) strp_done(&ctx->strp); lock_sock(sk); } +} + +void tls_sw_free_resources_rx(struct sock *sk) +{ + struct tls_context *tls_ctx = tls_get_ctx(sk); + struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx); + + tls_sw_release_resources_rx(sk); kfree(ctx); } @@ -1071,28 +1102,38 @@ int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx, int tx) } if (tx) { - sw_ctx_tx = kzalloc(sizeof(*sw_ctx_tx), GFP_KERNEL); - if (!sw_ctx_tx) { - rc = -ENOMEM; - goto out; + if (!ctx->priv_ctx_tx) { + sw_ctx_tx = kzalloc(sizeof(*sw_ctx_tx), GFP_KERNEL); + if (!sw_ctx_tx) { + rc = -ENOMEM; + goto out; + } + ctx->priv_ctx_tx = sw_ctx_tx; + } else { + sw_ctx_tx = + (struct tls_sw_context_tx *)ctx->priv_ctx_tx; } - crypto_init_wait(&sw_ctx_tx->async_wait); - ctx->priv_ctx_tx = sw_ctx_tx; } else { - sw_ctx_rx = kzalloc(sizeof(*sw_ctx_rx), GFP_KERNEL); - if (!sw_ctx_rx) { - rc = -ENOMEM; - goto out; + if (!ctx->priv_ctx_rx) { + sw_ctx_rx = kzalloc(sizeof(*sw_ctx_rx), GFP_KERNEL); + if (!sw_ctx_rx) { + rc = -ENOMEM; + goto out; + } + ctx->priv_ctx_rx = sw_ctx_rx; + } else { + sw_ctx_rx = + (struct tls_sw_context_rx *)ctx->priv_ctx_rx; } - crypto_init_wait(&sw_ctx_rx->async_wait); - ctx->priv_ctx_rx = sw_ctx_rx; } if (tx) { + crypto_init_wait(&sw_ctx_tx->async_wait); crypto_info = &ctx->crypto_send; cctx = &ctx->tx; aead = &sw_ctx_tx->aead_send; } else { + crypto_init_wait(&sw_ctx_rx->async_wait); crypto_info = &ctx->crypto_recv; cctx = &ctx->rx; aead = &sw_ctx_rx->aead_recv; @@ -1117,7 +1158,7 @@ int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx, int tx) } /* Sanity-check the IV size for stack 
allocations. */ - if (iv_size > MAX_IV_SIZE) { + if (iv_size > MAX_IV_SIZE || nonce_size > MAX_IV_SIZE) { rc = -EINVAL; goto free_priv; } diff --git a/net/wireless/core.c b/net/wireless/core.c index 48e8097339ab..a88551f3bc43 100644 --- a/net/wireless/core.c +++ b/net/wireless/core.c @@ -3,7 +3,7 @@ * * Copyright 2006-2010 Johannes Berg <johannes@sipsolutions.net> * Copyright 2013-2014 Intel Mobile Communications GmbH - * Copyright 2015 Intel Deutschland GmbH + * Copyright 2015-2017 Intel Deutschland GmbH */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt @@ -744,6 +744,8 @@ int wiphy_register(struct wiphy *wiphy) /* sanity check supported bands/channels */ for (band = 0; band < NUM_NL80211_BANDS; band++) { + u16 types = 0; + sband = wiphy->bands[band]; if (!sband) continue; @@ -788,6 +790,23 @@ int wiphy_register(struct wiphy *wiphy) sband->channels[i].band = band; } + for (i = 0; i < sband->n_iftype_data; i++) { + const struct ieee80211_sband_iftype_data *iftd; + + iftd = &sband->iftype_data[i]; + + if (WARN_ON(!iftd->types_mask)) + return -EINVAL; + if (WARN_ON(types & iftd->types_mask)) + return -EINVAL; + + /* at least one piece of information must be present */ + if (WARN_ON(!iftd->he_cap.has_he)) + return -EINVAL; + + types |= iftd->types_mask; + } + have_band = true; } diff --git a/net/wireless/core.h b/net/wireless/core.h index 63eb1b5fdd04..7f52ef569320 100644 --- a/net/wireless/core.h +++ b/net/wireless/core.h @@ -76,7 +76,7 @@ struct cfg80211_registered_device { struct cfg80211_scan_request *scan_req; /* protected by RTNL */ struct sk_buff *scan_msg; struct list_head sched_scan_req_list; - unsigned long suspend_at; + time64_t suspend_at; struct work_struct scan_done_wk; struct genl_info *cur_cmd_info; diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c index 4eece06be1e7..e4e5f025d16b 100644 --- a/net/wireless/nl80211.c +++ b/net/wireless/nl80211.c @@ -428,6 +428,8 @@ static const struct nla_policy nl80211_policy[NUM_NL80211_ATTR] = { [NL80211_ATTR_TXQ_LIMIT] = { .type = NLA_U32 }, [NL80211_ATTR_TXQ_MEMORY_LIMIT] = { .type = NLA_U32 }, [NL80211_ATTR_TXQ_QUANTUM] = { .type = NLA_U32 }, + [NL80211_ATTR_HE_CAPABILITY] = { .type = NLA_BINARY, + .len = NL80211_HE_MAX_CAPABILITY_LEN }, }; /* policy for the key attributes */ @@ -1324,6 +1326,34 @@ static int nl80211_send_coalesce(struct sk_buff *msg, return 0; } +static int +nl80211_send_iftype_data(struct sk_buff *msg, + const struct ieee80211_sband_iftype_data *iftdata) +{ + const struct ieee80211_sta_he_cap *he_cap = &iftdata->he_cap; + + if (nl80211_put_iftypes(msg, NL80211_BAND_IFTYPE_ATTR_IFTYPES, + iftdata->types_mask)) + return -ENOBUFS; + + if (he_cap->has_he) { + if (nla_put(msg, NL80211_BAND_IFTYPE_ATTR_HE_CAP_MAC, + sizeof(he_cap->he_cap_elem.mac_cap_info), + he_cap->he_cap_elem.mac_cap_info) || + nla_put(msg, NL80211_BAND_IFTYPE_ATTR_HE_CAP_PHY, + sizeof(he_cap->he_cap_elem.phy_cap_info), + he_cap->he_cap_elem.phy_cap_info) || + nla_put(msg, NL80211_BAND_IFTYPE_ATTR_HE_CAP_MCS_SET, + sizeof(he_cap->he_mcs_nss_supp), + &he_cap->he_mcs_nss_supp) || + nla_put(msg, NL80211_BAND_IFTYPE_ATTR_HE_CAP_PPE, + sizeof(he_cap->ppe_thres), he_cap->ppe_thres)) + return -ENOBUFS; + } + + return 0; +} + static int nl80211_send_band_rateinfo(struct sk_buff *msg, struct ieee80211_supported_band *sband) { @@ -1353,6 +1383,32 @@ static int nl80211_send_band_rateinfo(struct sk_buff *msg, sband->vht_cap.cap))) return -ENOBUFS; + if (sband->n_iftype_data) { + struct nlattr *nl_iftype_data = + nla_nest_start(msg, 
NL80211_BAND_ATTR_IFTYPE_DATA); + int err; + + if (!nl_iftype_data) + return -ENOBUFS; + + for (i = 0; i < sband->n_iftype_data; i++) { + struct nlattr *iftdata; + + iftdata = nla_nest_start(msg, i + 1); + if (!iftdata) + return -ENOBUFS; + + err = nl80211_send_iftype_data(msg, + &sband->iftype_data[i]); + if (err) + return err; + + nla_nest_end(msg, iftdata); + } + + nla_nest_end(msg, nl_iftype_data); + } + /* add bitrates */ nl_rates = nla_nest_start(msg, NL80211_BAND_ATTR_RATES); if (!nl_rates) @@ -2757,7 +2813,8 @@ static int nl80211_send_iface(struct sk_buff *msg, u32 portid, u32 seq, int flag nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, wdev_address(wdev)) || nla_put_u32(msg, NL80211_ATTR_GENERATION, rdev->devlist_generation ^ - (cfg80211_rdev_list_generation << 2))) + (cfg80211_rdev_list_generation << 2)) || + nla_put_u8(msg, NL80211_ATTR_4ADDR, wdev->use_4addr)) goto nla_put_failure; if (rdev->ops->get_channel) { @@ -4471,6 +4528,9 @@ static bool nl80211_put_sta_rate(struct sk_buff *msg, struct rate_info *info, case RATE_INFO_BW_160: rate_flg = NL80211_RATE_INFO_160_MHZ_WIDTH; break; + case RATE_INFO_BW_HE_RU: + rate_flg = 0; + WARN_ON(!(info->flags & RATE_INFO_FLAGS_HE_MCS)); } if (rate_flg && nla_put_flag(msg, rate_flg)) @@ -4490,6 +4550,19 @@ static bool nl80211_put_sta_rate(struct sk_buff *msg, struct rate_info *info, if (info->flags & RATE_INFO_FLAGS_SHORT_GI && nla_put_flag(msg, NL80211_RATE_INFO_SHORT_GI)) return false; + } else if (info->flags & RATE_INFO_FLAGS_HE_MCS) { + if (nla_put_u8(msg, NL80211_RATE_INFO_HE_MCS, info->mcs)) + return false; + if (nla_put_u8(msg, NL80211_RATE_INFO_HE_NSS, info->nss)) + return false; + if (nla_put_u8(msg, NL80211_RATE_INFO_HE_GI, info->he_gi)) + return false; + if (nla_put_u8(msg, NL80211_RATE_INFO_HE_DCM, info->he_dcm)) + return false; + if (info->bw == RATE_INFO_BW_HE_RU && + nla_put_u8(msg, NL80211_RATE_INFO_HE_RU_ALLOC, + info->he_ru_alloc)) + return false; } nla_nest_end(msg, rate); @@ -4546,13 +4619,13 @@ static int nl80211_send_station(struct sk_buff *msg, u32 cmd, u32 portid, #define PUT_SINFO(attr, memb, type) do { \ BUILD_BUG_ON(sizeof(type) == sizeof(u64)); \ - if (sinfo->filled & (1ULL << NL80211_STA_INFO_ ## attr) && \ + if (sinfo->filled & BIT_ULL(NL80211_STA_INFO_ ## attr) && \ nla_put_ ## type(msg, NL80211_STA_INFO_ ## attr, \ sinfo->memb)) \ goto nla_put_failure; \ } while (0) #define PUT_SINFO_U64(attr, memb) do { \ - if (sinfo->filled & (1ULL << NL80211_STA_INFO_ ## attr) && \ + if (sinfo->filled & BIT_ULL(NL80211_STA_INFO_ ## attr) && \ nla_put_u64_64bit(msg, NL80211_STA_INFO_ ## attr, \ sinfo->memb, NL80211_STA_INFO_PAD)) \ goto nla_put_failure; \ @@ -4561,14 +4634,14 @@ static int nl80211_send_station(struct sk_buff *msg, u32 cmd, u32 portid, PUT_SINFO(CONNECTED_TIME, connected_time, u32); PUT_SINFO(INACTIVE_TIME, inactive_time, u32); - if (sinfo->filled & (BIT(NL80211_STA_INFO_RX_BYTES) | - BIT(NL80211_STA_INFO_RX_BYTES64)) && + if (sinfo->filled & (BIT_ULL(NL80211_STA_INFO_RX_BYTES) | + BIT_ULL(NL80211_STA_INFO_RX_BYTES64)) && nla_put_u32(msg, NL80211_STA_INFO_RX_BYTES, (u32)sinfo->rx_bytes)) goto nla_put_failure; - if (sinfo->filled & (BIT(NL80211_STA_INFO_TX_BYTES) | - BIT(NL80211_STA_INFO_TX_BYTES64)) && + if (sinfo->filled & (BIT_ULL(NL80211_STA_INFO_TX_BYTES) | + BIT_ULL(NL80211_STA_INFO_TX_BYTES64)) && nla_put_u32(msg, NL80211_STA_INFO_TX_BYTES, (u32)sinfo->tx_bytes)) goto nla_put_failure; @@ -4588,24 +4661,24 @@ static int nl80211_send_station(struct sk_buff *msg, u32 cmd, u32 portid, default: break; } - if 
(sinfo->filled & BIT(NL80211_STA_INFO_CHAIN_SIGNAL)) { + if (sinfo->filled & BIT_ULL(NL80211_STA_INFO_CHAIN_SIGNAL)) { if (!nl80211_put_signal(msg, sinfo->chains, sinfo->chain_signal, NL80211_STA_INFO_CHAIN_SIGNAL)) goto nla_put_failure; } - if (sinfo->filled & BIT(NL80211_STA_INFO_CHAIN_SIGNAL_AVG)) { + if (sinfo->filled & BIT_ULL(NL80211_STA_INFO_CHAIN_SIGNAL_AVG)) { if (!nl80211_put_signal(msg, sinfo->chains, sinfo->chain_signal_avg, NL80211_STA_INFO_CHAIN_SIGNAL_AVG)) goto nla_put_failure; } - if (sinfo->filled & BIT(NL80211_STA_INFO_TX_BITRATE)) { + if (sinfo->filled & BIT_ULL(NL80211_STA_INFO_TX_BITRATE)) { if (!nl80211_put_sta_rate(msg, &sinfo->txrate, NL80211_STA_INFO_TX_BITRATE)) goto nla_put_failure; } - if (sinfo->filled & BIT(NL80211_STA_INFO_RX_BITRATE)) { + if (sinfo->filled & BIT_ULL(NL80211_STA_INFO_RX_BITRATE)) { if (!nl80211_put_sta_rate(msg, &sinfo->rxrate, NL80211_STA_INFO_RX_BITRATE)) goto nla_put_failure; @@ -4621,7 +4694,7 @@ static int nl80211_send_station(struct sk_buff *msg, u32 cmd, u32 portid, PUT_SINFO(PEER_PM, peer_pm, u32); PUT_SINFO(NONPEER_PM, nonpeer_pm, u32); - if (sinfo->filled & BIT(NL80211_STA_INFO_BSS_PARAM)) { + if (sinfo->filled & BIT_ULL(NL80211_STA_INFO_BSS_PARAM)) { bss_param = nla_nest_start(msg, NL80211_STA_INFO_BSS_PARAM); if (!bss_param) goto nla_put_failure; @@ -4640,7 +4713,7 @@ static int nl80211_send_station(struct sk_buff *msg, u32 cmd, u32 portid, nla_nest_end(msg, bss_param); } - if ((sinfo->filled & BIT(NL80211_STA_INFO_STA_FLAGS)) && + if ((sinfo->filled & BIT_ULL(NL80211_STA_INFO_STA_FLAGS)) && nla_put(msg, NL80211_STA_INFO_STA_FLAGS, sizeof(struct nl80211_sta_flag_update), &sinfo->sta_flags)) @@ -4886,7 +4959,8 @@ int cfg80211_check_station_change(struct wiphy *wiphy, return -EINVAL; if (params->supported_rates) return -EINVAL; - if (params->ext_capab || params->ht_capa || params->vht_capa) + if (params->ext_capab || params->ht_capa || params->vht_capa || + params->he_capa) return -EINVAL; } @@ -5092,6 +5166,15 @@ static int nl80211_set_station_tdls(struct genl_info *info, if (info->attrs[NL80211_ATTR_VHT_CAPABILITY]) params->vht_capa = nla_data(info->attrs[NL80211_ATTR_VHT_CAPABILITY]); + if (info->attrs[NL80211_ATTR_HE_CAPABILITY]) { + params->he_capa = + nla_data(info->attrs[NL80211_ATTR_HE_CAPABILITY]); + params->he_capa_len = + nla_len(info->attrs[NL80211_ATTR_HE_CAPABILITY]); + + if (params->he_capa_len < NL80211_HE_MIN_CAPABILITY_LEN) + return -EINVAL; + } err = nl80211_parse_sta_channel_info(info, params); if (err) @@ -5319,6 +5402,17 @@ static int nl80211_new_station(struct sk_buff *skb, struct genl_info *info) params.vht_capa = nla_data(info->attrs[NL80211_ATTR_VHT_CAPABILITY]); + if (info->attrs[NL80211_ATTR_HE_CAPABILITY]) { + params.he_capa = + nla_data(info->attrs[NL80211_ATTR_HE_CAPABILITY]); + params.he_capa_len = + nla_len(info->attrs[NL80211_ATTR_HE_CAPABILITY]); + + /* max len is validated in nla policy */ + if (params.he_capa_len < NL80211_HE_MIN_CAPABILITY_LEN) + return -EINVAL; + } + if (info->attrs[NL80211_ATTR_OPMODE_NOTIF]) { params.opmode_notif_used = true; params.opmode_notif = @@ -5351,6 +5445,10 @@ static int nl80211_new_station(struct sk_buff *skb, struct genl_info *info) if (!(params.sta_flags_set & BIT(NL80211_STA_FLAG_WME))) { params.ht_capa = NULL; params.vht_capa = NULL; + + /* HE requires WME */ + if (params.he_capa_len) + return -EINVAL; } /* When you run into this, adjust the code below for the new flag */ @@ -6848,6 +6946,16 @@ static bool cfg80211_off_channel_oper_allowed(struct wireless_dev 
*wdev) return regulatory_pre_cac_allowed(wdev->wiphy); } +static bool nl80211_check_scan_feat(struct wiphy *wiphy, u32 flags, u32 flag, + enum nl80211_ext_feature_index feat) +{ + if (!(flags & flag)) + return true; + if (wiphy_ext_feature_isset(wiphy, feat)) + return true; + return false; +} + static int nl80211_check_scan_flags(struct wiphy *wiphy, struct wireless_dev *wdev, void *request, struct nlattr **attrs, @@ -6882,15 +6990,33 @@ nl80211_check_scan_flags(struct wiphy *wiphy, struct wireless_dev *wdev, if (((*flags & NL80211_SCAN_FLAG_LOW_PRIORITY) && !(wiphy->features & NL80211_FEATURE_LOW_PRIORITY_SCAN)) || - ((*flags & NL80211_SCAN_FLAG_LOW_SPAN) && - !wiphy_ext_feature_isset(wiphy, - NL80211_EXT_FEATURE_LOW_SPAN_SCAN)) || - ((*flags & NL80211_SCAN_FLAG_LOW_POWER) && - !wiphy_ext_feature_isset(wiphy, - NL80211_EXT_FEATURE_LOW_POWER_SCAN)) || - ((*flags & NL80211_SCAN_FLAG_HIGH_ACCURACY) && - !wiphy_ext_feature_isset(wiphy, - NL80211_EXT_FEATURE_HIGH_ACCURACY_SCAN))) + !nl80211_check_scan_feat(wiphy, *flags, + NL80211_SCAN_FLAG_LOW_SPAN, + NL80211_EXT_FEATURE_LOW_SPAN_SCAN) || + !nl80211_check_scan_feat(wiphy, *flags, + NL80211_SCAN_FLAG_LOW_POWER, + NL80211_EXT_FEATURE_LOW_POWER_SCAN) || + !nl80211_check_scan_feat(wiphy, *flags, + NL80211_SCAN_FLAG_HIGH_ACCURACY, + NL80211_EXT_FEATURE_HIGH_ACCURACY_SCAN) || + !nl80211_check_scan_feat(wiphy, *flags, + NL80211_SCAN_FLAG_FILS_MAX_CHANNEL_TIME, + NL80211_EXT_FEATURE_FILS_MAX_CHANNEL_TIME) || + !nl80211_check_scan_feat(wiphy, *flags, + NL80211_SCAN_FLAG_ACCEPT_BCAST_PROBE_RESP, + NL80211_EXT_FEATURE_ACCEPT_BCAST_PROBE_RESP) || + !nl80211_check_scan_feat(wiphy, *flags, + NL80211_SCAN_FLAG_OCE_PROBE_REQ_DEFERRAL_SUPPRESSION, + NL80211_EXT_FEATURE_OCE_PROBE_REQ_DEFERRAL_SUPPRESSION) || + !nl80211_check_scan_feat(wiphy, *flags, + NL80211_SCAN_FLAG_OCE_PROBE_REQ_HIGH_TX_RATE, + NL80211_EXT_FEATURE_OCE_PROBE_REQ_HIGH_TX_RATE) || + !nl80211_check_scan_feat(wiphy, *flags, + NL80211_SCAN_FLAG_RANDOM_SN, + NL80211_EXT_FEATURE_SCAN_RANDOM_SN) || + !nl80211_check_scan_feat(wiphy, *flags, + NL80211_SCAN_FLAG_MIN_PREQ_CONTENT, + NL80211_EXT_FEATURE_SCAN_MIN_PREQ_CONTENT)) return -EOPNOTSUPP; if (*flags & NL80211_SCAN_FLAG_RANDOM_ADDR) { @@ -6905,26 +7031,6 @@ nl80211_check_scan_flags(struct wiphy *wiphy, struct wireless_dev *wdev, return err; } - if ((*flags & NL80211_SCAN_FLAG_FILS_MAX_CHANNEL_TIME) && - !wiphy_ext_feature_isset(wiphy, - NL80211_EXT_FEATURE_FILS_MAX_CHANNEL_TIME)) - return -EOPNOTSUPP; - - if ((*flags & NL80211_SCAN_FLAG_ACCEPT_BCAST_PROBE_RESP) && - !wiphy_ext_feature_isset(wiphy, - NL80211_EXT_FEATURE_ACCEPT_BCAST_PROBE_RESP)) - return -EOPNOTSUPP; - - if ((*flags & NL80211_SCAN_FLAG_OCE_PROBE_REQ_DEFERRAL_SUPPRESSION) && - !wiphy_ext_feature_isset(wiphy, - NL80211_EXT_FEATURE_OCE_PROBE_REQ_DEFERRAL_SUPPRESSION)) - return -EOPNOTSUPP; - - if ((*flags & NL80211_SCAN_FLAG_OCE_PROBE_REQ_HIGH_TX_RATE) && - !wiphy_ext_feature_isset(wiphy, - NL80211_EXT_FEATURE_OCE_PROBE_REQ_HIGH_TX_RATE)) - return -EOPNOTSUPP; - return 0; } @@ -10147,7 +10253,7 @@ static int cfg80211_cqm_rssi_update(struct cfg80211_registered_device *rdev, if (err) return err; - if (sinfo.filled & BIT(NL80211_STA_INFO_BEACON_SIGNAL_AVG)) + if (sinfo.filled & BIT_ULL(NL80211_STA_INFO_BEACON_SIGNAL_AVG)) wdev->cqm_config->last_rssi_event_value = (s8) sinfo.rx_beacon_signal_avg; } diff --git a/net/wireless/sysfs.c b/net/wireless/sysfs.c index 570a2b67ca10..6ab32f6a1961 100644 --- a/net/wireless/sysfs.c +++ b/net/wireless/sysfs.c @@ -102,7 +102,7 @@ static int 
wiphy_suspend(struct device *dev) struct cfg80211_registered_device *rdev = dev_to_rdev(dev); int ret = 0; - rdev->suspend_at = get_seconds(); + rdev->suspend_at = ktime_get_boottime_seconds(); rtnl_lock(); if (rdev->wiphy.registered) { @@ -130,7 +130,7 @@ static int wiphy_resume(struct device *dev) int ret = 0; /* Age scan results with time spent in suspend */ - cfg80211_bss_age(rdev, get_seconds() - rdev->suspend_at); + cfg80211_bss_age(rdev, ktime_get_boottime_seconds() - rdev->suspend_at); rtnl_lock(); if (rdev->wiphy.registered && rdev->ops->resume) diff --git a/net/wireless/util.c b/net/wireless/util.c index 3c654cd7ba56..e0825a019e9f 100644 --- a/net/wireless/util.c +++ b/net/wireless/util.c @@ -4,6 +4,7 @@ * * Copyright 2007-2009 Johannes Berg <johannes@sipsolutions.net> * Copyright 2013-2014 Intel Mobile Communications GmbH + * Copyright 2017 Intel Deutschland GmbH */ #include <linux/export.h> #include <linux/bitops.h> @@ -1142,6 +1143,85 @@ static u32 cfg80211_calculate_bitrate_vht(struct rate_info *rate) return 0; } +static u32 cfg80211_calculate_bitrate_he(struct rate_info *rate) +{ +#define SCALE 2048 + u16 mcs_divisors[12] = { + 34133, /* 16.666666... */ + 17067, /* 8.333333... */ + 11378, /* 5.555555... */ + 8533, /* 4.166666... */ + 5689, /* 2.777777... */ + 4267, /* 2.083333... */ + 3923, /* 1.851851... */ + 3413, /* 1.666666... */ + 2844, /* 1.388888... */ + 2560, /* 1.250000... */ + 2276, /* 1.111111... */ + 2048, /* 1.000000... */ + }; + u32 rates_160M[3] = { 960777777, 907400000, 816666666 }; + u32 rates_969[3] = { 480388888, 453700000, 408333333 }; + u32 rates_484[3] = { 229411111, 216666666, 195000000 }; + u32 rates_242[3] = { 114711111, 108333333, 97500000 }; + u32 rates_106[3] = { 40000000, 37777777, 34000000 }; + u32 rates_52[3] = { 18820000, 17777777, 16000000 }; + u32 rates_26[3] = { 9411111, 8888888, 8000000 }; + u64 tmp; + u32 result; + + if (WARN_ON_ONCE(rate->mcs > 11)) + return 0; + + if (WARN_ON_ONCE(rate->he_gi > NL80211_RATE_INFO_HE_GI_3_2)) + return 0; + if (WARN_ON_ONCE(rate->he_ru_alloc > + NL80211_RATE_INFO_HE_RU_ALLOC_2x996)) + return 0; + if (WARN_ON_ONCE(rate->nss < 1 || rate->nss > 8)) + return 0; + + if (rate->bw == RATE_INFO_BW_160) + result = rates_160M[rate->he_gi]; + else if (rate->bw == RATE_INFO_BW_80 || + (rate->bw == RATE_INFO_BW_HE_RU && + rate->he_ru_alloc == NL80211_RATE_INFO_HE_RU_ALLOC_996)) + result = rates_969[rate->he_gi]; + else if (rate->bw == RATE_INFO_BW_40 || + (rate->bw == RATE_INFO_BW_HE_RU && + rate->he_ru_alloc == NL80211_RATE_INFO_HE_RU_ALLOC_484)) + result = rates_484[rate->he_gi]; + else if (rate->bw == RATE_INFO_BW_20 || + (rate->bw == RATE_INFO_BW_HE_RU && + rate->he_ru_alloc == NL80211_RATE_INFO_HE_RU_ALLOC_242)) + result = rates_242[rate->he_gi]; + else if (rate->bw == RATE_INFO_BW_HE_RU && + rate->he_ru_alloc == NL80211_RATE_INFO_HE_RU_ALLOC_106) + result = rates_106[rate->he_gi]; + else if (rate->bw == RATE_INFO_BW_HE_RU && + rate->he_ru_alloc == NL80211_RATE_INFO_HE_RU_ALLOC_52) + result = rates_52[rate->he_gi]; + else if (rate->bw == RATE_INFO_BW_HE_RU && + rate->he_ru_alloc == NL80211_RATE_INFO_HE_RU_ALLOC_26) + result = rates_26[rate->he_gi]; + else if (WARN(1, "invalid HE MCS: bw:%d, ru:%d\n", + rate->bw, rate->he_ru_alloc)) + return 0; + + /* now scale to the appropriate MCS */ + tmp = result; + tmp *= SCALE; + do_div(tmp, mcs_divisors[rate->mcs]); + result = tmp; + + /* and take NSS, DCM into account */ + result = (result * rate->nss) / 8; + if (rate->he_dcm) + result /= 2; + + return result; +} + u32 
cfg80211_calculate_bitrate(struct rate_info *rate) { if (rate->flags & RATE_INFO_FLAGS_MCS) @@ -1150,6 +1230,8 @@ u32 cfg80211_calculate_bitrate(struct rate_info *rate) return cfg80211_calculate_bitrate_60g(rate); if (rate->flags & RATE_INFO_FLAGS_VHT_MCS) return cfg80211_calculate_bitrate_vht(rate); + if (rate->flags & RATE_INFO_FLAGS_HE_MCS) + return cfg80211_calculate_bitrate_he(rate); return rate->legacy; } @@ -1791,8 +1873,9 @@ bool cfg80211_does_bw_fit_range(const struct ieee80211_freq_range *freq_range, int cfg80211_sinfo_alloc_tid_stats(struct station_info *sinfo, gfp_t gfp) { - sinfo->pertid = kcalloc(sizeof(*(sinfo->pertid)), - IEEE80211_NUM_TIDS + 1, gfp); + sinfo->pertid = kcalloc(IEEE80211_NUM_TIDS + 1, + sizeof(*(sinfo->pertid)), + gfp); if (!sinfo->pertid) return -ENOMEM; diff --git a/net/wireless/wext-compat.c b/net/wireless/wext-compat.c index 05186a47878f..167f7025ac98 100644 --- a/net/wireless/wext-compat.c +++ b/net/wireless/wext-compat.c @@ -1278,7 +1278,7 @@ static int cfg80211_wext_giwrate(struct net_device *dev, if (err) return err; - if (!(sinfo.filled & BIT(NL80211_STA_INFO_TX_BITRATE))) + if (!(sinfo.filled & BIT_ULL(NL80211_STA_INFO_TX_BITRATE))) return -EOPNOTSUPP; rate->value = 100000 * cfg80211_calculate_bitrate(&sinfo.txrate); @@ -1320,7 +1320,7 @@ static struct iw_statistics *cfg80211_wireless_stats(struct net_device *dev) switch (rdev->wiphy.signal_type) { case CFG80211_SIGNAL_TYPE_MBM: - if (sinfo.filled & BIT(NL80211_STA_INFO_SIGNAL)) { + if (sinfo.filled & BIT_ULL(NL80211_STA_INFO_SIGNAL)) { int sig = sinfo.signal; wstats.qual.updated |= IW_QUAL_LEVEL_UPDATED; wstats.qual.updated |= IW_QUAL_QUAL_UPDATED; @@ -1334,7 +1334,7 @@ static struct iw_statistics *cfg80211_wireless_stats(struct net_device *dev) break; } case CFG80211_SIGNAL_TYPE_UNSPEC: - if (sinfo.filled & BIT(NL80211_STA_INFO_SIGNAL)) { + if (sinfo.filled & BIT_ULL(NL80211_STA_INFO_SIGNAL)) { wstats.qual.updated |= IW_QUAL_LEVEL_UPDATED; wstats.qual.updated |= IW_QUAL_QUAL_UPDATED; wstats.qual.level = sinfo.signal; @@ -1347,9 +1347,9 @@ static struct iw_statistics *cfg80211_wireless_stats(struct net_device *dev) } wstats.qual.updated |= IW_QUAL_NOISE_INVALID; - if (sinfo.filled & BIT(NL80211_STA_INFO_RX_DROP_MISC)) + if (sinfo.filled & BIT_ULL(NL80211_STA_INFO_RX_DROP_MISC)) wstats.discard.misc = sinfo.rx_dropped_misc; - if (sinfo.filled & BIT(NL80211_STA_INFO_TX_FAILED)) + if (sinfo.filled & BIT_ULL(NL80211_STA_INFO_TX_FAILED)) wstats.discard.retries = sinfo.tx_failed; return &wstats; diff --git a/samples/bpf/Makefile b/samples/bpf/Makefile index 1303af10e54d..9ea2f7b64869 100644 --- a/samples/bpf/Makefile +++ b/samples/bpf/Makefile @@ -52,6 +52,7 @@ hostprogs-y += xdp_adjust_tail hostprogs-y += xdpsock hostprogs-y += xdp_fwd hostprogs-y += task_fd_query +hostprogs-y += xdp_sample_pkts # Libbpf dependencies LIBBPF = $(TOOLS_PATH)/lib/bpf/libbpf.a @@ -107,6 +108,7 @@ xdp_adjust_tail-objs := xdp_adjust_tail_user.o xdpsock-objs := bpf_load.o xdpsock_user.o xdp_fwd-objs := bpf_load.o xdp_fwd_user.o task_fd_query-objs := bpf_load.o task_fd_query_user.o $(TRACE_HELPERS) +xdp_sample_pkts-objs := xdp_sample_pkts_user.o $(TRACE_HELPERS) # Tell kbuild to always build the programs always := $(hostprogs-y) @@ -163,6 +165,7 @@ always += xdp_adjust_tail_kern.o always += xdpsock_kern.o always += xdp_fwd_kern.o always += task_fd_query_kern.o +always += xdp_sample_pkts_kern.o HOSTCFLAGS += -I$(objtree)/usr/include HOSTCFLAGS += -I$(srctree)/tools/lib/ @@ -179,6 +182,7 @@ HOSTCFLAGS_spintest_user.o += 
-I$(srctree)/tools/lib/bpf/ HOSTCFLAGS_trace_event_user.o += -I$(srctree)/tools/lib/bpf/ HOSTCFLAGS_sampleip_user.o += -I$(srctree)/tools/lib/bpf/ HOSTCFLAGS_task_fd_query_user.o += -I$(srctree)/tools/lib/bpf/ +HOSTCFLAGS_xdp_sample_pkts_user.o += -I$(srctree)/tools/lib/bpf/ HOST_LOADLIBES += $(LIBBPF) -lelf HOSTLOADLIBES_tracex4 += -lrt diff --git a/samples/bpf/xdp_redirect_cpu_kern.c b/samples/bpf/xdp_redirect_cpu_kern.c index 303e9e7161f3..8cb703671b04 100644 --- a/samples/bpf/xdp_redirect_cpu_kern.c +++ b/samples/bpf/xdp_redirect_cpu_kern.c @@ -134,7 +134,16 @@ bool parse_eth(struct ethhdr *eth, void *data_end, return false; eth_type = vlan_hdr->h_vlan_encapsulated_proto; } - /* TODO: Handle double VLAN tagged packet */ + /* Handle double VLAN tagged packet */ + if (eth_type == htons(ETH_P_8021Q) || eth_type == htons(ETH_P_8021AD)) { + struct vlan_hdr *vlan_hdr; + + vlan_hdr = (void *)eth + offset; + offset += sizeof(*vlan_hdr); + if ((void *)eth + offset > data_end) + return false; + eth_type = vlan_hdr->h_vlan_encapsulated_proto; + } *eth_proto = ntohs(eth_type); *l3_offset = offset; diff --git a/samples/bpf/xdp_rxq_info_kern.c b/samples/bpf/xdp_rxq_info_kern.c index 3fd209291653..222a83eed1cb 100644 --- a/samples/bpf/xdp_rxq_info_kern.c +++ b/samples/bpf/xdp_rxq_info_kern.c @@ -4,6 +4,8 @@ * Example howto extract XDP RX-queue info */ #include <uapi/linux/bpf.h> +#include <uapi/linux/if_ether.h> +#include <uapi/linux/in.h> #include "bpf_helpers.h" /* Config setup from with userspace @@ -14,6 +16,12 @@ struct config { __u32 action; int ifindex; + __u32 options; +}; +enum cfg_options_flags { + NO_TOUCH = 0x0U, + READ_MEM = 0x1U, + SWAP_MAC = 0x2U, }; struct bpf_map_def SEC("maps") config_map = { .type = BPF_MAP_TYPE_ARRAY, @@ -45,6 +53,23 @@ struct bpf_map_def SEC("maps") rx_queue_index_map = { .max_entries = MAX_RXQs + 1, }; +static __always_inline +void swap_src_dst_mac(void *data) +{ + unsigned short *p = data; + unsigned short dst[3]; + + dst[0] = p[0]; + dst[1] = p[1]; + dst[2] = p[2]; + p[0] = p[3]; + p[1] = p[4]; + p[2] = p[5]; + p[3] = dst[0]; + p[4] = dst[1]; + p[5] = dst[2]; +} + SEC("xdp_prog0") int xdp_prognum0(struct xdp_md *ctx) { @@ -90,6 +115,24 @@ int xdp_prognum0(struct xdp_md *ctx) if (key == MAX_RXQs) rxq_rec->issue++; + /* Default: Don't touch packet data, only count packets */ + if (unlikely(config->options & (READ_MEM|SWAP_MAC))) { + struct ethhdr *eth = data; + + if (eth + 1 > data_end) + return XDP_ABORTED; + + /* Avoid compiler removing this: Drop non 802.3 Ethertypes */ + if (ntohs(eth->h_proto) < ETH_P_802_3_MIN) + return XDP_ABORTED; + + /* XDP_TX requires changing MAC-addrs, else HW may drop. 
+ * Can also be enabled with --swapmac (for test purposes) + */ + if (unlikely(config->options & SWAP_MAC)) + swap_src_dst_mac(data); + } + return config->action; } diff --git a/samples/bpf/xdp_rxq_info_user.c b/samples/bpf/xdp_rxq_info_user.c index e4e9ba52bff0..248a7eab9531 100644 --- a/samples/bpf/xdp_rxq_info_user.c +++ b/samples/bpf/xdp_rxq_info_user.c @@ -50,6 +50,8 @@ static const struct option long_options[] = { {"sec", required_argument, NULL, 's' }, {"no-separators", no_argument, NULL, 'z' }, {"action", required_argument, NULL, 'a' }, + {"readmem", no_argument, NULL, 'r' }, + {"swapmac", no_argument, NULL, 'm' }, {0, 0, NULL, 0 } }; @@ -66,6 +68,12 @@ static void int_exit(int sig) struct config { __u32 action; int ifindex; + __u32 options; +}; +enum cfg_options_flags { + NO_TOUCH = 0x0U, + READ_MEM = 0x1U, + SWAP_MAC = 0x2U, }; #define XDP_ACTION_MAX (XDP_TX + 1) #define XDP_ACTION_MAX_STRLEN 11 @@ -109,6 +117,18 @@ static void list_xdp_actions(void) printf("\n"); } +static char* options2str(enum cfg_options_flags flag) +{ + if (flag == NO_TOUCH) + return "no_touch"; + if (flag & SWAP_MAC) + return "swapmac"; + if (flag & READ_MEM) + return "read"; + fprintf(stderr, "ERR: Unknown config option flags"); + exit(EXIT_FAIL); +} + static void usage(char *argv[]) { int i; @@ -305,7 +325,7 @@ static __u64 calc_errs_pps(struct datarec *r, static void stats_print(struct stats_record *stats_rec, struct stats_record *stats_prev, - int action) + int action, __u32 cfg_opt) { unsigned int nr_rxqs = bpf_map__def(rx_queue_index_map)->max_entries; unsigned int nr_cpus = bpf_num_possible_cpus(); @@ -316,8 +336,8 @@ static void stats_print(struct stats_record *stats_rec, int i; /* Header */ - printf("\nRunning XDP on dev:%s (ifindex:%d) action:%s\n", - ifname, ifindex, action2str(action)); + printf("\nRunning XDP on dev:%s (ifindex:%d) action:%s options:%s\n", + ifname, ifindex, action2str(action), options2str(cfg_opt)); /* stats_global_map */ { @@ -399,7 +419,7 @@ static inline void swap(struct stats_record **a, struct stats_record **b) *b = tmp; } -static void stats_poll(int interval, int action) +static void stats_poll(int interval, int action, __u32 cfg_opt) { struct stats_record *record, *prev; @@ -410,7 +430,7 @@ static void stats_poll(int interval, int action) while (1) { swap(&prev, &record); stats_collect(record); - stats_print(record, prev, action); + stats_print(record, prev, action, cfg_opt); sleep(interval); } @@ -421,6 +441,7 @@ static void stats_poll(int interval, int action) int main(int argc, char **argv) { + __u32 cfg_options= NO_TOUCH ; /* Default: Don't touch packet memory */ struct rlimit r = {10 * 1024 * 1024, RLIM_INFINITY}; struct bpf_prog_load_attr prog_load_attr = { .prog_type = BPF_PROG_TYPE_XDP, @@ -435,6 +456,7 @@ int main(int argc, char **argv) int interval = 2; __u32 key = 0; + char action_str_buf[XDP_ACTION_MAX_STRLEN + 1 /* for \0 */] = { 0 }; int action = XDP_PASS; /* Default action */ char *action_str = NULL; @@ -496,6 +518,12 @@ int main(int argc, char **argv) action_str = (char *)&action_str_buf; strncpy(action_str, optarg, XDP_ACTION_MAX_STRLEN); break; + case 'r': + cfg_options |= READ_MEM; + break; + case 'm': + cfg_options |= SWAP_MAC; + break; case 'h': error: default: @@ -523,6 +551,11 @@ int main(int argc, char **argv) } cfg.action = action; + /* XDP_TX requires changing MAC-addrs, else HW may drop */ + if (action == XDP_TX) + cfg_options |= SWAP_MAC; + cfg.options = cfg_options; + /* Trick to pretty printf with thousands separators use %' */ if 
(use_separators) setlocale(LC_NUMERIC, "en_US"); @@ -542,6 +575,6 @@ int main(int argc, char **argv) return EXIT_FAIL_XDP; } - stats_poll(interval, action); + stats_poll(interval, action, cfg_options); return EXIT_OK; } diff --git a/samples/bpf/xdp_sample_pkts_kern.c b/samples/bpf/xdp_sample_pkts_kern.c new file mode 100644 index 000000000000..f7ca8b850978 --- /dev/null +++ b/samples/bpf/xdp_sample_pkts_kern.c @@ -0,0 +1,66 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <linux/ptrace.h> +#include <linux/version.h> +#include <uapi/linux/bpf.h> +#include "bpf_helpers.h" + +#define SAMPLE_SIZE 64ul +#define MAX_CPUS 128 + +#define bpf_printk(fmt, ...) \ +({ \ + char ____fmt[] = fmt; \ + bpf_trace_printk(____fmt, sizeof(____fmt), \ + ##__VA_ARGS__); \ +}) + +struct bpf_map_def SEC("maps") my_map = { + .type = BPF_MAP_TYPE_PERF_EVENT_ARRAY, + .key_size = sizeof(int), + .value_size = sizeof(u32), + .max_entries = MAX_CPUS, +}; + +SEC("xdp_sample") +int xdp_sample_prog(struct xdp_md *ctx) +{ + void *data_end = (void *)(long)ctx->data_end; + void *data = (void *)(long)ctx->data; + + /* Metadata will be in the perf event before the packet data. */ + struct S { + u16 cookie; + u16 pkt_len; + } __packed metadata; + + if (data < data_end) { + /* The XDP perf_event_output handler will use the upper 32 bits + * of the flags argument as a number of bytes to include of the + * packet payload in the event data. If the size is too big, the + * call to bpf_perf_event_output will fail and return -EFAULT. + * + * See bpf_xdp_event_output in net/core/filter.c. + * + * The BPF_F_CURRENT_CPU flag means that the event output fd + * will be indexed by the CPU number in the event map. + */ + u64 flags = BPF_F_CURRENT_CPU; + u16 sample_size; + int ret; + + metadata.cookie = 0xdead; + metadata.pkt_len = (u16)(data_end - data); + sample_size = min(metadata.pkt_len, SAMPLE_SIZE); + flags |= (u64)sample_size << 32; + + ret = bpf_perf_event_output(ctx, &my_map, flags, + &metadata, sizeof(metadata)); + if (ret) + bpf_printk("perf_event_output failed: %d\n", ret); + } + + return XDP_PASS; +} + +char _license[] SEC("license") = "GPL"; +u32 _version SEC("version") = LINUX_VERSION_CODE; diff --git a/samples/bpf/xdp_sample_pkts_user.c b/samples/bpf/xdp_sample_pkts_user.c new file mode 100644 index 000000000000..8dd87c1eb560 --- /dev/null +++ b/samples/bpf/xdp_sample_pkts_user.c @@ -0,0 +1,169 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <stdio.h> +#include <stdlib.h> +#include <string.h> +#include <linux/perf_event.h> +#include <linux/bpf.h> +#include <net/if.h> +#include <errno.h> +#include <assert.h> +#include <sys/sysinfo.h> +#include <sys/ioctl.h> +#include <signal.h> +#include <libbpf.h> +#include <bpf/bpf.h> + +#include "perf-sys.h" +#include "trace_helpers.h" + +#define MAX_CPUS 128 +static int pmu_fds[MAX_CPUS], if_idx; +static struct perf_event_mmap_page *headers[MAX_CPUS]; +static char *if_name; + +static int do_attach(int idx, int fd, const char *name) +{ + int err; + + err = bpf_set_link_xdp_fd(idx, fd, 0); + if (err < 0) + printf("ERROR: failed to attach program to %s\n", name); + + return err; +} + +static int do_detach(int idx, const char *name) +{ + int err; + + err = bpf_set_link_xdp_fd(idx, -1, 0); + if (err < 0) + printf("ERROR: failed to detach program from %s\n", name); + + return err; +} + +#define SAMPLE_SIZE 64 + +static int print_bpf_output(void *data, int size) +{ + struct { + __u16 cookie; + __u16 pkt_len; + __u8 pkt_data[SAMPLE_SIZE]; + } __packed *e = data; + int i; + + if (e->cookie != 
0xdead) { + printf("BUG cookie %x sized %d\n", + e->cookie, size); + return LIBBPF_PERF_EVENT_ERROR; + } + + printf("Pkt len: %-5d bytes. Ethernet hdr: ", e->pkt_len); + for (i = 0; i < 14 && i < e->pkt_len; i++) + printf("%02x ", e->pkt_data[i]); + printf("\n"); + + return LIBBPF_PERF_EVENT_CONT; +} + +static void test_bpf_perf_event(int map_fd, int num) +{ + struct perf_event_attr attr = { + .sample_type = PERF_SAMPLE_RAW, + .type = PERF_TYPE_SOFTWARE, + .config = PERF_COUNT_SW_BPF_OUTPUT, + .wakeup_events = 1, /* get an fd notification for every event */ + }; + int i; + + for (i = 0; i < num; i++) { + int key = i; + + pmu_fds[i] = sys_perf_event_open(&attr, -1/*pid*/, i/*cpu*/, + -1/*group_fd*/, 0); + + assert(pmu_fds[i] >= 0); + assert(bpf_map_update_elem(map_fd, &key, + &pmu_fds[i], BPF_ANY) == 0); + ioctl(pmu_fds[i], PERF_EVENT_IOC_ENABLE, 0); + } +} + +static void sig_handler(int signo) +{ + do_detach(if_idx, if_name); + exit(0); +} + +int main(int argc, char **argv) +{ + struct bpf_prog_load_attr prog_load_attr = { + .prog_type = BPF_PROG_TYPE_XDP, + }; + struct bpf_object *obj; + struct bpf_map *map; + int prog_fd, map_fd; + char filename[256]; + int ret, err, i; + int numcpus; + + if (argc < 2) { + printf("Usage: %s <ifname>\n", argv[0]); + return 1; + } + + numcpus = get_nprocs(); + if (numcpus > MAX_CPUS) + numcpus = MAX_CPUS; + + snprintf(filename, sizeof(filename), "%s_kern.o", argv[0]); + prog_load_attr.file = filename; + + if (bpf_prog_load_xattr(&prog_load_attr, &obj, &prog_fd)) + return 1; + + if (!prog_fd) { + printf("load_bpf_file: %s\n", strerror(errno)); + return 1; + } + + map = bpf_map__next(NULL, obj); + if (!map) { + printf("finding a map in obj file failed\n"); + return 1; + } + map_fd = bpf_map__fd(map); + + if_idx = if_nametoindex(argv[1]); + if (!if_idx) + if_idx = strtoul(argv[1], NULL, 0); + + if (!if_idx) { + fprintf(stderr, "Invalid ifname\n"); + return 1; + } + if_name = argv[1]; + err = do_attach(if_idx, prog_fd, argv[1]); + if (err) + return err; + + if (signal(SIGINT, sig_handler) || + signal(SIGHUP, sig_handler) || + signal(SIGTERM, sig_handler)) { + perror("signal"); + return 1; + } + + test_bpf_perf_event(map_fd, numcpus); + + for (i = 0; i < numcpus; i++) + if (perf_event_mmap_header(pmu_fds[i], &headers[i]) < 0) + return 1; + + ret = perf_event_poller_multi(pmu_fds, headers, numcpus, + print_bpf_output); + kill(0, SIGINT); + return ret; +} diff --git a/tools/bpf/Makefile.helpers b/tools/bpf/Makefile.helpers new file mode 100644 index 000000000000..c34fea77f39f --- /dev/null +++ b/tools/bpf/Makefile.helpers @@ -0,0 +1,59 @@ +ifndef allow-override + include ../scripts/Makefile.include + include ../scripts/utilities.mak +else + # Assume Makefile.helpers is being run from bpftool/Documentation + # subdirectory. Go up two more directories to fetch bpf.h header and + # associated script. 
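+	# (With UP2DIR set to ../../, the $(UP2DIR)../../ prefix in the rules
+	# below resolves to the kernel source root when this file is included
+	# from bpftool/Documentation, matching the plain ../../ path used when
+	# it is run directly from tools/bpf/.)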
+ UP2DIR := ../../ +endif + +INSTALL ?= install +RM ?= rm -f +RMDIR ?= rmdir --ignore-fail-on-non-empty + +ifeq ($(V),1) + Q = +else + Q = @ +endif + +prefix ?= /usr/local +mandir ?= $(prefix)/man +man7dir = $(mandir)/man7 + +HELPERS_RST = bpf-helpers.rst +MAN7_RST = $(HELPERS_RST) + +_DOC_MAN7 = $(patsubst %.rst,%.7,$(MAN7_RST)) +DOC_MAN7 = $(addprefix $(OUTPUT),$(_DOC_MAN7)) + +helpers: man7 +man7: $(DOC_MAN7) + +RST2MAN_DEP := $(shell command -v rst2man 2>/dev/null) + +$(OUTPUT)$(HELPERS_RST): $(UP2DIR)../../include/uapi/linux/bpf.h + $(QUIET_GEN)$(UP2DIR)../../scripts/bpf_helpers_doc.py --filename $< > $@ + +$(OUTPUT)%.7: $(OUTPUT)%.rst +ifndef RST2MAN_DEP + $(error "rst2man not found, but required to generate man pages") +endif + $(QUIET_GEN)rst2man $< > $@ + +helpers-clean: + $(call QUIET_CLEAN, eBPF_helpers-manpage) + $(Q)$(RM) $(DOC_MAN7) $(OUTPUT)$(HELPERS_RST) + +helpers-install: helpers + $(call QUIET_INSTALL, eBPF_helpers-manpage) + $(Q)$(INSTALL) -d -m 755 $(DESTDIR)$(man7dir) + $(Q)$(INSTALL) -m 644 $(DOC_MAN7) $(DESTDIR)$(man7dir) + +helpers-uninstall: + $(call QUIET_UNINST, eBPF_helpers-manpage) + $(Q)$(RM) $(addprefix $(DESTDIR)$(man7dir)/,$(_DOC_MAN7)) + $(Q)$(RMDIR) $(DESTDIR)$(man7dir) + +.PHONY: helpers helpers-clean helpers-install helpers-uninstall diff --git a/tools/bpf/bpftool/Documentation/Makefile b/tools/bpf/bpftool/Documentation/Makefile index a9d47c1558bb..f7663a3e60c9 100644 --- a/tools/bpf/bpftool/Documentation/Makefile +++ b/tools/bpf/bpftool/Documentation/Makefile @@ -15,12 +15,15 @@ prefix ?= /usr/local mandir ?= $(prefix)/man man8dir = $(mandir)/man8 -MAN8_RST = $(wildcard *.rst) +# Load targets for building eBPF helpers man page. +include ../../Makefile.helpers + +MAN8_RST = $(filter-out $(HELPERS_RST),$(wildcard *.rst)) _DOC_MAN8 = $(patsubst %.rst,%.8,$(MAN8_RST)) DOC_MAN8 = $(addprefix $(OUTPUT),$(_DOC_MAN8)) -man: man8 +man: man8 helpers man8: $(DOC_MAN8) RST2MAN_DEP := $(shell command -v rst2man 2>/dev/null) @@ -31,16 +34,16 @@ ifndef RST2MAN_DEP endif $(QUIET_GEN)rst2man $< > $@ -clean: +clean: helpers-clean $(call QUIET_CLEAN, Documentation) $(Q)$(RM) $(DOC_MAN8) -install: man +install: man helpers-install $(call QUIET_INSTALL, Documentation-man) $(Q)$(INSTALL) -d -m 755 $(DESTDIR)$(man8dir) $(Q)$(INSTALL) -m 644 $(DOC_MAN8) $(DESTDIR)$(man8dir) -uninstall: +uninstall: helpers-uninstall $(call QUIET_UNINST, Documentation-man) $(Q)$(RM) $(addprefix $(DESTDIR)$(man8dir)/,$(_DOC_MAN8)) $(Q)$(RMDIR) $(DESTDIR)$(man8dir) diff --git a/tools/bpf/bpftool/Documentation/bpftool-cgroup.rst b/tools/bpf/bpftool/Documentation/bpftool-cgroup.rst index 7b0e6d453e92..edbe81534c6d 100644 --- a/tools/bpf/bpftool/Documentation/bpftool-cgroup.rst +++ b/tools/bpf/bpftool/Documentation/bpftool-cgroup.rst @@ -15,12 +15,13 @@ SYNOPSIS *OPTIONS* := { { **-j** | **--json** } [{ **-p** | **--pretty** }] | { **-f** | **--bpffs** } } *COMMANDS* := - { **show** | **list** | **attach** | **detach** | **help** } + { **show** | **list** | **tree** | **attach** | **detach** | **help** } MAP COMMANDS ============= | **bpftool** **cgroup { show | list }** *CGROUP* +| **bpftool** **cgroup tree** [*CGROUP_ROOT*] | **bpftool** **cgroup attach** *CGROUP* *ATTACH_TYPE* *PROG* [*ATTACH_FLAGS*] | **bpftool** **cgroup detach** *CGROUP* *ATTACH_TYPE* *PROG* | **bpftool** **cgroup help** @@ -39,6 +40,15 @@ DESCRIPTION Output will start with program ID followed by attach type, attach flags and program name. 
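	As a purely illustrative sketch (the ID, flags and program name here
	are made up, not taken from this patch), a listing printed by
	**bpftool cgroup list** *CGROUP* would look roughly like:

	    ID       AttachType      AttachFlags     Name
	    42       ingress                         sample_skb_prog

	with one row per attached program.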
+ **bpftool cgroup tree** [*CGROUP_ROOT*] + Iterate over all cgroups in *CGROUP_ROOT* and list all + attached programs. If *CGROUP_ROOT* is not specified, + bpftool uses cgroup v2 mountpoint. + + The output is similar to the output of cgroup show/list + commands: it starts with absolute cgroup path, followed by + program ID, attach type, attach flags and program name. + **bpftool cgroup attach** *CGROUP* *ATTACH_TYPE* *PROG* [*ATTACH_FLAGS*] Attach program *PROG* to the cgroup *CGROUP* with attach type *ATTACH_TYPE* and optional *ATTACH_FLAGS*. diff --git a/tools/bpf/bpftool/Documentation/bpftool-prog.rst b/tools/bpf/bpftool/Documentation/bpftool-prog.rst index 43d34a5c3ec5..64156a16d530 100644 --- a/tools/bpf/bpftool/Documentation/bpftool-prog.rst +++ b/tools/bpf/bpftool/Documentation/bpftool-prog.rst @@ -24,10 +24,20 @@ MAP COMMANDS | **bpftool** **prog dump xlated** *PROG* [{**file** *FILE* | **opcodes** | **visual**}] | **bpftool** **prog dump jited** *PROG* [{**file** *FILE* | **opcodes**}] | **bpftool** **prog pin** *PROG* *FILE* -| **bpftool** **prog load** *OBJ* *FILE* +| **bpftool** **prog load** *OBJ* *FILE* [**type** *TYPE*] [**map** {**idx** *IDX* | **name** *NAME*} *MAP*] [**dev** *NAME*] | **bpftool** **prog help** | +| *MAP* := { **id** *MAP_ID* | **pinned** *FILE* } | *PROG* := { **id** *PROG_ID* | **pinned** *FILE* | **tag** *PROG_TAG* } +| *TYPE* := { +| **socket** | **kprobe** | **kretprobe** | **classifier** | **action** | +| **tracepoint** | **raw_tracepoint** | **xdp** | **perf_event** | **cgroup/skb** | +| **cgroup/sock** | **cgroup/dev** | **lwt_in** | **lwt_out** | **lwt_xmit** | +| **lwt_seg6local** | **sockops** | **sk_skb** | **sk_msg** | **lirc_mode2** | +| **cgroup/bind4** | **cgroup/bind6** | **cgroup/post_bind4** | **cgroup/post_bind6** | +| **cgroup/connect4** | **cgroup/connect6** | **cgroup/sendmsg4** | **cgroup/sendmsg6** +| } + DESCRIPTION =========== @@ -64,8 +74,19 @@ DESCRIPTION Note: *FILE* must be located in *bpffs* mount. - **bpftool prog load** *OBJ* *FILE* + **bpftool prog load** *OBJ* *FILE* [**type** *TYPE*] [**map** {**idx** *IDX* | **name** *NAME*} *MAP*] [**dev** *NAME*] Load bpf program from binary *OBJ* and pin as *FILE*. + **type** is optional, if not specified program type will be + inferred from section names. + By default bpftool will create new maps as declared in the ELF + object being loaded. **map** parameter allows for the reuse + of existing maps. It can be specified multiple times, each + time for a different map. *IDX* refers to index of the map + to be replaced in the ELF file counting from 0, while *NAME* + allows to replace a map by name. *MAP* specifies the map to + use, referring to it by **id** or through a **pinned** file. + If **dev** *NAME* is specified program will be loaded onto + given networking device (offload). Note: *FILE* must be located in *bpffs* mount. 
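	A hypothetical invocation combining these options (the object file,
	pin paths and interface name below are placeholders, not taken from
	this patch) could look like:

|	**# bpftool prog load sample_kern.o /sys/fs/bpf/sample type xdp map idx 0 pinned /sys/fs/bpf/shared_map dev eth0**

	which would reuse the already-pinned map for the first map declared
	in the ELF object instead of creating a new one, and load the program
	onto eth0 for offload.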
@@ -159,6 +180,14 @@ EXAMPLES mov %rbx,0x0(%rbp) 48 89 5d 00 +| +| **# bpftool prog load xdp1_kern.o /sys/fs/bpf/xdp1 type xdp map name rxcnt id 7** +| **# bpftool prog show pinned /sys/fs/bpf/xdp1** +| 9: xdp name xdp_prog1 tag 539ec6ce11b52f98 gpl +| loaded_at 2018-06-25T16:17:31-0700 uid 0 +| xlated 488B jited 336B memlock 4096B map_ids 7 +| **# rm /sys/fs/bpf/xdp1** +| SEE ALSO ======== diff --git a/tools/bpf/bpftool/Makefile b/tools/bpf/bpftool/Makefile index 892dbf095bff..6c4830e18879 100644 --- a/tools/bpf/bpftool/Makefile +++ b/tools/bpf/bpftool/Makefile @@ -23,7 +23,7 @@ endif LIBBPF = $(BPF_PATH)libbpf.a -BPFTOOL_VERSION=$(shell make --no-print-directory -sC ../../.. kernelversion) +BPFTOOL_VERSION := $(shell make --no-print-directory -sC ../../.. kernelversion) $(LIBBPF): FORCE $(Q)$(MAKE) -C $(BPF_DIR) OUTPUT=$(OUTPUT) $(OUTPUT)libbpf.a FEATURES_DUMP=$(FEATURE_DUMP_EXPORT) @@ -52,7 +52,7 @@ INSTALL ?= install RM ?= rm -f FEATURE_USER = .bpftool -FEATURE_TESTS = libbfd disassembler-four-args +FEATURE_TESTS = libbfd disassembler-four-args reallocarray FEATURE_DISPLAY = libbfd disassembler-four-args check_feat := 1 @@ -75,6 +75,10 @@ ifeq ($(feature-disassembler-four-args), 1) CFLAGS += -DDISASM_FOUR_ARGS_SIGNATURE endif +ifeq ($(feature-reallocarray), 0) +CFLAGS += -DCOMPAT_NEED_REALLOCARRAY +endif + include $(wildcard $(OUTPUT)*.d) all: $(OUTPUT)bpftool diff --git a/tools/bpf/bpftool/bash-completion/bpftool b/tools/bpf/bpftool/bash-completion/bpftool index 1e1083321643..598066c40191 100644 --- a/tools/bpf/bpftool/bash-completion/bpftool +++ b/tools/bpf/bpftool/bash-completion/bpftool @@ -99,6 +99,35 @@ _bpftool_get_prog_tags() command sed -n 's/.*"tag": "\(.*\)",$/\1/p' )" -- "$cur" ) ) } +_bpftool_get_obj_map_names() +{ + local obj + + obj=$1 + + maps=$(objdump -j maps -t $obj 2>/dev/null | \ + command awk '/g . maps/ {print $NF}') + + COMPREPLY+=( $( compgen -W "$maps" -- "$cur" ) ) +} + +_bpftool_get_obj_map_idxs() +{ + local obj + + obj=$1 + + nmaps=$(objdump -j maps -t $obj 2>/dev/null | grep -c 'g . maps') + + COMPREPLY+=( $( compgen -W "$(seq 0 $((nmaps - 1)))" -- "$cur" ) ) +} + +_sysfs_get_netdevs() +{ + COMPREPLY+=( $( compgen -W "$( ls /sys/class/net 2>/dev/null )" -- \ + "$cur" ) ) +} + # For bpftool map update: retrieve type of the map to update. _bpftool_map_update_map_type() { @@ -153,6 +182,13 @@ _bpftool() local cur prev words objword _init_completion || return + # Deal with options + if [[ ${words[cword]} == -* ]]; then + local c='--version --json --pretty --bpffs' + COMPREPLY=( $( compgen -W "$c" -- "$cur" ) ) + return 0 + fi + # Deal with simplest keywords case $prev in help|hex|opcodes|visual) @@ -172,20 +208,23 @@ _bpftool() ;; esac - # Search for object and command - local object command cmdword - for (( cmdword=1; cmdword < ${#words[@]}-1; cmdword++ )); do - [[ -n $object ]] && command=${words[cmdword]} && break - [[ ${words[cmdword]} != -* ]] && object=${words[cmdword]} + # Remove all options so completions don't have to deal with them. 
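+    # (Each option removed at or before the cursor position shifts the
+    # cursor index, so cword is decremented in the loop below to keep cur
+    # and prev pointing at the intended words.)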
+ local i + for (( i=1; i < ${#words[@]}; )); do + if [[ ${words[i]::1} == - ]]; then + words=( "${words[@]:0:i}" "${words[@]:i+1}" ) + [[ $i -le $cword ]] && cword=$(( cword - 1 )) + else + i=$(( ++i )) + fi done + cur=${words[cword]} + prev=${words[cword - 1]} + + local object=${words[1]} command=${words[2]} - if [[ -z $object ]]; then + if [[ -z $object || $cword -eq 1 ]]; then case $cur in - -*) - local c='--version --json --pretty' - COMPREPLY=( $( compgen -W "$c" -- "$cur" ) ) - return 0 - ;; *) COMPREPLY=( $( compgen -W "$( bpftool help 2>&1 | \ command sed \ @@ -204,12 +243,14 @@ _bpftool() # Completion depends on object and command in use case $object in prog) - case $prev in - id) - _bpftool_get_prog_ids - return 0 - ;; - esac + if [[ $command != "load" ]]; then + case $prev in + id) + _bpftool_get_prog_ids + return 0 + ;; + esac + fi local PROG_TYPE='id pinned tag' case $command in @@ -252,8 +293,57 @@ _bpftool() return 0 ;; load) - _filedir - return 0 + local obj + + if [[ ${#words[@]} -lt 6 ]]; then + _filedir + return 0 + fi + + obj=${words[3]} + + if [[ ${words[-4]} == "map" ]]; then + COMPREPLY=( $( compgen -W "id pinned" -- "$cur" ) ) + return 0 + fi + if [[ ${words[-3]} == "map" ]]; then + if [[ ${words[-2]} == "idx" ]]; then + _bpftool_get_obj_map_idxs $obj + elif [[ ${words[-2]} == "name" ]]; then + _bpftool_get_obj_map_names $obj + fi + return 0 + fi + if [[ ${words[-2]} == "map" ]]; then + COMPREPLY=( $( compgen -W "idx name" -- "$cur" ) ) + return 0 + fi + + case $prev in + type) + COMPREPLY=( $( compgen -W "socket kprobe kretprobe classifier action tracepoint raw_tracepoint xdp perf_event cgroup/skb cgroup/sock cgroup/dev lwt_in lwt_out lwt_xmit lwt_seg6local sockops sk_skb sk_msg lirc_mode2 cgroup/bind4 cgroup/bind6 cgroup/connect4 cgroup/connect6 cgroup/sendmsg4 cgroup/sendmsg6 cgroup/post_bind4 cgroup/post_bind6" -- \ + "$cur" ) ) + return 0 + ;; + id) + _bpftool_get_map_ids + return 0 + ;; + pinned) + _filedir + return 0 + ;; + dev) + _sysfs_get_netdevs + return 0 + ;; + *) + COMPREPLY=( $( compgen -W "map" -- "$cur" ) ) + _bpftool_once_attr 'type' + _bpftool_once_attr 'dev' + return 0 + ;; + esac ;; *) [[ $prev == $object ]] && \ @@ -404,6 +494,10 @@ _bpftool() _filedir return 0 ;; + tree) + _filedir + return 0 + ;; attach|detach) local ATTACH_TYPES='ingress egress sock_create sock_ops \ device bind4 bind6 post_bind4 post_bind6 connect4 \ @@ -445,7 +539,7 @@ _bpftool() *) [[ $prev == $object ]] && \ COMPREPLY=( $( compgen -W 'help attach detach \ - show list' -- "$cur" ) ) + show list tree' -- "$cur" ) ) ;; esac ;; diff --git a/tools/bpf/bpftool/btf_dumper.c b/tools/bpf/bpftool/btf_dumper.c new file mode 100644 index 000000000000..55bc512a1831 --- /dev/null +++ b/tools/bpf/bpftool/btf_dumper.c @@ -0,0 +1,251 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2018 Facebook */ + +#include <ctype.h> +#include <stdio.h> /* for (FILE *) used by json_writer */ +#include <string.h> +#include <asm/byteorder.h> +#include <linux/bitops.h> +#include <linux/btf.h> +#include <linux/err.h> + +#include "btf.h" +#include "json_writer.h" +#include "main.h" + +#define BITS_PER_BYTE_MASK (BITS_PER_BYTE - 1) +#define BITS_PER_BYTE_MASKED(bits) ((bits) & BITS_PER_BYTE_MASK) +#define BITS_ROUNDDOWN_BYTES(bits) ((bits) >> 3) +#define BITS_ROUNDUP_BYTES(bits) \ + (BITS_ROUNDDOWN_BYTES(bits) + !!BITS_PER_BYTE_MASKED(bits)) + +static int btf_dumper_do_type(const struct btf_dumper *d, __u32 type_id, + __u8 bit_offset, const void *data); + +static void btf_dumper_ptr(const void 
*data, json_writer_t *jw, + bool is_plain_text) +{ + if (is_plain_text) + jsonw_printf(jw, "%p", *(unsigned long *)data); + else + jsonw_printf(jw, "%u", *(unsigned long *)data); +} + +static int btf_dumper_modifier(const struct btf_dumper *d, __u32 type_id, + const void *data) +{ + int actual_type_id; + + actual_type_id = btf__resolve_type(d->btf, type_id); + if (actual_type_id < 0) + return actual_type_id; + + return btf_dumper_do_type(d, actual_type_id, 0, data); +} + +static void btf_dumper_enum(const void *data, json_writer_t *jw) +{ + jsonw_printf(jw, "%d", *(int *)data); +} + +static int btf_dumper_array(const struct btf_dumper *d, __u32 type_id, + const void *data) +{ + const struct btf_type *t = btf__type_by_id(d->btf, type_id); + struct btf_array *arr = (struct btf_array *)(t + 1); + long long elem_size; + int ret = 0; + __u32 i; + + elem_size = btf__resolve_size(d->btf, arr->type); + if (elem_size < 0) + return elem_size; + + jsonw_start_array(d->jw); + for (i = 0; i < arr->nelems; i++) { + ret = btf_dumper_do_type(d, arr->type, 0, + data + i * elem_size); + if (ret) + break; + } + + jsonw_end_array(d->jw); + return ret; +} + +static void btf_dumper_int_bits(__u32 int_type, __u8 bit_offset, + const void *data, json_writer_t *jw, + bool is_plain_text) +{ + int left_shift_bits, right_shift_bits; + int nr_bits = BTF_INT_BITS(int_type); + int total_bits_offset; + int bytes_to_copy; + int bits_to_copy; + __u64 print_num; + + total_bits_offset = bit_offset + BTF_INT_OFFSET(int_type); + data += BITS_ROUNDDOWN_BYTES(total_bits_offset); + bit_offset = BITS_PER_BYTE_MASKED(total_bits_offset); + bits_to_copy = bit_offset + nr_bits; + bytes_to_copy = BITS_ROUNDUP_BYTES(bits_to_copy); + + print_num = 0; + memcpy(&print_num, data, bytes_to_copy); +#if defined(__BIG_ENDIAN_BITFIELD) + left_shift_bits = bit_offset; +#elif defined(__LITTLE_ENDIAN_BITFIELD) + left_shift_bits = 64 - bits_to_copy; +#else +#error neither big nor little endian +#endif + right_shift_bits = 64 - nr_bits; + + print_num <<= left_shift_bits; + print_num >>= right_shift_bits; + if (is_plain_text) + jsonw_printf(jw, "0x%llx", print_num); + else + jsonw_printf(jw, "%llu", print_num); +} + +static int btf_dumper_int(const struct btf_type *t, __u8 bit_offset, + const void *data, json_writer_t *jw, + bool is_plain_text) +{ + __u32 *int_type; + __u32 nr_bits; + + int_type = (__u32 *)(t + 1); + nr_bits = BTF_INT_BITS(*int_type); + /* if this is bit field */ + if (bit_offset || BTF_INT_OFFSET(*int_type) || + BITS_PER_BYTE_MASKED(nr_bits)) { + btf_dumper_int_bits(*int_type, bit_offset, data, jw, + is_plain_text); + return 0; + } + + switch (BTF_INT_ENCODING(*int_type)) { + case 0: + if (BTF_INT_BITS(*int_type) == 64) + jsonw_printf(jw, "%lu", *(__u64 *)data); + else if (BTF_INT_BITS(*int_type) == 32) + jsonw_printf(jw, "%u", *(__u32 *)data); + else if (BTF_INT_BITS(*int_type) == 16) + jsonw_printf(jw, "%hu", *(__u16 *)data); + else if (BTF_INT_BITS(*int_type) == 8) + jsonw_printf(jw, "%hhu", *(__u8 *)data); + else + btf_dumper_int_bits(*int_type, bit_offset, data, jw, + is_plain_text); + break; + case BTF_INT_SIGNED: + if (BTF_INT_BITS(*int_type) == 64) + jsonw_printf(jw, "%ld", *(long long *)data); + else if (BTF_INT_BITS(*int_type) == 32) + jsonw_printf(jw, "%d", *(int *)data); + else if (BTF_INT_BITS(*int_type) == 16) + jsonw_printf(jw, "%hd", *(short *)data); + else if (BTF_INT_BITS(*int_type) == 8) + jsonw_printf(jw, "%hhd", *(char *)data); + else + btf_dumper_int_bits(*int_type, bit_offset, data, jw, + is_plain_text); + 
break; + case BTF_INT_CHAR: + if (isprint(*(char *)data)) + jsonw_printf(jw, "\"%c\"", *(char *)data); + else + if (is_plain_text) + jsonw_printf(jw, "0x%hhx", *(char *)data); + else + jsonw_printf(jw, "\"\\u00%02hhx\"", + *(char *)data); + break; + case BTF_INT_BOOL: + jsonw_bool(jw, *(int *)data); + break; + default: + /* shouldn't happen */ + return -EINVAL; + } + + return 0; +} + +static int btf_dumper_struct(const struct btf_dumper *d, __u32 type_id, + const void *data) +{ + const struct btf_type *t; + struct btf_member *m; + const void *data_off; + int ret = 0; + int i, vlen; + + t = btf__type_by_id(d->btf, type_id); + if (!t) + return -EINVAL; + + vlen = BTF_INFO_VLEN(t->info); + jsonw_start_object(d->jw); + m = (struct btf_member *)(t + 1); + + for (i = 0; i < vlen; i++) { + data_off = data + BITS_ROUNDDOWN_BYTES(m[i].offset); + jsonw_name(d->jw, btf__name_by_offset(d->btf, m[i].name_off)); + ret = btf_dumper_do_type(d, m[i].type, + BITS_PER_BYTE_MASKED(m[i].offset), + data_off); + if (ret) + break; + } + + jsonw_end_object(d->jw); + + return ret; +} + +static int btf_dumper_do_type(const struct btf_dumper *d, __u32 type_id, + __u8 bit_offset, const void *data) +{ + const struct btf_type *t = btf__type_by_id(d->btf, type_id); + + switch (BTF_INFO_KIND(t->info)) { + case BTF_KIND_INT: + return btf_dumper_int(t, bit_offset, data, d->jw, + d->is_plain_text); + case BTF_KIND_STRUCT: + case BTF_KIND_UNION: + return btf_dumper_struct(d, type_id, data); + case BTF_KIND_ARRAY: + return btf_dumper_array(d, type_id, data); + case BTF_KIND_ENUM: + btf_dumper_enum(data, d->jw); + return 0; + case BTF_KIND_PTR: + btf_dumper_ptr(data, d->jw, d->is_plain_text); + return 0; + case BTF_KIND_UNKN: + jsonw_printf(d->jw, "(unknown)"); + return 0; + case BTF_KIND_FWD: + /* map key or value can't be forward */ + jsonw_printf(d->jw, "(fwd-kind-invalid)"); + return -EINVAL; + case BTF_KIND_TYPEDEF: + case BTF_KIND_VOLATILE: + case BTF_KIND_CONST: + case BTF_KIND_RESTRICT: + return btf_dumper_modifier(d, type_id, data); + default: + jsonw_printf(d->jw, "(unsupported-kind"); + return -EINVAL; + } +} + +int btf_dumper_type(const struct btf_dumper *d, __u32 type_id, + const void *data) +{ + return btf_dumper_do_type(d, type_id, 0, data); +} diff --git a/tools/bpf/bpftool/cgroup.c b/tools/bpf/bpftool/cgroup.c index 16bee011e16c..ee7a9765c6b3 100644 --- a/tools/bpf/bpftool/cgroup.c +++ b/tools/bpf/bpftool/cgroup.c @@ -2,7 +2,12 @@ // Copyright (C) 2017 Facebook // Author: Roman Gushchin <guro@fb.com> +#define _XOPEN_SOURCE 500 +#include <errno.h> #include <fcntl.h> +#include <ftw.h> +#include <mntent.h> +#include <stdio.h> #include <stdlib.h> #include <string.h> #include <sys/stat.h> @@ -53,7 +58,8 @@ static enum bpf_attach_type parse_attach_type(const char *str) } static int show_bpf_prog(int id, const char *attach_type_str, - const char *attach_flags_str) + const char *attach_flags_str, + int level) { struct bpf_prog_info info = {}; __u32 info_len = sizeof(info); @@ -78,7 +84,8 @@ static int show_bpf_prog(int id, const char *attach_type_str, jsonw_string_field(json_wtr, "name", info.name); jsonw_end_object(json_wtr); } else { - printf("%-8u %-15s %-15s %-15s\n", info.id, + printf("%s%-8u %-15s %-15s %-15s\n", level ? 
" " : "", + info.id, attach_type_str, attach_flags_str, info.name); @@ -88,7 +95,20 @@ static int show_bpf_prog(int id, const char *attach_type_str, return 0; } -static int show_attached_bpf_progs(int cgroup_fd, enum bpf_attach_type type) +static int count_attached_bpf_progs(int cgroup_fd, enum bpf_attach_type type) +{ + __u32 prog_cnt = 0; + int ret; + + ret = bpf_prog_query(cgroup_fd, type, 0, NULL, NULL, &prog_cnt); + if (ret) + return -1; + + return prog_cnt; +} + +static int show_attached_bpf_progs(int cgroup_fd, enum bpf_attach_type type, + int level) { __u32 prog_ids[1024] = {0}; char *attach_flags_str; @@ -123,7 +143,7 @@ static int show_attached_bpf_progs(int cgroup_fd, enum bpf_attach_type type) for (iter = 0; iter < prog_cnt; iter++) show_bpf_prog(prog_ids[iter], attach_type_strings[type], - attach_flags_str); + attach_flags_str, level); return 0; } @@ -161,7 +181,7 @@ static int do_show(int argc, char **argv) * If we were able to get the show for at least one * attach type, let's return 0. */ - if (show_attached_bpf_progs(cgroup_fd, type) == 0) + if (show_attached_bpf_progs(cgroup_fd, type, 0) == 0) ret = 0; } @@ -173,6 +193,143 @@ exit: return ret; } +/* + * To distinguish nftw() errors and do_show_tree_fn() errors + * and avoid duplicating error messages, let's return -2 + * from do_show_tree_fn() in case of error. + */ +#define NFTW_ERR -1 +#define SHOW_TREE_FN_ERR -2 +static int do_show_tree_fn(const char *fpath, const struct stat *sb, + int typeflag, struct FTW *ftw) +{ + enum bpf_attach_type type; + bool skip = true; + int cgroup_fd; + + if (typeflag != FTW_D) + return 0; + + cgroup_fd = open(fpath, O_RDONLY); + if (cgroup_fd < 0) { + p_err("can't open cgroup %s: %s", fpath, strerror(errno)); + return SHOW_TREE_FN_ERR; + } + + for (type = 0; type < __MAX_BPF_ATTACH_TYPE; type++) { + int count = count_attached_bpf_progs(cgroup_fd, type); + + if (count < 0 && errno != EINVAL) { + p_err("can't query bpf programs attached to %s: %s", + fpath, strerror(errno)); + close(cgroup_fd); + return SHOW_TREE_FN_ERR; + } + if (count > 0) { + skip = false; + break; + } + } + + if (skip) { + close(cgroup_fd); + return 0; + } + + if (json_output) { + jsonw_start_object(json_wtr); + jsonw_string_field(json_wtr, "cgroup", fpath); + jsonw_name(json_wtr, "programs"); + jsonw_start_array(json_wtr); + } else { + printf("%s\n", fpath); + } + + for (type = 0; type < __MAX_BPF_ATTACH_TYPE; type++) + show_attached_bpf_progs(cgroup_fd, type, ftw->level); + + if (json_output) { + jsonw_end_array(json_wtr); + jsonw_end_object(json_wtr); + } + + close(cgroup_fd); + + return 0; +} + +static char *find_cgroup_root(void) +{ + struct mntent *mnt; + FILE *f; + + f = fopen("/proc/mounts", "r"); + if (f == NULL) + return NULL; + + while ((mnt = getmntent(f))) { + if (strcmp(mnt->mnt_type, "cgroup2") == 0) { + fclose(f); + return strdup(mnt->mnt_dir); + } + } + + fclose(f); + return NULL; +} + +static int do_show_tree(int argc, char **argv) +{ + char *cgroup_root; + int ret; + + switch (argc) { + case 0: + cgroup_root = find_cgroup_root(); + if (!cgroup_root) { + p_err("cgroup v2 isn't mounted"); + return -1; + } + break; + case 1: + cgroup_root = argv[0]; + break; + default: + p_err("too many parameters for cgroup tree"); + return -1; + } + + + if (json_output) + jsonw_start_array(json_wtr); + else + printf("%s\n" + "%-8s %-15s %-15s %-15s\n", + "CgroupPath", + "ID", "AttachType", "AttachFlags", "Name"); + + switch (nftw(cgroup_root, do_show_tree_fn, 1024, FTW_MOUNT)) { + case NFTW_ERR: + p_err("can't iterate 
over %s: %s", cgroup_root, + strerror(errno)); + ret = -1; + break; + case SHOW_TREE_FN_ERR: + ret = -1; + break; + default: + ret = 0; + } + + if (json_output) + jsonw_end_array(json_wtr); + + if (argc == 0) + free(cgroup_root); + + return ret; +} + static int do_attach(int argc, char **argv) { enum bpf_attach_type attach_type; @@ -289,6 +446,7 @@ static int do_help(int argc, char **argv) fprintf(stderr, "Usage: %s %s { show | list } CGROUP\n" + " %s %s tree [CGROUP_ROOT]\n" " %s %s attach CGROUP ATTACH_TYPE PROG [ATTACH_FLAGS]\n" " %s %s detach CGROUP ATTACH_TYPE PROG\n" " %s %s help\n" @@ -298,6 +456,7 @@ static int do_help(int argc, char **argv) " " HELP_SPEC_PROGRAM "\n" " " HELP_SPEC_OPTIONS "\n" "", + bin_name, argv[-2], bin_name, argv[-2], bin_name, argv[-2], bin_name, argv[-2], bin_name, argv[-2]); @@ -307,6 +466,7 @@ static int do_help(int argc, char **argv) static const struct cmd cmds[] = { { "show", do_show }, { "list", do_show }, + { "tree", do_show_tree }, { "attach", do_attach }, { "detach", do_detach }, { "help", do_help }, diff --git a/tools/bpf/bpftool/common.c b/tools/bpf/bpftool/common.c index 32f9e397a6c0..b432daea4520 100644 --- a/tools/bpf/bpftool/common.c +++ b/tools/bpf/bpftool/common.c @@ -31,8 +31,6 @@ * SOFTWARE. */ -/* Author: Jakub Kicinski <kubakici@wp.pl> */ - #include <ctype.h> #include <errno.h> #include <fcntl.h> diff --git a/tools/bpf/bpftool/main.c b/tools/bpf/bpftool/main.c index eea7f14355f3..d15a62be6cf0 100644 --- a/tools/bpf/bpftool/main.c +++ b/tools/bpf/bpftool/main.c @@ -1,5 +1,5 @@ /* - * Copyright (C) 2017 Netronome Systems, Inc. + * Copyright (C) 2017-2018 Netronome Systems, Inc. * * This software is dual licensed under the GNU General License Version 2, * June 1991 as shown in the file COPYING in the top-level directory of this @@ -31,8 +31,6 @@ * SOFTWARE. */ -/* Author: Jakub Kicinski <kubakici@wp.pl> */ - #include <bfd.h> #include <ctype.h> #include <errno.h> diff --git a/tools/bpf/bpftool/main.h b/tools/bpf/bpftool/main.h index 63fdb310b9a4..238e734d75b3 100644 --- a/tools/bpf/bpftool/main.h +++ b/tools/bpf/bpftool/main.h @@ -31,8 +31,6 @@ * SOFTWARE. 
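The new "tree" subcommand (bpftool cgroup tree [CGROUP_ROOT]) walks a cgroup v2 hierarchy with nftw(3) and lists the programs attached at every level, discovering the cgroup2 mount point from /proc/mounts when no root is given. A rough, self-contained illustration of that walk follows; it is not part of the patch, and the real callback additionally queries attached programs and supports JSON output:

/* sketch: visit every cgroup directory below a root, like do_show_tree_fn() */
#define _GNU_SOURCE
#include <ftw.h>
#include <stdio.h>

static int visit(const char *fpath, const struct stat *sb,
                 int typeflag, struct FTW *ftw)
{
        if (typeflag == FTW_D)                  /* every directory is a cgroup */
                printf("%*s%s\n", ftw->level * 2, "", fpath);
        return 0;                               /* non-zero would abort the walk */
}

int main(void)
{
        /* "/sys/fs/cgroup" is a typical cgroup v2 mount point; bpftool reads
         * the real one from /proc/mounts instead of hard-coding it
         */
        return nftw("/sys/fs/cgroup", visit, 1024, FTW_MOUNT) ? 1 : 0;
}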
*/ -/* Author: Jakub Kicinski <kubakici@wp.pl> */ - #ifndef __BPF_TOOL_H #define __BPF_TOOL_H @@ -44,6 +42,7 @@ #include <linux/compiler.h> #include <linux/kernel.h> #include <linux/hashtable.h> +#include <tools/libc_compat.h> #include "json_writer.h" @@ -52,6 +51,21 @@ #define NEXT_ARG() ({ argc--; argv++; if (argc < 0) usage(); }) #define NEXT_ARGP() ({ (*argc)--; (*argv)++; if (*argc < 0) usage(); }) #define BAD_ARG() ({ p_err("what is '%s'?", *argv); -1; }) +#define GET_ARG() ({ argc--; *argv++; }) +#define REQ_ARGS(cnt) \ + ({ \ + int _cnt = (cnt); \ + bool _res; \ + \ + if (argc < _cnt) { \ + p_err("'%s' needs at least %d arguments, %d found", \ + argv[-1], _cnt, argc); \ + _res = false; \ + } else { \ + _res = true; \ + } \ + _res; \ + }) #define ERR_MAX_LEN 1024 @@ -61,6 +75,8 @@ "PROG := { id PROG_ID | pinned FILE | tag PROG_TAG }" #define HELP_SPEC_OPTIONS \ "OPTIONS := { {-j|--json} [{-p|--pretty}] | {-f|--bpffs} }" +#define HELP_SPEC_MAP \ + "MAP := { id MAP_ID | pinned FILE }" enum bpf_obj_type { BPF_OBJ_UNKNOWN, @@ -122,6 +138,7 @@ int do_cgroup(int argc, char **arg); int do_perf(int argc, char **arg); int prog_parse_fd(int *argc, char ***argv); +int map_parse_fd(int *argc, char ***argv); int map_parse_fd_and_info(int *argc, char ***argv, void *info, __u32 *info_len); void disasm_print_insn(unsigned char *image, ssize_t len, int opcodes, @@ -133,4 +150,19 @@ unsigned int get_page_size(void); unsigned int get_possible_cpus(void); const char *ifindex_to_bfd_name_ns(__u32 ifindex, __u64 ns_dev, __u64 ns_ino); +struct btf_dumper { + const struct btf *btf; + json_writer_t *jw; + bool is_plain_text; +}; + +/* btf_dumper_type - print data along with type information + * @d: an instance containing context for dumping types + * @type_id: index in btf->types array. this points to the type to be dumped + * @data: pointer the actual data, i.e. the values to be printed + * + * Returns zero on success and negative error code otherwise + */ +int btf_dumper_type(const struct btf_dumper *d, __u32 type_id, + const void *data); #endif diff --git a/tools/bpf/bpftool/map.c b/tools/bpf/bpftool/map.c index 097b1a5e046b..9c8191845585 100644 --- a/tools/bpf/bpftool/map.c +++ b/tools/bpf/bpftool/map.c @@ -31,11 +31,10 @@ * SOFTWARE. 
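The GET_ARG() and REQ_ARGS() helpers added to main.h above standardize how subcommands consume positional arguments. A hypothetical handler showing the intended pattern; it only builds inside bpftool (main.h, p_info()), and do_example() with its two arguments is invented for the illustration:

#include "main.h"

static int do_example(int argc, char **argv)
{
        const char *objfile, *pinfile;

        if (!REQ_ARGS(2))               /* need at least OBJ and FILE */
                return -1;

        objfile = GET_ARG();            /* pops argv[0], decrements argc */
        pinfile = GET_ARG();

        p_info("would load %s and pin it at %s", objfile, pinfile);
        return 0;
}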
*/ -/* Author: Jakub Kicinski <kubakici@wp.pl> */ - #include <assert.h> #include <errno.h> #include <fcntl.h> +#include <linux/err.h> #include <stdbool.h> #include <stdio.h> #include <stdlib.h> @@ -46,6 +45,8 @@ #include <bpf.h> +#include "btf.h" +#include "json_writer.h" #include "main.h" static const char * const map_type_name[] = { @@ -95,7 +96,7 @@ static void *alloc_value(struct bpf_map_info *info) return malloc(info->value_size); } -static int map_parse_fd(int *argc, char ***argv) +int map_parse_fd(int *argc, char ***argv) { int fd; @@ -150,8 +151,109 @@ int map_parse_fd_and_info(int *argc, char ***argv, void *info, __u32 *info_len) return fd; } +static int do_dump_btf(const struct btf_dumper *d, + struct bpf_map_info *map_info, void *key, + void *value) +{ + int ret; + + /* start of key-value pair */ + jsonw_start_object(d->jw); + + jsonw_name(d->jw, "key"); + + ret = btf_dumper_type(d, map_info->btf_key_type_id, key); + if (ret) + goto err_end_obj; + + jsonw_name(d->jw, "value"); + + ret = btf_dumper_type(d, map_info->btf_value_type_id, value); + +err_end_obj: + /* end of key-value pair */ + jsonw_end_object(d->jw); + + return ret; +} + +static int get_btf(struct bpf_map_info *map_info, struct btf **btf) +{ + struct bpf_btf_info btf_info = { 0 }; + __u32 len = sizeof(btf_info); + __u32 last_size; + int btf_fd; + void *ptr; + int err; + + err = 0; + *btf = NULL; + btf_fd = bpf_btf_get_fd_by_id(map_info->btf_id); + if (btf_fd < 0) + return 0; + + /* we won't know btf_size until we call bpf_obj_get_info_by_fd(). so + * let's start with a sane default - 4KiB here - and resize it only if + * bpf_obj_get_info_by_fd() needs a bigger buffer. + */ + btf_info.btf_size = 4096; + last_size = btf_info.btf_size; + ptr = malloc(last_size); + if (!ptr) { + err = -ENOMEM; + goto exit_free; + } + + bzero(ptr, last_size); + btf_info.btf = ptr_to_u64(ptr); + err = bpf_obj_get_info_by_fd(btf_fd, &btf_info, &len); + + if (!err && btf_info.btf_size > last_size) { + void *temp_ptr; + + last_size = btf_info.btf_size; + temp_ptr = realloc(ptr, last_size); + if (!temp_ptr) { + err = -ENOMEM; + goto exit_free; + } + ptr = temp_ptr; + bzero(ptr, last_size); + btf_info.btf = ptr_to_u64(ptr); + err = bpf_obj_get_info_by_fd(btf_fd, &btf_info, &len); + } + + if (err || btf_info.btf_size > last_size) { + err = errno; + goto exit_free; + } + + *btf = btf__new((__u8 *)btf_info.btf, btf_info.btf_size, NULL); + if (IS_ERR(*btf)) { + err = PTR_ERR(btf); + *btf = NULL; + } + +exit_free: + close(btf_fd); + free(ptr); + + return err; +} + +static json_writer_t *get_btf_writer(void) +{ + json_writer_t *jw = jsonw_new(stdout); + + if (!jw) + return NULL; + jsonw_pretty(jw, true); + + return jw; +} + static void print_entry_json(struct bpf_map_info *info, unsigned char *key, - unsigned char *value) + unsigned char *value, struct btf *btf) { jsonw_start_object(json_wtr); @@ -160,6 +262,16 @@ static void print_entry_json(struct bpf_map_info *info, unsigned char *key, print_hex_data_json(key, info->key_size); jsonw_name(json_wtr, "value"); print_hex_data_json(value, info->value_size); + if (btf) { + struct btf_dumper d = { + .btf = btf, + .jw = json_wtr, + .is_plain_text = false, + }; + + jsonw_name(json_wtr, "formatted"); + do_dump_btf(&d, info, key, value); + } } else { unsigned int i, n; @@ -510,10 +622,12 @@ static int do_show(int argc, char **argv) static int do_dump(int argc, char **argv) { + struct bpf_map_info info = {}; void *key, *value, *prev_key; unsigned int num_elems = 0; - struct bpf_map_info info = {}; __u32 len 
= sizeof(info); + json_writer_t *btf_wtr; + struct btf *btf = NULL; int err; int fd; @@ -539,8 +653,27 @@ static int do_dump(int argc, char **argv) } prev_key = NULL; + + err = get_btf(&info, &btf); + if (err) { + p_err("failed to get btf"); + goto exit_free; + } + if (json_output) jsonw_start_array(json_wtr); + else + if (btf) { + btf_wtr = get_btf_writer(); + if (!btf_wtr) { + p_info("failed to create json writer for btf. falling back to plain output"); + btf__free(btf); + btf = NULL; + } else { + jsonw_start_array(btf_wtr); + } + } + while (true) { err = bpf_map_get_next_key(fd, prev_key, key); if (err) { @@ -551,9 +684,19 @@ static int do_dump(int argc, char **argv) if (!bpf_map_lookup_elem(fd, key, value)) { if (json_output) - print_entry_json(&info, key, value); + print_entry_json(&info, key, value, btf); else - print_entry_plain(&info, key, value); + if (btf) { + struct btf_dumper d = { + .btf = btf, + .jw = btf_wtr, + .is_plain_text = true, + }; + + do_dump_btf(&d, &info, key, value); + } else { + print_entry_plain(&info, key, value); + } } else { if (json_output) { jsonw_name(json_wtr, "key"); @@ -576,14 +719,19 @@ static int do_dump(int argc, char **argv) if (json_output) jsonw_end_array(json_wtr); - else + else if (btf) { + jsonw_end_array(btf_wtr); + jsonw_destroy(&btf_wtr); + } else { printf("Found %u element%s\n", num_elems, num_elems != 1 ? "s" : ""); + } exit_free: free(key); free(value); close(fd); + btf__free(btf); return err; } @@ -639,6 +787,8 @@ static int do_lookup(int argc, char **argv) { struct bpf_map_info info = {}; __u32 len = sizeof(info); + json_writer_t *btf_wtr; + struct btf *btf = NULL; void *key, *value; int err; int fd; @@ -663,27 +813,60 @@ static int do_lookup(int argc, char **argv) goto exit_free; err = bpf_map_lookup_elem(fd, key, value); - if (!err) { - if (json_output) - print_entry_json(&info, key, value); - else + if (err) { + if (errno == ENOENT) { + if (json_output) { + jsonw_null(json_wtr); + } else { + printf("key:\n"); + fprint_hex(stdout, key, info.key_size, " "); + printf("\n\nNot found\n"); + } + } else { + p_err("lookup failed: %s", strerror(errno)); + } + + goto exit_free; + } + + /* here means bpf_map_lookup_elem() succeeded */ + err = get_btf(&info, &btf); + if (err) { + p_err("failed to get btf"); + goto exit_free; + } + + if (json_output) { + print_entry_json(&info, key, value, btf); + } else if (btf) { + /* if here json_wtr wouldn't have been initialised, + * so let's create separate writer for btf + */ + btf_wtr = get_btf_writer(); + if (!btf_wtr) { + p_info("failed to create json writer for btf. 
falling back to plain output"); + btf__free(btf); + btf = NULL; print_entry_plain(&info, key, value); - } else if (errno == ENOENT) { - if (json_output) { - jsonw_null(json_wtr); } else { - printf("key:\n"); - fprint_hex(stdout, key, info.key_size, " "); - printf("\n\nNot found\n"); + struct btf_dumper d = { + .btf = btf, + .jw = btf_wtr, + .is_plain_text = true, + }; + + do_dump_btf(&d, &info, key, value); + jsonw_destroy(&btf_wtr); } } else { - p_err("lookup failed: %s", strerror(errno)); + print_entry_plain(&info, key, value); } exit_free: free(key); free(value); close(fd); + btf__free(btf); return err; } @@ -826,7 +1009,7 @@ static int do_help(int argc, char **argv) " %s %s event_pipe MAP [cpu N index M]\n" " %s %s help\n" "\n" - " MAP := { id MAP_ID | pinned FILE }\n" + " " HELP_SPEC_MAP "\n" " DATA := { [hex] BYTES }\n" " " HELP_SPEC_PROGRAM "\n" " VALUE := { DATA | MAP | PROG }\n" diff --git a/tools/bpf/bpftool/prog.c b/tools/bpf/bpftool/prog.c index 959aa53ab678..dce960d22106 100644 --- a/tools/bpf/bpftool/prog.c +++ b/tools/bpf/bpftool/prog.c @@ -1,5 +1,5 @@ /* - * Copyright (C) 2017 Netronome Systems, Inc. + * Copyright (C) 2017-2018 Netronome Systems, Inc. * * This software is dual licensed under the GNU General License Version 2, * June 1991 as shown in the file COPYING in the top-level directory of this @@ -31,8 +31,7 @@ * SOFTWARE. */ -/* Author: Jakub Kicinski <kubakici@wp.pl> */ - +#define _GNU_SOURCE #include <errno.h> #include <fcntl.h> #include <stdarg.h> @@ -41,9 +40,12 @@ #include <string.h> #include <time.h> #include <unistd.h> +#include <net/if.h> #include <sys/types.h> #include <sys/stat.h> +#include <linux/err.h> + #include <bpf.h> #include <libbpf.h> @@ -681,31 +683,247 @@ static int do_pin(int argc, char **argv) return err; } +struct map_replace { + int idx; + int fd; + char *name; +}; + +int map_replace_compar(const void *p1, const void *p2) +{ + const struct map_replace *a = p1, *b = p2; + + return a->idx - b->idx; +} + static int do_load(int argc, char **argv) { + enum bpf_attach_type expected_attach_type; + struct bpf_object_open_attr attr = { + .prog_type = BPF_PROG_TYPE_UNSPEC, + }; + struct map_replace *map_replace = NULL; + unsigned int old_map_fds = 0; + struct bpf_program *prog; struct bpf_object *obj; - int prog_fd; - - if (argc != 2) - usage(); + struct bpf_map *map; + const char *pinfile; + unsigned int i, j; + __u32 ifindex = 0; + int idx, err; - if (bpf_prog_load(argv[0], BPF_PROG_TYPE_UNSPEC, &obj, &prog_fd)) { - p_err("failed to load program"); + if (!REQ_ARGS(2)) return -1; + attr.file = GET_ARG(); + pinfile = GET_ARG(); + + while (argc) { + if (is_prefix(*argv, "type")) { + char *type; + + NEXT_ARG(); + + if (attr.prog_type != BPF_PROG_TYPE_UNSPEC) { + p_err("program type already specified"); + goto err_free_reuse_maps; + } + if (!REQ_ARGS(1)) + goto err_free_reuse_maps; + + /* Put a '/' at the end of type to appease libbpf */ + type = malloc(strlen(*argv) + 2); + if (!type) { + p_err("mem alloc failed"); + goto err_free_reuse_maps; + } + *type = 0; + strcat(type, *argv); + strcat(type, "/"); + + err = libbpf_prog_type_by_name(type, &attr.prog_type, + &expected_attach_type); + free(type); + if (err < 0) { + p_err("unknown program type '%s'", *argv); + goto err_free_reuse_maps; + } + NEXT_ARG(); + } else if (is_prefix(*argv, "map")) { + char *endptr, *name; + int fd; + + NEXT_ARG(); + + if (!REQ_ARGS(4)) + goto err_free_reuse_maps; + + if (is_prefix(*argv, "idx")) { + NEXT_ARG(); + + idx = strtoul(*argv, &endptr, 0); + if (*endptr) { + 
p_err("can't parse %s as IDX", *argv); + goto err_free_reuse_maps; + } + name = NULL; + } else if (is_prefix(*argv, "name")) { + NEXT_ARG(); + + name = *argv; + idx = -1; + } else { + p_err("expected 'idx' or 'name', got: '%s'?", + *argv); + goto err_free_reuse_maps; + } + NEXT_ARG(); + + fd = map_parse_fd(&argc, &argv); + if (fd < 0) + goto err_free_reuse_maps; + + map_replace = reallocarray(map_replace, old_map_fds + 1, + sizeof(*map_replace)); + if (!map_replace) { + p_err("mem alloc failed"); + goto err_free_reuse_maps; + } + map_replace[old_map_fds].idx = idx; + map_replace[old_map_fds].name = name; + map_replace[old_map_fds].fd = fd; + old_map_fds++; + } else if (is_prefix(*argv, "dev")) { + NEXT_ARG(); + + if (ifindex) { + p_err("offload device already specified"); + goto err_free_reuse_maps; + } + if (!REQ_ARGS(1)) + goto err_free_reuse_maps; + + ifindex = if_nametoindex(*argv); + if (!ifindex) { + p_err("unrecognized netdevice '%s': %s", + *argv, strerror(errno)); + goto err_free_reuse_maps; + } + NEXT_ARG(); + } else { + p_err("expected no more arguments, 'type', 'map' or 'dev', got: '%s'?", + *argv); + goto err_free_reuse_maps; + } + } + + obj = bpf_object__open_xattr(&attr); + if (IS_ERR_OR_NULL(obj)) { + p_err("failed to open object file"); + goto err_free_reuse_maps; + } + + prog = bpf_program__next(NULL, obj); + if (!prog) { + p_err("object file doesn't contain any bpf program"); + goto err_close_obj; + } + + bpf_program__set_ifindex(prog, ifindex); + if (attr.prog_type == BPF_PROG_TYPE_UNSPEC) { + const char *sec_name = bpf_program__title(prog, false); + + err = libbpf_prog_type_by_name(sec_name, &attr.prog_type, + &expected_attach_type); + if (err < 0) { + p_err("failed to guess program type based on section name %s\n", + sec_name); + goto err_close_obj; + } + } + bpf_program__set_type(prog, attr.prog_type); + bpf_program__set_expected_attach_type(prog, expected_attach_type); + + qsort(map_replace, old_map_fds, sizeof(*map_replace), + map_replace_compar); + + /* After the sort maps by name will be first on the list, because they + * have idx == -1. Resolve them. 
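The type string handling above ends in libbpf_prog_type_by_name(), the helper this series exports from libbpf to translate a section-style name into a program type and expected attach type. A minimal sketch of calling it directly; it assumes a build against tools/lib/bpf, and the section name is just an example:

#include <libbpf.h>
#include <stdio.h>

int main(void)
{
        enum bpf_attach_type expected_attach_type;
        enum bpf_prog_type prog_type;

        /* "cgroup/connect4" is one of the section prefixes libbpf knows */
        if (libbpf_prog_type_by_name("cgroup/connect4", &prog_type,
                                     &expected_attach_type))
                return 1;

        printf("prog type %d, expected attach type %d\n",
               prog_type, expected_attach_type);
        return 0;
}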
+ */ + j = 0; + while (j < old_map_fds && map_replace[j].name) { + i = 0; + bpf_map__for_each(map, obj) { + if (!strcmp(bpf_map__name(map), map_replace[j].name)) { + map_replace[j].idx = i; + break; + } + i++; + } + if (map_replace[j].idx == -1) { + p_err("unable to find map '%s'", map_replace[j].name); + goto err_close_obj; + } + j++; + } + /* Resort if any names were resolved */ + if (j) + qsort(map_replace, old_map_fds, sizeof(*map_replace), + map_replace_compar); + + /* Set ifindex and name reuse */ + j = 0; + idx = 0; + bpf_map__for_each(map, obj) { + if (!bpf_map__is_offload_neutral(map)) + bpf_map__set_ifindex(map, ifindex); + + if (j < old_map_fds && idx == map_replace[j].idx) { + err = bpf_map__reuse_fd(map, map_replace[j++].fd); + if (err) { + p_err("unable to set up map reuse: %d", err); + goto err_close_obj; + } + + /* Next reuse wants to apply to the same map */ + if (j < old_map_fds && map_replace[j].idx == idx) { + p_err("replacement for map idx %d specified more than once", + idx); + goto err_close_obj; + } + } + + idx++; + } + if (j < old_map_fds) { + p_err("map idx '%d' not used", map_replace[j].idx); + goto err_close_obj; + } + + err = bpf_object__load(obj); + if (err) { + p_err("failed to load object file"); + goto err_close_obj; } - if (do_pin_fd(prog_fd, argv[1])) + if (do_pin_fd(bpf_program__fd(prog), pinfile)) goto err_close_obj; if (json_output) jsonw_null(json_wtr); bpf_object__close(obj); + for (i = 0; i < old_map_fds; i++) + close(map_replace[i].fd); + free(map_replace); return 0; err_close_obj: bpf_object__close(obj); +err_free_reuse_maps: + for (i = 0; i < old_map_fds; i++) + close(map_replace[i].fd); + free(map_replace); return -1; } @@ -721,10 +939,19 @@ static int do_help(int argc, char **argv) " %s %s dump xlated PROG [{ file FILE | opcodes | visual }]\n" " %s %s dump jited PROG [{ file FILE | opcodes }]\n" " %s %s pin PROG FILE\n" - " %s %s load OBJ FILE\n" + " %s %s load OBJ FILE [type TYPE] [dev NAME] \\\n" + " [map { idx IDX | name NAME } MAP]\n" " %s %s help\n" "\n" + " " HELP_SPEC_MAP "\n" " " HELP_SPEC_PROGRAM "\n" + " TYPE := { socket | kprobe | kretprobe | classifier | action |\n" + " tracepoint | raw_tracepoint | xdp | perf_event | cgroup/skb |\n" + " cgroup/sock | cgroup/dev | lwt_in | lwt_out | lwt_xmit |\n" + " lwt_seg6local | sockops | sk_skb | sk_msg | lirc_mode2 |\n" + " cgroup/bind4 | cgroup/bind6 | cgroup/post_bind4 |\n" + " cgroup/post_bind6 | cgroup/connect4 | cgroup/connect6 |\n" + " cgroup/sendmsg4 | cgroup/sendmsg6 }\n" " " HELP_SPEC_OPTIONS "\n" "", bin_name, argv[-2], bin_name, argv[-2], bin_name, argv[-2], diff --git a/tools/bpf/bpftool/xlated_dumper.c b/tools/bpf/bpftool/xlated_dumper.c index b97f1da60dd1..3284759df98a 100644 --- a/tools/bpf/bpftool/xlated_dumper.c +++ b/tools/bpf/bpftool/xlated_dumper.c @@ -35,6 +35,7 @@ * POSSIBILITY OF SUCH DAMAGE. 
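Taken together, the new do_load() strings the fresh libbpf pieces into one flow: open the object with bpf_object__open_xattr(), optionally point a map at an existing fd with bpf_map__reuse_fd(), then load. A condensed sketch of that flow using the same APIs; load_with_reused_map() and its parameters are invented, and error reporting is trimmed:

#include <libbpf.h>
#include <string.h>

static int load_with_reused_map(const char *path, const char *map_name,
                                int reuse_fd)
{
        struct bpf_object_open_attr attr = {
                .file      = path,
                .prog_type = BPF_PROG_TYPE_UNSPEC,
        };
        struct bpf_object *obj;
        struct bpf_map *map;

        obj = bpf_object__open_xattr(&attr);
        if (libbpf_get_error(obj))
                return -1;

        bpf_map__for_each(map, obj) {
                if (strcmp(bpf_map__name(map), map_name))
                        continue;
                /* share an existing kernel map instead of creating a new one */
                if (bpf_map__reuse_fd(map, reuse_fd))
                        goto err;
        }

        if (bpf_object__load(obj))
                goto err;
        /* a real caller would pin or attach the programs before obj goes away */
        return 0;
err:
        bpf_object__close(obj);
        return -1;
}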
*/ +#define _GNU_SOURCE #include <stdarg.h> #include <stdio.h> #include <stdlib.h> @@ -66,9 +67,8 @@ void kernel_syms_load(struct dump_data *dd) while (!feof(fp)) { if (!fgets(buff, sizeof(buff), fp)) break; - tmp = realloc(dd->sym_mapping, - (dd->sym_count + 1) * - sizeof(*dd->sym_mapping)); + tmp = reallocarray(dd->sym_mapping, dd->sym_count + 1, + sizeof(*dd->sym_mapping)); if (!tmp) { out: free(dd->sym_mapping); diff --git a/tools/build/Makefile.feature b/tools/build/Makefile.feature index 5b6dda3b1ca8..f216b2f5c3d7 100644 --- a/tools/build/Makefile.feature +++ b/tools/build/Makefile.feature @@ -57,6 +57,7 @@ FEATURE_TESTS_BASIC := \ libunwind-aarch64 \ pthread-attr-setaffinity-np \ pthread-barrier \ + reallocarray \ stackprotector-all \ timerfd \ libdw-dwarf-unwind \ diff --git a/tools/build/feature/Makefile b/tools/build/feature/Makefile index dac9563b5470..0516259be70f 100644 --- a/tools/build/feature/Makefile +++ b/tools/build/feature/Makefile @@ -14,6 +14,7 @@ FILES= \ test-libaudit.bin \ test-libbfd.bin \ test-disassembler-four-args.bin \ + test-reallocarray.bin \ test-liberty.bin \ test-liberty-z.bin \ test-cplus-demangle.bin \ @@ -204,6 +205,9 @@ $(OUTPUT)test-libbfd.bin: $(OUTPUT)test-disassembler-four-args.bin: $(BUILD) -DPACKAGE='"perf"' -lbfd -lopcodes +$(OUTPUT)test-reallocarray.bin: + $(BUILD) + $(OUTPUT)test-liberty.bin: $(CC) $(CFLAGS) -Wall -Werror -o $@ test-libbfd.c -DPACKAGE='"perf"' $(LDFLAGS) -lbfd -ldl -liberty diff --git a/tools/build/feature/test-reallocarray.c b/tools/build/feature/test-reallocarray.c new file mode 100644 index 000000000000..8170de35150d --- /dev/null +++ b/tools/build/feature/test-reallocarray.c @@ -0,0 +1,8 @@ +// SPDX-License-Identifier: GPL-2.0 +#define _GNU_SOURCE +#include <stdlib.h> + +int main(void) +{ + return !!reallocarray(NULL, 1, 1); +} diff --git a/tools/include/linux/compiler-gcc.h b/tools/include/linux/compiler-gcc.h index 70fe61295733..0d35f18006a1 100644 --- a/tools/include/linux/compiler-gcc.h +++ b/tools/include/linux/compiler-gcc.h @@ -36,3 +36,7 @@ #endif #define __printf(a, b) __attribute__((format(printf, a, b))) #define __scanf(a, b) __attribute__((format(scanf, a, b))) + +#if GCC_VERSION >= 50100 +#define COMPILER_HAS_GENERIC_BUILTIN_OVERFLOW 1 +#endif diff --git a/tools/include/linux/overflow.h b/tools/include/linux/overflow.h new file mode 100644 index 000000000000..8712ff70995f --- /dev/null +++ b/tools/include/linux/overflow.h @@ -0,0 +1,278 @@ +/* SPDX-License-Identifier: GPL-2.0 OR MIT */ +#ifndef __LINUX_OVERFLOW_H +#define __LINUX_OVERFLOW_H + +#include <linux/compiler.h> + +/* + * In the fallback code below, we need to compute the minimum and + * maximum values representable in a given type. These macros may also + * be useful elsewhere, so we provide them outside the + * COMPILER_HAS_GENERIC_BUILTIN_OVERFLOW block. + * + * It would seem more obvious to do something like + * + * #define type_min(T) (T)(is_signed_type(T) ? (T)1 << (8*sizeof(T)-1) : 0) + * #define type_max(T) (T)(is_signed_type(T) ? ((T)1 << (8*sizeof(T)-1)) - 1 : ~(T)0) + * + * Unfortunately, the middle expressions, strictly speaking, have + * undefined behaviour, and at least some versions of gcc warn about + * the type_max expression (but not if -fsanitize=undefined is in + * effect; in that case, the warning is deferred to runtime...). + * + * The slightly excessive casting in type_min is to make sure the + * macros also produce sensible values for the exotic type _Bool. 
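The new feature test above probes for reallocarray(), which xlated_dumper.c, bpftool's do_load() and libbpf now use to grow arrays without an unchecked nmemb * size multiplication; builds whose libc lacks it pick up the compat inline added later in this series. A toy usage example, assuming _GNU_SOURCE and a libc that provides the function (glibc 2.26 or later):

#define _GNU_SOURCE
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
        int *vals = NULL, *tmp;
        size_t i, n = 4;

        /* like realloc(vals, n * sizeof(*vals)), but fails on overflow */
        tmp = reallocarray(vals, n, sizeof(*vals));
        if (!tmp)
                return 1;
        vals = tmp;

        for (i = 0; i < n; i++)
                vals[i] = (int)i;
        printf("%d %d %d %d\n", vals[0], vals[1], vals[2], vals[3]);
        free(vals);
        return 0;
}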
[The + * overflow checkers only almost work for _Bool, but that's + * a-feature-not-a-bug, since people shouldn't be doing arithmetic on + * _Bools. Besides, the gcc builtins don't allow _Bool* as third + * argument.] + * + * Idea stolen from + * https://mail-index.netbsd.org/tech-misc/2007/02/05/0000.html - + * credit to Christian Biere. + */ +#define is_signed_type(type) (((type)(-1)) < (type)1) +#define __type_half_max(type) ((type)1 << (8*sizeof(type) - 1 - is_signed_type(type))) +#define type_max(T) ((T)((__type_half_max(T) - 1) + __type_half_max(T))) +#define type_min(T) ((T)((T)-type_max(T)-(T)1)) + + +#ifdef COMPILER_HAS_GENERIC_BUILTIN_OVERFLOW +/* + * For simplicity and code hygiene, the fallback code below insists on + * a, b and *d having the same type (similar to the min() and max() + * macros), whereas gcc's type-generic overflow checkers accept + * different types. Hence we don't just make check_add_overflow an + * alias for __builtin_add_overflow, but add type checks similar to + * below. + */ +#define check_add_overflow(a, b, d) ({ \ + typeof(a) __a = (a); \ + typeof(b) __b = (b); \ + typeof(d) __d = (d); \ + (void) (&__a == &__b); \ + (void) (&__a == __d); \ + __builtin_add_overflow(__a, __b, __d); \ +}) + +#define check_sub_overflow(a, b, d) ({ \ + typeof(a) __a = (a); \ + typeof(b) __b = (b); \ + typeof(d) __d = (d); \ + (void) (&__a == &__b); \ + (void) (&__a == __d); \ + __builtin_sub_overflow(__a, __b, __d); \ +}) + +#define check_mul_overflow(a, b, d) ({ \ + typeof(a) __a = (a); \ + typeof(b) __b = (b); \ + typeof(d) __d = (d); \ + (void) (&__a == &__b); \ + (void) (&__a == __d); \ + __builtin_mul_overflow(__a, __b, __d); \ +}) + +#else + + +/* Checking for unsigned overflow is relatively easy without causing UB. */ +#define __unsigned_add_overflow(a, b, d) ({ \ + typeof(a) __a = (a); \ + typeof(b) __b = (b); \ + typeof(d) __d = (d); \ + (void) (&__a == &__b); \ + (void) (&__a == __d); \ + *__d = __a + __b; \ + *__d < __a; \ +}) +#define __unsigned_sub_overflow(a, b, d) ({ \ + typeof(a) __a = (a); \ + typeof(b) __b = (b); \ + typeof(d) __d = (d); \ + (void) (&__a == &__b); \ + (void) (&__a == __d); \ + *__d = __a - __b; \ + __a < __b; \ +}) +/* + * If one of a or b is a compile-time constant, this avoids a division. + */ +#define __unsigned_mul_overflow(a, b, d) ({ \ + typeof(a) __a = (a); \ + typeof(b) __b = (b); \ + typeof(d) __d = (d); \ + (void) (&__a == &__b); \ + (void) (&__a == __d); \ + *__d = __a * __b; \ + __builtin_constant_p(__b) ? \ + __b > 0 && __a > type_max(typeof(__a)) / __b : \ + __a > 0 && __b > type_max(typeof(__b)) / __a; \ +}) + +/* + * For signed types, detecting overflow is much harder, especially if + * we want to avoid UB. But the interface of these macros is such that + * we must provide a result in *d, and in fact we must produce the + * result promised by gcc's builtins, which is simply the possibly + * wrapped-around value. Fortunately, we can just formally do the + * operations in the widest relevant unsigned type (u64) and then + * truncate the result - gcc is smart enough to generate the same code + * with and without the (u64) casts. + */ + +/* + * Adding two signed integers can overflow only if they have the same + * sign, and overflow has happened iff the result has the opposite + * sign. 
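A toy demonstration of what the type-generic checkers above provide. On gcc 5.1 and newer, check_add_overflow() boils down to __builtin_add_overflow() plus the extra type checks; the builtin is used directly here to keep the snippet self-contained. The wrapped result is still written to the destination, and the return value says whether the addition overflowed.

#include <limits.h>
#include <stdbool.h>
#include <stdio.h>

int main(void)
{
        unsigned int sum;
        bool wrapped = __builtin_add_overflow(UINT_MAX, 2U, &sum);

        printf("wrapped=%d sum=%u\n", wrapped, sum);    /* wrapped=1 sum=1 */
        return 0;
}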
+ */ +#define __signed_add_overflow(a, b, d) ({ \ + typeof(a) __a = (a); \ + typeof(b) __b = (b); \ + typeof(d) __d = (d); \ + (void) (&__a == &__b); \ + (void) (&__a == __d); \ + *__d = (u64)__a + (u64)__b; \ + (((~(__a ^ __b)) & (*__d ^ __a)) \ + & type_min(typeof(__a))) != 0; \ +}) + +/* + * Subtraction is similar, except that overflow can now happen only + * when the signs are opposite. In this case, overflow has happened if + * the result has the opposite sign of a. + */ +#define __signed_sub_overflow(a, b, d) ({ \ + typeof(a) __a = (a); \ + typeof(b) __b = (b); \ + typeof(d) __d = (d); \ + (void) (&__a == &__b); \ + (void) (&__a == __d); \ + *__d = (u64)__a - (u64)__b; \ + ((((__a ^ __b)) & (*__d ^ __a)) \ + & type_min(typeof(__a))) != 0; \ +}) + +/* + * Signed multiplication is rather hard. gcc always follows C99, so + * division is truncated towards 0. This means that we can write the + * overflow check like this: + * + * (a > 0 && (b > MAX/a || b < MIN/a)) || + * (a < -1 && (b > MIN/a || b < MAX/a) || + * (a == -1 && b == MIN) + * + * The redundant casts of -1 are to silence an annoying -Wtype-limits + * (included in -Wextra) warning: When the type is u8 or u16, the + * __b_c_e in check_mul_overflow obviously selects + * __unsigned_mul_overflow, but unfortunately gcc still parses this + * code and warns about the limited range of __b. + */ + +#define __signed_mul_overflow(a, b, d) ({ \ + typeof(a) __a = (a); \ + typeof(b) __b = (b); \ + typeof(d) __d = (d); \ + typeof(a) __tmax = type_max(typeof(a)); \ + typeof(a) __tmin = type_min(typeof(a)); \ + (void) (&__a == &__b); \ + (void) (&__a == __d); \ + *__d = (u64)__a * (u64)__b; \ + (__b > 0 && (__a > __tmax/__b || __a < __tmin/__b)) || \ + (__b < (typeof(__b))-1 && (__a > __tmin/__b || __a < __tmax/__b)) || \ + (__b == (typeof(__b))-1 && __a == __tmin); \ +}) + + +#define check_add_overflow(a, b, d) \ + __builtin_choose_expr(is_signed_type(typeof(a)), \ + __signed_add_overflow(a, b, d), \ + __unsigned_add_overflow(a, b, d)) + +#define check_sub_overflow(a, b, d) \ + __builtin_choose_expr(is_signed_type(typeof(a)), \ + __signed_sub_overflow(a, b, d), \ + __unsigned_sub_overflow(a, b, d)) + +#define check_mul_overflow(a, b, d) \ + __builtin_choose_expr(is_signed_type(typeof(a)), \ + __signed_mul_overflow(a, b, d), \ + __unsigned_mul_overflow(a, b, d)) + + +#endif /* COMPILER_HAS_GENERIC_BUILTIN_OVERFLOW */ + +/** + * array_size() - Calculate size of 2-dimensional array. + * + * @a: dimension one + * @b: dimension two + * + * Calculates size of 2-dimensional array: @a * @b. + * + * Returns: number of bytes needed to represent the array or SIZE_MAX on + * overflow. + */ +static inline __must_check size_t array_size(size_t a, size_t b) +{ + size_t bytes; + + if (check_mul_overflow(a, b, &bytes)) + return SIZE_MAX; + + return bytes; +} + +/** + * array3_size() - Calculate size of 3-dimensional array. + * + * @a: dimension one + * @b: dimension two + * @c: dimension three + * + * Calculates size of 3-dimensional array: @a * @b * @c. + * + * Returns: number of bytes needed to represent the array or SIZE_MAX on + * overflow. 
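A worked example of the sign test the signed fallbacks rely on: adding two operands of the same sign has overflowed exactly when the result carries the opposite sign. int8_t keeps the numbers small; the cast back to int8_t wraps on gcc, which is what the unsigned (u64) arithmetic in the macros above achieves without relying on an implementation-defined conversion.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        int8_t a = 100, b = 100;
        int8_t r = (int8_t)((uint8_t)a + (uint8_t)b);   /* wraps to -56 */
        int overflow = ((a ^ b) >= 0) && ((r ^ a) < 0);

        printf("r=%d overflow=%d\n", r, overflow);      /* r=-56 overflow=1 */
        return 0;
}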
+ */ +static inline __must_check size_t array3_size(size_t a, size_t b, size_t c) +{ + size_t bytes; + + if (check_mul_overflow(a, b, &bytes)) + return SIZE_MAX; + if (check_mul_overflow(bytes, c, &bytes)) + return SIZE_MAX; + + return bytes; +} + +static inline __must_check size_t __ab_c_size(size_t n, size_t size, size_t c) +{ + size_t bytes; + + if (check_mul_overflow(n, size, &bytes)) + return SIZE_MAX; + if (check_add_overflow(bytes, c, &bytes)) + return SIZE_MAX; + + return bytes; +} + +/** + * struct_size() - Calculate size of structure with trailing array. + * @p: Pointer to the structure. + * @member: Name of the array member. + * @n: Number of elements in the array. + * + * Calculates size of memory needed for structure @p followed by an + * array of @n @member elements. + * + * Return: number of bytes needed or SIZE_MAX on overflow. + */ +#define struct_size(p, member, n) \ + __ab_c_size(n, \ + sizeof(*(p)->member) + __must_be_array((p)->member),\ + sizeof(*(p))) + +#endif /* __LINUX_OVERFLOW_H */ diff --git a/tools/include/tools/libc_compat.h b/tools/include/tools/libc_compat.h new file mode 100644 index 000000000000..664ced8cb1b0 --- /dev/null +++ b/tools/include/tools/libc_compat.h @@ -0,0 +1,20 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* Copyright (C) 2018 Netronome Systems, Inc. */ + +#ifndef __TOOLS_LIBC_COMPAT_H +#define __TOOLS_LIBC_COMPAT_H + +#include <stdlib.h> +#include <linux/overflow.h> + +#ifdef COMPAT_NEED_REALLOCARRAY +static inline void *reallocarray(void *ptr, size_t nmemb, size_t size) +{ + size_t bytes; + + if (unlikely(check_mul_overflow(nmemb, size, &bytes))) + return NULL; + return realloc(ptr, bytes); +} +#endif +#endif diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h index 59b19b6a40d7..870113916cac 100644 --- a/tools/include/uapi/linux/bpf.h +++ b/tools/include/uapi/linux/bpf.h @@ -1826,7 +1826,7 @@ union bpf_attr { * A non-negative value equal to or less than *size* on success, * or a negative error in case of failure. * - * int skb_load_bytes_relative(const struct sk_buff *skb, u32 offset, void *to, u32 len, u32 start_header) + * int bpf_skb_load_bytes_relative(const struct sk_buff *skb, u32 offset, void *to, u32 len, u32 start_header) * Description * This helper is similar to **bpf_skb_load_bytes**\ () in that * it provides an easy way to load *len* bytes from *offset* @@ -1857,7 +1857,8 @@ union bpf_attr { * is resolved), the nexthop address is returned in ipv4_dst * or ipv6_dst based on family, smac is set to mac address of * egress device, dmac is set to nexthop mac address, rt_metric - * is set to metric from route (IPv4/IPv6 only). + * is set to metric from route (IPv4/IPv6 only), and ifindex + * is set to the device index of the nexthop from the FIB lookup. * * *plen* argument is the size of the passed in struct. * *flags* argument can be a combination of one or more of the @@ -1873,9 +1874,10 @@ union bpf_attr { * *ctx* is either **struct xdp_md** for XDP programs or * **struct sk_buff** tc cls_act programs. * Return - * Egress device index on success, 0 if packet needs to continue - * up the stack for further processing or a negative error in case - * of failure. 
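A sketch of the allocation pattern struct_size() above is designed for: a header followed by a flexible array member. struct entry and entry_alloc() are invented for the example; struct_size(e, vals, n) would compute the same sizeof(*e) + n * sizeof(e->vals[0]) but saturate to SIZE_MAX on overflow, so the allocation fails instead of being undersized.

#include <stdlib.h>

struct entry {
        unsigned int    len;
        unsigned int    vals[];         /* flexible array member */
};

static struct entry *entry_alloc(unsigned int n)
{
        struct entry *e;

        /* open-coded equivalent, minus the saturation the helper adds */
        e = malloc(sizeof(*e) + (size_t)n * sizeof(e->vals[0]));
        if (e)
                e->len = n;
        return e;
}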
+ * * < 0 if any input argument is invalid + * * 0 on success (packet is forwarded, nexthop neighbor exists) + * * > 0 one of **BPF_FIB_LKUP_RET_** codes explaining why the + * packet is not forwarded or needs assist from full stack * * int bpf_sock_hash_update(struct bpf_sock_ops_kern *skops, struct bpf_map *map, void *key, u64 flags) * Description @@ -2031,7 +2033,6 @@ union bpf_attr { * This helper is only available is the kernel was compiled with * the **CONFIG_BPF_LIRC_MODE2** configuration option set to * "**y**". - * * Return * 0 * @@ -2051,7 +2052,6 @@ union bpf_attr { * This helper is only available is the kernel was compiled with * the **CONFIG_BPF_LIRC_MODE2** configuration option set to * "**y**". - * * Return * 0 * @@ -2555,6 +2555,9 @@ enum { * Arg1: old_state * Arg2: new_state */ + BPF_SOCK_OPS_TCP_LISTEN_CB, /* Called on listen(2), right after + * socket transition to LISTEN state. + */ }; /* List of TCP states. There is a build check in net/ipv4/tcp.c to detect @@ -2612,6 +2615,18 @@ struct bpf_raw_tracepoint_args { #define BPF_FIB_LOOKUP_DIRECT BIT(0) #define BPF_FIB_LOOKUP_OUTPUT BIT(1) +enum { + BPF_FIB_LKUP_RET_SUCCESS, /* lookup successful */ + BPF_FIB_LKUP_RET_BLACKHOLE, /* dest is blackholed; can be dropped */ + BPF_FIB_LKUP_RET_UNREACHABLE, /* dest is unreachable; can be dropped */ + BPF_FIB_LKUP_RET_PROHIBIT, /* dest not allowed; can be dropped */ + BPF_FIB_LKUP_RET_NOT_FWDED, /* packet is not forwarded */ + BPF_FIB_LKUP_RET_FWD_DISABLED, /* fwding is not enabled on ingress */ + BPF_FIB_LKUP_RET_UNSUPP_LWT, /* fwd requires encapsulation */ + BPF_FIB_LKUP_RET_NO_NEIGH, /* no neighbor entry for nh */ + BPF_FIB_LKUP_RET_FRAG_NEEDED, /* fragmentation required to fwd */ +}; + struct bpf_fib_lookup { /* input: network family for lookup (AF_INET, AF_INET6) * output: network family of egress nexthop @@ -2625,7 +2640,11 @@ struct bpf_fib_lookup { /* total length of packet from network header - used for MTU check */ __u16 tot_len; - __u32 ifindex; /* L3 device index for lookup */ + + /* input: L3 device index for lookup + * output: device index from FIB lookup + */ + __u32 ifindex; union { /* inputs to lookup */ diff --git a/tools/lib/bpf/Build b/tools/lib/bpf/Build index 6070e655042d..13a861135127 100644 --- a/tools/lib/bpf/Build +++ b/tools/lib/bpf/Build @@ -1 +1 @@ -libbpf-y := libbpf.o bpf.o nlattr.o btf.o +libbpf-y := libbpf.o bpf.o nlattr.o btf.o libbpf_errno.o diff --git a/tools/lib/bpf/Makefile b/tools/lib/bpf/Makefile index 5390e7725e43..7a8e4c98ef1a 100644 --- a/tools/lib/bpf/Makefile +++ b/tools/lib/bpf/Makefile @@ -66,7 +66,7 @@ ifndef VERBOSE endif FEATURE_USER = .libbpf -FEATURE_TESTS = libelf libelf-getphdrnum libelf-mmap bpf +FEATURE_TESTS = libelf libelf-getphdrnum libelf-mmap bpf reallocarray FEATURE_DISPLAY = libelf bpf INCLUDES = -I. 
-I$(srctree)/tools/include -I$(srctree)/tools/arch/$(ARCH)/include/uapi -I$(srctree)/tools/include/uapi -I$(srctree)/tools/perf @@ -120,6 +120,10 @@ ifeq ($(feature-libelf-getphdrnum), 1) override CFLAGS += -DHAVE_ELF_GETPHDRNUM_SUPPORT endif +ifeq ($(feature-reallocarray), 0) + override CFLAGS += -DCOMPAT_NEED_REALLOCARRAY +endif + # Append required CFLAGS override CFLAGS += $(EXTRA_WARNINGS) override CFLAGS += -Werror -Wall diff --git a/tools/lib/bpf/btf.c b/tools/lib/bpf/btf.c index 8c54a4b6f187..03161be094b4 100644 --- a/tools/lib/bpf/btf.c +++ b/tools/lib/bpf/btf.c @@ -17,6 +17,11 @@ #define BTF_MAX_NR_TYPES 65535 +#define IS_MODIFIER(k) (((k) == BTF_KIND_TYPEDEF) || \ + ((k) == BTF_KIND_VOLATILE) || \ + ((k) == BTF_KIND_CONST) || \ + ((k) == BTF_KIND_RESTRICT)) + static struct btf_type btf_void; struct btf { @@ -33,14 +38,6 @@ struct btf { int fd; }; -static const char *btf_name_by_offset(const struct btf *btf, uint32_t offset) -{ - if (offset < btf->hdr->str_len) - return &btf->strings[offset]; - else - return NULL; -} - static int btf_add_type(struct btf *btf, struct btf_type *t) { if (btf->types_size - btf->nr_types < 2) { @@ -190,15 +187,6 @@ static int btf_parse_type_sec(struct btf *btf, btf_print_fn_t err_log) return 0; } -static const struct btf_type *btf_type_by_id(const struct btf *btf, - uint32_t type_id) -{ - if (type_id > btf->nr_types) - return NULL; - - return btf->types[type_id]; -} - static bool btf_type_is_void(const struct btf_type *t) { return t == &btf_void || BTF_INFO_KIND(t->info) == BTF_KIND_FWD; @@ -234,7 +222,7 @@ int64_t btf__resolve_size(const struct btf *btf, uint32_t type_id) int64_t size = -1; int i; - t = btf_type_by_id(btf, type_id); + t = btf__type_by_id(btf, type_id); for (i = 0; i < MAX_RESOLVE_DEPTH && !btf_type_is_void_or_null(t); i++) { size = btf_type_size(t); @@ -259,7 +247,7 @@ int64_t btf__resolve_size(const struct btf *btf, uint32_t type_id) return -EINVAL; } - t = btf_type_by_id(btf, type_id); + t = btf__type_by_id(btf, type_id); } if (size < 0) @@ -271,6 +259,26 @@ int64_t btf__resolve_size(const struct btf *btf, uint32_t type_id) return nelems * size; } +int btf__resolve_type(const struct btf *btf, __u32 type_id) +{ + const struct btf_type *t; + int depth = 0; + + t = btf__type_by_id(btf, type_id); + while (depth < MAX_RESOLVE_DEPTH && + !btf_type_is_void_or_null(t) && + IS_MODIFIER(BTF_INFO_KIND(t->info))) { + type_id = t->type; + t = btf__type_by_id(btf, type_id); + depth++; + } + + if (depth == MAX_RESOLVE_DEPTH || btf_type_is_void_or_null(t)) + return -EINVAL; + + return type_id; +} + int32_t btf__find_by_name(const struct btf *btf, const char *type_name) { uint32_t i; @@ -280,7 +288,7 @@ int32_t btf__find_by_name(const struct btf *btf, const char *type_name) for (i = 1; i <= btf->nr_types; i++) { const struct btf_type *t = btf->types[i]; - const char *name = btf_name_by_offset(btf, t->name_off); + const char *name = btf__name_by_offset(btf, t->name_off); if (name && !strcmp(type_name, name)) return i; @@ -371,3 +379,20 @@ int btf__fd(const struct btf *btf) { return btf->fd; } + +const char *btf__name_by_offset(const struct btf *btf, __u32 offset) +{ + if (offset < btf->hdr->str_len) + return &btf->strings[offset]; + else + return NULL; +} + +const struct btf_type *btf__type_by_id(const struct btf *btf, + __u32 type_id) +{ + if (type_id > btf->nr_types) + return NULL; + + return btf->types[type_id]; +} diff --git a/tools/lib/bpf/btf.h b/tools/lib/bpf/btf.h index 74bb344035bb..24f361d99a5e 100644 --- a/tools/lib/bpf/btf.h +++ 
b/tools/lib/bpf/btf.h @@ -17,6 +17,9 @@ void btf__free(struct btf *btf); struct btf *btf__new(uint8_t *data, uint32_t size, btf_print_fn_t err_log); int32_t btf__find_by_name(const struct btf *btf, const char *type_name); int64_t btf__resolve_size(const struct btf *btf, uint32_t type_id); +int btf__resolve_type(const struct btf *btf, __u32 type_id); int btf__fd(const struct btf *btf); +const char *btf__name_by_offset(const struct btf *btf, __u32 offset); +const struct btf_type *btf__type_by_id(const struct btf *btf, __u32 type_id); #endif diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c index a1e96b5de5ff..955f8eafbf41 100644 --- a/tools/lib/bpf/libbpf.c +++ b/tools/lib/bpf/libbpf.c @@ -22,6 +22,7 @@ * License along with this program; if not, see <http://www.gnu.org/licenses> */ +#define _GNU_SOURCE #include <stdlib.h> #include <stdio.h> #include <stdarg.h> @@ -41,6 +42,7 @@ #include <sys/stat.h> #include <sys/types.h> #include <sys/vfs.h> +#include <tools/libc_compat.h> #include <libelf.h> #include <gelf.h> @@ -95,54 +97,6 @@ void libbpf_set_print(libbpf_print_fn_t warn, #define STRERR_BUFSIZE 128 -#define ERRNO_OFFSET(e) ((e) - __LIBBPF_ERRNO__START) -#define ERRCODE_OFFSET(c) ERRNO_OFFSET(LIBBPF_ERRNO__##c) -#define NR_ERRNO (__LIBBPF_ERRNO__END - __LIBBPF_ERRNO__START) - -static const char *libbpf_strerror_table[NR_ERRNO] = { - [ERRCODE_OFFSET(LIBELF)] = "Something wrong in libelf", - [ERRCODE_OFFSET(FORMAT)] = "BPF object format invalid", - [ERRCODE_OFFSET(KVERSION)] = "'version' section incorrect or lost", - [ERRCODE_OFFSET(ENDIAN)] = "Endian mismatch", - [ERRCODE_OFFSET(INTERNAL)] = "Internal error in libbpf", - [ERRCODE_OFFSET(RELOC)] = "Relocation failed", - [ERRCODE_OFFSET(VERIFY)] = "Kernel verifier blocks program loading", - [ERRCODE_OFFSET(PROG2BIG)] = "Program too big", - [ERRCODE_OFFSET(KVER)] = "Incorrect kernel version", - [ERRCODE_OFFSET(PROGTYPE)] = "Kernel doesn't support this program type", - [ERRCODE_OFFSET(WRNGPID)] = "Wrong pid in netlink message", - [ERRCODE_OFFSET(INVSEQ)] = "Invalid netlink sequence", -}; - -int libbpf_strerror(int err, char *buf, size_t size) -{ - if (!buf || !size) - return -1; - - err = err > 0 ? err : -err; - - if (err < __LIBBPF_ERRNO__START) { - int ret; - - ret = strerror_r(err, buf, size); - buf[size - 1] = '\0'; - return ret; - } - - if (err < __LIBBPF_ERRNO__END) { - const char *msg; - - msg = libbpf_strerror_table[ERRNO_OFFSET(err)]; - snprintf(buf, size, "%s", msg); - buf[size - 1] = '\0'; - return 0; - } - - snprintf(buf, size, "Unknown libbpf error %d", err); - buf[size - 1] = '\0'; - return -1; -} - #define CHECK_ERR(action, err, out) do { \ err = action; \ if (err) \ @@ -234,6 +188,7 @@ struct bpf_object { size_t nr_maps; bool loaded; + bool has_pseudo_calls; /* * Information when doing elf related work. 
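The btf accessors exported above (btf__type_by_id(), btf__name_by_offset()) together with the new btf__resolve_type() let a caller such as bpftool's dumper walk from a type id to a printable name. A small sketch of that lookup; print_resolved_type_name() is invented, and the build is assumed to have the tools include paths for btf.h and the UAPI linux/btf.h:

#include <linux/btf.h>
#include <stdio.h>

#include "btf.h"

/* btf and type_id are whatever the caller obtained, e.g. a map's
 * btf_value_type_id together with the BTF object loaded for that map
 */
static void print_resolved_type_name(const struct btf *btf, __u32 type_id)
{
        const struct btf_type *t;
        const char *name;
        int id;

        /* skip typedef/const/volatile/restrict wrappers */
        id = btf__resolve_type(btf, type_id);
        if (id < 0)
                return;

        t = btf__type_by_id(btf, id);
        if (!t)
                return;

        name = btf__name_by_offset(btf, t->name_off);
        printf("resolved to %s\n", name && name[0] ? name : "(anon)");
}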
Only valid if fd @@ -368,7 +323,7 @@ bpf_object__add_program(struct bpf_object *obj, void *data, size_t size, progs = obj->programs; nr_progs = obj->nr_programs; - progs = realloc(progs, sizeof(progs[0]) * (nr_progs + 1)); + progs = reallocarray(progs, nr_progs + 1, sizeof(progs[0])); if (!progs) { /* * In this case the original obj->programs @@ -400,10 +355,6 @@ bpf_object__init_prog_names(struct bpf_object *obj) const char *name = NULL; prog = &obj->programs[pi]; - if (prog->idx == obj->efile.text_shndx) { - name = ".text"; - goto skip_search; - } for (si = 0; si < symbols->d_size / sizeof(GElf_Sym) && !name; si++) { @@ -426,12 +377,15 @@ bpf_object__init_prog_names(struct bpf_object *obj) } } + if (!name && prog->idx == obj->efile.text_shndx) + name = ".text"; + if (!name) { pr_warning("failed to find sym for prog %s\n", prog->section_name); return -EINVAL; } -skip_search: + prog->name = strdup(name); if (!prog->name) { pr_warning("failed to allocate memory for prog sym %s\n", @@ -870,8 +824,8 @@ static int bpf_object__elf_collect(struct bpf_object *obj) continue; } - reloc = realloc(reloc, - sizeof(*obj->efile.reloc) * nr_reloc); + reloc = reallocarray(reloc, nr_reloc, + sizeof(*obj->efile.reloc)); if (!reloc) { pr_warning("realloc failed\n"); err = -ENOMEM; @@ -981,6 +935,7 @@ bpf_program__collect_reloc(struct bpf_program *prog, GElf_Shdr *shdr, prog->reloc_desc[i].type = RELO_CALL; prog->reloc_desc[i].insn_idx = insn_idx; prog->reloc_desc[i].text_off = sym.st_value; + obj->has_pseudo_calls = true; continue; } @@ -1080,6 +1035,53 @@ static int bpf_map_find_btf_info(struct bpf_map *map, const struct btf *btf) return 0; } +int bpf_map__reuse_fd(struct bpf_map *map, int fd) +{ + struct bpf_map_info info = {}; + __u32 len = sizeof(info); + int new_fd, err; + char *new_name; + + err = bpf_obj_get_info_by_fd(fd, &info, &len); + if (err) + return err; + + new_name = strdup(info.name); + if (!new_name) + return -errno; + + new_fd = open("/", O_RDONLY | O_CLOEXEC); + if (new_fd < 0) + goto err_free_new_name; + + new_fd = dup3(fd, new_fd, O_CLOEXEC); + if (new_fd < 0) + goto err_close_new_fd; + + err = zclose(map->fd); + if (err) + goto err_close_new_fd; + free(map->name); + + map->fd = new_fd; + map->name = new_name; + map->def.type = info.type; + map->def.key_size = info.key_size; + map->def.value_size = info.value_size; + map->def.max_entries = info.max_entries; + map->def.map_flags = info.map_flags; + map->btf_key_type_id = info.btf_key_type_id; + map->btf_value_type_id = info.btf_value_type_id; + + return 0; + +err_close_new_fd: + close(new_fd); +err_free_new_name: + free(new_name); + return -errno; +} + static int bpf_object__create_maps(struct bpf_object *obj) { @@ -1092,6 +1094,12 @@ bpf_object__create_maps(struct bpf_object *obj) struct bpf_map_def *def = &map->def; int *pfd = &map->fd; + if (map->fd >= 0) { + pr_debug("skip map create (preset) %s: fd=%d\n", + map->name, map->fd); + continue; + } + create_attr.name = map->name; create_attr.map_ifindex = map->map_ifindex; create_attr.map_type = def->type; @@ -1162,7 +1170,7 @@ bpf_program__reloc_text(struct bpf_program *prog, struct bpf_object *obj, return -LIBBPF_ERRNO__RELOC; } new_cnt = prog->insns_cnt + text->insns_cnt; - new_insn = realloc(prog->insns, new_cnt * sizeof(*insn)); + new_insn = reallocarray(prog->insns, new_cnt, sizeof(*insn)); if (!new_insn) { pr_warning("oom in prog realloc\n"); return -ENOMEM; @@ -1426,6 +1434,12 @@ out: return err; } +static bool bpf_program__is_function_storage(struct bpf_program *prog, + struct 
bpf_object *obj) +{ + return prog->idx == obj->efile.text_shndx && obj->has_pseudo_calls; +} + static int bpf_object__load_progs(struct bpf_object *obj) { @@ -1433,7 +1447,7 @@ bpf_object__load_progs(struct bpf_object *obj) int err; for (i = 0; i < obj->nr_programs; i++) { - if (obj->programs[i].idx == obj->efile.text_shndx) + if (bpf_program__is_function_storage(&obj->programs[i], obj)) continue; err = bpf_program__load(&obj->programs[i], obj->license, @@ -1513,15 +1527,26 @@ out: return ERR_PTR(err); } -struct bpf_object *bpf_object__open(const char *path) +struct bpf_object *bpf_object__open_xattr(struct bpf_object_open_attr *attr) { /* param validation */ - if (!path) + if (!attr->file) return NULL; - pr_debug("loading %s\n", path); + pr_debug("loading %s\n", attr->file); + + return __bpf_object__open(attr->file, NULL, 0, + bpf_prog_type__needs_kver(attr->prog_type)); +} - return __bpf_object__open(path, NULL, 0, true); +struct bpf_object *bpf_object__open(const char *path) +{ + struct bpf_object_open_attr attr = { + .file = path, + .prog_type = BPF_PROG_TYPE_UNSPEC, + }; + + return bpf_object__open_xattr(&attr); } struct bpf_object *bpf_object__open_buffer(void *obj_buf, @@ -1858,8 +1883,8 @@ void *bpf_object__priv(struct bpf_object *obj) return obj ? obj->priv : ERR_PTR(-EINVAL); } -struct bpf_program * -bpf_program__next(struct bpf_program *prev, struct bpf_object *obj) +static struct bpf_program * +__bpf_program__next(struct bpf_program *prev, struct bpf_object *obj) { size_t idx; @@ -1880,6 +1905,18 @@ bpf_program__next(struct bpf_program *prev, struct bpf_object *obj) return &obj->programs[idx]; } +struct bpf_program * +bpf_program__next(struct bpf_program *prev, struct bpf_object *obj) +{ + struct bpf_program *prog = prev; + + do { + prog = __bpf_program__next(prog, obj); + } while (prog && bpf_program__is_function_storage(prog, obj)); + + return prog; +} + int bpf_program__set_priv(struct bpf_program *prog, void *priv, bpf_program_clear_priv_t clear_priv) { @@ -1896,6 +1933,11 @@ void *bpf_program__priv(struct bpf_program *prog) return prog ? 
prog->priv : ERR_PTR(-EINVAL); } +void bpf_program__set_ifindex(struct bpf_program *prog, __u32 ifindex) +{ + prog->prog_ifindex = ifindex; +} + const char *bpf_program__title(struct bpf_program *prog, bool needs_copy) { const char *title; @@ -2037,9 +2079,11 @@ static const struct { BPF_PROG_SEC("lwt_in", BPF_PROG_TYPE_LWT_IN), BPF_PROG_SEC("lwt_out", BPF_PROG_TYPE_LWT_OUT), BPF_PROG_SEC("lwt_xmit", BPF_PROG_TYPE_LWT_XMIT), + BPF_PROG_SEC("lwt_seg6local", BPF_PROG_TYPE_LWT_SEG6LOCAL), BPF_PROG_SEC("sockops", BPF_PROG_TYPE_SOCK_OPS), BPF_PROG_SEC("sk_skb", BPF_PROG_TYPE_SK_SKB), BPF_PROG_SEC("sk_msg", BPF_PROG_TYPE_SK_MSG), + BPF_PROG_SEC("lirc_mode2", BPF_PROG_TYPE_LIRC_MODE2), BPF_SA_PROG_SEC("cgroup/bind4", BPF_CGROUP_INET4_BIND), BPF_SA_PROG_SEC("cgroup/bind6", BPF_CGROUP_INET6_BIND), BPF_SA_PROG_SEC("cgroup/connect4", BPF_CGROUP_INET4_CONNECT), @@ -2055,23 +2099,31 @@ static const struct { #undef BPF_S_PROG_SEC #undef BPF_SA_PROG_SEC -static int bpf_program__identify_section(struct bpf_program *prog) +int libbpf_prog_type_by_name(const char *name, enum bpf_prog_type *prog_type, + enum bpf_attach_type *expected_attach_type) { int i; - if (!prog->section_name) - goto err; - - for (i = 0; i < ARRAY_SIZE(section_names); i++) - if (strncmp(prog->section_name, section_names[i].sec, - section_names[i].len) == 0) - return i; + if (!name) + return -EINVAL; -err: - pr_warning("failed to guess program type based on section name %s\n", - prog->section_name); + for (i = 0; i < ARRAY_SIZE(section_names); i++) { + if (strncmp(name, section_names[i].sec, section_names[i].len)) + continue; + *prog_type = section_names[i].prog_type; + *expected_attach_type = section_names[i].expected_attach_type; + return 0; + } + return -EINVAL; +} - return -1; +static int +bpf_program__identify_section(struct bpf_program *prog, + enum bpf_prog_type *prog_type, + enum bpf_attach_type *expected_attach_type) +{ + return libbpf_prog_type_by_name(prog->section_name, prog_type, + expected_attach_type); } int bpf_map__fd(struct bpf_map *map) @@ -2120,6 +2172,16 @@ void *bpf_map__priv(struct bpf_map *map) return map ? 
map->priv : ERR_PTR(-EINVAL); } +bool bpf_map__is_offload_neutral(struct bpf_map *map) +{ + return map->def.type == BPF_MAP_TYPE_PERF_EVENT_ARRAY; +} + +void bpf_map__set_ifindex(struct bpf_map *map, __u32 ifindex) +{ + map->map_ifindex = ifindex; +} + struct bpf_map * bpf_map__next(struct bpf_map *prev, struct bpf_object *obj) { @@ -2194,12 +2256,15 @@ int bpf_prog_load(const char *file, enum bpf_prog_type type, int bpf_prog_load_xattr(const struct bpf_prog_load_attr *attr, struct bpf_object **pobj, int *prog_fd) { + struct bpf_object_open_attr open_attr = { + .file = attr->file, + .prog_type = attr->prog_type, + }; struct bpf_program *prog, *first_prog = NULL; enum bpf_attach_type expected_attach_type; enum bpf_prog_type prog_type; struct bpf_object *obj; struct bpf_map *map; - int section_idx; int err; if (!attr) @@ -2207,8 +2272,7 @@ int bpf_prog_load_xattr(const struct bpf_prog_load_attr *attr, if (!attr->file) return -EINVAL; - obj = __bpf_object__open(attr->file, NULL, 0, - bpf_prog_type__needs_kver(attr->prog_type)); + obj = bpf_object__open_xattr(&open_attr); if (IS_ERR_OR_NULL(obj)) return -ENOENT; @@ -2221,26 +2285,27 @@ int bpf_prog_load_xattr(const struct bpf_prog_load_attr *attr, prog->prog_ifindex = attr->ifindex; expected_attach_type = attr->expected_attach_type; if (prog_type == BPF_PROG_TYPE_UNSPEC) { - section_idx = bpf_program__identify_section(prog); - if (section_idx < 0) { + err = bpf_program__identify_section(prog, &prog_type, + &expected_attach_type); + if (err < 0) { + pr_warning("failed to guess program type based on section name %s\n", + prog->section_name); bpf_object__close(obj); return -EINVAL; } - prog_type = section_names[section_idx].prog_type; - expected_attach_type = - section_names[section_idx].expected_attach_type; } bpf_program__set_type(prog, prog_type); bpf_program__set_expected_attach_type(prog, expected_attach_type); - if (prog->idx != obj->efile.text_shndx && !first_prog) + if (!bpf_program__is_function_storage(prog, obj) && !first_prog) first_prog = prog; } bpf_map__for_each(map, obj) { - map->map_ifindex = attr->ifindex; + if (!bpf_map__is_offload_neutral(map)) + map->map_ifindex = attr->ifindex; } if (!first_prog) { diff --git a/tools/lib/bpf/libbpf.h b/tools/lib/bpf/libbpf.h index 09976531aa74..1f8fc2060460 100644 --- a/tools/lib/bpf/libbpf.h +++ b/tools/lib/bpf/libbpf.h @@ -66,7 +66,13 @@ void libbpf_set_print(libbpf_print_fn_t warn, /* Hide internal to user */ struct bpf_object; +struct bpf_object_open_attr { + const char *file; + enum bpf_prog_type prog_type; +}; + struct bpf_object *bpf_object__open(const char *path); +struct bpf_object *bpf_object__open_xattr(struct bpf_object_open_attr *attr); struct bpf_object *bpf_object__open_buffer(void *obj_buf, size_t obj_buf_sz, const char *name); @@ -92,6 +98,9 @@ int bpf_object__set_priv(struct bpf_object *obj, void *priv, bpf_object_clear_priv_t clear_priv); void *bpf_object__priv(struct bpf_object *prog); +int libbpf_prog_type_by_name(const char *name, enum bpf_prog_type *prog_type, + enum bpf_attach_type *expected_attach_type); + /* Accessors of bpf_program */ struct bpf_program; struct bpf_program *bpf_program__next(struct bpf_program *prog, @@ -109,6 +118,7 @@ int bpf_program__set_priv(struct bpf_program *prog, void *priv, bpf_program_clear_priv_t clear_priv); void *bpf_program__priv(struct bpf_program *prog); +void bpf_program__set_ifindex(struct bpf_program *prog, __u32 ifindex); const char *bpf_program__title(struct bpf_program *prog, bool needs_copy); @@ -251,6 +261,9 @@ typedef void 
(*bpf_map_clear_priv_t)(struct bpf_map *, void *); int bpf_map__set_priv(struct bpf_map *map, void *priv, bpf_map_clear_priv_t clear_priv); void *bpf_map__priv(struct bpf_map *map); +int bpf_map__reuse_fd(struct bpf_map *map, int fd); +bool bpf_map__is_offload_neutral(struct bpf_map *map); +void bpf_map__set_ifindex(struct bpf_map *map, __u32 ifindex); int bpf_map__pin(struct bpf_map *map, const char *path); long libbpf_get_error(const void *ptr); diff --git a/tools/lib/bpf/libbpf_errno.c b/tools/lib/bpf/libbpf_errno.c new file mode 100644 index 000000000000..d9ba851bd7f9 --- /dev/null +++ b/tools/lib/bpf/libbpf_errno.c @@ -0,0 +1,74 @@ +// SPDX-License-Identifier: LGPL-2.1 + +/* + * Copyright (C) 2013-2015 Alexei Starovoitov <ast@kernel.org> + * Copyright (C) 2015 Wang Nan <wangnan0@huawei.com> + * Copyright (C) 2015 Huawei Inc. + * Copyright (C) 2017 Nicira, Inc. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; + * version 2.1 of the License (not later!) + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this program; if not, see <http://www.gnu.org/licenses> + */ + +#include <stdio.h> +#include <string.h> + +#include "libbpf.h" + +#define ERRNO_OFFSET(e) ((e) - __LIBBPF_ERRNO__START) +#define ERRCODE_OFFSET(c) ERRNO_OFFSET(LIBBPF_ERRNO__##c) +#define NR_ERRNO (__LIBBPF_ERRNO__END - __LIBBPF_ERRNO__START) + +static const char *libbpf_strerror_table[NR_ERRNO] = { + [ERRCODE_OFFSET(LIBELF)] = "Something wrong in libelf", + [ERRCODE_OFFSET(FORMAT)] = "BPF object format invalid", + [ERRCODE_OFFSET(KVERSION)] = "'version' section incorrect or lost", + [ERRCODE_OFFSET(ENDIAN)] = "Endian mismatch", + [ERRCODE_OFFSET(INTERNAL)] = "Internal error in libbpf", + [ERRCODE_OFFSET(RELOC)] = "Relocation failed", + [ERRCODE_OFFSET(VERIFY)] = "Kernel verifier blocks program loading", + [ERRCODE_OFFSET(PROG2BIG)] = "Program too big", + [ERRCODE_OFFSET(KVER)] = "Incorrect kernel version", + [ERRCODE_OFFSET(PROGTYPE)] = "Kernel doesn't support this program type", + [ERRCODE_OFFSET(WRNGPID)] = "Wrong pid in netlink message", + [ERRCODE_OFFSET(INVSEQ)] = "Invalid netlink sequence", +}; + +int libbpf_strerror(int err, char *buf, size_t size) +{ + if (!buf || !size) + return -1; + + err = err > 0 ? 
err : -err; + + if (err < __LIBBPF_ERRNO__START) { + int ret; + + ret = strerror_r(err, buf, size); + buf[size - 1] = '\0'; + return ret; + } + + if (err < __LIBBPF_ERRNO__END) { + const char *msg; + + msg = libbpf_strerror_table[ERRNO_OFFSET(err)]; + snprintf(buf, size, "%s", msg); + buf[size - 1] = '\0'; + return 0; + } + + snprintf(buf, size, "Unknown libbpf error %d", err); + buf[size - 1] = '\0'; + return -1; +} diff --git a/tools/testing/selftests/bpf/Makefile b/tools/testing/selftests/bpf/Makefile index 7a6214e9ae58..478bf1bcbbf5 100644 --- a/tools/testing/selftests/bpf/Makefile +++ b/tools/testing/selftests/bpf/Makefile @@ -61,6 +61,7 @@ $(OUTPUT)/test_dev_cgroup: cgroup_helpers.c $(OUTPUT)/test_sock: cgroup_helpers.c $(OUTPUT)/test_sock_addr: cgroup_helpers.c $(OUTPUT)/test_sockmap: cgroup_helpers.c +$(OUTPUT)/test_tcpbpf_user: cgroup_helpers.c $(OUTPUT)/test_progs: trace_helpers.c $(OUTPUT)/get_cgroup_id_user: cgroup_helpers.c diff --git a/tools/testing/selftests/bpf/cgroup_helpers.c b/tools/testing/selftests/bpf/cgroup_helpers.c index c87b4e052ce9..cf16948aad4a 100644 --- a/tools/testing/selftests/bpf/cgroup_helpers.c +++ b/tools/testing/selftests/bpf/cgroup_helpers.c @@ -118,7 +118,7 @@ static int join_cgroup_from_top(char *cgroup_path) * * On success, it returns 0, otherwise on failure it returns 1. */ -int join_cgroup(char *path) +int join_cgroup(const char *path) { char cgroup_path[PATH_MAX + 1]; @@ -158,7 +158,7 @@ void cleanup_cgroup_environment(void) * On success, it returns the file descriptor. On failure it returns 0. * If there is a failure, it prints the error to stderr. */ -int create_and_get_cgroup(char *path) +int create_and_get_cgroup(const char *path) { char cgroup_path[PATH_MAX + 1]; int fd; @@ -186,7 +186,7 @@ int create_and_get_cgroup(char *path) * which is an invalid cgroup id. * If there is a failure, it prints the error to stderr. 
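libbpf_strerror() itself is unchanged by the move into libbpf_errno.c. For reference, a minimal caller translating one of the LIBBPF_ERRNO__* codes into text; it assumes a build against tools/lib/bpf:

#include <libbpf.h>
#include <stdio.h>

int main(void)
{
        char buf[128];

        libbpf_strerror(-LIBBPF_ERRNO__VERIFY, buf, sizeof(buf));
        printf("%s\n", buf);    /* "Kernel verifier blocks program loading" */
        return 0;
}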
*/ -unsigned long long get_cgroup_id(char *path) +unsigned long long get_cgroup_id(const char *path) { int dirfd, err, flags, mount_id, fhsize; union { diff --git a/tools/testing/selftests/bpf/cgroup_helpers.h b/tools/testing/selftests/bpf/cgroup_helpers.h index 20a4a5dcd469..d64bb8957090 100644 --- a/tools/testing/selftests/bpf/cgroup_helpers.h +++ b/tools/testing/selftests/bpf/cgroup_helpers.h @@ -9,10 +9,10 @@ __FILE__, __LINE__, clean_errno(), ##__VA_ARGS__) -int create_and_get_cgroup(char *path); -int join_cgroup(char *path); +int create_and_get_cgroup(const char *path); +int join_cgroup(const char *path); int setup_cgroup_environment(void); void cleanup_cgroup_environment(void); -unsigned long long get_cgroup_id(char *path); +unsigned long long get_cgroup_id(const char *path); #endif diff --git a/tools/testing/selftests/bpf/test_offload.py b/tools/testing/selftests/bpf/test_offload.py index be800d0e7a84..b746227eaff2 100755 --- a/tools/testing/selftests/bpf/test_offload.py +++ b/tools/testing/selftests/bpf/test_offload.py @@ -339,6 +339,11 @@ class NetdevSim: self.dfs = DebugfsDir(self.dfs_dir) return self.dfs + def dfs_read(self, f): + path = os.path.join(self.dfs_dir, f) + _, data = cmd('cat %s' % (path)) + return data.strip() + def dfs_num_bound_progs(self): path = os.path.join(self.dfs_dir, "bpf_bound_progs") _, progs = cmd('ls %s' % (path)) @@ -547,11 +552,11 @@ def check_extack(output, reference, args): if skip_extack: return lines = output.split("\n") - comp = len(lines) >= 2 and lines[1] == reference + comp = len(lines) >= 2 and lines[1] == 'Error: ' + reference fail(not comp, "Missing or incorrect netlink extack message") def check_extack_nsim(output, reference, args): - check_extack(output, "Error: netdevsim: " + reference, args) + check_extack(output, "netdevsim: " + reference, args) def check_no_extack(res, needle): fail((res[1] + res[2]).count(needle) or (res[1] + res[2]).count("Warning:"), @@ -654,7 +659,7 @@ try: ret, _, err = sim.cls_bpf_add_filter(obj, skip_sw=True, fail=False, include_stderr=True) fail(ret == 0, "TC filter loaded without enabling TC offloads") - check_extack(err, "Error: TC offload is disabled on net device.", args) + check_extack(err, "TC offload is disabled on net device.", args) sim.wait_for_flush() sim.set_ethtool_tc_offloads(True) @@ -694,7 +699,7 @@ try: skip_sw=True, fail=False, include_stderr=True) fail(ret == 0, "Offloaded a filter to chain other than 0") - check_extack(err, "Error: Driver supports only offload of chain 0.", args) + check_extack(err, "Driver supports only offload of chain 0.", args) sim.tc_flush_filters() start_test("Test TC replace...") @@ -814,24 +819,20 @@ try: "Device parameters reported for non-offloaded program") start_test("Test XDP prog replace with bad flags...") - ret, _, err = sim.set_xdp(obj, "offload", force=True, + ret, _, err = sim.set_xdp(obj, "generic", force=True, fail=False, include_stderr=True) fail(ret == 0, "Replaced XDP program with a program in different mode") - check_extack_nsim(err, "program loaded with different flags.", args) + fail(err.count("File exists") != 1, "Replaced driver XDP with generic") ret, _, err = sim.set_xdp(obj, "", force=True, fail=False, include_stderr=True) fail(ret == 0, "Replaced XDP program with a program in different mode") - check_extack_nsim(err, "program loaded with different flags.", args) + check_extack(err, "program loaded with different flags.", args) start_test("Test XDP prog remove with bad flags...") - ret, _, err = sim.unset_xdp("offload", force=True, - 
fail=False, include_stderr=True) - fail(ret == 0, "Removed program with a bad mode mode") - check_extack_nsim(err, "program loaded with different flags.", args) ret, _, err = sim.unset_xdp("", force=True, fail=False, include_stderr=True) - fail(ret == 0, "Removed program with a bad mode mode") - check_extack_nsim(err, "program loaded with different flags.", args) + fail(ret == 0, "Removed program with a bad mode") + check_extack(err, "program loaded with different flags.", args) start_test("Test MTU restrictions...") ret, _ = sim.set_mtu(9000, fail=False) @@ -891,6 +892,60 @@ try: rm(pin_file) bpftool_prog_list_wait(expected=0) + start_test("Test multi-attachment XDP - attach...") + sim.set_xdp(obj, "offload") + xdp = sim.ip_link_show(xdp=True)["xdp"] + offloaded = sim.dfs_read("bpf_offloaded_id") + fail("prog" not in xdp, "Base program not reported in single program mode") + fail(len(ipl["xdp"]["attached"]) != 1, + "Wrong attached program count with one program") + + sim.set_xdp(obj, "") + two_xdps = sim.ip_link_show(xdp=True)["xdp"] + offloaded2 = sim.dfs_read("bpf_offloaded_id") + + fail(two_xdps["mode"] != 4, "Bad mode reported with multiple programs") + fail("prog" in two_xdps, "Base program reported in multi program mode") + fail(xdp["attached"][0] not in two_xdps["attached"], + "Offload program not reported after driver activated") + fail(len(two_xdps["attached"]) != 2, + "Wrong attached program count with two programs") + fail(two_xdps["attached"][0]["prog"]["id"] == + two_xdps["attached"][1]["prog"]["id"], + "offloaded and drv programs have the same id") + fail(offloaded != offloaded2, + "offload ID changed after loading driver program") + + start_test("Test multi-attachment XDP - replace...") + ret, _, err = sim.set_xdp(obj, "offload", fail=False, include_stderr=True) + fail(err.count("busy") != 1, "Replaced one of programs without -force") + + start_test("Test multi-attachment XDP - detach...") + ret, _, err = sim.unset_xdp("drv", force=True, + fail=False, include_stderr=True) + fail(ret == 0, "Removed program with a bad mode") + check_extack(err, "program loaded with different flags.", args) + + sim.unset_xdp("offload") + xdp = sim.ip_link_show(xdp=True)["xdp"] + offloaded = sim.dfs_read("bpf_offloaded_id") + + fail(xdp["mode"] != 1, "Bad mode reported after multiple programs") + fail("prog" not in xdp, + "Base program not reported after multi program mode") + fail(xdp["attached"][0] not in two_xdps["attached"], + "Offload program not reported after driver activated") + fail(len(ipl["xdp"]["attached"]) != 1, + "Wrong attached program count with remaining programs") + fail(offloaded != "0", "offload ID reported with only driver program left") + + start_test("Test multi-attachment XDP - device remove...") + sim.set_xdp(obj, "offload") + sim.remove() + + sim = NetdevSim() + sim.set_ethtool_tc_offloads(True) + start_test("Test mixing of TC and XDP...") sim.tc_add_ingress() sim.set_xdp(obj, "offload") diff --git a/tools/testing/selftests/bpf/test_sock_addr.c b/tools/testing/selftests/bpf/test_sock_addr.c index a5e76b9219b9..2e45c92d1111 100644 --- a/tools/testing/selftests/bpf/test_sock_addr.c +++ b/tools/testing/selftests/bpf/test_sock_addr.c @@ -998,8 +998,9 @@ int init_pktinfo(int domain, struct cmsghdr *cmsg) return 0; } -static int sendmsg_to_server(const struct sockaddr_storage *addr, - socklen_t addr_len, int set_cmsg, int *syscall_err) +static int sendmsg_to_server(int type, const struct sockaddr_storage *addr, + socklen_t addr_len, int set_cmsg, int flags, + int 
*syscall_err) { union { char buf[CMSG_SPACE(sizeof(struct in6_pktinfo))]; @@ -1022,7 +1023,7 @@ static int sendmsg_to_server(const struct sockaddr_storage *addr, goto err; } - fd = socket(domain, SOCK_DGRAM, 0); + fd = socket(domain, type, 0); if (fd == -1) { log_err("Failed to create client socket"); goto err; @@ -1052,7 +1053,7 @@ static int sendmsg_to_server(const struct sockaddr_storage *addr, } } - if (sendmsg(fd, &hdr, 0) != sizeof(data)) { + if (sendmsg(fd, &hdr, flags) != sizeof(data)) { log_err("Fail to send message to server"); *syscall_err = errno; goto err; @@ -1066,6 +1067,15 @@ out: return fd; } +static int fastconnect_to_server(const struct sockaddr_storage *addr, + socklen_t addr_len) +{ + int sendmsg_err; + + return sendmsg_to_server(SOCK_STREAM, addr, addr_len, /*set_cmsg*/0, + MSG_FASTOPEN, &sendmsg_err); +} + static int recvmsg_from_client(int sockfd, struct sockaddr_storage *src_addr) { struct timeval tv; @@ -1185,6 +1195,20 @@ static int run_connect_test_case(const struct sock_addr_test *test) if (cmp_local_ip(clientfd, &expected_src_addr)) goto err; + if (test->type == SOCK_STREAM) { + /* Test TCP Fast Open scenario */ + clientfd = fastconnect_to_server(&requested_addr, addr_len); + if (clientfd == -1) + goto err; + + /* Make sure src and dst addrs were overridden properly */ + if (cmp_peer_addr(clientfd, &expected_addr)) + goto err; + + if (cmp_local_ip(clientfd, &expected_src_addr)) + goto err; + } + goto out; err: err = -1; @@ -1222,8 +1246,9 @@ static int run_sendmsg_test_case(const struct sock_addr_test *test) if (clientfd >= 0) close(clientfd); - clientfd = sendmsg_to_server(&requested_addr, addr_len, - set_cmsg, &err); + clientfd = sendmsg_to_server(test->type, &requested_addr, + addr_len, set_cmsg, /*flags*/0, + &err); if (err) goto out; else if (clientfd == -1) diff --git a/tools/testing/selftests/bpf/test_tcpbpf.h b/tools/testing/selftests/bpf/test_tcpbpf.h index 2fe43289943c..7bcfa6207005 100644 --- a/tools/testing/selftests/bpf/test_tcpbpf.h +++ b/tools/testing/selftests/bpf/test_tcpbpf.h @@ -12,5 +12,6 @@ struct tcpbpf_globals { __u32 good_cb_test_rv; __u64 bytes_received; __u64 bytes_acked; + __u32 num_listen; }; #endif diff --git a/tools/testing/selftests/bpf/test_tcpbpf_kern.c b/tools/testing/selftests/bpf/test_tcpbpf_kern.c index 3e645ee41ed5..4b7fd540cea9 100644 --- a/tools/testing/selftests/bpf/test_tcpbpf_kern.c +++ b/tools/testing/selftests/bpf/test_tcpbpf_kern.c @@ -96,15 +96,22 @@ int bpf_testcb(struct bpf_sock_ops *skops) if (!gp) break; g = *gp; - g.total_retrans = skops->total_retrans; - g.data_segs_in = skops->data_segs_in; - g.data_segs_out = skops->data_segs_out; - g.bytes_received = skops->bytes_received; - g.bytes_acked = skops->bytes_acked; + if (skops->args[0] == BPF_TCP_LISTEN) { + g.num_listen++; + } else { + g.total_retrans = skops->total_retrans; + g.data_segs_in = skops->data_segs_in; + g.data_segs_out = skops->data_segs_out; + g.bytes_received = skops->bytes_received; + g.bytes_acked = skops->bytes_acked; + } bpf_map_update_elem(&global_map, &key, &g, BPF_ANY); } break; + case BPF_SOCK_OPS_TCP_LISTEN_CB: + bpf_sock_ops_cb_flags_set(skops, BPF_SOCK_OPS_STATE_CB_FLAG); + break; default: rv = -1; } diff --git a/tools/testing/selftests/bpf/test_tcpbpf_user.c b/tools/testing/selftests/bpf/test_tcpbpf_user.c index 84ab5163c828..a275c2971376 100644 --- a/tools/testing/selftests/bpf/test_tcpbpf_user.c +++ b/tools/testing/selftests/bpf/test_tcpbpf_user.c @@ -1,27 +1,59 @@ // SPDX-License-Identifier: GPL-2.0 +#include <inttypes.h> 
#include <stdio.h> #include <stdlib.h> -#include <stdio.h> #include <unistd.h> #include <errno.h> -#include <signal.h> #include <string.h> -#include <assert.h> -#include <linux/perf_event.h> -#include <linux/ptrace.h> #include <linux/bpf.h> -#include <sys/ioctl.h> -#include <sys/time.h> #include <sys/types.h> -#include <sys/stat.h> -#include <fcntl.h> #include <bpf/bpf.h> #include <bpf/libbpf.h> -#include "bpf_util.h" + #include "bpf_rlimit.h" -#include <linux/perf_event.h> +#include "bpf_util.h" +#include "cgroup_helpers.h" + #include "test_tcpbpf.h" +#define EXPECT_EQ(expected, actual, fmt) \ + do { \ + if ((expected) != (actual)) { \ + printf(" Value of: " #actual "\n" \ + " Actual: %" fmt "\n" \ + " Expected: %" fmt "\n", \ + (actual), (expected)); \ + goto err; \ + } \ + } while (0) + +int verify_result(const struct tcpbpf_globals *result) +{ + __u32 expected_events; + + expected_events = ((1 << BPF_SOCK_OPS_TIMEOUT_INIT) | + (1 << BPF_SOCK_OPS_RWND_INIT) | + (1 << BPF_SOCK_OPS_TCP_CONNECT_CB) | + (1 << BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB) | + (1 << BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB) | + (1 << BPF_SOCK_OPS_NEEDS_ECN) | + (1 << BPF_SOCK_OPS_STATE_CB) | + (1 << BPF_SOCK_OPS_TCP_LISTEN_CB)); + + EXPECT_EQ(expected_events, result->event_map, "#" PRIx32); + EXPECT_EQ(501ULL, result->bytes_received, "llu"); + EXPECT_EQ(1002ULL, result->bytes_acked, "llu"); + EXPECT_EQ(1, result->data_segs_in, PRIu32); + EXPECT_EQ(1, result->data_segs_out, PRIu32); + EXPECT_EQ(0x80, result->bad_cb_test_rv, PRIu32); + EXPECT_EQ(0, result->good_cb_test_rv, PRIu32); + EXPECT_EQ(1, result->num_listen, PRIu32); + + return 0; +err: + return -1; +} + static int bpf_find_map(const char *test, struct bpf_object *obj, const char *name) { @@ -35,42 +67,28 @@ static int bpf_find_map(const char *test, struct bpf_object *obj, return bpf_map__fd(map); } -#define SYSTEM(CMD) \ - do { \ - if (system(CMD)) { \ - printf("system(%s) FAILS!\n", CMD); \ - } \ - } while (0) - int main(int argc, char **argv) { const char *file = "test_tcpbpf_kern.o"; struct tcpbpf_globals g = {0}; - int cg_fd, prog_fd, map_fd; - bool debug_flag = false; + const char *cg_path = "/foo"; int error = EXIT_FAILURE; struct bpf_object *obj; - char cmd[100], *dir; - struct stat buffer; + int prog_fd, map_fd; + int cg_fd = -1; __u32 key = 0; - int pid; int rv; - if (argc > 1 && strcmp(argv[1], "-d") == 0) - debug_flag = true; + if (setup_cgroup_environment()) + goto err; - dir = "/tmp/cgroupv2/foo"; + cg_fd = create_and_get_cgroup(cg_path); + if (!cg_fd) + goto err; - if (stat(dir, &buffer) != 0) { - SYSTEM("mkdir -p /tmp/cgroupv2"); - SYSTEM("mount -t cgroup2 none /tmp/cgroupv2"); - SYSTEM("mkdir -p /tmp/cgroupv2/foo"); - } - pid = (int) getpid(); - sprintf(cmd, "echo %d >> /tmp/cgroupv2/foo/cgroup.procs", pid); - SYSTEM(cmd); + if (join_cgroup(cg_path)) + goto err; - cg_fd = open(dir, O_DIRECTORY, O_RDONLY); if (bpf_prog_load(file, BPF_PROG_TYPE_SOCK_OPS, &obj, &prog_fd)) { printf("FAILED: load_bpf_file failed for: %s\n", file); goto err; @@ -83,7 +101,10 @@ int main(int argc, char **argv) goto err; } - SYSTEM("./tcp_server.py"); + if (system("./tcp_server.py")) { + printf("FAILED: TCP server\n"); + goto err; + } map_fd = bpf_find_map(__func__, obj, "global_map"); if (map_fd < 0) @@ -95,34 +116,16 @@ int main(int argc, char **argv) goto err; } - if (g.bytes_received != 501 || g.bytes_acked != 1002 || - g.data_segs_in != 1 || g.data_segs_out != 1 || - (g.event_map ^ 0x47e) != 0 || g.bad_cb_test_rv != 0x80 || - g.good_cb_test_rv != 0) { + if 
(verify_result(&g)) { printf("FAILED: Wrong stats\n"); - if (debug_flag) { - printf("\n"); - printf("bytes_received: %d (expecting 501)\n", - (int)g.bytes_received); - printf("bytes_acked: %d (expecting 1002)\n", - (int)g.bytes_acked); - printf("data_segs_in: %d (expecting 1)\n", - g.data_segs_in); - printf("data_segs_out: %d (expecting 1)\n", - g.data_segs_out); - printf("event_map: 0x%x (at least 0x47e)\n", - g.event_map); - printf("bad_cb_test_rv: 0x%x (expecting 0x80)\n", - g.bad_cb_test_rv); - printf("good_cb_test_rv:0x%x (expecting 0)\n", - g.good_cb_test_rv); - } goto err; } + printf("PASSED!\n"); error = 0; err: bpf_prog_detach(cg_fd, BPF_CGROUP_SOCK_OPS); + close(cg_fd); + cleanup_cgroup_environment(); return error; - } diff --git a/tools/testing/selftests/bpf/trace_helpers.c b/tools/testing/selftests/bpf/trace_helpers.c index 3868dcb63420..cabe2a3a3b30 100644 --- a/tools/testing/selftests/bpf/trace_helpers.c +++ b/tools/testing/selftests/bpf/trace_helpers.c @@ -88,7 +88,7 @@ static int page_size; static int page_cnt = 8; static struct perf_event_mmap_page *header; -int perf_event_mmap(int fd) +int perf_event_mmap_header(int fd, struct perf_event_mmap_page **header) { void *base; int mmap_size; @@ -102,10 +102,15 @@ int perf_event_mmap(int fd) return -1; } - header = base; + *header = base; return 0; } +int perf_event_mmap(int fd) +{ + return perf_event_mmap_header(fd, &header); +} + static int perf_event_poll(int fd) { struct pollfd pfd = { .fd = fd, .events = POLLIN }; @@ -163,3 +168,42 @@ int perf_event_poller(int fd, perf_event_print_fn output_fn) return ret; } + +int perf_event_poller_multi(int *fds, struct perf_event_mmap_page **headers, + int num_fds, perf_event_print_fn output_fn) +{ + enum bpf_perf_event_ret ret; + struct pollfd *pfds; + void *buf = NULL; + size_t len = 0; + int i; + + pfds = calloc(num_fds, sizeof(*pfds)); + if (!pfds) + return LIBBPF_PERF_EVENT_ERROR; + + for (i = 0; i < num_fds; i++) { + pfds[i].fd = fds[i]; + pfds[i].events = POLLIN; + } + + for (;;) { + poll(pfds, num_fds, 1000); + for (i = 0; i < num_fds; i++) { + if (!pfds[i].revents) + continue; + + ret = bpf_perf_event_read_simple(headers[i], + page_cnt * page_size, + page_size, &buf, &len, + bpf_perf_event_print, + output_fn); + if (ret != LIBBPF_PERF_EVENT_CONT) + break; + } + } + free(buf); + free(pfds); + + return ret; +} diff --git a/tools/testing/selftests/bpf/trace_helpers.h b/tools/testing/selftests/bpf/trace_helpers.h index 3b4bcf7f5084..18924f23db1b 100644 --- a/tools/testing/selftests/bpf/trace_helpers.h +++ b/tools/testing/selftests/bpf/trace_helpers.h @@ -3,6 +3,7 @@ #define __TRACE_HELPER_H #include <libbpf.h> +#include <linux/perf_event.h> struct ksym { long addr; @@ -16,6 +17,9 @@ long ksym_get_addr(const char *name); typedef enum bpf_perf_event_ret (*perf_event_print_fn)(void *data, int size); int perf_event_mmap(int fd); +int perf_event_mmap_header(int fd, struct perf_event_mmap_page **header); /* return LIBBPF_PERF_EVENT_DONE or LIBBPF_PERF_EVENT_ERROR */ int perf_event_poller(int fd, perf_event_print_fn output_fn); +int perf_event_poller_multi(int *fds, struct perf_event_mmap_page **headers, + int num_fds, perf_event_print_fn output_fn); #endif diff --git a/tools/testing/selftests/drivers/net/mlxsw/mirror_gre.sh b/tools/testing/selftests/drivers/net/mlxsw/mirror_gre.sh new file mode 100755 index 000000000000..76f1ab4898d9 --- /dev/null +++ b/tools/testing/selftests/drivers/net/mlxsw/mirror_gre.sh @@ -0,0 +1,217 @@ +#!/bin/bash +# SPDX-License-Identifier: GPL-2.0 + +# This test 
uses standard topology for testing gretap. See +# ../../../net/forwarding/mirror_gre_topo_lib.sh for more details. +# +# Test offloading various features of offloading gretap mirrors specific to +# mlxsw. + +lib_dir=$(dirname $0)/../../../net/forwarding + +NUM_NETIFS=6 +source $lib_dir/lib.sh +source $lib_dir/mirror_lib.sh +source $lib_dir/mirror_gre_lib.sh +source $lib_dir/mirror_gre_topo_lib.sh + +setup_keyful() +{ + tunnel_create gt6-key ip6gretap 2001:db8:3::1 2001:db8:3::2 \ + ttl 100 tos inherit allow-localremote \ + key 1234 + + tunnel_create h3-gt6-key ip6gretap 2001:db8:3::2 2001:db8:3::1 \ + key 1234 + ip link set h3-gt6-key vrf v$h3 + matchall_sink_create h3-gt6-key + + ip address add dev $swp3 2001:db8:3::1/64 + ip address add dev $h3 2001:db8:3::2/64 +} + +cleanup_keyful() +{ + ip address del dev $h3 2001:db8:3::2/64 + ip address del dev $swp3 2001:db8:3::1/64 + + tunnel_destroy h3-gt6-key + tunnel_destroy gt6-key +} + +setup_soft() +{ + # Set up a topology for testing underlay routes that point at an + # unsupported soft device. + + tunnel_create gt6-soft ip6gretap 2001:db8:4::1 2001:db8:4::2 \ + ttl 100 tos inherit allow-localremote + + tunnel_create h3-gt6-soft ip6gretap 2001:db8:4::2 2001:db8:4::1 + ip link set h3-gt6-soft vrf v$h3 + matchall_sink_create h3-gt6-soft + + ip link add name v1 type veth peer name v2 + ip link set dev v1 up + ip address add dev v1 2001:db8:4::1/64 + + ip link set dev v2 vrf v$h3 + ip link set dev v2 up + ip address add dev v2 2001:db8:4::2/64 +} + +cleanup_soft() +{ + ip link del dev v1 + + tunnel_destroy h3-gt6-soft + tunnel_destroy gt6-soft +} + +setup_prepare() +{ + h1=${NETIFS[p1]} + swp1=${NETIFS[p2]} + + swp2=${NETIFS[p3]} + h2=${NETIFS[p4]} + + swp3=${NETIFS[p5]} + h3=${NETIFS[p6]} + + vrf_prepare + mirror_gre_topo_create + + ip address add dev $swp3 2001:db8:2::1/64 + ip address add dev $h3 2001:db8:2::2/64 + + ip address add dev $swp3 192.0.2.129/28 + ip address add dev $h3 192.0.2.130/28 + + setup_keyful + setup_soft +} + +cleanup() +{ + pre_cleanup + + cleanup_soft + cleanup_keyful + + ip address del dev $h3 2001:db8:2::2/64 + ip address del dev $swp3 2001:db8:2::1/64 + + ip address del dev $h3 192.0.2.130/28 + ip address del dev $swp3 192.0.2.129/28 + + mirror_gre_topo_destroy + vrf_cleanup +} + +test_span_gre_ttl_inherit() +{ + local tundev=$1; shift + local type=$1; shift + local what=$1; shift + + RET=0 + + ip link set dev $tundev type $type ttl inherit + mirror_install $swp1 ingress $tundev "matchall $tcflags" + fail_test_span_gre_dir $tundev ingress + + ip link set dev $tundev type $type ttl 100 + + quick_test_span_gre_dir $tundev ingress + mirror_uninstall $swp1 ingress + + log_test "$what: no offload on TTL of inherit ($tcflags)" +} + +test_span_gre_tos_fixed() +{ + local tundev=$1; shift + local type=$1; shift + local what=$1; shift + + RET=0 + + ip link set dev $tundev type $type tos 0x10 + mirror_install $swp1 ingress $tundev "matchall $tcflags" + fail_test_span_gre_dir $tundev ingress + + ip link set dev $tundev type $type tos inherit + quick_test_span_gre_dir $tundev ingress + mirror_uninstall $swp1 ingress + + log_test "$what: no offload on a fixed TOS ($tcflags)" +} + +test_span_failable() +{ + local should_fail=$1; shift + local tundev=$1; shift + local what=$1; shift + + RET=0 + + mirror_install $swp1 ingress $tundev "matchall $tcflags" + if ((should_fail)); then + fail_test_span_gre_dir $tundev ingress + else + quick_test_span_gre_dir $tundev ingress + fi + mirror_uninstall $swp1 ingress + + log_test "$what: 
should_fail=$should_fail ($tcflags)" +} + +test_failable() +{ + local should_fail=$1; shift + + test_span_failable $should_fail gt6-key "mirror to keyful gretap" + test_span_failable $should_fail gt6-soft "mirror to gretap w/ soft underlay" +} + +test_sw() +{ + slow_path_trap_install $swp1 ingress + slow_path_trap_install $swp1 egress + + test_failable 0 + + slow_path_trap_uninstall $swp1 egress + slow_path_trap_uninstall $swp1 ingress +} + +test_hw() +{ + test_failable 1 + + test_span_gre_tos_fixed gt4 gretap "mirror to gretap" + test_span_gre_tos_fixed gt6 ip6gretap "mirror to ip6gretap" + + test_span_gre_ttl_inherit gt4 gretap "mirror to gretap" + test_span_gre_ttl_inherit gt6 ip6gretap "mirror to ip6gretap" +} + +trap cleanup EXIT + +setup_prepare +setup_wait + +if ! tc_offload_check; then + check_err 1 "Could not test offloaded functionality" + log_test "mlxsw-specific tests for mirror to gretap" + exit +fi + +tcflags="skip_hw" +test_sw + +tcflags="skip_sw" +test_hw + +exit $EXIT_STATUS diff --git a/tools/testing/selftests/drivers/net/mlxsw/mirror_gre_scale.sh b/tools/testing/selftests/drivers/net/mlxsw/mirror_gre_scale.sh new file mode 100644 index 000000000000..6f3a70df63bc --- /dev/null +++ b/tools/testing/selftests/drivers/net/mlxsw/mirror_gre_scale.sh @@ -0,0 +1,197 @@ +# SPDX-License-Identifier: GPL-2.0 + +# Test offloading a number of mirrors-to-gretap. The test creates a number of +# tunnels. Then it adds one flower mirror for each of the tunnels, matching a +# given host IP. Then it generates traffic at each of the host IPs and checks +# that the traffic has been mirrored at the appropriate tunnel. +# +# +--------------------------+ +--------------------------+ +# | H1 | | H2 | +# | + $h1 | | $h2 + | +# | | 2001:db8:1:X::1/64 | | 2001:db8:1:X::2/64 | | +# +-----|--------------------+ +--------------------|-----+ +# | | +# +-----|-------------------------------------------------------------|-----+ +# | SW o--> mirrors | | +# | +---|-------------------------------------------------------------|---+ | +# | | + $swp1 BR $swp2 + | | +# | +---------------------------------------------------------------------+ | +# | | +# | + $swp3 + gt6-<X> (ip6gretap) | +# | | 2001:db8:2:X::1/64 : loc=2001:db8:2:X::1 | +# | | : rem=2001:db8:2:X::2 | +# | | : ttl=100 | +# | | : tos=inherit | +# | | : | +# +-----|--------------------------------:----------------------------------+ +# | : +# +-----|--------------------------------:----------------------------------+ +# | H3 + $h3 + h3-gt6-<X> (ip6gretap) | +# | 2001:db8:2:X::2/64 loc=2001:db8:2:X::2 | +# | rem=2001:db8:2:X::1 | +# | ttl=100 | +# | tos=inherit | +# | | +# +-------------------------------------------------------------------------+ + +source ../../../../net/forwarding/mirror_lib.sh + +MIRROR_NUM_NETIFS=6 + +mirror_gre_ipv6_addr() +{ + local net=$1; shift + local num=$1; shift + + printf "2001:db8:%x:%x" $net $num +} + +mirror_gre_tunnels_create() +{ + local count=$1; shift + local should_fail=$1; shift + + MIRROR_GRE_BATCH_FILE="$(mktemp)" + for ((i=0; i < count; ++i)); do + local match_dip=$(mirror_gre_ipv6_addr 1 $i)::2 + local htun=h3-gt6-$i + local tun=gt6-$i + + ((mirror_gre_tunnels++)) + + ip address add dev $h1 $(mirror_gre_ipv6_addr 1 $i)::1/64 + ip address add dev $h2 $(mirror_gre_ipv6_addr 1 $i)::2/64 + + ip address add dev $swp3 $(mirror_gre_ipv6_addr 2 $i)::1/64 + ip address add dev $h3 $(mirror_gre_ipv6_addr 2 $i)::2/64 + + tunnel_create $tun ip6gretap \ + $(mirror_gre_ipv6_addr 2 $i)::1 \ + $(mirror_gre_ipv6_addr 2 $i)::2 
\ + ttl 100 tos inherit allow-localremote + + tunnel_create $htun ip6gretap \ + $(mirror_gre_ipv6_addr 2 $i)::2 \ + $(mirror_gre_ipv6_addr 2 $i)::1 + ip link set $htun vrf v$h3 + matchall_sink_create $htun + + cat >> $MIRROR_GRE_BATCH_FILE <<-EOF + filter add dev $swp1 ingress pref 1000 \ + protocol ipv6 \ + flower $tcflags dst_ip $match_dip \ + action mirred egress mirror dev $tun + EOF + done + + tc -b $MIRROR_GRE_BATCH_FILE + check_err_fail $should_fail $? "Mirror rule insertion" +} + +mirror_gre_tunnels_destroy() +{ + local count=$1; shift + + for ((i=0; i < count; ++i)); do + local htun=h3-gt6-$i + local tun=gt6-$i + + ip address del dev $h3 $(mirror_gre_ipv6_addr 2 $i)::2/64 + ip address del dev $swp3 $(mirror_gre_ipv6_addr 2 $i)::1/64 + + ip address del dev $h2 $(mirror_gre_ipv6_addr 1 $i)::2/64 + ip address del dev $h1 $(mirror_gre_ipv6_addr 1 $i)::1/64 + + tunnel_destroy $htun + tunnel_destroy $tun + done +} + +__mirror_gre_test() +{ + local count=$1; shift + local should_fail=$1; shift + + mirror_gre_tunnels_create $count $should_fail + if ((should_fail)); then + return + fi + + sleep 5 + + for ((i = 0; i < count; ++i)); do + local dip=$(mirror_gre_ipv6_addr 1 $i)::2 + local htun=h3-gt6-$i + local message + + icmp6_capture_install $htun + mirror_test v$h1 "" $dip $htun 100 10 + icmp6_capture_uninstall $htun + done +} + +mirror_gre_test() +{ + local count=$1; shift + local should_fail=$1; shift + + if ! tc_offload_check $TC_FLOWER_NUM_NETIFS; then + check_err 1 "Could not test offloaded functionality" + return + fi + + tcflags="skip_sw" + __mirror_gre_test $count $should_fail +} + +mirror_gre_setup_prepare() +{ + h1=${NETIFS[p1]} + swp1=${NETIFS[p2]} + + swp2=${NETIFS[p3]} + h2=${NETIFS[p4]} + + swp3=${NETIFS[p5]} + h3=${NETIFS[p6]} + + mirror_gre_tunnels=0 + + vrf_prepare + + simple_if_init $h1 + simple_if_init $h2 + simple_if_init $h3 + + ip link add name br1 type bridge vlan_filtering 1 + ip link set dev br1 up + + ip link set dev $swp1 master br1 + ip link set dev $swp1 up + tc qdisc add dev $swp1 clsact + + ip link set dev $swp2 master br1 + ip link set dev $swp2 up + + ip link set dev $swp3 up +} + +mirror_gre_cleanup() +{ + mirror_gre_tunnels_destroy $mirror_gre_tunnels + + ip link set dev $swp3 down + + ip link set dev $swp2 down + + tc qdisc del dev $swp1 clsact + ip link set dev $swp1 down + + ip link del dev br1 + + simple_if_fini $h3 + simple_if_fini $h2 + simple_if_fini $h1 + + vrf_cleanup +} diff --git a/tools/testing/selftests/drivers/net/mlxsw/router_scale.sh b/tools/testing/selftests/drivers/net/mlxsw/router_scale.sh new file mode 100644 index 000000000000..d231649b4f01 --- /dev/null +++ b/tools/testing/selftests/drivers/net/mlxsw/router_scale.sh @@ -0,0 +1,167 @@ +#!/bin/bash +# SPDX-License-Identifier: GPL-2.0 + +ROUTER_NUM_NETIFS=4 + +router_h1_create() +{ + simple_if_init $h1 192.0.1.1/24 + ip route add 193.0.0.0/8 via 192.0.1.2 dev $h1 +} + +router_h1_destroy() +{ + ip route del 193.0.0.0/8 via 192.0.1.2 dev $h1 + simple_if_fini $h1 192.0.1.1/24 +} + +router_h2_create() +{ + simple_if_init $h2 192.0.2.1/24 + tc qdisc add dev $h2 handle ffff: ingress +} + +router_h2_destroy() +{ + tc qdisc del dev $h2 handle ffff: ingress + simple_if_fini $h2 192.0.2.1/24 +} + +router_create() +{ + ip link set dev $rp1 up + ip link set dev $rp2 up + + ip address add 192.0.1.2/24 dev $rp1 + ip address add 192.0.2.2/24 dev $rp2 +} + +router_destroy() +{ + ip address del 192.0.2.2/24 dev $rp2 + ip address del 192.0.1.2/24 dev $rp1 + + ip link set dev $rp2 down + ip link set dev 
$rp1 down +} + +router_setup_prepare() +{ + h1=${NETIFS[p1]} + rp1=${NETIFS[p2]} + + rp2=${NETIFS[p3]} + h2=${NETIFS[p4]} + + h1mac=$(mac_get $h1) + rp1mac=$(mac_get $rp1) + + vrf_prepare + + router_h1_create + router_h2_create + + router_create +} + +router_offload_validate() +{ + local route_count=$1 + local offloaded_count + + offloaded_count=$(ip route | grep -o 'offload' | wc -l) + [[ $offloaded_count -ge $route_count ]] +} + +router_routes_create() +{ + local route_count=$1 + local count=0 + + ROUTE_FILE="$(mktemp)" + + for i in {0..255} + do + for j in {0..255} + do + for k in {0..255} + do + if [[ $count -eq $route_count ]]; then + break 3 + fi + + echo route add 193.${i}.${j}.${k}/32 via \ + 192.0.2.1 dev $rp2 >> $ROUTE_FILE + ((count++)) + done + done + done + + ip -b $ROUTE_FILE &> /dev/null +} + +router_routes_destroy() +{ + if [[ -v ROUTE_FILE ]]; then + rm -f $ROUTE_FILE + fi +} + +router_test() +{ + local route_count=$1 + local should_fail=$2 + local count=0 + + RET=0 + + router_routes_create $route_count + + router_offload_validate $route_count + check_err_fail $should_fail $? "Offload of $route_count routes" + if [[ $RET -ne 0 ]] || [[ $should_fail -eq 1 ]]; then + return + fi + + tc filter add dev $h2 ingress protocol ip pref 1 flower \ + skip_sw dst_ip 193.0.0.0/8 action drop + + for i in {0..255} + do + for j in {0..255} + do + for k in {0..255} + do + if [[ $count -eq $route_count ]]; then + break 3 + fi + + $MZ $h1 -c 1 -p 64 -a $h1mac -b $rp1mac \ + -A 192.0.1.1 -B 193.${i}.${j}.${k} \ + -t ip -q + ((count++)) + done + done + done + + tc_check_packets "dev $h2 ingress" 1 $route_count + check_err $? "Offload mismatch" + + tc filter del dev $h2 ingress protocol ip pref 1 flower \ + skip_sw dst_ip 193.0.0.0/8 action drop + + router_routes_destroy +} + +router_cleanup() +{ + pre_cleanup + + router_routes_destroy + router_destroy + + router_h2_destroy + router_h1_destroy + + vrf_cleanup +} diff --git a/tools/testing/selftests/drivers/net/mlxsw/spectrum/devlink_lib_spectrum.sh b/tools/testing/selftests/drivers/net/mlxsw/spectrum/devlink_lib_spectrum.sh new file mode 100644 index 000000000000..73035e25085d --- /dev/null +++ b/tools/testing/selftests/drivers/net/mlxsw/spectrum/devlink_lib_spectrum.sh @@ -0,0 +1,119 @@ +#!/bin/bash +# SPDX-License-Identifier: GPL-2.0 + +source "../../../../net/forwarding/devlink_lib.sh" + +if [ "$DEVLINK_VIDDID" != "15b3:cb84" ]; then + echo "SKIP: test is tailored for Mellanox Spectrum" + exit 1 +fi + +# Needed for returning to default +declare -A KVD_DEFAULTS + +KVD_CHILDREN="linear hash_single hash_double" +KVDL_CHILDREN="singles chunks large_chunks" + +devlink_sp_resource_minimize() +{ + local size + local i + + for i in $KVD_CHILDREN; do + size=$(devlink_resource_get kvd "$i" | jq '.["size_min"]') + devlink_resource_size_set "$size" kvd "$i" + done + + for i in $KVDL_CHILDREN; do + size=$(devlink_resource_get kvd linear "$i" | \ + jq '.["size_min"]') + devlink_resource_size_set "$size" kvd linear "$i" + done +} + +devlink_sp_size_kvd_to_default() +{ + local need_reload=0 + local i + + for i in $KVD_CHILDREN; do + local size=$(echo "${KVD_DEFAULTS[kvd_$i]}" | jq '.["size"]') + current_size=$(devlink_resource_size_get kvd "$i") + + if [ "$size" -ne "$current_size" ]; then + devlink_resource_size_set "$size" kvd "$i" + need_reload=1 + fi + done + + for i in $KVDL_CHILDREN; do + local size=$(echo "${KVD_DEFAULTS[kvd_linear_$i]}" | \ + jq '.["size"]') + current_size=$(devlink_resource_size_get kvd linear "$i") + + if [ "$size" -ne 
"$current_size" ]; then + devlink_resource_size_set "$size" kvd linear "$i" + need_reload=1 + fi + done + + if [ "$need_reload" -ne "0" ]; then + devlink_reload + fi +} + +devlink_sp_read_kvd_defaults() +{ + local key + local i + + KVD_DEFAULTS[kvd]=$(devlink_resource_get "kvd") + for i in $KVD_CHILDREN; do + key=kvd_$i + KVD_DEFAULTS[$key]=$(devlink_resource_get kvd "$i") + done + + for i in $KVDL_CHILDREN; do + key=kvd_linear_$i + KVD_DEFAULTS[$key]=$(devlink_resource_get kvd linear "$i") + done +} + +KVD_PROFILES="default scale ipv4_max" + +devlink_sp_resource_kvd_profile_set() +{ + local profile=$1 + + case "$profile" in + scale) + devlink_resource_size_set 64000 kvd linear + devlink_resource_size_set 15616 kvd linear singles + devlink_resource_size_set 32000 kvd linear chunks + devlink_resource_size_set 16384 kvd linear large_chunks + devlink_resource_size_set 128000 kvd hash_single + devlink_resource_size_set 48000 kvd hash_double + devlink_reload + ;; + ipv4_max) + devlink_resource_size_set 64000 kvd linear + devlink_resource_size_set 15616 kvd linear singles + devlink_resource_size_set 32000 kvd linear chunks + devlink_resource_size_set 16384 kvd linear large_chunks + devlink_resource_size_set 144000 kvd hash_single + devlink_resource_size_set 32768 kvd hash_double + devlink_reload + ;; + default) + devlink_resource_size_set 98304 kvd linear + devlink_resource_size_set 16384 kvd linear singles + devlink_resource_size_set 49152 kvd linear chunks + devlink_resource_size_set 32768 kvd linear large_chunks + devlink_resource_size_set 87040 kvd hash_single + devlink_resource_size_set 60416 kvd hash_double + devlink_reload + ;; + *) + check_err 1 "Unknown profile $profile" + esac +} diff --git a/tools/testing/selftests/drivers/net/mlxsw/spectrum/devlink_resources.sh b/tools/testing/selftests/drivers/net/mlxsw/spectrum/devlink_resources.sh new file mode 100755 index 000000000000..b1fe960e398a --- /dev/null +++ b/tools/testing/selftests/drivers/net/mlxsw/spectrum/devlink_resources.sh @@ -0,0 +1,117 @@ +#!/bin/bash +# SPDX-License-Identifier: GPL-2.0 + +NUM_NETIFS=1 +source devlink_lib_spectrum.sh + +setup_prepare() +{ + devlink_sp_read_kvd_defaults +} + +cleanup() +{ + pre_cleanup + devlink_sp_size_kvd_to_default +} + +trap cleanup EXIT + +setup_prepare + +profiles_test() +{ + local i + + log_info "Running profile tests" + + for i in $KVD_PROFILES; do + RET=0 + devlink_sp_resource_kvd_profile_set $i + log_test "'$i' profile" + done + + # Default is explicitly tested at end to ensure it's actually applied + RET=0 + devlink_sp_resource_kvd_profile_set "default" + log_test "'default' profile" +} + +resources_min_test() +{ + local size + local i + local j + + log_info "Running KVD-minimum tests" + + for i in $KVD_CHILDREN; do + RET=0 + size=$(devlink_resource_get kvd "$i" | jq '.["size_min"]') + devlink_resource_size_set "$size" kvd "$i" + + # In case of linear, need to minimize sub-resources as well + if [[ "$i" == "linear" ]]; then + for j in $KVDL_CHILDREN; do + devlink_resource_size_set 0 kvd linear "$j" + done + fi + + devlink_reload + devlink_sp_size_kvd_to_default + log_test "'$i' minimize [$size]" + done +} + +resources_max_test() +{ + local min_size + local size + local i + local j + + log_info "Running KVD-maximum tests" + for i in $KVD_CHILDREN; do + RET=0 + devlink_sp_resource_minimize + + # Calculate the maximum possible size for the given partition + size=$(devlink_resource_size_get kvd) + for j in $KVD_CHILDREN; do + if [ "$i" != "$j" ]; then + min_size=$(devlink_resource_get kvd 
"$j" | \ + jq '.["size_min"]') + size=$((size - min_size)) + fi + done + + # Test almost maximum size + devlink_resource_size_set "$((size - 128))" kvd "$i" + devlink_reload + log_test "'$i' almost maximize [$((size - 128))]" + + # Test above maximum size + devlink resource set "$DEVLINK_DEV" \ + path "kvd/$i" size $((size + 128)) &> /dev/null + check_fail $? "Set kvd/$i to size $((size + 128)) should fail" + log_test "'$i' Overflow rejection [$((size + 128))]" + + # Test maximum size + if [ "$i" == "hash_single" ] || [ "$i" == "hash_double" ]; then + echo "SKIP: Observed problem with exact max $i" + continue + fi + + devlink_resource_size_set "$size" kvd "$i" + devlink_reload + log_test "'$i' maximize [$size]" + + devlink_sp_size_kvd_to_default + done +} + +profiles_test +resources_min_test +resources_max_test + +exit "$RET" diff --git a/tools/testing/selftests/drivers/net/mlxsw/spectrum/mirror_gre_scale.sh b/tools/testing/selftests/drivers/net/mlxsw/spectrum/mirror_gre_scale.sh new file mode 100644 index 000000000000..8d2186c7c62b --- /dev/null +++ b/tools/testing/selftests/drivers/net/mlxsw/spectrum/mirror_gre_scale.sh @@ -0,0 +1,13 @@ +# SPDX-License-Identifier: GPL-2.0 +source ../mirror_gre_scale.sh + +mirror_gre_get_target() +{ + local should_fail=$1; shift + + if ((! should_fail)); then + echo 3 + else + echo 4 + fi +} diff --git a/tools/testing/selftests/drivers/net/mlxsw/spectrum/resource_scale.sh b/tools/testing/selftests/drivers/net/mlxsw/spectrum/resource_scale.sh new file mode 100755 index 000000000000..a0a80e1a69e8 --- /dev/null +++ b/tools/testing/selftests/drivers/net/mlxsw/spectrum/resource_scale.sh @@ -0,0 +1,55 @@ +#!/bin/bash +# SPDX-License-Identifier: GPL-2.0 + +NUM_NETIFS=6 +source ../../../../net/forwarding/lib.sh +source ../../../../net/forwarding/tc_common.sh +source devlink_lib_spectrum.sh + +current_test="" + +cleanup() +{ + pre_cleanup + if [ ! 
-z $current_test ]; then + ${current_test}_cleanup + fi + devlink_sp_size_kvd_to_default +} + +devlink_sp_read_kvd_defaults +trap cleanup EXIT + +ALL_TESTS="router tc_flower mirror_gre" +for current_test in ${TESTS:-$ALL_TESTS}; do + source ${current_test}_scale.sh + + num_netifs_var=${current_test^^}_NUM_NETIFS + num_netifs=${!num_netifs_var:-$NUM_NETIFS} + + for profile in $KVD_PROFILES; do + RET=0 + devlink_sp_resource_kvd_profile_set $profile + if [[ $RET -gt 0 ]]; then + log_test "'$current_test' [$profile] setting" + continue + fi + + for should_fail in 0 1; do + RET=0 + target=$(${current_test}_get_target "$should_fail") + ${current_test}_setup_prepare + setup_wait $num_netifs + ${current_test}_test "$target" "$should_fail" + ${current_test}_cleanup + if [[ "$should_fail" -eq 0 ]]; then + log_test "'$current_test' [$profile] $target" + else + log_test "'$current_test' [$profile] overflow $target" + fi + done + done +done +current_test="" + +exit "$RET" diff --git a/tools/testing/selftests/drivers/net/mlxsw/spectrum/router_scale.sh b/tools/testing/selftests/drivers/net/mlxsw/spectrum/router_scale.sh new file mode 100644 index 000000000000..21c4697d5bab --- /dev/null +++ b/tools/testing/selftests/drivers/net/mlxsw/spectrum/router_scale.sh @@ -0,0 +1,18 @@ +# SPDX-License-Identifier: GPL-2.0 +source ../router_scale.sh + +router_get_target() +{ + local should_fail=$1 + local target + + target=$(devlink_resource_size_get kvd hash_single) + + if [[ $should_fail -eq 0 ]]; then + target=$((target * 85 / 100)) + else + target=$((target + 1)) + fi + + echo $target +} diff --git a/tools/testing/selftests/drivers/net/mlxsw/spectrum/tc_flower_scale.sh b/tools/testing/selftests/drivers/net/mlxsw/spectrum/tc_flower_scale.sh new file mode 100644 index 000000000000..f9bfd8937765 --- /dev/null +++ b/tools/testing/selftests/drivers/net/mlxsw/spectrum/tc_flower_scale.sh @@ -0,0 +1,19 @@ +# SPDX-License-Identifier: GPL-2.0 +source ../tc_flower_scale.sh + +tc_flower_get_target() +{ + local should_fail=$1; shift + + # 6144 (6x1024) is the theoretical maximum. + # One bank of 512 rules is taken by the 18-byte MC router rule. + # One rule is the ACL catch-all. + # 6144 - 512 - 1 = 5631 + local target=5631 + + if ((! should_fail)); then + echo $target + else + echo $((target + 1)) + fi +} diff --git a/tools/testing/selftests/drivers/net/mlxsw/tc_flower_scale.sh b/tools/testing/selftests/drivers/net/mlxsw/tc_flower_scale.sh new file mode 100644 index 000000000000..a6d733d2a4b4 --- /dev/null +++ b/tools/testing/selftests/drivers/net/mlxsw/tc_flower_scale.sh @@ -0,0 +1,134 @@ +#!/bin/bash +# SPDX-License-Identifier: GPL-2.0 + +# Test for resource limit of offloaded flower rules. The test adds a given +# number of flower matches for different IPv6 addresses, then generates traffic, +# and ensures each was hit exactly once. This file contains functions to set up +# a testing topology and run the test, and is meant to be sourced from a test +# script that calls the testing routine with a given number of rules. 
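
To make the sourcing contract described above concrete, a minimal caller could look roughly like the sketch below. The helper names (TC_FLOWER_NUM_NETIFS, tc_flower_setup_prepare, tc_flower_test, tc_flower_cleanup, setup_wait, log_test) are the ones defined in this file and in lib.sh; the relative paths and the fixed rule count of 1024 are illustrative assumptions only.

	#!/bin/bash
	# SPDX-License-Identifier: GPL-2.0
	# Sketch of a driver-specific caller; the source paths and the rule
	# count are assumptions, the helpers are the ones defined in
	# tc_flower_scale.sh and lib.sh.
	NUM_NETIFS=2
	source ../../../../net/forwarding/lib.sh
	source ../../../../net/forwarding/tc_common.sh
	source ../tc_flower_scale.sh

	tc_flower_setup_prepare
	setup_wait $TC_FLOWER_NUM_NETIFS

	RET=0
	tc_flower_test 1024 0	# 1024 rules, should_fail=0: all must offload and hit
	log_test "tc flower scale"

	tc_flower_cleanup
	exit $EXIT_STATUS
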
+ +TC_FLOWER_NUM_NETIFS=2 + +tc_flower_h1_create() +{ + simple_if_init $h1 + tc qdisc add dev $h1 clsact +} + +tc_flower_h1_destroy() +{ + tc qdisc del dev $h1 clsact + simple_if_fini $h1 +} + +tc_flower_h2_create() +{ + simple_if_init $h2 + tc qdisc add dev $h2 clsact +} + +tc_flower_h2_destroy() +{ + tc qdisc del dev $h2 clsact + simple_if_fini $h2 +} + +tc_flower_setup_prepare() +{ + h1=${NETIFS[p1]} + h2=${NETIFS[p2]} + + vrf_prepare + + tc_flower_h1_create + tc_flower_h2_create +} + +tc_flower_cleanup() +{ + pre_cleanup + + tc_flower_h2_destroy + tc_flower_h1_destroy + + vrf_cleanup + + if [[ -v TC_FLOWER_BATCH_FILE ]]; then + rm -f $TC_FLOWER_BATCH_FILE + fi +} + +tc_flower_addr() +{ + local num=$1; shift + + printf "2001:db8:1::%x" $num +} + +tc_flower_rules_create() +{ + local count=$1; shift + local should_fail=$1; shift + + TC_FLOWER_BATCH_FILE="$(mktemp)" + + for ((i = 0; i < count; ++i)); do + cat >> $TC_FLOWER_BATCH_FILE <<-EOF + filter add dev $h2 ingress \ + prot ipv6 \ + pref 1000 \ + flower $tcflags dst_ip $(tc_flower_addr $i) \ + action drop + EOF + done + + tc -b $TC_FLOWER_BATCH_FILE + check_err_fail $should_fail $? "Rule insertion" +} + +__tc_flower_test() +{ + local count=$1; shift + local should_fail=$1; shift + local last=$((count - 1)) + + tc_flower_rules_create $count $should_fail + + for ((i = 0; i < count; ++i)); do + $MZ $h1 -q -c 1 -t ip -p 20 -b bc -6 \ + -A 2001:db8:2::1 \ + -B $(tc_flower_addr $i) + done + + MISMATCHES=$( + tc -j -s filter show dev $h2 ingress | + jq -r '[ .[] | select(.kind == "flower") | .options | + values as $rule | .actions[].stats.packets | + select(. != 1) | "\(.) on \($rule.keys.dst_ip)" ] | + join(", ")' + ) + + test -z "$MISMATCHES" + check_err $? "Expected to capture 1 packet for each IP, but got $MISMATCHES" +} + +tc_flower_test() +{ + local count=$1; shift + local should_fail=$1; shift + + # We use lower 16 bits of IPv6 address for match. Also there are only 16 + # bits of rule priority space. + if ((count > 65536)); then + check_err 1 "Invalid count of $count. At most 65536 rules supported" + return + fi + + if ! tc_offload_check $TC_FLOWER_NUM_NETIFS; then + check_err 1 "Could not test offloaded functionality" + return + fi + + tcflags="skip_sw" + __tc_flower_test $count $should_fail +} diff --git a/tools/testing/selftests/net/Makefile b/tools/testing/selftests/net/Makefile index 663e11e85727..9cca68e440a0 100644 --- a/tools/testing/selftests/net/Makefile +++ b/tools/testing/selftests/net/Makefile @@ -13,7 +13,7 @@ TEST_GEN_FILES += psock_fanout psock_tpacket msg_zerocopy TEST_GEN_FILES += tcp_mmap tcp_inq psock_snd TEST_GEN_FILES += udpgso udpgso_bench_tx udpgso_bench_rx TEST_GEN_PROGS = reuseport_bpf reuseport_bpf_cpu reuseport_bpf_numa -TEST_GEN_PROGS += reuseport_dualstack reuseaddr_conflict +TEST_GEN_PROGS += reuseport_dualstack reuseaddr_conflict tls include ../lib.mk diff --git a/tools/testing/selftests/net/forwarding/README b/tools/testing/selftests/net/forwarding/README index 4a0964c42860..b8a2af8fcfb7 100644 --- a/tools/testing/selftests/net/forwarding/README +++ b/tools/testing/selftests/net/forwarding/README @@ -46,6 +46,8 @@ Guidelines for Writing Tests o Where possible, reuse an existing topology for different tests instead of recreating the same topology. +o Tests that use anything but the most trivial topologies should include + an ASCII art showing the topology. o Where possible, IPv6 and IPv4 addresses shall conform to RFC 3849 and RFC 5737, respectively. 
o Where possible, tests shall be written so that they can be reused by diff --git a/tools/testing/selftests/net/forwarding/bridge_port_isolation.sh b/tools/testing/selftests/net/forwarding/bridge_port_isolation.sh new file mode 100755 index 000000000000..a43b4645c4de --- /dev/null +++ b/tools/testing/selftests/net/forwarding/bridge_port_isolation.sh @@ -0,0 +1,151 @@ +#!/bin/bash +# SPDX-License-Identifier: GPL-2.0 + +ALL_TESTS="ping_ipv4 ping_ipv6 flooding" +NUM_NETIFS=6 +CHECK_TC="yes" +source lib.sh + +h1_create() +{ + simple_if_init $h1 192.0.2.1/24 2001:db8:1::1/64 +} + +h1_destroy() +{ + simple_if_fini $h1 192.0.2.1/24 2001:db8:1::1/64 +} + +h2_create() +{ + simple_if_init $h2 192.0.2.2/24 2001:db8:1::2/64 +} + +h2_destroy() +{ + simple_if_fini $h2 192.0.2.2/24 2001:db8:1::2/64 +} + +h3_create() +{ + simple_if_init $h3 192.0.2.3/24 2001:db8:1::3/64 +} + +h3_destroy() +{ + simple_if_fini $h3 192.0.2.3/24 2001:db8:1::3/64 +} + +switch_create() +{ + ip link add dev br0 type bridge + + ip link set dev $swp1 master br0 + ip link set dev $swp2 master br0 + ip link set dev $swp3 master br0 + + ip link set dev $swp1 type bridge_slave isolated on + check_err $? "Can't set isolation on port $swp1" + ip link set dev $swp2 type bridge_slave isolated on + check_err $? "Can't set isolation on port $swp2" + ip link set dev $swp3 type bridge_slave isolated off + check_err $? "Can't disable isolation on port $swp3" + + ip link set dev br0 up + ip link set dev $swp1 up + ip link set dev $swp2 up + ip link set dev $swp3 up +} + +switch_destroy() +{ + ip link set dev $swp3 down + ip link set dev $swp2 down + ip link set dev $swp1 down + + ip link del dev br0 +} + +setup_prepare() +{ + h1=${NETIFS[p1]} + swp1=${NETIFS[p2]} + + swp2=${NETIFS[p3]} + h2=${NETIFS[p4]} + + swp3=${NETIFS[p5]} + h3=${NETIFS[p6]} + + vrf_prepare + + h1_create + h2_create + h3_create + + switch_create +} + +cleanup() +{ + pre_cleanup + + switch_destroy + + h3_destroy + h2_destroy + h1_destroy + + vrf_cleanup +} + +ping_ipv4() +{ + RET=0 + ping_do $h1 192.0.2.2 + check_fail $? "Ping worked when it should not have" + + RET=0 + ping_do $h3 192.0.2.2 + check_err $? "Ping didn't work when it should have" + + log_test "Isolated port ping" +} + +ping_ipv6() +{ + RET=0 + ping6_do $h1 2001:db8:1::2 + check_fail $? "Ping6 worked when it should not have" + + RET=0 + ping6_do $h3 2001:db8:1::2 + check_err $? "Ping6 didn't work when it should have" + + log_test "Isolated port ping6" +} + +flooding() +{ + local mac=de:ad:be:ef:13:37 + local ip=192.0.2.100 + + RET=0 + flood_test_do false $mac $ip $h1 $h2 + check_err $? "Packet was flooded when it should not have been" + + RET=0 + flood_test_do true $mac $ip $h3 $h2 + check_err $? "Packet was not flooded when it should have been" + + log_test "Isolated port flooding" +} + +trap cleanup EXIT + +setup_prepare +setup_wait + +tests_run + +exit $EXIT_STATUS diff --git a/tools/testing/selftests/net/forwarding/devlink_lib.sh b/tools/testing/selftests/net/forwarding/devlink_lib.sh new file mode 100644 index 000000000000..5ab1e5f43022 --- /dev/null +++ b/tools/testing/selftests/net/forwarding/devlink_lib.sh @@ -0,0 +1,108 @@ +#!/bin/bash +# SPDX-License-Identifier: GPL-2.0 + +############################################################################## +# Source library + +relative_path="${BASH_SOURCE%/*}" +if [[ "$relative_path" == "${BASH_SOURCE}" ]]; then + relative_path="." 
+fi + +source "$relative_path/lib.sh" + +############################################################################## +# Defines + +DEVLINK_DEV=$(devlink port show | grep "${NETIFS[p1]}" | \ + grep -v "${NETIFS[p1]}[0-9]" | cut -d" " -f1 | \ + rev | cut -d"/" -f2- | rev) +if [ -z "$DEVLINK_DEV" ]; then + echo "SKIP: ${NETIFS[p1]} has no devlink device registered for it" + exit 1 +fi +if [[ "$(echo $DEVLINK_DEV | grep -c pci)" -eq 0 ]]; then + echo "SKIP: devlink device's bus is not PCI" + exit 1 +fi + +DEVLINK_VIDDID=$(lspci -s $(echo $DEVLINK_DEV | cut -d"/" -f2) \ + -n | cut -d" " -f3) + +############################################################################## +# Sanity checks + +devlink -j resource show "$DEVLINK_DEV" &> /dev/null +if [ $? -ne 0 ]; then + echo "SKIP: iproute2 too old, missing devlink resource support" + exit 1 +fi + +############################################################################## +# Devlink helpers + +devlink_resource_names_to_path() +{ + local resource + local path="" + + for resource in "${@}"; do + if [ "$path" == "" ]; then + path="$resource" + else + path="${path}/$resource" + fi + done + + echo "$path" +} + +devlink_resource_get() +{ + local name=$1 + local resource_name=.[][\"$DEVLINK_DEV\"] + + resource_name="$resource_name | .[] | select (.name == \"$name\")" + + shift + for resource in "${@}"; do + resource_name="${resource_name} | .[\"resources\"][] | \ + select (.name == \"$resource\")" + done + + devlink -j resource show "$DEVLINK_DEV" | jq "$resource_name" +} + +devlink_resource_size_get() +{ + local size=$(devlink_resource_get "$@" | jq '.["size_new"]') + + if [ "$size" == "null" ]; then + devlink_resource_get "$@" | jq '.["size"]' + else + echo "$size" + fi +} + +devlink_resource_size_set() +{ + local new_size=$1 + local path + + shift + path=$(devlink_resource_names_to_path "$@") + devlink resource set "$DEVLINK_DEV" path "$path" size "$new_size" + check_err $? "Failed setting path $path to size $size" +} + +devlink_reload() +{ + local still_pending + + devlink dev reload "$DEVLINK_DEV" &> /dev/null + check_err $? "Failed reload" + + still_pending=$(devlink resource show "$DEVLINK_DEV" | \ + grep -c "size_new") + check_err $still_pending "Failed reload - There are still unset sizes" +} diff --git a/tools/testing/selftests/net/forwarding/gre_multipath.sh b/tools/testing/selftests/net/forwarding/gre_multipath.sh new file mode 100755 index 000000000000..982cc8c23200 --- /dev/null +++ b/tools/testing/selftests/net/forwarding/gre_multipath.sh @@ -0,0 +1,354 @@ +#!/bin/bash +# SPDX-License-Identifier: GPL-2.0 + +# Test traffic distribution when a wECMP route forwards traffic to two GRE +# tunnels. +# +# +-------------------------+ +# | H1 | +# | $h1 + | +# | 192.0.2.1/28 | | +# | 2001:db8:1::1/64 | | +# +-------------------|-----+ +# | +# +-------------------|------------------------+ +# | SW1 | | +# | $ol1 + | +# | 192.0.2.2/28 | +# | 2001:db8:1::2/64 | +# | | +# | + g1a (gre) + g1b (gre) | +# | loc=192.0.2.65 loc=192.0.2.81 | +# | rem=192.0.2.66 --. rem=192.0.2.82 --. 
| +# | tos=inherit | tos=inherit | | +# | .------------------' | | +# | | .------------------' | +# | v v | +# | + $ul1.111 (vlan) + $ul1.222 (vlan) | +# | | 192.0.2.129/28 | 192.0.2.145/28 | +# | \ / | +# | \________________/ | +# | | | +# | + $ul1 | +# +------------|-------------------------------+ +# | +# +------------|-------------------------------+ +# | SW2 + $ul2 | +# | _______|________ | +# | / \ | +# | / \ | +# | + $ul2.111 (vlan) + $ul2.222 (vlan) | +# | ^ 192.0.2.130/28 ^ 192.0.2.146/28 | +# | | | | +# | | '------------------. | +# | '------------------. | | +# | + g2a (gre) | + g2b (gre) | | +# | loc=192.0.2.66 | loc=192.0.2.82 | | +# | rem=192.0.2.65 --' rem=192.0.2.81 --' | +# | tos=inherit tos=inherit | +# | | +# | $ol2 + | +# | 192.0.2.17/28 | | +# | 2001:db8:2::1/64 | | +# +-------------------|------------------------+ +# | +# +-------------------|-----+ +# | H2 | | +# | $h2 + | +# | 192.0.2.18/28 | +# | 2001:db8:2::2/64 | +# +-------------------------+ + +ALL_TESTS=" + ping_ipv4 + ping_ipv6 + multipath_ipv4 + multipath_ipv6 + multipath_ipv6_l4 +" + +NUM_NETIFS=6 +source lib.sh + +h1_create() +{ + simple_if_init $h1 192.0.2.1/28 2001:db8:1::1/64 + ip route add vrf v$h1 192.0.2.16/28 via 192.0.2.2 + ip route add vrf v$h1 2001:db8:2::/64 via 2001:db8:1::2 +} + +h1_destroy() +{ + ip route del vrf v$h1 2001:db8:2::/64 via 2001:db8:1::2 + ip route del vrf v$h1 192.0.2.16/28 via 192.0.2.2 + simple_if_fini $h1 192.0.2.1/28 +} + +sw1_create() +{ + simple_if_init $ol1 192.0.2.2/28 2001:db8:1::2/64 + __simple_if_init $ul1 v$ol1 + vlan_create $ul1 111 v$ol1 192.0.2.129/28 + vlan_create $ul1 222 v$ol1 192.0.2.145/28 + + tunnel_create g1a gre 192.0.2.65 192.0.2.66 tos inherit dev v$ol1 + __simple_if_init g1a v$ol1 192.0.2.65/32 + ip route add vrf v$ol1 192.0.2.66/32 via 192.0.2.130 + + tunnel_create g1b gre 192.0.2.81 192.0.2.82 tos inherit dev v$ol1 + __simple_if_init g1b v$ol1 192.0.2.81/32 + ip route add vrf v$ol1 192.0.2.82/32 via 192.0.2.146 + + ip route add vrf v$ol1 192.0.2.16/28 \ + nexthop dev g1a \ + nexthop dev g1b + ip route add vrf v$ol1 2001:db8:2::/64 \ + nexthop dev g1a \ + nexthop dev g1b + + tc qdisc add dev $ul1 clsact + tc filter add dev $ul1 egress pref 111 prot 802.1q \ + flower vlan_id 111 action pass + tc filter add dev $ul1 egress pref 222 prot 802.1q \ + flower vlan_id 222 action pass +} + +sw1_destroy() +{ + tc qdisc del dev $ul1 clsact + + ip route del vrf v$ol1 2001:db8:2::/64 + ip route del vrf v$ol1 192.0.2.16/28 + + ip route del vrf v$ol1 192.0.2.82/32 via 192.0.2.146 + __simple_if_fini g1b 192.0.2.81/32 + tunnel_destroy g1b + + ip route del vrf v$ol1 192.0.2.66/32 via 192.0.2.130 + __simple_if_fini g1a 192.0.2.65/32 + tunnel_destroy g1a + + vlan_destroy $ul1 222 + vlan_destroy $ul1 111 + __simple_if_fini $ul1 + simple_if_fini $ol1 192.0.2.2/28 2001:db8:1::2/64 +} + +sw2_create() +{ + simple_if_init $ol2 192.0.2.17/28 2001:db8:2::1/64 + __simple_if_init $ul2 v$ol2 + vlan_create $ul2 111 v$ol2 192.0.2.130/28 + vlan_create $ul2 222 v$ol2 192.0.2.146/28 + + tunnel_create g2a gre 192.0.2.66 192.0.2.65 tos inherit dev v$ol2 + __simple_if_init g2a v$ol2 192.0.2.66/32 + ip route add vrf v$ol2 192.0.2.65/32 via 192.0.2.129 + + tunnel_create g2b gre 192.0.2.82 192.0.2.81 tos inherit dev v$ol2 + __simple_if_init g2b v$ol2 192.0.2.82/32 + ip route add vrf v$ol2 192.0.2.81/32 via 192.0.2.145 + + ip route add vrf v$ol2 192.0.2.0/28 \ + nexthop dev g2a \ + nexthop dev g2b + ip route add vrf v$ol2 2001:db8:1::/64 \ + nexthop dev g2a \ + nexthop dev g2b +} + 
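
At this point sw1_create() and sw2_create() have installed the overlay prefixes as ECMP routes over both tunnels; the multipath tests further down only replace the nexthop weights and compare the per-VLAN egress counters. As a quick manual sanity check (a sketch only; the exact output format depends on the iproute2 version):

	# Inspect the multipath route installed by sw1_create():
	ip route show vrf v$ol1 192.0.2.16/28
	# 192.0.2.16/28
	#         nexthop dev g1a weight 1
	#         nexthop dev g1b weight 1
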
+sw2_destroy() +{ + ip route del vrf v$ol2 2001:db8:1::/64 + ip route del vrf v$ol2 192.0.2.0/28 + + ip route del vrf v$ol2 192.0.2.81/32 via 192.0.2.145 + __simple_if_fini g2b 192.0.2.82/32 + tunnel_destroy g2b + + ip route del vrf v$ol2 192.0.2.65/32 via 192.0.2.129 + __simple_if_fini g2a 192.0.2.66/32 + tunnel_destroy g2a + + vlan_destroy $ul2 222 + vlan_destroy $ul2 111 + __simple_if_fini $ul2 + simple_if_fini $ol2 192.0.2.17/28 2001:db8:2::1/64 +} + +h2_create() +{ + simple_if_init $h2 192.0.2.18/28 2001:db8:2::2/64 + ip route add vrf v$h2 192.0.2.0/28 via 192.0.2.17 + ip route add vrf v$h2 2001:db8:1::/64 via 2001:db8:2::1 +} + +h2_destroy() +{ + ip route del vrf v$h2 2001:db8:1::/64 via 2001:db8:2::1 + ip route del vrf v$h2 192.0.2.0/28 via 192.0.2.17 + simple_if_fini $h2 192.0.2.18/28 2001:db8:2::2/64 +} + +setup_prepare() +{ + h1=${NETIFS[p1]} + ol1=${NETIFS[p2]} + + ul1=${NETIFS[p3]} + ul2=${NETIFS[p4]} + + ol2=${NETIFS[p5]} + h2=${NETIFS[p6]} + + vrf_prepare + h1_create + sw1_create + sw2_create + h2_create +} + +cleanup() +{ + pre_cleanup + + h2_destroy + sw2_destroy + sw1_destroy + h1_destroy + vrf_cleanup +} + +multipath4_test() +{ + local what=$1; shift + local weight1=$1; shift + local weight2=$1; shift + + sysctl_set net.ipv4.fib_multipath_hash_policy 1 + ip route replace vrf v$ol1 192.0.2.16/28 \ + nexthop dev g1a weight $weight1 \ + nexthop dev g1b weight $weight2 + + local t0_111=$(tc_rule_stats_get $ul1 111 egress) + local t0_222=$(tc_rule_stats_get $ul1 222 egress) + + ip vrf exec v$h1 \ + $MZ $h1 -q -p 64 -A 192.0.2.1 -B 192.0.2.18 \ + -d 1msec -t udp "sp=1024,dp=0-32768" + + local t1_111=$(tc_rule_stats_get $ul1 111 egress) + local t1_222=$(tc_rule_stats_get $ul1 222 egress) + + local d111=$((t1_111 - t0_111)) + local d222=$((t1_222 - t0_222)) + multipath_eval "$what" $weight1 $weight2 $d111 $d222 + + ip route replace vrf v$ol1 192.0.2.16/28 \ + nexthop dev g1a \ + nexthop dev g1b + sysctl_restore net.ipv4.fib_multipath_hash_policy +} + +multipath6_l4_test() +{ + local what=$1; shift + local weight1=$1; shift + local weight2=$1; shift + + sysctl_set net.ipv6.fib_multipath_hash_policy 1 + ip route replace vrf v$ol1 2001:db8:2::/64 \ + nexthop dev g1a weight $weight1 \ + nexthop dev g1b weight $weight2 + + local t0_111=$(tc_rule_stats_get $ul1 111 egress) + local t0_222=$(tc_rule_stats_get $ul1 222 egress) + + ip vrf exec v$h1 \ + $MZ $h1 -6 -q -p 64 -A 2001:db8:1::1 -B 2001:db8:2::2 \ + -d 1msec -t udp "sp=1024,dp=0-32768" + + local t1_111=$(tc_rule_stats_get $ul1 111 egress) + local t1_222=$(tc_rule_stats_get $ul1 222 egress) + + local d111=$((t1_111 - t0_111)) + local d222=$((t1_222 - t0_222)) + multipath_eval "$what" $weight1 $weight2 $d111 $d222 + + ip route replace vrf v$ol1 2001:db8:2::/64 \ + nexthop dev g1a \ + nexthop dev g1b + sysctl_restore net.ipv6.fib_multipath_hash_policy +} + +multipath6_test() +{ + local what=$1; shift + local weight1=$1; shift + local weight2=$1; shift + + ip route replace vrf v$ol1 2001:db8:2::/64 \ + nexthop dev g1a weight $weight1 \ + nexthop dev g1b weight $weight2 + + local t0_111=$(tc_rule_stats_get $ul1 111 egress) + local t0_222=$(tc_rule_stats_get $ul1 222 egress) + + # Generate 16384 echo requests, each with a random flow label. 
+ for ((i=0; i < 16384; ++i)); do + ip vrf exec v$h1 $PING6 2001:db8:2::2 -F 0 -c 1 -q &> /dev/null + done + + local t1_111=$(tc_rule_stats_get $ul1 111 egress) + local t1_222=$(tc_rule_stats_get $ul1 222 egress) + + local d111=$((t1_111 - t0_111)) + local d222=$((t1_222 - t0_222)) + multipath_eval "$what" $weight1 $weight2 $d111 $d222 + + ip route replace vrf v$ol1 2001:db8:2::/64 \ + nexthop dev g1a \ + nexthop dev g1b +} + +ping_ipv4() +{ + ping_test $h1 192.0.2.18 +} + +ping_ipv6() +{ + ping6_test $h1 2001:db8:2::2 +} + +multipath_ipv4() +{ + log_info "Running IPv4 multipath tests" + multipath4_test "ECMP" 1 1 + multipath4_test "Weighted MP 2:1" 2 1 + multipath4_test "Weighted MP 11:45" 11 45 +} + +multipath_ipv6() +{ + log_info "Running IPv6 multipath tests" + multipath6_test "ECMP" 1 1 + multipath6_test "Weighted MP 2:1" 2 1 + multipath6_test "Weighted MP 11:45" 11 45 +} + +multipath_ipv6_l4() +{ + log_info "Running IPv6 L4 hash multipath tests" + multipath6_l4_test "ECMP" 1 1 + multipath6_l4_test "Weighted MP 2:1" 2 1 + multipath6_l4_test "Weighted MP 11:45" 11 45 +} + +trap cleanup EXIT + +setup_prepare +setup_wait +tests_run + +exit $EXIT_STATUS diff --git a/tools/testing/selftests/net/forwarding/lib.sh b/tools/testing/selftests/net/forwarding/lib.sh index 7b18a53aa556..2bb9cf303c53 100644 --- a/tools/testing/selftests/net/forwarding/lib.sh +++ b/tools/testing/selftests/net/forwarding/lib.sh @@ -14,8 +14,13 @@ PAUSE_ON_CLEANUP=${PAUSE_ON_CLEANUP:=no} NETIF_TYPE=${NETIF_TYPE:=veth} NETIF_CREATE=${NETIF_CREATE:=yes} -if [[ -f forwarding.config ]]; then - source forwarding.config +relative_path="${BASH_SOURCE%/*}" +if [[ "$relative_path" == "${BASH_SOURCE}" ]]; then + relative_path="." +fi + +if [[ -f $relative_path/forwarding.config ]]; then + source "$relative_path/forwarding.config" fi ############################################################################## @@ -151,6 +156,19 @@ check_fail() fi } +check_err_fail() +{ + local should_fail=$1; shift + local err=$1; shift + local what=$1; shift + + if ((should_fail)); then + check_fail $err "$what succeeded, but should have failed" + else + check_err $err "$what failed" + fi +} + log_test() { local test_name=$1 @@ -185,18 +203,27 @@ log_info() echo "INFO: $msg" } +setup_wait_dev() +{ + local dev=$1; shift + + while true; do + ip link show dev $dev up \ + | grep 'state UP' &> /dev/null + if [[ $? -ne 0 ]]; then + sleep 1 + else + break + fi + done +} + setup_wait() { - for i in $(eval echo {1..$NUM_NETIFS}); do - while true; do - ip link show dev ${NETIFS[p$i]} up \ - | grep 'state UP' &> /dev/null - if [[ $? -ne 0 ]]; then - sleep 1 - else - break - fi - done + local num_netifs=${1:-$NUM_NETIFS} + + for ((i = 1; i <= num_netifs; ++i)); do + setup_wait_dev ${NETIFS[p$i]} done # Make sure links are ready. 
@@ -287,6 +314,29 @@ __addr_add_del() done } +__simple_if_init() +{ + local if_name=$1; shift + local vrf_name=$1; shift + local addrs=("${@}") + + ip link set dev $if_name master $vrf_name + ip link set dev $if_name up + + __addr_add_del $if_name add "${addrs[@]}" +} + +__simple_if_fini() +{ + local if_name=$1; shift + local addrs=("${@}") + + __addr_add_del $if_name del "${addrs[@]}" + + ip link set dev $if_name down + ip link set dev $if_name nomaster +} + simple_if_init() { local if_name=$1 @@ -298,11 +348,8 @@ simple_if_init() array=("${@}") vrf_create $vrf_name - ip link set dev $if_name master $vrf_name ip link set dev $vrf_name up - ip link set dev $if_name up - - __addr_add_del $if_name add "${array[@]}" + __simple_if_init $if_name $vrf_name "${array[@]}" } simple_if_fini() @@ -315,9 +362,7 @@ simple_if_fini() vrf_name=v$if_name array=("${@}") - __addr_add_del $if_name del "${array[@]}" - - ip link set dev $if_name down + __simple_if_fini $if_name "${array[@]}" vrf_destroy $vrf_name } @@ -383,9 +428,10 @@ tc_rule_stats_get() { local dev=$1; shift local pref=$1; shift + local dir=$1; shift - tc -j -s filter show dev $dev ingress pref $pref | - jq '.[1].options.actions[].stats.packets' + tc -j -s filter show dev $dev ${dir:-ingress} pref $pref \ + | jq '.[1].options.actions[].stats.packets' } mac_get() @@ -437,7 +483,9 @@ forwarding_restore() tc_offload_check() { - for i in $(eval echo {1..$NUM_NETIFS}); do + local num_netifs=${1:-$NUM_NETIFS} + + for ((i = 1; i <= num_netifs; ++i)); do ethtool -k ${NETIFS[p$i]} \ | grep "hw-tc-offload: on" &> /dev/null if [[ $? -ne 0 ]]; then @@ -453,9 +501,15 @@ trap_install() local dev=$1; shift local direction=$1; shift - # For slow-path testing, we need to install a trap to get to - # slow path the packets that would otherwise be switched in HW. - tc filter add dev $dev $direction pref 1 flower skip_sw action trap + # Some devices may not support or need in-hardware trapping of traffic + # (e.g. the veth pairs that this library creates for non-existent + # loopbacks). Use continue instead, so that there is a filter in there + # (some tests check counters), and so that other filters are still + # processed. + tc filter add dev $dev $direction pref 1 \ + flower skip_sw action trap 2>/dev/null \ + || tc filter add dev $dev $direction pref 1 \ + flower action continue } trap_uninstall() @@ -463,11 +517,13 @@ trap_uninstall() local dev=$1; shift local direction=$1; shift - tc filter del dev $dev $direction pref 1 flower skip_sw + tc filter del dev $dev $direction pref 1 flower } slow_path_trap_install() { + # For slow-path testing, we need to install a trap to get to + # slow path the packets that would otherwise be switched in HW. 
if [ "${tcflags/skip_hw}" != "$tcflags" ]; then trap_install "$@" fi @@ -557,33 +613,86 @@ tests_run() done } +multipath_eval() +{ + local desc="$1" + local weight_rp12=$2 + local weight_rp13=$3 + local packets_rp12=$4 + local packets_rp13=$5 + local weights_ratio packets_ratio diff + + RET=0 + + if [[ "$weight_rp12" -gt "$weight_rp13" ]]; then + weights_ratio=$(echo "scale=2; $weight_rp12 / $weight_rp13" \ + | bc -l) + else + weights_ratio=$(echo "scale=2; $weight_rp13 / $weight_rp12" \ + | bc -l) + fi + + if [[ "$packets_rp12" -eq "0" || "$packets_rp13" -eq "0" ]]; then + check_err 1 "Packet difference is 0" + log_test "Multipath" + log_info "Expected ratio $weights_ratio" + return + fi + + if [[ "$weight_rp12" -gt "$weight_rp13" ]]; then + packets_ratio=$(echo "scale=2; $packets_rp12 / $packets_rp13" \ + | bc -l) + else + packets_ratio=$(echo "scale=2; $packets_rp13 / $packets_rp12" \ + | bc -l) + fi + + diff=$(echo $weights_ratio - $packets_ratio | bc -l) + diff=${diff#-} + + test "$(echo "$diff / $weights_ratio > 0.15" | bc -l)" -eq 0 + check_err $? "Too large discrepancy between expected and measured ratios" + log_test "$desc" + log_info "Expected ratio $weights_ratio Measured ratio $packets_ratio" +} + ############################################################################## # Tests -ping_test() +ping_do() { local if_name=$1 local dip=$2 local vrf_name - RET=0 - vrf_name=$(master_name_get $if_name) ip vrf exec $vrf_name $PING $dip -c 10 -i 0.1 -w 2 &> /dev/null +} + +ping_test() +{ + RET=0 + + ping_do $1 $2 check_err $? log_test "ping" } -ping6_test() +ping6_do() { local if_name=$1 local dip=$2 local vrf_name - RET=0 - vrf_name=$(master_name_get $if_name) ip vrf exec $vrf_name $PING6 $dip -c 10 -i 0.1 -w 2 &> /dev/null +} + +ping6_test() +{ + RET=0 + + ping6_do $1 $2 check_err $? log_test "ping6" } diff --git a/tools/testing/selftests/net/forwarding/mirror_gre_bridge_1d.sh b/tools/testing/selftests/net/forwarding/mirror_gre_bridge_1d.sh new file mode 100755 index 000000000000..c5095da7f6bf --- /dev/null +++ b/tools/testing/selftests/net/forwarding/mirror_gre_bridge_1d.sh @@ -0,0 +1,132 @@ +#!/bin/bash +# SPDX-License-Identifier: GPL-2.0 + +# Test for "tc action mirred egress mirror" when the underlay route points at a +# bridge device without vlan filtering (802.1d). +# +# This test uses standard topology for testing mirror-to-gretap. See +# mirror_gre_topo_lib.sh for more details. 
The full topology is as follows: +# +# +---------------------+ +---------------------+ +# | H1 | | H2 | +# | + $h1 | | $h2 + | +# | | 192.0.2.1/28 | | 192.0.2.2/28 | | +# +-----|---------------+ +---------------|-----+ +# | | +# +-----|-------------------------------------------------------------|-----+ +# | SW o---> mirror | | +# | +---|-------------------------------------------------------------|---+ | +# | | + $swp1 + br1 (802.1q bridge) $swp2 + | | +# | +---------------------------------------------------------------------+ | +# | | +# | +---------------------------------------------------------------------+ | +# | | + br2 (802.1d bridge) | | +# | | 192.0.2.129/28 | | +# | | + $swp3 2001:db8:2::1/64 | | +# | +---|-----------------------------------------------------------------+ | +# | | ^ ^ | +# | | + gt6 (ip6gretap) | + gt4 (gretap) | | +# | | : loc=2001:db8:2::1 | : loc=192.0.2.129 | | +# | | : rem=2001:db8:2::2 -+ : rem=192.0.2.130 -+ | +# | | : ttl=100 : ttl=100 | +# | | : tos=inherit : tos=inherit | +# +-----|---------------------:----------------------:----------------------+ +# | : : +# +-----|---------------------:----------------------:----------------------+ +# | H3 + $h3 + h3-gt6(ip6gretap) + h3-gt4 (gretap) | +# | 192.0.2.130/28 loc=2001:db8:2::2 loc=192.0.2.130 | +# | 2001:db8:2::2/64 rem=2001:db8:2::1 rem=192.0.2.129 | +# | ttl=100 ttl=100 | +# | tos=inherit tos=inherit | +# +-------------------------------------------------------------------------+ + +ALL_TESTS=" + test_gretap + test_ip6gretap +" + +NUM_NETIFS=6 +source lib.sh +source mirror_lib.sh +source mirror_gre_lib.sh +source mirror_gre_topo_lib.sh + +setup_prepare() +{ + h1=${NETIFS[p1]} + swp1=${NETIFS[p2]} + + swp2=${NETIFS[p3]} + h2=${NETIFS[p4]} + + swp3=${NETIFS[p5]} + h3=${NETIFS[p6]} + + vrf_prepare + mirror_gre_topo_create + + ip link add name br2 type bridge vlan_filtering 0 + ip link set dev br2 up + + ip link set dev $swp3 master br2 + ip route add 192.0.2.130/32 dev br2 + ip -6 route add 2001:db8:2::2/128 dev br2 + + ip address add dev br2 192.0.2.129/28 + ip address add dev br2 2001:db8:2::1/64 + + ip address add dev $h3 192.0.2.130/28 + ip address add dev $h3 2001:db8:2::2/64 +} + +cleanup() +{ + pre_cleanup + + ip address del dev $h3 2001:db8:2::2/64 + ip address del dev $h3 192.0.2.130/28 + ip link del dev br2 + + mirror_gre_topo_destroy + vrf_cleanup +} + +test_gretap() +{ + full_test_span_gre_dir gt4 ingress 8 0 "mirror to gretap" + full_test_span_gre_dir gt4 egress 0 8 "mirror to gretap" +} + +test_ip6gretap() +{ + full_test_span_gre_dir gt6 ingress 8 0 "mirror to ip6gretap" + full_test_span_gre_dir gt6 egress 0 8 "mirror to ip6gretap" +} + +test_all() +{ + slow_path_trap_install $swp1 ingress + slow_path_trap_install $swp1 egress + + tests_run + + slow_path_trap_uninstall $swp1 egress + slow_path_trap_uninstall $swp1 ingress +} + +trap cleanup EXIT + +setup_prepare +setup_wait + +tcflags="skip_hw" +test_all + +if ! 
tc_offload_check; then + echo "WARN: Could not test offloaded functionality" +else + tcflags="skip_sw" + test_all +fi + +exit $EXIT_STATUS diff --git a/tools/testing/selftests/net/forwarding/mirror_gre_bridge_1d_vlan.sh b/tools/testing/selftests/net/forwarding/mirror_gre_bridge_1d_vlan.sh index 3bb4c2ba7b14..197e769c2ed1 100755 --- a/tools/testing/selftests/net/forwarding/mirror_gre_bridge_1d_vlan.sh +++ b/tools/testing/selftests/net/forwarding/mirror_gre_bridge_1d_vlan.sh @@ -74,12 +74,14 @@ test_vlan_match() test_gretap() { - test_vlan_match gt4 'vlan_id 555 vlan_ethtype ip' "mirror to gretap" + test_vlan_match gt4 'skip_hw vlan_id 555 vlan_ethtype ip' \ + "mirror to gretap" } test_ip6gretap() { - test_vlan_match gt6 'vlan_id 555 vlan_ethtype ipv6' "mirror to ip6gretap" + test_vlan_match gt6 'skip_hw vlan_id 555 vlan_ethtype ip' \ + "mirror to ip6gretap" } test_gretap_stp() diff --git a/tools/testing/selftests/net/forwarding/mirror_gre_bridge_1q.sh b/tools/testing/selftests/net/forwarding/mirror_gre_bridge_1q.sh new file mode 100755 index 000000000000..a3402cd8d5b6 --- /dev/null +++ b/tools/testing/selftests/net/forwarding/mirror_gre_bridge_1q.sh @@ -0,0 +1,126 @@ +#!/bin/bash +# SPDX-License-Identifier: GPL-2.0 + +# Test for "tc action mirred egress mirror" when the underlay route points at a +# bridge device with vlan filtering (802.1q). +# +# This test uses standard topology for testing mirror-to-gretap. See +# mirror_gre_topo_lib.sh for more details. The full topology is as follows: +# +# +---------------------+ +---------------------+ +# | H1 | | H2 | +# | + $h1 | | $h2 + | +# | | 192.0.2.1/28 | | 192.0.2.2/28 | | +# +-----|---------------+ +---------------|-----+ +# | | +# +-----|---------------------------------------------------------------|-----+ +# | SW o---> mirror | | +# | +---|---------------------------------------------------------------|---+ | +# | | + $swp1 + br1 (802.1q bridge) $swp2 + | | +# | | 192.0.2.129/28 | | +# | | + $swp3 2001:db8:2::1/64 | | +# | | | vid555 vid555[pvid,untagged] | | +# | +---|-------------------------------------------------------------------+ | +# | | ^ ^ | +# | | + gt6 (ip6gretap) | + gt4 (gretap) | | +# | | : loc=2001:db8:2::1 | : loc=192.0.2.129 | | +# | | : rem=2001:db8:2::2 -+ : rem=192.0.2.130 -+ | +# | | : ttl=100 : ttl=100 | +# | | : tos=inherit : tos=inherit | +# +-----|---------------------:------------------------:----------------------+ +# | : : +# +-----|---------------------:------------------------:----------------------+ +# | H3 + $h3 + h3-gt6(ip6gretap) + h3-gt4 (gretap) | +# | | loc=2001:db8:2::2 loc=192.0.2.130 | +# | + $h3.555 rem=2001:db8:2::1 rem=192.0.2.129 | +# | 192.0.2.130/28 ttl=100 ttl=100 | +# | 2001:db8:2::2/64 tos=inherit tos=inherit | +# +---------------------------------------------------------------------------+ + +ALL_TESTS=" + test_gretap + test_ip6gretap +" + +NUM_NETIFS=6 +source lib.sh +source mirror_lib.sh +source mirror_gre_lib.sh +source mirror_gre_topo_lib.sh + +setup_prepare() +{ + h1=${NETIFS[p1]} + swp1=${NETIFS[p2]} + + swp2=${NETIFS[p3]} + h2=${NETIFS[p4]} + + swp3=${NETIFS[p5]} + h3=${NETIFS[p6]} + + vrf_prepare + mirror_gre_topo_create + + ip link set dev $swp3 master br1 + bridge vlan add dev br1 vid 555 pvid untagged self + ip address add dev br1 192.0.2.129/28 + ip address add dev br1 2001:db8:2::1/64 + + ip -4 route add 192.0.2.130/32 dev br1 + ip -6 route add 2001:db8:2::2/128 dev br1 + + vlan_create $h3 555 v$h3 192.0.2.130/28 2001:db8:2::2/64 + bridge vlan add dev $swp3 vid 555 +} + 
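# Illustrative aside, not part of the patch: in the setup above the underlay
# route points at the 802.1q bridge itself, with VLAN 555 pvid/untagged on br1
# and tagged towards $swp3, so the encapsulated mirror copies leave $swp3
# tagged with VLAN 555 and terminate on $h3.555. While debugging, the VLAN
# membership and the underlay resolution can be sanity-checked with e.g.:
#
#   bridge vlan show dev br1
#   bridge vlan show dev $swp3
#   ip route get 192.0.2.130
#
# The last command is expected to resolve via br1 (the 192.0.2.130/32 route
# added in setup_prepare), which is what steers the gretap traffic into the
# bridge in the first place.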
+cleanup() +{ + pre_cleanup + + ip link set dev $swp3 nomaster + vlan_destroy $h3 555 + + mirror_gre_topo_destroy + vrf_cleanup +} + +test_gretap() +{ + full_test_span_gre_dir gt4 ingress 8 0 "mirror to gretap" + full_test_span_gre_dir gt4 egress 0 8 "mirror to gretap" +} + +test_ip6gretap() +{ + full_test_span_gre_dir gt6 ingress 8 0 "mirror to ip6gretap" + full_test_span_gre_dir gt6 egress 0 8 "mirror to ip6gretap" +} + +tests() +{ + slow_path_trap_install $swp1 ingress + slow_path_trap_install $swp1 egress + + tests_run + + slow_path_trap_uninstall $swp1 egress + slow_path_trap_uninstall $swp1 ingress +} + +trap cleanup EXIT + +setup_prepare +setup_wait + +tcflags="skip_hw" +tests + +if ! tc_offload_check; then + echo "WARN: Could not test offloaded functionality" +else + tcflags="skip_sw" + tests +fi + +exit $EXIT_STATUS diff --git a/tools/testing/selftests/net/forwarding/mirror_gre_changes.sh b/tools/testing/selftests/net/forwarding/mirror_gre_changes.sh index aa29d46186a8..135902aa8b11 100755 --- a/tools/testing/selftests/net/forwarding/mirror_gre_changes.sh +++ b/tools/testing/selftests/net/forwarding/mirror_gre_changes.sh @@ -122,15 +122,8 @@ test_span_gre_egress_up() # After setting the device up, wait for neighbor to get resolved so that # we can expect mirroring to work. ip link set dev $swp3 up - while true; do - ip neigh sh dev $swp3 $remote_ip nud reachable | - grep -q ^ - if [[ $? -ne 0 ]]; then - sleep 1 - else - break - fi - done + setup_wait_dev $swp3 + ping -c 1 -I $swp3 $remote_ip &>/dev/null quick_test_span_gre_dir $tundev ingress mirror_uninstall $swp1 ingress diff --git a/tools/testing/selftests/net/forwarding/mirror_gre_lib.sh b/tools/testing/selftests/net/forwarding/mirror_gre_lib.sh index 619b469365be..fac486178ef7 100644 --- a/tools/testing/selftests/net/forwarding/mirror_gre_lib.sh +++ b/tools/testing/selftests/net/forwarding/mirror_gre_lib.sh @@ -1,6 +1,6 @@ # SPDX-License-Identifier: GPL-2.0 -source mirror_lib.sh +source "$relative_path/mirror_lib.sh" quick_test_span_gre_dir_ips() { @@ -62,7 +62,7 @@ full_test_span_gre_dir_vlan_ips() "$backward_type" "$ip1" "$ip2" tc filter add dev $h3 ingress pref 77 prot 802.1q \ - flower $vlan_match ip_proto 0x2f \ + flower $vlan_match \ action pass mirror_test v$h1 $ip1 $ip2 $h3 77 10 tc filter del dev $h3 ingress pref 77 diff --git a/tools/testing/selftests/net/forwarding/mirror_gre_nh.sh b/tools/testing/selftests/net/forwarding/mirror_gre_nh.sh index 8fa681eb90e7..6f9ef1820e93 100755 --- a/tools/testing/selftests/net/forwarding/mirror_gre_nh.sh +++ b/tools/testing/selftests/net/forwarding/mirror_gre_nh.sh @@ -35,6 +35,8 @@ setup_prepare() vrf_prepare mirror_gre_topo_create + sysctl_set net.ipv4.conf.v$h3.rp_filter 0 + ip address add dev $swp3 192.0.2.161/28 ip address add dev $h3 192.0.2.162/28 ip address add dev gt4 192.0.2.129/32 @@ -61,6 +63,8 @@ cleanup() ip address del dev $h3 192.0.2.162/28 ip address del dev $swp3 192.0.2.161/28 + sysctl_restore net.ipv4.conf.v$h3.rp_filter 0 + mirror_gre_topo_destroy vrf_cleanup diff --git a/tools/testing/selftests/net/forwarding/mirror_gre_topo_lib.sh b/tools/testing/selftests/net/forwarding/mirror_gre_topo_lib.sh index 253419564708..39c03e2867f4 100644 --- a/tools/testing/selftests/net/forwarding/mirror_gre_topo_lib.sh +++ b/tools/testing/selftests/net/forwarding/mirror_gre_topo_lib.sh @@ -33,7 +33,7 @@ # | | # +-------------------------------------------------------------------------+ -source mirror_topo_lib.sh +source "$relative_path/mirror_topo_lib.sh" 
mirror_gre_topo_h3_create() { diff --git a/tools/testing/selftests/net/forwarding/mirror_gre_vlan_bridge_1q.sh b/tools/testing/selftests/net/forwarding/mirror_gre_vlan_bridge_1q.sh index 5dbc7a08f4bd..d3e75bb6a2d8 100755 --- a/tools/testing/selftests/net/forwarding/mirror_gre_vlan_bridge_1q.sh +++ b/tools/testing/selftests/net/forwarding/mirror_gre_vlan_bridge_1q.sh @@ -39,6 +39,12 @@ setup_prepare() swp3=${NETIFS[p5]} h3=${NETIFS[p6]} + # gt4's remote address is at $h3.555, not $h3. Thus the packets arriving + # directly to $h3 for test_gretap_untagged_egress() are rejected by + # rp_filter and the test spuriously fails. + sysctl_set net.ipv4.conf.all.rp_filter 0 + sysctl_set net.ipv4.conf.$h3.rp_filter 0 + vrf_prepare mirror_gre_topo_create @@ -65,6 +71,9 @@ cleanup() mirror_gre_topo_destroy vrf_cleanup + + sysctl_restore net.ipv4.conf.$h3.rp_filter + sysctl_restore net.ipv4.conf.all.rp_filter } test_vlan_match() @@ -79,12 +88,14 @@ test_vlan_match() test_gretap() { - test_vlan_match gt4 'vlan_id 555 vlan_ethtype ip' "mirror to gretap" + test_vlan_match gt4 'skip_hw vlan_id 555 vlan_ethtype ip' \ + "mirror to gretap" } test_ip6gretap() { - test_vlan_match gt6 'vlan_id 555 vlan_ethtype ipv6' "mirror to ip6gretap" + test_vlan_match gt6 'skip_hw vlan_id 555 vlan_ethtype ip' \ + "mirror to ip6gretap" } test_span_gre_forbidden_cpu() diff --git a/tools/testing/selftests/net/forwarding/mirror_lib.sh b/tools/testing/selftests/net/forwarding/mirror_lib.sh index d36dc26c6c51..07991e1025c7 100644 --- a/tools/testing/selftests/net/forwarding/mirror_lib.sh +++ b/tools/testing/selftests/net/forwarding/mirror_lib.sh @@ -105,7 +105,7 @@ do_test_span_vlan_dir_ips() # Install the capture as skip_hw to avoid double-counting of packets. # The traffic is meant for local box anyway, so will be trapped to # kernel. 
- vlan_capture_install $dev "skip_hw vlan_id $vid" + vlan_capture_install $dev "skip_hw vlan_id $vid vlan_ethtype ip" mirror_test v$h1 $ip1 $ip2 $dev 100 $expect mirror_test v$h2 $ip2 $ip1 $dev 100 $expect vlan_capture_uninstall $dev diff --git a/tools/testing/selftests/net/forwarding/router_bridge.sh b/tools/testing/selftests/net/forwarding/router_bridge.sh new file mode 100755 index 000000000000..ebc596a272f7 --- /dev/null +++ b/tools/testing/selftests/net/forwarding/router_bridge.sh @@ -0,0 +1,113 @@ +#!/bin/bash +# SPDX-License-Identifier: GPL-2.0 + +ALL_TESTS=" + ping_ipv4 + ping_ipv6 +" +NUM_NETIFS=4 +source lib.sh + +h1_create() +{ + simple_if_init $h1 192.0.2.1/28 2001:db8:1::1/64 + ip -4 route add 192.0.2.128/28 vrf v$h1 nexthop via 192.0.2.2 + ip -6 route add 2001:db8:2::/64 vrf v$h1 nexthop via 2001:db8:1::2 +} + +h1_destroy() +{ + ip -6 route del 2001:db8:2::/64 vrf v$h1 + ip -4 route del 192.0.2.128/28 vrf v$h1 + simple_if_fini $h1 192.0.2.1/28 2001:db8:1::1/64 +} + +h2_create() +{ + simple_if_init $h2 192.0.2.130/28 2001:db8:2::2/64 + ip -4 route add 192.0.2.0/28 vrf v$h2 nexthop via 192.0.2.129 + ip -6 route add 2001:db8:1::/64 vrf v$h2 nexthop via 2001:db8:2::1 +} + +h2_destroy() +{ + ip -6 route del 2001:db8:1::/64 vrf v$h2 + ip -4 route del 192.0.2.0/28 vrf v$h2 + simple_if_fini $h2 192.0.2.130/28 2001:db8:2::2/64 +} + +router_create() +{ + ip link add name br1 type bridge vlan_filtering 1 + ip link set dev br1 up + + ip link set dev $swp1 master br1 + ip link set dev $swp1 up + __addr_add_del br1 add 192.0.2.2/28 2001:db8:1::2/64 + + ip link set dev $swp2 up + __addr_add_del $swp2 add 192.0.2.129/28 2001:db8:2::1/64 +} + +router_destroy() +{ + __addr_add_del $swp2 del 192.0.2.129/28 2001:db8:2::1/64 + ip link set dev $swp2 down + + __addr_add_del br1 del 192.0.2.2/28 2001:db8:1::2/64 + ip link set dev $swp1 down + ip link set dev $swp1 nomaster + + ip link del dev br1 +} + +setup_prepare() +{ + h1=${NETIFS[p1]} + swp1=${NETIFS[p2]} + + swp2=${NETIFS[p3]} + h2=${NETIFS[p4]} + + vrf_prepare + + h1_create + h2_create + + router_create + + forwarding_enable +} + +cleanup() +{ + pre_cleanup + + forwarding_restore + + router_destroy + + h2_destroy + h1_destroy + + vrf_cleanup +} + +ping_ipv4() +{ + ping_test $h1 192.0.2.130 +} + +ping_ipv6() +{ + ping6_test $h1 2001:db8:2::2 +} + +trap cleanup EXIT + +setup_prepare +setup_wait + +tests_run + +exit $EXIT_STATUS diff --git a/tools/testing/selftests/net/forwarding/router_bridge_vlan.sh b/tools/testing/selftests/net/forwarding/router_bridge_vlan.sh new file mode 100755 index 000000000000..fef88eb4b873 --- /dev/null +++ b/tools/testing/selftests/net/forwarding/router_bridge_vlan.sh @@ -0,0 +1,132 @@ +#!/bin/bash +# SPDX-License-Identifier: GPL-2.0 + +ALL_TESTS=" + ping_ipv4 + ping_ipv6 + vlan +" +NUM_NETIFS=4 +source lib.sh + +h1_create() +{ + simple_if_init $h1 + vlan_create $h1 555 v$h1 192.0.2.1/28 2001:db8:1::1/64 + ip -4 route add 192.0.2.128/28 vrf v$h1 nexthop via 192.0.2.2 + ip -6 route add 2001:db8:2::/64 vrf v$h1 nexthop via 2001:db8:1::2 +} + +h1_destroy() +{ + ip -6 route del 2001:db8:2::/64 vrf v$h1 + ip -4 route del 192.0.2.128/28 vrf v$h1 + vlan_destroy $h1 555 + simple_if_fini $h1 +} + +h2_create() +{ + simple_if_init $h2 192.0.2.130/28 2001:db8:2::2/64 + ip -4 route add 192.0.2.0/28 vrf v$h2 nexthop via 192.0.2.129 + ip -6 route add 2001:db8:1::/64 vrf v$h2 nexthop via 2001:db8:2::1 +} + +h2_destroy() +{ + ip -6 route del 2001:db8:1::/64 vrf v$h2 + ip -4 route del 192.0.2.0/28 vrf v$h2 + simple_if_fini $h2 
192.0.2.130/28 +} + +router_create() +{ + ip link add name br1 type bridge vlan_filtering 1 + ip link set dev br1 up + + ip link set dev $swp1 master br1 + ip link set dev $swp1 up + + bridge vlan add dev br1 vid 555 self pvid untagged + bridge vlan add dev $swp1 vid 555 + + __addr_add_del br1 add 192.0.2.2/28 2001:db8:1::2/64 + + ip link set dev $swp2 up + __addr_add_del $swp2 add 192.0.2.129/28 2001:db8:2::1/64 +} + +router_destroy() +{ + __addr_add_del $swp2 del 192.0.2.129/28 2001:db8:2::1/64 + ip link set dev $swp2 down + + __addr_add_del br1 del 192.0.2.2/28 2001:db8:1::2/64 + ip link set dev $swp1 down + ip link set dev $swp1 nomaster + + ip link del dev br1 +} + +setup_prepare() +{ + h1=${NETIFS[p1]} + swp1=${NETIFS[p2]} + + swp2=${NETIFS[p3]} + h2=${NETIFS[p4]} + + vrf_prepare + + h1_create + h2_create + + router_create + + forwarding_enable +} + +cleanup() +{ + pre_cleanup + + forwarding_restore + + router_destroy + + h2_destroy + h1_destroy + + vrf_cleanup +} + +vlan() +{ + RET=0 + + bridge vlan add dev br1 vid 333 self + check_err $? "Can't add a non-PVID VLAN" + bridge vlan del dev br1 vid 333 self + check_err $? "Can't remove a non-PVID VLAN" + + log_test "vlan" +} + +ping_ipv4() +{ + ping_test $h1 192.0.2.130 +} + +ping_ipv6() +{ + ping6_test $h1 2001:db8:2::2 +} + +trap cleanup EXIT + +setup_prepare +setup_wait + +tests_run + +exit $EXIT_STATUS diff --git a/tools/testing/selftests/net/forwarding/router_multipath.sh b/tools/testing/selftests/net/forwarding/router_multipath.sh index 8b6d0fb6d604..79a209927962 100755 --- a/tools/testing/selftests/net/forwarding/router_multipath.sh +++ b/tools/testing/selftests/net/forwarding/router_multipath.sh @@ -159,45 +159,6 @@ router2_destroy() vrf_destroy "vrf-r2" } -multipath_eval() -{ - local desc="$1" - local weight_rp12=$2 - local weight_rp13=$3 - local packets_rp12=$4 - local packets_rp13=$5 - local weights_ratio packets_ratio diff - - RET=0 - - if [[ "$packets_rp12" -eq "0" || "$packets_rp13" -eq "0" ]]; then - check_err 1 "Packet difference is 0" - log_test "Multipath" - log_info "Expected ratio $weights_ratio" - return - fi - - if [[ "$weight_rp12" -gt "$weight_rp13" ]]; then - weights_ratio=$(echo "scale=2; $weight_rp12 / $weight_rp13" \ - | bc -l) - packets_ratio=$(echo "scale=2; $packets_rp12 / $packets_rp13" \ - | bc -l) - else - weights_ratio=$(echo "scale=2; $weight_rp13 / $weight_rp12" | \ - bc -l) - packets_ratio=$(echo "scale=2; $packets_rp13 / $packets_rp12" | \ - bc -l) - fi - - diff=$(echo $weights_ratio - $packets_ratio | bc -l) - diff=${diff#-} - - test "$(echo "$diff / $weights_ratio > 0.15" | bc -l)" -eq 0 - check_err $? "Too large discrepancy between expected and measured ratios" - log_test "$desc" - log_info "Expected ratio $weights_ratio Measured ratio $packets_ratio" -} - multipath4_test() { local desc="$1" diff --git a/tools/testing/selftests/net/ip6_gre_headroom.sh b/tools/testing/selftests/net/ip6_gre_headroom.sh new file mode 100755 index 000000000000..5b41e8bb6e2d --- /dev/null +++ b/tools/testing/selftests/net/ip6_gre_headroom.sh @@ -0,0 +1,65 @@ +#!/bin/bash +# SPDX-License-Identifier: GPL-2.0 +# +# Test that enough headroom is reserved for the first packet passing through an +# IPv6 GRE-like netdevice. 
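# Illustrative aside, not part of the patch: "headroom" is the skb space
# reserved in front of the payload for headers pushed by encapsulation. As a
# rough sketch, a packet mirred into the ip6gretap/ip6erspan devices created
# below needs room for an outer Ethernet header plus a 40-byte IPv6 header
# plus the GRE (and, for ip6erspan, ERSPAN) header. If the tunnel device does
# not advertise enough headroom for the very first mirrored packet, the header
# push can run past the start of the buffer, which is why the test's only pass
# criterion is that the kernel does not panic.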
+ +setup_prepare() +{ + ip link add h1 type veth peer name swp1 + ip link add h3 type veth peer name swp3 + + ip link set dev h1 up + ip address add 192.0.2.1/28 dev h1 + + ip link add dev vh3 type vrf table 20 + ip link set dev h3 master vh3 + ip link set dev vh3 up + ip link set dev h3 up + + ip link set dev swp3 up + ip address add dev swp3 2001:db8:2::1/64 + ip address add dev swp3 2001:db8:2::3/64 + + ip link set dev swp1 up + tc qdisc add dev swp1 clsact + + ip link add name er6 type ip6erspan \ + local 2001:db8:2::1 remote 2001:db8:2::2 oseq okey 123 + ip link set dev er6 up + + ip link add name gt6 type ip6gretap \ + local 2001:db8:2::3 remote 2001:db8:2::4 + ip link set dev gt6 up + + sleep 1 +} + +cleanup() +{ + ip link del dev gt6 + ip link del dev er6 + ip link del dev swp1 + ip link del dev swp3 + ip link del dev vh3 +} + +test_headroom() +{ + local type=$1; shift + local tundev=$1; shift + + tc filter add dev swp1 ingress pref 1000 matchall skip_hw \ + action mirred egress mirror dev $tundev + ping -I h1 192.0.2.2 -c 1 -w 2 &> /dev/null + tc filter del dev swp1 ingress pref 1000 + + # If it doesn't panic, it passes. + printf "TEST: %-60s [PASS]\n" "$type headroom" +} + +trap cleanup EXIT + +setup_prepare +test_headroom ip6gretap gt6 +test_headroom ip6erspan er6 diff --git a/tools/testing/selftests/net/rtnetlink.sh b/tools/testing/selftests/net/rtnetlink.sh index 0d7a44fa30af..08c341b49760 100755 --- a/tools/testing/selftests/net/rtnetlink.sh +++ b/tools/testing/selftests/net/rtnetlink.sh @@ -525,18 +525,21 @@ kci_test_macsec() #------------------------------------------------------------------- kci_test_ipsec() { - srcip="14.0.0.52" - dstip="14.0.0.70" + ret=0 algo="aead rfc4106(gcm(aes)) 0x3132333435363738393031323334353664636261 128" + srcip=192.168.123.1 + dstip=192.168.123.2 + spi=7 + + ip addr add $srcip dev $devdummy # flush to be sure there's nothing configured ip x s flush ; ip x p flush check_err $? # start the monitor in the background - tmpfile=`mktemp ipsectestXXX` - ip x m > $tmpfile & - mpid=$! + tmpfile=`mktemp /var/run/ipsectestXXX` + mpid=`(ip x m > $tmpfile & echo $!) 2>/dev/null` sleep 0.2 ipsecid="proto esp src $srcip dst $dstip spi 0x07" @@ -599,6 +602,7 @@ kci_test_ipsec() check_err $? ip x p flush check_err $? + ip addr del $srcip/32 dev $devdummy if [ $ret -ne 0 ]; then echo "FAIL: ipsec" @@ -607,6 +611,119 @@ kci_test_ipsec() echo "PASS: ipsec" } +#------------------------------------------------------------------- +# Example commands +# ip x s add proto esp src 14.0.0.52 dst 14.0.0.70 \ +# spi 0x07 mode transport reqid 0x07 replay-window 32 \ +# aead 'rfc4106(gcm(aes))' 1234567890123456dcba 128 \ +# sel src 14.0.0.52/24 dst 14.0.0.70/24 +# offload dev sim1 dir out +# ip x p add dir out src 14.0.0.52/24 dst 14.0.0.70/24 \ +# tmpl proto esp src 14.0.0.52 dst 14.0.0.70 \ +# spi 0x07 mode transport reqid 0x07 +# +#------------------------------------------------------------------- +kci_test_ipsec_offload() +{ + ret=0 + algo="aead rfc4106(gcm(aes)) 0x3132333435363738393031323334353664636261 128" + srcip=192.168.123.3 + dstip=192.168.123.4 + dev=simx1 + sysfsd=/sys/kernel/debug/netdevsim/$dev + sysfsf=$sysfsd/ipsec + + # setup netdevsim since dummydev doesn't have offload support + modprobe netdevsim + check_err $? + if [ $ret -ne 0 ]; then + echo "FAIL: ipsec_offload can't load netdevsim" + return 1 + fi + + ip link add $dev type netdevsim + ip addr add $srcip dev $dev + ip link set $dev up + if [ ! 
-d $sysfsd ] ; then + echo "FAIL: ipsec_offload can't create device $dev" + return 1 + fi + if [ ! -f $sysfsf ] ; then + echo "FAIL: ipsec_offload netdevsim doesn't support IPsec offload" + return 1 + fi + + # flush to be sure there's nothing configured + ip x s flush ; ip x p flush + + # create offloaded SAs, both in and out + ip x p add dir out src $srcip/24 dst $dstip/24 \ + tmpl proto esp src $srcip dst $dstip spi 9 \ + mode transport reqid 42 + check_err $? + ip x p add dir out src $dstip/24 dst $srcip/24 \ + tmpl proto esp src $dstip dst $srcip spi 9 \ + mode transport reqid 42 + check_err $? + + ip x s add proto esp src $srcip dst $dstip spi 9 \ + mode transport reqid 42 $algo sel src $srcip/24 dst $dstip/24 \ + offload dev $dev dir out + check_err $? + ip x s add proto esp src $dstip dst $srcip spi 9 \ + mode transport reqid 42 $algo sel src $dstip/24 dst $srcip/24 \ + offload dev $dev dir in + check_err $? + if [ $ret -ne 0 ]; then + echo "FAIL: ipsec_offload can't create SA" + return 1 + fi + + # does offload show up in ip output + lines=`ip x s list | grep -c "crypto offload parameters: dev $dev dir"` + if [ $lines -ne 2 ] ; then + echo "FAIL: ipsec_offload SA offload missing from list output" + check_err 1 + fi + + # use ping to exercise the Tx path + ping -I $dev -c 3 -W 1 -i 0 $dstip >/dev/null + + # does driver have correct offload info + diff $sysfsf - << EOF +SA count=2 tx=3 +sa[0] tx ipaddr=0x00000000 00000000 00000000 00000000 +sa[0] spi=0x00000009 proto=0x32 salt=0x61626364 crypt=1 +sa[0] key=0x34333231 38373635 32313039 36353433 +sa[1] rx ipaddr=0x00000000 00000000 00000000 037ba8c0 +sa[1] spi=0x00000009 proto=0x32 salt=0x61626364 crypt=1 +sa[1] key=0x34333231 38373635 32313039 36353433 +EOF + if [ $? -ne 0 ] ; then + echo "FAIL: ipsec_offload incorrect driver data" + check_err 1 + fi + + # does offload get removed from driver + ip x s flush + ip x p flush + lines=`grep -c "SA count=0" $sysfsf` + if [ $lines -ne 1 ] ; then + echo "FAIL: ipsec_offload SA not removed from driver" + check_err 1 + fi + + # clean up any leftovers + ip link del $dev + rmmod netdevsim + + if [ $ret -ne 0 ]; then + echo "FAIL: ipsec_offload" + return 1 + fi + echo "PASS: ipsec_offload" +} + kci_test_gretap() { testns="testns" @@ -861,6 +978,7 @@ kci_test_rtnl() kci_test_encap kci_test_macsec kci_test_ipsec + kci_test_ipsec_offload kci_del_dummy } diff --git a/tools/testing/selftests/net/tls.c b/tools/testing/selftests/net/tls.c new file mode 100644 index 000000000000..b3ebf2646e52 --- /dev/null +++ b/tools/testing/selftests/net/tls.c @@ -0,0 +1,692 @@ +// SPDX-License-Identifier: GPL-2.0 + +#define _GNU_SOURCE + +#include <arpa/inet.h> +#include <errno.h> +#include <error.h> +#include <fcntl.h> +#include <poll.h> +#include <stdio.h> +#include <stdlib.h> +#include <unistd.h> + +#include <linux/tls.h> +#include <linux/tcp.h> +#include <linux/socket.h> + +#include <sys/types.h> +#include <sys/sendfile.h> +#include <sys/socket.h> +#include <sys/stat.h> + +#include "../kselftest_harness.h" + +#define TLS_PAYLOAD_MAX_LEN 16384 +#define SOL_TLS 282 + +FIXTURE(tls) +{ + int fd, cfd; + bool notls; +}; + +FIXTURE_SETUP(tls) +{ + struct tls12_crypto_info_aes_gcm_128 tls12; + struct sockaddr_in addr; + socklen_t len; + int sfd, ret; + + self->notls = false; + len = sizeof(addr); + + memset(&tls12, 0, sizeof(tls12)); + tls12.info.version = TLS_1_2_VERSION; + tls12.info.cipher_type = TLS_CIPHER_AES_GCM_128; + + addr.sin_family = AF_INET; + addr.sin_addr.s_addr = htonl(INADDR_ANY); + addr.sin_port = 0; + + 
self->fd = socket(AF_INET, SOCK_STREAM, 0); + sfd = socket(AF_INET, SOCK_STREAM, 0); + + ret = bind(sfd, &addr, sizeof(addr)); + ASSERT_EQ(ret, 0); + ret = listen(sfd, 10); + ASSERT_EQ(ret, 0); + + ret = getsockname(sfd, &addr, &len); + ASSERT_EQ(ret, 0); + + ret = connect(self->fd, &addr, sizeof(addr)); + ASSERT_EQ(ret, 0); + + ret = setsockopt(self->fd, IPPROTO_TCP, TCP_ULP, "tls", sizeof("tls")); + if (ret != 0) { + self->notls = true; + printf("Failure setting TCP_ULP, testing without tls\n"); + } + + if (!self->notls) { + ret = setsockopt(self->fd, SOL_TLS, TLS_TX, &tls12, + sizeof(tls12)); + ASSERT_EQ(ret, 0); + } + + self->cfd = accept(sfd, &addr, &len); + ASSERT_GE(self->cfd, 0); + + if (!self->notls) { + ret = setsockopt(self->cfd, IPPROTO_TCP, TCP_ULP, "tls", + sizeof("tls")); + ASSERT_EQ(ret, 0); + + ret = setsockopt(self->cfd, SOL_TLS, TLS_RX, &tls12, + sizeof(tls12)); + ASSERT_EQ(ret, 0); + } + + close(sfd); +} + +FIXTURE_TEARDOWN(tls) +{ + close(self->fd); + close(self->cfd); +} + +TEST_F(tls, sendfile) +{ + int filefd = open("/proc/self/exe", O_RDONLY); + struct stat st; + + EXPECT_GE(filefd, 0); + fstat(filefd, &st); + EXPECT_GE(sendfile(self->fd, filefd, 0, st.st_size), 0); +} + +TEST_F(tls, send_then_sendfile) +{ + int filefd = open("/proc/self/exe", O_RDONLY); + char const *test_str = "test_send"; + int to_send = strlen(test_str) + 1; + char recv_buf[10]; + struct stat st; + char *buf; + + EXPECT_GE(filefd, 0); + fstat(filefd, &st); + buf = (char *)malloc(st.st_size); + + EXPECT_EQ(send(self->fd, test_str, to_send, 0), to_send); + EXPECT_EQ(recv(self->cfd, recv_buf, to_send, 0), to_send); + EXPECT_EQ(memcmp(test_str, recv_buf, to_send), 0); + + EXPECT_GE(sendfile(self->fd, filefd, 0, st.st_size), 0); + EXPECT_EQ(recv(self->cfd, buf, st.st_size, 0), st.st_size); +} + +TEST_F(tls, recv_max) +{ + unsigned int send_len = TLS_PAYLOAD_MAX_LEN; + char recv_mem[TLS_PAYLOAD_MAX_LEN]; + char buf[TLS_PAYLOAD_MAX_LEN]; + + EXPECT_GE(send(self->fd, buf, send_len, 0), 0); + EXPECT_NE(recv(self->cfd, recv_mem, send_len, 0), -1); + EXPECT_EQ(memcmp(buf, recv_mem, send_len), 0); +} + +TEST_F(tls, recv_small) +{ + char const *test_str = "test_read"; + int send_len = 10; + char buf[10]; + + send_len = strlen(test_str) + 1; + EXPECT_EQ(send(self->fd, test_str, send_len, 0), send_len); + EXPECT_NE(recv(self->cfd, buf, send_len, 0), -1); + EXPECT_EQ(memcmp(buf, test_str, send_len), 0); +} + +TEST_F(tls, msg_more) +{ + char const *test_str = "test_read"; + int send_len = 10; + char buf[10 * 2]; + + EXPECT_EQ(send(self->fd, test_str, send_len, MSG_MORE), send_len); + EXPECT_EQ(recv(self->cfd, buf, send_len, MSG_DONTWAIT), -1); + EXPECT_EQ(send(self->fd, test_str, send_len, 0), send_len); + EXPECT_EQ(recv(self->cfd, buf, send_len * 2, MSG_DONTWAIT), + send_len * 2); + EXPECT_EQ(memcmp(buf, test_str, send_len), 0); +} + +TEST_F(tls, sendmsg_single) +{ + struct msghdr msg; + + char const *test_str = "test_sendmsg"; + size_t send_len = 13; + struct iovec vec; + char buf[13]; + + vec.iov_base = (char *)test_str; + vec.iov_len = send_len; + memset(&msg, 0, sizeof(struct msghdr)); + msg.msg_iov = &vec; + msg.msg_iovlen = 1; + EXPECT_EQ(sendmsg(self->fd, &msg, 0), send_len); + EXPECT_EQ(recv(self->cfd, buf, send_len, 0), send_len); + EXPECT_EQ(memcmp(buf, test_str, send_len), 0); +} + +TEST_F(tls, sendmsg_large) +{ + void *mem = malloc(16384); + size_t send_len = 16384; + size_t sends = 128; + struct msghdr msg; + size_t recvs = 0; + size_t sent = 0; + + memset(&msg, 0, sizeof(struct msghdr)); + while 
(sent++ < sends) { + struct iovec vec = { (void *)mem, send_len }; + + msg.msg_iov = &vec; + msg.msg_iovlen = 1; + EXPECT_EQ(sendmsg(self->cfd, &msg, 0), send_len); + } + + while (recvs++ < sends) + EXPECT_NE(recv(self->fd, mem, send_len, 0), -1); + + free(mem); +} + +TEST_F(tls, sendmsg_multiple) +{ + char const *test_str = "test_sendmsg_multiple"; + struct iovec vec[5]; + char *test_strs[5]; + struct msghdr msg; + int total_len = 0; + int len_cmp = 0; + int iov_len = 5; + char *buf; + int i; + + memset(&msg, 0, sizeof(struct msghdr)); + for (i = 0; i < iov_len; i++) { + test_strs[i] = (char *)malloc(strlen(test_str) + 1); + snprintf(test_strs[i], strlen(test_str) + 1, "%s", test_str); + vec[i].iov_base = (void *)test_strs[i]; + vec[i].iov_len = strlen(test_strs[i]) + 1; + total_len += vec[i].iov_len; + } + msg.msg_iov = vec; + msg.msg_iovlen = iov_len; + + EXPECT_EQ(sendmsg(self->cfd, &msg, 0), total_len); + buf = malloc(total_len); + EXPECT_NE(recv(self->fd, buf, total_len, 0), -1); + for (i = 0; i < iov_len; i++) { + EXPECT_EQ(memcmp(test_strs[i], buf + len_cmp, + strlen(test_strs[i])), + 0); + len_cmp += strlen(buf + len_cmp) + 1; + } + for (i = 0; i < iov_len; i++) + free(test_strs[i]); + free(buf); +} + +TEST_F(tls, sendmsg_multiple_stress) +{ + char const *test_str = "abcdefghijklmno"; + struct iovec vec[1024]; + char *test_strs[1024]; + int iov_len = 1024; + int total_len = 0; + char buf[1 << 14]; + struct msghdr msg; + int len_cmp = 0; + int i; + + memset(&msg, 0, sizeof(struct msghdr)); + for (i = 0; i < iov_len; i++) { + test_strs[i] = (char *)malloc(strlen(test_str) + 1); + snprintf(test_strs[i], strlen(test_str) + 1, "%s", test_str); + vec[i].iov_base = (void *)test_strs[i]; + vec[i].iov_len = strlen(test_strs[i]) + 1; + total_len += vec[i].iov_len; + } + msg.msg_iov = vec; + msg.msg_iovlen = iov_len; + + EXPECT_EQ(sendmsg(self->fd, &msg, 0), total_len); + EXPECT_NE(recv(self->cfd, buf, total_len, 0), -1); + + for (i = 0; i < iov_len; i++) + len_cmp += strlen(buf + len_cmp) + 1; + + for (i = 0; i < iov_len; i++) + free(test_strs[i]); +} + +TEST_F(tls, splice_from_pipe) +{ + int send_len = TLS_PAYLOAD_MAX_LEN; + char mem_send[TLS_PAYLOAD_MAX_LEN]; + char mem_recv[TLS_PAYLOAD_MAX_LEN]; + int p[2]; + + ASSERT_GE(pipe(p), 0); + EXPECT_GE(write(p[1], mem_send, send_len), 0); + EXPECT_GE(splice(p[0], NULL, self->fd, NULL, send_len, 0), 0); + EXPECT_GE(recv(self->cfd, mem_recv, send_len, 0), 0); + EXPECT_EQ(memcmp(mem_send, mem_recv, send_len), 0); +} + +TEST_F(tls, splice_from_pipe2) +{ + int send_len = 16000; + char mem_send[16000]; + char mem_recv[16000]; + int p2[2]; + int p[2]; + + ASSERT_GE(pipe(p), 0); + ASSERT_GE(pipe(p2), 0); + EXPECT_GE(write(p[1], mem_send, 8000), 0); + EXPECT_GE(splice(p[0], NULL, self->fd, NULL, 8000, 0), 0); + EXPECT_GE(write(p2[1], mem_send + 8000, 8000), 0); + EXPECT_GE(splice(p2[0], NULL, self->fd, NULL, 8000, 0), 0); + EXPECT_GE(recv(self->cfd, mem_recv, send_len, 0), 0); + EXPECT_EQ(memcmp(mem_send, mem_recv, send_len), 0); +} + +TEST_F(tls, send_and_splice) +{ + int send_len = TLS_PAYLOAD_MAX_LEN; + char mem_send[TLS_PAYLOAD_MAX_LEN]; + char mem_recv[TLS_PAYLOAD_MAX_LEN]; + char const *test_str = "test_read"; + int send_len2 = 10; + char buf[10]; + int p[2]; + + ASSERT_GE(pipe(p), 0); + EXPECT_EQ(send(self->fd, test_str, send_len2, 0), send_len2); + EXPECT_NE(recv(self->cfd, buf, send_len2, 0), -1); + EXPECT_EQ(memcmp(test_str, buf, send_len2), 0); + + EXPECT_GE(write(p[1], mem_send, send_len), send_len); + EXPECT_GE(splice(p[0], NULL, self->fd, 
NULL, send_len, 0), send_len); + + EXPECT_GE(recv(self->cfd, mem_recv, send_len, 0), 0); + EXPECT_EQ(memcmp(mem_send, mem_recv, send_len), 0); +} + +TEST_F(tls, splice_to_pipe) +{ + int send_len = TLS_PAYLOAD_MAX_LEN; + char mem_send[TLS_PAYLOAD_MAX_LEN]; + char mem_recv[TLS_PAYLOAD_MAX_LEN]; + int p[2]; + + ASSERT_GE(pipe(p), 0); + EXPECT_GE(send(self->fd, mem_send, send_len, 0), 0); + EXPECT_GE(splice(self->cfd, NULL, p[1], NULL, send_len, 0), 0); + EXPECT_GE(read(p[0], mem_recv, send_len), 0); + EXPECT_EQ(memcmp(mem_send, mem_recv, send_len), 0); +} + +TEST_F(tls, recvmsg_single) +{ + char const *test_str = "test_recvmsg_single"; + int send_len = strlen(test_str) + 1; + char buf[20]; + struct msghdr hdr; + struct iovec vec; + + memset(&hdr, 0, sizeof(hdr)); + EXPECT_EQ(send(self->fd, test_str, send_len, 0), send_len); + vec.iov_base = (char *)buf; + vec.iov_len = send_len; + hdr.msg_iovlen = 1; + hdr.msg_iov = &vec; + EXPECT_NE(recvmsg(self->cfd, &hdr, 0), -1); + EXPECT_EQ(memcmp(test_str, buf, send_len), 0); +} + +TEST_F(tls, recvmsg_single_max) +{ + int send_len = TLS_PAYLOAD_MAX_LEN; + char send_mem[TLS_PAYLOAD_MAX_LEN]; + char recv_mem[TLS_PAYLOAD_MAX_LEN]; + struct iovec vec; + struct msghdr hdr; + + EXPECT_EQ(send(self->fd, send_mem, send_len, 0), send_len); + vec.iov_base = (char *)recv_mem; + vec.iov_len = TLS_PAYLOAD_MAX_LEN; + + hdr.msg_iovlen = 1; + hdr.msg_iov = &vec; + EXPECT_NE(recvmsg(self->cfd, &hdr, 0), -1); + EXPECT_EQ(memcmp(send_mem, recv_mem, send_len), 0); +} + +TEST_F(tls, recvmsg_multiple) +{ + unsigned int msg_iovlen = 1024; + unsigned int len_compared = 0; + struct iovec vec[1024]; + char *iov_base[1024]; + unsigned int iov_len = 16; + int send_len = 1 << 14; + char buf[1 << 14]; + struct msghdr hdr; + int i; + + EXPECT_EQ(send(self->fd, buf, send_len, 0), send_len); + for (i = 0; i < msg_iovlen; i++) { + iov_base[i] = (char *)malloc(iov_len); + vec[i].iov_base = iov_base[i]; + vec[i].iov_len = iov_len; + } + + hdr.msg_iovlen = msg_iovlen; + hdr.msg_iov = vec; + EXPECT_NE(recvmsg(self->cfd, &hdr, 0), -1); + for (i = 0; i < msg_iovlen; i++) + len_compared += iov_len; + + for (i = 0; i < msg_iovlen; i++) + free(iov_base[i]); +} + +TEST_F(tls, single_send_multiple_recv) +{ + unsigned int total_len = TLS_PAYLOAD_MAX_LEN * 2; + unsigned int send_len = TLS_PAYLOAD_MAX_LEN; + char send_mem[TLS_PAYLOAD_MAX_LEN * 2]; + char recv_mem[TLS_PAYLOAD_MAX_LEN * 2]; + + EXPECT_GE(send(self->fd, send_mem, total_len, 0), 0); + memset(recv_mem, 0, total_len); + + EXPECT_NE(recv(self->cfd, recv_mem, send_len, 0), -1); + EXPECT_NE(recv(self->cfd, recv_mem + send_len, send_len, 0), -1); + EXPECT_EQ(memcmp(send_mem, recv_mem, total_len), 0); +} + +TEST_F(tls, multiple_send_single_recv) +{ + unsigned int total_len = 2 * 10; + unsigned int send_len = 10; + char recv_mem[2 * 10]; + char send_mem[10]; + + EXPECT_GE(send(self->fd, send_mem, send_len, 0), 0); + EXPECT_GE(send(self->fd, send_mem, send_len, 0), 0); + memset(recv_mem, 0, total_len); + EXPECT_EQ(recv(self->cfd, recv_mem, total_len, 0), total_len); + + EXPECT_EQ(memcmp(send_mem, recv_mem, send_len), 0); + EXPECT_EQ(memcmp(send_mem, recv_mem + send_len, send_len), 0); +} + +TEST_F(tls, recv_partial) +{ + char const *test_str = "test_read_partial"; + char const *test_str_first = "test_read"; + char const *test_str_second = "_partial"; + int send_len = strlen(test_str) + 1; + char recv_mem[18]; + + memset(recv_mem, 0, sizeof(recv_mem)); + EXPECT_EQ(send(self->fd, test_str, send_len, 0), send_len); + EXPECT_NE(recv(self->cfd, 
recv_mem, strlen(test_str_first), 0), -1); + EXPECT_EQ(memcmp(test_str_first, recv_mem, strlen(test_str_first)), 0); + memset(recv_mem, 0, sizeof(recv_mem)); + EXPECT_NE(recv(self->cfd, recv_mem, strlen(test_str_second), 0), -1); + EXPECT_EQ(memcmp(test_str_second, recv_mem, strlen(test_str_second)), + 0); +} + +TEST_F(tls, recv_nonblock) +{ + char buf[4096]; + bool err; + + EXPECT_EQ(recv(self->cfd, buf, sizeof(buf), MSG_DONTWAIT), -1); + err = (errno == EAGAIN || errno == EWOULDBLOCK); + EXPECT_EQ(err, true); +} + +TEST_F(tls, recv_peek) +{ + char const *test_str = "test_read_peek"; + int send_len = strlen(test_str) + 1; + char buf[15]; + + EXPECT_EQ(send(self->fd, test_str, send_len, 0), send_len); + EXPECT_NE(recv(self->cfd, buf, send_len, MSG_PEEK), -1); + EXPECT_EQ(memcmp(test_str, buf, send_len), 0); + memset(buf, 0, sizeof(buf)); + EXPECT_NE(recv(self->cfd, buf, send_len, 0), -1); + EXPECT_EQ(memcmp(test_str, buf, send_len), 0); +} + +TEST_F(tls, recv_peek_multiple) +{ + char const *test_str = "test_read_peek"; + int send_len = strlen(test_str) + 1; + unsigned int num_peeks = 100; + char buf[15]; + int i; + + EXPECT_EQ(send(self->fd, test_str, send_len, 0), send_len); + for (i = 0; i < num_peeks; i++) { + EXPECT_NE(recv(self->cfd, buf, send_len, MSG_PEEK), -1); + EXPECT_EQ(memcmp(test_str, buf, send_len), 0); + memset(buf, 0, sizeof(buf)); + } + EXPECT_NE(recv(self->cfd, buf, send_len, 0), -1); + EXPECT_EQ(memcmp(test_str, buf, send_len), 0); +} + +TEST_F(tls, pollin) +{ + char const *test_str = "test_poll"; + struct pollfd fd = { 0, 0, 0 }; + char buf[10]; + int send_len = 10; + + EXPECT_EQ(send(self->fd, test_str, send_len, 0), send_len); + fd.fd = self->cfd; + fd.events = POLLIN; + + EXPECT_EQ(poll(&fd, 1, 20), 1); + EXPECT_EQ(fd.revents & POLLIN, 1); + EXPECT_EQ(recv(self->cfd, buf, send_len, 0), send_len); + /* Test timing out */ + EXPECT_EQ(poll(&fd, 1, 20), 0); +} + +TEST_F(tls, poll_wait) +{ + char const *test_str = "test_poll_wait"; + int send_len = strlen(test_str) + 1; + struct pollfd fd = { 0, 0, 0 }; + char recv_mem[15]; + + fd.fd = self->cfd; + fd.events = POLLIN; + EXPECT_EQ(send(self->fd, test_str, send_len, 0), send_len); + /* Set timeout to inf. secs */ + EXPECT_EQ(poll(&fd, 1, -1), 1); + EXPECT_EQ(fd.revents & POLLIN, 1); + EXPECT_EQ(recv(self->cfd, recv_mem, send_len, 0), send_len); +} + +TEST_F(tls, blocking) +{ + size_t data = 100000; + int res = fork(); + + EXPECT_NE(res, -1); + + if (res) { + /* parent */ + size_t left = data; + char buf[16384]; + int status; + int pid2; + + while (left) { + int res = send(self->fd, buf, + left > 16384 ? 16384 : left, 0); + + EXPECT_GE(res, 0); + left -= res; + } + + pid2 = wait(&status); + EXPECT_EQ(status, 0); + EXPECT_EQ(res, pid2); + } else { + /* child */ + size_t left = data; + char buf[16384]; + + while (left) { + int res = recv(self->cfd, buf, + left > 16384 ? 16384 : left, 0); + + EXPECT_GE(res, 0); + left -= res; + } + } +} + +TEST_F(tls, nonblocking) +{ + size_t data = 100000; + int sendbuf = 100; + int flags; + int res; + + flags = fcntl(self->fd, F_GETFL, 0); + fcntl(self->fd, F_SETFL, flags | O_NONBLOCK); + fcntl(self->cfd, F_SETFL, flags | O_NONBLOCK); + + /* Ensure nonblocking behavior by imposing a small send + * buffer. 
+ */ + EXPECT_EQ(setsockopt(self->fd, SOL_SOCKET, SO_SNDBUF, + &sendbuf, sizeof(sendbuf)), 0); + + res = fork(); + EXPECT_NE(res, -1); + + if (res) { + /* parent */ + bool eagain = false; + size_t left = data; + char buf[16384]; + int status; + int pid2; + + while (left) { + int res = send(self->fd, buf, + left > 16384 ? 16384 : left, 0); + + if (res == -1 && errno == EAGAIN) { + eagain = true; + usleep(10000); + continue; + } + EXPECT_GE(res, 0); + left -= res; + } + + EXPECT_TRUE(eagain); + pid2 = wait(&status); + + EXPECT_EQ(status, 0); + EXPECT_EQ(res, pid2); + } else { + /* child */ + bool eagain = false; + size_t left = data; + char buf[16384]; + + while (left) { + int res = recv(self->cfd, buf, + left > 16384 ? 16384 : left, 0); + + if (res == -1 && errno == EAGAIN) { + eagain = true; + usleep(10000); + continue; + } + EXPECT_GE(res, 0); + left -= res; + } + EXPECT_TRUE(eagain); + } +} + +TEST_F(tls, control_msg) +{ + if (self->notls) + return; + + char cbuf[CMSG_SPACE(sizeof(char))]; + char const *test_str = "test_read"; + int cmsg_len = sizeof(char); + char record_type = 100; + struct cmsghdr *cmsg; + struct msghdr msg; + int send_len = 10; + struct iovec vec; + char buf[10]; + + vec.iov_base = (char *)test_str; + vec.iov_len = 10; + memset(&msg, 0, sizeof(struct msghdr)); + msg.msg_iov = &vec; + msg.msg_iovlen = 1; + msg.msg_control = cbuf; + msg.msg_controllen = sizeof(cbuf); + cmsg = CMSG_FIRSTHDR(&msg); + cmsg->cmsg_level = SOL_TLS; + /* test sending non-record types. */ + cmsg->cmsg_type = TLS_SET_RECORD_TYPE; + cmsg->cmsg_len = CMSG_LEN(cmsg_len); + *CMSG_DATA(cmsg) = record_type; + msg.msg_controllen = cmsg->cmsg_len; + + EXPECT_EQ(sendmsg(self->fd, &msg, 0), send_len); + /* Should fail because we didn't provide a control message */ + EXPECT_EQ(recv(self->cfd, buf, send_len, 0), -1); + + vec.iov_base = buf; + EXPECT_EQ(recvmsg(self->cfd, &msg, 0), send_len); + cmsg = CMSG_FIRSTHDR(&msg); + EXPECT_NE(cmsg, NULL); + EXPECT_EQ(cmsg->cmsg_level, SOL_TLS); + EXPECT_EQ(cmsg->cmsg_type, TLS_GET_RECORD_TYPE); + record_type = *((unsigned char *)CMSG_DATA(cmsg)); + EXPECT_EQ(record_type, 100); + EXPECT_EQ(memcmp(buf, test_str, send_len), 0); +} + +TEST_HARNESS_MAIN diff --git a/tools/testing/selftests/tc-testing/tc-tests/actions/csum.json b/tools/testing/selftests/tc-testing/tc-tests/actions/csum.json index 3a2f51fc7fd4..a022792d392a 100644 --- a/tools/testing/selftests/tc-testing/tc-tests/actions/csum.json +++ b/tools/testing/selftests/tc-testing/tc-tests/actions/csum.json @@ -336,6 +336,30 @@ ] }, { + "id": "b10b", + "name": "Add all 7 csum actions", + "category": [ + "actions", + "csum" + ], + "setup": [ + [ + "$TC actions flush action csum", + 0, + 1, + 255 + ] + ], + "cmdUnderTest": "$TC actions add action csum icmp ip4h sctp igmp udplite udp tcp index 7", + "expExitCode": "0", + "verifyCmd": "$TC actions get action csum index 7", + "matchPattern": "action order [0-9]*: csum \\(iph, icmp, igmp, tcp, udp, udplite, sctp\\).*index 7 ref", + "matchCount": "1", + "teardown": [ + "$TC actions flush action csum" + ] + }, + { "id": "ce92", "name": "Add csum udp action with cookie", "category": [ diff --git a/tools/testing/selftests/tc-testing/tc-tests/actions/tunnel_key.json b/tools/testing/selftests/tc-testing/tc-tests/actions/tunnel_key.json new file mode 100644 index 000000000000..10b2d894e436 --- /dev/null +++ b/tools/testing/selftests/tc-testing/tc-tests/actions/tunnel_key.json @@ -0,0 +1,917 @@ +[ + { + "id": "2b11", + "name": "Add tunnel_key set action with mandatory parameters", 
+ "category": [ + "actions", + "tunnel_key" + ], + "setup": [ + [ + "$TC actions flush action tunnel_key", + 0, + 1, + 255 + ] + ], + "cmdUnderTest": "$TC actions add action tunnel_key set src_ip 10.10.10.1 dst_ip 20.20.20.2 id 1", + "expExitCode": "0", + "verifyCmd": "$TC actions list action tunnel_key", + "matchPattern": "action order [0-9]+: tunnel_key.*set.*src_ip 10.10.10.1.*dst_ip 20.20.20.2.*key_id 1", + "matchCount": "1", + "teardown": [ + "$TC actions flush action tunnel_key" + ] + }, + { + "id": "dc6b", + "name": "Add tunnel_key set action with missing mandatory src_ip parameter", + "category": [ + "actions", + "tunnel_key" + ], + "setup": [ + [ + "$TC actions flush action tunnel_key", + 0, + 1, + 255 + ] + ], + "cmdUnderTest": "$TC actions add action tunnel_key set dst_ip 20.20.20.2 id 100", + "expExitCode": "255", + "verifyCmd": "$TC actions list action tunnel_key", + "matchPattern": "action order [0-9]+: tunnel_key set.*dst_ip 20.20.20.2.*key_id 100", + "matchCount": "0", + "teardown": [ + [ + "$TC actions flush action tunnel_key", + 0, + 1, + 255 + ] + ] + }, + { + "id": "7f25", + "name": "Add tunnel_key set action with missing mandatory dst_ip parameter", + "category": [ + "actions", + "tunnel_key" + ], + "setup": [ + [ + "$TC actions flush action tunnel_key", + 0, + 1, + 255 + ] + ], + "cmdUnderTest": "$TC actions add action tunnel_key set src_ip 10.10.10.1 id 100", + "expExitCode": "255", + "verifyCmd": "$TC actions list action tunnel_key", + "matchPattern": "action order [0-9]+: tunnel_key set.*src_ip 10.10.10.1.*key_id 100", + "matchCount": "0", + "teardown": [ + [ + "$TC actions flush action tunnel_key", + 0, + 1, + 255 + ] + ] + }, + { + "id": "ba4e", + "name": "Add tunnel_key set action with missing mandatory id parameter", + "category": [ + "actions", + "tunnel_key" + ], + "setup": [ + [ + "$TC actions flush action tunnel_key", + 0, + 1, + 255 + ] + ], + "cmdUnderTest": "$TC actions add action tunnel_key set src_ip 10.10.10.1 dst_ip 20.20.20.2", + "expExitCode": "255", + "verifyCmd": "$TC actions list action tunnel_key", + "matchPattern": "action order [0-9]+: tunnel_key set.*src_ip 10.10.10.1.*dst_ip 20.20.20.2", + "matchCount": "0", + "teardown": [ + [ + "$TC actions flush action tunnel_key", + 0, + 1, + 255 + ] + ] + }, + { + "id": "a5e0", + "name": "Add tunnel_key set action with invalid src_ip parameter", + "category": [ + "actions", + "tunnel_key" + ], + "setup": [ + [ + "$TC actions flush action tunnel_key", + 0, + 1, + 255 + ] + ], + "cmdUnderTest": "$TC actions add action tunnel_key set src_ip 300.168.100.1 dst_ip 192.168.200.1 id 7 index 1", + "expExitCode": "1", + "verifyCmd": "$TC actions get action tunnel_key index 1", + "matchPattern": "action order [0-9]+: tunnel_key set.*src_ip 300.168.100.1.*dst_ip 192.168.200.1.*key_id 7.*index 1 ref", + "matchCount": "0", + "teardown": [ + [ + "$TC actions flush action tunnel_key", + 0, + 1, + 255 + ] + ] + }, + { + "id": "eaa8", + "name": "Add tunnel_key set action with invalid dst_ip parameter", + "category": [ + "actions", + "tunnel_key" + ], + "setup": [ + [ + "$TC actions flush action tunnel_key", + 0, + 1, + 255 + ] + ], + "cmdUnderTest": "$TC actions add action tunnel_key set src_ip 192.168.100.1 dst_ip 192.168.800.1 id 10 index 11", + "expExitCode": "1", + "verifyCmd": "$TC actions get action tunnel_key index 11", + "matchPattern": "action order [0-9]+: tunnel_key set.*src_ip 192.168.100.1.*dst_ip 192.168.800.1.*key_id 10.*index 11 ref", + "matchCount": "0", + "teardown": [ + [ + "$TC actions flush action 
tunnel_key", + 0, + 1, + 255 + ] + ] + }, + { + "id": "3b09", + "name": "Add tunnel_key set action with invalid id parameter", + "category": [ + "actions", + "tunnel_key" + ], + "setup": [ + [ + "$TC actions flush action tunnel_key", + 0, + 1, + 255 + ] + ], + "cmdUnderTest": "$TC actions add action tunnel_key set src_ip 1.1.1.1 dst_ip 2.2.2.2 id 112233445566778899 index 1", + "expExitCode": "255", + "verifyCmd": "$TC actions get action tunnel_key index 1", + "matchPattern": "action order [0-9]+: tunnel_key set.*src_ip 1.1.1.1.*dst_ip 2.2.2.2.*key_id 112233445566778899.*index 1 ref", + "matchCount": "0", + "teardown": [ + [ + "$TC actions flush action tunnel_key", + 0, + 1, + 255 + ] + ] + }, + { + "id": "9625", + "name": "Add tunnel_key set action with invalid dst_port parameter", + "category": [ + "actions", + "tunnel_key" + ], + "setup": [ + [ + "$TC actions flush action tunnel_key", + 0, + 1, + 255 + ] + ], + "cmdUnderTest": "$TC actions add action tunnel_key set src_ip 1.1.1.1 dst_ip 2.2.2.2 id 11 dst_port 998877 index 1", + "expExitCode": "255", + "verifyCmd": "$TC actions get action tunnel_key index 1", + "matchPattern": "action order [0-9]+: tunnel_key set.*src_ip 1.1.1.1.*dst_ip 2.2.2.2.*key_id 11.*dst_port 998877.*index 1 ref", + "matchCount": "0", + "teardown": [ + [ + "$TC actions flush action tunnel_key", + 0, + 1, + 255 + ] + ] + }, + { + "id": "05af", + "name": "Add tunnel_key set action with optional dst_port parameter", + "category": [ + "actions", + "tunnel_key" + ], + "setup": [ + [ + "$TC actions flush action tunnel_key", + 0, + 1, + 255 + ] + ], + "cmdUnderTest": "$TC actions add action tunnel_key set src_ip 192.168.100.1 dst_ip 192.168.200.1 id 789 dst_port 4000 index 10", + "expExitCode": "0", + "verifyCmd": "$TC actions get action tunnel_key index 10", + "matchPattern": "action order [0-9]+: tunnel_key.*set.*src_ip 192.168.100.1.*dst_ip 192.168.200.1.*key_id 789.*dst_port 4000.*index 10 ref", + "matchCount": "1", + "teardown": [ + "$TC actions flush action tunnel_key" + ] + }, + { + "id": "da80", + "name": "Add tunnel_key set action with index at 32-bit maximum", + "category": [ + "actions", + "tunnel_key" + ], + "setup": [ + [ + "$TC actions flush action tunnel_key", + 0, + 1, + 255 + ] + ], + "cmdUnderTest": "$TC actions add action tunnel_key set src_ip 1.1.1.1 dst_ip 2.2.2.2 id 11 index 4294967295", + "expExitCode": "0", + "verifyCmd": "$TC actions get action tunnel_key index 4294967295", + "matchPattern": "action order [0-9]+: tunnel_key.*set.*src_ip 1.1.1.1.*dst_ip 2.2.2.2.*id 11.*index 4294967295 ref", + "matchCount": "1", + "teardown": [ + "$TC actions flush action tunnel_key" + ] + }, + { + "id": "d407", + "name": "Add tunnel_key set action with index exceeding 32-bit maximum", + "category": [ + "actions", + "tunnel_key" + ], + "setup": [ + [ + "$TC actions flush action tunnel_key", + 0, + 1, + 255 + ] + ], + "cmdUnderTest": "$TC actions add action tunnel_key set src_ip 1.1.1.1 dst_ip 2.2.2.2 id 11 index 4294967295678", + "expExitCode": "255", + "verifyCmd": "$TC actions get action tunnel_key index 4294967295678", + "matchPattern": "action order [0-9]+: tunnel_key set.*index 4294967295678 ref", + "matchCount": "0", + "teardown": [ + [ + "$TC actions flush action tunnel_key", + 0, + 1, + 255 + ] + ] + }, + { + "id": "5cba", + "name": "Add tunnel_key set action with id value at 32-bit maximum", + "category": [ + "actions", + "tunnel_key" + ], + "setup": [ + [ + "$TC actions flush action tunnel_key", + 0, + 1, + 255 + ] + ], + "cmdUnderTest": "$TC actions add 
action tunnel_key set src_ip 1.1.1.1 dst_ip 2.2.2.2 id 4294967295 index 1", + "expExitCode": "0", + "verifyCmd": "$TC actions get action tunnel_key index 1", + "matchPattern": "action order [0-9]+: tunnel_key.*set.*src_ip 1.1.1.1.*dst_ip 2.2.2.2.*key_id 4294967295.*index 1", + "matchCount": "1", + "teardown": [ + "$TC actions flush action tunnel_key" + ] + }, + { + "id": "e84a", + "name": "Add tunnel_key set action with id value exceeding 32-bit maximum", + "category": [ + "actions", + "tunnel_key" + ], + "setup": [ + [ + "$TC actions flush action tunnel_key", + 0, + 1, + 255 + ] + ], + "cmdUnderTest": "$TC actions add action tunnel_key set src_ip 1.1.1.1 dst_ip 2.2.2.2 id 42949672955 index 1", + "expExitCode": "255", + "verifyCmd": "$TC actions get action tunnel_key index 4294967295", + "matchPattern": "action order [0-9]+: tunnel_key.*set.*src_ip 1.1.1.1.*dst_ip 2.2.2.2.*key_id 42949672955.*index 1", + "matchCount": "0", + "teardown": [ + [ + "$TC actions flush action tunnel_key", + 0, + 1, + 255 + ] + ] + }, + { + "id": "9c19", + "name": "Add tunnel_key set action with dst_port value at 16-bit maximum", + "category": [ + "actions", + "tunnel_key" + ], + "setup": [ + [ + "$TC actions flush action tunnel_key", + 0, + 1, + 255 + ] + ], + "cmdUnderTest": "$TC actions add action tunnel_key set src_ip 1.1.1.1 dst_ip 2.2.2.2 id 429 dst_port 65535 index 1", + "expExitCode": "0", + "verifyCmd": "$TC actions get action tunnel_key index 1", + "matchPattern": "action order [0-9]+: tunnel_key.*set.*src_ip 1.1.1.1.*dst_ip 2.2.2.2.*key_id 429.*dst_port 65535.*index 1", + "matchCount": "1", + "teardown": [ + "$TC actions flush action tunnel_key" + ] + }, + { + "id": "3bd9", + "name": "Add tunnel_key set action with dst_port value exceeding 16-bit maximum", + "category": [ + "actions", + "tunnel_key" + ], + "setup": [ + [ + "$TC actions flush action tunnel_key", + 0, + 1, + 255 + ] + ], + "cmdUnderTest": "$TC actions add action tunnel_key set src_ip 1.1.1.1 dst_ip 2.2.2.2 id 429 dst_port 65535789 index 1", + "expExitCode": "255", + "verifyCmd": "$TC actions get action tunnel_key index 1", + "matchPattern": "action order [0-9]+: tunnel_key.*set.*src_ip 1.1.1.1.*dst_ip 2.2.2.2.*key_id 429.*dst_port 65535789.*index 1", + "matchCount": "0", + "teardown": [ + [ + "$TC actions flush action tunnel_key", + 0, + 1, + 255 + ] + ] + }, + { + "id": "68e2", + "name": "Add tunnel_key unset action", + "category": [ + "actions", + "tunnel_key" + ], + "setup": [ + [ + "$TC actions flush action tunnel_key", + 0, + 1, + 255 + ] + ], + "cmdUnderTest": "$TC actions add action tunnel_key unset index 1", + "expExitCode": "0", + "verifyCmd": "$TC actions get action tunnel_key index 1", + "matchPattern": "action order [0-9]+: tunnel_key.*unset.*index 1 ref", + "matchCount": "1", + "teardown": [ + "$TC actions flush action tunnel_key" + ] + }, + { + "id": "6192", + "name": "Add tunnel_key unset continue action", + "category": [ + "actions", + "tunnel_key" + ], + "setup": [ + [ + "$TC actions flush action tunnel_key", + 0, + 1, + 255 + ] + ], + "cmdUnderTest": "$TC actions add action tunnel_key unset continue index 1", + "expExitCode": "0", + "verifyCmd": "$TC actions get action tunnel_key index 1", + "matchPattern": "action order [0-9]+: tunnel_key.*unset continue.*index 1 ref", + "matchCount": "1", + "teardown": [ + "$TC actions flush action tunnel_key" + ] + }, + { + "id": "061d", + "name": "Add tunnel_key set continue action with cookie", + "category": [ + "actions", + "tunnel_key" + ], + "setup": [ + [ + "$TC actions flush 
action tunnel_key", + 0, + 1, + 255 + ] + ], + "cmdUnderTest": "$TC actions add action tunnel_key set src_ip 192.168.10.1 dst_ip 192.168.20.2 id 123 continue index 1 cookie aa11bb22cc33dd44ee55ff66aa11b1b2", + "expExitCode": "0", + "verifyCmd": "$TC actions get action tunnel_key index 1", + "matchPattern": "action order [0-9]+: tunnel_key.*set.*src_ip 192.168.10.1.*dst_ip 192.168.20.2.*key_id 123.*csum continue.*index 1.*cookie aa11bb22cc33dd44ee55ff66aa11b1b2", + "matchCount": "1", + "teardown": [ + "$TC actions flush action tunnel_key" + ] + }, + { + "id": "8acb", + "name": "Add tunnel_key set continue action with invalid cookie", + "category": [ + "actions", + "tunnel_key" + ], + "setup": [ + [ + "$TC actions flush action tunnel_key", + 0, + 1, + 255 + ] + ], + "cmdUnderTest": "$TC actions add action tunnel_key set src_ip 192.168.10.1 dst_ip 192.168.20.2 id 123 continue index 1 cookie aa11bb22cc33dd44ee55ff66aa11b1b2777888", + "expExitCode": "255", + "verifyCmd": "$TC actions get action tunnel_key index 1", + "matchPattern": "action order [0-9]+: tunnel_key.*set.*src_ip 192.168.10.1.*dst_ip 192.168.20.2.*key_id 123.*csum continue.*index 1.*cookie aa11bb22cc33dd44ee55ff66aa11b1b2777888", + "matchCount": "0", + "teardown": [ + [ + "$TC actions flush action tunnel_key", + 0, + 1, + 255 + ] + ] + }, + { + "id": "a07e", + "name": "Add tunnel_key action with no set/unset command specified", + "category": [ + "actions", + "tunnel_key" + ], + "setup": [ + [ + "$TC actions flush action tunnel_key", + 0, + 1, + 255 + ] + ], + "cmdUnderTest": "$TC actions add action tunnel_key src_ip 10.10.10.1 dst_ip 20.20.20.2 id 1", + "expExitCode": "255", + "verifyCmd": "$TC actions get action tunnel_key index 1", + "matchPattern": "action order [0-9]+: tunnel_key.*src_ip 10.10.10.1.*dst_ip 20.20.20.2.*key_id 1", + "matchCount": "0", + "teardown": [ + [ + "$TC actions flush action tunnel_key", + 0, + 1, + 255 + ] + ] + }, + { + "id": "b227", + "name": "Add tunnel_key action with csum option", + "category": [ + "actions", + "tunnel_key" + ], + "setup": [ + [ + "$TC actions flush action tunnel_key", + 0, + 1, + 255 + ] + ], + "cmdUnderTest": "$TC actions add action tunnel_key set src_ip 10.10.10.1 dst_ip 20.20.20.2 id 1 csum index 99", + "expExitCode": "0", + "verifyCmd": "$TC actions get action tunnel_key index 99", + "matchPattern": "action order [0-9]+: tunnel_key.*src_ip 10.10.10.1.*dst_ip 20.20.20.2.*key_id 1.*csum pipe.*index 99", + "matchCount": "1", + "teardown": [ + "$TC actions flush action tunnel_key" + ] + }, + { + "id": "58a7", + "name": "Add tunnel_key action with nocsum option", + "category": [ + "actions", + "tunnel_key" + ], + "setup": [ + [ + "$TC actions flush action tunnel_key", + 0, + 1, + 255 + ] + ], + "cmdUnderTest": "$TC actions add action tunnel_key set src_ip 10.10.10.1 dst_ip 10.10.10.2 id 7823 nocsum index 234", + "expExitCode": "0", + "verifyCmd": "$TC actions get action tunnel_key index 234", + "matchPattern": "action order [0-9]+: tunnel_key.*src_ip 10.10.10.1.*dst_ip 10.10.10.2.*key_id 7823.*nocsum pipe.*index 234", + "matchCount": "1", + "teardown": [ + "$TC actions flush action tunnel_key" + ] + }, + { + "id": "2575", + "name": "Add tunnel_key action with not-supported parameter", + "category": [ + "actions", + "tunnel_key" + ], + "setup": [ + [ + "$TC actions flush action tunnel_key", + 0, + 1, + 255 + ] + ], + "cmdUnderTest": "$TC actions add action tunnel_key set src_ip 10.10.10.1 dst_ip 10.10.10.2 id 7 foobar 999 index 4", + "expExitCode": "255", + "verifyCmd": "$TC actions 
get action tunnel_key index 4", + "matchPattern": "action order [0-9]+: tunnel_key.*src_ip 10.10.10.1.*dst_ip 10.10.10.2.*key_id 7.*foobar 999.*index 4", + "matchCount": "0", + "teardown": [ + [ + "$TC actions flush action tunnel_key", + 0, + 1, + 255 + ] + ] + }, + { + "id": "7a88", + "name": "Add tunnel_key action with cookie parameter", + "category": [ + "actions", + "tunnel_key" + ], + "setup": [ + [ + "$TC actions flush action tunnel_key", + 0, + 1, + 255 + ] + ], + "cmdUnderTest": "$TC actions add action tunnel_key set src_ip 10.10.10.1 dst_ip 10.10.10.2 id 7 index 4 cookie aa11bb22cc33dd44ee55ff66aa11b1b2", + "expExitCode": "0", + "verifyCmd": "$TC actions get action tunnel_key index 4", + "matchPattern": "action order [0-9]+: tunnel_key.*set.*src_ip 10.10.10.1.*dst_ip 10.10.10.2.*key_id 7.*dst_port 0.*csum pipe.*index 4 ref.*cookie aa11bb22cc33dd44ee55ff66aa11b1b2", + "matchCount": "1", + "teardown": [ + "$TC actions flush action tunnel_key" + ] + }, + { + "id": "4f20", + "name": "Add tunnel_key action with a single geneve option parameter", + "category": [ + "actions", + "tunnel_key" + ], + "setup": [ + [ + "$TC actions flush action tunnel_key", + 0, + 1, + 255 + ] + ], + "cmdUnderTest": "$TC actions add action tunnel_key set src_ip 1.1.1.1 dst_ip 2.2.2.2 id 42 dst_port 6081 geneve_opts 0102:80:00880022 index 1", + "expExitCode": "0", + "verifyCmd": "$TC actions get action tunnel_key index 1", + "matchPattern": "action order [0-9]+: tunnel_key.*set.*src_ip 1.1.1.1.*dst_ip 2.2.2.2.*key_id 42.*dst_port 6081.*geneve_opt 0102:80:00880022.*index 1", + "matchCount": "1", + "teardown": [ + "$TC actions flush action tunnel_key" + ] + }, + { + "id": "e33d", + "name": "Add tunnel_key action with multiple geneve options parameter", + "category": [ + "actions", + "tunnel_key" + ], + "setup": [ + [ + "$TC actions flush action tunnel_key", + 0, + 1, + 255 + ] + ], + "cmdUnderTest": "$TC actions add action tunnel_key set src_ip 1.1.1.1 dst_ip 2.2.2.2 id 42 dst_port 6081 geneve_opts 0102:80:00880022,0408:42:0040007611223344,0111:02:1020304011223344 index 1", + "expExitCode": "0", + "verifyCmd": "$TC actions get action tunnel_key index 1", + "matchPattern": "action order [0-9]+: tunnel_key.*set.*src_ip 1.1.1.1.*dst_ip 2.2.2.2.*key_id 42.*dst_port 6081.*geneve_opt 0102:80:00880022,0408:42:0040007611223344,0111:02:1020304011223344.*index 1", + "matchCount": "1", + "teardown": [ + "$TC actions flush action tunnel_key" + ] + }, + { + "id": "0778", + "name": "Add tunnel_key action with invalid class geneve option parameter", + "category": [ + "actions", + "tunnel_key" + ], + "setup": [ + [ + "$TC actions flush action tunnel_key", + 0, + 1, + 255 + ] + ], + "cmdUnderTest": "$TC actions add action tunnel_key set src_ip 1.1.1.1 dst_ip 2.2.2.2 id 42 dst_port 6081 geneve_opts 824212:80:00880022 index 1", + "expExitCode": "255", + "verifyCmd": "$TC actions get action tunnel_key index 1", + "matchPattern": "action order [0-9]+: tunnel_key.*set.*src_ip 1.1.1.1.*dst_ip 2.2.2.2.*key_id 42.*dst_port 6081.*geneve_opt 824212:80:00880022.*index 1", + "matchCount": "0", + "teardown": [ + "$TC actions flush action tunnel_key" + ] + }, + { + "id": "4ae8", + "name": "Add tunnel_key action with invalid type geneve option parameter", + "category": [ + "actions", + "tunnel_key" + ], + "setup": [ + [ + "$TC actions flush action tunnel_key", + 0, + 1, + 255 + ] + ], + "cmdUnderTest": "$TC actions add action tunnel_key set src_ip 1.1.1.1 dst_ip 2.2.2.2 id 42 dst_port 6081 geneve_opts 0102:4224:00880022 index 1", + 
"expExitCode": "255", + "verifyCmd": "$TC actions get action tunnel_key index 1", + "matchPattern": "action order [0-9]+: tunnel_key.*set.*src_ip 1.1.1.1.*dst_ip 2.2.2.2.*key_id 42.*dst_port 6081.*geneve_opt 0102:4224:00880022.*index 1", + "matchCount": "0", + "teardown": [ + "$TC actions flush action tunnel_key" + ] + }, + { + "id": "4039", + "name": "Add tunnel_key action with short data length geneve option parameter", + "category": [ + "actions", + "tunnel_key" + ], + "setup": [ + [ + "$TC actions flush action tunnel_key", + 0, + 1, + 255 + ] + ], + "cmdUnderTest": "$TC actions add action tunnel_key set src_ip 1.1.1.1 dst_ip 2.2.2.2 id 42 dst_port 6081 geneve_opts 0102:80:4288 index 1", + "expExitCode": "255", + "verifyCmd": "$TC actions get action tunnel_key index 1", + "matchPattern": "action order [0-9]+: tunnel_key.*set.*src_ip 1.1.1.1.*dst_ip 2.2.2.2.*key_id 42.*dst_port 6081.*geneve_opt 0102:80:4288.*index 1", + "matchCount": "0", + "teardown": [ + "$TC actions flush action tunnel_key" + ] + }, + { + "id": "26a6", + "name": "Add tunnel_key action with non-multiple of 4 data length geneve option parameter", + "category": [ + "actions", + "tunnel_key" + ], + "setup": [ + [ + "$TC actions flush action tunnel_key", + 0, + 1, + 255 + ] + ], + "cmdUnderTest": "$TC actions add action tunnel_key set src_ip 1.1.1.1 dst_ip 2.2.2.2 id 42 dst_port 6081 geneve_opts 0102:80:4288428822 index 1", + "expExitCode": "255", + "verifyCmd": "$TC actions get action tunnel_key index 1", + "matchPattern": "action order [0-9]+: tunnel_key.*set.*src_ip 1.1.1.1.*dst_ip 2.2.2.2.*key_id 42.*dst_port 6081.*geneve_opt 0102:80:4288428822.*index 1", + "matchCount": "0", + "teardown": [ + "$TC actions flush action tunnel_key" + ] + }, + { + "id": "f44d", + "name": "Add tunnel_key action with incomplete geneve options parameter", + "category": [ + "actions", + "tunnel_key" + ], + "setup": [ + [ + "$TC actions flush action tunnel_key", + 0, + 1, + 255 + ] + ], + "cmdUnderTest": "$TC actions add action tunnel_key set src_ip 1.1.1.1 dst_ip 2.2.2.2 id 42 dst_port 6081 geneve_opts 0102:80:00880022,0408:42: index 1", + "expExitCode": "255", + "verifyCmd": "$TC actions get action tunnel_key index 1", + "matchPattern": "action order [0-9]+: tunnel_key.*set.*src_ip 1.1.1.1.*dst_ip 2.2.2.2.*key_id 42.*dst_port 6081.*geneve_opt 0102:80:00880022,0408:42:.*index 1", + "matchCount": "0", + "teardown": [ + "$TC actions flush action tunnel_key" + ] + }, + { + "id": "7afc", + "name": "Replace tunnel_key set action with all parameters", + "category": [ + "actions", + "tunnel_key" + ], + "setup": [ + [ + "$TC actions flush action tunnel_key", + 0, + 1, + 255 + ], + "$TC actions add action tunnel_key set src_ip 10.10.10.1 dst_ip 20.20.20.2 dst_port 3128 csum id 1 index 1" + ], + "cmdUnderTest": "$TC actions replace action tunnel_key set src_ip 11.11.11.1 dst_ip 21.21.21.2 dst_port 3129 nocsum id 11 index 1", + "expExitCode": "0", + "verifyCmd": "$TC actions get action tunnel_key index 1", + "matchPattern": "action order [0-9]+: tunnel_key.*set.*src_ip 11.11.11.1.*dst_ip 21.21.21.2.*key_id 11.*dst_port 3129.*nocsum pipe.*index 1", + "matchCount": "1", + "teardown": [ + "$TC actions flush action tunnel_key" + ] + }, + { + "id": "364d", + "name": "Replace tunnel_key set action with all parameters and cookie", + "category": [ + "actions", + "tunnel_key" + ], + "setup": [ + [ + "$TC actions flush action tunnel_key", + 0, + 1, + 255 + ], + "$TC actions add action tunnel_key set src_ip 10.10.10.1 dst_ip 20.20.20.2 dst_port 3128 nocsum id 1 
index 1 cookie aabbccddeeff112233445566778800a" + ], + "cmdUnderTest": "$TC actions replace action tunnel_key set src_ip 11.11.11.1 dst_ip 21.21.21.2 dst_port 3129 id 11 csum reclassify index 1 cookie a1b1c1d1", + "expExitCode": "0", + "verifyCmd": "$TC actions get action tunnel_key index 1", + "matchPattern": "action order [0-9]+: tunnel_key.*set.*src_ip 11.11.11.1.*dst_ip 21.21.21.2.*key_id 11.*dst_port 3129.*csum reclassify.*index 1.*cookie a1b1c1d1", + "matchCount": "1", + "teardown": [ + "$TC actions flush action tunnel_key" + ] + }, + { + "id": "937c", + "name": "Fetch all existing tunnel_key actions", + "category": [ + "actions", + "tunnel_key" + ], + "setup": [ + [ + "$TC actions flush action tunnel_key", + 0, + 1, + 255 + ], + "$TC actions add action tunnel_key set src_ip 10.10.10.1 dst_ip 20.20.20.2 dst_port 3128 nocsum id 1 pipe index 1", + "$TC actions add action tunnel_key set src_ip 11.10.10.1 dst_ip 21.20.20.2 dst_port 3129 csum id 2 jump 10 index 2", + "$TC actions add action tunnel_key set src_ip 12.10.10.1 dst_ip 22.20.20.2 dst_port 3130 csum id 3 pass index 3", + "$TC actions add action tunnel_key set src_ip 13.10.10.1 dst_ip 23.20.20.2 dst_port 3131 nocsum id 4 continue index 4" + ], + "cmdUnderTest": "$TC actions list action tunnel_key", + "expExitCode": "0", + "verifyCmd": "$TC actions list action tunnel_key", + "matchPattern": "action order [0-9]+: tunnel_key.*set.*src_ip 10.10.10.1.*dst_ip 20.20.20.2.*key_id 1.*dst_port 3128.*nocsum pipe.*index 1.*set.*src_ip 11.10.10.1.*dst_ip 21.20.20.2.*key_id 2.*dst_port 3129.*csum jump 10.*index 2.*set.*src_ip 12.10.10.1.*dst_ip 22.20.20.2.*key_id 3.*dst_port 3130.*csum pass.*index 3.*set.*src_ip 13.10.10.1.*dst_ip 23.20.20.2.*key_id 4.*dst_port 3131.*nocsum continue.*index 4", + "matchCount": "1", + "teardown": [ + "$TC actions flush action tunnel_key" + ] + }, + { + "id": "6783", + "name": "Flush all existing tunnel_key actions", + "category": [ + "actions", + "tunnel_key" + ], + "setup": [ + [ + "$TC actions flush action tunnel_key", + 0, + 1, + 255 + ], + "$TC actions add action tunnel_key set src_ip 10.10.10.1 dst_ip 20.20.20.2 dst_port 3128 nocsum id 1 pipe index 1", + "$TC actions add action tunnel_key set src_ip 11.10.10.1 dst_ip 21.20.20.2 dst_port 3129 csum id 2 reclassify index 2", + "$TC actions add action tunnel_key set src_ip 12.10.10.1 dst_ip 22.20.20.2 dst_port 3130 csum id 3 pass index 3", + "$TC actions add action tunnel_key set src_ip 13.10.10.1 dst_ip 23.20.20.2 dst_port 3131 nocsum id 4 continue index 4" + ], + "cmdUnderTest": "$TC actions flush action tunnel_key", + "expExitCode": "0", + "verifyCmd": "$TC actions list action tunnel_key", + "matchPattern": "action order [0-9]+:.*", + "matchCount": "0", + "teardown": [ + "$TC actions flush action tunnel_key" + ] + } +] diff --git a/tools/testing/selftests/tc-testing/tc-tests/filters/fw.json b/tools/testing/selftests/tc-testing/tc-tests/filters/fw.json new file mode 100644 index 000000000000..3b97cfd7e0f8 --- /dev/null +++ b/tools/testing/selftests/tc-testing/tc-tests/filters/fw.json @@ -0,0 +1,1049 @@ +[ + { + "id": "901f", + "name": "Add fw filter with prio at 16-bit maximum", + "category": [ + "filter", + "fw" + ], + "setup": [ + "$TC qdisc add dev $DEV1 ingress" + ], + "cmdUnderTest": "$TC filter add dev $DEV1 parent ffff: handle 1 prio 65535 fw action ok", + "expExitCode": "0", + "verifyCmd": "$TC filter get dev $DEV1 parent ffff: handle 1 prio 65535 protocol all fw", + "matchPattern": "pref 65535 fw.*handle 0x1.*gact action pass", + "matchCount": "1", + 
"teardown": [ + "$TC qdisc del dev $DEV1 ingress" + ] + }, + { + "id": "51e2", + "name": "Add fw filter with prio exceeding 32-bit maxixum", + "category": [ + "filter", + "fw" + ], + "setup": [ + "$TC qdisc add dev $DEV1 ingress" + ], + "cmdUnderTest": "$TC filter add dev $DEV1 parent ffff: handle 1 prio 65536 fw action ok", + "expExitCode": "255", + "verifyCmd": "$TC filter get dev $DEV1 parent ffff: handle 1 prio 65536 protocol all fw", + "matchPattern": "pref 65536 fw.*handle 0x1.*gact action pass", + "matchCount": "0", + "teardown": [ + "$TC qdisc del dev $DEV1 ingress" + ] + }, + { + "id": "d987", + "name": "Add fw filter with action ok", + "category": [ + "filter", + "fw" + ], + "setup": [ + "$TC qdisc add dev $DEV1 ingress" + ], + "cmdUnderTest": "$TC filter add dev $DEV1 parent ffff: handle 1 prio 1 fw action ok", + "expExitCode": "0", + "verifyCmd": "$TC filter get dev $DEV1 parent ffff: handle 1 prio 1 protocol all fw", + "matchPattern": "handle 0x1.*gact action pass", + "matchCount": "1", + "teardown": [ + "$TC qdisc del dev $DEV1 ingress" + ] + }, + { + "id": "affe", + "name": "Add fw filter with action continue", + "category": [ + "filter", + "fw" + ], + "setup": [ + "$TC qdisc add dev $DEV1 ingress" + ], + "cmdUnderTest": "$TC filter add dev $DEV1 parent ffff: handle 1 prio 1 fw action continue", + "expExitCode": "0", + "verifyCmd": "$TC filter get dev $DEV1 parent ffff: handle 1 prio 1 protocol all fw", + "matchPattern": "handle 0x1.*gact action continue", + "matchCount": "1", + "teardown": [ + "$TC qdisc del dev $DEV1 ingress" + ] + }, + { + "id": "28bc", + "name": "Add fw filter with action pipe", + "category": [ + "filter", + "fw" + ], + "setup": [ + "$TC qdisc add dev $DEV1 ingress" + ], + "cmdUnderTest": "$TC filter add dev $DEV1 parent ffff: handle 1 prio 1 fw action pipe", + "expExitCode": "0", + "verifyCmd": "$TC filter get dev $DEV1 parent ffff: handle 1 prio 1 protocol all fw", + "matchPattern": "handle 0x1.*gact action pipe", + "matchCount": "1", + "teardown": [ + "$TC qdisc del dev $DEV1 ingress" + ] + }, + { + "id": "8da2", + "name": "Add fw filter with action drop", + "category": [ + "filter", + "fw" + ], + "setup": [ + "$TC qdisc add dev $DEV1 ingress" + ], + "cmdUnderTest": "$TC filter add dev $DEV1 parent ffff: handle 1 prio 1 fw action drop", + "expExitCode": "0", + "verifyCmd": "$TC filter get dev $DEV1 parent ffff: handle 1 protocol all prio 1 fw", + "matchPattern": "handle 0x1.*gact action drop", + "matchCount": "1", + "teardown": [ + "$TC qdisc del dev $DEV1 ingress" + ] + }, + { + "id": "9436", + "name": "Add fw filter with action reclassify", + "category": [ + "filter", + "fw" + ], + "setup": [ + "$TC qdisc add dev $DEV1 ingress" + ], + "cmdUnderTest": "$TC filter add dev $DEV1 parent ffff: handle 1 prio 1 fw action reclassify", + "expExitCode": "0", + "verifyCmd": "$TC filter get dev $DEV1 parent ffff: handle 1 prio 1 protocol all fw", + "matchPattern": "handle 0x1.*gact action reclassify", + "matchCount": "1", + "teardown": [ + "$TC qdisc del dev $DEV1 ingress" + ] + }, + { + "id": "95bb", + "name": "Add fw filter with action jump 10", + "category": [ + "filter", + "fw" + ], + "setup": [ + "$TC qdisc add dev $DEV1 ingress" + ], + "cmdUnderTest": "$TC filter add dev $DEV1 parent ffff: handle 1 prio 1 fw action jump 10", + "expExitCode": "0", + "verifyCmd": "$TC filter get dev $DEV1 parent ffff: handle 1 prio 1 protocol all fw", + "matchPattern": "handle 0x1.*gact action jump 10", + "matchCount": "1", + "teardown": [ + "$TC qdisc del dev $DEV1 
ingress" + ] + }, + { + "id": "3d74", + "name": "Add fw filter with action goto chain 5", + "category": [ + "filter", + "fw" + ], + "setup": [ + "$TC qdisc add dev $DEV1 ingress" + ], + "cmdUnderTest": "$TC filter add dev $DEV1 parent ffff: handle 1 prio 1 fw action goto chain 5", + "expExitCode": "0", + "verifyCmd": "$TC filter get dev $DEV1 parent ffff: handle 1 prio 1 protocol all fw", + "matchPattern": "handle 0x1.*gact action goto chain 5", + "matchCount": "1", + "teardown": [ + "$TC qdisc del dev $DEV1 ingress" + ] + }, + { + "id": "eb8f", + "name": "Add fw filter with invalid action", + "category": [ + "filter", + "fw" + ], + "setup": [ + "$TC qdisc add dev $DEV1 ingress" + ], + "cmdUnderTest": "$TC filter add dev $DEV1 parent ffff: handle 1 prio 1 fw action pump", + "expExitCode": "255", + "verifyCmd": "$TC filter get dev $DEV1 parent ffff: handle 1 prio 1 protocol all fw", + "matchPattern": "handle 0x1.*gact action pump", + "matchCount": "0", + "teardown": [ + "$TC qdisc del dev $DEV1 ingress" + ] + }, + { + "id": "6a79", + "name": "Add fw filter with missing mandatory action", + "category": [ + "filter", + "fw" + ], + "setup": [ + "$TC qdisc add dev $DEV1 ingress" + ], + "cmdUnderTest": "$TC filter add dev $DEV1 parent ffff: handle 1 prio 1 fw", + "expExitCode": "2", + "verifyCmd": "$TC filter get dev $DEV1 parent ffff: handle 1 prio 1 protocol all fw", + "matchPattern": "filter protocol all pref [0-9]+ fw.*handle 0x1", + "matchCount": "0", + "teardown": [ + "$TC qdisc del dev $DEV1 ingress" + ] + }, + { + "id": "8298", + "name": "Add fw filter with cookie", + "category": [ + "filter", + "fw" + ], + "setup": [ + "$TC qdisc add dev $DEV1 ingress" + ], + "cmdUnderTest": "$TC filter add dev $DEV1 parent ffff: handle 1 prio 2 fw action pipe cookie aa11bb22cc33dd44ee55ff66aa11b1b2", + "expExitCode": "0", + "verifyCmd": "$TC filter get dev $DEV1 parent ffff: handle 1 prio 2 protocol all fw", + "matchPattern": "pref 2 fw.*handle 0x1.*gact action pipe.*cookie aa11bb22cc33dd44ee55ff66aa11b1b2", + "matchCount": "1", + "teardown": [ + "$TC qdisc del dev $DEV1 ingress" + ] + }, + { + "id": "a88c", + "name": "Add fw filter with invalid cookie", + "category": [ + "filter", + "fw" + ], + "setup": [ + "$TC qdisc add dev $DEV1 ingress" + ], + "cmdUnderTest": "$TC filter add dev $DEV1 parent ffff: handle 1 prio 2 fw action continue cookie aa11bb22cc33dd44ee55ff66aa11b1b2777888", + "expExitCode": "255", + "verifyCmd": "$TC filter get dev $DEV1 parent ffff: handle 1 prio 2 protocol all fw", + "matchPattern": "pref 2 fw.*handle 0x1.*gact action continue.*cookie aa11bb22cc33dd44ee55ff66aa11b1b2777888", + "matchCount": "0", + "teardown": [ + "$TC qdisc del dev $DEV1 ingress" + ] + }, + { + "id": "10f6", + "name": "Add fw filter with handle in hex", + "category": [ + "filter", + "fw" + ], + "setup": [ + "$TC qdisc add dev $DEV1 ingress" + ], + "cmdUnderTest": "$TC filter add dev $DEV1 parent ffff: handle 0xa1b2ff prio 1 fw action ok", + "expExitCode": "0", + "verifyCmd": "$TC filter get dev $DEV1 parent ffff: handle 0xa1b2ff prio 1 protocol all fw", + "matchPattern": "fw.*handle 0xa1b2ff.*gact action pass", + "matchCount": "1", + "teardown": [ + "$TC qdisc del dev $DEV1 ingress" + ] + }, + { + "id": "9d51", + "name": "Add fw filter with handle at 32-bit maximum", + "category": [ + "filter", + "fw" + ], + "setup": [ + "$TC qdisc add dev $DEV1 ingress" + ], + "cmdUnderTest": "$TC filter add dev $DEV1 parent ffff: handle 4294967295 prio 1 fw action ok", + "expExitCode": "0", + "verifyCmd": "$TC filter get 
dev $DEV1 parent ffff: handle 4294967295 prio 1 protocol all fw", + "matchPattern": "fw.*handle 0xffffffff.*gact action pass", + "matchCount": "1", + "teardown": [ + "$TC qdisc del dev $DEV1 ingress" + ] + }, + { + "id": "d939", + "name": "Add fw filter with handle exceeding 32-bit maximum", + "category": [ + "filter", + "fw" + ], + "setup": [ + "$TC qdisc add dev $DEV1 ingress" + ], + "cmdUnderTest": "$TC filter add dev $DEV1 parent ffff: handle 4294967296 prio 1 fw action ok", + "expExitCode": "1", + "verifyCmd": "$TC filter get dev $DEV1 parent ffff: handle 4294967296 prio 1 protocol all fw", + "matchPattern": "fw.*handle 0x.*gact action pass", + "matchCount": "0", + "teardown": [ + "$TC qdisc del dev $DEV1 ingress" + ] + }, + { + "id": "658c", + "name": "Add fw filter with mask in hex", + "category": [ + "filter", + "fw" + ], + "setup": [ + "$TC qdisc add dev $DEV1 ingress" + ], + "cmdUnderTest": "$TC filter add dev $DEV1 parent ffff: handle 10/0xa1b2f prio 1 fw action ok", + "expExitCode": "0", + "verifyCmd": "$TC filter get dev $DEV1 parent ffff: handle 10 prio 1 protocol all fw", + "matchPattern": "fw.*handle 0xa/0xa1b2f", + "matchCount": "1", + "teardown": [ + "$TC qdisc del dev $DEV1 ingress" + ] + }, + { + "id": "86be", + "name": "Add fw filter with mask at 32-bit maximum", + "category": [ + "filter", + "fw" + ], + "setup": [ + "$TC qdisc add dev $DEV1 ingress" + ], + "cmdUnderTest": "$TC filter add dev $DEV1 parent ffff: handle 10/4294967295 prio 1 fw action ok", + "expExitCode": "0", + "verifyCmd": "$TC filter get dev $DEV1 parent ffff: handle 10 prio 1 protocol all fw", + "matchPattern": "fw.*handle 0xa[^/]", + "matchCount": "1", + "teardown": [ + "$TC qdisc del dev $DEV1 ingress" + ] + }, + { + "id": "e635", + "name": "Add fw filter with mask exceeding 32-bit maximum", + "category": [ + "filter", + "fw" + ], + "setup": [ + "$TC qdisc add dev $DEV1 ingress" + ], + "cmdUnderTest": "$TC filter add dev $DEV1 parent ffff: handle 10/4294967296 prio 1 fw action ok", + "expExitCode": "1", + "verifyCmd": "$TC filter get dev $DEV1 parent ffff: handle 10 prio 1 protocol all fw", + "matchPattern": "fw.*handle 0xa", + "matchCount": "0", + "teardown": [ + "$TC qdisc del dev $DEV1 ingress" + ] + }, + { + "id": "6cab", + "name": "Add fw filter with handle/mask in hex", + "category": [ + "filter", + "fw" + ], + "setup": [ + "$TC qdisc add dev $DEV1 ingress" + ], + "cmdUnderTest": "$TC filter add dev $DEV1 parent ffff: handle 0xa1b2cdff/0x1a2bffdc prio 1 fw action ok", + "expExitCode": "0", + "verifyCmd": "$TC filter get dev $DEV1 parent ffff: handle 0xa1b2cdff prio 1 protocol all fw", + "matchPattern": "fw.*handle 0xa1b2cdff/0x1a2bffdc", + "matchCount": "1", + "teardown": [ + "$TC qdisc del dev $DEV1 ingress" + ] + }, + { + "id": "8700", + "name": "Add fw filter with handle/mask at 32-bit maximum", + "category": [ + "filter", + "fw" + ], + "setup": [ + "$TC qdisc add dev $DEV1 ingress" + ], + "cmdUnderTest": "$TC filter add dev $DEV1 parent ffff: handle 4294967295/4294967295 prio 1 fw action ok", + "expExitCode": "0", + "verifyCmd": "$TC filter get dev $DEV1 parent ffff: handle 0xffffffff prio 1 protocol all fw", + "matchPattern": "fw.*handle 0xffffffff[^/]", + "matchCount": "1", + "teardown": [ + "$TC qdisc del dev $DEV1 ingress" + ] + }, + { + "id": "7d62", + "name": "Add fw filter with handle/mask exceeding 32-bit maximum", + "category": [ + "filter", + "fw" + ], + "setup": [ + "$TC qdisc add dev $DEV1 ingress" + ], + "cmdUnderTest": "$TC filter add dev $DEV1 parent ffff: handle 
4294967296/4294967296 prio 1 fw action ok", + "expExitCode": "1", + "verifyCmd": "$TC filter get dev $DEV1 parent ffff: handle 10 prio 1 protocol all fw", + "matchPattern": "fw.*handle", + "matchCount": "0", + "teardown": [ + "$TC qdisc del dev $DEV1 ingress" + ] + }, + { + "id": "7b69", + "name": "Add fw filter with missing mandatory handle", + "category": [ + "filter", + "fw" + ], + "setup": [ + "$TC qdisc add dev $DEV1 ingress" + ], + "cmdUnderTest": "$TC filter add dev $DEV1 parent ffff: prio 1 fw action ok", + "expExitCode": "2", + "verifyCmd": "$TC filter show dev $DEV1 parent ffff:", + "matchPattern": "filter protocol all.*fw.*handle.*gact action pass", + "matchCount": "0", + "teardown": [ + "$TC qdisc del dev $DEV1 ingress" + ] + }, + { + "id": "d68b", + "name": "Add fw filter with invalid parent", + "category": [ + "filter", + "fw" + ], + "setup": [ + "$TC qdisc add dev $DEV1 ingress" + ], + "cmdUnderTest": "$TC filter add dev $DEV1 parent aa11b1b2: handle 1 prio 1 fw action ok", + "expExitCode": "255", + "verifyCmd": "$TC filter dev $DEV1 parent aa11b1b2: handle 1 prio 1 protocol all fw", + "matchPattern": "filter protocol all pref 1 fw.*handle 0x1.*gact action pass", + "matchCount": "0", + "teardown": [ + "$TC qdisc del dev $DEV1 ingress" + ] + }, + { + "id": "66e0", + "name": "Add fw filter with missing mandatory parent id", + "category": [ + "filter", + "fw" + ], + "setup": [ + "$TC qdisc add dev $DEV1 ingress" + ], + "cmdUnderTest": "$TC filter add dev $DEV1 handle 1 prio 1 fw action ok", + "expExitCode": "2", + "verifyCmd": "$TC filter get dev $DEV1 parent ffff: handle 1 prio 1 protocol all fw", + "matchPattern": "pref [0-9]+ fw.*handle 0x1.*gact action pass", + "matchCount": "0", + "teardown": [ + "$TC qdisc del dev $DEV1 ingress" + ] + }, + { + "id": "0ff3", + "name": "Add fw filter with classid", + "category": [ + "filter", + "fw" + ], + "setup": [ + "$TC qdisc add dev $DEV1 ingress" + ], + "cmdUnderTest": "$TC filter add dev $DEV1 parent ffff: handle 1 prio 1 fw classid 3 action ok", + "expExitCode": "0", + "verifyCmd": "$TC filter get dev $DEV1 parent ffff: handle 1 prio 1 protocol all fw", + "matchPattern": "fw.*handle 0x1 classid :3.*gact action pass", + "matchCount": "1", + "teardown": [ + "$TC qdisc del dev $DEV1 ingress" + ] + }, + { + "id": "9849", + "name": "Add fw filter with classid at root", + "category": [ + "filter", + "fw" + ], + "setup": [ + "$TC qdisc add dev $DEV1 ingress" + ], + "cmdUnderTest": "$TC filter add dev $DEV1 parent ffff: handle 1 prio 1 fw classid ffff:ffff action ok", + "expExitCode": "0", + "verifyCmd": "$TC filter get dev $DEV1 parent ffff: handle 1 prio 1 protocol all fw", + "matchPattern": "pref 1 fw.*handle 0x1 classid root.*gact action pass", + "matchCount": "1", + "teardown": [ + "$TC qdisc del dev $DEV1 ingress" + ] + }, + { + "id": "b7ff", + "name": "Add fw filter with classid - keeps last 8 (hex) digits", + "category": [ + "filter", + "fw" + ], + "setup": [ + "$TC qdisc add dev $DEV1 ingress" + ], + "cmdUnderTest": "$TC filter add dev $DEV1 parent ffff: handle 1 prio 1 fw classid 98765fedcb action ok", + "expExitCode": "0", + "verifyCmd": "$TC filter get dev $DEV1 parent ffff: handle 1 prio 1 protocol all fw", + "matchPattern": "fw.*handle 0x1 classid 765f:edcb.*gact action pass", + "matchCount": "1", + "teardown": [ + "$TC qdisc del dev $DEV1 ingress" + ] + }, + { + "id": "2b18", + "name": "Add fw filter with invalid classid", + "category": [ + "filter", + "fw" + ], + "setup": [ + "$TC qdisc add dev $DEV1 ingress" + ], + 
"cmdUnderTest": "$TC filter add dev $DEV1 parent ffff: handle 1 prio 1 fw classid 6789defg action ok", + "expExitCode": "1", + "verifyCmd": "$TC filter get dev $DEV1 parent ffff: handle 1 prio 1 protocol all fw", + "matchPattern": "fw.*handle 0x1 classid 6789:defg.*gact action pass", + "matchCount": "0", + "teardown": [ + "$TC qdisc del dev $DEV1 ingress" + ] + }, + { + "id": "fade", + "name": "Add fw filter with flowid", + "category": [ + "filter", + "fw" + ], + "setup": [ + "$TC qdisc add dev $DEV1 ingress" + ], + "cmdUnderTest": "$TC filter add dev $DEV1 parent ffff: handle 10 prio 1 fw flowid 1:10 action ok", + "expExitCode": "0", + "verifyCmd": "$TC filter get dev $DEV1 parent ffff: handle 10 prio 1 protocol all fw", + "matchPattern": "filter parent ffff: protocol all pref 1 fw.*handle 0xa classid 1:10.*gact action pass", + "matchCount": "1", + "teardown": [ + "$TC qdisc del dev $DEV1 ingress" + ] + }, + { + "id": "33af", + "name": "Add fw filter with flowid then classid (same arg, takes second)", + "category": [ + "filter", + "fw" + ], + "setup": [ + "$TC qdisc add dev $DEV1 ingress" + ], + "cmdUnderTest": "$TC filter add dev $DEV1 parent ffff: handle 11 prio 1 fw flowid 10 classid 4 action ok", + "expExitCode": "0", + "verifyCmd": "$TC filter get dev $DEV1 parent ffff: handle 11 prio 1 protocol all fw", + "matchPattern": "filter parent ffff: protocol all pref 1 fw.*handle 0xb classid :4.*gact action pass", + "matchCount": "1", + "teardown": [ + "$TC qdisc del dev $DEV1 ingress" + ] + }, + { + "id": "8a8c", + "name": "Add fw filter with classid then flowid (same arg, takes second)", + "category": [ + "filter", + "fw" + ], + "setup": [ + "$TC qdisc add dev $DEV1 ingress" + ], + "cmdUnderTest": "$TC filter add dev $DEV1 parent ffff: handle 11 prio 1 fw classid 4 flowid 10 action ok", + "expExitCode": "0", + "verifyCmd": "$TC filter get dev $DEV1 parent ffff: handle 11 prio 1 protocol all fw", + "matchPattern": "filter parent ffff: protocol all pref 1 fw.*handle 0xb classid :10.*gact action pass", + "matchCount": "1", + "teardown": [ + "$TC qdisc del dev $DEV1 ingress" + ] + }, + { + "id": "b50d", + "name": "Add fw filter with handle val/mask and flowid 10:1000", + "category": [ + "filter", + "fw" + ], + "setup": [ + "$TC qdisc add dev $DEV1 ingress" + ], + "cmdUnderTest": "$TC filter add dev $DEV1 parent ffff: prio 3 handle 10/0xff fw flowid 10:1000 action ok", + "expExitCode": "0", + "verifyCmd": "$TC filter get dev $DEV1 parent ffff: handle 10 prio 3 protocol all fw", + "matchPattern": "filter parent ffff: protocol all pref 3 fw.*handle 0xa/0xff classid 10:1000.*gact action pass", + "matchCount": "1", + "teardown": [ + "$TC qdisc del dev $DEV1 ingress" + ] + }, + { + "id": "7207", + "name": "Add fw filter with protocol ip", + "category": [ + "filter", + "fw" + ], + "setup": [ + "$TC qdisc add dev $DEV1 ingress" + ], + "cmdUnderTest": "$TC filter add dev $DEV1 parent ffff: protocol ip prio 1 handle 3 fw action ok", + "expExitCode": "0", + "verifyCmd": "$TC filter get dev $DEV1 parent ffff: handle 3 prio 1 protocol ip fw", + "matchPattern": "filter parent ffff: protocol ip pref 1 fw.*handle 0x3.*gact action pass.*index [0-9]+ ref [0-9]+ bind [0-9]+", + "matchCount": "1", + "teardown": [ + "$TC qdisc del dev $DEV1 ingress" + ] + }, + { + "id": "306d", + "name": "Add fw filter with protocol ipv6", + "category": [ + "filter", + "fw" + ], + "setup": [ + "$TC qdisc add dev $DEV1 ingress" + ], + "cmdUnderTest": "$TC filter add dev $DEV1 parent ffff: protocol ipv6 prio 2 handle 4 fw action 
ok", + "expExitCode": "0", + "verifyCmd": "$TC filter get dev $DEV1 parent ffff: handle 4 prio 2 protocol ipv6 fw", + "matchPattern": "filter parent ffff: protocol ipv6 pref 2 fw.*handle 0x4.*gact action pass.*index [0-9]+ ref [0-9]+ bind [0-9]+", + "matchCount": "1", + "teardown": [ + "$TC qdisc del dev $DEV1 ingress" + ] + }, + { + "id": "9a78", + "name": "Add fw filter with protocol arp", + "category": [ + "filter", + "fw" + ], + "setup": [ + "$TC qdisc add dev $DEV1 ingress" + ], + "cmdUnderTest": "$TC filter add dev $DEV1 parent ffff: protocol arp prio 5 handle 7 fw action drop", + "expExitCode": "0", + "verifyCmd": "$TC filter get dev $DEV1 parent ffff: handle 7 prio 5 protocol arp fw", + "matchPattern": "filter parent ffff: protocol arp pref 5 fw.*handle 0x7.*gact action drop.*index [0-9]+ ref [0-9]+ bind [0-9]+", + "matchCount": "1", + "teardown": [ + "$TC qdisc del dev $DEV1 ingress" + ] + }, + { + "id": "1821", + "name": "Add fw filter with protocol 802_3", + "category": [ + "filter", + "fw" + ], + "setup": [ + "$TC qdisc add dev $DEV1 ingress" + ], + "cmdUnderTest": "$TC filter add dev $DEV1 parent ffff: protocol 802_3 handle 1 prio 1 fw action ok", + "expExitCode": "0", + "verifyCmd": "$TC filter get dev $DEV1 parent ffff: handle 1 prio 1 protocol 802_3 fw", + "matchPattern": "filter parent ffff: protocol 802_3 pref 1 fw.*handle 0x1.*gact action pass", + "matchCount": "1", + "teardown": [ + "$TC qdisc del dev $DEV1 ingress" + ] + }, + { + "id": "2260", + "name": "Add fw filter with invalid protocol", + "category": [ + "filter", + "fw" + ], + "setup": [ + "$TC qdisc add dev $DEV1 ingress" + ], + "cmdUnderTest": "$TC filter add dev $DEV1 parent ffff: protocol igmp handle 1 prio 1 fw action ok", + "expExitCode": "255", + "verifyCmd": "$TC filter get dev $DEV1 parent ffff: handle 1 prio 1 protocol igmp fw", + "matchPattern": "filter parent ffff: protocol igmp pref 1 fw.*handle 0x1.*gact action pass", + "matchCount": "0", + "teardown": [ + "$TC qdisc del dev $DEV1 ingress" + ] + }, + { + "id": "09d7", + "name": "Add fw filters protocol 802_3 and ip with conflicting priorities", + "category": [ + "filter", + "fw" + ], + "setup": [ + "$TC qdisc add dev $DEV1 ingress", + "$TC filter add dev $DEV1 parent ffff: protocol 802_3 prio 3 handle 7 fw action ok" + ], + "cmdUnderTest": "$TC filter add dev $DEV1 parent ffff: protocol ip prio 3 handle 8 fw action ok", + "expExitCode": "2", + "verifyCmd": "$TC filter get dev $DEV1 parent ffff: handle 8 prio 3 protocol ip fw", + "matchPattern": "filter parent ffff: protocol ip pref 3 fw.*handle 0x8", + "matchCount": "0", + "teardown": [ + "$TC qdisc del dev $DEV1 ingress" + ] + }, + { + "id": "6973", + "name": "Add fw filters with same index, same action", + "category": [ + "filter", + "fw" + ], + "setup": [ + "$TC qdisc add dev $DEV1 ingress", + "$TC filter add dev $DEV1 parent ffff: prio 6 handle 2 fw action continue index 5" + ], + "cmdUnderTest": "$TC filter add dev $DEV1 parent ffff: prio 8 handle 4 fw action continue index 5", + "expExitCode": "0", + "verifyCmd": "$TC filter get dev $DEV1 parent ffff: handle 4 prio 8 protocol all fw", + "matchPattern": "filter parent ffff: protocol all pref 8 fw.*handle 0x4.*gact action continue.*index 5 ref 2 bind 2", + "matchCount": "1", + "teardown": [ + "$TC qdisc del dev $DEV1 ingress" + ] + }, + { + "id": "fc06", + "name": "Add fw filters with action police", + "category": [ + "filter", + "fw" + ], + "setup": [ + "$TC qdisc add dev $DEV1 ingress" + ], + "cmdUnderTest": "$TC filter add dev $DEV1 parent 
ffff: prio 3 handle 4 fw action police rate 1kbit burst 10k index 5", + "expExitCode": "0", + "verifyCmd": "$TC filter get dev $DEV1 parent ffff: handle 4 prio 3 protocol all fw", + "matchPattern": "filter parent ffff: protocol all pref 3 fw.*handle 0x4.*police 0x5 rate 1Kbit burst 10Kb mtu 2Kb action reclassify overhead 0b.*ref 1 bind 1", + "matchCount": "1", + "teardown": [ + "$TC qdisc del dev $DEV1 ingress" + ] + }, + { + "id": "aac7", + "name": "Add fw filters with action police linklayer atm", + "category": [ + "filter", + "fw" + ], + "setup": [ + "$TC qdisc add dev $DEV1 ingress" + ], + "cmdUnderTest": "$TC filter add dev $DEV1 parent ffff: prio 3 handle 4 fw action police rate 2mbit burst 200k linklayer atm index 8", + "expExitCode": "0", + "verifyCmd": "$TC filter get dev $DEV1 parent ffff: handle 4 prio 3 protocol all fw", + "matchPattern": "filter parent ffff: protocol all pref 3 fw.*handle 0x4.*police 0x8 rate 2Mbit burst 200Kb mtu 2Kb action reclassify overhead 0b linklayer atm.*ref 1 bind 1", + "matchCount": "1", + "teardown": [ + "$TC qdisc del dev $DEV1 ingress" + ] + }, + { + "id": "5339", + "name": "Del entire fw filter", + "category": [ + "filter", + "fw" + ], + "setup": [ + "$TC qdisc add dev $DEV1 ingress", + "$TC filter add dev $DEV1 parent ffff: handle 5 prio 7 fw action pass", + "$TC filter add dev $DEV1 parent ffff: handle 3 prio 9 fw action pass" + ], + "cmdUnderTest": "$TC filter del dev $DEV1 parent ffff:", + "expExitCode": "0", + "verifyCmd": "$TC filter show dev $DEV1 parent ffff:", + "matchPattern": "protocol all pref.*handle.*gact action pass", + "matchCount": "0", + "teardown": [ + "$TC qdisc del dev $DEV1 ingress" + ] + }, + { + "id": "0e99", + "name": "Del single fw filter x1", + "__comment__": "First of two tests to check that one filter is there and the other isn't", + "category": [ + "filter", + "fw" + ], + "setup": [ + "$TC qdisc add dev $DEV1 ingress", + "$TC filter add dev $DEV1 parent ffff: handle 5 prio 7 fw action pass", + "$TC filter add dev $DEV1 parent ffff: handle 3 prio 9 fw action pass" + ], + "cmdUnderTest": "$TC filter del dev $DEV1 parent ffff: handle 3 prio 9 fw action pass", + "expExitCode": "0", + "verifyCmd": "$TC filter show dev $DEV1 parent ffff:", + "matchPattern": "protocol all pref 7.*handle 0x5.*gact action pass", + "matchCount": "1", + "teardown": [ + "$TC qdisc del dev $DEV1 ingress" + ] + }, + { + "id": "f54c", + "name": "Del single fw filter x2", + "__comment__": "Second of two tests to check that one filter is there and the other isn't", + "category": [ + "filter", + "fw" + ], + "setup": [ + "$TC qdisc add dev $DEV1 ingress", + "$TC filter add dev $DEV1 parent ffff: handle 5 prio 7 fw action pass", + "$TC filter add dev $DEV1 parent ffff: handle 3 prio 9 fw action pass" + ], + "cmdUnderTest": "$TC filter del dev $DEV1 parent ffff: handle 3 prio 9 fw action pass", + "expExitCode": "0", + "verifyCmd": "$TC filter show dev $DEV1 parent ffff:", + "matchPattern": "protocol all pref 9.*handle 0x3.*gact action pass", + "matchCount": "0", + "teardown": [ + "$TC qdisc del dev $DEV1 ingress" + ] + }, + { + "id": "ba94", + "name": "Del fw filter by prio", + "category": [ + "filter", + "fw" + ], + "setup": [ + "$TC qdisc add dev $DEV1 ingress", + "$TC filter add dev $DEV1 parent ffff: handle 1 prio 4 fw action ok", + "$TC filter add dev $DEV1 parent ffff: handle 2 prio 4 fw action ok" + ], + "cmdUnderTest": "$TC filter del dev $DEV1 parent ffff: prio 4", + "expExitCode": "0", + "verifyCmd": "$TC filter show dev $DEV1 parent ffff:", + 
"matchPattern": "pref 4 fw.*gact action pass", + "matchCount": "0", + "teardown": [ + "$TC qdisc del dev $DEV1 ingress" + ] + }, + { + "id": "4acb", + "name": "Del fw filter by chain", + "category": [ + "filter", + "fw" + ], + "setup": [ + "$TC qdisc add dev $DEV1 ingress", + "$TC filter add dev $DEV1 parent ffff: handle 4 prio 2 chain 13 fw action pipe", + "$TC filter add dev $DEV1 parent ffff: handle 3 prio 5 chain 13 fw action pipe" + ], + "cmdUnderTest": "$TC filter del dev $DEV1 parent ffff: chain 13", + "expExitCode": "0", + "verifyCmd": "$TC filter show dev $DEV1 parent ffff:", + "matchPattern": "fw chain 13 handle.*gact action pipe", + "matchCount": "0", + "teardown": [ + "$TC qdisc del dev $DEV1 ingress" + ] + }, + { + "id": "3424", + "name": "Del fw filter by action (invalid)", + "category": [ + "filter", + "fw" + ], + "setup": [ + "$TC qdisc add dev $DEV1 ingress", + "$TC filter add dev $DEV1 parent ffff: handle 2 prio 4 fw action drop" + ], + "cmdUnderTest": "$TC filter del dev $DEV1 parent ffff: fw action drop", + "expExitCode": "2", + "verifyCmd": "$TC filter get dev $DEV1 parent ffff: handle 2 prio 4 protocol all fw", + "matchPattern": "handle 0x2.*gact action drop", + "matchCount": "1", + "teardown": [ + "$TC qdisc del dev $DEV1 ingress" + ] + }, + { + "id": "da89", + "name": "Del fw filter by handle (invalid)", + "category": [ + "filter", + "fw" + ], + "setup": [ + "$TC qdisc add dev $DEV1 ingress", + "$TC filter add dev $DEV1 parent ffff: handle 3 prio 4 fw action continue" + ], + "cmdUnderTest": "$TC filter del dev $DEV1 parent ffff: handle 3 fw", + "expExitCode": "2", + "verifyCmd": "$TC filter get dev $DEV1 parent ffff: handle 3 prio 4 protocol all fw", + "matchPattern": "handle 0x3.*gact action continue", + "matchCount": "1", + "teardown": [ + "$TC qdisc del dev $DEV1 ingress" + ] + }, + { + "id": "4d95", + "name": "Del fw filter by protocol (invalid)", + "category": [ + "filter", + "fw" + ], + "setup": [ + "$TC qdisc add dev $DEV1 ingress", + "$TC filter add dev $DEV1 parent ffff: handle 4 prio 2 protocol arp fw action pipe" + ], + "cmdUnderTest": "$TC filter del dev $DEV1 parent ffff: protocol arp fw", + "expExitCode": "2", + "verifyCmd": "$TC filter get dev $DEV1 parent ffff: handle 4 prio 2 protocol arp fw", + "matchPattern": "filter parent ffff: protocol arp.*handle 0x4.*gact action pipe", + "matchCount": "1", + "teardown": [ + "$TC qdisc del dev $DEV1 ingress" + ] + }, + { + "id": "4736", + "name": "Del fw filter by flowid (invalid)", + "category": [ + "filter", + "fw" + ], + "setup": [ + "$TC qdisc add dev $DEV1 ingress", + "$TC filter add dev $DEV1 parent ffff: handle 4 prio 2 fw action pipe flowid 45" + ], + "cmdUnderTest": "$TC filter del dev $DEV1 parent ffff: fw flowid 45", + "expExitCode": "2", + "verifyCmd": "$TC filter show dev $DEV1 parent ffff:", + "matchPattern": "handle 0x4.*gact action pipe", + "matchCount": "1", + "teardown": [ + "$TC qdisc del dev $DEV1 ingress" + ] + }, + { + "id": "3dcb", + "name": "Replace fw filter action", + "category": [ + "filter", + "fw" + ], + "setup": [ + "$TC qdisc add dev $DEV1 ingress", + "$TC filter add dev $DEV1 parent ffff: handle 1 prio 2 fw action ok" + ], + "cmdUnderTest": "$TC filter replace dev $DEV1 parent ffff: handle 1 prio 2 fw action pipe", + "expExitCode": "0", + "verifyCmd": "$TC filter show dev $DEV1 parent ffff:", + "matchPattern": "pref 2 fw.*handle 0x1.*gact action pipe", + "matchCount": "1", + "teardown": [ + "$TC qdisc del dev $DEV1 ingress" + ] + }, + { + "id": "eb4d", + "name": "Replace fw 
filter classid", + "category": [ + "filter", + "fw" + ], + "setup": [ + "$TC qdisc add dev $DEV1 ingress", + "$TC filter add dev $DEV1 parent ffff: handle 1 prio 2 fw action ok" + ], + "cmdUnderTest": "$TC filter replace dev $DEV1 parent ffff: handle 1 prio 2 fw action pipe classid 2", + "expExitCode": "0", + "verifyCmd": "$TC filter show dev $DEV1 parent ffff:", + "matchPattern": "pref 2 fw.*handle 0x1 classid :2.*gact action pipe", + "matchCount": "1", + "teardown": [ + "$TC qdisc del dev $DEV1 ingress" + ] + }, + { + "id": "67ec", + "name": "Replace fw filter index", + "category": [ + "filter", + "fw" + ], + "setup": [ + "$TC qdisc add dev $DEV1 ingress", + "$TC filter add dev $DEV1 parent ffff: handle 1 prio 2 fw action ok index 3" + ], + "cmdUnderTest": "$TC filter replace dev $DEV1 parent ffff: handle 1 prio 2 fw action ok index 16", + "expExitCode": "0", + "verifyCmd": "$TC filter show dev $DEV1 parent ffff:", + "matchPattern": "pref 2 fw.*handle 0x1.*gact action pass.*index 16", + "matchCount": "1", + "teardown": [ + "$TC qdisc del dev $DEV1 ingress" + ] + } +] |