186 files changed, 16447 insertions, 5580 deletions
diff --git a/Documentation/devicetree/bindings/net/adi,adin1110.yaml b/Documentation/devicetree/bindings/net/adi,adin1110.yaml index b6bd8ee38a18..9de865295d7a 100644 --- a/Documentation/devicetree/bindings/net/adi,adin1110.yaml +++ b/Documentation/devicetree/bindings/net/adi,adin1110.yaml @@ -46,6 +46,10 @@ properties: interrupts: maxItems: 1 + reset-gpios: + maxItems: 1 + description: GPIO connected to active low reset + required: - compatible - reg diff --git a/Documentation/devicetree/bindings/net/dsa/renesas,rzn1-a5psw.yaml b/Documentation/devicetree/bindings/net/dsa/renesas,rzn1-a5psw.yaml index 7ca9c19a157c..0a0d62b6c00e 100644 --- a/Documentation/devicetree/bindings/net/dsa/renesas,rzn1-a5psw.yaml +++ b/Documentation/devicetree/bindings/net/dsa/renesas,rzn1-a5psw.yaml @@ -74,10 +74,10 @@ properties: properties: pcs-handle: + maxItems: 1 description: phandle pointing to a PCS sub-node compatible with renesas,rzn1-miic.yaml# - $ref: /schemas/types.yaml#/definitions/phandle unevaluatedProperties: false diff --git a/Documentation/devicetree/bindings/net/ethernet-controller.yaml b/Documentation/devicetree/bindings/net/ethernet-controller.yaml index 4b3c590fcebf..3aef506fa158 100644 --- a/Documentation/devicetree/bindings/net/ethernet-controller.yaml +++ b/Documentation/devicetree/bindings/net/ethernet-controller.yaml @@ -108,11 +108,17 @@ properties: $ref: "#/properties/phy-connection-type" pcs-handle: - $ref: /schemas/types.yaml#/definitions/phandle + $ref: /schemas/types.yaml#/definitions/phandle-array + items: + maxItems: 1 description: Specifies a reference to a node representing a PCS PHY device on a MDIO bus to link with an external PHY (phy-handle) if exists. + pcs-handle-names: + description: + The name of each PCS in pcs-handle. + phy-handle: $ref: /schemas/types.yaml#/definitions/phandle description: @@ -216,6 +222,9 @@ properties: required: - speed +dependencies: + pcs-handle-names: [pcs-handle] + allOf: - if: properties: diff --git a/Documentation/devicetree/bindings/net/fsl,fman-dtsec.yaml b/Documentation/devicetree/bindings/net/fsl,fman-dtsec.yaml index 3a35ac1c260d..c80c880a9dab 100644 --- a/Documentation/devicetree/bindings/net/fsl,fman-dtsec.yaml +++ b/Documentation/devicetree/bindings/net/fsl,fman-dtsec.yaml @@ -85,9 +85,39 @@ properties: $ref: /schemas/types.yaml#/definitions/phandle description: A reference to the IEEE1588 timer + phys: + description: A reference to the SerDes lane(s) + maxItems: 1 + + phy-names: + items: + - const: serdes + pcsphy-handle: - $ref: /schemas/types.yaml#/definitions/phandle - description: A reference to the PCS (typically found on the SerDes) + $ref: /schemas/types.yaml#/definitions/phandle-array + minItems: 1 + maxItems: 3 + deprecated: true + description: See pcs-handle. + + pcs-handle: + minItems: 1 + maxItems: 3 + description: | + A reference to the various PCSs (typically found on the SerDes). If + pcs-handle-names is absent, and phy-connection-type is "xgmii", then the first + reference will be assumed to be for "xfi". Otherwise, if pcs-handle-names is + absent, then the first reference will be assumed to be for "sgmii". + + pcs-handle-names: + minItems: 1 + maxItems: 3 + items: + enum: + - sgmii + - qsgmii + - xfi + description: The type of each PCS in pcsphy-handle. 
tbi-handle: $ref: /schemas/types.yaml#/definitions/phandle @@ -100,6 +130,10 @@ required: - fsl,fman-ports - ptp-timer +dependencies: + pcs-handle-names: + - pcs-handle + allOf: - $ref: ethernet-controller.yaml# - if: @@ -110,14 +144,6 @@ allOf: then: required: - tbi-handle - - if: - properties: - compatible: - contains: - const: fsl,fman-memac - then: - required: - - pcsphy-handle unevaluatedProperties: false @@ -138,8 +164,9 @@ examples: reg = <0xe8000 0x1000>; fsl,fman-ports = <&fman0_rx_0x0c &fman0_tx_0x2c>; ptp-timer = <&ptp_timer0>; - pcsphy-handle = <&pcsphy4>; - phy-handle = <&sgmii_phy1>; - phy-connection-type = "sgmii"; + pcs-handle = <&pcsphy4>, <&qsgmiib_pcs1>; + pcs-handle-names = "sgmii", "qsgmii"; + phys = <&serdes1 1>; + phy-names = "serdes"; }; ... diff --git a/Documentation/devicetree/bindings/net/fsl,qoriq-mc-dpmac.yaml b/Documentation/devicetree/bindings/net/fsl,qoriq-mc-dpmac.yaml index 7f620a71a972..600240281e8c 100644 --- a/Documentation/devicetree/bindings/net/fsl,qoriq-mc-dpmac.yaml +++ b/Documentation/devicetree/bindings/net/fsl,qoriq-mc-dpmac.yaml @@ -31,7 +31,7 @@ properties: phy-mode: true pcs-handle: - $ref: /schemas/types.yaml#/definitions/phandle + maxItems: 1 description: A reference to a node representing a PCS PHY device found on the internal MDIO bus. diff --git a/Documentation/devicetree/bindings/net/fsl-fman.txt b/Documentation/devicetree/bindings/net/fsl-fman.txt index b9055335db3b..bda4b41af074 100644 --- a/Documentation/devicetree/bindings/net/fsl-fman.txt +++ b/Documentation/devicetree/bindings/net/fsl-fman.txt @@ -320,8 +320,9 @@ For internal PHY device on internal mdio bus, a PHY node should be created. See the definition of the PHY node in booting-without-of.txt for an example of how to define a PHY (Internal PHY has no interrupt line). - For "fsl,fman-mdio" compatible internal mdio bus, the PHY is TBI PHY. -- For "fsl,fman-memac-mdio" compatible internal mdio bus, the PHY is PCS PHY, - PCS PHY addr must be '0'. +- For "fsl,fman-memac-mdio" compatible internal mdio bus, the PHY is PCS PHY. + The PCS PHY address should correspond to the value of the appropriate + MDEV_PORT. 
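As a rough illustration of the reworded rule above, an internal "fsl,fman-memac-mdio" bus with its PCS PHY placed at the matching address could look like the sketch below. The address value of 3 is hypothetical; the real value depends on which MDEV_PORT the SerDes/RCW configuration assigns to this MAC, so treat this as a sketch rather than board documentation.

    /* Sketch only: assumes this MAC's lane is configured with MDEV_PORT = 3 */
    mdio@e1000 {
            #address-cells = <1>;
            #size-cells = <0>;
            compatible = "fsl,fman-memac-mdio", "fsl,fman-xmdio";
            reg = <0xe1000 0x1000>;

            pcsphy0: ethernet-phy@3 {
                    reg = <0x3>;    /* matches MDEV_PORT instead of being fixed at 0 */
            };
    };
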
EXAMPLE diff --git a/Documentation/devicetree/bindings/net/marvell,pp2.yaml b/Documentation/devicetree/bindings/net/marvell,pp2.yaml new file mode 100644 index 000000000000..4eadafc43d4f --- /dev/null +++ b/Documentation/devicetree/bindings/net/marvell,pp2.yaml @@ -0,0 +1,305 @@ +# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/net/marvell,pp2.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Marvell CN913X / Marvell Armada 375, 7K, 8K Ethernet Controller + +maintainers: + - Marcin Wojtas <mw@semihalf.com> + - Russell King <linux@armlinux.org> + +description: | + Marvell Armada 375 Ethernet Controller (PPv2.1) + Marvell Armada 7K/8K Ethernet Controller (PPv2.2) + Marvell CN913X Ethernet Controller (PPv2.3) + +properties: + compatible: + enum: + - marvell,armada-375-pp2 + - marvell,armada-7k-pp22 + + reg: + minItems: 3 + maxItems: 4 + + "#address-cells": + const: 1 + + "#size-cells": + const: 0 + + clocks: + minItems: 2 + items: + - description: main controller clock + - description: GOP clock + - description: MG clock + - description: MG Core clock + - description: AXI clock + + clock-names: + minItems: 2 + items: + - const: pp_clk + - const: gop_clk + - const: mg_clk + - const: mg_core_clk + - const: axi_clk + + dma-coherent: true + + marvell,system-controller: + $ref: /schemas/types.yaml#/definitions/phandle + description: a phandle to the system controller. + +patternProperties: + '^(ethernet-)?port@[0-2]$': + type: object + description: subnode for each ethernet port. + $ref: ethernet-controller.yaml# + unevaluatedProperties: false + + properties: + reg: + description: ID of the port from the MAC point of view. + maximum: 2 + + interrupts: + minItems: 1 + maxItems: 10 + description: interrupt(s) for the port + + interrupt-names: + minItems: 1 + items: + - const: hif0 + - const: hif1 + - const: hif2 + - const: hif3 + - const: hif4 + - const: hif5 + - const: hif6 + - const: hif7 + - const: hif8 + - const: link + + description: > + if more than a single interrupt for is given, must be the + name associated to the interrupts listed. Valid names are: + "hifX", with X in [0..8], and "link". The names "tx-cpu0", + "tx-cpu1", "tx-cpu2", "tx-cpu3" and "rx-shared" are supported + for backward compatibility but shouldn't be used for new + additions. + + phys: + minItems: 1 + maxItems: 2 + description: > + Generic PHY, providing SerDes connectivity. For most modes, + one lane is sufficient, but some (e.g. RXAUI) may require two. + + phy-mode: + enum: + - gmii + - sgmii + - rgmii-id + - 1000base-x + - 2500base-x + - 5gbase-r + - rxaui + - 10gbase-r + + port-id: + $ref: /schemas/types.yaml#/definitions/uint32 + deprecated: true + description: > + ID of the port from the MAC point of view. + Legacy binding for backward compatibility. + + marvell,loopback: + $ref: /schemas/types.yaml#/definitions/flag + description: port is loopback mode. + + gop-port-id: + $ref: /schemas/types.yaml#/definitions/uint32 + description: > + only for marvell,armada-7k-pp22, ID of the port from the + GOP (Group Of Ports) point of view. This ID is used to index the + per-port registers in the second register area. 
+ + required: + - reg + - interrupts + - phy-mode + - port-id + +required: + - compatible + - reg + - clocks + - clock-names + +allOf: + - if: + properties: + compatible: + const: marvell,armada-7k-pp22 + then: + properties: + reg: + items: + - description: Packet Processor registers + - description: Networking interfaces registers + - description: CM3 address space used for TX Flow Control + + clocks: + minItems: 5 + + clock-names: + minItems: 5 + + patternProperties: + '^(ethernet-)?port@[0-2]$': + required: + - gop-port-id + + required: + - marvell,system-controller + else: + properties: + reg: + items: + - description: Packet Processor registers + - description: LMS registers + - description: Register area per eth0 + - description: Register area per eth1 + + clocks: + maxItems: 2 + + clock-names: + maxItems: 2 + + patternProperties: + '^(ethernet-)?port@[0-1]$': + properties: + reg: + maximum: 1 + + gop-port-id: false + +additionalProperties: false + +examples: + - | + // For Armada 375 variant + #include <dt-bindings/interrupt-controller/mvebu-icu.h> + #include <dt-bindings/interrupt-controller/arm-gic.h> + + ethernet@f0000 { + #address-cells = <1>; + #size-cells = <0>; + compatible = "marvell,armada-375-pp2"; + reg = <0xf0000 0xa000>, + <0xc0000 0x3060>, + <0xc4000 0x100>, + <0xc5000 0x100>; + clocks = <&gateclk 3>, <&gateclk 19>; + clock-names = "pp_clk", "gop_clk"; + + ethernet-port@0 { + interrupts = <GIC_SPI 37 IRQ_TYPE_LEVEL_HIGH>; + reg = <0>; + port-id = <0>; /* For backward compatibility. */ + phy = <&phy0>; + phy-mode = "rgmii-id"; + }; + + ethernet-port@1 { + interrupts = <GIC_SPI 41 IRQ_TYPE_LEVEL_HIGH>; + reg = <1>; + port-id = <1>; /* For backward compatibility. */ + phy = <&phy3>; + phy-mode = "gmii"; + }; + }; + + - | + // For Armada 7k/8k and Cn913x variants + #include <dt-bindings/interrupt-controller/mvebu-icu.h> + #include <dt-bindings/interrupt-controller/arm-gic.h> + + ethernet@0 { + #address-cells = <1>; + #size-cells = <0>; + compatible = "marvell,armada-7k-pp22"; + reg = <0x0 0x100000>, <0x129000 0xb000>, <0x220000 0x800>; + clocks = <&cp0_clk 1 3>, <&cp0_clk 1 9>, + <&cp0_clk 1 5>, <&cp0_clk 1 6>, <&cp0_clk 1 18>; + clock-names = "pp_clk", "gop_clk", "mg_clk", "mg_core_clk", "axi_clk"; + marvell,system-controller = <&cp0_syscon0>; + + ethernet-port@0 { + interrupts = <ICU_GRP_NSR 39 IRQ_TYPE_LEVEL_HIGH>, + <ICU_GRP_NSR 43 IRQ_TYPE_LEVEL_HIGH>, + <ICU_GRP_NSR 47 IRQ_TYPE_LEVEL_HIGH>, + <ICU_GRP_NSR 51 IRQ_TYPE_LEVEL_HIGH>, + <ICU_GRP_NSR 55 IRQ_TYPE_LEVEL_HIGH>, + <ICU_GRP_NSR 59 IRQ_TYPE_LEVEL_HIGH>, + <ICU_GRP_NSR 63 IRQ_TYPE_LEVEL_HIGH>, + <ICU_GRP_NSR 67 IRQ_TYPE_LEVEL_HIGH>, + <ICU_GRP_NSR 71 IRQ_TYPE_LEVEL_HIGH>, + <ICU_GRP_NSR 129 IRQ_TYPE_LEVEL_HIGH>; + interrupt-names = "hif0", "hif1", "hif2", "hif3", "hif4", + "hif5", "hif6", "hif7", "hif8", "link"; + phy-mode = "10gbase-r"; + phys = <&cp0_comphy4 0>; + reg = <0>; + port-id = <0>; /* For backward compatibility. 
*/ + gop-port-id = <0>; + }; + + ethernet-port@1 { + interrupts = <ICU_GRP_NSR 40 IRQ_TYPE_LEVEL_HIGH>, + <ICU_GRP_NSR 44 IRQ_TYPE_LEVEL_HIGH>, + <ICU_GRP_NSR 48 IRQ_TYPE_LEVEL_HIGH>, + <ICU_GRP_NSR 52 IRQ_TYPE_LEVEL_HIGH>, + <ICU_GRP_NSR 56 IRQ_TYPE_LEVEL_HIGH>, + <ICU_GRP_NSR 60 IRQ_TYPE_LEVEL_HIGH>, + <ICU_GRP_NSR 64 IRQ_TYPE_LEVEL_HIGH>, + <ICU_GRP_NSR 68 IRQ_TYPE_LEVEL_HIGH>, + <ICU_GRP_NSR 72 IRQ_TYPE_LEVEL_HIGH>, + <ICU_GRP_NSR 128 IRQ_TYPE_LEVEL_HIGH>; + interrupt-names = "hif0", "hif1", "hif2", "hif3", "hif4", + "hif5", "hif6", "hif7", "hif8", "link"; + phy-mode = "rgmii-id"; + reg = <1>; + port-id = <1>; /* For backward compatibility. */ + gop-port-id = <2>; + }; + + ethernet-port@2 { + interrupts = <ICU_GRP_NSR 41 IRQ_TYPE_LEVEL_HIGH>, + <ICU_GRP_NSR 45 IRQ_TYPE_LEVEL_HIGH>, + <ICU_GRP_NSR 49 IRQ_TYPE_LEVEL_HIGH>, + <ICU_GRP_NSR 53 IRQ_TYPE_LEVEL_HIGH>, + <ICU_GRP_NSR 57 IRQ_TYPE_LEVEL_HIGH>, + <ICU_GRP_NSR 61 IRQ_TYPE_LEVEL_HIGH>, + <ICU_GRP_NSR 65 IRQ_TYPE_LEVEL_HIGH>, + <ICU_GRP_NSR 69 IRQ_TYPE_LEVEL_HIGH>, + <ICU_GRP_NSR 73 IRQ_TYPE_LEVEL_HIGH>, + <ICU_GRP_NSR 127 IRQ_TYPE_LEVEL_HIGH>; + interrupt-names = "hif0", "hif1", "hif2", "hif3", "hif4", + "hif5", "hif6", "hif7", "hif8", "link"; + phy-mode = "2500base-x"; + managed = "in-band-status"; + phys = <&cp0_comphy5 2>; + sfp = <&sfp_eth3>; + reg = <2>; + port-id = <2>; /* For backward compatibility. */ + gop-port-id = <3>; + }; + }; diff --git a/Documentation/devicetree/bindings/net/marvell-pp2.txt b/Documentation/devicetree/bindings/net/marvell-pp2.txt deleted file mode 100644 index ce15c173f43f..000000000000 --- a/Documentation/devicetree/bindings/net/marvell-pp2.txt +++ /dev/null @@ -1,141 +0,0 @@ -* Marvell Armada 375 Ethernet Controller (PPv2.1) - Marvell Armada 7K/8K Ethernet Controller (PPv2.2) - Marvell CN913X Ethernet Controller (PPv2.3) - -Required properties: - -- compatible: should be one of: - "marvell,armada-375-pp2" - "marvell,armada-7k-pp2" -- reg: addresses and length of the register sets for the device. - For "marvell,armada-375-pp2", must contain the following register - sets: - - common controller registers - - LMS registers - - one register area per Ethernet port - For "marvell,armada-7k-pp2" used by 7K/8K and CN913X, must contain the following register - sets: - - packet processor registers - - networking interfaces registers - - CM3 address space used for TX Flow Control - -- clocks: pointers to the reference clocks for this device, consequently: - - main controller clock (for both armada-375-pp2 and armada-7k-pp2) - - GOP clock (for both armada-375-pp2 and armada-7k-pp2) - - MG clock (only for armada-7k-pp2) - - MG Core clock (only for armada-7k-pp2) - - AXI clock (only for armada-7k-pp2) -- clock-names: names of used clocks, must be "pp_clk", "gop_clk", "mg_clk", - "mg_core_clk" and "axi_clk" (the 3 latter only for armada-7k-pp2). - -The ethernet ports are represented by subnodes. At least one port is -required. - -Required properties (port): - -- interrupts: interrupt(s) for the port -- port-id: ID of the port from the MAC point of view -- gop-port-id: only for marvell,armada-7k-pp2, ID of the port from the - GOP (Group Of Ports) point of view. This ID is used to index the - per-port registers in the second register area. -- phy-mode: See ethernet.txt file in the same directory - -Optional properties (port): - -- marvell,loopback: port is loopback mode -- phy: a phandle to a phy node defining the PHY address (as the reg - property, a single integer). 
-- interrupt-names: if more than a single interrupt for is given, must be the - name associated to the interrupts listed. Valid names are: - "hifX", with X in [0..8], and "link". The names "tx-cpu0", - "tx-cpu1", "tx-cpu2", "tx-cpu3" and "rx-shared" are supported - for backward compatibility but shouldn't be used for new - additions. -- marvell,system-controller: a phandle to the system controller. - -Example for marvell,armada-375-pp2: - -ethernet@f0000 { - compatible = "marvell,armada-375-pp2"; - reg = <0xf0000 0xa000>, - <0xc0000 0x3060>, - <0xc4000 0x100>, - <0xc5000 0x100>; - clocks = <&gateclk 3>, <&gateclk 19>; - clock-names = "pp_clk", "gop_clk"; - - eth0: eth0@c4000 { - interrupts = <GIC_SPI 37 IRQ_TYPE_LEVEL_HIGH>; - port-id = <0>; - phy = <&phy0>; - phy-mode = "gmii"; - }; - - eth1: eth1@c5000 { - interrupts = <GIC_SPI 41 IRQ_TYPE_LEVEL_HIGH>; - port-id = <1>; - phy = <&phy3>; - phy-mode = "gmii"; - }; -}; - -Example for marvell,armada-7k-pp2: - -cpm_ethernet: ethernet@0 { - compatible = "marvell,armada-7k-pp22"; - reg = <0x0 0x100000>, <0x129000 0xb000>, <0x220000 0x800>; - clocks = <&cpm_syscon0 1 3>, <&cpm_syscon0 1 9>, - <&cpm_syscon0 1 5>, <&cpm_syscon0 1 6>, <&cpm_syscon0 1 18>; - clock-names = "pp_clk", "gop_clk", "mg_clk", "mg_core_clk", "axi_clk"; - - eth0: eth0 { - interrupts = <ICU_GRP_NSR 39 IRQ_TYPE_LEVEL_HIGH>, - <ICU_GRP_NSR 43 IRQ_TYPE_LEVEL_HIGH>, - <ICU_GRP_NSR 47 IRQ_TYPE_LEVEL_HIGH>, - <ICU_GRP_NSR 51 IRQ_TYPE_LEVEL_HIGH>, - <ICU_GRP_NSR 55 IRQ_TYPE_LEVEL_HIGH>, - <ICU_GRP_NSR 59 IRQ_TYPE_LEVEL_HIGH>, - <ICU_GRP_NSR 63 IRQ_TYPE_LEVEL_HIGH>, - <ICU_GRP_NSR 67 IRQ_TYPE_LEVEL_HIGH>, - <ICU_GRP_NSR 71 IRQ_TYPE_LEVEL_HIGH>, - <ICU_GRP_NSR 129 IRQ_TYPE_LEVEL_HIGH>; - interrupt-names = "hif0", "hif1", "hif2", "hif3", "hif4", - "hif5", "hif6", "hif7", "hif8", "link"; - port-id = <0>; - gop-port-id = <0>; - }; - - eth1: eth1 { - interrupts = <ICU_GRP_NSR 40 IRQ_TYPE_LEVEL_HIGH>, - <ICU_GRP_NSR 44 IRQ_TYPE_LEVEL_HIGH>, - <ICU_GRP_NSR 48 IRQ_TYPE_LEVEL_HIGH>, - <ICU_GRP_NSR 52 IRQ_TYPE_LEVEL_HIGH>, - <ICU_GRP_NSR 56 IRQ_TYPE_LEVEL_HIGH>, - <ICU_GRP_NSR 60 IRQ_TYPE_LEVEL_HIGH>, - <ICU_GRP_NSR 64 IRQ_TYPE_LEVEL_HIGH>, - <ICU_GRP_NSR 68 IRQ_TYPE_LEVEL_HIGH>, - <ICU_GRP_NSR 72 IRQ_TYPE_LEVEL_HIGH>, - <ICU_GRP_NSR 128 IRQ_TYPE_LEVEL_HIGH>; - interrupt-names = "hif0", "hif1", "hif2", "hif3", "hif4", - "hif5", "hif6", "hif7", "hif8", "link"; - port-id = <1>; - gop-port-id = <2>; - }; - - eth2: eth2 { - interrupts = <ICU_GRP_NSR 41 IRQ_TYPE_LEVEL_HIGH>, - <ICU_GRP_NSR 45 IRQ_TYPE_LEVEL_HIGH>, - <ICU_GRP_NSR 49 IRQ_TYPE_LEVEL_HIGH>, - <ICU_GRP_NSR 53 IRQ_TYPE_LEVEL_HIGH>, - <ICU_GRP_NSR 57 IRQ_TYPE_LEVEL_HIGH>, - <ICU_GRP_NSR 61 IRQ_TYPE_LEVEL_HIGH>, - <ICU_GRP_NSR 65 IRQ_TYPE_LEVEL_HIGH>, - <ICU_GRP_NSR 69 IRQ_TYPE_LEVEL_HIGH>, - <ICU_GRP_NSR 73 IRQ_TYPE_LEVEL_HIGH>, - <ICU_GRP_NSR 127 IRQ_TYPE_LEVEL_HIGH>; - interrupt-names = "hif0", "hif1", "hif2", "hif3", "hif4", - "hif5", "hif6", "hif7", "hif8", "link"; - port-id = <2>; - gop-port-id = <3>; - }; -}; diff --git a/Documentation/devicetree/bindings/net/pcs/fsl,lynx-pcs.yaml b/Documentation/devicetree/bindings/net/pcs/fsl,lynx-pcs.yaml new file mode 100644 index 000000000000..fbedf696c555 --- /dev/null +++ b/Documentation/devicetree/bindings/net/pcs/fsl,lynx-pcs.yaml @@ -0,0 +1,40 @@ +# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/net/pcs/fsl,lynx-pcs.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: NXP Lynx PCS + +maintainers: + - Ioana Ciornei 
<ioana.ciornei@nxp.com> + +description: | + NXP Lynx 10G and 28G SerDes have Ethernet PCS devices which can be used as + protocol controllers. They are accessible over the Ethernet interface's MDIO + bus. + +properties: + compatible: + const: fsl,lynx-pcs + + reg: + maxItems: 1 + +required: + - compatible + - reg + +additionalProperties: false + +examples: + - | + mdio-bus { + #address-cells = <1>; + #size-cells = <0>; + + qsgmii_pcs1: ethernet-pcs@1 { + compatible = "fsl,lynx-pcs"; + reg = <1>; + }; + }; diff --git a/MAINTAINERS b/MAINTAINERS index 3bb30c0d1cb4..657b223ed6b0 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -2439,6 +2439,7 @@ L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) S: Supported T: git git://github.com/microchip-ung/linux-upstream.git F: arch/arm64/boot/dts/microchip/ +F: drivers/net/ethernet/microchip/vcap/ F: drivers/pinctrl/pinctrl-microchip-sgpio.c N: sparx5 @@ -6326,6 +6327,7 @@ F: drivers/net/ethernet/freescale/dpaa2/Kconfig F: drivers/net/ethernet/freescale/dpaa2/Makefile F: drivers/net/ethernet/freescale/dpaa2/dpaa2-eth* F: drivers/net/ethernet/freescale/dpaa2/dpaa2-mac* +F: drivers/net/ethernet/freescale/dpaa2/dpaa2-xsk* F: drivers/net/ethernet/freescale/dpaa2/dpkg.h F: drivers/net/ethernet/freescale/dpaa2/dpmac* F: drivers/net/ethernet/freescale/dpaa2/dpni* @@ -12311,7 +12313,7 @@ M: Marcin Wojtas <mw@semihalf.com> M: Russell King <linux@armlinux.org.uk> L: netdev@vger.kernel.org S: Maintained -F: Documentation/devicetree/bindings/net/marvell-pp2.txt +F: Documentation/devicetree/bindings/net/marvell,pp2.yaml F: drivers/net/ethernet/marvell/mvpp2/ MARVELL MWIFIEX WIRELESS DRIVER diff --git a/arch/arm/boot/dts/armada-375.dtsi b/arch/arm/boot/dts/armada-375.dtsi index 929deaf312a5..9fbe0cfec48f 100644 --- a/arch/arm/boot/dts/armada-375.dtsi +++ b/arch/arm/boot/dts/armada-375.dtsi @@ -178,6 +178,8 @@ /* Network controller */ ethernet: ethernet@f0000 { + #address-cells = <1>; + #size-cells = <0>; compatible = "marvell,armada-375-pp2"; reg = <0xf0000 0xa000>, /* Packet Processor regs */ <0xc0000 0x3060>, /* LMS regs */ @@ -187,15 +189,17 @@ clock-names = "pp_clk", "gop_clk"; status = "disabled"; - eth0: eth0 { + eth0: ethernet-port@0 { interrupts = <GIC_SPI 37 IRQ_TYPE_LEVEL_HIGH>; - port-id = <0>; + reg = <0>; + port-id = <0>; /* For backward compatibility. */ status = "disabled"; }; - eth1: eth1 { + eth1: ethernet-port@1 { interrupts = <GIC_SPI 41 IRQ_TYPE_LEVEL_HIGH>; - port-id = <1>; + reg = <1>; + port-id = <1>; /* For backward compatibility. 
*/ status = "disabled"; }; }; diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1043-post.dtsi b/arch/arm64/boot/dts/freescale/fsl-ls1043-post.dtsi index d237162a8744..5c4d7eef8b61 100644 --- a/arch/arm64/boot/dts/freescale/fsl-ls1043-post.dtsi +++ b/arch/arm64/boot/dts/freescale/fsl-ls1043-post.dtsi @@ -24,9 +24,12 @@ /* these aliases provide the FMan ports mapping */ enet0: ethernet@e0000 { + pcs-handle-names = "qsgmii"; }; enet1: ethernet@e2000 { + pcsphy-handle = <&pcsphy1>, <&qsgmiib_pcs1>; + pcs-handle-names = "sgmii", "qsgmii"; }; enet2: ethernet@e4000 { @@ -36,11 +39,32 @@ }; enet4: ethernet@e8000 { + pcsphy-handle = <&pcsphy4>, <&qsgmiib_pcs2>; + pcs-handle-names = "sgmii", "qsgmii"; }; enet5: ethernet@ea000 { + pcsphy-handle = <&pcsphy5>, <&qsgmiib_pcs3>; + pcs-handle-names = "sgmii", "qsgmii"; }; enet6: ethernet@f0000 { }; + + mdio@e1000 { + qsgmiib_pcs1: ethernet-pcs@1 { + compatible = "fsl,lynx-pcs"; + reg = <0x1>; + }; + + qsgmiib_pcs2: ethernet-pcs@2 { + compatible = "fsl,lynx-pcs"; + reg = <0x2>; + }; + + qsgmiib_pcs3: ethernet-pcs@3 { + compatible = "fsl,lynx-pcs"; + reg = <0x3>; + }; + }; }; diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1046-post.dtsi b/arch/arm64/boot/dts/freescale/fsl-ls1046-post.dtsi index d6caaea57d90..4e3345093943 100644 --- a/arch/arm64/boot/dts/freescale/fsl-ls1046-post.dtsi +++ b/arch/arm64/boot/dts/freescale/fsl-ls1046-post.dtsi @@ -23,6 +23,8 @@ &fman0 { /* these aliases provide the FMan ports mapping */ enet0: ethernet@e0000 { + pcsphy-handle = <&qsgmiib_pcs3>; + pcs-handle-names = "qsgmii"; }; enet1: ethernet@e2000 { @@ -35,14 +37,37 @@ }; enet4: ethernet@e8000 { + pcsphy-handle = <&pcsphy4>, <&qsgmiib_pcs1>; + pcs-handle-names = "sgmii", "qsgmii"; }; enet5: ethernet@ea000 { + pcsphy-handle = <&pcsphy5>, <&pcsphy5>; + pcs-handle-names = "sgmii", "qsgmii"; }; enet6: ethernet@f0000 { }; enet7: ethernet@f2000 { + pcsphy-handle = <&pcsphy7>, <&qsgmiib_pcs2>, <&pcsphy7>; + pcs-handle-names = "sgmii", "qsgmii", "xfi"; + }; + + mdio@eb000 { + qsgmiib_pcs1: ethernet-pcs@1 { + compatible = "fsl,lynx-pcs"; + reg = <0x1>; + }; + + qsgmiib_pcs2: ethernet-pcs@2 { + compatible = "fsl,lynx-pcs"; + reg = <0x2>; + }; + + qsgmiib_pcs3: ethernet-pcs@3 { + compatible = "fsl,lynx-pcs"; + reg = <0x3>; + }; }; }; diff --git a/arch/arm64/boot/dts/marvell/armada-cp11x.dtsi b/arch/arm64/boot/dts/marvell/armada-cp11x.dtsi index d6c0990a267d..7d0043824f2a 100644 --- a/arch/arm64/boot/dts/marvell/armada-cp11x.dtsi +++ b/arch/arm64/boot/dts/marvell/armada-cp11x.dtsi @@ -58,6 +58,8 @@ ranges = <0x0 0x0 ADDRESSIFY(CP11X_BASE) 0x2000000>; CP11X_LABEL(ethernet): ethernet@0 { + #address-cells = <1>; + #size-cells = <0>; compatible = "marvell,armada-7k-pp22"; reg = <0x0 0x100000>, <0x129000 0xb000>, <0x220000 0x800>; clocks = <&CP11X_LABEL(clk) 1 3>, <&CP11X_LABEL(clk) 1 9>, @@ -69,7 +71,7 @@ status = "disabled"; dma-coherent; - CP11X_LABEL(eth0): eth0 { + CP11X_LABEL(eth0): ethernet-port@0 { interrupts = <39 IRQ_TYPE_LEVEL_HIGH>, <43 IRQ_TYPE_LEVEL_HIGH>, <47 IRQ_TYPE_LEVEL_HIGH>, @@ -83,12 +85,13 @@ interrupt-names = "hif0", "hif1", "hif2", "hif3", "hif4", "hif5", "hif6", "hif7", "hif8", "link"; - port-id = <0>; + reg = <0>; + port-id = <0>; /* For backward compatibility. 
*/ gop-port-id = <0>; status = "disabled"; }; - CP11X_LABEL(eth1): eth1 { + CP11X_LABEL(eth1): ethernet-port@1 { interrupts = <40 IRQ_TYPE_LEVEL_HIGH>, <44 IRQ_TYPE_LEVEL_HIGH>, <48 IRQ_TYPE_LEVEL_HIGH>, @@ -102,12 +105,13 @@ interrupt-names = "hif0", "hif1", "hif2", "hif3", "hif4", "hif5", "hif6", "hif7", "hif8", "link"; - port-id = <1>; + reg = <1>; + port-id = <1>; /* For backward compatibility. */ gop-port-id = <2>; status = "disabled"; }; - CP11X_LABEL(eth2): eth2 { + CP11X_LABEL(eth2): ethernet-port@2 { interrupts = <41 IRQ_TYPE_LEVEL_HIGH>, <45 IRQ_TYPE_LEVEL_HIGH>, <49 IRQ_TYPE_LEVEL_HIGH>, @@ -121,7 +125,8 @@ interrupt-names = "hif0", "hif1", "hif2", "hif3", "hif4", "hif5", "hif6", "hif7", "hif8", "link"; - port-id = <2>; + reg = <2>; + port-id = <2>; /* For backward compatibility. */ gop-port-id = <3>; status = "disabled"; }; diff --git a/arch/powerpc/boot/dts/fsl/qoriq-fman3-0-10g-0-best-effort.dtsi b/arch/powerpc/boot/dts/fsl/qoriq-fman3-0-10g-0-best-effort.dtsi index baa0c503e741..7e70977f282a 100644 --- a/arch/powerpc/boot/dts/fsl/qoriq-fman3-0-10g-0-best-effort.dtsi +++ b/arch/powerpc/boot/dts/fsl/qoriq-fman3-0-10g-0-best-effort.dtsi @@ -55,7 +55,8 @@ fman@400000 { reg = <0xe0000 0x1000>; fsl,fman-ports = <&fman0_rx_0x08 &fman0_tx_0x28>; ptp-timer = <&ptp_timer0>; - pcsphy-handle = <&pcsphy0>; + pcsphy-handle = <&pcsphy0>, <&pcsphy0>; + pcs-handle-names = "sgmii", "qsgmii"; }; mdio@e1000 { diff --git a/arch/powerpc/boot/dts/fsl/qoriq-fman3-0-10g-0.dtsi b/arch/powerpc/boot/dts/fsl/qoriq-fman3-0-10g-0.dtsi index 93095600e808..5f89f7c1761f 100644 --- a/arch/powerpc/boot/dts/fsl/qoriq-fman3-0-10g-0.dtsi +++ b/arch/powerpc/boot/dts/fsl/qoriq-fman3-0-10g-0.dtsi @@ -52,7 +52,15 @@ fman@400000 { compatible = "fsl,fman-memac"; reg = <0xf0000 0x1000>; fsl,fman-ports = <&fman0_rx_0x10 &fman0_tx_0x30>; - pcsphy-handle = <&pcsphy6>; + pcsphy-handle = <&pcsphy6>, <&qsgmiib_pcs2>, <&pcsphy6>; + pcs-handle-names = "sgmii", "qsgmii", "xfi"; + }; + + mdio@e9000 { + qsgmiib_pcs2: ethernet-pcs@2 { + compatible = "fsl,lynx-pcs"; + reg = <2>; + }; }; mdio@f1000 { diff --git a/arch/powerpc/boot/dts/fsl/qoriq-fman3-0-10g-1-best-effort.dtsi b/arch/powerpc/boot/dts/fsl/qoriq-fman3-0-10g-1-best-effort.dtsi index ff4bd38f0645..71eb75e82c2e 100644 --- a/arch/powerpc/boot/dts/fsl/qoriq-fman3-0-10g-1-best-effort.dtsi +++ b/arch/powerpc/boot/dts/fsl/qoriq-fman3-0-10g-1-best-effort.dtsi @@ -55,7 +55,15 @@ fman@400000 { reg = <0xe2000 0x1000>; fsl,fman-ports = <&fman0_rx_0x09 &fman0_tx_0x29>; ptp-timer = <&ptp_timer0>; - pcsphy-handle = <&pcsphy1>; + pcsphy-handle = <&pcsphy1>, <&qsgmiia_pcs1>; + pcs-handle-names = "sgmii", "qsgmii"; + }; + + mdio@e1000 { + qsgmiia_pcs1: ethernet-pcs@1 { + compatible = "fsl,lynx-pcs"; + reg = <1>; + }; }; mdio@e3000 { diff --git a/arch/powerpc/boot/dts/fsl/qoriq-fman3-0-10g-1.dtsi b/arch/powerpc/boot/dts/fsl/qoriq-fman3-0-10g-1.dtsi index 1fa38ed6f59e..fb7032ddb7fc 100644 --- a/arch/powerpc/boot/dts/fsl/qoriq-fman3-0-10g-1.dtsi +++ b/arch/powerpc/boot/dts/fsl/qoriq-fman3-0-10g-1.dtsi @@ -52,7 +52,15 @@ fman@400000 { compatible = "fsl,fman-memac"; reg = <0xf2000 0x1000>; fsl,fman-ports = <&fman0_rx_0x11 &fman0_tx_0x31>; - pcsphy-handle = <&pcsphy7>; + pcsphy-handle = <&pcsphy7>, <&qsgmiib_pcs3>, <&pcsphy7>; + pcs-handle-names = "sgmii", "qsgmii", "xfi"; + }; + + mdio@e9000 { + qsgmiib_pcs3: ethernet-pcs@3 { + compatible = "fsl,lynx-pcs"; + reg = <3>; + }; }; mdio@f3000 { diff --git a/arch/powerpc/boot/dts/fsl/qoriq-fman3-0-10g-2.dtsi 
b/arch/powerpc/boot/dts/fsl/qoriq-fman3-0-10g-2.dtsi new file mode 100644 index 000000000000..6b3609574b0f --- /dev/null +++ b/arch/powerpc/boot/dts/fsl/qoriq-fman3-0-10g-2.dtsi @@ -0,0 +1,45 @@ +// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0-or-later +/* + * QorIQ FMan v3 10g port #2 device tree stub [ controller @ offset 0x400000 ] + * + * Copyright 2022 Sean Anderson <sean.anderson@seco.com> + * Copyright 2012 - 2015 Freescale Semiconductor Inc. + */ + +fman@400000 { + fman0_rx_0x08: port@88000 { + cell-index = <0x8>; + compatible = "fsl,fman-v3-port-rx"; + reg = <0x88000 0x1000>; + fsl,fman-10g-port; + }; + + fman0_tx_0x28: port@a8000 { + cell-index = <0x28>; + compatible = "fsl,fman-v3-port-tx"; + reg = <0xa8000 0x1000>; + fsl,fman-10g-port; + }; + + ethernet@e0000 { + cell-index = <0>; + compatible = "fsl,fman-memac"; + reg = <0xe0000 0x1000>; + fsl,fman-ports = <&fman0_rx_0x08 &fman0_tx_0x28>; + ptp-timer = <&ptp_timer0>; + pcsphy-handle = <&pcsphy0>, <&pcsphy0>; + pcs-handle-names = "sgmii", "xfi"; + }; + + mdio@e1000 { + #address-cells = <1>; + #size-cells = <0>; + compatible = "fsl,fman-memac-mdio", "fsl,fman-xmdio"; + reg = <0xe1000 0x1000>; + fsl,erratum-a011043; /* must ignore read errors */ + + pcsphy0: ethernet-phy@0 { + reg = <0x0>; + }; + }; +}; diff --git a/arch/powerpc/boot/dts/fsl/qoriq-fman3-0-10g-3.dtsi b/arch/powerpc/boot/dts/fsl/qoriq-fman3-0-10g-3.dtsi new file mode 100644 index 000000000000..28ed1a85a436 --- /dev/null +++ b/arch/powerpc/boot/dts/fsl/qoriq-fman3-0-10g-3.dtsi @@ -0,0 +1,45 @@ +// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0-or-later +/* + * QorIQ FMan v3 10g port #3 device tree stub [ controller @ offset 0x400000 ] + * + * Copyright 2022 Sean Anderson <sean.anderson@seco.com> + * Copyright 2012 - 2015 Freescale Semiconductor Inc. 
+ */ + +fman@400000 { + fman0_rx_0x09: port@89000 { + cell-index = <0x9>; + compatible = "fsl,fman-v3-port-rx"; + reg = <0x89000 0x1000>; + fsl,fman-10g-port; + }; + + fman0_tx_0x29: port@a9000 { + cell-index = <0x29>; + compatible = "fsl,fman-v3-port-tx"; + reg = <0xa9000 0x1000>; + fsl,fman-10g-port; + }; + + ethernet@e2000 { + cell-index = <1>; + compatible = "fsl,fman-memac"; + reg = <0xe2000 0x1000>; + fsl,fman-ports = <&fman0_rx_0x09 &fman0_tx_0x29>; + ptp-timer = <&ptp_timer0>; + pcsphy-handle = <&pcsphy1>, <&pcsphy1>; + pcs-handle-names = "sgmii", "xfi"; + }; + + mdio@e3000 { + #address-cells = <1>; + #size-cells = <0>; + compatible = "fsl,fman-memac-mdio", "fsl,fman-xmdio"; + reg = <0xe3000 0x1000>; + fsl,erratum-a011043; /* must ignore read errors */ + + pcsphy1: ethernet-phy@0 { + reg = <0x0>; + }; + }; +}; diff --git a/arch/powerpc/boot/dts/fsl/qoriq-fman3-0-1g-0.dtsi b/arch/powerpc/boot/dts/fsl/qoriq-fman3-0-1g-0.dtsi index a8cc9780c0c4..1089d6861bfb 100644 --- a/arch/powerpc/boot/dts/fsl/qoriq-fman3-0-1g-0.dtsi +++ b/arch/powerpc/boot/dts/fsl/qoriq-fman3-0-1g-0.dtsi @@ -51,7 +51,8 @@ fman@400000 { reg = <0xe0000 0x1000>; fsl,fman-ports = <&fman0_rx_0x08 &fman0_tx_0x28>; ptp-timer = <&ptp_timer0>; - pcsphy-handle = <&pcsphy0>; + pcsphy-handle = <&pcsphy0>, <&pcsphy0>; + pcs-handle-names = "sgmii", "qsgmii"; }; mdio@e1000 { diff --git a/arch/powerpc/boot/dts/fsl/qoriq-fman3-0-1g-1.dtsi b/arch/powerpc/boot/dts/fsl/qoriq-fman3-0-1g-1.dtsi index 8b8bd70c9382..a95bbb4fc827 100644 --- a/arch/powerpc/boot/dts/fsl/qoriq-fman3-0-1g-1.dtsi +++ b/arch/powerpc/boot/dts/fsl/qoriq-fman3-0-1g-1.dtsi @@ -51,7 +51,15 @@ fman@400000 { reg = <0xe2000 0x1000>; fsl,fman-ports = <&fman0_rx_0x09 &fman0_tx_0x29>; ptp-timer = <&ptp_timer0>; - pcsphy-handle = <&pcsphy1>; + pcsphy-handle = <&pcsphy1>, <&qsgmiia_pcs1>; + pcs-handle-names = "sgmii", "qsgmii"; + }; + + mdio@e1000 { + qsgmiia_pcs1: ethernet-pcs@1 { + compatible = "fsl,lynx-pcs"; + reg = <1>; + }; }; mdio@e3000 { diff --git a/arch/powerpc/boot/dts/fsl/qoriq-fman3-0-1g-2.dtsi b/arch/powerpc/boot/dts/fsl/qoriq-fman3-0-1g-2.dtsi index 619c880b54d8..7d5af0147a25 100644 --- a/arch/powerpc/boot/dts/fsl/qoriq-fman3-0-1g-2.dtsi +++ b/arch/powerpc/boot/dts/fsl/qoriq-fman3-0-1g-2.dtsi @@ -51,7 +51,15 @@ fman@400000 { reg = <0xe4000 0x1000>; fsl,fman-ports = <&fman0_rx_0x0a &fman0_tx_0x2a>; ptp-timer = <&ptp_timer0>; - pcsphy-handle = <&pcsphy2>; + pcsphy-handle = <&pcsphy2>, <&qsgmiia_pcs2>; + pcs-handle-names = "sgmii", "qsgmii"; + }; + + mdio@e1000 { + qsgmiia_pcs2: ethernet-pcs@2 { + compatible = "fsl,lynx-pcs"; + reg = <2>; + }; }; mdio@e5000 { diff --git a/arch/powerpc/boot/dts/fsl/qoriq-fman3-0-1g-3.dtsi b/arch/powerpc/boot/dts/fsl/qoriq-fman3-0-1g-3.dtsi index d7ebb73a400d..61e5466ec854 100644 --- a/arch/powerpc/boot/dts/fsl/qoriq-fman3-0-1g-3.dtsi +++ b/arch/powerpc/boot/dts/fsl/qoriq-fman3-0-1g-3.dtsi @@ -51,7 +51,15 @@ fman@400000 { reg = <0xe6000 0x1000>; fsl,fman-ports = <&fman0_rx_0x0b &fman0_tx_0x2b>; ptp-timer = <&ptp_timer0>; - pcsphy-handle = <&pcsphy3>; + pcsphy-handle = <&pcsphy3>, <&qsgmiia_pcs3>; + pcs-handle-names = "sgmii", "qsgmii"; + }; + + mdio@e1000 { + qsgmiia_pcs3: ethernet-pcs@3 { + compatible = "fsl,lynx-pcs"; + reg = <3>; + }; }; mdio@e7000 { diff --git a/arch/powerpc/boot/dts/fsl/qoriq-fman3-0-1g-4.dtsi b/arch/powerpc/boot/dts/fsl/qoriq-fman3-0-1g-4.dtsi index b151d696a069..3ba0cdafc069 100644 --- a/arch/powerpc/boot/dts/fsl/qoriq-fman3-0-1g-4.dtsi +++ b/arch/powerpc/boot/dts/fsl/qoriq-fman3-0-1g-4.dtsi @@ -51,7 +51,8 
@@ fman@400000 { reg = <0xe8000 0x1000>; fsl,fman-ports = <&fman0_rx_0x0c &fman0_tx_0x2c>; ptp-timer = <&ptp_timer0>; - pcsphy-handle = <&pcsphy4>; + pcsphy-handle = <&pcsphy4>, <&pcsphy4>; + pcs-handle-names = "sgmii", "qsgmii"; }; mdio@e9000 { diff --git a/arch/powerpc/boot/dts/fsl/qoriq-fman3-0-1g-5.dtsi b/arch/powerpc/boot/dts/fsl/qoriq-fman3-0-1g-5.dtsi index adc0ae0013a3..51748de0a289 100644 --- a/arch/powerpc/boot/dts/fsl/qoriq-fman3-0-1g-5.dtsi +++ b/arch/powerpc/boot/dts/fsl/qoriq-fman3-0-1g-5.dtsi @@ -51,7 +51,15 @@ fman@400000 { reg = <0xea000 0x1000>; fsl,fman-ports = <&fman0_rx_0x0d &fman0_tx_0x2d>; ptp-timer = <&ptp_timer0>; - pcsphy-handle = <&pcsphy5>; + pcsphy-handle = <&pcsphy5>, <&qsgmiib_pcs1>; + pcs-handle-names = "sgmii", "qsgmii"; + }; + + mdio@e9000 { + qsgmiib_pcs1: ethernet-pcs@1 { + compatible = "fsl,lynx-pcs"; + reg = <1>; + }; }; mdio@eb000 { diff --git a/arch/powerpc/boot/dts/fsl/qoriq-fman3-1-10g-0.dtsi b/arch/powerpc/boot/dts/fsl/qoriq-fman3-1-10g-0.dtsi index 435047e0e250..ee4f5170f632 100644 --- a/arch/powerpc/boot/dts/fsl/qoriq-fman3-1-10g-0.dtsi +++ b/arch/powerpc/boot/dts/fsl/qoriq-fman3-1-10g-0.dtsi @@ -52,7 +52,15 @@ fman@500000 { compatible = "fsl,fman-memac"; reg = <0xf0000 0x1000>; fsl,fman-ports = <&fman1_rx_0x10 &fman1_tx_0x30>; - pcsphy-handle = <&pcsphy14>; + pcsphy-handle = <&pcsphy14>, <&qsgmiid_pcs2>, <&pcsphy14>; + pcs-handle-names = "sgmii", "qsgmii", "xfi"; + }; + + mdio@e9000 { + qsgmiid_pcs2: ethernet-pcs@2 { + compatible = "fsl,lynx-pcs"; + reg = <2>; + }; }; mdio@f1000 { diff --git a/arch/powerpc/boot/dts/fsl/qoriq-fman3-1-10g-1.dtsi b/arch/powerpc/boot/dts/fsl/qoriq-fman3-1-10g-1.dtsi index c098657cca0a..83d2e0ce8f7b 100644 --- a/arch/powerpc/boot/dts/fsl/qoriq-fman3-1-10g-1.dtsi +++ b/arch/powerpc/boot/dts/fsl/qoriq-fman3-1-10g-1.dtsi @@ -52,7 +52,15 @@ fman@500000 { compatible = "fsl,fman-memac"; reg = <0xf2000 0x1000>; fsl,fman-ports = <&fman1_rx_0x11 &fman1_tx_0x31>; - pcsphy-handle = <&pcsphy15>; + pcsphy-handle = <&pcsphy15>, <&qsgmiid_pcs3>, <&pcsphy15>; + pcs-handle-names = "sgmii", "qsgmii", "xfi"; + }; + + mdio@e9000 { + qsgmiid_pcs3: ethernet-pcs@3 { + compatible = "fsl,lynx-pcs"; + reg = <3>; + }; }; mdio@f3000 { diff --git a/arch/powerpc/boot/dts/fsl/qoriq-fman3-1-1g-0.dtsi b/arch/powerpc/boot/dts/fsl/qoriq-fman3-1-1g-0.dtsi index 9d06824815f3..3132fc73f133 100644 --- a/arch/powerpc/boot/dts/fsl/qoriq-fman3-1-1g-0.dtsi +++ b/arch/powerpc/boot/dts/fsl/qoriq-fman3-1-1g-0.dtsi @@ -51,7 +51,8 @@ fman@500000 { reg = <0xe0000 0x1000>; fsl,fman-ports = <&fman1_rx_0x08 &fman1_tx_0x28>; ptp-timer = <&ptp_timer1>; - pcsphy-handle = <&pcsphy8>; + pcsphy-handle = <&pcsphy8>, <&pcsphy8>; + pcs-handle-names = "sgmii", "qsgmii"; }; mdio@e1000 { diff --git a/arch/powerpc/boot/dts/fsl/qoriq-fman3-1-1g-1.dtsi b/arch/powerpc/boot/dts/fsl/qoriq-fman3-1-1g-1.dtsi index 70e947730c4b..75e904d96602 100644 --- a/arch/powerpc/boot/dts/fsl/qoriq-fman3-1-1g-1.dtsi +++ b/arch/powerpc/boot/dts/fsl/qoriq-fman3-1-1g-1.dtsi @@ -51,7 +51,15 @@ fman@500000 { reg = <0xe2000 0x1000>; fsl,fman-ports = <&fman1_rx_0x09 &fman1_tx_0x29>; ptp-timer = <&ptp_timer1>; - pcsphy-handle = <&pcsphy9>; + pcsphy-handle = <&pcsphy9>, <&qsgmiic_pcs1>; + pcs-handle-names = "sgmii", "qsgmii"; + }; + + mdio@e1000 { + qsgmiic_pcs1: ethernet-pcs@1 { + compatible = "fsl,lynx-pcs"; + reg = <1>; + }; }; mdio@e3000 { diff --git a/arch/powerpc/boot/dts/fsl/qoriq-fman3-1-1g-2.dtsi b/arch/powerpc/boot/dts/fsl/qoriq-fman3-1-1g-2.dtsi index ad96e6529595..69f2cc7b8f19 100644 --- 
a/arch/powerpc/boot/dts/fsl/qoriq-fman3-1-1g-2.dtsi +++ b/arch/powerpc/boot/dts/fsl/qoriq-fman3-1-1g-2.dtsi @@ -51,7 +51,15 @@ fman@500000 { reg = <0xe4000 0x1000>; fsl,fman-ports = <&fman1_rx_0x0a &fman1_tx_0x2a>; ptp-timer = <&ptp_timer1>; - pcsphy-handle = <&pcsphy10>; + pcsphy-handle = <&pcsphy10>, <&qsgmiic_pcs2>; + pcs-handle-names = "sgmii", "qsgmii"; + }; + + mdio@e1000 { + qsgmiic_pcs2: ethernet-pcs@2 { + compatible = "fsl,lynx-pcs"; + reg = <2>; + }; }; mdio@e5000 { diff --git a/arch/powerpc/boot/dts/fsl/qoriq-fman3-1-1g-3.dtsi b/arch/powerpc/boot/dts/fsl/qoriq-fman3-1-1g-3.dtsi index 034bc4b71f7a..b3aaf01d7da0 100644 --- a/arch/powerpc/boot/dts/fsl/qoriq-fman3-1-1g-3.dtsi +++ b/arch/powerpc/boot/dts/fsl/qoriq-fman3-1-1g-3.dtsi @@ -51,7 +51,15 @@ fman@500000 { reg = <0xe6000 0x1000>; fsl,fman-ports = <&fman1_rx_0x0b &fman1_tx_0x2b>; ptp-timer = <&ptp_timer1>; - pcsphy-handle = <&pcsphy11>; + pcsphy-handle = <&pcsphy11>, <&qsgmiic_pcs3>; + pcs-handle-names = "sgmii", "qsgmii"; + }; + + mdio@e1000 { + qsgmiic_pcs3: ethernet-pcs@3 { + compatible = "fsl,lynx-pcs"; + reg = <3>; + }; }; mdio@e7000 { diff --git a/arch/powerpc/boot/dts/fsl/qoriq-fman3-1-1g-4.dtsi b/arch/powerpc/boot/dts/fsl/qoriq-fman3-1-1g-4.dtsi index 93ca23d82b39..18e020432807 100644 --- a/arch/powerpc/boot/dts/fsl/qoriq-fman3-1-1g-4.dtsi +++ b/arch/powerpc/boot/dts/fsl/qoriq-fman3-1-1g-4.dtsi @@ -51,7 +51,8 @@ fman@500000 { reg = <0xe8000 0x1000>; fsl,fman-ports = <&fman1_rx_0x0c &fman1_tx_0x2c>; ptp-timer = <&ptp_timer1>; - pcsphy-handle = <&pcsphy12>; + pcsphy-handle = <&pcsphy12>, <&pcsphy12>; + pcs-handle-names = "sgmii", "qsgmii"; }; mdio@e9000 { diff --git a/arch/powerpc/boot/dts/fsl/qoriq-fman3-1-1g-5.dtsi b/arch/powerpc/boot/dts/fsl/qoriq-fman3-1-1g-5.dtsi index 23b3117a2fd2..55f329d13f19 100644 --- a/arch/powerpc/boot/dts/fsl/qoriq-fman3-1-1g-5.dtsi +++ b/arch/powerpc/boot/dts/fsl/qoriq-fman3-1-1g-5.dtsi @@ -51,7 +51,15 @@ fman@500000 { reg = <0xea000 0x1000>; fsl,fman-ports = <&fman1_rx_0x0d &fman1_tx_0x2d>; ptp-timer = <&ptp_timer1>; - pcsphy-handle = <&pcsphy13>; + pcsphy-handle = <&pcsphy13>, <&qsgmiid_pcs1>; + pcs-handle-names = "sgmii", "qsgmii"; + }; + + mdio@e9000 { + qsgmiid_pcs1: ethernet-pcs@1 { + compatible = "fsl,lynx-pcs"; + reg = <1>; + }; }; mdio@eb000 { diff --git a/arch/powerpc/boot/dts/fsl/t2081si-post.dtsi b/arch/powerpc/boot/dts/fsl/t2081si-post.dtsi index ecbb447920bc..74e17e134387 100644 --- a/arch/powerpc/boot/dts/fsl/t2081si-post.dtsi +++ b/arch/powerpc/boot/dts/fsl/t2081si-post.dtsi @@ -609,8 +609,8 @@ /include/ "qoriq-bman1.dtsi" /include/ "qoriq-fman3-0.dtsi" -/include/ "qoriq-fman3-0-1g-0.dtsi" -/include/ "qoriq-fman3-0-1g-1.dtsi" +/include/ "qoriq-fman3-0-10g-2.dtsi" +/include/ "qoriq-fman3-0-10g-3.dtsi" /include/ "qoriq-fman3-0-1g-2.dtsi" /include/ "qoriq-fman3-0-1g-3.dtsi" /include/ "qoriq-fman3-0-1g-4.dtsi" diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c index 00127abd89ee..51afd6d0c05f 100644 --- a/arch/x86/net/bpf_jit_comp.c +++ b/arch/x86/net/bpf_jit_comp.c @@ -1239,8 +1239,7 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image, u8 *rw_image /* speculation barrier */ case BPF_ST | BPF_NOSPEC: - if (boot_cpu_has(X86_FEATURE_XMM2)) - EMIT_LFENCE(); + EMIT_LFENCE(); break; /* ST: *(u8*)(dst_reg + off) = imm */ diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c index e58a1e0cadd2..455b555275f1 100644 --- a/drivers/net/bonding/bond_3ad.c +++ b/drivers/net/bonding/bond_3ad.c @@ -75,6 +75,7 @@ enum 
ad_link_speed_type { AD_LINK_SPEED_100000MBPS, AD_LINK_SPEED_200000MBPS, AD_LINK_SPEED_400000MBPS, + AD_LINK_SPEED_800000MBPS, }; /* compare MAC addresses */ @@ -251,6 +252,7 @@ static inline int __check_agg_selection_timer(struct port *port) * %AD_LINK_SPEED_100000MBPS * %AD_LINK_SPEED_200000MBPS * %AD_LINK_SPEED_400000MBPS + * %AD_LINK_SPEED_800000MBPS */ static u16 __get_link_speed(struct port *port) { @@ -326,6 +328,10 @@ static u16 __get_link_speed(struct port *port) speed = AD_LINK_SPEED_400000MBPS; break; + case SPEED_800000: + speed = AD_LINK_SPEED_800000MBPS; + break; + default: /* unknown speed value from ethtool. shouldn't happen */ if (slave->speed != SPEED_UNKNOWN) @@ -753,6 +759,9 @@ static u32 __get_agg_bandwidth(struct aggregator *aggregator) case AD_LINK_SPEED_400000MBPS: bandwidth = nports * 400000; break; + case AD_LINK_SPEED_800000MBPS: + bandwidth = nports * 800000; + break; default: bandwidth = 0; /* to silence the compiler */ } diff --git a/drivers/net/ethernet/adi/adin1110.c b/drivers/net/ethernet/adi/adin1110.c index 1744d623999d..1c0015b55993 100644 --- a/drivers/net/ethernet/adi/adin1110.c +++ b/drivers/net/ethernet/adi/adin1110.c @@ -196,7 +196,7 @@ static int adin1110_read_reg(struct adin1110_priv *priv, u16 reg, u32 *val) { u32 header_len = ADIN1110_RD_HEADER_LEN; u32 read_len = ADIN1110_REG_LEN; - struct spi_transfer t[2] = {0}; + struct spi_transfer t = {0}; int ret; priv->data[0] = ADIN1110_CD | FIELD_GET(GENMASK(12, 8), reg); @@ -209,17 +209,15 @@ static int adin1110_read_reg(struct adin1110_priv *priv, u16 reg, u32 *val) header_len++; } - t[0].tx_buf = &priv->data[0]; - t[0].len = header_len; - if (priv->append_crc) read_len++; memset(&priv->data[header_len], 0, read_len); - t[1].rx_buf = &priv->data[header_len]; - t[1].len = read_len; + t.tx_buf = &priv->data[0]; + t.rx_buf = &priv->data[0]; + t.len = read_len + header_len; - ret = spi_sync_transfer(priv->spidev, t, 2); + ret = spi_sync_transfer(priv->spidev, &t, 1); if (ret) return ret; @@ -296,7 +294,7 @@ static int adin1110_read_fifo(struct adin1110_port_priv *port_priv) { struct adin1110_priv *priv = port_priv->priv; u32 header_len = ADIN1110_RD_HEADER_LEN; - struct spi_transfer t[2] = {0}; + struct spi_transfer t; u32 frame_size_no_fcs; struct sk_buff *rxb; u32 frame_size; @@ -327,12 +325,7 @@ static int adin1110_read_fifo(struct adin1110_port_priv *port_priv) return ret; frame_size_no_fcs = frame_size - ADIN1110_FRAME_HEADER_LEN - ADIN1110_FEC_LEN; - - rxb = netdev_alloc_skb(port_priv->netdev, round_len); - if (!rxb) - return -ENOMEM; - - memset(priv->data, 0, round_len + ADIN1110_RD_HEADER_LEN); + memset(priv->data, 0, ADIN1110_RD_HEADER_LEN); priv->data[0] = ADIN1110_CD | FIELD_GET(GENMASK(12, 8), reg); priv->data[1] = FIELD_GET(GENMASK(7, 0), reg); @@ -342,21 +335,23 @@ static int adin1110_read_fifo(struct adin1110_port_priv *port_priv) header_len++; } - skb_put(rxb, frame_size_no_fcs + ADIN1110_FRAME_HEADER_LEN); + rxb = netdev_alloc_skb(port_priv->netdev, round_len + header_len); + if (!rxb) + return -ENOMEM; - t[0].tx_buf = &priv->data[0]; - t[0].len = header_len; + skb_put(rxb, frame_size_no_fcs + header_len + ADIN1110_FRAME_HEADER_LEN); - t[1].rx_buf = &rxb->data[0]; - t[1].len = round_len; + t.tx_buf = &priv->data[0]; + t.rx_buf = &rxb->data[0]; + t.len = header_len + round_len; - ret = spi_sync_transfer(priv->spidev, t, 2); + ret = spi_sync_transfer(priv->spidev, &t, 1); if (ret) { kfree_skb(rxb); return ret; } - skb_pull(rxb, ADIN1110_FRAME_HEADER_LEN); + skb_pull(rxb, header_len + 
ADIN1110_FRAME_HEADER_LEN); rxb->protocol = eth_type_trans(rxb, port_priv->netdev); if ((port_priv->flags & IFF_ALLMULTI && rxb->pkt_type == PACKET_MULTICAST) || @@ -1087,9 +1082,30 @@ static void adin1110_adjust_link(struct net_device *dev) */ static int adin1110_check_spi(struct adin1110_priv *priv) { + struct gpio_desc *reset_gpio; int ret; u32 val; + reset_gpio = devm_gpiod_get_optional(&priv->spidev->dev, "reset", + GPIOD_OUT_LOW); + if (reset_gpio) { + /* MISO pin is used for internal configuration, can't have + * anyone else disturbing the SDO line. + */ + spi_bus_lock(priv->spidev->controller); + + gpiod_set_value(reset_gpio, 1); + fsleep(10000); + gpiod_set_value(reset_gpio, 0); + + /* Need to wait 90 ms before interacting with + * the MAC after a HW reset. + */ + fsleep(90000); + + spi_bus_unlock(priv->spidev->controller); + } + ret = adin1110_read_reg(priv, ADIN1110_PHY_ID, &val); if (ret < 0) return ret; diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c index 25c450606985..a8ce8d0cf9c4 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c +++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c @@ -1387,7 +1387,8 @@ static int bcmgenet_validate_flow(struct net_device *dev, struct ethtool_usrip4_spec *l4_mask; struct ethhdr *eth_mask; - if (cmd->fs.location >= MAX_NUM_OF_FS_RULES) { + if (cmd->fs.location >= MAX_NUM_OF_FS_RULES && + cmd->fs.location != RX_CLS_LOC_ANY) { netdev_err(dev, "rxnfc: Invalid location (%d)\n", cmd->fs.location); return -EINVAL; @@ -1452,7 +1453,7 @@ static int bcmgenet_insert_flow(struct net_device *dev, { struct bcmgenet_priv *priv = netdev_priv(dev); struct bcmgenet_rxnfc_rule *loc_rule; - int err; + int err, i; if (priv->hw_params->hfb_filter_size < 128) { netdev_err(dev, "rxnfc: Not supported by this device\n"); @@ -1470,7 +1471,29 @@ static int bcmgenet_insert_flow(struct net_device *dev, if (err) return err; - loc_rule = &priv->rxnfc_rules[cmd->fs.location]; + if (cmd->fs.location == RX_CLS_LOC_ANY) { + list_for_each_entry(loc_rule, &priv->rxnfc_list, list) { + cmd->fs.location = loc_rule->fs.location; + err = memcmp(&loc_rule->fs, &cmd->fs, + sizeof(struct ethtool_rx_flow_spec)); + if (!err) + /* rule exists so return current location */ + return 0; + } + for (i = 0; i < MAX_NUM_OF_FS_RULES; i++) { + loc_rule = &priv->rxnfc_rules[i]; + if (loc_rule->state == BCMGENET_RXNFC_STATE_UNUSED) { + cmd->fs.location = i; + break; + } + } + if (i == MAX_NUM_OF_FS_RULES) { + cmd->fs.location = RX_CLS_LOC_ANY; + return -ENOSPC; + } + } else { + loc_rule = &priv->rxnfc_rules[cmd->fs.location]; + } if (loc_rule->state == BCMGENET_RXNFC_STATE_ENABLED) bcmgenet_hfb_disable_filter(priv, cmd->fs.location); if (loc_rule->state != BCMGENET_RXNFC_STATE_UNUSED) { @@ -1583,7 +1606,7 @@ static int bcmgenet_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd, break; case ETHTOOL_GRXCLSRLCNT: cmd->rule_cnt = bcmgenet_get_num_flows(priv); - cmd->data = MAX_NUM_OF_FS_RULES; + cmd->data = MAX_NUM_OF_FS_RULES | RX_CLS_LOC_SPECIAL; break; case ETHTOOL_GRXCLSRULE: err = bcmgenet_get_flow(dev, cmd, cmd->fs.location); diff --git a/drivers/net/ethernet/cisco/enic/enic.h b/drivers/net/ethernet/cisco/enic/enic.h index a0964b629ffc..300ad05ee05b 100644 --- a/drivers/net/ethernet/cisco/enic/enic.h +++ b/drivers/net/ethernet/cisco/enic/enic.h @@ -226,21 +226,6 @@ static inline unsigned int enic_cq_wq(struct enic *enic, unsigned int wq) return enic->rq_count + wq; } -static inline unsigned int enic_legacy_io_intr(void) 
-{ - return 0; -} - -static inline unsigned int enic_legacy_err_intr(void) -{ - return 1; -} - -static inline unsigned int enic_legacy_notify_intr(void) -{ - return 2; -} - static inline unsigned int enic_msix_rq_intr(struct enic *enic, unsigned int rq) { @@ -258,6 +243,10 @@ static inline unsigned int enic_msix_err_intr(struct enic *enic) return enic->rq_count + enic->wq_count; } +#define ENIC_LEGACY_IO_INTR 0 +#define ENIC_LEGACY_ERR_INTR 1 +#define ENIC_LEGACY_NOTIFY_INTR 2 + static inline unsigned int enic_msix_notify_intr(struct enic *enic) { return enic->rq_count + enic->wq_count + 1; @@ -267,7 +256,7 @@ static inline bool enic_is_err_intr(struct enic *enic, int intr) { switch (vnic_dev_get_intr_mode(enic->vdev)) { case VNIC_DEV_INTR_MODE_INTX: - return intr == enic_legacy_err_intr(); + return intr == ENIC_LEGACY_ERR_INTR; case VNIC_DEV_INTR_MODE_MSIX: return intr == enic_msix_err_intr(enic); case VNIC_DEV_INTR_MODE_MSI: @@ -280,7 +269,7 @@ static inline bool enic_is_notify_intr(struct enic *enic, int intr) { switch (vnic_dev_get_intr_mode(enic->vdev)) { case VNIC_DEV_INTR_MODE_INTX: - return intr == enic_legacy_notify_intr(); + return intr == ENIC_LEGACY_NOTIFY_INTR; case VNIC_DEV_INTR_MODE_MSIX: return intr == enic_msix_notify_intr(enic); case VNIC_DEV_INTR_MODE_MSI: diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c index 29500d32e362..37bd38d772e8 100644 --- a/drivers/net/ethernet/cisco/enic/enic_main.c +++ b/drivers/net/ethernet/cisco/enic/enic_main.c @@ -448,9 +448,9 @@ static irqreturn_t enic_isr_legacy(int irq, void *data) { struct net_device *netdev = data; struct enic *enic = netdev_priv(netdev); - unsigned int io_intr = enic_legacy_io_intr(); - unsigned int err_intr = enic_legacy_err_intr(); - unsigned int notify_intr = enic_legacy_notify_intr(); + unsigned int io_intr = ENIC_LEGACY_IO_INTR; + unsigned int err_intr = ENIC_LEGACY_ERR_INTR; + unsigned int notify_intr = ENIC_LEGACY_NOTIFY_INTR; u32 pba; vnic_intr_mask(&enic->intr[io_intr]); @@ -1507,7 +1507,7 @@ static int enic_poll(struct napi_struct *napi, int budget) struct enic *enic = netdev_priv(netdev); unsigned int cq_rq = enic_cq_rq(enic, 0); unsigned int cq_wq = enic_cq_wq(enic, 0); - unsigned int intr = enic_legacy_io_intr(); + unsigned int intr = ENIC_LEGACY_IO_INTR; unsigned int rq_work_to_do = budget; unsigned int wq_work_to_do = ENIC_WQ_NAPI_BUDGET; unsigned int work_done, rq_work_done = 0, wq_work_done; @@ -1856,8 +1856,7 @@ static int enic_dev_notify_set(struct enic *enic) spin_lock_bh(&enic->devcmd_lock); switch (vnic_dev_get_intr_mode(enic->vdev)) { case VNIC_DEV_INTR_MODE_INTX: - err = vnic_dev_notify_set(enic->vdev, - enic_legacy_notify_intr()); + err = vnic_dev_notify_set(enic->vdev, ENIC_LEGACY_NOTIFY_INTR); break; case VNIC_DEV_INTR_MODE_MSIX: err = vnic_dev_notify_set(enic->vdev, diff --git a/drivers/net/ethernet/freescale/dpaa/Kconfig b/drivers/net/ethernet/freescale/dpaa/Kconfig index 0e1439fd00bd..2b560661c82a 100644 --- a/drivers/net/ethernet/freescale/dpaa/Kconfig +++ b/drivers/net/ethernet/freescale/dpaa/Kconfig @@ -2,8 +2,8 @@ menuconfig FSL_DPAA_ETH tristate "DPAA Ethernet" depends on FSL_DPAA && FSL_FMAN - select PHYLIB - select FIXED_PHY + select PHYLINK + select PCS_LYNX help Data Path Acceleration Architecture Ethernet driver, supporting the Freescale QorIQ chips. 
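The DPAA Kconfig hunk above trades PHYLIB/FIXED_PHY for PHYLINK and PCS_LYNX, matching the dpaa_eth.c conversion that follows: the MAC now creates a phylink instance and, for SGMII/QSGMII/XFI links, drives a Lynx PCS reached over the internal MDIO bus. Below is a minimal sketch of the device tree shape this pairing consumes, modeled on the fsl-ls1046 stubs and the fsl,fman-dtsec example earlier in this series; the labels, addresses and external PHY are illustrative, not taken from any one board.

    ethernet@e8000 {
            compatible = "fsl,fman-memac";
            reg = <0xe8000 0x1000>;
            pcs-handle = <&pcsphy4>, <&qsgmiib_pcs1>;
            pcs-handle-names = "sgmii", "qsgmii";
            phy-handle = <&qsgmii_phy1>;    /* illustrative external PHY */
            phy-connection-type = "qsgmii";
    };

    mdio@e9000 {
            #address-cells = <1>;
            #size-cells = <0>;

            qsgmiib_pcs1: ethernet-pcs@1 {
                    compatible = "fsl,lynx-pcs";
                    reg = <0x1>;
            };
    };
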
diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c index fc68a32ce2f7..3f8032947d86 100644 --- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c +++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c @@ -264,8 +264,19 @@ static int dpaa_netdev_init(struct net_device *net_dev, net_dev->needed_headroom = priv->tx_headroom; net_dev->watchdog_timeo = msecs_to_jiffies(tx_timeout); - mac_dev->net_dev = net_dev; + /* The rest of the config is filled in by the mac device already */ + mac_dev->phylink_config.dev = &net_dev->dev; + mac_dev->phylink_config.type = PHYLINK_NETDEV; mac_dev->update_speed = dpaa_eth_cgr_set_speed; + mac_dev->phylink = phylink_create(&mac_dev->phylink_config, + dev_fwnode(mac_dev->dev), + mac_dev->phy_if, + mac_dev->phylink_ops); + if (IS_ERR(mac_dev->phylink)) { + err = PTR_ERR(mac_dev->phylink); + dev_err_probe(dev, err, "Could not create phylink\n"); + return err; + } /* start without the RUNNING flag, phylib controls it later */ netif_carrier_off(net_dev); @@ -273,6 +284,7 @@ static int dpaa_netdev_init(struct net_device *net_dev, err = register_netdev(net_dev); if (err < 0) { dev_err(dev, "register_netdev() = %d\n", err); + phylink_destroy(mac_dev->phylink); return err; } @@ -294,8 +306,7 @@ static int dpaa_stop(struct net_device *net_dev) */ msleep(200); - if (mac_dev->phy_dev) - phy_stop(mac_dev->phy_dev); + phylink_stop(mac_dev->phylink); mac_dev->disable(mac_dev->fman_mac); for (i = 0; i < ARRAY_SIZE(mac_dev->port); i++) { @@ -304,8 +315,7 @@ static int dpaa_stop(struct net_device *net_dev) err = error; } - if (net_dev->phydev) - phy_disconnect(net_dev->phydev); + phylink_disconnect_phy(mac_dev->phylink); net_dev->phydev = NULL; msleep(200); @@ -833,10 +843,10 @@ static int dpaa_eth_cgr_init(struct dpaa_priv *priv) /* Set different thresholds based on the configured MAC speed. * This may turn suboptimal if the MAC is reconfigured at another - * speed, so MACs must call dpaa_eth_cgr_set_speed in their adjust_link + * speed, so MACs must call dpaa_eth_cgr_set_speed in their link_up * callback. 
*/ - if (priv->mac_dev->if_support & SUPPORTED_10000baseT_Full) + if (priv->mac_dev->phylink_config.mac_capabilities & MAC_10000FD) cs_th = DPAA_CS_THRESHOLD_10G; else cs_th = DPAA_CS_THRESHOLD_1G; @@ -865,7 +875,7 @@ out_error: static void dpaa_eth_cgr_set_speed(struct mac_device *mac_dev, int speed) { - struct net_device *net_dev = mac_dev->net_dev; + struct net_device *net_dev = to_net_dev(mac_dev->phylink_config.dev); struct dpaa_priv *priv = netdev_priv(net_dev); struct qm_mcc_initcgr opts = { }; u32 cs_th; @@ -2904,58 +2914,6 @@ static void dpaa_eth_napi_disable(struct dpaa_priv *priv) } } -static void dpaa_adjust_link(struct net_device *net_dev) -{ - struct mac_device *mac_dev; - struct dpaa_priv *priv; - - priv = netdev_priv(net_dev); - mac_dev = priv->mac_dev; - mac_dev->adjust_link(mac_dev); -} - -/* The Aquantia PHYs are capable of performing rate adaptation */ -#define PHY_VEND_AQUANTIA 0x03a1b400 -#define PHY_VEND_AQUANTIA2 0x31c31c00 - -static int dpaa_phy_init(struct net_device *net_dev) -{ - __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, }; - struct mac_device *mac_dev; - struct phy_device *phy_dev; - struct dpaa_priv *priv; - u32 phy_vendor; - - priv = netdev_priv(net_dev); - mac_dev = priv->mac_dev; - - phy_dev = of_phy_connect(net_dev, mac_dev->phy_node, - &dpaa_adjust_link, 0, - mac_dev->phy_if); - if (!phy_dev) { - netif_err(priv, ifup, net_dev, "init_phy() failed\n"); - return -ENODEV; - } - - phy_vendor = phy_dev->drv->phy_id & GENMASK(31, 10); - /* Unless the PHY is capable of rate adaptation */ - if (mac_dev->phy_if != PHY_INTERFACE_MODE_XGMII || - (phy_vendor != PHY_VEND_AQUANTIA && - phy_vendor != PHY_VEND_AQUANTIA2)) { - /* remove any features not supported by the controller */ - ethtool_convert_legacy_u32_to_link_mode(mask, - mac_dev->if_support); - linkmode_and(phy_dev->supported, phy_dev->supported, mask); - } - - phy_support_asym_pause(phy_dev); - - mac_dev->phy_dev = phy_dev; - net_dev->phydev = phy_dev; - - return 0; -} - static int dpaa_open(struct net_device *net_dev) { struct mac_device *mac_dev; @@ -2966,7 +2924,8 @@ static int dpaa_open(struct net_device *net_dev) mac_dev = priv->mac_dev; dpaa_eth_napi_enable(priv); - err = dpaa_phy_init(net_dev); + err = phylink_of_phy_connect(mac_dev->phylink, + mac_dev->dev->of_node, 0); if (err) goto phy_init_failed; @@ -2981,7 +2940,7 @@ static int dpaa_open(struct net_device *net_dev) netif_err(priv, ifup, net_dev, "mac_dev->enable() = %d\n", err); goto mac_start_failed; } - phy_start(priv->mac_dev->phy_dev); + phylink_start(mac_dev->phylink); netif_tx_start_all_queues(net_dev); @@ -2990,6 +2949,7 @@ static int dpaa_open(struct net_device *net_dev) mac_start_failed: for (i = 0; i < ARRAY_SIZE(mac_dev->port); i++) fman_port_disable(mac_dev->port[i]); + phylink_disconnect_phy(mac_dev->phylink); phy_init_failed: dpaa_eth_napi_disable(priv); @@ -3145,10 +3105,12 @@ static int dpaa_ts_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) static int dpaa_ioctl(struct net_device *net_dev, struct ifreq *rq, int cmd) { int ret = -EINVAL; + struct dpaa_priv *priv = netdev_priv(net_dev); if (cmd == SIOCGMIIREG) { if (net_dev->phydev) - return phy_mii_ioctl(net_dev->phydev, rq, cmd); + return phylink_mii_ioctl(priv->mac_dev->phylink, rq, + cmd); } if (cmd == SIOCSHWTSTAMP) @@ -3551,6 +3513,7 @@ static int dpaa_remove(struct platform_device *pdev) dev_set_drvdata(dev, NULL); unregister_netdev(net_dev); + phylink_destroy(priv->mac_dev->phylink); err = dpaa_fq_free(dev, &priv->dpaa_fq_list); diff --git 
a/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c b/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c index 769e936a263c..9c71cbbb13d8 100644 --- a/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c +++ b/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c @@ -54,27 +54,19 @@ static char dpaa_stats_global[][ETH_GSTRING_LEN] = { static int dpaa_get_link_ksettings(struct net_device *net_dev, struct ethtool_link_ksettings *cmd) { - if (!net_dev->phydev) - return 0; + struct dpaa_priv *priv = netdev_priv(net_dev); + struct mac_device *mac_dev = priv->mac_dev; - phy_ethtool_ksettings_get(net_dev->phydev, cmd); - - return 0; + return phylink_ethtool_ksettings_get(mac_dev->phylink, cmd); } static int dpaa_set_link_ksettings(struct net_device *net_dev, const struct ethtool_link_ksettings *cmd) { - int err; - - if (!net_dev->phydev) - return -ENODEV; - - err = phy_ethtool_ksettings_set(net_dev->phydev, cmd); - if (err < 0) - netdev_err(net_dev, "phy_ethtool_ksettings_set() = %d\n", err); + struct dpaa_priv *priv = netdev_priv(net_dev); + struct mac_device *mac_dev = priv->mac_dev; - return err; + return phylink_ethtool_ksettings_set(mac_dev->phylink, cmd); } static void dpaa_get_drvinfo(struct net_device *net_dev, @@ -99,80 +91,28 @@ static void dpaa_set_msglevel(struct net_device *net_dev, static int dpaa_nway_reset(struct net_device *net_dev) { - int err; - - if (!net_dev->phydev) - return -ENODEV; - - err = 0; - if (net_dev->phydev->autoneg) { - err = phy_start_aneg(net_dev->phydev); - if (err < 0) - netdev_err(net_dev, "phy_start_aneg() = %d\n", - err); - } + struct dpaa_priv *priv = netdev_priv(net_dev); + struct mac_device *mac_dev = priv->mac_dev; - return err; + return phylink_ethtool_nway_reset(mac_dev->phylink); } static void dpaa_get_pauseparam(struct net_device *net_dev, struct ethtool_pauseparam *epause) { - struct mac_device *mac_dev; - struct dpaa_priv *priv; - - priv = netdev_priv(net_dev); - mac_dev = priv->mac_dev; - - if (!net_dev->phydev) - return; + struct dpaa_priv *priv = netdev_priv(net_dev); + struct mac_device *mac_dev = priv->mac_dev; - epause->autoneg = mac_dev->autoneg_pause; - epause->rx_pause = mac_dev->rx_pause_active; - epause->tx_pause = mac_dev->tx_pause_active; + phylink_ethtool_get_pauseparam(mac_dev->phylink, epause); } static int dpaa_set_pauseparam(struct net_device *net_dev, struct ethtool_pauseparam *epause) { - struct mac_device *mac_dev; - struct phy_device *phydev; - bool rx_pause, tx_pause; - struct dpaa_priv *priv; - int err; - - priv = netdev_priv(net_dev); - mac_dev = priv->mac_dev; - - phydev = net_dev->phydev; - if (!phydev) { - netdev_err(net_dev, "phy device not initialized\n"); - return -ENODEV; - } - - if (!phy_validate_pause(phydev, epause)) - return -EINVAL; - - /* The MAC should know how to handle PAUSE frame autonegotiation before - * adjust_link is triggered by a forced renegotiation of sym/asym PAUSE - * settings. - */ - mac_dev->autoneg_pause = !!epause->autoneg; - mac_dev->rx_pause_req = !!epause->rx_pause; - mac_dev->tx_pause_req = !!epause->tx_pause; - - /* Determine the sym/asym advertised PAUSE capabilities from the desired - * rx/tx pause settings. 
- */ - - phy_set_asym_pause(phydev, epause->rx_pause, epause->tx_pause); - - fman_get_pause_cfg(mac_dev, &rx_pause, &tx_pause); - err = fman_set_mac_active_pause(mac_dev, rx_pause, tx_pause); - if (err < 0) - netdev_err(net_dev, "set_mac_active_pause() = %d\n", err); + struct dpaa_priv *priv = netdev_priv(net_dev); + struct mac_device *mac_dev = priv->mac_dev; - return err; + return phylink_ethtool_set_pauseparam(mac_dev->phylink, epause); } static int dpaa_get_sset_count(struct net_device *net_dev, int type) diff --git a/drivers/net/ethernet/freescale/dpaa2/Makefile b/drivers/net/ethernet/freescale/dpaa2/Makefile index 3d9842af7f10..1b05ba8d1cbf 100644 --- a/drivers/net/ethernet/freescale/dpaa2/Makefile +++ b/drivers/net/ethernet/freescale/dpaa2/Makefile @@ -7,7 +7,7 @@ obj-$(CONFIG_FSL_DPAA2_ETH) += fsl-dpaa2-eth.o obj-$(CONFIG_FSL_DPAA2_PTP_CLOCK) += fsl-dpaa2-ptp.o obj-$(CONFIG_FSL_DPAA2_SWITCH) += fsl-dpaa2-switch.o -fsl-dpaa2-eth-objs := dpaa2-eth.o dpaa2-ethtool.o dpni.o dpaa2-mac.o dpmac.o dpaa2-eth-devlink.o +fsl-dpaa2-eth-objs := dpaa2-eth.o dpaa2-ethtool.o dpni.o dpaa2-mac.o dpmac.o dpaa2-eth-devlink.o dpaa2-xsk.o fsl-dpaa2-eth-${CONFIG_FSL_DPAA2_ETH_DCB} += dpaa2-eth-dcb.o fsl-dpaa2-eth-${CONFIG_DEBUG_FS} += dpaa2-eth-debugfs.o fsl-dpaa2-ptp-objs := dpaa2-ptp.o dprtc.o diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-debugfs.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-debugfs.c index 8356af4631fd..1af254caeb0d 100644 --- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-debugfs.c +++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-debugfs.c @@ -98,14 +98,14 @@ static int dpaa2_dbg_ch_show(struct seq_file *file, void *offset) int i; seq_printf(file, "Channel stats for %s:\n", priv->net_dev->name); - seq_printf(file, "%s%16s%16s%16s%16s%16s%16s\n", - "CHID", "CPU", "Deq busy", "Frames", "CDANs", + seq_printf(file, "%s %5s%16s%16s%16s%16s%16s%16s\n", + "IDX", "CHID", "CPU", "Deq busy", "Frames", "CDANs", "Avg Frm/CDAN", "Buf count"); for (i = 0; i < priv->num_channels; i++) { ch = priv->channel[i]; - seq_printf(file, "%4d%16d%16llu%16llu%16llu%16llu%16d\n", - ch->ch_id, + seq_printf(file, "%3s%d%6d%16d%16llu%16llu%16llu%16llu%16d\n", + "CH#", i, ch->ch_id, ch->nctx.desired_cpu, ch->stats.dequeue_portal_busy, ch->stats.frames, @@ -119,6 +119,51 @@ static int dpaa2_dbg_ch_show(struct seq_file *file, void *offset) DEFINE_SHOW_ATTRIBUTE(dpaa2_dbg_ch); +static int dpaa2_dbg_bp_show(struct seq_file *file, void *offset) +{ + struct dpaa2_eth_priv *priv = (struct dpaa2_eth_priv *)file->private; + int i, j, num_queues, buf_cnt; + struct dpaa2_eth_bp *bp; + char ch_name[10]; + int err; + + /* Print out the header */ + seq_printf(file, "Buffer pool info for %s:\n", priv->net_dev->name); + seq_printf(file, "%s %10s%15s", "IDX", "BPID", "Buf count"); + num_queues = dpaa2_eth_queue_count(priv); + for (i = 0; i < num_queues; i++) { + snprintf(ch_name, sizeof(ch_name), "CH#%d", i); + seq_printf(file, "%10s", ch_name); + } + seq_printf(file, "\n"); + + /* For each buffer pool, print out its BPID, the number of buffers in + * that buffer pool and the channels which are using it. 
+ */ + for (i = 0; i < priv->num_bps; i++) { + bp = priv->bp[i]; + + err = dpaa2_io_query_bp_count(NULL, bp->bpid, &buf_cnt); + if (err) { + netdev_warn(priv->net_dev, "Buffer count query error %d\n", err); + return err; + } + + seq_printf(file, "%3s%d%10d%15d", "BP#", i, bp->bpid, buf_cnt); + for (j = 0; j < num_queues; j++) { + if (priv->channel[j]->bp == bp) + seq_printf(file, "%10s", "x"); + else + seq_printf(file, "%10s", ""); + } + seq_printf(file, "\n"); + } + + return 0; +} + +DEFINE_SHOW_ATTRIBUTE(dpaa2_dbg_bp); + void dpaa2_dbg_add(struct dpaa2_eth_priv *priv) { struct fsl_mc_device *dpni_dev; @@ -139,6 +184,10 @@ void dpaa2_dbg_add(struct dpaa2_eth_priv *priv) /* per-fq stats file */ debugfs_create_file("ch_stats", 0444, dir, priv, &dpaa2_dbg_ch_fops); + + /* per buffer pool stats file */ + debugfs_create_file("bp_stats", 0444, dir, priv, &dpaa2_dbg_bp_fops); + } void dpaa2_dbg_remove(struct dpaa2_eth_priv *priv) diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-trace.h b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-trace.h index 5fb5f14e01ec..9b43fadb9b11 100644 --- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-trace.h +++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-trace.h @@ -73,6 +73,14 @@ DEFINE_EVENT(dpaa2_eth_fd, dpaa2_tx_fd, TP_ARGS(netdev, fd) ); +/* Tx (egress) XSK fd */ +DEFINE_EVENT(dpaa2_eth_fd, dpaa2_tx_xsk_fd, + TP_PROTO(struct net_device *netdev, + const struct dpaa2_fd *fd), + + TP_ARGS(netdev, fd) +); + /* Rx fd */ DEFINE_EVENT(dpaa2_eth_fd, dpaa2_rx_fd, TP_PROTO(struct net_device *netdev, @@ -81,6 +89,14 @@ DEFINE_EVENT(dpaa2_eth_fd, dpaa2_rx_fd, TP_ARGS(netdev, fd) ); +/* Rx XSK fd */ +DEFINE_EVENT(dpaa2_eth_fd, dpaa2_rx_xsk_fd, + TP_PROTO(struct net_device *netdev, + const struct dpaa2_fd *fd), + + TP_ARGS(netdev, fd) +); + /* Tx confirmation fd */ DEFINE_EVENT(dpaa2_eth_fd, dpaa2_tx_conf_fd, TP_PROTO(struct net_device *netdev, @@ -90,57 +106,81 @@ DEFINE_EVENT(dpaa2_eth_fd, dpaa2_tx_conf_fd, ); /* Log data about raw buffers. Useful for tracing DPBP content. */ -TRACE_EVENT(dpaa2_eth_buf_seed, - /* Trace function prototype */ - TP_PROTO(struct net_device *netdev, - /* virtual address and size */ - void *vaddr, - size_t size, - /* dma map address and size */ - dma_addr_t dma_addr, - size_t map_size, - /* buffer pool id, if relevant */ - u16 bpid), - - /* Repeat argument list here */ - TP_ARGS(netdev, vaddr, size, dma_addr, map_size, bpid), - - /* A structure containing the relevant information we want - * to record. Declare name and type for each normal element, - * name, type and size for arrays. Use __string for variable - * length strings. - */ - TP_STRUCT__entry( - __field(void *, vaddr) - __field(size_t, size) - __field(dma_addr_t, dma_addr) - __field(size_t, map_size) - __field(u16, bpid) - __string(name, netdev->name) - ), - - /* The function that assigns values to the above declared - * fields - */ - TP_fast_assign( - __entry->vaddr = vaddr; - __entry->size = size; - __entry->dma_addr = dma_addr; - __entry->map_size = map_size; - __entry->bpid = bpid; - __assign_str(name, netdev->name); - ), - - /* This is what gets printed when the trace event is - * triggered. 
- */ - TP_printk(TR_BUF_FMT, - __get_str(name), - __entry->vaddr, - __entry->size, - &__entry->dma_addr, - __entry->map_size, - __entry->bpid) +DECLARE_EVENT_CLASS(dpaa2_eth_buf, + /* Trace function prototype */ + TP_PROTO(struct net_device *netdev, + /* virtual address and size */ + void *vaddr, + size_t size, + /* dma map address and size */ + dma_addr_t dma_addr, + size_t map_size, + /* buffer pool id, if relevant */ + u16 bpid), + + /* Repeat argument list here */ + TP_ARGS(netdev, vaddr, size, dma_addr, map_size, bpid), + + /* A structure containing the relevant information we want + * to record. Declare name and type for each normal element, + * name, type and size for arrays. Use __string for variable + * length strings. + */ + TP_STRUCT__entry( + __field(void *, vaddr) + __field(size_t, size) + __field(dma_addr_t, dma_addr) + __field(size_t, map_size) + __field(u16, bpid) + __string(name, netdev->name) + ), + + /* The function that assigns values to the above declared + * fields + */ + TP_fast_assign( + __entry->vaddr = vaddr; + __entry->size = size; + __entry->dma_addr = dma_addr; + __entry->map_size = map_size; + __entry->bpid = bpid; + __assign_str(name, netdev->name); + ), + + /* This is what gets printed when the trace event is + * triggered. + */ + TP_printk(TR_BUF_FMT, + __get_str(name), + __entry->vaddr, + __entry->size, + &__entry->dma_addr, + __entry->map_size, + __entry->bpid) +); + +/* Main memory buff seeding */ +DEFINE_EVENT(dpaa2_eth_buf, dpaa2_eth_buf_seed, + TP_PROTO(struct net_device *netdev, + void *vaddr, + size_t size, + dma_addr_t dma_addr, + size_t map_size, + u16 bpid), + + TP_ARGS(netdev, vaddr, size, dma_addr, map_size, bpid) +); + +/* UMEM buff seeding on AF_XDP fast path */ +DEFINE_EVENT(dpaa2_eth_buf, dpaa2_xsk_buf_seed, + TP_PROTO(struct net_device *netdev, + void *vaddr, + size_t size, + dma_addr_t dma_addr, + size_t map_size, + u16 bpid), + + TP_ARGS(netdev, vaddr, size, dma_addr, map_size, bpid) ); /* If only one event of a certain type needs to be declared, use TRACE_EVENT(). diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c index 8d029addddad..281d7e3905c1 100644 --- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c +++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) /* Copyright 2014-2016 Freescale Semiconductor Inc. 
- * Copyright 2016-2020 NXP + * Copyright 2016-2022 NXP */ #include <linux/init.h> #include <linux/module.h> @@ -19,6 +19,7 @@ #include <net/pkt_cls.h> #include <net/sock.h> #include <net/tso.h> +#include <net/xdp_sock_drv.h> #include "dpaa2-eth.h" @@ -104,8 +105,8 @@ static void dpaa2_ptp_onestep_reg_update_method(struct dpaa2_eth_priv *priv) priv->dpaa2_set_onestep_params_cb = dpaa2_update_ptp_onestep_direct; } -static void *dpaa2_iova_to_virt(struct iommu_domain *domain, - dma_addr_t iova_addr) +void *dpaa2_iova_to_virt(struct iommu_domain *domain, + dma_addr_t iova_addr) { phys_addr_t phys_addr; @@ -279,23 +280,33 @@ static struct sk_buff *dpaa2_eth_build_frag_skb(struct dpaa2_eth_priv *priv, * be released in the pool */ static void dpaa2_eth_free_bufs(struct dpaa2_eth_priv *priv, u64 *buf_array, - int count) + int count, bool xsk_zc) { struct device *dev = priv->net_dev->dev.parent; + struct dpaa2_eth_swa *swa; + struct xdp_buff *xdp_buff; void *vaddr; int i; for (i = 0; i < count; i++) { vaddr = dpaa2_iova_to_virt(priv->iommu_domain, buf_array[i]); - dma_unmap_page(dev, buf_array[i], priv->rx_buf_size, - DMA_BIDIRECTIONAL); - free_pages((unsigned long)vaddr, 0); + + if (!xsk_zc) { + dma_unmap_page(dev, buf_array[i], priv->rx_buf_size, + DMA_BIDIRECTIONAL); + free_pages((unsigned long)vaddr, 0); + } else { + swa = (struct dpaa2_eth_swa *) + (vaddr + DPAA2_ETH_RX_HWA_SIZE); + xdp_buff = swa->xsk.xdp_buff; + xsk_buff_free(xdp_buff); + } } } -static void dpaa2_eth_recycle_buf(struct dpaa2_eth_priv *priv, - struct dpaa2_eth_channel *ch, - dma_addr_t addr) +void dpaa2_eth_recycle_buf(struct dpaa2_eth_priv *priv, + struct dpaa2_eth_channel *ch, + dma_addr_t addr) { int retries = 0; int err; @@ -304,7 +315,7 @@ static void dpaa2_eth_recycle_buf(struct dpaa2_eth_priv *priv, if (ch->recycled_bufs_cnt < DPAA2_ETH_BUFS_PER_CMD) return; - while ((err = dpaa2_io_service_release(ch->dpio, priv->bpid, + while ((err = dpaa2_io_service_release(ch->dpio, ch->bp->bpid, ch->recycled_bufs, ch->recycled_bufs_cnt)) == -EBUSY) { if (retries++ >= DPAA2_ETH_SWP_BUSY_RETRIES) @@ -313,7 +324,8 @@ static void dpaa2_eth_recycle_buf(struct dpaa2_eth_priv *priv, } if (err) { - dpaa2_eth_free_bufs(priv, ch->recycled_bufs, ch->recycled_bufs_cnt); + dpaa2_eth_free_bufs(priv, ch->recycled_bufs, + ch->recycled_bufs_cnt, ch->xsk_zc); ch->buf_count -= ch->recycled_bufs_cnt; } @@ -377,10 +389,10 @@ static void dpaa2_eth_xdp_tx_flush(struct dpaa2_eth_priv *priv, fq->xdp_tx_fds.num = 0; } -static void dpaa2_eth_xdp_enqueue(struct dpaa2_eth_priv *priv, - struct dpaa2_eth_channel *ch, - struct dpaa2_fd *fd, - void *buf_start, u16 queue_id) +void dpaa2_eth_xdp_enqueue(struct dpaa2_eth_priv *priv, + struct dpaa2_eth_channel *ch, + struct dpaa2_fd *fd, + void *buf_start, u16 queue_id) { struct dpaa2_faead *faead; struct dpaa2_fd *dest_fd; @@ -485,19 +497,15 @@ out: return xdp_act; } -static struct sk_buff *dpaa2_eth_copybreak(struct dpaa2_eth_channel *ch, - const struct dpaa2_fd *fd, - void *fd_vaddr) +struct sk_buff *dpaa2_eth_alloc_skb(struct dpaa2_eth_priv *priv, + struct dpaa2_eth_channel *ch, + const struct dpaa2_fd *fd, u32 fd_length, + void *fd_vaddr) { u16 fd_offset = dpaa2_fd_get_offset(fd); - struct dpaa2_eth_priv *priv = ch->priv; - u32 fd_length = dpaa2_fd_get_len(fd); struct sk_buff *skb = NULL; unsigned int skb_len; - if (fd_length > priv->rx_copybreak) - return NULL; - skb_len = fd_length + dpaa2_eth_needed_headroom(NULL); skb = napi_alloc_skb(&ch->napi, skb_len); @@ -514,11 +522,66 @@ static struct sk_buff 
*dpaa2_eth_copybreak(struct dpaa2_eth_channel *ch, return skb; } +static struct sk_buff *dpaa2_eth_copybreak(struct dpaa2_eth_channel *ch, + const struct dpaa2_fd *fd, + void *fd_vaddr) +{ + struct dpaa2_eth_priv *priv = ch->priv; + u32 fd_length = dpaa2_fd_get_len(fd); + + if (fd_length > priv->rx_copybreak) + return NULL; + + return dpaa2_eth_alloc_skb(priv, ch, fd, fd_length, fd_vaddr); +} + +void dpaa2_eth_receive_skb(struct dpaa2_eth_priv *priv, + struct dpaa2_eth_channel *ch, + const struct dpaa2_fd *fd, void *vaddr, + struct dpaa2_eth_fq *fq, + struct rtnl_link_stats64 *percpu_stats, + struct sk_buff *skb) +{ + struct dpaa2_fas *fas; + u32 status = 0; + + fas = dpaa2_get_fas(vaddr, false); + prefetch(fas); + prefetch(skb->data); + + /* Get the timestamp value */ + if (priv->rx_tstamp) { + struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb); + __le64 *ts = dpaa2_get_ts(vaddr, false); + u64 ns; + + memset(shhwtstamps, 0, sizeof(*shhwtstamps)); + + ns = DPAA2_PTP_CLK_PERIOD_NS * le64_to_cpup(ts); + shhwtstamps->hwtstamp = ns_to_ktime(ns); + } + + /* Check if we need to validate the L4 csum */ + if (likely(dpaa2_fd_get_frc(fd) & DPAA2_FD_FRC_FASV)) { + status = le32_to_cpu(fas->status); + dpaa2_eth_validate_rx_csum(priv, status, skb); + } + + skb->protocol = eth_type_trans(skb, priv->net_dev); + skb_record_rx_queue(skb, fq->flowid); + + percpu_stats->rx_packets++; + percpu_stats->rx_bytes += dpaa2_fd_get_len(fd); + ch->stats.bytes_per_cdan += dpaa2_fd_get_len(fd); + + list_add_tail(&skb->list, ch->rx_list); +} + /* Main Rx frame processing routine */ -static void dpaa2_eth_rx(struct dpaa2_eth_priv *priv, - struct dpaa2_eth_channel *ch, - const struct dpaa2_fd *fd, - struct dpaa2_eth_fq *fq) +void dpaa2_eth_rx(struct dpaa2_eth_priv *priv, + struct dpaa2_eth_channel *ch, + const struct dpaa2_fd *fd, + struct dpaa2_eth_fq *fq) { dma_addr_t addr = dpaa2_fd_get_addr(fd); u8 fd_format = dpaa2_fd_get_format(fd); @@ -527,9 +590,7 @@ static void dpaa2_eth_rx(struct dpaa2_eth_priv *priv, struct rtnl_link_stats64 *percpu_stats; struct dpaa2_eth_drv_stats *percpu_extras; struct device *dev = priv->net_dev->dev.parent; - struct dpaa2_fas *fas; void *buf_data; - u32 status = 0; u32 xdp_act; /* Tracing point */ @@ -539,8 +600,6 @@ static void dpaa2_eth_rx(struct dpaa2_eth_priv *priv, dma_sync_single_for_cpu(dev, addr, priv->rx_buf_size, DMA_BIDIRECTIONAL); - fas = dpaa2_get_fas(vaddr, false); - prefetch(fas); buf_data = vaddr + dpaa2_fd_get_offset(fd); prefetch(buf_data); @@ -578,35 +637,7 @@ static void dpaa2_eth_rx(struct dpaa2_eth_priv *priv, if (unlikely(!skb)) goto err_build_skb; - prefetch(skb->data); - - /* Get the timestamp value */ - if (priv->rx_tstamp) { - struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb); - __le64 *ts = dpaa2_get_ts(vaddr, false); - u64 ns; - - memset(shhwtstamps, 0, sizeof(*shhwtstamps)); - - ns = DPAA2_PTP_CLK_PERIOD_NS * le64_to_cpup(ts); - shhwtstamps->hwtstamp = ns_to_ktime(ns); - } - - /* Check if we need to validate the L4 csum */ - if (likely(dpaa2_fd_get_frc(fd) & DPAA2_FD_FRC_FASV)) { - status = le32_to_cpu(fas->status); - dpaa2_eth_validate_rx_csum(priv, status, skb); - } - - skb->protocol = eth_type_trans(skb, priv->net_dev); - skb_record_rx_queue(skb, fq->flowid); - - percpu_stats->rx_packets++; - percpu_stats->rx_bytes += dpaa2_fd_get_len(fd); - ch->stats.bytes_per_cdan += dpaa2_fd_get_len(fd); - - list_add_tail(&skb->list, ch->rx_list); - + dpaa2_eth_receive_skb(priv, ch, fd, vaddr, fq, percpu_stats, skb); return; err_build_skb: @@ 
-827,7 +858,7 @@ static void dpaa2_eth_enable_tx_tstamp(struct dpaa2_eth_priv *priv, } } -static void *dpaa2_eth_sgt_get(struct dpaa2_eth_priv *priv) +void *dpaa2_eth_sgt_get(struct dpaa2_eth_priv *priv) { struct dpaa2_eth_sgt_cache *sgt_cache; void *sgt_buf = NULL; @@ -849,7 +880,7 @@ static void *dpaa2_eth_sgt_get(struct dpaa2_eth_priv *priv) return sgt_buf; } -static void dpaa2_eth_sgt_recycle(struct dpaa2_eth_priv *priv, void *sgt_buf) +void dpaa2_eth_sgt_recycle(struct dpaa2_eth_priv *priv, void *sgt_buf) { struct dpaa2_eth_sgt_cache *sgt_cache; @@ -1084,9 +1115,10 @@ static int dpaa2_eth_build_single_fd(struct dpaa2_eth_priv *priv, * This can be called either from dpaa2_eth_tx_conf() or on the error path of * dpaa2_eth_tx(). */ -static void dpaa2_eth_free_tx_fd(struct dpaa2_eth_priv *priv, - struct dpaa2_eth_fq *fq, - const struct dpaa2_fd *fd, bool in_napi) +void dpaa2_eth_free_tx_fd(struct dpaa2_eth_priv *priv, + struct dpaa2_eth_channel *ch, + struct dpaa2_eth_fq *fq, + const struct dpaa2_fd *fd, bool in_napi) { struct device *dev = priv->net_dev->dev.parent; dma_addr_t fd_addr, sg_addr; @@ -1153,6 +1185,10 @@ static void dpaa2_eth_free_tx_fd(struct dpaa2_eth_priv *priv, if (!swa->tso.is_last_fd) should_free_skb = 0; + } else if (swa->type == DPAA2_ETH_SWA_XSK) { + /* Unmap the SGT Buffer */ + dma_unmap_single(dev, fd_addr, swa->xsk.sgt_size, + DMA_BIDIRECTIONAL); } else { skb = swa->single.skb; @@ -1170,6 +1206,12 @@ static void dpaa2_eth_free_tx_fd(struct dpaa2_eth_priv *priv, return; } + if (swa->type == DPAA2_ETH_SWA_XSK) { + ch->xsk_tx_pkts_sent++; + dpaa2_eth_sgt_recycle(priv, buffer_start); + return; + } + if (swa->type != DPAA2_ETH_SWA_XDP && in_napi) { fq->dq_frames++; fq->dq_bytes += fd_len; @@ -1344,7 +1386,7 @@ err_alloc_tso_hdr: err_sgt_get: /* Free all the other FDs that were already fully created */ for (i = 0; i < index; i++) - dpaa2_eth_free_tx_fd(priv, NULL, &fd_start[i], false); + dpaa2_eth_free_tx_fd(priv, NULL, NULL, &fd_start[i], false); return err; } @@ -1460,7 +1502,7 @@ static netdev_tx_t __dpaa2_eth_tx(struct sk_buff *skb, if (unlikely(err < 0)) { percpu_stats->tx_errors++; /* Clean up everything, including freeing the skb */ - dpaa2_eth_free_tx_fd(priv, fq, fd, false); + dpaa2_eth_free_tx_fd(priv, NULL, fq, fd, false); netdev_tx_completed_queue(nq, 1, fd_len); } else { percpu_stats->tx_packets += total_enqueued; @@ -1553,7 +1595,7 @@ static void dpaa2_eth_tx_conf(struct dpaa2_eth_priv *priv, /* Check frame errors in the FD field */ fd_errors = dpaa2_fd_get_ctrl(fd) & DPAA2_FD_TX_ERR_MASK; - dpaa2_eth_free_tx_fd(priv, fq, fd, true); + dpaa2_eth_free_tx_fd(priv, ch, fq, fd, true); if (likely(!fd_errors)) return; @@ -1631,44 +1673,76 @@ static int dpaa2_eth_set_tx_csum(struct dpaa2_eth_priv *priv, bool enable) * to the specified buffer pool */ static int dpaa2_eth_add_bufs(struct dpaa2_eth_priv *priv, - struct dpaa2_eth_channel *ch, u16 bpid) + struct dpaa2_eth_channel *ch) { + struct xdp_buff *xdp_buffs[DPAA2_ETH_BUFS_PER_CMD]; struct device *dev = priv->net_dev->dev.parent; u64 buf_array[DPAA2_ETH_BUFS_PER_CMD]; + struct dpaa2_eth_swa *swa; struct page *page; dma_addr_t addr; int retries = 0; - int i, err; - - for (i = 0; i < DPAA2_ETH_BUFS_PER_CMD; i++) { - /* Allocate buffer visible to WRIOP + skb shared info + - * alignment padding - */ - /* allocate one page for each Rx buffer. 
WRIOP sees - * the entire page except for a tailroom reserved for - * skb shared info + int i = 0, err; + u32 batch; + + /* Allocate buffers visible to WRIOP */ + if (!ch->xsk_zc) { + for (i = 0; i < DPAA2_ETH_BUFS_PER_CMD; i++) { + /* Also allocate skb shared info and alignment padding. + * There is one page for each Rx buffer. WRIOP sees + * the entire page except for a tailroom reserved for + * skb shared info + */ + page = dev_alloc_pages(0); + if (!page) + goto err_alloc; + + addr = dma_map_page(dev, page, 0, priv->rx_buf_size, + DMA_BIDIRECTIONAL); + if (unlikely(dma_mapping_error(dev, addr))) + goto err_map; + + buf_array[i] = addr; + + /* tracing point */ + trace_dpaa2_eth_buf_seed(priv->net_dev, + page_address(page), + DPAA2_ETH_RX_BUF_RAW_SIZE, + addr, priv->rx_buf_size, + ch->bp->bpid); + } + } else if (xsk_buff_can_alloc(ch->xsk_pool, DPAA2_ETH_BUFS_PER_CMD)) { + /* Allocate XSK buffers for AF_XDP fast path in batches + * of DPAA2_ETH_BUFS_PER_CMD. Bail out if the UMEM cannot + * provide enough buffers at the moment */ - page = dev_alloc_pages(0); - if (!page) + batch = xsk_buff_alloc_batch(ch->xsk_pool, xdp_buffs, + DPAA2_ETH_BUFS_PER_CMD); + if (!batch) goto err_alloc; - addr = dma_map_page(dev, page, 0, priv->rx_buf_size, - DMA_BIDIRECTIONAL); - if (unlikely(dma_mapping_error(dev, addr))) - goto err_map; + for (i = 0; i < batch; i++) { + swa = (struct dpaa2_eth_swa *)(xdp_buffs[i]->data_hard_start + + DPAA2_ETH_RX_HWA_SIZE); + swa->xsk.xdp_buff = xdp_buffs[i]; + + addr = xsk_buff_xdp_get_frame_dma(xdp_buffs[i]); + if (unlikely(dma_mapping_error(dev, addr))) + goto err_map; - buf_array[i] = addr; + buf_array[i] = addr; - /* tracing point */ - trace_dpaa2_eth_buf_seed(priv->net_dev, page_address(page), - DPAA2_ETH_RX_BUF_RAW_SIZE, - addr, priv->rx_buf_size, - bpid); + trace_dpaa2_xsk_buf_seed(priv->net_dev, + xdp_buffs[i]->data_hard_start, + DPAA2_ETH_RX_BUF_RAW_SIZE, + addr, priv->rx_buf_size, + ch->bp->bpid); + } } release_bufs: /* In case the portal is busy, retry until successful */ - while ((err = dpaa2_io_service_release(ch->dpio, bpid, + while ((err = dpaa2_io_service_release(ch->dpio, ch->bp->bpid, buf_array, i)) == -EBUSY) { if (retries++ >= DPAA2_ETH_SWP_BUSY_RETRIES) break; @@ -1679,14 +1753,19 @@ release_bufs: * not much else we can do about it */ if (err) { - dpaa2_eth_free_bufs(priv, buf_array, i); + dpaa2_eth_free_bufs(priv, buf_array, i, ch->xsk_zc); return 0; } return i; err_map: - __free_pages(page, 0); + if (!ch->xsk_zc) { + __free_pages(page, 0); + } else { + for (; i < batch; i++) + xsk_buff_free(xdp_buffs[i]); + } err_alloc: /* If we managed to allocate at least some buffers, * release them to hardware @@ -1697,39 +1776,64 @@ err_alloc: return 0; } -static int dpaa2_eth_seed_pool(struct dpaa2_eth_priv *priv, u16 bpid) +static int dpaa2_eth_seed_pool(struct dpaa2_eth_priv *priv, + struct dpaa2_eth_channel *ch) { - int i, j; + int i; int new_count; - for (j = 0; j < priv->num_channels; j++) { - for (i = 0; i < DPAA2_ETH_NUM_BUFS; - i += DPAA2_ETH_BUFS_PER_CMD) { - new_count = dpaa2_eth_add_bufs(priv, priv->channel[j], bpid); - priv->channel[j]->buf_count += new_count; + for (i = 0; i < DPAA2_ETH_NUM_BUFS; i += DPAA2_ETH_BUFS_PER_CMD) { + new_count = dpaa2_eth_add_bufs(priv, ch); + ch->buf_count += new_count; - if (new_count < DPAA2_ETH_BUFS_PER_CMD) { - return -ENOMEM; - } - } + if (new_count < DPAA2_ETH_BUFS_PER_CMD) + return -ENOMEM; } return 0; } +static void dpaa2_eth_seed_pools(struct dpaa2_eth_priv *priv) +{ + struct net_device *net_dev = 
priv->net_dev; + struct dpaa2_eth_channel *channel; + int i, err = 0; + + for (i = 0; i < priv->num_channels; i++) { + channel = priv->channel[i]; + + err = dpaa2_eth_seed_pool(priv, channel); + + /* Not much to do; the buffer pool, though not filled up, + * may still contain some buffers which would enable us + * to limp on. + */ + if (err) + netdev_err(net_dev, "Buffer seeding failed for DPBP %d (bpid=%d)\n", + channel->bp->dev->obj_desc.id, + channel->bp->bpid); + } +} + /* - * Drain the specified number of buffers from the DPNI's private buffer pool. + * Drain the specified number of buffers from one of the DPNI's private buffer + * pools. * @count must not exceeed DPAA2_ETH_BUFS_PER_CMD */ -static void dpaa2_eth_drain_bufs(struct dpaa2_eth_priv *priv, int count) +static void dpaa2_eth_drain_bufs(struct dpaa2_eth_priv *priv, int bpid, + int count) { u64 buf_array[DPAA2_ETH_BUFS_PER_CMD]; + bool xsk_zc = false; int retries = 0; - int ret; + int i, ret; + + for (i = 0; i < priv->num_channels; i++) + if (priv->channel[i]->bp->bpid == bpid) + xsk_zc = priv->channel[i]->xsk_zc; do { - ret = dpaa2_io_service_acquire(NULL, priv->bpid, - buf_array, count); + ret = dpaa2_io_service_acquire(NULL, bpid, buf_array, count); if (ret < 0) { if (ret == -EBUSY && retries++ < DPAA2_ETH_SWP_BUSY_RETRIES) @@ -1737,28 +1841,40 @@ static void dpaa2_eth_drain_bufs(struct dpaa2_eth_priv *priv, int count) netdev_err(priv->net_dev, "dpaa2_io_service_acquire() failed\n"); return; } - dpaa2_eth_free_bufs(priv, buf_array, ret); + dpaa2_eth_free_bufs(priv, buf_array, ret, xsk_zc); retries = 0; } while (ret); } -static void dpaa2_eth_drain_pool(struct dpaa2_eth_priv *priv) +static void dpaa2_eth_drain_pool(struct dpaa2_eth_priv *priv, int bpid) { int i; - dpaa2_eth_drain_bufs(priv, DPAA2_ETH_BUFS_PER_CMD); - dpaa2_eth_drain_bufs(priv, 1); + /* Drain the buffer pool */ + dpaa2_eth_drain_bufs(priv, bpid, DPAA2_ETH_BUFS_PER_CMD); + dpaa2_eth_drain_bufs(priv, bpid, 1); + /* Setup to zero the buffer count of all channels which were + * using this buffer pool. 
+ */ for (i = 0; i < priv->num_channels; i++) - priv->channel[i]->buf_count = 0; + if (priv->channel[i]->bp->bpid == bpid) + priv->channel[i]->buf_count = 0; +} + +static void dpaa2_eth_drain_pools(struct dpaa2_eth_priv *priv) +{ + int i; + + for (i = 0; i < priv->num_bps; i++) + dpaa2_eth_drain_pool(priv, priv->bp[i]->bpid); } /* Function is called from softirq context only, so we don't need to guard * the access to percpu count */ static int dpaa2_eth_refill_pool(struct dpaa2_eth_priv *priv, - struct dpaa2_eth_channel *ch, - u16 bpid) + struct dpaa2_eth_channel *ch) { int new_count; @@ -1766,7 +1882,7 @@ static int dpaa2_eth_refill_pool(struct dpaa2_eth_priv *priv, return 0; do { - new_count = dpaa2_eth_add_bufs(priv, ch, bpid); + new_count = dpaa2_eth_add_bufs(priv, ch); if (unlikely(!new_count)) { /* Out of memory; abort for now, we'll try later on */ break; @@ -1830,6 +1946,7 @@ static int dpaa2_eth_poll(struct napi_struct *napi, int budget) struct dpaa2_eth_fq *fq, *txc_fq = NULL; struct netdev_queue *nq; int store_cleaned, work_done; + bool work_done_zc = false; struct list_head rx_list; int retries = 0; u16 flowid; @@ -1842,13 +1959,22 @@ static int dpaa2_eth_poll(struct napi_struct *napi, int budget) INIT_LIST_HEAD(&rx_list); ch->rx_list = &rx_list; + if (ch->xsk_zc) { + work_done_zc = dpaa2_xsk_tx(priv, ch); + /* If we reached the XSK Tx per NAPI threshold, we're done */ + if (work_done_zc) { + work_done = budget; + goto out; + } + } + do { err = dpaa2_eth_pull_channel(ch); if (unlikely(err)) break; /* Refill pool if appropriate */ - dpaa2_eth_refill_pool(priv, ch, priv->bpid); + dpaa2_eth_refill_pool(priv, ch); store_cleaned = dpaa2_eth_consume_frames(ch, &fq); if (store_cleaned <= 0) @@ -1894,6 +2020,11 @@ static int dpaa2_eth_poll(struct napi_struct *napi, int budget) out: netif_receive_skb_list(ch->rx_list); + if (ch->xsk_tx_pkts_sent) { + xsk_tx_completed(ch->xsk_pool, ch->xsk_tx_pkts_sent); + ch->xsk_tx_pkts_sent = 0; + } + if (txc_fq && txc_fq->dq_frames) { nq = netdev_get_tx_queue(priv->net_dev, txc_fq->flowid); netdev_tx_completed_queue(nq, txc_fq->dq_frames, @@ -2047,15 +2178,7 @@ static int dpaa2_eth_open(struct net_device *net_dev) struct dpaa2_eth_priv *priv = netdev_priv(net_dev); int err; - err = dpaa2_eth_seed_pool(priv, priv->bpid); - if (err) { - /* Not much to do; the buffer pool, though not filled up, - * may still contain some buffers which would enable us - * to limp on. - */ - netdev_err(net_dev, "Buffer seeding failed for DPBP %d (bpid=%d)\n", - priv->dpbp_dev->obj_desc.id, priv->bpid); - } + dpaa2_eth_seed_pools(priv); if (!dpaa2_eth_is_type_phy(priv)) { /* We'll only start the txqs when the link is actually ready; @@ -2088,7 +2211,7 @@ static int dpaa2_eth_open(struct net_device *net_dev) enable_err: dpaa2_eth_disable_ch_napi(priv); - dpaa2_eth_drain_pool(priv); + dpaa2_eth_drain_pools(priv); return err; } @@ -2193,7 +2316,7 @@ static int dpaa2_eth_stop(struct net_device *net_dev) dpaa2_eth_disable_ch_napi(priv); /* Empty the buffer pool */ - dpaa2_eth_drain_pool(priv); + dpaa2_eth_drain_pools(priv); /* Empty the Scatter-Gather Buffer cache */ dpaa2_eth_sgt_cache_drain(priv); @@ -2602,7 +2725,7 @@ static int dpaa2_eth_setup_xdp(struct net_device *dev, struct bpf_prog *prog) need_update = (!!priv->xdp_prog != !!prog); if (up) - dpaa2_eth_stop(dev); + dev_close(dev); /* While in xdp mode, enforce a maximum Rx frame size based on MTU. 
* Also, when switching between xdp/non-xdp modes we need to reconfigure @@ -2630,7 +2753,7 @@ static int dpaa2_eth_setup_xdp(struct net_device *dev, struct bpf_prog *prog) } if (up) { - err = dpaa2_eth_open(dev); + err = dev_open(dev, NULL); if (err) return err; } @@ -2641,7 +2764,7 @@ out_err: if (prog) bpf_prog_sub(prog, priv->num_channels); if (up) - dpaa2_eth_open(dev); + dev_open(dev, NULL); return err; } @@ -2651,6 +2774,8 @@ static int dpaa2_eth_xdp(struct net_device *dev, struct netdev_bpf *xdp) switch (xdp->command) { case XDP_SETUP_PROG: return dpaa2_eth_setup_xdp(dev, xdp->prog); + case XDP_SETUP_XSK_POOL: + return dpaa2_xsk_setup_pool(dev, xdp->xsk.pool, xdp->xsk.queue_id); default: return -EINVAL; } @@ -2881,6 +3006,7 @@ static const struct net_device_ops dpaa2_eth_ops = { .ndo_change_mtu = dpaa2_eth_change_mtu, .ndo_bpf = dpaa2_eth_xdp, .ndo_xdp_xmit = dpaa2_eth_xdp_xmit, + .ndo_xsk_wakeup = dpaa2_xsk_wakeup, .ndo_setup_tc = dpaa2_eth_setup_tc, .ndo_vlan_rx_add_vid = dpaa2_eth_rx_add_vid, .ndo_vlan_rx_kill_vid = dpaa2_eth_rx_kill_vid @@ -2895,7 +3021,11 @@ static void dpaa2_eth_cdan_cb(struct dpaa2_io_notification_ctx *ctx) /* Update NAPI statistics */ ch->stats.cdan++; - napi_schedule(&ch->napi); + /* NAPI can also be scheduled from the AF_XDP Tx path. Mark a missed + * so that it can be rescheduled again. + */ + if (!napi_if_scheduled_mark_missed(&ch->napi)) + napi_schedule(&ch->napi); } /* Allocate and configure a DPCON object */ @@ -3204,13 +3334,14 @@ static void dpaa2_eth_setup_fqs(struct dpaa2_eth_priv *priv) dpaa2_eth_set_fq_affinity(priv); } -/* Allocate and configure one buffer pool for each interface */ -static int dpaa2_eth_setup_dpbp(struct dpaa2_eth_priv *priv) +/* Allocate and configure a buffer pool */ +struct dpaa2_eth_bp *dpaa2_eth_allocate_dpbp(struct dpaa2_eth_priv *priv) { - int err; - struct fsl_mc_device *dpbp_dev; struct device *dev = priv->net_dev->dev.parent; + struct fsl_mc_device *dpbp_dev; struct dpbp_attr dpbp_attrs; + struct dpaa2_eth_bp *bp; + int err; err = fsl_mc_object_allocate(to_fsl_mc_device(dev), FSL_MC_POOL_DPBP, &dpbp_dev); @@ -3219,12 +3350,16 @@ static int dpaa2_eth_setup_dpbp(struct dpaa2_eth_priv *priv) err = -EPROBE_DEFER; else dev_err(dev, "DPBP device allocation failed\n"); - return err; + return ERR_PTR(err); } - priv->dpbp_dev = dpbp_dev; + bp = kzalloc(sizeof(*bp), GFP_KERNEL); + if (!bp) { + err = -ENOMEM; + goto err_alloc; + } - err = dpbp_open(priv->mc_io, 0, priv->dpbp_dev->obj_desc.id, + err = dpbp_open(priv->mc_io, 0, dpbp_dev->obj_desc.id, &dpbp_dev->mc_handle); if (err) { dev_err(dev, "dpbp_open() failed\n"); @@ -3249,9 +3384,11 @@ static int dpaa2_eth_setup_dpbp(struct dpaa2_eth_priv *priv) dev_err(dev, "dpbp_get_attributes() failed\n"); goto err_get_attr; } - priv->bpid = dpbp_attrs.bpid; - return 0; + bp->dev = dpbp_dev; + bp->bpid = dpbp_attrs.bpid; + + return bp; err_get_attr: dpbp_disable(priv->mc_io, 0, dpbp_dev->mc_handle); @@ -3259,17 +3396,58 @@ err_enable: err_reset: dpbp_close(priv->mc_io, 0, dpbp_dev->mc_handle); err_open: + kfree(bp); +err_alloc: fsl_mc_object_free(dpbp_dev); - return err; + return ERR_PTR(err); } -static void dpaa2_eth_free_dpbp(struct dpaa2_eth_priv *priv) +static int dpaa2_eth_setup_default_dpbp(struct dpaa2_eth_priv *priv) { - dpaa2_eth_drain_pool(priv); - dpbp_disable(priv->mc_io, 0, priv->dpbp_dev->mc_handle); - dpbp_close(priv->mc_io, 0, priv->dpbp_dev->mc_handle); - fsl_mc_object_free(priv->dpbp_dev); + struct dpaa2_eth_bp *bp; + int i; + + bp = dpaa2_eth_allocate_dpbp(priv); + 
if (IS_ERR(bp)) + return PTR_ERR(bp); + + priv->bp[DPAA2_ETH_DEFAULT_BP_IDX] = bp; + priv->num_bps++; + + for (i = 0; i < priv->num_channels; i++) + priv->channel[i]->bp = bp; + + return 0; +} + +void dpaa2_eth_free_dpbp(struct dpaa2_eth_priv *priv, struct dpaa2_eth_bp *bp) +{ + int idx_bp; + + /* Find the index at which this BP is stored */ + for (idx_bp = 0; idx_bp < priv->num_bps; idx_bp++) + if (priv->bp[idx_bp] == bp) + break; + + /* Drain the pool and disable the associated MC object */ + dpaa2_eth_drain_pool(priv, bp->bpid); + dpbp_disable(priv->mc_io, 0, bp->dev->mc_handle); + dpbp_close(priv->mc_io, 0, bp->dev->mc_handle); + fsl_mc_object_free(bp->dev); + kfree(bp); + + /* Move the last in use DPBP over in this position */ + priv->bp[idx_bp] = priv->bp[priv->num_bps - 1]; + priv->num_bps--; +} + +static void dpaa2_eth_free_dpbps(struct dpaa2_eth_priv *priv) +{ + int i; + + for (i = 0; i < priv->num_bps; i++) + dpaa2_eth_free_dpbp(priv, priv->bp[i]); } static int dpaa2_eth_set_buffer_layout(struct dpaa2_eth_priv *priv) @@ -4154,15 +4332,16 @@ out: */ static int dpaa2_eth_bind_dpni(struct dpaa2_eth_priv *priv) { + struct dpaa2_eth_bp *bp = priv->bp[DPAA2_ETH_DEFAULT_BP_IDX]; struct net_device *net_dev = priv->net_dev; + struct dpni_pools_cfg pools_params = { 0 }; struct device *dev = net_dev->dev.parent; - struct dpni_pools_cfg pools_params; struct dpni_error_cfg err_cfg; int err = 0; int i; pools_params.num_dpbp = 1; - pools_params.pools[0].dpbp_id = priv->dpbp_dev->obj_desc.id; + pools_params.pools[0].dpbp_id = bp->dev->obj_desc.id; pools_params.pools[0].backup_pool = 0; pools_params.pools[0].buffer_size = priv->rx_buf_size; err = dpni_set_pools(priv->mc_io, 0, priv->mc_token, &pools_params); @@ -4641,7 +4820,7 @@ static int dpaa2_eth_probe(struct fsl_mc_device *dpni_dev) dpaa2_eth_setup_fqs(priv); - err = dpaa2_eth_setup_dpbp(priv); + err = dpaa2_eth_setup_default_dpbp(priv); if (err) goto err_dpbp_setup; @@ -4777,7 +4956,7 @@ err_alloc_percpu_extras: err_alloc_percpu_stats: dpaa2_eth_del_ch_napi(priv); err_bind: - dpaa2_eth_free_dpbp(priv); + dpaa2_eth_free_dpbps(priv); err_dpbp_setup: dpaa2_eth_free_dpio(priv); err_dpio_setup: @@ -4830,7 +5009,7 @@ static int dpaa2_eth_remove(struct fsl_mc_device *ls_dev) free_percpu(priv->percpu_extras); dpaa2_eth_del_ch_napi(priv); - dpaa2_eth_free_dpbp(priv); + dpaa2_eth_free_dpbps(priv); dpaa2_eth_free_dpio(priv); dpaa2_eth_free_dpni(priv); if (priv->onestep_reg_base) diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h index 447718483ef4..5d0fc432e5b2 100644 --- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h +++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */ /* Copyright 2014-2016 Freescale Semiconductor Inc. - * Copyright 2016-2020 NXP + * Copyright 2016-2022 NXP */ #ifndef __DPAA2_ETH_H @@ -53,6 +53,12 @@ */ #define DPAA2_ETH_TXCONF_PER_NAPI 256 +/* Maximum number of Tx frames to be processed in a single NAPI + * call when AF_XDP is running. Bind it to DPAA2_ETH_TXCONF_PER_NAPI + * to maximize the throughput. + */ +#define DPAA2_ETH_TX_ZC_PER_NAPI DPAA2_ETH_TXCONF_PER_NAPI + /* Buffer qouta per channel. 
We want to keep in check number of ingress frames * in flight: for small sized frames, congestion group taildrop may kick in * first; for large sizes, Rx FQ taildrop threshold will ensure only a @@ -109,6 +115,14 @@ #define DPAA2_ETH_RX_BUF_ALIGN_REV1 256 #define DPAA2_ETH_RX_BUF_ALIGN 64 +/* The firmware allows assigning multiple buffer pools to a single DPNI - + * maximum 8 DPBP objects. By default, only the first DPBP (idx 0) is used for + * all queues. Thus, when enabling AF_XDP we must accommodate up to 9 DPBPs + * object: the default and 8 other distinct buffer pools, one for each queue. + */ +#define DPAA2_ETH_DEFAULT_BP_IDX 0 +#define DPAA2_ETH_MAX_BPS 9 + /* We are accommodating a skb backpointer and some S/G info * in the frame's software annotation. The hardware * options are either 0 or 64, so we choose the latter. @@ -122,6 +136,7 @@ enum dpaa2_eth_swa_type { DPAA2_ETH_SWA_SINGLE, DPAA2_ETH_SWA_SG, DPAA2_ETH_SWA_XDP, + DPAA2_ETH_SWA_XSK, DPAA2_ETH_SWA_SW_TSO, }; @@ -144,6 +159,10 @@ struct dpaa2_eth_swa { struct xdp_frame *xdpf; } xdp; struct { + struct xdp_buff *xdp_buff; + int sgt_size; + } xsk; + struct { struct sk_buff *skb; int num_sg; int sgt_size; @@ -421,12 +440,19 @@ enum dpaa2_eth_fq_type { }; struct dpaa2_eth_priv; +struct dpaa2_eth_channel; +struct dpaa2_eth_fq; struct dpaa2_eth_xdp_fds { struct dpaa2_fd fds[DEV_MAP_BULK_SIZE]; ssize_t num; }; +typedef void dpaa2_eth_consume_cb_t(struct dpaa2_eth_priv *priv, + struct dpaa2_eth_channel *ch, + const struct dpaa2_fd *fd, + struct dpaa2_eth_fq *fq); + struct dpaa2_eth_fq { u32 fqid; u32 tx_qdbin; @@ -439,10 +465,7 @@ struct dpaa2_eth_fq { struct dpaa2_eth_channel *channel; enum dpaa2_eth_fq_type type; - void (*consume)(struct dpaa2_eth_priv *priv, - struct dpaa2_eth_channel *ch, - const struct dpaa2_fd *fd, - struct dpaa2_eth_fq *fq); + dpaa2_eth_consume_cb_t *consume; struct dpaa2_eth_fq_stats stats; struct dpaa2_eth_xdp_fds xdp_redirect_fds; @@ -454,6 +477,11 @@ struct dpaa2_eth_ch_xdp { unsigned int res; }; +struct dpaa2_eth_bp { + struct fsl_mc_device *dev; + int bpid; +}; + struct dpaa2_eth_channel { struct dpaa2_io_notification_ctx nctx; struct fsl_mc_device *dpcon; @@ -472,6 +500,11 @@ struct dpaa2_eth_channel { /* Buffers to be recycled back in the buffer pool */ u64 recycled_bufs[DPAA2_ETH_BUFS_PER_CMD]; int recycled_bufs_cnt; + + bool xsk_zc; + int xsk_tx_pkts_sent; + struct xsk_buff_pool *xsk_pool; + struct dpaa2_eth_bp *bp; }; struct dpaa2_eth_dist_fields { @@ -506,7 +539,7 @@ struct dpaa2_eth_trap_data { #define DPAA2_ETH_DEFAULT_COPYBREAK 512 -#define DPAA2_ETH_ENQUEUE_MAX_FDS 200 +#define DPAA2_ETH_ENQUEUE_MAX_FDS 256 struct dpaa2_eth_fds { struct dpaa2_fd array[DPAA2_ETH_ENQUEUE_MAX_FDS]; }; @@ -535,14 +568,16 @@ struct dpaa2_eth_priv { u8 ptp_correction_off; void (*dpaa2_set_onestep_params_cb)(struct dpaa2_eth_priv *priv, u32 offset, u8 udp); - struct fsl_mc_device *dpbp_dev; u16 rx_buf_size; - u16 bpid; struct iommu_domain *iommu_domain; enum hwtstamp_tx_types tx_tstamp_type; /* Tx timestamping type */ bool rx_tstamp; /* Rx timestamping enabled */ + /* Buffer pool management */ + struct dpaa2_eth_bp *bp[DPAA2_ETH_MAX_BPS]; + int num_bps; + u16 tx_qdid; struct fsl_mc_io *mc_io; /* Cores which have an affine DPIO/DPCON. 
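The header hunks above replace the driver's single dpbp_dev/bpid pair with an array of up to DPAA2_ETH_MAX_BPS dpaa2_eth_bp objects in dpaa2_eth_priv and give every dpaa2_eth_channel its own bp pointer; all channels share priv->bp[DPAA2_ETH_DEFAULT_BP_IDX] until AF_XDP zero-copy attaches a dedicated pool to one of them. A minimal sketch, assuming only the fields introduced in these hunks (the helper name dpaa2_eth_bp_in_use is hypothetical and not part of the commit), of how a pool's remaining users could be checked before it is released:

/* Hypothetical helper, not part of the patch: returns true while any
 * channel still points at the given buffer pool. It mirrors the channel
 * walks the commit performs inline in dpaa2_eth_drain_bufs() and
 * dpaa2_xsk_set_bp_per_qdbin().
 */
static bool dpaa2_eth_bp_in_use(struct dpaa2_eth_priv *priv,
				struct dpaa2_eth_bp *bp)
{
	int i;

	for (i = 0; i < priv->num_channels; i++)
		if (priv->channel[i]->bp == bp)
			return true;

	return false;
}

In the commit itself no such helper exists: dpaa2_eth_free_dpbp() drains the pool, releases the DPBP object and swap-removes the entry from priv->bp[], while its callers repoint the affected channel at the default pool.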
@@ -771,4 +806,54 @@ void dpaa2_eth_dl_traps_unregister(struct dpaa2_eth_priv *priv); struct dpaa2_eth_trap_item *dpaa2_eth_dl_get_trap(struct dpaa2_eth_priv *priv, struct dpaa2_fapr *fapr); + +struct dpaa2_eth_bp *dpaa2_eth_allocate_dpbp(struct dpaa2_eth_priv *priv); +void dpaa2_eth_free_dpbp(struct dpaa2_eth_priv *priv, struct dpaa2_eth_bp *bp); + +struct sk_buff *dpaa2_eth_alloc_skb(struct dpaa2_eth_priv *priv, + struct dpaa2_eth_channel *ch, + const struct dpaa2_fd *fd, u32 fd_length, + void *fd_vaddr); + +void dpaa2_eth_receive_skb(struct dpaa2_eth_priv *priv, + struct dpaa2_eth_channel *ch, + const struct dpaa2_fd *fd, void *vaddr, + struct dpaa2_eth_fq *fq, + struct rtnl_link_stats64 *percpu_stats, + struct sk_buff *skb); + +void dpaa2_eth_rx(struct dpaa2_eth_priv *priv, + struct dpaa2_eth_channel *ch, + const struct dpaa2_fd *fd, + struct dpaa2_eth_fq *fq); + +struct dpaa2_eth_bp *dpaa2_eth_allocate_dpbp(struct dpaa2_eth_priv *priv); +void dpaa2_eth_free_dpbp(struct dpaa2_eth_priv *priv, + struct dpaa2_eth_bp *bp); + +void *dpaa2_iova_to_virt(struct iommu_domain *domain, dma_addr_t iova_addr); +void dpaa2_eth_recycle_buf(struct dpaa2_eth_priv *priv, + struct dpaa2_eth_channel *ch, + dma_addr_t addr); + +void dpaa2_eth_xdp_enqueue(struct dpaa2_eth_priv *priv, + struct dpaa2_eth_channel *ch, + struct dpaa2_fd *fd, + void *buf_start, u16 queue_id); + +int dpaa2_xsk_wakeup(struct net_device *dev, u32 qid, u32 flags); +int dpaa2_xsk_setup_pool(struct net_device *dev, struct xsk_buff_pool *pool, u16 qid); + +void dpaa2_eth_free_tx_fd(struct dpaa2_eth_priv *priv, + struct dpaa2_eth_channel *ch, + struct dpaa2_eth_fq *fq, + const struct dpaa2_fd *fd, bool in_napi); +bool dpaa2_xsk_tx(struct dpaa2_eth_priv *priv, + struct dpaa2_eth_channel *ch); + +/* SGT (Scatter-Gather Table) cache management */ +void *dpaa2_eth_sgt_get(struct dpaa2_eth_priv *priv); + +void dpaa2_eth_sgt_recycle(struct dpaa2_eth_priv *priv, void *sgt_buf); + #endif /* __DPAA2_H */ diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c index eea7d7a07c00..32a38a03db57 100644 --- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c +++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c @@ -1,7 +1,6 @@ // SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) /* Copyright 2014-2016 Freescale Semiconductor Inc. 
- * Copyright 2016 NXP - * Copyright 2020 NXP + * Copyright 2016-2022 NXP */ #include <linux/net_tstamp.h> @@ -227,17 +226,8 @@ static void dpaa2_eth_get_ethtool_stats(struct net_device *net_dev, struct ethtool_stats *stats, u64 *data) { - int i = 0; - int j, k, err; - int num_cnt; - union dpni_statistics dpni_stats; - u32 fcnt, bcnt; - u32 fcnt_rx_total = 0, fcnt_tx_total = 0; - u32 bcnt_rx_total = 0, bcnt_tx_total = 0; - u32 buf_cnt; struct dpaa2_eth_priv *priv = netdev_priv(net_dev); - struct dpaa2_eth_drv_stats *extras; - struct dpaa2_eth_ch_stats *ch_stats; + union dpni_statistics dpni_stats; int dpni_stats_page_size[DPNI_STATISTICS_CNT] = { sizeof(dpni_stats.page_0), sizeof(dpni_stats.page_1), @@ -247,6 +237,13 @@ static void dpaa2_eth_get_ethtool_stats(struct net_device *net_dev, sizeof(dpni_stats.page_5), sizeof(dpni_stats.page_6), }; + u32 fcnt_rx_total = 0, fcnt_tx_total = 0; + u32 bcnt_rx_total = 0, bcnt_tx_total = 0; + struct dpaa2_eth_ch_stats *ch_stats; + struct dpaa2_eth_drv_stats *extras; + u32 buf_cnt, buf_cnt_total = 0; + int j, k, err, num_cnt, i = 0; + u32 fcnt, bcnt; memset(data, 0, sizeof(u64) * (DPAA2_ETH_NUM_STATS + DPAA2_ETH_NUM_EXTRA_STATS)); @@ -308,12 +305,15 @@ static void dpaa2_eth_get_ethtool_stats(struct net_device *net_dev, *(data + i++) = fcnt_tx_total; *(data + i++) = bcnt_tx_total; - err = dpaa2_io_query_bp_count(NULL, priv->bpid, &buf_cnt); - if (err) { - netdev_warn(net_dev, "Buffer count query error %d\n", err); - return; + for (j = 0; j < priv->num_bps; j++) { + err = dpaa2_io_query_bp_count(NULL, priv->bp[j]->bpid, &buf_cnt); + if (err) { + netdev_warn(net_dev, "Buffer count query error %d\n", err); + return; + } + buf_cnt_total += buf_cnt; } - *(data + i++) = buf_cnt; + *(data + i++) = buf_cnt_total; if (dpaa2_eth_has_mac(priv)) dpaa2_mac_get_ethtool_stats(priv->mac, data + i); @@ -876,6 +876,29 @@ restore_rx_usecs: return err; } +static void dpaa2_eth_get_channels(struct net_device *net_dev, + struct ethtool_channels *channels) +{ + struct dpaa2_eth_priv *priv = netdev_priv(net_dev); + int queue_count = dpaa2_eth_queue_count(priv); + + channels->max_rx = queue_count; + channels->max_tx = queue_count; + channels->rx_count = queue_count; + channels->tx_count = queue_count; + + /* Tx confirmation and Rx error */ + channels->max_other = queue_count + 1; + channels->max_combined = channels->max_rx + + channels->max_tx + + channels->max_other; + /* Tx conf and Rx err */ + channels->other_count = queue_count + 1; + channels->combined_count = channels->rx_count + + channels->tx_count + + channels->other_count; +} + const struct ethtool_ops dpaa2_ethtool_ops = { .supported_coalesce_params = ETHTOOL_COALESCE_RX_USECS | ETHTOOL_COALESCE_USE_ADAPTIVE_RX, @@ -896,4 +919,5 @@ const struct ethtool_ops dpaa2_ethtool_ops = { .set_tunable = dpaa2_eth_set_tunable, .get_coalesce = dpaa2_eth_get_coalesce, .set_coalesce = dpaa2_eth_set_coalesce, + .get_channels = dpaa2_eth_get_channels, }; diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-xsk.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-xsk.c new file mode 100644 index 000000000000..567f52affe21 --- /dev/null +++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-xsk.c @@ -0,0 +1,454 @@ +// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) +/* Copyright 2022 NXP + */ +#include <linux/filter.h> +#include <linux/compiler.h> +#include <linux/bpf_trace.h> +#include <net/xdp.h> +#include <net/xdp_sock_drv.h> + +#include "dpaa2-eth.h" + +static void dpaa2_eth_setup_consume_func(struct dpaa2_eth_priv *priv, + struct 
dpaa2_eth_channel *ch, + enum dpaa2_eth_fq_type type, + dpaa2_eth_consume_cb_t *consume) +{ + struct dpaa2_eth_fq *fq; + int i; + + for (i = 0; i < priv->num_fqs; i++) { + fq = &priv->fq[i]; + + if (fq->type != type) + continue; + if (fq->channel != ch) + continue; + + fq->consume = consume; + } +} + +static u32 dpaa2_xsk_run_xdp(struct dpaa2_eth_priv *priv, + struct dpaa2_eth_channel *ch, + struct dpaa2_eth_fq *rx_fq, + struct dpaa2_fd *fd, void *vaddr) +{ + dma_addr_t addr = dpaa2_fd_get_addr(fd); + struct bpf_prog *xdp_prog; + struct xdp_buff *xdp_buff; + struct dpaa2_eth_swa *swa; + u32 xdp_act = XDP_PASS; + int err; + + xdp_prog = READ_ONCE(ch->xdp.prog); + if (!xdp_prog) + goto out; + + swa = (struct dpaa2_eth_swa *)(vaddr + DPAA2_ETH_RX_HWA_SIZE + + ch->xsk_pool->umem->headroom); + xdp_buff = swa->xsk.xdp_buff; + + xdp_buff->data_hard_start = vaddr; + xdp_buff->data = vaddr + dpaa2_fd_get_offset(fd); + xdp_buff->data_end = xdp_buff->data + dpaa2_fd_get_len(fd); + xdp_set_data_meta_invalid(xdp_buff); + xdp_buff->rxq = &ch->xdp_rxq; + + xsk_buff_dma_sync_for_cpu(xdp_buff, ch->xsk_pool); + xdp_act = bpf_prog_run_xdp(xdp_prog, xdp_buff); + + /* xdp.data pointer may have changed */ + dpaa2_fd_set_offset(fd, xdp_buff->data - vaddr); + dpaa2_fd_set_len(fd, xdp_buff->data_end - xdp_buff->data); + + if (likely(xdp_act == XDP_REDIRECT)) { + err = xdp_do_redirect(priv->net_dev, xdp_buff, xdp_prog); + if (unlikely(err)) { + ch->stats.xdp_drop++; + dpaa2_eth_recycle_buf(priv, ch, addr); + } else { + ch->buf_count--; + ch->stats.xdp_redirect++; + } + + goto xdp_redir; + } + + switch (xdp_act) { + case XDP_PASS: + break; + case XDP_TX: + dpaa2_eth_xdp_enqueue(priv, ch, fd, vaddr, rx_fq->flowid); + break; + default: + bpf_warn_invalid_xdp_action(priv->net_dev, xdp_prog, xdp_act); + fallthrough; + case XDP_ABORTED: + trace_xdp_exception(priv->net_dev, xdp_prog, xdp_act); + fallthrough; + case XDP_DROP: + dpaa2_eth_recycle_buf(priv, ch, addr); + ch->stats.xdp_drop++; + break; + } + +xdp_redir: + ch->xdp.res |= xdp_act; +out: + return xdp_act; +} + +/* Rx frame processing routine for the AF_XDP fast path */ +static void dpaa2_xsk_rx(struct dpaa2_eth_priv *priv, + struct dpaa2_eth_channel *ch, + const struct dpaa2_fd *fd, + struct dpaa2_eth_fq *fq) +{ + dma_addr_t addr = dpaa2_fd_get_addr(fd); + u8 fd_format = dpaa2_fd_get_format(fd); + struct rtnl_link_stats64 *percpu_stats; + u32 fd_length = dpaa2_fd_get_len(fd); + struct sk_buff *skb; + void *vaddr; + u32 xdp_act; + + trace_dpaa2_rx_xsk_fd(priv->net_dev, fd); + + vaddr = dpaa2_iova_to_virt(priv->iommu_domain, addr); + percpu_stats = this_cpu_ptr(priv->percpu_stats); + + if (fd_format != dpaa2_fd_single) { + WARN_ON(priv->xdp_prog); + /* AF_XDP doesn't support any other formats */ + goto err_frame_format; + } + + xdp_act = dpaa2_xsk_run_xdp(priv, ch, fq, (struct dpaa2_fd *)fd, vaddr); + if (xdp_act != XDP_PASS) { + percpu_stats->rx_packets++; + percpu_stats->rx_bytes += dpaa2_fd_get_len(fd); + return; + } + + /* Build skb */ + skb = dpaa2_eth_alloc_skb(priv, ch, fd, fd_length, vaddr); + if (!skb) + /* Nothing else we can do, recycle the buffer and + * drop the frame. 
+ */ + goto err_alloc_skb; + + /* Send the skb to the Linux networking stack */ + dpaa2_eth_receive_skb(priv, ch, fd, vaddr, fq, percpu_stats, skb); + + return; + +err_alloc_skb: + dpaa2_eth_recycle_buf(priv, ch, addr); +err_frame_format: + percpu_stats->rx_dropped++; +} + +static void dpaa2_xsk_set_bp_per_qdbin(struct dpaa2_eth_priv *priv, + struct dpni_pools_cfg *pools_params) +{ + int curr_bp = 0, i, j; + + pools_params->pool_options = DPNI_POOL_ASSOC_QDBIN; + for (i = 0; i < priv->num_bps; i++) { + for (j = 0; j < priv->num_channels; j++) + if (priv->bp[i] == priv->channel[j]->bp) + pools_params->pools[curr_bp].priority_mask |= (1 << j); + if (!pools_params->pools[curr_bp].priority_mask) + continue; + + pools_params->pools[curr_bp].dpbp_id = priv->bp[i]->bpid; + pools_params->pools[curr_bp].buffer_size = priv->rx_buf_size; + pools_params->pools[curr_bp++].backup_pool = 0; + } + pools_params->num_dpbp = curr_bp; +} + +static int dpaa2_xsk_disable_pool(struct net_device *dev, u16 qid) +{ + struct xsk_buff_pool *pool = xsk_get_pool_from_qid(dev, qid); + struct dpaa2_eth_priv *priv = netdev_priv(dev); + struct dpni_pools_cfg pools_params = { 0 }; + struct dpaa2_eth_channel *ch; + int err; + bool up; + + ch = priv->channel[qid]; + if (!ch->xsk_pool) + return -EINVAL; + + up = netif_running(dev); + if (up) + dev_close(dev); + + xsk_pool_dma_unmap(pool, 0); + err = xdp_rxq_info_reg_mem_model(&ch->xdp_rxq, + MEM_TYPE_PAGE_ORDER0, NULL); + if (err) + netdev_err(dev, "xsk_rxq_info_reg_mem_model() failed (err = %d)\n", + err); + + dpaa2_eth_free_dpbp(priv, ch->bp); + + ch->xsk_zc = false; + ch->xsk_pool = NULL; + ch->xsk_tx_pkts_sent = 0; + ch->bp = priv->bp[DPAA2_ETH_DEFAULT_BP_IDX]; + + dpaa2_eth_setup_consume_func(priv, ch, DPAA2_RX_FQ, dpaa2_eth_rx); + + dpaa2_xsk_set_bp_per_qdbin(priv, &pools_params); + err = dpni_set_pools(priv->mc_io, 0, priv->mc_token, &pools_params); + if (err) + netdev_err(dev, "dpni_set_pools() failed\n"); + + if (up) { + err = dev_open(dev, NULL); + if (err) + return err; + } + + return 0; +} + +static int dpaa2_xsk_enable_pool(struct net_device *dev, + struct xsk_buff_pool *pool, + u16 qid) +{ + struct dpaa2_eth_priv *priv = netdev_priv(dev); + struct dpni_pools_cfg pools_params = { 0 }; + struct dpaa2_eth_channel *ch; + int err, err2; + bool up; + + if (priv->dpni_attrs.wriop_version < DPAA2_WRIOP_VERSION(3, 0, 0)) { + netdev_err(dev, "AF_XDP zero-copy not supported on devices <= WRIOP(3, 0, 0)\n"); + return -EOPNOTSUPP; + } + + if (priv->dpni_attrs.num_queues > 8) { + netdev_err(dev, "AF_XDP zero-copy not supported on DPNI with more then 8 queues\n"); + return -EOPNOTSUPP; + } + + up = netif_running(dev); + if (up) + dev_close(dev); + + err = xsk_pool_dma_map(pool, priv->net_dev->dev.parent, 0); + if (err) { + netdev_err(dev, "xsk_pool_dma_map() failed (err = %d)\n", + err); + goto err_dma_unmap; + } + + ch = priv->channel[qid]; + err = xdp_rxq_info_reg_mem_model(&ch->xdp_rxq, MEM_TYPE_XSK_BUFF_POOL, NULL); + if (err) { + netdev_err(dev, "xdp_rxq_info_reg_mem_model() failed (err = %d)\n", err); + goto err_mem_model; + } + xsk_pool_set_rxq_info(pool, &ch->xdp_rxq); + + priv->bp[priv->num_bps] = dpaa2_eth_allocate_dpbp(priv); + if (IS_ERR(priv->bp[priv->num_bps])) { + err = PTR_ERR(priv->bp[priv->num_bps]); + goto err_bp_alloc; + } + ch->xsk_zc = true; + ch->xsk_pool = pool; + ch->bp = priv->bp[priv->num_bps++]; + + dpaa2_eth_setup_consume_func(priv, ch, DPAA2_RX_FQ, dpaa2_xsk_rx); + + dpaa2_xsk_set_bp_per_qdbin(priv, &pools_params); + err = 
dpni_set_pools(priv->mc_io, 0, priv->mc_token, &pools_params); + if (err) { + netdev_err(dev, "dpni_set_pools() failed\n"); + goto err_set_pools; + } + + if (up) { + err = dev_open(dev, NULL); + if (err) + return err; + } + + return 0; + +err_set_pools: + err2 = dpaa2_xsk_disable_pool(dev, qid); + if (err2) + netdev_err(dev, "dpaa2_xsk_disable_pool() failed %d\n", err2); +err_bp_alloc: + err2 = xdp_rxq_info_reg_mem_model(&priv->channel[qid]->xdp_rxq, + MEM_TYPE_PAGE_ORDER0, NULL); + if (err2) + netdev_err(dev, "xsk_rxq_info_reg_mem_model() failed with %d)\n", err2); +err_mem_model: + xsk_pool_dma_unmap(pool, 0); +err_dma_unmap: + if (up) + dev_open(dev, NULL); + + return err; +} + +int dpaa2_xsk_setup_pool(struct net_device *dev, struct xsk_buff_pool *pool, u16 qid) +{ + return pool ? dpaa2_xsk_enable_pool(dev, pool, qid) : + dpaa2_xsk_disable_pool(dev, qid); +} + +int dpaa2_xsk_wakeup(struct net_device *dev, u32 qid, u32 flags) +{ + struct dpaa2_eth_priv *priv = netdev_priv(dev); + struct dpaa2_eth_channel *ch = priv->channel[qid]; + + if (!priv->link_state.up) + return -ENETDOWN; + + if (!priv->xdp_prog) + return -EINVAL; + + if (!ch->xsk_zc) + return -EINVAL; + + /* We do not have access to a per channel SW interrupt, so instead we + * schedule a NAPI instance. + */ + if (!napi_if_scheduled_mark_missed(&ch->napi)) + napi_schedule(&ch->napi); + + return 0; +} + +static int dpaa2_xsk_tx_build_fd(struct dpaa2_eth_priv *priv, + struct dpaa2_eth_channel *ch, + struct dpaa2_fd *fd, + struct xdp_desc *xdp_desc) +{ + struct device *dev = priv->net_dev->dev.parent; + struct dpaa2_sg_entry *sgt; + struct dpaa2_eth_swa *swa; + void *sgt_buf = NULL; + dma_addr_t sgt_addr; + int sgt_buf_size; + dma_addr_t addr; + int err = 0; + + /* Prepare the HW SGT structure */ + sgt_buf_size = priv->tx_data_offset + sizeof(struct dpaa2_sg_entry); + sgt_buf = dpaa2_eth_sgt_get(priv); + if (unlikely(!sgt_buf)) + return -ENOMEM; + sgt = (struct dpaa2_sg_entry *)(sgt_buf + priv->tx_data_offset); + + /* Get the address of the XSK Tx buffer */ + addr = xsk_buff_raw_get_dma(ch->xsk_pool, xdp_desc->addr); + xsk_buff_raw_dma_sync_for_device(ch->xsk_pool, addr, xdp_desc->len); + + /* Fill in the HW SGT structure */ + dpaa2_sg_set_addr(sgt, addr); + dpaa2_sg_set_len(sgt, xdp_desc->len); + dpaa2_sg_set_final(sgt, true); + + /* Store the necessary info in the SGT buffer */ + swa = (struct dpaa2_eth_swa *)sgt_buf; + swa->type = DPAA2_ETH_SWA_XSK; + swa->xsk.sgt_size = sgt_buf_size; + + /* Separately map the SGT buffer */ + sgt_addr = dma_map_single(dev, sgt_buf, sgt_buf_size, DMA_BIDIRECTIONAL); + if (unlikely(dma_mapping_error(dev, sgt_addr))) { + err = -ENOMEM; + goto sgt_map_failed; + } + + /* Initialize FD fields */ + memset(fd, 0, sizeof(struct dpaa2_fd)); + dpaa2_fd_set_offset(fd, priv->tx_data_offset); + dpaa2_fd_set_format(fd, dpaa2_fd_sg); + dpaa2_fd_set_addr(fd, sgt_addr); + dpaa2_fd_set_len(fd, xdp_desc->len); + dpaa2_fd_set_ctrl(fd, FD_CTRL_PTA); + + return 0; + +sgt_map_failed: + dpaa2_eth_sgt_recycle(priv, sgt_buf); + + return err; +} + +bool dpaa2_xsk_tx(struct dpaa2_eth_priv *priv, + struct dpaa2_eth_channel *ch) +{ + struct xdp_desc *xdp_descs = ch->xsk_pool->tx_descs; + struct dpaa2_eth_drv_stats *percpu_extras; + struct rtnl_link_stats64 *percpu_stats; + int budget = DPAA2_ETH_TX_ZC_PER_NAPI; + int total_enqueued, enqueued; + int retries, max_retries; + struct dpaa2_eth_fq *fq; + struct dpaa2_fd *fds; + int batch, i, err; + + percpu_stats = this_cpu_ptr(priv->percpu_stats); + percpu_extras = 
this_cpu_ptr(priv->percpu_extras); + fds = (this_cpu_ptr(priv->fd))->array; + + /* Use the FQ with the same idx as the affine CPU */ + fq = &priv->fq[ch->nctx.desired_cpu]; + + batch = xsk_tx_peek_release_desc_batch(ch->xsk_pool, budget); + if (!batch) + return false; + + /* Create a FD for each XSK frame to be sent */ + for (i = 0; i < batch; i++) { + err = dpaa2_xsk_tx_build_fd(priv, ch, &fds[i], &xdp_descs[i]); + if (err) { + batch = i; + break; + } + + trace_dpaa2_tx_xsk_fd(priv->net_dev, &fds[i]); + } + + /* Enqueue all the created FDs */ + max_retries = batch * DPAA2_ETH_ENQUEUE_RETRIES; + total_enqueued = 0; + enqueued = 0; + retries = 0; + while (total_enqueued < batch && retries < max_retries) { + err = priv->enqueue(priv, fq, &fds[total_enqueued], 0, + batch - total_enqueued, &enqueued); + if (err == -EBUSY) { + retries++; + continue; + } + + total_enqueued += enqueued; + } + percpu_extras->tx_portal_busy += retries; + + /* Update statistics */ + percpu_stats->tx_packets += total_enqueued; + for (i = 0; i < total_enqueued; i++) + percpu_stats->tx_bytes += dpaa2_fd_get_len(&fds[i]); + for (i = total_enqueued; i < batch; i++) { + dpaa2_eth_free_tx_fd(priv, ch, fq, &fds[i], false); + percpu_stats->tx_errors++; + } + + xsk_tx_release(ch->xsk_pool); + + return total_enqueued == budget ? true : false; +} diff --git a/drivers/net/ethernet/freescale/dpaa2/dpni-cmd.h b/drivers/net/ethernet/freescale/dpaa2/dpni-cmd.h index 828f538097af..be9492b8d5dc 100644 --- a/drivers/net/ethernet/freescale/dpaa2/dpni-cmd.h +++ b/drivers/net/ethernet/freescale/dpaa2/dpni-cmd.h @@ -13,10 +13,12 @@ #define DPNI_VER_MINOR 0 #define DPNI_CMD_BASE_VERSION 1 #define DPNI_CMD_2ND_VERSION 2 +#define DPNI_CMD_3RD_VERSION 3 #define DPNI_CMD_ID_OFFSET 4 #define DPNI_CMD(id) (((id) << DPNI_CMD_ID_OFFSET) | DPNI_CMD_BASE_VERSION) #define DPNI_CMD_V2(id) (((id) << DPNI_CMD_ID_OFFSET) | DPNI_CMD_2ND_VERSION) +#define DPNI_CMD_V3(id) (((id) << DPNI_CMD_ID_OFFSET) | DPNI_CMD_3RD_VERSION) #define DPNI_CMDID_OPEN DPNI_CMD(0x801) #define DPNI_CMDID_CLOSE DPNI_CMD(0x800) @@ -39,7 +41,7 @@ #define DPNI_CMDID_GET_IRQ_STATUS DPNI_CMD(0x016) #define DPNI_CMDID_CLEAR_IRQ_STATUS DPNI_CMD(0x017) -#define DPNI_CMDID_SET_POOLS DPNI_CMD(0x200) +#define DPNI_CMDID_SET_POOLS DPNI_CMD_V3(0x200) #define DPNI_CMDID_SET_ERRORS_BEHAVIOR DPNI_CMD(0x20B) #define DPNI_CMDID_GET_QDID DPNI_CMD(0x210) @@ -115,14 +117,19 @@ struct dpni_cmd_open { }; #define DPNI_BACKUP_POOL(val, order) (((val) & 0x1) << (order)) + +struct dpni_cmd_pool { + __le16 dpbp_id; + u8 priority_mask; + u8 pad; +}; + struct dpni_cmd_set_pools { - /* cmd word 0 */ u8 num_dpbp; u8 backup_pool_mask; - __le16 pad; - /* cmd word 0..4 */ - __le32 dpbp_id[DPNI_MAX_DPBP]; - /* cmd word 4..6 */ + u8 pad; + u8 pool_options; + struct dpni_cmd_pool pool[DPNI_MAX_DPBP]; __le16 buffer_size[DPNI_MAX_DPBP]; }; diff --git a/drivers/net/ethernet/freescale/dpaa2/dpni.c b/drivers/net/ethernet/freescale/dpaa2/dpni.c index 6c3b36f20fb8..02601a283b59 100644 --- a/drivers/net/ethernet/freescale/dpaa2/dpni.c +++ b/drivers/net/ethernet/freescale/dpaa2/dpni.c @@ -173,8 +173,12 @@ int dpni_set_pools(struct fsl_mc_io *mc_io, token); cmd_params = (struct dpni_cmd_set_pools *)cmd.params; cmd_params->num_dpbp = cfg->num_dpbp; + cmd_params->pool_options = cfg->pool_options; for (i = 0; i < DPNI_MAX_DPBP; i++) { - cmd_params->dpbp_id[i] = cpu_to_le32(cfg->pools[i].dpbp_id); + cmd_params->pool[i].dpbp_id = + cpu_to_le16(cfg->pools[i].dpbp_id); + cmd_params->pool[i].priority_mask = + 
cfg->pools[i].priority_mask; cmd_params->buffer_size[i] = cpu_to_le16(cfg->pools[i].buffer_size); cmd_params->backup_pool_mask |= diff --git a/drivers/net/ethernet/freescale/dpaa2/dpni.h b/drivers/net/ethernet/freescale/dpaa2/dpni.h index 6fffd519aa00..5c0a1d5ac934 100644 --- a/drivers/net/ethernet/freescale/dpaa2/dpni.h +++ b/drivers/net/ethernet/freescale/dpaa2/dpni.h @@ -92,19 +92,28 @@ int dpni_close(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token); +#define DPNI_POOL_ASSOC_QPRI 0 +#define DPNI_POOL_ASSOC_QDBIN 1 + /** * struct dpni_pools_cfg - Structure representing buffer pools configuration * @num_dpbp: Number of DPBPs + * @pool_options: Buffer assignment options. + * This field is a combination of DPNI_POOL_ASSOC_flags * @pools: Array of buffer pools parameters; The number of valid entries * must match 'num_dpbp' value * @pools.dpbp_id: DPBP object ID + * @pools.priority: Priority mask that indicates TC's used with this buffer. + * If set to 0x00 MC will assume value 0xff. * @pools.buffer_size: Buffer size * @pools.backup_pool: Backup pool */ struct dpni_pools_cfg { u8 num_dpbp; + u8 pool_options; struct { int dpbp_id; + u8 priority_mask; u16 buffer_size; int backup_pool; } pools[DPNI_MAX_DPBP]; diff --git a/drivers/net/ethernet/freescale/fec.h b/drivers/net/ethernet/freescale/fec.h index 33f84a30e167..476e3863a310 100644 --- a/drivers/net/ethernet/freescale/fec.h +++ b/drivers/net/ethernet/freescale/fec.h @@ -658,6 +658,8 @@ struct fec_enet_private { unsigned int reload_period; int pps_enable; unsigned int next_counter; + struct hrtimer perout_timer; + u64 perout_stime; struct imx_sc_ipc *ipc_handle; diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c index 98d5cd313fdd..6986b74fb8af 100644 --- a/drivers/net/ethernet/freescale/fec_main.c +++ b/drivers/net/ethernet/freescale/fec_main.c @@ -365,16 +365,6 @@ static void swap_buffer(void *bufaddr, int len) swab32s(buf); } -static void swap_buffer2(void *dst_buf, void *src_buf, int len) -{ - int i; - unsigned int *src = src_buf; - unsigned int *dst = dst_buf; - - for (i = 0; i < len; i += 4, src++, dst++) - *dst = swab32p(src); -} - static void fec_dump(struct net_device *ndev) { struct fec_enet_private *fep = netdev_priv(ndev); @@ -1494,53 +1484,6 @@ static void fec_enet_tx(struct net_device *ndev) fec_enet_tx_queue(ndev, i); } -static int __maybe_unused -fec_enet_new_rxbdp(struct net_device *ndev, struct bufdesc *bdp, struct sk_buff *skb) -{ - struct fec_enet_private *fep = netdev_priv(ndev); - int off; - - off = ((unsigned long)skb->data) & fep->rx_align; - if (off) - skb_reserve(skb, fep->rx_align + 1 - off); - - bdp->cbd_bufaddr = cpu_to_fec32(dma_map_single(&fep->pdev->dev, skb->data, FEC_ENET_RX_FRSIZE - fep->rx_align, DMA_FROM_DEVICE)); - if (dma_mapping_error(&fep->pdev->dev, fec32_to_cpu(bdp->cbd_bufaddr))) { - if (net_ratelimit()) - netdev_err(ndev, "Rx DMA memory map failed\n"); - return -ENOMEM; - } - - return 0; -} - -static bool __maybe_unused -fec_enet_copybreak(struct net_device *ndev, struct sk_buff **skb, - struct bufdesc *bdp, u32 length, bool swap) -{ - struct fec_enet_private *fep = netdev_priv(ndev); - struct sk_buff *new_skb; - - if (length > fep->rx_copybreak) - return false; - - new_skb = netdev_alloc_skb(ndev, length); - if (!new_skb) - return false; - - dma_sync_single_for_cpu(&fep->pdev->dev, - fec32_to_cpu(bdp->cbd_bufaddr), - FEC_ENET_RX_FRSIZE - fep->rx_align, - DMA_FROM_DEVICE); - if (!swap) - memcpy(new_skb->data, (*skb)->data, length); - else - 
swap_buffer2(new_skb->data, (*skb)->data, length); - *skb = new_skb; - - return true; -} - static void fec_enet_update_cbd(struct fec_enet_priv_rx_q *rxq, struct bufdesc *bdp, int index) { diff --git a/drivers/net/ethernet/freescale/fec_ptp.c b/drivers/net/ethernet/freescale/fec_ptp.c index cffd9ad499dd..67aa694a62ec 100644 --- a/drivers/net/ethernet/freescale/fec_ptp.c +++ b/drivers/net/ethernet/freescale/fec_ptp.c @@ -88,6 +88,9 @@ #define FEC_CHANNLE_0 0 #define DEFAULT_PPS_CHANNEL FEC_CHANNLE_0 +#define FEC_PTP_MAX_NSEC_PERIOD 4000000000ULL +#define FEC_PTP_MAX_NSEC_COUNTER 0x80000000ULL + /** * fec_ptp_enable_pps * @fep: the fec_enet_private structure handle @@ -198,6 +201,78 @@ static int fec_ptp_enable_pps(struct fec_enet_private *fep, uint enable) return 0; } +static int fec_ptp_pps_perout(struct fec_enet_private *fep) +{ + u32 compare_val, ptp_hc, temp_val; + u64 curr_time; + unsigned long flags; + + spin_lock_irqsave(&fep->tmreg_lock, flags); + + /* Update time counter */ + timecounter_read(&fep->tc); + + /* Get the current ptp hardware time counter */ + temp_val = readl(fep->hwp + FEC_ATIME_CTRL); + temp_val |= FEC_T_CTRL_CAPTURE; + writel(temp_val, fep->hwp + FEC_ATIME_CTRL); + if (fep->quirks & FEC_QUIRK_BUG_CAPTURE) + udelay(1); + + ptp_hc = readl(fep->hwp + FEC_ATIME); + + /* Convert the ptp local counter to 1588 timestamp */ + curr_time = timecounter_cyc2time(&fep->tc, ptp_hc); + + /* If the pps start time less than current time add 100ms, just return. + * Because the software might not able to set the comparison time into + * the FEC_TCCR register in time and missed the start time. + */ + if (fep->perout_stime < curr_time + 100 * NSEC_PER_MSEC) { + dev_err(&fep->pdev->dev, "Current time is too close to the start time!\n"); + spin_unlock_irqrestore(&fep->tmreg_lock, flags); + return -1; + } + + compare_val = fep->perout_stime - curr_time + ptp_hc; + compare_val &= fep->cc.mask; + + writel(compare_val, fep->hwp + FEC_TCCR(fep->pps_channel)); + fep->next_counter = (compare_val + fep->reload_period) & fep->cc.mask; + + /* Enable compare event when overflow */ + temp_val = readl(fep->hwp + FEC_ATIME_CTRL); + temp_val |= FEC_T_CTRL_PINPER; + writel(temp_val, fep->hwp + FEC_ATIME_CTRL); + + /* Compare channel setting. */ + temp_val = readl(fep->hwp + FEC_TCSR(fep->pps_channel)); + temp_val |= (1 << FEC_T_TF_OFFSET | 1 << FEC_T_TIE_OFFSET); + temp_val &= ~(1 << FEC_T_TDRE_OFFSET); + temp_val &= ~(FEC_T_TMODE_MASK); + temp_val |= (FEC_TMODE_TOGGLE << FEC_T_TMODE_OFFSET); + writel(temp_val, fep->hwp + FEC_TCSR(fep->pps_channel)); + + /* Write the second compare event timestamp and calculate + * the third timestamp. Refer the TCCR register detail in the spec. 
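+	 * Since TCSR is programmed with FEC_TMODE_TOGGLE, the output pin toggles
+	 * on every compare match, so consecutive compare values written to
+	 * FEC_TCCR are spaced fep->reload_period apart, i.e. half of the
+	 * requested perout period.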
+ */ + writel(fep->next_counter, fep->hwp + FEC_TCCR(fep->pps_channel)); + fep->next_counter = (fep->next_counter + fep->reload_period) & fep->cc.mask; + spin_unlock_irqrestore(&fep->tmreg_lock, flags); + + return 0; +} + +static enum hrtimer_restart fec_ptp_pps_perout_handler(struct hrtimer *timer) +{ + struct fec_enet_private *fep = container_of(timer, + struct fec_enet_private, perout_timer); + + fec_ptp_pps_perout(fep); + + return HRTIMER_NORESTART; +} + /** * fec_ptp_read - read raw cycle counter (to be used by time counter) * @cc: the cyclecounter structure @@ -425,6 +500,17 @@ static int fec_ptp_settime(struct ptp_clock_info *ptp, return 0; } +static int fec_ptp_pps_disable(struct fec_enet_private *fep, uint channel) +{ + unsigned long flags; + + spin_lock_irqsave(&fep->tmreg_lock, flags); + writel(0, fep->hwp + FEC_TCSR(channel)); + spin_unlock_irqrestore(&fep->tmreg_lock, flags); + + return 0; +} + /** * fec_ptp_enable * @ptp: the ptp clock structure @@ -437,14 +523,84 @@ static int fec_ptp_enable(struct ptp_clock_info *ptp, { struct fec_enet_private *fep = container_of(ptp, struct fec_enet_private, ptp_caps); + ktime_t timeout; + struct timespec64 start_time, period; + u64 curr_time, delta, period_ns; + unsigned long flags; int ret = 0; if (rq->type == PTP_CLK_REQ_PPS) { ret = fec_ptp_enable_pps(fep, on); return ret; + } else if (rq->type == PTP_CLK_REQ_PEROUT) { + /* Reject requests with unsupported flags */ + if (rq->perout.flags) + return -EOPNOTSUPP; + + if (rq->perout.index != DEFAULT_PPS_CHANNEL) + return -EOPNOTSUPP; + + fep->pps_channel = DEFAULT_PPS_CHANNEL; + period.tv_sec = rq->perout.period.sec; + period.tv_nsec = rq->perout.period.nsec; + period_ns = timespec64_to_ns(&period); + + /* FEC PTP timer only has 31 bits, so if the period exceed + * 4s is not supported. + */ + if (period_ns > FEC_PTP_MAX_NSEC_PERIOD) { + dev_err(&fep->pdev->dev, "The period must equal to or less than 4s!\n"); + return -EOPNOTSUPP; + } + + fep->reload_period = div_u64(period_ns, 2); + if (on && fep->reload_period) { + /* Convert 1588 timestamp to ns*/ + start_time.tv_sec = rq->perout.start.sec; + start_time.tv_nsec = rq->perout.start.nsec; + fep->perout_stime = timespec64_to_ns(&start_time); + + mutex_lock(&fep->ptp_clk_mutex); + if (!fep->ptp_clk_on) { + dev_err(&fep->pdev->dev, "Error: PTP clock is closed!\n"); + mutex_unlock(&fep->ptp_clk_mutex); + return -EOPNOTSUPP; + } + spin_lock_irqsave(&fep->tmreg_lock, flags); + /* Read current timestamp */ + curr_time = timecounter_read(&fep->tc); + spin_unlock_irqrestore(&fep->tmreg_lock, flags); + mutex_unlock(&fep->ptp_clk_mutex); + + /* Calculate time difference */ + delta = fep->perout_stime - curr_time; + + if (fep->perout_stime <= curr_time) { + dev_err(&fep->pdev->dev, "Start time must larger than current time!\n"); + return -EINVAL; + } + + /* Because the timer counter of FEC only has 31-bits, correspondingly, + * the time comparison register FEC_TCCR also only low 31 bits can be + * set. If the start time of pps signal exceeds current time more than + * 0x80000000 ns, a software timer is used and the timer expires about + * 1 second before the start time to be able to set FEC_TCCR. 
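+			 * For example, a start time roughly 5 s ahead of the
+			 * current time arms the hrtimer for about 4 s
+			 * (delta - NSEC_PER_SEC); the remaining ~1 s is well
+			 * within the 0x80000000 ns window described above.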
+ */ + if (delta > FEC_PTP_MAX_NSEC_COUNTER) { + timeout = ns_to_ktime(delta - NSEC_PER_SEC); + hrtimer_start(&fep->perout_timer, timeout, HRTIMER_MODE_REL); + } else { + return fec_ptp_pps_perout(fep); + } + } else { + fec_ptp_pps_disable(fep, fep->pps_channel); + } + + return 0; + } else { + return -EOPNOTSUPP; } - return -EOPNOTSUPP; } /** @@ -583,7 +739,7 @@ void fec_ptp_init(struct platform_device *pdev, int irq_idx) fep->ptp_caps.max_adj = 250000000; fep->ptp_caps.n_alarm = 0; fep->ptp_caps.n_ext_ts = 0; - fep->ptp_caps.n_per_out = 0; + fep->ptp_caps.n_per_out = 1; fep->ptp_caps.n_pins = 0; fep->ptp_caps.pps = 1; fep->ptp_caps.adjfreq = fec_ptp_adjfreq; @@ -605,6 +761,9 @@ void fec_ptp_init(struct platform_device *pdev, int irq_idx) INIT_DELAYED_WORK(&fep->time_keep, fec_time_keep); + hrtimer_init(&fep->perout_timer, CLOCK_REALTIME, HRTIMER_MODE_REL); + fep->perout_timer.function = fec_ptp_pps_perout_handler; + irq = platform_get_irq_byname_optional(pdev, "pps"); if (irq < 0) irq = platform_get_irq_optional(pdev, irq_idx); @@ -634,6 +793,7 @@ void fec_ptp_stop(struct platform_device *pdev) struct fec_enet_private *fep = netdev_priv(ndev); cancel_delayed_work_sync(&fep->time_keep); + hrtimer_cancel(&fep->perout_timer); if (fep->ptp_clock) ptp_clock_unregister(fep->ptp_clock); } diff --git a/drivers/net/ethernet/freescale/fman/Kconfig b/drivers/net/ethernet/freescale/fman/Kconfig index 48bf8088795d..e76a3d262b2b 100644 --- a/drivers/net/ethernet/freescale/fman/Kconfig +++ b/drivers/net/ethernet/freescale/fman/Kconfig @@ -3,7 +3,9 @@ config FSL_FMAN tristate "FMan support" depends on FSL_SOC || ARCH_LAYERSCAPE || COMPILE_TEST select GENERIC_ALLOCATOR - select PHYLIB + select PHYLINK + select PCS + select PCS_LYNX select CRC32 default n help diff --git a/drivers/net/ethernet/freescale/fman/fman_dtsec.c b/drivers/net/ethernet/freescale/fman/fman_dtsec.c index 6617932fd3fd..3c87820ca202 100644 --- a/drivers/net/ethernet/freescale/fman/fman_dtsec.c +++ b/drivers/net/ethernet/freescale/fman/fman_dtsec.c @@ -17,6 +17,7 @@ #include <linux/crc32.h> #include <linux/of_mdio.h> #include <linux/mii.h> +#include <linux/netdevice.h> /* TBI register addresses */ #define MII_TBICON 0x11 @@ -29,9 +30,6 @@ #define TBICON_CLK_SELECT 0x0020 /* Clock select */ #define TBICON_MI_MODE 0x0010 /* GMII mode (TBI if not set) */ -#define TBIANA_SGMII 0x4001 -#define TBIANA_1000X 0x01a0 - /* Interrupt Mask Register (IMASK) */ #define DTSEC_IMASK_BREN 0x80000000 #define DTSEC_IMASK_RXCEN 0x40000000 @@ -92,9 +90,10 @@ #define DTSEC_ECNTRL_GMIIM 0x00000040 #define DTSEC_ECNTRL_TBIM 0x00000020 -#define DTSEC_ECNTRL_SGMIIM 0x00000002 #define DTSEC_ECNTRL_RPM 0x00000010 #define DTSEC_ECNTRL_R100M 0x00000008 +#define DTSEC_ECNTRL_RMM 0x00000004 +#define DTSEC_ECNTRL_SGMIIM 0x00000002 #define DTSEC_ECNTRL_QSGMIIM 0x00000001 #define TCTRL_TTSE 0x00000040 @@ -318,7 +317,8 @@ struct fman_mac { void *fm; struct fman_rev_info fm_rev_info; bool basex_if; - struct phy_device *tbiphy; + struct mdio_device *tbidev; + struct phylink_pcs pcs; }; static void set_dflts(struct dtsec_cfg *cfg) @@ -356,56 +356,14 @@ static int init(struct dtsec_regs __iomem *regs, struct dtsec_cfg *cfg, phy_interface_t iface, u16 iface_speed, u64 addr, u32 exception_mask, u8 tbi_addr) { - bool is_rgmii, is_sgmii, is_qsgmii; enet_addr_t eth_addr; - u32 tmp; + u32 tmp = 0; int i; /* Soft reset */ iowrite32be(MACCFG1_SOFT_RESET, ®s->maccfg1); iowrite32be(0, ®s->maccfg1); - /* dtsec_id2 */ - tmp = ioread32be(®s->tsec_id2); - - /* check RGMII support */ - if 
(iface == PHY_INTERFACE_MODE_RGMII || - iface == PHY_INTERFACE_MODE_RGMII_ID || - iface == PHY_INTERFACE_MODE_RGMII_RXID || - iface == PHY_INTERFACE_MODE_RGMII_TXID || - iface == PHY_INTERFACE_MODE_RMII) - if (tmp & DTSEC_ID2_INT_REDUCED_OFF) - return -EINVAL; - - if (iface == PHY_INTERFACE_MODE_SGMII || - iface == PHY_INTERFACE_MODE_MII) - if (tmp & DTSEC_ID2_INT_REDUCED_OFF) - return -EINVAL; - - is_rgmii = iface == PHY_INTERFACE_MODE_RGMII || - iface == PHY_INTERFACE_MODE_RGMII_ID || - iface == PHY_INTERFACE_MODE_RGMII_RXID || - iface == PHY_INTERFACE_MODE_RGMII_TXID; - is_sgmii = iface == PHY_INTERFACE_MODE_SGMII; - is_qsgmii = iface == PHY_INTERFACE_MODE_QSGMII; - - tmp = 0; - if (is_rgmii || iface == PHY_INTERFACE_MODE_GMII) - tmp |= DTSEC_ECNTRL_GMIIM; - if (is_sgmii) - tmp |= (DTSEC_ECNTRL_SGMIIM | DTSEC_ECNTRL_TBIM); - if (is_qsgmii) - tmp |= (DTSEC_ECNTRL_SGMIIM | DTSEC_ECNTRL_TBIM | - DTSEC_ECNTRL_QSGMIIM); - if (is_rgmii) - tmp |= DTSEC_ECNTRL_RPM; - if (iface_speed == SPEED_100) - tmp |= DTSEC_ECNTRL_R100M; - - iowrite32be(tmp, ®s->ecntrl); - - tmp = 0; - if (cfg->tx_pause_time) tmp |= cfg->tx_pause_time; if (cfg->tx_pause_time_extd) @@ -446,17 +404,10 @@ static int init(struct dtsec_regs __iomem *regs, struct dtsec_cfg *cfg, tmp = 0; - if (iface_speed < SPEED_1000) - tmp |= MACCFG2_NIBBLE_MODE; - else if (iface_speed == SPEED_1000) - tmp |= MACCFG2_BYTE_MODE; - tmp |= (cfg->preamble_len << MACCFG2_PREAMBLE_LENGTH_SHIFT) & MACCFG2_PREAMBLE_LENGTH_MASK; if (cfg->tx_pad_crc) tmp |= MACCFG2_PAD_CRC_EN; - /* Full Duplex */ - tmp |= MACCFG2_FULL_DUPLEX; iowrite32be(tmp, ®s->maccfg2); tmp = (((cfg->non_back_to_back_ipg1 << @@ -525,10 +476,6 @@ static void set_bucket(struct dtsec_regs __iomem *regs, int bucket, static int check_init_parameters(struct fman_mac *dtsec) { - if (dtsec->max_speed >= SPEED_10000) { - pr_err("1G MAC driver supports 1G or lower speeds\n"); - return -EINVAL; - } if ((dtsec->dtsec_drv_param)->rx_prepend > MAX_PACKET_ALIGNMENT) { pr_err("packetAlignmentPadding can't be > than %d\n", @@ -630,22 +577,10 @@ static int get_exception_flag(enum fman_mac_exceptions exception) return bit_mask; } -static bool is_init_done(struct dtsec_cfg *dtsec_drv_params) -{ - /* Checks if dTSEC driver parameters were initialized */ - if (!dtsec_drv_params) - return true; - - return false; -} - static u16 dtsec_get_max_frame_length(struct fman_mac *dtsec) { struct dtsec_regs __iomem *regs = dtsec->regs; - if (is_init_done(dtsec->dtsec_drv_param)) - return 0; - return (u16)ioread32be(®s->maxfrm); } @@ -682,6 +617,7 @@ static void dtsec_isr(void *handle) dtsec->exception_cb(dtsec->dev_id, FM_MAC_EX_1G_COL_RET_LMT); if (event & DTSEC_IMASK_XFUNEN) { /* FM_TX_LOCKUP_ERRATA_DTSEC6 Errata workaround */ + /* FIXME: This races with the rest of the driver! */ if (dtsec->fm_rev_info.major == 2) { u32 tpkt1, tmp_reg1, tpkt2, tmp_reg2, i; /* a. 
Write 0x00E0_0C00 to DTSEC_ID @@ -814,6 +750,43 @@ static void free_init_resources(struct fman_mac *dtsec) dtsec->unicast_addr_hash = NULL; } +static struct fman_mac *pcs_to_dtsec(struct phylink_pcs *pcs) +{ + return container_of(pcs, struct fman_mac, pcs); +} + +static void dtsec_pcs_get_state(struct phylink_pcs *pcs, + struct phylink_link_state *state) +{ + struct fman_mac *dtsec = pcs_to_dtsec(pcs); + + phylink_mii_c22_pcs_get_state(dtsec->tbidev, state); +} + +static int dtsec_pcs_config(struct phylink_pcs *pcs, unsigned int mode, + phy_interface_t interface, + const unsigned long *advertising, + bool permit_pause_to_mac) +{ + struct fman_mac *dtsec = pcs_to_dtsec(pcs); + + return phylink_mii_c22_pcs_config(dtsec->tbidev, mode, interface, + advertising); +} + +static void dtsec_pcs_an_restart(struct phylink_pcs *pcs) +{ + struct fman_mac *dtsec = pcs_to_dtsec(pcs); + + phylink_mii_c22_pcs_an_restart(dtsec->tbidev); +} + +static const struct phylink_pcs_ops dtsec_pcs_ops = { + .pcs_get_state = dtsec_pcs_get_state, + .pcs_config = dtsec_pcs_config, + .pcs_an_restart = dtsec_pcs_an_restart, +}; + static void graceful_start(struct fman_mac *dtsec) { struct dtsec_regs __iomem *regs = dtsec->regs; @@ -854,36 +827,11 @@ static void graceful_stop(struct fman_mac *dtsec) static int dtsec_enable(struct fman_mac *dtsec) { - struct dtsec_regs __iomem *regs = dtsec->regs; - u32 tmp; - - if (!is_init_done(dtsec->dtsec_drv_param)) - return -EINVAL; - - /* Enable */ - tmp = ioread32be(®s->maccfg1); - tmp |= MACCFG1_RX_EN | MACCFG1_TX_EN; - iowrite32be(tmp, ®s->maccfg1); - - /* Graceful start - clear the graceful Rx/Tx stop bit */ - graceful_start(dtsec); - return 0; } static void dtsec_disable(struct fman_mac *dtsec) { - struct dtsec_regs __iomem *regs = dtsec->regs; - u32 tmp; - - WARN_ON_ONCE(!is_init_done(dtsec->dtsec_drv_param)); - - /* Graceful stop - Assert the graceful Rx/Tx stop bit */ - graceful_stop(dtsec); - - tmp = ioread32be(®s->maccfg1); - tmp &= ~(MACCFG1_RX_EN | MACCFG1_TX_EN); - iowrite32be(tmp, ®s->maccfg1); } static int dtsec_set_tx_pause_frames(struct fman_mac *dtsec, @@ -894,11 +842,6 @@ static int dtsec_set_tx_pause_frames(struct fman_mac *dtsec, struct dtsec_regs __iomem *regs = dtsec->regs; u32 ptv = 0; - if (!is_init_done(dtsec->dtsec_drv_param)) - return -EINVAL; - - graceful_stop(dtsec); - if (pause_time) { /* FM_BAD_TX_TS_IN_B_2_B_ERRATA_DTSEC_A003 Errata workaround */ if (dtsec->fm_rev_info.major == 2 && pause_time <= 320) { @@ -919,8 +862,6 @@ static int dtsec_set_tx_pause_frames(struct fman_mac *dtsec, iowrite32be(ioread32be(®s->maccfg1) & ~MACCFG1_TX_FLOW, ®s->maccfg1); - graceful_start(dtsec); - return 0; } @@ -929,11 +870,6 @@ static int dtsec_accept_rx_pause_frames(struct fman_mac *dtsec, bool en) struct dtsec_regs __iomem *regs = dtsec->regs; u32 tmp; - if (!is_init_done(dtsec->dtsec_drv_param)) - return -EINVAL; - - graceful_stop(dtsec); - tmp = ioread32be(®s->maccfg1); if (en) tmp |= MACCFG1_RX_FLOW; @@ -941,17 +877,125 @@ static int dtsec_accept_rx_pause_frames(struct fman_mac *dtsec, bool en) tmp &= ~MACCFG1_RX_FLOW; iowrite32be(tmp, ®s->maccfg1); + return 0; +} + +static struct phylink_pcs *dtsec_select_pcs(struct phylink_config *config, + phy_interface_t iface) +{ + struct fman_mac *dtsec = fman_config_to_mac(config)->fman_mac; + + switch (iface) { + case PHY_INTERFACE_MODE_SGMII: + case PHY_INTERFACE_MODE_1000BASEX: + case PHY_INTERFACE_MODE_2500BASEX: + return &dtsec->pcs; + default: + return NULL; + } +} + +static void dtsec_mac_config(struct phylink_config 
*config, unsigned int mode, + const struct phylink_link_state *state) +{ + struct mac_device *mac_dev = fman_config_to_mac(config); + struct dtsec_regs __iomem *regs = mac_dev->fman_mac->regs; + u32 tmp; + + switch (state->interface) { + case PHY_INTERFACE_MODE_RMII: + tmp = DTSEC_ECNTRL_RMM; + break; + case PHY_INTERFACE_MODE_RGMII: + case PHY_INTERFACE_MODE_RGMII_ID: + case PHY_INTERFACE_MODE_RGMII_RXID: + case PHY_INTERFACE_MODE_RGMII_TXID: + tmp = DTSEC_ECNTRL_GMIIM | DTSEC_ECNTRL_RPM; + break; + case PHY_INTERFACE_MODE_SGMII: + case PHY_INTERFACE_MODE_1000BASEX: + case PHY_INTERFACE_MODE_2500BASEX: + tmp = DTSEC_ECNTRL_TBIM | DTSEC_ECNTRL_SGMIIM; + break; + default: + dev_warn(mac_dev->dev, "cannot configure dTSEC for %s\n", + phy_modes(state->interface)); + return; + } + + iowrite32be(tmp, ®s->ecntrl); +} + +static void dtsec_link_up(struct phylink_config *config, struct phy_device *phy, + unsigned int mode, phy_interface_t interface, + int speed, int duplex, bool tx_pause, bool rx_pause) +{ + struct mac_device *mac_dev = fman_config_to_mac(config); + struct fman_mac *dtsec = mac_dev->fman_mac; + struct dtsec_regs __iomem *regs = dtsec->regs; + u16 pause_time = tx_pause ? FSL_FM_PAUSE_TIME_ENABLE : + FSL_FM_PAUSE_TIME_DISABLE; + u32 tmp; + + dtsec_set_tx_pause_frames(dtsec, 0, pause_time, 0); + dtsec_accept_rx_pause_frames(dtsec, rx_pause); + + tmp = ioread32be(®s->ecntrl); + if (speed == SPEED_100) + tmp |= DTSEC_ECNTRL_R100M; + else + tmp &= ~DTSEC_ECNTRL_R100M; + iowrite32be(tmp, ®s->ecntrl); + + tmp = ioread32be(®s->maccfg2); + tmp &= ~(MACCFG2_NIBBLE_MODE | MACCFG2_BYTE_MODE | MACCFG2_FULL_DUPLEX); + if (speed >= SPEED_1000) + tmp |= MACCFG2_BYTE_MODE; + else + tmp |= MACCFG2_NIBBLE_MODE; + + if (duplex == DUPLEX_FULL) + tmp |= MACCFG2_FULL_DUPLEX; + + iowrite32be(tmp, ®s->maccfg2); + + mac_dev->update_speed(mac_dev, speed); + + /* Enable */ + tmp = ioread32be(®s->maccfg1); + tmp |= MACCFG1_RX_EN | MACCFG1_TX_EN; + iowrite32be(tmp, ®s->maccfg1); + + /* Graceful start - clear the graceful Rx/Tx stop bit */ graceful_start(dtsec); +} - return 0; +static void dtsec_link_down(struct phylink_config *config, unsigned int mode, + phy_interface_t interface) +{ + struct fman_mac *dtsec = fman_config_to_mac(config)->fman_mac; + struct dtsec_regs __iomem *regs = dtsec->regs; + u32 tmp; + + /* Graceful stop - Assert the graceful Rx/Tx stop bit */ + graceful_stop(dtsec); + + tmp = ioread32be(®s->maccfg1); + tmp &= ~(MACCFG1_RX_EN | MACCFG1_TX_EN); + iowrite32be(tmp, ®s->maccfg1); } +static const struct phylink_mac_ops dtsec_mac_ops = { + .validate = phylink_generic_validate, + .mac_select_pcs = dtsec_select_pcs, + .mac_config = dtsec_mac_config, + .mac_link_up = dtsec_link_up, + .mac_link_down = dtsec_link_down, +}; + static int dtsec_modify_mac_address(struct fman_mac *dtsec, const enet_addr_t *enet_addr) { - if (!is_init_done(dtsec->dtsec_drv_param)) - return -EINVAL; - graceful_stop(dtsec); /* Initialize MAC Station Address registers (1 & 2) @@ -975,9 +1019,6 @@ static int dtsec_add_hash_mac_address(struct fman_mac *dtsec, u32 crc = 0xFFFFFFFF; bool mcast, ghtx; - if (!is_init_done(dtsec->dtsec_drv_param)) - return -EINVAL; - addr = ENET_ADDR_TO_UINT64(*eth_addr); ghtx = (bool)((ioread32be(®s->rctrl) & RCTRL_GHTX) ? 
true : false); @@ -1037,9 +1078,6 @@ static int dtsec_set_allmulti(struct fman_mac *dtsec, bool enable) u32 tmp; struct dtsec_regs __iomem *regs = dtsec->regs; - if (!is_init_done(dtsec->dtsec_drv_param)) - return -EINVAL; - tmp = ioread32be(®s->rctrl); if (enable) tmp |= RCTRL_MPROM; @@ -1056,9 +1094,6 @@ static int dtsec_set_tstamp(struct fman_mac *dtsec, bool enable) struct dtsec_regs __iomem *regs = dtsec->regs; u32 rctrl, tctrl; - if (!is_init_done(dtsec->dtsec_drv_param)) - return -EINVAL; - rctrl = ioread32be(®s->rctrl); tctrl = ioread32be(®s->tctrl); @@ -1087,9 +1122,6 @@ static int dtsec_del_hash_mac_address(struct fman_mac *dtsec, u32 crc = 0xFFFFFFFF; bool mcast, ghtx; - if (!is_init_done(dtsec->dtsec_drv_param)) - return -EINVAL; - addr = ENET_ADDR_TO_UINT64(*eth_addr); ghtx = (bool)((ioread32be(®s->rctrl) & RCTRL_GHTX) ? true : false); @@ -1153,9 +1185,6 @@ static int dtsec_set_promiscuous(struct fman_mac *dtsec, bool new_val) struct dtsec_regs __iomem *regs = dtsec->regs; u32 tmp; - if (!is_init_done(dtsec->dtsec_drv_param)) - return -EINVAL; - /* Set unicast promiscuous */ tmp = ioread32be(®s->rctrl); if (new_val) @@ -1177,90 +1206,12 @@ static int dtsec_set_promiscuous(struct fman_mac *dtsec, bool new_val) return 0; } -static int dtsec_adjust_link(struct fman_mac *dtsec, u16 speed) -{ - struct dtsec_regs __iomem *regs = dtsec->regs; - u32 tmp; - - if (!is_init_done(dtsec->dtsec_drv_param)) - return -EINVAL; - - graceful_stop(dtsec); - - tmp = ioread32be(®s->maccfg2); - - /* Full Duplex */ - tmp |= MACCFG2_FULL_DUPLEX; - - tmp &= ~(MACCFG2_NIBBLE_MODE | MACCFG2_BYTE_MODE); - if (speed < SPEED_1000) - tmp |= MACCFG2_NIBBLE_MODE; - else if (speed == SPEED_1000) - tmp |= MACCFG2_BYTE_MODE; - iowrite32be(tmp, ®s->maccfg2); - - tmp = ioread32be(®s->ecntrl); - if (speed == SPEED_100) - tmp |= DTSEC_ECNTRL_R100M; - else - tmp &= ~DTSEC_ECNTRL_R100M; - iowrite32be(tmp, ®s->ecntrl); - - graceful_start(dtsec); - - return 0; -} - -static int dtsec_restart_autoneg(struct fman_mac *dtsec) -{ - u16 tmp_reg16; - - if (!is_init_done(dtsec->dtsec_drv_param)) - return -EINVAL; - - tmp_reg16 = phy_read(dtsec->tbiphy, MII_BMCR); - - tmp_reg16 &= ~(BMCR_SPEED100 | BMCR_SPEED1000); - tmp_reg16 |= (BMCR_ANENABLE | BMCR_ANRESTART | - BMCR_FULLDPLX | BMCR_SPEED1000); - - phy_write(dtsec->tbiphy, MII_BMCR, tmp_reg16); - - return 0; -} - -static void adjust_link_dtsec(struct mac_device *mac_dev) -{ - struct phy_device *phy_dev = mac_dev->phy_dev; - struct fman_mac *fman_mac; - bool rx_pause, tx_pause; - int err; - - fman_mac = mac_dev->fman_mac; - if (!phy_dev->link) { - dtsec_restart_autoneg(fman_mac); - - return; - } - - dtsec_adjust_link(fman_mac, phy_dev->speed); - mac_dev->update_speed(mac_dev, phy_dev->speed); - fman_get_pause_cfg(mac_dev, &rx_pause, &tx_pause); - err = fman_set_mac_active_pause(mac_dev, rx_pause, tx_pause); - if (err < 0) - dev_err(mac_dev->dev, "fman_set_mac_active_pause() = %d\n", - err); -} - static int dtsec_set_exception(struct fman_mac *dtsec, enum fman_mac_exceptions exception, bool enable) { struct dtsec_regs __iomem *regs = dtsec->regs; u32 bit_mask = 0; - if (!is_init_done(dtsec->dtsec_drv_param)) - return -EINVAL; - if (exception != FM_MAC_EX_1G_1588_TS_RX_ERR) { bit_mask = get_exception_flag(exception); if (bit_mask) { @@ -1310,12 +1261,9 @@ static int dtsec_init(struct fman_mac *dtsec) { struct dtsec_regs __iomem *regs = dtsec->regs; struct dtsec_cfg *dtsec_drv_param; - u16 max_frm_ln; + u16 max_frm_ln, tbicon; int err; - if (is_init_done(dtsec->dtsec_drv_param)) - 
return -EINVAL; - if (DEFAULT_RESET_ON_INIT && (fman_reset_mac(dtsec->fm, dtsec->mac_id) != 0)) { pr_err("Can't reset MAC!\n"); @@ -1330,38 +1278,19 @@ static int dtsec_init(struct fman_mac *dtsec) err = init(dtsec->regs, dtsec_drv_param, dtsec->phy_if, dtsec->max_speed, dtsec->addr, dtsec->exceptions, - dtsec->tbiphy->mdio.addr); + dtsec->tbidev->addr); if (err) { free_init_resources(dtsec); pr_err("DTSEC version doesn't support this i/f mode\n"); return err; } - if (dtsec->phy_if == PHY_INTERFACE_MODE_SGMII) { - u16 tmp_reg16; - - /* Configure the TBI PHY Control Register */ - tmp_reg16 = TBICON_CLK_SELECT | TBICON_SOFT_RESET; - phy_write(dtsec->tbiphy, MII_TBICON, tmp_reg16); + /* Configure the TBI PHY Control Register */ + tbicon = TBICON_CLK_SELECT | TBICON_SOFT_RESET; + mdiodev_write(dtsec->tbidev, MII_TBICON, tbicon); - tmp_reg16 = TBICON_CLK_SELECT; - phy_write(dtsec->tbiphy, MII_TBICON, tmp_reg16); - - tmp_reg16 = (BMCR_RESET | BMCR_ANENABLE | - BMCR_FULLDPLX | BMCR_SPEED1000); - phy_write(dtsec->tbiphy, MII_BMCR, tmp_reg16); - - if (dtsec->basex_if) - tmp_reg16 = TBIANA_1000X; - else - tmp_reg16 = TBIANA_SGMII; - phy_write(dtsec->tbiphy, MII_ADVERTISE, tmp_reg16); - - tmp_reg16 = (BMCR_ANENABLE | BMCR_ANRESTART | - BMCR_FULLDPLX | BMCR_SPEED1000); - - phy_write(dtsec->tbiphy, MII_BMCR, tmp_reg16); - } + tbicon = TBICON_CLK_SELECT; + mdiodev_write(dtsec->tbidev, MII_TBICON, tbicon); /* Max Frame Length */ max_frm_ln = (u16)ioread32be(®s->maxfrm); @@ -1406,6 +1335,8 @@ static int dtsec_free(struct fman_mac *dtsec) kfree(dtsec->dtsec_drv_param); dtsec->dtsec_drv_param = NULL; + if (!IS_ERR_OR_NULL(dtsec->tbidev)) + put_device(&dtsec->tbidev->dev); kfree(dtsec); return 0; @@ -1434,7 +1365,6 @@ static struct fman_mac *dtsec_config(struct mac_device *mac_dev, dtsec->regs = mac_dev->vaddr; dtsec->addr = ENET_ADDR_TO_UINT64(mac_dev->addr); - dtsec->max_speed = params->max_speed; dtsec->phy_if = mac_dev->phy_if; dtsec->mac_id = params->mac_id; dtsec->exceptions = (DTSEC_IMASK_BREN | @@ -1457,7 +1387,6 @@ static struct fman_mac *dtsec_config(struct mac_device *mac_dev, dtsec->en_tsu_err_exception = dtsec->dtsec_drv_param->ptp_exception_en; dtsec->fm = params->fm; - dtsec->basex_if = params->basex_if; /* Save FMan revision */ fman_get_revision(dtsec->fm, &dtsec->fm_rev_info); @@ -1476,18 +1405,18 @@ int dtsec_initialization(struct mac_device *mac_dev, int err; struct fman_mac *dtsec; struct device_node *phy_node; + unsigned long capabilities; + unsigned long *supported; + mac_dev->phylink_ops = &dtsec_mac_ops; mac_dev->set_promisc = dtsec_set_promiscuous; mac_dev->change_addr = dtsec_modify_mac_address; mac_dev->add_hash_mac_addr = dtsec_add_hash_mac_address; mac_dev->remove_hash_mac_addr = dtsec_del_hash_mac_address; - mac_dev->set_tx_pause = dtsec_set_tx_pause_frames; - mac_dev->set_rx_pause = dtsec_accept_rx_pause_frames; mac_dev->set_exception = dtsec_set_exception; mac_dev->set_allmulti = dtsec_set_allmulti; mac_dev->set_tstamp = dtsec_set_tstamp; mac_dev->set_multi = fman_set_multi; - mac_dev->adjust_link = adjust_link_dtsec; mac_dev->enable = dtsec_enable; mac_dev->disable = dtsec_disable; @@ -1502,19 +1431,56 @@ int dtsec_initialization(struct mac_device *mac_dev, dtsec->dtsec_drv_param->tx_pad_crc = true; phy_node = of_parse_phandle(mac_node, "tbi-handle", 0); - if (!phy_node) { - pr_err("TBI PHY node is not available\n"); + if (!phy_node || of_device_is_available(phy_node)) { + of_node_put(phy_node); err = -EINVAL; + dev_err_probe(mac_dev->dev, err, + "TBI PCS node is not 
available\n"); goto _return_fm_mac_free; } - dtsec->tbiphy = of_phy_find_device(phy_node); - if (!dtsec->tbiphy) { - pr_err("of_phy_find_device (TBI PHY) failed\n"); - err = -EINVAL; + dtsec->tbidev = of_mdio_find_device(phy_node); + of_node_put(phy_node); + if (!dtsec->tbidev) { + err = -EPROBE_DEFER; + dev_err_probe(mac_dev->dev, err, + "could not find mdiodev for PCS\n"); goto _return_fm_mac_free; } - put_device(&dtsec->tbiphy->mdio.dev); + dtsec->pcs.ops = &dtsec_pcs_ops; + dtsec->pcs.poll = true; + + supported = mac_dev->phylink_config.supported_interfaces; + + /* FIXME: Can we use DTSEC_ID2_INT_FULL_OFF to determine if these are + * supported? If not, we can determine support via the phy if SerDes + * support is added. + */ + if (mac_dev->phy_if == PHY_INTERFACE_MODE_SGMII || + mac_dev->phy_if == PHY_INTERFACE_MODE_1000BASEX) { + __set_bit(PHY_INTERFACE_MODE_SGMII, supported); + __set_bit(PHY_INTERFACE_MODE_1000BASEX, supported); + } else if (mac_dev->phy_if == PHY_INTERFACE_MODE_2500BASEX) { + __set_bit(PHY_INTERFACE_MODE_2500BASEX, supported); + } + + if (!(ioread32be(&dtsec->regs->tsec_id2) & DTSEC_ID2_INT_REDUCED_OFF)) { + phy_interface_set_rgmii(supported); + + /* DTSEC_ID2_INT_REDUCED_OFF indicates that the dTSEC supports + * RMII and RGMII. However, the only SoCs which support RMII + * are the P1017 and P1023. Avoid advertising this mode on + * other SoCs. This is a bit of a moot point, since there's no + * in-tree support for ethernet on these platforms... + */ + if (of_machine_is_compatible("fsl,P1023") || + of_machine_is_compatible("fsl,P1023RDB")) + __set_bit(PHY_INTERFACE_MODE_RMII, supported); + } + + capabilities = MAC_SYM_PAUSE | MAC_ASYM_PAUSE; + capabilities |= MAC_10 | MAC_100 | MAC_1000FD | MAC_2500FD; + mac_dev->phylink_config.mac_capabilities = capabilities; err = dtsec_init(dtsec); if (err < 0) diff --git a/drivers/net/ethernet/freescale/fman/fman_mac.h b/drivers/net/ethernet/freescale/fman/fman_mac.h index 65887a3160d7..e5d6cddea731 100644 --- a/drivers/net/ethernet/freescale/fman/fman_mac.h +++ b/drivers/net/ethernet/freescale/fman/fman_mac.h @@ -170,20 +170,10 @@ struct fman_mac_params { * 0 - FM_MAX_NUM_OF_10G_MACS */ u8 mac_id; - /* Note that the speed should indicate the maximum rate that - * this MAC should support rather than the actual speed; - */ - u16 max_speed; /* A handle to the FM object this port related to */ void *fm; fman_mac_exception_cb *event_cb; /* MDIO Events Callback Routine */ fman_mac_exception_cb *exception_cb;/* Exception Callback Routine */ - /* SGMII/QSGII interface with 1000BaseX auto-negotiation between MAC - * and phy or backplane; Note: 1000BaseX auto-negotiation relates only - * to interface between MAC and phy/backplane, SGMII phy can still - * synchronize with far-end phy at 10Mbps, 100Mbps or 1000Mbps - */ - bool basex_if; }; struct eth_hash_t { diff --git a/drivers/net/ethernet/freescale/fman/fman_memac.c b/drivers/net/ethernet/freescale/fman/fman_memac.c index 32d26cf17843..9349f841bd06 100644 --- a/drivers/net/ethernet/freescale/fman/fman_memac.c +++ b/drivers/net/ethernet/freescale/fman/fman_memac.c @@ -11,42 +11,12 @@ #include <linux/slab.h> #include <linux/io.h> +#include <linux/pcs-lynx.h> #include <linux/phy.h> #include <linux/phy_fixed.h> +#include <linux/phy/phy.h> #include <linux/of_mdio.h> -/* PCS registers */ -#define MDIO_SGMII_CR 0x00 -#define MDIO_SGMII_DEV_ABIL_SGMII 0x04 -#define MDIO_SGMII_LINK_TMR_L 0x12 -#define MDIO_SGMII_LINK_TMR_H 0x13 -#define MDIO_SGMII_IF_MODE 0x14 - -/* SGMII Control defines */ 
-#define SGMII_CR_AN_EN 0x1000 -#define SGMII_CR_RESTART_AN 0x0200 -#define SGMII_CR_FD 0x0100 -#define SGMII_CR_SPEED_SEL1_1G 0x0040 -#define SGMII_CR_DEF_VAL (SGMII_CR_AN_EN | SGMII_CR_FD | \ - SGMII_CR_SPEED_SEL1_1G) - -/* SGMII Device Ability for SGMII defines */ -#define MDIO_SGMII_DEV_ABIL_SGMII_MODE 0x4001 -#define MDIO_SGMII_DEV_ABIL_BASEX_MODE 0x01A0 - -/* Link timer define */ -#define LINK_TMR_L 0xa120 -#define LINK_TMR_H 0x0007 -#define LINK_TMR_L_BASEX 0xaf08 -#define LINK_TMR_H_BASEX 0x002f - -/* SGMII IF Mode defines */ -#define IF_MODE_USE_SGMII_AN 0x0002 -#define IF_MODE_SGMII_EN 0x0001 -#define IF_MODE_SGMII_SPEED_100M 0x0004 -#define IF_MODE_SGMII_SPEED_1G 0x0008 -#define IF_MODE_SGMII_DUPLEX_HALF 0x0010 - /* Num of additional exact match MAC adr regs */ #define MEMAC_NUM_OF_PADDRS 7 @@ -308,9 +278,6 @@ struct fman_mac { struct memac_regs __iomem *regs; /* MAC address of device */ u64 addr; - /* Ethernet physical interface */ - phy_interface_t phy_if; - u16 max_speed; struct mac_device *dev_id; /* device cookie used by the exception cbs */ fman_mac_exception_cb *exception_cb; fman_mac_exception_cb *event_cb; @@ -323,9 +290,12 @@ struct fman_mac { struct memac_cfg *memac_drv_param; void *fm; struct fman_rev_info fm_rev_info; - bool basex_if; - struct phy_device *pcsphy; + struct phy *serdes; + struct phylink_pcs *sgmii_pcs; + struct phylink_pcs *qsgmii_pcs; + struct phylink_pcs *xfi_pcs; bool allmulti_enabled; + bool rgmii_no_half_duplex; }; static void add_addr_in_paddr(struct memac_regs __iomem *regs, const u8 *adr, @@ -383,7 +353,6 @@ static void set_exception(struct memac_regs __iomem *regs, u32 val, } static int init(struct memac_regs __iomem *regs, struct memac_cfg *cfg, - phy_interface_t phy_if, u16 speed, bool slow_10g_if, u32 exceptions) { u32 tmp; @@ -411,41 +380,6 @@ static int init(struct memac_regs __iomem *regs, struct memac_cfg *cfg, iowrite32be((u32)cfg->pause_quanta, ®s->pause_quanta[0]); iowrite32be((u32)0, ®s->pause_thresh[0]); - /* IF_MODE */ - tmp = 0; - switch (phy_if) { - case PHY_INTERFACE_MODE_XGMII: - tmp |= IF_MODE_10G; - break; - case PHY_INTERFACE_MODE_MII: - tmp |= IF_MODE_MII; - break; - default: - tmp |= IF_MODE_GMII; - if (phy_if == PHY_INTERFACE_MODE_RGMII || - phy_if == PHY_INTERFACE_MODE_RGMII_ID || - phy_if == PHY_INTERFACE_MODE_RGMII_RXID || - phy_if == PHY_INTERFACE_MODE_RGMII_TXID) - tmp |= IF_MODE_RGMII | IF_MODE_RGMII_AUTO; - } - iowrite32be(tmp, ®s->if_mode); - - /* TX_FIFO_SECTIONS */ - tmp = 0; - if (phy_if == PHY_INTERFACE_MODE_XGMII) { - if (slow_10g_if) { - tmp |= (TX_FIFO_SECTIONS_TX_AVAIL_SLOW_10G | - TX_FIFO_SECTIONS_TX_EMPTY_DEFAULT_10G); - } else { - tmp |= (TX_FIFO_SECTIONS_TX_AVAIL_10G | - TX_FIFO_SECTIONS_TX_EMPTY_DEFAULT_10G); - } - } else { - tmp |= (TX_FIFO_SECTIONS_TX_AVAIL_1G | - TX_FIFO_SECTIONS_TX_EMPTY_DEFAULT_1G); - } - iowrite32be(tmp, ®s->tx_fifo_sections); - /* clear all pending events and set-up interrupts */ iowrite32be(0xffffffff, ®s->ievent); set_exception(regs, exceptions, true); @@ -485,93 +419,6 @@ static u32 get_mac_addr_hash_code(u64 eth_addr) return xor_val; } -static void setup_sgmii_internal_phy(struct fman_mac *memac, - struct fixed_phy_status *fixed_link) -{ - u16 tmp_reg16; - - if (WARN_ON(!memac->pcsphy)) - return; - - /* SGMII mode */ - tmp_reg16 = IF_MODE_SGMII_EN; - if (!fixed_link) - /* AN enable */ - tmp_reg16 |= IF_MODE_USE_SGMII_AN; - else { - switch (fixed_link->speed) { - case 10: - /* For 10M: IF_MODE[SPEED_10M] = 0 */ - break; - case 100: - tmp_reg16 |= IF_MODE_SGMII_SPEED_100M; 
- break; - case 1000: - default: - tmp_reg16 |= IF_MODE_SGMII_SPEED_1G; - break; - } - if (!fixed_link->duplex) - tmp_reg16 |= IF_MODE_SGMII_DUPLEX_HALF; - } - phy_write(memac->pcsphy, MDIO_SGMII_IF_MODE, tmp_reg16); - - /* Device ability according to SGMII specification */ - tmp_reg16 = MDIO_SGMII_DEV_ABIL_SGMII_MODE; - phy_write(memac->pcsphy, MDIO_SGMII_DEV_ABIL_SGMII, tmp_reg16); - - /* Adjust link timer for SGMII - - * According to Cisco SGMII specification the timer should be 1.6 ms. - * The link_timer register is configured in units of the clock. - * - When running as 1G SGMII, Serdes clock is 125 MHz, so - * unit = 1 / (125*10^6 Hz) = 8 ns. - * 1.6 ms in units of 8 ns = 1.6ms / 8ns = 2*10^5 = 0x30d40 - * - When running as 2.5G SGMII, Serdes clock is 312.5 MHz, so - * unit = 1 / (312.5*10^6 Hz) = 3.2 ns. - * 1.6 ms in units of 3.2 ns = 1.6ms / 3.2ns = 5*10^5 = 0x7a120. - * Since link_timer value of 1G SGMII will be too short for 2.5 SGMII, - * we always set up here a value of 2.5 SGMII. - */ - phy_write(memac->pcsphy, MDIO_SGMII_LINK_TMR_H, LINK_TMR_H); - phy_write(memac->pcsphy, MDIO_SGMII_LINK_TMR_L, LINK_TMR_L); - - if (!fixed_link) - /* Restart AN */ - tmp_reg16 = SGMII_CR_DEF_VAL | SGMII_CR_RESTART_AN; - else - /* AN disabled */ - tmp_reg16 = SGMII_CR_DEF_VAL & ~SGMII_CR_AN_EN; - phy_write(memac->pcsphy, 0x0, tmp_reg16); -} - -static void setup_sgmii_internal_phy_base_x(struct fman_mac *memac) -{ - u16 tmp_reg16; - - /* AN Device capability */ - tmp_reg16 = MDIO_SGMII_DEV_ABIL_BASEX_MODE; - phy_write(memac->pcsphy, MDIO_SGMII_DEV_ABIL_SGMII, tmp_reg16); - - /* Adjust link timer for SGMII - - * For Serdes 1000BaseX auto-negotiation the timer should be 10 ms. - * The link_timer register is configured in units of the clock. - * - When running as 1G SGMII, Serdes clock is 125 MHz, so - * unit = 1 / (125*10^6 Hz) = 8 ns. - * 10 ms in units of 8 ns = 10ms / 8ns = 1250000 = 0x1312d0 - * - When running as 2.5G SGMII, Serdes clock is 312.5 MHz, so - * unit = 1 / (312.5*10^6 Hz) = 3.2 ns. - * 10 ms in units of 3.2 ns = 10ms / 3.2ns = 3125000 = 0x2faf08. - * Since link_timer value of 1G SGMII will be too short for 2.5 SGMII, - * we always set up here a value of 2.5 SGMII. 
- */ - phy_write(memac->pcsphy, MDIO_SGMII_LINK_TMR_H, LINK_TMR_H_BASEX); - phy_write(memac->pcsphy, MDIO_SGMII_LINK_TMR_L, LINK_TMR_L_BASEX); - - /* Restart AN */ - tmp_reg16 = SGMII_CR_DEF_VAL | SGMII_CR_RESTART_AN; - phy_write(memac->pcsphy, 0x0, tmp_reg16); -} - static int check_init_parameters(struct fman_mac *memac) { if (!memac->exception_cb) { @@ -677,41 +524,31 @@ static void free_init_resources(struct fman_mac *memac) memac->unicast_addr_hash = NULL; } -static bool is_init_done(struct memac_cfg *memac_drv_params) -{ - /* Checks if mEMAC driver parameters were initialized */ - if (!memac_drv_params) - return true; - - return false; -} - static int memac_enable(struct fman_mac *memac) { - struct memac_regs __iomem *regs = memac->regs; - u32 tmp; + int ret; - if (!is_init_done(memac->memac_drv_param)) - return -EINVAL; + ret = phy_init(memac->serdes); + if (ret) { + dev_err(memac->dev_id->dev, + "could not initialize serdes: %pe\n", ERR_PTR(ret)); + return ret; + } - tmp = ioread32be(®s->command_config); - tmp |= CMD_CFG_RX_EN | CMD_CFG_TX_EN; - iowrite32be(tmp, ®s->command_config); + ret = phy_power_on(memac->serdes); + if (ret) { + dev_err(memac->dev_id->dev, + "could not power on serdes: %pe\n", ERR_PTR(ret)); + phy_exit(memac->serdes); + } - return 0; + return ret; } static void memac_disable(struct fman_mac *memac) - { - struct memac_regs __iomem *regs = memac->regs; - u32 tmp; - - WARN_ON_ONCE(!is_init_done(memac->memac_drv_param)); - - tmp = ioread32be(®s->command_config); - tmp &= ~(CMD_CFG_RX_EN | CMD_CFG_TX_EN); - iowrite32be(tmp, ®s->command_config); + phy_power_off(memac->serdes); + phy_exit(memac->serdes); } static int memac_set_promiscuous(struct fman_mac *memac, bool new_val) @@ -719,9 +556,6 @@ static int memac_set_promiscuous(struct fman_mac *memac, bool new_val) struct memac_regs __iomem *regs = memac->regs; u32 tmp; - if (!is_init_done(memac->memac_drv_param)) - return -EINVAL; - tmp = ioread32be(®s->command_config); if (new_val) tmp |= CMD_CFG_PROMIS_EN; @@ -733,73 +567,12 @@ static int memac_set_promiscuous(struct fman_mac *memac, bool new_val) return 0; } -static int memac_adjust_link(struct fman_mac *memac, u16 speed) -{ - struct memac_regs __iomem *regs = memac->regs; - u32 tmp; - - if (!is_init_done(memac->memac_drv_param)) - return -EINVAL; - - tmp = ioread32be(®s->if_mode); - - /* Set full duplex */ - tmp &= ~IF_MODE_HD; - - if (phy_interface_mode_is_rgmii(memac->phy_if)) { - /* Configure RGMII in manual mode */ - tmp &= ~IF_MODE_RGMII_AUTO; - tmp &= ~IF_MODE_RGMII_SP_MASK; - /* Full duplex */ - tmp |= IF_MODE_RGMII_FD; - - switch (speed) { - case SPEED_1000: - tmp |= IF_MODE_RGMII_1000; - break; - case SPEED_100: - tmp |= IF_MODE_RGMII_100; - break; - case SPEED_10: - tmp |= IF_MODE_RGMII_10; - break; - default: - break; - } - } - - iowrite32be(tmp, ®s->if_mode); - - return 0; -} - -static void adjust_link_memac(struct mac_device *mac_dev) -{ - struct phy_device *phy_dev = mac_dev->phy_dev; - struct fman_mac *fman_mac; - bool rx_pause, tx_pause; - int err; - - fman_mac = mac_dev->fman_mac; - memac_adjust_link(fman_mac, phy_dev->speed); - mac_dev->update_speed(mac_dev, phy_dev->speed); - - fman_get_pause_cfg(mac_dev, &rx_pause, &tx_pause); - err = fman_set_mac_active_pause(mac_dev, rx_pause, tx_pause); - if (err < 0) - dev_err(mac_dev->dev, "fman_set_mac_active_pause() = %d\n", - err); -} - static int memac_set_tx_pause_frames(struct fman_mac *memac, u8 priority, u16 pause_time, u16 thresh_time) { struct memac_regs __iomem *regs = memac->regs; u32 tmp; - 
if (!is_init_done(memac->memac_drv_param)) - return -EINVAL; - tmp = ioread32be(®s->tx_fifo_sections); GET_TX_EMPTY_DEFAULT_VALUE(tmp); @@ -834,9 +607,6 @@ static int memac_accept_rx_pause_frames(struct fman_mac *memac, bool en) struct memac_regs __iomem *regs = memac->regs; u32 tmp; - if (!is_init_done(memac->memac_drv_param)) - return -EINVAL; - tmp = ioread32be(®s->command_config); if (en) tmp &= ~CMD_CFG_PAUSE_IGNORE; @@ -848,12 +618,175 @@ static int memac_accept_rx_pause_frames(struct fman_mac *memac, bool en) return 0; } +static void memac_validate(struct phylink_config *config, + unsigned long *supported, + struct phylink_link_state *state) +{ + struct fman_mac *memac = fman_config_to_mac(config)->fman_mac; + unsigned long caps = config->mac_capabilities; + + if (phy_interface_mode_is_rgmii(state->interface) && + memac->rgmii_no_half_duplex) + caps &= ~(MAC_10HD | MAC_100HD); + + phylink_validate_mask_caps(supported, state, caps); +} + +/** + * memac_if_mode() - Convert an interface mode into an IF_MODE config + * @interface: A phy interface mode + * + * Return: A configuration word, suitable for programming into the lower bits + * of %IF_MODE. + */ +static u32 memac_if_mode(phy_interface_t interface) +{ + switch (interface) { + case PHY_INTERFACE_MODE_MII: + return IF_MODE_MII; + case PHY_INTERFACE_MODE_RGMII: + case PHY_INTERFACE_MODE_RGMII_ID: + case PHY_INTERFACE_MODE_RGMII_RXID: + case PHY_INTERFACE_MODE_RGMII_TXID: + return IF_MODE_GMII | IF_MODE_RGMII; + case PHY_INTERFACE_MODE_SGMII: + case PHY_INTERFACE_MODE_1000BASEX: + case PHY_INTERFACE_MODE_QSGMII: + return IF_MODE_GMII; + case PHY_INTERFACE_MODE_10GBASER: + return IF_MODE_10G; + default: + WARN_ON_ONCE(1); + return 0; + } +} + +static struct phylink_pcs *memac_select_pcs(struct phylink_config *config, + phy_interface_t iface) +{ + struct fman_mac *memac = fman_config_to_mac(config)->fman_mac; + + switch (iface) { + case PHY_INTERFACE_MODE_SGMII: + case PHY_INTERFACE_MODE_1000BASEX: + return memac->sgmii_pcs; + case PHY_INTERFACE_MODE_QSGMII: + return memac->qsgmii_pcs; + case PHY_INTERFACE_MODE_10GBASER: + return memac->xfi_pcs; + default: + return NULL; + } +} + +static int memac_prepare(struct phylink_config *config, unsigned int mode, + phy_interface_t iface) +{ + struct fman_mac *memac = fman_config_to_mac(config)->fman_mac; + + switch (iface) { + case PHY_INTERFACE_MODE_SGMII: + case PHY_INTERFACE_MODE_1000BASEX: + case PHY_INTERFACE_MODE_QSGMII: + case PHY_INTERFACE_MODE_10GBASER: + return phy_set_mode_ext(memac->serdes, PHY_MODE_ETHERNET, + iface); + default: + return 0; + } +} + +static void memac_mac_config(struct phylink_config *config, unsigned int mode, + const struct phylink_link_state *state) +{ + struct mac_device *mac_dev = fman_config_to_mac(config); + struct memac_regs __iomem *regs = mac_dev->fman_mac->regs; + u32 tmp = ioread32be(®s->if_mode); + + tmp &= ~(IF_MODE_MASK | IF_MODE_RGMII); + tmp |= memac_if_mode(state->interface); + if (phylink_autoneg_inband(mode)) + tmp |= IF_MODE_RGMII_AUTO; + iowrite32be(tmp, ®s->if_mode); +} + +static void memac_link_up(struct phylink_config *config, struct phy_device *phy, + unsigned int mode, phy_interface_t interface, + int speed, int duplex, bool tx_pause, bool rx_pause) +{ + struct mac_device *mac_dev = fman_config_to_mac(config); + struct fman_mac *memac = mac_dev->fman_mac; + struct memac_regs __iomem *regs = memac->regs; + u32 tmp = memac_if_mode(interface); + u16 pause_time = tx_pause ? 
FSL_FM_PAUSE_TIME_ENABLE : + FSL_FM_PAUSE_TIME_DISABLE; + + memac_set_tx_pause_frames(memac, 0, pause_time, 0); + memac_accept_rx_pause_frames(memac, rx_pause); + + if (duplex == DUPLEX_HALF) + tmp |= IF_MODE_HD; + + switch (speed) { + case SPEED_1000: + tmp |= IF_MODE_RGMII_1000; + break; + case SPEED_100: + tmp |= IF_MODE_RGMII_100; + break; + case SPEED_10: + tmp |= IF_MODE_RGMII_10; + break; + } + iowrite32be(tmp, ®s->if_mode); + + /* TODO: EEE? */ + + if (speed == SPEED_10000) { + if (memac->fm_rev_info.major == 6 && + memac->fm_rev_info.minor == 4) + tmp = TX_FIFO_SECTIONS_TX_AVAIL_SLOW_10G; + else + tmp = TX_FIFO_SECTIONS_TX_AVAIL_10G; + tmp |= TX_FIFO_SECTIONS_TX_EMPTY_DEFAULT_10G; + } else { + tmp = TX_FIFO_SECTIONS_TX_AVAIL_1G | + TX_FIFO_SECTIONS_TX_EMPTY_DEFAULT_1G; + } + iowrite32be(tmp, ®s->tx_fifo_sections); + + mac_dev->update_speed(mac_dev, speed); + + tmp = ioread32be(®s->command_config); + tmp |= CMD_CFG_RX_EN | CMD_CFG_TX_EN; + iowrite32be(tmp, ®s->command_config); +} + +static void memac_link_down(struct phylink_config *config, unsigned int mode, + phy_interface_t interface) +{ + struct fman_mac *memac = fman_config_to_mac(config)->fman_mac; + struct memac_regs __iomem *regs = memac->regs; + u32 tmp; + + /* TODO: graceful */ + tmp = ioread32be(®s->command_config); + tmp &= ~(CMD_CFG_RX_EN | CMD_CFG_TX_EN); + iowrite32be(tmp, ®s->command_config); +} + +static const struct phylink_mac_ops memac_mac_ops = { + .validate = memac_validate, + .mac_select_pcs = memac_select_pcs, + .mac_prepare = memac_prepare, + .mac_config = memac_mac_config, + .mac_link_up = memac_link_up, + .mac_link_down = memac_link_down, +}; + static int memac_modify_mac_address(struct fman_mac *memac, const enet_addr_t *enet_addr) { - if (!is_init_done(memac->memac_drv_param)) - return -EINVAL; - add_addr_in_paddr(memac->regs, (const u8 *)(*enet_addr), 0); return 0; @@ -867,9 +800,6 @@ static int memac_add_hash_mac_address(struct fman_mac *memac, u32 hash; u64 addr; - if (!is_init_done(memac->memac_drv_param)) - return -EINVAL; - addr = ENET_ADDR_TO_UINT64(*eth_addr); if (!(addr & GROUP_ADDRESS)) { @@ -898,9 +828,6 @@ static int memac_set_allmulti(struct fman_mac *memac, bool enable) u32 entry; struct memac_regs __iomem *regs = memac->regs; - if (!is_init_done(memac->memac_drv_param)) - return -EINVAL; - if (enable) { for (entry = 0; entry < HASH_TABLE_SIZE; entry++) iowrite32be(entry | HASH_CTRL_MCAST_EN, @@ -930,9 +857,6 @@ static int memac_del_hash_mac_address(struct fman_mac *memac, u32 hash; u64 addr; - if (!is_init_done(memac->memac_drv_param)) - return -EINVAL; - addr = ENET_ADDR_TO_UINT64(*eth_addr); hash = get_mac_addr_hash_code(addr) & HASH_CTRL_ADDR_MASK; @@ -960,9 +884,6 @@ static int memac_set_exception(struct fman_mac *memac, { u32 bit_mask = 0; - if (!is_init_done(memac->memac_drv_param)) - return -EINVAL; - bit_mask = get_exception_flag(exception); if (bit_mask) { if (enable) @@ -981,25 +902,16 @@ static int memac_set_exception(struct fman_mac *memac, static int memac_init(struct fman_mac *memac) { struct memac_cfg *memac_drv_param; - u8 i; enet_addr_t eth_addr; - bool slow_10g_if = false; - struct fixed_phy_status *fixed_link = NULL; int err; u32 reg32 = 0; - if (is_init_done(memac->memac_drv_param)) - return -EINVAL; - err = check_init_parameters(memac); if (err) return err; memac_drv_param = memac->memac_drv_param; - if (memac->fm_rev_info.major == 6 && memac->fm_rev_info.minor == 4) - slow_10g_if = true; - /* First, reset the MAC if desired. 
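	 * memac_initialization() sets memac_drv_param->reset_on_init before
	 * calling memac_init(), so this reset path is normally taken.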
*/ if (memac_drv_param->reset_on_init) { err = reset(memac->regs); @@ -1015,10 +927,7 @@ static int memac_init(struct fman_mac *memac) add_addr_in_paddr(memac->regs, (const u8 *)eth_addr, 0); } - fixed_link = memac_drv_param->fixed_link; - - init(memac->regs, memac->memac_drv_param, memac->phy_if, - memac->max_speed, slow_10g_if, memac->exceptions); + init(memac->regs, memac->memac_drv_param, memac->exceptions); /* FM_RX_FIFO_CORRUPT_ERRATA_10GMAC_A006320 errata workaround * Exists only in FMan 6.0 and 6.3. @@ -1034,33 +943,6 @@ static int memac_init(struct fman_mac *memac) iowrite32be(reg32, &memac->regs->command_config); } - if (memac->phy_if == PHY_INTERFACE_MODE_SGMII) { - /* Configure internal SGMII PHY */ - if (memac->basex_if) - setup_sgmii_internal_phy_base_x(memac); - else - setup_sgmii_internal_phy(memac, fixed_link); - } else if (memac->phy_if == PHY_INTERFACE_MODE_QSGMII) { - /* Configure 4 internal SGMII PHYs */ - for (i = 0; i < 4; i++) { - u8 qsmgii_phy_addr, phy_addr; - /* QSGMII PHY address occupies 3 upper bits of 5-bit - * phy_address; the lower 2 bits are used to extend - * register address space and access each one of 4 - * ports inside QSGMII. - */ - phy_addr = memac->pcsphy->mdio.addr; - qsmgii_phy_addr = (u8)((phy_addr << 2) | i); - memac->pcsphy->mdio.addr = qsmgii_phy_addr; - if (memac->basex_if) - setup_sgmii_internal_phy_base_x(memac); - else - setup_sgmii_internal_phy(memac, fixed_link); - - memac->pcsphy->mdio.addr = phy_addr; - } - } - /* Max Frame Length */ err = fman_set_mac_max_frame(memac->fm, memac->mac_id, memac_drv_param->max_frame_length); @@ -1089,19 +971,28 @@ static int memac_init(struct fman_mac *memac) fman_register_intr(memac->fm, FMAN_MOD_MAC, memac->mac_id, FMAN_INTR_TYPE_NORMAL, memac_exception, memac); - kfree(memac_drv_param); - memac->memac_drv_param = NULL; - return 0; } +static void pcs_put(struct phylink_pcs *pcs) +{ + struct mdio_device *mdiodev; + + if (IS_ERR_OR_NULL(pcs)) + return; + + mdiodev = lynx_get_mdio_device(pcs); + lynx_pcs_destroy(pcs); + mdio_device_free(mdiodev); +} + static int memac_free(struct fman_mac *memac) { free_init_resources(memac); - if (memac->pcsphy) - put_device(&memac->pcsphy->mdio.dev); - + pcs_put(memac->sgmii_pcs); + pcs_put(memac->qsgmii_pcs); + pcs_put(memac->xfi_pcs); kfree(memac->memac_drv_param); kfree(memac); @@ -1134,8 +1025,6 @@ static struct fman_mac *memac_config(struct mac_device *mac_dev, memac->addr = ENET_ADDR_TO_UINT64(mac_dev->addr); memac->regs = mac_dev->vaddr; - memac->max_speed = params->max_speed; - memac->phy_if = mac_dev->phy_if; memac->mac_id = params->mac_id; memac->exceptions = (MEMAC_IMASK_TSECC_ER | MEMAC_IMASK_TECC_ER | MEMAC_IMASK_RECC_ER | MEMAC_IMASK_MGI); @@ -1143,7 +1032,6 @@ static struct fman_mac *memac_config(struct mac_device *mac_dev, memac->event_cb = params->event_cb; memac->dev_id = mac_dev; memac->fm = params->fm; - memac->basex_if = params->basex_if; /* Save FMan revision */ fman_get_revision(memac->fm, &memac->fm_rev_info); @@ -1151,101 +1039,221 @@ static struct fman_mac *memac_config(struct mac_device *mac_dev, return memac; } +static struct phylink_pcs *memac_pcs_create(struct device_node *mac_node, + int index) +{ + struct device_node *node; + struct mdio_device *mdiodev = NULL; + struct phylink_pcs *pcs; + + node = of_parse_phandle(mac_node, "pcsphy-handle", index); + if (node && of_device_is_available(node)) + mdiodev = of_mdio_find_device(node); + of_node_put(node); + + if (!mdiodev) + return ERR_PTR(-EPROBE_DEFER); + + pcs = lynx_pcs_create(mdiodev); 
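+	/* The mdio_device reference taken by of_mdio_find_device() stays bound
+	 * to the returned PCS; pcs_put() later retrieves it with
+	 * lynx_get_mdio_device() and drops it with mdio_device_free().
+	 */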
+ return pcs; +} + +static bool memac_supports(struct mac_device *mac_dev, phy_interface_t iface) +{ + /* If there's no serdes device, assume that it's been configured for + * whatever the default interface mode is. + */ + if (!mac_dev->fman_mac->serdes) + return mac_dev->phy_if == iface; + /* Otherwise, ask the serdes */ + return !phy_validate(mac_dev->fman_mac->serdes, PHY_MODE_ETHERNET, + iface, NULL); +} + int memac_initialization(struct mac_device *mac_dev, struct device_node *mac_node, struct fman_mac_params *params) { int err; - struct device_node *phy_node; - struct fixed_phy_status *fixed_link; + struct device_node *fixed; + struct phylink_pcs *pcs; struct fman_mac *memac; + unsigned long capabilities; + unsigned long *supported; + mac_dev->phylink_ops = &memac_mac_ops; mac_dev->set_promisc = memac_set_promiscuous; mac_dev->change_addr = memac_modify_mac_address; mac_dev->add_hash_mac_addr = memac_add_hash_mac_address; mac_dev->remove_hash_mac_addr = memac_del_hash_mac_address; - mac_dev->set_tx_pause = memac_set_tx_pause_frames; - mac_dev->set_rx_pause = memac_accept_rx_pause_frames; mac_dev->set_exception = memac_set_exception; mac_dev->set_allmulti = memac_set_allmulti; mac_dev->set_tstamp = memac_set_tstamp; mac_dev->set_multi = fman_set_multi; - mac_dev->adjust_link = adjust_link_memac; mac_dev->enable = memac_enable; mac_dev->disable = memac_disable; - if (params->max_speed == SPEED_10000) - mac_dev->phy_if = PHY_INTERFACE_MODE_XGMII; - mac_dev->fman_mac = memac_config(mac_dev, params); - if (!mac_dev->fman_mac) { - err = -EINVAL; - goto _return; - } + if (!mac_dev->fman_mac) + return -EINVAL; memac = mac_dev->fman_mac; memac->memac_drv_param->max_frame_length = fman_get_max_frm(); memac->memac_drv_param->reset_on_init = true; - if (memac->phy_if == PHY_INTERFACE_MODE_SGMII || - memac->phy_if == PHY_INTERFACE_MODE_QSGMII) { - phy_node = of_parse_phandle(mac_node, "pcsphy-handle", 0); - if (!phy_node) { - pr_err("PCS PHY node is not available\n"); - err = -EINVAL; + + err = of_property_match_string(mac_node, "pcs-handle-names", "xfi"); + if (err >= 0) { + memac->xfi_pcs = memac_pcs_create(mac_node, err); + if (IS_ERR(memac->xfi_pcs)) { + err = PTR_ERR(memac->xfi_pcs); + dev_err_probe(mac_dev->dev, err, "missing xfi pcs\n"); goto _return_fm_mac_free; } + } else if (err != -EINVAL && err != -ENODATA) { + goto _return_fm_mac_free; + } - memac->pcsphy = of_phy_find_device(phy_node); - if (!memac->pcsphy) { - pr_err("of_phy_find_device (PCS PHY) failed\n"); - err = -EINVAL; + err = of_property_match_string(mac_node, "pcs-handle-names", "qsgmii"); + if (err >= 0) { + memac->qsgmii_pcs = memac_pcs_create(mac_node, err); + if (IS_ERR(memac->qsgmii_pcs)) { + err = PTR_ERR(memac->qsgmii_pcs); + dev_err_probe(mac_dev->dev, err, + "missing qsgmii pcs\n"); goto _return_fm_mac_free; } + } else if (err != -EINVAL && err != -ENODATA) { + goto _return_fm_mac_free; } - if (!mac_dev->phy_node && of_phy_is_fixed_link(mac_node)) { - struct phy_device *phy; + /* For compatibility, if pcs-handle-names is missing, we assume this + * phy is the first one in pcsphy-handle + */ + err = of_property_match_string(mac_node, "pcs-handle-names", "sgmii"); + if (err == -EINVAL || err == -ENODATA) + pcs = memac_pcs_create(mac_node, 0); + else if (err < 0) + goto _return_fm_mac_free; + else + pcs = memac_pcs_create(mac_node, err); - err = of_phy_register_fixed_link(mac_node); - if (err) - goto _return_fm_mac_free; + if (IS_ERR(pcs)) { + err = PTR_ERR(pcs); + dev_err_probe(mac_dev->dev, err, "missing pcs\n"); + 
goto _return_fm_mac_free; + } - fixed_link = kzalloc(sizeof(*fixed_link), GFP_KERNEL); - if (!fixed_link) { - err = -ENOMEM; - goto _return_fm_mac_free; - } + /* If err is set here, it means that pcs-handle-names was missing above + * (and therefore that xfi_pcs cannot be set). If we are defaulting to + * XGMII, assume this is for XFI. Otherwise, assume it is for SGMII. + */ + if (err && mac_dev->phy_if == PHY_INTERFACE_MODE_XGMII) + memac->xfi_pcs = pcs; + else + memac->sgmii_pcs = pcs; + + memac->serdes = devm_of_phy_get(mac_dev->dev, mac_node, "serdes"); + err = PTR_ERR(memac->serdes); + if (err == -ENODEV || err == -ENOSYS) { + dev_dbg(mac_dev->dev, "could not get (optional) serdes\n"); + memac->serdes = NULL; + } else if (IS_ERR(memac->serdes)) { + dev_err_probe(mac_dev->dev, err, "could not get serdes\n"); + goto _return_fm_mac_free; + } - mac_dev->phy_node = of_node_get(mac_node); - phy = of_phy_find_device(mac_dev->phy_node); - if (!phy) { - err = -EINVAL; - of_node_put(mac_dev->phy_node); - goto _return_fixed_link_free; - } + /* The internal connection to the serdes is XGMII, but this isn't + * really correct for the phy mode (which is the external connection). + * However, this is how all older device trees say that they want + * 10GBASE-R (aka XFI), so just convert it for them. + */ + if (mac_dev->phy_if == PHY_INTERFACE_MODE_XGMII) + mac_dev->phy_if = PHY_INTERFACE_MODE_10GBASER; + + /* TODO: The following interface modes are supported by (some) hardware + * but not by this driver: + * - 1000BASE-KX + * - 10GBASE-KR + * - XAUI/HiGig + */ + supported = mac_dev->phylink_config.supported_interfaces; - fixed_link->link = phy->link; - fixed_link->speed = phy->speed; - fixed_link->duplex = phy->duplex; - fixed_link->pause = phy->pause; - fixed_link->asym_pause = phy->asym_pause; + /* Note that half duplex is only supported on 10/100M interfaces. */ - put_device(&phy->mdio.dev); - memac->memac_drv_param->fixed_link = fixed_link; + if (memac->sgmii_pcs && + (memac_supports(mac_dev, PHY_INTERFACE_MODE_SGMII) || + memac_supports(mac_dev, PHY_INTERFACE_MODE_1000BASEX))) { + __set_bit(PHY_INTERFACE_MODE_SGMII, supported); + __set_bit(PHY_INTERFACE_MODE_1000BASEX, supported); } + if (memac->sgmii_pcs && + memac_supports(mac_dev, PHY_INTERFACE_MODE_2500BASEX)) + __set_bit(PHY_INTERFACE_MODE_2500BASEX, supported); + + if (memac->qsgmii_pcs && + memac_supports(mac_dev, PHY_INTERFACE_MODE_QSGMII)) + __set_bit(PHY_INTERFACE_MODE_QSGMII, supported); + else if (mac_dev->phy_if == PHY_INTERFACE_MODE_QSGMII) + dev_warn(mac_dev->dev, "no QSGMII pcs specified\n"); + + if (memac->xfi_pcs && + memac_supports(mac_dev, PHY_INTERFACE_MODE_10GBASER)) { + __set_bit(PHY_INTERFACE_MODE_10GBASER, supported); + } else { + /* From what I can tell, no 10g macs support RGMII. */ + phy_interface_set_rgmii(supported); + __set_bit(PHY_INTERFACE_MODE_MII, supported); + } + + capabilities = MAC_SYM_PAUSE | MAC_ASYM_PAUSE | MAC_10 | MAC_100; + capabilities |= MAC_1000FD | MAC_2500FD | MAC_10000FD; + + /* These SoCs don't support half duplex at all; there's no different + * FMan version or compatible, so we just have to check the machine + * compatible instead + */ + if (of_machine_is_compatible("fsl,ls1043a") || + of_machine_is_compatible("fsl,ls1046a") || + of_machine_is_compatible("fsl,B4QDS")) + capabilities &= ~(MAC_10HD | MAC_100HD); + + mac_dev->phylink_config.mac_capabilities = capabilities; + + /* The T2080 and T4240 don't support half duplex RGMII. 
There is no + * other way to identify these SoCs, so just use the machine + * compatible. + */ + if (of_machine_is_compatible("fsl,T2080QDS") || + of_machine_is_compatible("fsl,T2080RDB") || + of_machine_is_compatible("fsl,T2081QDS") || + of_machine_is_compatible("fsl,T4240QDS") || + of_machine_is_compatible("fsl,T4240RDB")) + memac->rgmii_no_half_duplex = true; + + /* Most boards should use MLO_AN_INBAND, but existing boards don't have + * a managed property. Default to MLO_AN_INBAND if nothing else is + * specified. We need to be careful and not enable this if we have a + * fixed link or if we are using MII or RGMII, since those + * configurations modes don't use in-band autonegotiation. + */ + fixed = of_get_child_by_name(mac_node, "fixed-link"); + if (!fixed && !of_property_read_bool(mac_node, "fixed-link") && + !of_property_read_bool(mac_node, "managed") && + mac_dev->phy_if != PHY_INTERFACE_MODE_MII && + !phy_interface_mode_is_rgmii(mac_dev->phy_if)) + mac_dev->phylink_config.ovr_an_inband = true; + of_node_put(fixed); + err = memac_init(mac_dev->fman_mac); if (err < 0) - goto _return_fixed_link_free; + goto _return_fm_mac_free; dev_info(mac_dev->dev, "FMan MEMAC\n"); - goto _return; + return 0; -_return_fixed_link_free: - kfree(fixed_link); _return_fm_mac_free: memac_free(mac_dev->fman_mac); -_return: return err; } diff --git a/drivers/net/ethernet/freescale/fman/fman_tgec.c b/drivers/net/ethernet/freescale/fman/fman_tgec.c index 5a4be54ad459..c265b7f19a4d 100644 --- a/drivers/net/ethernet/freescale/fman/fman_tgec.c +++ b/drivers/net/ethernet/freescale/fman/fman_tgec.c @@ -13,6 +13,7 @@ #include <linux/bitrev.h> #include <linux/io.h> #include <linux/crc32.h> +#include <linux/netdevice.h> /* Transmit Inter-Packet Gap Length Register (TX_IPG_LENGTH) */ #define TGEC_TX_IPG_LENGTH_MASK 0x000003ff @@ -243,10 +244,6 @@ static int init(struct tgec_regs __iomem *regs, struct tgec_cfg *cfg, static int check_init_parameters(struct fman_mac *tgec) { - if (tgec->max_speed < SPEED_10000) { - pr_err("10G MAC driver only support 10G speed\n"); - return -EINVAL; - } if (!tgec->exception_cb) { pr_err("uninitialized exception_cb\n"); return -EINVAL; @@ -384,40 +381,13 @@ static void free_init_resources(struct fman_mac *tgec) tgec->unicast_addr_hash = NULL; } -static bool is_init_done(struct tgec_cfg *cfg) -{ - /* Checks if tGEC driver parameters were initialized */ - if (!cfg) - return true; - - return false; -} - static int tgec_enable(struct fman_mac *tgec) { - struct tgec_regs __iomem *regs = tgec->regs; - u32 tmp; - - if (!is_init_done(tgec->cfg)) - return -EINVAL; - - tmp = ioread32be(®s->command_config); - tmp |= CMD_CFG_RX_EN | CMD_CFG_TX_EN; - iowrite32be(tmp, ®s->command_config); - return 0; } static void tgec_disable(struct fman_mac *tgec) { - struct tgec_regs __iomem *regs = tgec->regs; - u32 tmp; - - WARN_ON_ONCE(!is_init_done(tgec->cfg)); - - tmp = ioread32be(®s->command_config); - tmp &= ~(CMD_CFG_RX_EN | CMD_CFG_TX_EN); - iowrite32be(tmp, ®s->command_config); } static int tgec_set_promiscuous(struct fman_mac *tgec, bool new_val) @@ -425,9 +395,6 @@ static int tgec_set_promiscuous(struct fman_mac *tgec, bool new_val) struct tgec_regs __iomem *regs = tgec->regs; u32 tmp; - if (!is_init_done(tgec->cfg)) - return -EINVAL; - tmp = ioread32be(®s->command_config); if (new_val) tmp |= CMD_CFG_PROMIS_EN; @@ -444,9 +411,6 @@ static int tgec_set_tx_pause_frames(struct fman_mac *tgec, { struct tgec_regs __iomem *regs = tgec->regs; - if (!is_init_done(tgec->cfg)) - return -EINVAL; - 
iowrite32be((u32)pause_time, ®s->pause_quant); return 0; @@ -457,9 +421,6 @@ static int tgec_accept_rx_pause_frames(struct fman_mac *tgec, bool en) struct tgec_regs __iomem *regs = tgec->regs; u32 tmp; - if (!is_init_done(tgec->cfg)) - return -EINVAL; - tmp = ioread32be(®s->command_config); if (!en) tmp |= CMD_CFG_PAUSE_IGNORE; @@ -470,12 +431,53 @@ static int tgec_accept_rx_pause_frames(struct fman_mac *tgec, bool en) return 0; } +static void tgec_mac_config(struct phylink_config *config, unsigned int mode, + const struct phylink_link_state *state) +{ +} + +static void tgec_link_up(struct phylink_config *config, struct phy_device *phy, + unsigned int mode, phy_interface_t interface, + int speed, int duplex, bool tx_pause, bool rx_pause) +{ + struct mac_device *mac_dev = fman_config_to_mac(config); + struct fman_mac *tgec = mac_dev->fman_mac; + struct tgec_regs __iomem *regs = tgec->regs; + u16 pause_time = tx_pause ? FSL_FM_PAUSE_TIME_ENABLE : + FSL_FM_PAUSE_TIME_DISABLE; + u32 tmp; + + tgec_set_tx_pause_frames(tgec, 0, pause_time, 0); + tgec_accept_rx_pause_frames(tgec, rx_pause); + mac_dev->update_speed(mac_dev, speed); + + tmp = ioread32be(®s->command_config); + tmp |= CMD_CFG_RX_EN | CMD_CFG_TX_EN; + iowrite32be(tmp, ®s->command_config); +} + +static void tgec_link_down(struct phylink_config *config, unsigned int mode, + phy_interface_t interface) +{ + struct fman_mac *tgec = fman_config_to_mac(config)->fman_mac; + struct tgec_regs __iomem *regs = tgec->regs; + u32 tmp; + + tmp = ioread32be(®s->command_config); + tmp &= ~(CMD_CFG_RX_EN | CMD_CFG_TX_EN); + iowrite32be(tmp, ®s->command_config); +} + +static const struct phylink_mac_ops tgec_mac_ops = { + .validate = phylink_generic_validate, + .mac_config = tgec_mac_config, + .mac_link_up = tgec_link_up, + .mac_link_down = tgec_link_down, +}; + static int tgec_modify_mac_address(struct fman_mac *tgec, const enet_addr_t *p_enet_addr) { - if (!is_init_done(tgec->cfg)) - return -EINVAL; - tgec->addr = ENET_ADDR_TO_UINT64(*p_enet_addr); set_mac_address(tgec->regs, (const u8 *)(*p_enet_addr)); @@ -490,9 +492,6 @@ static int tgec_add_hash_mac_address(struct fman_mac *tgec, u32 crc = 0xFFFFFFFF, hash; u64 addr; - if (!is_init_done(tgec->cfg)) - return -EINVAL; - addr = ENET_ADDR_TO_UINT64(*eth_addr); if (!(addr & GROUP_ADDRESS)) { @@ -525,9 +524,6 @@ static int tgec_set_allmulti(struct fman_mac *tgec, bool enable) u32 entry; struct tgec_regs __iomem *regs = tgec->regs; - if (!is_init_done(tgec->cfg)) - return -EINVAL; - if (enable) { for (entry = 0; entry < TGEC_HASH_TABLE_SIZE; entry++) iowrite32be(entry | TGEC_HASH_MCAST_EN, @@ -548,9 +544,6 @@ static int tgec_set_tstamp(struct fman_mac *tgec, bool enable) struct tgec_regs __iomem *regs = tgec->regs; u32 tmp; - if (!is_init_done(tgec->cfg)) - return -EINVAL; - tmp = ioread32be(®s->command_config); if (enable) @@ -572,9 +565,6 @@ static int tgec_del_hash_mac_address(struct fman_mac *tgec, u32 crc = 0xFFFFFFFF, hash; u64 addr; - if (!is_init_done(tgec->cfg)) - return -EINVAL; - addr = ((*(u64 *)eth_addr) >> 16); /* CRC calculation */ @@ -601,22 +591,12 @@ static int tgec_del_hash_mac_address(struct fman_mac *tgec, return 0; } -static void tgec_adjust_link(struct mac_device *mac_dev) -{ - struct phy_device *phy_dev = mac_dev->phy_dev; - - mac_dev->update_speed(mac_dev, phy_dev->speed); -} - static int tgec_set_exception(struct fman_mac *tgec, enum fman_mac_exceptions exception, bool enable) { struct tgec_regs __iomem *regs = tgec->regs; u32 bit_mask = 0; - if (!is_init_done(tgec->cfg)) - return 
-EINVAL; - bit_mask = get_exception_flag(exception); if (bit_mask) { if (enable) @@ -641,9 +621,6 @@ static int tgec_init(struct fman_mac *tgec) enet_addr_t eth_addr; int err; - if (is_init_done(tgec->cfg)) - return -EINVAL; - if (DEFAULT_RESET_ON_INIT && (fman_reset_mac(tgec->fm, tgec->mac_id) != 0)) { pr_err("Can't reset MAC!\n"); @@ -753,7 +730,6 @@ static struct fman_mac *tgec_config(struct mac_device *mac_dev, tgec->regs = mac_dev->vaddr; tgec->addr = ENET_ADDR_TO_UINT64(mac_dev->addr); - tgec->max_speed = params->max_speed; tgec->mac_id = params->mac_id; tgec->exceptions = (TGEC_IMASK_MDIO_SCAN_EVENT | TGEC_IMASK_REM_FAULT | @@ -788,17 +764,15 @@ int tgec_initialization(struct mac_device *mac_dev, int err; struct fman_mac *tgec; + mac_dev->phylink_ops = &tgec_mac_ops; mac_dev->set_promisc = tgec_set_promiscuous; mac_dev->change_addr = tgec_modify_mac_address; mac_dev->add_hash_mac_addr = tgec_add_hash_mac_address; mac_dev->remove_hash_mac_addr = tgec_del_hash_mac_address; - mac_dev->set_tx_pause = tgec_set_tx_pause_frames; - mac_dev->set_rx_pause = tgec_accept_rx_pause_frames; mac_dev->set_exception = tgec_set_exception; mac_dev->set_allmulti = tgec_set_allmulti; mac_dev->set_tstamp = tgec_set_tstamp; mac_dev->set_multi = fman_set_multi; - mac_dev->adjust_link = tgec_adjust_link; mac_dev->enable = tgec_enable; mac_dev->disable = tgec_disable; @@ -808,6 +782,19 @@ int tgec_initialization(struct mac_device *mac_dev, goto _return; } + /* The internal connection to the serdes is XGMII, but this isn't + * really correct for the phy mode (which is the external connection). + * However, this is how all older device trees say that they want + * XAUI, so just convert it for them. + */ + if (mac_dev->phy_if == PHY_INTERFACE_MODE_XGMII) + mac_dev->phy_if = PHY_INTERFACE_MODE_XAUI; + + __set_bit(PHY_INTERFACE_MODE_XAUI, + mac_dev->phylink_config.supported_interfaces); + mac_dev->phylink_config.mac_capabilities = + MAC_SYM_PAUSE | MAC_ASYM_PAUSE | MAC_10000FD; + tgec = mac_dev->fman_mac; tgec->cfg->max_frame_length = fman_get_max_frm(); err = tgec_init(tgec); diff --git a/drivers/net/ethernet/freescale/fman/mac.c b/drivers/net/ethernet/freescale/fman/mac.c index 65df308bad97..c6496a498726 100644 --- a/drivers/net/ethernet/freescale/fman/mac.c +++ b/drivers/net/ethernet/freescale/fman/mac.c @@ -15,6 +15,7 @@ #include <linux/phy.h> #include <linux/netdevice.h> #include <linux/phy_fixed.h> +#include <linux/phylink.h> #include <linux/etherdevice.h> #include <linux/libfdt_env.h> @@ -93,130 +94,8 @@ int fman_set_multi(struct net_device *net_dev, struct mac_device *mac_dev) return 0; } -/** - * fman_set_mac_active_pause - * @mac_dev: A pointer to the MAC device - * @rx: Pause frame setting for RX - * @tx: Pause frame setting for TX - * - * Set the MAC RX/TX PAUSE frames settings - * - * Avoid redundant calls to FMD, if the MAC driver already contains the desired - * active PAUSE settings. Otherwise, the new active settings should be reflected - * in FMan. - * - * Return: 0 on success; Error code otherwise. - */ -int fman_set_mac_active_pause(struct mac_device *mac_dev, bool rx, bool tx) -{ - struct fman_mac *fman_mac = mac_dev->fman_mac; - int err = 0; - - if (rx != mac_dev->rx_pause_active) { - err = mac_dev->set_rx_pause(fman_mac, rx); - if (likely(err == 0)) - mac_dev->rx_pause_active = rx; - } - - if (tx != mac_dev->tx_pause_active) { - u16 pause_time = (tx ? 
FSL_FM_PAUSE_TIME_ENABLE : - FSL_FM_PAUSE_TIME_DISABLE); - - err = mac_dev->set_tx_pause(fman_mac, 0, pause_time, 0); - - if (likely(err == 0)) - mac_dev->tx_pause_active = tx; - } - - return err; -} -EXPORT_SYMBOL(fman_set_mac_active_pause); - -/** - * fman_get_pause_cfg - * @mac_dev: A pointer to the MAC device - * @rx_pause: Return value for RX setting - * @tx_pause: Return value for TX setting - * - * Determine the MAC RX/TX PAUSE frames settings based on PHY - * autonegotiation or values set by eththool. - * - * Return: Pointer to FMan device. - */ -void fman_get_pause_cfg(struct mac_device *mac_dev, bool *rx_pause, - bool *tx_pause) -{ - struct phy_device *phy_dev = mac_dev->phy_dev; - u16 lcl_adv, rmt_adv; - u8 flowctrl; - - *rx_pause = *tx_pause = false; - - if (!phy_dev->duplex) - return; - - /* If PAUSE autonegotiation is disabled, the TX/RX PAUSE settings - * are those set by ethtool. - */ - if (!mac_dev->autoneg_pause) { - *rx_pause = mac_dev->rx_pause_req; - *tx_pause = mac_dev->tx_pause_req; - return; - } - - /* Else if PAUSE autonegotiation is enabled, the TX/RX PAUSE - * settings depend on the result of the link negotiation. - */ - - /* get local capabilities */ - lcl_adv = linkmode_adv_to_lcl_adv_t(phy_dev->advertising); - - /* get link partner capabilities */ - rmt_adv = 0; - if (phy_dev->pause) - rmt_adv |= LPA_PAUSE_CAP; - if (phy_dev->asym_pause) - rmt_adv |= LPA_PAUSE_ASYM; - - /* Calculate TX/RX settings based on local and peer advertised - * symmetric/asymmetric PAUSE capabilities. - */ - flowctrl = mii_resolve_flowctrl_fdx(lcl_adv, rmt_adv); - if (flowctrl & FLOW_CTRL_RX) - *rx_pause = true; - if (flowctrl & FLOW_CTRL_TX) - *tx_pause = true; -} -EXPORT_SYMBOL(fman_get_pause_cfg); - -#define DTSEC_SUPPORTED \ - (SUPPORTED_10baseT_Half \ - | SUPPORTED_10baseT_Full \ - | SUPPORTED_100baseT_Half \ - | SUPPORTED_100baseT_Full \ - | SUPPORTED_Autoneg \ - | SUPPORTED_Pause \ - | SUPPORTED_Asym_Pause \ - | SUPPORTED_FIBRE \ - | SUPPORTED_MII) - static DEFINE_MUTEX(eth_lock); -static const u16 phy2speed[] = { - [PHY_INTERFACE_MODE_MII] = SPEED_100, - [PHY_INTERFACE_MODE_GMII] = SPEED_1000, - [PHY_INTERFACE_MODE_SGMII] = SPEED_1000, - [PHY_INTERFACE_MODE_TBI] = SPEED_1000, - [PHY_INTERFACE_MODE_RMII] = SPEED_100, - [PHY_INTERFACE_MODE_RGMII] = SPEED_1000, - [PHY_INTERFACE_MODE_RGMII_ID] = SPEED_1000, - [PHY_INTERFACE_MODE_RGMII_RXID] = SPEED_1000, - [PHY_INTERFACE_MODE_RGMII_TXID] = SPEED_1000, - [PHY_INTERFACE_MODE_RTBI] = SPEED_1000, - [PHY_INTERFACE_MODE_QSGMII] = SPEED_1000, - [PHY_INTERFACE_MODE_XGMII] = SPEED_10000 -}; - static struct platform_device *dpaa_eth_add_device(int fman_id, struct mac_device *mac_dev) { @@ -263,8 +142,8 @@ no_mem: } static const struct of_device_id mac_match[] = { - { .compatible = "fsl,fman-dtsec", .data = dtsec_initialization }, - { .compatible = "fsl,fman-xgec", .data = tgec_initialization }, + { .compatible = "fsl,fman-dtsec", .data = dtsec_initialization }, + { .compatible = "fsl,fman-xgec", .data = tgec_initialization }, { .compatible = "fsl,fman-memac", .data = memac_initialization }, {} }; @@ -295,6 +174,7 @@ static int mac_probe(struct platform_device *_of_dev) priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); if (!priv) return -ENOMEM; + platform_set_drvdata(_of_dev, mac_dev); /* Save private information */ mac_dev->priv = priv; @@ -424,57 +304,21 @@ static int mac_probe(struct platform_device *_of_dev) } mac_dev->phy_if = phy_if; - priv->speed = phy2speed[mac_dev->phy_if]; - params.max_speed = priv->speed; - mac_dev->if_support 
= DTSEC_SUPPORTED; - /* We don't support half-duplex in SGMII mode */ - if (mac_dev->phy_if == PHY_INTERFACE_MODE_SGMII) - mac_dev->if_support &= ~(SUPPORTED_10baseT_Half | - SUPPORTED_100baseT_Half); - - /* Gigabit support (no half-duplex) */ - if (params.max_speed == 1000) - mac_dev->if_support |= SUPPORTED_1000baseT_Full; - - /* The 10G interface only supports one mode */ - if (mac_dev->phy_if == PHY_INTERFACE_MODE_XGMII) - mac_dev->if_support = SUPPORTED_10000baseT_Full; - - /* Get the rest of the PHY information */ - mac_dev->phy_node = of_parse_phandle(mac_node, "phy-handle", 0); - - params.basex_if = false; params.mac_id = priv->cell_index; params.fm = (void *)priv->fman; params.exception_cb = mac_exception; params.event_cb = mac_exception; err = init(mac_dev, mac_node, ¶ms); - if (err < 0) { - dev_err(dev, "mac_dev->init() = %d\n", err); - of_node_put(mac_dev->phy_node); - return err; - } - - /* pause frame autonegotiation enabled */ - mac_dev->autoneg_pause = true; - - /* By intializing the values to false, force FMD to enable PAUSE frames - * on RX and TX - */ - mac_dev->rx_pause_req = true; - mac_dev->tx_pause_req = true; - mac_dev->rx_pause_active = false; - mac_dev->tx_pause_active = false; - err = fman_set_mac_active_pause(mac_dev, true, true); if (err < 0) - dev_err(dev, "fman_set_mac_active_pause() = %d\n", err); + return err; if (!is_zero_ether_addr(mac_dev->addr)) dev_info(dev, "FMan MAC address: %pM\n", mac_dev->addr); priv->eth_dev = dpaa_eth_add_device(fman_id, mac_dev); if (IS_ERR(priv->eth_dev)) { + err = PTR_ERR(priv->eth_dev); dev_err(dev, "failed to add Ethernet platform device for MAC %d\n", priv->cell_index); priv->eth_dev = NULL; diff --git a/drivers/net/ethernet/freescale/fman/mac.h b/drivers/net/ethernet/freescale/fman/mac.h index 13b69ca5f00c..ad06f8d7924b 100644 --- a/drivers/net/ethernet/freescale/fman/mac.h +++ b/drivers/net/ethernet/freescale/fman/mac.h @@ -9,6 +9,7 @@ #include <linux/device.h> #include <linux/if_ether.h> #include <linux/phy.h> +#include <linux/phylink.h> #include <linux/list.h> #include "fman_port.h" @@ -24,32 +25,22 @@ struct mac_device { struct resource *res; u8 addr[ETH_ALEN]; struct fman_port *port[2]; - u32 if_support; - struct phy_device *phy_dev; + struct phylink *phylink; + struct phylink_config phylink_config; phy_interface_t phy_if; - struct device_node *phy_node; - struct net_device *net_dev; - bool autoneg_pause; - bool rx_pause_req; - bool tx_pause_req; - bool rx_pause_active; - bool tx_pause_active; bool promisc; bool allmulti; + const struct phylink_mac_ops *phylink_ops; int (*enable)(struct fman_mac *mac_dev); void (*disable)(struct fman_mac *mac_dev); - void (*adjust_link)(struct mac_device *mac_dev); int (*set_promisc)(struct fman_mac *mac_dev, bool enable); int (*change_addr)(struct fman_mac *mac_dev, const enet_addr_t *enet_addr); int (*set_allmulti)(struct fman_mac *mac_dev, bool enable); int (*set_tstamp)(struct fman_mac *mac_dev, bool enable); int (*set_multi)(struct net_device *net_dev, struct mac_device *mac_dev); - int (*set_rx_pause)(struct fman_mac *mac_dev, bool en); - int (*set_tx_pause)(struct fman_mac *mac_dev, u8 priority, - u16 pause_time, u16 thresh_time); int (*set_exception)(struct fman_mac *mac_dev, enum fman_mac_exceptions exception, bool enable); int (*add_hash_mac_addr)(struct fman_mac *mac_dev, @@ -63,6 +54,12 @@ struct mac_device { struct mac_priv_s *priv; }; +static inline struct mac_device +*fman_config_to_mac(struct phylink_config *config) +{ + return container_of(config, struct mac_device, 
phylink_config); +} + struct dpaa_eth_data { struct mac_device *mac_dev; int mac_hw_id; diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c index 3b14dc93f59d..7d79006250ae 100644 --- a/drivers/net/ethernet/ibm/ibmveth.c +++ b/drivers/net/ethernet/ibm/ibmveth.c @@ -690,8 +690,7 @@ static int ibmveth_close(struct net_device *netdev) napi_disable(&adapter->napi); - if (!adapter->pool_config) - netif_tx_stop_all_queues(netdev); + netif_tx_stop_all_queues(netdev); h_vio_signal(adapter->vdev->unit_address, VIO_IRQ_DISABLE); @@ -799,9 +798,7 @@ static int ibmveth_set_csum_offload(struct net_device *dev, u32 data) if (netif_running(dev)) { restart = 1; - adapter->pool_config = 1; ibmveth_close(dev); - adapter->pool_config = 0; } set_attr = 0; @@ -883,9 +880,7 @@ static int ibmveth_set_tso(struct net_device *dev, u32 data) if (netif_running(dev)) { restart = 1; - adapter->pool_config = 1; ibmveth_close(dev); - adapter->pool_config = 0; } set_attr = 0; @@ -1535,9 +1530,7 @@ static int ibmveth_change_mtu(struct net_device *dev, int new_mtu) only the buffer pools necessary to hold the new MTU */ if (netif_running(adapter->netdev)) { need_restart = 1; - adapter->pool_config = 1; ibmveth_close(adapter->netdev); - adapter->pool_config = 0; } /* Look for an active buffer pool that can hold the new MTU */ @@ -1701,7 +1694,6 @@ static int ibmveth_probe(struct vio_dev *dev, const struct vio_device_id *id) adapter->vdev = dev; adapter->netdev = netdev; adapter->mcastFilterSize = be32_to_cpu(*mcastFilterSize_p); - adapter->pool_config = 0; ibmveth_init_link_settings(netdev); netif_napi_add_weight(netdev, &adapter->napi, ibmveth_poll, 16); @@ -1841,9 +1833,7 @@ static ssize_t veth_pool_store(struct kobject *kobj, struct attribute *attr, return -ENOMEM; } pool->active = 1; - adapter->pool_config = 1; ibmveth_close(netdev); - adapter->pool_config = 0; if ((rc = ibmveth_open(netdev))) return rc; } else { @@ -1869,10 +1859,8 @@ static ssize_t veth_pool_store(struct kobject *kobj, struct attribute *attr, } if (netif_running(netdev)) { - adapter->pool_config = 1; ibmveth_close(netdev); pool->active = 0; - adapter->pool_config = 0; if ((rc = ibmveth_open(netdev))) return rc; } @@ -1883,9 +1871,7 @@ static ssize_t veth_pool_store(struct kobject *kobj, struct attribute *attr, return -EINVAL; } else { if (netif_running(netdev)) { - adapter->pool_config = 1; ibmveth_close(netdev); - adapter->pool_config = 0; pool->size = value; if ((rc = ibmveth_open(netdev))) return rc; @@ -1898,9 +1884,7 @@ static ssize_t veth_pool_store(struct kobject *kobj, struct attribute *attr, return -EINVAL; } else { if (netif_running(netdev)) { - adapter->pool_config = 1; ibmveth_close(netdev); - adapter->pool_config = 0; pool->buff_size = value; if ((rc = ibmveth_open(netdev))) return rc; diff --git a/drivers/net/ethernet/ibm/ibmveth.h b/drivers/net/ethernet/ibm/ibmveth.h index daf6f615c03f..4f8357187292 100644 --- a/drivers/net/ethernet/ibm/ibmveth.h +++ b/drivers/net/ethernet/ibm/ibmveth.h @@ -146,7 +146,6 @@ struct ibmveth_adapter { dma_addr_t filter_list_dma; struct ibmveth_buff_pool rx_buff_pool[IBMVETH_NUM_BUFF_POOLS]; struct ibmveth_rx_q rx_queue; - int pool_config; int rx_csum; int large_send; bool is_active_trunk; diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h index 0777bed5bb1a..b74f30ec629a 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/reg.h +++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h @@ -4620,6 +4620,7 @@ MLXSW_ITEM32(reg, ptys, an_status, 
0x04, 28, 4); #define MLXSW_REG_PTYS_EXT_ETH_SPEED_100GAUI_2_100GBASE_CR2_KR2 BIT(10) #define MLXSW_REG_PTYS_EXT_ETH_SPEED_200GAUI_4_200GBASE_CR4_KR4 BIT(12) #define MLXSW_REG_PTYS_EXT_ETH_SPEED_400GAUI_8 BIT(15) +#define MLXSW_REG_PTYS_EXT_ETH_SPEED_800GAUI_8 BIT(19) /* reg_ptys_ext_eth_proto_cap * Extended Ethernet port supported speeds and protocols. diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c index dcd79d7e2af4..472830d07ac1 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c @@ -1672,6 +1672,19 @@ mlxsw_sp2_mask_ethtool_400gaui_8[] = { #define MLXSW_SP2_MASK_ETHTOOL_400GAUI_8_LEN \ ARRAY_SIZE(mlxsw_sp2_mask_ethtool_400gaui_8) +static const enum ethtool_link_mode_bit_indices +mlxsw_sp2_mask_ethtool_800gaui_8[] = { + ETHTOOL_LINK_MODE_800000baseCR8_Full_BIT, + ETHTOOL_LINK_MODE_800000baseKR8_Full_BIT, + ETHTOOL_LINK_MODE_800000baseDR8_Full_BIT, + ETHTOOL_LINK_MODE_800000baseDR8_2_Full_BIT, + ETHTOOL_LINK_MODE_800000baseSR8_Full_BIT, + ETHTOOL_LINK_MODE_800000baseVR8_Full_BIT, +}; + +#define MLXSW_SP2_MASK_ETHTOOL_800GAUI_8_LEN \ + ARRAY_SIZE(mlxsw_sp2_mask_ethtool_800gaui_8) + #define MLXSW_SP_PORT_MASK_WIDTH_1X BIT(0) #define MLXSW_SP_PORT_MASK_WIDTH_2X BIT(1) #define MLXSW_SP_PORT_MASK_WIDTH_4X BIT(2) @@ -1820,6 +1833,14 @@ static const struct mlxsw_sp2_port_link_mode mlxsw_sp2_port_link_mode[] = { .speed = SPEED_400000, .width = 8, }, + { + .mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_800GAUI_8, + .mask_ethtool = mlxsw_sp2_mask_ethtool_800gaui_8, + .m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_800GAUI_8_LEN, + .mask_sup_width = MLXSW_SP_PORT_MASK_WIDTH_8X, + .speed = SPEED_800000, + .width = 8, + }, }; #define MLXSW_SP2_PORT_LINK_MODE_LEN ARRAY_SIZE(mlxsw_sp2_port_link_mode) diff --git a/drivers/net/ethernet/microchip/Kconfig b/drivers/net/ethernet/microchip/Kconfig index ed7a35c3ceac..24c994baad13 100644 --- a/drivers/net/ethernet/microchip/Kconfig +++ b/drivers/net/ethernet/microchip/Kconfig @@ -57,5 +57,6 @@ config LAN743X source "drivers/net/ethernet/microchip/lan966x/Kconfig" source "drivers/net/ethernet/microchip/sparx5/Kconfig" +source "drivers/net/ethernet/microchip/vcap/Kconfig" endif # NET_VENDOR_MICROCHIP diff --git a/drivers/net/ethernet/microchip/Makefile b/drivers/net/ethernet/microchip/Makefile index 9faa41436198..bbd349264e6f 100644 --- a/drivers/net/ethernet/microchip/Makefile +++ b/drivers/net/ethernet/microchip/Makefile @@ -11,3 +11,4 @@ lan743x-objs := lan743x_main.o lan743x_ethtool.o lan743x_ptp.o obj-$(CONFIG_LAN966X_SWITCH) += lan966x/ obj-$(CONFIG_SPARX5_SWITCH) += sparx5/ +obj-$(CONFIG_VCAP) += vcap/ diff --git a/drivers/net/ethernet/microchip/sparx5/Kconfig b/drivers/net/ethernet/microchip/sparx5/Kconfig index cc5e48e1bb4c..98e27530a91f 100644 --- a/drivers/net/ethernet/microchip/sparx5/Kconfig +++ b/drivers/net/ethernet/microchip/sparx5/Kconfig @@ -9,5 +9,6 @@ config SPARX5_SWITCH select PHYLINK select PHY_SPARX5_SERDES select RESET_CONTROLLER + select VCAP help This driver supports the Sparx5 network switch device. 
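For context on how the new 800GAUI-8 entry in the mlxsw spectrum_ethtool.c hunk above is consumed: each mlxsw_sp2_port_link_mode element ties one PTYS extended-protocol capability bit to a list of ethtool link-mode bits plus a speed, and the driver ORs those ethtool bits into the supported mask whenever the firmware advertises the PTYS bit. The fragment below is a minimal, self-contained sketch of that table-driven mapping; the struct and helper names are invented for illustration and are not the driver's actual internals.

#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

/* Illustrative stand-ins for the PTYS capability bits (see the defines above). */
#define SPEED_400GAUI_8 (1u << 15)
#define SPEED_800GAUI_8 (1u << 19)

/* Illustrative ethtool link-mode bit indices. */
enum { MODE_800G_CR8, MODE_800G_KR8, MODE_800G_DR8, MODE_400G_CR4 };

struct link_mode_map {
        uint32_t ptys_bit;          /* capability bit reported by firmware */
        const int *ethtool_modes;   /* ethtool link-mode bit indices */
        size_t n_modes;
        int speed;                  /* Mb/s */
};

static const int modes_800g[] = { MODE_800G_CR8, MODE_800G_KR8, MODE_800G_DR8 };
static const int modes_400g[] = { MODE_400G_CR4 };

static const struct link_mode_map map[] = {
        { SPEED_400GAUI_8, modes_400g, 1, 400000 },
        { SPEED_800GAUI_8, modes_800g, 3, 800000 },
};

/* OR the ethtool modes of every PTYS bit the port advertises. */
static void resolve_modes(uint32_t ptys_cap, unsigned long *supported)
{
        for (size_t i = 0; i < sizeof(map) / sizeof(map[0]); i++) {
                if (!(ptys_cap & map[i].ptys_bit))
                        continue;
                for (size_t j = 0; j < map[i].n_modes; j++)
                        *supported |= 1ul << map[i].ethtool_modes[j];
        }
}

int main(void)
{
        unsigned long supported = 0;

        resolve_modes(SPEED_800GAUI_8, &supported);
        printf("supported mask: 0x%lx\n", supported);
        return 0;
}

Adding a new speed then only requires a new mask array and one more table entry, which is exactly the shape of the 800GAUI-8 hunk above.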
diff --git a/drivers/net/ethernet/microchip/sparx5/Makefile b/drivers/net/ethernet/microchip/sparx5/Makefile index d1c6ad966747..ee2c42f66742 100644 --- a/drivers/net/ethernet/microchip/sparx5/Makefile +++ b/drivers/net/ethernet/microchip/sparx5/Makefile @@ -5,7 +5,11 @@ obj-$(CONFIG_SPARX5_SWITCH) += sparx5-switch.o -sparx5-switch-objs := sparx5_main.o sparx5_packet.o \ +sparx5-switch-y := sparx5_main.o sparx5_packet.o \ sparx5_netdev.o sparx5_phylink.o sparx5_port.o sparx5_mactable.o sparx5_vlan.o \ sparx5_switchdev.o sparx5_calendar.o sparx5_ethtool.o sparx5_fdma.o \ - sparx5_ptp.o sparx5_pgid.o sparx5_tc.o sparx5_qos.o + sparx5_ptp.o sparx5_pgid.o sparx5_tc.o sparx5_qos.o \ + sparx5_vcap_impl.o sparx5_vcap_ag_api.o sparx5_tc_flower.o + +# Provide include files +ccflags-y += -I$(srctree)/drivers/net/ethernet/microchip/vcap diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_main.c b/drivers/net/ethernet/microchip/sparx5/sparx5_main.c index 62a325e96345..0b70c00c6eaa 100644 --- a/drivers/net/ethernet/microchip/sparx5/sparx5_main.c +++ b/drivers/net/ethernet/microchip/sparx5/sparx5_main.c @@ -672,6 +672,14 @@ static int sparx5_start(struct sparx5 *sparx5) sparx5_board_init(sparx5); err = sparx5_register_notifier_blocks(sparx5); + if (err) + return err; + + err = sparx5_vcap_init(sparx5); + if (err) { + sparx5_unregister_notifier_blocks(sparx5); + return err; + } /* Start Frame DMA with fallback to register based INJ/XTR */ err = -ENXIO; @@ -906,6 +914,7 @@ static int mchp_sparx5_remove(struct platform_device *pdev) sparx5_ptp_deinit(sparx5); sparx5_fdma_stop(sparx5); sparx5_cleanup_ports(sparx5); + sparx5_vcap_destroy(sparx5); /* Unregister netdevs */ sparx5_unregister_notifier_blocks(sparx5); diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_main.h b/drivers/net/ethernet/microchip/sparx5/sparx5_main.h index 7a83222caa73..2ab22a7b799e 100644 --- a/drivers/net/ethernet/microchip/sparx5/sparx5_main.h +++ b/drivers/net/ethernet/microchip/sparx5/sparx5_main.h @@ -288,6 +288,8 @@ struct sparx5 { struct mutex ptp_lock; /* lock for ptp interface state */ u16 ptp_skbs; int ptp_irq; + /* VCAP */ + struct vcap_control *vcap_ctrl; /* PGID allocation map */ u8 pgid_map[PGID_TABLE_SIZE]; }; @@ -382,6 +384,10 @@ void sparx5_ptp_txtstamp_release(struct sparx5_port *port, struct sk_buff *skb); irqreturn_t sparx5_ptp_irq_handler(int irq, void *args); +/* sparx5_vcap_impl.c */ +int sparx5_vcap_init(struct sparx5 *sparx5); +void sparx5_vcap_destroy(struct sparx5 *sparx5); + /* sparx5_pgid.c */ enum sparx5_pgid_type { SPX5_PGID_FREE, diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_main_regs.h b/drivers/net/ethernet/microchip/sparx5/sparx5_main_regs.h index fa2eb70f487a..c42195f4ec4d 100644 --- a/drivers/net/ethernet/microchip/sparx5/sparx5_main_regs.h +++ b/drivers/net/ethernet/microchip/sparx5/sparx5_main_regs.h @@ -4,8 +4,8 @@ * Copyright (c) 2021 Microchip Technology Inc. */ -/* This file is autogenerated by cml-utils 2022-02-26 14:15:01 +0100. - * Commit ID: 98bdd3d171cc2a1afd30d241d41a4281d471a48c (dirty) +/* This file is autogenerated by cml-utils 2022-09-12 14:22:42 +0200. 
+ * Commit ID: 06aecbca4eab6e85d87f665fe6b6348c48146245 */ #ifndef _SPARX5_MAIN_REGS_H_ @@ -171,6 +171,162 @@ enum sparx5_target { /* ANA_AC:STAT_CNT_CFG_PORT:STAT_LSB_CNT */ #define ANA_AC_PORT_STAT_LSB_CNT(g, r) __REG(TARGET_ANA_AC, 0, 1, 843776, g, 70, 64, 20, r, 4, 4) +/* ANA_ACL:COMMON:VCAP_S2_CFG */ +#define ANA_ACL_VCAP_S2_CFG(r) __REG(TARGET_ANA_ACL, 0, 1, 32768, 0, 1, 592, 0, r, 70, 4) + +#define ANA_ACL_VCAP_S2_CFG_SEC_ROUTE_HANDLING_ENA BIT(28) +#define ANA_ACL_VCAP_S2_CFG_SEC_ROUTE_HANDLING_ENA_SET(x)\ + FIELD_PREP(ANA_ACL_VCAP_S2_CFG_SEC_ROUTE_HANDLING_ENA, x) +#define ANA_ACL_VCAP_S2_CFG_SEC_ROUTE_HANDLING_ENA_GET(x)\ + FIELD_GET(ANA_ACL_VCAP_S2_CFG_SEC_ROUTE_HANDLING_ENA, x) + +#define ANA_ACL_VCAP_S2_CFG_SEC_TYPE_OAM_ENA GENMASK(27, 26) +#define ANA_ACL_VCAP_S2_CFG_SEC_TYPE_OAM_ENA_SET(x)\ + FIELD_PREP(ANA_ACL_VCAP_S2_CFG_SEC_TYPE_OAM_ENA, x) +#define ANA_ACL_VCAP_S2_CFG_SEC_TYPE_OAM_ENA_GET(x)\ + FIELD_GET(ANA_ACL_VCAP_S2_CFG_SEC_TYPE_OAM_ENA, x) + +#define ANA_ACL_VCAP_S2_CFG_SEC_TYPE_IP6_TCPUDP_OTHER_ENA GENMASK(25, 24) +#define ANA_ACL_VCAP_S2_CFG_SEC_TYPE_IP6_TCPUDP_OTHER_ENA_SET(x)\ + FIELD_PREP(ANA_ACL_VCAP_S2_CFG_SEC_TYPE_IP6_TCPUDP_OTHER_ENA, x) +#define ANA_ACL_VCAP_S2_CFG_SEC_TYPE_IP6_TCPUDP_OTHER_ENA_GET(x)\ + FIELD_GET(ANA_ACL_VCAP_S2_CFG_SEC_TYPE_IP6_TCPUDP_OTHER_ENA, x) + +#define ANA_ACL_VCAP_S2_CFG_SEC_TYPE_IP6_VID_ENA GENMASK(23, 22) +#define ANA_ACL_VCAP_S2_CFG_SEC_TYPE_IP6_VID_ENA_SET(x)\ + FIELD_PREP(ANA_ACL_VCAP_S2_CFG_SEC_TYPE_IP6_VID_ENA, x) +#define ANA_ACL_VCAP_S2_CFG_SEC_TYPE_IP6_VID_ENA_GET(x)\ + FIELD_GET(ANA_ACL_VCAP_S2_CFG_SEC_TYPE_IP6_VID_ENA, x) + +#define ANA_ACL_VCAP_S2_CFG_SEC_TYPE_IP6_STD_ENA GENMASK(21, 20) +#define ANA_ACL_VCAP_S2_CFG_SEC_TYPE_IP6_STD_ENA_SET(x)\ + FIELD_PREP(ANA_ACL_VCAP_S2_CFG_SEC_TYPE_IP6_STD_ENA, x) +#define ANA_ACL_VCAP_S2_CFG_SEC_TYPE_IP6_STD_ENA_GET(x)\ + FIELD_GET(ANA_ACL_VCAP_S2_CFG_SEC_TYPE_IP6_STD_ENA, x) + +#define ANA_ACL_VCAP_S2_CFG_SEC_TYPE_IP6_TCPUDP_ENA GENMASK(19, 18) +#define ANA_ACL_VCAP_S2_CFG_SEC_TYPE_IP6_TCPUDP_ENA_SET(x)\ + FIELD_PREP(ANA_ACL_VCAP_S2_CFG_SEC_TYPE_IP6_TCPUDP_ENA, x) +#define ANA_ACL_VCAP_S2_CFG_SEC_TYPE_IP6_TCPUDP_ENA_GET(x)\ + FIELD_GET(ANA_ACL_VCAP_S2_CFG_SEC_TYPE_IP6_TCPUDP_ENA, x) + +#define ANA_ACL_VCAP_S2_CFG_SEC_TYPE_IP_7TUPLE_ENA GENMASK(17, 16) +#define ANA_ACL_VCAP_S2_CFG_SEC_TYPE_IP_7TUPLE_ENA_SET(x)\ + FIELD_PREP(ANA_ACL_VCAP_S2_CFG_SEC_TYPE_IP_7TUPLE_ENA, x) +#define ANA_ACL_VCAP_S2_CFG_SEC_TYPE_IP_7TUPLE_ENA_GET(x)\ + FIELD_GET(ANA_ACL_VCAP_S2_CFG_SEC_TYPE_IP_7TUPLE_ENA, x) + +#define ANA_ACL_VCAP_S2_CFG_SEC_TYPE_IP4_VID_ENA GENMASK(15, 14) +#define ANA_ACL_VCAP_S2_CFG_SEC_TYPE_IP4_VID_ENA_SET(x)\ + FIELD_PREP(ANA_ACL_VCAP_S2_CFG_SEC_TYPE_IP4_VID_ENA, x) +#define ANA_ACL_VCAP_S2_CFG_SEC_TYPE_IP4_VID_ENA_GET(x)\ + FIELD_GET(ANA_ACL_VCAP_S2_CFG_SEC_TYPE_IP4_VID_ENA, x) + +#define ANA_ACL_VCAP_S2_CFG_SEC_TYPE_IP4_TCPUDP_ENA GENMASK(13, 12) +#define ANA_ACL_VCAP_S2_CFG_SEC_TYPE_IP4_TCPUDP_ENA_SET(x)\ + FIELD_PREP(ANA_ACL_VCAP_S2_CFG_SEC_TYPE_IP4_TCPUDP_ENA, x) +#define ANA_ACL_VCAP_S2_CFG_SEC_TYPE_IP4_TCPUDP_ENA_GET(x)\ + FIELD_GET(ANA_ACL_VCAP_S2_CFG_SEC_TYPE_IP4_TCPUDP_ENA, x) + +#define ANA_ACL_VCAP_S2_CFG_SEC_TYPE_IP4_OTHER_ENA GENMASK(11, 10) +#define ANA_ACL_VCAP_S2_CFG_SEC_TYPE_IP4_OTHER_ENA_SET(x)\ + FIELD_PREP(ANA_ACL_VCAP_S2_CFG_SEC_TYPE_IP4_OTHER_ENA, x) +#define ANA_ACL_VCAP_S2_CFG_SEC_TYPE_IP4_OTHER_ENA_GET(x)\ + FIELD_GET(ANA_ACL_VCAP_S2_CFG_SEC_TYPE_IP4_OTHER_ENA, x) + +#define ANA_ACL_VCAP_S2_CFG_SEC_TYPE_ARP_ENA GENMASK(9, 8) +#define 
ANA_ACL_VCAP_S2_CFG_SEC_TYPE_ARP_ENA_SET(x)\ + FIELD_PREP(ANA_ACL_VCAP_S2_CFG_SEC_TYPE_ARP_ENA, x) +#define ANA_ACL_VCAP_S2_CFG_SEC_TYPE_ARP_ENA_GET(x)\ + FIELD_GET(ANA_ACL_VCAP_S2_CFG_SEC_TYPE_ARP_ENA, x) + +#define ANA_ACL_VCAP_S2_CFG_SEC_TYPE_MAC_SNAP_ENA GENMASK(7, 6) +#define ANA_ACL_VCAP_S2_CFG_SEC_TYPE_MAC_SNAP_ENA_SET(x)\ + FIELD_PREP(ANA_ACL_VCAP_S2_CFG_SEC_TYPE_MAC_SNAP_ENA, x) +#define ANA_ACL_VCAP_S2_CFG_SEC_TYPE_MAC_SNAP_ENA_GET(x)\ + FIELD_GET(ANA_ACL_VCAP_S2_CFG_SEC_TYPE_MAC_SNAP_ENA, x) + +#define ANA_ACL_VCAP_S2_CFG_SEC_TYPE_MAC_LLC_ENA GENMASK(5, 4) +#define ANA_ACL_VCAP_S2_CFG_SEC_TYPE_MAC_LLC_ENA_SET(x)\ + FIELD_PREP(ANA_ACL_VCAP_S2_CFG_SEC_TYPE_MAC_LLC_ENA, x) +#define ANA_ACL_VCAP_S2_CFG_SEC_TYPE_MAC_LLC_ENA_GET(x)\ + FIELD_GET(ANA_ACL_VCAP_S2_CFG_SEC_TYPE_MAC_LLC_ENA, x) + +#define ANA_ACL_VCAP_S2_CFG_SEC_ENA GENMASK(3, 0) +#define ANA_ACL_VCAP_S2_CFG_SEC_ENA_SET(x)\ + FIELD_PREP(ANA_ACL_VCAP_S2_CFG_SEC_ENA, x) +#define ANA_ACL_VCAP_S2_CFG_SEC_ENA_GET(x)\ + FIELD_GET(ANA_ACL_VCAP_S2_CFG_SEC_ENA, x) + +/* ANA_ACL:COMMON:SWAP_IP_CTRL */ +#define ANA_ACL_SWAP_IP_CTRL __REG(TARGET_ANA_ACL, 0, 1, 32768, 0, 1, 592, 412, 0, 1, 4) + +#define ANA_ACL_SWAP_IP_CTRL_DMAC_REPL_OFFSET_VAL GENMASK(23, 18) +#define ANA_ACL_SWAP_IP_CTRL_DMAC_REPL_OFFSET_VAL_SET(x)\ + FIELD_PREP(ANA_ACL_SWAP_IP_CTRL_DMAC_REPL_OFFSET_VAL, x) +#define ANA_ACL_SWAP_IP_CTRL_DMAC_REPL_OFFSET_VAL_GET(x)\ + FIELD_GET(ANA_ACL_SWAP_IP_CTRL_DMAC_REPL_OFFSET_VAL, x) + +#define ANA_ACL_SWAP_IP_CTRL_IP_SWAP_IP6_HOPC_VAL GENMASK(17, 10) +#define ANA_ACL_SWAP_IP_CTRL_IP_SWAP_IP6_HOPC_VAL_SET(x)\ + FIELD_PREP(ANA_ACL_SWAP_IP_CTRL_IP_SWAP_IP6_HOPC_VAL, x) +#define ANA_ACL_SWAP_IP_CTRL_IP_SWAP_IP6_HOPC_VAL_GET(x)\ + FIELD_GET(ANA_ACL_SWAP_IP_CTRL_IP_SWAP_IP6_HOPC_VAL, x) + +#define ANA_ACL_SWAP_IP_CTRL_IP_SWAP_IP4_TTL_VAL GENMASK(9, 2) +#define ANA_ACL_SWAP_IP_CTRL_IP_SWAP_IP4_TTL_VAL_SET(x)\ + FIELD_PREP(ANA_ACL_SWAP_IP_CTRL_IP_SWAP_IP4_TTL_VAL, x) +#define ANA_ACL_SWAP_IP_CTRL_IP_SWAP_IP4_TTL_VAL_GET(x)\ + FIELD_GET(ANA_ACL_SWAP_IP_CTRL_IP_SWAP_IP4_TTL_VAL, x) + +#define ANA_ACL_SWAP_IP_CTRL_IP_SWAP_IP6_HOPC_ENA BIT(1) +#define ANA_ACL_SWAP_IP_CTRL_IP_SWAP_IP6_HOPC_ENA_SET(x)\ + FIELD_PREP(ANA_ACL_SWAP_IP_CTRL_IP_SWAP_IP6_HOPC_ENA, x) +#define ANA_ACL_SWAP_IP_CTRL_IP_SWAP_IP6_HOPC_ENA_GET(x)\ + FIELD_GET(ANA_ACL_SWAP_IP_CTRL_IP_SWAP_IP6_HOPC_ENA, x) + +#define ANA_ACL_SWAP_IP_CTRL_IP_SWAP_IP4_TTL_ENA BIT(0) +#define ANA_ACL_SWAP_IP_CTRL_IP_SWAP_IP4_TTL_ENA_SET(x)\ + FIELD_PREP(ANA_ACL_SWAP_IP_CTRL_IP_SWAP_IP4_TTL_ENA, x) +#define ANA_ACL_SWAP_IP_CTRL_IP_SWAP_IP4_TTL_ENA_GET(x)\ + FIELD_GET(ANA_ACL_SWAP_IP_CTRL_IP_SWAP_IP4_TTL_ENA, x) + +/* ANA_ACL:COMMON:VCAP_S2_RLEG_STAT */ +#define ANA_ACL_VCAP_S2_RLEG_STAT(r) __REG(TARGET_ANA_ACL, 0, 1, 32768, 0, 1, 592, 424, r, 4, 4) + +#define ANA_ACL_VCAP_S2_RLEG_STAT_IRLEG_STAT_MASK GENMASK(12, 6) +#define ANA_ACL_VCAP_S2_RLEG_STAT_IRLEG_STAT_MASK_SET(x)\ + FIELD_PREP(ANA_ACL_VCAP_S2_RLEG_STAT_IRLEG_STAT_MASK, x) +#define ANA_ACL_VCAP_S2_RLEG_STAT_IRLEG_STAT_MASK_GET(x)\ + FIELD_GET(ANA_ACL_VCAP_S2_RLEG_STAT_IRLEG_STAT_MASK, x) + +#define ANA_ACL_VCAP_S2_RLEG_STAT_ERLEG_STAT_MASK GENMASK(5, 0) +#define ANA_ACL_VCAP_S2_RLEG_STAT_ERLEG_STAT_MASK_SET(x)\ + FIELD_PREP(ANA_ACL_VCAP_S2_RLEG_STAT_ERLEG_STAT_MASK, x) +#define ANA_ACL_VCAP_S2_RLEG_STAT_ERLEG_STAT_MASK_GET(x)\ + FIELD_GET(ANA_ACL_VCAP_S2_RLEG_STAT_ERLEG_STAT_MASK, x) + +/* ANA_ACL:COMMON:VCAP_S2_FRAGMENT_CFG */ +#define ANA_ACL_VCAP_S2_FRAGMENT_CFG __REG(TARGET_ANA_ACL, 0, 1, 32768, 0, 1, 592, 440, 0, 1, 4) + +#define 
ANA_ACL_VCAP_S2_FRAGMENT_CFG_L4_MIN_LEN GENMASK(9, 5) +#define ANA_ACL_VCAP_S2_FRAGMENT_CFG_L4_MIN_LEN_SET(x)\ + FIELD_PREP(ANA_ACL_VCAP_S2_FRAGMENT_CFG_L4_MIN_LEN, x) +#define ANA_ACL_VCAP_S2_FRAGMENT_CFG_L4_MIN_LEN_GET(x)\ + FIELD_GET(ANA_ACL_VCAP_S2_FRAGMENT_CFG_L4_MIN_LEN, x) + +#define ANA_ACL_VCAP_S2_FRAGMENT_CFG_FRAGMENT_OFFSET_THRES_DIS BIT(4) +#define ANA_ACL_VCAP_S2_FRAGMENT_CFG_FRAGMENT_OFFSET_THRES_DIS_SET(x)\ + FIELD_PREP(ANA_ACL_VCAP_S2_FRAGMENT_CFG_FRAGMENT_OFFSET_THRES_DIS, x) +#define ANA_ACL_VCAP_S2_FRAGMENT_CFG_FRAGMENT_OFFSET_THRES_DIS_GET(x)\ + FIELD_GET(ANA_ACL_VCAP_S2_FRAGMENT_CFG_FRAGMENT_OFFSET_THRES_DIS, x) + +#define ANA_ACL_VCAP_S2_FRAGMENT_CFG_FRAGMENT_OFFSET_THRES GENMASK(3, 0) +#define ANA_ACL_VCAP_S2_FRAGMENT_CFG_FRAGMENT_OFFSET_THRES_SET(x)\ + FIELD_PREP(ANA_ACL_VCAP_S2_FRAGMENT_CFG_FRAGMENT_OFFSET_THRES, x) +#define ANA_ACL_VCAP_S2_FRAGMENT_CFG_FRAGMENT_OFFSET_THRES_GET(x)\ + FIELD_GET(ANA_ACL_VCAP_S2_FRAGMENT_CFG_FRAGMENT_OFFSET_THRES, x) + /* ANA_ACL:COMMON:OWN_UPSID */ #define ANA_ACL_OWN_UPSID(r) __REG(TARGET_ANA_ACL, 0, 1, 32768, 0, 1, 592, 580, r, 3, 4) @@ -180,6 +336,174 @@ enum sparx5_target { #define ANA_ACL_OWN_UPSID_OWN_UPSID_GET(x)\ FIELD_GET(ANA_ACL_OWN_UPSID_OWN_UPSID, x) +/* ANA_ACL:KEY_SEL:VCAP_S2_KEY_SEL */ +#define ANA_ACL_VCAP_S2_KEY_SEL(g, r) __REG(TARGET_ANA_ACL, 0, 1, 34200, g, 134, 16, 0, r, 4, 4) + +#define ANA_ACL_VCAP_S2_KEY_SEL_KEY_SEL_ENA BIT(13) +#define ANA_ACL_VCAP_S2_KEY_SEL_KEY_SEL_ENA_SET(x)\ + FIELD_PREP(ANA_ACL_VCAP_S2_KEY_SEL_KEY_SEL_ENA, x) +#define ANA_ACL_VCAP_S2_KEY_SEL_KEY_SEL_ENA_GET(x)\ + FIELD_GET(ANA_ACL_VCAP_S2_KEY_SEL_KEY_SEL_ENA, x) + +#define ANA_ACL_VCAP_S2_KEY_SEL_IGR_PORT_MASK_SEL BIT(12) +#define ANA_ACL_VCAP_S2_KEY_SEL_IGR_PORT_MASK_SEL_SET(x)\ + FIELD_PREP(ANA_ACL_VCAP_S2_KEY_SEL_IGR_PORT_MASK_SEL, x) +#define ANA_ACL_VCAP_S2_KEY_SEL_IGR_PORT_MASK_SEL_GET(x)\ + FIELD_GET(ANA_ACL_VCAP_S2_KEY_SEL_IGR_PORT_MASK_SEL, x) + +#define ANA_ACL_VCAP_S2_KEY_SEL_NON_ETH_KEY_SEL GENMASK(11, 10) +#define ANA_ACL_VCAP_S2_KEY_SEL_NON_ETH_KEY_SEL_SET(x)\ + FIELD_PREP(ANA_ACL_VCAP_S2_KEY_SEL_NON_ETH_KEY_SEL, x) +#define ANA_ACL_VCAP_S2_KEY_SEL_NON_ETH_KEY_SEL_GET(x)\ + FIELD_GET(ANA_ACL_VCAP_S2_KEY_SEL_NON_ETH_KEY_SEL, x) + +#define ANA_ACL_VCAP_S2_KEY_SEL_IP4_MC_KEY_SEL GENMASK(9, 8) +#define ANA_ACL_VCAP_S2_KEY_SEL_IP4_MC_KEY_SEL_SET(x)\ + FIELD_PREP(ANA_ACL_VCAP_S2_KEY_SEL_IP4_MC_KEY_SEL, x) +#define ANA_ACL_VCAP_S2_KEY_SEL_IP4_MC_KEY_SEL_GET(x)\ + FIELD_GET(ANA_ACL_VCAP_S2_KEY_SEL_IP4_MC_KEY_SEL, x) + +#define ANA_ACL_VCAP_S2_KEY_SEL_IP4_UC_KEY_SEL GENMASK(7, 6) +#define ANA_ACL_VCAP_S2_KEY_SEL_IP4_UC_KEY_SEL_SET(x)\ + FIELD_PREP(ANA_ACL_VCAP_S2_KEY_SEL_IP4_UC_KEY_SEL, x) +#define ANA_ACL_VCAP_S2_KEY_SEL_IP4_UC_KEY_SEL_GET(x)\ + FIELD_GET(ANA_ACL_VCAP_S2_KEY_SEL_IP4_UC_KEY_SEL, x) + +#define ANA_ACL_VCAP_S2_KEY_SEL_IP6_MC_KEY_SEL GENMASK(5, 3) +#define ANA_ACL_VCAP_S2_KEY_SEL_IP6_MC_KEY_SEL_SET(x)\ + FIELD_PREP(ANA_ACL_VCAP_S2_KEY_SEL_IP6_MC_KEY_SEL, x) +#define ANA_ACL_VCAP_S2_KEY_SEL_IP6_MC_KEY_SEL_GET(x)\ + FIELD_GET(ANA_ACL_VCAP_S2_KEY_SEL_IP6_MC_KEY_SEL, x) + +#define ANA_ACL_VCAP_S2_KEY_SEL_IP6_UC_KEY_SEL GENMASK(2, 1) +#define ANA_ACL_VCAP_S2_KEY_SEL_IP6_UC_KEY_SEL_SET(x)\ + FIELD_PREP(ANA_ACL_VCAP_S2_KEY_SEL_IP6_UC_KEY_SEL, x) +#define ANA_ACL_VCAP_S2_KEY_SEL_IP6_UC_KEY_SEL_GET(x)\ + FIELD_GET(ANA_ACL_VCAP_S2_KEY_SEL_IP6_UC_KEY_SEL, x) + +#define ANA_ACL_VCAP_S2_KEY_SEL_ARP_KEY_SEL BIT(0) +#define ANA_ACL_VCAP_S2_KEY_SEL_ARP_KEY_SEL_SET(x)\ + FIELD_PREP(ANA_ACL_VCAP_S2_KEY_SEL_ARP_KEY_SEL, x) +#define 
ANA_ACL_VCAP_S2_KEY_SEL_ARP_KEY_SEL_GET(x)\ + FIELD_GET(ANA_ACL_VCAP_S2_KEY_SEL_ARP_KEY_SEL, x) + +/* ANA_ACL:CNT_A:CNT_A */ +#define ANA_ACL_CNT_A(g) __REG(TARGET_ANA_ACL, 0, 1, 0, g, 4096, 4, 0, 0, 1, 4) + +/* ANA_ACL:CNT_B:CNT_B */ +#define ANA_ACL_CNT_B(g) __REG(TARGET_ANA_ACL, 0, 1, 16384, g, 4096, 4, 0, 0, 1, 4) + +/* ANA_ACL:STICKY:SEC_LOOKUP_STICKY */ +#define ANA_ACL_SEC_LOOKUP_STICKY(r) __REG(TARGET_ANA_ACL, 0, 1, 36408, 0, 1, 16, 0, r, 4, 4) + +#define ANA_ACL_SEC_LOOKUP_STICKY_KEY_SEL_CLM_STICKY BIT(17) +#define ANA_ACL_SEC_LOOKUP_STICKY_KEY_SEL_CLM_STICKY_SET(x)\ + FIELD_PREP(ANA_ACL_SEC_LOOKUP_STICKY_KEY_SEL_CLM_STICKY, x) +#define ANA_ACL_SEC_LOOKUP_STICKY_KEY_SEL_CLM_STICKY_GET(x)\ + FIELD_GET(ANA_ACL_SEC_LOOKUP_STICKY_KEY_SEL_CLM_STICKY, x) + +#define ANA_ACL_SEC_LOOKUP_STICKY_KEY_SEL_IRLEG_STICKY BIT(16) +#define ANA_ACL_SEC_LOOKUP_STICKY_KEY_SEL_IRLEG_STICKY_SET(x)\ + FIELD_PREP(ANA_ACL_SEC_LOOKUP_STICKY_KEY_SEL_IRLEG_STICKY, x) +#define ANA_ACL_SEC_LOOKUP_STICKY_KEY_SEL_IRLEG_STICKY_GET(x)\ + FIELD_GET(ANA_ACL_SEC_LOOKUP_STICKY_KEY_SEL_IRLEG_STICKY, x) + +#define ANA_ACL_SEC_LOOKUP_STICKY_KEY_SEL_ERLEG_STICKY BIT(15) +#define ANA_ACL_SEC_LOOKUP_STICKY_KEY_SEL_ERLEG_STICKY_SET(x)\ + FIELD_PREP(ANA_ACL_SEC_LOOKUP_STICKY_KEY_SEL_ERLEG_STICKY, x) +#define ANA_ACL_SEC_LOOKUP_STICKY_KEY_SEL_ERLEG_STICKY_GET(x)\ + FIELD_GET(ANA_ACL_SEC_LOOKUP_STICKY_KEY_SEL_ERLEG_STICKY, x) + +#define ANA_ACL_SEC_LOOKUP_STICKY_KEY_SEL_PORT_STICKY BIT(14) +#define ANA_ACL_SEC_LOOKUP_STICKY_KEY_SEL_PORT_STICKY_SET(x)\ + FIELD_PREP(ANA_ACL_SEC_LOOKUP_STICKY_KEY_SEL_PORT_STICKY, x) +#define ANA_ACL_SEC_LOOKUP_STICKY_KEY_SEL_PORT_STICKY_GET(x)\ + FIELD_GET(ANA_ACL_SEC_LOOKUP_STICKY_KEY_SEL_PORT_STICKY, x) + +#define ANA_ACL_SEC_LOOKUP_STICKY_SEC_TYPE_CUSTOM2_STICKY BIT(13) +#define ANA_ACL_SEC_LOOKUP_STICKY_SEC_TYPE_CUSTOM2_STICKY_SET(x)\ + FIELD_PREP(ANA_ACL_SEC_LOOKUP_STICKY_SEC_TYPE_CUSTOM2_STICKY, x) +#define ANA_ACL_SEC_LOOKUP_STICKY_SEC_TYPE_CUSTOM2_STICKY_GET(x)\ + FIELD_GET(ANA_ACL_SEC_LOOKUP_STICKY_SEC_TYPE_CUSTOM2_STICKY, x) + +#define ANA_ACL_SEC_LOOKUP_STICKY_SEC_TYPE_CUSTOM1_STICKY BIT(12) +#define ANA_ACL_SEC_LOOKUP_STICKY_SEC_TYPE_CUSTOM1_STICKY_SET(x)\ + FIELD_PREP(ANA_ACL_SEC_LOOKUP_STICKY_SEC_TYPE_CUSTOM1_STICKY, x) +#define ANA_ACL_SEC_LOOKUP_STICKY_SEC_TYPE_CUSTOM1_STICKY_GET(x)\ + FIELD_GET(ANA_ACL_SEC_LOOKUP_STICKY_SEC_TYPE_CUSTOM1_STICKY, x) + +#define ANA_ACL_SEC_LOOKUP_STICKY_SEC_TYPE_OAM_STICKY BIT(11) +#define ANA_ACL_SEC_LOOKUP_STICKY_SEC_TYPE_OAM_STICKY_SET(x)\ + FIELD_PREP(ANA_ACL_SEC_LOOKUP_STICKY_SEC_TYPE_OAM_STICKY, x) +#define ANA_ACL_SEC_LOOKUP_STICKY_SEC_TYPE_OAM_STICKY_GET(x)\ + FIELD_GET(ANA_ACL_SEC_LOOKUP_STICKY_SEC_TYPE_OAM_STICKY, x) + +#define ANA_ACL_SEC_LOOKUP_STICKY_SEC_TYPE_IP6_VID_STICKY BIT(10) +#define ANA_ACL_SEC_LOOKUP_STICKY_SEC_TYPE_IP6_VID_STICKY_SET(x)\ + FIELD_PREP(ANA_ACL_SEC_LOOKUP_STICKY_SEC_TYPE_IP6_VID_STICKY, x) +#define ANA_ACL_SEC_LOOKUP_STICKY_SEC_TYPE_IP6_VID_STICKY_GET(x)\ + FIELD_GET(ANA_ACL_SEC_LOOKUP_STICKY_SEC_TYPE_IP6_VID_STICKY, x) + +#define ANA_ACL_SEC_LOOKUP_STICKY_SEC_TYPE_IP6_STD_STICKY BIT(9) +#define ANA_ACL_SEC_LOOKUP_STICKY_SEC_TYPE_IP6_STD_STICKY_SET(x)\ + FIELD_PREP(ANA_ACL_SEC_LOOKUP_STICKY_SEC_TYPE_IP6_STD_STICKY, x) +#define ANA_ACL_SEC_LOOKUP_STICKY_SEC_TYPE_IP6_STD_STICKY_GET(x)\ + FIELD_GET(ANA_ACL_SEC_LOOKUP_STICKY_SEC_TYPE_IP6_STD_STICKY, x) + +#define ANA_ACL_SEC_LOOKUP_STICKY_SEC_TYPE_IP6_TCPUDP_STICKY BIT(8) +#define ANA_ACL_SEC_LOOKUP_STICKY_SEC_TYPE_IP6_TCPUDP_STICKY_SET(x)\ + 
FIELD_PREP(ANA_ACL_SEC_LOOKUP_STICKY_SEC_TYPE_IP6_TCPUDP_STICKY, x) +#define ANA_ACL_SEC_LOOKUP_STICKY_SEC_TYPE_IP6_TCPUDP_STICKY_GET(x)\ + FIELD_GET(ANA_ACL_SEC_LOOKUP_STICKY_SEC_TYPE_IP6_TCPUDP_STICKY, x) + +#define ANA_ACL_SEC_LOOKUP_STICKY_SEC_TYPE_IP_7TUPLE_STICKY BIT(7) +#define ANA_ACL_SEC_LOOKUP_STICKY_SEC_TYPE_IP_7TUPLE_STICKY_SET(x)\ + FIELD_PREP(ANA_ACL_SEC_LOOKUP_STICKY_SEC_TYPE_IP_7TUPLE_STICKY, x) +#define ANA_ACL_SEC_LOOKUP_STICKY_SEC_TYPE_IP_7TUPLE_STICKY_GET(x)\ + FIELD_GET(ANA_ACL_SEC_LOOKUP_STICKY_SEC_TYPE_IP_7TUPLE_STICKY, x) + +#define ANA_ACL_SEC_LOOKUP_STICKY_SEC_TYPE_IP4_VID_STICKY BIT(6) +#define ANA_ACL_SEC_LOOKUP_STICKY_SEC_TYPE_IP4_VID_STICKY_SET(x)\ + FIELD_PREP(ANA_ACL_SEC_LOOKUP_STICKY_SEC_TYPE_IP4_VID_STICKY, x) +#define ANA_ACL_SEC_LOOKUP_STICKY_SEC_TYPE_IP4_VID_STICKY_GET(x)\ + FIELD_GET(ANA_ACL_SEC_LOOKUP_STICKY_SEC_TYPE_IP4_VID_STICKY, x) + +#define ANA_ACL_SEC_LOOKUP_STICKY_SEC_TYPE_IP4_TCPUDP_STICKY BIT(5) +#define ANA_ACL_SEC_LOOKUP_STICKY_SEC_TYPE_IP4_TCPUDP_STICKY_SET(x)\ + FIELD_PREP(ANA_ACL_SEC_LOOKUP_STICKY_SEC_TYPE_IP4_TCPUDP_STICKY, x) +#define ANA_ACL_SEC_LOOKUP_STICKY_SEC_TYPE_IP4_TCPUDP_STICKY_GET(x)\ + FIELD_GET(ANA_ACL_SEC_LOOKUP_STICKY_SEC_TYPE_IP4_TCPUDP_STICKY, x) + +#define ANA_ACL_SEC_LOOKUP_STICKY_SEC_TYPE_IP4_OTHER_STICKY BIT(4) +#define ANA_ACL_SEC_LOOKUP_STICKY_SEC_TYPE_IP4_OTHER_STICKY_SET(x)\ + FIELD_PREP(ANA_ACL_SEC_LOOKUP_STICKY_SEC_TYPE_IP4_OTHER_STICKY, x) +#define ANA_ACL_SEC_LOOKUP_STICKY_SEC_TYPE_IP4_OTHER_STICKY_GET(x)\ + FIELD_GET(ANA_ACL_SEC_LOOKUP_STICKY_SEC_TYPE_IP4_OTHER_STICKY, x) + +#define ANA_ACL_SEC_LOOKUP_STICKY_SEC_TYPE_ARP_STICKY BIT(3) +#define ANA_ACL_SEC_LOOKUP_STICKY_SEC_TYPE_ARP_STICKY_SET(x)\ + FIELD_PREP(ANA_ACL_SEC_LOOKUP_STICKY_SEC_TYPE_ARP_STICKY, x) +#define ANA_ACL_SEC_LOOKUP_STICKY_SEC_TYPE_ARP_STICKY_GET(x)\ + FIELD_GET(ANA_ACL_SEC_LOOKUP_STICKY_SEC_TYPE_ARP_STICKY, x) + +#define ANA_ACL_SEC_LOOKUP_STICKY_SEC_TYPE_MAC_SNAP_STICKY BIT(2) +#define ANA_ACL_SEC_LOOKUP_STICKY_SEC_TYPE_MAC_SNAP_STICKY_SET(x)\ + FIELD_PREP(ANA_ACL_SEC_LOOKUP_STICKY_SEC_TYPE_MAC_SNAP_STICKY, x) +#define ANA_ACL_SEC_LOOKUP_STICKY_SEC_TYPE_MAC_SNAP_STICKY_GET(x)\ + FIELD_GET(ANA_ACL_SEC_LOOKUP_STICKY_SEC_TYPE_MAC_SNAP_STICKY, x) + +#define ANA_ACL_SEC_LOOKUP_STICKY_SEC_TYPE_MAC_LLC_STICKY BIT(1) +#define ANA_ACL_SEC_LOOKUP_STICKY_SEC_TYPE_MAC_LLC_STICKY_SET(x)\ + FIELD_PREP(ANA_ACL_SEC_LOOKUP_STICKY_SEC_TYPE_MAC_LLC_STICKY, x) +#define ANA_ACL_SEC_LOOKUP_STICKY_SEC_TYPE_MAC_LLC_STICKY_GET(x)\ + FIELD_GET(ANA_ACL_SEC_LOOKUP_STICKY_SEC_TYPE_MAC_LLC_STICKY, x) + +#define ANA_ACL_SEC_LOOKUP_STICKY_SEC_TYPE_MAC_ETYPE_STICKY BIT(0) +#define ANA_ACL_SEC_LOOKUP_STICKY_SEC_TYPE_MAC_ETYPE_STICKY_SET(x)\ + FIELD_PREP(ANA_ACL_SEC_LOOKUP_STICKY_SEC_TYPE_MAC_ETYPE_STICKY, x) +#define ANA_ACL_SEC_LOOKUP_STICKY_SEC_TYPE_MAC_ETYPE_STICKY_GET(x)\ + FIELD_GET(ANA_ACL_SEC_LOOKUP_STICKY_SEC_TYPE_MAC_ETYPE_STICKY, x) + /* ANA_AC_POL:POL_ALL_CFG:POL_UPD_INT_CFG */ #define ANA_AC_POL_POL_UPD_INT_CFG __REG(TARGET_ANA_AC_POL, 0, 1, 75968, 0, 1, 1160, 1148, 0, 1, 4) @@ -5039,6 +5363,138 @@ enum sparx5_target { #define REW_RAM_INIT_RAM_CFG_HOOK_GET(x)\ FIELD_GET(REW_RAM_INIT_RAM_CFG_HOOK, x) +/* VCAP_SUPER:VCAP_CORE_CFG:VCAP_UPDATE_CTRL */ +#define VCAP_SUPER_CTRL __REG(TARGET_VCAP_SUPER, 0, 1, 0, 0, 1, 8, 0, 0, 1, 4) + +#define VCAP_SUPER_CTRL_UPDATE_CMD GENMASK(24, 22) +#define VCAP_SUPER_CTRL_UPDATE_CMD_SET(x)\ + FIELD_PREP(VCAP_SUPER_CTRL_UPDATE_CMD, x) +#define VCAP_SUPER_CTRL_UPDATE_CMD_GET(x)\ + FIELD_GET(VCAP_SUPER_CTRL_UPDATE_CMD, x) + 
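The register header additions above and below all follow one pattern: a GENMASK() field definition paired with _SET/_GET helpers built on FIELD_PREP()/FIELD_GET(), so callers can pack or extract one field of a 32-bit register without open-coding shifts and masks. Below is a small userspace re-creation of that pattern, using made-up field names loosely modelled on the VCAP update-control register; it only shows how such a SET/GET pair composes into a register write, and does not mirror the kernel headers.

#include <stdio.h>
#include <stdint.h>

/* Userspace stand-ins for the kernel's GENMASK/FIELD_PREP/FIELD_GET. */
#define GENMASK32(h, l)     (((0xffffffffu) >> (31 - (h))) & ~((1u << (l)) - 1u))
#define FIELD_PREP32(m, v)  (((uint32_t)(v) << __builtin_ctz(m)) & (m))
#define FIELD_GET32(m, r)   (((r) & (m)) >> __builtin_ctz(m))

/* Example field layout in the style of the VCAP update-control register. */
#define CTRL_UPDATE_CMD     GENMASK32(24, 22)
#define CTRL_UPDATE_ADDR    GENMASK32(18, 3)
#define CTRL_UPDATE_SHOT    (1u << 2)

#define CTRL_UPDATE_CMD_SET(x)   FIELD_PREP32(CTRL_UPDATE_CMD, x)
#define CTRL_UPDATE_ADDR_SET(x)  FIELD_PREP32(CTRL_UPDATE_ADDR, x)
#define CTRL_UPDATE_CMD_GET(r)   FIELD_GET32(CTRL_UPDATE_CMD, r)

int main(void)
{
        /* Build a write: command 2 at address 0x40, then kick the one-shot bit. */
        uint32_t reg = CTRL_UPDATE_CMD_SET(2) |
                       CTRL_UPDATE_ADDR_SET(0x40) |
                       CTRL_UPDATE_SHOT;

        printf("reg = 0x%08x, cmd read back = %u\n",
               (unsigned)reg, (unsigned)CTRL_UPDATE_CMD_GET(reg));
        return 0;
}
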
+#define VCAP_SUPER_CTRL_UPDATE_ENTRY_DIS BIT(21) +#define VCAP_SUPER_CTRL_UPDATE_ENTRY_DIS_SET(x)\ + FIELD_PREP(VCAP_SUPER_CTRL_UPDATE_ENTRY_DIS, x) +#define VCAP_SUPER_CTRL_UPDATE_ENTRY_DIS_GET(x)\ + FIELD_GET(VCAP_SUPER_CTRL_UPDATE_ENTRY_DIS, x) + +#define VCAP_SUPER_CTRL_UPDATE_ACTION_DIS BIT(20) +#define VCAP_SUPER_CTRL_UPDATE_ACTION_DIS_SET(x)\ + FIELD_PREP(VCAP_SUPER_CTRL_UPDATE_ACTION_DIS, x) +#define VCAP_SUPER_CTRL_UPDATE_ACTION_DIS_GET(x)\ + FIELD_GET(VCAP_SUPER_CTRL_UPDATE_ACTION_DIS, x) + +#define VCAP_SUPER_CTRL_UPDATE_CNT_DIS BIT(19) +#define VCAP_SUPER_CTRL_UPDATE_CNT_DIS_SET(x)\ + FIELD_PREP(VCAP_SUPER_CTRL_UPDATE_CNT_DIS, x) +#define VCAP_SUPER_CTRL_UPDATE_CNT_DIS_GET(x)\ + FIELD_GET(VCAP_SUPER_CTRL_UPDATE_CNT_DIS, x) + +#define VCAP_SUPER_CTRL_UPDATE_ADDR GENMASK(18, 3) +#define VCAP_SUPER_CTRL_UPDATE_ADDR_SET(x)\ + FIELD_PREP(VCAP_SUPER_CTRL_UPDATE_ADDR, x) +#define VCAP_SUPER_CTRL_UPDATE_ADDR_GET(x)\ + FIELD_GET(VCAP_SUPER_CTRL_UPDATE_ADDR, x) + +#define VCAP_SUPER_CTRL_UPDATE_SHOT BIT(2) +#define VCAP_SUPER_CTRL_UPDATE_SHOT_SET(x)\ + FIELD_PREP(VCAP_SUPER_CTRL_UPDATE_SHOT, x) +#define VCAP_SUPER_CTRL_UPDATE_SHOT_GET(x)\ + FIELD_GET(VCAP_SUPER_CTRL_UPDATE_SHOT, x) + +#define VCAP_SUPER_CTRL_CLEAR_CACHE BIT(1) +#define VCAP_SUPER_CTRL_CLEAR_CACHE_SET(x)\ + FIELD_PREP(VCAP_SUPER_CTRL_CLEAR_CACHE, x) +#define VCAP_SUPER_CTRL_CLEAR_CACHE_GET(x)\ + FIELD_GET(VCAP_SUPER_CTRL_CLEAR_CACHE, x) + +#define VCAP_SUPER_CTRL_MV_TRAFFIC_IGN BIT(0) +#define VCAP_SUPER_CTRL_MV_TRAFFIC_IGN_SET(x)\ + FIELD_PREP(VCAP_SUPER_CTRL_MV_TRAFFIC_IGN, x) +#define VCAP_SUPER_CTRL_MV_TRAFFIC_IGN_GET(x)\ + FIELD_GET(VCAP_SUPER_CTRL_MV_TRAFFIC_IGN, x) + +/* VCAP_SUPER:VCAP_CORE_CFG:VCAP_MV_CFG */ +#define VCAP_SUPER_CFG __REG(TARGET_VCAP_SUPER, 0, 1, 0, 0, 1, 8, 4, 0, 1, 4) + +#define VCAP_SUPER_CFG_MV_NUM_POS GENMASK(31, 16) +#define VCAP_SUPER_CFG_MV_NUM_POS_SET(x)\ + FIELD_PREP(VCAP_SUPER_CFG_MV_NUM_POS, x) +#define VCAP_SUPER_CFG_MV_NUM_POS_GET(x)\ + FIELD_GET(VCAP_SUPER_CFG_MV_NUM_POS, x) + +#define VCAP_SUPER_CFG_MV_SIZE GENMASK(15, 0) +#define VCAP_SUPER_CFG_MV_SIZE_SET(x)\ + FIELD_PREP(VCAP_SUPER_CFG_MV_SIZE, x) +#define VCAP_SUPER_CFG_MV_SIZE_GET(x)\ + FIELD_GET(VCAP_SUPER_CFG_MV_SIZE, x) + +/* VCAP_SUPER:VCAP_CORE_CACHE:VCAP_ENTRY_DAT */ +#define VCAP_SUPER_VCAP_ENTRY_DAT(r) __REG(TARGET_VCAP_SUPER, 0, 1, 8, 0, 1, 904, 0, r, 64, 4) + +/* VCAP_SUPER:VCAP_CORE_CACHE:VCAP_MASK_DAT */ +#define VCAP_SUPER_VCAP_MASK_DAT(r) __REG(TARGET_VCAP_SUPER, 0, 1, 8, 0, 1, 904, 256, r, 64, 4) + +/* VCAP_SUPER:VCAP_CORE_CACHE:VCAP_ACTION_DAT */ +#define VCAP_SUPER_VCAP_ACTION_DAT(r) __REG(TARGET_VCAP_SUPER, 0, 1, 8, 0, 1, 904, 512, r, 64, 4) + +/* VCAP_SUPER:VCAP_CORE_CACHE:VCAP_CNT_DAT */ +#define VCAP_SUPER_VCAP_CNT_DAT(r) __REG(TARGET_VCAP_SUPER, 0, 1, 8, 0, 1, 904, 768, r, 32, 4) + +/* VCAP_SUPER:VCAP_CORE_CACHE:VCAP_CNT_FW_DAT */ +#define VCAP_SUPER_VCAP_CNT_FW_DAT __REG(TARGET_VCAP_SUPER, 0, 1, 8, 0, 1, 904, 896, 0, 1, 4) + +/* VCAP_SUPER:VCAP_CORE_CACHE:VCAP_TG_DAT */ +#define VCAP_SUPER_VCAP_TG_DAT __REG(TARGET_VCAP_SUPER, 0, 1, 8, 0, 1, 904, 900, 0, 1, 4) + +/* VCAP_SUPER:VCAP_CORE_MAP:VCAP_CORE_IDX */ +#define VCAP_SUPER_IDX __REG(TARGET_VCAP_SUPER, 0, 1, 912, 0, 1, 8, 0, 0, 1, 4) + +#define VCAP_SUPER_IDX_CORE_IDX GENMASK(3, 0) +#define VCAP_SUPER_IDX_CORE_IDX_SET(x)\ + FIELD_PREP(VCAP_SUPER_IDX_CORE_IDX, x) +#define VCAP_SUPER_IDX_CORE_IDX_GET(x)\ + FIELD_GET(VCAP_SUPER_IDX_CORE_IDX, x) + +/* VCAP_SUPER:VCAP_CORE_MAP:VCAP_CORE_MAP */ +#define VCAP_SUPER_MAP __REG(TARGET_VCAP_SUPER, 0, 1, 912, 0, 
1, 8, 4, 0, 1, 4) + +#define VCAP_SUPER_MAP_CORE_MAP GENMASK(2, 0) +#define VCAP_SUPER_MAP_CORE_MAP_SET(x)\ + FIELD_PREP(VCAP_SUPER_MAP_CORE_MAP, x) +#define VCAP_SUPER_MAP_CORE_MAP_GET(x)\ + FIELD_GET(VCAP_SUPER_MAP_CORE_MAP, x) + +/* VCAP_SUPER:VCAP_CONST:VCAP_VER */ +#define VCAP_SUPER_VCAP_VER __REG(TARGET_VCAP_SUPER, 0, 1, 924, 0, 1, 40, 0, 0, 1, 4) + +/* VCAP_SUPER:VCAP_CONST:ENTRY_WIDTH */ +#define VCAP_SUPER_ENTRY_WIDTH __REG(TARGET_VCAP_SUPER, 0, 1, 924, 0, 1, 40, 4, 0, 1, 4) + +/* VCAP_SUPER:VCAP_CONST:ENTRY_CNT */ +#define VCAP_SUPER_ENTRY_CNT __REG(TARGET_VCAP_SUPER, 0, 1, 924, 0, 1, 40, 8, 0, 1, 4) + +/* VCAP_SUPER:VCAP_CONST:ENTRY_SWCNT */ +#define VCAP_SUPER_ENTRY_SWCNT __REG(TARGET_VCAP_SUPER, 0, 1, 924, 0, 1, 40, 12, 0, 1, 4) + +/* VCAP_SUPER:VCAP_CONST:ENTRY_TG_WIDTH */ +#define VCAP_SUPER_ENTRY_TG_WIDTH __REG(TARGET_VCAP_SUPER, 0, 1, 924, 0, 1, 40, 16, 0, 1, 4) + +/* VCAP_SUPER:VCAP_CONST:ACTION_DEF_CNT */ +#define VCAP_SUPER_ACTION_DEF_CNT __REG(TARGET_VCAP_SUPER, 0, 1, 924, 0, 1, 40, 20, 0, 1, 4) + +/* VCAP_SUPER:VCAP_CONST:ACTION_WIDTH */ +#define VCAP_SUPER_ACTION_WIDTH __REG(TARGET_VCAP_SUPER, 0, 1, 924, 0, 1, 40, 24, 0, 1, 4) + +/* VCAP_SUPER:VCAP_CONST:CNT_WIDTH */ +#define VCAP_SUPER_CNT_WIDTH __REG(TARGET_VCAP_SUPER, 0, 1, 924, 0, 1, 40, 28, 0, 1, 4) + +/* VCAP_SUPER:VCAP_CONST:CORE_CNT */ +#define VCAP_SUPER_CORE_CNT __REG(TARGET_VCAP_SUPER, 0, 1, 924, 0, 1, 40, 32, 0, 1, 4) + +/* VCAP_SUPER:VCAP_CONST:IF_CNT */ +#define VCAP_SUPER_IF_CNT __REG(TARGET_VCAP_SUPER, 0, 1, 924, 0, 1, 40, 36, 0, 1, 4) + /* VCAP_SUPER:RAM_CTRL:RAM_INIT */ #define VCAP_SUPER_RAM_INIT __REG(TARGET_VCAP_SUPER, 0, 1, 1120, 0, 1, 4, 0, 0, 1, 4) diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_tc.c b/drivers/net/ethernet/microchip/sparx5/sparx5_tc.c index e05429c751ee..9432251b8322 100644 --- a/drivers/net/ethernet/microchip/sparx5/sparx5_tc.c +++ b/drivers/net/ethernet/microchip/sparx5/sparx5_tc.c @@ -10,6 +10,50 @@ #include "sparx5_main.h" #include "sparx5_qos.h" +/* tc block handling */ +static LIST_HEAD(sparx5_block_cb_list); + +static int sparx5_tc_block_cb(enum tc_setup_type type, + void *type_data, + void *cb_priv, bool ingress) +{ + struct net_device *ndev = cb_priv; + + if (type == TC_SETUP_CLSFLOWER) + return sparx5_tc_flower(ndev, type_data, ingress); + return -EOPNOTSUPP; +} + +static int sparx5_tc_block_cb_ingress(enum tc_setup_type type, + void *type_data, + void *cb_priv) +{ + return sparx5_tc_block_cb(type, type_data, cb_priv, true); +} + +static int sparx5_tc_block_cb_egress(enum tc_setup_type type, + void *type_data, + void *cb_priv) +{ + return sparx5_tc_block_cb(type, type_data, cb_priv, false); +} + +static int sparx5_tc_setup_block(struct net_device *ndev, + struct flow_block_offload *fbo) +{ + flow_setup_cb_t *cb; + + if (fbo->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS) + cb = sparx5_tc_block_cb_ingress; + else if (fbo->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS) + cb = sparx5_tc_block_cb_egress; + else + return -EOPNOTSUPP; + + return flow_block_cb_setup_simple(fbo, &sparx5_block_cb_list, + cb, ndev, ndev, false); +} + static void sparx5_tc_get_layer_and_idx(u32 parent, u32 portno, u32 *layer, u32 *idx) { @@ -111,6 +155,8 @@ int sparx5_port_setup_tc(struct net_device *ndev, enum tc_setup_type type, void *type_data) { switch (type) { + case TC_SETUP_BLOCK: + return sparx5_tc_setup_block(ndev, type_data); case TC_SETUP_QDISC_MQPRIO: return sparx5_tc_setup_qdisc_mqprio(ndev, type_data); case TC_SETUP_QDISC_TBF: diff --git 
a/drivers/net/ethernet/microchip/sparx5/sparx5_tc.h b/drivers/net/ethernet/microchip/sparx5/sparx5_tc.h index 5b55e11b77e1..2b07a93fc9b7 100644 --- a/drivers/net/ethernet/microchip/sparx5/sparx5_tc.h +++ b/drivers/net/ethernet/microchip/sparx5/sparx5_tc.h @@ -7,9 +7,23 @@ #ifndef __SPARX5_TC_H__ #define __SPARX5_TC_H__ +#include <net/flow_offload.h> #include <linux/netdevice.h> +/* Controls how PORT_MASK is applied */ +enum SPX5_PORT_MASK_MODE { + SPX5_PMM_OR_DSTMASK, + SPX5_PMM_AND_VLANMASK, + SPX5_PMM_REPLACE_PGID, + SPX5_PMM_REPLACE_ALL, + SPX5_PMM_REDIR_PGID, + SPX5_PMM_OR_PGID_MASK, +}; + int sparx5_port_setup_tc(struct net_device *ndev, enum tc_setup_type type, void *type_data); +int sparx5_tc_flower(struct net_device *ndev, struct flow_cls_offload *fco, + bool ingress); + #endif /* __SPARX5_TC_H__ */ diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_tc_flower.c b/drivers/net/ethernet/microchip/sparx5/sparx5_tc_flower.c new file mode 100644 index 000000000000..626558a5c850 --- /dev/null +++ b/drivers/net/ethernet/microchip/sparx5/sparx5_tc_flower.c @@ -0,0 +1,217 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* Microchip VCAP API + * + * Copyright (c) 2022 Microchip Technology Inc. and its subsidiaries. + */ + +#include <net/tcp.h> + +#include "sparx5_tc.h" +#include "vcap_api.h" +#include "vcap_api_client.h" +#include "sparx5_main.h" +#include "sparx5_vcap_impl.h" + +struct sparx5_tc_flower_parse_usage { + struct flow_cls_offload *fco; + struct flow_rule *frule; + struct vcap_rule *vrule; + unsigned int used_keys; +}; + +static int sparx5_tc_flower_handler_ethaddr_usage(struct sparx5_tc_flower_parse_usage *st) +{ + enum vcap_key_field smac_key = VCAP_KF_L2_SMAC; + enum vcap_key_field dmac_key = VCAP_KF_L2_DMAC; + struct flow_match_eth_addrs match; + struct vcap_u48_key smac, dmac; + int err = 0; + + flow_rule_match_eth_addrs(st->frule, &match); + + if (!is_zero_ether_addr(match.mask->src)) { + vcap_netbytes_copy(smac.value, match.key->src, ETH_ALEN); + vcap_netbytes_copy(smac.mask, match.mask->src, ETH_ALEN); + err = vcap_rule_add_key_u48(st->vrule, smac_key, &smac); + if (err) + goto out; + } + + if (!is_zero_ether_addr(match.mask->dst)) { + vcap_netbytes_copy(dmac.value, match.key->dst, ETH_ALEN); + vcap_netbytes_copy(dmac.mask, match.mask->dst, ETH_ALEN); + err = vcap_rule_add_key_u48(st->vrule, dmac_key, &dmac); + if (err) + goto out; + } + + st->used_keys |= BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS); + + return err; + +out: + NL_SET_ERR_MSG_MOD(st->fco->common.extack, "eth_addr parse error"); + return err; +} + +static int (*sparx5_tc_flower_usage_handlers[])(struct sparx5_tc_flower_parse_usage *st) = { + /* More dissector handlers will be added here later */ + [FLOW_DISSECTOR_KEY_ETH_ADDRS] = sparx5_tc_flower_handler_ethaddr_usage, +}; + +static int sparx5_tc_use_dissectors(struct flow_cls_offload *fco, + struct vcap_admin *admin, + struct vcap_rule *vrule) +{ + struct sparx5_tc_flower_parse_usage state = { + .fco = fco, + .vrule = vrule, + }; + int idx, err = 0; + + state.frule = flow_cls_offload_flow_rule(fco); + for (idx = 0; idx < ARRAY_SIZE(sparx5_tc_flower_usage_handlers); ++idx) { + if (!flow_rule_match_key(state.frule, idx)) + continue; + if (!sparx5_tc_flower_usage_handlers[idx]) + continue; + err = sparx5_tc_flower_usage_handlers[idx](&state); + if (err) + return err; + } + return err; +} + +static int sparx5_tc_flower_replace(struct net_device *ndev, + struct flow_cls_offload *fco, + struct vcap_admin *admin) +{ + struct sparx5_port *port = netdev_priv(ndev); + struct 
flow_action_entry *act; + struct vcap_control *vctrl; + struct flow_rule *frule; + struct vcap_rule *vrule; + int err, idx; + + frule = flow_cls_offload_flow_rule(fco); + if (!flow_action_has_entries(&frule->action)) { + NL_SET_ERR_MSG_MOD(fco->common.extack, "No actions"); + return -EINVAL; + } + + if (!flow_action_basic_hw_stats_check(&frule->action, fco->common.extack)) + return -EOPNOTSUPP; + + vctrl = port->sparx5->vcap_ctrl; + vrule = vcap_alloc_rule(vctrl, ndev, fco->common.chain_index, VCAP_USER_TC, + fco->common.prio, 0); + if (IS_ERR(vrule)) + return PTR_ERR(vrule); + + vrule->cookie = fco->cookie; + sparx5_tc_use_dissectors(fco, admin, vrule); + flow_action_for_each(idx, act, &frule->action) { + switch (act->id) { + case FLOW_ACTION_TRAP: + err = vcap_rule_add_action_bit(vrule, + VCAP_AF_CPU_COPY_ENA, + VCAP_BIT_1); + if (err) + goto out; + err = vcap_rule_add_action_u32(vrule, + VCAP_AF_CPU_QUEUE_NUM, 0); + if (err) + goto out; + err = vcap_rule_add_action_u32(vrule, VCAP_AF_MASK_MODE, + SPX5_PMM_REPLACE_ALL); + if (err) + goto out; + /* For now the actionset is hardcoded */ + err = vcap_set_rule_set_actionset(vrule, + VCAP_AFS_BASE_TYPE); + if (err) + goto out; + break; + case FLOW_ACTION_ACCEPT: + /* For now the actionset is hardcoded */ + err = vcap_set_rule_set_actionset(vrule, + VCAP_AFS_BASE_TYPE); + if (err) + goto out; + break; + default: + NL_SET_ERR_MSG_MOD(fco->common.extack, + "Unsupported TC action"); + err = -EOPNOTSUPP; + goto out; + } + } + /* For now the keyset is hardcoded */ + err = vcap_set_rule_set_keyset(vrule, VCAP_KFS_MAC_ETYPE); + if (err) { + NL_SET_ERR_MSG_MOD(fco->common.extack, + "No matching port keyset for filter protocol and keys"); + goto out; + } + err = vcap_val_rule(vrule, ETH_P_ALL); + if (err) { + vcap_set_tc_exterr(fco, vrule); + goto out; + } + err = vcap_add_rule(vrule); + if (err) + NL_SET_ERR_MSG_MOD(fco->common.extack, + "Could not add the filter"); +out: + vcap_free_rule(vrule); + return err; +} + +static int sparx5_tc_flower_destroy(struct net_device *ndev, + struct flow_cls_offload *fco, + struct vcap_admin *admin) +{ + struct sparx5_port *port = netdev_priv(ndev); + struct vcap_control *vctrl; + int err = -ENOENT, rule_id; + + vctrl = port->sparx5->vcap_ctrl; + while (true) { + rule_id = vcap_lookup_rule_by_cookie(vctrl, fco->cookie); + if (rule_id <= 0) + break; + err = vcap_del_rule(vctrl, ndev, rule_id); + if (err) { + pr_err("%s:%d: could not delete rule %d\n", + __func__, __LINE__, rule_id); + break; + } + } + return err; +} + +int sparx5_tc_flower(struct net_device *ndev, struct flow_cls_offload *fco, + bool ingress) +{ + struct sparx5_port *port = netdev_priv(ndev); + struct vcap_control *vctrl; + struct vcap_admin *admin; + int err = -EINVAL; + + /* Get vcap instance from the chain id */ + vctrl = port->sparx5->vcap_ctrl; + admin = vcap_find_admin(vctrl, fco->common.chain_index); + if (!admin) { + NL_SET_ERR_MSG_MOD(fco->common.extack, "Invalid chain"); + return err; + } + + switch (fco->command) { + case FLOW_CLS_REPLACE: + return sparx5_tc_flower_replace(ndev, fco, admin); + case FLOW_CLS_DESTROY: + return sparx5_tc_flower_destroy(ndev, fco, admin); + default: + return -EOPNOTSUPP; + } +} diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_vcap_ag_api.c b/drivers/net/ethernet/microchip/sparx5/sparx5_vcap_ag_api.c new file mode 100644 index 000000000000..1bd987c664e8 --- /dev/null +++ b/drivers/net/ethernet/microchip/sparx5/sparx5_vcap_ag_api.c @@ -0,0 +1,1351 @@ +// SPDX-License-Identifier: BSD-3-Clause +/* 
Copyright (C) 2022 Microchip Technology Inc. and its subsidiaries. + * Microchip VCAP API + */ + +/* This file is autogenerated by cml-utils 2022-10-13 10:04:41 +0200. + * Commit ID: fd7cafd175899f0672c73afb3a30fc872500ae86 + */ + +#include <linux/types.h> +#include <linux/kernel.h> + +#include "vcap_api.h" +#include "sparx5_vcap_ag_api.h" + +/* keyfields */ +static const struct vcap_field is2_mac_etype_keyfield[] = { + [VCAP_KF_TYPE] = { + .type = VCAP_FIELD_U32, + .offset = 0, + .width = 4, + }, + [VCAP_KF_LOOKUP_FIRST_IS] = { + .type = VCAP_FIELD_BIT, + .offset = 4, + .width = 1, + }, + [VCAP_KF_LOOKUP_PAG] = { + .type = VCAP_FIELD_U32, + .offset = 5, + .width = 8, + }, + [VCAP_KF_IF_IGR_PORT_MASK_L3] = { + .type = VCAP_FIELD_BIT, + .offset = 13, + .width = 1, + }, + [VCAP_KF_IF_IGR_PORT_MASK_RNG] = { + .type = VCAP_FIELD_U32, + .offset = 14, + .width = 4, + }, + [VCAP_KF_IF_IGR_PORT_MASK_SEL] = { + .type = VCAP_FIELD_U32, + .offset = 18, + .width = 2, + }, + [VCAP_KF_IF_IGR_PORT_MASK] = { + .type = VCAP_FIELD_U32, + .offset = 20, + .width = 32, + }, + [VCAP_KF_L2_MC_IS] = { + .type = VCAP_FIELD_BIT, + .offset = 52, + .width = 1, + }, + [VCAP_KF_L2_BC_IS] = { + .type = VCAP_FIELD_BIT, + .offset = 53, + .width = 1, + }, + [VCAP_KF_8021Q_VLAN_TAGGED_IS] = { + .type = VCAP_FIELD_BIT, + .offset = 54, + .width = 1, + }, + [VCAP_KF_ISDX_GT0_IS] = { + .type = VCAP_FIELD_BIT, + .offset = 55, + .width = 1, + }, + [VCAP_KF_ISDX_CLS] = { + .type = VCAP_FIELD_U32, + .offset = 56, + .width = 12, + }, + [VCAP_KF_8021Q_VID_CLS] = { + .type = VCAP_FIELD_U32, + .offset = 68, + .width = 13, + }, + [VCAP_KF_8021Q_DEI_CLS] = { + .type = VCAP_FIELD_BIT, + .offset = 81, + .width = 1, + }, + [VCAP_KF_8021Q_PCP_CLS] = { + .type = VCAP_FIELD_U32, + .offset = 82, + .width = 3, + }, + [VCAP_KF_L2_FWD_IS] = { + .type = VCAP_FIELD_BIT, + .offset = 85, + .width = 1, + }, + [VCAP_KF_L3_RT_IS] = { + .type = VCAP_FIELD_BIT, + .offset = 88, + .width = 1, + }, + [VCAP_KF_L3_DST_IS] = { + .type = VCAP_FIELD_BIT, + .offset = 89, + .width = 1, + }, + [VCAP_KF_L2_DMAC] = { + .type = VCAP_FIELD_U48, + .offset = 90, + .width = 48, + }, + [VCAP_KF_L2_SMAC] = { + .type = VCAP_FIELD_U48, + .offset = 138, + .width = 48, + }, + [VCAP_KF_ETYPE_LEN_IS] = { + .type = VCAP_FIELD_BIT, + .offset = 186, + .width = 1, + }, + [VCAP_KF_ETYPE] = { + .type = VCAP_FIELD_U32, + .offset = 187, + .width = 16, + }, + [VCAP_KF_L2_PAYLOAD_ETYPE] = { + .type = VCAP_FIELD_U64, + .offset = 203, + .width = 64, + }, + [VCAP_KF_L4_RNG] = { + .type = VCAP_FIELD_U32, + .offset = 267, + .width = 16, + }, + [VCAP_KF_OAM_CCM_CNTS_EQ0] = { + .type = VCAP_FIELD_BIT, + .offset = 283, + .width = 1, + }, + [VCAP_KF_OAM_Y1731_IS] = { + .type = VCAP_FIELD_BIT, + .offset = 284, + .width = 1, + }, +}; + +static const struct vcap_field is2_arp_keyfield[] = { + [VCAP_KF_TYPE] = { + .type = VCAP_FIELD_U32, + .offset = 0, + .width = 4, + }, + [VCAP_KF_LOOKUP_FIRST_IS] = { + .type = VCAP_FIELD_BIT, + .offset = 4, + .width = 1, + }, + [VCAP_KF_LOOKUP_PAG] = { + .type = VCAP_FIELD_U32, + .offset = 5, + .width = 8, + }, + [VCAP_KF_IF_IGR_PORT_MASK_L3] = { + .type = VCAP_FIELD_BIT, + .offset = 13, + .width = 1, + }, + [VCAP_KF_IF_IGR_PORT_MASK_RNG] = { + .type = VCAP_FIELD_U32, + .offset = 14, + .width = 4, + }, + [VCAP_KF_IF_IGR_PORT_MASK_SEL] = { + .type = VCAP_FIELD_U32, + .offset = 18, + .width = 2, + }, + [VCAP_KF_IF_IGR_PORT_MASK] = { + .type = VCAP_FIELD_U32, + .offset = 20, + .width = 32, + }, + [VCAP_KF_L2_MC_IS] = { + .type = VCAP_FIELD_BIT, + .offset = 52, + .width = 
1, + }, + [VCAP_KF_L2_BC_IS] = { + .type = VCAP_FIELD_BIT, + .offset = 53, + .width = 1, + }, + [VCAP_KF_8021Q_VLAN_TAGGED_IS] = { + .type = VCAP_FIELD_BIT, + .offset = 54, + .width = 1, + }, + [VCAP_KF_ISDX_GT0_IS] = { + .type = VCAP_FIELD_BIT, + .offset = 55, + .width = 1, + }, + [VCAP_KF_ISDX_CLS] = { + .type = VCAP_FIELD_U32, + .offset = 56, + .width = 12, + }, + [VCAP_KF_8021Q_VID_CLS] = { + .type = VCAP_FIELD_U32, + .offset = 68, + .width = 13, + }, + [VCAP_KF_8021Q_DEI_CLS] = { + .type = VCAP_FIELD_BIT, + .offset = 81, + .width = 1, + }, + [VCAP_KF_8021Q_PCP_CLS] = { + .type = VCAP_FIELD_U32, + .offset = 82, + .width = 3, + }, + [VCAP_KF_L2_FWD_IS] = { + .type = VCAP_FIELD_BIT, + .offset = 85, + .width = 1, + }, + [VCAP_KF_L2_SMAC] = { + .type = VCAP_FIELD_U48, + .offset = 86, + .width = 48, + }, + [VCAP_KF_ARP_ADDR_SPACE_OK_IS] = { + .type = VCAP_FIELD_BIT, + .offset = 134, + .width = 1, + }, + [VCAP_KF_ARP_PROTO_SPACE_OK_IS] = { + .type = VCAP_FIELD_BIT, + .offset = 135, + .width = 1, + }, + [VCAP_KF_ARP_LEN_OK_IS] = { + .type = VCAP_FIELD_BIT, + .offset = 136, + .width = 1, + }, + [VCAP_KF_ARP_TGT_MATCH_IS] = { + .type = VCAP_FIELD_BIT, + .offset = 137, + .width = 1, + }, + [VCAP_KF_ARP_SENDER_MATCH_IS] = { + .type = VCAP_FIELD_BIT, + .offset = 138, + .width = 1, + }, + [VCAP_KF_ARP_OPCODE_UNKNOWN_IS] = { + .type = VCAP_FIELD_BIT, + .offset = 139, + .width = 1, + }, + [VCAP_KF_ARP_OPCODE] = { + .type = VCAP_FIELD_U32, + .offset = 140, + .width = 2, + }, + [VCAP_KF_L3_IP4_DIP] = { + .type = VCAP_FIELD_U32, + .offset = 142, + .width = 32, + }, + [VCAP_KF_L3_IP4_SIP] = { + .type = VCAP_FIELD_U32, + .offset = 174, + .width = 32, + }, + [VCAP_KF_L3_DIP_EQ_SIP_IS] = { + .type = VCAP_FIELD_BIT, + .offset = 206, + .width = 1, + }, + [VCAP_KF_L4_RNG] = { + .type = VCAP_FIELD_U32, + .offset = 207, + .width = 16, + }, +}; + +static const struct vcap_field is2_ip4_tcp_udp_keyfield[] = { + [VCAP_KF_TYPE] = { + .type = VCAP_FIELD_U32, + .offset = 0, + .width = 4, + }, + [VCAP_KF_LOOKUP_FIRST_IS] = { + .type = VCAP_FIELD_BIT, + .offset = 4, + .width = 1, + }, + [VCAP_KF_LOOKUP_PAG] = { + .type = VCAP_FIELD_U32, + .offset = 5, + .width = 8, + }, + [VCAP_KF_IF_IGR_PORT_MASK_L3] = { + .type = VCAP_FIELD_BIT, + .offset = 13, + .width = 1, + }, + [VCAP_KF_IF_IGR_PORT_MASK_RNG] = { + .type = VCAP_FIELD_U32, + .offset = 14, + .width = 4, + }, + [VCAP_KF_IF_IGR_PORT_MASK_SEL] = { + .type = VCAP_FIELD_U32, + .offset = 18, + .width = 2, + }, + [VCAP_KF_IF_IGR_PORT_MASK] = { + .type = VCAP_FIELD_U32, + .offset = 20, + .width = 32, + }, + [VCAP_KF_L2_MC_IS] = { + .type = VCAP_FIELD_BIT, + .offset = 52, + .width = 1, + }, + [VCAP_KF_L2_BC_IS] = { + .type = VCAP_FIELD_BIT, + .offset = 53, + .width = 1, + }, + [VCAP_KF_8021Q_VLAN_TAGGED_IS] = { + .type = VCAP_FIELD_BIT, + .offset = 54, + .width = 1, + }, + [VCAP_KF_ISDX_GT0_IS] = { + .type = VCAP_FIELD_BIT, + .offset = 55, + .width = 1, + }, + [VCAP_KF_ISDX_CLS] = { + .type = VCAP_FIELD_U32, + .offset = 56, + .width = 12, + }, + [VCAP_KF_8021Q_VID_CLS] = { + .type = VCAP_FIELD_U32, + .offset = 68, + .width = 13, + }, + [VCAP_KF_8021Q_DEI_CLS] = { + .type = VCAP_FIELD_BIT, + .offset = 81, + .width = 1, + }, + [VCAP_KF_8021Q_PCP_CLS] = { + .type = VCAP_FIELD_U32, + .offset = 82, + .width = 3, + }, + [VCAP_KF_L2_FWD_IS] = { + .type = VCAP_FIELD_BIT, + .offset = 85, + .width = 1, + }, + [VCAP_KF_L3_RT_IS] = { + .type = VCAP_FIELD_BIT, + .offset = 88, + .width = 1, + }, + [VCAP_KF_L3_DST_IS] = { + .type = VCAP_FIELD_BIT, + .offset = 89, + .width = 1, + }, + 
[VCAP_KF_IP4_IS] = { + .type = VCAP_FIELD_BIT, + .offset = 90, + .width = 1, + }, + [VCAP_KF_L3_FRAGMENT_TYPE] = { + .type = VCAP_FIELD_U32, + .offset = 91, + .width = 2, + }, + [VCAP_KF_L3_FRAG_INVLD_L4_LEN] = { + .type = VCAP_FIELD_BIT, + .offset = 93, + .width = 1, + }, + [VCAP_KF_L3_OPTIONS_IS] = { + .type = VCAP_FIELD_BIT, + .offset = 94, + .width = 1, + }, + [VCAP_KF_L3_TTL_GT0] = { + .type = VCAP_FIELD_BIT, + .offset = 95, + .width = 1, + }, + [VCAP_KF_L3_TOS] = { + .type = VCAP_FIELD_U32, + .offset = 96, + .width = 8, + }, + [VCAP_KF_L3_IP4_DIP] = { + .type = VCAP_FIELD_U32, + .offset = 104, + .width = 32, + }, + [VCAP_KF_L3_IP4_SIP] = { + .type = VCAP_FIELD_U32, + .offset = 136, + .width = 32, + }, + [VCAP_KF_L3_DIP_EQ_SIP_IS] = { + .type = VCAP_FIELD_BIT, + .offset = 168, + .width = 1, + }, + [VCAP_KF_TCP_IS] = { + .type = VCAP_FIELD_BIT, + .offset = 169, + .width = 1, + }, + [VCAP_KF_L4_DPORT] = { + .type = VCAP_FIELD_U32, + .offset = 170, + .width = 16, + }, + [VCAP_KF_L4_SPORT] = { + .type = VCAP_FIELD_U32, + .offset = 186, + .width = 16, + }, + [VCAP_KF_L4_RNG] = { + .type = VCAP_FIELD_U32, + .offset = 202, + .width = 16, + }, + [VCAP_KF_L4_SPORT_EQ_DPORT_IS] = { + .type = VCAP_FIELD_BIT, + .offset = 218, + .width = 1, + }, + [VCAP_KF_L4_SEQUENCE_EQ0_IS] = { + .type = VCAP_FIELD_BIT, + .offset = 219, + .width = 1, + }, + [VCAP_KF_L4_FIN] = { + .type = VCAP_FIELD_BIT, + .offset = 220, + .width = 1, + }, + [VCAP_KF_L4_SYN] = { + .type = VCAP_FIELD_BIT, + .offset = 221, + .width = 1, + }, + [VCAP_KF_L4_RST] = { + .type = VCAP_FIELD_BIT, + .offset = 222, + .width = 1, + }, + [VCAP_KF_L4_PSH] = { + .type = VCAP_FIELD_BIT, + .offset = 223, + .width = 1, + }, + [VCAP_KF_L4_ACK] = { + .type = VCAP_FIELD_BIT, + .offset = 224, + .width = 1, + }, + [VCAP_KF_L4_URG] = { + .type = VCAP_FIELD_BIT, + .offset = 225, + .width = 1, + }, + [VCAP_KF_L4_PAYLOAD] = { + .type = VCAP_FIELD_U64, + .offset = 226, + .width = 64, + }, +}; + +static const struct vcap_field is2_ip4_other_keyfield[] = { + [VCAP_KF_TYPE] = { + .type = VCAP_FIELD_U32, + .offset = 0, + .width = 4, + }, + [VCAP_KF_LOOKUP_FIRST_IS] = { + .type = VCAP_FIELD_BIT, + .offset = 4, + .width = 1, + }, + [VCAP_KF_LOOKUP_PAG] = { + .type = VCAP_FIELD_U32, + .offset = 5, + .width = 8, + }, + [VCAP_KF_IF_IGR_PORT_MASK_L3] = { + .type = VCAP_FIELD_BIT, + .offset = 13, + .width = 1, + }, + [VCAP_KF_IF_IGR_PORT_MASK_RNG] = { + .type = VCAP_FIELD_U32, + .offset = 14, + .width = 4, + }, + [VCAP_KF_IF_IGR_PORT_MASK_SEL] = { + .type = VCAP_FIELD_U32, + .offset = 18, + .width = 2, + }, + [VCAP_KF_IF_IGR_PORT_MASK] = { + .type = VCAP_FIELD_U32, + .offset = 20, + .width = 32, + }, + [VCAP_KF_L2_MC_IS] = { + .type = VCAP_FIELD_BIT, + .offset = 52, + .width = 1, + }, + [VCAP_KF_L2_BC_IS] = { + .type = VCAP_FIELD_BIT, + .offset = 53, + .width = 1, + }, + [VCAP_KF_8021Q_VLAN_TAGGED_IS] = { + .type = VCAP_FIELD_BIT, + .offset = 54, + .width = 1, + }, + [VCAP_KF_ISDX_GT0_IS] = { + .type = VCAP_FIELD_BIT, + .offset = 55, + .width = 1, + }, + [VCAP_KF_ISDX_CLS] = { + .type = VCAP_FIELD_U32, + .offset = 56, + .width = 12, + }, + [VCAP_KF_8021Q_VID_CLS] = { + .type = VCAP_FIELD_U32, + .offset = 68, + .width = 13, + }, + [VCAP_KF_8021Q_DEI_CLS] = { + .type = VCAP_FIELD_BIT, + .offset = 81, + .width = 1, + }, + [VCAP_KF_8021Q_PCP_CLS] = { + .type = VCAP_FIELD_U32, + .offset = 82, + .width = 3, + }, + [VCAP_KF_L2_FWD_IS] = { + .type = VCAP_FIELD_BIT, + .offset = 85, + .width = 1, + }, + [VCAP_KF_L3_RT_IS] = { + .type = VCAP_FIELD_BIT, + .offset = 88, + .width = 
1, + }, + [VCAP_KF_L3_DST_IS] = { + .type = VCAP_FIELD_BIT, + .offset = 89, + .width = 1, + }, + [VCAP_KF_IP4_IS] = { + .type = VCAP_FIELD_BIT, + .offset = 90, + .width = 1, + }, + [VCAP_KF_L3_FRAGMENT_TYPE] = { + .type = VCAP_FIELD_U32, + .offset = 91, + .width = 2, + }, + [VCAP_KF_L3_FRAG_INVLD_L4_LEN] = { + .type = VCAP_FIELD_BIT, + .offset = 93, + .width = 1, + }, + [VCAP_KF_L3_OPTIONS_IS] = { + .type = VCAP_FIELD_BIT, + .offset = 94, + .width = 1, + }, + [VCAP_KF_L3_TTL_GT0] = { + .type = VCAP_FIELD_BIT, + .offset = 95, + .width = 1, + }, + [VCAP_KF_L3_TOS] = { + .type = VCAP_FIELD_U32, + .offset = 96, + .width = 8, + }, + [VCAP_KF_L3_IP4_DIP] = { + .type = VCAP_FIELD_U32, + .offset = 104, + .width = 32, + }, + [VCAP_KF_L3_IP4_SIP] = { + .type = VCAP_FIELD_U32, + .offset = 136, + .width = 32, + }, + [VCAP_KF_L3_DIP_EQ_SIP_IS] = { + .type = VCAP_FIELD_BIT, + .offset = 168, + .width = 1, + }, + [VCAP_KF_L3_IP_PROTO] = { + .type = VCAP_FIELD_U32, + .offset = 169, + .width = 8, + }, + [VCAP_KF_L4_RNG] = { + .type = VCAP_FIELD_U32, + .offset = 177, + .width = 16, + }, + [VCAP_KF_L3_PAYLOAD] = { + .type = VCAP_FIELD_U112, + .offset = 193, + .width = 96, + }, +}; + +static const struct vcap_field is2_ip6_std_keyfield[] = { + [VCAP_KF_TYPE] = { + .type = VCAP_FIELD_U32, + .offset = 0, + .width = 4, + }, + [VCAP_KF_LOOKUP_FIRST_IS] = { + .type = VCAP_FIELD_BIT, + .offset = 4, + .width = 1, + }, + [VCAP_KF_LOOKUP_PAG] = { + .type = VCAP_FIELD_U32, + .offset = 5, + .width = 8, + }, + [VCAP_KF_IF_IGR_PORT_MASK_L3] = { + .type = VCAP_FIELD_BIT, + .offset = 13, + .width = 1, + }, + [VCAP_KF_IF_IGR_PORT_MASK_RNG] = { + .type = VCAP_FIELD_U32, + .offset = 14, + .width = 4, + }, + [VCAP_KF_IF_IGR_PORT_MASK_SEL] = { + .type = VCAP_FIELD_U32, + .offset = 18, + .width = 2, + }, + [VCAP_KF_IF_IGR_PORT_MASK] = { + .type = VCAP_FIELD_U32, + .offset = 20, + .width = 32, + }, + [VCAP_KF_L2_MC_IS] = { + .type = VCAP_FIELD_BIT, + .offset = 52, + .width = 1, + }, + [VCAP_KF_L2_BC_IS] = { + .type = VCAP_FIELD_BIT, + .offset = 53, + .width = 1, + }, + [VCAP_KF_8021Q_VLAN_TAGGED_IS] = { + .type = VCAP_FIELD_BIT, + .offset = 54, + .width = 1, + }, + [VCAP_KF_ISDX_GT0_IS] = { + .type = VCAP_FIELD_BIT, + .offset = 55, + .width = 1, + }, + [VCAP_KF_ISDX_CLS] = { + .type = VCAP_FIELD_U32, + .offset = 56, + .width = 12, + }, + [VCAP_KF_8021Q_VID_CLS] = { + .type = VCAP_FIELD_U32, + .offset = 68, + .width = 13, + }, + [VCAP_KF_8021Q_DEI_CLS] = { + .type = VCAP_FIELD_BIT, + .offset = 81, + .width = 1, + }, + [VCAP_KF_8021Q_PCP_CLS] = { + .type = VCAP_FIELD_U32, + .offset = 82, + .width = 3, + }, + [VCAP_KF_L2_FWD_IS] = { + .type = VCAP_FIELD_BIT, + .offset = 85, + .width = 1, + }, + [VCAP_KF_L3_RT_IS] = { + .type = VCAP_FIELD_BIT, + .offset = 88, + .width = 1, + }, + [VCAP_KF_L3_TTL_GT0] = { + .type = VCAP_FIELD_BIT, + .offset = 90, + .width = 1, + }, + [VCAP_KF_L3_IP6_SIP] = { + .type = VCAP_FIELD_U128, + .offset = 91, + .width = 128, + }, + [VCAP_KF_L3_DIP_EQ_SIP_IS] = { + .type = VCAP_FIELD_BIT, + .offset = 219, + .width = 1, + }, + [VCAP_KF_L3_IP_PROTO] = { + .type = VCAP_FIELD_U32, + .offset = 220, + .width = 8, + }, + [VCAP_KF_L4_RNG] = { + .type = VCAP_FIELD_U32, + .offset = 228, + .width = 16, + }, + [VCAP_KF_L3_PAYLOAD] = { + .type = VCAP_FIELD_U48, + .offset = 244, + .width = 40, + }, +}; + +static const struct vcap_field is2_ip_7tuple_keyfield[] = { + [VCAP_KF_TYPE] = { + .type = VCAP_FIELD_U32, + .offset = 0, + .width = 2, + }, + [VCAP_KF_LOOKUP_FIRST_IS] = { + .type = VCAP_FIELD_BIT, + .offset = 2, + .width = 
1, + }, + [VCAP_KF_LOOKUP_PAG] = { + .type = VCAP_FIELD_U32, + .offset = 3, + .width = 8, + }, + [VCAP_KF_IF_IGR_PORT_MASK_L3] = { + .type = VCAP_FIELD_BIT, + .offset = 11, + .width = 1, + }, + [VCAP_KF_IF_IGR_PORT_MASK_RNG] = { + .type = VCAP_FIELD_U32, + .offset = 12, + .width = 4, + }, + [VCAP_KF_IF_IGR_PORT_MASK_SEL] = { + .type = VCAP_FIELD_U32, + .offset = 16, + .width = 2, + }, + [VCAP_KF_IF_IGR_PORT_MASK] = { + .type = VCAP_FIELD_U72, + .offset = 18, + .width = 65, + }, + [VCAP_KF_L2_MC_IS] = { + .type = VCAP_FIELD_BIT, + .offset = 83, + .width = 1, + }, + [VCAP_KF_L2_BC_IS] = { + .type = VCAP_FIELD_BIT, + .offset = 84, + .width = 1, + }, + [VCAP_KF_8021Q_VLAN_TAGGED_IS] = { + .type = VCAP_FIELD_BIT, + .offset = 85, + .width = 1, + }, + [VCAP_KF_ISDX_GT0_IS] = { + .type = VCAP_FIELD_BIT, + .offset = 86, + .width = 1, + }, + [VCAP_KF_ISDX_CLS] = { + .type = VCAP_FIELD_U32, + .offset = 87, + .width = 12, + }, + [VCAP_KF_8021Q_VID_CLS] = { + .type = VCAP_FIELD_U32, + .offset = 99, + .width = 13, + }, + [VCAP_KF_8021Q_DEI_CLS] = { + .type = VCAP_FIELD_BIT, + .offset = 112, + .width = 1, + }, + [VCAP_KF_8021Q_PCP_CLS] = { + .type = VCAP_FIELD_U32, + .offset = 113, + .width = 3, + }, + [VCAP_KF_L2_FWD_IS] = { + .type = VCAP_FIELD_BIT, + .offset = 116, + .width = 1, + }, + [VCAP_KF_L3_RT_IS] = { + .type = VCAP_FIELD_BIT, + .offset = 119, + .width = 1, + }, + [VCAP_KF_L3_DST_IS] = { + .type = VCAP_FIELD_BIT, + .offset = 120, + .width = 1, + }, + [VCAP_KF_L2_DMAC] = { + .type = VCAP_FIELD_U48, + .offset = 121, + .width = 48, + }, + [VCAP_KF_L2_SMAC] = { + .type = VCAP_FIELD_U48, + .offset = 169, + .width = 48, + }, + [VCAP_KF_IP4_IS] = { + .type = VCAP_FIELD_BIT, + .offset = 217, + .width = 1, + }, + [VCAP_KF_L3_TTL_GT0] = { + .type = VCAP_FIELD_BIT, + .offset = 218, + .width = 1, + }, + [VCAP_KF_L3_TOS] = { + .type = VCAP_FIELD_U32, + .offset = 219, + .width = 8, + }, + [VCAP_KF_L3_IP6_DIP] = { + .type = VCAP_FIELD_U128, + .offset = 227, + .width = 128, + }, + [VCAP_KF_L3_IP6_SIP] = { + .type = VCAP_FIELD_U128, + .offset = 355, + .width = 128, + }, + [VCAP_KF_L3_DIP_EQ_SIP_IS] = { + .type = VCAP_FIELD_BIT, + .offset = 483, + .width = 1, + }, + [VCAP_KF_TCP_UDP_IS] = { + .type = VCAP_FIELD_BIT, + .offset = 484, + .width = 1, + }, + [VCAP_KF_TCP_IS] = { + .type = VCAP_FIELD_BIT, + .offset = 485, + .width = 1, + }, + [VCAP_KF_L4_DPORT] = { + .type = VCAP_FIELD_U32, + .offset = 486, + .width = 16, + }, + [VCAP_KF_L4_SPORT] = { + .type = VCAP_FIELD_U32, + .offset = 502, + .width = 16, + }, + [VCAP_KF_L4_RNG] = { + .type = VCAP_FIELD_U32, + .offset = 518, + .width = 16, + }, + [VCAP_KF_L4_SPORT_EQ_DPORT_IS] = { + .type = VCAP_FIELD_BIT, + .offset = 534, + .width = 1, + }, + [VCAP_KF_L4_SEQUENCE_EQ0_IS] = { + .type = VCAP_FIELD_BIT, + .offset = 535, + .width = 1, + }, + [VCAP_KF_L4_FIN] = { + .type = VCAP_FIELD_BIT, + .offset = 536, + .width = 1, + }, + [VCAP_KF_L4_SYN] = { + .type = VCAP_FIELD_BIT, + .offset = 537, + .width = 1, + }, + [VCAP_KF_L4_RST] = { + .type = VCAP_FIELD_BIT, + .offset = 538, + .width = 1, + }, + [VCAP_KF_L4_PSH] = { + .type = VCAP_FIELD_BIT, + .offset = 539, + .width = 1, + }, + [VCAP_KF_L4_ACK] = { + .type = VCAP_FIELD_BIT, + .offset = 540, + .width = 1, + }, + [VCAP_KF_L4_URG] = { + .type = VCAP_FIELD_BIT, + .offset = 541, + .width = 1, + }, + [VCAP_KF_L4_PAYLOAD] = { + .type = VCAP_FIELD_U64, + .offset = 542, + .width = 64, + }, +}; + +/* keyfield_set */ +static const struct vcap_set is2_keyfield_set[] = { + [VCAP_KFS_MAC_ETYPE] = { + .type_id = 0, + .sw_per_item = 6, 
+ .sw_cnt = 2, + }, + [VCAP_KFS_ARP] = { + .type_id = 3, + .sw_per_item = 6, + .sw_cnt = 2, + }, + [VCAP_KFS_IP4_TCP_UDP] = { + .type_id = 4, + .sw_per_item = 6, + .sw_cnt = 2, + }, + [VCAP_KFS_IP4_OTHER] = { + .type_id = 5, + .sw_per_item = 6, + .sw_cnt = 2, + }, + [VCAP_KFS_IP6_STD] = { + .type_id = 6, + .sw_per_item = 6, + .sw_cnt = 2, + }, + [VCAP_KFS_IP_7TUPLE] = { + .type_id = 1, + .sw_per_item = 12, + .sw_cnt = 1, + }, +}; + +/* keyfield_set map */ +static const struct vcap_field *is2_keyfield_set_map[] = { + [VCAP_KFS_MAC_ETYPE] = is2_mac_etype_keyfield, + [VCAP_KFS_ARP] = is2_arp_keyfield, + [VCAP_KFS_IP4_TCP_UDP] = is2_ip4_tcp_udp_keyfield, + [VCAP_KFS_IP4_OTHER] = is2_ip4_other_keyfield, + [VCAP_KFS_IP6_STD] = is2_ip6_std_keyfield, + [VCAP_KFS_IP_7TUPLE] = is2_ip_7tuple_keyfield, +}; + +/* keyfield_set map sizes */ +static int is2_keyfield_set_map_size[] = { + [VCAP_KFS_MAC_ETYPE] = ARRAY_SIZE(is2_mac_etype_keyfield), + [VCAP_KFS_ARP] = ARRAY_SIZE(is2_arp_keyfield), + [VCAP_KFS_IP4_TCP_UDP] = ARRAY_SIZE(is2_ip4_tcp_udp_keyfield), + [VCAP_KFS_IP4_OTHER] = ARRAY_SIZE(is2_ip4_other_keyfield), + [VCAP_KFS_IP6_STD] = ARRAY_SIZE(is2_ip6_std_keyfield), + [VCAP_KFS_IP_7TUPLE] = ARRAY_SIZE(is2_ip_7tuple_keyfield), +}; + +/* actionfields */ +static const struct vcap_field is2_base_type_actionfield[] = { + [VCAP_AF_PIPELINE_FORCE_ENA] = { + .type = VCAP_FIELD_BIT, + .offset = 1, + .width = 1, + }, + [VCAP_AF_PIPELINE_PT] = { + .type = VCAP_FIELD_U32, + .offset = 2, + .width = 5, + }, + [VCAP_AF_HIT_ME_ONCE] = { + .type = VCAP_FIELD_BIT, + .offset = 7, + .width = 1, + }, + [VCAP_AF_INTR_ENA] = { + .type = VCAP_FIELD_BIT, + .offset = 8, + .width = 1, + }, + [VCAP_AF_CPU_COPY_ENA] = { + .type = VCAP_FIELD_BIT, + .offset = 9, + .width = 1, + }, + [VCAP_AF_CPU_QUEUE_NUM] = { + .type = VCAP_FIELD_U32, + .offset = 10, + .width = 3, + }, + [VCAP_AF_LRN_DIS] = { + .type = VCAP_FIELD_BIT, + .offset = 14, + .width = 1, + }, + [VCAP_AF_RT_DIS] = { + .type = VCAP_FIELD_BIT, + .offset = 15, + .width = 1, + }, + [VCAP_AF_POLICE_ENA] = { + .type = VCAP_FIELD_BIT, + .offset = 16, + .width = 1, + }, + [VCAP_AF_POLICE_IDX] = { + .type = VCAP_FIELD_U32, + .offset = 17, + .width = 6, + }, + [VCAP_AF_IGNORE_PIPELINE_CTRL] = { + .type = VCAP_FIELD_BIT, + .offset = 23, + .width = 1, + }, + [VCAP_AF_MASK_MODE] = { + .type = VCAP_FIELD_U32, + .offset = 27, + .width = 3, + }, + [VCAP_AF_PORT_MASK] = { + .type = VCAP_FIELD_U72, + .offset = 30, + .width = 68, + }, + [VCAP_AF_MIRROR_PROBE] = { + .type = VCAP_FIELD_U32, + .offset = 111, + .width = 2, + }, + [VCAP_AF_MATCH_ID] = { + .type = VCAP_FIELD_U32, + .offset = 159, + .width = 16, + }, + [VCAP_AF_MATCH_ID_MASK] = { + .type = VCAP_FIELD_U32, + .offset = 175, + .width = 16, + }, + [VCAP_AF_CNT_ID] = { + .type = VCAP_FIELD_U32, + .offset = 191, + .width = 12, + }, +}; + +/* actionfield_set */ +static const struct vcap_set is2_actionfield_set[] = { + [VCAP_AFS_BASE_TYPE] = { + .type_id = -1, + .sw_per_item = 3, + .sw_cnt = 4, + }, +}; + +/* actionfield_set map */ +static const struct vcap_field *is2_actionfield_set_map[] = { + [VCAP_AFS_BASE_TYPE] = is2_base_type_actionfield, +}; + +/* actionfield_set map size */ +static int is2_actionfield_set_map_size[] = { + [VCAP_AFS_BASE_TYPE] = ARRAY_SIZE(is2_base_type_actionfield), +}; + +/* Type Groups */ +static const struct vcap_typegroup is2_x12_keyfield_set_typegroups[] = { + { + .offset = 0, + .width = 3, + .value = 4, + }, + { + .offset = 156, + .width = 1, + .value = 0, + }, + { + .offset = 312, + .width = 2, + .value = 
0, + }, + { + .offset = 468, + .width = 1, + .value = 0, + }, + {} +}; + +static const struct vcap_typegroup is2_x6_keyfield_set_typegroups[] = { + { + .offset = 0, + .width = 2, + .value = 2, + }, + { + .offset = 156, + .width = 1, + .value = 0, + }, + {} +}; + +static const struct vcap_typegroup is2_x3_keyfield_set_typegroups[] = { + {} +}; + +static const struct vcap_typegroup is2_x1_keyfield_set_typegroups[] = { + {} +}; + +static const struct vcap_typegroup *is2_keyfield_set_typegroups[] = { + [12] = is2_x12_keyfield_set_typegroups, + [6] = is2_x6_keyfield_set_typegroups, + [3] = is2_x3_keyfield_set_typegroups, + [1] = is2_x1_keyfield_set_typegroups, + [13] = NULL, +}; + +static const struct vcap_typegroup is2_x3_actionfield_set_typegroups[] = { + { + .offset = 0, + .width = 2, + .value = 2, + }, + { + .offset = 110, + .width = 1, + .value = 0, + }, + { + .offset = 220, + .width = 1, + .value = 0, + }, + {} +}; + +static const struct vcap_typegroup is2_x1_actionfield_set_typegroups[] = { + {} +}; + +static const struct vcap_typegroup *is2_actionfield_set_typegroups[] = { + [3] = is2_x3_actionfield_set_typegroups, + [1] = is2_x1_actionfield_set_typegroups, + [13] = NULL, +}; + +/* Keyfieldset names */ +static const char * const vcap_keyfield_set_names[] = { + [VCAP_KFS_NO_VALUE] = "(None)", + [VCAP_KFS_ARP] = "VCAP_KFS_ARP", + [VCAP_KFS_IP4_OTHER] = "VCAP_KFS_IP4_OTHER", + [VCAP_KFS_IP4_TCP_UDP] = "VCAP_KFS_IP4_TCP_UDP", + [VCAP_KFS_IP6_STD] = "VCAP_KFS_IP6_STD", + [VCAP_KFS_IP_7TUPLE] = "VCAP_KFS_IP_7TUPLE", + [VCAP_KFS_MAC_ETYPE] = "VCAP_KFS_MAC_ETYPE", +}; + +/* Actionfieldset names */ +static const char * const vcap_actionfield_set_names[] = { + [VCAP_AFS_NO_VALUE] = "(None)", + [VCAP_AFS_BASE_TYPE] = "VCAP_AFS_BASE_TYPE", +}; + +/* Keyfield names */ +static const char * const vcap_keyfield_names[] = { + [VCAP_KF_NO_VALUE] = "(None)", + [VCAP_KF_8021Q_DEI_CLS] = "8021Q_DEI_CLS", + [VCAP_KF_8021Q_PCP_CLS] = "8021Q_PCP_CLS", + [VCAP_KF_8021Q_VID_CLS] = "8021Q_VID_CLS", + [VCAP_KF_8021Q_VLAN_TAGGED_IS] = "8021Q_VLAN_TAGGED_IS", + [VCAP_KF_ARP_ADDR_SPACE_OK_IS] = "ARP_ADDR_SPACE_OK_IS", + [VCAP_KF_ARP_LEN_OK_IS] = "ARP_LEN_OK_IS", + [VCAP_KF_ARP_OPCODE] = "ARP_OPCODE", + [VCAP_KF_ARP_OPCODE_UNKNOWN_IS] = "ARP_OPCODE_UNKNOWN_IS", + [VCAP_KF_ARP_PROTO_SPACE_OK_IS] = "ARP_PROTO_SPACE_OK_IS", + [VCAP_KF_ARP_SENDER_MATCH_IS] = "ARP_SENDER_MATCH_IS", + [VCAP_KF_ARP_TGT_MATCH_IS] = "ARP_TGT_MATCH_IS", + [VCAP_KF_ETYPE] = "ETYPE", + [VCAP_KF_ETYPE_LEN_IS] = "ETYPE_LEN_IS", + [VCAP_KF_IF_IGR_PORT_MASK] = "IF_IGR_PORT_MASK", + [VCAP_KF_IF_IGR_PORT_MASK_L3] = "IF_IGR_PORT_MASK_L3", + [VCAP_KF_IF_IGR_PORT_MASK_RNG] = "IF_IGR_PORT_MASK_RNG", + [VCAP_KF_IF_IGR_PORT_MASK_SEL] = "IF_IGR_PORT_MASK_SEL", + [VCAP_KF_IP4_IS] = "IP4_IS", + [VCAP_KF_ISDX_CLS] = "ISDX_CLS", + [VCAP_KF_ISDX_GT0_IS] = "ISDX_GT0_IS", + [VCAP_KF_L2_BC_IS] = "L2_BC_IS", + [VCAP_KF_L2_DMAC] = "L2_DMAC", + [VCAP_KF_L2_FWD_IS] = "L2_FWD_IS", + [VCAP_KF_L2_MC_IS] = "L2_MC_IS", + [VCAP_KF_L2_PAYLOAD_ETYPE] = "L2_PAYLOAD_ETYPE", + [VCAP_KF_L2_SMAC] = "L2_SMAC", + [VCAP_KF_L3_DIP_EQ_SIP_IS] = "L3_DIP_EQ_SIP_IS", + [VCAP_KF_L3_DST_IS] = "L3_DST_IS", + [VCAP_KF_L3_FRAGMENT_TYPE] = "L3_FRAGMENT_TYPE", + [VCAP_KF_L3_FRAG_INVLD_L4_LEN] = "L3_FRAG_INVLD_L4_LEN", + [VCAP_KF_L3_IP4_DIP] = "L3_IP4_DIP", + [VCAP_KF_L3_IP4_SIP] = "L3_IP4_SIP", + [VCAP_KF_L3_IP6_DIP] = "L3_IP6_DIP", + [VCAP_KF_L3_IP6_SIP] = "L3_IP6_SIP", + [VCAP_KF_L3_IP_PROTO] = "L3_IP_PROTO", + [VCAP_KF_L3_OPTIONS_IS] = "L3_OPTIONS_IS", + [VCAP_KF_L3_PAYLOAD] = "L3_PAYLOAD", + 
[VCAP_KF_L3_RT_IS] = "L3_RT_IS", + [VCAP_KF_L3_TOS] = "L3_TOS", + [VCAP_KF_L3_TTL_GT0] = "L3_TTL_GT0", + [VCAP_KF_L4_ACK] = "L4_ACK", + [VCAP_KF_L4_DPORT] = "L4_DPORT", + [VCAP_KF_L4_FIN] = "L4_FIN", + [VCAP_KF_L4_PAYLOAD] = "L4_PAYLOAD", + [VCAP_KF_L4_PSH] = "L4_PSH", + [VCAP_KF_L4_RNG] = "L4_RNG", + [VCAP_KF_L4_RST] = "L4_RST", + [VCAP_KF_L4_SEQUENCE_EQ0_IS] = "L4_SEQUENCE_EQ0_IS", + [VCAP_KF_L4_SPORT] = "L4_SPORT", + [VCAP_KF_L4_SPORT_EQ_DPORT_IS] = "L4_SPORT_EQ_DPORT_IS", + [VCAP_KF_L4_SYN] = "L4_SYN", + [VCAP_KF_L4_URG] = "L4_URG", + [VCAP_KF_LOOKUP_FIRST_IS] = "LOOKUP_FIRST_IS", + [VCAP_KF_LOOKUP_PAG] = "LOOKUP_PAG", + [VCAP_KF_OAM_CCM_CNTS_EQ0] = "OAM_CCM_CNTS_EQ0", + [VCAP_KF_OAM_Y1731_IS] = "OAM_Y1731_IS", + [VCAP_KF_TCP_IS] = "TCP_IS", + [VCAP_KF_TCP_UDP_IS] = "TCP_UDP_IS", + [VCAP_KF_TYPE] = "TYPE", +}; + +/* Actionfield names */ +static const char * const vcap_actionfield_names[] = { + [VCAP_AF_NO_VALUE] = "(None)", + [VCAP_AF_CNT_ID] = "CNT_ID", + [VCAP_AF_CPU_COPY_ENA] = "CPU_COPY_ENA", + [VCAP_AF_CPU_QUEUE_NUM] = "CPU_QUEUE_NUM", + [VCAP_AF_HIT_ME_ONCE] = "HIT_ME_ONCE", + [VCAP_AF_IGNORE_PIPELINE_CTRL] = "IGNORE_PIPELINE_CTRL", + [VCAP_AF_INTR_ENA] = "INTR_ENA", + [VCAP_AF_LRN_DIS] = "LRN_DIS", + [VCAP_AF_MASK_MODE] = "MASK_MODE", + [VCAP_AF_MATCH_ID] = "MATCH_ID", + [VCAP_AF_MATCH_ID_MASK] = "MATCH_ID_MASK", + [VCAP_AF_MIRROR_PROBE] = "MIRROR_PROBE", + [VCAP_AF_PIPELINE_FORCE_ENA] = "PIPELINE_FORCE_ENA", + [VCAP_AF_PIPELINE_PT] = "PIPELINE_PT", + [VCAP_AF_POLICE_ENA] = "POLICE_ENA", + [VCAP_AF_POLICE_IDX] = "POLICE_IDX", + [VCAP_AF_PORT_MASK] = "PORT_MASK", + [VCAP_AF_RT_DIS] = "RT_DIS", +}; + +/* VCAPs */ +const struct vcap_info sparx5_vcaps[] = { + [VCAP_TYPE_IS2] = { + .name = "is2", + .rows = 256, + .sw_count = 12, + .sw_width = 52, + .sticky_width = 1, + .act_width = 110, + .default_cnt = 73, + .require_cnt_dis = 0, + .version = 1, + .keyfield_set = is2_keyfield_set, + .keyfield_set_size = ARRAY_SIZE(is2_keyfield_set), + .actionfield_set = is2_actionfield_set, + .actionfield_set_size = ARRAY_SIZE(is2_actionfield_set), + .keyfield_set_map = is2_keyfield_set_map, + .keyfield_set_map_size = is2_keyfield_set_map_size, + .actionfield_set_map = is2_actionfield_set_map, + .actionfield_set_map_size = is2_actionfield_set_map_size, + .keyfield_set_typegroups = is2_keyfield_set_typegroups, + .actionfield_set_typegroups = is2_actionfield_set_typegroups, + }, +}; + +const struct vcap_statistics sparx5_vcap_stats = { + .name = "sparx5", + .count = 1, + .keyfield_set_names = vcap_keyfield_set_names, + .actionfield_set_names = vcap_actionfield_set_names, + .keyfield_names = vcap_keyfield_names, + .actionfield_names = vcap_actionfield_names, +}; diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_vcap_ag_api.h b/drivers/net/ethernet/microchip/sparx5/sparx5_vcap_ag_api.h new file mode 100644 index 000000000000..7d106f1276fe --- /dev/null +++ b/drivers/net/ethernet/microchip/sparx5/sparx5_vcap_ag_api.h @@ -0,0 +1,18 @@ +/* SPDX-License-Identifier: BSD-3-Clause */ +/* Copyright (C) 2022 Microchip Technology Inc. and its subsidiaries. + * Microchip VCAP API + */ + +/* This file is autogenerated by cml-utils 2022-10-13 10:04:41 +0200. 
+ * Commit ID: fd7cafd175899f0672c73afb3a30fc872500ae86 + */ + +#ifndef __SPARX5_VCAP_AG_API_H__ +#define __SPARX5_VCAP_AG_API_H__ + +/* VCAPs */ +extern const struct vcap_info sparx5_vcaps[]; +extern const struct vcap_statistics sparx5_vcap_stats; + +#endif /* __SPARX5_VCAP_AG_API_H__ */ + diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_vcap_impl.c b/drivers/net/ethernet/microchip/sparx5/sparx5_vcap_impl.c new file mode 100644 index 000000000000..50153264179e --- /dev/null +++ b/drivers/net/ethernet/microchip/sparx5/sparx5_vcap_impl.c @@ -0,0 +1,527 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* Microchip Sparx5 Switch driver VCAP implementation + * + * Copyright (c) 2022 Microchip Technology Inc. and its subsidiaries. + * + * The Sparx5 Chip Register Model can be browsed at this location: + * https://github.com/microchip-ung/sparx-5_reginfo + */ + +#include <linux/types.h> +#include <linux/list.h> + +#include "vcap_api.h" +#include "vcap_api_client.h" +#include "sparx5_main_regs.h" +#include "sparx5_main.h" +#include "sparx5_vcap_impl.h" +#include "sparx5_vcap_ag_api.h" + +#define SUPER_VCAP_BLK_SIZE 3072 /* addresses per Super VCAP block */ +#define STREAMSIZE (64 * 4) /* bytes in the VCAP cache area */ + +#define SPARX5_IS2_LOOKUPS 4 + +/* IS2 port keyset selection control */ + +/* IS2 non-ethernet traffic type keyset generation */ +enum vcap_is2_port_sel_noneth { + VCAP_IS2_PS_NONETH_MAC_ETYPE, + VCAP_IS2_PS_NONETH_CUSTOM_1, + VCAP_IS2_PS_NONETH_CUSTOM_2, + VCAP_IS2_PS_NONETH_NO_LOOKUP +}; + +/* IS2 IPv4 unicast traffic type keyset generation */ +enum vcap_is2_port_sel_ipv4_uc { + VCAP_IS2_PS_IPV4_UC_MAC_ETYPE, + VCAP_IS2_PS_IPV4_UC_IP4_TCP_UDP_OTHER, + VCAP_IS2_PS_IPV4_UC_IP_7TUPLE, +}; + +/* IS2 IPv4 multicast traffic type keyset generation */ +enum vcap_is2_port_sel_ipv4_mc { + VCAP_IS2_PS_IPV4_MC_MAC_ETYPE, + VCAP_IS2_PS_IPV4_MC_IP4_TCP_UDP_OTHER, + VCAP_IS2_PS_IPV4_MC_IP_7TUPLE, + VCAP_IS2_PS_IPV4_MC_IP4_VID, +}; + +/* IS2 IPv6 unicast traffic type keyset generation */ +enum vcap_is2_port_sel_ipv6_uc { + VCAP_IS2_PS_IPV6_UC_MAC_ETYPE, + VCAP_IS2_PS_IPV6_UC_IP_7TUPLE, + VCAP_IS2_PS_IPV6_UC_IP6_STD, + VCAP_IS2_PS_IPV6_UC_IP4_TCP_UDP_OTHER, +}; + +/* IS2 IPv6 multicast traffic type keyset generation */ +enum vcap_is2_port_sel_ipv6_mc { + VCAP_IS2_PS_IPV6_MC_MAC_ETYPE, + VCAP_IS2_PS_IPV6_MC_IP_7TUPLE, + VCAP_IS2_PS_IPV6_MC_IP6_VID, + VCAP_IS2_PS_IPV6_MC_IP6_STD, + VCAP_IS2_PS_IPV6_MC_IP4_TCP_UDP_OTHER, +}; + +/* IS2 ARP traffic type keyset generation */ +enum vcap_is2_port_sel_arp { + VCAP_IS2_PS_ARP_MAC_ETYPE, + VCAP_IS2_PS_ARP_ARP, +}; + +static struct sparx5_vcap_inst { + enum vcap_type vtype; /* type of vcap */ + int vinst; /* instance number within the same type */ + int lookups; /* number of lookups in this vcap type */ + int lookups_per_instance; /* number of lookups in this instance */ + int first_cid; /* first chain id in this vcap */ + int last_cid; /* last chain id in this vcap */ + int count; /* number of available addresses, not in super vcap */ + int map_id; /* id in the super vcap block mapping (if applicable) */ + int blockno; /* starting block in super vcap (if applicable) */ + int blocks; /* number of blocks in super vcap (if applicable) */ +} sparx5_vcap_inst_cfg[] = { + { + .vtype = VCAP_TYPE_IS2, /* IS2-0 */ + .vinst = 0, + .map_id = 4, + .lookups = SPARX5_IS2_LOOKUPS, + .lookups_per_instance = SPARX5_IS2_LOOKUPS / 2, + .first_cid = SPARX5_VCAP_CID_IS2_L0, + .last_cid = SPARX5_VCAP_CID_IS2_L2 - 1, + .blockno = 0, /* Maps block 0-1 */ + .blocks = 2, + }, + 
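+ /* Second IS2 instance: covers lookups 2-3 (chain ids SPARX5_VCAP_CID_IS2_L2 up to SPARX5_VCAP_CID_IS2_MAX) and maps Super VCAP blocks 2-3 */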
{ + .vtype = VCAP_TYPE_IS2, /* IS2-1 */ + .vinst = 1, + .map_id = 5, + .lookups = SPARX5_IS2_LOOKUPS, + .lookups_per_instance = SPARX5_IS2_LOOKUPS / 2, + .first_cid = SPARX5_VCAP_CID_IS2_L2, + .last_cid = SPARX5_VCAP_CID_IS2_MAX, + .blockno = 2, /* Maps block 2-3 */ + .blocks = 2, + }, +}; + +/* Await the super VCAP completion of the current operation */ +static void sparx5_vcap_wait_super_update(struct sparx5 *sparx5) +{ + u32 value; + + read_poll_timeout(spx5_rd, value, + !VCAP_SUPER_CTRL_UPDATE_SHOT_GET(value), 500, 10000, + false, sparx5, VCAP_SUPER_CTRL); +} + +/* Initializing a VCAP address range: only IS2 for now */ +static void _sparx5_vcap_range_init(struct sparx5 *sparx5, + struct vcap_admin *admin, + u32 addr, u32 count) +{ + u32 size = count - 1; + + spx5_wr(VCAP_SUPER_CFG_MV_NUM_POS_SET(0) | + VCAP_SUPER_CFG_MV_SIZE_SET(size), + sparx5, VCAP_SUPER_CFG); + spx5_wr(VCAP_SUPER_CTRL_UPDATE_CMD_SET(VCAP_CMD_INITIALIZE) | + VCAP_SUPER_CTRL_UPDATE_ENTRY_DIS_SET(0) | + VCAP_SUPER_CTRL_UPDATE_ACTION_DIS_SET(0) | + VCAP_SUPER_CTRL_UPDATE_CNT_DIS_SET(0) | + VCAP_SUPER_CTRL_UPDATE_ADDR_SET(addr) | + VCAP_SUPER_CTRL_CLEAR_CACHE_SET(true) | + VCAP_SUPER_CTRL_UPDATE_SHOT_SET(true), + sparx5, VCAP_SUPER_CTRL); + sparx5_vcap_wait_super_update(sparx5); +} + +/* Initializing VCAP rule data area */ +static void sparx5_vcap_block_init(struct sparx5 *sparx5, + struct vcap_admin *admin) +{ + _sparx5_vcap_range_init(sparx5, admin, admin->first_valid_addr, + admin->last_valid_addr - + admin->first_valid_addr); +} + +/* Get the keyset name from the sparx5 VCAP model */ +static const char *sparx5_vcap_keyset_name(struct net_device *ndev, + enum vcap_keyfield_set keyset) +{ + struct sparx5_port *port = netdev_priv(ndev); + + return port->sparx5->vcap_ctrl->stats->keyfield_set_names[keyset]; +} + +/* Check if this is the first lookup of IS2 */ +static bool sparx5_vcap_is2_is_first_chain(struct vcap_rule *rule) +{ + return (rule->vcap_chain_id >= SPARX5_VCAP_CID_IS2_L0 && + rule->vcap_chain_id < SPARX5_VCAP_CID_IS2_L1) || + ((rule->vcap_chain_id >= SPARX5_VCAP_CID_IS2_L2 && + rule->vcap_chain_id < SPARX5_VCAP_CID_IS2_L3)); +} + +/* Set the narrow range ingress port mask on a rule */ +static void sparx5_vcap_add_range_port_mask(struct vcap_rule *rule, + struct net_device *ndev) +{ + struct sparx5_port *port = netdev_priv(ndev); + u32 port_mask; + u32 range; + + range = port->portno / BITS_PER_TYPE(u32); + /* Port bit set to match-any */ + port_mask = ~BIT(port->portno % BITS_PER_TYPE(u32)); + vcap_rule_add_key_u32(rule, VCAP_KF_IF_IGR_PORT_MASK_SEL, 0, 0xf); + vcap_rule_add_key_u32(rule, VCAP_KF_IF_IGR_PORT_MASK_RNG, range, 0xf); + vcap_rule_add_key_u32(rule, VCAP_KF_IF_IGR_PORT_MASK, 0, port_mask); +} + +/* Set the wide range ingress port mask on a rule */ +static void sparx5_vcap_add_wide_port_mask(struct vcap_rule *rule, + struct net_device *ndev) +{ + struct sparx5_port *port = netdev_priv(ndev); + struct vcap_u72_key port_mask; + u32 range; + + /* Port bit set to match-any */ + memset(port_mask.value, 0, sizeof(port_mask.value)); + memset(port_mask.mask, 0xff, sizeof(port_mask.mask)); + range = port->portno / BITS_PER_BYTE; + port_mask.mask[range] = ~BIT(port->portno % BITS_PER_BYTE); + vcap_rule_add_key_u72(rule, VCAP_KF_IF_IGR_PORT_MASK, &port_mask); +} + +/* API callback used for validating a field keyset (check the port keysets) */ +static enum vcap_keyfield_set +sparx5_vcap_validate_keyset(struct net_device *ndev, + struct vcap_admin *admin, + struct vcap_rule *rule, + struct vcap_keyset_list 
*kslist, + u16 l3_proto) +{ + if (!kslist || kslist->cnt == 0) + return VCAP_KFS_NO_VALUE; + /* for now just return whatever the API suggests */ + return kslist->keysets[0]; +} + +/* API callback used for adding default fields to a rule */ +static void sparx5_vcap_add_default_fields(struct net_device *ndev, + struct vcap_admin *admin, + struct vcap_rule *rule) +{ + const struct vcap_field *field; + + field = vcap_lookup_keyfield(rule, VCAP_KF_IF_IGR_PORT_MASK); + if (field && field->width == SPX5_PORTS) + sparx5_vcap_add_wide_port_mask(rule, ndev); + else if (field && field->width == BITS_PER_TYPE(u32)) + sparx5_vcap_add_range_port_mask(rule, ndev); + else + pr_err("%s:%d: %s: could not add an ingress port mask for: %s\n", + __func__, __LINE__, netdev_name(ndev), + sparx5_vcap_keyset_name(ndev, rule->keyset)); + /* add the lookup bit */ + if (sparx5_vcap_is2_is_first_chain(rule)) + vcap_rule_add_key_bit(rule, VCAP_KF_LOOKUP_FIRST_IS, VCAP_BIT_1); + else + vcap_rule_add_key_bit(rule, VCAP_KF_LOOKUP_FIRST_IS, VCAP_BIT_0); +} + +/* API callback used for erasing the vcap cache area (not the register area) */ +static void sparx5_vcap_cache_erase(struct vcap_admin *admin) +{ + memset(admin->cache.keystream, 0, STREAMSIZE); + memset(admin->cache.maskstream, 0, STREAMSIZE); + memset(admin->cache.actionstream, 0, STREAMSIZE); + memset(&admin->cache.counter, 0, sizeof(admin->cache.counter)); +} + +/* API callback used for writing to the VCAP cache */ +static void sparx5_vcap_cache_write(struct net_device *ndev, + struct vcap_admin *admin, + enum vcap_selection sel, + u32 start, + u32 count) +{ + struct sparx5_port *port = netdev_priv(ndev); + struct sparx5 *sparx5 = port->sparx5; + u32 *keystr, *mskstr, *actstr; + int idx; + + keystr = &admin->cache.keystream[start]; + mskstr = &admin->cache.maskstream[start]; + actstr = &admin->cache.actionstream[start]; + switch (sel) { + case VCAP_SEL_ENTRY: + for (idx = 0; idx < count; ++idx) { + /* Avoid 'match-off' by setting value & mask */ + spx5_wr(keystr[idx] & mskstr[idx], sparx5, + VCAP_SUPER_VCAP_ENTRY_DAT(idx)); + spx5_wr(~mskstr[idx], sparx5, + VCAP_SUPER_VCAP_MASK_DAT(idx)); + } + break; + case VCAP_SEL_ACTION: + for (idx = 0; idx < count; ++idx) + spx5_wr(actstr[idx], sparx5, + VCAP_SUPER_VCAP_ACTION_DAT(idx)); + break; + case VCAP_SEL_ALL: + pr_err("%s:%d: cannot write all streams at once\n", + __func__, __LINE__); + break; + default: + break; + } +} + +/* API callback used for reading from the VCAP into the VCAP cache */ +static void sparx5_vcap_cache_read(struct net_device *ndev, + struct vcap_admin *admin, + enum vcap_selection sel, u32 start, + u32 count) +{ + /* this will be added later */ +} + +/* API callback used for initializing a VCAP address range */ +static void sparx5_vcap_range_init(struct net_device *ndev, + struct vcap_admin *admin, u32 addr, + u32 count) +{ + struct sparx5_port *port = netdev_priv(ndev); + struct sparx5 *sparx5 = port->sparx5; + + _sparx5_vcap_range_init(sparx5, admin, addr, count); +} + +/* API callback used for updating the VCAP cache */ +static void sparx5_vcap_update(struct net_device *ndev, + struct vcap_admin *admin, enum vcap_command cmd, + enum vcap_selection sel, u32 addr) +{ + struct sparx5_port *port = netdev_priv(ndev); + struct sparx5 *sparx5 = port->sparx5; + bool clear; + + clear = (cmd == VCAP_CMD_INITIALIZE); + spx5_wr(VCAP_SUPER_CFG_MV_NUM_POS_SET(0) | + VCAP_SUPER_CFG_MV_SIZE_SET(0), sparx5, VCAP_SUPER_CFG); + spx5_wr(VCAP_SUPER_CTRL_UPDATE_CMD_SET(cmd) | + 
VCAP_SUPER_CTRL_UPDATE_ENTRY_DIS_SET((VCAP_SEL_ENTRY & sel) == 0) | + VCAP_SUPER_CTRL_UPDATE_ACTION_DIS_SET((VCAP_SEL_ACTION & sel) == 0) | + VCAP_SUPER_CTRL_UPDATE_CNT_DIS_SET((VCAP_SEL_COUNTER & sel) == 0) | + VCAP_SUPER_CTRL_UPDATE_ADDR_SET(addr) | + VCAP_SUPER_CTRL_CLEAR_CACHE_SET(clear) | + VCAP_SUPER_CTRL_UPDATE_SHOT_SET(true), + sparx5, VCAP_SUPER_CTRL); + sparx5_vcap_wait_super_update(sparx5); +} + +/* API callback used for moving a block of rules in the VCAP */ +static void sparx5_vcap_move(struct net_device *ndev, struct vcap_admin *admin, + u32 addr, int offset, int count) +{ + /* this will be added later */ +} + +/* Provide port information via a callback interface */ +static int sparx5_port_info(struct net_device *ndev, enum vcap_type vtype, + int (*pf)(void *out, int arg, const char *fmt, ...), + void *out, int arg) +{ + /* this will be added later */ + return 0; +} + +/* API callback operations: only IS2 is supported for now */ +static struct vcap_operations sparx5_vcap_ops = { + .validate_keyset = sparx5_vcap_validate_keyset, + .add_default_fields = sparx5_vcap_add_default_fields, + .cache_erase = sparx5_vcap_cache_erase, + .cache_write = sparx5_vcap_cache_write, + .cache_read = sparx5_vcap_cache_read, + .init = sparx5_vcap_range_init, + .update = sparx5_vcap_update, + .move = sparx5_vcap_move, + .port_info = sparx5_port_info, +}; + +/* Enable lookups per port and set the keyset generation: only IS2 for now */ +static void sparx5_vcap_port_key_selection(struct sparx5 *sparx5, + struct vcap_admin *admin) +{ + int portno, lookup; + u32 keysel; + + /* enable all 4 lookups on all ports */ + for (portno = 0; portno < SPX5_PORTS; ++portno) + spx5_wr(ANA_ACL_VCAP_S2_CFG_SEC_ENA_SET(0xf), sparx5, + ANA_ACL_VCAP_S2_CFG(portno)); + + /* all traffic types generate the MAC_ETYPE keyset for now in all + * lookups on all ports + */ + keysel = ANA_ACL_VCAP_S2_KEY_SEL_KEY_SEL_ENA_SET(true) | + ANA_ACL_VCAP_S2_KEY_SEL_NON_ETH_KEY_SEL_SET(VCAP_IS2_PS_NONETH_MAC_ETYPE) | + ANA_ACL_VCAP_S2_KEY_SEL_IP4_MC_KEY_SEL_SET(VCAP_IS2_PS_IPV4_MC_MAC_ETYPE) | + ANA_ACL_VCAP_S2_KEY_SEL_IP4_UC_KEY_SEL_SET(VCAP_IS2_PS_IPV4_UC_MAC_ETYPE) | + ANA_ACL_VCAP_S2_KEY_SEL_IP6_MC_KEY_SEL_SET(VCAP_IS2_PS_IPV6_MC_MAC_ETYPE) | + ANA_ACL_VCAP_S2_KEY_SEL_IP6_UC_KEY_SEL_SET(VCAP_IS2_PS_IPV6_UC_MAC_ETYPE) | + ANA_ACL_VCAP_S2_KEY_SEL_ARP_KEY_SEL_SET(VCAP_IS2_PS_ARP_MAC_ETYPE); + for (lookup = 0; lookup < admin->lookups; ++lookup) { + for (portno = 0; portno < SPX5_PORTS; ++portno) { + spx5_wr(keysel, sparx5, + ANA_ACL_VCAP_S2_KEY_SEL(portno, lookup)); + } + } +} + +/* Disable lookups per port and set the keyset generation: only IS2 for now */ +static void sparx5_vcap_port_key_deselection(struct sparx5 *sparx5, + struct vcap_admin *admin) +{ + int portno; + + for (portno = 0; portno < SPX5_PORTS; ++portno) + spx5_rmw(ANA_ACL_VCAP_S2_CFG_SEC_ENA_SET(0), + ANA_ACL_VCAP_S2_CFG_SEC_ENA, + sparx5, + ANA_ACL_VCAP_S2_CFG(portno)); +} + +static void sparx5_vcap_admin_free(struct vcap_admin *admin) +{ + if (!admin) + return; + kfree(admin->cache.keystream); + kfree(admin->cache.maskstream); + kfree(admin->cache.actionstream); + kfree(admin); +} + +/* Allocate a vcap instance with a rule list and a cache area */ +static struct vcap_admin * +sparx5_vcap_admin_alloc(struct sparx5 *sparx5, struct vcap_control *ctrl, + const struct sparx5_vcap_inst *cfg) +{ + struct vcap_admin *admin; + + admin = kzalloc(sizeof(*admin), GFP_KERNEL); + if (!admin) + return ERR_PTR(-ENOMEM); + INIT_LIST_HEAD(&admin->list); + INIT_LIST_HEAD(&admin->rules); + 
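+ /* Copy the instance configuration and allocate the key, mask and action stream buffers used by the VCAP cache callbacks */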
admin->vtype = cfg->vtype; + admin->vinst = cfg->vinst; + admin->lookups = cfg->lookups; + admin->lookups_per_instance = cfg->lookups_per_instance; + admin->first_cid = cfg->first_cid; + admin->last_cid = cfg->last_cid; + admin->cache.keystream = + kzalloc(STREAMSIZE, GFP_KERNEL); + admin->cache.maskstream = + kzalloc(STREAMSIZE, GFP_KERNEL); + admin->cache.actionstream = + kzalloc(STREAMSIZE, GFP_KERNEL); + if (!admin->cache.keystream || !admin->cache.maskstream || + !admin->cache.actionstream) { + sparx5_vcap_admin_free(admin); + return ERR_PTR(-ENOMEM); + } + return admin; +} + +/* Do block allocations and provide addresses for VCAP instances */ +static void sparx5_vcap_block_alloc(struct sparx5 *sparx5, + struct vcap_admin *admin, + const struct sparx5_vcap_inst *cfg) +{ + int idx; + + /* Super VCAP block mapping and address configuration. Block 0 + * is assigned addresses 0 through 3071, block 1 is assigned + * addresses 3072 though 6143, and so on. + */ + for (idx = cfg->blockno; idx < cfg->blockno + cfg->blocks; ++idx) { + spx5_wr(VCAP_SUPER_IDX_CORE_IDX_SET(idx), sparx5, + VCAP_SUPER_IDX); + spx5_wr(VCAP_SUPER_MAP_CORE_MAP_SET(cfg->map_id), sparx5, + VCAP_SUPER_MAP); + } + admin->first_valid_addr = cfg->blockno * SUPER_VCAP_BLK_SIZE; + admin->last_used_addr = admin->first_valid_addr + + cfg->blocks * SUPER_VCAP_BLK_SIZE; + admin->last_valid_addr = admin->last_used_addr - 1; +} + +/* Allocate a vcap control and vcap instances and configure the system */ +int sparx5_vcap_init(struct sparx5 *sparx5) +{ + const struct sparx5_vcap_inst *cfg; + struct vcap_control *ctrl; + struct vcap_admin *admin; + int err = 0, idx; + + /* Create a VCAP control instance that owns the platform specific VCAP + * model with VCAP instances and information about keysets, keys, + * actionsets and actions + * - Create administrative state for each available VCAP + * - Lists of rules + * - Address information + * - Initialize VCAP blocks + * - Configure port keysets + */ + ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL); + if (!ctrl) + return -ENOMEM; + + sparx5->vcap_ctrl = ctrl; + /* select the sparx5 VCAP model */ + ctrl->vcaps = sparx5_vcaps; + ctrl->stats = &sparx5_vcap_stats; + /* Setup callbacks to allow the API to use the VCAP HW */ + ctrl->ops = &sparx5_vcap_ops; + + INIT_LIST_HEAD(&ctrl->list); + for (idx = 0; idx < ARRAY_SIZE(sparx5_vcap_inst_cfg); ++idx) { + cfg = &sparx5_vcap_inst_cfg[idx]; + admin = sparx5_vcap_admin_alloc(sparx5, ctrl, cfg); + if (IS_ERR(admin)) { + err = PTR_ERR(admin); + pr_err("%s:%d: vcap allocation failed: %d\n", + __func__, __LINE__, err); + return err; + } + sparx5_vcap_block_alloc(sparx5, admin, cfg); + sparx5_vcap_block_init(sparx5, admin); + if (cfg->vinst == 0) + sparx5_vcap_port_key_selection(sparx5, admin); + list_add_tail(&admin->list, &ctrl->list); + } + + return err; +} + +void sparx5_vcap_destroy(struct sparx5 *sparx5) +{ + struct vcap_control *ctrl = sparx5->vcap_ctrl; + struct vcap_admin *admin, *admin_next; + + if (!ctrl) + return; + + list_for_each_entry_safe(admin, admin_next, &ctrl->list, list) { + sparx5_vcap_port_key_deselection(sparx5, admin); + vcap_del_rules(ctrl, admin); + list_del(&admin->list); + sparx5_vcap_admin_free(admin); + } + kfree(ctrl); +} diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_vcap_impl.h b/drivers/net/ethernet/microchip/sparx5/sparx5_vcap_impl.h new file mode 100644 index 000000000000..8e44ebd76b41 --- /dev/null +++ b/drivers/net/ethernet/microchip/sparx5/sparx5_vcap_impl.h @@ -0,0 +1,20 @@ +/* SPDX-License-Identifier: 
GPL-2.0+ */ +/* Microchip Sparx5 Switch driver VCAP implementation + * + * Copyright (c) 2022 Microchip Technology Inc. and its subsidiaries. + * + * The Sparx5 Chip Register Model can be browsed at this location: + * https://github.com/microchip-ung/sparx-5_reginfo + */ + +#ifndef __SPARX5_VCAP_IMPL_H__ +#define __SPARX5_VCAP_IMPL_H__ + +#define SPARX5_VCAP_CID_IS2_L0 VCAP_CID_INGRESS_STAGE2_L0 /* IS2 lookup 0 */ +#define SPARX5_VCAP_CID_IS2_L1 VCAP_CID_INGRESS_STAGE2_L1 /* IS2 lookup 1 */ +#define SPARX5_VCAP_CID_IS2_L2 VCAP_CID_INGRESS_STAGE2_L2 /* IS2 lookup 2 */ +#define SPARX5_VCAP_CID_IS2_L3 VCAP_CID_INGRESS_STAGE2_L3 /* IS2 lookup 3 */ +#define SPARX5_VCAP_CID_IS2_MAX \ + (VCAP_CID_INGRESS_STAGE2_L3 + VCAP_CID_LOOKUP_SIZE - 1) /* IS2 Max */ + +#endif /* __SPARX5_VCAP_IMPL_H__ */ diff --git a/drivers/net/ethernet/microchip/vcap/Kconfig b/drivers/net/ethernet/microchip/vcap/Kconfig new file mode 100644 index 000000000000..1af30a358a15 --- /dev/null +++ b/drivers/net/ethernet/microchip/vcap/Kconfig @@ -0,0 +1,52 @@ +# SPDX-License-Identifier: GPL-2.0-only +# +# Microchip VCAP API configuration +# + +if NET_VENDOR_MICROCHIP + +config VCAP + bool "VCAP (Versatile Content-Aware Processor) library" + help + Provides the basic VCAP functionality for multiple Microchip switchcores + + A VCAP is essentially a TCAM with rules consisting of + + - Programmable key fields + - Programmable action fields + - A counter (which may be only one bit wide) + + Besides this each VCAP has: + + - A number of lookups + - A keyset configuration per port per lookup + + The VCAP implementation provides switchcore independent handling of rules + and supports: + + - Creating and deleting rules + - Updating and getting rules + + The platform specific configuration as well as the platform specific model + of the VCAP instances are attached to the VCAP API and a client can then + access rules via the API in a platform independent way, with the + limitations that each VCAP has in terms of its supported keys and actions. + + Different switchcores will have different VCAP instances with different + characteristics. Look in the datasheet for the VCAP specifications for the + specific switchcore. + +config VCAP_KUNIT_TEST + bool "KUnit test for VCAP library" if !KUNIT_ALL_TESTS + depends on KUNIT + depends on KUNIT=y && VCAP=y && y + default KUNIT_ALL_TESTS + help + This builds unit tests for the VCAP library. + + For more information on KUnit and unit tests in general, please refer + to the KUnit documentation in Documentation/dev-tools/kunit/. + + If unsure, say N. + +endif # NET_VENDOR_MICROCHIP diff --git a/drivers/net/ethernet/microchip/vcap/Makefile b/drivers/net/ethernet/microchip/vcap/Makefile new file mode 100644 index 000000000000..b377569f92d8 --- /dev/null +++ b/drivers/net/ethernet/microchip/vcap/Makefile @@ -0,0 +1,9 @@ +# SPDX-License-Identifier: GPL-2.0-only +# +# Makefile for the Microchip VCAP API +# + +obj-$(CONFIG_VCAP) += vcap.o +obj-$(CONFIG_VCAP_KUNIT_TEST) += vcap_model_kunit.o + +vcap-y += vcap_api.o diff --git a/drivers/net/ethernet/microchip/vcap/vcap_ag_api.h b/drivers/net/ethernet/microchip/vcap/vcap_ag_api.h new file mode 100644 index 000000000000..804d57b9b60a --- /dev/null +++ b/drivers/net/ethernet/microchip/vcap/vcap_ag_api.h @@ -0,0 +1,326 @@ +/* SPDX-License-Identifier: BSD-3-Clause */ +/* Copyright (C) 2022 Microchip Technology Inc. and its subsidiaries. + * Microchip VCAP API + */ + +/* This file is autogenerated by cml-utils 2022-10-13 10:04:41 +0200. 
+ * Commit ID: fd7cafd175899f0672c73afb3a30fc872500ae86 + */ + +#ifndef __VCAP_AG_API__ +#define __VCAP_AG_API__ + +enum vcap_type { + VCAP_TYPE_IS2, + VCAP_TYPE_MAX +}; + +/* Keyfieldset names with origin information */ +enum vcap_keyfield_set { + VCAP_KFS_NO_VALUE, /* initial value */ + VCAP_KFS_ARP, /* sparx5 is2 X6 */ + VCAP_KFS_IP4_OTHER, /* sparx5 is2 X6 */ + VCAP_KFS_IP4_TCP_UDP, /* sparx5 is2 X6 */ + VCAP_KFS_IP6_STD, /* sparx5 is2 X6 */ + VCAP_KFS_IP_7TUPLE, /* sparx5 is2 X12 */ + VCAP_KFS_MAC_ETYPE, /* sparx5 is2 X6 */ +}; + +/* List of keyfields with description + * + * Keys ending in _IS are booleans derived from frame data + * Keys ending in _CLS are classified frame data + * + * VCAP_KF_8021Q_DEI_CLS: W1, sparx5: is2 + * Classified DEI + * VCAP_KF_8021Q_PCP_CLS: W3, sparx5: is2 + * Classified PCP + * VCAP_KF_8021Q_VID_CLS: W13, sparx5: is2 + * Classified VID + * VCAP_KF_8021Q_VLAN_TAGGED_IS: W1, sparx5: is2 + * Sparx5: Set if frame was received with a VLAN tag, LAN966x: Set if frame has + * one or more Q-tags. Independent of port VLAN awareness + * VCAP_KF_ARP_ADDR_SPACE_OK_IS: W1, sparx5: is2 + * Set if hardware address is Ethernet + * VCAP_KF_ARP_LEN_OK_IS: W1, sparx5: is2 + * Set if hardware address length = 6 (Ethernet) and IP address length = 4 (IP). + * VCAP_KF_ARP_OPCODE: W2, sparx5: is2 + * ARP opcode + * VCAP_KF_ARP_OPCODE_UNKNOWN_IS: W1, sparx5: is2 + * Set if not one of the codes defined in VCAP_KF_ARP_OPCODE + * VCAP_KF_ARP_PROTO_SPACE_OK_IS: W1, sparx5: is2 + * Set if protocol address space is 0x0800 + * VCAP_KF_ARP_SENDER_MATCH_IS: W1, sparx5: is2 + * Sender Hardware Address = SMAC (ARP) + * VCAP_KF_ARP_TGT_MATCH_IS: W1, sparx5: is2 + * Target Hardware Address = SMAC (RARP) + * VCAP_KF_ETYPE: W16, sparx5: is2 + * Ethernet type + * VCAP_KF_ETYPE_LEN_IS: W1, sparx5: is2 + * Set if frame has EtherType >= 0x600 + * VCAP_KF_IF_IGR_PORT_MASK: sparx5 is2 W32, sparx5 is2 W65 + * Ingress port mask, one bit per port/erleg + * VCAP_KF_IF_IGR_PORT_MASK_L3: W1, sparx5: is2 + * If set, IF_IGR_PORT_MASK, IF_IGR_PORT_MASK_RNG, and IF_IGR_PORT_MASK_SEL are + * used to specify L3 interfaces + * VCAP_KF_IF_IGR_PORT_MASK_RNG: W4, sparx5: is2 + * Range selector for IF_IGR_PORT_MASK. Specifies which group of 32 ports are + * available in IF_IGR_PORT_MASK + * VCAP_KF_IF_IGR_PORT_MASK_SEL: W2, sparx5: is2 + * Mode selector for IF_IGR_PORT_MASK, applicable when IF_IGR_PORT_MASK_L3 == 0. + * Mapping: 0: DEFAULT 1: LOOPBACK 2: MASQUERADE 3: CPU_VD + * VCAP_KF_IP4_IS: W1, sparx5: is2 + * Set if frame has EtherType = 0x800 and IP version = 4 + * VCAP_KF_ISDX_CLS: W12, sparx5: is2 + * Classified ISDX + * VCAP_KF_ISDX_GT0_IS: W1, sparx5: is2 + * Set if classified ISDX > 0 + * VCAP_KF_L2_BC_IS: W1, sparx5: is2 + * Set if frame’s destination MAC address is the broadcast address + * (FF-FF-FF-FF-FF-FF). + * VCAP_KF_L2_DMAC: W48, sparx5: is2 + * Destination MAC address + * VCAP_KF_L2_FWD_IS: W1, sparx5: is2 + * Set if the frame is allowed to be forwarded to front ports + * VCAP_KF_L2_MC_IS: W1, sparx5: is2 + * Set if frame’s destination MAC address is a multicast address (bit 40 = 1). 
+ * VCAP_KF_L2_PAYLOAD_ETYPE: W64, sparx5: is2 + * Byte 0-7 of L2 payload after Type/Len field and overloading for OAM + * VCAP_KF_L2_SMAC: W48, sparx5: is2 + * Source MAC address + * VCAP_KF_L3_DIP_EQ_SIP_IS: W1, sparx5: is2 + * Set if Src IP matches Dst IP address + * VCAP_KF_L3_DST_IS: W1, sparx5: is2 + * Set if lookup is done for egress router leg + * VCAP_KF_L3_FRAGMENT_TYPE: W2, sparx5: is2 + * L3 Fragmentation type (none, initial, suspicious, valid follow up) + * VCAP_KF_L3_FRAG_INVLD_L4_LEN: W1, sparx5: is2 + * Set if frame's L4 length is less than ANA_CL:COMMON:CLM_FRAGMENT_CFG.L4_MIN_L + * EN + * VCAP_KF_L3_IP4_DIP: W32, sparx5: is2 + * Destination IPv4 Address + * VCAP_KF_L3_IP4_SIP: W32, sparx5: is2 + * Source IPv4 Address + * VCAP_KF_L3_IP6_DIP: W128, sparx5: is2 + * Sparx5: Full IPv6 DIP, LAN966x: Either Full IPv6 DIP or a subset depending on + * frame type + * VCAP_KF_L3_IP6_SIP: W128, sparx5: is2 + * Sparx5: Full IPv6 SIP, LAN966x: Either Full IPv6 SIP or a subset depending on + * frame type + * VCAP_KF_L3_IP_PROTO: W8, sparx5: is2 + * IPv4 frames: IP protocol. IPv6 frames: Next header, same as for IPV4 + * VCAP_KF_L3_OPTIONS_IS: W1, sparx5: is2 + * Set if IPv4 frame contains options (IP len > 5) + * VCAP_KF_L3_PAYLOAD: sparx5 is2 W96, sparx5 is2 W40 + * Sparx5: Payload bytes after IP header. IPv4: IPv4 options are not parsed so + * payload is always taken 20 bytes after the start of the IPv4 header, LAN966x: + * Bytes 0-6 after IP header + * VCAP_KF_L3_RT_IS: W1, sparx5: is2 + * Set if frame has hit a router leg + * VCAP_KF_L3_TOS: W8, sparx5: is2 + * Sparx5: Frame's IPv4/IPv6 DSCP and ECN fields, LAN966x: IP TOS field + * VCAP_KF_L3_TTL_GT0: W1, sparx5: is2 + * Set if IPv4 TTL / IPv6 hop limit is greater than 0 + * VCAP_KF_L4_ACK: W1, sparx5: is2 + * Sparx5 and LAN966x: TCP flag ACK, LAN966x only: PTP over UDP: flagField bit 2 + * (unicastFlag) + * VCAP_KF_L4_DPORT: W16, sparx5: is2 + * Sparx5: TCP/UDP destination port. Overloading for IP_7TUPLE: Non-TCP/UDP IP + * frames: L4_DPORT = L3_IP_PROTO, LAN966x: TCP/UDP destination port + * VCAP_KF_L4_FIN: W1, sparx5: is2 + * TCP flag FIN, LAN966x: TCP flag FIN, and for PTP over UDP: messageType bit 1 + * VCAP_KF_L4_PAYLOAD: W64, sparx5: is2 + * Payload bytes after TCP/UDP header Overloading for IP_7TUPLE: Non TCP/UDP + * frames: Payload bytes 0–7 after IP header. IPv4 options are not parsed so + * payload is always taken 20 bytes after the start of the IPv4 header for non + * TCP/UDP IPv4 frames + * VCAP_KF_L4_PSH: W1, sparx5: is2 + * Sparx5: TCP flag PSH, LAN966x: TCP: TCP flag PSH. PTP over UDP: flagField bit + * 1 (twoStepFlag) + * VCAP_KF_L4_RNG: W16, sparx5: is2 + * Range checker bitmask (one for each range checker). Input into range checkers + * is taken from classified results (VID, DSCP) and frame (SPORT, DPORT, ETYPE, + * outer VID, inner VID) + * VCAP_KF_L4_RST: W1, sparx5: is2 + * Sparx5: TCP flag RST , LAN966x: TCP: TCP flag RST. PTP over UDP: messageType + * bit 3 + * VCAP_KF_L4_SEQUENCE_EQ0_IS: W1, sparx5: is2 + * Set if TCP sequence number is 0, LAN966x: Overlayed with PTP over UDP: + * messageType bit 0 + * VCAP_KF_L4_SPORT: W16, sparx5: is2 + * TCP/UDP source port + * VCAP_KF_L4_SPORT_EQ_DPORT_IS: W1, sparx5: is2 + * Set if UDP or TCP source port equals UDP or TCP destination port + * VCAP_KF_L4_SYN: W1, sparx5: is2 + * Sparx5: TCP flag SYN, LAN966x: TCP: TCP flag SYN. PTP over UDP: messageType + * bit 2 + * VCAP_KF_L4_URG: W1, sparx5: is2 + * Sparx5: TCP flag URG, LAN966x: TCP: TCP flag URG. 
PTP over UDP: flagField bit + * 7 (reserved) + * VCAP_KF_LOOKUP_FIRST_IS: W1, sparx5: is2 + * Selects between entries relevant for first and second lookup. Set for first + * lookup, cleared for second lookup. + * VCAP_KF_LOOKUP_PAG: W8, sparx5: is2 + * Classified Policy Association Group: chains rules from IS1/CLM to IS2 + * VCAP_KF_OAM_CCM_CNTS_EQ0: W1, sparx5: is2 + * Dual-ended loss measurement counters in CCM frames are all zero + * VCAP_KF_OAM_Y1731_IS: W1, sparx5: is2 + * Set if frame’s EtherType = 0x8902 + * VCAP_KF_TCP_IS: W1, sparx5: is2 + * Set if frame is IPv4 TCP frame (IP protocol = 6) or IPv6 TCP frames (Next + * header = 6) + * VCAP_KF_TCP_UDP_IS: W1, sparx5: is2 + * Set if frame is IPv4/IPv6 TCP or UDP frame (IP protocol/next header equals 6 + * or 17) + * VCAP_KF_TYPE: sparx5 is2 W4, sparx5 is2 W2 + * Keyset type id - set by the API + */ + +/* Keyfield names */ +enum vcap_key_field { + VCAP_KF_NO_VALUE, /* initial value */ + VCAP_KF_8021Q_DEI_CLS, + VCAP_KF_8021Q_PCP_CLS, + VCAP_KF_8021Q_VID_CLS, + VCAP_KF_8021Q_VLAN_TAGGED_IS, + VCAP_KF_ARP_ADDR_SPACE_OK_IS, + VCAP_KF_ARP_LEN_OK_IS, + VCAP_KF_ARP_OPCODE, + VCAP_KF_ARP_OPCODE_UNKNOWN_IS, + VCAP_KF_ARP_PROTO_SPACE_OK_IS, + VCAP_KF_ARP_SENDER_MATCH_IS, + VCAP_KF_ARP_TGT_MATCH_IS, + VCAP_KF_ETYPE, + VCAP_KF_ETYPE_LEN_IS, + VCAP_KF_IF_IGR_PORT_MASK, + VCAP_KF_IF_IGR_PORT_MASK_L3, + VCAP_KF_IF_IGR_PORT_MASK_RNG, + VCAP_KF_IF_IGR_PORT_MASK_SEL, + VCAP_KF_IP4_IS, + VCAP_KF_ISDX_CLS, + VCAP_KF_ISDX_GT0_IS, + VCAP_KF_L2_BC_IS, + VCAP_KF_L2_DMAC, + VCAP_KF_L2_FWD_IS, + VCAP_KF_L2_MC_IS, + VCAP_KF_L2_PAYLOAD_ETYPE, + VCAP_KF_L2_SMAC, + VCAP_KF_L3_DIP_EQ_SIP_IS, + VCAP_KF_L3_DST_IS, + VCAP_KF_L3_FRAGMENT_TYPE, + VCAP_KF_L3_FRAG_INVLD_L4_LEN, + VCAP_KF_L3_IP4_DIP, + VCAP_KF_L3_IP4_SIP, + VCAP_KF_L3_IP6_DIP, + VCAP_KF_L3_IP6_SIP, + VCAP_KF_L3_IP_PROTO, + VCAP_KF_L3_OPTIONS_IS, + VCAP_KF_L3_PAYLOAD, + VCAP_KF_L3_RT_IS, + VCAP_KF_L3_TOS, + VCAP_KF_L3_TTL_GT0, + VCAP_KF_L4_ACK, + VCAP_KF_L4_DPORT, + VCAP_KF_L4_FIN, + VCAP_KF_L4_PAYLOAD, + VCAP_KF_L4_PSH, + VCAP_KF_L4_RNG, + VCAP_KF_L4_RST, + VCAP_KF_L4_SEQUENCE_EQ0_IS, + VCAP_KF_L4_SPORT, + VCAP_KF_L4_SPORT_EQ_DPORT_IS, + VCAP_KF_L4_SYN, + VCAP_KF_L4_URG, + VCAP_KF_LOOKUP_FIRST_IS, + VCAP_KF_LOOKUP_PAG, + VCAP_KF_OAM_CCM_CNTS_EQ0, + VCAP_KF_OAM_Y1731_IS, + VCAP_KF_TCP_IS, + VCAP_KF_TCP_UDP_IS, + VCAP_KF_TYPE, +}; + +/* Actionset names with origin information */ +enum vcap_actionfield_set { + VCAP_AFS_NO_VALUE, /* initial value */ + VCAP_AFS_BASE_TYPE, /* sparx5 is2 X3 */ +}; + +/* List of actionfields with description + * + * VCAP_AF_CNT_ID: W12, sparx5: is2 + * Counter ID, used per lookup to index the 4K frame counters (ANA_ACL:CNT_TBL). + * Multiple VCAP IS2 entries can use the same counter. + * VCAP_AF_CPU_COPY_ENA: W1, sparx5: is2 + * Setting this bit to 1 causes all frames that hit this action to be copied to + * the CPU extraction queue specified in CPU_QUEUE_NUM. + * VCAP_AF_CPU_QUEUE_NUM: W3, sparx5: is2 + * CPU queue number. Used when CPU_COPY_ENA is set. + * VCAP_AF_HIT_ME_ONCE: W1, sparx5: is2 + * Setting this bit to 1 causes the first frame that hits this action where the + * HIT_CNT counter is zero to be copied to the CPU extraction queue specified in + * CPU_QUEUE_NUM. The HIT_CNT counter is then incremented and any frames that + * hit this action later are not copied to the CPU. To re-enable the HIT_ME_ONCE + * functionality, the HIT_CNT counter must be cleared. + * VCAP_AF_IGNORE_PIPELINE_CTRL: W1, sparx5: is2 + * Ignore ingress pipeline control. 
This enforces the use of the VCAP IS2 action + * even when the pipeline control has terminated the frame before VCAP IS2. + * VCAP_AF_INTR_ENA: W1, sparx5: is2 + * If set, an interrupt is triggered when this rule is hit + * VCAP_AF_LRN_DIS: W1, sparx5: is2 + * Setting this bit to 1 disables learning of frames hitting this action. + * VCAP_AF_MASK_MODE: W3, sparx5: is2 + * Controls the PORT_MASK use. Sparx5: 0: OR_DSTMASK, 1: AND_VLANMASK, 2: + * REPLACE_PGID, 3: REPLACE_ALL, 4: REDIR_PGID, 5: OR_PGID_MASK, 6: VSTAX, 7: + * Not applicable. LAN966X: 0: No action, 1: Permit/deny (AND), 2: Policy + * forwarding (DMAC lookup), 3: Redirect. The CPU port is untouched by + * MASK_MODE. + * VCAP_AF_MATCH_ID: W16, sparx5: is2 + * Logical ID for the entry. The MATCH_ID is extracted together with the frame + * if the frame is forwarded to the CPU (CPU_COPY_ENA). The result is placed in + * IFH.CL_RSLT. + * VCAP_AF_MATCH_ID_MASK: W16, sparx5: is2 + * Mask used by MATCH_ID. + * VCAP_AF_MIRROR_PROBE: W2, sparx5: is2 + * Mirroring performed according to configuration of a mirror probe. 0: No + * mirroring. 1: Mirror probe 0. 2: Mirror probe 1. 3: Mirror probe 2 + * VCAP_AF_PIPELINE_FORCE_ENA: W1, sparx5: is2 + * If set, use PIPELINE_PT unconditionally and set PIPELINE_ACT = NONE if + * PIPELINE_PT == NONE. Overrules previous settings of pipeline point. + * VCAP_AF_PIPELINE_PT: W5, sparx5: is2 + * Pipeline point used if PIPELINE_FORCE_ENA is set + * VCAP_AF_POLICE_ENA: W1, sparx5: is2 + * Setting this bit to 1 causes frames that hit this action to be policed by the + * ACL policer specified in POLICE_IDX. Only applies to the first lookup. + * VCAP_AF_POLICE_IDX: W6, sparx5: is2 + * Selects VCAP policer used when policing frames (POLICE_ENA) + * VCAP_AF_PORT_MASK: W68, sparx5: is2 + * Port mask applied to the forwarding decision based on MASK_MODE. + * VCAP_AF_RT_DIS: W1, sparx5: is2 + * If set, routing is disallowed. Only applies when IS_INNER_ACL is 0. See also + * IGR_ACL_ENA, EGR_ACL_ENA, and RLEG_STAT_IDX. + */ + +/* Actionfield names */ +enum vcap_action_field { + VCAP_AF_NO_VALUE, /* initial value */ + VCAP_AF_CNT_ID, + VCAP_AF_CPU_COPY_ENA, + VCAP_AF_CPU_QUEUE_NUM, + VCAP_AF_HIT_ME_ONCE, + VCAP_AF_IGNORE_PIPELINE_CTRL, + VCAP_AF_INTR_ENA, + VCAP_AF_LRN_DIS, + VCAP_AF_MASK_MODE, + VCAP_AF_MATCH_ID, + VCAP_AF_MATCH_ID_MASK, + VCAP_AF_MIRROR_PROBE, + VCAP_AF_PIPELINE_FORCE_ENA, + VCAP_AF_PIPELINE_PT, + VCAP_AF_POLICE_ENA, + VCAP_AF_POLICE_IDX, + VCAP_AF_PORT_MASK, + VCAP_AF_RT_DIS, +}; + +#endif /* __VCAP_AG_API__ */ diff --git a/drivers/net/ethernet/microchip/vcap/vcap_ag_api_kunit.h b/drivers/net/ethernet/microchip/vcap/vcap_ag_api_kunit.h new file mode 100644 index 000000000000..e538ca725687 --- /dev/null +++ b/drivers/net/ethernet/microchip/vcap/vcap_ag_api_kunit.h @@ -0,0 +1,643 @@ +/* SPDX-License-Identifier: BSD-3-Clause */ +/* Copyright (C) 2022 Microchip Technology Inc. and its subsidiaries. 
+ * Microchip VCAP API interface for kunit testing + * This is a different interface, to be able to include different VCAPs + */ + +/* Use same include guard as the official API to be able to override it */ +#ifndef __VCAP_AG_API__ +#define __VCAP_AG_API__ + +enum vcap_type { + VCAP_TYPE_ES2, + VCAP_TYPE_IS0, + VCAP_TYPE_IS2, + VCAP_TYPE_MAX +}; + +/* Keyfieldset names with origin information */ +enum vcap_keyfield_set { + VCAP_KFS_NO_VALUE, /* initial value */ + VCAP_KFS_ARP, /* sparx5 is2 X6, sparx5 es2 X6 */ + VCAP_KFS_ETAG, /* sparx5 is0 X2 */ + VCAP_KFS_IP4_OTHER, /* sparx5 is2 X6, sparx5 es2 X6 */ + VCAP_KFS_IP4_TCP_UDP, /* sparx5 is2 X6, sparx5 es2 X6 */ + VCAP_KFS_IP4_VID, /* sparx5 es2 X3 */ + VCAP_KFS_IP6_STD, /* sparx5 is2 X6 */ + VCAP_KFS_IP6_VID, /* sparx5 is2 X6, sparx5 es2 X6 */ + VCAP_KFS_IP_7TUPLE, /* sparx5 is2 X12, sparx5 es2 X12 */ + VCAP_KFS_LL_FULL, /* sparx5 is0 X6 */ + VCAP_KFS_MAC_ETYPE, /* sparx5 is2 X6, sparx5 es2 X6 */ + VCAP_KFS_MLL, /* sparx5 is0 X3 */ + VCAP_KFS_NORMAL, /* sparx5 is0 X6 */ + VCAP_KFS_NORMAL_5TUPLE_IP4, /* sparx5 is0 X6 */ + VCAP_KFS_NORMAL_7TUPLE, /* sparx5 is0 X12 */ + VCAP_KFS_PURE_5TUPLE_IP4, /* sparx5 is0 X3 */ + VCAP_KFS_TRI_VID, /* sparx5 is0 X2 */ +}; + +/* List of keyfields with description + * + * Keys ending in _IS are booleans derived from frame data + * Keys ending in _CLS are classified frame data + * + * VCAP_KF_8021BR_ECID_BASE: W12, sparx5: is0 + * Used by 802.1BR Bridge Port Extension in an E-Tag + * VCAP_KF_8021BR_ECID_EXT: W8, sparx5: is0 + * Used by 802.1BR Bridge Port Extension in an E-Tag + * VCAP_KF_8021BR_E_TAGGED: W1, sparx5: is0 + * Set for frames containing an E-TAG (802.1BR Ethertype 893f) + * VCAP_KF_8021BR_GRP: W2, sparx5: is0 + * E-Tag group bits in 802.1BR Bridge Port Extension + * VCAP_KF_8021BR_IGR_ECID_BASE: W12, sparx5: is0 + * Used by 802.1BR Bridge Port Extension in an E-Tag + * VCAP_KF_8021BR_IGR_ECID_EXT: W8, sparx5: is0 + * Used by 802.1BR Bridge Port Extension in an E-Tag + * VCAP_KF_8021Q_DEI0: W1, sparx5: is0 + * First DEI in multiple vlan tags (outer tag or default port tag) + * VCAP_KF_8021Q_DEI1: W1, sparx5: is0 + * Second DEI in multiple vlan tags (inner tag) + * VCAP_KF_8021Q_DEI2: W1, sparx5: is0 + * Third DEI in multiple vlan tags (not always available) + * VCAP_KF_8021Q_DEI_CLS: W1, sparx5: is2/es2 + * Classified DEI + * VCAP_KF_8021Q_PCP0: W3, sparx5: is0 + * First PCP in multiple vlan tags (outer tag or default port tag) + * VCAP_KF_8021Q_PCP1: W3, sparx5: is0 + * Second PCP in multiple vlan tags (inner tag) + * VCAP_KF_8021Q_PCP2: W3, sparx5: is0 + * Third PCP in multiple vlan tags (not always available) + * VCAP_KF_8021Q_PCP_CLS: W3, sparx5: is2/es2 + * Classified PCP + * VCAP_KF_8021Q_TPID0: W3, sparx5: is0 + * First TPID in multiple vlan tags (outer tag or default port tag) + * VCAP_KF_8021Q_TPID1: W3, sparx5: is0 + * Second TPID in multiple vlan tags (inner tag) + * VCAP_KF_8021Q_TPID2: W3, sparx5: is0 + * Third TPID in multiple vlan tags (not always available) + * VCAP_KF_8021Q_VID0: W12, sparx5: is0 + * First VID in multiple vlan tags (outer tag or default port tag) + * VCAP_KF_8021Q_VID1: W12, sparx5: is0 + * Second VID in multiple vlan tags (inner tag) + * VCAP_KF_8021Q_VID2: W12, sparx5: is0 + * Third VID in multiple vlan tags (not always available) + * VCAP_KF_8021Q_VID_CLS: W13, sparx5: is2/es2 + * Classified VID + * VCAP_KF_8021Q_VLAN_TAGGED_IS: W1, sparx5: is2/es2 + * Sparx5: Set if frame was received with a VLAN tag, LAN966x: Set if frame has + * one or more Q-tags.
Independent of port VLAN awareness + * VCAP_KF_8021Q_VLAN_TAGS: W3, sparx5: is0 + * Number of VLAN tags in frame: 0: Untagged, 1: Single tagged, 3: Double + * tagged, 7: Triple tagged + * VCAP_KF_ACL_GRP_ID: W8, sparx5: es2 + * Used in interface map table + * VCAP_KF_ARP_ADDR_SPACE_OK_IS: W1, sparx5: is2/es2 + * Set if hardware address is Ethernet + * VCAP_KF_ARP_LEN_OK_IS: W1, sparx5: is2/es2 + * Set if hardware address length = 6 (Ethernet) and IP address length = 4 (IP). + * VCAP_KF_ARP_OPCODE: W2, sparx5: is2/es2 + * ARP opcode + * VCAP_KF_ARP_OPCODE_UNKNOWN_IS: W1, sparx5: is2/es2 + * Set if not one of the codes defined in VCAP_KF_ARP_OPCODE + * VCAP_KF_ARP_PROTO_SPACE_OK_IS: W1, sparx5: is2/es2 + * Set if protocol address space is 0x0800 + * VCAP_KF_ARP_SENDER_MATCH_IS: W1, sparx5: is2/es2 + * Sender Hardware Address = SMAC (ARP) + * VCAP_KF_ARP_TGT_MATCH_IS: W1, sparx5: is2/es2 + * Target Hardware Address = SMAC (RARP) + * VCAP_KF_COSID_CLS: W3, sparx5: es2 + * Class of service + * VCAP_KF_DST_ENTRY: W1, sparx5: is0 + * Selects whether the frame’s destination or source information is used for + * fields L2_SMAC and L3_IP4_SIP + * VCAP_KF_ES0_ISDX_KEY_ENA: W1, sparx5: es2 + * The value taken from the IFH .FWD.ES0_ISDX_KEY_ENA + * VCAP_KF_ETYPE: W16, sparx5: is0/is2/es2 + * Ethernet type + * VCAP_KF_ETYPE_LEN_IS: W1, sparx5: is0/is2/es2 + * Set if frame has EtherType >= 0x600 + * VCAP_KF_ETYPE_MPLS: W2, sparx5: is0 + * Type of MPLS Ethertype (or not) + * VCAP_KF_IF_EGR_PORT_MASK: W32, sparx5: es2 + * Egress port mask, one bit per port + * VCAP_KF_IF_EGR_PORT_MASK_RNG: W3, sparx5: es2 + * Select which 32 port group is available in IF_EGR_PORT (or virtual ports or + * CPU queue) + * VCAP_KF_IF_IGR_PORT: sparx5 is0 W7, sparx5 es2 W9 + * Sparx5: Logical ingress port number retrieved from + * ANA_CL::PORT_ID_CFG.LPORT_NUM or ERLEG, LAN966x: ingress port number + * VCAP_KF_IF_IGR_PORT_MASK: sparx5 is0 W65, sparx5 is2 W32, sparx5 is2 W65 + * Ingress port mask, one bit per port/erleg + * VCAP_KF_IF_IGR_PORT_MASK_L3: W1, sparx5: is2 + * If set, IF_IGR_PORT_MASK, IF_IGR_PORT_MASK_RNG, and IF_IGR_PORT_MASK_SEL are + * used to specify L3 interfaces + * VCAP_KF_IF_IGR_PORT_MASK_RNG: W4, sparx5: is2 + * Range selector for IF_IGR_PORT_MASK. Specifies which group of 32 ports are + * available in IF_IGR_PORT_MASK + * VCAP_KF_IF_IGR_PORT_MASK_SEL: W2, sparx5: is0/is2 + * Mode selector for IF_IGR_PORT_MASK, applicable when IF_IGR_PORT_MASK_L3 == 0. + * Mapping: 0: DEFAULT 1: LOOPBACK 2: MASQUERADE 3: CPU_VD + * VCAP_KF_IF_IGR_PORT_SEL: W1, sparx5: es2 + * Selector for IF_IGR_PORT: physical port number or ERLEG + * VCAP_KF_IP4_IS: W1, sparx5: is0/is2/es2 + * Set if frame has EtherType = 0x800 and IP version = 4 + * VCAP_KF_IP_MC_IS: W1, sparx5: is0 + * Set if frame is IPv4 frame and frame’s destination MAC address is an IPv4 + * multicast address (0x01005E0 /25). Set if frame is IPv6 frame and frame’s + * destination MAC address is an IPv6 multicast address (0x3333/16). + * VCAP_KF_IP_PAYLOAD_5TUPLE: W32, sparx5: is0 + * Payload bytes after IP header + * VCAP_KF_IP_SNAP_IS: W1, sparx5: is0 + * Set if frame is IPv4, IPv6, or SNAP frame + * VCAP_KF_ISDX_CLS: W12, sparx5: is2/es2 + * Classified ISDX + * VCAP_KF_ISDX_GT0_IS: W1, sparx5: is2/es2 + * Set if classified ISDX > 0 + * VCAP_KF_L2_BC_IS: W1, sparx5: is0/is2/es2 + * Set if frame’s destination MAC address is the broadcast address + * (FF-FF-FF-FF-FF-FF).
+ * VCAP_KF_L2_DMAC: W48, sparx5: is0/is2/es2 + * Destination MAC address + * VCAP_KF_L2_FWD_IS: W1, sparx5: is2 + * Set if the frame is allowed to be forwarded to front ports + * VCAP_KF_L2_MC_IS: W1, sparx5: is0/is2/es2 + * Set if frame’s destination MAC address is a multicast address (bit 40 = 1). + * VCAP_KF_L2_PAYLOAD_ETYPE: W64, sparx5: is2/es2 + * Byte 0-7 of L2 payload after Type/Len field and overloading for OAM + * VCAP_KF_L2_SMAC: W48, sparx5: is0/is2/es2 + * Source MAC address + * VCAP_KF_L3_DIP_EQ_SIP_IS: W1, sparx5: is2/es2 + * Set if Src IP matches Dst IP address + * VCAP_KF_L3_DMAC_DIP_MATCH: W1, sparx5: is2 + * Match found in DIP security lookup in ANA_L3 + * VCAP_KF_L3_DPL_CLS: W1, sparx5: es2 + * The frames drop precedence level + * VCAP_KF_L3_DSCP: W6, sparx5: is0 + * Frame’s DSCP value + * VCAP_KF_L3_DST_IS: W1, sparx5: is2 + * Set if lookup is done for egress router leg + * VCAP_KF_L3_FRAGMENT_TYPE: W2, sparx5: is0/is2/es2 + * L3 Fragmentation type (none, initial, suspicious, valid follow up) + * VCAP_KF_L3_FRAG_INVLD_L4_LEN: W1, sparx5: is0/is2 + * Set if frame's L4 length is less than ANA_CL:COMMON:CLM_FRAGMENT_CFG.L4_MIN_L + * EN + * VCAP_KF_L3_IP4_DIP: W32, sparx5: is0/is2/es2 + * Destination IPv4 Address + * VCAP_KF_L3_IP4_SIP: W32, sparx5: is0/is2/es2 + * Source IPv4 Address + * VCAP_KF_L3_IP6_DIP: W128, sparx5: is0/is2/es2 + * Sparx5: Full IPv6 DIP, LAN966x: Either Full IPv6 DIP or a subset depending on + * frame type + * VCAP_KF_L3_IP6_SIP: W128, sparx5: is0/is2/es2 + * Sparx5: Full IPv6 SIP, LAN966x: Either Full IPv6 SIP or a subset depending on + * frame type + * VCAP_KF_L3_IP_PROTO: W8, sparx5: is0/is2/es2 + * IPv4 frames: IP protocol. IPv6 frames: Next header, same as for IPV4 + * VCAP_KF_L3_OPTIONS_IS: W1, sparx5: is0/is2/es2 + * Set if IPv4 frame contains options (IP len > 5) + * VCAP_KF_L3_PAYLOAD: sparx5 is2 W96, sparx5 is2 W40, sparx5 es2 W96 + * Sparx5: Payload bytes after IP header. IPv4: IPv4 options are not parsed so + * payload is always taken 20 bytes after the start of the IPv4 header, LAN966x: + * Bytes 0-6 after IP header + * VCAP_KF_L3_RT_IS: W1, sparx5: is2/es2 + * Set if frame has hit a router leg + * VCAP_KF_L3_SMAC_SIP_MATCH: W1, sparx5: is2 + * Match found in SIP security lookup in ANA_L3 + * VCAP_KF_L3_TOS: W8, sparx5: is2/es2 + * Sparx5: Frame's IPv4/IPv6 DSCP and ECN fields, LAN966x: IP TOS field + * VCAP_KF_L3_TTL_GT0: W1, sparx5: is2/es2 + * Set if IPv4 TTL / IPv6 hop limit is greater than 0 + * VCAP_KF_L4_ACK: W1, sparx5: is2/es2 + * Sparx5 and LAN966x: TCP flag ACK, LAN966x only: PTP over UDP: flagField bit 2 + * (unicastFlag) + * VCAP_KF_L4_DPORT: W16, sparx5: is2/es2 + * Sparx5: TCP/UDP destination port. Overloading for IP_7TUPLE: Non-TCP/UDP IP + * frames: L4_DPORT = L3_IP_PROTO, LAN966x: TCP/UDP destination port + * VCAP_KF_L4_FIN: W1, sparx5: is2/es2 + * TCP flag FIN, LAN966x: TCP flag FIN, and for PTP over UDP: messageType bit 1 + * VCAP_KF_L4_PAYLOAD: W64, sparx5: is2/es2 + * Payload bytes after TCP/UDP header Overloading for IP_7TUPLE: Non TCP/UDP + * frames: Payload bytes 0–7 after IP header. IPv4 options are not parsed so + * payload is always taken 20 bytes after the start of the IPv4 header for non + * TCP/UDP IPv4 frames + * VCAP_KF_L4_PSH: W1, sparx5: is2/es2 + * Sparx5: TCP flag PSH, LAN966x: TCP: TCP flag PSH. PTP over UDP: flagField bit + * 1 (twoStepFlag) + * VCAP_KF_L4_RNG: sparx5 is0 W8, sparx5 is2 W16, sparx5 es2 W16 + * Range checker bitmask (one for each range checker). 
Input into range checkers + * is taken from classified results (VID, DSCP) and frame (SPORT, DPORT, ETYPE, + * outer VID, inner VID) + * VCAP_KF_L4_RST: W1, sparx5: is2/es2 + * Sparx5: TCP flag RST , LAN966x: TCP: TCP flag RST. PTP over UDP: messageType + * bit 3 + * VCAP_KF_L4_SEQUENCE_EQ0_IS: W1, sparx5: is2/es2 + * Set if TCP sequence number is 0, LAN966x: Overlayed with PTP over UDP: + * messageType bit 0 + * VCAP_KF_L4_SPORT: W16, sparx5: is0/is2/es2 + * TCP/UDP source port + * VCAP_KF_L4_SPORT_EQ_DPORT_IS: W1, sparx5: is2/es2 + * Set if UDP or TCP source port equals UDP or TCP destination port + * VCAP_KF_L4_SYN: W1, sparx5: is2/es2 + * Sparx5: TCP flag SYN, LAN966x: TCP: TCP flag SYN. PTP over UDP: messageType + * bit 2 + * VCAP_KF_L4_URG: W1, sparx5: is2/es2 + * Sparx5: TCP flag URG, LAN966x: TCP: TCP flag URG. PTP over UDP: flagField bit + * 7 (reserved) + * VCAP_KF_LOOKUP_FIRST_IS: W1, sparx5: is0/is2/es2 + * Selects between entries relevant for first and second lookup. Set for first + * lookup, cleared for second lookup. + * VCAP_KF_LOOKUP_GEN_IDX: W12, sparx5: is0 + * Generic index - for chaining CLM instances + * VCAP_KF_LOOKUP_GEN_IDX_SEL: W2, sparx5: is0 + * Select the mode of the Generic Index + * VCAP_KF_LOOKUP_PAG: W8, sparx5: is2 + * Classified Policy Association Group: chains rules from IS1/CLM to IS2 + * VCAP_KF_OAM_CCM_CNTS_EQ0: W1, sparx5: is2/es2 + * Dual-ended loss measurement counters in CCM frames are all zero + * VCAP_KF_OAM_MEL_FLAGS: W7, sparx5: is0 + * Encoding of MD level/MEG level (MEL) + * VCAP_KF_OAM_Y1731_IS: W1, sparx5: is0/is2/es2 + * Set if frame’s EtherType = 0x8902 + * VCAP_KF_PROT_ACTIVE: W1, sparx5: es2 + * Protection is active + * VCAP_KF_TCP_IS: W1, sparx5: is0/is2/es2 + * Set if frame is IPv4 TCP frame (IP protocol = 6) or IPv6 TCP frames (Next + * header = 6) + * VCAP_KF_TCP_UDP_IS: W1, sparx5: is0/is2/es2 + * Set if frame is IPv4/IPv6 TCP or UDP frame (IP protocol/next header equals 6 + * or 17) + * VCAP_KF_TYPE: sparx5 is0 W2, sparx5 is0 W1, sparx5 is2 W4, sparx5 is2 W2, + * sparx5 es2 W3 + * Keyset type id - set by the API + */ + +/* Keyfield names */ +enum vcap_key_field { + VCAP_KF_NO_VALUE, /* initial value */ + VCAP_KF_8021BR_ECID_BASE, + VCAP_KF_8021BR_ECID_EXT, + VCAP_KF_8021BR_E_TAGGED, + VCAP_KF_8021BR_GRP, + VCAP_KF_8021BR_IGR_ECID_BASE, + VCAP_KF_8021BR_IGR_ECID_EXT, + VCAP_KF_8021Q_DEI0, + VCAP_KF_8021Q_DEI1, + VCAP_KF_8021Q_DEI2, + VCAP_KF_8021Q_DEI_CLS, + VCAP_KF_8021Q_PCP0, + VCAP_KF_8021Q_PCP1, + VCAP_KF_8021Q_PCP2, + VCAP_KF_8021Q_PCP_CLS, + VCAP_KF_8021Q_TPID0, + VCAP_KF_8021Q_TPID1, + VCAP_KF_8021Q_TPID2, + VCAP_KF_8021Q_VID0, + VCAP_KF_8021Q_VID1, + VCAP_KF_8021Q_VID2, + VCAP_KF_8021Q_VID_CLS, + VCAP_KF_8021Q_VLAN_TAGGED_IS, + VCAP_KF_8021Q_VLAN_TAGS, + VCAP_KF_ACL_GRP_ID, + VCAP_KF_ARP_ADDR_SPACE_OK_IS, + VCAP_KF_ARP_LEN_OK_IS, + VCAP_KF_ARP_OPCODE, + VCAP_KF_ARP_OPCODE_UNKNOWN_IS, + VCAP_KF_ARP_PROTO_SPACE_OK_IS, + VCAP_KF_ARP_SENDER_MATCH_IS, + VCAP_KF_ARP_TGT_MATCH_IS, + VCAP_KF_COSID_CLS, + VCAP_KF_DST_ENTRY, + VCAP_KF_ES0_ISDX_KEY_ENA, + VCAP_KF_ETYPE, + VCAP_KF_ETYPE_LEN_IS, + VCAP_KF_ETYPE_MPLS, + VCAP_KF_IF_EGR_PORT_MASK, + VCAP_KF_IF_EGR_PORT_MASK_RNG, + VCAP_KF_IF_IGR_PORT, + VCAP_KF_IF_IGR_PORT_MASK, + VCAP_KF_IF_IGR_PORT_MASK_L3, + VCAP_KF_IF_IGR_PORT_MASK_RNG, + VCAP_KF_IF_IGR_PORT_MASK_SEL, + VCAP_KF_IF_IGR_PORT_SEL, + VCAP_KF_IP4_IS, + VCAP_KF_IP_MC_IS, + VCAP_KF_IP_PAYLOAD_5TUPLE, + VCAP_KF_IP_SNAP_IS, + VCAP_KF_ISDX_CLS, + VCAP_KF_ISDX_GT0_IS, + VCAP_KF_L2_BC_IS, + VCAP_KF_L2_DMAC, + VCAP_KF_L2_FWD_IS, + 
VCAP_KF_L2_MC_IS, + VCAP_KF_L2_PAYLOAD_ETYPE, + VCAP_KF_L2_SMAC, + VCAP_KF_L3_DIP_EQ_SIP_IS, + VCAP_KF_L3_DMAC_DIP_MATCH, + VCAP_KF_L3_DPL_CLS, + VCAP_KF_L3_DSCP, + VCAP_KF_L3_DST_IS, + VCAP_KF_L3_FRAGMENT_TYPE, + VCAP_KF_L3_FRAG_INVLD_L4_LEN, + VCAP_KF_L3_IP4_DIP, + VCAP_KF_L3_IP4_SIP, + VCAP_KF_L3_IP6_DIP, + VCAP_KF_L3_IP6_SIP, + VCAP_KF_L3_IP_PROTO, + VCAP_KF_L3_OPTIONS_IS, + VCAP_KF_L3_PAYLOAD, + VCAP_KF_L3_RT_IS, + VCAP_KF_L3_SMAC_SIP_MATCH, + VCAP_KF_L3_TOS, + VCAP_KF_L3_TTL_GT0, + VCAP_KF_L4_ACK, + VCAP_KF_L4_DPORT, + VCAP_KF_L4_FIN, + VCAP_KF_L4_PAYLOAD, + VCAP_KF_L4_PSH, + VCAP_KF_L4_RNG, + VCAP_KF_L4_RST, + VCAP_KF_L4_SEQUENCE_EQ0_IS, + VCAP_KF_L4_SPORT, + VCAP_KF_L4_SPORT_EQ_DPORT_IS, + VCAP_KF_L4_SYN, + VCAP_KF_L4_URG, + VCAP_KF_LOOKUP_FIRST_IS, + VCAP_KF_LOOKUP_GEN_IDX, + VCAP_KF_LOOKUP_GEN_IDX_SEL, + VCAP_KF_LOOKUP_PAG, + VCAP_KF_MIRROR_ENA, + VCAP_KF_OAM_CCM_CNTS_EQ0, + VCAP_KF_OAM_MEL_FLAGS, + VCAP_KF_OAM_Y1731_IS, + VCAP_KF_PROT_ACTIVE, + VCAP_KF_TCP_IS, + VCAP_KF_TCP_UDP_IS, + VCAP_KF_TYPE, +}; + +/* Actionset names with origin information */ +enum vcap_actionfield_set { + VCAP_AFS_NO_VALUE, /* initial value */ + VCAP_AFS_BASE_TYPE, /* sparx5 is2 X3, sparx5 es2 X3 */ + VCAP_AFS_CLASSIFICATION, /* sparx5 is0 X2 */ + VCAP_AFS_CLASS_REDUCED, /* sparx5 is0 X1 */ + VCAP_AFS_FULL, /* sparx5 is0 X3 */ + VCAP_AFS_MLBS, /* sparx5 is0 X2 */ + VCAP_AFS_MLBS_REDUCED, /* sparx5 is0 X1 */ +}; + +/* List of actionfields with description + * + * VCAP_AF_CLS_VID_SEL: W3, sparx5: is0 + * Controls the classified VID: 0: VID_NONE: No action. 1: VID_ADD: New VID = + * old VID + VID_VAL. 2: VID_REPLACE: New VID = VID_VAL. 3: VID_FIRST_TAG: New + * VID = VID from frame's first tag (outer tag) if available, otherwise VID_VAL. + * 4: VID_SECOND_TAG: New VID = VID from frame's second tag (middle tag) if + * available, otherwise VID_VAL. 5: VID_THIRD_TAG: New VID = VID from frame's + * third tag (inner tag) if available, otherwise VID_VAL. + * VCAP_AF_CNT_ID: sparx5 is2 W12, sparx5 es2 W11 + * Counter ID, used per lookup to index the 4K frame counters (ANA_ACL:CNT_TBL). + * Multiple VCAP IS2 entries can use the same counter. + * VCAP_AF_COPY_PORT_NUM: W7, sparx5: es2 + * QSYS port number when FWD_MODE is redirect or copy + * VCAP_AF_COPY_QUEUE_NUM: W16, sparx5: es2 + * QSYS queue number when FWD_MODE is redirect or copy + * VCAP_AF_CPU_COPY_ENA: W1, sparx5: is2/es2 + * Setting this bit to 1 causes all frames that hit this action to be copied to + * the CPU extraction queue specified in CPU_QUEUE_NUM. + * VCAP_AF_CPU_QUEUE_NUM: W3, sparx5: is2/es2 + * CPU queue number. Used when CPU_COPY_ENA is set. + * VCAP_AF_DEI_ENA: W1, sparx5: is0 + * If set, use DEI_VAL as classified DEI value. Otherwise, DEI from basic + * classification is used + * VCAP_AF_DEI_VAL: W1, sparx5: is0 + * See DEI_ENA + * VCAP_AF_DP_ENA: W1, sparx5: is0 + * If set, use DP_VAL as classified drop precedence level. Otherwise, drop + * precedence level from basic classification is used. + * VCAP_AF_DP_VAL: W2, sparx5: is0 + * See DP_ENA. + * VCAP_AF_DSCP_ENA: W1, sparx5: is0 + * If set, use DSCP_VAL as classified DSCP value. Otherwise, DSCP value from + * basic classification is used. + * VCAP_AF_DSCP_VAL: W6, sparx5: is0 + * See DSCP_ENA. + * VCAP_AF_ES2_REW_CMD: W3, sparx5: es2 + * Command forwarded to REW: 0: No action. 1: SWAP MAC addresses. 2: Do L2CP + * DMAC translation when entering or leaving a tunnel. + * VCAP_AF_FWD_MODE: W2, sparx5: es2 + * Forward selector: 0: Forward. 1: Discard. 2: Redirect. 3: Copy. 
+ * VCAP_AF_HIT_ME_ONCE: W1, sparx5: is2/es2 + * Setting this bit to 1 causes the first frame that hits this action where the + * HIT_CNT counter is zero to be copied to the CPU extraction queue specified in + * CPU_QUEUE_NUM. The HIT_CNT counter is then incremented and any frames that + * hit this action later are not copied to the CPU. To re-enable the HIT_ME_ONCE + * functionality, the HIT_CNT counter must be cleared. + * VCAP_AF_IGNORE_PIPELINE_CTRL: W1, sparx5: is2/es2 + * Ignore ingress pipeline control. This enforces the use of the VCAP IS2 action + * even when the pipeline control has terminated the frame before VCAP IS2. + * VCAP_AF_INTR_ENA: W1, sparx5: is2/es2 + * If set, an interrupt is triggered when this rule is hit + * VCAP_AF_ISDX_ADD_REPLACE_SEL: W1, sparx5: is0 + * Controls the classified ISDX. 0: New ISDX = old ISDX + ISDX_VAL. 1: New ISDX + * = ISDX_VAL. + * VCAP_AF_ISDX_VAL: W12, sparx5: is0 + * See isdx_add_replace_sel + * VCAP_AF_LRN_DIS: W1, sparx5: is2 + * Setting this bit to 1 disables learning of frames hitting this action. + * VCAP_AF_MAP_IDX: W9, sparx5: is0 + * Index for QoS mapping table lookup + * VCAP_AF_MAP_KEY: W3, sparx5: is0 + * Key type for QoS mapping table lookup. 0: DEI0, PCP0 (outer tag). 1: DEI1, + * PCP1 (middle tag). 2: DEI2, PCP2 (inner tag). 3: MPLS TC. 4: PCP0 (outer + * tag). 5: E-DEI, E-PCP (E-TAG). 6: DSCP if available, otherwise none. 7: DSCP + * if available, otherwise DEI0, PCP0 (outer tag) if available using MAP_IDX+8, + * otherwise none + * VCAP_AF_MAP_LOOKUP_SEL: W2, sparx5: is0 + * Selects which of the two QoS Mapping Table lookups that MAP_KEY and MAP_IDX + * are applied to. 0: No changes to the QoS Mapping Table lookup. 1: Update key + * type and index for QoS Mapping Table lookup #0. 2: Update key type and index + * for QoS Mapping Table lookup #1. 3: Reserved. + * VCAP_AF_MASK_MODE: W3, sparx5: is0/is2 + * Controls the PORT_MASK use. Sparx5: 0: OR_DSTMASK, 1: AND_VLANMASK, 2: + * REPLACE_PGID, 3: REPLACE_ALL, 4: REDIR_PGID, 5: OR_PGID_MASK, 6: VSTAX, 7: + * Not applicable. LAN966X: 0: No action, 1: Permit/deny (AND), 2: Policy + * forwarding (DMAC lookup), 3: Redirect. The CPU port is untouched by + * MASK_MODE. + * VCAP_AF_MATCH_ID: W16, sparx5: is0/is2 + * Logical ID for the entry. The MATCH_ID is extracted together with the frame + * if the frame is forwarded to the CPU (CPU_COPY_ENA). The result is placed in + * IFH.CL_RSLT. + * VCAP_AF_MATCH_ID_MASK: W16, sparx5: is0/is2 + * Mask used by MATCH_ID. + * VCAP_AF_MIRROR_PROBE: W2, sparx5: is2 + * Mirroring performed according to configuration of a mirror probe. 0: No + * mirroring. 1: Mirror probe 0. 2: Mirror probe 1. 3: Mirror probe 2 + * VCAP_AF_MIRROR_PROBE_ID: W2, sparx5: es2 + * Signals a mirror probe to be placed in the IFH. Only possible when FWD_MODE + * is copy. 0: No mirroring. 1–3: Use mirror probe 0-2. + * VCAP_AF_NXT_IDX: W12, sparx5: is0 + * Index used as part of key (field G_IDX) in the next lookup. + * VCAP_AF_NXT_IDX_CTRL: W3, sparx5: is0 + * Controls the generation of the G_IDX used in the VCAP CLM next lookup + * VCAP_AF_PAG_OVERRIDE_MASK: W8, sparx5: is0 + * Bits set in this mask will override PAG_VAL from port profile. New PAG = + * (PAG (input) AND ~PAG_OVERRIDE_MASK) OR (PAG_VAL AND PAG_OVERRIDE_MASK) + * VCAP_AF_PAG_VAL: W8, sparx5: is0 + * See PAG_OVERRIDE_MASK. + * VCAP_AF_PCP_ENA: W1, sparx5: is0 + * If set, use PCP_VAL as classified PCP value. Otherwise, PCP from basic + * classification is used.
+ * VCAP_AF_PCP_VAL: W3, sparx5: is0 + * See PCP_ENA. + * VCAP_AF_PIPELINE_FORCE_ENA: sparx5 is0 W2, sparx5 is2 W1 + * If set, use PIPELINE_PT unconditionally and set PIPELINE_ACT = NONE if + * PIPELINE_PT == NONE. Overrules previous settings of pipeline point. + * VCAP_AF_PIPELINE_PT: W5, sparx5: is0/is2 + * Pipeline point used if PIPELINE_FORCE_ENA is set + * VCAP_AF_POLICE_ENA: W1, sparx5: is2/es2 + * Setting this bit to 1 causes frames that hit this action to be policed by the + * ACL policer specified in POLICE_IDX. Only applies to the first lookup. + * VCAP_AF_POLICE_IDX: W6, sparx5: is2/es2 + * Selects VCAP policer used when policing frames (POLICE_ENA) + * VCAP_AF_POLICE_REMARK: W1, sparx5: es2 + * If set, frames exceeding policer rates are marked as yellow but not + * discarded. + * VCAP_AF_PORT_MASK: sparx5 is0 W65, sparx5 is2 W68 + * Port mask applied to the forwarding decision based on MASK_MODE. + * VCAP_AF_QOS_ENA: W1, sparx5: is0 + * If set, use QOS_VAL as classified QoS class. Otherwise, QoS class from basic + * classification is used. + * VCAP_AF_QOS_VAL: W3, sparx5: is0 + * See QOS_ENA. + * VCAP_AF_RT_DIS: W1, sparx5: is2 + * If set, routing is disallowed. Only applies when IS_INNER_ACL is 0. See also + * IGR_ACL_ENA, EGR_ACL_ENA, and RLEG_STAT_IDX. + * VCAP_AF_TYPE: W1, sparx5: is0 + * Actionset type id - Set by the API + * VCAP_AF_VID_VAL: W13, sparx5: is0 + * New VID Value + */ + +/* Actionfield names */ +enum vcap_action_field { + VCAP_AF_NO_VALUE, /* initial value */ + VCAP_AF_ACL_MAC, + VCAP_AF_ACL_RT_MODE, + VCAP_AF_CLS_VID_SEL, + VCAP_AF_CNT_ID, + VCAP_AF_COPY_PORT_NUM, + VCAP_AF_COPY_QUEUE_NUM, + VCAP_AF_COSID_ENA, + VCAP_AF_COSID_VAL, + VCAP_AF_CPU_COPY_ENA, + VCAP_AF_CPU_DIS, + VCAP_AF_CPU_ENA, + VCAP_AF_CPU_Q, + VCAP_AF_CPU_QUEUE_NUM, + VCAP_AF_CUSTOM_ACE_ENA, + VCAP_AF_CUSTOM_ACE_OFFSET, + VCAP_AF_DEI_ENA, + VCAP_AF_DEI_VAL, + VCAP_AF_DLB_OFFSET, + VCAP_AF_DMAC_OFFSET_ENA, + VCAP_AF_DP_ENA, + VCAP_AF_DP_VAL, + VCAP_AF_DSCP_ENA, + VCAP_AF_DSCP_VAL, + VCAP_AF_EGR_ACL_ENA, + VCAP_AF_ES2_REW_CMD, + VCAP_AF_FWD_DIS, + VCAP_AF_FWD_MODE, + VCAP_AF_FWD_TYPE, + VCAP_AF_GVID_ADD_REPLACE_SEL, + VCAP_AF_HIT_ME_ONCE, + VCAP_AF_IGNORE_PIPELINE_CTRL, + VCAP_AF_IGR_ACL_ENA, + VCAP_AF_INJ_MASQ_ENA, + VCAP_AF_INJ_MASQ_LPORT, + VCAP_AF_INJ_MASQ_PORT, + VCAP_AF_INTR_ENA, + VCAP_AF_ISDX_ADD_REPLACE_SEL, + VCAP_AF_ISDX_VAL, + VCAP_AF_IS_INNER_ACL, + VCAP_AF_L3_MAC_UPDATE_DIS, + VCAP_AF_LOG_MSG_INTERVAL, + VCAP_AF_LPM_AFFIX_ENA, + VCAP_AF_LPM_AFFIX_VAL, + VCAP_AF_LPORT_ENA, + VCAP_AF_LRN_DIS, + VCAP_AF_MAP_IDX, + VCAP_AF_MAP_KEY, + VCAP_AF_MAP_LOOKUP_SEL, + VCAP_AF_MASK_MODE, + VCAP_AF_MATCH_ID, + VCAP_AF_MATCH_ID_MASK, + VCAP_AF_MIP_SEL, + VCAP_AF_MIRROR_PROBE, + VCAP_AF_MIRROR_PROBE_ID, + VCAP_AF_MPLS_IP_CTRL_ENA, + VCAP_AF_MPLS_MEP_ENA, + VCAP_AF_MPLS_MIP_ENA, + VCAP_AF_MPLS_OAM_FLAVOR, + VCAP_AF_MPLS_OAM_TYPE, + VCAP_AF_NUM_VLD_LABELS, + VCAP_AF_NXT_IDX, + VCAP_AF_NXT_IDX_CTRL, + VCAP_AF_NXT_KEY_TYPE, + VCAP_AF_NXT_NORMALIZE, + VCAP_AF_NXT_NORM_W16_OFFSET, + VCAP_AF_NXT_NORM_W32_OFFSET, + VCAP_AF_NXT_OFFSET_FROM_TYPE, + VCAP_AF_NXT_TYPE_AFTER_OFFSET, + VCAP_AF_OAM_IP_BFD_ENA, + VCAP_AF_OAM_TWAMP_ENA, + VCAP_AF_OAM_Y1731_SEL, + VCAP_AF_PAG_OVERRIDE_MASK, + VCAP_AF_PAG_VAL, + VCAP_AF_PCP_ENA, + VCAP_AF_PCP_VAL, + VCAP_AF_PIPELINE_ACT_SEL, + VCAP_AF_PIPELINE_FORCE_ENA, + VCAP_AF_PIPELINE_PT, + VCAP_AF_PIPELINE_PT_REDUCED, + VCAP_AF_POLICE_ENA, + VCAP_AF_POLICE_IDX, + VCAP_AF_POLICE_REMARK, + VCAP_AF_PORT_MASK, + VCAP_AF_PTP_MASTER_SEL, + VCAP_AF_QOS_ENA, + VCAP_AF_QOS_VAL, + 
VCAP_AF_REW_CMD, + VCAP_AF_RLEG_DMAC_CHK_DIS, + VCAP_AF_RLEG_STAT_IDX, + VCAP_AF_RSDX_ENA, + VCAP_AF_RSDX_VAL, + VCAP_AF_RSVD_LBL_VAL, + VCAP_AF_RT_DIS, + VCAP_AF_RT_SEL, + VCAP_AF_S2_KEY_SEL_ENA, + VCAP_AF_S2_KEY_SEL_IDX, + VCAP_AF_SAM_SEQ_ENA, + VCAP_AF_SIP_IDX, + VCAP_AF_SWAP_MAC_ENA, + VCAP_AF_TCP_UDP_DPORT, + VCAP_AF_TCP_UDP_ENA, + VCAP_AF_TCP_UDP_SPORT, + VCAP_AF_TC_ENA, + VCAP_AF_TC_LABEL, + VCAP_AF_TPID_SEL, + VCAP_AF_TTL_DECR_DIS, + VCAP_AF_TTL_ENA, + VCAP_AF_TTL_LABEL, + VCAP_AF_TTL_UPDATE_ENA, + VCAP_AF_TYPE, + VCAP_AF_VID_VAL, + VCAP_AF_VLAN_POP_CNT, + VCAP_AF_VLAN_POP_CNT_ENA, + VCAP_AF_VLAN_PUSH_CNT, + VCAP_AF_VLAN_PUSH_CNT_ENA, + VCAP_AF_VLAN_WAS_TAGGED, +}; + +#endif /* __VCAP_AG_API__ */ diff --git a/drivers/net/ethernet/microchip/vcap/vcap_api.c b/drivers/net/ethernet/microchip/vcap/vcap_api.c new file mode 100644 index 000000000000..d255bc7deae7 --- /dev/null +++ b/drivers/net/ethernet/microchip/vcap/vcap_api.c @@ -0,0 +1,1184 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* Microchip VCAP API + * + * Copyright (c) 2022 Microchip Technology Inc. and its subsidiaries. + */ + +#include <linux/types.h> + +#include "vcap_api.h" +#include "vcap_api_client.h" + +#define to_intrule(rule) container_of((rule), struct vcap_rule_internal, data) + +/* Private VCAP API rule data */ +struct vcap_rule_internal { + struct vcap_rule data; /* provided by the client */ + struct list_head list; /* for insertion in the vcap admin list of rules */ + struct vcap_admin *admin; /* vcap hw instance */ + struct net_device *ndev; /* the interface that the rule applies to */ + struct vcap_control *vctrl; /* the client control */ + u32 sort_key; /* defines the position in the VCAP */ + int keyset_sw; /* subwords in a keyset */ + int actionset_sw; /* subwords in an actionset */ + int keyset_sw_regs; /* registers in a subword in an keyset */ + int actionset_sw_regs; /* registers in a subword in an actionset */ + int size; /* the size of the rule: max(entry, action) */ + u32 addr; /* address in the VCAP at insertion */ +}; + +/* Moving a rule in the VCAP address space */ +struct vcap_rule_move { + int addr; /* address to move */ + int offset; /* change in address */ + int count; /* blocksize of addresses to move */ +}; + +/* Bit iterator for the VCAP cache streams */ +struct vcap_stream_iter { + u32 offset; /* bit offset from the stream start */ + u32 sw_width; /* subword width in bits */ + u32 regs_per_sw; /* registers per subword */ + u32 reg_idx; /* current register index */ + u32 reg_bitpos; /* bit offset in current register */ + const struct vcap_typegroup *tg; /* current typegroup */ +}; + +static void vcap_iter_set(struct vcap_stream_iter *itr, int sw_width, + const struct vcap_typegroup *tg, u32 offset) +{ + memset(itr, 0, sizeof(*itr)); + itr->offset = offset; + itr->sw_width = sw_width; + itr->regs_per_sw = DIV_ROUND_UP(sw_width, 32); + itr->tg = tg; +} + +static void vcap_iter_skip_tg(struct vcap_stream_iter *itr) +{ + /* Compensate the field offset for preceding typegroups. + * A typegroup table ends with an all-zero terminator. 
+ */ + while (itr->tg->width && itr->offset >= itr->tg->offset) { + itr->offset += itr->tg->width; + itr->tg++; /* next typegroup */ + } +} + +static void vcap_iter_update(struct vcap_stream_iter *itr) +{ + int sw_idx, sw_bitpos; + + /* Calculate the subword index and bitposition for current bit */ + sw_idx = itr->offset / itr->sw_width; + sw_bitpos = itr->offset % itr->sw_width; + /* Calculate the register index and bitposition for current bit */ + itr->reg_idx = (sw_idx * itr->regs_per_sw) + (sw_bitpos / 32); + itr->reg_bitpos = sw_bitpos % 32; +} + +static void vcap_iter_init(struct vcap_stream_iter *itr, int sw_width, + const struct vcap_typegroup *tg, u32 offset) +{ + vcap_iter_set(itr, sw_width, tg, offset); + vcap_iter_skip_tg(itr); + vcap_iter_update(itr); +} + +static void vcap_iter_next(struct vcap_stream_iter *itr) +{ + itr->offset++; + vcap_iter_skip_tg(itr); + vcap_iter_update(itr); +} + +static void vcap_set_bit(u32 *stream, struct vcap_stream_iter *itr, bool value) +{ + u32 mask = BIT(itr->reg_bitpos); + u32 *p = &stream[itr->reg_idx]; + + if (value) + *p |= mask; + else + *p &= ~mask; +} + +static void vcap_encode_bit(u32 *stream, struct vcap_stream_iter *itr, bool val) +{ + /* When intersected by a type group field, stream the type group bits + * before continuing with the value bit + */ + while (itr->tg->width && + itr->offset >= itr->tg->offset && + itr->offset < itr->tg->offset + itr->tg->width) { + int tg_bitpos = itr->tg->offset - itr->offset; + + vcap_set_bit(stream, itr, (itr->tg->value >> tg_bitpos) & 0x1); + itr->offset++; + vcap_iter_update(itr); + } + vcap_set_bit(stream, itr, val); +} + +static void vcap_encode_field(u32 *stream, struct vcap_stream_iter *itr, + int width, const u8 *value) +{ + int idx; + + /* Loop over the field value bits and add the value bits one by one to + * the output stream. + */ + for (idx = 0; idx < width; idx++) { + u8 bidx = idx & GENMASK(2, 0); + + /* Encode one field value bit */ + vcap_encode_bit(stream, itr, (value[idx / 8] >> bidx) & 0x1); + vcap_iter_next(itr); + } +} + +static void vcap_encode_typegroups(u32 *stream, int sw_width, + const struct vcap_typegroup *tg, + bool mask) +{ + struct vcap_stream_iter iter; + int idx; + + /* Mask bits must be set to zeros (inverted later when writing to the + * mask cache register), so that the mask typegroup bits consist of + * match-1 or match-0, or both + */ + vcap_iter_set(&iter, sw_width, tg, 0); + while (iter.tg->width) { + /* Set position to current typegroup bit */ + iter.offset = iter.tg->offset; + vcap_iter_update(&iter); + for (idx = 0; idx < iter.tg->width; idx++) { + /* Iterate over current typegroup bits. 
Mask typegroup + * bits are always set + */ + if (mask) + vcap_set_bit(stream, &iter, 0x1); + else + vcap_set_bit(stream, &iter, + (iter.tg->value >> idx) & 0x1); + iter.offset++; + vcap_iter_update(&iter); + } + iter.tg++; /* next typegroup */ + } +} + +/* Return the list of keyfields for the keyset */ +static const struct vcap_field *vcap_keyfields(struct vcap_control *vctrl, + enum vcap_type vt, + enum vcap_keyfield_set keyset) +{ + /* Check that the keyset exists in the vcap keyset list */ + if (keyset >= vctrl->vcaps[vt].keyfield_set_size) + return NULL; + return vctrl->vcaps[vt].keyfield_set_map[keyset]; +} + +/* Return the keyset information for the keyset */ +static const struct vcap_set *vcap_keyfieldset(struct vcap_control *vctrl, + enum vcap_type vt, + enum vcap_keyfield_set keyset) +{ + const struct vcap_set *kset; + + /* Check that the keyset exists in the vcap keyset list */ + if (keyset >= vctrl->vcaps[vt].keyfield_set_size) + return NULL; + kset = &vctrl->vcaps[vt].keyfield_set[keyset]; + if (kset->sw_per_item == 0 || kset->sw_per_item > vctrl->vcaps[vt].sw_count) + return NULL; + return kset; +} + +/* Return the typegroup table for the matching keyset (using subword size) */ +static const struct vcap_typegroup * +vcap_keyfield_typegroup(struct vcap_control *vctrl, + enum vcap_type vt, enum vcap_keyfield_set keyset) +{ + const struct vcap_set *kset = vcap_keyfieldset(vctrl, vt, keyset); + + /* Check that the keyset is valid */ + if (!kset) + return NULL; + return vctrl->vcaps[vt].keyfield_set_typegroups[kset->sw_per_item]; +} + +/* Return the number of keyfields in the keyset */ +static int vcap_keyfield_count(struct vcap_control *vctrl, + enum vcap_type vt, enum vcap_keyfield_set keyset) +{ + /* Check that the keyset exists in the vcap keyset list */ + if (keyset >= vctrl->vcaps[vt].keyfield_set_size) + return 0; + return vctrl->vcaps[vt].keyfield_set_map_size[keyset]; +} + +static void vcap_encode_keyfield(struct vcap_rule_internal *ri, + const struct vcap_client_keyfield *kf, + const struct vcap_field *rf, + const struct vcap_typegroup *tgt) +{ + int sw_width = ri->vctrl->vcaps[ri->admin->vtype].sw_width; + struct vcap_cache_data *cache = &ri->admin->cache; + struct vcap_stream_iter iter; + const u8 *value, *mask; + + /* Encode the fields for the key and the mask in their respective + * streams, respecting the subword width. 
+ */ + switch (kf->ctrl.type) { + case VCAP_FIELD_BIT: + value = &kf->data.u1.value; + mask = &kf->data.u1.mask; + break; + case VCAP_FIELD_U32: + value = (const u8 *)&kf->data.u32.value; + mask = (const u8 *)&kf->data.u32.mask; + break; + case VCAP_FIELD_U48: + value = kf->data.u48.value; + mask = kf->data.u48.mask; + break; + case VCAP_FIELD_U56: + value = kf->data.u56.value; + mask = kf->data.u56.mask; + break; + case VCAP_FIELD_U64: + value = kf->data.u64.value; + mask = kf->data.u64.mask; + break; + case VCAP_FIELD_U72: + value = kf->data.u72.value; + mask = kf->data.u72.mask; + break; + case VCAP_FIELD_U112: + value = kf->data.u112.value; + mask = kf->data.u112.mask; + break; + case VCAP_FIELD_U128: + value = kf->data.u128.value; + mask = kf->data.u128.mask; + break; + } + vcap_iter_init(&iter, sw_width, tgt, rf->offset); + vcap_encode_field(cache->keystream, &iter, rf->width, value); + vcap_iter_init(&iter, sw_width, tgt, rf->offset); + vcap_encode_field(cache->maskstream, &iter, rf->width, mask); +} + +static void vcap_encode_keyfield_typegroups(struct vcap_control *vctrl, + struct vcap_rule_internal *ri, + const struct vcap_typegroup *tgt) +{ + int sw_width = vctrl->vcaps[ri->admin->vtype].sw_width; + struct vcap_cache_data *cache = &ri->admin->cache; + + /* Encode the typegroup bits for the key and the mask in their streams, + * respecting the subword width. + */ + vcap_encode_typegroups(cache->keystream, sw_width, tgt, false); + vcap_encode_typegroups(cache->maskstream, sw_width, tgt, true); +} + +static int vcap_encode_rule_keyset(struct vcap_rule_internal *ri) +{ + const struct vcap_client_keyfield *ckf; + const struct vcap_typegroup *tg_table; + const struct vcap_field *kf_table; + int keyset_size; + + /* Get a valid set of fields for the specific keyset */ + kf_table = vcap_keyfields(ri->vctrl, ri->admin->vtype, ri->data.keyset); + if (!kf_table) { + pr_err("%s:%d: no fields available for this keyset: %d\n", + __func__, __LINE__, ri->data.keyset); + return -EINVAL; + } + /* Get a valid typegroup for the specific keyset */ + tg_table = vcap_keyfield_typegroup(ri->vctrl, ri->admin->vtype, + ri->data.keyset); + if (!tg_table) { + pr_err("%s:%d: no typegroups available for this keyset: %d\n", + __func__, __LINE__, ri->data.keyset); + return -EINVAL; + } + /* Get a valid size for the specific keyset */ + keyset_size = vcap_keyfield_count(ri->vctrl, ri->admin->vtype, + ri->data.keyset); + if (keyset_size == 0) { + pr_err("%s:%d: zero field count for this keyset: %d\n", + __func__, __LINE__, ri->data.keyset); + return -EINVAL; + } + /* Iterate over the keyfields (key, mask) in the rule + * and encode these bits + */ + if (list_empty(&ri->data.keyfields)) { + pr_err("%s:%d: no keyfields in the rule\n", __func__, __LINE__); + return -EINVAL; + } + list_for_each_entry(ckf, &ri->data.keyfields, ctrl.list) { + /* Check that the client entry exists in the keyset */ + if (ckf->ctrl.key >= keyset_size) { + pr_err("%s:%d: key %d is not in vcap\n", + __func__, __LINE__, ckf->ctrl.key); + return -EINVAL; + } + vcap_encode_keyfield(ri, ckf, &kf_table[ckf->ctrl.key], tg_table); + } + /* Add typegroup bits to the key/mask bitstreams */ + vcap_encode_keyfield_typegroups(ri->vctrl, ri, tg_table); + return 0; +} + +/* Return the list of actionfields for the actionset */ +static const struct vcap_field * +vcap_actionfields(struct vcap_control *vctrl, + enum vcap_type vt, enum vcap_actionfield_set actionset) +{ + /* Check that the actionset exists in the vcap actionset list */ + if (actionset >= 
vctrl->vcaps[vt].actionfield_set_size) + return NULL; + return vctrl->vcaps[vt].actionfield_set_map[actionset]; +} + +static const struct vcap_set * +vcap_actionfieldset(struct vcap_control *vctrl, + enum vcap_type vt, enum vcap_actionfield_set actionset) +{ + const struct vcap_set *aset; + + /* Check that the actionset exists in the vcap actionset list */ + if (actionset >= vctrl->vcaps[vt].actionfield_set_size) + return NULL; + aset = &vctrl->vcaps[vt].actionfield_set[actionset]; + if (aset->sw_per_item == 0 || aset->sw_per_item > vctrl->vcaps[vt].sw_count) + return NULL; + return aset; +} + +/* Return the typegroup table for the matching actionset (using subword size) */ +static const struct vcap_typegroup * +vcap_actionfield_typegroup(struct vcap_control *vctrl, + enum vcap_type vt, enum vcap_actionfield_set actionset) +{ + const struct vcap_set *aset = vcap_actionfieldset(vctrl, vt, actionset); + + /* Check that the actionset is valid */ + if (!aset) + return NULL; + return vctrl->vcaps[vt].actionfield_set_typegroups[aset->sw_per_item]; +} + +/* Return the number of actionfields in the actionset */ +static int vcap_actionfield_count(struct vcap_control *vctrl, + enum vcap_type vt, + enum vcap_actionfield_set actionset) +{ + /* Check that the actionset exists in the vcap actionset list */ + if (actionset >= vctrl->vcaps[vt].actionfield_set_size) + return 0; + return vctrl->vcaps[vt].actionfield_set_map_size[actionset]; +} + +static void vcap_encode_actionfield(struct vcap_rule_internal *ri, + const struct vcap_client_actionfield *af, + const struct vcap_field *rf, + const struct vcap_typegroup *tgt) +{ + int act_width = ri->vctrl->vcaps[ri->admin->vtype].act_width; + + struct vcap_cache_data *cache = &ri->admin->cache; + struct vcap_stream_iter iter; + const u8 *value; + + /* Encode the action field in the stream, respecting the subword width */ + switch (af->ctrl.type) { + case VCAP_FIELD_BIT: + value = &af->data.u1.value; + break; + case VCAP_FIELD_U32: + value = (const u8 *)&af->data.u32.value; + break; + case VCAP_FIELD_U48: + value = af->data.u48.value; + break; + case VCAP_FIELD_U56: + value = af->data.u56.value; + break; + case VCAP_FIELD_U64: + value = af->data.u64.value; + break; + case VCAP_FIELD_U72: + value = af->data.u72.value; + break; + case VCAP_FIELD_U112: + value = af->data.u112.value; + break; + case VCAP_FIELD_U128: + value = af->data.u128.value; + break; + } + vcap_iter_init(&iter, act_width, tgt, rf->offset); + vcap_encode_field(cache->actionstream, &iter, rf->width, value); +} + +static void vcap_encode_actionfield_typegroups(struct vcap_rule_internal *ri, + const struct vcap_typegroup *tgt) +{ + int sw_width = ri->vctrl->vcaps[ri->admin->vtype].act_width; + struct vcap_cache_data *cache = &ri->admin->cache; + + /* Encode the typegroup bits for the actionstream respecting the subword + * width. 
+ */ + vcap_encode_typegroups(cache->actionstream, sw_width, tgt, false); +} + +static int vcap_encode_rule_actionset(struct vcap_rule_internal *ri) +{ + const struct vcap_client_actionfield *caf; + const struct vcap_typegroup *tg_table; + const struct vcap_field *af_table; + int actionset_size; + + /* Get a valid set of actionset fields for the specific actionset */ + af_table = vcap_actionfields(ri->vctrl, ri->admin->vtype, + ri->data.actionset); + if (!af_table) { + pr_err("%s:%d: no fields available for this actionset: %d\n", + __func__, __LINE__, ri->data.actionset); + return -EINVAL; + } + /* Get a valid typegroup for the specific actionset */ + tg_table = vcap_actionfield_typegroup(ri->vctrl, ri->admin->vtype, + ri->data.actionset); + if (!tg_table) { + pr_err("%s:%d: no typegroups available for this actionset: %d\n", + __func__, __LINE__, ri->data.actionset); + return -EINVAL; + } + /* Get a valid actionset size for the specific actionset */ + actionset_size = vcap_actionfield_count(ri->vctrl, ri->admin->vtype, + ri->data.actionset); + if (actionset_size == 0) { + pr_err("%s:%d: zero field count for this actionset: %d\n", + __func__, __LINE__, ri->data.actionset); + return -EINVAL; + } + /* Iterate over the actionfields in the rule + * and encode these bits + */ + if (list_empty(&ri->data.actionfields)) + pr_warn("%s:%d: no actionfields in the rule\n", + __func__, __LINE__); + list_for_each_entry(caf, &ri->data.actionfields, ctrl.list) { + /* Check that the client action exists in the actionset */ + if (caf->ctrl.action >= actionset_size) { + pr_err("%s:%d: action %d is not in vcap\n", + __func__, __LINE__, caf->ctrl.action); + return -EINVAL; + } + vcap_encode_actionfield(ri, caf, &af_table[caf->ctrl.action], + tg_table); + } + /* Add typegroup bits to the entry bitstreams */ + vcap_encode_actionfield_typegroups(ri, tg_table); + return 0; +} + +static int vcap_encode_rule(struct vcap_rule_internal *ri) +{ + int err; + + err = vcap_encode_rule_keyset(ri); + if (err) + return err; + err = vcap_encode_rule_actionset(ri); + if (err) + return err; + return 0; +} + +static int vcap_api_check(struct vcap_control *ctrl) +{ + if (!ctrl) { + pr_err("%s:%d: vcap control is missing\n", __func__, __LINE__); + return -EINVAL; + } + if (!ctrl->ops || !ctrl->ops->validate_keyset || + !ctrl->ops->add_default_fields || !ctrl->ops->cache_erase || + !ctrl->ops->cache_write || !ctrl->ops->cache_read || + !ctrl->ops->init || !ctrl->ops->update || !ctrl->ops->move || + !ctrl->ops->port_info) { + pr_err("%s:%d: client operations are missing\n", + __func__, __LINE__); + return -ENOENT; + } + return 0; +} + +static void vcap_erase_cache(struct vcap_rule_internal *ri) +{ + ri->vctrl->ops->cache_erase(ri->admin); +} + +/* Update the keyset for the rule */ +int vcap_set_rule_set_keyset(struct vcap_rule *rule, + enum vcap_keyfield_set keyset) +{ + struct vcap_rule_internal *ri = to_intrule(rule); + const struct vcap_set *kset; + int sw_width; + + kset = vcap_keyfieldset(ri->vctrl, ri->admin->vtype, keyset); + /* Check that the keyset is valid */ + if (!kset) + return -EINVAL; + ri->keyset_sw = kset->sw_per_item; + sw_width = ri->vctrl->vcaps[ri->admin->vtype].sw_width; + ri->keyset_sw_regs = DIV_ROUND_UP(sw_width, 32); + ri->data.keyset = keyset; + return 0; +} +EXPORT_SYMBOL_GPL(vcap_set_rule_set_keyset); + +/* Update the actionset for the rule */ +int vcap_set_rule_set_actionset(struct vcap_rule *rule, + enum vcap_actionfield_set actionset) +{ + struct vcap_rule_internal *ri = to_intrule(rule); + const 
struct vcap_set *aset; + int act_width; + + aset = vcap_actionfieldset(ri->vctrl, ri->admin->vtype, actionset); + /* Check that the actionset is valid */ + if (!aset) + return -EINVAL; + ri->actionset_sw = aset->sw_per_item; + act_width = ri->vctrl->vcaps[ri->admin->vtype].act_width; + ri->actionset_sw_regs = DIV_ROUND_UP(act_width, 32); + ri->data.actionset = actionset; + return 0; +} +EXPORT_SYMBOL_GPL(vcap_set_rule_set_actionset); + +/* Find a rule with a provided rule id */ +static struct vcap_rule_internal *vcap_lookup_rule(struct vcap_control *vctrl, + u32 id) +{ + struct vcap_rule_internal *ri; + struct vcap_admin *admin; + + /* Look for the rule id in all vcaps */ + list_for_each_entry(admin, &vctrl->list, list) + list_for_each_entry(ri, &admin->rules, list) + if (ri->data.id == id) + return ri; + return NULL; +} + +/* Find a rule id with a provided cookie */ +int vcap_lookup_rule_by_cookie(struct vcap_control *vctrl, u64 cookie) +{ + struct vcap_rule_internal *ri; + struct vcap_admin *admin; + + /* Look for the rule id in all vcaps */ + list_for_each_entry(admin, &vctrl->list, list) + list_for_each_entry(ri, &admin->rules, list) + if (ri->data.cookie == cookie) + return ri->data.id; + return -ENOENT; +} +EXPORT_SYMBOL_GPL(vcap_lookup_rule_by_cookie); + +/* Make a shallow copy of the rule without the fields */ +static struct vcap_rule_internal *vcap_dup_rule(struct vcap_rule_internal *ri) +{ + struct vcap_rule_internal *duprule; + + /* Allocate the client part */ + duprule = kzalloc(sizeof(*duprule), GFP_KERNEL); + if (!duprule) + return ERR_PTR(-ENOMEM); + *duprule = *ri; + /* Not inserted in the VCAP */ + INIT_LIST_HEAD(&duprule->list); + /* No elements in these lists */ + INIT_LIST_HEAD(&duprule->data.keyfields); + INIT_LIST_HEAD(&duprule->data.actionfields); + return duprule; +} + +/* Write VCAP cache content to the VCAP HW instance */ +static int vcap_write_rule(struct vcap_rule_internal *ri) +{ + struct vcap_admin *admin = ri->admin; + int sw_idx, ent_idx = 0, act_idx = 0; + u32 addr = ri->addr; + + if (!ri->size || !ri->keyset_sw_regs || !ri->actionset_sw_regs) { + pr_err("%s:%d: rule is empty\n", __func__, __LINE__); + return -EINVAL; + } + /* Use the values in the streams to write the VCAP cache */ + for (sw_idx = 0; sw_idx < ri->size; sw_idx++, addr++) { + ri->vctrl->ops->cache_write(ri->ndev, admin, + VCAP_SEL_ENTRY, ent_idx, + ri->keyset_sw_regs); + ri->vctrl->ops->cache_write(ri->ndev, admin, + VCAP_SEL_ACTION, act_idx, + ri->actionset_sw_regs); + ri->vctrl->ops->update(ri->ndev, admin, VCAP_CMD_WRITE, + VCAP_SEL_ALL, addr); + ent_idx += ri->keyset_sw_regs; + act_idx += ri->actionset_sw_regs; + } + return 0; +} + +/* Lookup a vcap instance using chain id */ +struct vcap_admin *vcap_find_admin(struct vcap_control *vctrl, int cid) +{ + struct vcap_admin *admin; + + if (vcap_api_check(vctrl)) + return NULL; + + list_for_each_entry(admin, &vctrl->list, list) { + if (cid >= admin->first_cid && cid <= admin->last_cid) + return admin; + } + return NULL; +} +EXPORT_SYMBOL_GPL(vcap_find_admin); + +/* Check if there is room for a new rule */ +static int vcap_rule_space(struct vcap_admin *admin, int size) +{ + if (admin->last_used_addr - size < admin->first_valid_addr) { + pr_err("%s:%d: No room for rule size: %u, %u\n", + __func__, __LINE__, size, admin->first_valid_addr); + return -ENOSPC; + } + return 0; +} + +/* Add the keyset typefield to the list of rule keyfields */ +static int vcap_add_type_keyfield(struct vcap_rule *rule) +{ + struct vcap_rule_internal *ri = 
to_intrule(rule); + enum vcap_keyfield_set keyset = rule->keyset; + enum vcap_type vt = ri->admin->vtype; + const struct vcap_field *fields; + const struct vcap_set *kset; + int ret = -EINVAL; + + kset = vcap_keyfieldset(ri->vctrl, vt, keyset); + if (!kset) + return ret; + if (kset->type_id == (u8)-1) /* No type field is needed */ + return 0; + + fields = vcap_keyfields(ri->vctrl, vt, keyset); + if (!fields) + return -EINVAL; + if (fields[VCAP_KF_TYPE].width > 1) { + ret = vcap_rule_add_key_u32(rule, VCAP_KF_TYPE, + kset->type_id, 0xff); + } else { + if (kset->type_id) + ret = vcap_rule_add_key_bit(rule, VCAP_KF_TYPE, + VCAP_BIT_1); + else + ret = vcap_rule_add_key_bit(rule, VCAP_KF_TYPE, + VCAP_BIT_0); + } + return 0; +} + +/* Validate a rule with respect to available port keys */ +int vcap_val_rule(struct vcap_rule *rule, u16 l3_proto) +{ + struct vcap_rule_internal *ri = to_intrule(rule); + enum vcap_keyfield_set keysets[10]; + struct vcap_keyset_list kslist; + int ret; + + /* This validation will be much expanded later */ + ret = vcap_api_check(ri->vctrl); + if (ret) + return ret; + if (!ri->admin) { + ri->data.exterr = VCAP_ERR_NO_ADMIN; + return -EINVAL; + } + if (!ri->ndev) { + ri->data.exterr = VCAP_ERR_NO_NETDEV; + return -EINVAL; + } + if (ri->data.keyset == VCAP_KFS_NO_VALUE) { + ri->data.exterr = VCAP_ERR_NO_KEYSET_MATCH; + return -EINVAL; + } + /* prepare for keyset validation */ + keysets[0] = ri->data.keyset; + kslist.keysets = keysets; + kslist.cnt = 1; + /* Pick a keyset that is supported in the port lookups */ + ret = ri->vctrl->ops->validate_keyset(ri->ndev, ri->admin, rule, &kslist, + l3_proto); + if (ret < 0) { + pr_err("%s:%d: keyset validation failed: %d\n", + __func__, __LINE__, ret); + ri->data.exterr = VCAP_ERR_NO_PORT_KEYSET_MATCH; + return ret; + } + if (ri->data.actionset == VCAP_AFS_NO_VALUE) { + ri->data.exterr = VCAP_ERR_NO_ACTIONSET_MATCH; + return -EINVAL; + } + vcap_add_type_keyfield(rule); + /* Add default fields to this rule */ + ri->vctrl->ops->add_default_fields(ri->ndev, ri->admin, rule); + + /* Rule size is the maximum of the entry and action subword count */ + ri->size = max(ri->keyset_sw, ri->actionset_sw); + + /* Finally check if there is room for the rule in the VCAP */ + return vcap_rule_space(ri->admin, ri->size); +} +EXPORT_SYMBOL_GPL(vcap_val_rule); + +/* calculate the address of the next rule after this (lower address and prio) */ +static u32 vcap_next_rule_addr(u32 addr, struct vcap_rule_internal *ri) +{ + return ((addr - ri->size) / ri->size) * ri->size; +} + +/* Assign a unique rule id and autogenerate one if id == 0 */ +static u32 vcap_set_rule_id(struct vcap_rule_internal *ri) +{ + u32 next_id; + + if (ri->data.id != 0) + return ri->data.id; + + next_id = ri->vctrl->rule_id + 1; + + for (next_id = ri->vctrl->rule_id + 1; next_id < ~0; ++next_id) { + if (!vcap_lookup_rule(ri->vctrl, next_id)) { + ri->data.id = next_id; + ri->vctrl->rule_id = next_id; + break; + } + } + return ri->data.id; +} + +static int vcap_insert_rule(struct vcap_rule_internal *ri, + struct vcap_rule_move *move) +{ + struct vcap_admin *admin = ri->admin; + struct vcap_rule_internal *duprule; + + /* Only support appending rules for now */ + ri->addr = vcap_next_rule_addr(admin->last_used_addr, ri); + admin->last_used_addr = ri->addr; + /* Add a shallow copy of the rule to the VCAP list */ + duprule = vcap_dup_rule(ri); + if (IS_ERR(duprule)) + return PTR_ERR(duprule); + list_add_tail(&duprule->list, &admin->rules); + return 0; +} + +static void vcap_move_rules(struct 
vcap_rule_internal *ri, + struct vcap_rule_move *move) +{ + ri->vctrl->ops->move(ri->ndev, ri->admin, move->addr, + move->offset, move->count); +} + +/* Encode and write a validated rule to the VCAP */ +int vcap_add_rule(struct vcap_rule *rule) +{ + struct vcap_rule_internal *ri = to_intrule(rule); + struct vcap_rule_move move = {0}; + int ret; + + ret = vcap_api_check(ri->vctrl); + if (ret) + return ret; + /* Insert the new rule in the list of vcap rules */ + ret = vcap_insert_rule(ri, &move); + if (ret < 0) { + pr_err("%s:%d: could not insert rule in vcap list: %d\n", + __func__, __LINE__, ret); + goto out; + } + if (move.count > 0) + vcap_move_rules(ri, &move); + ret = vcap_encode_rule(ri); + if (ret) { + pr_err("%s:%d: rule encoding error: %d\n", __func__, __LINE__, ret); + goto out; + } + + ret = vcap_write_rule(ri); + if (ret) + pr_err("%s:%d: rule write error: %d\n", __func__, __LINE__, ret); +out: + return ret; +} +EXPORT_SYMBOL_GPL(vcap_add_rule); + +/* Allocate a new rule with the provided arguments */ +struct vcap_rule *vcap_alloc_rule(struct vcap_control *vctrl, + struct net_device *ndev, int vcap_chain_id, + enum vcap_user user, u16 priority, + u32 id) +{ + struct vcap_rule_internal *ri; + struct vcap_admin *admin; + int maxsize; + + if (!ndev) + return ERR_PTR(-ENODEV); + /* Get the VCAP instance */ + admin = vcap_find_admin(vctrl, vcap_chain_id); + if (!admin) + return ERR_PTR(-ENOENT); + /* Sanity check that this VCAP is supported on this platform */ + if (vctrl->vcaps[admin->vtype].rows == 0) + return ERR_PTR(-EINVAL); + /* Check if a rule with this id already exists */ + if (vcap_lookup_rule(vctrl, id)) + return ERR_PTR(-EEXIST); + /* Check if there is room for the rule in the block(s) of the VCAP */ + maxsize = vctrl->vcaps[admin->vtype].sw_count; /* worst case rule size */ + if (vcap_rule_space(admin, maxsize)) + return ERR_PTR(-ENOSPC); + /* Create a container for the rule and return it */ + ri = kzalloc(sizeof(*ri), GFP_KERNEL); + if (!ri) + return ERR_PTR(-ENOMEM); + ri->data.vcap_chain_id = vcap_chain_id; + ri->data.user = user; + ri->data.priority = priority; + ri->data.id = id; + ri->data.keyset = VCAP_KFS_NO_VALUE; + ri->data.actionset = VCAP_AFS_NO_VALUE; + INIT_LIST_HEAD(&ri->list); + INIT_LIST_HEAD(&ri->data.keyfields); + INIT_LIST_HEAD(&ri->data.actionfields); + ri->ndev = ndev; + ri->admin = admin; /* refer to the vcap instance */ + ri->vctrl = vctrl; /* refer to the client */ + if (vcap_set_rule_id(ri) == 0) + goto out_free; + vcap_erase_cache(ri); + return (struct vcap_rule *)ri; + +out_free: + kfree(ri); + return ERR_PTR(-EINVAL); +} +EXPORT_SYMBOL_GPL(vcap_alloc_rule); + +/* Free mem of a rule owned by client after the rule as been added to the VCAP */ +void vcap_free_rule(struct vcap_rule *rule) +{ + struct vcap_rule_internal *ri = to_intrule(rule); + struct vcap_client_actionfield *caf, *next_caf; + struct vcap_client_keyfield *ckf, *next_ckf; + + /* Deallocate the list of keys and actions */ + list_for_each_entry_safe(ckf, next_ckf, &ri->data.keyfields, ctrl.list) { + list_del(&ckf->ctrl.list); + kfree(ckf); + } + list_for_each_entry_safe(caf, next_caf, &ri->data.actionfields, ctrl.list) { + list_del(&caf->ctrl.list); + kfree(caf); + } + /* Deallocate the rule */ + kfree(rule); +} +EXPORT_SYMBOL_GPL(vcap_free_rule); + +/* Delete rule in a VCAP instance */ +int vcap_del_rule(struct vcap_control *vctrl, struct net_device *ndev, u32 id) +{ + struct vcap_rule_internal *ri, *elem; + struct vcap_admin *admin; + int err; + + /* This will later also handle 
rule moving */ + if (!ndev) + return -ENODEV; + err = vcap_api_check(vctrl); + if (err) + return err; + /* Look for the rule id in all vcaps */ + ri = vcap_lookup_rule(vctrl, id); + if (!ri) + return -EINVAL; + admin = ri->admin; + list_del(&ri->list); + + /* delete the rule in the cache */ + vctrl->ops->init(ndev, admin, ri->addr, ri->size); + if (list_empty(&admin->rules)) { + admin->last_used_addr = admin->last_valid_addr; + } else { + /* update the address range end marker from the last rule in the list */ + elem = list_last_entry(&admin->rules, struct vcap_rule_internal, list); + admin->last_used_addr = elem->addr; + } + kfree(ri); + return 0; +} +EXPORT_SYMBOL_GPL(vcap_del_rule); + +/* Delete all rules in the VCAP instance */ +int vcap_del_rules(struct vcap_control *vctrl, struct vcap_admin *admin) +{ + struct vcap_rule_internal *ri, *next_ri; + int ret = vcap_api_check(vctrl); + + if (ret) + return ret; + list_for_each_entry_safe(ri, next_ri, &admin->rules, list) { + vctrl->ops->init(ri->ndev, admin, ri->addr, ri->size); + list_del(&ri->list); + kfree(ri); + } + admin->last_used_addr = admin->last_valid_addr; + return 0; +} +EXPORT_SYMBOL_GPL(vcap_del_rules); + +/* Find information on a key field in a rule */ +const struct vcap_field *vcap_lookup_keyfield(struct vcap_rule *rule, + enum vcap_key_field key) +{ + struct vcap_rule_internal *ri = to_intrule(rule); + enum vcap_keyfield_set keyset = rule->keyset; + enum vcap_type vt = ri->admin->vtype; + const struct vcap_field *fields; + + if (keyset == VCAP_KFS_NO_VALUE) + return NULL; + fields = vcap_keyfields(ri->vctrl, vt, keyset); + if (!fields) + return NULL; + return &fields[key]; +} +EXPORT_SYMBOL_GPL(vcap_lookup_keyfield); + +static void vcap_copy_from_client_keyfield(struct vcap_rule *rule, + struct vcap_client_keyfield *field, + struct vcap_client_keyfield_data *data) +{ + /* This will be expanded later to handle different vcap memory layouts */ + memcpy(&field->data, data, sizeof(field->data)); +} + +static int vcap_rule_add_key(struct vcap_rule *rule, + enum vcap_key_field key, + enum vcap_field_type ftype, + struct vcap_client_keyfield_data *data) +{ + struct vcap_client_keyfield *field; + + /* More validation will be added here later */ + field = kzalloc(sizeof(*field), GFP_KERNEL); + if (!field) + return -ENOMEM; + field->ctrl.key = key; + field->ctrl.type = ftype; + vcap_copy_from_client_keyfield(rule, field, data); + list_add_tail(&field->ctrl.list, &rule->keyfields); + return 0; +} + +static void vcap_rule_set_key_bitsize(struct vcap_u1_key *u1, enum vcap_bit val) +{ + switch (val) { + case VCAP_BIT_0: + u1->value = 0; + u1->mask = 1; + break; + case VCAP_BIT_1: + u1->value = 1; + u1->mask = 1; + break; + case VCAP_BIT_ANY: + u1->value = 0; + u1->mask = 0; + break; + } +} + +/* Add a bit key with value and mask to the rule */ +int vcap_rule_add_key_bit(struct vcap_rule *rule, enum vcap_key_field key, + enum vcap_bit val) +{ + struct vcap_client_keyfield_data data; + + vcap_rule_set_key_bitsize(&data.u1, val); + return vcap_rule_add_key(rule, key, VCAP_FIELD_BIT, &data); +} +EXPORT_SYMBOL_GPL(vcap_rule_add_key_bit); + +/* Add a 32 bit key field with value and mask to the rule */ +int vcap_rule_add_key_u32(struct vcap_rule *rule, enum vcap_key_field key, + u32 value, u32 mask) +{ + struct vcap_client_keyfield_data data; + + data.u32.value = value; + data.u32.mask = mask; + return vcap_rule_add_key(rule, key, VCAP_FIELD_U32, &data); +} +EXPORT_SYMBOL_GPL(vcap_rule_add_key_u32); + +/* Add a 48 bit key with value and mask to 
the rule */ +int vcap_rule_add_key_u48(struct vcap_rule *rule, enum vcap_key_field key, + struct vcap_u48_key *fieldval) +{ + struct vcap_client_keyfield_data data; + + memcpy(&data.u48, fieldval, sizeof(data.u48)); + return vcap_rule_add_key(rule, key, VCAP_FIELD_U48, &data); +} +EXPORT_SYMBOL_GPL(vcap_rule_add_key_u48); + +/* Add a 72 bit key with value and mask to the rule */ +int vcap_rule_add_key_u72(struct vcap_rule *rule, enum vcap_key_field key, + struct vcap_u72_key *fieldval) +{ + struct vcap_client_keyfield_data data; + + memcpy(&data.u72, fieldval, sizeof(data.u72)); + return vcap_rule_add_key(rule, key, VCAP_FIELD_U72, &data); +} +EXPORT_SYMBOL_GPL(vcap_rule_add_key_u72); + +static void vcap_copy_from_client_actionfield(struct vcap_rule *rule, + struct vcap_client_actionfield *field, + struct vcap_client_actionfield_data *data) +{ + /* This will be expanded later to handle different vcap memory layouts */ + memcpy(&field->data, data, sizeof(field->data)); +} + +static int vcap_rule_add_action(struct vcap_rule *rule, + enum vcap_action_field action, + enum vcap_field_type ftype, + struct vcap_client_actionfield_data *data) +{ + struct vcap_client_actionfield *field; + + /* More validation will be added here later */ + field = kzalloc(sizeof(*field), GFP_KERNEL); + if (!field) + return -ENOMEM; + field->ctrl.action = action; + field->ctrl.type = ftype; + vcap_copy_from_client_actionfield(rule, field, data); + list_add_tail(&field->ctrl.list, &rule->actionfields); + return 0; +} + +static void vcap_rule_set_action_bitsize(struct vcap_u1_action *u1, + enum vcap_bit val) +{ + switch (val) { + case VCAP_BIT_0: + u1->value = 0; + break; + case VCAP_BIT_1: + u1->value = 1; + break; + case VCAP_BIT_ANY: + u1->value = 0; + break; + } +} + +/* Add a bit action with value to the rule */ +int vcap_rule_add_action_bit(struct vcap_rule *rule, + enum vcap_action_field action, + enum vcap_bit val) +{ + struct vcap_client_actionfield_data data; + + vcap_rule_set_action_bitsize(&data.u1, val); + return vcap_rule_add_action(rule, action, VCAP_FIELD_BIT, &data); +} +EXPORT_SYMBOL_GPL(vcap_rule_add_action_bit); + +/* Add a 32 bit action field with value to the rule */ +int vcap_rule_add_action_u32(struct vcap_rule *rule, + enum vcap_action_field action, + u32 value) +{ + struct vcap_client_actionfield_data data; + + data.u32.value = value; + return vcap_rule_add_action(rule, action, VCAP_FIELD_U32, &data); +} +EXPORT_SYMBOL_GPL(vcap_rule_add_action_u32); + +/* Copy to host byte order */ +void vcap_netbytes_copy(u8 *dst, u8 *src, int count) +{ + int idx; + + for (idx = 0; idx < count; ++idx, ++dst) + *dst = src[count - idx - 1]; +} +EXPORT_SYMBOL_GPL(vcap_netbytes_copy); + +/* Convert validation error code into tc extact error message */ +void vcap_set_tc_exterr(struct flow_cls_offload *fco, struct vcap_rule *vrule) +{ + switch (vrule->exterr) { + case VCAP_ERR_NONE: + break; + case VCAP_ERR_NO_ADMIN: + NL_SET_ERR_MSG_MOD(fco->common.extack, + "Missing VCAP instance"); + break; + case VCAP_ERR_NO_NETDEV: + NL_SET_ERR_MSG_MOD(fco->common.extack, + "Missing network interface"); + break; + case VCAP_ERR_NO_KEYSET_MATCH: + NL_SET_ERR_MSG_MOD(fco->common.extack, + "No keyset matched the filter keys"); + break; + case VCAP_ERR_NO_ACTIONSET_MATCH: + NL_SET_ERR_MSG_MOD(fco->common.extack, + "No actionset matched the filter actions"); + break; + case VCAP_ERR_NO_PORT_KEYSET_MATCH: + NL_SET_ERR_MSG_MOD(fco->common.extack, + "No port keyset matched the filter keys"); + break; + } +} 
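Taken together, the helpers exported above make up the client-side flow for installing a rule: allocate it, add key and action fields, validate it, add it to the VCAP instance, and free the client-side copy. The sketch below illustrates that flow; it is not part of the patch, it relies on the declarations in vcap_api_client.h further down, and it assumes that vcap_alloc_rule() reports failure via ERR_PTR(). The chain id, priority and counter id are illustrative values only.

#include <linux/err.h>
#include <linux/if_ether.h>
#include "vcap_api_client.h"

/* Example client: count IPv4 frames in the second ingress stage */
static int example_count_ipv4(struct vcap_control *vctrl,
			      struct net_device *ndev)
{
	struct vcap_rule *rule;
	int err;

	/* id 0 asks the API to auto-generate a unique rule id */
	rule = vcap_alloc_rule(vctrl, ndev, VCAP_CID_INGRESS_STAGE2_L0,
			       VCAP_USER_TC, 10, 0);
	if (IS_ERR(rule))
		return PTR_ERR(rule);

	/* Match EtherType 0x0800 (IPv4) and point the rule at counter 100 */
	err = vcap_rule_add_key_u32(rule, VCAP_KF_ETYPE, ETH_P_IP, 0xffff);
	if (!err)
		err = vcap_rule_add_action_u32(rule, VCAP_AF_CNT_ID, 100);

	/* Validate: derive a keyset/actionset that matches the added fields */
	if (!err)
		err = vcap_val_rule(rule, ETH_P_ALL);
	/* Add (encode and write) the rule to the VCAP instance */
	if (!err)
		err = vcap_add_rule(rule);

	/* The client-side copy is no longer needed once the rule is added */
	vcap_free_rule(rule);
	return err;
}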
+EXPORT_SYMBOL_GPL(vcap_set_tc_exterr); + +#ifdef CONFIG_VCAP_KUNIT_TEST +#include "vcap_api_kunit.c" +#endif diff --git a/drivers/net/ethernet/microchip/vcap/vcap_api.h b/drivers/net/ethernet/microchip/vcap/vcap_api.h new file mode 100644 index 000000000000..eb2eae75c7e8 --- /dev/null +++ b/drivers/net/ethernet/microchip/vcap/vcap_api.h @@ -0,0 +1,272 @@ +/* SPDX-License-Identifier: BSD-3-Clause */ +/* Copyright (C) 2022 Microchip Technology Inc. and its subsidiaries. + * Microchip VCAP API + */ + +#ifndef __VCAP_API__ +#define __VCAP_API__ + +#include <linux/types.h> +#include <linux/list.h> +#include <linux/netdevice.h> + +/* Use the generated API model */ +#ifdef CONFIG_VCAP_KUNIT_TEST +#include "vcap_ag_api_kunit.h" +#endif +#include "vcap_ag_api.h" + +#define VCAP_CID_LOOKUP_SIZE 100000 /* Chains in a lookup */ +#define VCAP_CID_INGRESS_L0 1000000 /* Ingress Stage 1 Lookup 0 */ +#define VCAP_CID_INGRESS_L1 1100000 /* Ingress Stage 1 Lookup 1 */ +#define VCAP_CID_INGRESS_L2 1200000 /* Ingress Stage 1 Lookup 2 */ +#define VCAP_CID_INGRESS_L3 1300000 /* Ingress Stage 1 Lookup 3 */ +#define VCAP_CID_INGRESS_L4 1400000 /* Ingress Stage 1 Lookup 4 */ +#define VCAP_CID_INGRESS_L5 1500000 /* Ingress Stage 1 Lookup 5 */ + +#define VCAP_CID_PREROUTING_IPV6 3000000 /* Prerouting Stage */ +#define VCAP_CID_PREROUTING 6000000 /* Prerouting Stage */ + +#define VCAP_CID_INGRESS_STAGE2_L0 8000000 /* Ingress Stage 2 Lookup 0 */ +#define VCAP_CID_INGRESS_STAGE2_L1 8100000 /* Ingress Stage 2 Lookup 1 */ +#define VCAP_CID_INGRESS_STAGE2_L2 8200000 /* Ingress Stage 2 Lookup 2 */ +#define VCAP_CID_INGRESS_STAGE2_L3 8300000 /* Ingress Stage 2 Lookup 3 */ + +#define VCAP_CID_EGRESS_L0 10000000 /* Egress Lookup 0 */ +#define VCAP_CID_EGRESS_L1 10100000 /* Egress Lookup 1 */ + +#define VCAP_CID_EGRESS_STAGE2_L0 20000000 /* Egress Stage 2 Lookup 0 */ +#define VCAP_CID_EGRESS_STAGE2_L1 20100000 /* Egress Stage 2 Lookup 1 */ + +/* Known users of the VCAP API */ +enum vcap_user { + VCAP_USER_PTP, + VCAP_USER_MRP, + VCAP_USER_CFM, + VCAP_USER_VLAN, + VCAP_USER_QOS, + VCAP_USER_VCAP_UTIL, + VCAP_USER_TC, + VCAP_USER_TC_EXTRA, + + /* add new users above here */ + + /* used to define VCAP_USER_MAX below */ + __VCAP_USER_AFTER_LAST, + VCAP_USER_MAX = __VCAP_USER_AFTER_LAST - 1, +}; + +/* VCAP information used for displaying data */ +struct vcap_statistics { + char *name; + int count; + const char * const *keyfield_set_names; + const char * const *actionfield_set_names; + const char * const *keyfield_names; + const char * const *actionfield_names; +}; + +/* VCAP key/action field type, position and width */ +struct vcap_field { + u16 type; + u16 width; + u16 offset; +}; + +/* VCAP keyset or actionset type and width */ +struct vcap_set { + u8 type_id; + u8 sw_per_item; + u8 sw_cnt; +}; + +/* VCAP typegroup position and bitvalue */ +struct vcap_typegroup { + u16 offset; + u16 width; + u16 value; +}; + +/* VCAP model data */ +struct vcap_info { + char *name; /* user-friendly name */ + u16 rows; /* number of row in instance */ + u16 sw_count; /* maximum subwords used per rule */ + u16 sw_width; /* bits per subword in a keyset */ + u16 sticky_width; /* sticky bits per rule */ + u16 act_width; /* bits per subword in an actionset */ + u16 default_cnt; /* number of default rules */ + u16 require_cnt_dis; /* not used */ + u16 version; /* vcap rtl version */ + const struct vcap_set *keyfield_set; /* keysets */ + int keyfield_set_size; /* number of keysets */ + const struct vcap_set *actionfield_set; /* actionsets */ + int 
actionfield_set_size; /* number of actionsets */ + /* map of keys per keyset */ + const struct vcap_field **keyfield_set_map; + /* number of entries in the above map */ + int *keyfield_set_map_size; + /* map of actions per actionset */ + const struct vcap_field **actionfield_set_map; + /* number of entries in the above map */ + int *actionfield_set_map_size; + /* map of keyset typegroups per subword size */ + const struct vcap_typegroup **keyfield_set_typegroups; + /* map of actionset typegroups per subword size */ + const struct vcap_typegroup **actionfield_set_typegroups; +}; + +enum vcap_field_type { + VCAP_FIELD_BIT, + VCAP_FIELD_U32, + VCAP_FIELD_U48, + VCAP_FIELD_U56, + VCAP_FIELD_U64, + VCAP_FIELD_U72, + VCAP_FIELD_U112, + VCAP_FIELD_U128, +}; + +/* VCAP rule data towards the VCAP cache */ +struct vcap_cache_data { + u32 *keystream; + u32 *maskstream; + u32 *actionstream; + u32 counter; + bool sticky; +}; + +/* Selects which part of the rule must be updated */ +enum vcap_selection { + VCAP_SEL_ENTRY = 0x01, + VCAP_SEL_ACTION = 0x02, + VCAP_SEL_COUNTER = 0x04, + VCAP_SEL_ALL = 0xff, +}; + +/* Commands towards the VCAP cache */ +enum vcap_command { + VCAP_CMD_WRITE = 0, + VCAP_CMD_READ = 1, + VCAP_CMD_MOVE_DOWN = 2, + VCAP_CMD_MOVE_UP = 3, + VCAP_CMD_INITIALIZE = 4, +}; + +enum vcap_rule_error { + VCAP_ERR_NONE = 0, /* No known error */ + VCAP_ERR_NO_ADMIN, /* No admin instance */ + VCAP_ERR_NO_NETDEV, /* No netdev instance */ + VCAP_ERR_NO_KEYSET_MATCH, /* No keyset matched the rule keys */ + VCAP_ERR_NO_ACTIONSET_MATCH, /* No actionset matched the rule actions */ + VCAP_ERR_NO_PORT_KEYSET_MATCH, /* No port keyset matched the rule keys */ +}; + +/* Administration of each VCAP instance */ +struct vcap_admin { + struct list_head list; /* for insertion in vcap_control */ + struct list_head rules; /* list of rules */ + enum vcap_type vtype; /* type of vcap */ + int vinst; /* instance number within the same type */ + int first_cid; /* first chain id in this vcap */ + int last_cid; /* last chain id in this vcap */ + int tgt_inst; /* hardware instance number */ + int lookups; /* number of lookups in this vcap type */ + int lookups_per_instance; /* number of lookups in this instance */ + int last_valid_addr; /* top of address range to be used */ + int first_valid_addr; /* bottom of address range to be used */ + int last_used_addr; /* address of lowest added rule */ + bool w32be; /* vcap uses "32bit-word big-endian" encoding */ + struct vcap_cache_data cache; /* encoded rule data */ +}; + +/* Client supplied VCAP rule data */ +struct vcap_rule { + int vcap_chain_id; /* chain used for this rule */ + enum vcap_user user; /* rule owner */ + u16 priority; + u32 id; /* vcap rule id, must be unique, 0 will auto-generate a value */ + u64 cookie; /* used by the client to identify the rule */ + struct list_head keyfields; /* list of vcap_client_keyfield */ + struct list_head actionfields; /* list of vcap_client_actionfield */ + enum vcap_keyfield_set keyset; /* keyset used: may be derived from fields */ + enum vcap_actionfield_set actionset; /* actionset used: may be derived from fields */ + enum vcap_rule_error exterr; /* extended error - used by TC */ + u64 client; /* space for client defined data */ +}; + +/* List of keysets */ +struct vcap_keyset_list { + int max; /* size of the keyset list */ + int cnt; /* count of keysets actually in the list */ + enum vcap_keyfield_set *keysets; /* the list of keysets */ +}; + +/* Client supplied VCAP callback operations */ +struct vcap_operations { + /* 
validate port keyset operation */ + enum vcap_keyfield_set (*validate_keyset) + (struct net_device *ndev, + struct vcap_admin *admin, + struct vcap_rule *rule, + struct vcap_keyset_list *kslist, + u16 l3_proto); + /* add default rule fields for the selected keyset operations */ + void (*add_default_fields) + (struct net_device *ndev, + struct vcap_admin *admin, + struct vcap_rule *rule); + /* cache operations */ + void (*cache_erase) + (struct vcap_admin *admin); + void (*cache_write) + (struct net_device *ndev, + struct vcap_admin *admin, + enum vcap_selection sel, + u32 idx, u32 count); + void (*cache_read) + (struct net_device *ndev, + struct vcap_admin *admin, + enum vcap_selection sel, + u32 idx, + u32 count); + /* block operations */ + void (*init) + (struct net_device *ndev, + struct vcap_admin *admin, + u32 addr, + u32 count); + void (*update) + (struct net_device *ndev, + struct vcap_admin *admin, + enum vcap_command cmd, + enum vcap_selection sel, + u32 addr); + void (*move) + (struct net_device *ndev, + struct vcap_admin *admin, + u32 addr, + int offset, + int count); + /* informational */ + int (*port_info) + (struct net_device *ndev, + enum vcap_type vtype, + int (*pf)(void *out, int arg, const char *fmt, ...), + void *out, + int arg); +}; + +/* VCAP API Client control interface */ +struct vcap_control { + u32 rule_id; /* last used rule id (unique across VCAP instances) */ + struct vcap_operations *ops; /* client supplied operations */ + const struct vcap_info *vcaps; /* client supplied vcap models */ + const struct vcap_statistics *stats; /* client supplied vcap stats */ + struct list_head list; /* list of vcap instances */ +}; + +/* Set client control interface on the API */ +int vcap_api_set_client(struct vcap_control *vctrl); + +#endif /* __VCAP_API__ */ diff --git a/drivers/net/ethernet/microchip/vcap/vcap_api_client.h b/drivers/net/ethernet/microchip/vcap/vcap_api_client.h new file mode 100644 index 000000000000..5df6808679ff --- /dev/null +++ b/drivers/net/ethernet/microchip/vcap/vcap_api_client.h @@ -0,0 +1,202 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* Copyright (C) 2022 Microchip Technology Inc. and its subsidiaries. 
+ * Microchip VCAP API + */ + +#ifndef __VCAP_API_CLIENT__ +#define __VCAP_API_CLIENT__ + +#include <linux/types.h> +#include <linux/list.h> +#include <linux/netdevice.h> +#include <net/flow_offload.h> + +#include "vcap_api.h" + +/* Client supplied VCAP rule key control part */ +struct vcap_client_keyfield_ctrl { + struct list_head list; /* For insertion into a rule */ + enum vcap_key_field key; + enum vcap_field_type type; +}; + +struct vcap_u1_key { + u8 value; + u8 mask; +}; + +struct vcap_u32_key { + u32 value; + u32 mask; +}; + +struct vcap_u48_key { + u8 value[6]; + u8 mask[6]; +}; + +struct vcap_u56_key { + u8 value[7]; + u8 mask[7]; +}; + +struct vcap_u64_key { + u8 value[8]; + u8 mask[8]; +}; + +struct vcap_u72_key { + u8 value[9]; + u8 mask[9]; +}; + +struct vcap_u112_key { + u8 value[14]; + u8 mask[14]; +}; + +struct vcap_u128_key { + u8 value[16]; + u8 mask[16]; +}; + +/* Client supplied VCAP rule field data */ +struct vcap_client_keyfield_data { + union { + struct vcap_u1_key u1; + struct vcap_u32_key u32; + struct vcap_u48_key u48; + struct vcap_u56_key u56; + struct vcap_u64_key u64; + struct vcap_u72_key u72; + struct vcap_u112_key u112; + struct vcap_u128_key u128; + }; +}; + +/* Client supplied VCAP rule key (value, mask) */ +struct vcap_client_keyfield { + struct vcap_client_keyfield_ctrl ctrl; + struct vcap_client_keyfield_data data; +}; + +/* Client supplied VCAP rule action control part */ +struct vcap_client_actionfield_ctrl { + struct list_head list; /* For insertion into a rule */ + enum vcap_action_field action; + enum vcap_field_type type; +}; + +struct vcap_u1_action { + u8 value; +}; + +struct vcap_u32_action { + u32 value; +}; + +struct vcap_u48_action { + u8 value[6]; +}; + +struct vcap_u56_action { + u8 value[7]; +}; + +struct vcap_u64_action { + u8 value[8]; +}; + +struct vcap_u72_action { + u8 value[9]; +}; + +struct vcap_u112_action { + u8 value[14]; +}; + +struct vcap_u128_action { + u8 value[16]; +}; + +struct vcap_client_actionfield_data { + union { + struct vcap_u1_action u1; + struct vcap_u32_action u32; + struct vcap_u48_action u48; + struct vcap_u56_action u56; + struct vcap_u64_action u64; + struct vcap_u72_action u72; + struct vcap_u112_action u112; + struct vcap_u128_action u128; + }; +}; + +struct vcap_client_actionfield { + struct vcap_client_actionfield_ctrl ctrl; + struct vcap_client_actionfield_data data; +}; + +enum vcap_bit { + VCAP_BIT_ANY, + VCAP_BIT_0, + VCAP_BIT_1 +}; + +/* VCAP rule operations */ +/* Allocate a rule and fill in the basic information */ +struct vcap_rule *vcap_alloc_rule(struct vcap_control *vctrl, + struct net_device *ndev, + int vcap_chain_id, + enum vcap_user user, + u16 priority, + u32 id); +/* Free mem of a rule owned by client */ +void vcap_free_rule(struct vcap_rule *rule); +/* Validate a rule before adding it to the VCAP */ +int vcap_val_rule(struct vcap_rule *rule, u16 l3_proto); +/* Add rule to a VCAP instance */ +int vcap_add_rule(struct vcap_rule *rule); +/* Delete rule in a VCAP instance */ +int vcap_del_rule(struct vcap_control *vctrl, struct net_device *ndev, u32 id); + +/* Update the keyset for the rule */ +int vcap_set_rule_set_keyset(struct vcap_rule *rule, + enum vcap_keyfield_set keyset); +/* Update the actionset for the rule */ +int vcap_set_rule_set_actionset(struct vcap_rule *rule, + enum vcap_actionfield_set actionset); + +/* VCAP rule field operations */ +int vcap_rule_add_key_bit(struct vcap_rule *rule, enum vcap_key_field key, + enum vcap_bit val); +int vcap_rule_add_key_u32(struct vcap_rule 
*rule, enum vcap_key_field key, + u32 value, u32 mask); +int vcap_rule_add_key_u48(struct vcap_rule *rule, enum vcap_key_field key, + struct vcap_u48_key *fieldval); +int vcap_rule_add_key_u72(struct vcap_rule *rule, enum vcap_key_field key, + struct vcap_u72_key *fieldval); +int vcap_rule_add_action_bit(struct vcap_rule *rule, + enum vcap_action_field action, enum vcap_bit val); +int vcap_rule_add_action_u32(struct vcap_rule *rule, + enum vcap_action_field action, u32 value); + +/* VCAP lookup operations */ +/* Lookup a vcap instance using chain id */ +struct vcap_admin *vcap_find_admin(struct vcap_control *vctrl, int cid); +/* Find information on a key field in a rule */ +const struct vcap_field *vcap_lookup_keyfield(struct vcap_rule *rule, + enum vcap_key_field key); +/* Find a rule id with a provided cookie */ +int vcap_lookup_rule_by_cookie(struct vcap_control *vctrl, u64 cookie); + +/* Copy to host byte order */ +void vcap_netbytes_copy(u8 *dst, u8 *src, int count); + +/* Convert validation error code into tc extact error message */ +void vcap_set_tc_exterr(struct flow_cls_offload *fco, struct vcap_rule *vrule); + +/* Cleanup a VCAP instance */ +int vcap_del_rules(struct vcap_control *vctrl, struct vcap_admin *admin); + +#endif /* __VCAP_API_CLIENT__ */ diff --git a/drivers/net/ethernet/microchip/vcap/vcap_api_kunit.c b/drivers/net/ethernet/microchip/vcap/vcap_api_kunit.c new file mode 100644 index 000000000000..b01a6e5039b0 --- /dev/null +++ b/drivers/net/ethernet/microchip/vcap/vcap_api_kunit.c @@ -0,0 +1,933 @@ +// SPDX-License-Identifier: BSD-3-Clause +/* Copyright (C) 2022 Microchip Technology Inc. and its subsidiaries. + * Microchip VCAP API kunit test suite + */ + +#include <kunit/test.h> +#include "vcap_api.h" +#include "vcap_api_client.h" +#include "vcap_model_kunit.h" + +/* First we have the test infrastructure that emulates the platform + * implementation + */ +#define TEST_BUF_CNT 100 +#define TEST_BUF_SZ 350 +#define STREAMWSIZE 64 + +static u32 test_updateaddr[STREAMWSIZE] = {}; +static int test_updateaddridx; +static int test_cache_erase_count; +static u32 test_init_start; +static u32 test_init_count; +static u32 test_hw_counter_id; +static struct vcap_cache_data test_hw_cache; + +/* Callback used by the VCAP API */ +static enum vcap_keyfield_set test_val_keyset(struct net_device *ndev, + struct vcap_admin *admin, + struct vcap_rule *rule, + struct vcap_keyset_list *kslist, + u16 l3_proto) +{ + int idx; + + if (kslist->cnt > 0) { + switch (admin->vtype) { + case VCAP_TYPE_IS0: + for (idx = 0; idx < kslist->cnt; idx++) { + if (kslist->keysets[idx] == VCAP_KFS_ETAG) + return kslist->keysets[idx]; + if (kslist->keysets[idx] == VCAP_KFS_PURE_5TUPLE_IP4) + return kslist->keysets[idx]; + if (kslist->keysets[idx] == VCAP_KFS_NORMAL_5TUPLE_IP4) + return kslist->keysets[idx]; + if (kslist->keysets[idx] == VCAP_KFS_NORMAL_7TUPLE) + return kslist->keysets[idx]; + } + break; + case VCAP_TYPE_IS2: + for (idx = 0; idx < kslist->cnt; idx++) { + if (kslist->keysets[idx] == VCAP_KFS_MAC_ETYPE) + return kslist->keysets[idx]; + if (kslist->keysets[idx] == VCAP_KFS_ARP) + return kslist->keysets[idx]; + if (kslist->keysets[idx] == VCAP_KFS_IP_7TUPLE) + return kslist->keysets[idx]; + } + break; + default: + pr_info("%s:%d: no validation for VCAP %d\n", + __func__, __LINE__, admin->vtype); + break; + } + } + return -EINVAL; +} + +/* Callback used by the VCAP API */ +static void test_add_def_fields(struct net_device *ndev, + struct vcap_admin *admin, + struct vcap_rule *rule) +{ + if 
(admin->vinst == 0 || admin->vinst == 2) + vcap_rule_add_key_bit(rule, VCAP_KF_LOOKUP_FIRST_IS, VCAP_BIT_1); + else + vcap_rule_add_key_bit(rule, VCAP_KF_LOOKUP_FIRST_IS, VCAP_BIT_0); +} + +/* Callback used by the VCAP API */ +static void test_cache_erase(struct vcap_admin *admin) +{ + if (test_cache_erase_count) { + memset(admin->cache.keystream, 0, test_cache_erase_count); + memset(admin->cache.maskstream, 0, test_cache_erase_count); + memset(admin->cache.actionstream, 0, test_cache_erase_count); + test_cache_erase_count = 0; + } +} + +/* Callback used by the VCAP API */ +static void test_cache_init(struct net_device *ndev, struct vcap_admin *admin, + u32 start, u32 count) +{ + test_init_start = start; + test_init_count = count; +} + +/* Callback used by the VCAP API */ +static void test_cache_read(struct net_device *ndev, struct vcap_admin *admin, + enum vcap_selection sel, u32 start, u32 count) +{ + u32 *keystr, *mskstr, *actstr; + int idx; + + pr_debug("%s:%d: %d %d\n", __func__, __LINE__, start, count); + switch (sel) { + case VCAP_SEL_ENTRY: + keystr = &admin->cache.keystream[start]; + mskstr = &admin->cache.maskstream[start]; + for (idx = 0; idx < count; ++idx) { + pr_debug("%s:%d: keydata[%02d]: 0x%08x\n", __func__, + __LINE__, start + idx, keystr[idx]); + } + for (idx = 0; idx < count; ++idx) { + /* Invert the mask before decoding starts */ + mskstr[idx] = ~mskstr[idx]; + pr_debug("%s:%d: mskdata[%02d]: 0x%08x\n", __func__, + __LINE__, start + idx, mskstr[idx]); + } + break; + case VCAP_SEL_ACTION: + actstr = &admin->cache.actionstream[start]; + for (idx = 0; idx < count; ++idx) { + pr_debug("%s:%d: actdata[%02d]: 0x%08x\n", __func__, + __LINE__, start + idx, actstr[idx]); + } + break; + case VCAP_SEL_COUNTER: + pr_debug("%s:%d\n", __func__, __LINE__); + test_hw_counter_id = start; + admin->cache.counter = test_hw_cache.counter; + admin->cache.sticky = test_hw_cache.sticky; + break; + case VCAP_SEL_ALL: + pr_debug("%s:%d\n", __func__, __LINE__); + break; + } +} + +/* Callback used by the VCAP API */ +static void test_cache_write(struct net_device *ndev, struct vcap_admin *admin, + enum vcap_selection sel, u32 start, u32 count) +{ + u32 *keystr, *mskstr, *actstr; + int idx; + + switch (sel) { + case VCAP_SEL_ENTRY: + keystr = &admin->cache.keystream[start]; + mskstr = &admin->cache.maskstream[start]; + for (idx = 0; idx < count; ++idx) { + pr_debug("%s:%d: keydata[%02d]: 0x%08x\n", __func__, + __LINE__, start + idx, keystr[idx]); + } + for (idx = 0; idx < count; ++idx) { + /* Invert the mask before encoding starts */ + mskstr[idx] = ~mskstr[idx]; + pr_debug("%s:%d: mskdata[%02d]: 0x%08x\n", __func__, + __LINE__, start + idx, mskstr[idx]); + } + break; + case VCAP_SEL_ACTION: + actstr = &admin->cache.actionstream[start]; + for (idx = 0; idx < count; ++idx) { + pr_debug("%s:%d: actdata[%02d]: 0x%08x\n", __func__, + __LINE__, start + idx, actstr[idx]); + } + break; + case VCAP_SEL_COUNTER: + pr_debug("%s:%d\n", __func__, __LINE__); + test_hw_counter_id = start; + test_hw_cache.counter = admin->cache.counter; + test_hw_cache.sticky = admin->cache.sticky; + break; + case VCAP_SEL_ALL: + pr_err("%s:%d: cannot write all streams at once\n", + __func__, __LINE__); + break; + } +} + +/* Callback used by the VCAP API */ +static void test_cache_update(struct net_device *ndev, struct vcap_admin *admin, + enum vcap_command cmd, + enum vcap_selection sel, u32 addr) +{ + if (test_updateaddridx < ARRAY_SIZE(test_updateaddr)) + test_updateaddr[test_updateaddridx] = addr; + else + pr_err("%s:%d: 
overflow: %d\n", __func__, __LINE__, test_updateaddridx); + test_updateaddridx++; +} + +static void test_cache_move(struct net_device *ndev, struct vcap_admin *admin, + u32 addr, int offset, int count) +{ +} + +/* Provide port information via a callback interface */ +static int vcap_test_port_info(struct net_device *ndev, enum vcap_type vtype, + int (*pf)(void *out, int arg, const char *fmt, ...), + void *out, int arg) +{ + return 0; +} + +struct vcap_operations test_callbacks = { + .validate_keyset = test_val_keyset, + .add_default_fields = test_add_def_fields, + .cache_erase = test_cache_erase, + .cache_write = test_cache_write, + .cache_read = test_cache_read, + .init = test_cache_init, + .update = test_cache_update, + .move = test_cache_move, + .port_info = vcap_test_port_info, +}; + +struct vcap_control test_vctrl = { + .vcaps = kunit_test_vcaps, + .stats = &kunit_test_vcap_stats, + .ops = &test_callbacks, +}; + +static void vcap_test_api_init(struct vcap_admin *admin) +{ + /* Initialize the shared objects */ + INIT_LIST_HEAD(&test_vctrl.list); + INIT_LIST_HEAD(&admin->list); + INIT_LIST_HEAD(&admin->rules); + list_add_tail(&admin->list, &test_vctrl.list); + memset(test_updateaddr, 0, sizeof(test_updateaddr)); + test_updateaddridx = 0; +} + +/* Define the test cases. */ + +static void vcap_api_set_bit_1_test(struct kunit *test) +{ + struct vcap_stream_iter iter = { + .offset = 35, + .sw_width = 52, + .reg_idx = 1, + .reg_bitpos = 20, + .tg = 0 + }; + u32 stream[2] = {0}; + + vcap_set_bit(stream, &iter, 1); + + KUNIT_EXPECT_EQ(test, (u32)0x0, stream[0]); + KUNIT_EXPECT_EQ(test, (u32)BIT(20), stream[1]); +} + +static void vcap_api_set_bit_0_test(struct kunit *test) +{ + struct vcap_stream_iter iter = { + .offset = 35, + .sw_width = 52, + .reg_idx = 2, + .reg_bitpos = 11, + .tg = 0 + }; + u32 stream[3] = {~0, ~0, ~0}; + + vcap_set_bit(stream, &iter, 0); + + KUNIT_EXPECT_EQ(test, (u32)~0, stream[0]); + KUNIT_EXPECT_EQ(test, (u32)~0, stream[1]); + KUNIT_EXPECT_EQ(test, (u32)~BIT(11), stream[2]); +} + +static void vcap_api_iterator_init_test(struct kunit *test) +{ + struct vcap_stream_iter iter; + struct vcap_typegroup typegroups[] = { + { .offset = 0, .width = 2, .value = 2, }, + { .offset = 156, .width = 1, .value = 0, }, + { .offset = 0, .width = 0, .value = 0, }, + }; + struct vcap_typegroup typegroups2[] = { + { .offset = 0, .width = 3, .value = 4, }, + { .offset = 49, .width = 2, .value = 0, }, + { .offset = 98, .width = 2, .value = 0, }, + }; + + vcap_iter_init(&iter, 52, typegroups, 86); + + KUNIT_EXPECT_EQ(test, 52, iter.sw_width); + KUNIT_EXPECT_EQ(test, 86 + 2, iter.offset); + KUNIT_EXPECT_EQ(test, 3, iter.reg_idx); + KUNIT_EXPECT_EQ(test, 4, iter.reg_bitpos); + + vcap_iter_init(&iter, 49, typegroups2, 134); + + KUNIT_EXPECT_EQ(test, 49, iter.sw_width); + KUNIT_EXPECT_EQ(test, 134 + 7, iter.offset); + KUNIT_EXPECT_EQ(test, 5, iter.reg_idx); + KUNIT_EXPECT_EQ(test, 11, iter.reg_bitpos); +} + +static void vcap_api_iterator_next_test(struct kunit *test) +{ + struct vcap_stream_iter iter; + struct vcap_typegroup typegroups[] = { + { .offset = 0, .width = 4, .value = 8, }, + { .offset = 49, .width = 1, .value = 0, }, + { .offset = 98, .width = 2, .value = 0, }, + { .offset = 147, .width = 3, .value = 0, }, + { .offset = 196, .width = 2, .value = 0, }, + { .offset = 245, .width = 1, .value = 0, }, + }; + int idx; + + vcap_iter_init(&iter, 49, typegroups, 86); + + KUNIT_EXPECT_EQ(test, 49, iter.sw_width); + KUNIT_EXPECT_EQ(test, 86 + 5, iter.offset); + KUNIT_EXPECT_EQ(test, 3, 
iter.reg_idx); + KUNIT_EXPECT_EQ(test, 10, iter.reg_bitpos); + + vcap_iter_next(&iter); + + KUNIT_EXPECT_EQ(test, 91 + 1, iter.offset); + KUNIT_EXPECT_EQ(test, 3, iter.reg_idx); + KUNIT_EXPECT_EQ(test, 11, iter.reg_bitpos); + + for (idx = 0; idx < 6; idx++) + vcap_iter_next(&iter); + + KUNIT_EXPECT_EQ(test, 92 + 6 + 2, iter.offset); + KUNIT_EXPECT_EQ(test, 4, iter.reg_idx); + KUNIT_EXPECT_EQ(test, 2, iter.reg_bitpos); +} + +static void vcap_api_encode_typegroups_test(struct kunit *test) +{ + u32 stream[12] = {0}; + struct vcap_typegroup typegroups[] = { + { .offset = 0, .width = 4, .value = 8, }, + { .offset = 49, .width = 1, .value = 1, }, + { .offset = 98, .width = 2, .value = 3, }, + { .offset = 147, .width = 3, .value = 5, }, + { .offset = 196, .width = 2, .value = 2, }, + { .offset = 245, .width = 5, .value = 27, }, + { .offset = 0, .width = 0, .value = 0, }, + }; + + vcap_encode_typegroups(stream, 49, typegroups, false); + + KUNIT_EXPECT_EQ(test, (u32)0x8, stream[0]); + KUNIT_EXPECT_EQ(test, (u32)0x0, stream[1]); + KUNIT_EXPECT_EQ(test, (u32)0x1, stream[2]); + KUNIT_EXPECT_EQ(test, (u32)0x0, stream[3]); + KUNIT_EXPECT_EQ(test, (u32)0x3, stream[4]); + KUNIT_EXPECT_EQ(test, (u32)0x0, stream[5]); + KUNIT_EXPECT_EQ(test, (u32)0x5, stream[6]); + KUNIT_EXPECT_EQ(test, (u32)0x0, stream[7]); + KUNIT_EXPECT_EQ(test, (u32)0x2, stream[8]); + KUNIT_EXPECT_EQ(test, (u32)0x0, stream[9]); + KUNIT_EXPECT_EQ(test, (u32)27, stream[10]); + KUNIT_EXPECT_EQ(test, (u32)0x0, stream[11]); +} + +static void vcap_api_encode_bit_test(struct kunit *test) +{ + struct vcap_stream_iter iter; + u32 stream[4] = {0}; + struct vcap_typegroup typegroups[] = { + { .offset = 0, .width = 4, .value = 8, }, + { .offset = 49, .width = 1, .value = 1, }, + { .offset = 98, .width = 2, .value = 3, }, + { .offset = 147, .width = 3, .value = 5, }, + { .offset = 196, .width = 2, .value = 2, }, + { .offset = 245, .width = 1, .value = 0, }, + }; + + vcap_iter_init(&iter, 49, typegroups, 44); + + KUNIT_EXPECT_EQ(test, 48, iter.offset); + KUNIT_EXPECT_EQ(test, 1, iter.reg_idx); + KUNIT_EXPECT_EQ(test, 16, iter.reg_bitpos); + + vcap_encode_bit(stream, &iter, 1); + + KUNIT_EXPECT_EQ(test, (u32)0x0, stream[0]); + KUNIT_EXPECT_EQ(test, (u32)BIT(16), stream[1]); + KUNIT_EXPECT_EQ(test, (u32)0x0, stream[2]); +} + +static void vcap_api_encode_field_test(struct kunit *test) +{ + struct vcap_stream_iter iter; + u32 stream[16] = {0}; + struct vcap_typegroup typegroups[] = { + { .offset = 0, .width = 4, .value = 8, }, + { .offset = 49, .width = 1, .value = 1, }, + { .offset = 98, .width = 2, .value = 3, }, + { .offset = 147, .width = 3, .value = 5, }, + { .offset = 196, .width = 2, .value = 2, }, + { .offset = 245, .width = 5, .value = 27, }, + { .offset = 0, .width = 0, .value = 0, }, + }; + struct vcap_field rf = { + .type = VCAP_FIELD_U32, + .offset = 86, + .width = 4, + }; + u8 value[] = {0x5}; + + vcap_iter_init(&iter, 49, typegroups, rf.offset); + + KUNIT_EXPECT_EQ(test, 91, iter.offset); + KUNIT_EXPECT_EQ(test, 3, iter.reg_idx); + KUNIT_EXPECT_EQ(test, 10, iter.reg_bitpos); + + vcap_encode_field(stream, &iter, rf.width, value); + + KUNIT_EXPECT_EQ(test, (u32)0x0, stream[0]); + KUNIT_EXPECT_EQ(test, (u32)0x0, stream[1]); + KUNIT_EXPECT_EQ(test, (u32)0x0, stream[2]); + KUNIT_EXPECT_EQ(test, (u32)(0x5 << 10), stream[3]); + KUNIT_EXPECT_EQ(test, (u32)0x0, stream[4]); + + vcap_encode_typegroups(stream, 49, typegroups, false); + + KUNIT_EXPECT_EQ(test, (u32)0x8, stream[0]); + KUNIT_EXPECT_EQ(test, (u32)0x0, stream[1]); + KUNIT_EXPECT_EQ(test, 
(u32)0x1, stream[2]); + KUNIT_EXPECT_EQ(test, (u32)(0x5 << 10), stream[3]); + KUNIT_EXPECT_EQ(test, (u32)0x3, stream[4]); + KUNIT_EXPECT_EQ(test, (u32)0x0, stream[5]); + KUNIT_EXPECT_EQ(test, (u32)0x5, stream[6]); + KUNIT_EXPECT_EQ(test, (u32)0x0, stream[7]); + KUNIT_EXPECT_EQ(test, (u32)0x2, stream[8]); + KUNIT_EXPECT_EQ(test, (u32)0x0, stream[9]); + KUNIT_EXPECT_EQ(test, (u32)27, stream[10]); + KUNIT_EXPECT_EQ(test, (u32)0x0, stream[11]); +} + +/* In this testcase the subword is smaller than a register */ +static void vcap_api_encode_short_field_test(struct kunit *test) +{ + struct vcap_stream_iter iter; + int sw_width = 21; + u32 stream[6] = {0}; + struct vcap_typegroup tgt[] = { + { .offset = 0, .width = 3, .value = 7, }, + { .offset = 21, .width = 2, .value = 3, }, + { .offset = 42, .width = 1, .value = 1, }, + { .offset = 0, .width = 0, .value = 0, }, + }; + struct vcap_field rf = { + .type = VCAP_FIELD_U32, + .offset = 25, + .width = 4, + }; + u8 value[] = {0x5}; + + vcap_iter_init(&iter, sw_width, tgt, rf.offset); + + KUNIT_EXPECT_EQ(test, 1, iter.regs_per_sw); + KUNIT_EXPECT_EQ(test, 21, iter.sw_width); + KUNIT_EXPECT_EQ(test, 25 + 3 + 2, iter.offset); + KUNIT_EXPECT_EQ(test, 1, iter.reg_idx); + KUNIT_EXPECT_EQ(test, 25 + 3 + 2 - sw_width, iter.reg_bitpos); + + vcap_encode_field(stream, &iter, rf.width, value); + + KUNIT_EXPECT_EQ(test, (u32)0x0, stream[0]); + KUNIT_EXPECT_EQ(test, (u32)(0x5 << (25 + 3 + 2 - sw_width)), stream[1]); + KUNIT_EXPECT_EQ(test, (u32)0x0, stream[2]); + KUNIT_EXPECT_EQ(test, (u32)0x0, stream[3]); + KUNIT_EXPECT_EQ(test, (u32)0x0, stream[4]); + KUNIT_EXPECT_EQ(test, (u32)0x0, stream[5]); + + vcap_encode_typegroups(stream, sw_width, tgt, false); + + KUNIT_EXPECT_EQ(test, (u32)7, stream[0]); + KUNIT_EXPECT_EQ(test, (u32)((0x5 << (25 + 3 + 2 - sw_width)) + 3), stream[1]); + KUNIT_EXPECT_EQ(test, (u32)1, stream[2]); + KUNIT_EXPECT_EQ(test, (u32)0, stream[3]); + KUNIT_EXPECT_EQ(test, (u32)0, stream[4]); + KUNIT_EXPECT_EQ(test, (u32)0, stream[5]); +} + +static void vcap_api_encode_keyfield_test(struct kunit *test) +{ + u32 keywords[16] = {0}; + u32 maskwords[16] = {0}; + struct vcap_admin admin = { + .vtype = VCAP_TYPE_IS2, + .cache = { + .keystream = keywords, + .maskstream = maskwords, + .actionstream = keywords, + }, + }; + struct vcap_rule_internal rule = { + .admin = &admin, + .data = { + .keyset = VCAP_KFS_MAC_ETYPE, + }, + .vctrl = &test_vctrl, + }; + struct vcap_client_keyfield ckf = { + .ctrl.list = {}, + .ctrl.key = VCAP_KF_ISDX_CLS, + .ctrl.type = VCAP_FIELD_U32, + .data.u32.value = 0xeef014a1, + .data.u32.mask = 0xfff, + }; + struct vcap_field rf = { + .type = VCAP_FIELD_U32, + .offset = 56, + .width = 12, + }; + struct vcap_typegroup tgt[] = { + { .offset = 0, .width = 2, .value = 2, }, + { .offset = 156, .width = 1, .value = 1, }, + { .offset = 0, .width = 0, .value = 0, }, + }; + + vcap_test_api_init(&admin); + vcap_encode_keyfield(&rule, &ckf, &rf, tgt); + + /* Key */ + KUNIT_EXPECT_EQ(test, (u32)0x0, keywords[0]); + KUNIT_EXPECT_EQ(test, (u32)0x0, keywords[1]); + KUNIT_EXPECT_EQ(test, (u32)(0x04a1 << 6), keywords[2]); + KUNIT_EXPECT_EQ(test, (u32)0x0, keywords[3]); + KUNIT_EXPECT_EQ(test, (u32)0x0, keywords[4]); + KUNIT_EXPECT_EQ(test, (u32)0x0, keywords[5]); + KUNIT_EXPECT_EQ(test, (u32)0x0, keywords[6]); + + /* Mask */ + KUNIT_EXPECT_EQ(test, (u32)0x0, maskwords[0]); + KUNIT_EXPECT_EQ(test, (u32)0x0, maskwords[1]); + KUNIT_EXPECT_EQ(test, (u32)(0x0fff << 6), maskwords[2]); + KUNIT_EXPECT_EQ(test, (u32)0x0, maskwords[3]); + 
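/* Why word index 2: the 12-bit ISDX_CLS field sits at key offset 56; adding the
 * 2-bit type-group at the start of the IS2 52-bit subword gives offset 58, i.e.
 * bit 6 of subword 1, which maps to stream word 2 for both key and mask; hence
 * the values shifted left by 6 checked above.
 */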
KUNIT_EXPECT_EQ(test, (u32)0x0, maskwords[4]); + KUNIT_EXPECT_EQ(test, (u32)0x0, maskwords[5]); + KUNIT_EXPECT_EQ(test, (u32)0x0, maskwords[6]); +} + +static void vcap_api_encode_max_keyfield_test(struct kunit *test) +{ + int idx; + u32 keywords[6] = {0}; + u32 maskwords[6] = {0}; + struct vcap_admin admin = { + .vtype = VCAP_TYPE_IS2, + /* IS2 sw_width = 52 bit */ + .cache = { + .keystream = keywords, + .maskstream = maskwords, + .actionstream = keywords, + }, + }; + struct vcap_rule_internal rule = { + .admin = &admin, + .data = { + .keyset = VCAP_KFS_IP_7TUPLE, + }, + .vctrl = &test_vctrl, + }; + struct vcap_client_keyfield ckf = { + .ctrl.list = {}, + .ctrl.key = VCAP_KF_L3_IP6_DIP, + .ctrl.type = VCAP_FIELD_U128, + .data.u128.value = { 0xa1, 0xa2, 0xa3, 0xa4, 0, 0, 0x43, 0, + 0, 0, 0, 0, 0, 0, 0x78, 0x8e, }, + .data.u128.mask = { 0xff, 0xff, 0xff, 0xff, 0, 0, 0xff, 0, + 0, 0, 0, 0, 0, 0, 0xff, 0xff }, + }; + struct vcap_field rf = { + .type = VCAP_FIELD_U128, + .offset = 0, + .width = 128, + }; + struct vcap_typegroup tgt[] = { + { .offset = 0, .width = 2, .value = 2, }, + { .offset = 156, .width = 1, .value = 1, }, + { .offset = 0, .width = 0, .value = 0, }, + }; + u32 keyres[] = { + 0x928e8a84, + 0x000c0002, + 0x00000010, + 0x00000000, + 0x0239e000, + 0x00000000, + }; + u32 mskres[] = { + 0xfffffffc, + 0x000c0003, + 0x0000003f, + 0x00000000, + 0x03fffc00, + 0x00000000, + }; + + vcap_encode_keyfield(&rule, &ckf, &rf, tgt); + + /* Key */ + for (idx = 0; idx < ARRAY_SIZE(keyres); ++idx) + KUNIT_EXPECT_EQ(test, keyres[idx], keywords[idx]); + /* Mask */ + for (idx = 0; idx < ARRAY_SIZE(mskres); ++idx) + KUNIT_EXPECT_EQ(test, mskres[idx], maskwords[idx]); +} + +static void vcap_api_encode_actionfield_test(struct kunit *test) +{ + u32 actwords[16] = {0}; + int sw_width = 21; + struct vcap_admin admin = { + .vtype = VCAP_TYPE_ES2, /* act_width = 21 */ + .cache = { + .actionstream = actwords, + }, + }; + struct vcap_rule_internal rule = { + .admin = &admin, + .data = { + .actionset = VCAP_AFS_BASE_TYPE, + }, + .vctrl = &test_vctrl, + }; + struct vcap_client_actionfield caf = { + .ctrl.list = {}, + .ctrl.action = VCAP_AF_POLICE_IDX, + .ctrl.type = VCAP_FIELD_U32, + .data.u32.value = 0x67908032, + }; + struct vcap_field rf = { + .type = VCAP_FIELD_U32, + .offset = 35, + .width = 6, + }; + struct vcap_typegroup tgt[] = { + { .offset = 0, .width = 2, .value = 2, }, + { .offset = 21, .width = 1, .value = 1, }, + { .offset = 42, .width = 1, .value = 0, }, + { .offset = 0, .width = 0, .value = 0, }, + }; + + vcap_encode_actionfield(&rule, &caf, &rf, tgt); + + /* Action */ + KUNIT_EXPECT_EQ(test, (u32)0x0, actwords[0]); + KUNIT_EXPECT_EQ(test, (u32)((0x32 << (35 + 2 + 1 - sw_width)) & 0x1fffff), actwords[1]); + KUNIT_EXPECT_EQ(test, (u32)((0x32 >> ((2 * sw_width) - 38 - 1))), actwords[2]); + KUNIT_EXPECT_EQ(test, (u32)0x0, actwords[3]); + KUNIT_EXPECT_EQ(test, (u32)0x0, actwords[4]); + KUNIT_EXPECT_EQ(test, (u32)0x0, actwords[5]); + KUNIT_EXPECT_EQ(test, (u32)0x0, actwords[6]); +} + +static void vcap_api_keyfield_typegroup_test(struct kunit *test) +{ + const struct vcap_typegroup *tg; + + tg = vcap_keyfield_typegroup(&test_vctrl, VCAP_TYPE_IS2, VCAP_KFS_MAC_ETYPE); + KUNIT_EXPECT_PTR_NE(test, NULL, tg); + KUNIT_EXPECT_EQ(test, 0, tg[0].offset); + KUNIT_EXPECT_EQ(test, 2, tg[0].width); + KUNIT_EXPECT_EQ(test, 2, tg[0].value); + KUNIT_EXPECT_EQ(test, 156, tg[1].offset); + KUNIT_EXPECT_EQ(test, 1, tg[1].width); + KUNIT_EXPECT_EQ(test, 0, tg[1].value); + KUNIT_EXPECT_EQ(test, 0, tg[2].offset); + 
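/* tg[2] is the all-zero entry that terminates the typegroup list, matching the
 * terminator convention used in the local typegroup fixtures above.
 */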
KUNIT_EXPECT_EQ(test, 0, tg[2].width); + KUNIT_EXPECT_EQ(test, 0, tg[2].value); + + tg = vcap_keyfield_typegroup(&test_vctrl, VCAP_TYPE_ES2, VCAP_KFS_LL_FULL); + KUNIT_EXPECT_PTR_EQ(test, NULL, tg); +} + +static void vcap_api_actionfield_typegroup_test(struct kunit *test) +{ + const struct vcap_typegroup *tg; + + tg = vcap_actionfield_typegroup(&test_vctrl, VCAP_TYPE_IS0, VCAP_AFS_FULL); + KUNIT_EXPECT_PTR_NE(test, NULL, tg); + KUNIT_EXPECT_EQ(test, 0, tg[0].offset); + KUNIT_EXPECT_EQ(test, 3, tg[0].width); + KUNIT_EXPECT_EQ(test, 4, tg[0].value); + KUNIT_EXPECT_EQ(test, 110, tg[1].offset); + KUNIT_EXPECT_EQ(test, 2, tg[1].width); + KUNIT_EXPECT_EQ(test, 0, tg[1].value); + KUNIT_EXPECT_EQ(test, 220, tg[2].offset); + KUNIT_EXPECT_EQ(test, 2, tg[2].width); + KUNIT_EXPECT_EQ(test, 0, tg[2].value); + KUNIT_EXPECT_EQ(test, 0, tg[3].offset); + KUNIT_EXPECT_EQ(test, 0, tg[3].width); + KUNIT_EXPECT_EQ(test, 0, tg[3].value); + + tg = vcap_actionfield_typegroup(&test_vctrl, VCAP_TYPE_IS2, VCAP_AFS_CLASSIFICATION); + KUNIT_EXPECT_PTR_EQ(test, NULL, tg); +} + +static void vcap_api_vcap_keyfields_test(struct kunit *test) +{ + const struct vcap_field *ft; + + ft = vcap_keyfields(&test_vctrl, VCAP_TYPE_IS2, VCAP_KFS_MAC_ETYPE); + KUNIT_EXPECT_PTR_NE(test, NULL, ft); + + /* Keyset that is not available and within the maximum keyset enum value */ + ft = vcap_keyfields(&test_vctrl, VCAP_TYPE_ES2, VCAP_KFS_PURE_5TUPLE_IP4); + KUNIT_EXPECT_PTR_EQ(test, NULL, ft); + + /* Keyset that is not available and beyond the maximum keyset enum value */ + ft = vcap_keyfields(&test_vctrl, VCAP_TYPE_ES2, VCAP_KFS_LL_FULL); + KUNIT_EXPECT_PTR_EQ(test, NULL, ft); +} + +static void vcap_api_vcap_actionfields_test(struct kunit *test) +{ + const struct vcap_field *ft; + + ft = vcap_actionfields(&test_vctrl, VCAP_TYPE_IS0, VCAP_AFS_FULL); + KUNIT_EXPECT_PTR_NE(test, NULL, ft); + + ft = vcap_actionfields(&test_vctrl, VCAP_TYPE_IS2, VCAP_AFS_FULL); + KUNIT_EXPECT_PTR_EQ(test, NULL, ft); + + ft = vcap_actionfields(&test_vctrl, VCAP_TYPE_IS2, VCAP_AFS_CLASSIFICATION); + KUNIT_EXPECT_PTR_EQ(test, NULL, ft); +} + +static void vcap_api_encode_rule_keyset_test(struct kunit *test) +{ + u32 keywords[16] = {0}; + u32 maskwords[16] = {0}; + struct vcap_admin admin = { + .vtype = VCAP_TYPE_IS2, + .cache = { + .keystream = keywords, + .maskstream = maskwords, + }, + }; + struct vcap_rule_internal rule = { + .admin = &admin, + .data = { + .keyset = VCAP_KFS_MAC_ETYPE, + }, + .vctrl = &test_vctrl, + }; + struct vcap_client_keyfield ckf[] = { + { + .ctrl.key = VCAP_KF_TYPE, + .ctrl.type = VCAP_FIELD_U32, + .data.u32.value = 0x00, + .data.u32.mask = 0x0f, + }, + { + .ctrl.key = VCAP_KF_LOOKUP_FIRST_IS, + .ctrl.type = VCAP_FIELD_BIT, + .data.u1.value = 0x01, + .data.u1.mask = 0x01, + }, + { + .ctrl.key = VCAP_KF_IF_IGR_PORT_MASK_L3, + .ctrl.type = VCAP_FIELD_BIT, + .data.u1.value = 0x00, + .data.u1.mask = 0x01, + }, + { + .ctrl.key = VCAP_KF_IF_IGR_PORT_MASK_RNG, + .ctrl.type = VCAP_FIELD_U32, + .data.u32.value = 0x00, + .data.u32.mask = 0x0f, + }, + { + .ctrl.key = VCAP_KF_IF_IGR_PORT_MASK, + .ctrl.type = VCAP_FIELD_U72, + .data.u72.value = {0x0, 0x00, 0x00, 0x00}, + .data.u72.mask = {0xfd, 0xff, 0xff, 0xff}, + }, + { + .ctrl.key = VCAP_KF_L2_DMAC, + .ctrl.type = VCAP_FIELD_U48, + /* Opposite endianness */ + .data.u48.value = {0x01, 0x02, 0x03, 0x04, 0x05, 0x06}, + .data.u48.mask = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff}, + }, + { + .ctrl.key = VCAP_KF_ETYPE_LEN_IS, + .ctrl.type = VCAP_FIELD_BIT, + .data.u1.value = 0x01, + .data.u1.mask = 0x01, + }, 
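/* ETYPE_LEN_IS set above selects frames whose header carries a real EtherType
 * (not an 802.3 length); the EtherType value itself is matched next.
 */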
+ { + .ctrl.key = VCAP_KF_ETYPE, + .ctrl.type = VCAP_FIELD_U32, + .data.u32.value = 0xaabb, + .data.u32.mask = 0xffff, + }, + }; + int idx; + int ret; + + /* Empty entry list */ + INIT_LIST_HEAD(&rule.data.keyfields); + ret = vcap_encode_rule_keyset(&rule); + KUNIT_EXPECT_EQ(test, -EINVAL, ret); + + for (idx = 0; idx < ARRAY_SIZE(ckf); idx++) + list_add_tail(&ckf[idx].ctrl.list, &rule.data.keyfields); + ret = vcap_encode_rule_keyset(&rule); + KUNIT_EXPECT_EQ(test, 0, ret); + + /* The key and mask values below are from an actual Sparx5 rule config */ + /* Key */ + KUNIT_EXPECT_EQ(test, (u32)0x00000042, keywords[0]); + KUNIT_EXPECT_EQ(test, (u32)0x00000000, keywords[1]); + KUNIT_EXPECT_EQ(test, (u32)0x00000000, keywords[2]); + KUNIT_EXPECT_EQ(test, (u32)0x00020100, keywords[3]); + KUNIT_EXPECT_EQ(test, (u32)0x60504030, keywords[4]); + KUNIT_EXPECT_EQ(test, (u32)0x00000000, keywords[5]); + KUNIT_EXPECT_EQ(test, (u32)0x00000000, keywords[6]); + KUNIT_EXPECT_EQ(test, (u32)0x0002aaee, keywords[7]); + KUNIT_EXPECT_EQ(test, (u32)0x00000000, keywords[8]); + KUNIT_EXPECT_EQ(test, (u32)0x00000000, keywords[9]); + KUNIT_EXPECT_EQ(test, (u32)0x00000000, keywords[10]); + KUNIT_EXPECT_EQ(test, (u32)0x00000000, keywords[11]); + + /* Mask: they will be inverted when applied to the register */ + KUNIT_EXPECT_EQ(test, (u32)~0x00b07f80, maskwords[0]); + KUNIT_EXPECT_EQ(test, (u32)~0xfff00000, maskwords[1]); + KUNIT_EXPECT_EQ(test, (u32)~0xfffffffc, maskwords[2]); + KUNIT_EXPECT_EQ(test, (u32)~0xfff000ff, maskwords[3]); + KUNIT_EXPECT_EQ(test, (u32)~0x00000000, maskwords[4]); + KUNIT_EXPECT_EQ(test, (u32)~0xfffffff0, maskwords[5]); + KUNIT_EXPECT_EQ(test, (u32)~0xfffffffe, maskwords[6]); + KUNIT_EXPECT_EQ(test, (u32)~0xfffc0001, maskwords[7]); + KUNIT_EXPECT_EQ(test, (u32)~0xffffffff, maskwords[8]); + KUNIT_EXPECT_EQ(test, (u32)~0xffffffff, maskwords[9]); + KUNIT_EXPECT_EQ(test, (u32)~0xffffffff, maskwords[10]); + KUNIT_EXPECT_EQ(test, (u32)~0xffffffff, maskwords[11]); +} + +static void vcap_api_encode_rule_actionset_test(struct kunit *test) +{ + u32 actwords[16] = {0}; + struct vcap_admin admin = { + .vtype = VCAP_TYPE_IS2, + .cache = { + .actionstream = actwords, + }, + }; + struct vcap_rule_internal rule = { + .admin = &admin, + .data = { + .actionset = VCAP_AFS_BASE_TYPE, + }, + .vctrl = &test_vctrl, + }; + struct vcap_client_actionfield caf[] = { + { + .ctrl.action = VCAP_AF_MATCH_ID, + .ctrl.type = VCAP_FIELD_U32, + .data.u32.value = 0x01, + }, + { + .ctrl.action = VCAP_AF_MATCH_ID_MASK, + .ctrl.type = VCAP_FIELD_U32, + .data.u32.value = 0x01, + }, + { + .ctrl.action = VCAP_AF_CNT_ID, + .ctrl.type = VCAP_FIELD_U32, + .data.u32.value = 0x64, + }, + }; + int idx; + int ret; + + /* Empty entry list */ + INIT_LIST_HEAD(&rule.data.actionfields); + ret = vcap_encode_rule_actionset(&rule); + /* We allow rules with no actions */ + KUNIT_EXPECT_EQ(test, 0, ret); + + for (idx = 0; idx < ARRAY_SIZE(caf); idx++) + list_add_tail(&caf[idx].ctrl.list, &rule.data.actionfields); + ret = vcap_encode_rule_actionset(&rule); + KUNIT_EXPECT_EQ(test, 0, ret); + + /* The action values below are from an actual Sparx5 rule config */ + KUNIT_EXPECT_EQ(test, (u32)0x00000002, actwords[0]); + KUNIT_EXPECT_EQ(test, (u32)0x00000000, actwords[1]); + KUNIT_EXPECT_EQ(test, (u32)0x00000000, actwords[2]); + KUNIT_EXPECT_EQ(test, (u32)0x00000000, actwords[3]); + KUNIT_EXPECT_EQ(test, (u32)0x00000000, actwords[4]); + KUNIT_EXPECT_EQ(test, (u32)0x00100000, actwords[5]); + KUNIT_EXPECT_EQ(test, (u32)0x06400010, actwords[6]); + 
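/* Nothing is encoded beyond word 6 for this actionset, so the remaining
 * action words are expected to stay zero.
 */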
KUNIT_EXPECT_EQ(test, (u32)0x00000000, actwords[7]); + KUNIT_EXPECT_EQ(test, (u32)0x00000000, actwords[8]); + KUNIT_EXPECT_EQ(test, (u32)0x00000000, actwords[9]); + KUNIT_EXPECT_EQ(test, (u32)0x00000000, actwords[10]); + KUNIT_EXPECT_EQ(test, (u32)0x00000000, actwords[11]); +} + +static struct kunit_case vcap_api_encoding_test_cases[] = { + KUNIT_CASE(vcap_api_set_bit_1_test), + KUNIT_CASE(vcap_api_set_bit_0_test), + KUNIT_CASE(vcap_api_iterator_init_test), + KUNIT_CASE(vcap_api_iterator_next_test), + KUNIT_CASE(vcap_api_encode_typegroups_test), + KUNIT_CASE(vcap_api_encode_bit_test), + KUNIT_CASE(vcap_api_encode_field_test), + KUNIT_CASE(vcap_api_encode_short_field_test), + KUNIT_CASE(vcap_api_encode_keyfield_test), + KUNIT_CASE(vcap_api_encode_max_keyfield_test), + KUNIT_CASE(vcap_api_encode_actionfield_test), + KUNIT_CASE(vcap_api_keyfield_typegroup_test), + KUNIT_CASE(vcap_api_actionfield_typegroup_test), + KUNIT_CASE(vcap_api_vcap_keyfields_test), + KUNIT_CASE(vcap_api_vcap_actionfields_test), + KUNIT_CASE(vcap_api_encode_rule_keyset_test), + KUNIT_CASE(vcap_api_encode_rule_actionset_test), + {} +}; + +static struct kunit_suite vcap_api_encoding_test_suite = { + .name = "VCAP_API_Encoding_Testsuite", + .test_cases = vcap_api_encoding_test_cases, +}; + +kunit_test_suite(vcap_api_encoding_test_suite); diff --git a/drivers/net/ethernet/microchip/vcap/vcap_model_kunit.c b/drivers/net/ethernet/microchip/vcap/vcap_model_kunit.c new file mode 100644 index 000000000000..5d681d2697cd --- /dev/null +++ b/drivers/net/ethernet/microchip/vcap/vcap_model_kunit.c @@ -0,0 +1,5570 @@ +// SPDX-License-Identifier: BSD-3-Clause +/* Copyright (C) 2022 Microchip Technology Inc. and its subsidiaries. + * Microchip VCAP API Test VCAP Model Data + */ + +#include <linux/types.h> +#include <linux/kernel.h> + +#include "vcap_api.h" +#include "vcap_model_kunit.h" + +/* keyfields */ +static const struct vcap_field is0_mll_keyfield[] = { + [VCAP_KF_TYPE] = { + .type = VCAP_FIELD_U32, + .offset = 0, + .width = 2, + }, + [VCAP_KF_LOOKUP_FIRST_IS] = { + .type = VCAP_FIELD_BIT, + .offset = 2, + .width = 1, + }, + [VCAP_KF_IF_IGR_PORT] = { + .type = VCAP_FIELD_U32, + .offset = 3, + .width = 7, + }, + [VCAP_KF_8021Q_VLAN_TAGS] = { + .type = VCAP_FIELD_U32, + .offset = 10, + .width = 3, + }, + [VCAP_KF_8021Q_TPID0] = { + .type = VCAP_FIELD_U32, + .offset = 13, + .width = 3, + }, + [VCAP_KF_8021Q_VID0] = { + .type = VCAP_FIELD_U32, + .offset = 16, + .width = 12, + }, + [VCAP_KF_8021Q_TPID1] = { + .type = VCAP_FIELD_U32, + .offset = 28, + .width = 3, + }, + [VCAP_KF_8021Q_VID1] = { + .type = VCAP_FIELD_U32, + .offset = 31, + .width = 12, + }, + [VCAP_KF_L2_DMAC] = { + .type = VCAP_FIELD_U48, + .offset = 43, + .width = 48, + }, + [VCAP_KF_L2_SMAC] = { + .type = VCAP_FIELD_U48, + .offset = 91, + .width = 48, + }, + [VCAP_KF_ETYPE_MPLS] = { + .type = VCAP_FIELD_U32, + .offset = 139, + .width = 2, + }, + [VCAP_KF_L4_RNG] = { + .type = VCAP_FIELD_U32, + .offset = 141, + .width = 8, + }, +}; + +static const struct vcap_field is0_tri_vid_keyfield[] = { + [VCAP_KF_TYPE] = { + .type = VCAP_FIELD_U32, + .offset = 0, + .width = 2, + }, + [VCAP_KF_LOOKUP_FIRST_IS] = { + .type = VCAP_FIELD_BIT, + .offset = 2, + .width = 1, + }, + [VCAP_KF_IF_IGR_PORT] = { + .type = VCAP_FIELD_U32, + .offset = 3, + .width = 7, + }, + [VCAP_KF_LOOKUP_GEN_IDX_SEL] = { + .type = VCAP_FIELD_U32, + .offset = 10, + .width = 2, + }, + [VCAP_KF_LOOKUP_GEN_IDX] = { + .type = VCAP_FIELD_U32, + .offset = 12, + .width = 12, + }, + [VCAP_KF_8021Q_VLAN_TAGS] = { + 
.type = VCAP_FIELD_U32, + .offset = 24, + .width = 3, + }, + [VCAP_KF_8021Q_TPID0] = { + .type = VCAP_FIELD_U32, + .offset = 27, + .width = 3, + }, + [VCAP_KF_8021Q_PCP0] = { + .type = VCAP_FIELD_U32, + .offset = 30, + .width = 3, + }, + [VCAP_KF_8021Q_DEI0] = { + .type = VCAP_FIELD_BIT, + .offset = 33, + .width = 1, + }, + [VCAP_KF_8021Q_VID0] = { + .type = VCAP_FIELD_U32, + .offset = 34, + .width = 12, + }, + [VCAP_KF_8021Q_TPID1] = { + .type = VCAP_FIELD_U32, + .offset = 46, + .width = 3, + }, + [VCAP_KF_8021Q_PCP1] = { + .type = VCAP_FIELD_U32, + .offset = 49, + .width = 3, + }, + [VCAP_KF_8021Q_DEI1] = { + .type = VCAP_FIELD_BIT, + .offset = 52, + .width = 1, + }, + [VCAP_KF_8021Q_VID1] = { + .type = VCAP_FIELD_U32, + .offset = 53, + .width = 12, + }, + [VCAP_KF_8021Q_TPID2] = { + .type = VCAP_FIELD_U32, + .offset = 65, + .width = 3, + }, + [VCAP_KF_8021Q_PCP2] = { + .type = VCAP_FIELD_U32, + .offset = 68, + .width = 3, + }, + [VCAP_KF_8021Q_DEI2] = { + .type = VCAP_FIELD_BIT, + .offset = 71, + .width = 1, + }, + [VCAP_KF_8021Q_VID2] = { + .type = VCAP_FIELD_U32, + .offset = 72, + .width = 12, + }, + [VCAP_KF_L4_RNG] = { + .type = VCAP_FIELD_U32, + .offset = 84, + .width = 8, + }, + [VCAP_KF_OAM_Y1731_IS] = { + .type = VCAP_FIELD_BIT, + .offset = 92, + .width = 1, + }, + [VCAP_KF_OAM_MEL_FLAGS] = { + .type = VCAP_FIELD_U32, + .offset = 93, + .width = 7, + }, +}; + +static const struct vcap_field is0_ll_full_keyfield[] = { + [VCAP_KF_TYPE] = { + .type = VCAP_FIELD_U32, + .offset = 0, + .width = 2, + }, + [VCAP_KF_LOOKUP_FIRST_IS] = { + .type = VCAP_FIELD_BIT, + .offset = 2, + .width = 1, + }, + [VCAP_KF_IF_IGR_PORT] = { + .type = VCAP_FIELD_U32, + .offset = 3, + .width = 7, + }, + [VCAP_KF_8021Q_VLAN_TAGS] = { + .type = VCAP_FIELD_U32, + .offset = 10, + .width = 3, + }, + [VCAP_KF_8021Q_TPID0] = { + .type = VCAP_FIELD_U32, + .offset = 13, + .width = 3, + }, + [VCAP_KF_8021Q_PCP0] = { + .type = VCAP_FIELD_U32, + .offset = 16, + .width = 3, + }, + [VCAP_KF_8021Q_DEI0] = { + .type = VCAP_FIELD_BIT, + .offset = 19, + .width = 1, + }, + [VCAP_KF_8021Q_VID0] = { + .type = VCAP_FIELD_U32, + .offset = 20, + .width = 12, + }, + [VCAP_KF_8021Q_TPID1] = { + .type = VCAP_FIELD_U32, + .offset = 32, + .width = 3, + }, + [VCAP_KF_8021Q_PCP1] = { + .type = VCAP_FIELD_U32, + .offset = 35, + .width = 3, + }, + [VCAP_KF_8021Q_DEI1] = { + .type = VCAP_FIELD_BIT, + .offset = 38, + .width = 1, + }, + [VCAP_KF_8021Q_VID1] = { + .type = VCAP_FIELD_U32, + .offset = 39, + .width = 12, + }, + [VCAP_KF_8021Q_TPID2] = { + .type = VCAP_FIELD_U32, + .offset = 51, + .width = 3, + }, + [VCAP_KF_8021Q_PCP2] = { + .type = VCAP_FIELD_U32, + .offset = 54, + .width = 3, + }, + [VCAP_KF_8021Q_DEI2] = { + .type = VCAP_FIELD_BIT, + .offset = 57, + .width = 1, + }, + [VCAP_KF_8021Q_VID2] = { + .type = VCAP_FIELD_U32, + .offset = 58, + .width = 12, + }, + [VCAP_KF_L2_DMAC] = { + .type = VCAP_FIELD_U48, + .offset = 70, + .width = 48, + }, + [VCAP_KF_L2_SMAC] = { + .type = VCAP_FIELD_U48, + .offset = 118, + .width = 48, + }, + [VCAP_KF_ETYPE_LEN_IS] = { + .type = VCAP_FIELD_BIT, + .offset = 166, + .width = 1, + }, + [VCAP_KF_ETYPE] = { + .type = VCAP_FIELD_U32, + .offset = 167, + .width = 16, + }, + [VCAP_KF_IP_SNAP_IS] = { + .type = VCAP_FIELD_BIT, + .offset = 183, + .width = 1, + }, + [VCAP_KF_IP4_IS] = { + .type = VCAP_FIELD_BIT, + .offset = 184, + .width = 1, + }, + [VCAP_KF_L3_FRAGMENT_TYPE] = { + .type = VCAP_FIELD_U32, + .offset = 185, + .width = 2, + }, + [VCAP_KF_L3_FRAG_INVLD_L4_LEN] = { + .type = VCAP_FIELD_BIT, + 
.offset = 187, + .width = 1, + }, + [VCAP_KF_L3_OPTIONS_IS] = { + .type = VCAP_FIELD_BIT, + .offset = 188, + .width = 1, + }, + [VCAP_KF_L3_DSCP] = { + .type = VCAP_FIELD_U32, + .offset = 189, + .width = 6, + }, + [VCAP_KF_L3_IP4_DIP] = { + .type = VCAP_FIELD_U32, + .offset = 195, + .width = 32, + }, + [VCAP_KF_L3_IP4_SIP] = { + .type = VCAP_FIELD_U32, + .offset = 227, + .width = 32, + }, + [VCAP_KF_TCP_UDP_IS] = { + .type = VCAP_FIELD_BIT, + .offset = 259, + .width = 1, + }, + [VCAP_KF_TCP_IS] = { + .type = VCAP_FIELD_BIT, + .offset = 260, + .width = 1, + }, + [VCAP_KF_L4_SPORT] = { + .type = VCAP_FIELD_U32, + .offset = 261, + .width = 16, + }, + [VCAP_KF_L4_RNG] = { + .type = VCAP_FIELD_U32, + .offset = 277, + .width = 8, + }, +}; + +static const struct vcap_field is0_normal_keyfield[] = { + [VCAP_KF_TYPE] = { + .type = VCAP_FIELD_U32, + .offset = 0, + .width = 2, + }, + [VCAP_KF_LOOKUP_FIRST_IS] = { + .type = VCAP_FIELD_BIT, + .offset = 2, + .width = 1, + }, + [VCAP_KF_LOOKUP_GEN_IDX_SEL] = { + .type = VCAP_FIELD_U32, + .offset = 3, + .width = 2, + }, + [VCAP_KF_LOOKUP_GEN_IDX] = { + .type = VCAP_FIELD_U32, + .offset = 5, + .width = 12, + }, + [VCAP_KF_IF_IGR_PORT_MASK_SEL] = { + .type = VCAP_FIELD_U32, + .offset = 17, + .width = 2, + }, + [VCAP_KF_IF_IGR_PORT_MASK] = { + .type = VCAP_FIELD_U72, + .offset = 19, + .width = 65, + }, + [VCAP_KF_L2_MC_IS] = { + .type = VCAP_FIELD_BIT, + .offset = 84, + .width = 1, + }, + [VCAP_KF_L2_BC_IS] = { + .type = VCAP_FIELD_BIT, + .offset = 85, + .width = 1, + }, + [VCAP_KF_8021Q_VLAN_TAGS] = { + .type = VCAP_FIELD_U32, + .offset = 86, + .width = 3, + }, + [VCAP_KF_8021Q_TPID0] = { + .type = VCAP_FIELD_U32, + .offset = 89, + .width = 3, + }, + [VCAP_KF_8021Q_PCP0] = { + .type = VCAP_FIELD_U32, + .offset = 92, + .width = 3, + }, + [VCAP_KF_8021Q_DEI0] = { + .type = VCAP_FIELD_BIT, + .offset = 95, + .width = 1, + }, + [VCAP_KF_8021Q_VID0] = { + .type = VCAP_FIELD_U32, + .offset = 96, + .width = 12, + }, + [VCAP_KF_8021Q_TPID1] = { + .type = VCAP_FIELD_U32, + .offset = 108, + .width = 3, + }, + [VCAP_KF_8021Q_PCP1] = { + .type = VCAP_FIELD_U32, + .offset = 111, + .width = 3, + }, + [VCAP_KF_8021Q_DEI1] = { + .type = VCAP_FIELD_BIT, + .offset = 114, + .width = 1, + }, + [VCAP_KF_8021Q_VID1] = { + .type = VCAP_FIELD_U32, + .offset = 115, + .width = 12, + }, + [VCAP_KF_8021Q_TPID2] = { + .type = VCAP_FIELD_U32, + .offset = 127, + .width = 3, + }, + [VCAP_KF_8021Q_PCP2] = { + .type = VCAP_FIELD_U32, + .offset = 130, + .width = 3, + }, + [VCAP_KF_8021Q_DEI2] = { + .type = VCAP_FIELD_BIT, + .offset = 133, + .width = 1, + }, + [VCAP_KF_8021Q_VID2] = { + .type = VCAP_FIELD_U32, + .offset = 134, + .width = 12, + }, + [VCAP_KF_DST_ENTRY] = { + .type = VCAP_FIELD_BIT, + .offset = 146, + .width = 1, + }, + [VCAP_KF_L2_SMAC] = { + .type = VCAP_FIELD_U48, + .offset = 147, + .width = 48, + }, + [VCAP_KF_IP_MC_IS] = { + .type = VCAP_FIELD_BIT, + .offset = 195, + .width = 1, + }, + [VCAP_KF_ETYPE_LEN_IS] = { + .type = VCAP_FIELD_BIT, + .offset = 196, + .width = 1, + }, + [VCAP_KF_ETYPE] = { + .type = VCAP_FIELD_U32, + .offset = 197, + .width = 16, + }, + [VCAP_KF_IP_SNAP_IS] = { + .type = VCAP_FIELD_BIT, + .offset = 213, + .width = 1, + }, + [VCAP_KF_IP4_IS] = { + .type = VCAP_FIELD_BIT, + .offset = 214, + .width = 1, + }, + [VCAP_KF_L3_FRAGMENT_TYPE] = { + .type = VCAP_FIELD_U32, + .offset = 215, + .width = 2, + }, + [VCAP_KF_L3_FRAG_INVLD_L4_LEN] = { + .type = VCAP_FIELD_BIT, + .offset = 217, + .width = 1, + }, + [VCAP_KF_L3_OPTIONS_IS] = { + .type = VCAP_FIELD_BIT, + 
.offset = 218, + .width = 1, + }, + [VCAP_KF_L3_DSCP] = { + .type = VCAP_FIELD_U32, + .offset = 219, + .width = 6, + }, + [VCAP_KF_L3_IP4_SIP] = { + .type = VCAP_FIELD_U32, + .offset = 225, + .width = 32, + }, + [VCAP_KF_TCP_UDP_IS] = { + .type = VCAP_FIELD_BIT, + .offset = 257, + .width = 1, + }, + [VCAP_KF_TCP_IS] = { + .type = VCAP_FIELD_BIT, + .offset = 258, + .width = 1, + }, + [VCAP_KF_L4_SPORT] = { + .type = VCAP_FIELD_U32, + .offset = 259, + .width = 16, + }, + [VCAP_KF_L4_RNG] = { + .type = VCAP_FIELD_U32, + .offset = 275, + .width = 8, + }, +}; + +static const struct vcap_field is0_normal_7tuple_keyfield[] = { + [VCAP_KF_TYPE] = { + .type = VCAP_FIELD_BIT, + .offset = 0, + .width = 1, + }, + [VCAP_KF_LOOKUP_FIRST_IS] = { + .type = VCAP_FIELD_BIT, + .offset = 1, + .width = 1, + }, + [VCAP_KF_LOOKUP_GEN_IDX_SEL] = { + .type = VCAP_FIELD_U32, + .offset = 2, + .width = 2, + }, + [VCAP_KF_LOOKUP_GEN_IDX] = { + .type = VCAP_FIELD_U32, + .offset = 4, + .width = 12, + }, + [VCAP_KF_IF_IGR_PORT_MASK_SEL] = { + .type = VCAP_FIELD_U32, + .offset = 16, + .width = 2, + }, + [VCAP_KF_IF_IGR_PORT_MASK] = { + .type = VCAP_FIELD_U72, + .offset = 18, + .width = 65, + }, + [VCAP_KF_L2_MC_IS] = { + .type = VCAP_FIELD_BIT, + .offset = 83, + .width = 1, + }, + [VCAP_KF_L2_BC_IS] = { + .type = VCAP_FIELD_BIT, + .offset = 84, + .width = 1, + }, + [VCAP_KF_8021Q_VLAN_TAGS] = { + .type = VCAP_FIELD_U32, + .offset = 85, + .width = 3, + }, + [VCAP_KF_8021Q_TPID0] = { + .type = VCAP_FIELD_U32, + .offset = 88, + .width = 3, + }, + [VCAP_KF_8021Q_PCP0] = { + .type = VCAP_FIELD_U32, + .offset = 91, + .width = 3, + }, + [VCAP_KF_8021Q_DEI0] = { + .type = VCAP_FIELD_BIT, + .offset = 94, + .width = 1, + }, + [VCAP_KF_8021Q_VID0] = { + .type = VCAP_FIELD_U32, + .offset = 95, + .width = 12, + }, + [VCAP_KF_8021Q_TPID1] = { + .type = VCAP_FIELD_U32, + .offset = 107, + .width = 3, + }, + [VCAP_KF_8021Q_PCP1] = { + .type = VCAP_FIELD_U32, + .offset = 110, + .width = 3, + }, + [VCAP_KF_8021Q_DEI1] = { + .type = VCAP_FIELD_BIT, + .offset = 113, + .width = 1, + }, + [VCAP_KF_8021Q_VID1] = { + .type = VCAP_FIELD_U32, + .offset = 114, + .width = 12, + }, + [VCAP_KF_8021Q_TPID2] = { + .type = VCAP_FIELD_U32, + .offset = 126, + .width = 3, + }, + [VCAP_KF_8021Q_PCP2] = { + .type = VCAP_FIELD_U32, + .offset = 129, + .width = 3, + }, + [VCAP_KF_8021Q_DEI2] = { + .type = VCAP_FIELD_BIT, + .offset = 132, + .width = 1, + }, + [VCAP_KF_8021Q_VID2] = { + .type = VCAP_FIELD_U32, + .offset = 133, + .width = 12, + }, + [VCAP_KF_L2_DMAC] = { + .type = VCAP_FIELD_U48, + .offset = 145, + .width = 48, + }, + [VCAP_KF_L2_SMAC] = { + .type = VCAP_FIELD_U48, + .offset = 193, + .width = 48, + }, + [VCAP_KF_IP_MC_IS] = { + .type = VCAP_FIELD_BIT, + .offset = 241, + .width = 1, + }, + [VCAP_KF_ETYPE_LEN_IS] = { + .type = VCAP_FIELD_BIT, + .offset = 242, + .width = 1, + }, + [VCAP_KF_ETYPE] = { + .type = VCAP_FIELD_U32, + .offset = 243, + .width = 16, + }, + [VCAP_KF_IP_SNAP_IS] = { + .type = VCAP_FIELD_BIT, + .offset = 259, + .width = 1, + }, + [VCAP_KF_IP4_IS] = { + .type = VCAP_FIELD_BIT, + .offset = 260, + .width = 1, + }, + [VCAP_KF_L3_FRAGMENT_TYPE] = { + .type = VCAP_FIELD_U32, + .offset = 261, + .width = 2, + }, + [VCAP_KF_L3_FRAG_INVLD_L4_LEN] = { + .type = VCAP_FIELD_BIT, + .offset = 263, + .width = 1, + }, + [VCAP_KF_L3_OPTIONS_IS] = { + .type = VCAP_FIELD_BIT, + .offset = 264, + .width = 1, + }, + [VCAP_KF_L3_DSCP] = { + .type = VCAP_FIELD_U32, + .offset = 265, + .width = 6, + }, + [VCAP_KF_L3_IP6_DIP] = { + .type = VCAP_FIELD_U128, + 
.offset = 271, + .width = 128, + }, + [VCAP_KF_L3_IP6_SIP] = { + .type = VCAP_FIELD_U128, + .offset = 399, + .width = 128, + }, + [VCAP_KF_TCP_UDP_IS] = { + .type = VCAP_FIELD_BIT, + .offset = 527, + .width = 1, + }, + [VCAP_KF_TCP_IS] = { + .type = VCAP_FIELD_BIT, + .offset = 528, + .width = 1, + }, + [VCAP_KF_L4_SPORT] = { + .type = VCAP_FIELD_U32, + .offset = 529, + .width = 16, + }, + [VCAP_KF_L4_RNG] = { + .type = VCAP_FIELD_U32, + .offset = 545, + .width = 8, + }, +}; + +static const struct vcap_field is0_normal_5tuple_ip4_keyfield[] = { + [VCAP_KF_TYPE] = { + .type = VCAP_FIELD_U32, + .offset = 0, + .width = 2, + }, + [VCAP_KF_LOOKUP_FIRST_IS] = { + .type = VCAP_FIELD_BIT, + .offset = 2, + .width = 1, + }, + [VCAP_KF_LOOKUP_GEN_IDX_SEL] = { + .type = VCAP_FIELD_U32, + .offset = 3, + .width = 2, + }, + [VCAP_KF_LOOKUP_GEN_IDX] = { + .type = VCAP_FIELD_U32, + .offset = 5, + .width = 12, + }, + [VCAP_KF_IF_IGR_PORT_MASK_SEL] = { + .type = VCAP_FIELD_U32, + .offset = 17, + .width = 2, + }, + [VCAP_KF_IF_IGR_PORT_MASK] = { + .type = VCAP_FIELD_U72, + .offset = 19, + .width = 65, + }, + [VCAP_KF_L2_MC_IS] = { + .type = VCAP_FIELD_BIT, + .offset = 84, + .width = 1, + }, + [VCAP_KF_L2_BC_IS] = { + .type = VCAP_FIELD_BIT, + .offset = 85, + .width = 1, + }, + [VCAP_KF_8021Q_VLAN_TAGS] = { + .type = VCAP_FIELD_U32, + .offset = 86, + .width = 3, + }, + [VCAP_KF_8021Q_TPID0] = { + .type = VCAP_FIELD_U32, + .offset = 89, + .width = 3, + }, + [VCAP_KF_8021Q_PCP0] = { + .type = VCAP_FIELD_U32, + .offset = 92, + .width = 3, + }, + [VCAP_KF_8021Q_DEI0] = { + .type = VCAP_FIELD_BIT, + .offset = 95, + .width = 1, + }, + [VCAP_KF_8021Q_VID0] = { + .type = VCAP_FIELD_U32, + .offset = 96, + .width = 12, + }, + [VCAP_KF_8021Q_TPID1] = { + .type = VCAP_FIELD_U32, + .offset = 108, + .width = 3, + }, + [VCAP_KF_8021Q_PCP1] = { + .type = VCAP_FIELD_U32, + .offset = 111, + .width = 3, + }, + [VCAP_KF_8021Q_DEI1] = { + .type = VCAP_FIELD_BIT, + .offset = 114, + .width = 1, + }, + [VCAP_KF_8021Q_VID1] = { + .type = VCAP_FIELD_U32, + .offset = 115, + .width = 12, + }, + [VCAP_KF_8021Q_TPID2] = { + .type = VCAP_FIELD_U32, + .offset = 127, + .width = 3, + }, + [VCAP_KF_8021Q_PCP2] = { + .type = VCAP_FIELD_U32, + .offset = 130, + .width = 3, + }, + [VCAP_KF_8021Q_DEI2] = { + .type = VCAP_FIELD_BIT, + .offset = 133, + .width = 1, + }, + [VCAP_KF_8021Q_VID2] = { + .type = VCAP_FIELD_U32, + .offset = 134, + .width = 12, + }, + [VCAP_KF_IP_MC_IS] = { + .type = VCAP_FIELD_BIT, + .offset = 146, + .width = 1, + }, + [VCAP_KF_IP4_IS] = { + .type = VCAP_FIELD_BIT, + .offset = 147, + .width = 1, + }, + [VCAP_KF_L3_FRAGMENT_TYPE] = { + .type = VCAP_FIELD_U32, + .offset = 148, + .width = 2, + }, + [VCAP_KF_L3_FRAG_INVLD_L4_LEN] = { + .type = VCAP_FIELD_BIT, + .offset = 150, + .width = 1, + }, + [VCAP_KF_L3_OPTIONS_IS] = { + .type = VCAP_FIELD_BIT, + .offset = 151, + .width = 1, + }, + [VCAP_KF_L3_DSCP] = { + .type = VCAP_FIELD_U32, + .offset = 152, + .width = 6, + }, + [VCAP_KF_L3_IP4_DIP] = { + .type = VCAP_FIELD_U32, + .offset = 158, + .width = 32, + }, + [VCAP_KF_L3_IP4_SIP] = { + .type = VCAP_FIELD_U32, + .offset = 190, + .width = 32, + }, + [VCAP_KF_L3_IP_PROTO] = { + .type = VCAP_FIELD_U32, + .offset = 222, + .width = 8, + }, + [VCAP_KF_TCP_UDP_IS] = { + .type = VCAP_FIELD_BIT, + .offset = 230, + .width = 1, + }, + [VCAP_KF_TCP_IS] = { + .type = VCAP_FIELD_BIT, + .offset = 231, + .width = 1, + }, + [VCAP_KF_L4_RNG] = { + .type = VCAP_FIELD_U32, + .offset = 232, + .width = 8, + }, + [VCAP_KF_IP_PAYLOAD_5TUPLE] = { + .type = 
VCAP_FIELD_U32, + .offset = 240, + .width = 32, + }, +}; + +static const struct vcap_field is0_pure_5tuple_ip4_keyfield[] = { + [VCAP_KF_TYPE] = { + .type = VCAP_FIELD_U32, + .offset = 0, + .width = 2, + }, + [VCAP_KF_LOOKUP_FIRST_IS] = { + .type = VCAP_FIELD_BIT, + .offset = 2, + .width = 1, + }, + [VCAP_KF_LOOKUP_GEN_IDX_SEL] = { + .type = VCAP_FIELD_U32, + .offset = 3, + .width = 2, + }, + [VCAP_KF_LOOKUP_GEN_IDX] = { + .type = VCAP_FIELD_U32, + .offset = 5, + .width = 12, + }, + [VCAP_KF_L3_FRAGMENT_TYPE] = { + .type = VCAP_FIELD_U32, + .offset = 17, + .width = 2, + }, + [VCAP_KF_L3_FRAG_INVLD_L4_LEN] = { + .type = VCAP_FIELD_BIT, + .offset = 19, + .width = 1, + }, + [VCAP_KF_L3_OPTIONS_IS] = { + .type = VCAP_FIELD_BIT, + .offset = 20, + .width = 1, + }, + [VCAP_KF_L3_DSCP] = { + .type = VCAP_FIELD_U32, + .offset = 21, + .width = 6, + }, + [VCAP_KF_L3_IP4_DIP] = { + .type = VCAP_FIELD_U32, + .offset = 27, + .width = 32, + }, + [VCAP_KF_L3_IP4_SIP] = { + .type = VCAP_FIELD_U32, + .offset = 59, + .width = 32, + }, + [VCAP_KF_L3_IP_PROTO] = { + .type = VCAP_FIELD_U32, + .offset = 91, + .width = 8, + }, + [VCAP_KF_L4_RNG] = { + .type = VCAP_FIELD_U32, + .offset = 99, + .width = 8, + }, + [VCAP_KF_IP_PAYLOAD_5TUPLE] = { + .type = VCAP_FIELD_U32, + .offset = 107, + .width = 32, + }, +}; + +static const struct vcap_field is0_etag_keyfield[] = { + [VCAP_KF_TYPE] = { + .type = VCAP_FIELD_U32, + .offset = 0, + .width = 2, + }, + [VCAP_KF_LOOKUP_FIRST_IS] = { + .type = VCAP_FIELD_BIT, + .offset = 2, + .width = 1, + }, + [VCAP_KF_IF_IGR_PORT] = { + .type = VCAP_FIELD_U32, + .offset = 3, + .width = 7, + }, + [VCAP_KF_8021BR_E_TAGGED] = { + .type = VCAP_FIELD_BIT, + .offset = 10, + .width = 1, + }, + [VCAP_KF_8021BR_GRP] = { + .type = VCAP_FIELD_U32, + .offset = 11, + .width = 2, + }, + [VCAP_KF_8021BR_ECID_EXT] = { + .type = VCAP_FIELD_U32, + .offset = 13, + .width = 8, + }, + [VCAP_KF_8021BR_ECID_BASE] = { + .type = VCAP_FIELD_U32, + .offset = 21, + .width = 12, + }, + [VCAP_KF_8021BR_IGR_ECID_EXT] = { + .type = VCAP_FIELD_U32, + .offset = 33, + .width = 8, + }, + [VCAP_KF_8021BR_IGR_ECID_BASE] = { + .type = VCAP_FIELD_U32, + .offset = 41, + .width = 12, + }, +}; + +static const struct vcap_field is2_mac_etype_keyfield[] = { + [VCAP_KF_TYPE] = { + .type = VCAP_FIELD_U32, + .offset = 0, + .width = 4, + }, + [VCAP_KF_LOOKUP_FIRST_IS] = { + .type = VCAP_FIELD_BIT, + .offset = 4, + .width = 1, + }, + [VCAP_KF_LOOKUP_PAG] = { + .type = VCAP_FIELD_U32, + .offset = 5, + .width = 8, + }, + [VCAP_KF_IF_IGR_PORT_MASK_L3] = { + .type = VCAP_FIELD_BIT, + .offset = 13, + .width = 1, + }, + [VCAP_KF_IF_IGR_PORT_MASK_RNG] = { + .type = VCAP_FIELD_U32, + .offset = 14, + .width = 4, + }, + [VCAP_KF_IF_IGR_PORT_MASK_SEL] = { + .type = VCAP_FIELD_U32, + .offset = 18, + .width = 2, + }, + [VCAP_KF_IF_IGR_PORT_MASK] = { + .type = VCAP_FIELD_U32, + .offset = 20, + .width = 32, + }, + [VCAP_KF_L2_MC_IS] = { + .type = VCAP_FIELD_BIT, + .offset = 52, + .width = 1, + }, + [VCAP_KF_L2_BC_IS] = { + .type = VCAP_FIELD_BIT, + .offset = 53, + .width = 1, + }, + [VCAP_KF_8021Q_VLAN_TAGGED_IS] = { + .type = VCAP_FIELD_BIT, + .offset = 54, + .width = 1, + }, + [VCAP_KF_ISDX_GT0_IS] = { + .type = VCAP_FIELD_BIT, + .offset = 55, + .width = 1, + }, + [VCAP_KF_ISDX_CLS] = { + .type = VCAP_FIELD_U32, + .offset = 56, + .width = 12, + }, + [VCAP_KF_8021Q_VID_CLS] = { + .type = VCAP_FIELD_U32, + .offset = 68, + .width = 13, + }, + [VCAP_KF_8021Q_DEI_CLS] = { + .type = VCAP_FIELD_BIT, + .offset = 81, + .width = 1, + }, + 
[VCAP_KF_8021Q_PCP_CLS] = { + .type = VCAP_FIELD_U32, + .offset = 82, + .width = 3, + }, + [VCAP_KF_L2_FWD_IS] = { + .type = VCAP_FIELD_BIT, + .offset = 85, + .width = 1, + }, + [VCAP_KF_L3_SMAC_SIP_MATCH] = { + .type = VCAP_FIELD_BIT, + .offset = 86, + .width = 1, + }, + [VCAP_KF_L3_DMAC_DIP_MATCH] = { + .type = VCAP_FIELD_BIT, + .offset = 87, + .width = 1, + }, + [VCAP_KF_L3_RT_IS] = { + .type = VCAP_FIELD_BIT, + .offset = 88, + .width = 1, + }, + [VCAP_KF_L3_DST_IS] = { + .type = VCAP_FIELD_BIT, + .offset = 89, + .width = 1, + }, + [VCAP_KF_L2_DMAC] = { + .type = VCAP_FIELD_U48, + .offset = 90, + .width = 48, + }, + [VCAP_KF_L2_SMAC] = { + .type = VCAP_FIELD_U48, + .offset = 138, + .width = 48, + }, + [VCAP_KF_ETYPE_LEN_IS] = { + .type = VCAP_FIELD_BIT, + .offset = 186, + .width = 1, + }, + [VCAP_KF_ETYPE] = { + .type = VCAP_FIELD_U32, + .offset = 187, + .width = 16, + }, + [VCAP_KF_L2_PAYLOAD_ETYPE] = { + .type = VCAP_FIELD_U64, + .offset = 203, + .width = 64, + }, + [VCAP_KF_L4_RNG] = { + .type = VCAP_FIELD_U32, + .offset = 267, + .width = 16, + }, + [VCAP_KF_OAM_CCM_CNTS_EQ0] = { + .type = VCAP_FIELD_BIT, + .offset = 283, + .width = 1, + }, + [VCAP_KF_OAM_Y1731_IS] = { + .type = VCAP_FIELD_BIT, + .offset = 284, + .width = 1, + }, +}; + +static const struct vcap_field is2_arp_keyfield[] = { + [VCAP_KF_TYPE] = { + .type = VCAP_FIELD_U32, + .offset = 0, + .width = 4, + }, + [VCAP_KF_LOOKUP_FIRST_IS] = { + .type = VCAP_FIELD_BIT, + .offset = 4, + .width = 1, + }, + [VCAP_KF_LOOKUP_PAG] = { + .type = VCAP_FIELD_U32, + .offset = 5, + .width = 8, + }, + [VCAP_KF_IF_IGR_PORT_MASK_L3] = { + .type = VCAP_FIELD_BIT, + .offset = 13, + .width = 1, + }, + [VCAP_KF_IF_IGR_PORT_MASK_RNG] = { + .type = VCAP_FIELD_U32, + .offset = 14, + .width = 4, + }, + [VCAP_KF_IF_IGR_PORT_MASK_SEL] = { + .type = VCAP_FIELD_U32, + .offset = 18, + .width = 2, + }, + [VCAP_KF_IF_IGR_PORT_MASK] = { + .type = VCAP_FIELD_U32, + .offset = 20, + .width = 32, + }, + [VCAP_KF_L2_MC_IS] = { + .type = VCAP_FIELD_BIT, + .offset = 52, + .width = 1, + }, + [VCAP_KF_L2_BC_IS] = { + .type = VCAP_FIELD_BIT, + .offset = 53, + .width = 1, + }, + [VCAP_KF_8021Q_VLAN_TAGGED_IS] = { + .type = VCAP_FIELD_BIT, + .offset = 54, + .width = 1, + }, + [VCAP_KF_ISDX_GT0_IS] = { + .type = VCAP_FIELD_BIT, + .offset = 55, + .width = 1, + }, + [VCAP_KF_ISDX_CLS] = { + .type = VCAP_FIELD_U32, + .offset = 56, + .width = 12, + }, + [VCAP_KF_8021Q_VID_CLS] = { + .type = VCAP_FIELD_U32, + .offset = 68, + .width = 13, + }, + [VCAP_KF_8021Q_DEI_CLS] = { + .type = VCAP_FIELD_BIT, + .offset = 81, + .width = 1, + }, + [VCAP_KF_8021Q_PCP_CLS] = { + .type = VCAP_FIELD_U32, + .offset = 82, + .width = 3, + }, + [VCAP_KF_L2_FWD_IS] = { + .type = VCAP_FIELD_BIT, + .offset = 85, + .width = 1, + }, + [VCAP_KF_L2_SMAC] = { + .type = VCAP_FIELD_U48, + .offset = 86, + .width = 48, + }, + [VCAP_KF_ARP_ADDR_SPACE_OK_IS] = { + .type = VCAP_FIELD_BIT, + .offset = 134, + .width = 1, + }, + [VCAP_KF_ARP_PROTO_SPACE_OK_IS] = { + .type = VCAP_FIELD_BIT, + .offset = 135, + .width = 1, + }, + [VCAP_KF_ARP_LEN_OK_IS] = { + .type = VCAP_FIELD_BIT, + .offset = 136, + .width = 1, + }, + [VCAP_KF_ARP_TGT_MATCH_IS] = { + .type = VCAP_FIELD_BIT, + .offset = 137, + .width = 1, + }, + [VCAP_KF_ARP_SENDER_MATCH_IS] = { + .type = VCAP_FIELD_BIT, + .offset = 138, + .width = 1, + }, + [VCAP_KF_ARP_OPCODE_UNKNOWN_IS] = { + .type = VCAP_FIELD_BIT, + .offset = 139, + .width = 1, + }, + [VCAP_KF_ARP_OPCODE] = { + .type = VCAP_FIELD_U32, + .offset = 140, + .width = 2, + }, + [VCAP_KF_L3_IP4_DIP] 
= { + .type = VCAP_FIELD_U32, + .offset = 142, + .width = 32, + }, + [VCAP_KF_L3_IP4_SIP] = { + .type = VCAP_FIELD_U32, + .offset = 174, + .width = 32, + }, + [VCAP_KF_L3_DIP_EQ_SIP_IS] = { + .type = VCAP_FIELD_BIT, + .offset = 206, + .width = 1, + }, + [VCAP_KF_L4_RNG] = { + .type = VCAP_FIELD_U32, + .offset = 207, + .width = 16, + }, +}; + +static const struct vcap_field is2_ip4_tcp_udp_keyfield[] = { + [VCAP_KF_TYPE] = { + .type = VCAP_FIELD_U32, + .offset = 0, + .width = 4, + }, + [VCAP_KF_LOOKUP_FIRST_IS] = { + .type = VCAP_FIELD_BIT, + .offset = 4, + .width = 1, + }, + [VCAP_KF_LOOKUP_PAG] = { + .type = VCAP_FIELD_U32, + .offset = 5, + .width = 8, + }, + [VCAP_KF_IF_IGR_PORT_MASK_L3] = { + .type = VCAP_FIELD_BIT, + .offset = 13, + .width = 1, + }, + [VCAP_KF_IF_IGR_PORT_MASK_RNG] = { + .type = VCAP_FIELD_U32, + .offset = 14, + .width = 4, + }, + [VCAP_KF_IF_IGR_PORT_MASK_SEL] = { + .type = VCAP_FIELD_U32, + .offset = 18, + .width = 2, + }, + [VCAP_KF_IF_IGR_PORT_MASK] = { + .type = VCAP_FIELD_U32, + .offset = 20, + .width = 32, + }, + [VCAP_KF_L2_MC_IS] = { + .type = VCAP_FIELD_BIT, + .offset = 52, + .width = 1, + }, + [VCAP_KF_L2_BC_IS] = { + .type = VCAP_FIELD_BIT, + .offset = 53, + .width = 1, + }, + [VCAP_KF_8021Q_VLAN_TAGGED_IS] = { + .type = VCAP_FIELD_BIT, + .offset = 54, + .width = 1, + }, + [VCAP_KF_ISDX_GT0_IS] = { + .type = VCAP_FIELD_BIT, + .offset = 55, + .width = 1, + }, + [VCAP_KF_ISDX_CLS] = { + .type = VCAP_FIELD_U32, + .offset = 56, + .width = 12, + }, + [VCAP_KF_8021Q_VID_CLS] = { + .type = VCAP_FIELD_U32, + .offset = 68, + .width = 13, + }, + [VCAP_KF_8021Q_DEI_CLS] = { + .type = VCAP_FIELD_BIT, + .offset = 81, + .width = 1, + }, + [VCAP_KF_8021Q_PCP_CLS] = { + .type = VCAP_FIELD_U32, + .offset = 82, + .width = 3, + }, + [VCAP_KF_L2_FWD_IS] = { + .type = VCAP_FIELD_BIT, + .offset = 85, + .width = 1, + }, + [VCAP_KF_L3_SMAC_SIP_MATCH] = { + .type = VCAP_FIELD_BIT, + .offset = 86, + .width = 1, + }, + [VCAP_KF_L3_DMAC_DIP_MATCH] = { + .type = VCAP_FIELD_BIT, + .offset = 87, + .width = 1, + }, + [VCAP_KF_L3_RT_IS] = { + .type = VCAP_FIELD_BIT, + .offset = 88, + .width = 1, + }, + [VCAP_KF_L3_DST_IS] = { + .type = VCAP_FIELD_BIT, + .offset = 89, + .width = 1, + }, + [VCAP_KF_IP4_IS] = { + .type = VCAP_FIELD_BIT, + .offset = 90, + .width = 1, + }, + [VCAP_KF_L3_FRAGMENT_TYPE] = { + .type = VCAP_FIELD_U32, + .offset = 91, + .width = 2, + }, + [VCAP_KF_L3_FRAG_INVLD_L4_LEN] = { + .type = VCAP_FIELD_BIT, + .offset = 93, + .width = 1, + }, + [VCAP_KF_L3_OPTIONS_IS] = { + .type = VCAP_FIELD_BIT, + .offset = 94, + .width = 1, + }, + [VCAP_KF_L3_TTL_GT0] = { + .type = VCAP_FIELD_BIT, + .offset = 95, + .width = 1, + }, + [VCAP_KF_L3_TOS] = { + .type = VCAP_FIELD_U32, + .offset = 96, + .width = 8, + }, + [VCAP_KF_L3_IP4_DIP] = { + .type = VCAP_FIELD_U32, + .offset = 104, + .width = 32, + }, + [VCAP_KF_L3_IP4_SIP] = { + .type = VCAP_FIELD_U32, + .offset = 136, + .width = 32, + }, + [VCAP_KF_L3_DIP_EQ_SIP_IS] = { + .type = VCAP_FIELD_BIT, + .offset = 168, + .width = 1, + }, + [VCAP_KF_TCP_IS] = { + .type = VCAP_FIELD_BIT, + .offset = 169, + .width = 1, + }, + [VCAP_KF_L4_DPORT] = { + .type = VCAP_FIELD_U32, + .offset = 170, + .width = 16, + }, + [VCAP_KF_L4_SPORT] = { + .type = VCAP_FIELD_U32, + .offset = 186, + .width = 16, + }, + [VCAP_KF_L4_RNG] = { + .type = VCAP_FIELD_U32, + .offset = 202, + .width = 16, + }, + [VCAP_KF_L4_SPORT_EQ_DPORT_IS] = { + .type = VCAP_FIELD_BIT, + .offset = 218, + .width = 1, + }, + [VCAP_KF_L4_SEQUENCE_EQ0_IS] = { + .type = VCAP_FIELD_BIT, + 
.offset = 219, + .width = 1, + }, + [VCAP_KF_L4_FIN] = { + .type = VCAP_FIELD_BIT, + .offset = 220, + .width = 1, + }, + [VCAP_KF_L4_SYN] = { + .type = VCAP_FIELD_BIT, + .offset = 221, + .width = 1, + }, + [VCAP_KF_L4_RST] = { + .type = VCAP_FIELD_BIT, + .offset = 222, + .width = 1, + }, + [VCAP_KF_L4_PSH] = { + .type = VCAP_FIELD_BIT, + .offset = 223, + .width = 1, + }, + [VCAP_KF_L4_ACK] = { + .type = VCAP_FIELD_BIT, + .offset = 224, + .width = 1, + }, + [VCAP_KF_L4_URG] = { + .type = VCAP_FIELD_BIT, + .offset = 225, + .width = 1, + }, + [VCAP_KF_L4_PAYLOAD] = { + .type = VCAP_FIELD_U64, + .offset = 226, + .width = 64, + }, +}; + +static const struct vcap_field is2_ip4_other_keyfield[] = { + [VCAP_KF_TYPE] = { + .type = VCAP_FIELD_U32, + .offset = 0, + .width = 4, + }, + [VCAP_KF_LOOKUP_FIRST_IS] = { + .type = VCAP_FIELD_BIT, + .offset = 4, + .width = 1, + }, + [VCAP_KF_LOOKUP_PAG] = { + .type = VCAP_FIELD_U32, + .offset = 5, + .width = 8, + }, + [VCAP_KF_IF_IGR_PORT_MASK_L3] = { + .type = VCAP_FIELD_BIT, + .offset = 13, + .width = 1, + }, + [VCAP_KF_IF_IGR_PORT_MASK_RNG] = { + .type = VCAP_FIELD_U32, + .offset = 14, + .width = 4, + }, + [VCAP_KF_IF_IGR_PORT_MASK_SEL] = { + .type = VCAP_FIELD_U32, + .offset = 18, + .width = 2, + }, + [VCAP_KF_IF_IGR_PORT_MASK] = { + .type = VCAP_FIELD_U32, + .offset = 20, + .width = 32, + }, + [VCAP_KF_L2_MC_IS] = { + .type = VCAP_FIELD_BIT, + .offset = 52, + .width = 1, + }, + [VCAP_KF_L2_BC_IS] = { + .type = VCAP_FIELD_BIT, + .offset = 53, + .width = 1, + }, + [VCAP_KF_8021Q_VLAN_TAGGED_IS] = { + .type = VCAP_FIELD_BIT, + .offset = 54, + .width = 1, + }, + [VCAP_KF_ISDX_GT0_IS] = { + .type = VCAP_FIELD_BIT, + .offset = 55, + .width = 1, + }, + [VCAP_KF_ISDX_CLS] = { + .type = VCAP_FIELD_U32, + .offset = 56, + .width = 12, + }, + [VCAP_KF_8021Q_VID_CLS] = { + .type = VCAP_FIELD_U32, + .offset = 68, + .width = 13, + }, + [VCAP_KF_8021Q_DEI_CLS] = { + .type = VCAP_FIELD_BIT, + .offset = 81, + .width = 1, + }, + [VCAP_KF_8021Q_PCP_CLS] = { + .type = VCAP_FIELD_U32, + .offset = 82, + .width = 3, + }, + [VCAP_KF_L2_FWD_IS] = { + .type = VCAP_FIELD_BIT, + .offset = 85, + .width = 1, + }, + [VCAP_KF_L3_SMAC_SIP_MATCH] = { + .type = VCAP_FIELD_BIT, + .offset = 86, + .width = 1, + }, + [VCAP_KF_L3_DMAC_DIP_MATCH] = { + .type = VCAP_FIELD_BIT, + .offset = 87, + .width = 1, + }, + [VCAP_KF_L3_RT_IS] = { + .type = VCAP_FIELD_BIT, + .offset = 88, + .width = 1, + }, + [VCAP_KF_L3_DST_IS] = { + .type = VCAP_FIELD_BIT, + .offset = 89, + .width = 1, + }, + [VCAP_KF_IP4_IS] = { + .type = VCAP_FIELD_BIT, + .offset = 90, + .width = 1, + }, + [VCAP_KF_L3_FRAGMENT_TYPE] = { + .type = VCAP_FIELD_U32, + .offset = 91, + .width = 2, + }, + [VCAP_KF_L3_FRAG_INVLD_L4_LEN] = { + .type = VCAP_FIELD_BIT, + .offset = 93, + .width = 1, + }, + [VCAP_KF_L3_OPTIONS_IS] = { + .type = VCAP_FIELD_BIT, + .offset = 94, + .width = 1, + }, + [VCAP_KF_L3_TTL_GT0] = { + .type = VCAP_FIELD_BIT, + .offset = 95, + .width = 1, + }, + [VCAP_KF_L3_TOS] = { + .type = VCAP_FIELD_U32, + .offset = 96, + .width = 8, + }, + [VCAP_KF_L3_IP4_DIP] = { + .type = VCAP_FIELD_U32, + .offset = 104, + .width = 32, + }, + [VCAP_KF_L3_IP4_SIP] = { + .type = VCAP_FIELD_U32, + .offset = 136, + .width = 32, + }, + [VCAP_KF_L3_DIP_EQ_SIP_IS] = { + .type = VCAP_FIELD_BIT, + .offset = 168, + .width = 1, + }, + [VCAP_KF_L3_IP_PROTO] = { + .type = VCAP_FIELD_U32, + .offset = 169, + .width = 8, + }, + [VCAP_KF_L4_RNG] = { + .type = VCAP_FIELD_U32, + .offset = 177, + .width = 16, + }, + [VCAP_KF_L3_PAYLOAD] = { + .type = 
VCAP_FIELD_U112, + .offset = 193, + .width = 96, + }, +}; + +static const struct vcap_field is2_ip6_std_keyfield[] = { + [VCAP_KF_TYPE] = { + .type = VCAP_FIELD_U32, + .offset = 0, + .width = 4, + }, + [VCAP_KF_LOOKUP_FIRST_IS] = { + .type = VCAP_FIELD_BIT, + .offset = 4, + .width = 1, + }, + [VCAP_KF_LOOKUP_PAG] = { + .type = VCAP_FIELD_U32, + .offset = 5, + .width = 8, + }, + [VCAP_KF_IF_IGR_PORT_MASK_L3] = { + .type = VCAP_FIELD_BIT, + .offset = 13, + .width = 1, + }, + [VCAP_KF_IF_IGR_PORT_MASK_RNG] = { + .type = VCAP_FIELD_U32, + .offset = 14, + .width = 4, + }, + [VCAP_KF_IF_IGR_PORT_MASK_SEL] = { + .type = VCAP_FIELD_U32, + .offset = 18, + .width = 2, + }, + [VCAP_KF_IF_IGR_PORT_MASK] = { + .type = VCAP_FIELD_U32, + .offset = 20, + .width = 32, + }, + [VCAP_KF_L2_MC_IS] = { + .type = VCAP_FIELD_BIT, + .offset = 52, + .width = 1, + }, + [VCAP_KF_L2_BC_IS] = { + .type = VCAP_FIELD_BIT, + .offset = 53, + .width = 1, + }, + [VCAP_KF_8021Q_VLAN_TAGGED_IS] = { + .type = VCAP_FIELD_BIT, + .offset = 54, + .width = 1, + }, + [VCAP_KF_ISDX_GT0_IS] = { + .type = VCAP_FIELD_BIT, + .offset = 55, + .width = 1, + }, + [VCAP_KF_ISDX_CLS] = { + .type = VCAP_FIELD_U32, + .offset = 56, + .width = 12, + }, + [VCAP_KF_8021Q_VID_CLS] = { + .type = VCAP_FIELD_U32, + .offset = 68, + .width = 13, + }, + [VCAP_KF_8021Q_DEI_CLS] = { + .type = VCAP_FIELD_BIT, + .offset = 81, + .width = 1, + }, + [VCAP_KF_8021Q_PCP_CLS] = { + .type = VCAP_FIELD_U32, + .offset = 82, + .width = 3, + }, + [VCAP_KF_L2_FWD_IS] = { + .type = VCAP_FIELD_BIT, + .offset = 85, + .width = 1, + }, + [VCAP_KF_L3_SMAC_SIP_MATCH] = { + .type = VCAP_FIELD_BIT, + .offset = 86, + .width = 1, + }, + [VCAP_KF_L3_DMAC_DIP_MATCH] = { + .type = VCAP_FIELD_BIT, + .offset = 87, + .width = 1, + }, + [VCAP_KF_L3_RT_IS] = { + .type = VCAP_FIELD_BIT, + .offset = 88, + .width = 1, + }, + [VCAP_KF_L3_DST_IS] = { + .type = VCAP_FIELD_BIT, + .offset = 89, + .width = 1, + }, + [VCAP_KF_L3_TTL_GT0] = { + .type = VCAP_FIELD_BIT, + .offset = 90, + .width = 1, + }, + [VCAP_KF_L3_IP6_SIP] = { + .type = VCAP_FIELD_U128, + .offset = 91, + .width = 128, + }, + [VCAP_KF_L3_DIP_EQ_SIP_IS] = { + .type = VCAP_FIELD_BIT, + .offset = 219, + .width = 1, + }, + [VCAP_KF_L3_IP_PROTO] = { + .type = VCAP_FIELD_U32, + .offset = 220, + .width = 8, + }, + [VCAP_KF_L4_RNG] = { + .type = VCAP_FIELD_U32, + .offset = 228, + .width = 16, + }, + [VCAP_KF_L3_PAYLOAD] = { + .type = VCAP_FIELD_U48, + .offset = 244, + .width = 40, + }, +}; + +static const struct vcap_field is2_ip_7tuple_keyfield[] = { + [VCAP_KF_TYPE] = { + .type = VCAP_FIELD_U32, + .offset = 0, + .width = 2, + }, + [VCAP_KF_LOOKUP_FIRST_IS] = { + .type = VCAP_FIELD_BIT, + .offset = 2, + .width = 1, + }, + [VCAP_KF_LOOKUP_PAG] = { + .type = VCAP_FIELD_U32, + .offset = 3, + .width = 8, + }, + [VCAP_KF_IF_IGR_PORT_MASK_L3] = { + .type = VCAP_FIELD_BIT, + .offset = 11, + .width = 1, + }, + [VCAP_KF_IF_IGR_PORT_MASK_RNG] = { + .type = VCAP_FIELD_U32, + .offset = 12, + .width = 4, + }, + [VCAP_KF_IF_IGR_PORT_MASK_SEL] = { + .type = VCAP_FIELD_U32, + .offset = 16, + .width = 2, + }, + [VCAP_KF_IF_IGR_PORT_MASK] = { + .type = VCAP_FIELD_U72, + .offset = 18, + .width = 65, + }, + [VCAP_KF_L2_MC_IS] = { + .type = VCAP_FIELD_BIT, + .offset = 83, + .width = 1, + }, + [VCAP_KF_L2_BC_IS] = { + .type = VCAP_FIELD_BIT, + .offset = 84, + .width = 1, + }, + [VCAP_KF_8021Q_VLAN_TAGGED_IS] = { + .type = VCAP_FIELD_BIT, + .offset = 85, + .width = 1, + }, + [VCAP_KF_ISDX_GT0_IS] = { + .type = VCAP_FIELD_BIT, + .offset = 86, + .width = 1, + }, + 
[VCAP_KF_ISDX_CLS] = { + .type = VCAP_FIELD_U32, + .offset = 87, + .width = 12, + }, + [VCAP_KF_8021Q_VID_CLS] = { + .type = VCAP_FIELD_U32, + .offset = 99, + .width = 13, + }, + [VCAP_KF_8021Q_DEI_CLS] = { + .type = VCAP_FIELD_BIT, + .offset = 112, + .width = 1, + }, + [VCAP_KF_8021Q_PCP_CLS] = { + .type = VCAP_FIELD_U32, + .offset = 113, + .width = 3, + }, + [VCAP_KF_L2_FWD_IS] = { + .type = VCAP_FIELD_BIT, + .offset = 116, + .width = 1, + }, + [VCAP_KF_L3_SMAC_SIP_MATCH] = { + .type = VCAP_FIELD_BIT, + .offset = 117, + .width = 1, + }, + [VCAP_KF_L3_DMAC_DIP_MATCH] = { + .type = VCAP_FIELD_BIT, + .offset = 118, + .width = 1, + }, + [VCAP_KF_L3_RT_IS] = { + .type = VCAP_FIELD_BIT, + .offset = 119, + .width = 1, + }, + [VCAP_KF_L3_DST_IS] = { + .type = VCAP_FIELD_BIT, + .offset = 120, + .width = 1, + }, + [VCAP_KF_L2_DMAC] = { + .type = VCAP_FIELD_U48, + .offset = 121, + .width = 48, + }, + [VCAP_KF_L2_SMAC] = { + .type = VCAP_FIELD_U48, + .offset = 169, + .width = 48, + }, + [VCAP_KF_IP4_IS] = { + .type = VCAP_FIELD_BIT, + .offset = 217, + .width = 1, + }, + [VCAP_KF_L3_TTL_GT0] = { + .type = VCAP_FIELD_BIT, + .offset = 218, + .width = 1, + }, + [VCAP_KF_L3_TOS] = { + .type = VCAP_FIELD_U32, + .offset = 219, + .width = 8, + }, + [VCAP_KF_L3_IP6_DIP] = { + .type = VCAP_FIELD_U128, + .offset = 227, + .width = 128, + }, + [VCAP_KF_L3_IP6_SIP] = { + .type = VCAP_FIELD_U128, + .offset = 355, + .width = 128, + }, + [VCAP_KF_L3_DIP_EQ_SIP_IS] = { + .type = VCAP_FIELD_BIT, + .offset = 483, + .width = 1, + }, + [VCAP_KF_TCP_UDP_IS] = { + .type = VCAP_FIELD_BIT, + .offset = 484, + .width = 1, + }, + [VCAP_KF_TCP_IS] = { + .type = VCAP_FIELD_BIT, + .offset = 485, + .width = 1, + }, + [VCAP_KF_L4_DPORT] = { + .type = VCAP_FIELD_U32, + .offset = 486, + .width = 16, + }, + [VCAP_KF_L4_SPORT] = { + .type = VCAP_FIELD_U32, + .offset = 502, + .width = 16, + }, + [VCAP_KF_L4_RNG] = { + .type = VCAP_FIELD_U32, + .offset = 518, + .width = 16, + }, + [VCAP_KF_L4_SPORT_EQ_DPORT_IS] = { + .type = VCAP_FIELD_BIT, + .offset = 534, + .width = 1, + }, + [VCAP_KF_L4_SEQUENCE_EQ0_IS] = { + .type = VCAP_FIELD_BIT, + .offset = 535, + .width = 1, + }, + [VCAP_KF_L4_FIN] = { + .type = VCAP_FIELD_BIT, + .offset = 536, + .width = 1, + }, + [VCAP_KF_L4_SYN] = { + .type = VCAP_FIELD_BIT, + .offset = 537, + .width = 1, + }, + [VCAP_KF_L4_RST] = { + .type = VCAP_FIELD_BIT, + .offset = 538, + .width = 1, + }, + [VCAP_KF_L4_PSH] = { + .type = VCAP_FIELD_BIT, + .offset = 539, + .width = 1, + }, + [VCAP_KF_L4_ACK] = { + .type = VCAP_FIELD_BIT, + .offset = 540, + .width = 1, + }, + [VCAP_KF_L4_URG] = { + .type = VCAP_FIELD_BIT, + .offset = 541, + .width = 1, + }, + [VCAP_KF_L4_PAYLOAD] = { + .type = VCAP_FIELD_U64, + .offset = 542, + .width = 64, + }, +}; + +static const struct vcap_field is2_ip6_vid_keyfield[] = { + [VCAP_KF_TYPE] = { + .type = VCAP_FIELD_U32, + .offset = 0, + .width = 4, + }, + [VCAP_KF_LOOKUP_FIRST_IS] = { + .type = VCAP_FIELD_BIT, + .offset = 4, + .width = 1, + }, + [VCAP_KF_LOOKUP_PAG] = { + .type = VCAP_FIELD_U32, + .offset = 5, + .width = 8, + }, + [VCAP_KF_ISDX_GT0_IS] = { + .type = VCAP_FIELD_BIT, + .offset = 13, + .width = 1, + }, + [VCAP_KF_ISDX_CLS] = { + .type = VCAP_FIELD_U32, + .offset = 14, + .width = 12, + }, + [VCAP_KF_8021Q_VID_CLS] = { + .type = VCAP_FIELD_U32, + .offset = 26, + .width = 13, + }, + [VCAP_KF_L3_SMAC_SIP_MATCH] = { + .type = VCAP_FIELD_BIT, + .offset = 39, + .width = 1, + }, + [VCAP_KF_L3_DMAC_DIP_MATCH] = { + .type = VCAP_FIELD_BIT, + .offset = 40, + .width = 1, + }, + 
[VCAP_KF_L3_RT_IS] = { + .type = VCAP_FIELD_BIT, + .offset = 41, + .width = 1, + }, + [VCAP_KF_L3_DST_IS] = { + .type = VCAP_FIELD_BIT, + .offset = 42, + .width = 1, + }, + [VCAP_KF_L3_IP6_DIP] = { + .type = VCAP_FIELD_U128, + .offset = 43, + .width = 128, + }, + [VCAP_KF_L3_IP6_SIP] = { + .type = VCAP_FIELD_U128, + .offset = 171, + .width = 128, + }, +}; + +static const struct vcap_field es2_mac_etype_keyfield[] = { + [VCAP_KF_TYPE] = { + .type = VCAP_FIELD_U32, + .offset = 0, + .width = 3, + }, + [VCAP_KF_LOOKUP_FIRST_IS] = { + .type = VCAP_FIELD_BIT, + .offset = 3, + .width = 1, + }, + [VCAP_KF_ACL_GRP_ID] = { + .type = VCAP_FIELD_U32, + .offset = 4, + .width = 8, + }, + [VCAP_KF_PROT_ACTIVE] = { + .type = VCAP_FIELD_BIT, + .offset = 12, + .width = 1, + }, + [VCAP_KF_L2_MC_IS] = { + .type = VCAP_FIELD_BIT, + .offset = 13, + .width = 1, + }, + [VCAP_KF_L2_BC_IS] = { + .type = VCAP_FIELD_BIT, + .offset = 14, + .width = 1, + }, + [VCAP_KF_ISDX_GT0_IS] = { + .type = VCAP_FIELD_BIT, + .offset = 15, + .width = 1, + }, + [VCAP_KF_ISDX_CLS] = { + .type = VCAP_FIELD_U32, + .offset = 16, + .width = 12, + }, + [VCAP_KF_8021Q_VLAN_TAGGED_IS] = { + .type = VCAP_FIELD_BIT, + .offset = 28, + .width = 1, + }, + [VCAP_KF_8021Q_VID_CLS] = { + .type = VCAP_FIELD_U32, + .offset = 29, + .width = 13, + }, + [VCAP_KF_IF_EGR_PORT_MASK_RNG] = { + .type = VCAP_FIELD_U32, + .offset = 42, + .width = 3, + }, + [VCAP_KF_IF_EGR_PORT_MASK] = { + .type = VCAP_FIELD_U32, + .offset = 45, + .width = 32, + }, + [VCAP_KF_IF_IGR_PORT_SEL] = { + .type = VCAP_FIELD_BIT, + .offset = 77, + .width = 1, + }, + [VCAP_KF_IF_IGR_PORT] = { + .type = VCAP_FIELD_U32, + .offset = 78, + .width = 9, + }, + [VCAP_KF_8021Q_PCP_CLS] = { + .type = VCAP_FIELD_U32, + .offset = 87, + .width = 3, + }, + [VCAP_KF_8021Q_DEI_CLS] = { + .type = VCAP_FIELD_BIT, + .offset = 90, + .width = 1, + }, + [VCAP_KF_COSID_CLS] = { + .type = VCAP_FIELD_U32, + .offset = 91, + .width = 3, + }, + [VCAP_KF_L3_DPL_CLS] = { + .type = VCAP_FIELD_BIT, + .offset = 94, + .width = 1, + }, + [VCAP_KF_L3_RT_IS] = { + .type = VCAP_FIELD_BIT, + .offset = 95, + .width = 1, + }, + [VCAP_KF_ES0_ISDX_KEY_ENA] = { + .type = VCAP_FIELD_BIT, + .offset = 96, + .width = 1, + }, + [VCAP_KF_MIRROR_ENA] = { + .type = VCAP_FIELD_U32, + .offset = 97, + .width = 2, + }, + [VCAP_KF_L2_DMAC] = { + .type = VCAP_FIELD_U48, + .offset = 99, + .width = 48, + }, + [VCAP_KF_L2_SMAC] = { + .type = VCAP_FIELD_U48, + .offset = 147, + .width = 48, + }, + [VCAP_KF_ETYPE_LEN_IS] = { + .type = VCAP_FIELD_BIT, + .offset = 195, + .width = 1, + }, + [VCAP_KF_ETYPE] = { + .type = VCAP_FIELD_U32, + .offset = 196, + .width = 16, + }, + [VCAP_KF_L2_PAYLOAD_ETYPE] = { + .type = VCAP_FIELD_U64, + .offset = 212, + .width = 64, + }, + [VCAP_KF_OAM_CCM_CNTS_EQ0] = { + .type = VCAP_FIELD_BIT, + .offset = 276, + .width = 1, + }, + [VCAP_KF_OAM_Y1731_IS] = { + .type = VCAP_FIELD_BIT, + .offset = 277, + .width = 1, + }, +}; + +static const struct vcap_field es2_arp_keyfield[] = { + [VCAP_KF_TYPE] = { + .type = VCAP_FIELD_U32, + .offset = 0, + .width = 3, + }, + [VCAP_KF_LOOKUP_FIRST_IS] = { + .type = VCAP_FIELD_BIT, + .offset = 3, + .width = 1, + }, + [VCAP_KF_ACL_GRP_ID] = { + .type = VCAP_FIELD_U32, + .offset = 4, + .width = 8, + }, + [VCAP_KF_PROT_ACTIVE] = { + .type = VCAP_FIELD_BIT, + .offset = 12, + .width = 1, + }, + [VCAP_KF_L2_MC_IS] = { + .type = VCAP_FIELD_BIT, + .offset = 13, + .width = 1, + }, + [VCAP_KF_L2_BC_IS] = { + .type = VCAP_FIELD_BIT, + .offset = 14, + .width = 1, + }, + [VCAP_KF_ISDX_GT0_IS] = { + 
.type = VCAP_FIELD_BIT, + .offset = 15, + .width = 1, + }, + [VCAP_KF_ISDX_CLS] = { + .type = VCAP_FIELD_U32, + .offset = 16, + .width = 12, + }, + [VCAP_KF_8021Q_VLAN_TAGGED_IS] = { + .type = VCAP_FIELD_BIT, + .offset = 28, + .width = 1, + }, + [VCAP_KF_8021Q_VID_CLS] = { + .type = VCAP_FIELD_U32, + .offset = 29, + .width = 13, + }, + [VCAP_KF_IF_EGR_PORT_MASK_RNG] = { + .type = VCAP_FIELD_U32, + .offset = 42, + .width = 3, + }, + [VCAP_KF_IF_EGR_PORT_MASK] = { + .type = VCAP_FIELD_U32, + .offset = 45, + .width = 32, + }, + [VCAP_KF_IF_IGR_PORT_SEL] = { + .type = VCAP_FIELD_BIT, + .offset = 77, + .width = 1, + }, + [VCAP_KF_IF_IGR_PORT] = { + .type = VCAP_FIELD_U32, + .offset = 78, + .width = 9, + }, + [VCAP_KF_8021Q_PCP_CLS] = { + .type = VCAP_FIELD_U32, + .offset = 87, + .width = 3, + }, + [VCAP_KF_8021Q_DEI_CLS] = { + .type = VCAP_FIELD_BIT, + .offset = 90, + .width = 1, + }, + [VCAP_KF_COSID_CLS] = { + .type = VCAP_FIELD_U32, + .offset = 91, + .width = 3, + }, + [VCAP_KF_L3_DPL_CLS] = { + .type = VCAP_FIELD_BIT, + .offset = 94, + .width = 1, + }, + [VCAP_KF_ES0_ISDX_KEY_ENA] = { + .type = VCAP_FIELD_BIT, + .offset = 95, + .width = 1, + }, + [VCAP_KF_MIRROR_ENA] = { + .type = VCAP_FIELD_U32, + .offset = 96, + .width = 2, + }, + [VCAP_KF_L2_SMAC] = { + .type = VCAP_FIELD_U48, + .offset = 98, + .width = 48, + }, + [VCAP_KF_ARP_ADDR_SPACE_OK_IS] = { + .type = VCAP_FIELD_BIT, + .offset = 146, + .width = 1, + }, + [VCAP_KF_ARP_PROTO_SPACE_OK_IS] = { + .type = VCAP_FIELD_BIT, + .offset = 147, + .width = 1, + }, + [VCAP_KF_ARP_LEN_OK_IS] = { + .type = VCAP_FIELD_BIT, + .offset = 148, + .width = 1, + }, + [VCAP_KF_ARP_TGT_MATCH_IS] = { + .type = VCAP_FIELD_BIT, + .offset = 149, + .width = 1, + }, + [VCAP_KF_ARP_SENDER_MATCH_IS] = { + .type = VCAP_FIELD_BIT, + .offset = 150, + .width = 1, + }, + [VCAP_KF_ARP_OPCODE_UNKNOWN_IS] = { + .type = VCAP_FIELD_BIT, + .offset = 151, + .width = 1, + }, + [VCAP_KF_ARP_OPCODE] = { + .type = VCAP_FIELD_U32, + .offset = 152, + .width = 2, + }, + [VCAP_KF_L3_IP4_DIP] = { + .type = VCAP_FIELD_U32, + .offset = 154, + .width = 32, + }, + [VCAP_KF_L3_IP4_SIP] = { + .type = VCAP_FIELD_U32, + .offset = 186, + .width = 32, + }, + [VCAP_KF_L3_DIP_EQ_SIP_IS] = { + .type = VCAP_FIELD_BIT, + .offset = 218, + .width = 1, + }, +}; + +static const struct vcap_field es2_ip4_tcp_udp_keyfield[] = { + [VCAP_KF_TYPE] = { + .type = VCAP_FIELD_U32, + .offset = 0, + .width = 3, + }, + [VCAP_KF_LOOKUP_FIRST_IS] = { + .type = VCAP_FIELD_BIT, + .offset = 3, + .width = 1, + }, + [VCAP_KF_ACL_GRP_ID] = { + .type = VCAP_FIELD_U32, + .offset = 4, + .width = 8, + }, + [VCAP_KF_PROT_ACTIVE] = { + .type = VCAP_FIELD_BIT, + .offset = 12, + .width = 1, + }, + [VCAP_KF_L2_MC_IS] = { + .type = VCAP_FIELD_BIT, + .offset = 13, + .width = 1, + }, + [VCAP_KF_L2_BC_IS] = { + .type = VCAP_FIELD_BIT, + .offset = 14, + .width = 1, + }, + [VCAP_KF_ISDX_GT0_IS] = { + .type = VCAP_FIELD_BIT, + .offset = 15, + .width = 1, + }, + [VCAP_KF_ISDX_CLS] = { + .type = VCAP_FIELD_U32, + .offset = 16, + .width = 12, + }, + [VCAP_KF_8021Q_VLAN_TAGGED_IS] = { + .type = VCAP_FIELD_BIT, + .offset = 28, + .width = 1, + }, + [VCAP_KF_8021Q_VID_CLS] = { + .type = VCAP_FIELD_U32, + .offset = 29, + .width = 13, + }, + [VCAP_KF_IF_EGR_PORT_MASK_RNG] = { + .type = VCAP_FIELD_U32, + .offset = 42, + .width = 3, + }, + [VCAP_KF_IF_EGR_PORT_MASK] = { + .type = VCAP_FIELD_U32, + .offset = 45, + .width = 32, + }, + [VCAP_KF_IF_IGR_PORT_SEL] = { + .type = VCAP_FIELD_BIT, + .offset = 77, + .width = 1, + }, + [VCAP_KF_IF_IGR_PORT] = { 
+ .type = VCAP_FIELD_U32, + .offset = 78, + .width = 9, + }, + [VCAP_KF_8021Q_PCP_CLS] = { + .type = VCAP_FIELD_U32, + .offset = 87, + .width = 3, + }, + [VCAP_KF_8021Q_DEI_CLS] = { + .type = VCAP_FIELD_BIT, + .offset = 90, + .width = 1, + }, + [VCAP_KF_COSID_CLS] = { + .type = VCAP_FIELD_U32, + .offset = 91, + .width = 3, + }, + [VCAP_KF_L3_DPL_CLS] = { + .type = VCAP_FIELD_BIT, + .offset = 94, + .width = 1, + }, + [VCAP_KF_L3_RT_IS] = { + .type = VCAP_FIELD_BIT, + .offset = 95, + .width = 1, + }, + [VCAP_KF_ES0_ISDX_KEY_ENA] = { + .type = VCAP_FIELD_BIT, + .offset = 96, + .width = 1, + }, + [VCAP_KF_MIRROR_ENA] = { + .type = VCAP_FIELD_U32, + .offset = 97, + .width = 2, + }, + [VCAP_KF_IP4_IS] = { + .type = VCAP_FIELD_BIT, + .offset = 99, + .width = 1, + }, + [VCAP_KF_L3_FRAGMENT_TYPE] = { + .type = VCAP_FIELD_U32, + .offset = 100, + .width = 2, + }, + [VCAP_KF_L3_OPTIONS_IS] = { + .type = VCAP_FIELD_BIT, + .offset = 102, + .width = 1, + }, + [VCAP_KF_L3_TTL_GT0] = { + .type = VCAP_FIELD_BIT, + .offset = 103, + .width = 1, + }, + [VCAP_KF_L3_TOS] = { + .type = VCAP_FIELD_U32, + .offset = 104, + .width = 8, + }, + [VCAP_KF_L3_IP4_DIP] = { + .type = VCAP_FIELD_U32, + .offset = 112, + .width = 32, + }, + [VCAP_KF_L3_IP4_SIP] = { + .type = VCAP_FIELD_U32, + .offset = 144, + .width = 32, + }, + [VCAP_KF_L3_DIP_EQ_SIP_IS] = { + .type = VCAP_FIELD_BIT, + .offset = 176, + .width = 1, + }, + [VCAP_KF_TCP_IS] = { + .type = VCAP_FIELD_BIT, + .offset = 177, + .width = 1, + }, + [VCAP_KF_L4_DPORT] = { + .type = VCAP_FIELD_U32, + .offset = 178, + .width = 16, + }, + [VCAP_KF_L4_SPORT] = { + .type = VCAP_FIELD_U32, + .offset = 194, + .width = 16, + }, + [VCAP_KF_L4_RNG] = { + .type = VCAP_FIELD_U32, + .offset = 210, + .width = 16, + }, + [VCAP_KF_L4_SPORT_EQ_DPORT_IS] = { + .type = VCAP_FIELD_BIT, + .offset = 226, + .width = 1, + }, + [VCAP_KF_L4_SEQUENCE_EQ0_IS] = { + .type = VCAP_FIELD_BIT, + .offset = 227, + .width = 1, + }, + [VCAP_KF_L4_FIN] = { + .type = VCAP_FIELD_BIT, + .offset = 228, + .width = 1, + }, + [VCAP_KF_L4_SYN] = { + .type = VCAP_FIELD_BIT, + .offset = 229, + .width = 1, + }, + [VCAP_KF_L4_RST] = { + .type = VCAP_FIELD_BIT, + .offset = 230, + .width = 1, + }, + [VCAP_KF_L4_PSH] = { + .type = VCAP_FIELD_BIT, + .offset = 231, + .width = 1, + }, + [VCAP_KF_L4_ACK] = { + .type = VCAP_FIELD_BIT, + .offset = 232, + .width = 1, + }, + [VCAP_KF_L4_URG] = { + .type = VCAP_FIELD_BIT, + .offset = 233, + .width = 1, + }, + [VCAP_KF_L4_PAYLOAD] = { + .type = VCAP_FIELD_U64, + .offset = 234, + .width = 64, + }, +}; + +static const struct vcap_field es2_ip4_other_keyfield[] = { + [VCAP_KF_TYPE] = { + .type = VCAP_FIELD_U32, + .offset = 0, + .width = 3, + }, + [VCAP_KF_LOOKUP_FIRST_IS] = { + .type = VCAP_FIELD_BIT, + .offset = 3, + .width = 1, + }, + [VCAP_KF_ACL_GRP_ID] = { + .type = VCAP_FIELD_U32, + .offset = 4, + .width = 8, + }, + [VCAP_KF_PROT_ACTIVE] = { + .type = VCAP_FIELD_BIT, + .offset = 12, + .width = 1, + }, + [VCAP_KF_L2_MC_IS] = { + .type = VCAP_FIELD_BIT, + .offset = 13, + .width = 1, + }, + [VCAP_KF_L2_BC_IS] = { + .type = VCAP_FIELD_BIT, + .offset = 14, + .width = 1, + }, + [VCAP_KF_ISDX_GT0_IS] = { + .type = VCAP_FIELD_BIT, + .offset = 15, + .width = 1, + }, + [VCAP_KF_ISDX_CLS] = { + .type = VCAP_FIELD_U32, + .offset = 16, + .width = 12, + }, + [VCAP_KF_8021Q_VLAN_TAGGED_IS] = { + .type = VCAP_FIELD_BIT, + .offset = 28, + .width = 1, + }, + [VCAP_KF_8021Q_VID_CLS] = { + .type = VCAP_FIELD_U32, + .offset = 29, + .width = 13, + }, + [VCAP_KF_IF_EGR_PORT_MASK_RNG] = { + .type = 
VCAP_FIELD_U32, + .offset = 42, + .width = 3, + }, + [VCAP_KF_IF_EGR_PORT_MASK] = { + .type = VCAP_FIELD_U32, + .offset = 45, + .width = 32, + }, + [VCAP_KF_IF_IGR_PORT_SEL] = { + .type = VCAP_FIELD_BIT, + .offset = 77, + .width = 1, + }, + [VCAP_KF_IF_IGR_PORT] = { + .type = VCAP_FIELD_U32, + .offset = 78, + .width = 9, + }, + [VCAP_KF_8021Q_PCP_CLS] = { + .type = VCAP_FIELD_U32, + .offset = 87, + .width = 3, + }, + [VCAP_KF_8021Q_DEI_CLS] = { + .type = VCAP_FIELD_BIT, + .offset = 90, + .width = 1, + }, + [VCAP_KF_COSID_CLS] = { + .type = VCAP_FIELD_U32, + .offset = 91, + .width = 3, + }, + [VCAP_KF_L3_DPL_CLS] = { + .type = VCAP_FIELD_BIT, + .offset = 94, + .width = 1, + }, + [VCAP_KF_L3_RT_IS] = { + .type = VCAP_FIELD_BIT, + .offset = 95, + .width = 1, + }, + [VCAP_KF_ES0_ISDX_KEY_ENA] = { + .type = VCAP_FIELD_BIT, + .offset = 96, + .width = 1, + }, + [VCAP_KF_MIRROR_ENA] = { + .type = VCAP_FIELD_U32, + .offset = 97, + .width = 2, + }, + [VCAP_KF_IP4_IS] = { + .type = VCAP_FIELD_BIT, + .offset = 99, + .width = 1, + }, + [VCAP_KF_L3_FRAGMENT_TYPE] = { + .type = VCAP_FIELD_U32, + .offset = 100, + .width = 2, + }, + [VCAP_KF_L3_OPTIONS_IS] = { + .type = VCAP_FIELD_BIT, + .offset = 102, + .width = 1, + }, + [VCAP_KF_L3_TTL_GT0] = { + .type = VCAP_FIELD_BIT, + .offset = 103, + .width = 1, + }, + [VCAP_KF_L3_TOS] = { + .type = VCAP_FIELD_U32, + .offset = 104, + .width = 8, + }, + [VCAP_KF_L3_IP4_DIP] = { + .type = VCAP_FIELD_U32, + .offset = 112, + .width = 32, + }, + [VCAP_KF_L3_IP4_SIP] = { + .type = VCAP_FIELD_U32, + .offset = 144, + .width = 32, + }, + [VCAP_KF_L3_DIP_EQ_SIP_IS] = { + .type = VCAP_FIELD_BIT, + .offset = 176, + .width = 1, + }, + [VCAP_KF_L3_IP_PROTO] = { + .type = VCAP_FIELD_U32, + .offset = 177, + .width = 8, + }, + [VCAP_KF_L3_PAYLOAD] = { + .type = VCAP_FIELD_U112, + .offset = 185, + .width = 96, + }, +}; + +static const struct vcap_field es2_ip_7tuple_keyfield[] = { + [VCAP_KF_LOOKUP_FIRST_IS] = { + .type = VCAP_FIELD_BIT, + .offset = 0, + .width = 1, + }, + [VCAP_KF_ACL_GRP_ID] = { + .type = VCAP_FIELD_U32, + .offset = 1, + .width = 8, + }, + [VCAP_KF_PROT_ACTIVE] = { + .type = VCAP_FIELD_BIT, + .offset = 9, + .width = 1, + }, + [VCAP_KF_L2_MC_IS] = { + .type = VCAP_FIELD_BIT, + .offset = 10, + .width = 1, + }, + [VCAP_KF_L2_BC_IS] = { + .type = VCAP_FIELD_BIT, + .offset = 11, + .width = 1, + }, + [VCAP_KF_ISDX_GT0_IS] = { + .type = VCAP_FIELD_BIT, + .offset = 12, + .width = 1, + }, + [VCAP_KF_ISDX_CLS] = { + .type = VCAP_FIELD_U32, + .offset = 13, + .width = 12, + }, + [VCAP_KF_8021Q_VLAN_TAGGED_IS] = { + .type = VCAP_FIELD_BIT, + .offset = 25, + .width = 1, + }, + [VCAP_KF_8021Q_VID_CLS] = { + .type = VCAP_FIELD_U32, + .offset = 26, + .width = 13, + }, + [VCAP_KF_IF_EGR_PORT_MASK_RNG] = { + .type = VCAP_FIELD_U32, + .offset = 39, + .width = 3, + }, + [VCAP_KF_IF_EGR_PORT_MASK] = { + .type = VCAP_FIELD_U32, + .offset = 42, + .width = 32, + }, + [VCAP_KF_IF_IGR_PORT_SEL] = { + .type = VCAP_FIELD_BIT, + .offset = 74, + .width = 1, + }, + [VCAP_KF_IF_IGR_PORT] = { + .type = VCAP_FIELD_U32, + .offset = 75, + .width = 9, + }, + [VCAP_KF_8021Q_PCP_CLS] = { + .type = VCAP_FIELD_U32, + .offset = 84, + .width = 3, + }, + [VCAP_KF_8021Q_DEI_CLS] = { + .type = VCAP_FIELD_BIT, + .offset = 87, + .width = 1, + }, + [VCAP_KF_COSID_CLS] = { + .type = VCAP_FIELD_U32, + .offset = 88, + .width = 3, + }, + [VCAP_KF_L3_DPL_CLS] = { + .type = VCAP_FIELD_BIT, + .offset = 91, + .width = 1, + }, + [VCAP_KF_L3_RT_IS] = { + .type = VCAP_FIELD_BIT, + .offset = 92, + .width = 1, + }, + 
[VCAP_KF_ES0_ISDX_KEY_ENA] = { + .type = VCAP_FIELD_BIT, + .offset = 93, + .width = 1, + }, + [VCAP_KF_MIRROR_ENA] = { + .type = VCAP_FIELD_U32, + .offset = 94, + .width = 2, + }, + [VCAP_KF_L2_DMAC] = { + .type = VCAP_FIELD_U48, + .offset = 96, + .width = 48, + }, + [VCAP_KF_L2_SMAC] = { + .type = VCAP_FIELD_U48, + .offset = 144, + .width = 48, + }, + [VCAP_KF_IP4_IS] = { + .type = VCAP_FIELD_BIT, + .offset = 192, + .width = 1, + }, + [VCAP_KF_L3_TTL_GT0] = { + .type = VCAP_FIELD_BIT, + .offset = 193, + .width = 1, + }, + [VCAP_KF_L3_TOS] = { + .type = VCAP_FIELD_U32, + .offset = 194, + .width = 8, + }, + [VCAP_KF_L3_IP6_DIP] = { + .type = VCAP_FIELD_U128, + .offset = 202, + .width = 128, + }, + [VCAP_KF_L3_IP6_SIP] = { + .type = VCAP_FIELD_U128, + .offset = 330, + .width = 128, + }, + [VCAP_KF_L3_DIP_EQ_SIP_IS] = { + .type = VCAP_FIELD_BIT, + .offset = 458, + .width = 1, + }, + [VCAP_KF_TCP_UDP_IS] = { + .type = VCAP_FIELD_BIT, + .offset = 459, + .width = 1, + }, + [VCAP_KF_TCP_IS] = { + .type = VCAP_FIELD_BIT, + .offset = 460, + .width = 1, + }, + [VCAP_KF_L4_DPORT] = { + .type = VCAP_FIELD_U32, + .offset = 461, + .width = 16, + }, + [VCAP_KF_L4_SPORT] = { + .type = VCAP_FIELD_U32, + .offset = 477, + .width = 16, + }, + [VCAP_KF_L4_RNG] = { + .type = VCAP_FIELD_U32, + .offset = 493, + .width = 16, + }, + [VCAP_KF_L4_SPORT_EQ_DPORT_IS] = { + .type = VCAP_FIELD_BIT, + .offset = 509, + .width = 1, + }, + [VCAP_KF_L4_SEQUENCE_EQ0_IS] = { + .type = VCAP_FIELD_BIT, + .offset = 510, + .width = 1, + }, + [VCAP_KF_L4_FIN] = { + .type = VCAP_FIELD_BIT, + .offset = 511, + .width = 1, + }, + [VCAP_KF_L4_SYN] = { + .type = VCAP_FIELD_BIT, + .offset = 512, + .width = 1, + }, + [VCAP_KF_L4_RST] = { + .type = VCAP_FIELD_BIT, + .offset = 513, + .width = 1, + }, + [VCAP_KF_L4_PSH] = { + .type = VCAP_FIELD_BIT, + .offset = 514, + .width = 1, + }, + [VCAP_KF_L4_ACK] = { + .type = VCAP_FIELD_BIT, + .offset = 515, + .width = 1, + }, + [VCAP_KF_L4_URG] = { + .type = VCAP_FIELD_BIT, + .offset = 516, + .width = 1, + }, + [VCAP_KF_L4_PAYLOAD] = { + .type = VCAP_FIELD_U64, + .offset = 517, + .width = 64, + }, +}; + +static const struct vcap_field es2_ip4_vid_keyfield[] = { + [VCAP_KF_LOOKUP_FIRST_IS] = { + .type = VCAP_FIELD_BIT, + .offset = 0, + .width = 1, + }, + [VCAP_KF_ACL_GRP_ID] = { + .type = VCAP_FIELD_U32, + .offset = 1, + .width = 8, + }, + [VCAP_KF_PROT_ACTIVE] = { + .type = VCAP_FIELD_BIT, + .offset = 9, + .width = 1, + }, + [VCAP_KF_L2_MC_IS] = { + .type = VCAP_FIELD_BIT, + .offset = 10, + .width = 1, + }, + [VCAP_KF_L2_BC_IS] = { + .type = VCAP_FIELD_BIT, + .offset = 11, + .width = 1, + }, + [VCAP_KF_ISDX_GT0_IS] = { + .type = VCAP_FIELD_BIT, + .offset = 12, + .width = 1, + }, + [VCAP_KF_ISDX_CLS] = { + .type = VCAP_FIELD_U32, + .offset = 13, + .width = 12, + }, + [VCAP_KF_8021Q_VLAN_TAGGED_IS] = { + .type = VCAP_FIELD_BIT, + .offset = 25, + .width = 1, + }, + [VCAP_KF_8021Q_VID_CLS] = { + .type = VCAP_FIELD_U32, + .offset = 26, + .width = 13, + }, + [VCAP_KF_8021Q_PCP_CLS] = { + .type = VCAP_FIELD_U32, + .offset = 39, + .width = 3, + }, + [VCAP_KF_8021Q_DEI_CLS] = { + .type = VCAP_FIELD_BIT, + .offset = 42, + .width = 1, + }, + [VCAP_KF_COSID_CLS] = { + .type = VCAP_FIELD_U32, + .offset = 43, + .width = 3, + }, + [VCAP_KF_L3_DPL_CLS] = { + .type = VCAP_FIELD_BIT, + .offset = 46, + .width = 1, + }, + [VCAP_KF_L3_RT_IS] = { + .type = VCAP_FIELD_BIT, + .offset = 47, + .width = 1, + }, + [VCAP_KF_ES0_ISDX_KEY_ENA] = { + .type = VCAP_FIELD_BIT, + .offset = 48, + .width = 1, + }, + [VCAP_KF_MIRROR_ENA] = 
{ + .type = VCAP_FIELD_U32, + .offset = 49, + .width = 2, + }, + [VCAP_KF_IP4_IS] = { + .type = VCAP_FIELD_BIT, + .offset = 51, + .width = 1, + }, + [VCAP_KF_L3_IP4_DIP] = { + .type = VCAP_FIELD_U32, + .offset = 52, + .width = 32, + }, + [VCAP_KF_L3_IP4_SIP] = { + .type = VCAP_FIELD_U32, + .offset = 84, + .width = 32, + }, + [VCAP_KF_L4_RNG] = { + .type = VCAP_FIELD_U32, + .offset = 116, + .width = 16, + }, +}; + +static const struct vcap_field es2_ip6_vid_keyfield[] = { + [VCAP_KF_TYPE] = { + .type = VCAP_FIELD_U32, + .offset = 0, + .width = 3, + }, + [VCAP_KF_LOOKUP_FIRST_IS] = { + .type = VCAP_FIELD_BIT, + .offset = 3, + .width = 1, + }, + [VCAP_KF_ACL_GRP_ID] = { + .type = VCAP_FIELD_U32, + .offset = 4, + .width = 8, + }, + [VCAP_KF_PROT_ACTIVE] = { + .type = VCAP_FIELD_BIT, + .offset = 12, + .width = 1, + }, + [VCAP_KF_L2_MC_IS] = { + .type = VCAP_FIELD_BIT, + .offset = 13, + .width = 1, + }, + [VCAP_KF_L2_BC_IS] = { + .type = VCAP_FIELD_BIT, + .offset = 14, + .width = 1, + }, + [VCAP_KF_ISDX_GT0_IS] = { + .type = VCAP_FIELD_BIT, + .offset = 15, + .width = 1, + }, + [VCAP_KF_ISDX_CLS] = { + .type = VCAP_FIELD_U32, + .offset = 16, + .width = 12, + }, + [VCAP_KF_8021Q_VLAN_TAGGED_IS] = { + .type = VCAP_FIELD_BIT, + .offset = 28, + .width = 1, + }, + [VCAP_KF_8021Q_VID_CLS] = { + .type = VCAP_FIELD_U32, + .offset = 29, + .width = 13, + }, + [VCAP_KF_L3_RT_IS] = { + .type = VCAP_FIELD_BIT, + .offset = 42, + .width = 1, + }, + [VCAP_KF_L3_IP6_DIP] = { + .type = VCAP_FIELD_U128, + .offset = 43, + .width = 128, + }, + [VCAP_KF_L3_IP6_SIP] = { + .type = VCAP_FIELD_U128, + .offset = 171, + .width = 128, + }, +}; + +/* keyfield_set */ +static const struct vcap_set is0_keyfield_set[] = { + [VCAP_KFS_MLL] = { + .type_id = 0, + .sw_per_item = 3, + .sw_cnt = 4, + }, + [VCAP_KFS_TRI_VID] = { + .type_id = 0, + .sw_per_item = 2, + .sw_cnt = 6, + }, + [VCAP_KFS_LL_FULL] = { + .type_id = 0, + .sw_per_item = 6, + .sw_cnt = 2, + }, + [VCAP_KFS_NORMAL] = { + .type_id = 1, + .sw_per_item = 6, + .sw_cnt = 2, + }, + [VCAP_KFS_NORMAL_7TUPLE] = { + .type_id = 0, + .sw_per_item = 12, + .sw_cnt = 1, + }, + [VCAP_KFS_NORMAL_5TUPLE_IP4] = { + .type_id = 2, + .sw_per_item = 6, + .sw_cnt = 2, + }, + [VCAP_KFS_PURE_5TUPLE_IP4] = { + .type_id = 2, + .sw_per_item = 3, + .sw_cnt = 4, + }, + [VCAP_KFS_ETAG] = { + .type_id = 3, + .sw_per_item = 2, + .sw_cnt = 6, + }, +}; + +static const struct vcap_set is2_keyfield_set[] = { + [VCAP_KFS_MAC_ETYPE] = { + .type_id = 0, + .sw_per_item = 6, + .sw_cnt = 2, + }, + [VCAP_KFS_ARP] = { + .type_id = 3, + .sw_per_item = 6, + .sw_cnt = 2, + }, + [VCAP_KFS_IP4_TCP_UDP] = { + .type_id = 4, + .sw_per_item = 6, + .sw_cnt = 2, + }, + [VCAP_KFS_IP4_OTHER] = { + .type_id = 5, + .sw_per_item = 6, + .sw_cnt = 2, + }, + [VCAP_KFS_IP6_STD] = { + .type_id = 6, + .sw_per_item = 6, + .sw_cnt = 2, + }, + [VCAP_KFS_IP_7TUPLE] = { + .type_id = 1, + .sw_per_item = 12, + .sw_cnt = 1, + }, + [VCAP_KFS_IP6_VID] = { + .type_id = 9, + .sw_per_item = 6, + .sw_cnt = 2, + }, +}; + +static const struct vcap_set es2_keyfield_set[] = { + [VCAP_KFS_MAC_ETYPE] = { + .type_id = 0, + .sw_per_item = 6, + .sw_cnt = 2, + }, + [VCAP_KFS_ARP] = { + .type_id = 1, + .sw_per_item = 6, + .sw_cnt = 2, + }, + [VCAP_KFS_IP4_TCP_UDP] = { + .type_id = 2, + .sw_per_item = 6, + .sw_cnt = 2, + }, + [VCAP_KFS_IP4_OTHER] = { + .type_id = 3, + .sw_per_item = 6, + .sw_cnt = 2, + }, + [VCAP_KFS_IP_7TUPLE] = { + .type_id = -1, + .sw_per_item = 12, + .sw_cnt = 1, + }, + [VCAP_KFS_IP4_VID] = { + .type_id = -1, + .sw_per_item = 3, + .sw_cnt = 
4, + }, + [VCAP_KFS_IP6_VID] = { + .type_id = 5, + .sw_per_item = 6, + .sw_cnt = 2, + }, +}; + +/* keyfield_set map */ +static const struct vcap_field *is0_keyfield_set_map[] = { + [VCAP_KFS_MLL] = is0_mll_keyfield, + [VCAP_KFS_TRI_VID] = is0_tri_vid_keyfield, + [VCAP_KFS_LL_FULL] = is0_ll_full_keyfield, + [VCAP_KFS_NORMAL] = is0_normal_keyfield, + [VCAP_KFS_NORMAL_7TUPLE] = is0_normal_7tuple_keyfield, + [VCAP_KFS_NORMAL_5TUPLE_IP4] = is0_normal_5tuple_ip4_keyfield, + [VCAP_KFS_PURE_5TUPLE_IP4] = is0_pure_5tuple_ip4_keyfield, + [VCAP_KFS_ETAG] = is0_etag_keyfield, +}; + +static const struct vcap_field *is2_keyfield_set_map[] = { + [VCAP_KFS_MAC_ETYPE] = is2_mac_etype_keyfield, + [VCAP_KFS_ARP] = is2_arp_keyfield, + [VCAP_KFS_IP4_TCP_UDP] = is2_ip4_tcp_udp_keyfield, + [VCAP_KFS_IP4_OTHER] = is2_ip4_other_keyfield, + [VCAP_KFS_IP6_STD] = is2_ip6_std_keyfield, + [VCAP_KFS_IP_7TUPLE] = is2_ip_7tuple_keyfield, + [VCAP_KFS_IP6_VID] = is2_ip6_vid_keyfield, +}; + +static const struct vcap_field *es2_keyfield_set_map[] = { + [VCAP_KFS_MAC_ETYPE] = es2_mac_etype_keyfield, + [VCAP_KFS_ARP] = es2_arp_keyfield, + [VCAP_KFS_IP4_TCP_UDP] = es2_ip4_tcp_udp_keyfield, + [VCAP_KFS_IP4_OTHER] = es2_ip4_other_keyfield, + [VCAP_KFS_IP_7TUPLE] = es2_ip_7tuple_keyfield, + [VCAP_KFS_IP4_VID] = es2_ip4_vid_keyfield, + [VCAP_KFS_IP6_VID] = es2_ip6_vid_keyfield, +}; + +/* keyfield_set map sizes */ +static int is0_keyfield_set_map_size[] = { + [VCAP_KFS_MLL] = ARRAY_SIZE(is0_mll_keyfield), + [VCAP_KFS_TRI_VID] = ARRAY_SIZE(is0_tri_vid_keyfield), + [VCAP_KFS_LL_FULL] = ARRAY_SIZE(is0_ll_full_keyfield), + [VCAP_KFS_NORMAL] = ARRAY_SIZE(is0_normal_keyfield), + [VCAP_KFS_NORMAL_7TUPLE] = ARRAY_SIZE(is0_normal_7tuple_keyfield), + [VCAP_KFS_NORMAL_5TUPLE_IP4] = ARRAY_SIZE(is0_normal_5tuple_ip4_keyfield), + [VCAP_KFS_PURE_5TUPLE_IP4] = ARRAY_SIZE(is0_pure_5tuple_ip4_keyfield), + [VCAP_KFS_ETAG] = ARRAY_SIZE(is0_etag_keyfield), +}; + +static int is2_keyfield_set_map_size[] = { + [VCAP_KFS_MAC_ETYPE] = ARRAY_SIZE(is2_mac_etype_keyfield), + [VCAP_KFS_ARP] = ARRAY_SIZE(is2_arp_keyfield), + [VCAP_KFS_IP4_TCP_UDP] = ARRAY_SIZE(is2_ip4_tcp_udp_keyfield), + [VCAP_KFS_IP4_OTHER] = ARRAY_SIZE(is2_ip4_other_keyfield), + [VCAP_KFS_IP6_STD] = ARRAY_SIZE(is2_ip6_std_keyfield), + [VCAP_KFS_IP_7TUPLE] = ARRAY_SIZE(is2_ip_7tuple_keyfield), + [VCAP_KFS_IP6_VID] = ARRAY_SIZE(is2_ip6_vid_keyfield), +}; + +static int es2_keyfield_set_map_size[] = { + [VCAP_KFS_MAC_ETYPE] = ARRAY_SIZE(es2_mac_etype_keyfield), + [VCAP_KFS_ARP] = ARRAY_SIZE(es2_arp_keyfield), + [VCAP_KFS_IP4_TCP_UDP] = ARRAY_SIZE(es2_ip4_tcp_udp_keyfield), + [VCAP_KFS_IP4_OTHER] = ARRAY_SIZE(es2_ip4_other_keyfield), + [VCAP_KFS_IP_7TUPLE] = ARRAY_SIZE(es2_ip_7tuple_keyfield), + [VCAP_KFS_IP4_VID] = ARRAY_SIZE(es2_ip4_vid_keyfield), + [VCAP_KFS_IP6_VID] = ARRAY_SIZE(es2_ip6_vid_keyfield), +}; + +/* actionfields */ +static const struct vcap_field is0_mlbs_actionfield[] = { + [VCAP_AF_TYPE] = { + .type = VCAP_FIELD_BIT, + .offset = 0, + .width = 1, + }, + [VCAP_AF_COSID_ENA] = { + .type = VCAP_FIELD_BIT, + .offset = 1, + .width = 1, + }, + [VCAP_AF_COSID_VAL] = { + .type = VCAP_FIELD_U32, + .offset = 2, + .width = 3, + }, + [VCAP_AF_QOS_ENA] = { + .type = VCAP_FIELD_BIT, + .offset = 5, + .width = 1, + }, + [VCAP_AF_QOS_VAL] = { + .type = VCAP_FIELD_U32, + .offset = 6, + .width = 3, + }, + [VCAP_AF_DP_ENA] = { + .type = VCAP_FIELD_BIT, + .offset = 9, + .width = 1, + }, + [VCAP_AF_DP_VAL] = { + .type = VCAP_FIELD_U32, + .offset = 10, + .width = 2, + }, + [VCAP_AF_MAP_LOOKUP_SEL] = { + 
.type = VCAP_FIELD_U32, + .offset = 12, + .width = 2, + }, + [VCAP_AF_MAP_KEY] = { + .type = VCAP_FIELD_U32, + .offset = 14, + .width = 3, + }, + [VCAP_AF_MAP_IDX] = { + .type = VCAP_FIELD_U32, + .offset = 17, + .width = 9, + }, + [VCAP_AF_CLS_VID_SEL] = { + .type = VCAP_FIELD_U32, + .offset = 26, + .width = 3, + }, + [VCAP_AF_GVID_ADD_REPLACE_SEL] = { + .type = VCAP_FIELD_U32, + .offset = 29, + .width = 3, + }, + [VCAP_AF_VID_VAL] = { + .type = VCAP_FIELD_U32, + .offset = 32, + .width = 13, + }, + [VCAP_AF_ISDX_ADD_REPLACE_SEL] = { + .type = VCAP_FIELD_BIT, + .offset = 45, + .width = 1, + }, + [VCAP_AF_ISDX_VAL] = { + .type = VCAP_FIELD_U32, + .offset = 46, + .width = 12, + }, + [VCAP_AF_FWD_DIS] = { + .type = VCAP_FIELD_BIT, + .offset = 58, + .width = 1, + }, + [VCAP_AF_CPU_ENA] = { + .type = VCAP_FIELD_BIT, + .offset = 59, + .width = 1, + }, + [VCAP_AF_CPU_Q] = { + .type = VCAP_FIELD_U32, + .offset = 60, + .width = 3, + }, + [VCAP_AF_OAM_Y1731_SEL] = { + .type = VCAP_FIELD_U32, + .offset = 63, + .width = 3, + }, + [VCAP_AF_OAM_TWAMP_ENA] = { + .type = VCAP_FIELD_BIT, + .offset = 66, + .width = 1, + }, + [VCAP_AF_OAM_IP_BFD_ENA] = { + .type = VCAP_FIELD_BIT, + .offset = 67, + .width = 1, + }, + [VCAP_AF_TC_LABEL] = { + .type = VCAP_FIELD_U32, + .offset = 68, + .width = 3, + }, + [VCAP_AF_TTL_LABEL] = { + .type = VCAP_FIELD_U32, + .offset = 71, + .width = 3, + }, + [VCAP_AF_NUM_VLD_LABELS] = { + .type = VCAP_FIELD_U32, + .offset = 74, + .width = 2, + }, + [VCAP_AF_FWD_TYPE] = { + .type = VCAP_FIELD_U32, + .offset = 76, + .width = 3, + }, + [VCAP_AF_MPLS_OAM_TYPE] = { + .type = VCAP_FIELD_U32, + .offset = 79, + .width = 3, + }, + [VCAP_AF_MPLS_MEP_ENA] = { + .type = VCAP_FIELD_BIT, + .offset = 82, + .width = 1, + }, + [VCAP_AF_MPLS_MIP_ENA] = { + .type = VCAP_FIELD_BIT, + .offset = 83, + .width = 1, + }, + [VCAP_AF_MPLS_OAM_FLAVOR] = { + .type = VCAP_FIELD_BIT, + .offset = 84, + .width = 1, + }, + [VCAP_AF_MPLS_IP_CTRL_ENA] = { + .type = VCAP_FIELD_BIT, + .offset = 85, + .width = 1, + }, + [VCAP_AF_PAG_OVERRIDE_MASK] = { + .type = VCAP_FIELD_U32, + .offset = 86, + .width = 8, + }, + [VCAP_AF_PAG_VAL] = { + .type = VCAP_FIELD_U32, + .offset = 94, + .width = 8, + }, + [VCAP_AF_S2_KEY_SEL_ENA] = { + .type = VCAP_FIELD_BIT, + .offset = 102, + .width = 1, + }, + [VCAP_AF_S2_KEY_SEL_IDX] = { + .type = VCAP_FIELD_U32, + .offset = 103, + .width = 6, + }, + [VCAP_AF_PIPELINE_FORCE_ENA] = { + .type = VCAP_FIELD_U32, + .offset = 109, + .width = 2, + }, + [VCAP_AF_PIPELINE_ACT_SEL] = { + .type = VCAP_FIELD_BIT, + .offset = 111, + .width = 1, + }, + [VCAP_AF_PIPELINE_PT] = { + .type = VCAP_FIELD_U32, + .offset = 112, + .width = 5, + }, + [VCAP_AF_NXT_KEY_TYPE] = { + .type = VCAP_FIELD_U32, + .offset = 117, + .width = 5, + }, + [VCAP_AF_NXT_NORM_W16_OFFSET] = { + .type = VCAP_FIELD_U32, + .offset = 122, + .width = 5, + }, + [VCAP_AF_NXT_OFFSET_FROM_TYPE] = { + .type = VCAP_FIELD_U32, + .offset = 127, + .width = 2, + }, + [VCAP_AF_NXT_TYPE_AFTER_OFFSET] = { + .type = VCAP_FIELD_U32, + .offset = 129, + .width = 2, + }, + [VCAP_AF_NXT_NORMALIZE] = { + .type = VCAP_FIELD_BIT, + .offset = 131, + .width = 1, + }, + [VCAP_AF_NXT_IDX_CTRL] = { + .type = VCAP_FIELD_U32, + .offset = 132, + .width = 3, + }, + [VCAP_AF_NXT_IDX] = { + .type = VCAP_FIELD_U32, + .offset = 135, + .width = 12, + }, +}; + +static const struct vcap_field is0_mlbs_reduced_actionfield[] = { + [VCAP_AF_TYPE] = { + .type = VCAP_FIELD_BIT, + .offset = 0, + .width = 1, + }, + [VCAP_AF_COSID_ENA] = { + .type = VCAP_FIELD_BIT, + .offset = 1, + 
.width = 1, + }, + [VCAP_AF_COSID_VAL] = { + .type = VCAP_FIELD_U32, + .offset = 2, + .width = 3, + }, + [VCAP_AF_QOS_ENA] = { + .type = VCAP_FIELD_BIT, + .offset = 5, + .width = 1, + }, + [VCAP_AF_QOS_VAL] = { + .type = VCAP_FIELD_U32, + .offset = 6, + .width = 3, + }, + [VCAP_AF_DP_ENA] = { + .type = VCAP_FIELD_BIT, + .offset = 9, + .width = 1, + }, + [VCAP_AF_DP_VAL] = { + .type = VCAP_FIELD_U32, + .offset = 10, + .width = 2, + }, + [VCAP_AF_MAP_LOOKUP_SEL] = { + .type = VCAP_FIELD_U32, + .offset = 12, + .width = 2, + }, + [VCAP_AF_ISDX_ADD_REPLACE_SEL] = { + .type = VCAP_FIELD_BIT, + .offset = 14, + .width = 1, + }, + [VCAP_AF_ISDX_VAL] = { + .type = VCAP_FIELD_U32, + .offset = 15, + .width = 12, + }, + [VCAP_AF_FWD_DIS] = { + .type = VCAP_FIELD_BIT, + .offset = 27, + .width = 1, + }, + [VCAP_AF_CPU_ENA] = { + .type = VCAP_FIELD_BIT, + .offset = 28, + .width = 1, + }, + [VCAP_AF_CPU_Q] = { + .type = VCAP_FIELD_U32, + .offset = 29, + .width = 3, + }, + [VCAP_AF_TC_ENA] = { + .type = VCAP_FIELD_BIT, + .offset = 32, + .width = 1, + }, + [VCAP_AF_TTL_ENA] = { + .type = VCAP_FIELD_BIT, + .offset = 33, + .width = 1, + }, + [VCAP_AF_FWD_TYPE] = { + .type = VCAP_FIELD_U32, + .offset = 34, + .width = 3, + }, + [VCAP_AF_MPLS_OAM_TYPE] = { + .type = VCAP_FIELD_U32, + .offset = 37, + .width = 3, + }, + [VCAP_AF_MPLS_MEP_ENA] = { + .type = VCAP_FIELD_BIT, + .offset = 40, + .width = 1, + }, + [VCAP_AF_MPLS_MIP_ENA] = { + .type = VCAP_FIELD_BIT, + .offset = 41, + .width = 1, + }, + [VCAP_AF_MPLS_OAM_FLAVOR] = { + .type = VCAP_FIELD_BIT, + .offset = 42, + .width = 1, + }, + [VCAP_AF_MPLS_IP_CTRL_ENA] = { + .type = VCAP_FIELD_BIT, + .offset = 43, + .width = 1, + }, + [VCAP_AF_PIPELINE_FORCE_ENA] = { + .type = VCAP_FIELD_U32, + .offset = 44, + .width = 2, + }, + [VCAP_AF_PIPELINE_ACT_SEL] = { + .type = VCAP_FIELD_BIT, + .offset = 46, + .width = 1, + }, + [VCAP_AF_PIPELINE_PT_REDUCED] = { + .type = VCAP_FIELD_U32, + .offset = 47, + .width = 3, + }, + [VCAP_AF_NXT_KEY_TYPE] = { + .type = VCAP_FIELD_U32, + .offset = 50, + .width = 5, + }, + [VCAP_AF_NXT_NORM_W32_OFFSET] = { + .type = VCAP_FIELD_U32, + .offset = 55, + .width = 2, + }, + [VCAP_AF_NXT_TYPE_AFTER_OFFSET] = { + .type = VCAP_FIELD_U32, + .offset = 57, + .width = 2, + }, + [VCAP_AF_NXT_NORMALIZE] = { + .type = VCAP_FIELD_BIT, + .offset = 59, + .width = 1, + }, + [VCAP_AF_NXT_IDX_CTRL] = { + .type = VCAP_FIELD_U32, + .offset = 60, + .width = 3, + }, + [VCAP_AF_NXT_IDX] = { + .type = VCAP_FIELD_U32, + .offset = 63, + .width = 12, + }, +}; + +static const struct vcap_field is0_classification_actionfield[] = { + [VCAP_AF_TYPE] = { + .type = VCAP_FIELD_BIT, + .offset = 0, + .width = 1, + }, + [VCAP_AF_DSCP_ENA] = { + .type = VCAP_FIELD_BIT, + .offset = 1, + .width = 1, + }, + [VCAP_AF_DSCP_VAL] = { + .type = VCAP_FIELD_U32, + .offset = 2, + .width = 6, + }, + [VCAP_AF_COSID_ENA] = { + .type = VCAP_FIELD_BIT, + .offset = 8, + .width = 1, + }, + [VCAP_AF_COSID_VAL] = { + .type = VCAP_FIELD_U32, + .offset = 9, + .width = 3, + }, + [VCAP_AF_QOS_ENA] = { + .type = VCAP_FIELD_BIT, + .offset = 12, + .width = 1, + }, + [VCAP_AF_QOS_VAL] = { + .type = VCAP_FIELD_U32, + .offset = 13, + .width = 3, + }, + [VCAP_AF_DP_ENA] = { + .type = VCAP_FIELD_BIT, + .offset = 16, + .width = 1, + }, + [VCAP_AF_DP_VAL] = { + .type = VCAP_FIELD_U32, + .offset = 17, + .width = 2, + }, + [VCAP_AF_DEI_ENA] = { + .type = VCAP_FIELD_BIT, + .offset = 19, + .width = 1, + }, + [VCAP_AF_DEI_VAL] = { + .type = VCAP_FIELD_BIT, + .offset = 20, + .width = 1, + }, + [VCAP_AF_PCP_ENA] = { + 
.type = VCAP_FIELD_BIT, + .offset = 21, + .width = 1, + }, + [VCAP_AF_PCP_VAL] = { + .type = VCAP_FIELD_U32, + .offset = 22, + .width = 3, + }, + [VCAP_AF_MAP_LOOKUP_SEL] = { + .type = VCAP_FIELD_U32, + .offset = 25, + .width = 2, + }, + [VCAP_AF_MAP_KEY] = { + .type = VCAP_FIELD_U32, + .offset = 27, + .width = 3, + }, + [VCAP_AF_MAP_IDX] = { + .type = VCAP_FIELD_U32, + .offset = 30, + .width = 9, + }, + [VCAP_AF_CLS_VID_SEL] = { + .type = VCAP_FIELD_U32, + .offset = 39, + .width = 3, + }, + [VCAP_AF_GVID_ADD_REPLACE_SEL] = { + .type = VCAP_FIELD_U32, + .offset = 42, + .width = 3, + }, + [VCAP_AF_VID_VAL] = { + .type = VCAP_FIELD_U32, + .offset = 45, + .width = 13, + }, + [VCAP_AF_VLAN_POP_CNT_ENA] = { + .type = VCAP_FIELD_BIT, + .offset = 58, + .width = 1, + }, + [VCAP_AF_VLAN_POP_CNT] = { + .type = VCAP_FIELD_U32, + .offset = 59, + .width = 2, + }, + [VCAP_AF_VLAN_PUSH_CNT_ENA] = { + .type = VCAP_FIELD_BIT, + .offset = 61, + .width = 1, + }, + [VCAP_AF_VLAN_PUSH_CNT] = { + .type = VCAP_FIELD_U32, + .offset = 62, + .width = 2, + }, + [VCAP_AF_TPID_SEL] = { + .type = VCAP_FIELD_U32, + .offset = 64, + .width = 2, + }, + [VCAP_AF_VLAN_WAS_TAGGED] = { + .type = VCAP_FIELD_U32, + .offset = 66, + .width = 2, + }, + [VCAP_AF_ISDX_ADD_REPLACE_SEL] = { + .type = VCAP_FIELD_BIT, + .offset = 68, + .width = 1, + }, + [VCAP_AF_ISDX_VAL] = { + .type = VCAP_FIELD_U32, + .offset = 69, + .width = 12, + }, + [VCAP_AF_RT_SEL] = { + .type = VCAP_FIELD_U32, + .offset = 81, + .width = 2, + }, + [VCAP_AF_LPM_AFFIX_ENA] = { + .type = VCAP_FIELD_BIT, + .offset = 83, + .width = 1, + }, + [VCAP_AF_LPM_AFFIX_VAL] = { + .type = VCAP_FIELD_U32, + .offset = 84, + .width = 10, + }, + [VCAP_AF_RLEG_DMAC_CHK_DIS] = { + .type = VCAP_FIELD_BIT, + .offset = 94, + .width = 1, + }, + [VCAP_AF_TTL_DECR_DIS] = { + .type = VCAP_FIELD_BIT, + .offset = 95, + .width = 1, + }, + [VCAP_AF_L3_MAC_UPDATE_DIS] = { + .type = VCAP_FIELD_BIT, + .offset = 96, + .width = 1, + }, + [VCAP_AF_FWD_DIS] = { + .type = VCAP_FIELD_BIT, + .offset = 97, + .width = 1, + }, + [VCAP_AF_CPU_ENA] = { + .type = VCAP_FIELD_BIT, + .offset = 98, + .width = 1, + }, + [VCAP_AF_CPU_Q] = { + .type = VCAP_FIELD_U32, + .offset = 99, + .width = 3, + }, + [VCAP_AF_MIP_SEL] = { + .type = VCAP_FIELD_U32, + .offset = 102, + .width = 2, + }, + [VCAP_AF_OAM_Y1731_SEL] = { + .type = VCAP_FIELD_U32, + .offset = 104, + .width = 3, + }, + [VCAP_AF_OAM_TWAMP_ENA] = { + .type = VCAP_FIELD_BIT, + .offset = 107, + .width = 1, + }, + [VCAP_AF_OAM_IP_BFD_ENA] = { + .type = VCAP_FIELD_BIT, + .offset = 108, + .width = 1, + }, + [VCAP_AF_PAG_OVERRIDE_MASK] = { + .type = VCAP_FIELD_U32, + .offset = 109, + .width = 8, + }, + [VCAP_AF_PAG_VAL] = { + .type = VCAP_FIELD_U32, + .offset = 117, + .width = 8, + }, + [VCAP_AF_S2_KEY_SEL_ENA] = { + .type = VCAP_FIELD_BIT, + .offset = 125, + .width = 1, + }, + [VCAP_AF_S2_KEY_SEL_IDX] = { + .type = VCAP_FIELD_U32, + .offset = 126, + .width = 6, + }, + [VCAP_AF_INJ_MASQ_ENA] = { + .type = VCAP_FIELD_BIT, + .offset = 132, + .width = 1, + }, + [VCAP_AF_INJ_MASQ_PORT] = { + .type = VCAP_FIELD_U32, + .offset = 133, + .width = 7, + }, + [VCAP_AF_LPORT_ENA] = { + .type = VCAP_FIELD_BIT, + .offset = 140, + .width = 1, + }, + [VCAP_AF_INJ_MASQ_LPORT] = { + .type = VCAP_FIELD_U32, + .offset = 141, + .width = 7, + }, + [VCAP_AF_PIPELINE_FORCE_ENA] = { + .type = VCAP_FIELD_U32, + .offset = 148, + .width = 2, + }, + [VCAP_AF_PIPELINE_ACT_SEL] = { + .type = VCAP_FIELD_BIT, + .offset = 150, + .width = 1, + }, + [VCAP_AF_PIPELINE_PT] = { + .type = VCAP_FIELD_U32, 
+ .offset = 151, + .width = 5, + }, + [VCAP_AF_NXT_KEY_TYPE] = { + .type = VCAP_FIELD_U32, + .offset = 156, + .width = 5, + }, + [VCAP_AF_NXT_NORM_W16_OFFSET] = { + .type = VCAP_FIELD_U32, + .offset = 161, + .width = 5, + }, + [VCAP_AF_NXT_OFFSET_FROM_TYPE] = { + .type = VCAP_FIELD_U32, + .offset = 166, + .width = 2, + }, + [VCAP_AF_NXT_TYPE_AFTER_OFFSET] = { + .type = VCAP_FIELD_U32, + .offset = 168, + .width = 2, + }, + [VCAP_AF_NXT_NORMALIZE] = { + .type = VCAP_FIELD_BIT, + .offset = 170, + .width = 1, + }, + [VCAP_AF_NXT_IDX_CTRL] = { + .type = VCAP_FIELD_U32, + .offset = 171, + .width = 3, + }, + [VCAP_AF_NXT_IDX] = { + .type = VCAP_FIELD_U32, + .offset = 174, + .width = 12, + }, +}; + +static const struct vcap_field is0_full_actionfield[] = { + [VCAP_AF_DSCP_ENA] = { + .type = VCAP_FIELD_BIT, + .offset = 0, + .width = 1, + }, + [VCAP_AF_DSCP_VAL] = { + .type = VCAP_FIELD_U32, + .offset = 1, + .width = 6, + }, + [VCAP_AF_COSID_ENA] = { + .type = VCAP_FIELD_BIT, + .offset = 7, + .width = 1, + }, + [VCAP_AF_COSID_VAL] = { + .type = VCAP_FIELD_U32, + .offset = 8, + .width = 3, + }, + [VCAP_AF_QOS_ENA] = { + .type = VCAP_FIELD_BIT, + .offset = 11, + .width = 1, + }, + [VCAP_AF_QOS_VAL] = { + .type = VCAP_FIELD_U32, + .offset = 12, + .width = 3, + }, + [VCAP_AF_DP_ENA] = { + .type = VCAP_FIELD_BIT, + .offset = 15, + .width = 1, + }, + [VCAP_AF_DP_VAL] = { + .type = VCAP_FIELD_U32, + .offset = 16, + .width = 2, + }, + [VCAP_AF_DEI_ENA] = { + .type = VCAP_FIELD_BIT, + .offset = 18, + .width = 1, + }, + [VCAP_AF_DEI_VAL] = { + .type = VCAP_FIELD_BIT, + .offset = 19, + .width = 1, + }, + [VCAP_AF_PCP_ENA] = { + .type = VCAP_FIELD_BIT, + .offset = 20, + .width = 1, + }, + [VCAP_AF_PCP_VAL] = { + .type = VCAP_FIELD_U32, + .offset = 21, + .width = 3, + }, + [VCAP_AF_MAP_LOOKUP_SEL] = { + .type = VCAP_FIELD_U32, + .offset = 24, + .width = 2, + }, + [VCAP_AF_MAP_KEY] = { + .type = VCAP_FIELD_U32, + .offset = 26, + .width = 3, + }, + [VCAP_AF_MAP_IDX] = { + .type = VCAP_FIELD_U32, + .offset = 29, + .width = 9, + }, + [VCAP_AF_CLS_VID_SEL] = { + .type = VCAP_FIELD_U32, + .offset = 38, + .width = 3, + }, + [VCAP_AF_GVID_ADD_REPLACE_SEL] = { + .type = VCAP_FIELD_U32, + .offset = 41, + .width = 3, + }, + [VCAP_AF_VID_VAL] = { + .type = VCAP_FIELD_U32, + .offset = 44, + .width = 13, + }, + [VCAP_AF_VLAN_POP_CNT_ENA] = { + .type = VCAP_FIELD_BIT, + .offset = 57, + .width = 1, + }, + [VCAP_AF_VLAN_POP_CNT] = { + .type = VCAP_FIELD_U32, + .offset = 58, + .width = 2, + }, + [VCAP_AF_VLAN_PUSH_CNT_ENA] = { + .type = VCAP_FIELD_BIT, + .offset = 60, + .width = 1, + }, + [VCAP_AF_VLAN_PUSH_CNT] = { + .type = VCAP_FIELD_U32, + .offset = 61, + .width = 2, + }, + [VCAP_AF_TPID_SEL] = { + .type = VCAP_FIELD_U32, + .offset = 63, + .width = 2, + }, + [VCAP_AF_VLAN_WAS_TAGGED] = { + .type = VCAP_FIELD_U32, + .offset = 65, + .width = 2, + }, + [VCAP_AF_ISDX_ADD_REPLACE_SEL] = { + .type = VCAP_FIELD_BIT, + .offset = 67, + .width = 1, + }, + [VCAP_AF_ISDX_VAL] = { + .type = VCAP_FIELD_U32, + .offset = 68, + .width = 12, + }, + [VCAP_AF_MASK_MODE] = { + .type = VCAP_FIELD_U32, + .offset = 80, + .width = 3, + }, + [VCAP_AF_PORT_MASK] = { + .type = VCAP_FIELD_U72, + .offset = 83, + .width = 65, + }, + [VCAP_AF_RT_SEL] = { + .type = VCAP_FIELD_U32, + .offset = 148, + .width = 2, + }, + [VCAP_AF_LPM_AFFIX_ENA] = { + .type = VCAP_FIELD_BIT, + .offset = 150, + .width = 1, + }, + [VCAP_AF_LPM_AFFIX_VAL] = { + .type = VCAP_FIELD_U32, + .offset = 151, + .width = 10, + }, + [VCAP_AF_RLEG_DMAC_CHK_DIS] = { + .type = VCAP_FIELD_BIT, + 
.offset = 161, + .width = 1, + }, + [VCAP_AF_TTL_DECR_DIS] = { + .type = VCAP_FIELD_BIT, + .offset = 162, + .width = 1, + }, + [VCAP_AF_L3_MAC_UPDATE_DIS] = { + .type = VCAP_FIELD_BIT, + .offset = 163, + .width = 1, + }, + [VCAP_AF_CPU_ENA] = { + .type = VCAP_FIELD_BIT, + .offset = 164, + .width = 1, + }, + [VCAP_AF_CPU_Q] = { + .type = VCAP_FIELD_U32, + .offset = 165, + .width = 3, + }, + [VCAP_AF_MIP_SEL] = { + .type = VCAP_FIELD_U32, + .offset = 168, + .width = 2, + }, + [VCAP_AF_OAM_Y1731_SEL] = { + .type = VCAP_FIELD_U32, + .offset = 170, + .width = 3, + }, + [VCAP_AF_OAM_TWAMP_ENA] = { + .type = VCAP_FIELD_BIT, + .offset = 173, + .width = 1, + }, + [VCAP_AF_OAM_IP_BFD_ENA] = { + .type = VCAP_FIELD_BIT, + .offset = 174, + .width = 1, + }, + [VCAP_AF_RSVD_LBL_VAL] = { + .type = VCAP_FIELD_U32, + .offset = 175, + .width = 4, + }, + [VCAP_AF_TC_LABEL] = { + .type = VCAP_FIELD_U32, + .offset = 179, + .width = 3, + }, + [VCAP_AF_TTL_LABEL] = { + .type = VCAP_FIELD_U32, + .offset = 182, + .width = 3, + }, + [VCAP_AF_NUM_VLD_LABELS] = { + .type = VCAP_FIELD_U32, + .offset = 185, + .width = 2, + }, + [VCAP_AF_FWD_TYPE] = { + .type = VCAP_FIELD_U32, + .offset = 187, + .width = 3, + }, + [VCAP_AF_MPLS_OAM_TYPE] = { + .type = VCAP_FIELD_U32, + .offset = 190, + .width = 3, + }, + [VCAP_AF_MPLS_MEP_ENA] = { + .type = VCAP_FIELD_BIT, + .offset = 193, + .width = 1, + }, + [VCAP_AF_MPLS_MIP_ENA] = { + .type = VCAP_FIELD_BIT, + .offset = 194, + .width = 1, + }, + [VCAP_AF_MPLS_OAM_FLAVOR] = { + .type = VCAP_FIELD_BIT, + .offset = 195, + .width = 1, + }, + [VCAP_AF_MPLS_IP_CTRL_ENA] = { + .type = VCAP_FIELD_BIT, + .offset = 196, + .width = 1, + }, + [VCAP_AF_CUSTOM_ACE_ENA] = { + .type = VCAP_FIELD_U32, + .offset = 197, + .width = 5, + }, + [VCAP_AF_CUSTOM_ACE_OFFSET] = { + .type = VCAP_FIELD_U32, + .offset = 202, + .width = 2, + }, + [VCAP_AF_PAG_OVERRIDE_MASK] = { + .type = VCAP_FIELD_U32, + .offset = 204, + .width = 8, + }, + [VCAP_AF_PAG_VAL] = { + .type = VCAP_FIELD_U32, + .offset = 212, + .width = 8, + }, + [VCAP_AF_S2_KEY_SEL_ENA] = { + .type = VCAP_FIELD_BIT, + .offset = 220, + .width = 1, + }, + [VCAP_AF_S2_KEY_SEL_IDX] = { + .type = VCAP_FIELD_U32, + .offset = 221, + .width = 6, + }, + [VCAP_AF_INJ_MASQ_ENA] = { + .type = VCAP_FIELD_BIT, + .offset = 227, + .width = 1, + }, + [VCAP_AF_INJ_MASQ_PORT] = { + .type = VCAP_FIELD_U32, + .offset = 228, + .width = 7, + }, + [VCAP_AF_LPORT_ENA] = { + .type = VCAP_FIELD_BIT, + .offset = 235, + .width = 1, + }, + [VCAP_AF_INJ_MASQ_LPORT] = { + .type = VCAP_FIELD_U32, + .offset = 236, + .width = 7, + }, + [VCAP_AF_MATCH_ID] = { + .type = VCAP_FIELD_U32, + .offset = 243, + .width = 16, + }, + [VCAP_AF_MATCH_ID_MASK] = { + .type = VCAP_FIELD_U32, + .offset = 259, + .width = 16, + }, + [VCAP_AF_PIPELINE_FORCE_ENA] = { + .type = VCAP_FIELD_U32, + .offset = 275, + .width = 2, + }, + [VCAP_AF_PIPELINE_ACT_SEL] = { + .type = VCAP_FIELD_BIT, + .offset = 277, + .width = 1, + }, + [VCAP_AF_PIPELINE_PT] = { + .type = VCAP_FIELD_U32, + .offset = 278, + .width = 5, + }, + [VCAP_AF_NXT_KEY_TYPE] = { + .type = VCAP_FIELD_U32, + .offset = 283, + .width = 5, + }, + [VCAP_AF_NXT_NORM_W16_OFFSET] = { + .type = VCAP_FIELD_U32, + .offset = 288, + .width = 5, + }, + [VCAP_AF_NXT_OFFSET_FROM_TYPE] = { + .type = VCAP_FIELD_U32, + .offset = 293, + .width = 2, + }, + [VCAP_AF_NXT_TYPE_AFTER_OFFSET] = { + .type = VCAP_FIELD_U32, + .offset = 295, + .width = 2, + }, + [VCAP_AF_NXT_NORMALIZE] = { + .type = VCAP_FIELD_BIT, + .offset = 297, + .width = 1, + }, + [VCAP_AF_NXT_IDX_CTRL] = 
{ + .type = VCAP_FIELD_U32, + .offset = 298, + .width = 3, + }, + [VCAP_AF_NXT_IDX] = { + .type = VCAP_FIELD_U32, + .offset = 301, + .width = 12, + }, +}; + +static const struct vcap_field is0_class_reduced_actionfield[] = { + [VCAP_AF_TYPE] = { + .type = VCAP_FIELD_BIT, + .offset = 0, + .width = 1, + }, + [VCAP_AF_COSID_ENA] = { + .type = VCAP_FIELD_BIT, + .offset = 1, + .width = 1, + }, + [VCAP_AF_COSID_VAL] = { + .type = VCAP_FIELD_U32, + .offset = 2, + .width = 3, + }, + [VCAP_AF_QOS_ENA] = { + .type = VCAP_FIELD_BIT, + .offset = 5, + .width = 1, + }, + [VCAP_AF_QOS_VAL] = { + .type = VCAP_FIELD_U32, + .offset = 6, + .width = 3, + }, + [VCAP_AF_DP_ENA] = { + .type = VCAP_FIELD_BIT, + .offset = 9, + .width = 1, + }, + [VCAP_AF_DP_VAL] = { + .type = VCAP_FIELD_U32, + .offset = 10, + .width = 2, + }, + [VCAP_AF_MAP_LOOKUP_SEL] = { + .type = VCAP_FIELD_U32, + .offset = 12, + .width = 2, + }, + [VCAP_AF_MAP_KEY] = { + .type = VCAP_FIELD_U32, + .offset = 14, + .width = 3, + }, + [VCAP_AF_CLS_VID_SEL] = { + .type = VCAP_FIELD_U32, + .offset = 17, + .width = 3, + }, + [VCAP_AF_GVID_ADD_REPLACE_SEL] = { + .type = VCAP_FIELD_U32, + .offset = 20, + .width = 3, + }, + [VCAP_AF_VID_VAL] = { + .type = VCAP_FIELD_U32, + .offset = 23, + .width = 13, + }, + [VCAP_AF_VLAN_POP_CNT_ENA] = { + .type = VCAP_FIELD_BIT, + .offset = 36, + .width = 1, + }, + [VCAP_AF_VLAN_POP_CNT] = { + .type = VCAP_FIELD_U32, + .offset = 37, + .width = 2, + }, + [VCAP_AF_VLAN_PUSH_CNT_ENA] = { + .type = VCAP_FIELD_BIT, + .offset = 39, + .width = 1, + }, + [VCAP_AF_VLAN_PUSH_CNT] = { + .type = VCAP_FIELD_U32, + .offset = 40, + .width = 2, + }, + [VCAP_AF_TPID_SEL] = { + .type = VCAP_FIELD_U32, + .offset = 42, + .width = 2, + }, + [VCAP_AF_VLAN_WAS_TAGGED] = { + .type = VCAP_FIELD_U32, + .offset = 44, + .width = 2, + }, + [VCAP_AF_ISDX_ADD_REPLACE_SEL] = { + .type = VCAP_FIELD_BIT, + .offset = 46, + .width = 1, + }, + [VCAP_AF_ISDX_VAL] = { + .type = VCAP_FIELD_U32, + .offset = 47, + .width = 12, + }, + [VCAP_AF_FWD_DIS] = { + .type = VCAP_FIELD_BIT, + .offset = 59, + .width = 1, + }, + [VCAP_AF_CPU_ENA] = { + .type = VCAP_FIELD_BIT, + .offset = 60, + .width = 1, + }, + [VCAP_AF_CPU_Q] = { + .type = VCAP_FIELD_U32, + .offset = 61, + .width = 3, + }, + [VCAP_AF_MIP_SEL] = { + .type = VCAP_FIELD_U32, + .offset = 64, + .width = 2, + }, + [VCAP_AF_OAM_Y1731_SEL] = { + .type = VCAP_FIELD_U32, + .offset = 66, + .width = 3, + }, + [VCAP_AF_LPORT_ENA] = { + .type = VCAP_FIELD_BIT, + .offset = 69, + .width = 1, + }, + [VCAP_AF_INJ_MASQ_LPORT] = { + .type = VCAP_FIELD_U32, + .offset = 70, + .width = 7, + }, + [VCAP_AF_PIPELINE_FORCE_ENA] = { + .type = VCAP_FIELD_U32, + .offset = 77, + .width = 2, + }, + [VCAP_AF_PIPELINE_ACT_SEL] = { + .type = VCAP_FIELD_BIT, + .offset = 79, + .width = 1, + }, + [VCAP_AF_PIPELINE_PT] = { + .type = VCAP_FIELD_U32, + .offset = 80, + .width = 5, + }, + [VCAP_AF_NXT_KEY_TYPE] = { + .type = VCAP_FIELD_U32, + .offset = 85, + .width = 5, + }, + [VCAP_AF_NXT_IDX_CTRL] = { + .type = VCAP_FIELD_U32, + .offset = 90, + .width = 3, + }, + [VCAP_AF_NXT_IDX] = { + .type = VCAP_FIELD_U32, + .offset = 93, + .width = 12, + }, +}; + +static const struct vcap_field is2_base_type_actionfield[] = { + [VCAP_AF_IS_INNER_ACL] = { + .type = VCAP_FIELD_BIT, + .offset = 0, + .width = 1, + }, + [VCAP_AF_PIPELINE_FORCE_ENA] = { + .type = VCAP_FIELD_BIT, + .offset = 1, + .width = 1, + }, + [VCAP_AF_PIPELINE_PT] = { + .type = VCAP_FIELD_U32, + .offset = 2, + .width = 5, + }, + [VCAP_AF_HIT_ME_ONCE] = { + .type = VCAP_FIELD_BIT, + 
.offset = 7, + .width = 1, + }, + [VCAP_AF_INTR_ENA] = { + .type = VCAP_FIELD_BIT, + .offset = 8, + .width = 1, + }, + [VCAP_AF_CPU_COPY_ENA] = { + .type = VCAP_FIELD_BIT, + .offset = 9, + .width = 1, + }, + [VCAP_AF_CPU_QUEUE_NUM] = { + .type = VCAP_FIELD_U32, + .offset = 10, + .width = 3, + }, + [VCAP_AF_CPU_DIS] = { + .type = VCAP_FIELD_BIT, + .offset = 13, + .width = 1, + }, + [VCAP_AF_LRN_DIS] = { + .type = VCAP_FIELD_BIT, + .offset = 14, + .width = 1, + }, + [VCAP_AF_RT_DIS] = { + .type = VCAP_FIELD_BIT, + .offset = 15, + .width = 1, + }, + [VCAP_AF_POLICE_ENA] = { + .type = VCAP_FIELD_BIT, + .offset = 16, + .width = 1, + }, + [VCAP_AF_POLICE_IDX] = { + .type = VCAP_FIELD_U32, + .offset = 17, + .width = 6, + }, + [VCAP_AF_IGNORE_PIPELINE_CTRL] = { + .type = VCAP_FIELD_BIT, + .offset = 23, + .width = 1, + }, + [VCAP_AF_DLB_OFFSET] = { + .type = VCAP_FIELD_U32, + .offset = 24, + .width = 3, + }, + [VCAP_AF_MASK_MODE] = { + .type = VCAP_FIELD_U32, + .offset = 27, + .width = 3, + }, + [VCAP_AF_PORT_MASK] = { + .type = VCAP_FIELD_U72, + .offset = 30, + .width = 68, + }, + [VCAP_AF_RSDX_ENA] = { + .type = VCAP_FIELD_BIT, + .offset = 98, + .width = 1, + }, + [VCAP_AF_RSDX_VAL] = { + .type = VCAP_FIELD_U32, + .offset = 99, + .width = 12, + }, + [VCAP_AF_MIRROR_PROBE] = { + .type = VCAP_FIELD_U32, + .offset = 111, + .width = 2, + }, + [VCAP_AF_REW_CMD] = { + .type = VCAP_FIELD_U32, + .offset = 113, + .width = 11, + }, + [VCAP_AF_TTL_UPDATE_ENA] = { + .type = VCAP_FIELD_BIT, + .offset = 124, + .width = 1, + }, + [VCAP_AF_SAM_SEQ_ENA] = { + .type = VCAP_FIELD_BIT, + .offset = 125, + .width = 1, + }, + [VCAP_AF_TCP_UDP_ENA] = { + .type = VCAP_FIELD_BIT, + .offset = 126, + .width = 1, + }, + [VCAP_AF_TCP_UDP_DPORT] = { + .type = VCAP_FIELD_U32, + .offset = 127, + .width = 16, + }, + [VCAP_AF_TCP_UDP_SPORT] = { + .type = VCAP_FIELD_U32, + .offset = 143, + .width = 16, + }, + [VCAP_AF_MATCH_ID] = { + .type = VCAP_FIELD_U32, + .offset = 159, + .width = 16, + }, + [VCAP_AF_MATCH_ID_MASK] = { + .type = VCAP_FIELD_U32, + .offset = 175, + .width = 16, + }, + [VCAP_AF_CNT_ID] = { + .type = VCAP_FIELD_U32, + .offset = 191, + .width = 12, + }, + [VCAP_AF_SWAP_MAC_ENA] = { + .type = VCAP_FIELD_BIT, + .offset = 203, + .width = 1, + }, + [VCAP_AF_ACL_RT_MODE] = { + .type = VCAP_FIELD_U32, + .offset = 204, + .width = 4, + }, + [VCAP_AF_ACL_MAC] = { + .type = VCAP_FIELD_U48, + .offset = 208, + .width = 48, + }, + [VCAP_AF_DMAC_OFFSET_ENA] = { + .type = VCAP_FIELD_BIT, + .offset = 256, + .width = 1, + }, + [VCAP_AF_PTP_MASTER_SEL] = { + .type = VCAP_FIELD_U32, + .offset = 257, + .width = 2, + }, + [VCAP_AF_LOG_MSG_INTERVAL] = { + .type = VCAP_FIELD_U32, + .offset = 259, + .width = 4, + }, + [VCAP_AF_SIP_IDX] = { + .type = VCAP_FIELD_U32, + .offset = 263, + .width = 5, + }, + [VCAP_AF_RLEG_STAT_IDX] = { + .type = VCAP_FIELD_U32, + .offset = 268, + .width = 3, + }, + [VCAP_AF_IGR_ACL_ENA] = { + .type = VCAP_FIELD_BIT, + .offset = 271, + .width = 1, + }, + [VCAP_AF_EGR_ACL_ENA] = { + .type = VCAP_FIELD_BIT, + .offset = 272, + .width = 1, + }, +}; + +static const struct vcap_field es2_base_type_actionfield[] = { + [VCAP_AF_HIT_ME_ONCE] = { + .type = VCAP_FIELD_BIT, + .offset = 0, + .width = 1, + }, + [VCAP_AF_INTR_ENA] = { + .type = VCAP_FIELD_BIT, + .offset = 1, + .width = 1, + }, + [VCAP_AF_FWD_MODE] = { + .type = VCAP_FIELD_U32, + .offset = 2, + .width = 2, + }, + [VCAP_AF_COPY_QUEUE_NUM] = { + .type = VCAP_FIELD_U32, + .offset = 4, + .width = 16, + }, + [VCAP_AF_COPY_PORT_NUM] = { + .type = VCAP_FIELD_U32, + 
.offset = 20, + .width = 7, + }, + [VCAP_AF_MIRROR_PROBE_ID] = { + .type = VCAP_FIELD_U32, + .offset = 27, + .width = 2, + }, + [VCAP_AF_CPU_COPY_ENA] = { + .type = VCAP_FIELD_BIT, + .offset = 29, + .width = 1, + }, + [VCAP_AF_CPU_QUEUE_NUM] = { + .type = VCAP_FIELD_U32, + .offset = 30, + .width = 3, + }, + [VCAP_AF_POLICE_ENA] = { + .type = VCAP_FIELD_BIT, + .offset = 33, + .width = 1, + }, + [VCAP_AF_POLICE_REMARK] = { + .type = VCAP_FIELD_BIT, + .offset = 34, + .width = 1, + }, + [VCAP_AF_POLICE_IDX] = { + .type = VCAP_FIELD_U32, + .offset = 35, + .width = 6, + }, + [VCAP_AF_ES2_REW_CMD] = { + .type = VCAP_FIELD_U32, + .offset = 41, + .width = 3, + }, + [VCAP_AF_CNT_ID] = { + .type = VCAP_FIELD_U32, + .offset = 44, + .width = 11, + }, + [VCAP_AF_IGNORE_PIPELINE_CTRL] = { + .type = VCAP_FIELD_BIT, + .offset = 55, + .width = 1, + }, +}; + +/* actionfield_set */ +static const struct vcap_set is0_actionfield_set[] = { + [VCAP_AFS_MLBS] = { + .type_id = 0, + .sw_per_item = 2, + .sw_cnt = 6, + }, + [VCAP_AFS_MLBS_REDUCED] = { + .type_id = 0, + .sw_per_item = 1, + .sw_cnt = 12, + }, + [VCAP_AFS_CLASSIFICATION] = { + .type_id = 1, + .sw_per_item = 2, + .sw_cnt = 6, + }, + [VCAP_AFS_FULL] = { + .type_id = -1, + .sw_per_item = 3, + .sw_cnt = 4, + }, + [VCAP_AFS_CLASS_REDUCED] = { + .type_id = 1, + .sw_per_item = 1, + .sw_cnt = 12, + }, +}; + +static const struct vcap_set is2_actionfield_set[] = { + [VCAP_AFS_BASE_TYPE] = { + .type_id = -1, + .sw_per_item = 3, + .sw_cnt = 4, + }, +}; + +static const struct vcap_set es2_actionfield_set[] = { + [VCAP_AFS_BASE_TYPE] = { + .type_id = -1, + .sw_per_item = 3, + .sw_cnt = 4, + }, +}; + +/* actionfield_set map */ +static const struct vcap_field *is0_actionfield_set_map[] = { + [VCAP_AFS_MLBS] = is0_mlbs_actionfield, + [VCAP_AFS_MLBS_REDUCED] = is0_mlbs_reduced_actionfield, + [VCAP_AFS_CLASSIFICATION] = is0_classification_actionfield, + [VCAP_AFS_FULL] = is0_full_actionfield, + [VCAP_AFS_CLASS_REDUCED] = is0_class_reduced_actionfield, +}; + +static const struct vcap_field *is2_actionfield_set_map[] = { + [VCAP_AFS_BASE_TYPE] = is2_base_type_actionfield, +}; + +static const struct vcap_field *es2_actionfield_set_map[] = { + [VCAP_AFS_BASE_TYPE] = es2_base_type_actionfield, +}; + +/* actionfield_set map size */ +static int is0_actionfield_set_map_size[] = { + [VCAP_AFS_MLBS] = ARRAY_SIZE(is0_mlbs_actionfield), + [VCAP_AFS_MLBS_REDUCED] = ARRAY_SIZE(is0_mlbs_reduced_actionfield), + [VCAP_AFS_CLASSIFICATION] = ARRAY_SIZE(is0_classification_actionfield), + [VCAP_AFS_FULL] = ARRAY_SIZE(is0_full_actionfield), + [VCAP_AFS_CLASS_REDUCED] = ARRAY_SIZE(is0_class_reduced_actionfield), +}; + +static int is2_actionfield_set_map_size[] = { + [VCAP_AFS_BASE_TYPE] = ARRAY_SIZE(is2_base_type_actionfield), +}; + +static int es2_actionfield_set_map_size[] = { + [VCAP_AFS_BASE_TYPE] = ARRAY_SIZE(es2_base_type_actionfield), +}; + +/* Type Groups */ +static const struct vcap_typegroup is0_x12_keyfield_set_typegroups[] = { + { + .offset = 0, + .width = 5, + .value = 16, + }, + { + .offset = 52, + .width = 1, + .value = 0, + }, + { + .offset = 104, + .width = 2, + .value = 0, + }, + { + .offset = 156, + .width = 3, + .value = 0, + }, + { + .offset = 208, + .width = 2, + .value = 0, + }, + { + .offset = 260, + .width = 1, + .value = 0, + }, + { + .offset = 312, + .width = 4, + .value = 0, + }, + { + .offset = 364, + .width = 1, + .value = 0, + }, + { + .offset = 416, + .width = 2, + .value = 0, + }, + { + .offset = 468, + .width = 3, + .value = 0, + }, + { + .offset = 520, + .width 
= 2, + .value = 0, + }, + { + .offset = 572, + .width = 1, + .value = 0, + }, + {} +}; + +static const struct vcap_typegroup is0_x6_keyfield_set_typegroups[] = { + { + .offset = 0, + .width = 4, + .value = 8, + }, + { + .offset = 52, + .width = 1, + .value = 0, + }, + { + .offset = 104, + .width = 2, + .value = 0, + }, + { + .offset = 156, + .width = 3, + .value = 0, + }, + { + .offset = 208, + .width = 2, + .value = 0, + }, + { + .offset = 260, + .width = 1, + .value = 0, + }, + {} +}; + +static const struct vcap_typegroup is0_x3_keyfield_set_typegroups[] = { + { + .offset = 0, + .width = 3, + .value = 4, + }, + { + .offset = 52, + .width = 2, + .value = 0, + }, + { + .offset = 104, + .width = 2, + .value = 0, + }, + {} +}; + +static const struct vcap_typegroup is0_x2_keyfield_set_typegroups[] = { + { + .offset = 0, + .width = 2, + .value = 2, + }, + { + .offset = 52, + .width = 1, + .value = 0, + }, + {} +}; + +static const struct vcap_typegroup is0_x1_keyfield_set_typegroups[] = { + {} +}; + +static const struct vcap_typegroup is2_x12_keyfield_set_typegroups[] = { + { + .offset = 0, + .width = 3, + .value = 4, + }, + { + .offset = 156, + .width = 1, + .value = 0, + }, + { + .offset = 312, + .width = 2, + .value = 0, + }, + { + .offset = 468, + .width = 1, + .value = 0, + }, + {} +}; + +static const struct vcap_typegroup is2_x6_keyfield_set_typegroups[] = { + { + .offset = 0, + .width = 2, + .value = 2, + }, + { + .offset = 156, + .width = 1, + .value = 0, + }, + {} +}; + +static const struct vcap_typegroup is2_x3_keyfield_set_typegroups[] = { + {} +}; + +static const struct vcap_typegroup is2_x1_keyfield_set_typegroups[] = { + {} +}; + +static const struct vcap_typegroup es2_x12_keyfield_set_typegroups[] = { + { + .offset = 0, + .width = 3, + .value = 4, + }, + { + .offset = 156, + .width = 1, + .value = 0, + }, + { + .offset = 312, + .width = 2, + .value = 0, + }, + { + .offset = 468, + .width = 1, + .value = 0, + }, + {} +}; + +static const struct vcap_typegroup es2_x6_keyfield_set_typegroups[] = { + { + .offset = 0, + .width = 2, + .value = 2, + }, + { + .offset = 156, + .width = 1, + .value = 0, + }, + {} +}; + +static const struct vcap_typegroup es2_x3_keyfield_set_typegroups[] = { + { + .offset = 0, + .width = 1, + .value = 1, + }, + {} +}; + +static const struct vcap_typegroup es2_x1_keyfield_set_typegroups[] = { + {} +}; + +static const struct vcap_typegroup *is0_keyfield_set_typegroups[] = { + [12] = is0_x12_keyfield_set_typegroups, + [6] = is0_x6_keyfield_set_typegroups, + [3] = is0_x3_keyfield_set_typegroups, + [2] = is0_x2_keyfield_set_typegroups, + [1] = is0_x1_keyfield_set_typegroups, + [13] = NULL, +}; + +static const struct vcap_typegroup *is2_keyfield_set_typegroups[] = { + [12] = is2_x12_keyfield_set_typegroups, + [6] = is2_x6_keyfield_set_typegroups, + [3] = is2_x3_keyfield_set_typegroups, + [1] = is2_x1_keyfield_set_typegroups, + [13] = NULL, +}; + +static const struct vcap_typegroup *es2_keyfield_set_typegroups[] = { + [12] = es2_x12_keyfield_set_typegroups, + [6] = es2_x6_keyfield_set_typegroups, + [3] = es2_x3_keyfield_set_typegroups, + [1] = es2_x1_keyfield_set_typegroups, + [13] = NULL, +}; + +static const struct vcap_typegroup is0_x3_actionfield_set_typegroups[] = { + { + .offset = 0, + .width = 3, + .value = 4, + }, + { + .offset = 110, + .width = 2, + .value = 0, + }, + { + .offset = 220, + .width = 2, + .value = 0, + }, + {} +}; + +static const struct vcap_typegroup is0_x2_actionfield_set_typegroups[] = { + { + .offset = 0, + .width = 2, + .value = 2, + }, + 
{ + .offset = 110, + .width = 1, + .value = 0, + }, + {} +}; + +static const struct vcap_typegroup is0_x1_actionfield_set_typegroups[] = { + { + .offset = 0, + .width = 1, + .value = 1, + }, + {} +}; + +static const struct vcap_typegroup is2_x3_actionfield_set_typegroups[] = { + { + .offset = 0, + .width = 2, + .value = 2, + }, + { + .offset = 110, + .width = 1, + .value = 0, + }, + { + .offset = 220, + .width = 1, + .value = 0, + }, + {} +}; + +static const struct vcap_typegroup is2_x1_actionfield_set_typegroups[] = { + {} +}; + +static const struct vcap_typegroup es2_x3_actionfield_set_typegroups[] = { + { + .offset = 0, + .width = 2, + .value = 2, + }, + { + .offset = 21, + .width = 1, + .value = 0, + }, + { + .offset = 42, + .width = 1, + .value = 0, + }, + {} +}; + +static const struct vcap_typegroup es2_x1_actionfield_set_typegroups[] = { + {} +}; + +static const struct vcap_typegroup *is0_actionfield_set_typegroups[] = { + [3] = is0_x3_actionfield_set_typegroups, + [2] = is0_x2_actionfield_set_typegroups, + [1] = is0_x1_actionfield_set_typegroups, + [13] = NULL, +}; + +static const struct vcap_typegroup *is2_actionfield_set_typegroups[] = { + [3] = is2_x3_actionfield_set_typegroups, + [1] = is2_x1_actionfield_set_typegroups, + [13] = NULL, +}; + +static const struct vcap_typegroup *es2_actionfield_set_typegroups[] = { + [3] = es2_x3_actionfield_set_typegroups, + [1] = es2_x1_actionfield_set_typegroups, + [13] = NULL, +}; + +/* Keyfieldset names */ +static const char * const vcap_keyfield_set_names[] = { + [VCAP_KFS_NO_VALUE] = "(None)", + [VCAP_KFS_ARP] = "VCAP_KFS_ARP", + [VCAP_KFS_ETAG] = "VCAP_KFS_ETAG", + [VCAP_KFS_IP4_OTHER] = "VCAP_KFS_IP4_OTHER", + [VCAP_KFS_IP4_TCP_UDP] = "VCAP_KFS_IP4_TCP_UDP", + [VCAP_KFS_IP4_VID] = "VCAP_KFS_IP4_VID", + [VCAP_KFS_IP6_STD] = "VCAP_KFS_IP6_STD", + [VCAP_KFS_IP6_VID] = "VCAP_KFS_IP6_VID", + [VCAP_KFS_IP_7TUPLE] = "VCAP_KFS_IP_7TUPLE", + [VCAP_KFS_LL_FULL] = "VCAP_KFS_LL_FULL", + [VCAP_KFS_MAC_ETYPE] = "VCAP_KFS_MAC_ETYPE", + [VCAP_KFS_MLL] = "VCAP_KFS_MLL", + [VCAP_KFS_NORMAL] = "VCAP_KFS_NORMAL", + [VCAP_KFS_NORMAL_5TUPLE_IP4] = "VCAP_KFS_NORMAL_5TUPLE_IP4", + [VCAP_KFS_NORMAL_7TUPLE] = "VCAP_KFS_NORMAL_7TUPLE", + [VCAP_KFS_PURE_5TUPLE_IP4] = "VCAP_KFS_PURE_5TUPLE_IP4", + [VCAP_KFS_TRI_VID] = "VCAP_KFS_TRI_VID", +}; + +/* Actionfieldset names */ +static const char * const vcap_actionfield_set_names[] = { + [VCAP_AFS_NO_VALUE] = "(None)", + [VCAP_AFS_BASE_TYPE] = "VCAP_AFS_BASE_TYPE", + [VCAP_AFS_CLASSIFICATION] = "VCAP_AFS_CLASSIFICATION", + [VCAP_AFS_CLASS_REDUCED] = "VCAP_AFS_CLASS_REDUCED", + [VCAP_AFS_FULL] = "VCAP_AFS_FULL", + [VCAP_AFS_MLBS] = "VCAP_AFS_MLBS", + [VCAP_AFS_MLBS_REDUCED] = "VCAP_AFS_MLBS_REDUCED", +}; + +/* Keyfield names */ +static const char * const vcap_keyfield_names[] = { + [VCAP_KF_NO_VALUE] = "(None)", + [VCAP_KF_8021BR_ECID_BASE] = "8021BR_ECID_BASE", + [VCAP_KF_8021BR_ECID_EXT] = "8021BR_ECID_EXT", + [VCAP_KF_8021BR_E_TAGGED] = "8021BR_E_TAGGED", + [VCAP_KF_8021BR_GRP] = "8021BR_GRP", + [VCAP_KF_8021BR_IGR_ECID_BASE] = "8021BR_IGR_ECID_BASE", + [VCAP_KF_8021BR_IGR_ECID_EXT] = "8021BR_IGR_ECID_EXT", + [VCAP_KF_8021Q_DEI0] = "8021Q_DEI0", + [VCAP_KF_8021Q_DEI1] = "8021Q_DEI1", + [VCAP_KF_8021Q_DEI2] = "8021Q_DEI2", + [VCAP_KF_8021Q_DEI_CLS] = "8021Q_DEI_CLS", + [VCAP_KF_8021Q_PCP0] = "8021Q_PCP0", + [VCAP_KF_8021Q_PCP1] = "8021Q_PCP1", + [VCAP_KF_8021Q_PCP2] = "8021Q_PCP2", + [VCAP_KF_8021Q_PCP_CLS] = "8021Q_PCP_CLS", + [VCAP_KF_8021Q_TPID0] = "8021Q_TPID0", + [VCAP_KF_8021Q_TPID1] = "8021Q_TPID1", + 
[VCAP_KF_8021Q_TPID2] = "8021Q_TPID2", + [VCAP_KF_8021Q_VID0] = "8021Q_VID0", + [VCAP_KF_8021Q_VID1] = "8021Q_VID1", + [VCAP_KF_8021Q_VID2] = "8021Q_VID2", + [VCAP_KF_8021Q_VID_CLS] = "8021Q_VID_CLS", + [VCAP_KF_8021Q_VLAN_TAGGED_IS] = "8021Q_VLAN_TAGGED_IS", + [VCAP_KF_8021Q_VLAN_TAGS] = "8021Q_VLAN_TAGS", + [VCAP_KF_ACL_GRP_ID] = "ACL_GRP_ID", + [VCAP_KF_ARP_ADDR_SPACE_OK_IS] = "ARP_ADDR_SPACE_OK_IS", + [VCAP_KF_ARP_LEN_OK_IS] = "ARP_LEN_OK_IS", + [VCAP_KF_ARP_OPCODE] = "ARP_OPCODE", + [VCAP_KF_ARP_OPCODE_UNKNOWN_IS] = "ARP_OPCODE_UNKNOWN_IS", + [VCAP_KF_ARP_PROTO_SPACE_OK_IS] = "ARP_PROTO_SPACE_OK_IS", + [VCAP_KF_ARP_SENDER_MATCH_IS] = "ARP_SENDER_MATCH_IS", + [VCAP_KF_ARP_TGT_MATCH_IS] = "ARP_TGT_MATCH_IS", + [VCAP_KF_COSID_CLS] = "COSID_CLS", + [VCAP_KF_DST_ENTRY] = "DST_ENTRY", + [VCAP_KF_ES0_ISDX_KEY_ENA] = "ES0_ISDX_KEY_ENA", + [VCAP_KF_ETYPE] = "ETYPE", + [VCAP_KF_ETYPE_LEN_IS] = "ETYPE_LEN_IS", + [VCAP_KF_ETYPE_MPLS] = "ETYPE_MPLS", + [VCAP_KF_IF_EGR_PORT_MASK] = "IF_EGR_PORT_MASK", + [VCAP_KF_IF_EGR_PORT_MASK_RNG] = "IF_EGR_PORT_MASK_RNG", + [VCAP_KF_IF_IGR_PORT] = "IF_IGR_PORT", + [VCAP_KF_IF_IGR_PORT_MASK] = "IF_IGR_PORT_MASK", + [VCAP_KF_IF_IGR_PORT_MASK_L3] = "IF_IGR_PORT_MASK_L3", + [VCAP_KF_IF_IGR_PORT_MASK_RNG] = "IF_IGR_PORT_MASK_RNG", + [VCAP_KF_IF_IGR_PORT_MASK_SEL] = "IF_IGR_PORT_MASK_SEL", + [VCAP_KF_IF_IGR_PORT_SEL] = "IF_IGR_PORT_SEL", + [VCAP_KF_IP4_IS] = "IP4_IS", + [VCAP_KF_IP_MC_IS] = "IP_MC_IS", + [VCAP_KF_IP_PAYLOAD_5TUPLE] = "IP_PAYLOAD_5TUPLE", + [VCAP_KF_IP_SNAP_IS] = "IP_SNAP_IS", + [VCAP_KF_ISDX_CLS] = "ISDX_CLS", + [VCAP_KF_ISDX_GT0_IS] = "ISDX_GT0_IS", + [VCAP_KF_L2_BC_IS] = "L2_BC_IS", + [VCAP_KF_L2_DMAC] = "L2_DMAC", + [VCAP_KF_L2_FWD_IS] = "L2_FWD_IS", + [VCAP_KF_L2_MC_IS] = "L2_MC_IS", + [VCAP_KF_L2_PAYLOAD_ETYPE] = "L2_PAYLOAD_ETYPE", + [VCAP_KF_L2_SMAC] = "L2_SMAC", + [VCAP_KF_L3_DIP_EQ_SIP_IS] = "L3_DIP_EQ_SIP_IS", + [VCAP_KF_L3_DMAC_DIP_MATCH] = "L3_DMAC_DIP_MATCH", + [VCAP_KF_L3_DPL_CLS] = "L3_DPL_CLS", + [VCAP_KF_L3_DSCP] = "L3_DSCP", + [VCAP_KF_L3_DST_IS] = "L3_DST_IS", + [VCAP_KF_L3_FRAGMENT_TYPE] = "L3_FRAGMENT_TYPE", + [VCAP_KF_L3_FRAG_INVLD_L4_LEN] = "L3_FRAG_INVLD_L4_LEN", + [VCAP_KF_L3_IP4_DIP] = "L3_IP4_DIP", + [VCAP_KF_L3_IP4_SIP] = "L3_IP4_SIP", + [VCAP_KF_L3_IP6_DIP] = "L3_IP6_DIP", + [VCAP_KF_L3_IP6_SIP] = "L3_IP6_SIP", + [VCAP_KF_L3_IP_PROTO] = "L3_IP_PROTO", + [VCAP_KF_L3_OPTIONS_IS] = "L3_OPTIONS_IS", + [VCAP_KF_L3_PAYLOAD] = "L3_PAYLOAD", + [VCAP_KF_L3_RT_IS] = "L3_RT_IS", + [VCAP_KF_L3_SMAC_SIP_MATCH] = "L3_SMAC_SIP_MATCH", + [VCAP_KF_L3_TOS] = "L3_TOS", + [VCAP_KF_L3_TTL_GT0] = "L3_TTL_GT0", + [VCAP_KF_L4_ACK] = "L4_ACK", + [VCAP_KF_L4_DPORT] = "L4_DPORT", + [VCAP_KF_L4_FIN] = "L4_FIN", + [VCAP_KF_L4_PAYLOAD] = "L4_PAYLOAD", + [VCAP_KF_L4_PSH] = "L4_PSH", + [VCAP_KF_L4_RNG] = "L4_RNG", + [VCAP_KF_L4_RST] = "L4_RST", + [VCAP_KF_L4_SEQUENCE_EQ0_IS] = "L4_SEQUENCE_EQ0_IS", + [VCAP_KF_L4_SPORT] = "L4_SPORT", + [VCAP_KF_L4_SPORT_EQ_DPORT_IS] = "L4_SPORT_EQ_DPORT_IS", + [VCAP_KF_L4_SYN] = "L4_SYN", + [VCAP_KF_L4_URG] = "L4_URG", + [VCAP_KF_LOOKUP_FIRST_IS] = "LOOKUP_FIRST_IS", + [VCAP_KF_LOOKUP_GEN_IDX] = "LOOKUP_GEN_IDX", + [VCAP_KF_LOOKUP_GEN_IDX_SEL] = "LOOKUP_GEN_IDX_SEL", + [VCAP_KF_LOOKUP_PAG] = "LOOKUP_PAG", + [VCAP_KF_MIRROR_ENA] = "MIRROR_ENA", + [VCAP_KF_OAM_CCM_CNTS_EQ0] = "OAM_CCM_CNTS_EQ0", + [VCAP_KF_OAM_MEL_FLAGS] = "OAM_MEL_FLAGS", + [VCAP_KF_OAM_Y1731_IS] = "OAM_Y1731_IS", + [VCAP_KF_PROT_ACTIVE] = "PROT_ACTIVE", + [VCAP_KF_TCP_IS] = "TCP_IS", + [VCAP_KF_TCP_UDP_IS] = "TCP_UDP_IS", + [VCAP_KF_TYPE] = "TYPE", +}; + 
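The key and action field name tables in this kunit model are plain enum-indexed string arrays, so a consumer can turn a VCAP_KF_*/VCAP_AF_* value into a printable name when dumping rules. A minimal sketch of how such a table is typically read, assuming the VCAP_KF_* constants are values of the driver's enum vcap_key_field; the helper name and the "(Unknown)" fallback are illustrative assumptions, not taken from this patch:

static const char *kunit_test_keyfield_name(enum vcap_key_field kf)
{
	/* Entries omitted from the designated initializer are NULL. */
	if ((size_t)kf >= ARRAY_SIZE(vcap_keyfield_names) ||
	    !vcap_keyfield_names[kf])
		return "(Unknown)";
	return vcap_keyfield_names[kf];
}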
+/* Actionfield names */ +static const char * const vcap_actionfield_names[] = { + [VCAP_AF_NO_VALUE] = "(None)", + [VCAP_AF_ACL_MAC] = "ACL_MAC", + [VCAP_AF_ACL_RT_MODE] = "ACL_RT_MODE", + [VCAP_AF_CLS_VID_SEL] = "CLS_VID_SEL", + [VCAP_AF_CNT_ID] = "CNT_ID", + [VCAP_AF_COPY_PORT_NUM] = "COPY_PORT_NUM", + [VCAP_AF_COPY_QUEUE_NUM] = "COPY_QUEUE_NUM", + [VCAP_AF_COSID_ENA] = "COSID_ENA", + [VCAP_AF_COSID_VAL] = "COSID_VAL", + [VCAP_AF_CPU_COPY_ENA] = "CPU_COPY_ENA", + [VCAP_AF_CPU_DIS] = "CPU_DIS", + [VCAP_AF_CPU_ENA] = "CPU_ENA", + [VCAP_AF_CPU_Q] = "CPU_Q", + [VCAP_AF_CPU_QUEUE_NUM] = "CPU_QUEUE_NUM", + [VCAP_AF_CUSTOM_ACE_ENA] = "CUSTOM_ACE_ENA", + [VCAP_AF_CUSTOM_ACE_OFFSET] = "CUSTOM_ACE_OFFSET", + [VCAP_AF_DEI_ENA] = "DEI_ENA", + [VCAP_AF_DEI_VAL] = "DEI_VAL", + [VCAP_AF_DLB_OFFSET] = "DLB_OFFSET", + [VCAP_AF_DMAC_OFFSET_ENA] = "DMAC_OFFSET_ENA", + [VCAP_AF_DP_ENA] = "DP_ENA", + [VCAP_AF_DP_VAL] = "DP_VAL", + [VCAP_AF_DSCP_ENA] = "DSCP_ENA", + [VCAP_AF_DSCP_VAL] = "DSCP_VAL", + [VCAP_AF_EGR_ACL_ENA] = "EGR_ACL_ENA", + [VCAP_AF_ES2_REW_CMD] = "ES2_REW_CMD", + [VCAP_AF_FWD_DIS] = "FWD_DIS", + [VCAP_AF_FWD_MODE] = "FWD_MODE", + [VCAP_AF_FWD_TYPE] = "FWD_TYPE", + [VCAP_AF_GVID_ADD_REPLACE_SEL] = "GVID_ADD_REPLACE_SEL", + [VCAP_AF_HIT_ME_ONCE] = "HIT_ME_ONCE", + [VCAP_AF_IGNORE_PIPELINE_CTRL] = "IGNORE_PIPELINE_CTRL", + [VCAP_AF_IGR_ACL_ENA] = "IGR_ACL_ENA", + [VCAP_AF_INJ_MASQ_ENA] = "INJ_MASQ_ENA", + [VCAP_AF_INJ_MASQ_LPORT] = "INJ_MASQ_LPORT", + [VCAP_AF_INJ_MASQ_PORT] = "INJ_MASQ_PORT", + [VCAP_AF_INTR_ENA] = "INTR_ENA", + [VCAP_AF_ISDX_ADD_REPLACE_SEL] = "ISDX_ADD_REPLACE_SEL", + [VCAP_AF_ISDX_VAL] = "ISDX_VAL", + [VCAP_AF_IS_INNER_ACL] = "IS_INNER_ACL", + [VCAP_AF_L3_MAC_UPDATE_DIS] = "L3_MAC_UPDATE_DIS", + [VCAP_AF_LOG_MSG_INTERVAL] = "LOG_MSG_INTERVAL", + [VCAP_AF_LPM_AFFIX_ENA] = "LPM_AFFIX_ENA", + [VCAP_AF_LPM_AFFIX_VAL] = "LPM_AFFIX_VAL", + [VCAP_AF_LPORT_ENA] = "LPORT_ENA", + [VCAP_AF_LRN_DIS] = "LRN_DIS", + [VCAP_AF_MAP_IDX] = "MAP_IDX", + [VCAP_AF_MAP_KEY] = "MAP_KEY", + [VCAP_AF_MAP_LOOKUP_SEL] = "MAP_LOOKUP_SEL", + [VCAP_AF_MASK_MODE] = "MASK_MODE", + [VCAP_AF_MATCH_ID] = "MATCH_ID", + [VCAP_AF_MATCH_ID_MASK] = "MATCH_ID_MASK", + [VCAP_AF_MIP_SEL] = "MIP_SEL", + [VCAP_AF_MIRROR_PROBE] = "MIRROR_PROBE", + [VCAP_AF_MIRROR_PROBE_ID] = "MIRROR_PROBE_ID", + [VCAP_AF_MPLS_IP_CTRL_ENA] = "MPLS_IP_CTRL_ENA", + [VCAP_AF_MPLS_MEP_ENA] = "MPLS_MEP_ENA", + [VCAP_AF_MPLS_MIP_ENA] = "MPLS_MIP_ENA", + [VCAP_AF_MPLS_OAM_FLAVOR] = "MPLS_OAM_FLAVOR", + [VCAP_AF_MPLS_OAM_TYPE] = "MPLS_OAM_TYPE", + [VCAP_AF_NUM_VLD_LABELS] = "NUM_VLD_LABELS", + [VCAP_AF_NXT_IDX] = "NXT_IDX", + [VCAP_AF_NXT_IDX_CTRL] = "NXT_IDX_CTRL", + [VCAP_AF_NXT_KEY_TYPE] = "NXT_KEY_TYPE", + [VCAP_AF_NXT_NORMALIZE] = "NXT_NORMALIZE", + [VCAP_AF_NXT_NORM_W16_OFFSET] = "NXT_NORM_W16_OFFSET", + [VCAP_AF_NXT_NORM_W32_OFFSET] = "NXT_NORM_W32_OFFSET", + [VCAP_AF_NXT_OFFSET_FROM_TYPE] = "NXT_OFFSET_FROM_TYPE", + [VCAP_AF_NXT_TYPE_AFTER_OFFSET] = "NXT_TYPE_AFTER_OFFSET", + [VCAP_AF_OAM_IP_BFD_ENA] = "OAM_IP_BFD_ENA", + [VCAP_AF_OAM_TWAMP_ENA] = "OAM_TWAMP_ENA", + [VCAP_AF_OAM_Y1731_SEL] = "OAM_Y1731_SEL", + [VCAP_AF_PAG_OVERRIDE_MASK] = "PAG_OVERRIDE_MASK", + [VCAP_AF_PAG_VAL] = "PAG_VAL", + [VCAP_AF_PCP_ENA] = "PCP_ENA", + [VCAP_AF_PCP_VAL] = "PCP_VAL", + [VCAP_AF_PIPELINE_ACT_SEL] = "PIPELINE_ACT_SEL", + [VCAP_AF_PIPELINE_FORCE_ENA] = "PIPELINE_FORCE_ENA", + [VCAP_AF_PIPELINE_PT] = "PIPELINE_PT", + [VCAP_AF_PIPELINE_PT_REDUCED] = "PIPELINE_PT_REDUCED", + [VCAP_AF_POLICE_ENA] = "POLICE_ENA", + [VCAP_AF_POLICE_IDX] = "POLICE_IDX", + 
[VCAP_AF_POLICE_REMARK] = "POLICE_REMARK", + [VCAP_AF_PORT_MASK] = "PORT_MASK", + [VCAP_AF_PTP_MASTER_SEL] = "PTP_MASTER_SEL", + [VCAP_AF_QOS_ENA] = "QOS_ENA", + [VCAP_AF_QOS_VAL] = "QOS_VAL", + [VCAP_AF_REW_CMD] = "REW_CMD", + [VCAP_AF_RLEG_DMAC_CHK_DIS] = "RLEG_DMAC_CHK_DIS", + [VCAP_AF_RLEG_STAT_IDX] = "RLEG_STAT_IDX", + [VCAP_AF_RSDX_ENA] = "RSDX_ENA", + [VCAP_AF_RSDX_VAL] = "RSDX_VAL", + [VCAP_AF_RSVD_LBL_VAL] = "RSVD_LBL_VAL", + [VCAP_AF_RT_DIS] = "RT_DIS", + [VCAP_AF_RT_SEL] = "RT_SEL", + [VCAP_AF_S2_KEY_SEL_ENA] = "S2_KEY_SEL_ENA", + [VCAP_AF_S2_KEY_SEL_IDX] = "S2_KEY_SEL_IDX", + [VCAP_AF_SAM_SEQ_ENA] = "SAM_SEQ_ENA", + [VCAP_AF_SIP_IDX] = "SIP_IDX", + [VCAP_AF_SWAP_MAC_ENA] = "SWAP_MAC_ENA", + [VCAP_AF_TCP_UDP_DPORT] = "TCP_UDP_DPORT", + [VCAP_AF_TCP_UDP_ENA] = "TCP_UDP_ENA", + [VCAP_AF_TCP_UDP_SPORT] = "TCP_UDP_SPORT", + [VCAP_AF_TC_ENA] = "TC_ENA", + [VCAP_AF_TC_LABEL] = "TC_LABEL", + [VCAP_AF_TPID_SEL] = "TPID_SEL", + [VCAP_AF_TTL_DECR_DIS] = "TTL_DECR_DIS", + [VCAP_AF_TTL_ENA] = "TTL_ENA", + [VCAP_AF_TTL_LABEL] = "TTL_LABEL", + [VCAP_AF_TTL_UPDATE_ENA] = "TTL_UPDATE_ENA", + [VCAP_AF_TYPE] = "TYPE", + [VCAP_AF_VID_VAL] = "VID_VAL", + [VCAP_AF_VLAN_POP_CNT] = "VLAN_POP_CNT", + [VCAP_AF_VLAN_POP_CNT_ENA] = "VLAN_POP_CNT_ENA", + [VCAP_AF_VLAN_PUSH_CNT] = "VLAN_PUSH_CNT", + [VCAP_AF_VLAN_PUSH_CNT_ENA] = "VLAN_PUSH_CNT_ENA", + [VCAP_AF_VLAN_WAS_TAGGED] = "VLAN_WAS_TAGGED", +}; + +/* VCAPs */ +const struct vcap_info kunit_test_vcaps[] = { + [VCAP_TYPE_IS0] = { + .name = "is0", + .rows = 1024, + .sw_count = 12, + .sw_width = 52, + .sticky_width = 1, + .act_width = 110, + .default_cnt = 140, + .require_cnt_dis = 0, + .version = 1, + .keyfield_set = is0_keyfield_set, + .keyfield_set_size = ARRAY_SIZE(is0_keyfield_set), + .actionfield_set = is0_actionfield_set, + .actionfield_set_size = ARRAY_SIZE(is0_actionfield_set), + .keyfield_set_map = is0_keyfield_set_map, + .keyfield_set_map_size = is0_keyfield_set_map_size, + .actionfield_set_map = is0_actionfield_set_map, + .actionfield_set_map_size = is0_actionfield_set_map_size, + .keyfield_set_typegroups = is0_keyfield_set_typegroups, + .actionfield_set_typegroups = is0_actionfield_set_typegroups, + }, + [VCAP_TYPE_IS2] = { + .name = "is2", + .rows = 256, + .sw_count = 12, + .sw_width = 52, + .sticky_width = 1, + .act_width = 110, + .default_cnt = 73, + .require_cnt_dis = 0, + .version = 1, + .keyfield_set = is2_keyfield_set, + .keyfield_set_size = ARRAY_SIZE(is2_keyfield_set), + .actionfield_set = is2_actionfield_set, + .actionfield_set_size = ARRAY_SIZE(is2_actionfield_set), + .keyfield_set_map = is2_keyfield_set_map, + .keyfield_set_map_size = is2_keyfield_set_map_size, + .actionfield_set_map = is2_actionfield_set_map, + .actionfield_set_map_size = is2_actionfield_set_map_size, + .keyfield_set_typegroups = is2_keyfield_set_typegroups, + .actionfield_set_typegroups = is2_actionfield_set_typegroups, + }, + [VCAP_TYPE_ES2] = { + .name = "es2", + .rows = 1024, + .sw_count = 12, + .sw_width = 52, + .sticky_width = 1, + .act_width = 21, + .default_cnt = 74, + .require_cnt_dis = 0, + .version = 1, + .keyfield_set = es2_keyfield_set, + .keyfield_set_size = ARRAY_SIZE(es2_keyfield_set), + .actionfield_set = es2_actionfield_set, + .actionfield_set_size = ARRAY_SIZE(es2_actionfield_set), + .keyfield_set_map = es2_keyfield_set_map, + .keyfield_set_map_size = es2_keyfield_set_map_size, + .actionfield_set_map = es2_actionfield_set_map, + .actionfield_set_map_size = es2_actionfield_set_map_size, + .keyfield_set_typegroups = es2_keyfield_set_typegroups, + 
.actionfield_set_typegroups = es2_actionfield_set_typegroups, + }, +}; + +const struct vcap_statistics kunit_test_vcap_stats = { + .name = "kunit_test", + .count = 3, + .keyfield_set_names = vcap_keyfield_set_names, + .actionfield_set_names = vcap_actionfield_set_names, + .keyfield_names = vcap_keyfield_names, + .actionfield_names = vcap_actionfield_names, +}; diff --git a/drivers/net/ethernet/microchip/vcap/vcap_model_kunit.h b/drivers/net/ethernet/microchip/vcap/vcap_model_kunit.h new file mode 100644 index 000000000000..b5a74f0eef9b --- /dev/null +++ b/drivers/net/ethernet/microchip/vcap/vcap_model_kunit.h @@ -0,0 +1,10 @@ +/* SPDX-License-Identifier: BSD-3-Clause */ +/* Copyright (C) 2022 Microchip Technology Inc. and its subsidiaries. + * Microchip VCAP test model interface for kunit testing + */ + +#ifndef __VCAP_MODEL_KUNIT_H__ +#define __VCAP_MODEL_KUNIT_H__ +extern const struct vcap_info kunit_test_vcaps[]; +extern const struct vcap_statistics kunit_test_vcap_stats; +#endif /* __VCAP_MODEL_KUNIT_H__ */ diff --git a/drivers/net/ethernet/netronome/nfp/flower/lag_conf.c b/drivers/net/ethernet/netronome/nfp/flower/lag_conf.c index e92860e20a24..88d6d992e7d0 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/lag_conf.c +++ b/drivers/net/ethernet/netronome/nfp/flower/lag_conf.c @@ -154,10 +154,11 @@ nfp_fl_lag_find_group_for_master_with_lag(struct nfp_fl_lag *lag, return NULL; } -int nfp_flower_lag_populate_pre_action(struct nfp_app *app, - struct net_device *master, - struct nfp_fl_pre_lag *pre_act, - struct netlink_ext_ack *extack) +static int nfp_fl_lag_get_group_info(struct nfp_app *app, + struct net_device *netdev, + __be16 *group_id, + u8 *batch_ver, + u8 *group_inst) { struct nfp_flower_priv *priv = app->priv; struct nfp_fl_lag_group *group = NULL; @@ -165,23 +166,52 @@ int nfp_flower_lag_populate_pre_action(struct nfp_app *app, mutex_lock(&priv->nfp_lag.lock); group = nfp_fl_lag_find_group_for_master_with_lag(&priv->nfp_lag, - master); + netdev); if (!group) { mutex_unlock(&priv->nfp_lag.lock); - NL_SET_ERR_MSG_MOD(extack, "invalid entry: group does not exist for LAG action"); return -ENOENT; } - pre_act->group_id = cpu_to_be16(group->group_id); - temp_vers = cpu_to_be32(priv->nfp_lag.batch_ver << - NFP_FL_PRE_LAG_VER_OFF); - memcpy(pre_act->lag_version, &temp_vers, 3); - pre_act->instance = group->group_inst; + if (group_id) + *group_id = cpu_to_be16(group->group_id); + + if (batch_ver) { + temp_vers = cpu_to_be32(priv->nfp_lag.batch_ver << + NFP_FL_PRE_LAG_VER_OFF); + memcpy(batch_ver, &temp_vers, 3); + } + + if (group_inst) + *group_inst = group->group_inst; + mutex_unlock(&priv->nfp_lag.lock); return 0; } +int nfp_flower_lag_populate_pre_action(struct nfp_app *app, + struct net_device *master, + struct nfp_fl_pre_lag *pre_act, + struct netlink_ext_ack *extack) +{ + if (nfp_fl_lag_get_group_info(app, master, &pre_act->group_id, + pre_act->lag_version, + &pre_act->instance)) { + NL_SET_ERR_MSG_MOD(extack, "invalid entry: group does not exist for LAG action"); + return -ENOENT; + } + + return 0; +} + +void nfp_flower_lag_get_info_from_netdev(struct nfp_app *app, + struct net_device *netdev, + struct nfp_tun_neigh_lag *lag) +{ + nfp_fl_lag_get_group_info(app, netdev, NULL, + lag->lag_version, &lag->lag_instance); +} + int nfp_flower_lag_get_output_id(struct nfp_app *app, struct net_device *master) { struct nfp_flower_priv *priv = app->priv; diff --git a/drivers/net/ethernet/netronome/nfp/flower/main.c b/drivers/net/ethernet/netronome/nfp/flower/main.c index 
4d960a9641b3..83eaa5ae3cd4 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/main.c +++ b/drivers/net/ethernet/netronome/nfp/flower/main.c @@ -76,7 +76,9 @@ nfp_flower_get_internal_port_id(struct nfp_app *app, struct net_device *netdev) u32 nfp_flower_get_port_id_from_netdev(struct nfp_app *app, struct net_device *netdev) { + struct nfp_flower_priv *priv = app->priv; int ext_port; + int gid; if (nfp_netdev_is_nfp_repr(netdev)) { return nfp_repr_get_port_id(netdev); @@ -86,6 +88,13 @@ u32 nfp_flower_get_port_id_from_netdev(struct nfp_app *app, return 0; return nfp_flower_internal_port_get_port_id(ext_port); + } else if (netif_is_lag_master(netdev) && + priv->flower_ext_feats & NFP_FL_FEATS_TUNNEL_NEIGH_LAG) { + gid = nfp_flower_lag_get_output_id(app, netdev); + if (gid < 0) + return 0; + + return (NFP_FL_LAG_OUT | gid); } return 0; diff --git a/drivers/net/ethernet/netronome/nfp/flower/main.h b/drivers/net/ethernet/netronome/nfp/flower/main.h index cb799d18682d..40372545148e 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/main.h +++ b/drivers/net/ethernet/netronome/nfp/flower/main.h @@ -52,6 +52,7 @@ struct nfp_app; #define NFP_FL_FEATS_QOS_PPS BIT(9) #define NFP_FL_FEATS_QOS_METER BIT(10) #define NFP_FL_FEATS_DECAP_V2 BIT(11) +#define NFP_FL_FEATS_TUNNEL_NEIGH_LAG BIT(12) #define NFP_FL_FEATS_HOST_ACK BIT(31) #define NFP_FL_ENABLE_FLOW_MERGE BIT(0) @@ -69,7 +70,8 @@ struct nfp_app; NFP_FL_FEATS_VLAN_QINQ | \ NFP_FL_FEATS_QOS_PPS | \ NFP_FL_FEATS_QOS_METER | \ - NFP_FL_FEATS_DECAP_V2) + NFP_FL_FEATS_DECAP_V2 | \ + NFP_FL_FEATS_TUNNEL_NEIGH_LAG) struct nfp_fl_mask_id { struct circ_buf mask_id_free_list; @@ -104,6 +106,16 @@ struct nfp_fl_tunnel_offloads { }; /** + * struct nfp_tun_neigh_lag - lag info + * @lag_version: lag version + * @lag_instance: lag instance + */ +struct nfp_tun_neigh_lag { + u8 lag_version[3]; + u8 lag_instance; +}; + +/** * struct nfp_tun_neigh - basic neighbour data * @dst_addr: Destination MAC address * @src_addr: Source MAC address @@ -133,12 +145,14 @@ struct nfp_tun_neigh_ext { * @src_ipv4: Source IPv4 address * @common: Neighbour/route common info * @ext: Neighbour/route extended info + * @lag: lag port info */ struct nfp_tun_neigh_v4 { __be32 dst_ipv4; __be32 src_ipv4; struct nfp_tun_neigh common; struct nfp_tun_neigh_ext ext; + struct nfp_tun_neigh_lag lag; }; /** @@ -147,12 +161,14 @@ struct nfp_tun_neigh_v4 { * @src_ipv6: Source IPv6 address * @common: Neighbour/route common info * @ext: Neighbour/route extended info + * @lag: lag port info */ struct nfp_tun_neigh_v6 { struct in6_addr dst_ipv6; struct in6_addr src_ipv6; struct nfp_tun_neigh common; struct nfp_tun_neigh_ext ext; + struct nfp_tun_neigh_lag lag; }; /** @@ -647,6 +663,9 @@ int nfp_flower_lag_populate_pre_action(struct nfp_app *app, struct netlink_ext_ack *extack); int nfp_flower_lag_get_output_id(struct nfp_app *app, struct net_device *master); +void nfp_flower_lag_get_info_from_netdev(struct nfp_app *app, + struct net_device *netdev, + struct nfp_tun_neigh_lag *lag); void nfp_flower_qos_init(struct nfp_app *app); void nfp_flower_qos_cleanup(struct nfp_app *app); int nfp_flower_setup_qos_offload(struct nfp_app *app, struct net_device *netdev, diff --git a/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c b/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c index 52f67157bd0f..a8678d5612ee 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c +++ b/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c @@ -290,6 +290,11 @@ nfp_flower_xmit_tun_conf(struct 
nfp_app *app, u8 mtype, u16 plen, void *pdata, mtype == NFP_FLOWER_CMSG_TYPE_TUN_NEIGH_V6)) plen -= sizeof(struct nfp_tun_neigh_ext); + if (!(priv->flower_ext_feats & NFP_FL_FEATS_TUNNEL_NEIGH_LAG) && + (mtype == NFP_FLOWER_CMSG_TYPE_TUN_NEIGH || + mtype == NFP_FLOWER_CMSG_TYPE_TUN_NEIGH_V6)) + plen -= sizeof(struct nfp_tun_neigh_lag); + skb = nfp_flower_cmsg_alloc(app, plen, mtype, flag); if (!skb) return -ENOMEM; @@ -468,6 +473,7 @@ nfp_tun_write_neigh(struct net_device *netdev, struct nfp_app *app, neigh_table_params); if (!nn_entry && !neigh_invalid) { struct nfp_tun_neigh_ext *ext; + struct nfp_tun_neigh_lag *lag; struct nfp_tun_neigh *common; nn_entry = kzalloc(sizeof(*nn_entry) + neigh_size, @@ -488,6 +494,7 @@ nfp_tun_write_neigh(struct net_device *netdev, struct nfp_app *app, payload->dst_ipv6 = flowi6->daddr; common = &payload->common; ext = &payload->ext; + lag = &payload->lag; mtype = NFP_FLOWER_CMSG_TYPE_TUN_NEIGH_V6; } else { struct flowi4 *flowi4 = (struct flowi4 *)flow; @@ -498,6 +505,7 @@ nfp_tun_write_neigh(struct net_device *netdev, struct nfp_app *app, payload->dst_ipv4 = flowi4->daddr; common = &payload->common; ext = &payload->ext; + lag = &payload->lag; mtype = NFP_FLOWER_CMSG_TYPE_TUN_NEIGH; } ext->host_ctx = cpu_to_be32(U32_MAX); @@ -505,6 +513,9 @@ nfp_tun_write_neigh(struct net_device *netdev, struct nfp_app *app, ext->vlan_tci = cpu_to_be16(U16_MAX); ether_addr_copy(common->src_addr, netdev->dev_addr); neigh_ha_snapshot(common->dst_addr, neigh, netdev); + + if ((port_id & NFP_FL_LAG_OUT) == NFP_FL_LAG_OUT) + nfp_flower_lag_get_info_from_netdev(app, netdev, lag); common->port_id = cpu_to_be32(port_id); if (rhashtable_insert_fast(&priv->neigh_table, @@ -547,13 +558,38 @@ nfp_tun_write_neigh(struct net_device *netdev, struct nfp_app *app, if (nn_entry->flow) list_del(&nn_entry->list_head); kfree(nn_entry); - } else if (nn_entry && !neigh_invalid && override) { - mtype = is_ipv6 ? 
NFP_FLOWER_CMSG_TYPE_TUN_NEIGH_V6 : - NFP_FLOWER_CMSG_TYPE_TUN_NEIGH; - nfp_tun_link_predt_entries(app, nn_entry); - nfp_flower_xmit_tun_conf(app, mtype, neigh_size, - nn_entry->payload, - GFP_ATOMIC); + } else if (nn_entry && !neigh_invalid) { + struct nfp_tun_neigh *common; + u8 dst_addr[ETH_ALEN]; + bool is_mac_change; + + if (is_ipv6) { + struct nfp_tun_neigh_v6 *payload; + + payload = (struct nfp_tun_neigh_v6 *)nn_entry->payload; + common = &payload->common; + mtype = NFP_FLOWER_CMSG_TYPE_TUN_NEIGH_V6; + } else { + struct nfp_tun_neigh_v4 *payload; + + payload = (struct nfp_tun_neigh_v4 *)nn_entry->payload; + common = &payload->common; + mtype = NFP_FLOWER_CMSG_TYPE_TUN_NEIGH; + } + + ether_addr_copy(dst_addr, common->dst_addr); + neigh_ha_snapshot(common->dst_addr, neigh, netdev); + is_mac_change = !ether_addr_equal(dst_addr, common->dst_addr); + if (override || is_mac_change) { + if (is_mac_change && nn_entry->flow) { + list_del(&nn_entry->list_head); + nn_entry->flow = NULL; + } + nfp_tun_link_predt_entries(app, nn_entry); + nfp_flower_xmit_tun_conf(app, mtype, neigh_size, + nn_entry->payload, + GFP_ATOMIC); + } } spin_unlock_bh(&priv->predt_lock); @@ -593,8 +629,7 @@ nfp_tun_neigh_event_handler(struct notifier_block *nb, unsigned long event, app_priv = container_of(nb, struct nfp_flower_priv, tun.neigh_nb); app = app_priv->app; - if (!nfp_netdev_is_nfp_repr(n->dev) && - !nfp_flower_internal_port_can_offload(app, n->dev)) + if (!nfp_flower_get_port_id_from_netdev(app, n->dev)) return NOTIFY_DONE; #if IS_ENABLED(CONFIG_INET) diff --git a/drivers/net/ethernet/sfc/ef100_ethtool.c b/drivers/net/ethernet/sfc/ef100_ethtool.c index 135ece2f1375..702abbe59b76 100644 --- a/drivers/net/ethernet/sfc/ef100_ethtool.c +++ b/drivers/net/ethernet/sfc/ef100_ethtool.c @@ -43,8 +43,6 @@ const struct ethtool_ops ef100_ethtool_ops = { .get_pauseparam = efx_ethtool_get_pauseparam, .set_pauseparam = efx_ethtool_set_pauseparam, .get_sset_count = efx_ethtool_get_sset_count, - .get_priv_flags = efx_ethtool_get_priv_flags, - .set_priv_flags = efx_ethtool_set_priv_flags, .self_test = efx_ethtool_self_test, .get_strings = efx_ethtool_get_strings, .get_link_ksettings = efx_ethtool_get_link_ksettings, diff --git a/drivers/net/ethernet/sfc/ethtool_common.c b/drivers/net/ethernet/sfc/ethtool_common.c index 6649a2327d03..a8cbceeb301b 100644 --- a/drivers/net/ethernet/sfc/ethtool_common.c +++ b/drivers/net/ethernet/sfc/ethtool_common.c @@ -101,14 +101,6 @@ static const struct efx_sw_stat_desc efx_sw_stat_desc[] = { #define EFX_ETHTOOL_SW_STAT_COUNT ARRAY_SIZE(efx_sw_stat_desc) -static const char efx_ethtool_priv_flags_strings[][ETH_GSTRING_LEN] = { - "log-tc-errors", -}; - -#define EFX_ETHTOOL_PRIV_FLAGS_LOG_TC_ERRS BIT(0) - -#define EFX_ETHTOOL_PRIV_FLAGS_COUNT ARRAY_SIZE(efx_ethtool_priv_flags_strings) - void efx_ethtool_get_drvinfo(struct net_device *net_dev, struct ethtool_drvinfo *info) { @@ -460,8 +452,6 @@ int efx_ethtool_get_sset_count(struct net_device *net_dev, int string_set) efx_ptp_describe_stats(efx, NULL); case ETH_SS_TEST: return efx_ethtool_fill_self_tests(efx, NULL, NULL, NULL); - case ETH_SS_PRIV_FLAGS: - return EFX_ETHTOOL_PRIV_FLAGS_COUNT; default: return -EINVAL; } @@ -488,39 +478,12 @@ void efx_ethtool_get_strings(struct net_device *net_dev, case ETH_SS_TEST: efx_ethtool_fill_self_tests(efx, NULL, strings, NULL); break; - case ETH_SS_PRIV_FLAGS: - for (i = 0; i < EFX_ETHTOOL_PRIV_FLAGS_COUNT; i++) - strscpy(strings + i * ETH_GSTRING_LEN, - efx_ethtool_priv_flags_strings[i], - ETH_GSTRING_LEN); 
- break; default: /* No other string sets */ break; } } -u32 efx_ethtool_get_priv_flags(struct net_device *net_dev) -{ - struct efx_nic *efx = efx_netdev_priv(net_dev); - u32 ret_flags = 0; - - if (efx->log_tc_errs) - ret_flags |= EFX_ETHTOOL_PRIV_FLAGS_LOG_TC_ERRS; - - return ret_flags; -} - -int efx_ethtool_set_priv_flags(struct net_device *net_dev, u32 flags) -{ - struct efx_nic *efx = efx_netdev_priv(net_dev); - - efx->log_tc_errs = - !!(flags & EFX_ETHTOOL_PRIV_FLAGS_LOG_TC_ERRS); - - return 0; -} - void efx_ethtool_get_stats(struct net_device *net_dev, struct ethtool_stats *stats, u64 *data) diff --git a/drivers/net/ethernet/sfc/ethtool_common.h b/drivers/net/ethernet/sfc/ethtool_common.h index 0afc74021a5e..659491932101 100644 --- a/drivers/net/ethernet/sfc/ethtool_common.h +++ b/drivers/net/ethernet/sfc/ethtool_common.h @@ -27,8 +27,6 @@ int efx_ethtool_fill_self_tests(struct efx_nic *efx, int efx_ethtool_get_sset_count(struct net_device *net_dev, int string_set); void efx_ethtool_get_strings(struct net_device *net_dev, u32 string_set, u8 *strings); -u32 efx_ethtool_get_priv_flags(struct net_device *net_dev); -int efx_ethtool_set_priv_flags(struct net_device *net_dev, u32 flags); void efx_ethtool_get_stats(struct net_device *net_dev, struct ethtool_stats *stats __attribute__ ((unused)), u64 *data); diff --git a/drivers/net/ethernet/sfc/mae.c b/drivers/net/ethernet/sfc/mae.c index 874c765b2465..6f472ea0638a 100644 --- a/drivers/net/ethernet/sfc/mae.c +++ b/drivers/net/ethernet/sfc/mae.c @@ -265,9 +265,8 @@ int efx_mae_match_check_caps(struct efx_nic *efx, rc = efx_mae_match_check_cap_typ(supported_fields[MAE_FIELD_INGRESS_PORT], ingress_port_mask_type); if (rc) { - efx_tc_err(efx, "No support for %s mask in field ingress_port\n", - mask_type_name(ingress_port_mask_type)); - NL_SET_ERR_MSG_MOD(extack, "Unsupported mask type for ingress_port"); + NL_SET_ERR_MSG_FMT_MOD(extack, "No support for %s mask in field ingress_port", + mask_type_name(ingress_port_mask_type)); return rc; } return 0; diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h index 2e9ba0cfe848..7ef823d7a89a 100644 --- a/drivers/net/ethernet/sfc/net_driver.h +++ b/drivers/net/ethernet/sfc/net_driver.h @@ -855,7 +855,6 @@ enum efx_xdp_tx_queues_mode { * @timer_max_ns: Interrupt timer maximum value, in nanoseconds * @irq_rx_adaptive: Adaptive IRQ moderation enabled for RX event queues * @irqs_hooked: Channel interrupts are hooked - * @log_tc_errs: Error logging for TC filter insertion is enabled * @irq_rx_mod_step_us: Step size for IRQ moderation for RX event queues * @irq_rx_moderation_us: IRQ moderation time for RX event queues * @msg_enable: Log message enable flags @@ -1018,7 +1017,6 @@ struct efx_nic { unsigned int timer_max_ns; bool irq_rx_adaptive; bool irqs_hooked; - bool log_tc_errs; unsigned int irq_mod_step_us; unsigned int irq_rx_moderation_us; u32 msg_enable; diff --git a/drivers/net/ethernet/sfc/tc.c b/drivers/net/ethernet/sfc/tc.c index 3478860d4023..b21a961eabb1 100644 --- a/drivers/net/ethernet/sfc/tc.c +++ b/drivers/net/ethernet/sfc/tc.c @@ -137,17 +137,16 @@ static int efx_tc_flower_parse_match(struct efx_nic *efx, flow_rule_match_control(rule, &fm); if (fm.mask->flags) { - efx_tc_err(efx, "Unsupported match on control.flags %#x\n", - fm.mask->flags); - NL_SET_ERR_MSG_MOD(extack, "Unsupported match on control.flags"); + NL_SET_ERR_MSG_FMT_MOD(extack, "Unsupported match on control.flags %#x", + fm.mask->flags); return -EOPNOTSUPP; } } if (dissector->used_keys & 
~(BIT(FLOW_DISSECTOR_KEY_CONTROL) | BIT(FLOW_DISSECTOR_KEY_BASIC))) { - efx_tc_err(efx, "Unsupported flower keys %#x\n", dissector->used_keys); - NL_SET_ERR_MSG_MOD(extack, "Unsupported flower keys encountered"); + NL_SET_ERR_MSG_FMT_MOD(extack, "Unsupported flower keys %#x", + dissector->used_keys); return -EOPNOTSUPP; } @@ -156,11 +155,11 @@ static int efx_tc_flower_parse_match(struct efx_nic *efx, flow_rule_match_basic(rule, &fm); if (fm.mask->n_proto) { - EFX_TC_ERR_MSG(efx, extack, "Unsupported eth_proto match\n"); + NL_SET_ERR_MSG_MOD(extack, "Unsupported eth_proto match"); return -EOPNOTSUPP; } if (fm.mask->ip_proto) { - EFX_TC_ERR_MSG(efx, extack, "Unsupported ip_proto match\n"); + NL_SET_ERR_MSG_MOD(extack, "Unsupported ip_proto match"); return -EOPNOTSUPP; } } @@ -200,13 +199,9 @@ static int efx_tc_flower_replace(struct efx_nic *efx, if (efv != from_efv) { /* can't happen */ - efx_tc_err(efx, "for %s efv is %snull but from_efv is %snull\n", - netdev_name(net_dev), efv ? "non-" : "", - from_efv ? "non-" : ""); - if (efv) - NL_SET_ERR_MSG_MOD(extack, "vfrep filter has PF net_dev (can't happen)"); - else - NL_SET_ERR_MSG_MOD(extack, "PF filter has vfrep net_dev (can't happen)"); + NL_SET_ERR_MSG_FMT_MOD(extack, "for %s efv is %snull but from_efv is %snull (can't happen)", + netdev_name(net_dev), efv ? "non-" : "", + from_efv ? "non-" : ""); return -EINVAL; } @@ -214,7 +209,7 @@ static int efx_tc_flower_replace(struct efx_nic *efx, memset(&match, 0, sizeof(match)); rc = efx_tc_flower_external_mport(efx, from_efv); if (rc < 0) { - EFX_TC_ERR_MSG(efx, extack, "Failed to identify ingress m-port"); + NL_SET_ERR_MSG_MOD(extack, "Failed to identify ingress m-port"); return rc; } match.value.ingress_port = rc; @@ -224,7 +219,7 @@ static int efx_tc_flower_replace(struct efx_nic *efx, return rc; if (tc->common.chain_index) { - EFX_TC_ERR_MSG(efx, extack, "No support for nonzero chain_index"); + NL_SET_ERR_MSG_MOD(extack, "No support for nonzero chain_index"); return -EOPNOTSUPP; } match.mask.recirc_id = 0xff; @@ -261,7 +256,7 @@ static int efx_tc_flower_replace(struct efx_nic *efx, if (!act) { /* more actions after a non-pipe action */ - EFX_TC_ERR_MSG(efx, extack, "Action follows non-pipe action"); + NL_SET_ERR_MSG_MOD(extack, "Action follows non-pipe action"); rc = -EINVAL; goto release; } @@ -270,7 +265,7 @@ static int efx_tc_flower_replace(struct efx_nic *efx, case FLOW_ACTION_DROP: rc = efx_mae_alloc_action_set(efx, act); if (rc) { - EFX_TC_ERR_MSG(efx, extack, "Failed to write action set to hw (drop)"); + NL_SET_ERR_MSG_MOD(extack, "Failed to write action set to hw (drop)"); goto release; } list_add_tail(&act->list, &rule->acts.list); @@ -281,20 +276,20 @@ static int efx_tc_flower_replace(struct efx_nic *efx, save = *act; to_efv = efx_tc_flower_lookup_efv(efx, fa->dev); if (IS_ERR(to_efv)) { - EFX_TC_ERR_MSG(efx, extack, "Mirred egress device not on switch"); + NL_SET_ERR_MSG_MOD(extack, "Mirred egress device not on switch"); rc = PTR_ERR(to_efv); goto release; } rc = efx_tc_flower_external_mport(efx, to_efv); if (rc < 0) { - EFX_TC_ERR_MSG(efx, extack, "Failed to identify egress m-port"); + NL_SET_ERR_MSG_MOD(extack, "Failed to identify egress m-port"); goto release; } act->dest_mport = rc; act->deliver = 1; rc = efx_mae_alloc_action_set(efx, act); if (rc) { - EFX_TC_ERR_MSG(efx, extack, "Failed to write action set to hw (mirred)"); + NL_SET_ERR_MSG_MOD(extack, "Failed to write action set to hw (mirred)"); goto release; } list_add_tail(&act->list, &rule->acts.list); @@ -310,9 +305,9 @@ 
static int efx_tc_flower_replace(struct efx_nic *efx, *act = save; break; default: - efx_tc_err(efx, "Unhandled action %u\n", fa->id); + NL_SET_ERR_MSG_FMT_MOD(extack, "Unhandled action %u", + fa->id); rc = -EOPNOTSUPP; - NL_SET_ERR_MSG_MOD(extack, "Unsupported action"); goto release; } } @@ -334,7 +329,7 @@ static int efx_tc_flower_replace(struct efx_nic *efx, act->deliver = 1; rc = efx_mae_alloc_action_set(efx, act); if (rc) { - EFX_TC_ERR_MSG(efx, extack, "Failed to write action set to hw (deliver)"); + NL_SET_ERR_MSG_MOD(extack, "Failed to write action set to hw (deliver)"); goto release; } list_add_tail(&act->list, &rule->acts.list); @@ -349,13 +344,13 @@ static int efx_tc_flower_replace(struct efx_nic *efx, rc = efx_mae_alloc_action_set_list(efx, &rule->acts); if (rc) { - EFX_TC_ERR_MSG(efx, extack, "Failed to write action set list to hw"); + NL_SET_ERR_MSG_MOD(extack, "Failed to write action set list to hw"); goto release; } rc = efx_mae_insert_rule(efx, &rule->match, EFX_TC_PRIO_TC, rule->acts.fw_id, &rule->fw_id); if (rc) { - EFX_TC_ERR_MSG(efx, extack, "Failed to insert rule in hw"); + NL_SET_ERR_MSG_MOD(extack, "Failed to insert rule in hw"); goto release_acts; } return 0; diff --git a/drivers/net/ethernet/sfc/tc.h b/drivers/net/ethernet/sfc/tc.h index 196fd74ed973..4373c3243e3c 100644 --- a/drivers/net/ethernet/sfc/tc.h +++ b/drivers/net/ethernet/sfc/tc.h @@ -15,24 +15,6 @@ #include <linux/rhashtable.h> #include "net_driver.h" -/* Error reporting: convenience macros. For indicating why a given filter - * insertion is not supported; errors in internal operation or in the - * hardware should be netif_err()s instead. - */ -/* Used when error message is constant. */ -#define EFX_TC_ERR_MSG(efx, extack, message) do { \ - NL_SET_ERR_MSG_MOD(extack, message); \ - if (efx->log_tc_errs) \ - netif_info(efx, drv, efx->net_dev, "%s\n", message); \ -} while (0) -/* Used when error message is not constant; caller should also supply a - * constant extack message with NL_SET_ERR_MSG_MOD(). - */ -#define efx_tc_err(efx, fmt, args...) do { \ -if (efx->log_tc_errs) \ - netif_info(efx, drv, efx->net_dev, fmt, ##args);\ -} while (0) - struct efx_tc_action_set { u16 deliver:1; u32 dest_mport; diff --git a/drivers/net/ethernet/smsc/Kconfig b/drivers/net/ethernet/smsc/Kconfig index 2524c907f386..5f22a8a4d27b 100644 --- a/drivers/net/ethernet/smsc/Kconfig +++ b/drivers/net/ethernet/smsc/Kconfig @@ -75,20 +75,6 @@ config EPIC100 More specific information and updates are available from <http://www.scyld.com/network/epic100.html>. -config SMC911X - tristate "SMSC LAN911[5678] support" - select CRC32 - select MII - depends on (ARM || SUPERH || COMPILE_TEST) - help - This is a driver for SMSC's LAN911x series of Ethernet chipsets - including the new LAN9115, LAN9116, LAN9117, and LAN9118. - Say Y here if you want it compiled into the kernel. - - This driver is also available as a module. The module will be - called smc911x. 
If you want to compile it as a module, say M - here and read <file:Documentation/kbuild/modules.rst> - config SMSC911X tristate "SMSC LAN911x/LAN921x families embedded ethernet support" depends on HAS_IOMEM diff --git a/drivers/net/ethernet/smsc/Makefile b/drivers/net/ethernet/smsc/Makefile index 4105912b1629..1501fa364c13 100644 --- a/drivers/net/ethernet/smsc/Makefile +++ b/drivers/net/ethernet/smsc/Makefile @@ -8,5 +8,4 @@ obj-$(CONFIG_SMC91X) += smc91x.o obj-$(CONFIG_PCMCIA_SMC91C92) += smc91c92_cs.o obj-$(CONFIG_EPIC100) += epic100.o obj-$(CONFIG_SMSC9420) += smsc9420.o -obj-$(CONFIG_SMC911X) += smc911x.o obj-$(CONFIG_SMSC911X) += smsc911x.o diff --git a/drivers/net/ethernet/smsc/smc911x.c b/drivers/net/ethernet/smsc/smc911x.c deleted file mode 100644 index 52ecfb461c41..000000000000 --- a/drivers/net/ethernet/smsc/smc911x.c +++ /dev/null @@ -1,2198 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-or-later -/* - * smc911x.c - * This is a driver for SMSC's LAN911{5,6,7,8} single-chip Ethernet devices. - * - * Copyright (C) 2005 Sensoria Corp - * Derived from the unified SMC91x driver by Nicolas Pitre - * and the smsc911x.c reference driver by SMSC - * - * Arguments: - * watchdog = TX watchdog timeout - * tx_fifo_kb = Size of TX FIFO in KB - * - * History: - * 04/16/05 Dustin McIntire Initial version - */ -static const char version[] = - "smc911x.c: v1.0 04-16-2005 by Dustin McIntire <dustin@sensoria.com>\n"; - -/* Debugging options */ -#define ENABLE_SMC_DEBUG_RX 0 -#define ENABLE_SMC_DEBUG_TX 0 -#define ENABLE_SMC_DEBUG_DMA 0 -#define ENABLE_SMC_DEBUG_PKTS 0 -#define ENABLE_SMC_DEBUG_MISC 0 -#define ENABLE_SMC_DEBUG_FUNC 0 - -#define SMC_DEBUG_RX ((ENABLE_SMC_DEBUG_RX ? 1 : 0) << 0) -#define SMC_DEBUG_TX ((ENABLE_SMC_DEBUG_TX ? 1 : 0) << 1) -#define SMC_DEBUG_DMA ((ENABLE_SMC_DEBUG_DMA ? 1 : 0) << 2) -#define SMC_DEBUG_PKTS ((ENABLE_SMC_DEBUG_PKTS ? 1 : 0) << 3) -#define SMC_DEBUG_MISC ((ENABLE_SMC_DEBUG_MISC ? 1 : 0) << 4) -#define SMC_DEBUG_FUNC ((ENABLE_SMC_DEBUG_FUNC ? 1 : 0) << 5) - -#ifndef SMC_DEBUG -#define SMC_DEBUG ( SMC_DEBUG_RX | \ - SMC_DEBUG_TX | \ - SMC_DEBUG_DMA | \ - SMC_DEBUG_PKTS | \ - SMC_DEBUG_MISC | \ - SMC_DEBUG_FUNC \ - ) -#endif - -#include <linux/module.h> -#include <linux/kernel.h> -#include <linux/sched.h> -#include <linux/delay.h> -#include <linux/interrupt.h> -#include <linux/errno.h> -#include <linux/ioport.h> -#include <linux/crc32.h> -#include <linux/device.h> -#include <linux/platform_device.h> -#include <linux/spinlock.h> -#include <linux/ethtool.h> -#include <linux/mii.h> -#include <linux/workqueue.h> - -#include <linux/netdevice.h> -#include <linux/etherdevice.h> -#include <linux/skbuff.h> - -#include <linux/dmaengine.h> - -#include <asm/io.h> - -#include "smc911x.h" - -/* - * Transmit timeout, default 5 seconds. - */ -static int watchdog = 5000; -module_param(watchdog, int, 0400); -MODULE_PARM_DESC(watchdog, "transmit timeout in milliseconds"); - -static int tx_fifo_kb=8; -module_param(tx_fifo_kb, int, 0400); -MODULE_PARM_DESC(tx_fifo_kb,"transmit FIFO size in KB (1<x<15)(default=8)"); - -MODULE_LICENSE("GPL"); -MODULE_ALIAS("platform:smc911x"); - -/* - * The internal workings of the driver. If you are changing anything - * here with the SMC stuff, you should have the datasheet and know - * what you are doing. - */ -#define CARDNAME "smc911x" - -/* - * Use power-down feature of the chip - */ -#define POWER_DOWN 1 - -#if SMC_DEBUG > 0 -#define DBG(n, dev, args...) 
\ - do { \ - if (SMC_DEBUG & (n)) \ - netdev_dbg(dev, args); \ - } while (0) - -#define PRINTK(dev, args...) netdev_info(dev, args) -#else -#define DBG(n, dev, args...) \ - while (0) { \ - netdev_dbg(dev, args); \ - } -#define PRINTK(dev, args...) netdev_dbg(dev, args) -#endif - -#if SMC_DEBUG_PKTS > 0 -static void PRINT_PKT(u_char *buf, int length) -{ - int i; - int remainder; - int lines; - - lines = length / 16; - remainder = length % 16; - - for (i = 0; i < lines ; i ++) { - int cur; - printk(KERN_DEBUG); - for (cur = 0; cur < 8; cur++) { - u_char a, b; - a = *buf++; - b = *buf++; - pr_cont("%02x%02x ", a, b); - } - pr_cont("\n"); - } - printk(KERN_DEBUG); - for (i = 0; i < remainder/2 ; i++) { - u_char a, b; - a = *buf++; - b = *buf++; - pr_cont("%02x%02x ", a, b); - } - pr_cont("\n"); -} -#else -static inline void PRINT_PKT(u_char *buf, int length) { } -#endif - - -/* this enables an interrupt in the interrupt mask register */ -#define SMC_ENABLE_INT(lp, x) do { \ - unsigned int __mask; \ - __mask = SMC_GET_INT_EN((lp)); \ - __mask |= (x); \ - SMC_SET_INT_EN((lp), __mask); \ -} while (0) - -/* this disables an interrupt from the interrupt mask register */ -#define SMC_DISABLE_INT(lp, x) do { \ - unsigned int __mask; \ - __mask = SMC_GET_INT_EN((lp)); \ - __mask &= ~(x); \ - SMC_SET_INT_EN((lp), __mask); \ -} while (0) - -/* - * this does a soft reset on the device - */ -static void smc911x_reset(struct net_device *dev) -{ - struct smc911x_local *lp = netdev_priv(dev); - unsigned int reg, timeout=0, resets=1, irq_cfg; - unsigned long flags; - - DBG(SMC_DEBUG_FUNC, dev, "--> %s\n", __func__); - - /* Take out of PM setting first */ - if ((SMC_GET_PMT_CTRL(lp) & PMT_CTRL_READY_) == 0) { - /* Write to the bytetest will take out of powerdown */ - SMC_SET_BYTE_TEST(lp, 0); - timeout=10; - do { - udelay(10); - reg = SMC_GET_PMT_CTRL(lp) & PMT_CTRL_READY_; - } while (--timeout && !reg); - if (timeout == 0) { - PRINTK(dev, "smc911x_reset timeout waiting for PM restore\n"); - return; - } - } - - /* Disable all interrupts */ - spin_lock_irqsave(&lp->lock, flags); - SMC_SET_INT_EN(lp, 0); - spin_unlock_irqrestore(&lp->lock, flags); - - while (resets--) { - SMC_SET_HW_CFG(lp, HW_CFG_SRST_); - timeout=10; - do { - udelay(10); - reg = SMC_GET_HW_CFG(lp); - /* If chip indicates reset timeout then try again */ - if (reg & HW_CFG_SRST_TO_) { - PRINTK(dev, "chip reset timeout, retrying...\n"); - resets++; - break; - } - } while (--timeout && (reg & HW_CFG_SRST_)); - } - if (timeout == 0) { - PRINTK(dev, "smc911x_reset timeout waiting for reset\n"); - return; - } - - /* make sure EEPROM has finished loading before setting GPIO_CFG */ - timeout=1000; - while (--timeout && (SMC_GET_E2P_CMD(lp) & E2P_CMD_EPC_BUSY_)) - udelay(10); - - if (timeout == 0){ - PRINTK(dev, "smc911x_reset timeout waiting for EEPROM busy\n"); - return; - } - - /* Initialize interrupts */ - SMC_SET_INT_EN(lp, 0); - SMC_ACK_INT(lp, -1); - - /* Reset the FIFO level and flow control settings */ - SMC_SET_HW_CFG(lp, (lp->tx_fifo_kb & 0xF) << 16); -//TODO: Figure out what appropriate pause time is - SMC_SET_FLOW(lp, FLOW_FCPT_ | FLOW_FCEN_); - SMC_SET_AFC_CFG(lp, lp->afc_cfg); - - - /* Set to LED outputs */ - SMC_SET_GPIO_CFG(lp, 0x70070000); - - /* - * Deassert IRQ for 1*10us for edge type interrupts - * and drive IRQ pin push-pull - */ - irq_cfg = (1 << 24) | INT_CFG_IRQ_EN_ | INT_CFG_IRQ_TYPE_; -#ifdef SMC_DYNAMIC_BUS_CONFIG - if (lp->cfg.irq_polarity) - irq_cfg |= INT_CFG_IRQ_POL_; -#endif - SMC_SET_IRQ_CFG(lp, irq_cfg); - - /* clear 
anything saved */ - if (lp->pending_tx_skb != NULL) { - dev_kfree_skb (lp->pending_tx_skb); - lp->pending_tx_skb = NULL; - dev->stats.tx_errors++; - dev->stats.tx_aborted_errors++; - } -} - -/* - * Enable Interrupts, Receive, and Transmit - */ -static void smc911x_enable(struct net_device *dev) -{ - struct smc911x_local *lp = netdev_priv(dev); - unsigned mask, cfg, cr; - unsigned long flags; - - DBG(SMC_DEBUG_FUNC, dev, "--> %s\n", __func__); - - spin_lock_irqsave(&lp->lock, flags); - - SMC_SET_MAC_ADDR(lp, dev->dev_addr); - - /* Enable TX */ - cfg = SMC_GET_HW_CFG(lp); - cfg &= HW_CFG_TX_FIF_SZ_ | 0xFFF; - cfg |= HW_CFG_SF_; - SMC_SET_HW_CFG(lp, cfg); - SMC_SET_FIFO_TDA(lp, 0xFF); - /* Update TX stats on every 64 packets received or every 1 sec */ - SMC_SET_FIFO_TSL(lp, 64); - SMC_SET_GPT_CFG(lp, GPT_CFG_TIMER_EN_ | 10000); - - SMC_GET_MAC_CR(lp, cr); - cr |= MAC_CR_TXEN_ | MAC_CR_HBDIS_; - SMC_SET_MAC_CR(lp, cr); - SMC_SET_TX_CFG(lp, TX_CFG_TX_ON_); - - /* Add 2 byte padding to start of packets */ - SMC_SET_RX_CFG(lp, (2<<8) & RX_CFG_RXDOFF_); - - /* Turn on receiver and enable RX */ - if (cr & MAC_CR_RXEN_) - DBG(SMC_DEBUG_RX, dev, "Receiver already enabled\n"); - - SMC_SET_MAC_CR(lp, cr | MAC_CR_RXEN_); - - /* Interrupt on every received packet */ - SMC_SET_FIFO_RSA(lp, 0x01); - SMC_SET_FIFO_RSL(lp, 0x00); - - /* now, enable interrupts */ - mask = INT_EN_TDFA_EN_ | INT_EN_TSFL_EN_ | INT_EN_RSFL_EN_ | - INT_EN_GPT_INT_EN_ | INT_EN_RXDFH_INT_EN_ | INT_EN_RXE_EN_ | - INT_EN_PHY_INT_EN_; - if (IS_REV_A(lp->revision)) - mask|=INT_EN_RDFL_EN_; - else { - mask|=INT_EN_RDFO_EN_; - } - SMC_ENABLE_INT(lp, mask); - - spin_unlock_irqrestore(&lp->lock, flags); -} - -/* - * this puts the device in an inactive state - */ -static void smc911x_shutdown(struct net_device *dev) -{ - struct smc911x_local *lp = netdev_priv(dev); - unsigned cr; - unsigned long flags; - - DBG(SMC_DEBUG_FUNC, dev, "%s: --> %s\n", CARDNAME, __func__); - - /* Disable IRQ's */ - SMC_SET_INT_EN(lp, 0); - - /* Turn of Rx and TX */ - spin_lock_irqsave(&lp->lock, flags); - SMC_GET_MAC_CR(lp, cr); - cr &= ~(MAC_CR_TXEN_ | MAC_CR_RXEN_ | MAC_CR_HBDIS_); - SMC_SET_MAC_CR(lp, cr); - SMC_SET_TX_CFG(lp, TX_CFG_STOP_TX_); - spin_unlock_irqrestore(&lp->lock, flags); -} - -static inline void smc911x_drop_pkt(struct net_device *dev) -{ - struct smc911x_local *lp = netdev_priv(dev); - unsigned int fifo_count, timeout, reg; - - DBG(SMC_DEBUG_FUNC | SMC_DEBUG_RX, dev, "%s: --> %s\n", - CARDNAME, __func__); - fifo_count = SMC_GET_RX_FIFO_INF(lp) & 0xFFFF; - if (fifo_count <= 4) { - /* Manually dump the packet data */ - while (fifo_count--) - SMC_GET_RX_FIFO(lp); - } else { - /* Fast forward through the bad packet */ - SMC_SET_RX_DP_CTRL(lp, RX_DP_CTRL_FFWD_BUSY_); - timeout=50; - do { - udelay(10); - reg = SMC_GET_RX_DP_CTRL(lp) & RX_DP_CTRL_FFWD_BUSY_; - } while (--timeout && reg); - if (timeout == 0) { - PRINTK(dev, "timeout waiting for RX fast forward\n"); - } - } -} - -/* - * This is the procedure to handle the receipt of a packet. - * It should be called after checking for packet presence in - * the RX status FIFO. It must be called with the spin lock - * already held. 
- */ -static inline void smc911x_rcv(struct net_device *dev) -{ - struct smc911x_local *lp = netdev_priv(dev); - unsigned int pkt_len, status; - struct sk_buff *skb; - unsigned char *data; - - DBG(SMC_DEBUG_FUNC | SMC_DEBUG_RX, dev, "--> %s\n", - __func__); - status = SMC_GET_RX_STS_FIFO(lp); - DBG(SMC_DEBUG_RX, dev, "Rx pkt len %d status 0x%08x\n", - (status & 0x3fff0000) >> 16, status & 0xc000ffff); - pkt_len = (status & RX_STS_PKT_LEN_) >> 16; - if (status & RX_STS_ES_) { - /* Deal with a bad packet */ - dev->stats.rx_errors++; - if (status & RX_STS_CRC_ERR_) - dev->stats.rx_crc_errors++; - else { - if (status & RX_STS_LEN_ERR_) - dev->stats.rx_length_errors++; - if (status & RX_STS_MCAST_) - dev->stats.multicast++; - } - /* Remove the bad packet data from the RX FIFO */ - smc911x_drop_pkt(dev); - } else { - /* Receive a valid packet */ - /* Alloc a buffer with extra room for DMA alignment */ - skb = netdev_alloc_skb(dev, pkt_len+32); - if (unlikely(skb == NULL)) { - PRINTK(dev, "Low memory, rcvd packet dropped.\n"); - dev->stats.rx_dropped++; - smc911x_drop_pkt(dev); - return; - } - /* Align IP header to 32 bits - * Note that the device is configured to add a 2 - * byte padding to the packet start, so we really - * want to write to the orignal data pointer */ - data = skb->data; - skb_reserve(skb, 2); - skb_put(skb,pkt_len-4); -#ifdef SMC_USE_DMA - { - unsigned int fifo; - /* Lower the FIFO threshold if possible */ - fifo = SMC_GET_FIFO_INT(lp); - if (fifo & 0xFF) fifo--; - DBG(SMC_DEBUG_RX, dev, "Setting RX stat FIFO threshold to %d\n", - fifo & 0xff); - SMC_SET_FIFO_INT(lp, fifo); - /* Setup RX DMA */ - SMC_SET_RX_CFG(lp, RX_CFG_RX_END_ALGN16_ | ((2<<8) & RX_CFG_RXDOFF_)); - lp->rxdma_active = 1; - lp->current_rx_skb = skb; - SMC_PULL_DATA(lp, data, (pkt_len+2+15) & ~15); - /* Packet processing deferred to DMA RX interrupt */ - } -#else - SMC_SET_RX_CFG(lp, RX_CFG_RX_END_ALGN4_ | ((2<<8) & RX_CFG_RXDOFF_)); - SMC_PULL_DATA(lp, data, pkt_len+2+3); - - DBG(SMC_DEBUG_PKTS, dev, "Received packet\n"); - PRINT_PKT(data, min(pkt_len - 4, 64U)); - skb->protocol = eth_type_trans(skb, dev); - netif_rx(skb); - dev->stats.rx_packets++; - dev->stats.rx_bytes += pkt_len-4; -#endif - } -} - -/* - * This is called to actually send a packet to the chip. 
- */ -static void smc911x_hardware_send_pkt(struct net_device *dev) -{ - struct smc911x_local *lp = netdev_priv(dev); - struct sk_buff *skb; - unsigned int cmdA, cmdB, len; - unsigned char *buf; - - DBG(SMC_DEBUG_FUNC | SMC_DEBUG_TX, dev, "--> %s\n", __func__); - BUG_ON(lp->pending_tx_skb == NULL); - - skb = lp->pending_tx_skb; - lp->pending_tx_skb = NULL; - - /* cmdA {25:24] data alignment [20:16] start offset [10:0] buffer length */ - /* cmdB {31:16] pkt tag [10:0] length */ -#ifdef SMC_USE_DMA - /* 16 byte buffer alignment mode */ - buf = (char*)((u32)(skb->data) & ~0xF); - len = (skb->len + 0xF + ((u32)skb->data & 0xF)) & ~0xF; - cmdA = (1<<24) | (((u32)skb->data & 0xF)<<16) | - TX_CMD_A_INT_FIRST_SEG_ | TX_CMD_A_INT_LAST_SEG_ | - skb->len; -#else - buf = (char *)((uintptr_t)skb->data & ~0x3); - len = (skb->len + 3 + ((uintptr_t)skb->data & 3)) & ~0x3; - cmdA = (((uintptr_t)skb->data & 0x3) << 16) | - TX_CMD_A_INT_FIRST_SEG_ | TX_CMD_A_INT_LAST_SEG_ | - skb->len; -#endif - /* tag is packet length so we can use this in stats update later */ - cmdB = (skb->len << 16) | (skb->len & 0x7FF); - - DBG(SMC_DEBUG_TX, dev, "TX PKT LENGTH 0x%04x (%d) BUF 0x%p CMDA 0x%08x CMDB 0x%08x\n", - len, len, buf, cmdA, cmdB); - SMC_SET_TX_FIFO(lp, cmdA); - SMC_SET_TX_FIFO(lp, cmdB); - - DBG(SMC_DEBUG_PKTS, dev, "Transmitted packet\n"); - PRINT_PKT(buf, min(len, 64U)); - - /* Send pkt via PIO or DMA */ -#ifdef SMC_USE_DMA - lp->current_tx_skb = skb; - SMC_PUSH_DATA(lp, buf, len); - /* DMA complete IRQ will free buffer and set jiffies */ -#else - SMC_PUSH_DATA(lp, buf, len); - netif_trans_update(dev); - dev_kfree_skb_irq(skb); -#endif - if (!lp->tx_throttle) { - netif_wake_queue(dev); - } - SMC_ENABLE_INT(lp, INT_EN_TDFA_EN_ | INT_EN_TSFL_EN_); -} - -/* - * Since I am not sure if I will have enough room in the chip's ram - * to store the packet, I call this routine which either sends it - * now, or set the card to generates an interrupt when ready - * for the packet. 
- */ -static netdev_tx_t -smc911x_hard_start_xmit(struct sk_buff *skb, struct net_device *dev) -{ - struct smc911x_local *lp = netdev_priv(dev); - unsigned int free; - unsigned long flags; - - DBG(SMC_DEBUG_FUNC | SMC_DEBUG_TX, dev, "--> %s\n", - __func__); - - spin_lock_irqsave(&lp->lock, flags); - - BUG_ON(lp->pending_tx_skb != NULL); - - free = SMC_GET_TX_FIFO_INF(lp) & TX_FIFO_INF_TDFREE_; - DBG(SMC_DEBUG_TX, dev, "TX free space %d\n", free); - - /* Turn off the flow when running out of space in FIFO */ - if (free <= SMC911X_TX_FIFO_LOW_THRESHOLD) { - DBG(SMC_DEBUG_TX, dev, "Disabling data flow due to low FIFO space (%d)\n", - free); - /* Reenable when at least 1 packet of size MTU present */ - SMC_SET_FIFO_TDA(lp, (SMC911X_TX_FIFO_LOW_THRESHOLD)/64); - lp->tx_throttle = 1; - netif_stop_queue(dev); - } - - /* Drop packets when we run out of space in TX FIFO - * Account for overhead required for: - * - * Tx command words 8 bytes - * Start offset 15 bytes - * End padding 15 bytes - */ - if (unlikely(free < (skb->len + 8 + 15 + 15))) { - netdev_warn(dev, "No Tx free space %d < %d\n", - free, skb->len); - lp->pending_tx_skb = NULL; - dev->stats.tx_errors++; - dev->stats.tx_dropped++; - spin_unlock_irqrestore(&lp->lock, flags); - dev_kfree_skb_any(skb); - return NETDEV_TX_OK; - } - -#ifdef SMC_USE_DMA - { - /* If the DMA is already running then defer this packet Tx until - * the DMA IRQ starts it - */ - if (lp->txdma_active) { - DBG(SMC_DEBUG_TX | SMC_DEBUG_DMA, dev, "Tx DMA running, deferring packet\n"); - lp->pending_tx_skb = skb; - netif_stop_queue(dev); - spin_unlock_irqrestore(&lp->lock, flags); - return NETDEV_TX_OK; - } else { - DBG(SMC_DEBUG_TX | SMC_DEBUG_DMA, dev, "Activating Tx DMA\n"); - lp->txdma_active = 1; - } - } -#endif - lp->pending_tx_skb = skb; - smc911x_hardware_send_pkt(dev); - spin_unlock_irqrestore(&lp->lock, flags); - - return NETDEV_TX_OK; -} - -/* - * This handles a TX status interrupt, which is only called when: - * - a TX error occurred, or - * - TX of a packet completed. 
- */ -static void smc911x_tx(struct net_device *dev) -{ - struct smc911x_local *lp = netdev_priv(dev); - unsigned int tx_status; - - DBG(SMC_DEBUG_FUNC | SMC_DEBUG_TX, dev, "--> %s\n", - __func__); - - /* Collect the TX status */ - while (((SMC_GET_TX_FIFO_INF(lp) & TX_FIFO_INF_TSUSED_) >> 16) != 0) { - DBG(SMC_DEBUG_TX, dev, "Tx stat FIFO used 0x%04x\n", - (SMC_GET_TX_FIFO_INF(lp) & TX_FIFO_INF_TSUSED_) >> 16); - tx_status = SMC_GET_TX_STS_FIFO(lp); - dev->stats.tx_packets++; - dev->stats.tx_bytes+=tx_status>>16; - DBG(SMC_DEBUG_TX, dev, "Tx FIFO tag 0x%04x status 0x%04x\n", - (tx_status & 0xffff0000) >> 16, - tx_status & 0x0000ffff); - /* count Tx errors, but ignore lost carrier errors when in - * full-duplex mode */ - if ((tx_status & TX_STS_ES_) && !(lp->ctl_rfduplx && - !(tx_status & 0x00000306))) { - dev->stats.tx_errors++; - } - if (tx_status & TX_STS_MANY_COLL_) { - dev->stats.collisions+=16; - dev->stats.tx_aborted_errors++; - } else { - dev->stats.collisions+=(tx_status & TX_STS_COLL_CNT_) >> 3; - } - /* carrier error only has meaning for half-duplex communication */ - if ((tx_status & (TX_STS_LOC_ | TX_STS_NO_CARR_)) && - !lp->ctl_rfduplx) { - dev->stats.tx_carrier_errors++; - } - if (tx_status & TX_STS_LATE_COLL_) { - dev->stats.collisions++; - dev->stats.tx_aborted_errors++; - } - } -} - - -/*---PHY CONTROL AND CONFIGURATION-----------------------------------------*/ -/* - * Reads a register from the MII Management serial interface - */ - -static int smc911x_phy_read(struct net_device *dev, int phyaddr, int phyreg) -{ - struct smc911x_local *lp = netdev_priv(dev); - unsigned int phydata; - - SMC_GET_MII(lp, phyreg, phyaddr, phydata); - - DBG(SMC_DEBUG_MISC, dev, "%s: phyaddr=0x%x, phyreg=0x%02x, phydata=0x%04x\n", - __func__, phyaddr, phyreg, phydata); - return phydata; -} - - -/* - * Writes a register to the MII Management serial interface - */ -static void smc911x_phy_write(struct net_device *dev, int phyaddr, int phyreg, - int phydata) -{ - struct smc911x_local *lp = netdev_priv(dev); - - DBG(SMC_DEBUG_MISC, dev, "%s: phyaddr=0x%x, phyreg=0x%x, phydata=0x%x\n", - __func__, phyaddr, phyreg, phydata); - - SMC_SET_MII(lp, phyreg, phyaddr, phydata); -} - -/* - * Finds and reports the PHY address (115 and 117 have external - * PHY interface 118 has internal only - */ -static void smc911x_phy_detect(struct net_device *dev) -{ - struct smc911x_local *lp = netdev_priv(dev); - int phyaddr; - unsigned int cfg, id1, id2; - - DBG(SMC_DEBUG_FUNC, dev, "--> %s\n", __func__); - - lp->phy_type = 0; - - /* - * Scan all 32 PHY addresses if necessary, starting at - * PHY#1 to PHY#31, and then PHY#0 last. 
- */ - switch(lp->version) { - case CHIP_9115: - case CHIP_9117: - case CHIP_9215: - case CHIP_9217: - cfg = SMC_GET_HW_CFG(lp); - if (cfg & HW_CFG_EXT_PHY_DET_) { - cfg &= ~HW_CFG_PHY_CLK_SEL_; - cfg |= HW_CFG_PHY_CLK_SEL_CLK_DIS_; - SMC_SET_HW_CFG(lp, cfg); - udelay(10); /* Wait for clocks to stop */ - - cfg |= HW_CFG_EXT_PHY_EN_; - SMC_SET_HW_CFG(lp, cfg); - udelay(10); /* Wait for clocks to stop */ - - cfg &= ~HW_CFG_PHY_CLK_SEL_; - cfg |= HW_CFG_PHY_CLK_SEL_EXT_PHY_; - SMC_SET_HW_CFG(lp, cfg); - udelay(10); /* Wait for clocks to stop */ - - cfg |= HW_CFG_SMI_SEL_; - SMC_SET_HW_CFG(lp, cfg); - - for (phyaddr = 1; phyaddr < 32; ++phyaddr) { - - /* Read the PHY identifiers */ - SMC_GET_PHY_ID1(lp, phyaddr & 31, id1); - SMC_GET_PHY_ID2(lp, phyaddr & 31, id2); - - /* Make sure it is a valid identifier */ - if (id1 != 0x0000 && id1 != 0xffff && - id1 != 0x8000 && id2 != 0x0000 && - id2 != 0xffff && id2 != 0x8000) { - /* Save the PHY's address */ - lp->mii.phy_id = phyaddr & 31; - lp->phy_type = id1 << 16 | id2; - break; - } - } - if (phyaddr < 32) - /* Found an external PHY */ - break; - } - fallthrough; - default: - /* Internal media only */ - SMC_GET_PHY_ID1(lp, 1, id1); - SMC_GET_PHY_ID2(lp, 1, id2); - /* Save the PHY's address */ - lp->mii.phy_id = 1; - lp->phy_type = id1 << 16 | id2; - } - - DBG(SMC_DEBUG_MISC, dev, "phy_id1=0x%x, phy_id2=0x%x phyaddr=0x%x\n", - id1, id2, lp->mii.phy_id); -} - -/* - * Sets the PHY to a configuration as determined by the user. - * Called with spin_lock held. - */ -static int smc911x_phy_fixed(struct net_device *dev) -{ - struct smc911x_local *lp = netdev_priv(dev); - int phyaddr = lp->mii.phy_id; - int bmcr; - - DBG(SMC_DEBUG_FUNC, dev, "--> %s\n", __func__); - - /* Enter Link Disable state */ - SMC_GET_PHY_BMCR(lp, phyaddr, bmcr); - bmcr |= BMCR_PDOWN; - SMC_SET_PHY_BMCR(lp, phyaddr, bmcr); - - /* - * Set our fixed capabilities - * Disable auto-negotiation - */ - bmcr &= ~BMCR_ANENABLE; - if (lp->ctl_rfduplx) - bmcr |= BMCR_FULLDPLX; - - if (lp->ctl_rspeed == 100) - bmcr |= BMCR_SPEED100; - - /* Write our capabilities to the phy control register */ - SMC_SET_PHY_BMCR(lp, phyaddr, bmcr); - - /* Re-Configure the Receive/Phy Control register */ - bmcr &= ~BMCR_PDOWN; - SMC_SET_PHY_BMCR(lp, phyaddr, bmcr); - - return 1; -} - -/** - * smc911x_phy_reset - reset the phy - * @dev: net device - * @phy: phy address - * - * Issue a software reset for the specified PHY and - * wait up to 100ms for the reset to complete. We should - * not access the PHY for 50ms after issuing the reset. - * - * The time to wait appears to be dependent on the PHY. - * - */ -static int smc911x_phy_reset(struct net_device *dev, int phy) -{ - struct smc911x_local *lp = netdev_priv(dev); - int timeout; - unsigned long flags; - unsigned int reg; - - DBG(SMC_DEBUG_FUNC, dev, "--> %s()\n", __func__); - - spin_lock_irqsave(&lp->lock, flags); - reg = SMC_GET_PMT_CTRL(lp); - reg &= ~0xfffff030; - reg |= PMT_CTRL_PHY_RST_; - SMC_SET_PMT_CTRL(lp, reg); - spin_unlock_irqrestore(&lp->lock, flags); - for (timeout = 2; timeout; timeout--) { - msleep(50); - spin_lock_irqsave(&lp->lock, flags); - reg = SMC_GET_PMT_CTRL(lp); - spin_unlock_irqrestore(&lp->lock, flags); - if (!(reg & PMT_CTRL_PHY_RST_)) { - /* extra delay required because the phy may - * not be completed with its reset - * when PHY_BCR_RESET_ is cleared. 
256us - * should suffice, but use 500us to be safe - */ - udelay(500); - break; - } - } - - return reg & PMT_CTRL_PHY_RST_; -} - -/** - * smc911x_phy_powerdown - powerdown phy - * @dev: net device - * @phy: phy address - * - * Power down the specified PHY - */ -static void smc911x_phy_powerdown(struct net_device *dev, int phy) -{ - struct smc911x_local *lp = netdev_priv(dev); - unsigned int bmcr; - - /* Enter Link Disable state */ - SMC_GET_PHY_BMCR(lp, phy, bmcr); - bmcr |= BMCR_PDOWN; - SMC_SET_PHY_BMCR(lp, phy, bmcr); -} - -/** - * smc911x_phy_check_media - check the media status and adjust BMCR - * @dev: net device - * @init: set true for initialisation - * - * Select duplex mode depending on negotiation state. This - * also updates our carrier state. - */ -static void smc911x_phy_check_media(struct net_device *dev, int init) -{ - struct smc911x_local *lp = netdev_priv(dev); - int phyaddr = lp->mii.phy_id; - unsigned int bmcr, cr; - - DBG(SMC_DEBUG_FUNC, dev, "--> %s\n", __func__); - - if (mii_check_media(&lp->mii, netif_msg_link(lp), init)) { - /* duplex state has changed */ - SMC_GET_PHY_BMCR(lp, phyaddr, bmcr); - SMC_GET_MAC_CR(lp, cr); - if (lp->mii.full_duplex) { - DBG(SMC_DEBUG_MISC, dev, "Configuring for full-duplex mode\n"); - bmcr |= BMCR_FULLDPLX; - cr |= MAC_CR_RCVOWN_; - } else { - DBG(SMC_DEBUG_MISC, dev, "Configuring for half-duplex mode\n"); - bmcr &= ~BMCR_FULLDPLX; - cr &= ~MAC_CR_RCVOWN_; - } - SMC_SET_PHY_BMCR(lp, phyaddr, bmcr); - SMC_SET_MAC_CR(lp, cr); - } -} - -/* - * Configures the specified PHY through the MII management interface - * using Autonegotiation. - * Calls smc911x_phy_fixed() if the user has requested a certain config. - * If RPC ANEG bit is set, the media selection is dependent purely on - * the selection by the MII (either in the MII BMCR reg or the result - * of autonegotiation.) If the RPC ANEG bit is cleared, the selection - * is controlled by the RPC SPEED and RPC DPLX bits. - */ -static void smc911x_phy_configure(struct work_struct *work) -{ - struct smc911x_local *lp = container_of(work, struct smc911x_local, - phy_configure); - struct net_device *dev = lp->netdev; - int phyaddr = lp->mii.phy_id; - int my_phy_caps; /* My PHY capabilities */ - int my_ad_caps; /* My Advertised capabilities */ - int status __always_unused; - unsigned long flags; - - DBG(SMC_DEBUG_FUNC, dev, "--> %s()\n", __func__); - - /* - * We should not be called if phy_type is zero. 
- */ - if (lp->phy_type == 0) - return; - - if (smc911x_phy_reset(dev, phyaddr)) { - netdev_info(dev, "PHY reset timed out\n"); - return; - } - spin_lock_irqsave(&lp->lock, flags); - - /* - * Enable PHY Interrupts (for register 18) - * Interrupts listed here are enabled - */ - SMC_SET_PHY_INT_MASK(lp, phyaddr, PHY_INT_MASK_ENERGY_ON_ | - PHY_INT_MASK_ANEG_COMP_ | PHY_INT_MASK_REMOTE_FAULT_ | - PHY_INT_MASK_LINK_DOWN_); - - /* If the user requested no auto neg, then go set his request */ - if (lp->mii.force_media) { - smc911x_phy_fixed(dev); - goto smc911x_phy_configure_exit; - } - - /* Copy our capabilities from MII_BMSR to MII_ADVERTISE */ - SMC_GET_PHY_BMSR(lp, phyaddr, my_phy_caps); - if (!(my_phy_caps & BMSR_ANEGCAPABLE)) { - netdev_info(dev, "Auto negotiation NOT supported\n"); - smc911x_phy_fixed(dev); - goto smc911x_phy_configure_exit; - } - - /* CSMA capable w/ both pauses */ - my_ad_caps = ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM; - - if (my_phy_caps & BMSR_100BASE4) - my_ad_caps |= ADVERTISE_100BASE4; - if (my_phy_caps & BMSR_100FULL) - my_ad_caps |= ADVERTISE_100FULL; - if (my_phy_caps & BMSR_100HALF) - my_ad_caps |= ADVERTISE_100HALF; - if (my_phy_caps & BMSR_10FULL) - my_ad_caps |= ADVERTISE_10FULL; - if (my_phy_caps & BMSR_10HALF) - my_ad_caps |= ADVERTISE_10HALF; - - /* Disable capabilities not selected by our user */ - if (lp->ctl_rspeed != 100) - my_ad_caps &= ~(ADVERTISE_100BASE4|ADVERTISE_100FULL|ADVERTISE_100HALF); - - if (!lp->ctl_rfduplx) - my_ad_caps &= ~(ADVERTISE_100FULL|ADVERTISE_10FULL); - - /* Update our Auto-Neg Advertisement Register */ - SMC_SET_PHY_MII_ADV(lp, phyaddr, my_ad_caps); - lp->mii.advertising = my_ad_caps; - - /* - * Read the register back. Without this, it appears that when - * auto-negotiation is restarted, sometimes it isn't ready and - * the link does not come up. - */ - udelay(10); - SMC_GET_PHY_MII_ADV(lp, phyaddr, status); - - DBG(SMC_DEBUG_MISC, dev, "phy caps=0x%04x\n", my_phy_caps); - DBG(SMC_DEBUG_MISC, dev, "phy advertised caps=0x%04x\n", my_ad_caps); - - /* Restart auto-negotiation process in order to advertise my caps */ - SMC_SET_PHY_BMCR(lp, phyaddr, BMCR_ANENABLE | BMCR_ANRESTART); - - smc911x_phy_check_media(dev, 1); - -smc911x_phy_configure_exit: - spin_unlock_irqrestore(&lp->lock, flags); -} - -/* - * smc911x_phy_interrupt - * - * Purpose: Handle interrupts relating to PHY register 18. This is - * called from the "hard" interrupt handler under our private spinlock. - */ -static void smc911x_phy_interrupt(struct net_device *dev) -{ - struct smc911x_local *lp = netdev_priv(dev); - int phyaddr = lp->mii.phy_id; - int status __always_unused; - - DBG(SMC_DEBUG_FUNC, dev, "--> %s\n", __func__); - - if (lp->phy_type == 0) - return; - - smc911x_phy_check_media(dev, 0); - /* read to clear status bits */ - SMC_GET_PHY_INT_SRC(lp, phyaddr,status); - DBG(SMC_DEBUG_MISC, dev, "PHY interrupt status 0x%04x\n", - status & 0xffff); - DBG(SMC_DEBUG_MISC, dev, "AFC_CFG 0x%08x\n", - SMC_GET_AFC_CFG(lp)); -} - -/*--- END PHY CONTROL AND CONFIGURATION-------------------------------------*/ - -/* - * This is the main routine of the driver, to handle the device when - * it needs some attention. 
- */ -static irqreturn_t smc911x_interrupt(int irq, void *dev_id) -{ - struct net_device *dev = dev_id; - struct smc911x_local *lp = netdev_priv(dev); - unsigned int status, mask, timeout; - unsigned int rx_overrun=0, cr, pkts; - unsigned long flags; - - DBG(SMC_DEBUG_FUNC, dev, "--> %s\n", __func__); - - spin_lock_irqsave(&lp->lock, flags); - - /* Spurious interrupt check */ - if ((SMC_GET_IRQ_CFG(lp) & (INT_CFG_IRQ_INT_ | INT_CFG_IRQ_EN_)) != - (INT_CFG_IRQ_INT_ | INT_CFG_IRQ_EN_)) { - spin_unlock_irqrestore(&lp->lock, flags); - return IRQ_NONE; - } - - mask = SMC_GET_INT_EN(lp); - SMC_SET_INT_EN(lp, 0); - - /* set a timeout value, so I don't stay here forever */ - timeout = 8; - - - do { - status = SMC_GET_INT(lp); - - DBG(SMC_DEBUG_MISC, dev, "INT 0x%08x MASK 0x%08x OUTSIDE MASK 0x%08x\n", - status, mask, status & ~mask); - - status &= mask; - if (!status) - break; - - /* Handle SW interrupt condition */ - if (status & INT_STS_SW_INT_) { - SMC_ACK_INT(lp, INT_STS_SW_INT_); - mask &= ~INT_EN_SW_INT_EN_; - } - /* Handle various error conditions */ - if (status & INT_STS_RXE_) { - SMC_ACK_INT(lp, INT_STS_RXE_); - dev->stats.rx_errors++; - } - if (status & INT_STS_RXDFH_INT_) { - SMC_ACK_INT(lp, INT_STS_RXDFH_INT_); - dev->stats.rx_dropped+=SMC_GET_RX_DROP(lp); - } - /* Undocumented interrupt-what is the right thing to do here? */ - if (status & INT_STS_RXDF_INT_) { - SMC_ACK_INT(lp, INT_STS_RXDF_INT_); - } - - /* Rx Data FIFO exceeds set level */ - if (status & INT_STS_RDFL_) { - if (IS_REV_A(lp->revision)) { - rx_overrun=1; - SMC_GET_MAC_CR(lp, cr); - cr &= ~MAC_CR_RXEN_; - SMC_SET_MAC_CR(lp, cr); - DBG(SMC_DEBUG_RX, dev, "RX overrun\n"); - dev->stats.rx_errors++; - dev->stats.rx_fifo_errors++; - } - SMC_ACK_INT(lp, INT_STS_RDFL_); - } - if (status & INT_STS_RDFO_) { - if (!IS_REV_A(lp->revision)) { - SMC_GET_MAC_CR(lp, cr); - cr &= ~MAC_CR_RXEN_; - SMC_SET_MAC_CR(lp, cr); - rx_overrun=1; - DBG(SMC_DEBUG_RX, dev, "RX overrun\n"); - dev->stats.rx_errors++; - dev->stats.rx_fifo_errors++; - } - SMC_ACK_INT(lp, INT_STS_RDFO_); - } - /* Handle receive condition */ - if ((status & INT_STS_RSFL_) || rx_overrun) { - unsigned int fifo; - DBG(SMC_DEBUG_RX, dev, "RX irq\n"); - fifo = SMC_GET_RX_FIFO_INF(lp); - pkts = (fifo & RX_FIFO_INF_RXSUSED_) >> 16; - DBG(SMC_DEBUG_RX, dev, "Rx FIFO pkts %d, bytes %d\n", - pkts, fifo & 0xFFFF); - if (pkts != 0) { -#ifdef SMC_USE_DMA - unsigned int fifo; - if (lp->rxdma_active){ - DBG(SMC_DEBUG_RX | SMC_DEBUG_DMA, dev, - "RX DMA active\n"); - /* The DMA is already running so up the IRQ threshold */ - fifo = SMC_GET_FIFO_INT(lp) & ~0xFF; - fifo |= pkts & 0xFF; - DBG(SMC_DEBUG_RX, dev, - "Setting RX stat FIFO threshold to %d\n", - fifo & 0xff); - SMC_SET_FIFO_INT(lp, fifo); - } else -#endif - smc911x_rcv(dev); - } - SMC_ACK_INT(lp, INT_STS_RSFL_); - } - /* Handle transmit FIFO available */ - if (status & INT_STS_TDFA_) { - DBG(SMC_DEBUG_TX, dev, "TX data FIFO space available irq\n"); - SMC_SET_FIFO_TDA(lp, 0xFF); - lp->tx_throttle = 0; -#ifdef SMC_USE_DMA - if (!lp->txdma_active) -#endif - netif_wake_queue(dev); - SMC_ACK_INT(lp, INT_STS_TDFA_); - } - /* Handle transmit done condition */ -#if 1 - if (status & (INT_STS_TSFL_ | INT_STS_GPT_INT_)) { - DBG(SMC_DEBUG_TX | SMC_DEBUG_MISC, dev, - "Tx stat FIFO limit (%d) /GPT irq\n", - (SMC_GET_FIFO_INT(lp) & 0x00ff0000) >> 16); - smc911x_tx(dev); - SMC_SET_GPT_CFG(lp, GPT_CFG_TIMER_EN_ | 10000); - SMC_ACK_INT(lp, INT_STS_TSFL_); - SMC_ACK_INT(lp, INT_STS_TSFL_ | INT_STS_GPT_INT_); - } -#else - if (status & 
INT_STS_TSFL_) { - DBG(SMC_DEBUG_TX, dev, "TX status FIFO limit (%d) irq\n", ?); - smc911x_tx(dev); - SMC_ACK_INT(lp, INT_STS_TSFL_); - } - - if (status & INT_STS_GPT_INT_) { - DBG(SMC_DEBUG_RX, dev, "IRQ_CFG 0x%08x FIFO_INT 0x%08x RX_CFG 0x%08x\n", - SMC_GET_IRQ_CFG(lp), - SMC_GET_FIFO_INT(lp), - SMC_GET_RX_CFG(lp)); - DBG(SMC_DEBUG_RX, dev, "Rx Stat FIFO Used 0x%02x Data FIFO Used 0x%04x Stat FIFO 0x%08x\n", - (SMC_GET_RX_FIFO_INF(lp) & 0x00ff0000) >> 16, - SMC_GET_RX_FIFO_INF(lp) & 0xffff, - SMC_GET_RX_STS_FIFO_PEEK(lp)); - SMC_SET_GPT_CFG(lp, GPT_CFG_TIMER_EN_ | 10000); - SMC_ACK_INT(lp, INT_STS_GPT_INT_); - } -#endif - - /* Handle PHY interrupt condition */ - if (status & INT_STS_PHY_INT_) { - DBG(SMC_DEBUG_MISC, dev, "PHY irq\n"); - smc911x_phy_interrupt(dev); - SMC_ACK_INT(lp, INT_STS_PHY_INT_); - } - } while (--timeout); - - /* restore mask state */ - SMC_SET_INT_EN(lp, mask); - - DBG(SMC_DEBUG_MISC, dev, "Interrupt done (%d loops)\n", - 8-timeout); - - spin_unlock_irqrestore(&lp->lock, flags); - - return IRQ_HANDLED; -} - -#ifdef SMC_USE_DMA -static void -smc911x_tx_dma_irq(void *data) -{ - struct smc911x_local *lp = data; - struct net_device *dev = lp->netdev; - struct sk_buff *skb = lp->current_tx_skb; - unsigned long flags; - - DBG(SMC_DEBUG_FUNC, dev, "--> %s\n", __func__); - - DBG(SMC_DEBUG_TX | SMC_DEBUG_DMA, dev, "TX DMA irq handler\n"); - BUG_ON(skb == NULL); - dma_unmap_single(lp->dev, tx_dmabuf, tx_dmalen, DMA_TO_DEVICE); - netif_trans_update(dev); - dev_kfree_skb_irq(skb); - lp->current_tx_skb = NULL; - if (lp->pending_tx_skb != NULL) - smc911x_hardware_send_pkt(dev); - else { - DBG(SMC_DEBUG_TX | SMC_DEBUG_DMA, dev, - "No pending Tx packets. DMA disabled\n"); - spin_lock_irqsave(&lp->lock, flags); - lp->txdma_active = 0; - if (!lp->tx_throttle) { - netif_wake_queue(dev); - } - spin_unlock_irqrestore(&lp->lock, flags); - } - - DBG(SMC_DEBUG_TX | SMC_DEBUG_DMA, dev, - "TX DMA irq completed\n"); -} -static void -smc911x_rx_dma_irq(void *data) -{ - struct smc911x_local *lp = data; - struct net_device *dev = lp->netdev; - struct sk_buff *skb = lp->current_rx_skb; - unsigned long flags; - unsigned int pkts; - - DBG(SMC_DEBUG_FUNC, dev, "--> %s\n", __func__); - DBG(SMC_DEBUG_RX | SMC_DEBUG_DMA, dev, "RX DMA irq handler\n"); - dma_unmap_single(lp->dev, rx_dmabuf, rx_dmalen, DMA_FROM_DEVICE); - BUG_ON(skb == NULL); - lp->current_rx_skb = NULL; - PRINT_PKT(skb->data, skb->len); - skb->protocol = eth_type_trans(skb, dev); - dev->stats.rx_packets++; - dev->stats.rx_bytes += skb->len; - netif_rx(skb); - - spin_lock_irqsave(&lp->lock, flags); - pkts = (SMC_GET_RX_FIFO_INF(lp) & RX_FIFO_INF_RXSUSED_) >> 16; - if (pkts != 0) { - smc911x_rcv(dev); - }else { - lp->rxdma_active = 0; - } - spin_unlock_irqrestore(&lp->lock, flags); - DBG(SMC_DEBUG_RX | SMC_DEBUG_DMA, dev, - "RX DMA irq completed. DMA RX FIFO PKTS %d\n", - pkts); -} -#endif /* SMC_USE_DMA */ - -#ifdef CONFIG_NET_POLL_CONTROLLER -/* - * Polling receive - used by netconsole and other diagnostic tools - * to allow network i/o with interrupts disabled. - */ -static void smc911x_poll_controller(struct net_device *dev) -{ - disable_irq(dev->irq); - smc911x_interrupt(dev->irq, dev); - enable_irq(dev->irq); -} -#endif - -/* Our watchdog timed out. 
Called by the networking layer */ -static void smc911x_timeout(struct net_device *dev, unsigned int txqueue) -{ - struct smc911x_local *lp = netdev_priv(dev); - int status, mask; - unsigned long flags; - - DBG(SMC_DEBUG_FUNC, dev, "--> %s\n", __func__); - - spin_lock_irqsave(&lp->lock, flags); - status = SMC_GET_INT(lp); - mask = SMC_GET_INT_EN(lp); - spin_unlock_irqrestore(&lp->lock, flags); - DBG(SMC_DEBUG_MISC, dev, "INT 0x%02x MASK 0x%02x\n", - status, mask); - - /* Dump the current TX FIFO contents and restart */ - mask = SMC_GET_TX_CFG(lp); - SMC_SET_TX_CFG(lp, mask | TX_CFG_TXS_DUMP_ | TX_CFG_TXD_DUMP_); - /* - * Reconfiguring the PHY doesn't seem like a bad idea here, but - * smc911x_phy_configure() calls msleep() which calls schedule_timeout() - * which calls schedule(). Hence we use a work queue. - */ - if (lp->phy_type != 0) - schedule_work(&lp->phy_configure); - - /* We can accept TX packets again */ - netif_trans_update(dev); /* prevent tx timeout */ - netif_wake_queue(dev); -} - -/* - * This routine will, depending on the values passed to it, - * either make it accept multicast packets, go into - * promiscuous mode (for TCPDUMP and cousins) or accept - * a select set of multicast packets - */ -static void smc911x_set_multicast_list(struct net_device *dev) -{ - struct smc911x_local *lp = netdev_priv(dev); - unsigned int multicast_table[2]; - unsigned int mcr, update_multicast = 0; - unsigned long flags; - - DBG(SMC_DEBUG_FUNC, dev, "--> %s\n", __func__); - - spin_lock_irqsave(&lp->lock, flags); - SMC_GET_MAC_CR(lp, mcr); - spin_unlock_irqrestore(&lp->lock, flags); - - if (dev->flags & IFF_PROMISC) { - - DBG(SMC_DEBUG_MISC, dev, "RCR_PRMS\n"); - mcr |= MAC_CR_PRMS_; - } - /* - * Here, I am setting this to accept all multicast packets. - * I don't need to zero the multicast table, because the flag is - * checked before the table is - */ - else if (dev->flags & IFF_ALLMULTI || netdev_mc_count(dev) > 16) { - DBG(SMC_DEBUG_MISC, dev, "RCR_ALMUL\n"); - mcr |= MAC_CR_MCPAS_; - } - - /* - * This sets the internal hardware table to filter out unwanted - * multicast packets before they take up memory. - * - * The SMC chip uses a hash table where the high 6 bits of the CRC of - * address are the offset into the table. If that bit is 1, then the - * multicast packet is accepted. Otherwise, it's dropped silently. - * - * To use the 6 bits as an offset into the table, the high 1 bit is - * the number of the 32 bit register, while the low 5 bits are the bit - * within that register. 
- */ - else if (!netdev_mc_empty(dev)) { - struct netdev_hw_addr *ha; - - /* Set the Hash perfec mode */ - mcr |= MAC_CR_HPFILT_; - - /* start with a table of all zeros: reject all */ - memset(multicast_table, 0, sizeof(multicast_table)); - - netdev_for_each_mc_addr(ha, dev) { - u32 position; - - /* upper 6 bits are used as hash index */ - position = ether_crc(ETH_ALEN, ha->addr)>>26; - - multicast_table[position>>5] |= 1 << (position&0x1f); - } - - /* be sure I get rid of flags I might have set */ - mcr &= ~(MAC_CR_PRMS_ | MAC_CR_MCPAS_); - - /* now, the table can be loaded into the chipset */ - update_multicast = 1; - } else { - DBG(SMC_DEBUG_MISC, dev, "~(MAC_CR_PRMS_|MAC_CR_MCPAS_)\n"); - mcr &= ~(MAC_CR_PRMS_ | MAC_CR_MCPAS_); - - /* - * since I'm disabling all multicast entirely, I need to - * clear the multicast list - */ - memset(multicast_table, 0, sizeof(multicast_table)); - update_multicast = 1; - } - - spin_lock_irqsave(&lp->lock, flags); - SMC_SET_MAC_CR(lp, mcr); - if (update_multicast) { - DBG(SMC_DEBUG_MISC, dev, - "update mcast hash table 0x%08x 0x%08x\n", - multicast_table[0], multicast_table[1]); - SMC_SET_HASHL(lp, multicast_table[0]); - SMC_SET_HASHH(lp, multicast_table[1]); - } - spin_unlock_irqrestore(&lp->lock, flags); -} - - -/* - * Open and Initialize the board - * - * Set up everything, reset the card, etc.. - */ -static int -smc911x_open(struct net_device *dev) -{ - struct smc911x_local *lp = netdev_priv(dev); - - DBG(SMC_DEBUG_FUNC, dev, "--> %s\n", __func__); - - /* reset the hardware */ - smc911x_reset(dev); - - /* Configure the PHY, initialize the link state */ - smc911x_phy_configure(&lp->phy_configure); - - /* Turn on Tx + Rx */ - smc911x_enable(dev); - - netif_start_queue(dev); - - return 0; -} - -/* - * smc911x_close - * - * this makes the board clean up everything that it can - * and not talk to the outside world. Caused by - * an 'ifconfig ethX down' - */ -static int smc911x_close(struct net_device *dev) -{ - struct smc911x_local *lp = netdev_priv(dev); - - DBG(SMC_DEBUG_FUNC, dev, "--> %s\n", __func__); - - netif_stop_queue(dev); - netif_carrier_off(dev); - - /* clear everything */ - smc911x_shutdown(dev); - - if (lp->phy_type != 0) { - /* We need to ensure that no calls to - * smc911x_phy_configure are pending. - */ - cancel_work_sync(&lp->phy_configure); - smc911x_phy_powerdown(dev, lp->mii.phy_id); - } - - if (lp->pending_tx_skb) { - dev_kfree_skb(lp->pending_tx_skb); - lp->pending_tx_skb = NULL; - } - - return 0; -} - -/* - * Ethtool support - */ -static int -smc911x_ethtool_get_link_ksettings(struct net_device *dev, - struct ethtool_link_ksettings *cmd) -{ - struct smc911x_local *lp = netdev_priv(dev); - int status; - unsigned long flags; - u32 supported; - - DBG(SMC_DEBUG_FUNC, dev, "--> %s\n", __func__); - - if (lp->phy_type != 0) { - spin_lock_irqsave(&lp->lock, flags); - mii_ethtool_get_link_ksettings(&lp->mii, cmd); - spin_unlock_irqrestore(&lp->lock, flags); - } else { - supported = SUPPORTED_10baseT_Half | - SUPPORTED_10baseT_Full | - SUPPORTED_TP | SUPPORTED_AUI; - - if (lp->ctl_rspeed == 10) - cmd->base.speed = SPEED_10; - else if (lp->ctl_rspeed == 100) - cmd->base.speed = SPEED_100; - - cmd->base.autoneg = AUTONEG_DISABLE; - cmd->base.port = 0; - SMC_GET_PHY_SPECIAL(lp, lp->mii.phy_id, status); - cmd->base.duplex = - (status & (PHY_SPECIAL_SPD_10FULL_ | PHY_SPECIAL_SPD_100FULL_)) ? 
- DUPLEX_FULL : DUPLEX_HALF; - - ethtool_convert_legacy_u32_to_link_mode( - cmd->link_modes.supported, supported); - - } - - return 0; -} - -static int -smc911x_ethtool_set_link_ksettings(struct net_device *dev, - const struct ethtool_link_ksettings *cmd) -{ - struct smc911x_local *lp = netdev_priv(dev); - int ret; - unsigned long flags; - - if (lp->phy_type != 0) { - spin_lock_irqsave(&lp->lock, flags); - ret = mii_ethtool_set_link_ksettings(&lp->mii, cmd); - spin_unlock_irqrestore(&lp->lock, flags); - } else { - if (cmd->base.autoneg != AUTONEG_DISABLE || - cmd->base.speed != SPEED_10 || - (cmd->base.duplex != DUPLEX_HALF && - cmd->base.duplex != DUPLEX_FULL) || - (cmd->base.port != PORT_TP && - cmd->base.port != PORT_AUI)) - return -EINVAL; - - lp->ctl_rfduplx = cmd->base.duplex == DUPLEX_FULL; - - ret = 0; - } - - return ret; -} - -static void -smc911x_ethtool_getdrvinfo(struct net_device *dev, struct ethtool_drvinfo *info) -{ - strscpy(info->driver, CARDNAME, sizeof(info->driver)); - strscpy(info->version, version, sizeof(info->version)); - strscpy(info->bus_info, dev_name(dev->dev.parent), - sizeof(info->bus_info)); -} - -static int smc911x_ethtool_nwayreset(struct net_device *dev) -{ - struct smc911x_local *lp = netdev_priv(dev); - int ret = -EINVAL; - unsigned long flags; - - if (lp->phy_type != 0) { - spin_lock_irqsave(&lp->lock, flags); - ret = mii_nway_restart(&lp->mii); - spin_unlock_irqrestore(&lp->lock, flags); - } - - return ret; -} - -static u32 smc911x_ethtool_getmsglevel(struct net_device *dev) -{ - struct smc911x_local *lp = netdev_priv(dev); - return lp->msg_enable; -} - -static void smc911x_ethtool_setmsglevel(struct net_device *dev, u32 level) -{ - struct smc911x_local *lp = netdev_priv(dev); - lp->msg_enable = level; -} - -static int smc911x_ethtool_getregslen(struct net_device *dev) -{ - /* System regs + MAC regs + PHY regs */ - return (((E2P_CMD - ID_REV)/4 + 1) + - (WUCSR - MAC_CR)+1 + 32) * sizeof(u32); -} - -static void smc911x_ethtool_getregs(struct net_device *dev, - struct ethtool_regs *regs, void *buf) -{ - struct smc911x_local *lp = netdev_priv(dev); - unsigned long flags; - u32 reg,i,j=0; - u32 *data = (u32*)buf; - - regs->version = lp->version; - for(i=ID_REV;i<=E2P_CMD;i+=4) { - data[j++] = SMC_inl(lp, i); - } - for(i=MAC_CR;i<=WUCSR;i++) { - spin_lock_irqsave(&lp->lock, flags); - SMC_GET_MAC_CSR(lp, i, reg); - spin_unlock_irqrestore(&lp->lock, flags); - data[j++] = reg; - } - for(i=0;i<=31;i++) { - spin_lock_irqsave(&lp->lock, flags); - SMC_GET_MII(lp, i, lp->mii.phy_id, reg); - spin_unlock_irqrestore(&lp->lock, flags); - data[j++] = reg & 0xFFFF; - } -} - -static int smc911x_ethtool_wait_eeprom_ready(struct net_device *dev) -{ - struct smc911x_local *lp = netdev_priv(dev); - unsigned int timeout; - int e2p_cmd; - - e2p_cmd = SMC_GET_E2P_CMD(lp); - for(timeout=10;(e2p_cmd & E2P_CMD_EPC_BUSY_) && timeout; timeout--) { - if (e2p_cmd & E2P_CMD_EPC_TIMEOUT_) { - PRINTK(dev, "%s timeout waiting for EEPROM to respond\n", - __func__); - return -EFAULT; - } - mdelay(1); - e2p_cmd = SMC_GET_E2P_CMD(lp); - } - if (timeout == 0) { - PRINTK(dev, "%s timeout waiting for EEPROM CMD not busy\n", - __func__); - return -ETIMEDOUT; - } - return 0; -} - -static inline int smc911x_ethtool_write_eeprom_cmd(struct net_device *dev, - int cmd, int addr) -{ - struct smc911x_local *lp = netdev_priv(dev); - int ret; - - if ((ret = smc911x_ethtool_wait_eeprom_ready(dev))!=0) - return ret; - SMC_SET_E2P_CMD(lp, E2P_CMD_EPC_BUSY_ | - ((cmd) & (0x7<<28)) | - ((addr) & 0xFF)); - 
return 0; -} - -static inline int smc911x_ethtool_read_eeprom_byte(struct net_device *dev, - u8 *data) -{ - struct smc911x_local *lp = netdev_priv(dev); - int ret; - - if ((ret = smc911x_ethtool_wait_eeprom_ready(dev))!=0) - return ret; - *data = SMC_GET_E2P_DATA(lp); - return 0; -} - -static inline int smc911x_ethtool_write_eeprom_byte(struct net_device *dev, - u8 data) -{ - struct smc911x_local *lp = netdev_priv(dev); - int ret; - - if ((ret = smc911x_ethtool_wait_eeprom_ready(dev))!=0) - return ret; - SMC_SET_E2P_DATA(lp, data); - return 0; -} - -static int smc911x_ethtool_geteeprom(struct net_device *dev, - struct ethtool_eeprom *eeprom, u8 *data) -{ - u8 eebuf[SMC911X_EEPROM_LEN]; - int i, ret; - - for(i=0;i<SMC911X_EEPROM_LEN;i++) { - if ((ret=smc911x_ethtool_write_eeprom_cmd(dev, E2P_CMD_EPC_CMD_READ_, i ))!=0) - return ret; - if ((ret=smc911x_ethtool_read_eeprom_byte(dev, &eebuf[i]))!=0) - return ret; - } - memcpy(data, eebuf+eeprom->offset, eeprom->len); - return 0; -} - -static int smc911x_ethtool_seteeprom(struct net_device *dev, - struct ethtool_eeprom *eeprom, u8 *data) -{ - int i, ret; - - /* Enable erase */ - if ((ret=smc911x_ethtool_write_eeprom_cmd(dev, E2P_CMD_EPC_CMD_EWEN_, 0 ))!=0) - return ret; - for(i=eeprom->offset;i<(eeprom->offset+eeprom->len);i++) { - /* erase byte */ - if ((ret=smc911x_ethtool_write_eeprom_cmd(dev, E2P_CMD_EPC_CMD_ERASE_, i ))!=0) - return ret; - /* write byte */ - if ((ret=smc911x_ethtool_write_eeprom_byte(dev, *data))!=0) - return ret; - if ((ret=smc911x_ethtool_write_eeprom_cmd(dev, E2P_CMD_EPC_CMD_WRITE_, i ))!=0) - return ret; - } - return 0; -} - -static int smc911x_ethtool_geteeprom_len(struct net_device *dev) -{ - return SMC911X_EEPROM_LEN; -} - -static const struct ethtool_ops smc911x_ethtool_ops = { - .get_drvinfo = smc911x_ethtool_getdrvinfo, - .get_msglevel = smc911x_ethtool_getmsglevel, - .set_msglevel = smc911x_ethtool_setmsglevel, - .nway_reset = smc911x_ethtool_nwayreset, - .get_link = ethtool_op_get_link, - .get_regs_len = smc911x_ethtool_getregslen, - .get_regs = smc911x_ethtool_getregs, - .get_eeprom_len = smc911x_ethtool_geteeprom_len, - .get_eeprom = smc911x_ethtool_geteeprom, - .set_eeprom = smc911x_ethtool_seteeprom, - .get_link_ksettings = smc911x_ethtool_get_link_ksettings, - .set_link_ksettings = smc911x_ethtool_set_link_ksettings, -}; - -/* - * smc911x_findirq - * - * This routine has a simple purpose -- make the SMC chip generate an - * interrupt, so an auto-detect routine can detect it, and find the IRQ, - */ -static int smc911x_findirq(struct net_device *dev) -{ - struct smc911x_local *lp = netdev_priv(dev); - int timeout = 20; - unsigned long cookie; - - DBG(SMC_DEBUG_FUNC, dev, "--> %s\n", __func__); - - cookie = probe_irq_on(); - - /* - * Force a SW interrupt - */ - - SMC_SET_INT_EN(lp, INT_EN_SW_INT_EN_); - - /* - * Wait until positive that the interrupt has been generated - */ - do { - int int_status; - udelay(10); - int_status = SMC_GET_INT_EN(lp); - if (int_status & INT_EN_SW_INT_EN_) - break; /* got the interrupt */ - } while (--timeout); - - /* - * there is really nothing that I can do here if timeout fails, - * as autoirq_report will return a 0 anyway, which is what I - * want in this case. Plus, the clean up is needed in both - * cases. 
- */ - - /* and disable all interrupts again */ - SMC_SET_INT_EN(lp, 0); - - /* and return what I found */ - return probe_irq_off(cookie); -} - -static const struct net_device_ops smc911x_netdev_ops = { - .ndo_open = smc911x_open, - .ndo_stop = smc911x_close, - .ndo_start_xmit = smc911x_hard_start_xmit, - .ndo_tx_timeout = smc911x_timeout, - .ndo_set_rx_mode = smc911x_set_multicast_list, - .ndo_validate_addr = eth_validate_addr, - .ndo_set_mac_address = eth_mac_addr, -#ifdef CONFIG_NET_POLL_CONTROLLER - .ndo_poll_controller = smc911x_poll_controller, -#endif -}; - -/* - * Function: smc911x_probe(unsigned long ioaddr) - * - * Purpose: - * Tests to see if a given ioaddr points to an SMC911x chip. - * Returns a 0 on success - * - * Algorithm: - * (1) see if the endian word is OK - * (1) see if I recognize the chip ID in the appropriate register - * - * Here I do typical initialization tasks. - * - * o Initialize the structure if needed - * o print out my vanity message if not done so already - * o print out what type of hardware is detected - * o print out the ethernet address - * o find the IRQ - * o set up my private data - * o configure the dev structure with my subroutines - * o actually GRAB the irq. - * o GRAB the region - */ -static int smc911x_probe(struct net_device *dev) -{ - struct smc911x_local *lp = netdev_priv(dev); - int i, retval; - unsigned int val, chip_id, revision; - const char *version_string; - unsigned long irq_flags; -#ifdef SMC_USE_DMA - struct dma_slave_config config; - dma_cap_mask_t mask; -#endif - u8 addr[ETH_ALEN]; - - DBG(SMC_DEBUG_FUNC, dev, "--> %s\n", __func__); - - /* First, see if the endian word is recognized */ - val = SMC_GET_BYTE_TEST(lp); - DBG(SMC_DEBUG_MISC, dev, "%s: endian probe returned 0x%04x\n", - CARDNAME, val); - if (val != 0x87654321) { - netdev_err(dev, "Invalid chip endian 0x%08x\n", val); - retval = -ENODEV; - goto err_out; - } - - /* - * check if the revision register is something that I - * recognize. These might need to be added to later, - * as future revisions could be added. - */ - chip_id = SMC_GET_PN(lp); - DBG(SMC_DEBUG_MISC, dev, "%s: id probe returned 0x%04x\n", - CARDNAME, chip_id); - for(i=0;chip_ids[i].id != 0; i++) { - if (chip_ids[i].id == chip_id) break; - } - if (!chip_ids[i].id) { - netdev_err(dev, "Unknown chip ID %04x\n", chip_id); - retval = -ENODEV; - goto err_out; - } - version_string = chip_ids[i].name; - - revision = SMC_GET_REV(lp); - DBG(SMC_DEBUG_MISC, dev, "%s: revision = 0x%04x\n", CARDNAME, revision); - - /* At this point I'll assume that the chip is an SMC911x. 
*/ - DBG(SMC_DEBUG_MISC, dev, "%s: Found a %s\n", - CARDNAME, chip_ids[i].name); - - /* Validate the TX FIFO size requested */ - if ((tx_fifo_kb < 2) || (tx_fifo_kb > 14)) { - netdev_err(dev, "Invalid TX FIFO size requested %d\n", - tx_fifo_kb); - retval = -EINVAL; - goto err_out; - } - - /* fill in some of the fields */ - lp->version = chip_ids[i].id; - lp->revision = revision; - lp->tx_fifo_kb = tx_fifo_kb; - /* Reverse calculate the RX FIFO size from the TX */ - lp->tx_fifo_size=(lp->tx_fifo_kb<<10) - 512; - lp->rx_fifo_size= ((0x4000 - 512 - lp->tx_fifo_size) / 16) * 15; - - /* Set the automatic flow control values */ - switch(lp->tx_fifo_kb) { - /* - * AFC_HI is about ((Rx Data Fifo Size)*2/3)/64 - * AFC_LO is AFC_HI/2 - * BACK_DUR is about 5uS*(AFC_LO) rounded down - */ - case 2:/* 13440 Rx Data Fifo Size */ - lp->afc_cfg=0x008C46AF;break; - case 3:/* 12480 Rx Data Fifo Size */ - lp->afc_cfg=0x0082419F;break; - case 4:/* 11520 Rx Data Fifo Size */ - lp->afc_cfg=0x00783C9F;break; - case 5:/* 10560 Rx Data Fifo Size */ - lp->afc_cfg=0x006E374F;break; - case 6:/* 9600 Rx Data Fifo Size */ - lp->afc_cfg=0x0064328F;break; - case 7:/* 8640 Rx Data Fifo Size */ - lp->afc_cfg=0x005A2D7F;break; - case 8:/* 7680 Rx Data Fifo Size */ - lp->afc_cfg=0x0050287F;break; - case 9:/* 6720 Rx Data Fifo Size */ - lp->afc_cfg=0x0046236F;break; - case 10:/* 5760 Rx Data Fifo Size */ - lp->afc_cfg=0x003C1E6F;break; - case 11:/* 4800 Rx Data Fifo Size */ - lp->afc_cfg=0x0032195F;break; - /* - * AFC_HI is ~1520 bytes less than RX Data Fifo Size - * AFC_LO is AFC_HI/2 - * BACK_DUR is about 5uS*(AFC_LO) rounded down - */ - case 12:/* 3840 Rx Data Fifo Size */ - lp->afc_cfg=0x0024124F;break; - case 13:/* 2880 Rx Data Fifo Size */ - lp->afc_cfg=0x0015073F;break; - case 14:/* 1920 Rx Data Fifo Size */ - lp->afc_cfg=0x0006032F;break; - default: - PRINTK(dev, "ERROR -- no AFC_CFG setting found"); - break; - } - - DBG(SMC_DEBUG_MISC | SMC_DEBUG_TX | SMC_DEBUG_RX, dev, - "%s: tx_fifo %d rx_fifo %d afc_cfg 0x%08x\n", CARDNAME, - lp->tx_fifo_size, lp->rx_fifo_size, lp->afc_cfg); - - spin_lock_init(&lp->lock); - - /* Get the MAC address */ - SMC_GET_MAC_ADDR(lp, addr); - eth_hw_addr_set(dev, addr); - - /* now, reset the chip, and put it into a known state */ - smc911x_reset(dev); - - /* - * If dev->irq is 0, then the device has to be banged on to see - * what the IRQ is. - * - * Specifying an IRQ is done with the assumption that the user knows - * what (s)he is doing. No checking is done!!!! - */ - if (dev->irq < 1) { - int trials; - - trials = 3; - while (trials--) { - dev->irq = smc911x_findirq(dev); - if (dev->irq) - break; - /* kick the card and try again */ - smc911x_reset(dev); - } - } - if (dev->irq == 0) { - netdev_warn(dev, "Couldn't autodetect your IRQ. Use irq=xx.\n"); - retval = -ENODEV; - goto err_out; - } - dev->irq = irq_canonicalize(dev->irq); - - dev->netdev_ops = &smc911x_netdev_ops; - dev->watchdog_timeo = msecs_to_jiffies(watchdog); - dev->ethtool_ops = &smc911x_ethtool_ops; - - INIT_WORK(&lp->phy_configure, smc911x_phy_configure); - lp->mii.phy_id_mask = 0x1f; - lp->mii.reg_num_mask = 0x1f; - lp->mii.force_media = 0; - lp->mii.full_duplex = 0; - lp->mii.dev = dev; - lp->mii.mdio_read = smc911x_phy_read; - lp->mii.mdio_write = smc911x_phy_write; - - /* - * Locate the phy, if any. 
- */ - smc911x_phy_detect(dev); - - /* Set default parameters */ - lp->msg_enable = NETIF_MSG_LINK; - lp->ctl_rfduplx = 1; - lp->ctl_rspeed = 100; - -#ifdef SMC_DYNAMIC_BUS_CONFIG - irq_flags = lp->cfg.irq_flags; -#else - irq_flags = IRQF_SHARED | SMC_IRQ_SENSE; -#endif - - /* Grab the IRQ */ - retval = request_irq(dev->irq, smc911x_interrupt, - irq_flags, dev->name, dev); - if (retval) - goto err_out; - -#ifdef SMC_USE_DMA - - dma_cap_zero(mask); - dma_cap_set(DMA_SLAVE, mask); - lp->rxdma = dma_request_channel(mask, NULL, NULL); - lp->txdma = dma_request_channel(mask, NULL, NULL); - lp->rxdma_active = 0; - lp->txdma_active = 0; - - memset(&config, 0, sizeof(config)); - config.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; - config.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; - config.src_addr = lp->physaddr + RX_DATA_FIFO; - config.dst_addr = lp->physaddr + TX_DATA_FIFO; - config.src_maxburst = 32; - config.dst_maxburst = 32; - retval = dmaengine_slave_config(lp->rxdma, &config); - if (retval) { - dev_err(lp->dev, "dma rx channel configuration failed: %d\n", - retval); - goto err_out; - } - retval = dmaengine_slave_config(lp->txdma, &config); - if (retval) { - dev_err(lp->dev, "dma tx channel configuration failed: %d\n", - retval); - goto err_out; - } -#endif - - retval = register_netdev(dev); - if (retval == 0) { - /* now, print out the card info, in a short format.. */ - netdev_info(dev, "%s (rev %d) at %#lx IRQ %d", - version_string, lp->revision, - dev->base_addr, dev->irq); - -#ifdef SMC_USE_DMA - if (lp->rxdma) - pr_cont(" RXDMA %p", lp->rxdma); - - if (lp->txdma) - pr_cont(" TXDMA %p", lp->txdma); -#endif - pr_cont("\n"); - if (!is_valid_ether_addr(dev->dev_addr)) { - netdev_warn(dev, "Invalid ethernet MAC address. Please set using ifconfig\n"); - } else { - /* Print the Ethernet address */ - netdev_info(dev, "Ethernet addr: %pM\n", - dev->dev_addr); - } - - if (lp->phy_type == 0) { - PRINTK(dev, "No PHY found\n"); - } else if ((lp->phy_type & ~0xff) == LAN911X_INTERNAL_PHY_ID) { - PRINTK(dev, "LAN911x Internal PHY\n"); - } else { - PRINTK(dev, "External PHY 0x%08x\n", lp->phy_type); - } - } - -err_out: -#ifdef SMC_USE_DMA - if (retval) { - if (lp->rxdma) - dma_release_channel(lp->rxdma); - if (lp->txdma) - dma_release_channel(lp->txdma); - } -#endif - return retval; -} - -/* - * smc911x_drv_probe(void) - * - * Output: - * 0 --> there is a device - * anything else, error - */ -static int smc911x_drv_probe(struct platform_device *pdev) -{ - struct net_device *ndev; - struct resource *res; - struct smc911x_local *lp; - void __iomem *addr; - int ret; - - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (!res) { - ret = -ENODEV; - goto out; - } - - /* - * Request the regions. 
- */ - if (!request_mem_region(res->start, SMC911X_IO_EXTENT, CARDNAME)) { - ret = -EBUSY; - goto out; - } - - ndev = alloc_etherdev(sizeof(struct smc911x_local)); - if (!ndev) { - ret = -ENOMEM; - goto release_1; - } - SET_NETDEV_DEV(ndev, &pdev->dev); - - ndev->dma = (unsigned char)-1; - ndev->irq = platform_get_irq(pdev, 0); - if (ndev->irq < 0) { - ret = ndev->irq; - goto release_both; - } - - lp = netdev_priv(ndev); - lp->netdev = ndev; -#ifdef SMC_DYNAMIC_BUS_CONFIG - { - struct smc911x_platdata *pd = dev_get_platdata(&pdev->dev); - if (!pd) { - ret = -EINVAL; - goto release_both; - } - memcpy(&lp->cfg, pd, sizeof(lp->cfg)); - } -#endif - - addr = ioremap(res->start, SMC911X_IO_EXTENT); - if (!addr) { - ret = -ENOMEM; - goto release_both; - } - - platform_set_drvdata(pdev, ndev); - lp->base = addr; - ndev->base_addr = res->start; - ret = smc911x_probe(ndev); - if (ret != 0) { - iounmap(addr); -release_both: - free_netdev(ndev); -release_1: - release_mem_region(res->start, SMC911X_IO_EXTENT); -out: - pr_info("%s: not found (%d).\n", CARDNAME, ret); - } -#ifdef SMC_USE_DMA - else { - lp->physaddr = res->start; - lp->dev = &pdev->dev; - } -#endif - - return ret; -} - -static int smc911x_drv_remove(struct platform_device *pdev) -{ - struct net_device *ndev = platform_get_drvdata(pdev); - struct smc911x_local *lp = netdev_priv(ndev); - struct resource *res; - - DBG(SMC_DEBUG_FUNC, ndev, "--> %s\n", __func__); - - unregister_netdev(ndev); - - free_irq(ndev->irq, ndev); - -#ifdef SMC_USE_DMA - { - if (lp->rxdma) - dma_release_channel(lp->rxdma); - if (lp->txdma) - dma_release_channel(lp->txdma); - } -#endif - iounmap(lp->base); - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - release_mem_region(res->start, SMC911X_IO_EXTENT); - - free_netdev(ndev); - return 0; -} - -static int smc911x_drv_suspend(struct platform_device *dev, pm_message_t state) -{ - struct net_device *ndev = platform_get_drvdata(dev); - struct smc911x_local *lp = netdev_priv(ndev); - - DBG(SMC_DEBUG_FUNC, ndev, "--> %s\n", __func__); - if (ndev) { - if (netif_running(ndev)) { - netif_device_detach(ndev); - smc911x_shutdown(ndev); -#if POWER_DOWN - /* Set D2 - Energy detect only setting */ - SMC_SET_PMT_CTRL(lp, 2<<12); -#endif - } - } - return 0; -} - -static int smc911x_drv_resume(struct platform_device *dev) -{ - struct net_device *ndev = platform_get_drvdata(dev); - - DBG(SMC_DEBUG_FUNC, ndev, "--> %s\n", __func__); - if (ndev) { - struct smc911x_local *lp = netdev_priv(ndev); - - if (netif_running(ndev)) { - smc911x_reset(ndev); - if (lp->phy_type != 0) - smc911x_phy_configure(&lp->phy_configure); - smc911x_enable(ndev); - netif_device_attach(ndev); - } - } - return 0; -} - -static struct platform_driver smc911x_driver = { - .probe = smc911x_drv_probe, - .remove = smc911x_drv_remove, - .suspend = smc911x_drv_suspend, - .resume = smc911x_drv_resume, - .driver = { - .name = CARDNAME, - }, -}; - -module_platform_driver(smc911x_driver); diff --git a/drivers/net/ethernet/smsc/smc911x.h b/drivers/net/ethernet/smsc/smc911x.h deleted file mode 100644 index d4edcc0da87c..000000000000 --- a/drivers/net/ethernet/smsc/smc911x.h +++ /dev/null @@ -1,901 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ -/*------------------------------------------------------------------------ - . smc911x.h - macros for SMSC's LAN911{5,6,7,8} single-chip Ethernet device. - . - . Copyright (C) 2005 Sensoria Corp. - . Derived from the unified SMC91x driver by Nicolas Pitre - . - . - . 
Information contained in this file was obtained from the LAN9118 - . manual from SMC. To get a copy, if you really want one, you can find - . information under www.smsc.com. - . - . Authors - . Dustin McIntire <dustin@sensoria.com> - . - ---------------------------------------------------------------------------*/ -#ifndef _SMC911X_H_ -#define _SMC911X_H_ - -#include <linux/smc911x.h> -/* - * Use the DMA feature on PXA chips - */ -#ifdef CONFIG_ARCH_PXA - #define SMC_USE_PXA_DMA 1 - #define SMC_USE_16BIT 0 - #define SMC_USE_32BIT 1 - #define SMC_IRQ_SENSE IRQF_TRIGGER_FALLING -#elif defined(CONFIG_SH_MAGIC_PANEL_R2) - #define SMC_USE_16BIT 0 - #define SMC_USE_32BIT 1 - #define SMC_IRQ_SENSE IRQF_TRIGGER_LOW -#elif defined(CONFIG_ARCH_OMAP3) - #define SMC_USE_16BIT 0 - #define SMC_USE_32BIT 1 - #define SMC_IRQ_SENSE IRQF_TRIGGER_LOW - #define SMC_MEM_RESERVED 1 -#elif defined(CONFIG_ARCH_OMAP2) - #define SMC_USE_16BIT 0 - #define SMC_USE_32BIT 1 - #define SMC_IRQ_SENSE IRQF_TRIGGER_LOW - #define SMC_MEM_RESERVED 1 -#else -/* - * Default configuration - */ - -#define SMC_DYNAMIC_BUS_CONFIG -#endif - -#ifdef SMC_USE_PXA_DMA -#define SMC_USE_DMA -#endif - -/* store this information for the driver.. */ -struct smc911x_local { - /* - * If I have to wait until the DMA is finished and ready to reload a - * packet, I will store the skbuff here. Then, the DMA will send it - * out and free it. - */ - struct sk_buff *pending_tx_skb; - - /* version/revision of the SMC911x chip */ - u16 version; - u16 revision; - - /* FIFO sizes */ - int tx_fifo_kb; - int tx_fifo_size; - int rx_fifo_size; - int afc_cfg; - - /* Contains the current active receive/phy mode */ - int ctl_rfduplx; - int ctl_rspeed; - - u32 msg_enable; - u32 phy_type; - struct mii_if_info mii; - - /* work queue */ - struct work_struct phy_configure; - - int tx_throttle; - spinlock_t lock; - - struct net_device *netdev; - -#ifdef SMC_USE_DMA - /* DMA needs the physical address of the chip */ - u_long physaddr; - struct dma_chan *rxdma; - struct dma_chan *txdma; - int rxdma_active; - int txdma_active; - struct sk_buff *current_rx_skb; - struct sk_buff *current_tx_skb; - struct device *dev; -#endif - void __iomem *base; -#ifdef SMC_DYNAMIC_BUS_CONFIG - struct smc911x_platdata cfg; -#endif -}; - -/* - * Define the bus width specific IO macros - */ - -#ifdef SMC_DYNAMIC_BUS_CONFIG -static inline unsigned int SMC_inl(struct smc911x_local *lp, int reg) -{ - void __iomem *ioaddr = lp->base + reg; - - if (lp->cfg.flags & SMC911X_USE_32BIT) - return readl(ioaddr); - - if (lp->cfg.flags & SMC911X_USE_16BIT) - return readw(ioaddr) | (readw(ioaddr + 2) << 16); - - BUG(); -} - -static inline void SMC_outl(unsigned int value, struct smc911x_local *lp, - int reg) -{ - void __iomem *ioaddr = lp->base + reg; - - if (lp->cfg.flags & SMC911X_USE_32BIT) { - writel(value, ioaddr); - return; - } - - if (lp->cfg.flags & SMC911X_USE_16BIT) { - writew(value & 0xffff, ioaddr); - writew(value >> 16, ioaddr + 2); - return; - } - - BUG(); -} - -static inline void SMC_insl(struct smc911x_local *lp, int reg, - void *addr, unsigned int count) -{ - void __iomem *ioaddr = lp->base + reg; - - if (lp->cfg.flags & SMC911X_USE_32BIT) { - ioread32_rep(ioaddr, addr, count); - return; - } - - if (lp->cfg.flags & SMC911X_USE_16BIT) { - ioread16_rep(ioaddr, addr, count * 2); - return; - } - - BUG(); -} - -static inline void SMC_outsl(struct smc911x_local *lp, int reg, - void *addr, unsigned int count) -{ - void __iomem *ioaddr = lp->base + reg; - - if (lp->cfg.flags & 
SMC911X_USE_32BIT) { - iowrite32_rep(ioaddr, addr, count); - return; - } - - if (lp->cfg.flags & SMC911X_USE_16BIT) { - iowrite16_rep(ioaddr, addr, count * 2); - return; - } - - BUG(); -} -#else -#if SMC_USE_16BIT -#define SMC_inl(lp, r) ((readw((lp)->base + (r)) & 0xFFFF) + (readw((lp)->base + (r) + 2) << 16)) -#define SMC_outl(v, lp, r) \ - do{ \ - writew(v & 0xFFFF, (lp)->base + (r)); \ - writew(v >> 16, (lp)->base + (r) + 2); \ - } while (0) -#define SMC_insl(lp, r, p, l) ioread16_rep((short*)((lp)->base + (r)), p, l*2) -#define SMC_outsl(lp, r, p, l) iowrite16_rep((short*)((lp)->base + (r)), p, l*2) - -#elif SMC_USE_32BIT -#define SMC_inl(lp, r) readl((lp)->base + (r)) -#define SMC_outl(v, lp, r) writel(v, (lp)->base + (r)) -#define SMC_insl(lp, r, p, l) ioread32_rep((int*)((lp)->base + (r)), p, l) -#define SMC_outsl(lp, r, p, l) iowrite32_rep((int*)((lp)->base + (r)), p, l) - -#endif /* SMC_USE_16BIT */ -#endif /* SMC_DYNAMIC_BUS_CONFIG */ - - -#ifdef SMC_USE_PXA_DMA - -/* - * Use a DMA for RX and TX packets. - */ -#include <linux/dma-mapping.h> - -static dma_addr_t rx_dmabuf, tx_dmabuf; -static int rx_dmalen, tx_dmalen; -static void smc911x_rx_dma_irq(void *data); -static void smc911x_tx_dma_irq(void *data); - -#ifdef SMC_insl -#undef SMC_insl -#define SMC_insl(lp, r, p, l) \ - smc_pxa_dma_insl(lp, lp->physaddr, r, lp->rxdma, p, l) - -static inline void -smc_pxa_dma_insl(struct smc911x_local *lp, u_long physaddr, - int reg, struct dma_chan *dma, u_char *buf, int len) -{ - struct dma_async_tx_descriptor *tx; - - /* 64 bit alignment is required for memory to memory DMA */ - if ((long)buf & 4) { - *((u32 *)buf) = SMC_inl(lp, reg); - buf += 4; - len--; - } - - len *= 4; - rx_dmabuf = dma_map_single(lp->dev, buf, len, DMA_FROM_DEVICE); - rx_dmalen = len; - tx = dmaengine_prep_slave_single(dma, rx_dmabuf, rx_dmalen, - DMA_DEV_TO_MEM, 0); - if (tx) { - tx->callback = smc911x_rx_dma_irq; - tx->callback_param = lp; - dmaengine_submit(tx); - dma_async_issue_pending(dma); - } -} -#endif - -#ifdef SMC_outsl -#undef SMC_outsl -#define SMC_outsl(lp, r, p, l) \ - smc_pxa_dma_outsl(lp, lp->physaddr, r, lp->txdma, p, l) - -static inline void -smc_pxa_dma_outsl(struct smc911x_local *lp, u_long physaddr, - int reg, struct dma_chan *dma, u_char *buf, int len) -{ - struct dma_async_tx_descriptor *tx; - - /* 64 bit alignment is required for memory to memory DMA */ - if ((long)buf & 4) { - SMC_outl(*((u32 *)buf), lp, reg); - buf += 4; - len--; - } - - len *= 4; - tx_dmabuf = dma_map_single(lp->dev, buf, len, DMA_TO_DEVICE); - tx_dmalen = len; - tx = dmaengine_prep_slave_single(dma, tx_dmabuf, tx_dmalen, - DMA_DEV_TO_MEM, 0); - if (tx) { - tx->callback = smc911x_tx_dma_irq; - tx->callback_param = lp; - dmaengine_submit(tx); - dma_async_issue_pending(dma); - } -} -#endif -#endif /* SMC_USE_PXA_DMA */ - - -/* Chip Parameters and Register Definitions */ - -#define SMC911X_TX_FIFO_LOW_THRESHOLD (1536*2) - -#define SMC911X_IO_EXTENT 0x100 - -#define SMC911X_EEPROM_LEN 7 - -/* Below are the register offsets and bit definitions - * of the Lan911x memory space - */ -#define RX_DATA_FIFO (0x00) - -#define TX_DATA_FIFO (0x20) -#define TX_CMD_A_INT_ON_COMP_ (0x80000000) -#define TX_CMD_A_INT_BUF_END_ALGN_ (0x03000000) -#define TX_CMD_A_INT_4_BYTE_ALGN_ (0x00000000) -#define TX_CMD_A_INT_16_BYTE_ALGN_ (0x01000000) -#define TX_CMD_A_INT_32_BYTE_ALGN_ (0x02000000) -#define TX_CMD_A_INT_DATA_OFFSET_ (0x001F0000) -#define TX_CMD_A_INT_FIRST_SEG_ (0x00002000) -#define TX_CMD_A_INT_LAST_SEG_ (0x00001000) -#define 
TX_CMD_A_BUF_SIZE_ (0x000007FF) -#define TX_CMD_B_PKT_TAG_ (0xFFFF0000) -#define TX_CMD_B_ADD_CRC_DISABLE_ (0x00002000) -#define TX_CMD_B_DISABLE_PADDING_ (0x00001000) -#define TX_CMD_B_PKT_BYTE_LENGTH_ (0x000007FF) - -#define RX_STATUS_FIFO (0x40) -#define RX_STS_PKT_LEN_ (0x3FFF0000) -#define RX_STS_ES_ (0x00008000) -#define RX_STS_BCST_ (0x00002000) -#define RX_STS_LEN_ERR_ (0x00001000) -#define RX_STS_RUNT_ERR_ (0x00000800) -#define RX_STS_MCAST_ (0x00000400) -#define RX_STS_TOO_LONG_ (0x00000080) -#define RX_STS_COLL_ (0x00000040) -#define RX_STS_ETH_TYPE_ (0x00000020) -#define RX_STS_WDOG_TMT_ (0x00000010) -#define RX_STS_MII_ERR_ (0x00000008) -#define RX_STS_DRIBBLING_ (0x00000004) -#define RX_STS_CRC_ERR_ (0x00000002) -#define RX_STATUS_FIFO_PEEK (0x44) -#define TX_STATUS_FIFO (0x48) -#define TX_STS_TAG_ (0xFFFF0000) -#define TX_STS_ES_ (0x00008000) -#define TX_STS_LOC_ (0x00000800) -#define TX_STS_NO_CARR_ (0x00000400) -#define TX_STS_LATE_COLL_ (0x00000200) -#define TX_STS_MANY_COLL_ (0x00000100) -#define TX_STS_COLL_CNT_ (0x00000078) -#define TX_STS_MANY_DEFER_ (0x00000004) -#define TX_STS_UNDERRUN_ (0x00000002) -#define TX_STS_DEFERRED_ (0x00000001) -#define TX_STATUS_FIFO_PEEK (0x4C) -#define ID_REV (0x50) -#define ID_REV_CHIP_ID_ (0xFFFF0000) /* RO */ -#define ID_REV_REV_ID_ (0x0000FFFF) /* RO */ - -#define INT_CFG (0x54) -#define INT_CFG_INT_DEAS_ (0xFF000000) /* R/W */ -#define INT_CFG_INT_DEAS_CLR_ (0x00004000) -#define INT_CFG_INT_DEAS_STS_ (0x00002000) -#define INT_CFG_IRQ_INT_ (0x00001000) /* RO */ -#define INT_CFG_IRQ_EN_ (0x00000100) /* R/W */ -#define INT_CFG_IRQ_POL_ (0x00000010) /* R/W Not Affected by SW Reset */ -#define INT_CFG_IRQ_TYPE_ (0x00000001) /* R/W Not Affected by SW Reset */ - -#define INT_STS (0x58) -#define INT_STS_SW_INT_ (0x80000000) /* R/WC */ -#define INT_STS_TXSTOP_INT_ (0x02000000) /* R/WC */ -#define INT_STS_RXSTOP_INT_ (0x01000000) /* R/WC */ -#define INT_STS_RXDFH_INT_ (0x00800000) /* R/WC */ -#define INT_STS_RXDF_INT_ (0x00400000) /* R/WC */ -#define INT_STS_TX_IOC_ (0x00200000) /* R/WC */ -#define INT_STS_RXD_INT_ (0x00100000) /* R/WC */ -#define INT_STS_GPT_INT_ (0x00080000) /* R/WC */ -#define INT_STS_PHY_INT_ (0x00040000) /* RO */ -#define INT_STS_PME_INT_ (0x00020000) /* R/WC */ -#define INT_STS_TXSO_ (0x00010000) /* R/WC */ -#define INT_STS_RWT_ (0x00008000) /* R/WC */ -#define INT_STS_RXE_ (0x00004000) /* R/WC */ -#define INT_STS_TXE_ (0x00002000) /* R/WC */ -//#define INT_STS_ERX_ (0x00001000) /* R/WC */ -#define INT_STS_TDFU_ (0x00000800) /* R/WC */ -#define INT_STS_TDFO_ (0x00000400) /* R/WC */ -#define INT_STS_TDFA_ (0x00000200) /* R/WC */ -#define INT_STS_TSFF_ (0x00000100) /* R/WC */ -#define INT_STS_TSFL_ (0x00000080) /* R/WC */ -//#define INT_STS_RXDF_ (0x00000040) /* R/WC */ -#define INT_STS_RDFO_ (0x00000040) /* R/WC */ -#define INT_STS_RDFL_ (0x00000020) /* R/WC */ -#define INT_STS_RSFF_ (0x00000010) /* R/WC */ -#define INT_STS_RSFL_ (0x00000008) /* R/WC */ -#define INT_STS_GPIO2_INT_ (0x00000004) /* R/WC */ -#define INT_STS_GPIO1_INT_ (0x00000002) /* R/WC */ -#define INT_STS_GPIO0_INT_ (0x00000001) /* R/WC */ - -#define INT_EN (0x5C) -#define INT_EN_SW_INT_EN_ (0x80000000) /* R/W */ -#define INT_EN_TXSTOP_INT_EN_ (0x02000000) /* R/W */ -#define INT_EN_RXSTOP_INT_EN_ (0x01000000) /* R/W */ -#define INT_EN_RXDFH_INT_EN_ (0x00800000) /* R/W */ -//#define INT_EN_RXDF_INT_EN_ (0x00400000) /* R/W */ -#define INT_EN_TIOC_INT_EN_ (0x00200000) /* R/W */ -#define INT_EN_RXD_INT_EN_ (0x00100000) /* R/W */ -#define INT_EN_GPT_INT_EN_ 
(0x00080000) /* R/W */ -#define INT_EN_PHY_INT_EN_ (0x00040000) /* R/W */ -#define INT_EN_PME_INT_EN_ (0x00020000) /* R/W */ -#define INT_EN_TXSO_EN_ (0x00010000) /* R/W */ -#define INT_EN_RWT_EN_ (0x00008000) /* R/W */ -#define INT_EN_RXE_EN_ (0x00004000) /* R/W */ -#define INT_EN_TXE_EN_ (0x00002000) /* R/W */ -//#define INT_EN_ERX_EN_ (0x00001000) /* R/W */ -#define INT_EN_TDFU_EN_ (0x00000800) /* R/W */ -#define INT_EN_TDFO_EN_ (0x00000400) /* R/W */ -#define INT_EN_TDFA_EN_ (0x00000200) /* R/W */ -#define INT_EN_TSFF_EN_ (0x00000100) /* R/W */ -#define INT_EN_TSFL_EN_ (0x00000080) /* R/W */ -//#define INT_EN_RXDF_EN_ (0x00000040) /* R/W */ -#define INT_EN_RDFO_EN_ (0x00000040) /* R/W */ -#define INT_EN_RDFL_EN_ (0x00000020) /* R/W */ -#define INT_EN_RSFF_EN_ (0x00000010) /* R/W */ -#define INT_EN_RSFL_EN_ (0x00000008) /* R/W */ -#define INT_EN_GPIO2_INT_ (0x00000004) /* R/W */ -#define INT_EN_GPIO1_INT_ (0x00000002) /* R/W */ -#define INT_EN_GPIO0_INT_ (0x00000001) /* R/W */ - -#define BYTE_TEST (0x64) -#define FIFO_INT (0x68) -#define FIFO_INT_TX_AVAIL_LEVEL_ (0xFF000000) /* R/W */ -#define FIFO_INT_TX_STS_LEVEL_ (0x00FF0000) /* R/W */ -#define FIFO_INT_RX_AVAIL_LEVEL_ (0x0000FF00) /* R/W */ -#define FIFO_INT_RX_STS_LEVEL_ (0x000000FF) /* R/W */ - -#define RX_CFG (0x6C) -#define RX_CFG_RX_END_ALGN_ (0xC0000000) /* R/W */ -#define RX_CFG_RX_END_ALGN4_ (0x00000000) /* R/W */ -#define RX_CFG_RX_END_ALGN16_ (0x40000000) /* R/W */ -#define RX_CFG_RX_END_ALGN32_ (0x80000000) /* R/W */ -#define RX_CFG_RX_DMA_CNT_ (0x0FFF0000) /* R/W */ -#define RX_CFG_RX_DUMP_ (0x00008000) /* R/W */ -#define RX_CFG_RXDOFF_ (0x00001F00) /* R/W */ -//#define RX_CFG_RXBAD_ (0x00000001) /* R/W */ - -#define TX_CFG (0x70) -//#define TX_CFG_TX_DMA_LVL_ (0xE0000000) /* R/W */ -//#define TX_CFG_TX_DMA_CNT_ (0x0FFF0000) /* R/W Self Clearing */ -#define TX_CFG_TXS_DUMP_ (0x00008000) /* Self Clearing */ -#define TX_CFG_TXD_DUMP_ (0x00004000) /* Self Clearing */ -#define TX_CFG_TXSAO_ (0x00000004) /* R/W */ -#define TX_CFG_TX_ON_ (0x00000002) /* R/W */ -#define TX_CFG_STOP_TX_ (0x00000001) /* Self Clearing */ - -#define HW_CFG (0x74) -#define HW_CFG_TTM_ (0x00200000) /* R/W */ -#define HW_CFG_SF_ (0x00100000) /* R/W */ -#define HW_CFG_TX_FIF_SZ_ (0x000F0000) /* R/W */ -#define HW_CFG_TR_ (0x00003000) /* R/W */ -#define HW_CFG_PHY_CLK_SEL_ (0x00000060) /* R/W */ -#define HW_CFG_PHY_CLK_SEL_INT_PHY_ (0x00000000) /* R/W */ -#define HW_CFG_PHY_CLK_SEL_EXT_PHY_ (0x00000020) /* R/W */ -#define HW_CFG_PHY_CLK_SEL_CLK_DIS_ (0x00000040) /* R/W */ -#define HW_CFG_SMI_SEL_ (0x00000010) /* R/W */ -#define HW_CFG_EXT_PHY_DET_ (0x00000008) /* RO */ -#define HW_CFG_EXT_PHY_EN_ (0x00000004) /* R/W */ -#define HW_CFG_32_16_BIT_MODE_ (0x00000004) /* RO */ -#define HW_CFG_SRST_TO_ (0x00000002) /* RO */ -#define HW_CFG_SRST_ (0x00000001) /* Self Clearing */ - -#define RX_DP_CTRL (0x78) -#define RX_DP_CTRL_RX_FFWD_ (0x80000000) /* R/W */ -#define RX_DP_CTRL_FFWD_BUSY_ (0x80000000) /* RO */ - -#define RX_FIFO_INF (0x7C) -#define RX_FIFO_INF_RXSUSED_ (0x00FF0000) /* RO */ -#define RX_FIFO_INF_RXDUSED_ (0x0000FFFF) /* RO */ - -#define TX_FIFO_INF (0x80) -#define TX_FIFO_INF_TSUSED_ (0x00FF0000) /* RO */ -#define TX_FIFO_INF_TDFREE_ (0x0000FFFF) /* RO */ - -#define PMT_CTRL (0x84) -#define PMT_CTRL_PM_MODE_ (0x00003000) /* Self Clearing */ -#define PMT_CTRL_PHY_RST_ (0x00000400) /* Self Clearing */ -#define PMT_CTRL_WOL_EN_ (0x00000200) /* R/W */ -#define PMT_CTRL_ED_EN_ (0x00000100) /* R/W */ -#define PMT_CTRL_PME_TYPE_ (0x00000040) /* R/W 
Not Affected by SW Reset */ -#define PMT_CTRL_WUPS_ (0x00000030) /* R/WC */ -#define PMT_CTRL_WUPS_NOWAKE_ (0x00000000) /* R/WC */ -#define PMT_CTRL_WUPS_ED_ (0x00000010) /* R/WC */ -#define PMT_CTRL_WUPS_WOL_ (0x00000020) /* R/WC */ -#define PMT_CTRL_WUPS_MULTI_ (0x00000030) /* R/WC */ -#define PMT_CTRL_PME_IND_ (0x00000008) /* R/W */ -#define PMT_CTRL_PME_POL_ (0x00000004) /* R/W */ -#define PMT_CTRL_PME_EN_ (0x00000002) /* R/W Not Affected by SW Reset */ -#define PMT_CTRL_READY_ (0x00000001) /* RO */ - -#define GPIO_CFG (0x88) -#define GPIO_CFG_LED3_EN_ (0x40000000) /* R/W */ -#define GPIO_CFG_LED2_EN_ (0x20000000) /* R/W */ -#define GPIO_CFG_LED1_EN_ (0x10000000) /* R/W */ -#define GPIO_CFG_GPIO2_INT_POL_ (0x04000000) /* R/W */ -#define GPIO_CFG_GPIO1_INT_POL_ (0x02000000) /* R/W */ -#define GPIO_CFG_GPIO0_INT_POL_ (0x01000000) /* R/W */ -#define GPIO_CFG_EEPR_EN_ (0x00700000) /* R/W */ -#define GPIO_CFG_GPIOBUF2_ (0x00040000) /* R/W */ -#define GPIO_CFG_GPIOBUF1_ (0x00020000) /* R/W */ -#define GPIO_CFG_GPIOBUF0_ (0x00010000) /* R/W */ -#define GPIO_CFG_GPIODIR2_ (0x00000400) /* R/W */ -#define GPIO_CFG_GPIODIR1_ (0x00000200) /* R/W */ -#define GPIO_CFG_GPIODIR0_ (0x00000100) /* R/W */ -#define GPIO_CFG_GPIOD4_ (0x00000010) /* R/W */ -#define GPIO_CFG_GPIOD3_ (0x00000008) /* R/W */ -#define GPIO_CFG_GPIOD2_ (0x00000004) /* R/W */ -#define GPIO_CFG_GPIOD1_ (0x00000002) /* R/W */ -#define GPIO_CFG_GPIOD0_ (0x00000001) /* R/W */ - -#define GPT_CFG (0x8C) -#define GPT_CFG_TIMER_EN_ (0x20000000) /* R/W */ -#define GPT_CFG_GPT_LOAD_ (0x0000FFFF) /* R/W */ - -#define GPT_CNT (0x90) -#define GPT_CNT_GPT_CNT_ (0x0000FFFF) /* RO */ - -#define ENDIAN (0x98) -#define FREE_RUN (0x9C) -#define RX_DROP (0xA0) -#define MAC_CSR_CMD (0xA4) -#define MAC_CSR_CMD_CSR_BUSY_ (0x80000000) /* Self Clearing */ -#define MAC_CSR_CMD_R_NOT_W_ (0x40000000) /* R/W */ -#define MAC_CSR_CMD_CSR_ADDR_ (0x000000FF) /* R/W */ - -#define MAC_CSR_DATA (0xA8) -#define AFC_CFG (0xAC) -#define AFC_CFG_AFC_HI_ (0x00FF0000) /* R/W */ -#define AFC_CFG_AFC_LO_ (0x0000FF00) /* R/W */ -#define AFC_CFG_BACK_DUR_ (0x000000F0) /* R/W */ -#define AFC_CFG_FCMULT_ (0x00000008) /* R/W */ -#define AFC_CFG_FCBRD_ (0x00000004) /* R/W */ -#define AFC_CFG_FCADD_ (0x00000002) /* R/W */ -#define AFC_CFG_FCANY_ (0x00000001) /* R/W */ - -#define E2P_CMD (0xB0) -#define E2P_CMD_EPC_BUSY_ (0x80000000) /* Self Clearing */ -#define E2P_CMD_EPC_CMD_ (0x70000000) /* R/W */ -#define E2P_CMD_EPC_CMD_READ_ (0x00000000) /* R/W */ -#define E2P_CMD_EPC_CMD_EWDS_ (0x10000000) /* R/W */ -#define E2P_CMD_EPC_CMD_EWEN_ (0x20000000) /* R/W */ -#define E2P_CMD_EPC_CMD_WRITE_ (0x30000000) /* R/W */ -#define E2P_CMD_EPC_CMD_WRAL_ (0x40000000) /* R/W */ -#define E2P_CMD_EPC_CMD_ERASE_ (0x50000000) /* R/W */ -#define E2P_CMD_EPC_CMD_ERAL_ (0x60000000) /* R/W */ -#define E2P_CMD_EPC_CMD_RELOAD_ (0x70000000) /* R/W */ -#define E2P_CMD_EPC_TIMEOUT_ (0x00000200) /* RO */ -#define E2P_CMD_MAC_ADDR_LOADED_ (0x00000100) /* RO */ -#define E2P_CMD_EPC_ADDR_ (0x000000FF) /* R/W */ - -#define E2P_DATA (0xB4) -#define E2P_DATA_EEPROM_DATA_ (0x000000FF) /* R/W */ -/* end of LAN register offsets and bit definitions */ - -/* - **************************************************************************** - **************************************************************************** - * MAC Control and Status Register (Indirect Address) - * Offset (through the MAC_CSR CMD and DATA port) - **************************************************************************** - 
**************************************************************************** - * - */ -#define MAC_CR (0x01) /* R/W */ - -/* MAC_CR - MAC Control Register */ -#define MAC_CR_RXALL_ (0x80000000) -// TODO: delete this bit? It is not described in the data sheet. -#define MAC_CR_HBDIS_ (0x10000000) -#define MAC_CR_RCVOWN_ (0x00800000) -#define MAC_CR_LOOPBK_ (0x00200000) -#define MAC_CR_FDPX_ (0x00100000) -#define MAC_CR_MCPAS_ (0x00080000) -#define MAC_CR_PRMS_ (0x00040000) -#define MAC_CR_INVFILT_ (0x00020000) -#define MAC_CR_PASSBAD_ (0x00010000) -#define MAC_CR_HFILT_ (0x00008000) -#define MAC_CR_HPFILT_ (0x00002000) -#define MAC_CR_LCOLL_ (0x00001000) -#define MAC_CR_BCAST_ (0x00000800) -#define MAC_CR_DISRTY_ (0x00000400) -#define MAC_CR_PADSTR_ (0x00000100) -#define MAC_CR_BOLMT_MASK_ (0x000000C0) -#define MAC_CR_DFCHK_ (0x00000020) -#define MAC_CR_TXEN_ (0x00000008) -#define MAC_CR_RXEN_ (0x00000004) - -#define ADDRH (0x02) /* R/W mask 0x0000FFFFUL */ -#define ADDRL (0x03) /* R/W mask 0xFFFFFFFFUL */ -#define HASHH (0x04) /* R/W */ -#define HASHL (0x05) /* R/W */ - -#define MII_ACC (0x06) /* R/W */ -#define MII_ACC_PHY_ADDR_ (0x0000F800) -#define MII_ACC_MIIRINDA_ (0x000007C0) -#define MII_ACC_MII_WRITE_ (0x00000002) -#define MII_ACC_MII_BUSY_ (0x00000001) - -#define MII_DATA (0x07) /* R/W mask 0x0000FFFFUL */ - -#define FLOW (0x08) /* R/W */ -#define FLOW_FCPT_ (0xFFFF0000) -#define FLOW_FCPASS_ (0x00000004) -#define FLOW_FCEN_ (0x00000002) -#define FLOW_FCBSY_ (0x00000001) - -#define VLAN1 (0x09) /* R/W mask 0x0000FFFFUL */ -#define VLAN1_VTI1_ (0x0000ffff) - -#define VLAN2 (0x0A) /* R/W mask 0x0000FFFFUL */ -#define VLAN2_VTI2_ (0x0000ffff) - -#define WUFF (0x0B) /* WO */ - -#define WUCSR (0x0C) /* R/W */ -#define WUCSR_GUE_ (0x00000200) -#define WUCSR_WUFR_ (0x00000040) -#define WUCSR_MPR_ (0x00000020) -#define WUCSR_WAKE_EN_ (0x00000004) -#define WUCSR_MPEN_ (0x00000002) - -/* - **************************************************************************** - * Chip Specific MII Defines - **************************************************************************** - * - * Phy register offsets and bit definitions - * - */ - -#define PHY_MODE_CTRL_STS ((u32)17) /* Mode Control/Status Register */ -//#define MODE_CTRL_STS_FASTRIP_ ((u16)0x4000) -#define MODE_CTRL_STS_EDPWRDOWN_ ((u16)0x2000) -//#define MODE_CTRL_STS_LOWSQEN_ ((u16)0x0800) -//#define MODE_CTRL_STS_MDPREBP_ ((u16)0x0400) -//#define MODE_CTRL_STS_FARLOOPBACK_ ((u16)0x0200) -//#define MODE_CTRL_STS_FASTEST_ ((u16)0x0100) -//#define MODE_CTRL_STS_REFCLKEN_ ((u16)0x0010) -//#define MODE_CTRL_STS_PHYADBP_ ((u16)0x0008) -//#define MODE_CTRL_STS_FORCE_G_LINK_ ((u16)0x0004) -#define MODE_CTRL_STS_ENERGYON_ ((u16)0x0002) - -#define PHY_INT_SRC ((u32)29) -#define PHY_INT_SRC_ENERGY_ON_ ((u16)0x0080) -#define PHY_INT_SRC_ANEG_COMP_ ((u16)0x0040) -#define PHY_INT_SRC_REMOTE_FAULT_ ((u16)0x0020) -#define PHY_INT_SRC_LINK_DOWN_ ((u16)0x0010) -#define PHY_INT_SRC_ANEG_LP_ACK_ ((u16)0x0008) -#define PHY_INT_SRC_PAR_DET_FAULT_ ((u16)0x0004) -#define PHY_INT_SRC_ANEG_PGRX_ ((u16)0x0002) - -#define PHY_INT_MASK ((u32)30) -#define PHY_INT_MASK_ENERGY_ON_ ((u16)0x0080) -#define PHY_INT_MASK_ANEG_COMP_ ((u16)0x0040) -#define PHY_INT_MASK_REMOTE_FAULT_ ((u16)0x0020) -#define PHY_INT_MASK_LINK_DOWN_ ((u16)0x0010) -#define PHY_INT_MASK_ANEG_LP_ACK_ ((u16)0x0008) -#define PHY_INT_MASK_PAR_DET_FAULT_ ((u16)0x0004) -#define PHY_INT_MASK_ANEG_PGRX_ ((u16)0x0002) - -#define PHY_SPECIAL ((u32)31) -#define PHY_SPECIAL_ANEG_DONE_ ((u16)0x1000) -#define 
PHY_SPECIAL_RES_ ((u16)0x0040) -#define PHY_SPECIAL_RES_MASK_ ((u16)0x0FE1) -#define PHY_SPECIAL_SPD_ ((u16)0x001C) -#define PHY_SPECIAL_SPD_10HALF_ ((u16)0x0004) -#define PHY_SPECIAL_SPD_10FULL_ ((u16)0x0014) -#define PHY_SPECIAL_SPD_100HALF_ ((u16)0x0008) -#define PHY_SPECIAL_SPD_100FULL_ ((u16)0x0018) - -#define LAN911X_INTERNAL_PHY_ID (0x0007C000) - -/* Chip ID values */ -#define CHIP_9115 0x0115 -#define CHIP_9116 0x0116 -#define CHIP_9117 0x0117 -#define CHIP_9118 0x0118 -#define CHIP_9211 0x9211 -#define CHIP_9215 0x115A -#define CHIP_9217 0x117A -#define CHIP_9218 0x118A - -struct chip_id { - u16 id; - char *name; -}; - -static const struct chip_id chip_ids[] = { - { CHIP_9115, "LAN9115" }, - { CHIP_9116, "LAN9116" }, - { CHIP_9117, "LAN9117" }, - { CHIP_9118, "LAN9118" }, - { CHIP_9211, "LAN9211" }, - { CHIP_9215, "LAN9215" }, - { CHIP_9217, "LAN9217" }, - { CHIP_9218, "LAN9218" }, - { 0, NULL }, -}; - -#define IS_REV_A(x) ((x & 0xFFFF)==0) - -/* - * Macros to abstract register access according to the data bus - * capabilities. Please use those and not the in/out primitives. - */ -/* FIFO read/write macros */ -#define SMC_PUSH_DATA(lp, p, l) SMC_outsl( lp, TX_DATA_FIFO, p, (l) >> 2 ) -#define SMC_PULL_DATA(lp, p, l) SMC_insl ( lp, RX_DATA_FIFO, p, (l) >> 2 ) -#define SMC_SET_TX_FIFO(lp, x) SMC_outl( x, lp, TX_DATA_FIFO ) -#define SMC_GET_RX_FIFO(lp) SMC_inl( lp, RX_DATA_FIFO ) - - -/* I/O mapped register read/write macros */ -#define SMC_GET_TX_STS_FIFO(lp) SMC_inl( lp, TX_STATUS_FIFO ) -#define SMC_GET_RX_STS_FIFO(lp) SMC_inl( lp, RX_STATUS_FIFO ) -#define SMC_GET_RX_STS_FIFO_PEEK(lp) SMC_inl( lp, RX_STATUS_FIFO_PEEK ) -#define SMC_GET_PN(lp) (SMC_inl( lp, ID_REV ) >> 16) -#define SMC_GET_REV(lp) (SMC_inl( lp, ID_REV ) & 0xFFFF) -#define SMC_GET_IRQ_CFG(lp) SMC_inl( lp, INT_CFG ) -#define SMC_SET_IRQ_CFG(lp, x) SMC_outl( x, lp, INT_CFG ) -#define SMC_GET_INT(lp) SMC_inl( lp, INT_STS ) -#define SMC_ACK_INT(lp, x) SMC_outl( x, lp, INT_STS ) -#define SMC_GET_INT_EN(lp) SMC_inl( lp, INT_EN ) -#define SMC_SET_INT_EN(lp, x) SMC_outl( x, lp, INT_EN ) -#define SMC_GET_BYTE_TEST(lp) SMC_inl( lp, BYTE_TEST ) -#define SMC_SET_BYTE_TEST(lp, x) SMC_outl( x, lp, BYTE_TEST ) -#define SMC_GET_FIFO_INT(lp) SMC_inl( lp, FIFO_INT ) -#define SMC_SET_FIFO_INT(lp, x) SMC_outl( x, lp, FIFO_INT ) -#define SMC_SET_FIFO_TDA(lp, x) \ - do { \ - unsigned long __flags; \ - int __mask; \ - local_irq_save(__flags); \ - __mask = SMC_GET_FIFO_INT((lp)) & ~(0xFF<<24); \ - SMC_SET_FIFO_INT( (lp), __mask | (x)<<24 ); \ - local_irq_restore(__flags); \ - } while (0) -#define SMC_SET_FIFO_TSL(lp, x) \ - do { \ - unsigned long __flags; \ - int __mask; \ - local_irq_save(__flags); \ - __mask = SMC_GET_FIFO_INT((lp)) & ~(0xFF<<16); \ - SMC_SET_FIFO_INT( (lp), __mask | (((x) & 0xFF)<<16)); \ - local_irq_restore(__flags); \ - } while (0) -#define SMC_SET_FIFO_RSA(lp, x) \ - do { \ - unsigned long __flags; \ - int __mask; \ - local_irq_save(__flags); \ - __mask = SMC_GET_FIFO_INT((lp)) & ~(0xFF<<8); \ - SMC_SET_FIFO_INT( (lp), __mask | (((x) & 0xFF)<<8)); \ - local_irq_restore(__flags); \ - } while (0) -#define SMC_SET_FIFO_RSL(lp, x) \ - do { \ - unsigned long __flags; \ - int __mask; \ - local_irq_save(__flags); \ - __mask = SMC_GET_FIFO_INT((lp)) & ~0xFF; \ - SMC_SET_FIFO_INT( (lp),__mask | ((x) & 0xFF)); \ - local_irq_restore(__flags); \ - } while (0) -#define SMC_GET_RX_CFG(lp) SMC_inl( lp, RX_CFG ) -#define SMC_SET_RX_CFG(lp, x) SMC_outl( x, lp, RX_CFG ) -#define SMC_GET_TX_CFG(lp) SMC_inl( lp, TX_CFG ) -#define 
SMC_SET_TX_CFG(lp, x) SMC_outl( x, lp, TX_CFG ) -#define SMC_GET_HW_CFG(lp) SMC_inl( lp, HW_CFG ) -#define SMC_SET_HW_CFG(lp, x) SMC_outl( x, lp, HW_CFG ) -#define SMC_GET_RX_DP_CTRL(lp) SMC_inl( lp, RX_DP_CTRL ) -#define SMC_SET_RX_DP_CTRL(lp, x) SMC_outl( x, lp, RX_DP_CTRL ) -#define SMC_GET_PMT_CTRL(lp) SMC_inl( lp, PMT_CTRL ) -#define SMC_SET_PMT_CTRL(lp, x) SMC_outl( x, lp, PMT_CTRL ) -#define SMC_GET_GPIO_CFG(lp) SMC_inl( lp, GPIO_CFG ) -#define SMC_SET_GPIO_CFG(lp, x) SMC_outl( x, lp, GPIO_CFG ) -#define SMC_GET_RX_FIFO_INF(lp) SMC_inl( lp, RX_FIFO_INF ) -#define SMC_SET_RX_FIFO_INF(lp, x) SMC_outl( x, lp, RX_FIFO_INF ) -#define SMC_GET_TX_FIFO_INF(lp) SMC_inl( lp, TX_FIFO_INF ) -#define SMC_SET_TX_FIFO_INF(lp, x) SMC_outl( x, lp, TX_FIFO_INF ) -#define SMC_GET_GPT_CFG(lp) SMC_inl( lp, GPT_CFG ) -#define SMC_SET_GPT_CFG(lp, x) SMC_outl( x, lp, GPT_CFG ) -#define SMC_GET_RX_DROP(lp) SMC_inl( lp, RX_DROP ) -#define SMC_SET_RX_DROP(lp, x) SMC_outl( x, lp, RX_DROP ) -#define SMC_GET_MAC_CMD(lp) SMC_inl( lp, MAC_CSR_CMD ) -#define SMC_SET_MAC_CMD(lp, x) SMC_outl( x, lp, MAC_CSR_CMD ) -#define SMC_GET_MAC_DATA(lp) SMC_inl( lp, MAC_CSR_DATA ) -#define SMC_SET_MAC_DATA(lp, x) SMC_outl( x, lp, MAC_CSR_DATA ) -#define SMC_GET_AFC_CFG(lp) SMC_inl( lp, AFC_CFG ) -#define SMC_SET_AFC_CFG(lp, x) SMC_outl( x, lp, AFC_CFG ) -#define SMC_GET_E2P_CMD(lp) SMC_inl( lp, E2P_CMD ) -#define SMC_SET_E2P_CMD(lp, x) SMC_outl( x, lp, E2P_CMD ) -#define SMC_GET_E2P_DATA(lp) SMC_inl( lp, E2P_DATA ) -#define SMC_SET_E2P_DATA(lp, x) SMC_outl( x, lp, E2P_DATA ) - -/* MAC register read/write macros */ -#define SMC_GET_MAC_CSR(lp,a,v) \ - do { \ - while (SMC_GET_MAC_CMD((lp)) & MAC_CSR_CMD_CSR_BUSY_); \ - SMC_SET_MAC_CMD((lp),MAC_CSR_CMD_CSR_BUSY_ | \ - MAC_CSR_CMD_R_NOT_W_ | (a) ); \ - while (SMC_GET_MAC_CMD((lp)) & MAC_CSR_CMD_CSR_BUSY_); \ - v = SMC_GET_MAC_DATA((lp)); \ - } while (0) -#define SMC_SET_MAC_CSR(lp,a,v) \ - do { \ - while (SMC_GET_MAC_CMD((lp)) & MAC_CSR_CMD_CSR_BUSY_); \ - SMC_SET_MAC_DATA((lp), v); \ - SMC_SET_MAC_CMD((lp), MAC_CSR_CMD_CSR_BUSY_ | (a) ); \ - while (SMC_GET_MAC_CMD((lp)) & MAC_CSR_CMD_CSR_BUSY_); \ - } while (0) -#define SMC_GET_MAC_CR(lp, x) SMC_GET_MAC_CSR( (lp), MAC_CR, x ) -#define SMC_SET_MAC_CR(lp, x) SMC_SET_MAC_CSR( (lp), MAC_CR, x ) -#define SMC_GET_ADDRH(lp, x) SMC_GET_MAC_CSR( (lp), ADDRH, x ) -#define SMC_SET_ADDRH(lp, x) SMC_SET_MAC_CSR( (lp), ADDRH, x ) -#define SMC_GET_ADDRL(lp, x) SMC_GET_MAC_CSR( (lp), ADDRL, x ) -#define SMC_SET_ADDRL(lp, x) SMC_SET_MAC_CSR( (lp), ADDRL, x ) -#define SMC_GET_HASHH(lp, x) SMC_GET_MAC_CSR( (lp), HASHH, x ) -#define SMC_SET_HASHH(lp, x) SMC_SET_MAC_CSR( (lp), HASHH, x ) -#define SMC_GET_HASHL(lp, x) SMC_GET_MAC_CSR( (lp), HASHL, x ) -#define SMC_SET_HASHL(lp, x) SMC_SET_MAC_CSR( (lp), HASHL, x ) -#define SMC_GET_MII_ACC(lp, x) SMC_GET_MAC_CSR( (lp), MII_ACC, x ) -#define SMC_SET_MII_ACC(lp, x) SMC_SET_MAC_CSR( (lp), MII_ACC, x ) -#define SMC_GET_MII_DATA(lp, x) SMC_GET_MAC_CSR( (lp), MII_DATA, x ) -#define SMC_SET_MII_DATA(lp, x) SMC_SET_MAC_CSR( (lp), MII_DATA, x ) -#define SMC_GET_FLOW(lp, x) SMC_GET_MAC_CSR( (lp), FLOW, x ) -#define SMC_SET_FLOW(lp, x) SMC_SET_MAC_CSR( (lp), FLOW, x ) -#define SMC_GET_VLAN1(lp, x) SMC_GET_MAC_CSR( (lp), VLAN1, x ) -#define SMC_SET_VLAN1(lp, x) SMC_SET_MAC_CSR( (lp), VLAN1, x ) -#define SMC_GET_VLAN2(lp, x) SMC_GET_MAC_CSR( (lp), VLAN2, x ) -#define SMC_SET_VLAN2(lp, x) SMC_SET_MAC_CSR( (lp), VLAN2, x ) -#define SMC_SET_WUFF(lp, x) SMC_SET_MAC_CSR( (lp), WUFF, x ) -#define SMC_GET_WUCSR(lp, x) 
SMC_GET_MAC_CSR( (lp), WUCSR, x ) -#define SMC_SET_WUCSR(lp, x) SMC_SET_MAC_CSR( (lp), WUCSR, x ) - -/* PHY register read/write macros */ -#define SMC_GET_MII(lp,a,phy,v) \ - do { \ - u32 __v; \ - do { \ - SMC_GET_MII_ACC((lp), __v); \ - } while ( __v & MII_ACC_MII_BUSY_ ); \ - SMC_SET_MII_ACC( (lp), ((phy)<<11) | ((a)<<6) | \ - MII_ACC_MII_BUSY_); \ - do { \ - SMC_GET_MII_ACC( (lp), __v); \ - } while ( __v & MII_ACC_MII_BUSY_ ); \ - SMC_GET_MII_DATA((lp), v); \ - } while (0) -#define SMC_SET_MII(lp,a,phy,v) \ - do { \ - u32 __v; \ - do { \ - SMC_GET_MII_ACC((lp), __v); \ - } while ( __v & MII_ACC_MII_BUSY_ ); \ - SMC_SET_MII_DATA((lp), v); \ - SMC_SET_MII_ACC( (lp), ((phy)<<11) | ((a)<<6) | \ - MII_ACC_MII_BUSY_ | \ - MII_ACC_MII_WRITE_ ); \ - do { \ - SMC_GET_MII_ACC((lp), __v); \ - } while ( __v & MII_ACC_MII_BUSY_ ); \ - } while (0) -#define SMC_GET_PHY_BMCR(lp,phy,x) SMC_GET_MII( (lp), MII_BMCR, phy, x ) -#define SMC_SET_PHY_BMCR(lp,phy,x) SMC_SET_MII( (lp), MII_BMCR, phy, x ) -#define SMC_GET_PHY_BMSR(lp,phy,x) SMC_GET_MII( (lp), MII_BMSR, phy, x ) -#define SMC_GET_PHY_ID1(lp,phy,x) SMC_GET_MII( (lp), MII_PHYSID1, phy, x ) -#define SMC_GET_PHY_ID2(lp,phy,x) SMC_GET_MII( (lp), MII_PHYSID2, phy, x ) -#define SMC_GET_PHY_MII_ADV(lp,phy,x) SMC_GET_MII( (lp), MII_ADVERTISE, phy, x ) -#define SMC_SET_PHY_MII_ADV(lp,phy,x) SMC_SET_MII( (lp), MII_ADVERTISE, phy, x ) -#define SMC_GET_PHY_MII_LPA(lp,phy,x) SMC_GET_MII( (lp), MII_LPA, phy, x ) -#define SMC_SET_PHY_MII_LPA(lp,phy,x) SMC_SET_MII( (lp), MII_LPA, phy, x ) -#define SMC_GET_PHY_CTRL_STS(lp,phy,x) SMC_GET_MII( (lp), PHY_MODE_CTRL_STS, phy, x ) -#define SMC_SET_PHY_CTRL_STS(lp,phy,x) SMC_SET_MII( (lp), PHY_MODE_CTRL_STS, phy, x ) -#define SMC_GET_PHY_INT_SRC(lp,phy,x) SMC_GET_MII( (lp), PHY_INT_SRC, phy, x ) -#define SMC_SET_PHY_INT_SRC(lp,phy,x) SMC_SET_MII( (lp), PHY_INT_SRC, phy, x ) -#define SMC_GET_PHY_INT_MASK(lp,phy,x) SMC_GET_MII( (lp), PHY_INT_MASK, phy, x ) -#define SMC_SET_PHY_INT_MASK(lp,phy,x) SMC_SET_MII( (lp), PHY_INT_MASK, phy, x ) -#define SMC_GET_PHY_SPECIAL(lp,phy,x) SMC_GET_MII( (lp), PHY_SPECIAL, phy, x ) - - - -/* Misc read/write macros */ - -#ifndef SMC_GET_MAC_ADDR -#define SMC_GET_MAC_ADDR(lp, addr) \ - do { \ - unsigned int __v; \ - \ - SMC_GET_MAC_CSR((lp), ADDRL, __v); \ - addr[0] = __v; addr[1] = __v >> 8; \ - addr[2] = __v >> 16; addr[3] = __v >> 24; \ - SMC_GET_MAC_CSR((lp), ADDRH, __v); \ - addr[4] = __v; addr[5] = __v >> 8; \ - } while (0) -#endif - -#define SMC_SET_MAC_ADDR(lp, addr) \ - do { \ - SMC_SET_MAC_CSR((lp), ADDRL, \ - addr[0] | \ - (addr[1] << 8) | \ - (addr[2] << 16) | \ - (addr[3] << 24)); \ - SMC_SET_MAC_CSR((lp), ADDRH, addr[4]|(addr[5] << 8));\ - } while (0) - - -#define SMC_WRITE_EEPROM_CMD(lp, cmd, addr) \ - do { \ - while (SMC_GET_E2P_CMD((lp)) & MAC_CSR_CMD_CSR_BUSY_); \ - SMC_SET_MAC_CMD((lp), MAC_CSR_CMD_R_NOT_W_ | a ); \ - while (SMC_GET_MAC_CMD((lp)) & MAC_CSR_CMD_CSR_BUSY_); \ - } while (0) - -#endif /* _SMC911X_H_ */ diff --git a/drivers/net/ipa/gsi_trans.c b/drivers/net/ipa/gsi_trans.c index 26b7f683a3e1..0f52c068c46d 100644 --- a/drivers/net/ipa/gsi_trans.c +++ b/drivers/net/ipa/gsi_trans.c @@ -87,6 +87,7 @@ struct gsi_tre { int gsi_trans_pool_init(struct gsi_trans_pool *pool, size_t size, u32 count, u32 max_alloc) { + size_t alloc_size; void *virt; if (!size) @@ -103,13 +104,15 @@ int gsi_trans_pool_init(struct gsi_trans_pool *pool, size_t size, u32 count, * If there aren't enough entries starting at the free index, * we just allocate free entries from the beginning of the 
pool. */ - virt = kcalloc(count + max_alloc - 1, size, GFP_KERNEL); + alloc_size = size_mul(count + max_alloc - 1, size); + alloc_size = kmalloc_size_roundup(alloc_size); + virt = kzalloc(alloc_size, GFP_KERNEL); if (!virt) return -ENOMEM; pool->base = virt; /* If the allocator gave us any extra memory, use it */ - pool->count = ksize(pool->base) / size; + pool->count = alloc_size / size; pool->free = 0; pool->max_alloc = max_alloc; pool->size = size; diff --git a/drivers/net/ipa/ipa_qmi_msg.c b/drivers/net/ipa/ipa_qmi_msg.c index 97c0befe8d86..894f99517233 100644 --- a/drivers/net/ipa/ipa_qmi_msg.c +++ b/drivers/net/ipa/ipa_qmi_msg.c @@ -9,7 +9,7 @@ #include "ipa_qmi_msg.h" /* QMI message structure definition for struct ipa_indication_register_req */ -struct qmi_elem_info ipa_indication_register_req_ei[] = { +const struct qmi_elem_info ipa_indication_register_req_ei[] = { { .data_type = QMI_OPT_FLAG, .elem_len = 1, @@ -116,7 +116,7 @@ struct qmi_elem_info ipa_indication_register_req_ei[] = { }; /* QMI message structure definition for struct ipa_indication_register_rsp */ -struct qmi_elem_info ipa_indication_register_rsp_ei[] = { +const struct qmi_elem_info ipa_indication_register_rsp_ei[] = { { .data_type = QMI_STRUCT, .elem_len = 1, @@ -134,7 +134,7 @@ struct qmi_elem_info ipa_indication_register_rsp_ei[] = { }; /* QMI message structure definition for struct ipa_driver_init_complete_req */ -struct qmi_elem_info ipa_driver_init_complete_req_ei[] = { +const struct qmi_elem_info ipa_driver_init_complete_req_ei[] = { { .data_type = QMI_UNSIGNED_1_BYTE, .elem_len = 1, @@ -151,7 +151,7 @@ struct qmi_elem_info ipa_driver_init_complete_req_ei[] = { }; /* QMI message structure definition for struct ipa_driver_init_complete_rsp */ -struct qmi_elem_info ipa_driver_init_complete_rsp_ei[] = { +const struct qmi_elem_info ipa_driver_init_complete_rsp_ei[] = { { .data_type = QMI_STRUCT, .elem_len = 1, @@ -169,7 +169,7 @@ struct qmi_elem_info ipa_driver_init_complete_rsp_ei[] = { }; /* QMI message structure definition for struct ipa_init_complete_ind */ -struct qmi_elem_info ipa_init_complete_ind_ei[] = { +const struct qmi_elem_info ipa_init_complete_ind_ei[] = { { .data_type = QMI_STRUCT, .elem_len = 1, @@ -187,7 +187,7 @@ struct qmi_elem_info ipa_init_complete_ind_ei[] = { }; /* QMI message structure definition for struct ipa_mem_bounds */ -struct qmi_elem_info ipa_mem_bounds_ei[] = { +const struct qmi_elem_info ipa_mem_bounds_ei[] = { { .data_type = QMI_UNSIGNED_4_BYTE, .elem_len = 1, @@ -208,7 +208,7 @@ struct qmi_elem_info ipa_mem_bounds_ei[] = { }; /* QMI message structure definition for struct ipa_mem_array */ -struct qmi_elem_info ipa_mem_array_ei[] = { +const struct qmi_elem_info ipa_mem_array_ei[] = { { .data_type = QMI_UNSIGNED_4_BYTE, .elem_len = 1, @@ -229,7 +229,7 @@ struct qmi_elem_info ipa_mem_array_ei[] = { }; /* QMI message structure definition for struct ipa_mem_range */ -struct qmi_elem_info ipa_mem_range_ei[] = { +const struct qmi_elem_info ipa_mem_range_ei[] = { { .data_type = QMI_UNSIGNED_4_BYTE, .elem_len = 1, @@ -250,7 +250,7 @@ struct qmi_elem_info ipa_mem_range_ei[] = { }; /* QMI message structure definition for struct ipa_init_modem_driver_req */ -struct qmi_elem_info ipa_init_modem_driver_req_ei[] = { +const struct qmi_elem_info ipa_init_modem_driver_req_ei[] = { { .data_type = QMI_OPT_FLAG, .elem_len = 1, @@ -645,7 +645,7 @@ struct qmi_elem_info ipa_init_modem_driver_req_ei[] = { }; /* QMI message structure definition for struct ipa_init_modem_driver_rsp */ -struct 
qmi_elem_info ipa_init_modem_driver_rsp_ei[] = { +const struct qmi_elem_info ipa_init_modem_driver_rsp_ei[] = { { .data_type = QMI_STRUCT, .elem_len = 1, diff --git a/drivers/net/ipa/ipa_qmi_msg.h b/drivers/net/ipa/ipa_qmi_msg.h index e29663965f43..b73503552c4d 100644 --- a/drivers/net/ipa/ipa_qmi_msg.h +++ b/drivers/net/ipa/ipa_qmi_msg.h @@ -247,15 +247,15 @@ struct ipa_init_modem_driver_rsp { }; /* Message structure definitions defined in "ipa_qmi_msg.c" */ -extern struct qmi_elem_info ipa_indication_register_req_ei[]; -extern struct qmi_elem_info ipa_indication_register_rsp_ei[]; -extern struct qmi_elem_info ipa_driver_init_complete_req_ei[]; -extern struct qmi_elem_info ipa_driver_init_complete_rsp_ei[]; -extern struct qmi_elem_info ipa_init_complete_ind_ei[]; -extern struct qmi_elem_info ipa_mem_bounds_ei[]; -extern struct qmi_elem_info ipa_mem_array_ei[]; -extern struct qmi_elem_info ipa_mem_range_ei[]; -extern struct qmi_elem_info ipa_init_modem_driver_req_ei[]; -extern struct qmi_elem_info ipa_init_modem_driver_rsp_ei[]; +extern const struct qmi_elem_info ipa_indication_register_req_ei[]; +extern const struct qmi_elem_info ipa_indication_register_rsp_ei[]; +extern const struct qmi_elem_info ipa_driver_init_complete_req_ei[]; +extern const struct qmi_elem_info ipa_driver_init_complete_rsp_ei[]; +extern const struct qmi_elem_info ipa_init_complete_ind_ei[]; +extern const struct qmi_elem_info ipa_mem_bounds_ei[]; +extern const struct qmi_elem_info ipa_mem_array_ei[]; +extern const struct qmi_elem_info ipa_mem_range_ei[]; +extern const struct qmi_elem_info ipa_init_modem_driver_req_ei[]; +extern const struct qmi_elem_info ipa_init_modem_driver_rsp_ei[]; #endif /* !_IPA_QMI_MSG_H_ */ diff --git a/drivers/net/phy/phy-core.c b/drivers/net/phy/phy-core.c index 2c8bf438ea61..5d08c627a516 100644 --- a/drivers/net/phy/phy-core.c +++ b/drivers/net/phy/phy-core.c @@ -13,7 +13,7 @@ */ const char *phy_speed_to_str(int speed) { - BUILD_BUG_ON_MSG(__ETHTOOL_LINK_MODE_MASK_NBITS != 93, + BUILD_BUG_ON_MSG(__ETHTOOL_LINK_MODE_MASK_NBITS != 99, "Enum ethtool_link_mode_bit_indices and phylib are out of sync. " "If a speed or mode has been added please update phy_speed_to_str " "and the PHY settings array.\n"); @@ -49,6 +49,8 @@ const char *phy_speed_to_str(int speed) return "200Gbps"; case SPEED_400000: return "400Gbps"; + case SPEED_800000: + return "800Gbps"; case SPEED_UNKNOWN: return "Unknown"; default: @@ -157,6 +159,13 @@ EXPORT_SYMBOL_GPL(phy_interface_num_ports); .bit = ETHTOOL_LINK_MODE_ ## b ## _BIT} static const struct phy_setting settings[] = { + /* 800G */ + PHY_SETTING( 800000, FULL, 800000baseCR8_Full ), + PHY_SETTING( 800000, FULL, 800000baseKR8_Full ), + PHY_SETTING( 800000, FULL, 800000baseDR8_Full ), + PHY_SETTING( 800000, FULL, 800000baseDR8_2_Full ), + PHY_SETTING( 800000, FULL, 800000baseSR8_Full ), + PHY_SETTING( 800000, FULL, 800000baseVR8_Full ), /* 400G */ PHY_SETTING( 400000, FULL, 400000baseCR8_Full ), PHY_SETTING( 400000, FULL, 400000baseKR8_Full ), diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c index 6547b6cc6cbe..62106c9e9a9d 100644 --- a/drivers/net/phy/phylink.c +++ b/drivers/net/phy/phylink.c @@ -562,31 +562,48 @@ unsigned long phylink_get_capabilities(phy_interface_t interface, EXPORT_SYMBOL_GPL(phylink_get_capabilities); /** - * phylink_generic_validate() - generic validate() callback implementation - * @config: a pointer to a &struct phylink_config. 
+ * phylink_validate_mask_caps() - Restrict link modes based on caps * @supported: ethtool bitmask for supported link modes. - * @state: a pointer to a &struct phylink_link_state. + * @state: an (optional) pointer to a &struct phylink_link_state. + * @mac_capabilities: bitmask of MAC capabilities * - * Generic implementation of the validate() callback that MAC drivers can - * use when they pass the range of supported interfaces and MAC capabilities. - * This makes use of phylink_get_linkmodes(). + * Calculate the supported link modes based on @mac_capabilities, and restrict + * @supported and @state based on that. Use this function if your capabilities + * aren't constant, such as if they vary depending on the interface. */ -void phylink_generic_validate(struct phylink_config *config, - unsigned long *supported, - struct phylink_link_state *state) +void phylink_validate_mask_caps(unsigned long *supported, + struct phylink_link_state *state, + unsigned long mac_capabilities) { __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, }; unsigned long caps; phylink_set_port_modes(mask); phylink_set(mask, Autoneg); - caps = phylink_get_capabilities(state->interface, - config->mac_capabilities, + caps = phylink_get_capabilities(state->interface, mac_capabilities, state->rate_matching); phylink_caps_to_linkmodes(mask, caps); linkmode_and(supported, supported, mask); - linkmode_and(state->advertising, state->advertising, mask); + if (state) + linkmode_and(state->advertising, state->advertising, mask); +} +EXPORT_SYMBOL_GPL(phylink_validate_mask_caps); + +/** + * phylink_generic_validate() - generic validate() callback implementation + * @config: a pointer to a &struct phylink_config. + * @supported: ethtool bitmask for supported link modes. + * @state: a pointer to a &struct phylink_link_state. + * + * Generic implementation of the validate() callback that MAC drivers can + * use when they pass the range of supported interfaces and MAC capabilities. 
+ */ +void phylink_generic_validate(struct phylink_config *config, + unsigned long *supported, + struct phylink_link_state *state) +{ + phylink_validate_mask_caps(supported, state, config->mac_capabilities); } EXPORT_SYMBOL_GPL(phylink_generic_validate); diff --git a/drivers/ptp/ptp_ocp.c b/drivers/ptp/ptp_ocp.c index a48d9b7d2921..154d58cbd9ce 100644 --- a/drivers/ptp/ptp_ocp.c +++ b/drivers/ptp/ptp_ocp.c @@ -13,9 +13,11 @@ #include <linux/clk-provider.h> #include <linux/platform_device.h> #include <linux/platform_data/i2c-xiic.h> +#include <linux/platform_data/i2c-ocores.h> #include <linux/ptp_clock_kernel.h> #include <linux/spi/spi.h> #include <linux/spi/xilinx_spi.h> +#include <linux/spi/altera.h> #include <net/devlink.h> #include <linux/i2c.h> #include <linux/mtd/mtd.h> @@ -28,6 +30,9 @@ #define PCI_VENDOR_ID_CELESTICA 0x18d4 #define PCI_DEVICE_ID_CELESTICA_TIMECARD 0x1008 +#define PCI_VENDOR_ID_OROLIA 0x1ad7 +#define PCI_DEVICE_ID_OROLIA_ARTCARD 0xa000 + static struct class timecard_class = { .owner = THIS_MODULE, .name = "timecard", @@ -203,6 +208,11 @@ struct frequency_reg { u32 ctrl; u32 status; }; + +struct board_config_reg { + u32 mro50_serial_activate; +}; + #define FREQ_STATUS_VALID BIT(31) #define FREQ_STATUS_ERROR BIT(30) #define FREQ_STATUS_OVERRUN BIT(29) @@ -278,6 +288,11 @@ struct ptp_ocp_signal { bool running; }; +struct ptp_ocp_serial_port { + int line; + int baud; +}; + #define OCP_BOARD_ID_LEN 13 #define OCP_SERIAL_LEN 6 @@ -289,6 +304,7 @@ struct ptp_ocp { struct tod_reg __iomem *tod; struct pps_reg __iomem *pps_to_ext; struct pps_reg __iomem *pps_to_clk; + struct board_config_reg __iomem *board_config; struct gpio_reg __iomem *pps_select; struct gpio_reg __iomem *sma_map1; struct gpio_reg __iomem *sma_map2; @@ -305,6 +321,7 @@ struct ptp_ocp { struct ptp_ocp_ext_src *ts2; struct ptp_ocp_ext_src *ts3; struct ptp_ocp_ext_src *ts4; + struct ocp_art_gpio_reg __iomem *art_sma; struct img_reg __iomem *image; struct ptp_clock *ptp; struct ptp_clock_info ptp_info; @@ -318,10 +335,10 @@ struct ptp_ocp { time64_t gnss_lost; int id; int n_irqs; - int gnss_port; - int gnss2_port; - int mac_port; /* miniature atomic clock */ - int nmea_port; + struct ptp_ocp_serial_port gnss_port; + struct ptp_ocp_serial_port gnss2_port; + struct ptp_ocp_serial_port mac_port; /* miniature atomic clock */ + struct ptp_ocp_serial_port nmea_port; bool fw_loader; u8 fw_tag; u16 fw_version; @@ -365,8 +382,12 @@ static int ptp_ocp_signal_from_perout(struct ptp_ocp *bp, int gen, static int ptp_ocp_signal_enable(void *priv, u32 req, bool enable); static int ptp_ocp_sma_store(struct ptp_ocp *bp, const char *buf, int sma_nr); +static int ptp_ocp_art_board_init(struct ptp_ocp *bp, struct ocp_resource *r); + static const struct ocp_attr_group fb_timecard_groups[]; +static const struct ocp_attr_group art_timecard_groups[]; + struct ptp_ocp_eeprom_map { u16 off; u16 len; @@ -389,6 +410,12 @@ static struct ptp_ocp_eeprom_map fb_eeprom_map[] = { { } }; +static struct ptp_ocp_eeprom_map art_eeprom_map[] = { + { EEPROM_ENTRY(0x200 + 0x43, board_id) }, + { EEPROM_ENTRY(0x200 + 0x63, serial) }, + { } +}; + #define bp_assign_entry(bp, res, val) ({ \ uintptr_t addr = (uintptr_t)(bp) + (res)->bp_offset; \ *(typeof(val) *)addr = val; \ @@ -430,6 +457,13 @@ static struct ptp_ocp_eeprom_map fb_eeprom_map[] = { * 14: Signal Generator 4 * 15: TS3 * 16: TS4 + -- + * 8: Orolia TS1 + * 10: Orolia TS2 + * 11: Orolia TS0 (GNSS) + * 12: Orolia PPS + * 14: Orolia TS3 + * 15: Orolia TS4 */ static struct ocp_resource 
ocp_fb_resource[] = { @@ -596,14 +630,23 @@ static struct ocp_resource ocp_fb_resource[] = { { OCP_SERIAL_RESOURCE(gnss_port), .offset = 0x00160000 + 0x1000, .irq_vec = 3, + .extra = &(struct ptp_ocp_serial_port) { + .baud = 115200, + }, }, { OCP_SERIAL_RESOURCE(gnss2_port), .offset = 0x00170000 + 0x1000, .irq_vec = 4, + .extra = &(struct ptp_ocp_serial_port) { + .baud = 115200, + }, }, { OCP_SERIAL_RESOURCE(mac_port), .offset = 0x00180000 + 0x1000, .irq_vec = 5, + .extra = &(struct ptp_ocp_serial_port) { + .baud = 57600, + }, }, { OCP_SERIAL_RESOURCE(nmea_port), @@ -647,9 +690,141 @@ static struct ocp_resource ocp_fb_resource[] = { { } }; +#define OCP_ART_CONFIG_SIZE 144 +#define OCP_ART_TEMP_TABLE_SIZE 368 + +struct ocp_art_gpio_reg { + struct { + u32 gpio; + u32 __pad[3]; + } map[4]; +}; + +static struct ocp_resource ocp_art_resource[] = { + { + OCP_MEM_RESOURCE(reg), + .offset = 0x01000000, .size = 0x10000, + }, + { + OCP_SERIAL_RESOURCE(gnss_port), + .offset = 0x00160000 + 0x1000, .irq_vec = 3, + .extra = &(struct ptp_ocp_serial_port) { + .baud = 115200, + }, + }, + { + OCP_MEM_RESOURCE(art_sma), + .offset = 0x003C0000, .size = 0x1000, + }, + /* Timestamp associated with GNSS1 receiver PPS */ + { + OCP_EXT_RESOURCE(ts0), + .offset = 0x360000, .size = 0x20, .irq_vec = 12, + .extra = &(struct ptp_ocp_ext_info) { + .index = 0, + .irq_fcn = ptp_ocp_ts_irq, + .enable = ptp_ocp_ts_enable, + }, + }, + { + OCP_EXT_RESOURCE(ts1), + .offset = 0x380000, .size = 0x20, .irq_vec = 8, + .extra = &(struct ptp_ocp_ext_info) { + .index = 1, + .irq_fcn = ptp_ocp_ts_irq, + .enable = ptp_ocp_ts_enable, + }, + }, + { + OCP_EXT_RESOURCE(ts2), + .offset = 0x390000, .size = 0x20, .irq_vec = 10, + .extra = &(struct ptp_ocp_ext_info) { + .index = 2, + .irq_fcn = ptp_ocp_ts_irq, + .enable = ptp_ocp_ts_enable, + }, + }, + { + OCP_EXT_RESOURCE(ts3), + .offset = 0x3A0000, .size = 0x20, .irq_vec = 14, + .extra = &(struct ptp_ocp_ext_info) { + .index = 3, + .irq_fcn = ptp_ocp_ts_irq, + .enable = ptp_ocp_ts_enable, + }, + }, + { + OCP_EXT_RESOURCE(ts4), + .offset = 0x3B0000, .size = 0x20, .irq_vec = 15, + .extra = &(struct ptp_ocp_ext_info) { + .index = 4, + .irq_fcn = ptp_ocp_ts_irq, + .enable = ptp_ocp_ts_enable, + }, + }, + /* Timestamp associated with Internal PPS of the card */ + { + OCP_EXT_RESOURCE(pps), + .offset = 0x00330000, .size = 0x20, .irq_vec = 11, + .extra = &(struct ptp_ocp_ext_info) { + .index = 5, + .irq_fcn = ptp_ocp_ts_irq, + .enable = ptp_ocp_ts_enable, + }, + }, + { + OCP_SPI_RESOURCE(spi_flash), + .offset = 0x00310000, .size = 0x10000, .irq_vec = 9, + .extra = &(struct ptp_ocp_flash_info) { + .name = "spi_altera", .pci_offset = 0, + .data_size = sizeof(struct altera_spi_platform_data), + .data = &(struct altera_spi_platform_data) { + .num_chipselect = 1, + .num_devices = 1, + .devices = &(struct spi_board_info) { + .modalias = "spi-nor", + }, + }, + }, + }, + { + OCP_I2C_RESOURCE(i2c_ctrl), + .offset = 0x350000, .size = 0x100, .irq_vec = 4, + .extra = &(struct ptp_ocp_i2c_info) { + .name = "ocores-i2c", + .fixed_rate = 400000, + .data_size = sizeof(struct ocores_i2c_platform_data), + .data = &(struct ocores_i2c_platform_data) { + .clock_khz = 125000, + .bus_khz = 400, + .num_devices = 1, + .devices = &(struct i2c_board_info) { + I2C_BOARD_INFO("24c08", 0x50), + }, + }, + }, + }, + { + OCP_SERIAL_RESOURCE(mac_port), + .offset = 0x00190000, .irq_vec = 7, + .extra = &(struct ptp_ocp_serial_port) { + .baud = 9600, + }, + }, + { + OCP_MEM_RESOURCE(board_config), + .offset = 0x210000, .size = 0x1000, 
+ }, + { + .setup = ptp_ocp_art_board_init, + }, + { } +}; + static const struct pci_device_id ptp_ocp_pcidev_id[] = { { PCI_DEVICE_DATA(FACEBOOK, TIMECARD, &ocp_fb_resource) }, { PCI_DEVICE_DATA(CELESTICA, TIMECARD, &ocp_fb_resource) }, + { PCI_DEVICE_DATA(OROLIA, ARTCARD, &ocp_art_resource) }, { } }; MODULE_DEVICE_TABLE(pci, ptp_ocp_pcidev_id); @@ -714,6 +889,19 @@ static const struct ocp_selector ptp_ocp_sma_out[] = { { } }; +static const struct ocp_selector ptp_ocp_art_sma_in[] = { + { .name = "PPS1", .value = 0x0001 }, + { .name = "10Mhz", .value = 0x0008 }, + { } +}; + +static const struct ocp_selector ptp_ocp_art_sma_out[] = { + { .name = "PHC", .value = 0x0002 }, + { .name = "GNSS", .value = 0x0004 }, + { .name = "10Mhz", .value = 0x0010 }, + { } +}; + struct ocp_sma_op { const struct ocp_selector *tbl[2]; void (*init)(struct ptp_ocp *bp); @@ -1342,11 +1530,9 @@ ptp_ocp_devlink_fw_image(struct devlink *devlink, const struct firmware *fw, hdr = (const struct ptp_ocp_firmware_header *)fw->data; if (memcmp(hdr->magic, OCP_FIRMWARE_MAGIC_HEADER, 4)) { devlink_flash_update_status_notify(devlink, - "No firmware header found, flashing raw image", + "No firmware header found, cancel firmware upgrade", NULL, 0, 0); - offset = 0; - length = fw->size; - goto out; + return -EINVAL; } if (be16_to_cpu(hdr->pci_vendor_id) != bp->pdev->vendor || @@ -1374,7 +1560,6 @@ ptp_ocp_devlink_fw_image(struct devlink *devlink, const struct firmware *fw, return -EINVAL; } -out: *data = &fw->data[offset]; *size = length; @@ -1872,11 +2057,15 @@ ptp_ocp_serial_line(struct ptp_ocp *bp, struct ocp_resource *r) static int ptp_ocp_register_serial(struct ptp_ocp *bp, struct ocp_resource *r) { - int port; + struct ptp_ocp_serial_port *p = (struct ptp_ocp_serial_port *)r->extra; + struct ptp_ocp_serial_port port = {}; - port = ptp_ocp_serial_line(bp, r); - if (port < 0) - return port; + port.line = ptp_ocp_serial_line(bp, r); + if (port.line < 0) + return port.line; + + if (p) + port.baud = p->baud; bp_assign_entry(bp, r, port); @@ -2257,6 +2446,121 @@ ptp_ocp_register_resources(struct ptp_ocp *bp, kernel_ulong_t driver_data) return err; } +static void +ptp_ocp_art_sma_init(struct ptp_ocp *bp) +{ + u32 reg; + int i; + + /* defaults */ + bp->sma[0].mode = SMA_MODE_IN; + bp->sma[1].mode = SMA_MODE_IN; + bp->sma[2].mode = SMA_MODE_OUT; + bp->sma[3].mode = SMA_MODE_OUT; + + bp->sma[0].default_fcn = 0x08; /* IN: 10Mhz */ + bp->sma[1].default_fcn = 0x01; /* IN: PPS1 */ + bp->sma[2].default_fcn = 0x10; /* OUT: 10Mhz */ + bp->sma[3].default_fcn = 0x02; /* OUT: PHC */ + + /* If no SMA map, the pin functions and directions are fixed. */ + if (!bp->art_sma) { + for (i = 0; i < 4; i++) { + bp->sma[i].fixed_fcn = true; + bp->sma[i].fixed_dir = true; + } + return; + } + + for (i = 0; i < 4; i++) { + reg = ioread32(&bp->art_sma->map[i].gpio); + + switch (reg & 0xff) { + case 0: + bp->sma[i].fixed_fcn = true; + bp->sma[i].fixed_dir = true; + break; + case 1: + case 8: + bp->sma[i].mode = SMA_MODE_IN; + break; + default: + bp->sma[i].mode = SMA_MODE_OUT; + break; + } + } +} + +static u32 +ptp_ocp_art_sma_get(struct ptp_ocp *bp, int sma_nr) +{ + if (bp->sma[sma_nr - 1].fixed_fcn) + return bp->sma[sma_nr - 1].default_fcn; + + return ioread32(&bp->art_sma->map[sma_nr - 1].gpio) & 0xff; +} + +/* note: store 0 is considered invalid. 
*/ +static int +ptp_ocp_art_sma_set(struct ptp_ocp *bp, int sma_nr, u32 val) +{ + unsigned long flags; + u32 __iomem *gpio; + int err = 0; + u32 reg; + + val &= SMA_SELECT_MASK; + if (hweight32(val) > 1) + return -EINVAL; + + gpio = &bp->art_sma->map[sma_nr - 1].gpio; + + spin_lock_irqsave(&bp->lock, flags); + reg = ioread32(gpio); + if (((reg >> 16) & val) == 0) { + err = -EOPNOTSUPP; + } else { + reg = (reg & 0xff00) | (val & 0xff); + iowrite32(reg, gpio); + } + spin_unlock_irqrestore(&bp->lock, flags); + + return err; +} + +static const struct ocp_sma_op ocp_art_sma_op = { + .tbl = { ptp_ocp_art_sma_in, ptp_ocp_art_sma_out }, + .init = ptp_ocp_art_sma_init, + .get = ptp_ocp_art_sma_get, + .set_inputs = ptp_ocp_art_sma_set, + .set_output = ptp_ocp_art_sma_set, +}; + +/* ART specific board initializers; last "resource" registered. */ +static int +ptp_ocp_art_board_init(struct ptp_ocp *bp, struct ocp_resource *r) +{ + int err; + + bp->flash_start = 0x1000000; + bp->eeprom_map = art_eeprom_map; + bp->fw_cap = OCP_CAP_BASIC; + bp->fw_version = ioread32(&bp->reg->version); + bp->fw_tag = 2; + bp->sma_op = &ocp_art_sma_op; + + /* Enable MAC serial port during initialisation */ + iowrite32(1, &bp->board_config->mro50_serial_activate); + + ptp_ocp_sma_init(bp); + + err = ptp_ocp_attr_group_add(bp, art_timecard_groups); + if (err) + return err; + + return ptp_ocp_init_clock(bp); +} + static ssize_t ptp_ocp_show_output(const struct ocp_selector *tbl, u32 val, char *buf, int def_val) @@ -3030,6 +3334,130 @@ DEVICE_FREQ_GROUP(freq2, 1); DEVICE_FREQ_GROUP(freq3, 2); DEVICE_FREQ_GROUP(freq4, 3); +static ssize_t +disciplining_config_read(struct file *filp, struct kobject *kobj, + struct bin_attribute *bin_attr, char *buf, + loff_t off, size_t count) +{ + struct ptp_ocp *bp = dev_get_drvdata(kobj_to_dev(kobj)); + size_t size = OCP_ART_CONFIG_SIZE; + struct nvmem_device *nvmem; + ssize_t err; + + nvmem = ptp_ocp_nvmem_device_get(bp, NULL); + if (IS_ERR(nvmem)) + return PTR_ERR(nvmem); + + if (off > size) { + err = 0; + goto out; + } + + if (off + count > size) + count = size - off; + + // the configuration is in the very beginning of the EEPROM + err = nvmem_device_read(nvmem, off, count, buf); + if (err != count) { + err = -EFAULT; + goto out; + } + +out: + ptp_ocp_nvmem_device_put(&nvmem); + + return err; +} + +static ssize_t +disciplining_config_write(struct file *filp, struct kobject *kobj, + struct bin_attribute *bin_attr, char *buf, + loff_t off, size_t count) +{ + struct ptp_ocp *bp = dev_get_drvdata(kobj_to_dev(kobj)); + struct nvmem_device *nvmem; + ssize_t err; + + /* Allow write of the whole area only */ + if (off || count != OCP_ART_CONFIG_SIZE) + return -EFAULT; + + nvmem = ptp_ocp_nvmem_device_get(bp, NULL); + if (IS_ERR(nvmem)) + return PTR_ERR(nvmem); + + err = nvmem_device_write(nvmem, 0x00, count, buf); + if (err != count) + err = -EFAULT; + + ptp_ocp_nvmem_device_put(&nvmem); + + return err; +} +static BIN_ATTR_RW(disciplining_config, OCP_ART_CONFIG_SIZE); + +static ssize_t +temperature_table_read(struct file *filp, struct kobject *kobj, + struct bin_attribute *bin_attr, char *buf, + loff_t off, size_t count) +{ + struct ptp_ocp *bp = dev_get_drvdata(kobj_to_dev(kobj)); + size_t size = OCP_ART_TEMP_TABLE_SIZE; + struct nvmem_device *nvmem; + ssize_t err; + + nvmem = ptp_ocp_nvmem_device_get(bp, NULL); + if (IS_ERR(nvmem)) + return PTR_ERR(nvmem); + + if (off > size) { + err = 0; + goto out; + } + + if (off + count > size) + count = size - off; + + // the configuration is in the very 
beginning of the EEPROM + err = nvmem_device_read(nvmem, 0x90 + off, count, buf); + if (err != count) { + err = -EFAULT; + goto out; + } + +out: + ptp_ocp_nvmem_device_put(&nvmem); + + return err; +} + +static ssize_t +temperature_table_write(struct file *filp, struct kobject *kobj, + struct bin_attribute *bin_attr, char *buf, + loff_t off, size_t count) +{ + struct ptp_ocp *bp = dev_get_drvdata(kobj_to_dev(kobj)); + struct nvmem_device *nvmem; + ssize_t err; + + /* Allow write of the whole area only */ + if (off || count != OCP_ART_TEMP_TABLE_SIZE) + return -EFAULT; + + nvmem = ptp_ocp_nvmem_device_get(bp, NULL); + if (IS_ERR(nvmem)) + return PTR_ERR(nvmem); + + err = nvmem_device_write(nvmem, 0x90, count, buf); + if (err != count) + err = -EFAULT; + + ptp_ocp_nvmem_device_put(&nvmem); + + return err; +} +static BIN_ATTR_RW(temperature_table, OCP_ART_TEMP_TABLE_SIZE); + static struct attribute *fb_timecard_attrs[] = { &dev_attr_serialnum.attr, &dev_attr_gnss_sync.attr, @@ -3049,9 +3477,11 @@ static struct attribute *fb_timecard_attrs[] = { &dev_attr_tod_correction.attr, NULL, }; + static const struct attribute_group fb_timecard_group = { .attrs = fb_timecard_attrs, }; + static const struct ocp_attr_group fb_timecard_groups[] = { { .cap = OCP_CAP_BASIC, .group = &fb_timecard_group }, { .cap = OCP_CAP_SIGNAL, .group = &fb_timecard_signal0_group }, @@ -3065,6 +3495,37 @@ static const struct ocp_attr_group fb_timecard_groups[] = { { }, }; +static struct attribute *art_timecard_attrs[] = { + &dev_attr_serialnum.attr, + &dev_attr_clock_source.attr, + &dev_attr_available_clock_sources.attr, + &dev_attr_utc_tai_offset.attr, + &dev_attr_ts_window_adjust.attr, + &dev_attr_sma1.attr, + &dev_attr_sma2.attr, + &dev_attr_sma3.attr, + &dev_attr_sma4.attr, + &dev_attr_available_sma_inputs.attr, + &dev_attr_available_sma_outputs.attr, + NULL, +}; + +static struct bin_attribute *bin_art_timecard_attrs[] = { + &bin_attr_disciplining_config, + &bin_attr_temperature_table, + NULL, +}; + +static const struct attribute_group art_timecard_group = { + .attrs = art_timecard_attrs, + .bin_attrs = bin_art_timecard_attrs, +}; + +static const struct ocp_attr_group art_timecard_groups[] = { + { .cap = OCP_CAP_BASIC, .group = &art_timecard_group }, + { }, +}; + static void gpio_input_map(char *buf, struct ptp_ocp *bp, u16 map[][2], u16 bit, const char *def) @@ -3177,14 +3638,16 @@ ptp_ocp_summary_show(struct seq_file *s, void *data) bp = dev_get_drvdata(dev); seq_printf(s, "%7s: /dev/ptp%d\n", "PTP", ptp_clock_index(bp->ptp)); - if (bp->gnss_port != -1) - seq_printf(s, "%7s: /dev/ttyS%d\n", "GNSS1", bp->gnss_port); - if (bp->gnss2_port != -1) - seq_printf(s, "%7s: /dev/ttyS%d\n", "GNSS2", bp->gnss2_port); - if (bp->mac_port != -1) - seq_printf(s, "%7s: /dev/ttyS%d\n", "MAC", bp->mac_port); - if (bp->nmea_port != -1) - seq_printf(s, "%7s: /dev/ttyS%d\n", "NMEA", bp->nmea_port); + if (bp->gnss_port.line != -1) + seq_printf(s, "%7s: /dev/ttyS%d\n", "GNSS1", + bp->gnss_port.line); + if (bp->gnss2_port.line != -1) + seq_printf(s, "%7s: /dev/ttyS%d\n", "GNSS2", + bp->gnss2_port.line); + if (bp->mac_port.line != -1) + seq_printf(s, "%7s: /dev/ttyS%d\n", "MAC", bp->mac_port.line); + if (bp->nmea_port.line != -1) + seq_printf(s, "%7s: /dev/ttyS%d\n", "NMEA", bp->nmea_port.line); memset(sma_val, 0xff, sizeof(sma_val)); if (bp->sma_map1) { @@ -3508,10 +3971,10 @@ ptp_ocp_device_init(struct ptp_ocp *bp, struct pci_dev *pdev) bp->ptp_info = ptp_ocp_clock_info; spin_lock_init(&bp->lock); - bp->gnss_port = -1; - bp->gnss2_port = -1; - 
bp->mac_port = -1; - bp->nmea_port = -1; + bp->gnss_port.line = -1; + bp->gnss2_port.line = -1; + bp->mac_port.line = -1; + bp->nmea_port.line = -1; bp->pdev = pdev; device_initialize(&bp->dev); @@ -3569,20 +4032,20 @@ ptp_ocp_complete(struct ptp_ocp *bp) struct pps_device *pps; char buf[32]; - if (bp->gnss_port != -1) { - sprintf(buf, "ttyS%d", bp->gnss_port); + if (bp->gnss_port.line != -1) { + sprintf(buf, "ttyS%d", bp->gnss_port.line); ptp_ocp_link_child(bp, buf, "ttyGNSS"); } - if (bp->gnss2_port != -1) { - sprintf(buf, "ttyS%d", bp->gnss2_port); + if (bp->gnss2_port.line != -1) { + sprintf(buf, "ttyS%d", bp->gnss2_port.line); ptp_ocp_link_child(bp, buf, "ttyGNSS2"); } - if (bp->mac_port != -1) { - sprintf(buf, "ttyS%d", bp->mac_port); + if (bp->mac_port.line != -1) { + sprintf(buf, "ttyS%d", bp->mac_port.line); ptp_ocp_link_child(bp, buf, "ttyMAC"); } - if (bp->nmea_port != -1) { - sprintf(buf, "ttyS%d", bp->nmea_port); + if (bp->nmea_port.line != -1) { + sprintf(buf, "ttyS%d", bp->nmea_port.line); ptp_ocp_link_child(bp, buf, "ttyNMEA"); } sprintf(buf, "ptp%d", ptp_clock_index(bp->ptp)); @@ -3638,16 +4101,20 @@ ptp_ocp_info(struct ptp_ocp *bp) ptp_ocp_phc_info(bp); - ptp_ocp_serial_info(dev, "GNSS", bp->gnss_port, 115200); - ptp_ocp_serial_info(dev, "GNSS2", bp->gnss2_port, 115200); - ptp_ocp_serial_info(dev, "MAC", bp->mac_port, 57600); - if (bp->nmea_out && bp->nmea_port != -1) { - int baud = -1; + ptp_ocp_serial_info(dev, "GNSS", bp->gnss_port.line, + bp->gnss_port.baud); + ptp_ocp_serial_info(dev, "GNSS2", bp->gnss2_port.line, + bp->gnss2_port.baud); + ptp_ocp_serial_info(dev, "MAC", bp->mac_port.line, bp->mac_port.baud); + if (bp->nmea_out && bp->nmea_port.line != -1) { + bp->nmea_port.baud = -1; reg = ioread32(&bp->nmea_out->uart_baud); if (reg < ARRAY_SIZE(nmea_baud)) - baud = nmea_baud[reg]; - ptp_ocp_serial_info(dev, "NMEA", bp->nmea_port, baud); + bp->nmea_port.baud = nmea_baud[reg]; + + ptp_ocp_serial_info(dev, "NMEA", bp->nmea_port.line, + bp->nmea_port.baud); } } @@ -3688,14 +4155,14 @@ ptp_ocp_detach(struct ptp_ocp *bp) for (i = 0; i < 4; i++) if (bp->signal_out[i]) ptp_ocp_unregister_ext(bp->signal_out[i]); - if (bp->gnss_port != -1) - serial8250_unregister_port(bp->gnss_port); - if (bp->gnss2_port != -1) - serial8250_unregister_port(bp->gnss2_port); - if (bp->mac_port != -1) - serial8250_unregister_port(bp->mac_port); - if (bp->nmea_port != -1) - serial8250_unregister_port(bp->nmea_port); + if (bp->gnss_port.line != -1) + serial8250_unregister_port(bp->gnss_port.line); + if (bp->gnss2_port.line != -1) + serial8250_unregister_port(bp->gnss2_port.line); + if (bp->mac_port.line != -1) + serial8250_unregister_port(bp->mac_port.line); + if (bp->nmea_port.line != -1) + serial8250_unregister_port(bp->nmea_port.line); platform_device_unregister(bp->spi_flash); platform_device_unregister(bp->i2c_ctrl); if (bp->i2c_clk) diff --git a/include/linux/net.h b/include/linux/net.h index 18d942bbdf6e..b73ad8e3c212 100644 --- a/include/linux/net.h +++ b/include/linux/net.h @@ -42,6 +42,7 @@ struct net; #define SOCK_PASSCRED 3 #define SOCK_PASSSEC 4 #define SOCK_SUPPORT_ZC 5 +#define SOCK_CUSTOM_SOCKOPT 6 #ifndef ARCH_HAS_SOCKET_TYPES /** diff --git a/include/linux/netlink.h b/include/linux/netlink.h index d51e041d2242..d81bde5a5844 100644 --- a/include/linux/netlink.h +++ b/include/linux/netlink.h @@ -64,6 +64,7 @@ netlink_kernel_create(struct net *net, int unit, struct netlink_kernel_cfg *cfg) /* this can be increased when necessary - don't expose to userland */ #define 
NETLINK_MAX_COOKIE_LEN 20 +#define NETLINK_MAX_FMTMSG_LEN 80 /** * struct netlink_ext_ack - netlink extended ACK report struct @@ -75,6 +76,8 @@ netlink_kernel_create(struct net *net, int unit, struct netlink_kernel_cfg *cfg) * @miss_nest: nest missing an attribute (%NULL if missing top level attr) * @cookie: cookie data to return to userspace (for success) * @cookie_len: actual cookie data length + * @_msg_buf: output buffer for formatted message strings - don't access + * directly, use %NL_SET_ERR_MSG_FMT */ struct netlink_ext_ack { const char *_msg; @@ -84,13 +87,13 @@ struct netlink_ext_ack { u16 miss_type; u8 cookie[NETLINK_MAX_COOKIE_LEN]; u8 cookie_len; + char _msg_buf[NETLINK_MAX_FMTMSG_LEN]; }; /* Always use this macro, this allows later putting the * message into a separate section or such for things * like translation or listing all possible messages. - * Currently string formatting is not supported (due - * to the lack of an output buffer.) + * If string formatting is needed use NL_SET_ERR_MSG_FMT. */ #define NL_SET_ERR_MSG(extack, msg) do { \ static const char __msg[] = msg; \ @@ -102,9 +105,31 @@ struct netlink_ext_ack { __extack->_msg = __msg; \ } while (0) +/* We splice fmt with %s at each end even in the snprintf so that both calls + * can use the same string constant, avoiding its duplication in .ro + */ +#define NL_SET_ERR_MSG_FMT(extack, fmt, args...) do { \ + struct netlink_ext_ack *__extack = (extack); \ + \ + if (!__extack) \ + break; \ + if (snprintf(__extack->_msg_buf, NETLINK_MAX_FMTMSG_LEN, \ + "%s" fmt "%s", "", ##args, "") >= \ + NETLINK_MAX_FMTMSG_LEN) \ + net_warn_ratelimited("%s" fmt "%s", "truncated extack: ", \ + ##args, "\n"); \ + \ + do_trace_netlink_extack(__extack->_msg_buf); \ + \ + __extack->_msg = __extack->_msg_buf; \ +} while (0) + #define NL_SET_ERR_MSG_MOD(extack, msg) \ NL_SET_ERR_MSG((extack), KBUILD_MODNAME ": " msg) +#define NL_SET_ERR_MSG_FMT_MOD(extack, fmt, args...) \ + NL_SET_ERR_MSG_FMT((extack), KBUILD_MODNAME ": " fmt, ##args) + #define NL_SET_BAD_ATTR_POLICY(extack, attr, pol) do { \ if ((extack)) { \ (extack)->bad_attr = (attr); \ diff --git a/include/linux/phylink.h b/include/linux/phylink.h index 3f01ac8017e0..63800bf4a7ac 100644 --- a/include/linux/phylink.h +++ b/include/linux/phylink.h @@ -558,6 +558,9 @@ void phylink_caps_to_linkmodes(unsigned long *linkmodes, unsigned long caps); unsigned long phylink_get_capabilities(phy_interface_t interface, unsigned long mac_capabilities, int rate_matching); +void phylink_validate_mask_caps(unsigned long *supported, + struct phylink_link_state *state, + unsigned long caps); void phylink_generic_validate(struct phylink_config *config, unsigned long *supported, struct phylink_link_state *state); diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h index 08605ce7379d..8822f06e4b40 100644 --- a/include/linux/rcupdate.h +++ b/include/linux/rcupdate.h @@ -241,6 +241,18 @@ static inline void exit_tasks_rcu_finish(void) { } #endif /* #else #ifdef CONFIG_TASKS_RCU_GENERIC */ /** + * rcu_trace_implies_rcu_gp - does an RCU Tasks Trace grace period imply an RCU grace period? + * + * As an accident of implementation, an RCU Tasks Trace grace period also + * acts as an RCU grace period. However, this could change at any time. + * Code relying on this accident must call this function to verify that + * this accident is still happening. + * + * You have been warned! 
+ */ +static inline bool rcu_trace_implies_rcu_gp(void) { return true; } + +/** * cond_resched_tasks_rcu_qs - Report potential quiescent states to RCU * * This macro resembles cond_resched(), except that it is defined to diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 7be5bb4c94b6..59c9fd55699d 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -5050,12 +5050,5 @@ static inline void skb_mark_for_recycle(struct sk_buff *skb) } #endif -static inline bool skb_pp_recycle(struct sk_buff *skb, void *data) -{ - if (!IS_ENABLED(CONFIG_PAGE_POOL) || !skb->pp_recycle) - return false; - return page_pool_return_skb_page(virt_to_page(data)); -} - #endif /* __KERNEL__ */ #endif /* _LINUX_SKBUFF_H */ diff --git a/include/linux/smc911x.h b/include/linux/smc911x.h deleted file mode 100644 index 8cace8189e74..000000000000 --- a/include/linux/smc911x.h +++ /dev/null @@ -1,14 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifndef __SMC911X_H__ -#define __SMC911X_H__ - -#define SMC911X_USE_16BIT (1 << 0) -#define SMC911X_USE_32BIT (1 << 1) - -struct smc911x_platdata { - unsigned long flags; - unsigned long irq_flags; /* IRQF_... */ - int irq_polarity; -}; - -#endif /* __SMC911X_H__ */ diff --git a/include/linux/udp.h b/include/linux/udp.h index e96da4157d04..5cdba00a904a 100644 --- a/include/linux/udp.h +++ b/include/linux/udp.h @@ -87,6 +87,9 @@ struct udp_sock { /* This field is dirtied by udp_recvmsg() */ int forward_deficit; + + /* This fields follows rcvbuf value, and is touched by udp_recvmsg */ + int forward_threshold; }; #define UDP_MAX_SEGMENTS (1 << 6UL) diff --git a/include/net/net_namespace.h b/include/net/net_namespace.h index 8c3587d5c308..78beaa765c73 100644 --- a/include/net/net_namespace.h +++ b/include/net/net_namespace.h @@ -92,7 +92,9 @@ struct net { struct ns_common ns; struct ref_tracker_dir refcnt_tracker; - + struct ref_tracker_dir notrefcnt_tracker; /* tracker for objects not + * refcounted against netns + */ struct list_head dev_base_head; struct proc_dir_entry *proc_net; struct proc_dir_entry *proc_net_stat; @@ -320,19 +322,31 @@ static inline int check_net(const struct net *net) #endif -static inline void netns_tracker_alloc(struct net *net, - netns_tracker *tracker, gfp_t gfp) +static inline void __netns_tracker_alloc(struct net *net, + netns_tracker *tracker, + bool refcounted, + gfp_t gfp) { #ifdef CONFIG_NET_NS_REFCNT_TRACKER - ref_tracker_alloc(&net->refcnt_tracker, tracker, gfp); + ref_tracker_alloc(refcounted ? &net->refcnt_tracker : + &net->notrefcnt_tracker, + tracker, gfp); #endif } -static inline void netns_tracker_free(struct net *net, - netns_tracker *tracker) +static inline void netns_tracker_alloc(struct net *net, netns_tracker *tracker, + gfp_t gfp) +{ + __netns_tracker_alloc(net, tracker, true, gfp); +} + +static inline void __netns_tracker_free(struct net *net, + netns_tracker *tracker, + bool refcounted) { #ifdef CONFIG_NET_NS_REFCNT_TRACKER - ref_tracker_free(&net->refcnt_tracker, tracker); + ref_tracker_free(refcounted ? 
&net->refcnt_tracker : + &net->notrefcnt_tracker, tracker); #endif } @@ -346,7 +360,7 @@ static inline struct net *get_net_track(struct net *net, static inline void put_net_track(struct net *net, netns_tracker *tracker) { - netns_tracker_free(net, tracker); + __netns_tracker_free(net, tracker, true); put_net(net); } diff --git a/include/net/sctp/ulpqueue.h b/include/net/sctp/ulpqueue.h index 0eaf8650e3b2..60f6641290c3 100644 --- a/include/net/sctp/ulpqueue.h +++ b/include/net/sctp/ulpqueue.h @@ -35,8 +35,7 @@ struct sctp_ulpq { }; /* Prototypes. */ -struct sctp_ulpq *sctp_ulpq_init(struct sctp_ulpq *, - struct sctp_association *); +void sctp_ulpq_init(struct sctp_ulpq *ulpq, struct sctp_association *asoc); void sctp_ulpq_flush(struct sctp_ulpq *ulpq); void sctp_ulpq_free(struct sctp_ulpq *); diff --git a/include/net/sock.h b/include/net/sock.h index 22f8bab583dd..928bb601fd8f 100644 --- a/include/net/sock.h +++ b/include/net/sock.h @@ -1901,7 +1901,7 @@ static inline void sockcm_init(struct sockcm_cookie *sockc, *sockc = (struct sockcm_cookie) { .tsflags = sk->sk_tsflags }; } -int __sock_cmsg_send(struct sock *sk, struct msghdr *msg, struct cmsghdr *cmsg, +int __sock_cmsg_send(struct sock *sk, struct cmsghdr *cmsg, struct sockcm_cookie *sockc); int sock_cmsg_send(struct sock *sk, struct msghdr *msg, struct sockcm_cookie *sockc); diff --git a/include/net/transp_v6.h b/include/net/transp_v6.h index b830463e3dff..d27b1caf3753 100644 --- a/include/net/transp_v6.h +++ b/include/net/transp_v6.h @@ -58,8 +58,6 @@ ip6_dgram_sock_seq_show(struct seq_file *seq, struct sock *sp, __u16 srcp, #define LOOPBACK4_IPV6 cpu_to_be32(0x7f000006) -void inet6_destroy_sock(struct sock *sk); - #define IPV6_SEQ_DGRAM_HEADER \ " sl " \ "local_address " \ diff --git a/include/net/udp.h b/include/net/udp.h index fee053bcd17c..de4b528522bb 100644 --- a/include/net/udp.h +++ b/include/net/udp.h @@ -174,6 +174,15 @@ INDIRECT_CALLABLE_DECLARE(int udpv6_rcv(struct sk_buff *)); struct sk_buff *__udp_gso_segment(struct sk_buff *gso_skb, netdev_features_t features, bool is_ipv6); +static inline void udp_lib_init_sock(struct sock *sk) +{ + struct udp_sock *up = udp_sk(sk); + + skb_queue_head_init(&up->reader_queue); + up->forward_threshold = sk->sk_rcvbuf >> 2; + set_bit(SOCK_CUSTOM_SOCKOPT, &sk->sk_socket->flags); +} + /* hash routines shared between UDPv4/6 and UDP-Litev4/6 */ static inline int udp_lib_hash(struct sock *sk) { diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h index 51b9aa640ad2..17f61338f8f8 100644 --- a/include/uapi/linux/bpf.h +++ b/include/uapi/linux/bpf.h @@ -5436,225 +5436,231 @@ union bpf_attr { * larger than the size of the ring buffer, or which cannot fit * within a struct bpf_dynptr. 
*/ -#define __BPF_FUNC_MAPPER(FN) \ - FN(unspec), \ - FN(map_lookup_elem), \ - FN(map_update_elem), \ - FN(map_delete_elem), \ - FN(probe_read), \ - FN(ktime_get_ns), \ - FN(trace_printk), \ - FN(get_prandom_u32), \ - FN(get_smp_processor_id), \ - FN(skb_store_bytes), \ - FN(l3_csum_replace), \ - FN(l4_csum_replace), \ - FN(tail_call), \ - FN(clone_redirect), \ - FN(get_current_pid_tgid), \ - FN(get_current_uid_gid), \ - FN(get_current_comm), \ - FN(get_cgroup_classid), \ - FN(skb_vlan_push), \ - FN(skb_vlan_pop), \ - FN(skb_get_tunnel_key), \ - FN(skb_set_tunnel_key), \ - FN(perf_event_read), \ - FN(redirect), \ - FN(get_route_realm), \ - FN(perf_event_output), \ - FN(skb_load_bytes), \ - FN(get_stackid), \ - FN(csum_diff), \ - FN(skb_get_tunnel_opt), \ - FN(skb_set_tunnel_opt), \ - FN(skb_change_proto), \ - FN(skb_change_type), \ - FN(skb_under_cgroup), \ - FN(get_hash_recalc), \ - FN(get_current_task), \ - FN(probe_write_user), \ - FN(current_task_under_cgroup), \ - FN(skb_change_tail), \ - FN(skb_pull_data), \ - FN(csum_update), \ - FN(set_hash_invalid), \ - FN(get_numa_node_id), \ - FN(skb_change_head), \ - FN(xdp_adjust_head), \ - FN(probe_read_str), \ - FN(get_socket_cookie), \ - FN(get_socket_uid), \ - FN(set_hash), \ - FN(setsockopt), \ - FN(skb_adjust_room), \ - FN(redirect_map), \ - FN(sk_redirect_map), \ - FN(sock_map_update), \ - FN(xdp_adjust_meta), \ - FN(perf_event_read_value), \ - FN(perf_prog_read_value), \ - FN(getsockopt), \ - FN(override_return), \ - FN(sock_ops_cb_flags_set), \ - FN(msg_redirect_map), \ - FN(msg_apply_bytes), \ - FN(msg_cork_bytes), \ - FN(msg_pull_data), \ - FN(bind), \ - FN(xdp_adjust_tail), \ - FN(skb_get_xfrm_state), \ - FN(get_stack), \ - FN(skb_load_bytes_relative), \ - FN(fib_lookup), \ - FN(sock_hash_update), \ - FN(msg_redirect_hash), \ - FN(sk_redirect_hash), \ - FN(lwt_push_encap), \ - FN(lwt_seg6_store_bytes), \ - FN(lwt_seg6_adjust_srh), \ - FN(lwt_seg6_action), \ - FN(rc_repeat), \ - FN(rc_keydown), \ - FN(skb_cgroup_id), \ - FN(get_current_cgroup_id), \ - FN(get_local_storage), \ - FN(sk_select_reuseport), \ - FN(skb_ancestor_cgroup_id), \ - FN(sk_lookup_tcp), \ - FN(sk_lookup_udp), \ - FN(sk_release), \ - FN(map_push_elem), \ - FN(map_pop_elem), \ - FN(map_peek_elem), \ - FN(msg_push_data), \ - FN(msg_pop_data), \ - FN(rc_pointer_rel), \ - FN(spin_lock), \ - FN(spin_unlock), \ - FN(sk_fullsock), \ - FN(tcp_sock), \ - FN(skb_ecn_set_ce), \ - FN(get_listener_sock), \ - FN(skc_lookup_tcp), \ - FN(tcp_check_syncookie), \ - FN(sysctl_get_name), \ - FN(sysctl_get_current_value), \ - FN(sysctl_get_new_value), \ - FN(sysctl_set_new_value), \ - FN(strtol), \ - FN(strtoul), \ - FN(sk_storage_get), \ - FN(sk_storage_delete), \ - FN(send_signal), \ - FN(tcp_gen_syncookie), \ - FN(skb_output), \ - FN(probe_read_user), \ - FN(probe_read_kernel), \ - FN(probe_read_user_str), \ - FN(probe_read_kernel_str), \ - FN(tcp_send_ack), \ - FN(send_signal_thread), \ - FN(jiffies64), \ - FN(read_branch_records), \ - FN(get_ns_current_pid_tgid), \ - FN(xdp_output), \ - FN(get_netns_cookie), \ - FN(get_current_ancestor_cgroup_id), \ - FN(sk_assign), \ - FN(ktime_get_boot_ns), \ - FN(seq_printf), \ - FN(seq_write), \ - FN(sk_cgroup_id), \ - FN(sk_ancestor_cgroup_id), \ - FN(ringbuf_output), \ - FN(ringbuf_reserve), \ - FN(ringbuf_submit), \ - FN(ringbuf_discard), \ - FN(ringbuf_query), \ - FN(csum_level), \ - FN(skc_to_tcp6_sock), \ - FN(skc_to_tcp_sock), \ - FN(skc_to_tcp_timewait_sock), \ - FN(skc_to_tcp_request_sock), \ - FN(skc_to_udp6_sock), \ - 
FN(get_task_stack), \ - FN(load_hdr_opt), \ - FN(store_hdr_opt), \ - FN(reserve_hdr_opt), \ - FN(inode_storage_get), \ - FN(inode_storage_delete), \ - FN(d_path), \ - FN(copy_from_user), \ - FN(snprintf_btf), \ - FN(seq_printf_btf), \ - FN(skb_cgroup_classid), \ - FN(redirect_neigh), \ - FN(per_cpu_ptr), \ - FN(this_cpu_ptr), \ - FN(redirect_peer), \ - FN(task_storage_get), \ - FN(task_storage_delete), \ - FN(get_current_task_btf), \ - FN(bprm_opts_set), \ - FN(ktime_get_coarse_ns), \ - FN(ima_inode_hash), \ - FN(sock_from_file), \ - FN(check_mtu), \ - FN(for_each_map_elem), \ - FN(snprintf), \ - FN(sys_bpf), \ - FN(btf_find_by_name_kind), \ - FN(sys_close), \ - FN(timer_init), \ - FN(timer_set_callback), \ - FN(timer_start), \ - FN(timer_cancel), \ - FN(get_func_ip), \ - FN(get_attach_cookie), \ - FN(task_pt_regs), \ - FN(get_branch_snapshot), \ - FN(trace_vprintk), \ - FN(skc_to_unix_sock), \ - FN(kallsyms_lookup_name), \ - FN(find_vma), \ - FN(loop), \ - FN(strncmp), \ - FN(get_func_arg), \ - FN(get_func_ret), \ - FN(get_func_arg_cnt), \ - FN(get_retval), \ - FN(set_retval), \ - FN(xdp_get_buff_len), \ - FN(xdp_load_bytes), \ - FN(xdp_store_bytes), \ - FN(copy_from_user_task), \ - FN(skb_set_tstamp), \ - FN(ima_file_hash), \ - FN(kptr_xchg), \ - FN(map_lookup_percpu_elem), \ - FN(skc_to_mptcp_sock), \ - FN(dynptr_from_mem), \ - FN(ringbuf_reserve_dynptr), \ - FN(ringbuf_submit_dynptr), \ - FN(ringbuf_discard_dynptr), \ - FN(dynptr_read), \ - FN(dynptr_write), \ - FN(dynptr_data), \ - FN(tcp_raw_gen_syncookie_ipv4), \ - FN(tcp_raw_gen_syncookie_ipv6), \ - FN(tcp_raw_check_syncookie_ipv4), \ - FN(tcp_raw_check_syncookie_ipv6), \ - FN(ktime_get_tai_ns), \ - FN(user_ringbuf_drain), \ +#define ___BPF_FUNC_MAPPER(FN, ctx...) \ + FN(unspec, 0, ##ctx) \ + FN(map_lookup_elem, 1, ##ctx) \ + FN(map_update_elem, 2, ##ctx) \ + FN(map_delete_elem, 3, ##ctx) \ + FN(probe_read, 4, ##ctx) \ + FN(ktime_get_ns, 5, ##ctx) \ + FN(trace_printk, 6, ##ctx) \ + FN(get_prandom_u32, 7, ##ctx) \ + FN(get_smp_processor_id, 8, ##ctx) \ + FN(skb_store_bytes, 9, ##ctx) \ + FN(l3_csum_replace, 10, ##ctx) \ + FN(l4_csum_replace, 11, ##ctx) \ + FN(tail_call, 12, ##ctx) \ + FN(clone_redirect, 13, ##ctx) \ + FN(get_current_pid_tgid, 14, ##ctx) \ + FN(get_current_uid_gid, 15, ##ctx) \ + FN(get_current_comm, 16, ##ctx) \ + FN(get_cgroup_classid, 17, ##ctx) \ + FN(skb_vlan_push, 18, ##ctx) \ + FN(skb_vlan_pop, 19, ##ctx) \ + FN(skb_get_tunnel_key, 20, ##ctx) \ + FN(skb_set_tunnel_key, 21, ##ctx) \ + FN(perf_event_read, 22, ##ctx) \ + FN(redirect, 23, ##ctx) \ + FN(get_route_realm, 24, ##ctx) \ + FN(perf_event_output, 25, ##ctx) \ + FN(skb_load_bytes, 26, ##ctx) \ + FN(get_stackid, 27, ##ctx) \ + FN(csum_diff, 28, ##ctx) \ + FN(skb_get_tunnel_opt, 29, ##ctx) \ + FN(skb_set_tunnel_opt, 30, ##ctx) \ + FN(skb_change_proto, 31, ##ctx) \ + FN(skb_change_type, 32, ##ctx) \ + FN(skb_under_cgroup, 33, ##ctx) \ + FN(get_hash_recalc, 34, ##ctx) \ + FN(get_current_task, 35, ##ctx) \ + FN(probe_write_user, 36, ##ctx) \ + FN(current_task_under_cgroup, 37, ##ctx) \ + FN(skb_change_tail, 38, ##ctx) \ + FN(skb_pull_data, 39, ##ctx) \ + FN(csum_update, 40, ##ctx) \ + FN(set_hash_invalid, 41, ##ctx) \ + FN(get_numa_node_id, 42, ##ctx) \ + FN(skb_change_head, 43, ##ctx) \ + FN(xdp_adjust_head, 44, ##ctx) \ + FN(probe_read_str, 45, ##ctx) \ + FN(get_socket_cookie, 46, ##ctx) \ + FN(get_socket_uid, 47, ##ctx) \ + FN(set_hash, 48, ##ctx) \ + FN(setsockopt, 49, ##ctx) \ + FN(skb_adjust_room, 50, ##ctx) \ + FN(redirect_map, 51, ##ctx) \ + 
FN(sk_redirect_map, 52, ##ctx) \ + FN(sock_map_update, 53, ##ctx) \ + FN(xdp_adjust_meta, 54, ##ctx) \ + FN(perf_event_read_value, 55, ##ctx) \ + FN(perf_prog_read_value, 56, ##ctx) \ + FN(getsockopt, 57, ##ctx) \ + FN(override_return, 58, ##ctx) \ + FN(sock_ops_cb_flags_set, 59, ##ctx) \ + FN(msg_redirect_map, 60, ##ctx) \ + FN(msg_apply_bytes, 61, ##ctx) \ + FN(msg_cork_bytes, 62, ##ctx) \ + FN(msg_pull_data, 63, ##ctx) \ + FN(bind, 64, ##ctx) \ + FN(xdp_adjust_tail, 65, ##ctx) \ + FN(skb_get_xfrm_state, 66, ##ctx) \ + FN(get_stack, 67, ##ctx) \ + FN(skb_load_bytes_relative, 68, ##ctx) \ + FN(fib_lookup, 69, ##ctx) \ + FN(sock_hash_update, 70, ##ctx) \ + FN(msg_redirect_hash, 71, ##ctx) \ + FN(sk_redirect_hash, 72, ##ctx) \ + FN(lwt_push_encap, 73, ##ctx) \ + FN(lwt_seg6_store_bytes, 74, ##ctx) \ + FN(lwt_seg6_adjust_srh, 75, ##ctx) \ + FN(lwt_seg6_action, 76, ##ctx) \ + FN(rc_repeat, 77, ##ctx) \ + FN(rc_keydown, 78, ##ctx) \ + FN(skb_cgroup_id, 79, ##ctx) \ + FN(get_current_cgroup_id, 80, ##ctx) \ + FN(get_local_storage, 81, ##ctx) \ + FN(sk_select_reuseport, 82, ##ctx) \ + FN(skb_ancestor_cgroup_id, 83, ##ctx) \ + FN(sk_lookup_tcp, 84, ##ctx) \ + FN(sk_lookup_udp, 85, ##ctx) \ + FN(sk_release, 86, ##ctx) \ + FN(map_push_elem, 87, ##ctx) \ + FN(map_pop_elem, 88, ##ctx) \ + FN(map_peek_elem, 89, ##ctx) \ + FN(msg_push_data, 90, ##ctx) \ + FN(msg_pop_data, 91, ##ctx) \ + FN(rc_pointer_rel, 92, ##ctx) \ + FN(spin_lock, 93, ##ctx) \ + FN(spin_unlock, 94, ##ctx) \ + FN(sk_fullsock, 95, ##ctx) \ + FN(tcp_sock, 96, ##ctx) \ + FN(skb_ecn_set_ce, 97, ##ctx) \ + FN(get_listener_sock, 98, ##ctx) \ + FN(skc_lookup_tcp, 99, ##ctx) \ + FN(tcp_check_syncookie, 100, ##ctx) \ + FN(sysctl_get_name, 101, ##ctx) \ + FN(sysctl_get_current_value, 102, ##ctx) \ + FN(sysctl_get_new_value, 103, ##ctx) \ + FN(sysctl_set_new_value, 104, ##ctx) \ + FN(strtol, 105, ##ctx) \ + FN(strtoul, 106, ##ctx) \ + FN(sk_storage_get, 107, ##ctx) \ + FN(sk_storage_delete, 108, ##ctx) \ + FN(send_signal, 109, ##ctx) \ + FN(tcp_gen_syncookie, 110, ##ctx) \ + FN(skb_output, 111, ##ctx) \ + FN(probe_read_user, 112, ##ctx) \ + FN(probe_read_kernel, 113, ##ctx) \ + FN(probe_read_user_str, 114, ##ctx) \ + FN(probe_read_kernel_str, 115, ##ctx) \ + FN(tcp_send_ack, 116, ##ctx) \ + FN(send_signal_thread, 117, ##ctx) \ + FN(jiffies64, 118, ##ctx) \ + FN(read_branch_records, 119, ##ctx) \ + FN(get_ns_current_pid_tgid, 120, ##ctx) \ + FN(xdp_output, 121, ##ctx) \ + FN(get_netns_cookie, 122, ##ctx) \ + FN(get_current_ancestor_cgroup_id, 123, ##ctx) \ + FN(sk_assign, 124, ##ctx) \ + FN(ktime_get_boot_ns, 125, ##ctx) \ + FN(seq_printf, 126, ##ctx) \ + FN(seq_write, 127, ##ctx) \ + FN(sk_cgroup_id, 128, ##ctx) \ + FN(sk_ancestor_cgroup_id, 129, ##ctx) \ + FN(ringbuf_output, 130, ##ctx) \ + FN(ringbuf_reserve, 131, ##ctx) \ + FN(ringbuf_submit, 132, ##ctx) \ + FN(ringbuf_discard, 133, ##ctx) \ + FN(ringbuf_query, 134, ##ctx) \ + FN(csum_level, 135, ##ctx) \ + FN(skc_to_tcp6_sock, 136, ##ctx) \ + FN(skc_to_tcp_sock, 137, ##ctx) \ + FN(skc_to_tcp_timewait_sock, 138, ##ctx) \ + FN(skc_to_tcp_request_sock, 139, ##ctx) \ + FN(skc_to_udp6_sock, 140, ##ctx) \ + FN(get_task_stack, 141, ##ctx) \ + FN(load_hdr_opt, 142, ##ctx) \ + FN(store_hdr_opt, 143, ##ctx) \ + FN(reserve_hdr_opt, 144, ##ctx) \ + FN(inode_storage_get, 145, ##ctx) \ + FN(inode_storage_delete, 146, ##ctx) \ + FN(d_path, 147, ##ctx) \ + FN(copy_from_user, 148, ##ctx) \ + FN(snprintf_btf, 149, ##ctx) \ + FN(seq_printf_btf, 150, ##ctx) \ + FN(skb_cgroup_classid, 151, ##ctx) \ + 
FN(redirect_neigh, 152, ##ctx) \ + FN(per_cpu_ptr, 153, ##ctx) \ + FN(this_cpu_ptr, 154, ##ctx) \ + FN(redirect_peer, 155, ##ctx) \ + FN(task_storage_get, 156, ##ctx) \ + FN(task_storage_delete, 157, ##ctx) \ + FN(get_current_task_btf, 158, ##ctx) \ + FN(bprm_opts_set, 159, ##ctx) \ + FN(ktime_get_coarse_ns, 160, ##ctx) \ + FN(ima_inode_hash, 161, ##ctx) \ + FN(sock_from_file, 162, ##ctx) \ + FN(check_mtu, 163, ##ctx) \ + FN(for_each_map_elem, 164, ##ctx) \ + FN(snprintf, 165, ##ctx) \ + FN(sys_bpf, 166, ##ctx) \ + FN(btf_find_by_name_kind, 167, ##ctx) \ + FN(sys_close, 168, ##ctx) \ + FN(timer_init, 169, ##ctx) \ + FN(timer_set_callback, 170, ##ctx) \ + FN(timer_start, 171, ##ctx) \ + FN(timer_cancel, 172, ##ctx) \ + FN(get_func_ip, 173, ##ctx) \ + FN(get_attach_cookie, 174, ##ctx) \ + FN(task_pt_regs, 175, ##ctx) \ + FN(get_branch_snapshot, 176, ##ctx) \ + FN(trace_vprintk, 177, ##ctx) \ + FN(skc_to_unix_sock, 178, ##ctx) \ + FN(kallsyms_lookup_name, 179, ##ctx) \ + FN(find_vma, 180, ##ctx) \ + FN(loop, 181, ##ctx) \ + FN(strncmp, 182, ##ctx) \ + FN(get_func_arg, 183, ##ctx) \ + FN(get_func_ret, 184, ##ctx) \ + FN(get_func_arg_cnt, 185, ##ctx) \ + FN(get_retval, 186, ##ctx) \ + FN(set_retval, 187, ##ctx) \ + FN(xdp_get_buff_len, 188, ##ctx) \ + FN(xdp_load_bytes, 189, ##ctx) \ + FN(xdp_store_bytes, 190, ##ctx) \ + FN(copy_from_user_task, 191, ##ctx) \ + FN(skb_set_tstamp, 192, ##ctx) \ + FN(ima_file_hash, 193, ##ctx) \ + FN(kptr_xchg, 194, ##ctx) \ + FN(map_lookup_percpu_elem, 195, ##ctx) \ + FN(skc_to_mptcp_sock, 196, ##ctx) \ + FN(dynptr_from_mem, 197, ##ctx) \ + FN(ringbuf_reserve_dynptr, 198, ##ctx) \ + FN(ringbuf_submit_dynptr, 199, ##ctx) \ + FN(ringbuf_discard_dynptr, 200, ##ctx) \ + FN(dynptr_read, 201, ##ctx) \ + FN(dynptr_write, 202, ##ctx) \ + FN(dynptr_data, 203, ##ctx) \ + FN(tcp_raw_gen_syncookie_ipv4, 204, ##ctx) \ + FN(tcp_raw_gen_syncookie_ipv6, 205, ##ctx) \ + FN(tcp_raw_check_syncookie_ipv4, 206, ##ctx) \ + FN(tcp_raw_check_syncookie_ipv6, 207, ##ctx) \ + FN(ktime_get_tai_ns, 208, ##ctx) \ + FN(user_ringbuf_drain, 209, ##ctx) \ /* */ +/* backwards-compatibility macros for users of __BPF_FUNC_MAPPER that don't + * know or care about integer value that is now passed as second argument + */ +#define __BPF_FUNC_MAPPER_APPLY(name, value, FN) FN(name), +#define __BPF_FUNC_MAPPER(FN) ___BPF_FUNC_MAPPER(__BPF_FUNC_MAPPER_APPLY, FN) + /* integer value in 'imm' field of BPF_CALL instruction selects which helper * function eBPF program intends to call */ -#define __BPF_ENUM_FN(x) BPF_FUNC_ ## x +#define __BPF_ENUM_FN(x, y) BPF_FUNC_ ## x = y, enum bpf_func_id { - __BPF_FUNC_MAPPER(__BPF_ENUM_FN) + ___BPF_FUNC_MAPPER(__BPF_ENUM_FN) __BPF_FUNC_MAX_ID, }; #undef __BPF_ENUM_FN diff --git a/include/uapi/linux/ethtool.h b/include/uapi/linux/ethtool.h index dc2aa3d75b39..f341de2ae612 100644 --- a/include/uapi/linux/ethtool.h +++ b/include/uapi/linux/ethtool.h @@ -1737,6 +1737,13 @@ enum ethtool_link_mode_bit_indices { ETHTOOL_LINK_MODE_100baseFX_Half_BIT = 90, ETHTOOL_LINK_MODE_100baseFX_Full_BIT = 91, ETHTOOL_LINK_MODE_10baseT1L_Full_BIT = 92, + ETHTOOL_LINK_MODE_800000baseCR8_Full_BIT = 93, + ETHTOOL_LINK_MODE_800000baseKR8_Full_BIT = 94, + ETHTOOL_LINK_MODE_800000baseDR8_Full_BIT = 95, + ETHTOOL_LINK_MODE_800000baseDR8_2_Full_BIT = 96, + ETHTOOL_LINK_MODE_800000baseSR8_Full_BIT = 97, + ETHTOOL_LINK_MODE_800000baseVR8_Full_BIT = 98, + /* must be last entry */ __ETHTOOL_LINK_MODE_MASK_NBITS }; @@ -1848,6 +1855,7 @@ enum ethtool_link_mode_bit_indices { #define SPEED_100000 100000 
#define SPEED_200000 200000 #define SPEED_400000 400000 +#define SPEED_800000 800000 #define SPEED_UNKNOWN -1 diff --git a/kernel/bpf/bpf_local_storage.c b/kernel/bpf/bpf_local_storage.c index 802fc15b0d73..9dc6de1cf185 100644 --- a/kernel/bpf/bpf_local_storage.c +++ b/kernel/bpf/bpf_local_storage.c @@ -88,8 +88,14 @@ void bpf_local_storage_free_rcu(struct rcu_head *rcu) { struct bpf_local_storage *local_storage; + /* If RCU Tasks Trace grace period implies RCU grace period, do + * kfree(), else do kfree_rcu(). + */ local_storage = container_of(rcu, struct bpf_local_storage, rcu); - kfree_rcu(local_storage, rcu); + if (rcu_trace_implies_rcu_gp()) + kfree(local_storage); + else + kfree_rcu(local_storage, rcu); } static void bpf_selem_free_rcu(struct rcu_head *rcu) @@ -97,7 +103,10 @@ static void bpf_selem_free_rcu(struct rcu_head *rcu) struct bpf_local_storage_elem *selem; selem = container_of(rcu, struct bpf_local_storage_elem, rcu); - kfree_rcu(selem, rcu); + if (rcu_trace_implies_rcu_gp()) + kfree(selem); + else + kfree_rcu(selem, rcu); } /* local_storage->lock must be held and selem->local_storage == local_storage. diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c index 25a54e04560e..9c16338bcbe8 100644 --- a/kernel/bpf/core.c +++ b/kernel/bpf/core.c @@ -2251,8 +2251,14 @@ static void __bpf_prog_array_free_sleepable_cb(struct rcu_head *rcu) { struct bpf_prog_array *progs; + /* If RCU Tasks Trace grace period implies RCU grace period, there is + * no need to call kfree_rcu(), just call kfree() directly. + */ progs = container_of(rcu, struct bpf_prog_array, rcu); - kfree_rcu(progs, rcu); + if (rcu_trace_implies_rcu_gp()) + kfree(progs); + else + kfree_rcu(progs, rcu); } void bpf_prog_array_free_sleepable(struct bpf_prog_array *progs) diff --git a/kernel/bpf/memalloc.c b/kernel/bpf/memalloc.c index 4901fa1048cd..8f0d65f2474a 100644 --- a/kernel/bpf/memalloc.c +++ b/kernel/bpf/memalloc.c @@ -222,9 +222,13 @@ static void __free_rcu(struct rcu_head *head) static void __free_rcu_tasks_trace(struct rcu_head *head) { - struct bpf_mem_cache *c = container_of(head, struct bpf_mem_cache, rcu); - - call_rcu(&c->rcu, __free_rcu); + /* If RCU Tasks Trace grace period implies RCU grace period, + * there is no need to invoke call_rcu(). + */ + if (rcu_trace_implies_rcu_gp()) + __free_rcu(head); + else + call_rcu(head, __free_rcu); } static void enque_to_free(struct bpf_mem_cache *c, void *obj) @@ -253,8 +257,9 @@ static void do_call_rcu(struct bpf_mem_cache *c) */ __llist_add(llnode, &c->waiting_for_gp); /* Use call_rcu_tasks_trace() to wait for sleepable progs to finish. - * Then use call_rcu() to wait for normal progs to finish - * and finally do free_one() on each element. + * If RCU Tasks Trace grace period implies RCU grace period, free + * these elements directly, else use call_rcu() to wait for normal + * progs to finish and finally do free_one() on each element. */ call_rcu_tasks_trace(&c->rcu, __free_rcu_tasks_trace); } diff --git a/kernel/rcu/tasks.h b/kernel/rcu/tasks.h index f5bf6fb430da..9435e5a7b53e 100644 --- a/kernel/rcu/tasks.h +++ b/kernel/rcu/tasks.h @@ -1535,6 +1535,8 @@ static void rcu_tasks_trace_postscan(struct list_head *hop) { // Wait for late-stage exiting tasks to finish exiting. // These might have passed the call to exit_tasks_rcu_finish(). + + // If you remove the following line, update rcu_trace_implies_rcu_gp()!!! synchronize_rcu(); // Any tasks that exit after this point will set // TRC_NEED_QS_CHECKED in ->trc_reader_special.b.need_qs. 
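The kernel/bpf hunks above (bpf_local_storage.c, core.c and memalloc.c) all apply the same optimization: a free path that used to chain an RCU Tasks Trace callback into a second call_rcu()/kfree_rcu() stage can skip that second grace period whenever rcu_trace_implies_rcu_gp() reports that a Tasks Trace grace period already implies a regular RCU grace period. A minimal sketch of that shape, outside any of these files, could look like the following; struct my_obj, my_obj_free() and the other names are invented for illustration, and CONFIG_TASKS_TRACE_RCU is assumed so that call_rcu_tasks_trace() is available.

#include <linux/rcupdate.h>
#include <linux/slab.h>

struct my_obj {
	struct rcu_head rcu;
	/* payload ... */
};

/* Final stage: runs after a plain RCU grace period. */
static void my_obj_free_rcu(struct rcu_head *rcu)
{
	kfree(container_of(rcu, struct my_obj, rcu));
}

/* First stage: runs after an RCU Tasks Trace grace period. */
static void my_obj_free_tasks_trace(struct rcu_head *rcu)
{
	/* If a Tasks Trace grace period also implies a regular RCU grace
	 * period, queueing a second callback is unnecessary: free now.
	 */
	if (rcu_trace_implies_rcu_gp())
		my_obj_free_rcu(rcu);
	else
		call_rcu(rcu, my_obj_free_rcu);
}

static void my_obj_free(struct my_obj *obj)
{
	/* Wait for sleepable (Tasks Trace) readers before the first stage. */
	call_rcu_tasks_trace(&obj->rcu, my_obj_free_tasks_trace);
}

If the synchronize_rcu() call flagged in rcu_tasks_trace_postscan() above were ever removed, rcu_trace_implies_rcu_gp() would have to be updated to return false, at which point the call_rcu() fallback in the first-stage callback again provides the second grace period.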
diff --git a/net/bridge/br_mdb.c b/net/bridge/br_mdb.c index 589ff497d50c..321be94c445a 100644 --- a/net/bridge/br_mdb.c +++ b/net/bridge/br_mdb.c @@ -866,7 +866,6 @@ static int br_mdb_add_group(struct net_bridge *br, struct net_bridge_port *port, unsigned long now = jiffies; unsigned char flags = 0; u8 filter_mode; - int err; __mdb_entry_to_br_ip(entry, &group, mdb_attrs); @@ -892,13 +891,9 @@ static int br_mdb_add_group(struct net_bridge *br, struct net_bridge_port *port, return -EINVAL; } - mp = br_mdb_ip_get(br, &group); - if (!mp) { - mp = br_multicast_new_group(br, &group); - err = PTR_ERR_OR_ZERO(mp); - if (err) - return err; - } + mp = br_multicast_new_group(br, &group); + if (IS_ERR(mp)) + return PTR_ERR(mp); /* host join */ if (!port) { diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c index db4f2641d1cd..09140bc8c15e 100644 --- a/net/bridge/br_multicast.c +++ b/net/bridge/br_multicast.c @@ -2669,7 +2669,7 @@ static int br_ip4_multicast_igmp3_report(struct net_bridge_mcast *brmctx, if (!pmctx || igmpv2) continue; - spin_lock_bh(&brmctx->br->multicast_lock); + spin_lock(&brmctx->br->multicast_lock); if (!br_multicast_ctx_should_use(brmctx, pmctx)) goto unlock_continue; @@ -2717,7 +2717,7 @@ static int br_ip4_multicast_igmp3_report(struct net_bridge_mcast *brmctx, if (changed) br_mdb_notify(brmctx->br->dev, mdst, pg, RTM_NEWMDB); unlock_continue: - spin_unlock_bh(&brmctx->br->multicast_lock); + spin_unlock(&brmctx->br->multicast_lock); } return err; @@ -2807,7 +2807,7 @@ static int br_ip6_multicast_mld2_report(struct net_bridge_mcast *brmctx, if (!pmctx || mldv1) continue; - spin_lock_bh(&brmctx->br->multicast_lock); + spin_lock(&brmctx->br->multicast_lock); if (!br_multicast_ctx_should_use(brmctx, pmctx)) goto unlock_continue; @@ -2859,7 +2859,7 @@ static int br_ip6_multicast_mld2_report(struct net_bridge_mcast *brmctx, if (changed) br_mdb_notify(brmctx->br->dev, mdst, pg, RTM_NEWMDB); unlock_continue: - spin_unlock_bh(&brmctx->br->multicast_lock); + spin_unlock(&brmctx->br->multicast_lock); } return err; diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c index f64654df71a2..5581d22cc191 100644 --- a/net/core/net_namespace.c +++ b/net/core/net_namespace.c @@ -316,6 +316,7 @@ static __net_init int setup_net(struct net *net, struct user_namespace *user_ns) refcount_set(&net->ns.count, 1); ref_tracker_dir_init(&net->refcnt_tracker, 128); + ref_tracker_dir_init(&net->notrefcnt_tracker, 128); refcount_set(&net->passive, 1); get_random_bytes(&net->hash_mix, sizeof(u32)); @@ -436,6 +437,10 @@ static void net_free(struct net *net) { if (refcount_dec_and_test(&net->passive)) { kfree(rcu_access_pointer(net->gen)); + + /* There should not be any trackers left there. 
*/ + ref_tracker_dir_exit(&net->notrefcnt_tracker); + kmem_cache_free(net_cachep, net); } } diff --git a/net/core/skbuff.c b/net/core/skbuff.c index 1d9719e72f9d..9b3b19816d2d 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c @@ -748,6 +748,13 @@ static void skb_clone_fraglist(struct sk_buff *skb) skb_get(list); } +static bool skb_pp_recycle(struct sk_buff *skb, void *data) +{ + if (!IS_ENABLED(CONFIG_PAGE_POOL) || !skb->pp_recycle) + return false; + return page_pool_return_skb_page(virt_to_page(data)); +} + static void skb_free_head(struct sk_buff *skb) { unsigned char *head = skb->head; diff --git a/net/core/sock.c b/net/core/sock.c index a3ba0358c77c..2786c1107e53 100644 --- a/net/core/sock.c +++ b/net/core/sock.c @@ -2094,6 +2094,9 @@ struct sock *sk_alloc(struct net *net, int family, gfp_t priority, if (likely(sk->sk_net_refcnt)) { get_net_track(net, &sk->ns_tracker, priority); sock_inuse_add(net, 1); + } else { + __netns_tracker_alloc(net, &sk->ns_tracker, + false, priority); } sock_net_set(sk, net); @@ -2149,6 +2152,9 @@ static void __sk_destruct(struct rcu_head *head) if (likely(sk->sk_net_refcnt)) put_net_track(sock_net(sk), &sk->ns_tracker); + else + __netns_tracker_free(sock_net(sk), &sk->ns_tracker, false); + sk_prot_free(sk->sk_prot_creator, sk); } @@ -2237,6 +2243,14 @@ struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority) if (likely(newsk->sk_net_refcnt)) { get_net_track(sock_net(newsk), &newsk->ns_tracker, priority); sock_inuse_add(sock_net(newsk), 1); + } else { + /* Kernel sockets are not elevating the struct net refcount. + * Instead, use a tracker to more easily detect if a layer + * is not properly dismantling its kernel sockets at netns + * destroy time. + */ + __netns_tracker_alloc(sock_net(newsk), &newsk->ns_tracker, + false, priority); } sk_node_init(&newsk->sk_node); sock_lock_init(newsk); @@ -2730,7 +2744,7 @@ failure: } EXPORT_SYMBOL(sock_alloc_send_pskb); -int __sock_cmsg_send(struct sock *sk, struct msghdr *msg, struct cmsghdr *cmsg, +int __sock_cmsg_send(struct sock *sk, struct cmsghdr *cmsg, struct sockcm_cookie *sockc) { u32 tsflags; @@ -2784,7 +2798,7 @@ int sock_cmsg_send(struct sock *sk, struct msghdr *msg, return -EINVAL; if (cmsg->cmsg_level != SOL_SOCKET) continue; - ret = __sock_cmsg_send(sk, msg, cmsg, sockc); + ret = __sock_cmsg_send(sk, cmsg, sockc); if (ret) return ret; } diff --git a/net/dccp/dccp.h b/net/dccp/dccp.h index 7dfc00c9fb32..9ddc3a9e89e4 100644 --- a/net/dccp/dccp.h +++ b/net/dccp/dccp.h @@ -278,6 +278,7 @@ int dccp_rcv_state_process(struct sock *sk, struct sk_buff *skb, int dccp_rcv_established(struct sock *sk, struct sk_buff *skb, const struct dccp_hdr *dh, const unsigned int len); +void dccp_destruct_common(struct sock *sk); int dccp_init_sock(struct sock *sk, const __u8 ctl_sock_initialized); void dccp_destroy_sock(struct sock *sk); diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c index e57b43006074..ae62b1591dea 100644 --- a/net/dccp/ipv6.c +++ b/net/dccp/ipv6.c @@ -1021,6 +1021,12 @@ static const struct inet_connection_sock_af_ops dccp_ipv6_mapped = { .sockaddr_len = sizeof(struct sockaddr_in6), }; +static void dccp_v6_sk_destruct(struct sock *sk) +{ + dccp_destruct_common(sk); + inet6_sock_destruct(sk); +} + /* NOTE: A lot of things set to zero explicitly by call to * sk_alloc() so need not be done here. 
*/ @@ -1033,17 +1039,12 @@ static int dccp_v6_init_sock(struct sock *sk) if (unlikely(!dccp_v6_ctl_sock_initialized)) dccp_v6_ctl_sock_initialized = 1; inet_csk(sk)->icsk_af_ops = &dccp_ipv6_af_ops; + sk->sk_destruct = dccp_v6_sk_destruct; } return err; } -static void dccp_v6_destroy_sock(struct sock *sk) -{ - dccp_destroy_sock(sk); - inet6_destroy_sock(sk); -} - static struct timewait_sock_ops dccp6_timewait_sock_ops = { .twsk_obj_size = sizeof(struct dccp6_timewait_sock), }; @@ -1066,7 +1067,7 @@ static struct proto dccp_v6_prot = { .accept = inet_csk_accept, .get_port = inet_csk_get_port, .shutdown = dccp_shutdown, - .destroy = dccp_v6_destroy_sock, + .destroy = dccp_destroy_sock, .orphan_count = &dccp_orphan_count, .max_header = MAX_DCCP_HEADER, .obj_size = sizeof(struct dccp6_sock), diff --git a/net/dccp/proto.c b/net/dccp/proto.c index c548ca3e9b0e..9494b0d224f9 100644 --- a/net/dccp/proto.c +++ b/net/dccp/proto.c @@ -171,12 +171,18 @@ const char *dccp_packet_name(const int type) EXPORT_SYMBOL_GPL(dccp_packet_name); -static void dccp_sk_destruct(struct sock *sk) +void dccp_destruct_common(struct sock *sk) { struct dccp_sock *dp = dccp_sk(sk); ccid_hc_tx_delete(dp->dccps_hc_tx_ccid, sk); dp->dccps_hc_tx_ccid = NULL; +} +EXPORT_SYMBOL_GPL(dccp_destruct_common); + +static void dccp_sk_destruct(struct sock *sk) +{ + dccp_destruct_common(sk); inet_sock_destruct(sk); } diff --git a/net/ethtool/common.c b/net/ethtool/common.c index 566adf85e658..ee3e02da0013 100644 --- a/net/ethtool/common.c +++ b/net/ethtool/common.c @@ -202,6 +202,12 @@ const char link_mode_names[][ETH_GSTRING_LEN] = { __DEFINE_LINK_MODE_NAME(100, FX, Half), __DEFINE_LINK_MODE_NAME(100, FX, Full), __DEFINE_LINK_MODE_NAME(10, T1L, Full), + __DEFINE_LINK_MODE_NAME(800000, CR8, Full), + __DEFINE_LINK_MODE_NAME(800000, KR8, Full), + __DEFINE_LINK_MODE_NAME(800000, DR8, Full), + __DEFINE_LINK_MODE_NAME(800000, DR8_2, Full), + __DEFINE_LINK_MODE_NAME(800000, SR8, Full), + __DEFINE_LINK_MODE_NAME(800000, VR8, Full), }; static_assert(ARRAY_SIZE(link_mode_names) == __ETHTOOL_LINK_MODE_MASK_NBITS); @@ -238,6 +244,8 @@ static_assert(ARRAY_SIZE(link_mode_names) == __ETHTOOL_LINK_MODE_MASK_NBITS); #define __LINK_MODE_LANES_X 1 #define __LINK_MODE_LANES_FX 1 #define __LINK_MODE_LANES_T1L 1 +#define __LINK_MODE_LANES_VR8 8 +#define __LINK_MODE_LANES_DR8_2 8 #define __DEFINE_LINK_MODE_PARAMS(_speed, _type, _duplex) \ [ETHTOOL_LINK_MODE(_speed, _type, _duplex)] = { \ @@ -352,6 +360,12 @@ const struct link_mode_info link_mode_params[] = { __DEFINE_LINK_MODE_PARAMS(100, FX, Half), __DEFINE_LINK_MODE_PARAMS(100, FX, Full), __DEFINE_LINK_MODE_PARAMS(10, T1L, Full), + __DEFINE_LINK_MODE_PARAMS(800000, CR8, Full), + __DEFINE_LINK_MODE_PARAMS(800000, KR8, Full), + __DEFINE_LINK_MODE_PARAMS(800000, DR8, Full), + __DEFINE_LINK_MODE_PARAMS(800000, DR8_2, Full), + __DEFINE_LINK_MODE_PARAMS(800000, SR8, Full), + __DEFINE_LINK_MODE_PARAMS(800000, VR8, Full), }; static_assert(ARRAY_SIZE(link_mode_params) == __ETHTOOL_LINK_MODE_MASK_NBITS); diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c index 6e19cad154f5..5f16807d3235 100644 --- a/net/ipv4/ip_sockglue.c +++ b/net/ipv4/ip_sockglue.c @@ -267,7 +267,7 @@ int ip_cmsg_send(struct sock *sk, struct msghdr *msg, struct ipcm_cookie *ipc, } #endif if (cmsg->cmsg_level == SOL_SOCKET) { - err = __sock_cmsg_send(sk, msg, cmsg, &ipc->sockc); + err = __sock_cmsg_send(sk, cmsg, &ipc->sockc); if (err) return err; continue; diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c index 6a320a614e54..89accc3c8bb3 
100644 --- a/net/ipv4/udp.c +++ b/net/ipv4/udp.c @@ -1448,7 +1448,7 @@ static void udp_rmem_release(struct sock *sk, int size, int partial, if (likely(partial)) { up->forward_deficit += size; size = up->forward_deficit; - if (size < (sk->sk_rcvbuf >> 2) && + if (size < READ_ONCE(up->forward_threshold) && !skb_queue_empty(&up->reader_queue)) return; } else { @@ -1622,7 +1622,7 @@ static void udp_destruct_sock(struct sock *sk) int udp_init_sock(struct sock *sk) { - skb_queue_head_init(&udp_sk(sk)->reader_queue); + udp_lib_init_sock(sk); sk->sk_destruct = udp_destruct_sock; set_bit(SOCK_SUPPORT_ZC, &sk->sk_socket->flags); return 0; @@ -2672,6 +2672,18 @@ int udp_lib_setsockopt(struct sock *sk, int level, int optname, int err = 0; int is_udplite = IS_UDPLITE(sk); + if (level == SOL_SOCKET) { + err = sk_setsockopt(sk, level, optname, optval, optlen); + + if (optname == SO_RCVBUF || optname == SO_RCVBUFFORCE) { + sockopt_lock_sock(sk); + /* paired with READ_ONCE in udp_rmem_release() */ + WRITE_ONCE(up->forward_threshold, sk->sk_rcvbuf >> 2); + sockopt_release_sock(sk); + } + return err; + } + if (optlen < sizeof(int)) return -EINVAL; @@ -2785,7 +2797,7 @@ EXPORT_SYMBOL(udp_lib_setsockopt); int udp_setsockopt(struct sock *sk, int level, int optname, sockptr_t optval, unsigned int optlen) { - if (level == SOL_UDP || level == SOL_UDPLITE) + if (level == SOL_UDP || level == SOL_UDPLITE || level == SOL_SOCKET) return udp_lib_setsockopt(sk, level, optname, optval, optlen, udp_push_pending_frames); diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c index 024191004982..68075295d587 100644 --- a/net/ipv6/af_inet6.c +++ b/net/ipv6/af_inet6.c @@ -114,6 +114,7 @@ void inet6_sock_destruct(struct sock *sk) inet6_cleanup_sock(sk); inet_sock_destruct(sk); } +EXPORT_SYMBOL_GPL(inet6_sock_destruct); static int inet6_create(struct net *net, struct socket *sock, int protocol, int kern) @@ -489,7 +490,7 @@ int inet6_release(struct socket *sock) } EXPORT_SYMBOL(inet6_release); -void inet6_destroy_sock(struct sock *sk) +void inet6_cleanup_sock(struct sock *sk) { struct ipv6_pinfo *np = inet6_sk(sk); struct sk_buff *skb; @@ -514,12 +515,6 @@ void inet6_destroy_sock(struct sock *sk) txopt_put(opt); } } -EXPORT_SYMBOL_GPL(inet6_destroy_sock); - -void inet6_cleanup_sock(struct sock *sk) -{ - inet6_destroy_sock(sk); -} EXPORT_SYMBOL_GPL(inet6_cleanup_sock); /* diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c index 5ecb56522f9d..df7e032ce87d 100644 --- a/net/ipv6/datagram.c +++ b/net/ipv6/datagram.c @@ -771,7 +771,7 @@ int ip6_datagram_send_ctl(struct net *net, struct sock *sk, } if (cmsg->cmsg_level == SOL_SOCKET) { - err = __sock_cmsg_send(sk, msg, cmsg, &ipc6->sockc); + err = __sock_cmsg_send(sk, cmsg, &ipc6->sockc); if (err) return err; continue; diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c index 48b4ff0294f6..02b1b54165e8 100644 --- a/net/ipv6/ip6_gre.c +++ b/net/ipv6/ip6_gre.c @@ -870,26 +870,6 @@ static inline int ip6gre_xmit_ipv6(struct sk_buff *skb, struct net_device *dev) return 0; } -/** - * ip6gre_tnl_addr_conflict - compare packet addresses to tunnel's own - * @t: the outgoing tunnel device - * @hdr: IPv6 header from the incoming packet - * - * Description: - * Avoid trivial tunneling loop by checking that tunnel exit-point - * doesn't match source of incoming packet. 
- * - * Return: - * 1 if conflict, - * 0 else - **/ - -static inline bool ip6gre_tnl_addr_conflict(const struct ip6_tnl *t, - const struct ipv6hdr *hdr) -{ - return ipv6_addr_equal(&t->parms.raddr, &hdr->saddr); -} - static int ip6gre_xmit_other(struct sk_buff *skb, struct net_device *dev) { struct ip6_tnl *t = netdev_priv(dev); diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c index 532f4478c884..9ce51680290b 100644 --- a/net/ipv6/ipv6_sockglue.c +++ b/net/ipv6/ipv6_sockglue.c @@ -1005,10 +1005,8 @@ unlock: return retv; e_inval: - sockopt_release_sock(sk); - if (needs_rtnl) - rtnl_unlock(); - return -EINVAL; + retv = -EINVAL; + goto unlock; } int ipv6_setsockopt(struct sock *sk, int level, int optname, sockptr_t optval, diff --git a/net/ipv6/ping.c b/net/ipv6/ping.c index 86c26e48d065..808983bc2ec9 100644 --- a/net/ipv6/ping.c +++ b/net/ipv6/ping.c @@ -23,11 +23,6 @@ #include <linux/bpf-cgroup.h> #include <net/ping.h> -static void ping_v6_destroy(struct sock *sk) -{ - inet6_destroy_sock(sk); -} - /* Compatibility glue so we can support IPv6 when it's compiled as a module */ static int dummy_ipv6_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len) @@ -205,7 +200,6 @@ struct proto pingv6_prot = { .owner = THIS_MODULE, .init = ping_init_sock, .close = ping_close, - .destroy = ping_v6_destroy, .pre_connect = ping_v6_pre_connect, .connect = ip6_datagram_connect_v6_only, .disconnect = __udp_disconnect, diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c index 722de9dd0ff7..a06a9f847db5 100644 --- a/net/ipv6/raw.c +++ b/net/ipv6/raw.c @@ -1173,8 +1173,6 @@ static void raw6_destroy(struct sock *sk) lock_sock(sk); ip6_flush_pending_frames(sk); release_sock(sk); - - inet6_destroy_sock(sk); } static int rawv6_init_sk(struct sock *sk) diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c index 2a3f9296df1e..f676be14e6b6 100644 --- a/net/ipv6/tcp_ipv6.c +++ b/net/ipv6/tcp_ipv6.c @@ -1966,12 +1966,6 @@ static int tcp_v6_init_sock(struct sock *sk) return 0; } -static void tcp_v6_destroy_sock(struct sock *sk) -{ - tcp_v4_destroy_sock(sk); - inet6_destroy_sock(sk); -} - #ifdef CONFIG_PROC_FS /* Proc filesystem TCPv6 sock list dumping. 
*/ static void get_openreq6(struct seq_file *seq, @@ -2164,7 +2158,7 @@ struct proto tcpv6_prot = { .accept = inet_csk_accept, .ioctl = tcp_ioctl, .init = tcp_v6_init_sock, - .destroy = tcp_v6_destroy_sock, + .destroy = tcp_v4_destroy_sock, .shutdown = tcp_shutdown, .setsockopt = tcp_setsockopt, .getsockopt = tcp_getsockopt, diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c index 129ec5a9b0eb..297f7cc06044 100644 --- a/net/ipv6/udp.c +++ b/net/ipv6/udp.c @@ -64,7 +64,7 @@ static void udpv6_destruct_sock(struct sock *sk) int udpv6_init_sock(struct sock *sk) { - skb_queue_head_init(&udp_sk(sk)->reader_queue); + udp_lib_init_sock(sk); sk->sk_destruct = udpv6_destruct_sock; return 0; } @@ -1661,8 +1661,6 @@ void udpv6_destroy_sock(struct sock *sk) udp_encap_disable(); } } - - inet6_destroy_sock(sk); } /* @@ -1671,7 +1669,7 @@ void udpv6_destroy_sock(struct sock *sk) int udpv6_setsockopt(struct sock *sk, int level, int optname, sockptr_t optval, unsigned int optlen) { - if (level == SOL_UDP || level == SOL_UDPLITE) + if (level == SOL_UDP || level == SOL_UDPLITE || level == SOL_SOCKET) return udp_lib_setsockopt(sk, level, optname, optval, optlen, udp_v6_push_pending_frames); diff --git a/net/l2tp/l2tp_ip6.c b/net/l2tp/l2tp_ip6.c index 9dbd801ddb98..2478aa60145f 100644 --- a/net/l2tp/l2tp_ip6.c +++ b/net/l2tp/l2tp_ip6.c @@ -257,8 +257,6 @@ static void l2tp_ip6_destroy_sock(struct sock *sk) if (tunnel) l2tp_tunnel_delete(tunnel); - - inet6_destroy_sock(sk); } static int l2tp_ip6_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len) diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c index f599ad44ed24..e60d144bfb2d 100644 --- a/net/mptcp/protocol.c +++ b/net/mptcp/protocol.c @@ -2708,6 +2708,8 @@ static int mptcp_init_sock(struct sock *sk) if (ret) return ret; + set_bit(SOCK_CUSTOM_SOCKOPT, &sk->sk_socket->flags); + /* fetch the ca name; do it outside __mptcp_init_sock(), so that clone will * propagate the correct value */ @@ -3684,6 +3686,8 @@ static int mptcp_stream_accept(struct socket *sock, struct socket *newsock, struct mptcp_subflow_context *subflow; struct sock *newsk = newsock->sk; + set_bit(SOCK_CUSTOM_SOCKOPT, &newsock->flags); + lock_sock(newsk); /* PM/worker can now acquire the first subflow socket @@ -3898,12 +3902,6 @@ static const struct proto_ops mptcp_v6_stream_ops = { static struct proto mptcp_v6_prot; -static void mptcp_v6_destroy(struct sock *sk) -{ - mptcp_destroy(sk); - inet6_destroy_sock(sk); -} - static struct inet_protosw mptcp_v6_protosw = { .type = SOCK_STREAM, .protocol = IPPROTO_MPTCP, @@ -3919,7 +3917,6 @@ int __init mptcp_proto_v6_init(void) mptcp_v6_prot = mptcp_prot; strcpy(mptcp_v6_prot.name, "MPTCPv6"); mptcp_v6_prot.slab = NULL; - mptcp_v6_prot.destroy = mptcp_v6_destroy; mptcp_v6_prot.obj_size = sizeof(struct mptcp6_sock); err = proto_register(&mptcp_v6_prot, 1); diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c index a662e8a5ff84..f0c94d394ab1 100644 --- a/net/netlink/af_netlink.c +++ b/net/netlink/af_netlink.c @@ -812,6 +812,17 @@ static int netlink_release(struct socket *sock) } sock_prot_inuse_add(sock_net(sk), &netlink_proto, -1); + + /* Because struct net might disappear soon, do not keep a pointer. */ + if (!sk->sk_net_refcnt && sock_net(sk) != &init_net) { + __netns_tracker_free(sock_net(sk), &sk->ns_tracker, false); + /* Because of deferred_put_nlk_sk and use of work queue, + * it is possible netns will be freed before this socket. 
+ */ + sock_net_set(sk, &init_net); + __netns_tracker_alloc(&init_net, &sk->ns_tracker, + false, GFP_KERNEL); + } call_rcu(&nlk->rcu, deferred_put_nlk_sk); return 0; } diff --git a/net/openvswitch/flow_netlink.c b/net/openvswitch/flow_netlink.c index 4a07ab094a84..ead5418c126e 100644 --- a/net/openvswitch/flow_netlink.c +++ b/net/openvswitch/flow_netlink.c @@ -2309,7 +2309,7 @@ static struct sw_flow_actions *nla_alloc_flow_actions(int size) WARN_ON_ONCE(size > MAX_ACTIONS_BUFSIZE); - sfa = kmalloc(sizeof(*sfa) + size, GFP_KERNEL); + sfa = kmalloc(kmalloc_size_roundup(sizeof(*sfa) + size), GFP_KERNEL); if (!sfa) return ERR_PTR(-ENOMEM); diff --git a/net/rds/tcp.c b/net/rds/tcp.c index 4444fd82b66d..c5b86066ff66 100644 --- a/net/rds/tcp.c +++ b/net/rds/tcp.c @@ -503,6 +503,9 @@ bool rds_tcp_tune(struct socket *sock) release_sock(sk); return false; } + /* Update ns_tracker to current stack trace and refcounted tracker */ + __netns_tracker_free(net, &sk->ns_tracker, false); + sk->sk_net_refcnt = 1; netns_tracker_alloc(net, &sk->ns_tracker, GFP_KERNEL); sock_inuse_add(net, 1); diff --git a/net/sctp/associola.c b/net/sctp/associola.c index 3460abceba44..63ba5551c13f 100644 --- a/net/sctp/associola.c +++ b/net/sctp/associola.c @@ -226,8 +226,7 @@ static struct sctp_association *sctp_association_init( /* Create an output queue. */ sctp_outq_init(asoc, &asoc->outqueue); - if (!sctp_ulpq_init(&asoc->ulpq, asoc)) - goto fail_init; + sctp_ulpq_init(&asoc->ulpq, asoc); if (sctp_stream_init(&asoc->stream, asoc->c.sinit_num_ostreams, 0, gfp)) goto stream_free; @@ -277,7 +276,6 @@ static struct sctp_association *sctp_association_init( stream_free: sctp_stream_free(&asoc->stream); -fail_init: sock_put(asoc->base.sk); sctp_endpoint_put(asoc->ep); return NULL; diff --git a/net/sctp/socket.c b/net/sctp/socket.c index 83628c347744..3e83963d1b8a 100644 --- a/net/sctp/socket.c +++ b/net/sctp/socket.c @@ -5098,13 +5098,17 @@ static void sctp_destroy_sock(struct sock *sk) } /* Triggered when there are no references on the socket anymore */ -static void sctp_destruct_sock(struct sock *sk) +static void sctp_destruct_common(struct sock *sk) { struct sctp_sock *sp = sctp_sk(sk); /* Free up the HMAC transform. 
*/ crypto_free_shash(sp->hmac); +} +static void sctp_destruct_sock(struct sock *sk) +{ + sctp_destruct_common(sk); inet_sock_destruct(sk); } @@ -9427,7 +9431,7 @@ void sctp_copy_sock(struct sock *newsk, struct sock *sk, sctp_sk(newsk)->reuse = sp->reuse; newsk->sk_shutdown = sk->sk_shutdown; - newsk->sk_destruct = sctp_destruct_sock; + newsk->sk_destruct = sk->sk_destruct; newsk->sk_family = sk->sk_family; newsk->sk_protocol = IPPROTO_SCTP; newsk->sk_backlog_rcv = sk->sk_prot->backlog_rcv; @@ -9662,11 +9666,20 @@ struct proto sctp_prot = { #if IS_ENABLED(CONFIG_IPV6) -#include <net/transp_v6.h> -static void sctp_v6_destroy_sock(struct sock *sk) +static void sctp_v6_destruct_sock(struct sock *sk) +{ + sctp_destruct_common(sk); + inet6_sock_destruct(sk); +} + +static int sctp_v6_init_sock(struct sock *sk) { - sctp_destroy_sock(sk); - inet6_destroy_sock(sk); + int ret = sctp_init_sock(sk); + + if (!ret) + sk->sk_destruct = sctp_v6_destruct_sock; + + return ret; } struct proto sctpv6_prot = { @@ -9676,8 +9689,8 @@ struct proto sctpv6_prot = { .disconnect = sctp_disconnect, .accept = sctp_accept, .ioctl = sctp_ioctl, - .init = sctp_init_sock, - .destroy = sctp_v6_destroy_sock, + .init = sctp_v6_init_sock, + .destroy = sctp_destroy_sock, .shutdown = sctp_shutdown, .setsockopt = sctp_setsockopt, .getsockopt = sctp_getsockopt, diff --git a/net/sctp/stream_interleave.c b/net/sctp/stream_interleave.c index bb22b71df7a3..94727feb07b3 100644 --- a/net/sctp/stream_interleave.c +++ b/net/sctp/stream_interleave.c @@ -490,11 +490,8 @@ static int sctp_enqueue_event(struct sctp_ulpq *ulpq, if (!sctp_ulpevent_is_enabled(event, ulpq->asoc->subscribe)) goto out_free; - if (skb_list) - skb_queue_splice_tail_init(skb_list, - &sk->sk_receive_queue); - else - __skb_queue_tail(&sk->sk_receive_queue, skb); + skb_queue_splice_tail_init(skb_list, + &sk->sk_receive_queue); if (!sp->data_ready_signalled) { sp->data_ready_signalled = 1; @@ -504,10 +501,7 @@ static int sctp_enqueue_event(struct sctp_ulpq *ulpq, return 1; out_free: - if (skb_list) - sctp_queue_purge_ulpevents(skb_list); - else - sctp_ulpevent_free(event); + sctp_queue_purge_ulpevents(skb_list); return 0; } diff --git a/net/sctp/ulpqueue.c b/net/sctp/ulpqueue.c index 0a8510a0c5e6..b05daafd369a 100644 --- a/net/sctp/ulpqueue.c +++ b/net/sctp/ulpqueue.c @@ -38,8 +38,7 @@ static void sctp_ulpq_reasm_drain(struct sctp_ulpq *ulpq); /* 1st Level Abstractions */ /* Initialize a ULP queue from a block of memory. 
*/ -struct sctp_ulpq *sctp_ulpq_init(struct sctp_ulpq *ulpq, - struct sctp_association *asoc) +void sctp_ulpq_init(struct sctp_ulpq *ulpq, struct sctp_association *asoc) { memset(ulpq, 0, sizeof(struct sctp_ulpq)); @@ -48,8 +47,6 @@ struct sctp_ulpq *sctp_ulpq_init(struct sctp_ulpq *ulpq, skb_queue_head_init(&ulpq->reasm_uo); skb_queue_head_init(&ulpq->lobby); ulpq->pd_mode = 0; - - return ulpq; } @@ -259,10 +256,7 @@ int sctp_ulpq_tail_event(struct sctp_ulpq *ulpq, struct sk_buff_head *skb_list) return 1; out_free: - if (skb_list) - sctp_queue_purge_ulpevents(skb_list); - else - sctp_ulpevent_free(event); + sctp_queue_purge_ulpevents(skb_list); return 0; } diff --git a/net/socket.c b/net/socket.c index 00da9ce3dba0..55c5d536e5f6 100644 --- a/net/socket.c +++ b/net/socket.c @@ -2199,13 +2199,7 @@ SYSCALL_DEFINE4(recv, int, fd, void __user *, ubuf, size_t, size, static bool sock_use_custom_sol_socket(const struct socket *sock) { - const struct sock *sk = sock->sk; - - /* Use sock->ops->setsockopt() for MPTCP */ - return IS_ENABLED(CONFIG_MPTCP) && - sk->sk_protocol == IPPROTO_MPTCP && - sk->sk_type == SOCK_STREAM && - (sk->sk_family == AF_INET || sk->sk_family == AF_INET6); + return test_bit(SOCK_CUSTOM_SOCKOPT, &sock->flags); } /* diff --git a/scripts/bpf_doc.py b/scripts/bpf_doc.py index d5c389df6045..c0e6690be82a 100755 --- a/scripts/bpf_doc.py +++ b/scripts/bpf_doc.py @@ -97,6 +97,7 @@ class HeaderParser(object): self.desc_unique_helpers = set() self.define_unique_helpers = [] self.helper_enum_vals = {} + self.helper_enum_pos = {} self.desc_syscalls = [] self.enum_syscalls = [] @@ -253,54 +254,71 @@ class HeaderParser(object): break def parse_define_helpers(self): - # Parse FN(...) in #define __BPF_FUNC_MAPPER to compare later with the + # Parse FN(...) in #define ___BPF_FUNC_MAPPER to compare later with the # number of unique function names present in description and use the # correct enumeration value. # Note: seek_to(..) discards the first line below the target search text, - # resulting in FN(unspec) being skipped and not added to self.define_unique_helpers. - self.seek_to('#define __BPF_FUNC_MAPPER(FN)', + # resulting in FN(unspec, 0, ##ctx) being skipped and not added to + # self.define_unique_helpers. 
+ self.seek_to('#define ___BPF_FUNC_MAPPER(FN, ctx...)', 'Could not find start of eBPF helper definition list') # Searches for one FN(\w+) define or a backslash for newline - p = re.compile('\s*FN\((\w+)\)|\\\\') + p = re.compile('\s*FN\((\w+), (\d+), ##ctx\)|\\\\') fn_defines_str = '' - i = 1 # 'unspec' is skipped as mentioned above + i = 0 while True: capture = p.match(self.line) if capture: fn_defines_str += self.line - self.helper_enum_vals[capture.expand(r'bpf_\1')] = i + helper_name = capture.expand(r'bpf_\1') + self.helper_enum_vals[helper_name] = int(capture[2]) + self.helper_enum_pos[helper_name] = i i += 1 else: break self.line = self.reader.readline() # Find the number of occurences of FN(\w+) - self.define_unique_helpers = re.findall('FN\(\w+\)', fn_defines_str) + self.define_unique_helpers = re.findall('FN\(\w+, \d+, ##ctx\)', fn_defines_str) - def assign_helper_values(self): + def validate_helpers(self): + last_helper = '' seen_helpers = set() + seen_enum_vals = set() + i = 0 for helper in self.helpers: proto = helper.proto_break_down() name = proto['name'] try: enum_val = self.helper_enum_vals[name] + enum_pos = self.helper_enum_pos[name] except KeyError: raise Exception("Helper %s is missing from enum bpf_func_id" % name) + if name in seen_helpers: + if last_helper != name: + raise Exception("Helper %s has multiple descriptions which are not grouped together" % name) + continue + # Enforce current practice of having the descriptions ordered # by enum value. + if enum_pos != i: + raise Exception("Helper %s (ID %d) comment order (#%d) must be aligned with its position (#%d) in enum bpf_func_id" % (name, enum_val, i + 1, enum_pos + 1)) + if enum_val in seen_enum_vals: + raise Exception("Helper %s has duplicated value %d" % (name, enum_val)) + seen_helpers.add(name) - desc_val = len(seen_helpers) - if desc_val != enum_val: - raise Exception("Helper %s comment order (#%d) must be aligned with its position (#%d) in enum bpf_func_id" % (name, desc_val, enum_val)) + last_helper = name + seen_enum_vals.add(enum_val) helper.enum_val = enum_val + i += 1 def run(self): self.parse_desc_syscall() self.parse_enum_syscall() self.parse_desc_helpers() self.parse_define_helpers() - self.assign_helper_values() + self.validate_helpers() self.reader.close() ############################################################################### @@ -423,7 +441,7 @@ class PrinterHelpersRST(PrinterRST): """ def __init__(self, parser): self.elements = parser.helpers - self.elem_number_check(parser.desc_unique_helpers, parser.define_unique_helpers, 'helper', '__BPF_FUNC_MAPPER') + self.elem_number_check(parser.desc_unique_helpers, parser.define_unique_helpers, 'helper', '___BPF_FUNC_MAPPER') def print_header(self): header = '''\ @@ -636,7 +654,7 @@ class PrinterHelpers(Printer): """ def __init__(self, parser): self.elements = parser.helpers - self.elem_number_check(parser.desc_unique_helpers, parser.define_unique_helpers, 'helper', '__BPF_FUNC_MAPPER') + self.elem_number_check(parser.desc_unique_helpers, parser.define_unique_helpers, 'helper', '___BPF_FUNC_MAPPER') type_fwds = [ 'struct bpf_fib_lookup', diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h index 51b9aa640ad2..17f61338f8f8 100644 --- a/tools/include/uapi/linux/bpf.h +++ b/tools/include/uapi/linux/bpf.h @@ -5436,225 +5436,231 @@ union bpf_attr { * larger than the size of the ring buffer, or which cannot fit * within a struct bpf_dynptr. 
*/ -#define __BPF_FUNC_MAPPER(FN) \ - FN(unspec), \ - FN(map_lookup_elem), \ - FN(map_update_elem), \ - FN(map_delete_elem), \ - FN(probe_read), \ - FN(ktime_get_ns), \ - FN(trace_printk), \ - FN(get_prandom_u32), \ - FN(get_smp_processor_id), \ - FN(skb_store_bytes), \ - FN(l3_csum_replace), \ - FN(l4_csum_replace), \ - FN(tail_call), \ - FN(clone_redirect), \ - FN(get_current_pid_tgid), \ - FN(get_current_uid_gid), \ - FN(get_current_comm), \ - FN(get_cgroup_classid), \ - FN(skb_vlan_push), \ - FN(skb_vlan_pop), \ - FN(skb_get_tunnel_key), \ - FN(skb_set_tunnel_key), \ - FN(perf_event_read), \ - FN(redirect), \ - FN(get_route_realm), \ - FN(perf_event_output), \ - FN(skb_load_bytes), \ - FN(get_stackid), \ - FN(csum_diff), \ - FN(skb_get_tunnel_opt), \ - FN(skb_set_tunnel_opt), \ - FN(skb_change_proto), \ - FN(skb_change_type), \ - FN(skb_under_cgroup), \ - FN(get_hash_recalc), \ - FN(get_current_task), \ - FN(probe_write_user), \ - FN(current_task_under_cgroup), \ - FN(skb_change_tail), \ - FN(skb_pull_data), \ - FN(csum_update), \ - FN(set_hash_invalid), \ - FN(get_numa_node_id), \ - FN(skb_change_head), \ - FN(xdp_adjust_head), \ - FN(probe_read_str), \ - FN(get_socket_cookie), \ - FN(get_socket_uid), \ - FN(set_hash), \ - FN(setsockopt), \ - FN(skb_adjust_room), \ - FN(redirect_map), \ - FN(sk_redirect_map), \ - FN(sock_map_update), \ - FN(xdp_adjust_meta), \ - FN(perf_event_read_value), \ - FN(perf_prog_read_value), \ - FN(getsockopt), \ - FN(override_return), \ - FN(sock_ops_cb_flags_set), \ - FN(msg_redirect_map), \ - FN(msg_apply_bytes), \ - FN(msg_cork_bytes), \ - FN(msg_pull_data), \ - FN(bind), \ - FN(xdp_adjust_tail), \ - FN(skb_get_xfrm_state), \ - FN(get_stack), \ - FN(skb_load_bytes_relative), \ - FN(fib_lookup), \ - FN(sock_hash_update), \ - FN(msg_redirect_hash), \ - FN(sk_redirect_hash), \ - FN(lwt_push_encap), \ - FN(lwt_seg6_store_bytes), \ - FN(lwt_seg6_adjust_srh), \ - FN(lwt_seg6_action), \ - FN(rc_repeat), \ - FN(rc_keydown), \ - FN(skb_cgroup_id), \ - FN(get_current_cgroup_id), \ - FN(get_local_storage), \ - FN(sk_select_reuseport), \ - FN(skb_ancestor_cgroup_id), \ - FN(sk_lookup_tcp), \ - FN(sk_lookup_udp), \ - FN(sk_release), \ - FN(map_push_elem), \ - FN(map_pop_elem), \ - FN(map_peek_elem), \ - FN(msg_push_data), \ - FN(msg_pop_data), \ - FN(rc_pointer_rel), \ - FN(spin_lock), \ - FN(spin_unlock), \ - FN(sk_fullsock), \ - FN(tcp_sock), \ - FN(skb_ecn_set_ce), \ - FN(get_listener_sock), \ - FN(skc_lookup_tcp), \ - FN(tcp_check_syncookie), \ - FN(sysctl_get_name), \ - FN(sysctl_get_current_value), \ - FN(sysctl_get_new_value), \ - FN(sysctl_set_new_value), \ - FN(strtol), \ - FN(strtoul), \ - FN(sk_storage_get), \ - FN(sk_storage_delete), \ - FN(send_signal), \ - FN(tcp_gen_syncookie), \ - FN(skb_output), \ - FN(probe_read_user), \ - FN(probe_read_kernel), \ - FN(probe_read_user_str), \ - FN(probe_read_kernel_str), \ - FN(tcp_send_ack), \ - FN(send_signal_thread), \ - FN(jiffies64), \ - FN(read_branch_records), \ - FN(get_ns_current_pid_tgid), \ - FN(xdp_output), \ - FN(get_netns_cookie), \ - FN(get_current_ancestor_cgroup_id), \ - FN(sk_assign), \ - FN(ktime_get_boot_ns), \ - FN(seq_printf), \ - FN(seq_write), \ - FN(sk_cgroup_id), \ - FN(sk_ancestor_cgroup_id), \ - FN(ringbuf_output), \ - FN(ringbuf_reserve), \ - FN(ringbuf_submit), \ - FN(ringbuf_discard), \ - FN(ringbuf_query), \ - FN(csum_level), \ - FN(skc_to_tcp6_sock), \ - FN(skc_to_tcp_sock), \ - FN(skc_to_tcp_timewait_sock), \ - FN(skc_to_tcp_request_sock), \ - FN(skc_to_udp6_sock), \ - 
FN(get_task_stack), \ - FN(load_hdr_opt), \ - FN(store_hdr_opt), \ - FN(reserve_hdr_opt), \ - FN(inode_storage_get), \ - FN(inode_storage_delete), \ - FN(d_path), \ - FN(copy_from_user), \ - FN(snprintf_btf), \ - FN(seq_printf_btf), \ - FN(skb_cgroup_classid), \ - FN(redirect_neigh), \ - FN(per_cpu_ptr), \ - FN(this_cpu_ptr), \ - FN(redirect_peer), \ - FN(task_storage_get), \ - FN(task_storage_delete), \ - FN(get_current_task_btf), \ - FN(bprm_opts_set), \ - FN(ktime_get_coarse_ns), \ - FN(ima_inode_hash), \ - FN(sock_from_file), \ - FN(check_mtu), \ - FN(for_each_map_elem), \ - FN(snprintf), \ - FN(sys_bpf), \ - FN(btf_find_by_name_kind), \ - FN(sys_close), \ - FN(timer_init), \ - FN(timer_set_callback), \ - FN(timer_start), \ - FN(timer_cancel), \ - FN(get_func_ip), \ - FN(get_attach_cookie), \ - FN(task_pt_regs), \ - FN(get_branch_snapshot), \ - FN(trace_vprintk), \ - FN(skc_to_unix_sock), \ - FN(kallsyms_lookup_name), \ - FN(find_vma), \ - FN(loop), \ - FN(strncmp), \ - FN(get_func_arg), \ - FN(get_func_ret), \ - FN(get_func_arg_cnt), \ - FN(get_retval), \ - FN(set_retval), \ - FN(xdp_get_buff_len), \ - FN(xdp_load_bytes), \ - FN(xdp_store_bytes), \ - FN(copy_from_user_task), \ - FN(skb_set_tstamp), \ - FN(ima_file_hash), \ - FN(kptr_xchg), \ - FN(map_lookup_percpu_elem), \ - FN(skc_to_mptcp_sock), \ - FN(dynptr_from_mem), \ - FN(ringbuf_reserve_dynptr), \ - FN(ringbuf_submit_dynptr), \ - FN(ringbuf_discard_dynptr), \ - FN(dynptr_read), \ - FN(dynptr_write), \ - FN(dynptr_data), \ - FN(tcp_raw_gen_syncookie_ipv4), \ - FN(tcp_raw_gen_syncookie_ipv6), \ - FN(tcp_raw_check_syncookie_ipv4), \ - FN(tcp_raw_check_syncookie_ipv6), \ - FN(ktime_get_tai_ns), \ - FN(user_ringbuf_drain), \ +#define ___BPF_FUNC_MAPPER(FN, ctx...) \ + FN(unspec, 0, ##ctx) \ + FN(map_lookup_elem, 1, ##ctx) \ + FN(map_update_elem, 2, ##ctx) \ + FN(map_delete_elem, 3, ##ctx) \ + FN(probe_read, 4, ##ctx) \ + FN(ktime_get_ns, 5, ##ctx) \ + FN(trace_printk, 6, ##ctx) \ + FN(get_prandom_u32, 7, ##ctx) \ + FN(get_smp_processor_id, 8, ##ctx) \ + FN(skb_store_bytes, 9, ##ctx) \ + FN(l3_csum_replace, 10, ##ctx) \ + FN(l4_csum_replace, 11, ##ctx) \ + FN(tail_call, 12, ##ctx) \ + FN(clone_redirect, 13, ##ctx) \ + FN(get_current_pid_tgid, 14, ##ctx) \ + FN(get_current_uid_gid, 15, ##ctx) \ + FN(get_current_comm, 16, ##ctx) \ + FN(get_cgroup_classid, 17, ##ctx) \ + FN(skb_vlan_push, 18, ##ctx) \ + FN(skb_vlan_pop, 19, ##ctx) \ + FN(skb_get_tunnel_key, 20, ##ctx) \ + FN(skb_set_tunnel_key, 21, ##ctx) \ + FN(perf_event_read, 22, ##ctx) \ + FN(redirect, 23, ##ctx) \ + FN(get_route_realm, 24, ##ctx) \ + FN(perf_event_output, 25, ##ctx) \ + FN(skb_load_bytes, 26, ##ctx) \ + FN(get_stackid, 27, ##ctx) \ + FN(csum_diff, 28, ##ctx) \ + FN(skb_get_tunnel_opt, 29, ##ctx) \ + FN(skb_set_tunnel_opt, 30, ##ctx) \ + FN(skb_change_proto, 31, ##ctx) \ + FN(skb_change_type, 32, ##ctx) \ + FN(skb_under_cgroup, 33, ##ctx) \ + FN(get_hash_recalc, 34, ##ctx) \ + FN(get_current_task, 35, ##ctx) \ + FN(probe_write_user, 36, ##ctx) \ + FN(current_task_under_cgroup, 37, ##ctx) \ + FN(skb_change_tail, 38, ##ctx) \ + FN(skb_pull_data, 39, ##ctx) \ + FN(csum_update, 40, ##ctx) \ + FN(set_hash_invalid, 41, ##ctx) \ + FN(get_numa_node_id, 42, ##ctx) \ + FN(skb_change_head, 43, ##ctx) \ + FN(xdp_adjust_head, 44, ##ctx) \ + FN(probe_read_str, 45, ##ctx) \ + FN(get_socket_cookie, 46, ##ctx) \ + FN(get_socket_uid, 47, ##ctx) \ + FN(set_hash, 48, ##ctx) \ + FN(setsockopt, 49, ##ctx) \ + FN(skb_adjust_room, 50, ##ctx) \ + FN(redirect_map, 51, ##ctx) \ + 
FN(sk_redirect_map, 52, ##ctx) \ + FN(sock_map_update, 53, ##ctx) \ + FN(xdp_adjust_meta, 54, ##ctx) \ + FN(perf_event_read_value, 55, ##ctx) \ + FN(perf_prog_read_value, 56, ##ctx) \ + FN(getsockopt, 57, ##ctx) \ + FN(override_return, 58, ##ctx) \ + FN(sock_ops_cb_flags_set, 59, ##ctx) \ + FN(msg_redirect_map, 60, ##ctx) \ + FN(msg_apply_bytes, 61, ##ctx) \ + FN(msg_cork_bytes, 62, ##ctx) \ + FN(msg_pull_data, 63, ##ctx) \ + FN(bind, 64, ##ctx) \ + FN(xdp_adjust_tail, 65, ##ctx) \ + FN(skb_get_xfrm_state, 66, ##ctx) \ + FN(get_stack, 67, ##ctx) \ + FN(skb_load_bytes_relative, 68, ##ctx) \ + FN(fib_lookup, 69, ##ctx) \ + FN(sock_hash_update, 70, ##ctx) \ + FN(msg_redirect_hash, 71, ##ctx) \ + FN(sk_redirect_hash, 72, ##ctx) \ + FN(lwt_push_encap, 73, ##ctx) \ + FN(lwt_seg6_store_bytes, 74, ##ctx) \ + FN(lwt_seg6_adjust_srh, 75, ##ctx) \ + FN(lwt_seg6_action, 76, ##ctx) \ + FN(rc_repeat, 77, ##ctx) \ + FN(rc_keydown, 78, ##ctx) \ + FN(skb_cgroup_id, 79, ##ctx) \ + FN(get_current_cgroup_id, 80, ##ctx) \ + FN(get_local_storage, 81, ##ctx) \ + FN(sk_select_reuseport, 82, ##ctx) \ + FN(skb_ancestor_cgroup_id, 83, ##ctx) \ + FN(sk_lookup_tcp, 84, ##ctx) \ + FN(sk_lookup_udp, 85, ##ctx) \ + FN(sk_release, 86, ##ctx) \ + FN(map_push_elem, 87, ##ctx) \ + FN(map_pop_elem, 88, ##ctx) \ + FN(map_peek_elem, 89, ##ctx) \ + FN(msg_push_data, 90, ##ctx) \ + FN(msg_pop_data, 91, ##ctx) \ + FN(rc_pointer_rel, 92, ##ctx) \ + FN(spin_lock, 93, ##ctx) \ + FN(spin_unlock, 94, ##ctx) \ + FN(sk_fullsock, 95, ##ctx) \ + FN(tcp_sock, 96, ##ctx) \ + FN(skb_ecn_set_ce, 97, ##ctx) \ + FN(get_listener_sock, 98, ##ctx) \ + FN(skc_lookup_tcp, 99, ##ctx) \ + FN(tcp_check_syncookie, 100, ##ctx) \ + FN(sysctl_get_name, 101, ##ctx) \ + FN(sysctl_get_current_value, 102, ##ctx) \ + FN(sysctl_get_new_value, 103, ##ctx) \ + FN(sysctl_set_new_value, 104, ##ctx) \ + FN(strtol, 105, ##ctx) \ + FN(strtoul, 106, ##ctx) \ + FN(sk_storage_get, 107, ##ctx) \ + FN(sk_storage_delete, 108, ##ctx) \ + FN(send_signal, 109, ##ctx) \ + FN(tcp_gen_syncookie, 110, ##ctx) \ + FN(skb_output, 111, ##ctx) \ + FN(probe_read_user, 112, ##ctx) \ + FN(probe_read_kernel, 113, ##ctx) \ + FN(probe_read_user_str, 114, ##ctx) \ + FN(probe_read_kernel_str, 115, ##ctx) \ + FN(tcp_send_ack, 116, ##ctx) \ + FN(send_signal_thread, 117, ##ctx) \ + FN(jiffies64, 118, ##ctx) \ + FN(read_branch_records, 119, ##ctx) \ + FN(get_ns_current_pid_tgid, 120, ##ctx) \ + FN(xdp_output, 121, ##ctx) \ + FN(get_netns_cookie, 122, ##ctx) \ + FN(get_current_ancestor_cgroup_id, 123, ##ctx) \ + FN(sk_assign, 124, ##ctx) \ + FN(ktime_get_boot_ns, 125, ##ctx) \ + FN(seq_printf, 126, ##ctx) \ + FN(seq_write, 127, ##ctx) \ + FN(sk_cgroup_id, 128, ##ctx) \ + FN(sk_ancestor_cgroup_id, 129, ##ctx) \ + FN(ringbuf_output, 130, ##ctx) \ + FN(ringbuf_reserve, 131, ##ctx) \ + FN(ringbuf_submit, 132, ##ctx) \ + FN(ringbuf_discard, 133, ##ctx) \ + FN(ringbuf_query, 134, ##ctx) \ + FN(csum_level, 135, ##ctx) \ + FN(skc_to_tcp6_sock, 136, ##ctx) \ + FN(skc_to_tcp_sock, 137, ##ctx) \ + FN(skc_to_tcp_timewait_sock, 138, ##ctx) \ + FN(skc_to_tcp_request_sock, 139, ##ctx) \ + FN(skc_to_udp6_sock, 140, ##ctx) \ + FN(get_task_stack, 141, ##ctx) \ + FN(load_hdr_opt, 142, ##ctx) \ + FN(store_hdr_opt, 143, ##ctx) \ + FN(reserve_hdr_opt, 144, ##ctx) \ + FN(inode_storage_get, 145, ##ctx) \ + FN(inode_storage_delete, 146, ##ctx) \ + FN(d_path, 147, ##ctx) \ + FN(copy_from_user, 148, ##ctx) \ + FN(snprintf_btf, 149, ##ctx) \ + FN(seq_printf_btf, 150, ##ctx) \ + FN(skb_cgroup_classid, 151, ##ctx) \ + 
FN(redirect_neigh, 152, ##ctx) \ + FN(per_cpu_ptr, 153, ##ctx) \ + FN(this_cpu_ptr, 154, ##ctx) \ + FN(redirect_peer, 155, ##ctx) \ + FN(task_storage_get, 156, ##ctx) \ + FN(task_storage_delete, 157, ##ctx) \ + FN(get_current_task_btf, 158, ##ctx) \ + FN(bprm_opts_set, 159, ##ctx) \ + FN(ktime_get_coarse_ns, 160, ##ctx) \ + FN(ima_inode_hash, 161, ##ctx) \ + FN(sock_from_file, 162, ##ctx) \ + FN(check_mtu, 163, ##ctx) \ + FN(for_each_map_elem, 164, ##ctx) \ + FN(snprintf, 165, ##ctx) \ + FN(sys_bpf, 166, ##ctx) \ + FN(btf_find_by_name_kind, 167, ##ctx) \ + FN(sys_close, 168, ##ctx) \ + FN(timer_init, 169, ##ctx) \ + FN(timer_set_callback, 170, ##ctx) \ + FN(timer_start, 171, ##ctx) \ + FN(timer_cancel, 172, ##ctx) \ + FN(get_func_ip, 173, ##ctx) \ + FN(get_attach_cookie, 174, ##ctx) \ + FN(task_pt_regs, 175, ##ctx) \ + FN(get_branch_snapshot, 176, ##ctx) \ + FN(trace_vprintk, 177, ##ctx) \ + FN(skc_to_unix_sock, 178, ##ctx) \ + FN(kallsyms_lookup_name, 179, ##ctx) \ + FN(find_vma, 180, ##ctx) \ + FN(loop, 181, ##ctx) \ + FN(strncmp, 182, ##ctx) \ + FN(get_func_arg, 183, ##ctx) \ + FN(get_func_ret, 184, ##ctx) \ + FN(get_func_arg_cnt, 185, ##ctx) \ + FN(get_retval, 186, ##ctx) \ + FN(set_retval, 187, ##ctx) \ + FN(xdp_get_buff_len, 188, ##ctx) \ + FN(xdp_load_bytes, 189, ##ctx) \ + FN(xdp_store_bytes, 190, ##ctx) \ + FN(copy_from_user_task, 191, ##ctx) \ + FN(skb_set_tstamp, 192, ##ctx) \ + FN(ima_file_hash, 193, ##ctx) \ + FN(kptr_xchg, 194, ##ctx) \ + FN(map_lookup_percpu_elem, 195, ##ctx) \ + FN(skc_to_mptcp_sock, 196, ##ctx) \ + FN(dynptr_from_mem, 197, ##ctx) \ + FN(ringbuf_reserve_dynptr, 198, ##ctx) \ + FN(ringbuf_submit_dynptr, 199, ##ctx) \ + FN(ringbuf_discard_dynptr, 200, ##ctx) \ + FN(dynptr_read, 201, ##ctx) \ + FN(dynptr_write, 202, ##ctx) \ + FN(dynptr_data, 203, ##ctx) \ + FN(tcp_raw_gen_syncookie_ipv4, 204, ##ctx) \ + FN(tcp_raw_gen_syncookie_ipv6, 205, ##ctx) \ + FN(tcp_raw_check_syncookie_ipv4, 206, ##ctx) \ + FN(tcp_raw_check_syncookie_ipv6, 207, ##ctx) \ + FN(ktime_get_tai_ns, 208, ##ctx) \ + FN(user_ringbuf_drain, 209, ##ctx) \ /* */ +/* backwards-compatibility macros for users of __BPF_FUNC_MAPPER that don't + * know or care about integer value that is now passed as second argument + */ +#define __BPF_FUNC_MAPPER_APPLY(name, value, FN) FN(name), +#define __BPF_FUNC_MAPPER(FN) ___BPF_FUNC_MAPPER(__BPF_FUNC_MAPPER_APPLY, FN) + /* integer value in 'imm' field of BPF_CALL instruction selects which helper * function eBPF program intends to call */ -#define __BPF_ENUM_FN(x) BPF_FUNC_ ## x +#define __BPF_ENUM_FN(x, y) BPF_FUNC_ ## x = y, enum bpf_func_id { - __BPF_FUNC_MAPPER(__BPF_ENUM_FN) + ___BPF_FUNC_MAPPER(__BPF_ENUM_FN) __BPF_FUNC_MAX_ID, }; #undef __BPF_ENUM_FN diff --git a/tools/lib/bpf/bpf.c b/tools/lib/bpf/bpf.c index 1d49a0352836..9aff98f42a3d 100644 --- a/tools/lib/bpf/bpf.c +++ b/tools/lib/bpf/bpf.c @@ -935,58 +935,98 @@ int bpf_link_get_next_id(__u32 start_id, __u32 *next_id) return bpf_obj_get_next_id(start_id, next_id, BPF_LINK_GET_NEXT_ID); } -int bpf_prog_get_fd_by_id(__u32 id) +int bpf_prog_get_fd_by_id_opts(__u32 id, + const struct bpf_get_fd_by_id_opts *opts) { const size_t attr_sz = offsetofend(union bpf_attr, open_flags); union bpf_attr attr; int fd; + if (!OPTS_VALID(opts, bpf_get_fd_by_id_opts)) + return libbpf_err(-EINVAL); + memset(&attr, 0, attr_sz); attr.prog_id = id; + attr.open_flags = OPTS_GET(opts, open_flags, 0); fd = sys_bpf_fd(BPF_PROG_GET_FD_BY_ID, &attr, attr_sz); return libbpf_err_errno(fd); } -int bpf_map_get_fd_by_id(__u32 id) +int 
bpf_prog_get_fd_by_id(__u32 id) +{ + return bpf_prog_get_fd_by_id_opts(id, NULL); +} + +int bpf_map_get_fd_by_id_opts(__u32 id, + const struct bpf_get_fd_by_id_opts *opts) { const size_t attr_sz = offsetofend(union bpf_attr, open_flags); union bpf_attr attr; int fd; + if (!OPTS_VALID(opts, bpf_get_fd_by_id_opts)) + return libbpf_err(-EINVAL); + memset(&attr, 0, attr_sz); attr.map_id = id; + attr.open_flags = OPTS_GET(opts, open_flags, 0); fd = sys_bpf_fd(BPF_MAP_GET_FD_BY_ID, &attr, attr_sz); return libbpf_err_errno(fd); } -int bpf_btf_get_fd_by_id(__u32 id) +int bpf_map_get_fd_by_id(__u32 id) +{ + return bpf_map_get_fd_by_id_opts(id, NULL); +} + +int bpf_btf_get_fd_by_id_opts(__u32 id, + const struct bpf_get_fd_by_id_opts *opts) { const size_t attr_sz = offsetofend(union bpf_attr, open_flags); union bpf_attr attr; int fd; + if (!OPTS_VALID(opts, bpf_get_fd_by_id_opts)) + return libbpf_err(-EINVAL); + memset(&attr, 0, attr_sz); attr.btf_id = id; + attr.open_flags = OPTS_GET(opts, open_flags, 0); fd = sys_bpf_fd(BPF_BTF_GET_FD_BY_ID, &attr, attr_sz); return libbpf_err_errno(fd); } -int bpf_link_get_fd_by_id(__u32 id) +int bpf_btf_get_fd_by_id(__u32 id) +{ + return bpf_btf_get_fd_by_id_opts(id, NULL); +} + +int bpf_link_get_fd_by_id_opts(__u32 id, + const struct bpf_get_fd_by_id_opts *opts) { const size_t attr_sz = offsetofend(union bpf_attr, open_flags); union bpf_attr attr; int fd; + if (!OPTS_VALID(opts, bpf_get_fd_by_id_opts)) + return libbpf_err(-EINVAL); + memset(&attr, 0, attr_sz); attr.link_id = id; + attr.open_flags = OPTS_GET(opts, open_flags, 0); fd = sys_bpf_fd(BPF_LINK_GET_FD_BY_ID, &attr, attr_sz); return libbpf_err_errno(fd); } +int bpf_link_get_fd_by_id(__u32 id) +{ + return bpf_link_get_fd_by_id_opts(id, NULL); +} + int bpf_obj_get_info_by_fd(int bpf_fd, void *info, __u32 *info_len) { const size_t attr_sz = offsetofend(union bpf_attr, info); diff --git a/tools/lib/bpf/bpf.h b/tools/lib/bpf/bpf.h index 9c50beabdd14..a112e0ed1b19 100644 --- a/tools/lib/bpf/bpf.h +++ b/tools/lib/bpf/bpf.h @@ -365,10 +365,26 @@ LIBBPF_API int bpf_prog_get_next_id(__u32 start_id, __u32 *next_id); LIBBPF_API int bpf_map_get_next_id(__u32 start_id, __u32 *next_id); LIBBPF_API int bpf_btf_get_next_id(__u32 start_id, __u32 *next_id); LIBBPF_API int bpf_link_get_next_id(__u32 start_id, __u32 *next_id); + +struct bpf_get_fd_by_id_opts { + size_t sz; /* size of this struct for forward/backward compatibility */ + __u32 open_flags; /* permissions requested for the operation on fd */ + size_t :0; +}; +#define bpf_get_fd_by_id_opts__last_field open_flags + LIBBPF_API int bpf_prog_get_fd_by_id(__u32 id); +LIBBPF_API int bpf_prog_get_fd_by_id_opts(__u32 id, + const struct bpf_get_fd_by_id_opts *opts); LIBBPF_API int bpf_map_get_fd_by_id(__u32 id); +LIBBPF_API int bpf_map_get_fd_by_id_opts(__u32 id, + const struct bpf_get_fd_by_id_opts *opts); LIBBPF_API int bpf_btf_get_fd_by_id(__u32 id); +LIBBPF_API int bpf_btf_get_fd_by_id_opts(__u32 id, + const struct bpf_get_fd_by_id_opts *opts); LIBBPF_API int bpf_link_get_fd_by_id(__u32 id); +LIBBPF_API int bpf_link_get_fd_by_id_opts(__u32 id, + const struct bpf_get_fd_by_id_opts *opts); LIBBPF_API int bpf_obj_get_info_by_fd(int bpf_fd, void *info, __u32 *info_len); struct bpf_prog_query_opts { diff --git a/tools/lib/bpf/btf_dump.c b/tools/lib/bpf/btf_dump.c index 4221f73a74d0..bf0cc0e986dd 100644 --- a/tools/lib/bpf/btf_dump.c +++ b/tools/lib/bpf/btf_dump.c @@ -219,6 +219,17 @@ static int btf_dump_resize(struct btf_dump *d) return 0; } +static void 
btf_dump_free_names(struct hashmap *map) +{ + size_t bkt; + struct hashmap_entry *cur; + + hashmap__for_each_entry(map, cur, bkt) + free((void *)cur->key); + + hashmap__free(map); +} + void btf_dump__free(struct btf_dump *d) { int i; @@ -237,8 +248,8 @@ void btf_dump__free(struct btf_dump *d) free(d->cached_names); free(d->emit_queue); free(d->decl_stack); - hashmap__free(d->type_names); - hashmap__free(d->ident_names); + btf_dump_free_names(d->type_names); + btf_dump_free_names(d->ident_names); free(d); } @@ -944,7 +955,11 @@ static void btf_dump_emit_struct_def(struct btf_dump *d, lvl + 1); } - if (vlen) + /* + * Keep `struct empty {}` on a single line, + * only print newline when there are regular or padding fields. + */ + if (vlen || t->size) btf_dump_printf(d, "\n"); btf_dump_printf(d, "%s}", pfx(lvl)); if (packed) @@ -1520,11 +1535,23 @@ static void btf_dump_emit_type_cast(struct btf_dump *d, __u32 id, static size_t btf_dump_name_dups(struct btf_dump *d, struct hashmap *name_map, const char *orig_name) { + char *old_name, *new_name; size_t dup_cnt = 0; + int err; + + new_name = strdup(orig_name); + if (!new_name) + return 1; hashmap__find(name_map, orig_name, (void **)&dup_cnt); dup_cnt++; - hashmap__set(name_map, orig_name, (void *)dup_cnt, NULL, NULL); + + err = hashmap__set(name_map, new_name, (void *)dup_cnt, + (const void **)&old_name, NULL); + if (err) + free(new_name); + + free(old_name); return dup_cnt; } diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c index 184ce1684dcd..8c3f236c86e4 100644 --- a/tools/lib/bpf/libbpf.c +++ b/tools/lib/bpf/libbpf.c @@ -597,7 +597,7 @@ struct elf_state { size_t shstrndx; /* section index for section name strings */ size_t strtabidx; struct elf_sec_desc *secs; - int sec_cnt; + size_t sec_cnt; int btf_maps_shndx; __u32 btf_maps_sec_btf_id; int text_shndx; @@ -1408,6 +1408,10 @@ static int bpf_object__check_endianness(struct bpf_object *obj) static int bpf_object__init_license(struct bpf_object *obj, void *data, size_t size) { + if (!data) { + pr_warn("invalid license section in %s\n", obj->path); + return -LIBBPF_ERRNO__FORMAT; + } /* libbpf_strlcpy() only copies first N - 1 bytes, so size + 1 won't * go over allowed ELF data section buffer */ @@ -1421,7 +1425,7 @@ bpf_object__init_kversion(struct bpf_object *obj, void *data, size_t size) { __u32 kver; - if (size != sizeof(kver)) { + if (!data || size != sizeof(kver)) { pr_warn("invalid kver section in %s\n", obj->path); return -LIBBPF_ERRNO__FORMAT; } @@ -3312,10 +3316,15 @@ static int bpf_object__elf_collect(struct bpf_object *obj) Elf64_Shdr *sh; /* ELF section indices are 0-based, but sec #0 is special "invalid" - * section. e_shnum does include sec #0, so e_shnum is the necessary - * size of an array to keep all the sections. + * section. Since section count retrieved by elf_getshdrnum() does + * include sec #0, it is already the necessary size of an array to keep + * all the sections. 
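As a standalone usage sketch of the *_get_fd_by_id_opts() variants added to libbpf in the bpf.c/bpf.h hunks above (not part of the patch; the map id and the 4-byte key/value layout are assumptions, and dump_map_value_readonly() is a made-up helper), requesting a read-only map fd and using it could look like this:

/* Fetch a read-only fd for an existing map by id and read through it. */
#include <stdio.h>
#include <unistd.h>
#include <linux/bpf.h>
#include <bpf/bpf.h>

static int dump_map_value_readonly(__u32 map_id)
{
	LIBBPF_OPTS(bpf_get_fd_by_id_opts, opts,
		.open_flags = BPF_F_RDONLY,	/* ask for a read-only fd */
	);
	__u32 key = 0, value = 0;
	int fd, err;

	fd = bpf_map_get_fd_by_id_opts(map_id, &opts);
	if (fd < 0)
		return fd;	/* e.g. -EPERM without sufficient capabilities */

	/* Lookups are fine on a read-only fd... */
	err = bpf_map_lookup_elem(fd, &key, &value);
	if (!err)
		printf("map %u: key %u -> value %u\n", map_id, key, value);

	/* ...while updates through it are expected to be rejected. */
	err = bpf_map_update_elem(fd, &key, &value, BPF_ANY);
	if (err)
		printf("update via read-only fd rejected: %d\n", err);

	close(fd);
	return 0;
}

The same bpf_get_fd_by_id_opts structure is shared by the prog, map, BTF and link variants; only the map ioctl honours open_flags at this point, which is why the selftest added later in this series expects -EINVAL from the prog, link and BTF variants when opts are passed.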
*/ - obj->efile.sec_cnt = obj->efile.ehdr->e_shnum; + if (elf_getshdrnum(obj->efile.elf, &obj->efile.sec_cnt)) { + pr_warn("elf: failed to get the number of sections for %s: %s\n", + obj->path, elf_errmsg(-1)); + return -LIBBPF_ERRNO__FORMAT; + } obj->efile.secs = calloc(obj->efile.sec_cnt, sizeof(*obj->efile.secs)); if (!obj->efile.secs) return -ENOMEM; @@ -4106,6 +4115,9 @@ static struct bpf_program *find_prog_by_sec_insn(const struct bpf_object *obj, int l = 0, r = obj->nr_programs - 1, m; struct bpf_program *prog; + if (!obj->nr_programs) + return NULL; + while (l < r) { m = l + (r - l + 1) / 2; prog = &obj->programs[m]; diff --git a/tools/lib/bpf/libbpf.map b/tools/lib/bpf/libbpf.map index c1d6aa7c82b6..71bf5691a689 100644 --- a/tools/lib/bpf/libbpf.map +++ b/tools/lib/bpf/libbpf.map @@ -367,10 +367,14 @@ LIBBPF_1.0.0 { libbpf_bpf_map_type_str; libbpf_bpf_prog_type_str; perf_buffer__buffer; -}; +} LIBBPF_0.8.0; LIBBPF_1.1.0 { global: + bpf_btf_get_fd_by_id_opts; + bpf_link_get_fd_by_id_opts; + bpf_map_get_fd_by_id_opts; + bpf_prog_get_fd_by_id_opts; user_ring_buffer__discard; user_ring_buffer__free; user_ring_buffer__new; diff --git a/tools/lib/bpf/usdt.c b/tools/lib/bpf/usdt.c index e83b497c2245..49f3c3b7f609 100644 --- a/tools/lib/bpf/usdt.c +++ b/tools/lib/bpf/usdt.c @@ -1348,25 +1348,23 @@ static int calc_pt_regs_off(const char *reg_name) static int parse_usdt_arg(const char *arg_str, int arg_num, struct usdt_arg_spec *arg) { - char *reg_name = NULL; + char reg_name[16]; int arg_sz, len, reg_off; long off; - if (sscanf(arg_str, " %d @ \[ %m[a-z0-9], %ld ] %n", &arg_sz, ®_name, &off, &len) == 3) { + if (sscanf(arg_str, " %d @ \[ %15[a-z0-9], %ld ] %n", &arg_sz, reg_name, &off, &len) == 3) { /* Memory dereference case, e.g., -4@[sp, 96] */ arg->arg_type = USDT_ARG_REG_DEREF; arg->val_off = off; reg_off = calc_pt_regs_off(reg_name); - free(reg_name); if (reg_off < 0) return reg_off; arg->reg_off = reg_off; - } else if (sscanf(arg_str, " %d @ \[ %m[a-z0-9] ] %n", &arg_sz, ®_name, &len) == 2) { + } else if (sscanf(arg_str, " %d @ \[ %15[a-z0-9] ] %n", &arg_sz, reg_name, &len) == 2) { /* Memory dereference case, e.g., -4@[sp] */ arg->arg_type = USDT_ARG_REG_DEREF; arg->val_off = 0; reg_off = calc_pt_regs_off(reg_name); - free(reg_name); if (reg_off < 0) return reg_off; arg->reg_off = reg_off; @@ -1375,12 +1373,11 @@ static int parse_usdt_arg(const char *arg_str, int arg_num, struct usdt_arg_spec arg->arg_type = USDT_ARG_CONST; arg->val_off = off; arg->reg_off = 0; - } else if (sscanf(arg_str, " %d @ %m[a-z0-9] %n", &arg_sz, ®_name, &len) == 2) { + } else if (sscanf(arg_str, " %d @ %15[a-z0-9] %n", &arg_sz, reg_name, &len) == 2) { /* Register read case, e.g., -8@x4 */ arg->arg_type = USDT_ARG_REG; arg->val_off = 0; reg_off = calc_pt_regs_off(reg_name); - free(reg_name); if (reg_off < 0) return reg_off; arg->reg_off = reg_off; diff --git a/tools/testing/selftests/bpf/DENYLIST b/tools/testing/selftests/bpf/DENYLIST index 939de574fc7f..f748f2c33b22 100644 --- a/tools/testing/selftests/bpf/DENYLIST +++ b/tools/testing/selftests/bpf/DENYLIST @@ -1,6 +1,7 @@ # TEMPORARY +# Alphabetical order get_stack_raw_tp # spams with kernel warnings until next bpf -> bpf-next merge -stacktrace_build_id_nmi stacktrace_build_id +stacktrace_build_id_nmi task_fd_query_rawtp varlen diff --git a/tools/testing/selftests/bpf/DENYLIST.s390x b/tools/testing/selftests/bpf/DENYLIST.s390x index 17e074eb42b8..520f12229b98 100644 --- a/tools/testing/selftests/bpf/DENYLIST.s390x +++ 
b/tools/testing/selftests/bpf/DENYLIST.s390x @@ -1,13 +1,18 @@ # TEMPORARY +# Alphabetical order atomics # attach(add): actual -524 <= expected 0 (trampoline) -bpf_iter_setsockopt # JIT does not support calling kernel function (kfunc) bloom_filter_map # failed to find kernel BTF type ID of '__x64_sys_getpgid': -3 (?) -bpf_tcp_ca # JIT does not support calling kernel function (kfunc) +bpf_cookie # failed to open_and_load program: -524 (trampoline) +bpf_iter_setsockopt # JIT does not support calling kernel function (kfunc) bpf_loop # attaches to __x64_sys_nanosleep bpf_mod_race # BPF trampoline bpf_nf # JIT does not support calling kernel function +bpf_tcp_ca # JIT does not support calling kernel function (kfunc) +cb_refs # expected error message unexpected error: -524 (trampoline) +cgroup_hierarchical_stats # JIT does not support calling kernel function (kfunc) core_read_macros # unknown func bpf_probe_read#4 (overlapping) d_path # failed to auto-attach program 'prog_stat': -524 (trampoline) +deny_namespace # failed to attach: ERROR: strerror_r(-524)=22 (trampoline) dummy_st_ops # test_run unexpected error: -524 (errno 524) (trampoline) fentry_fexit # fentry attach failed: -524 (trampoline) fentry_test # fentry_first_attach unexpected error: -524 (trampoline) @@ -18,19 +23,28 @@ fexit_test # fexit_first_attach unexpected error: get_func_args_test # trampoline get_func_ip_test # get_func_ip_test__attach unexpected error: -524 (trampoline) get_stack_raw_tp # user_stack corrupted user stack (no backchain userspace) +htab_update # failed to attach: ERROR: strerror_r(-524)=22 (trampoline) kfree_skb # attach fentry unexpected error: -524 (trampoline) kfunc_call # 'bpf_prog_active': not found in kernel BTF (?) +kfunc_dynptr_param # JIT does not support calling kernel function (kfunc) +kprobe_multi_test # relies on fentry ksyms_module # test_ksyms_module__open_and_load unexpected error: -9 (?) ksyms_module_libbpf # JIT does not support calling kernel function (kfunc) ksyms_module_lskel # test_ksyms_module_lskel__open_and_load unexpected error: -9 (?) +libbpf_get_fd_by_id_opts # failed to attach: ERROR: strerror_r(-524)=22 (trampoline) +lookup_key # JIT does not support calling kernel function (kfunc) +lru_bug # prog 'printk': failed to auto-attach: -524 +map_kptr # failed to open_and_load program: -524 (trampoline) modify_return # modify_return attach failed: -524 (trampoline) module_attach # skel_attach skeleton attach failed: -524 (trampoline) mptcp -kprobe_multi_test # relies on fentry netcnt # failed to load BPF skeleton 'netcnt_prog': -7 (?) probe_user # check_kprobe_res wrong kprobe res from probe read (?) recursion # skel_attach unexpected error: -524 (trampoline) ringbuf # skel_load skeleton load failed (?) +select_reuseport # intermittently fails on new s390x setup +send_signal # intermittently fails to receive signal +setget_sockopt # attach unexpected error: -524 (trampoline) sk_assign # Can't read on server: Invalid argument (?) sk_lookup # endianness problem sk_storage_tracing # test_sk_storage_tracing__attach unexpected error: -524 (trampoline) @@ -52,26 +66,15 @@ timer_mim # failed to auto-attach program 'test1' trace_ext # failed to auto-attach program 'test_pkt_md_access_new': -524 (trampoline) trace_printk # trace_printk__load unexpected error: -2 (errno 2) (?) trace_vprintk # trace_vprintk__open_and_load unexpected error: -9 (?) 
+tracing_struct # failed to auto-attach: -524 (trampoline) trampoline_count # prog 'prog1': failed to attach: ERROR: strerror_r(-524)=22 (trampoline) +unpriv_bpf_disabled # fentry +user_ringbuf # failed to find kernel BTF type ID of '__s390x_sys_prctl': -3 (?) verif_stats # trace_vprintk__open_and_load unexpected error: -9 (?) +verify_pkcs7_sig # JIT does not support calling kernel function (kfunc) vmlinux # failed to auto-attach program 'handle__fentry': -524 (trampoline) xdp_adjust_tail # case-128 err 0 errno 28 retval 1 size 128 expect-size 3520 (?) xdp_bonding # failed to auto-attach program 'trace_on_entry': -524 (trampoline) xdp_bpf2bpf # failed to auto-attach program 'trace_on_entry': -524 (trampoline) -map_kptr # failed to open_and_load program: -524 (trampoline) -bpf_cookie # failed to open_and_load program: -524 (trampoline) xdp_do_redirect # prog_run_max_size unexpected error: -22 (errno 22) -send_signal # intermittently fails to receive signal -select_reuseport # intermittently fails on new s390x setup xdp_synproxy # JIT does not support calling kernel function (kfunc) -unpriv_bpf_disabled # fentry -lru_bug # prog 'printk': failed to auto-attach: -524 -setget_sockopt # attach unexpected error: -524 (trampoline) -cb_refs # expected error message unexpected error: -524 (trampoline) -cgroup_hierarchical_stats # JIT does not support calling kernel function (kfunc) -htab_update # failed to attach: ERROR: strerror_r(-524)=22 (trampoline) -tracing_struct # failed to auto-attach: -524 (trampoline) -user_ringbuf # failed to find kernel BTF type ID of '__s390x_sys_prctl': -3 (?) -lookup_key # JIT does not support calling kernel function (kfunc) -verify_pkcs7_sig # JIT does not support calling kernel function (kfunc) -kfunc_dynptr_param # JIT does not support calling kernel function (kfunc) diff --git a/tools/testing/selftests/bpf/README.rst b/tools/testing/selftests/bpf/README.rst index d3c6b3da0bb1..822548d0f2ae 100644 --- a/tools/testing/selftests/bpf/README.rst +++ b/tools/testing/selftests/bpf/README.rst @@ -14,10 +14,11 @@ It's now possible to run the selftests using ``tools/testing/selftests/bpf/vmtes The script tries to ensure that the tests are run with the same environment as they would be run post-submit in the CI used by the Maintainers. -This script downloads a suitable Kconfig and VM userspace image from the system used by -the CI. It builds the kernel (without overwriting your existing Kconfig), recompiles the -bpf selftests, runs them (by default ``tools/testing/selftests/bpf/test_progs``) and -saves the resulting output (by default in ``~/.bpf_selftests``). +This script uses the in-tree kernel configuration and downloads a VM userspace +image from the system used by the CI. It builds the kernel (without overwriting +your existing Kconfig), recompiles the bpf selftests, runs them (by default +``tools/testing/selftests/bpf/test_progs``) and saves the resulting output (by +default in ``~/.bpf_selftests``). Script dependencies: - clang (preferably built from sources, https://github.com/llvm/llvm-project); @@ -26,7 +27,7 @@ Script dependencies: - docutils (for ``rst2man``); - libcap-devel. -For more information on about using the script, run: +For more information about using the script, run: .. 
code-block:: console diff --git a/tools/testing/selftests/bpf/prog_tests/bpf_iter.c b/tools/testing/selftests/bpf/prog_tests/bpf_iter.c index 3369c5ec3a17..c39d40f4b268 100644 --- a/tools/testing/selftests/bpf/prog_tests/bpf_iter.c +++ b/tools/testing/selftests/bpf/prog_tests/bpf_iter.c @@ -3,6 +3,7 @@ #include <test_progs.h> #include <unistd.h> #include <sys/syscall.h> +#include <task_local_storage_helpers.h> #include "bpf_iter_ipv6_route.skel.h" #include "bpf_iter_netlink.skel.h" #include "bpf_iter_bpf_map.skel.h" @@ -175,11 +176,6 @@ static void test_bpf_map(void) bpf_iter_bpf_map__destroy(skel); } -static int pidfd_open(pid_t pid, unsigned int flags) -{ - return syscall(SYS_pidfd_open, pid, flags); -} - static void check_bpf_link_info(const struct bpf_program *prog) { LIBBPF_OPTS(bpf_iter_attach_opts, opts); @@ -295,8 +291,8 @@ static void test_task_pidfd(void) union bpf_iter_link_info linfo; int pidfd; - pidfd = pidfd_open(getpid(), 0); - if (!ASSERT_GT(pidfd, 0, "pidfd_open")) + pidfd = sys_pidfd_open(getpid(), 0); + if (!ASSERT_GT(pidfd, 0, "sys_pidfd_open")) return; memset(&linfo, 0, sizeof(linfo)); @@ -1498,7 +1494,6 @@ static noinline int trigger_func(int arg) static void test_task_vma_offset_common(struct bpf_iter_attach_opts *opts, bool one_proc) { struct bpf_iter_vma_offset *skel; - struct bpf_link *link; char buf[16] = {}; int iter_fd, len; int pgsz, shift; @@ -1513,11 +1508,11 @@ static void test_task_vma_offset_common(struct bpf_iter_attach_opts *opts, bool ; skel->bss->page_shift = shift; - link = bpf_program__attach_iter(skel->progs.get_vma_offset, opts); - if (!ASSERT_OK_PTR(link, "attach_iter")) - return; + skel->links.get_vma_offset = bpf_program__attach_iter(skel->progs.get_vma_offset, opts); + if (!ASSERT_OK_PTR(skel->links.get_vma_offset, "attach_iter")) + goto exit; - iter_fd = bpf_iter_create(bpf_link__fd(link)); + iter_fd = bpf_iter_create(bpf_link__fd(skel->links.get_vma_offset)); if (!ASSERT_GT(iter_fd, 0, "create_iter")) goto exit; @@ -1535,7 +1530,7 @@ static void test_task_vma_offset_common(struct bpf_iter_attach_opts *opts, bool close(iter_fd); exit: - bpf_link__destroy(link); + bpf_iter_vma_offset__destroy(skel); } static void test_task_vma_offset(void) diff --git a/tools/testing/selftests/bpf/prog_tests/bpf_nf.c b/tools/testing/selftests/bpf/prog_tests/bpf_nf.c index 8a838ea8bdf3..c8ba4009e4ab 100644 --- a/tools/testing/selftests/bpf/prog_tests/bpf_nf.c +++ b/tools/testing/selftests/bpf/prog_tests/bpf_nf.c @@ -49,14 +49,14 @@ out: static void test_bpf_nf_ct(int mode) { - const char *iptables = "iptables -t raw %s PREROUTING -j CONNMARK --set-mark 42/0"; + const char *iptables = "iptables-legacy -t raw %s PREROUTING -j CONNMARK --set-mark 42/0"; int srv_fd = -1, client_fd = -1, srv_client_fd = -1; struct sockaddr_in peer_addr = {}; struct test_bpf_nf *skel; int prog_fd, err; socklen_t len; u16 srv_port; - char cmd[64]; + char cmd[128]; LIBBPF_OPTS(bpf_test_run_opts, topts, .data_in = &pkt_v4, .data_size_in = sizeof(pkt_v4), @@ -69,7 +69,7 @@ static void test_bpf_nf_ct(int mode) /* Enable connection tracking */ snprintf(cmd, sizeof(cmd), iptables, "-A"); - if (!ASSERT_OK(system(cmd), "iptables")) + if (!ASSERT_OK(system(cmd), cmd)) goto end; srv_port = (mode == TEST_XDP) ? 
5005 : 5006; diff --git a/tools/testing/selftests/bpf/prog_tests/kprobe_multi_test.c b/tools/testing/selftests/bpf/prog_tests/kprobe_multi_test.c index d457a55ff408..287b3ac40227 100644 --- a/tools/testing/selftests/bpf/prog_tests/kprobe_multi_test.c +++ b/tools/testing/selftests/bpf/prog_tests/kprobe_multi_test.c @@ -325,7 +325,7 @@ static bool symbol_equal(const void *key1, const void *key2, void *ctx __maybe_u static int get_syms(char ***symsp, size_t *cntp) { size_t cap = 0, cnt = 0, i; - char *name, **syms = NULL; + char *name = NULL, **syms = NULL; struct hashmap *map; char buf[256]; FILE *f; @@ -352,6 +352,8 @@ static int get_syms(char ***symsp, size_t *cntp) /* skip modules */ if (strchr(buf, '[')) continue; + + free(name); if (sscanf(buf, "%ms$*[^\n]\n", &name) != 1) continue; /* @@ -369,32 +371,32 @@ static int get_syms(char ***symsp, size_t *cntp) if (!strncmp(name, "__ftrace_invalid_address__", sizeof("__ftrace_invalid_address__") - 1)) continue; + err = hashmap__add(map, name, NULL); - if (err) { - free(name); - if (err == -EEXIST) - continue; + if (err == -EEXIST) + continue; + if (err) goto error; - } + err = libbpf_ensure_mem((void **) &syms, &cap, sizeof(*syms), cnt + 1); - if (err) { - free(name); + if (err) goto error; - } - syms[cnt] = name; - cnt++; + + syms[cnt++] = name; + name = NULL; } *symsp = syms; *cntp = cnt; error: + free(name); fclose(f); hashmap__free(map); if (err) { for (i = 0; i < cnt; i++) - free(syms[cnt]); + free(syms[i]); free(syms); } return err; diff --git a/tools/testing/selftests/bpf/prog_tests/libbpf_get_fd_by_id_opts.c b/tools/testing/selftests/bpf/prog_tests/libbpf_get_fd_by_id_opts.c new file mode 100644 index 000000000000..25e5dfa9c315 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/libbpf_get_fd_by_id_opts.c @@ -0,0 +1,87 @@ +// SPDX-License-Identifier: GPL-2.0 + +/* + * Copyright (C) 2022 Huawei Technologies Duesseldorf GmbH + * + * Author: Roberto Sassu <roberto.sassu@huawei.com> + */ + +#include <test_progs.h> + +#include "test_libbpf_get_fd_by_id_opts.skel.h" + +void test_libbpf_get_fd_by_id_opts(void) +{ + struct test_libbpf_get_fd_by_id_opts *skel; + struct bpf_map_info info_m = {}; + __u32 len = sizeof(info_m), value; + int ret, zero = 0, fd = -1; + LIBBPF_OPTS(bpf_get_fd_by_id_opts, fd_opts_rdonly, + .open_flags = BPF_F_RDONLY, + ); + + skel = test_libbpf_get_fd_by_id_opts__open_and_load(); + if (!ASSERT_OK_PTR(skel, + "test_libbpf_get_fd_by_id_opts__open_and_load")) + return; + + ret = test_libbpf_get_fd_by_id_opts__attach(skel); + if (!ASSERT_OK(ret, "test_libbpf_get_fd_by_id_opts__attach")) + goto close_prog; + + ret = bpf_obj_get_info_by_fd(bpf_map__fd(skel->maps.data_input), + &info_m, &len); + if (!ASSERT_OK(ret, "bpf_obj_get_info_by_fd")) + goto close_prog; + + fd = bpf_map_get_fd_by_id(info_m.id); + if (!ASSERT_LT(fd, 0, "bpf_map_get_fd_by_id")) + goto close_prog; + + fd = bpf_map_get_fd_by_id_opts(info_m.id, NULL); + if (!ASSERT_LT(fd, 0, "bpf_map_get_fd_by_id_opts")) + goto close_prog; + + fd = bpf_map_get_fd_by_id_opts(info_m.id, &fd_opts_rdonly); + if (!ASSERT_GE(fd, 0, "bpf_map_get_fd_by_id_opts")) + goto close_prog; + + /* Map lookup should work with read-only fd. */ + ret = bpf_map_lookup_elem(fd, &zero, &value); + if (!ASSERT_OK(ret, "bpf_map_lookup_elem")) + goto close_prog; + + if (!ASSERT_EQ(value, 0, "map value mismatch")) + goto close_prog; + + /* Map update should not work with read-only fd. 
*/ + ret = bpf_map_update_elem(fd, &zero, &len, BPF_ANY); + if (!ASSERT_LT(ret, 0, "bpf_map_update_elem")) + goto close_prog; + + /* Map update should work with read-write fd. */ + ret = bpf_map_update_elem(bpf_map__fd(skel->maps.data_input), &zero, + &len, BPF_ANY); + if (!ASSERT_OK(ret, "bpf_map_update_elem")) + goto close_prog; + + /* Prog get fd with opts set should not work (no kernel support). */ + ret = bpf_prog_get_fd_by_id_opts(0, &fd_opts_rdonly); + if (!ASSERT_EQ(ret, -EINVAL, "bpf_prog_get_fd_by_id_opts")) + goto close_prog; + + /* Link get fd with opts set should not work (no kernel support). */ + ret = bpf_link_get_fd_by_id_opts(0, &fd_opts_rdonly); + if (!ASSERT_EQ(ret, -EINVAL, "bpf_link_get_fd_by_id_opts")) + goto close_prog; + + /* BTF get fd with opts set should not work (no kernel support). */ + ret = bpf_btf_get_fd_by_id_opts(0, &fd_opts_rdonly); + ASSERT_EQ(ret, -EINVAL, "bpf_btf_get_fd_by_id_opts"); + +close_prog: + if (fd >= 0) + close(fd); + + test_libbpf_get_fd_by_id_opts__destroy(skel); +} diff --git a/tools/testing/selftests/bpf/prog_tests/map_kptr.c b/tools/testing/selftests/bpf/prog_tests/map_kptr.c index fdcea7a61491..0d66b1524208 100644 --- a/tools/testing/selftests/bpf/prog_tests/map_kptr.c +++ b/tools/testing/selftests/bpf/prog_tests/map_kptr.c @@ -105,7 +105,7 @@ static void test_map_kptr_success(bool test_run) ASSERT_OK(opts.retval, "test_map_kptr_ref2 retval"); if (test_run) - return; + goto exit; ret = bpf_map__update_elem(skel->maps.array_map, &key, sizeof(key), buf, sizeof(buf), 0); @@ -132,6 +132,7 @@ static void test_map_kptr_success(bool test_run) ret = bpf_map__delete_elem(skel->maps.lru_hash_map, &key, sizeof(key), 0); ASSERT_OK(ret, "lru_hash_map delete"); +exit: map_kptr__destroy(skel); } diff --git a/tools/testing/selftests/bpf/prog_tests/tracing_struct.c b/tools/testing/selftests/bpf/prog_tests/tracing_struct.c index d5022b91d1e4..48dc9472e160 100644 --- a/tools/testing/selftests/bpf/prog_tests/tracing_struct.c +++ b/tools/testing/selftests/bpf/prog_tests/tracing_struct.c @@ -15,7 +15,7 @@ static void test_fentry(void) err = tracing_struct__attach(skel); if (!ASSERT_OK(err, "tracing_struct__attach")) - return; + goto destroy_skel; ASSERT_OK(trigger_module_test_read(256), "trigger_read"); @@ -54,6 +54,7 @@ static void test_fentry(void) ASSERT_EQ(skel->bss->t5_ret, 1, "t5 ret"); tracing_struct__detach(skel); +destroy_skel: tracing_struct__destroy(skel); } diff --git a/tools/testing/selftests/bpf/prog_tests/xdp_adjust_tail.c b/tools/testing/selftests/bpf/prog_tests/xdp_adjust_tail.c index 9b9cf8458adf..39973ea1ce43 100644 --- a/tools/testing/selftests/bpf/prog_tests/xdp_adjust_tail.c +++ b/tools/testing/selftests/bpf/prog_tests/xdp_adjust_tail.c @@ -18,7 +18,7 @@ static void test_xdp_adjust_tail_shrink(void) ); err = bpf_prog_test_load(file, BPF_PROG_TYPE_XDP, &obj, &prog_fd); - if (ASSERT_OK(err, "test_xdp_adjust_tail_shrink")) + if (!ASSERT_OK(err, "test_xdp_adjust_tail_shrink")) return; err = bpf_prog_test_run_opts(prog_fd, &topts); @@ -53,7 +53,7 @@ static void test_xdp_adjust_tail_grow(void) ); err = bpf_prog_test_load(file, BPF_PROG_TYPE_XDP, &obj, &prog_fd); - if (ASSERT_OK(err, "test_xdp_adjust_tail_grow")) + if (!ASSERT_OK(err, "test_xdp_adjust_tail_grow")) return; err = bpf_prog_test_run_opts(prog_fd, &topts); @@ -63,6 +63,7 @@ static void test_xdp_adjust_tail_grow(void) expect_sz = sizeof(pkt_v6) + 40; /* Test grow with 40 bytes */ topts.data_in = &pkt_v6; topts.data_size_in = sizeof(pkt_v6); + topts.data_size_out = sizeof(buf); err 
= bpf_prog_test_run_opts(prog_fd, &topts); ASSERT_OK(err, "ipv6"); ASSERT_EQ(topts.retval, XDP_TX, "ipv6 retval"); @@ -89,7 +90,7 @@ static void test_xdp_adjust_tail_grow2(void) ); err = bpf_prog_test_load(file, BPF_PROG_TYPE_XDP, &obj, &prog_fd); - if (ASSERT_OK(err, "test_xdp_adjust_tail_grow")) + if (!ASSERT_OK(err, "test_xdp_adjust_tail_grow")) return; /* Test case-64 */ diff --git a/tools/testing/selftests/bpf/prog_tests/xdp_synproxy.c b/tools/testing/selftests/bpf/prog_tests/xdp_synproxy.c index 75550a40e029..c72083885b6d 100644 --- a/tools/testing/selftests/bpf/prog_tests/xdp_synproxy.c +++ b/tools/testing/selftests/bpf/prog_tests/xdp_synproxy.c @@ -94,12 +94,12 @@ static void test_synproxy(bool xdp) SYS("sysctl -w net.ipv4.tcp_syncookies=2"); SYS("sysctl -w net.ipv4.tcp_timestamps=1"); SYS("sysctl -w net.netfilter.nf_conntrack_tcp_loose=0"); - SYS("iptables -t raw -I PREROUTING \ + SYS("iptables-legacy -t raw -I PREROUTING \ -i tmp1 -p tcp -m tcp --syn --dport 8080 -j CT --notrack"); - SYS("iptables -t filter -A INPUT \ + SYS("iptables-legacy -t filter -A INPUT \ -i tmp1 -p tcp -m tcp --dport 8080 -m state --state INVALID,UNTRACKED \ -j SYNPROXY --sack-perm --timestamp --wscale 7 --mss 1460"); - SYS("iptables -t filter -A INPUT \ + SYS("iptables-legacy -t filter -A INPUT \ -i tmp1 -m state --state INVALID -j DROP"); ctrl_file = SYS_OUT("./xdp_synproxy --iface tmp1 --ports 8080 \ diff --git a/tools/testing/selftests/bpf/progs/btf_dump_test_case_padding.c b/tools/testing/selftests/bpf/progs/btf_dump_test_case_padding.c index f2661c8d2d90..7cb522d22a66 100644 --- a/tools/testing/selftests/bpf/progs/btf_dump_test_case_padding.c +++ b/tools/testing/selftests/bpf/progs/btf_dump_test_case_padding.c @@ -102,12 +102,21 @@ struct zone { struct zone_padding __pad__; }; +/* ----- START-EXPECTED-OUTPUT ----- */ +struct padding_wo_named_members { + long: 64; + long: 64; +}; + +/* ------ END-EXPECTED-OUTPUT ------ */ + int f(struct { struct padded_implicitly _1; struct padded_explicitly _2; struct padded_a_lot _3; struct padded_cache_line _4; struct zone _5; + struct padding_wo_named_members _6; } *_) { return 0; diff --git a/tools/testing/selftests/bpf/progs/test_libbpf_get_fd_by_id_opts.c b/tools/testing/selftests/bpf/progs/test_libbpf_get_fd_by_id_opts.c new file mode 100644 index 000000000000..f5ac5f3e8919 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/test_libbpf_get_fd_by_id_opts.c @@ -0,0 +1,36 @@ +// SPDX-License-Identifier: GPL-2.0 + +/* + * Copyright (C) 2022 Huawei Technologies Duesseldorf GmbH + * + * Author: Roberto Sassu <roberto.sassu@huawei.com> + */ + +#include "vmlinux.h" +#include <errno.h> +#include <bpf/bpf_helpers.h> +#include <bpf/bpf_tracing.h> + +/* From include/linux/mm.h. 
*/ +#define FMODE_WRITE 0x2 + +struct { + __uint(type, BPF_MAP_TYPE_ARRAY); + __uint(max_entries, 1); + __type(key, __u32); + __type(value, __u32); +} data_input SEC(".maps"); + +char _license[] SEC("license") = "GPL"; + +SEC("lsm/bpf_map") +int BPF_PROG(check_access, struct bpf_map *map, fmode_t fmode) +{ + if (map != (struct bpf_map *)&data_input) + return 0; + + if (fmode & FMODE_WRITE) + return -EACCES; + + return 0; +} diff --git a/tools/testing/selftests/bpf/task_local_storage_helpers.h b/tools/testing/selftests/bpf/task_local_storage_helpers.h index 711d5abb7d51..281f86132766 100644 --- a/tools/testing/selftests/bpf/task_local_storage_helpers.h +++ b/tools/testing/selftests/bpf/task_local_storage_helpers.h @@ -7,8 +7,12 @@ #include <sys/types.h> #ifndef __NR_pidfd_open +#ifdef __alpha__ +#define __NR_pidfd_open 544 +#else #define __NR_pidfd_open 434 #endif +#endif static inline int sys_pidfd_open(pid_t pid, unsigned int flags) { diff --git a/tools/testing/selftests/bpf/test_verifier.c b/tools/testing/selftests/bpf/test_verifier.c index 2dbcbf363c18..9c7091100b7f 100644 --- a/tools/testing/selftests/bpf/test_verifier.c +++ b/tools/testing/selftests/bpf/test_verifier.c @@ -68,7 +68,6 @@ #define SKIP_INSNS() BPF_RAW_INSN(0xde, 0xa, 0xd, 0xbeef, 0xdeadbeef) #define DEFAULT_LIBBPF_LOG_LEVEL 4 -#define VERBOSE_LIBBPF_LOG_LEVEL 1 #define F_NEEDS_EFFICIENT_UNALIGNED_ACCESS (1 << 0) #define F_LOAD_WITH_STRICT_ALIGNMENT (1 << 1) @@ -81,6 +80,7 @@ static bool unpriv_disabled = false; static int skips; static bool verbose = false; +static int verif_log_level = 0; struct kfunc_btf_id_pair { const char *kfunc; @@ -759,7 +759,7 @@ static int load_btf_spec(__u32 *types, int types_len, .log_buf = bpf_vlog, .log_size = sizeof(bpf_vlog), .log_level = (verbose - ? VERBOSE_LIBBPF_LOG_LEVEL + ? 
verif_log_level : DEFAULT_LIBBPF_LOG_LEVEL), ); @@ -1491,7 +1491,7 @@ static void do_test_single(struct bpf_test *test, bool unpriv, opts.expected_attach_type = test->expected_attach_type; if (verbose) - opts.log_level = VERBOSE_LIBBPF_LOG_LEVEL; + opts.log_level = verif_log_level | 4; /* force stats */ else if (expected_ret == VERBOSE_ACCEPT) opts.log_level = 2; else @@ -1746,6 +1746,13 @@ int main(int argc, char **argv) if (argc > 1 && strcmp(argv[1], "-v") == 0) { arg++; verbose = true; + verif_log_level = 1; + argc--; + } + if (argc > 1 && strcmp(argv[1], "-vv") == 0) { + arg++; + verbose = true; + verif_log_level = 2; argc--; } diff --git a/tools/testing/selftests/bpf/veristat.c b/tools/testing/selftests/bpf/veristat.c index b0d83a28e348..973cbf6af323 100644 --- a/tools/testing/selftests/bpf/veristat.c +++ b/tools/testing/selftests/bpf/veristat.c @@ -509,6 +509,28 @@ static int parse_verif_log(char * const buf, size_t buf_sz, struct verif_stats * return 0; } +static void fixup_obj(struct bpf_object *obj) +{ + struct bpf_map *map; + + bpf_object__for_each_map(map, obj) { + /* disable pinning */ + bpf_map__set_pin_path(map, NULL); + + /* fix up map size, if necessary */ + switch (bpf_map__type(map)) { + case BPF_MAP_TYPE_SK_STORAGE: + case BPF_MAP_TYPE_TASK_STORAGE: + case BPF_MAP_TYPE_INODE_STORAGE: + case BPF_MAP_TYPE_CGROUP_STORAGE: + break; + default: + if (bpf_map__max_entries(map) == 0) + bpf_map__set_max_entries(map, 1); + } + } +} + static int process_prog(const char *filename, struct bpf_object *obj, struct bpf_program *prog) { const char *prog_name = bpf_program__name(prog); @@ -543,6 +565,9 @@ static int process_prog(const char *filename, struct bpf_object *obj, struct bpf } verif_log_buf[0] = '\0'; + /* increase chances of successful BPF object loading */ + fixup_obj(obj); + err = bpf_object__load(obj); env.progs_processed++; @@ -1104,17 +1129,21 @@ static void output_comp_stats(const struct verif_stats *base, const struct verif else snprintf(diff_buf, sizeof(diff_buf), "%s", "MISMATCH"); } else { + double p = 0.0; + snprintf(base_buf, sizeof(base_buf), "%ld", base_val); snprintf(comp_buf, sizeof(comp_buf), "%ld", comp_val); diff_val = comp_val - base_val; if (base == &fallback_stats || comp == &fallback_stats || base_val == 0) { - snprintf(diff_buf, sizeof(diff_buf), "%+ld (%+.2lf%%)", - diff_val, comp_val < base_val ? -100.0 : 100.0); + if (comp_val == base_val) + p = 0.0; /* avoid +0 (+100%) case */ + else + p = comp_val < base_val ? 
-100.0 : 100.0; } else { - snprintf(diff_buf, sizeof(diff_buf), "%+ld (%+.2lf%%)", - diff_val, diff_val * 100.0 / base_val); + p = diff_val * 100.0 / base_val; } + snprintf(diff_buf, sizeof(diff_buf), "%+ld (%+.2lf%%)", diff_val, p); } switch (fmt) { diff --git a/tools/testing/selftests/net/forwarding/bridge_igmp.sh b/tools/testing/selftests/net/forwarding/bridge_igmp.sh index 1162836f8f32..2aa66d2a1702 100755 --- a/tools/testing/selftests/net/forwarding/bridge_igmp.sh +++ b/tools/testing/selftests/net/forwarding/bridge_igmp.sh @@ -96,9 +96,6 @@ cleanup() switch_destroy - # Always cleanup the mcast group - ip address del dev $h2 $TEST_GROUP/32 2>&1 1>/dev/null - h2_destroy h1_destroy diff --git a/tools/testing/selftests/net/forwarding/bridge_vlan_mcast.sh b/tools/testing/selftests/net/forwarding/bridge_vlan_mcast.sh index 8748d1b1d95b..72dfbeaf56b9 100755 --- a/tools/testing/selftests/net/forwarding/bridge_vlan_mcast.sh +++ b/tools/testing/selftests/net/forwarding/bridge_vlan_mcast.sh @@ -59,6 +59,9 @@ switch_create() switch_destroy() { + tc qdisc del dev $swp2 clsact + tc qdisc del dev $swp1 clsact + ip link set dev $swp2 down ip link set dev $swp1 down |
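The veristat change above improves the odds that bpf_object__load() succeeds when the only goal is to run programs through the verifier. A standalone sketch of the same idea (not part of the patch; load_for_verification() is a made-up helper, the object path is a placeholder, and unlike the real fixup_obj() it does not special-case the *_STORAGE map types, which are expected to keep max_entries at 0):

/* Open a BPF object, neutralise pinning and size-less maps, then load it
 * purely so the verifier runs over its programs.
 */
#include <errno.h>
#include <bpf/libbpf.h>

static int load_for_verification(const char *path)
{
	struct bpf_object *obj;
	struct bpf_map *map;
	int err;

	obj = bpf_object__open_file(path, NULL);
	if (!obj)
		return -errno;

	bpf_object__for_each_map(map, obj) {
		/* don't require or touch anything pinned in bpffs */
		bpf_map__set_pin_path(map, NULL);
		/* give size-less maps a nominal capacity so map creation succeeds */
		if (bpf_map__max_entries(map) == 0)
			bpf_map__set_max_entries(map, 1);
	}

	err = bpf_object__load(obj);	/* this is where verification happens */
	bpf_object__close(obj);
	return err;
}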