718 files changed, 26703 insertions, 12432 deletions
diff --git a/Documentation/core-api/packing.rst b/Documentation/core-api/packing.rst index 3ed13bc9a195..821691f23c54 100644 --- a/Documentation/core-api/packing.rst +++ b/Documentation/core-api/packing.rst @@ -151,6 +151,77 @@ the more significant 4-byte word. We always think of our offsets as if there were no quirk, and we translate them afterwards, before accessing the memory region. +Note on buffer lengths not multiple of 4 +---------------------------------------- + +To deal with memory layout quirks where groups of 4 bytes are laid out "little +endian" relative to each other, but "big endian" within the group itself, the +concept of groups of 4 bytes is intrinsic to the packing API (not to be +confused with the memory access itself, which is performed byte by byte). + +With buffer lengths that are not a multiple of 4, one group will be incomplete. +Depending on the quirks, this may lead to discontinuities in the bit fields +accessible through the buffer. The packing API assumes discontinuities were not +the intention of the memory layout, so it avoids them by logically shortening +the most significant group of 4 octets to the number of octets +actually available. + +An example with a 31-byte buffer is given below. Physical buffer offsets are +implicit, and increase from left to right within a group, and from top to +bottom within a column. + +No quirks: + +:: + + 30 29 28 | Group 7 (most significant) + 27 26 25 24 | Group 6 + 23 22 21 20 | Group 5 + 19 18 17 16 | Group 4 + 15 14 13 12 | Group 3 + 11 10 9 8 | Group 2 + 7 6 5 4 | Group 1 + 3 2 1 0 | Group 0 (least significant) + +QUIRK_LSW32_IS_FIRST: + +:: + + 3 2 1 0 | Group 0 (least significant) + 7 6 5 4 | Group 1 + 11 10 9 8 | Group 2 + 15 14 13 12 | Group 3 + 19 18 17 16 | Group 4 + 23 22 21 20 | Group 5 + 27 26 25 24 | Group 6 + 30 29 28 | Group 7 (most significant) + +QUIRK_LITTLE_ENDIAN: + +:: + + 28 29 30 | Group 7 (most significant) + 24 25 26 27 | Group 6 + 20 21 22 23 | Group 5 + 16 17 18 19 | Group 4 + 12 13 14 15 | Group 3 + 8 9 10 11 | Group 2 + 4 5 6 7 | Group 1 + 0 1 2 3 | Group 0 (least significant) + +QUIRK_LITTLE_ENDIAN | QUIRK_LSW32_IS_FIRST: + +:: + + 0 1 2 3 | Group 0 (least significant) + 4 5 6 7 | Group 1 + 8 9 10 11 | Group 2 + 12 13 14 15 | Group 3 + 16 17 18 19 | Group 4 + 20 21 22 23 | Group 5 + 24 25 26 27 | Group 6 + 28 29 30 | Group 7 (most significant) + Intended use ------------
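As a concrete illustration of the shortened most significant group, here is a minimal sketch using the pack()/unpack() helpers declared in <linux/packing.h>; the bit range, value, and quirk combination are illustrative only:

.. code-block:: c

    #include <linux/packing.h>

    static int packing_demo(void)
    {
        /* 31-byte buffer: the most significant group is logically
         * shortened to 3 octets, as described above.
         */
        u8 buf[31] = {};
        u64 readback = 0;
        int err;

        /* Bit offsets are always expressed as if no quirk were set;
         * the quirks only decide where the bytes land physically.
         */
        err = pack(buf, 0xcafe, 55, 40, sizeof(buf),
                   QUIRK_LITTLE_ENDIAN | QUIRK_LSW32_IS_FIRST);
        if (err)
            return err;

        /* on success, readback ends up as 0xcafe again */
        return unpack(buf, &readback, 55, 40, sizeof(buf),
                      QUIRK_LITTLE_ENDIAN | QUIRK_LSW32_IS_FIRST);
    }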
diff --git a/Documentation/devicetree/bindings/net/dsa/realtek.yaml b/Documentation/devicetree/bindings/net/dsa/realtek.yaml index 70b6bda3cf98..f348e66fb515 100644 --- a/Documentation/devicetree/bindings/net/dsa/realtek.yaml +++ b/Documentation/devicetree/bindings/net/dsa/realtek.yaml @@ -147,7 +147,7 @@ examples: #include <dt-bindings/interrupt-controller/irq.h> platform { - switch { + ethernet-switch { compatible = "realtek,rtl8366rb"; /* 22 = MDIO (has input reads), 21 = MDC (clock, output only) */ mdc-gpios = <&gpio0 21 GPIO_ACTIVE_HIGH>; @@ -163,35 +163,35 @@ examples: #interrupt-cells = <1>; }; - ports { + ethernet-ports { #address-cells = <1>; #size-cells = <0>; - port@0 { + ethernet-port@0 { reg = <0>; label = "lan0"; phy-handle = <&phy0>; }; - port@1 { + ethernet-port@1 { reg = <1>; label = "lan1"; phy-handle = <&phy1>; }; - port@2 { + ethernet-port@2 { reg = <2>; label = "lan2"; phy-handle = <&phy2>; }; - port@3 { + ethernet-port@3 { reg = <3>; label = "lan3"; phy-handle = <&phy3>; }; - port@4 { + ethernet-port@4 { reg = <4>; label = "wan"; phy-handle = <&phy4>; }; - port@5 { + ethernet-port@5 { reg = <5>; ethernet = <&gmac0>; phy-mode = "rgmii"; @@ -241,7 +241,7 @@ examples: #include <dt-bindings/interrupt-controller/irq.h> platform { - switch { + ethernet-switch { compatible = "realtek,rtl8365mb"; mdc-gpios = <&gpio1 16 GPIO_ACTIVE_HIGH>; mdio-gpios = <&gpio1 17 GPIO_ACTIVE_HIGH>; @@ -255,30 +255,30 @@ examples: #interrupt-cells = <1>; }; - ports { + ethernet-ports { #address-cells = <1>; #size-cells = <0>; - port@0 { + ethernet-port@0 { reg = <0>; label = "swp0"; phy-handle = <&ethphy0>; }; - port@1 { + ethernet-port@1 { reg = <1>; label = "swp1"; phy-handle = <&ethphy1>; }; - port@2 { + ethernet-port@2 { reg = <2>; label = "swp2"; phy-handle = <&ethphy2>; }; - port@3 { + ethernet-port@3 { reg = <3>; label = "swp3"; phy-handle = <&ethphy3>; }; - port@6 { + ethernet-port@6 { reg = <6>; ethernet = <&fec1>; phy-mode = "rgmii"; @@ -330,7 +330,7 @@ examples: #address-cells = <1>; #size-cells = <0>; - switch@29 { + ethernet-switch@29 { compatible = "realtek,rtl8365mb"; reg = <29>; @@ -344,36 +344,36 @@ examples: #interrupt-cells = <1>; }; - ports { + ethernet-ports { #address-cells = <1>; #size-cells = <0>; - port@0 { + ethernet-port@0 { reg = <0>; label = "lan4"; }; - port@1 { + ethernet-port@1 { reg = <1>; label = "lan3"; }; - port@2 { + ethernet-port@2 { reg = <2>; label = "lan2"; }; - port@3 { + ethernet-port@3 { reg = <3>; label = "lan1"; }; - port@4 { + ethernet-port@4 { reg = <4>; label = "wan"; }; - port@7 { + ethernet-port@7 { reg = <7>; ethernet = <&ethernet>; phy-mode = "rgmii"; diff --git a/Documentation/devicetree/bindings/net/ethernet-phy.yaml b/Documentation/devicetree/bindings/net/ethernet-phy.yaml index d9b62741a225..2c71454ae8e3 100644 --- a/Documentation/devicetree/bindings/net/ethernet-phy.yaml +++ b/Documentation/devicetree/bindings/net/ethernet-phy.yaml @@ -158,6 +158,27 @@ properties: Mark the corresponding energy efficient ethernet mode as broken and request the ethernet to stop advertising it. + timing-role: + $ref: /schemas/types.yaml#/definitions/string + enum: + - forced-master + - forced-slave + - preferred-master + - preferred-slave + description: | + Specifies the timing role of the PHY in the network link. This property is + required for setups where the role must be explicitly assigned via the + device tree due to limitations in hardware strapping or incorrect strap + configurations. + It is applicable to Single Pair Ethernet (1000/100/10Base-T1) and other + PHY types, including 1000Base-T, where it controls whether the PHY should + be a master (clock source) or a slave (clock receiver). + + - 'forced-master': The PHY is forced to operate as a master. + - 'forced-slave': The PHY is forced to operate as a slave. + - 'preferred-master': Prefer the PHY to be master but allow negotiation. + - 'preferred-slave': Prefer the PHY to be slave but allow negotiation. + pses: $ref: /schemas/types.yaml#/definitions/phandle-array maxItems: 1
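The new timing-role property slots into an ordinary PHY node. A minimal device tree sketch, assuming a PHY at MDIO address 4 (the address and role value are illustrative):

.. code-block:: dts

    mdio {
        #address-cells = <1>;
        #size-cells = <0>;

        ethernet-phy@4 {
            reg = <4>;
            /* force this PHY to source the link clock */
            timing-role = "forced-master";
        };
    };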
diff --git a/Documentation/devicetree/bindings/net/fsl,fec.yaml b/Documentation/devicetree/bindings/net/fsl,fec.yaml index 5536c06139ca..24e863fdbdab 100644 --- a/Documentation/devicetree/bindings/net/fsl,fec.yaml +++ b/Documentation/devicetree/bindings/net/fsl,fec.yaml @@ -183,6 +183,13 @@ properties: description: Register bits of stop mode control, the format is <&gpr req_gpr req_bit>. + fsl,pps-channel: + $ref: /schemas/types.yaml#/definitions/uint32 + default: 0 + description: + Specifies to which timer instance the PPS signal is routed. + enum: [0, 1, 2, 3] + mdio: $ref: mdio.yaml# unevaluatedProperties: false diff --git a/Documentation/devicetree/bindings/net/marvell,aquantia.yaml b/Documentation/devicetree/bindings/net/marvell,aquantia.yaml index 9854fab4c4db..f269615126d8 100644 --- a/Documentation/devicetree/bindings/net/marvell,aquantia.yaml +++ b/Documentation/devicetree/bindings/net/marvell,aquantia.yaml @@ -48,6 +48,12 @@ properties: firmware-name: description: specify the name of PHY firmware to load + marvell,mdi-cfg-order: + $ref: /schemas/types.yaml#/definitions/uint32 + enum: [0, 1] + description: + force normal (0) or reverse (1) order of MDI pairs, overriding the MDI_CFG bootstrap pin. + nvmem-cells: description: phandle to the firmware nvmem cell maxItems: 1 diff --git a/Documentation/devicetree/bindings/net/nxp,tja11xx.yaml b/Documentation/devicetree/bindings/net/nxp,tja11xx.yaml index a754a61adc2d..5f9f7efff538 100644 --- a/Documentation/devicetree/bindings/net/nxp,tja11xx.yaml +++ b/Documentation/devicetree/bindings/net/nxp,tja11xx.yaml @@ -62,6 +62,22 @@ allOf: reference clock output when RMII mode enabled. Only supported on TJA1100 and TJA1101. + - if: + properties: + compatible: + contains: + enum: + - ethernet-phy-id001b.b010 + - ethernet-phy-id001b.b013 + - ethernet-phy-id001b.b030 + - ethernet-phy-id001b.b031 + + then: + properties: + nxp,rmii-refclk-out: + type: boolean + description: Enable 50MHz RMII reference clock output on REF_CLK pin. + patternProperties: "^ethernet-phy@[0-9a-f]+$": type: object diff --git a/Documentation/devicetree/bindings/net/xlnx,emaclite.yaml b/Documentation/devicetree/bindings/net/xlnx,emaclite.yaml index 92d8ade988f6..e16384aff557 100644 --- a/Documentation/devicetree/bindings/net/xlnx,emaclite.yaml +++ b/Documentation/devicetree/bindings/net/xlnx,emaclite.yaml @@ -29,6 +29,9 @@ properties: interrupts: maxItems: 1 + clocks: + maxItems: 1 + phy-handle: true local-mac-address: true @@ -45,6 +48,7 @@ required: - compatible - reg - interrupts + - clocks - phy-handle additionalProperties: false @@ -56,6 +60,7 @@ examples: reg = <0x40e00000 0x10000>; interrupt-parent = <&axi_intc_1>; interrupts = <1>; + clocks = <&dummy>; local-mac-address = [00 00 00 00 00 00]; phy-handle = <&phy0>; xlnx,rx-ping-pong; diff --git a/Documentation/netlink/specs/ethtool.yaml b/Documentation/netlink/specs/ethtool.yaml index 6a050d755b9c..f6c5d8214c7e 100644 --- a/Documentation/netlink/specs/ethtool.yaml +++ b/Documentation/netlink/specs/ethtool.yaml @@ -96,7 +96,12 @@ attribute-sets: name: bits type: nest nested-attributes: bitset-bits - + - + name: value + type: binary + - + name: mask + type: binary - name: string attributes: diff --git a/Documentation/netlink/specs/net_shaper.yaml b/Documentation/netlink/specs/net_shaper.yaml new file mode 100644 index 000000000000..8ebad0d02904 --- /dev/null +++ b/Documentation/netlink/specs/net_shaper.yaml @@ -0,0 +1,362 @@ +# SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause) +name: net-shaper + +doc: | + Networking HW rate limiting configuration. + + This API allows configuring HW shapers available on the network + devices at different levels (queues, network device) and allows + arbitrary manipulation of the scheduling tree of the involved + shapers. + + Each @shaper is identified within the given device by a @handle, + comprising both a @scope and an @id.
+ + Depending on the @scope value, the shapers are attached to specific + HW objects (queues, devices) or, for @node scope, represent a + scheduling group that can be placed in an arbitrary location of + the scheduling tree. + + Shapers can be created with two different operations: the @set + operation, to create and update a single "attached" shaper, and + the @group operation, to create and update a scheduling + group. Only the @group operation can create @node scope shapers. + + Existing shapers can be deleted/reset via the @delete operation. + + The user can query the running configuration via the @get operation. + + Different devices can provide different feature sets, e.g. with no + support for complex scheduling hierarchy, or for some shaping + parameters. The user can introspect the HW capabilities via the + @cap-get operation. + +definitions: + - + type: enum + name: scope + doc: Defines the shaper @id interpretation. + render-max: true + entries: + - name: unspec + doc: The scope is not specified. + - + name: netdev + doc: The main shaper for the given network device. + - + name: queue + doc: | + The shaper is attached to the given device queue, + the @id represents the queue number. + - + name: node + doc: | + The shaper allows grouping of queues or other + node shapers; it can be nested in either @netdev + shapers or other @node shapers, allowing placement + in any location of the scheduling tree, except + leaves and root. + - + type: enum + name: metric + doc: The different metrics supported by the shaper. + entries: + - + name: bps + doc: Shaper operates on a bits per second basis. + - + name: pps + doc: Shaper operates on a packets per second basis. + +attribute-sets: + - + name: net-shaper + attributes: + - + name: handle + type: nest + nested-attributes: handle + doc: Unique identifier for the given shaper inside the owning device. + - + name: metric + type: u32 + enum: metric + doc: Metric used by the given shaper for bw-min, bw-max and burst. + - + name: bw-min + type: uint + doc: Guaranteed bandwidth for the given shaper. + - + name: bw-max + type: uint + doc: Maximum bandwidth for the given shaper or 0 when unlimited. + - + name: burst + type: uint + doc: | + Maximum burst-size for shaping. Should not be interpreted + as a quantum. + - + name: priority + type: u32 + doc: | + Scheduling priority for the given shaper. The priority + scheduling is applied to sibling shapers. + - + name: weight + type: u32 + doc: | + Relative weight for round robin scheduling of the + given shaper. + The scheduling is applied to all sibling shapers + with the same priority. + - + name: ifindex + type: u32 + doc: Interface index owning the specified shaper. + - + name: parent + type: nest + nested-attributes: handle + doc: | + Identifier for the parent of the affected shaper. + Only needed for the @group operation. + - + name: leaves + type: nest + multi-attr: true + nested-attributes: leaf-info + doc: | + Describes a set of leaf shapers for a @group operation. + - + name: handle + attributes: + - + name: scope + type: u32 + enum: scope + doc: Defines the shaper @id interpretation. + - + name: id + type: u32 + doc: | + Numeric identifier of a shaper. The id semantic depends on + the scope. For @queue scope it's the queue id and for @node + scope it's the node identifier.
- + name: leaf-info + subset-of: net-shaper + attributes: + - + name: handle + - + name: priority + - + name: weight + - + name: caps + attributes: + - + name: ifindex + type: u32 + doc: Interface index queried for shaper capabilities. + - + name: scope + type: u32 + enum: scope + doc: The scope to which the queried capabilities apply. + - + name: support-metric-bps + type: flag + doc: The device accepts 'bps' metric for bw-min, bw-max and burst. + - + name: support-metric-pps + type: flag + doc: The device accepts 'pps' metric for bw-min, bw-max and burst. + - + name: support-nesting + type: flag + doc: | + The device supports nesting shapers belonging to this scope + below 'node' scoped shapers. Only the 'queue' and 'node' + scopes can carry the 'support-nesting' flag. + - + name: support-bw-min + type: flag + doc: The device supports a minimum guaranteed B/W. + - + name: support-bw-max + type: flag + doc: The device supports maximum B/W shaping. + - + name: support-burst + type: flag + doc: The device supports a maximum burst size. + - + name: support-priority + type: flag + doc: The device supports priority scheduling. + - + name: support-weight + type: flag + doc: The device supports weighted round robin scheduling. + +operations: + list: + - + name: get + doc: | + Get information about a shaper for a given device. + attribute-set: net-shaper + + do: + pre: net-shaper-nl-pre-doit + post: net-shaper-nl-post-doit + request: + attributes: &ns-binding + - ifindex + - handle + reply: + attributes: &ns-attrs + - ifindex + - parent + - handle + - metric + - bw-min + - bw-max + - burst + - priority + - weight + + dump: + pre: net-shaper-nl-pre-dumpit + post: net-shaper-nl-post-dumpit + request: + attributes: + - ifindex + reply: + attributes: *ns-attrs + - + name: set + doc: | + Create or update the specified shaper. + The set operation can't be used to create a @node scope shaper; + use the @group operation instead. + attribute-set: net-shaper + flags: [ admin-perm ] + + do: + pre: net-shaper-nl-pre-doit + post: net-shaper-nl-post-doit + request: + attributes: + - ifindex + - handle + - metric + - bw-min + - bw-max + - burst + - priority + - weight + + - + name: delete + doc: | + Clear (remove) the specified shaper. When deleting + a @node shaper, reattach all the node's leaves to the + deleted node's parent. + If, after the removal, the parent shaper has no more + leaves and the parent shaper scope is @node, the parent + node is deleted, recursively. + When deleting a @queue shaper or a @netdev shaper, + the shaper disappears from the hierarchy, but the + queue/device can still send traffic: it has an implicit + node with infinite bandwidth. The queue's implicit node + feeds an implicit RR node at the root of the hierarchy. + attribute-set: net-shaper + flags: [ admin-perm ] + + do: + pre: net-shaper-nl-pre-doit + post: net-shaper-nl-post-doit + request: + attributes: *ns-binding + + - + name: group + doc: | + Create or update a scheduling group, attaching the specified + @leaves shapers under the specified node identified by @handle. + The @leaves shapers scope must be @queue and the node shaper + scope must be either @node or @netdev. + When the node shaper has @node scope, if the @handle @id is not + specified, a new shaper of such scope is created, otherwise the + specified node must already exist. + When updating an existing node shaper, the specified @leaves are + added to the existing node; such a node will also retain any preexisting + leaves.
+ The @parent handle for a new node shaper defaults to the parent + of all the leaves, provided all the leaves share the same parent. + Otherwise, the @parent handle must be specified. + The user can optionally provide shaping attributes for the node + shaper. + The operation is atomic: on failure, no change is applied to + the device shaping configuration; otherwise, the full identifier of + the @node shaper, comprising @binding and @handle, is provided + as the reply. + attribute-set: net-shaper + flags: [ admin-perm ] + + do: + pre: net-shaper-nl-pre-doit + post: net-shaper-nl-post-doit + request: + attributes: + - ifindex + - parent + - handle + - metric + - bw-min + - bw-max + - burst + - priority + - weight + - leaves + reply: + attributes: *ns-binding + + - + name: cap-get + doc: | + Get the shaper capabilities supported by the given device + for the specified scope. + attribute-set: caps + + do: + pre: net-shaper-nl-cap-pre-doit + post: net-shaper-nl-cap-post-doit + request: + attributes: + - ifindex + - scope + reply: + attributes: &cap-attrs + - ifindex + - scope + - support-metric-bps + - support-metric-pps + - support-nesting + - support-bw-min + - support-bw-max + - support-burst + - support-priority + - support-weight + + dump: + pre: net-shaper-nl-cap-pre-dumpit + post: net-shaper-nl-cap-post-dumpit + request: + attributes: + - ifindex + reply: + attributes: *cap-attrs
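The completed spec can be exercised from user space with the in-tree YNL CLI before any dedicated tooling exists. A minimal sketch, assuming a NIC at ifindex 2 whose driver implements the net-shaper ops (the ifindex and rate values are illustrative):

.. code-block:: bash

    # Introspect what the device can do for queue-scoped shapers
    ./tools/net/ynl/cli.py --spec Documentation/netlink/specs/net_shaper.yaml \
        --do cap-get --json '{"ifindex": 2, "scope": "queue"}'

    # Cap queue 1 at 10 Mbps using the bps metric
    ./tools/net/ynl/cli.py --spec Documentation/netlink/specs/net_shaper.yaml \
        --do set --json '{"ifindex": 2,
                          "handle": {"scope": "queue", "id": 1},
                          "metric": "bps", "bw-max": 10000000}'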
diff --git a/Documentation/netlink/specs/netdev.yaml b/Documentation/netlink/specs/netdev.yaml index 08412c279297..f9cb97d6106c 100644 --- a/Documentation/netlink/specs/netdev.yaml +++ b/Documentation/netlink/specs/netdev.yaml @@ -248,6 +248,21 @@ attribute-sets: threaded mode. If NAPI is not in threaded mode (i.e. uses normal softirq context), the attribute will be absent. type: u32 + - + name: defer-hard-irqs + doc: The number of consecutive empty polls before IRQ deferral ends + and hardware IRQs are re-enabled. + type: u32 + checks: + max: s32-max + - + name: gro-flush-timeout + doc: The timeout, in nanoseconds, of when to trigger the NAPI watchdog + timer which schedules NAPI processing. Additionally, a non-zero + value will also prevent GRO from flushing recent super-frames at + the end of a NAPI cycle. This may add receive latency in exchange + for reducing the number of frames processed by the network stack. + type: uint - name: queue attributes: @@ -636,6 +651,8 @@ operations: - ifindex - irq - pid + - defer-hard-irqs + - gro-flush-timeout dump: request: attributes: @@ -676,6 +693,17 @@ operations: reply: attributes: - id + - + name: napi-set + doc: Set configurable NAPI instance settings. + attribute-set: napi + flags: [ admin-perm ] + do: + request: + attributes: + - id + - defer-hard-irqs + - gro-flush-timeout kernel-family: headers: [ "linux/list.h"] diff --git a/Documentation/netlink/specs/rt_link.yaml b/Documentation/netlink/specs/rt_link.yaml index 0c4d5d40cae9..9ffa13b77dcf 100644 --- a/Documentation/netlink/specs/rt_link.yaml +++ b/Documentation/netlink/specs/rt_link.yaml @@ -920,6 +920,13 @@ definitions: - name: l2 - name: l3 + - + name: netkit-scrub + type: enum + entries: + - name: none + - name: default + attribute-sets: - name: link-attrs @@ -1137,6 +1144,10 @@ attribute-sets: name: dpll-pin type: nest nested-attributes: link-dpll-pin-attrs + - + name: max-pacing-offload-horizon + type: uint + doc: EDT offload horizon supported by the device (in nsec). - name: af-spec-attrs attributes: @@ -2147,6 +2158,14 @@ attribute-sets: name: mode type: u32 enum: netkit-mode + - + name: scrub + type: u32 + enum: netkit-scrub + - + name: peer-scrub + type: u32 + enum: netkit-scrub sub-messages: - diff --git a/Documentation/networking/device_drivers/ethernet/intel/ice.rst b/Documentation/networking/device_drivers/ethernet/intel/ice.rst index 934752f675ba..3c46a48d99ba 100644 --- a/Documentation/networking/device_drivers/ethernet/intel/ice.rst +++ b/Documentation/networking/device_drivers/ethernet/intel/ice.rst @@ -101,6 +101,37 @@ example, if Rx packets are 10 and Netdev (software statistics) displays rx_bytes as "X", then ethtool (hardware statistics) will display rx_bytes as "X+40" (4 bytes CRC x 10 packets). +ethtool reset +------------- +The driver supports 3 types of resets: + +- PF reset - resets only components associated with the given PF, does not + impact other PFs + +- CORE reset - the whole adapter is affected, resets all PFs + +- GLOBAL reset - same as CORE, but MAC and PHY components are also reinitialized + +These are mapped to ethtool reset flags as follows: + +- PF reset: + + # ethtool --reset <ethX> irq dma filter offload + +- CORE reset: + + # ethtool --reset <ethX> irq-shared dma-shared filter-shared offload-shared \ + ram-shared + +- GLOBAL reset: + + # ethtool --reset <ethX> irq-shared dma-shared filter-shared offload-shared \ + mac-shared phy-shared ram-shared + +In switchdev mode you can reset a VF using its port representor: + + # ethtool --reset <repr> irq dma filter offload + Viewing Link Messages --------------------- diff --git a/Documentation/networking/diagnostic/index.rst b/Documentation/networking/diagnostic/index.rst new file mode 100644 index 000000000000..86488aa46b48 --- /dev/null +++ b/Documentation/networking/diagnostic/index.rst @@ -0,0 +1,17 @@ +.. SPDX-License-Identifier: GPL-2.0 + +====================== +Networking Diagnostics +====================== + +.. toctree:: + :maxdepth: 2 + + twisted_pair_layer1_diagnostics.rst + +.. only:: subproject and html + + Indices + ======= + + * :ref:`genindex` diff --git a/Documentation/networking/diagnostic/twisted_pair_layer1_diagnostics.rst b/Documentation/networking/diagnostic/twisted_pair_layer1_diagnostics.rst new file mode 100644 index 000000000000..c9be5cc7e113 --- /dev/null +++ b/Documentation/networking/diagnostic/twisted_pair_layer1_diagnostics.rst @@ -0,0 +1,767 @@ +.. SPDX-License-Identifier: GPL-2.0 + +Diagnostic Concept for Investigating Twisted Pair Ethernet Variants at OSI Layer 1 +================================================================================== + +Introduction +------------ + +This documentation is designed for two primary audiences: + +1. **Users and System Administrators**: For those dealing with real-world + Ethernet issues, this guide provides a practical, step-by-step + troubleshooting flow to help identify and resolve common problems in Twisted + Pair Ethernet at OSI Layer 1. If you're facing unstable links, speed drops, + or mysterious network issues, jump right into the step-by-step guide and + follow it through to find your solution. + +2. **Kernel Developers**: For developers working with network drivers and PHY + support, this documentation outlines the diagnostic process and highlights + areas where the Linux kernel’s diagnostic interfaces could be extended or + improved. By understanding the diagnostic flow, developers can better + prioritize future enhancements.
+ +Step-by-Step Diagnostic Guide from Linux (General Ethernet) +----------------------------------------------------------- + +This diagnostic guide covers common Ethernet troubleshooting scenarios, +focusing on **link stability and detection** across different Ethernet +environments, including **Single-Pair Ethernet (SPE)** and **Multi-Pair +Ethernet (MPE)**, as well as power delivery technologies like **PoDL** (Power +over Data Line) and **PoE** (Clause 33 PSE). + +The guide is designed to help users diagnose physical layer (Layer 1) issues on +systems running **Linux kernel version 6.11 or newer**, utilizing **ethtool +version 6.10 or later** and **iproute2 version 6.4.0 or later**. + +In this guide, we assume that users may have **limited or no access to the link +partner** and will focus on diagnosing issues locally. + +Diagnostic Scenarios +~~~~~~~~~~~~~~~~~~~~ + +- **Link is up and stable, but no data transfer**: If the link is stable but + there are issues with data transmission, refer to the **OSI Layer 2 + Troubleshooting Guide**. + +- **Link is unstable**: Link resets, speed drops, or other fluctuations + indicate potential issues at the hardware or physical layer. + +- **No link detected**: The interface is up, but no link is established. + +Verify Interface Status +~~~~~~~~~~~~~~~~~~~~~~~ + +Begin by verifying the status of the Ethernet interface to check if it is +administratively up. `ethtool` provides information on the link and PHY +status, but it does not show the **administrative state** of the interface. +To check this, you should use the `ip` command, which describes the interface +state within the angle brackets `"<>"` in its output. + +For example, in the output `<NO-CARRIER,BROADCAST,MULTICAST,UP>`, the important +keywords are: + +- **UP**: The interface is in the administrative "UP" state. +- **NO-CARRIER**: The interface is administratively up, but no physical link is + detected. + +If the output shows `<BROADCAST,MULTICAST>`, this indicates the interface is in +the administrative "DOWN" state. + +- **Command:** `ip link show dev <interface>` + +- **Expected Output:** + + .. code-block:: bash + + 4: eth0: <NO-CARRIER,BROADCAST,MULTICAST,UP> mtu 1500 ... + link/ether 88:14:2b:00:96:f2 brd ff:ff:ff:ff:ff:ff + +- **Interpreting the Output:** + + - **Administrative UP State**: + + - If the output contains **"UP"**, the interface is administratively up, + and the system is trying to establish a physical link. + + - If you also see **"NO-CARRIER"**, it means the physical link has not been + detected, indicating potential Layer 1 issues like a cable fault, + misconfiguration, or no connection at the link partner. In this case, + proceed to the **Inspect Link Status and PHY Configuration** section. + + - **Administrative DOWN State**: + + - If the output lacks **"UP"** and shows only states like + **"<BROADCAST,MULTICAST>"**, it means the interface is administratively + down. In this case, bring the interface up using the following command: + + .. code-block:: bash + + ip link set dev <interface> up + +- **Next Steps**: + + - If the interface is **administratively up** but shows **NO-CARRIER**, + proceed to the **Inspect Link Status and PHY Configuration** section to + troubleshoot potential physical layer issues. + + - If the interface was **administratively down** and you have brought it up, + make sure to **repeat this verification step** to confirm the new state of + the interface before proceeding. + + - **If the interface is up and the link is detected**: + + - If the output shows **"UP"** and there is **no `NO-CARRIER`**, the + interface is administratively up, and the physical link has been + successfully established. If everything is working as expected, the Layer + 1 diagnostics are complete, and no further action is needed. + + - If the interface is up and the link is detected but **no data is being + transferred**, the issue is likely beyond Layer 1, and you should proceed + with diagnosing the higher layers of the OSI model. This may involve + checking Layer 2 configurations (such as VLANs or MAC address issues), + Layer 3 settings (like IP addresses, routing, or ARP), or Layer 4 and + above (firewalls, services, etc.). + + - If the **link is unstable** or **frequently resetting or dropping**, this + may indicate a physical layer issue such as a faulty cable, interference, + or power delivery problems. In this case, proceed with the next step in + this guide. + +Inspect Link Status and PHY Configuration +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Use `ethtool -I` to check the link status, PHY configuration, supported link +modes, and additional statistics such as the **Link Down Events** counter. This +step is essential for diagnosing Layer 1 problems such as speed mismatches, +duplex issues, and link instability. + +For both **Single-Pair Ethernet (SPE)** and **Multi-Pair Ethernet (MPE)** +devices, you will use this step to gather key details about the link. **SPE** +links generally support a single speed and mode without autonegotiation (with +the exception of **10BaseT1L**), while **MPE** devices typically support +multiple link modes and autonegotiation. + +- **Command:** `ethtool -I <interface>` + +- **Example Output for SPE Interface (Non-autonegotiation)**: + + .. code-block:: bash + + Settings for spe4: + Supported ports: [ TP ] + Supported link modes: 100baseT1/Full + Supported pause frame use: No + Supports auto-negotiation: No + Supported FEC modes: Not reported + Advertised link modes: Not applicable + Advertised pause frame use: No + Advertised auto-negotiation: No + Advertised FEC modes: Not reported + Speed: 100Mb/s + Duplex: Full + Auto-negotiation: off + master-slave cfg: forced slave + master-slave status: slave + Port: Twisted Pair + PHYAD: 6 + Transceiver: external + MDI-X: Unknown + Supports Wake-on: d + Wake-on: d + Link detected: yes + SQI: 7/7 + Link Down Events: 2 + +- **Example Output for MPE Interface (Autonegotiation)**: + + ..
code-block:: bash + + Settings for eth1: + Supported ports: [ TP MII ] + Supported link modes: 10baseT/Half 10baseT/Full + 100baseT/Half 100baseT/Full + Supported pause frame use: Symmetric Receive-only + Supports auto-negotiation: Yes + Supported FEC modes: Not reported + Advertised link modes: 10baseT/Half 10baseT/Full + 100baseT/Half 100baseT/Full + Advertised pause frame use: Symmetric Receive-only + Advertised auto-negotiation: Yes + Advertised FEC modes: Not reported + Link partner advertised link modes: 10baseT/Half 10baseT/Full + 100baseT/Half 100baseT/Full + Link partner advertised pause frame use: Symmetric Receive-only + Link partner advertised auto-negotiation: Yes + Link partner advertised FEC modes: Not reported + Speed: 100Mb/s + Duplex: Full + Auto-negotiation: on + Port: Twisted Pair + PHYAD: 10 + Transceiver: internal + MDI-X: Unknown + Supports Wake-on: pg + Wake-on: p + Link detected: yes + Link Down Events: 1 + +- **Next Steps**: + + - Record the output provided by `ethtool`, particularly noting the + **master-slave status**, **speed**, **duplex**, and other relevant fields. + This information will be useful for further analysis or troubleshooting. + Once the **ethtool** output has been collected and stored, move on to the + next diagnostic step. + +Check Power Delivery (PoDL or PoE) +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +If it is known that **PoDL** or **PoE** is **not implemented** on the system, +or the **PSE** (Power Sourcing Equipment) is managed by proprietary user-space +software or external tools, you can skip this step. In such cases, verify power +delivery through alternative methods, such as checking hardware indicators +(LEDs), using multimeters, or consulting vendor-specific software for +monitoring power status. + +If **PoDL** or **PoE** is implemented and managed directly by Linux, follow +these steps to ensure power is being delivered correctly: + +- **Command:** `ethtool --show-pse <interface>` + +- **Expected Output Examples**: + + 1. **PSE Not Supported**: + + If no PSE is attached or the interface does not support PSE, the following + output is expected: + + .. code-block:: bash + + netlink error: No PSE is attached + netlink error: Operation not supported + + 2. **PoDL (Single-Pair Ethernet)**: + + When PoDL is implemented, you might see the following attributes: + + .. code-block:: bash + + PSE attributes for eth1: + PoDL PSE Admin State: enabled + PoDL PSE Power Detection Status: delivering power + + 3. **PoE (Clause 33 PSE)**: + + For standard PoE, the output may look like this: + + .. code-block:: bash + + PSE attributes for eth1: + Clause 33 PSE Admin State: enabled + Clause 33 PSE Power Detection Status: delivering power + Clause 33 PSE Available Power Limit: 18000 + +- **Adjust Power Limit (if needed)**: + + - Sometimes, the available power limit may not be sufficient for the link + partner. You can increase the power limit as needed. + + - **Command:** `ethtool --set-pse <interface> c33-pse-avail-pw-limit <limit>` + + Example: + + .. code-block:: bash + + ethtool --set-pse eth1 c33-pse-avail-pw-limit 18000 + ethtool --show-pse eth1 + + **Expected Output** after adjusting the power limit: + + .. code-block:: bash + + Clause 33 PSE Available Power Limit: 18000 + + +- **Next Steps**: + + - **PoE or PoDL Not Used**: If **PoE** or **PoDL** is not implemented or used + on the system, proceed to the next diagnostic step, as power delivery is + not relevant for this setup. 
+ + - **PoE or PoDL Controlled Externally**: If **PoE** or **PoDL** is used but + is not managed by the Linux kernel's **PSE-PD** framework (i.e., it is + controlled by proprietary user-space software or external tools), this part + is out of scope for this documentation. Please consult vendor-specific + documentation or external tools for monitoring and managing power delivery. + + - **PSE Admin State Disabled**: + + - If the `PSE Admin State:` is **disabled**, enable it by running one of + the following commands: + + .. code-block:: bash + + ethtool --set-pse <devname> podl-pse-admin-control enable + + or, for Clause 33 PSE (PoE): + + ethtool --set-pse <devname> c33-pse-admin-control enable + + - After enabling the PSE Admin State, return to the start of the **Check + Power Delivery (PoDL or PoE)** step to recheck the power delivery status. + + - **Power Not Delivered**: If the `Power Detection Status` shows something + other than "delivering power" (e.g., `over current`), troubleshoot the + **PSE**. Check for potential issues such as a short circuit in the cable, + insufficient power delivery, or a fault in the PSE itself. + + - **Power Delivered but No Link**: If power is being delivered but no link is + established, proceed with further diagnostics by performing **Cable + Diagnostics** or reviewing the **Inspect Link Status and PHY + Configuration** steps to identify any underlying issues with the physical + link or settings. + +Cable Diagnostics +~~~~~~~~~~~~~~~~~ + +Use `ethtool` to test for physical layer issues such as cable faults. The test +results can vary depending on the cable's condition, the technology in use, and +the state of the link partner. The results from the cable test will help in +diagnosing issues like open circuits, shorts, impedance mismatches, and +noise-related problems. + +- **Command:** `ethtool --cable-test <interface>` + +The following are the typical outputs for **Single-Pair Ethernet (SPE)** and +**Multi-Pair Ethernet (MPE)**: + +- **For Single-Pair Ethernet (SPE)**: + - **Expected Output (SPE)**: + + .. code-block:: bash + + Cable test completed for device eth1. + Pair A, fault length: 25.00m + Pair A code Open Circuit + + This indicates an open circuit or cable fault at the reported distance, but + results can be influenced by the link partner's state. Refer to the + **"Troubleshooting Based on Cable Test Results"** section for further + interpretation of these results. + +- **For Multi-Pair Ethernet (MPE)**: + - **Expected Output (MPE)**: + + .. code-block:: bash + + Cable test completed for device eth0. + Pair A code OK + Pair B code OK + Pair C code Open Circuit + + Here, Pair C is reported as having an open circuit, while Pairs A and B are + functioning correctly. However, if autonegotiation is in use on Pairs A and + B, the cable test may be disrupted. Refer to the **"Troubleshooting Based on + Cable Test Results"** section for a detailed explanation of these issues and + how to resolve them. + +For detailed descriptions of the different possible cable test results, please +refer to the **"Troubleshooting Based on Cable Test Results"** section. + +Troubleshooting Based on Cable Test Results +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +After running the cable test, the results can help identify specific issues in +the physical connection. However, it is important to note that **cable testing +results heavily depend on the capabilities and characteristics of both the +local hardware and the link partner**. 
The accuracy and reliability of the +results can vary significantly between different hardware implementations. + +In some cases, this can introduce **blind spots** in the current cable testing +implementation, where certain results may not accurately reflect the actual +physical state of the cable. For example: + +- An **Open Circuit** result might not only indicate a damaged or disconnected + cable but also occur if the cable is properly attached to a powered-down link + partner. + +- Some PHYs may report a **Short within Pair** if the link partner is in + **forced slave mode**, even though there is no actual short in the cable. + +To help users interpret the results more effectively, it could be beneficial to +extend the **kernel UAPI** (User API) to provide additional context or +**possible variants** of issues based on the hardware’s characteristics. Since +these quirks are often hardware-specific, the **kernel driver** would be an +ideal source of such information. By providing flags or hints related to +potential false positives for each test result, users would have a better +understanding of what to verify and where to investigate further. + +Until such improvements are made, users should be aware of these limitations +and manually verify cable issues as needed. Physical inspections may help +resolve uncertainties related to false positive results. + +The results can be one of the following: + +- **OK**: + + - The cable is functioning correctly, and no issues were detected. + + - **Next Steps**: If you are still experiencing issues, it might be related + to higher-layer problems, such as duplex mismatches or speed negotiation, + which are not physical-layer issues. + + - **Special Case for `BaseT1` (1000/100/10BaseT1)**: In `BaseT1` systems, an + "OK" result typically also means that the link is up and likely in **slave + mode**, since cable tests usually only pass in this mode. For some + **10BaseT1L** PHYs, an "OK" result may occur even if the cable is too long + for the PHY's configured range (for example, when the range is configured + for short-distance mode). + +- **Open Circuit**: + + - An **Open Circuit** result typically indicates that the cable is damaged or + disconnected at the reported fault length. Consider these possibilities: + + - If the link partner is in **admin down** state or powered off, you might + still get an "Open Circuit" result even if the cable is functional. + + - **Next Steps**: Inspect the cable at the fault length for visible damage + or loose connections. Verify the link partner is powered on and in the + correct mode. + +- **Short within Pair**: + + - A **Short within Pair** indicates an unintended connection within the same + pair of wires, typically caused by physical damage to the cable. + + - **Next Steps**: Replace or repair the cable and check for any physical + damage or improperly crimped connectors. + +- **Short to Another Pair**: + + - A **Short to Another Pair** means the wires from different pairs are + shorted, which could occur due to physical damage or incorrect wiring. + + - **Next Steps**: Replace or repair the damaged cable. Inspect the cable for + incorrect terminations or pinched wiring. + +- **Impedance Mismatch**: + + - **Impedance Mismatch** indicates a reflection caused by an impedance + discontinuity in the cable. This can happen when a part of the cable has + abnormal impedance (e.g., when different cable types are spliced together + or when there is a defect in the cable). 
+ + - **Next Steps**: Check the cable quality and ensure consistent impedance + throughout its length. Replace any sections of the cable that do not meet + specifications. + +- **Noise**: + + - **Noise** means that the Time Domain Reflectometry (TDR) test could not + complete due to excessive noise on the cable, which can be caused by + interference from electromagnetic sources. + + - **Next Steps**: Identify and eliminate sources of electromagnetic + interference (EMI) near the cable. Consider using shielded cables or + rerouting the cable away from noise sources. + +- **Resolution Not Possible**: + + - **Resolution Not Possible** means that the TDR test could not detect the + issue due to the resolution limitations of the test or because the fault is + beyond the distance that the test can measure. + + - **Next Steps**: Inspect the cable manually if possible, or use alternative + diagnostic tools that can handle greater distances or higher resolution. + +- **Unknown**: + + - An **Unknown** result may occur when the test cannot classify the fault or + when a specific issue is outside the scope of the tool's detection + capabilities. + + - **Next Steps**: Re-run the test, verify the link partner's state, and inspect + the cable manually if necessary. + +Verify Link Partner PHY Configuration +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +If the cable test passes but the link is still not functioning correctly, it’s +essential to verify the configuration of the link partner’s PHY. Mismatches in +speed, duplex settings, or master-slave roles can cause connection issues. + +Autonegotiation Mismatch +^^^^^^^^^^^^^^^^^^^^^^^^ + +- If both link partners support autonegotiation, ensure that autonegotiation is + enabled on both sides and that all supported link modes are advertised. A + mismatch can lead to connectivity problems or suboptimal performance. + +- **Quick Fix:** Reset autonegotiation to the default settings, which will + advertise all default link modes: + + .. code-block:: bash + + ethtool -s <interface> autoneg on + +- **Command to check configuration:** `ethtool <interface>` + +- **Expected Output:** Ensure that both sides advertise compatible link modes. + If autonegotiation is off, verify that both link partners are configured for + the same speed and duplex. + + The following example shows a case where the local PHY advertises fewer link + modes than it supports. This will reduce the number of overlapping link modes + with the link partner. In the worst case, there will be no common link modes, + and no link will be established: + + .. code-block:: bash + + Settings for eth0: + Supported link modes: 1000baseT/Full, 100baseT/Full + Advertised link modes: 1000baseT/Full + Speed: 1000Mb/s + Duplex: Full + Auto-negotiation: on + +Combined Mode Mismatch (Autonegotiation on One Side, Forced on the Other) +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +- One possible issue occurs when one side is using **autonegotiation** (as in + most modern systems), and the other side is set to a **forced link mode** + (e.g., older hardware with single-speed hubs). In such cases, modern PHYs + will attempt to detect the forced mode on the other side. If the link is + established, you may notice: + + - **No or empty "Link partner advertised link modes"**. + + - **"Link partner advertised auto-negotiation:"** will be **"no"** or not + present.
+ +- This type of detection does not always work reliably: + + - Typically, the modern PHY will default to **Half Duplex**, even if the link + partner is actually configured for **Full Duplex**. + + - Some PHYs may not work reliably if the link partner switches from one + forced mode to another. In this case, only a down/up cycle may help. + +- **Next Steps**: Set both sides to the same fixed speed and duplex mode to + avoid potential detection issues. + + .. code-block:: bash + + ethtool -s <interface> speed 1000 duplex full autoneg off + +Master/Slave Role Mismatch (BaseT1 and 1000BaseT PHYs) +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +- In **BaseT1** systems (e.g., 1000BaseT1, 100BaseT1), link establishment + requires that one device is configured as **master** and the other as + **slave**. A mismatch in this master-slave configuration can prevent the link + from being established. However, **1000BaseT** also supports configurable + master/slave roles and can face similar issues. + +- **Role Preference in 1000BaseT**: The **1000BaseT** specification allows link + partners to negotiate master-slave roles or role preferences during + autonegotiation. Some PHYs have hardware limitations or bugs that prevent + them from functioning properly in certain roles. In such cases, drivers may + force these PHYs into a specific role (e.g., **forced master** or **forced + slave**) or try a weaker option by setting preferences. If both link partners + have the same issue and are forced into the same mode (e.g., both forced into + master mode), they will not be able to establish a link. + +- **Next Steps**: Ensure that one side is configured as **master** and the + other as **slave** to avoid this issue, particularly when hardware + limitations are involved, or try the weaker **preferred** option instead of + **forced**. Check for any driver-related restrictions or forced modes. + +- **Command to force master/slave mode**: + + .. code-block:: bash + + ethtool -s <interface> master-slave forced-master + + or: + + .. code-block:: bash + + ethtool -s <interface> master-slave forced-master speed 1000 duplex full autoneg off + + +- **Check the current master/slave status**: + + .. code-block:: bash + + ethtool <interface> + + Example Output: + + .. code-block:: bash + + master-slave cfg: forced-master + master-slave status: master + +- **Hardware Bugs and Driver Forcing**: If a known hardware issue forces the + PHY into a specific mode, it’s essential to check the driver source code or + hardware documentation for details. Ensure that the roles are compatible + across both link partners, and if both PHYs are forced into the same mode, + adjust one side accordingly to resolve the mismatch. + +Monitor Link Resets and Speed Drops +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +If the link is unstable, showing frequent resets or speed drops, this may +indicate issues with the cable, PHY configuration, or environmental factors. +While there is still no completely unified way in Linux to directly monitor +downshift events or link speed changes via user space tools, both the Linux +kernel logs and `ethtool` can provide valuable insights, especially if the +driver supports reporting such events. + +- **Monitor Kernel Logs for Link Resets and Speed Drops**: + + - The Linux kernel will print link status changes, including downshift + events, in the system logs. These messages typically include speed changes, + duplex mode, and downshifted link speed (if the driver supports it). 
+ + - **Command to monitor kernel logs in real-time:** + + .. code-block:: bash + + dmesg -w | grep "Link is Up\|Link is Down" + + - Example Output (if a downshift occurs): + + .. code-block:: bash + + eth0: Link is Up - 100Mbps/Full (downshifted) - flow control rx/tx + eth0: Link is Down + + This indicates that the link has been established but has downshifted from + a higher speed. + + - **Note**: Not all drivers or PHYs support downshift reporting, so you may + not see this information for all devices. + +- **Monitor Link Down Events Using `ethtool`**: + + - Starting with the latest kernel and `ethtool` versions, you can track + **Link Down Events** using the `ethtool -I` command. This will provide + counters for link drops, helping to diagnose link instability issues if + supported by the driver. + + - **Command to monitor link down events:** + + .. code-block:: bash + + ethtool -I <interface> + + - Example Output (if supported): + + .. code-block:: bash + + PSE attributes for eth1: + Link Down Events: 5 + + This indicates that the link has dropped 5 times. Frequent link down events + may indicate cable or environmental issues that require further + investigation. + +- **Check Link Status and Speed**: + + - Even though downshift counts or events are not easily tracked, you can + still use `ethtool` to manually check the current link speed and status. + + - **Command:** `ethtool <interface>` + + - **Expected Output:** + + .. code-block:: bash + + Speed: 1000Mb/s + Duplex: Full + Auto-negotiation: on + Link detected: yes + + Any inconsistencies in the expected speed or duplex setting could indicate + an issue. + +- **Disable Energy-Efficient Ethernet (EEE) for Diagnostics**: + + - **EEE** (Energy-Efficient Ethernet) can be a source of link instability due + to transitions in and out of low-power states. For diagnostic purposes, it + may be useful to **temporarily** disable EEE to determine if it is + contributing to link instability. This is **not a generic recommendation** + for disabling power management. + + - **Next Steps**: Disable EEE and monitor if the link becomes stable. If + disabling EEE resolves the issue, report the bug so that the driver can be + fixed. + + - **Command:** + + .. code-block:: bash + + ethtool --set-eee <interface> eee off + + - **Important**: If disabling EEE resolves the instability, the issue should + be reported to the maintainers as a bug, and the driver should be corrected + to handle EEE properly without causing instability. Disabling EEE + permanently should not be seen as a solution. + +- **Monitor Error Counters**: + + - While some NIC drivers and PHYs provide error counters, there is no unified + set of PHY-specific counters across all hardware. Additionally, not all + PHYs provide useful information related to errors like CRC errors, frame + drops, or link flaps. Therefore, this step is dependent on the specific + hardware and driver support. + + - **Next Steps**: Use `ethtool -S <interface>` to check if your driver + provides useful error counters. In some cases, counters may provide + information about errors like link flaps or physical layer problems (e.g., + excessive CRC errors), but results can vary significantly depending on the + PHY. + + - **Command:** `ethtool -S <interface>` + + - **Example Output (if supported)**: + + .. 
code-block:: bash + + rx_crc_errors: 123 + tx_errors: 45 + rx_frame_errors: 78 + + - **Note**: If no meaningful error counters are available or if counters are + not supported, you may need to rely on physical inspections (e.g., cable + condition) or kernel log messages (e.g., link up/down events) to further + diagnose the issue. + +When All Else Fails... +~~~~~~~~~~~~~~~~~~~~~~ + +So you've checked the cables, monitored the logs, disabled EEE, and still... +nothing? Don’t worry, you’re not alone. Sometimes, Ethernet gremlins just don’t +want to cooperate. + +But before you throw in the towel (or the Ethernet cable), take a deep breath. +It’s always possible that: + +1. Your PHY has a unique, undocumented personality. + +2. The problem is lying dormant, waiting for just the right moment to magically + resolve itself (hey, it happens!). + +3. Or, it could be that the ultimate solution simply hasn’t been invented yet. + +If none of the above bring you comfort, there’s one final step: contribute! If +you've uncovered new or unusual issues, or have creative diagnostic methods, +feel free to share your findings and extend this documentation. Together, we +can hunt down every elusive network issue - one twisted pair at a time. + +Remember: sometimes the solution is just a reboot away, but if not, it’s time to +dig deeper - or report that bug! + diff --git a/Documentation/networking/index.rst b/Documentation/networking/index.rst index 803dfc1efb75..46c178e564b3 100644 --- a/Documentation/networking/index.rst +++ b/Documentation/networking/index.rst @@ -14,6 +14,7 @@ Contents: can can_ucan_protocol device_drivers/index + diagnostic/index dsa/index devlink/index caif/index diff --git a/Documentation/networking/kapi.rst b/Documentation/networking/kapi.rst index ea55f462cefa..98682b9a13ee 100644 --- a/Documentation/networking/kapi.rst +++ b/Documentation/networking/kapi.rst @@ -104,6 +104,9 @@ Driver Support .. kernel-doc:: include/linux/netdevice.h :internal: +.. 
kernel-doc:: include/net/net_shaper.h + :internal: + PHY Support ----------- diff --git a/Documentation/networking/net_cachelines/inet_connection_sock.rst b/Documentation/networking/net_cachelines/inet_connection_sock.rst index 7a911dc95652..4a15627fc93b 100644 --- a/Documentation/networking/net_cachelines/inet_connection_sock.rst +++ b/Documentation/networking/net_cachelines/inet_connection_sock.rst @@ -5,46 +5,48 @@ inet_connection_sock struct fast path usage breakdown ===================================================== +=================================== ====================== =================== =================== ======================================================================================================================================================== Type Name fastpath_tx_access fastpath_rx_access comment -..struct ..inet_connection_sock -struct_inet_sock icsk_inet read_mostly read_mostly tcp_init_buffer_space,tcp_init_transfer,tcp_finish_connect,tcp_connect,tcp_send_rcvq,tcp_send_syn_data -struct_request_sock_queue icsk_accept_queue - - -struct_inet_bind_bucket icsk_bind_hash read_mostly - tcp_set_state -struct_inet_bind2_bucket icsk_bind2_hash read_mostly - tcp_set_state,inet_put_port -unsigned_long icsk_timeout read_mostly - inet_csk_reset_xmit_timer,tcp_connect -struct_timer_list icsk_retransmit_timer read_mostly - inet_csk_reset_xmit_timer,tcp_connect -struct_timer_list icsk_delack_timer read_mostly - inet_csk_reset_xmit_timer,tcp_connect -u32 icsk_rto read_write - tcp_cwnd_validate,tcp_schedule_loss_probe,tcp_connect_init,tcp_connect,tcp_write_xmit,tcp_push_one -u32 icsk_rto_min - - -u32 icsk_delack_max - - -u32 icsk_pmtu_cookie read_write - tcp_sync_mss,tcp_current_mss,tcp_send_syn_data,tcp_connect_init,tcp_connect -struct_tcp_congestion_ops icsk_ca_ops read_write - tcp_cwnd_validate,tcp_tso_segs,tcp_ca_dst_init,tcp_connect_init,tcp_connect,tcp_write_xmit -struct_inet_connection_sock_af_ops icsk_af_ops read_mostly - tcp_finish_connect,tcp_send_syn_data,tcp_mtup_init,tcp_mtu_check_reprobe,tcp_mtu_probe,tcp_connect_init,tcp_connect,__tcp_transmit_skb -struct_tcp_ulp_ops* icsk_ulp_ops - - -void* icsk_ulp_data - - -u8:5 icsk_ca_state read_write - tcp_cwnd_application_limited,tcp_set_ca_state,tcp_enter_cwr,tcp_tso_should_defer,tcp_mtu_probe,tcp_schedule_loss_probe,tcp_write_xmit,__tcp_transmit_skb -u8:1 icsk_ca_initialized read_write - tcp_init_transfer,tcp_init_congestion_control,tcp_init_transfer,tcp_finish_connect,tcp_connect -u8:1 icsk_ca_setsockopt - - -u8:1 icsk_ca_dst_locked write_mostly - tcp_ca_dst_init,tcp_connect_init,tcp_connect -u8 icsk_retransmits write_mostly - tcp_connect_init,tcp_connect -u8 icsk_pending read_write - inet_csk_reset_xmit_timer,tcp_connect,tcp_check_probe_timer,__tcp_push_pending_frames,tcp_rearm_rto,tcp_event_new_data_sent,tcp_event_new_data_sent -u8 icsk_backoff write_mostly - tcp_write_queue_purge,tcp_connect_init -u8 icsk_syn_retries - - -u8 icsk_probes_out - - -u16 icsk_ext_hdr_len read_mostly - __tcp_mtu_to_mss,tcp_mtu_to_rss,tcp_mtu_probe,tcp_write_xmit,tcp_mtu_to_mss, -struct_icsk_ack_u8 pending read_write read_write inet_csk_ack_scheduled,__tcp_cleanup_rbuf,tcp_cleanup_rbuf,inet_csk_clear_xmit_timer,tcp_event_ack-sent,inet_csk_reset_xmit_timer -struct_icsk_ack_u8 quick read_write write_mostly tcp_dec_quickack_mode,tcp_event_ack_sent,__tcp_transmit_skb,__tcp_select_window,__tcp_cleanup_rbuf -struct_icsk_ack_u8 pingpong - - -struct_icsk_ack_u8 retry write_mostly read_write 
inet_csk_clear_xmit_timer,tcp_rearm_rto,tcp_event_new_data_sent,tcp_write_xmit,__tcp_send_ack,tcp_send_ack, -struct_icsk_ack_u8 ato read_mostly write_mostly tcp_dec_quickack_mode,tcp_event_ack_sent,__tcp_transmit_skb,__tcp_send_ack,tcp_send_ack -struct_icsk_ack_unsigned_long timeout read_write read_write inet_csk_reset_xmit_timer,tcp_connect -struct_icsk_ack_u32 lrcvtime read_write - tcp_finish_connect,tcp_connect,tcp_event_data_sent,__tcp_transmit_skb -struct_icsk_ack_u16 rcv_mss write_mostly read_mostly __tcp_select_window,__tcp_cleanup_rbuf,tcp_initialize_rcv_mss,tcp_connect_init -struct_icsk_mtup_int search_high read_write - tcp_mtup_init,tcp_sync_mss,tcp_connect_init,tcp_mtu_check_reprobe,tcp_write_xmit -struct_icsk_mtup_int search_low read_write - tcp_mtu_probe,tcp_mtu_check_reprobe,tcp_write_xmit,tcp_sync_mss,tcp_connect_init,tcp_mtup_init -struct_icsk_mtup_u32:31 probe_size read_write - tcp_mtup_init,tcp_connect_init,__tcp_transmit_skb -struct_icsk_mtup_u32:1 enabled read_write - tcp_mtup_init,tcp_sync_mss,tcp_connect_init,tcp_mtu_probe,tcp_write_xmit -struct_icsk_mtup_u32 probe_timestamp read_write - tcp_mtup_init,tcp_connect_init,tcp_mtu_check_reprobe,tcp_mtu_probe -u32 icsk_probes_tstamp - - -u32 icsk_user_timeout - - -u64[104/sizeof(u64)] icsk_ca_priv - - +=================================== ====================== =================== =================== ======================================================================================================================================================== +struct inet_sock icsk_inet read_mostly read_mostly tcp_init_buffer_space,tcp_init_transfer,tcp_finish_connect,tcp_connect,tcp_send_rcvq,tcp_send_syn_data +struct request_sock_queue icsk_accept_queue +struct inet_bind_bucket icsk_bind_hash read_mostly tcp_set_state +struct inet_bind2_bucket icsk_bind2_hash read_mostly tcp_set_state,inet_put_port +unsigned_long icsk_timeout read_mostly inet_csk_reset_xmit_timer,tcp_connect +struct timer_list icsk_retransmit_timer read_mostly inet_csk_reset_xmit_timer,tcp_connect +struct timer_list icsk_delack_timer read_mostly inet_csk_reset_xmit_timer,tcp_connect +u32 icsk_rto read_write tcp_cwnd_validate,tcp_schedule_loss_probe,tcp_connect_init,tcp_connect,tcp_write_xmit,tcp_push_one +u32 icsk_rto_min +u32 icsk_delack_max +u32 icsk_pmtu_cookie read_write tcp_sync_mss,tcp_current_mss,tcp_send_syn_data,tcp_connect_init,tcp_connect +struct tcp_congestion_ops icsk_ca_ops read_write tcp_cwnd_validate,tcp_tso_segs,tcp_ca_dst_init,tcp_connect_init,tcp_connect,tcp_write_xmit +struct inet_connection_sock_af_ops icsk_af_ops read_mostly tcp_finish_connect,tcp_send_syn_data,tcp_mtup_init,tcp_mtu_check_reprobe,tcp_mtu_probe,tcp_connect_init,tcp_connect,__tcp_transmit_skb +struct tcp_ulp_ops* icsk_ulp_ops +void* icsk_ulp_data +u8:5 icsk_ca_state read_write tcp_cwnd_application_limited,tcp_set_ca_state,tcp_enter_cwr,tcp_tso_should_defer,tcp_mtu_probe,tcp_schedule_loss_probe,tcp_write_xmit,__tcp_transmit_skb +u8:1 icsk_ca_initialized read_write tcp_init_transfer,tcp_init_congestion_control,tcp_init_transfer,tcp_finish_connect,tcp_connect +u8:1 icsk_ca_setsockopt +u8:1 icsk_ca_dst_locked write_mostly tcp_ca_dst_init,tcp_connect_init,tcp_connect +u8 icsk_retransmits write_mostly tcp_connect_init,tcp_connect +u8 icsk_pending read_write inet_csk_reset_xmit_timer,tcp_connect,tcp_check_probe_timer,__tcp_push_pending_frames,tcp_rearm_rto,tcp_event_new_data_sent,tcp_event_new_data_sent +u8 icsk_backoff write_mostly tcp_write_queue_purge,tcp_connect_init +u8 
icsk_syn_retries +u8 icsk_probes_out +u16 icsk_ext_hdr_len read_mostly __tcp_mtu_to_mss,tcp_mtu_to_rss,tcp_mtu_probe,tcp_write_xmit,tcp_mtu_to_mss, +struct icsk_ack_u8 pending read_write read_write inet_csk_ack_scheduled,__tcp_cleanup_rbuf,tcp_cleanup_rbuf,inet_csk_clear_xmit_timer,tcp_event_ack-sent,inet_csk_reset_xmit_timer +struct icsk_ack_u8 quick read_write write_mostly tcp_dec_quickack_mode,tcp_event_ack_sent,__tcp_transmit_skb,__tcp_select_window,__tcp_cleanup_rbuf +struct icsk_ack_u8 pingpong +struct icsk_ack_u8 retry write_mostly read_write inet_csk_clear_xmit_timer,tcp_rearm_rto,tcp_event_new_data_sent,tcp_write_xmit,__tcp_send_ack,tcp_send_ack, +struct icsk_ack_u8 ato read_mostly write_mostly tcp_dec_quickack_mode,tcp_event_ack_sent,__tcp_transmit_skb,__tcp_send_ack,tcp_send_ack +struct icsk_ack_unsigned_long timeout read_write read_write inet_csk_reset_xmit_timer,tcp_connect +struct icsk_ack_u32 lrcvtime read_write tcp_finish_connect,tcp_connect,tcp_event_data_sent,__tcp_transmit_skb +struct icsk_ack_u16 rcv_mss write_mostly read_mostly __tcp_select_window,__tcp_cleanup_rbuf,tcp_initialize_rcv_mss,tcp_connect_init +struct icsk_mtup_int search_high read_write tcp_mtup_init,tcp_sync_mss,tcp_connect_init,tcp_mtu_check_reprobe,tcp_write_xmit +struct icsk_mtup_int search_low read_write tcp_mtu_probe,tcp_mtu_check_reprobe,tcp_write_xmit,tcp_sync_mss,tcp_connect_init,tcp_mtup_init +struct icsk_mtup_u32:31 probe_size read_write tcp_mtup_init,tcp_connect_init,__tcp_transmit_skb +struct icsk_mtup_u32:1 enabled read_write tcp_mtup_init,tcp_sync_mss,tcp_connect_init,tcp_mtu_probe,tcp_write_xmit +struct icsk_mtup_u32 probe_timestamp read_write tcp_mtup_init,tcp_connect_init,tcp_mtu_check_reprobe,tcp_mtu_probe +u32 icsk_probes_tstamp +u32 icsk_user_timeout +u64[104/sizeof(u64)] icsk_ca_priv +=================================== ====================== =================== =================== ======================================================================================================================================================== diff --git a/Documentation/networking/net_cachelines/inet_sock.rst b/Documentation/networking/net_cachelines/inet_sock.rst index 595d7ef5fc8b..b11bf48fa2b3 100644 --- a/Documentation/networking/net_cachelines/inet_sock.rst +++ b/Documentation/networking/net_cachelines/inet_sock.rst @@ -5,40 +5,42 @@ inet_sock struct fast path usage breakdown ========================================== +======================= ===================== =================== =================== ====================================================================================================== Type Name fastpath_tx_access fastpath_rx_access comment -..struct ..inet_sock -struct_sock sk read_mostly read_mostly tcp_init_buffer_space,tcp_init_transfer,tcp_finish_connect,tcp_connect,tcp_send_rcvq,tcp_send_syn_data -struct_ipv6_pinfo* pinet6 - - -be16 inet_sport read_mostly - __tcp_transmit_skb -be32 inet_daddr read_mostly - ip_select_ident_segs -be32 inet_rcv_saddr - - -be16 inet_dport read_mostly - __tcp_transmit_skb -u16 inet_num - - -be32 inet_saddr - - -s16 uc_ttl read_mostly - __ip_queue_xmit/ip_select_ttl -u16 cmsg_flags - - -struct_ip_options_rcu* inet_opt read_mostly - __ip_queue_xmit -u16 inet_id read_mostly - ip_select_ident_segs -u8 tos read_mostly - ip_queue_xmit -u8 min_ttl - - -u8 mc_ttl - - -u8 pmtudisc - - -u8:1 recverr - - -u8:1 is_icsk - - -u8:1 freebind - - -u8:1 hdrincl - - -u8:1 mc_loop - - -u8:1 transparent - - -u8:1 mc_all - - -u8:1 nodefrag - - -u8:1 
bind_address_no_port - - -u8:1 recverr_rfc4884 - - -u8:1 defer_connect read_mostly - tcp_sendmsg_fastopen -u8 rcv_tos - - -u8 convert_csum - - -int uc_index - - -int mc_index - - -be32 mc_addr - - -struct_ip_mc_socklist* mc_list - - -struct_inet_cork_full cork read_mostly - __tcp_transmit_skb -struct local_port_range - - +======================= ===================== =================== =================== ====================================================================================================== +struct sock sk read_mostly read_mostly tcp_init_buffer_space,tcp_init_transfer,tcp_finish_connect,tcp_connect,tcp_send_rcvq,tcp_send_syn_data +struct ipv6_pinfo* pinet6 +be16 inet_sport read_mostly __tcp_transmit_skb +be32 inet_daddr read_mostly ip_select_ident_segs +be32 inet_rcv_saddr +be16 inet_dport read_mostly __tcp_transmit_skb +u16 inet_num +be32 inet_saddr +s16 uc_ttl read_mostly __ip_queue_xmit/ip_select_ttl +u16 cmsg_flags +struct ip_options_rcu* inet_opt read_mostly __ip_queue_xmit +u16 inet_id read_mostly ip_select_ident_segs +u8 tos read_mostly ip_queue_xmit +u8 min_ttl +u8 mc_ttl +u8 pmtudisc +u8:1 recverr +u8:1 is_icsk +u8:1 freebind +u8:1 hdrincl +u8:1 mc_loop +u8:1 transparent +u8:1 mc_all +u8:1 nodefrag +u8:1 bind_address_no_port +u8:1 recverr_rfc4884 +u8:1 defer_connect read_mostly tcp_sendmsg_fastopen +u8 rcv_tos +u8 convert_csum +int uc_index +int mc_index +be32 mc_addr +struct ip_mc_socklist* mc_list +struct inet_cork_full cork read_mostly __tcp_transmit_skb +struct local_port_range +======================= ===================== =================== =================== ====================================================================================================== diff --git a/Documentation/networking/net_cachelines/net_device.rst b/Documentation/networking/net_cachelines/net_device.rst index 22b07c814f4a..db6192b2bb50 100644 --- a/Documentation/networking/net_cachelines/net_device.rst +++ b/Documentation/networking/net_cachelines/net_device.rst @@ -5,181 +5,188 @@ net_device struct fast path usage breakdown =========================================== -Type Name fastpath_tx_access fastpath_rx_access Comments -..struct ..net_device -unsigned_long:32 priv_flags read_mostly - __dev_queue_xmit(tx) -unsigned_long:1 lltx read_mostly - HARD_TX_LOCK,HARD_TX_TRYLOCK,HARD_TX_UNLOCK(tx) -char name[16] - - -struct_netdev_name_node* name_node -struct_dev_ifalias* ifalias -unsigned_long mem_end -unsigned_long mem_start -unsigned_long base_addr -unsigned_long state read_mostly read_mostly netif_running(dev) -struct_list_head dev_list -struct_list_head napi_list -struct_list_head unreg_list -struct_list_head close_list -struct_list_head ptype_all read_mostly - dev_nit_active(tx) -struct_list_head ptype_specific read_mostly deliver_ptype_list_skb/__netif_receive_skb_core(rx) -struct adj_list -unsigned_int flags read_mostly read_mostly __dev_queue_xmit,__dev_xmit_skb,ip6_output,__ip6_finish_output(tx);ip6_rcv_core(rx) -xdp_features_t xdp_features -struct_net_device_ops* netdev_ops read_mostly - netdev_core_pick_tx,netdev_start_xmit(tx) -struct_xdp_metadata_ops* xdp_metadata_ops -int ifindex - read_mostly ip6_rcv_core -unsigned_short gflags -unsigned_short hard_header_len read_mostly read_mostly ip6_xmit(tx);gro_list_prepare(rx) -unsigned_int mtu read_mostly - ip_finish_output2 -unsigned_short needed_headroom read_mostly - LL_RESERVED_SPACE/ip_finish_output2 -unsigned_short needed_tailroom -netdev_features_t features read_mostly read_mostly 
HARD_TX_LOCK,netif_skb_features,sk_setup_caps(tx);netif_elide_gro(rx) -netdev_features_t hw_features -netdev_features_t wanted_features -netdev_features_t vlan_features -netdev_features_t hw_enc_features - - netif_skb_features -netdev_features_t mpls_features -netdev_features_t gso_partial_features read_mostly gso_features_check -unsigned_int min_mtu -unsigned_int max_mtu -unsigned_short type -unsigned_char min_header_len -unsigned_char name_assign_type -int group -struct_net_device_stats stats -struct_net_device_core_stats* core_stats -atomic_t carrier_up_count -atomic_t carrier_down_count -struct_iw_handler_def* wireless_handlers -struct_iw_public_data* wireless_data -struct_ethtool_ops* ethtool_ops -struct_l3mdev_ops* l3mdev_ops -struct_ndisc_ops* ndisc_ops -struct_xfrmdev_ops* xfrmdev_ops -struct_tlsdev_ops* tlsdev_ops -struct_header_ops* header_ops read_mostly - ip_finish_output2,ip6_finish_output2(tx) -unsigned_char operstate -unsigned_char link_mode -unsigned_char if_port -unsigned_char dma -unsigned_char perm_addr[32] -unsigned_char addr_assign_type -unsigned_char addr_len -unsigned_char upper_level -unsigned_char lower_level -unsigned_short neigh_priv_len -unsigned_short padded -unsigned_short dev_id -unsigned_short dev_port -spinlock_t addr_list_lock -int irq -struct_netdev_hw_addr_list uc -struct_netdev_hw_addr_list mc -struct_netdev_hw_addr_list dev_addrs -struct_kset* queues_kset -struct_list_head unlink_list -unsigned_int promiscuity -unsigned_int allmulti -bool uc_promisc -unsigned_char nested_level -struct_in_device* ip_ptr read_mostly read_mostly __in_dev_get -struct_inet6_dev* ip6_ptr read_mostly read_mostly __in6_dev_get -struct_vlan_info* vlan_info -struct_dsa_port* dsa_ptr -struct_tipc_bearer* tipc_ptr -void* atalk_ptr -void* ax25_ptr -struct_wireless_dev* ieee80211_ptr -struct_wpan_dev* ieee802154_ptr -struct_mpls_dev* mpls_ptr -struct_mctp_dev* mctp_ptr -unsigned_char* dev_addr -struct_netdev_queue* _rx read_mostly - netdev_get_rx_queue(rx) -unsigned_int num_rx_queues -unsigned_int real_num_rx_queues - read_mostly get_rps_cpu -struct_bpf_prog* xdp_prog - read_mostly netif_elide_gro() -unsigned_long gro_flush_timeout - read_mostly napi_complete_done -u32 napi_defer_hard_irqs - read_mostly napi_complete_done -unsigned_int gro_max_size - read_mostly skb_gro_receive -unsigned_int gro_ipv4_max_size - read_mostly skb_gro_receive -rx_handler_func_t* rx_handler read_mostly - __netif_receive_skb_core -void* rx_handler_data read_mostly - -struct_netdev_queue* ingress_queue read_mostly - -struct_bpf_mprog_entry tcx_ingress - read_mostly sch_handle_ingress -struct_nf_hook_entries* nf_hooks_ingress -unsigned_char broadcast[32] -struct_cpu_rmap* rx_cpu_rmap -struct_hlist_node index_hlist -struct_netdev_queue* _tx read_mostly - netdev_get_tx_queue(tx) -unsigned_int num_tx_queues - - -unsigned_int real_num_tx_queues read_mostly - skb_tx_hash,netdev_core_pick_tx(tx) -unsigned_int tx_queue_len -spinlock_t tx_global_lock -struct_xdp_dev_bulk_queue__percpu* xdp_bulkq -struct_xps_dev_maps* xps_maps[2] read_mostly - __netif_set_xps_queue -struct_bpf_mprog_entry tcx_egress read_mostly - sch_handle_egress -struct_nf_hook_entries* nf_hooks_egress read_mostly - -struct_hlist_head qdisc_hash[16] -struct_timer_list watchdog_timer -int watchdog_timeo -u32 proto_down_reason -struct_list_head todo_list -int__percpu* pcpu_refcnt -refcount_t dev_refcnt -struct_ref_tracker_dir refcnt_tracker -struct_list_head link_watch_list -enum:8 reg_state -bool dismantle -enum:16 rtnl_link_state -bool 
needs_free_netdev -void*priv_destructor struct_net_device -struct_netpoll_info* npinfo - read_mostly napi_poll/napi_poll_lock -possible_net_t nd_net - read_mostly (dev_net)napi_busy_loop,tcp_v(4/6)_rcv,ip(v6)_rcv,ip(6)_input,ip(6)_input_finish -void* ml_priv -enum_netdev_ml_priv_type ml_priv_type -struct_pcpu_lstats__percpu* lstats read_mostly dev_lstats_add() -struct_pcpu_sw_netstats__percpu* tstats read_mostly dev_sw_netstats_tx_add() -struct_pcpu_dstats__percpu* dstats -struct_garp_port* garp_port -struct_mrp_port* mrp_port -struct_dm_hw_stat_delta* dm_private -struct_device dev - - -struct_attribute_group* sysfs_groups[4] -struct_attribute_group* sysfs_rx_queue_group -struct_rtnl_link_ops* rtnl_link_ops -unsigned_int gso_max_size read_mostly - sk_dst_gso_max_size -unsigned_int tso_max_size -u16 gso_max_segs read_mostly - gso_max_segs -u16 tso_max_segs -unsigned_int gso_ipv4_max_size read_mostly - sk_dst_gso_max_size -struct_dcbnl_rtnl_ops* dcbnl_ops -s16 num_tc read_mostly - skb_tx_hash -struct_netdev_tc_txq tc_to_txq[16] read_mostly - skb_tx_hash -u8 prio_tc_map[16] -unsigned_int fcoe_ddp_xid -struct_netprio_map* priomap -struct_phy_device* phydev -struct_sfp_bus* sfp_bus -struct_lock_class_key* qdisc_tx_busylock -bool proto_down -unsigned:1 wol_enabled -unsigned:1 threaded - - napi_poll(napi_enable,dev_set_threaded) -unsigned_long:1 see_all_hwtstamp_requests -unsigned_long:1 change_proto_down -unsigned_long:1 netns_local -unsigned_long:1 fcoe_mtu -struct_list_head net_notifier_list -struct_macsec_ops* macsec_ops -struct_udp_tunnel_nic_info* udp_tunnel_nic_info -struct_udp_tunnel_nic* udp_tunnel_nic -unsigned_int xdp_zc_max_segs -struct_bpf_xdp_entity xdp_state[3] -u8 dev_addr_shadow[32] -netdevice_tracker linkwatch_dev_tracker -netdevice_tracker watchdog_dev_tracker -netdevice_tracker dev_registered_tracker -struct_rtnl_hw_stats64* offload_xstats_l3 -struct_devlink_port* devlink_port -struct_dpll_pin* dpll_pin +=================================== =========================== =================== =================== =================================================================================== +Type Name fastpath_tx_access fastpath_rx_access Comments +=================================== =========================== =================== =================== =================================================================================== +unsigned_long:32 priv_flags read_mostly __dev_queue_xmit(tx) +unsigned_long:1 lltx read_mostly HARD_TX_LOCK,HARD_TX_TRYLOCK,HARD_TX_UNLOCK(tx) +char name[16] +struct netdev_name_node* name_node +struct dev_ifalias* ifalias +unsigned_long mem_end +unsigned_long mem_start +unsigned_long base_addr +unsigned_long state read_mostly read_mostly netif_running(dev) +struct list_head dev_list +struct list_head napi_list +struct list_head unreg_list +struct list_head close_list +struct list_head ptype_all read_mostly dev_nit_active(tx) +struct list_head ptype_specific read_mostly deliver_ptype_list_skb/__netif_receive_skb_core(rx) +struct adj_list +unsigned_int flags read_mostly read_mostly __dev_queue_xmit,__dev_xmit_skb,ip6_output,__ip6_finish_output(tx);ip6_rcv_core(rx) +xdp_features_t xdp_features +struct net_device_ops* netdev_ops read_mostly netdev_core_pick_tx,netdev_start_xmit(tx) +struct xdp_metadata_ops* xdp_metadata_ops +int ifindex read_mostly ip6_rcv_core +unsigned_short gflags +unsigned_short hard_header_len read_mostly read_mostly ip6_xmit(tx);gro_list_prepare(rx) +unsigned_int mtu read_mostly ip_finish_output2 +unsigned_short needed_headroom 
read_mostly LL_RESERVED_SPACE/ip_finish_output2 +unsigned_short needed_tailroom +netdev_features_t features read_mostly read_mostly HARD_TX_LOCK,netif_skb_features,sk_setup_caps(tx);netif_elide_gro(rx) +netdev_features_t hw_features +netdev_features_t wanted_features +netdev_features_t vlan_features +netdev_features_t hw_enc_features netif_skb_features +netdev_features_t mpls_features +netdev_features_t gso_partial_features read_mostly gso_features_check +unsigned_int min_mtu +unsigned_int max_mtu +unsigned_short type +unsigned_char min_header_len +unsigned_char name_assign_type +int group +struct net_device_stats stats +struct net_device_core_stats* core_stats +atomic_t carrier_up_count +atomic_t carrier_down_count +struct iw_handler_def* wireless_handlers +struct iw_public_data* wireless_data +struct ethtool_ops* ethtool_ops +struct l3mdev_ops* l3mdev_ops +struct ndisc_ops* ndisc_ops +struct xfrmdev_ops* xfrmdev_ops +struct tlsdev_ops* tlsdev_ops +struct header_ops* header_ops read_mostly ip_finish_output2,ip6_finish_output2(tx) +unsigned_char operstate +unsigned_char link_mode +unsigned_char if_port +unsigned_char dma +unsigned_char perm_addr[32] +unsigned_char addr_assign_type +unsigned_char addr_len +unsigned_char upper_level +unsigned_char lower_level +unsigned_short neigh_priv_len +unsigned_short padded +unsigned_short dev_id +unsigned_short dev_port +spinlock_t addr_list_lock +int irq +struct netdev_hw_addr_list uc +struct netdev_hw_addr_list mc +struct netdev_hw_addr_list dev_addrs +struct kset* queues_kset +struct list_head unlink_list +unsigned_int promiscuity +unsigned_int allmulti +bool uc_promisc +unsigned_char nested_level +struct in_device* ip_ptr read_mostly read_mostly __in_dev_get +struct hlist_head fib_nh_head +struct inet6_dev* ip6_ptr read_mostly read_mostly __in6_dev_get +struct vlan_info* vlan_info +struct dsa_port* dsa_ptr +struct tipc_bearer* tipc_ptr +void* atalk_ptr +void* ax25_ptr +struct wireless_dev* ieee80211_ptr +struct wpan_dev* ieee802154_ptr +struct mpls_dev* mpls_ptr +struct mctp_dev* mctp_ptr +unsigned_char* dev_addr +struct netdev_queue* _rx read_mostly netdev_get_rx_queue(rx) +unsigned_int num_rx_queues +unsigned_int real_num_rx_queues read_mostly get_rps_cpu +struct bpf_prog* xdp_prog read_mostly netif_elide_gro() +unsigned_long gro_flush_timeout read_mostly napi_complete_done +u32 napi_defer_hard_irqs read_mostly napi_complete_done +unsigned_int gro_max_size read_mostly skb_gro_receive +unsigned_int gro_ipv4_max_size read_mostly skb_gro_receive +rx_handler_func_t* rx_handler read_mostly __netif_receive_skb_core +void* rx_handler_data read_mostly +struct netdev_queue* ingress_queue read_mostly +struct bpf_mprog_entry tcx_ingress read_mostly sch_handle_ingress +struct nf_hook_entries* nf_hooks_ingress +unsigned_char broadcast[32] +struct cpu_rmap* rx_cpu_rmap +struct hlist_node index_hlist +struct netdev_queue* _tx read_mostly netdev_get_tx_queue(tx) +unsigned_int num_tx_queues +unsigned_int real_num_tx_queues read_mostly skb_tx_hash,netdev_core_pick_tx(tx) +unsigned_int tx_queue_len +spinlock_t tx_global_lock +struct xdp_dev_bulk_queue__percpu* xdp_bulkq +struct xps_dev_maps* xps_maps[2] read_mostly __netif_set_xps_queue +struct bpf_mprog_entry tcx_egress read_mostly sch_handle_egress +struct nf_hook_entries* nf_hooks_egress read_mostly +struct hlist_head qdisc_hash[16] +struct timer_list watchdog_timer +int watchdog_timeo +u32 proto_down_reason +struct list_head todo_list +int__percpu* pcpu_refcnt +refcount_t dev_refcnt +struct ref_tracker_dir 
refcnt_tracker +struct list_head link_watch_list +enum:8 reg_state +bool dismantle +enum:16 rtnl_link_state +bool needs_free_netdev +void*priv_destructor struct net_device +struct netpoll_info* npinfo read_mostly napi_poll/napi_poll_lock +possible_net_t nd_net read_mostly (dev_net)napi_busy_loop,tcp_v(4/6)_rcv,ip(v6)_rcv,ip(6)_input,ip(6)_input_finish +void* ml_priv +enum_netdev_ml_priv_type ml_priv_type +struct pcpu_lstats__percpu* lstats read_mostly dev_lstats_add() +struct pcpu_sw_netstats__percpu* tstats read_mostly dev_sw_netstats_tx_add() +struct pcpu_dstats__percpu* dstats +struct garp_port* garp_port +struct mrp_port* mrp_port +struct dm_hw_stat_delta* dm_private +struct device dev +struct attribute_group* sysfs_groups[4] +struct attribute_group* sysfs_rx_queue_group +struct rtnl_link_ops* rtnl_link_ops +unsigned_int gso_max_size read_mostly sk_dst_gso_max_size +unsigned_int tso_max_size +u16 gso_max_segs read_mostly gso_max_segs +u16 tso_max_segs +unsigned_int gso_ipv4_max_size read_mostly sk_dst_gso_max_size +struct dcbnl_rtnl_ops* dcbnl_ops +s16 num_tc read_mostly skb_tx_hash +struct netdev_tc_txq tc_to_txq[16] read_mostly skb_tx_hash +u8 prio_tc_map[16] +unsigned_int fcoe_ddp_xid +struct netprio_map* priomap +struct phy_device* phydev +struct sfp_bus* sfp_bus +struct lock_class_key* qdisc_tx_busylock +bool proto_down +unsigned:1 wol_enabled +unsigned:1 threaded napi_poll(napi_enable,dev_set_threaded) +unsigned_long:1 see_all_hwtstamp_requests +unsigned_long:1 change_proto_down +unsigned_long:1 netns_local +unsigned_long:1 fcoe_mtu +struct list_head net_notifier_list +struct macsec_ops* macsec_ops +struct udp_tunnel_nic_info* udp_tunnel_nic_info +struct udp_tunnel_nic* udp_tunnel_nic +unsigned_int xdp_zc_max_segs +struct bpf_xdp_entity xdp_state[3] +u8 dev_addr_shadow[32] +netdevice_tracker linkwatch_dev_tracker +netdevice_tracker watchdog_dev_tracker +netdevice_tracker dev_registered_tracker +struct rtnl_hw_stats64* offload_xstats_l3 +struct devlink_port* devlink_port +struct dpll_pin* dpll_pin struct hlist_head page_pools struct dim_irq_moder* irq_moder +u64 max_pacing_offload_horizon +struct_napi_config* napi_config +unsigned_long gro_flush_timeout +u32 napi_defer_hard_irqs +=================================== =========================== =================== =================== =================================================================================== diff --git a/Documentation/networking/net_cachelines/netns_ipv4_sysctl.rst b/Documentation/networking/net_cachelines/netns_ipv4_sysctl.rst index 9b87089a84c6..629da6dc6d74 100644 --- a/Documentation/networking/net_cachelines/netns_ipv4_sysctl.rst +++ b/Documentation/networking/net_cachelines/netns_ipv4_sysctl.rst @@ -5,154 +5,156 @@ netns_ipv4 struct fast path usage breakdown =========================================== +=============================== ============================================ =================== =================== ================================================= Type Name fastpath_tx_access fastpath_rx_access comment -..struct ..netns_ipv4 -struct_inet_timewait_death_row tcp_death_row -struct_udp_table* udp_table -struct_ctl_table_header* forw_hdr -struct_ctl_table_header* frags_hdr -struct_ctl_table_header* ipv4_hdr -struct_ctl_table_header* route_hdr -struct_ctl_table_header* xfrm4_hdr -struct_ipv4_devconf* devconf_all -struct_ipv4_devconf* devconf_dflt -struct_ip_ra_chain ra_chain -struct_mutex ra_mutex -struct_fib_rules_ops* rules_ops -struct_fib_table fib_main -struct_fib_table fib_default 
-unsigned_int fib_rules_require_fldissect -bool fib_has_custom_rules -bool fib_has_custom_local_routes -bool fib_offload_disabled -atomic_t fib_num_tclassid_users -struct_hlist_head* fib_table_hash -struct_sock* fibnl -struct_sock* mc_autojoin_sk -struct_inet_peer_base* peers -struct_fqdir* fqdir -u8 sysctl_icmp_echo_ignore_all -u8 sysctl_icmp_echo_enable_probe -u8 sysctl_icmp_echo_ignore_broadcasts -u8 sysctl_icmp_ignore_bogus_error_responses -u8 sysctl_icmp_errors_use_inbound_ifaddr -int sysctl_icmp_ratelimit -int sysctl_icmp_ratemask -u32 ip_rt_min_pmtu - - -int ip_rt_mtu_expires - - -int ip_rt_min_advmss - - -struct_local_ports ip_local_ports - - -u8 sysctl_tcp_ecn - - -u8 sysctl_tcp_ecn_fallback - - -u8 sysctl_ip_default_ttl - - ip4_dst_hoplimit/ip_select_ttl -u8 sysctl_ip_no_pmtu_disc - - -u8 sysctl_ip_fwd_use_pmtu read_mostly - ip_dst_mtu_maybe_forward/ip_skb_dst_mtu -u8 sysctl_ip_fwd_update_priority - - ip_forward -u8 sysctl_ip_nonlocal_bind - - -u8 sysctl_ip_autobind_reuse - - -u8 sysctl_ip_dynaddr - - -u8 sysctl_ip_early_demux - read_mostly ip(6)_rcv_finish_core -u8 sysctl_raw_l3mdev_accept - - -u8 sysctl_tcp_early_demux - read_mostly ip(6)_rcv_finish_core -u8 sysctl_udp_early_demux -u8 sysctl_nexthop_compat_mode - - -u8 sysctl_fwmark_reflect - - -u8 sysctl_tcp_fwmark_accept - - -u8 sysctl_tcp_l3mdev_accept - - -u8 sysctl_tcp_mtu_probing - - -int sysctl_tcp_mtu_probe_floor - - -int sysctl_tcp_base_mss - - -int sysctl_tcp_min_snd_mss read_mostly - __tcp_mtu_to_mss(tcp_write_xmit) -int sysctl_tcp_probe_threshold - - tcp_mtu_probe(tcp_write_xmit) -u32 sysctl_tcp_probe_interval - - tcp_mtu_check_reprobe(tcp_write_xmit) -int sysctl_tcp_keepalive_time - - -int sysctl_tcp_keepalive_intvl - - -u8 sysctl_tcp_keepalive_probes - - -u8 sysctl_tcp_syn_retries - - -u8 sysctl_tcp_synack_retries - - -u8 sysctl_tcp_syncookies - - generated_on_syn -u8 sysctl_tcp_migrate_req - - reuseport -u8 sysctl_tcp_comp_sack_nr - - __tcp_ack_snd_check -int sysctl_tcp_reordering - read_mostly tcp_may_raise_cwnd/tcp_cong_control -u8 sysctl_tcp_retries1 - - -u8 sysctl_tcp_retries2 - - -u8 sysctl_tcp_orphan_retries - - -u8 sysctl_tcp_tw_reuse - - timewait_sock_ops -int sysctl_tcp_fin_timeout - - TCP_LAST_ACK/tcp_rcv_state_process -unsigned_int sysctl_tcp_notsent_lowat read_mostly - tcp_notsent_lowat/tcp_stream_memory_free -u8 sysctl_tcp_sack - - tcp_syn_options -u8 sysctl_tcp_window_scaling - - tcp_syn_options,tcp_parse_options -u8 sysctl_tcp_timestamps -u8 sysctl_tcp_early_retrans read_mostly - tcp_schedule_loss_probe(tcp_write_xmit) -u8 sysctl_tcp_recovery - - tcp_fastretrans_alert -u8 sysctl_tcp_thin_linear_timeouts - - tcp_retrans_timer(on_thin_streams) -u8 sysctl_tcp_slow_start_after_idle - - unlikely(tcp_cwnd_validate-network-not-starved) -u8 sysctl_tcp_retrans_collapse - - -u8 sysctl_tcp_stdurg - - unlikely(tcp_check_urg) -u8 sysctl_tcp_rfc1337 - - -u8 sysctl_tcp_abort_on_overflow - - -u8 sysctl_tcp_fack - - -int sysctl_tcp_max_reordering - - tcp_check_sack_reordering -int sysctl_tcp_adv_win_scale - - tcp_init_buffer_space -u8 sysctl_tcp_dsack - - partial_packet_or_retrans_in_tcp_data_queue -u8 sysctl_tcp_app_win - - tcp_win_from_space -u8 sysctl_tcp_frto - - tcp_enter_loss -u8 sysctl_tcp_nometrics_save - - TCP_LAST_ACK/tcp_update_metrics -u8 sysctl_tcp_no_ssthresh_metrics_save - - TCP_LAST_ACK/tcp_(update/init)_metrics +=============================== ============================================ =================== =================== ================================================= 
+struct_inet_timewait_death_row tcp_death_row +struct_udp_table* udp_table +struct_ctl_table_header* forw_hdr +struct_ctl_table_header* frags_hdr +struct_ctl_table_header* ipv4_hdr +struct_ctl_table_header* route_hdr +struct_ctl_table_header* xfrm4_hdr +struct_ipv4_devconf* devconf_all +struct_ipv4_devconf* devconf_dflt +struct_ip_ra_chain ra_chain +struct_mutex ra_mutex +struct_fib_rules_ops* rules_ops +struct_fib_table fib_main +struct_fib_table fib_default +unsigned_int fib_rules_require_fldissect +bool fib_has_custom_rules +bool fib_has_custom_local_routes +bool fib_offload_disabled +atomic_t fib_num_tclassid_users +struct_hlist_head* fib_table_hash +struct_sock* fibnl +struct_sock* mc_autojoin_sk +struct_inet_peer_base* peers +struct_fqdir* fqdir +u8 sysctl_icmp_echo_ignore_all +u8 sysctl_icmp_echo_enable_probe +u8 sysctl_icmp_echo_ignore_broadcasts +u8 sysctl_icmp_ignore_bogus_error_responses +u8 sysctl_icmp_errors_use_inbound_ifaddr +int sysctl_icmp_ratelimit +int sysctl_icmp_ratemask +u32 ip_rt_min_pmtu +int ip_rt_mtu_expires +int ip_rt_min_advmss +struct_local_ports ip_local_ports +u8 sysctl_tcp_ecn +u8 sysctl_tcp_ecn_fallback +u8 sysctl_ip_default_ttl ip4_dst_hoplimit/ip_select_ttl +u8 sysctl_ip_no_pmtu_disc +u8 sysctl_ip_fwd_use_pmtu read_mostly ip_dst_mtu_maybe_forward/ip_skb_dst_mtu +u8 sysctl_ip_fwd_update_priority ip_forward +u8 sysctl_ip_nonlocal_bind +u8 sysctl_ip_autobind_reuse +u8 sysctl_ip_dynaddr +u8 sysctl_ip_early_demux read_mostly ip(6)_rcv_finish_core +u8 sysctl_raw_l3mdev_accept +u8 sysctl_tcp_early_demux read_mostly ip(6)_rcv_finish_core +u8 sysctl_udp_early_demux +u8 sysctl_nexthop_compat_mode +u8 sysctl_fwmark_reflect +u8 sysctl_tcp_fwmark_accept +u8 sysctl_tcp_l3mdev_accept read_mostly __inet6_lookup_established/inet_request_bound_dev_if +u8 sysctl_tcp_mtu_probing +int sysctl_tcp_mtu_probe_floor +int sysctl_tcp_base_mss +int sysctl_tcp_min_snd_mss read_mostly __tcp_mtu_to_mss(tcp_write_xmit) +int sysctl_tcp_probe_threshold tcp_mtu_probe(tcp_write_xmit) +u32 sysctl_tcp_probe_interval tcp_mtu_check_reprobe(tcp_write_xmit) +int sysctl_tcp_keepalive_time +int sysctl_tcp_keepalive_intvl +u8 sysctl_tcp_keepalive_probes +u8 sysctl_tcp_syn_retries +u8 sysctl_tcp_synack_retries +u8 sysctl_tcp_syncookies generated_on_syn +u8 sysctl_tcp_migrate_req reuseport +u8 sysctl_tcp_comp_sack_nr __tcp_ack_snd_check +int sysctl_tcp_reordering read_mostly tcp_may_raise_cwnd/tcp_cong_control +u8 sysctl_tcp_retries1 +u8 sysctl_tcp_retries2 +u8 sysctl_tcp_orphan_retries +u8 sysctl_tcp_tw_reuse timewait_sock_ops +int sysctl_tcp_fin_timeout TCP_LAST_ACK/tcp_rcv_state_process +unsigned_int sysctl_tcp_notsent_lowat read_mostly tcp_notsent_lowat/tcp_stream_memory_free +u8 sysctl_tcp_sack tcp_syn_options +u8 sysctl_tcp_window_scaling tcp_syn_options,tcp_parse_options +u8 sysctl_tcp_timestamps +u8 sysctl_tcp_early_retrans read_mostly tcp_schedule_loss_probe(tcp_write_xmit) +u8 sysctl_tcp_recovery tcp_fastretrans_alert +u8 sysctl_tcp_thin_linear_timeouts tcp_retrans_timer(on_thin_streams) +u8 sysctl_tcp_slow_start_after_idle unlikely(tcp_cwnd_validate-network-not-starved) +u8 sysctl_tcp_retrans_collapse +u8 sysctl_tcp_stdurg unlikely(tcp_check_urg) +u8 sysctl_tcp_rfc1337 +u8 sysctl_tcp_abort_on_overflow +u8 sysctl_tcp_fack +int sysctl_tcp_max_reordering tcp_check_sack_reordering +int sysctl_tcp_adv_win_scale tcp_init_buffer_space +u8 sysctl_tcp_dsack partial_packet_or_retrans_in_tcp_data_queue +u8 sysctl_tcp_app_win tcp_win_from_space +u8 sysctl_tcp_frto tcp_enter_loss +u8 
sysctl_tcp_nometrics_save TCP_LAST_ACK/tcp_update_metrics +u8 sysctl_tcp_no_ssthresh_metrics_save TCP_LAST_ACK/tcp_(update/init)_metrics u8 sysctl_tcp_moderate_rcvbuf read_mostly read_mostly tcp_tso_should_defer(tx);tcp_rcv_space_adjust(rx) -u8 sysctl_tcp_tso_win_divisor read_mostly - tcp_tso_should_defer(tcp_write_xmit) -u8 sysctl_tcp_workaround_signed_windows - - tcp_select_window -int sysctl_tcp_limit_output_bytes read_mostly - tcp_small_queue_check(tcp_write_xmit) -int sysctl_tcp_challenge_ack_limit - - -int sysctl_tcp_min_rtt_wlen read_mostly - tcp_ack_update_rtt -u8 sysctl_tcp_min_tso_segs - - unlikely(icsk_ca_ops-written) -u8 sysctl_tcp_tso_rtt_log read_mostly - tcp_tso_autosize -u8 sysctl_tcp_autocorking read_mostly - tcp_push/tcp_should_autocork -u8 sysctl_tcp_reflect_tos - - tcp_v(4/6)_send_synack -int sysctl_tcp_invalid_ratelimit - - -int sysctl_tcp_pacing_ss_ratio - - default_cong_cont(tcp_update_pacing_rate) -int sysctl_tcp_pacing_ca_ratio - - default_cong_cont(tcp_update_pacing_rate) -int sysctl_tcp_wmem[3] read_mostly - tcp_wmem_schedule(sendmsg/sendpage) -int sysctl_tcp_rmem[3] - read_mostly __tcp_grow_window(tx),tcp_rcv_space_adjust(rx) -unsigned_int sysctl_tcp_child_ehash_entries -unsigned_long sysctl_tcp_comp_sack_delay_ns - - __tcp_ack_snd_check -unsigned_long sysctl_tcp_comp_sack_slack_ns - - __tcp_ack_snd_check -int sysctl_max_syn_backlog - - -int sysctl_tcp_fastopen - - -struct_tcp_congestion_ops tcp_congestion_control - - init_cc -struct_tcp_fastopen_context tcp_fastopen_ctx - - -unsigned_int sysctl_tcp_fastopen_blackhole_timeout - - -atomic_t tfo_active_disable_times - - -unsigned_long tfo_active_disable_stamp - - -u32 tcp_challenge_timestamp - - -u32 tcp_challenge_count - - -u8 sysctl_tcp_plb_enabled - - -u8 sysctl_tcp_plb_idle_rehash_rounds - - -u8 sysctl_tcp_plb_rehash_rounds - - -u8 sysctl_tcp_plb_suspend_rto_sec - - -int sysctl_tcp_plb_cong_thresh - - -int sysctl_udp_wmem_min -int sysctl_udp_rmem_min -u8 sysctl_fib_notify_on_flag_change -u8 sysctl_udp_l3mdev_accept -u8 sysctl_igmp_llm_reports -int sysctl_igmp_max_memberships -int sysctl_igmp_max_msf -int sysctl_igmp_qrv -struct_ping_group_range ping_group_range -atomic_t dev_addr_genid -unsigned_int sysctl_udp_child_hash_entries -unsigned_long* sysctl_local_reserved_ports -int sysctl_ip_prot_sock -struct_mr_table* mrt -struct_list_head mr_tables -struct_fib_rules_ops* mr_rules_ops -u32 sysctl_fib_multipath_hash_fields -u8 sysctl_fib_multipath_use_neigh -u8 sysctl_fib_multipath_hash_policy -struct_fib_notifier_ops* notifier_ops -unsigned_int fib_seq -struct_fib_notifier_ops* ipmr_notifier_ops -unsigned_int ipmr_seq -atomic_t rt_genid -siphash_key_t ip_id_key +u8 sysctl_tcp_tso_win_divisor read_mostly tcp_tso_should_defer(tcp_write_xmit) +u8 sysctl_tcp_workaround_signed_windows tcp_select_window +int sysctl_tcp_limit_output_bytes read_mostly tcp_small_queue_check(tcp_write_xmit) +int sysctl_tcp_challenge_ack_limit +int sysctl_tcp_min_rtt_wlen read_mostly tcp_ack_update_rtt +u8 sysctl_tcp_min_tso_segs unlikely(icsk_ca_ops-written) +u8 sysctl_tcp_tso_rtt_log read_mostly tcp_tso_autosize +u8 sysctl_tcp_autocorking read_mostly tcp_push/tcp_should_autocork +u8 sysctl_tcp_reflect_tos tcp_v(4/6)_send_synack +int sysctl_tcp_invalid_ratelimit +int sysctl_tcp_pacing_ss_ratio default_cong_cont(tcp_update_pacing_rate) +int sysctl_tcp_pacing_ca_ratio default_cong_cont(tcp_update_pacing_rate) +int sysctl_tcp_wmem[3] read_mostly tcp_wmem_schedule(sendmsg/sendpage) +int sysctl_tcp_rmem[3] read_mostly 
__tcp_grow_window(tx),tcp_rcv_space_adjust(rx) +unsigned_int sysctl_tcp_child_ehash_entries +unsigned_long sysctl_tcp_comp_sack_delay_ns __tcp_ack_snd_check +unsigned_long sysctl_tcp_comp_sack_slack_ns __tcp_ack_snd_check +int sysctl_max_syn_backlog +int sysctl_tcp_fastopen +struct_tcp_congestion_ops tcp_congestion_control init_cc +struct_tcp_fastopen_context tcp_fastopen_ctx +unsigned_int sysctl_tcp_fastopen_blackhole_timeout +atomic_t tfo_active_disable_times +unsigned_long tfo_active_disable_stamp +u32 tcp_challenge_timestamp +u32 tcp_challenge_count +u8 sysctl_tcp_plb_enabled +u8 sysctl_tcp_plb_idle_rehash_rounds +u8 sysctl_tcp_plb_rehash_rounds +u8 sysctl_tcp_plb_suspend_rto_sec +int sysctl_tcp_plb_cong_thresh +int sysctl_udp_wmem_min +int sysctl_udp_rmem_min +u8 sysctl_fib_notify_on_flag_change +u8 sysctl_udp_l3mdev_accept +u8 sysctl_igmp_llm_reports +int sysctl_igmp_max_memberships +int sysctl_igmp_max_msf +int sysctl_igmp_qrv +struct_ping_group_range ping_group_range +atomic_t dev_addr_genid +unsigned_int sysctl_udp_child_hash_entries +unsigned_long* sysctl_local_reserved_ports +int sysctl_ip_prot_sock +struct_mr_table* mrt +struct_list_head mr_tables +struct_fib_rules_ops* mr_rules_ops +u32 sysctl_fib_multipath_hash_fields +u8 sysctl_fib_multipath_use_neigh +u8 sysctl_fib_multipath_hash_policy +struct_fib_notifier_ops* notifier_ops +unsigned_int fib_seq +struct_fib_notifier_ops* ipmr_notifier_ops +unsigned_int ipmr_seq +atomic_t rt_genid +siphash_key_t ip_id_key +=============================== ============================================ =================== =================== ================================================= diff --git a/Documentation/networking/net_cachelines/snmp.rst b/Documentation/networking/net_cachelines/snmp.rst index 6a071538566c..90ca2d92547d 100644 --- a/Documentation/networking/net_cachelines/snmp.rst +++ b/Documentation/networking/net_cachelines/snmp.rst @@ -5,131 +5,133 @@ netns_ipv4 enum fast path usage breakdown =========================================== +============== ===================================== =================== =================== ================================================== Type Name fastpath_tx_access fastpath_rx_access comment -..enum -unsigned_long LINUX_MIB_TCPKEEPALIVE write_mostly - tcp_keepalive_timer -unsigned_long LINUX_MIB_DELAYEDACKS write_mostly - tcp_delack_timer_handler,tcp_delack_timer -unsigned_long LINUX_MIB_DELAYEDACKLOCKED write_mostly - tcp_delack_timer_handler,tcp_delack_timer -unsigned_long LINUX_MIB_TCPAUTOCORKING write_mostly - tcp_push,tcp_sendmsg_locked -unsigned_long LINUX_MIB_TCPFROMZEROWINDOWADV write_mostly - tcp_select_window,tcp_transmit-skb -unsigned_long LINUX_MIB_TCPTOZEROWINDOWADV write_mostly - tcp_select_window,tcp_transmit-skb -unsigned_long LINUX_MIB_TCPWANTZEROWINDOWADV write_mostly - tcp_select_window,tcp_transmit-skb -unsigned_long LINUX_MIB_TCPORIGDATASENT write_mostly - tcp_write_xmit -unsigned_long LINUX_MIB_TCPHPHITS - write_mostly tcp_rcv_established,tcp_v4_do_rcv,tcp_v6_do_rcv -unsigned_long LINUX_MIB_TCPRCVCOALESCE - write_mostly tcp_try_coalesce,tcp_queue_rcv,tcp_rcv_established -unsigned_long LINUX_MIB_TCPPUREACKS - write_mostly tcp_ack,tcp_rcv_established -unsigned_long LINUX_MIB_TCPHPACKS - write_mostly tcp_ack,tcp_rcv_established -unsigned_long LINUX_MIB_TCPDELIVERED - write_mostly tcp_newly_delivered,tcp_ack,tcp_rcv_established -unsigned_long LINUX_MIB_SYNCOOKIESSENT -unsigned_long LINUX_MIB_SYNCOOKIESRECV -unsigned_long LINUX_MIB_SYNCOOKIESFAILED -unsigned_long 
LINUX_MIB_EMBRYONICRSTS -unsigned_long LINUX_MIB_PRUNECALLED -unsigned_long LINUX_MIB_RCVPRUNED -unsigned_long LINUX_MIB_OFOPRUNED -unsigned_long LINUX_MIB_OUTOFWINDOWICMPS -unsigned_long LINUX_MIB_LOCKDROPPEDICMPS -unsigned_long LINUX_MIB_ARPFILTER -unsigned_long LINUX_MIB_TIMEWAITED -unsigned_long LINUX_MIB_TIMEWAITRECYCLED -unsigned_long LINUX_MIB_TIMEWAITKILLED -unsigned_long LINUX_MIB_PAWSACTIVEREJECTED -unsigned_long LINUX_MIB_PAWSESTABREJECTED -unsigned_long LINUX_MIB_DELAYEDACKLOST -unsigned_long LINUX_MIB_LISTENOVERFLOWS -unsigned_long LINUX_MIB_LISTENDROPS -unsigned_long LINUX_MIB_TCPRENORECOVERY -unsigned_long LINUX_MIB_TCPSACKRECOVERY -unsigned_long LINUX_MIB_TCPSACKRENEGING -unsigned_long LINUX_MIB_TCPSACKREORDER -unsigned_long LINUX_MIB_TCPRENOREORDER -unsigned_long LINUX_MIB_TCPTSREORDER -unsigned_long LINUX_MIB_TCPFULLUNDO -unsigned_long LINUX_MIB_TCPPARTIALUNDO -unsigned_long LINUX_MIB_TCPDSACKUNDO -unsigned_long LINUX_MIB_TCPLOSSUNDO -unsigned_long LINUX_MIB_TCPLOSTRETRANSMIT -unsigned_long LINUX_MIB_TCPRENOFAILURES -unsigned_long LINUX_MIB_TCPSACKFAILURES -unsigned_long LINUX_MIB_TCPLOSSFAILURES -unsigned_long LINUX_MIB_TCPFASTRETRANS -unsigned_long LINUX_MIB_TCPSLOWSTARTRETRANS -unsigned_long LINUX_MIB_TCPTIMEOUTS -unsigned_long LINUX_MIB_TCPLOSSPROBES -unsigned_long LINUX_MIB_TCPLOSSPROBERECOVERY -unsigned_long LINUX_MIB_TCPRENORECOVERYFAIL -unsigned_long LINUX_MIB_TCPSACKRECOVERYFAIL -unsigned_long LINUX_MIB_TCPRCVCOLLAPSED -unsigned_long LINUX_MIB_TCPDSACKOLDSENT -unsigned_long LINUX_MIB_TCPDSACKOFOSENT -unsigned_long LINUX_MIB_TCPDSACKRECV -unsigned_long LINUX_MIB_TCPDSACKOFORECV -unsigned_long LINUX_MIB_TCPABORTONDATA -unsigned_long LINUX_MIB_TCPABORTONCLOSE -unsigned_long LINUX_MIB_TCPABORTONMEMORY -unsigned_long LINUX_MIB_TCPABORTONTIMEOUT -unsigned_long LINUX_MIB_TCPABORTONLINGER -unsigned_long LINUX_MIB_TCPABORTFAILED -unsigned_long LINUX_MIB_TCPMEMORYPRESSURES -unsigned_long LINUX_MIB_TCPMEMORYPRESSURESCHRONO -unsigned_long LINUX_MIB_TCPSACKDISCARD -unsigned_long LINUX_MIB_TCPDSACKIGNOREDOLD -unsigned_long LINUX_MIB_TCPDSACKIGNOREDNOUNDO -unsigned_long LINUX_MIB_TCPSPURIOUSRTOS -unsigned_long LINUX_MIB_TCPMD5NOTFOUND -unsigned_long LINUX_MIB_TCPMD5UNEXPECTED -unsigned_long LINUX_MIB_TCPMD5FAILURE -unsigned_long LINUX_MIB_SACKSHIFTED -unsigned_long LINUX_MIB_SACKMERGED -unsigned_long LINUX_MIB_SACKSHIFTFALLBACK -unsigned_long LINUX_MIB_TCPBACKLOGDROP -unsigned_long LINUX_MIB_PFMEMALLOCDROP -unsigned_long LINUX_MIB_TCPMINTTLDROP -unsigned_long LINUX_MIB_TCPDEFERACCEPTDROP -unsigned_long LINUX_MIB_IPRPFILTER -unsigned_long LINUX_MIB_TCPTIMEWAITOVERFLOW -unsigned_long LINUX_MIB_TCPREQQFULLDOCOOKIES -unsigned_long LINUX_MIB_TCPREQQFULLDROP -unsigned_long LINUX_MIB_TCPRETRANSFAIL -unsigned_long LINUX_MIB_TCPBACKLOGCOALESCE -unsigned_long LINUX_MIB_TCPOFOQUEUE -unsigned_long LINUX_MIB_TCPOFODROP -unsigned_long LINUX_MIB_TCPOFOMERGE -unsigned_long LINUX_MIB_TCPCHALLENGEACK -unsigned_long LINUX_MIB_TCPSYNCHALLENGE -unsigned_long LINUX_MIB_TCPFASTOPENACTIVE -unsigned_long LINUX_MIB_TCPFASTOPENACTIVEFAIL -unsigned_long LINUX_MIB_TCPFASTOPENPASSIVE -unsigned_long LINUX_MIB_TCPFASTOPENPASSIVEFAIL -unsigned_long LINUX_MIB_TCPFASTOPENLISTENOVERFLOW -unsigned_long LINUX_MIB_TCPFASTOPENCOOKIEREQD -unsigned_long LINUX_MIB_TCPFASTOPENBLACKHOLE -unsigned_long LINUX_MIB_TCPSPURIOUS_RTX_HOSTQUEUES -unsigned_long LINUX_MIB_BUSYPOLLRXPACKETS -unsigned_long LINUX_MIB_TCPSYNRETRANS -unsigned_long LINUX_MIB_TCPHYSTARTTRAINDETECT -unsigned_long LINUX_MIB_TCPHYSTARTTRAINCWND 
-unsigned_long LINUX_MIB_TCPHYSTARTDELAYDETECT -unsigned_long LINUX_MIB_TCPHYSTARTDELAYCWND -unsigned_long LINUX_MIB_TCPACKSKIPPEDSYNRECV -unsigned_long LINUX_MIB_TCPACKSKIPPEDPAWS -unsigned_long LINUX_MIB_TCPACKSKIPPEDSEQ -unsigned_long LINUX_MIB_TCPACKSKIPPEDFINWAIT2 -unsigned_long LINUX_MIB_TCPACKSKIPPEDTIMEWAIT -unsigned_long LINUX_MIB_TCPACKSKIPPEDCHALLENGE -unsigned_long LINUX_MIB_TCPWINPROBE -unsigned_long LINUX_MIB_TCPMTUPFAIL -unsigned_long LINUX_MIB_TCPMTUPSUCCESS -unsigned_long LINUX_MIB_TCPDELIVEREDCE -unsigned_long LINUX_MIB_TCPACKCOMPRESSED -unsigned_long LINUX_MIB_TCPZEROWINDOWDROP -unsigned_long LINUX_MIB_TCPRCVQDROP -unsigned_long LINUX_MIB_TCPWQUEUETOOBIG -unsigned_long LINUX_MIB_TCPFASTOPENPASSIVEALTKEY -unsigned_long LINUX_MIB_TCPTIMEOUTREHASH -unsigned_long LINUX_MIB_TCPDUPLICATEDATAREHASH -unsigned_long LINUX_MIB_TCPDSACKRECVSEGS -unsigned_long LINUX_MIB_TCPDSACKIGNOREDDUBIOUS -unsigned_long LINUX_MIB_TCPMIGRATEREQSUCCESS -unsigned_long LINUX_MIB_TCPMIGRATEREQFAILURE -unsigned_long __LINUX_MIB_MAX +============== ===================================== =================== =================== ================================================== +unsigned_long LINUX_MIB_TCPKEEPALIVE write_mostly tcp_keepalive_timer +unsigned_long LINUX_MIB_DELAYEDACKS write_mostly tcp_delack_timer_handler,tcp_delack_timer +unsigned_long LINUX_MIB_DELAYEDACKLOCKED write_mostly tcp_delack_timer_handler,tcp_delack_timer +unsigned_long LINUX_MIB_TCPAUTOCORKING write_mostly tcp_push,tcp_sendmsg_locked +unsigned_long LINUX_MIB_TCPFROMZEROWINDOWADV write_mostly tcp_select_window,tcp_transmit-skb +unsigned_long LINUX_MIB_TCPTOZEROWINDOWADV write_mostly tcp_select_window,tcp_transmit-skb +unsigned_long LINUX_MIB_TCPWANTZEROWINDOWADV write_mostly tcp_select_window,tcp_transmit-skb +unsigned_long LINUX_MIB_TCPORIGDATASENT write_mostly tcp_write_xmit +unsigned_long LINUX_MIB_TCPHPHITS write_mostly tcp_rcv_established,tcp_v4_do_rcv,tcp_v6_do_rcv +unsigned_long LINUX_MIB_TCPRCVCOALESCE write_mostly tcp_try_coalesce,tcp_queue_rcv,tcp_rcv_established +unsigned_long LINUX_MIB_TCPPUREACKS write_mostly tcp_ack,tcp_rcv_established +unsigned_long LINUX_MIB_TCPHPACKS write_mostly tcp_ack,tcp_rcv_established +unsigned_long LINUX_MIB_TCPDELIVERED write_mostly tcp_newly_delivered,tcp_ack,tcp_rcv_established +unsigned_long LINUX_MIB_SYNCOOKIESSENT +unsigned_long LINUX_MIB_SYNCOOKIESRECV +unsigned_long LINUX_MIB_SYNCOOKIESFAILED +unsigned_long LINUX_MIB_EMBRYONICRSTS +unsigned_long LINUX_MIB_PRUNECALLED +unsigned_long LINUX_MIB_RCVPRUNED +unsigned_long LINUX_MIB_OFOPRUNED +unsigned_long LINUX_MIB_OUTOFWINDOWICMPS +unsigned_long LINUX_MIB_LOCKDROPPEDICMPS +unsigned_long LINUX_MIB_ARPFILTER +unsigned_long LINUX_MIB_TIMEWAITED +unsigned_long LINUX_MIB_TIMEWAITRECYCLED +unsigned_long LINUX_MIB_TIMEWAITKILLED +unsigned_long LINUX_MIB_PAWSACTIVEREJECTED +unsigned_long LINUX_MIB_PAWSESTABREJECTED +unsigned_long LINUX_MIB_DELAYEDACKLOST +unsigned_long LINUX_MIB_LISTENOVERFLOWS +unsigned_long LINUX_MIB_LISTENDROPS +unsigned_long LINUX_MIB_TCPRENORECOVERY +unsigned_long LINUX_MIB_TCPSACKRECOVERY +unsigned_long LINUX_MIB_TCPSACKRENEGING +unsigned_long LINUX_MIB_TCPSACKREORDER +unsigned_long LINUX_MIB_TCPRENOREORDER +unsigned_long LINUX_MIB_TCPTSREORDER +unsigned_long LINUX_MIB_TCPFULLUNDO +unsigned_long LINUX_MIB_TCPPARTIALUNDO +unsigned_long LINUX_MIB_TCPDSACKUNDO +unsigned_long LINUX_MIB_TCPLOSSUNDO +unsigned_long LINUX_MIB_TCPLOSTRETRANSMIT +unsigned_long LINUX_MIB_TCPRENOFAILURES +unsigned_long LINUX_MIB_TCPSACKFAILURES 
+unsigned_long LINUX_MIB_TCPLOSSFAILURES +unsigned_long LINUX_MIB_TCPFASTRETRANS +unsigned_long LINUX_MIB_TCPSLOWSTARTRETRANS +unsigned_long LINUX_MIB_TCPTIMEOUTS +unsigned_long LINUX_MIB_TCPLOSSPROBES +unsigned_long LINUX_MIB_TCPLOSSPROBERECOVERY +unsigned_long LINUX_MIB_TCPRENORECOVERYFAIL +unsigned_long LINUX_MIB_TCPSACKRECOVERYFAIL +unsigned_long LINUX_MIB_TCPRCVCOLLAPSED +unsigned_long LINUX_MIB_TCPDSACKOLDSENT +unsigned_long LINUX_MIB_TCPDSACKOFOSENT +unsigned_long LINUX_MIB_TCPDSACKRECV +unsigned_long LINUX_MIB_TCPDSACKOFORECV +unsigned_long LINUX_MIB_TCPABORTONDATA +unsigned_long LINUX_MIB_TCPABORTONCLOSE +unsigned_long LINUX_MIB_TCPABORTONMEMORY +unsigned_long LINUX_MIB_TCPABORTONTIMEOUT +unsigned_long LINUX_MIB_TCPABORTONLINGER +unsigned_long LINUX_MIB_TCPABORTFAILED +unsigned_long LINUX_MIB_TCPMEMORYPRESSURES +unsigned_long LINUX_MIB_TCPMEMORYPRESSURESCHRONO +unsigned_long LINUX_MIB_TCPSACKDISCARD +unsigned_long LINUX_MIB_TCPDSACKIGNOREDOLD +unsigned_long LINUX_MIB_TCPDSACKIGNOREDNOUNDO +unsigned_long LINUX_MIB_TCPSPURIOUSRTOS +unsigned_long LINUX_MIB_TCPMD5NOTFOUND +unsigned_long LINUX_MIB_TCPMD5UNEXPECTED +unsigned_long LINUX_MIB_TCPMD5FAILURE +unsigned_long LINUX_MIB_SACKSHIFTED +unsigned_long LINUX_MIB_SACKMERGED +unsigned_long LINUX_MIB_SACKSHIFTFALLBACK +unsigned_long LINUX_MIB_TCPBACKLOGDROP +unsigned_long LINUX_MIB_PFMEMALLOCDROP +unsigned_long LINUX_MIB_TCPMINTTLDROP +unsigned_long LINUX_MIB_TCPDEFERACCEPTDROP +unsigned_long LINUX_MIB_IPRPFILTER +unsigned_long LINUX_MIB_TCPTIMEWAITOVERFLOW +unsigned_long LINUX_MIB_TCPREQQFULLDOCOOKIES +unsigned_long LINUX_MIB_TCPREQQFULLDROP +unsigned_long LINUX_MIB_TCPRETRANSFAIL +unsigned_long LINUX_MIB_TCPBACKLOGCOALESCE +unsigned_long LINUX_MIB_TCPOFOQUEUE +unsigned_long LINUX_MIB_TCPOFODROP +unsigned_long LINUX_MIB_TCPOFOMERGE +unsigned_long LINUX_MIB_TCPCHALLENGEACK +unsigned_long LINUX_MIB_TCPSYNCHALLENGE +unsigned_long LINUX_MIB_TCPFASTOPENACTIVE +unsigned_long LINUX_MIB_TCPFASTOPENACTIVEFAIL +unsigned_long LINUX_MIB_TCPFASTOPENPASSIVE +unsigned_long LINUX_MIB_TCPFASTOPENPASSIVEFAIL +unsigned_long LINUX_MIB_TCPFASTOPENLISTENOVERFLOW +unsigned_long LINUX_MIB_TCPFASTOPENCOOKIEREQD +unsigned_long LINUX_MIB_TCPFASTOPENBLACKHOLE +unsigned_long LINUX_MIB_TCPSPURIOUS_RTX_HOSTQUEUES +unsigned_long LINUX_MIB_BUSYPOLLRXPACKETS +unsigned_long LINUX_MIB_TCPSYNRETRANS +unsigned_long LINUX_MIB_TCPHYSTARTTRAINDETECT +unsigned_long LINUX_MIB_TCPHYSTARTTRAINCWND +unsigned_long LINUX_MIB_TCPHYSTARTDELAYDETECT +unsigned_long LINUX_MIB_TCPHYSTARTDELAYCWND +unsigned_long LINUX_MIB_TCPACKSKIPPEDSYNRECV +unsigned_long LINUX_MIB_TCPACKSKIPPEDPAWS +unsigned_long LINUX_MIB_TCPACKSKIPPEDSEQ +unsigned_long LINUX_MIB_TCPACKSKIPPEDFINWAIT2 +unsigned_long LINUX_MIB_TCPACKSKIPPEDTIMEWAIT +unsigned_long LINUX_MIB_TCPACKSKIPPEDCHALLENGE +unsigned_long LINUX_MIB_TCPWINPROBE +unsigned_long LINUX_MIB_TCPMTUPFAIL +unsigned_long LINUX_MIB_TCPMTUPSUCCESS +unsigned_long LINUX_MIB_TCPDELIVEREDCE +unsigned_long LINUX_MIB_TCPACKCOMPRESSED +unsigned_long LINUX_MIB_TCPZEROWINDOWDROP +unsigned_long LINUX_MIB_TCPRCVQDROP +unsigned_long LINUX_MIB_TCPWQUEUETOOBIG +unsigned_long LINUX_MIB_TCPFASTOPENPASSIVEALTKEY +unsigned_long LINUX_MIB_TCPTIMEOUTREHASH +unsigned_long LINUX_MIB_TCPDUPLICATEDATAREHASH +unsigned_long LINUX_MIB_TCPDSACKRECVSEGS +unsigned_long LINUX_MIB_TCPDSACKIGNOREDDUBIOUS +unsigned_long LINUX_MIB_TCPMIGRATEREQSUCCESS +unsigned_long LINUX_MIB_TCPMIGRATEREQFAILURE +unsigned_long __LINUX_MIB_MAX +============== ===================================== 
=================== =================== ================================================== diff --git a/Documentation/networking/net_cachelines/tcp_sock.rst b/Documentation/networking/net_cachelines/tcp_sock.rst index 1c154cbd1848..1f79765072b1 100644 --- a/Documentation/networking/net_cachelines/tcp_sock.rst +++ b/Documentation/networking/net_cachelines/tcp_sock.rst @@ -5,153 +5,155 @@ tcp_sock struct fast path usage breakdown ========================================= +============================= ======================= =================== =================== ================================================================================================================================================================================================================== Type Name fastpath_tx_access fastpath_rx_access Comments -..struct ..tcp_sock -struct_inet_connection_sock inet_conn +============================= ======================= =================== =================== ================================================================================================================================================================================================================== +struct inet_connection_sock inet_conn u16 tcp_header_len read_mostly read_mostly tcp_bound_to_half_wnd,tcp_current_mss(tx);tcp_rcv_established(rx) -u16 gso_segs read_mostly - tcp_xmit_size_goal +u16 gso_segs read_mostly tcp_xmit_size_goal __be32 pred_flags read_write read_mostly tcp_select_window(tx);tcp_rcv_established(rx) -u64 bytes_received - read_write tcp_rcv_nxt_update(rx) -u32 segs_in - read_write tcp_v6_rcv(rx) -u32 data_segs_in - read_write tcp_v6_rcv(rx) +u64 bytes_received read_write tcp_rcv_nxt_update(rx) +u32 segs_in read_write tcp_v6_rcv(rx) +u32 data_segs_in read_write tcp_v6_rcv(rx) u32 rcv_nxt read_mostly read_write tcp_cleanup_rbuf,tcp_send_ack,tcp_inq_hint,tcp_transmit_skb,tcp_receive_window(tx);tcp_v6_do_rcv,tcp_rcv_established,tcp_data_queue,tcp_receive_window,tcp_rcv_nxt_update(write)(rx) -u32 copied_seq - read_mostly tcp_cleanup_rbuf,tcp_rcv_space_adjust,tcp_inq_hint -u32 rcv_wup - read_write __tcp_cleanup_rbuf,tcp_receive_window,tcp_receive_established +u32 copied_seq read_mostly tcp_cleanup_rbuf,tcp_rcv_space_adjust,tcp_inq_hint +u32 rcv_wup read_write __tcp_cleanup_rbuf,tcp_receive_window,tcp_receive_established u32 snd_nxt read_write read_mostly tcp_rate_check_app_limited,__tcp_transmit_skb,tcp_event_new_data_sent(write)(tx);tcp_rcv_established,tcp_ack,tcp_clean_rtx_queue(rx) -u32 segs_out read_write - __tcp_transmit_skb -u32 data_segs_out read_write - __tcp_transmit_skb,tcp_update_skb_after_send -u64 bytes_sent read_write - __tcp_transmit_skb -u64 bytes_acked - read_write tcp_snd_una_update/tcp_ack -u32 dsack_dups +u32 segs_out read_write __tcp_transmit_skb +u32 data_segs_out read_write __tcp_transmit_skb,tcp_update_skb_after_send +u64 bytes_sent read_write __tcp_transmit_skb +u64 bytes_acked read_write tcp_snd_una_update/tcp_ack +u32 dsack_dups u32 snd_una read_mostly read_write tcp_wnd_end,tcp_urg_mode,tcp_minshall_check,tcp_cwnd_validate(tx);tcp_ack,tcp_may_update_window,tcp_clean_rtx_queue(write),tcp_ack_tstamp(rx) -u32 snd_sml read_write - tcp_minshall_check,tcp_minshall_update -u32 rcv_tstamp - read_mostly tcp_ack -u32 lsndtime read_write - tcp_slow_start_after_idle_check,tcp_event_data_sent -u32 last_oow_ack_time -u32 compressed_ack_rcv_nxt +u32 snd_sml read_write tcp_minshall_check,tcp_minshall_update +u32 rcv_tstamp read_mostly tcp_ack +u32 lsndtime read_write 
tcp_slow_start_after_idle_check,tcp_event_data_sent +u32 last_oow_ack_time +u32 compressed_ack_rcv_nxt u32 tsoffset read_mostly read_mostly tcp_established_options(tx);tcp_fast_parse_options(rx) -struct_list_head tsq_node - - -struct_list_head tsorted_sent_queue read_write - tcp_update_skb_after_send -u32 snd_wl1 - read_mostly tcp_may_update_window +struct list_head tsq_node +struct list_head tsorted_sent_queue read_write tcp_update_skb_after_send +u32 snd_wl1 read_mostly tcp_may_update_window u32 snd_wnd read_mostly read_mostly tcp_wnd_end,tcp_tso_should_defer(tx);tcp_fast_path_on(rx) -u32 max_window read_mostly - tcp_bound_to_half_wnd,forced_push +u32 max_window read_mostly tcp_bound_to_half_wnd,forced_push u32 mss_cache read_mostly read_mostly tcp_rate_check_app_limited,tcp_current_mss,tcp_sync_mss,tcp_sndbuf_expand,tcp_tso_should_defer(tx);tcp_update_pacing_rate,tcp_clean_rtx_queue(rx) u32 window_clamp read_mostly read_write tcp_rcv_space_adjust,__tcp_select_window -u32 rcv_ssthresh read_mostly - __tcp_select_window +u32 rcv_ssthresh read_mostly __tcp_select_window u8 scaling_ratio read_mostly read_mostly tcp_win_from_space -struct tcp_rack -u16 advmss - read_mostly tcp_rcv_space_adjust -u8 compressed_ack -u8:2 dup_ack_counter -u8:1 tlp_retrans +struct tcp_rack +u16 advmss read_mostly tcp_rcv_space_adjust +u8 compressed_ack +u8:2 dup_ack_counter +u8:1 tlp_retrans u8:1 tcp_usec_ts read_mostly read_mostly -u32 chrono_start read_write - tcp_chrono_start/stop(tcp_write_xmit,tcp_cwnd_validate,tcp_send_syn_data) -u32[3] chrono_stat read_write - tcp_chrono_start/stop(tcp_write_xmit,tcp_cwnd_validate,tcp_send_syn_data) -u8:2 chrono_type read_write - tcp_chrono_start/stop(tcp_write_xmit,tcp_cwnd_validate,tcp_send_syn_data) -u8:1 rate_app_limited - read_write tcp_rate_gen -u8:1 fastopen_connect -u8:1 fastopen_no_cookie -u8:1 is_sack_reneg - read_mostly tcp_skb_entail,tcp_ack -u8:2 fastopen_client_fail -u8:4 nonagle read_write - tcp_skb_entail,tcp_push_pending_frames -u8:1 thin_lto -u8:1 recvmsg_inq -u8:1 repair read_mostly - tcp_write_xmit -u8:1 frto -u8 repair_queue - - -u8:2 save_syn -u8:1 syn_data -u8:1 syn_fastopen -u8:1 syn_fastopen_exp -u8:1 syn_fastopen_ch -u8:1 syn_data_acked -u8:1 is_cwnd_limited read_mostly - tcp_cwnd_validate,tcp_is_cwnd_limited -u32 tlp_high_seq - read_mostly tcp_ack -u32 tcp_tx_delay -u64 tcp_wstamp_ns read_write - tcp_pacing_check,tcp_tso_should_defer,tcp_update_skb_after_send +u32 chrono_start read_write tcp_chrono_start/stop(tcp_write_xmit,tcp_cwnd_validate,tcp_send_syn_data) +u32[3] chrono_stat read_write tcp_chrono_start/stop(tcp_write_xmit,tcp_cwnd_validate,tcp_send_syn_data) +u8:2 chrono_type read_write tcp_chrono_start/stop(tcp_write_xmit,tcp_cwnd_validate,tcp_send_syn_data) +u8:1 rate_app_limited read_write tcp_rate_gen +u8:1 fastopen_connect +u8:1 fastopen_no_cookie +u8:1 is_sack_reneg read_mostly tcp_skb_entail,tcp_ack +u8:2 fastopen_client_fail +u8:4 nonagle read_write tcp_skb_entail,tcp_push_pending_frames +u8:1 thin_lto +u8:1 recvmsg_inq +u8:1 repair read_mostly tcp_write_xmit +u8:1 frto +u8 repair_queue +u8:2 save_syn +u8:1 syn_data +u8:1 syn_fastopen +u8:1 syn_fastopen_exp +u8:1 syn_fastopen_ch +u8:1 syn_data_acked +u8:1 is_cwnd_limited read_mostly tcp_cwnd_validate,tcp_is_cwnd_limited +u32 tlp_high_seq read_mostly tcp_ack +u32 tcp_tx_delay +u64 tcp_wstamp_ns read_write tcp_pacing_check,tcp_tso_should_defer,tcp_update_skb_after_send u64 tcp_clock_cache read_write read_write 
tcp_mstamp_refresh(tcp_write_xmit/tcp_rcv_space_adjust),__tcp_transmit_skb,tcp_tso_should_defer;timer u64 tcp_mstamp read_write read_write tcp_mstamp_refresh(tcp_write_xmit/tcp_rcv_space_adjust)(tx);tcp_rcv_space_adjust,tcp_rate_gen,tcp_clean_rtx_queue,tcp_ack_update_rtt/tcp_time_stamp(rx);timer u32 srtt_us read_mostly read_write tcp_tso_should_defer(tx);tcp_update_pacing_rate,__tcp_set_rto,tcp_rtt_estimator(rx) -u32 mdev_us read_write - tcp_rtt_estimator -u32 mdev_max_us -u32 rttvar_us - read_mostly __tcp_set_rto +u32 mdev_us read_write tcp_rtt_estimator +u32 mdev_max_us +u32 rttvar_us read_mostly __tcp_set_rto u32 rtt_seq read_write tcp_rtt_estimator -struct_minmax rtt_min - read_mostly tcp_min_rtt/tcp_rate_gen,tcp_min_rtttcp_update_rtt_min +struct minmax rtt_min read_mostly tcp_min_rtt/tcp_rate_gen,tcp_min_rtttcp_update_rtt_min u32 packets_out read_write read_write tcp_packets_in_flight(tx/rx);tcp_slow_start_after_idle_check,tcp_nagle_check,tcp_rate_skb_sent,tcp_event_new_data_sent,tcp_cwnd_validate,tcp_write_xmit(tx);tcp_ack,tcp_clean_rtx_queue,tcp_update_pacing_rate(rx) -u32 retrans_out - read_mostly tcp_packets_in_flight,tcp_rate_check_app_limited -u32 max_packets_out - read_write tcp_cwnd_validate -u32 cwnd_usage_seq - read_write tcp_cwnd_validate -u16 urg_data - read_mostly tcp_fast_path_check -u8 ecn_flags read_write - tcp_ecn_send -u8 keepalive_probes -u32 reordering read_mostly - tcp_sndbuf_expand -u32 reord_seen +u32 retrans_out read_mostly tcp_packets_in_flight,tcp_rate_check_app_limited +u32 max_packets_out read_write tcp_cwnd_validate +u32 cwnd_usage_seq read_write tcp_cwnd_validate +u16 urg_data read_mostly tcp_fast_path_check +u8 ecn_flags read_write tcp_ecn_send +u8 keepalive_probes +u32 reordering read_mostly tcp_sndbuf_expand +u32 reord_seen u32 snd_up read_write read_mostly tcp_mark_urg,tcp_urg_mode,__tcp_transmit_skb(tx);tcp_clean_rtx_queue(rx) -struct_tcp_options_received rx_opt read_mostly read_write tcp_established_options(tx);tcp_fast_path_on,tcp_ack_update_window,tcp_is_sack,tcp_data_queue,tcp_rcv_established,tcp_ack_update_rtt(rx) -u32 snd_ssthresh - read_mostly tcp_update_pacing_rate +struct tcp_options_received rx_opt read_mostly read_write tcp_established_options(tx);tcp_fast_path_on,tcp_ack_update_window,tcp_is_sack,tcp_data_queue,tcp_rcv_established,tcp_ack_update_rtt(rx) +u32 snd_ssthresh read_mostly tcp_update_pacing_rate u32 snd_cwnd read_mostly read_mostly tcp_snd_cwnd,tcp_rate_check_app_limited,tcp_tso_should_defer(tx);tcp_update_pacing_rate -u32 snd_cwnd_cnt -u32 snd_cwnd_clamp -u32 snd_cwnd_used -u32 snd_cwnd_stamp -u32 prior_cwnd -u32 prr_delivered +u32 snd_cwnd_cnt +u32 snd_cwnd_clamp +u32 snd_cwnd_used +u32 snd_cwnd_stamp +u32 prior_cwnd +u32 prr_delivered u32 prr_out read_mostly read_mostly tcp_rate_skb_sent,tcp_newly_delivered(tx);tcp_ack,tcp_rate_gen,tcp_clean_rtx_queue(rx) u32 delivered read_mostly read_write tcp_rate_skb_sent, tcp_newly_delivered(tx);tcp_ack, tcp_rate_gen, tcp_clean_rtx_queue (rx) u32 delivered_ce read_mostly read_write tcp_rate_skb_sent(tx);tcp_rate_gen(rx) -u32 lost - read_mostly tcp_ack +u32 lost read_mostly tcp_ack u32 app_limited read_write read_mostly tcp_rate_check_app_limited,tcp_rate_skb_sent(tx);tcp_rate_gen(rx) -u64 first_tx_mstamp read_write - tcp_rate_skb_sent -u64 delivered_mstamp read_write - tcp_rate_skb_sent -u32 rate_delivered - read_mostly tcp_rate_gen -u32 rate_interval_us - read_mostly rate_delivered,rate_app_limited +u64 first_tx_mstamp read_write tcp_rate_skb_sent +u64 delivered_mstamp read_write 
tcp_rate_skb_sent +u32 rate_delivered read_mostly tcp_rate_gen +u32 rate_interval_us read_mostly rate_delivered,rate_app_limited u32 rcv_wnd read_write read_mostly tcp_select_window,tcp_receive_window,tcp_fast_path_check -u32 write_seq read_write - tcp_rate_check_app_limited,tcp_write_queue_empty,tcp_skb_entail,forced_push,tcp_mark_push -u32 notsent_lowat read_mostly - tcp_stream_memory_free -u32 pushed_seq read_write - tcp_mark_push,forced_push +u32 write_seq read_write tcp_rate_check_app_limited,tcp_write_queue_empty,tcp_skb_entail,forced_push,tcp_mark_push +u32 notsent_lowat read_mostly tcp_stream_memory_free +u32 pushed_seq read_write tcp_mark_push,forced_push u32 lost_out read_mostly read_mostly tcp_left_out(tx);tcp_packets_in_flight(tx/rx);tcp_rate_check_app_limited(rx) u32 sacked_out read_mostly read_mostly tcp_left_out(tx);tcp_packets_in_flight(tx/rx);tcp_clean_rtx_queue(rx) -struct_hrtimer pacing_timer -struct_hrtimer compressed_ack_timer -struct_sk_buff* lost_skb_hint read_mostly tcp_clean_rtx_queue -struct_sk_buff* retransmit_skb_hint read_mostly - tcp_clean_rtx_queue -struct_rb_root out_of_order_queue - read_mostly tcp_data_queue,tcp_fast_path_check -struct_sk_buff* ooo_last_skb -struct_tcp_sack_block[1] duplicate_sack -struct_tcp_sack_block[4] selective_acks -struct_tcp_sack_block[4] recv_sack_cache -struct_sk_buff* highest_sack read_write - tcp_event_new_data_sent -int lost_cnt_hint -u32 prior_ssthresh -u32 high_seq -u32 retrans_stamp -u32 undo_marker -int undo_retrans -u64 bytes_retrans -u32 total_retrans -u32 rto_stamp -u16 total_rto -u16 total_rto_recoveries -u32 total_rto_time -u32 urg_seq - - -unsigned_int keepalive_time -unsigned_int keepalive_intvl -int linger2 -u8 bpf_sock_ops_cb_flags -u8:1 bpf_chg_cc_inprogress -u16 timeout_rehash -u32 rcv_ooopack -u32 rcv_rtt_last_tsecr -struct rcv_rtt_est - read_write tcp_rcv_space_adjust,tcp_rcv_established -struct rcvq_space - read_write tcp_rcv_space_adjust -struct mtu_probe -u32 plb_rehash -u32 mtu_info -bool is_mptcp -bool smc_hs_congested -bool syn_smc -struct_tcp_sock_af_ops* af_specific -struct_tcp_md5sig_info* md5sig_info -struct_tcp_fastopen_request* fastopen_req -struct_request_sock* fastopen_rsk -struct_saved_syn* saved_syn
\ No newline at end of file +struct hrtimer pacing_timer +struct hrtimer compressed_ack_timer +struct sk_buff* lost_skb_hint read_mostly tcp_clean_rtx_queue +struct sk_buff* retransmit_skb_hint read_mostly tcp_clean_rtx_queue +struct rb_root out_of_order_queue read_mostly tcp_data_queue,tcp_fast_path_check +struct sk_buff* ooo_last_skb +struct tcp_sack_block[1] duplicate_sack +struct tcp_sack_block[4] selective_acks +struct tcp_sack_block[4] recv_sack_cache +struct sk_buff* highest_sack read_write tcp_event_new_data_sent +int lost_cnt_hint +u32 prior_ssthresh +u32 high_seq +u32 retrans_stamp +u32 undo_marker +int undo_retrans +u64 bytes_retrans +u32 total_retrans +u32 rto_stamp +u16 total_rto +u16 total_rto_recoveries +u32 total_rto_time +u32 urg_seq +unsigned_int keepalive_time +unsigned_int keepalive_intvl +int linger2 +u8 bpf_sock_ops_cb_flags +u8:1 bpf_chg_cc_inprogress +u16 timeout_rehash +u32 rcv_ooopack +u32 rcv_rtt_last_tsecr +struct rcv_rtt_est read_write tcp_rcv_space_adjust,tcp_rcv_established +struct rcvq_space read_write tcp_rcv_space_adjust +struct mtu_probe +u32 plb_rehash +u32 mtu_info +bool is_mptcp +bool smc_hs_congested +bool syn_smc +struct tcp_sock_af_ops* af_specific +struct tcp_md5sig_info* md5sig_info +struct tcp_fastopen_request* fastopen_req +struct request_sock* fastopen_rsk +struct saved_syn* saved_syn +============================= ======================= =================== =================== ================================================================================================================================================================================================================== diff --git a/Documentation/networking/timestamping.rst b/Documentation/networking/timestamping.rst index 8199e6917671..b37bfbfc7d79 100644 --- a/Documentation/networking/timestamping.rst +++ b/Documentation/networking/timestamping.rst @@ -194,6 +194,20 @@ SOF_TIMESTAMPING_OPT_ID: among all possibly concurrently outstanding timestamp requests for that socket. + The process can optionally override the default generated ID, by + passing a specific ID with control message SCM_TS_OPT_ID (not + supported for TCP sockets):: + + struct msghdr *msg; + ... + cmsg = CMSG_FIRSTHDR(msg); + cmsg->cmsg_level = SOL_SOCKET; + cmsg->cmsg_type = SCM_TS_OPT_ID; + cmsg->cmsg_len = CMSG_LEN(sizeof(__u32)); + *((__u32 *) CMSG_DATA(cmsg)) = opt_id; + err = sendmsg(fd, msg, 0); + + SOF_TIMESTAMPING_OPT_ID_TCP: Pass this modifier along with SOF_TIMESTAMPING_OPT_ID for new TCP timestamping applications. 
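A fuller, self-contained sketch of the SCM_TS_OPT_ID control message described above (illustrative only: it assumes a datagram socket fd on which SO_TIMESTAMPING with SOF_TIMESTAMPING_OPT_ID has already been enabled, and an arbitrary application-chosen opt_id; error handling elided)::

    char payload[] = "example";
    char control[CMSG_SPACE(sizeof(__u32))] = {};
    struct iovec iov = { .iov_base = payload, .iov_len = sizeof(payload) };
    struct msghdr msg = {};
    struct cmsghdr *cmsg;
    __u32 opt_id = 1234;    /* ID echoed back with the timestamp */
    int err;

    msg.msg_iov = &iov;
    msg.msg_iovlen = 1;
    msg.msg_control = control;
    msg.msg_controllen = sizeof(control);

    cmsg = CMSG_FIRSTHDR(&msg);
    cmsg->cmsg_level = SOL_SOCKET;
    cmsg->cmsg_type = SCM_TS_OPT_ID;
    cmsg->cmsg_len = CMSG_LEN(sizeof(__u32));
    *(__u32 *)CMSG_DATA(cmsg) = opt_id;

    err = sendmsg(fd, &msg, 0);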
SOF_TIMESTAMPING_OPT_ID defines how the diff --git a/MAINTAINERS b/MAINTAINERS index 6880a8fac74c..f39ab140710f 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -10248,6 +10248,12 @@ S: Maintained W: http://www.hisilicon.com F: drivers/net/ethernet/hisilicon/hns3/ +HISILICON NETWORK HIBMCGE DRIVER +M: Jijie Shao <shaojijie@huawei.com> +L: netdev@vger.kernel.org +S: Maintained +F: drivers/net/ethernet/hisilicon/hibmcge/ + HISILICON NETWORK SUBSYSTEM DRIVER M: Jian Shen <shenjian15@huawei.com> M: Salil Mehta <salil.mehta@huawei.com> @@ -16005,6 +16011,7 @@ F: include/linux/platform_data/wiznet.h F: include/uapi/linux/cn_proc.h F: include/uapi/linux/ethtool_netlink.h F: include/uapi/linux/if_* +F: include/uapi/linux/net_shaper.h F: include/uapi/linux/netdev* F: tools/testing/selftests/drivers/net/ X: Documentation/devicetree/bindings/net/bluetooth/ @@ -16186,7 +16193,7 @@ F: include/net/mptcp.h F: include/trace/events/mptcp.h F: include/uapi/linux/mptcp*.h F: net/mptcp/ -F: tools/testing/selftests/bpf/*/*mptcp*.c +F: tools/testing/selftests/bpf/*/*mptcp*.[ch] F: tools/testing/selftests/net/mptcp/ NETWORKING [TCP] @@ -17367,6 +17374,7 @@ S: Supported F: Documentation/core-api/packing.rst F: include/linux/packing.h F: lib/packing.c +F: lib/packing_test.c PADATA PARALLEL EXECUTION MECHANISM M: Steffen Klassert <steffen.klassert@secunet.com> @@ -18571,6 +18579,13 @@ S: Maintained F: drivers/ptp/ptp_vclock.c F: net/ethtool/phc_vclocks.c +PTP VMCLOCK SUPPORT +M: David Woodhouse <dwmw2@infradead.org> +L: netdev@vger.kernel.org +S: Maintained +F: drivers/ptp/ptp_vmclock.c +F: include/uapi/linux/vmclock-abi.h + PTRACE SUPPORT M: Oleg Nesterov <oleg@redhat.com> S: Maintained @@ -22113,12 +22128,6 @@ S: Maintained F: Documentation/devicetree/bindings/input/allwinner,sun4i-a10-lradc-keys.yaml F: drivers/input/keyboard/sun4i-lradc-keys.c -SUNDANCE NETWORK DRIVER -M: Denis Kirjanov <kda@linux-powerpc.org> -L: netdev@vger.kernel.org -S: Maintained -F: drivers/net/ethernet/dlink/sundance.c - SUNPLUS ETHERNET DRIVER M: Wells Lu <wellslutw@gmail.com> L: netdev@vger.kernel.org diff --git a/arch/alpha/include/uapi/asm/socket.h b/arch/alpha/include/uapi/asm/socket.h index 251b73c5481e..302507bf9b5d 100644 --- a/arch/alpha/include/uapi/asm/socket.h +++ b/arch/alpha/include/uapi/asm/socket.h @@ -146,6 +146,8 @@ #define SCM_DEVMEM_DMABUF SO_DEVMEM_DMABUF #define SO_DEVMEM_DONTNEED 80 +#define SCM_TS_OPT_ID 81 + #if !defined(__KERNEL__) #if __BITS_PER_LONG == 64 diff --git a/arch/mips/configs/mtx1_defconfig b/arch/mips/configs/mtx1_defconfig index 935585d8bb26..8e98c0796437 100644 --- a/arch/mips/configs/mtx1_defconfig +++ b/arch/mips/configs/mtx1_defconfig @@ -275,7 +275,6 @@ CONFIG_DM9102=m CONFIG_ULI526X=m CONFIG_PCMCIA_XIRCOM=m CONFIG_DL2K=m -CONFIG_SUNDANCE=m CONFIG_PCMCIA_FMVJ18X=m CONFIG_E100=m CONFIG_E1000=m diff --git a/arch/mips/include/uapi/asm/socket.h b/arch/mips/include/uapi/asm/socket.h index 8ab7582291ab..d118d4731580 100644 --- a/arch/mips/include/uapi/asm/socket.h +++ b/arch/mips/include/uapi/asm/socket.h @@ -157,6 +157,8 @@ #define SCM_DEVMEM_DMABUF SO_DEVMEM_DMABUF #define SO_DEVMEM_DONTNEED 80 +#define SCM_TS_OPT_ID 81 + #if !defined(__KERNEL__) #if __BITS_PER_LONG == 64 diff --git a/arch/parisc/include/uapi/asm/socket.h b/arch/parisc/include/uapi/asm/socket.h index 38fc0b188e08..d268d69bfcd2 100644 --- a/arch/parisc/include/uapi/asm/socket.h +++ b/arch/parisc/include/uapi/asm/socket.h @@ -138,6 +138,8 @@ #define SCM_DEVMEM_DMABUF SO_DEVMEM_DMABUF #define SO_DEVMEM_DONTNEED 80 +#define SCM_TS_OPT_ID 
0x404C + #if !defined(__KERNEL__) #if __BITS_PER_LONG == 64 diff --git a/arch/powerpc/configs/ppc6xx_defconfig b/arch/powerpc/configs/ppc6xx_defconfig index c06344db0eb3..4d77e17541e9 100644 --- a/arch/powerpc/configs/ppc6xx_defconfig +++ b/arch/powerpc/configs/ppc6xx_defconfig @@ -435,7 +435,6 @@ CONFIG_DM9102=m CONFIG_ULI526X=m CONFIG_PCMCIA_XIRCOM=m CONFIG_DL2K=m -CONFIG_SUNDANCE=m CONFIG_S2IO=m CONFIG_FEC_MPC52xx=m CONFIG_GIANFAR=m diff --git a/arch/sparc/include/uapi/asm/socket.h b/arch/sparc/include/uapi/asm/socket.h index 57084ed2f3c4..113cd9f353e3 100644 --- a/arch/sparc/include/uapi/asm/socket.h +++ b/arch/sparc/include/uapi/asm/socket.h @@ -139,6 +139,8 @@ #define SCM_DEVMEM_DMABUF SO_DEVMEM_DMABUF #define SO_DEVMEM_DONTNEED 0x0059 +#define SCM_TS_OPT_ID 0x005a + #if !defined(__KERNEL__) diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig index 9920b3a68ed1..1fd5acdc73c6 100644 --- a/drivers/net/Kconfig +++ b/drivers/net/Kconfig @@ -641,6 +641,7 @@ config NETDEVSIM depends on PTP_1588_CLOCK_MOCK || PTP_1588_CLOCK_MOCK=n select NET_DEVLINK select PAGE_POOL + select NET_SHAPER help This driver is a developer testing tool and software model that can be used to test various control path networking APIs, especially diff --git a/drivers/net/bareudp.c b/drivers/net/bareudp.c index e057526448d7..fa2dd76ba3d9 100644 --- a/drivers/net/bareudp.c +++ b/drivers/net/bareudp.c @@ -317,7 +317,7 @@ static int bareudp_xmit_skb(struct sk_buff *skb, struct net_device *dev, __be32 saddr; int err; - if (!skb_vlan_inet_prepare(skb, skb->protocol != htons(ETH_P_TEB))) + if (skb_vlan_inet_prepare(skb, skb->protocol != htons(ETH_P_TEB))) return -EINVAL; if (!sock) @@ -387,7 +387,7 @@ static int bareudp6_xmit_skb(struct sk_buff *skb, struct net_device *dev, __be16 sport; int err; - if (!skb_vlan_inet_prepare(skb, skb->protocol != htons(ETH_P_TEB))) + if (skb_vlan_inet_prepare(skb, skb->protocol != htons(ETH_P_TEB))) return -EINVAL; if (!sock) diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index b1bffd8e9a95..3928287f5865 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -1476,7 +1476,7 @@ static void bond_netpoll_cleanup(struct net_device *bond_dev) slave_disable_netpoll(slave); } -static int bond_netpoll_setup(struct net_device *dev, struct netpoll_info *ni) +static int bond_netpoll_setup(struct net_device *dev) { struct bonding *bond = netdev_priv(dev); struct list_head *iter; diff --git a/drivers/net/dsa/b53/b53_mmap.c b/drivers/net/dsa/b53/b53_mmap.c index 3a89349dc918..c687360a5b7f 100644 --- a/drivers/net/dsa/b53/b53_mmap.c +++ b/drivers/net/dsa/b53/b53_mmap.c @@ -370,7 +370,7 @@ MODULE_DEVICE_TABLE(of, b53_mmap_of_table); static struct platform_driver b53_mmap_driver = { .probe = b53_mmap_probe, - .remove_new = b53_mmap_remove, + .remove = b53_mmap_remove, .shutdown = b53_mmap_shutdown, .driver = { .name = "b53-switch", diff --git a/drivers/net/dsa/b53/b53_srab.c b/drivers/net/dsa/b53/b53_srab.c index f3f95332ff17..b9939bbd2cd5 100644 --- a/drivers/net/dsa/b53/b53_srab.c +++ b/drivers/net/dsa/b53/b53_srab.c @@ -682,7 +682,7 @@ static void b53_srab_shutdown(struct platform_device *pdev) static struct platform_driver b53_srab_driver = { .probe = b53_srab_probe, - .remove_new = b53_srab_remove, + .remove = b53_srab_remove, .shutdown = b53_srab_shutdown, .driver = { .name = "b53-srab-switch", diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c index 0e663ec0c12a..9201f07839ad 100644 --- a/drivers/net/dsa/bcm_sf2.c +++ 
b/drivers/net/dsa/bcm_sf2.c @@ -513,12 +513,12 @@ static void bcm_sf2_crossbar_setup(struct bcm_sf2_priv *priv) u32 reg; int i; - mask = BIT(priv->num_crossbar_int_ports) - 1; + mask = BIT(priv->num_crossbar_ext_bits) - 1; reg = reg_readl(priv, REG_CROSSBAR); switch (priv->type) { case BCM4908_DEVICE_ID: - shift = CROSSBAR_BCM4908_INT_P7 * priv->num_crossbar_int_ports; + shift = CROSSBAR_BCM4908_INT_P7 * priv->num_crossbar_ext_bits; reg &= ~(mask << shift); if (0) /* FIXME */ reg |= CROSSBAR_BCM4908_EXT_SERDES << shift; @@ -536,7 +536,7 @@ static void bcm_sf2_crossbar_setup(struct bcm_sf2_priv *priv) reg = reg_readl(priv, REG_CROSSBAR); for (i = 0; i < priv->num_crossbar_int_ports; i++) { - shift = i * priv->num_crossbar_int_ports; + shift = i * priv->num_crossbar_ext_bits; dev_dbg(dev, "crossbar int port #%d - ext port #%d\n", i, (reg >> shift) & mask); @@ -1260,6 +1260,7 @@ struct bcm_sf2_of_data { unsigned int core_reg_align; unsigned int num_cfp_rules; unsigned int num_crossbar_int_ports; + unsigned int num_crossbar_ext_bits; }; static const u16 bcm_sf2_4908_reg_offsets[] = { @@ -1288,6 +1289,7 @@ static const struct bcm_sf2_of_data bcm_sf2_4908_data = { .reg_offsets = bcm_sf2_4908_reg_offsets, .num_cfp_rules = 256, .num_crossbar_int_ports = 2, + .num_crossbar_ext_bits = 2, }; /* Register offsets for the SWITCH_REG_* block */ @@ -1399,6 +1401,7 @@ static int bcm_sf2_sw_probe(struct platform_device *pdev) priv->core_reg_align = data->core_reg_align; priv->num_cfp_rules = data->num_cfp_rules; priv->num_crossbar_int_ports = data->num_crossbar_int_ports; + priv->num_crossbar_ext_bits = data->num_crossbar_ext_bits; priv->rcdev = devm_reset_control_get_optional_exclusive(&pdev->dev, "switch"); @@ -1620,7 +1623,7 @@ static SIMPLE_DEV_PM_OPS(bcm_sf2_pm_ops, static struct platform_driver bcm_sf2_driver = { .probe = bcm_sf2_sw_probe, - .remove_new = bcm_sf2_sw_remove, + .remove = bcm_sf2_sw_remove, .shutdown = bcm_sf2_sw_shutdown, .driver = { .name = "brcm-sf2", diff --git a/drivers/net/dsa/bcm_sf2.h b/drivers/net/dsa/bcm_sf2.h index f95f4880b69e..4fda075a3449 100644 --- a/drivers/net/dsa/bcm_sf2.h +++ b/drivers/net/dsa/bcm_sf2.h @@ -75,6 +75,7 @@ struct bcm_sf2_priv { unsigned int core_reg_align; unsigned int num_cfp_rules; unsigned int num_crossbar_int_ports; + unsigned int num_crossbar_ext_bits; /* spinlock protecting access to the indirect registers */ spinlock_t indir_lock; diff --git a/drivers/net/dsa/hirschmann/hellcreek.c b/drivers/net/dsa/hirschmann/hellcreek.c index beda1e9d350f..d798f17cf7ea 100644 --- a/drivers/net/dsa/hirschmann/hellcreek.c +++ b/drivers/net/dsa/hirschmann/hellcreek.c @@ -2105,7 +2105,7 @@ MODULE_DEVICE_TABLE(of, hellcreek_of_match); static struct platform_driver hellcreek_driver = { .probe = hellcreek_probe, - .remove_new = hellcreek_remove, + .remove = hellcreek_remove, .shutdown = hellcreek_shutdown, .driver = { .name = "hellcreek", diff --git a/drivers/net/dsa/lantiq_gswip.c b/drivers/net/dsa/lantiq_gswip.c index fcd4505f4925..6eb3140d4044 100644 --- a/drivers/net/dsa/lantiq_gswip.c +++ b/drivers/net/dsa/lantiq_gswip.c @@ -2249,7 +2249,7 @@ MODULE_DEVICE_TABLE(of, gswip_of_match); static struct platform_driver gswip_driver = { .probe = gswip_probe, - .remove_new = gswip_remove, + .remove = gswip_remove, .shutdown = gswip_shutdown, .driver = { .name = "gswip", diff --git a/drivers/net/dsa/microchip/ksz_ptp.c b/drivers/net/dsa/microchip/ksz_ptp.c index 050f17c43ef6..22fb9ef4645c 100644 --- a/drivers/net/dsa/microchip/ksz_ptp.c +++ 
b/drivers/net/dsa/microchip/ksz_ptp.c @@ -1106,7 +1106,7 @@ static int ksz_ptp_msg_irq_setup(struct ksz_port *port, u8 n) ptpmsg_irq->port = port; ptpmsg_irq->ts_reg = ops->get_port_addr(port->num, ts_reg[n]); - snprintf(ptpmsg_irq->name, sizeof(ptpmsg_irq->name), name[n]); + strscpy(ptpmsg_irq->name, name[n]); ptpmsg_irq->num = irq_find_mapping(port->ptpirq.domain, n); if (ptpmsg_irq->num < 0) diff --git a/drivers/net/dsa/mt7530-mmio.c b/drivers/net/dsa/mt7530-mmio.c index 10dc49961f15..5f2db4317dd3 100644 --- a/drivers/net/dsa/mt7530-mmio.c +++ b/drivers/net/dsa/mt7530-mmio.c @@ -86,7 +86,7 @@ static void mt7988_shutdown(struct platform_device *pdev) static struct platform_driver mt7988_platform_driver = { .probe = mt7988_probe, - .remove_new = mt7988_remove, + .remove = mt7988_remove, .shutdown = mt7988_shutdown, .driver = { .name = "mt7530-mmio", diff --git a/drivers/net/dsa/mv88e6xxx/Kconfig b/drivers/net/dsa/mv88e6xxx/Kconfig index e3181d5471df..64ae3882d17c 100644 --- a/drivers/net/dsa/mv88e6xxx/Kconfig +++ b/drivers/net/dsa/mv88e6xxx/Kconfig @@ -17,3 +17,13 @@ config NET_DSA_MV88E6XXX_PTP help Say Y to enable PTP hardware timestamping on Marvell 88E6xxx switch chips that support it. + +config NET_DSA_MV88E6XXX_LEDS + bool "LED support for Marvell 88E6xxx" + default y + depends on NET_DSA_MV88E6XXX + depends on LEDS_CLASS=y || LEDS_CLASS=NET_DSA_MV88E6XXX + depends on LEDS_TRIGGERS + help + This enables support for controlling the LEDs attached to the + Marvell 88E6xxx switch chips. diff --git a/drivers/net/dsa/mv88e6xxx/Makefile b/drivers/net/dsa/mv88e6xxx/Makefile index a9a9651187db..dd961081d631 100644 --- a/drivers/net/dsa/mv88e6xxx/Makefile +++ b/drivers/net/dsa/mv88e6xxx/Makefile @@ -9,6 +9,7 @@ mv88e6xxx-objs += global2.o mv88e6xxx-objs += global2_avb.o mv88e6xxx-objs += global2_scratch.o mv88e6xxx-$(CONFIG_NET_DSA_MV88E6XXX_PTP) += hwtstamp.o +mv88e6xxx-$(CONFIG_NET_DSA_MV88E6XXX_LEDS) += leds.o mv88e6xxx-objs += pcs-6185.o mv88e6xxx-objs += pcs-6352.o mv88e6xxx-objs += pcs-639x.o diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c index 284270a4ade1..4f5193d86e65 100644 --- a/drivers/net/dsa/mv88e6xxx/chip.c +++ b/drivers/net/dsa/mv88e6xxx/chip.c @@ -27,6 +27,7 @@ #include <linux/of_irq.h> #include <linux/of_mdio.h> #include <linux/platform_data/mv88e6xxx.h> +#include <linux/property.h> #include <linux/netdevice.h> #include <linux/gpio/consumer.h> #include <linux/phylink.h> @@ -867,7 +868,7 @@ mv88e6xxx_mac_select_pcs(struct phylink_config *config, { struct dsa_port *dp = dsa_phylink_to_port(config); struct mv88e6xxx_chip *chip = dp->ds->priv; - struct phylink_pcs *pcs = ERR_PTR(-EOPNOTSUPP); + struct phylink_pcs *pcs = NULL; if (chip->info->ops->pcs_ops) pcs = chip->info->ops->pcs_ops->pcs_select(chip, dp->index, @@ -1929,36 +1930,9 @@ static int mv88e6xxx_vtu_loadpurge(struct mv88e6xxx_chip *chip, return chip->info->ops->vtu_loadpurge(chip, entry); } -static int mv88e6xxx_fid_map_vlan(struct mv88e6xxx_chip *chip, - const struct mv88e6xxx_vtu_entry *entry, - void *_fid_bitmap) -{ - unsigned long *fid_bitmap = _fid_bitmap; - - set_bit(entry->fid, fid_bitmap); - return 0; -} - -int mv88e6xxx_fid_map(struct mv88e6xxx_chip *chip, unsigned long *fid_bitmap) -{ - bitmap_zero(fid_bitmap, MV88E6XXX_N_FID); - - /* Every FID has an associated VID, so walking the VTU - * will discover the full set of FIDs in use.
- */ - return mv88e6xxx_vtu_walk(chip, mv88e6xxx_fid_map_vlan, fid_bitmap); -} - static int mv88e6xxx_atu_new(struct mv88e6xxx_chip *chip, u16 *fid) { - DECLARE_BITMAP(fid_bitmap, MV88E6XXX_N_FID); - int err; - - err = mv88e6xxx_fid_map(chip, fid_bitmap); - if (err) - return err; - - *fid = find_first_zero_bit(fid_bitmap, MV88E6XXX_N_FID); + *fid = find_first_zero_bit(chip->fid_bitmap, MV88E6XXX_N_FID); if (unlikely(*fid >= mv88e6xxx_num_databases(chip))) return -ENOSPC; @@ -2665,6 +2639,9 @@ static int mv88e6xxx_port_vlan_join(struct mv88e6xxx_chip *chip, int port, port, vid); } + /* Record FID used in SW FID map */ + bitmap_set(chip->fid_bitmap, vlan.fid, 1); + return 0; } @@ -2770,6 +2747,9 @@ static int mv88e6xxx_port_vlan_leave(struct mv88e6xxx_chip *chip, err = mv88e6xxx_mst_put(chip, vlan.sid); if (err) return err; + + /* Record FID freed in SW FID map */ + bitmap_clear(chip->fid_bitmap, vlan.fid, 1); } return mv88e6xxx_g1_atu_remove(chip, vlan.fid, port, false); @@ -3371,14 +3351,43 @@ static int mv88e6xxx_setup_upstream_port(struct mv88e6xxx_chip *chip, int port) static int mv88e6xxx_setup_port(struct mv88e6xxx_chip *chip, int port) { struct device_node *phy_handle = NULL; + struct fwnode_handle *ports_fwnode; + struct fwnode_handle *port_fwnode; struct dsa_switch *ds = chip->ds; + struct mv88e6xxx_port *p; struct dsa_port *dp; int tx_amp; int err; u16 reg; + u32 val; + + p = &chip->ports[port]; + p->chip = chip; + p->port = port; + + /* Look up corresponding fwnode if any */ + ports_fwnode = device_get_named_child_node(chip->dev, "ethernet-ports"); + if (!ports_fwnode) + ports_fwnode = device_get_named_child_node(chip->dev, "ports"); + if (ports_fwnode) { + fwnode_for_each_child_node(ports_fwnode, port_fwnode) { + if (fwnode_property_read_u32(port_fwnode, "reg", &val)) + continue; + if (val == port) { + p->fwnode = port_fwnode; + p->fiber = fwnode_property_present(port_fwnode, "sfp"); + break; + } + } + } else { + dev_dbg(chip->dev, "no ethernet ports node defined for the device\n"); + } - chip->ports[port].chip = chip; - chip->ports[port].port = port; + if (chip->info->ops->port_setup_leds) { + err = chip->info->ops->port_setup_leds(chip, port); + if (err && err != -EOPNOTSUPP) + return err; + } err = mv88e6xxx_port_setup_mac(chip, port, LINK_UNFORCED, SPEED_UNFORCED, DUPLEX_UNFORCED, @@ -4597,6 +4606,7 @@ static const struct mv88e6xxx_ops mv88e6172_ops = { .port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit, .port_disable_pri_override = mv88e6xxx_port_disable_pri_override, .port_get_cmode = mv88e6352_port_get_cmode, + .port_setup_leds = mv88e6xxx_port_setup_leds, .port_setup_message_port = mv88e6xxx_setup_message_port, .stats_snapshot = mv88e6320_g1_stats_snapshot, .stats_set_histogram = mv88e6095_g1_stats_set_histogram, @@ -4699,6 +4709,7 @@ static const struct mv88e6xxx_ops mv88e6176_ops = { .port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit, .port_disable_pri_override = mv88e6xxx_port_disable_pri_override, .port_get_cmode = mv88e6352_port_get_cmode, + .port_setup_leds = mv88e6xxx_port_setup_leds, .port_setup_message_port = mv88e6xxx_setup_message_port, .stats_snapshot = mv88e6320_g1_stats_snapshot, .stats_set_histogram = mv88e6095_g1_stats_set_histogram, @@ -4974,6 +4985,7 @@ static const struct mv88e6xxx_ops mv88e6240_ops = { .port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit, .port_disable_pri_override = mv88e6xxx_port_disable_pri_override, .port_get_cmode = mv88e6352_port_get_cmode, + .port_setup_leds = mv88e6xxx_port_setup_leds, 
.port_setup_message_port = mv88e6xxx_setup_message_port, .stats_snapshot = mv88e6320_g1_stats_snapshot, .stats_set_histogram = mv88e6095_g1_stats_set_histogram, @@ -5396,6 +5408,7 @@ static const struct mv88e6xxx_ops mv88e6352_ops = { .port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit, .port_disable_pri_override = mv88e6xxx_port_disable_pri_override, .port_get_cmode = mv88e6352_port_get_cmode, + .port_setup_leds = mv88e6xxx_port_setup_leds, .port_setup_message_port = mv88e6xxx_setup_message_port, .stats_snapshot = mv88e6320_g1_stats_snapshot, .stats_set_histogram = mv88e6095_g1_stats_set_histogram, diff --git a/drivers/net/dsa/mv88e6xxx/chip.h b/drivers/net/dsa/mv88e6xxx/chip.h index a54682240839..48399ab5355a 100644 --- a/drivers/net/dsa/mv88e6xxx/chip.h +++ b/drivers/net/dsa/mv88e6xxx/chip.h @@ -13,7 +13,9 @@ #include <linux/irq.h> #include <linux/gpio/consumer.h> #include <linux/kthread.h> +#include <linux/leds.h> #include <linux/phy.h> +#include <linux/property.h> #include <linux/ptp_clock_kernel.h> #include <linux/timecounter.h> #include <net/dsa.h> @@ -276,6 +278,7 @@ struct mv88e6xxx_vlan { struct mv88e6xxx_port { struct mv88e6xxx_chip *chip; int port; + struct fwnode_handle *fwnode; struct mv88e6xxx_vlan bridge_pvid; u64 serdes_stats[2]; u64 atu_member_violation; @@ -290,6 +293,11 @@ struct mv88e6xxx_port { struct devlink_region *region; void *pcs_private; + /* LED related information */ + bool fiber; + struct led_classdev led0; + struct led_classdev led1; + /* MacAuth Bypass control flag */ bool mab; }; @@ -434,6 +442,9 @@ struct mv88e6xxx_chip { /* Bridge MST to SID mappings */ struct list_head msts; + + /* FID map */ + DECLARE_BITMAP(fid_bitmap, MV88E6XXX_N_FID); }; struct mv88e6xxx_bus_ops { @@ -574,6 +585,9 @@ struct mv88e6xxx_ops { phy_interface_t mode); int (*port_get_cmode)(struct mv88e6xxx_chip *chip, int port, u8 *cmode); + /* LED control */ + int (*port_setup_leds)(struct mv88e6xxx_chip *chip, int port); + /* Some devices have a per port register indicating what is * the upstream port this port should forward to. 
*/ @@ -830,6 +844,4 @@ int mv88e6xxx_vtu_walk(struct mv88e6xxx_chip *chip, void *priv), void *priv); -int mv88e6xxx_fid_map(struct mv88e6xxx_chip *chip, unsigned long *bitmap); - #endif /* _MV88E6XXX_CHIP_H */ diff --git a/drivers/net/dsa/mv88e6xxx/devlink.c b/drivers/net/dsa/mv88e6xxx/devlink.c index a08dab75e0c0..795c8df7b6a7 100644 --- a/drivers/net/dsa/mv88e6xxx/devlink.c +++ b/drivers/net/dsa/mv88e6xxx/devlink.c @@ -374,10 +374,9 @@ static int mv88e6xxx_region_atu_snapshot(struct devlink *dl, u8 **data) { struct dsa_switch *ds = dsa_devlink_to_ds(dl); - DECLARE_BITMAP(fid_bitmap, MV88E6XXX_N_FID); struct mv88e6xxx_devlink_atu_entry *table; struct mv88e6xxx_chip *chip = ds->priv; - int fid = -1, count, err; + int fid = -1, err = 0, count; table = kmalloc_array(mv88e6xxx_num_databases(chip), sizeof(struct mv88e6xxx_devlink_atu_entry), @@ -392,14 +391,8 @@ static int mv88e6xxx_region_atu_snapshot(struct devlink *dl, mv88e6xxx_reg_lock(chip); - err = mv88e6xxx_fid_map(chip, fid_bitmap); - if (err) { - kfree(table); - goto out; - } - while (1) { - fid = find_next_bit(fid_bitmap, MV88E6XXX_N_FID, fid + 1); + fid = find_next_bit(chip->fid_bitmap, MV88E6XXX_N_FID, fid + 1); if (fid == MV88E6XXX_N_FID) break; diff --git a/drivers/net/dsa/mv88e6xxx/global1_vtu.c b/drivers/net/dsa/mv88e6xxx/global1_vtu.c index bcfb4a812055..b524f27a2f0d 100644 --- a/drivers/net/dsa/mv88e6xxx/global1_vtu.c +++ b/drivers/net/dsa/mv88e6xxx/global1_vtu.c @@ -471,6 +471,9 @@ int mv88e6xxx_g1_vtu_flush(struct mv88e6xxx_chip *chip) { int err; + /* As part of the VTU flush, refresh FID map */ + bitmap_zero(chip->fid_bitmap, MV88E6XXX_N_FID); + err = mv88e6xxx_g1_vtu_op_wait(chip); if (err) return err; diff --git a/drivers/net/dsa/mv88e6xxx/leds.c b/drivers/net/dsa/mv88e6xxx/leds.c new file mode 100644 index 000000000000..1c88bfaea46b --- /dev/null +++ b/drivers/net/dsa/mv88e6xxx/leds.c @@ -0,0 +1,839 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +#include <linux/bitfield.h> +#include <linux/leds.h> +#include <linux/property.h> + +#include "chip.h" +#include "global2.h" +#include "port.h" + +/* Offset 0x16: LED control */ + +static int mv88e6xxx_port_led_write(struct mv88e6xxx_chip *chip, int port, u16 reg) +{ + reg |= MV88E6XXX_PORT_LED_CONTROL_UPDATE; + + return mv88e6xxx_port_write(chip, port, MV88E6XXX_PORT_LED_CONTROL, reg); +} + +static int mv88e6xxx_port_led_read(struct mv88e6xxx_chip *chip, int port, + u16 ptr, u16 *val) +{ + int err; + + err = mv88e6xxx_port_write(chip, port, MV88E6XXX_PORT_LED_CONTROL, ptr); + if (err) + return err; + + err = mv88e6xxx_port_read(chip, port, MV88E6XXX_PORT_LED_CONTROL, val); + *val &= 0x3ff; + + return err; +} + +static int mv88e6xxx_led_brightness_set(struct mv88e6xxx_port *p, int led, + int brightness) +{ + u16 reg; + int err; + + err = mv88e6xxx_port_led_read(p->chip, p->port, + MV88E6XXX_PORT_LED_CONTROL_POINTER_LED01_CTRL, + &reg); + if (err) + return err; + + if (led == 1) + reg &= ~MV88E6XXX_PORT_LED_CONTROL_LED1_SEL_MASK; + else + reg &= ~MV88E6XXX_PORT_LED_CONTROL_LED0_SEL_MASK; + + if (brightness) { + /* Selector 0x0f == Force LED ON */ + if (led == 1) + reg |= MV88E6XXX_PORT_LED_CONTROL_LED1_SELF; + else + reg |= MV88E6XXX_PORT_LED_CONTROL_LED0_SELF; + } else { + /* Selector 0x0e == Force LED OFF */ + if (led == 1) + reg |= MV88E6XXX_PORT_LED_CONTROL_LED1_SELE; + else + reg |= MV88E6XXX_PORT_LED_CONTROL_LED0_SELE; + } + + reg |= MV88E6XXX_PORT_LED_CONTROL_POINTER_LED01_CTRL; + + return mv88e6xxx_port_led_write(p->chip, p->port, reg); +} + +static int
mv88e6xxx_led0_brightness_set_blocking(struct led_classdev *ldev, + enum led_brightness brightness) +{ + struct mv88e6xxx_port *p = container_of(ldev, struct mv88e6xxx_port, led0); + int err; + + mv88e6xxx_reg_lock(p->chip); + err = mv88e6xxx_led_brightness_set(p, 0, brightness); + mv88e6xxx_reg_unlock(p->chip); + + return err; +} + +static int mv88e6xxx_led1_brightness_set_blocking(struct led_classdev *ldev, + enum led_brightness brightness) +{ + struct mv88e6xxx_port *p = container_of(ldev, struct mv88e6xxx_port, led1); + int err; + + mv88e6xxx_reg_lock(p->chip); + err = mv88e6xxx_led_brightness_set(p, 1, brightness); + mv88e6xxx_reg_unlock(p->chip); + + return err; +} + +struct mv88e6xxx_led_hwconfig { + int led; + u8 portmask; + unsigned long rules; + bool fiber; + bool blink_activity; + u16 selector; +}; + +/* The following is a lookup table to check what rules we can support on a + * certain LED given restrictions such as that some rules only work with fiber + * (SFP) connections and some blink on activity by default. + */ +#define MV88E6XXX_PORTS_0_3 (BIT(0) | BIT(1) | BIT(2) | BIT(3)) +#define MV88E6XXX_PORTS_4_5 (BIT(4) | BIT(5)) +#define MV88E6XXX_PORT_4 BIT(4) +#define MV88E6XXX_PORT_5 BIT(5) + +/* Entries are listed in selector order. + * + * These configurations vary across different switch families, list + * different tables per-family here. + */ +static const struct mv88e6xxx_led_hwconfig mv88e6352_led_hwconfigs[] = { + { + .led = 0, + .portmask = MV88E6XXX_PORT_4, + .rules = BIT(TRIGGER_NETDEV_LINK), + .blink_activity = true, + .selector = MV88E6XXX_PORT_LED_CONTROL_LED0_SEL0, + }, + { + .led = 1, + .portmask = MV88E6XXX_PORT_5, + .rules = BIT(TRIGGER_NETDEV_LINK_1000), + .blink_activity = true, + .selector = MV88E6XXX_PORT_LED_CONTROL_LED1_SEL0, + }, + { + .led = 0, + .portmask = MV88E6XXX_PORTS_0_3, + .rules = BIT(TRIGGER_NETDEV_LINK_100) | BIT(TRIGGER_NETDEV_LINK_1000), + .blink_activity = true, + .selector = MV88E6XXX_PORT_LED_CONTROL_LED0_SEL1, + }, + { + .led = 1, + .portmask = MV88E6XXX_PORTS_0_3, + .rules = BIT(TRIGGER_NETDEV_LINK_10) | BIT(TRIGGER_NETDEV_LINK_100), + .blink_activity = true, + .selector = MV88E6XXX_PORT_LED_CONTROL_LED1_SEL1, + }, + { + .led = 0, + .portmask = MV88E6XXX_PORTS_4_5, + .rules = BIT(TRIGGER_NETDEV_LINK_100), + .blink_activity = true, + .fiber = true, + .selector = MV88E6XXX_PORT_LED_CONTROL_LED0_SEL1, + }, + { + .led = 1, + .portmask = MV88E6XXX_PORTS_4_5, + .rules = BIT(TRIGGER_NETDEV_LINK_1000), + .blink_activity = true, + .fiber = true, + .selector = MV88E6XXX_PORT_LED_CONTROL_LED1_SEL1, + }, + { + .led = 0, + .portmask = MV88E6XXX_PORTS_0_3, + .rules = BIT(TRIGGER_NETDEV_LINK_1000), + .blink_activity = true, + .selector = MV88E6XXX_PORT_LED_CONTROL_LED0_SEL2, + }, + { + .led = 1, + .portmask = MV88E6XXX_PORTS_0_3, + .rules = BIT(TRIGGER_NETDEV_LINK_10) | BIT(TRIGGER_NETDEV_LINK_100), + .blink_activity = true, + .selector = MV88E6XXX_PORT_LED_CONTROL_LED1_SEL2, + }, + { + .led = 0, + .portmask = MV88E6XXX_PORTS_4_5, + .rules = BIT(TRIGGER_NETDEV_LINK_1000), + .blink_activity = true, + .fiber = true, + .selector = MV88E6XXX_PORT_LED_CONTROL_LED0_SEL2, + }, + { + .led = 1, + .portmask = MV88E6XXX_PORTS_4_5, + .rules = BIT(TRIGGER_NETDEV_LINK_100), + .blink_activity = true, + .fiber = true, + .selector = MV88E6XXX_PORT_LED_CONTROL_LED1_SEL2, + }, + { + .led = 0, + .portmask = MV88E6XXX_PORTS_0_3, + .rules = BIT(TRIGGER_NETDEV_LINK), + .blink_activity = true, + .selector = MV88E6XXX_PORT_LED_CONTROL_LED0_SEL3, + }, + { + .led = 1, + 
.portmask = MV88E6XXX_PORTS_0_3, + .rules = BIT(TRIGGER_NETDEV_LINK_1000), + .selector = MV88E6XXX_PORT_LED_CONTROL_LED1_SEL3, + }, + { + .led = 1, + .portmask = MV88E6XXX_PORTS_4_5, + .rules = BIT(TRIGGER_NETDEV_LINK), + .fiber = true, + .selector = MV88E6XXX_PORT_LED_CONTROL_LED1_SEL3, + }, + { + .led = 1, + .portmask = MV88E6XXX_PORT_4, + .rules = BIT(TRIGGER_NETDEV_LINK), + .blink_activity = true, + .selector = MV88E6XXX_PORT_LED_CONTROL_LED1_SEL4, + }, + { + .led = 1, + .portmask = MV88E6XXX_PORT_5, + .rules = BIT(TRIGGER_NETDEV_LINK), + .selector = MV88E6XXX_PORT_LED_CONTROL_LED1_SEL5, + }, + { + .led = 0, + .portmask = MV88E6XXX_PORTS_0_3, + .rules = BIT(TRIGGER_NETDEV_FULL_DUPLEX), + .blink_activity = true, + .selector = MV88E6XXX_PORT_LED_CONTROL_LED0_SEL6, + }, + { + .led = 1, + .portmask = MV88E6XXX_PORTS_0_3, + .rules = BIT(TRIGGER_NETDEV_LINK_10) | BIT(TRIGGER_NETDEV_LINK_1000), + .blink_activity = true, + .selector = MV88E6XXX_PORT_LED_CONTROL_LED1_SEL6, + }, + { + .led = 0, + .portmask = MV88E6XXX_PORT_4, + .rules = BIT(TRIGGER_NETDEV_FULL_DUPLEX), + .blink_activity = true, + .selector = MV88E6XXX_PORT_LED_CONTROL_LED0_SEL6, + }, + { + .led = 1, + .portmask = MV88E6XXX_PORT_5, + .rules = BIT(TRIGGER_NETDEV_FULL_DUPLEX), + .blink_activity = true, + .selector = MV88E6XXX_PORT_LED_CONTROL_LED1_SEL6, + }, + { + .led = 0, + .portmask = MV88E6XXX_PORTS_0_3, + .rules = BIT(TRIGGER_NETDEV_LINK_10) | BIT(TRIGGER_NETDEV_LINK_1000), + .blink_activity = true, + .selector = MV88E6XXX_PORT_LED_CONTROL_LED0_SEL7, + }, + { + .led = 1, + .portmask = MV88E6XXX_PORTS_0_3, + .rules = BIT(TRIGGER_NETDEV_LINK_10) | BIT(TRIGGER_NETDEV_LINK_1000), + .selector = MV88E6XXX_PORT_LED_CONTROL_LED1_SEL7, + }, + { + .led = 0, + .portmask = MV88E6XXX_PORTS_0_3, + .rules = BIT(TRIGGER_NETDEV_LINK), + .selector = MV88E6XXX_PORT_LED_CONTROL_LED0_SEL8, + }, + { + .led = 1, + .portmask = MV88E6XXX_PORTS_0_3, + .rules = BIT(TRIGGER_NETDEV_LINK), + .blink_activity = true, + .selector = MV88E6XXX_PORT_LED_CONTROL_LED1_SEL8, + }, + { + .led = 0, + .portmask = MV88E6XXX_PORT_5, + .rules = BIT(TRIGGER_NETDEV_LINK), + .blink_activity = true, + .selector = MV88E6XXX_PORT_LED_CONTROL_LED0_SEL8, + }, + { + .led = 0, + .portmask = MV88E6XXX_PORTS_0_3, + .rules = BIT(TRIGGER_NETDEV_LINK_10), + .selector = MV88E6XXX_PORT_LED_CONTROL_LED0_SEL9, + }, + { + .led = 1, + .portmask = MV88E6XXX_PORTS_0_3, + .rules = BIT(TRIGGER_NETDEV_LINK_100), + .selector = MV88E6XXX_PORT_LED_CONTROL_LED1_SEL9, + }, + { + .led = 0, + .portmask = MV88E6XXX_PORTS_0_3, + .rules = BIT(TRIGGER_NETDEV_LINK_10), + .blink_activity = true, + .selector = MV88E6XXX_PORT_LED_CONTROL_LED0_SELA, + }, + { + .led = 1, + .portmask = MV88E6XXX_PORTS_0_3, + .rules = BIT(TRIGGER_NETDEV_LINK_100), + .blink_activity = true, + .selector = MV88E6XXX_PORT_LED_CONTROL_LED1_SELA, + }, + { + .led = 0, + .portmask = MV88E6XXX_PORTS_0_3, + .rules = BIT(TRIGGER_NETDEV_LINK_100) | BIT(TRIGGER_NETDEV_LINK_1000), + .selector = MV88E6XXX_PORT_LED_CONTROL_LED0_SELB, + }, + { + .led = 1, + .portmask = MV88E6XXX_PORTS_0_3, + .rules = BIT(TRIGGER_NETDEV_LINK_100) | BIT(TRIGGER_NETDEV_LINK_1000), + .blink_activity = true, + .selector = MV88E6XXX_PORT_LED_CONTROL_LED1_SELB, + }, +}; + +/* mv88e6xxx_led_match_selector() - look up the appropriate LED mode selector + * @p: port state container + * @led: LED number, 0 or 1 + * @blink_activity: blink the LED (usually blink on indicated activity) + * @fiber: the link is connected to fiber such as SFP + * @rules: LED status flags from the LED 
classdev core + * @selector: fill in the selector in this parameter with an OR operation + */ +static int mv88e6xxx_led_match_selector(struct mv88e6xxx_port *p, int led, bool blink_activity, + bool fiber, unsigned long rules, u16 *selector) +{ + const struct mv88e6xxx_led_hwconfig *conf; + int i; + + /* No rules means we turn the LED off */ + if (!rules) { + if (led == 1) + *selector |= MV88E6XXX_PORT_LED_CONTROL_LED1_SELE; + else + *selector |= MV88E6XXX_PORT_LED_CONTROL_LED0_SELE; + return 0; + } + + /* TODO: these rules are for MV88E6352, when adding other families, + * think about making sure you select the table that matches the + * specific switch family. + */ + for (i = 0; i < ARRAY_SIZE(mv88e6352_led_hwconfigs); i++) { + conf = &mv88e6352_led_hwconfigs[i]; + + if (conf->led != led) + continue; + + if (!(conf->portmask & BIT(p->port))) + continue; + + if (conf->blink_activity != blink_activity) + continue; + + if (conf->fiber != fiber) + continue; + + if (conf->rules == rules) { + dev_dbg(p->chip->dev, "port%d LED %d set selector %04x for rules %08lx\n", + p->port, led, conf->selector, rules); + *selector |= conf->selector; + return 0; + } + } + + return -EOPNOTSUPP; +} + +/* mv88e6xxx_led_match_rule() - find Linux netdev rules from a selector value + * @p: port state container + * @selector: the selector value from the LED activity register + * @led: LED number, 0 or 1 + * @rules: Linux netdev activity rules found from selector + */ +static int +mv88e6xxx_led_match_rule(struct mv88e6xxx_port *p, u16 selector, int led, unsigned long *rules) +{ + const struct mv88e6xxx_led_hwconfig *conf; + int i; + + /* Find the selector in the table; we just look for the right selector + * and ignore whether the activity has special properties such as blinking + * or being fiber-only. + */ + for (i = 0; i < ARRAY_SIZE(mv88e6352_led_hwconfigs); i++) { + conf = &mv88e6352_led_hwconfigs[i]; + + if (conf->led != led) + continue; + + if (!(conf->portmask & BIT(p->port))) + continue; + + if (conf->selector == selector) { + dev_dbg(p->chip->dev, "port%d LED %d has selector %04x, rules %08lx\n", + p->port, led, selector, conf->rules); + *rules = conf->rules; + return 0; + } + } + + return -EINVAL; +} + +/* mv88e6xxx_led_get_selector() - get the appropriate LED mode selector + * @p: port state container + * @led: LED number, 0 or 1 + * @fiber: the link is connected to fiber such as SFP + * @rules: LED status flags from the LED classdev core + * @selector: fill in the selector in this parameter with an OR operation + */ +static int mv88e6xxx_led_get_selector(struct mv88e6xxx_port *p, int led, + bool fiber, unsigned long rules, u16 *selector) +{ + int err; + + /* What happens here is that we first try to locate a trigger with a solid + * indicator (such as LED on for a 1000 Mbit link); failing that, we make a + * second sweep to find something suitable with a trigger that will blink on + * activity.
+ */ + err = mv88e6xxx_led_match_selector(p, led, false, fiber, rules, selector); + if (err) + return mv88e6xxx_led_match_selector(p, led, true, fiber, rules, selector); + + return 0; +} + +/* Sets up the hardware blinking period */ +static int mv88e6xxx_led_set_blinking_period(struct mv88e6xxx_port *p, int led, + unsigned long delay_on, unsigned long delay_off) +{ + unsigned long period; + u16 reg; + + period = delay_on + delay_off; + + reg = 0; + + switch (period) { + case 21: + reg |= MV88E6XXX_PORT_LED_CONTROL_0x06_BLINK_RATE_21MS; + break; + case 42: + reg |= MV88E6XXX_PORT_LED_CONTROL_0x06_BLINK_RATE_42MS; + break; + case 84: + reg |= MV88E6XXX_PORT_LED_CONTROL_0x06_BLINK_RATE_84MS; + break; + case 168: + reg |= MV88E6XXX_PORT_LED_CONTROL_0x06_BLINK_RATE_168MS; + break; + case 336: + reg |= MV88E6XXX_PORT_LED_CONTROL_0x06_BLINK_RATE_336MS; + break; + case 672: + reg |= MV88E6XXX_PORT_LED_CONTROL_0x06_BLINK_RATE_672MS; + break; + default: + /* Fall back to software blinking */ + return -EINVAL; + } + + /* This is essentially PWM duty cycle: how much of the period + * the LED will be on. Zero isn't great in most cases. + */ + switch (delay_on) { + case 0: + /* This is usually pretty useless and will make the LED look OFF */ + reg |= MV88E6XXX_PORT_LED_CONTROL_0x06_PULSE_STRETCH_NONE; + break; + case 21: + reg |= MV88E6XXX_PORT_LED_CONTROL_0x06_PULSE_STRETCH_21MS; + break; + case 42: + reg |= MV88E6XXX_PORT_LED_CONTROL_0x06_PULSE_STRETCH_42MS; + break; + case 84: + reg |= MV88E6XXX_PORT_LED_CONTROL_0x06_PULSE_STRETCH_84MS; + break; + case 168: + reg |= MV88E6XXX_PORT_LED_CONTROL_0x06_PULSE_STRETCH_168MS; + break; + default: + /* Just use something non-zero */ + reg |= MV88E6XXX_PORT_LED_CONTROL_0x06_PULSE_STRETCH_21MS; + break; + } + + /* Select the stretch/blink rate control register */ + reg |= MV88E6XXX_PORT_LED_CONTROL_POINTER_STRETCH_BLINK; + + return mv88e6xxx_port_led_write(p->chip, p->port, reg); +} + +static int mv88e6xxx_led_blink_set(struct mv88e6xxx_port *p, int led, + unsigned long *delay_on, unsigned long *delay_off) +{ + u16 reg; + int err; + + /* Choose a sensible default 336 ms (~3 Hz) */ + if ((*delay_on == 0) && (*delay_off == 0)) { + *delay_on = 168; + *delay_off = 168; + } + + /* No off delay is just on */ + if (*delay_off == 0) + return mv88e6xxx_led_brightness_set(p, led, 1); + + err = mv88e6xxx_led_set_blinking_period(p, led, *delay_on, *delay_off); + if (err) + return err; + + err = mv88e6xxx_port_led_read(p->chip, p->port, + MV88E6XXX_PORT_LED_CONTROL_POINTER_LED01_CTRL, + &reg); + if (err) + return err; + + if (led == 1) + reg &= ~MV88E6XXX_PORT_LED_CONTROL_LED1_SEL_MASK; + else + reg &= ~MV88E6XXX_PORT_LED_CONTROL_LED0_SEL_MASK; + + /* This will select the forced blinking status */ + if (led == 1) + reg |= MV88E6XXX_PORT_LED_CONTROL_LED1_SELD; + else + reg |= MV88E6XXX_PORT_LED_CONTROL_LED0_SELD; + + reg |= MV88E6XXX_PORT_LED_CONTROL_POINTER_LED01_CTRL; + + return mv88e6xxx_port_led_write(p->chip, p->port, reg); +} + +static int mv88e6xxx_led0_blink_set(struct led_classdev *ldev, + unsigned long *delay_on, + unsigned long *delay_off) +{ + struct mv88e6xxx_port *p = container_of(ldev, struct mv88e6xxx_port, led0); + int err; + + mv88e6xxx_reg_lock(p->chip); + err = mv88e6xxx_led_blink_set(p, 0, delay_on, delay_off); + mv88e6xxx_reg_unlock(p->chip); + + return err; +} + +static int mv88e6xxx_led1_blink_set(struct led_classdev *ldev, + unsigned long *delay_on, + unsigned long *delay_off) +{ + struct mv88e6xxx_port *p = container_of(ldev, struct mv88e6xxx_port, led1); + int err; + +
mv88e6xxx_reg_lock(p->chip); + err = mv88e6xxx_led_blink_set(p, 1, delay_on, delay_off); + mv88e6xxx_reg_unlock(p->chip); + + return err; +} + +static int +mv88e6xxx_led0_hw_control_is_supported(struct led_classdev *ldev, unsigned long rules) +{ + struct mv88e6xxx_port *p = container_of(ldev, struct mv88e6xxx_port, led0); + u16 selector = 0; + + return mv88e6xxx_led_get_selector(p, 0, p->fiber, rules, &selector); +} + +static int +mv88e6xxx_led1_hw_control_is_supported(struct led_classdev *ldev, unsigned long rules) +{ + struct mv88e6xxx_port *p = container_of(ldev, struct mv88e6xxx_port, led1); + u16 selector = 0; + + return mv88e6xxx_led_get_selector(p, 1, p->fiber, rules, &selector); +} + +static int mv88e6xxx_led_hw_control_set(struct mv88e6xxx_port *p, + int led, unsigned long rules) +{ + u16 reg; + int err; + + err = mv88e6xxx_port_led_read(p->chip, p->port, + MV88E6XXX_PORT_LED_CONTROL_POINTER_LED01_CTRL, + &reg); + if (err) + return err; + + if (led == 1) + reg &= ~MV88E6XXX_PORT_LED_CONTROL_LED1_SEL_MASK; + else + reg &= ~MV88E6XXX_PORT_LED_CONTROL_LED0_SEL_MASK; + + err = mv88e6xxx_led_get_selector(p, led, p->fiber, rules, &reg); + if (err) + return err; + + reg |= MV88E6XXX_PORT_LED_CONTROL_POINTER_LED01_CTRL; + + if (led == 0) + dev_dbg(p->chip->dev, "LED 0 hw control on port %d trigger selector 0x%02x\n", + p->port, + (unsigned int)(reg & MV88E6XXX_PORT_LED_CONTROL_LED0_SEL_MASK)); + else + dev_dbg(p->chip->dev, "LED 1 hw control on port %d trigger selector 0x%02x\n", + p->port, + (unsigned int)(reg & MV88E6XXX_PORT_LED_CONTROL_LED1_SEL_MASK) >> 4); + + return mv88e6xxx_port_led_write(p->chip, p->port, reg); +} + +static int +mv88e6xxx_led_hw_control_get(struct mv88e6xxx_port *p, int led, unsigned long *rules) +{ + u16 val; + int err; + + mv88e6xxx_reg_lock(p->chip); + err = mv88e6xxx_port_led_read(p->chip, p->port, + MV88E6XXX_PORT_LED_CONTROL_POINTER_LED01_CTRL, &val); + mv88e6xxx_reg_unlock(p->chip); + if (err) + return err; + + /* Mask out the selector bits for this port */ + if (led == 1) { + val &= MV88E6XXX_PORT_LED_CONTROL_LED1_SEL_MASK; + /* It's forced blinking/OFF/ON */ + if (val == MV88E6XXX_PORT_LED_CONTROL_LED1_SELD || + val == MV88E6XXX_PORT_LED_CONTROL_LED1_SELE || + val == MV88E6XXX_PORT_LED_CONTROL_LED1_SELF) { + *rules = 0; + return 0; + } + } else { + val &= MV88E6XXX_PORT_LED_CONTROL_LED0_SEL_MASK; + /* It's forced blinking/OFF/ON */ + if (val == MV88E6XXX_PORT_LED_CONTROL_LED0_SELD || + val == MV88E6XXX_PORT_LED_CONTROL_LED0_SELE || + val == MV88E6XXX_PORT_LED_CONTROL_LED0_SELF) { + *rules = 0; + return 0; + } + } + + err = mv88e6xxx_led_match_rule(p, val, led, rules); + if (!err) + return 0; + + dev_dbg(p->chip->dev, "couldn't find matching selector for %04x\n", val); + *rules = 0; + return 0; +} + +static int +mv88e6xxx_led0_hw_control_set(struct led_classdev *ldev, unsigned long rules) +{ + struct mv88e6xxx_port *p = container_of(ldev, struct mv88e6xxx_port, led0); + int err; + + mv88e6xxx_reg_lock(p->chip); + err = mv88e6xxx_led_hw_control_set(p, 0, rules); + mv88e6xxx_reg_unlock(p->chip); + + return err; +} + +static int +mv88e6xxx_led1_hw_control_set(struct led_classdev *ldev, unsigned long rules) +{ + struct mv88e6xxx_port *p = container_of(ldev, struct mv88e6xxx_port, led1); + int err; + + mv88e6xxx_reg_lock(p->chip); + err = mv88e6xxx_led_hw_control_set(p, 1, rules); + mv88e6xxx_reg_unlock(p->chip); + + return err; +} + +static int +mv88e6xxx_led0_hw_control_get(struct led_classdev *ldev, unsigned long *rules) +{ + struct mv88e6xxx_port *p =
container_of(ldev, struct mv88e6xxx_port, led0); + + return mv88e6xxx_led_hw_control_get(p, 0, rules); +} + +static int +mv88e6xxx_led1_hw_control_get(struct led_classdev *ldev, unsigned long *rules) +{ + struct mv88e6xxx_port *p = container_of(ldev, struct mv88e6xxx_port, led1); + + return mv88e6xxx_led_hw_control_get(p, 1, rules); +} + +static struct device *mv88e6xxx_led_hw_control_get_device(struct mv88e6xxx_port *p) +{ + struct dsa_port *dp; + + dp = dsa_to_port(p->chip->ds, p->port); + if (!dp) + return NULL; + if (dp->user) + return &dp->user->dev; + return NULL; +} + +static struct device * +mv88e6xxx_led0_hw_control_get_device(struct led_classdev *ldev) +{ + struct mv88e6xxx_port *p = container_of(ldev, struct mv88e6xxx_port, led0); + + return mv88e6xxx_led_hw_control_get_device(p); +} + +static struct device * +mv88e6xxx_led1_hw_control_get_device(struct led_classdev *ldev) +{ + struct mv88e6xxx_port *p = container_of(ldev, struct mv88e6xxx_port, led1); + + return mv88e6xxx_led_hw_control_get_device(p); +} + +int mv88e6xxx_port_setup_leds(struct mv88e6xxx_chip *chip, int port) +{ + struct fwnode_handle *led = NULL, *leds = NULL; + struct led_init_data init_data = { }; + enum led_default_state state; + struct mv88e6xxx_port *p; + struct led_classdev *l; + struct device *dev; + u32 led_num; + int ret; + + /* LEDs are on ports 1, 2, 3, 4, 5 and 6 (index 0..5), no more */ + if (port > 5) + return -EOPNOTSUPP; + + p = &chip->ports[port]; + if (!p->fwnode) + return 0; + + dev = chip->dev; + + leds = fwnode_get_named_child_node(p->fwnode, "leds"); + if (!leds) { + dev_dbg(dev, "No Leds node specified in device tree for port %d!\n", + port); + return 0; + } + + fwnode_for_each_child_node(leds, led) { + /* Reg represents the LED number of the port; at most 2 + * LEDs can be connected to each port, and in some designs + * only one LED is connected.
+ */ + if (fwnode_property_read_u32(led, "reg", &led_num)) + continue; + if (led_num > 1) { + dev_err(dev, "invalid LED specified port %d\n", port); + return -EINVAL; + } + + if (led_num == 0) + l = &p->led0; + else + l = &p->led1; + + state = led_init_default_state_get(led); + switch (state) { + case LEDS_DEFSTATE_ON: + l->brightness = 1; + mv88e6xxx_led_brightness_set(p, led_num, 1); + break; + case LEDS_DEFSTATE_KEEP: + break; + default: + l->brightness = 0; + mv88e6xxx_led_brightness_set(p, led_num, 0); + } + + l->max_brightness = 1; + if (led_num == 0) { + l->brightness_set_blocking = mv88e6xxx_led0_brightness_set_blocking; + l->blink_set = mv88e6xxx_led0_blink_set; + l->hw_control_is_supported = mv88e6xxx_led0_hw_control_is_supported; + l->hw_control_set = mv88e6xxx_led0_hw_control_set; + l->hw_control_get = mv88e6xxx_led0_hw_control_get; + l->hw_control_get_device = mv88e6xxx_led0_hw_control_get_device; + } else { + l->brightness_set_blocking = mv88e6xxx_led1_brightness_set_blocking; + l->blink_set = mv88e6xxx_led1_blink_set; + l->hw_control_is_supported = mv88e6xxx_led1_hw_control_is_supported; + l->hw_control_set = mv88e6xxx_led1_hw_control_set; + l->hw_control_get = mv88e6xxx_led1_hw_control_get; + l->hw_control_get_device = mv88e6xxx_led1_hw_control_get_device; + } + l->hw_control_trigger = "netdev"; + + init_data.default_label = ":port"; + init_data.fwnode = led; + init_data.devname_mandatory = true; + init_data.devicename = kasprintf(GFP_KERNEL, "%s:0%d:0%d", chip->info->name, + port, led_num); + if (!init_data.devicename) + return -ENOMEM; + + ret = devm_led_classdev_register_ext(dev, l, &init_data); + kfree(init_data.devicename); + + if (ret) { + dev_err(dev, "Failed to init LED %d for port %d", led_num, port); + return ret; + } + } + + return 0; +} diff --git a/drivers/net/dsa/mv88e6xxx/port.c b/drivers/net/dsa/mv88e6xxx/port.c index 04053fdc6489..dc777ddce1f3 100644 --- a/drivers/net/dsa/mv88e6xxx/port.c +++ b/drivers/net/dsa/mv88e6xxx/port.c @@ -12,6 +12,7 @@ #include <linux/if_bridge.h> #include <linux/phy.h> #include <linux/phylink.h> +#include <linux/property.h> #include "chip.h" #include "global2.h" diff --git a/drivers/net/dsa/mv88e6xxx/port.h b/drivers/net/dsa/mv88e6xxx/port.h index ddadeb9bfdae..c1d2f99efb1c 100644 --- a/drivers/net/dsa/mv88e6xxx/port.h +++ b/drivers/net/dsa/mv88e6xxx/port.h @@ -309,6 +309,130 @@ /* Offset 0x13: OutFiltered Counter */ #define MV88E6XXX_PORT_OUT_FILTERED 0x13 +/* Offset 0x16: LED Control */ +#define MV88E6XXX_PORT_LED_CONTROL 0x16 +#define MV88E6XXX_PORT_LED_CONTROL_UPDATE BIT(15) +#define MV88E6XXX_PORT_LED_CONTROL_POINTER_MASK GENMASK(14, 12) +#define MV88E6XXX_PORT_LED_CONTROL_POINTER_LED01_CTRL (0x00 << 12) /* Control for LED 0 and 1 */ +#define MV88E6XXX_PORT_LED_CONTROL_POINTER_STRETCH_BLINK (0x06 << 12) /* Stretch and Blink Rate */ +#define MV88E6XXX_PORT_LED_CONTROL_POINTER_CNTL_SPECIAL (0x07 << 12) /* Control for the Port's Special LED */ +#define MV88E6XXX_PORT_LED_CONTROL_DATA_MASK GENMASK(10, 0) +/* Selection masks valid for either port 1,2,3,4 or 5 */ +#define MV88E6XXX_PORT_LED_CONTROL_LED0_SEL_MASK GENMASK(3, 0) +#define MV88E6XXX_PORT_LED_CONTROL_LED1_SEL_MASK GENMASK(7, 4) +/* Selection control for LED 0 and 1, ports 5 and 6 only have LED 0 + * Bits Function + * 0..3 LED 0 control selector on ports 1-5 + * 4..7 LED 1 control selector on ports 1-4; on port 5 this controls LED 0 of port 6 + * + * Sel Port LED Function for the 6352 family: + * 0 1-4 0 Link/Act/Speed by Blink Rate (off=no link, on=link, blink=activity,
blink speed=link speed) + * 1-4 1 Port 2's Special LED + * 5-6 0 Port 5 Link/Act (off=no link, on=link, blink=activity) + * 5-6 1 Port 6 Link/Act (off=no link, on=link 1000, blink=activity) + * 1 1-4 0 100/1000 Link/Act (off=no link, on=100 or 1000 link, blink=activity) + * 1-4 1 10/100 Link Act (off=no link, on=10 or 100 link, blink=activity) + * 5-6 0 Fiber 100 Link/Act (off=no link, on=link 100, blink=activity) + * 5-6 1 Fiber 1000 Link/Act (off=no link, on=link 1000, blink=activity) + * 2 1-4 0 1000 Link/Act (off=no link, on=link 1000, blink=activity) + * 1-4 1 10/100 Link/Act (off=no link, on=10 or 100 link, blink=activity) + * 5-6 0 Fiber 1000 Link/Act (off=no link, on=link 1000, blink=activity) + * 5-6 1 Fiber 100 Link/Act (off=no link, on=link 100, blink=activity) + * 3 1-4 0 Link/Act (off=no link, on=link, blink=activity) + * 1-4 1 1000 Link (off=no link, on=1000 link) + * 5-6 0 Port 0's Special LED + * 5-6 1 Fiber Link (off=no link, on=link) + * 4 1-4 0 Port 0's Special LED + * 1-4 1 Port 1's Special LED + * 5-6 0 Port 1's Special LED + * 5-6 1 Port 5 Link/Act (off=no link, on=link, blink=activity) + * 5 1-4 0 Reserved + * 1-4 1 Reserved + * 5-6 0 Port 2's Special LED + * 5-6 1 Port 6 Link (off=no link, on=link) + * 6 1-4 0 Duplex/Collision (off=half-duplex,on=full-duplex,blink=collision) + * 1-4 1 10/1000 Link/Act (off=no link, on=10 or 1000 link, blink=activity) + * 5-6 0 Port 5 Duplex/Collision (off=half-duplex, on=full-duplex, blink=col) + * 5-6 1 Port 6 Duplex/Collision (off=half-duplex, on=full-duplex, blink=col) + * 7 1-4 0 10/1000 Link/Act (off=no link, on=10 or 1000 link, blink=activity) + * 1-4 1 10/1000 Link (off=no link, on=10 or 1000 link) + * 5-6 0 Port 5 Link/Act/Speed by Blink rate (off=no link, on=link, blink=activity, blink speed=link speed) + * 5-6 1 Port 6 Link/Act/Speed by Blink rate (off=no link, on=link, blink=activity, blink speed=link speed) + * 8 1-4 0 Link (off=no link, on=link) + * 1-4 1 Activity (off=no link, blink on=activity) + * 5-6 0 Port 6 Link/Act (off=no link, on=link, blink=activity) + * 5-6 1 Port 0's Special LED + * 9 1-4 0 10 Link (off=no link, on=10 link) + * 1-4 1 100 Link (off=no link, on=100 link) + * 5-6 0 Reserved + * 5-6 1 Port 1's Special LED + * a 1-4 0 10 Link/Act (off=no link, on=10 link, blink=activity) + * 1-4 1 100 Link/Act (off=no link, on=100 link, blink=activity) + * 5-6 0 Reserved + * 5-6 1 Port 2's Special LED + * b 1-4 0 100/1000 Link (off=no link, on=100 or 1000 link) + * 1-4 1 10/100 Link (off=no link, on=100 link, blink=activity) + * 5-6 0 Reserved + * 5-6 1 Reserved + * c * * PTP Act (blink on=PTP activity) + * d * * Force Blink + * e * * Force Off + * f * * Force On + */ +/* Select LED0 output */ +#define MV88E6XXX_PORT_LED_CONTROL_LED0_SEL0 0x0 +#define MV88E6XXX_PORT_LED_CONTROL_LED0_SEL1 0x1 +#define MV88E6XXX_PORT_LED_CONTROL_LED0_SEL2 0x2 +#define MV88E6XXX_PORT_LED_CONTROL_LED0_SEL3 0x3 +#define MV88E6XXX_PORT_LED_CONTROL_LED0_SEL4 0x4 +#define MV88E6XXX_PORT_LED_CONTROL_LED0_SEL5 0x5 +#define MV88E6XXX_PORT_LED_CONTROL_LED0_SEL6 0x6 +#define MV88E6XXX_PORT_LED_CONTROL_LED0_SEL7 0x7 +#define MV88E6XXX_PORT_LED_CONTROL_LED0_SEL8 0x8 +#define MV88E6XXX_PORT_LED_CONTROL_LED0_SEL9 0x9 +#define MV88E6XXX_PORT_LED_CONTROL_LED0_SELA 0xa +#define MV88E6XXX_PORT_LED_CONTROL_LED0_SELB 0xb +#define MV88E6XXX_PORT_LED_CONTROL_LED0_SELC 0xc +#define MV88E6XXX_PORT_LED_CONTROL_LED0_SELD 0xd +#define MV88E6XXX_PORT_LED_CONTROL_LED0_SELE 0xe +#define MV88E6XXX_PORT_LED_CONTROL_LED0_SELF 0xf +#define 
MV88E6XXX_PORT_LED_CONTROL_LED1_SEL0 (0x0 << 4) +#define MV88E6XXX_PORT_LED_CONTROL_LED1_SEL1 (0x1 << 4) +#define MV88E6XXX_PORT_LED_CONTROL_LED1_SEL2 (0x2 << 4) +#define MV88E6XXX_PORT_LED_CONTROL_LED1_SEL3 (0x3 << 4) +#define MV88E6XXX_PORT_LED_CONTROL_LED1_SEL4 (0x4 << 4) +#define MV88E6XXX_PORT_LED_CONTROL_LED1_SEL5 (0x5 << 4) +#define MV88E6XXX_PORT_LED_CONTROL_LED1_SEL6 (0x6 << 4) +#define MV88E6XXX_PORT_LED_CONTROL_LED1_SEL7 (0x7 << 4) +#define MV88E6XXX_PORT_LED_CONTROL_LED1_SEL8 (0x8 << 4) +#define MV88E6XXX_PORT_LED_CONTROL_LED1_SEL9 (0x9 << 4) +#define MV88E6XXX_PORT_LED_CONTROL_LED1_SELA (0xa << 4) +#define MV88E6XXX_PORT_LED_CONTROL_LED1_SELB (0xb << 4) +#define MV88E6XXX_PORT_LED_CONTROL_LED1_SELC (0xc << 4) +#define MV88E6XXX_PORT_LED_CONTROL_LED1_SELD (0xd << 4) +#define MV88E6XXX_PORT_LED_CONTROL_LED1_SELE (0xe << 4) +#define MV88E6XXX_PORT_LED_CONTROL_LED1_SELF (0xf << 4) +/* Stretch and Blink Rate Control (Index 0x06 of LED Control) */ +/* Pulse Stretch Selection for all LED's on this port */ +#define MV88E6XXX_PORT_LED_CONTROL_0x06_PULSE_STRETCH_NONE (0 << 4) +#define MV88E6XXX_PORT_LED_CONTROL_0x06_PULSE_STRETCH_21MS (1 << 4) +#define MV88E6XXX_PORT_LED_CONTROL_0x06_PULSE_STRETCH_42MS (2 << 4) +#define MV88E6XXX_PORT_LED_CONTROL_0x06_PULSE_STRETCH_84MS (3 << 4) +#define MV88E6XXX_PORT_LED_CONTROL_0x06_PULSE_STRETCH_168MS (4 << 4) +/* Blink Rate Selection for all LEDs on this port */ +#define MV88E6XXX_PORT_LED_CONTROL_0x06_BLINK_RATE_21MS 0 +#define MV88E6XXX_PORT_LED_CONTROL_0x06_BLINK_RATE_42MS 1 +#define MV88E6XXX_PORT_LED_CONTROL_0x06_BLINK_RATE_84MS 2 +#define MV88E6XXX_PORT_LED_CONTROL_0x06_BLINK_RATE_168MS 3 +#define MV88E6XXX_PORT_LED_CONTROL_0x06_BLINK_RATE_336MS 4 +#define MV88E6XXX_PORT_LED_CONTROL_0x06_BLINK_RATE_672MS 5 + /* Control for Special LED (Index 0x7 of LED Control on Port0) */ +#define MV88E6XXX_PORT_LED_CONTROL_0x07_P0_LAN_LINKACT_SHIFT 0 /* bits 6:0 LAN Link Activity LED */ +/* Control for Special LED (Index 0x7 of LED Control on Port 1) */ +#define MV88E6XXX_PORT_LED_CONTROL_0x07_P1_WAN_LINKACT_SHIFT 0 /* bits 6:0 WAN Link Activity LED */ +/* Control for Special LED (Index 0x7 of LED Control on Port 2) */ +#define MV88E6XXX_PORT_LED_CONTROL_0x07_P2_PTP_ACT 0 /* bits 6:0 PTP Activity */ + /* Offset 0x18: IEEE Priority Mapping Table */ #define MV88E6390_PORT_IEEE_PRIO_MAP_TABLE 0x18 #define MV88E6390_PORT_IEEE_PRIO_MAP_TABLE_UPDATE 0x8000 @@ -457,6 +581,15 @@ int mv88e6393x_port_set_cmode(struct mv88e6xxx_chip *chip, int port, phy_interface_t mode); int mv88e6185_port_get_cmode(struct mv88e6xxx_chip *chip, int port, u8 *cmode); int mv88e6352_port_get_cmode(struct mv88e6xxx_chip *chip, int port, u8 *cmode); +#ifdef CONFIG_NET_DSA_MV88E6XXX_LEDS +int mv88e6xxx_port_setup_leds(struct mv88e6xxx_chip *chip, int port); +#else +static inline int mv88e6xxx_port_setup_leds(struct mv88e6xxx_chip *chip, + int port) +{ + return 0; +} +#endif int mv88e6xxx_port_drop_untagged(struct mv88e6xxx_chip *chip, int port, bool drop_untagged); int mv88e6xxx_port_set_map_da(struct mv88e6xxx_chip *chip, int port, bool map); diff --git a/drivers/net/dsa/ocelot/ocelot_ext.c b/drivers/net/dsa/ocelot/ocelot_ext.c index 5632a7248cd4..450bda18ef37 100644 --- a/drivers/net/dsa/ocelot/ocelot_ext.c +++ b/drivers/net/dsa/ocelot/ocelot_ext.c @@ -102,7 +102,7 @@ static struct platform_driver ocelot_ext_switch_driver = { .of_match_table = ocelot_ext_switch_of_match, }, .probe = ocelot_ext_probe, - .remove_new = ocelot_ext_remove, + .remove = ocelot_ext_remove, .shutdown = 
ocelot_ext_shutdown, }; module_platform_driver(ocelot_ext_switch_driver); diff --git a/drivers/net/dsa/ocelot/seville_vsc9953.c b/drivers/net/dsa/ocelot/seville_vsc9953.c index 70782649c395..eb3944ba2a72 100644 --- a/drivers/net/dsa/ocelot/seville_vsc9953.c +++ b/drivers/net/dsa/ocelot/seville_vsc9953.c @@ -1014,7 +1014,7 @@ MODULE_DEVICE_TABLE(of, seville_of_match); static struct platform_driver seville_vsc9953_driver = { .probe = seville_probe, - .remove_new = seville_remove, + .remove = seville_remove, .shutdown = seville_shutdown, .driver = { .name = "mscc_seville", diff --git a/drivers/net/dsa/realtek/realtek-mdio.c b/drivers/net/dsa/realtek/realtek-mdio.c index 04b758e5a680..5f545dda702b 100644 --- a/drivers/net/dsa/realtek/realtek-mdio.c +++ b/drivers/net/dsa/realtek/realtek-mdio.c @@ -146,7 +146,7 @@ EXPORT_SYMBOL_NS_GPL(realtek_mdio_probe, REALTEK_DSA); * realtek_mdio_remove() - Remove the driver of an MDIO-connected switch * @mdiodev: mdio_device to be removed. * - * This function should be used as the .remove_new in an mdio_driver. First + * This function should be used as the .remove in an mdio_driver. First * it unregisters the DSA switch and then it calls the common remove function. * * Context: Can sleep. diff --git a/drivers/net/dsa/realtek/realtek-smi.c b/drivers/net/dsa/realtek/realtek-smi.c index 88590ae95a75..d750bddf27b4 100644 --- a/drivers/net/dsa/realtek/realtek-smi.c +++ b/drivers/net/dsa/realtek/realtek-smi.c @@ -367,7 +367,7 @@ EXPORT_SYMBOL_NS_GPL(realtek_smi_probe, REALTEK_DSA); * realtek_smi_remove() - Remove the driver of a SMI-connected switch * @pdev: platform_device to be removed. * - * This function should be used as the .remove_new in a platform_driver. First + * This function should be used as the .remove in a platform_driver. First * it unregisters the DSA switch and then it calls the common remove function. * * Context: Can sleep. 
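As an illustration of the .remove_new to .remove conversions running through this series (the platform bus now uses a void-returning remove callback), a minimal sketch of the resulting driver shape, with hypothetical foo_* names, might be::

    #include <linux/module.h>
    #include <linux/platform_device.h>

    static int foo_probe(struct platform_device *pdev)
    {
            /* map registers, register the switch, etc. */
            return 0;
    }

    static void foo_remove(struct platform_device *pdev)
    {
            /* unregister the switch, then common teardown */
    }

    static struct platform_driver foo_driver = {
            .driver = {
                    .name = "foo",
            },
            .probe = foo_probe,
            .remove = foo_remove,   /* formerly .remove_new */
    };
    module_platform_driver(foo_driver);

    MODULE_LICENSE("GPL");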
diff --git a/drivers/net/dsa/realtek/rtl8365mb.c b/drivers/net/dsa/realtek/rtl8365mb.c index ad7044b295ec..6b9dbdb00941 100644 --- a/drivers/net/dsa/realtek/rtl8365mb.c +++ b/drivers/net/dsa/realtek/rtl8365mb.c @@ -2164,7 +2164,7 @@ static struct platform_driver rtl8365mb_smi_driver = { .of_match_table = rtl8365mb_of_match, }, .probe = realtek_smi_probe, - .remove_new = realtek_smi_remove, + .remove = realtek_smi_remove, .shutdown = realtek_smi_shutdown, }; diff --git a/drivers/net/dsa/realtek/rtl8366rb.c b/drivers/net/dsa/realtek/rtl8366rb.c index c7a8cd060587..6ba03f81c882 100644 --- a/drivers/net/dsa/realtek/rtl8366rb.c +++ b/drivers/net/dsa/realtek/rtl8366rb.c @@ -2102,7 +2102,7 @@ static struct platform_driver rtl8366rb_smi_driver = { .of_match_table = rtl8366rb_of_match, }, .probe = realtek_smi_probe, - .remove_new = realtek_smi_remove, + .remove = realtek_smi_remove, .shutdown = realtek_smi_shutdown, }; diff --git a/drivers/net/dsa/rzn1_a5psw.c b/drivers/net/dsa/rzn1_a5psw.c index 92e032972b34..1135a32e4b7e 100644 --- a/drivers/net/dsa/rzn1_a5psw.c +++ b/drivers/net/dsa/rzn1_a5psw.c @@ -1324,7 +1324,7 @@ static struct platform_driver a5psw_driver = { .of_match_table = a5psw_of_mtable, }, .probe = a5psw_probe, - .remove_new = a5psw_remove, + .remove = a5psw_remove, .shutdown = a5psw_shutdown, }; module_platform_driver(a5psw_driver); diff --git a/drivers/net/dsa/sja1105/sja1105.h b/drivers/net/dsa/sja1105/sja1105.h index 8c66d3bf61f0..dceb96ae9c83 100644 --- a/drivers/net/dsa/sja1105/sja1105.h +++ b/drivers/net/dsa/sja1105/sja1105.h @@ -278,7 +278,7 @@ struct sja1105_private { struct mii_bus *mdio_base_t1; struct mii_bus *mdio_base_tx; struct mii_bus *mdio_pcs; - struct dw_xpcs *xpcs[SJA1105_MAX_NUM_PORTS]; + struct phylink_pcs *pcs[SJA1105_MAX_NUM_PORTS]; struct sja1105_ptp_data ptp_data; struct sja1105_tas_data tas_data; }; diff --git a/drivers/net/dsa/sja1105/sja1105_main.c b/drivers/net/dsa/sja1105/sja1105_main.c index d0563ef59acf..f8454f3b6f9c 100644 --- a/drivers/net/dsa/sja1105/sja1105_main.c +++ b/drivers/net/dsa/sja1105/sja1105_main.c @@ -15,7 +15,6 @@ #include <linux/of.h> #include <linux/of_net.h> #include <linux/of_mdio.h> -#include <linux/pcs/pcs-xpcs.h> #include <linux/netdev_features.h> #include <linux/netdevice.h> #include <linux/if_bridge.h> @@ -1257,29 +1256,11 @@ static int sja1105_parse_dt(struct sja1105_private *priv) return rc; } -/* Convert link speed from SJA1105 to ethtool encoding */ -static int sja1105_port_speed_to_ethtool(struct sja1105_private *priv, - u64 speed) -{ - if (speed == priv->info->port_speed[SJA1105_SPEED_10MBPS]) - return SPEED_10; - if (speed == priv->info->port_speed[SJA1105_SPEED_100MBPS]) - return SPEED_100; - if (speed == priv->info->port_speed[SJA1105_SPEED_1000MBPS]) - return SPEED_1000; - if (speed == priv->info->port_speed[SJA1105_SPEED_2500MBPS]) - return SPEED_2500; - return SPEED_UNKNOWN; -} - -/* Set link speed in the MAC configuration for a specific port. */ -static int sja1105_adjust_port_config(struct sja1105_private *priv, int port, - int speed_mbps) +static int sja1105_set_port_speed(struct sja1105_private *priv, int port, + int speed_mbps) { struct sja1105_mac_config_entry *mac; - struct device *dev = priv->ds->dev; u64 speed; - int rc; /* On P/Q/R/S, one can read from the device via the MAC reconfiguration * tables. On E/T, MAC reconfig tables are not readable, only writable. 
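The hunks that follow replace the dw_xpcs-specific entry points (xpcs_do_config(), xpcs_link_up()) with calls through the generic struct phylink_pcs operations table, matching the new priv->pcs[] array. A hedged sketch of that indirection, reusing the argument order visible in the hunks below; example_pcs_reconfig() is illustrative, not a function from the patch, and assumes pcs may be NULL on ports without a PCS:

    #include <linux/ethtool.h>
    #include <linux/phylink.h>

    static int example_pcs_reconfig(struct phylink_pcs *pcs,
                                    phy_interface_t interface,
                                    unsigned int neg_mode, int speed)
    {
            int rc;

            if (!pcs)               /* port has no PCS attached */
                    return 0;

            /* same ops calls the reload loop below makes */
            rc = pcs->ops->pcs_config(pcs, neg_mode, interface, NULL, true);
            if (rc < 0)
                    return rc;

            pcs->ops->pcs_link_up(pcs, neg_mode, interface, speed, DUPLEX_FULL);
            return 0;
    }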
@@ -1313,7 +1294,7 @@ static int sja1105_adjust_port_config(struct sja1105_private *priv, int port, speed = priv->info->port_speed[SJA1105_SPEED_2500MBPS]; break; default: - dev_err(dev, "Invalid speed %iMbps\n", speed_mbps); + dev_err(priv->ds->dev, "Invalid speed %iMbps\n", speed_mbps); return -EINVAL; } @@ -1325,11 +1306,31 @@ static int sja1105_adjust_port_config(struct sja1105_private *priv, int port, * we need to configure the PCS only (if even that). */ if (priv->phy_mode[port] == PHY_INTERFACE_MODE_SGMII) - mac[port].speed = priv->info->port_speed[SJA1105_SPEED_1000MBPS]; + speed = priv->info->port_speed[SJA1105_SPEED_1000MBPS]; else if (priv->phy_mode[port] == PHY_INTERFACE_MODE_2500BASEX) - mac[port].speed = priv->info->port_speed[SJA1105_SPEED_2500MBPS]; - else - mac[port].speed = speed; + speed = priv->info->port_speed[SJA1105_SPEED_2500MBPS]; + + mac[port].speed = speed; + + return 0; +} + +/* Write the MAC Configuration Table entry and, if necessary, the CGU settings, + * after a link speed change for this port. + */ +static int sja1105_set_port_config(struct sja1105_private *priv, int port) +{ + struct sja1105_mac_config_entry *mac; + struct device *dev = priv->ds->dev; + int rc; + + /* On P/Q/R/S, one can read from the device via the MAC reconfiguration + * tables. On E/T, MAC reconfig tables are not readable, only writable. + * We have to *know* what the MAC looks like. For the sake of keeping + * the code common, we'll use the static configuration tables as a + * reasonable approximation for both E/T and P/Q/R/S. + */ + mac = priv->static_config.tables[BLK_IDX_MAC_CONFIG].entries; /* Write to the dynamic reconfiguration tables */ rc = sja1105_dynamic_config_write(priv, BLK_IDX_MAC_CONFIG, port, @@ -1356,12 +1357,8 @@ sja1105_mac_select_pcs(struct phylink_config *config, phy_interface_t iface) { struct dsa_port *dp = dsa_phylink_to_port(config); struct sja1105_private *priv = dp->ds->priv; - struct dw_xpcs *xpcs = priv->xpcs[dp->index]; - - if (xpcs) - return &xpcs->pcs; - return NULL; + return priv->pcs[dp->index]; } static void sja1105_mac_config(struct phylink_config *config, @@ -1390,7 +1387,8 @@ static void sja1105_mac_link_up(struct phylink_config *config, struct sja1105_private *priv = dp->ds->priv; int port = dp->index; - sja1105_adjust_port_config(priv, port, speed); + if (!sja1105_set_port_speed(priv, port, speed)) + sja1105_set_port_config(priv, port); sja1105_inhibit_tx(priv, BIT(port), false); } @@ -2293,8 +2291,8 @@ int sja1105_static_config_reload(struct sja1105_private *priv, { struct ptp_system_timestamp ptp_sts_before; struct ptp_system_timestamp ptp_sts_after; - int speed_mbps[SJA1105_MAX_NUM_PORTS]; u16 bmcr[SJA1105_MAX_NUM_PORTS] = {0}; + u64 mac_speed[SJA1105_MAX_NUM_PORTS]; struct sja1105_mac_config_entry *mac; struct dsa_switch *ds = priv->ds; s64 t1, t2, t3, t4; @@ -2307,17 +2305,16 @@ int sja1105_static_config_reload(struct sja1105_private *priv, mac = priv->static_config.tables[BLK_IDX_MAC_CONFIG].entries; - /* Back up the dynamic link speed changed by sja1105_adjust_port_config + /* Back up the dynamic link speed changed by sja1105_set_port_speed() * in order to temporarily restore it to SJA1105_SPEED_AUTO - which the * switch wants to see in the static config in order to allow us to * change it through the dynamic interface later. 
*/ for (i = 0; i < ds->num_ports; i++) { - speed_mbps[i] = sja1105_port_speed_to_ethtool(priv, - mac[i].speed); + mac_speed[i] = mac[i].speed; mac[i].speed = priv->info->port_speed[SJA1105_SPEED_AUTO]; - if (priv->xpcs[i]) + if (priv->pcs[i]) bmcr[i] = mdiobus_c45_read(priv->mdio_pcs, i, MDIO_MMD_VEND2, MDIO_CTRL1); } @@ -2374,14 +2371,15 @@ int sja1105_static_config_reload(struct sja1105_private *priv, } for (i = 0; i < ds->num_ports; i++) { - struct dw_xpcs *xpcs = priv->xpcs[i]; + struct phylink_pcs *pcs = priv->pcs[i]; unsigned int neg_mode; - rc = sja1105_adjust_port_config(priv, i, speed_mbps[i]); + mac[i].speed = mac_speed[i]; + rc = sja1105_set_port_config(priv, i); if (rc < 0) goto out; - if (!xpcs) + if (!pcs) continue; if (bmcr[i] & BMCR_ANENABLE) @@ -2389,7 +2387,8 @@ int sja1105_static_config_reload(struct sja1105_private *priv, else neg_mode = PHYLINK_PCS_NEG_OUTBAND; - rc = xpcs_do_config(xpcs, priv->phy_mode[i], NULL, neg_mode); + rc = pcs->ops->pcs_config(pcs, neg_mode, priv->phy_mode[i], + NULL, true); if (rc < 0) goto out; @@ -2405,8 +2404,8 @@ int sja1105_static_config_reload(struct sja1105_private *priv, else speed = SPEED_10; - xpcs_link_up(&xpcs->pcs, neg_mode, priv->phy_mode[i], - speed, DUPLEX_FULL); + pcs->ops->pcs_link_up(pcs, neg_mode, priv->phy_mode[i], + speed, DUPLEX_FULL); } } diff --git a/drivers/net/dsa/sja1105/sja1105_mdio.c b/drivers/net/dsa/sja1105/sja1105_mdio.c index 52ddb4ef259e..84b7169f2974 100644 --- a/drivers/net/dsa/sja1105/sja1105_mdio.c +++ b/drivers/net/dsa/sja1105/sja1105_mdio.c @@ -400,7 +400,7 @@ static int sja1105_mdiobus_pcs_register(struct sja1105_private *priv) } for (port = 0; port < ds->num_ports; port++) { - struct dw_xpcs *xpcs; + struct phylink_pcs *pcs; if (dsa_is_unused_port(ds, port)) continue; @@ -409,13 +409,13 @@ static int sja1105_mdiobus_pcs_register(struct sja1105_private *priv) priv->phy_mode[port] != PHY_INTERFACE_MODE_2500BASEX) continue; - xpcs = xpcs_create_mdiodev(bus, port, priv->phy_mode[port]); - if (IS_ERR(xpcs)) { - rc = PTR_ERR(xpcs); + pcs = xpcs_create_pcs_mdiodev(bus, port); + if (IS_ERR(pcs)) { + rc = PTR_ERR(pcs); goto out_pcs_free; } - priv->xpcs[port] = xpcs; + priv->pcs[port] = pcs; } priv->mdio_pcs = bus; @@ -424,11 +424,10 @@ static int sja1105_mdiobus_pcs_register(struct sja1105_private *priv) out_pcs_free: for (port = 0; port < ds->num_ports; port++) { - if (!priv->xpcs[port]) - continue; - - xpcs_destroy(priv->xpcs[port]); - priv->xpcs[port] = NULL; + if (priv->pcs[port]) { + xpcs_destroy_pcs(priv->pcs[port]); + priv->pcs[port] = NULL; + } } mdiobus_unregister(bus); @@ -446,11 +445,10 @@ static void sja1105_mdiobus_pcs_unregister(struct sja1105_private *priv) return; for (port = 0; port < ds->num_ports; port++) { - if (!priv->xpcs[port]) - continue; - - xpcs_destroy(priv->xpcs[port]); - priv->xpcs[port] = NULL; + if (priv->pcs[port]) { + xpcs_destroy_pcs(priv->pcs[port]); + priv->pcs[port] = NULL; + } } mdiobus_unregister(priv->mdio_pcs); diff --git a/drivers/net/dsa/vitesse-vsc73xx-platform.c b/drivers/net/dsa/vitesse-vsc73xx-platform.c index 755b7895a15a..7a2e0a619b85 100644 --- a/drivers/net/dsa/vitesse-vsc73xx-platform.c +++ b/drivers/net/dsa/vitesse-vsc73xx-platform.c @@ -158,7 +158,7 @@ MODULE_DEVICE_TABLE(of, vsc73xx_of_match); static struct platform_driver vsc73xx_platform_driver = { .probe = vsc73xx_platform_probe, - .remove_new = vsc73xx_platform_remove, + .remove = vsc73xx_platform_remove, .shutdown = vsc73xx_platform_shutdown, .driver = { .name = "vsc73xx-platform", diff --git 
a/drivers/net/ethernet/8390/ax88796.c b/drivers/net/ethernet/8390/ax88796.c index 2874680ef24d..e1695d0fbd8b 100644 --- a/drivers/net/ethernet/8390/ax88796.c +++ b/drivers/net/ethernet/8390/ax88796.c @@ -1009,7 +1009,7 @@ static struct platform_driver axdrv = { .name = "ax88796", }, .probe = ax_probe, - .remove_new = ax_remove, + .remove = ax_remove, .suspend = ax_suspend, .resume = ax_resume, }; diff --git a/drivers/net/ethernet/8390/mcf8390.c b/drivers/net/ethernet/8390/mcf8390.c index 5a0fa995e643..94ff8364cdf0 100644 --- a/drivers/net/ethernet/8390/mcf8390.c +++ b/drivers/net/ethernet/8390/mcf8390.c @@ -457,7 +457,7 @@ static struct platform_driver mcf8390_drv = { .name = "mcf8390", }, .probe = mcf8390_probe, - .remove_new = mcf8390_remove, + .remove = mcf8390_remove, }; module_platform_driver(mcf8390_drv); diff --git a/drivers/net/ethernet/8390/ne.c b/drivers/net/ethernet/8390/ne.c index 350683a09d2e..961019c32842 100644 --- a/drivers/net/ethernet/8390/ne.c +++ b/drivers/net/ethernet/8390/ne.c @@ -894,7 +894,7 @@ static int ne_drv_resume(struct platform_device *pdev) #endif static struct platform_driver ne_driver = { - .remove_new = ne_drv_remove, + .remove = ne_drv_remove, .suspend = ne_drv_suspend, .resume = ne_drv_resume, .driver = { diff --git a/drivers/net/ethernet/actions/owl-emac.c b/drivers/net/ethernet/actions/owl-emac.c index e03193da5874..115f48b3342c 100644 --- a/drivers/net/ethernet/actions/owl-emac.c +++ b/drivers/net/ethernet/actions/owl-emac.c @@ -1607,7 +1607,7 @@ static struct platform_driver owl_emac_driver = { .pm = &owl_emac_pm_ops, }, .probe = owl_emac_probe, - .remove_new = owl_emac_remove, + .remove = owl_emac_remove, }; module_platform_driver(owl_emac_driver); diff --git a/drivers/net/ethernet/aeroflex/greth.c b/drivers/net/ethernet/aeroflex/greth.c index adf6f67c5fcb..a593adc16c78 100644 --- a/drivers/net/ethernet/aeroflex/greth.c +++ b/drivers/net/ethernet/aeroflex/greth.c @@ -1565,7 +1565,7 @@ static struct platform_driver greth_of_driver = { .of_match_table = greth_of_match, }, .probe = greth_of_probe, - .remove_new = greth_of_remove, + .remove = greth_of_remove, }; module_platform_driver(greth_of_driver); diff --git a/drivers/net/ethernet/allwinner/sun4i-emac.c b/drivers/net/ethernet/allwinner/sun4i-emac.c index d761c08fe5c1..2f516b950f4e 100644 --- a/drivers/net/ethernet/allwinner/sun4i-emac.c +++ b/drivers/net/ethernet/allwinner/sun4i-emac.c @@ -1142,7 +1142,7 @@ static struct platform_driver emac_driver = { .of_match_table = emac_of_match, }, .probe = emac_probe, - .remove_new = emac_remove, + .remove = emac_remove, .suspend = emac_suspend, .resume = emac_resume, }; diff --git a/drivers/net/ethernet/altera/altera_tse_main.c b/drivers/net/ethernet/altera/altera_tse_main.c index 3c112c18ae6a..3f6204de9e6b 100644 --- a/drivers/net/ethernet/altera/altera_tse_main.c +++ b/drivers/net/ethernet/altera/altera_tse_main.c @@ -1519,7 +1519,7 @@ MODULE_DEVICE_TABLE(of, altera_tse_ids); static struct platform_driver altera_tse_driver = { .probe = altera_tse_probe, - .remove_new = altera_tse_remove, + .remove = altera_tse_remove, .suspend = NULL, .resume = NULL, .driver = { diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c index c5b50cfa935a..96df20854eb9 100644 --- a/drivers/net/ethernet/amazon/ena/ena_netdev.c +++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c @@ -1677,9 +1677,9 @@ static int ena_request_mgmnt_irq(struct ena_adapter *adapter) static int ena_request_io_irq(struct ena_adapter *adapter) { u32 
io_queue_count = adapter->num_io_queues + adapter->xdp_num_queues; + int rc = 0, i, k, irq_idx; unsigned long flags = 0; struct ena_irq *irq; - int rc = 0, i, k; if (!test_bit(ENA_FLAG_MSIX_ENABLED, &adapter->flags)) { netif_err(adapter, ifup, adapter->netdev, @@ -1705,6 +1705,16 @@ static int ena_request_io_irq(struct ena_adapter *adapter) irq_set_affinity_hint(irq->vector, &irq->affinity_hint_mask); } + /* Now that IO IRQs have been successfully allocated, map them to the + * corresponding IO NAPI instance. Note that the mgmnt IRQ does not + * have a NAPI, so care must be taken to correctly map IRQs to NAPIs. + */ + for (i = 0; i < io_queue_count; i++) { + irq_idx = ENA_IO_IRQ_IDX(i); + irq = &adapter->irq_tbl[irq_idx]; + netif_napi_set_irq(&adapter->ena_napi[i].napi, irq->vector); + } + return rc; err: @@ -1811,20 +1821,40 @@ static void ena_napi_disable_in_range(struct ena_adapter *adapter, int first_index, int count) { + struct napi_struct *napi; int i; - for (i = first_index; i < first_index + count; i++) - napi_disable(&adapter->ena_napi[i].napi); + for (i = first_index; i < first_index + count; i++) { + napi = &adapter->ena_napi[i].napi; + if (!ENA_IS_XDP_INDEX(adapter, i)) { + /* This API is supported for non-XDP queues only */ + netif_queue_set_napi(adapter->netdev, i, + NETDEV_QUEUE_TYPE_TX, NULL); + netif_queue_set_napi(adapter->netdev, i, + NETDEV_QUEUE_TYPE_RX, NULL); + } + napi_disable(napi); + } } static void ena_napi_enable_in_range(struct ena_adapter *adapter, int first_index, int count) { + struct napi_struct *napi; int i; - for (i = first_index; i < first_index + count; i++) - napi_enable(&adapter->ena_napi[i].napi); + for (i = first_index; i < first_index + count; i++) { + napi = &adapter->ena_napi[i].napi; + napi_enable(napi); + if (!ENA_IS_XDP_INDEX(adapter, i)) { + /* This API is supported for non-XDP queues only */ + netif_queue_set_napi(adapter->netdev, i, + NETDEV_QUEUE_TYPE_RX, napi); + netif_queue_set_napi(adapter->netdev, i, + NETDEV_QUEUE_TYPE_TX, napi); + } + } } /* Configure the Rx forwarding */ diff --git a/drivers/net/ethernet/amd/au1000_eth.c b/drivers/net/ethernet/amd/au1000_eth.c index 85c978149bf6..0671a066913b 100644 --- a/drivers/net/ethernet/amd/au1000_eth.c +++ b/drivers/net/ethernet/amd/au1000_eth.c @@ -1363,7 +1363,7 @@ static void au1000_remove(struct platform_device *pdev) static struct platform_driver au1000_eth_driver = { .probe = au1000_probe, - .remove_new = au1000_remove, + .remove = au1000_remove, .driver = { .name = "au1000-eth", }, diff --git a/drivers/net/ethernet/amd/sunlance.c b/drivers/net/ethernet/amd/sunlance.c index c78706d21a6a..0f98b92408ed 100644 --- a/drivers/net/ethernet/amd/sunlance.c +++ b/drivers/net/ethernet/amd/sunlance.c @@ -1514,7 +1514,7 @@ static struct platform_driver sunlance_sbus_driver = { .of_match_table = sunlance_sbus_match, }, .probe = sunlance_sbus_probe, - .remove_new = sunlance_sbus_remove, + .remove = sunlance_sbus_remove, }; module_platform_driver(sunlance_sbus_driver); diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-platform.c b/drivers/net/ethernet/amd/xgbe/xgbe-platform.c index 7912b3b45148..4365bd62942c 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-platform.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-platform.c @@ -565,7 +565,7 @@ static struct platform_driver xgbe_driver = { .pm = &xgbe_platform_pm_ops, }, .probe = xgbe_platform_probe, - .remove_new = xgbe_platform_remove, + .remove = xgbe_platform_remove, }; int xgbe_platform_init(void) diff --git a/drivers/net/ethernet/apm/xgene-v2/main.c 
b/drivers/net/ethernet/apm/xgene-v2/main.c index 9e90c2381491..2a91c84aebdb 100644 --- a/drivers/net/ethernet/apm/xgene-v2/main.c +++ b/drivers/net/ethernet/apm/xgene-v2/main.c @@ -734,7 +734,7 @@ static struct platform_driver xge_driver = { .acpi_match_table = ACPI_PTR(xge_acpi_match), }, .probe = xge_probe, - .remove_new = xge_remove, + .remove = xge_remove, .shutdown = xge_shutdown, }; module_platform_driver(xge_driver); diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c index 4af9d89d5f88..3b2951030a38 100644 --- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c +++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c @@ -2159,7 +2159,7 @@ static struct platform_driver xgene_enet_driver = { .acpi_match_table = ACPI_PTR(xgene_enet_acpi_match), }, .probe = xgene_enet_probe, - .remove_new = xgene_enet_remove, + .remove = xgene_enet_remove, .shutdown = xgene_enet_shutdown, }; diff --git a/drivers/net/ethernet/apple/macmace.c b/drivers/net/ethernet/apple/macmace.c index 766ab78256fe..8989506e6248 100644 --- a/drivers/net/ethernet/apple/macmace.c +++ b/drivers/net/ethernet/apple/macmace.c @@ -759,7 +759,7 @@ static void mac_mace_device_remove(struct platform_device *pdev) static struct platform_driver mac_mace_driver = { .probe = mace_probe, - .remove_new = mac_mace_device_remove, + .remove = mac_mace_device_remove, .driver = { .name = mac_mace_string, }, diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c b/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c index 440ff4616fec..6fef47ba0a59 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c @@ -15,6 +15,7 @@ #include "aq_macsec.h" #include "aq_main.h" +#include <linux/ethtool.h> #include <linux/linkmode.h> #include <linux/ptp_clock_kernel.h> @@ -977,6 +978,76 @@ static int aq_ethtool_set_phy_tunable(struct net_device *ndev, return err; } +static int aq_ethtool_get_module_info(struct net_device *ndev, + struct ethtool_modinfo *modinfo) +{ + struct aq_nic_s *aq_nic = netdev_priv(ndev); + u8 compliance_val, dom_type; + int err; + + /* Module EEPROM is only supported for controllers with external PHY */ + if (aq_nic->aq_nic_cfg.aq_hw_caps->media_type != AQ_HW_MEDIA_TYPE_FIBRE || + !aq_nic->aq_hw_ops->hw_read_module_eeprom) + return -EOPNOTSUPP; + + err = aq_nic->aq_hw_ops->hw_read_module_eeprom(aq_nic->aq_hw, + SFF_8472_ID_ADDR, SFF_8472_COMP_ADDR, 1, &compliance_val); + if (err) + return err; + + err = aq_nic->aq_hw_ops->hw_read_module_eeprom(aq_nic->aq_hw, + SFF_8472_ID_ADDR, SFF_8472_DOM_TYPE_ADDR, 1, &dom_type); + if (err) + return err; + + if (dom_type & SFF_8472_ADDRESS_CHANGE_REQ_MASK || compliance_val == 0x00) { + modinfo->type = ETH_MODULE_SFF_8079; + modinfo->eeprom_len = ETH_MODULE_SFF_8079_LEN; + } else { + modinfo->type = ETH_MODULE_SFF_8472; + modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN; + } + return 0; +} + +static int aq_ethtool_get_module_eeprom(struct net_device *ndev, + struct ethtool_eeprom *ee, unsigned char *data) +{ + struct aq_nic_s *aq_nic = netdev_priv(ndev); + unsigned int first, last, len; + int err; + + if (!aq_nic->aq_hw_ops->hw_read_module_eeprom) + return -EOPNOTSUPP; + + first = ee->offset; + last = ee->offset + ee->len; + + if (first < ETH_MODULE_SFF_8079_LEN) { + len = min(last, ETH_MODULE_SFF_8079_LEN); + len -= first; + + err = aq_nic->aq_hw_ops->hw_read_module_eeprom(aq_nic->aq_hw, + SFF_8472_ID_ADDR, first, len, data); + if (err) + return err; + + first += len; 
+ data += len; + } + if (first < ETH_MODULE_SFF_8472_LEN && last > ETH_MODULE_SFF_8079_LEN) { + len = min(last, ETH_MODULE_SFF_8472_LEN); + len -= first; + first -= ETH_MODULE_SFF_8079_LEN; + + err = aq_nic->aq_hw_ops->hw_read_module_eeprom(aq_nic->aq_hw, + SFF_8472_DIAGNOSTICS_ADDR, first, len, data); + if (err) + return err; + } + return 0; +} + const struct ethtool_ops aq_ethtool_ops = { .supported_coalesce_params = ETHTOOL_COALESCE_USECS | ETHTOOL_COALESCE_MAX_FRAMES, @@ -1014,4 +1085,6 @@ const struct ethtool_ops aq_ethtool_ops = { .get_ts_info = aq_ethtool_get_ts_info, .get_phy_tunable = aq_ethtool_get_phy_tunable, .set_phy_tunable = aq_ethtool_set_phy_tunable, + .get_module_info = aq_ethtool_get_module_info, + .get_module_eeprom = aq_ethtool_get_module_eeprom, }; diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.h b/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.h index 6d5be5ebeb13..f26fe1a75539 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.h +++ b/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.h @@ -14,4 +14,12 @@ extern const struct ethtool_ops aq_ethtool_ops; #define AQ_PRIV_FLAGS_MASK (AQ_HW_LOOPBACK_MASK) +#define SFF_8472_ID_ADDR 0x50 +#define SFF_8472_DIAGNOSTICS_ADDR 0x51 + +#define SFF_8472_COMP_ADDR 0x5e +#define SFF_8472_DOM_TYPE_ADDR 0x5c + +#define SFF_8472_ADDRESS_CHANGE_REQ_MASK 0x4 + #endif /* AQ_ETHTOOL_H */ diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_hw.h b/drivers/net/ethernet/aquantia/atlantic/aq_hw.h index f010bda61c96..42c0efc1b455 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_hw.h +++ b/drivers/net/ethernet/aquantia/atlantic/aq_hw.h @@ -340,6 +340,9 @@ struct aq_hw_ops { int (*hw_set_loopback)(struct aq_hw_s *self, u32 mode, bool enable); int (*hw_get_mac_temp)(struct aq_hw_s *self, u32 *temp); + + int (*hw_read_module_eeprom)(struct aq_hw_s *self, u8 dev_addr, + u8 reg_start_addr, int len, u8 *data); }; struct aq_fw_ops { diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c index 56c46266bb0a..493432d036b9 100644 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c @@ -1654,6 +1654,137 @@ static int hw_atl_b0_get_mac_temp(struct aq_hw_s *self, u32 *temp) return 0; } +#define START_TRANSMIT 0x5001 +#define START_READ_TRANSMIT 0x5101 +#define STOP_TRANSMIT 0x3001 +#define REPEAT_TRANSMIT 0x1001 +#define REPEAT_NACK_TRANSMIT 0x1011 + +static int hw_atl_b0_smb0_wait_result(struct aq_hw_s *self, bool expect_ack) +{ + int err; + u32 val; + + err = readx_poll_timeout(hw_atl_smb0_byte_transfer_complete_get, + self, val, val == 1, 100U, 10000U); + if (err) + return err; + if (hw_atl_smb0_receive_acknowledged_get(self) != expect_ack) + return -EIO; + return 0; +} + +/* Starts an I2C/SMBUS write to a given address. addr is in 7-bit format, + * the read/write bit is not part of it. + */ +static int hw_atl_b0_smb0_start_write(struct aq_hw_s *self, u32 addr) +{ + hw_atl_smb0_tx_data_set(self, (addr << 1) | 0); + hw_atl_smb0_provisioning2_set(self, START_TRANSMIT); + return hw_atl_b0_smb0_wait_result(self, 0); +} + +/* Writes a single byte as part of an ongoing write started by start_write. */ +static int hw_atl_b0_smb0_write_byte(struct aq_hw_s *self, u32 data) +{ + hw_atl_smb0_tx_data_set(self, data); + hw_atl_smb0_provisioning2_set(self, REPEAT_TRANSMIT); + return hw_atl_b0_smb0_wait_result(self, 0); +} + +/* Starts an I2C/SMBUS read to a given address. 
addr is in 7-bit format, + * the read/write bit is not part of it. + */ +static int hw_atl_b0_smb0_start_read(struct aq_hw_s *self, u32 addr) +{ + int err; + + hw_atl_smb0_tx_data_set(self, (addr << 1) | 1); + hw_atl_smb0_provisioning2_set(self, START_READ_TRANSMIT); + err = hw_atl_b0_smb0_wait_result(self, 0); + if (err) + return err; + if (hw_atl_smb0_repeated_start_detect_get(self) == 0) + return -EIO; + return 0; +} + +/* Reads a single byte as part of an ongoing read started by start_read. */ +static int hw_atl_b0_smb0_read_byte(struct aq_hw_s *self) +{ + int err; + + hw_atl_smb0_provisioning2_set(self, REPEAT_TRANSMIT); + err = hw_atl_b0_smb0_wait_result(self, 0); + if (err) + return err; + return hw_atl_smb0_rx_data_get(self); +} + +/* Reads the last byte of an ongoing read. */ +static int hw_atl_b0_smb0_read_byte_nack(struct aq_hw_s *self) +{ + int err; + + hw_atl_smb0_provisioning2_set(self, REPEAT_NACK_TRANSMIT); + err = hw_atl_b0_smb0_wait_result(self, 1); + if (err) + return err; + return hw_atl_smb0_rx_data_get(self); +} + +/* Sends a stop condition and ends a transfer. */ +static void hw_atl_b0_smb0_stop(struct aq_hw_s *self) +{ + hw_atl_smb0_provisioning2_set(self, STOP_TRANSMIT); +} + +static int hw_atl_b0_read_module_eeprom(struct aq_hw_s *self, u8 dev_addr, + u8 reg_start_addr, int len, u8 *data) +{ + int i, b; + int err; + u32 val; + + /* Wait for SMBUS0 to be idle */ + err = readx_poll_timeout(hw_atl_smb0_bus_busy_get, self, + val, val == 0, 100U, 10000U); + if (err) + return err; + + err = hw_atl_b0_smb0_start_write(self, dev_addr); + if (err) + goto out; + + err = hw_atl_b0_smb0_write_byte(self, reg_start_addr); + if (err) + goto out; + + err = hw_atl_b0_smb0_start_read(self, dev_addr); + if (err) + goto out; + + for (i = 0; i < len - 1; i++) { + b = hw_atl_b0_smb0_read_byte(self); + if (b < 0) { + err = b; + goto out; + } + data[i] = (u8)b; + } + + b = hw_atl_b0_smb0_read_byte_nack(self); + if (b < 0) { + err = b; + goto out; + } + data[i] = (u8)b; + +out: + hw_atl_b0_smb0_stop(self); + return err; +} + const struct aq_hw_ops hw_atl_ops_b0 = { .hw_soft_reset = hw_atl_utils_soft_reset, .hw_prepare = hw_atl_utils_initfw, @@ -1712,4 +1843,5 @@ const struct aq_hw_ops hw_atl_ops_b0 = { .hw_set_fc = hw_atl_b0_set_fc, .hw_get_mac_temp = hw_atl_b0_get_mac_temp, + .hw_read_module_eeprom = hw_atl_b0_read_module_eeprom, }; diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c index 7b67bdd8a258..d07af1271d59 100644 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c @@ -57,6 +57,49 @@ u32 hw_atl_ts_data_get(struct aq_hw_s *aq_hw) HW_ATL_TS_DATA_OUT_SHIFT); } +u32 hw_atl_smb0_bus_busy_get(struct aq_hw_s *aq_hw) +{ + return aq_hw_read_reg_bit(aq_hw, HW_ATL_SMB0_BUS_BUSY_ADR, + HW_ATL_SMB0_BUS_BUSY_MSK, + HW_ATL_SMB0_BUS_BUSY_SHIFT); +} + +u32 hw_atl_smb0_byte_transfer_complete_get(struct aq_hw_s *aq_hw) +{ + return aq_hw_read_reg_bit(aq_hw, HW_ATL_SMB0_BYTE_TRANSFER_COMPLETE_ADR, + HW_ATL_SMB0_BYTE_TRANSFER_COMPLETE_MSK, + HW_ATL_SMB0_BYTE_TRANSFER_COMPLETE_SHIFT); +} + +u32 hw_atl_smb0_receive_acknowledged_get(struct aq_hw_s *aq_hw) +{ + return aq_hw_read_reg_bit(aq_hw, HW_ATL_SMB0_RX_ACKNOWLEDGED_ADR, + HW_ATL_SMB0_RX_ACKNOWLEDGED_MSK, + HW_ATL_SMB0_RX_ACKNOWLEDGED_SHIFT); +} + +u32 hw_atl_smb0_repeated_start_detect_get(struct aq_hw_s *aq_hw) +{ + return aq_hw_read_reg_bit(aq_hw, HW_ATL_SMB0_REPEATED_START_DETECT_ADR, + 
HW_ATL_SMB0_REPEATED_START_DETECT_MSK, + HW_ATL_SMB0_REPEATED_START_DETECT_SHIFT); +} + +u32 hw_atl_smb0_rx_data_get(struct aq_hw_s *aq_hw) +{ + return aq_hw_read_reg(aq_hw, HW_ATL_SMB0_RECEIVED_DATA_ADR); +} + +void hw_atl_smb0_tx_data_set(struct aq_hw_s *aq_hw, u32 data) +{ + return aq_hw_write_reg(aq_hw, HW_ATL_SMB0_TRANSMITTED_DATA_ADR, data); +} + +void hw_atl_smb0_provisioning2_set(struct aq_hw_s *aq_hw, u32 data) +{ + return aq_hw_write_reg(aq_hw, HW_ATL_SMB0_PROVISIONING2_ADR, data); +} + /* global */ void hw_atl_reg_glb_cpu_sem_set(struct aq_hw_s *aq_hw, u32 glb_cpu_sem, u32 semaphore) diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.h index 58f5ee0a6214..5fd506acacb5 100644 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.h +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.h @@ -34,6 +34,27 @@ u32 hw_atl_ts_ready_latch_high_get(struct aq_hw_s *aq_hw); /* get temperature sense data */ u32 hw_atl_ts_data_get(struct aq_hw_s *aq_hw); +/* SMBUS0 bus busy */ +u32 hw_atl_smb0_bus_busy_get(struct aq_hw_s *aq_hw); + +/* SMBUS0 byte transfer complete */ +u32 hw_atl_smb0_byte_transfer_complete_get(struct aq_hw_s *aq_hw); + +/* SMBUS0 receive acknowledged */ +u32 hw_atl_smb0_receive_acknowledged_get(struct aq_hw_s *aq_hw); + +/* SMBUS0 set transmitted data (only leftmost byte of data valid) */ +void hw_atl_smb0_tx_data_set(struct aq_hw_s *aq_hw, u32 data); + +/* SMBUS0 provisioning2 command register */ +void hw_atl_smb0_provisioning2_set(struct aq_hw_s *aq_hw, u32 data); + +/* SMBUS0 repeated start detect */ +u32 hw_atl_smb0_repeated_start_detect_get(struct aq_hw_s *aq_hw); + +/* SMBUS0 received data register */ +u32 hw_atl_smb0_rx_data_get(struct aq_hw_s *aq_hw); + /* global */ /* set global microprocessor semaphore */ diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h index 4a6467031b9e..fce30d90b6cb 100644 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h @@ -42,6 +42,38 @@ #define HW_ATL_TS_DATA_OUT_SHIFT 0 #define HW_ATL_TS_DATA_OUT_WIDTH 12 +/* SMBUS0 Received Data register */ +#define HW_ATL_SMB0_RECEIVED_DATA_ADR 0x00000748 +/* SMBUS0 Transmitted Data register */ +#define HW_ATL_SMB0_TRANSMITTED_DATA_ADR 0x00000608 + +/* SMBUS0 Global Provisioning 2 register */ +#define HW_ATL_SMB0_PROVISIONING2_ADR 0x00000604 + +/* SMBUS0 Bus Busy Bitfield Definitions */ +#define HW_ATL_SMB0_BUS_BUSY_ADR 0x00000744 +#define HW_ATL_SMB0_BUS_BUSY_MSK 0x00000080 +#define HW_ATL_SMB0_BUS_BUSY_SHIFT 7 +#define HW_ATL_SMB0_BUS_BUSY_WIDTH 1 + +/* SMBUS0 Byte Transfer Complete Bitfield Definitions */ +#define HW_ATL_SMB0_BYTE_TRANSFER_COMPLETE_ADR 0x00000744 +#define HW_ATL_SMB0_BYTE_TRANSFER_COMPLETE_MSK 0x00000002 +#define HW_ATL_SMB0_BYTE_TRANSFER_COMPLETE_SHIFT 1 +#define HW_ATL_SMB0_BYTE_TRANSFER_COMPLETE_WIDTH 1 + +/* SMBUS0 Receive Acknowledge Bitfield Definitions */ +#define HW_ATL_SMB0_RX_ACKNOWLEDGED_ADR 0x00000744 +#define HW_ATL_SMB0_RX_ACKNOWLEDGED_MSK 0x00000100 +#define HW_ATL_SMB0_RX_ACKNOWLEDGED_SHIFT 8 +#define HW_ATL_SMB0_RX_ACKNOWLEDGED_WIDTH 1 + +/* SMBUS0 Repeated Start Detect Bitfield Definitions */ +#define HW_ATL_SMB0_REPEATED_START_DETECT_ADR 0x00000744 +#define HW_ATL_SMB0_REPEATED_START_DETECT_MSK 0x00000004 +#define HW_ATL_SMB0_REPEATED_START_DETECT_SHIFT 2 +#define 
HW_ATL_SMB0_REPEATED_START_DETECT_WIDTH 1 + /* global microprocessor semaphore definitions * base address: 0x000003a0 * parameter: semaphore {s} | stride size 0x4 | range [0, 15] diff --git a/drivers/net/ethernet/arc/emac_rockchip.c b/drivers/net/ethernet/arc/emac_rockchip.c index 493d6356c8ca..780e70ea1c22 100644 --- a/drivers/net/ethernet/arc/emac_rockchip.c +++ b/drivers/net/ethernet/arc/emac_rockchip.c @@ -264,7 +264,7 @@ static void emac_rockchip_remove(struct platform_device *pdev) static struct platform_driver emac_rockchip_driver = { .probe = emac_rockchip_probe, - .remove_new = emac_rockchip_remove, + .remove = emac_rockchip_remove, .driver = { .name = DRV_NAME, .of_match_table = emac_rockchip_dt_ids, diff --git a/drivers/net/ethernet/atheros/ag71xx.c b/drivers/net/ethernet/atheros/ag71xx.c index 9586b6894f7e..3d4c3d8698e2 100644 --- a/drivers/net/ethernet/atheros/ag71xx.c +++ b/drivers/net/ethernet/atheros/ag71xx.c @@ -1598,8 +1598,8 @@ static int ag71xx_rx_packets(struct ag71xx *ag, int limit) int ring_mask, ring_size, done = 0; unsigned int pktlen_mask, offset; struct ag71xx_ring *ring; - struct list_head rx_list; struct sk_buff *skb; + LIST_HEAD(rx_list); ring = &ag->rx_ring; pktlen_mask = ag->dcfg->desc_pktlen_mask; @@ -1610,8 +1610,6 @@ static int ag71xx_rx_packets(struct ag71xx *ag, int limit) netif_dbg(ag, rx_status, ndev, "rx packets, limit=%d, curr=%u, dirty=%u\n", limit, ring->curr, ring->dirty); - INIT_LIST_HEAD(&rx_list); - while (done < limit) { unsigned int i = ring->curr & ring_mask; struct ag71xx_desc *desc = ag71xx_ring_desc(ring, i); @@ -1648,6 +1646,7 @@ static int ag71xx_rx_packets(struct ag71xx *ag, int limit) skb->dev = ndev; skb->ip_summed = CHECKSUM_NONE; + skb->protocol = eth_type_trans(skb, ndev); list_add_tail(&skb->list, &rx_list); next: @@ -1659,8 +1658,6 @@ next: ag71xx_ring_rx_refill(ag); - list_for_each_entry(skb, &rx_list, list) - skb->protocol = eth_type_trans(skb, ndev); netif_receive_skb_list(&rx_list); netif_dbg(ag, rx_status, ndev, "rx finish, curr=%u, dirty=%u, done=%d\n", @@ -1822,10 +1819,9 @@ static int ag71xx_probe(struct platform_device *pdev) } clk_eth = devm_clk_get_enabled(&pdev->dev, "eth"); - if (IS_ERR(clk_eth)) { - netif_err(ag, probe, ndev, "Failed to get eth clk.\n"); - return PTR_ERR(clk_eth); - } + if (IS_ERR(clk_eth)) + return dev_err_probe(&pdev->dev, PTR_ERR(clk_eth), + "Failed to get eth clk."); SET_NETDEV_DEV(ndev, &pdev->dev); @@ -1836,14 +1832,13 @@ static int ag71xx_probe(struct platform_device *pdev) memcpy(ag->fifodata, dcfg->fifodata, sizeof(ag->fifodata)); ag->mac_reset = devm_reset_control_get(&pdev->dev, "mac"); - if (IS_ERR(ag->mac_reset)) { - netif_err(ag, probe, ndev, "missing mac reset\n"); - return PTR_ERR(ag->mac_reset); - } + if (IS_ERR(ag->mac_reset)) + return dev_err_probe(&pdev->dev, PTR_ERR(ag->mac_reset), + "missing mac reset"); - ag->mac_base = devm_ioremap(&pdev->dev, res->start, resource_size(res)); - if (!ag->mac_base) - return -ENOMEM; + ag->mac_base = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(ag->mac_base)) + return PTR_ERR(ag->mac_base); /* ensure that HW is in manual polling mode before interrupts are * activated. 
Otherwise ag71xx_interrupt might call napi_schedule @@ -1917,18 +1912,14 @@ static int ag71xx_probe(struct platform_device *pdev) if (err) return err; - platform_set_drvdata(pdev, ndev); - err = ag71xx_phylink_setup(ag); - if (err) { - netif_err(ag, probe, ndev, "failed to setup phylink (%d)\n", err); - return err; - } + if (err) + return dev_err_probe(&pdev->dev, err, + "failed to setup phylink"); err = devm_register_netdev(&pdev->dev, ndev); if (err) { netif_err(ag, probe, ndev, "unable to register net device\n"); - platform_set_drvdata(pdev, NULL); return err; } diff --git a/drivers/net/ethernet/broadcom/Kconfig b/drivers/net/ethernet/broadcom/Kconfig index 75ca3ddda1f5..eeec8bf17cf4 100644 --- a/drivers/net/ethernet/broadcom/Kconfig +++ b/drivers/net/ethernet/broadcom/Kconfig @@ -72,7 +72,6 @@ config BCMGENET tristate "Broadcom GENET internal MAC support" depends on HAS_IOMEM depends on PTP_1588_CLOCK_OPTIONAL || !ARCH_BCM2835 - select MII select PHYLIB select FIXED_PHY select BCM7XXX_PHY @@ -195,7 +194,6 @@ config SYSTEMPORT tristate "Broadcom SYSTEMPORT internal MAC support" depends on HAS_IOMEM depends on NET_DSA || !NET_DSA - select MII select PHYLIB select FIXED_PHY select DIMLIB @@ -260,7 +258,6 @@ config BCMASP depends on ARCH_BRCMSTB || COMPILE_TEST default ARCH_BRCMSTB depends on OF - select MII select PHYLIB select MDIO_BCM_UNIMAC help diff --git a/drivers/net/ethernet/broadcom/asp2/bcmasp.c b/drivers/net/ethernet/broadcom/asp2/bcmasp.c index 297c2682a9cf..a68fab1b05f0 100644 --- a/drivers/net/ethernet/broadcom/asp2/bcmasp.c +++ b/drivers/net/ethernet/broadcom/asp2/bcmasp.c @@ -1500,7 +1500,7 @@ static SIMPLE_DEV_PM_OPS(bcmasp_pm_ops, static struct platform_driver bcmasp_driver = { .probe = bcmasp_probe, - .remove_new = bcmasp_remove, + .remove = bcmasp_remove, .shutdown = bcmasp_shutdown, .driver = { .name = "brcm,asp-v2", diff --git a/drivers/net/ethernet/broadcom/asp2/bcmasp_ethtool.c b/drivers/net/ethernet/broadcom/asp2/bcmasp_ethtool.c index ca163c8e3729..67928b5d8a26 100644 --- a/drivers/net/ethernet/broadcom/asp2/bcmasp_ethtool.c +++ b/drivers/net/ethernet/broadcom/asp2/bcmasp_ethtool.c @@ -496,4 +496,5 @@ const struct ethtool_ops bcmasp_ethtool_ops = { .get_strings = bcmasp_get_strings, .get_ethtool_stats = bcmasp_get_ethtool_stats, .get_sset_count = bcmasp_get_sset_count, + .get_ts_info = ethtool_op_get_ts_info, }; diff --git a/drivers/net/ethernet/broadcom/asp2/bcmasp_intf.c b/drivers/net/ethernet/broadcom/asp2/bcmasp_intf.c index 9ea16ef4139d..cfd50efbdbc0 100644 --- a/drivers/net/ethernet/broadcom/asp2/bcmasp_intf.c +++ b/drivers/net/ethernet/broadcom/asp2/bcmasp_intf.c @@ -365,6 +365,9 @@ static netdev_tx_t bcmasp_xmit(struct sk_buff *skb, struct net_device *dev) intf->tx_spb_index = spb_index; intf->tx_spb_dma_valid = valid; + + skb_tx_timestamp(skb); + bcmasp_intf_tx_write(intf, intf->tx_spb_dma_valid); if (tx_spb_ring_full(intf, MAX_SKB_FRAGS + 1)) diff --git a/drivers/net/ethernet/broadcom/bcm4908_enet.c b/drivers/net/ethernet/broadcom/bcm4908_enet.c index 72df1bb10172..203e8d0dd04b 100644 --- a/drivers/net/ethernet/broadcom/bcm4908_enet.c +++ b/drivers/net/ethernet/broadcom/bcm4908_enet.c @@ -789,7 +789,7 @@ static struct platform_driver bcm4908_enet_driver = { .of_match_table = bcm4908_enet_of_match, }, .probe = bcm4908_enet_probe, - .remove_new = bcm4908_enet_remove, + .remove = bcm4908_enet_remove, }; module_platform_driver(bcm4908_enet_driver); diff --git a/drivers/net/ethernet/broadcom/bcm63xx_enet.c b/drivers/net/ethernet/broadcom/bcm63xx_enet.c index 
3c0e3b9828be..e5e03aaa49f9 100644 --- a/drivers/net/ethernet/broadcom/bcm63xx_enet.c +++ b/drivers/net/ethernet/broadcom/bcm63xx_enet.c @@ -1936,7 +1936,7 @@ static void bcm_enet_remove(struct platform_device *pdev) static struct platform_driver bcm63xx_enet_driver = { .probe = bcm_enet_probe, - .remove_new = bcm_enet_remove, + .remove = bcm_enet_remove, .driver = { .name = "bcm63xx_enet", }, @@ -2755,7 +2755,7 @@ static void bcm_enetsw_remove(struct platform_device *pdev) static struct platform_driver bcm63xx_enetsw_driver = { .probe = bcm_enetsw_probe, - .remove_new = bcm_enetsw_remove, + .remove = bcm_enetsw_remove, .driver = { .name = "bcm63xx_enetsw", }, diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c index 0a68b526e4a8..0b7088ca4822 100644 --- a/drivers/net/ethernet/broadcom/bcmsysport.c +++ b/drivers/net/ethernet/broadcom/bcmsysport.c @@ -2900,7 +2900,7 @@ static SIMPLE_DEV_PM_OPS(bcm_sysport_pm_ops, static struct platform_driver bcm_sysport_driver = { .probe = bcm_sysport_probe, - .remove_new = bcm_sysport_remove, + .remove = bcm_sysport_remove, .driver = { .name = "brcm-systemport", .of_match_table = bcm_sysport_of_match, diff --git a/drivers/net/ethernet/broadcom/bgmac-platform.c b/drivers/net/ethernet/broadcom/bgmac-platform.c index 77425c7a32db..ecce23cecbea 100644 --- a/drivers/net/ethernet/broadcom/bgmac-platform.c +++ b/drivers/net/ethernet/broadcom/bgmac-platform.c @@ -294,7 +294,7 @@ static struct platform_driver bgmac_enet_driver = { .pm = BGMAC_PM_OPS }, .probe = bgmac_probe, - .remove_new = bgmac_remove, + .remove = bgmac_remove, }; module_platform_driver(bgmac_enet_driver); diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index 99d025b69079..6dd6541d8619 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -10988,7 +10988,8 @@ static void bnxt_init_napi(struct bnxt *bp) cp_nr_rings--; for (i = 0; i < cp_nr_rings; i++) { bnapi = bp->bnapi[i]; - netif_napi_add(bp->dev, &bnapi->napi, poll_fn); + netif_napi_add_config(bp->dev, &bnapi->napi, poll_fn, + bnapi->index); } if (BNXT_CHIP_TYPE_NITRO_A0(bp)) { bnapi = bp->bnapi[cp_nr_rings]; @@ -12882,7 +12883,7 @@ static netdev_features_t bnxt_fix_features(struct net_device *dev, if (features & NETIF_F_GRO_HW) features &= ~NETIF_F_LRO; - /* Both CTAG and STAG VLAN accelaration on the RX side have to be + /* Both CTAG and STAG VLAN acceleration on the RX side have to be * turned on or off together. */ vlan_features = features & BNXT_HW_FEATURE_VLAN_ALL_RX; @@ -16136,7 +16137,7 @@ static pci_ers_result_t bnxt_io_error_detected(struct pci_dev *pdev, * @pdev: Pointer to PCI device * * Restart the card from scratch, as if from a cold-boot. - * At this point, the card has exprienced a hard reset, + * At this point, the card has experienced a hard reset, * followed by fixups by BIOS, and has its config space * set up identically to what it was at cold boot. */ @@ -16164,7 +16165,7 @@ static pci_ers_result_t bnxt_io_slot_reset(struct pci_dev *pdev) pci_set_master(pdev); /* Upon fatal error, our device internal logic that latches to * BAR value is getting reset and will restore only upon - * rewritting the BARs. + * rewriting the BARs. 
* * As pci_restore_state() does not re-write the BARs if the * value is same as saved value earlier, driver needs to diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c index f7be886570d8..10966ab15373 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c +++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c @@ -4350,7 +4350,7 @@ MODULE_DEVICE_TABLE(acpi, genet_acpi_match); static struct platform_driver bcmgenet_driver = { .probe = bcmgenet_probe, - .remove_new = bcmgenet_remove, + .remove = bcmgenet_remove, .shutdown = bcmgenet_shutdown, .driver = { .name = "bcmgenet", diff --git a/drivers/net/ethernet/broadcom/sb1250-mac.c b/drivers/net/ethernet/broadcom/sb1250-mac.c index fcf8485f3446..30865fe03eeb 100644 --- a/drivers/net/ethernet/broadcom/sb1250-mac.c +++ b/drivers/net/ethernet/broadcom/sb1250-mac.c @@ -2608,7 +2608,7 @@ static void sbmac_remove(struct platform_device *pldev) static struct platform_driver sbmac_driver = { .probe = sbmac_probe, - .remove_new = sbmac_remove, + .remove = sbmac_remove, .driver = { .name = sbmac_string, }, diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c index 378815917741..01dfec115942 100644 --- a/drivers/net/ethernet/broadcom/tg3.c +++ b/drivers/net/ethernet/broadcom/tg3.c @@ -3737,7 +3737,7 @@ static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, } do { - u32 *fw_data = (u32 *)(fw_hdr + 1); + __be32 *fw_data = (__be32 *)(fw_hdr + 1); for (i = 0; i < tg3_fw_data_len(tp, fw_hdr); i++) write_op(tp, cpu_scratch_base + (be32_to_cpu(fw_hdr->base_addr) & 0xffff) + @@ -7395,27 +7395,60 @@ tx_recovery: static void tg3_napi_disable(struct tg3 *tp) { + int txq_idx = tp->txq_cnt - 1; + int rxq_idx = tp->rxq_cnt - 1; + struct tg3_napi *tnapi; int i; - for (i = tp->irq_cnt - 1; i >= 0; i--) - napi_disable(&tp->napi[i].napi); + for (i = tp->irq_cnt - 1; i >= 0; i--) { + tnapi = &tp->napi[i]; + if (tnapi->tx_buffers) { + netif_queue_set_napi(tp->dev, txq_idx, + NETDEV_QUEUE_TYPE_TX, NULL); + txq_idx--; + } + if (tnapi->rx_rcb) { + netif_queue_set_napi(tp->dev, rxq_idx, + NETDEV_QUEUE_TYPE_RX, NULL); + rxq_idx--; + } + napi_disable(&tnapi->napi); + } } static void tg3_napi_enable(struct tg3 *tp) { + int txq_idx = 0, rxq_idx = 0; + struct tg3_napi *tnapi; int i; - for (i = 0; i < tp->irq_cnt; i++) - napi_enable(&tp->napi[i].napi); + for (i = 0; i < tp->irq_cnt; i++) { + tnapi = &tp->napi[i]; + napi_enable(&tnapi->napi); + if (tnapi->tx_buffers) { + netif_queue_set_napi(tp->dev, txq_idx, + NETDEV_QUEUE_TYPE_TX, + &tnapi->napi); + txq_idx++; + } + if (tnapi->rx_rcb) { + netif_queue_set_napi(tp->dev, rxq_idx, + NETDEV_QUEUE_TYPE_RX, + &tnapi->napi); + rxq_idx++; + } + } } static void tg3_napi_init(struct tg3 *tp) { int i; - netif_napi_add(tp->dev, &tp->napi[0].napi, tg3_poll); - for (i = 1; i < tp->irq_cnt; i++) - netif_napi_add(tp->dev, &tp->napi[i].napi, tg3_poll_msix); + for (i = 0; i < tp->irq_cnt; i++) { + netif_napi_add(tp->dev, &tp->napi[i].napi, + i ? 
tg3_poll_msix : tg3_poll); + netif_napi_set_irq(&tp->napi[i].napi, tp->napi[i].irq_vec); + } } static void tg3_napi_fini(struct tg3 *tp) @@ -11309,18 +11342,17 @@ static int tg3_request_irq(struct tg3 *tp, int irq_num) else { name = &tnapi->irq_lbl[0]; if (tnapi->tx_buffers && tnapi->rx_rcb) - snprintf(name, IFNAMSIZ, + snprintf(name, sizeof(tnapi->irq_lbl), "%s-txrx-%d", tp->dev->name, irq_num); else if (tnapi->tx_buffers) - snprintf(name, IFNAMSIZ, + snprintf(name, sizeof(tnapi->irq_lbl), "%s-tx-%d", tp->dev->name, irq_num); else if (tnapi->rx_rcb) - snprintf(name, IFNAMSIZ, + snprintf(name, sizeof(tnapi->irq_lbl), "%s-rx-%d", tp->dev->name, irq_num); else - snprintf(name, IFNAMSIZ, + snprintf(name, sizeof(tnapi->irq_lbl), "%s-%d", tp->dev->name, irq_num); - name[IFNAMSIZ-1] = 0; } if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) { @@ -13093,12 +13125,16 @@ static int tg3_test_nvram(struct tg3 *tp) /* Bootstrap checksum at offset 0x10 */ csum = calc_crc((unsigned char *) buf, 0x10); - if (csum != le32_to_cpu(buf[0x10/4])) + + /* The type of buf is __be32 *, but this value is __le32 */ + if (csum != le32_to_cpu((__force __le32)buf[0x10 / 4])) goto out; /* Manufacturing block starts at offset 0x74, checksum at 0xfc */ - csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88); - if (csum != le32_to_cpu(buf[0xfc/4])) + csum = calc_crc((unsigned char *)&buf[0x74 / 4], 0x88); + + /* The type of buf is __be32 *, but this value is __le32 */ + if (csum != le32_to_cpu((__force __le32)buf[0xfc / 4])) goto out; kfree(buf); @@ -17065,12 +17101,14 @@ static int tg3_get_device_address(struct tg3 *tp, u8 *addr) addr_ok = is_valid_ether_addr(addr); } if (!addr_ok) { + __be32 be_hi, be_lo; + /* Next, try NVRAM. */ if (!tg3_flag(tp, NO_NVRAM) && - !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) && - !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) { - memcpy(&addr[0], ((char *)&hi) + 2, 2); - memcpy(&addr[2], (char *)&lo, sizeof(lo)); + !tg3_nvram_read_be32(tp, mac_offset + 0, &be_hi) && + !tg3_nvram_read_be32(tp, mac_offset + 4, &be_lo)) { + memcpy(&addr[0], ((char *)&be_hi) + 2, 2); + memcpy(&addr[2], (char *)&be_lo, sizeof(be_lo)); } /* Finally just fetch it out of the MAC control regs. */ else { @@ -18237,7 +18275,7 @@ done: * @pdev: Pointer to PCI device * * Restart the card from scratch, as if from a cold-boot. - * At this point, the card has exprienced a hard reset, + * At this point, the card has experienced a hard reset, * followed by fixups by BIOS, and has its config space * set up identically to what it was at cold boot. 
*/ diff --git a/drivers/net/ethernet/broadcom/tg3.h b/drivers/net/ethernet/broadcom/tg3.h index cf1b2b123c7e..b473f8014d9c 100644 --- a/drivers/net/ethernet/broadcom/tg3.h +++ b/drivers/net/ethernet/broadcom/tg3.h @@ -3033,7 +3033,7 @@ struct tg3_napi { dma_addr_t rx_rcb_mapping; dma_addr_t tx_desc_mapping; - char irq_lbl[IFNAMSIZ]; + char irq_lbl[IFNAMSIZ + 6 + 10]; /* name + "-txrx-" + %d */ unsigned int irq_vec; }; diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c index 56901280ba04..ebe886b98891 100644 --- a/drivers/net/ethernet/cadence/macb_main.c +++ b/drivers/net/ethernet/cadence/macb_main.c @@ -4849,10 +4849,11 @@ static const struct macb_config pc302gem_config = { }; static const struct macb_config sama5d2_config = { - .caps = MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII, + .caps = MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII | MACB_CAPS_JUMBO, .dma_burst_length = 16, .clk_init = macb_clk_init, .init = macb_init, + .jumbo_max_len = 10240, .usrio = &macb_default_usrio, }; @@ -5490,7 +5491,7 @@ static const struct dev_pm_ops macb_pm_ops = { static struct platform_driver macb_driver = { .probe = macb_probe, - .remove_new = macb_remove, + .remove = macb_remove, .driver = { .name = "macb", .of_match_table = of_match_ptr(macb_dt_ids), diff --git a/drivers/net/ethernet/calxeda/xgmac.c b/drivers/net/ethernet/calxeda/xgmac.c index a71b320fd030..331ac6a3dc38 100644 --- a/drivers/net/ethernet/calxeda/xgmac.c +++ b/drivers/net/ethernet/calxeda/xgmac.c @@ -1919,7 +1919,7 @@ static struct platform_driver xgmac_driver = { .pm = &xgmac_pm_ops, }, .probe = xgmac_probe, - .remove_new = xgmac_remove, + .remove = xgmac_remove, }; module_platform_driver(xgmac_driver); diff --git a/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c b/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c index b3c81a2e9d46..9ad49aea2673 100644 --- a/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c +++ b/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c @@ -36,175 +36,6 @@ */ #define CN23XX_INPUT_JABBER 64600 -void cn23xx_dump_pf_initialized_regs(struct octeon_device *oct) -{ - int i = 0; - u32 regval = 0; - struct octeon_cn23xx_pf *cn23xx = (struct octeon_cn23xx_pf *)oct->chip; - - /*In cn23xx_soft_reset*/ - dev_dbg(&oct->pci_dev->dev, "%s[%llx] : 0x%llx\n", - "CN23XX_WIN_WR_MASK_REG", CVM_CAST64(CN23XX_WIN_WR_MASK_REG), - CVM_CAST64(octeon_read_csr64(oct, CN23XX_WIN_WR_MASK_REG))); - dev_dbg(&oct->pci_dev->dev, "%s[%llx] : 0x%016llx\n", - "CN23XX_SLI_SCRATCH1", CVM_CAST64(CN23XX_SLI_SCRATCH1), - CVM_CAST64(octeon_read_csr64(oct, CN23XX_SLI_SCRATCH1))); - dev_dbg(&oct->pci_dev->dev, "%s[%llx] : 0x%016llx\n", - "CN23XX_RST_SOFT_RST", CN23XX_RST_SOFT_RST, - lio_pci_readq(oct, CN23XX_RST_SOFT_RST)); - - /*In cn23xx_set_dpi_regs*/ - dev_dbg(&oct->pci_dev->dev, "%s[%llx] : 0x%016llx\n", - "CN23XX_DPI_DMA_CONTROL", CN23XX_DPI_DMA_CONTROL, - lio_pci_readq(oct, CN23XX_DPI_DMA_CONTROL)); - - for (i = 0; i < 6; i++) { - dev_dbg(&oct->pci_dev->dev, "%s(%d)[%llx] : 0x%016llx\n", - "CN23XX_DPI_DMA_ENG_ENB", i, - CN23XX_DPI_DMA_ENG_ENB(i), - lio_pci_readq(oct, CN23XX_DPI_DMA_ENG_ENB(i))); - dev_dbg(&oct->pci_dev->dev, "%s(%d)[%llx] : 0x%016llx\n", - "CN23XX_DPI_DMA_ENG_BUF", i, - CN23XX_DPI_DMA_ENG_BUF(i), - lio_pci_readq(oct, CN23XX_DPI_DMA_ENG_BUF(i))); - } - - dev_dbg(&oct->pci_dev->dev, "%s[%llx] : 0x%016llx\n", "CN23XX_DPI_CTL", - CN23XX_DPI_CTL, lio_pci_readq(oct, CN23XX_DPI_CTL)); - - /*In cn23xx_setup_pcie_mps and cn23xx_setup_pcie_mrrs */ - pci_read_config_dword(oct->pci_dev, 
CN23XX_CONFIG_PCIE_DEVCTL, ®val); - dev_dbg(&oct->pci_dev->dev, "%s[%llx] : 0x%016llx\n", - "CN23XX_CONFIG_PCIE_DEVCTL", - CVM_CAST64(CN23XX_CONFIG_PCIE_DEVCTL), CVM_CAST64(regval)); - - dev_dbg(&oct->pci_dev->dev, "%s(%d)[%llx] : 0x%016llx\n", - "CN23XX_DPI_SLI_PRTX_CFG", oct->pcie_port, - CN23XX_DPI_SLI_PRTX_CFG(oct->pcie_port), - lio_pci_readq(oct, CN23XX_DPI_SLI_PRTX_CFG(oct->pcie_port))); - - /*In cn23xx_specific_regs_setup */ - dev_dbg(&oct->pci_dev->dev, "%s(%d)[%llx] : 0x%016llx\n", - "CN23XX_SLI_S2M_PORTX_CTL", oct->pcie_port, - CVM_CAST64(CN23XX_SLI_S2M_PORTX_CTL(oct->pcie_port)), - CVM_CAST64(octeon_read_csr64( - oct, CN23XX_SLI_S2M_PORTX_CTL(oct->pcie_port)))); - - dev_dbg(&oct->pci_dev->dev, "%s[%llx] : 0x%016llx\n", - "CN23XX_SLI_RING_RST", CVM_CAST64(CN23XX_SLI_PKT_IOQ_RING_RST), - (u64)octeon_read_csr64(oct, CN23XX_SLI_PKT_IOQ_RING_RST)); - - /*In cn23xx_setup_global_mac_regs*/ - for (i = 0; i < CN23XX_MAX_MACS; i++) { - dev_dbg(&oct->pci_dev->dev, "%s(%d)[%llx] : 0x%016llx\n", - "CN23XX_SLI_PKT_MAC_RINFO64", i, - CVM_CAST64(CN23XX_SLI_PKT_MAC_RINFO64(i, oct->pf_num)), - CVM_CAST64(octeon_read_csr64 - (oct, CN23XX_SLI_PKT_MAC_RINFO64 - (i, oct->pf_num)))); - } - - /*In cn23xx_setup_global_input_regs*/ - for (i = 0; i < CN23XX_MAX_INPUT_QUEUES; i++) { - dev_dbg(&oct->pci_dev->dev, "%s(%d)[%llx] : 0x%016llx\n", - "CN23XX_SLI_IQ_PKT_CONTROL64", i, - CVM_CAST64(CN23XX_SLI_IQ_PKT_CONTROL64(i)), - CVM_CAST64(octeon_read_csr64 - (oct, CN23XX_SLI_IQ_PKT_CONTROL64(i)))); - } - - /*In cn23xx_setup_global_output_regs*/ - dev_dbg(&oct->pci_dev->dev, "%s[%llx] : 0x%016llx\n", - "CN23XX_SLI_OQ_WMARK", CVM_CAST64(CN23XX_SLI_OQ_WMARK), - CVM_CAST64(octeon_read_csr64(oct, CN23XX_SLI_OQ_WMARK))); - - for (i = 0; i < CN23XX_MAX_OUTPUT_QUEUES; i++) { - dev_dbg(&oct->pci_dev->dev, "%s(%d)[%llx] : 0x%016llx\n", - "CN23XX_SLI_OQ_PKT_CONTROL", i, - CVM_CAST64(CN23XX_SLI_OQ_PKT_CONTROL(i)), - CVM_CAST64(octeon_read_csr( - oct, CN23XX_SLI_OQ_PKT_CONTROL(i)))); - dev_dbg(&oct->pci_dev->dev, "%s(%d)[%llx] : 0x%016llx\n", - "CN23XX_SLI_OQ_PKT_INT_LEVELS", i, - CVM_CAST64(CN23XX_SLI_OQ_PKT_INT_LEVELS(i)), - CVM_CAST64(octeon_read_csr64( - oct, CN23XX_SLI_OQ_PKT_INT_LEVELS(i)))); - } - - /*In cn23xx_enable_interrupt and cn23xx_disable_interrupt*/ - dev_dbg(&oct->pci_dev->dev, "%s[%llx] : 0x%016llx\n", - "cn23xx->intr_enb_reg64", - CVM_CAST64((long)(cn23xx->intr_enb_reg64)), - CVM_CAST64(readq(cn23xx->intr_enb_reg64))); - - dev_dbg(&oct->pci_dev->dev, "%s[%llx] : 0x%016llx\n", - "cn23xx->intr_sum_reg64", - CVM_CAST64((long)(cn23xx->intr_sum_reg64)), - CVM_CAST64(readq(cn23xx->intr_sum_reg64))); - - /*In cn23xx_setup_iq_regs*/ - for (i = 0; i < CN23XX_MAX_INPUT_QUEUES; i++) { - dev_dbg(&oct->pci_dev->dev, "%s(%d)[%llx] : 0x%016llx\n", - "CN23XX_SLI_IQ_BASE_ADDR64", i, - CVM_CAST64(CN23XX_SLI_IQ_BASE_ADDR64(i)), - CVM_CAST64(octeon_read_csr64( - oct, CN23XX_SLI_IQ_BASE_ADDR64(i)))); - dev_dbg(&oct->pci_dev->dev, "%s(%d)[%llx] : 0x%016llx\n", - "CN23XX_SLI_IQ_SIZE", i, - CVM_CAST64(CN23XX_SLI_IQ_SIZE(i)), - CVM_CAST64(octeon_read_csr - (oct, CN23XX_SLI_IQ_SIZE(i)))); - dev_dbg(&oct->pci_dev->dev, "%s(%d)[%llx] : 0x%016llx\n", - "CN23XX_SLI_IQ_DOORBELL", i, - CVM_CAST64(CN23XX_SLI_IQ_DOORBELL(i)), - CVM_CAST64(octeon_read_csr64( - oct, CN23XX_SLI_IQ_DOORBELL(i)))); - dev_dbg(&oct->pci_dev->dev, "%s(%d)[%llx] : 0x%016llx\n", - "CN23XX_SLI_IQ_INSTR_COUNT64", i, - CVM_CAST64(CN23XX_SLI_IQ_INSTR_COUNT64(i)), - CVM_CAST64(octeon_read_csr64( - oct, CN23XX_SLI_IQ_INSTR_COUNT64(i)))); - } - - /*In 
cn23xx_setup_oq_regs*/ - for (i = 0; i < CN23XX_MAX_OUTPUT_QUEUES; i++) { - dev_dbg(&oct->pci_dev->dev, "%s(%d)[%llx] : 0x%016llx\n", - "CN23XX_SLI_OQ_BASE_ADDR64", i, - CVM_CAST64(CN23XX_SLI_OQ_BASE_ADDR64(i)), - CVM_CAST64(octeon_read_csr64( - oct, CN23XX_SLI_OQ_BASE_ADDR64(i)))); - dev_dbg(&oct->pci_dev->dev, "%s(%d)[%llx] : 0x%016llx\n", - "CN23XX_SLI_OQ_SIZE", i, - CVM_CAST64(CN23XX_SLI_OQ_SIZE(i)), - CVM_CAST64(octeon_read_csr - (oct, CN23XX_SLI_OQ_SIZE(i)))); - dev_dbg(&oct->pci_dev->dev, "%s(%d)[%llx] : 0x%016llx\n", - "CN23XX_SLI_OQ_BUFF_INFO_SIZE", i, - CVM_CAST64(CN23XX_SLI_OQ_BUFF_INFO_SIZE(i)), - CVM_CAST64(octeon_read_csr( - oct, CN23XX_SLI_OQ_BUFF_INFO_SIZE(i)))); - dev_dbg(&oct->pci_dev->dev, "%s(%d)[%llx] : 0x%016llx\n", - "CN23XX_SLI_OQ_PKTS_SENT", i, - CVM_CAST64(CN23XX_SLI_OQ_PKTS_SENT(i)), - CVM_CAST64(octeon_read_csr64( - oct, CN23XX_SLI_OQ_PKTS_SENT(i)))); - dev_dbg(&oct->pci_dev->dev, "%s(%d)[%llx] : 0x%016llx\n", - "CN23XX_SLI_OQ_PKTS_CREDIT", i, - CVM_CAST64(CN23XX_SLI_OQ_PKTS_CREDIT(i)), - CVM_CAST64(octeon_read_csr64( - oct, CN23XX_SLI_OQ_PKTS_CREDIT(i)))); - } - - dev_dbg(&oct->pci_dev->dev, "%s[%llx] : 0x%016llx\n", - "CN23XX_SLI_PKT_TIME_INT", - CVM_CAST64(CN23XX_SLI_PKT_TIME_INT), - CVM_CAST64(octeon_read_csr64(oct, CN23XX_SLI_PKT_TIME_INT))); - dev_dbg(&oct->pci_dev->dev, "%s[%llx] : 0x%016llx\n", - "CN23XX_SLI_PKT_CNT_INT", - CVM_CAST64(CN23XX_SLI_PKT_CNT_INT), - CVM_CAST64(octeon_read_csr64(oct, CN23XX_SLI_PKT_CNT_INT))); -} - static int cn23xx_pf_soft_reset(struct octeon_device *oct) { octeon_write_csr64(oct, CN23XX_WIN_WR_MASK_REG, 0xFF); diff --git a/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.h b/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.h index e6f31d0d5c0b..234b96b4f488 100644 --- a/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.h +++ b/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.h @@ -59,8 +59,6 @@ int validate_cn23xx_pf_config_info(struct octeon_device *oct, u32 cn23xx_pf_get_oq_ticks(struct octeon_device *oct, u32 time_intr_in_us); -void cn23xx_dump_pf_initialized_regs(struct octeon_device *oct); - int cn23xx_sriov_config(struct octeon_device *oct); int cn23xx_fw_loaded(struct octeon_device *oct); diff --git a/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c b/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c index 744f2434f7fa..393b9951490a 100644 --- a/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c +++ b/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c @@ -1545,7 +1545,7 @@ static struct platform_driver octeon_mgmt_driver = { .of_match_table = octeon_mgmt_match, }, .probe = octeon_mgmt_probe, - .remove_new = octeon_mgmt_remove, + .remove = octeon_mgmt_remove, }; module_platform_driver(octeon_mgmt_driver); diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c index 89256b866840..5a9f6925e1fa 100644 --- a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c +++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c @@ -515,23 +515,6 @@ void *cxgb3_free_atid(struct t3cdev *tdev, int atid) EXPORT_SYMBOL(cxgb3_free_atid); -/* - * Free a server TID and return it to the free pool. 
- */ -void cxgb3_free_stid(struct t3cdev *tdev, int stid) -{ - struct tid_info *t = &(T3C_DATA(tdev))->tid_maps; - union listen_entry *p = stid2entry(t, stid); - - spin_lock_bh(&t->stid_lock); - p->next = t->sfree; - t->sfree = p; - t->stids_in_use--; - spin_unlock_bh(&t->stid_lock); -} - -EXPORT_SYMBOL(cxgb3_free_stid); - void cxgb3_insert_tid(struct t3cdev *tdev, struct cxgb3_client *client, void *ctx, unsigned int tid) { @@ -671,28 +654,6 @@ int cxgb3_alloc_atid(struct t3cdev *tdev, struct cxgb3_client *client, EXPORT_SYMBOL(cxgb3_alloc_atid); -int cxgb3_alloc_stid(struct t3cdev *tdev, struct cxgb3_client *client, - void *ctx) -{ - int stid = -1; - struct tid_info *t = &(T3C_DATA(tdev))->tid_maps; - - spin_lock_bh(&t->stid_lock); - if (t->sfree) { - union listen_entry *p = t->sfree; - - stid = (p - t->stid_tab) + t->stid_base; - t->sfree = p->next; - p->t3c_tid.ctx = ctx; - p->t3c_tid.client = client; - t->stids_in_use++; - } - spin_unlock_bh(&t->stid_lock); - return stid; -} - -EXPORT_SYMBOL(cxgb3_alloc_stid); - /* Get the t3cdev associated with a net_device */ struct t3cdev *dev2t3cdev(struct net_device *dev) { diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.h b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.h index 929c298115ca..7419824f9926 100644 --- a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.h +++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.h @@ -95,10 +95,7 @@ struct cxgb3_client { */ int cxgb3_alloc_atid(struct t3cdev *dev, struct cxgb3_client *client, void *ctx); -int cxgb3_alloc_stid(struct t3cdev *dev, struct cxgb3_client *client, - void *ctx); void *cxgb3_free_atid(struct t3cdev *dev, int atid); -void cxgb3_free_stid(struct t3cdev *dev, int stid); void cxgb3_insert_tid(struct t3cdev *dev, struct cxgb3_client *client, void *ctx, unsigned int tid); void cxgb3_queue_tid_release(struct t3cdev *dev, unsigned int tid); diff --git a/drivers/net/ethernet/chelsio/cxgb4/clip_tbl.c b/drivers/net/ethernet/chelsio/cxgb4/clip_tbl.c index 163efab27e9b..5060d3998889 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/clip_tbl.c +++ b/drivers/net/ethernet/chelsio/cxgb4/clip_tbl.c @@ -120,7 +120,7 @@ int cxgb4_clip_get(const struct net_device *dev, const u32 *lip, u8 v6) write_unlock_bh(&ctbl->lock); dev_err(adap->pdev_dev, "CLIP FW cmd failed with error %d, " - "Connections using %pI6c wont be " + "Connections using %pI6c won't be " "offloaded", ret, ce->addr6.sin6_addr.s6_addr); return ret; @@ -133,7 +133,7 @@ int cxgb4_clip_get(const struct net_device *dev, const u32 *lip, u8 v6) } else { write_unlock_bh(&ctbl->lock); dev_info(adap->pdev_dev, "CLIP table overflow, " - "Connections using %pI6c wont be offloaded", + "Connections using %pI6c won't be offloaded", (void *)lip); return -ENOMEM; } diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h index bbf7641a0fc7..75bd69ff61a8 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h @@ -1608,7 +1608,6 @@ void t4_os_portmod_changed(struct adapter *adap, int port_id); void t4_os_link_changed(struct adapter *adap, int port_id, int link_stat); void t4_free_sge_resources(struct adapter *adap); -void t4_free_ofld_rxqs(struct adapter *adap, int n, struct sge_ofld_rxq *q); irq_handler_t t4_intr_handler(struct adapter *adap); netdev_tx_t t4_start_xmit(struct sk_buff *skb, struct net_device *dev); int cxgb4_selftest_lb_pkt(struct net_device *netdev); @@ -2141,28 +2140,6 @@ int cxgb4_free_mac_filt(struct adapter *adap, unsigned int 
viid, unsigned int naddr, const u8 **addr, bool sleep_ok); int cxgb4_init_mps_ref_entries(struct adapter *adap); void cxgb4_free_mps_ref_entries(struct adapter *adap); -int cxgb4_alloc_encap_mac_filt(struct adapter *adap, unsigned int viid, - const u8 *addr, const u8 *mask, - unsigned int vni, unsigned int vni_mask, - u8 dip_hit, u8 lookup_type, bool sleep_ok); -int cxgb4_free_encap_mac_filt(struct adapter *adap, unsigned int viid, - int idx, bool sleep_ok); -int cxgb4_free_raw_mac_filt(struct adapter *adap, - unsigned int viid, - const u8 *addr, - const u8 *mask, - unsigned int idx, - u8 lookup_type, - u8 port_id, - bool sleep_ok); -int cxgb4_alloc_raw_mac_filt(struct adapter *adap, - unsigned int viid, - const u8 *addr, - const u8 *mask, - unsigned int idx, - u8 lookup_type, - u8 port_id, - bool sleep_ok); int cxgb4_update_mac_filt(struct port_info *pi, unsigned int viid, int *tcam_idx, const u8 *addr, bool persistent, u8 *smt_idx); diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c index 2418645c8823..97a261d5357e 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c @@ -2188,18 +2188,6 @@ void cxgb4_get_tcp_stats(struct pci_dev *pdev, struct tp_tcp_stats *v4, } EXPORT_SYMBOL(cxgb4_get_tcp_stats); -void cxgb4_iscsi_init(struct net_device *dev, unsigned int tag_mask, - const unsigned int *pgsz_order) -{ - struct adapter *adap = netdev2adap(dev); - - t4_write_reg(adap, ULP_RX_ISCSI_TAGMASK_A, tag_mask); - t4_write_reg(adap, ULP_RX_ISCSI_PSZ_A, HPZ0_V(pgsz_order[0]) | - HPZ1_V(pgsz_order[1]) | HPZ2_V(pgsz_order[2]) | - HPZ3_V(pgsz_order[3])); -} -EXPORT_SYMBOL(cxgb4_iscsi_init); - int cxgb4_flush_eq_cache(struct net_device *dev) { struct adapter *adap = netdev2adap(dev); diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_mps.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_mps.c index a020e8490681..60f4d5b5eb3a 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_mps.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_mps.c @@ -28,28 +28,6 @@ static int cxgb4_mps_ref_dec_by_mac(struct adapter *adap, return ret; } -static int cxgb4_mps_ref_dec(struct adapter *adap, u16 idx) -{ - struct mps_entries_ref *mps_entry, *tmp; - int ret = -EINVAL; - - spin_lock(&adap->mps_ref_lock); - list_for_each_entry_safe(mps_entry, tmp, &adap->mps_ref, list) { - if (mps_entry->idx == idx) { - if (!refcount_dec_and_test(&mps_entry->refcnt)) { - spin_unlock(&adap->mps_ref_lock); - return -EBUSY; - } - list_del(&mps_entry->list); - kfree(mps_entry); - ret = 0; - break; - } - } - spin_unlock(&adap->mps_ref_lock); - return ret; -} - static int cxgb4_mps_ref_inc(struct adapter *adap, const u8 *mac_addr, u16 idx, const u8 *mask) { @@ -141,82 +119,6 @@ int cxgb4_update_mac_filt(struct port_info *pi, unsigned int viid, return ret; } -int cxgb4_free_raw_mac_filt(struct adapter *adap, - unsigned int viid, - const u8 *addr, - const u8 *mask, - unsigned int idx, - u8 lookup_type, - u8 port_id, - bool sleep_ok) -{ - int ret = 0; - - if (!cxgb4_mps_ref_dec(adap, idx)) - ret = t4_free_raw_mac_filt(adap, viid, addr, - mask, idx, lookup_type, - port_id, sleep_ok); - - return ret; -} - -int cxgb4_alloc_raw_mac_filt(struct adapter *adap, - unsigned int viid, - const u8 *addr, - const u8 *mask, - unsigned int idx, - u8 lookup_type, - u8 port_id, - bool sleep_ok) -{ - int ret; - - ret = t4_alloc_raw_mac_filt(adap, viid, addr, - mask, idx, lookup_type, - port_id, sleep_ok); - if (ret < 0) - return ret; - - if 
(cxgb4_mps_ref_inc(adap, addr, ret, mask)) { - ret = -ENOMEM; - t4_free_raw_mac_filt(adap, viid, addr, - mask, idx, lookup_type, - port_id, sleep_ok); - } - - return ret; -} - -int cxgb4_free_encap_mac_filt(struct adapter *adap, unsigned int viid, - int idx, bool sleep_ok) -{ - int ret = 0; - - if (!cxgb4_mps_ref_dec(adap, idx)) - ret = t4_free_encap_mac_filt(adap, viid, idx, sleep_ok); - - return ret; -} - -int cxgb4_alloc_encap_mac_filt(struct adapter *adap, unsigned int viid, - const u8 *addr, const u8 *mask, - unsigned int vni, unsigned int vni_mask, - u8 dip_hit, u8 lookup_type, bool sleep_ok) -{ - int ret; - - ret = t4_alloc_encap_mac_filt(adap, viid, addr, mask, vni, vni_mask, - dip_hit, lookup_type, sleep_ok); - if (ret < 0) - return ret; - - if (cxgb4_mps_ref_inc(adap, addr, ret, mask)) { - ret = -ENOMEM; - t4_free_encap_mac_filt(adap, viid, ret, sleep_ok); - } - return ret; -} - int cxgb4_init_mps_ref_entries(struct adapter *adap) { spin_lock_init(&adap->mps_ref_lock); diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h index d8cafaa7ddb4..d7713038386c 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h @@ -518,8 +518,6 @@ unsigned int cxgb4_best_aligned_mtu(const unsigned short *mtus, unsigned int *mtu_idxp); void cxgb4_get_tcp_stats(struct pci_dev *pdev, struct tp_tcp_stats *v4, struct tp_tcp_stats *v6); -void cxgb4_iscsi_init(struct net_device *dev, unsigned int tag_mask, - const unsigned int *pgsz_order); struct sk_buff *cxgb4_pktgl_to_skb(const struct pkt_gl *gl, unsigned int skb_len, unsigned int pull_len); int cxgb4_sync_txq_pidx(struct net_device *dev, u16 qid, u16 pidx, u16 size); diff --git a/drivers/net/ethernet/chelsio/cxgb4/l2t.c b/drivers/net/ethernet/chelsio/cxgb4/l2t.c index 1e5f5b1a22a6..c02b4e9c06b2 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/l2t.c +++ b/drivers/net/ethernet/chelsio/cxgb4/l2t.c @@ -608,25 +608,6 @@ struct l2t_entry *t4_l2t_alloc_switching(struct adapter *adap, u16 vlan, return e; } -/** - * cxgb4_l2t_alloc_switching - Allocates an L2T entry for switch filters - * @dev: net_device pointer - * @vlan: VLAN Id - * @port: Associated port - * @dmac: Destination MAC address to add to L2T - * Returns pointer to the allocated l2t entry - * - * Allocates an L2T entry for use by switching rule of a filter - */ -struct l2t_entry *cxgb4_l2t_alloc_switching(struct net_device *dev, u16 vlan, - u8 port, u8 *dmac) -{ - struct adapter *adap = netdev2adap(dev); - - return t4_l2t_alloc_switching(adap, vlan, port, dmac); -} -EXPORT_SYMBOL(cxgb4_l2t_alloc_switching); - struct l2t_data *t4_init_l2t(unsigned int l2t_start, unsigned int l2t_end) { unsigned int l2t_size; diff --git a/drivers/net/ethernet/chelsio/cxgb4/l2t.h b/drivers/net/ethernet/chelsio/cxgb4/l2t.h index 340fecb28a13..8aad7e9dee6d 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/l2t.h +++ b/drivers/net/ethernet/chelsio/cxgb4/l2t.h @@ -115,8 +115,6 @@ struct l2t_entry *cxgb4_l2t_get(struct l2t_data *d, struct neighbour *neigh, unsigned int priority); u64 cxgb4_select_ntuple(struct net_device *dev, const struct l2t_entry *l2t); -struct l2t_entry *cxgb4_l2t_alloc_switching(struct net_device *dev, u16 vlan, - u8 port, u8 *dmac); void t4_l2t_update(struct adapter *adap, struct neighbour *neigh); struct l2t_entry *t4_l2t_alloc_switching(struct adapter *adap, u16 vlan, u8 port, u8 *dmac); diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c index 
de52bcb884c4..a7d76a8ed050 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/sge.c +++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c @@ -4874,22 +4874,6 @@ void free_rspq_fl(struct adapter *adap, struct sge_rspq *rq, } } -/** - * t4_free_ofld_rxqs - free a block of consecutive Rx queues - * @adap: the adapter - * @n: number of queues - * @q: pointer to first queue - * - * Release the resources of a consecutive block of offload Rx queues. - */ -void t4_free_ofld_rxqs(struct adapter *adap, int n, struct sge_ofld_rxq *q) -{ - for ( ; n; n--, q++) - if (q->rspq.desc) - free_rspq_fl(adap, &q->rspq, - q->fl.size ? &q->fl : NULL); -} - void t4_sge_free_ethofld_txq(struct adapter *adap, struct sge_eohw_txq *txq) { if (txq->q.desc) { diff --git a/drivers/net/ethernet/chelsio/cxgb4/srq.c b/drivers/net/ethernet/chelsio/cxgb4/srq.c index 9a54302bb046..a77d6ac1ee8c 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/srq.c +++ b/drivers/net/ethernet/chelsio/cxgb4/srq.c @@ -51,64 +51,6 @@ struct srq_data *t4_init_srq(int srq_size) return s; } -/* cxgb4_get_srq_entry: read the SRQ table entry - * @dev: Pointer to the net_device - * @idx: Index to the srq - * @entryp: pointer to the srq entry - * - * Sends CPL_SRQ_TABLE_REQ message for the given index. - * Contents will be returned in CPL_SRQ_TABLE_RPL message. - * - * Returns zero if the read is successful, else a error - * number will be returned. Caller should not use the srq - * entry if the return value is non-zero. - * - * - */ -int cxgb4_get_srq_entry(struct net_device *dev, - int srq_idx, struct srq_entry *entryp) -{ - struct cpl_srq_table_req *req; - struct adapter *adap; - struct sk_buff *skb; - struct srq_data *s; - int rc = -ENODEV; - - adap = netdev2adap(dev); - s = adap->srq; - - if (!(adap->flags & CXGB4_FULL_INIT_DONE) || !s) - goto out; - - skb = alloc_skb(sizeof(*req), GFP_KERNEL); - if (!skb) - return -ENOMEM; - req = (struct cpl_srq_table_req *) - __skb_put_zero(skb, sizeof(*req)); - INIT_TP_WR(req, 0); - OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SRQ_TABLE_REQ, - TID_TID_V(srq_idx) | - TID_QID_V(adap->sge.fw_evtq.abs_id))); - req->idx = srq_idx; - - mutex_lock(&s->lock); - - s->entryp = entryp; - t4_mgmt_tx(adap, skb); - - rc = wait_for_completion_timeout(&s->comp, SRQ_WAIT_TO); - if (rc) - rc = 0; - else /* !rc means we timed out */ - rc = -ETIMEDOUT; - - WARN_ON_ONCE(entryp->idx != srq_idx); - mutex_unlock(&s->lock); -out: - return rc; -} -EXPORT_SYMBOL(cxgb4_get_srq_entry); - void do_srq_table_rpl(struct adapter *adap, const struct cpl_srq_table_rpl *rpl) { diff --git a/drivers/net/ethernet/chelsio/cxgb4/srq.h b/drivers/net/ethernet/chelsio/cxgb4/srq.h index ec85cf93865a..d9f04bd5ffa3 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/srq.h +++ b/drivers/net/ethernet/chelsio/cxgb4/srq.h @@ -58,8 +58,6 @@ struct srq_data { }; struct srq_data *t4_init_srq(int srq_size); -int cxgb4_get_srq_entry(struct net_device *dev, - int srq_idx, struct srq_entry *entryp); void do_srq_table_rpl(struct adapter *adap, const struct cpl_srq_table_rpl *rpl); #endif /* __CXGB4_SRQ_H */ diff --git a/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls.h b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls.h index 7ff82b6778ba..21e0dfeff158 100644 --- a/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls.h +++ b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls.h @@ -573,7 +573,6 @@ int send_tx_flowc_wr(struct sock *sk, int compl, u32 snd_nxt, u32 rcv_nxt); void chtls_tcp_push(struct sock *sk, int flags); int chtls_push_frames(struct chtls_sock *csk, int 
comp); -int chtls_set_tcb_tflag(struct sock *sk, unsigned int bit_pos, int val); void chtls_set_tcb_field_rpl_skb(struct sock *sk, u16 word, u64 mask, u64 val, u8 cookie, int through_l2t); diff --git a/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_hw.c b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_hw.c index 1e67140b0f80..fab6df21f01c 100644 --- a/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_hw.c +++ b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_hw.c @@ -106,15 +106,6 @@ void chtls_set_tcb_field_rpl_skb(struct sock *sk, u16 word, send_or_defer(sk, tcp_sk(sk), skb, through_l2t); } -/* - * Set one of the t_flags bits in the TCB. - */ -int chtls_set_tcb_tflag(struct sock *sk, unsigned int bit_pos, int val) -{ - return chtls_set_tcb_field(sk, 1, 1ULL << bit_pos, - (u64)val << bit_pos); -} - static int chtls_set_tcb_keyid(struct sock *sk, int keyid) { return chtls_set_tcb_field(sk, 31, 0xFFFFFFFFULL, keyid); diff --git a/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_main.c b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_main.c index 455a54708be4..96fd31d75dfd 100644 --- a/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_main.c +++ b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_main.c @@ -342,8 +342,8 @@ static struct sk_buff *copy_gl_to_skb_pkt(const struct pkt_gl *gl, { struct sk_buff *skb; - /* Allocate space for cpl_pass_accpet_req which will be synthesized by - * driver. Once driver synthesizes cpl_pass_accpet_req the skb will go + /* Allocate space for cpl_pass_accept_req which will be synthesized by + * driver. Once driver synthesizes cpl_pass_accept_req the skb will go * through the regular cpl_pass_accept_req processing in TOM. */ skb = alloc_skb(gl->tot_len + sizeof(struct cpl_pass_accept_req) diff --git a/drivers/net/ethernet/cirrus/cs89x0.c b/drivers/net/ethernet/cirrus/cs89x0.c index 0a21a10a791c..fa5857923db4 100644 --- a/drivers/net/ethernet/cirrus/cs89x0.c +++ b/drivers/net/ethernet/cirrus/cs89x0.c @@ -1903,7 +1903,7 @@ static struct platform_driver cs89x0_driver = { .name = DRV_NAME, .of_match_table = of_match_ptr(cs89x0_match), }, - .remove_new = cs89x0_platform_remove, + .remove = cs89x0_platform_remove, }; module_platform_driver_probe(cs89x0_driver, cs89x0_platform_probe); diff --git a/drivers/net/ethernet/cirrus/ep93xx_eth.c b/drivers/net/ethernet/cirrus/ep93xx_eth.c index c2007cd86416..a4972457edd9 100644 --- a/drivers/net/ethernet/cirrus/ep93xx_eth.c +++ b/drivers/net/ethernet/cirrus/ep93xx_eth.c @@ -862,7 +862,7 @@ MODULE_DEVICE_TABLE(of, ep93xx_eth_of_ids); static struct platform_driver ep93xx_eth_driver = { .probe = ep93xx_eth_probe, - .remove_new = ep93xx_eth_remove, + .remove = ep93xx_eth_remove, .driver = { .name = "ep93xx-eth", .of_match_table = ep93xx_eth_of_ids, diff --git a/drivers/net/ethernet/cirrus/mac89x0.c b/drivers/net/ethernet/cirrus/mac89x0.c index 84b300fee2bb..6723df9b65d9 100644 --- a/drivers/net/ethernet/cirrus/mac89x0.c +++ b/drivers/net/ethernet/cirrus/mac89x0.c @@ -568,7 +568,7 @@ static void mac89x0_device_remove(struct platform_device *pdev) static struct platform_driver mac89x0_platform_driver = { .probe = mac89x0_device_probe, - .remove_new = mac89x0_device_remove, + .remove = mac89x0_device_remove, .driver = { .name = "mac89x0", }, diff --git a/drivers/net/ethernet/cortina/gemini.c b/drivers/net/ethernet/cortina/gemini.c index 73e1c71c5092..991e3839858b 100644 --- a/drivers/net/ethernet/cortina/gemini.c +++ b/drivers/net/ethernet/cortina/gemini.c @@ -2573,7 +2573,7 @@ 
static struct platform_driver gemini_ethernet_port_driver = { .of_match_table = gemini_ethernet_port_of_match, }, .probe = gemini_ethernet_port_probe, - .remove_new = gemini_ethernet_port_remove, + .remove = gemini_ethernet_port_remove, }; static int gemini_ethernet_probe(struct platform_device *pdev) @@ -2637,7 +2637,7 @@ static struct platform_driver gemini_ethernet_driver = { .of_match_table = gemini_ethernet_of_match, }, .probe = gemini_ethernet_probe, - .remove_new = gemini_ethernet_remove, + .remove = gemini_ethernet_remove, }; static int __init gemini_ethernet_module_init(void) diff --git a/drivers/net/ethernet/davicom/dm9000.c b/drivers/net/ethernet/davicom/dm9000.c index 150cc94ae9f8..8735e333034c 100644 --- a/drivers/net/ethernet/davicom/dm9000.c +++ b/drivers/net/ethernet/davicom/dm9000.c @@ -1799,7 +1799,7 @@ static struct platform_driver dm9000_driver = { .of_match_table = of_match_ptr(dm9000_of_matches), }, .probe = dm9000_probe, - .remove_new = dm9000_drv_remove, + .remove = dm9000_drv_remove, }; module_platform_driver(dm9000_driver); diff --git a/drivers/net/ethernet/dlink/Kconfig b/drivers/net/ethernet/dlink/Kconfig index 0d77f84c8e7b..e9e13654812c 100644 --- a/drivers/net/ethernet/dlink/Kconfig +++ b/drivers/net/ethernet/dlink/Kconfig @@ -32,24 +32,4 @@ config DL2K To compile this driver as a module, choose M here: the module will be called dl2k. -config SUNDANCE - tristate "Sundance Alta support" - depends on PCI - select CRC32 - select MII - help - This driver is for the Sundance "Alta" chip. - More specific information and updates are available from - <http://www.scyld.com/network/sundance.html>. - -config SUNDANCE_MMIO - bool "Use MMIO instead of PIO" - depends on SUNDANCE - help - Enable memory-mapped I/O for interaction with Sundance NIC registers. - Do NOT enable this by default, PIO (enabled when MMIO is disabled) - is known to solve bugs on certain chips. - - If unsure, say N. - endif # NET_VENDOR_DLINK diff --git a/drivers/net/ethernet/dlink/Makefile b/drivers/net/ethernet/dlink/Makefile index 3ff503c747db..38c236eb6007 100644 --- a/drivers/net/ethernet/dlink/Makefile +++ b/drivers/net/ethernet/dlink/Makefile @@ -4,4 +4,3 @@ # obj-$(CONFIG_DL2K) += dl2k.o -obj-$(CONFIG_SUNDANCE) += sundance.o diff --git a/drivers/net/ethernet/dlink/sundance.c b/drivers/net/ethernet/dlink/sundance.c deleted file mode 100644 index 8af5ecec7d61..000000000000 --- a/drivers/net/ethernet/dlink/sundance.c +++ /dev/null @@ -1,1985 +0,0 @@ -/* sundance.c: A Linux device driver for the Sundance ST201 "Alta". */ -/* - Written 1999-2000 by Donald Becker. - - This software may be used and distributed according to the terms of - the GNU General Public License (GPL), incorporated herein by reference. - Drivers based on or derived from this code fall under the GPL and must - retain the authorship, copyright and license notice. This file is not - a complete program and may only be used when the entire operating - system is licensed under the GPL. - - The author may be reached as becker@scyld.com, or C/O - Scyld Computing Corporation - 410 Severn Ave., Suite 210 - Annapolis MD 21403 - - Support and updates available at - http://www.scyld.com/network/sundance.html - [link no longer provides useful info -jgarzik] - Archives of the mailing list are still available at - https://www.beowulf.org/pipermail/netdrivers/ - -*/ - -#define DRV_NAME "sundance" - -/* The user-configurable values. - These may be modified when a driver module is loaded.*/ -static int debug = 1; /* 1 normal messages, 0 quiet .. 
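The .remove_new to .remove renames scattered through this series (octeon_mgmt earlier, and cs89x0, ep93xx-eth, mac89x0, gemini and dm9000 in the hunks around this one) finish the platform bus conversion to a void-returning remove callback: .remove_new was the transitional member name while the int-returning .remove still existed, and now that .remove itself carries the void prototype, drivers are renamed back. A minimal sketch of the resulting driver shape (names are illustrative):

    #include <linux/module.h>
    #include <linux/platform_device.h>

    static int foo_probe(struct platform_device *pdev)
    {
        return 0;       /* probe may still fail and report an error */
    }

    static void foo_remove(struct platform_device *pdev)
    {
        /* tear down; the void return encodes "remove cannot fail" */
    }

    static struct platform_driver foo_driver = {
        .driver = { .name = "foo" },
        .probe  = foo_probe,
        .remove = foo_remove,   /* was .remove_new during the transition */
    };
    module_platform_driver(foo_driver);

    MODULE_DESCRIPTION("Illustrative platform driver skeleton");
    MODULE_LICENSE("GPL");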
7 verbose. */ -/* Maximum number of multicast addresses to filter (vs. rx-all-multicast). - Typical is a 64 element hash table based on the Ethernet CRC. */ -static const int multicast_filter_limit = 32; - -/* Set the copy breakpoint for the copy-only-tiny-frames scheme. - Setting to > 1518 effectively disables this feature. - This chip can receive into offset buffers, so the Alpha does not - need a copy-align. */ -static int rx_copybreak; -static int flowctrl=1; - -/* media[] specifies the media type the NIC operates at. - autosense Autosensing active media. - 10mbps_hd 10Mbps half duplex. - 10mbps_fd 10Mbps full duplex. - 100mbps_hd 100Mbps half duplex. - 100mbps_fd 100Mbps full duplex. - 0 Autosensing active media. - 1 10Mbps half duplex. - 2 10Mbps full duplex. - 3 100Mbps half duplex. - 4 100Mbps full duplex. -*/ -#define MAX_UNITS 8 -static char *media[MAX_UNITS]; - - -/* Operational parameters that are set at compile time. */ - -/* Keep the ring sizes a power of two for compile efficiency. - The compiler will convert <unsigned>'%'<2^N> into a bit mask. - Making the Tx ring too large decreases the effectiveness of channel - bonding and packet priority, and more than 128 requires modifying the - Tx error recovery. - Large receive rings merely waste memory. */ -#define TX_RING_SIZE 32 -#define TX_QUEUE_LEN (TX_RING_SIZE - 1) /* Limit ring entries actually used. */ -#define RX_RING_SIZE 64 -#define RX_BUDGET 32 -#define TX_TOTAL_SIZE TX_RING_SIZE*sizeof(struct netdev_desc) -#define RX_TOTAL_SIZE RX_RING_SIZE*sizeof(struct netdev_desc) - -/* Operational parameters that usually are not changed. */ -/* Time in jiffies before concluding the transmitter is hung. */ -#define TX_TIMEOUT (4*HZ) -#define PKT_BUF_SZ 1536 /* Size of each temporary Rx buffer.*/ - -/* Include files, designed to support most kernel versions 2.0.0 and later. */ -#include <linux/module.h> -#include <linux/kernel.h> -#include <linux/string.h> -#include <linux/timer.h> -#include <linux/errno.h> -#include <linux/ioport.h> -#include <linux/interrupt.h> -#include <linux/pci.h> -#include <linux/netdevice.h> -#include <linux/etherdevice.h> -#include <linux/skbuff.h> -#include <linux/init.h> -#include <linux/bitops.h> -#include <linux/uaccess.h> -#include <asm/processor.h> /* Processor type for cache alignment. */ -#include <asm/io.h> -#include <linux/delay.h> -#include <linux/spinlock.h> -#include <linux/dma-mapping.h> -#include <linux/crc32.h> -#include <linux/ethtool.h> -#include <linux/mii.h> - -MODULE_AUTHOR("Donald Becker <becker@scyld.com>"); -MODULE_DESCRIPTION("Sundance Alta Ethernet driver"); -MODULE_LICENSE("GPL"); - -module_param(debug, int, 0); -module_param(rx_copybreak, int, 0); -module_param_array(media, charp, NULL, 0); -module_param(flowctrl, int, 0); -MODULE_PARM_DESC(debug, "Sundance Alta debug level (0-5)"); -MODULE_PARM_DESC(rx_copybreak, "Sundance Alta copy breakpoint for copy-only-tiny-frames"); -MODULE_PARM_DESC(flowctrl, "Sundance Alta flow control [0|1]"); - -/* - Theory of Operation - -I. Board Compatibility - -This driver is designed for the Sundance Technologies "Alta" ST201 chip. - -II. Board-specific settings - -III. Driver operation - -IIIa. Ring buffers - -This driver uses two statically allocated fixed-size descriptor lists -formed into rings by a branch from the final descriptor to the beginning of -the list. The ring sizes are set at compile time by RX/TX_RING_SIZE. 
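The ring description in IIIa above is implemented purely in software: the descriptors live in two fixed arrays, and the rings exist only because each entry's next pointer is written to point at the following entry, with the last one wrapping to the first. The Rx half of init_ring() further down does exactly this; reduced to the linking step (ring_link is an illustrative wrapper, not a driver function):

    /* Chain RX_RING_SIZE descriptors into a ring: entry i points at
     * entry i + 1, and the modulo makes the last entry wrap to entry 0.
     */
    static void ring_link(struct netdev_private *np)
    {
        int i;

        for (i = 0; i < RX_RING_SIZE; i++)
            np->rx_ring[i].next_desc = cpu_to_le32(np->rx_ring_dma +
                    ((i + 1) % RX_RING_SIZE) * sizeof(*np->rx_ring));
    }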
-Some chips explicitly use only 2^N sized rings, while others use a -'next descriptor' pointer that the driver forms into rings. - -IIIb/c. Transmit/Receive Structure - -This driver uses a zero-copy receive and transmit scheme. -The driver allocates full frame size skbuffs for the Rx ring buffers at -open() time and passes the skb->data field to the chip as receive data -buffers. When an incoming frame is less than RX_COPYBREAK bytes long, -a fresh skbuff is allocated and the frame is copied to the new skbuff. -When the incoming frame is larger, the skbuff is passed directly up the -protocol stack. Buffers consumed this way are replaced by newly allocated -skbuffs in a later phase of receives. - -The RX_COPYBREAK value is chosen to trade-off the memory wasted by -using a full-sized skbuff for small frames vs. the copying costs of larger -frames. New boards are typically used in generously configured machines -and the underfilled buffers have negligible impact compared to the benefit of -a single allocation size, so the default value of zero results in never -copying packets. When copying is done, the cost is usually mitigated by using -a combined copy/checksum routine. Copying also preloads the cache, which is -most useful with small frames. - -A subtle aspect of the operation is that the IP header at offset 14 in an -ethernet frame isn't longword aligned for further processing. -Unaligned buffers are permitted by the Sundance hardware, so -frames are received into the skbuff at an offset of "+2", 16-byte aligning -the IP header. - -IIId. Synchronization - -The driver runs as two independent, single-threaded flows of control. One -is the send-packet routine, which enforces single-threaded use by the -dev->tbusy flag. The other thread is the interrupt handler, which is single -threaded by the hardware and interrupt handling software. - -The send packet thread has partial control over the Tx ring and 'dev->tbusy' -flag. It sets the tbusy flag whenever it's queuing a Tx packet. If the next -queue slot is empty, it clears the tbusy flag when finished otherwise it sets -the 'lp->tx_full' flag. - -The interrupt handler has exclusive control over the Rx ring and records stats -from the Tx ring. After reaping the stats, it marks the Tx queue entry as -empty by incrementing the dirty_tx mark. Iff the 'lp->tx_full' flag is set, it -clears both the tx_full and tbusy flags. - -IV. Notes - -IVb. References - -The Sundance ST201 datasheet, preliminary version. -The Kendin KS8723 datasheet, preliminary version. -The ICplus IP100 datasheet, preliminary version. -http://www.scyld.com/expert/100mbps.html -http://www.scyld.com/expert/NWay.html - -IVc. Errata - -*/ - -/* Work-around for Kendin chip bugs. 
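The copy-vs-flip receive scheme described in IIIb/c comes down to one branch per packet: frames shorter than rx_copybreak are copied into a freshly allocated, minimally sized skbuff so the full-size ring buffer can be handed straight back to the hardware, while larger frames give the ring skbuff itself to the stack and are replaced during a later refill pass. Condensed from rx_poll() near the end of the file (ring_skb stands in for np->rx_skbuff[entry]):

    if (pkt_len < rx_copybreak &&
        (skb = netdev_alloc_skb(dev, pkt_len + 2)) != NULL) {
        skb_reserve(skb, 2);            /* align the IP header      */
        skb_copy_to_linear_data(skb, ring_skb->data, pkt_len);
        skb_put(skb, pkt_len);          /* ring buffer stays mapped */
    } else {
        /* unmap, then pass the ring skbuff itself upstream */
        skb_put(skb = ring_skb, pkt_len);
        ring_skb = NULL;                /* refill_rx() replaces it  */
    }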
*/ -#ifndef CONFIG_SUNDANCE_MMIO -#define USE_IO_OPS 1 -#endif - -static const struct pci_device_id sundance_pci_tbl[] = { - { 0x1186, 0x1002, 0x1186, 0x1002, 0, 0, 0 }, - { 0x1186, 0x1002, 0x1186, 0x1003, 0, 0, 1 }, - { 0x1186, 0x1002, 0x1186, 0x1012, 0, 0, 2 }, - { 0x1186, 0x1002, 0x1186, 0x1040, 0, 0, 3 }, - { 0x1186, 0x1002, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 4 }, - { 0x13F0, 0x0201, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 5 }, - { 0x13F0, 0x0200, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 6 }, - { } -}; -MODULE_DEVICE_TABLE(pci, sundance_pci_tbl); - -enum { - netdev_io_size = 128 -}; - -struct pci_id_info { - const char *name; -}; -static const struct pci_id_info pci_id_tbl[] = { - {"D-Link DFE-550TX FAST Ethernet Adapter"}, - {"D-Link DFE-550FX 100Mbps Fiber-optics Adapter"}, - {"D-Link DFE-580TX 4 port Server Adapter"}, - {"D-Link DFE-530TXS FAST Ethernet Adapter"}, - {"D-Link DL10050-based FAST Ethernet Adapter"}, - {"Sundance Technology Alta"}, - {"IC Plus Corporation IP100A FAST Ethernet Adapter"}, - { } /* terminate list. */ -}; - -/* This driver was written to use PCI memory space, however x86-oriented - hardware often uses I/O space accesses. */ - -/* Offsets to the device registers. - Unlike software-only systems, device drivers interact with complex hardware. - It's not useful to define symbolic names for every register bit in the - device. The name can only partially document the semantics and make - the driver longer and more difficult to read. - In general, only the important configuration values or bits changed - multiple times should be defined symbolically. -*/ -enum alta_offsets { - DMACtrl = 0x00, - TxListPtr = 0x04, - TxDMABurstThresh = 0x08, - TxDMAUrgentThresh = 0x09, - TxDMAPollPeriod = 0x0a, - RxDMAStatus = 0x0c, - RxListPtr = 0x10, - DebugCtrl0 = 0x1a, - DebugCtrl1 = 0x1c, - RxDMABurstThresh = 0x14, - RxDMAUrgentThresh = 0x15, - RxDMAPollPeriod = 0x16, - LEDCtrl = 0x1a, - ASICCtrl = 0x30, - EEData = 0x34, - EECtrl = 0x36, - FlashAddr = 0x40, - FlashData = 0x44, - WakeEvent = 0x45, - TxStatus = 0x46, - TxFrameId = 0x47, - DownCounter = 0x18, - IntrClear = 0x4a, - IntrEnable = 0x4c, - IntrStatus = 0x4e, - MACCtrl0 = 0x50, - MACCtrl1 = 0x52, - StationAddr = 0x54, - MaxFrameSize = 0x5A, - RxMode = 0x5c, - MIICtrl = 0x5e, - MulticastFilter0 = 0x60, - MulticastFilter1 = 0x64, - RxOctetsLow = 0x68, - RxOctetsHigh = 0x6a, - TxOctetsLow = 0x6c, - TxOctetsHigh = 0x6e, - TxFramesOK = 0x70, - RxFramesOK = 0x72, - StatsCarrierError = 0x74, - StatsLateColl = 0x75, - StatsMultiColl = 0x76, - StatsOneColl = 0x77, - StatsTxDefer = 0x78, - RxMissed = 0x79, - StatsTxXSDefer = 0x7a, - StatsTxAbort = 0x7b, - StatsBcastTx = 0x7c, - StatsBcastRx = 0x7d, - StatsMcastTx = 0x7e, - StatsMcastRx = 0x7f, - /* Aliased and bogus values! */ - RxStatus = 0x0c, -}; - -#define ASIC_HI_WORD(x) ((x) + 2) - -enum ASICCtrl_HiWord_bit { - GlobalReset = 0x0001, - RxReset = 0x0002, - TxReset = 0x0004, - DMAReset = 0x0008, - FIFOReset = 0x0010, - NetworkReset = 0x0020, - HostReset = 0x0040, - ResetBusy = 0x0400, -}; - -/* Bits in the interrupt status/mask registers. */ -enum intr_status_bits { - IntrSummary=0x0001, IntrPCIErr=0x0002, IntrMACCtrl=0x0008, - IntrTxDone=0x0004, IntrRxDone=0x0010, IntrRxStart=0x0020, - IntrDrvRqst=0x0040, - StatsMax=0x0080, LinkChange=0x0100, - IntrTxDMADone=0x0200, IntrRxDMADone=0x0400, -}; - -/* Bits in the RxMode register. */ -enum rx_mode_bits { - AcceptAllIPMulti=0x20, AcceptMultiHash=0x10, AcceptAll=0x08, - AcceptBroadcast=0x04, AcceptMulticast=0x02, AcceptMyPhys=0x01, -}; -/* Bits in MACCtrl. 
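sundance_pci_tbl above is the standard PCI match-table idiom: each entry pairs vendor/device and optional subsystem IDs with a driver_data value, which sundance_probe1() later uses as chip_idx to index the human-readable pci_id_tbl, and MODULE_DEVICE_TABLE(pci, ...) exports the list so userspace can autoload the module on hotplug. A stripped-down sketch of the same idiom (the foo names are illustrative; the one ID shown is from the table above):

    static const struct pci_device_id foo_pci_tbl[] = {
        /* vendor, device, subvendor, subdevice, class, mask, driver_data */
        { 0x13f0, 0x0201, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
        { }     /* all-zero terminator */
    };
    MODULE_DEVICE_TABLE(pci, foo_pci_tbl);

    static int foo_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
    {
        unsigned long variant = ent->driver_data;  /* per-ID board index */

        (void)variant;  /* would select the name/features for this board */
        return 0;
    }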
*/ -enum mac_ctrl0_bits { - EnbFullDuplex=0x20, EnbRcvLargeFrame=0x40, - EnbFlowCtrl=0x100, EnbPassRxCRC=0x200, -}; -enum mac_ctrl1_bits { - StatsEnable=0x0020, StatsDisable=0x0040, StatsEnabled=0x0080, - TxEnable=0x0100, TxDisable=0x0200, TxEnabled=0x0400, - RxEnable=0x0800, RxDisable=0x1000, RxEnabled=0x2000, -}; - -/* Bits in WakeEvent register. */ -enum wake_event_bits { - WakePktEnable = 0x01, - MagicPktEnable = 0x02, - LinkEventEnable = 0x04, - WolEnable = 0x80, -}; - -/* The Rx and Tx buffer descriptors. */ -/* Note that using only 32 bit fields simplifies conversion to big-endian - architectures. */ -struct netdev_desc { - __le32 next_desc; - __le32 status; - struct desc_frag { __le32 addr, length; } frag; -}; - -/* Bits in netdev_desc.status */ -enum desc_status_bits { - DescOwn=0x8000, - DescEndPacket=0x4000, - DescEndRing=0x2000, - LastFrag=0x80000000, - DescIntrOnTx=0x8000, - DescIntrOnDMADone=0x80000000, - DisableAlign = 0x00000001, -}; - -#define PRIV_ALIGN 15 /* Required alignment mask */ -/* Use __attribute__((aligned (L1_CACHE_BYTES))) to maintain alignment - within the structure. */ -#define MII_CNT 4 -struct netdev_private { - /* Descriptor rings first for alignment. */ - struct netdev_desc *rx_ring; - struct netdev_desc *tx_ring; - struct sk_buff* rx_skbuff[RX_RING_SIZE]; - struct sk_buff* tx_skbuff[TX_RING_SIZE]; - dma_addr_t tx_ring_dma; - dma_addr_t rx_ring_dma; - struct timer_list timer; /* Media monitoring timer. */ - struct net_device *ndev; /* backpointer */ - /* ethtool extra stats */ - struct { - u64 tx_multiple_collisions; - u64 tx_single_collisions; - u64 tx_late_collisions; - u64 tx_deferred; - u64 tx_deferred_excessive; - u64 tx_aborted; - u64 tx_bcasts; - u64 rx_bcasts; - u64 tx_mcasts; - u64 rx_mcasts; - } xstats; - /* Frequently used values: keep some adjacent for cache effect. */ - spinlock_t lock; - int msg_enable; - int chip_id; - unsigned int cur_rx, dirty_rx; /* Producer/consumer ring indices */ - unsigned int rx_buf_sz; /* Based on MTU+slack. */ - struct netdev_desc *last_tx; /* Last Tx descriptor used. */ - unsigned int cur_tx, dirty_tx; - /* These values are keep track of the transceiver/media in use. */ - unsigned int flowctrl:1; - unsigned int default_port:4; /* Last dev->if_port value. */ - unsigned int an_enable:1; - unsigned int speed; - unsigned int wol_enabled:1; /* Wake on LAN enabled */ - struct tasklet_struct rx_tasklet; - struct tasklet_struct tx_tasklet; - int budget; - int cur_task; - /* Multicast and receive mode. */ - spinlock_t mcastlock; /* SMP lock multicast updates. */ - u16 mcast_filter[4]; - /* MII transceiver section. */ - struct mii_if_info mii_if; - int mii_preamble_required; - unsigned char phys[MII_CNT]; /* MII device addresses, only first one used. */ - struct pci_dev *pci_dev; - void __iomem *base; - spinlock_t statlock; -}; - -/* The station address location in the EEPROM. 
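The netdev_desc layout above keeps every field __le32 because the DMA engine reads descriptors in little-endian byte order no matter what the CPU is; the note about 32-bit-only fields is about making that conversion uniform. The discipline this imposes on the rest of the driver, in two lines each way (mapping and len are placeholders):

    /* stores: convert CPU byte order to the device's little-endian */
    desc->frag.addr   = cpu_to_le32(mapping);
    desc->frag.length = cpu_to_le32(len | LastFrag);

    /* loads: convert back before testing status bits */
    if (le32_to_cpu(desc->status) & DescOwn)
        return;         /* descriptor still owned by the NIC */

On little-endian machines these helpers compile to nothing; on big-endian ones they byte-swap, and sparse can type-check __le32 misuse either way.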
*/ -#define EEPROM_SA_OFFSET 0x10 -#define DEFAULT_INTR (IntrRxDMADone | IntrPCIErr | \ - IntrDrvRqst | IntrTxDone | StatsMax | \ - LinkChange) - -static int change_mtu(struct net_device *dev, int new_mtu); -static int eeprom_read(void __iomem *ioaddr, int location); -static int mdio_read(struct net_device *dev, int phy_id, int location); -static void mdio_write(struct net_device *dev, int phy_id, int location, int value); -static int mdio_wait_link(struct net_device *dev, int wait); -static int netdev_open(struct net_device *dev); -static void check_duplex(struct net_device *dev); -static void netdev_timer(struct timer_list *t); -static void tx_timeout(struct net_device *dev, unsigned int txqueue); -static void init_ring(struct net_device *dev); -static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev); -static int reset_tx (struct net_device *dev); -static irqreturn_t intr_handler(int irq, void *dev_instance); -static void rx_poll(struct tasklet_struct *t); -static void tx_poll(struct tasklet_struct *t); -static void refill_rx (struct net_device *dev); -static void netdev_error(struct net_device *dev, int intr_status); -static void netdev_error(struct net_device *dev, int intr_status); -static void set_rx_mode(struct net_device *dev); -static int __set_mac_addr(struct net_device *dev); -static int sundance_set_mac_addr(struct net_device *dev, void *data); -static struct net_device_stats *get_stats(struct net_device *dev); -static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd); -static int netdev_close(struct net_device *dev); -static const struct ethtool_ops ethtool_ops; - -static void sundance_reset(struct net_device *dev, unsigned long reset_cmd) -{ - struct netdev_private *np = netdev_priv(dev); - void __iomem *ioaddr = np->base + ASICCtrl; - int countdown; - - /* ST201 documentation states ASICCtrl is a 32bit register */ - iowrite32 (reset_cmd | ioread32 (ioaddr), ioaddr); - /* ST201 documentation states reset can take up to 1 ms */ - countdown = 10 + 1; - while (ioread32 (ioaddr) & (ResetBusy << 16)) { - if (--countdown == 0) { - printk(KERN_WARNING "%s : reset not completed !!\n", dev->name); - break; - } - udelay(100); - } -} - -#ifdef CONFIG_NET_POLL_CONTROLLER -static void sundance_poll_controller(struct net_device *dev) -{ - struct netdev_private *np = netdev_priv(dev); - - disable_irq(np->pci_dev->irq); - intr_handler(np->pci_dev->irq, dev); - enable_irq(np->pci_dev->irq); -} -#endif - -static const struct net_device_ops netdev_ops = { - .ndo_open = netdev_open, - .ndo_stop = netdev_close, - .ndo_start_xmit = start_tx, - .ndo_get_stats = get_stats, - .ndo_set_rx_mode = set_rx_mode, - .ndo_eth_ioctl = netdev_ioctl, - .ndo_tx_timeout = tx_timeout, - .ndo_change_mtu = change_mtu, - .ndo_set_mac_address = sundance_set_mac_addr, - .ndo_validate_addr = eth_validate_addr, -#ifdef CONFIG_NET_POLL_CONTROLLER - .ndo_poll_controller = sundance_poll_controller, -#endif -}; - -static int sundance_probe1(struct pci_dev *pdev, - const struct pci_device_id *ent) -{ - struct net_device *dev; - struct netdev_private *np; - static int card_idx; - int chip_idx = ent->driver_data; - int irq; - int i; - void __iomem *ioaddr; - u16 mii_ctl; - void *ring_space; - dma_addr_t ring_dma; -#ifdef USE_IO_OPS - int bar = 0; -#else - int bar = 1; -#endif - int phy, phy_end, phy_idx = 0; - __le16 addr[ETH_ALEN / 2]; - - if (pci_enable_device(pdev)) - return -EIO; - pci_set_master(pdev); - - irq = pdev->irq; - - dev = alloc_etherdev(sizeof(*np)); - if (!dev) - return 
-ENOMEM; - SET_NETDEV_DEV(dev, &pdev->dev); - - if (pci_request_regions(pdev, DRV_NAME)) - goto err_out_netdev; - - ioaddr = pci_iomap(pdev, bar, netdev_io_size); - if (!ioaddr) - goto err_out_res; - - for (i = 0; i < 3; i++) - addr[i] = - cpu_to_le16(eeprom_read(ioaddr, i + EEPROM_SA_OFFSET)); - eth_hw_addr_set(dev, (u8 *)addr); - - np = netdev_priv(dev); - np->ndev = dev; - np->base = ioaddr; - np->pci_dev = pdev; - np->chip_id = chip_idx; - np->msg_enable = (1 << debug) - 1; - spin_lock_init(&np->lock); - spin_lock_init(&np->statlock); - tasklet_setup(&np->rx_tasklet, rx_poll); - tasklet_setup(&np->tx_tasklet, tx_poll); - - ring_space = dma_alloc_coherent(&pdev->dev, TX_TOTAL_SIZE, - &ring_dma, GFP_KERNEL); - if (!ring_space) - goto err_out_cleardev; - np->tx_ring = (struct netdev_desc *)ring_space; - np->tx_ring_dma = ring_dma; - - ring_space = dma_alloc_coherent(&pdev->dev, RX_TOTAL_SIZE, - &ring_dma, GFP_KERNEL); - if (!ring_space) - goto err_out_unmap_tx; - np->rx_ring = (struct netdev_desc *)ring_space; - np->rx_ring_dma = ring_dma; - - np->mii_if.dev = dev; - np->mii_if.mdio_read = mdio_read; - np->mii_if.mdio_write = mdio_write; - np->mii_if.phy_id_mask = 0x1f; - np->mii_if.reg_num_mask = 0x1f; - - /* The chip-specific entries in the device structure. */ - dev->netdev_ops = &netdev_ops; - dev->ethtool_ops = &ethtool_ops; - dev->watchdog_timeo = TX_TIMEOUT; - - /* MTU range: 68 - 8191 */ - dev->min_mtu = ETH_MIN_MTU; - dev->max_mtu = 8191; - - pci_set_drvdata(pdev, dev); - - i = register_netdev(dev); - if (i) - goto err_out_unmap_rx; - - printk(KERN_INFO "%s: %s at %p, %pM, IRQ %d.\n", - dev->name, pci_id_tbl[chip_idx].name, ioaddr, - dev->dev_addr, irq); - - np->phys[0] = 1; /* Default setting */ - np->mii_preamble_required++; - - /* - * It seems some phys doesn't deal well with address 0 being accessed - * first - */ - if (sundance_pci_tbl[np->chip_id].device == 0x0200) { - phy = 0; - phy_end = 31; - } else { - phy = 1; - phy_end = 32; /* wraps to zero, due to 'phy & 0x1f' */ - } - for (; phy <= phy_end && phy_idx < MII_CNT; phy++) { - int phyx = phy & 0x1f; - int mii_status = mdio_read(dev, phyx, MII_BMSR); - if (mii_status != 0xffff && mii_status != 0x0000) { - np->phys[phy_idx++] = phyx; - np->mii_if.advertising = mdio_read(dev, phyx, MII_ADVERTISE); - if ((mii_status & 0x0040) == 0) - np->mii_preamble_required++; - printk(KERN_INFO "%s: MII PHY found at address %d, status " - "0x%4.4x advertising %4.4x.\n", - dev->name, phyx, mii_status, np->mii_if.advertising); - } - } - np->mii_preamble_required--; - - if (phy_idx == 0) { - printk(KERN_INFO "%s: No MII transceiver found, aborting.
ASIC status %x\n", - dev->name, ioread32(ioaddr + ASICCtrl)); - goto err_out_unregister; - } - - np->mii_if.phy_id = np->phys[0]; - - /* Parse override configuration */ - np->an_enable = 1; - if (card_idx < MAX_UNITS) { - if (media[card_idx] != NULL) { - np->an_enable = 0; - if (strcmp (media[card_idx], "100mbps_fd") == 0 || - strcmp (media[card_idx], "4") == 0) { - np->speed = 100; - np->mii_if.full_duplex = 1; - } else if (strcmp (media[card_idx], "100mbps_hd") == 0 || - strcmp (media[card_idx], "3") == 0) { - np->speed = 100; - np->mii_if.full_duplex = 0; - } else if (strcmp (media[card_idx], "10mbps_fd") == 0 || - strcmp (media[card_idx], "2") == 0) { - np->speed = 10; - np->mii_if.full_duplex = 1; - } else if (strcmp (media[card_idx], "10mbps_hd") == 0 || - strcmp (media[card_idx], "1") == 0) { - np->speed = 10; - np->mii_if.full_duplex = 0; - } else { - np->an_enable = 1; - } - } - if (flowctrl == 1) - np->flowctrl = 1; - } - - /* Fibre PHY? */ - if (ioread32 (ioaddr + ASICCtrl) & 0x80) { - /* Default 100Mbps Full */ - if (np->an_enable) { - np->speed = 100; - np->mii_if.full_duplex = 1; - np->an_enable = 0; - } - } - /* Reset PHY */ - mdio_write (dev, np->phys[0], MII_BMCR, BMCR_RESET); - mdelay (300); - /* If flow control enabled, we need to advertise it.*/ - if (np->flowctrl) - mdio_write (dev, np->phys[0], MII_ADVERTISE, np->mii_if.advertising | 0x0400); - mdio_write (dev, np->phys[0], MII_BMCR, BMCR_ANENABLE|BMCR_ANRESTART); - /* Force media type */ - if (!np->an_enable) { - mii_ctl = 0; - mii_ctl |= (np->speed == 100) ? BMCR_SPEED100 : 0; - mii_ctl |= (np->mii_if.full_duplex) ? BMCR_FULLDPLX : 0; - mdio_write (dev, np->phys[0], MII_BMCR, mii_ctl); - printk (KERN_INFO "Override speed=%d, %s duplex\n", - np->speed, np->mii_if.full_duplex ? "Full" : "Half"); - - } - - /* Perhaps move the reset here? */ - /* Reset the chip to erase previous misconfiguration. */ - if (netif_msg_hw(np)) - printk("ASIC Control is %x.\n", ioread32(ioaddr + ASICCtrl)); - sundance_reset(dev, 0x00ff << 16); - if (netif_msg_hw(np)) - printk("ASIC Control is now %x.\n", ioread32(ioaddr + ASICCtrl)); - - card_idx++; - return 0; - -err_out_unregister: - unregister_netdev(dev); -err_out_unmap_rx: - dma_free_coherent(&pdev->dev, RX_TOTAL_SIZE, - np->rx_ring, np->rx_ring_dma); -err_out_unmap_tx: - dma_free_coherent(&pdev->dev, TX_TOTAL_SIZE, - np->tx_ring, np->tx_ring_dma); -err_out_cleardev: - pci_iounmap(pdev, ioaddr); -err_out_res: - pci_release_regions(pdev); -err_out_netdev: - free_netdev (dev); - return -ENODEV; -} - -static int change_mtu(struct net_device *dev, int new_mtu) -{ - if (netif_running(dev)) - return -EBUSY; - WRITE_ONCE(dev->mtu, new_mtu); - return 0; -} - -#define eeprom_delay(ee_addr) ioread32(ee_addr) -/* Read the EEPROM and MII Management Data I/O (MDIO) interfaces. */ -static int eeprom_read(void __iomem *ioaddr, int location) -{ - int boguscnt = 10000; /* Typical 1900 ticks. */ - iowrite16(0x0200 | (location & 0xff), ioaddr + EECtrl); - do { - eeprom_delay(ioaddr + EECtrl); - if (! (ioread16(ioaddr + EECtrl) & 0x8000)) { - return ioread16(ioaddr + EEData); - } - } while (--boguscnt > 0); - return 0; -} - -/* MII transceiver control section. - Read and write the MII registers using software-generated serial - MDIO protocol. See the MII specifications or DP83840A data sheet - for details. - - The maximum data clock rate is 2.5 Mhz. The minimum timing is usually - met by back-to-back 33Mhz PCI cycles. 
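The MDIO routines that follow bit-bang IEEE 802.3 clause 22 management frames: a preamble of ones, a 01 start delimiter, a 2-bit opcode (10 for read, 01 for write), 5 PHY address bits, 5 register address bits, a turnaround, then 16 data bits. Decoding the constant mdio_read() shifts out (annotation added here; only the low 16 bits are clocked, MSB first):

    /* mii_cmd = (0xf6 << 10) | (phy_id << 5) | location
     *
     *   bits 15..14   11        trailing preamble ones
     *   bits 13..12   01        start delimiter (ST)
     *   bits 11..10   10        opcode: read
     *   bits  9..5    phy_id    PHY address
     *   bits  4..0    location  register address
     *
     * The turnaround and the 16 data bits are then clocked in by the
     * second loop ("the two transition, 16 data, and wire-idle bits").
     */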
*/ -#define mdio_delay() ioread8(mdio_addr) - -enum mii_reg_bits { - MDIO_ShiftClk=0x0001, MDIO_Data=0x0002, MDIO_EnbOutput=0x0004, -}; -#define MDIO_EnbIn (0) -#define MDIO_WRITE0 (MDIO_EnbOutput) -#define MDIO_WRITE1 (MDIO_Data | MDIO_EnbOutput) - -/* Generate the preamble required for initial synchronization and - a few older transceivers. */ -static void mdio_sync(void __iomem *mdio_addr) -{ - int bits = 32; - - /* Establish sync by sending at least 32 logic ones. */ - while (--bits >= 0) { - iowrite8(MDIO_WRITE1, mdio_addr); - mdio_delay(); - iowrite8(MDIO_WRITE1 | MDIO_ShiftClk, mdio_addr); - mdio_delay(); - } -} - -static int mdio_read(struct net_device *dev, int phy_id, int location) -{ - struct netdev_private *np = netdev_priv(dev); - void __iomem *mdio_addr = np->base + MIICtrl; - int mii_cmd = (0xf6 << 10) | (phy_id << 5) | location; - int i, retval = 0; - - if (np->mii_preamble_required) - mdio_sync(mdio_addr); - - /* Shift the read command bits out. */ - for (i = 15; i >= 0; i--) { - int dataval = (mii_cmd & (1 << i)) ? MDIO_WRITE1 : MDIO_WRITE0; - - iowrite8(dataval, mdio_addr); - mdio_delay(); - iowrite8(dataval | MDIO_ShiftClk, mdio_addr); - mdio_delay(); - } - /* Read the two transition, 16 data, and wire-idle bits. */ - for (i = 19; i > 0; i--) { - iowrite8(MDIO_EnbIn, mdio_addr); - mdio_delay(); - retval = (retval << 1) | ((ioread8(mdio_addr) & MDIO_Data) ? 1 : 0); - iowrite8(MDIO_EnbIn | MDIO_ShiftClk, mdio_addr); - mdio_delay(); - } - return (retval>>1) & 0xffff; -} - -static void mdio_write(struct net_device *dev, int phy_id, int location, int value) -{ - struct netdev_private *np = netdev_priv(dev); - void __iomem *mdio_addr = np->base + MIICtrl; - int mii_cmd = (0x5002 << 16) | (phy_id << 23) | (location<<18) | value; - int i; - - if (np->mii_preamble_required) - mdio_sync(mdio_addr); - - /* Shift the command bits out. */ - for (i = 31; i >= 0; i--) { - int dataval = (mii_cmd & (1 << i)) ? MDIO_WRITE1 : MDIO_WRITE0; - - iowrite8(dataval, mdio_addr); - mdio_delay(); - iowrite8(dataval | MDIO_ShiftClk, mdio_addr); - mdio_delay(); - } - /* Clear out extra bits. */ - for (i = 2; i > 0; i--) { - iowrite8(MDIO_EnbIn, mdio_addr); - mdio_delay(); - iowrite8(MDIO_EnbIn | MDIO_ShiftClk, mdio_addr); - mdio_delay(); - } -} - -static int mdio_wait_link(struct net_device *dev, int wait) -{ - int bmsr; - int phy_id; - struct netdev_private *np; - - np = netdev_priv(dev); - phy_id = np->phys[0]; - - do { - bmsr = mdio_read(dev, phy_id, MII_BMSR); - if (bmsr & 0x0004) - return 0; - mdelay(1); - } while (--wait > 0); - return -1; -} - -static int netdev_open(struct net_device *dev) -{ - struct netdev_private *np = netdev_priv(dev); - void __iomem *ioaddr = np->base; - const int irq = np->pci_dev->irq; - unsigned long flags; - int i; - - sundance_reset(dev, 0x00ff << 16); - - i = request_irq(irq, intr_handler, IRQF_SHARED, dev->name, dev); - if (i) - return i; - - if (netif_msg_ifup(np)) - printk(KERN_DEBUG "%s: netdev_open() irq %d\n", dev->name, irq); - - init_ring(dev); - - iowrite32(np->rx_ring_dma, ioaddr + RxListPtr); - /* The Tx list pointer is written as packets are queued. */ - - /* Initialize other registers. */ - __set_mac_addr(dev); -#if IS_ENABLED(CONFIG_VLAN_8021Q) - iowrite16(dev->mtu + 18, ioaddr + MaxFrameSize); -#else - iowrite16(dev->mtu + 14, ioaddr + MaxFrameSize); -#endif - if (dev->mtu > 2047) - iowrite32(ioread32(ioaddr + ASICCtrl) | 0x0C, ioaddr + ASICCtrl); - - /* Configure the PCI bus bursts and FIFO thresholds. 
*/ - - if (dev->if_port == 0) - dev->if_port = np->default_port; - - spin_lock_init(&np->mcastlock); - - set_rx_mode(dev); - iowrite16(0, ioaddr + IntrEnable); - iowrite16(0, ioaddr + DownCounter); - /* Set the chip to poll every N*320nsec. */ - iowrite8(100, ioaddr + RxDMAPollPeriod); - iowrite8(127, ioaddr + TxDMAPollPeriod); - /* Fix DFE-580TX packet drop issue */ - if (np->pci_dev->revision >= 0x14) - iowrite8(0x01, ioaddr + DebugCtrl1); - netif_start_queue(dev); - - spin_lock_irqsave(&np->lock, flags); - reset_tx(dev); - spin_unlock_irqrestore(&np->lock, flags); - - iowrite16 (StatsEnable | RxEnable | TxEnable, ioaddr + MACCtrl1); - - /* Disable Wol */ - iowrite8(ioread8(ioaddr + WakeEvent) | 0x00, ioaddr + WakeEvent); - np->wol_enabled = 0; - - if (netif_msg_ifup(np)) - printk(KERN_DEBUG "%s: Done netdev_open(), status: Rx %x Tx %x " - "MAC Control %x, %4.4x %4.4x.\n", - dev->name, ioread32(ioaddr + RxStatus), ioread8(ioaddr + TxStatus), - ioread32(ioaddr + MACCtrl0), - ioread16(ioaddr + MACCtrl1), ioread16(ioaddr + MACCtrl0)); - - /* Set the timer to check for link beat. */ - timer_setup(&np->timer, netdev_timer, 0); - np->timer.expires = jiffies + 3*HZ; - add_timer(&np->timer); - - /* Enable interrupts by setting the interrupt mask. */ - iowrite16(DEFAULT_INTR, ioaddr + IntrEnable); - - return 0; -} - -static void check_duplex(struct net_device *dev) -{ - struct netdev_private *np = netdev_priv(dev); - void __iomem *ioaddr = np->base; - int mii_lpa = mdio_read(dev, np->phys[0], MII_LPA); - int negotiated = mii_lpa & np->mii_if.advertising; - int duplex; - - /* Force media */ - if (!np->an_enable || mii_lpa == 0xffff) { - if (np->mii_if.full_duplex) - iowrite16 (ioread16 (ioaddr + MACCtrl0) | EnbFullDuplex, - ioaddr + MACCtrl0); - return; - } - - /* Autonegotiation */ - duplex = (negotiated & 0x0100) || (negotiated & 0x01C0) == 0x0040; - if (np->mii_if.full_duplex != duplex) { - np->mii_if.full_duplex = duplex; - if (netif_msg_link(np)) - printk(KERN_INFO "%s: Setting %s-duplex based on MII #%d " - "negotiated capability %4.4x.\n", dev->name, - duplex ? "full" : "half", np->phys[0], negotiated); - iowrite16(ioread16(ioaddr + MACCtrl0) | (duplex ? 
0x20 : 0), ioaddr + MACCtrl0); - } -} - -static void netdev_timer(struct timer_list *t) -{ - struct netdev_private *np = from_timer(np, t, timer); - struct net_device *dev = np->mii_if.dev; - void __iomem *ioaddr = np->base; - int next_tick = 10*HZ; - - if (netif_msg_timer(np)) { - printk(KERN_DEBUG "%s: Media selection timer tick, intr status %4.4x, " - "Tx %x Rx %x.\n", - dev->name, ioread16(ioaddr + IntrEnable), - ioread8(ioaddr + TxStatus), ioread32(ioaddr + RxStatus)); - } - check_duplex(dev); - np->timer.expires = jiffies + next_tick; - add_timer(&np->timer); -} - -static void tx_timeout(struct net_device *dev, unsigned int txqueue) -{ - struct netdev_private *np = netdev_priv(dev); - void __iomem *ioaddr = np->base; - unsigned long flag; - - netif_stop_queue(dev); - tasklet_disable_in_atomic(&np->tx_tasklet); - iowrite16(0, ioaddr + IntrEnable); - printk(KERN_WARNING "%s: Transmit timed out, TxStatus %2.2x " - "TxFrameId %2.2x," - " resetting...\n", dev->name, ioread8(ioaddr + TxStatus), - ioread8(ioaddr + TxFrameId)); - - { - int i; - for (i=0; i<TX_RING_SIZE; i++) { - printk(KERN_DEBUG "%02x %08llx %08x %08x(%02x) %08x %08x\n", i, - (unsigned long long)(np->tx_ring_dma + i*sizeof(*np->tx_ring)), - le32_to_cpu(np->tx_ring[i].next_desc), - le32_to_cpu(np->tx_ring[i].status), - (le32_to_cpu(np->tx_ring[i].status) >> 2) & 0xff, - le32_to_cpu(np->tx_ring[i].frag.addr), - le32_to_cpu(np->tx_ring[i].frag.length)); - } - printk(KERN_DEBUG "TxListPtr=%08x netif_queue_stopped=%d\n", - ioread32(np->base + TxListPtr), - netif_queue_stopped(dev)); - printk(KERN_DEBUG "cur_tx=%d(%02x) dirty_tx=%d(%02x)\n", - np->cur_tx, np->cur_tx % TX_RING_SIZE, - np->dirty_tx, np->dirty_tx % TX_RING_SIZE); - printk(KERN_DEBUG "cur_rx=%d dirty_rx=%d\n", np->cur_rx, np->dirty_rx); - printk(KERN_DEBUG "cur_task=%d\n", np->cur_task); - } - spin_lock_irqsave(&np->lock, flag); - - /* Stop and restart the chip's Tx processes . */ - reset_tx(dev); - spin_unlock_irqrestore(&np->lock, flag); - - dev->if_port = 0; - - netif_trans_update(dev); /* prevent tx timeout */ - dev->stats.tx_errors++; - if (np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 4) { - netif_wake_queue(dev); - } - iowrite16(DEFAULT_INTR, ioaddr + IntrEnable); - tasklet_enable(&np->tx_tasklet); -} - - -/* Initialize the Rx and Tx rings, along with various 'dev' bits. */ -static void init_ring(struct net_device *dev) -{ - struct netdev_private *np = netdev_priv(dev); - int i; - - np->cur_rx = np->cur_tx = 0; - np->dirty_rx = np->dirty_tx = 0; - np->cur_task = 0; - - np->rx_buf_sz = (dev->mtu <= 1520 ? PKT_BUF_SZ : dev->mtu + 16); - - /* Initialize all Rx descriptors. */ - for (i = 0; i < RX_RING_SIZE; i++) { - np->rx_ring[i].next_desc = cpu_to_le32(np->rx_ring_dma + - ((i+1)%RX_RING_SIZE)*sizeof(*np->rx_ring)); - np->rx_ring[i].status = 0; - np->rx_ring[i].frag.length = 0; - np->rx_skbuff[i] = NULL; - } - - /* Fill in the Rx buffers. Handle allocation failure gracefully. */ - for (i = 0; i < RX_RING_SIZE; i++) { - struct sk_buff *skb = - netdev_alloc_skb(dev, np->rx_buf_sz + 2); - np->rx_skbuff[i] = skb; - if (skb == NULL) - break; - skb_reserve(skb, 2); /* 16 byte align the IP header. 
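The skb_reserve(skb, 2) just above is the canonical receive-alignment trick: an Ethernet header is 14 bytes, so starting the buffer 2 bytes in places the IP header on a 16-byte boundary, as the comment says. The kernel names this constant NET_IP_ALIGN (2 on most architectures); the generic form looks like:

    skb = netdev_alloc_skb(dev, buf_len + NET_IP_ALIGN);
    if (skb)
        skb_reserve(skb, NET_IP_ALIGN); /* 2 + 14-byte MAC header = 16,
                                           so the IP header is aligned */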
*/ - np->rx_ring[i].frag.addr = cpu_to_le32( - dma_map_single(&np->pci_dev->dev, skb->data, - np->rx_buf_sz, DMA_FROM_DEVICE)); - if (dma_mapping_error(&np->pci_dev->dev, - np->rx_ring[i].frag.addr)) { - dev_kfree_skb(skb); - np->rx_skbuff[i] = NULL; - break; - } - np->rx_ring[i].frag.length = cpu_to_le32(np->rx_buf_sz | LastFrag); - } - np->dirty_rx = (unsigned int)(i - RX_RING_SIZE); - - for (i = 0; i < TX_RING_SIZE; i++) { - np->tx_skbuff[i] = NULL; - np->tx_ring[i].status = 0; - } -} - -static void tx_poll(struct tasklet_struct *t) -{ - struct netdev_private *np = from_tasklet(np, t, tx_tasklet); - unsigned head = np->cur_task % TX_RING_SIZE; - struct netdev_desc *txdesc = - &np->tx_ring[(np->cur_tx - 1) % TX_RING_SIZE]; - - /* Chain the next pointer */ - for (; np->cur_tx - np->cur_task > 0; np->cur_task++) { - int entry = np->cur_task % TX_RING_SIZE; - txdesc = &np->tx_ring[entry]; - if (np->last_tx) { - np->last_tx->next_desc = cpu_to_le32(np->tx_ring_dma + - entry*sizeof(struct netdev_desc)); - } - np->last_tx = txdesc; - } - /* Indicate the latest descriptor of tx ring */ - txdesc->status |= cpu_to_le32(DescIntrOnTx); - - if (ioread32 (np->base + TxListPtr) == 0) - iowrite32 (np->tx_ring_dma + head * sizeof(struct netdev_desc), - np->base + TxListPtr); -} - -static netdev_tx_t -start_tx (struct sk_buff *skb, struct net_device *dev) -{ - struct netdev_private *np = netdev_priv(dev); - struct netdev_desc *txdesc; - unsigned entry; - - /* Calculate the next Tx descriptor entry. */ - entry = np->cur_tx % TX_RING_SIZE; - np->tx_skbuff[entry] = skb; - txdesc = &np->tx_ring[entry]; - - txdesc->next_desc = 0; - txdesc->status = cpu_to_le32 ((entry << 2) | DisableAlign); - txdesc->frag.addr = cpu_to_le32(dma_map_single(&np->pci_dev->dev, - skb->data, skb->len, DMA_TO_DEVICE)); - if (dma_mapping_error(&np->pci_dev->dev, - txdesc->frag.addr)) - goto drop_frame; - txdesc->frag.length = cpu_to_le32 (skb->len | LastFrag); - - /* Increment cur_tx before tasklet_schedule() */ - np->cur_tx++; - mb(); - /* Schedule a tx_poll() task */ - tasklet_schedule(&np->tx_tasklet); - - /* On some architectures: explicitly flush cache lines here. 
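Both the Rx fill loop above and start_tx() follow the required streaming-DMA discipline: every dma_map_single() is checked with dma_mapping_error() before the address reaches a descriptor, and on failure the skb is freed rather than handed to hardware. The skeleton, reduced to its essentials:

    dma_addr_t mapping;

    mapping = dma_map_single(&np->pci_dev->dev, skb->data, len,
                             DMA_TO_DEVICE);
    if (dma_mapping_error(&np->pci_dev->dev, mapping)) {
        dev_kfree_skb_any(skb);     /* never give the NIC a bad address */
        return NETDEV_TX_OK;        /* counted as dropped, not retried  */
    }
    desc->frag.addr = cpu_to_le32(mapping);
    /* ... later, once the hardware is done with the buffer: */
    dma_unmap_single(&np->pci_dev->dev, mapping, len, DMA_TO_DEVICE);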
*/ - if (np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 1 && - !netif_queue_stopped(dev)) { - /* do nothing */ - } else { - netif_stop_queue (dev); - } - if (netif_msg_tx_queued(np)) { - printk (KERN_DEBUG - "%s: Transmit frame #%d queued in slot %d.\n", - dev->name, np->cur_tx, entry); - } - return NETDEV_TX_OK; - -drop_frame: - dev_kfree_skb_any(skb); - np->tx_skbuff[entry] = NULL; - dev->stats.tx_dropped++; - return NETDEV_TX_OK; -} - -/* Reset hardware tx and free all of tx buffers */ -static int -reset_tx (struct net_device *dev) -{ - struct netdev_private *np = netdev_priv(dev); - void __iomem *ioaddr = np->base; - struct sk_buff *skb; - int i; - - /* Reset tx logic, TxListPtr will be cleaned */ - iowrite16 (TxDisable, ioaddr + MACCtrl1); - sundance_reset(dev, (NetworkReset|FIFOReset|DMAReset|TxReset) << 16); - - /* free all tx skbuff */ - for (i = 0; i < TX_RING_SIZE; i++) { - np->tx_ring[i].next_desc = 0; - - skb = np->tx_skbuff[i]; - if (skb) { - dma_unmap_single(&np->pci_dev->dev, - le32_to_cpu(np->tx_ring[i].frag.addr), - skb->len, DMA_TO_DEVICE); - dev_kfree_skb_any(skb); - np->tx_skbuff[i] = NULL; - dev->stats.tx_dropped++; - } - } - np->cur_tx = np->dirty_tx = 0; - np->cur_task = 0; - - np->last_tx = NULL; - iowrite8(127, ioaddr + TxDMAPollPeriod); - - iowrite16 (StatsEnable | RxEnable | TxEnable, ioaddr + MACCtrl1); - return 0; -} - -/* The interrupt handler cleans up after the Tx thread, - and schedule a Rx thread work */ -static irqreturn_t intr_handler(int irq, void *dev_instance) -{ - struct net_device *dev = (struct net_device *)dev_instance; - struct netdev_private *np = netdev_priv(dev); - void __iomem *ioaddr = np->base; - int hw_frame_id; - int tx_cnt; - int tx_status; - int handled = 0; - int i; - - do { - int intr_status = ioread16(ioaddr + IntrStatus); - iowrite16(intr_status, ioaddr + IntrStatus); - - if (netif_msg_intr(np)) - printk(KERN_DEBUG "%s: Interrupt, status %4.4x.\n", - dev->name, intr_status); - - if (!(intr_status & DEFAULT_INTR)) - break; - - handled = 1; - - if (intr_status & (IntrRxDMADone)) { - iowrite16(DEFAULT_INTR & ~(IntrRxDone|IntrRxDMADone), - ioaddr + IntrEnable); - if (np->budget < 0) - np->budget = RX_BUDGET; - tasklet_schedule(&np->rx_tasklet); - } - if (intr_status & (IntrTxDone | IntrDrvRqst)) { - tx_status = ioread16 (ioaddr + TxStatus); - for (tx_cnt=32; tx_status & 0x80; --tx_cnt) { - if (netif_msg_tx_done(np)) - printk - ("%s: Transmit status is %2.2x.\n", - dev->name, tx_status); - if (tx_status & 0x1e) { - if (netif_msg_tx_err(np)) - printk("%s: Transmit error status %4.4x.\n", - dev->name, tx_status); - dev->stats.tx_errors++; - if (tx_status & 0x10) - dev->stats.tx_fifo_errors++; - if (tx_status & 0x08) - dev->stats.collisions++; - if (tx_status & 0x04) - dev->stats.tx_fifo_errors++; - if (tx_status & 0x02) - dev->stats.tx_window_errors++; - - /* - ** This reset has been verified on - ** DFE-580TX boards ! phdm@macqel.be. - */ - if (tx_status & 0x10) { /* TxUnderrun */ - /* Restart Tx FIFO and transmitter */ - sundance_reset(dev, (NetworkReset|FIFOReset|TxReset) << 16); - /* No need to reset the Tx pointer here */ - } - /* Restart the Tx. Need to make sure tx enabled */ - i = 10; - do { - iowrite16(ioread16(ioaddr + MACCtrl1) | TxEnable, ioaddr + MACCtrl1); - if (ioread16(ioaddr + MACCtrl1) & TxEnabled) - break; - mdelay(1); - } while (--i); - } - /* Yup, this is a documentation bug. It cost me *hours*. 
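Tx occupancy in this driver is tracked with two free-running counters rather than wrapped indices: cur_tx counts descriptors ever queued, dirty_tx counts descriptors reclaimed, their difference is the in-flight depth, and the slot number is always the counter modulo TX_RING_SIZE. The producer and consumer halves, condensed (thresholds taken from the surrounding code):

    /* producer, in start_tx() */
    entry = np->cur_tx % TX_RING_SIZE;
    np->cur_tx++;
    if (np->cur_tx - np->dirty_tx >= TX_QUEUE_LEN - 1)
        netif_stop_queue(dev);          /* nearly full: throttle the stack */

    /* consumer, in the interrupt handler */
    np->dirty_tx++;
    if (netif_queue_stopped(dev) &&
        np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 4)
        netif_wake_queue(dev);          /* drained enough: restart it */

Because both counters only ever increase, their difference stays correct even across unsigned wraparound.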
*/ - iowrite16 (0, ioaddr + TxStatus); - if (tx_cnt < 0) { - iowrite32(5000, ioaddr + DownCounter); - break; - } - tx_status = ioread16 (ioaddr + TxStatus); - } - hw_frame_id = (tx_status >> 8) & 0xff; - } else { - hw_frame_id = ioread8(ioaddr + TxFrameId); - } - - if (np->pci_dev->revision >= 0x14) { - spin_lock(&np->lock); - for (; np->cur_tx - np->dirty_tx > 0; np->dirty_tx++) { - int entry = np->dirty_tx % TX_RING_SIZE; - struct sk_buff *skb; - int sw_frame_id; - sw_frame_id = (le32_to_cpu( - np->tx_ring[entry].status) >> 2) & 0xff; - if (sw_frame_id == hw_frame_id && - !(le32_to_cpu(np->tx_ring[entry].status) - & 0x00010000)) - break; - if (sw_frame_id == (hw_frame_id + 1) % - TX_RING_SIZE) - break; - skb = np->tx_skbuff[entry]; - /* Free the original skb. */ - dma_unmap_single(&np->pci_dev->dev, - le32_to_cpu(np->tx_ring[entry].frag.addr), - skb->len, DMA_TO_DEVICE); - dev_consume_skb_irq(np->tx_skbuff[entry]); - np->tx_skbuff[entry] = NULL; - np->tx_ring[entry].frag.addr = 0; - np->tx_ring[entry].frag.length = 0; - } - spin_unlock(&np->lock); - } else { - spin_lock(&np->lock); - for (; np->cur_tx - np->dirty_tx > 0; np->dirty_tx++) { - int entry = np->dirty_tx % TX_RING_SIZE; - struct sk_buff *skb; - if (!(le32_to_cpu(np->tx_ring[entry].status) - & 0x00010000)) - break; - skb = np->tx_skbuff[entry]; - /* Free the original skb. */ - dma_unmap_single(&np->pci_dev->dev, - le32_to_cpu(np->tx_ring[entry].frag.addr), - skb->len, DMA_TO_DEVICE); - dev_consume_skb_irq(np->tx_skbuff[entry]); - np->tx_skbuff[entry] = NULL; - np->tx_ring[entry].frag.addr = 0; - np->tx_ring[entry].frag.length = 0; - } - spin_unlock(&np->lock); - } - - if (netif_queue_stopped(dev) && - np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 4) { - /* The ring is no longer full, clear busy flag. */ - netif_wake_queue (dev); - } - /* Abnormal error summary/uncommon events handlers. */ - if (intr_status & (IntrPCIErr | LinkChange | StatsMax)) - netdev_error(dev, intr_status); - } while (0); - if (netif_msg_intr(np)) - printk(KERN_DEBUG "%s: exiting interrupt, status=%#4.4x.\n", - dev->name, ioread16(ioaddr + IntrStatus)); - return IRQ_RETVAL(handled); -} - -static void rx_poll(struct tasklet_struct *t) -{ - struct netdev_private *np = from_tasklet(np, t, rx_tasklet); - struct net_device *dev = np->ndev; - int entry = np->cur_rx % RX_RING_SIZE; - int boguscnt = np->budget; - void __iomem *ioaddr = np->base; - int received = 0; - - /* If EOP is set on the next entry, it's a new packet. Send it up. */ - while (1) { - struct netdev_desc *desc = &(np->rx_ring[entry]); - u32 frame_status = le32_to_cpu(desc->status); - int pkt_len; - - if (--boguscnt < 0) { - goto not_done; - } - if (!(frame_status & DescOwn)) - break; - pkt_len = frame_status & 0x1fff; /* Chip omits the CRC. */ - if (netif_msg_rx_status(np)) - printk(KERN_DEBUG " netdev_rx() status was %8.8x.\n", - frame_status); - if (frame_status & 0x001f4000) { - /* There was a error. 
*/ - if (netif_msg_rx_err(np)) - printk(KERN_DEBUG " netdev_rx() Rx error was %8.8x.\n", - frame_status); - dev->stats.rx_errors++; - if (frame_status & 0x00100000) - dev->stats.rx_length_errors++; - if (frame_status & 0x00010000) - dev->stats.rx_fifo_errors++; - if (frame_status & 0x00060000) - dev->stats.rx_frame_errors++; - if (frame_status & 0x00080000) - dev->stats.rx_crc_errors++; - if (frame_status & 0x00100000) { - printk(KERN_WARNING "%s: Oversized Ethernet frame," - " status %8.8x.\n", - dev->name, frame_status); - } - } else { - struct sk_buff *skb; -#ifndef final_version - if (netif_msg_rx_status(np)) - printk(KERN_DEBUG " netdev_rx() normal Rx pkt length %d" - ", bogus_cnt %d.\n", - pkt_len, boguscnt); -#endif - /* Check if the packet is long enough to accept without copying - to a minimally-sized skbuff. */ - if (pkt_len < rx_copybreak && - (skb = netdev_alloc_skb(dev, pkt_len + 2)) != NULL) { - skb_reserve(skb, 2); /* 16 byte align the IP header */ - dma_sync_single_for_cpu(&np->pci_dev->dev, - le32_to_cpu(desc->frag.addr), - np->rx_buf_sz, DMA_FROM_DEVICE); - skb_copy_to_linear_data(skb, np->rx_skbuff[entry]->data, pkt_len); - dma_sync_single_for_device(&np->pci_dev->dev, - le32_to_cpu(desc->frag.addr), - np->rx_buf_sz, DMA_FROM_DEVICE); - skb_put(skb, pkt_len); - } else { - dma_unmap_single(&np->pci_dev->dev, - le32_to_cpu(desc->frag.addr), - np->rx_buf_sz, DMA_FROM_DEVICE); - skb_put(skb = np->rx_skbuff[entry], pkt_len); - np->rx_skbuff[entry] = NULL; - } - skb->protocol = eth_type_trans(skb, dev); - /* Note: checksum -> skb->ip_summed = CHECKSUM_UNNECESSARY; */ - netif_rx(skb); - } - entry = (entry + 1) % RX_RING_SIZE; - received++; - } - np->cur_rx = entry; - refill_rx (dev); - np->budget -= received; - iowrite16(DEFAULT_INTR, ioaddr + IntrEnable); - return; - -not_done: - np->cur_rx = entry; - refill_rx (dev); - if (!received) - received = 1; - np->budget -= received; - if (np->budget <= 0) - np->budget = RX_BUDGET; - tasklet_schedule(&np->rx_tasklet); -} - -static void refill_rx (struct net_device *dev) -{ - struct netdev_private *np = netdev_priv(dev); - int entry; - - /* Refill the Rx ring buffers. */ - for (;(np->cur_rx - np->dirty_rx + RX_RING_SIZE) % RX_RING_SIZE > 0; - np->dirty_rx = (np->dirty_rx + 1) % RX_RING_SIZE) { - struct sk_buff *skb; - entry = np->dirty_rx % RX_RING_SIZE; - if (np->rx_skbuff[entry] == NULL) { - skb = netdev_alloc_skb(dev, np->rx_buf_sz + 2); - np->rx_skbuff[entry] = skb; - if (skb == NULL) - break; /* Better luck next round. */ - skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */ - np->rx_ring[entry].frag.addr = cpu_to_le32( - dma_map_single(&np->pci_dev->dev, skb->data, - np->rx_buf_sz, DMA_FROM_DEVICE)); - if (dma_mapping_error(&np->pci_dev->dev, - np->rx_ring[entry].frag.addr)) { - dev_kfree_skb_irq(skb); - np->rx_skbuff[entry] = NULL; - break; - } - } - /* Perhaps we need not reset this field. 
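
rx_poll() above copies small frames out of the still-mapped receive buffer (the rx_copybreak path) so the large buffer can be reposted immediately. Copying out of a streaming DMA buffer that stays mapped requires bouncing ownership to the CPU and back; a sketch of that bracket (hypothetical helper mirroring the pattern above, not a drop-in function):

    #include <linux/dma-mapping.h>
    #include <linux/netdevice.h>
    #include <linux/skbuff.h>

    static struct sk_buff *copybreak_rx(struct device *dma_dev,
                                        struct net_device *ndev,
                                        dma_addr_t buf_dma, const void *buf_cpu,
                                        unsigned int pkt_len, unsigned int map_len)
    {
        struct sk_buff *skb = netdev_alloc_skb(ndev, pkt_len + 2);

        if (!skb)
            return NULL;
        skb_reserve(skb, 2);                    /* 16-byte align the IP header */
        dma_sync_single_for_cpu(dma_dev, buf_dma, map_len, DMA_FROM_DEVICE);
        skb_put_data(skb, buf_cpu, pkt_len);    /* copy while the CPU owns it */
        dma_sync_single_for_device(dma_dev, buf_dma, map_len, DMA_FROM_DEVICE);
        return skb;     /* the original buffer stays mapped for reuse */
    }
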
*/ - np->rx_ring[entry].frag.length = - cpu_to_le32(np->rx_buf_sz | LastFrag); - np->rx_ring[entry].status = 0; - } -} -static void netdev_error(struct net_device *dev, int intr_status) -{ - struct netdev_private *np = netdev_priv(dev); - void __iomem *ioaddr = np->base; - u16 mii_ctl, mii_advertise, mii_lpa; - int speed; - - if (intr_status & LinkChange) { - if (mdio_wait_link(dev, 10) == 0) { - printk(KERN_INFO "%s: Link up\n", dev->name); - if (np->an_enable) { - mii_advertise = mdio_read(dev, np->phys[0], - MII_ADVERTISE); - mii_lpa = mdio_read(dev, np->phys[0], MII_LPA); - mii_advertise &= mii_lpa; - printk(KERN_INFO "%s: Link changed: ", - dev->name); - if (mii_advertise & ADVERTISE_100FULL) { - np->speed = 100; - printk("100Mbps, full duplex\n"); - } else if (mii_advertise & ADVERTISE_100HALF) { - np->speed = 100; - printk("100Mbps, half duplex\n"); - } else if (mii_advertise & ADVERTISE_10FULL) { - np->speed = 10; - printk("10Mbps, full duplex\n"); - } else if (mii_advertise & ADVERTISE_10HALF) { - np->speed = 10; - printk("10Mbps, half duplex\n"); - } else - printk("\n"); - - } else { - mii_ctl = mdio_read(dev, np->phys[0], MII_BMCR); - speed = (mii_ctl & BMCR_SPEED100) ? 100 : 10; - np->speed = speed; - printk(KERN_INFO "%s: Link changed: %dMbps, ", - dev->name, speed); - printk("%s duplex.\n", - (mii_ctl & BMCR_FULLDPLX) ? - "full" : "half"); - } - check_duplex(dev); - if (np->flowctrl && np->mii_if.full_duplex) { - iowrite16(ioread16(ioaddr + MulticastFilter1+2) | 0x0200, - ioaddr + MulticastFilter1+2); - iowrite16(ioread16(ioaddr + MACCtrl0) | EnbFlowCtrl, - ioaddr + MACCtrl0); - } - netif_carrier_on(dev); - } else { - printk(KERN_INFO "%s: Link down\n", dev->name); - netif_carrier_off(dev); - } - } - if (intr_status & StatsMax) { - get_stats(dev); - } - if (intr_status & IntrPCIErr) { - printk(KERN_ERR "%s: Something Wicked happened! %4.4x.\n", - dev->name, intr_status); - /* We must do a global reset of DMA to continue. */ - } -} - -static struct net_device_stats *get_stats(struct net_device *dev) -{ - struct netdev_private *np = netdev_priv(dev); - void __iomem *ioaddr = np->base; - unsigned long flags; - u8 late_coll, single_coll, mult_coll; - - spin_lock_irqsave(&np->statlock, flags); - /* The chip only needs to report frames it silently dropped. 
*/ - dev->stats.rx_missed_errors += ioread8(ioaddr + RxMissed); - dev->stats.tx_packets += ioread16(ioaddr + TxFramesOK); - dev->stats.rx_packets += ioread16(ioaddr + RxFramesOK); - dev->stats.tx_carrier_errors += ioread8(ioaddr + StatsCarrierError); - - mult_coll = ioread8(ioaddr + StatsMultiColl); - np->xstats.tx_multiple_collisions += mult_coll; - single_coll = ioread8(ioaddr + StatsOneColl); - np->xstats.tx_single_collisions += single_coll; - late_coll = ioread8(ioaddr + StatsLateColl); - np->xstats.tx_late_collisions += late_coll; - dev->stats.collisions += mult_coll - + single_coll - + late_coll; - - np->xstats.tx_deferred += ioread8(ioaddr + StatsTxDefer); - np->xstats.tx_deferred_excessive += ioread8(ioaddr + StatsTxXSDefer); - np->xstats.tx_aborted += ioread8(ioaddr + StatsTxAbort); - np->xstats.tx_bcasts += ioread8(ioaddr + StatsBcastTx); - np->xstats.rx_bcasts += ioread8(ioaddr + StatsBcastRx); - np->xstats.tx_mcasts += ioread8(ioaddr + StatsMcastTx); - np->xstats.rx_mcasts += ioread8(ioaddr + StatsMcastRx); - - dev->stats.tx_bytes += ioread16(ioaddr + TxOctetsLow); - dev->stats.tx_bytes += ioread16(ioaddr + TxOctetsHigh) << 16; - dev->stats.rx_bytes += ioread16(ioaddr + RxOctetsLow); - dev->stats.rx_bytes += ioread16(ioaddr + RxOctetsHigh) << 16; - - spin_unlock_irqrestore(&np->statlock, flags); - - return &dev->stats; -} - -static void set_rx_mode(struct net_device *dev) -{ - struct netdev_private *np = netdev_priv(dev); - void __iomem *ioaddr = np->base; - u16 mc_filter[4]; /* Multicast hash filter */ - u32 rx_mode; - int i; - - if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */ - memset(mc_filter, 0xff, sizeof(mc_filter)); - rx_mode = AcceptBroadcast | AcceptMulticast | AcceptAll | AcceptMyPhys; - } else if ((netdev_mc_count(dev) > multicast_filter_limit) || - (dev->flags & IFF_ALLMULTI)) { - /* Too many to match, or accept all multicasts. 
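
get_stats() above widens several 8- and 16-bit hardware registers into the netdev counters; the octet counts in particular are one 32-bit quantity split across two 16-bit registers. The recombination in isolation (sketch; on some chips the low-half read latches the high half, which is what makes the two-step read consistent):

    #include <linux/io.h>

    static u32 read_split_counter(void __iomem *ioaddr, int lo_off, int hi_off)
    {
        u32 lo = ioread16(ioaddr + lo_off);
        u32 hi = ioread16(ioaddr + hi_off);

        return lo | (hi << 16);
    }
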
*/ - memset(mc_filter, 0xff, sizeof(mc_filter)); - rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys; - } else if (!netdev_mc_empty(dev)) { - struct netdev_hw_addr *ha; - int bit; - int index; - int crc; - memset (mc_filter, 0, sizeof (mc_filter)); - netdev_for_each_mc_addr(ha, dev) { - crc = ether_crc_le(ETH_ALEN, ha->addr); - for (index=0, bit=0; bit < 6; bit++, crc <<= 1) - if (crc & 0x80000000) index |= 1 << bit; - mc_filter[index/16] |= (1 << (index % 16)); - } - rx_mode = AcceptBroadcast | AcceptMultiHash | AcceptMyPhys; - } else { - iowrite8(AcceptBroadcast | AcceptMyPhys, ioaddr + RxMode); - return; - } - if (np->mii_if.full_duplex && np->flowctrl) - mc_filter[3] |= 0x0200; - - for (i = 0; i < 4; i++) - iowrite16(mc_filter[i], ioaddr + MulticastFilter0 + i*2); - iowrite8(rx_mode, ioaddr + RxMode); -} - -static int __set_mac_addr(struct net_device *dev) -{ - struct netdev_private *np = netdev_priv(dev); - u16 addr16; - - addr16 = (dev->dev_addr[0] | (dev->dev_addr[1] << 8)); - iowrite16(addr16, np->base + StationAddr); - addr16 = (dev->dev_addr[2] | (dev->dev_addr[3] << 8)); - iowrite16(addr16, np->base + StationAddr+2); - addr16 = (dev->dev_addr[4] | (dev->dev_addr[5] << 8)); - iowrite16(addr16, np->base + StationAddr+4); - return 0; -} - -/* Invoked with rtnl_lock held */ -static int sundance_set_mac_addr(struct net_device *dev, void *data) -{ - const struct sockaddr *addr = data; - - if (!is_valid_ether_addr(addr->sa_data)) - return -EADDRNOTAVAIL; - eth_hw_addr_set(dev, addr->sa_data); - __set_mac_addr(dev); - - return 0; -} - -static const struct { - const char name[ETH_GSTRING_LEN]; -} sundance_stats[] = { - { "tx_multiple_collisions" }, - { "tx_single_collisions" }, - { "tx_late_collisions" }, - { "tx_deferred" }, - { "tx_deferred_excessive" }, - { "tx_aborted" }, - { "tx_bcasts" }, - { "rx_bcasts" }, - { "tx_mcasts" }, - { "rx_mcasts" }, -}; - -static int check_if_running(struct net_device *dev) -{ - if (!netif_running(dev)) - return -EINVAL; - return 0; -} - -static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) -{ - struct netdev_private *np = netdev_priv(dev); - strscpy(info->driver, DRV_NAME, sizeof(info->driver)); - strscpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info)); -} - -static int get_link_ksettings(struct net_device *dev, - struct ethtool_link_ksettings *cmd) -{ - struct netdev_private *np = netdev_priv(dev); - spin_lock_irq(&np->lock); - mii_ethtool_get_link_ksettings(&np->mii_if, cmd); - spin_unlock_irq(&np->lock); - return 0; -} - -static int set_link_ksettings(struct net_device *dev, - const struct ethtool_link_ksettings *cmd) -{ - struct netdev_private *np = netdev_priv(dev); - int res; - spin_lock_irq(&np->lock); - res = mii_ethtool_set_link_ksettings(&np->mii_if, cmd); - spin_unlock_irq(&np->lock); - return res; -} - -static int nway_reset(struct net_device *dev) -{ - struct netdev_private *np = netdev_priv(dev); - return mii_nway_restart(&np->mii_if); -} - -static u32 get_link(struct net_device *dev) -{ - struct netdev_private *np = netdev_priv(dev); - return mii_link_ok(&np->mii_if); -} - -static u32 get_msglevel(struct net_device *dev) -{ - struct netdev_private *np = netdev_priv(dev); - return np->msg_enable; -} - -static void set_msglevel(struct net_device *dev, u32 val) -{ - struct netdev_private *np = netdev_priv(dev); - np->msg_enable = val; -} - -static void get_strings(struct net_device *dev, u32 stringset, - u8 *data) -{ - if (stringset == ETH_SS_STATS) - memcpy(data, sundance_stats, 
sizeof(sundance_stats)); -} - -static int get_sset_count(struct net_device *dev, int sset) -{ - switch (sset) { - case ETH_SS_STATS: - return ARRAY_SIZE(sundance_stats); - default: - return -EOPNOTSUPP; - } -} - -static void get_ethtool_stats(struct net_device *dev, - struct ethtool_stats *stats, u64 *data) -{ - struct netdev_private *np = netdev_priv(dev); - int i = 0; - - get_stats(dev); - data[i++] = np->xstats.tx_multiple_collisions; - data[i++] = np->xstats.tx_single_collisions; - data[i++] = np->xstats.tx_late_collisions; - data[i++] = np->xstats.tx_deferred; - data[i++] = np->xstats.tx_deferred_excessive; - data[i++] = np->xstats.tx_aborted; - data[i++] = np->xstats.tx_bcasts; - data[i++] = np->xstats.rx_bcasts; - data[i++] = np->xstats.tx_mcasts; - data[i++] = np->xstats.rx_mcasts; -} - -#ifdef CONFIG_PM - -static void sundance_get_wol(struct net_device *dev, - struct ethtool_wolinfo *wol) -{ - struct netdev_private *np = netdev_priv(dev); - void __iomem *ioaddr = np->base; - u8 wol_bits; - - wol->wolopts = 0; - - wol->supported = (WAKE_PHY | WAKE_MAGIC); - if (!np->wol_enabled) - return; - - wol_bits = ioread8(ioaddr + WakeEvent); - if (wol_bits & MagicPktEnable) - wol->wolopts |= WAKE_MAGIC; - if (wol_bits & LinkEventEnable) - wol->wolopts |= WAKE_PHY; -} - -static int sundance_set_wol(struct net_device *dev, - struct ethtool_wolinfo *wol) -{ - struct netdev_private *np = netdev_priv(dev); - void __iomem *ioaddr = np->base; - u8 wol_bits; - - if (!device_can_wakeup(&np->pci_dev->dev)) - return -EOPNOTSUPP; - - np->wol_enabled = !!(wol->wolopts); - wol_bits = ioread8(ioaddr + WakeEvent); - wol_bits &= ~(WakePktEnable | MagicPktEnable | - LinkEventEnable | WolEnable); - - if (np->wol_enabled) { - if (wol->wolopts & WAKE_MAGIC) - wol_bits |= (MagicPktEnable | WolEnable); - if (wol->wolopts & WAKE_PHY) - wol_bits |= (LinkEventEnable | WolEnable); - } - iowrite8(wol_bits, ioaddr + WakeEvent); - - device_set_wakeup_enable(&np->pci_dev->dev, np->wol_enabled); - - return 0; -} -#else -#define sundance_get_wol NULL -#define sundance_set_wol NULL -#endif /* CONFIG_PM */ - -static const struct ethtool_ops ethtool_ops = { - .begin = check_if_running, - .get_drvinfo = get_drvinfo, - .nway_reset = nway_reset, - .get_link = get_link, - .get_wol = sundance_get_wol, - .set_wol = sundance_set_wol, - .get_msglevel = get_msglevel, - .set_msglevel = set_msglevel, - .get_strings = get_strings, - .get_sset_count = get_sset_count, - .get_ethtool_stats = get_ethtool_stats, - .get_link_ksettings = get_link_ksettings, - .set_link_ksettings = set_link_ksettings, -}; - -static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) -{ - struct netdev_private *np = netdev_priv(dev); - int rc; - - if (!netif_running(dev)) - return -EINVAL; - - spin_lock_irq(&np->lock); - rc = generic_mii_ioctl(&np->mii_if, if_mii(rq), cmd, NULL); - spin_unlock_irq(&np->lock); - - return rc; -} - -static int netdev_close(struct net_device *dev) -{ - struct netdev_private *np = netdev_priv(dev); - void __iomem *ioaddr = np->base; - struct sk_buff *skb; - int i; - - /* Wait and kill tasklet */ - tasklet_kill(&np->rx_tasklet); - tasklet_kill(&np->tx_tasklet); - np->cur_tx = 0; - np->dirty_tx = 0; - np->cur_task = 0; - np->last_tx = NULL; - - netif_stop_queue(dev); - - if (netif_msg_ifdown(np)) { - printk(KERN_DEBUG "%s: Shutting down ethercard, status was Tx %2.2x " - "Rx %4.4x Int %2.2x.\n", - dev->name, ioread8(ioaddr + TxStatus), - ioread32(ioaddr + RxStatus), ioread16(ioaddr + IntrStatus)); - printk(KERN_DEBUG "%s: 
Queue pointers were Tx %d / %d, Rx %d / %d.\n", - dev->name, np->cur_tx, np->dirty_tx, np->cur_rx, np->dirty_rx); - } - - /* Disable interrupts by clearing the interrupt mask. */ - iowrite16(0x0000, ioaddr + IntrEnable); - - /* Disable Rx and Tx DMA for safely release resource */ - iowrite32(0x500, ioaddr + DMACtrl); - - /* Stop the chip's Tx and Rx processes. */ - iowrite16(TxDisable | RxDisable | StatsDisable, ioaddr + MACCtrl1); - - for (i = 2000; i > 0; i--) { - if ((ioread32(ioaddr + DMACtrl) & 0xc000) == 0) - break; - mdelay(1); - } - - iowrite16(GlobalReset | DMAReset | FIFOReset | NetworkReset, - ioaddr + ASIC_HI_WORD(ASICCtrl)); - - for (i = 2000; i > 0; i--) { - if ((ioread16(ioaddr + ASIC_HI_WORD(ASICCtrl)) & ResetBusy) == 0) - break; - mdelay(1); - } - -#ifdef __i386__ - if (netif_msg_hw(np)) { - printk(KERN_DEBUG " Tx ring at %8.8x:\n", - (int)(np->tx_ring_dma)); - for (i = 0; i < TX_RING_SIZE; i++) - printk(KERN_DEBUG " #%d desc. %4.4x %8.8x %8.8x.\n", - i, np->tx_ring[i].status, np->tx_ring[i].frag.addr, - np->tx_ring[i].frag.length); - printk(KERN_DEBUG " Rx ring %8.8x:\n", - (int)(np->rx_ring_dma)); - for (i = 0; i < /*RX_RING_SIZE*/4 ; i++) { - printk(KERN_DEBUG " #%d desc. %4.4x %4.4x %8.8x\n", - i, np->rx_ring[i].status, np->rx_ring[i].frag.addr, - np->rx_ring[i].frag.length); - } - } -#endif /* __i386__ debugging only */ - - free_irq(np->pci_dev->irq, dev); - - del_timer_sync(&np->timer); - - /* Free all the skbuffs in the Rx queue. */ - for (i = 0; i < RX_RING_SIZE; i++) { - np->rx_ring[i].status = 0; - skb = np->rx_skbuff[i]; - if (skb) { - dma_unmap_single(&np->pci_dev->dev, - le32_to_cpu(np->rx_ring[i].frag.addr), - np->rx_buf_sz, DMA_FROM_DEVICE); - dev_kfree_skb(skb); - np->rx_skbuff[i] = NULL; - } - np->rx_ring[i].frag.addr = cpu_to_le32(0xBADF00D0); /* poison */ - } - for (i = 0; i < TX_RING_SIZE; i++) { - np->tx_ring[i].next_desc = 0; - skb = np->tx_skbuff[i]; - if (skb) { - dma_unmap_single(&np->pci_dev->dev, - le32_to_cpu(np->tx_ring[i].frag.addr), - skb->len, DMA_TO_DEVICE); - dev_kfree_skb(skb); - np->tx_skbuff[i] = NULL; - } - } - - return 0; -} - -static void sundance_remove1(struct pci_dev *pdev) -{ - struct net_device *dev = pci_get_drvdata(pdev); - - if (dev) { - struct netdev_private *np = netdev_priv(dev); - unregister_netdev(dev); - dma_free_coherent(&pdev->dev, RX_TOTAL_SIZE, - np->rx_ring, np->rx_ring_dma); - dma_free_coherent(&pdev->dev, TX_TOTAL_SIZE, - np->tx_ring, np->tx_ring_dma); - pci_iounmap(pdev, np->base); - pci_release_regions(pdev); - free_netdev(dev); - } -} - -static int __maybe_unused sundance_suspend(struct device *dev_d) -{ - struct net_device *dev = dev_get_drvdata(dev_d); - struct netdev_private *np = netdev_priv(dev); - void __iomem *ioaddr = np->base; - - if (!netif_running(dev)) - return 0; - - netdev_close(dev); - netif_device_detach(dev); - - if (np->wol_enabled) { - iowrite8(AcceptBroadcast | AcceptMyPhys, ioaddr + RxMode); - iowrite16(RxEnable, ioaddr + MACCtrl1); - } - - device_set_wakeup_enable(dev_d, np->wol_enabled); - - return 0; -} - -static int __maybe_unused sundance_resume(struct device *dev_d) -{ - struct net_device *dev = dev_get_drvdata(dev_d); - int err = 0; - - if (!netif_running(dev)) - return 0; - - err = netdev_open(dev); - if (err) { - printk(KERN_ERR "%s: Can't resume interface!\n", - dev->name); - goto out; - } - - netif_device_attach(dev); - -out: - return err; -} - -static SIMPLE_DEV_PM_OPS(sundance_pm_ops, sundance_suspend, sundance_resume); - -static struct pci_driver sundance_driver = { - .name = 
DRV_NAME, - .id_table = sundance_pci_tbl, - .probe = sundance_probe1, - .remove = sundance_remove1, - .driver.pm = &sundance_pm_ops, -}; - -module_pci_driver(sundance_driver); diff --git a/drivers/net/ethernet/dnet.c b/drivers/net/ethernet/dnet.c index 2a18df3605f1..0de3cd660ec8 100644 --- a/drivers/net/ethernet/dnet.c +++ b/drivers/net/ethernet/dnet.c @@ -863,7 +863,7 @@ static void dnet_remove(struct platform_device *pdev) static struct platform_driver dnet_driver = { .probe = dnet_probe, - .remove_new = dnet_remove, + .remove = dnet_remove, .driver = { .name = "dnet", }, diff --git a/drivers/net/ethernet/engleder/tsnep_main.c b/drivers/net/ethernet/engleder/tsnep_main.c index 44da335d66bd..95a5295d0361 100644 --- a/drivers/net/ethernet/engleder/tsnep_main.c +++ b/drivers/net/ethernet/engleder/tsnep_main.c @@ -2689,7 +2689,7 @@ static struct platform_driver tsnep_driver = { .of_match_table = tsnep_of_match, }, .probe = tsnep_probe, - .remove_new = tsnep_remove, + .remove = tsnep_remove, }; module_platform_driver(tsnep_driver); diff --git a/drivers/net/ethernet/ethoc.c b/drivers/net/ethernet/ethoc.c index ad41c9019018..0c418557264c 100644 --- a/drivers/net/ethernet/ethoc.c +++ b/drivers/net/ethernet/ethoc.c @@ -1296,7 +1296,7 @@ MODULE_DEVICE_TABLE(of, ethoc_match); static struct platform_driver ethoc_driver = { .probe = ethoc_probe, - .remove_new = ethoc_remove, + .remove = ethoc_remove, .suspend = ethoc_suspend, .resume = ethoc_resume, .driver = { diff --git a/drivers/net/ethernet/ezchip/nps_enet.c b/drivers/net/ethernet/ezchip/nps_enet.c index 9ebe751c1df0..5cb478e98697 100644 --- a/drivers/net/ethernet/ezchip/nps_enet.c +++ b/drivers/net/ethernet/ezchip/nps_enet.c @@ -651,7 +651,7 @@ MODULE_DEVICE_TABLE(of, nps_enet_dt_ids); static struct platform_driver nps_enet_driver = { .probe = nps_enet_probe, - .remove_new = nps_enet_remove, + .remove = nps_enet_remove, .driver = { .name = DRV_NAME, .of_match_table = nps_enet_dt_ids, diff --git a/drivers/net/ethernet/faraday/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c index 0b61f548fd18..10c1a2f11000 100644 --- a/drivers/net/ethernet/faraday/ftgmac100.c +++ b/drivers/net/ethernet/faraday/ftgmac100.c @@ -1913,7 +1913,7 @@ static int ftgmac100_probe(struct platform_device *pdev) goto err_phy_connect; } err = phy_connect_direct(netdev, phydev, ftgmac100_adjust_link, - PHY_INTERFACE_MODE_MII); + PHY_INTERFACE_MODE_RMII); if (err) { dev_err(&pdev->dev, "Connecting PHY failed\n"); goto err_phy_connect; @@ -2089,7 +2089,7 @@ MODULE_DEVICE_TABLE(of, ftgmac100_of_match); static struct platform_driver ftgmac100_driver = { .probe = ftgmac100_probe, - .remove_new = ftgmac100_remove, + .remove = ftgmac100_remove, .driver = { .name = DRV_NAME, .of_match_table = ftgmac100_of_match, diff --git a/drivers/net/ethernet/faraday/ftmac100.c b/drivers/net/ethernet/faraday/ftmac100.c index 1047c805054e..5803a382f0ba 100644 --- a/drivers/net/ethernet/faraday/ftmac100.c +++ b/drivers/net/ethernet/faraday/ftmac100.c @@ -1243,7 +1243,7 @@ static const struct of_device_id ftmac100_of_ids[] = { static struct platform_driver ftmac100_driver = { .probe = ftmac100_probe, - .remove_new = ftmac100_remove, + .remove = ftmac100_remove, .driver = { .name = DRV_NAME, .of_match_table = ftmac100_of_ids diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c index e15dd3d858df..ac06b01fe934 100644 --- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c +++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c @@ -463,6 +463,22 @@ 
static int dpaa_set_mac_address(struct net_device *net_dev, void *addr) return 0; } +static int dpaa_addr_sync(struct net_device *net_dev, const u8 *addr) +{ + const struct dpaa_priv *priv = netdev_priv(net_dev); + + return priv->mac_dev->add_hash_mac_addr(priv->mac_dev->fman_mac, + (enet_addr_t *)addr); +} + +static int dpaa_addr_unsync(struct net_device *net_dev, const u8 *addr) +{ + const struct dpaa_priv *priv = netdev_priv(net_dev); + + return priv->mac_dev->remove_hash_mac_addr(priv->mac_dev->fman_mac, + (enet_addr_t *)addr); +} + static void dpaa_set_rx_mode(struct net_device *net_dev) { const struct dpaa_priv *priv; @@ -490,9 +506,9 @@ static void dpaa_set_rx_mode(struct net_device *net_dev) err); } - err = priv->mac_dev->set_multi(net_dev, priv->mac_dev); + err = __dev_mc_sync(net_dev, dpaa_addr_sync, dpaa_addr_unsync); if (err < 0) - netif_err(priv, drv, net_dev, "mac_dev->set_multi() = %d\n", + netif_err(priv, drv, net_dev, "dpaa_addr_sync() = %d\n", err); } @@ -3571,7 +3587,7 @@ static struct platform_driver dpaa_driver = { }, .id_table = dpaa_devtype, .probe = dpaa_eth_probe, - .remove_new = dpaa_remove + .remove = dpaa_remove }; static int __init dpaa_load(void) diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c index 9d9fcec41488..1b55047c0237 100644 --- a/drivers/net/ethernet/freescale/fec_main.c +++ b/drivers/net/ethernet/freescale/fec_main.c @@ -4766,7 +4766,7 @@ static struct platform_driver fec_driver = { }, .id_table = fec_devtype, .probe = fec_probe, - .remove_new = fec_drv_remove, + .remove = fec_drv_remove, }; module_platform_driver(fec_driver); diff --git a/drivers/net/ethernet/freescale/fec_mpc52xx.c b/drivers/net/ethernet/freescale/fec_mpc52xx.c index ebae71ec26c6..2bfaf14f65c8 100644 --- a/drivers/net/ethernet/freescale/fec_mpc52xx.c +++ b/drivers/net/ethernet/freescale/fec_mpc52xx.c @@ -1040,7 +1040,7 @@ static struct platform_driver mpc52xx_fec_driver = { .of_match_table = mpc52xx_fec_match, }, .probe = mpc52xx_fec_probe, - .remove_new = mpc52xx_fec_remove, + .remove = mpc52xx_fec_remove, #ifdef CONFIG_PM .suspend = mpc52xx_fec_of_suspend, .resume = mpc52xx_fec_of_resume, diff --git a/drivers/net/ethernet/freescale/fec_mpc52xx_phy.c b/drivers/net/ethernet/freescale/fec_mpc52xx_phy.c index 39689826cc8f..3d073f0fae63 100644 --- a/drivers/net/ethernet/freescale/fec_mpc52xx_phy.c +++ b/drivers/net/ethernet/freescale/fec_mpc52xx_phy.c @@ -94,7 +94,7 @@ static int mpc52xx_fec_mdio_probe(struct platform_device *of) goto out_free; } - snprintf(bus->id, MII_BUS_ID_SIZE, "%x", res.start); + snprintf(bus->id, MII_BUS_ID_SIZE, "%pa", &res.start); bus->priv = priv; bus->parent = dev; @@ -144,7 +144,7 @@ struct platform_driver mpc52xx_fec_mdio_driver = { .of_match_table = mpc52xx_fec_mdio_match, }, .probe = mpc52xx_fec_mdio_probe, - .remove_new = mpc52xx_fec_mdio_remove, + .remove = mpc52xx_fec_mdio_remove, }; /* let fec driver call it, since this has to be registered before it */ diff --git a/drivers/net/ethernet/freescale/fec_ptp.c b/drivers/net/ethernet/freescale/fec_ptp.c index a4eb6edb850a..7f6b57432071 100644 --- a/drivers/net/ethernet/freescale/fec_ptp.c +++ b/drivers/net/ethernet/freescale/fec_ptp.c @@ -84,8 +84,7 @@ #define FEC_CC_MULT (1 << 31) #define FEC_COUNTER_PERIOD (1 << 31) #define PPS_OUPUT_RELOAD_PERIOD NSEC_PER_SEC -#define FEC_CHANNLE_0 0 -#define DEFAULT_PPS_CHANNEL FEC_CHANNLE_0 +#define DEFAULT_PPS_CHANNEL 0 #define FEC_PTP_MAX_NSEC_PERIOD 4000000000ULL #define FEC_PTP_MAX_NSEC_COUNTER 0x80000000ULL @@ 
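
The dpaa conversion above replaces a hand-maintained multicast address list with __dev_mc_sync(), which has the core track each address and invoke the sync/unsync hooks only for addresses added or removed since the previous ndo_set_rx_mode call. The minimal shape of the pattern (foo_hw_* are hypothetical hardware hooks):

    #include <linux/netdevice.h>

    /* Hypothetical hardware hooks: program/remove one filter entry. */
    int foo_hw_add_mc_filter(void *priv, const u8 *addr);
    int foo_hw_del_mc_filter(void *priv, const u8 *addr);

    static int foo_mc_sync(struct net_device *ndev, const u8 *addr)
    {
        return foo_hw_add_mc_filter(netdev_priv(ndev), addr);
    }

    static int foo_mc_unsync(struct net_device *ndev, const u8 *addr)
    {
        return foo_hw_del_mc_filter(netdev_priv(ndev), addr);
    }

    static void foo_set_rx_mode(struct net_device *ndev)
    {
        /* Only the delta since the last call reaches the hooks; the
         * core keeps the per-address reference counts.
         */
        __dev_mc_sync(ndev, foo_mc_sync, foo_mc_unsync);
    }
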
-525,7 +524,6 @@ static int fec_ptp_enable(struct ptp_clock_info *ptp, int ret = 0; if (rq->type == PTP_CLK_REQ_PPS) { - fep->pps_channel = DEFAULT_PPS_CHANNEL; fep->reload_period = PPS_OUPUT_RELOAD_PERIOD; ret = fec_ptp_enable_pps(fep, on); @@ -536,10 +534,9 @@ static int fec_ptp_enable(struct ptp_clock_info *ptp, if (rq->perout.flags) return -EOPNOTSUPP; - if (rq->perout.index != DEFAULT_PPS_CHANNEL) + if (rq->perout.index != fep->pps_channel) return -EOPNOTSUPP; - fep->pps_channel = DEFAULT_PPS_CHANNEL; period.tv_sec = rq->perout.period.sec; period.tv_nsec = rq->perout.period.nsec; period_ns = timespec64_to_ns(&period); @@ -707,12 +704,16 @@ void fec_ptp_init(struct platform_device *pdev, int irq_idx) { struct net_device *ndev = platform_get_drvdata(pdev); struct fec_enet_private *fep = netdev_priv(ndev); + struct device_node *np = fep->pdev->dev.of_node; int irq; int ret; fep->ptp_caps.owner = THIS_MODULE; strscpy(fep->ptp_caps.name, "fec ptp", sizeof(fep->ptp_caps.name)); + fep->pps_channel = DEFAULT_PPS_CHANNEL; + of_property_read_u32(np, "fsl,pps-channel", &fep->pps_channel); + fep->ptp_caps.max_adj = 250000000; fep->ptp_caps.n_alarm = 0; fep->ptp_caps.n_ext_ts = 0; diff --git a/drivers/net/ethernet/freescale/fman/fman_dtsec.c b/drivers/net/ethernet/freescale/fman/fman_dtsec.c index 3088da7adf0f..85617bb94959 100644 --- a/drivers/net/ethernet/freescale/fman/fman_dtsec.c +++ b/drivers/net/ethernet/freescale/fman/fman_dtsec.c @@ -1415,7 +1415,6 @@ int dtsec_initialization(struct mac_device *mac_dev, mac_dev->set_exception = dtsec_set_exception; mac_dev->set_allmulti = dtsec_set_allmulti; mac_dev->set_tstamp = dtsec_set_tstamp; - mac_dev->set_multi = fman_set_multi; mac_dev->enable = dtsec_enable; mac_dev->disable = dtsec_disable; diff --git a/drivers/net/ethernet/freescale/fman/fman_memac.c b/drivers/net/ethernet/freescale/fman/fman_memac.c index 796e6f4e583d..3925441143fa 100644 --- a/drivers/net/ethernet/freescale/fman/fman_memac.c +++ b/drivers/net/ethernet/freescale/fman/fman_memac.c @@ -1087,7 +1087,6 @@ int memac_initialization(struct mac_device *mac_dev, mac_dev->set_exception = memac_set_exception; mac_dev->set_allmulti = memac_set_allmulti; mac_dev->set_tstamp = memac_set_tstamp; - mac_dev->set_multi = fman_set_multi; mac_dev->enable = memac_enable; mac_dev->disable = memac_disable; diff --git a/drivers/net/ethernet/freescale/fman/fman_port.c b/drivers/net/ethernet/freescale/fman/fman_port.c index f17a4e511510..e977389f7088 100644 --- a/drivers/net/ethernet/freescale/fman/fman_port.c +++ b/drivers/net/ethernet/freescale/fman/fman_port.c @@ -987,7 +987,7 @@ static int init_low_level_driver(struct fman_port *port) return -ENODEV; } - /* The code bellow is a trick so the FM will not release the buffer + /* The code below is a trick so the FM will not release the buffer * to BM nor will try to enqueue the frame to QM */ if (port->port_type == FMAN_PORT_TYPE_TX) { diff --git a/drivers/net/ethernet/freescale/fman/fman_tgec.c b/drivers/net/ethernet/freescale/fman/fman_tgec.c index c2261d26db5b..fecfca6eba03 100644 --- a/drivers/net/ethernet/freescale/fman/fman_tgec.c +++ b/drivers/net/ethernet/freescale/fman/fman_tgec.c @@ -771,7 +771,6 @@ int tgec_initialization(struct mac_device *mac_dev, mac_dev->set_exception = tgec_set_exception; mac_dev->set_allmulti = tgec_set_allmulti; mac_dev->set_tstamp = tgec_set_tstamp; - mac_dev->set_multi = fman_set_multi; mac_dev->enable = tgec_enable; mac_dev->disable = tgec_disable; diff --git a/drivers/net/ethernet/freescale/fman/mac.c 
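
fec_ptp_init() above now presets pps_channel and lets an optional "fsl,pps-channel" device-tree property override it. The idiom works because of_property_read_u32() returns an error and leaves the output argument untouched when the property is absent, so the preloaded default survives. In isolation (sketch):

    #include <linux/of.h>

    static u32 read_pps_channel(struct device_node *np)
    {
        u32 channel = 0;    /* default channel, as in the patch */

        of_property_read_u32(np, "fsl,pps-channel", &channel);
        return channel;
    }
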
b/drivers/net/ethernet/freescale/fman/mac.c index 11da139082e1..c7bff9490ce0 100644 --- a/drivers/net/ethernet/freescale/fman/mac.c +++ b/drivers/net/ethernet/freescale/fman/mac.c @@ -32,8 +32,6 @@ MODULE_DESCRIPTION("FSL FMan MAC API based driver"); struct mac_priv_s { u8 cell_index; struct fman *fman; - /* List of multicast addresses */ - struct list_head mc_addr_list; struct platform_device *eth_dev; u16 speed; }; @@ -57,44 +55,6 @@ static void mac_exception(struct mac_device *mac_dev, __func__, ex); } -int fman_set_multi(struct net_device *net_dev, struct mac_device *mac_dev) -{ - struct mac_priv_s *priv; - struct mac_address *old_addr, *tmp; - struct netdev_hw_addr *ha; - int err; - enet_addr_t *addr; - - priv = mac_dev->priv; - - /* Clear previous address list */ - list_for_each_entry_safe(old_addr, tmp, &priv->mc_addr_list, list) { - addr = (enet_addr_t *)old_addr->addr; - err = mac_dev->remove_hash_mac_addr(mac_dev->fman_mac, addr); - if (err < 0) - return err; - - list_del(&old_addr->list); - kfree(old_addr); - } - - /* Add all the addresses from the new list */ - netdev_for_each_mc_addr(ha, net_dev) { - addr = (enet_addr_t *)ha->addr; - err = mac_dev->add_hash_mac_addr(mac_dev->fman_mac, addr); - if (err < 0) - return err; - - tmp = kmalloc(sizeof(*tmp), GFP_ATOMIC); - if (!tmp) - return -ENOMEM; - - ether_addr_copy(tmp->addr, ha->addr); - list_add(&tmp->list, &priv->mc_addr_list); - } - return 0; -} - static DEFINE_MUTEX(eth_lock); static struct platform_device *dpaa_eth_add_device(int fman_id, @@ -181,8 +141,6 @@ static int mac_probe(struct platform_device *_of_dev) mac_dev->priv = priv; mac_dev->dev = dev; - INIT_LIST_HEAD(&priv->mc_addr_list); - /* Get the FM node */ dev_node = of_get_parent(mac_node); if (!dev_node) { @@ -379,7 +337,7 @@ static struct platform_driver mac_driver = { .of_match_table = mac_match, }, .probe = mac_probe, - .remove_new = mac_remove, + .remove = mac_remove, }; builtin_platform_driver(mac_driver); diff --git a/drivers/net/ethernet/freescale/fman/mac.h b/drivers/net/ethernet/freescale/fman/mac.h index 8b5b43d50f8e..955ace338965 100644 --- a/drivers/net/ethernet/freescale/fman/mac.h +++ b/drivers/net/ethernet/freescale/fman/mac.h @@ -40,8 +40,6 @@ struct mac_device { int (*change_addr)(struct fman_mac *mac_dev, const enet_addr_t *enet_addr); int (*set_allmulti)(struct fman_mac *mac_dev, bool enable); int (*set_tstamp)(struct fman_mac *mac_dev, bool enable); - int (*set_multi)(struct net_device *net_dev, - struct mac_device *mac_dev); int (*set_exception)(struct fman_mac *mac_dev, enum fman_mac_exceptions exception, bool enable); int (*add_hash_mac_addr)(struct fman_mac *mac_dev, diff --git a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c index 3425c4a6abcb..f563692a4a00 100644 --- a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c +++ b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c @@ -1052,7 +1052,7 @@ static struct platform_driver fs_enet_driver = { .of_match_table = fs_enet_match, }, .probe = fs_enet_probe, - .remove_new = fs_enet_remove, + .remove = fs_enet_remove, }; #ifdef CONFIG_NET_POLL_CONTROLLER diff --git a/drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c b/drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c index 2e210a003558..66038e2a4ae3 100644 --- a/drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c +++ b/drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c @@ -123,7 +123,7 @@ static int fs_mii_bitbang_init(struct mii_bus *bus, struct device_node *np) * 
we get is an int, and the odds of multiple bitbang mdio buses * is low enough that it's not worth going too crazy. */ - snprintf(bus->id, MII_BUS_ID_SIZE, "%x", res.start); + snprintf(bus->id, MII_BUS_ID_SIZE, "%pa", &res.start); data = of_get_property(np, "fsl,mdio-pin", &len); if (!data || len != 4) @@ -214,7 +214,7 @@ static struct platform_driver fs_enet_bb_mdio_driver = { .of_match_table = fs_enet_mdio_bb_match, }, .probe = fs_enet_mdio_probe, - .remove_new = fs_enet_mdio_remove, + .remove = fs_enet_mdio_remove, }; module_platform_driver(fs_enet_bb_mdio_driver); diff --git a/drivers/net/ethernet/freescale/fs_enet/mii-fec.c b/drivers/net/ethernet/freescale/fs_enet/mii-fec.c index 93d91e8ad0de..dec31b638941 100644 --- a/drivers/net/ethernet/freescale/fs_enet/mii-fec.c +++ b/drivers/net/ethernet/freescale/fs_enet/mii-fec.c @@ -212,7 +212,7 @@ static struct platform_driver fs_enet_fec_mdio_driver = { .of_match_table = fs_enet_mdio_fec_match, }, .probe = fs_enet_mdio_probe, - .remove_new = fs_enet_mdio_remove, + .remove = fs_enet_mdio_remove, }; module_platform_driver(fs_enet_fec_mdio_driver); diff --git a/drivers/net/ethernet/freescale/fsl_pq_mdio.c b/drivers/net/ethernet/freescale/fsl_pq_mdio.c index 026f7270a54d..56d2f79fb7e3 100644 --- a/drivers/net/ethernet/freescale/fsl_pq_mdio.c +++ b/drivers/net/ethernet/freescale/fsl_pq_mdio.c @@ -526,7 +526,7 @@ static struct platform_driver fsl_pq_mdio_driver = { .of_match_table = fsl_pq_mdio_match, }, .probe = fsl_pq_mdio_probe, - .remove_new = fsl_pq_mdio_remove, + .remove = fsl_pq_mdio_remove, }; module_platform_driver(fsl_pq_mdio_driver); diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c index ecb1703ea150..435138f4699d 100644 --- a/drivers/net/ethernet/freescale/gianfar.c +++ b/drivers/net/ethernet/freescale/gianfar.c @@ -2207,8 +2207,9 @@ static void gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue) if (unlikely(do_tstamp)) { struct skb_shared_hwtstamps shhwtstamps; - u64 *ns = (u64 *)(((uintptr_t)skb->data + 0x10) & - ~0x7UL); + __be64 *ns; + + ns = (__be64 *)(((uintptr_t)skb->data + 0x10) & ~0x7UL); memset(&shhwtstamps, 0, sizeof(shhwtstamps)); shhwtstamps.hwtstamp = ns_to_ktime(be64_to_cpu(*ns)); @@ -2471,7 +2472,7 @@ static void gfar_process_frame(struct net_device *ndev, struct sk_buff *skb) /* Get receive timestamp from the skb */ if (priv->hwts_rx_en) { struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb); - u64 *ns = (u64 *) skb->data; + __be64 *ns = (__be64 *)skb->data; memset(shhwtstamps, 0, sizeof(*shhwtstamps)); shhwtstamps->hwtstamp = ns_to_ktime(be64_to_cpu(*ns)); @@ -3642,7 +3643,7 @@ static struct platform_driver gfar_driver = { .of_match_table = gfar_match, }, .probe = gfar_probe, - .remove_new = gfar_remove, + .remove = gfar_remove, }; module_platform_driver(gfar_driver); diff --git a/drivers/net/ethernet/freescale/ucc_geth.c b/drivers/net/ethernet/freescale/ucc_geth.c index ab421243a419..d3ddca22d6b0 100644 --- a/drivers/net/ethernet/freescale/ucc_geth.c +++ b/drivers/net/ethernet/freescale/ucc_geth.c @@ -3786,7 +3786,7 @@ static struct platform_driver ucc_geth_driver = { .of_match_table = ucc_geth_match, }, .probe = ucc_geth_probe, - .remove_new = ucc_geth_remove, + .remove = ucc_geth_remove, .suspend = ucc_geth_suspend, .resume = ucc_geth_resume, }; diff --git a/drivers/net/ethernet/google/Kconfig b/drivers/net/ethernet/google/Kconfig index 8641a00f8e63..564862a57124 100644 --- a/drivers/net/ethernet/google/Kconfig +++ b/drivers/net/ethernet/google/Kconfig @@ 
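
Two recurring fix patterns appear in the hunks above: gianfar's raw timestamp pointers become __be64 * so sparse can verify that be64_to_cpu() is applied exactly once to big-endian device data, and the MDIO bus IDs print a resource_size_t through "%pa" (which takes a pointer to the resource member) instead of truncating it through "%x". A sketch of the annotated timestamp read:

    #include <linux/ktime.h>
    #include <linux/types.h>

    /* The hardware prepends a big-endian 64-bit nanosecond count to the
     * frame; declaring the pointer __be64 * makes a missing or doubled
     * byte swap a sparse warning instead of a silent bug.
     */
    static ktime_t fetch_hw_stamp(const void *frame_start)
    {
        const __be64 *ns = frame_start;

        return ns_to_ktime(be64_to_cpu(*ns));
    }
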
-18,6 +18,7 @@ if NET_VENDOR_GOOGLE config GVE tristate "Google Virtual NIC (gVNIC) support" depends on (PCI_MSI && (X86 || CPU_LITTLE_ENDIAN)) + select PAGE_POOL help This driver supports Google Virtual NIC (gVNIC)" diff --git a/drivers/net/ethernet/google/gve/Makefile b/drivers/net/ethernet/google/gve/Makefile index 9ed07080b38a..4520f1c07a63 100644 --- a/drivers/net/ethernet/google/gve/Makefile +++ b/drivers/net/ethernet/google/gve/Makefile @@ -1,4 +1,5 @@ # Makefile for the Google virtual Ethernet (gve) driver obj-$(CONFIG_GVE) += gve.o -gve-objs := gve_main.o gve_tx.o gve_tx_dqo.o gve_rx.o gve_rx_dqo.o gve_ethtool.o gve_adminq.o gve_utils.o gve_flow_rule.o +gve-objs := gve_main.o gve_tx.o gve_tx_dqo.o gve_rx.o gve_rx_dqo.o gve_ethtool.o gve_adminq.o gve_utils.o gve_flow_rule.o \ + gve_buffer_mgmt_dqo.o diff --git a/drivers/net/ethernet/google/gve/gve.h b/drivers/net/ethernet/google/gve/gve.h index 301fa1ea4f51..dd92949bb214 100644 --- a/drivers/net/ethernet/google/gve/gve.h +++ b/drivers/net/ethernet/google/gve/gve.h @@ -13,6 +13,7 @@ #include <linux/netdevice.h> #include <linux/pci.h> #include <linux/u64_stats_sync.h> +#include <net/page_pool/helpers.h> #include <net/xdp.h> #include "gve_desc.h" @@ -60,6 +61,8 @@ #define GVE_DEFAULT_RX_BUFFER_OFFSET 2048 +#define GVE_PAGE_POOL_SIZE_MULTIPLIER 4 + #define GVE_FLOW_RULES_CACHE_SIZE \ (GVE_ADMINQ_BUFFER_SIZE / sizeof(struct gve_adminq_queried_flow_rule)) #define GVE_FLOW_RULE_IDS_CACHE_SIZE \ @@ -102,6 +105,7 @@ struct gve_rx_slot_page_info { struct page *page; void *page_address; u32 page_offset; /* offset to write to in page */ + unsigned int buf_size; int pagecnt_bias; /* expected pagecnt if only the driver has a ref */ u16 pad; /* adjustment for rx padding */ u8 can_flip; /* tracks if the networking stack is using the page */ @@ -273,6 +277,8 @@ struct gve_rx_ring { /* Address info of the buffers for header-split */ struct gve_header_buf hdr_bufs; + + struct page_pool *page_pool; } dqo; }; @@ -1162,6 +1168,36 @@ void gve_rx_stop_ring_gqi(struct gve_priv *priv, int idx); u16 gve_get_pkt_buf_size(const struct gve_priv *priv, bool enable_hplit); bool gve_header_split_supported(const struct gve_priv *priv); int gve_set_hsplit_config(struct gve_priv *priv, u8 tcp_data_split); +/* rx buffer handling */ +int gve_buf_ref_cnt(struct gve_rx_buf_state_dqo *bs); +void gve_free_page_dqo(struct gve_priv *priv, struct gve_rx_buf_state_dqo *bs, + bool free_page); +struct gve_rx_buf_state_dqo *gve_alloc_buf_state(struct gve_rx_ring *rx); +bool gve_buf_state_is_allocated(struct gve_rx_ring *rx, + struct gve_rx_buf_state_dqo *buf_state); +void gve_free_buf_state(struct gve_rx_ring *rx, + struct gve_rx_buf_state_dqo *buf_state); +struct gve_rx_buf_state_dqo *gve_dequeue_buf_state(struct gve_rx_ring *rx, + struct gve_index_list *list); +void gve_enqueue_buf_state(struct gve_rx_ring *rx, struct gve_index_list *list, + struct gve_rx_buf_state_dqo *buf_state); +struct gve_rx_buf_state_dqo *gve_get_recycled_buf_state(struct gve_rx_ring *rx); +void gve_try_recycle_buf(struct gve_priv *priv, struct gve_rx_ring *rx, + struct gve_rx_buf_state_dqo *buf_state); +void gve_free_to_page_pool(struct gve_rx_ring *rx, + struct gve_rx_buf_state_dqo *buf_state, + bool allow_direct); +int gve_alloc_qpl_page_dqo(struct gve_rx_ring *rx, + struct gve_rx_buf_state_dqo *buf_state); +void gve_free_qpl_page_dqo(struct gve_rx_buf_state_dqo *buf_state); +void gve_reuse_buffer(struct gve_rx_ring *rx, + struct gve_rx_buf_state_dqo *buf_state); +void gve_free_buffer(struct 
gve_rx_ring *rx, + struct gve_rx_buf_state_dqo *buf_state); +int gve_alloc_buffer(struct gve_rx_ring *rx, struct gve_rx_desc_dqo *desc); +struct page_pool *gve_rx_create_page_pool(struct gve_priv *priv, + struct gve_rx_ring *rx); + /* Reset */ void gve_schedule_reset(struct gve_priv *priv); int gve_reset(struct gve_priv *priv, bool attempt_teardown); diff --git a/drivers/net/ethernet/google/gve/gve_buffer_mgmt_dqo.c b/drivers/net/ethernet/google/gve/gve_buffer_mgmt_dqo.c new file mode 100644 index 000000000000..05bf1f80a79c --- /dev/null +++ b/drivers/net/ethernet/google/gve/gve_buffer_mgmt_dqo.c @@ -0,0 +1,312 @@ +// SPDX-License-Identifier: (GPL-2.0 OR MIT) +/* Google virtual Ethernet (gve) driver + * + * Copyright (C) 2015-2024 Google, Inc. + */ + +#include "gve.h" +#include "gve_utils.h" + +int gve_buf_ref_cnt(struct gve_rx_buf_state_dqo *bs) +{ + return page_count(bs->page_info.page) - bs->page_info.pagecnt_bias; +} + +struct gve_rx_buf_state_dqo *gve_alloc_buf_state(struct gve_rx_ring *rx) +{ + struct gve_rx_buf_state_dqo *buf_state; + s16 buffer_id; + + buffer_id = rx->dqo.free_buf_states; + if (unlikely(buffer_id == -1)) + return NULL; + + buf_state = &rx->dqo.buf_states[buffer_id]; + + /* Remove buf_state from free list */ + rx->dqo.free_buf_states = buf_state->next; + + /* Point buf_state to itself to mark it as allocated */ + buf_state->next = buffer_id; + + return buf_state; +} + +bool gve_buf_state_is_allocated(struct gve_rx_ring *rx, + struct gve_rx_buf_state_dqo *buf_state) +{ + s16 buffer_id = buf_state - rx->dqo.buf_states; + + return buf_state->next == buffer_id; +} + +void gve_free_buf_state(struct gve_rx_ring *rx, + struct gve_rx_buf_state_dqo *buf_state) +{ + s16 buffer_id = buf_state - rx->dqo.buf_states; + + buf_state->next = rx->dqo.free_buf_states; + rx->dqo.free_buf_states = buffer_id; +} + +struct gve_rx_buf_state_dqo *gve_dequeue_buf_state(struct gve_rx_ring *rx, + struct gve_index_list *list) +{ + struct gve_rx_buf_state_dqo *buf_state; + s16 buffer_id; + + buffer_id = list->head; + if (unlikely(buffer_id == -1)) + return NULL; + + buf_state = &rx->dqo.buf_states[buffer_id]; + + /* Remove buf_state from list */ + list->head = buf_state->next; + if (buf_state->next == -1) + list->tail = -1; + + /* Point buf_state to itself to mark it as allocated */ + buf_state->next = buffer_id; + + return buf_state; +} + +void gve_enqueue_buf_state(struct gve_rx_ring *rx, struct gve_index_list *list, + struct gve_rx_buf_state_dqo *buf_state) +{ + s16 buffer_id = buf_state - rx->dqo.buf_states; + + buf_state->next = -1; + + if (list->head == -1) { + list->head = buffer_id; + list->tail = buffer_id; + } else { + int tail = list->tail; + + rx->dqo.buf_states[tail].next = buffer_id; + list->tail = buffer_id; + } +} + +struct gve_rx_buf_state_dqo *gve_get_recycled_buf_state(struct gve_rx_ring *rx) +{ + struct gve_rx_buf_state_dqo *buf_state; + int i; + + /* Recycled buf states are immediately usable. */ + buf_state = gve_dequeue_buf_state(rx, &rx->dqo.recycled_buf_states); + if (likely(buf_state)) + return buf_state; + + if (unlikely(rx->dqo.used_buf_states.head == -1)) + return NULL; + + /* Used buf states are only usable when ref count reaches 0, which means + * no SKBs refer to them. + * + * Search a limited number before giving up. 
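
The gve buffer states above are linked by s16 array indices rather than pointers: -1 terminates a list, and an entry whose next field equals its own index is, by convention, allocated. A standalone miniature of the same structure (illustrative, not the driver code):

    #include <assert.h>

    #define NSTATES 4

    struct state { short next; };

    static struct state states[NSTATES];
    static short free_head;

    static void init_free_list(void)
    {
        short i;

        for (i = 0; i < NSTATES; i++)
            states[i].next = (short)(i + 1 < NSTATES ? i + 1 : -1);
        free_head = 0;
    }

    static short alloc_state(void)
    {
        short id = free_head;

        if (id == -1)
            return -1;
        free_head = states[id].next;
        states[id].next = id;   /* self-link marks "allocated" */
        return id;
    }

    int main(void)
    {
        init_free_list();
        short a = alloc_state();

        assert(a == 0 && states[a].next == a);
        return 0;
    }
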
+ */ + for (i = 0; i < 5; i++) { + buf_state = gve_dequeue_buf_state(rx, &rx->dqo.used_buf_states); + if (gve_buf_ref_cnt(buf_state) == 0) { + rx->dqo.used_buf_states_cnt--; + return buf_state; + } + + gve_enqueue_buf_state(rx, &rx->dqo.used_buf_states, buf_state); + } + + return NULL; +} + +int gve_alloc_qpl_page_dqo(struct gve_rx_ring *rx, + struct gve_rx_buf_state_dqo *buf_state) +{ + struct gve_priv *priv = rx->gve; + u32 idx; + + idx = rx->dqo.next_qpl_page_idx; + if (idx >= gve_get_rx_pages_per_qpl_dqo(priv->rx_desc_cnt)) { + net_err_ratelimited("%s: Out of QPL pages\n", + priv->dev->name); + return -ENOMEM; + } + buf_state->page_info.page = rx->dqo.qpl->pages[idx]; + buf_state->addr = rx->dqo.qpl->page_buses[idx]; + rx->dqo.next_qpl_page_idx++; + buf_state->page_info.page_offset = 0; + buf_state->page_info.page_address = + page_address(buf_state->page_info.page); + buf_state->page_info.buf_size = priv->data_buffer_size_dqo; + buf_state->last_single_ref_offset = 0; + + /* The page already has 1 ref. */ + page_ref_add(buf_state->page_info.page, INT_MAX - 1); + buf_state->page_info.pagecnt_bias = INT_MAX; + + return 0; +} + +void gve_free_qpl_page_dqo(struct gve_rx_buf_state_dqo *buf_state) +{ + if (!buf_state->page_info.page) + return; + + page_ref_sub(buf_state->page_info.page, + buf_state->page_info.pagecnt_bias - 1); + buf_state->page_info.page = NULL; +} + +void gve_try_recycle_buf(struct gve_priv *priv, struct gve_rx_ring *rx, + struct gve_rx_buf_state_dqo *buf_state) +{ + const u16 data_buffer_size = priv->data_buffer_size_dqo; + int pagecount; + + /* Can't reuse if we only fit one buffer per page */ + if (data_buffer_size * 2 > PAGE_SIZE) + goto mark_used; + + pagecount = gve_buf_ref_cnt(buf_state); + + /* Record the offset when we have a single remaining reference. + * + * When this happens, we know all of the other offsets of the page are + * usable. + */ + if (pagecount == 1) { + buf_state->last_single_ref_offset = + buf_state->page_info.page_offset; + } + + /* Use the next buffer sized chunk in the page. */ + buf_state->page_info.page_offset += data_buffer_size; + buf_state->page_info.page_offset &= (PAGE_SIZE - 1); + + /* If we wrap around to the same offset without ever dropping to 1 + * reference, then we don't know if this offset was ever freed. 
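
gve_alloc_qpl_page_dqo() above pre-charges the page refcount to INT_MAX and records that value as pagecnt_bias, so gve_buf_ref_cnt() can derive the number of outstanding users as page_count() minus the bias without taking a page reference per RX buffer. The accounting in isolation (sketch):

    #include <linux/mm.h>

    /* Pre-charge the refcount once; afterwards, in-flight users are
     * inferred arithmetically instead of via per-buffer get_page().
     */
    static int setup_bias(struct page *page)
    {
        page_ref_add(page, INT_MAX - 1);    /* page starts with one ref */
        return INT_MAX;                     /* store as pagecnt_bias */
    }

    static int refs_in_flight(struct page *page, int bias)
    {
        return page_count(page) - bias;     /* 0 => no SKB still holds it */
    }

    /* Teardown returns the unused bias, leaving the original single ref:
     * page_ref_sub(page, bias - 1);
     */
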
+ */ + if (buf_state->page_info.page_offset == + buf_state->last_single_ref_offset) { + goto mark_used; + } + + gve_enqueue_buf_state(rx, &rx->dqo.recycled_buf_states, buf_state); + return; + +mark_used: + gve_enqueue_buf_state(rx, &rx->dqo.used_buf_states, buf_state); + rx->dqo.used_buf_states_cnt++; +} + +void gve_free_to_page_pool(struct gve_rx_ring *rx, + struct gve_rx_buf_state_dqo *buf_state, + bool allow_direct) +{ + struct page *page = buf_state->page_info.page; + + if (!page) + return; + + page_pool_put_page(page->pp, page, buf_state->page_info.buf_size, + allow_direct); + buf_state->page_info.page = NULL; +} + +static int gve_alloc_from_page_pool(struct gve_rx_ring *rx, + struct gve_rx_buf_state_dqo *buf_state) +{ + struct gve_priv *priv = rx->gve; + struct page *page; + + buf_state->page_info.buf_size = priv->data_buffer_size_dqo; + page = page_pool_alloc(rx->dqo.page_pool, + &buf_state->page_info.page_offset, + &buf_state->page_info.buf_size, GFP_ATOMIC); + + if (!page) + return -ENOMEM; + + buf_state->page_info.page = page; + buf_state->page_info.page_address = page_address(page); + buf_state->addr = page_pool_get_dma_addr(page); + + return 0; +} + +struct page_pool *gve_rx_create_page_pool(struct gve_priv *priv, + struct gve_rx_ring *rx) +{ + u32 ntfy_id = gve_rx_idx_to_ntfy(priv, rx->q_num); + struct page_pool_params pp = { + .flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV, + .order = 0, + .pool_size = GVE_PAGE_POOL_SIZE_MULTIPLIER * priv->rx_desc_cnt, + .dev = &priv->pdev->dev, + .netdev = priv->dev, + .napi = &priv->ntfy_blocks[ntfy_id].napi, + .max_len = PAGE_SIZE, + .dma_dir = DMA_FROM_DEVICE, + }; + + return page_pool_create(&pp); +} + +void gve_free_buffer(struct gve_rx_ring *rx, + struct gve_rx_buf_state_dqo *buf_state) +{ + if (rx->dqo.page_pool) { + gve_free_to_page_pool(rx, buf_state, true); + gve_free_buf_state(rx, buf_state); + } else { + gve_enqueue_buf_state(rx, &rx->dqo.recycled_buf_states, + buf_state); + } +} + +void gve_reuse_buffer(struct gve_rx_ring *rx, + struct gve_rx_buf_state_dqo *buf_state) +{ + if (rx->dqo.page_pool) { + buf_state->page_info.page = NULL; + gve_free_buf_state(rx, buf_state); + } else { + gve_dec_pagecnt_bias(&buf_state->page_info); + gve_try_recycle_buf(rx->gve, rx, buf_state); + } +} + +int gve_alloc_buffer(struct gve_rx_ring *rx, struct gve_rx_desc_dqo *desc) +{ + struct gve_rx_buf_state_dqo *buf_state; + + if (rx->dqo.page_pool) { + buf_state = gve_alloc_buf_state(rx); + if (WARN_ON_ONCE(!buf_state)) + return -ENOMEM; + + if (gve_alloc_from_page_pool(rx, buf_state)) + goto free_buf_state; + } else { + buf_state = gve_get_recycled_buf_state(rx); + if (unlikely(!buf_state)) { + buf_state = gve_alloc_buf_state(rx); + if (unlikely(!buf_state)) + return -ENOMEM; + + if (unlikely(gve_alloc_qpl_page_dqo(rx, buf_state))) + goto free_buf_state; + } + } + desc->buf_id = cpu_to_le16(buf_state - rx->dqo.buf_states); + desc->buf_addr = cpu_to_le64(buf_state->addr + + buf_state->page_info.page_offset); + + return 0; + +free_buf_state: + gve_free_buf_state(rx, buf_state); + return -ENOMEM; +} diff --git a/drivers/net/ethernet/google/gve/gve_main.c b/drivers/net/ethernet/google/gve/gve_main.c index 661566db68c8..e171ca248f9a 100644 --- a/drivers/net/ethernet/google/gve/gve_main.c +++ b/drivers/net/ethernet/google/gve/gve_main.c @@ -1875,6 +1875,11 @@ static void gve_turndown(struct gve_priv *priv) if (!gve_tx_was_added_to_block(priv, idx)) continue; + + if (idx < priv->tx_cfg.num_queues) + netif_queue_set_napi(priv->dev, idx, + 
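
gve_rx_create_page_pool() above delegates DMA mapping and device syncing to the pool via PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV. For context, a minimal allocate/use/recycle round trip against such a pool (sketch; passing true to page_pool_put_page() is only safe from the pool's own NAPI context):

    #include <net/page_pool/helpers.h>

    static int demo_pool_cycle(struct page_pool *pool)
    {
        unsigned int offset;
        unsigned int size = 2048;   /* may be adjusted by the pool */
        struct page *page;
        dma_addr_t dma;

        page = page_pool_alloc(pool, &offset, &size, GFP_ATOMIC);
        if (!page)
            return -ENOMEM;

        dma = page_pool_get_dma_addr(page) + offset;    /* pool mapped it */
        (void)dma;      /* post this address to the hardware here */

        /* Done: recycle into the pool's cache rather than freeing. */
        page_pool_put_page(pool, page, size, true);
        return 0;
    }
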
NETDEV_QUEUE_TYPE_TX, NULL); + napi_disable(&block->napi); } for (idx = 0; idx < priv->rx_cfg.num_queues; idx++) { @@ -1883,6 +1888,9 @@ static void gve_turndown(struct gve_priv *priv) if (!gve_rx_was_added_to_block(priv, idx)) continue; + + netif_queue_set_napi(priv->dev, idx, NETDEV_QUEUE_TYPE_RX, + NULL); napi_disable(&block->napi); } @@ -1909,6 +1917,12 @@ static void gve_turnup(struct gve_priv *priv) continue; napi_enable(&block->napi); + + if (idx < priv->tx_cfg.num_queues) + netif_queue_set_napi(priv->dev, idx, + NETDEV_QUEUE_TYPE_TX, + &block->napi); + if (gve_is_gqi(priv)) { iowrite32be(0, gve_irq_doorbell(priv, block)); } else { @@ -1931,6 +1945,9 @@ static void gve_turnup(struct gve_priv *priv) continue; napi_enable(&block->napi); + netif_queue_set_napi(priv->dev, idx, NETDEV_QUEUE_TYPE_RX, + &block->napi); + if (gve_is_gqi(priv)) { iowrite32be(0, gve_irq_doorbell(priv, block)); } else { @@ -2544,6 +2561,54 @@ static const struct netdev_queue_mgmt_ops gve_queue_mgmt_ops = { .ndo_queue_stop = gve_rx_queue_stop, }; +static void gve_get_rx_queue_stats(struct net_device *dev, int idx, + struct netdev_queue_stats_rx *rx_stats) +{ + struct gve_priv *priv = netdev_priv(dev); + struct gve_rx_ring *rx = &priv->rx[idx]; + unsigned int start; + + do { + start = u64_stats_fetch_begin(&rx->statss); + rx_stats->packets = rx->rpackets; + rx_stats->bytes = rx->rbytes; + rx_stats->alloc_fail = rx->rx_skb_alloc_fail + + rx->rx_buf_alloc_fail; + } while (u64_stats_fetch_retry(&rx->statss, start)); +} + +static void gve_get_tx_queue_stats(struct net_device *dev, int idx, + struct netdev_queue_stats_tx *tx_stats) +{ + struct gve_priv *priv = netdev_priv(dev); + struct gve_tx_ring *tx = &priv->tx[idx]; + unsigned int start; + + do { + start = u64_stats_fetch_begin(&tx->statss); + tx_stats->packets = tx->pkt_done; + tx_stats->bytes = tx->bytes_done; + } while (u64_stats_fetch_retry(&tx->statss, start)); +} + +static void gve_get_base_stats(struct net_device *dev, + struct netdev_queue_stats_rx *rx, + struct netdev_queue_stats_tx *tx) +{ + rx->packets = 0; + rx->bytes = 0; + rx->alloc_fail = 0; + + tx->packets = 0; + tx->bytes = 0; +} + +static const struct netdev_stat_ops gve_stat_ops = { + .get_queue_stats_rx = gve_get_rx_queue_stats, + .get_queue_stats_tx = gve_get_tx_queue_stats, + .get_base_stats = gve_get_base_stats, +}; + static int gve_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { int max_tx_queues, max_rx_queues; @@ -2599,6 +2664,7 @@ static int gve_probe(struct pci_dev *pdev, const struct pci_device_id *ent) dev->ethtool_ops = &gve_ethtool_ops; dev->netdev_ops = &gve_netdev_ops; dev->queue_mgmt_ops = &gve_queue_mgmt_ops; + dev->stat_ops = &gve_stat_ops; /* Set default and supported features. 
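
The new queue-stat callbacks above read counters that the datapath publishes under a u64_stats_sync; the fetch_begin/fetch_retry pair turns the read into a seqcount-style retry loop, so 64-bit counters cannot be torn on 32-bit machines. Both halves of the pattern (sketch):

    #include <linux/u64_stats_sync.h>

    struct demo_stats {
        u64 packets;
        u64 bytes;
        struct u64_stats_sync syncp;
    };

    /* Writer side (datapath): */
    static void demo_stats_add(struct demo_stats *s, u64 len)
    {
        u64_stats_update_begin(&s->syncp);
        s->packets++;
        s->bytes += len;
        u64_stats_update_end(&s->syncp);
    }

    /* Reader side: retry until a consistent snapshot is observed. */
    static void demo_stats_read(struct demo_stats *s, u64 *packets, u64 *bytes)
    {
        unsigned int start;

        do {
            start = u64_stats_fetch_begin(&s->syncp);
            *packets = s->packets;
            *bytes = s->bytes;
        } while (u64_stats_fetch_retry(&s->syncp, start));
    }
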
* diff --git a/drivers/net/ethernet/google/gve/gve_rx_dqo.c b/drivers/net/ethernet/google/gve/gve_rx_dqo.c index 1154c1d8f66f..8ac0047f1ada 100644 --- a/drivers/net/ethernet/google/gve/gve_rx_dqo.c +++ b/drivers/net/ethernet/google/gve/gve_rx_dqo.c @@ -16,189 +16,6 @@ #include <net/ipv6.h> #include <net/tcp.h> -static int gve_buf_ref_cnt(struct gve_rx_buf_state_dqo *bs) -{ - return page_count(bs->page_info.page) - bs->page_info.pagecnt_bias; -} - -static void gve_free_page_dqo(struct gve_priv *priv, - struct gve_rx_buf_state_dqo *bs, - bool free_page) -{ - page_ref_sub(bs->page_info.page, bs->page_info.pagecnt_bias - 1); - if (free_page) - gve_free_page(&priv->pdev->dev, bs->page_info.page, bs->addr, - DMA_FROM_DEVICE); - bs->page_info.page = NULL; -} - -static struct gve_rx_buf_state_dqo *gve_alloc_buf_state(struct gve_rx_ring *rx) -{ - struct gve_rx_buf_state_dqo *buf_state; - s16 buffer_id; - - buffer_id = rx->dqo.free_buf_states; - if (unlikely(buffer_id == -1)) - return NULL; - - buf_state = &rx->dqo.buf_states[buffer_id]; - - /* Remove buf_state from free list */ - rx->dqo.free_buf_states = buf_state->next; - - /* Point buf_state to itself to mark it as allocated */ - buf_state->next = buffer_id; - - return buf_state; -} - -static bool gve_buf_state_is_allocated(struct gve_rx_ring *rx, - struct gve_rx_buf_state_dqo *buf_state) -{ - s16 buffer_id = buf_state - rx->dqo.buf_states; - - return buf_state->next == buffer_id; -} - -static void gve_free_buf_state(struct gve_rx_ring *rx, - struct gve_rx_buf_state_dqo *buf_state) -{ - s16 buffer_id = buf_state - rx->dqo.buf_states; - - buf_state->next = rx->dqo.free_buf_states; - rx->dqo.free_buf_states = buffer_id; -} - -static struct gve_rx_buf_state_dqo * -gve_dequeue_buf_state(struct gve_rx_ring *rx, struct gve_index_list *list) -{ - struct gve_rx_buf_state_dqo *buf_state; - s16 buffer_id; - - buffer_id = list->head; - if (unlikely(buffer_id == -1)) - return NULL; - - buf_state = &rx->dqo.buf_states[buffer_id]; - - /* Remove buf_state from list */ - list->head = buf_state->next; - if (buf_state->next == -1) - list->tail = -1; - - /* Point buf_state to itself to mark it as allocated */ - buf_state->next = buffer_id; - - return buf_state; -} - -static void gve_enqueue_buf_state(struct gve_rx_ring *rx, - struct gve_index_list *list, - struct gve_rx_buf_state_dqo *buf_state) -{ - s16 buffer_id = buf_state - rx->dqo.buf_states; - - buf_state->next = -1; - - if (list->head == -1) { - list->head = buffer_id; - list->tail = buffer_id; - } else { - int tail = list->tail; - - rx->dqo.buf_states[tail].next = buffer_id; - list->tail = buffer_id; - } -} - -static struct gve_rx_buf_state_dqo * -gve_get_recycled_buf_state(struct gve_rx_ring *rx) -{ - struct gve_rx_buf_state_dqo *buf_state; - int i; - - /* Recycled buf states are immediately usable. */ - buf_state = gve_dequeue_buf_state(rx, &rx->dqo.recycled_buf_states); - if (likely(buf_state)) - return buf_state; - - if (unlikely(rx->dqo.used_buf_states.head == -1)) - return NULL; - - /* Used buf states are only usable when ref count reaches 0, which means - * no SKBs refer to them. - * - * Search a limited number before giving up. - */ - for (i = 0; i < 5; i++) { - buf_state = gve_dequeue_buf_state(rx, &rx->dqo.used_buf_states); - if (gve_buf_ref_cnt(buf_state) == 0) { - rx->dqo.used_buf_states_cnt--; - return buf_state; - } - - gve_enqueue_buf_state(rx, &rx->dqo.used_buf_states, buf_state); - } - - /* For QPL, we cannot allocate any new buffers and must - * wait for the existing ones to be available. 
- */ - if (rx->dqo.qpl) - return NULL; - - /* If there are no free buf states discard an entry from - * `used_buf_states` so it can be used. - */ - if (unlikely(rx->dqo.free_buf_states == -1)) { - buf_state = gve_dequeue_buf_state(rx, &rx->dqo.used_buf_states); - if (gve_buf_ref_cnt(buf_state) == 0) - return buf_state; - - gve_free_page_dqo(rx->gve, buf_state, true); - gve_free_buf_state(rx, buf_state); - } - - return NULL; -} - -static int gve_alloc_page_dqo(struct gve_rx_ring *rx, - struct gve_rx_buf_state_dqo *buf_state) -{ - struct gve_priv *priv = rx->gve; - u32 idx; - - if (!rx->dqo.qpl) { - int err; - - err = gve_alloc_page(priv, &priv->pdev->dev, - &buf_state->page_info.page, - &buf_state->addr, - DMA_FROM_DEVICE, GFP_ATOMIC); - if (err) - return err; - } else { - idx = rx->dqo.next_qpl_page_idx; - if (idx >= gve_get_rx_pages_per_qpl_dqo(priv->rx_desc_cnt)) { - net_err_ratelimited("%s: Out of QPL pages\n", - priv->dev->name); - return -ENOMEM; - } - buf_state->page_info.page = rx->dqo.qpl->pages[idx]; - buf_state->addr = rx->dqo.qpl->page_buses[idx]; - rx->dqo.next_qpl_page_idx++; - } - buf_state->page_info.page_offset = 0; - buf_state->page_info.page_address = - page_address(buf_state->page_info.page); - buf_state->last_single_ref_offset = 0; - - /* The page already has 1 ref. */ - page_ref_add(buf_state->page_info.page, INT_MAX - 1); - buf_state->page_info.pagecnt_bias = INT_MAX; - - return 0; -} - static void gve_rx_free_hdr_bufs(struct gve_priv *priv, struct gve_rx_ring *rx) { struct device *hdev = &priv->pdev->dev; @@ -278,8 +95,10 @@ static void gve_rx_reset_ring_dqo(struct gve_priv *priv, int idx) for (i = 0; i < rx->dqo.num_buf_states; i++) { struct gve_rx_buf_state_dqo *bs = &rx->dqo.buf_states[i]; - if (bs->page_info.page) - gve_free_page_dqo(priv, bs, !rx->dqo.qpl); + if (rx->dqo.page_pool) + gve_free_to_page_pool(rx, bs, false); + else + gve_free_qpl_page_dqo(bs); } } @@ -321,9 +140,11 @@ void gve_rx_free_ring_dqo(struct gve_priv *priv, struct gve_rx_ring *rx, for (i = 0; i < rx->dqo.num_buf_states; i++) { struct gve_rx_buf_state_dqo *bs = &rx->dqo.buf_states[i]; - /* Only free page for RDA. QPL pages are freed in gve_main. */ - if (bs->page_info.page) - gve_free_page_dqo(priv, bs, !rx->dqo.qpl); + + if (rx->dqo.page_pool) + gve_free_to_page_pool(rx, bs, false); + else + gve_free_qpl_page_dqo(bs); } if (rx->dqo.qpl) { @@ -350,6 +171,11 @@ void gve_rx_free_ring_dqo(struct gve_priv *priv, struct gve_rx_ring *rx, kvfree(rx->dqo.buf_states); rx->dqo.buf_states = NULL; + if (rx->dqo.page_pool) { + page_pool_destroy(rx->dqo.page_pool); + rx->dqo.page_pool = NULL; + } + gve_rx_free_hdr_bufs(priv, rx); netif_dbg(priv, drv, priv->dev, "freed rx ring %d\n", idx); @@ -382,6 +208,7 @@ int gve_rx_alloc_ring_dqo(struct gve_priv *priv, int idx) { struct device *hdev = &priv->pdev->dev; + struct page_pool *pool; int qpl_page_cnt; size_t size; u32 qpl_id; @@ -395,8 +222,7 @@ int gve_rx_alloc_ring_dqo(struct gve_priv *priv, rx->gve = priv; rx->q_num = idx; - rx->dqo.num_buf_states = cfg->raw_addressing ? - min_t(s16, S16_MAX, buffer_queue_slots * 4) : + rx->dqo.num_buf_states = cfg->raw_addressing ? 
buffer_queue_slots : gve_get_rx_pages_per_qpl_dqo(cfg->ring_size); rx->dqo.buf_states = kvcalloc(rx->dqo.num_buf_states, sizeof(rx->dqo.buf_states[0]), @@ -424,7 +250,13 @@ int gve_rx_alloc_ring_dqo(struct gve_priv *priv, if (!rx->dqo.bufq.desc_ring) goto err; - if (!cfg->raw_addressing) { + if (cfg->raw_addressing) { + pool = gve_rx_create_page_pool(priv, rx); + if (IS_ERR(pool)) + goto err; + + rx->dqo.page_pool = pool; + } else { qpl_id = gve_get_rx_qpl_id(cfg->qcfg_tx, rx->q_num); qpl_page_cnt = gve_get_rx_pages_per_qpl_dqo(cfg->ring_size); @@ -521,26 +353,14 @@ void gve_rx_post_buffers_dqo(struct gve_rx_ring *rx) num_avail_slots = min_t(u32, num_avail_slots, complq->num_free_slots); while (num_posted < num_avail_slots) { struct gve_rx_desc_dqo *desc = &bufq->desc_ring[bufq->tail]; - struct gve_rx_buf_state_dqo *buf_state; - - buf_state = gve_get_recycled_buf_state(rx); - if (unlikely(!buf_state)) { - buf_state = gve_alloc_buf_state(rx); - if (unlikely(!buf_state)) - break; - - if (unlikely(gve_alloc_page_dqo(rx, buf_state))) { - u64_stats_update_begin(&rx->statss); - rx->rx_buf_alloc_fail++; - u64_stats_update_end(&rx->statss); - gve_free_buf_state(rx, buf_state); - break; - } + + if (unlikely(gve_alloc_buffer(rx, desc))) { + u64_stats_update_begin(&rx->statss); + rx->rx_buf_alloc_fail++; + u64_stats_update_end(&rx->statss); + break; } - desc->buf_id = cpu_to_le16(buf_state - rx->dqo.buf_states); - desc->buf_addr = cpu_to_le64(buf_state->addr + - buf_state->page_info.page_offset); if (rx->dqo.hdr_bufs.data) desc->header_buf_addr = cpu_to_le64(rx->dqo.hdr_bufs.addr + @@ -557,48 +377,6 @@ void gve_rx_post_buffers_dqo(struct gve_rx_ring *rx) rx->fill_cnt += num_posted; } -static void gve_try_recycle_buf(struct gve_priv *priv, struct gve_rx_ring *rx, - struct gve_rx_buf_state_dqo *buf_state) -{ - const u16 data_buffer_size = priv->data_buffer_size_dqo; - int pagecount; - - /* Can't reuse if we only fit one buffer per page */ - if (data_buffer_size * 2 > PAGE_SIZE) - goto mark_used; - - pagecount = gve_buf_ref_cnt(buf_state); - - /* Record the offset when we have a single remaining reference. - * - * When this happens, we know all of the other offsets of the page are - * usable. - */ - if (pagecount == 1) { - buf_state->last_single_ref_offset = - buf_state->page_info.page_offset; - } - - /* Use the next buffer sized chunk in the page. */ - buf_state->page_info.page_offset += data_buffer_size; - buf_state->page_info.page_offset &= (PAGE_SIZE - 1); - - /* If we wrap around to the same offset without ever dropping to 1 - * reference, then we don't know if this offset was ever freed. 
- */ - if (buf_state->page_info.page_offset == - buf_state->last_single_ref_offset) { - goto mark_used; - } - - gve_enqueue_buf_state(rx, &rx->dqo.recycled_buf_states, buf_state); - return; - -mark_used: - gve_enqueue_buf_state(rx, &rx->dqo.used_buf_states, buf_state); - rx->dqo.used_buf_states_cnt++; -} - static void gve_rx_skb_csum(struct sk_buff *skb, const struct gve_rx_compl_desc_dqo *desc, struct gve_ptype ptype) @@ -713,6 +491,9 @@ static int gve_rx_append_frags(struct napi_struct *napi, if (!skb) return -1; + if (rx->dqo.page_pool) + skb_mark_for_recycle(skb); + if (rx->ctx.skb_tail == rx->ctx.skb_head) skb_shinfo(rx->ctx.skb_head)->frag_list = skb; else @@ -723,7 +504,7 @@ static int gve_rx_append_frags(struct napi_struct *napi, if (rx->ctx.skb_tail != rx->ctx.skb_head) { rx->ctx.skb_head->len += buf_len; rx->ctx.skb_head->data_len += buf_len; - rx->ctx.skb_head->truesize += priv->data_buffer_size_dqo; + rx->ctx.skb_head->truesize += buf_state->page_info.buf_size; } /* Trigger ondemand page allocation if we are running low on buffers */ @@ -733,13 +514,8 @@ static int gve_rx_append_frags(struct napi_struct *napi, skb_add_rx_frag(rx->ctx.skb_tail, num_frags, buf_state->page_info.page, buf_state->page_info.page_offset, - buf_len, priv->data_buffer_size_dqo); - gve_dec_pagecnt_bias(&buf_state->page_info); - - /* Advances buffer page-offset if page is partially used. - * Marks buffer as used if page is full. - */ - gve_try_recycle_buf(priv, rx, buf_state); + buf_len, buf_state->page_info.buf_size); + gve_reuse_buffer(rx, buf_state); return 0; } @@ -773,8 +549,7 @@ static int gve_rx_dqo(struct napi_struct *napi, struct gve_rx_ring *rx, } if (unlikely(compl_desc->rx_error)) { - gve_enqueue_buf_state(rx, &rx->dqo.recycled_buf_states, - buf_state); + gve_free_buffer(rx, buf_state); return -EINVAL; } @@ -798,6 +573,9 @@ static int gve_rx_dqo(struct napi_struct *napi, struct gve_rx_ring *rx, if (unlikely(!rx->ctx.skb_head)) goto error; rx->ctx.skb_tail = rx->ctx.skb_head; + + if (rx->dqo.page_pool) + skb_mark_for_recycle(rx->ctx.skb_head); } else { unsplit = 1; } @@ -834,8 +612,7 @@ static int gve_rx_dqo(struct napi_struct *napi, struct gve_rx_ring *rx, rx->rx_copybreak_pkt++; u64_stats_update_end(&rx->statss); - gve_enqueue_buf_state(rx, &rx->dqo.recycled_buf_states, - buf_state); + gve_free_buffer(rx, buf_state); return 0; } @@ -850,16 +627,17 @@ static int gve_rx_dqo(struct napi_struct *napi, struct gve_rx_ring *rx, return 0; } + if (rx->dqo.page_pool) + skb_mark_for_recycle(rx->ctx.skb_head); + skb_add_rx_frag(rx->ctx.skb_head, 0, buf_state->page_info.page, buf_state->page_info.page_offset, buf_len, - priv->data_buffer_size_dqo); - gve_dec_pagecnt_bias(&buf_state->page_info); - - gve_try_recycle_buf(priv, rx, buf_state); + buf_state->page_info.buf_size); + gve_reuse_buffer(rx, buf_state); return 0; error: - gve_enqueue_buf_state(rx, &rx->dqo.recycled_buf_states, buf_state); + gve_free_buffer(rx, buf_state); return -ENOMEM; } diff --git a/drivers/net/ethernet/google/gve/gve_utils.c b/drivers/net/ethernet/google/gve/gve_utils.c index 2349750075a5..30fef100257e 100644 --- a/drivers/net/ethernet/google/gve/gve_utils.c +++ b/drivers/net/ethernet/google/gve/gve_utils.c @@ -111,6 +111,7 @@ void gve_add_napi(struct gve_priv *priv, int ntfy_idx, struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx]; netif_napi_add(priv->dev, &block->napi, gve_poll); + netif_napi_set_irq(&block->napi, block->irq); } void gve_remove_napi(struct gve_priv *priv, int ntfy_idx) diff --git 
a/drivers/net/ethernet/hisilicon/Kconfig b/drivers/net/ethernet/hisilicon/Kconfig index 3312e1d93c3b..65302c41bfb1 100644 --- a/drivers/net/ethernet/hisilicon/Kconfig +++ b/drivers/net/ethernet/hisilicon/Kconfig @@ -7,7 +7,6 @@ config NET_VENDOR_HISILICON bool "Hisilicon devices" default y depends on OF || ACPI - depends on ARM || ARM64 || COMPILE_TEST help If you have a network (Ethernet) card belonging to this class, say Y. @@ -18,6 +17,8 @@ config NET_VENDOR_HISILICON if NET_VENDOR_HISILICON +if ARM || ARM64 || COMPILE_TEST + config HIX5HD2_GMAC tristate "Hisilicon HIX5HD2 Family Network Device Support" select PHYLIB @@ -141,4 +142,19 @@ config HNS3_ENET endif #HNS3 +endif # ARM || ARM64 || COMPILE_TEST + +config HIBMCGE + tristate "Hisilicon BMC Gigabit Ethernet Device Support" + depends on PCI && PCI_MSI + select PHYLIB + select MOTORCOMM_PHY + select REALTEK_PHY + help + If you wish to compile a kernel for a BMC with HIBMC-xx_gmac + then you should answer Y to this. This makes this driver suitable for use + on certain boards such as the HIBMC-210. + + If you are unsure, say N. + endif # NET_VENDOR_HISILICON diff --git a/drivers/net/ethernet/hisilicon/Makefile b/drivers/net/ethernet/hisilicon/Makefile index 7f76d412047a..0e2cadfea8ff 100644 --- a/drivers/net/ethernet/hisilicon/Makefile +++ b/drivers/net/ethernet/hisilicon/Makefile @@ -9,3 +9,4 @@ obj-$(CONFIG_HNS_MDIO) += hns_mdio.o obj-$(CONFIG_HNS) += hns/ obj-$(CONFIG_HNS3) += hns3/ obj-$(CONFIG_HISI_FEMAC) += hisi_femac.o +obj-$(CONFIG_HIBMCGE) += hibmcge/ diff --git a/drivers/net/ethernet/hisilicon/hibmcge/Makefile b/drivers/net/ethernet/hisilicon/hibmcge/Makefile new file mode 100644 index 000000000000..ae58ac38c206 --- /dev/null +++ b/drivers/net/ethernet/hisilicon/hibmcge/Makefile @@ -0,0 +1,8 @@ +# SPDX-License-Identifier: GPL-2.0+ +# +# Makefile for the HISILICON BMC GE network device drivers. +# + +obj-$(CONFIG_HIBMCGE) += hibmcge.o + +hibmcge-objs = hbg_main.o hbg_hw.o hbg_mdio.o hbg_irq.o hbg_txrx.o hbg_ethtool.o diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_common.h b/drivers/net/ethernet/hisilicon/hibmcge/hbg_common.h new file mode 100644 index 000000000000..96daf058d387 --- /dev/null +++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_common.h @@ -0,0 +1,131 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* Copyright (c) 2024 Hisilicon Limited. 
*/ + +#ifndef __HBG_COMMON_H +#define __HBG_COMMON_H + +#include <linux/netdevice.h> +#include <linux/pci.h> +#include "hbg_reg.h" + +#define HBG_STATUS_DISABLE 0x0 +#define HBG_STATUS_ENABLE 0x1 +#define HBG_RX_SKIP1 0x00 +#define HBG_RX_SKIP2 0x01 +#define HBG_VECTOR_NUM 4 +#define HBG_PCU_CACHE_LINE_SIZE 32 +#define HBG_TX_TIMEOUT_BUF_LEN 1024 +#define HBG_RX_DESCR 0x01 + +#define HBG_PACKET_HEAD_SIZE ((HBG_RX_SKIP1 + HBG_RX_SKIP2 + \ + HBG_RX_DESCR) * HBG_PCU_CACHE_LINE_SIZE) + +enum hbg_dir { + HBG_DIR_TX = 1 << 0, + HBG_DIR_RX = 1 << 1, + HBG_DIR_TX_RX = HBG_DIR_TX | HBG_DIR_RX, +}; + +enum hbg_tx_state { + HBG_TX_STATE_COMPLETE = 0, /* clear state, must fix to 0 */ + HBG_TX_STATE_START, +}; + +enum hbg_nic_state { + HBG_NIC_STATE_EVENT_HANDLING = 0, +}; + +struct hbg_buffer { + u32 state; + dma_addr_t state_dma; + + struct sk_buff *skb; + dma_addr_t skb_dma; + u32 skb_len; + + enum hbg_dir dir; + struct hbg_ring *ring; + struct hbg_priv *priv; +}; + +struct hbg_ring { + struct hbg_buffer *queue; + dma_addr_t queue_dma; + + union { + u32 head; + u32 ntc; + }; + union { + u32 tail; + u32 ntu; + }; + u32 len; + + enum hbg_dir dir; + struct hbg_priv *priv; + struct napi_struct napi; + char *tout_log_buf; /* tx timeout log buffer */ +}; + +enum hbg_hw_event_type { + HBG_HW_EVENT_NONE = 0, + HBG_HW_EVENT_INIT, /* driver is loading */ + HBG_HW_EVENT_RESET, +}; + +struct hbg_dev_specs { + u32 mac_id; + struct sockaddr mac_addr; + u32 phy_addr; + u32 mdio_frequency; + u32 rx_fifo_num; + u32 tx_fifo_num; + u32 vlan_layers; + u32 max_mtu; + u32 min_mtu; + + u32 max_frame_len; + u32 rx_buf_size; +}; + +struct hbg_irq_info { + const char *name; + u32 mask; + bool re_enable; + bool need_print; + u64 count; + + void (*irq_handle)(struct hbg_priv *priv, struct hbg_irq_info *info); +}; + +struct hbg_vector { + char name[HBG_VECTOR_NUM][32]; + struct hbg_irq_info *info_array; + u32 info_array_len; +}; + +struct hbg_mac { + struct mii_bus *mdio_bus; + struct phy_device *phydev; + u8 phy_addr; + + u32 speed; + u32 duplex; + u32 autoneg; + u32 link_status; +}; + +struct hbg_priv { + struct net_device *netdev; + struct pci_dev *pdev; + u8 __iomem *io_base; + struct hbg_dev_specs dev_specs; + unsigned long state; + struct hbg_mac mac; + struct hbg_vector vectors; + struct hbg_ring tx_ring; + struct hbg_ring rx_ring; +}; + +#endif diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_ethtool.c b/drivers/net/ethernet/hisilicon/hibmcge/hbg_ethtool.c new file mode 100644 index 000000000000..c3370114aef3 --- /dev/null +++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_ethtool.c @@ -0,0 +1,17 @@ +// SPDX-License-Identifier: GPL-2.0+ +// Copyright (c) 2024 Hisilicon Limited. + +#include <linux/ethtool.h> +#include <linux/phy.h> +#include "hbg_ethtool.h" + +static const struct ethtool_ops hbg_ethtool_ops = { + .get_link = ethtool_op_get_link, + .get_link_ksettings = phy_ethtool_get_link_ksettings, + .set_link_ksettings = phy_ethtool_set_link_ksettings, +}; + +void hbg_ethtool_set_ops(struct net_device *netdev) +{ + netdev->ethtool_ops = &hbg_ethtool_ops; +} diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_ethtool.h b/drivers/net/ethernet/hisilicon/hibmcge/hbg_ethtool.h new file mode 100644 index 000000000000..628707ec2686 --- /dev/null +++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_ethtool.h @@ -0,0 +1,11 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* Copyright (c) 2024 Hisilicon Limited. 
*/ + +#ifndef __HBG_ETHTOOL_H +#define __HBG_ETHTOOL_H + +#include <linux/netdevice.h> + +void hbg_ethtool_set_ops(struct net_device *netdev); + +#endif diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_hw.c b/drivers/net/ethernet/hisilicon/hibmcge/hbg_hw.c new file mode 100644 index 000000000000..05295c2ad439 --- /dev/null +++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_hw.c @@ -0,0 +1,271 @@ +// SPDX-License-Identifier: GPL-2.0+ +// Copyright (c) 2024 Hisilicon Limited. + +#include <linux/etherdevice.h> +#include <linux/ethtool.h> +#include <linux/iopoll.h> +#include <linux/minmax.h> +#include "hbg_common.h" +#include "hbg_hw.h" +#include "hbg_reg.h" + +#define HBG_HW_EVENT_WAIT_TIMEOUT_US (2 * 1000 * 1000) +#define HBG_HW_EVENT_WAIT_INTERVAL_US (10 * 1000) +/* little endian or big endian. + * ctrl means packet description, data means skb packet data + */ +#define HBG_ENDIAN_CTRL_LE_DATA_BE 0x0 +#define HBG_PCU_FRAME_LEN_PLUS 4 + +static bool hbg_hw_spec_is_valid(struct hbg_priv *priv) +{ + return hbg_reg_read(priv, HBG_REG_SPEC_VALID_ADDR) && + !hbg_reg_read(priv, HBG_REG_EVENT_REQ_ADDR); +} + +int hbg_hw_event_notify(struct hbg_priv *priv, + enum hbg_hw_event_type event_type) +{ + bool is_valid; + int ret; + + if (test_and_set_bit(HBG_NIC_STATE_EVENT_HANDLING, &priv->state)) + return -EBUSY; + + /* notify */ + hbg_reg_write(priv, HBG_REG_EVENT_REQ_ADDR, event_type); + + ret = read_poll_timeout(hbg_hw_spec_is_valid, is_valid, is_valid, + HBG_HW_EVENT_WAIT_INTERVAL_US, + HBG_HW_EVENT_WAIT_TIMEOUT_US, + HBG_HW_EVENT_WAIT_INTERVAL_US, priv); + + clear_bit(HBG_NIC_STATE_EVENT_HANDLING, &priv->state); + + if (ret) + dev_err(&priv->pdev->dev, + "event %d wait timeout\n", event_type); + + return ret; +} + +static int hbg_hw_dev_specs_init(struct hbg_priv *priv) +{ + struct hbg_dev_specs *specs = &priv->dev_specs; + u64 mac_addr; + + if (!hbg_hw_spec_is_valid(priv)) { + dev_err(&priv->pdev->dev, "dev_specs not init\n"); + return -EINVAL; + } + + specs->mac_id = hbg_reg_read(priv, HBG_REG_MAC_ID_ADDR); + specs->phy_addr = hbg_reg_read(priv, HBG_REG_PHY_ID_ADDR); + specs->mdio_frequency = hbg_reg_read(priv, HBG_REG_MDIO_FREQ_ADDR); + specs->max_mtu = hbg_reg_read(priv, HBG_REG_MAX_MTU_ADDR); + specs->min_mtu = hbg_reg_read(priv, HBG_REG_MIN_MTU_ADDR); + specs->vlan_layers = hbg_reg_read(priv, HBG_REG_VLAN_LAYERS_ADDR); + specs->rx_fifo_num = hbg_reg_read(priv, HBG_REG_RX_FIFO_NUM_ADDR); + specs->tx_fifo_num = hbg_reg_read(priv, HBG_REG_TX_FIFO_NUM_ADDR); + mac_addr = hbg_reg_read64(priv, HBG_REG_MAC_ADDR_ADDR); + u64_to_ether_addr(mac_addr, (u8 *)specs->mac_addr.sa_data); + + if (!is_valid_ether_addr((u8 *)specs->mac_addr.sa_data)) + return -EADDRNOTAVAIL; + + specs->max_frame_len = HBG_PCU_CACHE_LINE_SIZE + specs->max_mtu; + specs->rx_buf_size = HBG_PACKET_HEAD_SIZE + specs->max_frame_len; + return 0; +} + +u32 hbg_hw_get_irq_status(struct hbg_priv *priv) +{ + u32 status; + + status = hbg_reg_read(priv, HBG_REG_CF_INTRPT_STAT_ADDR); + + hbg_field_modify(status, HBG_INT_MSK_TX_B, + hbg_reg_read(priv, HBG_REG_CF_IND_TXINT_STAT_ADDR)); + hbg_field_modify(status, HBG_INT_MSK_RX_B, + hbg_reg_read(priv, HBG_REG_CF_IND_RXINT_STAT_ADDR)); + + return status; +} + +void hbg_hw_irq_clear(struct hbg_priv *priv, u32 mask) +{ + if (FIELD_GET(HBG_INT_MSK_TX_B, mask)) + return hbg_reg_write(priv, HBG_REG_CF_IND_TXINT_CLR_ADDR, 0x1); + + if (FIELD_GET(HBG_INT_MSK_RX_B, mask)) + return hbg_reg_write(priv, HBG_REG_CF_IND_RXINT_CLR_ADDR, 0x1); + + return hbg_reg_write(priv, HBG_REG_CF_INTRPT_CLR_ADDR, mask); 
+} + +bool hbg_hw_irq_is_enabled(struct hbg_priv *priv, u32 mask) +{ + if (FIELD_GET(HBG_INT_MSK_TX_B, mask)) + return hbg_reg_read(priv, HBG_REG_CF_IND_TXINT_MSK_ADDR); + + if (FIELD_GET(HBG_INT_MSK_RX_B, mask)) + return hbg_reg_read(priv, HBG_REG_CF_IND_RXINT_MSK_ADDR); + + return hbg_reg_read(priv, HBG_REG_CF_INTRPT_MSK_ADDR) & mask; +} + +void hbg_hw_irq_enable(struct hbg_priv *priv, u32 mask, bool enable) +{ + u32 value; + + if (FIELD_GET(HBG_INT_MSK_TX_B, mask)) + return hbg_reg_write(priv, + HBG_REG_CF_IND_TXINT_MSK_ADDR, enable); + + if (FIELD_GET(HBG_INT_MSK_RX_B, mask)) + return hbg_reg_write(priv, + HBG_REG_CF_IND_RXINT_MSK_ADDR, enable); + + value = hbg_reg_read(priv, HBG_REG_CF_INTRPT_MSK_ADDR); + if (enable) + value |= mask; + else + value &= ~mask; + + hbg_reg_write(priv, HBG_REG_CF_INTRPT_MSK_ADDR, value); +} + +void hbg_hw_set_uc_addr(struct hbg_priv *priv, u64 mac_addr) +{ + hbg_reg_write64(priv, HBG_REG_STATION_ADDR_LOW_2_ADDR, mac_addr); +} + +static void hbg_hw_set_pcu_max_frame_len(struct hbg_priv *priv, + u16 max_frame_len) +{ + max_frame_len = max_t(u32, max_frame_len, ETH_DATA_LEN); + + /* lower two bits of value must be set to 0 */ + max_frame_len = round_up(max_frame_len, HBG_PCU_FRAME_LEN_PLUS); + + hbg_reg_write_field(priv, HBG_REG_MAX_FRAME_LEN_ADDR, + HBG_REG_MAX_FRAME_LEN_M, max_frame_len); +} + +static void hbg_hw_set_mac_max_frame_len(struct hbg_priv *priv, + u16 max_frame_size) +{ + hbg_reg_write_field(priv, HBG_REG_MAX_FRAME_SIZE_ADDR, + HBG_REG_MAX_FRAME_LEN_M, max_frame_size); +} + +void hbg_hw_set_mtu(struct hbg_priv *priv, u16 mtu) +{ + hbg_hw_set_pcu_max_frame_len(priv, mtu); + hbg_hw_set_mac_max_frame_len(priv, mtu); +} + +void hbg_hw_mac_enable(struct hbg_priv *priv, u32 enable) +{ + hbg_reg_write_field(priv, HBG_REG_PORT_ENABLE_ADDR, + HBG_REG_PORT_ENABLE_TX_B, enable); + hbg_reg_write_field(priv, HBG_REG_PORT_ENABLE_ADDR, + HBG_REG_PORT_ENABLE_RX_B, enable); +} + +u32 hbg_hw_get_fifo_used_num(struct hbg_priv *priv, enum hbg_dir dir) +{ + if (dir & HBG_DIR_TX) + return hbg_reg_read_field(priv, HBG_REG_CF_CFF_DATA_NUM_ADDR, + HBG_REG_CF_CFF_DATA_NUM_ADDR_TX_M); + + if (dir & HBG_DIR_RX) + return hbg_reg_read_field(priv, HBG_REG_CF_CFF_DATA_NUM_ADDR, + HBG_REG_CF_CFF_DATA_NUM_ADDR_RX_M); + + return 0; +} + +void hbg_hw_set_tx_desc(struct hbg_priv *priv, struct hbg_tx_desc *tx_desc) +{ + hbg_reg_write(priv, HBG_REG_TX_CFF_ADDR_0_ADDR, tx_desc->word0); + hbg_reg_write(priv, HBG_REG_TX_CFF_ADDR_1_ADDR, tx_desc->word1); + hbg_reg_write(priv, HBG_REG_TX_CFF_ADDR_2_ADDR, tx_desc->word2); + hbg_reg_write(priv, HBG_REG_TX_CFF_ADDR_3_ADDR, tx_desc->word3); +} + +void hbg_hw_fill_buffer(struct hbg_priv *priv, u32 buffer_dma_addr) +{ + hbg_reg_write(priv, HBG_REG_RX_CFF_ADDR_ADDR, buffer_dma_addr); +} + +void hbg_hw_adjust_link(struct hbg_priv *priv, u32 speed, u32 duplex) +{ + hbg_reg_write_field(priv, HBG_REG_PORT_MODE_ADDR, + HBG_REG_PORT_MODE_M, speed); + hbg_reg_write_field(priv, HBG_REG_DUPLEX_TYPE_ADDR, + HBG_REG_DUPLEX_B, duplex); +} + +static void hbg_hw_init_transmit_ctrl(struct hbg_priv *priv) +{ + u32 ctrl = 0; + + ctrl |= FIELD_PREP(HBG_REG_TRANSMIT_CTRL_AN_EN_B, HBG_STATUS_ENABLE); + ctrl |= FIELD_PREP(HBG_REG_TRANSMIT_CTRL_CRC_ADD_B, HBG_STATUS_ENABLE); + ctrl |= FIELD_PREP(HBG_REG_TRANSMIT_CTRL_PAD_EN_B, HBG_STATUS_ENABLE); + + hbg_reg_write(priv, HBG_REG_TRANSMIT_CTRL_ADDR, ctrl); +} + +static void hbg_hw_init_rx_ctrl(struct hbg_priv *priv) +{ + u32 ctrl = 0; + + ctrl |= FIELD_PREP(HBG_REG_RX_CTRL_RX_GET_ADDR_MODE_B, + HBG_STATUS_ENABLE); + 
ctrl |= FIELD_PREP(HBG_REG_RX_CTRL_TIME_INF_EN_B, HBG_STATUS_DISABLE); + ctrl |= FIELD_PREP(HBG_REG_RX_CTRL_RXBUF_1ST_SKIP_SIZE_M, HBG_RX_SKIP1); + ctrl |= FIELD_PREP(HBG_REG_RX_CTRL_RXBUF_1ST_SKIP_SIZE2_M, + HBG_RX_SKIP2); + ctrl |= FIELD_PREP(HBG_REG_RX_CTRL_RX_ALIGN_NUM_M, NET_IP_ALIGN); + ctrl |= FIELD_PREP(HBG_REG_RX_CTRL_PORT_NUM, priv->dev_specs.mac_id); + + hbg_reg_write(priv, HBG_REG_RX_CTRL_ADDR, ctrl); +} + +static void hbg_hw_init_rx_control(struct hbg_priv *priv) +{ + hbg_hw_init_rx_ctrl(priv); + + /* parse from L2 layer */ + hbg_reg_write_field(priv, HBG_REG_RX_PKT_MODE_ADDR, + HBG_REG_RX_PKT_MODE_PARSE_MODE_M, 0x1); + + hbg_reg_write_field(priv, HBG_REG_RECV_CTRL_ADDR, + HBG_REG_RECV_CTRL_STRIP_PAD_EN_B, + HBG_STATUS_ENABLE); + hbg_reg_write_field(priv, HBG_REG_RX_BUF_SIZE_ADDR, + HBG_REG_RX_BUF_SIZE_M, priv->dev_specs.rx_buf_size); + hbg_reg_write_field(priv, HBG_REG_CF_CRC_STRIP_ADDR, + HBG_REG_CF_CRC_STRIP_B, HBG_STATUS_DISABLE); +} + +int hbg_hw_init(struct hbg_priv *priv) +{ + int ret; + + ret = hbg_hw_dev_specs_init(priv); + if (ret) + return ret; + + hbg_reg_write_field(priv, HBG_REG_BUS_CTRL_ADDR, + HBG_REG_BUS_CTRL_ENDIAN_M, + HBG_ENDIAN_CTRL_LE_DATA_BE); + hbg_reg_write_field(priv, HBG_REG_MODE_CHANGE_EN_ADDR, + HBG_REG_MODE_CHANGE_EN_B, HBG_STATUS_ENABLE); + + hbg_hw_init_rx_control(priv); + hbg_hw_init_transmit_ctrl(priv); + return 0; +} diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_hw.h b/drivers/net/ethernet/hisilicon/hibmcge/hbg_hw.h new file mode 100644 index 000000000000..14fb39241c93 --- /dev/null +++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_hw.h @@ -0,0 +1,59 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* Copyright (c) 2024 Hisilicon Limited. */ + +#ifndef __HBG_HW_H +#define __HBG_HW_H + +#include <linux/bitfield.h> +#include <linux/io-64-nonatomic-lo-hi.h> + +static inline u32 hbg_reg_read(struct hbg_priv *priv, u32 addr) +{ + return readl(priv->io_base + addr); +} + +static inline void hbg_reg_write(struct hbg_priv *priv, u32 addr, u32 value) +{ + writel(value, priv->io_base + addr); +} + +static inline u64 hbg_reg_read64(struct hbg_priv *priv, u32 addr) +{ + return lo_hi_readq(priv->io_base + addr); +} + +static inline void hbg_reg_write64(struct hbg_priv *priv, u32 addr, u64 value) +{ + lo_hi_writeq(value, priv->io_base + addr); +} + +#define hbg_reg_read_field(priv, addr, mask) \ + FIELD_GET(mask, hbg_reg_read(priv, addr)) + +#define hbg_field_modify(reg_value, mask, value) ({ \ + (reg_value) &= ~(mask); \ + (reg_value) |= FIELD_PREP(mask, value); }) + +#define hbg_reg_write_field(priv, addr, mask, val) ({ \ + typeof(priv) _priv = (priv); \ + typeof(addr) _addr = (addr); \ + u32 _value = hbg_reg_read(_priv, _addr); \ + hbg_field_modify(_value, mask, val); \ + hbg_reg_write(_priv, _addr, _value); }) + +int hbg_hw_event_notify(struct hbg_priv *priv, + enum hbg_hw_event_type event_type); +int hbg_hw_init(struct hbg_priv *priv); +void hbg_hw_adjust_link(struct hbg_priv *priv, u32 speed, u32 duplex); +u32 hbg_hw_get_irq_status(struct hbg_priv *priv); +void hbg_hw_irq_clear(struct hbg_priv *priv, u32 mask); +bool hbg_hw_irq_is_enabled(struct hbg_priv *priv, u32 mask); +void hbg_hw_irq_enable(struct hbg_priv *priv, u32 mask, bool enable); +void hbg_hw_set_mtu(struct hbg_priv *priv, u16 mtu); +void hbg_hw_mac_enable(struct hbg_priv *priv, u32 enable); +void hbg_hw_set_uc_addr(struct hbg_priv *priv, u64 mac_addr); +u32 hbg_hw_get_fifo_used_num(struct hbg_priv *priv, enum hbg_dir dir); +void hbg_hw_set_tx_desc(struct hbg_priv *priv, struct 
hbg_tx_desc *tx_desc); +void hbg_hw_fill_buffer(struct hbg_priv *priv, u32 buffer_dma_addr); + +#endif diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_irq.c b/drivers/net/ethernet/hisilicon/hibmcge/hbg_irq.c new file mode 100644 index 000000000000..25dd25f096fe --- /dev/null +++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_irq.c @@ -0,0 +1,127 @@ +// SPDX-License-Identifier: GPL-2.0+ +// Copyright (c) 2024 Hisilicon Limited. + +#include <linux/interrupt.h> +#include "hbg_irq.h" +#include "hbg_hw.h" + +static void hbg_irq_handle_err(struct hbg_priv *priv, + struct hbg_irq_info *irq_info) +{ + if (irq_info->need_print) + dev_err(&priv->pdev->dev, + "receive error interrupt: %s\n", irq_info->name); +} + +static void hbg_irq_handle_tx(struct hbg_priv *priv, + struct hbg_irq_info *irq_info) +{ + napi_schedule(&priv->tx_ring.napi); +} + +static void hbg_irq_handle_rx(struct hbg_priv *priv, + struct hbg_irq_info *irq_info) +{ + napi_schedule(&priv->rx_ring.napi); +} + +#define HBG_TXRX_IRQ_I(name, handle) \ + {#name, HBG_INT_MSK_##name##_B, false, false, 0, handle} +#define HBG_ERR_IRQ_I(name, need_print) \ + {#name, HBG_INT_MSK_##name##_B, true, need_print, 0, hbg_irq_handle_err} + +static struct hbg_irq_info hbg_irqs[] = { + HBG_TXRX_IRQ_I(RX, hbg_irq_handle_rx), + HBG_TXRX_IRQ_I(TX, hbg_irq_handle_tx), + HBG_ERR_IRQ_I(MAC_MII_FIFO_ERR, true), + HBG_ERR_IRQ_I(MAC_PCS_RX_FIFO_ERR, true), + HBG_ERR_IRQ_I(MAC_PCS_TX_FIFO_ERR, true), + HBG_ERR_IRQ_I(MAC_APP_RX_FIFO_ERR, true), + HBG_ERR_IRQ_I(MAC_APP_TX_FIFO_ERR, true), + HBG_ERR_IRQ_I(SRAM_PARITY_ERR, true), + HBG_ERR_IRQ_I(TX_AHB_ERR, true), + HBG_ERR_IRQ_I(RX_BUF_AVL, false), + HBG_ERR_IRQ_I(REL_BUF_ERR, true), + HBG_ERR_IRQ_I(TXCFG_AVL, false), + HBG_ERR_IRQ_I(TX_DROP, false), + HBG_ERR_IRQ_I(RX_DROP, false), + HBG_ERR_IRQ_I(RX_AHB_ERR, true), + HBG_ERR_IRQ_I(MAC_FIFO_ERR, false), + HBG_ERR_IRQ_I(RBREQ_ERR, false), + HBG_ERR_IRQ_I(WE_ERR, false), +}; + +static irqreturn_t hbg_irq_handle(int irq_num, void *p) +{ + struct hbg_irq_info *info; + struct hbg_priv *priv = p; + u32 status; + u32 i; + + status = hbg_hw_get_irq_status(priv); + for (i = 0; i < priv->vectors.info_array_len; i++) { + info = &priv->vectors.info_array[i]; + if (status & info->mask) { + if (!hbg_hw_irq_is_enabled(priv, info->mask)) + continue; + + hbg_hw_irq_enable(priv, info->mask, false); + hbg_hw_irq_clear(priv, info->mask); + + info->count++; + if (info->irq_handle) + info->irq_handle(priv, info); + + if (info->re_enable) + hbg_hw_irq_enable(priv, info->mask, true); + } + } + + return IRQ_HANDLED; +} + +static const char *irq_names_map[HBG_VECTOR_NUM] = { "tx", "rx", + "err", "mdio" }; + +int hbg_irq_init(struct hbg_priv *priv) +{ + struct hbg_vector *vectors = &priv->vectors; + struct device *dev = &priv->pdev->dev; + int ret, id; + u32 i; + + /* used pcim_enable_device(), so the vectors become device managed */ + ret = pci_alloc_irq_vectors(priv->pdev, HBG_VECTOR_NUM, HBG_VECTOR_NUM, + PCI_IRQ_MSI | PCI_IRQ_MSIX); + if (ret < 0) + return dev_err_probe(dev, ret, "failed to allocate vectors\n"); + + if (ret != HBG_VECTOR_NUM) + return dev_err_probe(dev, -EINVAL, + "requested %u MSI, but allocated %d MSI\n", + HBG_VECTOR_NUM, ret); + + /* mdio irq not requested, so the number of requested interrupts + * is HBG_VECTOR_NUM - 1. 
+ */ + for (i = 0; i < HBG_VECTOR_NUM - 1; i++) { + id = pci_irq_vector(priv->pdev, i); + if (id < 0) + return dev_err_probe(dev, id, "failed to get irq id\n"); + + snprintf(vectors->name[i], sizeof(vectors->name[i]), "%s-%s-%s", + dev_driver_string(dev), pci_name(priv->pdev), + irq_names_map[i]); + + ret = devm_request_irq(dev, id, hbg_irq_handle, 0, + vectors->name[i], priv); + if (ret) + return dev_err_probe(dev, ret, + "failed to request irq: %s\n", + irq_names_map[i]); + } + + vectors->info_array = hbg_irqs; + vectors->info_array_len = ARRAY_SIZE(hbg_irqs); + return 0; +} diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_irq.h b/drivers/net/ethernet/hisilicon/hibmcge/hbg_irq.h new file mode 100644 index 000000000000..5c5323cfc751 --- /dev/null +++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_irq.h @@ -0,0 +1,11 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* Copyright (c) 2024 Hisilicon Limited. */ + +#ifndef __HBG_IRQ_H +#define __HBG_IRQ_H + +#include "hbg_common.h" + +int hbg_irq_init(struct hbg_priv *priv); + +#endif diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_main.c b/drivers/net/ethernet/hisilicon/hibmcge/hbg_main.c new file mode 100644 index 000000000000..75505fb5cc4a --- /dev/null +++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_main.c @@ -0,0 +1,253 @@ +// SPDX-License-Identifier: GPL-2.0+ +// Copyright (c) 2024 Hisilicon Limited. + +#include <linux/etherdevice.h> +#include <linux/if_vlan.h> +#include <linux/netdevice.h> +#include <linux/pci.h> +#include "hbg_common.h" +#include "hbg_ethtool.h" +#include "hbg_hw.h" +#include "hbg_irq.h" +#include "hbg_mdio.h" +#include "hbg_txrx.h" + +static void hbg_change_mtu(struct hbg_priv *priv, int new_mtu); + +static void hbg_all_irq_enable(struct hbg_priv *priv, bool enabled) +{ + struct hbg_irq_info *info; + u32 i; + + for (i = 0; i < priv->vectors.info_array_len; i++) { + info = &priv->vectors.info_array[i]; + hbg_hw_irq_enable(priv, info->mask, enabled); + } +} + +static int hbg_net_open(struct net_device *netdev) +{ + struct hbg_priv *priv = netdev_priv(netdev); + int ret; + + ret = hbg_txrx_init(priv); + if (ret) + return ret; + + hbg_all_irq_enable(priv, true); + hbg_hw_mac_enable(priv, HBG_STATUS_ENABLE); + netif_start_queue(netdev); + hbg_phy_start(priv); + + return 0; +} + +/* This function can only be called after hbg_txrx_uninit() */ +static int hbg_hw_txrx_clear(struct hbg_priv *priv) +{ + int ret; + + /* After ring buffers have been released, + * do a reset to release the hw fifo rx ring buffer + */ + ret = hbg_hw_event_notify(priv, HBG_HW_EVENT_RESET); + if (ret) + return ret; + + /* After reset, the regs need to be reconfigured */ + hbg_hw_init(priv); + hbg_hw_set_uc_addr(priv, ether_addr_to_u64(priv->netdev->dev_addr)); + hbg_change_mtu(priv, priv->netdev->mtu); + + return 0; +} + +static int hbg_net_stop(struct net_device *netdev) +{ + struct hbg_priv *priv = netdev_priv(netdev); + + hbg_phy_stop(priv); + netif_stop_queue(netdev); + hbg_hw_mac_enable(priv, HBG_STATUS_DISABLE); + hbg_all_irq_enable(priv, false); + hbg_txrx_uninit(priv); + return hbg_hw_txrx_clear(priv); +} + +static int hbg_net_set_mac_address(struct net_device *netdev, void *addr) +{ + struct hbg_priv *priv = netdev_priv(netdev); + u8 *mac_addr; + + mac_addr = ((struct sockaddr *)addr)->sa_data; + + if (!is_valid_ether_addr(mac_addr)) + return -EADDRNOTAVAIL; + + hbg_hw_set_uc_addr(priv, ether_addr_to_u64(mac_addr)); + dev_addr_set(netdev, mac_addr); + + return 0; +} + +static void hbg_change_mtu(struct hbg_priv *priv, int new_mtu) +{ + u32
frame_len; + + frame_len = new_mtu + VLAN_HLEN * priv->dev_specs.vlan_layers + + ETH_HLEN + ETH_FCS_LEN; + hbg_hw_set_mtu(priv, frame_len); +} + +static int hbg_net_change_mtu(struct net_device *netdev, int new_mtu) +{ + struct hbg_priv *priv = netdev_priv(netdev); + + if (netif_running(netdev)) + return -EBUSY; + + hbg_change_mtu(priv, new_mtu); + WRITE_ONCE(netdev->mtu, new_mtu); + + dev_dbg(&priv->pdev->dev, + "change mtu from %u to %u\n", netdev->mtu, new_mtu); + + return 0; +} + +static void hbg_net_tx_timeout(struct net_device *netdev, unsigned int txqueue) +{ + struct hbg_priv *priv = netdev_priv(netdev); + struct hbg_ring *ring = &priv->tx_ring; + char *buf = ring->tout_log_buf; + u32 pos = 0; + + pos += scnprintf(buf + pos, HBG_TX_TIMEOUT_BUF_LEN - pos, + "ring used num: %u, fifo used num: %u\n", + hbg_get_queue_used_num(ring), + hbg_hw_get_fifo_used_num(priv, HBG_DIR_TX)); + pos += scnprintf(buf + pos, HBG_TX_TIMEOUT_BUF_LEN - pos, + "ntc: %u, ntu: %u, irq enabled: %u\n", + ring->ntc, ring->ntu, + hbg_hw_irq_is_enabled(priv, HBG_INT_MSK_TX_B)); + + netdev_info(netdev, "%s", buf); +} + +static const struct net_device_ops hbg_netdev_ops = { + .ndo_open = hbg_net_open, + .ndo_stop = hbg_net_stop, + .ndo_start_xmit = hbg_net_start_xmit, + .ndo_validate_addr = eth_validate_addr, + .ndo_set_mac_address = hbg_net_set_mac_address, + .ndo_change_mtu = hbg_net_change_mtu, + .ndo_tx_timeout = hbg_net_tx_timeout, +}; + +static int hbg_init(struct hbg_priv *priv) +{ + int ret; + + ret = hbg_hw_event_notify(priv, HBG_HW_EVENT_INIT); + if (ret) + return ret; + + ret = hbg_hw_init(priv); + if (ret) + return ret; + + ret = hbg_irq_init(priv); + if (ret) + return ret; + + return hbg_mdio_init(priv); +} + +static int hbg_pci_init(struct pci_dev *pdev) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct hbg_priv *priv = netdev_priv(netdev); + struct device *dev = &pdev->dev; + int ret; + + ret = pcim_enable_device(pdev); + if (ret) + return dev_err_probe(dev, ret, "failed to enable PCI device\n"); + + ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32)); + if (ret) + return dev_err_probe(dev, ret, "failed to set PCI DMA mask\n"); + + ret = pcim_iomap_regions(pdev, BIT(0), dev_driver_string(dev)); + if (ret) + return dev_err_probe(dev, ret, "failed to map PCI bar space\n"); + + priv->io_base = pcim_iomap_table(pdev)[0]; + if (!priv->io_base) + return dev_err_probe(dev, -ENOMEM, "failed to get io base\n"); + + pci_set_master(pdev); + return 0; +} + +static int hbg_probe(struct pci_dev *pdev, const struct pci_device_id *ent) +{ + struct device *dev = &pdev->dev; + struct net_device *netdev; + struct hbg_priv *priv; + int ret; + + netdev = devm_alloc_etherdev(dev, sizeof(struct hbg_priv)); + if (!netdev) + return -ENOMEM; + + pci_set_drvdata(pdev, netdev); + SET_NETDEV_DEV(netdev, dev); + + priv = netdev_priv(netdev); + priv->netdev = netdev; + priv->pdev = pdev; + + ret = hbg_pci_init(pdev); + if (ret) + return ret; + + ret = hbg_init(priv); + if (ret) + return ret; + + netdev->pcpu_stat_type = NETDEV_PCPU_STAT_TSTATS; + netdev->max_mtu = priv->dev_specs.max_mtu; + netdev->min_mtu = priv->dev_specs.min_mtu; + netdev->netdev_ops = &hbg_netdev_ops; + netdev->watchdog_timeo = 5 * HZ; + + hbg_change_mtu(priv, ETH_DATA_LEN); + hbg_net_set_mac_address(priv->netdev, &priv->dev_specs.mac_addr); + hbg_ethtool_set_ops(netdev); + + ret = devm_register_netdev(dev, netdev); + if (ret) + return dev_err_probe(dev, ret, "failed to register netdev\n"); + + netif_carrier_off(netdev); + return 0; +} + 
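For reference, hbg_change_mtu() above derives the on-wire frame length that gets programmed into the hardware: the L3 MTU plus one VLAN header per supported VLAN layer, plus the Ethernet header and the trailing FCS. Below is a minimal standalone sketch of the same arithmetic with the kernel constants written out; the vlan_layers value of 2 is only an assumed example, since the driver reads the real value from the HBG_REG_VLAN_LAYERS_ADDR dev spec:

    #include <stdio.h>

    #define VLAN_HLEN   4   /* one 802.1Q tag */
    #define ETH_HLEN    14  /* dst MAC + src MAC + ethertype */
    #define ETH_FCS_LEN 4   /* trailing CRC */

    /* mirrors the frame_len computation in hbg_change_mtu() */
    static unsigned int hbg_frame_len(unsigned int new_mtu,
                                      unsigned int vlan_layers)
    {
            return new_mtu + VLAN_HLEN * vlan_layers + ETH_HLEN + ETH_FCS_LEN;
    }

    int main(void)
    {
            /* 1500 + 2 * 4 + 14 + 4 = 1526 bytes on the wire */
            printf("%u\n", hbg_frame_len(1500, 2));
            return 0;
    }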
+static const struct pci_device_id hbg_pci_tbl[] = { + {PCI_VDEVICE(HUAWEI, 0x3730), 0}, + { } +}; +MODULE_DEVICE_TABLE(pci, hbg_pci_tbl); + +static struct pci_driver hbg_driver = { + .name = "hibmcge", + .id_table = hbg_pci_tbl, + .probe = hbg_probe, +}; +module_pci_driver(hbg_driver); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Huawei Tech. Co., Ltd."); +MODULE_DESCRIPTION("hibmcge driver"); +MODULE_VERSION("1.0"); diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_mdio.c b/drivers/net/ethernet/hisilicon/hibmcge/hbg_mdio.c new file mode 100644 index 000000000000..a3479fba8501 --- /dev/null +++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_mdio.c @@ -0,0 +1,222 @@ +// SPDX-License-Identifier: GPL-2.0+ +// Copyright (c) 2024 Hisilicon Limited. + +#include <linux/phy.h> +#include "hbg_common.h" +#include "hbg_hw.h" +#include "hbg_mdio.h" +#include "hbg_reg.h" + +#define HBG_MAC_GET_PRIV(mac) ((struct hbg_priv *)(mac)->mdio_bus->priv) +#define HBG_MII_BUS_GET_MAC(bus) (&((struct hbg_priv *)(bus)->priv)->mac) + +#define HBG_MDIO_C22_MODE 0x1 +#define HBG_MDIO_C22_REG_WRITE 0x1 +#define HBG_MDIO_C22_REG_READ 0x2 + +#define HBG_MDIO_OP_TIMEOUT_US (1 * 1000 * 1000) +#define HBG_MDIO_OP_INTERVAL_US (5 * 1000) + +static void hbg_mdio_set_command(struct hbg_mac *mac, u32 cmd) +{ + hbg_reg_write(HBG_MAC_GET_PRIV(mac), HBG_REG_MDIO_COMMAND_ADDR, cmd); +} + +static void hbg_mdio_get_command(struct hbg_mac *mac, u32 *cmd) +{ + *cmd = hbg_reg_read(HBG_MAC_GET_PRIV(mac), HBG_REG_MDIO_COMMAND_ADDR); +} + +static void hbg_mdio_set_wdata_reg(struct hbg_mac *mac, u16 wdata_value) +{ + hbg_reg_write_field(HBG_MAC_GET_PRIV(mac), HBG_REG_MDIO_WDATA_ADDR, + HBG_REG_MDIO_WDATA_M, wdata_value); +} + +static u32 hbg_mdio_get_rdata_reg(struct hbg_mac *mac) +{ + return hbg_reg_read_field(HBG_MAC_GET_PRIV(mac), + HBG_REG_MDIO_RDATA_ADDR, + HBG_REG_MDIO_WDATA_M); +} + +static int hbg_mdio_wait_ready(struct hbg_mac *mac) +{ + struct hbg_priv *priv = HBG_MAC_GET_PRIV(mac); + u32 cmd = 0; + int ret; + + ret = readl_poll_timeout(priv->io_base + HBG_REG_MDIO_COMMAND_ADDR, cmd, + !FIELD_GET(HBG_REG_MDIO_COMMAND_START_B, cmd), + HBG_MDIO_OP_INTERVAL_US, + HBG_MDIO_OP_TIMEOUT_US); + + return ret ? 
-ETIMEDOUT : 0; +} + +static int hbg_mdio_cmd_send(struct hbg_mac *mac, u32 prt_addr, u32 dev_addr, + u32 type, u32 op_code) +{ + u32 cmd = 0; + + hbg_mdio_get_command(mac, &cmd); + hbg_field_modify(cmd, HBG_REG_MDIO_COMMAND_ST_M, type); + hbg_field_modify(cmd, HBG_REG_MDIO_COMMAND_OP_M, op_code); + hbg_field_modify(cmd, HBG_REG_MDIO_COMMAND_PRTAD_M, prt_addr); + hbg_field_modify(cmd, HBG_REG_MDIO_COMMAND_DEVAD_M, dev_addr); + + /* if auto scan is enabled, this value must be fixed to 0 */ + hbg_field_modify(cmd, HBG_REG_MDIO_COMMAND_START_B, 0x1); + + hbg_mdio_set_command(mac, cmd); + + /* wait for the operation to complete and check the result */ + return hbg_mdio_wait_ready(mac); +} + +static int hbg_mdio_read22(struct mii_bus *bus, int phy_addr, int regnum) +{ + struct hbg_mac *mac = HBG_MII_BUS_GET_MAC(bus); + int ret; + + ret = hbg_mdio_cmd_send(mac, phy_addr, regnum, HBG_MDIO_C22_MODE, + HBG_MDIO_C22_REG_READ); + if (ret) + return ret; + + return hbg_mdio_get_rdata_reg(mac); +} + +static int hbg_mdio_write22(struct mii_bus *bus, int phy_addr, int regnum, + u16 val) +{ + struct hbg_mac *mac = HBG_MII_BUS_GET_MAC(bus); + + hbg_mdio_set_wdata_reg(mac, val); + return hbg_mdio_cmd_send(mac, phy_addr, regnum, HBG_MDIO_C22_MODE, + HBG_MDIO_C22_REG_WRITE); +} + +static void hbg_mdio_init_hw(struct hbg_priv *priv) +{ + u32 freq = priv->dev_specs.mdio_frequency; + struct hbg_mac *mac = &priv->mac; + u32 cmd = 0; + + cmd |= FIELD_PREP(HBG_REG_MDIO_COMMAND_ST_M, HBG_MDIO_C22_MODE); + cmd |= FIELD_PREP(HBG_REG_MDIO_COMMAND_AUTO_SCAN_B, HBG_STATUS_DISABLE); + + /* freq uses two bits, which are stored in clk_sel and clk_sel_exp */ + cmd |= FIELD_PREP(HBG_REG_MDIO_COMMAND_CLK_SEL_B, freq & 0x1); + cmd |= FIELD_PREP(HBG_REG_MDIO_COMMAND_CLK_SEL_EXP_B, + (freq >> 1) & 0x1); + + hbg_mdio_set_command(mac, cmd); +} + +static void hbg_phy_adjust_link(struct net_device *netdev) +{ + struct hbg_priv *priv = netdev_priv(netdev); + struct phy_device *phydev = netdev->phydev; + u32 speed; + + if (phydev->link != priv->mac.link_status) { + if (phydev->link) { + switch (phydev->speed) { + case SPEED_10: + speed = HBG_PORT_MODE_SGMII_10M; + break; + case SPEED_100: + speed = HBG_PORT_MODE_SGMII_100M; + break; + case SPEED_1000: + speed = HBG_PORT_MODE_SGMII_1000M; + break; + default: + return; + } + + priv->mac.speed = speed; + priv->mac.duplex = phydev->duplex; + priv->mac.autoneg = phydev->autoneg; + hbg_hw_adjust_link(priv, speed, phydev->duplex); + } + + priv->mac.link_status = phydev->link; + phy_print_status(phydev); + } +} + +static void hbg_phy_disconnect(void *data) +{ + phy_disconnect((struct phy_device *)data); +} + +static int hbg_phy_connect(struct hbg_priv *priv) +{ + struct phy_device *phydev = priv->mac.phydev; + struct device *dev = &priv->pdev->dev; + int ret; + + ret = phy_connect_direct(priv->netdev, phydev, hbg_phy_adjust_link, + PHY_INTERFACE_MODE_SGMII); + if (ret) + return dev_err_probe(dev, ret, "failed to connect phy\n"); + + ret = devm_add_action_or_reset(dev, hbg_phy_disconnect, phydev); + if (ret) + return ret; + + phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_1000baseT_Half_BIT); + phy_attached_info(phydev); + + return 0; +} + +void hbg_phy_start(struct hbg_priv *priv) +{ + phy_start(priv->mac.phydev); +} + +void hbg_phy_stop(struct hbg_priv *priv) +{ + phy_stop(priv->mac.phydev); +} + +int hbg_mdio_init(struct hbg_priv *priv) +{ + struct device *dev = &priv->pdev->dev; + struct hbg_mac *mac = &priv->mac; + struct phy_device *phydev; + struct mii_bus *mdio_bus; + int ret; + + mac->phy_addr =
priv->dev_specs.phy_addr; + mdio_bus = devm_mdiobus_alloc(dev); + if (!mdio_bus) + return dev_err_probe(dev, -ENOMEM, + "failed to alloc MDIO bus\n"); + + mdio_bus->parent = dev; + mdio_bus->priv = priv; + mdio_bus->phy_mask = ~(1 << mac->phy_addr); + mdio_bus->name = "hibmcge mii bus"; + mac->mdio_bus = mdio_bus; + + mdio_bus->read = hbg_mdio_read22; + mdio_bus->write = hbg_mdio_write22; + snprintf(mdio_bus->id, MII_BUS_ID_SIZE, "%s-%s", "mii", dev_name(dev)); + + ret = devm_mdiobus_register(dev, mdio_bus); + if (ret) + return dev_err_probe(dev, ret, "failed to register MDIO bus\n"); + + phydev = mdiobus_get_phy(mdio_bus, mac->phy_addr); + if (!phydev) + return dev_err_probe(dev, -ENODEV, + "failed to get phy device\n"); + + mac->phydev = phydev; + hbg_mdio_init_hw(priv); + return hbg_phy_connect(priv); +} diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_mdio.h b/drivers/net/ethernet/hisilicon/hibmcge/hbg_mdio.h new file mode 100644 index 000000000000..febd02a309c7 --- /dev/null +++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_mdio.h @@ -0,0 +1,12 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* Copyright (c) 2024 Hisilicon Limited. */ + +#ifndef __HBG_MDIO_H +#define __HBG_MDIO_H + +#include "hbg_common.h" + +int hbg_mdio_init(struct hbg_priv *priv); +void hbg_phy_start(struct hbg_priv *priv); +void hbg_phy_stop(struct hbg_priv *priv); +#endif diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_reg.h b/drivers/net/ethernet/hisilicon/hibmcge/hbg_reg.h new file mode 100644 index 000000000000..57d81c6d7633 --- /dev/null +++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_reg.h @@ -0,0 +1,143 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* Copyright (c) 2024 Hisilicon Limited. */ + +#ifndef __HBG_REG_H +#define __HBG_REG_H + +/* DEV SPEC */ +#define HBG_REG_SPEC_VALID_ADDR 0x0000 +#define HBG_REG_EVENT_REQ_ADDR 0x0004 +#define HBG_REG_MAC_ID_ADDR 0x0008 +#define HBG_REG_PHY_ID_ADDR 0x000C +#define HBG_REG_MAC_ADDR_ADDR 0x0010 +#define HBG_REG_MDIO_FREQ_ADDR 0x0024 +#define HBG_REG_MAX_MTU_ADDR 0x0028 +#define HBG_REG_MIN_MTU_ADDR 0x002C +#define HBG_REG_TX_FIFO_NUM_ADDR 0x0030 +#define HBG_REG_RX_FIFO_NUM_ADDR 0x0034 +#define HBG_REG_VLAN_LAYERS_ADDR 0x0038 + +/* MDIO */ +#define HBG_REG_MDIO_BASE 0x8000 +#define HBG_REG_MDIO_COMMAND_ADDR (HBG_REG_MDIO_BASE + 0x0000) +#define HBG_REG_MDIO_COMMAND_CLK_SEL_EXP_B BIT(17) +#define HBG_REG_MDIO_COMMAND_AUTO_SCAN_B BIT(16) +#define HBG_REG_MDIO_COMMAND_CLK_SEL_B BIT(15) +#define HBG_REG_MDIO_COMMAND_START_B BIT(14) +#define HBG_REG_MDIO_COMMAND_ST_M GENMASK(13, 12) +#define HBG_REG_MDIO_COMMAND_OP_M GENMASK(11, 10) +#define HBG_REG_MDIO_COMMAND_PRTAD_M GENMASK(9, 5) +#define HBG_REG_MDIO_COMMAND_DEVAD_M GENMASK(4, 0) +#define HBG_REG_MDIO_WDATA_ADDR (HBG_REG_MDIO_BASE + 0x0008) +#define HBG_REG_MDIO_WDATA_M GENMASK(15, 0) +#define HBG_REG_MDIO_RDATA_ADDR (HBG_REG_MDIO_BASE + 0x000C) +#define HBG_REG_MDIO_STA_ADDR (HBG_REG_MDIO_BASE + 0x0010) + +/* GMAC */ +#define HBG_REG_SGMII_BASE 0x10000 +#define HBG_REG_DUPLEX_TYPE_ADDR (HBG_REG_SGMII_BASE + 0x0008) +#define HBG_REG_DUPLEX_B BIT(0) +#define HBG_REG_MAX_FRAME_SIZE_ADDR (HBG_REG_SGMII_BASE + 0x003C) +#define HBG_REG_PORT_MODE_ADDR (HBG_REG_SGMII_BASE + 0x0040) +#define HBG_REG_PORT_MODE_M GENMASK(3, 0) +#define HBG_REG_PORT_ENABLE_ADDR (HBG_REG_SGMII_BASE + 0x0044) +#define HBG_REG_PORT_ENABLE_RX_B BIT(1) +#define HBG_REG_PORT_ENABLE_TX_B BIT(2) +#define HBG_REG_TRANSMIT_CTRL_ADDR (HBG_REG_SGMII_BASE + 0x0060) +#define HBG_REG_TRANSMIT_CTRL_PAD_EN_B BIT(7) +#define 
HBG_REG_TRANSMIT_CTRL_CRC_ADD_B BIT(6) +#define HBG_REG_TRANSMIT_CTRL_AN_EN_B BIT(5) +#define HBG_REG_CF_CRC_STRIP_ADDR (HBG_REG_SGMII_BASE + 0x01B0) +#define HBG_REG_CF_CRC_STRIP_B BIT(0) +#define HBG_REG_MODE_CHANGE_EN_ADDR (HBG_REG_SGMII_BASE + 0x01B4) +#define HBG_REG_MODE_CHANGE_EN_B BIT(0) +#define HBG_REG_RECV_CTRL_ADDR (HBG_REG_SGMII_BASE + 0x01E0) +#define HBG_REG_RECV_CTRL_STRIP_PAD_EN_B BIT(3) +#define HBG_REG_STATION_ADDR_LOW_2_ADDR (HBG_REG_SGMII_BASE + 0x0210) +#define HBG_REG_STATION_ADDR_HIGH_2_ADDR (HBG_REG_SGMII_BASE + 0x0214) + +/* PCU */ +#define HBG_REG_CF_INTRPT_MSK_ADDR (HBG_REG_SGMII_BASE + 0x042C) +#define HBG_INT_MSK_WE_ERR_B BIT(31) +#define HBG_INT_MSK_RBREQ_ERR_B BIT(30) +#define HBG_INT_MSK_MAC_FIFO_ERR_B BIT(29) +#define HBG_INT_MSK_RX_AHB_ERR_B BIT(28) +#define HBG_INT_MSK_RX_DROP_B BIT(26) +#define HBG_INT_MSK_TX_DROP_B BIT(25) +#define HBG_INT_MSK_TXCFG_AVL_B BIT(24) +#define HBG_INT_MSK_REL_BUF_ERR_B BIT(23) +#define HBG_INT_MSK_RX_BUF_AVL_B BIT(22) +#define HBG_INT_MSK_TX_AHB_ERR_B BIT(21) +#define HBG_INT_MSK_SRAM_PARITY_ERR_B BIT(20) +#define HBG_INT_MSK_MAC_APP_TX_FIFO_ERR_B BIT(19) +#define HBG_INT_MSK_MAC_APP_RX_FIFO_ERR_B BIT(18) +#define HBG_INT_MSK_MAC_PCS_TX_FIFO_ERR_B BIT(17) +#define HBG_INT_MSK_MAC_PCS_RX_FIFO_ERR_B BIT(16) +#define HBG_INT_MSK_MAC_MII_FIFO_ERR_B BIT(15) +#define HBG_INT_MSK_TX_B BIT(1) /* just used in driver */ +#define HBG_INT_MSK_RX_B BIT(0) /* just used in driver */ +#define HBG_REG_CF_INTRPT_STAT_ADDR (HBG_REG_SGMII_BASE + 0x0434) +#define HBG_REG_CF_INTRPT_CLR_ADDR (HBG_REG_SGMII_BASE + 0x0438) +#define HBG_REG_MAX_FRAME_LEN_ADDR (HBG_REG_SGMII_BASE + 0x0444) +#define HBG_REG_MAX_FRAME_LEN_M GENMASK(15, 0) +#define HBG_REG_CF_CFF_DATA_NUM_ADDR (HBG_REG_SGMII_BASE + 0x045C) +#define HBG_REG_CF_CFF_DATA_NUM_ADDR_TX_M GENMASK(8, 0) +#define HBG_REG_CF_CFF_DATA_NUM_ADDR_RX_M GENMASK(24, 16) +#define HBG_REG_TX_CFF_ADDR_0_ADDR (HBG_REG_SGMII_BASE + 0x0488) +#define HBG_REG_TX_CFF_ADDR_1_ADDR (HBG_REG_SGMII_BASE + 0x048C) +#define HBG_REG_TX_CFF_ADDR_2_ADDR (HBG_REG_SGMII_BASE + 0x0490) +#define HBG_REG_TX_CFF_ADDR_3_ADDR (HBG_REG_SGMII_BASE + 0x0494) +#define HBG_REG_RX_CFF_ADDR_ADDR (HBG_REG_SGMII_BASE + 0x04A0) +#define HBG_REG_RX_BUF_SIZE_ADDR (HBG_REG_SGMII_BASE + 0x04E4) +#define HBG_REG_RX_BUF_SIZE_M GENMASK(15, 0) +#define HBG_REG_BUS_CTRL_ADDR (HBG_REG_SGMII_BASE + 0x04E8) +#define HBG_REG_BUS_CTRL_ENDIAN_M GENMASK(2, 1) +#define HBG_REG_RX_CTRL_ADDR (HBG_REG_SGMII_BASE + 0x04F0) +#define HBG_REG_RX_CTRL_RXBUF_1ST_SKIP_SIZE_M GENMASK(31, 28) +#define HBG_REG_RX_CTRL_TIME_INF_EN_B BIT(23) +#define HBG_REG_RX_CTRL_RX_ALIGN_NUM_M GENMASK(18, 17) +#define HBG_REG_RX_CTRL_PORT_NUM GENMASK(16, 13) +#define HBG_REG_RX_CTRL_RX_GET_ADDR_MODE_B BIT(12) +#define HBG_REG_RX_CTRL_RXBUF_1ST_SKIP_SIZE2_M GENMASK(3, 0) +#define HBG_REG_RX_PKT_MODE_ADDR (HBG_REG_SGMII_BASE + 0x04F4) +#define HBG_REG_RX_PKT_MODE_PARSE_MODE_M GENMASK(22, 21) +#define HBG_REG_CF_IND_TXINT_MSK_ADDR (HBG_REG_SGMII_BASE + 0x0694) +#define HBG_REG_IND_INTR_MASK_B BIT(0) +#define HBG_REG_CF_IND_TXINT_STAT_ADDR (HBG_REG_SGMII_BASE + 0x0698) +#define HBG_REG_CF_IND_TXINT_CLR_ADDR (HBG_REG_SGMII_BASE + 0x069C) +#define HBG_REG_CF_IND_RXINT_MSK_ADDR (HBG_REG_SGMII_BASE + 0x06a0) +#define HBG_REG_CF_IND_RXINT_STAT_ADDR (HBG_REG_SGMII_BASE + 0x06a4) +#define HBG_REG_CF_IND_RXINT_CLR_ADDR (HBG_REG_SGMII_BASE + 0x06a8) + +enum hbg_port_mode { + /* 0x0 ~ 0x5 are reserved */ + HBG_PORT_MODE_SGMII_10M = 0x6, + HBG_PORT_MODE_SGMII_100M = 0x7, + HBG_PORT_MODE_SGMII_1000M 
= 0x8, +}; + +struct hbg_tx_desc { + u32 word0; + u32 word1; + u32 word2; /* pkt_addr */ + u32 word3; /* clear_addr */ +}; + +#define HBG_TX_DESC_W0_IP_OFF_M GENMASK(30, 26) +#define HBG_TX_DESC_W0_l3_CS_B BIT(2) +#define HBG_TX_DESC_W0_WB_B BIT(1) +#define HBG_TX_DESC_W0_l4_CS_B BIT(0) +#define HBG_TX_DESC_W1_SEND_LEN_M GENMASK(19, 4) + +struct hbg_rx_desc { + u32 word0; + u32 word1; /* tag */ + u32 word2; + u32 word3; + u32 word4; + u32 word5; +}; + +#define HBG_RX_DESC_W2_PKT_LEN_M GENMASK(31, 16) + +#endif diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_txrx.c b/drivers/net/ethernet/hisilicon/hibmcge/hbg_txrx.c new file mode 100644 index 000000000000..f4f256a0dfea --- /dev/null +++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_txrx.c @@ -0,0 +1,409 @@ +// SPDX-License-Identifier: GPL-2.0+ +// Copyright (c) 2024 Hisilicon Limited. + +#include <net/netdev_queues.h> +#include "hbg_common.h" +#include "hbg_irq.h" +#include "hbg_reg.h" +#include "hbg_txrx.h" + +#define netdev_get_tx_ring(netdev) \ + (&(((struct hbg_priv *)netdev_priv(netdev))->tx_ring)) + +#define buffer_to_dma_dir(buffer) (((buffer)->dir == HBG_DIR_RX) ? \ + DMA_FROM_DEVICE : DMA_TO_DEVICE) + +#define hbg_queue_used_num(head, tail, ring) ({ \ + typeof(ring) _ring = (ring); \ + ((tail) + _ring->len - (head)) % _ring->len; }) +#define hbg_queue_left_num(head, tail, ring) ({ \ + typeof(ring) _r = (ring); \ + _r->len - hbg_queue_used_num((head), (tail), _r) - 1; }) +#define hbg_queue_is_empty(head, tail, ring) \ + (hbg_queue_used_num((head), (tail), (ring)) == 0) +#define hbg_queue_is_full(head, tail, ring) \ + (hbg_queue_left_num((head), (tail), (ring)) == 0) +#define hbg_queue_next_prt(p, ring) (((p) + 1) % (ring)->len) +#define hbg_queue_move_next(p, ring) ({ \ + typeof(ring) _ring = (ring); \ + _ring->p = hbg_queue_next_prt(_ring->p, _ring); }) + +#define HBG_TX_STOP_THRS 2 +#define HBG_TX_START_THRS (2 * HBG_TX_STOP_THRS) + +static int hbg_dma_map(struct hbg_buffer *buffer) +{ + struct hbg_priv *priv = buffer->priv; + + buffer->skb_dma = dma_map_single(&priv->pdev->dev, + buffer->skb->data, buffer->skb_len, + buffer_to_dma_dir(buffer)); + if (unlikely(dma_mapping_error(&priv->pdev->dev, buffer->skb_dma))) + return -ENOMEM; + + return 0; +} + +static void hbg_dma_unmap(struct hbg_buffer *buffer) +{ + struct hbg_priv *priv = buffer->priv; + + if (unlikely(!buffer->skb_dma)) + return; + + dma_unmap_single(&priv->pdev->dev, buffer->skb_dma, buffer->skb_len, + buffer_to_dma_dir(buffer)); + buffer->skb_dma = 0; +} + +static void hbg_init_tx_desc(struct hbg_buffer *buffer, + struct hbg_tx_desc *tx_desc) +{ + u32 ip_offset = buffer->skb->network_header - buffer->skb->mac_header; + u32 word0 = 0; + + word0 |= FIELD_PREP(HBG_TX_DESC_W0_WB_B, HBG_STATUS_ENABLE); + word0 |= FIELD_PREP(HBG_TX_DESC_W0_IP_OFF_M, ip_offset); + if (likely(buffer->skb->ip_summed == CHECKSUM_PARTIAL)) { + word0 |= FIELD_PREP(HBG_TX_DESC_W0_l3_CS_B, HBG_STATUS_ENABLE); + word0 |= FIELD_PREP(HBG_TX_DESC_W0_l4_CS_B, HBG_STATUS_ENABLE); + } + + tx_desc->word0 = word0; + tx_desc->word1 = FIELD_PREP(HBG_TX_DESC_W1_SEND_LEN_M, + buffer->skb->len); + tx_desc->word2 = buffer->skb_dma; + tx_desc->word3 = buffer->state_dma; +} + +netdev_tx_t hbg_net_start_xmit(struct sk_buff *skb, struct net_device *netdev) +{ + struct hbg_ring *ring = netdev_get_tx_ring(netdev); + struct hbg_priv *priv = netdev_priv(netdev); + /* This smp_load_acquire() pairs with smp_store_release() in + * hbg_napi_tx_recycle() called in tx interrupt handle process. 
+ */ + u32 ntc = smp_load_acquire(&ring->ntc); + struct hbg_buffer *buffer; + struct hbg_tx_desc tx_desc; + u32 ntu = ring->ntu; + + if (unlikely(!skb->len || + skb->len > hbg_spec_max_frame_len(priv, HBG_DIR_TX))) { + dev_kfree_skb_any(skb); + netdev->stats.tx_errors++; + return NETDEV_TX_OK; + } + + if (!netif_subqueue_maybe_stop(netdev, 0, + hbg_queue_left_num(ntc, ntu, ring), + HBG_TX_STOP_THRS, HBG_TX_START_THRS)) + return NETDEV_TX_BUSY; + + buffer = &ring->queue[ntu]; + buffer->skb = skb; + buffer->skb_len = skb->len; + if (unlikely(hbg_dma_map(buffer))) { + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; + } + + buffer->state = HBG_TX_STATE_START; + hbg_init_tx_desc(buffer, &tx_desc); + hbg_hw_set_tx_desc(priv, &tx_desc); + + /* This smp_store_release() pairs with smp_load_acquire() in + * hbg_napi_tx_recycle() called in tx interrupt handle process. + */ + smp_store_release(&ring->ntu, hbg_queue_next_prt(ntu, ring)); + dev_sw_netstats_tx_add(netdev, 1, skb->len); + return NETDEV_TX_OK; +} + +static void hbg_buffer_free_skb(struct hbg_buffer *buffer) +{ + if (unlikely(!buffer->skb)) + return; + + dev_kfree_skb_any(buffer->skb); + buffer->skb = NULL; +} + +static int hbg_buffer_alloc_skb(struct hbg_buffer *buffer) +{ + u32 len = hbg_spec_max_frame_len(buffer->priv, buffer->dir); + struct hbg_priv *priv = buffer->priv; + + buffer->skb = netdev_alloc_skb(priv->netdev, len); + if (unlikely(!buffer->skb)) + return -ENOMEM; + + buffer->skb_len = len; + memset(buffer->skb->data, 0, HBG_PACKET_HEAD_SIZE); + return 0; +} + +static void hbg_buffer_free(struct hbg_buffer *buffer) +{ + hbg_dma_unmap(buffer); + hbg_buffer_free_skb(buffer); +} + +static int hbg_napi_tx_recycle(struct napi_struct *napi, int budget) +{ + struct hbg_ring *ring = container_of(napi, struct hbg_ring, napi); + /* This smp_load_acquire() pairs with smp_store_release() in + * hbg_net_start_xmit() called in xmit process. + */ + u32 ntu = smp_load_acquire(&ring->ntu); + struct hbg_priv *priv = ring->priv; + struct hbg_buffer *buffer; + u32 ntc = ring->ntc; + int packet_done = 0; + + /* We need to do cleanup even if budget is 0. + * Per the NAPI documentation, budget is for Rx. + * So we hardcode the amount of work Tx NAPI does to 128. + */ + budget = 128; + while (packet_done < budget) { + if (unlikely(hbg_queue_is_empty(ntc, ntu, ring))) + break; + + /* make sure the HW desc write is complete */ + dma_rmb(); + + buffer = &ring->queue[ntc]; + if (buffer->state != HBG_TX_STATE_COMPLETE) + break; + + hbg_buffer_free(buffer); + ntc = hbg_queue_next_prt(ntc, ring); + packet_done++; + } + + /* This smp_store_release() pairs with smp_load_acquire() in + * hbg_net_start_xmit() called in xmit process.
+ */ + smp_store_release(&ring->ntc, ntc); + netif_wake_queue(priv->netdev); + + if (likely(packet_done < budget && + napi_complete_done(napi, packet_done))) + hbg_hw_irq_enable(priv, HBG_INT_MSK_TX_B, true); + + return packet_done; +} + +static int hbg_rx_fill_one_buffer(struct hbg_priv *priv) +{ + struct hbg_ring *ring = &priv->rx_ring; + struct hbg_buffer *buffer; + int ret; + + if (hbg_queue_is_full(ring->ntc, ring->ntu, ring)) + return 0; + + buffer = &ring->queue[ring->ntu]; + ret = hbg_buffer_alloc_skb(buffer); + if (unlikely(ret)) + return ret; + + ret = hbg_dma_map(buffer); + if (unlikely(ret)) { + hbg_buffer_free_skb(buffer); + return ret; + } + + hbg_hw_fill_buffer(priv, buffer->skb_dma); + hbg_queue_move_next(ntu, ring); + return 0; +} + +static bool hbg_sync_data_from_hw(struct hbg_priv *priv, + struct hbg_buffer *buffer) +{ + struct hbg_rx_desc *rx_desc; + + /* make sure the HW desc write is complete */ + dma_rmb(); + + dma_sync_single_for_cpu(&priv->pdev->dev, buffer->skb_dma, + buffer->skb_len, DMA_FROM_DEVICE); + + rx_desc = (struct hbg_rx_desc *)buffer->skb->data; + return FIELD_GET(HBG_RX_DESC_W2_PKT_LEN_M, rx_desc->word2) != 0; +} + +static int hbg_napi_rx_poll(struct napi_struct *napi, int budget) +{ + struct hbg_ring *ring = container_of(napi, struct hbg_ring, napi); + struct hbg_priv *priv = ring->priv; + struct hbg_rx_desc *rx_desc; + struct hbg_buffer *buffer; + u32 packet_done = 0; + u32 pkt_len; + + while (packet_done < budget) { + if (unlikely(hbg_queue_is_empty(ring->ntc, ring->ntu, ring))) + break; + + buffer = &ring->queue[ring->ntc]; + if (unlikely(!buffer->skb)) + goto next_buffer; + + if (unlikely(!hbg_sync_data_from_hw(priv, buffer))) + break; + rx_desc = (struct hbg_rx_desc *)buffer->skb->data; + pkt_len = FIELD_GET(HBG_RX_DESC_W2_PKT_LEN_M, rx_desc->word2); + + hbg_dma_unmap(buffer); + + skb_reserve(buffer->skb, HBG_PACKET_HEAD_SIZE + NET_IP_ALIGN); + skb_put(buffer->skb, pkt_len); + buffer->skb->protocol = eth_type_trans(buffer->skb, + priv->netdev); + + dev_sw_netstats_rx_add(priv->netdev, pkt_len); + napi_gro_receive(napi, buffer->skb); + buffer->skb = NULL; + +next_buffer: + hbg_rx_fill_one_buffer(priv); + hbg_queue_move_next(ntc, ring); + packet_done++; + } + + if (likely(packet_done < budget && + napi_complete_done(napi, packet_done))) + hbg_hw_irq_enable(priv, HBG_INT_MSK_RX_B, true); + + return packet_done; +} + +static void hbg_ring_uninit(struct hbg_ring *ring) +{ + struct hbg_buffer *buffer; + u32 i; + + if (!ring->queue) + return; + + napi_disable(&ring->napi); + netif_napi_del(&ring->napi); + + for (i = 0; i < ring->len; i++) { + buffer = &ring->queue[i]; + hbg_buffer_free(buffer); + buffer->ring = NULL; + buffer->priv = NULL; + } + + dma_free_coherent(&ring->priv->pdev->dev, + ring->len * sizeof(*ring->queue), + ring->queue, ring->queue_dma); + ring->queue = NULL; + ring->queue_dma = 0; + ring->len = 0; + ring->priv = NULL; +} + +static int hbg_ring_init(struct hbg_priv *priv, struct hbg_ring *ring, + int (*napi_poll)(struct napi_struct *, int), + enum hbg_dir dir) +{ + struct hbg_buffer *buffer; + u32 i, len; + + len = hbg_get_spec_fifo_max_num(priv, dir) + 1; + ring->queue = dma_alloc_coherent(&priv->pdev->dev, + len * sizeof(*ring->queue), + &ring->queue_dma, GFP_KERNEL); + if (!ring->queue) + return -ENOMEM; + + for (i = 0; i < len; i++) { + buffer = &ring->queue[i]; + buffer->skb_len = 0; + buffer->dir = dir; + buffer->ring = ring; + buffer->priv = priv; + buffer->state_dma = ring->queue_dma + (i * sizeof(*buffer)); + } + + ring->dir = dir;
+ ring->priv = priv; + ring->ntc = 0; + ring->ntu = 0; + ring->len = len; + + if (dir == HBG_DIR_TX) + netif_napi_add_tx(priv->netdev, &ring->napi, napi_poll); + else + netif_napi_add(priv->netdev, &ring->napi, napi_poll); + + napi_enable(&ring->napi); + return 0; +} + +static int hbg_tx_ring_init(struct hbg_priv *priv) +{ + struct hbg_ring *tx_ring = &priv->tx_ring; + + if (!tx_ring->tout_log_buf) + tx_ring->tout_log_buf = devm_kmalloc(&priv->pdev->dev, + HBG_TX_TIMEOUT_BUF_LEN, + GFP_KERNEL); + + if (!tx_ring->tout_log_buf) + return -ENOMEM; + + return hbg_ring_init(priv, tx_ring, hbg_napi_tx_recycle, HBG_DIR_TX); +} + +static int hbg_rx_ring_init(struct hbg_priv *priv) +{ + int ret; + u32 i; + + ret = hbg_ring_init(priv, &priv->rx_ring, hbg_napi_rx_poll, HBG_DIR_RX); + if (ret) + return ret; + + for (i = 0; i < priv->rx_ring.len - 1; i++) { + ret = hbg_rx_fill_one_buffer(priv); + if (ret) { + hbg_ring_uninit(&priv->rx_ring); + return ret; + } + } + + return 0; +} + +int hbg_txrx_init(struct hbg_priv *priv) +{ + int ret; + + ret = hbg_tx_ring_init(priv); + if (ret) { + dev_err(&priv->pdev->dev, + "failed to init tx ring, ret = %d\n", ret); + return ret; + } + + ret = hbg_rx_ring_init(priv); + if (ret) { + dev_err(&priv->pdev->dev, + "failed to init rx ring, ret = %d\n", ret); + hbg_ring_uninit(&priv->tx_ring); + } + + return ret; +} + +void hbg_txrx_uninit(struct hbg_priv *priv) +{ + hbg_ring_uninit(&priv->tx_ring); + hbg_ring_uninit(&priv->rx_ring); +} diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_txrx.h b/drivers/net/ethernet/hisilicon/hibmcge/hbg_txrx.h new file mode 100644 index 000000000000..2883a5899ae2 --- /dev/null +++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_txrx.h @@ -0,0 +1,39 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* Copyright (c) 2024 Hisilicon Limited. */ + +#ifndef __HBG_TXRX_H +#define __HBG_TXRX_H + +#include <linux/etherdevice.h> +#include "hbg_hw.h" + +static inline u32 hbg_spec_max_frame_len(struct hbg_priv *priv, + enum hbg_dir dir) +{ + return (dir == HBG_DIR_TX) ? priv->dev_specs.max_frame_len : + priv->dev_specs.rx_buf_size; +} + +static inline u32 hbg_get_spec_fifo_max_num(struct hbg_priv *priv, + enum hbg_dir dir) +{ + return (dir == HBG_DIR_TX) ? 
priv->dev_specs.tx_fifo_num : + priv->dev_specs.rx_fifo_num; +} + +static inline bool hbg_fifo_is_full(struct hbg_priv *priv, enum hbg_dir dir) +{ + return hbg_hw_get_fifo_used_num(priv, dir) >= + hbg_get_spec_fifo_max_num(priv, dir); +} + +static inline u32 hbg_get_queue_used_num(struct hbg_ring *ring) +{ + return (ring->ntu + ring->len - ring->ntc) % ring->len; +} + +netdev_tx_t hbg_net_start_xmit(struct sk_buff *skb, struct net_device *netdev); +int hbg_txrx_init(struct hbg_priv *priv); +void hbg_txrx_uninit(struct hbg_priv *priv); + +#endif diff --git a/drivers/net/ethernet/hisilicon/hip04_eth.c b/drivers/net/ethernet/hisilicon/hip04_eth.c index beb815e5289b..a376d4bdf281 100644 --- a/drivers/net/ethernet/hisilicon/hip04_eth.c +++ b/drivers/net/ethernet/hisilicon/hip04_eth.c @@ -1047,7 +1047,7 @@ MODULE_DEVICE_TABLE(of, hip04_mac_match); static struct platform_driver hip04_mac_driver = { .probe = hip04_mac_probe, - .remove_new = hip04_remove, + .remove = hip04_remove, .driver = { .name = DRV_NAME, .of_match_table = hip04_mac_match, diff --git a/drivers/net/ethernet/hisilicon/hisi_femac.c b/drivers/net/ethernet/hisilicon/hisi_femac.c index 2406263c9dd3..d244a40df430 100644 --- a/drivers/net/ethernet/hisilicon/hisi_femac.c +++ b/drivers/net/ethernet/hisilicon/hisi_femac.c @@ -959,7 +959,7 @@ static struct platform_driver hisi_femac_driver = { .of_match_table = hisi_femac_match, }, .probe = hisi_femac_drv_probe, - .remove_new = hisi_femac_drv_remove, + .remove = hisi_femac_drv_remove, #ifdef CONFIG_PM .suspend = hisi_femac_drv_suspend, .resume = hisi_femac_drv_resume, diff --git a/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c b/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c index 1a972b093a42..e3e7f2270560 100644 --- a/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c +++ b/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c @@ -1312,7 +1312,7 @@ static struct platform_driver hix5hd2_dev_driver = { .of_match_table = hix5hd2_of_match, }, .probe = hix5hd2_dev_probe, - .remove_new = hix5hd2_dev_remove, + .remove = hix5hd2_dev_remove, }; module_platform_driver(hix5hd2_dev_driver); diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c index 58baac7103b3..5fa9b2eeb929 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c @@ -1090,28 +1090,24 @@ int hns_mac_init(struct dsaf_device *dsaf_dev) u32 port_id; int max_port_num = hns_mac_get_max_port_num(dsaf_dev); struct hns_mac_cb *mac_cb; - struct fwnode_handle *child; - device_for_each_child_node(dsaf_dev->dev, child) { + device_for_each_child_node_scoped(dsaf_dev->dev, child) { ret = fwnode_property_read_u32(child, "reg", &port_id); if (ret) { - fwnode_handle_put(child); dev_err(dsaf_dev->dev, "get reg fail, ret=%d!\n", ret); return ret; } if (port_id >= max_port_num) { - fwnode_handle_put(child); dev_err(dsaf_dev->dev, "reg(%u) out of range!\n", port_id); return -EINVAL; } mac_cb = devm_kzalloc(dsaf_dev->dev, sizeof(*mac_cb), GFP_KERNEL); - if (!mac_cb) { - fwnode_handle_put(child); + if (!mac_cb) return -ENOMEM; - } + mac_cb->fw_port = child; mac_cb->mac_id = (u8)port_id; dsaf_dev->mac_cb[port_id] = mac_cb; diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c index 1b67da1f6fa8..eb60f45a3460 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c @@ -3031,7 +3031,7 @@ MODULE_DEVICE_TABLE(of, g_dsaf_match); 
static struct platform_driver g_dsaf_driver = { .probe = hns_dsaf_probe, - .remove_new = hns_dsaf_remove, + .remove = hns_dsaf_remove, .driver = { .name = DSAF_DRV_NAME, .of_match_table = g_dsaf_match, diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.c b/drivers/net/ethernet/hisilicon/hns/hns_enet.c index fd32e15cadcb..42bb341fd80b 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_enet.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.c @@ -2439,7 +2439,7 @@ static struct platform_driver hns_nic_dev_driver = { .acpi_match_table = ACPI_PTR(hns_enet_acpi_match), }, .probe = hns_nic_dev_probe, - .remove_new = hns_nic_dev_remove, + .remove = hns_nic_dev_remove, }; module_platform_driver(hns_nic_dev_driver); diff --git a/drivers/net/ethernet/hisilicon/hns_mdio.c b/drivers/net/ethernet/hisilicon/hns_mdio.c index 8a047145f0c5..a1aa6c1f966e 100644 --- a/drivers/net/ethernet/hisilicon/hns_mdio.c +++ b/drivers/net/ethernet/hisilicon/hns_mdio.c @@ -636,7 +636,7 @@ MODULE_DEVICE_TABLE(acpi, hns_mdio_acpi_match); static struct platform_driver hns_mdio_driver = { .probe = hns_mdio_probe, - .remove_new = hns_mdio_remove, + .remove = hns_mdio_remove, .driver = { .name = MDIO_DRV_NAME, .of_match_table = hns_mdio_match, diff --git a/drivers/net/ethernet/i825xx/sni_82596.c b/drivers/net/ethernet/i825xx/sni_82596.c index 813403c2628f..baa598988f47 100644 --- a/drivers/net/ethernet/i825xx/sni_82596.c +++ b/drivers/net/ethernet/i825xx/sni_82596.c @@ -168,7 +168,7 @@ static void sni_82596_driver_remove(struct platform_device *pdev) static struct platform_driver sni_82596_driver = { .probe = sni_82596_probe, - .remove_new = sni_82596_driver_remove, + .remove = sni_82596_driver_remove, .driver = { .name = sni_82596_string, }, diff --git a/drivers/net/ethernet/ibm/ehea/ehea_main.c b/drivers/net/ethernet/ibm/ehea/ehea_main.c index c41c3f1cc506..9b006bc353a1 100644 --- a/drivers/net/ethernet/ibm/ehea/ehea_main.c +++ b/drivers/net/ethernet/ibm/ehea/ehea_main.c @@ -121,7 +121,7 @@ static struct platform_driver ehea_driver = { .of_match_table = ehea_device_table, }, .probe = ehea_probe_adapter, - .remove_new = ehea_remove, + .remove = ehea_remove, }; void ehea_dump(void *adr, int len, char *msg) diff --git a/drivers/net/ethernet/ibm/emac/core.c b/drivers/net/ethernet/ibm/emac/core.c index dac570f3c110..dadd987efb6b 100644 --- a/drivers/net/ethernet/ibm/emac/core.c +++ b/drivers/net/ethernet/ibm/emac/core.c @@ -3241,7 +3241,7 @@ static struct platform_driver emac_driver = { .of_match_table = emac_match, }, .probe = emac_probe, - .remove_new = emac_remove, + .remove = emac_remove, }; static void __init emac_make_bootlist(void) diff --git a/drivers/net/ethernet/ibm/emac/mal.c b/drivers/net/ethernet/ibm/emac/mal.c index 99d5f83f7c60..c634534710d9 100644 --- a/drivers/net/ethernet/ibm/emac/mal.c +++ b/drivers/net/ethernet/ibm/emac/mal.c @@ -778,7 +778,7 @@ static struct platform_driver mal_of_driver = { .of_match_table = mal_platform_match, }, .probe = mal_probe, - .remove_new = mal_remove, + .remove = mal_remove, }; int __init mal_init(void) diff --git a/drivers/net/ethernet/ibm/emac/rgmii.c b/drivers/net/ethernet/ibm/emac/rgmii.c index e1712fdc3c31..317c22d09172 100644 --- a/drivers/net/ethernet/ibm/emac/rgmii.c +++ b/drivers/net/ethernet/ibm/emac/rgmii.c @@ -300,7 +300,7 @@ static struct platform_driver rgmii_driver = { .of_match_table = rgmii_match, }, .probe = rgmii_probe, - .remove_new = rgmii_remove, + .remove = rgmii_remove, }; int __init rgmii_init(void) diff --git a/drivers/net/ethernet/ibm/emac/tah.c 
b/drivers/net/ethernet/ibm/emac/tah.c index fa3488258ca2..c605c8ff933e 100644 --- a/drivers/net/ethernet/ibm/emac/tah.c +++ b/drivers/net/ethernet/ibm/emac/tah.c @@ -158,7 +158,7 @@ static struct platform_driver tah_driver = { .of_match_table = tah_match, }, .probe = tah_probe, - .remove_new = tah_remove, + .remove = tah_remove, }; int __init tah_init(void) diff --git a/drivers/net/ethernet/ibm/emac/zmii.c b/drivers/net/ethernet/ibm/emac/zmii.c index 26e86cdee2f6..03bab3f95fe4 100644 --- a/drivers/net/ethernet/ibm/emac/zmii.c +++ b/drivers/net/ethernet/ibm/emac/zmii.c @@ -306,7 +306,7 @@ static struct platform_driver zmii_driver = { .of_match_table = zmii_match, }, .probe = zmii_probe, - .remove_new = zmii_remove, + .remove = zmii_remove, }; int __init zmii_init(void) diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c index 97425c06e1ed..cca2ed6ad289 100644 --- a/drivers/net/ethernet/ibm/ibmvnic.c +++ b/drivers/net/ethernet/ibm/ibmvnic.c @@ -2310,7 +2310,7 @@ static void ibmvnic_tx_scrq_clean_buffer(struct ibmvnic_adapter *adapter, tx_buff = &tx_pool->tx_buff[index]; adapter->netdev->stats.tx_packets--; adapter->netdev->stats.tx_bytes -= tx_buff->skb->len; - adapter->tx_stats_buffers[queue_num].packets--; + adapter->tx_stats_buffers[queue_num].batched_packets--; adapter->tx_stats_buffers[queue_num].bytes -= tx_buff->skb->len; dev_kfree_skb_any(tx_buff->skb); @@ -2402,7 +2402,8 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev) unsigned int tx_map_failed = 0; union sub_crq indir_arr[16]; unsigned int tx_dropped = 0; - unsigned int tx_packets = 0; + unsigned int tx_dpackets = 0; + unsigned int tx_bpackets = 0; unsigned int tx_bytes = 0; dma_addr_t data_dma_addr; struct netdev_queue *txq; @@ -2575,6 +2576,7 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev) if (lpar_rc != H_SUCCESS) goto tx_err; + tx_dpackets++; goto early_exit; } @@ -2603,6 +2605,8 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev) goto tx_err; } + tx_bpackets++; + early_exit: if (atomic_add_return(num_entries, &tx_scrq->used) >= adapter->req_tx_entries_per_subcrq) { @@ -2610,7 +2614,6 @@ early_exit: netif_stop_subqueue(netdev, queue_num); } - tx_packets++; tx_bytes += skb->len; txq_trans_cond_update(txq); ret = NETDEV_TX_OK; @@ -2640,10 +2643,11 @@ out: rcu_read_unlock(); netdev->stats.tx_dropped += tx_dropped; netdev->stats.tx_bytes += tx_bytes; - netdev->stats.tx_packets += tx_packets; + netdev->stats.tx_packets += tx_bpackets + tx_dpackets; adapter->tx_send_failed += tx_send_failed; adapter->tx_map_failed += tx_map_failed; - adapter->tx_stats_buffers[queue_num].packets += tx_packets; + adapter->tx_stats_buffers[queue_num].batched_packets += tx_bpackets; + adapter->tx_stats_buffers[queue_num].direct_packets += tx_dpackets; adapter->tx_stats_buffers[queue_num].bytes += tx_bytes; adapter->tx_stats_buffers[queue_num].dropped_packets += tx_dropped; @@ -3808,7 +3812,10 @@ static void ibmvnic_get_strings(struct net_device *dev, u32 stringset, u8 *data) memcpy(data, ibmvnic_stats[i].name, ETH_GSTRING_LEN); for (i = 0; i < adapter->req_tx_queues; i++) { - snprintf(data, ETH_GSTRING_LEN, "tx%d_packets", i); + snprintf(data, ETH_GSTRING_LEN, "tx%d_batched_packets", i); + data += ETH_GSTRING_LEN; + + snprintf(data, ETH_GSTRING_LEN, "tx%d_direct_packets", i); data += ETH_GSTRING_LEN; snprintf(data, ETH_GSTRING_LEN, "tx%d_bytes", i); @@ -3873,7 +3880,9 @@ static void ibmvnic_get_ethtool_stats(struct 
net_device *dev, (adapter, ibmvnic_stats[i].offset)); for (j = 0; j < adapter->req_tx_queues; j++) { - data[i] = adapter->tx_stats_buffers[j].packets; + data[i] = adapter->tx_stats_buffers[j].batched_packets; + i++; + data[i] = adapter->tx_stats_buffers[j].direct_packets; i++; data[i] = adapter->tx_stats_buffers[j].bytes; i++; diff --git a/drivers/net/ethernet/ibm/ibmvnic.h b/drivers/net/ethernet/ibm/ibmvnic.h index 94ac36b1408b..a189038d88df 100644 --- a/drivers/net/ethernet/ibm/ibmvnic.h +++ b/drivers/net/ethernet/ibm/ibmvnic.h @@ -213,7 +213,8 @@ struct ibmvnic_statistics { #define NUM_TX_STATS 3 struct ibmvnic_tx_queue_stats { - u64 packets; + u64 batched_packets; + u64 direct_packets; u64 bytes; u64 dropped_packets; }; diff --git a/drivers/net/ethernet/intel/Kconfig b/drivers/net/ethernet/intel/Kconfig index 0375c7448a57..20bc40eec487 100644 --- a/drivers/net/ethernet/intel/Kconfig +++ b/drivers/net/ethernet/intel/Kconfig @@ -258,6 +258,7 @@ config I40E_DCB config IAVF tristate select LIBIE + select NET_SHAPER config I40EVF tristate "Intel(R) Ethernet Adaptive Virtual Function support" diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c index ab7ae418d294..4de9b156b2be 100644 --- a/drivers/net/ethernet/intel/e1000/e1000_main.c +++ b/drivers/net/ethernet/intel/e1000/e1000_main.c @@ -513,6 +513,8 @@ void e1000_down(struct e1000_adapter *adapter) */ netif_carrier_off(netdev); + netif_queue_set_napi(netdev, 0, NETDEV_QUEUE_TYPE_RX, NULL); + netif_queue_set_napi(netdev, 0, NETDEV_QUEUE_TYPE_TX, NULL); napi_disable(&adapter->napi); e1000_irq_disable(adapter); @@ -1392,7 +1394,10 @@ int e1000_open(struct net_device *netdev) /* From here on the code is the same as e1000_up() */ clear_bit(__E1000_DOWN, &adapter->flags); + netif_napi_set_irq(&adapter->napi, adapter->pdev->irq); napi_enable(&adapter->napi); + netif_queue_set_napi(netdev, 0, NETDEV_QUEUE_TYPE_RX, &adapter->napi); + netif_queue_set_napi(netdev, 0, NETDEV_QUEUE_TYPE_TX, &adapter->napi); e1000_irq_enable(adapter); diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c index 07e903346358..286155efcedf 100644 --- a/drivers/net/ethernet/intel/e1000e/netdev.c +++ b/drivers/net/ethernet/intel/e1000e/netdev.c @@ -2928,11 +2928,8 @@ static void e1000_configure_tx(struct e1000_adapter *adapter) tx_ring->head = adapter->hw.hw_addr + E1000_TDH(0); tx_ring->tail = adapter->hw.hw_addr + E1000_TDT(0); - writel(0, tx_ring->head); if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA) e1000e_update_tdt_wa(tx_ring, 0); - else - writel(0, tx_ring->tail); /* Set the Tx Interrupt Delay register */ ew32(TIDV, adapter->tx_int_delay); @@ -3253,11 +3250,8 @@ static void e1000_configure_rx(struct e1000_adapter *adapter) rx_ring->head = adapter->hw.hw_addr + E1000_RDH(0); rx_ring->tail = adapter->hw.hw_addr + E1000_RDT(0); - writel(0, rx_ring->head); if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA) e1000e_update_rdt_wa(rx_ring, 0); - else - writel(0, rx_ring->tail); /* Enable Receive Checksum Offload for TCP and UDP */ rxcsum = er32(RXCSUM); @@ -4613,6 +4607,7 @@ int e1000e_open(struct net_device *netdev) struct e1000_hw *hw = &adapter->hw; struct pci_dev *pdev = adapter->pdev; int err; + int irq; /* disallow open during test */ if (test_bit(__E1000_TESTING, &adapter->state)) @@ -4676,7 +4671,15 @@ int e1000e_open(struct net_device *netdev) /* From here on the code is the same as e1000e_up() */ clear_bit(__E1000_DOWN, &adapter->state); + if (adapter->int_mode == 
E1000E_INT_MODE_MSIX) + irq = adapter->msix_entries[0].vector; + else + irq = adapter->pdev->irq; + + netif_napi_set_irq(&adapter->napi, irq); napi_enable(&adapter->napi); + netif_queue_set_napi(netdev, 0, NETDEV_QUEUE_TYPE_RX, &adapter->napi); + netif_queue_set_napi(netdev, 0, NETDEV_QUEUE_TYPE_TX, &adapter->napi); e1000_irq_enable(adapter); @@ -4735,6 +4738,8 @@ int e1000e_close(struct net_device *netdev) netdev_info(netdev, "NIC Link is Down\n"); } + netif_queue_set_napi(netdev, 0, NETDEV_QUEUE_TYPE_RX, NULL); + netif_queue_set_napi(netdev, 0, NETDEV_QUEUE_TYPE_TX, NULL); napi_disable(&adapter->napi); e1000e_free_tx_resources(adapter->tx_ring); diff --git a/drivers/net/ethernet/intel/iavf/iavf.h b/drivers/net/ethernet/intel/iavf/iavf.h index 48cd1d06761c..532a0a595fe8 100644 --- a/drivers/net/ethernet/intel/iavf/iavf.h +++ b/drivers/net/ethernet/intel/iavf/iavf.h @@ -34,6 +34,7 @@ #include <net/tc_act/tc_gact.h> #include <net/tc_act/tc_mirred.h> #include <net/tc_act/tc_skbedit.h> +#include <net/net_shaper.h> #include "iavf_type.h" #include <linux/avf/virtchnl.h> @@ -250,6 +251,9 @@ struct iavf_cloud_filter { #define IAVF_RESET_WAIT_DETECTED_COUNT 500 #define IAVF_RESET_WAIT_COMPLETE_COUNT 2000 +#define IAVF_MAX_QOS_TC_NUM 8 +#define IAVF_DEFAULT_QUANTA_SIZE 1024 + /* board specific private data structure */ struct iavf_adapter { struct workqueue_struct *wq; @@ -336,6 +340,9 @@ struct iavf_adapter { #define IAVF_FLAG_AQ_DISABLE_CTAG_VLAN_INSERTION BIT_ULL(36) #define IAVF_FLAG_AQ_ENABLE_STAG_VLAN_INSERTION BIT_ULL(37) #define IAVF_FLAG_AQ_DISABLE_STAG_VLAN_INSERTION BIT_ULL(38) +#define IAVF_FLAG_AQ_CONFIGURE_QUEUES_BW BIT_ULL(39) +#define IAVF_FLAG_AQ_CFG_QUEUES_QUANTA_SIZE BIT_ULL(40) +#define IAVF_FLAG_AQ_GET_QOS_CAPS BIT_ULL(41) /* flags for processing extended capability messages during * __IAVF_INIT_EXTENDED_CAPS. Each capability exchange requires @@ -408,6 +415,8 @@ struct iavf_adapter { VIRTCHNL_VF_OFFLOAD_FDIR_PF) #define ADV_RSS_SUPPORT(_a) ((_a)->vf_res->vf_cap_flags & \ VIRTCHNL_VF_OFFLOAD_ADV_RSS_PF) +#define QOS_ALLOWED(_a) ((_a)->vf_res->vf_cap_flags & \ + VIRTCHNL_VF_OFFLOAD_QOS) struct virtchnl_vf_resource *vf_res; /* incl. 
all VSIs */ struct virtchnl_vsi_resource *vsi_res; /* our LAN VSI */ struct virtchnl_version_info pf_version; @@ -416,6 +425,7 @@ struct iavf_adapter { struct virtchnl_vlan_caps vlan_v2_caps; u16 msg_enable; struct iavf_eth_stats current_stats; + struct virtchnl_qos_cap_list *qos_caps; struct iavf_vsi vsi; u32 aq_wait_count; /* RSS stuff */ @@ -529,22 +539,16 @@ static inline void iavf_change_state(struct iavf_adapter *adapter, iavf_state_str(adapter->state)); } -int iavf_up(struct iavf_adapter *adapter); void iavf_down(struct iavf_adapter *adapter); int iavf_process_config(struct iavf_adapter *adapter); int iavf_parse_vf_resource_msg(struct iavf_adapter *adapter); void iavf_schedule_reset(struct iavf_adapter *adapter, u64 flags); void iavf_schedule_aq_request(struct iavf_adapter *adapter, u64 flags); void iavf_schedule_finish_config(struct iavf_adapter *adapter); -void iavf_reset(struct iavf_adapter *adapter); void iavf_set_ethtool_ops(struct net_device *netdev); -void iavf_update_stats(struct iavf_adapter *adapter); void iavf_free_all_tx_resources(struct iavf_adapter *adapter); void iavf_free_all_rx_resources(struct iavf_adapter *adapter); -void iavf_napi_add_all(struct iavf_adapter *adapter); -void iavf_napi_del_all(struct iavf_adapter *adapter); - int iavf_send_api_ver(struct iavf_adapter *adapter); int iavf_verify_api_ver(struct iavf_adapter *adapter); int iavf_send_vf_config_msg(struct iavf_adapter *adapter); @@ -555,11 +559,9 @@ void iavf_set_queue_vlan_tag_loc(struct iavf_adapter *adapter); u16 iavf_get_num_vlans_added(struct iavf_adapter *adapter); void iavf_irq_enable(struct iavf_adapter *adapter, bool flush); void iavf_configure_queues(struct iavf_adapter *adapter); -void iavf_deconfigure_queues(struct iavf_adapter *adapter); void iavf_enable_queues(struct iavf_adapter *adapter); void iavf_disable_queues(struct iavf_adapter *adapter); void iavf_map_queues(struct iavf_adapter *adapter); -int iavf_request_queues(struct iavf_adapter *adapter, int num); void iavf_add_ether_addrs(struct iavf_adapter *adapter); void iavf_del_ether_addrs(struct iavf_adapter *adapter); void iavf_add_vlans(struct iavf_adapter *adapter); @@ -579,8 +581,9 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter, enum virtchnl_ops v_opcode, enum iavf_status v_retval, u8 *msg, u16 msglen); int iavf_config_rss(struct iavf_adapter *adapter); -int iavf_lan_add_device(struct iavf_adapter *adapter); -int iavf_lan_del_device(struct iavf_adapter *adapter); +void iavf_cfg_queues_bw(struct iavf_adapter *adapter); +void iavf_cfg_queues_quanta_size(struct iavf_adapter *adapter); +void iavf_get_qos_caps(struct iavf_adapter *adapter); void iavf_enable_channels(struct iavf_adapter *adapter); void iavf_disable_channels(struct iavf_adapter *adapter); void iavf_add_cloud_filter(struct iavf_adapter *adapter); diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c index f782402cd789..12ef160425aa 100644 --- a/drivers/net/ethernet/intel/iavf/iavf_main.c +++ b/drivers/net/ethernet/intel/iavf/iavf_main.c @@ -1972,8 +1972,11 @@ static void iavf_finish_config(struct work_struct *work) adapter = container_of(work, struct iavf_adapter, finish_config); - /* Always take RTNL first to prevent circular lock dependency */ + /* Always take RTNL first to prevent circular lock dependency; + * The dev->lock is needed to update the queue number + */ rtnl_lock(); + mutex_lock(&adapter->netdev->lock); mutex_lock(&adapter->crit_lock); if ((adapter->flags & IAVF_FLAG_SETUP_NETDEV_FEATURES) && @@ 
-2017,6 +2020,7 @@ static void iavf_finish_config(struct work_struct *work) out: mutex_unlock(&adapter->crit_lock); + mutex_unlock(&adapter->netdev->lock); rtnl_unlock(); } @@ -2085,6 +2089,21 @@ static int iavf_process_aq_command(struct iavf_adapter *adapter) return 0; } + if (adapter->aq_required & IAVF_FLAG_AQ_CONFIGURE_QUEUES_BW) { + iavf_cfg_queues_bw(adapter); + return 0; + } + + if (adapter->aq_required & IAVF_FLAG_AQ_GET_QOS_CAPS) { + iavf_get_qos_caps(adapter); + return 0; + } + + if (adapter->aq_required & IAVF_FLAG_AQ_CFG_QUEUES_QUANTA_SIZE) { + iavf_cfg_queues_quanta_size(adapter); + return 0; + } + if (adapter->aq_required & IAVF_FLAG_AQ_CONFIGURE_QUEUES) { iavf_configure_queues(adapter); return 0; @@ -2670,6 +2689,9 @@ static void iavf_init_config_adapter(struct iavf_adapter *adapter) /* request initial VLAN offload settings */ iavf_set_vlan_offload_features(adapter, 0, netdev->features); + if (QOS_ALLOWED(adapter)) + adapter->aq_required |= IAVF_FLAG_AQ_GET_QOS_CAPS; + iavf_schedule_finish_config(adapter); return; @@ -2919,6 +2941,30 @@ static void iavf_disable_vf(struct iavf_adapter *adapter) } /** + * iavf_reconfig_qs_bw - replay queue bandwidth settings after a hardware reset + * @adapter: board private structure + * + * After a reset, the shaper parameters of queues need to be replayed. + * Since the net_shaper object inside TX rings persists across reset, + * set the update flag for all queues so that the virtchnl message is triggered + * for all queues. + **/ +static void iavf_reconfig_qs_bw(struct iavf_adapter *adapter) +{ + int i, num = 0; + + for (i = 0; i < adapter->num_active_queues; i++) + if (adapter->tx_rings[i].q_shaper.bw_min || + adapter->tx_rings[i].q_shaper.bw_max) { + adapter->tx_rings[i].q_shaper_update = true; + num++; + } + + if (num) + adapter->aq_required |= IAVF_FLAG_AQ_CONFIGURE_QUEUES_BW; +} + +/** * iavf_reset_task - Call-back task to handle hardware reset * @work: pointer to work_struct * @@ -2944,10 +2990,12 @@ static void iavf_reset_task(struct work_struct *work) /* When device is being removed it doesn't make sense to run the reset * task, just return in such a case. */ + mutex_lock(&netdev->lock); if (!mutex_trylock(&adapter->crit_lock)) { if (adapter->state != __IAVF_REMOVE) queue_work(adapter->wq, &adapter->reset_task); + mutex_unlock(&netdev->lock); return; } @@ -2995,6 +3043,7 @@ static void iavf_reset_task(struct work_struct *work) reg_val); iavf_disable_vf(adapter); mutex_unlock(&adapter->crit_lock); + mutex_unlock(&netdev->lock); return; /* Do not attempt to reinit. It's dead, Jim.
*/ } @@ -3124,6 +3173,8 @@ continue_reset: iavf_up_complete(adapter); iavf_irq_enable(adapter, true); + + iavf_reconfig_qs_bw(adapter); } else { iavf_change_state(adapter, __IAVF_DOWN); wake_up(&adapter->down_waitqueue); @@ -3133,6 +3184,7 @@ continue_reset: wake_up(&adapter->reset_waitqueue); mutex_unlock(&adapter->crit_lock); + mutex_unlock(&netdev->lock); return; reset_err: @@ -3143,6 +3195,7 @@ reset_err: iavf_disable_vf(adapter); mutex_unlock(&adapter->crit_lock); + mutex_unlock(&netdev->lock); dev_err(&adapter->pdev->dev, "failed to allocate resources during reinit\n"); } @@ -3614,8 +3667,10 @@ exit: if (test_bit(__IAVF_IN_REMOVE_TASK, &adapter->crit_section)) return 0; + mutex_lock(&netdev->lock); netif_set_real_num_rx_queues(netdev, total_qps); netif_set_real_num_tx_queues(netdev, total_qps); + mutex_unlock(&netdev->lock); return ret; } @@ -4893,6 +4948,98 @@ static netdev_features_t iavf_fix_features(struct net_device *netdev, return iavf_fix_strip_features(adapter, features); } +static int +iavf_verify_shaper(struct net_shaper_binding *binding, + const struct net_shaper *shaper, + struct netlink_ext_ack *extack) +{ + struct iavf_adapter *adapter = netdev_priv(binding->netdev); + u64 vf_max; + + if (shaper->handle.scope == NET_SHAPER_SCOPE_QUEUE) { + vf_max = adapter->qos_caps->cap[0].shaper.peak; + if (vf_max && shaper->bw_max > vf_max) { + NL_SET_ERR_MSG_FMT(extack, "Max rate (%llu) of queue %d can't exceed max TX rate of VF (%llu kbps)", + shaper->bw_max, shaper->handle.id, + vf_max); + return -EINVAL; + } + } + return 0; +} + +static int +iavf_shaper_set(struct net_shaper_binding *binding, + const struct net_shaper *shaper, + struct netlink_ext_ack *extack) +{ + struct iavf_adapter *adapter = netdev_priv(binding->netdev); + const struct net_shaper_handle *handle = &shaper->handle; + struct iavf_ring *tx_ring; + int ret = 0; + + mutex_lock(&adapter->crit_lock); + if (handle->id >= adapter->num_active_queues) + goto unlock; + + ret = iavf_verify_shaper(binding, shaper, extack); + if (ret) + goto unlock; + + tx_ring = &adapter->tx_rings[handle->id]; + + tx_ring->q_shaper.bw_min = div_u64(shaper->bw_min, 1000); + tx_ring->q_shaper.bw_max = div_u64(shaper->bw_max, 1000); + tx_ring->q_shaper_update = true; + + adapter->aq_required |= IAVF_FLAG_AQ_CONFIGURE_QUEUES_BW; + +unlock: + mutex_unlock(&adapter->crit_lock); + return ret; +} + +static int iavf_shaper_del(struct net_shaper_binding *binding, + const struct net_shaper_handle *handle, + struct netlink_ext_ack *extack) +{ + struct iavf_adapter *adapter = netdev_priv(binding->netdev); + struct iavf_ring *tx_ring; + + mutex_lock(&adapter->crit_lock); + if (handle->id >= adapter->num_active_queues) + goto unlock; + + tx_ring = &adapter->tx_rings[handle->id]; + tx_ring->q_shaper.bw_min = 0; + tx_ring->q_shaper.bw_max = 0; + tx_ring->q_shaper_update = true; + + adapter->aq_required |= IAVF_FLAG_AQ_CONFIGURE_QUEUES_BW; + +unlock: + mutex_unlock(&adapter->crit_lock); + return 0; +} + +static void iavf_shaper_cap(struct net_shaper_binding *binding, + enum net_shaper_scope scope, + unsigned long *flags) +{ + if (scope != NET_SHAPER_SCOPE_QUEUE) + return; + + *flags = BIT(NET_SHAPER_A_CAPS_SUPPORT_BW_MIN) | + BIT(NET_SHAPER_A_CAPS_SUPPORT_BW_MAX) | + BIT(NET_SHAPER_A_CAPS_SUPPORT_METRIC_BPS); +} + +static const struct net_shaper_ops iavf_shaper_ops = { + .set = iavf_shaper_set, + .delete = iavf_shaper_del, + .capabilities = iavf_shaper_cap, +}; + static const struct net_device_ops iavf_netdev_ops = { .ndo_open = iavf_open, .ndo_stop = 
iavf_close, @@ -4908,6 +5055,7 @@ static const struct net_device_ops iavf_netdev_ops = { .ndo_fix_features = iavf_fix_features, .ndo_set_features = iavf_set_features, .ndo_setup_tc = iavf_setup_tc, + .net_shaper_ops = &iavf_shaper_ops, }; /** @@ -5054,7 +5202,7 @@ static int iavf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) struct net_device *netdev; struct iavf_adapter *adapter = NULL; struct iavf_hw *hw = NULL; - int err; + int err, len; err = pci_enable_device(pdev); if (err) @@ -5122,6 +5270,13 @@ static int iavf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) hw->bus.func = PCI_FUNC(pdev->devfn); hw->bus.bus_id = pdev->bus->number; + len = struct_size(adapter->qos_caps, cap, IAVF_MAX_QOS_TC_NUM); + adapter->qos_caps = kzalloc(len, GFP_KERNEL); + if (!adapter->qos_caps) { + err = -ENOMEM; + goto err_alloc_qos_cap; + } + /* set up the locks for the AQ, do this only once in probe * and destroy them only once in remove */ @@ -5160,6 +5315,8 @@ static int iavf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) /* Initialization goes on in the work. Do not add more of it below. */ return 0; +err_alloc_qos_cap: + iounmap(hw->hw_addr); err_ioremap: destroy_workqueue(adapter->wq); err_alloc_wq: diff --git a/drivers/net/ethernet/intel/iavf/iavf_prototype.h b/drivers/net/ethernet/intel/iavf/iavf_prototype.h index 48c3901381b4..cac9d1a35a52 100644 --- a/drivers/net/ethernet/intel/iavf/iavf_prototype.h +++ b/drivers/net/ethernet/intel/iavf/iavf_prototype.h @@ -18,7 +18,6 @@ /* adminq functions */ enum iavf_status iavf_init_adminq(struct iavf_hw *hw); enum iavf_status iavf_shutdown_adminq(struct iavf_hw *hw); -void iavf_adminq_init_ring_data(struct iavf_hw *hw); enum iavf_status iavf_clean_arq_element(struct iavf_hw *hw, struct iavf_arq_event_info *e, u16 *events_pending); @@ -33,8 +32,6 @@ bool iavf_asq_done(struct iavf_hw *hw); void iavf_debug_aq(struct iavf_hw *hw, enum iavf_debug_mask mask, void *desc, void *buffer, u16 buf_len); -void iavf_idle_aq(struct iavf_hw *hw); -void iavf_resume_aq(struct iavf_hw *hw); bool iavf_check_asq_alive(struct iavf_hw *hw); enum iavf_status iavf_aq_queue_shutdown(struct iavf_hw *hw, bool unloading); const char *iavf_aq_str(struct iavf_hw *hw, enum iavf_admin_queue_err aq_err); diff --git a/drivers/net/ethernet/intel/iavf/iavf_txrx.h b/drivers/net/ethernet/intel/iavf/iavf_txrx.h index d7b5587aeb8e..f97c702c0802 100644 --- a/drivers/net/ethernet/intel/iavf/iavf_txrx.h +++ b/drivers/net/ethernet/intel/iavf/iavf_txrx.h @@ -296,6 +296,8 @@ struct iavf_ring { */ u32 rx_buf_len; + struct net_shaper q_shaper; + bool q_shaper_update; } ____cacheline_internodealigned_in_smp; #define IAVF_ITR_ADAPTIVE_MIN_INC 0x0002 diff --git a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c index 7e810b65380c..15d388b431c5 100644 --- a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c +++ b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c @@ -151,7 +151,8 @@ int iavf_send_vf_config_msg(struct iavf_adapter *adapter) VIRTCHNL_VF_OFFLOAD_USO | VIRTCHNL_VF_OFFLOAD_FDIR_PF | VIRTCHNL_VF_OFFLOAD_ADV_RSS_PF | - VIRTCHNL_VF_CAP_ADV_LINK_SPEED; + VIRTCHNL_VF_CAP_ADV_LINK_SPEED | + VIRTCHNL_VF_OFFLOAD_QOS; adapter->current_op = VIRTCHNL_OP_GET_VF_RESOURCES; adapter->aq_required &= ~IAVF_FLAG_AQ_GET_CONFIG; @@ -1508,6 +1509,130 @@ iavf_set_adapter_link_speed_from_vpe(struct iavf_adapter *adapter, } /** + * iavf_get_qos_caps - get qos caps support + * @adapter: iavf adapter struct instance + * + * This function 
requests the supported QoS capabilities from the PF. + */ +void iavf_get_qos_caps(struct iavf_adapter *adapter) +{ + if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) { + /* bail because we already have a command pending */ + dev_err(&adapter->pdev->dev, + "Cannot get qos caps, command %d pending\n", + adapter->current_op); + return; + } + + adapter->current_op = VIRTCHNL_OP_GET_QOS_CAPS; + adapter->aq_required &= ~IAVF_FLAG_AQ_GET_QOS_CAPS; + iavf_send_pf_msg(adapter, VIRTCHNL_OP_GET_QOS_CAPS, NULL, 0); +} + +/** + * iavf_set_quanta_size - set quanta size of queue chunk + * @adapter: iavf adapter struct instance + * @quanta_size: quanta size in bytes + * @queue_index: starting index of queue chunk + * @num_queues: number of queues in the queue chunk + * + * This function requests the PF to set the quanta size of the queue chunk + * starting at @queue_index. + */ +static void +iavf_set_quanta_size(struct iavf_adapter *adapter, u16 quanta_size, + u16 queue_index, u16 num_queues) +{ + struct virtchnl_quanta_cfg quanta_cfg; + + if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) { + /* bail because we already have a command pending */ + dev_err(&adapter->pdev->dev, + "Cannot set queue quanta size, command %d pending\n", + adapter->current_op); + return; + } + + adapter->current_op = VIRTCHNL_OP_CONFIG_QUANTA; + quanta_cfg.quanta_size = quanta_size; + quanta_cfg.queue_select.type = VIRTCHNL_QUEUE_TYPE_TX; + quanta_cfg.queue_select.start_queue_id = queue_index; + quanta_cfg.queue_select.num_queues = num_queues; + adapter->aq_required &= ~IAVF_FLAG_AQ_CFG_QUEUES_QUANTA_SIZE; + iavf_send_pf_msg(adapter, VIRTCHNL_OP_CONFIG_QUANTA, + (u8 *)&quanta_cfg, sizeof(quanta_cfg)); +} + +/** + * iavf_cfg_queues_quanta_size - configure quanta size of queues + * @adapter: adapter structure + * + * Request that the PF configure the quanta size of all allocated queues.
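+ * Currently the driver always applies IAVF_DEFAULT_QUANTA_SIZE (1024 bytes) + * to queues 0 through num_active_queues - 1 in a single request.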
+ **/ +void iavf_cfg_queues_quanta_size(struct iavf_adapter *adapter) +{ + int quanta_size = IAVF_DEFAULT_QUANTA_SIZE; + + /* Set Queue Quanta Size to default */ + iavf_set_quanta_size(adapter, quanta_size, 0, + adapter->num_active_queues); +} + +/** + * iavf_cfg_queues_bw - configure bandwidth of allocated queues + * @adapter: iavf adapter structure instance + * + * This function requests PF to configure queue bandwidth of allocated queues + */ +void iavf_cfg_queues_bw(struct iavf_adapter *adapter) +{ + struct virtchnl_queues_bw_cfg *qs_bw_cfg; + struct net_shaper *q_shaper; + int qs_to_update = 0; + int i, inx = 0; + size_t len; + + if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) { + /* bail because we already have a command pending */ + dev_err(&adapter->pdev->dev, + "Cannot set tc queue bw, command %d pending\n", + adapter->current_op); + return; + } + + for (i = 0; i < adapter->num_active_queues; i++) { + if (adapter->tx_rings[i].q_shaper_update) + qs_to_update++; + } + len = struct_size(qs_bw_cfg, cfg, qs_to_update); + qs_bw_cfg = kzalloc(len, GFP_KERNEL); + if (!qs_bw_cfg) + return; + + qs_bw_cfg->vsi_id = adapter->vsi.id; + qs_bw_cfg->num_queues = qs_to_update; + + for (i = 0; i < adapter->num_active_queues; i++) { + struct iavf_ring *tx_ring = &adapter->tx_rings[i]; + + q_shaper = &tx_ring->q_shaper; + if (tx_ring->q_shaper_update) { + qs_bw_cfg->cfg[inx].queue_id = i; + qs_bw_cfg->cfg[inx].shaper.peak = q_shaper->bw_max; + qs_bw_cfg->cfg[inx].shaper.committed = q_shaper->bw_min; + qs_bw_cfg->cfg[inx].tc = 0; + inx++; + } + } + + adapter->current_op = VIRTCHNL_OP_CONFIG_QUEUE_BW; + adapter->aq_required &= ~IAVF_FLAG_AQ_CONFIGURE_QUEUES_BW; + iavf_send_pf_msg(adapter, VIRTCHNL_OP_CONFIG_QUEUE_BW, + (u8 *)qs_bw_cfg, len); + kfree(qs_bw_cfg); +} + +/** * iavf_enable_channels * @adapter: adapter structure * @@ -2227,6 +2352,18 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter, VIRTCHNL_RSS_ALG_TOEPLITZ_SYMMETRIC; break; + case VIRTCHNL_OP_GET_QOS_CAPS: + dev_warn(&adapter->pdev->dev, "Failed to Get Qos CAPs, error %s\n", + iavf_stat_str(&adapter->hw, v_retval)); + break; + case VIRTCHNL_OP_CONFIG_QUANTA: + dev_warn(&adapter->pdev->dev, "Failed to Config Quanta, error %s\n", + iavf_stat_str(&adapter->hw, v_retval)); + break; + case VIRTCHNL_OP_CONFIG_QUEUE_BW: + dev_warn(&adapter->pdev->dev, "Failed to Config Queue BW, error %s\n", + iavf_stat_str(&adapter->hw, v_retval)); + break; default: dev_err(&adapter->pdev->dev, "PF returned error %d (%s) to our request %d\n", v_retval, iavf_stat_str(&adapter->hw, v_retval), @@ -2569,6 +2706,24 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter, if (!v_retval) iavf_netdev_features_vlan_strip_set(netdev, false); break; + case VIRTCHNL_OP_GET_QOS_CAPS: { + u16 len = struct_size(adapter->qos_caps, cap, + IAVF_MAX_QOS_TC_NUM); + + memcpy(adapter->qos_caps, msg, min(msglen, len)); + + adapter->aq_required |= IAVF_FLAG_AQ_CFG_QUEUES_QUANTA_SIZE; + } + break; + case VIRTCHNL_OP_CONFIG_QUANTA: + break; + case VIRTCHNL_OP_CONFIG_QUEUE_BW: { + int i; + /* shaper configuration is successful for all queues */ + for (i = 0; i < adapter->num_active_queues; i++) + adapter->tx_rings[i].q_shaper_update = false; + } + break; default: if (adapter->current_op && (v_opcode != adapter->current_op)) dev_warn(&adapter->pdev->dev, "Expected response %d from PF, received %d\n", diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h index d6f80da30dec..680a81961ba1 100644 --- a/drivers/net/ethernet/intel/ice/ice.h 
+++ b/drivers/net/ethernet/intel/ice/ice.h @@ -207,6 +207,7 @@ enum ice_feature { ICE_F_GNSS, ICE_F_ROCE_LAG, ICE_F_SRIOV_LAG, + ICE_F_MBX_LIMIT, ICE_F_MAX }; @@ -371,9 +372,6 @@ struct ice_vsi { spinlock_t arfs_lock; /* protects aRFS hash table and filter state */ atomic_t *arfs_last_fltr_id; - u16 max_frame; - u16 rx_buf_len; - struct ice_aqc_vsi_props info; /* VSI properties */ struct ice_vsi_vlan_info vlan_info; /* vlan config to be restored */ @@ -669,6 +667,8 @@ struct ice_pf { struct ice_agg_node vf_agg_node[ICE_MAX_VF_AGG_NODES]; struct ice_dplls dplls; struct device *hwmon_dev; + + u8 num_quanta_prof_used; }; extern struct workqueue_struct *ice_lag_wq; @@ -1047,5 +1047,10 @@ static inline void ice_clear_rdma_cap(struct ice_pf *pf) clear_bit(ICE_FLAG_RDMA_ENA, pf->flags); } +static inline enum ice_phy_model ice_get_phy_model(const struct ice_hw *hw) +{ + return hw->ptp.phy_model; +} + extern const struct xdp_metadata_ops ice_xdp_md_ops; #endif /* _ICE_H_ */ diff --git a/drivers/net/ethernet/intel/ice/ice_adapter.c b/drivers/net/ethernet/intel/ice/ice_adapter.c index ad84d8ad49a6..01a08cfd0090 100644 --- a/drivers/net/ethernet/intel/ice/ice_adapter.c +++ b/drivers/net/ethernet/intel/ice/ice_adapter.c @@ -9,12 +9,14 @@ #include <linux/spinlock.h> #include <linux/xarray.h> #include "ice_adapter.h" +#include "ice.h" static DEFINE_XARRAY(ice_adapters); static DEFINE_MUTEX(ice_adapters_mutex); /* PCI bus number is 8 bits. Slot is 5 bits. Domain can have the rest. */ #define INDEX_FIELD_DOMAIN GENMASK(BITS_PER_LONG - 1, 13) +#define INDEX_FIELD_DEV GENMASK(31, 16) #define INDEX_FIELD_BUS GENMASK(12, 5) #define INDEX_FIELD_SLOT GENMASK(4, 0) @@ -24,9 +26,17 @@ static unsigned long ice_adapter_index(const struct pci_dev *pdev) WARN_ON(domain > FIELD_MAX(INDEX_FIELD_DOMAIN)); - return FIELD_PREP(INDEX_FIELD_DOMAIN, domain) | - FIELD_PREP(INDEX_FIELD_BUS, pdev->bus->number) | - FIELD_PREP(INDEX_FIELD_SLOT, PCI_SLOT(pdev->devfn)); + switch (pdev->device) { + case ICE_DEV_ID_E825C_BACKPLANE: + case ICE_DEV_ID_E825C_QSFP: + case ICE_DEV_ID_E825C_SFP: + case ICE_DEV_ID_E825C_SGMII: + return FIELD_PREP(INDEX_FIELD_DEV, pdev->device); + default: + return FIELD_PREP(INDEX_FIELD_DOMAIN, domain) | + FIELD_PREP(INDEX_FIELD_BUS, pdev->bus->number) | + FIELD_PREP(INDEX_FIELD_SLOT, PCI_SLOT(pdev->devfn)); + } } static struct ice_adapter *ice_adapter_new(void) @@ -40,11 +50,17 @@ static struct ice_adapter *ice_adapter_new(void) spin_lock_init(&adapter->ptp_gltsyn_time_lock); refcount_set(&adapter->refcount, 1); + mutex_init(&adapter->ports.lock); + INIT_LIST_HEAD(&adapter->ports.ports); + return adapter; } static void ice_adapter_free(struct ice_adapter *adapter) { + WARN_ON(!list_empty(&adapter->ports.ports)); + mutex_destroy(&adapter->ports.lock); + kfree(adapter); } diff --git a/drivers/net/ethernet/intel/ice/ice_adapter.h b/drivers/net/ethernet/intel/ice/ice_adapter.h index 9d11014ec02f..e233225848b3 100644 --- a/drivers/net/ethernet/intel/ice/ice_adapter.h +++ b/drivers/net/ethernet/intel/ice/ice_adapter.h @@ -4,22 +4,42 @@ #ifndef _ICE_ADAPTER_H_ #define _ICE_ADAPTER_H_ +#include <linux/types.h> #include <linux/spinlock_types.h> #include <linux/refcount_types.h> struct pci_dev; +struct ice_pf; + +/** + * struct ice_port_list - data used to store the list of adapter ports + * + * This structure contains data used to maintain a list of adapter ports + * + * @ports: list of ports + * @lock: protect access to the ports list + */ +struct ice_port_list { + struct list_head ports; + /* To synchronize the 
ports list operations */ + struct mutex lock; +}; /** * struct ice_adapter - PCI adapter resources shared across PFs * @ptp_gltsyn_time_lock: Spinlock protecting access to the GLTSYN_TIME * register of the PTP clock. * @refcount: Reference count. struct ice_pf objects hold the references. + * @ctrl_pf: Control PF of the adapter + * @ports: Ports list */ struct ice_adapter { + refcount_t refcount; /* For access to the GLTSYN_TIME register */ spinlock_t ptp_gltsyn_time_lock; - refcount_t refcount; + struct ice_pf *ctrl_pf; + struct ice_port_list ports; }; struct ice_adapter *ice_adapter_get(const struct pci_dev *pdev); diff --git a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h index 0be1a98d7cc1..1f01f3501d6b 100644 --- a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h +++ b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h @@ -1742,6 +1742,15 @@ struct ice_aqc_nvm { }; #define ICE_AQC_NVM_START_POINT 0 +#define ICE_AQC_NVM_SECTOR_UNIT 4096 +#define ICE_AQC_NVM_SDP_AC_PTR_OFFSET 0xD8 +#define ICE_AQC_NVM_SDP_AC_PTR_M GENMASK(14, 0) +#define ICE_AQC_NVM_SDP_AC_PTR_INVAL 0x7FFF +#define ICE_AQC_NVM_SDP_AC_PTR_TYPE_M BIT(15) +#define ICE_AQC_NVM_SDP_AC_SDP_NUM_M GENMASK(2, 0) +#define ICE_AQC_NVM_SDP_AC_DIR_M BIT(3) +#define ICE_AQC_NVM_SDP_AC_PIN_M GENMASK(15, 6) +#define ICE_AQC_NVM_SDP_AC_MAX_SIZE 7 #define ICE_AQC_NVM_TX_TOPO_MOD_ID 0x14B diff --git a/drivers/net/ethernet/intel/ice/ice_base.c b/drivers/net/ethernet/intel/ice/ice_base.c index 4a9a6899fc45..3a8e156d7d86 100644 --- a/drivers/net/ethernet/intel/ice/ice_base.c +++ b/drivers/net/ethernet/intel/ice/ice_base.c @@ -347,6 +347,8 @@ ice_setup_tx_ctx(struct ice_tx_ring *ring, struct ice_tlan_ctx *tlan_ctx, u16 pf break; } + tlan_ctx->quanta_prof_idx = ring->quanta_prof_id; + tlan_ctx->tso_ena = ICE_TX_LEGACY; tlan_ctx->tso_qnum = pf_q; @@ -445,7 +447,7 @@ static int ice_setup_rx_ctx(struct ice_rx_ring *ring) /* Max packet size for this queue - must not be set to a larger value * than 5 x DBUF */ - rlan_ctx.rxmax = min_t(u32, vsi->max_frame, + rlan_ctx.rxmax = min_t(u32, ring->max_frame, ICE_MAX_CHAINED_RX_BUFS * ring->rx_buf_len); /* Rx queue threshold in units of 64 */ @@ -541,8 +543,6 @@ static int ice_vsi_cfg_rxq(struct ice_rx_ring *ring) u32 num_bufs = ICE_RX_DESC_UNUSED(ring); int err; - ring->rx_buf_len = ring->vsi->rx_buf_len; - if (ring->vsi->type == ICE_VSI_PF || ring->vsi->type == ICE_VSI_SF) { if (!xdp_rxq_info_is_reg(&ring->xdp_rxq)) { err = __xdp_rxq_info_reg(&ring->xdp_rxq, ring->netdev, @@ -641,21 +641,25 @@ int ice_vsi_cfg_single_rxq(struct ice_vsi *vsi, u16 q_idx) /** * ice_vsi_cfg_frame_size - setup max frame size and Rx buffer length * @vsi: VSI + * @ring: Rx ring to configure + * + * Determine the maximum frame size and Rx buffer length to use for a PF VSI. + * Set these in the associated Rx ring structure. 
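+ * VF VSIs do not use this helper; see ice_vsi_cfg_rxqs().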
*/ -static void ice_vsi_cfg_frame_size(struct ice_vsi *vsi) +static void ice_vsi_cfg_frame_size(struct ice_vsi *vsi, struct ice_rx_ring *ring) { if (!vsi->netdev || test_bit(ICE_FLAG_LEGACY_RX, vsi->back->flags)) { - vsi->max_frame = ICE_MAX_FRAME_LEGACY_RX; - vsi->rx_buf_len = ICE_RXBUF_1664; + ring->max_frame = ICE_MAX_FRAME_LEGACY_RX; + ring->rx_buf_len = ICE_RXBUF_1664; #if (PAGE_SIZE < 8192) } else if (!ICE_2K_TOO_SMALL_WITH_PADDING && (vsi->netdev->mtu <= ETH_DATA_LEN)) { - vsi->max_frame = ICE_RXBUF_1536 - NET_IP_ALIGN; - vsi->rx_buf_len = ICE_RXBUF_1536 - NET_IP_ALIGN; + ring->max_frame = ICE_RXBUF_1536 - NET_IP_ALIGN; + ring->rx_buf_len = ICE_RXBUF_1536 - NET_IP_ALIGN; #endif } else { - vsi->max_frame = ICE_AQ_SET_MAC_FRAME_SIZE_MAX; - vsi->rx_buf_len = ICE_RXBUF_3072; + ring->max_frame = ICE_AQ_SET_MAC_FRAME_SIZE_MAX; + ring->rx_buf_len = ICE_RXBUF_3072; } } @@ -670,15 +674,15 @@ int ice_vsi_cfg_rxqs(struct ice_vsi *vsi) { u16 i; - if (vsi->type == ICE_VSI_VF) - goto setup_rings; - - ice_vsi_cfg_frame_size(vsi); -setup_rings: /* set up individual rings */ ice_for_each_rxq(vsi, i) { - int err = ice_vsi_cfg_rxq(vsi->rx_rings[i]); + struct ice_rx_ring *ring = vsi->rx_rings[i]; + int err; + + if (vsi->type != ICE_VSI_VF) + ice_vsi_cfg_frame_size(vsi, ring); + err = ice_vsi_cfg_rxq(ring); if (err) return err; } diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c index 009716a12a26..b22e71dc59d4 100644 --- a/drivers/net/ethernet/intel/ice/ice_common.c +++ b/drivers/net/ethernet/intel/ice/ice_common.c @@ -2437,6 +2437,25 @@ ice_parse_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p, } /** + * ice_func_id_to_logical_id - map from function id to logical pf id + * @active_function_bitmap: active function bitmap + * @pf_id: function number of device + * + * Return: logical PF ID. 
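+ * The logical ID equals the number of active functions with an ID below + * @pf_id in @active_function_bitmap.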
+ */ +static int ice_func_id_to_logical_id(u32 active_function_bitmap, u8 pf_id) +{ + u8 logical_id = 0; + u8 i; + + for (i = 0; i < pf_id; i++) + if (active_function_bitmap & BIT(i)) + logical_id++; + + return logical_id; +} + +/** * ice_parse_valid_functions_cap - Parse ICE_AQC_CAPS_VALID_FUNCTIONS caps * @hw: pointer to the HW struct * @dev_p: pointer to device capabilities structure @@ -2453,6 +2472,8 @@ ice_parse_valid_functions_cap(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p, dev_p->num_funcs = hweight32(number); ice_debug(hw, ICE_DBG_INIT, "dev caps: num_funcs = %d\n", dev_p->num_funcs); + + hw->logical_pf_id = ice_func_id_to_logical_id(number, hw->pf_id); } /** diff --git a/drivers/net/ethernet/intel/ice/ice_eswitch.h b/drivers/net/ethernet/intel/ice/ice_eswitch.h index 20ce32dda69c..ac7db100e2cd 100644 --- a/drivers/net/ethernet/intel/ice/ice_eswitch.h +++ b/drivers/net/ethernet/intel/ice/ice_eswitch.h @@ -60,11 +60,6 @@ ice_eswitch_set_target_vsi(struct sk_buff *skb, static inline void ice_eswitch_update_repr(unsigned long *repr_id, struct ice_vsi *vsi) { } -static inline int ice_eswitch_configure(struct ice_pf *pf) -{ - return 0; -} - static inline int ice_eswitch_mode_get(struct devlink *devlink, u16 *mode) { return DEVLINK_ESWITCH_MODE_LEGACY; diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.c b/drivers/net/ethernet/intel/ice/ice_ethtool.c index d5cc934d1359..2924ac61300d 100644 --- a/drivers/net/ethernet/intel/ice/ice_ethtool.c +++ b/drivers/net/ethernet/intel/ice/ice_ethtool.c @@ -4716,6 +4716,81 @@ static void ice_get_fec_stats(struct net_device *netdev, pi->lport, err); } +#define ICE_ETHTOOL_PFR (ETH_RESET_IRQ | ETH_RESET_DMA | \ + ETH_RESET_FILTER | ETH_RESET_OFFLOAD) + +#define ICE_ETHTOOL_CORER ((ICE_ETHTOOL_PFR | ETH_RESET_RAM) << \ + ETH_RESET_SHARED_SHIFT) + +#define ICE_ETHTOOL_GLOBR (ICE_ETHTOOL_CORER | \ + (ETH_RESET_MAC << ETH_RESET_SHARED_SHIFT) | \ + (ETH_RESET_PHY << ETH_RESET_SHARED_SHIFT)) + +#define ICE_ETHTOOL_VFR ICE_ETHTOOL_PFR + +/** + * ice_ethtool_reset - triggers a given type of reset + * @dev: network interface device structure + * @flags: set of reset flags + * + * Return: 0 on success, -EOPNOTSUPP when using unsupported set of flags. + */ +static int ice_ethtool_reset(struct net_device *dev, u32 *flags) +{ + struct ice_netdev_priv *np = netdev_priv(dev); + struct ice_pf *pf = np->vsi->back; + enum ice_reset_req reset; + + switch (*flags) { + case ICE_ETHTOOL_CORER: + reset = ICE_RESET_CORER; + break; + case ICE_ETHTOOL_GLOBR: + reset = ICE_RESET_GLOBR; + break; + case ICE_ETHTOOL_PFR: + reset = ICE_RESET_PFR; + break; + default: + netdev_info(dev, "Unsupported set of ethtool flags"); + return -EOPNOTSUPP; + } + + ice_schedule_reset(pf, reset); + + *flags = 0; + + return 0; +} + +/** + * ice_repr_ethtool_reset - triggers a VF reset + * @dev: network interface device structure + * @flags: set of reset flags + * + * Return: 0 on success, + * -EOPNOTSUPP when using unsupported set of flags + * -EBUSY when VF is not ready for reset. 
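+ * Only VF representors are supported, and @flags must be exactly + * ICE_ETHTOOL_VFR.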
+ */ +static int ice_repr_ethtool_reset(struct net_device *dev, u32 *flags) +{ + struct ice_repr *repr = ice_netdev_to_repr(dev); + struct ice_vf *vf; + + if (repr->type != ICE_REPR_TYPE_VF || + *flags != ICE_ETHTOOL_VFR) + return -EOPNOTSUPP; + + vf = repr->vf; + + if (ice_check_vf_ready_for_cfg(vf)) + return -EBUSY; + + *flags = 0; + + return ice_reset_vf(vf, ICE_VF_RESET_VFLR | ICE_VF_RESET_LOCK); +} + static const struct ethtool_ops ice_ethtool_ops = { .cap_rss_ctx_supported = true, .supported_coalesce_params = ETHTOOL_COALESCE_USECS | @@ -4752,6 +4827,7 @@ static const struct ethtool_ops ice_ethtool_ops = { .nway_reset = ice_nway_reset, .get_pauseparam = ice_get_pauseparam, .set_pauseparam = ice_set_pauseparam, + .reset = ice_ethtool_reset, .get_rxfh_key_size = ice_get_rxfh_key_size, .get_rxfh_indir_size = ice_get_rxfh_indir_size, .get_rxfh = ice_get_rxfh, @@ -4804,6 +4880,7 @@ static const struct ethtool_ops ice_ethtool_repr_ops = { .get_strings = ice_repr_get_strings, .get_ethtool_stats = ice_repr_get_ethtool_stats, .get_sset_count = ice_repr_get_sset_count, + .reset = ice_repr_ethtool_reset, }; /** diff --git a/drivers/net/ethernet/intel/ice/ice_flex_pipe.h b/drivers/net/ethernet/intel/ice/ice_flex_pipe.h index 90b9b0993122..28b0897adf32 100644 --- a/drivers/net/ethernet/intel/ice/ice_flex_pipe.h +++ b/drivers/net/ethernet/intel/ice/ice_flex_pipe.h @@ -23,9 +23,6 @@ int ice_get_sw_fv_list(struct ice_hw *hw, struct ice_prot_lkup_ext *lkups, unsigned long *bm, struct list_head *fv_list); int -ice_pkg_buf_unreserve_section(struct ice_buf_build *bld, u16 count); -u16 ice_pkg_buf_get_free_space(struct ice_buf_build *bld); -int ice_aq_upload_section(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf, u16 buf_size, struct ice_sq_cd *cd); bool diff --git a/drivers/net/ethernet/intel/ice/ice_gnss.c b/drivers/net/ethernet/intel/ice/ice_gnss.c index c8ea1af51ad3..f02e8ca55375 100644 --- a/drivers/net/ethernet/intel/ice/ice_gnss.c +++ b/drivers/net/ethernet/intel/ice/ice_gnss.c @@ -397,8 +397,8 @@ bool ice_gnss_is_gps_present(struct ice_hw *hw) int err; u8 data; - err = ice_read_pca9575_reg_e810t(hw, ICE_PCA9575_P0_IN, &data); - if (err || !!(data & ICE_E810T_P0_GNSS_PRSNT_N)) + err = ice_read_pca9575_reg(hw, ICE_PCA9575_P0_IN, &data); + if (err || !!(data & ICE_P0_GNSS_PRSNT_N)) return false; } else { return false; diff --git a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h index 91cbae1eec89..dc88aea9f473 100644 --- a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h +++ b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h @@ -6,6 +6,14 @@ #ifndef _ICE_HW_AUTOGEN_H_ #define _ICE_HW_AUTOGEN_H_ +#define GLCOMM_QUANTA_PROF(_i) (0x002D2D68 + ((_i) * 4)) +#define GLCOMM_QUANTA_PROF_MAX_INDEX 15 +#define GLCOMM_QUANTA_PROF_QUANTA_SIZE_S 0 +#define GLCOMM_QUANTA_PROF_QUANTA_SIZE_M ICE_M(0x3FFF, 0) +#define GLCOMM_QUANTA_PROF_MAX_CMD_S 16 +#define GLCOMM_QUANTA_PROF_MAX_CMD_M ICE_M(0xFF, 16) +#define GLCOMM_QUANTA_PROF_MAX_DESC_S 24 +#define GLCOMM_QUANTA_PROF_MAX_DESC_M ICE_M(0x3F, 24) #define QTX_COMM_DBELL(_DBQM) (0x002C0000 + ((_DBQM) * 4)) #define QTX_COMM_HEAD(_DBQM) (0x000E0000 + ((_DBQM) * 4)) #define QTX_COMM_HEAD_HEAD_S 0 @@ -539,5 +547,8 @@ #define E830_PRTMAC_CL01_QNT_THR_CL0_M GENMASK(15, 0) #define VFINT_DYN_CTLN(_i) (0x00003800 + ((_i) * 4)) #define VFINT_DYN_CTLN_CLEARPBA_M BIT(1) +#define E830_MBX_PF_IN_FLIGHT_VF_MSGS_THRESH 0x00234000 +#define E830_MBX_VF_DEC_TRIG(_VF) (0x00233800 + (_VF) * 4) +#define 
E830_MBX_VF_IN_FLIGHT_MSGS_AT_PF_CNT(_VF) (0x00233000 + (_VF) * 4) #endif /* _ICE_HW_AUTOGEN_H_ */ diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c index 06e712cdc3d9..d4e74f96a8ad 100644 --- a/drivers/net/ethernet/intel/ice/ice_lib.c +++ b/drivers/net/ethernet/intel/ice/ice_lib.c @@ -3880,6 +3880,9 @@ void ice_init_feature_support(struct ice_pf *pf) default: break; } + + if (pf->hw.mac_type == ICE_MAC_E830) + ice_set_feature_support(pf, ICE_F_MBX_LIMIT); } /** diff --git a/drivers/net/ethernet/intel/ice/ice_lib.h b/drivers/net/ethernet/intel/ice/ice_lib.h index 1a6cfc8693ce..10d6fc479a32 100644 --- a/drivers/net/ethernet/intel/ice/ice_lib.h +++ b/drivers/net/ethernet/intel/ice/ice_lib.h @@ -88,8 +88,6 @@ void ice_write_intrl(struct ice_q_vector *q_vector, u8 intrl); void ice_write_itr(struct ice_ring_container *rc, u16 itr); void ice_set_q_vector_intrl(struct ice_q_vector *q_vector); -int ice_vsi_cfg_mac_fltr(struct ice_vsi *vsi, const u8 *macaddr, bool set); - bool ice_is_safe_mode(struct ice_pf *pf); bool ice_is_rdma_ena(struct ice_pf *pf); bool ice_is_dflt_vsi_in_use(struct ice_port_info *pi); diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c index b1e7727b8677..a6f586f9bfd1 100644 --- a/drivers/net/ethernet/intel/ice/ice_main.c +++ b/drivers/net/ethernet/intel/ice/ice_main.c @@ -1546,12 +1546,20 @@ static int __ice_clean_ctrlq(struct ice_pf *pf, enum ice_ctl_q q_type) ice_vf_lan_overflow_event(pf, &event); break; case ice_mbx_opc_send_msg_to_pf: - data.num_msg_proc = i; - data.num_pending_arq = pending; - data.max_num_msgs_mbx = hw->mailboxq.num_rq_entries; - data.async_watermark_val = ICE_MBX_OVERFLOW_WATERMARK; + if (ice_is_feature_supported(pf, ICE_F_MBX_LIMIT)) { + ice_vc_process_vf_msg(pf, &event, NULL); + ice_mbx_vf_dec_trig_e830(hw, &event); + } else { + u16 val = hw->mailboxq.num_rq_entries; + + data.max_num_msgs_mbx = val; + val = ICE_MBX_OVERFLOW_WATERMARK; + data.async_watermark_val = val; + data.num_msg_proc = i; + data.num_pending_arq = pending; - ice_vc_process_vf_msg(pf, &event, &data); + ice_vc_process_vf_msg(pf, &event, &data); + } break; case ice_aqc_opc_fw_logs_event: ice_get_fwlog_data(pf, &event); @@ -4082,7 +4090,11 @@ static int ice_init_pf(struct ice_pf *pf) mutex_init(&pf->vfs.table_lock); hash_init(pf->vfs.table); - ice_mbx_init_snapshot(&pf->hw); + if (ice_is_feature_supported(pf, ICE_F_MBX_LIMIT)) + wr32(&pf->hw, E830_MBX_PF_IN_FLIGHT_VF_MSGS_THRESH, + ICE_MBX_OVERFLOW_WATERMARK); + else + ice_mbx_init_snapshot(&pf->hw); xa_init(&pf->dyn_ports); xa_init(&pf->sf_nums); @@ -6512,8 +6524,7 @@ ice_set_features(struct net_device *netdev, netdev_features_t features) if (changed & NETIF_F_HW_TC) { bool ena = !!(features & NETIF_F_HW_TC); - ena ? 
set_bit(ICE_FLAG_CLS_FLOWER, pf->flags) : - clear_bit(ICE_FLAG_CLS_FLOWER, pf->flags); + assign_bit(ICE_FLAG_CLS_FLOWER, pf->flags, ena); } if (changed & NETIF_F_LOOPBACK) diff --git a/drivers/net/ethernet/intel/ice/ice_ptp.c b/drivers/net/ethernet/intel/ice/ice_ptp.c index ef2e858f49bb..a999fface272 100644 --- a/drivers/net/ethernet/intel/ice/ice_ptp.c +++ b/drivers/net/ethernet/intel/ice/ice_ptp.c @@ -4,253 +4,187 @@ #include "ice.h" #include "ice_lib.h" #include "ice_trace.h" +#include "ice_cgu_regs.h" + +static const char ice_pin_names[][64] = { + "SDP0", + "SDP1", + "SDP2", + "SDP3", + "TIME_SYNC", + "1PPS" +}; -#define E810_OUT_PROP_DELAY_NS 1 +static const struct ice_ptp_pin_desc ice_pin_desc_e82x[] = { + /* name, gpio */ + { TIME_SYNC, { 4, -1 }}, + { ONE_PPS, { -1, 5 }}, +}; -static const struct ptp_pin_desc ice_pin_desc_e810t[] = { - /* name idx func chan */ - { "GNSS", GNSS, PTP_PF_EXTTS, 0, { 0, } }, - { "SMA1", SMA1, PTP_PF_NONE, 1, { 0, } }, - { "U.FL1", UFL1, PTP_PF_NONE, 1, { 0, } }, - { "SMA2", SMA2, PTP_PF_NONE, 2, { 0, } }, - { "U.FL2", UFL2, PTP_PF_NONE, 2, { 0, } }, +static const struct ice_ptp_pin_desc ice_pin_desc_e825c[] = { + /* name, gpio */ + { SDP0, { 0, 0 }}, + { SDP1, { 1, 1 }}, + { SDP2, { 2, 2 }}, + { SDP3, { 3, 3 }}, + { TIME_SYNC, { 4, -1 }}, + { ONE_PPS, { -1, 5 }}, }; -/** - * ice_get_sma_config_e810t - * @hw: pointer to the hw struct - * @ptp_pins: pointer to the ptp_pin_desc struture - * - * Read the configuration of the SMA control logic and put it into the - * ptp_pin_desc structure - */ -static int -ice_get_sma_config_e810t(struct ice_hw *hw, struct ptp_pin_desc *ptp_pins) -{ - u8 data, i; - int status; +static const struct ice_ptp_pin_desc ice_pin_desc_e810[] = { + /* name, gpio */ + { SDP0, { 0, 0 }}, + { SDP1, { 1, 1 }}, + { SDP2, { 2, 2 }}, + { SDP3, { 3, 3 }}, + { ONE_PPS, { -1, 5 }}, +}; - /* Read initial pin state */ - status = ice_read_sma_ctrl_e810t(hw, &data); - if (status) - return status; +static const char ice_pin_names_nvm[][64] = { + "GNSS", + "SMA1", + "U.FL1", + "SMA2", + "U.FL2", +}; - /* initialize with defaults */ - for (i = 0; i < NUM_PTP_PINS_E810T; i++) { - strscpy(ptp_pins[i].name, ice_pin_desc_e810t[i].name, - sizeof(ptp_pins[i].name)); - ptp_pins[i].index = ice_pin_desc_e810t[i].index; - ptp_pins[i].func = ice_pin_desc_e810t[i].func; - ptp_pins[i].chan = ice_pin_desc_e810t[i].chan; - } +static const struct ice_ptp_pin_desc ice_pin_desc_e810_sma[] = { + /* name, gpio */ + { GNSS, { 1, -1 }}, + { SMA1, { 1, 0 }}, + { UFL1, { -1, 0 }}, + { SMA2, { 3, 2 }}, + { UFL2, { 3, -1 }}, +}; - /* Parse SMA1/UFL1 */ - switch (data & ICE_SMA1_MASK_E810T) { - case ICE_SMA1_MASK_E810T: - default: - ptp_pins[SMA1].func = PTP_PF_NONE; - ptp_pins[UFL1].func = PTP_PF_NONE; - break; - case ICE_SMA1_DIR_EN_E810T: - ptp_pins[SMA1].func = PTP_PF_PEROUT; - ptp_pins[UFL1].func = PTP_PF_NONE; - break; - case ICE_SMA1_TX_EN_E810T: - ptp_pins[SMA1].func = PTP_PF_EXTTS; - ptp_pins[UFL1].func = PTP_PF_NONE; - break; - case 0: - ptp_pins[SMA1].func = PTP_PF_EXTTS; - ptp_pins[UFL1].func = PTP_PF_PEROUT; - break; - } +static struct ice_pf *ice_get_ctrl_pf(struct ice_pf *pf) +{ + return !pf->adapter ? 
NULL : pf->adapter->ctrl_pf; +} - /* Parse SMA2/UFL2 */ - switch (data & ICE_SMA2_MASK_E810T) { - case ICE_SMA2_MASK_E810T: - default: - ptp_pins[SMA2].func = PTP_PF_NONE; - ptp_pins[UFL2].func = PTP_PF_NONE; - break; - case (ICE_SMA2_TX_EN_E810T | ICE_SMA2_UFL2_RX_DIS_E810T): - ptp_pins[SMA2].func = PTP_PF_EXTTS; - ptp_pins[UFL2].func = PTP_PF_NONE; - break; - case (ICE_SMA2_DIR_EN_E810T | ICE_SMA2_UFL2_RX_DIS_E810T): - ptp_pins[SMA2].func = PTP_PF_PEROUT; - ptp_pins[UFL2].func = PTP_PF_NONE; - break; - case (ICE_SMA2_DIR_EN_E810T | ICE_SMA2_TX_EN_E810T): - ptp_pins[SMA2].func = PTP_PF_NONE; - ptp_pins[UFL2].func = PTP_PF_EXTTS; - break; - case ICE_SMA2_DIR_EN_E810T: - ptp_pins[SMA2].func = PTP_PF_PEROUT; - ptp_pins[UFL2].func = PTP_PF_EXTTS; - break; - } +static struct ice_ptp *ice_get_ctrl_ptp(struct ice_pf *pf) +{ + struct ice_pf *ctrl_pf = ice_get_ctrl_pf(pf); - return 0; + return !ctrl_pf ? NULL : &ctrl_pf->ptp; } /** - * ice_ptp_set_sma_config_e810t - * @hw: pointer to the hw struct - * @ptp_pins: pointer to the ptp_pin_desc struture + * ice_ptp_find_pin_idx - Find pin index in ptp_pin_desc + * @pf: Board private structure + * @func: Pin function + * @chan: GPIO channel * - * Set the configuration of the SMA control logic based on the configuration in - * num_pins parameter + * Return: positive pin number when pin is present, -1 otherwise */ -static int -ice_ptp_set_sma_config_e810t(struct ice_hw *hw, - const struct ptp_pin_desc *ptp_pins) +static int ice_ptp_find_pin_idx(struct ice_pf *pf, enum ptp_pin_function func, + unsigned int chan) { - int status; - u8 data; + const struct ptp_clock_info *info = &pf->ptp.info; + int i; - /* SMA1 and UFL1 cannot be set to TX at the same time */ - if (ptp_pins[SMA1].func == PTP_PF_PEROUT && - ptp_pins[UFL1].func == PTP_PF_PEROUT) - return -EINVAL; + for (i = 0; i < info->n_pins; i++) { + if (info->pin_config[i].func == func && + info->pin_config[i].chan == chan) + return i; + } - /* SMA2 and UFL2 cannot be set to RX at the same time */ - if (ptp_pins[SMA2].func == PTP_PF_EXTTS && - ptp_pins[UFL2].func == PTP_PF_EXTTS) - return -EINVAL; + return -1; +} - /* Read initial pin state value */ - status = ice_read_sma_ctrl_e810t(hw, &data); - if (status) - return status; - - /* Set the right sate based on the desired configuration */ - data &= ~ICE_SMA1_MASK_E810T; - if (ptp_pins[SMA1].func == PTP_PF_NONE && - ptp_pins[UFL1].func == PTP_PF_NONE) { - dev_info(ice_hw_to_dev(hw), "SMA1 + U.FL1 disabled"); - data |= ICE_SMA1_MASK_E810T; - } else if (ptp_pins[SMA1].func == PTP_PF_EXTTS && - ptp_pins[UFL1].func == PTP_PF_NONE) { - dev_info(ice_hw_to_dev(hw), "SMA1 RX"); - data |= ICE_SMA1_TX_EN_E810T; - } else if (ptp_pins[SMA1].func == PTP_PF_NONE && - ptp_pins[UFL1].func == PTP_PF_PEROUT) { - /* U.FL 1 TX will always enable SMA 1 RX */ - dev_info(ice_hw_to_dev(hw), "SMA1 RX + U.FL1 TX"); - } else if (ptp_pins[SMA1].func == PTP_PF_EXTTS && - ptp_pins[UFL1].func == PTP_PF_PEROUT) { - dev_info(ice_hw_to_dev(hw), "SMA1 RX + U.FL1 TX"); - } else if (ptp_pins[SMA1].func == PTP_PF_PEROUT && - ptp_pins[UFL1].func == PTP_PF_NONE) { - dev_info(ice_hw_to_dev(hw), "SMA1 TX"); - data |= ICE_SMA1_DIR_EN_E810T; - } - - data &= ~ICE_SMA2_MASK_E810T; - if (ptp_pins[SMA2].func == PTP_PF_NONE && - ptp_pins[UFL2].func == PTP_PF_NONE) { - dev_info(ice_hw_to_dev(hw), "SMA2 + U.FL2 disabled"); - data |= ICE_SMA2_MASK_E810T; - } else if (ptp_pins[SMA2].func == PTP_PF_EXTTS && - ptp_pins[UFL2].func == PTP_PF_NONE) { - dev_info(ice_hw_to_dev(hw), "SMA2 RX"); - data |= 
(ICE_SMA2_TX_EN_E810T | - ICE_SMA2_UFL2_RX_DIS_E810T); - } else if (ptp_pins[SMA2].func == PTP_PF_NONE && - ptp_pins[UFL2].func == PTP_PF_EXTTS) { - dev_info(ice_hw_to_dev(hw), "UFL2 RX"); - data |= (ICE_SMA2_DIR_EN_E810T | ICE_SMA2_TX_EN_E810T); - } else if (ptp_pins[SMA2].func == PTP_PF_PEROUT && - ptp_pins[UFL2].func == PTP_PF_NONE) { - dev_info(ice_hw_to_dev(hw), "SMA2 TX"); - data |= (ICE_SMA2_DIR_EN_E810T | - ICE_SMA2_UFL2_RX_DIS_E810T); - } else if (ptp_pins[SMA2].func == PTP_PF_PEROUT && - ptp_pins[UFL2].func == PTP_PF_EXTTS) { - dev_info(ice_hw_to_dev(hw), "SMA2 TX + U.FL2 RX"); - data |= ICE_SMA2_DIR_EN_E810T; - } - - return ice_write_sma_ctrl_e810t(hw, data); -} - -/** - * ice_ptp_set_sma_e810t - * @info: the driver's PTP info structure - * @pin: pin index in kernel structure - * @func: Pin function to be set (PTP_PF_NONE, PTP_PF_EXTTS or PTP_PF_PEROUT) - * - * Set the configuration of a single SMA pin +/** + * ice_ptp_update_sma_data - update SMA pins data according to pins setup + * @pf: Board private structure + * @sma_pins: parsed SMA pins status + * @data: SMA data to update */ -static int -ice_ptp_set_sma_e810t(struct ptp_clock_info *info, unsigned int pin, - enum ptp_pin_function func) +static void ice_ptp_update_sma_data(struct ice_pf *pf, unsigned int sma_pins[], + u8 *data) { - struct ptp_pin_desc ptp_pins[NUM_PTP_PINS_E810T]; - struct ice_pf *pf = ptp_info_to_pf(info); - struct ice_hw *hw = &pf->hw; - int err; + const char *state1, *state2; - if (pin < SMA1 || func > PTP_PF_PEROUT) - return -EOPNOTSUPP; - - err = ice_get_sma_config_e810t(hw, ptp_pins); - if (err) - return err; - - /* Disable the same function on the other pin sharing the channel */ - if (pin == SMA1 && ptp_pins[UFL1].func == func) - ptp_pins[UFL1].func = PTP_PF_NONE; - if (pin == UFL1 && ptp_pins[SMA1].func == func) - ptp_pins[SMA1].func = PTP_PF_NONE; - - if (pin == SMA2 && ptp_pins[UFL2].func == func) - ptp_pins[UFL2].func = PTP_PF_NONE; - if (pin == UFL2 && ptp_pins[SMA2].func == func) - ptp_pins[SMA2].func = PTP_PF_NONE; + /* Set the right state based on the desired configuration. + * When bit is set, functionality is disabled. 
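/*
 * A minimal standalone model of the SMA1/U.FL1 decision table that the
 * function below implements. The bit positions here are illustrative
 * placeholders (not the real ICE_SMA1_* encodings); only the selection
 * logic mirrors ice_ptp_update_sma_data().
 */
#include <stdint.h>
#include <stdio.h>

#define SMA1_DIR_EN	(1u << 0)	/* placeholder bit positions */
#define SMA1_TX_EN	(1u << 1)
#define SMA1_MASK	(SMA1_DIR_EN | SMA1_TX_EN)

enum pin_func { PF_NONE, PF_EXTTS, PF_PEROUT };	/* as in the PTP UAPI */

static uint8_t sma1_bits(enum pin_func sma1, enum pin_func ufl1)
{
	if (ufl1 != PF_NONE)	/* U.FL1 Tx always enables SMA1 Rx */
		return 0;
	if (sma1 == PF_EXTTS)	/* SMA1 Rx, U.FL1 disabled */
		return SMA1_TX_EN;
	if (sma1 == PF_PEROUT)	/* SMA1 Tx, U.FL1 disabled */
		return SMA1_DIR_EN;
	return SMA1_MASK;	/* both bits set: everything disabled */
}

int main(void)
{
	printf("SMA1 Rx only -> %#x\n", sma1_bits(PF_EXTTS, PF_NONE));
	return 0;
}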
+ */ + *data &= ~ICE_ALL_SMA_MASK; + if (!sma_pins[UFL1 - 1]) { + if (sma_pins[SMA1 - 1] == PTP_PF_EXTTS) { + state1 = "SMA1 Rx, U.FL1 disabled"; + *data |= ICE_SMA1_TX_EN; + } else if (sma_pins[SMA1 - 1] == PTP_PF_PEROUT) { + state1 = "SMA1 Tx U.FL1 disabled"; + *data |= ICE_SMA1_DIR_EN; + } else { + state1 = "SMA1 disabled, U.FL1 disabled"; + *data |= ICE_SMA1_MASK; + } + } else { + /* U.FL1 Tx will always enable SMA1 Rx */ + state1 = "SMA1 Rx, U.FL1 Tx"; + } - /* Set up new pin function in the temp table */ - ptp_pins[pin].func = func; + if (!sma_pins[UFL2 - 1]) { + if (sma_pins[SMA2 - 1] == PTP_PF_EXTTS) { + state2 = "SMA2 Rx, U.FL2 disabled"; + *data |= ICE_SMA2_TX_EN | ICE_SMA2_UFL2_RX_DIS; + } else if (sma_pins[SMA2 - 1] == PTP_PF_PEROUT) { + state2 = "SMA2 Tx, U.FL2 disabled"; + *data |= ICE_SMA2_DIR_EN | ICE_SMA2_UFL2_RX_DIS; + } else { + state2 = "SMA2 disabled, U.FL2 disabled"; + *data |= ICE_SMA2_MASK; + } + } else { + if (!sma_pins[SMA2 - 1]) { + state2 = "SMA2 disabled, U.FL2 Rx"; + *data |= ICE_SMA2_DIR_EN | ICE_SMA2_TX_EN; + } else { + state2 = "SMA2 Tx, U.FL2 Rx"; + *data |= ICE_SMA2_DIR_EN; + } + } - return ice_ptp_set_sma_config_e810t(hw, ptp_pins); + dev_dbg(ice_pf_to_dev(pf), "%s, %s\n", state1, state2); } /** - * ice_verify_pin_e810t - * @info: the driver's PTP info structure - * @pin: Pin index - * @func: Assigned function - * @chan: Assigned channel + * ice_ptp_set_sma_cfg - set the configuration of the SMA control logic + * @pf: Board private structure * - * Verify if pin supports requested pin function. If the Check pins consistency. - * Reconfigure the SMA logic attached to the given pin to enable its - * desired functionality + * Return: 0 on success, negative error code otherwise */ -static int -ice_verify_pin_e810t(struct ptp_clock_info *info, unsigned int pin, - enum ptp_pin_function func, unsigned int chan) +static int ice_ptp_set_sma_cfg(struct ice_pf *pf) { - /* Don't allow channel reassignment */ - if (chan != ice_pin_desc_e810t[pin].chan) - return -EOPNOTSUPP; + const struct ice_ptp_pin_desc *ice_pins = pf->ptp.ice_pin_desc; + struct ptp_pin_desc *pins = pf->ptp.pin_desc; + unsigned int sma_pins[ICE_SMA_PINS_NUM] = {}; + int err; + u8 data; - /* Check if functions are properly assigned */ - switch (func) { - case PTP_PF_NONE: - break; - case PTP_PF_EXTTS: - if (pin == UFL1) - return -EOPNOTSUPP; - break; - case PTP_PF_PEROUT: - if (pin == UFL2 || pin == GNSS) - return -EOPNOTSUPP; - break; - case PTP_PF_PHYSYNC: - return -EOPNOTSUPP; - } + /* Read initial pin state value */ + err = ice_read_sma_ctrl(&pf->hw, &data); + if (err) + return err; - return ice_ptp_set_sma_e810t(info, pin, func); + /* Get SMA/U.FL pins states */ + for (int i = 0; i < pf->ptp.info.n_pins; i++) + if (pins[i].func) { + int name_idx = ice_pins[i].name_idx; + + switch (name_idx) { + case SMA1: + case UFL1: + case SMA2: + case UFL2: + sma_pins[name_idx - 1] = pins[i].func; + break; + default: + continue; + } + } + + ice_ptp_update_sma_data(pf, sma_pins, &data); + return ice_write_sma_ctrl(&pf->hw, data); } /** @@ -800,8 +734,8 @@ static enum ice_tx_tstamp_work ice_ptp_tx_tstamp_owner(struct ice_pf *pf) struct ice_ptp_port *port; unsigned int i; - mutex_lock(&pf->ptp.ports_owner.lock); - list_for_each_entry(port, &pf->ptp.ports_owner.ports, list_member) { + mutex_lock(&pf->adapter->ports.lock); + list_for_each_entry(port, &pf->adapter->ports.ports, list_node) { struct ice_ptp_tx *tx = &port->tx; if (!tx || !tx->init) @@ -809,7 +743,7 @@ static enum ice_tx_tstamp_work 
ice_ptp_tx_tstamp_owner(struct ice_pf *pf) ice_ptp_process_tx_tstamp(tx); } - mutex_unlock(&pf->ptp.ports_owner.lock); + mutex_unlock(&pf->adapter->ports.lock); for (i = 0; i < ICE_GET_QUAD_NUM(pf->hw.ptp.num_lports); i++) { u64 tstamp_ready; @@ -974,7 +908,7 @@ ice_ptp_flush_all_tx_tracker(struct ice_pf *pf) { struct ice_ptp_port *port; - list_for_each_entry(port, &pf->ptp.ports_owner.ports, list_member) + list_for_each_entry(port, &pf->adapter->ports.ports, list_node) ice_ptp_flush_tx_tracker(ptp_port_to_pf(port), &port->tx); } @@ -1363,7 +1297,7 @@ ice_ptp_port_phy_stop(struct ice_ptp_port *ptp_port) mutex_lock(&ptp_port->ps_lock); - switch (hw->ptp.phy_model) { + switch (ice_get_phy_model(hw)) { case ICE_PHY_ETH56G: err = ice_stop_phy_timer_eth56g(hw, port, true); break; @@ -1409,7 +1343,7 @@ ice_ptp_port_phy_restart(struct ice_ptp_port *ptp_port) mutex_lock(&ptp_port->ps_lock); - switch (hw->ptp.phy_model) { + switch (ice_get_phy_model(hw)) { case ICE_PHY_ETH56G: err = ice_start_phy_timer_eth56g(hw, port); break; @@ -1480,8 +1414,7 @@ void ice_ptp_link_change(struct ice_pf *pf, u8 port, bool linkup) /* Skip HW writes if reset is in progress */ if (pf->hw.reset_ongoing) return; - - switch (hw->ptp.phy_model) { + switch (ice_get_phy_model(hw)) { case ICE_PHY_E810: /* Do not reconfigure E810 PHY */ return; @@ -1514,7 +1447,7 @@ static int ice_ptp_cfg_phy_interrupt(struct ice_pf *pf, bool ena, u32 threshold) ice_ptp_reset_ts_memory(hw); - switch (hw->ptp.phy_model) { + switch (ice_get_phy_model(hw)) { case ICE_PHY_ETH56G: { int port; @@ -1553,7 +1486,7 @@ static int ice_ptp_cfg_phy_interrupt(struct ice_pf *pf, bool ena, u32 threshold) case ICE_PHY_UNSUP: default: dev_warn(dev, "%s: Unexpected PHY model %d\n", __func__, - hw->ptp.phy_model); + ice_get_phy_model(hw)); return -EOPNOTSUPP; } } @@ -1575,10 +1508,10 @@ static void ice_ptp_restart_all_phy(struct ice_pf *pf) { struct list_head *entry; - list_for_each(entry, &pf->ptp.ports_owner.ports) { + list_for_each(entry, &pf->adapter->ports.ports) { struct ice_ptp_port *port = list_entry(entry, struct ice_ptp_port, - list_member); + list_node); if (port->link_up) ice_ptp_port_phy_restart(port); @@ -1651,33 +1584,41 @@ void ice_ptp_extts_event(struct ice_pf *pf) /** * ice_ptp_cfg_extts - Configure EXTTS pin and channel * @pf: Board private structure - * @chan: GPIO channel (0-3) - * @config: desired EXTTS configuration. - * @store: If set to true, the values will be stored + * @rq: External timestamp request + * @on: Enable/disable flag * * Configure an external timestamp event on the requested channel. 
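/*
 * For illustration, the userspace side of this request path: arming an
 * external timestamp channel through the PTP character device and reading
 * the resulting events. The device path and channel index are assumptions;
 * the ioctls and structures are the standard linux/ptp_clock.h UAPI.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/ptp_clock.h>

int main(void)
{
	struct ptp_extts_request req;
	struct ptp_extts_event ev;
	int fd = open("/dev/ptp0", O_RDWR);

	if (fd < 0)
		return 1;

	memset(&req, 0, sizeof(req));
	req.index = 0;				/* EXTTS channel */
	req.flags = PTP_ENABLE_FEATURE | PTP_RISING_EDGE;
	if (ioctl(fd, PTP_EXTTS_REQUEST, &req))
		return 1;

	/* each read returns one timestamped edge */
	if (read(fd, &ev, sizeof(ev)) == (ssize_t)sizeof(ev))
		printf("extts[%u] at %lld.%09u\n", ev.index,
		       (long long)ev.t.sec, ev.t.nsec);

	req.flags = 0;				/* disarm the channel */
	ioctl(fd, PTP_EXTTS_REQUEST, &req);
	close(fd);
	return 0;
}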
* - * Return: 0 on success, -EOPNOTUSPP on unsupported flags + * Return: 0 on success, negative error code otherwise */ -static int ice_ptp_cfg_extts(struct ice_pf *pf, unsigned int chan, - struct ice_extts_channel *config, bool store) +static int ice_ptp_cfg_extts(struct ice_pf *pf, struct ptp_extts_request *rq, + int on) { - u32 func, aux_reg, gpio_reg, irq_reg; + u32 aux_reg, gpio_reg, irq_reg; struct ice_hw *hw = &pf->hw; + unsigned int chan, gpio_pin; + int pin_desc_idx; u8 tmr_idx; /* Reject requests with unsupported flags */ - if (config->flags & ~(PTP_ENABLE_FEATURE | - PTP_RISING_EDGE | - PTP_FALLING_EDGE | - PTP_STRICT_FLAGS)) + + if (rq->flags & ~(PTP_ENABLE_FEATURE | + PTP_RISING_EDGE | + PTP_FALLING_EDGE | + PTP_STRICT_FLAGS)) return -EOPNOTSUPP; tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned; + chan = rq->index; + pin_desc_idx = ice_ptp_find_pin_idx(pf, PTP_PF_EXTTS, chan); + if (pin_desc_idx < 0) + return -EIO; + + gpio_pin = pf->ptp.ice_pin_desc[pin_desc_idx].gpio[0]; irq_reg = rd32(hw, PFINT_OICR_ENA); - if (config->ena) { + if (on) { /* Enable the interrupt */ irq_reg |= PFINT_OICR_TSYN_EVNT_M; aux_reg = GLTSYN_AUX_IN_0_INT_ENA_M; @@ -1686,33 +1627,38 @@ static int ice_ptp_cfg_extts(struct ice_pf *pf, unsigned int chan, #define GLTSYN_AUX_IN_0_EVNTLVL_FALLING_EDGE BIT(1) /* set event level to requested edge */ - if (config->flags & PTP_FALLING_EDGE) + if (rq->flags & PTP_FALLING_EDGE) aux_reg |= GLTSYN_AUX_IN_0_EVNTLVL_FALLING_EDGE; - if (config->flags & PTP_RISING_EDGE) + if (rq->flags & PTP_RISING_EDGE) aux_reg |= GLTSYN_AUX_IN_0_EVNTLVL_RISING_EDGE; /* Write GPIO CTL reg. * 0x1 is input sampled by EVENT register(channel) * + num_in_channels * tmr_idx */ - func = 1 + chan + (tmr_idx * 3); - gpio_reg = FIELD_PREP(GLGEN_GPIO_CTL_PIN_FUNC_M, func); - pf->ptp.ext_ts_chan |= (1 << chan); + gpio_reg = FIELD_PREP(GLGEN_GPIO_CTL_PIN_FUNC_M, + 1 + chan + (tmr_idx * 3)); } else { + bool last_enabled = true; + /* clear the values we set to reset defaults */ aux_reg = 0; gpio_reg = 0; - pf->ptp.ext_ts_chan &= ~(1 << chan); - if (!pf->ptp.ext_ts_chan) + + for (unsigned int i = 0; i < pf->ptp.info.n_ext_ts; i++) + if ((pf->ptp.extts_rqs[i].flags & + PTP_ENABLE_FEATURE) && + i != chan) { + last_enabled = false; + } + + if (last_enabled) irq_reg &= ~PFINT_OICR_TSYN_EVNT_M; } wr32(hw, PFINT_OICR_ENA, irq_reg); wr32(hw, GLTSYN_AUX_IN(chan, tmr_idx), aux_reg); - wr32(hw, GLGEN_GPIO_CTL(config->gpio_pin), gpio_reg); - - if (store) - memcpy(&pf->ptp.extts_channels[chan], config, sizeof(*config)); + wr32(hw, GLGEN_GPIO_CTL(gpio_pin), gpio_reg); return 0; } @@ -1723,16 +1669,10 @@ static int ice_ptp_cfg_extts(struct ice_pf *pf, unsigned int chan, */ static void ice_ptp_disable_all_extts(struct ice_pf *pf) { - struct ice_extts_channel extts_cfg = {}; - int i; - - for (i = 0; i < pf->ptp.info.n_ext_ts; i++) { - if (pf->ptp.extts_channels[i].ena) { - extts_cfg.gpio_pin = pf->ptp.extts_channels[i].gpio_pin; - extts_cfg.ena = false; - ice_ptp_cfg_extts(pf, i, &extts_cfg, false); - } - } + for (unsigned int i = 0; i < pf->ptp.info.n_ext_ts ; i++) + if (pf->ptp.extts_rqs[i].flags & PTP_ENABLE_FEATURE) + ice_ptp_cfg_extts(pf, &pf->ptp.extts_rqs[i], + false); synchronize_irq(pf->oicr_irq.virq); } @@ -1745,273 +1685,322 @@ static void ice_ptp_disable_all_extts(struct ice_pf *pf) */ static void ice_ptp_enable_all_extts(struct ice_pf *pf) { - int i; - - for (i = 0; i < pf->ptp.info.n_ext_ts; i++) { - if (pf->ptp.extts_channels[i].ena) - ice_ptp_cfg_extts(pf, i, &pf->ptp.extts_channels[i], - false); 
- } + for (unsigned int i = 0; i < pf->ptp.info.n_ext_ts ; i++) + if (pf->ptp.extts_rqs[i].flags & PTP_ENABLE_FEATURE) + ice_ptp_cfg_extts(pf, &pf->ptp.extts_rqs[i], + true); } /** - * ice_ptp_cfg_clkout - Configure clock to generate periodic wave - * @pf: Board private structure - * @chan: GPIO channel (0-3) - * @config: desired periodic clk configuration. NULL will disable channel - * @store: If set to true the values will be stored + * ice_ptp_write_perout - Write periodic wave parameters to HW + * @hw: pointer to the HW struct + * @chan: target channel + * @gpio_pin: target GPIO pin + * @start: target time to start periodic output + * @period: target period * - * Configure the internal clock generator modules to generate the clock wave of - * specified period. + * Return: 0 on success, negative error code otherwise */ -static int ice_ptp_cfg_clkout(struct ice_pf *pf, unsigned int chan, - struct ice_perout_channel *config, bool store) +static int ice_ptp_write_perout(struct ice_hw *hw, unsigned int chan, + unsigned int gpio_pin, u64 start, u64 period) { - u64 current_time, period, start_time, phase; - struct ice_hw *hw = &pf->hw; - u32 func, val, gpio_pin; - u8 tmr_idx; - if (config && config->flags & ~PTP_PEROUT_PHASE) - return -EOPNOTSUPP; - - tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned; + u8 tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned; + u32 val = 0; /* 0. Reset mode & out_en in AUX_OUT */ wr32(hw, GLTSYN_AUX_OUT(chan, tmr_idx), 0); - /* If we're disabling the output, clear out CLKO and TGT and keep - * output level low - */ - if (!config || !config->ena) { - wr32(hw, GLTSYN_CLKO(chan, tmr_idx), 0); - wr32(hw, GLTSYN_TGT_L(chan, tmr_idx), 0); - wr32(hw, GLTSYN_TGT_H(chan, tmr_idx), 0); - - val = GLGEN_GPIO_CTL_PIN_DIR_M; - gpio_pin = pf->ptp.perout_channels[chan].gpio_pin; - wr32(hw, GLGEN_GPIO_CTL(gpio_pin), val); - - /* Store the value if requested */ - if (store) - memset(&pf->ptp.perout_channels[chan], 0, - sizeof(struct ice_perout_channel)); - - return 0; - } - period = config->period; - start_time = config->start_time; - div64_u64_rem(start_time, period, &phase); - gpio_pin = config->gpio_pin; + if (ice_is_e825c(hw)) { + int err; - /* 1. Write clkout with half of required period value */ - if (period & 0x1) { - dev_err(ice_pf_to_dev(pf), "CLK Period must be an even value\n"); - goto err; + /* Enable/disable CGU 1PPS output for E825C */ + err = ice_cgu_cfg_pps_out(hw, !!period); + if (err) + return err; } + /* 1. Write perout with half of required period value. + * HW toggles output when source clock hits the TGT and then adds + * GLTSYN_CLKO value to the target, so it ends up with 50% duty cycle. + */ period >>= 1; - /* For proper operation, the GLTSYN_CLKO must be larger than clock tick + /* For proper operation, GLTSYN_CLKO must be larger than clock tick and + * period has to fit in 32 bit register. 
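/*
 * Worked example of the half-period programming described above: the
 * hardware toggles the pin each time the source clock hits the target and
 * re-arms TGT by GLTSYN_CLKO, so a 1 s output is programmed as 500000000
 * ns. A standalone check mirroring the enable-path bounds used below:
 */
#include <stdint.h>
#include <stdio.h>

#define MIN_PULSE 3

static int clko_for_period(uint64_t period_ns, uint32_t *clko)
{
	if (period_ns & 1)		/* must split into two equal halves */
		return -1;

	period_ns >>= 1;		/* 50% duty cycle: toggle every half */
	if (period_ns <= MIN_PULSE || period_ns > UINT32_MAX)
		return -1;		/* must fit the 32-bit CLKO register */

	*clko = (uint32_t)period_ns;
	return 0;
}

int main(void)
{
	uint32_t clko;

	if (!clko_for_period(1000000000ULL, &clko))
		printf("1 s period -> CLKO = %u ns\n", clko); /* 500000000 */
	return 0;
}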
*/ #define MIN_PULSE 3 - if (period <= MIN_PULSE || period > U32_MAX) { - dev_err(ice_pf_to_dev(pf), "CLK Period must be > %d && < 2^33", - MIN_PULSE * 2); - goto err; + if (!!period && (period <= MIN_PULSE || period > U32_MAX)) { + dev_err(ice_hw_to_dev(hw), "CLK period ticks must be >= %d && <= 2^32", + MIN_PULSE); + return -EIO; } wr32(hw, GLTSYN_CLKO(chan, tmr_idx), lower_32_bits(period)); - /* Allow time for programming before start_time is hit */ - current_time = ice_ptp_read_src_clk_reg(pf, NULL); - - /* if start time is in the past start the timer at the nearest second - * maintaining phase - */ - if (start_time < current_time) - start_time = roundup_u64(current_time, NSEC_PER_SEC) + phase; - - if (ice_is_e810(hw)) - start_time -= E810_OUT_PROP_DELAY_NS; - else - start_time -= ice_e82x_pps_delay(ice_e82x_time_ref(hw)); - /* 2. Write TARGET time */ - wr32(hw, GLTSYN_TGT_L(chan, tmr_idx), lower_32_bits(start_time)); - wr32(hw, GLTSYN_TGT_H(chan, tmr_idx), upper_32_bits(start_time)); + wr32(hw, GLTSYN_TGT_L(chan, tmr_idx), lower_32_bits(start)); + wr32(hw, GLTSYN_TGT_H(chan, tmr_idx), upper_32_bits(start)); /* 3. Write AUX_OUT register */ - val = GLTSYN_AUX_OUT_0_OUT_ENA_M | GLTSYN_AUX_OUT_0_OUTMOD_M; + if (!!period) + val = GLTSYN_AUX_OUT_0_OUT_ENA_M | GLTSYN_AUX_OUT_0_OUTMOD_M; wr32(hw, GLTSYN_AUX_OUT(chan, tmr_idx), val); /* 4. write GPIO CTL reg */ - func = 8 + chan + (tmr_idx * 4); - val = GLGEN_GPIO_CTL_PIN_DIR_M | - FIELD_PREP(GLGEN_GPIO_CTL_PIN_FUNC_M, func); + val = GLGEN_GPIO_CTL_PIN_DIR_M; + if (!!period) + val |= FIELD_PREP(GLGEN_GPIO_CTL_PIN_FUNC_M, + 8 + chan + (tmr_idx * 4)); + wr32(hw, GLGEN_GPIO_CTL(gpio_pin), val); - /* Store the value if requested */ - if (store) { - memcpy(&pf->ptp.perout_channels[chan], config, - sizeof(struct ice_perout_channel)); - pf->ptp.perout_channels[chan].start_time = phase; + return 0; +} + +/** + * ice_ptp_cfg_perout - Configure clock to generate periodic wave + * @pf: Board private structure + * @rq: Periodic output request + * @on: Enable/disable flag + * + * Configure the internal clock generator modules to generate the clock wave of + * specified period. + * + * Return: 0 on success, negative error code otherwise + */ +static int ice_ptp_cfg_perout(struct ice_pf *pf, struct ptp_perout_request *rq, + int on) +{ + u64 clk, period, start, phase; + struct ice_hw *hw = &pf->hw; + unsigned int gpio_pin; + int pin_desc_idx; + + if (rq->flags & ~PTP_PEROUT_PHASE) + return -EOPNOTSUPP; + + pin_desc_idx = ice_ptp_find_pin_idx(pf, PTP_PF_PEROUT, rq->index); + if (pin_desc_idx < 0) + return -EIO; + + gpio_pin = pf->ptp.ice_pin_desc[pin_desc_idx].gpio[1]; + period = rq->period.sec * NSEC_PER_SEC + rq->period.nsec; + + /* If we're disabling the output or period is 0, clear out CLKO and TGT + * and keep output level low. 
+ */ + if (!on || !period) + return ice_ptp_write_perout(hw, rq->index, gpio_pin, 0, 0); + + if (strncmp(pf->ptp.pin_desc[pin_desc_idx].name, "1PPS", 64) == 0 && + period != NSEC_PER_SEC && hw->ptp.phy_model == ICE_PHY_E82X) { + dev_err(ice_pf_to_dev(pf), "1PPS pin supports only 1 s period\n"); + return -EOPNOTSUPP; } - return 0; -err: - dev_err(ice_pf_to_dev(pf), "PTP failed to cfg per_clk\n"); - return -EFAULT; + if (period & 0x1) { + dev_err(ice_pf_to_dev(pf), "CLK Period must be an even value\n"); + return -EIO; + } + + start = rq->start.sec * NSEC_PER_SEC + rq->start.nsec; + + /* If PTP_PEROUT_PHASE is set, rq has phase instead of start time */ + if (rq->flags & PTP_PEROUT_PHASE) + phase = start; + else + div64_u64_rem(start, period, &phase); + + /* If we have only phase or start time is in the past, start the timer + * at the next multiple of period, maintaining phase. + */ + clk = ice_ptp_read_src_clk_reg(pf, NULL); + if (rq->flags & PTP_PEROUT_PHASE || start <= clk - ice_prop_delay(hw)) + start = div64_u64(clk + period - 1, period) * period + phase; + + /* Compensate for propagation delay from the generator to the pin. */ + start -= ice_prop_delay(hw); + + return ice_ptp_write_perout(hw, rq->index, gpio_pin, start, period); } /** - * ice_ptp_disable_all_clkout - Disable all currently configured outputs - * @pf: pointer to the PF structure + * ice_ptp_disable_all_perout - Disable all currently configured outputs + * @pf: Board private structure * * Disable all currently configured clock outputs. This is necessary before - * certain changes to the PTP hardware clock. Use ice_ptp_enable_all_clkout to + * certain changes to the PTP hardware clock. Use ice_ptp_enable_all_perout to * re-enable the clocks again. */ -static void ice_ptp_disable_all_clkout(struct ice_pf *pf) +static void ice_ptp_disable_all_perout(struct ice_pf *pf) { - uint i; - - for (i = 0; i < pf->ptp.info.n_per_out; i++) - if (pf->ptp.perout_channels[i].ena) - ice_ptp_cfg_clkout(pf, i, NULL, false); + for (unsigned int i = 0; i < pf->ptp.info.n_per_out; i++) + if (pf->ptp.perout_rqs[i].period.sec || + pf->ptp.perout_rqs[i].period.nsec) + ice_ptp_cfg_perout(pf, &pf->ptp.perout_rqs[i], + false); } /** - * ice_ptp_enable_all_clkout - Enable all configured periodic clock outputs - * @pf: pointer to the PF structure + * ice_ptp_enable_all_perout - Enable all configured periodic clock outputs + * @pf: Board private structure * * Enable all currently configured clock outputs. Use this after - * ice_ptp_disable_all_clkout to reconfigure the output signals according to + * ice_ptp_disable_all_perout to reconfigure the output signals according to * their configuration. 
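/*
 * Userspace counterpart of ice_ptp_cfg_perout() for reference (device
 * path and channel index are assumptions): request a 1 s periodic output
 * whose edge sits 250 ms into each second. With PTP_PEROUT_PHASE set the
 * UAPI reuses the start field as the phase, matching the branch above.
 */
#include <fcntl.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/ptp_clock.h>

int main(void)
{
	struct ptp_perout_request req;
	int fd = open("/dev/ptp0", O_RDWR);

	if (fd < 0)
		return 1;

	memset(&req, 0, sizeof(req));
	req.index = 0;
	req.period.sec = 1;		/* 1 s period */
	req.period.nsec = 0;
	req.start.sec = 0;		/* interpreted as phase ... */
	req.start.nsec = 250000000;	/* ... 250 ms into each period */
	req.flags = PTP_PEROUT_PHASE;

	if (ioctl(fd, PTP_PEROUT_REQUEST, &req))
		return 1;
	close(fd);
	return 0;
}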
*/ -static void ice_ptp_enable_all_clkout(struct ice_pf *pf) +static void ice_ptp_enable_all_perout(struct ice_pf *pf) { - uint i; - - for (i = 0; i < pf->ptp.info.n_per_out; i++) - if (pf->ptp.perout_channels[i].ena) - ice_ptp_cfg_clkout(pf, i, &pf->ptp.perout_channels[i], - false); + for (unsigned int i = 0; i < pf->ptp.info.n_per_out; i++) + if (pf->ptp.perout_rqs[i].period.sec || + pf->ptp.perout_rqs[i].period.nsec) + ice_ptp_cfg_perout(pf, &pf->ptp.perout_rqs[i], + true); } /** - * ice_ptp_gpio_enable_e810 - Enable/disable ancillary features of PHC - * @info: the driver's PTP info structure - * @rq: The requested feature to change - * @on: Enable/disable flag + * ice_ptp_disable_shared_pin - Disable enabled pin that shares GPIO + * @pf: Board private structure + * @pin: Pin index + * @func: Assigned function + * + * Return: 0 on success, negative error code otherwise */ -static int -ice_ptp_gpio_enable_e810(struct ptp_clock_info *info, - struct ptp_clock_request *rq, int on) +static int ice_ptp_disable_shared_pin(struct ice_pf *pf, unsigned int pin, + enum ptp_pin_function func) { - struct ice_pf *pf = ptp_info_to_pf(info); - bool sma_pres = false; - unsigned int chan; - u32 gpio_pin; + unsigned int gpio_pin; - if (ice_is_feature_supported(pf, ICE_F_SMA_CTRL)) - sma_pres = true; + switch (func) { + case PTP_PF_PEROUT: + gpio_pin = pf->ptp.ice_pin_desc[pin].gpio[1]; + break; + case PTP_PF_EXTTS: + gpio_pin = pf->ptp.ice_pin_desc[pin].gpio[0]; + break; + default: + return -EOPNOTSUPP; + } - switch (rq->type) { - case PTP_CLK_REQ_PEROUT: - { - struct ice_perout_channel clk_cfg = {}; - - chan = rq->perout.index; - if (sma_pres) { - if (chan == ice_pin_desc_e810t[SMA1].chan) - clk_cfg.gpio_pin = GPIO_20; - else if (chan == ice_pin_desc_e810t[SMA2].chan) - clk_cfg.gpio_pin = GPIO_22; - else - return -1; - } else if (ice_is_e810t(&pf->hw)) { - if (chan == 0) - clk_cfg.gpio_pin = GPIO_20; - else - clk_cfg.gpio_pin = GPIO_22; - } else if (chan == PPS_CLK_GEN_CHAN) { - clk_cfg.gpio_pin = PPS_PIN_INDEX; - } else { - clk_cfg.gpio_pin = chan; - } + for (unsigned int i = 0; i < pf->ptp.info.n_pins; i++) { + struct ptp_pin_desc *pin_desc = &pf->ptp.pin_desc[i]; + unsigned int chan = pin_desc->chan; - clk_cfg.flags = rq->perout.flags; - clk_cfg.period = ((rq->perout.period.sec * NSEC_PER_SEC) + - rq->perout.period.nsec); - clk_cfg.start_time = ((rq->perout.start.sec * NSEC_PER_SEC) + - rq->perout.start.nsec); - clk_cfg.ena = !!on; + /* Skip pin idx from the request */ + if (i == pin) + continue; - return ice_ptp_cfg_clkout(pf, chan, &clk_cfg, true); - } - case PTP_CLK_REQ_EXTTS: - { - struct ice_extts_channel extts_cfg = {}; - - chan = rq->extts.index; - if (sma_pres) { - if (chan < ice_pin_desc_e810t[SMA2].chan) - gpio_pin = GPIO_21; - else - gpio_pin = GPIO_23; - } else if (ice_is_e810t(&pf->hw)) { - if (chan == 0) - gpio_pin = GPIO_21; - else - gpio_pin = GPIO_23; - } else { - gpio_pin = chan; + if (pin_desc->func == PTP_PF_PEROUT && + pf->ptp.ice_pin_desc[i].gpio[1] == gpio_pin) { + pf->ptp.perout_rqs[chan].period.sec = 0; + pf->ptp.perout_rqs[chan].period.nsec = 0; + pin_desc->func = PTP_PF_NONE; + pin_desc->chan = 0; + dev_dbg(ice_pf_to_dev(pf), "Disabling pin %u with shared output GPIO pin %u\n", + i, gpio_pin); + return ice_ptp_cfg_perout(pf, &pf->ptp.perout_rqs[chan], + false); + } else if (pf->ptp.pin_desc->func == PTP_PF_EXTTS && + pf->ptp.ice_pin_desc[i].gpio[0] == gpio_pin) { + pf->ptp.extts_rqs[chan].flags &= ~PTP_ENABLE_FEATURE; + pin_desc->func = PTP_PF_NONE; + pin_desc->chan = 0; + 
dev_dbg(ice_pf_to_dev(pf), "Disabling pin %u with shared input GPIO pin %u\n", + i, gpio_pin); + return ice_ptp_cfg_extts(pf, &pf->ptp.extts_rqs[chan], + false); } + } - extts_cfg.flags = rq->extts.flags; - extts_cfg.gpio_pin = gpio_pin; - extts_cfg.ena = !!on; + return 0; +} - return ice_ptp_cfg_extts(pf, chan, &extts_cfg, true); - } +/** + * ice_verify_pin - verify if pin supports requested pin function + * @info: the driver's PTP info structure + * @pin: Pin index + * @func: Assigned function + * @chan: Assigned channel + * + * Return: 0 on success, -EOPNOTSUPP when function is not supported. + */ +static int ice_verify_pin(struct ptp_clock_info *info, unsigned int pin, + enum ptp_pin_function func, unsigned int chan) +{ + struct ice_pf *pf = ptp_info_to_pf(info); + const struct ice_ptp_pin_desc *pin_desc; + + pin_desc = &pf->ptp.ice_pin_desc[pin]; + + /* Is assigned function allowed? */ + switch (func) { + case PTP_PF_EXTTS: + if (pin_desc->gpio[0] < 0) + return -EOPNOTSUPP; + break; + case PTP_PF_PEROUT: + if (pin_desc->gpio[1] < 0) + return -EOPNOTSUPP; + break; + case PTP_PF_NONE: + break; + case PTP_PF_PHYSYNC: default: return -EOPNOTSUPP; } + + /* On adapters with SMA_CTRL disable other pins that share same GPIO */ + if (ice_is_feature_supported(pf, ICE_F_SMA_CTRL)) { + ice_ptp_disable_shared_pin(pf, pin, func); + pf->ptp.pin_desc[pin].func = func; + pf->ptp.pin_desc[pin].chan = chan; + return ice_ptp_set_sma_cfg(pf); + } + + return 0; } /** - * ice_ptp_gpio_enable_e823 - Enable/disable ancillary features of PHC - * @info: the driver's PTP info structure + * ice_ptp_gpio_enable - Enable/disable ancillary features of PHC + * @info: The driver's PTP info structure * @rq: The requested feature to change * @on: Enable/disable flag + * + * Return: 0 on success, negative error code otherwise */ -static int ice_ptp_gpio_enable_e823(struct ptp_clock_info *info, - struct ptp_clock_request *rq, int on) +static int ice_ptp_gpio_enable(struct ptp_clock_info *info, + struct ptp_clock_request *rq, int on) { struct ice_pf *pf = ptp_info_to_pf(info); + int err; switch (rq->type) { - case PTP_CLK_REQ_PPS: + case PTP_CLK_REQ_PEROUT: { - struct ice_perout_channel clk_cfg = {}; + struct ptp_perout_request *cached = + &pf->ptp.perout_rqs[rq->perout.index]; - clk_cfg.flags = rq->perout.flags; - clk_cfg.gpio_pin = PPS_PIN_INDEX; - clk_cfg.period = NSEC_PER_SEC; - clk_cfg.ena = !!on; - - return ice_ptp_cfg_clkout(pf, PPS_CLK_GEN_CHAN, &clk_cfg, true); + err = ice_ptp_cfg_perout(pf, &rq->perout, on); + if (!err) { + *cached = rq->perout; + } else { + cached->period.sec = 0; + cached->period.nsec = 0; + } + return err; } case PTP_CLK_REQ_EXTTS: { - struct ice_extts_channel extts_cfg = {}; - - extts_cfg.flags = rq->extts.flags; - extts_cfg.gpio_pin = TIME_SYNC_PIN_INDEX; - extts_cfg.ena = !!on; + struct ptp_extts_request *cached = + &pf->ptp.extts_rqs[rq->extts.index]; - return ice_ptp_cfg_extts(pf, rq->extts.index, &extts_cfg, true); + err = ice_ptp_cfg_extts(pf, &rq->extts, on); + if (!err) + *cached = rq->extts; + else + cached->flags &= ~PTP_ENABLE_FEATURE; + return err; } default: return -EOPNOTSUPP; @@ -2059,7 +2048,7 @@ ice_ptp_settime64(struct ptp_clock_info *info, const struct timespec64 *ts) /* For Vernier mode on E82X, we need to recalibrate after new settime. * Start with marking timestamps as invalid. 
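/*
 * The syscall-level view of this settime path, as a standalone sketch
 * (assuming the clock is exposed as /dev/ptp0): a PHC is set through
 * clock_settime() on a dynamic clock id derived from the chardev fd.
 */
#include <fcntl.h>
#include <time.h>
#include <unistd.h>

#define CLOCKFD 3
#define FD_TO_CLOCKID(fd) ((~(clockid_t)(fd) << 3) | CLOCKFD)

int main(void)
{
	struct timespec ts;
	int fd = open("/dev/ptp0", O_RDWR);

	if (fd < 0)
		return 1;

	clock_gettime(CLOCK_REALTIME, &ts);	/* seed from system time */
	if (clock_settime(FD_TO_CLOCKID(fd), &ts))
		return 1;

	close(fd);
	return 0;
}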
*/ - if (hw->ptp.phy_model == ICE_PHY_E82X) { + if (ice_get_phy_model(hw) == ICE_PHY_E82X) { err = ice_ptp_clear_phy_offset_ready_e82x(hw); if (err) dev_warn(ice_pf_to_dev(pf), "Failed to mark timestamps as invalid before settime\n"); @@ -2071,7 +2060,7 @@ ice_ptp_settime64(struct ptp_clock_info *info, const struct timespec64 *ts) } /* Disable periodic outputs */ - ice_ptp_disable_all_clkout(pf); + ice_ptp_disable_all_perout(pf); err = ice_ptp_write_init(pf, &ts64); ice_ptp_unlock(hw); @@ -2080,10 +2069,10 @@ ice_ptp_settime64(struct ptp_clock_info *info, const struct timespec64 *ts) ice_ptp_reset_cached_phctime(pf); /* Reenable periodic outputs */ - ice_ptp_enable_all_clkout(pf); + ice_ptp_enable_all_perout(pf); /* Recalibrate and re-enable timestamp blocks for E822/E823 */ - if (hw->ptp.phy_model == ICE_PHY_E82X) + if (ice_get_phy_model(hw) == ICE_PHY_E82X) ice_ptp_restart_all_phy(pf); exit: if (err) { @@ -2142,12 +2131,12 @@ static int ice_ptp_adjtime(struct ptp_clock_info *info, s64 delta) } /* Disable periodic outputs */ - ice_ptp_disable_all_clkout(pf); + ice_ptp_disable_all_perout(pf); err = ice_ptp_write_adj(pf, delta); /* Reenable periodic outputs */ - ice_ptp_enable_all_clkout(pf); + ice_ptp_enable_all_perout(pf); ice_ptp_unlock(hw); @@ -2405,20 +2394,41 @@ u64 ice_ptp_get_rx_hwts(const union ice_32b_rx_flex_desc *rx_desc, } /** - * ice_ptp_disable_sma_pins_e810t - Disable E810-T SMA pins + * ice_ptp_setup_pin_cfg - setup PTP pin_config structure + * @pf: Board private structure + */ +static void ice_ptp_setup_pin_cfg(struct ice_pf *pf) +{ + for (unsigned int i = 0; i < pf->ptp.info.n_pins; i++) { + const struct ice_ptp_pin_desc *desc = &pf->ptp.ice_pin_desc[i]; + struct ptp_pin_desc *pin = &pf->ptp.pin_desc[i]; + const char *name = NULL; + + if (!ice_is_feature_supported(pf, ICE_F_SMA_CTRL)) + name = ice_pin_names[desc->name_idx]; + else if (desc->name_idx != GPIO_NA) + name = ice_pin_names_nvm[desc->name_idx]; + if (name) + strscpy(pin->name, name, sizeof(pin->name)); + + pin->index = i; + } + + pf->ptp.info.pin_config = pf->ptp.pin_desc; +} + +/** + * ice_ptp_disable_pins - Disable PTP pins * @pf: pointer to the PF structure - * @info: PTP clock info structure * * Disable the OS access to the SMA pins. Called to clear out the OS - * indications of pin support when we fail to setup the E810-T SMA control - * register. + * indications of pin support when we fail to setup the SMA control register. */ -static void -ice_ptp_disable_sma_pins_e810t(struct ice_pf *pf, struct ptp_clock_info *info) +static void ice_ptp_disable_pins(struct ice_pf *pf) { - struct device *dev = ice_pf_to_dev(pf); + struct ptp_clock_info *info = &pf->ptp.info; - dev_warn(dev, "Failed to configure E810-T SMA pin control\n"); + dev_warn(ice_pf_to_dev(pf), "Failed to configure PTP pin control\n"); info->enable = NULL; info->verify = NULL; @@ -2428,126 +2438,158 @@ ice_ptp_disable_sma_pins_e810t(struct ice_pf *pf, struct ptp_clock_info *info) } /** - * ice_ptp_setup_sma_pins_e810t - Setup the SMA pins + * ice_ptp_parse_sdp_entries - update ice_ptp_pin_desc structure from NVM * @pf: pointer to the PF structure - * @info: PTP clock info structure + * @entries: SDP connection section from NVM + * @num_entries: number of valid entries in sdp_entries + * @pins: PTP pins array to update * - * Finish setting up the SMA pins by allocating pin_config, and setting it up - * according to the current status of the SMA. On failure, disable all of the - * extended SMA pin support. 
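/*
 * How the pin table assembled by ice_ptp_setup_pin_cfg() surfaces to
 * userspace, as a sketch (device path assumed): enumerate every pin, then
 * reassign pin 0 to external-timestamp duty -- the request that lands in
 * the driver's ice_verify_pin().
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/ptp_clock.h>

int main(void)
{
	struct ptp_clock_caps caps;
	struct ptp_pin_desc pin;
	int fd = open("/dev/ptp0", O_RDWR);

	if (fd < 0 || ioctl(fd, PTP_CLOCK_GETCAPS, &caps))
		return 1;

	for (int i = 0; i < caps.n_pins; i++) {
		memset(&pin, 0, sizeof(pin));
		pin.index = i;
		if (!ioctl(fd, PTP_PIN_GETFUNC, &pin))
			printf("pin %u \"%s\" func %u chan %u\n",
			       pin.index, pin.name, pin.func, pin.chan);
	}

	memset(&pin, 0, sizeof(pin));
	pin.index = 0;			/* e.g. SDP0 */
	pin.func = PTP_PF_EXTTS;	/* route it to channel 0 input */
	pin.chan = 0;
	return ioctl(fd, PTP_PIN_SETFUNC, &pin) ? 1 : 0;
}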
+ * Return: 0 on success, negative error code otherwise. */ -static void -ice_ptp_setup_sma_pins_e810t(struct ice_pf *pf, struct ptp_clock_info *info) +static int ice_ptp_parse_sdp_entries(struct ice_pf *pf, __le16 *entries, + unsigned int num_entries, + struct ice_ptp_pin_desc *pins) { - struct device *dev = ice_pf_to_dev(pf); - int err; + unsigned int n_pins = 0; + unsigned int i; - /* Allocate memory for kernel pins interface */ - info->pin_config = devm_kcalloc(dev, info->n_pins, - sizeof(*info->pin_config), GFP_KERNEL); - if (!info->pin_config) { - ice_ptp_disable_sma_pins_e810t(pf, info); - return; - } + /* Setup ice_pin_desc array */ + for (i = 0; i < ICE_N_PINS_MAX; i++) { + pins[i].name_idx = -1; + pins[i].gpio[0] = -1; + pins[i].gpio[1] = -1; + } + + for (i = 0; i < num_entries; i++) { + u16 entry = le16_to_cpu(entries[i]); + DECLARE_BITMAP(bitmap, GPIO_NA); + unsigned int bitmap_idx; + bool dir; + u16 gpio; + + *bitmap = FIELD_GET(ICE_AQC_NVM_SDP_AC_PIN_M, entry); + dir = !!FIELD_GET(ICE_AQC_NVM_SDP_AC_DIR_M, entry); + gpio = FIELD_GET(ICE_AQC_NVM_SDP_AC_SDP_NUM_M, entry); + for_each_set_bit(bitmap_idx, bitmap, GPIO_NA + 1) { + unsigned int idx; + + /* Check if entry's pin bit is valid */ + if (bitmap_idx >= NUM_PTP_PINS_NVM && + bitmap_idx != GPIO_NA) + continue; - /* Read current SMA status */ - err = ice_get_sma_config_e810t(&pf->hw, info->pin_config); - if (err) - ice_ptp_disable_sma_pins_e810t(pf, info); -} + /* Check if pin already exists */ + for (idx = 0; idx < ICE_N_PINS_MAX; idx++) + if (pins[idx].name_idx == bitmap_idx) + break; + + if (idx == ICE_N_PINS_MAX) { + /* Pin not found, setup its entry and name */ + idx = n_pins++; + pins[idx].name_idx = bitmap_idx; + if (bitmap_idx == GPIO_NA) + strscpy(pf->ptp.pin_desc[idx].name, + ice_pin_names[gpio], + sizeof(pf->ptp.pin_desc[idx] + .name)); + } -/** - * ice_ptp_setup_pins_e810 - Setup PTP pins in sysfs - * @pf: pointer to the PF instance - * @info: PTP clock capabilities - */ -static void -ice_ptp_setup_pins_e810(struct ice_pf *pf, struct ptp_clock_info *info) -{ - if (ice_is_feature_supported(pf, ICE_F_SMA_CTRL)) { - info->n_ext_ts = N_EXT_TS_E810; - info->n_per_out = N_PER_OUT_E810T; - info->n_pins = NUM_PTP_PINS_E810T; - info->verify = ice_verify_pin_e810t; - - /* Complete setup of the SMA pins */ - ice_ptp_setup_sma_pins_e810t(pf, info); - } else if (ice_is_e810t(&pf->hw)) { - info->n_ext_ts = N_EXT_TS_NO_SMA_E810T; - info->n_per_out = N_PER_OUT_NO_SMA_E810T; - } else { - info->n_per_out = N_PER_OUT_E810; - info->n_ext_ts = N_EXT_TS_E810; + /* Setup in/out GPIO number */ + pins[idx].gpio[dir] = gpio; + } } -} -/** - * ice_ptp_setup_pins_e823 - Setup PTP pins in sysfs - * @pf: pointer to the PF instance - * @info: PTP clock capabilities - */ -static void -ice_ptp_setup_pins_e823(struct ice_pf *pf, struct ptp_clock_info *info) -{ - info->pps = 1; - info->n_per_out = 0; - info->n_ext_ts = 1; + for (i = 0; i < n_pins; i++) { + dev_dbg(ice_pf_to_dev(pf), + "NVM pin entry[%d] : name_idx %d gpio_out %d gpio_in %d\n", + i, pins[i].name_idx, pins[i].gpio[1], pins[i].gpio[0]); + } + + pf->ptp.info.n_pins = n_pins; + return 0; } /** - * ice_ptp_set_funcs_e82x - Set specialized functions for E82x support + * ice_ptp_set_funcs_e82x - Set specialized functions for E82X support * @pf: Board private structure - * @info: PTP info to fill * - * Assign functions to the PTP capabiltiies structure for E82x devices. + * Assign functions to the PTP capabilities structure for E82X devices. 
* Functions which operate across all device families should be set directly - * in ice_ptp_set_caps. Only add functions here which are distinct for E82x + * in ice_ptp_set_caps. Only add functions here which are distinct for E82X * devices. */ -static void -ice_ptp_set_funcs_e82x(struct ice_pf *pf, struct ptp_clock_info *info) +static void ice_ptp_set_funcs_e82x(struct ice_pf *pf) { #ifdef CONFIG_ICE_HWTS if (boot_cpu_has(X86_FEATURE_ART) && boot_cpu_has(X86_FEATURE_TSC_KNOWN_FREQ)) - info->getcrosststamp = ice_ptp_getcrosststamp_e82x; + pf->ptp.info.getcrosststamp = ice_ptp_getcrosststamp_e82x; + #endif /* CONFIG_ICE_HWTS */ + if (ice_is_e825c(&pf->hw)) { + pf->ptp.ice_pin_desc = ice_pin_desc_e825c; + pf->ptp.info.n_pins = ICE_PIN_DESC_ARR_LEN(ice_pin_desc_e825c); + } else { + pf->ptp.ice_pin_desc = ice_pin_desc_e82x; + pf->ptp.info.n_pins = ICE_PIN_DESC_ARR_LEN(ice_pin_desc_e82x); + } + ice_ptp_setup_pin_cfg(pf); } /** * ice_ptp_set_funcs_e810 - Set specialized functions for E810 support * @pf: Board private structure - * @info: PTP info to fill * * Assign functions to the PTP capabiltiies structure for E810 devices. * Functions which operate across all device families should be set directly - * in ice_ptp_set_caps. Only add functions here which are distinct for e810 + * in ice_ptp_set_caps. Only add functions here which are distinct for E810 * devices. */ -static void -ice_ptp_set_funcs_e810(struct ice_pf *pf, struct ptp_clock_info *info) +static void ice_ptp_set_funcs_e810(struct ice_pf *pf) { - info->enable = ice_ptp_gpio_enable_e810; - ice_ptp_setup_pins_e810(pf, info); -} + __le16 entries[ICE_AQC_NVM_SDP_AC_MAX_SIZE]; + struct ice_ptp_pin_desc *desc = NULL; + struct ice_ptp *ptp = &pf->ptp; + unsigned int num_entries; + int err; -/** - * ice_ptp_set_funcs_e823 - Set specialized functions for E823 support - * @pf: Board private structure - * @info: PTP info to fill - * - * Assign functions to the PTP capabiltiies structure for E823 devices. - * Functions which operate across all device families should be set directly - * in ice_ptp_set_caps. Only add functions here which are distinct for e823 - * devices. 
- */ -static void -ice_ptp_set_funcs_e823(struct ice_pf *pf, struct ptp_clock_info *info) -{ - ice_ptp_set_funcs_e82x(pf, info); + err = ice_ptp_read_sdp_ac(&pf->hw, entries, &num_entries); + if (err) { + /* SDP section does not exist in NVM or is corrupted */ + if (ice_is_feature_supported(pf, ICE_F_SMA_CTRL)) { + ptp->ice_pin_desc = ice_pin_desc_e810_sma; + ptp->info.n_pins = + ICE_PIN_DESC_ARR_LEN(ice_pin_desc_e810_sma); + } else { + pf->ptp.ice_pin_desc = ice_pin_desc_e810; + pf->ptp.info.n_pins = + ICE_PIN_DESC_ARR_LEN(ice_pin_desc_e810); + err = 0; + } + } else { + desc = devm_kcalloc(ice_pf_to_dev(pf), ICE_N_PINS_MAX, + sizeof(struct ice_ptp_pin_desc), + GFP_KERNEL); + if (!desc) + goto err; - info->enable = ice_ptp_gpio_enable_e823; - ice_ptp_setup_pins_e823(pf, info); + err = ice_ptp_parse_sdp_entries(pf, entries, num_entries, desc); + if (err) + goto err; + + ptp->ice_pin_desc = (const struct ice_ptp_pin_desc *)desc; + } + + ptp->info.pin_config = ptp->pin_desc; + ice_ptp_setup_pin_cfg(pf); + + if (ice_is_feature_supported(pf, ICE_F_SMA_CTRL)) + err = ice_ptp_set_sma_cfg(pf); +err: + if (err) { + devm_kfree(ice_pf_to_dev(pf), desc); + ice_ptp_disable_pins(pf); + } } /** @@ -2567,13 +2609,15 @@ static void ice_ptp_set_caps(struct ice_pf *pf) info->adjfine = ice_ptp_adjfine; info->gettimex64 = ice_ptp_gettimex64; info->settime64 = ice_ptp_settime64; + info->n_per_out = GLTSYN_TGT_H_IDX_MAX; + info->n_ext_ts = GLTSYN_EVNT_H_IDX_MAX; + info->enable = ice_ptp_gpio_enable; + info->verify = ice_verify_pin; if (ice_is_e810(&pf->hw)) - ice_ptp_set_funcs_e810(pf, info); - else if (ice_is_e823(&pf->hw)) - ice_ptp_set_funcs_e823(pf, info); + ice_ptp_set_funcs_e810(pf); else - ice_ptp_set_funcs_e82x(pf, info); + ice_ptp_set_funcs_e82x(pf); } /** @@ -2775,7 +2819,7 @@ void ice_ptp_prepare_for_reset(struct ice_pf *pf, enum ice_reset_req reset_type) ice_ptp_release_tx_tracker(pf, &pf->ptp.port.tx); /* Disable periodic outputs */ - ice_ptp_disable_all_clkout(pf); + ice_ptp_disable_all_perout(pf); src_tmr = ice_get_ptp_src_clock_index(&pf->hw); @@ -2813,10 +2857,8 @@ static int ice_ptp_rebuild_owner(struct ice_pf *pf) /* Write the increment time value to PHY and LAN */ err = ice_ptp_write_incval(hw, ice_base_incval(pf)); - if (err) { - ice_ptp_unlock(hw); - return err; - } + if (err) + goto err_unlock; /* Write the initial Time value to PHY and LAN using the cached PHC * time before the reset and time difference between stopping and @@ -2829,10 +2871,8 @@ static int ice_ptp_rebuild_owner(struct ice_pf *pf) ts = ktime_to_timespec64(ktime_get_real()); } err = ice_ptp_write_init(pf, &ts); - if (err) { - ice_ptp_unlock(hw); - return err; - } + if (err) + goto err_unlock; /* Release the global hardware lock */ ice_ptp_unlock(hw); @@ -2852,10 +2892,14 @@ static int ice_ptp_rebuild_owner(struct ice_pf *pf) } /* Re-enable all periodic outputs and external timestamp events */ - ice_ptp_enable_all_clkout(pf); + ice_ptp_enable_all_perout(pf); ice_ptp_enable_all_extts(pf); return 0; + +err_unlock: + ice_ptp_unlock(hw); + return err; } /** @@ -2895,187 +2939,49 @@ err: dev_err(ice_pf_to_dev(pf), "PTP reset failed %d\n", err); } -/** - * ice_ptp_aux_dev_to_aux_pf - Get auxiliary PF handle for the auxiliary device - * @aux_dev: auxiliary device to get the auxiliary PF for - */ -static struct ice_pf * -ice_ptp_aux_dev_to_aux_pf(struct auxiliary_device *aux_dev) -{ - struct ice_ptp_port *aux_port; - struct ice_ptp *aux_ptp; - - aux_port = container_of(aux_dev, struct ice_ptp_port, aux_dev); - aux_ptp = 
container_of(aux_port, struct ice_ptp, port); - - return container_of(aux_ptp, struct ice_pf, ptp); -} - -/** - * ice_ptp_aux_dev_to_owner_pf - Get PF handle for the auxiliary device - * @aux_dev: auxiliary device to get the PF for - */ -static struct ice_pf * -ice_ptp_aux_dev_to_owner_pf(struct auxiliary_device *aux_dev) +static bool ice_is_primary(struct ice_hw *hw) { - struct ice_ptp_port_owner *ports_owner; - const struct auxiliary_driver *aux_drv; - struct ice_ptp *owner_ptp; - - if (!aux_dev->dev.driver) - return NULL; - - aux_drv = to_auxiliary_drv(aux_dev->dev.driver); - ports_owner = container_of(aux_drv, struct ice_ptp_port_owner, - aux_driver); - owner_ptp = container_of(ports_owner, struct ice_ptp, ports_owner); - return container_of(owner_ptp, struct ice_pf, ptp); + return ice_is_e825c(hw) && ice_is_dual(hw) ? + !!(hw->dev_caps.nac_topo.mode & ICE_NAC_TOPO_PRIMARY_M) : true; } -/** - * ice_ptp_auxbus_probe - Probe auxiliary devices - * @aux_dev: PF's auxiliary device - * @id: Auxiliary device ID - */ -static int ice_ptp_auxbus_probe(struct auxiliary_device *aux_dev, - const struct auxiliary_device_id *id) +static int ice_ptp_setup_adapter(struct ice_pf *pf) { - struct ice_pf *owner_pf = ice_ptp_aux_dev_to_owner_pf(aux_dev); - struct ice_pf *aux_pf = ice_ptp_aux_dev_to_aux_pf(aux_dev); - - if (WARN_ON(!owner_pf)) - return -ENODEV; + if (!ice_pf_src_tmr_owned(pf) || !ice_is_primary(&pf->hw)) + return -EPERM; - INIT_LIST_HEAD(&aux_pf->ptp.port.list_member); - mutex_lock(&owner_pf->ptp.ports_owner.lock); - list_add(&aux_pf->ptp.port.list_member, - &owner_pf->ptp.ports_owner.ports); - mutex_unlock(&owner_pf->ptp.ports_owner.lock); + pf->adapter->ctrl_pf = pf; return 0; } -/** - * ice_ptp_auxbus_remove - Remove auxiliary devices from the bus - * @aux_dev: PF's auxiliary device - */ -static void ice_ptp_auxbus_remove(struct auxiliary_device *aux_dev) +static int ice_ptp_setup_pf(struct ice_pf *pf) { - struct ice_pf *owner_pf = ice_ptp_aux_dev_to_owner_pf(aux_dev); - struct ice_pf *aux_pf = ice_ptp_aux_dev_to_aux_pf(aux_dev); + struct ice_ptp *ctrl_ptp = ice_get_ctrl_ptp(pf); + struct ice_ptp *ptp = &pf->ptp; - mutex_lock(&owner_pf->ptp.ports_owner.lock); - list_del(&aux_pf->ptp.port.list_member); - mutex_unlock(&owner_pf->ptp.ports_owner.lock); -} + if (WARN_ON(!ctrl_ptp) || ice_get_phy_model(&pf->hw) == ICE_PHY_UNSUP) + return -ENODEV; -/** - * ice_ptp_auxbus_shutdown - * @aux_dev: PF's auxiliary device - */ -static void ice_ptp_auxbus_shutdown(struct auxiliary_device *aux_dev) -{ - /* Doing nothing here, but handle to auxbus driver must be satisfied */ -} + INIT_LIST_HEAD(&ptp->port.list_node); + mutex_lock(&pf->adapter->ports.lock); -/** - * ice_ptp_auxbus_suspend - * @aux_dev: PF's auxiliary device - * @state: power management state indicator - */ -static int -ice_ptp_auxbus_suspend(struct auxiliary_device *aux_dev, pm_message_t state) -{ - /* Doing nothing here, but handle to auxbus driver must be satisfied */ - return 0; -} + list_add(&ptp->port.list_node, + &pf->adapter->ports.ports); + mutex_unlock(&pf->adapter->ports.lock); -/** - * ice_ptp_auxbus_resume - * @aux_dev: PF's auxiliary device - */ -static int ice_ptp_auxbus_resume(struct auxiliary_device *aux_dev) -{ - /* Doing nothing here, but handle to auxbus driver must be satisfied */ return 0; } -/** - * ice_ptp_auxbus_create_id_table - Create auxiliary device ID table - * @pf: Board private structure - * @name: auxiliary bus driver name - */ -static struct auxiliary_device_id * -ice_ptp_auxbus_create_id_table(struct 
ice_pf *pf, const char *name) -{ - struct auxiliary_device_id *ids; - - /* Second id left empty to terminate the array */ - ids = devm_kcalloc(ice_pf_to_dev(pf), 2, - sizeof(struct auxiliary_device_id), GFP_KERNEL); - if (!ids) - return NULL; - - snprintf(ids[0].name, sizeof(ids[0].name), "ice.%s", name); - - return ids; -} - -/** - * ice_ptp_register_auxbus_driver - Register PTP auxiliary bus driver - * @pf: Board private structure - */ -static int ice_ptp_register_auxbus_driver(struct ice_pf *pf) +static void ice_ptp_cleanup_pf(struct ice_pf *pf) { - struct auxiliary_driver *aux_driver; - struct ice_ptp *ptp; - struct device *dev; - char *name; - int err; - - ptp = &pf->ptp; - dev = ice_pf_to_dev(pf); - aux_driver = &ptp->ports_owner.aux_driver; - INIT_LIST_HEAD(&ptp->ports_owner.ports); - mutex_init(&ptp->ports_owner.lock); - name = devm_kasprintf(dev, GFP_KERNEL, "ptp_aux_dev_%u_%u_clk%u", - pf->pdev->bus->number, PCI_SLOT(pf->pdev->devfn), - ice_get_ptp_src_clock_index(&pf->hw)); - if (!name) - return -ENOMEM; - - aux_driver->name = name; - aux_driver->shutdown = ice_ptp_auxbus_shutdown; - aux_driver->suspend = ice_ptp_auxbus_suspend; - aux_driver->remove = ice_ptp_auxbus_remove; - aux_driver->resume = ice_ptp_auxbus_resume; - aux_driver->probe = ice_ptp_auxbus_probe; - aux_driver->id_table = ice_ptp_auxbus_create_id_table(pf, name); - if (!aux_driver->id_table) - return -ENOMEM; + struct ice_ptp *ptp = &pf->ptp; - err = auxiliary_driver_register(aux_driver); - if (err) { - devm_kfree(dev, aux_driver->id_table); - dev_err(dev, "Failed registering aux_driver, name <%s>\n", - name); + if (ice_get_phy_model(&pf->hw) != ICE_PHY_UNSUP) { + mutex_lock(&pf->adapter->ports.lock); + list_del(&ptp->port.list_node); + mutex_unlock(&pf->adapter->ports.lock); } - - return err; -} - -/** - * ice_ptp_unregister_auxbus_driver - Unregister PTP auxiliary bus driver - * @pf: Board private structure - */ -static void ice_ptp_unregister_auxbus_driver(struct ice_pf *pf) -{ - struct auxiliary_driver *aux_driver = &pf->ptp.ports_owner.aux_driver; - - auxiliary_driver_unregister(aux_driver); - devm_kfree(ice_pf_to_dev(pf), aux_driver->id_table); - - mutex_destroy(&pf->ptp.ports_owner.lock); } /** @@ -3087,15 +2993,12 @@ static void ice_ptp_unregister_auxbus_driver(struct ice_pf *pf) */ int ice_ptp_clock_index(struct ice_pf *pf) { - struct auxiliary_device *aux_dev; - struct ice_pf *owner_pf; + struct ice_ptp *ctrl_ptp = ice_get_ctrl_ptp(pf); struct ptp_clock *clock; - aux_dev = &pf->ptp.port.aux_dev; - owner_pf = ice_ptp_aux_dev_to_owner_pf(aux_dev); - if (!owner_pf) + if (!ctrl_ptp) return -1; - clock = owner_pf->ptp.clock; + clock = ctrl_ptp->clock; return clock ? 
ptp_clock_index(clock) : -1; } @@ -3129,18 +3032,14 @@ static int ice_ptp_init_owner(struct ice_pf *pf) /* Write the increment time value to PHY and LAN */ err = ice_ptp_write_incval(hw, ice_base_incval(pf)); - if (err) { - ice_ptp_unlock(hw); - goto err_exit; - } + if (err) + goto err_unlock; ts = ktime_to_timespec64(ktime_get_real()); /* Write the initial Time value to PHY and LAN */ err = ice_ptp_write_init(pf, &ts); - if (err) { - ice_ptp_unlock(hw); - goto err_exit; - } + if (err) + goto err_unlock; /* Release the global hardware lock */ ice_ptp_unlock(hw); @@ -3155,19 +3054,15 @@ static int ice_ptp_init_owner(struct ice_pf *pf) if (err) goto err_clk; - err = ice_ptp_register_auxbus_driver(pf); - if (err) { - dev_err(ice_pf_to_dev(pf), "Failed to register PTP auxbus driver"); - goto err_aux; - } - return 0; -err_aux: - ptp_clock_unregister(pf->ptp.clock); err_clk: pf->ptp.clock = NULL; err_exit: return err; + +err_unlock: + ice_ptp_unlock(hw); + return err; } /** @@ -3209,7 +3104,7 @@ static int ice_ptp_init_port(struct ice_pf *pf, struct ice_ptp_port *ptp_port) mutex_init(&ptp_port->ps_lock); - switch (hw->ptp.phy_model) { + switch (ice_get_phy_model(hw)) { case ICE_PHY_ETH56G: return ice_ptp_init_tx_eth56g(pf, &ptp_port->tx, ptp_port->port_num); @@ -3227,76 +3122,6 @@ static int ice_ptp_init_port(struct ice_pf *pf, struct ice_ptp_port *ptp_port) } /** - * ice_ptp_release_auxbus_device - * @dev: device that utilizes the auxbus - */ -static void ice_ptp_release_auxbus_device(struct device *dev) -{ - /* Doing nothing here, but handle to auxbux device must be satisfied */ -} - -/** - * ice_ptp_create_auxbus_device - Create PTP auxiliary bus device - * @pf: Board private structure - */ -static int ice_ptp_create_auxbus_device(struct ice_pf *pf) -{ - struct auxiliary_device *aux_dev; - struct ice_ptp *ptp; - struct device *dev; - char *name; - int err; - u32 id; - - ptp = &pf->ptp; - id = ptp->port.port_num; - dev = ice_pf_to_dev(pf); - - aux_dev = &ptp->port.aux_dev; - - name = devm_kasprintf(dev, GFP_KERNEL, "ptp_aux_dev_%u_%u_clk%u", - pf->pdev->bus->number, PCI_SLOT(pf->pdev->devfn), - ice_get_ptp_src_clock_index(&pf->hw)); - if (!name) - return -ENOMEM; - - aux_dev->name = name; - aux_dev->id = id; - aux_dev->dev.release = ice_ptp_release_auxbus_device; - aux_dev->dev.parent = dev; - - err = auxiliary_device_init(aux_dev); - if (err) - goto aux_err; - - err = auxiliary_device_add(aux_dev); - if (err) { - auxiliary_device_uninit(aux_dev); - goto aux_err; - } - - return 0; -aux_err: - dev_err(dev, "Failed to create PTP auxiliary bus device <%s>\n", name); - devm_kfree(dev, name); - return err; -} - -/** - * ice_ptp_remove_auxbus_device - Remove PTP auxiliary bus device - * @pf: Board private structure - */ -static void ice_ptp_remove_auxbus_device(struct ice_pf *pf) -{ - struct auxiliary_device *aux_dev = &pf->ptp.port.aux_dev; - - auxiliary_device_delete(aux_dev); - auxiliary_device_uninit(aux_dev); - - memset(aux_dev, 0, sizeof(*aux_dev)); -} - -/** * ice_ptp_init_tx_interrupt_mode - Initialize device Tx interrupt mode * @pf: Board private structure * @@ -3307,7 +3132,7 @@ static void ice_ptp_remove_auxbus_device(struct ice_pf *pf) */ static void ice_ptp_init_tx_interrupt_mode(struct ice_pf *pf) { - switch (pf->hw.ptp.phy_model) { + switch (ice_get_phy_model(&pf->hw)) { case ICE_PHY_E82X: /* E822 based PHY has the clock owner process the interrupt * for all ports. 
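/*
 * ice_ptp_clock_index() feeds the phc_index that tools such as ptp4l
 * discover via ethtool. A sketch of that query using the standard
 * ETHTOOL_GET_TS_INFO command (the interface name is an assumption):
 */
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <unistd.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int main(void)
{
	struct ethtool_ts_info info = { .cmd = ETHTOOL_GET_TS_INFO };
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0)
		return 1;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
	ifr.ifr_data = (void *)&info;

	if (!ioctl(fd, SIOCETHTOOL, &ifr))
		printf("%s: PHC index %d\n", ifr.ifr_name, info.phc_index);

	close(fd);
	return 0;
}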
@@ -3350,19 +3175,26 @@ void ice_ptp_init(struct ice_pf *pf) /* If this function owns the clock hardware, it must allocate and * configure the PTP clock device to represent it. */ - if (ice_pf_src_tmr_owned(pf)) { + if (ice_pf_src_tmr_owned(pf) && ice_is_primary(hw)) { + err = ice_ptp_setup_adapter(pf); + if (err) + goto err_exit; err = ice_ptp_init_owner(pf); if (err) - goto err; + goto err_exit; } + err = ice_ptp_setup_pf(pf); + if (err) + goto err_exit; + ptp->port.port_num = hw->pf_id; if (ice_is_e825c(hw) && hw->ptp.is_2x50g_muxed_topo) ptp->port.port_num = hw->pf_id * 2; err = ice_ptp_init_port(pf, &ptp->port); if (err) - goto err; + goto err_exit; /* Start the PHY timestamping block */ ice_ptp_reset_phy_timestamping(pf); @@ -3370,20 +3202,16 @@ void ice_ptp_init(struct ice_pf *pf) /* Configure initial Tx interrupt settings */ ice_ptp_cfg_tx_interrupt(pf); - err = ice_ptp_create_auxbus_device(pf); - if (err) - goto err; - ptp->state = ICE_PTP_READY; err = ice_ptp_init_work(pf, ptp); if (err) - goto err; + goto err_exit; dev_info(ice_pf_to_dev(pf), "PTP init successful\n"); return; -err: +err_exit: /* If we registered a PTP clock, release it */ if (pf->ptp.clock) { ptp_clock_unregister(ptp->clock); @@ -3410,7 +3238,7 @@ void ice_ptp_release(struct ice_pf *pf) /* Disable timestamping for both Tx and Rx */ ice_ptp_disable_timestamp_mode(pf); - ice_ptp_remove_auxbus_device(pf); + ice_ptp_cleanup_pf(pf); ice_ptp_release_tx_tracker(pf, &pf->ptp.port.tx); @@ -3425,14 +3253,11 @@ void ice_ptp_release(struct ice_pf *pf) pf->ptp.kworker = NULL; } - if (ice_pf_src_tmr_owned(pf)) - ice_ptp_unregister_auxbus_driver(pf); - if (!pf->ptp.clock) return; /* Disable periodic outputs */ - ice_ptp_disable_all_clkout(pf); + ice_ptp_disable_all_perout(pf); ptp_clock_unregister(pf->ptp.clock); pf->ptp.clock = NULL; diff --git a/drivers/net/ethernet/intel/ice/ice_ptp.h b/drivers/net/ethernet/intel/ice/ice_ptp.h index 2db2257a0fb2..824e73b677a4 100644 --- a/drivers/net/ethernet/intel/ice/ice_ptp.h +++ b/drivers/net/ethernet/intel/ice/ice_ptp.h @@ -9,37 +9,6 @@ #include "ice_ptp_hw.h" -enum ice_ptp_pin_e810 { - GPIO_20 = 0, - GPIO_21, - GPIO_22, - GPIO_23, - NUM_PTP_PIN_E810 -}; - -enum ice_ptp_pin_e810t { - GNSS = 0, - SMA1, - UFL1, - SMA2, - UFL2, - NUM_PTP_PINS_E810T -}; - -struct ice_perout_channel { - bool ena; - u32 gpio_pin; - u32 flags; - u64 period; - u64 start_time; -}; - -struct ice_extts_channel { - bool ena; - u32 gpio_pin; - u32 flags; -}; - /* The ice hardware captures Tx hardware timestamps in the PHY. The timestamp * is stored in a buffer of registers. Depending on the specific hardware, * this buffer might be shared across multiple PHY ports. @@ -169,9 +138,8 @@ struct ice_ptp_tx { * ready for PTP functionality. It is used to track the port initialization * and determine when the port's PHY offset is valid. 
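/*
 * Standalone model (not driver code) of the adapter-level ownership that
 * replaces the old auxiliary bus here: the source-timer owner claims the
 * shared ctrl slot once, and every PF links its port onto the shared list
 * under the adapter lock, as ice_ptp_setup_adapter()/ice_ptp_setup_pf()
 * do above.
 */
#include <pthread.h>
#include <stddef.h>

struct port {
	struct port *next;
	int port_num;
};

struct adapter {
	pthread_mutex_t lock;
	struct port *ports;	/* list walked by the timestamp owner */
	struct port *ctrl;	/* set exactly once by the clock owner */
};

static int setup_adapter(struct adapter *a, struct port *p, int owner)
{
	if (!owner)
		return -1;	/* only the source-timer owner may claim */
	a->ctrl = p;
	return 0;
}

static void setup_port(struct adapter *a, struct port *p)
{
	pthread_mutex_lock(&a->lock);
	p->next = a->ports;	/* stands in for list_add(&...list_node) */
	a->ports = p;
	pthread_mutex_unlock(&a->lock);
}

int main(void)
{
	struct adapter a = { PTHREAD_MUTEX_INITIALIZER, NULL, NULL };
	struct port p0 = { NULL, 0 }, p1 = { NULL, 1 };

	setup_adapter(&a, &p0, 1);	/* PF0 owns the source timer */
	setup_port(&a, &p0);
	setup_port(&a, &p1);		/* PF1 only registers its port */
	return a.ctrl != &p0;
}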
* - * @list_member: list member structure of auxiliary device + * @list_node: list member structure * @tx: Tx timestamp tracking for this port - * @aux_dev: auxiliary device associated with this port * @ov_work: delayed work task for tracking when PHY offset is valid * @ps_lock: mutex used to protect the overall PTP PHY start procedure * @link_up: indicates whether the link is up @@ -179,9 +147,8 @@ struct ice_ptp_tx { * @port_num: the port number this structure represents */ struct ice_ptp_port { - struct list_head list_member; + struct list_head list_node; struct ice_ptp_tx tx; - struct auxiliary_device aux_dev; struct kthread_delayed_work ov_work; struct mutex ps_lock; /* protects overall PTP PHY start procedure */ bool link_up; @@ -195,22 +162,6 @@ enum ice_ptp_tx_interrupt { ICE_PTP_TX_INTERRUPT_ALL, }; -/** - * struct ice_ptp_port_owner - data used to handle the PTP clock owner info - * - * This structure contains data necessary for the PTP clock owner to correctly - * handle the timestamping feature for all attached ports. - * - * @aux_driver: the structure carring the auxiliary driver information - * @ports: list of porst handled by this port owner - * @lock: protect access to ports list - */ -struct ice_ptp_port_owner { - struct auxiliary_driver aux_driver; - struct list_head ports; - struct mutex lock; -}; - #define GLTSYN_TGT_H_IDX_MAX 4 enum ice_ptp_state { @@ -221,20 +172,69 @@ enum ice_ptp_state { ICE_PTP_ERROR, }; +enum ice_ptp_pin { + SDP0 = 0, + SDP1, + SDP2, + SDP3, + TIME_SYNC, + ONE_PPS +}; + +enum ice_ptp_pin_nvm { + GNSS = 0, + SMA1, + UFL1, + SMA2, + UFL2, + NUM_PTP_PINS_NVM, + GPIO_NA = 9 +}; + +/* Per-channel register definitions */ +#define GLTSYN_AUX_OUT(_chan, _idx) (GLTSYN_AUX_OUT_0(_idx) + ((_chan) * 8)) +#define GLTSYN_AUX_IN(_chan, _idx) (GLTSYN_AUX_IN_0(_idx) + ((_chan) * 8)) +#define GLTSYN_CLKO(_chan, _idx) (GLTSYN_CLKO_0(_idx) + ((_chan) * 8)) +#define GLTSYN_TGT_L(_chan, _idx) (GLTSYN_TGT_L_0(_idx) + ((_chan) * 16)) +#define GLTSYN_TGT_H(_chan, _idx) (GLTSYN_TGT_H_0(_idx) + ((_chan) * 16)) +#define GLTSYN_EVNT_L(_chan, _idx) (GLTSYN_EVNT_L_0(_idx) + ((_chan) * 16)) +#define GLTSYN_EVNT_H(_chan, _idx) (GLTSYN_EVNT_H_0(_idx) + ((_chan) * 16)) +#define GLTSYN_EVNT_H_IDX_MAX 3 + +/* Pin definitions for PTP */ +#define ICE_N_PINS_MAX 6 +#define ICE_SMA_PINS_NUM 4 +#define ICE_PIN_DESC_ARR_LEN(_arr) (sizeof(_arr) / \ + sizeof(struct ice_ptp_pin_desc)) + +/** + * struct ice_ptp_pin_desc - hardware pin description data + * @name_idx: index of the name of pin in ice_pin_names + * @gpio: the associated GPIO input and output pins + * + * Structure describing a PTP-capable GPIO pin that extends ptp_pin_desc array + * for the device. Device families have separate sets of available pins with + * varying restrictions. 
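/*
 * Illustration of the gpio[2] convention documented above: index 0 holds
 * the input (EXTTS) GPIO, index 1 the output (PEROUT) GPIO, and -1 marks
 * a direction the pin cannot serve -- the same test ice_verify_pin() makes.
 */
#include <stdio.h>

struct pin_desc {
	int name_idx;
	int gpio[2];	/* [0] = input GPIO, [1] = output GPIO */
};

/* ONE_PPS-style pin from ice_pin_desc_e810: output-only, no input */
static const struct pin_desc one_pps = { 5, { -1, 5 } };

static int pin_supports(const struct pin_desc *p, int is_output)
{
	return p->gpio[is_output] >= 0;
}

int main(void)
{
	printf("1PPS as output: %d, as input: %d\n",
	       pin_supports(&one_pps, 1), pin_supports(&one_pps, 0));
	return 0;
}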
+ */ +struct ice_ptp_pin_desc { + int name_idx; + int gpio[2]; +}; + /** * struct ice_ptp - data used for integrating with CONFIG_PTP_1588_CLOCK * @state: current state of PTP state machine * @tx_interrupt_mode: the TX interrupt mode for the PTP clock * @port: data for the PHY port initialization procedure - * @ports_owner: data for the auxiliary driver owner * @work: delayed work function for periodic tasks * @cached_phc_time: a cached copy of the PHC time for timestamp extension * @cached_phc_jiffies: jiffies when cached_phc_time was last updated - * @ext_ts_chan: the external timestamp channel in use - * @ext_ts_irq: the external timestamp IRQ in use * @kworker: kwork thread for handling periodic work - * @perout_channels: periodic output data - * @extts_channels: channels for external timestamps + * @ext_ts_irq: the external timestamp IRQ in use + * @pin_desc: structure defining pins + * @ice_pin_desc: internal structure describing pin relations + * @perout_rqs: cached periodic output requests + * @extts_rqs: cached external timestamp requests * @info: structure defining PTP hardware capabilities * @clock: pointer to registered PTP clock device * @tstamp_config: hardware timestamping configuration @@ -250,15 +250,15 @@ struct ice_ptp { enum ice_ptp_state state; enum ice_ptp_tx_interrupt tx_interrupt_mode; struct ice_ptp_port port; - struct ice_ptp_port_owner ports_owner; struct kthread_delayed_work work; u64 cached_phc_time; unsigned long cached_phc_jiffies; - u8 ext_ts_chan; - u8 ext_ts_irq; struct kthread_worker *kworker; - struct ice_perout_channel perout_channels[GLTSYN_TGT_H_IDX_MAX]; - struct ice_extts_channel extts_channels[GLTSYN_TGT_H_IDX_MAX]; + u8 ext_ts_irq; + struct ptp_pin_desc pin_desc[ICE_N_PINS_MAX]; + const struct ice_ptp_pin_desc *ice_pin_desc; + struct ptp_perout_request perout_rqs[GLTSYN_TGT_H_IDX_MAX]; + struct ptp_extts_request extts_rqs[GLTSYN_EVNT_H_IDX_MAX]; struct ptp_clock_info info; struct ptp_clock *clock; struct hwtstamp_config tstamp_config; @@ -289,27 +289,6 @@ struct ice_ptp { #define FIFO_EMPTY BIT(2) #define FIFO_OK 0xFF #define ICE_PTP_FIFO_NUM_CHECKS 5 -/* Per-channel register definitions */ -#define GLTSYN_AUX_OUT(_chan, _idx) (GLTSYN_AUX_OUT_0(_idx) + ((_chan) * 8)) -#define GLTSYN_AUX_IN(_chan, _idx) (GLTSYN_AUX_IN_0(_idx) + ((_chan) * 8)) -#define GLTSYN_CLKO(_chan, _idx) (GLTSYN_CLKO_0(_idx) + ((_chan) * 8)) -#define GLTSYN_TGT_L(_chan, _idx) (GLTSYN_TGT_L_0(_idx) + ((_chan) * 16)) -#define GLTSYN_TGT_H(_chan, _idx) (GLTSYN_TGT_H_0(_idx) + ((_chan) * 16)) -#define GLTSYN_EVNT_L(_chan, _idx) (GLTSYN_EVNT_L_0(_idx) + ((_chan) * 16)) -#define GLTSYN_EVNT_H(_chan, _idx) (GLTSYN_EVNT_H_0(_idx) + ((_chan) * 16)) -#define GLTSYN_EVNT_H_IDX_MAX 3 - -/* Pin definitions for PTP PPS out */ -#define PPS_CLK_GEN_CHAN 3 -#define PPS_CLK_SRC_CHAN 2 -#define PPS_PIN_INDEX 5 -#define TIME_SYNC_PIN_INDEX 4 -#define N_EXT_TS_E810 3 -#define N_PER_OUT_E810 4 -#define N_PER_OUT_E810T 3 -#define N_PER_OUT_NO_SMA_E810T 2 -#define N_EXT_TS_NO_SMA_E810T 2 -#define ETH_GLTSYN_ENA(_i) (0x03000348 + ((_i) * 4)) #if IS_ENABLED(CONFIG_PTP_1588_CLOCK) int ice_ptp_clock_index(struct ice_pf *pf); diff --git a/drivers/net/ethernet/intel/ice/ice_ptp_consts.h b/drivers/net/ethernet/intel/ice/ice_ptp_consts.h index e6980b94a6c1..585ce200c60f 100644 --- a/drivers/net/ethernet/intel/ice/ice_ptp_consts.h +++ b/drivers/net/ethernet/intel/ice/ice_ptp_consts.h @@ -334,7 +334,7 @@ struct ice_eth56g_mac_reg_cfg eth56g_mac_cfg[NUM_ICE_ETH56G_LNK_SPD] = { * reference. 
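The ice_ptp_pin_desc entries replace the old per-board pin enums: each entry pairs a name index with the pin's input and output GPIO numbers, and periodic output and external timestamp requests are now cached as standard ptp_perout_request and ptp_extts_request structures. A hypothetical table sketch (illustrative values only; the real per-family tables live in ice_ptp.c and are not part of this diff, and GPIO_NA is assumed here to mark an unused direction):

    static const struct ice_ptp_pin_desc example_pin_desc[] = {
            /* name_idx, { input GPIO, output GPIO } */
            { SDP0,    { 0, 0 } },
            { SDP1,    { 1, 1 } },
            { ONE_PPS, { GPIO_NA, 5 } },       /* output-only pin */
    };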
See the struct ice_time_ref_info_e82x for information about the * meaning of each constant. */ -const struct ice_time_ref_info_e82x e822_time_ref[NUM_ICE_TIME_REF_FREQ] = { +const struct ice_time_ref_info_e82x e82x_time_ref[NUM_ICE_TIME_REF_FREQ] = { /* ICE_TIME_REF_FREQ_25_000 -> 25 MHz */ { /* pll_freq */ diff --git a/drivers/net/ethernet/intel/ice/ice_ptp_hw.c b/drivers/net/ethernet/intel/ice/ice_ptp_hw.c index 3a33e6b9b313..da88c6ccfaeb 100644 --- a/drivers/net/ethernet/intel/ice/ice_ptp_hw.c +++ b/drivers/net/ethernet/intel/ice/ice_ptp_hw.c @@ -661,6 +661,29 @@ static int ice_cfg_cgu_pll_e825c(struct ice_hw *hw, return 0; } +#define ICE_ONE_PPS_OUT_AMP_MAX 3 + +/** + * ice_cgu_cfg_pps_out - Configure 1PPS output from CGU + * @hw: pointer to the HW struct + * @enable: true to enable 1PPS output, false to disable it + * + * Return: 0 on success, other negative error code when CGU read/write failed + */ +int ice_cgu_cfg_pps_out(struct ice_hw *hw, bool enable) +{ + union nac_cgu_dword9 dw9; + int err; + + err = ice_read_cgu_reg_e82x(hw, NAC_CGU_DWORD9, &dw9.val); + if (err) + return err; + + dw9.one_pps_out_en = enable; + dw9.one_pps_out_amp = enable * ICE_ONE_PPS_OUT_AMP_MAX; + return ice_write_cgu_reg_e82x(hw, NAC_CGU_DWORD9, dw9.val); +} + /** * ice_cfg_cgu_pll_dis_sticky_bits_e82x - disable TS PLL sticky bits * @hw: pointer to the HW struct @@ -806,7 +829,7 @@ static u32 ice_ptp_tmr_cmd_to_port_reg(struct ice_hw *hw, /* Certain hardware families share the same register values for the * port register and source timer register. */ - switch (hw->ptp.phy_model) { + switch (ice_get_phy_model(hw)) { case ICE_PHY_E810: return ice_ptp_tmr_cmd_to_src_reg(hw, cmd) & TS_CMD_MASK_E810; default: @@ -5150,9 +5173,9 @@ ice_get_phy_tx_tstamp_ready_e810(struct ice_hw *hw, u8 port, u64 *tstamp_ready) return 0; } -/* E810T SMA functions +/* E810 SMA functions * - * The following functions operate specifically on E810T hardware and are used + * The following functions operate specifically on E810 hardware and are used * to access the extended GPIOs available. */ @@ -5219,14 +5242,14 @@ ice_get_pca9575_handle(struct ice_hw *hw, u16 *pca9575_handle) } /** - * ice_read_sma_ctrl_e810t + * ice_read_sma_ctrl * @hw: pointer to the hw struct * @data: pointer to data to be read from the GPIO controller * * Read the SMA controller state. It is connected to pins 3-7 of Port 1 of the * PCA9575 expander, so only bits 3-7 in data are valid. */ -int ice_read_sma_ctrl_e810t(struct ice_hw *hw, u8 *data) +int ice_read_sma_ctrl(struct ice_hw *hw, u8 *data) { int status; u16 handle; @@ -5238,7 +5261,7 @@ int ice_read_sma_ctrl_e810t(struct ice_hw *hw, u8 *data) *data = 0; - for (i = ICE_SMA_MIN_BIT_E810T; i <= ICE_SMA_MAX_BIT_E810T; i++) { + for (i = ICE_SMA_MIN_BIT; i <= ICE_SMA_MAX_BIT; i++) { bool pin; status = ice_aq_get_gpio(hw, handle, i + ICE_PCA9575_P1_OFFSET, @@ -5252,14 +5275,14 @@ int ice_read_sma_ctrl_e810t(struct ice_hw *hw, u8 *data) } /** - * ice_write_sma_ctrl_e810t + * ice_write_sma_ctrl * @hw: pointer to the hw struct * @data: data to be written to the GPIO controller * * Write the data to the SMA controller. It is connected to pins 3-7 of Port 1 * of the PCA9575 expander, so only bits 3-7 in data are valid. 
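In the ice_cgu_cfg_pps_out() helper added above, the expression "enable * ICE_ONE_PPS_OUT_AMP_MAX" drives the amplitude field to its maximum (3) when enabling and to 0 when disabling, so a single read-modify-write of NAC_CGU_DWORD9 updates both fields consistently. A minimal caller sketch (hypothetical context):

    /* Toggle the CGU 1PPS output; pass false to disable it again. */
    int err = ice_cgu_cfg_pps_out(hw, true);

    if (err)
            dev_dbg(ice_hw_to_dev(hw), "1PPS output enable failed: %d\n", err);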
*/ -int ice_write_sma_ctrl_e810t(struct ice_hw *hw, u8 data) +int ice_write_sma_ctrl(struct ice_hw *hw, u8 data) { int status; u16 handle; @@ -5269,7 +5292,7 @@ int ice_write_sma_ctrl_e810t(struct ice_hw *hw, u8 data) if (status) return status; - for (i = ICE_SMA_MIN_BIT_E810T; i <= ICE_SMA_MAX_BIT_E810T; i++) { + for (i = ICE_SMA_MIN_BIT; i <= ICE_SMA_MAX_BIT; i++) { bool pin; pin = !(data & (1 << i)); @@ -5283,14 +5306,14 @@ int ice_write_sma_ctrl_e810t(struct ice_hw *hw, u8 data) } /** - * ice_read_pca9575_reg_e810t + * ice_read_pca9575_reg * @hw: pointer to the hw struct * @offset: GPIO controller register offset * @data: pointer to data to be read from the GPIO controller * * Read the register from the GPIO controller */ -int ice_read_pca9575_reg_e810t(struct ice_hw *hw, u8 offset, u8 *data) +int ice_read_pca9575_reg(struct ice_hw *hw, u8 offset, u8 *data) { struct ice_aqc_link_topo_addr link_topo; __le16 addr; @@ -5314,6 +5337,66 @@ int ice_read_pca9575_reg_e810t(struct ice_hw *hw, u8 offset, u8 *data) } /** + * ice_ptp_read_sdp_ac - read SDP available connections section from NVM + * @hw: pointer to the HW struct + * @entries: returns the SDP available connections section from NVM + * @num_entries: returns the number of valid entries + * + * Return: 0 on success, negative error code if NVM read failed or section does + * not exist or is corrupted + */ +int ice_ptp_read_sdp_ac(struct ice_hw *hw, __le16 *entries, uint *num_entries) +{ + __le16 data; + u32 offset; + int err; + + err = ice_acquire_nvm(hw, ICE_RES_READ); + if (err) + goto exit; + + /* Read the offset of SDP_AC */ + offset = ICE_AQC_NVM_SDP_AC_PTR_OFFSET; + err = ice_aq_read_nvm(hw, 0, offset, sizeof(data), &data, false, true, + NULL); + if (err) + goto exit; + + /* Check if section exist */ + offset = FIELD_GET(ICE_AQC_NVM_SDP_AC_PTR_M, le16_to_cpu(data)); + if (offset == ICE_AQC_NVM_SDP_AC_PTR_INVAL) { + err = -EINVAL; + goto exit; + } + + if (offset & ICE_AQC_NVM_SDP_AC_PTR_TYPE_M) { + offset &= ICE_AQC_NVM_SDP_AC_PTR_M; + offset *= ICE_AQC_NVM_SECTOR_UNIT; + } else { + offset *= sizeof(data); + } + + /* Skip reading section length and read the number of valid entries */ + offset += sizeof(data); + err = ice_aq_read_nvm(hw, 0, offset, sizeof(data), &data, false, true, + NULL); + if (err) + goto exit; + *num_entries = le16_to_cpu(data); + + /* Read SDP configuration section */ + offset += sizeof(data); + err = ice_aq_read_nvm(hw, 0, offset, *num_entries * sizeof(data), + entries, false, true, NULL); + +exit: + if (err) + dev_dbg(ice_hw_to_dev(hw), "Failed to configure SDP connection section\n"); + ice_release_nvm(hw); + return err; +} + +/** * ice_ptp_init_phy_e810 - initialize PHY parameters * @ptp: pointer to the PTP HW struct */ @@ -5419,7 +5502,7 @@ void ice_ptp_init_hw(struct ice_hw *hw) static int ice_ptp_write_port_cmd(struct ice_hw *hw, u8 port, enum ice_ptp_tmr_cmd cmd) { - switch (hw->ptp.phy_model) { + switch (ice_get_phy_model(hw)) { case ICE_PHY_ETH56G: return ice_ptp_write_port_cmd_eth56g(hw, port, cmd); case ICE_PHY_E82X: @@ -5484,7 +5567,7 @@ static int ice_ptp_port_cmd(struct ice_hw *hw, enum ice_ptp_tmr_cmd cmd) u32 port; /* PHY models which can program all ports simultaneously */ - switch (hw->ptp.phy_model) { + switch (ice_get_phy_model(hw)) { case ICE_PHY_E810: return ice_ptp_port_cmd_e810(hw, cmd); default: @@ -5563,7 +5646,7 @@ int ice_ptp_init_time(struct ice_hw *hw, u64 time) /* PHY timers */ /* Fill Rx and Tx ports and send msg to PHY */ - switch (hw->ptp.phy_model) { + switch 
(ice_get_phy_model(hw)) { case ICE_PHY_ETH56G: err = ice_ptp_prep_phy_time_eth56g(hw, (u32)(time & 0xFFFFFFFF)); @@ -5609,7 +5692,7 @@ int ice_ptp_write_incval(struct ice_hw *hw, u64 incval) wr32(hw, GLTSYN_SHADJ_L(tmr_idx), lower_32_bits(incval)); wr32(hw, GLTSYN_SHADJ_H(tmr_idx), upper_32_bits(incval)); - switch (hw->ptp.phy_model) { + switch (ice_get_phy_model(hw)) { case ICE_PHY_ETH56G: err = ice_ptp_prep_phy_incval_eth56g(hw, incval); break; @@ -5678,7 +5761,7 @@ int ice_ptp_adj_clock(struct ice_hw *hw, s32 adj) wr32(hw, GLTSYN_SHADJ_L(tmr_idx), 0); wr32(hw, GLTSYN_SHADJ_H(tmr_idx), adj); - switch (hw->ptp.phy_model) { + switch (ice_get_phy_model(hw)) { case ICE_PHY_ETH56G: err = ice_ptp_prep_phy_adj_eth56g(hw, adj); break; @@ -5711,7 +5794,7 @@ int ice_ptp_adj_clock(struct ice_hw *hw, s32 adj) */ int ice_read_phy_tstamp(struct ice_hw *hw, u8 block, u8 idx, u64 *tstamp) { - switch (hw->ptp.phy_model) { + switch (ice_get_phy_model(hw)) { case ICE_PHY_ETH56G: return ice_read_ptp_tstamp_eth56g(hw, block, idx, tstamp); case ICE_PHY_E810: @@ -5741,7 +5824,7 @@ int ice_read_phy_tstamp(struct ice_hw *hw, u8 block, u8 idx, u64 *tstamp) */ int ice_clear_phy_tstamp(struct ice_hw *hw, u8 block, u8 idx) { - switch (hw->ptp.phy_model) { + switch (ice_get_phy_model(hw)) { case ICE_PHY_ETH56G: return ice_clear_ptp_tstamp_eth56g(hw, block, idx); case ICE_PHY_E810: @@ -5804,7 +5887,7 @@ static int ice_get_pf_c827_idx(struct ice_hw *hw, u8 *idx) */ void ice_ptp_reset_ts_memory(struct ice_hw *hw) { - switch (hw->ptp.phy_model) { + switch (ice_get_phy_model(hw)) { case ICE_PHY_ETH56G: ice_ptp_reset_ts_memory_eth56g(hw); break; @@ -5833,7 +5916,7 @@ int ice_ptp_init_phc(struct ice_hw *hw) /* Clear event err indications for auxiliary pins */ (void)rd32(hw, GLTSYN_STAT(src_idx)); - switch (hw->ptp.phy_model) { + switch (ice_get_phy_model(hw)) { case ICE_PHY_ETH56G: return ice_ptp_init_phc_eth56g(hw); case ICE_PHY_E810: @@ -5858,7 +5941,7 @@ int ice_ptp_init_phc(struct ice_hw *hw) */ int ice_get_phy_tx_tstamp_ready(struct ice_hw *hw, u8 block, u64 *tstamp_ready) { - switch (hw->ptp.phy_model) { + switch (ice_get_phy_model(hw)) { case ICE_PHY_ETH56G: return ice_get_phy_tx_tstamp_ready_eth56g(hw, block, tstamp_ready); diff --git a/drivers/net/ethernet/intel/ice/ice_ptp_hw.h b/drivers/net/ethernet/intel/ice/ice_ptp_hw.h index 0852a34ade91..656daff3447e 100644 --- a/drivers/net/ethernet/intel/ice/ice_ptp_hw.h +++ b/drivers/net/ethernet/intel/ice/ice_ptp_hw.h @@ -316,7 +316,7 @@ ice_cgu_pll_params_e825c e825c_cgu_params[NUM_ICE_TIME_REF_FREQ]; extern const struct ice_phy_reg_info_eth56g eth56g_phy_res[NUM_ETH56G_PHY_RES]; /* Table of constants related to possible TIME_REF sources */ -extern const struct ice_time_ref_info_e82x e822_time_ref[NUM_ICE_TIME_REF_FREQ]; +extern const struct ice_time_ref_info_e82x e82x_time_ref[NUM_ICE_TIME_REF_FREQ]; /* Table of constants for Vernier calibration on E822 */ extern const struct ice_vernier_info_e82x e822_vernier[NUM_ICE_PTP_LNK_SPD]; @@ -326,10 +326,12 @@ extern const struct ice_vernier_info_e82x e822_vernier[NUM_ICE_PTP_LNK_SPD]; */ #define ICE_E810_PLL_FREQ 812500000 #define ICE_PTP_NOMINAL_INCVAL_E810 0x13b13b13bULL -#define E810_OUT_PROP_DELAY_NS 1 +#define ICE_E810_OUT_PROP_DELAY_NS 1 +#define ICE_E825C_OUT_PROP_DELAY_NS 11 /* Device agnostic functions */ u8 ice_get_ptp_src_clock_index(struct ice_hw *hw); +int ice_cgu_cfg_pps_out(struct ice_hw *hw, bool enable); bool ice_ptp_lock(struct ice_hw *hw); void ice_ptp_unlock(struct ice_hw *hw); void ice_ptp_src_cmd(struct 
ice_hw *hw, enum ice_ptp_tmr_cmd cmd); @@ -358,7 +360,7 @@ void ice_ptp_reset_ts_memory_quad_e82x(struct ice_hw *hw, u8 quad); * * Returns the current TIME_REF from the capabilities structure. */ -static inline enum ice_time_ref_freq ice_e82x_time_ref(struct ice_hw *hw) +static inline enum ice_time_ref_freq ice_e82x_time_ref(const struct ice_hw *hw) { return hw->func_caps.ts_func_info.time_ref; } @@ -379,17 +381,17 @@ ice_set_e82x_time_ref(struct ice_hw *hw, enum ice_time_ref_freq time_ref) static inline u64 ice_e82x_pll_freq(enum ice_time_ref_freq time_ref) { - return e822_time_ref[time_ref].pll_freq; + return e82x_time_ref[time_ref].pll_freq; } static inline u64 ice_e82x_nominal_incval(enum ice_time_ref_freq time_ref) { - return e822_time_ref[time_ref].nominal_incval; + return e82x_time_ref[time_ref].nominal_incval; } static inline u64 ice_e82x_pps_delay(enum ice_time_ref_freq time_ref) { - return e822_time_ref[time_ref].pps_delay; + return e82x_time_ref[time_ref].pps_delay; } /* E822 Vernier calibration functions */ @@ -400,10 +402,10 @@ int ice_phy_cfg_rx_offset_e82x(struct ice_hw *hw, u8 port); int ice_phy_cfg_intr_e82x(struct ice_hw *hw, u8 quad, bool ena, u8 threshold); /* E810 family functions */ -int ice_read_sma_ctrl_e810t(struct ice_hw *hw, u8 *data); -int ice_write_sma_ctrl_e810t(struct ice_hw *hw, u8 data); -int ice_read_pca9575_reg_e810t(struct ice_hw *hw, u8 offset, u8 *data); -bool ice_is_pca9575_present(struct ice_hw *hw); +int ice_read_sma_ctrl(struct ice_hw *hw, u8 *data); +int ice_write_sma_ctrl(struct ice_hw *hw, u8 data); +int ice_read_pca9575_reg(struct ice_hw *hw, u8 offset, u8 *data); +int ice_ptp_read_sdp_ac(struct ice_hw *hw, __le16 *entries, uint *num_entries); enum dpll_pin_type ice_cgu_get_pin_type(struct ice_hw *hw, u8 pin, bool input); struct dpll_pin_frequency * ice_cgu_get_pin_freq_supp(struct ice_hw *hw, u8 pin, bool input, u8 *num); @@ -420,8 +422,6 @@ int ice_cgu_get_output_pin_state_caps(struct ice_hw *hw, u8 pin_id, int ice_ptp_read_tx_hwtstamp_status_eth56g(struct ice_hw *hw, u32 *ts_status); int ice_stop_phy_timer_eth56g(struct ice_hw *hw, u8 port, bool soft_reset); int ice_start_phy_timer_eth56g(struct ice_hw *hw, u8 port); -int ice_phy_cfg_tx_offset_eth56g(struct ice_hw *hw, u8 port); -int ice_phy_cfg_rx_offset_eth56g(struct ice_hw *hw, u8 port); int ice_phy_cfg_intr_eth56g(struct ice_hw *hw, u8 port, bool ena, u8 threshold); int ice_phy_cfg_ptp_1step_eth56g(struct ice_hw *hw, u8 port); @@ -431,6 +431,20 @@ int ice_phy_cfg_ptp_1step_eth56g(struct ice_hw *hw, u8 port); #define ICE_ETH56G_NOMINAL_THRESH4 0x7777 #define ICE_ETH56G_NOMINAL_TX_THRESH 0x6 +static inline u64 ice_prop_delay(const struct ice_hw *hw) +{ + switch (hw->ptp.phy_model) { + case ICE_PHY_ETH56G: + return ICE_E825C_OUT_PROP_DELAY_NS; + case ICE_PHY_E810: + return ICE_E810_OUT_PROP_DELAY_NS; + case ICE_PHY_E82X: + return ice_e82x_pps_delay(ice_e82x_time_ref(hw)); + default: + return 0; + } +} + /** * ice_get_base_incval - Get base clock increment value * @hw: pointer to the HW struct @@ -451,6 +465,11 @@ static inline u64 ice_get_base_incval(struct ice_hw *hw) } } +static inline bool ice_is_dual(struct ice_hw *hw) +{ + return !!(hw->dev_caps.nac_topo.mode & ICE_NAC_TOPO_DUAL_M); +} + #define PFTSYN_SEM_BYTES 4 #define ICE_PTP_CLOCK_INDEX_0 0x00 @@ -688,30 +707,27 @@ static inline u64 ice_get_base_incval(struct ice_hw *hw) #define LOW_TX_MEMORY_BANK_START 0x03090000 #define HIGH_TX_MEMORY_BANK_START 0x03090004 -/* E810T SMA controller pin control */ -#define ICE_SMA1_DIR_EN_E810T 
BIT(4) -#define ICE_SMA1_TX_EN_E810T BIT(5) -#define ICE_SMA2_UFL2_RX_DIS_E810T BIT(3) -#define ICE_SMA2_DIR_EN_E810T BIT(6) -#define ICE_SMA2_TX_EN_E810T BIT(7) - -#define ICE_SMA1_MASK_E810T (ICE_SMA1_DIR_EN_E810T | \ - ICE_SMA1_TX_EN_E810T) -#define ICE_SMA2_MASK_E810T (ICE_SMA2_UFL2_RX_DIS_E810T | \ - ICE_SMA2_DIR_EN_E810T | \ - ICE_SMA2_TX_EN_E810T) -#define ICE_ALL_SMA_MASK_E810T (ICE_SMA1_MASK_E810T | \ - ICE_SMA2_MASK_E810T) - -#define ICE_SMA_MIN_BIT_E810T 3 -#define ICE_SMA_MAX_BIT_E810T 7 +/* SMA controller pin control */ +#define ICE_SMA1_DIR_EN BIT(4) +#define ICE_SMA1_TX_EN BIT(5) +#define ICE_SMA2_UFL2_RX_DIS BIT(3) +#define ICE_SMA2_DIR_EN BIT(6) +#define ICE_SMA2_TX_EN BIT(7) + +#define ICE_SMA1_MASK (ICE_SMA1_DIR_EN | ICE_SMA1_TX_EN) +#define ICE_SMA2_MASK (ICE_SMA2_UFL2_RX_DIS | ICE_SMA2_DIR_EN | \ + ICE_SMA2_TX_EN) +#define ICE_ALL_SMA_MASK (ICE_SMA1_MASK | ICE_SMA2_MASK) + +#define ICE_SMA_MIN_BIT 3 +#define ICE_SMA_MAX_BIT 7 #define ICE_PCA9575_P1_OFFSET 8 -/* E810T PCA9575 IO controller registers */ +/* PCA9575 IO controller registers */ #define ICE_PCA9575_P0_IN 0x0 -/* E810T PCA9575 IO controller pin control */ -#define ICE_E810T_P0_GNSS_PRSNT_N BIT(4) +/* PCA9575 IO controller pin control */ +#define ICE_P0_GNSS_PRSNT_N BIT(4) /* ETH56G PHY register addresses */ /* Timestamp PHY incval registers */ diff --git a/drivers/net/ethernet/intel/ice/ice_sriov.c b/drivers/net/ethernet/intel/ice/ice_sriov.c index 91cb393f616f..b83f99c01d91 100644 --- a/drivers/net/ethernet/intel/ice/ice_sriov.c +++ b/drivers/net/ethernet/intel/ice/ice_sriov.c @@ -194,7 +194,8 @@ void ice_free_vfs(struct ice_pf *pf) } /* clear malicious info since the VF is getting released */ - list_del(&vf->mbx_info.list_entry); + if (!ice_is_feature_supported(pf, ICE_F_MBX_LIMIT)) + list_del(&vf->mbx_info.list_entry); mutex_unlock(&vf->cfg_lock); } diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.h b/drivers/net/ethernet/intel/ice/ice_txrx.h index feba314a3fe4..cb347c852ba9 100644 --- a/drivers/net/ethernet/intel/ice/ice_txrx.h +++ b/drivers/net/ethernet/intel/ice/ice_txrx.h @@ -359,8 +359,9 @@ struct ice_rx_ring { struct ice_rx_ring *next; /* pointer to next ring in q_vector */ struct xsk_buff_pool *xsk_pool; u32 nr_frags; - dma_addr_t dma; /* physical address of ring */ + u16 max_frame; u16 rx_buf_len; + dma_addr_t dma; /* physical address of ring */ u8 dcb_tc; /* Traffic class of ring */ u8 ptp_rx; #define ICE_RX_FLAGS_RING_BUILD_SKB BIT(1) @@ -406,6 +407,7 @@ struct ice_tx_ring { #define ICE_TX_FLAGS_RING_VLAN_L2TAG2 BIT(2) u8 flags; u8 dcb_tc; /* Traffic class of ring */ + u16 quanta_prof_id; } ____cacheline_internodealigned_in_smp; static inline bool ice_ring_uses_build_skb(struct ice_rx_ring *ring) diff --git a/drivers/net/ethernet/intel/ice/ice_txrx_lib.h b/drivers/net/ethernet/intel/ice/ice_txrx_lib.h index afcead4baef4..79f960c6680d 100644 --- a/drivers/net/ethernet/intel/ice/ice_txrx_lib.h +++ b/drivers/net/ethernet/intel/ice/ice_txrx_lib.h @@ -154,7 +154,6 @@ static inline u32 ice_set_rs_bit(const struct ice_tx_ring *xdp_ring) } void ice_finalize_xdp_rx(struct ice_tx_ring *xdp_ring, unsigned int xdp_res, u32 first_idx); -int ice_xmit_xdp_buff(struct xdp_buff *xdp, struct ice_tx_ring *xdp_ring); int __ice_xmit_xdp_ring(struct xdp_buff *xdp, struct ice_tx_ring *xdp_ring, bool frame); void ice_release_rx_desc(struct ice_rx_ring *rx_ring, u16 val); diff --git a/drivers/net/ethernet/intel/ice/ice_type.h b/drivers/net/ethernet/intel/ice/ice_type.h index 45768796691f..adb168860711 100644 --- 
a/drivers/net/ethernet/intel/ice/ice_type.h +++ b/drivers/net/ethernet/intel/ice/ice_type.h @@ -905,6 +905,7 @@ struct ice_hw { u8 revision_id; u8 pf_id; /* device profile info */ + u8 logical_pf_id; u16 max_burst_size; /* driver sets this value */ diff --git a/drivers/net/ethernet/intel/ice/ice_vf_lib.c b/drivers/net/ethernet/intel/ice/ice_vf_lib.c index 8c434689e3f7..c7c0c2f50c26 100644 --- a/drivers/net/ethernet/intel/ice/ice_vf_lib.c +++ b/drivers/net/ethernet/intel/ice/ice_vf_lib.c @@ -717,6 +717,23 @@ ice_vf_clear_vsi_promisc(struct ice_vf *vf, struct ice_vsi *vsi, u8 promisc_m) } /** + * ice_reset_vf_mbx_cnt - reset VF mailbox message count + * @vf: pointer to the VF structure + * + * This function clears the VF mailbox message count, and should be called on + * VF reset. + */ +static void ice_reset_vf_mbx_cnt(struct ice_vf *vf) +{ + struct ice_pf *pf = vf->pf; + + if (ice_is_feature_supported(pf, ICE_F_MBX_LIMIT)) + ice_mbx_vf_clear_cnt_e830(&pf->hw, vf->vf_id); + else + ice_mbx_clear_malvf(&vf->mbx_info); +} + +/** * ice_reset_all_vfs - reset all allocated VFs in one go * @pf: pointer to the PF structure * @@ -742,7 +759,7 @@ void ice_reset_all_vfs(struct ice_pf *pf) /* clear all malicious info if the VFs are getting reset */ ice_for_each_vf(pf, bkt, vf) - ice_mbx_clear_malvf(&vf->mbx_info); + ice_reset_vf_mbx_cnt(vf); /* If VFs have been disabled, there is no need to reset */ if (test_and_set_bit(ICE_VF_DIS, pf->state)) { @@ -958,7 +975,7 @@ int ice_reset_vf(struct ice_vf *vf, u32 flags) ice_eswitch_update_repr(&vf->repr_id, vsi); /* if the VF has been reset allow it to come up again */ - ice_mbx_clear_malvf(&vf->mbx_info); + ice_reset_vf_mbx_cnt(vf); out_unlock: if (lag && lag->bonded && lag->primary && @@ -1011,7 +1028,10 @@ void ice_initialize_vf_entry(struct ice_vf *vf) ice_vf_fdir_init(vf); /* Initialize mailbox info for this VF */ - ice_mbx_init_vf_info(&pf->hw, &vf->mbx_info); + if (ice_is_feature_supported(pf, ICE_F_MBX_LIMIT)) + ice_mbx_vf_clear_cnt_e830(&pf->hw, vf->vf_id); + else + ice_mbx_init_vf_info(&pf->hw, &vf->mbx_info); mutex_init(&vf->cfg_lock); } diff --git a/drivers/net/ethernet/intel/ice/ice_vf_lib.h b/drivers/net/ethernet/intel/ice/ice_vf_lib.h index be4266899690..4261fe1c2bcd 100644 --- a/drivers/net/ethernet/intel/ice/ice_vf_lib.h +++ b/drivers/net/ethernet/intel/ice/ice_vf_lib.h @@ -59,6 +59,13 @@ struct ice_fdir_prof_info { u64 fdir_active_cnt; }; +struct ice_vf_qs_bw { + u32 committed; + u32 peak; + u16 queue_id; + u8 tc; +}; + /* VF operations */ struct ice_vf_ops { enum ice_disq_rst_src reset_type; @@ -140,6 +147,7 @@ struct ice_vf { struct devlink_port devlink_port; u16 num_msix; /* num of MSI-X configured on this VF */ + struct ice_vf_qs_bw qs_bw[ICE_MAX_RSS_QS_PER_VF]; }; /* Flags for controlling behavior of ice_reset_vf */ diff --git a/drivers/net/ethernet/intel/ice/ice_vf_mbx.c b/drivers/net/ethernet/intel/ice/ice_vf_mbx.c index 40cb4ba0789c..75c8113e58ee 100644 --- a/drivers/net/ethernet/intel/ice/ice_vf_mbx.c +++ b/drivers/net/ethernet/intel/ice/ice_vf_mbx.c @@ -211,6 +211,38 @@ ice_mbx_detect_malvf(struct ice_hw *hw, struct ice_mbx_vf_info *vf_info, } /** + * ice_mbx_vf_dec_trig_e830 - Decrements the VF mailbox queue counter + * @hw: pointer to the HW struct + * @event: pointer to the control queue receive event + * + * This function triggers to decrement the counter + * MBX_VF_IN_FLIGHT_MSGS_AT_PF_CNT when the driver replenishes + * the buffers at the PF mailbox queue. 
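On E830 the VF mailbox overflow policing moves into hardware via a per-VF in-flight message counter. The helper documented above decrements that counter as the PF replenishes its mailbox queue buffers; its body, and the companion helper that clears the counter by writing the current count back to the decrement trigger, follow below. A usage sketch based only on these kernel-docs:

    /* After replenishing PF mailbox queue buffers for this event: */
    ice_mbx_vf_dec_trig_e830(hw, event);       /* VF id from event->desc.retval */

    /* On VF creation and on VF reset: */
    ice_mbx_vf_clear_cnt_e830(hw, vf->vf_id);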
+ */ +void ice_mbx_vf_dec_trig_e830(const struct ice_hw *hw, + const struct ice_rq_event_info *event) +{ + u16 vfid = le16_to_cpu(event->desc.retval); + + wr32(hw, E830_MBX_VF_DEC_TRIG(vfid), 1); +} + +/** + * ice_mbx_vf_clear_cnt_e830 - Clear the VF mailbox queue count + * @hw: pointer to the HW struct + * @vf_id: VF ID in the PF space + * + * This function clears the counter MBX_VF_IN_FLIGHT_MSGS_AT_PF_CNT, and should + * be called when a VF is created and on VF reset. + */ +void ice_mbx_vf_clear_cnt_e830(const struct ice_hw *hw, u16 vf_id) +{ + u32 reg = rd32(hw, E830_MBX_VF_IN_FLIGHT_MSGS_AT_PF_CNT(vf_id)); + + wr32(hw, E830_MBX_VF_DEC_TRIG(vf_id), reg); +} + +/** * ice_mbx_vf_state_handler - Handle states of the overflow algorithm * @hw: pointer to the HW struct * @mbx_data: pointer to structure containing mailbox data diff --git a/drivers/net/ethernet/intel/ice/ice_vf_mbx.h b/drivers/net/ethernet/intel/ice/ice_vf_mbx.h index 44bc030d17e0..684de89e5c5e 100644 --- a/drivers/net/ethernet/intel/ice/ice_vf_mbx.h +++ b/drivers/net/ethernet/intel/ice/ice_vf_mbx.h @@ -19,6 +19,9 @@ ice_aq_send_msg_to_vf(struct ice_hw *hw, u16 vfid, u32 v_opcode, u32 v_retval, u8 *msg, u16 msglen, struct ice_sq_cd *cd); u32 ice_conv_link_speed_to_virtchnl(bool adv_link_support, u16 link_speed); +void ice_mbx_vf_dec_trig_e830(const struct ice_hw *hw, + const struct ice_rq_event_info *event); +void ice_mbx_vf_clear_cnt_e830(const struct ice_hw *hw, u16 vf_id); int ice_mbx_vf_state_handler(struct ice_hw *hw, struct ice_mbx_data *mbx_data, struct ice_mbx_vf_info *vf_info, bool *report_malvf); @@ -47,5 +50,11 @@ static inline void ice_mbx_init_snapshot(struct ice_hw *hw) { } +static inline void +ice_mbx_vf_dec_trig_e830(const struct ice_hw *hw, + const struct ice_rq_event_info *event) +{ +} + #endif /* CONFIG_PCI_IOV */ #endif /* _ICE_VF_MBX_H_ */ diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl.c b/drivers/net/ethernet/intel/ice/ice_virtchnl.c index 59f62306b9cb..aa2080747714 100644 --- a/drivers/net/ethernet/intel/ice/ice_virtchnl.c +++ b/drivers/net/ethernet/intel/ice/ice_virtchnl.c @@ -495,6 +495,9 @@ static int ice_vc_get_vf_res_msg(struct ice_vf *vf, u8 *msg) if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_USO) vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_USO; + if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_QOS) + vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_QOS; + vfres->num_vsis = 1; /* Tx and Rx queue are equal for VF */ vfres->num_queue_pairs = vsi->num_txq; @@ -1035,6 +1038,191 @@ error_param: } /** + * ice_vc_get_qos_caps - Get current QoS caps from PF + * @vf: pointer to the VF info + * + * Get VF's QoS capabilities, such as TC number, arbiter and + * bandwidth from PF. + * + * Return: 0 on success or negative error value. 
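In ice_vc_get_qos_caps() below, the driver inverts the DCB UP2TC table: prio_table[] maps each user priority to a TC, and the loop folds that into one user-priority bitmap per TC. An illustrative run with a hypothetical DCB configuration:

    /* Hypothetical UP2TC mapping: priorities 0-3 -> TC0, 4-7 -> TC1 */
    u8 prio_table[ICE_MAX_USER_PRIORITY] = { 0, 0, 0, 0, 1, 1, 1, 1 };
    u8 tc_prio[ICE_MAX_TRAFFIC_CLASS] = { 0 };
    int i;

    for (i = 0; i < ICE_MAX_USER_PRIORITY; i++)
            tc_prio[prio_table[i]] |= BIT(i);

    /* Result: tc_prio[0] == 0x0f, tc_prio[1] == 0xf0 */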
+ */ +static int ice_vc_get_qos_caps(struct ice_vf *vf) +{ + enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS; + struct virtchnl_qos_cap_list *cap_list = NULL; + u8 tc_prio[ICE_MAX_TRAFFIC_CLASS] = { 0 }; + struct virtchnl_qos_cap_elem *cfg = NULL; + struct ice_vsi_ctx *vsi_ctx; + struct ice_pf *pf = vf->pf; + struct ice_port_info *pi; + struct ice_vsi *vsi; + u8 numtc, tc; + u16 len = 0; + int ret, i; + + if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) { + v_ret = VIRTCHNL_STATUS_ERR_PARAM; + goto err; + } + + vsi = ice_get_vf_vsi(vf); + if (!vsi) { + v_ret = VIRTCHNL_STATUS_ERR_PARAM; + goto err; + } + + pi = pf->hw.port_info; + numtc = vsi->tc_cfg.numtc; + + vsi_ctx = ice_get_vsi_ctx(pi->hw, vf->lan_vsi_idx); + if (!vsi_ctx) { + v_ret = VIRTCHNL_STATUS_ERR_PARAM; + goto err; + } + + len = struct_size(cap_list, cap, numtc); + cap_list = kzalloc(len, GFP_KERNEL); + if (!cap_list) { + v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY; + len = 0; + goto err; + } + + cap_list->vsi_id = vsi->vsi_num; + cap_list->num_elem = numtc; + + /* Store the UP2TC configuration from DCB to a user priority bitmap + * of each TC. Each element of prio_of_tc represents one TC. Each + * bitmap indicates the user priorities belong to this TC. + */ + for (i = 0; i < ICE_MAX_USER_PRIORITY; i++) { + tc = pi->qos_cfg.local_dcbx_cfg.etscfg.prio_table[i]; + tc_prio[tc] |= BIT(i); + } + + for (i = 0; i < numtc; i++) { + cfg = &cap_list->cap[i]; + cfg->tc_num = i; + cfg->tc_prio = tc_prio[i]; + cfg->arbiter = pi->qos_cfg.local_dcbx_cfg.etscfg.tsatable[i]; + cfg->weight = VIRTCHNL_STRICT_WEIGHT; + cfg->type = VIRTCHNL_BW_SHAPER; + cfg->shaper.committed = vsi_ctx->sched.bw_t_info[i].cir_bw.bw; + cfg->shaper.peak = vsi_ctx->sched.bw_t_info[i].eir_bw.bw; + } + +err: + ret = ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_QOS_CAPS, v_ret, + (u8 *)cap_list, len); + kfree(cap_list); + return ret; +} + +/** + * ice_vf_cfg_qs_bw - Configure per queue bandwidth + * @vf: pointer to the VF info + * @num_queues: number of queues to be configured + * + * Configure per queue bandwidth. + * + * Return: 0 on success or negative error value. + */ +static int ice_vf_cfg_qs_bw(struct ice_vf *vf, u16 num_queues) +{ + struct ice_hw *hw = &vf->pf->hw; + struct ice_vsi *vsi; + int ret; + u16 i; + + vsi = ice_get_vf_vsi(vf); + if (!vsi) + return -EINVAL; + + for (i = 0; i < num_queues; i++) { + u32 p_rate, min_rate; + u8 tc; + + p_rate = vf->qs_bw[i].peak; + min_rate = vf->qs_bw[i].committed; + tc = vf->qs_bw[i].tc; + if (p_rate) + ret = ice_cfg_q_bw_lmt(hw->port_info, vsi->idx, tc, + vf->qs_bw[i].queue_id, + ICE_MAX_BW, p_rate); + else + ret = ice_cfg_q_bw_dflt_lmt(hw->port_info, vsi->idx, tc, + vf->qs_bw[i].queue_id, + ICE_MAX_BW); + if (ret) + return ret; + + if (min_rate) + ret = ice_cfg_q_bw_lmt(hw->port_info, vsi->idx, tc, + vf->qs_bw[i].queue_id, + ICE_MIN_BW, min_rate); + else + ret = ice_cfg_q_bw_dflt_lmt(hw->port_info, vsi->idx, tc, + vf->qs_bw[i].queue_id, + ICE_MIN_BW); + + if (ret) + return ret; + } + + return 0; +} + +/** + * ice_vf_cfg_q_quanta_profile - Configure quanta profile + * @vf: pointer to the VF info + * @quanta_prof_idx: pointer to the quanta profile index + * @quanta_size: quanta size to be set + * + * This function chooses available quanta profile and configures the register. + * The quanta profile is evenly divided by the number of device ports, and then + * available to the specific PF and VFs. The first profile for each PF is a + * reserved default profile. 
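To make the partitioning concrete, a worked example under assumed sizes (GLCOMM_QUANTA_PROF_MAX_INDEX is hardware-defined and not shown in this diff):

    /* Assume 16 quanta profiles total and 4 device functions:
     *   per_pf   = 16 / 4 = 4 profiles per function
     *   begin_id = per_pf * hw->logical_pf_id, e.g. 8 for logical PF 2
     * For logical PF 2, profile 8 is the reserved default used for
     * ICE_DFLT_QUANTA; custom sizes are handed out sequentially from
     * profile 9 (begin_id + 1 + n_used, while n_used < per_pf).
     */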
Only quanta size of the rest unused profile can be + * modified. + * + * Return: 0 on success or negative error value. + */ +static int ice_vf_cfg_q_quanta_profile(struct ice_vf *vf, u16 quanta_size, + u16 *quanta_prof_idx) +{ + const u16 n_desc = calc_quanta_desc(quanta_size); + struct ice_hw *hw = &vf->pf->hw; + const u16 n_cmd = 2 * n_desc; + struct ice_pf *pf = vf->pf; + u16 per_pf, begin_id; + u8 n_used; + u32 reg; + + begin_id = (GLCOMM_QUANTA_PROF_MAX_INDEX + 1) / hw->dev_caps.num_funcs * + hw->logical_pf_id; + + if (quanta_size == ICE_DFLT_QUANTA) { + *quanta_prof_idx = begin_id; + } else { + per_pf = (GLCOMM_QUANTA_PROF_MAX_INDEX + 1) / + hw->dev_caps.num_funcs; + n_used = pf->num_quanta_prof_used; + if (n_used < per_pf) { + *quanta_prof_idx = begin_id + 1 + n_used; + pf->num_quanta_prof_used++; + } else { + return -EINVAL; + } + } + + reg = FIELD_PREP(GLCOMM_QUANTA_PROF_QUANTA_SIZE_M, quanta_size) | + FIELD_PREP(GLCOMM_QUANTA_PROF_MAX_CMD_M, n_cmd) | + FIELD_PREP(GLCOMM_QUANTA_PROF_MAX_DESC_M, n_desc); + wr32(hw, GLCOMM_QUANTA_PROF(*quanta_prof_idx), reg); + + return 0; +} + +/** * ice_vc_cfg_promiscuous_mode_msg * @vf: pointer to the VF info * @msg: pointer to the msg buffer @@ -1636,6 +1824,141 @@ error_param: } /** + * ice_vc_cfg_q_bw - Configure per queue bandwidth + * @vf: pointer to the VF info + * @msg: pointer to the msg buffer which holds the command descriptor + * + * Configure VF queues bandwidth. + * + * Return: 0 on success or negative error value. + */ +static int ice_vc_cfg_q_bw(struct ice_vf *vf, u8 *msg) +{ + enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS; + struct virtchnl_queues_bw_cfg *qbw = + (struct virtchnl_queues_bw_cfg *)msg; + struct ice_vsi *vsi; + u16 i; + + if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states) || + !ice_vc_isvalid_vsi_id(vf, qbw->vsi_id)) { + v_ret = VIRTCHNL_STATUS_ERR_PARAM; + goto err; + } + + vsi = ice_get_vf_vsi(vf); + if (!vsi) { + v_ret = VIRTCHNL_STATUS_ERR_PARAM; + goto err; + } + + if (qbw->num_queues > ICE_MAX_RSS_QS_PER_VF || + qbw->num_queues > min_t(u16, vsi->alloc_txq, vsi->alloc_rxq)) { + dev_err(ice_pf_to_dev(vf->pf), "VF-%d trying to configure more than allocated number of queues: %d\n", + vf->vf_id, min_t(u16, vsi->alloc_txq, vsi->alloc_rxq)); + v_ret = VIRTCHNL_STATUS_ERR_PARAM; + goto err; + } + + for (i = 0; i < qbw->num_queues; i++) { + if (qbw->cfg[i].shaper.peak != 0 && vf->max_tx_rate != 0 && + qbw->cfg[i].shaper.peak > vf->max_tx_rate) + dev_warn(ice_pf_to_dev(vf->pf), "The maximum queue %d rate limit configuration may not take effect because the maximum TX rate for VF-%d is %d\n", + qbw->cfg[i].queue_id, vf->vf_id, + vf->max_tx_rate); + if (qbw->cfg[i].shaper.committed != 0 && vf->min_tx_rate != 0 && + qbw->cfg[i].shaper.committed < vf->min_tx_rate) + dev_warn(ice_pf_to_dev(vf->pf), "The minimum queue %d rate limit configuration may not take effect because the minimum TX rate for VF-%d is %d\n", + qbw->cfg[i].queue_id, vf->vf_id, + vf->max_tx_rate); + } + + for (i = 0; i < qbw->num_queues; i++) { + vf->qs_bw[i].queue_id = qbw->cfg[i].queue_id; + vf->qs_bw[i].peak = qbw->cfg[i].shaper.peak; + vf->qs_bw[i].committed = qbw->cfg[i].shaper.committed; + vf->qs_bw[i].tc = qbw->cfg[i].tc; + } + + if (ice_vf_cfg_qs_bw(vf, qbw->num_queues)) + v_ret = VIRTCHNL_STATUS_ERR_PARAM; + +err: + /* send the response to the VF */ + return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_QUEUE_BW, + v_ret, NULL, 0); +} + +/** + * ice_vc_cfg_q_quanta - Configure per queue quanta + * @vf: pointer to the VF info + * @msg: 
pointer to the msg buffer which holds the command descriptor + * + * Configure VF queues quanta. + * + * Return: 0 on success or negative error value. + */ +static int ice_vc_cfg_q_quanta(struct ice_vf *vf, u8 *msg) +{ + enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS; + u16 quanta_prof_id, quanta_size, start_qid, end_qid, i; + struct virtchnl_quanta_cfg *qquanta = + (struct virtchnl_quanta_cfg *)msg; + struct ice_vsi *vsi; + int ret; + + if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) { + v_ret = VIRTCHNL_STATUS_ERR_PARAM; + goto err; + } + + vsi = ice_get_vf_vsi(vf); + if (!vsi) { + v_ret = VIRTCHNL_STATUS_ERR_PARAM; + goto err; + } + + end_qid = qquanta->queue_select.start_queue_id + + qquanta->queue_select.num_queues; + if (end_qid > ICE_MAX_RSS_QS_PER_VF || + end_qid > min_t(u16, vsi->alloc_txq, vsi->alloc_rxq)) { + dev_err(ice_pf_to_dev(vf->pf), "VF-%d trying to configure more than allocated number of queues: %d\n", + vf->vf_id, min_t(u16, vsi->alloc_txq, vsi->alloc_rxq)); + v_ret = VIRTCHNL_STATUS_ERR_PARAM; + goto err; + } + + quanta_size = qquanta->quanta_size; + if (quanta_size > ICE_MAX_QUANTA_SIZE || + quanta_size < ICE_MIN_QUANTA_SIZE) { + v_ret = VIRTCHNL_STATUS_ERR_PARAM; + goto err; + } + + if (quanta_size % 64) { + dev_err(ice_pf_to_dev(vf->pf), "quanta size should be the product of 64\n"); + v_ret = VIRTCHNL_STATUS_ERR_PARAM; + goto err; + } + + ret = ice_vf_cfg_q_quanta_profile(vf, quanta_size, + &quanta_prof_id); + if (ret) { + v_ret = VIRTCHNL_STATUS_ERR_NOT_SUPPORTED; + goto err; + } + + start_qid = qquanta->queue_select.start_queue_id; + for (i = start_qid; i < end_qid; i++) + vsi->tx_rings[i]->quanta_prof_id = quanta_prof_id; + +err: + /* send the response to the VF */ + return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_QUANTA, + v_ret, NULL, 0); +} + +/** * ice_vc_cfg_qs_msg * @vf: pointer to the VF info * @msg: pointer to the msg buffer @@ -1715,8 +2038,8 @@ static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg) /* copy Tx queue info from VF into VSI */ if (qpi->txq.ring_len > 0) { - vsi->tx_rings[i]->dma = qpi->txq.dma_ring_addr; - vsi->tx_rings[i]->count = qpi->txq.ring_len; + vsi->tx_rings[q_idx]->dma = qpi->txq.dma_ring_addr; + vsi->tx_rings[q_idx]->count = qpi->txq.ring_len; /* Disable any existing queue first */ if (ice_vf_vsi_dis_single_txq(vf, vsi, q_idx)) @@ -1725,7 +2048,7 @@ static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg) /* Configure a queue with the requested settings */ if (ice_vsi_cfg_single_txq(vsi, vsi->tx_rings, q_idx)) { dev_warn(ice_pf_to_dev(pf), "VF-%d failed to configure TX queue %d\n", - vf->vf_id, i); + vf->vf_id, q_idx); goto error_param; } } @@ -1733,39 +2056,37 @@ static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg) /* copy Rx queue info from VF into VSI */ if (qpi->rxq.ring_len > 0) { u16 max_frame_size = ice_vc_get_max_frame_size(vf); + struct ice_rx_ring *ring = vsi->rx_rings[q_idx]; u32 rxdid; - vsi->rx_rings[i]->dma = qpi->rxq.dma_ring_addr; - vsi->rx_rings[i]->count = qpi->rxq.ring_len; + ring->dma = qpi->rxq.dma_ring_addr; + ring->count = qpi->rxq.ring_len; if (qpi->rxq.crc_disable) - vsi->rx_rings[q_idx]->flags |= - ICE_RX_FLAGS_CRC_STRIP_DIS; + ring->flags |= ICE_RX_FLAGS_CRC_STRIP_DIS; else - vsi->rx_rings[q_idx]->flags &= - ~ICE_RX_FLAGS_CRC_STRIP_DIS; + ring->flags &= ~ICE_RX_FLAGS_CRC_STRIP_DIS; if (qpi->rxq.databuffer_size != 0 && (qpi->rxq.databuffer_size > ((16 * 1024) - 128) || qpi->rxq.databuffer_size < 1024)) goto error_param; - vsi->rx_buf_len = qpi->rxq.databuffer_size; - 
vsi->rx_rings[i]->rx_buf_len = vsi->rx_buf_len; + ring->rx_buf_len = qpi->rxq.databuffer_size; if (qpi->rxq.max_pkt_size > max_frame_size || qpi->rxq.max_pkt_size < 64) goto error_param; - vsi->max_frame = qpi->rxq.max_pkt_size; + ring->max_frame = qpi->rxq.max_pkt_size; /* add space for the port VLAN since the VF driver is * not expected to account for it in the MTU * calculation */ if (ice_vf_is_port_vlan_ena(vf)) - vsi->max_frame += VLAN_HLEN; + ring->max_frame += VLAN_HLEN; if (ice_vsi_cfg_single_rxq(vsi, q_idx)) { dev_warn(ice_pf_to_dev(pf), "VF-%d failed to configure RX queue %d\n", - vf->vf_id, i); + vf->vf_id, q_idx); goto error_param; } @@ -3821,6 +4142,9 @@ static const struct ice_virtchnl_ops ice_virtchnl_dflt_ops = { .dis_vlan_stripping_v2_msg = ice_vc_dis_vlan_stripping_v2_msg, .ena_vlan_insertion_v2_msg = ice_vc_ena_vlan_insertion_v2_msg, .dis_vlan_insertion_v2_msg = ice_vc_dis_vlan_insertion_v2_msg, + .get_qos_caps = ice_vc_get_qos_caps, + .cfg_q_bw = ice_vc_cfg_q_bw, + .cfg_q_quanta = ice_vc_cfg_q_quanta, }; /** @@ -4009,8 +4333,10 @@ ice_is_malicious_vf(struct ice_vf *vf, struct ice_mbx_data *mbxdata) * @event: pointer to the AQ event * @mbxdata: information used to detect VF attempting mailbox overflow * - * called from the common asq/arq handler to - * process request from VF + * Called from the common asq/arq handler to process request from VF. When this + * flow is used for devices with hardware VF to PF message queue overflow + * support (ICE_F_MBX_LIMIT) mbxdata is set to NULL and ice_is_malicious_vf + * check is skipped. */ void ice_vc_process_vf_msg(struct ice_pf *pf, struct ice_rq_event_info *event, struct ice_mbx_data *mbxdata) @@ -4036,7 +4362,7 @@ void ice_vc_process_vf_msg(struct ice_pf *pf, struct ice_rq_event_info *event, mutex_lock(&vf->cfg_lock); /* Check if the VF is trying to overflow the mailbox */ - if (ice_is_malicious_vf(vf, mbxdata)) + if (mbxdata && ice_is_malicious_vf(vf, mbxdata)) goto finish; /* Check if VF is disabled. 
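With hardware-policed mailbox limits (ICE_F_MBX_LIMIT), the updated kernel-doc above says callers pass a NULL mbxdata, which skips the ice_is_malicious_vf() software check. A hypothetical caller sketch consistent with that doc:

    if (ice_is_feature_supported(pf, ICE_F_MBX_LIMIT))
            ice_vc_process_vf_msg(pf, &event, NULL);      /* HW enforces the limit */
    else
            ice_vc_process_vf_msg(pf, &event, &mbxdata);  /* SW overflow detection */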
*/ @@ -4177,6 +4503,15 @@ error_handler: case VIRTCHNL_OP_DISABLE_VLAN_INSERTION_V2: err = ops->dis_vlan_insertion_v2_msg(vf, msg); break; + case VIRTCHNL_OP_GET_QOS_CAPS: + err = ops->get_qos_caps(vf); + break; + case VIRTCHNL_OP_CONFIG_QUEUE_BW: + err = ops->cfg_q_bw(vf, msg); + break; + case VIRTCHNL_OP_CONFIG_QUANTA: + err = ops->cfg_q_quanta(vf, msg); + break; case VIRTCHNL_OP_UNKNOWN: default: dev_err(dev, "Unsupported opcode %d from VF %d\n", v_opcode, diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl.h b/drivers/net/ethernet/intel/ice/ice_virtchnl.h index 3a4115869153..0c629aef9baf 100644 --- a/drivers/net/ethernet/intel/ice/ice_virtchnl.h +++ b/drivers/net/ethernet/intel/ice/ice_virtchnl.h @@ -13,6 +13,13 @@ /* Restrict number of MAC Addr and VLAN that non-trusted VF can programmed */ #define ICE_MAX_VLAN_PER_VF 8 +#define ICE_DFLT_QUANTA 1024 +#define ICE_MAX_QUANTA_SIZE 4096 +#define ICE_MIN_QUANTA_SIZE 256 + +#define calc_quanta_desc(x) \ + max_t(u16, 12, min_t(u16, 63, (((x) + 66) / 132) * 2 + 4)) + /* MAC filters: 1 is reserved for the VF's default/perm_addr/LAA MAC, 1 for * broadcast, and 16 for additional unicast/multicast filters */ @@ -61,6 +68,10 @@ struct ice_virtchnl_ops { int (*dis_vlan_stripping_v2_msg)(struct ice_vf *vf, u8 *msg); int (*ena_vlan_insertion_v2_msg)(struct ice_vf *vf, u8 *msg); int (*dis_vlan_insertion_v2_msg)(struct ice_vf *vf, u8 *msg); + int (*get_qos_caps)(struct ice_vf *vf); + int (*cfg_q_tc_map)(struct ice_vf *vf, u8 *msg); + int (*cfg_q_bw)(struct ice_vf *vf, u8 *msg); + int (*cfg_q_quanta)(struct ice_vf *vf, u8 *msg); }; #ifdef CONFIG_PCI_IOV diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_allowlist.c b/drivers/net/ethernet/intel/ice/ice_virtchnl_allowlist.c index d796dbd2a440..c105a82ee136 100644 --- a/drivers/net/ethernet/intel/ice/ice_virtchnl_allowlist.c +++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_allowlist.c @@ -84,6 +84,11 @@ static const u32 fdir_pf_allowlist_opcodes[] = { VIRTCHNL_OP_ADD_FDIR_FILTER, VIRTCHNL_OP_DEL_FDIR_FILTER, }; +static const u32 tc_allowlist_opcodes[] = { + VIRTCHNL_OP_GET_QOS_CAPS, VIRTCHNL_OP_CONFIG_QUEUE_BW, + VIRTCHNL_OP_CONFIG_QUANTA, +}; + struct allowlist_opcode_info { const u32 *opcodes; size_t size; @@ -104,6 +109,7 @@ static const struct allowlist_opcode_info allowlist_opcodes[] = { ALLOW_ITEM(VIRTCHNL_VF_OFFLOAD_ADV_RSS_PF, adv_rss_pf_allowlist_opcodes), ALLOW_ITEM(VIRTCHNL_VF_OFFLOAD_FDIR_PF, fdir_pf_allowlist_opcodes), ALLOW_ITEM(VIRTCHNL_VF_OFFLOAD_VLAN_V2, vlan_v2_allowlist_opcodes), + ALLOW_ITEM(VIRTCHNL_VF_OFFLOAD_QOS, tc_allowlist_opcodes), }; /** diff --git a/drivers/net/ethernet/intel/idpf/idpf_txrx.h b/drivers/net/ethernet/intel/idpf/idpf_txrx.h index f0537826f840..9c1fe84108ed 100644 --- a/drivers/net/ethernet/intel/idpf/idpf_txrx.h +++ b/drivers/net/ethernet/intel/idpf/idpf_txrx.h @@ -438,7 +438,8 @@ struct idpf_q_vector { __cacheline_group_end_aligned(cold); }; libeth_cacheline_set_assert(struct idpf_q_vector, 112, - 424 + 2 * sizeof(struct dim), + 24 + sizeof(struct napi_struct) + + 2 * sizeof(struct dim), 8 + sizeof(cpumask_var_t)); struct idpf_rx_queue_stats { diff --git a/drivers/net/ethernet/intel/igb/e1000_mac.h b/drivers/net/ethernet/intel/igb/e1000_mac.h index 6e110f28f922..529b7d18b662 100644 --- a/drivers/net/ethernet/intel/igb/e1000_mac.h +++ b/drivers/net/ethernet/intel/igb/e1000_mac.h @@ -63,6 +63,5 @@ enum e1000_mng_mode { #define E1000_MNG_DHCP_COOKIE_STATUS_VLAN 0x2 -void e1000_init_function_pointers_82575(struct e1000_hw *hw); #endif diff --git 
a/drivers/net/ethernet/intel/igb/e1000_nvm.h b/drivers/net/ethernet/intel/igb/e1000_nvm.h index 091cddf4ada8..4f652ab713b3 100644 --- a/drivers/net/ethernet/intel/igb/e1000_nvm.h +++ b/drivers/net/ethernet/intel/igb/e1000_nvm.h @@ -7,7 +7,6 @@ s32 igb_acquire_nvm(struct e1000_hw *hw); void igb_release_nvm(struct e1000_hw *hw); s32 igb_read_mac_addr(struct e1000_hw *hw); -s32 igb_read_part_num(struct e1000_hw *hw, u32 *part_num); s32 igb_read_part_string(struct e1000_hw *hw, u8 *part_num, u32 part_num_size); s32 igb_read_nvm_eerd(struct e1000_hw *hw, u16 offset, u16 words, u16 *data); diff --git a/drivers/net/ethernet/korina.c b/drivers/net/ethernet/korina.c index 81cf3361a1e5..87c7e6251a4f 100644 --- a/drivers/net/ethernet/korina.c +++ b/drivers/net/ethernet/korina.c @@ -1403,7 +1403,7 @@ static struct platform_driver korina_driver = { .of_match_table = of_match_ptr(korina_match), }, .probe = korina_probe, - .remove_new = korina_remove, + .remove = korina_remove, }; module_platform_driver(korina_driver); diff --git a/drivers/net/ethernet/lantiq_etop.c b/drivers/net/ethernet/lantiq_etop.c index 7179271f63b6..660dff5426e7 100644 --- a/drivers/net/ethernet/lantiq_etop.c +++ b/drivers/net/ethernet/lantiq_etop.c @@ -734,7 +734,7 @@ static void ltq_etop_remove(struct platform_device *pdev) } static struct platform_driver ltq_mii_driver = { - .remove_new = ltq_etop_remove, + .remove = ltq_etop_remove, .driver = { .name = "ltq_etop", }, diff --git a/drivers/net/ethernet/lantiq_xrx200.c b/drivers/net/ethernet/lantiq_xrx200.c index 07904a528f21..b8766fb7a844 100644 --- a/drivers/net/ethernet/lantiq_xrx200.c +++ b/drivers/net/ethernet/lantiq_xrx200.c @@ -669,7 +669,7 @@ MODULE_DEVICE_TABLE(of, xrx200_match); static struct platform_driver xrx200_driver = { .probe = xrx200_probe, - .remove_new = xrx200_remove, + .remove = xrx200_remove, .driver = { .name = "lantiq,xrx200-net", .of_match_table = xrx200_match, diff --git a/drivers/net/ethernet/litex/litex_liteeth.c b/drivers/net/ethernet/litex/litex_liteeth.c index ff54fbe41bcc..829a4b828f8e 100644 --- a/drivers/net/ethernet/litex/litex_liteeth.c +++ b/drivers/net/ethernet/litex/litex_liteeth.c @@ -309,7 +309,7 @@ MODULE_DEVICE_TABLE(of, liteeth_of_match); static struct platform_driver liteeth_driver = { .probe = liteeth_probe, - .remove_new = liteeth_remove, + .remove = liteeth_remove, .driver = { .name = DRV_NAME, .of_match_table = liteeth_of_match, diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c index 9e80899546d9..a06048719e84 100644 --- a/drivers/net/ethernet/marvell/mv643xx_eth.c +++ b/drivers/net/ethernet/marvell/mv643xx_eth.c @@ -1698,13 +1698,9 @@ static void mv643xx_eth_get_strings(struct net_device *dev, { int i; - if (stringset == ETH_SS_STATS) { - for (i = 0; i < ARRAY_SIZE(mv643xx_eth_stats); i++) { - memcpy(data + i * ETH_GSTRING_LEN, - mv643xx_eth_stats[i].stat_string, - ETH_GSTRING_LEN); - } - } + if (stringset == ETH_SS_STATS) + for (i = 0; i < ARRAY_SIZE(mv643xx_eth_stats); i++) + ethtool_puts(&data, mv643xx_eth_stats[i].stat_string); } static void mv643xx_eth_get_ethtool_stats(struct net_device *dev, @@ -2843,29 +2839,24 @@ static int mv643xx_eth_shared_probe(struct platform_device *pdev) struct mv643xx_eth_shared_platform_data *pd; struct mv643xx_eth_shared_private *msp; const struct mbus_dram_target_info *dram; - struct resource *res; int ret; if (!mv643xx_eth_version_printed++) pr_notice("MV-643xx 10/100/1000 ethernet driver version %s\n", mv643xx_eth_driver_version); - res = 
platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (res == NULL) - return -EINVAL; - msp = devm_kzalloc(&pdev->dev, sizeof(*msp), GFP_KERNEL); if (msp == NULL) return -ENOMEM; platform_set_drvdata(pdev, msp); - msp->base = devm_ioremap(&pdev->dev, res->start, resource_size(res)); - if (msp->base == NULL) - return -ENOMEM; + msp->base = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(msp->base)) + return PTR_ERR(msp->base); - msp->clk = devm_clk_get(&pdev->dev, NULL); - if (!IS_ERR(msp->clk)) - clk_prepare_enable(msp->clk); + msp->clk = devm_clk_get_optional_enabled(&pdev->dev, NULL); + if (IS_ERR(msp->clk)) + return PTR_ERR(msp->clk); /* * (Re-)program MBUS remapping windows if we are asked to. @@ -2876,7 +2867,7 @@ static int mv643xx_eth_shared_probe(struct platform_device *pdev) ret = mv643xx_eth_shared_of_probe(pdev); if (ret) - goto err_put_clk; + return ret; pd = dev_get_platdata(&pdev->dev); msp->tx_csum_limit = (pd != NULL && pd->tx_csum_limit) ? @@ -2884,25 +2875,16 @@ static int mv643xx_eth_shared_probe(struct platform_device *pdev) infer_hw_params(msp); return 0; - -err_put_clk: - if (!IS_ERR(msp->clk)) - clk_disable_unprepare(msp->clk); - return ret; } static void mv643xx_eth_shared_remove(struct platform_device *pdev) { - struct mv643xx_eth_shared_private *msp = platform_get_drvdata(pdev); - mv643xx_eth_shared_of_remove(); - if (!IS_ERR(msp->clk)) - clk_disable_unprepare(msp->clk); } static struct platform_driver mv643xx_eth_shared_driver = { .probe = mv643xx_eth_shared_probe, - .remove_new = mv643xx_eth_shared_remove, + .remove = mv643xx_eth_shared_remove, .driver = { .name = MV643XX_ETH_SHARED_NAME, .of_match_table = of_match_ptr(mv643xx_eth_shared_ids), @@ -3307,7 +3289,7 @@ static void mv643xx_eth_shutdown(struct platform_device *pdev) static struct platform_driver mv643xx_eth_driver = { .probe = mv643xx_eth_probe, - .remove_new = mv643xx_eth_remove, + .remove = mv643xx_eth_remove, .shutdown = mv643xx_eth_shutdown, .driver = { .name = MV643XX_ETH_NAME, diff --git a/drivers/net/ethernet/marvell/mvmdio.c b/drivers/net/ethernet/marvell/mvmdio.c index e1d003fdbc2e..3f4447e68888 100644 --- a/drivers/net/ethernet/marvell/mvmdio.c +++ b/drivers/net/ethernet/marvell/mvmdio.c @@ -348,13 +348,12 @@ static int orion_mdio_probe(struct platform_device *pdev) if (type == BUS_TYPE_XSMI) orion_mdio_xsmi_set_mdc_freq(bus); } else { - dev->clk[0] = clk_get(&pdev->dev, NULL); - if (PTR_ERR(dev->clk[0]) == -EPROBE_DEFER) { - ret = -EPROBE_DEFER; + dev->clk[0] = clk_get_optional(&pdev->dev, NULL); + if (IS_ERR(dev->clk[0])) { + ret = PTR_ERR(dev->clk[0]); goto out_clk; } - if (!IS_ERR(dev->clk[0])) - clk_prepare_enable(dev->clk[0]); + clk_prepare_enable(dev->clk[0]); } @@ -422,8 +421,6 @@ static void orion_mdio_remove(struct platform_device *pdev) mdiobus_unregister(bus); for (i = 0; i < ARRAY_SIZE(dev->clk); i++) { - if (IS_ERR(dev->clk[i])) - break; clk_disable_unprepare(dev->clk[i]); clk_put(dev->clk[i]); } @@ -447,7 +444,7 @@ MODULE_DEVICE_TABLE(acpi, orion_mdio_acpi_match); static struct platform_driver orion_mdio_driver = { .probe = orion_mdio_probe, - .remove_new = orion_mdio_remove, + .remove = orion_mdio_remove, .driver = { .name = "orion-mdio", .of_match_table = orion_mdio_match, diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c index d72b2d5f96db..1fb285fa0bdb 100644 --- a/drivers/net/ethernet/marvell/mvneta.c +++ b/drivers/net/ethernet/marvell/mvneta.c @@ -4795,11 +4795,9 @@ static void mvneta_ethtool_get_strings(struct net_device 
*netdev, u32 sset, int i; for (i = 0; i < ARRAY_SIZE(mvneta_statistics); i++) - memcpy(data + i * ETH_GSTRING_LEN, - mvneta_statistics[i].name, ETH_GSTRING_LEN); + ethtool_puts(&data, mvneta_statistics[i].name); if (!pp->bm_priv) { - data += ETH_GSTRING_LEN * ARRAY_SIZE(mvneta_statistics); page_pool_ethtool_stats_get_strings(data); } } @@ -5883,7 +5881,7 @@ MODULE_DEVICE_TABLE(of, mvneta_match); static struct platform_driver mvneta_driver = { .probe = mvneta_probe, - .remove_new = mvneta_remove, + .remove = mvneta_remove, .driver = { .name = MVNETA_DRIVER_NAME, .of_match_table = mvneta_match, diff --git a/drivers/net/ethernet/marvell/mvneta_bm.c b/drivers/net/ethernet/marvell/mvneta_bm.c index 3f46a0fed048..6bb380494919 100644 --- a/drivers/net/ethernet/marvell/mvneta_bm.c +++ b/drivers/net/ethernet/marvell/mvneta_bm.c @@ -485,7 +485,7 @@ MODULE_DEVICE_TABLE(of, mvneta_bm_match); static struct platform_driver mvneta_bm_driver = { .probe = mvneta_bm_probe, - .remove_new = mvneta_bm_remove, + .remove = mvneta_bm_remove, .driver = { .name = MVNETA_BM_DRIVER_NAME, .of_match_table = mvneta_bm_match, diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c index 3880dcc0418b..103632ba78a2 100644 --- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c @@ -7774,7 +7774,7 @@ MODULE_DEVICE_TABLE(acpi, mvpp2_acpi_match); static struct platform_driver mvpp2_driver = { .probe = mvpp2_probe, - .remove_new = mvpp2_remove, + .remove = mvpp2_remove, .driver = { .name = MVPP2_DRIVER_NAME, .of_match_table = mvpp2_match, diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c index 87ba77e5026a..8c700ee4a82b 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c @@ -663,16 +663,16 @@ static ssize_t rvu_dbg_lmtst_map_table_display(struct file *filp, RVU_DEBUG_FOPS(lmtst_map_table, lmtst_map_table_display, NULL); -static void get_lf_str_list(struct rvu_block block, int pcifunc, +static void get_lf_str_list(const struct rvu_block *block, int pcifunc, char *lfs) { - int lf = 0, seq = 0, len = 0, prev_lf = block.lf.max; + int lf = 0, seq = 0, len = 0, prev_lf = block->lf.max; - for_each_set_bit(lf, block.lf.bmap, block.lf.max) { - if (lf >= block.lf.max) + for_each_set_bit(lf, block->lf.bmap, block->lf.max) { + if (lf >= block->lf.max) break; - if (block.fn_map[lf] != pcifunc) + if (block->fn_map[lf] != pcifunc) continue; if (lf == prev_lf + 1) { @@ -719,7 +719,7 @@ static int get_max_column_width(struct rvu *rvu) if (!strlen(block.name)) continue; - get_lf_str_list(block, pcifunc, buf); + get_lf_str_list(&block, pcifunc, buf); if (lf_str_size <= strlen(buf)) lf_str_size = strlen(buf) + 1; } @@ -803,7 +803,7 @@ static ssize_t rvu_dbg_rsrc_attach_status(struct file *filp, continue; len = 0; lfs[len] = '\0'; - get_lf_str_list(block, pcifunc, lfs); + get_lf_str_list(&block, pcifunc, lfs); if (strlen(lfs)) flag = 1; diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c index c1c99d7054f8..7417087b6db5 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c @@ -203,6 +203,11 @@ int cn10k_alloc_leaf_profile(struct otx2_nic *pfvf, u16 *leaf) rsp = (struct nix_bandprof_alloc_rsp *) otx2_mbox_get_rsp(&pfvf->mbox.mbox, 0, &req->hdr); + if 
(IS_ERR(rsp)) { + rc = PTR_ERR(rsp); + goto out; + } + if (!rsp->prof_count[BAND_PROF_LEAF_LAYER]) { rc = -EIO; goto out; diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c index 87d5776e3b88..7510a918d942 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c @@ -1837,6 +1837,10 @@ u16 otx2_get_max_mtu(struct otx2_nic *pfvf) if (!rc) { rsp = (struct nix_hw_info *) otx2_mbox_get_rsp(&pfvf->mbox.mbox, 0, &req->hdr); + if (IS_ERR(rsp)) { + rc = PTR_ERR(rsp); + goto out; + } /* HW counts VLAN insertion bytes (8 for double tag) * irrespective of whether SQE is requesting to insert VLAN diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_dcbnl.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_dcbnl.c index aa01110f04a3..294fba58b670 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_dcbnl.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_dcbnl.c @@ -315,6 +315,11 @@ int otx2_config_priority_flow_ctrl(struct otx2_nic *pfvf) if (!otx2_sync_mbox_msg(&pfvf->mbox)) { rsp = (struct cgx_pfc_rsp *) otx2_mbox_get_rsp(&pfvf->mbox.mbox, 0, &req->hdr); + if (IS_ERR(rsp)) { + err = PTR_ERR(rsp); + goto unlock; + } + if (req->rx_pause != rsp->rx_pause || req->tx_pause != rsp->tx_pause) { dev_warn(pfvf->dev, "Failed to config PFC\n"); diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_dmac_flt.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_dmac_flt.c index 80d853b343f9..2046dd0da00d 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_dmac_flt.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_dmac_flt.c @@ -28,6 +28,11 @@ static int otx2_dmacflt_do_add(struct otx2_nic *pf, const u8 *mac, if (!err) { rsp = (struct cgx_mac_addr_add_rsp *) otx2_mbox_get_rsp(&pf->mbox.mbox, 0, &req->hdr); + if (IS_ERR(rsp)) { + mutex_unlock(&pf->mbox.lock); + return PTR_ERR(rsp); + } + *dmac_index = rsp->index; } @@ -200,6 +205,10 @@ int otx2_dmacflt_update(struct otx2_nic *pf, u8 *mac, u32 bit_pos) rsp = (struct cgx_mac_addr_update_rsp *) otx2_mbox_get_rsp(&pf->mbox.mbox, 0, &req->hdr); + if (IS_ERR(rsp)) { + rc = PTR_ERR(rsp); + goto out; + } pf->flow_cfg->bmap_to_dmacindex[bit_pos] = rsp->index; diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c index 32468c663605..5197ce816581 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c @@ -343,6 +343,11 @@ static void otx2_get_pauseparam(struct net_device *netdev, if (!otx2_sync_mbox_msg(&pfvf->mbox)) { rsp = (struct cgx_pause_frm_cfg *) otx2_mbox_get_rsp(&pfvf->mbox.mbox, 0, &req->hdr); + if (IS_ERR(rsp)) { + mutex_unlock(&pfvf->mbox.lock); + return; + } + pause->rx_pause = rsp->rx_pause; pause->tx_pause = rsp->tx_pause; } @@ -1072,6 +1077,11 @@ static int otx2_set_fecparam(struct net_device *netdev, rsp = (struct fec_mode *)otx2_mbox_get_rsp(&pfvf->mbox.mbox, 0, &req->hdr); + if (IS_ERR(rsp)) { + err = PTR_ERR(rsp); + goto end; + } + if (rsp->fec >= 0) pfvf->linfo.fec = rsp->fec; else diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c index 98c31a16c70b..58720a161ee2 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c @@ -119,6 +119,8 @@ int 
otx2_alloc_mcam_entries(struct otx2_nic *pfvf, u16 count) rsp = (struct npc_mcam_alloc_entry_rsp *)otx2_mbox_get_rsp (&pfvf->mbox.mbox, 0, &req->hdr); + if (IS_ERR(rsp)) + goto exit; for (ent = 0; ent < rsp->count; ent++) flow_cfg->flow_ent[ent + allocated] = rsp->entry_list[ent]; @@ -197,6 +199,10 @@ int otx2_mcam_entry_init(struct otx2_nic *pfvf) rsp = (struct npc_mcam_alloc_entry_rsp *)otx2_mbox_get_rsp (&pfvf->mbox.mbox, 0, &req->hdr); + if (IS_ERR(rsp)) { + mutex_unlock(&pfvf->mbox.lock); + return PTR_ERR(rsp); + } if (rsp->count != req->count) { netdev_info(pfvf->netdev, @@ -232,6 +238,10 @@ int otx2_mcam_entry_init(struct otx2_nic *pfvf) frsp = (struct npc_get_field_status_rsp *)otx2_mbox_get_rsp (&pfvf->mbox.mbox, 0, &freq->hdr); + if (IS_ERR(frsp)) { + mutex_unlock(&pfvf->mbox.lock); + return PTR_ERR(frsp); + } if (frsp->enable) { pfvf->flags |= OTX2_FLAG_RX_VLAN_SUPPORT; diff --git a/drivers/net/ethernet/marvell/pxa168_eth.c b/drivers/net/ethernet/marvell/pxa168_eth.c index 1a59c952aa01..fe38426ec42d 100644 --- a/drivers/net/ethernet/marvell/pxa168_eth.c +++ b/drivers/net/ethernet/marvell/pxa168_eth.c @@ -1579,7 +1579,7 @@ MODULE_DEVICE_TABLE(of, pxa168_eth_of_match); static struct platform_driver pxa168_eth_driver = { .probe = pxa168_eth_probe, - .remove_new = pxa168_eth_remove, + .remove = pxa168_eth_remove, .shutdown = pxa168_eth_shutdown, .resume = pxa168_eth_resume, .suspend = pxa168_eth_suspend, diff --git a/drivers/net/ethernet/mediatek/airoha_eth.c b/drivers/net/ethernet/mediatek/airoha_eth.c index 2c26eb185283..f463a505f5ba 100644 --- a/drivers/net/ethernet/mediatek/airoha_eth.c +++ b/drivers/net/ethernet/mediatek/airoha_eth.c @@ -554,7 +554,7 @@ #define FWD_DSCP_LOW_THR_MASK GENMASK(17, 0) #define REG_EGRESS_RATE_METER_CFG 0x100c -#define EGRESS_RATE_METER_EN_MASK BIT(29) +#define EGRESS_RATE_METER_EN_MASK BIT(31) #define EGRESS_RATE_METER_EQ_RATE_EN_MASK BIT(17) #define EGRESS_RATE_METER_WINDOW_SZ_MASK GENMASK(16, 12) #define EGRESS_RATE_METER_TIMESLICE_MASK GENMASK(10, 0) @@ -1116,17 +1116,23 @@ static void airoha_fe_set_pse_queue_rsv_pages(struct airoha_eth *eth, PSE_CFG_WR_EN_MASK | PSE_CFG_OQRSV_SEL_MASK); } +static u32 airoha_fe_get_pse_all_rsv(struct airoha_eth *eth) +{ + u32 val = airoha_fe_rr(eth, REG_FE_PSE_BUF_SET); + + return FIELD_GET(PSE_ALLRSV_MASK, val); +} + static int airoha_fe_set_pse_oq_rsv(struct airoha_eth *eth, u32 port, u32 queue, u32 val) { - u32 orig_val, tmp, all_rsv, fq_limit; + u32 orig_val = airoha_fe_get_pse_queue_rsv_pages(eth, port, queue); + u32 tmp, all_rsv, fq_limit; airoha_fe_set_pse_queue_rsv_pages(eth, port, queue, val); /* modify all rsv */ - orig_val = airoha_fe_get_pse_queue_rsv_pages(eth, port, queue); - tmp = airoha_fe_rr(eth, REG_FE_PSE_BUF_SET); - all_rsv = FIELD_GET(PSE_ALLRSV_MASK, tmp); + all_rsv = airoha_fe_get_pse_all_rsv(eth); all_rsv += (val - orig_val); airoha_fe_rmw(eth, REG_FE_PSE_BUF_SET, PSE_ALLRSV_MASK, FIELD_PREP(PSE_ALLRSV_MASK, all_rsv)); @@ -1166,11 +1172,13 @@ static void airoha_fe_pse_ports_init(struct airoha_eth *eth) [FE_PSE_PORT_GDM4] = 2, [FE_PSE_PORT_CDM5] = 2, }; + u32 all_rsv; int q; + all_rsv = airoha_fe_get_pse_all_rsv(eth); /* hw misses PPE2 oq rsv */ - airoha_fe_set(eth, REG_FE_PSE_BUF_SET, - PSE_RSV_PAGES * pse_port_num_queues[FE_PSE_PORT_PPE2]); + all_rsv += PSE_RSV_PAGES * pse_port_num_queues[FE_PSE_PORT_PPE2]; + airoha_fe_set(eth, REG_FE_PSE_BUF_SET, all_rsv); /* CMD1 */ for (q = 0; q < pse_port_num_queues[FE_PSE_PORT_CDM1]; q++) @@ -1363,7 +1371,8 @@ static int airoha_fe_init(struct 
airoha_eth *eth) airoha_fe_set(eth, REG_GDM_MISC_CFG, GDM2_RDM_ACK_WAIT_PREF_MASK | GDM2_CHN_VLD_MODE_MASK); - airoha_fe_rmw(eth, REG_CDM2_FWD_CFG, CDM2_OAM_QSEL_MASK, 15); + airoha_fe_rmw(eth, REG_CDM2_FWD_CFG, CDM2_OAM_QSEL_MASK, + FIELD_PREP(CDM2_OAM_QSEL_MASK, 15)); /* init fragment and assemble Force Port */ /* NPU Core-3, NPU Bridge Channel-3 */ @@ -1701,9 +1710,11 @@ static int airoha_qdma_tx_napi_poll(struct napi_struct *napi, int budget) WRITE_ONCE(desc->msg1, 0); if (skb) { + u16 queue = skb_get_queue_mapping(skb); struct netdev_queue *txq; - txq = netdev_get_tx_queue(skb->dev, qid); + txq = netdev_get_tx_queue(skb->dev, queue); + netdev_tx_completed_queue(txq, 1, skb->len); if (netif_tx_queue_stopped(txq) && q->ndesc - q->queued >= q->free_thr) netif_tx_wake_queue(txq); @@ -2331,7 +2342,7 @@ static int airoha_dev_stop(struct net_device *dev) { struct airoha_gdm_port *port = netdev_priv(dev); struct airoha_qdma *qdma = port->qdma; - int err; + int i, err; netif_tx_disable(dev); err = airoha_set_gdm_ports(qdma->eth, false); @@ -2342,6 +2353,14 @@ static int airoha_dev_stop(struct net_device *dev) GLOBAL_CFG_TX_DMA_EN_MASK | GLOBAL_CFG_RX_DMA_EN_MASK); + for (i = 0; i < ARRAY_SIZE(qdma->q_tx); i++) { + if (!qdma->q_tx[i].ndesc) + continue; + + airoha_qdma_cleanup_tx_queue(&qdma->q_tx[i]); + netdev_tx_reset_subqueue(dev, i); + } + return 0; } @@ -2479,7 +2498,9 @@ static netdev_tx_t airoha_dev_xmit(struct sk_buff *skb, q->queued += i; skb_tx_timestamp(skb); - if (!netdev_xmit_more()) + netdev_tx_sent_queue(txq, skb->len); + + if (netif_xmit_stopped(txq) || !netdev_xmit_more()) airoha_qdma_rmw(qdma, REG_TX_CPU_IDX(qid), TX_RING_CPU_IDX_MASK, FIELD_PREP(TX_RING_CPU_IDX_MASK, q->head)); @@ -2780,7 +2801,7 @@ MODULE_DEVICE_TABLE(of, of_airoha_match); static struct platform_driver airoha_driver = { .probe = airoha_probe, - .remove_new = airoha_remove, + .remove = airoha_remove, .driver = { .name = KBUILD_MODNAME, .of_match_table = of_airoha_match, diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c index ed7313c10a05..f01ceee5f02d 100644 --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c @@ -4329,10 +4329,8 @@ static void mtk_get_strings(struct net_device *dev, u32 stringset, u8 *data) case ETH_SS_STATS: { struct mtk_mac *mac = netdev_priv(dev); - for (i = 0; i < ARRAY_SIZE(mtk_ethtool_stats); i++) { - memcpy(data, mtk_ethtool_stats[i].str, ETH_GSTRING_LEN); - data += ETH_GSTRING_LEN; - } + for (i = 0; i < ARRAY_SIZE(mtk_ethtool_stats); i++) + ethtool_puts(&data, mtk_ethtool_stats[i].str); if (mtk_page_pool_enabled(mac->hw)) page_pool_ethtool_stats_get_strings(data); break; @@ -5358,7 +5356,7 @@ MODULE_DEVICE_TABLE(of, of_mtk_match); static struct platform_driver mtk_driver = { .probe = mtk_probe, - .remove_new = mtk_remove, + .remove = mtk_remove, .driver = { .name = "mtk_soc_eth", .of_match_table = of_mtk_match, diff --git a/drivers/net/ethernet/mellanox/mlx4/en_cq.c b/drivers/net/ethernet/mellanox/mlx4/en_cq.c index 461cc2c79c71..0e92956e84cf 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_cq.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_cq.c @@ -156,7 +156,8 @@ int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq, break; case RX: cq->mcq.comp = mlx4_en_rx_irq; - netif_napi_add(cq->dev, &cq->napi, mlx4_en_poll_rx_cq); + netif_napi_add_config(cq->dev, &cq->napi, mlx4_en_poll_rx_cq, + cq_idx); netif_napi_set_irq(&cq->napi, irq); napi_enable(&cq->napi); 
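	/* The pattern of this conversion, repeated across the series:
	 *	netif_napi_add(dev, napi, poll)
	 * becomes
	 *	netif_napi_add_config(dev, napi, poll, index);
	 * registering against a stable queue index gives the NAPI a persistent
	 * per-index config slot, so per-NAPI attributes set over the netdev
	 * netlink API survive channel teardown and rebuild, while
	 * netif_queue_set_napi() below publishes the queue<->NAPI mapping.
	 */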
netif_queue_set_napi(cq->dev, cq_idx, NETDEV_QUEUE_TYPE_RX, &cq->napi); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/ct_fs_smfs.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/ct_fs_smfs.c index 1c062a2e8996..45737d039252 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/ct_fs_smfs.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/ct_fs_smfs.c @@ -318,7 +318,7 @@ mlx5_ct_fs_smfs_ct_rule_add(struct mlx5_ct_fs *fs, struct mlx5_flow_spec *spec, } actions[num_actions++] = smfs_rule->count_action; - actions[num_actions++] = attr->modify_hdr->action.dr_action; + actions[num_actions++] = attr->modify_hdr->fs_dr_action.dr_action; actions[num_actions++] = fs_smfs->fwd_action; nat = (attr->ft == fs_smfs->ct_nat); @@ -379,7 +379,7 @@ static int mlx5_ct_fs_smfs_ct_rule_update(struct mlx5_ct_fs *fs, struct mlx5_ct_ struct mlx5dr_rule *rule; actions[0] = smfs_rule->count_action; - actions[1] = attr->modify_hdr->action.dr_action; + actions[1] = attr->modify_hdr->fs_dr_action.dr_action; actions[2] = fs_smfs->fwd_action; rule = mlx5_smfs_rule_create(smfs_rule->smfs_matcher->dr_matcher, spec, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c index dcfccaaa8d91..4877a9d86807 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c @@ -1026,7 +1026,7 @@ mlx5_tc_ct_counter_create(struct mlx5_tc_ct_priv *ct_priv) return ERR_PTR(-ENOMEM); counter->is_shared = false; - counter->counter = mlx5_fc_create_ex(ct_priv->dev, true); + counter->counter = mlx5_fc_create(ct_priv->dev, true); if (IS_ERR(counter->counter)) { ct_dbg("Failed to create counter for ct entry"); ret = PTR_ERR(counter->counter); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index e601324a690a..ce94859014f8 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -2697,7 +2697,7 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix, c->aff_mask = irq_get_effective_affinity_mask(irq); c->lag_port = mlx5e_enumerate_lag_port(mdev, ix); - netif_napi_add(netdev, &c->napi, mlx5e_napi_poll); + netif_napi_add_config(netdev, &c->napi, mlx5e_napi_poll, ix); netif_napi_set_irq(&c->napi, irq); err = mlx5e_open_queues(c, params, cparam); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c index f8869c9b6802..d0f38818363f 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c @@ -187,7 +187,7 @@ rate_err: return err; } -void mlx5_esw_offloads_devlink_port_unregister(struct mlx5_eswitch *esw, struct mlx5_vport *vport) +void mlx5_esw_offloads_devlink_port_unregister(struct mlx5_vport *vport) { struct mlx5_devlink_port *dl_port; @@ -195,7 +195,7 @@ void mlx5_esw_offloads_devlink_port_unregister(struct mlx5_eswitch *esw, struct return; dl_port = vport->dl_port; - mlx5_esw_qos_vport_update_group(esw, vport, NULL, NULL); + mlx5_esw_qos_vport_update_node(vport, NULL, NULL); devl_rate_leaf_destroy(&dl_port->dl_port); devl_port_unregister(&dl_port->dl_port); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/diag/qos_tracepoint.h b/drivers/net/ethernet/mellanox/mlx5/core/esw/diag/qos_tracepoint.h index 1ce332f21ebe..43550a416a6f 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/esw/diag/qos_tracepoint.h 
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/diag/qos_tracepoint.h @@ -9,107 +9,111 @@ #include <linux/tracepoint.h> #include "eswitch.h" +#include "qos.h" TRACE_EVENT(mlx5_esw_vport_qos_destroy, - TP_PROTO(const struct mlx5_vport *vport), - TP_ARGS(vport), - TP_STRUCT__entry(__string(devname, dev_name(vport->dev->device)) + TP_PROTO(const struct mlx5_core_dev *dev, const struct mlx5_vport *vport), + TP_ARGS(dev, vport), + TP_STRUCT__entry(__string(devname, dev_name(dev->device)) __field(unsigned short, vport_id) - __field(unsigned int, tsar_ix) + __field(unsigned int, sched_elem_ix) ), TP_fast_assign(__assign_str(devname); __entry->vport_id = vport->vport; - __entry->tsar_ix = vport->qos.esw_tsar_ix; + __entry->sched_elem_ix = mlx5_esw_qos_vport_get_sched_elem_ix(vport); ), - TP_printk("(%s) vport=%hu tsar_ix=%u\n", - __get_str(devname), __entry->vport_id, __entry->tsar_ix + TP_printk("(%s) vport=%hu sched_elem_ix=%u\n", + __get_str(devname), __entry->vport_id, __entry->sched_elem_ix ) ); DECLARE_EVENT_CLASS(mlx5_esw_vport_qos_template, - TP_PROTO(const struct mlx5_vport *vport, u32 bw_share, u32 max_rate), - TP_ARGS(vport, bw_share, max_rate), - TP_STRUCT__entry(__string(devname, dev_name(vport->dev->device)) + TP_PROTO(const struct mlx5_core_dev *dev, const struct mlx5_vport *vport, + u32 bw_share, u32 max_rate), + TP_ARGS(dev, vport, bw_share, max_rate), + TP_STRUCT__entry(__string(devname, dev_name(dev->device)) __field(unsigned short, vport_id) - __field(unsigned int, tsar_ix) + __field(unsigned int, sched_elem_ix) __field(unsigned int, bw_share) __field(unsigned int, max_rate) - __field(void *, group) + __field(void *, parent) ), TP_fast_assign(__assign_str(devname); __entry->vport_id = vport->vport; - __entry->tsar_ix = vport->qos.esw_tsar_ix; + __entry->sched_elem_ix = mlx5_esw_qos_vport_get_sched_elem_ix(vport); __entry->bw_share = bw_share; __entry->max_rate = max_rate; - __entry->group = vport->qos.group; + __entry->parent = mlx5_esw_qos_vport_get_parent(vport); ), - TP_printk("(%s) vport=%hu tsar_ix=%u bw_share=%u, max_rate=%u group=%p\n", - __get_str(devname), __entry->vport_id, __entry->tsar_ix, - __entry->bw_share, __entry->max_rate, __entry->group + TP_printk("(%s) vport=%hu sched_elem_ix=%u bw_share=%u, max_rate=%u parent=%p\n", + __get_str(devname), __entry->vport_id, __entry->sched_elem_ix, + __entry->bw_share, __entry->max_rate, __entry->parent ) ); DEFINE_EVENT(mlx5_esw_vport_qos_template, mlx5_esw_vport_qos_create, - TP_PROTO(const struct mlx5_vport *vport, u32 bw_share, u32 max_rate), - TP_ARGS(vport, bw_share, max_rate) + TP_PROTO(const struct mlx5_core_dev *dev, const struct mlx5_vport *vport, + u32 bw_share, u32 max_rate), + TP_ARGS(dev, vport, bw_share, max_rate) ); DEFINE_EVENT(mlx5_esw_vport_qos_template, mlx5_esw_vport_qos_config, - TP_PROTO(const struct mlx5_vport *vport, u32 bw_share, u32 max_rate), - TP_ARGS(vport, bw_share, max_rate) + TP_PROTO(const struct mlx5_core_dev *dev, const struct mlx5_vport *vport, + u32 bw_share, u32 max_rate), + TP_ARGS(dev, vport, bw_share, max_rate) ); -DECLARE_EVENT_CLASS(mlx5_esw_group_qos_template, +DECLARE_EVENT_CLASS(mlx5_esw_node_qos_template, TP_PROTO(const struct mlx5_core_dev *dev, - const struct mlx5_esw_rate_group *group, + const struct mlx5_esw_sched_node *node, unsigned int tsar_ix), - TP_ARGS(dev, group, tsar_ix), + TP_ARGS(dev, node, tsar_ix), TP_STRUCT__entry(__string(devname, dev_name(dev->device)) - __field(const void *, group) + __field(const void *, node) __field(unsigned int, tsar_ix) ), 
TP_fast_assign(__assign_str(devname); - __entry->group = group; + __entry->node = node; __entry->tsar_ix = tsar_ix; ), - TP_printk("(%s) group=%p tsar_ix=%u\n", - __get_str(devname), __entry->group, __entry->tsar_ix + TP_printk("(%s) node=%p tsar_ix=%u\n", + __get_str(devname), __entry->node, __entry->tsar_ix ) ); -DEFINE_EVENT(mlx5_esw_group_qos_template, mlx5_esw_group_qos_create, +DEFINE_EVENT(mlx5_esw_node_qos_template, mlx5_esw_node_qos_create, TP_PROTO(const struct mlx5_core_dev *dev, - const struct mlx5_esw_rate_group *group, + const struct mlx5_esw_sched_node *node, unsigned int tsar_ix), - TP_ARGS(dev, group, tsar_ix) + TP_ARGS(dev, node, tsar_ix) ); -DEFINE_EVENT(mlx5_esw_group_qos_template, mlx5_esw_group_qos_destroy, +DEFINE_EVENT(mlx5_esw_node_qos_template, mlx5_esw_node_qos_destroy, TP_PROTO(const struct mlx5_core_dev *dev, - const struct mlx5_esw_rate_group *group, + const struct mlx5_esw_sched_node *node, unsigned int tsar_ix), - TP_ARGS(dev, group, tsar_ix) + TP_ARGS(dev, node, tsar_ix) ); -TRACE_EVENT(mlx5_esw_group_qos_config, +TRACE_EVENT(mlx5_esw_node_qos_config, TP_PROTO(const struct mlx5_core_dev *dev, - const struct mlx5_esw_rate_group *group, + const struct mlx5_esw_sched_node *node, unsigned int tsar_ix, u32 bw_share, u32 max_rate), - TP_ARGS(dev, group, tsar_ix, bw_share, max_rate), + TP_ARGS(dev, node, tsar_ix, bw_share, max_rate), TP_STRUCT__entry(__string(devname, dev_name(dev->device)) - __field(const void *, group) + __field(const void *, node) __field(unsigned int, tsar_ix) __field(unsigned int, bw_share) __field(unsigned int, max_rate) ), TP_fast_assign(__assign_str(devname); - __entry->group = group; + __entry->node = node; __entry->tsar_ix = tsar_ix; __entry->bw_share = bw_share; __entry->max_rate = max_rate; ), - TP_printk("(%s) group=%p tsar_ix=%u bw_share=%u max_rate=%u\n", - __get_str(devname), __entry->group, __entry->tsar_ix, + TP_printk("(%s) node=%p tsar_ix=%u bw_share=%u max_rate=%u\n", + __get_str(devname), __entry->node, __entry->tsar_ix, __entry->bw_share, __entry->max_rate ) ); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/legacy.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/legacy.c index 8587cd572da5..45183de424f3 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/esw/legacy.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/legacy.c @@ -176,20 +176,10 @@ static void esw_destroy_legacy_vepa_table(struct mlx5_eswitch *esw) static int esw_create_legacy_table(struct mlx5_eswitch *esw) { - int err; - memset(&esw->fdb_table.legacy, 0, sizeof(struct legacy_fdb)); atomic64_set(&esw->user_count, 0); - err = esw_create_legacy_vepa_table(esw); - if (err) - return err; - - err = esw_create_legacy_fdb_table(esw); - if (err) - esw_destroy_legacy_vepa_table(esw); - - return err; + return esw_create_legacy_fdb_table(esw); } static void esw_cleanup_vepa_rules(struct mlx5_eswitch *esw) @@ -259,15 +249,22 @@ static int _mlx5_eswitch_set_vepa_locked(struct mlx5_eswitch *esw, if (!setting) { esw_cleanup_vepa_rules(esw); + esw_destroy_legacy_vepa_table(esw); return 0; } if (esw->fdb_table.legacy.vepa_uplink_rule) return 0; + err = esw_create_legacy_vepa_table(esw); + if (err) + return err; + spec = kvzalloc(sizeof(*spec), GFP_KERNEL); - if (!spec) - return -ENOMEM; + if (!spec) { + err = -ENOMEM; + goto out; + } /* Uplink rule forward uplink traffic to FDB */ misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters); @@ -303,8 +300,10 @@ static int _mlx5_eswitch_set_vepa_locked(struct mlx5_eswitch *esw, out: kvfree(spec); - if 
(err) + if (err) { esw_cleanup_vepa_rules(esw); + esw_destroy_legacy_vepa_table(esw); + } return err; } @@ -513,15 +512,11 @@ int mlx5_eswitch_set_vport_rate(struct mlx5_eswitch *esw, u16 vport, u32 max_rate, u32 min_rate) { struct mlx5_vport *evport = mlx5_eswitch_get_vport(esw, vport); - int err; if (!mlx5_esw_allowed(esw)) return -EPERM; if (IS_ERR(evport)) return PTR_ERR(evport); - mutex_lock(&esw->state_lock); - err = mlx5_esw_qos_set_vport_rate(esw, evport, max_rate, min_rate); - mutex_unlock(&esw->state_lock); - return err; + return mlx5_esw_qos_set_vport_rate(evport, max_rate, min_rate); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c index 02a3563f51ad..7e7f99b38a37 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c @@ -11,181 +11,247 @@ /* Minimum supported BW share value by the HW is 1 Mbit/sec */ #define MLX5_MIN_BW_SHARE 1 -#define MLX5_RATE_TO_BW_SHARE(rate, divider, limit) \ - min_t(u32, max_t(u32, DIV_ROUND_UP(rate, divider), MLX5_MIN_BW_SHARE), limit) +/* Holds rate nodes associated with an E-Switch. */ +struct mlx5_qos_domain { + /* Serializes access to all qos changes in the qos domain. */ + struct mutex lock; + /* List of all mlx5_esw_sched_nodes. */ + struct list_head nodes; +}; -struct mlx5_esw_rate_group { - u32 tsar_ix; +static void esw_qos_lock(struct mlx5_eswitch *esw) +{ + mutex_lock(&esw->qos.domain->lock); +} + +static void esw_qos_unlock(struct mlx5_eswitch *esw) +{ + mutex_unlock(&esw->qos.domain->lock); +} + +static void esw_assert_qos_lock_held(struct mlx5_eswitch *esw) +{ + lockdep_assert_held(&esw->qos.domain->lock); +} + +static struct mlx5_qos_domain *esw_qos_domain_alloc(void) +{ + struct mlx5_qos_domain *qos_domain; + + qos_domain = kzalloc(sizeof(*qos_domain), GFP_KERNEL); + if (!qos_domain) + return NULL; + + mutex_init(&qos_domain->lock); + INIT_LIST_HEAD(&qos_domain->nodes); + + return qos_domain; +} + +static int esw_qos_domain_init(struct mlx5_eswitch *esw) +{ + esw->qos.domain = esw_qos_domain_alloc(); + + return esw->qos.domain ? 0 : -ENOMEM; +} + +static void esw_qos_domain_release(struct mlx5_eswitch *esw) +{ + kfree(esw->qos.domain); + esw->qos.domain = NULL; +} + +enum sched_node_type { + SCHED_NODE_TYPE_VPORTS_TSAR, + SCHED_NODE_TYPE_VPORT, +}; + +static const char * const sched_node_type_str[] = { + [SCHED_NODE_TYPE_VPORTS_TSAR] = "vports TSAR", + [SCHED_NODE_TYPE_VPORT] = "vport", +}; + +struct mlx5_esw_sched_node { + u32 ix; + /* Bandwidth parameters. */ u32 max_rate; u32 min_rate; + /* A computed value indicating relative min_rate between node's children. */ u32 bw_share; - struct list_head list; + /* The parent node in the rate hierarchy. */ + struct mlx5_esw_sched_node *parent; + /* Entry in the parent node's children list. */ + struct list_head entry; + /* The type of this node in the rate hierarchy. */ + enum sched_node_type type; + /* The eswitch this node belongs to. */ + struct mlx5_eswitch *esw; + /* The children nodes of this node, empty list for leaf nodes. */ + struct list_head children; + /* Valid only if this node is associated with a vport. 
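+	 * In other words: non-NULL only for SCHED_NODE_TYPE_VPORT nodes;
+	 * TSAR nodes leave it NULL.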
*/ + struct mlx5_vport *vport; }; -static int esw_qos_tsar_config(struct mlx5_core_dev *dev, u32 *sched_ctx, - u32 tsar_ix, u32 max_rate, u32 bw_share) +static void +esw_qos_node_set_parent(struct mlx5_esw_sched_node *node, struct mlx5_esw_sched_node *parent) { - u32 bitmask = 0; - - if (!MLX5_CAP_GEN(dev, qos) || !MLX5_CAP_QOS(dev, esw_scheduling)) - return -EOPNOTSUPP; + list_del_init(&node->entry); + node->parent = parent; + list_add_tail(&node->entry, &parent->children); + node->esw = parent->esw; +} - MLX5_SET(scheduling_context, sched_ctx, max_average_bw, max_rate); - MLX5_SET(scheduling_context, sched_ctx, bw_share, bw_share); - bitmask |= MODIFY_SCHEDULING_ELEMENT_IN_MODIFY_BITMASK_MAX_AVERAGE_BW; - bitmask |= MODIFY_SCHEDULING_ELEMENT_IN_MODIFY_BITMASK_BW_SHARE; +u32 mlx5_esw_qos_vport_get_sched_elem_ix(const struct mlx5_vport *vport) +{ + if (!vport->qos.sched_node) + return 0; - return mlx5_modify_scheduling_element_cmd(dev, - SCHEDULING_HIERARCHY_E_SWITCH, - sched_ctx, - tsar_ix, - bitmask); + return vport->qos.sched_node->ix; } -static int esw_qos_group_config(struct mlx5_eswitch *esw, struct mlx5_esw_rate_group *group, - u32 max_rate, u32 bw_share, struct netlink_ext_ack *extack) +struct mlx5_esw_sched_node * +mlx5_esw_qos_vport_get_parent(const struct mlx5_vport *vport) { - u32 sched_ctx[MLX5_ST_SZ_DW(scheduling_context)] = {}; - struct mlx5_core_dev *dev = esw->dev; - int err; + if (!vport->qos.sched_node) + return NULL; - err = esw_qos_tsar_config(dev, sched_ctx, - group->tsar_ix, - max_rate, bw_share); - if (err) - NL_SET_ERR_MSG_MOD(extack, "E-Switch modify group TSAR element failed"); + return vport->qos.sched_node->parent; +} - trace_mlx5_esw_group_qos_config(dev, group, group->tsar_ix, bw_share, max_rate); +static void esw_qos_sched_elem_config_warn(struct mlx5_esw_sched_node *node, int err) +{ + if (node->vport) { + esw_warn(node->esw->dev, + "E-Switch modify %s scheduling element failed (vport=%d,err=%d)\n", + sched_node_type_str[node->type], node->vport->vport, err); + return; + } - return err; + esw_warn(node->esw->dev, + "E-Switch modify %s scheduling element failed (err=%d)\n", + sched_node_type_str[node->type], err); } -static int esw_qos_vport_config(struct mlx5_eswitch *esw, - struct mlx5_vport *vport, - u32 max_rate, u32 bw_share, - struct netlink_ext_ack *extack) +static int esw_qos_sched_elem_config(struct mlx5_esw_sched_node *node, u32 max_rate, u32 bw_share, + struct netlink_ext_ack *extack) { u32 sched_ctx[MLX5_ST_SZ_DW(scheduling_context)] = {}; - struct mlx5_core_dev *dev = esw->dev; + struct mlx5_core_dev *dev = node->esw->dev; + u32 bitmask = 0; int err; - if (!vport->qos.enabled) - return -EIO; + if (!MLX5_CAP_GEN(dev, qos) || !MLX5_CAP_QOS(dev, esw_scheduling)) + return -EOPNOTSUPP; - err = esw_qos_tsar_config(dev, sched_ctx, vport->qos.esw_tsar_ix, - max_rate, bw_share); + MLX5_SET(scheduling_context, sched_ctx, max_average_bw, max_rate); + MLX5_SET(scheduling_context, sched_ctx, bw_share, bw_share); + bitmask |= MODIFY_SCHEDULING_ELEMENT_IN_MODIFY_BITMASK_MAX_AVERAGE_BW; + bitmask |= MODIFY_SCHEDULING_ELEMENT_IN_MODIFY_BITMASK_BW_SHARE; + + err = mlx5_modify_scheduling_element_cmd(dev, + SCHEDULING_HIERARCHY_E_SWITCH, + sched_ctx, + node->ix, + bitmask); if (err) { - esw_warn(esw->dev, - "E-Switch modify TSAR vport element failed (vport=%d,err=%d)\n", - vport->vport, err); - NL_SET_ERR_MSG_MOD(extack, "E-Switch modify TSAR vport element failed"); + esw_qos_sched_elem_config_warn(node, err); + NL_SET_ERR_MSG_MOD(extack, "E-Switch modify 
scheduling element failed"); + return err; } - trace_mlx5_esw_vport_qos_config(vport, bw_share, max_rate); + if (node->type == SCHED_NODE_TYPE_VPORTS_TSAR) + trace_mlx5_esw_node_qos_config(dev, node, node->ix, bw_share, max_rate); + else if (node->type == SCHED_NODE_TYPE_VPORT) + trace_mlx5_esw_vport_qos_config(dev, node->vport, bw_share, max_rate); return 0; } static u32 esw_qos_calculate_min_rate_divider(struct mlx5_eswitch *esw, - struct mlx5_esw_rate_group *group, - bool group_level) + struct mlx5_esw_sched_node *parent) { + struct list_head *nodes = parent ? &parent->children : &esw->qos.domain->nodes; u32 fw_max_bw_share = MLX5_CAP_QOS(esw->dev, max_tsar_bw_share); - struct mlx5_vport *evport; + struct mlx5_esw_sched_node *node; u32 max_guarantee = 0; - unsigned long i; - - if (group_level) { - struct mlx5_esw_rate_group *group; - list_for_each_entry(group, &esw->qos.groups, list) { - if (group->min_rate < max_guarantee) - continue; - max_guarantee = group->min_rate; - } - } else { - mlx5_esw_for_each_vport(esw, i, evport) { - if (!evport->enabled || !evport->qos.enabled || - evport->qos.group != group || evport->qos.min_rate < max_guarantee) - continue; - max_guarantee = evport->qos.min_rate; - } + /* Find max min_rate across all nodes. + * This will correspond to fw_max_bw_share in the final bw_share calculation. + */ + list_for_each_entry(node, nodes, entry) { + if (node->esw == esw && node->ix != esw->qos.root_tsar_ix && + node->min_rate > max_guarantee) + max_guarantee = node->min_rate; } if (max_guarantee) return max_t(u32, max_guarantee / fw_max_bw_share, 1); - /* If vports min rate divider is 0 but their group has bw_share configured, then - * need to set bw_share for vports to minimal value. + /* If nodes max min_rate divider is 0 but their parent has bw_share + * configured, then set bw_share for nodes to minimal value. */ - if (!group_level && !max_guarantee && group && group->bw_share) + + if (parent && parent->bw_share) return 1; + + /* If no node has min_rate configured, a divider of 0 sets all + * nodes' bw_share to 0, effectively disabling min guarantees.
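+	 * As an illustration (numbers not from the patch): three sibling nodes
+	 * with min_rate of 5000, 2000 and 1000 Mbps under a firmware
+	 * max_tsar_bw_share of 100 give a divider of max(5000 / 100, 1) = 50,
+	 * so their bw_share values become 100, 40 and 20 (the 5:2:1 ratio
+	 * mapped onto the firmware range), whereas no min_rate anywhere yields
+	 * a divider of 0.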
+ */ return 0; } static u32 esw_qos_calc_bw_share(u32 min_rate, u32 divider, u32 fw_max) { - if (divider) - return MLX5_RATE_TO_BW_SHARE(min_rate, divider, fw_max); - - return 0; + if (!divider) + return 0; + return min_t(u32, max_t(u32, DIV_ROUND_UP(min_rate, divider), MLX5_MIN_BW_SHARE), fw_max); } -static int esw_qos_normalize_vports_min_rate(struct mlx5_eswitch *esw, - struct mlx5_esw_rate_group *group, - struct netlink_ext_ack *extack) +static int esw_qos_update_sched_node_bw_share(struct mlx5_esw_sched_node *node, + u32 divider, + struct netlink_ext_ack *extack) { - u32 fw_max_bw_share = MLX5_CAP_QOS(esw->dev, max_tsar_bw_share); - u32 divider = esw_qos_calculate_min_rate_divider(esw, group, false); - struct mlx5_vport *evport; - unsigned long i; + u32 fw_max_bw_share = MLX5_CAP_QOS(node->esw->dev, max_tsar_bw_share); u32 bw_share; int err; - mlx5_esw_for_each_vport(esw, i, evport) { - if (!evport->enabled || !evport->qos.enabled || evport->qos.group != group) - continue; - bw_share = esw_qos_calc_bw_share(evport->qos.min_rate, divider, fw_max_bw_share); + bw_share = esw_qos_calc_bw_share(node->min_rate, divider, fw_max_bw_share); - if (bw_share == evport->qos.bw_share) - continue; + if (bw_share == node->bw_share) + return 0; - err = esw_qos_vport_config(esw, evport, evport->qos.max_rate, bw_share, extack); - if (err) - return err; + err = esw_qos_sched_elem_config(node, node->max_rate, bw_share, extack); + if (err) + return err; - evport->qos.bw_share = bw_share; - } + node->bw_share = bw_share; - return 0; + return err; } -static int esw_qos_normalize_groups_min_rate(struct mlx5_eswitch *esw, u32 divider, - struct netlink_ext_ack *extack) +static int esw_qos_normalize_min_rate(struct mlx5_eswitch *esw, + struct mlx5_esw_sched_node *parent, + struct netlink_ext_ack *extack) { - u32 fw_max_bw_share = MLX5_CAP_QOS(esw->dev, max_tsar_bw_share); - struct mlx5_esw_rate_group *group; - u32 bw_share; - int err; + struct list_head *nodes = parent ? 
&parent->children : &esw->qos.domain->nodes; + u32 divider = esw_qos_calculate_min_rate_divider(esw, parent); + struct mlx5_esw_sched_node *node; - list_for_each_entry(group, &esw->qos.groups, list) { - bw_share = esw_qos_calc_bw_share(group->min_rate, divider, fw_max_bw_share); + list_for_each_entry(node, nodes, entry) { + int err; - if (bw_share == group->bw_share) + if (node->esw != esw || node->ix == esw->qos.root_tsar_ix) continue; - err = esw_qos_group_config(esw, group, group->max_rate, bw_share, extack); + err = esw_qos_update_sched_node_bw_share(node, divider, extack); if (err) return err; - group->bw_share = bw_share; - - /* All the group's vports need to be set with default bw_share - * to enable them with QOS - */ - err = esw_qos_normalize_vports_min_rate(esw, group, extack); + if (list_empty(&node->children)) + continue; + err = esw_qos_normalize_min_rate(node->esw, node, extack); if (err) return err; } @@ -193,117 +259,109 @@ static int esw_qos_normalize_groups_min_rate(struct mlx5_eswitch *esw, u32 divid return 0; } -static int esw_qos_set_vport_min_rate(struct mlx5_eswitch *esw, struct mlx5_vport *evport, +static int esw_qos_set_vport_min_rate(struct mlx5_vport *vport, u32 min_rate, struct netlink_ext_ack *extack) { + struct mlx5_esw_sched_node *vport_node = vport->qos.sched_node; u32 fw_max_bw_share, previous_min_rate; bool min_rate_supported; int err; - lockdep_assert_held(&esw->state_lock); - fw_max_bw_share = MLX5_CAP_QOS(esw->dev, max_tsar_bw_share); - min_rate_supported = MLX5_CAP_QOS(esw->dev, esw_bw_share) && + esw_assert_qos_lock_held(vport_node->esw); + fw_max_bw_share = MLX5_CAP_QOS(vport->dev, max_tsar_bw_share); + min_rate_supported = MLX5_CAP_QOS(vport->dev, esw_bw_share) && fw_max_bw_share >= MLX5_MIN_BW_SHARE; if (min_rate && !min_rate_supported) return -EOPNOTSUPP; - if (min_rate == evport->qos.min_rate) + if (min_rate == vport_node->min_rate) return 0; - previous_min_rate = evport->qos.min_rate; - evport->qos.min_rate = min_rate; - err = esw_qos_normalize_vports_min_rate(esw, evport->qos.group, extack); + previous_min_rate = vport_node->min_rate; + vport_node->min_rate = min_rate; + err = esw_qos_normalize_min_rate(vport_node->parent->esw, vport_node->parent, extack); if (err) - evport->qos.min_rate = previous_min_rate; + vport_node->min_rate = previous_min_rate; return err; } -static int esw_qos_set_vport_max_rate(struct mlx5_eswitch *esw, struct mlx5_vport *evport, +static int esw_qos_set_vport_max_rate(struct mlx5_vport *vport, u32 max_rate, struct netlink_ext_ack *extack) { + struct mlx5_esw_sched_node *vport_node = vport->qos.sched_node; u32 act_max_rate = max_rate; bool max_rate_supported; int err; - lockdep_assert_held(&esw->state_lock); - max_rate_supported = MLX5_CAP_QOS(esw->dev, esw_rate_limit); + esw_assert_qos_lock_held(vport_node->esw); + max_rate_supported = MLX5_CAP_QOS(vport->dev, esw_rate_limit); if (max_rate && !max_rate_supported) return -EOPNOTSUPP; - if (max_rate == evport->qos.max_rate) + if (max_rate == vport_node->max_rate) return 0; - /* If parent group has rate limit need to set to group - * value when new max rate is 0. - */ - if (evport->qos.group && !max_rate) - act_max_rate = evport->qos.group->max_rate; - - err = esw_qos_vport_config(esw, evport, act_max_rate, evport->qos.bw_share, extack); + /* Use parent node limit if new max rate is 0. 
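+	 * Clearing a vport limit therefore makes the vport inherit its parent
+	 * node's ceiling instead of becoming truly unlimited.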
*/ + if (!max_rate) + act_max_rate = vport_node->parent->max_rate; + err = esw_qos_sched_elem_config(vport_node, act_max_rate, vport_node->bw_share, extack); if (!err) - evport->qos.max_rate = max_rate; + vport_node->max_rate = max_rate; return err; } -static int esw_qos_set_group_min_rate(struct mlx5_eswitch *esw, struct mlx5_esw_rate_group *group, - u32 min_rate, struct netlink_ext_ack *extack) +static int esw_qos_set_node_min_rate(struct mlx5_esw_sched_node *node, + u32 min_rate, struct netlink_ext_ack *extack) { - u32 fw_max_bw_share = MLX5_CAP_QOS(esw->dev, max_tsar_bw_share); - struct mlx5_core_dev *dev = esw->dev; - u32 previous_min_rate, divider; + struct mlx5_eswitch *esw = node->esw; + u32 previous_min_rate; int err; - if (!(MLX5_CAP_QOS(dev, esw_bw_share) && fw_max_bw_share >= MLX5_MIN_BW_SHARE)) + if (!MLX5_CAP_QOS(esw->dev, esw_bw_share) || + MLX5_CAP_QOS(esw->dev, max_tsar_bw_share) < MLX5_MIN_BW_SHARE) return -EOPNOTSUPP; - if (min_rate == group->min_rate) + if (min_rate == node->min_rate) return 0; - previous_min_rate = group->min_rate; - group->min_rate = min_rate; - divider = esw_qos_calculate_min_rate_divider(esw, group, true); - err = esw_qos_normalize_groups_min_rate(esw, divider, extack); + previous_min_rate = node->min_rate; + node->min_rate = min_rate; + err = esw_qos_normalize_min_rate(esw, NULL, extack); if (err) { - group->min_rate = previous_min_rate; - NL_SET_ERR_MSG_MOD(extack, "E-Switch group min rate setting failed"); + NL_SET_ERR_MSG_MOD(extack, "E-Switch node min rate setting failed"); /* Attempt restoring previous configuration */ - divider = esw_qos_calculate_min_rate_divider(esw, group, true); - if (esw_qos_normalize_groups_min_rate(esw, divider, extack)) + node->min_rate = previous_min_rate; + if (esw_qos_normalize_min_rate(esw, NULL, extack)) NL_SET_ERR_MSG_MOD(extack, "E-Switch BW share restore failed"); } return err; } -static int esw_qos_set_group_max_rate(struct mlx5_eswitch *esw, - struct mlx5_esw_rate_group *group, - u32 max_rate, struct netlink_ext_ack *extack) +static int esw_qos_set_node_max_rate(struct mlx5_esw_sched_node *node, + u32 max_rate, struct netlink_ext_ack *extack) { - struct mlx5_vport *vport; - unsigned long i; + struct mlx5_esw_sched_node *vport_node; int err; - if (group->max_rate == max_rate) + if (node->max_rate == max_rate) return 0; - err = esw_qos_group_config(esw, group, max_rate, group->bw_share, extack); + err = esw_qos_sched_elem_config(node, max_rate, node->bw_share, extack); if (err) return err; - group->max_rate = max_rate; + node->max_rate = max_rate; - /* Any unlimited vports in the group should be set - * with the value of the group. - */ - mlx5_esw_for_each_vport(esw, i, vport) { - if (!vport->enabled || !vport->qos.enabled || - vport->qos.group != group || vport->qos.max_rate) + /* Any unlimited vports in the node should be set with the value of the node. 
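+	 * This is the other half of that inheritance: children with
+	 * max_rate == 0 track the node's limit, so a node limit change must be
+	 * re-applied to each such child here.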
*/ + list_for_each_entry(vport_node, &node->children, entry) { + if (vport_node->max_rate) continue; - err = esw_qos_vport_config(esw, vport, max_rate, vport->qos.bw_share, extack); + err = esw_qos_sched_elem_config(vport_node, max_rate, vport_node->bw_share, extack); if (err) NL_SET_ERR_MSG_MOD(extack, "E-Switch vport implicit rate limit setting failed"); @@ -312,54 +370,62 @@ static int esw_qos_set_group_max_rate(struct mlx5_eswitch *esw, return err; } -static bool esw_qos_element_type_supported(struct mlx5_core_dev *dev, int type) +static int esw_qos_create_node_sched_elem(struct mlx5_core_dev *dev, u32 parent_element_id, + u32 *tsar_ix) { - switch (type) { - case SCHEDULING_CONTEXT_ELEMENT_TYPE_TSAR: - return MLX5_CAP_QOS(dev, esw_element_type) & - ELEMENT_TYPE_CAP_MASK_TSAR; - case SCHEDULING_CONTEXT_ELEMENT_TYPE_VPORT: - return MLX5_CAP_QOS(dev, esw_element_type) & - ELEMENT_TYPE_CAP_MASK_VPORT; - case SCHEDULING_CONTEXT_ELEMENT_TYPE_VPORT_TC: - return MLX5_CAP_QOS(dev, esw_element_type) & - ELEMENT_TYPE_CAP_MASK_VPORT_TC; - case SCHEDULING_CONTEXT_ELEMENT_TYPE_PARA_VPORT_TC: - return MLX5_CAP_QOS(dev, esw_element_type) & - ELEMENT_TYPE_CAP_MASK_PARA_VPORT_TC; - } - return false; + u32 tsar_ctx[MLX5_ST_SZ_DW(scheduling_context)] = {}; + void *attr; + + if (!mlx5_qos_element_type_supported(dev, + SCHEDULING_CONTEXT_ELEMENT_TYPE_TSAR, + SCHEDULING_HIERARCHY_E_SWITCH) || + !mlx5_qos_tsar_type_supported(dev, + TSAR_ELEMENT_TSAR_TYPE_DWRR, + SCHEDULING_HIERARCHY_E_SWITCH)) + return -EOPNOTSUPP; + + MLX5_SET(scheduling_context, tsar_ctx, element_type, + SCHEDULING_CONTEXT_ELEMENT_TYPE_TSAR); + MLX5_SET(scheduling_context, tsar_ctx, parent_element_id, + parent_element_id); + attr = MLX5_ADDR_OF(scheduling_context, tsar_ctx, element_attributes); + MLX5_SET(tsar_element, attr, tsar_type, TSAR_ELEMENT_TSAR_TYPE_DWRR); + + return mlx5_create_scheduling_element_cmd(dev, + SCHEDULING_HIERARCHY_E_SWITCH, + tsar_ctx, + tsar_ix); } -static int esw_qos_vport_create_sched_element(struct mlx5_eswitch *esw, - struct mlx5_vport *vport, - u32 max_rate, u32 bw_share) +static int +esw_qos_vport_create_sched_element(struct mlx5_vport *vport, struct mlx5_esw_sched_node *parent, + u32 max_rate, u32 bw_share, u32 *sched_elem_ix) { u32 sched_ctx[MLX5_ST_SZ_DW(scheduling_context)] = {}; - struct mlx5_esw_rate_group *group = vport->qos.group; - struct mlx5_core_dev *dev = esw->dev; - u32 parent_tsar_ix; - void *vport_elem; + struct mlx5_core_dev *dev = parent->esw->dev; + void *attr; int err; - if (!esw_qos_element_type_supported(dev, SCHEDULING_CONTEXT_ELEMENT_TYPE_VPORT)) + if (!mlx5_qos_element_type_supported(dev, + SCHEDULING_CONTEXT_ELEMENT_TYPE_VPORT, + SCHEDULING_HIERARCHY_E_SWITCH)) return -EOPNOTSUPP; - parent_tsar_ix = group ? 
group->tsar_ix : esw->qos.root_tsar_ix; MLX5_SET(scheduling_context, sched_ctx, element_type, SCHEDULING_CONTEXT_ELEMENT_TYPE_VPORT); - vport_elem = MLX5_ADDR_OF(scheduling_context, sched_ctx, element_attributes); - MLX5_SET(vport_element, vport_elem, vport_number, vport->vport); - MLX5_SET(scheduling_context, sched_ctx, parent_element_id, parent_tsar_ix); + attr = MLX5_ADDR_OF(scheduling_context, sched_ctx, element_attributes); + MLX5_SET(vport_element, attr, vport_number, vport->vport); + MLX5_SET(scheduling_context, sched_ctx, parent_element_id, parent->ix); MLX5_SET(scheduling_context, sched_ctx, max_average_bw, max_rate); MLX5_SET(scheduling_context, sched_ctx, bw_share, bw_share); err = mlx5_create_scheduling_element_cmd(dev, SCHEDULING_HIERARCHY_E_SWITCH, sched_ctx, - &vport->qos.esw_tsar_ix); + sched_elem_ix); if (err) { - esw_warn(esw->dev, "E-Switch create TSAR vport element failed (vport=%d,err=%d)\n", + esw_warn(dev, + "E-Switch create vport scheduling element failed (vport=%d,err=%d)\n", vport->vport, err); return err; } @@ -367,141 +433,154 @@ static int esw_qos_vport_create_sched_element(struct mlx5_eswitch *esw, return 0; } -static int esw_qos_update_group_scheduling_element(struct mlx5_eswitch *esw, - struct mlx5_vport *vport, - struct mlx5_esw_rate_group *curr_group, - struct mlx5_esw_rate_group *new_group, - struct netlink_ext_ack *extack) +static int esw_qos_update_node_scheduling_element(struct mlx5_vport *vport, + struct mlx5_esw_sched_node *curr_node, + struct mlx5_esw_sched_node *new_node, + struct netlink_ext_ack *extack) { + struct mlx5_esw_sched_node *vport_node = vport->qos.sched_node; u32 max_rate; int err; - err = mlx5_destroy_scheduling_element_cmd(esw->dev, + err = mlx5_destroy_scheduling_element_cmd(curr_node->esw->dev, SCHEDULING_HIERARCHY_E_SWITCH, - vport->qos.esw_tsar_ix); + vport_node->ix); if (err) { - NL_SET_ERR_MSG_MOD(extack, "E-Switch destroy TSAR vport element failed"); + NL_SET_ERR_MSG_MOD(extack, "E-Switch destroy vport scheduling element failed"); return err; } - vport->qos.group = new_group; - max_rate = vport->qos.max_rate ? vport->qos.max_rate : new_group->max_rate; - - /* If vport is unlimited, we set the group's value. - * Therefore, if the group is limited it will apply to - * the vport as well and if not, vport will remain unlimited. - */ - err = esw_qos_vport_create_sched_element(esw, vport, max_rate, vport->qos.bw_share); + /* Use new node max rate if vport max rate is unlimited. */ + max_rate = vport_node->max_rate ? vport_node->max_rate : new_node->max_rate; + err = esw_qos_vport_create_sched_element(vport, new_node, max_rate, + vport_node->bw_share, + &vport_node->ix); if (err) { - NL_SET_ERR_MSG_MOD(extack, "E-Switch vport group set failed."); + NL_SET_ERR_MSG_MOD(extack, "E-Switch vport node set failed."); goto err_sched; } + esw_qos_node_set_parent(vport->qos.sched_node, new_node); + return 0; err_sched: - vport->qos.group = curr_group; - max_rate = vport->qos.max_rate ? vport->qos.max_rate : curr_group->max_rate; - if (esw_qos_vport_create_sched_element(esw, vport, max_rate, vport->qos.bw_share)) - esw_warn(esw->dev, "E-Switch vport group restore failed (vport=%d)\n", + max_rate = vport_node->max_rate ? 
vport_node->max_rate : curr_node->max_rate; + if (esw_qos_vport_create_sched_element(vport, curr_node, max_rate, + vport_node->bw_share, + &vport_node->ix)) + esw_warn(curr_node->esw->dev, "E-Switch vport node restore failed (vport=%d)\n", vport->vport); return err; } -static int esw_qos_vport_update_group(struct mlx5_eswitch *esw, - struct mlx5_vport *vport, - struct mlx5_esw_rate_group *group, - struct netlink_ext_ack *extack) +static int esw_qos_vport_update_node(struct mlx5_vport *vport, + struct mlx5_esw_sched_node *node, + struct netlink_ext_ack *extack) { - struct mlx5_esw_rate_group *new_group, *curr_group; + struct mlx5_esw_sched_node *vport_node = vport->qos.sched_node; + struct mlx5_eswitch *esw = vport->dev->priv.eswitch; + struct mlx5_esw_sched_node *new_node, *curr_node; int err; - if (!vport->enabled) - return -EINVAL; - - curr_group = vport->qos.group; - new_group = group ?: esw->qos.group0; - if (curr_group == new_group) + esw_assert_qos_lock_held(esw); + curr_node = vport_node->parent; + new_node = node ?: esw->qos.node0; + if (curr_node == new_node) return 0; - err = esw_qos_update_group_scheduling_element(esw, vport, curr_group, new_group, extack); + err = esw_qos_update_node_scheduling_element(vport, curr_node, new_node, extack); if (err) return err; - /* Recalculate bw share weights of old and new groups */ - if (vport->qos.bw_share || new_group->bw_share) { - esw_qos_normalize_vports_min_rate(esw, curr_group, extack); - esw_qos_normalize_vports_min_rate(esw, new_group, extack); + /* Recalculate bw share weights of old and new nodes */ + if (vport_node->bw_share || new_node->bw_share) { + esw_qos_normalize_min_rate(curr_node->esw, curr_node, extack); + esw_qos_normalize_min_rate(new_node->esw, new_node, extack); } return 0; } -static struct mlx5_esw_rate_group * -__esw_qos_create_rate_group(struct mlx5_eswitch *esw, struct netlink_ext_ack *extack) +static struct mlx5_esw_sched_node * +__esw_qos_alloc_node(struct mlx5_eswitch *esw, u32 tsar_ix, enum sched_node_type type, + struct mlx5_esw_sched_node *parent) { - u32 tsar_ctx[MLX5_ST_SZ_DW(scheduling_context)] = {}; - struct mlx5_esw_rate_group *group; - __be32 *attr; - u32 divider; - int err; - - group = kzalloc(sizeof(*group), GFP_KERNEL); - if (!group) - return ERR_PTR(-ENOMEM); + struct list_head *parent_children; + struct mlx5_esw_sched_node *node; + + node = kzalloc(sizeof(*node), GFP_KERNEL); + if (!node) + return NULL; + + node->esw = esw; + node->ix = tsar_ix; + node->type = type; + node->parent = parent; + INIT_LIST_HEAD(&node->children); + parent_children = parent ? 
&parent->children : &esw->qos.domain->nodes; + list_add_tail(&node->entry, parent_children); + + return node; +} - MLX5_SET(scheduling_context, tsar_ctx, element_type, - SCHEDULING_CONTEXT_ELEMENT_TYPE_TSAR); +static void __esw_qos_free_node(struct mlx5_esw_sched_node *node) +{ + list_del(&node->entry); + kfree(node); +} - attr = MLX5_ADDR_OF(scheduling_context, tsar_ctx, element_attributes); - *attr = cpu_to_be32(TSAR_ELEMENT_TSAR_TYPE_DWRR << 16); +static struct mlx5_esw_sched_node * +__esw_qos_create_vports_sched_node(struct mlx5_eswitch *esw, struct mlx5_esw_sched_node *parent, + struct netlink_ext_ack *extack) +{ + struct mlx5_esw_sched_node *node; + u32 tsar_ix; + int err; - MLX5_SET(scheduling_context, tsar_ctx, parent_element_id, - esw->qos.root_tsar_ix); - err = mlx5_create_scheduling_element_cmd(esw->dev, - SCHEDULING_HIERARCHY_E_SWITCH, - tsar_ctx, - &group->tsar_ix); + err = esw_qos_create_node_sched_elem(esw->dev, esw->qos.root_tsar_ix, &tsar_ix); if (err) { - NL_SET_ERR_MSG_MOD(extack, "E-Switch create TSAR for group failed"); - goto err_sched_elem; + NL_SET_ERR_MSG_MOD(extack, "E-Switch create TSAR for node failed"); + return ERR_PTR(err); } - list_add_tail(&group->list, &esw->qos.groups); + node = __esw_qos_alloc_node(esw, tsar_ix, SCHED_NODE_TYPE_VPORTS_TSAR, parent); + if (!node) { + NL_SET_ERR_MSG_MOD(extack, "E-Switch alloc node failed"); + err = -ENOMEM; + goto err_alloc_node; + } - divider = esw_qos_calculate_min_rate_divider(esw, group, true); - if (divider) { - err = esw_qos_normalize_groups_min_rate(esw, divider, extack); - if (err) { - NL_SET_ERR_MSG_MOD(extack, "E-Switch groups normalization failed"); - goto err_min_rate; - } + err = esw_qos_normalize_min_rate(esw, NULL, extack); + if (err) { + NL_SET_ERR_MSG_MOD(extack, "E-Switch nodes normalization failed"); + goto err_min_rate; } - trace_mlx5_esw_group_qos_create(esw->dev, group, group->tsar_ix); + trace_mlx5_esw_node_qos_create(esw->dev, node, node->ix); - return group; + return node; err_min_rate: - list_del(&group->list); + __esw_qos_free_node(node); +err_alloc_node: if (mlx5_destroy_scheduling_element_cmd(esw->dev, SCHEDULING_HIERARCHY_E_SWITCH, - group->tsar_ix)) - NL_SET_ERR_MSG_MOD(extack, "E-Switch destroy TSAR for group failed"); -err_sched_elem: - kfree(group); + tsar_ix)) + NL_SET_ERR_MSG_MOD(extack, "E-Switch destroy TSAR for node failed"); return ERR_PTR(err); } static int esw_qos_get(struct mlx5_eswitch *esw, struct netlink_ext_ack *extack); static void esw_qos_put(struct mlx5_eswitch *esw); -static struct mlx5_esw_rate_group * -esw_qos_create_rate_group(struct mlx5_eswitch *esw, struct netlink_ext_ack *extack) +static struct mlx5_esw_sched_node * +esw_qos_create_vports_sched_node(struct mlx5_eswitch *esw, struct netlink_ext_ack *extack) { - struct mlx5_esw_rate_group *group; + struct mlx5_esw_sched_node *node; int err; + esw_assert_qos_lock_held(esw); if (!MLX5_CAP_QOS(esw->dev, log_esw_max_sched_depth)) return ERR_PTR(-EOPNOTSUPP); @@ -509,96 +588,71 @@ esw_qos_create_rate_group(struct mlx5_eswitch *esw, struct netlink_ext_ack *exta if (err) return ERR_PTR(err); - group = __esw_qos_create_rate_group(esw, extack); - if (IS_ERR(group)) + node = __esw_qos_create_vports_sched_node(esw, NULL, extack); + if (IS_ERR(node)) esw_qos_put(esw); - return group; + return node; } -static int __esw_qos_destroy_rate_group(struct mlx5_eswitch *esw, - struct mlx5_esw_rate_group *group, - struct netlink_ext_ack *extack) +static int __esw_qos_destroy_node(struct mlx5_esw_sched_node *node, struct netlink_ext_ack 
*extack) { - u32 divider; + struct mlx5_eswitch *esw = node->esw; int err; - list_del(&group->list); - - divider = esw_qos_calculate_min_rate_divider(esw, NULL, true); - err = esw_qos_normalize_groups_min_rate(esw, divider, extack); - if (err) - NL_SET_ERR_MSG_MOD(extack, "E-Switch groups' normalization failed"); + trace_mlx5_esw_node_qos_destroy(esw->dev, node, node->ix); err = mlx5_destroy_scheduling_element_cmd(esw->dev, SCHEDULING_HIERARCHY_E_SWITCH, - group->tsar_ix); + node->ix); if (err) NL_SET_ERR_MSG_MOD(extack, "E-Switch destroy TSAR_ID failed"); + __esw_qos_free_node(node); - trace_mlx5_esw_group_qos_destroy(esw->dev, group, group->tsar_ix); - - kfree(group); - - return err; -} - -static int esw_qos_destroy_rate_group(struct mlx5_eswitch *esw, - struct mlx5_esw_rate_group *group, - struct netlink_ext_ack *extack) -{ - int err; + err = esw_qos_normalize_min_rate(esw, NULL, extack); + if (err) + NL_SET_ERR_MSG_MOD(extack, "E-Switch nodes normalization failed"); - err = __esw_qos_destroy_rate_group(esw, group, extack); - esw_qos_put(esw); return err; } static int esw_qos_create(struct mlx5_eswitch *esw, struct netlink_ext_ack *extack) { - u32 tsar_ctx[MLX5_ST_SZ_DW(scheduling_context)] = {}; struct mlx5_core_dev *dev = esw->dev; - __be32 *attr; int err; if (!MLX5_CAP_GEN(dev, qos) || !MLX5_CAP_QOS(dev, esw_scheduling)) return -EOPNOTSUPP; - if (!esw_qos_element_type_supported(dev, SCHEDULING_CONTEXT_ELEMENT_TYPE_TSAR) || - !(MLX5_CAP_QOS(dev, esw_tsar_type) & TSAR_TYPE_CAP_MASK_DWRR)) - return -EOPNOTSUPP; - - MLX5_SET(scheduling_context, tsar_ctx, element_type, - SCHEDULING_CONTEXT_ELEMENT_TYPE_TSAR); - - attr = MLX5_ADDR_OF(scheduling_context, tsar_ctx, element_attributes); - *attr = cpu_to_be32(TSAR_ELEMENT_TSAR_TYPE_DWRR << 16); - - err = mlx5_create_scheduling_element_cmd(dev, - SCHEDULING_HIERARCHY_E_SWITCH, - tsar_ctx, - &esw->qos.root_tsar_ix); + err = esw_qos_create_node_sched_elem(esw->dev, 0, &esw->qos.root_tsar_ix); if (err) { esw_warn(dev, "E-Switch create root TSAR failed (%d)\n", err); return err; } - INIT_LIST_HEAD(&esw->qos.groups); if (MLX5_CAP_QOS(dev, log_esw_max_sched_depth)) { - esw->qos.group0 = __esw_qos_create_rate_group(esw, extack); - if (IS_ERR(esw->qos.group0)) { - esw_warn(dev, "E-Switch create rate group 0 failed (%ld)\n", - PTR_ERR(esw->qos.group0)); - err = PTR_ERR(esw->qos.group0); - goto err_group0; - } + esw->qos.node0 = __esw_qos_create_vports_sched_node(esw, NULL, extack); + } else { + /* The eswitch doesn't support scheduling nodes. + * Create a software-only node0 using the root TSAR to attach vport QoS to. 
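+	 * Since this node0 reuses the root TSAR index, teardown has to compare
+	 * node0->ix with root_tsar_ix (see esw_qos_destroy() below) and only
+	 * free the software node, so the root TSAR is destroyed exactly once.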
+ */ + if (!__esw_qos_alloc_node(esw, + esw->qos.root_tsar_ix, + SCHED_NODE_TYPE_VPORTS_TSAR, + NULL)) + esw->qos.node0 = ERR_PTR(-ENOMEM); + } + if (IS_ERR(esw->qos.node0)) { + err = PTR_ERR(esw->qos.node0); + esw_warn(dev, "E-Switch create rate node 0 failed (%d)\n", err); + goto err_node0; } refcount_set(&esw->qos.refcnt, 1); return 0; -err_group0: +err_node0: if (mlx5_destroy_scheduling_element_cmd(esw->dev, SCHEDULING_HIERARCHY_E_SWITCH, esw->qos.root_tsar_ix)) esw_warn(esw->dev, "E-Switch destroy root TSAR failed.\n"); @@ -610,8 +664,11 @@ static void esw_qos_destroy(struct mlx5_eswitch *esw) { int err; - if (esw->qos.group0) - __esw_qos_destroy_rate_group(esw, esw->qos.group0, NULL); + if (esw->qos.node0->ix != esw->qos.root_tsar_ix) + __esw_qos_destroy_node(esw->qos.node0, NULL); + else + __esw_qos_free_node(esw->qos.node0); + esw->qos.node0 = NULL; err = mlx5_destroy_scheduling_element_cmd(esw->dev, SCHEDULING_HIERARCHY_E_SWITCH, @@ -624,8 +681,7 @@ static int esw_qos_get(struct mlx5_eswitch *esw, struct netlink_ext_ack *extack) { int err = 0; - lockdep_assert_held(&esw->state_lock); - + esw_assert_qos_lock_held(esw); if (!refcount_inc_not_zero(&esw->qos.refcnt)) { /* esw_qos_create() set refcount to 1 only on success. * No need to decrement on failure. @@ -638,81 +694,121 @@ static int esw_qos_get(struct mlx5_eswitch *esw, struct netlink_ext_ack *extack) static void esw_qos_put(struct mlx5_eswitch *esw) { - lockdep_assert_held(&esw->state_lock); + esw_assert_qos_lock_held(esw); if (refcount_dec_and_test(&esw->qos.refcnt)) esw_qos_destroy(esw); } -static int esw_qos_vport_enable(struct mlx5_eswitch *esw, struct mlx5_vport *vport, +static int esw_qos_vport_enable(struct mlx5_vport *vport, u32 max_rate, u32 bw_share, struct netlink_ext_ack *extack) { + struct mlx5_eswitch *esw = vport->dev->priv.eswitch; + u32 sched_elem_ix; int err; - lockdep_assert_held(&esw->state_lock); - if (vport->qos.enabled) + esw_assert_qos_lock_held(esw); + if (vport->qos.sched_node) return 0; err = esw_qos_get(esw, extack); if (err) return err; - vport->qos.group = esw->qos.group0; - - err = esw_qos_vport_create_sched_element(esw, vport, max_rate, bw_share); + err = esw_qos_vport_create_sched_element(vport, esw->qos.node0, max_rate, bw_share, + &sched_elem_ix); if (err) goto err_out; - vport->qos.enabled = true; - trace_mlx5_esw_vport_qos_create(vport, bw_share, max_rate); + vport->qos.sched_node = __esw_qos_alloc_node(esw, sched_elem_ix, SCHED_NODE_TYPE_VPORT, + esw->qos.node0); + if (!vport->qos.sched_node) { + err = -ENOMEM; + goto err_alloc; + } + + vport->qos.sched_node->vport = vport; + + trace_mlx5_esw_vport_qos_create(vport->dev, vport, bw_share, max_rate); return 0; +err_alloc: + if (mlx5_destroy_scheduling_element_cmd(esw->dev, + SCHEDULING_HIERARCHY_E_SWITCH, sched_elem_ix)) + esw_warn(esw->dev, "E-Switch destroy vport scheduling element failed.\n"); err_out: esw_qos_put(esw); return err; } -void mlx5_esw_qos_vport_disable(struct mlx5_eswitch *esw, struct mlx5_vport *vport) +void mlx5_esw_qos_vport_disable(struct mlx5_vport *vport) { + struct mlx5_eswitch *esw = vport->dev->priv.eswitch; + struct mlx5_esw_sched_node *vport_node; + struct mlx5_core_dev *dev; int err; lockdep_assert_held(&esw->state_lock); - if (!vport->qos.enabled) - return; - WARN(vport->qos.group && vport->qos.group != esw->qos.group0, - "Disabling QoS on port before detaching it from group"); + esw_qos_lock(esw); + vport_node = vport->qos.sched_node; + if (!vport_node) + goto unlock; + WARN(vport_node->parent != 
esw->qos.node0, + "Disabling QoS on port before detaching it from node"); - err = mlx5_destroy_scheduling_element_cmd(esw->dev, + dev = vport_node->esw->dev; + trace_mlx5_esw_vport_qos_destroy(dev, vport); + + err = mlx5_destroy_scheduling_element_cmd(dev, SCHEDULING_HIERARCHY_E_SWITCH, - vport->qos.esw_tsar_ix); + vport_node->ix); if (err) - esw_warn(esw->dev, "E-Switch destroy TSAR vport element failed (vport=%d,err=%d)\n", + esw_warn(dev, + "E-Switch destroy vport scheduling element failed (vport=%d,err=%d)\n", vport->vport, err); + __esw_qos_free_node(vport_node); memset(&vport->qos, 0, sizeof(vport->qos)); - trace_mlx5_esw_vport_qos_destroy(vport); esw_qos_put(esw); +unlock: + esw_qos_unlock(esw); } -int mlx5_esw_qos_set_vport_rate(struct mlx5_eswitch *esw, struct mlx5_vport *vport, - u32 max_rate, u32 min_rate) +int mlx5_esw_qos_set_vport_rate(struct mlx5_vport *vport, u32 max_rate, u32 min_rate) { + struct mlx5_eswitch *esw = vport->dev->priv.eswitch; int err; - lockdep_assert_held(&esw->state_lock); - err = esw_qos_vport_enable(esw, vport, 0, 0, NULL); + esw_qos_lock(esw); + err = esw_qos_vport_enable(vport, 0, 0, NULL); if (err) - return err; + goto unlock; - err = esw_qos_set_vport_min_rate(esw, vport, min_rate, NULL); + err = esw_qos_set_vport_min_rate(vport, min_rate, NULL); if (!err) - err = esw_qos_set_vport_max_rate(esw, vport, max_rate, NULL); - + err = esw_qos_set_vport_max_rate(vport, max_rate, NULL); +unlock: + esw_qos_unlock(esw); return err; } +bool mlx5_esw_qos_get_vport_rate(struct mlx5_vport *vport, u32 *max_rate, u32 *min_rate) +{ + struct mlx5_eswitch *esw = vport->dev->priv.eswitch; + bool enabled; + + esw_qos_lock(esw); + enabled = !!vport->qos.sched_node; + if (enabled) { + *max_rate = vport->qos.sched_node->max_rate; + *min_rate = vport->qos.sched_node->min_rate; + } + esw_qos_unlock(esw); + return enabled; +} + static u32 mlx5_esw_qos_lag_link_speed_get_locked(struct mlx5_core_dev *mdev) { struct ethtool_link_ksettings lksettings; @@ -800,21 +896,22 @@ int mlx5_esw_qos_modify_vport_rate(struct mlx5_eswitch *esw, u16 vport_num, u32 return err; } - mutex_lock(&esw->state_lock); - if (!vport->qos.enabled) { + esw_qos_lock(esw); + if (!vport->qos.sched_node) { /* Eswitch QoS wasn't enabled yet. Enable it and vport QoS. 
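	 * A vport on this path has no sched_node and hence no stored bw_share,
	 * which is why the updated call passes 0 explicitly.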
*/ - err = esw_qos_vport_enable(esw, vport, rate_mbps, vport->qos.bw_share, NULL); + err = esw_qos_vport_enable(vport, rate_mbps, 0, NULL); } else { - MLX5_SET(scheduling_context, ctx, max_average_bw, rate_mbps); + struct mlx5_core_dev *dev = vport->qos.sched_node->parent->esw->dev; + MLX5_SET(scheduling_context, ctx, max_average_bw, rate_mbps); bitmask = MODIFY_SCHEDULING_ELEMENT_IN_MODIFY_BITMASK_MAX_AVERAGE_BW; - err = mlx5_modify_scheduling_element_cmd(esw->dev, + err = mlx5_modify_scheduling_element_cmd(dev, SCHEDULING_HIERARCHY_E_SWITCH, ctx, - vport->qos.esw_tsar_ix, + vport->qos.sched_node->ix, bitmask); } - mutex_unlock(&esw->state_lock); + esw_qos_unlock(esw); return err; } @@ -852,6 +949,17 @@ static int esw_qos_devlink_rate_to_mbps(struct mlx5_core_dev *mdev, const char * return 0; } +int mlx5_esw_qos_init(struct mlx5_eswitch *esw) +{ + return esw_qos_domain_init(esw); +} + +void mlx5_esw_qos_cleanup(struct mlx5_eswitch *esw) +{ + if (esw->qos.domain) + esw_qos_domain_release(esw); +} + /* Eswitch devlink rate API */ int mlx5_esw_devlink_rate_leaf_tx_share_set(struct devlink_rate *rate_leaf, void *priv, @@ -869,14 +977,14 @@ int mlx5_esw_devlink_rate_leaf_tx_share_set(struct devlink_rate *rate_leaf, void if (err) return err; - mutex_lock(&esw->state_lock); - err = esw_qos_vport_enable(esw, vport, 0, 0, extack); + esw_qos_lock(esw); + err = esw_qos_vport_enable(vport, 0, 0, extack); if (err) goto unlock; - err = esw_qos_set_vport_min_rate(esw, vport, tx_share, extack); + err = esw_qos_set_vport_min_rate(vport, tx_share, extack); unlock: - mutex_unlock(&esw->state_lock); + esw_qos_unlock(esw); return err; } @@ -895,57 +1003,55 @@ int mlx5_esw_devlink_rate_leaf_tx_max_set(struct devlink_rate *rate_leaf, void * if (err) return err; - mutex_lock(&esw->state_lock); - err = esw_qos_vport_enable(esw, vport, 0, 0, extack); + esw_qos_lock(esw); + err = esw_qos_vport_enable(vport, 0, 0, extack); if (err) goto unlock; - err = esw_qos_set_vport_max_rate(esw, vport, tx_max, extack); + err = esw_qos_set_vport_max_rate(vport, tx_max, extack); unlock: - mutex_unlock(&esw->state_lock); + esw_qos_unlock(esw); return err; } int mlx5_esw_devlink_rate_node_tx_share_set(struct devlink_rate *rate_node, void *priv, u64 tx_share, struct netlink_ext_ack *extack) { - struct mlx5_core_dev *dev = devlink_priv(rate_node->devlink); - struct mlx5_eswitch *esw = dev->priv.eswitch; - struct mlx5_esw_rate_group *group = priv; + struct mlx5_esw_sched_node *node = priv; + struct mlx5_eswitch *esw = node->esw; int err; - err = esw_qos_devlink_rate_to_mbps(dev, "tx_share", &tx_share, extack); + err = esw_qos_devlink_rate_to_mbps(esw->dev, "tx_share", &tx_share, extack); if (err) return err; - mutex_lock(&esw->state_lock); - err = esw_qos_set_group_min_rate(esw, group, tx_share, extack); - mutex_unlock(&esw->state_lock); + esw_qos_lock(esw); + err = esw_qos_set_node_min_rate(node, tx_share, extack); + esw_qos_unlock(esw); return err; } int mlx5_esw_devlink_rate_node_tx_max_set(struct devlink_rate *rate_node, void *priv, u64 tx_max, struct netlink_ext_ack *extack) { - struct mlx5_core_dev *dev = devlink_priv(rate_node->devlink); - struct mlx5_eswitch *esw = dev->priv.eswitch; - struct mlx5_esw_rate_group *group = priv; + struct mlx5_esw_sched_node *node = priv; + struct mlx5_eswitch *esw = node->esw; int err; - err = esw_qos_devlink_rate_to_mbps(dev, "tx_max", &tx_max, extack); + err = esw_qos_devlink_rate_to_mbps(esw->dev, "tx_max", &tx_max, extack); if (err) return err; - mutex_lock(&esw->state_lock); - err = 
esw_qos_set_group_max_rate(esw, group, tx_max, extack); - mutex_unlock(&esw->state_lock); + esw_qos_lock(esw); + err = esw_qos_set_node_max_rate(node, tx_max, extack); + esw_qos_unlock(esw); return err; } int mlx5_esw_devlink_rate_node_new(struct devlink_rate *rate_node, void **priv, struct netlink_ext_ack *extack) { - struct mlx5_esw_rate_group *group; + struct mlx5_esw_sched_node *node; struct mlx5_eswitch *esw; int err = 0; @@ -953,7 +1059,7 @@ int mlx5_esw_devlink_rate_node_new(struct devlink_rate *rate_node, void **priv, if (IS_ERR(esw)) return PTR_ERR(esw); - mutex_lock(&esw->state_lock); + esw_qos_lock(esw); if (esw->mode != MLX5_ESWITCH_OFFLOADS) { NL_SET_ERR_MSG_MOD(extack, "Rate node creation supported only in switchdev mode"); @@ -961,51 +1067,53 @@ int mlx5_esw_devlink_rate_node_new(struct devlink_rate *rate_node, void **priv, goto unlock; } - group = esw_qos_create_rate_group(esw, extack); - if (IS_ERR(group)) { - err = PTR_ERR(group); + node = esw_qos_create_vports_sched_node(esw, extack); + if (IS_ERR(node)) { + err = PTR_ERR(node); goto unlock; } - *priv = group; + *priv = node; unlock: - mutex_unlock(&esw->state_lock); + esw_qos_unlock(esw); return err; } int mlx5_esw_devlink_rate_node_del(struct devlink_rate *rate_node, void *priv, struct netlink_ext_ack *extack) { - struct mlx5_esw_rate_group *group = priv; - struct mlx5_eswitch *esw; + struct mlx5_esw_sched_node *node = priv; + struct mlx5_eswitch *esw = node->esw; int err; - esw = mlx5_devlink_eswitch_get(rate_node->devlink); - if (IS_ERR(esw)) - return PTR_ERR(esw); - - mutex_lock(&esw->state_lock); - err = esw_qos_destroy_rate_group(esw, group, extack); - mutex_unlock(&esw->state_lock); + esw_qos_lock(esw); + err = __esw_qos_destroy_node(node, extack); + esw_qos_put(esw); + esw_qos_unlock(esw); return err; } -int mlx5_esw_qos_vport_update_group(struct mlx5_eswitch *esw, - struct mlx5_vport *vport, - struct mlx5_esw_rate_group *group, - struct netlink_ext_ack *extack) +int mlx5_esw_qos_vport_update_node(struct mlx5_vport *vport, + struct mlx5_esw_sched_node *node, + struct netlink_ext_ack *extack) { + struct mlx5_eswitch *esw = vport->dev->priv.eswitch; int err = 0; - mutex_lock(&esw->state_lock); - if (!vport->qos.enabled && !group) + if (node && node->esw != esw) { + NL_SET_ERR_MSG_MOD(extack, "Cross E-Switch scheduling is not supported"); + return -EOPNOTSUPP; + } + + esw_qos_lock(esw); + if (!vport->qos.sched_node && !node) goto unlock; - err = esw_qos_vport_enable(esw, vport, 0, 0, extack); + err = esw_qos_vport_enable(vport, 0, 0, extack); if (!err) - err = esw_qos_vport_update_group(esw, vport, group, extack); + err = esw_qos_vport_update_node(vport, node, extack); unlock: - mutex_unlock(&esw->state_lock); + esw_qos_unlock(esw); return err; } @@ -1014,13 +1122,12 @@ int mlx5_esw_devlink_rate_parent_set(struct devlink_rate *devlink_rate, void *priv, void *parent_priv, struct netlink_ext_ack *extack) { - struct mlx5_esw_rate_group *group; + struct mlx5_esw_sched_node *node; struct mlx5_vport *vport = priv; if (!parent) - return mlx5_esw_qos_vport_update_group(vport->dev->priv.eswitch, - vport, NULL, extack); + return mlx5_esw_qos_vport_update_node(vport, NULL, extack); - group = parent_priv; - return mlx5_esw_qos_vport_update_group(vport->dev->priv.eswitch, vport, group, extack); + node = parent_priv; + return mlx5_esw_qos_vport_update_node(vport, node, extack); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.h b/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.h index 0141e9d52037..61a6fdd5c267 
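For context, the callbacks touched above are the standard devlink rate hooks; a sketch of how such an ops table is typically wired up (the field names come from the generic devlink rate API, not from this hunk)::

    static const struct devlink_ops example_devlink_ops = {
            /* leaf (vport) limits */
            .rate_leaf_tx_share_set = mlx5_esw_devlink_rate_leaf_tx_share_set,
            .rate_leaf_tx_max_set   = mlx5_esw_devlink_rate_leaf_tx_max_set,
            /* node (group) limits and lifetime */
            .rate_node_tx_share_set = mlx5_esw_devlink_rate_node_tx_share_set,
            .rate_node_tx_max_set   = mlx5_esw_devlink_rate_node_tx_max_set,
            .rate_node_new          = mlx5_esw_devlink_rate_node_new,
            .rate_node_del          = mlx5_esw_devlink_rate_node_del,
            /* re-parenting a leaf under a node */
            .rate_leaf_parent_set   = mlx5_esw_devlink_rate_parent_set,
    };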
100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.h @@ -6,9 +6,15 @@ #ifdef CONFIG_MLX5_ESWITCH -int mlx5_esw_qos_set_vport_rate(struct mlx5_eswitch *esw, struct mlx5_vport *evport, - u32 max_rate, u32 min_rate); -void mlx5_esw_qos_vport_disable(struct mlx5_eswitch *esw, struct mlx5_vport *vport); +int mlx5_esw_qos_init(struct mlx5_eswitch *esw); +void mlx5_esw_qos_cleanup(struct mlx5_eswitch *esw); + +int mlx5_esw_qos_set_vport_rate(struct mlx5_vport *evport, u32 max_rate, u32 min_rate); +bool mlx5_esw_qos_get_vport_rate(struct mlx5_vport *vport, u32 *max_rate, u32 *min_rate); +void mlx5_esw_qos_vport_disable(struct mlx5_vport *vport); + +u32 mlx5_esw_qos_vport_get_sched_elem_ix(const struct mlx5_vport *vport); +struct mlx5_esw_sched_node *mlx5_esw_qos_vport_get_parent(const struct mlx5_vport *vport); int mlx5_esw_devlink_rate_leaf_tx_share_set(struct devlink_rate *rate_leaf, void *priv, u64 tx_share, struct netlink_ext_ack *extack); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c index 7aef30dbd82d..09719e9b8611 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c @@ -894,7 +894,7 @@ static void esw_vport_cleanup(struct mlx5_eswitch *esw, struct mlx5_vport *vport vport_num, 1, MLX5_VPORT_ADMIN_STATE_DOWN); - mlx5_esw_qos_vport_disable(esw, vport); + mlx5_esw_qos_vport_disable(vport); esw_vport_cleanup_acl(esw, vport); } @@ -1061,6 +1061,7 @@ static void mlx5_eswitch_clear_vf_vports_info(struct mlx5_eswitch *esw) unsigned long i; mlx5_esw_for_each_vf_vport(esw, i, vport, esw->esw_funcs.num_vfs) { + kfree(vport->qos.sched_node); memset(&vport->qos, 0, sizeof(vport->qos)); memset(&vport->info, 0, sizeof(vport->info)); vport->info.link_state = MLX5_VPORT_ADMIN_STATE_AUTO; @@ -1073,6 +1074,7 @@ static void mlx5_eswitch_clear_ec_vf_vports_info(struct mlx5_eswitch *esw) unsigned long i; mlx5_esw_for_each_ec_vf_vport(esw, i, vport, esw->esw_funcs.num_ec_vfs) { + kfree(vport->qos.sched_node); memset(&vport->qos, 0, sizeof(vport->qos)); memset(&vport->info, 0, sizeof(vport->info)); vport->info.link_state = MLX5_VPORT_ADMIN_STATE_AUTO; @@ -1481,6 +1483,10 @@ int mlx5_eswitch_enable_locked(struct mlx5_eswitch *esw, int num_vfs) MLX5_NB_INIT(&esw->nb, eswitch_vport_event, NIC_VPORT_CHANGE); mlx5_eq_notifier_register(esw->dev, &esw->nb); + err = mlx5_esw_qos_init(esw); + if (err) + goto err_qos_init; + if (esw->mode == MLX5_ESWITCH_LEGACY) { err = esw_legacy_enable(esw); } else { @@ -1504,6 +1510,8 @@ int mlx5_eswitch_enable_locked(struct mlx5_eswitch *esw, int num_vfs) return 0; err_esw_enable: + mlx5_esw_qos_cleanup(esw); +err_qos_init: mlx5_eq_notifier_unregister(esw->dev, &esw->nb); mlx5_esw_acls_ns_cleanup(esw); return err; @@ -1632,6 +1640,7 @@ void mlx5_eswitch_disable_locked(struct mlx5_eswitch *esw) if (esw->mode == MLX5_ESWITCH_OFFLOADS) devl_rate_nodes_destroy(devlink); + mlx5_esw_qos_cleanup(esw); } void mlx5_eswitch_disable(struct mlx5_eswitch *esw) @@ -2061,6 +2070,7 @@ int mlx5_eswitch_get_vport_config(struct mlx5_eswitch *esw, u16 vport, struct ifla_vf_info *ivi) { struct mlx5_vport *evport = mlx5_eswitch_get_vport(esw, vport); + u32 max_rate, min_rate; if (IS_ERR(evport)) return PTR_ERR(evport); @@ -2075,9 +2085,10 @@ int mlx5_eswitch_get_vport_config(struct mlx5_eswitch *esw, ivi->qos = evport->info.qos; ivi->spoofchk = evport->info.spoofchk; ivi->trusted = evport->info.trusted; - if 
(evport->qos.enabled) { - ivi->min_tx_rate = evport->qos.min_rate; - ivi->max_tx_rate = evport->qos.max_rate; + + if (mlx5_esw_qos_get_vport_rate(evport, &max_rate, &min_rate)) { + ivi->max_tx_rate = max_rate; + ivi->min_tx_rate = min_rate; } mutex_unlock(&esw->state_lock); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h index f44b4c7ebcfd..14dd42d44e6f 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h @@ -212,13 +212,10 @@ struct mlx5_vport { struct mlx5_vport_info info; + /* Protected with the E-Switch qos domain lock. */ struct { - bool enabled; - u32 esw_tsar_ix; - u32 bw_share; - u32 min_rate; - u32 max_rate; - struct mlx5_esw_rate_group *group; + /* Vport scheduling element node. */ + struct mlx5_esw_sched_node *sched_node; } qos; u16 vport; @@ -333,6 +330,7 @@ enum { }; struct dentry; +struct mlx5_qos_domain; struct mlx5_eswitch { struct mlx5_core_dev *dev; @@ -359,15 +357,17 @@ struct mlx5_eswitch { struct rw_semaphore mode_lock; atomic64_t user_count; + /* Protected with the E-Switch qos domain lock. */ struct { - u32 root_tsar_ix; - struct mlx5_esw_rate_group *group0; - struct list_head groups; /* Protected by esw->state_lock */ - - /* Protected by esw->state_lock. - * Initially 0, meaning no QoS users and QoS is disabled. - */ + /* Initially 0, meaning no QoS users and QoS is disabled. */ refcount_t refcnt; + u32 root_tsar_ix; + struct mlx5_qos_domain *domain; + /* Contains all vports with QoS enabled but no explicit node. + * Cannot be NULL if QoS is enabled, but may be a fake node + * referencing the root TSAR if the esw doesn't support nodes. + */ + struct mlx5_esw_sched_node *node0; } qos; struct mlx5_esw_bridge_offloads *br_offloads; @@ -427,10 +427,9 @@ int mlx5_eswitch_set_vport_trust(struct mlx5_eswitch *esw, u16 vport_num, bool setting); int mlx5_eswitch_set_vport_rate(struct mlx5_eswitch *esw, u16 vport, u32 max_rate, u32 min_rate); -int mlx5_esw_qos_vport_update_group(struct mlx5_eswitch *esw, - struct mlx5_vport *vport, - struct mlx5_esw_rate_group *group, - struct netlink_ext_ack *extack); +int mlx5_esw_qos_vport_update_node(struct mlx5_vport *vport, + struct mlx5_esw_sched_node *node, + struct netlink_ext_ack *extack); int mlx5_eswitch_set_vepa(struct mlx5_eswitch *esw, u8 setting); int mlx5_eswitch_get_vepa(struct mlx5_eswitch *esw, u8 *setting); int mlx5_eswitch_get_vport_config(struct mlx5_eswitch *esw, @@ -806,7 +805,7 @@ int mlx5_esw_offloads_sf_devlink_port_init(struct mlx5_eswitch *esw, struct mlx5 void mlx5_esw_offloads_sf_devlink_port_cleanup(struct mlx5_eswitch *esw, struct mlx5_vport *vport); int mlx5_esw_offloads_devlink_port_register(struct mlx5_eswitch *esw, struct mlx5_vport *vport); -void mlx5_esw_offloads_devlink_port_unregister(struct mlx5_eswitch *esw, struct mlx5_vport *vport); +void mlx5_esw_offloads_devlink_port_unregister(struct mlx5_vport *vport); struct devlink_port *mlx5_esw_offloads_devlink_port(struct mlx5_eswitch *esw, u16 vport_num); int mlx5_esw_sf_max_hpf_functions(struct mlx5_core_dev *dev, u16 *max_sfs, u16 *sf_base_id); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c index f24f91d213f2..fd34f43d18d5 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c @@ -2617,7 +2617,7 @@ int mlx5_esw_offloads_load_rep(struct mlx5_eswitch *esw, 
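With per-vport QoS state reduced to a single sched_node pointer, the two accessors declared in qos.h above can be plain dereferences. One plausible implementation (a sketch only; the patch declares these but their bodies are not shown here, and the ix and parent members are assumed from their use elsewhere in this diff)::

    u32 mlx5_esw_qos_vport_get_sched_elem_ix(const struct mlx5_vport *vport)
    {
            /* Only meaningful while QoS is enabled on the vport. */
            return vport->qos.sched_node->ix;
    }

    struct mlx5_esw_sched_node *
    mlx5_esw_qos_vport_get_parent(const struct mlx5_vport *vport)
    {
            return vport->qos.sched_node->parent;
    }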
struct mlx5_vport *vpor return err; load_err: - mlx5_esw_offloads_devlink_port_unregister(esw, vport); + mlx5_esw_offloads_devlink_port_unregister(vport); return err; } @@ -2628,7 +2628,7 @@ void mlx5_esw_offloads_unload_rep(struct mlx5_eswitch *esw, struct mlx5_vport *v mlx5_esw_offloads_rep_unload(esw, vport->vport); - mlx5_esw_offloads_devlink_port_unregister(esw, vport); + mlx5_esw_offloads_devlink_port_unregister(vport); } static int esw_set_slave_root_fdb(struct mlx5_core_dev *master, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h index 964937f17cf5..b30976627c6b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h @@ -63,7 +63,7 @@ struct mlx5_modify_hdr { enum mlx5_flow_namespace_type ns_type; enum mlx5_flow_resource_owner owner; union { - struct mlx5_fs_dr_action action; + struct mlx5_fs_dr_action fs_dr_action; u32 id; }; }; @@ -73,7 +73,7 @@ struct mlx5_pkt_reformat { int reformat_type; /* from mlx5_ifc */ enum mlx5_flow_resource_owner owner; union { - struct mlx5_fs_dr_action action; + struct mlx5_fs_dr_action fs_dr_action; u32 id; }; }; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c index 0c26d707eed2..62d0c689796b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c @@ -32,13 +32,11 @@ #include <linux/mlx5/driver.h> #include <linux/mlx5/fs.h> -#include <linux/rbtree.h> #include "mlx5_core.h" #include "fs_core.h" #include "fs_cmd.h" #define MLX5_FC_STATS_PERIOD msecs_to_jiffies(1000) -#define MLX5_FC_BULK_QUERY_ALLOC_PERIOD msecs_to_jiffies(180 * 1000) /* Max number of counters to query in bulk read is 32K */ #define MLX5_SW_MAX_COUNTERS_BULK BIT(15) #define MLX5_INIT_COUNTERS_BULK 8 @@ -52,21 +50,37 @@ struct mlx5_fc_cache { }; struct mlx5_fc { - struct list_head list; - struct llist_node addlist; - struct llist_node dellist; - - /* last{packets,bytes} members are used when calculating the delta since - * last reading - */ + u32 id; + bool aging; + struct mlx5_fc_bulk *bulk; + struct mlx5_fc_cache cache; + /* last{packets,bytes} are used for calculating deltas since last reading. 
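Renaming the anonymous union member from action to fs_dr_action makes it explicit that the field is only valid for software-steering owned objects, while firmware-owned ones carry a plain id. A sketch of the guarded access this encourages (the helper name is hypothetical)::

    static struct mlx5dr_action *
    example_pkt_reformat_dr_action(struct mlx5_pkt_reformat *pr)
    {
            /* FW-owned reformats have no DR action, only an id. */
            if (pr->owner != MLX5_FLOW_RESOURCE_OWNER_SW)
                    return NULL;

            return pr->fs_dr_action.dr_action;
    }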
*/ u64 lastpackets; u64 lastbytes; +}; - struct mlx5_fc_bulk *bulk; - u32 id; - bool aging; +struct mlx5_fc_pool { + struct mlx5_core_dev *dev; + struct mutex pool_lock; /* protects pool lists */ + struct list_head fully_used; + struct list_head partially_used; + struct list_head unused; + int available_fcs; + int used_fcs; + int threshold; +}; - struct mlx5_fc_cache cache ____cacheline_aligned_in_smp; +struct mlx5_fc_stats { + struct xarray counters; + + struct workqueue_struct *wq; + struct delayed_work work; + unsigned long sampling_interval; /* jiffies */ + u32 *bulk_query_out; + int bulk_query_len; + bool bulk_query_alloc_failed; + unsigned long next_bulk_query_alloc; + struct mlx5_fc_pool fc_pool; }; static void mlx5_fc_pool_init(struct mlx5_fc_pool *fc_pool, struct mlx5_core_dev *dev); @@ -74,78 +88,6 @@ static void mlx5_fc_pool_cleanup(struct mlx5_fc_pool *fc_pool); static struct mlx5_fc *mlx5_fc_pool_acquire_counter(struct mlx5_fc_pool *fc_pool); static void mlx5_fc_pool_release_counter(struct mlx5_fc_pool *fc_pool, struct mlx5_fc *fc); -/* locking scheme: - * - * It is the responsibility of the user to prevent concurrent calls or bad - * ordering to mlx5_fc_create(), mlx5_fc_destroy() and accessing a reference - * to struct mlx5_fc. - * e.g en_tc.c is protected by RTNL lock of its caller, and will never call a - * dump (access to struct mlx5_fc) after a counter is destroyed. - * - * access to counter list: - * - create (user context) - * - mlx5_fc_create() only adds to an addlist to be used by - * mlx5_fc_stats_work(). addlist is a lockless single linked list - * that doesn't require any additional synchronization when adding single - * node. - * - spawn thread to do the actual destroy - * - * - destroy (user context) - * - add a counter to lockless dellist - * - spawn thread to do the actual del - * - * - dump (user context) - * user should not call dump after destroy - * - * - query (single thread workqueue context) - * destroy/dump - no conflict (see destroy) - * query/dump - packets and bytes might be inconsistent (since update is not - * atomic) - * query/create - no conflict (see create) - * since every create/destroy spawn the work, only after necessary time has - * elapsed, the thread will actually query the hardware. - */ - -static struct list_head *mlx5_fc_counters_lookup_next(struct mlx5_core_dev *dev, - u32 id) -{ - struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats; - unsigned long next_id = (unsigned long)id + 1; - struct mlx5_fc *counter; - unsigned long tmp; - - rcu_read_lock(); - /* skip counters that are in idr, but not yet in counters list */ - idr_for_each_entry_continue_ul(&fc_stats->counters_idr, - counter, tmp, next_id) { - if (!list_empty(&counter->list)) - break; - } - rcu_read_unlock(); - - return counter ? 
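struct mlx5_fc_pool and struct mlx5_fc_stats move out of the shared headers and become private to fs_counters.c, and the idr plus the two lockless llists collapse into a single xarray keyed by counter id. A standalone sketch of the xarray pattern being adopted (a generic example, not the driver code itself)::

    #include <linux/xarray.h>

    static DEFINE_XARRAY(example_counters);

    static int example_track(void *counter, u32 id)
    {
            /* xa_store() returns the displaced entry or an encoded error */
            return xa_err(xa_store(&example_counters, id, counter, GFP_KERNEL));
    }

    static void example_untrack(u32 id)
    {
            xa_erase(&example_counters, id);
    }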
&counter->list : &fc_stats->counters; -} - -static void mlx5_fc_stats_insert(struct mlx5_core_dev *dev, - struct mlx5_fc *counter) -{ - struct list_head *next = mlx5_fc_counters_lookup_next(dev, counter->id); - - list_add_tail(&counter->list, next); -} - -static void mlx5_fc_stats_remove(struct mlx5_core_dev *dev, - struct mlx5_fc *counter) -{ - struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats; - - list_del(&counter->list); - - spin_lock(&fc_stats->counters_idr_lock); - WARN_ON(!idr_remove(&fc_stats->counters_idr, counter->id)); - spin_unlock(&fc_stats->counters_idr_lock); -} - static int get_init_bulk_query_len(struct mlx5_core_dev *dev) { return min_t(int, MLX5_INIT_COUNTERS_BULK, @@ -174,47 +116,64 @@ static void update_counter_cache(int index, u32 *bulk_raw_data, cache->lastuse = jiffies; } -static void mlx5_fc_stats_query_counter_range(struct mlx5_core_dev *dev, - struct mlx5_fc *first, - u32 last_id) +/* Synchronization notes + * + * Access to counter array: + * - create - mlx5_fc_create() (user context) + * - inserts the counter into the xarray. + * + * - destroy - mlx5_fc_destroy() (user context) + * - erases the counter from the xarray and releases it. + * + * - query mlx5_fc_query(), mlx5_fc_query_cached{,_raw}() (user context) + * - user should not access a counter after destroy. + * + * - bulk query (single thread workqueue context) + * - create: query relies on 'lastuse' to avoid updating counters added + * around the same time as the current bulk cmd. + * - destroy: destroyed counters will not be accessed, even if they are + * destroyed during a bulk query command. + */ +static void mlx5_fc_stats_query_all_counters(struct mlx5_core_dev *dev) { - struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats; - bool query_more_counters = (first->id <= last_id); - int cur_bulk_len = fc_stats->bulk_query_len; + struct mlx5_fc_stats *fc_stats = dev->priv.fc_stats; + u32 bulk_len = fc_stats->bulk_query_len; + XA_STATE(xas, &fc_stats->counters, 0); u32 *data = fc_stats->bulk_query_out; - struct mlx5_fc *counter = first; + struct mlx5_fc *counter; + u32 last_bulk_id = 0; + u64 bulk_query_time; u32 bulk_base_id; - int bulk_len; int err; - while (query_more_counters) { - /* first id must be aligned to 4 when using bulk query */ - bulk_base_id = counter->id & ~0x3; - - /* number of counters to query inc. the last counter */ - bulk_len = min_t(int, cur_bulk_len, - ALIGN(last_id - bulk_base_id + 1, 4)); - - err = mlx5_cmd_fc_bulk_query(dev, bulk_base_id, bulk_len, - data); - if (err) { - mlx5_core_err(dev, "Error doing bulk query: %d\n", err); - return; - } - query_more_counters = false; - - list_for_each_entry_from(counter, &fc_stats->counters, list) { - int counter_index = counter->id - bulk_base_id; - struct mlx5_fc_cache *cache = &counter->cache; - - if (counter->id >= bulk_base_id + bulk_len) { - query_more_counters = true; - break; + xas_lock(&xas); + xas_for_each(&xas, counter, U32_MAX) { + if (xas_retry(&xas, counter)) + continue; + if (unlikely(counter->id >= last_bulk_id)) { + /* Start new bulk query. */ + /* First id must be aligned to 4 when using bulk query. */ + bulk_base_id = counter->id & ~0x3; + last_bulk_id = bulk_base_id + bulk_len; + /* The lock is released while querying the hw and reacquired after. */ + xas_unlock(&xas); + /* The same id needs to be processed again in the next loop iteration. 
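The heart of the new bulk query is an xarray walk that releases the lock around the sleeping firmware command and revalidates its cursor before continuing. Distilled to the synchronization skeleton (the example_* helpers are hypothetical stand-ins for the command and cache-update code above)::

    XA_STATE(xas, &fc_stats->counters, 0);
    struct mlx5_fc *counter;

    xas_lock(&xas);
    xas_for_each(&xas, counter, U32_MAX) {
            if (xas_retry(&xas, counter))
                    continue;
            if (counter->id >= last_bulk_id) {
                    xas_unlock(&xas);
                    /* Entries may change while unlocked; restart the
                     * lookup at this same index afterwards.
                     */
                    xas_reset(&xas);
                    example_query_hw_bulk(counter->id & ~0x3);  /* may sleep */
                    xas_lock(&xas);
                    continue;
            }
            example_update_cache(counter);
    }
    xas_unlock(&xas);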
*/ + xas_reset(&xas); + bulk_query_time = jiffies; + err = mlx5_cmd_fc_bulk_query(dev, bulk_base_id, bulk_len, data); + if (err) { + mlx5_core_err(dev, "Error doing bulk query: %d\n", err); + return; } - - update_counter_cache(counter_index, data, cache); + xas_lock(&xas); + continue; } + /* Do not update counters added after bulk query was started. */ + if (time_after64(bulk_query_time, counter->cache.lastuse)) + update_counter_cache(counter->id - bulk_base_id, data, + &counter->cache); } + xas_unlock(&xas); } static void mlx5_fc_free(struct mlx5_core_dev *dev, struct mlx5_fc *counter) @@ -225,7 +184,7 @@ static void mlx5_fc_free(struct mlx5_core_dev *dev, struct mlx5_fc *counter) static void mlx5_fc_release(struct mlx5_core_dev *dev, struct mlx5_fc *counter) { - struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats; + struct mlx5_fc_stats *fc_stats = dev->priv.fc_stats; if (counter->bulk) mlx5_fc_pool_release_counter(&fc_stats->fc_pool, counter); @@ -233,85 +192,55 @@ static void mlx5_fc_release(struct mlx5_core_dev *dev, struct mlx5_fc *counter) mlx5_fc_free(dev, counter); } -static void mlx5_fc_stats_bulk_query_size_increase(struct mlx5_core_dev *dev) +static void mlx5_fc_stats_bulk_query_buf_realloc(struct mlx5_core_dev *dev, + int bulk_query_len) { - struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats; - int max_bulk_len = get_max_bulk_query_len(dev); - unsigned long now = jiffies; + struct mlx5_fc_stats *fc_stats = dev->priv.fc_stats; u32 *bulk_query_out_tmp; - int max_out_len; - - if (fc_stats->bulk_query_alloc_failed && - time_before(now, fc_stats->next_bulk_query_alloc)) - return; + int out_len; - max_out_len = mlx5_cmd_fc_get_bulk_query_out_len(max_bulk_len); - bulk_query_out_tmp = kzalloc(max_out_len, GFP_KERNEL); + out_len = mlx5_cmd_fc_get_bulk_query_out_len(bulk_query_len); + bulk_query_out_tmp = kvzalloc(out_len, GFP_KERNEL); if (!bulk_query_out_tmp) { mlx5_core_warn_once(dev, - "Can't increase flow counters bulk query buffer size, insufficient memory, bulk_size(%d)\n", - max_bulk_len); - fc_stats->bulk_query_alloc_failed = true; - fc_stats->next_bulk_query_alloc = - now + MLX5_FC_BULK_QUERY_ALLOC_PERIOD; + "Can't increase flow counters bulk query buffer size, alloc failed, bulk_query_len(%d)\n", + bulk_query_len); return; } - kfree(fc_stats->bulk_query_out); + kvfree(fc_stats->bulk_query_out); fc_stats->bulk_query_out = bulk_query_out_tmp; - fc_stats->bulk_query_len = max_bulk_len; - if (fc_stats->bulk_query_alloc_failed) { - mlx5_core_info(dev, - "Flow counters bulk query buffer size increased, bulk_size(%d)\n", - max_bulk_len); - fc_stats->bulk_query_alloc_failed = false; - } + fc_stats->bulk_query_len = bulk_query_len; + mlx5_core_info(dev, + "Flow counters bulk query buffer size increased, bulk_query_len(%d)\n", + bulk_query_len); } -static void mlx5_fc_stats_work(struct work_struct *work) +static int mlx5_fc_num_counters(struct mlx5_fc_stats *fc_stats) { - struct mlx5_core_dev *dev = container_of(work, struct mlx5_core_dev, - priv.fc_stats.work.work); - struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats; - /* Take dellist first to ensure that counters cannot be deleted before - * they are inserted. 
- */ - struct llist_node *dellist = llist_del_all(&fc_stats->dellist); - struct llist_node *addlist = llist_del_all(&fc_stats->addlist); - struct mlx5_fc *counter = NULL, *last = NULL, *tmp; - unsigned long now = jiffies; - - if (addlist || !list_empty(&fc_stats->counters)) - queue_delayed_work(fc_stats->wq, &fc_stats->work, - fc_stats->sampling_interval); - - llist_for_each_entry(counter, addlist, addlist) { - mlx5_fc_stats_insert(dev, counter); - fc_stats->num_counters++; - } - - llist_for_each_entry_safe(counter, tmp, dellist, dellist) { - mlx5_fc_stats_remove(dev, counter); + struct mlx5_fc *counter; + int num_counters = 0; + unsigned long id; - mlx5_fc_release(dev, counter); - fc_stats->num_counters--; - } + xa_for_each(&fc_stats->counters, id, counter) + num_counters++; + return num_counters; +} - if (fc_stats->bulk_query_len < get_max_bulk_query_len(dev) && - fc_stats->num_counters > get_init_bulk_query_len(dev)) - mlx5_fc_stats_bulk_query_size_increase(dev); +static void mlx5_fc_stats_work(struct work_struct *work) +{ + struct mlx5_fc_stats *fc_stats = container_of(work, struct mlx5_fc_stats, + work.work); + struct mlx5_core_dev *dev = fc_stats->fc_pool.dev; - if (time_before(now, fc_stats->next_query) || - list_empty(&fc_stats->counters)) - return; - last = list_last_entry(&fc_stats->counters, struct mlx5_fc, list); + queue_delayed_work(fc_stats->wq, &fc_stats->work, fc_stats->sampling_interval); - counter = list_first_entry(&fc_stats->counters, struct mlx5_fc, - list); - if (counter) - mlx5_fc_stats_query_counter_range(dev, counter, last->id); + /* Grow the bulk query buffer to max if not maxed and enough counters are present. */ + if (unlikely(fc_stats->bulk_query_len < get_max_bulk_query_len(dev) && + mlx5_fc_num_counters(fc_stats) > get_init_bulk_query_len(dev))) + mlx5_fc_stats_bulk_query_buf_realloc(dev, get_max_bulk_query_len(dev)); - fc_stats->next_query = now + fc_stats->sampling_interval; + mlx5_fc_stats_query_all_counters(dev); } static struct mlx5_fc *mlx5_fc_single_alloc(struct mlx5_core_dev *dev) @@ -334,7 +263,7 @@ static struct mlx5_fc *mlx5_fc_single_alloc(struct mlx5_core_dev *dev) static struct mlx5_fc *mlx5_fc_acquire(struct mlx5_core_dev *dev, bool aging) { - struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats; + struct mlx5_fc_stats *fc_stats = dev->priv.fc_stats; struct mlx5_fc *counter; if (aging && MLX5_CAP_GEN(dev, flow_counter_bulk_alloc) != 0) { @@ -346,16 +275,15 @@ static struct mlx5_fc *mlx5_fc_acquire(struct mlx5_core_dev *dev, bool aging) return mlx5_fc_single_alloc(dev); } -struct mlx5_fc *mlx5_fc_create_ex(struct mlx5_core_dev *dev, bool aging) +struct mlx5_fc *mlx5_fc_create(struct mlx5_core_dev *dev, bool aging) { struct mlx5_fc *counter = mlx5_fc_acquire(dev, aging); - struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats; + struct mlx5_fc_stats *fc_stats = dev->priv.fc_stats; int err; if (IS_ERR(counter)) return counter; - INIT_LIST_HEAD(&counter->list); counter->aging = aging; if (aging) { @@ -365,18 +293,9 @@ struct mlx5_fc *mlx5_fc_create_ex(struct mlx5_core_dev *dev, bool aging) counter->lastbytes = counter->cache.bytes; counter->lastpackets = counter->cache.packets; - idr_preload(GFP_KERNEL); - spin_lock(&fc_stats->counters_idr_lock); - - err = idr_alloc_u32(&fc_stats->counters_idr, counter, &id, id, - GFP_NOWAIT); - - spin_unlock(&fc_stats->counters_idr_lock); - idr_preload_end(); - if (err) + err = xa_err(xa_store(&fc_stats->counters, id, counter, GFP_KERNEL)); + if (err != 0) goto err_out_alloc; - - llist_add(&counter->addlist, 
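With the add/del lists gone, the periodic work reduces to: re-arm, opportunistically grow the bulk buffer, then query everything. The self-re-arming shape, condensed from the code above::

    static void example_stats_work(struct work_struct *work)
    {
            struct mlx5_fc_stats *fc_stats =
                    container_of(work, struct mlx5_fc_stats, work.work);

            /* Re-arm before querying so a slow firmware command does
             * not push out the next sampling period.
             */
            queue_delayed_work(fc_stats->wq, &fc_stats->work,
                               fc_stats->sampling_interval);

            /* ... grow bulk_query_out if warranted, then bulk-query ... */
    }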
&fc_stats->addlist); } return counter; @@ -385,16 +304,6 @@ err_out_alloc: mlx5_fc_release(dev, counter); return ERR_PTR(err); } - -struct mlx5_fc *mlx5_fc_create(struct mlx5_core_dev *dev, bool aging) -{ - struct mlx5_fc *counter = mlx5_fc_create_ex(dev, aging); - struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats; - - if (aging) - mod_delayed_work(fc_stats->wq, &fc_stats->work, 0); - return counter; -} EXPORT_SYMBOL(mlx5_fc_create); u32 mlx5_fc_id(struct mlx5_fc *counter) @@ -405,39 +314,32 @@ EXPORT_SYMBOL(mlx5_fc_id); void mlx5_fc_destroy(struct mlx5_core_dev *dev, struct mlx5_fc *counter) { - struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats; + struct mlx5_fc_stats *fc_stats = dev->priv.fc_stats; if (!counter) return; - if (counter->aging) { - llist_add(&counter->dellist, &fc_stats->dellist); - mod_delayed_work(fc_stats->wq, &fc_stats->work, 0); - return; - } - + if (counter->aging) + xa_erase(&fc_stats->counters, counter->id); mlx5_fc_release(dev, counter); } EXPORT_SYMBOL(mlx5_fc_destroy); int mlx5_init_fc_stats(struct mlx5_core_dev *dev) { - struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats; - int init_bulk_len; - int init_out_len; + struct mlx5_fc_stats *fc_stats; + + fc_stats = kzalloc(sizeof(*fc_stats), GFP_KERNEL); + if (!fc_stats) + return -ENOMEM; + dev->priv.fc_stats = fc_stats; - spin_lock_init(&fc_stats->counters_idr_lock); - idr_init(&fc_stats->counters_idr); - INIT_LIST_HEAD(&fc_stats->counters); - init_llist_head(&fc_stats->addlist); - init_llist_head(&fc_stats->dellist); + xa_init(&fc_stats->counters); - init_bulk_len = get_init_bulk_query_len(dev); - init_out_len = mlx5_cmd_fc_get_bulk_query_out_len(init_bulk_len); - fc_stats->bulk_query_out = kzalloc(init_out_len, GFP_KERNEL); + /* Allocate initial (small) bulk query buffer. 
*/ + mlx5_fc_stats_bulk_query_buf_realloc(dev, get_init_bulk_query_len(dev)); if (!fc_stats->bulk_query_out) - return -ENOMEM; - fc_stats->bulk_query_len = init_bulk_len; + goto err_bulk; fc_stats->wq = create_singlethread_workqueue("mlx5_fc"); if (!fc_stats->wq) @@ -447,34 +349,35 @@ int mlx5_init_fc_stats(struct mlx5_core_dev *dev) INIT_DELAYED_WORK(&fc_stats->work, mlx5_fc_stats_work); mlx5_fc_pool_init(&fc_stats->fc_pool, dev); + queue_delayed_work(fc_stats->wq, &fc_stats->work, MLX5_FC_STATS_PERIOD); return 0; err_wq_create: - kfree(fc_stats->bulk_query_out); + kvfree(fc_stats->bulk_query_out); +err_bulk: + kfree(fc_stats); return -ENOMEM; } void mlx5_cleanup_fc_stats(struct mlx5_core_dev *dev) { - struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats; - struct llist_node *tmplist; + struct mlx5_fc_stats *fc_stats = dev->priv.fc_stats; struct mlx5_fc *counter; - struct mlx5_fc *tmp; - - cancel_delayed_work_sync(&dev->priv.fc_stats.work); - destroy_workqueue(dev->priv.fc_stats.wq); - dev->priv.fc_stats.wq = NULL; + unsigned long id; - tmplist = llist_del_all(&fc_stats->addlist); - llist_for_each_entry_safe(counter, tmp, tmplist, addlist) - mlx5_fc_release(dev, counter); + cancel_delayed_work_sync(&fc_stats->work); + destroy_workqueue(fc_stats->wq); + fc_stats->wq = NULL; - list_for_each_entry_safe(counter, tmp, &fc_stats->counters, list) + xa_for_each(&fc_stats->counters, id, counter) { + xa_erase(&fc_stats->counters, id); mlx5_fc_release(dev, counter); + } + xa_destroy(&fc_stats->counters); mlx5_fc_pool_cleanup(&fc_stats->fc_pool); - idr_destroy(&fc_stats->counters_idr); - kfree(fc_stats->bulk_query_out); + kvfree(fc_stats->bulk_query_out); + kfree(fc_stats); } int mlx5_fc_query(struct mlx5_core_dev *dev, struct mlx5_fc *counter, @@ -518,7 +421,7 @@ void mlx5_fc_queue_stats_work(struct mlx5_core_dev *dev, struct delayed_work *dwork, unsigned long delay) { - struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats; + struct mlx5_fc_stats *fc_stats = dev->priv.fc_stats; queue_delayed_work(fc_stats->wq, dwork, delay); } @@ -526,7 +429,7 @@ void mlx5_fc_queue_stats_work(struct mlx5_core_dev *dev, void mlx5_fc_update_sampling_interval(struct mlx5_core_dev *dev, unsigned long interval) { - struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats; + struct mlx5_fc_stats *fc_stats = dev->priv.fc_stats; fc_stats->sampling_interval = min_t(unsigned long, interval, fc_stats->sampling_interval); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c b/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c index 4f55e55ecb55..566710d34a7b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c @@ -35,6 +35,7 @@ struct mlx5_fw_reset { enum { MLX5_FW_RST_STATE_IDLE = 0, MLX5_FW_RST_STATE_TOGGLE_REQ = 4, + MLX5_FW_RST_STATE_DROP_MODE = 5, }; enum { @@ -616,6 +617,7 @@ static void mlx5_sync_reset_unload_event(struct work_struct *work) struct mlx5_fw_reset *fw_reset; struct mlx5_core_dev *dev; unsigned long timeout; + int poll_freq = 20; bool reset_action; u8 rst_state; int err; @@ -651,7 +653,12 @@ static void mlx5_sync_reset_unload_event(struct work_struct *work) reset_action = true; break; } - msleep(20); + if (rst_state == MLX5_FW_RST_STATE_DROP_MODE) { + mlx5_core_info(dev, "Sync Reset Drop mode ack\n"); + mlx5_set_fw_rst_ack(dev); + poll_freq = 1000; + } + msleep(poll_freq); } while (!time_after(jiffies, timeout)); if (!reset_action) { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h 
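The fw_reset hunk teaches the unload wait loop about the new DROP_MODE state: the driver acknowledges it once and then backs off from 20 ms to 1 s polling. Reduced to the control flow (timeout setup and the reset-action bookkeeping are elided; the state-read helper is a hypothetical stand-in)::

    int poll_freq = 20;     /* ms; fast poll by default */
    u8 rst_state;

    do {
            rst_state = example_read_fw_rst_state(dev);
            if (rst_state == MLX5_FW_RST_STATE_IDLE ||
                rst_state == MLX5_FW_RST_STATE_TOGGLE_REQ)
                    break;                          /* ready to reset */
            if (rst_state == MLX5_FW_RST_STATE_DROP_MODE) {
                    mlx5_set_fw_rst_ack(dev);
                    poll_freq = 1000;               /* back off to 1 s */
            }
            msleep(poll_freq);
    } while (!time_after(jiffies, timeout));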
b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h index 62c770b0eaa8..99de67c3aa74 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h @@ -224,6 +224,8 @@ void mlx5_sriov_disable(struct pci_dev *pdev, bool num_vf_change); int mlx5_core_sriov_set_msix_vec_count(struct pci_dev *vf, int msix_vec_count); int mlx5_core_enable_hca(struct mlx5_core_dev *dev, u16 func_id); int mlx5_core_disable_hca(struct mlx5_core_dev *dev, u16 func_id); +bool mlx5_qos_element_type_supported(struct mlx5_core_dev *dev, int type, u8 hierarchy); +bool mlx5_qos_tsar_type_supported(struct mlx5_core_dev *dev, int type, u8 hierarchy); int mlx5_create_scheduling_element_cmd(struct mlx5_core_dev *dev, u8 hierarchy, void *context, u32 *element_id); int mlx5_modify_scheduling_element_cmd(struct mlx5_core_dev *dev, u8 hierarchy, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/qos.c b/drivers/net/ethernet/mellanox/mlx5/core/qos.c index db2bd3ad63ba..6be9981bb6b1 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/qos.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/qos.c @@ -28,7 +28,9 @@ int mlx5_qos_create_leaf_node(struct mlx5_core_dev *mdev, u32 parent_id, { u32 sched_ctx[MLX5_ST_SZ_DW(scheduling_context)] = {0}; - if (!(MLX5_CAP_QOS(mdev, nic_element_type) & ELEMENT_TYPE_CAP_MASK_QUEUE_GROUP)) + if (!mlx5_qos_element_type_supported(mdev, + SCHEDULING_CONTEXT_ELEMENT_TYPE_QUEUE_GROUP, + SCHEDULING_HIERARCHY_NIC)) return -EOPNOTSUPP; MLX5_SET(scheduling_context, sched_ctx, parent_element_id, parent_id); @@ -47,8 +49,12 @@ int mlx5_qos_create_inner_node(struct mlx5_core_dev *mdev, u32 parent_id, u32 sched_ctx[MLX5_ST_SZ_DW(scheduling_context)] = {0}; void *attr; - if (!(MLX5_CAP_QOS(mdev, nic_element_type) & ELEMENT_TYPE_CAP_MASK_TSAR) || - !(MLX5_CAP_QOS(mdev, nic_tsar_type) & TSAR_TYPE_CAP_MASK_DWRR)) + if (!mlx5_qos_element_type_supported(mdev, + SCHEDULING_CONTEXT_ELEMENT_TYPE_TSAR, + SCHEDULING_HIERARCHY_NIC) || + !mlx5_qos_tsar_type_supported(mdev, + TSAR_ELEMENT_TSAR_TYPE_DWRR, + SCHEDULING_HIERARCHY_NIC)) return -EOPNOTSUPP; MLX5_SET(scheduling_context, sched_ctx, parent_element_id, parent_id); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/rl.c b/drivers/net/ethernet/mellanox/mlx5/core/rl.c index 9f8b4005f4bd..e393391966e0 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/rl.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/rl.c @@ -34,6 +34,64 @@ #include <linux/mlx5/driver.h> #include "mlx5_core.h" +bool mlx5_qos_tsar_type_supported(struct mlx5_core_dev *dev, int type, u8 hierarchy) +{ + int cap; + + switch (hierarchy) { + case SCHEDULING_HIERARCHY_E_SWITCH: + cap = MLX5_CAP_QOS(dev, esw_tsar_type); + break; + case SCHEDULING_HIERARCHY_NIC: + cap = MLX5_CAP_QOS(dev, nic_tsar_type); + break; + default: + return false; + } + + switch (type) { + case TSAR_ELEMENT_TSAR_TYPE_DWRR: + return cap & TSAR_TYPE_CAP_MASK_DWRR; + case TSAR_ELEMENT_TSAR_TYPE_ROUND_ROBIN: + return cap & TSAR_TYPE_CAP_MASK_ROUND_ROBIN; + case TSAR_ELEMENT_TSAR_TYPE_ETS: + return cap & TSAR_TYPE_CAP_MASK_ETS; + } + + return false; +} + +bool mlx5_qos_element_type_supported(struct mlx5_core_dev *dev, int type, u8 hierarchy) +{ + int cap; + + switch (hierarchy) { + case SCHEDULING_HIERARCHY_E_SWITCH: + cap = MLX5_CAP_QOS(dev, esw_element_type); + break; + case SCHEDULING_HIERARCHY_NIC: + cap = MLX5_CAP_QOS(dev, nic_element_type); + break; + default: + return false; + } + + switch (type) { + case SCHEDULING_CONTEXT_ELEMENT_TYPE_TSAR: + return cap & 
ELEMENT_TYPE_CAP_MASK_TSAR; + case SCHEDULING_CONTEXT_ELEMENT_TYPE_VPORT: + return cap & ELEMENT_TYPE_CAP_MASK_VPORT; + case SCHEDULING_CONTEXT_ELEMENT_TYPE_VPORT_TC: + return cap & ELEMENT_TYPE_CAP_MASK_VPORT_TC; + case SCHEDULING_CONTEXT_ELEMENT_TYPE_PARA_VPORT_TC: + return cap & ELEMENT_TYPE_CAP_MASK_PARA_VPORT_TC; + case SCHEDULING_CONTEXT_ELEMENT_TYPE_QUEUE_GROUP: + return cap & ELEMENT_TYPE_CAP_MASK_QUEUE_GROUP; + } + + return false; +} + /* Scheduling element fw management */ int mlx5_create_scheduling_element_cmd(struct mlx5_core_dev *dev, u8 hierarchy, void *ctx, u32 *element_id) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c index 833cb68c744f..4b349d4005e4 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c @@ -256,6 +256,7 @@ static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns, { struct mlx5dr_domain *domain = ns->fs_dr_domain.dr_domain; struct mlx5dr_action_dest *term_actions; + struct mlx5_pkt_reformat *pkt_reformat; struct mlx5dr_match_parameters params; struct mlx5_core_dev *dev = ns->dev; struct mlx5dr_action **fs_dr_actions; @@ -332,18 +333,19 @@ static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns, if (fte->act_dests.action.action & MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT) { bool is_decap; - if (fte->act_dests.action.pkt_reformat->owner == MLX5_FLOW_RESOURCE_OWNER_FW) { + pkt_reformat = fte->act_dests.action.pkt_reformat; + if (pkt_reformat->owner == MLX5_FLOW_RESOURCE_OWNER_FW) { err = -EINVAL; mlx5dr_err(domain, "FW-owned reformat can't be used in SW rule\n"); goto free_actions; } - is_decap = fte->act_dests.action.pkt_reformat->reformat_type == + is_decap = pkt_reformat->reformat_type == MLX5_REFORMAT_TYPE_L3_TUNNEL_TO_L2; if (is_decap) actions[num_actions++] = - fte->act_dests.action.pkt_reformat->action.dr_action; + pkt_reformat->fs_dr_action.dr_action; else delay_encap_set = true; } @@ -370,9 +372,11 @@ static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns, actions[num_actions++] = tmp_action; } - if (fte->act_dests.action.action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) - actions[num_actions++] = - fte->act_dests.action.modify_hdr->action.dr_action; + if (fte->act_dests.action.action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) { + struct mlx5_modify_hdr *modify_hdr = fte->act_dests.action.modify_hdr; + + actions[num_actions++] = modify_hdr->fs_dr_action.dr_action; + } if (fte->act_dests.action.action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH) { tmp_action = create_action_push_vlan(domain, &fte->act_dests.action.vlan[0]); @@ -395,8 +399,7 @@ static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns, } if (delay_encap_set) - actions[num_actions++] = - fte->act_dests.action.pkt_reformat->action.dr_action; + actions[num_actions++] = pkt_reformat->fs_dr_action.dr_action; /* The order of the actions below is not important */ @@ -458,9 +461,11 @@ static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns, term_actions[num_term_actions].dest = tmp_action; if (dst->dest_attr.vport.flags & - MLX5_FLOW_DEST_VPORT_REFORMAT_ID) + MLX5_FLOW_DEST_VPORT_REFORMAT_ID) { + pkt_reformat = dst->dest_attr.vport.pkt_reformat; term_actions[num_term_actions].reformat = - dst->dest_attr.vport.pkt_reformat->action.dr_action; + pkt_reformat->fs_dr_action.dr_action; + } num_term_actions++; break; @@ -671,7 +676,7 @@ static int mlx5_cmd_dr_packet_reformat_alloc(struct 
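These predicates replace open-coded capability-mask tests and take the scheduling hierarchy as a parameter, so NIC and e-switch callers share one implementation. A hypothetical e-switch-side caller, using only enums visible in this patch::

    if (!mlx5_qos_element_type_supported(dev,
                                         SCHEDULING_CONTEXT_ELEMENT_TYPE_VPORT,
                                         SCHEDULING_HIERARCHY_E_SWITCH))
            return -EOPNOTSUPP;

    if (!mlx5_qos_tsar_type_supported(dev, TSAR_ELEMENT_TSAR_TYPE_DWRR,
                                      SCHEDULING_HIERARCHY_E_SWITCH))
            return -EOPNOTSUPP;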
mlx5_flow_root_namespace *ns } pkt_reformat->owner = MLX5_FLOW_RESOURCE_OWNER_SW; - pkt_reformat->action.dr_action = action; + pkt_reformat->fs_dr_action.dr_action = action; return 0; } @@ -679,7 +684,7 @@ static int mlx5_cmd_dr_packet_reformat_alloc(struct mlx5_flow_root_namespace *ns static void mlx5_cmd_dr_packet_reformat_dealloc(struct mlx5_flow_root_namespace *ns, struct mlx5_pkt_reformat *pkt_reformat) { - mlx5dr_action_destroy(pkt_reformat->action.dr_action); + mlx5dr_action_destroy(pkt_reformat->fs_dr_action.dr_action); } static int mlx5_cmd_dr_modify_header_alloc(struct mlx5_flow_root_namespace *ns, @@ -702,7 +707,7 @@ static int mlx5_cmd_dr_modify_header_alloc(struct mlx5_flow_root_namespace *ns, } modify_hdr->owner = MLX5_FLOW_RESOURCE_OWNER_SW; - modify_hdr->action.dr_action = action; + modify_hdr->fs_dr_action.dr_action = action; return 0; } @@ -710,7 +715,7 @@ static int mlx5_cmd_dr_modify_header_alloc(struct mlx5_flow_root_namespace *ns, static void mlx5_cmd_dr_modify_header_dealloc(struct mlx5_flow_root_namespace *ns, struct mlx5_modify_hdr *modify_hdr) { - mlx5dr_action_destroy(modify_hdr->action.dr_action); + mlx5dr_action_destroy(modify_hdr->fs_dr_action.dr_action); } static int @@ -836,7 +841,7 @@ int mlx5_fs_dr_action_get_pkt_reformat_id(struct mlx5_pkt_reformat *pkt_reformat case MLX5_REFORMAT_TYPE_L2_TO_L2_TUNNEL: case MLX5_REFORMAT_TYPE_L2_TO_L3_TUNNEL: case MLX5_REFORMAT_TYPE_INSERT_HDR: - return mlx5dr_action_get_pkt_reformat_id(pkt_reformat->action.dr_action); + return mlx5dr_action_get_pkt_reformat_id(pkt_reformat->fs_dr_action.dr_action); } return -EOPNOTSUPP; } diff --git a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_main.c b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_main.c index 385a56ac7348..fb2e5b844c15 100644 --- a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_main.c +++ b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_main.c @@ -520,7 +520,7 @@ MODULE_DEVICE_TABLE(acpi, mlxbf_gige_acpi_match); static struct platform_driver mlxbf_gige_driver = { .probe = mlxbf_gige_probe, - .remove_new = mlxbf_gige_remove, + .remove = mlxbf_gige_remove, .shutdown = mlxbf_gige_shutdown, .driver = { .name = KBUILD_MODNAME, diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.c b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.c index 947500f8ed71..7aa1a462a103 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.c +++ b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.c @@ -67,7 +67,7 @@ static bool mlxsw_afk_blocks_check(struct mlxsw_afk *mlxsw_afk) for (j = 0; j < block->instances_count; j++) { const struct mlxsw_afk_element_info *elinfo; - struct mlxsw_afk_element_inst *elinst; + const struct mlxsw_afk_element_inst *elinst; elinst = &block->instances[j]; elinfo = &mlxsw_afk_element_infos[elinst->element]; @@ -154,7 +154,7 @@ static void mlxsw_afk_picker_count_hits(struct mlxsw_afk *mlxsw_afk, const struct mlxsw_afk_block *block = &mlxsw_afk->blocks[i]; for (j = 0; j < block->instances_count; j++) { - struct mlxsw_afk_element_inst *elinst; + const struct mlxsw_afk_element_inst *elinst; elinst = &block->instances[j]; if (elinst->element == element) { @@ -386,7 +386,7 @@ mlxsw_afk_block_elinst_get(const struct mlxsw_afk_block *block, int i; for (i = 0; i < block->instances_count; i++) { - struct mlxsw_afk_element_inst *elinst; + const struct mlxsw_afk_element_inst *elinst; elinst = &block->instances[i]; if (elinst->element == element) diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h 
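The one-line mlxbf_gige change is part of the tree-wide rename of platform_driver's transitional .remove_new member back to .remove, keeping the void-returning callback prototype. The resulting shape, as a sketch::

    static void mlxbf_gige_remove(struct platform_device *pdev);

    static struct platform_driver mlxbf_gige_driver = {
            .probe    = mlxbf_gige_probe,
            .remove   = mlxbf_gige_remove,  /* formerly .remove_new */
            .shutdown = mlxbf_gige_shutdown,
            .driver   = {
                    .name = KBUILD_MODNAME,
            },
    };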
b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h index 98a05598178b..5aa1afb3f2ca 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h +++ b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h @@ -117,7 +117,7 @@ struct mlxsw_afk_element_inst { /* element instance in actual block */ struct mlxsw_afk_block { u16 encoding; /* block ID */ - struct mlxsw_afk_element_inst *instances; + const struct mlxsw_afk_element_inst *instances; unsigned int instances_count; bool high_entropy; }; diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.c index eaad78605602..6fe185ea6732 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.c @@ -7,7 +7,7 @@ #include "item.h" #include "core_acl_flex_keys.h" -static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_l2_dmac[] = { +static const struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_l2_dmac[] = { MLXSW_AFK_ELEMENT_INST_BUF(DMAC_32_47, 0x00, 2), MLXSW_AFK_ELEMENT_INST_BUF(DMAC_0_31, 0x02, 4), MLXSW_AFK_ELEMENT_INST_U32(PCP, 0x08, 13, 3), @@ -15,7 +15,7 @@ static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_l2_dmac[] = { MLXSW_AFK_ELEMENT_INST_U32(SRC_SYS_PORT, 0x0C, 0, 16), }; -static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_l2_smac[] = { +static const struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_l2_smac[] = { MLXSW_AFK_ELEMENT_INST_BUF(SMAC_32_47, 0x00, 2), MLXSW_AFK_ELEMENT_INST_BUF(SMAC_0_31, 0x02, 4), MLXSW_AFK_ELEMENT_INST_U32(PCP, 0x08, 13, 3), @@ -23,27 +23,27 @@ static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_l2_smac[] = { MLXSW_AFK_ELEMENT_INST_U32(SRC_SYS_PORT, 0x0C, 0, 16), }; -static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_l2_smac_ex[] = { +static const struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_l2_smac_ex[] = { MLXSW_AFK_ELEMENT_INST_BUF(SMAC_32_47, 0x02, 2), MLXSW_AFK_ELEMENT_INST_BUF(SMAC_0_31, 0x04, 4), MLXSW_AFK_ELEMENT_INST_U32(ETHERTYPE, 0x0C, 0, 16), }; -static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv4_sip[] = { +static const struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv4_sip[] = { MLXSW_AFK_ELEMENT_INST_BUF(SRC_IP_0_31, 0x00, 4), MLXSW_AFK_ELEMENT_INST_U32(L4_PORT_RANGE, 0x04, 16, 16), MLXSW_AFK_ELEMENT_INST_U32(IP_PROTO, 0x08, 0, 8), MLXSW_AFK_ELEMENT_INST_U32(SRC_SYS_PORT, 0x0C, 0, 16), }; -static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv4_dip[] = { +static const struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv4_dip[] = { MLXSW_AFK_ELEMENT_INST_BUF(DST_IP_0_31, 0x00, 4), MLXSW_AFK_ELEMENT_INST_U32(L4_PORT_RANGE, 0x04, 16, 16), MLXSW_AFK_ELEMENT_INST_U32(IP_PROTO, 0x08, 0, 8), MLXSW_AFK_ELEMENT_INST_U32(SRC_SYS_PORT, 0x0C, 0, 16), }; -static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv4[] = { +static const struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv4[] = { MLXSW_AFK_ELEMENT_INST_BUF(SRC_IP_0_31, 0x00, 4), MLXSW_AFK_ELEMENT_INST_U32(IP_ECN, 0x04, 4, 2), MLXSW_AFK_ELEMENT_INST_U32(IP_TTL_, 0x04, 24, 8), @@ -51,35 +51,35 @@ static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv4[] = { MLXSW_AFK_ELEMENT_INST_U32(TCP_FLAGS, 0x08, 8, 9), /* TCP_CONTROL+TCP_ECN */ }; -static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv4_ex[] = { +static const struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv4_ex[] = { 
MLXSW_AFK_ELEMENT_INST_U32(VID, 0x00, 0, 12), MLXSW_AFK_ELEMENT_INST_U32(PCP, 0x08, 29, 3), MLXSW_AFK_ELEMENT_INST_U32(SRC_L4_PORT, 0x08, 0, 16), MLXSW_AFK_ELEMENT_INST_U32(DST_L4_PORT, 0x0C, 0, 16), }; -static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv6_dip[] = { +static const struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv6_dip[] = { MLXSW_AFK_ELEMENT_INST_BUF(DST_IP_32_63, 0x00, 4), MLXSW_AFK_ELEMENT_INST_BUF(DST_IP_0_31, 0x04, 4), }; -static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv6_ex1[] = { +static const struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv6_ex1[] = { MLXSW_AFK_ELEMENT_INST_BUF(DST_IP_96_127, 0x00, 4), MLXSW_AFK_ELEMENT_INST_BUF(DST_IP_64_95, 0x04, 4), MLXSW_AFK_ELEMENT_INST_U32(IP_PROTO, 0x08, 0, 8), }; -static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv6_sip[] = { +static const struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv6_sip[] = { MLXSW_AFK_ELEMENT_INST_BUF(SRC_IP_32_63, 0x00, 4), MLXSW_AFK_ELEMENT_INST_BUF(SRC_IP_0_31, 0x04, 4), }; -static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv6_sip_ex[] = { +static const struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv6_sip_ex[] = { MLXSW_AFK_ELEMENT_INST_BUF(SRC_IP_96_127, 0x00, 4), MLXSW_AFK_ELEMENT_INST_BUF(SRC_IP_64_95, 0x04, 4), }; -static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_packet_type[] = { +static const struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_packet_type[] = { MLXSW_AFK_ELEMENT_INST_U32(ETHERTYPE, 0x00, 0, 16), }; @@ -124,90 +124,90 @@ const struct mlxsw_afk_ops mlxsw_sp1_afk_ops = { .clear_block = mlxsw_sp1_afk_clear_block, }; -static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_mac_0[] = { +static const struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_mac_0[] = { MLXSW_AFK_ELEMENT_INST_U32(FDB_MISS, 0x00, 3, 1), MLXSW_AFK_ELEMENT_INST_BUF(DMAC_0_31, 0x04, 4), }; -static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_mac_1[] = { +static const struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_mac_1[] = { MLXSW_AFK_ELEMENT_INST_U32(FDB_MISS, 0x00, 3, 1), MLXSW_AFK_ELEMENT_INST_BUF(SMAC_0_31, 0x04, 4), }; -static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_mac_2[] = { +static const struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_mac_2[] = { MLXSW_AFK_ELEMENT_INST_BUF(SMAC_32_47, 0x04, 2), MLXSW_AFK_ELEMENT_INST_BUF(DMAC_32_47, 0x06, 2), }; -static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_mac_3[] = { +static const struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_mac_3[] = { MLXSW_AFK_ELEMENT_INST_U32(PCP, 0x00, 0, 3), MLXSW_AFK_ELEMENT_INST_U32(VID, 0x04, 16, 12), MLXSW_AFK_ELEMENT_INST_BUF(DMAC_32_47, 0x06, 2), }; -static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_mac_4[] = { +static const struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_mac_4[] = { MLXSW_AFK_ELEMENT_INST_U32(PCP, 0x00, 0, 3), MLXSW_AFK_ELEMENT_INST_U32(VID, 0x04, 16, 12), MLXSW_AFK_ELEMENT_INST_U32(ETHERTYPE, 0x04, 0, 16), }; -static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_mac_5[] = { +static const struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_mac_5[] = { MLXSW_AFK_ELEMENT_INST_U32(VID, 0x04, 16, 12), MLXSW_AFK_ELEMENT_INST_EXT_U32(SRC_SYS_PORT, 0x04, 0, 8, -1, true), /* RX_ACL_SYSTEM_PORT */ }; -static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv4_0[] = { +static const struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv4_0[] = { 
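The long run of mlxsw hunks is a mechanical constification: the flex-key element tables are immutable lookup data, so qualifying them const (together with the const instances pointer in struct mlxsw_afk_block) lets the compiler place them in read-only memory. The pattern is the same throughout::

    static const struct mlxsw_afk_element_inst example_block[] = {
            MLXSW_AFK_ELEMENT_INST_U32(ETHERTYPE, 0x00, 0, 16),
    };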
MLXSW_AFK_ELEMENT_INST_BUF(DST_IP_0_31, 0x04, 4), }; -static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv4_1[] = { +static const struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv4_1[] = { MLXSW_AFK_ELEMENT_INST_BUF(SRC_IP_0_31, 0x04, 4), }; -static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv4_2[] = { +static const struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv4_2[] = { MLXSW_AFK_ELEMENT_INST_U32(IP_DSCP, 0x04, 0, 6), MLXSW_AFK_ELEMENT_INST_U32(IP_ECN, 0x04, 6, 2), MLXSW_AFK_ELEMENT_INST_U32(IP_TTL_, 0x04, 8, 8), MLXSW_AFK_ELEMENT_INST_U32(IP_PROTO, 0x04, 16, 8), }; -static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv4_5[] = { +static const struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv4_5[] = { MLXSW_AFK_ELEMENT_INST_EXT_U32(VIRT_ROUTER, 0x04, 20, 11, 0, true), }; -static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv6_0[] = { +static const struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv6_0[] = { MLXSW_AFK_ELEMENT_INST_U32(VIRT_ROUTER_0_3, 0x00, 0, 4), MLXSW_AFK_ELEMENT_INST_BUF(DST_IP_32_63, 0x04, 4), }; -static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv6_1[] = { +static const struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv6_1[] = { MLXSW_AFK_ELEMENT_INST_U32(VIRT_ROUTER_4_7, 0x00, 0, 4), MLXSW_AFK_ELEMENT_INST_BUF(DST_IP_64_95, 0x04, 4), }; -static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv6_2[] = { +static const struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv6_2[] = { MLXSW_AFK_ELEMENT_INST_EXT_U32(VIRT_ROUTER_MSB, 0x00, 0, 3, 0, true), MLXSW_AFK_ELEMENT_INST_BUF(DST_IP_96_127, 0x04, 4), }; -static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv6_3[] = { +static const struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv6_3[] = { MLXSW_AFK_ELEMENT_INST_BUF(SRC_IP_32_63, 0x04, 4), }; -static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv6_4[] = { +static const struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv6_4[] = { MLXSW_AFK_ELEMENT_INST_BUF(SRC_IP_64_95, 0x04, 4), }; -static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv6_5[] = { +static const struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv6_5[] = { MLXSW_AFK_ELEMENT_INST_BUF(SRC_IP_96_127, 0x04, 4), }; -static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_l4_0[] = { +static const struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_l4_0[] = { MLXSW_AFK_ELEMENT_INST_U32(SRC_L4_PORT, 0x04, 16, 16), MLXSW_AFK_ELEMENT_INST_U32(DST_L4_PORT, 0x04, 0, 16), }; -static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_l4_2[] = { +static const struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_l4_2[] = { MLXSW_AFK_ELEMENT_INST_U32(TCP_FLAGS, 0x04, 16, 9), /* TCP_CONTROL + TCP_ECN */ MLXSW_AFK_ELEMENT_INST_U32(L4_PORT_RANGE, 0x04, 0, 16), }; @@ -319,16 +319,16 @@ const struct mlxsw_afk_ops mlxsw_sp2_afk_ops = { .clear_block = mlxsw_sp2_afk_clear_block, }; -static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_mac_5b[] = { +static const struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_mac_5b[] = { MLXSW_AFK_ELEMENT_INST_U32(VID, 0x04, 18, 12), MLXSW_AFK_ELEMENT_INST_EXT_U32(SRC_SYS_PORT, 0x04, 0, 9, -1, true), /* RX_ACL_SYSTEM_PORT */ }; -static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv4_5b[] = { +static const struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv4_5b[] = { MLXSW_AFK_ELEMENT_INST_U32(VIRT_ROUTER, 0x04, 20, 12), }; -static struct 
mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv6_2b[] = { +static const struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv6_2b[] = { MLXSW_AFK_ELEMENT_INST_U32(VIRT_ROUTER_MSB, 0x00, 0, 4), MLXSW_AFK_ELEMENT_INST_BUF(DST_IP_96_127, 0x04, 4), }; diff --git a/drivers/net/ethernet/meta/Kconfig b/drivers/net/ethernet/meta/Kconfig index 85519690b837..831921b9d4d5 100644 --- a/drivers/net/ethernet/meta/Kconfig +++ b/drivers/net/ethernet/meta/Kconfig @@ -23,6 +23,7 @@ config FBNIC depends on !S390 depends on MAX_SKB_FRAGS < 22 depends on PCI_MSI + depends on PTP_1588_CLOCK_OPTIONAL select NET_DEVLINK select PAGE_POOL select PHYLINK diff --git a/drivers/net/ethernet/meta/fbnic/Makefile b/drivers/net/ethernet/meta/fbnic/Makefile index ed4533a73c57..cadd4dac6620 100644 --- a/drivers/net/ethernet/meta/fbnic/Makefile +++ b/drivers/net/ethernet/meta/fbnic/Makefile @@ -11,6 +11,7 @@ fbnic-y := fbnic_devlink.o \ fbnic_ethtool.o \ fbnic_fw.o \ fbnic_hw_stats.o \ + fbnic_hwmon.o \ fbnic_irq.o \ fbnic_mac.o \ fbnic_netdev.o \ @@ -18,4 +19,5 @@ fbnic-y := fbnic_devlink.o \ fbnic_phylink.o \ fbnic_rpc.o \ fbnic_tlv.o \ - fbnic_txrx.o + fbnic_txrx.o \ + fbnic_time.o diff --git a/drivers/net/ethernet/meta/fbnic/fbnic.h b/drivers/net/ethernet/meta/fbnic/fbnic.h index 0f9e8d79461c..fec567c8fe4a 100644 --- a/drivers/net/ethernet/meta/fbnic/fbnic.h +++ b/drivers/net/ethernet/meta/fbnic/fbnic.h @@ -6,6 +6,7 @@ #include <linux/interrupt.h> #include <linux/io.h> +#include <linux/ptp_clock_kernel.h> #include <linux/types.h> #include <linux/workqueue.h> @@ -18,6 +19,7 @@ struct fbnic_dev { struct device *dev; struct net_device *netdev; + struct device *hwmon; u32 __iomem *uc_addr0; u32 __iomem *uc_addr4; @@ -30,6 +32,7 @@ struct fbnic_dev { struct fbnic_fw_mbx mbx[FBNIC_IPC_MBX_INDICES]; struct fbnic_fw_cap fw_cap; + struct fbnic_fw_completion *cmpl_data; /* Lock protecting Tx Mailbox queue to prevent possible races */ spinlock_t fw_tx_lock; @@ -49,6 +52,16 @@ struct fbnic_dev { /* Number of TCQs/RCQs available on hardware */ u16 max_num_queues; + /* Lock protecting writes to @time_high, @time_offset of fbnic_netdev, + * and the HW time CSR machinery. 
+ */ + spinlock_t time_lock; + /* Externally accessible PTP clock, may be NULL */ + struct ptp_clock *ptp; + struct ptp_clock_info ptp_info; + /* Last @time_high refresh time in jiffies (to catch stalls) */ + unsigned long last_read; + /* Local copy of hardware statistics */ struct fbnic_hw_stats hw_stats; }; @@ -127,6 +140,9 @@ void fbnic_devlink_unregister(struct fbnic_dev *fbd); int fbnic_fw_enable_mbx(struct fbnic_dev *fbd); void fbnic_fw_disable_mbx(struct fbnic_dev *fbd); +void fbnic_hwmon_register(struct fbnic_dev *fbd); +void fbnic_hwmon_unregister(struct fbnic_dev *fbd); + int fbnic_pcs_irq_enable(struct fbnic_dev *fbd); void fbnic_pcs_irq_disable(struct fbnic_dev *fbd); diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_csr.h b/drivers/net/ethernet/meta/fbnic/fbnic_csr.h index 21db509acbc1..79cdd231d327 100644 --- a/drivers/net/ethernet/meta/fbnic/fbnic_csr.h +++ b/drivers/net/ethernet/meta/fbnic/fbnic_csr.h @@ -413,6 +413,44 @@ enum { #define FBNIC_TMI_DROP_CTRL 0x04401 /* 0x11004 */ #define FBNIC_TMI_DROP_CTRL_EN CSR_BIT(0) #define FBNIC_CSR_END_TMI 0x0443f /* CSR section delimiter */ + +/* Precision Time Protocol Registers */ +#define FBNIC_CSR_START_PTP 0x04800 /* CSR section delimiter */ +#define FBNIC_PTP_REG_BASE 0x04800 /* 0x12000 */ + +#define FBNIC_PTP_CTRL 0x04800 /* 0x12000 */ +#define FBNIC_PTP_CTRL_EN CSR_BIT(0) +#define FBNIC_PTP_CTRL_MONO_EN CSR_BIT(4) +#define FBNIC_PTP_CTRL_TQS_OUT_EN CSR_BIT(8) +#define FBNIC_PTP_CTRL_MAC_OUT_IVAL CSR_GENMASK(16, 12) +#define FBNIC_PTP_CTRL_TICK_IVAL CSR_GENMASK(23, 20) + +#define FBNIC_PTP_ADJUST 0x04801 /* 0x12004 */ +#define FBNIC_PTP_ADJUST_INIT CSR_BIT(0) +#define FBNIC_PTP_ADJUST_SUB_NUDGE CSR_BIT(8) +#define FBNIC_PTP_ADJUST_ADD_NUDGE CSR_BIT(16) +#define FBNIC_PTP_ADJUST_ADDEND_SET CSR_BIT(24) + +#define FBNIC_PTP_INIT_HI 0x04802 /* 0x12008 */ +#define FBNIC_PTP_INIT_LO 0x04803 /* 0x1200c */ + +#define FBNIC_PTP_NUDGE_NS 0x04804 /* 0x12010 */ +#define FBNIC_PTP_NUDGE_SUBNS 0x04805 /* 0x12014 */ + +#define FBNIC_PTP_ADD_VAL_NS 0x04806 /* 0x12018 */ +#define FBNIC_PTP_ADD_VAL_NS_MASK CSR_GENMASK(15, 0) +#define FBNIC_PTP_ADD_VAL_SUBNS 0x04807 /* 0x1201c */ + +#define FBNIC_PTP_CTR_VAL_HI 0x04808 /* 0x12020 */ +#define FBNIC_PTP_CTR_VAL_LO 0x04809 /* 0x12024 */ + +#define FBNIC_PTP_MONO_PTP_CTR_HI 0x0480a /* 0x12028 */ +#define FBNIC_PTP_MONO_PTP_CTR_LO 0x0480b /* 0x1202c */ + +#define FBNIC_PTP_CDC_FIFO_STATUS 0x0480c /* 0x12030 */ +#define FBNIC_PTP_SPARE 0x0480d /* 0x12034 */ +#define FBNIC_CSR_END_PTP 0x0480d /* CSR section delimiter */ + /* Rx Buffer Registers */ #define FBNIC_CSR_START_RXB 0x08000 /* CSR section delimiter */ enum { @@ -548,6 +586,7 @@ enum { }; #define FBNIC_RPC_ACT_TBL0_DMA_HINT CSR_GENMASK(24, 16) +#define FBNIC_RPC_ACT_TBL0_TS_ENA CSR_BIT(28) #define FBNIC_RPC_ACT_TBL0_RSS_CTXT_ID CSR_BIT(30) #define FBNIC_RPC_ACT_TBL1_DEFAULT 0x0840b /* 0x2102c */ diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_ethtool.c b/drivers/net/ethernet/meta/fbnic/fbnic_ethtool.c index 5d980e178941..1117d5a32867 100644 --- a/drivers/net/ethernet/meta/fbnic/fbnic_ethtool.c +++ b/drivers/net/ethernet/meta/fbnic/fbnic_ethtool.c @@ -6,6 +6,35 @@ #include "fbnic_netdev.h" #include "fbnic_tlv.h" +static int +fbnic_get_ts_info(struct net_device *netdev, + struct kernel_ethtool_ts_info *tsinfo) +{ + struct fbnic_net *fbn = netdev_priv(netdev); + + tsinfo->phc_index = ptp_clock_index(fbn->fbd->ptp); + + tsinfo->so_timestamping = + SOF_TIMESTAMPING_TX_SOFTWARE | + SOF_TIMESTAMPING_TX_HARDWARE | + SOF_TIMESTAMPING_RX_HARDWARE | + 
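The PTP block exposes its free-running counter through paired high/low CSRs. A consistent 64-bit read of such a pair usually re-reads the high word to catch a carry between the two accesses; a generic sketch of that idiom (the read helper is hypothetical, and the actual silicon may instead latch the low word when the high word is read)::

    static u64 example_read_ptp_counter(struct fbnic_dev *fbd)
    {
            u32 hi, lo;

            do {
                    hi = example_rd32(fbd, FBNIC_PTP_CTR_VAL_HI);
                    lo = example_rd32(fbd, FBNIC_PTP_CTR_VAL_LO);
                    /* Retry if the high word ticked over mid-read. */
            } while (hi != example_rd32(fbd, FBNIC_PTP_CTR_VAL_HI));

            return ((u64)hi << 32) | lo;
    }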
SOF_TIMESTAMPING_RAW_HARDWARE; + + tsinfo->tx_types = + BIT(HWTSTAMP_TX_OFF) | + BIT(HWTSTAMP_TX_ON); + + tsinfo->rx_filters = + BIT(HWTSTAMP_FILTER_NONE) | + BIT(HWTSTAMP_FILTER_PTP_V1_L4_EVENT) | + BIT(HWTSTAMP_FILTER_PTP_V2_L4_EVENT) | + BIT(HWTSTAMP_FILTER_PTP_V2_L2_EVENT) | + BIT(HWTSTAMP_FILTER_PTP_V2_EVENT) | + BIT(HWTSTAMP_FILTER_ALL); + + return 0; +} + static void fbnic_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo) { @@ -64,8 +93,33 @@ fbnic_get_eth_mac_stats(struct net_device *netdev, &mac_stats->eth_mac.FrameTooLongErrors); } +static void fbnic_get_ts_stats(struct net_device *netdev, + struct ethtool_ts_stats *ts_stats) +{ + struct fbnic_net *fbn = netdev_priv(netdev); + u64 ts_packets, ts_lost; + struct fbnic_ring *ring; + unsigned int start; + int i; + + ts_stats->pkts = fbn->tx_stats.ts_packets; + ts_stats->lost = fbn->tx_stats.ts_lost; + for (i = 0; i < fbn->num_tx_queues; i++) { + ring = fbn->tx[i]; + do { + start = u64_stats_fetch_begin(&ring->stats.syncp); + ts_packets = ring->stats.ts_packets; + ts_lost = ring->stats.ts_lost; + } while (u64_stats_fetch_retry(&ring->stats.syncp, start)); + ts_stats->pkts += ts_packets; + ts_stats->lost += ts_lost; + } +} + static const struct ethtool_ops fbnic_ethtool_ops = { .get_drvinfo = fbnic_get_drvinfo, + .get_ts_info = fbnic_get_ts_info, + .get_ts_stats = fbnic_get_ts_stats, .get_eth_mac_stats = fbnic_get_eth_mac_stats, }; diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_fw.h b/drivers/net/ethernet/meta/fbnic/fbnic_fw.h index 221faf8c6756..7cd8841920e4 100644 --- a/drivers/net/ethernet/meta/fbnic/fbnic_fw.h +++ b/drivers/net/ethernet/meta/fbnic/fbnic_fw.h @@ -44,6 +44,13 @@ struct fbnic_fw_cap { u8 link_fec; }; +struct fbnic_fw_completion { + struct { + s32 millivolts; + s32 millidegrees; + } tsene; +}; + void fbnic_mbx_init(struct fbnic_dev *fbd); void fbnic_mbx_clean(struct fbnic_dev *fbd); void fbnic_mbx_poll(struct fbnic_dev *fbd); diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_hwmon.c b/drivers/net/ethernet/meta/fbnic/fbnic_hwmon.c new file mode 100644 index 000000000000..bcd1086e3768 --- /dev/null +++ b/drivers/net/ethernet/meta/fbnic/fbnic_hwmon.c @@ -0,0 +1,81 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) Meta Platforms, Inc. and affiliates. */ + +#include <linux/hwmon.h> + +#include "fbnic.h" +#include "fbnic_mac.h" + +static int fbnic_hwmon_sensor_id(enum hwmon_sensor_types type) +{ + if (type == hwmon_temp) + return FBNIC_SENSOR_TEMP; + if (type == hwmon_in) + return FBNIC_SENSOR_VOLTAGE; + + return -EOPNOTSUPP; +} + +static umode_t fbnic_hwmon_is_visible(const void *drvdata, + enum hwmon_sensor_types type, + u32 attr, int channel) +{ + if (type == hwmon_temp && attr == hwmon_temp_input) + return 0444; + if (type == hwmon_in && attr == hwmon_in_input) + return 0444; + + return 0; +} + +static int fbnic_hwmon_read(struct device *dev, enum hwmon_sensor_types type, + u32 attr, int channel, long *val) +{ + struct fbnic_dev *fbd = dev_get_drvdata(dev); + const struct fbnic_mac *mac = fbd->mac; + int id; + + id = fbnic_hwmon_sensor_id(type); + return id < 0 ? 
id : mac->get_sensor(fbd, id, val); +} + +static const struct hwmon_ops fbnic_hwmon_ops = { + .is_visible = fbnic_hwmon_is_visible, + .read = fbnic_hwmon_read, +}; + +static const struct hwmon_channel_info *fbnic_hwmon_info[] = { + HWMON_CHANNEL_INFO(temp, HWMON_T_INPUT), + HWMON_CHANNEL_INFO(in, HWMON_I_INPUT), + NULL +}; + +static const struct hwmon_chip_info fbnic_chip_info = { + .ops = &fbnic_hwmon_ops, + .info = fbnic_hwmon_info, +}; + +void fbnic_hwmon_register(struct fbnic_dev *fbd) +{ + if (!IS_REACHABLE(CONFIG_HWMON)) + return; + + fbd->hwmon = hwmon_device_register_with_info(fbd->dev, "fbnic", + fbd, &fbnic_chip_info, + NULL); + if (IS_ERR(fbd->hwmon)) { + dev_notice(fbd->dev, + "Failed to register hwmon device %pe\n", + fbd->hwmon); + fbd->hwmon = NULL; + } +} + +void fbnic_hwmon_unregister(struct fbnic_dev *fbd) +{ + if (!IS_REACHABLE(CONFIG_HWMON) || !fbd->hwmon) + return; + + hwmon_device_unregister(fbd->hwmon); + fbd->hwmon = NULL; +} diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_mac.c b/drivers/net/ethernet/meta/fbnic/fbnic_mac.c index 7b654d0a6dac..80b82ff12c4d 100644 --- a/drivers/net/ethernet/meta/fbnic/fbnic_mac.c +++ b/drivers/net/ethernet/meta/fbnic/fbnic_mac.c @@ -686,6 +686,27 @@ fbnic_mac_get_eth_mac_stats(struct fbnic_dev *fbd, bool reset, MAC_STAT_TX_BROADCAST); } +static int fbnic_mac_get_sensor_asic(struct fbnic_dev *fbd, int id, long *val) +{ + struct fbnic_fw_completion fw_cmpl; + s32 *sensor; + + switch (id) { + case FBNIC_SENSOR_TEMP: + sensor = &fw_cmpl.tsene.millidegrees; + break; + case FBNIC_SENSOR_VOLTAGE: + sensor = &fw_cmpl.tsene.millivolts; + break; + default: + return -EINVAL; + } + + *val = *sensor; + + return 0; +} + static const struct fbnic_mac fbnic_mac_asic = { .init_regs = fbnic_mac_init_regs, .pcs_enable = fbnic_pcs_enable_asic, @@ -695,6 +716,7 @@ static const struct fbnic_mac fbnic_mac_asic = { .get_eth_mac_stats = fbnic_mac_get_eth_mac_stats, .link_down = fbnic_mac_link_down_asic, .link_up = fbnic_mac_link_up_asic, + .get_sensor = fbnic_mac_get_sensor_asic, }; /** diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_mac.h b/drivers/net/ethernet/meta/fbnic/fbnic_mac.h index 476239a9d381..05a591653e09 100644 --- a/drivers/net/ethernet/meta/fbnic/fbnic_mac.h +++ b/drivers/net/ethernet/meta/fbnic/fbnic_mac.h @@ -47,6 +47,11 @@ enum { #define FBNIC_LINK_MODE_PAM4 (FBNIC_LINK_50R1) #define FBNIC_LINK_MODE_MASK (FBNIC_LINK_AUTO - 1) +enum fbnic_sensor_id { + FBNIC_SENSOR_TEMP, /* Temp in millidegrees Centigrade */ + FBNIC_SENSOR_VOLTAGE, /* Voltage in millivolts */ +}; + /* This structure defines the interface hooks for the MAC. The MAC hooks * will be configured as a const struct provided with a set of function * pointers. 
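The hwmon read path above is a thin dispatch through this const ops table. As a minimal sketch (illustration only, not part of the patch), a caller would look like this, assuming fbd->mac was pointed at fbnic_mac_asic during init::

	/* Hypothetical caller; mirrors the dispatch in fbnic_hwmon_read() */
	static int example_read_board_temp(struct fbnic_dev *fbd, long *mdeg)
	{
		return fbd->mac->get_sensor(fbd, FBNIC_SENSOR_TEMP, mdeg);
	}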
@@ -83,6 +88,8 @@ struct fbnic_mac { void (*link_down)(struct fbnic_dev *fbd); void (*link_up)(struct fbnic_dev *fbd, bool tx_pause, bool rx_pause); + + int (*get_sensor)(struct fbnic_dev *fbd, int id, long *val); }; int fbnic_mac_init(struct fbnic_dev *fbd); diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_netdev.c b/drivers/net/ethernet/meta/fbnic/fbnic_netdev.c index a400616a24d4..c08798fad203 100644 --- a/drivers/net/ethernet/meta/fbnic/fbnic_netdev.c +++ b/drivers/net/ethernet/meta/fbnic/fbnic_netdev.c @@ -42,18 +42,24 @@ int __fbnic_open(struct fbnic_net *fbn) goto free_resources; } - err = fbnic_fw_init_heartbeat(fbd, false); + err = fbnic_time_start(fbn); if (err) goto release_ownership; + err = fbnic_fw_init_heartbeat(fbd, false); + if (err) + goto time_stop; + err = fbnic_pcs_irq_enable(fbd); if (err) - goto release_ownership; + goto time_stop; /* Pull the BMC config and initialize the RPC */ fbnic_bmc_rpc_init(fbd); fbnic_rss_reinit(fbd, fbn); return 0; +time_stop: + fbnic_time_stop(fbn); release_ownership: fbnic_fw_xmit_ownership_msg(fbn->fbd, false); free_resources: @@ -82,6 +88,7 @@ static int fbnic_stop(struct net_device *netdev) fbnic_down(fbn); fbnic_pcs_irq_disable(fbn->fbd); + fbnic_time_stop(fbn); fbnic_fw_xmit_ownership_msg(fbn->fbd, false); fbnic_free_resources(fbn); @@ -317,6 +324,84 @@ void fbnic_clear_rx_mode(struct net_device *netdev) __dev_mc_unsync(netdev, NULL); } +static int fbnic_hwtstamp_get(struct net_device *netdev, + struct kernel_hwtstamp_config *config) +{ + struct fbnic_net *fbn = netdev_priv(netdev); + + *config = fbn->hwtstamp_config; + + return 0; +} + +static int fbnic_hwtstamp_set(struct net_device *netdev, + struct kernel_hwtstamp_config *config, + struct netlink_ext_ack *extack) +{ + struct fbnic_net *fbn = netdev_priv(netdev); + int old_rx_filter; + + if (config->source != HWTSTAMP_SOURCE_NETDEV) + return -EOPNOTSUPP; + + if (!kernel_hwtstamp_config_changed(config, &fbn->hwtstamp_config)) + return 0; + + /* Upscale the filters */ + switch (config->rx_filter) { + case HWTSTAMP_FILTER_NONE: + case HWTSTAMP_FILTER_ALL: + case HWTSTAMP_FILTER_PTP_V1_L4_EVENT: + case HWTSTAMP_FILTER_PTP_V2_L4_EVENT: + case HWTSTAMP_FILTER_PTP_V2_L2_EVENT: + case HWTSTAMP_FILTER_PTP_V2_EVENT: + break; + case HWTSTAMP_FILTER_NTP_ALL: + config->rx_filter = HWTSTAMP_FILTER_ALL; + break; + case HWTSTAMP_FILTER_PTP_V1_L4_SYNC: + case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ: + config->rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT; + break; + case HWTSTAMP_FILTER_PTP_V2_L4_SYNC: + case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ: + config->rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT; + break; + case HWTSTAMP_FILTER_PTP_V2_L2_SYNC: + case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ: + config->rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT; + break; + case HWTSTAMP_FILTER_PTP_V2_SYNC: + case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ: + config->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT; + break; + default: + return -ERANGE; + } + + /* Configure */ + old_rx_filter = fbn->hwtstamp_config.rx_filter; + memcpy(&fbn->hwtstamp_config, config, sizeof(*config)); + + if (old_rx_filter != config->rx_filter && netif_running(fbn->netdev)) { + fbnic_rss_reinit(fbn->fbd, fbn); + fbnic_write_rules(fbn->fbd); + } + + /* Save / report back filter configuration + * Note that our filter configuration is inexact. Instead of + * filtering for a specific UDP port or L2 Ethertype we are + * filtering in all UDP or all non-IP packets for timestamping. 
So + * if anything other than FILTER_ALL is requested we report + * FILTER_SOME indicating that we will be timestamping a few + * additional packets. + */ + if (config->rx_filter > HWTSTAMP_FILTER_ALL) + config->rx_filter = HWTSTAMP_FILTER_SOME; + + return 0; +} + static void fbnic_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats64) { @@ -394,6 +479,8 @@ static const struct net_device_ops fbnic_netdev_ops = { .ndo_set_mac_address = fbnic_set_mac, .ndo_set_rx_mode = fbnic_set_rx_mode, .ndo_get_stats64 = fbnic_get_stats64, + .ndo_hwtstamp_get = fbnic_hwtstamp_get, + .ndo_hwtstamp_set = fbnic_hwtstamp_set, }; static void fbnic_get_queue_stats_rx(struct net_device *dev, int idx, diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_netdev.h b/drivers/net/ethernet/meta/fbnic/fbnic_netdev.h index 6c27da09a612..b8417b300778 100644 --- a/drivers/net/ethernet/meta/fbnic/fbnic_netdev.h +++ b/drivers/net/ethernet/meta/fbnic/fbnic_netdev.h @@ -33,6 +33,15 @@ struct fbnic_net { u8 fec; u8 link_mode; + /* Cached top bits of the HW time counter for 40b -> 64b conversion */ + u32 time_high; + /* Protect readers of @time_offset, writers take @time_lock. */ + struct u64_stats_sync time_seq; + /* Offset in ns between free running NIC PHC and time set via PTP + * clock callbacks + */ + s64 time_offset; + u16 num_tx_queues; u16 num_rx_queues; @@ -45,6 +54,9 @@ struct fbnic_net { struct fbnic_queue_stats rx_stats; u64 link_down_events; + /* Time stampinn filter config */ + struct kernel_hwtstamp_config hwtstamp_config; + struct list_head napis; }; @@ -60,6 +72,12 @@ void fbnic_reset_queues(struct fbnic_net *fbn, unsigned int tx, unsigned int rx); void fbnic_set_ethtool_ops(struct net_device *dev); +int fbnic_ptp_setup(struct fbnic_dev *fbd); +void fbnic_ptp_destroy(struct fbnic_dev *fbd); +void fbnic_time_init(struct fbnic_net *fbn); +int fbnic_time_start(struct fbnic_net *fbn); +void fbnic_time_stop(struct fbnic_net *fbn); + void __fbnic_set_rx_mode(struct net_device *netdev); void fbnic_clear_rx_mode(struct net_device *netdev); diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_pci.c b/drivers/net/ethernet/meta/fbnic/fbnic_pci.c index a4809fe0fc24..2de5a6fde7e8 100644 --- a/drivers/net/ethernet/meta/fbnic/fbnic_pci.c +++ b/drivers/net/ethernet/meta/fbnic/fbnic_pci.c @@ -289,6 +289,8 @@ static int fbnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) fbnic_devlink_register(fbd); + fbnic_hwmon_register(fbd); + if (!fbd->dsn) { dev_warn(&pdev->dev, "Reading serial number failed\n"); goto init_failure_mode; @@ -300,14 +302,20 @@ static int fbnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) goto init_failure_mode; } + err = fbnic_ptp_setup(fbd); + if (err) + goto ifm_free_netdev; + err = fbnic_netdev_register(netdev); if (err) { dev_err(&pdev->dev, "Netdev registration failed: %d\n", err); - goto ifm_free_netdev; + goto ifm_destroy_ptp; } return 0; +ifm_destroy_ptp: + fbnic_ptp_destroy(fbd); ifm_free_netdev: fbnic_netdev_free(fbd); init_failure_mode: @@ -342,9 +350,11 @@ static void fbnic_remove(struct pci_dev *pdev) fbnic_netdev_unregister(netdev); cancel_delayed_work_sync(&fbd->service_task); + fbnic_ptp_destroy(fbd); fbnic_netdev_free(fbd); } + fbnic_hwmon_unregister(fbd); fbnic_devlink_unregister(fbd); fbnic_fw_disable_mbx(fbd); fbnic_free_irqs(fbd); diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_rpc.c b/drivers/net/ethernet/meta/fbnic/fbnic_rpc.c index c8aa29fc052b..337b8b3aef2f 100644 --- a/drivers/net/ethernet/meta/fbnic/fbnic_rpc.c +++ 
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_rpc.c b/drivers/net/ethernet/meta/fbnic/fbnic_rpc.c index c8aa29fc052b..337b8b3aef2f 100644 --- a/drivers/net/ethernet/meta/fbnic/fbnic_rpc.c +++ b/drivers/net/ethernet/meta/fbnic/fbnic_rpc.c @@ -244,6 +244,12 @@ void fbnic_bmc_rpc_init(struct fbnic_dev *fbd) ((_ip) ? FBNIC_RPC_TCAM_ACT1_IP_VALID : 0) | \ ((_v6) ? FBNIC_RPC_TCAM_ACT1_IP_IS_V6 : 0)) +#define FBNIC_TSTAMP_MASK(_all, _udp, _ether) \ + (((_all) ? ((1u << FBNIC_NUM_HASH_OPT) - 1) : 0) | \ + ((_udp) ? (1u << FBNIC_UDP6_HASH_OPT) | \ + (1u << FBNIC_UDP4_HASH_OPT) : 0) | \ + ((_ether) ? (1u << FBNIC_ETHER_HASH_OPT) : 0)) + void fbnic_rss_reinit(struct fbnic_dev *fbd, struct fbnic_net *fbn) { static const u32 act1_value[FBNIC_NUM_HASH_OPT] = { @@ -255,6 +261,7 @@ void fbnic_rss_reinit(struct fbnic_dev *fbd, struct fbnic_net *fbn) FBNIC_ACT1_INIT(0, 0, 1, 0), /* IP4 */ 0 /* Ether */ }; + u32 tstamp_mask = 0; unsigned int i; /* To support scenarios where a BMC is present we must write the @@ -264,6 +271,28 @@ void fbnic_rss_reinit(struct fbnic_dev *fbd, struct fbnic_net *fbn) BUILD_BUG_ON(FBNIC_RSS_EN_NUM_UNICAST * 2 != FBNIC_RSS_EN_NUM_ENTRIES); BUILD_BUG_ON(ARRAY_SIZE(act1_value) != FBNIC_NUM_HASH_OPT); + /* Set timestamp mask with 1b per flow type */ + if (fbn->hwtstamp_config.rx_filter != HWTSTAMP_FILTER_NONE) { + switch (fbn->hwtstamp_config.rx_filter) { + case HWTSTAMP_FILTER_ALL: + tstamp_mask = FBNIC_TSTAMP_MASK(1, 1, 1); + break; + case HWTSTAMP_FILTER_PTP_V2_EVENT: + tstamp_mask = FBNIC_TSTAMP_MASK(0, 1, 1); + break; + case HWTSTAMP_FILTER_PTP_V1_L4_EVENT: + case HWTSTAMP_FILTER_PTP_V2_L4_EVENT: + tstamp_mask = FBNIC_TSTAMP_MASK(0, 1, 0); + break; + case HWTSTAMP_FILTER_PTP_V2_L2_EVENT: + tstamp_mask = FBNIC_TSTAMP_MASK(0, 0, 1); + break; + default: + netdev_warn(fbn->netdev, "Unsupported hwtstamp_rx_filter\n"); + break; + } + } + /* Program RSS hash enable mask for host in action TCAM/table. */ for (i = fbnic_bmc_present(fbd) ? 0 : FBNIC_RSS_EN_NUM_UNICAST; i < FBNIC_RSS_EN_NUM_ENTRIES; i++) { @@ -287,6 +316,8 @@ void fbnic_rss_reinit(struct fbnic_dev *fbd, struct fbnic_net *fbn) if (!dest) dest = FBNIC_RPC_ACT_TBL0_DROP; + else if (tstamp_mask & (1u << flow_type)) + dest |= FBNIC_RPC_ACT_TBL0_TS_ENA; if (act1_value[flow_type] & FBNIC_RPC_TCAM_ACT1_L4_VALID) dest |= FIELD_PREP(FBNIC_RPC_ACT_TBL0_DMA_HINT, diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_time.c b/drivers/net/ethernet/meta/fbnic/fbnic_time.c new file mode 100644 index 000000000000..39d99677b71e --- /dev/null +++ b/drivers/net/ethernet/meta/fbnic/fbnic_time.c @@ -0,0 +1,303 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) Meta Platforms, Inc. and affiliates. */ + +#include <linux/bitfield.h> +#include <linux/jiffies.h> +#include <linux/limits.h> +#include <linux/ptp_clock_kernel.h> +#include <linux/timer.h> + +#include "fbnic.h" +#include "fbnic_csr.h" +#include "fbnic_netdev.h" + +/* FBNIC timing & PTP implementation + * Datapath uses truncated 40b timestamps for scheduling and event reporting. + * We need to promote those to full 64b, hence we periodically cache the top + * 32 bits of the HW time counter. Since this makes our time reporting non-atomic + * we leave the HW clock free running and adjust time offsets in SW as needed. + * Time offset is 64-bit - we need a seq counter for 32-bit machines. + * Time offset and the cache of top bits are independent so we don't need + * a coherent snapshot of both - READ_ONCE()/WRITE_ONCE() + writer side lock + * are enough. + */ + +/* Period of refresh of top bits of timestamp, give ourselves an 8x margin. + * This should translate to once a minute. + * The use of nsecs_to_jiffies() should be safe for a <=40b nsec value.
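+ * + * Spelling the arithmetic out: 2^40 ns is ~18.3 minutes, so the refresh + * period of (1ULL << 40) / 16 ns is ~69 seconds, while the wrap detection + * in fbnic_ts40_to_ns() tolerates a cache up to half the 40b range + * (~9.2 minutes) stale - hence the 8x margin.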
+ */ +#define FBNIC_TS_HIGH_REFRESH_JIF nsecs_to_jiffies((1ULL << 40) / 16) + +static struct fbnic_dev *fbnic_from_ptp_info(struct ptp_clock_info *ptp) +{ + return container_of(ptp, struct fbnic_dev, ptp_info); +} + +/* This function is "slow" because we could try guessing which high part + * is correct based on low instead of re-reading, and skip reading @hi + * twice altogether if @lo is far enough from 0. + */ +static u64 __fbnic_time_get_slow(struct fbnic_dev *fbd) +{ + u32 hi, lo; + + lockdep_assert_held(&fbd->time_lock); + + do { + hi = fbnic_rd32(fbd, FBNIC_PTP_CTR_VAL_HI); + lo = fbnic_rd32(fbd, FBNIC_PTP_CTR_VAL_LO); + } while (hi != fbnic_rd32(fbd, FBNIC_PTP_CTR_VAL_HI)); + + return (u64)hi << 32 | lo; +} + +static void __fbnic_time_set_addend(struct fbnic_dev *fbd, u64 addend) +{ + lockdep_assert_held(&fbd->time_lock); + + fbnic_wr32(fbd, FBNIC_PTP_ADD_VAL_NS, + FIELD_PREP(FBNIC_PTP_ADD_VAL_NS_MASK, addend >> 32)); + fbnic_wr32(fbd, FBNIC_PTP_ADD_VAL_SUBNS, (u32)addend); +} + +static void fbnic_ptp_fresh_check(struct fbnic_dev *fbd) +{ + if (time_is_after_jiffies(fbd->last_read + + FBNIC_TS_HIGH_REFRESH_JIF * 3 / 2)) + return; + + dev_warn(fbd->dev, "NIC timestamp refresh stall, delayed by %lu sec\n", + (jiffies - fbd->last_read - FBNIC_TS_HIGH_REFRESH_JIF) / HZ); +} + +static void fbnic_ptp_refresh_time(struct fbnic_dev *fbd, struct fbnic_net *fbn) +{ + unsigned long flags; + u32 hi; + + spin_lock_irqsave(&fbd->time_lock, flags); + hi = fbnic_rd32(fbn->fbd, FBNIC_PTP_CTR_VAL_HI); + if (!fbnic_present(fbd)) + goto out; /* Don't bother handling, reset is pending */ + /* Let's keep high cached value a bit lower to avoid race with + * incoming timestamps. The logic in fbnic_ts40_to_ns() will + * take care of overflow in this case. It will make cached time + * ~1 minute lower and incoming timestamp will always be later + * than cached time. + */ + WRITE_ONCE(fbn->time_high, hi - 16); + fbd->last_read = jiffies; + out: + spin_unlock_irqrestore(&fbd->time_lock, flags); +} + +static long fbnic_ptp_do_aux_work(struct ptp_clock_info *ptp) +{ + struct fbnic_dev *fbd = fbnic_from_ptp_info(ptp); + struct fbnic_net *fbn; + + fbn = netdev_priv(fbd->netdev); + + fbnic_ptp_fresh_check(fbd); + fbnic_ptp_refresh_time(fbd, fbn); + + return FBNIC_TS_HIGH_REFRESH_JIF; +} + +static int fbnic_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm) +{ + struct fbnic_dev *fbd = fbnic_from_ptp_info(ptp); + u64 addend, dclk_period; + unsigned long flags; + + /* d_clock is 600 MHz; which in Q16.32 fixed point ns is: */ + dclk_period = (((u64)1000000000) << 32) / FBNIC_CLOCK_FREQ; + addend = adjust_by_scaled_ppm(dclk_period, scaled_ppm); + + spin_lock_irqsave(&fbd->time_lock, flags); + __fbnic_time_set_addend(fbd, addend); + fbnic_wr32(fbd, FBNIC_PTP_ADJUST, FBNIC_PTP_ADJUST_ADDEND_SET); + + /* Flush, make sure FBNIC_PTP_ADD_VAL_* is stable for at least 4 clks */ + fbnic_rd32(fbd, FBNIC_PTP_SPARE); + spin_unlock_irqrestore(&fbd->time_lock, flags); + + return fbnic_present(fbd) ? 0 : -EIO; +} + +static int fbnic_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta) +{ + struct fbnic_dev *fbd = fbnic_from_ptp_info(ptp); + struct fbnic_net *fbn; + unsigned long flags; + + fbn = netdev_priv(fbd->netdev); + + spin_lock_irqsave(&fbd->time_lock, flags); + u64_stats_update_begin(&fbn->time_seq); + WRITE_ONCE(fbn->time_offset, READ_ONCE(fbn->time_offset) + delta); + u64_stats_update_end(&fbn->time_seq); + spin_unlock_irqrestore(&fbd->time_lock, flags); + + return 0; +} + +static int +fbnic_ptp_gettimex64(struct ptp_clock_info *ptp, struct timespec64 *ts, + struct ptp_system_timestamp *sts) +{ + struct fbnic_dev *fbd = fbnic_from_ptp_info(ptp); + struct fbnic_net *fbn; + unsigned long flags; + u64 time_ns; + u32 hi, lo; + + fbn = netdev_priv(fbd->netdev); + + spin_lock_irqsave(&fbd->time_lock, flags); + + do { + hi = fbnic_rd32(fbd, FBNIC_PTP_CTR_VAL_HI); + ptp_read_system_prets(sts); + lo = fbnic_rd32(fbd, FBNIC_PTP_CTR_VAL_LO); + ptp_read_system_postts(sts); + /* Similarly to the comment above __fbnic_time_get_slow() + * - this can be optimized if needed. + */ + } while (hi != fbnic_rd32(fbd, FBNIC_PTP_CTR_VAL_HI)); + + time_ns = ((u64)hi << 32 | lo) + fbn->time_offset; + spin_unlock_irqrestore(&fbd->time_lock, flags); + + if (!fbnic_present(fbd)) + return -EIO; + + *ts = ns_to_timespec64(time_ns); + + return 0; +} + +static int +fbnic_ptp_settime64(struct ptp_clock_info *ptp, const struct timespec64 *ts) +{ + struct fbnic_dev *fbd = fbnic_from_ptp_info(ptp); + struct fbnic_net *fbn; + unsigned long flags; + u64 dev_ns, host_ns; + int ret; + + fbn = netdev_priv(fbd->netdev); + + host_ns = timespec64_to_ns(ts); + + spin_lock_irqsave(&fbd->time_lock, flags); + + dev_ns = __fbnic_time_get_slow(fbd); + + if (fbnic_present(fbd)) { + u64_stats_update_begin(&fbn->time_seq); + WRITE_ONCE(fbn->time_offset, host_ns - dev_ns); + u64_stats_update_end(&fbn->time_seq); + ret = 0; + } else { + ret = -EIO; + } + spin_unlock_irqrestore(&fbd->time_lock, flags); + + return ret; +} + +static const struct ptp_clock_info fbnic_ptp_info = { + .owner = THIS_MODULE, + /* 1,000,000,000 - 1 PPB to ensure increment is positive + * after max negative adjustment. + */ + .max_adj = 999999999, + .do_aux_work = fbnic_ptp_do_aux_work, + .adjfine = fbnic_ptp_adjfine, + .adjtime = fbnic_ptp_adjtime, + .gettimex64 = fbnic_ptp_gettimex64, + .settime64 = fbnic_ptp_settime64, +}; + +static void fbnic_ptp_reset(struct fbnic_dev *fbd) +{ + struct fbnic_net *fbn = netdev_priv(fbd->netdev); + u64 dclk_period; + + fbnic_wr32(fbd, FBNIC_PTP_CTRL, + FBNIC_PTP_CTRL_EN | + FIELD_PREP(FBNIC_PTP_CTRL_TICK_IVAL, 1)); + + /* d_clock is 600 MHz; which in Q16.32 fixed point ns is: */ + dclk_period = (((u64)1000000000) << 32) / FBNIC_CLOCK_FREQ; + + __fbnic_time_set_addend(fbd, dclk_period); + + fbnic_wr32(fbd, FBNIC_PTP_INIT_HI, 0); + fbnic_wr32(fbd, FBNIC_PTP_INIT_LO, 0); + + fbnic_wr32(fbd, FBNIC_PTP_ADJUST, FBNIC_PTP_ADJUST_INIT); + + fbnic_wr32(fbd, FBNIC_PTP_CTRL, + FBNIC_PTP_CTRL_EN | + FBNIC_PTP_CTRL_TQS_OUT_EN | + FIELD_PREP(FBNIC_PTP_CTRL_MAC_OUT_IVAL, 3) | + FIELD_PREP(FBNIC_PTP_CTRL_TICK_IVAL, 1)); + + fbnic_rd32(fbd, FBNIC_PTP_SPARE); + + fbn->time_offset = 0; + fbn->time_high = 0; +} + +void fbnic_time_init(struct fbnic_net *fbn) +{ + /* This is not really a statistic, but the locking primitive fits + * our use case perfectly, we need an atomic 8-byte READ_ONCE() / + * WRITE_ONCE() behavior.
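+ * On 64-bit kernels u64_stats_sync carries no seqcount and the + * begin/end helpers compile away, so this costs nothing where the + * 8-byte access is already atomic.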
+ */ + u64_stats_init(&fbn->time_seq); +} + +int fbnic_time_start(struct fbnic_net *fbn) +{ + fbnic_ptp_refresh_time(fbn->fbd, fbn); + /* Assume that fbnic_ptp_do_aux_work() will never be called if not + * scheduled here + */ + return ptp_schedule_worker(fbn->fbd->ptp, FBNIC_TS_HIGH_REFRESH_JIF); +} + +void fbnic_time_stop(struct fbnic_net *fbn) +{ + ptp_cancel_worker_sync(fbn->fbd->ptp); + fbnic_ptp_fresh_check(fbn->fbd); +} + +int fbnic_ptp_setup(struct fbnic_dev *fbd) +{ + struct device *dev = fbd->dev; + unsigned long flags; + + spin_lock_init(&fbd->time_lock); + + spin_lock_irqsave(&fbd->time_lock, flags); /* Appease lockdep */ + fbnic_ptp_reset(fbd); + spin_unlock_irqrestore(&fbd->time_lock, flags); + + memcpy(&fbd->ptp_info, &fbnic_ptp_info, sizeof(fbnic_ptp_info)); + + fbd->ptp = ptp_clock_register(&fbd->ptp_info, dev); + if (IS_ERR(fbd->ptp)) + dev_err(dev, "Failed to register PTP: %pe\n", fbd->ptp); + + return PTR_ERR_OR_ZERO(fbd->ptp); +} + +void fbnic_ptp_destroy(struct fbnic_dev *fbd) +{ + if (!fbd->ptp) + return; + ptp_clock_unregister(fbd->ptp); +} diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_txrx.c b/drivers/net/ethernet/meta/fbnic/fbnic_txrx.c index 6a6d7e22f1a7..b5050fabe8fe 100644 --- a/drivers/net/ethernet/meta/fbnic/fbnic_txrx.c +++ b/drivers/net/ethernet/meta/fbnic/fbnic_txrx.c @@ -12,9 +12,14 @@ #include "fbnic_netdev.h" #include "fbnic_txrx.h" +enum { + FBNIC_XMIT_CB_TS = 0x01, +}; + struct fbnic_xmit_cb { u32 bytecount; u8 desc_count; + u8 flags; int hw_head; }; @@ -43,6 +48,46 @@ static void fbnic_ring_wr32(struct fbnic_ring *ring, unsigned int csr, u32 val) writel(val, csr_base + csr); } +/** + * fbnic_ts40_to_ns() - convert descriptor timestamp to PHC time + * @fbn: netdev priv of the FB NIC + * @ts40: timestamp read from a descriptor + * + * Return: u64 value of PHC time in nanoseconds + * + * Convert truncated 40 bit device timestamp as read from a descriptor + * to the full PHC time in nanoseconds. + */ +static __maybe_unused u64 fbnic_ts40_to_ns(struct fbnic_net *fbn, u64 ts40) +{ + unsigned int s; + u64 time_ns; + s64 offset; + u8 ts_top; + u32 high; + + do { + s = u64_stats_fetch_begin(&fbn->time_seq); + offset = READ_ONCE(fbn->time_offset); + } while (u64_stats_fetch_retry(&fbn->time_seq, s)); + + high = READ_ONCE(fbn->time_high); + + /* Bits 63..40 from periodic clock reads, 39..0 from ts40 */ + time_ns = (u64)(high >> 8) << 40 | ts40; + + /* Compare bits 32-39 between periodic reads and ts40, + * see if HW clock may have wrapped since last read. We are sure + * that periodic reads are always at least ~1 minute behind, so + * this logic works perfectly fine. 
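+ * + * Example: if bits 39..32 of the cache read 0xfe while ts_top is 0x01, + * ts40 wrapped past bit 40 after the last refresh (0x01 < 0xfe and the + * distance 0xfd > U8_MAX / 2), so 1ULL << 40 is added back. A cache of + * 0x10 with ts_top 0x20 is the normal case (timestamp ahead of the + * deliberately lowered cache) and needs no correction.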
+ */ + ts_top = ts40 >> 32; + if (ts_top < (u8)high && (u8)high - ts_top > U8_MAX / 2) + time_ns += 1ULL << 40; + + return time_ns + offset; +} + static unsigned int fbnic_desc_unused(struct fbnic_ring *ring) { return (ring->head - ring->tail - 1) & ring->size_mask; @@ -110,11 +155,32 @@ static void fbnic_unmap_page_twd(struct device *dev, __le64 *twd) #define FBNIC_TWD_TYPE(_type) \ cpu_to_le64(FIELD_PREP(FBNIC_TWD_TYPE_MASK, FBNIC_TWD_TYPE_##_type)) +static bool fbnic_tx_tstamp(struct sk_buff *skb) +{ + struct fbnic_net *fbn; + + if (!unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) + return false; + + fbn = netdev_priv(skb->dev); + if (fbn->hwtstamp_config.tx_type == HWTSTAMP_TX_OFF) + return false; + + skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; + FBNIC_XMIT_CB(skb)->flags |= FBNIC_XMIT_CB_TS; + FBNIC_XMIT_CB(skb)->hw_head = -1; + + return true; +} + static bool fbnic_tx_offloads(struct fbnic_ring *ring, struct sk_buff *skb, __le64 *meta) { unsigned int l2len, i3len; + if (fbnic_tx_tstamp(skb)) + *meta |= cpu_to_le64(FBNIC_TWD_FLAG_REQ_TS); + if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) return false; @@ -205,6 +271,9 @@ fbnic_tx_map(struct fbnic_ring *ring, struct sk_buff *skb, __le64 *meta) ring->tail = tail; + /* Record SW timestamp */ + skb_tx_timestamp(skb); + /* Verify there is room for another packet */ fbnic_maybe_stop_tx(skb->dev, ring, FBNIC_MAX_SKB_DESC); @@ -316,7 +385,7 @@ static void fbnic_clean_twq0(struct fbnic_napi_vector *nv, int napi_budget, struct fbnic_ring *ring, bool discard, unsigned int hw_head) { - u64 total_bytes = 0, total_packets = 0; + u64 total_bytes = 0, total_packets = 0, ts_lost = 0; unsigned int head = ring->head; struct netdev_queue *txq; unsigned int clean_desc; @@ -331,6 +400,13 @@ static void fbnic_clean_twq0(struct fbnic_napi_vector *nv, int napi_budget, if (desc_cnt > clean_desc) break; + if (unlikely(FBNIC_XMIT_CB(skb)->flags & FBNIC_XMIT_CB_TS)) { + FBNIC_XMIT_CB(skb)->hw_head = hw_head; + if (likely(!discard)) + break; + ts_lost++; + } + ring->tx_buf[head] = NULL; clean_desc -= desc_cnt; @@ -368,6 +444,7 @@ static void fbnic_clean_twq0(struct fbnic_napi_vector *nv, int napi_budget, if (unlikely(discard)) { u64_stats_update_begin(&ring->stats.syncp); ring->stats.dropped += total_packets; + ring->stats.ts_lost += ts_lost; u64_stats_update_end(&ring->stats.syncp); netdev_tx_completed_queue(txq, total_packets, total_bytes); @@ -384,6 +461,56 @@ static void fbnic_clean_twq0(struct fbnic_napi_vector *nv, int napi_budget, FBNIC_TX_DESC_WAKEUP); } +static void fbnic_clean_tsq(struct fbnic_napi_vector *nv, + struct fbnic_ring *ring, + u64 tcd, int *ts_head, int *head0) +{ + struct skb_shared_hwtstamps hwtstamp; + struct fbnic_net *fbn; + struct sk_buff *skb; + int head; + u64 ns; + + head = (*ts_head < 0) ? 
ring->head : *ts_head; + + do { + unsigned int desc_cnt; + + if (head == ring->tail) { + if (unlikely(net_ratelimit())) + netdev_err(nv->napi.dev, + "Tx timestamp without matching packet\n"); + return; + } + + skb = ring->tx_buf[head]; + desc_cnt = FBNIC_XMIT_CB(skb)->desc_count; + + head += desc_cnt; + head &= ring->size_mask; + } while (!(FBNIC_XMIT_CB(skb)->flags & FBNIC_XMIT_CB_TS)); + + fbn = netdev_priv(nv->napi.dev); + ns = fbnic_ts40_to_ns(fbn, FIELD_GET(FBNIC_TCD_TYPE1_TS_MASK, tcd)); + + memset(&hwtstamp, 0, sizeof(hwtstamp)); + hwtstamp.hwtstamp = ns_to_ktime(ns); + + *ts_head = head; + + FBNIC_XMIT_CB(skb)->flags &= ~FBNIC_XMIT_CB_TS; + if (*head0 < 0) { + head = FBNIC_XMIT_CB(skb)->hw_head; + if (head >= 0) + *head0 = head; + } + + skb_tstamp_tx(skb, &hwtstamp); + u64_stats_update_begin(&ring->stats.syncp); + ring->stats.ts_packets++; + u64_stats_update_end(&ring->stats.syncp); +} + static void fbnic_page_pool_init(struct fbnic_ring *ring, unsigned int idx, struct page *page) { @@ -417,10 +544,12 @@ static void fbnic_page_pool_drain(struct fbnic_ring *ring, unsigned int idx, } static void fbnic_clean_twq(struct fbnic_napi_vector *nv, int napi_budget, - struct fbnic_q_triad *qt, s32 head0) + struct fbnic_q_triad *qt, s32 ts_head, s32 head0) { if (head0 >= 0) fbnic_clean_twq0(nv, napi_budget, &qt->sub0, false, head0); + else if (ts_head >= 0) + fbnic_clean_twq0(nv, napi_budget, &qt->sub0, false, ts_head); } static void @@ -428,9 +557,9 @@ fbnic_clean_tcq(struct fbnic_napi_vector *nv, struct fbnic_q_triad *qt, int napi_budget) { struct fbnic_ring *cmpl = &qt->cmpl; + s32 head0 = -1, ts_head = -1; __le64 *raw_tcd, done; u32 head = cmpl->head; - s32 head0 = -1; done = (head & (cmpl->size_mask + 1)) ? 0 : cpu_to_le64(FBNIC_TCD_DONE); raw_tcd = &cmpl->desc[head & cmpl->size_mask]; @@ -453,6 +582,12 @@ fbnic_clean_tcq(struct fbnic_napi_vector *nv, struct fbnic_q_triad *qt, * they are skipped for now. 
*/ break; + case FBNIC_TCD_TYPE_1: + if (WARN_ON_ONCE(tcd & FBNIC_TCD_TWQ1)) + break; + + fbnic_clean_tsq(nv, &qt->sub0, tcd, &ts_head, &head0); + break; default: break; } @@ -472,7 +607,7 @@ fbnic_clean_tcq(struct fbnic_napi_vector *nv, struct fbnic_q_triad *qt, } /* Unmap and free processed buffers */ - fbnic_clean_twq(nv, napi_budget, qt, head0); + fbnic_clean_twq(nv, napi_budget, qt, ts_head, head0); } static void fbnic_clean_bdq(struct fbnic_napi_vector *nv, int napi_budget, @@ -707,6 +842,10 @@ static struct sk_buff *fbnic_build_skb(struct fbnic_napi_vector *nv, /* Set MAC header specific fields */ skb->protocol = eth_type_trans(skb, nv->napi.dev); + /* Add timestamp if present */ + if (pkt->hwtstamp) + skb_hwtstamps(skb)->hwtstamp = pkt->hwtstamp; + return skb; } @@ -717,6 +856,23 @@ static enum pkt_hash_types fbnic_skb_hash_type(u64 rcd) PKT_HASH_TYPE_L2; } +static void fbnic_rx_tstamp(struct fbnic_napi_vector *nv, u64 rcd, + struct fbnic_pkt_buff *pkt) +{ + struct fbnic_net *fbn; + u64 ns, ts; + + if (!FIELD_GET(FBNIC_RCD_OPT_META_TS, rcd)) + return; + + fbn = netdev_priv(nv->napi.dev); + ts = FIELD_GET(FBNIC_RCD_OPT_META_TS_MASK, rcd); + ns = fbnic_ts40_to_ns(fbn, ts); + + /* Add timestamp to shared info */ + pkt->hwtstamp = ns_to_ktime(ns); +} + static void fbnic_populate_skb_fields(struct fbnic_napi_vector *nv, u64 rcd, struct sk_buff *skb, struct fbnic_q_triad *qt) @@ -781,6 +937,8 @@ static int fbnic_clean_rcq(struct fbnic_napi_vector *nv, if (FIELD_GET(FBNIC_RCD_OPT_META_TYPE_MASK, rcd)) break; + fbnic_rx_tstamp(nv, rcd, pkt); + /* We currently ignore the action table index */ break; case FBNIC_RCD_TYPE_META: @@ -907,6 +1065,8 @@ static void fbnic_aggregate_ring_tx_counters(struct fbnic_net *fbn, fbn->tx_stats.bytes += stats->bytes; fbn->tx_stats.packets += stats->packets; fbn->tx_stats.dropped += stats->dropped; + fbn->tx_stats.ts_lost += stats->ts_lost; + fbn->tx_stats.ts_packets += stats->ts_packets; } static void fbnic_remove_tx_ring(struct fbnic_net *fbn, diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_txrx.h b/drivers/net/ethernet/meta/fbnic/fbnic_txrx.h index 2f91f68d11d5..8d626287c3f4 100644 --- a/drivers/net/ethernet/meta/fbnic/fbnic_txrx.h +++ b/drivers/net/ethernet/meta/fbnic/fbnic_txrx.h @@ -47,6 +47,7 @@ struct fbnic_net; struct fbnic_pkt_buff { struct xdp_buff buff; + ktime_t hwtstamp; u32 data_truesize; u16 data_len; u16 nr_frags; @@ -56,6 +57,8 @@ struct fbnic_queue_stats { u64 packets; u64 bytes; u64 dropped; + u64 ts_packets; + u64 ts_lost; struct u64_stats_sync syncp; }; diff --git a/drivers/net/ethernet/micrel/ks8842.c b/drivers/net/ethernet/micrel/ks8842.c index ddd87ef71caf..c7b0b09c2b09 100644 --- a/drivers/net/ethernet/micrel/ks8842.c +++ b/drivers/net/ethernet/micrel/ks8842.c @@ -1247,7 +1247,7 @@ static struct platform_driver ks8842_platform_driver = { .name = DRV_NAME, }, .probe = ks8842_probe, - .remove_new = ks8842_remove, + .remove = ks8842_remove, }; module_platform_driver(ks8842_platform_driver); diff --git a/drivers/net/ethernet/micrel/ks8851_common.c b/drivers/net/ethernet/micrel/ks8851_common.c index 7fa1820db9cc..bb5138806c3f 100644 --- a/drivers/net/ethernet/micrel/ks8851_common.c +++ b/drivers/net/ethernet/micrel/ks8851_common.c @@ -216,22 +216,6 @@ static void ks8851_init_mac(struct ks8851_net *ks, struct device_node *np) } /** - * ks8851_dbg_dumpkkt - dump initial packet contents to debug - * @ks: The device state - * @rxpkt: The data for the received packet - * - * Dump the initial data from the packet to dev_dbg(). 
- */ -static void ks8851_dbg_dumpkkt(struct ks8851_net *ks, u8 *rxpkt) -{ - netdev_dbg(ks->netdev, - "pkt %02x%02x%02x%02x %02x%02x%02x%02x %02x%02x%02x%02x\n", - rxpkt[4], rxpkt[5], rxpkt[6], rxpkt[7], - rxpkt[8], rxpkt[9], rxpkt[10], rxpkt[11], - rxpkt[12], rxpkt[13], rxpkt[14], rxpkt[15]); -} - -/** * ks8851_rx_pkts - receive packets from the host * @ks: The device information. * @rxq: Queue of packets received in this function. @@ -296,8 +280,8 @@ static void ks8851_rx_pkts(struct ks8851_net *ks, struct sk_buff_head *rxq) ks->rdfifo(ks, rxpkt, rxalign + 8); - if (netif_msg_pktdata(ks)) - ks8851_dbg_dumpkkt(ks, rxpkt); + netif_dbg(ks, pktdata, ks->netdev, + "pkt %12ph\n", &rxpkt[4]); skb->protocol = eth_type_trans(skb, ks->netdev); __skb_queue_tail(rxq, skb); diff --git a/drivers/net/ethernet/micrel/ks8851_par.c b/drivers/net/ethernet/micrel/ks8851_par.c index 381b9cd285eb..78695be2570b 100644 --- a/drivers/net/ethernet/micrel/ks8851_par.c +++ b/drivers/net/ethernet/micrel/ks8851_par.c @@ -334,7 +334,7 @@ static struct platform_driver ks8851_driver = { .pm = &ks8851_pm_ops, }, .probe = ks8851_probe_par, - .remove_new = ks8851_remove_par, + .remove = ks8851_remove_par, }; module_platform_driver(ks8851_driver); diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_main.c b/drivers/net/ethernet/microchip/lan966x/lan966x_main.c index 534d4716d5f7..3234a960fcc3 100644 --- a/drivers/net/ethernet/microchip/lan966x/lan966x_main.c +++ b/drivers/net/ethernet/microchip/lan966x/lan966x_main.c @@ -1285,7 +1285,7 @@ static void lan966x_remove(struct platform_device *pdev) static struct platform_driver lan966x_driver = { .probe = lan966x_probe, - .remove_new = lan966x_remove, + .remove = lan966x_remove, .driver = { .name = "lan966x-switch", .of_match_table = lan966x_match, diff --git a/drivers/net/ethernet/microchip/sparx5/Makefile b/drivers/net/ethernet/microchip/sparx5/Makefile index 288de95add18..3435ca86dd70 100644 --- a/drivers/net/ethernet/microchip/sparx5/Makefile +++ b/drivers/net/ethernet/microchip/sparx5/Makefile @@ -11,7 +11,7 @@ sparx5-switch-y := sparx5_main.o sparx5_packet.o \ sparx5_ptp.o sparx5_pgid.o sparx5_tc.o sparx5_qos.o \ sparx5_vcap_impl.o sparx5_vcap_ag_api.o sparx5_tc_flower.o \ sparx5_tc_matchall.o sparx5_pool.o sparx5_sdlb.o sparx5_police.o \ - sparx5_psfp.o sparx5_mirror.o + sparx5_psfp.o sparx5_mirror.o sparx5_regs.o sparx5-switch-$(CONFIG_SPARX5_DCB) += sparx5_dcb.o sparx5-switch-$(CONFIG_DEBUG_FS) += sparx5_vcap_debugfs.o diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_calendar.c b/drivers/net/ethernet/microchip/sparx5/sparx5_calendar.c index 76a8bb596aec..b2a8d04ab509 100644 --- a/drivers/net/ethernet/microchip/sparx5/sparx5_calendar.c +++ b/drivers/net/ethernet/microchip/sparx5/sparx5_calendar.c @@ -15,9 +15,7 @@ #define SPX5_CALBITS_PER_PORT 3 /* Bit per port in calendar register */ /* DSM calendar information */ -#define SPX5_DSM_CAL_LEN 64 #define SPX5_DSM_CAL_EMPTY 0xFFFF -#define SPX5_DSM_CAL_MAX_DEVS_PER_TAXI 13 #define SPX5_DSM_CAL_TAXIS 8 #define SPX5_DSM_CAL_BW_LOSS 553 @@ -37,19 +35,6 @@ static u32 sparx5_taxi_ports[SPX5_DSM_CAL_TAXIS][SPX5_DSM_CAL_MAX_DEVS_PER_TAXI] {64, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99}, }; -struct sparx5_calendar_data { - u32 schedule[SPX5_DSM_CAL_LEN]; - u32 avg_dist[SPX5_DSM_CAL_MAX_DEVS_PER_TAXI]; - u32 taxi_ports[SPX5_DSM_CAL_MAX_DEVS_PER_TAXI]; - u32 taxi_speeds[SPX5_DSM_CAL_MAX_DEVS_PER_TAXI]; - u32 dev_slots[SPX5_DSM_CAL_MAX_DEVS_PER_TAXI]; - u32 new_slots[SPX5_DSM_CAL_LEN]; - u32 
temp_sched[SPX5_DSM_CAL_LEN]; - u32 indices[SPX5_DSM_CAL_LEN]; - u32 short_list[SPX5_DSM_CAL_LEN]; - u32 long_list[SPX5_DSM_CAL_LEN]; -}; - static u32 sparx5_target_bandwidth(struct sparx5 *sparx5) { switch (sparx5->target_ct) { @@ -131,18 +116,24 @@ static enum sparx5_cal_bw sparx5_get_port_cal_speed(struct sparx5 *sparx5, { struct sparx5_port *port; - if (portno >= SPX5_PORTS) { + if (portno >= sparx5->data->consts->n_ports) { /* Internal ports */ - if (portno == SPX5_PORT_CPU_0 || portno == SPX5_PORT_CPU_1) { + if (portno == + sparx5_get_internal_port(sparx5, SPX5_PORT_CPU_0) || + portno == + sparx5_get_internal_port(sparx5, SPX5_PORT_CPU_1)) { /* Equals 1.25G */ return SPX5_CAL_SPEED_2G5; - } else if (portno == SPX5_PORT_VD0) { + } else if (portno == + sparx5_get_internal_port(sparx5, SPX5_PORT_VD0)) { /* IPMC only idle BW */ return SPX5_CAL_SPEED_NONE; - } else if (portno == SPX5_PORT_VD1) { + } else if (portno == + sparx5_get_internal_port(sparx5, SPX5_PORT_VD1)) { /* OAM only idle BW */ return SPX5_CAL_SPEED_NONE; - } else if (portno == SPX5_PORT_VD2) { + } else if (portno == + sparx5_get_internal_port(sparx5, SPX5_PORT_VD2)) { /* IPinIP gets only idle BW */ return SPX5_CAL_SPEED_NONE; } @@ -159,6 +150,7 @@ static enum sparx5_cal_bw sparx5_get_port_cal_speed(struct sparx5 *sparx5, /* Auto configure the QSYS calendar based on port configuration */ int sparx5_config_auto_calendar(struct sparx5 *sparx5) { + const struct sparx5_consts *consts = sparx5->data->consts; u32 cal[7], value, idx, portno; u32 max_core_bw; u32 total_bw = 0, used_port_bw = 0; @@ -174,7 +166,7 @@ int sparx5_config_auto_calendar(struct sparx5 *sparx5) } /* Setup the calendar with the bandwidth to each port */ - for (portno = 0; portno < SPX5_PORTS_ALL; portno++) { + for (portno = 0; portno < consts->n_ports_all; portno++) { u64 reg, offset, this_bw; spd = sparx5_get_port_cal_speed(sparx5, portno); @@ -182,7 +174,7 @@ int sparx5_config_auto_calendar(struct sparx5 *sparx5) continue; this_bw = sparx5_cal_speed_to_value(spd); - if (portno < SPX5_PORTS) + if (portno < consts->n_ports) used_port_bw += this_bw; else /* Internal ports are granted half the value */ @@ -208,12 +200,13 @@ int sparx5_config_auto_calendar(struct sparx5 *sparx5) } /* Halt the calendar while changing it */ - spx5_rmw(QSYS_CAL_CTRL_CAL_MODE_SET(10), - QSYS_CAL_CTRL_CAL_MODE, - sparx5, QSYS_CAL_CTRL); + if (is_sparx5(sparx5)) + spx5_rmw(QSYS_CAL_CTRL_CAL_MODE_SET(10), + QSYS_CAL_CTRL_CAL_MODE, + sparx5, QSYS_CAL_CTRL); /* Assign port bandwidth to auto calendar */ - for (idx = 0; idx < ARRAY_SIZE(cal); idx++) + for (idx = 0; idx < consts->n_auto_cals; idx++) spx5_wr(cal[idx], sparx5, QSYS_CAL_AUTO(idx)); /* Increase grant rate of all ports to account for @@ -278,8 +271,8 @@ static u32 sparx5_dsm_cp_cal(u32 *sched) return SPX5_DSM_CAL_EMPTY; } -static int sparx5_dsm_calendar_calc(struct sparx5 *sparx5, u32 taxi, - struct sparx5_calendar_data *data) +int sparx5_dsm_calendar_calc(struct sparx5 *sparx5, u32 taxi, + struct sparx5_calendar_data *data) { bool slow_mode; u32 gcd, idx, sum, min, factor; @@ -304,7 +297,7 @@ static int sparx5_dsm_calendar_calc(struct sparx5 *sparx5, u32 taxi, for (idx = 0; idx < SPX5_DSM_CAL_MAX_DEVS_PER_TAXI; idx++) { u32 portno = data->taxi_ports[idx]; - if (portno < SPX5_TAXI_PORT_MAX) { + if (portno < sparx5->data->consts->n_ports_all) { data->taxi_speeds[idx] = sparx5_cal_speed_to_value (sparx5_get_port_cal_speed(sparx5, portno)); } else { @@ -565,6 +558,7 @@ update_err: /* Configure the DSM calendar based on port 
configuration */ int sparx5_config_dsm_calendar(struct sparx5 *sparx5) { + const struct sparx5_ops *ops = sparx5->data->ops; int taxi; struct sparx5_calendar_data *data; int err = 0; @@ -573,8 +567,8 @@ int sparx5_config_dsm_calendar(struct sparx5 *sparx5) if (!data) return -ENOMEM; - for (taxi = 0; taxi < SPX5_DSM_CAL_TAXIS; ++taxi) { - err = sparx5_dsm_calendar_calc(sparx5, taxi, data); + for (taxi = 0; taxi < sparx5->data->consts->n_dsm_cal_taxis; ++taxi) { + err = ops->dsm_calendar_calc(sparx5, taxi, data); if (err) { dev_err(sparx5->dev, "DSM calendar calculation failed\n"); goto cal_out; diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_dcb.c b/drivers/net/ethernet/microchip/sparx5/sparx5_dcb.c index 2d763664dcda..10224ad63a78 100644 --- a/drivers/net/ethernet/microchip/sparx5/sparx5_dcb.c +++ b/drivers/net/ethernet/microchip/sparx5/sparx5_dcb.c @@ -234,10 +234,11 @@ static int sparx5_dcb_ieee_dscp_setdel(struct net_device *dev, struct dcb_app *)) { struct sparx5_port *port = netdev_priv(dev); + struct sparx5 *sparx5 = port->sparx5; struct sparx5_port *port_itr; int err, i; - for (i = 0; i < SPX5_PORTS; i++) { + for (i = 0; i < sparx5->data->consts->n_ports; i++) { port_itr = port->sparx5->ports[i]; if (!port_itr) continue; @@ -386,7 +387,7 @@ int sparx5_dcb_init(struct sparx5 *sparx5) struct sparx5_port *port; int i; - for (i = 0; i < SPX5_PORTS; i++) { + for (i = 0; i < sparx5->data->consts->n_ports; i++) { port = sparx5->ports[i]; if (!port) continue; diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_ethtool.c b/drivers/net/ethernet/microchip/sparx5/sparx5_ethtool.c index d898a7238b48..832f4ae57c83 100644 --- a/drivers/net/ethernet/microchip/sparx5/sparx5_ethtool.c +++ b/drivers/net/ethernet/microchip/sparx5/sparx5_ethtool.c @@ -505,8 +505,8 @@ static void sparx5_get_dev_misc_stats(u64 *portstats, void __iomem *inst, u32 static void sparx5_get_device_stats(struct sparx5 *sparx5, int portno) { u64 *portstats = &sparx5->stats[portno * sparx5->num_stats]; - u32 tinst = sparx5_port_dev_index(portno); - u32 dev = sparx5_to_high_dev(portno); + u32 tinst = sparx5_port_dev_index(sparx5, portno); + u32 dev = sparx5_to_high_dev(sparx5, portno); void __iomem *inst; inst = spx5_inst_get(sparx5, dev, tinst); @@ -819,8 +819,8 @@ static void sparx5_get_eth_phy_stats(struct net_device *ndev, portstats = &sparx5->stats[portno * sparx5->num_stats]; if (sparx5_is_baser(port->conf.portmode)) { - u32 tinst = sparx5_port_dev_index(portno); - u32 dev = sparx5_to_high_dev(portno); + u32 tinst = sparx5_port_dev_index(sparx5, portno); + u32 dev = sparx5_to_high_dev(sparx5, portno); inst = spx5_inst_get(sparx5, dev, tinst); sparx5_get_dev_phy_stats(portstats, inst, tinst); @@ -844,8 +844,8 @@ static void sparx5_get_eth_mac_stats(struct net_device *ndev, portstats = &sparx5->stats[portno * sparx5->num_stats]; if (sparx5_is_baser(port->conf.portmode)) { - u32 tinst = sparx5_port_dev_index(portno); - u32 dev = sparx5_to_high_dev(portno); + u32 tinst = sparx5_port_dev_index(sparx5, portno); + u32 dev = sparx5_to_high_dev(sparx5, portno); inst = spx5_inst_get(sparx5, dev, tinst); sparx5_get_dev_mac_stats(portstats, inst, tinst); @@ -912,8 +912,8 @@ static void sparx5_get_eth_mac_ctrl_stats(struct net_device *ndev, portstats = &sparx5->stats[portno * sparx5->num_stats]; if (sparx5_is_baser(port->conf.portmode)) { - u32 tinst = sparx5_port_dev_index(portno); - u32 dev = sparx5_to_high_dev(portno); + u32 tinst = sparx5_port_dev_index(sparx5, portno); + u32 dev = sparx5_to_high_dev(sparx5, portno); 
inst = spx5_inst_get(sparx5, dev, tinst); sparx5_get_dev_mac_ctrl_stats(portstats, inst, tinst); @@ -944,8 +944,8 @@ static void sparx5_get_eth_rmon_stats(struct net_device *ndev, portstats = &sparx5->stats[portno * sparx5->num_stats]; if (sparx5_is_baser(port->conf.portmode)) { - u32 tinst = sparx5_port_dev_index(portno); - u32 dev = sparx5_to_high_dev(portno); + u32 tinst = sparx5_port_dev_index(sparx5, portno); + u32 dev = sparx5_to_high_dev(sparx5, portno); inst = spx5_inst_get(sparx5, dev, tinst); sparx5_get_dev_rmon_stats(portstats, inst, tinst); @@ -1027,8 +1027,8 @@ static void sparx5_get_sset_data(struct net_device *ndev, portstats = &sparx5->stats[portno * sparx5->num_stats]; if (sparx5_is_baser(port->conf.portmode)) { - u32 tinst = sparx5_port_dev_index(portno); - u32 dev = sparx5_to_high_dev(portno); + u32 tinst = sparx5_port_dev_index(sparx5, portno); + u32 dev = sparx5_to_high_dev(sparx5, portno); inst = spx5_inst_get(sparx5, dev, tinst); sparx5_get_dev_misc_stats(portstats, inst, tinst); @@ -1122,7 +1122,7 @@ static void sparx5_update_stats(struct sparx5 *sparx5) { int idx; - for (idx = 0; idx < SPX5_PORTS; idx++) + for (idx = 0; idx < sparx5->data->consts->n_ports; idx++) if (sparx5->ports[idx]) sparx5_update_port_stats(sparx5, idx); } @@ -1189,7 +1189,7 @@ static int sparx5_get_ts_info(struct net_device *dev, struct sparx5 *sparx5 = port->sparx5; struct sparx5_phc *phc; - if (!sparx5->ptp) + if (!sparx5->ptp && is_sparx5(sparx5)) return ethtool_op_get_ts_info(dev, info); phc = &sparx5->phc[SPARX5_PHC_PORT]; @@ -1228,6 +1228,7 @@ const struct ethtool_ops sparx5_ethtool_ops = { int sparx_stats_init(struct sparx5 *sparx5) { + const struct sparx5_consts *consts = sparx5->data->consts; char queue_name[32]; int portno; @@ -1235,14 +1236,15 @@ int sparx_stats_init(struct sparx5 *sparx5) sparx5->num_stats = spx5_stats_count; sparx5->num_ethtool_stats = ARRAY_SIZE(sparx5_stats_layout); sparx5->stats = devm_kcalloc(sparx5->dev, - SPX5_PORTS_ALL * sparx5->num_stats, + consts->n_ports_all * + sparx5->num_stats, sizeof(u64), GFP_KERNEL); if (!sparx5->stats) return -ENOMEM; mutex_init(&sparx5->queue_stats_lock); sparx5_config_stats(sparx5); - for (portno = 0; portno < SPX5_PORTS; portno++) + for (portno = 0; portno < consts->n_ports; portno++) if (sparx5->ports[portno]) sparx5_config_port_stats(sparx5, portno); diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_fdma.c b/drivers/net/ethernet/microchip/sparx5/sparx5_fdma.c index 61df874b7623..88f7509f0980 100644 --- a/drivers/net/ethernet/microchip/sparx5/sparx5_fdma.c +++ b/drivers/net/ethernet/microchip/sparx5/sparx5_fdma.c @@ -156,7 +156,9 @@ static bool sparx5_fdma_rx_get_frame(struct sparx5 *sparx5, struct sparx5_rx *rx /* Now do the normal processing of the skb */ sparx5_ifh_parse((u32 *)skb->data, &fi); /* Map to port netdev */ - port = fi.src_port < SPX5_PORTS ? sparx5->ports[fi.src_port] : NULL; + port = fi.src_port < sparx5->data->consts->n_ports ? 
+ sparx5->ports[fi.src_port] : + NULL; if (!port || !port->ndev) { dev_err(sparx5->dev, "Data on inactive port %d\n", fi.src_port); sparx5_xtr_flush(sparx5, XTR_QUEUE); @@ -296,7 +298,7 @@ static void sparx5_fdma_rx_init(struct sparx5 *sparx5, fdma->ops.dataptr_cb = &sparx5_fdma_rx_dataptr_cb; fdma->ops.nextptr_cb = &fdma_nextptr_cb; /* Fetch a netdev for SKB and NAPI use, any will do */ - for (idx = 0; idx < SPX5_PORTS; ++idx) { + for (idx = 0; idx < sparx5->data->consts->n_ports; ++idx) { struct sparx5_port *port = sparx5->ports[idx]; if (port && port->ndev) { @@ -362,7 +364,9 @@ static void sparx5_fdma_injection_mode(struct sparx5 *sparx5) sparx5, QS_INJ_GRP_CFG(INJ_QUEUE)); /* CPU ports capture setup */ - for (portno = SPX5_PORT_CPU_0; portno <= SPX5_PORT_CPU_1; portno++) { + for (portno = sparx5_get_internal_port(sparx5, SPX5_PORT_CPU_0); + portno <= sparx5_get_internal_port(sparx5, SPX5_PORT_CPU_1); + portno++) { /* ASM CPU port: No preamble, IFH, enable padding */ spx5_wr(ASM_PORT_CFG_PAD_ENA_SET(1) | ASM_PORT_CFG_NO_PREAMBLE_ENA_SET(1) | diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_mactable.c b/drivers/net/ethernet/microchip/sparx5/sparx5_mactable.c index 75868b3f548e..f5584244612c 100644 --- a/drivers/net/ethernet/microchip/sparx5/sparx5_mactable.c +++ b/drivers/net/ethernet/microchip/sparx5/sparx5_mactable.c @@ -80,15 +80,16 @@ static void sparx5_mact_select(struct sparx5 *sparx5, int sparx5_mact_learn(struct sparx5 *sparx5, int pgid, const unsigned char mac[ETH_ALEN], u16 vid) { + const struct sparx5_consts *consts = sparx5->data->consts; int addr, type, ret; - if (pgid < SPX5_PORTS) { + if (pgid < consts->n_ports) { type = MAC_ENTRY_ADDR_TYPE_UPSID_PN; addr = pgid % 32; addr += (pgid / 32) << 5; /* Add upsid */ } else { type = MAC_ENTRY_ADDR_TYPE_MC_IDX; - addr = pgid - SPX5_PORTS; + addr = pgid - consts->n_ports; } mutex_lock(&sparx5->lock); @@ -128,7 +129,8 @@ int sparx5_mc_sync(struct net_device *dev, const unsigned char *addr) struct sparx5_port *port = netdev_priv(dev); struct sparx5 *sparx5 = port->sparx5; - return sparx5_mact_learn(sparx5, PGID_CPU, addr, port->pvid); + return sparx5_mact_learn(sparx5, sparx5_get_pgid(sparx5, PGID_CPU), + addr, port->pvid); } static int sparx5_mact_get(struct sparx5 *sparx5, @@ -371,7 +373,7 @@ static void sparx5_mact_handle_entry(struct sparx5 *sparx5, return; port = LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_ADDR_GET(cfg2); - if (port >= SPX5_PORTS) + if (port >= sparx5->data->consts->n_ports) return; if (!test_bit(port, sparx5->bridge_mask)) diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_main.c b/drivers/net/ethernet/microchip/sparx5/sparx5_main.c index b64c814eac11..d1e9bc030c80 100644 --- a/drivers/net/ethernet/microchip/sparx5/sparx5_main.c +++ b/drivers/net/ethernet/microchip/sparx5/sparx5_main.c @@ -29,8 +29,8 @@ #include "sparx5_port.h" #include "sparx5_qos.h" -#define QLIM_WM(fraction) \ - ((SPX5_BUFFER_MEMORY / SPX5_BUFFER_CELL_SZ - 100) * (fraction) / 100) +const struct sparx5_regs *regs; + #define IO_RANGES 3 struct initial_port_config { @@ -45,12 +45,6 @@ struct sparx5_ram_config { u32 init_val; }; -struct sparx5_main_io_resource { - enum sparx5_target id; - phys_addr_t offset; - int range; -}; - static const struct sparx5_main_io_resource sparx5_main_iomap[] = { { TARGET_CPU, 0, 0 }, /* 0x600000000 */ { TARGET_FDMA, 0x80000, 0 }, /* 0x600080000 */ @@ -214,23 +208,45 @@ static const struct sparx5_main_io_resource sparx5_main_iomap[] = { { TARGET_VOP, 0x11a00000, 2 }, /* 0x611a00000 */ }; +bool is_sparx5(struct 
sparx5 *sparx5) +{ + switch (sparx5->target_ct) { + case SPX5_TARGET_CT_7546: + case SPX5_TARGET_CT_7549: + case SPX5_TARGET_CT_7552: + case SPX5_TARGET_CT_7556: + case SPX5_TARGET_CT_7558: + case SPX5_TARGET_CT_7546TSN: + case SPX5_TARGET_CT_7549TSN: + case SPX5_TARGET_CT_7552TSN: + case SPX5_TARGET_CT_7556TSN: + case SPX5_TARGET_CT_7558TSN: + return true; + default: + return false; + } +} + static int sparx5_create_targets(struct sparx5 *sparx5) { + const struct sparx5_main_io_resource *iomap = sparx5->data->iomap; + int iomap_size = sparx5->data->iomap_size; + int ioranges = sparx5->data->ioranges; struct resource *iores[IO_RANGES]; void __iomem *iomem[IO_RANGES]; void __iomem *begin[IO_RANGES]; int range_id[IO_RANGES]; int idx, jdx; - for (idx = 0, jdx = 0; jdx < ARRAY_SIZE(sparx5_main_iomap); jdx++) { - const struct sparx5_main_io_resource *iomap = &sparx5_main_iomap[jdx]; + for (idx = 0, jdx = 0; jdx < iomap_size; jdx++) { + const struct sparx5_main_io_resource *io = &iomap[jdx]; - if (idx == iomap->range) { + if (idx == io->range) { range_id[idx] = jdx; idx++; } } - for (idx = 0; idx < IO_RANGES; idx++) { + for (idx = 0; idx < ioranges; idx++) { iores[idx] = platform_get_resource(sparx5->pdev, IORESOURCE_MEM, idx); if (!iores[idx]) { @@ -245,12 +261,12 @@ static int sparx5_create_targets(struct sparx5 *sparx5) iores[idx]->name); return -ENOMEM; } - begin[idx] = iomem[idx] - sparx5_main_iomap[range_id[idx]].offset; + begin[idx] = iomem[idx] - iomap[range_id[idx]].offset; } - for (jdx = 0; jdx < ARRAY_SIZE(sparx5_main_iomap); jdx++) { - const struct sparx5_main_io_resource *iomap = &sparx5_main_iomap[jdx]; + for (jdx = 0; jdx < iomap_size; jdx++) { + const struct sparx5_main_io_resource *io = &iomap[jdx]; - sparx5->regs[iomap->id] = begin[iomap->range] + iomap->offset; + sparx5->regs[io->id] = begin[io->range] + io->offset; } return 0; } @@ -465,44 +481,45 @@ static int sparx5_init_coreclock(struct sparx5 *sparx5) return -ENODEV; } - switch (freq) { - case SPX5_CORE_CLOCK_250MHZ: - clk_div = 10; - pol_upd_int = 312; - break; - case SPX5_CORE_CLOCK_500MHZ: - clk_div = 5; - pol_upd_int = 624; - break; - case SPX5_CORE_CLOCK_625MHZ: - clk_div = 4; - pol_upd_int = 780; - break; - default: - dev_err(sparx5->dev, "%d coreclock not supported on (%#04x)\n", - sparx5->coreclock, sparx5->target_ct); - return -EINVAL; + if (is_sparx5(sparx5)) { + switch (freq) { + case SPX5_CORE_CLOCK_250MHZ: + clk_div = 10; + pol_upd_int = 312; + break; + case SPX5_CORE_CLOCK_500MHZ: + clk_div = 5; + pol_upd_int = 624; + break; + case SPX5_CORE_CLOCK_625MHZ: + clk_div = 4; + pol_upd_int = 780; + break; + default: + dev_err(sparx5->dev, + "%d coreclock not supported on (%#04x)\n", + sparx5->coreclock, sparx5->target_ct); + return -EINVAL; + } + + /* Configure the LCPLL */ + spx5_rmw(CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_CLK_DIV_SET(clk_div) | + CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_PRE_DIV_SET(0) | + CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_ROT_DIR_SET(0) | + CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_ROT_SEL_SET(0) | + CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_ROT_ENA_SET(0) | + CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_CLK_ENA_SET(1), + CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_CLK_DIV | + CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_PRE_DIV | + CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_ROT_DIR | + CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_ROT_SEL | + CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_ROT_ENA | + CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_CLK_ENA, + sparx5, CLKGEN_LCPLL1_CORE_CLK_CFG); } /* Update state with chosen frequency */ sparx5->coreclock = freq; - - /* Configure the LCPLL */ - 
spx5_rmw(CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_CLK_DIV_SET(clk_div) | - CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_PRE_DIV_SET(0) | - CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_ROT_DIR_SET(0) | - CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_ROT_SEL_SET(0) | - CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_ROT_ENA_SET(0) | - CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_CLK_ENA_SET(1), - CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_CLK_DIV | - CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_PRE_DIV | - CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_ROT_DIR | - CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_ROT_SEL | - CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_ROT_ENA | - CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_CLK_ENA, - sparx5, - CLKGEN_LCPLL1_CORE_CLK_CFG); - clk_period = sparx5_clk_period(freq); spx5_rmw(HSCH_SYS_CLK_PER_100PS_SET(clk_period / 100), @@ -525,7 +542,7 @@ static int sparx5_init_coreclock(struct sparx5 *sparx5) sparx5, LRN_AUTOAGE_CFG_1); - for (idx = 0; idx < 3; idx++) + for (idx = 0; idx < sparx5->data->consts->n_sio_clks; idx++) spx5_rmw(GCB_SIO_CLOCK_SYS_CLK_PERIOD_SET(clk_period / 100), GCB_SIO_CLOCK_SYS_CLK_PERIOD, sparx5, @@ -545,25 +562,36 @@ static int sparx5_init_coreclock(struct sparx5 *sparx5) return 0; } +static u32 qlim_wm(struct sparx5 *sparx5, int fraction) +{ + return (sparx5->data->consts->buf_size / SPX5_BUFFER_CELL_SZ - 100) * + fraction / 100; +} + static int sparx5_qlim_set(struct sparx5 *sparx5) { + const struct sparx5_consts *consts = sparx5->data->consts; u32 res, dp, prio; for (res = 0; res < 2; res++) { for (prio = 0; prio < 8; prio++) spx5_wr(0xFFF, sparx5, - QRES_RES_CFG(prio + 630 + res * 1024)); + QRES_RES_CFG(prio + + consts->qres_max_prio_idx + + res * 1024)); for (dp = 0; dp < 4; dp++) spx5_wr(0xFFF, sparx5, - QRES_RES_CFG(dp + 638 + res * 1024)); + QRES_RES_CFG(dp + + consts->qres_max_colour_idx + + res * 1024)); } /* Set 80,90,95,100% of memory size for top watermarks */ - spx5_wr(QLIM_WM(80), sparx5, XQS_QLIMIT_SHR_QLIM_CFG(0)); - spx5_wr(QLIM_WM(90), sparx5, XQS_QLIMIT_SHR_CTOP_CFG(0)); - spx5_wr(QLIM_WM(95), sparx5, XQS_QLIMIT_SHR_ATOP_CFG(0)); - spx5_wr(QLIM_WM(100), sparx5, XQS_QLIMIT_SHR_TOP_CFG(0)); + spx5_wr(qlim_wm(sparx5, 80), sparx5, XQS_QLIMIT_SHR_QLIM_CFG(0)); + spx5_wr(qlim_wm(sparx5, 90), sparx5, XQS_QLIMIT_SHR_CTOP_CFG(0)); + spx5_wr(qlim_wm(sparx5, 95), sparx5, XQS_QLIMIT_SHR_ATOP_CFG(0)); + spx5_wr(qlim_wm(sparx5, 100), sparx5, XQS_QLIMIT_SHR_TOP_CFG(0)); return 0; } @@ -585,7 +613,7 @@ static void sparx5_board_init(struct sparx5 *sparx5) GCB_HW_SGPIO_SD_CFG); /* Refer to LOS SGPIO */ - for (idx = 0; idx < SPX5_PORTS; idx++) + for (idx = 0; idx < sparx5->data->consts->n_ports; idx++) if (sparx5->ports[idx]) if (sparx5->ports[idx]->conf.sd_sgpio != ~0) spx5_wr(sparx5->ports[idx]->conf.sd_sgpio, @@ -596,12 +624,14 @@ static void sparx5_board_init(struct sparx5 *sparx5) static int sparx5_start(struct sparx5 *sparx5) { u8 broadcast[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff}; + const struct sparx5_consts *consts = sparx5->data->consts; + const struct sparx5_ops *ops = sparx5->data->ops; char queue_name[32]; u32 idx; int err; /* Setup own UPSIDs */ - for (idx = 0; idx < 3; idx++) { + for (idx = 0; idx < consts->n_own_upsids; idx++) { spx5_wr(idx, sparx5, ANA_AC_OWN_UPSID(idx)); spx5_wr(idx, sparx5, ANA_CL_OWN_UPSID(idx)); spx5_wr(idx, sparx5, ANA_L2_OWN_UPSID(idx)); @@ -609,7 +639,7 @@ static int sparx5_start(struct sparx5 *sparx5) } /* Enable CPU ports */ - for (idx = SPX5_PORTS; idx < SPX5_PORTS_ALL; idx++) + for (idx = consts->n_ports; idx < consts->n_ports_all; idx++) spx5_rmw(QFWD_SWITCH_PORT_MODE_PORT_ENA_SET(1), QFWD_SWITCH_PORT_MODE_PORT_ENA, sparx5, @@ 
@@ -585,7 +613,7 @@ static void sparx5_board_init(struct sparx5 *sparx5)
 		GCB_HW_SGPIO_SD_CFG);
 
 	/* Refer to LOS SGPIO */
-	for (idx = 0; idx < SPX5_PORTS; idx++)
+	for (idx = 0; idx < sparx5->data->consts->n_ports; idx++)
 		if (sparx5->ports[idx])
 			if (sparx5->ports[idx]->conf.sd_sgpio != ~0)
 				spx5_wr(sparx5->ports[idx]->conf.sd_sgpio,
@@ -596,12 +624,14 @@ static int sparx5_start(struct sparx5 *sparx5)
 {
 	u8 broadcast[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
+	const struct sparx5_consts *consts = sparx5->data->consts;
+	const struct sparx5_ops *ops = sparx5->data->ops;
 	char queue_name[32];
 	u32 idx;
 	int err;
 
 	/* Setup own UPSIDs */
-	for (idx = 0; idx < 3; idx++) {
+	for (idx = 0; idx < consts->n_own_upsids; idx++) {
 		spx5_wr(idx, sparx5, ANA_AC_OWN_UPSID(idx));
 		spx5_wr(idx, sparx5, ANA_CL_OWN_UPSID(idx));
 		spx5_wr(idx, sparx5, ANA_L2_OWN_UPSID(idx));
@@ -609,7 +639,7 @@ static int sparx5_start(struct sparx5 *sparx5)
 	}
 
 	/* Enable CPU ports */
-	for (idx = SPX5_PORTS; idx < SPX5_PORTS_ALL; idx++)
+	for (idx = consts->n_ports; idx < consts->n_ports_all; idx++)
 		spx5_rmw(QFWD_SWITCH_PORT_MODE_PORT_ENA_SET(1),
 			 QFWD_SWITCH_PORT_MODE_PORT_ENA,
 			 sparx5,
@@ -619,13 +649,14 @@ static int sparx5_start(struct sparx5 *sparx5)
 	sparx5_update_fwd(sparx5);
 
 	/* CPU copy CPU pgids */
-	spx5_wr(ANA_AC_PGID_MISC_CFG_PGID_CPU_COPY_ENA_SET(1),
-		sparx5, ANA_AC_PGID_MISC_CFG(PGID_CPU));
-	spx5_wr(ANA_AC_PGID_MISC_CFG_PGID_CPU_COPY_ENA_SET(1),
-		sparx5, ANA_AC_PGID_MISC_CFG(PGID_BCAST));
+	spx5_wr(ANA_AC_PGID_MISC_CFG_PGID_CPU_COPY_ENA_SET(1), sparx5,
+		ANA_AC_PGID_MISC_CFG(sparx5_get_pgid(sparx5, PGID_CPU)));
+	spx5_wr(ANA_AC_PGID_MISC_CFG_PGID_CPU_COPY_ENA_SET(1), sparx5,
+		ANA_AC_PGID_MISC_CFG(sparx5_get_pgid(sparx5, PGID_BCAST)));
 
 	/* Recalc injected frame FCS */
-	for (idx = SPX5_PORT_CPU_0; idx <= SPX5_PORT_CPU_1; idx++)
+	for (idx = sparx5_get_internal_port(sparx5, SPX5_PORT_CPU_0);
+	     idx <= sparx5_get_internal_port(sparx5, SPX5_PORT_CPU_1); idx++)
 		spx5_rmw(ANA_CL_FILTER_CTRL_FORCE_FCS_UPDATE_ENA_SET(1),
 			 ANA_CL_FILTER_CTRL_FORCE_FCS_UPDATE_ENA,
 			 sparx5, ANA_CL_FILTER_CTRL(idx));
@@ -640,7 +671,8 @@ static int sparx5_start(struct sparx5 *sparx5)
 	sparx5_vlan_init(sparx5);
 
 	/* Add host mode BC address (points only to CPU) */
-	sparx5_mact_learn(sparx5, PGID_CPU, broadcast, NULL_VID);
+	sparx5_mact_learn(sparx5, sparx5_get_pgid(sparx5, PGID_CPU), broadcast,
+			  NULL_VID);
 
 	/* Enable queue limitation watermarks */
 	sparx5_qlim_set(sparx5);
@@ -720,7 +752,7 @@ static int sparx5_start(struct sparx5 *sparx5)
 
 	if (sparx5->ptp_irq >= 0) {
 		err = devm_request_threaded_irq(sparx5->dev, sparx5->ptp_irq,
-						NULL, sparx5_ptp_irq_handler,
+						NULL, ops->ptp_irq_handler,
 						IRQF_ONESHOT, "sparx5-ptp",
 						sparx5);
 		if (err)
@@ -759,6 +791,12 @@ static int mchp_sparx5_probe(struct platform_device *pdev)
 	sparx5->dev = &pdev->dev;
 	spin_lock_init(&sparx5->tx_lock);
 
+	sparx5->data = device_get_match_data(sparx5->dev);
+	if (!sparx5->data)
+		return -EINVAL;
+
+	regs = sparx5->data->regs;
+
 	/* Do switch core reset if available */
 	reset = devm_reset_control_get_optional_shared(&pdev->dev, "switch");
 	if (IS_ERR(reset))
@@ -937,15 +975,69 @@ static void mchp_sparx5_remove(struct platform_device *pdev)
 	destroy_workqueue(sparx5->mact_queue);
 }
 
+static const struct sparx5_regs sparx5_regs = {
+	.tsize = sparx5_tsize,
+	.gaddr = sparx5_gaddr,
+	.gcnt = sparx5_gcnt,
+	.gsize = sparx5_gsize,
+	.raddr = sparx5_raddr,
+	.rcnt = sparx5_rcnt,
+	.fpos = sparx5_fpos,
+	.fsize = sparx5_fsize,
+};
+
+static const struct sparx5_consts sparx5_consts = {
+	.n_ports = 65,
+	.n_ports_all = 70,
+	.n_hsch_l1_elems = 64,
+	.n_hsch_queues = 8,
+	.n_lb_groups = 10,
+	.n_pgids = 2113, /* (2048 + n_ports) */
+	.n_sio_clks = 3,
+	.n_own_upsids = 3,
+	.n_auto_cals = 7,
+	.n_filters = 1024,
+	.n_gates = 1024,
+	.n_sdlbs = 4096,
+	.n_dsm_cal_taxis = 8,
+	.buf_size = 4194280,
+	.qres_max_prio_idx = 630,
+	.qres_max_colour_idx = 638,
+	.tod_pin = 4,
+};
+
+static const struct sparx5_ops sparx5_ops = {
+	.is_port_2g5 = &sparx5_port_is_2g5,
+	.is_port_5g = &sparx5_port_is_5g,
+	.is_port_10g = &sparx5_port_is_10g,
+	.is_port_25g = &sparx5_port_is_25g,
+	.get_port_dev_index = &sparx5_port_dev_mapping,
+	.get_port_dev_bit = &sparx5_port_dev_mapping,
+	.get_hsch_max_group_rate = &sparx5_get_hsch_max_group_rate,
+	.get_sdlb_group = &sparx5_get_sdlb_group,
+	.set_port_mux = &sparx5_port_mux_set,
+	.ptp_irq_handler = &sparx5_ptp_irq_handler,
+	.dsm_calendar_calc = &sparx5_dsm_calendar_calc,
+};
+
+static const struct sparx5_match_data sparx5_desc = {
+	.iomap = sparx5_main_iomap,
+	.iomap_size = ARRAY_SIZE(sparx5_main_iomap),
+	.ioranges = 3,
+	.regs = &sparx5_regs,
+	.consts = &sparx5_consts,
+	.ops = &sparx5_ops,
+};
+
 static const struct of_device_id mchp_sparx5_match[] = {
-	{ .compatible = "microchip,sparx5-switch" },
+	{ .compatible = "microchip,sparx5-switch", .data = &sparx5_desc },
 	{ }
 };
 MODULE_DEVICE_TABLE(of, mchp_sparx5_match);
 
 static struct platform_driver mchp_sparx5_driver = {
 	.probe = mchp_sparx5_probe,
-	.remove_new = mchp_sparx5_remove,
+	.remove = mchp_sparx5_remove,
 	.driver = {
 		.name = "sparx5-switch",
 		.of_match_table = mchp_sparx5_match,
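The probe hunk and the sparx5_desc block above are the standard OF match-data pattern: hang a per-compatible descriptor off of_device_id::data and fetch it once with device_get_match_data(). A stripped-down sketch of the same wiring (driver and compatible names are invented for illustration):

    #include <linux/mod_devicetable.h>
    #include <linux/module.h>
    #include <linux/platform_device.h>
    #include <linux/property.h>

    struct mini_match_data {
    	int n_ports;	/* per-SoC constant, like sparx5_consts */
    };

    static const struct mini_match_data mini_desc = { .n_ports = 65 };

    static const struct of_device_id mini_match[] = {
    	{ .compatible = "vendor,mini-switch", .data = &mini_desc },
    	{ }
    };
    MODULE_DEVICE_TABLE(of, mini_match);

    static int mini_probe(struct platform_device *pdev)
    {
    	const struct mini_match_data *data;

    	/* One lookup at probe time; everything chip-specific hangs
    	 * off this pointer from here on.
    	 */
    	data = device_get_match_data(&pdev->dev);
    	if (!data)
    		return -EINVAL; /* bound without a match entry */

    	dev_info(&pdev->dev, "%d ports\n", data->n_ports);
    	return 0;
    }

Adding a second SoC then amounts to a new descriptor and a new match entry, which is exactly what the table restructuring here prepares for.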
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_main.h b/drivers/net/ethernet/microchip/sparx5/sparx5_main.h
index 3309060b1e4c..364ae92969bc 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_main.h
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_main.h
@@ -52,24 +52,23 @@ enum sparx5_vlan_port_type {
 };
 
 #define SPX5_PORTS 65
-#define SPX5_PORT_CPU (SPX5_PORTS) /* Next port is CPU port */
-#define SPX5_PORT_CPU_0 (SPX5_PORT_CPU + 0) /* CPU Port 65 */
-#define SPX5_PORT_CPU_1 (SPX5_PORT_CPU + 1) /* CPU Port 66 */
-#define SPX5_PORT_VD0 (SPX5_PORT_CPU + 2) /* VD0/Port 67 used for IPMC */
-#define SPX5_PORT_VD1 (SPX5_PORT_CPU + 3) /* VD1/Port 68 used for AFI/OAM */
-#define SPX5_PORT_VD2 (SPX5_PORT_CPU + 4) /* VD2/Port 69 used for IPinIP*/
-#define SPX5_PORTS_ALL (SPX5_PORT_CPU + 5) /* Total number of ports */
-
-#define PGID_BASE SPX5_PORTS /* Starts after port PGIDs */
-#define PGID_UC_FLOOD (PGID_BASE + 0)
-#define PGID_MC_FLOOD (PGID_BASE + 1)
-#define PGID_IPV4_MC_DATA (PGID_BASE + 2)
-#define PGID_IPV4_MC_CTRL (PGID_BASE + 3)
-#define PGID_IPV6_MC_DATA (PGID_BASE + 4)
-#define PGID_IPV6_MC_CTRL (PGID_BASE + 5)
-#define PGID_BCAST (PGID_BASE + 6)
-#define PGID_CPU (PGID_BASE + 7)
-#define PGID_MCAST_START (PGID_BASE + 8)
+#define SPX5_PORTS_ALL 70 /* Total number of ports */
+
+#define SPX5_PORT_CPU_0 0 /* CPU Port 0 */
+#define SPX5_PORT_CPU_1 1 /* CPU Port 1 */
+#define SPX5_PORT_VD0 2 /* VD0/Port used for IPMC */
+#define SPX5_PORT_VD1 3 /* VD1/Port used for AFI/OAM */
+#define SPX5_PORT_VD2 4 /* VD2/Port used for IPinIP*/
+
+#define PGID_UC_FLOOD 0
+#define PGID_MC_FLOOD 1
+#define PGID_IPV4_MC_DATA 2
+#define PGID_IPV4_MC_CTRL 3
+#define PGID_IPV6_MC_DATA 4
+#define PGID_IPV6_MC_CTRL 5
+#define PGID_BCAST 6
+#define PGID_CPU 7
+#define PGID_MCAST_START 8
 
 #define PGID_TABLE_SIZE 3290
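Note that the PGID and internal-port defines turn from absolute indices into offsets relative to the end of the per-port range. The patch only declares sparx5_get_pgid() and sparx5_get_internal_port(); a plausible implementation of the former, which on Sparx5 reproduces the old absolute values (for example PGID_BCAST: 65 + 6 = 71, formerly PGID_BASE + 6), would be:

    /* Sketch only; the actual body is not part of this hunk. */
    int example_get_pgid(const struct sparx5 *sparx5, int pgid)
    {
    	/* The PGID_* defines are now offsets past the per-port PGIDs,
    	 * so the absolute table index is the front-port count plus
    	 * the offset.
    	 */
    	return sparx5->data->consts->n_ports + pgid;
    }

Keeping the defines relative is what lets a chip with a different port count reuse them unchanged.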
@@ -100,8 +99,24 @@ enum sparx5_vlan_port_type {
 #define IFH_PDU_TYPE_IPV4_UDP_PTP 0x6
 #define IFH_PDU_TYPE_IPV6_UDP_PTP 0x7
 
+#define SPX5_DSM_CAL_LEN 64
+#define SPX5_DSM_CAL_MAX_DEVS_PER_TAXI 13
+
 struct sparx5;
 
+struct sparx5_calendar_data {
+	u32 schedule[SPX5_DSM_CAL_LEN];
+	u32 avg_dist[SPX5_DSM_CAL_MAX_DEVS_PER_TAXI];
+	u32 taxi_ports[SPX5_DSM_CAL_MAX_DEVS_PER_TAXI];
+	u32 taxi_speeds[SPX5_DSM_CAL_MAX_DEVS_PER_TAXI];
+	u32 dev_slots[SPX5_DSM_CAL_MAX_DEVS_PER_TAXI];
+	u32 new_slots[SPX5_DSM_CAL_LEN];
+	u32 temp_sched[SPX5_DSM_CAL_LEN];
+	u32 indices[SPX5_DSM_CAL_LEN];
+	u32 short_list[SPX5_DSM_CAL_LEN];
+	u32 long_list[SPX5_DSM_CAL_LEN];
+};
+
 /* Frame DMA receive state:
  * For each DB, there is a SKB, and the skb data pointer is mapped in
  * the DB. Once a frame is received the skb is given to the upper layers
@@ -226,6 +241,69 @@ struct sparx5_mall_entry {
 #define SPARX5_SKB_CB(skb) \
 	((struct sparx5_skb_cb *)((skb)->cb))
 
+struct sparx5_regs {
+	const unsigned int *tsize;
+	const unsigned int *gaddr;
+	const unsigned int *gcnt;
+	const unsigned int *gsize;
+	const unsigned int *raddr;
+	const unsigned int *rcnt;
+	const unsigned int *fpos;
+	const unsigned int *fsize;
+};
+
+struct sparx5_consts {
+	u32 n_ports; /* Number of front ports */
+	u32 n_ports_all; /* Number of front ports + internal ports */
+	u32 n_hsch_l1_elems; /* Number of HSCH layer 1 elements */
+	u32 n_hsch_queues; /* Number of HSCH queues */
+	u32 n_lb_groups; /* Number of leaky bucket groups */
+	u32 n_pgids; /* Number of PGID's */
+	u32 n_sio_clks; /* Number of serial IO clocks */
+	u32 n_own_upsids; /* Number of own UPSID's */
+	u32 n_auto_cals; /* Number of auto calendars */
+	u32 n_filters; /* Number of PSFP filters */
+	u32 n_gates; /* Number of PSFP gates */
+	u32 n_sdlbs; /* Number of service dual leaky buckets */
+	u32 n_dsm_cal_taxis; /* Number of DSM calendar taxis */
+	u32 buf_size; /* Amount of QLIM watermark memory */
+	u32 qres_max_prio_idx; /* Maximum QRES prio index */
+	u32 qres_max_colour_idx; /* Maximum QRES colour index */
+	u32 tod_pin; /* PTP TOD pin */
+};
+
+struct sparx5_ops {
+	bool (*is_port_2g5)(int portno);
+	bool (*is_port_5g)(int portno);
+	bool (*is_port_10g)(int portno);
+	bool (*is_port_25g)(int portno);
+	u32 (*get_port_dev_index)(struct sparx5 *sparx5, int port);
+	u32 (*get_port_dev_bit)(struct sparx5 *sparx5, int port);
+	u32 (*get_hsch_max_group_rate)(int grp);
+	struct sparx5_sdlb_group *(*get_sdlb_group)(int idx);
+	int (*set_port_mux)(struct sparx5 *sparx5, struct sparx5_port *port,
+			    struct sparx5_port_config *conf);
+
+	irqreturn_t (*ptp_irq_handler)(int irq, void *args);
+	int (*dsm_calendar_calc)(struct sparx5 *sparx5, u32 taxi,
+				 struct sparx5_calendar_data *data);
+};
+
+struct sparx5_main_io_resource {
+	enum sparx5_target id;
+	phys_addr_t offset;
+	int range;
+};
+
+struct sparx5_match_data {
+	const struct sparx5_regs *regs;
+	const struct sparx5_consts *consts;
+	const struct sparx5_ops *ops;
+	const struct sparx5_main_io_resource *iomap;
+	int ioranges;
+	int iomap_size;
+};
+
 struct sparx5 {
 	struct platform_device *pdev;
 	struct device *dev;
@@ -293,8 +371,12 @@ struct sparx5 {
 	struct list_head mall_entries;
 	/* Common root for debugfs */
 	struct dentry *debugfs_root;
+	const struct sparx5_match_data *data;
 };
 
+/* sparx5_main.c */
+bool is_sparx5(struct sparx5 *sparx5);
+
 /* sparx5_switchdev.c */
 int sparx5_register_notifier_blocks(struct sparx5 *sparx5);
 void sparx5_unregister_notifier_blocks(struct sparx5 *sparx5);
@@ -355,6 +437,9 @@ void sparx5_vlan_port_apply(struct sparx5 *sparx5, struct sparx5_port *port);
 /* sparx5_calendar.c */
 int sparx5_config_auto_calendar(struct sparx5 *sparx5);
 int sparx5_config_dsm_calendar(struct sparx5 *sparx5);
+int sparx5_dsm_calendar_calc(struct sparx5 *sparx5, u32 taxi,
+			     struct sparx5_calendar_data *data);
+
 /* sparx5_ethtool.c */
 void sparx5_get_stats64(struct net_device *ndev,
			struct rtnl_link_stats64 *stats);
@@ -375,7 +460,7 @@ void sparx5_set_port_ifh_timestamp(void *ifh_hdr, u64 timestamp);
 void sparx5_set_port_ifh_rew_op(void *ifh_hdr, u32 rew_op);
 void sparx5_set_port_ifh_pdu_type(void *ifh_hdr, u32 pdu_type);
 void sparx5_set_port_ifh_pdu_w16_offset(void *ifh_hdr, u32 pdu_w16_offset);
-void sparx5_set_port_ifh(void *ifh_hdr, u16 portno);
+void sparx5_set_port_ifh(struct sparx5 *sparx5, void *ifh_hdr, u16 portno);
 bool sparx5_netdevice_check(const struct net_device *dev);
 struct net_device *sparx5_create_netdev(struct sparx5 *sparx5, u32 portno);
 int sparx5_register_netdevs(struct sparx5 *sparx5);
@@ -413,6 +498,7 @@ enum sparx5_pgid_type {
 void sparx5_pgid_init(struct sparx5 *spx5);
 int sparx5_pgid_alloc_mcast(struct sparx5 *spx5, u16 *idx);
 int sparx5_pgid_free(struct sparx5 *spx5, u16 idx);
+int sparx5_get_pgid(struct sparx5 *sparx5, int pgid);
 
 /* sparx5_pool.c */
 struct sparx5_pool_entry {
@@ -426,6 +512,11 @@ int sparx5_pool_get(struct sparx5_pool_entry *pool, int size, u32 *id);
 int sparx5_pool_get_with_idx(struct sparx5_pool_entry *pool, int size, u32 idx,
 			     u32 *id);
 
+/* sparx5_port.c */
+int sparx5_port_mux_set(struct sparx5 *sparx5, struct sparx5_port *port,
+			struct sparx5_port_config *conf);
+int sparx5_get_internal_port(struct sparx5 *sparx5, int port);
+
 /* sparx5_sdlb.c */
 #define SPX5_SDLB_PUP_TOKEN_DISABLE 0x1FFF
 #define SPX5_SDLB_PUP_TOKEN_MAX (SPX5_SDLB_PUP_TOKEN_DISABLE - 1)
@@ -444,6 +535,7 @@ struct sparx5_sdlb_group {
 };
 
 extern struct sparx5_sdlb_group sdlb_groups[SPX5_SDLB_GROUP_CNT];
+struct sparx5_sdlb_group *sparx5_get_sdlb_group(int idx);
 int sparx5_sdlb_pup_token_get(struct sparx5 *sparx5, u32 pup_interval,
 			      u64 rate);
 
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_main_regs.h b/drivers/net/ethernet/microchip/sparx5/sparx5_main_regs.h
index 22acc1f3380c..0e8b18bcf179 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_main_regs.h
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_main_regs.h
@@ -1,11 +1,11 @@
 /* SPDX-License-Identifier: GPL-2.0+
  * Microchip Sparx5 Switch driver
  *
- * Copyright (c) 2021 Microchip Technology Inc.
+ * Copyright (c) 2024 Microchip Technology Inc.
 */
 
-/* This file is autogenerated by cml-utils 2023-02-10 11:18:53 +0100.
- * Commit ID: c30fb4bf0281cd4a7133bdab6682f9e43c872ada
+/* This file is autogenerated by cml-utils 2024-10-04 10:40:40 +0200.
+ * Commit ID: 9d07b8d19363f3cd3590ddb3f7a2e2768e16524b
 */
 
 #ifndef _SPARX5_MAIN_REGS_H_
@@ -15,6 +15,8 @@
 #include <linux/types.h>
 #include <linux/bug.h>
 
+#include "sparx5_regs.h"
+
 enum sparx5_target {
 	TARGET_ANA_AC = 1,
 	TARGET_ANA_ACL = 2,
@@ -52,14 +54,27 @@ enum sparx5_target {
 	TARGET_VCAP_SUPER = 326,
 	TARGET_VOP = 327,
 	TARGET_XQS = 331,
-	NUM_TARGETS = 332
+	NUM_TARGETS = 517
 };
 
+/* sparx5_main.c
+ *
+ * This is used by the register macros to access chip differences (if any) in:
+ * target size, register address, register count, group address, group count,
+ * group size, field position and field size.
+ */
+extern const struct sparx5_regs *regs;
+
+/* Non-constant mask variant of FIELD_GET() and FIELD_PREP() */
+#define spx5_field_get(_mask, _reg) (((_reg) & (_mask)) >> (ffs(_mask) - 1))
+#define spx5_field_prep(_mask, _val) (((_val) << (ffs(_mask) - 1)) & (_mask))
+
 #define __REG(...) __VA_ARGS__
 
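The spx5_field_get()/spx5_field_prep() macros exist because FIELD_GET() and FIELD_PREP() insist on a compile-time constant mask, which is no longer true once field widths come out of regs->fsize[]. The ffs()-based variants accept run-time masks, at the cost of bitfield.h's constant folding and sanity checks. A small userspace demonstration of the same expressions:

    #include <stdio.h>
    #include <strings.h>	/* ffs() */

    /* Same expressions as spx5_field_get()/spx5_field_prep(), usable
     * with a mask that is only known at run time.
     */
    #define field_get(_mask, _reg) (((_reg) & (_mask)) >> (ffs(_mask) - 1))
    #define field_prep(_mask, _val) (((_val) << (ffs(_mask) - 1)) & (_mask))

    int main(void)
    {
    	unsigned int width = 9;	/* stands in for regs->fsize[...] */
    	unsigned int mask = ((1u << width) - 1) << 1; /* bits [9:1] */

    	/* Pack 42 into bits [9:1], then unpack it again. */
    	unsigned int reg = field_prep(mask, 42u);

    	printf("reg=%#x field=%u\n", reg, field_get(mask, reg)); /* 0x54, 42 */
    	return 0;
    }

This also explains the GENMASK(regs->fsize[...] + pos - 1, pos) pattern used throughout the converted macros below: the low bit position stays fixed while the width becomes a per-chip table lookup.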
-/* ANA_AC:RAM_CTRL:RAM_INIT */
-#define ANA_AC_RAM_INIT __REG(TARGET_ANA_AC,\
-	0, 1, 839108, 0, 1, 4, 0, 0, 1, 4)
+/* ANA_AC:RAM_CTRL:RAM_INIT */
+#define ANA_AC_RAM_INIT \
+	__REG(TARGET_ANA_AC, 0, 1, regs->gaddr[GA_ANA_AC_RAM_CTRL], 0, 1, 4, 0,\
+	      0, 1, 4)
 
 #define ANA_AC_RAM_INIT_RAM_INIT BIT(1)
 #define ANA_AC_RAM_INIT_RAM_INIT_SET(x)\
@@ -73,9 +88,10 @@ enum sparx5_target {
 #define ANA_AC_RAM_INIT_RAM_CFG_HOOK_GET(x)\
 	FIELD_GET(ANA_AC_RAM_INIT_RAM_CFG_HOOK, x)
 
-/* ANA_AC:PS_COMMON:OWN_UPSID */
-#define ANA_AC_OWN_UPSID(r) __REG(TARGET_ANA_AC,\
-	0, 1, 894472, 0, 1, 352, 52, r, 3, 4)
+/* ANA_AC:PS_COMMON:OWN_UPSID */
+#define ANA_AC_OWN_UPSID(r) \
+	__REG(TARGET_ANA_AC, 0, 1, regs->gaddr[GA_ANA_AC_PS_COMMON], 0, 1, 352,\
+	      52, r, regs->rcnt[RC_ANA_AC_OWN_UPSID], 4)
 
 #define ANA_AC_OWN_UPSID_OWN_UPSID GENMASK(4, 0)
 #define ANA_AC_OWN_UPSID_OWN_UPSID_SET(x)\
@@ -83,75 +99,86 @@ enum sparx5_target {
 #define ANA_AC_OWN_UPSID_OWN_UPSID_GET(x)\
 	FIELD_GET(ANA_AC_OWN_UPSID_OWN_UPSID, x)
 
-/* ANA_AC:MIRROR_PROBE:PROBE_CFG */
-#define ANA_AC_PROBE_CFG(g) \
-	__REG(TARGET_ANA_AC, 0, 1, 893696, g, 3, 32, 0, 0, 1, 4)
+/* ANA_AC:MIRROR_PROBE:PROBE_CFG */
+#define ANA_AC_PROBE_CFG(g) \
+	__REG(TARGET_ANA_AC, 0, 1, regs->gaddr[GA_ANA_AC_MIRROR_PROBE], g, 3, \
+	      32, 0, 0, 1, 4)
 
-#define ANA_AC_PROBE_CFG_PROBE_RX_CPU_AND_VD GENMASK(31, 27)
+#define ANA_AC_PROBE_CFG_PROBE_RX_CPU_AND_VD GENMASK(31, 27)
 #define ANA_AC_PROBE_CFG_PROBE_RX_CPU_AND_VD_SET(x)\
 	FIELD_PREP(ANA_AC_PROBE_CFG_PROBE_RX_CPU_AND_VD, x)
 #define ANA_AC_PROBE_CFG_PROBE_RX_CPU_AND_VD_GET(x)\
 	FIELD_GET(ANA_AC_PROBE_CFG_PROBE_RX_CPU_AND_VD, x)
 
-#define ANA_AC_PROBE_CFG_PROBE_CPU_SET GENMASK(26, 19)
+#define ANA_AC_PROBE_CFG_PROBE_CPU_SET GENMASK(26, 19)
 #define ANA_AC_PROBE_CFG_PROBE_CPU_SET_SET(x)\
 	FIELD_PREP(ANA_AC_PROBE_CFG_PROBE_CPU_SET, x)
 #define ANA_AC_PROBE_CFG_PROBE_CPU_SET_GET(x)\
 	FIELD_GET(ANA_AC_PROBE_CFG_PROBE_CPU_SET, x)
 
-#define ANA_AC_PROBE_CFG_PROBE_VID GENMASK(18, 6)
+#define ANA_AC_PROBE_CFG_PROBE_VID GENMASK(18, 6)
 #define ANA_AC_PROBE_CFG_PROBE_VID_SET(x)\
 	FIELD_PREP(ANA_AC_PROBE_CFG_PROBE_VID, x)
 #define ANA_AC_PROBE_CFG_PROBE_VID_GET(x)\
 	FIELD_GET(ANA_AC_PROBE_CFG_PROBE_VID, x)
 
-#define ANA_AC_PROBE_CFG_PROBE_VLAN_MODE GENMASK(5, 4)
+#define ANA_AC_PROBE_CFG_PROBE_VLAN_MODE GENMASK(5, 4)
 #define ANA_AC_PROBE_CFG_PROBE_VLAN_MODE_SET(x)\
 	FIELD_PREP(ANA_AC_PROBE_CFG_PROBE_VLAN_MODE, x)
 #define ANA_AC_PROBE_CFG_PROBE_VLAN_MODE_GET(x)\
 	FIELD_GET(ANA_AC_PROBE_CFG_PROBE_VLAN_MODE, x)
 
-#define ANA_AC_PROBE_CFG_PROBE_MAC_MODE GENMASK(3, 2)
+#define ANA_AC_PROBE_CFG_PROBE_MAC_MODE GENMASK(3, 2)
 #define ANA_AC_PROBE_CFG_PROBE_MAC_MODE_SET(x)\
 	FIELD_PREP(ANA_AC_PROBE_CFG_PROBE_MAC_MODE, x)
 #define ANA_AC_PROBE_CFG_PROBE_MAC_MODE_GET(x)\
 	FIELD_GET(ANA_AC_PROBE_CFG_PROBE_MAC_MODE, x)
 
-#define ANA_AC_PROBE_CFG_PROBE_DIRECTION GENMASK(1, 0)
+#define ANA_AC_PROBE_CFG_PROBE_DIRECTION GENMASK(1, 0)
 #define ANA_AC_PROBE_CFG_PROBE_DIRECTION_SET(x)\
 	FIELD_PREP(ANA_AC_PROBE_CFG_PROBE_DIRECTION, x)
 #define ANA_AC_PROBE_CFG_PROBE_DIRECTION_GET(x)\
 	FIELD_GET(ANA_AC_PROBE_CFG_PROBE_DIRECTION, x)
 
-/* ANA_AC:MIRROR_PROBE:PROBE_PORT_CFG */
-#define ANA_AC_PROBE_PORT_CFG(g) \
-	__REG(TARGET_ANA_AC, 0, 1, 893696, g, 3, 32, 8, 0, 1, 4)
+/* ANA_AC:MIRROR_PROBE:PROBE_PORT_CFG */
+#define ANA_AC_PROBE_PORT_CFG(g) \
+	__REG(TARGET_ANA_AC, 0, 1, regs->gaddr[GA_ANA_AC_MIRROR_PROBE], g, 3, \
+	      32, 8, 0, 1, 4)
 
-/* ANA_AC:MIRROR_PROBE:PROBE_PORT_CFG1 */
-#define ANA_AC_PROBE_PORT_CFG1(g) \
-	__REG(TARGET_ANA_AC, 0, 1, 893696, g, 3, 32, 12, 0, 1, 4)
+/* SPARX5 ONLY */
+/* ANA_AC:MIRROR_PROBE:PROBE_PORT_CFG1 */
+#define ANA_AC_PROBE_PORT_CFG1(g) \
+	__REG(TARGET_ANA_AC, 0, 1, regs->gaddr[GA_ANA_AC_MIRROR_PROBE], g, 3, \
+	      32, 12, 0, 1, 4)
 
-/* ANA_AC:MIRROR_PROBE:PROBE_PORT_CFG2 */
-#define ANA_AC_PROBE_PORT_CFG2(g) \
-	__REG(TARGET_ANA_AC, 0, 1, 893696, g, 3, 32, 16, 0, 1, 4)
+/* SPARX5 ONLY */
+/* ANA_AC:MIRROR_PROBE:PROBE_PORT_CFG2 */
+#define ANA_AC_PROBE_PORT_CFG2(g) \
+	__REG(TARGET_ANA_AC, 0, 1, regs->gaddr[GA_ANA_AC_MIRROR_PROBE], g, 3, \
+	      32, 16, 0, 1, 4)
 
-#define ANA_AC_PROBE_PORT_CFG2_PROBE_PORT_MASK2 BIT(0)
+#define ANA_AC_PROBE_PORT_CFG2_PROBE_PORT_MASK2 BIT(0)
 #define ANA_AC_PROBE_PORT_CFG2_PROBE_PORT_MASK2_SET(x)\
 	FIELD_PREP(ANA_AC_PROBE_PORT_CFG2_PROBE_PORT_MASK2, x)
 #define ANA_AC_PROBE_PORT_CFG2_PROBE_PORT_MASK2_GET(x)\
 	FIELD_GET(ANA_AC_PROBE_PORT_CFG2_PROBE_PORT_MASK2, x)
 
-/* ANA_AC:SRC:SRC_CFG */
-#define ANA_AC_SRC_CFG(g) __REG(TARGET_ANA_AC,\
-	0, 1, 849920, g, 102, 16, 0, 0, 1, 4)
+/* ANA_AC:SRC:SRC_CFG */
+#define ANA_AC_SRC_CFG(g) \
+	__REG(TARGET_ANA_AC, 0, 1, regs->gaddr[GA_ANA_AC_SRC], g, \
+	      regs->gcnt[GC_ANA_AC_SRC], regs->gsize[GW_ANA_AC_SRC], 0, 0, 1, 4)
 
-/* ANA_AC:SRC:SRC_CFG1 */
-#define ANA_AC_SRC_CFG1(g) __REG(TARGET_ANA_AC,\
-	0, 1, 849920, g, 102, 16, 4, 0, 1, 4)
+/* SPARX5 ONLY */
+/* ANA_AC:SRC:SRC_CFG1 */
+#define ANA_AC_SRC_CFG1(g) \
+	__REG(TARGET_ANA_AC, 0, 1, regs->gaddr[GA_ANA_AC_SRC], g, \
+	      regs->gcnt[GC_ANA_AC_SRC], regs->gsize[GW_ANA_AC_SRC], 4, 0, 1, 4)
 
-/* ANA_AC:SRC:SRC_CFG2 */
-#define ANA_AC_SRC_CFG2(g) __REG(TARGET_ANA_AC,\
-	0, 1, 849920, g, 102, 16, 8, 0, 1, 4)
+/* SPARX5 ONLY */
+/* ANA_AC:SRC:SRC_CFG2 */
+#define ANA_AC_SRC_CFG2(g) \
+	__REG(TARGET_ANA_AC, 0, 1, regs->gaddr[GA_ANA_AC_SRC], g, \
+	      regs->gcnt[GC_ANA_AC_SRC], regs->gsize[GW_ANA_AC_SRC], 8, 0, 1, 4)
 
 #define ANA_AC_SRC_CFG2_PORT_MASK2 BIT(0)
 #define ANA_AC_SRC_CFG2_PORT_MASK2_SET(x)\
@@ -159,17 +186,22 @@ enum sparx5_target {
 #define ANA_AC_SRC_CFG2_PORT_MASK2_GET(x)\
 	FIELD_GET(ANA_AC_SRC_CFG2_PORT_MASK2, x)
 
-/* ANA_AC:PGID:PGID_CFG */
-#define ANA_AC_PGID_CFG(g) __REG(TARGET_ANA_AC,\
-	0, 1, 786432, g, 3290, 16, 0, 0, 1, 4)
+/* ANA_AC:PGID:PGID_CFG */
+#define ANA_AC_PGID_CFG(g) \
+	__REG(TARGET_ANA_AC, 0, 1, regs->gaddr[GA_ANA_AC_PGID], g, \
+	      regs->gcnt[GC_ANA_AC_PGID], 16, 0, 0, 1, 4)
 
-/* ANA_AC:PGID:PGID_CFG1 */
-#define ANA_AC_PGID_CFG1(g) __REG(TARGET_ANA_AC,\
-	0, 1, 786432, g, 3290, 16, 4, 0, 1, 4)
+/* SPARX5 ONLY */
+/* ANA_AC:PGID:PGID_CFG1 */
+#define ANA_AC_PGID_CFG1(g) \
+	__REG(TARGET_ANA_AC, 0, 1, regs->gaddr[GA_ANA_AC_PGID], g, \
+	      regs->gcnt[GC_ANA_AC_PGID], 16, 4, 0, 1, 4)
 
-/* ANA_AC:PGID:PGID_CFG2 */
-#define ANA_AC_PGID_CFG2(g) __REG(TARGET_ANA_AC,\
-	0, 1, 786432, g, 3290, 16, 8, 0, 1, 4)
+/* SPARX5 ONLY */
+/* ANA_AC:PGID:PGID_CFG2 */
+#define ANA_AC_PGID_CFG2(g) \
+	__REG(TARGET_ANA_AC, 0, 1, regs->gaddr[GA_ANA_AC_PGID], g, \
+	      regs->gcnt[GC_ANA_AC_PGID], 16, 8, 0, 1, 4)
 
 #define ANA_AC_PGID_CFG2_PORT_MASK2 BIT(0)
 #define ANA_AC_PGID_CFG2_PORT_MASK2_SET(x)\
@@ -177,9 +209,10 @@ enum sparx5_target {
 #define ANA_AC_PGID_CFG2_PORT_MASK2_GET(x)\
 	FIELD_GET(ANA_AC_PGID_CFG2_PORT_MASK2, x)
 
-/* ANA_AC:PGID:PGID_MISC_CFG */
-#define ANA_AC_PGID_MISC_CFG(g) __REG(TARGET_ANA_AC,\
-	0, 1, 786432, g, 3290, 16, 12, 0, 1, 4)
+/* ANA_AC:PGID:PGID_MISC_CFG */
+#define ANA_AC_PGID_MISC_CFG(g) \
+	__REG(TARGET_ANA_AC, 0, 1, regs->gaddr[GA_ANA_AC_PGID], g, \
+	      regs->gcnt[GC_ANA_AC_PGID], 16, 12, 0, 1, 4)
 
 #define ANA_AC_PGID_MISC_CFG_PGID_CPU_QU GENMASK(6, 4)
 #define ANA_AC_PGID_MISC_CFG_PGID_CPU_QU_SET(x)\
@@ -199,9 +232,10 @@ enum sparx5_target {
 #define ANA_AC_PGID_MISC_CFG_PGID_CPU_COPY_ENA_GET(x)\
 	FIELD_GET(ANA_AC_PGID_MISC_CFG_PGID_CPU_COPY_ENA, x)
 
-/* ANA_AC:TSN_SF:TSN_SF */
-#define ANA_AC_TSN_SF __REG(TARGET_ANA_AC,\
-	0, 1, 839136, 0, 1, 4, 0, 0, 1, 4)
+/* ANA_AC:TSN_SF:TSN_SF */
+#define ANA_AC_TSN_SF \
+	__REG(TARGET_ANA_AC, 0, 1, regs->gaddr[GA_ANA_AC_TSN_SF], 0, 1, 4, 0, \
+	      0, 1, 4)
 
 #define ANA_AC_TSN_SF_TSN_STREAM_BLOCK_OVERSIZE_STICKY BIT(9)
 #define ANA_AC_TSN_SF_TSN_STREAM_BLOCK_OVERSIZE_STICKY_SET(x)\
@@ -209,21 +243,24 @@ enum sparx5_target {
 #define ANA_AC_TSN_SF_TSN_STREAM_BLOCK_OVERSIZE_STICKY_GET(x)\
 	FIELD_GET(ANA_AC_TSN_SF_TSN_STREAM_BLOCK_OVERSIZE_STICKY, x)
 
-#define ANA_AC_TSN_SF_PORT_NUM GENMASK(8, 0)
+#define ANA_AC_TSN_SF_PORT_NUM\
+	GENMASK(regs->fsize[FW_ANA_AC_TSN_SF_PORT_NUM] + 0 - 1, 0)
 #define ANA_AC_TSN_SF_PORT_NUM_SET(x)\
-	FIELD_PREP(ANA_AC_TSN_SF_PORT_NUM, x)
+	spx5_field_prep(ANA_AC_TSN_SF_PORT_NUM, x)
 #define ANA_AC_TSN_SF_PORT_NUM_GET(x)\
-	FIELD_GET(ANA_AC_TSN_SF_PORT_NUM, x)
+	spx5_field_get(ANA_AC_TSN_SF_PORT_NUM, x)
 
-/* ANA_AC:TSN_SF_CFG:TSN_SF_CFG */
-#define ANA_AC_TSN_SF_CFG(g) __REG(TARGET_ANA_AC,\
-	0, 1, 839680, g, 1024, 4, 0, 0, 1, 4)
+/* ANA_AC:TSN_SF_CFG:TSN_SF_CFG */
+#define ANA_AC_TSN_SF_CFG(g) \
+	__REG(TARGET_ANA_AC, 0, 1, regs->gaddr[GA_ANA_AC_TSN_SF_CFG], g, \
+	      regs->gcnt[GC_ANA_AC_TSN_SF_CFG], 4, 0, 0, 1, 4)
 
-#define ANA_AC_TSN_SF_CFG_TSN_SGID GENMASK(25, 16)
+#define ANA_AC_TSN_SF_CFG_TSN_SGID\
+	GENMASK(regs->fsize[FW_ANA_AC_TSN_SF_CFG_TSN_SGID] + 16 - 1, 16)
 #define ANA_AC_TSN_SF_CFG_TSN_SGID_SET(x)\
-	FIELD_PREP(ANA_AC_TSN_SF_CFG_TSN_SGID, x)
+	spx5_field_prep(ANA_AC_TSN_SF_CFG_TSN_SGID, x)
 #define ANA_AC_TSN_SF_CFG_TSN_SGID_GET(x)\
-	FIELD_GET(ANA_AC_TSN_SF_CFG_TSN_SGID, x)
+	spx5_field_get(ANA_AC_TSN_SF_CFG_TSN_SGID, x)
 
 #define ANA_AC_TSN_SF_CFG_TSN_MAX_SDU GENMASK(15, 2)
 #define ANA_AC_TSN_SF_CFG_TSN_MAX_SDU_SET(x)\
@@ -243,9 +280,10 @@ enum sparx5_target {
 #define ANA_AC_TSN_SF_CFG_BLOCK_OVERSIZE_STATE_GET(x)\
 	FIELD_GET(ANA_AC_TSN_SF_CFG_BLOCK_OVERSIZE_STATE, x)
 
-/* ANA_AC:TSN_SF_STATUS:TSN_SF_STATUS */
-#define ANA_AC_TSN_SF_STATUS __REG(TARGET_ANA_AC,\
-	0, 1, 839072, 0, 1, 16, 0, 0, 1, 4)
+/* ANA_AC:TSN_SF_STATUS:TSN_SF_STATUS */
+#define ANA_AC_TSN_SF_STATUS \
+	__REG(TARGET_ANA_AC, 0, 1, regs->gaddr[GA_ANA_AC_TSN_SF_STATUS], 0, 1, \
+	      16, 0, 0, 1, 4)
 
 #define ANA_AC_TSN_SF_STATUS_FRM_LEN GENMASK(25, 12)
 #define ANA_AC_TSN_SF_STATUS_FRM_LEN_SET(x)\
@@ -259,11 +297,12 @@ enum sparx5_target {
 #define ANA_AC_TSN_SF_STATUS_DLB_DROP_GET(x)\
 	FIELD_GET(ANA_AC_TSN_SF_STATUS_DLB_DROP, x)
 
-#define ANA_AC_TSN_SF_STATUS_TSN_SFID GENMASK(10, 1)
+#define ANA_AC_TSN_SF_STATUS_TSN_SFID\
+	GENMASK(regs->fsize[FW_ANA_AC_TSN_SF_STATUS_TSN_SFID] + 1 - 1, 1)
 #define ANA_AC_TSN_SF_STATUS_TSN_SFID_SET(x)\
-	FIELD_PREP(ANA_AC_TSN_SF_STATUS_TSN_SFID, x)
+	spx5_field_prep(ANA_AC_TSN_SF_STATUS_TSN_SFID, x)
 #define ANA_AC_TSN_SF_STATUS_TSN_SFID_GET(x)\
-	FIELD_GET(ANA_AC_TSN_SF_STATUS_TSN_SFID, x)
+	spx5_field_get(ANA_AC_TSN_SF_STATUS_TSN_SFID, x)
 
 #define ANA_AC_TSN_SF_STATUS_TSTAMP_VLD BIT(0)
 #define ANA_AC_TSN_SF_STATUS_TSTAMP_VLD_SET(x)\
@@ -271,15 +310,17 @@ enum sparx5_target {
 #define ANA_AC_TSN_SF_STATUS_TSTAMP_VLD_GET(x)\
 	FIELD_GET(ANA_AC_TSN_SF_STATUS_TSTAMP_VLD, x)
 
-/* ANA_AC:SG_ACCESS:SG_ACCESS_CTRL */
-#define ANA_AC_SG_ACCESS_CTRL __REG(TARGET_ANA_AC,\
-	0, 1, 839140, 0, 1, 12, 0, 0, 1, 4)
+/* ANA_AC:SG_ACCESS:SG_ACCESS_CTRL */
+#define ANA_AC_SG_ACCESS_CTRL \
+	__REG(TARGET_ANA_AC, 0, 1, regs->gaddr[GA_ANA_AC_SG_ACCESS], 0, 1, 12, \
+	      0, 0, 1, 4)
 
-#define ANA_AC_SG_ACCESS_CTRL_SGID GENMASK(9, 0)
+#define ANA_AC_SG_ACCESS_CTRL_SGID\
+	GENMASK(regs->fsize[FW_ANA_AC_SG_ACCESS_CTRL_SGID] + 0 - 1, 0)
 #define ANA_AC_SG_ACCESS_CTRL_SGID_SET(x)\
-	FIELD_PREP(ANA_AC_SG_ACCESS_CTRL_SGID, x)
+	spx5_field_prep(ANA_AC_SG_ACCESS_CTRL_SGID, x)
 #define ANA_AC_SG_ACCESS_CTRL_SGID_GET(x)\
-	FIELD_GET(ANA_AC_SG_ACCESS_CTRL_SGID, x)
+	spx5_field_get(ANA_AC_SG_ACCESS_CTRL_SGID, x)
 
 #define ANA_AC_SG_ACCESS_CTRL_CONFIG_CHANGE BIT(28)
 #define ANA_AC_SG_ACCESS_CTRL_CONFIG_CHANGE_SET(x)\
@@ -287,9 +328,10 @@ enum sparx5_target {
 #define ANA_AC_SG_ACCESS_CTRL_CONFIG_CHANGE_GET(x)\
 	FIELD_GET(ANA_AC_SG_ACCESS_CTRL_CONFIG_CHANGE, x)
 
-/* ANA_AC:SG_ACCESS:SG_CYCLETIME_UPDATE_PERIOD */
-#define ANA_AC_SG_CYCLETIME_UPDATE_PERIOD __REG(TARGET_ANA_AC,\
-	0, 1, 839140, 0, 1, 12, 8, 0, 1, 4)
+/* ANA_AC:SG_ACCESS:SG_CYCLETIME_UPDATE_PERIOD */
+#define ANA_AC_SG_CYCLETIME_UPDATE_PERIOD \
+	__REG(TARGET_ANA_AC, 0, 1, regs->gaddr[GA_ANA_AC_SG_ACCESS], 0, 1, 12, \
+	      8, 0, 1, 4)
 
 #define ANA_AC_SG_CYCLETIME_UPDATE_PERIOD_SG_CT_CLKS GENMASK(15, 0)
 #define ANA_AC_SG_CYCLETIME_UPDATE_PERIOD_SG_CT_CLKS_SET(x)\
@@ -303,17 +345,20 @@ enum sparx5_target {
 #define ANA_AC_SG_CYCLETIME_UPDATE_PERIOD_SG_CT_UPDATE_ENA_GET(x)\
 	FIELD_GET(ANA_AC_SG_CYCLETIME_UPDATE_PERIOD_SG_CT_UPDATE_ENA, x)
 
-/* ANA_AC:SG_CONFIG:SG_CONFIG_REG_1 */
-#define ANA_AC_SG_CONFIG_REG_1 __REG(TARGET_ANA_AC,\
-	0, 1, 851584, 0, 1, 128, 48, 0, 1, 4)
+/* ANA_AC:SG_CONFIG:SG_CONFIG_REG_1 */
+#define ANA_AC_SG_CONFIG_REG_1 \
+	__REG(TARGET_ANA_AC, 0, 1, regs->gaddr[GA_ANA_AC_SG_CONFIG], 0, 1, 128,\
+	      48, 0, 1, 4)
 
-/* ANA_AC:SG_CONFIG:SG_CONFIG_REG_2 */
-#define ANA_AC_SG_CONFIG_REG_2 __REG(TARGET_ANA_AC,\
-	0, 1, 851584, 0, 1, 128, 52, 0, 1, 4)
+/* ANA_AC:SG_CONFIG:SG_CONFIG_REG_2 */
+#define ANA_AC_SG_CONFIG_REG_2 \
+	__REG(TARGET_ANA_AC, 0, 1, regs->gaddr[GA_ANA_AC_SG_CONFIG], 0, 1, 128,\
+	      52, 0, 1, 4)
 
-/* ANA_AC:SG_CONFIG:SG_CONFIG_REG_3 */
-#define ANA_AC_SG_CONFIG_REG_3 __REG(TARGET_ANA_AC,\
-	0, 1, 851584, 0, 1, 128, 56, 0, 1, 4)
+/* ANA_AC:SG_CONFIG:SG_CONFIG_REG_3 */
+#define ANA_AC_SG_CONFIG_REG_3 \
+	__REG(TARGET_ANA_AC, 0, 1, regs->gaddr[GA_ANA_AC_SG_CONFIG], 0, 1, 128,\
+	      56, 0, 1, 4)
 
 #define ANA_AC_SG_CONFIG_REG_3_BASE_TIME_SEC_MSB GENMASK(15, 0)
 #define ANA_AC_SG_CONFIG_REG_3_BASE_TIME_SEC_MSB_SET(x)\
@@ -369,17 +414,20 @@ enum sparx5_target {
 #define ANA_AC_SG_CONFIG_REG_3_OCTETS_EXCEEDED_GET(x)\
 	FIELD_GET(ANA_AC_SG_CONFIG_REG_3_OCTETS_EXCEEDED, x)
 
-/* ANA_AC:SG_CONFIG:SG_CONFIG_REG_4 */
-#define ANA_AC_SG_CONFIG_REG_4 __REG(TARGET_ANA_AC,\
-	0, 1, 851584, 0, 1, 128, 60, 0, 1, 4)
+/* ANA_AC:SG_CONFIG:SG_CONFIG_REG_4 */
+#define ANA_AC_SG_CONFIG_REG_4 \
+	__REG(TARGET_ANA_AC, 0, 1, regs->gaddr[GA_ANA_AC_SG_CONFIG], 0, 1, 128,\
+	      60, 0, 1, 4)
 
-/* ANA_AC:SG_CONFIG:SG_CONFIG_REG_5 */
-#define ANA_AC_SG_CONFIG_REG_5 __REG(TARGET_ANA_AC,\
-	0, 1, 851584, 0, 1, 128, 64, 0, 1, 4)
+/* ANA_AC:SG_CONFIG:SG_CONFIG_REG_5 */
+#define ANA_AC_SG_CONFIG_REG_5 \
+	__REG(TARGET_ANA_AC, 0, 1, regs->gaddr[GA_ANA_AC_SG_CONFIG], 0, 1, 128,\
+	      64, 0, 1, 4)
 
-/* ANA_AC:SG_CONFIG:SG_GCL_GS_CONFIG */
-#define ANA_AC_SG_GCL_GS_CONFIG(r) __REG(TARGET_ANA_AC,\
-	0, 1, 851584, 0, 1, 128, 0, r, 4, 4)
+/* ANA_AC:SG_CONFIG:SG_GCL_GS_CONFIG */
+#define ANA_AC_SG_GCL_GS_CONFIG(r) \
+	__REG(TARGET_ANA_AC, 0, 1, regs->gaddr[GA_ANA_AC_SG_CONFIG], 0, 1, 128,\
+	      0, r, 4, 4)
 
 #define ANA_AC_SG_GCL_GS_CONFIG_IPS GENMASK(3, 0)
 #define ANA_AC_SG_GCL_GS_CONFIG_IPS_SET(x)\
@@ -393,25 +441,30 @@ enum sparx5_target {
 #define ANA_AC_SG_GCL_GS_CONFIG_GATE_STATE_GET(x)\
 	FIELD_GET(ANA_AC_SG_GCL_GS_CONFIG_GATE_STATE, x)
 
-/* ANA_AC:SG_CONFIG:SG_GCL_TI_CONFIG */
-#define ANA_AC_SG_GCL_TI_CONFIG(r) __REG(TARGET_ANA_AC,\
-	0, 1, 851584, 0, 1, 128, 16, r, 4, 4)
+/* ANA_AC:SG_CONFIG:SG_GCL_TI_CONFIG */
+#define ANA_AC_SG_GCL_TI_CONFIG(r) \
+	__REG(TARGET_ANA_AC, 0, 1, regs->gaddr[GA_ANA_AC_SG_CONFIG], 0, 1, 128,\
+	      16, r, 4, 4)
 
-/* ANA_AC:SG_CONFIG:SG_GCL_OCT_CONFIG */
-#define ANA_AC_SG_GCL_OCT_CONFIG(r) __REG(TARGET_ANA_AC,\
-	0, 1, 851584, 0, 1, 128, 32, r, 4, 4)
+/* ANA_AC:SG_CONFIG:SG_GCL_OCT_CONFIG */
+#define ANA_AC_SG_GCL_OCT_CONFIG(r) \
+	__REG(TARGET_ANA_AC, 0, 1, regs->gaddr[GA_ANA_AC_SG_CONFIG], 0, 1, 128,\
+	      32, r, 4, 4)
 
-/* ANA_AC:SG_STATUS:SG_STATUS_REG_1 */
-#define ANA_AC_SG_STATUS_REG_1 __REG(TARGET_ANA_AC,\
-	0, 1, 839088, 0, 1, 16, 0, 0, 1, 4)
+/* ANA_AC:SG_STATUS:SG_STATUS_REG_1 */
+#define ANA_AC_SG_STATUS_REG_1 \
+	__REG(TARGET_ANA_AC, 0, 1, regs->gaddr[GA_ANA_AC_SG_STATUS], 0, 1, 16, \
+	      0, 0, 1, 4)
 
-/* ANA_AC:SG_STATUS:SG_STATUS_REG_2 */
-#define ANA_AC_SG_STATUS_REG_2 __REG(TARGET_ANA_AC,\
-	0, 1, 839088, 0, 1, 16, 4, 0, 1, 4)
+/* ANA_AC:SG_STATUS:SG_STATUS_REG_2 */
+#define ANA_AC_SG_STATUS_REG_2 \
+	__REG(TARGET_ANA_AC, 0, 1, regs->gaddr[GA_ANA_AC_SG_STATUS], 0, 1, 16, \
+	      4, 0, 1, 4)
 
-/* ANA_AC:SG_STATUS:SG_STATUS_REG_3 */
-#define ANA_AC_SG_STATUS_REG_3 __REG(TARGET_ANA_AC,\
-	0, 1, 839088, 0, 1, 16, 8, 0, 1, 4)
+/* ANA_AC:SG_STATUS:SG_STATUS_REG_3 */
+#define ANA_AC_SG_STATUS_REG_3 \
+	__REG(TARGET_ANA_AC, 0, 1, regs->gaddr[GA_ANA_AC_SG_STATUS], 0, 1, 16, \
+	      8, 0, 1, 4)
 
 #define ANA_AC_SG_STATUS_REG_3_CFG_CHG_TIME_SEC_MSB GENMASK(15, 0)
 #define ANA_AC_SG_STATUS_REG_3_CFG_CHG_TIME_SEC_MSB_SET(x)\
@@ -443,23 +496,27 @@ enum sparx5_target {
 #define ANA_AC_SG_STATUS_REG_3_GCL_OCTET_INDEX_GET(x)\
 	FIELD_GET(ANA_AC_SG_STATUS_REG_3_GCL_OCTET_INDEX, x)
 
-/* ANA_AC:SG_STATUS:SG_STATUS_REG_4 */
-#define ANA_AC_SG_STATUS_REG_4 __REG(TARGET_ANA_AC,\
-	0, 1, 839088, 0, 1, 16, 12, 0, 1, 4)
+/* ANA_AC:SG_STATUS:SG_STATUS_REG_4 */
+#define ANA_AC_SG_STATUS_REG_4 \
+	__REG(TARGET_ANA_AC, 0, 1, regs->gaddr[GA_ANA_AC_SG_STATUS], 0, 1, 16, \
+	      12, 0, 1, 4)
 
-/* ANA_AC:STAT_GLOBAL_CFG_PORT:STAT_GLOBAL_EVENT_MASK */
-#define ANA_AC_PORT_SGE_CFG(r) __REG(TARGET_ANA_AC,\
-	0, 1, 851552, 0, 1, 20, 0, r, 4, 4)
+/* ANA_AC:STAT_GLOBAL_CFG_PORT:STAT_GLOBAL_EVENT_MASK */
+#define ANA_AC_PORT_SGE_CFG(r) \
+	__REG(TARGET_ANA_AC, 0, 1, regs->gaddr[GA_ANA_AC_STAT_GLOBAL_CFG_PORT],\
+	      0, 1, 20, 0, r, 4, 4)
 
-#define ANA_AC_PORT_SGE_CFG_MASK GENMASK(15, 0)
+#define ANA_AC_PORT_SGE_CFG_MASK\
+	GENMASK(regs->fsize[FW_ANA_AC_PORT_SGE_CFG_MASK] + 0 - 1, 0)
 #define ANA_AC_PORT_SGE_CFG_MASK_SET(x)\
-	FIELD_PREP(ANA_AC_PORT_SGE_CFG_MASK, x)
+	spx5_field_prep(ANA_AC_PORT_SGE_CFG_MASK, x)
 #define ANA_AC_PORT_SGE_CFG_MASK_GET(x)\
-	FIELD_GET(ANA_AC_PORT_SGE_CFG_MASK, x)
+	spx5_field_get(ANA_AC_PORT_SGE_CFG_MASK, x)
 
-/* ANA_AC:STAT_GLOBAL_CFG_PORT:STAT_RESET */
-#define ANA_AC_STAT_RESET __REG(TARGET_ANA_AC,\
-	0, 1, 851552, 0, 1, 20, 16, 0, 1, 4)
+/* ANA_AC:STAT_GLOBAL_CFG_PORT:STAT_RESET */
+#define ANA_AC_STAT_RESET \
+	__REG(TARGET_ANA_AC, 0, 1, regs->gaddr[GA_ANA_AC_STAT_GLOBAL_CFG_PORT],\
+	      0, 1, 20, 16, 0, 1, 4)
 
 #define ANA_AC_STAT_RESET_RESET BIT(0)
 #define ANA_AC_STAT_RESET_RESET_SET(x)\
@@ -467,9 +524,10 @@ enum sparx5_target {
 #define ANA_AC_STAT_RESET_RESET_GET(x)\
 	FIELD_GET(ANA_AC_STAT_RESET_RESET, x)
 
-/* ANA_AC:STAT_CNT_CFG_PORT:STAT_CFG */
-#define ANA_AC_PORT_STAT_CFG(g, r) __REG(TARGET_ANA_AC,\
-	0, 1, 843776, g, 70, 64, 4, r, 4, 4)
+/* ANA_AC:STAT_CNT_CFG_PORT:STAT_CFG */
+#define ANA_AC_PORT_STAT_CFG(g, r) \
+	__REG(TARGET_ANA_AC, 0, 1, regs->gaddr[GA_ANA_AC_STAT_CNT_CFG_PORT], g,\
+	      regs->gcnt[GC_ANA_AC_STAT_CNT_CFG_PORT], 64, 4, r, 4, 4)
 
 #define ANA_AC_PORT_STAT_CFG_CFG_PRIO_MASK GENMASK(11, 4)
 #define ANA_AC_PORT_STAT_CFG_CFG_PRIO_MASK_SET(x)\
@@ -489,13 +547,15 @@ enum sparx5_target {
 #define ANA_AC_PORT_STAT_CFG_CFG_CNT_BYTE_GET(x)\
 	FIELD_GET(ANA_AC_PORT_STAT_CFG_CFG_CNT_BYTE, x)
 
-/* ANA_AC:STAT_CNT_CFG_PORT:STAT_LSB_CNT */
-#define ANA_AC_PORT_STAT_LSB_CNT(g, r) __REG(TARGET_ANA_AC,\
-	0, 1, 843776, g, 70, 64, 20, r, 4, 4)
+/* ANA_AC:STAT_CNT_CFG_PORT:STAT_LSB_CNT */
+#define ANA_AC_PORT_STAT_LSB_CNT(g, r) \
+	__REG(TARGET_ANA_AC, 0, 1, regs->gaddr[GA_ANA_AC_STAT_CNT_CFG_PORT], g,\
+	      regs->gcnt[GC_ANA_AC_STAT_CNT_CFG_PORT], 64, 20, r, 4, 4)
 
-/* ANA_AC:STAT_GLOBAL_CFG_ACL:GLOBAL_CNT_FRM_TYPE_CFG */
-#define ANA_AC_ACL_GLOBAL_CNT_FRM_TYPE_CFG(r) __REG(TARGET_ANA_AC,\
-	0, 1, 893792, 0, 1, 24, 0, r, 2, 4)
+/* ANA_AC:STAT_GLOBAL_CFG_ACL:GLOBAL_CNT_FRM_TYPE_CFG */
+#define ANA_AC_ACL_GLOBAL_CNT_FRM_TYPE_CFG(r) \
+	__REG(TARGET_ANA_AC, 0, 1, regs->gaddr[GA_ANA_AC_STAT_GLOBAL_CFG_ACL], \
+	      0, 1, 24, 0, r, 2, 4)
 
 #define ANA_AC_ACL_GLOBAL_CNT_FRM_TYPE_CFG_GLOBAL_CFG_CNT_FRM_TYPE GENMASK(2, 0)
 #define ANA_AC_ACL_GLOBAL_CNT_FRM_TYPE_CFG_GLOBAL_CFG_CNT_FRM_TYPE_SET(x)\
@@ -503,9 +563,10 @@ enum sparx5_target {
 #define ANA_AC_ACL_GLOBAL_CNT_FRM_TYPE_CFG_GLOBAL_CFG_CNT_FRM_TYPE_GET(x)\
 	FIELD_GET(ANA_AC_ACL_GLOBAL_CNT_FRM_TYPE_CFG_GLOBAL_CFG_CNT_FRM_TYPE, x)
 
-/* ANA_AC:STAT_GLOBAL_CFG_ACL:STAT_GLOBAL_CFG */
-#define ANA_AC_ACL_STAT_GLOBAL_CFG(r) __REG(TARGET_ANA_AC,\
-	0, 1, 893792, 0, 1, 24, 8, r, 2, 4)
+/* ANA_AC:STAT_GLOBAL_CFG_ACL:STAT_GLOBAL_CFG */
+#define ANA_AC_ACL_STAT_GLOBAL_CFG(r) \
+	__REG(TARGET_ANA_AC, 0, 1, regs->gaddr[GA_ANA_AC_STAT_GLOBAL_CFG_ACL], \
+	      0, 1, 24, 8, r, 2, 4)
 
 #define ANA_AC_ACL_STAT_GLOBAL_CFG_GLOBAL_CFG_CNT_BYTE BIT(0)
 #define ANA_AC_ACL_STAT_GLOBAL_CFG_GLOBAL_CFG_CNT_BYTE_SET(x)\
@@ -513,9 +574,10 @@ enum sparx5_target {
 #define ANA_AC_ACL_STAT_GLOBAL_CFG_GLOBAL_CFG_CNT_BYTE_GET(x)\
 	FIELD_GET(ANA_AC_ACL_STAT_GLOBAL_CFG_GLOBAL_CFG_CNT_BYTE, x)
 
-/* ANA_AC:STAT_GLOBAL_CFG_ACL:STAT_GLOBAL_EVENT_MASK */
-#define ANA_AC_ACL_STAT_GLOBAL_EVENT_MASK(r) __REG(TARGET_ANA_AC,\
-	0, 1, 893792, 0, 1, 24, 16, r, 2, 4)
+/* ANA_AC:STAT_GLOBAL_CFG_ACL:STAT_GLOBAL_EVENT_MASK */
+#define ANA_AC_ACL_STAT_GLOBAL_EVENT_MASK(r) \
+	__REG(TARGET_ANA_AC, 0, 1, regs->gaddr[GA_ANA_AC_STAT_GLOBAL_CFG_ACL], \
+	      0, 1, 24, 16, r, 2, 4)
 
 #define ANA_AC_ACL_STAT_GLOBAL_EVENT_MASK_GLOBAL_EVENT_MASK GENMASK(3, 0)
 #define ANA_AC_ACL_STAT_GLOBAL_EVENT_MASK_GLOBAL_EVENT_MASK_SET(x)\
@@ -523,9 +585,10 @@ enum sparx5_target {
 #define ANA_AC_ACL_STAT_GLOBAL_EVENT_MASK_GLOBAL_EVENT_MASK_GET(x)\
 	FIELD_GET(ANA_AC_ACL_STAT_GLOBAL_EVENT_MASK_GLOBAL_EVENT_MASK, x)
 
-/* ANA_ACL:COMMON:VCAP_S2_CFG */
-#define ANA_ACL_VCAP_S2_CFG(r) __REG(TARGET_ANA_ACL,\
-	0, 1, 32768, 0, 1, 592, 0, r, 70, 4)
+/* ANA_ACL:COMMON:VCAP_S2_CFG */
+#define ANA_ACL_VCAP_S2_CFG(r) \
+	__REG(TARGET_ANA_ACL, 0, 1, regs->gaddr[GA_ANA_ACL_COMMON], 0, 1, 592, \
+	      0, r, regs->rcnt[RC_ANA_ACL_VCAP_S2_CFG], 4)
 
 #define ANA_ACL_VCAP_S2_CFG_SEC_ROUTE_HANDLING_ENA BIT(28)
 #define ANA_ACL_VCAP_S2_CFG_SEC_ROUTE_HANDLING_ENA_SET(x)\
@@ -611,9 +674,10 @@ enum sparx5_target {
 #define ANA_ACL_VCAP_S2_CFG_SEC_ENA_GET(x)\
 	FIELD_GET(ANA_ACL_VCAP_S2_CFG_SEC_ENA, x)
 
-/* ANA_ACL:COMMON:SWAP_IP_CTRL */
-#define ANA_ACL_SWAP_IP_CTRL __REG(TARGET_ANA_ACL,\
-	0, 1, 32768, 0, 1, 592, 412, 0, 1, 4)
+/* ANA_ACL:COMMON:SWAP_IP_CTRL */
+#define ANA_ACL_SWAP_IP_CTRL \
+	__REG(TARGET_ANA_ACL, 0, 1, regs->gaddr[GA_ANA_ACL_COMMON], 0, 1, 592, \
+	      412, 0, 1, 4)
 
 #define ANA_ACL_SWAP_IP_CTRL_DMAC_REPL_OFFSET_VAL GENMASK(23, 18)
 #define ANA_ACL_SWAP_IP_CTRL_DMAC_REPL_OFFSET_VAL_SET(x)\
@@ -645,9 +709,10 @@ enum sparx5_target {
 #define ANA_ACL_SWAP_IP_CTRL_IP_SWAP_IP4_TTL_ENA_GET(x)\
 	FIELD_GET(ANA_ACL_SWAP_IP_CTRL_IP_SWAP_IP4_TTL_ENA, x)
 
-/* ANA_ACL:COMMON:VCAP_S2_RLEG_STAT */
-#define ANA_ACL_VCAP_S2_RLEG_STAT(r) __REG(TARGET_ANA_ACL,\
-	0, 1, 32768, 0, 1, 592, 424, r, 4, 4)
+/* ANA_ACL:COMMON:VCAP_S2_RLEG_STAT */
+#define ANA_ACL_VCAP_S2_RLEG_STAT(r) \
+	__REG(TARGET_ANA_ACL, 0, 1, regs->gaddr[GA_ANA_ACL_COMMON], 0, 1, 592, \
+	      424, r, 4, 4)
 
 #define ANA_ACL_VCAP_S2_RLEG_STAT_IRLEG_STAT_MASK GENMASK(12, 6)
 #define ANA_ACL_VCAP_S2_RLEG_STAT_IRLEG_STAT_MASK_SET(x)\
@@ -661,9 +726,10 @@ enum sparx5_target {
 #define ANA_ACL_VCAP_S2_RLEG_STAT_ERLEG_STAT_MASK_GET(x)\
 	FIELD_GET(ANA_ACL_VCAP_S2_RLEG_STAT_ERLEG_STAT_MASK, x)
 
-/* ANA_ACL:COMMON:VCAP_S2_FRAGMENT_CFG */
-#define ANA_ACL_VCAP_S2_FRAGMENT_CFG __REG(TARGET_ANA_ACL,\
-	0, 1, 32768, 0, 1, 592, 440, 0, 1, 4)
+/* ANA_ACL:COMMON:VCAP_S2_FRAGMENT_CFG */
+#define ANA_ACL_VCAP_S2_FRAGMENT_CFG \
+	__REG(TARGET_ANA_ACL, 0, 1, regs->gaddr[GA_ANA_ACL_COMMON], 0, 1, 592, \
+	      440, 0, 1, 4)
 
 #define ANA_ACL_VCAP_S2_FRAGMENT_CFG_L4_MIN_LEN GENMASK(9, 5)
 #define ANA_ACL_VCAP_S2_FRAGMENT_CFG_L4_MIN_LEN_SET(x)\
@@ -683,9 +749,10 @@ enum sparx5_target {
 #define ANA_ACL_VCAP_S2_FRAGMENT_CFG_FRAGMENT_OFFSET_THRES_GET(x)\
 	FIELD_GET(ANA_ACL_VCAP_S2_FRAGMENT_CFG_FRAGMENT_OFFSET_THRES, x)
 
-/* ANA_ACL:COMMON:OWN_UPSID */
-#define ANA_ACL_OWN_UPSID(r) __REG(TARGET_ANA_ACL,\
-	0, 1, 32768, 0, 1, 592, 580, r, 3, 4)
+/* ANA_ACL:COMMON:OWN_UPSID */
+#define ANA_ACL_OWN_UPSID(r) \
+	__REG(TARGET_ANA_ACL, 0, 1, regs->gaddr[GA_ANA_ACL_COMMON], 0, 1, 592, \
+	      580, r, regs->rcnt[RC_ANA_ACL_OWN_UPSID], 4)
 
 #define ANA_ACL_OWN_UPSID_OWN_UPSID GENMASK(4, 0)
 #define ANA_ACL_OWN_UPSID_OWN_UPSID_SET(x)\
@@ -693,9 +760,10 @@ enum sparx5_target {
 #define ANA_ACL_OWN_UPSID_OWN_UPSID_GET(x)\
 	FIELD_GET(ANA_ACL_OWN_UPSID_OWN_UPSID, x)
 
-/* ANA_ACL:KEY_SEL:VCAP_S2_KEY_SEL */
-#define ANA_ACL_VCAP_S2_KEY_SEL(g, r) __REG(TARGET_ANA_ACL,\
-	0, 1, 34200, g, 134, 16, 0, r, 4, 4)
+/* ANA_ACL:KEY_SEL:VCAP_S2_KEY_SEL */
+#define ANA_ACL_VCAP_S2_KEY_SEL(g, r) \
+	__REG(TARGET_ANA_ACL, 0, 1, regs->gaddr[GA_ANA_ACL_KEY_SEL], g, \
+	      regs->gcnt[GC_ANA_ACL_KEY_SEL], 16, 0, r, 4, 4)
 
 #define ANA_ACL_VCAP_S2_KEY_SEL_KEY_SEL_ENA BIT(13)
 #define ANA_ACL_VCAP_S2_KEY_SEL_KEY_SEL_ENA_SET(x)\
@@ -745,17 +813,20 @@ enum sparx5_target {
 #define ANA_ACL_VCAP_S2_KEY_SEL_ARP_KEY_SEL_GET(x)\
 	FIELD_GET(ANA_ACL_VCAP_S2_KEY_SEL_ARP_KEY_SEL, x)
 
-/* ANA_ACL:CNT_A:CNT_A */
-#define ANA_ACL_CNT_A(g) __REG(TARGET_ANA_ACL,\
-	0, 1, 0, g, 4096, 4, 0, 0, 1, 4)
+/* ANA_ACL:CNT_A:CNT_A */
+#define ANA_ACL_CNT_A(g) \
+	__REG(TARGET_ANA_ACL, 0, 1, 0, g, regs->gcnt[GC_ANA_ACL_CNT_A], 4, 0, \
+	      0, 1, 4)
 
-/* ANA_ACL:CNT_B:CNT_B */
-#define ANA_ACL_CNT_B(g) __REG(TARGET_ANA_ACL,\
-	0, 1, 16384, g, 4096, 4, 0, 0, 1, 4)
+/* ANA_ACL:CNT_B:CNT_B */
+#define ANA_ACL_CNT_B(g) \
+	__REG(TARGET_ANA_ACL, 0, 1, regs->gaddr[GA_ANA_ACL_CNT_B], g, \
+	      regs->gcnt[GC_ANA_ACL_CNT_B], 4, 0, 0, 1, 4)
 
-/* ANA_ACL:STICKY:SEC_LOOKUP_STICKY */
-#define ANA_ACL_SEC_LOOKUP_STICKY(r) __REG(TARGET_ANA_ACL,\
-	0, 1, 36408, 0, 1, 16, 0, r, 4, 4)
+/* ANA_ACL:STICKY:SEC_LOOKUP_STICKY */
+#define ANA_ACL_SEC_LOOKUP_STICKY(r) \
+	__REG(TARGET_ANA_ACL, 0, 1, regs->gaddr[GA_ANA_ACL_STICKY], 0, 1, 16, \
+	      0, r, 4, 4)
 
 #define ANA_ACL_SEC_LOOKUP_STICKY_KEY_SEL_CLM_STICKY BIT(17)
 #define ANA_ACL_SEC_LOOKUP_STICKY_KEY_SEL_CLM_STICKY_SET(x)\
@@ -865,9 +936,10 @@ enum sparx5_target {
 #define ANA_ACL_SEC_LOOKUP_STICKY_SEC_TYPE_MAC_ETYPE_STICKY_GET(x)\
 	FIELD_GET(ANA_ACL_SEC_LOOKUP_STICKY_SEC_TYPE_MAC_ETYPE_STICKY, x)
 
-/* ANA_AC_POL:POL_ALL_CFG:POL_UPD_INT_CFG */
-#define ANA_AC_POL_POL_UPD_INT_CFG __REG(TARGET_ANA_AC_POL,\
-	0, 1, 75968, 0, 1, 1160, 1148, 0, 1, 4)
+/* ANA_AC_POL:POL_ALL_CFG:POL_UPD_INT_CFG */
+#define ANA_AC_POL_POL_UPD_INT_CFG \
+	__REG(TARGET_ANA_AC_POL, 0, 1, regs->gaddr[GA_ANA_AC_POL_POL_ALL_CFG], \
+	      0, 1, 1160, 1148, 0, 1, 4)
 
 #define ANA_AC_POL_POL_UPD_INT_CFG_POL_UPD_INT GENMASK(9, 0)
 #define ANA_AC_POL_POL_UPD_INT_CFG_POL_UPD_INT_SET(x)\
@@ -875,9 +947,10 @@ enum sparx5_target {
 #define ANA_AC_POL_POL_UPD_INT_CFG_POL_UPD_INT_GET(x)\
 	FIELD_GET(ANA_AC_POL_POL_UPD_INT_CFG_POL_UPD_INT, x)
 
-/* ANA_AC_POL:COMMON_BDLB:DLB_CTRL */
-#define ANA_AC_POL_BDLB_DLB_CTRL __REG(TARGET_ANA_AC_POL,\
-	0, 1, 79048, 0, 1, 8, 0, 0, 1, 4)
+/* ANA_AC_POL:COMMON_BDLB:DLB_CTRL */
+#define ANA_AC_POL_BDLB_DLB_CTRL \
+	__REG(TARGET_ANA_AC_POL, 0, 1, regs->gaddr[GA_ANA_AC_POL_COMMON_BDLB], \
+	      0, 1, 8, 0, 0, 1, 4)
 
 #define ANA_AC_POL_BDLB_DLB_CTRL_CLK_PERIOD_01NS GENMASK(26, 19)
 #define ANA_AC_POL_BDLB_DLB_CTRL_CLK_PERIOD_01NS_SET(x)\
@@ -903,9 +976,10 @@ enum sparx5_target {
 #define ANA_AC_POL_BDLB_DLB_CTRL_DLB_ADD_ENA_GET(x)\
 	FIELD_GET(ANA_AC_POL_BDLB_DLB_CTRL_DLB_ADD_ENA, x)
 
-/* ANA_AC_POL:COMMON_BUM_SLB:DLB_CTRL */
-#define ANA_AC_POL_SLB_DLB_CTRL __REG(TARGET_ANA_AC_POL,\
-	0, 1, 79056, 0, 1, 20, 0, 0, 1, 4)
+/* ANA_AC_POL:COMMON_BUM_SLB:DLB_CTRL */
+#define ANA_AC_POL_SLB_DLB_CTRL \
+	__REG(TARGET_ANA_AC_POL, 0, 1, \
+	      regs->gaddr[GA_ANA_AC_POL_COMMON_BUM_SLB], 0, 1, 20, 0, 0, 1, 4)
 
 #define ANA_AC_POL_SLB_DLB_CTRL_CLK_PERIOD_01NS GENMASK(26, 19)
 #define ANA_AC_POL_SLB_DLB_CTRL_CLK_PERIOD_01NS_SET(x)\
@@ -931,19 +1005,22 @@ enum sparx5_target {
 #define ANA_AC_POL_SLB_DLB_CTRL_DLB_ADD_ENA_GET(x)\
 	FIELD_GET(ANA_AC_POL_SLB_DLB_CTRL_DLB_ADD_ENA, x)
 
-/* ANA_AC_SDLB:LBGRP_TBL:XLB_START */
-#define ANA_AC_SDLB_XLB_START(g) __REG(TARGET_ANA_AC_SDLB,\
-	0, 1, 295468, g, 10, 24, 0, 0, 1, 4)
+/* ANA_AC_SDLB:LBGRP_TBL:XLB_START */
+#define ANA_AC_SDLB_XLB_START(g) \
+	__REG(TARGET_ANA_AC_SDLB, 0, 1, regs->gaddr[GA_ANA_AC_SDLB_LBGRP_TBL], \
+	      g, regs->gcnt[GC_ANA_AC_SDLB_LBGRP_TBL], 24, 0, 0, 1, 4)
 
-#define ANA_AC_SDLB_XLB_START_LBSET_START GENMASK(12, 0)
+#define ANA_AC_SDLB_XLB_START_LBSET_START\
+	GENMASK(regs->fsize[FW_ANA_AC_SDLB_XLB_START_LBSET_START] + 0 - 1, 0)
 #define ANA_AC_SDLB_XLB_START_LBSET_START_SET(x)\
-	FIELD_PREP(ANA_AC_SDLB_XLB_START_LBSET_START, x)
+	spx5_field_prep(ANA_AC_SDLB_XLB_START_LBSET_START, x)
 #define ANA_AC_SDLB_XLB_START_LBSET_START_GET(x)\
-	FIELD_GET(ANA_AC_SDLB_XLB_START_LBSET_START, x)
+	spx5_field_get(ANA_AC_SDLB_XLB_START_LBSET_START, x)
 
-/* ANA_AC_SDLB:LBGRP_TBL:PUP_INTERVAL */
-#define ANA_AC_SDLB_PUP_INTERVAL(g) __REG(TARGET_ANA_AC_SDLB,\
-	0, 1, 295468, g, 10, 24, 4, 0, 1, 4)
+/* ANA_AC_SDLB:LBGRP_TBL:PUP_INTERVAL */
+#define ANA_AC_SDLB_PUP_INTERVAL(g) \
+	__REG(TARGET_ANA_AC_SDLB, 0, 1, regs->gaddr[GA_ANA_AC_SDLB_LBGRP_TBL], \
+	      g, regs->gcnt[GC_ANA_AC_SDLB_LBGRP_TBL], 24, 4, 0, 1, 4)
 
 #define ANA_AC_SDLB_PUP_INTERVAL_PUP_INTERVAL GENMASK(19, 0)
 #define ANA_AC_SDLB_PUP_INTERVAL_PUP_INTERVAL_SET(x)\
@@ -951,9 +1028,10 @@ enum sparx5_target {
 	FIELD_GET(ANA_AC_SDLB_PUP_INTERVAL_PUP_INTERVAL, x)
 
-/* ANA_AC_SDLB:LBGRP_TBL:PUP_CTRL */
-#define ANA_AC_SDLB_PUP_CTRL(g) __REG(TARGET_ANA_AC_SDLB,\
-	0, 1, 295468, g, 10, 24, 8, 0, 1, 4)
+/* ANA_AC_SDLB:LBGRP_TBL:PUP_CTRL */
+#define ANA_AC_SDLB_PUP_CTRL(g) \
+	__REG(TARGET_ANA_AC_SDLB, 0, 1, regs->gaddr[GA_ANA_AC_SDLB_LBGRP_TBL], \
+	      g, regs->gcnt[GC_ANA_AC_SDLB_LBGRP_TBL], 24, 8, 0, 1, 4)
 
 #define ANA_AC_SDLB_PUP_CTRL_PUP_LB_DT GENMASK(18, 0)
 #define ANA_AC_SDLB_PUP_CTRL_PUP_LB_DT_SET(x)\
@@ -967,19 +1045,22 @@ enum sparx5_target {
 #define ANA_AC_SDLB_PUP_CTRL_PUP_ENA_GET(x)\
 	FIELD_GET(ANA_AC_SDLB_PUP_CTRL_PUP_ENA, x)
 
-/* ANA_AC_SDLB:LBGRP_TBL:LBGRP_MISC */
-#define ANA_AC_SDLB_LBGRP_MISC(g) __REG(TARGET_ANA_AC_SDLB,\
-	0, 1, 295468, g, 10, 24, 12, 0, 1, 4)
+/* ANA_AC_SDLB:LBGRP_TBL:LBGRP_MISC */
+#define ANA_AC_SDLB_LBGRP_MISC(g) \
+	__REG(TARGET_ANA_AC_SDLB, 0, 1, regs->gaddr[GA_ANA_AC_SDLB_LBGRP_TBL], \
+	      g, regs->gcnt[GC_ANA_AC_SDLB_LBGRP_TBL], 24, 12, 0, 1, 4)
 
-#define ANA_AC_SDLB_LBGRP_MISC_THRES_SHIFT GENMASK(12, 8)
+#define ANA_AC_SDLB_LBGRP_MISC_THRES_SHIFT\
+	GENMASK(regs->fsize[FW_ANA_AC_SDLB_LBGRP_MISC_THRES_SHIFT] + 8 - 1, 8)
 #define ANA_AC_SDLB_LBGRP_MISC_THRES_SHIFT_SET(x)\
-	FIELD_PREP(ANA_AC_SDLB_LBGRP_MISC_THRES_SHIFT, x)
+	spx5_field_prep(ANA_AC_SDLB_LBGRP_MISC_THRES_SHIFT, x)
 #define ANA_AC_SDLB_LBGRP_MISC_THRES_SHIFT_GET(x)\
-	FIELD_GET(ANA_AC_SDLB_LBGRP_MISC_THRES_SHIFT, x)
+	spx5_field_get(ANA_AC_SDLB_LBGRP_MISC_THRES_SHIFT, x)
 
-/* ANA_AC_SDLB:LBGRP_TBL:FRM_RATE_TOKENS */
-#define ANA_AC_SDLB_FRM_RATE_TOKENS(g) __REG(TARGET_ANA_AC_SDLB,\
-	0, 1, 295468, g, 10, 24, 16, 0, 1, 4)
+/* ANA_AC_SDLB:LBGRP_TBL:FRM_RATE_TOKENS */
+#define ANA_AC_SDLB_FRM_RATE_TOKENS(g) \
+	__REG(TARGET_ANA_AC_SDLB, 0, 1, regs->gaddr[GA_ANA_AC_SDLB_LBGRP_TBL], \
+	      g, regs->gcnt[GC_ANA_AC_SDLB_LBGRP_TBL], 24, 16, 0, 1, 4)
 
 #define ANA_AC_SDLB_FRM_RATE_TOKENS_FRM_RATE_TOKENS GENMASK(12, 0)
 #define ANA_AC_SDLB_FRM_RATE_TOKENS_FRM_RATE_TOKENS_SET(x)\
@@ -987,9 +1068,10 @@ enum sparx5_target {
 #define ANA_AC_SDLB_FRM_RATE_TOKENS_FRM_RATE_TOKENS_GET(x)\
 	FIELD_GET(ANA_AC_SDLB_FRM_RATE_TOKENS_FRM_RATE_TOKENS, x)
 
-/* ANA_AC_SDLB:LBGRP_TBL:LBGRP_STATE_TBL */
-#define ANA_AC_SDLB_LBGRP_STATE_TBL(g) __REG(TARGET_ANA_AC_SDLB,\
-	0, 1, 295468, g, 10, 24, 20, 0, 1, 4)
+/* ANA_AC_SDLB:LBGRP_TBL:LBGRP_STATE_TBL */
+#define ANA_AC_SDLB_LBGRP_STATE_TBL(g) \
+	__REG(TARGET_ANA_AC_SDLB, 0, 1, regs->gaddr[GA_ANA_AC_SDLB_LBGRP_TBL], \
+	      g, regs->gcnt[GC_ANA_AC_SDLB_LBGRP_TBL], 24, 20, 0, 1, 4)
 
 #define ANA_AC_SDLB_LBGRP_STATE_TBL_PUP_ONGOING BIT(0)
 #define ANA_AC_SDLB_LBGRP_STATE_TBL_PUP_ONGOING_SET(x)\
@@ -1003,15 +1085,17 @@ enum sparx5_target {
 #define ANA_AC_SDLB_LBGRP_STATE_TBL_PUP_WAIT_ACK_GET(x)\
 	FIELD_GET(ANA_AC_SDLB_LBGRP_STATE_TBL_PUP_WAIT_ACK, x)
 
-#define ANA_AC_SDLB_LBGRP_STATE_TBL_PUP_LBSET_NEXT GENMASK(28, 16)
+#define ANA_AC_SDLB_LBGRP_STATE_TBL_PUP_LBSET_NEXT\
+	GENMASK(regs->fsize[FW_ANA_AC_SDLB_LBGRP_STATE_TBL_PUP_LBSET_NEXT] + 16 - 1, 16)
 #define ANA_AC_SDLB_LBGRP_STATE_TBL_PUP_LBSET_NEXT_SET(x)\
-	FIELD_PREP(ANA_AC_SDLB_LBGRP_STATE_TBL_PUP_LBSET_NEXT, x)
+	spx5_field_prep(ANA_AC_SDLB_LBGRP_STATE_TBL_PUP_LBSET_NEXT, x)
 #define ANA_AC_SDLB_LBGRP_STATE_TBL_PUP_LBSET_NEXT_GET(x)\
-	FIELD_GET(ANA_AC_SDLB_LBGRP_STATE_TBL_PUP_LBSET_NEXT, x)
+	spx5_field_get(ANA_AC_SDLB_LBGRP_STATE_TBL_PUP_LBSET_NEXT, x)
 
-/* ANA_AC_SDLB:LBSET_TBL:PUP_TOKENS */
-#define ANA_AC_SDLB_PUP_TOKENS(g, r) __REG(TARGET_ANA_AC_SDLB,\
-	0, 1, 0, g, 4616, 64, 0, r, 2, 4)
+/* ANA_AC_SDLB:LBSET_TBL:PUP_TOKENS */
+#define ANA_AC_SDLB_PUP_TOKENS(g, r) \
+	__REG(TARGET_ANA_AC_SDLB, 0, 1, 0, g, \
+	      regs->gcnt[GC_ANA_AC_SDLB_LBSET_TBL], 64, 0, r, 2, 4)
 
 #define ANA_AC_SDLB_PUP_TOKENS_PUP_TOKENS GENMASK(12, 0)
 #define ANA_AC_SDLB_PUP_TOKENS_PUP_TOKENS_SET(x)\
@@ -1019,9 +1103,10 @@ enum sparx5_target {
 #define ANA_AC_SDLB_PUP_TOKENS_PUP_TOKENS_GET(x)\
 	FIELD_GET(ANA_AC_SDLB_PUP_TOKENS_PUP_TOKENS, x)
 
-/* ANA_AC_SDLB:LBSET_TBL:THRES */
-#define ANA_AC_SDLB_THRES(g, r) __REG(TARGET_ANA_AC_SDLB,\
-	0, 1, 0, g, 4616, 64, 8, r, 2, 4)
+/* ANA_AC_SDLB:LBSET_TBL:THRES */
+#define ANA_AC_SDLB_THRES(g, r) \
+	__REG(TARGET_ANA_AC_SDLB, 0, 1, 0, g, \
+	      regs->gcnt[GC_ANA_AC_SDLB_LBSET_TBL], 64, 8, r, 2, 4)
 
 #define ANA_AC_SDLB_THRES_THRES GENMASK(9, 0)
 #define ANA_AC_SDLB_THRES_THRES_SET(x)\
@@ -1035,25 +1120,29 @@ enum sparx5_target {
 #define ANA_AC_SDLB_THRES_THRES_HYS_GET(x)\
 	FIELD_GET(ANA_AC_SDLB_THRES_THRES_HYS, x)
 
-/* ANA_AC_SDLB:LBSET_TBL:XLB_NEXT */
-#define ANA_AC_SDLB_XLB_NEXT(g) __REG(TARGET_ANA_AC_SDLB,\
-	0, 1, 0, g, 4616, 64, 16, 0, 1, 4)
+/* ANA_AC_SDLB:LBSET_TBL:XLB_NEXT */
+#define ANA_AC_SDLB_XLB_NEXT(g) \
+	__REG(TARGET_ANA_AC_SDLB, 0, 1, 0, g, \
+	      regs->gcnt[GC_ANA_AC_SDLB_LBSET_TBL], 64, 16, 0, 1, 4)
 
-#define ANA_AC_SDLB_XLB_NEXT_LBSET_NEXT GENMASK(12, 0)
+#define ANA_AC_SDLB_XLB_NEXT_LBSET_NEXT\
+	GENMASK(regs->fsize[FW_ANA_AC_SDLB_XLB_NEXT_LBSET_NEXT] + 0 - 1, 0)
 #define ANA_AC_SDLB_XLB_NEXT_LBSET_NEXT_SET(x)\
-	FIELD_PREP(ANA_AC_SDLB_XLB_NEXT_LBSET_NEXT, x)
+	spx5_field_prep(ANA_AC_SDLB_XLB_NEXT_LBSET_NEXT, x)
 #define ANA_AC_SDLB_XLB_NEXT_LBSET_NEXT_GET(x)\
-	FIELD_GET(ANA_AC_SDLB_XLB_NEXT_LBSET_NEXT, x)
+	spx5_field_get(ANA_AC_SDLB_XLB_NEXT_LBSET_NEXT, x)
 
-#define ANA_AC_SDLB_XLB_NEXT_LBGRP GENMASK(27, 24)
+#define ANA_AC_SDLB_XLB_NEXT_LBGRP\
+	GENMASK(regs->fsize[FW_ANA_AC_SDLB_XLB_NEXT_LBGRP] + 24 - 1, 24)
 #define ANA_AC_SDLB_XLB_NEXT_LBGRP_SET(x)\
-	FIELD_PREP(ANA_AC_SDLB_XLB_NEXT_LBGRP, x)
+	spx5_field_prep(ANA_AC_SDLB_XLB_NEXT_LBGRP, x)
 #define ANA_AC_SDLB_XLB_NEXT_LBGRP_GET(x)\
-	FIELD_GET(ANA_AC_SDLB_XLB_NEXT_LBGRP, x)
+	spx5_field_get(ANA_AC_SDLB_XLB_NEXT_LBGRP, x)
 
-/* ANA_AC_SDLB:LBSET_TBL:INH_CTRL */
-#define ANA_AC_SDLB_INH_CTRL(g, r) __REG(TARGET_ANA_AC_SDLB,\
-	0, 1, 0, g, 4616, 64, 20, r, 2, 4)
+/* ANA_AC_SDLB:LBSET_TBL:INH_CTRL */
+#define ANA_AC_SDLB_INH_CTRL(g, r) \
+	__REG(TARGET_ANA_AC_SDLB, 0, 1, 0, g, \
+	      regs->gcnt[GC_ANA_AC_SDLB_LBSET_TBL], 64, 20, r, 2, 4)
 
 #define ANA_AC_SDLB_INH_CTRL_PUP_TOKENS_MAX GENMASK(12, 0)
 #define ANA_AC_SDLB_INH_CTRL_PUP_TOKENS_MAX_SET(x)\
@@ -1073,19 +1162,22 @@ enum sparx5_target {
 #define ANA_AC_SDLB_INH_CTRL_INH_LB_GET(x)\
 	FIELD_GET(ANA_AC_SDLB_INH_CTRL_INH_LB, x)
 
-/* ANA_AC_SDLB:LBSET_TBL:INH_LBSET_ADDR */
-#define ANA_AC_SDLB_INH_LBSET_ADDR(g) __REG(TARGET_ANA_AC_SDLB,\
-	0, 1, 0, g, 4616, 64, 28, 0, 1, 4)
+/* ANA_AC_SDLB:LBSET_TBL:INH_LBSET_ADDR */
+#define ANA_AC_SDLB_INH_LBSET_ADDR(g) \
+	__REG(TARGET_ANA_AC_SDLB, 0, 1, 0, g, \
+	      regs->gcnt[GC_ANA_AC_SDLB_LBSET_TBL], 64, 28, 0, 1, 4)
 
-#define ANA_AC_SDLB_INH_LBSET_ADDR_INH_LBSET_ADDR GENMASK(12, 0)
+#define ANA_AC_SDLB_INH_LBSET_ADDR_INH_LBSET_ADDR\
+	GENMASK(regs->fsize[FW_ANA_AC_SDLB_INH_LBSET_ADDR_INH_LBSET_ADDR] + 0 - 1, 0)
 #define ANA_AC_SDLB_INH_LBSET_ADDR_INH_LBSET_ADDR_SET(x)\
-	FIELD_PREP(ANA_AC_SDLB_INH_LBSET_ADDR_INH_LBSET_ADDR, x)
+	spx5_field_prep(ANA_AC_SDLB_INH_LBSET_ADDR_INH_LBSET_ADDR, x)
 #define ANA_AC_SDLB_INH_LBSET_ADDR_INH_LBSET_ADDR_GET(x)\
-	FIELD_GET(ANA_AC_SDLB_INH_LBSET_ADDR_INH_LBSET_ADDR, x)
+	spx5_field_get(ANA_AC_SDLB_INH_LBSET_ADDR_INH_LBSET_ADDR, x)
 
-/* ANA_AC_SDLB:LBSET_TBL:DLB_MISC */
-#define ANA_AC_SDLB_DLB_MISC(g) __REG(TARGET_ANA_AC_SDLB,\
-	0, 1, 0, g, 4616, 64, 32, 0, 1, 4)
+/* ANA_AC_SDLB:LBSET_TBL:DLB_MISC */
+#define ANA_AC_SDLB_DLB_MISC(g) \
+	__REG(TARGET_ANA_AC_SDLB, 0, 1, 0, g, \
+	      regs->gcnt[GC_ANA_AC_SDLB_LBSET_TBL], 64, 32, 0, 1, 4)
 
 #define ANA_AC_SDLB_DLB_MISC_DLB_FRM_RATE_ENA BIT(0)
 #define ANA_AC_SDLB_DLB_MISC_DLB_FRM_RATE_ENA_SET(x)\
@@ -1105,9 +1197,10 @@ enum sparx5_target {
 #define ANA_AC_SDLB_DLB_MISC_DLB_FRM_ADJ_GET(x)\
 	FIELD_GET(ANA_AC_SDLB_DLB_MISC_DLB_FRM_ADJ, x)
 
-/* ANA_AC_SDLB:LBSET_TBL:DLB_CFG */
-#define ANA_AC_SDLB_DLB_CFG(g) __REG(TARGET_ANA_AC_SDLB,\
-	0, 1, 0, g, 4616, 64, 36, 0, 1, 4)
+/* ANA_AC_SDLB:LBSET_TBL:DLB_CFG */
+#define ANA_AC_SDLB_DLB_CFG(g) \
+	__REG(TARGET_ANA_AC_SDLB, 0, 1, 0, g, \
+	      regs->gcnt[GC_ANA_AC_SDLB_LBSET_TBL], 64, 36, 0, 1, 4)
 
 #define ANA_AC_SDLB_DLB_CFG_DROP_ON_YELLOW_ENA BIT(11)
 #define ANA_AC_SDLB_DLB_CFG_DROP_ON_YELLOW_ENA_SET(x)\
@@ -1157,9 +1250,10 @@ enum sparx5_target {
 #define ANA_AC_SDLB_DLB_CFG_TRAFFIC_TYPE_MASK_GET(x)\
 	FIELD_GET(ANA_AC_SDLB_DLB_CFG_TRAFFIC_TYPE_MASK, x)
 
-/* ANA_CL:PORT:FILTER_CTRL */
-#define ANA_CL_FILTER_CTRL(g) __REG(TARGET_ANA_CL,\
-	0, 1, 131072, g, 70, 512, 4, 0, 1, 4)
+/* ANA_CL:PORT:FILTER_CTRL */
+#define ANA_CL_FILTER_CTRL(g) \
+	__REG(TARGET_ANA_CL, 0, 1, regs->gaddr[GA_ANA_CL_PORT], g, \
+	      regs->gcnt[GC_ANA_CL_PORT], 512, 4, 0, 1, 4)
 
 #define ANA_CL_FILTER_CTRL_FILTER_SMAC_MC_DIS BIT(2)
 #define ANA_CL_FILTER_CTRL_FILTER_SMAC_MC_DIS_SET(x)\
@@ -1179,9 +1273,10 @@ enum sparx5_target {
 #define ANA_CL_FILTER_CTRL_FORCE_FCS_UPDATE_ENA_GET(x)\
 	FIELD_GET(ANA_CL_FILTER_CTRL_FORCE_FCS_UPDATE_ENA, x)
 
-/* ANA_CL:PORT:VLAN_FILTER_CTRL */
-#define ANA_CL_VLAN_FILTER_CTRL(g, r) __REG(TARGET_ANA_CL,\
-	0, 1, 131072, g, 70, 512, 8, r, 3, 4)
+/* ANA_CL:PORT:VLAN_FILTER_CTRL */
+#define ANA_CL_VLAN_FILTER_CTRL(g, r) \
+	__REG(TARGET_ANA_CL, 0, 1, regs->gaddr[GA_ANA_CL_PORT], g, \
+	      regs->gcnt[GC_ANA_CL_PORT], 512, 8, r, 3, 4)
 
 #define ANA_CL_VLAN_FILTER_CTRL_TAG_REQUIRED_ENA BIT(10)
 #define ANA_CL_VLAN_FILTER_CTRL_TAG_REQUIRED_ENA_SET(x)\
@@ -1249,9 +1344,10 @@ enum sparx5_target {
 #define ANA_CL_VLAN_FILTER_CTRL_CUST3_STAG_DIS_GET(x)\
 	FIELD_GET(ANA_CL_VLAN_FILTER_CTRL_CUST3_STAG_DIS, x)
 
-/* ANA_CL:PORT:ETAG_FILTER_CTRL */
-#define ANA_CL_ETAG_FILTER_CTRL(g) __REG(TARGET_ANA_CL,\
-	0, 1, 131072, g, 70, 512, 20, 0, 1, 4)
+/* ANA_CL:PORT:ETAG_FILTER_CTRL */
+#define ANA_CL_ETAG_FILTER_CTRL(g) \
+	__REG(TARGET_ANA_CL, 0, 1, regs->gaddr[GA_ANA_CL_PORT], g, \
+	      regs->gcnt[GC_ANA_CL_PORT], 512, 20, 0, 1, 4)
 
 #define ANA_CL_ETAG_FILTER_CTRL_ETAG_REQUIRED_ENA BIT(1)
 #define ANA_CL_ETAG_FILTER_CTRL_ETAG_REQUIRED_ENA_SET(x)\
@@ -1265,9 +1361,10 @@ enum sparx5_target {
 #define ANA_CL_ETAG_FILTER_CTRL_ETAG_DIS_GET(x)\
 	FIELD_GET(ANA_CL_ETAG_FILTER_CTRL_ETAG_DIS, x)
 
-/* ANA_CL:PORT:VLAN_CTRL */
-#define ANA_CL_VLAN_CTRL(g) __REG(TARGET_ANA_CL,\
-	0, 1, 131072, g, 70, 512, 32, 0, 1, 4)
+/* ANA_CL:PORT:VLAN_CTRL */
+#define ANA_CL_VLAN_CTRL(g) \
+	__REG(TARGET_ANA_CL, 0, 1, regs->gaddr[GA_ANA_CL_PORT], g, \
+	      regs->gcnt[GC_ANA_CL_PORT], 512, 32, 0, 1, 4)
 
 #define ANA_CL_VLAN_CTRL_PORT_VOE_TPID_AWARE_DIS GENMASK(30, 26)
 #define ANA_CL_VLAN_CTRL_PORT_VOE_TPID_AWARE_DIS_SET(x)\
@@ -1335,9 +1432,10 @@ enum sparx5_target {
 #define ANA_CL_VLAN_CTRL_PORT_VID_GET(x)\
 	FIELD_GET(ANA_CL_VLAN_CTRL_PORT_VID, x)
 
-/* ANA_CL:PORT:VLAN_CTRL_2 */
-#define ANA_CL_VLAN_CTRL_2(g) __REG(TARGET_ANA_CL,\
-	0, 1, 131072, g, 70, 512, 36, 0, 1, 4)
+/* ANA_CL:PORT:VLAN_CTRL_2 */
+#define ANA_CL_VLAN_CTRL_2(g) \
+	__REG(TARGET_ANA_CL, 0, 1, regs->gaddr[GA_ANA_CL_PORT], g, \
+	      regs->gcnt[GC_ANA_CL_PORT], 512, 36, 0, 1, 4)
 
 #define ANA_CL_VLAN_CTRL_2_VLAN_PUSH_CNT GENMASK(1, 0)
 #define ANA_CL_VLAN_CTRL_2_VLAN_PUSH_CNT_SET(x)\
@@ -1345,9 +1443,10 @@ enum sparx5_target {
 #define ANA_CL_VLAN_CTRL_2_VLAN_PUSH_CNT_GET(x)\
 	FIELD_GET(ANA_CL_VLAN_CTRL_2_VLAN_PUSH_CNT, x)
 
-/* ANA_CL:PORT:PCP_DEI_MAP_CFG */
-#define ANA_CL_PCP_DEI_MAP_CFG(g, r) __REG(TARGET_ANA_CL,\
-	0, 1, 131072, g, 70, 512, 108, r, 16, 4)
+/* ANA_CL:PORT:PCP_DEI_MAP_CFG */
+#define ANA_CL_PCP_DEI_MAP_CFG(g, r) \
+	__REG(TARGET_ANA_CL, 0, 1, regs->gaddr[GA_ANA_CL_PORT], g, \
+	      regs->gcnt[GC_ANA_CL_PORT], 512, 108, r, 16, 4)
 
 #define ANA_CL_PCP_DEI_MAP_CFG_PCP_DEI_DP_VAL GENMASK(4, 3)
 #define ANA_CL_PCP_DEI_MAP_CFG_PCP_DEI_DP_VAL_SET(x)\
@@ -1361,9 +1460,10 @@ enum sparx5_target {
 #define ANA_CL_PCP_DEI_MAP_CFG_PCP_DEI_QOS_VAL_GET(x)\
 	FIELD_GET(ANA_CL_PCP_DEI_MAP_CFG_PCP_DEI_QOS_VAL, x)
 
-/* ANA_CL:PORT:QOS_CFG */
-#define ANA_CL_QOS_CFG(g) __REG(TARGET_ANA_CL,\
-	0, 1, 131072, g, 70, 512, 172, 0, 1, 4)
+/* ANA_CL:PORT:QOS_CFG */
+#define ANA_CL_QOS_CFG(g) \
+	__REG(TARGET_ANA_CL, 0, 1, regs->gaddr[GA_ANA_CL_PORT], g, \
+	      regs->gcnt[GC_ANA_CL_PORT], 512, 172, 0, 1, 4)
 
 #define ANA_CL_QOS_CFG_DEFAULT_COSID_ENA BIT(17)
 #define ANA_CL_QOS_CFG_DEFAULT_COSID_ENA_SET(x)\
@@ -1437,13 +1537,15 @@ enum sparx5_target {
 #define ANA_CL_QOS_CFG_DEFAULT_QOS_VAL_GET(x)\
 	FIELD_GET(ANA_CL_QOS_CFG_DEFAULT_QOS_VAL, x)
 
-/* ANA_CL:PORT:CAPTURE_BPDU_CFG */
-#define ANA_CL_CAPTURE_BPDU_CFG(g) __REG(TARGET_ANA_CL,\
-	0, 1, 131072, g, 70, 512, 196, 0, 1, 4)
+/* ANA_CL:PORT:CAPTURE_BPDU_CFG */
+#define ANA_CL_CAPTURE_BPDU_CFG(g) \
+	__REG(TARGET_ANA_CL, 0, 1, regs->gaddr[GA_ANA_CL_PORT], g, \
+	      regs->gcnt[GC_ANA_CL_PORT], 512, 196, 0, 1, 4)
 
-/* ANA_CL:PORT:ADV_CL_CFG_2 */
-#define ANA_CL_ADV_CL_CFG_2(g, r) __REG(TARGET_ANA_CL,\
-	0, 1, 131072, g, 70, 512, 200, r, 6, 4)
+/* ANA_CL:PORT:ADV_CL_CFG_2 */
+#define ANA_CL_ADV_CL_CFG_2(g, r) \
+	__REG(TARGET_ANA_CL, 0, 1, regs->gaddr[GA_ANA_CL_PORT], g, \
+	      regs->gcnt[GC_ANA_CL_PORT], 512, 200, r, 6, 4)
 
 #define ANA_CL_ADV_CL_CFG_2_USE_CL_TCI0_ENA BIT(1)
 #define ANA_CL_ADV_CL_CFG_2_USE_CL_TCI0_ENA_SET(x)\
@@ -1457,9 +1559,10 @@ enum sparx5_target {
 #define ANA_CL_ADV_CL_CFG_2_USE_CL_DSCP_ENA_GET(x)\
 	FIELD_GET(ANA_CL_ADV_CL_CFG_2_USE_CL_DSCP_ENA, x)
 
-/* ANA_CL:PORT:ADV_CL_CFG */
-#define ANA_CL_ADV_CL_CFG(g, r) __REG(TARGET_ANA_CL,\
-	0, 1, 131072, g, 70, 512, 224, r, 6, 4)
+/* ANA_CL:PORT:ADV_CL_CFG */
+#define ANA_CL_ADV_CL_CFG(g, r) \
+	__REG(TARGET_ANA_CL, 0, 1, regs->gaddr[GA_ANA_CL_PORT], g, \
+	      regs->gcnt[GC_ANA_CL_PORT], 512, 224, r, 6, 4)
 
 #define ANA_CL_ADV_CL_CFG_IP4_CLM_KEY_SEL GENMASK(30, 26)
 #define ANA_CL_ADV_CL_CFG_IP4_CLM_KEY_SEL_SET(x)\
@@ -1503,9 +1606,10 @@ enum sparx5_target {
 #define ANA_CL_ADV_CL_CFG_LOOKUP_ENA_GET(x)\
 	FIELD_GET(ANA_CL_ADV_CL_CFG_LOOKUP_ENA, x)
 
-/* ANA_CL:COMMON:OWN_UPSID */
-#define ANA_CL_OWN_UPSID(r) __REG(TARGET_ANA_CL,\
-	0, 1, 166912, 0, 1, 756, 0, r, 3, 4)
+/* ANA_CL:COMMON:OWN_UPSID */
+#define ANA_CL_OWN_UPSID(r) \
+	__REG(TARGET_ANA_CL, 0, 1, regs->gaddr[GA_ANA_CL_COMMON], 0, 1, 756, 0,\
+	      r, regs->rcnt[RC_ANA_CL_OWN_UPSID], 4)
 
 #define ANA_CL_OWN_UPSID_OWN_UPSID GENMASK(4, 0)
 #define ANA_CL_OWN_UPSID_OWN_UPSID_SET(x)\
@@ -1513,9 +1617,10 @@ enum sparx5_target {
 #define ANA_CL_OWN_UPSID_OWN_UPSID_GET(x)\
 	FIELD_GET(ANA_CL_OWN_UPSID_OWN_UPSID, x)
 
-/* ANA_CL:COMMON:DSCP_CFG */
-#define ANA_CL_DSCP_CFG(r) __REG(TARGET_ANA_CL,\
-	0, 1, 166912, 0, 1, 756, 256, r, 64, 4)
+/* ANA_CL:COMMON:DSCP_CFG */
+#define ANA_CL_DSCP_CFG(r) \
+	__REG(TARGET_ANA_CL, 0, 1, regs->gaddr[GA_ANA_CL_COMMON], 0, 1, 756, \
+	      256, r, 64, 4)
 
 #define ANA_CL_DSCP_CFG_DSCP_TRANSLATE_VAL GENMASK(12, 7)
 #define ANA_CL_DSCP_CFG_DSCP_TRANSLATE_VAL_SET(x)\
@@ -1547,9 +1652,10 @@ enum sparx5_target {
 #define ANA_CL_DSCP_CFG_DSCP_TRUST_ENA_GET(x)\
 	FIELD_GET(ANA_CL_DSCP_CFG_DSCP_TRUST_ENA, x)
 
-/* ANA_CL:COMMON:QOS_MAP_CFG */
-#define ANA_CL_QOS_MAP_CFG(r) __REG(TARGET_ANA_CL,\
-	0, 1, 166912, 0, 1, 756, 512, r, 32, 4)
+/* ANA_CL:COMMON:QOS_MAP_CFG */
+#define ANA_CL_QOS_MAP_CFG(r) \
+	__REG(TARGET_ANA_CL, 0, 1, regs->gaddr[GA_ANA_CL_COMMON], 0, 1, 756, \
+	      512, r, 32, 4)
 
 #define ANA_CL_QOS_MAP_CFG_DSCP_REWR_VAL GENMASK(9, 4)
 #define ANA_CL_QOS_MAP_CFG_DSCP_REWR_VAL_SET(x)\
@@ -1557,9 +1663,10 @@ enum sparx5_target {
 #define ANA_CL_QOS_MAP_CFG_DSCP_REWR_VAL_GET(x)\
 	FIELD_GET(ANA_CL_QOS_MAP_CFG_DSCP_REWR_VAL, x)
 
-/* ANA_L2:COMMON:FWD_CFG */
-#define ANA_L2_FWD_CFG __REG(TARGET_ANA_L2,\
-	0, 1, 566024, 0, 1, 700, 0, 0, 1, 4)
+/* ANA_L2:COMMON:FWD_CFG */
+#define ANA_L2_FWD_CFG \
+	__REG(TARGET_ANA_L2, 0, 1, regs->gaddr[GA_ANA_L2_COMMON], 0, 1, \
+	      regs->gsize[GW_ANA_L2_COMMON], 0, 0, 1, 4)
 
 #define ANA_L2_FWD_CFG_MAC_TBL_SPLIT_SEL GENMASK(21, 20)
 #define ANA_L2_FWD_CFG_MAC_TBL_SPLIT_SEL_SET(x)\
@@ -1633,17 +1740,22 @@ enum sparx5_target {
 #define ANA_L2_FWD_CFG_FWD_ENA_GET(x)\
 	FIELD_GET(ANA_L2_FWD_CFG_FWD_ENA, x)
 
-/* ANA_L2:COMMON:AUTO_LRN_CFG */
-#define ANA_L2_AUTO_LRN_CFG __REG(TARGET_ANA_L2,\
-	0, 1, 566024, 0, 1, 700, 24, 0, 1, 4)
+/* ANA_L2:COMMON:AUTO_LRN_CFG */
+#define ANA_L2_AUTO_LRN_CFG \
+	__REG(TARGET_ANA_L2, 0, 1, regs->gaddr[GA_ANA_L2_COMMON], 0, 1, \
+	      regs->gsize[GW_ANA_L2_COMMON], 24, 0, 1, 4)
 
-/* ANA_L2:COMMON:AUTO_LRN_CFG1 */
-#define ANA_L2_AUTO_LRN_CFG1 __REG(TARGET_ANA_L2,\
-	0, 1, 566024, 0, 1, 700, 28, 0, 1, 4)
+/* SPARX5 ONLY */
+/* ANA_L2:COMMON:AUTO_LRN_CFG1 */
+#define ANA_L2_AUTO_LRN_CFG1 \
+	__REG(TARGET_ANA_L2, 0, 1, regs->gaddr[GA_ANA_L2_COMMON], 0, 1, \
+	      regs->gsize[GW_ANA_L2_COMMON], 28, 0, 1, 4)
 
-/* ANA_L2:COMMON:AUTO_LRN_CFG2 */
-#define ANA_L2_AUTO_LRN_CFG2 __REG(TARGET_ANA_L2,\
-	0, 1, 566024, 0, 1, 700, 32, 0, 1, 4)
+/* SPARX5 ONLY */
+/* ANA_L2:COMMON:AUTO_LRN_CFG2 */
+#define ANA_L2_AUTO_LRN_CFG2 \
+	__REG(TARGET_ANA_L2, 0, 1, regs->gaddr[GA_ANA_L2_COMMON], 0, 1, \
+	      regs->gsize[GW_ANA_L2_COMMON], 32, 0, 1, 4)
 
 #define ANA_L2_AUTO_LRN_CFG2_AUTO_LRN_ENA2 BIT(0)
 #define ANA_L2_AUTO_LRN_CFG2_AUTO_LRN_ENA2_SET(x)\
@@ -1651,9 +1763,11 @@ enum sparx5_target {
 #define ANA_L2_AUTO_LRN_CFG2_AUTO_LRN_ENA2_GET(x)\
 	FIELD_GET(ANA_L2_AUTO_LRN_CFG2_AUTO_LRN_ENA2, x)
 
-/* ANA_L2:COMMON:OWN_UPSID */
-#define ANA_L2_OWN_UPSID(r) __REG(TARGET_ANA_L2,\
-	0, 1, 566024, 0, 1, 700, 672, r, 3, 4)
+/* ANA_L2:COMMON:OWN_UPSID */
+#define ANA_L2_OWN_UPSID(r) \
+	__REG(TARGET_ANA_L2, 0, 1, regs->gaddr[GA_ANA_L2_COMMON], 0, 1, \
+	      regs->gsize[GW_ANA_L2_COMMON], 672, r, \
+	      regs->rcnt[RC_ANA_L2_OWN_UPSID], 4)
 
 #define ANA_L2_OWN_UPSID_OWN_UPSID GENMASK(4, 0)
 #define ANA_L2_OWN_UPSID_OWN_UPSID_SET(x)\
@@ -1661,29 +1775,34 @@ enum sparx5_target {
 #define ANA_L2_OWN_UPSID_OWN_UPSID_GET(x)\
 	FIELD_GET(ANA_L2_OWN_UPSID_OWN_UPSID, x)
 
-/* ANA_L2:ISDX:DLB_CFG */
-#define ANA_L2_DLB_CFG(g) __REG(TARGET_ANA_L2,\
-	0, 1, 0, g, 4096, 128, 56, 0, 1, 4)
+/* ANA_L2:ISDX:DLB_CFG */
+#define ANA_L2_DLB_CFG(g) \
+	__REG(TARGET_ANA_L2, 0, 1, 0, g, regs->gcnt[GC_ANA_L2_ISDX], 128, 56, \
+	      0, 1, 4)
 
-#define ANA_L2_DLB_CFG_DLB_IDX GENMASK(12, 0)
+#define ANA_L2_DLB_CFG_DLB_IDX\
GENMASK(regs->fsize[FW_ANA_L2_DLB_CFG_DLB_IDX] + 0 - 1, 0) #define ANA_L2_DLB_CFG_DLB_IDX_SET(x)\ - FIELD_PREP(ANA_L2_DLB_CFG_DLB_IDX, x) + spx5_field_prep(ANA_L2_DLB_CFG_DLB_IDX, x) #define ANA_L2_DLB_CFG_DLB_IDX_GET(x)\ - FIELD_GET(ANA_L2_DLB_CFG_DLB_IDX, x) + spx5_field_get(ANA_L2_DLB_CFG_DLB_IDX, x) -/* ANA_L2:ISDX:TSN_CFG */ -#define ANA_L2_TSN_CFG(g) __REG(TARGET_ANA_L2,\ - 0, 1, 0, g, 4096, 128, 100, 0, 1, 4) +/* ANA_L2:ISDX:TSN_CFG */ +#define ANA_L2_TSN_CFG(g) \ + __REG(TARGET_ANA_L2, 0, 1, 0, g, regs->gcnt[GC_ANA_L2_ISDX], 128, 100, \ + 0, 1, 4) -#define ANA_L2_TSN_CFG_TSN_SFID GENMASK(9, 0) +#define ANA_L2_TSN_CFG_TSN_SFID\ + GENMASK(regs->fsize[FW_ANA_L2_TSN_CFG_TSN_SFID] + 0 - 1, 0) #define ANA_L2_TSN_CFG_TSN_SFID_SET(x)\ - FIELD_PREP(ANA_L2_TSN_CFG_TSN_SFID, x) + spx5_field_prep(ANA_L2_TSN_CFG_TSN_SFID, x) #define ANA_L2_TSN_CFG_TSN_SFID_GET(x)\ - FIELD_GET(ANA_L2_TSN_CFG_TSN_SFID, x) + spx5_field_get(ANA_L2_TSN_CFG_TSN_SFID, x) -/* ANA_L3:COMMON:VLAN_CTRL */ -#define ANA_L3_VLAN_CTRL __REG(TARGET_ANA_L3,\ - 0, 1, 493632, 0, 1, 184, 4, 0, 1, 4) +/* ANA_L3:COMMON:VLAN_CTRL */ +#define ANA_L3_VLAN_CTRL \ + __REG(TARGET_ANA_L3, 0, 1, regs->gaddr[GA_ANA_L3_COMMON], 0, 1, 184, 4,\ + 0, 1, 4) #define ANA_L3_VLAN_CTRL_VLAN_ENA BIT(0) #define ANA_L3_VLAN_CTRL_VLAN_ENA_SET(x)\ @@ -1691,9 +1810,10 @@ enum sparx5_target { #define ANA_L3_VLAN_CTRL_VLAN_ENA_GET(x)\ FIELD_GET(ANA_L3_VLAN_CTRL_VLAN_ENA, x) -/* ANA_L3:VLAN:VLAN_CFG */ -#define ANA_L3_VLAN_CFG(g) __REG(TARGET_ANA_L3,\ - 0, 1, 0, g, 5120, 64, 8, 0, 1, 4) +/* ANA_L3:VLAN:VLAN_CFG */ +#define ANA_L3_VLAN_CFG(g) \ + __REG(TARGET_ANA_L3, 0, 1, 0, g, regs->gcnt[GC_ANA_L3_VLAN], 64, 8, 0, \ + 1, 4) #define ANA_L3_VLAN_CFG_VLAN_MSTP_PTR GENMASK(30, 24) #define ANA_L3_VLAN_CFG_VLAN_MSTP_PTR_SET(x)\ @@ -1749,17 +1869,22 @@ enum sparx5_target { #define ANA_L3_VLAN_CFG_VLAN_MIRROR_ENA_GET(x)\ FIELD_GET(ANA_L3_VLAN_CFG_VLAN_MIRROR_ENA, x) -/* ANA_L3:VLAN:VLAN_MASK_CFG */ -#define ANA_L3_VLAN_MASK_CFG(g) __REG(TARGET_ANA_L3,\ - 0, 1, 0, g, 5120, 64, 16, 0, 1, 4) +/* ANA_L3:VLAN:VLAN_MASK_CFG */ +#define ANA_L3_VLAN_MASK_CFG(g) \ + __REG(TARGET_ANA_L3, 0, 1, 0, g, regs->gcnt[GC_ANA_L3_VLAN], 64, 16, 0,\ + 1, 4) -/* ANA_L3:VLAN:VLAN_MASK_CFG1 */ -#define ANA_L3_VLAN_MASK_CFG1(g) __REG(TARGET_ANA_L3,\ - 0, 1, 0, g, 5120, 64, 20, 0, 1, 4) +/* SPARX5 ONLY */ +/* ANA_L3:VLAN:VLAN_MASK_CFG1 */ +#define ANA_L3_VLAN_MASK_CFG1(g) \ + __REG(TARGET_ANA_L3, 0, 1, 0, g, regs->gcnt[GC_ANA_L3_VLAN], 64, 20, 0,\ + 1, 4) -/* ANA_L3:VLAN:VLAN_MASK_CFG2 */ -#define ANA_L3_VLAN_MASK_CFG2(g) __REG(TARGET_ANA_L3,\ - 0, 1, 0, g, 5120, 64, 24, 0, 1, 4) +/* SPARX5 ONLY */ +/* ANA_L3:VLAN:VLAN_MASK_CFG2 */ +#define ANA_L3_VLAN_MASK_CFG2(g) \ + __REG(TARGET_ANA_L3, 0, 1, 0, g, regs->gcnt[GC_ANA_L3_VLAN], 64, 24, 0,\ + 1, 4) #define ANA_L3_VLAN_MASK_CFG2_VLAN_PORT_MASK2 BIT(0) #define ANA_L3_VLAN_MASK_CFG2_VLAN_PORT_MASK2_SET(x)\ @@ -1767,365 +1892,455 @@ enum sparx5_target { #define ANA_L3_VLAN_MASK_CFG2_VLAN_PORT_MASK2_GET(x)\ FIELD_GET(ANA_L3_VLAN_MASK_CFG2_VLAN_PORT_MASK2, x) -/* ASM:DEV_STATISTICS:RX_IN_BYTES_CNT */ -#define ASM_RX_IN_BYTES_CNT(g) __REG(TARGET_ASM,\ - 0, 1, 0, g, 65, 512, 0, 0, 1, 4) - -/* ASM:DEV_STATISTICS:RX_SYMBOL_ERR_CNT */ -#define ASM_RX_SYMBOL_ERR_CNT(g) __REG(TARGET_ASM,\ - 0, 1, 0, g, 65, 512, 4, 0, 1, 4) - -/* ASM:DEV_STATISTICS:RX_PAUSE_CNT */ -#define ASM_RX_PAUSE_CNT(g) __REG(TARGET_ASM,\ - 0, 1, 0, g, 65, 512, 8, 0, 1, 4) - -/* ASM:DEV_STATISTICS:RX_UNSUP_OPCODE_CNT */ -#define ASM_RX_UNSUP_OPCODE_CNT(g) __REG(TARGET_ASM,\ - 0, 1, 
0, g, 65, 512, 12, 0, 1, 4) - -/* ASM:DEV_STATISTICS:RX_OK_BYTES_CNT */ -#define ASM_RX_OK_BYTES_CNT(g) __REG(TARGET_ASM,\ - 0, 1, 0, g, 65, 512, 16, 0, 1, 4) - -/* ASM:DEV_STATISTICS:RX_BAD_BYTES_CNT */ -#define ASM_RX_BAD_BYTES_CNT(g) __REG(TARGET_ASM,\ - 0, 1, 0, g, 65, 512, 20, 0, 1, 4) - -/* ASM:DEV_STATISTICS:RX_UC_CNT */ -#define ASM_RX_UC_CNT(g) __REG(TARGET_ASM,\ - 0, 1, 0, g, 65, 512, 24, 0, 1, 4) - -/* ASM:DEV_STATISTICS:RX_MC_CNT */ -#define ASM_RX_MC_CNT(g) __REG(TARGET_ASM,\ - 0, 1, 0, g, 65, 512, 28, 0, 1, 4) - -/* ASM:DEV_STATISTICS:RX_BC_CNT */ -#define ASM_RX_BC_CNT(g) __REG(TARGET_ASM,\ - 0, 1, 0, g, 65, 512, 32, 0, 1, 4) - -/* ASM:DEV_STATISTICS:RX_CRC_ERR_CNT */ -#define ASM_RX_CRC_ERR_CNT(g) __REG(TARGET_ASM,\ - 0, 1, 0, g, 65, 512, 36, 0, 1, 4) - -/* ASM:DEV_STATISTICS:RX_UNDERSIZE_CNT */ -#define ASM_RX_UNDERSIZE_CNT(g) __REG(TARGET_ASM,\ - 0, 1, 0, g, 65, 512, 40, 0, 1, 4) - -/* ASM:DEV_STATISTICS:RX_FRAGMENTS_CNT */ -#define ASM_RX_FRAGMENTS_CNT(g) __REG(TARGET_ASM,\ - 0, 1, 0, g, 65, 512, 44, 0, 1, 4) - -/* ASM:DEV_STATISTICS:RX_IN_RANGE_LEN_ERR_CNT */ -#define ASM_RX_IN_RANGE_LEN_ERR_CNT(g) __REG(TARGET_ASM,\ - 0, 1, 0, g, 65, 512, 48, 0, 1, 4) - -/* ASM:DEV_STATISTICS:RX_OUT_OF_RANGE_LEN_ERR_CNT */ -#define ASM_RX_OUT_OF_RANGE_LEN_ERR_CNT(g) __REG(TARGET_ASM,\ - 0, 1, 0, g, 65, 512, 52, 0, 1, 4) - -/* ASM:DEV_STATISTICS:RX_OVERSIZE_CNT */ -#define ASM_RX_OVERSIZE_CNT(g) __REG(TARGET_ASM,\ - 0, 1, 0, g, 65, 512, 56, 0, 1, 4) - -/* ASM:DEV_STATISTICS:RX_JABBERS_CNT */ -#define ASM_RX_JABBERS_CNT(g) __REG(TARGET_ASM,\ - 0, 1, 0, g, 65, 512, 60, 0, 1, 4) - -/* ASM:DEV_STATISTICS:RX_SIZE64_CNT */ -#define ASM_RX_SIZE64_CNT(g) __REG(TARGET_ASM,\ - 0, 1, 0, g, 65, 512, 64, 0, 1, 4) - -/* ASM:DEV_STATISTICS:RX_SIZE65TO127_CNT */ -#define ASM_RX_SIZE65TO127_CNT(g) __REG(TARGET_ASM,\ - 0, 1, 0, g, 65, 512, 68, 0, 1, 4) - -/* ASM:DEV_STATISTICS:RX_SIZE128TO255_CNT */ -#define ASM_RX_SIZE128TO255_CNT(g) __REG(TARGET_ASM,\ - 0, 1, 0, g, 65, 512, 72, 0, 1, 4) - -/* ASM:DEV_STATISTICS:RX_SIZE256TO511_CNT */ -#define ASM_RX_SIZE256TO511_CNT(g) __REG(TARGET_ASM,\ - 0, 1, 0, g, 65, 512, 76, 0, 1, 4) - -/* ASM:DEV_STATISTICS:RX_SIZE512TO1023_CNT */ -#define ASM_RX_SIZE512TO1023_CNT(g) __REG(TARGET_ASM,\ - 0, 1, 0, g, 65, 512, 80, 0, 1, 4) - -/* ASM:DEV_STATISTICS:RX_SIZE1024TO1518_CNT */ -#define ASM_RX_SIZE1024TO1518_CNT(g) __REG(TARGET_ASM,\ - 0, 1, 0, g, 65, 512, 84, 0, 1, 4) - -/* ASM:DEV_STATISTICS:RX_SIZE1519TOMAX_CNT */ -#define ASM_RX_SIZE1519TOMAX_CNT(g) __REG(TARGET_ASM,\ - 0, 1, 0, g, 65, 512, 88, 0, 1, 4) - -/* ASM:DEV_STATISTICS:RX_IPG_SHRINK_CNT */ -#define ASM_RX_IPG_SHRINK_CNT(g) __REG(TARGET_ASM,\ - 0, 1, 0, g, 65, 512, 92, 0, 1, 4) - -/* ASM:DEV_STATISTICS:TX_OUT_BYTES_CNT */ -#define ASM_TX_OUT_BYTES_CNT(g) __REG(TARGET_ASM,\ - 0, 1, 0, g, 65, 512, 96, 0, 1, 4) - -/* ASM:DEV_STATISTICS:TX_PAUSE_CNT */ -#define ASM_TX_PAUSE_CNT(g) __REG(TARGET_ASM,\ - 0, 1, 0, g, 65, 512, 100, 0, 1, 4) - -/* ASM:DEV_STATISTICS:TX_OK_BYTES_CNT */ -#define ASM_TX_OK_BYTES_CNT(g) __REG(TARGET_ASM,\ - 0, 1, 0, g, 65, 512, 104, 0, 1, 4) - -/* ASM:DEV_STATISTICS:TX_UC_CNT */ -#define ASM_TX_UC_CNT(g) __REG(TARGET_ASM,\ - 0, 1, 0, g, 65, 512, 108, 0, 1, 4) - -/* ASM:DEV_STATISTICS:TX_MC_CNT */ -#define ASM_TX_MC_CNT(g) __REG(TARGET_ASM,\ - 0, 1, 0, g, 65, 512, 112, 0, 1, 4) - -/* ASM:DEV_STATISTICS:TX_BC_CNT */ -#define ASM_TX_BC_CNT(g) __REG(TARGET_ASM,\ - 0, 1, 0, g, 65, 512, 116, 0, 1, 4) - -/* ASM:DEV_STATISTICS:TX_SIZE64_CNT */ -#define ASM_TX_SIZE64_CNT(g) __REG(TARGET_ASM,\ - 0, 
1, 0, g, 65, 512, 120, 0, 1, 4) - -/* ASM:DEV_STATISTICS:TX_SIZE65TO127_CNT */ -#define ASM_TX_SIZE65TO127_CNT(g) __REG(TARGET_ASM,\ - 0, 1, 0, g, 65, 512, 124, 0, 1, 4) - -/* ASM:DEV_STATISTICS:TX_SIZE128TO255_CNT */ -#define ASM_TX_SIZE128TO255_CNT(g) __REG(TARGET_ASM,\ - 0, 1, 0, g, 65, 512, 128, 0, 1, 4) - -/* ASM:DEV_STATISTICS:TX_SIZE256TO511_CNT */ -#define ASM_TX_SIZE256TO511_CNT(g) __REG(TARGET_ASM,\ - 0, 1, 0, g, 65, 512, 132, 0, 1, 4) - -/* ASM:DEV_STATISTICS:TX_SIZE512TO1023_CNT */ -#define ASM_TX_SIZE512TO1023_CNT(g) __REG(TARGET_ASM,\ - 0, 1, 0, g, 65, 512, 136, 0, 1, 4) - -/* ASM:DEV_STATISTICS:TX_SIZE1024TO1518_CNT */ -#define ASM_TX_SIZE1024TO1518_CNT(g) __REG(TARGET_ASM,\ - 0, 1, 0, g, 65, 512, 140, 0, 1, 4) - -/* ASM:DEV_STATISTICS:TX_SIZE1519TOMAX_CNT */ -#define ASM_TX_SIZE1519TOMAX_CNT(g) __REG(TARGET_ASM,\ - 0, 1, 0, g, 65, 512, 144, 0, 1, 4) - -/* ASM:DEV_STATISTICS:RX_ALIGNMENT_LOST_CNT */ -#define ASM_RX_ALIGNMENT_LOST_CNT(g) __REG(TARGET_ASM,\ - 0, 1, 0, g, 65, 512, 148, 0, 1, 4) - -/* ASM:DEV_STATISTICS:RX_TAGGED_FRMS_CNT */ -#define ASM_RX_TAGGED_FRMS_CNT(g) __REG(TARGET_ASM,\ - 0, 1, 0, g, 65, 512, 152, 0, 1, 4) - -/* ASM:DEV_STATISTICS:RX_UNTAGGED_FRMS_CNT */ -#define ASM_RX_UNTAGGED_FRMS_CNT(g) __REG(TARGET_ASM,\ - 0, 1, 0, g, 65, 512, 156, 0, 1, 4) - -/* ASM:DEV_STATISTICS:TX_TAGGED_FRMS_CNT */ -#define ASM_TX_TAGGED_FRMS_CNT(g) __REG(TARGET_ASM,\ - 0, 1, 0, g, 65, 512, 160, 0, 1, 4) - -/* ASM:DEV_STATISTICS:TX_UNTAGGED_FRMS_CNT */ -#define ASM_TX_UNTAGGED_FRMS_CNT(g) __REG(TARGET_ASM,\ - 0, 1, 0, g, 65, 512, 164, 0, 1, 4) - -/* ASM:DEV_STATISTICS:PMAC_RX_SYMBOL_ERR_CNT */ -#define ASM_PMAC_RX_SYMBOL_ERR_CNT(g) __REG(TARGET_ASM,\ - 0, 1, 0, g, 65, 512, 168, 0, 1, 4) - -/* ASM:DEV_STATISTICS:PMAC_RX_PAUSE_CNT */ -#define ASM_PMAC_RX_PAUSE_CNT(g) __REG(TARGET_ASM,\ - 0, 1, 0, g, 65, 512, 172, 0, 1, 4) - -/* ASM:DEV_STATISTICS:PMAC_RX_UNSUP_OPCODE_CNT */ -#define ASM_PMAC_RX_UNSUP_OPCODE_CNT(g) __REG(TARGET_ASM,\ - 0, 1, 0, g, 65, 512, 176, 0, 1, 4) - -/* ASM:DEV_STATISTICS:PMAC_RX_OK_BYTES_CNT */ -#define ASM_PMAC_RX_OK_BYTES_CNT(g) __REG(TARGET_ASM,\ - 0, 1, 0, g, 65, 512, 180, 0, 1, 4) - -/* ASM:DEV_STATISTICS:PMAC_RX_BAD_BYTES_CNT */ -#define ASM_PMAC_RX_BAD_BYTES_CNT(g) __REG(TARGET_ASM,\ - 0, 1, 0, g, 65, 512, 184, 0, 1, 4) - -/* ASM:DEV_STATISTICS:PMAC_RX_UC_CNT */ -#define ASM_PMAC_RX_UC_CNT(g) __REG(TARGET_ASM,\ - 0, 1, 0, g, 65, 512, 188, 0, 1, 4) - -/* ASM:DEV_STATISTICS:PMAC_RX_MC_CNT */ -#define ASM_PMAC_RX_MC_CNT(g) __REG(TARGET_ASM,\ - 0, 1, 0, g, 65, 512, 192, 0, 1, 4) - -/* ASM:DEV_STATISTICS:PMAC_RX_BC_CNT */ -#define ASM_PMAC_RX_BC_CNT(g) __REG(TARGET_ASM,\ - 0, 1, 0, g, 65, 512, 196, 0, 1, 4) - -/* ASM:DEV_STATISTICS:PMAC_RX_CRC_ERR_CNT */ -#define ASM_PMAC_RX_CRC_ERR_CNT(g) __REG(TARGET_ASM,\ - 0, 1, 0, g, 65, 512, 200, 0, 1, 4) - -/* ASM:DEV_STATISTICS:PMAC_RX_UNDERSIZE_CNT */ -#define ASM_PMAC_RX_UNDERSIZE_CNT(g) __REG(TARGET_ASM,\ - 0, 1, 0, g, 65, 512, 204, 0, 1, 4) - -/* ASM:DEV_STATISTICS:PMAC_RX_FRAGMENTS_CNT */ -#define ASM_PMAC_RX_FRAGMENTS_CNT(g) __REG(TARGET_ASM,\ - 0, 1, 0, g, 65, 512, 208, 0, 1, 4) - -/* ASM:DEV_STATISTICS:PMAC_RX_IN_RANGE_LEN_ERR_CNT */ -#define ASM_PMAC_RX_IN_RANGE_LEN_ERR_CNT(g) __REG(TARGET_ASM,\ - 0, 1, 0, g, 65, 512, 212, 0, 1, 4) - -/* ASM:DEV_STATISTICS:PMAC_RX_OUT_OF_RANGE_LEN_ERR_CNT */ -#define ASM_PMAC_RX_OUT_OF_RANGE_LEN_ERR_CNT(g) __REG(TARGET_ASM,\ - 0, 1, 0, g, 65, 512, 216, 0, 1, 4) - -/* ASM:DEV_STATISTICS:PMAC_RX_OVERSIZE_CNT */ -#define ASM_PMAC_RX_OVERSIZE_CNT(g) __REG(TARGET_ASM,\ - 0, 1, 
0, g, 65, 512, 220, 0, 1, 4) - -/* ASM:DEV_STATISTICS:PMAC_RX_JABBERS_CNT */ -#define ASM_PMAC_RX_JABBERS_CNT(g) __REG(TARGET_ASM,\ - 0, 1, 0, g, 65, 512, 224, 0, 1, 4) - -/* ASM:DEV_STATISTICS:PMAC_RX_SIZE64_CNT */ -#define ASM_PMAC_RX_SIZE64_CNT(g) __REG(TARGET_ASM,\ - 0, 1, 0, g, 65, 512, 228, 0, 1, 4) - -/* ASM:DEV_STATISTICS:PMAC_RX_SIZE65TO127_CNT */ -#define ASM_PMAC_RX_SIZE65TO127_CNT(g) __REG(TARGET_ASM,\ - 0, 1, 0, g, 65, 512, 232, 0, 1, 4) - -/* ASM:DEV_STATISTICS:PMAC_RX_SIZE128TO255_CNT */ -#define ASM_PMAC_RX_SIZE128TO255_CNT(g) __REG(TARGET_ASM,\ - 0, 1, 0, g, 65, 512, 236, 0, 1, 4) - -/* ASM:DEV_STATISTICS:PMAC_RX_SIZE256TO511_CNT */ -#define ASM_PMAC_RX_SIZE256TO511_CNT(g) __REG(TARGET_ASM,\ - 0, 1, 0, g, 65, 512, 240, 0, 1, 4) - -/* ASM:DEV_STATISTICS:PMAC_RX_SIZE512TO1023_CNT */ -#define ASM_PMAC_RX_SIZE512TO1023_CNT(g) __REG(TARGET_ASM,\ - 0, 1, 0, g, 65, 512, 244, 0, 1, 4) - -/* ASM:DEV_STATISTICS:PMAC_RX_SIZE1024TO1518_CNT */ -#define ASM_PMAC_RX_SIZE1024TO1518_CNT(g) __REG(TARGET_ASM,\ - 0, 1, 0, g, 65, 512, 248, 0, 1, 4) - -/* ASM:DEV_STATISTICS:PMAC_RX_SIZE1519TOMAX_CNT */ -#define ASM_PMAC_RX_SIZE1519TOMAX_CNT(g) __REG(TARGET_ASM,\ - 0, 1, 0, g, 65, 512, 252, 0, 1, 4) - -/* ASM:DEV_STATISTICS:PMAC_TX_PAUSE_CNT */ -#define ASM_PMAC_TX_PAUSE_CNT(g) __REG(TARGET_ASM,\ - 0, 1, 0, g, 65, 512, 256, 0, 1, 4) - -/* ASM:DEV_STATISTICS:PMAC_TX_OK_BYTES_CNT */ -#define ASM_PMAC_TX_OK_BYTES_CNT(g) __REG(TARGET_ASM,\ - 0, 1, 0, g, 65, 512, 260, 0, 1, 4) - -/* ASM:DEV_STATISTICS:PMAC_TX_UC_CNT */ -#define ASM_PMAC_TX_UC_CNT(g) __REG(TARGET_ASM,\ - 0, 1, 0, g, 65, 512, 264, 0, 1, 4) - -/* ASM:DEV_STATISTICS:PMAC_TX_MC_CNT */ -#define ASM_PMAC_TX_MC_CNT(g) __REG(TARGET_ASM,\ - 0, 1, 0, g, 65, 512, 268, 0, 1, 4) - -/* ASM:DEV_STATISTICS:PMAC_TX_BC_CNT */ -#define ASM_PMAC_TX_BC_CNT(g) __REG(TARGET_ASM,\ - 0, 1, 0, g, 65, 512, 272, 0, 1, 4) - -/* ASM:DEV_STATISTICS:PMAC_TX_SIZE64_CNT */ -#define ASM_PMAC_TX_SIZE64_CNT(g) __REG(TARGET_ASM,\ - 0, 1, 0, g, 65, 512, 276, 0, 1, 4) - -/* ASM:DEV_STATISTICS:PMAC_TX_SIZE65TO127_CNT */ -#define ASM_PMAC_TX_SIZE65TO127_CNT(g) __REG(TARGET_ASM,\ - 0, 1, 0, g, 65, 512, 280, 0, 1, 4) - -/* ASM:DEV_STATISTICS:PMAC_TX_SIZE128TO255_CNT */ -#define ASM_PMAC_TX_SIZE128TO255_CNT(g) __REG(TARGET_ASM,\ - 0, 1, 0, g, 65, 512, 284, 0, 1, 4) - -/* ASM:DEV_STATISTICS:PMAC_TX_SIZE256TO511_CNT */ -#define ASM_PMAC_TX_SIZE256TO511_CNT(g) __REG(TARGET_ASM,\ - 0, 1, 0, g, 65, 512, 288, 0, 1, 4) - -/* ASM:DEV_STATISTICS:PMAC_TX_SIZE512TO1023_CNT */ -#define ASM_PMAC_TX_SIZE512TO1023_CNT(g) __REG(TARGET_ASM,\ - 0, 1, 0, g, 65, 512, 292, 0, 1, 4) - -/* ASM:DEV_STATISTICS:PMAC_TX_SIZE1024TO1518_CNT */ -#define ASM_PMAC_TX_SIZE1024TO1518_CNT(g) __REG(TARGET_ASM,\ - 0, 1, 0, g, 65, 512, 296, 0, 1, 4) - -/* ASM:DEV_STATISTICS:PMAC_TX_SIZE1519TOMAX_CNT */ -#define ASM_PMAC_TX_SIZE1519TOMAX_CNT(g) __REG(TARGET_ASM,\ - 0, 1, 0, g, 65, 512, 300, 0, 1, 4) - -/* ASM:DEV_STATISTICS:PMAC_RX_ALIGNMENT_LOST_CNT */ -#define ASM_PMAC_RX_ALIGNMENT_LOST_CNT(g) __REG(TARGET_ASM,\ - 0, 1, 0, g, 65, 512, 304, 0, 1, 4) - -/* ASM:DEV_STATISTICS:MM_RX_ASSEMBLY_ERR_CNT */ -#define ASM_MM_RX_ASSEMBLY_ERR_CNT(g) __REG(TARGET_ASM,\ - 0, 1, 0, g, 65, 512, 308, 0, 1, 4) - -/* ASM:DEV_STATISTICS:MM_RX_SMD_ERR_CNT */ -#define ASM_MM_RX_SMD_ERR_CNT(g) __REG(TARGET_ASM,\ - 0, 1, 0, g, 65, 512, 312, 0, 1, 4) - -/* ASM:DEV_STATISTICS:MM_RX_ASSEMBLY_OK_CNT */ -#define ASM_MM_RX_ASSEMBLY_OK_CNT(g) __REG(TARGET_ASM,\ - 0, 1, 0, g, 65, 512, 316, 0, 1, 4) - -/* ASM:DEV_STATISTICS:MM_RX_MERGE_FRAG_CNT */ 
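Across these hunks, every hard-coded replication parameter is being replaced by a lookup into a per-chip descriptor: regs->gaddr[] supplies group base addresses, regs->gcnt[] group replication counts, regs->gsize[] group widths, and regs->raddr[]/regs->rcnt[] register offsets and counts, so one macro serves chips whose register maps differ only in these numbers. A minimal sketch of how such a descriptor-driven layout resolves to a byte offset; the helper name and argument names are assumptions for illustration, not the driver's actual __REG() body:

	#include <linux/types.h>

	/* Sketch: byte offset of replicated register instance (ginst, rinst),
	 * computed from the descriptor-supplied layout parameters.
	 */
	static inline u32 reg_offset(u32 gbase, u32 ginst, u32 gwidth,
				     u32 raddr, u32 rinst, u32 rwidth)
	{
		return gbase + ginst * gwidth + raddr + rinst * rwidth;
	}

For ASM_RX_IN_BYTES_CNT(g) below this still resolves to g * 512 as before; what changes is only the legal range of g, now taken from regs->gcnt[GC_ASM_DEV_STATISTICS] instead of the constant 65.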
-#define ASM_MM_RX_MERGE_FRAG_CNT(g) __REG(TARGET_ASM,\ - 0, 1, 0, g, 65, 512, 320, 0, 1, 4) - -/* ASM:DEV_STATISTICS:MM_TX_PFRAGMENT_CNT */ -#define ASM_MM_TX_PFRAGMENT_CNT(g) __REG(TARGET_ASM,\ - 0, 1, 0, g, 65, 512, 324, 0, 1, 4) - -/* ASM:DEV_STATISTICS:TX_MULTI_COLL_CNT */ -#define ASM_TX_MULTI_COLL_CNT(g) __REG(TARGET_ASM,\ - 0, 1, 0, g, 65, 512, 328, 0, 1, 4) - -/* ASM:DEV_STATISTICS:TX_LATE_COLL_CNT */ -#define ASM_TX_LATE_COLL_CNT(g) __REG(TARGET_ASM,\ - 0, 1, 0, g, 65, 512, 332, 0, 1, 4) - -/* ASM:DEV_STATISTICS:TX_XCOLL_CNT */ -#define ASM_TX_XCOLL_CNT(g) __REG(TARGET_ASM,\ - 0, 1, 0, g, 65, 512, 336, 0, 1, 4) - -/* ASM:DEV_STATISTICS:TX_DEFER_CNT */ -#define ASM_TX_DEFER_CNT(g) __REG(TARGET_ASM,\ - 0, 1, 0, g, 65, 512, 340, 0, 1, 4) - -/* ASM:DEV_STATISTICS:TX_XDEFER_CNT */ -#define ASM_TX_XDEFER_CNT(g) __REG(TARGET_ASM,\ - 0, 1, 0, g, 65, 512, 344, 0, 1, 4) - -/* ASM:DEV_STATISTICS:TX_BACKOFF1_CNT */ -#define ASM_TX_BACKOFF1_CNT(g) __REG(TARGET_ASM,\ - 0, 1, 0, g, 65, 512, 348, 0, 1, 4) - -/* ASM:DEV_STATISTICS:TX_CSENSE_CNT */ -#define ASM_TX_CSENSE_CNT(g) __REG(TARGET_ASM,\ - 0, 1, 0, g, 65, 512, 352, 0, 1, 4) - -/* ASM:DEV_STATISTICS:RX_IN_BYTES_MSB_CNT */ -#define ASM_RX_IN_BYTES_MSB_CNT(g) __REG(TARGET_ASM,\ - 0, 1, 0, g, 65, 512, 356, 0, 1, 4) +/* ASM:DEV_STATISTICS:RX_IN_BYTES_CNT */ +#define ASM_RX_IN_BYTES_CNT(g) \ + __REG(TARGET_ASM, 0, 1, 0, g, regs->gcnt[GC_ASM_DEV_STATISTICS], 512, \ + 0, 0, 1, 4) + +/* ASM:DEV_STATISTICS:RX_SYMBOL_ERR_CNT */ +#define ASM_RX_SYMBOL_ERR_CNT(g) \ + __REG(TARGET_ASM, 0, 1, 0, g, regs->gcnt[GC_ASM_DEV_STATISTICS], 512, \ + 4, 0, 1, 4) + +/* ASM:DEV_STATISTICS:RX_PAUSE_CNT */ +#define ASM_RX_PAUSE_CNT(g) \ + __REG(TARGET_ASM, 0, 1, 0, g, regs->gcnt[GC_ASM_DEV_STATISTICS], 512, \ + 8, 0, 1, 4) + +/* ASM:DEV_STATISTICS:RX_UNSUP_OPCODE_CNT */ +#define ASM_RX_UNSUP_OPCODE_CNT(g) \ + __REG(TARGET_ASM, 0, 1, 0, g, regs->gcnt[GC_ASM_DEV_STATISTICS], 512, \ + 12, 0, 1, 4) + +/* ASM:DEV_STATISTICS:RX_OK_BYTES_CNT */ +#define ASM_RX_OK_BYTES_CNT(g) \ + __REG(TARGET_ASM, 0, 1, 0, g, regs->gcnt[GC_ASM_DEV_STATISTICS], 512, \ + 16, 0, 1, 4) + +/* ASM:DEV_STATISTICS:RX_BAD_BYTES_CNT */ +#define ASM_RX_BAD_BYTES_CNT(g) \ + __REG(TARGET_ASM, 0, 1, 0, g, regs->gcnt[GC_ASM_DEV_STATISTICS], 512, \ + 20, 0, 1, 4) + +/* ASM:DEV_STATISTICS:RX_UC_CNT */ +#define ASM_RX_UC_CNT(g) \ + __REG(TARGET_ASM, 0, 1, 0, g, regs->gcnt[GC_ASM_DEV_STATISTICS], 512, \ + 24, 0, 1, 4) + +/* ASM:DEV_STATISTICS:RX_MC_CNT */ +#define ASM_RX_MC_CNT(g) \ + __REG(TARGET_ASM, 0, 1, 0, g, regs->gcnt[GC_ASM_DEV_STATISTICS], 512, \ + 28, 0, 1, 4) + +/* ASM:DEV_STATISTICS:RX_BC_CNT */ +#define ASM_RX_BC_CNT(g) \ + __REG(TARGET_ASM, 0, 1, 0, g, regs->gcnt[GC_ASM_DEV_STATISTICS], 512, \ + 32, 0, 1, 4) + +/* ASM:DEV_STATISTICS:RX_CRC_ERR_CNT */ +#define ASM_RX_CRC_ERR_CNT(g) \ + __REG(TARGET_ASM, 0, 1, 0, g, regs->gcnt[GC_ASM_DEV_STATISTICS], 512, \ + 36, 0, 1, 4) + +/* ASM:DEV_STATISTICS:RX_UNDERSIZE_CNT */ +#define ASM_RX_UNDERSIZE_CNT(g) \ + __REG(TARGET_ASM, 0, 1, 0, g, regs->gcnt[GC_ASM_DEV_STATISTICS], 512, \ + 40, 0, 1, 4) + +/* ASM:DEV_STATISTICS:RX_FRAGMENTS_CNT */ +#define ASM_RX_FRAGMENTS_CNT(g) \ + __REG(TARGET_ASM, 0, 1, 0, g, regs->gcnt[GC_ASM_DEV_STATISTICS], 512, \ + 44, 0, 1, 4) + +/* ASM:DEV_STATISTICS:RX_IN_RANGE_LEN_ERR_CNT */ +#define ASM_RX_IN_RANGE_LEN_ERR_CNT(g) \ + __REG(TARGET_ASM, 0, 1, 0, g, regs->gcnt[GC_ASM_DEV_STATISTICS], 512, \ + 48, 0, 1, 4) + +/* ASM:DEV_STATISTICS:RX_OUT_OF_RANGE_LEN_ERR_CNT */ +#define ASM_RX_OUT_OF_RANGE_LEN_ERR_CNT(g) \ + 
__REG(TARGET_ASM, 0, 1, 0, g, regs->gcnt[GC_ASM_DEV_STATISTICS], 512, \ + 52, 0, 1, 4) + +/* ASM:DEV_STATISTICS:RX_OVERSIZE_CNT */ +#define ASM_RX_OVERSIZE_CNT(g) \ + __REG(TARGET_ASM, 0, 1, 0, g, regs->gcnt[GC_ASM_DEV_STATISTICS], 512, \ + 56, 0, 1, 4) + +/* ASM:DEV_STATISTICS:RX_JABBERS_CNT */ +#define ASM_RX_JABBERS_CNT(g) \ + __REG(TARGET_ASM, 0, 1, 0, g, regs->gcnt[GC_ASM_DEV_STATISTICS], 512, \ + 60, 0, 1, 4) + +/* ASM:DEV_STATISTICS:RX_SIZE64_CNT */ +#define ASM_RX_SIZE64_CNT(g) \ + __REG(TARGET_ASM, 0, 1, 0, g, regs->gcnt[GC_ASM_DEV_STATISTICS], 512, \ + 64, 0, 1, 4) + +/* ASM:DEV_STATISTICS:RX_SIZE65TO127_CNT */ +#define ASM_RX_SIZE65TO127_CNT(g) \ + __REG(TARGET_ASM, 0, 1, 0, g, regs->gcnt[GC_ASM_DEV_STATISTICS], 512, \ + 68, 0, 1, 4) + +/* ASM:DEV_STATISTICS:RX_SIZE128TO255_CNT */ +#define ASM_RX_SIZE128TO255_CNT(g) \ + __REG(TARGET_ASM, 0, 1, 0, g, regs->gcnt[GC_ASM_DEV_STATISTICS], 512, \ + 72, 0, 1, 4) + +/* ASM:DEV_STATISTICS:RX_SIZE256TO511_CNT */ +#define ASM_RX_SIZE256TO511_CNT(g) \ + __REG(TARGET_ASM, 0, 1, 0, g, regs->gcnt[GC_ASM_DEV_STATISTICS], 512, \ + 76, 0, 1, 4) + +/* ASM:DEV_STATISTICS:RX_SIZE512TO1023_CNT */ +#define ASM_RX_SIZE512TO1023_CNT(g) \ + __REG(TARGET_ASM, 0, 1, 0, g, regs->gcnt[GC_ASM_DEV_STATISTICS], 512, \ + 80, 0, 1, 4) + +/* ASM:DEV_STATISTICS:RX_SIZE1024TO1518_CNT */ +#define ASM_RX_SIZE1024TO1518_CNT(g) \ + __REG(TARGET_ASM, 0, 1, 0, g, regs->gcnt[GC_ASM_DEV_STATISTICS], 512, \ + 84, 0, 1, 4) + +/* ASM:DEV_STATISTICS:RX_SIZE1519TOMAX_CNT */ +#define ASM_RX_SIZE1519TOMAX_CNT(g) \ + __REG(TARGET_ASM, 0, 1, 0, g, regs->gcnt[GC_ASM_DEV_STATISTICS], 512, \ + 88, 0, 1, 4) + +/* ASM:DEV_STATISTICS:RX_IPG_SHRINK_CNT */ +#define ASM_RX_IPG_SHRINK_CNT(g) \ + __REG(TARGET_ASM, 0, 1, 0, g, regs->gcnt[GC_ASM_DEV_STATISTICS], 512, \ + 92, 0, 1, 4) + +/* ASM:DEV_STATISTICS:TX_OUT_BYTES_CNT */ +#define ASM_TX_OUT_BYTES_CNT(g) \ + __REG(TARGET_ASM, 0, 1, 0, g, regs->gcnt[GC_ASM_DEV_STATISTICS], 512, \ + 96, 0, 1, 4) + +/* ASM:DEV_STATISTICS:TX_PAUSE_CNT */ +#define ASM_TX_PAUSE_CNT(g) \ + __REG(TARGET_ASM, 0, 1, 0, g, regs->gcnt[GC_ASM_DEV_STATISTICS], 512, \ + 100, 0, 1, 4) + +/* ASM:DEV_STATISTICS:TX_OK_BYTES_CNT */ +#define ASM_TX_OK_BYTES_CNT(g) \ + __REG(TARGET_ASM, 0, 1, 0, g, regs->gcnt[GC_ASM_DEV_STATISTICS], 512, \ + 104, 0, 1, 4) + +/* ASM:DEV_STATISTICS:TX_UC_CNT */ +#define ASM_TX_UC_CNT(g) \ + __REG(TARGET_ASM, 0, 1, 0, g, regs->gcnt[GC_ASM_DEV_STATISTICS], 512, \ + 108, 0, 1, 4) + +/* ASM:DEV_STATISTICS:TX_MC_CNT */ +#define ASM_TX_MC_CNT(g) \ + __REG(TARGET_ASM, 0, 1, 0, g, regs->gcnt[GC_ASM_DEV_STATISTICS], 512, \ + 112, 0, 1, 4) + +/* ASM:DEV_STATISTICS:TX_BC_CNT */ +#define ASM_TX_BC_CNT(g) \ + __REG(TARGET_ASM, 0, 1, 0, g, regs->gcnt[GC_ASM_DEV_STATISTICS], 512, \ + 116, 0, 1, 4) + +/* ASM:DEV_STATISTICS:TX_SIZE64_CNT */ +#define ASM_TX_SIZE64_CNT(g) \ + __REG(TARGET_ASM, 0, 1, 0, g, regs->gcnt[GC_ASM_DEV_STATISTICS], 512, \ + 120, 0, 1, 4) + +/* ASM:DEV_STATISTICS:TX_SIZE65TO127_CNT */ +#define ASM_TX_SIZE65TO127_CNT(g) \ + __REG(TARGET_ASM, 0, 1, 0, g, regs->gcnt[GC_ASM_DEV_STATISTICS], 512, \ + 124, 0, 1, 4) + +/* ASM:DEV_STATISTICS:TX_SIZE128TO255_CNT */ +#define ASM_TX_SIZE128TO255_CNT(g) \ + __REG(TARGET_ASM, 0, 1, 0, g, regs->gcnt[GC_ASM_DEV_STATISTICS], 512, \ + 128, 0, 1, 4) + +/* ASM:DEV_STATISTICS:TX_SIZE256TO511_CNT */ +#define ASM_TX_SIZE256TO511_CNT(g) \ + __REG(TARGET_ASM, 0, 1, 0, g, regs->gcnt[GC_ASM_DEV_STATISTICS], 512, \ + 132, 0, 1, 4) + +/* ASM:DEV_STATISTICS:TX_SIZE512TO1023_CNT */ +#define ASM_TX_SIZE512TO1023_CNT(g) \ 
+ __REG(TARGET_ASM, 0, 1, 0, g, regs->gcnt[GC_ASM_DEV_STATISTICS], 512, \ + 136, 0, 1, 4) + +/* ASM:DEV_STATISTICS:TX_SIZE1024TO1518_CNT */ +#define ASM_TX_SIZE1024TO1518_CNT(g) \ + __REG(TARGET_ASM, 0, 1, 0, g, regs->gcnt[GC_ASM_DEV_STATISTICS], 512, \ + 140, 0, 1, 4) + +/* ASM:DEV_STATISTICS:TX_SIZE1519TOMAX_CNT */ +#define ASM_TX_SIZE1519TOMAX_CNT(g) \ + __REG(TARGET_ASM, 0, 1, 0, g, regs->gcnt[GC_ASM_DEV_STATISTICS], 512, \ + 144, 0, 1, 4) + +/* ASM:DEV_STATISTICS:RX_ALIGNMENT_LOST_CNT */ +#define ASM_RX_ALIGNMENT_LOST_CNT(g) \ + __REG(TARGET_ASM, 0, 1, 0, g, regs->gcnt[GC_ASM_DEV_STATISTICS], 512, \ + 148, 0, 1, 4) + +/* ASM:DEV_STATISTICS:RX_TAGGED_FRMS_CNT */ +#define ASM_RX_TAGGED_FRMS_CNT(g) \ + __REG(TARGET_ASM, 0, 1, 0, g, regs->gcnt[GC_ASM_DEV_STATISTICS], 512, \ + 152, 0, 1, 4) + +/* ASM:DEV_STATISTICS:RX_UNTAGGED_FRMS_CNT */ +#define ASM_RX_UNTAGGED_FRMS_CNT(g) \ + __REG(TARGET_ASM, 0, 1, 0, g, regs->gcnt[GC_ASM_DEV_STATISTICS], 512, \ + 156, 0, 1, 4) + +/* ASM:DEV_STATISTICS:TX_TAGGED_FRMS_CNT */ +#define ASM_TX_TAGGED_FRMS_CNT(g) \ + __REG(TARGET_ASM, 0, 1, 0, g, regs->gcnt[GC_ASM_DEV_STATISTICS], 512, \ + 160, 0, 1, 4) + +/* ASM:DEV_STATISTICS:TX_UNTAGGED_FRMS_CNT */ +#define ASM_TX_UNTAGGED_FRMS_CNT(g) \ + __REG(TARGET_ASM, 0, 1, 0, g, regs->gcnt[GC_ASM_DEV_STATISTICS], 512, \ + 164, 0, 1, 4) + +/* ASM:DEV_STATISTICS:PMAC_RX_SYMBOL_ERR_CNT */ +#define ASM_PMAC_RX_SYMBOL_ERR_CNT(g) \ + __REG(TARGET_ASM, 0, 1, 0, g, regs->gcnt[GC_ASM_DEV_STATISTICS], 512, \ + 168, 0, 1, 4) + +/* ASM:DEV_STATISTICS:PMAC_RX_PAUSE_CNT */ +#define ASM_PMAC_RX_PAUSE_CNT(g) \ + __REG(TARGET_ASM, 0, 1, 0, g, regs->gcnt[GC_ASM_DEV_STATISTICS], 512, \ + 172, 0, 1, 4) + +/* ASM:DEV_STATISTICS:PMAC_RX_UNSUP_OPCODE_CNT */ +#define ASM_PMAC_RX_UNSUP_OPCODE_CNT(g) \ + __REG(TARGET_ASM, 0, 1, 0, g, regs->gcnt[GC_ASM_DEV_STATISTICS], 512, \ + 176, 0, 1, 4) + +/* ASM:DEV_STATISTICS:PMAC_RX_OK_BYTES_CNT */ +#define ASM_PMAC_RX_OK_BYTES_CNT(g) \ + __REG(TARGET_ASM, 0, 1, 0, g, regs->gcnt[GC_ASM_DEV_STATISTICS], 512, \ + 180, 0, 1, 4) + +/* ASM:DEV_STATISTICS:PMAC_RX_BAD_BYTES_CNT */ +#define ASM_PMAC_RX_BAD_BYTES_CNT(g) \ + __REG(TARGET_ASM, 0, 1, 0, g, regs->gcnt[GC_ASM_DEV_STATISTICS], 512, \ + 184, 0, 1, 4) + +/* ASM:DEV_STATISTICS:PMAC_RX_UC_CNT */ +#define ASM_PMAC_RX_UC_CNT(g) \ + __REG(TARGET_ASM, 0, 1, 0, g, regs->gcnt[GC_ASM_DEV_STATISTICS], 512, \ + 188, 0, 1, 4) + +/* ASM:DEV_STATISTICS:PMAC_RX_MC_CNT */ +#define ASM_PMAC_RX_MC_CNT(g) \ + __REG(TARGET_ASM, 0, 1, 0, g, regs->gcnt[GC_ASM_DEV_STATISTICS], 512, \ + 192, 0, 1, 4) + +/* ASM:DEV_STATISTICS:PMAC_RX_BC_CNT */ +#define ASM_PMAC_RX_BC_CNT(g) \ + __REG(TARGET_ASM, 0, 1, 0, g, regs->gcnt[GC_ASM_DEV_STATISTICS], 512, \ + 196, 0, 1, 4) + +/* ASM:DEV_STATISTICS:PMAC_RX_CRC_ERR_CNT */ +#define ASM_PMAC_RX_CRC_ERR_CNT(g) \ + __REG(TARGET_ASM, 0, 1, 0, g, regs->gcnt[GC_ASM_DEV_STATISTICS], 512, \ + 200, 0, 1, 4) + +/* ASM:DEV_STATISTICS:PMAC_RX_UNDERSIZE_CNT */ +#define ASM_PMAC_RX_UNDERSIZE_CNT(g) \ + __REG(TARGET_ASM, 0, 1, 0, g, regs->gcnt[GC_ASM_DEV_STATISTICS], 512, \ + 204, 0, 1, 4) + +/* ASM:DEV_STATISTICS:PMAC_RX_FRAGMENTS_CNT */ +#define ASM_PMAC_RX_FRAGMENTS_CNT(g) \ + __REG(TARGET_ASM, 0, 1, 0, g, regs->gcnt[GC_ASM_DEV_STATISTICS], 512, \ + 208, 0, 1, 4) + +/* ASM:DEV_STATISTICS:PMAC_RX_IN_RANGE_LEN_ERR_CNT */ +#define ASM_PMAC_RX_IN_RANGE_LEN_ERR_CNT(g) \ + __REG(TARGET_ASM, 0, 1, 0, g, regs->gcnt[GC_ASM_DEV_STATISTICS], 512, \ + 212, 0, 1, 4) + +/* ASM:DEV_STATISTICS:PMAC_RX_OUT_OF_RANGE_LEN_ERR_CNT */ +#define 
ASM_PMAC_RX_OUT_OF_RANGE_LEN_ERR_CNT(g) \ + __REG(TARGET_ASM, 0, 1, 0, g, regs->gcnt[GC_ASM_DEV_STATISTICS], 512, \ + 216, 0, 1, 4) + +/* ASM:DEV_STATISTICS:PMAC_RX_OVERSIZE_CNT */ +#define ASM_PMAC_RX_OVERSIZE_CNT(g) \ + __REG(TARGET_ASM, 0, 1, 0, g, regs->gcnt[GC_ASM_DEV_STATISTICS], 512, \ + 220, 0, 1, 4) + +/* ASM:DEV_STATISTICS:PMAC_RX_JABBERS_CNT */ +#define ASM_PMAC_RX_JABBERS_CNT(g) \ + __REG(TARGET_ASM, 0, 1, 0, g, regs->gcnt[GC_ASM_DEV_STATISTICS], 512, \ + 224, 0, 1, 4) + +/* ASM:DEV_STATISTICS:PMAC_RX_SIZE64_CNT */ +#define ASM_PMAC_RX_SIZE64_CNT(g) \ + __REG(TARGET_ASM, 0, 1, 0, g, regs->gcnt[GC_ASM_DEV_STATISTICS], 512, \ + 228, 0, 1, 4) + +/* ASM:DEV_STATISTICS:PMAC_RX_SIZE65TO127_CNT */ +#define ASM_PMAC_RX_SIZE65TO127_CNT(g) \ + __REG(TARGET_ASM, 0, 1, 0, g, regs->gcnt[GC_ASM_DEV_STATISTICS], 512, \ + 232, 0, 1, 4) + +/* ASM:DEV_STATISTICS:PMAC_RX_SIZE128TO255_CNT */ +#define ASM_PMAC_RX_SIZE128TO255_CNT(g) \ + __REG(TARGET_ASM, 0, 1, 0, g, regs->gcnt[GC_ASM_DEV_STATISTICS], 512, \ + 236, 0, 1, 4) + +/* ASM:DEV_STATISTICS:PMAC_RX_SIZE256TO511_CNT */ +#define ASM_PMAC_RX_SIZE256TO511_CNT(g) \ + __REG(TARGET_ASM, 0, 1, 0, g, regs->gcnt[GC_ASM_DEV_STATISTICS], 512, \ + 240, 0, 1, 4) + +/* ASM:DEV_STATISTICS:PMAC_RX_SIZE512TO1023_CNT */ +#define ASM_PMAC_RX_SIZE512TO1023_CNT(g) \ + __REG(TARGET_ASM, 0, 1, 0, g, regs->gcnt[GC_ASM_DEV_STATISTICS], 512, \ + 244, 0, 1, 4) + +/* ASM:DEV_STATISTICS:PMAC_RX_SIZE1024TO1518_CNT */ +#define ASM_PMAC_RX_SIZE1024TO1518_CNT(g) \ + __REG(TARGET_ASM, 0, 1, 0, g, regs->gcnt[GC_ASM_DEV_STATISTICS], 512, \ + 248, 0, 1, 4) + +/* ASM:DEV_STATISTICS:PMAC_RX_SIZE1519TOMAX_CNT */ +#define ASM_PMAC_RX_SIZE1519TOMAX_CNT(g) \ + __REG(TARGET_ASM, 0, 1, 0, g, regs->gcnt[GC_ASM_DEV_STATISTICS], 512, \ + 252, 0, 1, 4) + +/* ASM:DEV_STATISTICS:PMAC_TX_PAUSE_CNT */ +#define ASM_PMAC_TX_PAUSE_CNT(g) \ + __REG(TARGET_ASM, 0, 1, 0, g, regs->gcnt[GC_ASM_DEV_STATISTICS], 512, \ + 256, 0, 1, 4) + +/* ASM:DEV_STATISTICS:PMAC_TX_OK_BYTES_CNT */ +#define ASM_PMAC_TX_OK_BYTES_CNT(g) \ + __REG(TARGET_ASM, 0, 1, 0, g, regs->gcnt[GC_ASM_DEV_STATISTICS], 512, \ + 260, 0, 1, 4) + +/* ASM:DEV_STATISTICS:PMAC_TX_UC_CNT */ +#define ASM_PMAC_TX_UC_CNT(g) \ + __REG(TARGET_ASM, 0, 1, 0, g, regs->gcnt[GC_ASM_DEV_STATISTICS], 512, \ + 264, 0, 1, 4) + +/* ASM:DEV_STATISTICS:PMAC_TX_MC_CNT */ +#define ASM_PMAC_TX_MC_CNT(g) \ + __REG(TARGET_ASM, 0, 1, 0, g, regs->gcnt[GC_ASM_DEV_STATISTICS], 512, \ + 268, 0, 1, 4) + +/* ASM:DEV_STATISTICS:PMAC_TX_BC_CNT */ +#define ASM_PMAC_TX_BC_CNT(g) \ + __REG(TARGET_ASM, 0, 1, 0, g, regs->gcnt[GC_ASM_DEV_STATISTICS], 512, \ + 272, 0, 1, 4) + +/* ASM:DEV_STATISTICS:PMAC_TX_SIZE64_CNT */ +#define ASM_PMAC_TX_SIZE64_CNT(g) \ + __REG(TARGET_ASM, 0, 1, 0, g, regs->gcnt[GC_ASM_DEV_STATISTICS], 512, \ + 276, 0, 1, 4) + +/* ASM:DEV_STATISTICS:PMAC_TX_SIZE65TO127_CNT */ +#define ASM_PMAC_TX_SIZE65TO127_CNT(g) \ + __REG(TARGET_ASM, 0, 1, 0, g, regs->gcnt[GC_ASM_DEV_STATISTICS], 512, \ + 280, 0, 1, 4) + +/* ASM:DEV_STATISTICS:PMAC_TX_SIZE128TO255_CNT */ +#define ASM_PMAC_TX_SIZE128TO255_CNT(g) \ + __REG(TARGET_ASM, 0, 1, 0, g, regs->gcnt[GC_ASM_DEV_STATISTICS], 512, \ + 284, 0, 1, 4) + +/* ASM:DEV_STATISTICS:PMAC_TX_SIZE256TO511_CNT */ +#define ASM_PMAC_TX_SIZE256TO511_CNT(g) \ + __REG(TARGET_ASM, 0, 1, 0, g, regs->gcnt[GC_ASM_DEV_STATISTICS], 512, \ + 288, 0, 1, 4) + +/* ASM:DEV_STATISTICS:PMAC_TX_SIZE512TO1023_CNT */ +#define ASM_PMAC_TX_SIZE512TO1023_CNT(g) \ + __REG(TARGET_ASM, 0, 1, 0, g, regs->gcnt[GC_ASM_DEV_STATISTICS], 512, \ + 292, 0, 1, 4) + +/* 
ASM:DEV_STATISTICS:PMAC_TX_SIZE1024TO1518_CNT */ +#define ASM_PMAC_TX_SIZE1024TO1518_CNT(g) \ + __REG(TARGET_ASM, 0, 1, 0, g, regs->gcnt[GC_ASM_DEV_STATISTICS], 512, \ + 296, 0, 1, 4) + +/* ASM:DEV_STATISTICS:PMAC_TX_SIZE1519TOMAX_CNT */ +#define ASM_PMAC_TX_SIZE1519TOMAX_CNT(g) \ + __REG(TARGET_ASM, 0, 1, 0, g, regs->gcnt[GC_ASM_DEV_STATISTICS], 512, \ + 300, 0, 1, 4) + +/* ASM:DEV_STATISTICS:PMAC_RX_ALIGNMENT_LOST_CNT */ +#define ASM_PMAC_RX_ALIGNMENT_LOST_CNT(g) \ + __REG(TARGET_ASM, 0, 1, 0, g, regs->gcnt[GC_ASM_DEV_STATISTICS], 512, \ + 304, 0, 1, 4) + +/* ASM:DEV_STATISTICS:MM_RX_ASSEMBLY_ERR_CNT */ +#define ASM_MM_RX_ASSEMBLY_ERR_CNT(g) \ + __REG(TARGET_ASM, 0, 1, 0, g, regs->gcnt[GC_ASM_DEV_STATISTICS], 512, \ + 308, 0, 1, 4) + +/* ASM:DEV_STATISTICS:MM_RX_SMD_ERR_CNT */ +#define ASM_MM_RX_SMD_ERR_CNT(g) \ + __REG(TARGET_ASM, 0, 1, 0, g, regs->gcnt[GC_ASM_DEV_STATISTICS], 512, \ + 312, 0, 1, 4) + +/* ASM:DEV_STATISTICS:MM_RX_ASSEMBLY_OK_CNT */ +#define ASM_MM_RX_ASSEMBLY_OK_CNT(g) \ + __REG(TARGET_ASM, 0, 1, 0, g, regs->gcnt[GC_ASM_DEV_STATISTICS], 512, \ + 316, 0, 1, 4) + +/* ASM:DEV_STATISTICS:MM_RX_MERGE_FRAG_CNT */ +#define ASM_MM_RX_MERGE_FRAG_CNT(g) \ + __REG(TARGET_ASM, 0, 1, 0, g, regs->gcnt[GC_ASM_DEV_STATISTICS], 512, \ + 320, 0, 1, 4) + +/* ASM:DEV_STATISTICS:MM_TX_PFRAGMENT_CNT */ +#define ASM_MM_TX_PFRAGMENT_CNT(g) \ + __REG(TARGET_ASM, 0, 1, 0, g, regs->gcnt[GC_ASM_DEV_STATISTICS], 512, \ + 324, 0, 1, 4) + +/* ASM:DEV_STATISTICS:TX_MULTI_COLL_CNT */ +#define ASM_TX_MULTI_COLL_CNT(g) \ + __REG(TARGET_ASM, 0, 1, 0, g, regs->gcnt[GC_ASM_DEV_STATISTICS], 512, \ + 328, 0, 1, 4) + +/* ASM:DEV_STATISTICS:TX_LATE_COLL_CNT */ +#define ASM_TX_LATE_COLL_CNT(g) \ + __REG(TARGET_ASM, 0, 1, 0, g, regs->gcnt[GC_ASM_DEV_STATISTICS], 512, \ + 332, 0, 1, 4) + +/* ASM:DEV_STATISTICS:TX_XCOLL_CNT */ +#define ASM_TX_XCOLL_CNT(g) \ + __REG(TARGET_ASM, 0, 1, 0, g, regs->gcnt[GC_ASM_DEV_STATISTICS], 512, \ + 336, 0, 1, 4) + +/* ASM:DEV_STATISTICS:TX_DEFER_CNT */ +#define ASM_TX_DEFER_CNT(g) \ + __REG(TARGET_ASM, 0, 1, 0, g, regs->gcnt[GC_ASM_DEV_STATISTICS], 512, \ + 340, 0, 1, 4) + +/* ASM:DEV_STATISTICS:TX_XDEFER_CNT */ +#define ASM_TX_XDEFER_CNT(g) \ + __REG(TARGET_ASM, 0, 1, 0, g, regs->gcnt[GC_ASM_DEV_STATISTICS], 512, \ + 344, 0, 1, 4) + +/* ASM:DEV_STATISTICS:TX_BACKOFF1_CNT */ +#define ASM_TX_BACKOFF1_CNT(g) \ + __REG(TARGET_ASM, 0, 1, 0, g, regs->gcnt[GC_ASM_DEV_STATISTICS], 512, \ + 348, 0, 1, 4) + +/* ASM:DEV_STATISTICS:TX_CSENSE_CNT */ +#define ASM_TX_CSENSE_CNT(g) \ + __REG(TARGET_ASM, 0, 1, 0, g, regs->gcnt[GC_ASM_DEV_STATISTICS], 512, \ + 352, 0, 1, 4) + +/* ASM:DEV_STATISTICS:RX_IN_BYTES_MSB_CNT */ +#define ASM_RX_IN_BYTES_MSB_CNT(g) \ + __REG(TARGET_ASM, 0, 1, 0, g, regs->gcnt[GC_ASM_DEV_STATISTICS], 512, \ + 356, 0, 1, 4) #define ASM_RX_IN_BYTES_MSB_CNT_RX_IN_BYTES_MSB_CNT GENMASK(3, 0) #define ASM_RX_IN_BYTES_MSB_CNT_RX_IN_BYTES_MSB_CNT_SET(x)\ @@ -2133,9 +2348,10 @@ enum sparx5_target { #define ASM_RX_IN_BYTES_MSB_CNT_RX_IN_BYTES_MSB_CNT_GET(x)\ FIELD_GET(ASM_RX_IN_BYTES_MSB_CNT_RX_IN_BYTES_MSB_CNT, x) -/* ASM:DEV_STATISTICS:RX_OK_BYTES_MSB_CNT */ -#define ASM_RX_OK_BYTES_MSB_CNT(g) __REG(TARGET_ASM,\ - 0, 1, 0, g, 65, 512, 360, 0, 1, 4) +/* ASM:DEV_STATISTICS:RX_OK_BYTES_MSB_CNT */ +#define ASM_RX_OK_BYTES_MSB_CNT(g) \ + __REG(TARGET_ASM, 0, 1, 0, g, regs->gcnt[GC_ASM_DEV_STATISTICS], 512, \ + 360, 0, 1, 4) #define ASM_RX_OK_BYTES_MSB_CNT_RX_OK_BYTES_MSB_CNT GENMASK(3, 0) #define ASM_RX_OK_BYTES_MSB_CNT_RX_OK_BYTES_MSB_CNT_SET(x)\ @@ -2143,9 +2359,10 @@ enum 
sparx5_target { #define ASM_RX_OK_BYTES_MSB_CNT_RX_OK_BYTES_MSB_CNT_GET(x)\ FIELD_GET(ASM_RX_OK_BYTES_MSB_CNT_RX_OK_BYTES_MSB_CNT, x) -/* ASM:DEV_STATISTICS:PMAC_RX_OK_BYTES_MSB_CNT */ -#define ASM_PMAC_RX_OK_BYTES_MSB_CNT(g) __REG(TARGET_ASM,\ - 0, 1, 0, g, 65, 512, 364, 0, 1, 4) +/* ASM:DEV_STATISTICS:PMAC_RX_OK_BYTES_MSB_CNT */ +#define ASM_PMAC_RX_OK_BYTES_MSB_CNT(g) \ + __REG(TARGET_ASM, 0, 1, 0, g, regs->gcnt[GC_ASM_DEV_STATISTICS], 512, \ + 364, 0, 1, 4) #define ASM_PMAC_RX_OK_BYTES_MSB_CNT_PMAC_RX_OK_BYTES_MSB_CNT GENMASK(3, 0) #define ASM_PMAC_RX_OK_BYTES_MSB_CNT_PMAC_RX_OK_BYTES_MSB_CNT_SET(x)\ @@ -2153,9 +2370,10 @@ enum sparx5_target { #define ASM_PMAC_RX_OK_BYTES_MSB_CNT_PMAC_RX_OK_BYTES_MSB_CNT_GET(x)\ FIELD_GET(ASM_PMAC_RX_OK_BYTES_MSB_CNT_PMAC_RX_OK_BYTES_MSB_CNT, x) -/* ASM:DEV_STATISTICS:RX_BAD_BYTES_MSB_CNT */ -#define ASM_RX_BAD_BYTES_MSB_CNT(g) __REG(TARGET_ASM,\ - 0, 1, 0, g, 65, 512, 368, 0, 1, 4) +/* ASM:DEV_STATISTICS:RX_BAD_BYTES_MSB_CNT */ +#define ASM_RX_BAD_BYTES_MSB_CNT(g) \ + __REG(TARGET_ASM, 0, 1, 0, g, regs->gcnt[GC_ASM_DEV_STATISTICS], 512, \ + 368, 0, 1, 4) #define ASM_RX_BAD_BYTES_MSB_CNT_RX_BAD_BYTES_MSB_CNT GENMASK(3, 0) #define ASM_RX_BAD_BYTES_MSB_CNT_RX_BAD_BYTES_MSB_CNT_SET(x)\ @@ -2163,9 +2381,10 @@ enum sparx5_target { #define ASM_RX_BAD_BYTES_MSB_CNT_RX_BAD_BYTES_MSB_CNT_GET(x)\ FIELD_GET(ASM_RX_BAD_BYTES_MSB_CNT_RX_BAD_BYTES_MSB_CNT, x) -/* ASM:DEV_STATISTICS:PMAC_RX_BAD_BYTES_MSB_CNT */ -#define ASM_PMAC_RX_BAD_BYTES_MSB_CNT(g) __REG(TARGET_ASM,\ - 0, 1, 0, g, 65, 512, 372, 0, 1, 4) +/* ASM:DEV_STATISTICS:PMAC_RX_BAD_BYTES_MSB_CNT */ +#define ASM_PMAC_RX_BAD_BYTES_MSB_CNT(g) \ + __REG(TARGET_ASM, 0, 1, 0, g, regs->gcnt[GC_ASM_DEV_STATISTICS], 512, \ + 372, 0, 1, 4) #define ASM_PMAC_RX_BAD_BYTES_MSB_CNT_PMAC_RX_BAD_BYTES_MSB_CNT GENMASK(3, 0) #define ASM_PMAC_RX_BAD_BYTES_MSB_CNT_PMAC_RX_BAD_BYTES_MSB_CNT_SET(x)\ @@ -2173,9 +2392,10 @@ enum sparx5_target { #define ASM_PMAC_RX_BAD_BYTES_MSB_CNT_PMAC_RX_BAD_BYTES_MSB_CNT_GET(x)\ FIELD_GET(ASM_PMAC_RX_BAD_BYTES_MSB_CNT_PMAC_RX_BAD_BYTES_MSB_CNT, x) -/* ASM:DEV_STATISTICS:TX_OUT_BYTES_MSB_CNT */ -#define ASM_TX_OUT_BYTES_MSB_CNT(g) __REG(TARGET_ASM,\ - 0, 1, 0, g, 65, 512, 376, 0, 1, 4) +/* ASM:DEV_STATISTICS:TX_OUT_BYTES_MSB_CNT */ +#define ASM_TX_OUT_BYTES_MSB_CNT(g) \ + __REG(TARGET_ASM, 0, 1, 0, g, regs->gcnt[GC_ASM_DEV_STATISTICS], 512, \ + 376, 0, 1, 4) #define ASM_TX_OUT_BYTES_MSB_CNT_TX_OUT_BYTES_MSB_CNT GENMASK(3, 0) #define ASM_TX_OUT_BYTES_MSB_CNT_TX_OUT_BYTES_MSB_CNT_SET(x)\ @@ -2183,9 +2403,10 @@ enum sparx5_target { #define ASM_TX_OUT_BYTES_MSB_CNT_TX_OUT_BYTES_MSB_CNT_GET(x)\ FIELD_GET(ASM_TX_OUT_BYTES_MSB_CNT_TX_OUT_BYTES_MSB_CNT, x) -/* ASM:DEV_STATISTICS:TX_OK_BYTES_MSB_CNT */ -#define ASM_TX_OK_BYTES_MSB_CNT(g) __REG(TARGET_ASM,\ - 0, 1, 0, g, 65, 512, 380, 0, 1, 4) +/* ASM:DEV_STATISTICS:TX_OK_BYTES_MSB_CNT */ +#define ASM_TX_OK_BYTES_MSB_CNT(g) \ + __REG(TARGET_ASM, 0, 1, 0, g, regs->gcnt[GC_ASM_DEV_STATISTICS], 512, \ + 380, 0, 1, 4) #define ASM_TX_OK_BYTES_MSB_CNT_TX_OK_BYTES_MSB_CNT GENMASK(3, 0) #define ASM_TX_OK_BYTES_MSB_CNT_TX_OK_BYTES_MSB_CNT_SET(x)\ @@ -2193,9 +2414,10 @@ enum sparx5_target { #define ASM_TX_OK_BYTES_MSB_CNT_TX_OK_BYTES_MSB_CNT_GET(x)\ FIELD_GET(ASM_TX_OK_BYTES_MSB_CNT_TX_OK_BYTES_MSB_CNT, x) -/* ASM:DEV_STATISTICS:PMAC_TX_OK_BYTES_MSB_CNT */ -#define ASM_PMAC_TX_OK_BYTES_MSB_CNT(g) __REG(TARGET_ASM,\ - 0, 1, 0, g, 65, 512, 384, 0, 1, 4) +/* ASM:DEV_STATISTICS:PMAC_TX_OK_BYTES_MSB_CNT */ +#define ASM_PMAC_TX_OK_BYTES_MSB_CNT(g) \ + __REG(TARGET_ASM, 0, 
1, 0, g, regs->gcnt[GC_ASM_DEV_STATISTICS], 512, \ + 384, 0, 1, 4) #define ASM_PMAC_TX_OK_BYTES_MSB_CNT_PMAC_TX_OK_BYTES_MSB_CNT GENMASK(3, 0) #define ASM_PMAC_TX_OK_BYTES_MSB_CNT_PMAC_TX_OK_BYTES_MSB_CNT_SET(x)\ @@ -2203,13 +2425,15 @@ enum sparx5_target { #define ASM_PMAC_TX_OK_BYTES_MSB_CNT_PMAC_TX_OK_BYTES_MSB_CNT_GET(x)\ FIELD_GET(ASM_PMAC_TX_OK_BYTES_MSB_CNT_PMAC_TX_OK_BYTES_MSB_CNT, x) -/* ASM:DEV_STATISTICS:RX_SYNC_LOST_ERR_CNT */ -#define ASM_RX_SYNC_LOST_ERR_CNT(g) __REG(TARGET_ASM,\ - 0, 1, 0, g, 65, 512, 388, 0, 1, 4) +/* ASM:DEV_STATISTICS:RX_SYNC_LOST_ERR_CNT */ +#define ASM_RX_SYNC_LOST_ERR_CNT(g) \ + __REG(TARGET_ASM, 0, 1, 0, g, regs->gcnt[GC_ASM_DEV_STATISTICS], 512, \ + 388, 0, 1, 4) -/* ASM:CFG:STAT_CFG */ -#define ASM_STAT_CFG __REG(TARGET_ASM,\ - 0, 1, 33280, 0, 1, 1088, 0, 0, 1, 4) +/* ASM:CFG:STAT_CFG */ +#define ASM_STAT_CFG \ + __REG(TARGET_ASM, 0, 1, regs->gaddr[GA_ASM_CFG], 0, 1, \ + regs->gsize[GW_ASM_CFG], 0, 0, 1, 4) #define ASM_STAT_CFG_STAT_CNT_CLR_SHOT BIT(0) #define ASM_STAT_CFG_STAT_CNT_CLR_SHOT_SET(x)\ @@ -2217,9 +2441,10 @@ enum sparx5_target { #define ASM_STAT_CFG_STAT_CNT_CLR_SHOT_GET(x)\ FIELD_GET(ASM_STAT_CFG_STAT_CNT_CLR_SHOT, x) -/* ASM:CFG:PORT_CFG */ -#define ASM_PORT_CFG(r) __REG(TARGET_ASM,\ - 0, 1, 33280, 0, 1, 1088, 540, r, 67, 4) +/* ASM:CFG:PORT_CFG */ +#define ASM_PORT_CFG(r) \ + __REG(TARGET_ASM, 0, 1, regs->gaddr[GA_ASM_CFG], 0, 1, \ + regs->gsize[GW_ASM_CFG], 540, r, regs->rcnt[RC_ASM_PORT_CFG], 4) #define ASM_PORT_CFG_CSC_STAT_DIS BIT(12) #define ASM_PORT_CFG_CSC_STAT_DIS_SET(x)\ @@ -2287,9 +2512,10 @@ enum sparx5_target { #define ASM_PORT_CFG_PFRM_FLUSH_GET(x)\ FIELD_GET(ASM_PORT_CFG_PFRM_FLUSH, x) -/* ASM:RAM_CTRL:RAM_INIT */ -#define ASM_RAM_INIT __REG(TARGET_ASM,\ - 0, 1, 34832, 0, 1, 4, 0, 0, 1, 4) +/* ASM:RAM_CTRL:RAM_INIT */ +#define ASM_RAM_INIT \ + __REG(TARGET_ASM, 0, 1, regs->gaddr[GA_ASM_RAM_CTRL], 0, 1, 4, 0, 0, 1,\ + 4) #define ASM_RAM_INIT_RAM_INIT BIT(1) #define ASM_RAM_INIT_RAM_INIT_SET(x)\ @@ -2303,9 +2529,10 @@ enum sparx5_target { #define ASM_RAM_INIT_RAM_CFG_HOOK_GET(x)\ FIELD_GET(ASM_RAM_INIT_RAM_CFG_HOOK, x) -/* CLKGEN:LCPLL1:LCPLL1_CORE_CLK_CFG */ -#define CLKGEN_LCPLL1_CORE_CLK_CFG __REG(TARGET_CLKGEN,\ - 0, 1, 12, 0, 1, 36, 0, 0, 1, 4) +/* SPARX5 ONLY */ +/* CLKGEN:LCPLL1:LCPLL1_CORE_CLK_CFG */ +#define CLKGEN_LCPLL1_CORE_CLK_CFG \ + __REG(TARGET_CLKGEN, 0, 1, 12, 0, 1, 36, 0, 0, 1, 4) #define CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_CLK_DIV GENMASK(7, 0) #define CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_CLK_DIV_SET(x)\ @@ -2343,91 +2570,106 @@ enum sparx5_target { #define CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_CLK_ENA_GET(x)\ FIELD_GET(CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_CLK_ENA, x) -/* CPU:CPU_REGS:PROC_CTRL */ -#define CPU_PROC_CTRL __REG(TARGET_CPU,\ - 0, 1, 0, 0, 1, 204, 176, 0, 1, 4) +/* CPU:CPU_REGS:PROC_CTRL */ +#define CPU_PROC_CTRL \ + __REG(TARGET_CPU, 0, 1, 0, 0, 1, regs->gsize[GW_CPU_CPU_REGS], \ + regs->raddr[RA_CPU_PROC_CTRL], 0, 1, 4) -#define CPU_PROC_CTRL_AARCH64_MODE_ENA BIT(12) +#define CPU_PROC_CTRL_AARCH64_MODE_ENA\ + BIT(regs->fpos[FP_CPU_PROC_CTRL_AARCH64_MODE_ENA]) #define CPU_PROC_CTRL_AARCH64_MODE_ENA_SET(x)\ - FIELD_PREP(CPU_PROC_CTRL_AARCH64_MODE_ENA, x) + spx5_field_prep(CPU_PROC_CTRL_AARCH64_MODE_ENA, x) #define CPU_PROC_CTRL_AARCH64_MODE_ENA_GET(x)\ - FIELD_GET(CPU_PROC_CTRL_AARCH64_MODE_ENA, x) + spx5_field_get(CPU_PROC_CTRL_AARCH64_MODE_ENA, x) -#define CPU_PROC_CTRL_L2_RST_INVALIDATE_DIS BIT(11) +#define CPU_PROC_CTRL_L2_RST_INVALIDATE_DIS\ + BIT(regs->fpos[FP_CPU_PROC_CTRL_L2_RST_INVALIDATE_DIS]) 
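The masks in this block stop being compile-time constants: BIT(11) becomes BIT(regs->fpos[FP_CPU_PROC_CTRL_L2_RST_INVALIDATE_DIS]) here, and in the ANA_L2 hunks above GENMASK(12, 0) becomes GENMASK(regs->fsize[...] - 1, 0), with the bit position or field width read from the per-chip descriptor. FIELD_PREP()/FIELD_GET() from <linux/bitfield.h> reject non-constant masks at build time, which is why each such field is switched to the spx5_field_prep()/spx5_field_get() wrappers. A hedged sketch of run-time equivalents, assuming a contiguous non-zero mask; the driver's real definitions may differ:

	#include <linux/bits.h>		/* BIT(), GENMASK() */
	#include <linux/bitops.h>	/* __ffs() */

	/* Sketch only: shift by the mask's lowest set bit instead of relying
	 * on the compile-time constant folding that FIELD_PREP()/FIELD_GET()
	 * require.
	 */
	#define spx5_field_prep(mask, val)	(((val) << __ffs(mask)) & (mask))
	#define spx5_field_get(mask, reg)	(((reg) & (mask)) >> __ffs(mask))

The uppercase macros stay in place wherever the mask is still constant (for instance the SPARX5-only ACP fields just below), which preserves the build-time mask checking for those fields.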
#define CPU_PROC_CTRL_L2_RST_INVALIDATE_DIS_SET(x)\ - FIELD_PREP(CPU_PROC_CTRL_L2_RST_INVALIDATE_DIS, x) + spx5_field_prep(CPU_PROC_CTRL_L2_RST_INVALIDATE_DIS, x) #define CPU_PROC_CTRL_L2_RST_INVALIDATE_DIS_GET(x)\ - FIELD_GET(CPU_PROC_CTRL_L2_RST_INVALIDATE_DIS, x) + spx5_field_get(CPU_PROC_CTRL_L2_RST_INVALIDATE_DIS, x) -#define CPU_PROC_CTRL_L1_RST_INVALIDATE_DIS BIT(10) +#define CPU_PROC_CTRL_L1_RST_INVALIDATE_DIS\ + BIT(regs->fpos[FP_CPU_PROC_CTRL_L1_RST_INVALIDATE_DIS]) #define CPU_PROC_CTRL_L1_RST_INVALIDATE_DIS_SET(x)\ - FIELD_PREP(CPU_PROC_CTRL_L1_RST_INVALIDATE_DIS, x) + spx5_field_prep(CPU_PROC_CTRL_L1_RST_INVALIDATE_DIS, x) #define CPU_PROC_CTRL_L1_RST_INVALIDATE_DIS_GET(x)\ - FIELD_GET(CPU_PROC_CTRL_L1_RST_INVALIDATE_DIS, x) + spx5_field_get(CPU_PROC_CTRL_L1_RST_INVALIDATE_DIS, x) -#define CPU_PROC_CTRL_BE_EXCEP_MODE BIT(9) +#define CPU_PROC_CTRL_BE_EXCEP_MODE\ + BIT(regs->fpos[FP_CPU_PROC_CTRL_BE_EXCEP_MODE]) #define CPU_PROC_CTRL_BE_EXCEP_MODE_SET(x)\ - FIELD_PREP(CPU_PROC_CTRL_BE_EXCEP_MODE, x) + spx5_field_prep(CPU_PROC_CTRL_BE_EXCEP_MODE, x) #define CPU_PROC_CTRL_BE_EXCEP_MODE_GET(x)\ - FIELD_GET(CPU_PROC_CTRL_BE_EXCEP_MODE, x) + spx5_field_get(CPU_PROC_CTRL_BE_EXCEP_MODE, x) -#define CPU_PROC_CTRL_VINITHI BIT(8) +#define CPU_PROC_CTRL_VINITHI\ + BIT(regs->fpos[FP_CPU_PROC_CTRL_VINITHI]) #define CPU_PROC_CTRL_VINITHI_SET(x)\ - FIELD_PREP(CPU_PROC_CTRL_VINITHI, x) + spx5_field_prep(CPU_PROC_CTRL_VINITHI, x) #define CPU_PROC_CTRL_VINITHI_GET(x)\ - FIELD_GET(CPU_PROC_CTRL_VINITHI, x) + spx5_field_get(CPU_PROC_CTRL_VINITHI, x) -#define CPU_PROC_CTRL_CFGTE BIT(7) +#define CPU_PROC_CTRL_CFGTE\ + BIT(regs->fpos[FP_CPU_PROC_CTRL_CFGTE]) #define CPU_PROC_CTRL_CFGTE_SET(x)\ - FIELD_PREP(CPU_PROC_CTRL_CFGTE, x) + spx5_field_prep(CPU_PROC_CTRL_CFGTE, x) #define CPU_PROC_CTRL_CFGTE_GET(x)\ - FIELD_GET(CPU_PROC_CTRL_CFGTE, x) + spx5_field_get(CPU_PROC_CTRL_CFGTE, x) -#define CPU_PROC_CTRL_CP15S_DISABLE BIT(6) +#define CPU_PROC_CTRL_CP15S_DISABLE\ + BIT(regs->fpos[FP_CPU_PROC_CTRL_CP15S_DISABLE]) #define CPU_PROC_CTRL_CP15S_DISABLE_SET(x)\ - FIELD_PREP(CPU_PROC_CTRL_CP15S_DISABLE, x) + spx5_field_prep(CPU_PROC_CTRL_CP15S_DISABLE, x) #define CPU_PROC_CTRL_CP15S_DISABLE_GET(x)\ - FIELD_GET(CPU_PROC_CTRL_CP15S_DISABLE, x) + spx5_field_get(CPU_PROC_CTRL_CP15S_DISABLE, x) -#define CPU_PROC_CTRL_PROC_CRYPTO_DISABLE BIT(5) +#define CPU_PROC_CTRL_PROC_CRYPTO_DISABLE\ + BIT(regs->fpos[FP_CPU_PROC_CTRL_PROC_CRYPTO_DISABLE]) #define CPU_PROC_CTRL_PROC_CRYPTO_DISABLE_SET(x)\ - FIELD_PREP(CPU_PROC_CTRL_PROC_CRYPTO_DISABLE, x) + spx5_field_prep(CPU_PROC_CTRL_PROC_CRYPTO_DISABLE, x) #define CPU_PROC_CTRL_PROC_CRYPTO_DISABLE_GET(x)\ - FIELD_GET(CPU_PROC_CTRL_PROC_CRYPTO_DISABLE, x) + spx5_field_get(CPU_PROC_CTRL_PROC_CRYPTO_DISABLE, x) +/* SPARX5 ONLY */ #define CPU_PROC_CTRL_ACP_CACHE_FORCE_ENA BIT(4) #define CPU_PROC_CTRL_ACP_CACHE_FORCE_ENA_SET(x)\ FIELD_PREP(CPU_PROC_CTRL_ACP_CACHE_FORCE_ENA, x) #define CPU_PROC_CTRL_ACP_CACHE_FORCE_ENA_GET(x)\ FIELD_GET(CPU_PROC_CTRL_ACP_CACHE_FORCE_ENA, x) +/* SPARX5 ONLY */ #define CPU_PROC_CTRL_ACP_AWCACHE BIT(3) #define CPU_PROC_CTRL_ACP_AWCACHE_SET(x)\ FIELD_PREP(CPU_PROC_CTRL_ACP_AWCACHE, x) #define CPU_PROC_CTRL_ACP_AWCACHE_GET(x)\ FIELD_GET(CPU_PROC_CTRL_ACP_AWCACHE, x) +/* SPARX5 ONLY */ #define CPU_PROC_CTRL_ACP_ARCACHE BIT(2) #define CPU_PROC_CTRL_ACP_ARCACHE_SET(x)\ FIELD_PREP(CPU_PROC_CTRL_ACP_ARCACHE, x) #define CPU_PROC_CTRL_ACP_ARCACHE_GET(x)\ FIELD_GET(CPU_PROC_CTRL_ACP_ARCACHE, x) -#define CPU_PROC_CTRL_L2_FLUSH_REQ BIT(1) +#define 
CPU_PROC_CTRL_L2_FLUSH_REQ\ + BIT(regs->fpos[FP_CPU_PROC_CTRL_L2_FLUSH_REQ]) #define CPU_PROC_CTRL_L2_FLUSH_REQ_SET(x)\ - FIELD_PREP(CPU_PROC_CTRL_L2_FLUSH_REQ, x) + spx5_field_prep(CPU_PROC_CTRL_L2_FLUSH_REQ, x) #define CPU_PROC_CTRL_L2_FLUSH_REQ_GET(x)\ - FIELD_GET(CPU_PROC_CTRL_L2_FLUSH_REQ, x) + spx5_field_get(CPU_PROC_CTRL_L2_FLUSH_REQ, x) +/* SPARX5 ONLY */ #define CPU_PROC_CTRL_ACP_DISABLE BIT(0) #define CPU_PROC_CTRL_ACP_DISABLE_SET(x)\ FIELD_PREP(CPU_PROC_CTRL_ACP_DISABLE, x) #define CPU_PROC_CTRL_ACP_DISABLE_GET(x)\ FIELD_GET(CPU_PROC_CTRL_ACP_DISABLE, x) -/* DEV10G:MAC_CFG_STATUS:MAC_ENA_CFG */ -#define DEV10G_MAC_ENA_CFG(t) __REG(TARGET_DEV10G,\ - t, 12, 0, 0, 1, 60, 0, 0, 1, 4) +/* DEV10G:MAC_CFG_STATUS:MAC_ENA_CFG */ +#define DEV10G_MAC_ENA_CFG(t) \ + __REG(TARGET_DEV10G, t, regs->tsize[TC_DEV10G], 0, 0, 1, 60, 0, 0, 1, \ + 4) #define DEV10G_MAC_ENA_CFG_RX_ENA BIT(4) #define DEV10G_MAC_ENA_CFG_RX_ENA_SET(x)\ @@ -2441,9 +2683,10 @@ enum sparx5_target { #define DEV10G_MAC_ENA_CFG_TX_ENA_GET(x)\ FIELD_GET(DEV10G_MAC_ENA_CFG_TX_ENA, x) -/* DEV10G:MAC_CFG_STATUS:MAC_MAXLEN_CFG */ -#define DEV10G_MAC_MAXLEN_CFG(t) __REG(TARGET_DEV10G,\ - t, 12, 0, 0, 1, 60, 8, 0, 1, 4) +/* DEV10G:MAC_CFG_STATUS:MAC_MAXLEN_CFG */ +#define DEV10G_MAC_MAXLEN_CFG(t) \ + __REG(TARGET_DEV10G, t, regs->tsize[TC_DEV10G], 0, 0, 1, 60, 8, 0, 1, \ + 4) #define DEV10G_MAC_MAXLEN_CFG_MAX_LEN_TAG_CHK BIT(16) #define DEV10G_MAC_MAXLEN_CFG_MAX_LEN_TAG_CHK_SET(x)\ @@ -2457,9 +2700,10 @@ enum sparx5_target { #define DEV10G_MAC_MAXLEN_CFG_MAX_LEN_GET(x)\ FIELD_GET(DEV10G_MAC_MAXLEN_CFG_MAX_LEN, x) -/* DEV10G:MAC_CFG_STATUS:MAC_NUM_TAGS_CFG */ -#define DEV10G_MAC_NUM_TAGS_CFG(t) __REG(TARGET_DEV10G,\ - t, 12, 0, 0, 1, 60, 12, 0, 1, 4) +/* DEV10G:MAC_CFG_STATUS:MAC_NUM_TAGS_CFG */ +#define DEV10G_MAC_NUM_TAGS_CFG(t) \ + __REG(TARGET_DEV10G, t, regs->tsize[TC_DEV10G], 0, 0, 1, 60, 12, 0, 1, \ + 4) #define DEV10G_MAC_NUM_TAGS_CFG_NUM_TAGS GENMASK(1, 0) #define DEV10G_MAC_NUM_TAGS_CFG_NUM_TAGS_SET(x)\ @@ -2467,9 +2711,10 @@ enum sparx5_target { #define DEV10G_MAC_NUM_TAGS_CFG_NUM_TAGS_GET(x)\ FIELD_GET(DEV10G_MAC_NUM_TAGS_CFG_NUM_TAGS, x) -/* DEV10G:MAC_CFG_STATUS:MAC_TAGS_CFG */ -#define DEV10G_MAC_TAGS_CFG(t, r) __REG(TARGET_DEV10G,\ - t, 12, 0, 0, 1, 60, 16, r, 3, 4) +/* DEV10G:MAC_CFG_STATUS:MAC_TAGS_CFG */ +#define DEV10G_MAC_TAGS_CFG(t, r) \ + __REG(TARGET_DEV10G, t, regs->tsize[TC_DEV10G], 0, 0, 1, 60, 16, r, 3, \ + 4) #define DEV10G_MAC_TAGS_CFG_TAG_ID GENMASK(31, 16) #define DEV10G_MAC_TAGS_CFG_TAG_ID_SET(x)\ @@ -2483,9 +2728,10 @@ enum sparx5_target { #define DEV10G_MAC_TAGS_CFG_TAG_ENA_GET(x)\ FIELD_GET(DEV10G_MAC_TAGS_CFG_TAG_ENA, x) -/* DEV10G:MAC_CFG_STATUS:MAC_ADV_CHK_CFG */ -#define DEV10G_MAC_ADV_CHK_CFG(t) __REG(TARGET_DEV10G,\ - t, 12, 0, 0, 1, 60, 28, 0, 1, 4) +/* DEV10G:MAC_CFG_STATUS:MAC_ADV_CHK_CFG */ +#define DEV10G_MAC_ADV_CHK_CFG(t) \ + __REG(TARGET_DEV10G, t, regs->tsize[TC_DEV10G], 0, 0, 1, 60, 28, 0, 1, \ + 4) #define DEV10G_MAC_ADV_CHK_CFG_EXT_EOP_CHK_ENA BIT(24) #define DEV10G_MAC_ADV_CHK_CFG_EXT_EOP_CHK_ENA_SET(x)\ @@ -2529,9 +2775,10 @@ enum sparx5_target { #define DEV10G_MAC_ADV_CHK_CFG_INR_ERR_ENA_GET(x)\ FIELD_GET(DEV10G_MAC_ADV_CHK_CFG_INR_ERR_ENA, x) -/* DEV10G:MAC_CFG_STATUS:MAC_TX_MONITOR_STICKY */ -#define DEV10G_MAC_TX_MONITOR_STICKY(t) __REG(TARGET_DEV10G,\ - t, 12, 0, 0, 1, 60, 48, 0, 1, 4) +/* DEV10G:MAC_CFG_STATUS:MAC_TX_MONITOR_STICKY */ +#define DEV10G_MAC_TX_MONITOR_STICKY(t) \ + __REG(TARGET_DEV10G, t, regs->tsize[TC_DEV10G], 0, 0, 1, 60, 48, 0, 1, \ + 4) #define 
DEV10G_MAC_TX_MONITOR_STICKY_LOCAL_ERR_STATE_STICKY BIT(4) #define DEV10G_MAC_TX_MONITOR_STICKY_LOCAL_ERR_STATE_STICKY_SET(x)\ @@ -2563,9 +2810,10 @@ enum sparx5_target { #define DEV10G_MAC_TX_MONITOR_STICKY_DIS_STATE_STICKY_GET(x)\ FIELD_GET(DEV10G_MAC_TX_MONITOR_STICKY_DIS_STATE_STICKY, x) -/* DEV10G:DEV_CFG_STATUS:DEV_RST_CTRL */ -#define DEV10G_DEV_RST_CTRL(t) __REG(TARGET_DEV10G,\ - t, 12, 436, 0, 1, 52, 0, 0, 1, 4) +/* DEV10G:DEV_CFG_STATUS:DEV_RST_CTRL */ +#define DEV10G_DEV_RST_CTRL(t) \ + __REG(TARGET_DEV10G, t, regs->tsize[TC_DEV10G], 436, 0, 1, 52, 0, 0, 1,\ + 4) #define DEV10G_DEV_RST_CTRL_PARDET_MODE_ENA BIT(28) #define DEV10G_DEV_RST_CTRL_PARDET_MODE_ENA_SET(x)\ @@ -2621,9 +2869,10 @@ enum sparx5_target { #define DEV10G_DEV_RST_CTRL_MAC_RX_RST_GET(x)\ FIELD_GET(DEV10G_DEV_RST_CTRL_MAC_RX_RST, x) -/* DEV10G:PCS25G_CFG_STATUS:PCS25G_CFG */ -#define DEV10G_PCS25G_CFG(t) __REG(TARGET_DEV10G,\ - t, 12, 488, 0, 1, 32, 0, 0, 1, 4) +/* DEV10G:PCS25G_CFG_STATUS:PCS25G_CFG */ +#define DEV10G_PCS25G_CFG(t) \ + __REG(TARGET_DEV10G, t, regs->tsize[TC_DEV10G], 488, 0, 1, 32, 0, 0, 1,\ + 4) #define DEV10G_PCS25G_CFG_PCS25G_ENA BIT(0) #define DEV10G_PCS25G_CFG_PCS25G_ENA_SET(x)\ @@ -2631,9 +2880,10 @@ enum sparx5_target { #define DEV10G_PCS25G_CFG_PCS25G_ENA_GET(x)\ FIELD_GET(DEV10G_PCS25G_CFG_PCS25G_ENA, x) -/* DEV10G:MAC_CFG_STATUS:MAC_ENA_CFG */ -#define DEV25G_MAC_ENA_CFG(t) __REG(TARGET_DEV25G,\ - t, 8, 0, 0, 1, 60, 0, 0, 1, 4) +/* SPARX5 ONLY */ +/* DEV10G:MAC_CFG_STATUS:MAC_ENA_CFG */ +#define DEV25G_MAC_ENA_CFG(t) \ + __REG(TARGET_DEV25G, t, 8, 0, 0, 1, 60, 0, 0, 1, 4) #define DEV25G_MAC_ENA_CFG_RX_ENA BIT(4) #define DEV25G_MAC_ENA_CFG_RX_ENA_SET(x)\ @@ -2647,9 +2897,10 @@ enum sparx5_target { #define DEV25G_MAC_ENA_CFG_TX_ENA_GET(x)\ FIELD_GET(DEV25G_MAC_ENA_CFG_TX_ENA, x) -/* DEV10G:MAC_CFG_STATUS:MAC_MAXLEN_CFG */ -#define DEV25G_MAC_MAXLEN_CFG(t) __REG(TARGET_DEV25G,\ - t, 8, 0, 0, 1, 60, 8, 0, 1, 4) +/* SPARX5 ONLY */ +/* DEV10G:MAC_CFG_STATUS:MAC_MAXLEN_CFG */ +#define DEV25G_MAC_MAXLEN_CFG(t) \ + __REG(TARGET_DEV25G, t, 8, 0, 0, 1, 60, 8, 0, 1, 4) #define DEV25G_MAC_MAXLEN_CFG_MAX_LEN_TAG_CHK BIT(16) #define DEV25G_MAC_MAXLEN_CFG_MAX_LEN_TAG_CHK_SET(x)\ @@ -2663,9 +2914,10 @@ enum sparx5_target { #define DEV25G_MAC_MAXLEN_CFG_MAX_LEN_GET(x)\ FIELD_GET(DEV25G_MAC_MAXLEN_CFG_MAX_LEN, x) -/* DEV10G:MAC_CFG_STATUS:MAC_ADV_CHK_CFG */ -#define DEV25G_MAC_ADV_CHK_CFG(t) __REG(TARGET_DEV25G,\ - t, 8, 0, 0, 1, 60, 28, 0, 1, 4) +/* SPARX5 ONLY */ +/* DEV10G:MAC_CFG_STATUS:MAC_ADV_CHK_CFG */ +#define DEV25G_MAC_ADV_CHK_CFG(t) \ + __REG(TARGET_DEV25G, t, 8, 0, 0, 1, 60, 28, 0, 1, 4) #define DEV25G_MAC_ADV_CHK_CFG_EXT_EOP_CHK_ENA BIT(24) #define DEV25G_MAC_ADV_CHK_CFG_EXT_EOP_CHK_ENA_SET(x)\ @@ -2709,9 +2961,10 @@ enum sparx5_target { #define DEV25G_MAC_ADV_CHK_CFG_INR_ERR_ENA_GET(x)\ FIELD_GET(DEV25G_MAC_ADV_CHK_CFG_INR_ERR_ENA, x) -/* DEV10G:DEV_CFG_STATUS:DEV_RST_CTRL */ -#define DEV25G_DEV_RST_CTRL(t) __REG(TARGET_DEV25G,\ - t, 8, 436, 0, 1, 52, 0, 0, 1, 4) +/* SPARX5 ONLY */ +/* DEV10G:DEV_CFG_STATUS:DEV_RST_CTRL */ +#define DEV25G_DEV_RST_CTRL(t) \ + __REG(TARGET_DEV25G, t, 8, 436, 0, 1, 52, 0, 0, 1, 4) #define DEV25G_DEV_RST_CTRL_PARDET_MODE_ENA BIT(28) #define DEV25G_DEV_RST_CTRL_PARDET_MODE_ENA_SET(x)\ @@ -2767,9 +3020,10 @@ enum sparx5_target { #define DEV25G_DEV_RST_CTRL_MAC_RX_RST_GET(x)\ FIELD_GET(DEV25G_DEV_RST_CTRL_MAC_RX_RST, x) -/* DEV10G:PCS25G_CFG_STATUS:PCS25G_CFG */ -#define DEV25G_PCS25G_CFG(t) __REG(TARGET_DEV25G,\ - t, 8, 488, 0, 1, 32, 0, 0, 1, 4) +/* SPARX5 ONLY 
*/ +/* DEV10G:PCS25G_CFG_STATUS:PCS25G_CFG */ +#define DEV25G_PCS25G_CFG(t) \ + __REG(TARGET_DEV25G, t, 8, 488, 0, 1, 32, 0, 0, 1, 4) #define DEV25G_PCS25G_CFG_PCS25G_ENA BIT(0) #define DEV25G_PCS25G_CFG_PCS25G_ENA_SET(x)\ @@ -2777,9 +3031,10 @@ enum sparx5_target { #define DEV25G_PCS25G_CFG_PCS25G_ENA_GET(x)\ FIELD_GET(DEV25G_PCS25G_CFG_PCS25G_ENA, x) -/* DEV10G:PCS25G_CFG_STATUS:PCS25G_SD_CFG */ -#define DEV25G_PCS25G_SD_CFG(t) __REG(TARGET_DEV25G,\ - t, 8, 488, 0, 1, 32, 4, 0, 1, 4) +/* SPARX5 ONLY */ +/* DEV10G:PCS25G_CFG_STATUS:PCS25G_SD_CFG */ +#define DEV25G_PCS25G_SD_CFG(t) \ + __REG(TARGET_DEV25G, t, 8, 488, 0, 1, 32, 4, 0, 1, 4) #define DEV25G_PCS25G_SD_CFG_SD_SEL BIT(8) #define DEV25G_PCS25G_SD_CFG_SD_SEL_SET(x)\ @@ -2799,9 +3054,10 @@ enum sparx5_target { #define DEV25G_PCS25G_SD_CFG_SD_ENA_GET(x)\ FIELD_GET(DEV25G_PCS25G_SD_CFG_SD_ENA, x) -/* DEV1G:DEV_CFG_STATUS:DEV_RST_CTRL */ -#define DEV2G5_DEV_RST_CTRL(t) __REG(TARGET_DEV2G5,\ - t, 65, 0, 0, 1, 36, 0, 0, 1, 4) +/* DEV1G:DEV_CFG_STATUS:DEV_RST_CTRL */ +#define DEV2G5_DEV_RST_CTRL(t) \ + __REG(TARGET_DEV2G5, t, regs->tsize[TC_DEV2G5], 0, 0, 1, 36, 0, 0, 1, \ + 4) #define DEV2G5_DEV_RST_CTRL_USXGMII_OSET_FILTER_DIS BIT(23) #define DEV2G5_DEV_RST_CTRL_USXGMII_OSET_FILTER_DIS_SET(x)\ @@ -2851,9 +3107,10 @@ enum sparx5_target { #define DEV2G5_DEV_RST_CTRL_MAC_RX_RST_GET(x)\ FIELD_GET(DEV2G5_DEV_RST_CTRL_MAC_RX_RST, x) -/* DEV1G:MAC_CFG_STATUS:MAC_ENA_CFG */ -#define DEV2G5_MAC_ENA_CFG(t) __REG(TARGET_DEV2G5,\ - t, 65, 52, 0, 1, 36, 0, 0, 1, 4) +/* DEV1G:MAC_CFG_STATUS:MAC_ENA_CFG */ +#define DEV2G5_MAC_ENA_CFG(t) \ + __REG(TARGET_DEV2G5, t, regs->tsize[TC_DEV2G5], 52, 0, 1, 36, 0, 0, 1, \ + 4) #define DEV2G5_MAC_ENA_CFG_RX_ENA BIT(4) #define DEV2G5_MAC_ENA_CFG_RX_ENA_SET(x)\ @@ -2867,9 +3124,10 @@ enum sparx5_target { #define DEV2G5_MAC_ENA_CFG_TX_ENA_GET(x)\ FIELD_GET(DEV2G5_MAC_ENA_CFG_TX_ENA, x) -/* DEV1G:MAC_CFG_STATUS:MAC_MODE_CFG */ -#define DEV2G5_MAC_MODE_CFG(t) __REG(TARGET_DEV2G5,\ - t, 65, 52, 0, 1, 36, 4, 0, 1, 4) +/* DEV1G:MAC_CFG_STATUS:MAC_MODE_CFG */ +#define DEV2G5_MAC_MODE_CFG(t) \ + __REG(TARGET_DEV2G5, t, regs->tsize[TC_DEV2G5], 52, 0, 1, 36, 4, 0, 1, \ + 4) #define DEV2G5_MAC_MODE_CFG_FC_WORD_SYNC_ENA BIT(8) #define DEV2G5_MAC_MODE_CFG_FC_WORD_SYNC_ENA_SET(x)\ @@ -2889,9 +3147,10 @@ enum sparx5_target { #define DEV2G5_MAC_MODE_CFG_FDX_ENA_GET(x)\ FIELD_GET(DEV2G5_MAC_MODE_CFG_FDX_ENA, x) -/* DEV1G:MAC_CFG_STATUS:MAC_MAXLEN_CFG */ -#define DEV2G5_MAC_MAXLEN_CFG(t) __REG(TARGET_DEV2G5,\ - t, 65, 52, 0, 1, 36, 8, 0, 1, 4) +/* DEV1G:MAC_CFG_STATUS:MAC_MAXLEN_CFG */ +#define DEV2G5_MAC_MAXLEN_CFG(t) \ + __REG(TARGET_DEV2G5, t, regs->tsize[TC_DEV2G5], 52, 0, 1, 36, 8, 0, 1, \ + 4) #define DEV2G5_MAC_MAXLEN_CFG_MAX_LEN GENMASK(15, 0) #define DEV2G5_MAC_MAXLEN_CFG_MAX_LEN_SET(x)\ @@ -2899,9 +3158,10 @@ enum sparx5_target { #define DEV2G5_MAC_MAXLEN_CFG_MAX_LEN_GET(x)\ FIELD_GET(DEV2G5_MAC_MAXLEN_CFG_MAX_LEN, x) -/* DEV1G:MAC_CFG_STATUS:MAC_TAGS_CFG */ -#define DEV2G5_MAC_TAGS_CFG(t) __REG(TARGET_DEV2G5,\ - t, 65, 52, 0, 1, 36, 12, 0, 1, 4) +/* DEV1G:MAC_CFG_STATUS:MAC_TAGS_CFG */ +#define DEV2G5_MAC_TAGS_CFG(t) \ + __REG(TARGET_DEV2G5, t, regs->tsize[TC_DEV2G5], 52, 0, 1, 36, 12, 0, 1,\ + 4) #define DEV2G5_MAC_TAGS_CFG_TAG_ID GENMASK(31, 16) #define DEV2G5_MAC_TAGS_CFG_TAG_ID_SET(x)\ @@ -2927,9 +3187,10 @@ enum sparx5_target { #define DEV2G5_MAC_TAGS_CFG_VLAN_AWR_ENA_GET(x)\ FIELD_GET(DEV2G5_MAC_TAGS_CFG_VLAN_AWR_ENA, x) -/* DEV1G:MAC_CFG_STATUS:MAC_TAGS_CFG2 */ -#define DEV2G5_MAC_TAGS_CFG2(t) 
__REG(TARGET_DEV2G5,\ - t, 65, 52, 0, 1, 36, 16, 0, 1, 4) +/* DEV1G:MAC_CFG_STATUS:MAC_TAGS_CFG2 */ +#define DEV2G5_MAC_TAGS_CFG2(t) \ + __REG(TARGET_DEV2G5, t, regs->tsize[TC_DEV2G5], 52, 0, 1, 36, 16, 0, 1,\ + 4) #define DEV2G5_MAC_TAGS_CFG2_TAG_ID3 GENMASK(31, 16) #define DEV2G5_MAC_TAGS_CFG2_TAG_ID3_SET(x)\ @@ -2943,9 +3204,10 @@ enum sparx5_target { #define DEV2G5_MAC_TAGS_CFG2_TAG_ID2_GET(x)\ FIELD_GET(DEV2G5_MAC_TAGS_CFG2_TAG_ID2, x) -/* DEV1G:MAC_CFG_STATUS:MAC_ADV_CHK_CFG */ -#define DEV2G5_MAC_ADV_CHK_CFG(t) __REG(TARGET_DEV2G5,\ - t, 65, 52, 0, 1, 36, 20, 0, 1, 4) +/* DEV1G:MAC_CFG_STATUS:MAC_ADV_CHK_CFG */ +#define DEV2G5_MAC_ADV_CHK_CFG(t) \ + __REG(TARGET_DEV2G5, t, regs->tsize[TC_DEV2G5], 52, 0, 1, 36, 20, 0, 1,\ + 4) #define DEV2G5_MAC_ADV_CHK_CFG_LEN_DROP_ENA BIT(0) #define DEV2G5_MAC_ADV_CHK_CFG_LEN_DROP_ENA_SET(x)\ @@ -2953,9 +3215,10 @@ enum sparx5_target { #define DEV2G5_MAC_ADV_CHK_CFG_LEN_DROP_ENA_GET(x)\ FIELD_GET(DEV2G5_MAC_ADV_CHK_CFG_LEN_DROP_ENA, x) -/* DEV1G:MAC_CFG_STATUS:MAC_IFG_CFG */ -#define DEV2G5_MAC_IFG_CFG(t) __REG(TARGET_DEV2G5,\ - t, 65, 52, 0, 1, 36, 24, 0, 1, 4) +/* DEV1G:MAC_CFG_STATUS:MAC_IFG_CFG */ +#define DEV2G5_MAC_IFG_CFG(t) \ + __REG(TARGET_DEV2G5, t, regs->tsize[TC_DEV2G5], 52, 0, 1, 36, 24, 0, 1,\ + 4) #define DEV2G5_MAC_IFG_CFG_RESTORE_OLD_IPG_CHECK BIT(17) #define DEV2G5_MAC_IFG_CFG_RESTORE_OLD_IPG_CHECK_SET(x)\ @@ -2981,9 +3244,10 @@ enum sparx5_target { #define DEV2G5_MAC_IFG_CFG_RX_IFG1_GET(x)\ FIELD_GET(DEV2G5_MAC_IFG_CFG_RX_IFG1, x) -/* DEV1G:MAC_CFG_STATUS:MAC_HDX_CFG */ -#define DEV2G5_MAC_HDX_CFG(t) __REG(TARGET_DEV2G5,\ - t, 65, 52, 0, 1, 36, 28, 0, 1, 4) +/* DEV1G:MAC_CFG_STATUS:MAC_HDX_CFG */ +#define DEV2G5_MAC_HDX_CFG(t) \ + __REG(TARGET_DEV2G5, t, regs->tsize[TC_DEV2G5], 52, 0, 1, 36, 28, 0, 1,\ + 4) #define DEV2G5_MAC_HDX_CFG_BYPASS_COL_SYNC BIT(26) #define DEV2G5_MAC_HDX_CFG_BYPASS_COL_SYNC_SET(x)\ @@ -3015,9 +3279,10 @@ enum sparx5_target { #define DEV2G5_MAC_HDX_CFG_LATE_COL_POS_GET(x)\ FIELD_GET(DEV2G5_MAC_HDX_CFG_LATE_COL_POS, x) -/* DEV1G:PCS1G_CFG_STATUS:PCS1G_CFG */ -#define DEV2G5_PCS1G_CFG(t) __REG(TARGET_DEV2G5,\ - t, 65, 88, 0, 1, 68, 0, 0, 1, 4) +/* DEV1G:PCS1G_CFG_STATUS:PCS1G_CFG */ +#define DEV2G5_PCS1G_CFG(t) \ + __REG(TARGET_DEV2G5, t, regs->tsize[TC_DEV2G5], 88, 0, 1, 68, 0, 0, 1, \ + 4) #define DEV2G5_PCS1G_CFG_LINK_STATUS_TYPE BIT(4) #define DEV2G5_PCS1G_CFG_LINK_STATUS_TYPE_SET(x)\ @@ -3037,9 +3302,10 @@ enum sparx5_target { #define DEV2G5_PCS1G_CFG_PCS_ENA_GET(x)\ FIELD_GET(DEV2G5_PCS1G_CFG_PCS_ENA, x) -/* DEV1G:PCS1G_CFG_STATUS:PCS1G_MODE_CFG */ -#define DEV2G5_PCS1G_MODE_CFG(t) __REG(TARGET_DEV2G5,\ - t, 65, 88, 0, 1, 68, 4, 0, 1, 4) +/* DEV1G:PCS1G_CFG_STATUS:PCS1G_MODE_CFG */ +#define DEV2G5_PCS1G_MODE_CFG(t) \ + __REG(TARGET_DEV2G5, t, regs->tsize[TC_DEV2G5], 88, 0, 1, 68, 4, 0, 1, \ + 4) #define DEV2G5_PCS1G_MODE_CFG_UNIDIR_MODE_ENA BIT(4) #define DEV2G5_PCS1G_MODE_CFG_UNIDIR_MODE_ENA_SET(x)\ @@ -3059,9 +3325,10 @@ enum sparx5_target { #define DEV2G5_PCS1G_MODE_CFG_SGMII_MODE_ENA_GET(x)\ FIELD_GET(DEV2G5_PCS1G_MODE_CFG_SGMII_MODE_ENA, x) -/* DEV1G:PCS1G_CFG_STATUS:PCS1G_SD_CFG */ -#define DEV2G5_PCS1G_SD_CFG(t) __REG(TARGET_DEV2G5,\ - t, 65, 88, 0, 1, 68, 8, 0, 1, 4) +/* DEV1G:PCS1G_CFG_STATUS:PCS1G_SD_CFG */ +#define DEV2G5_PCS1G_SD_CFG(t) \ + __REG(TARGET_DEV2G5, t, regs->tsize[TC_DEV2G5], 88, 0, 1, 68, 8, 0, 1, \ + 4) #define DEV2G5_PCS1G_SD_CFG_SD_SEL BIT(8) #define DEV2G5_PCS1G_SD_CFG_SD_SEL_SET(x)\ @@ -3081,9 +3348,10 @@ enum sparx5_target { #define DEV2G5_PCS1G_SD_CFG_SD_ENA_GET(x)\ 
FIELD_GET(DEV2G5_PCS1G_SD_CFG_SD_ENA, x)
-/* DEV1G:PCS1G_CFG_STATUS:PCS1G_ANEG_CFG */
-#define DEV2G5_PCS1G_ANEG_CFG(t) __REG(TARGET_DEV2G5,\
- t, 65, 88, 0, 1, 68, 12, 0, 1, 4)
+/* DEV1G:PCS1G_CFG_STATUS:PCS1G_ANEG_CFG */
+#define DEV2G5_PCS1G_ANEG_CFG(t) \
+ __REG(TARGET_DEV2G5, t, regs->tsize[TC_DEV2G5], 88, 0, 1, 68, 12, 0, 1,\
+ 4)
#define DEV2G5_PCS1G_ANEG_CFG_ADV_ABILITY GENMASK(31, 16)
#define DEV2G5_PCS1G_ANEG_CFG_ADV_ABILITY_SET(x)\
@@ -3109,9 +3377,10 @@ enum sparx5_target {
#define DEV2G5_PCS1G_ANEG_CFG_ANEG_ENA_GET(x)\
FIELD_GET(DEV2G5_PCS1G_ANEG_CFG_ANEG_ENA, x)
-/* DEV1G:PCS1G_CFG_STATUS:PCS1G_LB_CFG */
-#define DEV2G5_PCS1G_LB_CFG(t) __REG(TARGET_DEV2G5,\
- t, 65, 88, 0, 1, 68, 20, 0, 1, 4)
+/* DEV1G:PCS1G_CFG_STATUS:PCS1G_LB_CFG */
+#define DEV2G5_PCS1G_LB_CFG(t) \
+ __REG(TARGET_DEV2G5, t, regs->tsize[TC_DEV2G5], 88, 0, 1, 68, 20, 0, 1,\
+ 4)
#define DEV2G5_PCS1G_LB_CFG_RA_ENA BIT(4)
#define DEV2G5_PCS1G_LB_CFG_RA_ENA_SET(x)\
@@ -3131,9 +3400,10 @@ enum sparx5_target {
#define DEV2G5_PCS1G_LB_CFG_TBI_HOST_LB_ENA_GET(x)\
FIELD_GET(DEV2G5_PCS1G_LB_CFG_TBI_HOST_LB_ENA, x)
-/* DEV1G:PCS1G_CFG_STATUS:PCS1G_ANEG_STATUS */
-#define DEV2G5_PCS1G_ANEG_STATUS(t) __REG(TARGET_DEV2G5,\
- t, 65, 88, 0, 1, 68, 32, 0, 1, 4)
+/* DEV1G:PCS1G_CFG_STATUS:PCS1G_ANEG_STATUS */
+#define DEV2G5_PCS1G_ANEG_STATUS(t) \
+ __REG(TARGET_DEV2G5, t, regs->tsize[TC_DEV2G5], 88, 0, 1, 68, 32, 0, 1,\
+ 4)
#define DEV2G5_PCS1G_ANEG_STATUS_LP_ADV_ABILITY GENMASK(31, 16)
#define DEV2G5_PCS1G_ANEG_STATUS_LP_ADV_ABILITY_SET(x)\
@@ -3159,9 +3429,10 @@ enum sparx5_target {
#define DEV2G5_PCS1G_ANEG_STATUS_ANEG_COMPLETE_GET(x)\
FIELD_GET(DEV2G5_PCS1G_ANEG_STATUS_ANEG_COMPLETE, x)
-/* DEV1G:PCS1G_CFG_STATUS:PCS1G_LINK_STATUS */
-#define DEV2G5_PCS1G_LINK_STATUS(t) __REG(TARGET_DEV2G5,\
- t, 65, 88, 0, 1, 68, 40, 0, 1, 4)
+/* DEV1G:PCS1G_CFG_STATUS:PCS1G_LINK_STATUS */
+#define DEV2G5_PCS1G_LINK_STATUS(t) \
+ __REG(TARGET_DEV2G5, t, regs->tsize[TC_DEV2G5], 88, 0, 1, 68, 40, 0, 1,\
+ 4)
#define DEV2G5_PCS1G_LINK_STATUS_DELAY_VAR GENMASK(15, 12)
#define DEV2G5_PCS1G_LINK_STATUS_DELAY_VAR_SET(x)\
@@ -3187,9 +3458,10 @@ enum sparx5_target {
#define DEV2G5_PCS1G_LINK_STATUS_SYNC_STATUS_GET(x)\
FIELD_GET(DEV2G5_PCS1G_LINK_STATUS_SYNC_STATUS, x)
-/* DEV1G:PCS1G_CFG_STATUS:PCS1G_STICKY */
-#define DEV2G5_PCS1G_STICKY(t) __REG(TARGET_DEV2G5,\
- t, 65, 88, 0, 1, 68, 48, 0, 1, 4)
+/* DEV1G:PCS1G_CFG_STATUS:PCS1G_STICKY */
+#define DEV2G5_PCS1G_STICKY(t) \
+ __REG(TARGET_DEV2G5, t, regs->tsize[TC_DEV2G5], 88, 0, 1, 68, 48, 0, 1,\
+ 4)
#define DEV2G5_PCS1G_STICKY_LINK_DOWN_STICKY BIT(4)
#define DEV2G5_PCS1G_STICKY_LINK_DOWN_STICKY_SET(x)\
@@ -3203,9 +3475,10 @@ enum sparx5_target {
#define DEV2G5_PCS1G_STICKY_OUT_OF_SYNC_STICKY_GET(x)\
FIELD_GET(DEV2G5_PCS1G_STICKY_OUT_OF_SYNC_STICKY, x)
-/* DEV1G:PCS_FX100_CONFIGURATION:PCS_FX100_CFG */
-#define DEV2G5_PCS_FX100_CFG(t) __REG(TARGET_DEV2G5,\
- t, 65, 164, 0, 1, 4, 0, 0, 1, 4)
+/* DEV1G:PCS_FX100_CONFIGURATION:PCS_FX100_CFG */
+#define DEV2G5_PCS_FX100_CFG(t) \
+ __REG(TARGET_DEV2G5, t, regs->tsize[TC_DEV2G5], 164, 0, 1, 4, 0, 0, 1, \
+ 4)
#define DEV2G5_PCS_FX100_CFG_SD_SEL BIT(26)
#define DEV2G5_PCS_FX100_CFG_SD_SEL_SET(x)\
@@ -3285,9 +3558,10 @@ enum sparx5_target {
#define DEV2G5_PCS_FX100_CFG_PCS_ENA_GET(x)\
FIELD_GET(DEV2G5_PCS_FX100_CFG_PCS_ENA, x)
-/* DEV1G:PCS_FX100_STATUS:PCS_FX100_STATUS */
-#define DEV2G5_PCS_FX100_STATUS(t) __REG(TARGET_DEV2G5,\
- t, 65, 168, 0, 1, 4, 0, 0, 1, 4)
+/* DEV1G:PCS_FX100_STATUS:PCS_FX100_STATUS */
+#define DEV2G5_PCS_FX100_STATUS(t) \
+ __REG(TARGET_DEV2G5, t, regs->tsize[TC_DEV2G5], 168, 0, 1, 4, 0, 0, 1, \
+ 4)
#define DEV2G5_PCS_FX100_STATUS_EDGE_POS_PTP GENMASK(11, 8)
#define DEV2G5_PCS_FX100_STATUS_EDGE_POS_PTP_SET(x)\
@@ -3337,9 +3611,9 @@ enum sparx5_target {
#define DEV2G5_PCS_FX100_STATUS_SYNC_STATUS_GET(x)\
FIELD_GET(DEV2G5_PCS_FX100_STATUS_SYNC_STATUS, x)
-/* DEV10G:MAC_CFG_STATUS:MAC_ENA_CFG */
-#define DEV5G_MAC_ENA_CFG(t) __REG(TARGET_DEV5G,\
- t, 13, 0, 0, 1, 60, 0, 0, 1, 4)
+/* DEV10G:MAC_CFG_STATUS:MAC_ENA_CFG */
+#define DEV5G_MAC_ENA_CFG(t) \
+ __REG(TARGET_DEV5G, t, regs->tsize[TC_DEV5G], 0, 0, 1, 60, 0, 0, 1, 4)
#define DEV5G_MAC_ENA_CFG_RX_ENA BIT(4)
#define DEV5G_MAC_ENA_CFG_RX_ENA_SET(x)\
@@ -3353,9 +3627,9 @@ enum sparx5_target {
#define DEV5G_MAC_ENA_CFG_TX_ENA_GET(x)\
FIELD_GET(DEV5G_MAC_ENA_CFG_TX_ENA, x)
-/* DEV10G:MAC_CFG_STATUS:MAC_MAXLEN_CFG */
-#define DEV5G_MAC_MAXLEN_CFG(t) __REG(TARGET_DEV5G,\
- t, 13, 0, 0, 1, 60, 8, 0, 1, 4)
+/* DEV10G:MAC_CFG_STATUS:MAC_MAXLEN_CFG */
+#define DEV5G_MAC_MAXLEN_CFG(t) \
+ __REG(TARGET_DEV5G, t, regs->tsize[TC_DEV5G], 0, 0, 1, 60, 8, 0, 1, 4)
#define DEV5G_MAC_MAXLEN_CFG_MAX_LEN_TAG_CHK BIT(16)
#define DEV5G_MAC_MAXLEN_CFG_MAX_LEN_TAG_CHK_SET(x)\
@@ -3369,9 +3643,10 @@ enum sparx5_target {
#define DEV5G_MAC_MAXLEN_CFG_MAX_LEN_GET(x)\
FIELD_GET(DEV5G_MAC_MAXLEN_CFG_MAX_LEN, x)
-/* DEV10G:MAC_CFG_STATUS:MAC_ADV_CHK_CFG */
-#define DEV5G_MAC_ADV_CHK_CFG(t) __REG(TARGET_DEV5G,\
- t, 13, 0, 0, 1, 60, 28, 0, 1, 4)
+/* DEV10G:MAC_CFG_STATUS:MAC_ADV_CHK_CFG */
+#define DEV5G_MAC_ADV_CHK_CFG(t) \
+ __REG(TARGET_DEV5G, t, regs->tsize[TC_DEV5G], 0, 0, 1, 60, 28, 0, 1, \
+ 4)
#define DEV5G_MAC_ADV_CHK_CFG_EXT_EOP_CHK_ENA BIT(24)
#define DEV5G_MAC_ADV_CHK_CFG_EXT_EOP_CHK_ENA_SET(x)\
@@ -3415,325 +3690,405 @@ enum sparx5_target {
#define DEV5G_MAC_ADV_CHK_CFG_INR_ERR_ENA_GET(x)\
FIELD_GET(DEV5G_MAC_ADV_CHK_CFG_INR_ERR_ENA, x)
-/* DEV10G:DEV_STATISTICS_32BIT:RX_SYMBOL_ERR_CNT */
-#define DEV5G_RX_SYMBOL_ERR_CNT(t) __REG(TARGET_DEV5G,\
- t, 13, 60, 0, 1, 312, 0, 0, 1, 4)
-
-/* DEV10G:DEV_STATISTICS_32BIT:RX_PAUSE_CNT */
-#define DEV5G_RX_PAUSE_CNT(t) __REG(TARGET_DEV5G,\
- t, 13, 60, 0, 1, 312, 4, 0, 1, 4)
-
-/* DEV10G:DEV_STATISTICS_32BIT:RX_UNSUP_OPCODE_CNT */
-#define DEV5G_RX_UNSUP_OPCODE_CNT(t) __REG(TARGET_DEV5G,\
- t, 13, 60, 0, 1, 312, 8, 0, 1, 4)
-
-/* DEV10G:DEV_STATISTICS_32BIT:RX_UC_CNT */
-#define DEV5G_RX_UC_CNT(t) __REG(TARGET_DEV5G,\
- t, 13, 60, 0, 1, 312, 12, 0, 1, 4)
-
-/* DEV10G:DEV_STATISTICS_32BIT:RX_MC_CNT */
-#define DEV5G_RX_MC_CNT(t) __REG(TARGET_DEV5G,\
- t, 13, 60, 0, 1, 312, 16, 0, 1, 4)
-
-/* DEV10G:DEV_STATISTICS_32BIT:RX_BC_CNT */
-#define DEV5G_RX_BC_CNT(t) __REG(TARGET_DEV5G,\
- t, 13, 60, 0, 1, 312, 20, 0, 1, 4)
-
-/* DEV10G:DEV_STATISTICS_32BIT:RX_CRC_ERR_CNT */
-#define DEV5G_RX_CRC_ERR_CNT(t) __REG(TARGET_DEV5G,\
- t, 13, 60, 0, 1, 312, 24, 0, 1, 4)
-
-/* DEV10G:DEV_STATISTICS_32BIT:RX_UNDERSIZE_CNT */
-#define DEV5G_RX_UNDERSIZE_CNT(t) __REG(TARGET_DEV5G,\
- t, 13, 60, 0, 1, 312, 28, 0, 1, 4)
-
-/* DEV10G:DEV_STATISTICS_32BIT:RX_FRAGMENTS_CNT */
-#define DEV5G_RX_FRAGMENTS_CNT(t) __REG(TARGET_DEV5G,\
- t, 13, 60, 0, 1, 312, 32, 0, 1, 4)
-
-/* DEV10G:DEV_STATISTICS_32BIT:RX_IN_RANGE_LEN_ERR_CNT */
-#define DEV5G_RX_IN_RANGE_LEN_ERR_CNT(t) __REG(TARGET_DEV5G,\
- t, 13, 60, 0, 1, 312, 36, 0, 1, 4)
-
-/* DEV10G:DEV_STATISTICS_32BIT:RX_OUT_OF_RANGE_LEN_ERR_CNT */
-#define DEV5G_RX_OUT_OF_RANGE_LEN_ERR_CNT(t) __REG(TARGET_DEV5G,\
- t, 13, 60, 0, 1, 312, 40, 0, 1, 4)
-
-/* DEV10G:DEV_STATISTICS_32BIT:RX_OVERSIZE_CNT */
-#define DEV5G_RX_OVERSIZE_CNT(t) __REG(TARGET_DEV5G,\
- t, 13, 60, 0, 1, 312, 44, 0, 1, 4)
-
-/* DEV10G:DEV_STATISTICS_32BIT:RX_JABBERS_CNT */
-#define DEV5G_RX_JABBERS_CNT(t) __REG(TARGET_DEV5G,\
- t, 13, 60, 0, 1, 312, 48, 0, 1, 4)
-
-/* DEV10G:DEV_STATISTICS_32BIT:RX_SIZE64_CNT */
-#define DEV5G_RX_SIZE64_CNT(t) __REG(TARGET_DEV5G,\
- t, 13, 60, 0, 1, 312, 52, 0, 1, 4)
-
-/* DEV10G:DEV_STATISTICS_32BIT:RX_SIZE65TO127_CNT */
-#define DEV5G_RX_SIZE65TO127_CNT(t) __REG(TARGET_DEV5G,\
- t, 13, 60, 0, 1, 312, 56, 0, 1, 4)
-
-/* DEV10G:DEV_STATISTICS_32BIT:RX_SIZE128TO255_CNT */
-#define DEV5G_RX_SIZE128TO255_CNT(t) __REG(TARGET_DEV5G,\
- t, 13, 60, 0, 1, 312, 60, 0, 1, 4)
-
-/* DEV10G:DEV_STATISTICS_32BIT:RX_SIZE256TO511_CNT */
-#define DEV5G_RX_SIZE256TO511_CNT(t) __REG(TARGET_DEV5G,\
- t, 13, 60, 0, 1, 312, 64, 0, 1, 4)
-
-/* DEV10G:DEV_STATISTICS_32BIT:RX_SIZE512TO1023_CNT */
-#define DEV5G_RX_SIZE512TO1023_CNT(t) __REG(TARGET_DEV5G,\
- t, 13, 60, 0, 1, 312, 68, 0, 1, 4)
-
-/* DEV10G:DEV_STATISTICS_32BIT:RX_SIZE1024TO1518_CNT */
-#define DEV5G_RX_SIZE1024TO1518_CNT(t) __REG(TARGET_DEV5G,\
- t, 13, 60, 0, 1, 312, 72, 0, 1, 4)
-
-/* DEV10G:DEV_STATISTICS_32BIT:RX_SIZE1519TOMAX_CNT */
-#define DEV5G_RX_SIZE1519TOMAX_CNT(t) __REG(TARGET_DEV5G,\
- t, 13, 60, 0, 1, 312, 76, 0, 1, 4)
-
-/* DEV10G:DEV_STATISTICS_32BIT:RX_IPG_SHRINK_CNT */
-#define DEV5G_RX_IPG_SHRINK_CNT(t) __REG(TARGET_DEV5G,\
- t, 13, 60, 0, 1, 312, 80, 0, 1, 4)
-
-/* DEV10G:DEV_STATISTICS_32BIT:TX_PAUSE_CNT */
-#define DEV5G_TX_PAUSE_CNT(t) __REG(TARGET_DEV5G,\
- t, 13, 60, 0, 1, 312, 84, 0, 1, 4)
-
-/* DEV10G:DEV_STATISTICS_32BIT:TX_UC_CNT */
-#define DEV5G_TX_UC_CNT(t) __REG(TARGET_DEV5G,\
- t, 13, 60, 0, 1, 312, 88, 0, 1, 4)
-
-/* DEV10G:DEV_STATISTICS_32BIT:TX_MC_CNT */
-#define DEV5G_TX_MC_CNT(t) __REG(TARGET_DEV5G,\
- t, 13, 60, 0, 1, 312, 92, 0, 1, 4)
-
-/* DEV10G:DEV_STATISTICS_32BIT:TX_BC_CNT */
-#define DEV5G_TX_BC_CNT(t) __REG(TARGET_DEV5G,\
- t, 13, 60, 0, 1, 312, 96, 0, 1, 4)
-
-/* DEV10G:DEV_STATISTICS_32BIT:TX_SIZE64_CNT */
-#define DEV5G_TX_SIZE64_CNT(t) __REG(TARGET_DEV5G,\
- t, 13, 60, 0, 1, 312, 100, 0, 1, 4)
-
-/* DEV10G:DEV_STATISTICS_32BIT:TX_SIZE65TO127_CNT */
-#define DEV5G_TX_SIZE65TO127_CNT(t) __REG(TARGET_DEV5G,\
- t, 13, 60, 0, 1, 312, 104, 0, 1, 4)
-
-/* DEV10G:DEV_STATISTICS_32BIT:TX_SIZE128TO255_CNT */
-#define DEV5G_TX_SIZE128TO255_CNT(t) __REG(TARGET_DEV5G,\
- t, 13, 60, 0, 1, 312, 108, 0, 1, 4)
-
-/* DEV10G:DEV_STATISTICS_32BIT:TX_SIZE256TO511_CNT */
-#define DEV5G_TX_SIZE256TO511_CNT(t) __REG(TARGET_DEV5G,\
- t, 13, 60, 0, 1, 312, 112, 0, 1, 4)
-
-/* DEV10G:DEV_STATISTICS_32BIT:TX_SIZE512TO1023_CNT */
-#define DEV5G_TX_SIZE512TO1023_CNT(t) __REG(TARGET_DEV5G,\
- t, 13, 60, 0, 1, 312, 116, 0, 1, 4)
-
-/* DEV10G:DEV_STATISTICS_32BIT:TX_SIZE1024TO1518_CNT */
-#define DEV5G_TX_SIZE1024TO1518_CNT(t) __REG(TARGET_DEV5G,\
- t, 13, 60, 0, 1, 312, 120, 0, 1, 4)
-
-/* DEV10G:DEV_STATISTICS_32BIT:TX_SIZE1519TOMAX_CNT */
-#define DEV5G_TX_SIZE1519TOMAX_CNT(t) __REG(TARGET_DEV5G,\
- t, 13, 60, 0, 1, 312, 124, 0, 1, 4)
-
-/* DEV10G:DEV_STATISTICS_32BIT:RX_ALIGNMENT_LOST_CNT */
-#define DEV5G_RX_ALIGNMENT_LOST_CNT(t) __REG(TARGET_DEV5G,\
- t, 13, 60, 0, 1, 312, 128, 0, 1, 4)
-
-/* DEV10G:DEV_STATISTICS_32BIT:RX_TAGGED_FRMS_CNT */
-#define DEV5G_RX_TAGGED_FRMS_CNT(t) __REG(TARGET_DEV5G,\
- t, 13, 60, 0, 1, 312, 132, 0, 1, 4)
-
-/* DEV10G:DEV_STATISTICS_32BIT:RX_UNTAGGED_FRMS_CNT */
-#define DEV5G_RX_UNTAGGED_FRMS_CNT(t) __REG(TARGET_DEV5G,\
- t, 13, 60, 0, 1, 312, 136, 0, 1, 4)
-
-/* DEV10G:DEV_STATISTICS_32BIT:TX_TAGGED_FRMS_CNT */
-#define DEV5G_TX_TAGGED_FRMS_CNT(t) __REG(TARGET_DEV5G,\
- t, 13, 60, 0, 1, 312, 140, 0, 1, 4)
-
-/* DEV10G:DEV_STATISTICS_32BIT:TX_UNTAGGED_FRMS_CNT */
-#define DEV5G_TX_UNTAGGED_FRMS_CNT(t) __REG(TARGET_DEV5G,\
- t, 13, 60, 0, 1, 312, 144, 0, 1, 4)
-
-/* DEV10G:DEV_STATISTICS_32BIT:PMAC_RX_SYMBOL_ERR_CNT */
-#define DEV5G_PMAC_RX_SYMBOL_ERR_CNT(t) __REG(TARGET_DEV5G,\
- t, 13, 60, 0, 1, 312, 148, 0, 1, 4)
-
-/* DEV10G:DEV_STATISTICS_32BIT:PMAC_RX_PAUSE_CNT */
-#define DEV5G_PMAC_RX_PAUSE_CNT(t) __REG(TARGET_DEV5G,\
- t, 13, 60, 0, 1, 312, 152, 0, 1, 4)
-
-/* DEV10G:DEV_STATISTICS_32BIT:PMAC_RX_UNSUP_OPCODE_CNT */
-#define DEV5G_PMAC_RX_UNSUP_OPCODE_CNT(t) __REG(TARGET_DEV5G,\
- t, 13, 60, 0, 1, 312, 156, 0, 1, 4)
-
-/* DEV10G:DEV_STATISTICS_32BIT:PMAC_RX_UC_CNT */
-#define DEV5G_PMAC_RX_UC_CNT(t) __REG(TARGET_DEV5G,\
- t, 13, 60, 0, 1, 312, 160, 0, 1, 4)
-
-/* DEV10G:DEV_STATISTICS_32BIT:PMAC_RX_MC_CNT */
-#define DEV5G_PMAC_RX_MC_CNT(t) __REG(TARGET_DEV5G,\
- t, 13, 60, 0, 1, 312, 164, 0, 1, 4)
-
-/* DEV10G:DEV_STATISTICS_32BIT:PMAC_RX_BC_CNT */
-#define DEV5G_PMAC_RX_BC_CNT(t) __REG(TARGET_DEV5G,\
- t, 13, 60, 0, 1, 312, 168, 0, 1, 4)
-
-/* DEV10G:DEV_STATISTICS_32BIT:PMAC_RX_CRC_ERR_CNT */
-#define DEV5G_PMAC_RX_CRC_ERR_CNT(t) __REG(TARGET_DEV5G,\
- t, 13, 60, 0, 1, 312, 172, 0, 1, 4)
-
-/* DEV10G:DEV_STATISTICS_32BIT:PMAC_RX_UNDERSIZE_CNT */
-#define DEV5G_PMAC_RX_UNDERSIZE_CNT(t) __REG(TARGET_DEV5G,\
- t, 13, 60, 0, 1, 312, 176, 0, 1, 4)
-
-/* DEV10G:DEV_STATISTICS_32BIT:PMAC_RX_FRAGMENTS_CNT */
-#define DEV5G_PMAC_RX_FRAGMENTS_CNT(t) __REG(TARGET_DEV5G,\
- t, 13, 60, 0, 1, 312, 180, 0, 1, 4)
-
-/* DEV10G:DEV_STATISTICS_32BIT:PMAC_RX_IN_RANGE_LEN_ERR_CNT */
-#define DEV5G_PMAC_RX_IN_RANGE_LEN_ERR_CNT(t) __REG(TARGET_DEV5G,\
- t, 13, 60, 0, 1, 312, 184, 0, 1, 4)
-
-/* DEV10G:DEV_STATISTICS_32BIT:PMAC_RX_OUT_OF_RANGE_LEN_ERR_CNT */
-#define DEV5G_PMAC_RX_OUT_OF_RANGE_LEN_ERR_CNT(t) __REG(TARGET_DEV5G,\
- t, 13, 60, 0, 1, 312, 188, 0, 1, 4)
-
-/* DEV10G:DEV_STATISTICS_32BIT:PMAC_RX_OVERSIZE_CNT */
-#define DEV5G_PMAC_RX_OVERSIZE_CNT(t) __REG(TARGET_DEV5G,\
- t, 13, 60, 0, 1, 312, 192, 0, 1, 4)
-
-/* DEV10G:DEV_STATISTICS_32BIT:PMAC_RX_JABBERS_CNT */
-#define DEV5G_PMAC_RX_JABBERS_CNT(t) __REG(TARGET_DEV5G,\
- t, 13, 60, 0, 1, 312, 196, 0, 1, 4)
-
-/* DEV10G:DEV_STATISTICS_32BIT:PMAC_RX_SIZE64_CNT */
-#define DEV5G_PMAC_RX_SIZE64_CNT(t) __REG(TARGET_DEV5G,\
- t, 13, 60, 0, 1, 312, 200, 0, 1, 4)
-
-/* DEV10G:DEV_STATISTICS_32BIT:PMAC_RX_SIZE65TO127_CNT */
-#define DEV5G_PMAC_RX_SIZE65TO127_CNT(t) __REG(TARGET_DEV5G,\
- t, 13, 60, 0, 1, 312, 204, 0, 1, 4)
-
-/* DEV10G:DEV_STATISTICS_32BIT:PMAC_RX_SIZE128TO255_CNT */
-#define DEV5G_PMAC_RX_SIZE128TO255_CNT(t) __REG(TARGET_DEV5G,\
- t, 13, 60, 0, 1, 312, 208, 0, 1, 4)
-
-/* DEV10G:DEV_STATISTICS_32BIT:PMAC_RX_SIZE256TO511_CNT */
-#define DEV5G_PMAC_RX_SIZE256TO511_CNT(t) __REG(TARGET_DEV5G,\
- t, 13, 60, 0, 1, 312, 212, 0, 1, 4)
-
-/* DEV10G:DEV_STATISTICS_32BIT:PMAC_RX_SIZE512TO1023_CNT */
-#define DEV5G_PMAC_RX_SIZE512TO1023_CNT(t) __REG(TARGET_DEV5G,\
- t, 13, 60, 0, 1, 312, 216, 0, 1, 4)
-
-/* DEV10G:DEV_STATISTICS_32BIT:PMAC_RX_SIZE1024TO1518_CNT */
-#define DEV5G_PMAC_RX_SIZE1024TO1518_CNT(t) __REG(TARGET_DEV5G,\
- t, 13, 60, 0, 1, 312, 220, 0, 1, 4)
-
-/* DEV10G:DEV_STATISTICS_32BIT:PMAC_RX_SIZE1519TOMAX_CNT */
-#define DEV5G_PMAC_RX_SIZE1519TOMAX_CNT(t) __REG(TARGET_DEV5G,\
- t, 13, 60, 0, 1, 312, 224, 0, 1, 4)
-
-/* DEV10G:DEV_STATISTICS_32BIT:PMAC_TX_PAUSE_CNT */
-#define DEV5G_PMAC_TX_PAUSE_CNT(t) __REG(TARGET_DEV5G,\
- t, 13, 60, 0, 1, 312, 228, 0, 1, 4)
-
-/* DEV10G:DEV_STATISTICS_32BIT:PMAC_TX_UC_CNT */
-#define DEV5G_PMAC_TX_UC_CNT(t) __REG(TARGET_DEV5G,\
- t, 13, 60, 0, 1, 312, 232, 0, 1, 4)
-
-/* DEV10G:DEV_STATISTICS_32BIT:PMAC_TX_MC_CNT */
-#define DEV5G_PMAC_TX_MC_CNT(t) __REG(TARGET_DEV5G,\
- t, 13, 60, 0, 1, 312, 236, 0, 1, 4)
-
-/* DEV10G:DEV_STATISTICS_32BIT:PMAC_TX_BC_CNT */
-#define DEV5G_PMAC_TX_BC_CNT(t) __REG(TARGET_DEV5G,\
- t, 13, 60, 0, 1, 312, 240, 0, 1, 4)
-
-/* DEV10G:DEV_STATISTICS_32BIT:PMAC_TX_SIZE64_CNT */
-#define DEV5G_PMAC_TX_SIZE64_CNT(t) __REG(TARGET_DEV5G,\
- t, 13, 60, 0, 1, 312, 244, 0, 1, 4)
-
-/* DEV10G:DEV_STATISTICS_32BIT:PMAC_TX_SIZE65TO127_CNT */
-#define DEV5G_PMAC_TX_SIZE65TO127_CNT(t) __REG(TARGET_DEV5G,\
- t, 13, 60, 0, 1, 312, 248, 0, 1, 4)
-
-/* DEV10G:DEV_STATISTICS_32BIT:PMAC_TX_SIZE128TO255_CNT */
-#define DEV5G_PMAC_TX_SIZE128TO255_CNT(t) __REG(TARGET_DEV5G,\
- t, 13, 60, 0, 1, 312, 252, 0, 1, 4)
-
-/* DEV10G:DEV_STATISTICS_32BIT:PMAC_TX_SIZE256TO511_CNT */
-#define DEV5G_PMAC_TX_SIZE256TO511_CNT(t) __REG(TARGET_DEV5G,\
- t, 13, 60, 0, 1, 312, 256, 0, 1, 4)
-
-/* DEV10G:DEV_STATISTICS_32BIT:PMAC_TX_SIZE512TO1023_CNT */
-#define DEV5G_PMAC_TX_SIZE512TO1023_CNT(t) __REG(TARGET_DEV5G,\
- t, 13, 60, 0, 1, 312, 260, 0, 1, 4)
-
-/* DEV10G:DEV_STATISTICS_32BIT:PMAC_TX_SIZE1024TO1518_CNT */
-#define DEV5G_PMAC_TX_SIZE1024TO1518_CNT(t) __REG(TARGET_DEV5G,\
- t, 13, 60, 0, 1, 312, 264, 0, 1, 4)
-
-/* DEV10G:DEV_STATISTICS_32BIT:PMAC_TX_SIZE1519TOMAX_CNT */
-#define DEV5G_PMAC_TX_SIZE1519TOMAX_CNT(t) __REG(TARGET_DEV5G,\
- t, 13, 60, 0, 1, 312, 268, 0, 1, 4)
-
-/* DEV10G:DEV_STATISTICS_32BIT:PMAC_RX_ALIGNMENT_LOST_CNT */
-#define DEV5G_PMAC_RX_ALIGNMENT_LOST_CNT(t) __REG(TARGET_DEV5G,\
- t, 13, 60, 0, 1, 312, 272, 0, 1, 4)
-
-/* DEV10G:DEV_STATISTICS_32BIT:MM_RX_ASSEMBLY_ERR_CNT */
-#define DEV5G_MM_RX_ASSEMBLY_ERR_CNT(t) __REG(TARGET_DEV5G,\
- t, 13, 60, 0, 1, 312, 276, 0, 1, 4)
-
-/* DEV10G:DEV_STATISTICS_32BIT:MM_RX_SMD_ERR_CNT */
-#define DEV5G_MM_RX_SMD_ERR_CNT(t) __REG(TARGET_DEV5G,\
- t, 13, 60, 0, 1, 312, 280, 0, 1, 4)
-
-/* DEV10G:DEV_STATISTICS_32BIT:MM_RX_ASSEMBLY_OK_CNT */
-#define DEV5G_MM_RX_ASSEMBLY_OK_CNT(t) __REG(TARGET_DEV5G,\
- t, 13, 60, 0, 1, 312, 284, 0, 1, 4)
-
-/* DEV10G:DEV_STATISTICS_32BIT:MM_RX_MERGE_FRAG_CNT */
-#define DEV5G_MM_RX_MERGE_FRAG_CNT(t) __REG(TARGET_DEV5G,\
- t, 13, 60, 0, 1, 312, 288, 0, 1, 4)
-
-/* DEV10G:DEV_STATISTICS_32BIT:MM_TX_PFRAGMENT_CNT */
-#define DEV5G_MM_TX_PFRAGMENT_CNT(t) __REG(TARGET_DEV5G,\
- t, 13, 60, 0, 1, 312, 292, 0, 1, 4)
-
-/* DEV10G:DEV_STATISTICS_32BIT:RX_HIH_CKSM_ERR_CNT */
-#define DEV5G_RX_HIH_CKSM_ERR_CNT(t) __REG(TARGET_DEV5G,\
- t, 13, 60, 0, 1, 312, 296, 0, 1, 4)
-
-/* DEV10G:DEV_STATISTICS_32BIT:RX_XGMII_PROT_ERR_CNT */
-#define DEV5G_RX_XGMII_PROT_ERR_CNT(t) __REG(TARGET_DEV5G,\
- t, 13, 60, 0, 1, 312, 300, 0, 1, 4)
-
-/* DEV10G:DEV_STATISTICS_32BIT:PMAC_RX_HIH_CKSM_ERR_CNT */
-#define DEV5G_PMAC_RX_HIH_CKSM_ERR_CNT(t) __REG(TARGET_DEV5G,\
- t, 13, 60, 0, 1, 312, 304, 0, 1, 4)
-
-/* DEV10G:DEV_STATISTICS_32BIT:PMAC_RX_XGMII_PROT_ERR_CNT */
-#define DEV5G_PMAC_RX_XGMII_PROT_ERR_CNT(t) __REG(TARGET_DEV5G,\
- t, 13, 60, 0, 1, 312, 308, 0, 1, 4)
-
-/* DEV10G:DEV_STATISTICS_40BIT:RX_IN_BYTES_CNT */
-#define DEV5G_RX_IN_BYTES_CNT(t) __REG(TARGET_DEV5G,\
- t, 13, 372, 0, 1, 64, 0, 0, 1, 4)
-
-/* DEV10G:DEV_STATISTICS_40BIT:RX_IN_BYTES_MSB_CNT */
-#define DEV5G_RX_IN_BYTES_MSB_CNT(t) __REG(TARGET_DEV5G,\
- t, 13, 372, 0, 1, 64, 4, 0, 1, 4)
+/* DEV10G:DEV_STATISTICS_32BIT:RX_SYMBOL_ERR_CNT */
+#define DEV5G_RX_SYMBOL_ERR_CNT(t) \
+ __REG(TARGET_DEV5G, t, regs->tsize[TC_DEV5G], 60, 0, 1, 312, 0, 0, 1, \
+ 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:RX_PAUSE_CNT */
+#define DEV5G_RX_PAUSE_CNT(t) \
+ __REG(TARGET_DEV5G, t, regs->tsize[TC_DEV5G], 60, 0, 1, 312, 4, 0, 1, \
+ 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:RX_UNSUP_OPCODE_CNT */
+#define DEV5G_RX_UNSUP_OPCODE_CNT(t) \
+ __REG(TARGET_DEV5G, t, regs->tsize[TC_DEV5G], 60, 0, 1, 312, 8, 0, 1, \
+ 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:RX_UC_CNT */
+#define DEV5G_RX_UC_CNT(t) \
+ __REG(TARGET_DEV5G, t, regs->tsize[TC_DEV5G], 60, 0, 1, 312, 12, 0, 1, \
+ 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:RX_MC_CNT */
+#define DEV5G_RX_MC_CNT(t) \
+ __REG(TARGET_DEV5G, t, regs->tsize[TC_DEV5G], 60, 0, 1, 312, 16, 0, 1, \
+ 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:RX_BC_CNT */
+#define DEV5G_RX_BC_CNT(t) \
+ __REG(TARGET_DEV5G, t, regs->tsize[TC_DEV5G], 60, 0, 1, 312, 20, 0, 1, \
+ 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:RX_CRC_ERR_CNT */
+#define DEV5G_RX_CRC_ERR_CNT(t) \
+ __REG(TARGET_DEV5G, t, regs->tsize[TC_DEV5G], 60, 0, 1, 312, 24, 0, 1, \
+ 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:RX_UNDERSIZE_CNT */
+#define DEV5G_RX_UNDERSIZE_CNT(t) \
+ __REG(TARGET_DEV5G, t, regs->tsize[TC_DEV5G], 60, 0, 1, 312, 28, 0, 1, \
+ 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:RX_FRAGMENTS_CNT */
+#define DEV5G_RX_FRAGMENTS_CNT(t) \
+ __REG(TARGET_DEV5G, t, regs->tsize[TC_DEV5G], 60, 0, 1, 312, 32, 0, 1, \
+ 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:RX_IN_RANGE_LEN_ERR_CNT */
+#define DEV5G_RX_IN_RANGE_LEN_ERR_CNT(t) \
+ __REG(TARGET_DEV5G, t, regs->tsize[TC_DEV5G], 60, 0, 1, 312, 36, 0, 1, \
+ 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:RX_OUT_OF_RANGE_LEN_ERR_CNT */
+#define DEV5G_RX_OUT_OF_RANGE_LEN_ERR_CNT(t) \
+ __REG(TARGET_DEV5G, t, regs->tsize[TC_DEV5G], 60, 0, 1, 312, 40, 0, 1, \
+ 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:RX_OVERSIZE_CNT */
+#define DEV5G_RX_OVERSIZE_CNT(t) \
+ __REG(TARGET_DEV5G, t, regs->tsize[TC_DEV5G], 60, 0, 1, 312, 44, 0, 1, \
+ 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:RX_JABBERS_CNT */
+#define DEV5G_RX_JABBERS_CNT(t) \
+ __REG(TARGET_DEV5G, t, regs->tsize[TC_DEV5G], 60, 0, 1, 312, 48, 0, 1, \
+ 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:RX_SIZE64_CNT */
+#define DEV5G_RX_SIZE64_CNT(t) \
+ __REG(TARGET_DEV5G, t, regs->tsize[TC_DEV5G], 60, 0, 1, 312, 52, 0, 1, \
+ 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:RX_SIZE65TO127_CNT */
+#define DEV5G_RX_SIZE65TO127_CNT(t) \
+ __REG(TARGET_DEV5G, t, regs->tsize[TC_DEV5G], 60, 0, 1, 312, 56, 0, 1, \
+ 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:RX_SIZE128TO255_CNT */
+#define DEV5G_RX_SIZE128TO255_CNT(t) \
+ __REG(TARGET_DEV5G, t, regs->tsize[TC_DEV5G], 60, 0, 1, 312, 60, 0, 1, \
+ 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:RX_SIZE256TO511_CNT */
+#define DEV5G_RX_SIZE256TO511_CNT(t) \
+ __REG(TARGET_DEV5G, t, regs->tsize[TC_DEV5G], 60, 0, 1, 312, 64, 0, 1, \
+ 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:RX_SIZE512TO1023_CNT */
+#define DEV5G_RX_SIZE512TO1023_CNT(t) \
+ __REG(TARGET_DEV5G, t, regs->tsize[TC_DEV5G], 60, 0, 1, 312, 68, 0, 1, \
+ 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:RX_SIZE1024TO1518_CNT */
+#define DEV5G_RX_SIZE1024TO1518_CNT(t) \
+ __REG(TARGET_DEV5G, t, regs->tsize[TC_DEV5G], 60, 0, 1, 312, 72, 0, 1, \
+ 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:RX_SIZE1519TOMAX_CNT */
+#define DEV5G_RX_SIZE1519TOMAX_CNT(t) \
+ __REG(TARGET_DEV5G, t, regs->tsize[TC_DEV5G], 60, 0, 1, 312, 76, 0, 1, \
+ 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:RX_IPG_SHRINK_CNT */
+#define DEV5G_RX_IPG_SHRINK_CNT(t) \
+ __REG(TARGET_DEV5G, t, regs->tsize[TC_DEV5G], 60, 0, 1, 312, 80, 0, 1, \
+ 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:TX_PAUSE_CNT */
+#define DEV5G_TX_PAUSE_CNT(t) \
+ __REG(TARGET_DEV5G, t, regs->tsize[TC_DEV5G], 60, 0, 1, 312, 84, 0, 1, \
+ 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:TX_UC_CNT */
+#define DEV5G_TX_UC_CNT(t) \
+ __REG(TARGET_DEV5G, t, regs->tsize[TC_DEV5G], 60, 0, 1, 312, 88, 0, 1, \
+ 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:TX_MC_CNT */
+#define DEV5G_TX_MC_CNT(t) \
+ __REG(TARGET_DEV5G, t, regs->tsize[TC_DEV5G], 60, 0, 1, 312, 92, 0, 1, \
+ 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:TX_BC_CNT */
+#define DEV5G_TX_BC_CNT(t) \
+ __REG(TARGET_DEV5G, t, regs->tsize[TC_DEV5G], 60, 0, 1, 312, 96, 0, 1, \
+ 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:TX_SIZE64_CNT */
+#define DEV5G_TX_SIZE64_CNT(t) \
+ __REG(TARGET_DEV5G, t, regs->tsize[TC_DEV5G], 60, 0, 1, 312, 100, 0, 1,\
+ 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:TX_SIZE65TO127_CNT */
+#define DEV5G_TX_SIZE65TO127_CNT(t) \
+ __REG(TARGET_DEV5G, t, regs->tsize[TC_DEV5G], 60, 0, 1, 312, 104, 0, 1,\
+ 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:TX_SIZE128TO255_CNT */
+#define DEV5G_TX_SIZE128TO255_CNT(t) \
+ __REG(TARGET_DEV5G, t, regs->tsize[TC_DEV5G], 60, 0, 1, 312, 108, 0, 1,\
+ 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:TX_SIZE256TO511_CNT */
+#define DEV5G_TX_SIZE256TO511_CNT(t) \
+ __REG(TARGET_DEV5G, t, regs->tsize[TC_DEV5G], 60, 0, 1, 312, 112, 0, 1,\
+ 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:TX_SIZE512TO1023_CNT */
+#define DEV5G_TX_SIZE512TO1023_CNT(t) \
+ __REG(TARGET_DEV5G, t, regs->tsize[TC_DEV5G], 60, 0, 1, 312, 116, 0, 1,\
+ 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:TX_SIZE1024TO1518_CNT */
+#define DEV5G_TX_SIZE1024TO1518_CNT(t) \
+ __REG(TARGET_DEV5G, t, regs->tsize[TC_DEV5G], 60, 0, 1, 312, 120, 0, 1,\
+ 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:TX_SIZE1519TOMAX_CNT */
+#define DEV5G_TX_SIZE1519TOMAX_CNT(t) \
+ __REG(TARGET_DEV5G, t, regs->tsize[TC_DEV5G], 60, 0, 1, 312, 124, 0, 1,\
+ 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:RX_ALIGNMENT_LOST_CNT */
+#define DEV5G_RX_ALIGNMENT_LOST_CNT(t) \
+ __REG(TARGET_DEV5G, t, regs->tsize[TC_DEV5G], 60, 0, 1, 312, 128, 0, 1,\
+ 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:RX_TAGGED_FRMS_CNT */
+#define DEV5G_RX_TAGGED_FRMS_CNT(t) \
+ __REG(TARGET_DEV5G, t, regs->tsize[TC_DEV5G], 60, 0, 1, 312, 132, 0, 1,\
+ 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:RX_UNTAGGED_FRMS_CNT */
+#define DEV5G_RX_UNTAGGED_FRMS_CNT(t) \
+ __REG(TARGET_DEV5G, t, regs->tsize[TC_DEV5G], 60, 0, 1, 312, 136, 0, 1,\
+ 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:TX_TAGGED_FRMS_CNT */
+#define DEV5G_TX_TAGGED_FRMS_CNT(t) \
+ __REG(TARGET_DEV5G, t, regs->tsize[TC_DEV5G], 60, 0, 1, 312, 140, 0, 1,\
+ 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:TX_UNTAGGED_FRMS_CNT */
+#define DEV5G_TX_UNTAGGED_FRMS_CNT(t) \
+ __REG(TARGET_DEV5G, t, regs->tsize[TC_DEV5G], 60, 0, 1, 312, 144, 0, 1,\
+ 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:PMAC_RX_SYMBOL_ERR_CNT */
+#define DEV5G_PMAC_RX_SYMBOL_ERR_CNT(t) \
+ __REG(TARGET_DEV5G, t, regs->tsize[TC_DEV5G], 60, 0, 1, 312, 148, 0, 1,\
+ 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:PMAC_RX_PAUSE_CNT */
+#define DEV5G_PMAC_RX_PAUSE_CNT(t) \
+ __REG(TARGET_DEV5G, t, regs->tsize[TC_DEV5G], 60, 0, 1, 312, 152, 0, 1,\
+ 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:PMAC_RX_UNSUP_OPCODE_CNT */
+#define DEV5G_PMAC_RX_UNSUP_OPCODE_CNT(t) \
+ __REG(TARGET_DEV5G, t, regs->tsize[TC_DEV5G], 60, 0, 1, 312, 156, 0, 1,\
+ 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:PMAC_RX_UC_CNT */
+#define DEV5G_PMAC_RX_UC_CNT(t) \
+ __REG(TARGET_DEV5G, t, regs->tsize[TC_DEV5G], 60, 0, 1, 312, 160, 0, 1,\
+ 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:PMAC_RX_MC_CNT */
+#define DEV5G_PMAC_RX_MC_CNT(t) \
+ __REG(TARGET_DEV5G, t, regs->tsize[TC_DEV5G], 60, 0, 1, 312, 164, 0, 1,\
+ 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:PMAC_RX_BC_CNT */
+#define DEV5G_PMAC_RX_BC_CNT(t) \
+ __REG(TARGET_DEV5G, t, regs->tsize[TC_DEV5G], 60, 0, 1, 312, 168, 0, 1,\
+ 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:PMAC_RX_CRC_ERR_CNT */
+#define DEV5G_PMAC_RX_CRC_ERR_CNT(t) \
+ __REG(TARGET_DEV5G, t, regs->tsize[TC_DEV5G], 60, 0, 1, 312, 172, 0, 1,\
+ 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:PMAC_RX_UNDERSIZE_CNT */
+#define DEV5G_PMAC_RX_UNDERSIZE_CNT(t) \
+ __REG(TARGET_DEV5G, t, regs->tsize[TC_DEV5G], 60, 0, 1, 312, 176, 0, 1,\
+ 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:PMAC_RX_FRAGMENTS_CNT */
+#define DEV5G_PMAC_RX_FRAGMENTS_CNT(t) \
+ __REG(TARGET_DEV5G, t, regs->tsize[TC_DEV5G], 60, 0, 1, 312, 180, 0, 1,\
+ 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:PMAC_RX_IN_RANGE_LEN_ERR_CNT */
+#define DEV5G_PMAC_RX_IN_RANGE_LEN_ERR_CNT(t) \
+ __REG(TARGET_DEV5G, t, regs->tsize[TC_DEV5G], 60, 0, 1, 312, 184, 0, 1,\
+ 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:PMAC_RX_OUT_OF_RANGE_LEN_ERR_CNT */
+#define DEV5G_PMAC_RX_OUT_OF_RANGE_LEN_ERR_CNT(t) \
+ __REG(TARGET_DEV5G, t, regs->tsize[TC_DEV5G], 60, 0, 1, 312, 188, 0, 1,\
+ 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:PMAC_RX_OVERSIZE_CNT */
+#define DEV5G_PMAC_RX_OVERSIZE_CNT(t) \
+ __REG(TARGET_DEV5G, t, regs->tsize[TC_DEV5G], 60, 0, 1, 312, 192, 0, 1,\
+ 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:PMAC_RX_JABBERS_CNT */
+#define DEV5G_PMAC_RX_JABBERS_CNT(t) \
+ __REG(TARGET_DEV5G, t, regs->tsize[TC_DEV5G], 60, 0, 1, 312, 196, 0, 1,\
+ 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:PMAC_RX_SIZE64_CNT */
+#define DEV5G_PMAC_RX_SIZE64_CNT(t) \
+ __REG(TARGET_DEV5G, t, regs->tsize[TC_DEV5G], 60, 0, 1, 312, 200, 0, 1,\
+ 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:PMAC_RX_SIZE65TO127_CNT */
+#define DEV5G_PMAC_RX_SIZE65TO127_CNT(t) \
+ __REG(TARGET_DEV5G, t, regs->tsize[TC_DEV5G], 60, 0, 1, 312, 204, 0, 1,\
+ 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:PMAC_RX_SIZE128TO255_CNT */
+#define DEV5G_PMAC_RX_SIZE128TO255_CNT(t) \
+ __REG(TARGET_DEV5G, t, regs->tsize[TC_DEV5G], 60, 0, 1, 312, 208, 0, 1,\
+ 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:PMAC_RX_SIZE256TO511_CNT */
+#define DEV5G_PMAC_RX_SIZE256TO511_CNT(t) \
+ __REG(TARGET_DEV5G, t, regs->tsize[TC_DEV5G], 60, 0, 1, 312, 212, 0, 1,\
+ 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:PMAC_RX_SIZE512TO1023_CNT */
+#define DEV5G_PMAC_RX_SIZE512TO1023_CNT(t) \
+ __REG(TARGET_DEV5G, t, regs->tsize[TC_DEV5G], 60, 0, 1, 312, 216, 0, 1,\
+ 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:PMAC_RX_SIZE1024TO1518_CNT */
+#define DEV5G_PMAC_RX_SIZE1024TO1518_CNT(t) \
+ __REG(TARGET_DEV5G, t, regs->tsize[TC_DEV5G], 60, 0, 1, 312, 220, 0, 1,\
+ 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:PMAC_RX_SIZE1519TOMAX_CNT */
+#define DEV5G_PMAC_RX_SIZE1519TOMAX_CNT(t) \
+ __REG(TARGET_DEV5G, t, regs->tsize[TC_DEV5G], 60, 0, 1, 312, 224, 0, 1,\
+ 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:PMAC_TX_PAUSE_CNT */
+#define DEV5G_PMAC_TX_PAUSE_CNT(t) \
+ __REG(TARGET_DEV5G, t, regs->tsize[TC_DEV5G], 60, 0, 1, 312, 228, 0, 1,\
+ 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:PMAC_TX_UC_CNT */
+#define DEV5G_PMAC_TX_UC_CNT(t) \
+ __REG(TARGET_DEV5G, t, regs->tsize[TC_DEV5G], 60, 0, 1, 312, 232, 0, 1,\
+ 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:PMAC_TX_MC_CNT */
+#define DEV5G_PMAC_TX_MC_CNT(t) \
+ __REG(TARGET_DEV5G, t, regs->tsize[TC_DEV5G], 60, 0, 1, 312, 236, 0, 1,\
+ 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:PMAC_TX_BC_CNT */
+#define DEV5G_PMAC_TX_BC_CNT(t) \
+ __REG(TARGET_DEV5G, t, regs->tsize[TC_DEV5G], 60, 0, 1, 312, 240, 0, 1,\
+ 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:PMAC_TX_SIZE64_CNT */
+#define DEV5G_PMAC_TX_SIZE64_CNT(t) \
+ __REG(TARGET_DEV5G, t, regs->tsize[TC_DEV5G], 60, 0, 1, 312, 244, 0, 1,\
+ 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:PMAC_TX_SIZE65TO127_CNT */
+#define DEV5G_PMAC_TX_SIZE65TO127_CNT(t) \
+ __REG(TARGET_DEV5G, t, regs->tsize[TC_DEV5G], 60, 0, 1, 312, 248, 0, 1,\
+ 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:PMAC_TX_SIZE128TO255_CNT */
+#define DEV5G_PMAC_TX_SIZE128TO255_CNT(t) \
+ __REG(TARGET_DEV5G, t, regs->tsize[TC_DEV5G], 60, 0, 1, 312, 252, 0, 1,\
+ 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:PMAC_TX_SIZE256TO511_CNT */
+#define DEV5G_PMAC_TX_SIZE256TO511_CNT(t) \
+ __REG(TARGET_DEV5G, t, regs->tsize[TC_DEV5G], 60, 0, 1, 312, 256, 0, 1,\
+ 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:PMAC_TX_SIZE512TO1023_CNT */
+#define DEV5G_PMAC_TX_SIZE512TO1023_CNT(t) \
+ __REG(TARGET_DEV5G, t, regs->tsize[TC_DEV5G], 60, 0, 1, 312, 260, 0, 1,\
+ 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:PMAC_TX_SIZE1024TO1518_CNT */
+#define DEV5G_PMAC_TX_SIZE1024TO1518_CNT(t) \
+ __REG(TARGET_DEV5G, t, regs->tsize[TC_DEV5G], 60, 0, 1, 312, 264, 0, 1,\
+ 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:PMAC_TX_SIZE1519TOMAX_CNT */
+#define DEV5G_PMAC_TX_SIZE1519TOMAX_CNT(t) \
+ __REG(TARGET_DEV5G, t, regs->tsize[TC_DEV5G], 60, 0, 1, 312, 268, 0, 1,\
+ 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:PMAC_RX_ALIGNMENT_LOST_CNT */
+#define DEV5G_PMAC_RX_ALIGNMENT_LOST_CNT(t) \
+ __REG(TARGET_DEV5G, t, regs->tsize[TC_DEV5G], 60, 0, 1, 312, 272, 0, 1,\
+ 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:MM_RX_ASSEMBLY_ERR_CNT */
+#define DEV5G_MM_RX_ASSEMBLY_ERR_CNT(t) \
+ __REG(TARGET_DEV5G, t, regs->tsize[TC_DEV5G], 60, 0, 1, 312, 276, 0, 1,\
+ 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:MM_RX_SMD_ERR_CNT */
+#define DEV5G_MM_RX_SMD_ERR_CNT(t) \
+ __REG(TARGET_DEV5G, t, regs->tsize[TC_DEV5G], 60, 0, 1, 312, 280, 0, 1,\
+ 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:MM_RX_ASSEMBLY_OK_CNT */
+#define DEV5G_MM_RX_ASSEMBLY_OK_CNT(t) \
+ __REG(TARGET_DEV5G, t, regs->tsize[TC_DEV5G], 60, 0, 1, 312, 284, 0, 1,\
+ 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:MM_RX_MERGE_FRAG_CNT */
+#define DEV5G_MM_RX_MERGE_FRAG_CNT(t) \
+ __REG(TARGET_DEV5G, t, regs->tsize[TC_DEV5G], 60, 0, 1, 312, 288, 0, 1,\
+ 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:MM_TX_PFRAGMENT_CNT */
+#define DEV5G_MM_TX_PFRAGMENT_CNT(t) \
+ __REG(TARGET_DEV5G, t, regs->tsize[TC_DEV5G], 60, 0, 1, 312, 292, 0, 1,\
+ 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:RX_HIH_CKSM_ERR_CNT */
+#define DEV5G_RX_HIH_CKSM_ERR_CNT(t) \
+ __REG(TARGET_DEV5G, t, regs->tsize[TC_DEV5G], 60, 0, 1, 312, 296, 0, 1,\
+ 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:RX_XGMII_PROT_ERR_CNT */
+#define DEV5G_RX_XGMII_PROT_ERR_CNT(t) \
+ __REG(TARGET_DEV5G, t, regs->tsize[TC_DEV5G], 60, 0, 1, 312, 300, 0, 1,\
+ 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:PMAC_RX_HIH_CKSM_ERR_CNT */
+#define DEV5G_PMAC_RX_HIH_CKSM_ERR_CNT(t) \
+ __REG(TARGET_DEV5G, t, regs->tsize[TC_DEV5G], 60, 0, 1, 312, 304, 0, 1,\
+ 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:PMAC_RX_XGMII_PROT_ERR_CNT */
+#define DEV5G_PMAC_RX_XGMII_PROT_ERR_CNT(t) \
+ __REG(TARGET_DEV5G, t, regs->tsize[TC_DEV5G], 60, 0, 1, 312, 308, 0, 1,\
+ 4)
+
+/* DEV10G:DEV_STATISTICS_40BIT:RX_IN_BYTES_CNT */
+#define DEV5G_RX_IN_BYTES_CNT(t) \
+ __REG(TARGET_DEV5G, t, regs->tsize[TC_DEV5G], 372, 0, 1, 64, 0, 0, 1, \
+ 4)
+
+/* DEV10G:DEV_STATISTICS_40BIT:RX_IN_BYTES_MSB_CNT */
+#define DEV5G_RX_IN_BYTES_MSB_CNT(t) \
+ __REG(TARGET_DEV5G, t, regs->tsize[TC_DEV5G], 372, 0, 1, 64, 4, 0, 1, \
+ 4)
#define DEV5G_RX_IN_BYTES_MSB_CNT_RX_IN_BYTES_MSB_CNT GENMASK(7, 0)
#define DEV5G_RX_IN_BYTES_MSB_CNT_RX_IN_BYTES_MSB_CNT_SET(x)\
@@ -3741,13 +4096,15 @@ enum sparx5_target {
#define DEV5G_RX_IN_BYTES_MSB_CNT_RX_IN_BYTES_MSB_CNT_GET(x)\
FIELD_GET(DEV5G_RX_IN_BYTES_MSB_CNT_RX_IN_BYTES_MSB_CNT, x)
-/* DEV10G:DEV_STATISTICS_40BIT:RX_OK_BYTES_CNT */
-#define DEV5G_RX_OK_BYTES_CNT(t) __REG(TARGET_DEV5G,\
- t, 13, 372, 0, 1, 64, 8, 0, 1, 4)
+/* DEV10G:DEV_STATISTICS_40BIT:RX_OK_BYTES_CNT */
+#define DEV5G_RX_OK_BYTES_CNT(t) \
+ __REG(TARGET_DEV5G, t, regs->tsize[TC_DEV5G], 372, 0, 1, 64, 8, 0, 1, \
+ 4)
-/* DEV10G:DEV_STATISTICS_40BIT:RX_OK_BYTES_MSB_CNT */
-#define DEV5G_RX_OK_BYTES_MSB_CNT(t) __REG(TARGET_DEV5G,\
- t, 13, 372, 0, 1, 64, 12, 0, 1, 4)
+/* DEV10G:DEV_STATISTICS_40BIT:RX_OK_BYTES_MSB_CNT */
+#define DEV5G_RX_OK_BYTES_MSB_CNT(t) \
+ __REG(TARGET_DEV5G, t, regs->tsize[TC_DEV5G], 372, 0, 1, 64, 12, 0, 1, \
+ 4)
#define DEV5G_RX_OK_BYTES_MSB_CNT_RX_OK_BYTES_MSB_CNT GENMASK(7, 0)
#define DEV5G_RX_OK_BYTES_MSB_CNT_RX_OK_BYTES_MSB_CNT_SET(x)\
@@ -3755,13 +4112,15 @@ enum sparx5_target {
#define DEV5G_RX_OK_BYTES_MSB_CNT_RX_OK_BYTES_MSB_CNT_GET(x)\
FIELD_GET(DEV5G_RX_OK_BYTES_MSB_CNT_RX_OK_BYTES_MSB_CNT, x)
-/* DEV10G:DEV_STATISTICS_40BIT:RX_BAD_BYTES_CNT */
-#define DEV5G_RX_BAD_BYTES_CNT(t) __REG(TARGET_DEV5G,\
- t, 13, 372, 0, 1, 64, 16, 0, 1, 4)
+/* DEV10G:DEV_STATISTICS_40BIT:RX_BAD_BYTES_CNT */
+#define DEV5G_RX_BAD_BYTES_CNT(t) \
+ __REG(TARGET_DEV5G, t, regs->tsize[TC_DEV5G], 372, 0, 1, 64, 16, 0, 1, \
+ 4)
-/* DEV10G:DEV_STATISTICS_40BIT:RX_BAD_BYTES_MSB_CNT */
-#define DEV5G_RX_BAD_BYTES_MSB_CNT(t) __REG(TARGET_DEV5G,\
- t, 13, 372, 0, 1, 64, 20, 0, 1, 4)
+/* DEV10G:DEV_STATISTICS_40BIT:RX_BAD_BYTES_MSB_CNT */
+#define DEV5G_RX_BAD_BYTES_MSB_CNT(t) \
+ __REG(TARGET_DEV5G, t, regs->tsize[TC_DEV5G], 372, 0, 1, 64, 20, 0, 1, \
+ 4)
#define DEV5G_RX_BAD_BYTES_MSB_CNT_RX_BAD_BYTES_MSB_CNT GENMASK(7, 0)
#define DEV5G_RX_BAD_BYTES_MSB_CNT_RX_BAD_BYTES_MSB_CNT_SET(x)\
@@ -3769,13 +4128,15 @@ enum sparx5_target {
#define DEV5G_RX_BAD_BYTES_MSB_CNT_RX_BAD_BYTES_MSB_CNT_GET(x)\
FIELD_GET(DEV5G_RX_BAD_BYTES_MSB_CNT_RX_BAD_BYTES_MSB_CNT, x)
-/* DEV10G:DEV_STATISTICS_40BIT:TX_OUT_BYTES_CNT */
-#define DEV5G_TX_OUT_BYTES_CNT(t) __REG(TARGET_DEV5G,\
- t, 13, 372, 0, 1, 64, 24, 0, 1, 4)
+/* DEV10G:DEV_STATISTICS_40BIT:TX_OUT_BYTES_CNT */
+#define DEV5G_TX_OUT_BYTES_CNT(t) \
+ __REG(TARGET_DEV5G, t, regs->tsize[TC_DEV5G], 372, 0, 1, 64, 24, 0, 1, \
+ 4)
-/* DEV10G:DEV_STATISTICS_40BIT:TX_OUT_BYTES_MSB_CNT */
-#define DEV5G_TX_OUT_BYTES_MSB_CNT(t) __REG(TARGET_DEV5G,\
- t, 13, 372, 0, 1, 64, 28, 0, 1, 4)
+/* DEV10G:DEV_STATISTICS_40BIT:TX_OUT_BYTES_MSB_CNT */
+#define DEV5G_TX_OUT_BYTES_MSB_CNT(t) \
+ __REG(TARGET_DEV5G, t, regs->tsize[TC_DEV5G], 372, 0, 1, 64, 28, 0, 1, \
+ 4)
#define DEV5G_TX_OUT_BYTES_MSB_CNT_TX_OUT_BYTES_MSB_CNT GENMASK(7, 0)
#define DEV5G_TX_OUT_BYTES_MSB_CNT_TX_OUT_BYTES_MSB_CNT_SET(x)\
@@ -3783,13 +4144,15 @@ enum sparx5_target {
#define DEV5G_TX_OUT_BYTES_MSB_CNT_TX_OUT_BYTES_MSB_CNT_GET(x)\
FIELD_GET(DEV5G_TX_OUT_BYTES_MSB_CNT_TX_OUT_BYTES_MSB_CNT, x)
-/* DEV10G:DEV_STATISTICS_40BIT:TX_OK_BYTES_CNT */
-#define DEV5G_TX_OK_BYTES_CNT(t) __REG(TARGET_DEV5G,\
- t, 13, 372, 0, 1, 64, 32, 0, 1, 4)
+/* DEV10G:DEV_STATISTICS_40BIT:TX_OK_BYTES_CNT */
+#define DEV5G_TX_OK_BYTES_CNT(t) \
+ __REG(TARGET_DEV5G, t, regs->tsize[TC_DEV5G], 372, 0, 1, 64, 32, 0, 1, \
+ 4)
-/* DEV10G:DEV_STATISTICS_40BIT:TX_OK_BYTES_MSB_CNT */
-#define DEV5G_TX_OK_BYTES_MSB_CNT(t) __REG(TARGET_DEV5G,\
- t, 13, 372, 0, 1, 64, 36, 0, 1, 4)
+/* DEV10G:DEV_STATISTICS_40BIT:TX_OK_BYTES_MSB_CNT */
+#define DEV5G_TX_OK_BYTES_MSB_CNT(t) \
+ __REG(TARGET_DEV5G, t, regs->tsize[TC_DEV5G], 372, 0, 1, 64, 36, 0, 1, \
+ 4)
#define DEV5G_TX_OK_BYTES_MSB_CNT_TX_OK_BYTES_MSB_CNT GENMASK(7, 0)
#define DEV5G_TX_OK_BYTES_MSB_CNT_TX_OK_BYTES_MSB_CNT_SET(x)\
@@ -3797,13 +4160,15 @@ enum sparx5_target {
#define DEV5G_TX_OK_BYTES_MSB_CNT_TX_OK_BYTES_MSB_CNT_GET(x)\
FIELD_GET(DEV5G_TX_OK_BYTES_MSB_CNT_TX_OK_BYTES_MSB_CNT, x)
-/* DEV10G:DEV_STATISTICS_40BIT:PMAC_RX_OK_BYTES_CNT */
-#define DEV5G_PMAC_RX_OK_BYTES_CNT(t) __REG(TARGET_DEV5G,\
- t, 13, 372, 0, 1, 64, 40, 0, 1, 4)
+/* DEV10G:DEV_STATISTICS_40BIT:PMAC_RX_OK_BYTES_CNT */
+#define DEV5G_PMAC_RX_OK_BYTES_CNT(t) \
+ __REG(TARGET_DEV5G, t, regs->tsize[TC_DEV5G], 372, 0, 1, 64, 40, 0, 1, \
+ 4)
-/* DEV10G:DEV_STATISTICS_40BIT:PMAC_RX_OK_BYTES_MSB_CNT */
-#define DEV5G_PMAC_RX_OK_BYTES_MSB_CNT(t) __REG(TARGET_DEV5G,\
- t, 13, 372, 0, 1, 64, 44, 0, 1, 4)
+/* DEV10G:DEV_STATISTICS_40BIT:PMAC_RX_OK_BYTES_MSB_CNT */
+#define DEV5G_PMAC_RX_OK_BYTES_MSB_CNT(t) \
+ __REG(TARGET_DEV5G, t, regs->tsize[TC_DEV5G], 372, 0, 1, 64, 44, 0, 1, \
+ 4)
#define DEV5G_PMAC_RX_OK_BYTES_MSB_CNT_PMAC_RX_OK_BYTES_MSB_CNT GENMASK(7, 0)
#define DEV5G_PMAC_RX_OK_BYTES_MSB_CNT_PMAC_RX_OK_BYTES_MSB_CNT_SET(x)\
@@ -3811,13 +4176,15 @@ enum sparx5_target {
#define DEV5G_PMAC_RX_OK_BYTES_MSB_CNT_PMAC_RX_OK_BYTES_MSB_CNT_GET(x)\
FIELD_GET(DEV5G_PMAC_RX_OK_BYTES_MSB_CNT_PMAC_RX_OK_BYTES_MSB_CNT, x)
-/* DEV10G:DEV_STATISTICS_40BIT:PMAC_RX_BAD_BYTES_CNT */
-#define DEV5G_PMAC_RX_BAD_BYTES_CNT(t) __REG(TARGET_DEV5G,\
- t, 13, 372, 0, 1, 64, 48, 0, 1, 4)
+/* DEV10G:DEV_STATISTICS_40BIT:PMAC_RX_BAD_BYTES_CNT */
+#define DEV5G_PMAC_RX_BAD_BYTES_CNT(t) \
+ __REG(TARGET_DEV5G, t, regs->tsize[TC_DEV5G], 372, 0, 1, 64, 48, 0, 1, \
+ 4)
-/* DEV10G:DEV_STATISTICS_40BIT:PMAC_RX_BAD_BYTES_MSB_CNT */
-#define DEV5G_PMAC_RX_BAD_BYTES_MSB_CNT(t) __REG(TARGET_DEV5G,\
- t, 13, 372, 0, 1, 64, 52, 0, 1, 4)
+/* DEV10G:DEV_STATISTICS_40BIT:PMAC_RX_BAD_BYTES_MSB_CNT */
+#define DEV5G_PMAC_RX_BAD_BYTES_MSB_CNT(t) \
+ __REG(TARGET_DEV5G, t, regs->tsize[TC_DEV5G], 372, 0, 1, 64, 52, 0, 1, \
+ 4)
#define DEV5G_PMAC_RX_BAD_BYTES_MSB_CNT_PMAC_RX_BAD_BYTES_MSB_CNT GENMASK(7, 0)
#define DEV5G_PMAC_RX_BAD_BYTES_MSB_CNT_PMAC_RX_BAD_BYTES_MSB_CNT_SET(x)\
@@ -3825,13 +4192,15 @@ enum sparx5_target {
#define DEV5G_PMAC_RX_BAD_BYTES_MSB_CNT_PMAC_RX_BAD_BYTES_MSB_CNT_GET(x)\
FIELD_GET(DEV5G_PMAC_RX_BAD_BYTES_MSB_CNT_PMAC_RX_BAD_BYTES_MSB_CNT, x)
-/* DEV10G:DEV_STATISTICS_40BIT:PMAC_TX_OK_BYTES_CNT */
-#define DEV5G_PMAC_TX_OK_BYTES_CNT(t) __REG(TARGET_DEV5G,\
- t, 13, 372, 0, 1, 64, 56, 0, 1, 4)
+/* DEV10G:DEV_STATISTICS_40BIT:PMAC_TX_OK_BYTES_CNT */
+#define DEV5G_PMAC_TX_OK_BYTES_CNT(t) \
+ __REG(TARGET_DEV5G, t, regs->tsize[TC_DEV5G], 372, 0, 1, 64, 56, 0, 1, \
+ 4)
-/* DEV10G:DEV_STATISTICS_40BIT:PMAC_TX_OK_BYTES_MSB_CNT */
-#define DEV5G_PMAC_TX_OK_BYTES_MSB_CNT(t) __REG(TARGET_DEV5G,\
- t, 13, 372, 0, 1, 64, 60, 0, 1, 4)
+/* DEV10G:DEV_STATISTICS_40BIT:PMAC_TX_OK_BYTES_MSB_CNT */
+#define DEV5G_PMAC_TX_OK_BYTES_MSB_CNT(t) \
+ __REG(TARGET_DEV5G, t, regs->tsize[TC_DEV5G], 372, 0, 1, 64, 60, 0, 1, \
+ 4)
#define DEV5G_PMAC_TX_OK_BYTES_MSB_CNT_PMAC_TX_OK_BYTES_MSB_CNT GENMASK(7, 0)
#define DEV5G_PMAC_TX_OK_BYTES_MSB_CNT_PMAC_TX_OK_BYTES_MSB_CNT_SET(x)\
@@ -3839,9 +4208,10 @@ enum sparx5_target {
#define DEV5G_PMAC_TX_OK_BYTES_MSB_CNT_PMAC_TX_OK_BYTES_MSB_CNT_GET(x)\
FIELD_GET(DEV5G_PMAC_TX_OK_BYTES_MSB_CNT_PMAC_TX_OK_BYTES_MSB_CNT, x)
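Every hunk above applies the same transformation: a hardcoded target replication count (65 DEV2G5 instances, 13 DEV5G instances on Sparx5) is replaced by a lookup such as regs->tsize[TC_DEV5G], so one macro definition can serve chips whose register geometry differs. A minimal sketch of the kind of descriptor the rewritten macros assume is reachable as "regs" is given below; the struct name and layout are assumptions inferred from the index prefixes used in the hunks (TC_*, GA_*, GC_*, GW_*, RA_*, RC_*, FP_*, FW_*), not something shown in this diff:

/* Hypothetical per-chip register geometry descriptor. */
struct sparx5_regs {
	const unsigned int *tsize;	/* TC_*: target instance counts */
	const unsigned int *gaddr;	/* GA_*: register group base addresses */
	const unsigned int *gcnt;	/* GC_*: register group repeat counts */
	const unsigned int *gsize;	/* GW_*: register group sizes */
	const unsigned int *raddr;	/* RA_*: register addresses within a group */
	const unsigned int *rcnt;	/* RC_*: register repeat counts */
	const unsigned int *fpos;	/* FP_*: single-bit field positions */
	const unsigned int *fsize;	/* FW_*: multi-bit field widths */
};

/* A chip-specific table would then be selected once at probe time and
 * published through the "regs" pointer that every macro dereferences.
 */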
-/* DEV10G:DEV_CFG_STATUS:DEV_RST_CTRL */
-#define DEV5G_DEV_RST_CTRL(t) __REG(TARGET_DEV5G,\
- t, 13, 436, 0, 1, 52, 0, 0, 1, 4)
+/* DEV10G:DEV_CFG_STATUS:DEV_RST_CTRL */
+#define DEV5G_DEV_RST_CTRL(t) \
+ __REG(TARGET_DEV5G, t, regs->tsize[TC_DEV5G], 436, 0, 1, 52, 0, 0, 1, \
+ 4)
#define DEV5G_DEV_RST_CTRL_PARDET_MODE_ENA BIT(28)
#define DEV5G_DEV_RST_CTRL_PARDET_MODE_ENA_SET(x)\
@@ -3897,9 +4267,9 @@ enum sparx5_target {
#define DEV5G_DEV_RST_CTRL_MAC_RX_RST_GET(x)\
FIELD_GET(DEV5G_DEV_RST_CTRL_MAC_RX_RST, x)
-/* DSM:RAM_CTRL:RAM_INIT */
-#define DSM_RAM_INIT __REG(TARGET_DSM,\
- 0, 1, 0, 0, 1, 4, 0, 0, 1, 4)
+/* DSM:RAM_CTRL:RAM_INIT */
+#define DSM_RAM_INIT \
+ __REG(TARGET_DSM, 0, 1, 0, 0, 1, 4, 0, 0, 1, 4)
#define DSM_RAM_INIT_RAM_INIT BIT(1)
#define DSM_RAM_INIT_RAM_INIT_SET(x)\
@@ -3913,9 +4283,10 @@ enum sparx5_target {
#define DSM_RAM_INIT_RAM_CFG_HOOK_GET(x)\
FIELD_GET(DSM_RAM_INIT_RAM_CFG_HOOK, x)
-/* DSM:CFG:BUF_CFG */
-#define DSM_BUF_CFG(r) __REG(TARGET_DSM,\
- 0, 1, 20, 0, 1, 3528, 0, r, 67, 4)
+/* DSM:CFG:BUF_CFG */
+#define DSM_BUF_CFG(r) \
+ __REG(TARGET_DSM, 0, 1, 20, 0, 1, 3528, 0, r, \
+ regs->rcnt[RC_DSM_BUF_CFG], 4)
#define DSM_BUF_CFG_CSC_STAT_DIS BIT(13)
#define DSM_BUF_CFG_CSC_STAT_DIS_SET(x)\
@@ -3941,9 +4312,10 @@ enum sparx5_target {
#define DSM_BUF_CFG_UNDERFLOW_WATCHDOG_TIMEOUT_GET(x)\
FIELD_GET(DSM_BUF_CFG_UNDERFLOW_WATCHDOG_TIMEOUT, x)
-/* DSM:CFG:DEV_TX_STOP_WM_CFG */
-#define DSM_DEV_TX_STOP_WM_CFG(r) __REG(TARGET_DSM,\
- 0, 1, 20, 0, 1, 3528, 1360, r, 67, 4)
+/* DSM:CFG:DEV_TX_STOP_WM_CFG */
+#define DSM_DEV_TX_STOP_WM_CFG(r) \
+ __REG(TARGET_DSM, 0, 1, 20, 0, 1, 3528, 1360, r, \
+ regs->rcnt[RC_DSM_DEV_TX_STOP_WM_CFG], 4)
#define DSM_DEV_TX_STOP_WM_CFG_FAST_STARTUP_ENA BIT(9)
#define DSM_DEV_TX_STOP_WM_CFG_FAST_STARTUP_ENA_SET(x)\
@@ -3969,9 +4341,10 @@ enum sparx5_target {
#define DSM_DEV_TX_STOP_WM_CFG_DEV_TX_CNT_CLR_GET(x)\
FIELD_GET(DSM_DEV_TX_STOP_WM_CFG_DEV_TX_CNT_CLR, x)
-/* DSM:CFG:RX_PAUSE_CFG */
-#define DSM_RX_PAUSE_CFG(r) __REG(TARGET_DSM,\
- 0, 1, 20, 0, 1, 3528, 1628, r, 67, 4)
+/* DSM:CFG:RX_PAUSE_CFG */
+#define DSM_RX_PAUSE_CFG(r) \
+ __REG(TARGET_DSM, 0, 1, 20, 0, 1, 3528, 1628, r, \
+ regs->rcnt[RC_DSM_RX_PAUSE_CFG], 4)
#define DSM_RX_PAUSE_CFG_RX_PAUSE_EN BIT(1)
#define DSM_RX_PAUSE_CFG_RX_PAUSE_EN_SET(x)\
@@ -3985,9 +4358,10 @@ enum sparx5_target {
#define DSM_RX_PAUSE_CFG_FC_OBEY_LOCAL_GET(x)\
FIELD_GET(DSM_RX_PAUSE_CFG_FC_OBEY_LOCAL, x)
-/* DSM:CFG:MAC_CFG */
-#define DSM_MAC_CFG(r) __REG(TARGET_DSM,\
- 0, 1, 20, 0, 1, 3528, 2432, r, 67, 4)
+/* DSM:CFG:MAC_CFG */
+#define DSM_MAC_CFG(r) \
+ __REG(TARGET_DSM, 0, 1, 20, 0, 1, 3528, 2432, r, \
+ regs->rcnt[RC_DSM_MAC_CFG], 4)
#define DSM_MAC_CFG_TX_PAUSE_VAL GENMASK(31, 16)
#define DSM_MAC_CFG_TX_PAUSE_VAL_SET(x)\
@@ -4013,9 +4387,10 @@ enum sparx5_target {
#define DSM_MAC_CFG_TX_PAUSE_XON_XOFF_GET(x)\
FIELD_GET(DSM_MAC_CFG_TX_PAUSE_XON_XOFF, x)
-/* DSM:CFG:MAC_ADDR_BASE_HIGH_CFG */
-#define DSM_MAC_ADDR_BASE_HIGH_CFG(r) __REG(TARGET_DSM,\
- 0, 1, 20, 0, 1, 3528, 2700, r, 65, 4)
+/* DSM:CFG:MAC_ADDR_BASE_HIGH_CFG */
+#define DSM_MAC_ADDR_BASE_HIGH_CFG(r) \
+ __REG(TARGET_DSM, 0, 1, 20, 0, 1, 3528, 2700, r, \
+ regs->rcnt[RC_DSM_MAC_ADDR_BASE_HIGH_CFG], 4)
#define DSM_MAC_ADDR_BASE_HIGH_CFG_MAC_ADDR_HIGH GENMASK(23, 0)
#define DSM_MAC_ADDR_BASE_HIGH_CFG_MAC_ADDR_HIGH_SET(x)\
@@ -4023,9 +4398,10 @@ enum sparx5_target {
#define DSM_MAC_ADDR_BASE_HIGH_CFG_MAC_ADDR_HIGH_GET(x)\
FIELD_GET(DSM_MAC_ADDR_BASE_HIGH_CFG_MAC_ADDR_HIGH, x)
-/* DSM:CFG:MAC_ADDR_BASE_LOW_CFG */
-#define DSM_MAC_ADDR_BASE_LOW_CFG(r) __REG(TARGET_DSM,\
- 0, 1, 20, 0, 1, 3528, 2960, r, 65, 4)
+/* DSM:CFG:MAC_ADDR_BASE_LOW_CFG */
+#define DSM_MAC_ADDR_BASE_LOW_CFG(r) \
+ __REG(TARGET_DSM, 0, 1, 20, 0, 1, 3528, 2960, r, \
+ regs->rcnt[RC_DSM_MAC_ADDR_BASE_LOW_CFG], 4)
#define DSM_MAC_ADDR_BASE_LOW_CFG_MAC_ADDR_LOW GENMASK(23, 0)
#define DSM_MAC_ADDR_BASE_LOW_CFG_MAC_ADDR_LOW_SET(x)\
@@ -4033,9 +4409,10 @@ enum sparx5_target {
#define DSM_MAC_ADDR_BASE_LOW_CFG_MAC_ADDR_LOW_GET(x)\
FIELD_GET(DSM_MAC_ADDR_BASE_LOW_CFG_MAC_ADDR_LOW, x)
-/* DSM:CFG:TAXI_CAL_CFG */
-#define DSM_TAXI_CAL_CFG(r) __REG(TARGET_DSM,\
- 0, 1, 20, 0, 1, 3528, 3224, r, 9, 4)
+/* DSM:CFG:TAXI_CAL_CFG */
+#define DSM_TAXI_CAL_CFG(r) \
+ __REG(TARGET_DSM, 0, 1, 20, 0, 1, 3528, 3224, r, \
+ regs->rcnt[RC_DSM_TAXI_CAL_CFG], 4)
#define DSM_TAXI_CAL_CFG_CAL_IDX GENMASK(20, 15)
#define DSM_TAXI_CAL_CFG_CAL_IDX_SET(x)\
@@ -4067,9 +4444,10 @@ enum sparx5_target {
#define DSM_TAXI_CAL_CFG_CAL_PGM_ENA_GET(x)\
FIELD_GET(DSM_TAXI_CAL_CFG_CAL_PGM_ENA, x)
-/* EACL:ES2_KEY_SELECT_PROFILE:VCAP_ES2_KEY_SEL */
-#define EACL_VCAP_ES2_KEY_SEL(g, r) __REG(TARGET_EACL,\
- 0, 1, 149504, g, 138, 8, 0, r, 2, 4)
+/* EACL:ES2_KEY_SELECT_PROFILE:VCAP_ES2_KEY_SEL */
+#define EACL_VCAP_ES2_KEY_SEL(g, r) \
+ __REG(TARGET_EACL, 0, 1, regs->gaddr[GA_EACL_ES2_KEY_SELECT_PROFILE], \
+ g, regs->gcnt[GC_EACL_ES2_KEY_SELECT_PROFILE], 8, 0, r, 2, 4)
#define EACL_VCAP_ES2_KEY_SEL_IP6_KEY_SEL GENMASK(7, 5)
#define EACL_VCAP_ES2_KEY_SEL_IP6_KEY_SEL_SET(x)\
@@ -4095,13 +4473,15 @@ enum sparx5_target {
#define EACL_VCAP_ES2_KEY_SEL_KEY_ENA_GET(x)\
FIELD_GET(EACL_VCAP_ES2_KEY_SEL_KEY_ENA, x)
-/* EACL:CNT_TBL:ES2_CNT */
-#define EACL_ES2_CNT(g) __REG(TARGET_EACL,\
- 0, 1, 122880, g, 2048, 4, 0, 0, 1, 4)
+/* EACL:CNT_TBL:ES2_CNT */
+#define EACL_ES2_CNT(g) \
+ __REG(TARGET_EACL, 0, 1, regs->gaddr[GA_EACL_CNT_TBL], g, \
+ regs->gcnt[GC_EACL_CNT_TBL], 4, 0, 0, 1, 4)
-/* EACL:POL_CFG:POL_EACL_CFG */
-#define EACL_POL_EACL_CFG __REG(TARGET_EACL,\
- 0, 1, 150608, 0, 1, 780, 768, 0, 1, 4)
+/* EACL:POL_CFG:POL_EACL_CFG */
+#define EACL_POL_EACL_CFG \
+ __REG(TARGET_EACL, 0, 1, regs->gaddr[GA_EACL_POL_CFG], 0, 1, 780, 768, \
+ 0, 1, 4)
#define EACL_POL_EACL_CFG_EACL_CNT_MARKED_AS_DROPPED BIT(5)
#define EACL_POL_EACL_CFG_EACL_CNT_MARKED_AS_DROPPED_SET(x)\
@@ -4139,9 +4519,10 @@ enum sparx5_target {
#define EACL_POL_EACL_CFG_EACL_FORCE_INIT_GET(x)\
FIELD_GET(EACL_POL_EACL_CFG_EACL_FORCE_INIT, x)
-/* EACL:ES2_STICKY:SEC_LOOKUP_STICKY */
-#define EACL_SEC_LOOKUP_STICKY(r) __REG(TARGET_EACL,\
- 0, 1, 118696, 0, 1, 8, 0, r, 2, 4)
+/* EACL:ES2_STICKY:SEC_LOOKUP_STICKY */
+#define EACL_SEC_LOOKUP_STICKY(r) \
+ __REG(TARGET_EACL, 0, 1, regs->gaddr[GA_EACL_ES2_STICKY], 0, 1, 8, 0, \
+ r, 2, 4)
#define EACL_SEC_LOOKUP_STICKY_SEC_TYPE_IP_7TUPLE_STICKY BIT(7)
#define EACL_SEC_LOOKUP_STICKY_SEC_TYPE_IP_7TUPLE_STICKY_SET(x)\
@@ -4191,9 +4572,10 @@ enum sparx5_target {
#define EACL_SEC_LOOKUP_STICKY_SEC_TYPE_MAC_ETYPE_STICKY_GET(x)\
FIELD_GET(EACL_SEC_LOOKUP_STICKY_SEC_TYPE_MAC_ETYPE_STICKY, x)
-/* EACL:RAM_CTRL:RAM_INIT */
-#define EACL_RAM_INIT __REG(TARGET_EACL,\
- 0, 1, 118736, 0, 1, 4, 0, 0, 1, 4)
+/* EACL:RAM_CTRL:RAM_INIT */
+#define EACL_RAM_INIT \
+ __REG(TARGET_EACL, 0, 1, regs->gaddr[GA_EACL_RAM_CTRL], 0, 1, 4, 0, 0, \
+ 1, 4)
#define EACL_RAM_INIT_RAM_INIT BIT(1)
#define EACL_RAM_INIT_RAM_INIT_SET(x)\
@@ -4207,9 +4589,10 @@ enum sparx5_target {
#define EACL_RAM_INIT_RAM_CFG_HOOK_GET(x)\
FIELD_GET(EACL_RAM_INIT_RAM_CFG_HOOK, x)
-/* FDMA:FDMA:FDMA_CH_ACTIVATE */
-#define FDMA_CH_ACTIVATE __REG(TARGET_FDMA,\
- 0, 1, 8, 0, 1, 428, 0, 0, 1, 4)
+/* FDMA:FDMA:FDMA_CH_ACTIVATE */
+#define FDMA_CH_ACTIVATE \
+ __REG(TARGET_FDMA, 0, 1, 8, 0, 1, regs->gsize[GW_FDMA_FDMA], 0, 0, 1, \
+ 4)
#define FDMA_CH_ACTIVATE_CH_ACTIVATE GENMASK(7, 0)
#define FDMA_CH_ACTIVATE_CH_ACTIVATE_SET(x)\
@@ -4217,9 +4600,10 @@ enum sparx5_target {
#define FDMA_CH_ACTIVATE_CH_ACTIVATE_GET(x)\
FIELD_GET(FDMA_CH_ACTIVATE_CH_ACTIVATE, x)
-/* FDMA:FDMA:FDMA_CH_RELOAD */
-#define FDMA_CH_RELOAD __REG(TARGET_FDMA,\
- 0, 1, 8, 0, 1, 428, 4, 0, 1, 4)
+/* FDMA:FDMA:FDMA_CH_RELOAD */
+#define FDMA_CH_RELOAD \
+ __REG(TARGET_FDMA, 0, 1, 8, 0, 1, regs->gsize[GW_FDMA_FDMA], 4, 0, 1, \
+ 4)
#define FDMA_CH_RELOAD_CH_RELOAD GENMASK(7, 0)
#define FDMA_CH_RELOAD_CH_RELOAD_SET(x)\
@@ -4227,9 +4611,10 @@ enum sparx5_target {
#define FDMA_CH_RELOAD_CH_RELOAD_GET(x)\
FIELD_GET(FDMA_CH_RELOAD_CH_RELOAD, x)
-/* FDMA:FDMA:FDMA_CH_DISABLE */
-#define FDMA_CH_DISABLE __REG(TARGET_FDMA,\
- 0, 1, 8, 0, 1, 428, 8, 0, 1, 4)
+/* FDMA:FDMA:FDMA_CH_DISABLE */
+#define FDMA_CH_DISABLE \
+ __REG(TARGET_FDMA, 0, 1, 8, 0, 1, regs->gsize[GW_FDMA_FDMA], 8, 0, 1, \
+ 4)
#define FDMA_CH_DISABLE_CH_DISABLE GENMASK(7, 0)
#define FDMA_CH_DISABLE_CH_DISABLE_SET(x)\
@@ -4237,49 +4622,58 @@ enum sparx5_target {
#define FDMA_CH_DISABLE_CH_DISABLE_GET(x)\
FIELD_GET(FDMA_CH_DISABLE_CH_DISABLE, x)
-/* FDMA:FDMA:FDMA_DCB_LLP */
-#define FDMA_DCB_LLP(r) __REG(TARGET_FDMA,\
- 0, 1, 8, 0, 1, 428, 52, r, 8, 4)
-
-/* FDMA:FDMA:FDMA_DCB_LLP1 */
-#define FDMA_DCB_LLP1(r) __REG(TARGET_FDMA,\
- 0, 1, 8, 0, 1, 428, 84, r, 8, 4)
-
-/* FDMA:FDMA:FDMA_DCB_LLP_PREV */
-#define FDMA_DCB_LLP_PREV(r) __REG(TARGET_FDMA,\
- 0, 1, 8, 0, 1, 428, 116, r, 8, 4)
-
-/* FDMA:FDMA:FDMA_DCB_LLP_PREV1 */
-#define FDMA_DCB_LLP_PREV1(r) __REG(TARGET_FDMA,\
- 0, 1, 8, 0, 1, 428, 148, r, 8, 4)
-
-/* FDMA:FDMA:FDMA_CH_CFG */
-#define FDMA_CH_CFG(r) __REG(TARGET_FDMA,\
- 0, 1, 8, 0, 1, 428, 224, r, 8, 4)
-
-#define FDMA_CH_CFG_CH_XTR_STATUS_MODE BIT(7)
+/* FDMA:FDMA:FDMA_DCB_LLP */
+#define FDMA_DCB_LLP(r) \
+ __REG(TARGET_FDMA, 0, 1, 8, 0, 1, regs->gsize[GW_FDMA_FDMA], 52, r, 8, \
+ 4)
+
+/* FDMA:FDMA:FDMA_DCB_LLP1 */
+#define FDMA_DCB_LLP1(r) \
+ __REG(TARGET_FDMA, 0, 1, 8, 0, 1, regs->gsize[GW_FDMA_FDMA], 84, r, 8, \
+ 4)
+
+/* FDMA:FDMA:FDMA_DCB_LLP_PREV */
+#define FDMA_DCB_LLP_PREV(r) \
+ __REG(TARGET_FDMA, 0, 1, 8, 0, 1, regs->gsize[GW_FDMA_FDMA], 116, r, 8,\
+ 4)
+
+/* FDMA:FDMA:FDMA_DCB_LLP_PREV1 */
+#define FDMA_DCB_LLP_PREV1(r) \
+ __REG(TARGET_FDMA, 0, 1, 8, 0, 1, regs->gsize[GW_FDMA_FDMA], 148, r, 8,\
+ 4)
+
+/* FDMA:FDMA:FDMA_CH_CFG */
+#define FDMA_CH_CFG(r) \
+ __REG(TARGET_FDMA, 0, 1, 8, 0, 1, regs->gsize[GW_FDMA_FDMA], 224, r, 8,\
+ 4)
+
+#define FDMA_CH_CFG_CH_XTR_STATUS_MODE\
+ BIT(regs->fpos[FP_FDMA_CH_CFG_CH_XTR_STATUS_MODE])
#define FDMA_CH_CFG_CH_XTR_STATUS_MODE_SET(x)\
- FIELD_PREP(FDMA_CH_CFG_CH_XTR_STATUS_MODE, x)
+ spx5_field_prep(FDMA_CH_CFG_CH_XTR_STATUS_MODE, x)
#define FDMA_CH_CFG_CH_XTR_STATUS_MODE_GET(x)\
- FIELD_GET(FDMA_CH_CFG_CH_XTR_STATUS_MODE, x)
+ spx5_field_get(FDMA_CH_CFG_CH_XTR_STATUS_MODE, x)
-#define FDMA_CH_CFG_CH_INTR_DB_EOF_ONLY BIT(6)
+#define FDMA_CH_CFG_CH_INTR_DB_EOF_ONLY\
+ BIT(regs->fpos[FP_FDMA_CH_CFG_CH_INTR_DB_EOF_ONLY])
#define FDMA_CH_CFG_CH_INTR_DB_EOF_ONLY_SET(x)\
- FIELD_PREP(FDMA_CH_CFG_CH_INTR_DB_EOF_ONLY, x)
+ spx5_field_prep(FDMA_CH_CFG_CH_INTR_DB_EOF_ONLY, x)
#define FDMA_CH_CFG_CH_INTR_DB_EOF_ONLY_GET(x)\
- FIELD_GET(FDMA_CH_CFG_CH_INTR_DB_EOF_ONLY, x)
+ spx5_field_get(FDMA_CH_CFG_CH_INTR_DB_EOF_ONLY, x)
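The FDMA_CH_CFG field hunks above change in kind, not just in shape: BIT(7) becomes BIT(regs->fpos[...]), so the mask is no longer a compile-time constant, and FIELD_PREP()/FIELD_GET(), which require constant masks, are swapped for spx5_field_prep()/spx5_field_get(). Those helper names appear in the diff but their bodies do not; a plausible runtime equivalent, assuming u32 masks, would be:

#include <linux/bitops.h>	/* __ffs() */
#include <linux/types.h>

/* Hypothetical sketch: like field_prep()/field_get(), but usable with
 * masks computed at run time from regs->fpos[]/regs->fsize[].
 */
static inline u32 spx5_field_prep(u32 mask, u32 val)
{
	return (val << __ffs(mask)) & mask;
}

static inline u32 spx5_field_get(u32 mask, u32 val)
{
	return (val & mask) >> __ffs(mask);
}

Shifting by __ffs(mask) instead of a hardcoded bit number is what lets the same SET/GET macro pair work on chips that place the field at different positions.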
-#define FDMA_CH_CFG_CH_INJ_PORT BIT(5)
+#define FDMA_CH_CFG_CH_INJ_PORT\
+ BIT(regs->fpos[FP_FDMA_CH_CFG_CH_INJ_PORT])
#define FDMA_CH_CFG_CH_INJ_PORT_SET(x)\
- FIELD_PREP(FDMA_CH_CFG_CH_INJ_PORT, x)
+ spx5_field_prep(FDMA_CH_CFG_CH_INJ_PORT, x)
#define FDMA_CH_CFG_CH_INJ_PORT_GET(x)\
- FIELD_GET(FDMA_CH_CFG_CH_INJ_PORT, x)
+ spx5_field_get(FDMA_CH_CFG_CH_INJ_PORT, x)
-#define FDMA_CH_CFG_CH_DCB_DB_CNT GENMASK(4, 1)
+#define FDMA_CH_CFG_CH_DCB_DB_CNT\
+ GENMASK(regs->fsize[FW_FDMA_CH_CFG_CH_DCB_DB_CNT] + 1 - 1, 1)
#define FDMA_CH_CFG_CH_DCB_DB_CNT_SET(x)\
- FIELD_PREP(FDMA_CH_CFG_CH_DCB_DB_CNT, x)
+ spx5_field_prep(FDMA_CH_CFG_CH_DCB_DB_CNT, x)
#define FDMA_CH_CFG_CH_DCB_DB_CNT_GET(x)\
- FIELD_GET(FDMA_CH_CFG_CH_DCB_DB_CNT, x)
+ spx5_field_get(FDMA_CH_CFG_CH_DCB_DB_CNT, x)
#define FDMA_CH_CFG_CH_MEM BIT(0)
#define FDMA_CH_CFG_CH_MEM_SET(x)\
@@ -4287,9 +4681,10 @@ enum sparx5_target {
#define FDMA_CH_CFG_CH_MEM_GET(x)\
FIELD_GET(FDMA_CH_CFG_CH_MEM, x)
-/* FDMA:FDMA:FDMA_CH_TRANSLATE */
-#define FDMA_CH_TRANSLATE(r) __REG(TARGET_FDMA,\
- 0, 1, 8, 0, 1, 428, 256, r, 8, 4)
+/* FDMA:FDMA:FDMA_CH_TRANSLATE */
+#define FDMA_CH_TRANSLATE(r) \
+ __REG(TARGET_FDMA, 0, 1, 8, 0, 1, regs->gsize[GW_FDMA_FDMA], 256, r, 8,\
+ 4)
#define FDMA_CH_TRANSLATE_OFFSET GENMASK(15, 0)
#define FDMA_CH_TRANSLATE_OFFSET_SET(x)\
@@ -4297,9 +4692,10 @@ enum sparx5_target {
#define FDMA_CH_TRANSLATE_OFFSET_GET(x)\
FIELD_GET(FDMA_CH_TRANSLATE_OFFSET, x)
-/* FDMA:FDMA:FDMA_XTR_CFG */
-#define FDMA_XTR_CFG __REG(TARGET_FDMA,\
- 0, 1, 8, 0, 1, 428, 364, 0, 1, 4)
+/* FDMA:FDMA:FDMA_XTR_CFG */
+#define FDMA_XTR_CFG \
+ __REG(TARGET_FDMA, 0, 1, 8, 0, 1, regs->gsize[GW_FDMA_FDMA], 364, 0, 1,\
+ 4)
#define FDMA_XTR_CFG_XTR_FIFO_WM GENMASK(15, 11)
#define FDMA_XTR_CFG_XTR_FIFO_WM_SET(x)\
@@ -4313,9 +4709,10 @@ enum sparx5_target {
#define FDMA_XTR_CFG_XTR_ARB_SAT_GET(x)\
FIELD_GET(FDMA_XTR_CFG_XTR_ARB_SAT, x)
-/* FDMA:FDMA:FDMA_PORT_CTRL */
-#define FDMA_PORT_CTRL(r) __REG(TARGET_FDMA,\
- 0, 1, 8, 0, 1, 428, 376, r, 2, 4)
+/* FDMA:FDMA:FDMA_PORT_CTRL */
+#define FDMA_PORT_CTRL(r) \
+ __REG(TARGET_FDMA, 0, 1, 8, 0, 1, regs->gsize[GW_FDMA_FDMA], 376, r, 2,\
+ 4)
#define FDMA_PORT_CTRL_INJ_STOP BIT(4)
#define FDMA_PORT_CTRL_INJ_STOP_SET(x)\
@@ -4347,9 +4744,10 @@ enum sparx5_target {
#define FDMA_PORT_CTRL_XTR_BUF_RST_GET(x)\
FIELD_GET(FDMA_PORT_CTRL_XTR_BUF_RST, x)
-/* FDMA:FDMA:FDMA_INTR_DCB */
-#define FDMA_INTR_DCB __REG(TARGET_FDMA,\
- 0, 1, 8, 0, 1, 428, 384, 0, 1, 4)
+/* FDMA:FDMA:FDMA_INTR_DCB */
+#define FDMA_INTR_DCB \
+ __REG(TARGET_FDMA, 0, 1, 8, 0, 1, regs->gsize[GW_FDMA_FDMA], 384, 0, 1,\
+ 4)
#define FDMA_INTR_DCB_INTR_DCB GENMASK(7, 0)
#define FDMA_INTR_DCB_INTR_DCB_SET(x)\
@@ -4357,9 +4755,10 @@ enum sparx5_target {
#define FDMA_INTR_DCB_INTR_DCB_GET(x)\
FIELD_GET(FDMA_INTR_DCB_INTR_DCB, x)
-/* FDMA:FDMA:FDMA_INTR_DCB_ENA */
-#define FDMA_INTR_DCB_ENA __REG(TARGET_FDMA,\
- 0, 1, 8, 0, 1, 428, 388, 0, 1, 4)
+/* FDMA:FDMA:FDMA_INTR_DCB_ENA */
+#define FDMA_INTR_DCB_ENA \
+ __REG(TARGET_FDMA, 0, 1, 8, 0, 1, regs->gsize[GW_FDMA_FDMA], 388, 0, 1,\
+ 4)
#define FDMA_INTR_DCB_ENA_INTR_DCB_ENA GENMASK(7, 0)
#define FDMA_INTR_DCB_ENA_INTR_DCB_ENA_SET(x)\
@@ -4367,9 +4766,10 @@ enum sparx5_target {
#define FDMA_INTR_DCB_ENA_INTR_DCB_ENA_GET(x)\
FIELD_GET(FDMA_INTR_DCB_ENA_INTR_DCB_ENA, x)
-/* FDMA:FDMA:FDMA_INTR_DB */
-#define FDMA_INTR_DB __REG(TARGET_FDMA,\
- 0, 1, 8, 0, 1, 428, 392, 0, 1, 4)
+/* FDMA:FDMA:FDMA_INTR_DB */
+#define FDMA_INTR_DB \
+ __REG(TARGET_FDMA, 0, 1, 8, 0, 1, regs->gsize[GW_FDMA_FDMA], 392, 0, 1,\
+ 4)
#define FDMA_INTR_DB_INTR_DB GENMASK(7, 0)
#define FDMA_INTR_DB_INTR_DB_SET(x)\
@@ -4377,9 +4777,10 @@ enum sparx5_target {
#define FDMA_INTR_DB_INTR_DB_GET(x)\
FIELD_GET(FDMA_INTR_DB_INTR_DB, x)
-/* FDMA:FDMA:FDMA_INTR_DB_ENA */
-#define FDMA_INTR_DB_ENA __REG(TARGET_FDMA,\
- 0, 1, 8, 0, 1, 428, 396, 0, 1, 4)
+/* FDMA:FDMA:FDMA_INTR_DB_ENA */
+#define FDMA_INTR_DB_ENA \
+ __REG(TARGET_FDMA, 0, 1, 8, 0, 1, regs->gsize[GW_FDMA_FDMA], 396, 0, 1,\
+ 4)
#define FDMA_INTR_DB_ENA_INTR_DB_ENA GENMASK(7, 0)
#define FDMA_INTR_DB_ENA_INTR_DB_ENA_SET(x)\
@@ -4387,9 +4788,10 @@ enum sparx5_target {
#define FDMA_INTR_DB_ENA_INTR_DB_ENA_GET(x)\
FIELD_GET(FDMA_INTR_DB_ENA_INTR_DB_ENA, x)
-/* FDMA:FDMA:FDMA_INTR_ERR */
-#define FDMA_INTR_ERR __REG(TARGET_FDMA,\
- 0, 1, 8, 0, 1, 428, 400, 0, 1, 4)
+/* FDMA:FDMA:FDMA_INTR_ERR */
+#define FDMA_INTR_ERR \
+ __REG(TARGET_FDMA, 0, 1, 8, 0, 1, regs->gsize[GW_FDMA_FDMA], 400, 0, 1,\
+ 4)
#define FDMA_INTR_ERR_INTR_PORT_ERR GENMASK(9, 8)
#define FDMA_INTR_ERR_INTR_PORT_ERR_SET(x)\
@@ -4403,9 +4805,10 @@ enum sparx5_target {
#define FDMA_INTR_ERR_INTR_CH_ERR_GET(x)\
FIELD_GET(FDMA_INTR_ERR_INTR_CH_ERR, x)
-/* FDMA:FDMA:FDMA_ERRORS */
-#define FDMA_ERRORS __REG(TARGET_FDMA,\
- 0, 1, 8, 0, 1, 428, 412, 0, 1, 4)
+/* FDMA:FDMA:FDMA_ERRORS */
+#define FDMA_ERRORS \
+ __REG(TARGET_FDMA, 0, 1, 8, 0, 1, regs->gsize[GW_FDMA_FDMA], 412, 0, 1,\
+ 4)
#define FDMA_ERRORS_ERR_XTR_WR GENMASK(31, 30)
#define FDMA_ERRORS_ERR_XTR_WR_SET(x)\
@@ -4455,9 +4858,10 @@ enum sparx5_target {
#define FDMA_ERRORS_ERR_CH_WR_GET(x)\
FIELD_GET(FDMA_ERRORS_ERR_CH_WR, x)
-/* FDMA:FDMA:FDMA_ERRORS_2 */
-#define FDMA_ERRORS_2 __REG(TARGET_FDMA,\
- 0, 1, 8, 0, 1, 428, 416, 0, 1, 4)
+/* FDMA:FDMA:FDMA_ERRORS_2 */
+#define FDMA_ERRORS_2 \
+ __REG(TARGET_FDMA, 0, 1, 8, 0, 1, regs->gsize[GW_FDMA_FDMA], 416, 0, 1,\
+ 4)
#define FDMA_ERRORS_2_ERR_XTR_FRAG GENMASK(1, 0)
#define FDMA_ERRORS_2_ERR_XTR_FRAG_SET(x)\
@@ -4465,9 +4869,10 @@ enum sparx5_target {
#define FDMA_ERRORS_2_ERR_XTR_FRAG_GET(x)\
FIELD_GET(FDMA_ERRORS_2_ERR_XTR_FRAG, x)
-/* FDMA:FDMA:FDMA_CTRL */
-#define FDMA_CTRL __REG(TARGET_FDMA,\
- 0, 1, 8, 0, 1, 428, 424, 0, 1, 4)
+/* FDMA:FDMA:FDMA_CTRL */
+#define FDMA_CTRL \
+ __REG(TARGET_FDMA, 0, 1, 8, 0, 1, regs->gsize[GW_FDMA_FDMA], 424, 0, 1,\
+ 4)
#define FDMA_CTRL_NRESET BIT(0)
#define FDMA_CTRL_NRESET_SET(x)\
@@ -4475,9 +4880,10 @@ enum sparx5_target {
#define FDMA_CTRL_NRESET_GET(x)\
FIELD_GET(FDMA_CTRL_NRESET, x)
-/* DEVCPU_GCB:CHIP_REGS:CHIP_ID */
-#define GCB_CHIP_ID __REG(TARGET_GCB,\
- 0, 1, 0, 0, 1, 424, 0, 0, 1, 4)
+/* DEVCPU_GCB:CHIP_REGS:CHIP_ID */
+#define GCB_CHIP_ID \
+ __REG(TARGET_GCB, 0, 1, 0, 0, 1, regs->gsize[GW_GCB_CHIP_REGS], 0, 0, \
+ 1, 4)
#define GCB_CHIP_ID_REV_ID GENMASK(31, 28)
#define GCB_CHIP_ID_REV_ID_SET(x)\
@@ -4503,10 +4909,12 @@ enum sparx5_target {
#define GCB_CHIP_ID_ONE_GET(x)\
FIELD_GET(GCB_CHIP_ID_ONE, x)
-/* DEVCPU_GCB:CHIP_REGS:SOFT_RST */
-#define GCB_SOFT_RST __REG(TARGET_GCB,\
- 0, 1, 0, 0, 1, 424, 8, 0, 1, 4)
+/* DEVCPU_GCB:CHIP_REGS:SOFT_RST */
+#define GCB_SOFT_RST \
+ __REG(TARGET_GCB, 0, 1, 0, 0, 1, regs->gsize[GW_GCB_CHIP_REGS], \
+ regs->raddr[RA_GCB_SOFT_RST], 0, 1, 4)
+/* SPARX5 ONLY */
#define GCB_SOFT_RST_SOFT_NON_CFG_RST BIT(2)
#define GCB_SOFT_RST_SOFT_NON_CFG_RST_SET(x)\
FIELD_PREP(GCB_SOFT_RST_SOFT_NON_CFG_RST, x)
@@ -4525,9 +4933,11 @@ enum sparx5_target {
#define GCB_SOFT_RST_SOFT_CHIP_RST_GET(x)\
FIELD_GET(GCB_SOFT_RST_SOFT_CHIP_RST, x)
-/* DEVCPU_GCB:CHIP_REGS:HW_SGPIO_SD_CFG */
-#define GCB_HW_SGPIO_SD_CFG __REG(TARGET_GCB,\
- 0, 1, 0, 0, 1, 424, 20, 0, 1, 4)
+/* SPARX5 ONLY */
+/* DEVCPU_GCB:CHIP_REGS:HW_SGPIO_SD_CFG */
+#define GCB_HW_SGPIO_SD_CFG \
+ __REG(TARGET_GCB, 0, 1, 0, 0, 1, regs->gsize[GW_GCB_CHIP_REGS], 20, 0, \
+ 1, 4)
#define GCB_HW_SGPIO_SD_CFG_SD_HIGH_ENA BIT(1)
#define GCB_HW_SGPIO_SD_CFG_SD_HIGH_ENA_SET(x)\
@@ -4541,19 +4951,23 @@ enum sparx5_target {
#define GCB_HW_SGPIO_SD_CFG_SD_MAP_SEL_GET(x)\
FIELD_GET(GCB_HW_SGPIO_SD_CFG_SD_MAP_SEL, x)
-/* DEVCPU_GCB:CHIP_REGS:HW_SGPIO_TO_SD_MAP_CFG */
-#define GCB_HW_SGPIO_TO_SD_MAP_CFG(r) __REG(TARGET_GCB,\
- 0, 1, 0, 0, 1, 424, 24, r, 65, 4)
+/* DEVCPU_GCB:CHIP_REGS:HW_SGPIO_TO_SD_MAP_CFG */
+#define GCB_HW_SGPIO_TO_SD_MAP_CFG(r) \
+ __REG(TARGET_GCB, 0, 1, 0, 0, 1, regs->gsize[GW_GCB_CHIP_REGS], \
+ regs->raddr[RA_GCB_HW_SGPIO_TO_SD_MAP_CFG], r, \
+ regs->rcnt[RC_GCB_HW_SGPIO_TO_SD_MAP_CFG], 4)
-#define GCB_HW_SGPIO_TO_SD_MAP_CFG_SGPIO_TO_SD_SEL GENMASK(8, 0)
+#define GCB_HW_SGPIO_TO_SD_MAP_CFG_SGPIO_TO_SD_SEL\
+ GENMASK(regs->fsize[FW_GCB_HW_SGPIO_TO_SD_MAP_CFG_SGPIO_TO_SD_SEL] + 0 - 1, 0)
#define GCB_HW_SGPIO_TO_SD_MAP_CFG_SGPIO_TO_SD_SEL_SET(x)\
- FIELD_PREP(GCB_HW_SGPIO_TO_SD_MAP_CFG_SGPIO_TO_SD_SEL, x)
+ spx5_field_prep(GCB_HW_SGPIO_TO_SD_MAP_CFG_SGPIO_TO_SD_SEL, x)
#define GCB_HW_SGPIO_TO_SD_MAP_CFG_SGPIO_TO_SD_SEL_GET(x)\
- FIELD_GET(GCB_HW_SGPIO_TO_SD_MAP_CFG_SGPIO_TO_SD_SEL, x)
+ spx5_field_get(GCB_HW_SGPIO_TO_SD_MAP_CFG_SGPIO_TO_SD_SEL, x)
-/* DEVCPU_GCB:SIO_CTRL:SIO_CLOCK */
-#define GCB_SIO_CLOCK(g) __REG(TARGET_GCB,\
- 0, 1, 876, g, 3, 280, 20, 0, 1, 4)
+/* DEVCPU_GCB:SIO_CTRL:SIO_CLOCK */
+#define GCB_SIO_CLOCK(g) \
+ __REG(TARGET_GCB, 0, 1, regs->gaddr[GA_GCB_SIO_CTRL], g, \
+ regs->gcnt[GC_GCB_SIO_CTRL], 280, 20, 0, 1, 4)
#define GCB_SIO_CLOCK_SIO_CLK_FREQ GENMASK(19, 8)
#define GCB_SIO_CLOCK_SIO_CLK_FREQ_SET(x)\
@@ -4567,9 +4981,10 @@ enum sparx5_target {
#define GCB_SIO_CLOCK_SYS_CLK_PERIOD_GET(x)\
FIELD_GET(GCB_SIO_CLOCK_SYS_CLK_PERIOD, x)
-/* HSCH:HSCH_CFG:CIR_CFG */
-#define HSCH_CIR_CFG(g) __REG(TARGET_HSCH,\
- 0, 1, 0, g, 5040, 32, 0, 0, 1, 4)
+/* HSCH:HSCH_CFG:CIR_CFG */
+#define HSCH_CIR_CFG(g) \
+ __REG(TARGET_HSCH, 0, 1, 0, g, regs->gcnt[GC_HSCH_HSCH_CFG], 32, 0, 0, \
+ 1, 4)
#define HSCH_CIR_CFG_CIR_RATE GENMASK(22, 6)
#define HSCH_CIR_CFG_CIR_RATE_SET(x)\
@@ -4583,9 +4998,10 @@ enum sparx5_target {
#define HSCH_CIR_CFG_CIR_BURST_GET(x)\
FIELD_GET(HSCH_CIR_CFG_CIR_BURST, x)
-/* HSCH:HSCH_CFG:EIR_CFG */
-#define HSCH_EIR_CFG(g) __REG(TARGET_HSCH,\
- 0, 1, 0, g, 5040, 32, 4, 0, 1, 4)
+/* HSCH:HSCH_CFG:EIR_CFG */
+#define HSCH_EIR_CFG(g) \
+ __REG(TARGET_HSCH, 0, 1, 0, g, regs->gcnt[GC_HSCH_HSCH_CFG], 32, 4, 0, \
+ 1, 4)
#define HSCH_EIR_CFG_EIR_RATE GENMASK(22, 6)
#define HSCH_EIR_CFG_EIR_RATE_SET(x)\
@@ -4599,15 +5015,17 @@ enum sparx5_target {
#define HSCH_EIR_CFG_EIR_BURST_GET(x)\
FIELD_GET(HSCH_EIR_CFG_EIR_BURST, x)
-/* HSCH:HSCH_CFG:SE_CFG */
-#define HSCH_SE_CFG(g) __REG(TARGET_HSCH,\
- 0, 1, 0, g, 5040, 32, 8, 0, 1, 4)
+/* HSCH:HSCH_CFG:SE_CFG */
+#define HSCH_SE_CFG(g) \
+ __REG(TARGET_HSCH, 0, 1, 0, g, regs->gcnt[GC_HSCH_HSCH_CFG], 32, 8, 0, \
+ 1, 4)
-#define HSCH_SE_CFG_SE_DWRR_CNT GENMASK(12, 6)
+#define HSCH_SE_CFG_SE_DWRR_CNT\
+ GENMASK(regs->fsize[FW_HSCH_SE_CFG_SE_DWRR_CNT] + 6 - 1, 6)
#define HSCH_SE_CFG_SE_DWRR_CNT_SET(x)\
- FIELD_PREP(HSCH_SE_CFG_SE_DWRR_CNT, x)
+ spx5_field_prep(HSCH_SE_CFG_SE_DWRR_CNT, x)
#define HSCH_SE_CFG_SE_DWRR_CNT_GET(x)\
- FIELD_GET(HSCH_SE_CFG_SE_DWRR_CNT, x)
+ spx5_field_get(HSCH_SE_CFG_SE_DWRR_CNT, x)
#define HSCH_SE_CFG_SE_AVB_ENA BIT(5)
#define HSCH_SE_CFG_SE_AVB_ENA_SET(x)\
@@ -4633,19 +5051,22 @@ enum sparx5_target {
#define HSCH_SE_CFG_SE_STOP_GET(x)\
FIELD_GET(HSCH_SE_CFG_SE_STOP, x)
-/* HSCH:HSCH_CFG:SE_CONNECT */
-#define HSCH_SE_CONNECT(g) __REG(TARGET_HSCH,\
- 0, 1, 0, g, 5040, 32, 12, 0, 1, 4)
+/* HSCH:HSCH_CFG:SE_CONNECT */
+#define HSCH_SE_CONNECT(g) \
+ __REG(TARGET_HSCH, 0, 1, 0, g, regs->gcnt[GC_HSCH_HSCH_CFG], 32, 12, 0,\
+ 1, 4)
-#define HSCH_SE_CONNECT_SE_LEAK_LINK GENMASK(15, 0)
+#define HSCH_SE_CONNECT_SE_LEAK_LINK\
+ GENMASK(regs->fsize[FW_HSCH_SE_CONNECT_SE_LEAK_LINK] + 0 - 1, 0)
#define HSCH_SE_CONNECT_SE_LEAK_LINK_SET(x)\
- FIELD_PREP(HSCH_SE_CONNECT_SE_LEAK_LINK, x)
+ spx5_field_prep(HSCH_SE_CONNECT_SE_LEAK_LINK, x)
#define HSCH_SE_CONNECT_SE_LEAK_LINK_GET(x)\
- FIELD_GET(HSCH_SE_CONNECT_SE_LEAK_LINK, x)
+ spx5_field_get(HSCH_SE_CONNECT_SE_LEAK_LINK, x)
-/* HSCH:HSCH_CFG:SE_DLB_SENSE */
-#define HSCH_SE_DLB_SENSE(g) __REG(TARGET_HSCH,\
- 0, 1, 0, g, 5040, 32, 16, 0, 1, 4)
+/* HSCH:HSCH_CFG:SE_DLB_SENSE */
+#define HSCH_SE_DLB_SENSE(g) \
+ __REG(TARGET_HSCH, 0, 1, 0, g, regs->gcnt[GC_HSCH_HSCH_CFG], 32, 16, 0,\
+ 1, 4)
#define HSCH_SE_DLB_SENSE_SE_DLB_PRIO GENMASK(12, 10)
#define HSCH_SE_DLB_SENSE_SE_DLB_PRIO_SET(x)\
@@ -4653,11 +5074,12 @@ enum sparx5_target {
#define HSCH_SE_DLB_SENSE_SE_DLB_PRIO_GET(x)\
FIELD_GET(HSCH_SE_DLB_SENSE_SE_DLB_PRIO, x)
-#define HSCH_SE_DLB_SENSE_SE_DLB_DPORT GENMASK(9, 3)
+#define HSCH_SE_DLB_SENSE_SE_DLB_DPORT\
+ GENMASK(regs->fsize[FW_HSCH_SE_DLB_SENSE_SE_DLB_DPORT] + 3 - 1, 3)
#define HSCH_SE_DLB_SENSE_SE_DLB_DPORT_SET(x)\
- FIELD_PREP(HSCH_SE_DLB_SENSE_SE_DLB_DPORT, x)
+ spx5_field_prep(HSCH_SE_DLB_SENSE_SE_DLB_DPORT, x)
#define HSCH_SE_DLB_SENSE_SE_DLB_DPORT_GET(x)\
- FIELD_GET(HSCH_SE_DLB_SENSE_SE_DLB_DPORT, x)
+ spx5_field_get(HSCH_SE_DLB_SENSE_SE_DLB_DPORT, x)
#define HSCH_SE_DLB_SENSE_SE_DLB_SE_ENA BIT(2)
#define HSCH_SE_DLB_SENSE_SE_DLB_SE_ENA_SET(x)\
@@ -4677,9 +5099,10 @@ enum sparx5_target {
#define HSCH_SE_DLB_SENSE_SE_DLB_DPORT_ENA_GET(x)\
FIELD_GET(HSCH_SE_DLB_SENSE_SE_DLB_DPORT_ENA, x)
-/* HSCH:HSCH_DWRR:DWRR_ENTRY */
-#define HSCH_DWRR_ENTRY(g) __REG(TARGET_HSCH,\
- 0, 1, 162816, g, 72, 4, 0, 0, 1, 4)
+/* HSCH:HSCH_DWRR:DWRR_ENTRY */
+#define HSCH_DWRR_ENTRY(g) \
+ __REG(TARGET_HSCH, 0, 1, regs->gaddr[GA_HSCH_HSCH_DWRR], g, \
+ regs->gcnt[GC_HSCH_HSCH_DWRR], 4, 0, 0, 1, 4)
#define HSCH_DWRR_ENTRY_DWRR_COST GENMASK(24, 20)
#define HSCH_DWRR_ENTRY_DWRR_COST_SET(x)\
@@ -4693,15 +5116,17 @@ enum sparx5_target {
#define HSCH_DWRR_ENTRY_DWRR_BALANCE_GET(x)\
FIELD_GET(HSCH_DWRR_ENTRY_DWRR_BALANCE, x)
-/* HSCH:HSCH_MISC:HSCH_CFG_CFG */
-#define HSCH_HSCH_CFG_CFG __REG(TARGET_HSCH,\
- 0, 1, 163104, 0, 1, 648, 284, 0, 1, 4)
+/* HSCH:HSCH_MISC:HSCH_CFG_CFG */
+#define HSCH_HSCH_CFG_CFG \
+ __REG(TARGET_HSCH, 0, 1, regs->gaddr[GA_HSCH_HSCH_MISC], 0, 1, 648, \
+ 284, 0, 1, 4)
-#define HSCH_HSCH_CFG_CFG_CFG_SE_IDX GENMASK(26, 14)
+#define HSCH_HSCH_CFG_CFG_CFG_SE_IDX\
+ GENMASK(regs->fsize[FW_HSCH_HSCH_CFG_CFG_CFG_SE_IDX] + 14 - 1, 14)
#define HSCH_HSCH_CFG_CFG_CFG_SE_IDX_SET(x)\
- FIELD_PREP(HSCH_HSCH_CFG_CFG_CFG_SE_IDX, x)
+ spx5_field_prep(HSCH_HSCH_CFG_CFG_CFG_SE_IDX, x)
#define HSCH_HSCH_CFG_CFG_CFG_SE_IDX_GET(x)\
- FIELD_GET(HSCH_HSCH_CFG_CFG_CFG_SE_IDX, x)
+ spx5_field_get(HSCH_HSCH_CFG_CFG_CFG_SE_IDX, x)
#define HSCH_HSCH_CFG_CFG_HSCH_LAYER GENMASK(13, 12)
#define HSCH_HSCH_CFG_CFG_HSCH_LAYER_SET(x)\
@@ -4715,9 +5140,11 @@ enum sparx5_target {
#define HSCH_HSCH_CFG_CFG_CSR_GRANT_GET(x)\
FIELD_GET(HSCH_HSCH_CFG_CFG_CSR_GRANT, x)
-/* HSCH:HSCH_MISC:SYS_CLK_PER */
-#define HSCH_SYS_CLK_PER __REG(TARGET_HSCH,\
- 0, 1, 163104, 0, 1, 648, 640, 0, 1, 4)
+/* SPARX5 ONLY */
+/* HSCH:HSCH_MISC:SYS_CLK_PER */
+#define HSCH_SYS_CLK_PER \
+ __REG(TARGET_HSCH, 0, 1, regs->gaddr[GA_HSCH_HSCH_MISC], 0, 1, 648, \
+ 640, 0, 1, 4)
#define HSCH_SYS_CLK_PER_100PS GENMASK(7, 0)
#define HSCH_SYS_CLK_PER_100PS_SET(x)\
@@ -4725,9 +5152,10 @@ enum sparx5_target {
#define HSCH_SYS_CLK_PER_100PS_GET(x)\
FIELD_GET(HSCH_SYS_CLK_PER_100PS, x)
-/* HSCH:HSCH_LEAK_LISTS:HSCH_TIMER_CFG */
-#define HSCH_HSCH_TIMER_CFG(g, r) __REG(TARGET_HSCH,\
- 0, 1, 161664, g, 4, 32, 0, r, 4, 4)
+/* HSCH:HSCH_LEAK_LISTS:HSCH_TIMER_CFG */
+#define HSCH_HSCH_TIMER_CFG(g, r) \
+ __REG(TARGET_HSCH, 0, 1, regs->gaddr[GA_HSCH_HSCH_LEAK_LISTS], g, 4, \
+ 32, 0, r, 4, 4)
#define HSCH_HSCH_TIMER_CFG_LEAK_TIME GENMASK(17, 0)
#define HSCH_HSCH_TIMER_CFG_LEAK_TIME_SET(x)\
@@ -4735,15 +5163,17 @@ enum sparx5_target {
#define HSCH_HSCH_TIMER_CFG_LEAK_TIME_GET(x)\
FIELD_GET(HSCH_HSCH_TIMER_CFG_LEAK_TIME, x)
-/* HSCH:HSCH_LEAK_LISTS:HSCH_LEAK_CFG */
-#define HSCH_HSCH_LEAK_CFG(g, r) __REG(TARGET_HSCH,\
- 0, 1, 161664, g, 4, 32, 16, r, 4, 4)
+/* HSCH:HSCH_LEAK_LISTS:HSCH_LEAK_CFG */
+#define HSCH_HSCH_LEAK_CFG(g, r) \
+ __REG(TARGET_HSCH, 0, 1, regs->gaddr[GA_HSCH_HSCH_LEAK_LISTS], g, 4, \
+ 32, 16, r, 4, 4)
-#define HSCH_HSCH_LEAK_CFG_LEAK_FIRST GENMASK(16, 1)
+#define HSCH_HSCH_LEAK_CFG_LEAK_FIRST\
+ GENMASK(regs->fsize[FW_HSCH_HSCH_LEAK_CFG_LEAK_FIRST] + 1 - 1, 1)
#define HSCH_HSCH_LEAK_CFG_LEAK_FIRST_SET(x)\
- FIELD_PREP(HSCH_HSCH_LEAK_CFG_LEAK_FIRST, x)
+ spx5_field_prep(HSCH_HSCH_LEAK_CFG_LEAK_FIRST, x)
#define HSCH_HSCH_LEAK_CFG_LEAK_FIRST_GET(x)\
- FIELD_GET(HSCH_HSCH_LEAK_CFG_LEAK_FIRST, x)
+ spx5_field_get(HSCH_HSCH_LEAK_CFG_LEAK_FIRST, x)
#define HSCH_HSCH_LEAK_CFG_LEAK_ERR BIT(0)
#define HSCH_HSCH_LEAK_CFG_LEAK_ERR_SET(x)\
@@ -4751,9 +5181,10 @@ enum sparx5_target {
#define HSCH_HSCH_LEAK_CFG_LEAK_ERR_GET(x)\
FIELD_GET(HSCH_HSCH_LEAK_CFG_LEAK_ERR, x)
-/* HSCH:SYSTEM:FLUSH_CTRL */
-#define HSCH_FLUSH_CTRL __REG(TARGET_HSCH,\
- 0, 1, 184000, 0, 1, 312, 4, 0, 1, 4)
+/* HSCH:SYSTEM:FLUSH_CTRL */
+#define HSCH_FLUSH_CTRL \
+ __REG(TARGET_HSCH, 0, 1, regs->gaddr[GA_HSCH_SYSTEM], 0, 1, 312, 4, 0, \
+ 1, 4)
#define HSCH_FLUSH_CTRL_FLUSH_ENA BIT(27)
#define HSCH_FLUSH_CTRL_FLUSH_ENA_SET(x)\
@@ -4773,11 +5204,12 @@ enum sparx5_target {
#define HSCH_FLUSH_CTRL_FLUSH_DST_GET(x)\
FIELD_GET(HSCH_FLUSH_CTRL_FLUSH_DST, x)
-#define HSCH_FLUSH_CTRL_FLUSH_PORT GENMASK(24, 18)
+#define HSCH_FLUSH_CTRL_FLUSH_PORT\
+ GENMASK(regs->fsize[FW_HSCH_FLUSH_CTRL_FLUSH_PORT] + 18 - 1, 18)
#define HSCH_FLUSH_CTRL_FLUSH_PORT_SET(x)\
- FIELD_PREP(HSCH_FLUSH_CTRL_FLUSH_PORT, x)
+ spx5_field_prep(HSCH_FLUSH_CTRL_FLUSH_PORT, x)
#define HSCH_FLUSH_CTRL_FLUSH_PORT_GET(x)\
- FIELD_GET(HSCH_FLUSH_CTRL_FLUSH_PORT, x)
+ spx5_field_get(HSCH_FLUSH_CTRL_FLUSH_PORT, x)
#define HSCH_FLUSH_CTRL_FLUSH_QUEUE BIT(17)
#define HSCH_FLUSH_CTRL_FLUSH_QUEUE_SET(x)\
@@ -4791,15 +5223,17 @@ enum sparx5_target {
#define HSCH_FLUSH_CTRL_FLUSH_SE_GET(x)\
FIELD_GET(HSCH_FLUSH_CTRL_FLUSH_SE, x)
-#define HSCH_FLUSH_CTRL_FLUSH_HIER GENMASK(15, 0)
+#define HSCH_FLUSH_CTRL_FLUSH_HIER\
+ GENMASK(regs->fsize[FW_HSCH_FLUSH_CTRL_FLUSH_HIER] + 0 - 1, 0)
#define HSCH_FLUSH_CTRL_FLUSH_HIER_SET(x)\
- FIELD_PREP(HSCH_FLUSH_CTRL_FLUSH_HIER, x)
+ spx5_field_prep(HSCH_FLUSH_CTRL_FLUSH_HIER, x)
#define HSCH_FLUSH_CTRL_FLUSH_HIER_GET(x)\
- FIELD_GET(HSCH_FLUSH_CTRL_FLUSH_HIER, x)
+ spx5_field_get(HSCH_FLUSH_CTRL_FLUSH_HIER, x)
-/* HSCH:SYSTEM:PORT_MODE */
-#define HSCH_PORT_MODE(r) __REG(TARGET_HSCH,\
- 0, 1, 184000, 0, 1, 312, 8, r, 70, 4)
+/* HSCH:SYSTEM:PORT_MODE */
+#define HSCH_PORT_MODE(r) \
+ __REG(TARGET_HSCH, 0, 1, regs->gaddr[GA_HSCH_SYSTEM], 0, 1, 312, 8, r, \
+ regs->rcnt[RC_HSCH_PORT_MODE], 4)
#define HSCH_PORT_MODE_DEQUEUE_DIS BIT(4)
#define HSCH_PORT_MODE_DEQUEUE_DIS_SET(x)\
@@ -4831,9 +5265,10 @@ enum sparx5_target {
#define HSCH_PORT_MODE_CPU_PRIO_MODE_GET(x)\
FIELD_GET(HSCH_PORT_MODE_CPU_PRIO_MODE, x)
-/* HSCH:SYSTEM:OUTB_SHARE_ENA */
-#define HSCH_OUTB_SHARE_ENA(r) __REG(TARGET_HSCH,\
- 0, 1, 184000, 0, 1, 312, 288, r, 5, 4)
+/* HSCH:SYSTEM:OUTB_SHARE_ENA */
+#define HSCH_OUTB_SHARE_ENA(r) \
+ __REG(TARGET_HSCH, 0, 1, regs->gaddr[GA_HSCH_SYSTEM], 0, 1, 312, 288, \
+ r, 5, 4)
#define HSCH_OUTB_SHARE_ENA_OUTB_SHARE_ENA GENMASK(7, 0)
#define HSCH_OUTB_SHARE_ENA_OUTB_SHARE_ENA_SET(x)\
@@ -4841,9 +5276,10 @@ enum sparx5_target {
#define HSCH_OUTB_SHARE_ENA_OUTB_SHARE_ENA_GET(x)\
FIELD_GET(HSCH_OUTB_SHARE_ENA_OUTB_SHARE_ENA, x)
-/* HSCH:MMGT:RESET_CFG */
-#define HSCH_RESET_CFG __REG(TARGET_HSCH,\
- 0, 1, 162368, 0, 1, 16, 8, 0, 1, 4)
+/* HSCH:MMGT:RESET_CFG */
+#define HSCH_RESET_CFG \
+ __REG(TARGET_HSCH, 0, 1, regs->gaddr[GA_HSCH_MMGT], 0, 1, 16, 8, 0, 1, \
+ 4)
#define HSCH_RESET_CFG_CORE_ENA BIT(0)
#define HSCH_RESET_CFG_CORE_ENA_SET(x)\
@@ -4851,9 +5287,10 @@ enum sparx5_target {
#define HSCH_RESET_CFG_CORE_ENA_GET(x)\
FIELD_GET(HSCH_RESET_CFG_CORE_ENA, x)
-/* HSCH:TAS_CONFIG:TAS_STATEMACHINE_CFG */
-#define HSCH_TAS_STATEMACHINE_CFG __REG(TARGET_HSCH,\
- 0, 1, 162384, 0, 1, 12, 8, 0, 1, 4)
+/* HSCH:TAS_CONFIG:TAS_STATEMACHINE_CFG */
+#define HSCH_TAS_STATEMACHINE_CFG \
+ __REG(TARGET_HSCH, 0, 1, regs->gaddr[GA_HSCH_TAS_CONFIG], 0, 1, \
+ regs->gsize[GW_HSCH_TAS_CONFIG], 8, 0, 1, 4)
#define HSCH_TAS_STATEMACHINE_CFG_REVISIT_DLY GENMASK(7, 0)
#define HSCH_TAS_STATEMACHINE_CFG_REVISIT_DLY_SET(x)\
@@ -4861,9 +5298,9 @@ enum sparx5_target {
#define HSCH_TAS_STATEMACHINE_CFG_REVISIT_DLY_GET(x)\
FIELD_GET(HSCH_TAS_STATEMACHINE_CFG_REVISIT_DLY, x)
-/* LRN:COMMON:COMMON_ACCESS_CTRL */
-#define LRN_COMMON_ACCESS_CTRL __REG(TARGET_LRN,\
- 0, 1, 0, 0, 1, 72, 0, 0, 1, 4)
+/* LRN:COMMON:COMMON_ACCESS_CTRL */
+#define LRN_COMMON_ACCESS_CTRL \
+ __REG(TARGET_LRN, 0, 1, 0, 0, 1, 72, 0, 0, 1, 4)
#define LRN_COMMON_ACCESS_CTRL_CPU_ACCESS_DIRECT_COL GENMASK(21, 20)
#define LRN_COMMON_ACCESS_CTRL_CPU_ACCESS_DIRECT_COL_SET(x)\
@@ -4877,11 +5314,12 @@ enum sparx5_target {
#define LRN_COMMON_ACCESS_CTRL_CPU_ACCESS_DIRECT_TYPE_GET(x)\
FIELD_GET(LRN_COMMON_ACCESS_CTRL_CPU_ACCESS_DIRECT_TYPE, x)
-#define LRN_COMMON_ACCESS_CTRL_CPU_ACCESS_DIRECT_ROW GENMASK(18, 5)
+#define LRN_COMMON_ACCESS_CTRL_CPU_ACCESS_DIRECT_ROW\
+ GENMASK(regs->fsize[FW_LRN_COMMON_ACCESS_CTRL_CPU_ACCESS_DIRECT_ROW] + 5 - 1, 5)
#define LRN_COMMON_ACCESS_CTRL_CPU_ACCESS_DIRECT_ROW_SET(x)\
- FIELD_PREP(LRN_COMMON_ACCESS_CTRL_CPU_ACCESS_DIRECT_ROW, x)
+ spx5_field_prep(LRN_COMMON_ACCESS_CTRL_CPU_ACCESS_DIRECT_ROW, x)
#define LRN_COMMON_ACCESS_CTRL_CPU_ACCESS_DIRECT_ROW_GET(x)\
- FIELD_GET(LRN_COMMON_ACCESS_CTRL_CPU_ACCESS_DIRECT_ROW, x)
+ spx5_field_get(LRN_COMMON_ACCESS_CTRL_CPU_ACCESS_DIRECT_ROW, x)
#define LRN_COMMON_ACCESS_CTRL_CPU_ACCESS_CMD GENMASK(4, 1)
#define LRN_COMMON_ACCESS_CTRL_CPU_ACCESS_CMD_SET(x)\
@@ -4895,9 +5333,9 @@ enum sparx5_target {
#define LRN_COMMON_ACCESS_CTRL_MAC_TABLE_ACCESS_SHOT_GET(x)\
FIELD_GET(LRN_COMMON_ACCESS_CTRL_MAC_TABLE_ACCESS_SHOT, x)
-/* LRN:COMMON:MAC_ACCESS_CFG_0 */
-#define LRN_MAC_ACCESS_CFG_0 __REG(TARGET_LRN,\
- 0, 1, 0, 0, 1, 72, 4, 0, 1, 4)
+/* LRN:COMMON:MAC_ACCESS_CFG_0 */ +#define LRN_MAC_ACCESS_CFG_0 \ + __REG(TARGET_LRN, 0, 1, 0, 0, 1, 72, 4, 0, 1, 4) #define LRN_MAC_ACCESS_CFG_0_MAC_ENTRY_FID GENMASK(28, 16) #define LRN_MAC_ACCESS_CFG_0_MAC_ENTRY_FID_SET(x)\ @@ -4911,13 +5349,13 @@ enum sparx5_target { #define LRN_MAC_ACCESS_CFG_0_MAC_ENTRY_MAC_MSB_GET(x)\ FIELD_GET(LRN_MAC_ACCESS_CFG_0_MAC_ENTRY_MAC_MSB, x) -/* LRN:COMMON:MAC_ACCESS_CFG_1 */ -#define LRN_MAC_ACCESS_CFG_1 __REG(TARGET_LRN,\ - 0, 1, 0, 0, 1, 72, 8, 0, 1, 4) +/* LRN:COMMON:MAC_ACCESS_CFG_1 */ +#define LRN_MAC_ACCESS_CFG_1 \ + __REG(TARGET_LRN, 0, 1, 0, 0, 1, 72, 8, 0, 1, 4) -/* LRN:COMMON:MAC_ACCESS_CFG_2 */ -#define LRN_MAC_ACCESS_CFG_2 __REG(TARGET_LRN,\ - 0, 1, 0, 0, 1, 72, 12, 0, 1, 4) +/* LRN:COMMON:MAC_ACCESS_CFG_2 */ +#define LRN_MAC_ACCESS_CFG_2 \ + __REG(TARGET_LRN, 0, 1, 0, 0, 1, 72, 12, 0, 1, 4) #define LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_SRC_KILL_FWD BIT(28) #define LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_SRC_KILL_FWD_SET(x)\ @@ -4991,19 +5429,20 @@ enum sparx5_target { #define LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_ADDR_GET(x)\ FIELD_GET(LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_ADDR, x) -/* LRN:COMMON:MAC_ACCESS_CFG_3 */ -#define LRN_MAC_ACCESS_CFG_3 __REG(TARGET_LRN,\ - 0, 1, 0, 0, 1, 72, 16, 0, 1, 4) +/* LRN:COMMON:MAC_ACCESS_CFG_3 */ +#define LRN_MAC_ACCESS_CFG_3 \ + __REG(TARGET_LRN, 0, 1, 0, 0, 1, 72, 16, 0, 1, 4) -#define LRN_MAC_ACCESS_CFG_3_MAC_ENTRY_ISDX_LIMIT_IDX GENMASK(10, 0) +#define LRN_MAC_ACCESS_CFG_3_MAC_ENTRY_ISDX_LIMIT_IDX\ + GENMASK(regs->fsize[FW_LRN_MAC_ACCESS_CFG_3_MAC_ENTRY_ISDX_LIMIT_IDX] + 0 - 1, 0) #define LRN_MAC_ACCESS_CFG_3_MAC_ENTRY_ISDX_LIMIT_IDX_SET(x)\ - FIELD_PREP(LRN_MAC_ACCESS_CFG_3_MAC_ENTRY_ISDX_LIMIT_IDX, x) + spx5_field_prep(LRN_MAC_ACCESS_CFG_3_MAC_ENTRY_ISDX_LIMIT_IDX, x) #define LRN_MAC_ACCESS_CFG_3_MAC_ENTRY_ISDX_LIMIT_IDX_GET(x)\ - FIELD_GET(LRN_MAC_ACCESS_CFG_3_MAC_ENTRY_ISDX_LIMIT_IDX, x) + spx5_field_get(LRN_MAC_ACCESS_CFG_3_MAC_ENTRY_ISDX_LIMIT_IDX, x) -/* LRN:COMMON:SCAN_NEXT_CFG */ -#define LRN_SCAN_NEXT_CFG __REG(TARGET_LRN,\ - 0, 1, 0, 0, 1, 72, 20, 0, 1, 4) +/* LRN:COMMON:SCAN_NEXT_CFG */ +#define LRN_SCAN_NEXT_CFG \ + __REG(TARGET_LRN, 0, 1, 0, 0, 1, 72, 20, 0, 1, 4) #define LRN_SCAN_NEXT_CFG_SCAN_AGE_FLAG_UPDATE_SEL GENMASK(21, 19) #define LRN_SCAN_NEXT_CFG_SCAN_AGE_FLAG_UPDATE_SEL_SET(x)\ @@ -5095,9 +5534,9 @@ enum sparx5_target { #define LRN_SCAN_NEXT_CFG_ADDR_FILTER_ENA_GET(x)\ FIELD_GET(LRN_SCAN_NEXT_CFG_ADDR_FILTER_ENA, x) -/* LRN:COMMON:SCAN_NEXT_CFG_1 */ -#define LRN_SCAN_NEXT_CFG_1 __REG(TARGET_LRN,\ - 0, 1, 0, 0, 1, 72, 24, 0, 1, 4) +/* LRN:COMMON:SCAN_NEXT_CFG_1 */ +#define LRN_SCAN_NEXT_CFG_1 \ + __REG(TARGET_LRN, 0, 1, 0, 0, 1, 72, 24, 0, 1, 4) #define LRN_SCAN_NEXT_CFG_1_PORT_MOVE_NEW_ADDR GENMASK(30, 16) #define LRN_SCAN_NEXT_CFG_1_PORT_MOVE_NEW_ADDR_SET(x)\ @@ -5111,9 +5550,9 @@ enum sparx5_target { #define LRN_SCAN_NEXT_CFG_1_SCAN_ENTRY_ADDR_MASK_GET(x)\ FIELD_GET(LRN_SCAN_NEXT_CFG_1_SCAN_ENTRY_ADDR_MASK, x) -/* LRN:COMMON:AUTOAGE_CFG */ -#define LRN_AUTOAGE_CFG(r) __REG(TARGET_LRN,\ - 0, 1, 0, 0, 1, 72, 36, r, 4, 4) +/* LRN:COMMON:AUTOAGE_CFG */ +#define LRN_AUTOAGE_CFG(r) \ + __REG(TARGET_LRN, 0, 1, 0, 0, 1, 72, 36, r, 4, 4) #define LRN_AUTOAGE_CFG_UNIT_SIZE GENMASK(29, 28) #define LRN_AUTOAGE_CFG_UNIT_SIZE_SET(x)\ @@ -5127,9 +5566,9 @@ enum sparx5_target { #define LRN_AUTOAGE_CFG_PERIOD_VAL_GET(x)\ FIELD_GET(LRN_AUTOAGE_CFG_PERIOD_VAL, x) -/* LRN:COMMON:AUTOAGE_CFG_1 */ -#define LRN_AUTOAGE_CFG_1 __REG(TARGET_LRN,\ - 0, 1, 0, 0, 1, 72, 52, 0, 1, 4) +/* LRN:COMMON:AUTOAGE_CFG_1 */ +#define 
LRN_AUTOAGE_CFG_1 \ + __REG(TARGET_LRN, 0, 1, 0, 0, 1, 72, 52, 0, 1, 4) #define LRN_AUTOAGE_CFG_1_PAUSE_AUTO_AGE_ENA BIT(25) #define LRN_AUTOAGE_CFG_1_PAUSE_AUTO_AGE_ENA_SET(x)\ @@ -5173,15 +5612,16 @@ enum sparx5_target { #define LRN_AUTOAGE_CFG_1_FORCE_IDLE_ENA_GET(x)\ FIELD_GET(LRN_AUTOAGE_CFG_1_FORCE_IDLE_ENA, x) -/* LRN:COMMON:AUTOAGE_CFG_2 */ -#define LRN_AUTOAGE_CFG_2 __REG(TARGET_LRN,\ - 0, 1, 0, 0, 1, 72, 56, 0, 1, 4) +/* LRN:COMMON:AUTOAGE_CFG_2 */ +#define LRN_AUTOAGE_CFG_2 \ + __REG(TARGET_LRN, 0, 1, 0, 0, 1, 72, 56, 0, 1, 4) -#define LRN_AUTOAGE_CFG_2_NEXT_ROW GENMASK(17, 4) +#define LRN_AUTOAGE_CFG_2_NEXT_ROW\ + GENMASK(regs->fsize[FW_LRN_AUTOAGE_CFG_2_NEXT_ROW] + 4 - 1, 4) #define LRN_AUTOAGE_CFG_2_NEXT_ROW_SET(x)\ - FIELD_PREP(LRN_AUTOAGE_CFG_2_NEXT_ROW, x) + spx5_field_prep(LRN_AUTOAGE_CFG_2_NEXT_ROW, x) #define LRN_AUTOAGE_CFG_2_NEXT_ROW_GET(x)\ - FIELD_GET(LRN_AUTOAGE_CFG_2_NEXT_ROW, x) + spx5_field_get(LRN_AUTOAGE_CFG_2_NEXT_ROW, x) #define LRN_AUTOAGE_CFG_2_SCAN_ONGOING_STATUS GENMASK(3, 0) #define LRN_AUTOAGE_CFG_2_SCAN_ONGOING_STATUS_SET(x)\ @@ -5189,9 +5629,10 @@ enum sparx5_target { #define LRN_AUTOAGE_CFG_2_SCAN_ONGOING_STATUS_GET(x)\ FIELD_GET(LRN_AUTOAGE_CFG_2_SCAN_ONGOING_STATUS, x) -/* PCIE_DM_EP:PF0_ATU_CAP:IATU_REGION_CTRL_2_OFF_OUTBOUND_0 */ -#define PCEP_RCTRL_2_OUT_0 __REG(TARGET_PCEP,\ - 0, 1, 3145728, 0, 1, 130852, 4, 0, 1, 4) +/* SPARX5 ONLY */ +/* PCIE_DM_EP:PF0_ATU_CAP:IATU_REGION_CTRL_2_OFF_OUTBOUND_0 */ +#define PCEP_RCTRL_2_OUT_0 \ + __REG(TARGET_PCEP, 0, 1, 3145728, 0, 1, 130852, 4, 0, 1, 4) #define PCEP_RCTRL_2_OUT_0_MSG_CODE GENMASK(7, 0) #define PCEP_RCTRL_2_OUT_0_MSG_CODE_SET(x)\ @@ -5253,9 +5694,10 @@ enum sparx5_target { #define PCEP_RCTRL_2_OUT_0_REGION_EN_GET(x)\ FIELD_GET(PCEP_RCTRL_2_OUT_0_REGION_EN, x) -/* PCIE_DM_EP:PF0_ATU_CAP:IATU_LWR_BASE_ADDR_OFF_OUTBOUND_0 */ -#define PCEP_ADDR_LWR_OUT_0 __REG(TARGET_PCEP,\ - 0, 1, 3145728, 0, 1, 130852, 8, 0, 1, 4) +/* SPARX5 ONLY */ +/* PCIE_DM_EP:PF0_ATU_CAP:IATU_LWR_BASE_ADDR_OFF_OUTBOUND_0 */ +#define PCEP_ADDR_LWR_OUT_0 \ + __REG(TARGET_PCEP, 0, 1, 3145728, 0, 1, 130852, 8, 0, 1, 4) #define PCEP_ADDR_LWR_OUT_0_LWR_BASE_HW GENMASK(15, 0) #define PCEP_ADDR_LWR_OUT_0_LWR_BASE_HW_SET(x)\ @@ -5269,13 +5711,15 @@ enum sparx5_target { #define PCEP_ADDR_LWR_OUT_0_LWR_BASE_RW_GET(x)\ FIELD_GET(PCEP_ADDR_LWR_OUT_0_LWR_BASE_RW, x) -/* PCIE_DM_EP:PF0_ATU_CAP:IATU_UPPER_BASE_ADDR_OFF_OUTBOUND_0 */ -#define PCEP_ADDR_UPR_OUT_0 __REG(TARGET_PCEP,\ - 0, 1, 3145728, 0, 1, 130852, 12, 0, 1, 4) +/* SPARX5 ONLY */ +/* PCIE_DM_EP:PF0_ATU_CAP:IATU_UPPER_BASE_ADDR_OFF_OUTBOUND_0 */ +#define PCEP_ADDR_UPR_OUT_0 \ + __REG(TARGET_PCEP, 0, 1, 3145728, 0, 1, 130852, 12, 0, 1, 4) -/* PCIE_DM_EP:PF0_ATU_CAP:IATU_LIMIT_ADDR_OFF_OUTBOUND_0 */ -#define PCEP_ADDR_LIM_OUT_0 __REG(TARGET_PCEP,\ - 0, 1, 3145728, 0, 1, 130852, 16, 0, 1, 4) +/* SPARX5 ONLY */ +/* PCIE_DM_EP:PF0_ATU_CAP:IATU_LIMIT_ADDR_OFF_OUTBOUND_0 */ +#define PCEP_ADDR_LIM_OUT_0 \ + __REG(TARGET_PCEP, 0, 1, 3145728, 0, 1, 130852, 16, 0, 1, 4) #define PCEP_ADDR_LIM_OUT_0_LIMIT_ADDR_HW GENMASK(15, 0) #define PCEP_ADDR_LIM_OUT_0_LIMIT_ADDR_HW_SET(x)\ @@ -5289,17 +5733,20 @@ enum sparx5_target { #define PCEP_ADDR_LIM_OUT_0_LIMIT_ADDR_RW_GET(x)\ FIELD_GET(PCEP_ADDR_LIM_OUT_0_LIMIT_ADDR_RW, x) -/* PCIE_DM_EP:PF0_ATU_CAP:IATU_LWR_TARGET_ADDR_OFF_OUTBOUND_0 */ -#define PCEP_ADDR_LWR_TGT_OUT_0 __REG(TARGET_PCEP,\ - 0, 1, 3145728, 0, 1, 130852, 20, 0, 1, 4) +/* SPARX5 ONLY */ +/* PCIE_DM_EP:PF0_ATU_CAP:IATU_LWR_TARGET_ADDR_OFF_OUTBOUND_0 */ +#define 
PCEP_ADDR_LWR_TGT_OUT_0 \ + __REG(TARGET_PCEP, 0, 1, 3145728, 0, 1, 130852, 20, 0, 1, 4) -/* PCIE_DM_EP:PF0_ATU_CAP:IATU_UPPER_TARGET_ADDR_OFF_OUTBOUND_0 */ -#define PCEP_ADDR_UPR_TGT_OUT_0 __REG(TARGET_PCEP,\ - 0, 1, 3145728, 0, 1, 130852, 24, 0, 1, 4) +/* SPARX5 ONLY */ +/* PCIE_DM_EP:PF0_ATU_CAP:IATU_UPPER_TARGET_ADDR_OFF_OUTBOUND_0 */ +#define PCEP_ADDR_UPR_TGT_OUT_0 \ + __REG(TARGET_PCEP, 0, 1, 3145728, 0, 1, 130852, 24, 0, 1, 4) -/* PCIE_DM_EP:PF0_ATU_CAP:IATU_UPPR_LIMIT_ADDR_OFF_OUTBOUND_0 */ -#define PCEP_ADDR_UPR_LIM_OUT_0 __REG(TARGET_PCEP,\ - 0, 1, 3145728, 0, 1, 130852, 32, 0, 1, 4) +/* SPARX5 ONLY */ +/* PCIE_DM_EP:PF0_ATU_CAP:IATU_UPPR_LIMIT_ADDR_OFF_OUTBOUND_0 */ +#define PCEP_ADDR_UPR_LIM_OUT_0 \ + __REG(TARGET_PCEP, 0, 1, 3145728, 0, 1, 130852, 32, 0, 1, 4) #define PCEP_ADDR_UPR_LIM_OUT_0_UPPR_LIMIT_ADDR_RW GENMASK(1, 0) #define PCEP_ADDR_UPR_LIM_OUT_0_UPPR_LIMIT_ADDR_RW_SET(x)\ @@ -5313,9 +5760,10 @@ enum sparx5_target { #define PCEP_ADDR_UPR_LIM_OUT_0_UPPR_LIMIT_ADDR_HW_GET(x)\ FIELD_GET(PCEP_ADDR_UPR_LIM_OUT_0_UPPR_LIMIT_ADDR_HW, x) -/* PCS_10GBASE_R:PCS_10GBR_CFG:PCS_CFG */ -#define PCS10G_BR_PCS_CFG(t) __REG(TARGET_PCS10G_BR,\ - t, 12, 0, 0, 1, 56, 0, 0, 1, 4) +/* PCS_10GBASE_R:PCS_10GBR_CFG:PCS_CFG */ +#define PCS10G_BR_PCS_CFG(t) \ + __REG(TARGET_PCS10G_BR, t, regs->tsize[TC_PCS10G_BR], 0, 0, 1, 56, 0, \ + 0, 1, 4) #define PCS10G_BR_PCS_CFG_PCS_ENA BIT(31) #define PCS10G_BR_PCS_CFG_PCS_ENA_SET(x)\ @@ -5389,9 +5837,10 @@ enum sparx5_target { #define PCS10G_BR_PCS_CFG_TX_SCR_DISABLE_GET(x)\ FIELD_GET(PCS10G_BR_PCS_CFG_TX_SCR_DISABLE, x) -/* PCS_10GBASE_R:PCS_10GBR_CFG:PCS_SD_CFG */ -#define PCS10G_BR_PCS_SD_CFG(t) __REG(TARGET_PCS10G_BR,\ - t, 12, 0, 0, 1, 56, 4, 0, 1, 4) +/* PCS_10GBASE_R:PCS_10GBR_CFG:PCS_SD_CFG */ +#define PCS10G_BR_PCS_SD_CFG(t) \ + __REG(TARGET_PCS10G_BR, t, regs->tsize[TC_PCS10G_BR], 0, 0, 1, 56, 4, \ + 0, 1, 4) #define PCS10G_BR_PCS_SD_CFG_SD_SEL BIT(8) #define PCS10G_BR_PCS_SD_CFG_SD_SEL_SET(x)\ @@ -5411,9 +5860,10 @@ enum sparx5_target { #define PCS10G_BR_PCS_SD_CFG_SD_ENA_GET(x)\ FIELD_GET(PCS10G_BR_PCS_SD_CFG_SD_ENA, x) -/* PCS_10GBASE_R:PCS_10GBR_CFG:PCS_CFG */ -#define PCS25G_BR_PCS_CFG(t) __REG(TARGET_PCS25G_BR,\ - t, 8, 0, 0, 1, 56, 0, 0, 1, 4) +/* SPARX5 ONLY */ +/* PCS_10GBASE_R:PCS_10GBR_CFG:PCS_CFG */ +#define PCS25G_BR_PCS_CFG(t) \ + __REG(TARGET_PCS25G_BR, t, 8, 0, 0, 1, 56, 0, 0, 1, 4) #define PCS25G_BR_PCS_CFG_PCS_ENA BIT(31) #define PCS25G_BR_PCS_CFG_PCS_ENA_SET(x)\ @@ -5487,9 +5937,10 @@ enum sparx5_target { #define PCS25G_BR_PCS_CFG_TX_SCR_DISABLE_GET(x)\ FIELD_GET(PCS25G_BR_PCS_CFG_TX_SCR_DISABLE, x) -/* PCS_10GBASE_R:PCS_10GBR_CFG:PCS_SD_CFG */ -#define PCS25G_BR_PCS_SD_CFG(t) __REG(TARGET_PCS25G_BR,\ - t, 8, 0, 0, 1, 56, 4, 0, 1, 4) +/* SPARX5 ONLY */ +/* PCS_10GBASE_R:PCS_10GBR_CFG:PCS_SD_CFG */ +#define PCS25G_BR_PCS_SD_CFG(t) \ + __REG(TARGET_PCS25G_BR, t, 8, 0, 0, 1, 56, 4, 0, 1, 4) #define PCS25G_BR_PCS_SD_CFG_SD_SEL BIT(8) #define PCS25G_BR_PCS_SD_CFG_SD_SEL_SET(x)\ @@ -5509,9 +5960,10 @@ enum sparx5_target { #define PCS25G_BR_PCS_SD_CFG_SD_ENA_GET(x)\ FIELD_GET(PCS25G_BR_PCS_SD_CFG_SD_ENA, x) -/* PCS_10GBASE_R:PCS_10GBR_CFG:PCS_CFG */ -#define PCS5G_BR_PCS_CFG(t) __REG(TARGET_PCS5G_BR,\ - t, 13, 0, 0, 1, 56, 0, 0, 1, 4) +/* PCS_10GBASE_R:PCS_10GBR_CFG:PCS_CFG */ +#define PCS5G_BR_PCS_CFG(t) \ + __REG(TARGET_PCS5G_BR, t, regs->tsize[TC_PCS5G_BR], 0, 0, 1, 56, 0, 0, \ + 1, 4) #define PCS5G_BR_PCS_CFG_PCS_ENA BIT(31) #define PCS5G_BR_PCS_CFG_PCS_ENA_SET(x)\ @@ -5585,9 +6037,10 @@ enum sparx5_target { #define 
PCS5G_BR_PCS_CFG_TX_SCR_DISABLE_GET(x)\ FIELD_GET(PCS5G_BR_PCS_CFG_TX_SCR_DISABLE, x) -/* PCS_10GBASE_R:PCS_10GBR_CFG:PCS_SD_CFG */ -#define PCS5G_BR_PCS_SD_CFG(t) __REG(TARGET_PCS5G_BR,\ - t, 13, 0, 0, 1, 56, 4, 0, 1, 4) +/* PCS_10GBASE_R:PCS_10GBR_CFG:PCS_SD_CFG */ +#define PCS5G_BR_PCS_SD_CFG(t) \ + __REG(TARGET_PCS5G_BR, t, regs->tsize[TC_PCS5G_BR], 0, 0, 1, 56, 4, 0, \ + 1, 4) #define PCS5G_BR_PCS_SD_CFG_SD_SEL BIT(8) #define PCS5G_BR_PCS_SD_CFG_SD_SEL_SET(x)\ @@ -5607,58 +6060,67 @@ enum sparx5_target { #define PCS5G_BR_PCS_SD_CFG_SD_ENA_GET(x)\ FIELD_GET(PCS5G_BR_PCS_SD_CFG_SD_ENA, x) -/* PORT_CONF:HW_CFG:DEV5G_MODES */ -#define PORT_CONF_DEV5G_MODES __REG(TARGET_PORT_CONF,\ - 0, 1, 0, 0, 1, 24, 0, 0, 1, 4) +/* PORT_CONF:HW_CFG:DEV5G_MODES */ +#define PORT_CONF_DEV5G_MODES \ + __REG(TARGET_PORT_CONF, 0, 1, 0, 0, 1, 24, 0, 0, 1, 4) +/* SPARX5 ONLY */ #define PORT_CONF_DEV5G_MODES_DEV5G_D0_MODE BIT(0) #define PORT_CONF_DEV5G_MODES_DEV5G_D0_MODE_SET(x)\ FIELD_PREP(PORT_CONF_DEV5G_MODES_DEV5G_D0_MODE, x) #define PORT_CONF_DEV5G_MODES_DEV5G_D0_MODE_GET(x)\ FIELD_GET(PORT_CONF_DEV5G_MODES_DEV5G_D0_MODE, x) +/* SPARX5 ONLY */ #define PORT_CONF_DEV5G_MODES_DEV5G_D1_MODE BIT(1) #define PORT_CONF_DEV5G_MODES_DEV5G_D1_MODE_SET(x)\ FIELD_PREP(PORT_CONF_DEV5G_MODES_DEV5G_D1_MODE, x) #define PORT_CONF_DEV5G_MODES_DEV5G_D1_MODE_GET(x)\ FIELD_GET(PORT_CONF_DEV5G_MODES_DEV5G_D1_MODE, x) +/* SPARX5 ONLY */ #define PORT_CONF_DEV5G_MODES_DEV5G_D2_MODE BIT(2) #define PORT_CONF_DEV5G_MODES_DEV5G_D2_MODE_SET(x)\ FIELD_PREP(PORT_CONF_DEV5G_MODES_DEV5G_D2_MODE, x) #define PORT_CONF_DEV5G_MODES_DEV5G_D2_MODE_GET(x)\ FIELD_GET(PORT_CONF_DEV5G_MODES_DEV5G_D2_MODE, x) +/* SPARX5 ONLY */ #define PORT_CONF_DEV5G_MODES_DEV5G_D3_MODE BIT(3) #define PORT_CONF_DEV5G_MODES_DEV5G_D3_MODE_SET(x)\ FIELD_PREP(PORT_CONF_DEV5G_MODES_DEV5G_D3_MODE, x) #define PORT_CONF_DEV5G_MODES_DEV5G_D3_MODE_GET(x)\ FIELD_GET(PORT_CONF_DEV5G_MODES_DEV5G_D3_MODE, x) +/* SPARX5 ONLY */ #define PORT_CONF_DEV5G_MODES_DEV5G_D4_MODE BIT(4) #define PORT_CONF_DEV5G_MODES_DEV5G_D4_MODE_SET(x)\ FIELD_PREP(PORT_CONF_DEV5G_MODES_DEV5G_D4_MODE, x) #define PORT_CONF_DEV5G_MODES_DEV5G_D4_MODE_GET(x)\ FIELD_GET(PORT_CONF_DEV5G_MODES_DEV5G_D4_MODE, x) +/* SPARX5 ONLY */ #define PORT_CONF_DEV5G_MODES_DEV5G_D5_MODE BIT(5) #define PORT_CONF_DEV5G_MODES_DEV5G_D5_MODE_SET(x)\ FIELD_PREP(PORT_CONF_DEV5G_MODES_DEV5G_D5_MODE, x) #define PORT_CONF_DEV5G_MODES_DEV5G_D5_MODE_GET(x)\ FIELD_GET(PORT_CONF_DEV5G_MODES_DEV5G_D5_MODE, x) +/* SPARX5 ONLY */ #define PORT_CONF_DEV5G_MODES_DEV5G_D6_MODE BIT(6) #define PORT_CONF_DEV5G_MODES_DEV5G_D6_MODE_SET(x)\ FIELD_PREP(PORT_CONF_DEV5G_MODES_DEV5G_D6_MODE, x) #define PORT_CONF_DEV5G_MODES_DEV5G_D6_MODE_GET(x)\ FIELD_GET(PORT_CONF_DEV5G_MODES_DEV5G_D6_MODE, x) +/* SPARX5 ONLY */ #define PORT_CONF_DEV5G_MODES_DEV5G_D7_MODE BIT(7) #define PORT_CONF_DEV5G_MODES_DEV5G_D7_MODE_SET(x)\ FIELD_PREP(PORT_CONF_DEV5G_MODES_DEV5G_D7_MODE, x) #define PORT_CONF_DEV5G_MODES_DEV5G_D7_MODE_GET(x)\ FIELD_GET(PORT_CONF_DEV5G_MODES_DEV5G_D7_MODE, x) +/* SPARX5 ONLY */ #define PORT_CONF_DEV5G_MODES_DEV5G_D8_MODE BIT(8) #define PORT_CONF_DEV5G_MODES_DEV5G_D8_MODE_SET(x)\ FIELD_PREP(PORT_CONF_DEV5G_MODES_DEV5G_D8_MODE, x) @@ -5671,27 +6133,30 @@ enum sparx5_target { #define PORT_CONF_DEV5G_MODES_DEV5G_D9_MODE_GET(x)\ FIELD_GET(PORT_CONF_DEV5G_MODES_DEV5G_D9_MODE, x) +/* SPARX5 ONLY */ #define PORT_CONF_DEV5G_MODES_DEV5G_D10_MODE BIT(10) #define PORT_CONF_DEV5G_MODES_DEV5G_D10_MODE_SET(x)\ FIELD_PREP(PORT_CONF_DEV5G_MODES_DEV5G_D10_MODE, x) #define 
PORT_CONF_DEV5G_MODES_DEV5G_D10_MODE_GET(x)\ FIELD_GET(PORT_CONF_DEV5G_MODES_DEV5G_D10_MODE, x) +/* SPARX5 ONLY */ #define PORT_CONF_DEV5G_MODES_DEV5G_D11_MODE BIT(11) #define PORT_CONF_DEV5G_MODES_DEV5G_D11_MODE_SET(x)\ FIELD_PREP(PORT_CONF_DEV5G_MODES_DEV5G_D11_MODE, x) #define PORT_CONF_DEV5G_MODES_DEV5G_D11_MODE_GET(x)\ FIELD_GET(PORT_CONF_DEV5G_MODES_DEV5G_D11_MODE, x) +/* SPARX5 ONLY */ #define PORT_CONF_DEV5G_MODES_DEV5G_D64_MODE BIT(12) #define PORT_CONF_DEV5G_MODES_DEV5G_D64_MODE_SET(x)\ FIELD_PREP(PORT_CONF_DEV5G_MODES_DEV5G_D64_MODE, x) #define PORT_CONF_DEV5G_MODES_DEV5G_D64_MODE_GET(x)\ FIELD_GET(PORT_CONF_DEV5G_MODES_DEV5G_D64_MODE, x) -/* PORT_CONF:HW_CFG:DEV10G_MODES */ -#define PORT_CONF_DEV10G_MODES __REG(TARGET_PORT_CONF,\ - 0, 1, 0, 0, 1, 24, 4, 0, 1, 4) +/* PORT_CONF:HW_CFG:DEV10G_MODES */ +#define PORT_CONF_DEV10G_MODES \ + __REG(TARGET_PORT_CONF, 0, 1, 0, 0, 1, 24, 4, 0, 1, 4) #define PORT_CONF_DEV10G_MODES_DEV10G_D12_MODE BIT(0) #define PORT_CONF_DEV10G_MODES_DEV10G_D12_MODE_SET(x)\ @@ -5699,75 +6164,87 @@ enum sparx5_target { #define PORT_CONF_DEV10G_MODES_DEV10G_D12_MODE_GET(x)\ FIELD_GET(PORT_CONF_DEV10G_MODES_DEV10G_D12_MODE, x) +/* SPARX5 ONLY */ #define PORT_CONF_DEV10G_MODES_DEV10G_D13_MODE BIT(1) #define PORT_CONF_DEV10G_MODES_DEV10G_D13_MODE_SET(x)\ FIELD_PREP(PORT_CONF_DEV10G_MODES_DEV10G_D13_MODE, x) #define PORT_CONF_DEV10G_MODES_DEV10G_D13_MODE_GET(x)\ FIELD_GET(PORT_CONF_DEV10G_MODES_DEV10G_D13_MODE, x) +/* SPARX5 ONLY */ #define PORT_CONF_DEV10G_MODES_DEV10G_D14_MODE BIT(2) #define PORT_CONF_DEV10G_MODES_DEV10G_D14_MODE_SET(x)\ FIELD_PREP(PORT_CONF_DEV10G_MODES_DEV10G_D14_MODE, x) #define PORT_CONF_DEV10G_MODES_DEV10G_D14_MODE_GET(x)\ FIELD_GET(PORT_CONF_DEV10G_MODES_DEV10G_D14_MODE, x) +/* SPARX5 ONLY */ #define PORT_CONF_DEV10G_MODES_DEV10G_D15_MODE BIT(3) #define PORT_CONF_DEV10G_MODES_DEV10G_D15_MODE_SET(x)\ FIELD_PREP(PORT_CONF_DEV10G_MODES_DEV10G_D15_MODE, x) #define PORT_CONF_DEV10G_MODES_DEV10G_D15_MODE_GET(x)\ FIELD_GET(PORT_CONF_DEV10G_MODES_DEV10G_D15_MODE, x) +/* SPARX5 ONLY */ #define PORT_CONF_DEV10G_MODES_DEV10G_D48_MODE BIT(4) #define PORT_CONF_DEV10G_MODES_DEV10G_D48_MODE_SET(x)\ FIELD_PREP(PORT_CONF_DEV10G_MODES_DEV10G_D48_MODE, x) #define PORT_CONF_DEV10G_MODES_DEV10G_D48_MODE_GET(x)\ FIELD_GET(PORT_CONF_DEV10G_MODES_DEV10G_D48_MODE, x) +/* SPARX5 ONLY */ #define PORT_CONF_DEV10G_MODES_DEV10G_D49_MODE BIT(5) #define PORT_CONF_DEV10G_MODES_DEV10G_D49_MODE_SET(x)\ FIELD_PREP(PORT_CONF_DEV10G_MODES_DEV10G_D49_MODE, x) #define PORT_CONF_DEV10G_MODES_DEV10G_D49_MODE_GET(x)\ FIELD_GET(PORT_CONF_DEV10G_MODES_DEV10G_D49_MODE, x) +/* SPARX5 ONLY */ #define PORT_CONF_DEV10G_MODES_DEV10G_D50_MODE BIT(6) #define PORT_CONF_DEV10G_MODES_DEV10G_D50_MODE_SET(x)\ FIELD_PREP(PORT_CONF_DEV10G_MODES_DEV10G_D50_MODE, x) #define PORT_CONF_DEV10G_MODES_DEV10G_D50_MODE_GET(x)\ FIELD_GET(PORT_CONF_DEV10G_MODES_DEV10G_D50_MODE, x) +/* SPARX5 ONLY */ #define PORT_CONF_DEV10G_MODES_DEV10G_D51_MODE BIT(7) #define PORT_CONF_DEV10G_MODES_DEV10G_D51_MODE_SET(x)\ FIELD_PREP(PORT_CONF_DEV10G_MODES_DEV10G_D51_MODE, x) #define PORT_CONF_DEV10G_MODES_DEV10G_D51_MODE_GET(x)\ FIELD_GET(PORT_CONF_DEV10G_MODES_DEV10G_D51_MODE, x) +/* SPARX5 ONLY */ #define PORT_CONF_DEV10G_MODES_DEV10G_D52_MODE BIT(8) #define PORT_CONF_DEV10G_MODES_DEV10G_D52_MODE_SET(x)\ FIELD_PREP(PORT_CONF_DEV10G_MODES_DEV10G_D52_MODE, x) #define PORT_CONF_DEV10G_MODES_DEV10G_D52_MODE_GET(x)\ FIELD_GET(PORT_CONF_DEV10G_MODES_DEV10G_D52_MODE, x) +/* SPARX5 ONLY */ #define 
PORT_CONF_DEV10G_MODES_DEV10G_D53_MODE BIT(9) #define PORT_CONF_DEV10G_MODES_DEV10G_D53_MODE_SET(x)\ FIELD_PREP(PORT_CONF_DEV10G_MODES_DEV10G_D53_MODE, x) #define PORT_CONF_DEV10G_MODES_DEV10G_D53_MODE_GET(x)\ FIELD_GET(PORT_CONF_DEV10G_MODES_DEV10G_D53_MODE, x) +/* SPARX5 ONLY */ #define PORT_CONF_DEV10G_MODES_DEV10G_D54_MODE BIT(10) #define PORT_CONF_DEV10G_MODES_DEV10G_D54_MODE_SET(x)\ FIELD_PREP(PORT_CONF_DEV10G_MODES_DEV10G_D54_MODE, x) #define PORT_CONF_DEV10G_MODES_DEV10G_D54_MODE_GET(x)\ FIELD_GET(PORT_CONF_DEV10G_MODES_DEV10G_D54_MODE, x) +/* SPARX5 ONLY */ #define PORT_CONF_DEV10G_MODES_DEV10G_D55_MODE BIT(11) #define PORT_CONF_DEV10G_MODES_DEV10G_D55_MODE_SET(x)\ FIELD_PREP(PORT_CONF_DEV10G_MODES_DEV10G_D55_MODE, x) #define PORT_CONF_DEV10G_MODES_DEV10G_D55_MODE_GET(x)\ FIELD_GET(PORT_CONF_DEV10G_MODES_DEV10G_D55_MODE, x) -/* PORT_CONF:HW_CFG:DEV25G_MODES */ -#define PORT_CONF_DEV25G_MODES __REG(TARGET_PORT_CONF,\ - 0, 1, 0, 0, 1, 24, 8, 0, 1, 4) +/* SPARX5 ONLY */ +/* PORT_CONF:HW_CFG:DEV25G_MODES */ +#define PORT_CONF_DEV25G_MODES \ + __REG(TARGET_PORT_CONF, 0, 1, 0, 0, 1, 24, 8, 0, 1, 4) #define PORT_CONF_DEV25G_MODES_DEV25G_D56_MODE BIT(0) #define PORT_CONF_DEV25G_MODES_DEV25G_D56_MODE_SET(x)\ @@ -5817,9 +6294,9 @@ enum sparx5_target { #define PORT_CONF_DEV25G_MODES_DEV25G_D63_MODE_GET(x)\ FIELD_GET(PORT_CONF_DEV25G_MODES_DEV25G_D63_MODE, x) -/* PORT_CONF:HW_CFG:QSGMII_ENA */ -#define PORT_CONF_QSGMII_ENA __REG(TARGET_PORT_CONF,\ - 0, 1, 0, 0, 1, 24, 12, 0, 1, 4) +/* PORT_CONF:HW_CFG:QSGMII_ENA */ +#define PORT_CONF_QSGMII_ENA \ + __REG(TARGET_PORT_CONF, 0, 1, 0, 0, 1, 24, 12, 0, 1, 4) #define PORT_CONF_QSGMII_ENA_QSGMII_ENA_0 BIT(0) #define PORT_CONF_QSGMII_ENA_QSGMII_ENA_0_SET(x)\ @@ -5857,45 +6334,52 @@ enum sparx5_target { #define PORT_CONF_QSGMII_ENA_QSGMII_ENA_5_GET(x)\ FIELD_GET(PORT_CONF_QSGMII_ENA_QSGMII_ENA_5, x) +/* SPARX5 ONLY */ #define PORT_CONF_QSGMII_ENA_QSGMII_ENA_6 BIT(6) #define PORT_CONF_QSGMII_ENA_QSGMII_ENA_6_SET(x)\ FIELD_PREP(PORT_CONF_QSGMII_ENA_QSGMII_ENA_6, x) #define PORT_CONF_QSGMII_ENA_QSGMII_ENA_6_GET(x)\ FIELD_GET(PORT_CONF_QSGMII_ENA_QSGMII_ENA_6, x) +/* SPARX5 ONLY */ #define PORT_CONF_QSGMII_ENA_QSGMII_ENA_7 BIT(7) #define PORT_CONF_QSGMII_ENA_QSGMII_ENA_7_SET(x)\ FIELD_PREP(PORT_CONF_QSGMII_ENA_QSGMII_ENA_7, x) #define PORT_CONF_QSGMII_ENA_QSGMII_ENA_7_GET(x)\ FIELD_GET(PORT_CONF_QSGMII_ENA_QSGMII_ENA_7, x) +/* SPARX5 ONLY */ #define PORT_CONF_QSGMII_ENA_QSGMII_ENA_8 BIT(8) #define PORT_CONF_QSGMII_ENA_QSGMII_ENA_8_SET(x)\ FIELD_PREP(PORT_CONF_QSGMII_ENA_QSGMII_ENA_8, x) #define PORT_CONF_QSGMII_ENA_QSGMII_ENA_8_GET(x)\ FIELD_GET(PORT_CONF_QSGMII_ENA_QSGMII_ENA_8, x) +/* SPARX5 ONLY */ #define PORT_CONF_QSGMII_ENA_QSGMII_ENA_9 BIT(9) #define PORT_CONF_QSGMII_ENA_QSGMII_ENA_9_SET(x)\ FIELD_PREP(PORT_CONF_QSGMII_ENA_QSGMII_ENA_9, x) #define PORT_CONF_QSGMII_ENA_QSGMII_ENA_9_GET(x)\ FIELD_GET(PORT_CONF_QSGMII_ENA_QSGMII_ENA_9, x) +/* SPARX5 ONLY */ #define PORT_CONF_QSGMII_ENA_QSGMII_ENA_10 BIT(10) #define PORT_CONF_QSGMII_ENA_QSGMII_ENA_10_SET(x)\ FIELD_PREP(PORT_CONF_QSGMII_ENA_QSGMII_ENA_10, x) #define PORT_CONF_QSGMII_ENA_QSGMII_ENA_10_GET(x)\ FIELD_GET(PORT_CONF_QSGMII_ENA_QSGMII_ENA_10, x) +/* SPARX5 ONLY */ #define PORT_CONF_QSGMII_ENA_QSGMII_ENA_11 BIT(11) #define PORT_CONF_QSGMII_ENA_QSGMII_ENA_11_SET(x)\ FIELD_PREP(PORT_CONF_QSGMII_ENA_QSGMII_ENA_11, x) #define PORT_CONF_QSGMII_ENA_QSGMII_ENA_11_GET(x)\ FIELD_GET(PORT_CONF_QSGMII_ENA_QSGMII_ENA_11, x) -/* PORT_CONF:USGMII_CFG_STAT:USGMII_CFG */ -#define PORT_CONF_USGMII_CFG(g) 
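The pattern running through these hunks: field masks that used to be compile-time GENMASK() constants are now computed from per-chip lookup tables (regs->fsize[...] for widths, regs->fpos[...] for positions), and the accessors switch from FIELD_PREP()/FIELD_GET() to spx5_field_prep()/spx5_field_get(). The switch is necessary because the <linux/bitfield.h> macros require the mask to be a compile-time constant, which a table lookup is not. The driver's actual helpers are not part of these hunks; a minimal sketch of what runtime equivalents could look like, assuming 32-bit registers and a non-zero mask (names here are illustrative, not the driver's verbatim code):

	/* Runtime counterparts of FIELD_PREP()/FIELD_GET(): the shift is
	 * derived from the mask's lowest set bit instead of being folded
	 * at compile time. The mask must be non-zero.
	 */
	#include <linux/bitops.h>
	#include <linux/types.h>

	static inline u32 example_field_get(u32 mask, u32 value)
	{
		return (value & mask) >> (ffs(mask) - 1);
	}

	static inline u32 example_field_prep(u32 mask, u32 value)
	{
		return (value << (ffs(mask) - 1)) & mask;
	}

Keeping the existing _SET()/_GET() macro names means callers stay unchanged whether a given field is fixed or sized per chip.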
__REG(TARGET_PORT_CONF,\ - 0, 1, 72, g, 6, 8, 0, 0, 1, 4) +/* SPARX5 ONLY */ +/* PORT_CONF:USGMII_CFG_STAT:USGMII_CFG */ +#define PORT_CONF_USGMII_CFG(g) \ + __REG(TARGET_PORT_CONF, 0, 1, 72, g, 6, 8, 0, 0, 1, 4) #define PORT_CONF_USGMII_CFG_BYPASS_SCRAM BIT(9) #define PORT_CONF_USGMII_CFG_BYPASS_SCRAM_SET(x)\ @@ -5939,39 +6423,46 @@ enum sparx5_target { #define PORT_CONF_USGMII_CFG_QUAD_MODE_GET(x)\ FIELD_GET(PORT_CONF_USGMII_CFG_QUAD_MODE, x) -/* DEVCPU_PTP:PTP_CFG:PTP_PIN_INTR */ -#define PTP_PTP_PIN_INTR __REG(TARGET_PTP,\ - 0, 1, 320, 0, 1, 16, 0, 0, 1, 4) +/* DEVCPU_PTP:PTP_CFG:PTP_PIN_INTR */ +#define PTP_PTP_PIN_INTR \ + __REG(TARGET_PTP, 0, 1, regs->gaddr[GA_PTP_PTP_CFG], 0, 1, 16, 0, 0, 1,\ + 4) -#define PTP_PTP_PIN_INTR_INTR_PTP GENMASK(4, 0) +#define PTP_PTP_PIN_INTR_INTR_PTP\ + GENMASK(regs->fsize[FW_PTP_PTP_PIN_INTR_INTR_PTP] + 0 - 1, 0) #define PTP_PTP_PIN_INTR_INTR_PTP_SET(x)\ - FIELD_PREP(PTP_PTP_PIN_INTR_INTR_PTP, x) + spx5_field_prep(PTP_PTP_PIN_INTR_INTR_PTP, x) #define PTP_PTP_PIN_INTR_INTR_PTP_GET(x)\ - FIELD_GET(PTP_PTP_PIN_INTR_INTR_PTP, x) + spx5_field_get(PTP_PTP_PIN_INTR_INTR_PTP, x) -/* DEVCPU_PTP:PTP_CFG:PTP_PIN_INTR_ENA */ -#define PTP_PTP_PIN_INTR_ENA __REG(TARGET_PTP,\ - 0, 1, 320, 0, 1, 16, 4, 0, 1, 4) +/* DEVCPU_PTP:PTP_CFG:PTP_PIN_INTR_ENA */ +#define PTP_PTP_PIN_INTR_ENA \ + __REG(TARGET_PTP, 0, 1, regs->gaddr[GA_PTP_PTP_CFG], 0, 1, 16, 4, 0, 1,\ + 4) -#define PTP_PTP_PIN_INTR_ENA_INTR_PTP_ENA GENMASK(4, 0) +#define PTP_PTP_PIN_INTR_ENA_INTR_PTP_ENA\ + GENMASK(regs->fsize[FW_PTP_PTP_PIN_INTR_ENA_INTR_PTP_ENA] + 0 - 1, 0) #define PTP_PTP_PIN_INTR_ENA_INTR_PTP_ENA_SET(x)\ - FIELD_PREP(PTP_PTP_PIN_INTR_ENA_INTR_PTP_ENA, x) + spx5_field_prep(PTP_PTP_PIN_INTR_ENA_INTR_PTP_ENA, x) #define PTP_PTP_PIN_INTR_ENA_INTR_PTP_ENA_GET(x)\ - FIELD_GET(PTP_PTP_PIN_INTR_ENA_INTR_PTP_ENA, x) + spx5_field_get(PTP_PTP_PIN_INTR_ENA_INTR_PTP_ENA, x) -/* DEVCPU_PTP:PTP_CFG:PTP_INTR_IDENT */ -#define PTP_PTP_INTR_IDENT __REG(TARGET_PTP,\ - 0, 1, 320, 0, 1, 16, 8, 0, 1, 4) +/* DEVCPU_PTP:PTP_CFG:PTP_INTR_IDENT */ +#define PTP_PTP_INTR_IDENT \ + __REG(TARGET_PTP, 0, 1, regs->gaddr[GA_PTP_PTP_CFG], 0, 1, 16, 8, 0, 1,\ + 4) -#define PTP_PTP_INTR_IDENT_INTR_PTP_IDENT GENMASK(4, 0) +#define PTP_PTP_INTR_IDENT_INTR_PTP_IDENT\ + GENMASK(regs->fsize[FW_PTP_PTP_INTR_IDENT_INTR_PTP_IDENT] + 0 - 1, 0) #define PTP_PTP_INTR_IDENT_INTR_PTP_IDENT_SET(x)\ - FIELD_PREP(PTP_PTP_INTR_IDENT_INTR_PTP_IDENT, x) + spx5_field_prep(PTP_PTP_INTR_IDENT_INTR_PTP_IDENT, x) #define PTP_PTP_INTR_IDENT_INTR_PTP_IDENT_GET(x)\ - FIELD_GET(PTP_PTP_INTR_IDENT_INTR_PTP_IDENT, x) + spx5_field_get(PTP_PTP_INTR_IDENT_INTR_PTP_IDENT, x) -/* DEVCPU_PTP:PTP_CFG:PTP_DOM_CFG */ -#define PTP_PTP_DOM_CFG __REG(TARGET_PTP,\ - 0, 1, 320, 0, 1, 16, 12, 0, 1, 4) +/* DEVCPU_PTP:PTP_CFG:PTP_DOM_CFG */ +#define PTP_PTP_DOM_CFG \ + __REG(TARGET_PTP, 0, 1, regs->gaddr[GA_PTP_PTP_CFG], 0, 1, 16, 12, 0, \ + 1, 4) #define PTP_PTP_DOM_CFG_PTP_ENA GENMASK(11, 9) #define PTP_PTP_DOM_CFG_PTP_ENA_SET(x)\ @@ -5997,13 +6488,15 @@ enum sparx5_target { #define PTP_PTP_DOM_CFG_PTP_CLKCFG_DIS_GET(x)\ FIELD_GET(PTP_PTP_DOM_CFG_PTP_CLKCFG_DIS, x) -/* DEVCPU_PTP:PTP_TOD_DOMAINS:CLK_PER_CFG */ -#define PTP_CLK_PER_CFG(g, r) __REG(TARGET_PTP,\ - 0, 1, 336, g, 3, 28, 0, r, 2, 4) +/* DEVCPU_PTP:PTP_TOD_DOMAINS:CLK_PER_CFG */ +#define PTP_CLK_PER_CFG(g, r) \ + __REG(TARGET_PTP, 0, 1, regs->gaddr[GA_PTP_PTP_TOD_DOMAINS], g, 3, 28, \ + 0, r, 2, 4) -/* DEVCPU_PTP:PTP_TOD_DOMAINS:PTP_CUR_NSEC */ -#define PTP_PTP_CUR_NSEC(g) __REG(TARGET_PTP,\ - 0, 1, 336, g, 
3, 28, 8, 0, 1, 4) +/* DEVCPU_PTP:PTP_TOD_DOMAINS:PTP_CUR_NSEC */ +#define PTP_PTP_CUR_NSEC(g) \ + __REG(TARGET_PTP, 0, 1, regs->gaddr[GA_PTP_PTP_TOD_DOMAINS], g, 3, 28, \ + 8, 0, 1, 4) #define PTP_PTP_CUR_NSEC_PTP_CUR_NSEC GENMASK(29, 0) #define PTP_PTP_CUR_NSEC_PTP_CUR_NSEC_SET(x)\ @@ -6011,9 +6504,10 @@ enum sparx5_target { #define PTP_PTP_CUR_NSEC_PTP_CUR_NSEC_GET(x)\ FIELD_GET(PTP_PTP_CUR_NSEC_PTP_CUR_NSEC, x) -/* DEVCPU_PTP:PTP_TOD_DOMAINS:PTP_CUR_NSEC_FRAC */ -#define PTP_PTP_CUR_NSEC_FRAC(g) __REG(TARGET_PTP,\ - 0, 1, 336, g, 3, 28, 12, 0, 1, 4) +/* DEVCPU_PTP:PTP_TOD_DOMAINS:PTP_CUR_NSEC_FRAC */ +#define PTP_PTP_CUR_NSEC_FRAC(g) \ + __REG(TARGET_PTP, 0, 1, regs->gaddr[GA_PTP_PTP_TOD_DOMAINS], g, 3, 28, \ + 12, 0, 1, 4) #define PTP_PTP_CUR_NSEC_FRAC_PTP_CUR_NSEC_FRAC GENMASK(7, 0) #define PTP_PTP_CUR_NSEC_FRAC_PTP_CUR_NSEC_FRAC_SET(x)\ @@ -6021,13 +6515,15 @@ enum sparx5_target { #define PTP_PTP_CUR_NSEC_FRAC_PTP_CUR_NSEC_FRAC_GET(x)\ FIELD_GET(PTP_PTP_CUR_NSEC_FRAC_PTP_CUR_NSEC_FRAC, x) -/* DEVCPU_PTP:PTP_TOD_DOMAINS:PTP_CUR_SEC_LSB */ -#define PTP_PTP_CUR_SEC_LSB(g) __REG(TARGET_PTP,\ - 0, 1, 336, g, 3, 28, 16, 0, 1, 4) +/* DEVCPU_PTP:PTP_TOD_DOMAINS:PTP_CUR_SEC_LSB */ +#define PTP_PTP_CUR_SEC_LSB(g) \ + __REG(TARGET_PTP, 0, 1, regs->gaddr[GA_PTP_PTP_TOD_DOMAINS], g, 3, 28, \ + 16, 0, 1, 4) -/* DEVCPU_PTP:PTP_TOD_DOMAINS:PTP_CUR_SEC_MSB */ -#define PTP_PTP_CUR_SEC_MSB(g) __REG(TARGET_PTP,\ - 0, 1, 336, g, 3, 28, 20, 0, 1, 4) +/* DEVCPU_PTP:PTP_TOD_DOMAINS:PTP_CUR_SEC_MSB */ +#define PTP_PTP_CUR_SEC_MSB(g) \ + __REG(TARGET_PTP, 0, 1, regs->gaddr[GA_PTP_PTP_TOD_DOMAINS], g, 3, 28, \ + 20, 0, 1, 4) #define PTP_PTP_CUR_SEC_MSB_PTP_CUR_SEC_MSB GENMASK(15, 0) #define PTP_PTP_CUR_SEC_MSB_PTP_CUR_SEC_MSB_SET(x)\ @@ -6035,37 +6531,43 @@ enum sparx5_target { #define PTP_PTP_CUR_SEC_MSB_PTP_CUR_SEC_MSB_GET(x)\ FIELD_GET(PTP_PTP_CUR_SEC_MSB_PTP_CUR_SEC_MSB, x) -/* DEVCPU_PTP:PTP_TOD_DOMAINS:NTP_CUR_NSEC */ -#define PTP_NTP_CUR_NSEC(g) __REG(TARGET_PTP,\ - 0, 1, 336, g, 3, 28, 24, 0, 1, 4) +/* DEVCPU_PTP:PTP_TOD_DOMAINS:NTP_CUR_NSEC */ +#define PTP_NTP_CUR_NSEC(g) \ + __REG(TARGET_PTP, 0, 1, regs->gaddr[GA_PTP_PTP_TOD_DOMAINS], g, 3, 28, \ + 24, 0, 1, 4) -/* DEVCPU_PTP:PTP_PINS:PTP_PIN_CFG */ -#define PTP_PTP_PIN_CFG(g) __REG(TARGET_PTP,\ - 0, 1, 0, g, 5, 64, 0, 0, 1, 4) +/* DEVCPU_PTP:PTP_PINS:PTP_PIN_CFG */ +#define PTP_PTP_PIN_CFG(g) \ + __REG(TARGET_PTP, 0, 1, 0, g, regs->gcnt[GC_PTP_PTP_PINS], 64, 0, 0, 1,\ + 4) -#define PTP_PTP_PIN_CFG_PTP_PIN_ACTION GENMASK(28, 26) +#define PTP_PTP_PIN_CFG_PTP_PIN_ACTION\ + GENMASK(regs->fpos[FP_PTP_PTP_PIN_CFG_PTP_PIN_ACTION] + 2, regs->fpos[FP_PTP_PTP_PIN_CFG_PTP_PIN_ACTION]) #define PTP_PTP_PIN_CFG_PTP_PIN_ACTION_SET(x)\ - FIELD_PREP(PTP_PTP_PIN_CFG_PTP_PIN_ACTION, x) + spx5_field_prep(PTP_PTP_PIN_CFG_PTP_PIN_ACTION, x) #define PTP_PTP_PIN_CFG_PTP_PIN_ACTION_GET(x)\ - FIELD_GET(PTP_PTP_PIN_CFG_PTP_PIN_ACTION, x) + spx5_field_get(PTP_PTP_PIN_CFG_PTP_PIN_ACTION, x) -#define PTP_PTP_PIN_CFG_PTP_PIN_SYNC GENMASK(25, 24) +#define PTP_PTP_PIN_CFG_PTP_PIN_SYNC\ + GENMASK(regs->fpos[FP_PTP_PTP_PIN_CFG_PTP_PIN_SYNC] + 1, regs->fpos[FP_PTP_PTP_PIN_CFG_PTP_PIN_SYNC]) #define PTP_PTP_PIN_CFG_PTP_PIN_SYNC_SET(x)\ - FIELD_PREP(PTP_PTP_PIN_CFG_PTP_PIN_SYNC, x) + spx5_field_prep(PTP_PTP_PIN_CFG_PTP_PIN_SYNC, x) #define PTP_PTP_PIN_CFG_PTP_PIN_SYNC_GET(x)\ - FIELD_GET(PTP_PTP_PIN_CFG_PTP_PIN_SYNC, x) + spx5_field_get(PTP_PTP_PIN_CFG_PTP_PIN_SYNC, x) -#define PTP_PTP_PIN_CFG_PTP_PIN_INV_POL BIT(23) +#define PTP_PTP_PIN_CFG_PTP_PIN_INV_POL\ + 
BIT(regs->fpos[FP_PTP_PTP_PIN_CFG_PTP_PIN_INV_POL]) #define PTP_PTP_PIN_CFG_PTP_PIN_INV_POL_SET(x)\ - FIELD_PREP(PTP_PTP_PIN_CFG_PTP_PIN_INV_POL, x) + spx5_field_prep(PTP_PTP_PIN_CFG_PTP_PIN_INV_POL, x) #define PTP_PTP_PIN_CFG_PTP_PIN_INV_POL_GET(x)\ - FIELD_GET(PTP_PTP_PIN_CFG_PTP_PIN_INV_POL, x) + spx5_field_get(PTP_PTP_PIN_CFG_PTP_PIN_INV_POL, x) -#define PTP_PTP_PIN_CFG_PTP_PIN_SELECT GENMASK(22, 21) +#define PTP_PTP_PIN_CFG_PTP_PIN_SELECT\ + GENMASK(regs->fsize[FW_PTP_PTP_PIN_CFG_PTP_PIN_SELECT] + 21 - 1, 21) #define PTP_PTP_PIN_CFG_PTP_PIN_SELECT_SET(x)\ - FIELD_PREP(PTP_PTP_PIN_CFG_PTP_PIN_SELECT, x) + spx5_field_prep(PTP_PTP_PIN_CFG_PTP_PIN_SELECT, x) #define PTP_PTP_PIN_CFG_PTP_PIN_SELECT_GET(x)\ - FIELD_GET(PTP_PTP_PIN_CFG_PTP_PIN_SELECT, x) + spx5_field_get(PTP_PTP_PIN_CFG_PTP_PIN_SELECT, x) #define PTP_PTP_PIN_CFG_PTP_CLK_SELECT GENMASK(20, 18) #define PTP_PTP_PIN_CFG_PTP_CLK_SELECT_SET(x)\ @@ -6097,9 +6599,10 @@ enum sparx5_target { #define PTP_PTP_PIN_CFG_PTP_PIN_OUTP_OFS_GET(x)\ FIELD_GET(PTP_PTP_PIN_CFG_PTP_PIN_OUTP_OFS, x) -/* DEVCPU_PTP:PTP_PINS:PTP_TOD_SEC_MSB */ -#define PTP_PTP_TOD_SEC_MSB(g) __REG(TARGET_PTP,\ - 0, 1, 0, g, 5, 64, 4, 0, 1, 4) +/* DEVCPU_PTP:PTP_PINS:PTP_TOD_SEC_MSB */ +#define PTP_PTP_TOD_SEC_MSB(g) \ + __REG(TARGET_PTP, 0, 1, 0, g, regs->gcnt[GC_PTP_PTP_PINS], 64, 4, 0, 1,\ + 4) #define PTP_PTP_TOD_SEC_MSB_PTP_TOD_SEC_MSB GENMASK(15, 0) #define PTP_PTP_TOD_SEC_MSB_PTP_TOD_SEC_MSB_SET(x)\ @@ -6107,13 +6610,15 @@ enum sparx5_target { #define PTP_PTP_TOD_SEC_MSB_PTP_TOD_SEC_MSB_GET(x)\ FIELD_GET(PTP_PTP_TOD_SEC_MSB_PTP_TOD_SEC_MSB, x) -/* DEVCPU_PTP:PTP_PINS:PTP_TOD_SEC_LSB */ -#define PTP_PTP_TOD_SEC_LSB(g) __REG(TARGET_PTP,\ - 0, 1, 0, g, 5, 64, 8, 0, 1, 4) +/* DEVCPU_PTP:PTP_PINS:PTP_TOD_SEC_LSB */ +#define PTP_PTP_TOD_SEC_LSB(g) \ + __REG(TARGET_PTP, 0, 1, 0, g, regs->gcnt[GC_PTP_PTP_PINS], 64, 8, 0, 1,\ + 4) -/* DEVCPU_PTP:PTP_PINS:PTP_TOD_NSEC */ -#define PTP_PTP_TOD_NSEC(g) __REG(TARGET_PTP,\ - 0, 1, 0, g, 5, 64, 12, 0, 1, 4) +/* DEVCPU_PTP:PTP_PINS:PTP_TOD_NSEC */ +#define PTP_PTP_TOD_NSEC(g) \ + __REG(TARGET_PTP, 0, 1, 0, g, regs->gcnt[GC_PTP_PTP_PINS], 64, 12, 0, \ + 1, 4) #define PTP_PTP_TOD_NSEC_PTP_TOD_NSEC GENMASK(29, 0) #define PTP_PTP_TOD_NSEC_PTP_TOD_NSEC_SET(x)\ @@ -6121,9 +6626,10 @@ enum sparx5_target { #define PTP_PTP_TOD_NSEC_PTP_TOD_NSEC_GET(x)\ FIELD_GET(PTP_PTP_TOD_NSEC_PTP_TOD_NSEC, x) -/* DEVCPU_PTP:PTP_PINS:PTP_TOD_NSEC_FRAC */ -#define PTP_PTP_TOD_NSEC_FRAC(g) __REG(TARGET_PTP,\ - 0, 1, 0, g, 5, 64, 16, 0, 1, 4) +/* DEVCPU_PTP:PTP_PINS:PTP_TOD_NSEC_FRAC */ +#define PTP_PTP_TOD_NSEC_FRAC(g) \ + __REG(TARGET_PTP, 0, 1, 0, g, regs->gcnt[GC_PTP_PTP_PINS], 64, 16, 0, \ + 1, 4) #define PTP_PTP_TOD_NSEC_FRAC_PTP_TOD_NSEC_FRAC GENMASK(7, 0) #define PTP_PTP_TOD_NSEC_FRAC_PTP_TOD_NSEC_FRAC_SET(x)\ @@ -6131,13 +6637,15 @@ enum sparx5_target { #define PTP_PTP_TOD_NSEC_FRAC_PTP_TOD_NSEC_FRAC_GET(x)\ FIELD_GET(PTP_PTP_TOD_NSEC_FRAC_PTP_TOD_NSEC_FRAC, x) -/* DEVCPU_PTP:PTP_PINS:NTP_NSEC */ -#define PTP_NTP_NSEC(g) __REG(TARGET_PTP,\ - 0, 1, 0, g, 5, 64, 20, 0, 1, 4) +/* DEVCPU_PTP:PTP_PINS:NTP_NSEC */ +#define PTP_NTP_NSEC(g) \ + __REG(TARGET_PTP, 0, 1, 0, g, regs->gcnt[GC_PTP_PTP_PINS], 64, 20, 0, \ + 1, 4) -/* DEVCPU_PTP:PTP_PINS:PIN_WF_HIGH_PERIOD */ -#define PTP_PIN_WF_HIGH_PERIOD(g) __REG(TARGET_PTP,\ - 0, 1, 0, g, 5, 64, 24, 0, 1, 4) +/* DEVCPU_PTP:PTP_PINS:PIN_WF_HIGH_PERIOD */ +#define PTP_PIN_WF_HIGH_PERIOD(g) \ + __REG(TARGET_PTP, 0, 1, 0, g, regs->gcnt[GC_PTP_PTP_PINS], 64, 24, 0, \ + 1, 4) #define PTP_PIN_WF_HIGH_PERIOD_PIN_WFH 
GENMASK(29, 0) #define PTP_PIN_WF_HIGH_PERIOD_PIN_WFH_SET(x)\ @@ -6145,9 +6653,10 @@ enum sparx5_target { #define PTP_PIN_WF_HIGH_PERIOD_PIN_WFH_GET(x)\ FIELD_GET(PTP_PIN_WF_HIGH_PERIOD_PIN_WFH, x) -/* DEVCPU_PTP:PTP_PINS:PIN_WF_LOW_PERIOD */ -#define PTP_PIN_WF_LOW_PERIOD(g) __REG(TARGET_PTP,\ - 0, 1, 0, g, 5, 64, 28, 0, 1, 4) +/* DEVCPU_PTP:PTP_PINS:PIN_WF_LOW_PERIOD */ +#define PTP_PIN_WF_LOW_PERIOD(g) \ + __REG(TARGET_PTP, 0, 1, 0, g, regs->gcnt[GC_PTP_PTP_PINS], 64, 28, 0, \ + 1, 4) #define PTP_PIN_WF_LOW_PERIOD_PIN_WFL GENMASK(29, 0) #define PTP_PIN_WF_LOW_PERIOD_PIN_WFL_SET(x)\ @@ -6155,9 +6664,10 @@ enum sparx5_target { #define PTP_PIN_WF_LOW_PERIOD_PIN_WFL_GET(x)\ FIELD_GET(PTP_PIN_WF_LOW_PERIOD_PIN_WFL, x) -/* DEVCPU_PTP:PTP_PINS:PIN_IOBOUNCH_DELAY */ -#define PTP_PIN_IOBOUNCH_DELAY(g) __REG(TARGET_PTP,\ - 0, 1, 0, g, 5, 64, 32, 0, 1, 4) +/* DEVCPU_PTP:PTP_PINS:PIN_IOBOUNCH_DELAY */ +#define PTP_PIN_IOBOUNCH_DELAY(g) \ + __REG(TARGET_PTP, 0, 1, 0, g, regs->gcnt[GC_PTP_PTP_PINS], 64, 32, 0, \ + 1, 4) #define PTP_PIN_IOBOUNCH_DELAY_PIN_IOBOUNCH_VAL GENMASK(18, 3) #define PTP_PIN_IOBOUNCH_DELAY_PIN_IOBOUNCH_VAL_SET(x)\ @@ -6171,22 +6681,27 @@ enum sparx5_target { #define PTP_PIN_IOBOUNCH_DELAY_PIN_IOBOUNCH_CFG_GET(x)\ FIELD_GET(PTP_PIN_IOBOUNCH_DELAY_PIN_IOBOUNCH_CFG, x) -/* DEVCPU_PTP:PHASE_DETECTOR_CTRL:PHAD_CTRL */ -#define PTP_PHAD_CTRL(g) __REG(TARGET_PTP,\ - 0, 1, 420, g, 5, 8, 0, 0, 1, 4) +/* DEVCPU_PTP:PHASE_DETECTOR_CTRL:PHAD_CTRL */ +#define PTP_PHAD_CTRL(g) \ + __REG(TARGET_PTP, 0, 1, regs->gaddr[GA_PTP_PHASE_DETECTOR_CTRL], g, \ + regs->gcnt[GC_PTP_PHASE_DETECTOR_CTRL], \ + regs->gsize[GW_PTP_PHASE_DETECTOR_CTRL], 0, 0, 1, 4) -#define PTP_PHAD_CTRL_PHAD_ENA BIT(7) +#define PTP_PHAD_CTRL_PHAD_ENA\ + BIT(regs->fpos[FP_PTP_PHAD_CTRL_PHAD_ENA]) #define PTP_PHAD_CTRL_PHAD_ENA_SET(x)\ - FIELD_PREP(PTP_PHAD_CTRL_PHAD_ENA, x) + spx5_field_prep(PTP_PHAD_CTRL_PHAD_ENA, x) #define PTP_PHAD_CTRL_PHAD_ENA_GET(x)\ - FIELD_GET(PTP_PHAD_CTRL_PHAD_ENA, x) + spx5_field_get(PTP_PHAD_CTRL_PHAD_ENA, x) -#define PTP_PHAD_CTRL_PHAD_FAILED BIT(6) +#define PTP_PHAD_CTRL_PHAD_FAILED\ + BIT(regs->fpos[FP_PTP_PHAD_CTRL_PHAD_FAILED]) #define PTP_PHAD_CTRL_PHAD_FAILED_SET(x)\ - FIELD_PREP(PTP_PHAD_CTRL_PHAD_FAILED, x) + spx5_field_prep(PTP_PHAD_CTRL_PHAD_FAILED, x) #define PTP_PHAD_CTRL_PHAD_FAILED_GET(x)\ - FIELD_GET(PTP_PHAD_CTRL_PHAD_FAILED, x) + spx5_field_get(PTP_PHAD_CTRL_PHAD_FAILED, x) +/* SPARX5 ONLY */ #define PTP_PHAD_CTRL_REDUCED_RES GENMASK(5, 3) #define PTP_PHAD_CTRL_REDUCED_RES_SET(x)\ FIELD_PREP(PTP_PHAD_CTRL_REDUCED_RES, x) @@ -6199,13 +6714,16 @@ enum sparx5_target { #define PTP_PHAD_CTRL_LOCK_ACC_GET(x)\ FIELD_GET(PTP_PHAD_CTRL_LOCK_ACC, x) -/* DEVCPU_PTP:PHASE_DETECTOR_CTRL:PHAD_CYC_STAT */ -#define PTP_PHAD_CYC_STAT(g) __REG(TARGET_PTP,\ - 0, 1, 420, g, 5, 8, 4, 0, 1, 4) +/* DEVCPU_PTP:PHASE_DETECTOR_CTRL:PHAD_CYC_STAT */ +#define PTP_PHAD_CYC_STAT(g) \ + __REG(TARGET_PTP, 0, 1, regs->gaddr[GA_PTP_PHASE_DETECTOR_CTRL], g, \ + regs->gcnt[GC_PTP_PHASE_DETECTOR_CTRL], \ + regs->gsize[GW_PTP_PHASE_DETECTOR_CTRL], 4, 0, 1, 4) -/* QFWD:SYSTEM:SWITCH_PORT_MODE */ -#define QFWD_SWITCH_PORT_MODE(r) __REG(TARGET_QFWD,\ - 0, 1, 0, 0, 1, 340, 0, r, 70, 4) +/* QFWD:SYSTEM:SWITCH_PORT_MODE */ +#define QFWD_SWITCH_PORT_MODE(r) \ + __REG(TARGET_QFWD, 0, 1, 0, 0, 1, 340, 0, r, \ + regs->rcnt[RC_QFWD_SWITCH_PORT_MODE], 4) #define QFWD_SWITCH_PORT_MODE_PORT_ENA BIT(19) #define QFWD_SWITCH_PORT_MODE_PORT_ENA_SET(x)\ @@ -6261,49 +6779,53 @@ enum sparx5_target { #define 
QFWD_SWITCH_PORT_MODE_LEARNALL_MORE_GET(x)\ FIELD_GET(QFWD_SWITCH_PORT_MODE_LEARNALL_MORE, x) -/* QFWD:SYSTEM:FRAME_COPY_CFG */ -#define QFWD_FRAME_COPY_CFG(r)\ +/* QFWD:SYSTEM:FRAME_COPY_CFG */ +#define QFWD_FRAME_COPY_CFG(r) \ __REG(TARGET_QFWD, 0, 1, 0, 0, 1, 340, 284, r, 12, 4) -#define QFWD_FRAME_COPY_CFG_FRMC_PORT_VAL GENMASK(12, 6) +#define QFWD_FRAME_COPY_CFG_FRMC_PORT_VAL\ + GENMASK(regs->fsize[FW_QFWD_FRAME_COPY_CFG_FRMC_PORT_VAL] + 6 - 1, 6) #define QFWD_FRAME_COPY_CFG_FRMC_PORT_VAL_SET(x)\ - FIELD_PREP(QFWD_FRAME_COPY_CFG_FRMC_PORT_VAL, x) + spx5_field_prep(QFWD_FRAME_COPY_CFG_FRMC_PORT_VAL, x) #define QFWD_FRAME_COPY_CFG_FRMC_PORT_VAL_GET(x)\ - FIELD_GET(QFWD_FRAME_COPY_CFG_FRMC_PORT_VAL, x) + spx5_field_get(QFWD_FRAME_COPY_CFG_FRMC_PORT_VAL, x) -/* QRES:RES_CTRL:RES_CFG */ -#define QRES_RES_CFG(g) __REG(TARGET_QRES,\ - 0, 1, 0, g, 5120, 16, 0, 0, 1, 4) +/* QRES:RES_CTRL:RES_CFG */ +#define QRES_RES_CFG(g) \ + __REG(TARGET_QRES, 0, 1, 0, g, 5120, 16, 0, 0, 1, 4) -#define QRES_RES_CFG_WM_HIGH GENMASK(11, 0) +#define QRES_RES_CFG_WM_HIGH\ + GENMASK(regs->fsize[FW_QRES_RES_CFG_WM_HIGH] + 0 - 1, 0) #define QRES_RES_CFG_WM_HIGH_SET(x)\ - FIELD_PREP(QRES_RES_CFG_WM_HIGH, x) + spx5_field_prep(QRES_RES_CFG_WM_HIGH, x) #define QRES_RES_CFG_WM_HIGH_GET(x)\ - FIELD_GET(QRES_RES_CFG_WM_HIGH, x) + spx5_field_get(QRES_RES_CFG_WM_HIGH, x) -/* QRES:RES_CTRL:RES_STAT */ -#define QRES_RES_STAT(g) __REG(TARGET_QRES,\ - 0, 1, 0, g, 5120, 16, 4, 0, 1, 4) +/* QRES:RES_CTRL:RES_STAT */ +#define QRES_RES_STAT(g) \ + __REG(TARGET_QRES, 0, 1, 0, g, 5120, 16, 4, 0, 1, 4) -#define QRES_RES_STAT_MAXUSE GENMASK(20, 0) +#define QRES_RES_STAT_MAXUSE\ + GENMASK(regs->fsize[FW_QRES_RES_STAT_MAXUSE] + 0 - 1, 0) #define QRES_RES_STAT_MAXUSE_SET(x)\ - FIELD_PREP(QRES_RES_STAT_MAXUSE, x) + spx5_field_prep(QRES_RES_STAT_MAXUSE, x) #define QRES_RES_STAT_MAXUSE_GET(x)\ - FIELD_GET(QRES_RES_STAT_MAXUSE, x) + spx5_field_get(QRES_RES_STAT_MAXUSE, x) -/* QRES:RES_CTRL:RES_STAT_CUR */ -#define QRES_RES_STAT_CUR(g) __REG(TARGET_QRES,\ - 0, 1, 0, g, 5120, 16, 8, 0, 1, 4) +/* QRES:RES_CTRL:RES_STAT_CUR */ +#define QRES_RES_STAT_CUR(g) \ + __REG(TARGET_QRES, 0, 1, 0, g, 5120, 16, 8, 0, 1, 4) -#define QRES_RES_STAT_CUR_INUSE GENMASK(20, 0) +#define QRES_RES_STAT_CUR_INUSE\ + GENMASK(regs->fsize[FW_QRES_RES_STAT_CUR_INUSE] + 0 - 1, 0) #define QRES_RES_STAT_CUR_INUSE_SET(x)\ - FIELD_PREP(QRES_RES_STAT_CUR_INUSE, x) + spx5_field_prep(QRES_RES_STAT_CUR_INUSE, x) #define QRES_RES_STAT_CUR_INUSE_GET(x)\ - FIELD_GET(QRES_RES_STAT_CUR_INUSE, x) + spx5_field_get(QRES_RES_STAT_CUR_INUSE, x) -/* DEVCPU_QS:XTR:XTR_GRP_CFG */ -#define QS_XTR_GRP_CFG(r) __REG(TARGET_QS,\ - 0, 1, 0, 0, 1, 36, 0, r, 2, 4) +/* DEVCPU_QS:XTR:XTR_GRP_CFG */ +#define QS_XTR_GRP_CFG(r) \ + __REG(TARGET_QS, 0, 1, 0, 0, 1, 36, 0, r, 2, 4) #define QS_XTR_GRP_CFG_MODE GENMASK(3, 2) #define QS_XTR_GRP_CFG_MODE_SET(x)\ @@ -6323,13 +6845,13 @@ enum sparx5_target { #define QS_XTR_GRP_CFG_BYTE_SWAP_GET(x)\ FIELD_GET(QS_XTR_GRP_CFG_BYTE_SWAP, x) -/* DEVCPU_QS:XTR:XTR_RD */ -#define QS_XTR_RD(r) __REG(TARGET_QS,\ - 0, 1, 0, 0, 1, 36, 8, r, 2, 4) +/* DEVCPU_QS:XTR:XTR_RD */ +#define QS_XTR_RD(r) \ + __REG(TARGET_QS, 0, 1, 0, 0, 1, 36, 8, r, 2, 4) -/* DEVCPU_QS:XTR:XTR_FLUSH */ -#define QS_XTR_FLUSH __REG(TARGET_QS,\ - 0, 1, 0, 0, 1, 36, 24, 0, 1, 4) +/* DEVCPU_QS:XTR:XTR_FLUSH */ +#define QS_XTR_FLUSH \ + __REG(TARGET_QS, 0, 1, 0, 0, 1, 36, 24, 0, 1, 4) #define QS_XTR_FLUSH_FLUSH GENMASK(1, 0) #define QS_XTR_FLUSH_FLUSH_SET(x)\ @@ -6337,9 +6859,9 @@ enum sparx5_target { 
#define QS_XTR_FLUSH_FLUSH_GET(x)\ FIELD_GET(QS_XTR_FLUSH_FLUSH, x) -/* DEVCPU_QS:XTR:XTR_DATA_PRESENT */ -#define QS_XTR_DATA_PRESENT __REG(TARGET_QS,\ - 0, 1, 0, 0, 1, 36, 28, 0, 1, 4) +/* DEVCPU_QS:XTR:XTR_DATA_PRESENT */ +#define QS_XTR_DATA_PRESENT \ + __REG(TARGET_QS, 0, 1, 0, 0, 1, 36, 28, 0, 1, 4) #define QS_XTR_DATA_PRESENT_DATA_PRESENT GENMASK(1, 0) #define QS_XTR_DATA_PRESENT_DATA_PRESENT_SET(x)\ @@ -6347,9 +6869,9 @@ enum sparx5_target { #define QS_XTR_DATA_PRESENT_DATA_PRESENT_GET(x)\ FIELD_GET(QS_XTR_DATA_PRESENT_DATA_PRESENT, x) -/* DEVCPU_QS:INJ:INJ_GRP_CFG */ -#define QS_INJ_GRP_CFG(r) __REG(TARGET_QS,\ - 0, 1, 36, 0, 1, 40, 0, r, 2, 4) +/* DEVCPU_QS:INJ:INJ_GRP_CFG */ +#define QS_INJ_GRP_CFG(r) \ + __REG(TARGET_QS, 0, 1, 36, 0, 1, 40, 0, r, 2, 4) #define QS_INJ_GRP_CFG_MODE GENMASK(3, 2) #define QS_INJ_GRP_CFG_MODE_SET(x)\ @@ -6363,13 +6885,13 @@ enum sparx5_target { #define QS_INJ_GRP_CFG_BYTE_SWAP_GET(x)\ FIELD_GET(QS_INJ_GRP_CFG_BYTE_SWAP, x) -/* DEVCPU_QS:INJ:INJ_WR */ -#define QS_INJ_WR(r) __REG(TARGET_QS,\ - 0, 1, 36, 0, 1, 40, 8, r, 2, 4) +/* DEVCPU_QS:INJ:INJ_WR */ +#define QS_INJ_WR(r) \ + __REG(TARGET_QS, 0, 1, 36, 0, 1, 40, 8, r, 2, 4) -/* DEVCPU_QS:INJ:INJ_CTRL */ -#define QS_INJ_CTRL(r) __REG(TARGET_QS,\ - 0, 1, 36, 0, 1, 40, 16, r, 2, 4) +/* DEVCPU_QS:INJ:INJ_CTRL */ +#define QS_INJ_CTRL(r) \ + __REG(TARGET_QS, 0, 1, 36, 0, 1, 40, 16, r, 2, 4) #define QS_INJ_CTRL_GAP_SIZE GENMASK(24, 21) #define QS_INJ_CTRL_GAP_SIZE_SET(x)\ @@ -6401,9 +6923,9 @@ enum sparx5_target { #define QS_INJ_CTRL_VLD_BYTES_GET(x)\ FIELD_GET(QS_INJ_CTRL_VLD_BYTES, x) -/* DEVCPU_QS:INJ:INJ_STATUS */ -#define QS_INJ_STATUS __REG(TARGET_QS,\ - 0, 1, 36, 0, 1, 40, 24, 0, 1, 4) +/* DEVCPU_QS:INJ:INJ_STATUS */ +#define QS_INJ_STATUS \ + __REG(TARGET_QS, 0, 1, 36, 0, 1, 40, 24, 0, 1, 4) #define QS_INJ_STATUS_WMARK_REACHED GENMASK(5, 4) #define QS_INJ_STATUS_WMARK_REACHED_SET(x)\ @@ -6423,21 +6945,24 @@ enum sparx5_target { #define QS_INJ_STATUS_INJ_IN_PROGRESS_GET(x)\ FIELD_GET(QS_INJ_STATUS_INJ_IN_PROGRESS, x) -/* QSYS:PAUSE_CFG:PAUSE_CFG */ -#define QSYS_PAUSE_CFG(r) __REG(TARGET_QSYS,\ - 0, 1, 544, 0, 1, 1128, 0, r, 70, 4) +/* QSYS:PAUSE_CFG:PAUSE_CFG */ +#define QSYS_PAUSE_CFG(r) \ + __REG(TARGET_QSYS, 0, 1, 544, 0, 1, regs->gsize[GW_QSYS_PAUSE_CFG], 0, \ + r, regs->rcnt[RC_QSYS_PAUSE_CFG], 4) -#define QSYS_PAUSE_CFG_PAUSE_START GENMASK(25, 14) +#define QSYS_PAUSE_CFG_PAUSE_START\ + GENMASK(regs->fsize[FW_QSYS_PAUSE_CFG_PAUSE_START] + 14 - 1, 14) #define QSYS_PAUSE_CFG_PAUSE_START_SET(x)\ - FIELD_PREP(QSYS_PAUSE_CFG_PAUSE_START, x) + spx5_field_prep(QSYS_PAUSE_CFG_PAUSE_START, x) #define QSYS_PAUSE_CFG_PAUSE_START_GET(x)\ - FIELD_GET(QSYS_PAUSE_CFG_PAUSE_START, x) + spx5_field_get(QSYS_PAUSE_CFG_PAUSE_START, x) -#define QSYS_PAUSE_CFG_PAUSE_STOP GENMASK(13, 2) +#define QSYS_PAUSE_CFG_PAUSE_STOP\ + GENMASK(regs->fsize[FW_QSYS_PAUSE_CFG_PAUSE_STOP] + 2 - 1, 2) #define QSYS_PAUSE_CFG_PAUSE_STOP_SET(x)\ - FIELD_PREP(QSYS_PAUSE_CFG_PAUSE_STOP, x) + spx5_field_prep(QSYS_PAUSE_CFG_PAUSE_STOP, x) #define QSYS_PAUSE_CFG_PAUSE_STOP_GET(x)\ - FIELD_GET(QSYS_PAUSE_CFG_PAUSE_STOP, x) + spx5_field_get(QSYS_PAUSE_CFG_PAUSE_STOP, x) #define QSYS_PAUSE_CFG_PAUSE_ENA BIT(1) #define QSYS_PAUSE_CFG_PAUSE_ENA_SET(x)\ @@ -6451,19 +6976,22 @@ enum sparx5_target { #define QSYS_PAUSE_CFG_AGGRESSIVE_TAILDROP_ENA_GET(x)\ FIELD_GET(QSYS_PAUSE_CFG_AGGRESSIVE_TAILDROP_ENA, x) -/* QSYS:PAUSE_CFG:ATOP */ -#define QSYS_ATOP(r) __REG(TARGET_QSYS,\ - 0, 1, 544, 0, 1, 1128, 284, r, 70, 4) +/* QSYS:PAUSE_CFG:ATOP */ +#define 
QSYS_ATOP(r) \ + __REG(TARGET_QSYS, 0, 1, 544, 0, 1, regs->gsize[GW_QSYS_PAUSE_CFG], \ + 284, r, regs->rcnt[RC_QSYS_ATOP], 4) -#define QSYS_ATOP_ATOP GENMASK(11, 0) +#define QSYS_ATOP_ATOP\ + GENMASK(regs->fsize[FW_QSYS_ATOP_ATOP] + 0 - 1, 0) #define QSYS_ATOP_ATOP_SET(x)\ - FIELD_PREP(QSYS_ATOP_ATOP, x) + spx5_field_prep(QSYS_ATOP_ATOP, x) #define QSYS_ATOP_ATOP_GET(x)\ - FIELD_GET(QSYS_ATOP_ATOP, x) + spx5_field_get(QSYS_ATOP_ATOP, x) -/* QSYS:PAUSE_CFG:FWD_PRESSURE */ -#define QSYS_FWD_PRESSURE(r) __REG(TARGET_QSYS,\ - 0, 1, 544, 0, 1, 1128, 564, r, 70, 4) +/* QSYS:PAUSE_CFG:FWD_PRESSURE */ +#define QSYS_FWD_PRESSURE(r) \ + __REG(TARGET_QSYS, 0, 1, 544, 0, 1, regs->gsize[GW_QSYS_PAUSE_CFG], \ + 564, r, regs->rcnt[RC_QSYS_FWD_PRESSURE], 4) #define QSYS_FWD_PRESSURE_FWD_PRESSURE GENMASK(11, 1) #define QSYS_FWD_PRESSURE_FWD_PRESSURE_SET(x)\ @@ -6477,19 +7005,22 @@ enum sparx5_target { #define QSYS_FWD_PRESSURE_FWD_PRESSURE_DIS_GET(x)\ FIELD_GET(QSYS_FWD_PRESSURE_FWD_PRESSURE_DIS, x) -/* QSYS:PAUSE_CFG:ATOP_TOT_CFG */ -#define QSYS_ATOP_TOT_CFG __REG(TARGET_QSYS,\ - 0, 1, 544, 0, 1, 1128, 844, 0, 1, 4) +/* QSYS:PAUSE_CFG:ATOP_TOT_CFG */ +#define QSYS_ATOP_TOT_CFG \ + __REG(TARGET_QSYS, 0, 1, 544, 0, 1, regs->gsize[GW_QSYS_PAUSE_CFG], \ + 844, 0, 1, 4) -#define QSYS_ATOP_TOT_CFG_ATOP_TOT GENMASK(11, 0) +#define QSYS_ATOP_TOT_CFG_ATOP_TOT\ + GENMASK(regs->fsize[FW_QSYS_ATOP_TOT_CFG_ATOP_TOT] + 0 - 1, 0) #define QSYS_ATOP_TOT_CFG_ATOP_TOT_SET(x)\ - FIELD_PREP(QSYS_ATOP_TOT_CFG_ATOP_TOT, x) + spx5_field_prep(QSYS_ATOP_TOT_CFG_ATOP_TOT, x) #define QSYS_ATOP_TOT_CFG_ATOP_TOT_GET(x)\ - FIELD_GET(QSYS_ATOP_TOT_CFG_ATOP_TOT, x) + spx5_field_get(QSYS_ATOP_TOT_CFG_ATOP_TOT, x) -/* QSYS:CALCFG:CAL_AUTO */ -#define QSYS_CAL_AUTO(r) __REG(TARGET_QSYS,\ - 0, 1, 2304, 0, 1, 40, 0, r, 7, 4) +/* QSYS:CALCFG:CAL_AUTO */ +#define QSYS_CAL_AUTO(r) \ + __REG(TARGET_QSYS, 0, 1, regs->gaddr[GA_QSYS_CALCFG], 0, 1, 40, 0, r, \ + regs->rcnt[RC_QSYS_CAL_AUTO], 4) #define QSYS_CAL_AUTO_CAL_AUTO GENMASK(29, 0) #define QSYS_CAL_AUTO_CAL_AUTO_SET(x)\ @@ -6497,9 +7028,10 @@ enum sparx5_target { #define QSYS_CAL_AUTO_CAL_AUTO_GET(x)\ FIELD_GET(QSYS_CAL_AUTO_CAL_AUTO, x) -/* QSYS:CALCFG:CAL_CTRL */ -#define QSYS_CAL_CTRL __REG(TARGET_QSYS,\ - 0, 1, 2304, 0, 1, 40, 36, 0, 1, 4) +/* QSYS:CALCFG:CAL_CTRL */ +#define QSYS_CAL_CTRL \ + __REG(TARGET_QSYS, 0, 1, regs->gaddr[GA_QSYS_CALCFG], 0, 1, 40, 36, 0, \ + 1, 4) #define QSYS_CAL_CTRL_CAL_MODE GENMASK(14, 11) #define QSYS_CAL_CTRL_CAL_MODE_SET(x)\ @@ -6519,9 +7051,10 @@ enum sparx5_target { #define QSYS_CAL_CTRL_CAL_AUTO_ERROR_GET(x)\ FIELD_GET(QSYS_CAL_CTRL_CAL_AUTO_ERROR, x) -/* QSYS:RAM_CTRL:RAM_INIT */ -#define QSYS_RAM_INIT __REG(TARGET_QSYS,\ - 0, 1, 2344, 0, 1, 4, 0, 0, 1, 4) +/* QSYS:RAM_CTRL:RAM_INIT */ +#define QSYS_RAM_INIT \ + __REG(TARGET_QSYS, 0, 1, regs->gaddr[GA_QSYS_RAM_CTRL], 0, 1, 4, 0, 0, \ + 1, 4) #define QSYS_RAM_INIT_RAM_INIT BIT(1) #define QSYS_RAM_INIT_RAM_INIT_SET(x)\ @@ -6535,9 +7068,10 @@ enum sparx5_target { #define QSYS_RAM_INIT_RAM_CFG_HOOK_GET(x)\ FIELD_GET(QSYS_RAM_INIT_RAM_CFG_HOOK, x) -/* REW:COMMON:OWN_UPSID */ -#define REW_OWN_UPSID(r) __REG(TARGET_REW,\ - 0, 1, 387264, 0, 1, 1232, 0, r, 3, 4) +/* REW:COMMON:OWN_UPSID */ +#define REW_OWN_UPSID(r) \ + __REG(TARGET_REW, 0, 1, regs->gaddr[GA_REW_COMMON], 0, 1, 1232, 0, r, \ + regs->rcnt[RC_REW_OWN_UPSID], 4) #define REW_OWN_UPSID_OWN_UPSID GENMASK(4, 0) #define REW_OWN_UPSID_OWN_UPSID_SET(x)\ @@ -6545,15 +7079,17 @@ enum sparx5_target { #define REW_OWN_UPSID_OWN_UPSID_GET(x)\ 
FIELD_GET(REW_OWN_UPSID_OWN_UPSID, x) -/* REW:COMMON:RTAG_ETAG_CTRL */ -#define REW_RTAG_ETAG_CTRL(r) __REG(TARGET_REW,\ - 0, 1, 387264, 0, 1, 1232, 560, r, 70, 4) +/* REW:COMMON:RTAG_ETAG_CTRL */ +#define REW_RTAG_ETAG_CTRL(r) \ + __REG(TARGET_REW, 0, 1, regs->gaddr[GA_REW_COMMON], 0, 1, 1232, 560, r,\ + regs->rcnt[RC_REW_RTAG_ETAG_CTRL], 4) -#define REW_RTAG_ETAG_CTRL_IPE_TBL GENMASK(9, 3) +#define REW_RTAG_ETAG_CTRL_IPE_TBL\ + GENMASK(regs->fsize[FW_REW_RTAG_ETAG_CTRL_IPE_TBL] + 3 - 1, 3) #define REW_RTAG_ETAG_CTRL_IPE_TBL_SET(x)\ - FIELD_PREP(REW_RTAG_ETAG_CTRL_IPE_TBL, x) + spx5_field_prep(REW_RTAG_ETAG_CTRL_IPE_TBL, x) #define REW_RTAG_ETAG_CTRL_IPE_TBL_GET(x)\ - FIELD_GET(REW_RTAG_ETAG_CTRL_IPE_TBL, x) + spx5_field_get(REW_RTAG_ETAG_CTRL_IPE_TBL, x) #define REW_RTAG_ETAG_CTRL_ES0_ISDX_KEY_ENA GENMASK(2, 1) #define REW_RTAG_ETAG_CTRL_ES0_ISDX_KEY_ENA_SET(x)\ @@ -6567,9 +7103,10 @@ enum sparx5_target { #define REW_RTAG_ETAG_CTRL_KEEP_ETAG_GET(x)\ FIELD_GET(REW_RTAG_ETAG_CTRL_KEEP_ETAG, x) -/* REW:COMMON:ES0_CTRL */ -#define REW_ES0_CTRL __REG(TARGET_REW,\ - 0, 1, 387264, 0, 1, 1232, 852, 0, 1, 4) +/* REW:COMMON:ES0_CTRL */ +#define REW_ES0_CTRL \ + __REG(TARGET_REW, 0, 1, regs->gaddr[GA_REW_COMMON], 0, 1, 1232, 852, 0,\ + 1, 4) #define REW_ES0_CTRL_ES0_BY_RT_FWD BIT(5) #define REW_ES0_CTRL_ES0_BY_RT_FWD_SET(x)\ @@ -6607,9 +7144,10 @@ enum sparx5_target { #define REW_ES0_CTRL_ES0_LU_ENA_GET(x)\ FIELD_GET(REW_ES0_CTRL_ES0_LU_ENA, x) -/* REW:PORT:PORT_VLAN_CFG */ -#define REW_PORT_VLAN_CFG(g) __REG(TARGET_REW,\ - 0, 1, 360448, g, 70, 256, 0, 0, 1, 4) +/* REW:PORT:PORT_VLAN_CFG */ +#define REW_PORT_VLAN_CFG(g) \ + __REG(TARGET_REW, 0, 1, regs->gaddr[GA_REW_PORT], g, \ + regs->gcnt[GC_REW_PORT], 256, 0, 0, 1, 4) #define REW_PORT_VLAN_CFG_PORT_PCP GENMASK(15, 13) #define REW_PORT_VLAN_CFG_PORT_PCP_SET(x)\ @@ -6629,9 +7167,10 @@ enum sparx5_target { #define REW_PORT_VLAN_CFG_PORT_VID_GET(x)\ FIELD_GET(REW_PORT_VLAN_CFG_PORT_VID, x) -/* REW:PORT:PCP_MAP_DE0 */ -#define REW_PCP_MAP_DE0(g, r) __REG(TARGET_REW,\ - 0, 1, 360448, g, 70, 256, 4, r, 8, 4) +/* REW:PORT:PCP_MAP_DE0 */ +#define REW_PCP_MAP_DE0(g, r) \ + __REG(TARGET_REW, 0, 1, regs->gaddr[GA_REW_PORT], g, \ + regs->gcnt[GC_REW_PORT], 256, 4, r, 8, 4) #define REW_PCP_MAP_DE0_PCP_DE0 GENMASK(2, 0) #define REW_PCP_MAP_DE0_PCP_DE0_SET(x)\ @@ -6639,9 +7178,10 @@ enum sparx5_target { #define REW_PCP_MAP_DE0_PCP_DE0_GET(x)\ FIELD_GET(REW_PCP_MAP_DE0_PCP_DE0, x) -/* REW:PORT:PCP_MAP_DE1 */ -#define REW_PCP_MAP_DE1(g, r) __REG(TARGET_REW,\ - 0, 1, 360448, g, 70, 256, 36, r, 8, 4) +/* REW:PORT:PCP_MAP_DE1 */ +#define REW_PCP_MAP_DE1(g, r) \ + __REG(TARGET_REW, 0, 1, regs->gaddr[GA_REW_PORT], g, \ + regs->gcnt[GC_REW_PORT], 256, 36, r, 8, 4) #define REW_PCP_MAP_DE1_PCP_DE1 GENMASK(2, 0) #define REW_PCP_MAP_DE1_PCP_DE1_SET(x)\ @@ -6649,9 +7189,10 @@ enum sparx5_target { #define REW_PCP_MAP_DE1_PCP_DE1_GET(x)\ FIELD_GET(REW_PCP_MAP_DE1_PCP_DE1, x) -/* REW:PORT:DEI_MAP_DE0 */ -#define REW_DEI_MAP_DE0(g, r) __REG(TARGET_REW,\ - 0, 1, 360448, g, 70, 256, 68, r, 8, 4) +/* REW:PORT:DEI_MAP_DE0 */ +#define REW_DEI_MAP_DE0(g, r) \ + __REG(TARGET_REW, 0, 1, regs->gaddr[GA_REW_PORT], g, \ + regs->gcnt[GC_REW_PORT], 256, 68, r, 8, 4) #define REW_DEI_MAP_DE0_DEI_DE0 BIT(0) #define REW_DEI_MAP_DE0_DEI_DE0_SET(x)\ @@ -6659,9 +7200,10 @@ enum sparx5_target { #define REW_DEI_MAP_DE0_DEI_DE0_GET(x)\ FIELD_GET(REW_DEI_MAP_DE0_DEI_DE0, x) -/* REW:PORT:DEI_MAP_DE1 */ -#define REW_DEI_MAP_DE1(g, r) __REG(TARGET_REW,\ - 0, 1, 360448, g, 70, 256, 100, r, 8, 4) +/* 
REW:PORT:DEI_MAP_DE1 */ +#define REW_DEI_MAP_DE1(g, r) \ + __REG(TARGET_REW, 0, 1, regs->gaddr[GA_REW_PORT], g, \ + regs->gcnt[GC_REW_PORT], 256, 100, r, 8, 4) #define REW_DEI_MAP_DE1_DEI_DE1 BIT(0) #define REW_DEI_MAP_DE1_DEI_DE1_SET(x)\ @@ -6669,9 +7211,10 @@ enum sparx5_target { #define REW_DEI_MAP_DE1_DEI_DE1_GET(x)\ FIELD_GET(REW_DEI_MAP_DE1_DEI_DE1, x) -/* REW:PORT:TAG_CTRL */ -#define REW_TAG_CTRL(g) __REG(TARGET_REW,\ - 0, 1, 360448, g, 70, 256, 132, 0, 1, 4) +/* REW:PORT:TAG_CTRL */ +#define REW_TAG_CTRL(g) \ + __REG(TARGET_REW, 0, 1, regs->gaddr[GA_REW_PORT], g, \ + regs->gcnt[GC_REW_PORT], 256, 132, 0, 1, 4) #define REW_TAG_CTRL_TAG_CFG_OBEY_WAS_TAGGED BIT(13) #define REW_TAG_CTRL_TAG_CFG_OBEY_WAS_TAGGED_SET(x)\ @@ -6709,9 +7252,10 @@ enum sparx5_target { #define REW_TAG_CTRL_TAG_DEI_CFG_GET(x)\ FIELD_GET(REW_TAG_CTRL_TAG_DEI_CFG, x) -/* REW:PORT:DSCP_MAP */ -#define REW_DSCP_MAP(g) __REG(TARGET_REW,\ - 0, 1, 360448, g, 70, 256, 136, 0, 1, 4) +/* REW:PORT:DSCP_MAP */ +#define REW_DSCP_MAP(g) \ + __REG(TARGET_REW, 0, 1, regs->gaddr[GA_REW_PORT], g, \ + regs->gcnt[GC_REW_PORT], 256, 136, 0, 1, 4) #define REW_DSCP_MAP_DSCP_UPDATE_ENA BIT(1) #define REW_DSCP_MAP_DSCP_UPDATE_ENA_SET(x)\ @@ -6725,9 +7269,10 @@ enum sparx5_target { #define REW_DSCP_MAP_DSCP_REMAP_ENA_GET(x)\ FIELD_GET(REW_DSCP_MAP_DSCP_REMAP_ENA, x) -/* REW:PTP_CTRL:PTP_TWOSTEP_CTRL */ -#define REW_PTP_TWOSTEP_CTRL __REG(TARGET_REW,\ - 0, 1, 378368, 0, 1, 40, 0, 0, 1, 4) +/* SPARX5 ONLY */ +/* REW:PTP_CTRL:PTP_TWOSTEP_CTRL */ +#define REW_PTP_TWOSTEP_CTRL \ + __REG(TARGET_REW, 0, 1, 378368, 0, 1, 40, 0, 0, 1, 4) #define REW_PTP_TWOSTEP_CTRL_PTP_OVWR_ENA BIT(12) #define REW_PTP_TWOSTEP_CTRL_PTP_OVWR_ENA_SET(x)\ @@ -6765,9 +7310,10 @@ enum sparx5_target { #define REW_PTP_TWOSTEP_CTRL_PTP_OVFL_GET(x)\ FIELD_GET(REW_PTP_TWOSTEP_CTRL_PTP_OVFL, x) -/* REW:PTP_CTRL:PTP_TWOSTEP_STAMP */ -#define REW_PTP_TWOSTEP_STAMP __REG(TARGET_REW,\ - 0, 1, 378368, 0, 1, 40, 4, 0, 1, 4) +/* SPARX5 ONLY */ +/* REW:PTP_CTRL:PTP_TWOSTEP_STAMP */ +#define REW_PTP_TWOSTEP_STAMP \ + __REG(TARGET_REW, 0, 1, 378368, 0, 1, 40, 4, 0, 1, 4) #define REW_PTP_TWOSTEP_STAMP_STAMP_NSEC GENMASK(29, 0) #define REW_PTP_TWOSTEP_STAMP_STAMP_NSEC_SET(x)\ @@ -6775,9 +7321,10 @@ enum sparx5_target { #define REW_PTP_TWOSTEP_STAMP_STAMP_NSEC_GET(x)\ FIELD_GET(REW_PTP_TWOSTEP_STAMP_STAMP_NSEC, x) -/* REW:PTP_CTRL:PTP_TWOSTEP_STAMP_SUBNS */ -#define REW_PTP_TWOSTEP_STAMP_SUBNS __REG(TARGET_REW,\ - 0, 1, 378368, 0, 1, 40, 8, 0, 1, 4) +/* SPARX5 ONLY */ +/* REW:PTP_CTRL:PTP_TWOSTEP_STAMP_SUBNS */ +#define REW_PTP_TWOSTEP_STAMP_SUBNS \ + __REG(TARGET_REW, 0, 1, 378368, 0, 1, 40, 8, 0, 1, 4) #define REW_PTP_TWOSTEP_STAMP_SUBNS_STAMP_SUB_NSEC GENMASK(7, 0) #define REW_PTP_TWOSTEP_STAMP_SUBNS_STAMP_SUB_NSEC_SET(x)\ @@ -6785,17 +7332,20 @@ enum sparx5_target { #define REW_PTP_TWOSTEP_STAMP_SUBNS_STAMP_SUB_NSEC_GET(x)\ FIELD_GET(REW_PTP_TWOSTEP_STAMP_SUBNS_STAMP_SUB_NSEC, x) -/* REW:PTP_CTRL:PTP_RSRV_NOT_ZERO */ -#define REW_PTP_RSRV_NOT_ZERO __REG(TARGET_REW,\ - 0, 1, 378368, 0, 1, 40, 12, 0, 1, 4) +/* SPARX5 ONLY */ +/* REW:PTP_CTRL:PTP_RSRV_NOT_ZERO */ +#define REW_PTP_RSRV_NOT_ZERO \ + __REG(TARGET_REW, 0, 1, 378368, 0, 1, 40, 12, 0, 1, 4) -/* REW:PTP_CTRL:PTP_RSRV_NOT_ZERO1 */ -#define REW_PTP_RSRV_NOT_ZERO1 __REG(TARGET_REW,\ - 0, 1, 378368, 0, 1, 40, 16, 0, 1, 4) +/* SPARX5 ONLY */ +/* REW:PTP_CTRL:PTP_RSRV_NOT_ZERO1 */ +#define REW_PTP_RSRV_NOT_ZERO1 \ + __REG(TARGET_REW, 0, 1, 378368, 0, 1, 40, 16, 0, 1, 4) -/* REW:PTP_CTRL:PTP_RSRV_NOT_ZERO2 */ -#define 
REW_PTP_RSRV_NOT_ZERO2 __REG(TARGET_REW,\ - 0, 1, 378368, 0, 1, 40, 20, 0, 1, 4) +/* SPARX5 ONLY */ +/* REW:PTP_CTRL:PTP_RSRV_NOT_ZERO2 */ +#define REW_PTP_RSRV_NOT_ZERO2 \ + __REG(TARGET_REW, 0, 1, 378368, 0, 1, 40, 20, 0, 1, 4) #define REW_PTP_RSRV_NOT_ZERO2_PTP_RSRV_NOT_ZERO2 GENMASK(5, 0) #define REW_PTP_RSRV_NOT_ZERO2_PTP_RSRV_NOT_ZERO2_SET(x)\ @@ -6803,9 +7353,10 @@ enum sparx5_target { #define REW_PTP_RSRV_NOT_ZERO2_PTP_RSRV_NOT_ZERO2_GET(x)\ FIELD_GET(REW_PTP_RSRV_NOT_ZERO2_PTP_RSRV_NOT_ZERO2, x) -/* REW:PTP_CTRL:PTP_GEN_STAMP_FMT */ -#define REW_PTP_GEN_STAMP_FMT(r) __REG(TARGET_REW,\ - 0, 1, 378368, 0, 1, 40, 24, r, 4, 4) +/* SPARX5 ONLY */ +/* REW:PTP_CTRL:PTP_GEN_STAMP_FMT */ +#define REW_PTP_GEN_STAMP_FMT(r) \ + __REG(TARGET_REW, 0, 1, 378368, 0, 1, 40, 24, r, 4, 4) #define REW_PTP_GEN_STAMP_FMT_RT_OFS GENMASK(6, 2) #define REW_PTP_GEN_STAMP_FMT_RT_OFS_SET(x)\ @@ -6819,9 +7370,10 @@ enum sparx5_target { #define REW_PTP_GEN_STAMP_FMT_RT_FMT_GET(x)\ FIELD_GET(REW_PTP_GEN_STAMP_FMT_RT_FMT, x) -/* REW:RAM_CTRL:RAM_INIT */ -#define REW_RAM_INIT __REG(TARGET_REW,\ - 0, 1, 378696, 0, 1, 4, 0, 0, 1, 4) +/* REW:RAM_CTRL:RAM_INIT */ +#define REW_RAM_INIT \ + __REG(TARGET_REW, 0, 1, regs->gaddr[GA_REW_RAM_CTRL], 0, 1, 4, 0, 0, 1,\ + 4) #define REW_RAM_INIT_RAM_INIT BIT(1) #define REW_RAM_INIT_RAM_INIT_SET(x)\ @@ -6835,9 +7387,9 @@ enum sparx5_target { #define REW_RAM_INIT_RAM_CFG_HOOK_GET(x)\ FIELD_GET(REW_RAM_INIT_RAM_CFG_HOOK, x) -/* VCAP_ES0:VCAP_CORE_CFG:VCAP_UPDATE_CTRL */ -#define VCAP_ES0_CTRL __REG(TARGET_VCAP_ES0,\ - 0, 1, 0, 0, 1, 8, 0, 0, 1, 4) +/* VCAP_ES0:VCAP_CORE_CFG:VCAP_UPDATE_CTRL */ +#define VCAP_ES0_CTRL \ + __REG(TARGET_VCAP_ES0, 0, 1, 0, 0, 1, 8, 0, 0, 1, 4) #define VCAP_ES0_CTRL_UPDATE_CMD GENMASK(24, 22) #define VCAP_ES0_CTRL_UPDATE_CMD_SET(x)\ @@ -6887,9 +7439,9 @@ enum sparx5_target { #define VCAP_ES0_CTRL_MV_TRAFFIC_IGN_GET(x)\ FIELD_GET(VCAP_ES0_CTRL_MV_TRAFFIC_IGN, x) -/* VCAP_ES0:VCAP_CORE_CFG:VCAP_MV_CFG */ -#define VCAP_ES0_CFG __REG(TARGET_VCAP_ES0,\ - 0, 1, 0, 0, 1, 8, 4, 0, 1, 4) +/* VCAP_ES0:VCAP_CORE_CFG:VCAP_MV_CFG */ +#define VCAP_ES0_CFG \ + __REG(TARGET_VCAP_ES0, 0, 1, 0, 0, 1, 8, 4, 0, 1, 4) #define VCAP_ES0_CFG_MV_NUM_POS GENMASK(31, 16) #define VCAP_ES0_CFG_MV_NUM_POS_SET(x)\ @@ -6903,33 +7455,33 @@ enum sparx5_target { #define VCAP_ES0_CFG_MV_SIZE_GET(x)\ FIELD_GET(VCAP_ES0_CFG_MV_SIZE, x) -/* VCAP_ES0:VCAP_CORE_CACHE:VCAP_ENTRY_DAT */ -#define VCAP_ES0_VCAP_ENTRY_DAT(r) __REG(TARGET_VCAP_ES0,\ - 0, 1, 8, 0, 1, 904, 0, r, 64, 4) +/* VCAP_ES0:VCAP_CORE_CACHE:VCAP_ENTRY_DAT */ +#define VCAP_ES0_VCAP_ENTRY_DAT(r) \ + __REG(TARGET_VCAP_ES0, 0, 1, 8, 0, 1, 904, 0, r, 64, 4) -/* VCAP_ES0:VCAP_CORE_CACHE:VCAP_MASK_DAT */ -#define VCAP_ES0_VCAP_MASK_DAT(r) __REG(TARGET_VCAP_ES0,\ - 0, 1, 8, 0, 1, 904, 256, r, 64, 4) +/* VCAP_ES0:VCAP_CORE_CACHE:VCAP_MASK_DAT */ +#define VCAP_ES0_VCAP_MASK_DAT(r) \ + __REG(TARGET_VCAP_ES0, 0, 1, 8, 0, 1, 904, 256, r, 64, 4) -/* VCAP_ES0:VCAP_CORE_CACHE:VCAP_ACTION_DAT */ -#define VCAP_ES0_VCAP_ACTION_DAT(r) __REG(TARGET_VCAP_ES0,\ - 0, 1, 8, 0, 1, 904, 512, r, 64, 4) +/* VCAP_ES0:VCAP_CORE_CACHE:VCAP_ACTION_DAT */ +#define VCAP_ES0_VCAP_ACTION_DAT(r) \ + __REG(TARGET_VCAP_ES0, 0, 1, 8, 0, 1, 904, 512, r, 64, 4) -/* VCAP_ES0:VCAP_CORE_CACHE:VCAP_CNT_DAT */ -#define VCAP_ES0_VCAP_CNT_DAT(r) __REG(TARGET_VCAP_ES0,\ - 0, 1, 8, 0, 1, 904, 768, r, 32, 4) +/* VCAP_ES0:VCAP_CORE_CACHE:VCAP_CNT_DAT */ +#define VCAP_ES0_VCAP_CNT_DAT(r) \ + __REG(TARGET_VCAP_ES0, 0, 1, 8, 0, 1, 904, 768, r, 32, 4) -/* 
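/* Registers tagged "SPARX5 ONLY" (the REW:PTP_CTRL block here) keep their
 * literal addresses: they have no counterpart on the platforms this rework
 * prepares for, so there is nothing to parameterize. Callers are presumably
 * expected to gate such accesses on the platform; assumed pattern, with the
 * helper name being illustrative:
 */
if (is_sparx5(sparx5))
	spx5_wr(value, sparx5, REW_PTP_TWOSTEP_CTRL);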
VCAP_ES0:VCAP_CORE_CACHE:VCAP_CNT_FW_DAT */ -#define VCAP_ES0_VCAP_CNT_FW_DAT __REG(TARGET_VCAP_ES0,\ - 0, 1, 8, 0, 1, 904, 896, 0, 1, 4) +/* VCAP_ES0:VCAP_CORE_CACHE:VCAP_CNT_FW_DAT */ +#define VCAP_ES0_VCAP_CNT_FW_DAT \ + __REG(TARGET_VCAP_ES0, 0, 1, 8, 0, 1, 904, 896, 0, 1, 4) -/* VCAP_ES0:VCAP_CORE_CACHE:VCAP_TG_DAT */ -#define VCAP_ES0_VCAP_TG_DAT __REG(TARGET_VCAP_ES0,\ - 0, 1, 8, 0, 1, 904, 900, 0, 1, 4) +/* VCAP_ES0:VCAP_CORE_CACHE:VCAP_TG_DAT */ +#define VCAP_ES0_VCAP_TG_DAT \ + __REG(TARGET_VCAP_ES0, 0, 1, 8, 0, 1, 904, 900, 0, 1, 4) -/* VCAP_ES0:VCAP_CORE_MAP:VCAP_CORE_IDX */ -#define VCAP_ES0_IDX __REG(TARGET_VCAP_ES0,\ - 0, 1, 912, 0, 1, 8, 0, 0, 1, 4) +/* VCAP_ES0:VCAP_CORE_MAP:VCAP_CORE_IDX */ +#define VCAP_ES0_IDX \ + __REG(TARGET_VCAP_ES0, 0, 1, 912, 0, 1, 8, 0, 0, 1, 4) #define VCAP_ES0_IDX_CORE_IDX GENMASK(3, 0) #define VCAP_ES0_IDX_CORE_IDX_SET(x)\ @@ -6937,9 +7489,9 @@ enum sparx5_target { #define VCAP_ES0_IDX_CORE_IDX_GET(x)\ FIELD_GET(VCAP_ES0_IDX_CORE_IDX, x) -/* VCAP_ES0:VCAP_CORE_MAP:VCAP_CORE_MAP */ -#define VCAP_ES0_MAP __REG(TARGET_VCAP_ES0,\ - 0, 1, 912, 0, 1, 8, 4, 0, 1, 4) +/* VCAP_ES0:VCAP_CORE_MAP:VCAP_CORE_MAP */ +#define VCAP_ES0_MAP \ + __REG(TARGET_VCAP_ES0, 0, 1, 912, 0, 1, 8, 4, 0, 1, 4) #define VCAP_ES0_MAP_CORE_MAP GENMASK(2, 0) #define VCAP_ES0_MAP_CORE_MAP_SET(x)\ @@ -6947,9 +7499,9 @@ enum sparx5_target { #define VCAP_ES0_MAP_CORE_MAP_GET(x)\ FIELD_GET(VCAP_ES0_MAP_CORE_MAP, x) -/* VCAP_ES0:VCAP_CORE_STICKY:VCAP_STICKY */ -#define VCAP_ES0_VCAP_STICKY __REG(TARGET_VCAP_ES0,\ - 0, 1, 920, 0, 1, 4, 0, 0, 1, 4) +/* VCAP_ES0:VCAP_CORE_STICKY:VCAP_STICKY */ +#define VCAP_ES0_VCAP_STICKY \ + __REG(TARGET_VCAP_ES0, 0, 1, 920, 0, 1, 4, 0, 0, 1, 4) #define VCAP_ES0_VCAP_STICKY_VCAP_ROW_DELETED_STICKY BIT(0) #define VCAP_ES0_VCAP_STICKY_VCAP_ROW_DELETED_STICKY_SET(x)\ @@ -6957,49 +7509,49 @@ enum sparx5_target { #define VCAP_ES0_VCAP_STICKY_VCAP_ROW_DELETED_STICKY_GET(x)\ FIELD_GET(VCAP_ES0_VCAP_STICKY_VCAP_ROW_DELETED_STICKY, x) -/* VCAP_ES0:VCAP_CONST:VCAP_VER */ -#define VCAP_ES0_VCAP_VER __REG(TARGET_VCAP_ES0,\ - 0, 1, 924, 0, 1, 40, 0, 0, 1, 4) +/* VCAP_ES0:VCAP_CONST:VCAP_VER */ +#define VCAP_ES0_VCAP_VER \ + __REG(TARGET_VCAP_ES0, 0, 1, 924, 0, 1, 40, 0, 0, 1, 4) -/* VCAP_ES0:VCAP_CONST:ENTRY_WIDTH */ -#define VCAP_ES0_ENTRY_WIDTH __REG(TARGET_VCAP_ES0,\ - 0, 1, 924, 0, 1, 40, 4, 0, 1, 4) +/* VCAP_ES0:VCAP_CONST:ENTRY_WIDTH */ +#define VCAP_ES0_ENTRY_WIDTH \ + __REG(TARGET_VCAP_ES0, 0, 1, 924, 0, 1, 40, 4, 0, 1, 4) -/* VCAP_ES0:VCAP_CONST:ENTRY_CNT */ -#define VCAP_ES0_ENTRY_CNT __REG(TARGET_VCAP_ES0,\ - 0, 1, 924, 0, 1, 40, 8, 0, 1, 4) +/* VCAP_ES0:VCAP_CONST:ENTRY_CNT */ +#define VCAP_ES0_ENTRY_CNT \ + __REG(TARGET_VCAP_ES0, 0, 1, 924, 0, 1, 40, 8, 0, 1, 4) -/* VCAP_ES0:VCAP_CONST:ENTRY_SWCNT */ -#define VCAP_ES0_ENTRY_SWCNT __REG(TARGET_VCAP_ES0,\ - 0, 1, 924, 0, 1, 40, 12, 0, 1, 4) +/* VCAP_ES0:VCAP_CONST:ENTRY_SWCNT */ +#define VCAP_ES0_ENTRY_SWCNT \ + __REG(TARGET_VCAP_ES0, 0, 1, 924, 0, 1, 40, 12, 0, 1, 4) -/* VCAP_ES0:VCAP_CONST:ENTRY_TG_WIDTH */ -#define VCAP_ES0_ENTRY_TG_WIDTH __REG(TARGET_VCAP_ES0,\ - 0, 1, 924, 0, 1, 40, 16, 0, 1, 4) +/* VCAP_ES0:VCAP_CONST:ENTRY_TG_WIDTH */ +#define VCAP_ES0_ENTRY_TG_WIDTH \ + __REG(TARGET_VCAP_ES0, 0, 1, 924, 0, 1, 40, 16, 0, 1, 4) -/* VCAP_ES0:VCAP_CONST:ACTION_DEF_CNT */ -#define VCAP_ES0_ACTION_DEF_CNT __REG(TARGET_VCAP_ES0,\ - 0, 1, 924, 0, 1, 40, 20, 0, 1, 4) +/* VCAP_ES0:VCAP_CONST:ACTION_DEF_CNT */ +#define VCAP_ES0_ACTION_DEF_CNT \ + __REG(TARGET_VCAP_ES0, 0, 1, 924, 0, 1, 40, 20, 0, 1, 4) -/* 
VCAP_ES0:VCAP_CONST:ACTION_WIDTH */ -#define VCAP_ES0_ACTION_WIDTH __REG(TARGET_VCAP_ES0,\ - 0, 1, 924, 0, 1, 40, 24, 0, 1, 4) +/* VCAP_ES0:VCAP_CONST:ACTION_WIDTH */ +#define VCAP_ES0_ACTION_WIDTH \ + __REG(TARGET_VCAP_ES0, 0, 1, 924, 0, 1, 40, 24, 0, 1, 4) -/* VCAP_ES0:VCAP_CONST:CNT_WIDTH */ -#define VCAP_ES0_CNT_WIDTH __REG(TARGET_VCAP_ES0,\ - 0, 1, 924, 0, 1, 40, 28, 0, 1, 4) +/* VCAP_ES0:VCAP_CONST:CNT_WIDTH */ +#define VCAP_ES0_CNT_WIDTH \ + __REG(TARGET_VCAP_ES0, 0, 1, 924, 0, 1, 40, 28, 0, 1, 4) -/* VCAP_ES0:VCAP_CONST:CORE_CNT */ -#define VCAP_ES0_CORE_CNT __REG(TARGET_VCAP_ES0,\ - 0, 1, 924, 0, 1, 40, 32, 0, 1, 4) +/* VCAP_ES0:VCAP_CONST:CORE_CNT */ +#define VCAP_ES0_CORE_CNT \ + __REG(TARGET_VCAP_ES0, 0, 1, 924, 0, 1, 40, 32, 0, 1, 4) -/* VCAP_ES0:VCAP_CONST:IF_CNT */ -#define VCAP_ES0_IF_CNT __REG(TARGET_VCAP_ES0,\ - 0, 1, 924, 0, 1, 40, 36, 0, 1, 4) +/* VCAP_ES0:VCAP_CONST:IF_CNT */ +#define VCAP_ES0_IF_CNT \ + __REG(TARGET_VCAP_ES0, 0, 1, 924, 0, 1, 40, 36, 0, 1, 4) -/* VCAP_ES2:VCAP_CORE_CFG:VCAP_UPDATE_CTRL */ -#define VCAP_ES2_CTRL __REG(TARGET_VCAP_ES2,\ - 0, 1, 0, 0, 1, 8, 0, 0, 1, 4) +/* VCAP_ES2:VCAP_CORE_CFG:VCAP_UPDATE_CTRL */ +#define VCAP_ES2_CTRL \ + __REG(TARGET_VCAP_ES2, 0, 1, 0, 0, 1, 8, 0, 0, 1, 4) #define VCAP_ES2_CTRL_UPDATE_CMD GENMASK(24, 22) #define VCAP_ES2_CTRL_UPDATE_CMD_SET(x)\ @@ -7049,9 +7601,9 @@ enum sparx5_target { #define VCAP_ES2_CTRL_MV_TRAFFIC_IGN_GET(x)\ FIELD_GET(VCAP_ES2_CTRL_MV_TRAFFIC_IGN, x) -/* VCAP_ES2:VCAP_CORE_CFG:VCAP_MV_CFG */ -#define VCAP_ES2_CFG __REG(TARGET_VCAP_ES2,\ - 0, 1, 0, 0, 1, 8, 4, 0, 1, 4) +/* VCAP_ES2:VCAP_CORE_CFG:VCAP_MV_CFG */ +#define VCAP_ES2_CFG \ + __REG(TARGET_VCAP_ES2, 0, 1, 0, 0, 1, 8, 4, 0, 1, 4) #define VCAP_ES2_CFG_MV_NUM_POS GENMASK(31, 16) #define VCAP_ES2_CFG_MV_NUM_POS_SET(x)\ @@ -7065,33 +7617,33 @@ enum sparx5_target { #define VCAP_ES2_CFG_MV_SIZE_GET(x)\ FIELD_GET(VCAP_ES2_CFG_MV_SIZE, x) -/* VCAP_ES2:VCAP_CORE_CACHE:VCAP_ENTRY_DAT */ -#define VCAP_ES2_VCAP_ENTRY_DAT(r) __REG(TARGET_VCAP_ES2,\ - 0, 1, 8, 0, 1, 904, 0, r, 64, 4) +/* VCAP_ES2:VCAP_CORE_CACHE:VCAP_ENTRY_DAT */ +#define VCAP_ES2_VCAP_ENTRY_DAT(r) \ + __REG(TARGET_VCAP_ES2, 0, 1, 8, 0, 1, 904, 0, r, 64, 4) -/* VCAP_ES2:VCAP_CORE_CACHE:VCAP_MASK_DAT */ -#define VCAP_ES2_VCAP_MASK_DAT(r) __REG(TARGET_VCAP_ES2,\ - 0, 1, 8, 0, 1, 904, 256, r, 64, 4) +/* VCAP_ES2:VCAP_CORE_CACHE:VCAP_MASK_DAT */ +#define VCAP_ES2_VCAP_MASK_DAT(r) \ + __REG(TARGET_VCAP_ES2, 0, 1, 8, 0, 1, 904, 256, r, 64, 4) -/* VCAP_ES2:VCAP_CORE_CACHE:VCAP_ACTION_DAT */ -#define VCAP_ES2_VCAP_ACTION_DAT(r) __REG(TARGET_VCAP_ES2,\ - 0, 1, 8, 0, 1, 904, 512, r, 64, 4) +/* VCAP_ES2:VCAP_CORE_CACHE:VCAP_ACTION_DAT */ +#define VCAP_ES2_VCAP_ACTION_DAT(r) \ + __REG(TARGET_VCAP_ES2, 0, 1, 8, 0, 1, 904, 512, r, 64, 4) -/* VCAP_ES2:VCAP_CORE_CACHE:VCAP_CNT_DAT */ -#define VCAP_ES2_VCAP_CNT_DAT(r) __REG(TARGET_VCAP_ES2,\ - 0, 1, 8, 0, 1, 904, 768, r, 32, 4) +/* VCAP_ES2:VCAP_CORE_CACHE:VCAP_CNT_DAT */ +#define VCAP_ES2_VCAP_CNT_DAT(r) \ + __REG(TARGET_VCAP_ES2, 0, 1, 8, 0, 1, 904, 768, r, 32, 4) -/* VCAP_ES2:VCAP_CORE_CACHE:VCAP_CNT_FW_DAT */ -#define VCAP_ES2_VCAP_CNT_FW_DAT __REG(TARGET_VCAP_ES2,\ - 0, 1, 8, 0, 1, 904, 896, 0, 1, 4) +/* VCAP_ES2:VCAP_CORE_CACHE:VCAP_CNT_FW_DAT */ +#define VCAP_ES2_VCAP_CNT_FW_DAT \ + __REG(TARGET_VCAP_ES2, 0, 1, 8, 0, 1, 904, 896, 0, 1, 4) -/* VCAP_ES2:VCAP_CORE_CACHE:VCAP_TG_DAT */ -#define VCAP_ES2_VCAP_TG_DAT __REG(TARGET_VCAP_ES2,\ - 0, 1, 8, 0, 1, 904, 900, 0, 1, 4) +/* VCAP_ES2:VCAP_CORE_CACHE:VCAP_TG_DAT */ +#define VCAP_ES2_VCAP_TG_DAT \ 
+ __REG(TARGET_VCAP_ES2, 0, 1, 8, 0, 1, 904, 900, 0, 1, 4) -/* VCAP_ES2:VCAP_CORE_MAP:VCAP_CORE_IDX */ -#define VCAP_ES2_IDX __REG(TARGET_VCAP_ES2,\ - 0, 1, 912, 0, 1, 8, 0, 0, 1, 4) +/* VCAP_ES2:VCAP_CORE_MAP:VCAP_CORE_IDX */ +#define VCAP_ES2_IDX \ + __REG(TARGET_VCAP_ES2, 0, 1, 912, 0, 1, 8, 0, 0, 1, 4) #define VCAP_ES2_IDX_CORE_IDX GENMASK(3, 0) #define VCAP_ES2_IDX_CORE_IDX_SET(x)\ @@ -7099,9 +7651,9 @@ enum sparx5_target { #define VCAP_ES2_IDX_CORE_IDX_GET(x)\ FIELD_GET(VCAP_ES2_IDX_CORE_IDX, x) -/* VCAP_ES2:VCAP_CORE_MAP:VCAP_CORE_MAP */ -#define VCAP_ES2_MAP __REG(TARGET_VCAP_ES2,\ - 0, 1, 912, 0, 1, 8, 4, 0, 1, 4) +/* VCAP_ES2:VCAP_CORE_MAP:VCAP_CORE_MAP */ +#define VCAP_ES2_MAP \ + __REG(TARGET_VCAP_ES2, 0, 1, 912, 0, 1, 8, 4, 0, 1, 4) #define VCAP_ES2_MAP_CORE_MAP GENMASK(2, 0) #define VCAP_ES2_MAP_CORE_MAP_SET(x)\ @@ -7109,9 +7661,9 @@ enum sparx5_target { #define VCAP_ES2_MAP_CORE_MAP_GET(x)\ FIELD_GET(VCAP_ES2_MAP_CORE_MAP, x) -/* VCAP_ES2:VCAP_CORE_STICKY:VCAP_STICKY */ -#define VCAP_ES2_VCAP_STICKY __REG(TARGET_VCAP_ES2,\ - 0, 1, 920, 0, 1, 4, 0, 0, 1, 4) +/* VCAP_ES2:VCAP_CORE_STICKY:VCAP_STICKY */ +#define VCAP_ES2_VCAP_STICKY \ + __REG(TARGET_VCAP_ES2, 0, 1, 920, 0, 1, 4, 0, 0, 1, 4) #define VCAP_ES2_VCAP_STICKY_VCAP_ROW_DELETED_STICKY BIT(0) #define VCAP_ES2_VCAP_STICKY_VCAP_ROW_DELETED_STICKY_SET(x)\ @@ -7119,49 +7671,49 @@ enum sparx5_target { #define VCAP_ES2_VCAP_STICKY_VCAP_ROW_DELETED_STICKY_GET(x)\ FIELD_GET(VCAP_ES2_VCAP_STICKY_VCAP_ROW_DELETED_STICKY, x) -/* VCAP_ES2:VCAP_CONST:VCAP_VER */ -#define VCAP_ES2_VCAP_VER __REG(TARGET_VCAP_ES2,\ - 0, 1, 924, 0, 1, 40, 0, 0, 1, 4) +/* VCAP_ES2:VCAP_CONST:VCAP_VER */ +#define VCAP_ES2_VCAP_VER \ + __REG(TARGET_VCAP_ES2, 0, 1, 924, 0, 1, 40, 0, 0, 1, 4) -/* VCAP_ES2:VCAP_CONST:ENTRY_WIDTH */ -#define VCAP_ES2_ENTRY_WIDTH __REG(TARGET_VCAP_ES2,\ - 0, 1, 924, 0, 1, 40, 4, 0, 1, 4) +/* VCAP_ES2:VCAP_CONST:ENTRY_WIDTH */ +#define VCAP_ES2_ENTRY_WIDTH \ + __REG(TARGET_VCAP_ES2, 0, 1, 924, 0, 1, 40, 4, 0, 1, 4) -/* VCAP_ES2:VCAP_CONST:ENTRY_CNT */ -#define VCAP_ES2_ENTRY_CNT __REG(TARGET_VCAP_ES2,\ - 0, 1, 924, 0, 1, 40, 8, 0, 1, 4) +/* VCAP_ES2:VCAP_CONST:ENTRY_CNT */ +#define VCAP_ES2_ENTRY_CNT \ + __REG(TARGET_VCAP_ES2, 0, 1, 924, 0, 1, 40, 8, 0, 1, 4) -/* VCAP_ES2:VCAP_CONST:ENTRY_SWCNT */ -#define VCAP_ES2_ENTRY_SWCNT __REG(TARGET_VCAP_ES2,\ - 0, 1, 924, 0, 1, 40, 12, 0, 1, 4) +/* VCAP_ES2:VCAP_CONST:ENTRY_SWCNT */ +#define VCAP_ES2_ENTRY_SWCNT \ + __REG(TARGET_VCAP_ES2, 0, 1, 924, 0, 1, 40, 12, 0, 1, 4) -/* VCAP_ES2:VCAP_CONST:ENTRY_TG_WIDTH */ -#define VCAP_ES2_ENTRY_TG_WIDTH __REG(TARGET_VCAP_ES2,\ - 0, 1, 924, 0, 1, 40, 16, 0, 1, 4) +/* VCAP_ES2:VCAP_CONST:ENTRY_TG_WIDTH */ +#define VCAP_ES2_ENTRY_TG_WIDTH \ + __REG(TARGET_VCAP_ES2, 0, 1, 924, 0, 1, 40, 16, 0, 1, 4) -/* VCAP_ES2:VCAP_CONST:ACTION_DEF_CNT */ -#define VCAP_ES2_ACTION_DEF_CNT __REG(TARGET_VCAP_ES2,\ - 0, 1, 924, 0, 1, 40, 20, 0, 1, 4) +/* VCAP_ES2:VCAP_CONST:ACTION_DEF_CNT */ +#define VCAP_ES2_ACTION_DEF_CNT \ + __REG(TARGET_VCAP_ES2, 0, 1, 924, 0, 1, 40, 20, 0, 1, 4) -/* VCAP_ES2:VCAP_CONST:ACTION_WIDTH */ -#define VCAP_ES2_ACTION_WIDTH __REG(TARGET_VCAP_ES2,\ - 0, 1, 924, 0, 1, 40, 24, 0, 1, 4) +/* VCAP_ES2:VCAP_CONST:ACTION_WIDTH */ +#define VCAP_ES2_ACTION_WIDTH \ + __REG(TARGET_VCAP_ES2, 0, 1, 924, 0, 1, 40, 24, 0, 1, 4) -/* VCAP_ES2:VCAP_CONST:CNT_WIDTH */ -#define VCAP_ES2_CNT_WIDTH __REG(TARGET_VCAP_ES2,\ - 0, 1, 924, 0, 1, 40, 28, 0, 1, 4) +/* VCAP_ES2:VCAP_CONST:CNT_WIDTH */ +#define VCAP_ES2_CNT_WIDTH \ + __REG(TARGET_VCAP_ES2, 0, 1, 924, 0, 
1, 40, 28, 0, 1, 4) -/* VCAP_ES2:VCAP_CONST:CORE_CNT */ -#define VCAP_ES2_CORE_CNT __REG(TARGET_VCAP_ES2,\ - 0, 1, 924, 0, 1, 40, 32, 0, 1, 4) +/* VCAP_ES2:VCAP_CONST:CORE_CNT */ +#define VCAP_ES2_CORE_CNT \ + __REG(TARGET_VCAP_ES2, 0, 1, 924, 0, 1, 40, 32, 0, 1, 4) -/* VCAP_ES2:VCAP_CONST:IF_CNT */ -#define VCAP_ES2_IF_CNT __REG(TARGET_VCAP_ES2,\ - 0, 1, 924, 0, 1, 40, 36, 0, 1, 4) +/* VCAP_ES2:VCAP_CONST:IF_CNT */ +#define VCAP_ES2_IF_CNT \ + __REG(TARGET_VCAP_ES2, 0, 1, 924, 0, 1, 40, 36, 0, 1, 4) -/* VCAP_SUPER:VCAP_CORE_CFG:VCAP_UPDATE_CTRL */ -#define VCAP_SUPER_CTRL __REG(TARGET_VCAP_SUPER,\ - 0, 1, 0, 0, 1, 8, 0, 0, 1, 4) +/* VCAP_SUPER:VCAP_CORE_CFG:VCAP_UPDATE_CTRL */ +#define VCAP_SUPER_CTRL \ + __REG(TARGET_VCAP_SUPER, 0, 1, 0, 0, 1, 8, 0, 0, 1, 4) #define VCAP_SUPER_CTRL_UPDATE_CMD GENMASK(24, 22) #define VCAP_SUPER_CTRL_UPDATE_CMD_SET(x)\ @@ -7211,9 +7763,9 @@ enum sparx5_target { #define VCAP_SUPER_CTRL_MV_TRAFFIC_IGN_GET(x)\ FIELD_GET(VCAP_SUPER_CTRL_MV_TRAFFIC_IGN, x) -/* VCAP_SUPER:VCAP_CORE_CFG:VCAP_MV_CFG */ -#define VCAP_SUPER_CFG __REG(TARGET_VCAP_SUPER,\ - 0, 1, 0, 0, 1, 8, 4, 0, 1, 4) +/* VCAP_SUPER:VCAP_CORE_CFG:VCAP_MV_CFG */ +#define VCAP_SUPER_CFG \ + __REG(TARGET_VCAP_SUPER, 0, 1, 0, 0, 1, 8, 4, 0, 1, 4) #define VCAP_SUPER_CFG_MV_NUM_POS GENMASK(31, 16) #define VCAP_SUPER_CFG_MV_NUM_POS_SET(x)\ @@ -7227,33 +7779,33 @@ enum sparx5_target { #define VCAP_SUPER_CFG_MV_SIZE_GET(x)\ FIELD_GET(VCAP_SUPER_CFG_MV_SIZE, x) -/* VCAP_SUPER:VCAP_CORE_CACHE:VCAP_ENTRY_DAT */ -#define VCAP_SUPER_VCAP_ENTRY_DAT(r) __REG(TARGET_VCAP_SUPER,\ - 0, 1, 8, 0, 1, 904, 0, r, 64, 4) +/* VCAP_SUPER:VCAP_CORE_CACHE:VCAP_ENTRY_DAT */ +#define VCAP_SUPER_VCAP_ENTRY_DAT(r) \ + __REG(TARGET_VCAP_SUPER, 0, 1, 8, 0, 1, 904, 0, r, 64, 4) -/* VCAP_SUPER:VCAP_CORE_CACHE:VCAP_MASK_DAT */ -#define VCAP_SUPER_VCAP_MASK_DAT(r) __REG(TARGET_VCAP_SUPER,\ - 0, 1, 8, 0, 1, 904, 256, r, 64, 4) +/* VCAP_SUPER:VCAP_CORE_CACHE:VCAP_MASK_DAT */ +#define VCAP_SUPER_VCAP_MASK_DAT(r) \ + __REG(TARGET_VCAP_SUPER, 0, 1, 8, 0, 1, 904, 256, r, 64, 4) -/* VCAP_SUPER:VCAP_CORE_CACHE:VCAP_ACTION_DAT */ -#define VCAP_SUPER_VCAP_ACTION_DAT(r) __REG(TARGET_VCAP_SUPER,\ - 0, 1, 8, 0, 1, 904, 512, r, 64, 4) +/* VCAP_SUPER:VCAP_CORE_CACHE:VCAP_ACTION_DAT */ +#define VCAP_SUPER_VCAP_ACTION_DAT(r) \ + __REG(TARGET_VCAP_SUPER, 0, 1, 8, 0, 1, 904, 512, r, 64, 4) -/* VCAP_SUPER:VCAP_CORE_CACHE:VCAP_CNT_DAT */ -#define VCAP_SUPER_VCAP_CNT_DAT(r) __REG(TARGET_VCAP_SUPER,\ - 0, 1, 8, 0, 1, 904, 768, r, 32, 4) +/* VCAP_SUPER:VCAP_CORE_CACHE:VCAP_CNT_DAT */ +#define VCAP_SUPER_VCAP_CNT_DAT(r) \ + __REG(TARGET_VCAP_SUPER, 0, 1, 8, 0, 1, 904, 768, r, 32, 4) -/* VCAP_SUPER:VCAP_CORE_CACHE:VCAP_CNT_FW_DAT */ -#define VCAP_SUPER_VCAP_CNT_FW_DAT __REG(TARGET_VCAP_SUPER,\ - 0, 1, 8, 0, 1, 904, 896, 0, 1, 4) +/* VCAP_SUPER:VCAP_CORE_CACHE:VCAP_CNT_FW_DAT */ +#define VCAP_SUPER_VCAP_CNT_FW_DAT \ + __REG(TARGET_VCAP_SUPER, 0, 1, 8, 0, 1, 904, 896, 0, 1, 4) -/* VCAP_SUPER:VCAP_CORE_CACHE:VCAP_TG_DAT */ -#define VCAP_SUPER_VCAP_TG_DAT __REG(TARGET_VCAP_SUPER,\ - 0, 1, 8, 0, 1, 904, 900, 0, 1, 4) +/* VCAP_SUPER:VCAP_CORE_CACHE:VCAP_TG_DAT */ +#define VCAP_SUPER_VCAP_TG_DAT \ + __REG(TARGET_VCAP_SUPER, 0, 1, 8, 0, 1, 904, 900, 0, 1, 4) -/* VCAP_SUPER:VCAP_CORE_MAP:VCAP_CORE_IDX */ -#define VCAP_SUPER_IDX __REG(TARGET_VCAP_SUPER,\ - 0, 1, 912, 0, 1, 8, 0, 0, 1, 4) +/* VCAP_SUPER:VCAP_CORE_MAP:VCAP_CORE_IDX */ +#define VCAP_SUPER_IDX \ + __REG(TARGET_VCAP_SUPER, 0, 1, 912, 0, 1, 8, 0, 0, 1, 4) #define VCAP_SUPER_IDX_CORE_IDX GENMASK(3, 0) #define 
VCAP_SUPER_IDX_CORE_IDX_SET(x)\ @@ -7261,9 +7813,9 @@ enum sparx5_target { #define VCAP_SUPER_IDX_CORE_IDX_GET(x)\ FIELD_GET(VCAP_SUPER_IDX_CORE_IDX, x) -/* VCAP_SUPER:VCAP_CORE_MAP:VCAP_CORE_MAP */ -#define VCAP_SUPER_MAP __REG(TARGET_VCAP_SUPER,\ - 0, 1, 912, 0, 1, 8, 4, 0, 1, 4) +/* VCAP_SUPER:VCAP_CORE_MAP:VCAP_CORE_MAP */ +#define VCAP_SUPER_MAP \ + __REG(TARGET_VCAP_SUPER, 0, 1, 912, 0, 1, 8, 4, 0, 1, 4) #define VCAP_SUPER_MAP_CORE_MAP GENMASK(2, 0) #define VCAP_SUPER_MAP_CORE_MAP_SET(x)\ @@ -7271,49 +7823,49 @@ enum sparx5_target { #define VCAP_SUPER_MAP_CORE_MAP_GET(x)\ FIELD_GET(VCAP_SUPER_MAP_CORE_MAP, x) -/* VCAP_SUPER:VCAP_CONST:VCAP_VER */ -#define VCAP_SUPER_VCAP_VER __REG(TARGET_VCAP_SUPER,\ - 0, 1, 924, 0, 1, 40, 0, 0, 1, 4) +/* VCAP_SUPER:VCAP_CONST:VCAP_VER */ +#define VCAP_SUPER_VCAP_VER \ + __REG(TARGET_VCAP_SUPER, 0, 1, 924, 0, 1, 40, 0, 0, 1, 4) -/* VCAP_SUPER:VCAP_CONST:ENTRY_WIDTH */ -#define VCAP_SUPER_ENTRY_WIDTH __REG(TARGET_VCAP_SUPER,\ - 0, 1, 924, 0, 1, 40, 4, 0, 1, 4) +/* VCAP_SUPER:VCAP_CONST:ENTRY_WIDTH */ +#define VCAP_SUPER_ENTRY_WIDTH \ + __REG(TARGET_VCAP_SUPER, 0, 1, 924, 0, 1, 40, 4, 0, 1, 4) -/* VCAP_SUPER:VCAP_CONST:ENTRY_CNT */ -#define VCAP_SUPER_ENTRY_CNT __REG(TARGET_VCAP_SUPER,\ - 0, 1, 924, 0, 1, 40, 8, 0, 1, 4) +/* VCAP_SUPER:VCAP_CONST:ENTRY_CNT */ +#define VCAP_SUPER_ENTRY_CNT \ + __REG(TARGET_VCAP_SUPER, 0, 1, 924, 0, 1, 40, 8, 0, 1, 4) -/* VCAP_SUPER:VCAP_CONST:ENTRY_SWCNT */ -#define VCAP_SUPER_ENTRY_SWCNT __REG(TARGET_VCAP_SUPER,\ - 0, 1, 924, 0, 1, 40, 12, 0, 1, 4) +/* VCAP_SUPER:VCAP_CONST:ENTRY_SWCNT */ +#define VCAP_SUPER_ENTRY_SWCNT \ + __REG(TARGET_VCAP_SUPER, 0, 1, 924, 0, 1, 40, 12, 0, 1, 4) -/* VCAP_SUPER:VCAP_CONST:ENTRY_TG_WIDTH */ -#define VCAP_SUPER_ENTRY_TG_WIDTH __REG(TARGET_VCAP_SUPER,\ - 0, 1, 924, 0, 1, 40, 16, 0, 1, 4) +/* VCAP_SUPER:VCAP_CONST:ENTRY_TG_WIDTH */ +#define VCAP_SUPER_ENTRY_TG_WIDTH \ + __REG(TARGET_VCAP_SUPER, 0, 1, 924, 0, 1, 40, 16, 0, 1, 4) -/* VCAP_SUPER:VCAP_CONST:ACTION_DEF_CNT */ -#define VCAP_SUPER_ACTION_DEF_CNT __REG(TARGET_VCAP_SUPER,\ - 0, 1, 924, 0, 1, 40, 20, 0, 1, 4) +/* VCAP_SUPER:VCAP_CONST:ACTION_DEF_CNT */ +#define VCAP_SUPER_ACTION_DEF_CNT \ + __REG(TARGET_VCAP_SUPER, 0, 1, 924, 0, 1, 40, 20, 0, 1, 4) -/* VCAP_SUPER:VCAP_CONST:ACTION_WIDTH */ -#define VCAP_SUPER_ACTION_WIDTH __REG(TARGET_VCAP_SUPER,\ - 0, 1, 924, 0, 1, 40, 24, 0, 1, 4) +/* VCAP_SUPER:VCAP_CONST:ACTION_WIDTH */ +#define VCAP_SUPER_ACTION_WIDTH \ + __REG(TARGET_VCAP_SUPER, 0, 1, 924, 0, 1, 40, 24, 0, 1, 4) -/* VCAP_SUPER:VCAP_CONST:CNT_WIDTH */ -#define VCAP_SUPER_CNT_WIDTH __REG(TARGET_VCAP_SUPER,\ - 0, 1, 924, 0, 1, 40, 28, 0, 1, 4) +/* VCAP_SUPER:VCAP_CONST:CNT_WIDTH */ +#define VCAP_SUPER_CNT_WIDTH \ + __REG(TARGET_VCAP_SUPER, 0, 1, 924, 0, 1, 40, 28, 0, 1, 4) -/* VCAP_SUPER:VCAP_CONST:CORE_CNT */ -#define VCAP_SUPER_CORE_CNT __REG(TARGET_VCAP_SUPER,\ - 0, 1, 924, 0, 1, 40, 32, 0, 1, 4) +/* VCAP_SUPER:VCAP_CONST:CORE_CNT */ +#define VCAP_SUPER_CORE_CNT \ + __REG(TARGET_VCAP_SUPER, 0, 1, 924, 0, 1, 40, 32, 0, 1, 4) -/* VCAP_SUPER:VCAP_CONST:IF_CNT */ -#define VCAP_SUPER_IF_CNT __REG(TARGET_VCAP_SUPER,\ - 0, 1, 924, 0, 1, 40, 36, 0, 1, 4) +/* VCAP_SUPER:VCAP_CONST:IF_CNT */ +#define VCAP_SUPER_IF_CNT \ + __REG(TARGET_VCAP_SUPER, 0, 1, 924, 0, 1, 40, 36, 0, 1, 4) -/* VCAP_SUPER:RAM_CTRL:RAM_INIT */ -#define VCAP_SUPER_RAM_INIT __REG(TARGET_VCAP_SUPER,\ - 0, 1, 1120, 0, 1, 4, 0, 0, 1, 4) +/* VCAP_SUPER:RAM_CTRL:RAM_INIT */ +#define VCAP_SUPER_RAM_INIT \ + __REG(TARGET_VCAP_SUPER, 0, 1, 1120, 0, 1, 4, 0, 0, 1, 4) 
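/* Note the contrast running through this file: the VCAP_ES0/ES2/SUPER macros
 * keep literal group offsets (0, 8, 912, 920, 924), the layout within a VCAP
 * target apparently being identical across the supported platforms, and were
 * only reflowed; platform-dependent macros take their bases and counts from
 * the 'regs' descriptor. Both forms, as they appear after this patch:
 */
#define VCAP_SUPER_VCAP_VER \
	__REG(TARGET_VCAP_SUPER, 0, 1, 924, 0, 1, 40, 0, 0, 1, 4)

#define REW_PORT_VLAN_CFG(g) \
	__REG(TARGET_REW, 0, 1, regs->gaddr[GA_REW_PORT], g, \
	      regs->gcnt[GC_REW_PORT], 256, 0, 0, 1, 4)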
#define VCAP_SUPER_RAM_INIT_RAM_INIT BIT(1) #define VCAP_SUPER_RAM_INIT_RAM_INIT_SET(x)\ @@ -7327,9 +7879,10 @@ enum sparx5_target { #define VCAP_SUPER_RAM_INIT_RAM_CFG_HOOK_GET(x)\ FIELD_GET(VCAP_SUPER_RAM_INIT_RAM_CFG_HOOK, x) -/* VOP:RAM_CTRL:RAM_INIT */ -#define VOP_RAM_INIT __REG(TARGET_VOP,\ - 0, 1, 279176, 0, 1, 4, 0, 0, 1, 4) +/* VOP:RAM_CTRL:RAM_INIT */ +#define VOP_RAM_INIT \ + __REG(TARGET_VOP, 0, 1, regs->gaddr[GA_VOP_RAM_CTRL], 0, 1, 4, 0, 0, 1,\ + 4) #define VOP_RAM_INIT_RAM_INIT BIT(1) #define VOP_RAM_INIT_RAM_INIT_SET(x)\ @@ -7343,9 +7896,10 @@ enum sparx5_target { #define VOP_RAM_INIT_RAM_CFG_HOOK_GET(x)\ FIELD_GET(VOP_RAM_INIT_RAM_CFG_HOOK, x) -/* XQS:SYSTEM:STAT_CFG */ -#define XQS_STAT_CFG __REG(TARGET_XQS,\ - 0, 1, 6768, 0, 1, 872, 860, 0, 1, 4) +/* XQS:SYSTEM:STAT_CFG */ +#define XQS_STAT_CFG \ + __REG(TARGET_XQS, 0, 1, regs->gaddr[GA_XQS_SYSTEM], 0, 1, 872, 860, 0, \ + 1, 4) #define XQS_STAT_CFG_STAT_CLEAR_SHOT GENMASK(21, 18) #define XQS_STAT_CFG_STAT_CLEAR_SHOT_SET(x)\ @@ -7353,11 +7907,12 @@ enum sparx5_target { #define XQS_STAT_CFG_STAT_CLEAR_SHOT_GET(x)\ FIELD_GET(XQS_STAT_CFG_STAT_CLEAR_SHOT, x) -#define XQS_STAT_CFG_STAT_VIEW GENMASK(17, 5) +#define XQS_STAT_CFG_STAT_VIEW\ + GENMASK(regs->fsize[FW_XQS_STAT_CFG_STAT_VIEW] + 5 - 1, 5) #define XQS_STAT_CFG_STAT_VIEW_SET(x)\ - FIELD_PREP(XQS_STAT_CFG_STAT_VIEW, x) + spx5_field_prep(XQS_STAT_CFG_STAT_VIEW, x) #define XQS_STAT_CFG_STAT_VIEW_GET(x)\ - FIELD_GET(XQS_STAT_CFG_STAT_VIEW, x) + spx5_field_get(XQS_STAT_CFG_STAT_VIEW, x) #define XQS_STAT_CFG_STAT_SRV_PKT_ONLY BIT(4) #define XQS_STAT_CFG_STAT_SRV_PKT_ONLY_SET(x)\ @@ -7371,48 +7926,56 @@ enum sparx5_target { #define XQS_STAT_CFG_STAT_WRAP_DIS_GET(x)\ FIELD_GET(XQS_STAT_CFG_STAT_WRAP_DIS, x) -/* XQS:QLIMIT_SHR:QLIMIT_SHR_TOP_CFG */ -#define XQS_QLIMIT_SHR_TOP_CFG(g) __REG(TARGET_XQS,\ - 0, 1, 7936, g, 4, 48, 0, 0, 1, 4) +/* XQS:QLIMIT_SHR:QLIMIT_SHR_TOP_CFG */ +#define XQS_QLIMIT_SHR_TOP_CFG(g) \ + __REG(TARGET_XQS, 0, 1, regs->gaddr[GA_XQS_QLIMIT_SHR], g, 4, 48, 0, 0,\ + 1, 4) -#define XQS_QLIMIT_SHR_TOP_CFG_QLIMIT_SHR_TOP GENMASK(14, 0) +#define XQS_QLIMIT_SHR_TOP_CFG_QLIMIT_SHR_TOP\ + GENMASK(regs->fsize[FW_XQS_QLIMIT_SHR_TOP_CFG_QLIMIT_SHR_TOP] + 0 - 1, 0) #define XQS_QLIMIT_SHR_TOP_CFG_QLIMIT_SHR_TOP_SET(x)\ - FIELD_PREP(XQS_QLIMIT_SHR_TOP_CFG_QLIMIT_SHR_TOP, x) + spx5_field_prep(XQS_QLIMIT_SHR_TOP_CFG_QLIMIT_SHR_TOP, x) #define XQS_QLIMIT_SHR_TOP_CFG_QLIMIT_SHR_TOP_GET(x)\ - FIELD_GET(XQS_QLIMIT_SHR_TOP_CFG_QLIMIT_SHR_TOP, x) + spx5_field_get(XQS_QLIMIT_SHR_TOP_CFG_QLIMIT_SHR_TOP, x) -/* XQS:QLIMIT_SHR:QLIMIT_SHR_ATOP_CFG */ -#define XQS_QLIMIT_SHR_ATOP_CFG(g) __REG(TARGET_XQS,\ - 0, 1, 7936, g, 4, 48, 4, 0, 1, 4) +/* XQS:QLIMIT_SHR:QLIMIT_SHR_ATOP_CFG */ +#define XQS_QLIMIT_SHR_ATOP_CFG(g) \ + __REG(TARGET_XQS, 0, 1, regs->gaddr[GA_XQS_QLIMIT_SHR], g, 4, 48, 4, 0,\ + 1, 4) -#define XQS_QLIMIT_SHR_ATOP_CFG_QLIMIT_SHR_ATOP GENMASK(14, 0) +#define XQS_QLIMIT_SHR_ATOP_CFG_QLIMIT_SHR_ATOP\ + GENMASK(regs->fsize[FW_XQS_QLIMIT_SHR_ATOP_CFG_QLIMIT_SHR_ATOP] + 0 - 1, 0) #define XQS_QLIMIT_SHR_ATOP_CFG_QLIMIT_SHR_ATOP_SET(x)\ - FIELD_PREP(XQS_QLIMIT_SHR_ATOP_CFG_QLIMIT_SHR_ATOP, x) + spx5_field_prep(XQS_QLIMIT_SHR_ATOP_CFG_QLIMIT_SHR_ATOP, x) #define XQS_QLIMIT_SHR_ATOP_CFG_QLIMIT_SHR_ATOP_GET(x)\ - FIELD_GET(XQS_QLIMIT_SHR_ATOP_CFG_QLIMIT_SHR_ATOP, x) + spx5_field_get(XQS_QLIMIT_SHR_ATOP_CFG_QLIMIT_SHR_ATOP, x) -/* XQS:QLIMIT_SHR:QLIMIT_SHR_CTOP_CFG */ -#define XQS_QLIMIT_SHR_CTOP_CFG(g) __REG(TARGET_XQS,\ - 0, 1, 7936, g, 4, 48, 8, 0, 1, 4) +/* 
XQS:QLIMIT_SHR:QLIMIT_SHR_CTOP_CFG */ +#define XQS_QLIMIT_SHR_CTOP_CFG(g) \ + __REG(TARGET_XQS, 0, 1, regs->gaddr[GA_XQS_QLIMIT_SHR], g, 4, 48, 8, 0,\ + 1, 4) -#define XQS_QLIMIT_SHR_CTOP_CFG_QLIMIT_SHR_CTOP GENMASK(14, 0) +#define XQS_QLIMIT_SHR_CTOP_CFG_QLIMIT_SHR_CTOP\ + GENMASK(regs->fsize[FW_XQS_QLIMIT_SHR_CTOP_CFG_QLIMIT_SHR_CTOP] + 0 - 1, 0) #define XQS_QLIMIT_SHR_CTOP_CFG_QLIMIT_SHR_CTOP_SET(x)\ - FIELD_PREP(XQS_QLIMIT_SHR_CTOP_CFG_QLIMIT_SHR_CTOP, x) + spx5_field_prep(XQS_QLIMIT_SHR_CTOP_CFG_QLIMIT_SHR_CTOP, x) #define XQS_QLIMIT_SHR_CTOP_CFG_QLIMIT_SHR_CTOP_GET(x)\ - FIELD_GET(XQS_QLIMIT_SHR_CTOP_CFG_QLIMIT_SHR_CTOP, x) + spx5_field_get(XQS_QLIMIT_SHR_CTOP_CFG_QLIMIT_SHR_CTOP, x) -/* XQS:QLIMIT_SHR:QLIMIT_SHR_QLIM_CFG */ -#define XQS_QLIMIT_SHR_QLIM_CFG(g) __REG(TARGET_XQS,\ - 0, 1, 7936, g, 4, 48, 12, 0, 1, 4) +/* XQS:QLIMIT_SHR:QLIMIT_SHR_QLIM_CFG */ +#define XQS_QLIMIT_SHR_QLIM_CFG(g) \ + __REG(TARGET_XQS, 0, 1, regs->gaddr[GA_XQS_QLIMIT_SHR], g, 4, 48, 12, \ + 0, 1, 4) -#define XQS_QLIMIT_SHR_QLIM_CFG_QLIMIT_SHR_QLIM GENMASK(14, 0) +#define XQS_QLIMIT_SHR_QLIM_CFG_QLIMIT_SHR_QLIM\ + GENMASK(regs->fsize[FW_XQS_QLIMIT_SHR_QLIM_CFG_QLIMIT_SHR_QLIM] + 0 - 1, 0) #define XQS_QLIMIT_SHR_QLIM_CFG_QLIMIT_SHR_QLIM_SET(x)\ - FIELD_PREP(XQS_QLIMIT_SHR_QLIM_CFG_QLIMIT_SHR_QLIM, x) + spx5_field_prep(XQS_QLIMIT_SHR_QLIM_CFG_QLIMIT_SHR_QLIM, x) #define XQS_QLIMIT_SHR_QLIM_CFG_QLIMIT_SHR_QLIM_GET(x)\ - FIELD_GET(XQS_QLIMIT_SHR_QLIM_CFG_QLIMIT_SHR_QLIM, x) + spx5_field_get(XQS_QLIMIT_SHR_QLIM_CFG_QLIMIT_SHR_QLIM, x) -/* XQS:STAT:CNT */ -#define XQS_CNT(g) __REG(TARGET_XQS,\ - 0, 1, 0, g, 1024, 4, 0, 0, 1, 4) +/* XQS:STAT:CNT */ +#define XQS_CNT(g) \ + __REG(TARGET_XQS, 0, 1, 0, g, 1024, 4, 0, 0, 1, 4) #endif /* _SPARX5_MAIN_REGS_H_ */ diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_netdev.c b/drivers/net/ethernet/microchip/sparx5/sparx5_netdev.c index 705a004b324f..d4e9986ef16a 100644 --- a/drivers/net/ethernet/microchip/sparx5/sparx5_netdev.c +++ b/drivers/net/ethernet/microchip/sparx5/sparx5_netdev.c @@ -55,7 +55,7 @@ static void __ifh_encode_bitfield(void *ifh, u64 value, u32 pos, u32 width) ifh_hdr[byte - 5] |= (u8)((encode & 0xFF0000000000) >> 40); } -void sparx5_set_port_ifh(void *ifh_hdr, u16 portno) +void sparx5_set_port_ifh(struct sparx5 *sparx5, void *ifh_hdr, u16 portno) { /* VSTAX.RSV = 1. MSBit must be 1 */ ifh_encode_bitfield(ifh_hdr, 1, VSTAX + 79, 1); @@ -68,7 +68,8 @@ void sparx5_set_port_ifh(void *ifh_hdr, u16 portno) /* MISC.PIPELINE_ACT */ ifh_encode_bitfield(ifh_hdr, 1, 42, 3); /* FWD.SRC_PORT = CPU */ - ifh_encode_bitfield(ifh_hdr, SPX5_PORT_CPU, 46, 7); + ifh_encode_bitfield(ifh_hdr, sparx5_get_pgid(sparx5, SPX5_PORT_CPU_0), + 46, 7); /* FWD.SFLOW_ID (disable SFlow sampling) */ ifh_encode_bitfield(ifh_hdr, 124, 57, 7); /* FWD.UPDATE_FCS = Enable. Enforce update of FCS. 
*/ @@ -190,7 +191,8 @@ static int sparx5_set_mac_address(struct net_device *dev, void *p) sparx5_mact_forget(sparx5, dev->dev_addr, port->pvid); /* Add new */ - sparx5_mact_learn(sparx5, PGID_CPU, addr->sa_data, port->pvid); + sparx5_mact_learn(sparx5, sparx5_get_pgid(sparx5, PGID_CPU), + addr->sa_data, port->pvid); /* Record the address */ eth_hw_addr_set(dev, addr->sa_data); @@ -290,7 +292,7 @@ int sparx5_register_netdevs(struct sparx5 *sparx5) int portno; int err; - for (portno = 0; portno < SPX5_PORTS; portno++) + for (portno = 0; portno < sparx5->data->consts->n_ports; portno++) if (sparx5->ports[portno]) { err = register_netdev(sparx5->ports[portno]->ndev); if (err) { @@ -309,7 +311,7 @@ void sparx5_destroy_netdevs(struct sparx5 *sparx5) struct sparx5_port *port; int portno; - for (portno = 0; portno < SPX5_PORTS; portno++) { + for (portno = 0; portno < sparx5->data->consts->n_ports; portno++) { port = sparx5->ports[portno]; if (port && port->phylink) { /* Disconnect the phy */ @@ -327,8 +329,7 @@ void sparx5_unregister_netdevs(struct sparx5 *sparx5) { int portno; - for (portno = 0; portno < SPX5_PORTS; portno++) + for (portno = 0; portno < sparx5->data->consts->n_ports; portno++) if (sparx5->ports[portno]) unregister_netdev(sparx5->ports[portno]->ndev); } - diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_packet.c b/drivers/net/ethernet/microchip/sparx5/sparx5_packet.c index 70427643f777..5bfa86a71ac8 100644 --- a/drivers/net/ethernet/microchip/sparx5/sparx5_packet.c +++ b/drivers/net/ethernet/microchip/sparx5/sparx5_packet.c @@ -75,7 +75,7 @@ static void sparx5_xtr_grp(struct sparx5 *sparx5, u8 grp, bool byte_swap) sparx5_ifh_parse(ifh, &fi); /* Map to port netdev */ - port = fi.src_port < SPX5_PORTS ? + port = fi.src_port < sparx5->data->consts->n_ports ? sparx5->ports[fi.src_port] : NULL; if (!port || !port->ndev) { dev_err(sparx5->dev, "Data on inactive port %d\n", fi.src_port); @@ -235,7 +235,7 @@ netdev_tx_t sparx5_port_xmit_impl(struct sk_buff *skb, struct net_device *dev) netdev_tx_t ret; memset(ifh, 0, IFH_LEN * 4); - sparx5_set_port_ifh(ifh, port->portno); + sparx5_set_port_ifh(sparx5, ifh, port->portno); if (sparx5->ptp && skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) { if (sparx5_ptp_txtstamp_request(port, skb) < 0) @@ -317,7 +317,9 @@ int sparx5_manual_injection_mode(struct sparx5 *sparx5) sparx5, QS_INJ_GRP_CFG(INJ_QUEUE)); /* CPU ports capture setup */ - for (portno = SPX5_PORT_CPU_0; portno <= SPX5_PORT_CPU_1; portno++) { + for (portno = sparx5_get_internal_port(sparx5, SPX5_PORT_CPU_0); + portno <= sparx5_get_internal_port(sparx5, SPX5_PORT_CPU_1); + portno++) { /* ASM CPU port: No preamble, IFH, enable padding */ spx5_wr(ASM_PORT_CFG_PAD_ENA_SET(1) | ASM_PORT_CFG_NO_PREAMBLE_ENA_SET(1) | diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_pgid.c b/drivers/net/ethernet/microchip/sparx5/sparx5_pgid.c index af8b435009f4..eae819fa9486 100644 --- a/drivers/net/ethernet/microchip/sparx5/sparx5_pgid.c +++ b/drivers/net/ethernet/microchip/sparx5/sparx5_pgid.c @@ -5,13 +5,13 @@ void sparx5_pgid_init(struct sparx5 *spx5) { int i; - for (i = 0; i < PGID_TABLE_SIZE; i++) + for (i = 0; i < spx5->data->consts->n_pgids; i++) spx5->pgid_map[i] = SPX5_PGID_FREE; /* Reserved for unicast, flood control, broadcast, and CPU. * These cannot be freed. 
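/* All the fixed SPX5_PORTS bounds in these hunks become reads of a
 * per-platform constants block reached through sparx5->data->consts, and the
 * internal (CPU) ports are numbered right after the front ports. An
 * illustrative subset of the descriptor, with field names taken from this
 * patch and Sparx5 values assumed:
 */
struct sparx5_consts {
	u32 n_ports;		/* front ports; 65 on Sparx5 */
	u32 n_pgids;		/* PGID table entries */
	u32 n_sdlbs;		/* service dual leaky buckets */
	u32 n_filters;		/* PSFP stream filters */
	u32 n_gates;		/* PSFP stream gates */
	u32 n_lb_groups;	/* leaky bucket groups */
	u32 tod_pin;		/* PTP pin reserved for TOD access */
};

/* so the CPU capture loop above runs from n_ports + SPX5_PORT_CPU_0 to
 * n_ports + SPX5_PORT_CPU_1, presumably ports 65 and 66 on Sparx5.
 */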
*/ - for (i = 0; i <= PGID_CPU; i++) + for (i = 0; i <= sparx5_get_pgid(spx5, PGID_CPU); i++) spx5->pgid_map[i] = SPX5_PGID_RESERVED; } @@ -22,7 +22,8 @@ int sparx5_pgid_alloc_mcast(struct sparx5 *spx5, u16 *idx) /* The multicast area starts at index 65, but the first 7 * are reserved for flood masks and CPU. Start alloc after that. */ - for (i = PGID_MCAST_START; i < PGID_TABLE_SIZE; i++) { + for (i = sparx5_get_pgid(spx5, PGID_MCAST_START); + i < spx5->data->consts->n_pgids; i++) { if (spx5->pgid_map[i] == SPX5_PGID_FREE) { spx5->pgid_map[i] = SPX5_PGID_MULTICAST; *idx = i; @@ -35,7 +36,8 @@ int sparx5_pgid_alloc_mcast(struct sparx5 *spx5, u16 *idx) int sparx5_pgid_free(struct sparx5 *spx5, u16 idx) { - if (idx <= PGID_CPU || idx >= PGID_TABLE_SIZE) + if (idx <= sparx5_get_pgid(spx5, PGID_CPU) || + idx >= spx5->data->consts->n_pgids) return -EINVAL; if (spx5->pgid_map[idx] == SPX5_PGID_FREE) @@ -44,3 +46,8 @@ int sparx5_pgid_free(struct sparx5 *spx5, u16 idx) spx5->pgid_map[idx] = SPX5_PGID_FREE; return 0; } + +int sparx5_get_pgid(struct sparx5 *sparx5, int pgid) +{ + return sparx5->data->consts->n_ports + pgid; +} diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_police.c b/drivers/net/ethernet/microchip/sparx5/sparx5_police.c index 8ada5cee1342..c88820e83812 100644 --- a/drivers/net/ethernet/microchip/sparx5/sparx5_police.c +++ b/drivers/net/ethernet/microchip/sparx5/sparx5_police.c @@ -11,10 +11,11 @@ static int sparx5_policer_service_conf_set(struct sparx5 *sparx5, struct sparx5_policer *pol) { u32 idx, pup_tokens, max_pup_tokens, burst, thres; + const struct sparx5_ops *ops = sparx5->data->ops; struct sparx5_sdlb_group *g; u64 rate; - g = &sdlb_groups[pol->group]; + g = ops->get_sdlb_group(pol->group); idx = pol->idx; rate = pol->rate * 1000; diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_port.c b/drivers/net/ethernet/microchip/sparx5/sparx5_port.c index 062e486c002c..0b38b4cb0929 100644 --- a/drivers/net/ethernet/microchip/sparx5/sparx5_port.c +++ b/drivers/net/ethernet/microchip/sparx5/sparx5_port.c @@ -132,8 +132,8 @@ static int sparx5_get_sfi_status(struct sparx5 *sparx5, return -EINVAL; } - dev = sparx5_to_high_dev(portno); - tinst = sparx5_port_dev_index(portno); + dev = sparx5_to_high_dev(sparx5, portno); + tinst = sparx5_port_dev_index(sparx5, portno); inst = spx5_inst_get(sparx5, dev, tinst); value = spx5_inst_rd(inst, DEV10G_MAC_TX_MONITOR_STICKY(0)); @@ -213,11 +213,13 @@ static int sparx5_port_verify_speed(struct sparx5 *sparx5, struct sparx5_port *port, struct sparx5_port_config *conf) { - if ((sparx5_port_is_2g5(port->portno) && + const struct sparx5_ops *ops = sparx5->data->ops; + + if ((ops->is_port_2g5(port->portno) && conf->speed > SPEED_2500) || - (sparx5_port_is_5g(port->portno) && + (ops->is_port_5g(port->portno) && conf->speed > SPEED_5000) || - (sparx5_port_is_10g(port->portno) && + (ops->is_port_10g(port->portno) && conf->speed > SPEED_10000)) return sparx5_port_error(port, conf, SPX5_PERR_SPEED); @@ -226,14 +228,14 @@ static int sparx5_port_verify_speed(struct sparx5 *sparx5, return -EINVAL; case PHY_INTERFACE_MODE_1000BASEX: if (conf->speed != SPEED_1000 || - sparx5_port_is_2g5(port->portno)) + ops->is_port_2g5(port->portno)) return sparx5_port_error(port, conf, SPX5_PERR_SPEED); - if (sparx5_port_is_2g5(port->portno)) + if (ops->is_port_2g5(port->portno)) return sparx5_port_error(port, conf, SPX5_PERR_IFTYPE); break; case PHY_INTERFACE_MODE_2500BASEX: if (conf->speed != SPEED_2500 || - sparx5_port_is_2g5(port->portno)) + 
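/* sparx5_get_pgid(), added above, makes the PGID table base follow the port
 * count: the first n_ports entries are the per-port unicast destinations, so
 * an abstract PGID index (flood masks, CPU, multicast) is translated by
 * adding the per-platform port count. Usage as in the MAC table hunk earlier
 * in this patch:
 */
sparx5_mact_learn(sparx5, sparx5_get_pgid(sparx5, PGID_CPU),
		  addr->sa_data, port->pvid);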
ops->is_port_2g5(port->portno)) return sparx5_port_error(port, conf, SPX5_PERR_SPEED); break; case PHY_INTERFACE_MODE_QSGMII: @@ -316,10 +318,11 @@ static int sparx5_port_flush_poll(struct sparx5 *sparx5, u32 portno) static int sparx5_port_disable(struct sparx5 *sparx5, struct sparx5_port *port, bool high_spd_dev) { u32 tinst = high_spd_dev ? - sparx5_port_dev_index(port->portno) : port->portno; + sparx5_port_dev_index(sparx5, port->portno) : port->portno; u32 dev = high_spd_dev ? - sparx5_to_high_dev(port->portno) : TARGET_DEV2G5; + sparx5_to_high_dev(sparx5, port->portno) : TARGET_DEV2G5; void __iomem *devinst = spx5_inst_get(sparx5, dev, tinst); + const struct sparx5_ops *ops = sparx5->data->ops; u32 spd = port->conf.speed; u32 spd_prm; int err; @@ -427,7 +430,7 @@ static int sparx5_port_disable(struct sparx5 *sparx5, struct sparx5_port *port, HSCH_FLUSH_CTRL); if (high_spd_dev) { - u32 pcs = sparx5_to_pcs_dev(port->portno); + u32 pcs = sparx5_to_pcs_dev(sparx5, port->portno); void __iomem *pcsinst = spx5_inst_get(sparx5, pcs, tinst); /* 12: Disable 5G/10G/25 BaseR PCS */ @@ -436,7 +439,7 @@ static int sparx5_port_disable(struct sparx5 *sparx5, struct sparx5_port *port, pcsinst, PCS10G_BR_PCS_CFG(0)); - if (sparx5_port_is_25g(port->portno)) + if (ops->is_port_25g(port->portno)) /* Disable 25G PCS */ spx5_rmw(DEV25G_PCS25G_CFG_PCS25G_ENA_SET(0), DEV25G_PCS25G_CFG_PCS25G_ENA, @@ -513,9 +516,8 @@ static int sparx5_port_fifo_sz(struct sparx5 *sparx5, /* Configure port muxing: * QSGMII: 4x2G5 devices */ -static int sparx5_port_mux_set(struct sparx5 *sparx5, - struct sparx5_port *port, - struct sparx5_port_config *conf) +int sparx5_port_mux_set(struct sparx5 *sparx5, struct sparx5_port *port, + struct sparx5_port_config *conf) { u32 portno = port->portno; u32 inst; @@ -558,9 +560,10 @@ static int sparx5_port_max_tags_set(struct sparx5 *sparx5, bool dtag = max_tags == SPX5_PORT_MAX_TAGS_TWO; enum sparx5_vlan_port_type vlan_type = port->vlan_type; bool dotag = max_tags != SPX5_PORT_MAX_TAGS_NONE; - u32 dev = sparx5_to_high_dev(port->portno); - u32 tinst = sparx5_port_dev_index(port->portno); + u32 dev = sparx5_to_high_dev(sparx5, port->portno); + u32 tinst = sparx5_port_dev_index(sparx5, port->portno); void __iomem *inst = spx5_inst_get(sparx5, dev, tinst); + const struct sparx5_ops *ops = sparx5->data->ops; u32 etype; etype = (vlan_type == SPX5_VLAN_PORT_TYPE_S_CUSTOM ? @@ -575,7 +578,7 @@ static int sparx5_port_max_tags_set(struct sparx5 *sparx5, sparx5, DEV2G5_MAC_TAGS_CFG(port->portno)); - if (sparx5_port_is_2g5(port->portno)) + if (ops->is_port_2g5(port->portno)) return 0; spx5_inst_rmw(DEV10G_MAC_TAGS_CFG_TAG_ID_SET(etype) | @@ -789,9 +792,9 @@ static int sparx5_port_pcs_high_set(struct sparx5 *sparx5, struct sparx5_port_config *conf) { u32 clk_spd = conf->portmode == PHY_INTERFACE_MODE_5GBASER ? 
1 : 0; - u32 pix = sparx5_port_dev_index(port->portno); - u32 dev = sparx5_to_high_dev(port->portno); - u32 pcs = sparx5_to_pcs_dev(port->portno); + u32 pix = sparx5_port_dev_index(sparx5, port->portno); + u32 dev = sparx5_to_high_dev(sparx5, port->portno); + u32 pcs = sparx5_to_pcs_dev(sparx5, port->portno); void __iomem *devinst; void __iomem *pcsinst; int err; @@ -843,19 +846,22 @@ static int sparx5_port_pcs_high_set(struct sparx5 *sparx5, /* Switch between 1G/2500 and 5G/10G/25G devices */ static void sparx5_dev_switch(struct sparx5 *sparx5, int port, bool hsd) { - int bt_indx = BIT(sparx5_port_dev_index(port)); + const struct sparx5_ops *ops = sparx5->data->ops; + int bt_indx; + + bt_indx = BIT(ops->get_port_dev_bit(sparx5, port)); - if (sparx5_port_is_5g(port)) { + if (ops->is_port_5g(port)) { spx5_rmw(hsd ? 0 : bt_indx, bt_indx, sparx5, PORT_CONF_DEV5G_MODES); - } else if (sparx5_port_is_10g(port)) { + } else if (ops->is_port_10g(port)) { spx5_rmw(hsd ? 0 : bt_indx, bt_indx, sparx5, PORT_CONF_DEV10G_MODES); - } else if (sparx5_port_is_25g(port)) { + } else if (ops->is_port_25g(port)) { spx5_rmw(hsd ? 0 : bt_indx, bt_indx, sparx5, @@ -1016,9 +1022,10 @@ int sparx5_port_init(struct sparx5 *sparx5, { u32 pause_start = sparx5_wm_enc(6 * (ETH_MAXLEN / SPX5_BUFFER_CELL_SZ)); u32 atop = sparx5_wm_enc(20 * (ETH_MAXLEN / SPX5_BUFFER_CELL_SZ)); - u32 devhigh = sparx5_to_high_dev(port->portno); - u32 pix = sparx5_port_dev_index(port->portno); - u32 pcs = sparx5_to_pcs_dev(port->portno); + const struct sparx5_ops *ops = sparx5->data->ops; + u32 devhigh = sparx5_to_high_dev(sparx5, port->portno); + u32 pix = sparx5_port_dev_index(sparx5, port->portno); + u32 pcs = sparx5_to_pcs_dev(sparx5, port->portno); bool sd_pol = port->signd_active_high; bool sd_sel = !port->signd_internal; bool sd_ena = port->signd_enable; @@ -1031,7 +1038,7 @@ int sparx5_port_init(struct sparx5 *sparx5, pcsinst = spx5_inst_get(sparx5, pcs, pix); /* Set the mux port mode */ - err = sparx5_port_mux_set(sparx5, port, conf); + err = ops->set_port_mux(sparx5, port, conf); if (err) return err; @@ -1082,7 +1089,7 @@ int sparx5_port_init(struct sparx5 *sparx5, if (err) return err; - if (!sparx5_port_is_2g5(port->portno)) + if (!ops->is_port_2g5(port->portno)) /* Enable shadow device */ spx5_rmw(DSM_DEV_TX_STOP_WM_CFG_DEV10G_SHADOW_ENA_SET(1), DSM_DEV_TX_STOP_WM_CFG_DEV10G_SHADOW_ENA, @@ -1105,7 +1112,7 @@ int sparx5_port_init(struct sparx5 *sparx5, sparx5, DEV2G5_MAC_IFG_CFG(port->portno)); - if (sparx5_port_is_2g5(port->portno)) + if (ops->is_port_2g5(port->portno)) return 0; /* Low speed device only - return */ /* Now setup the high speed device */ @@ -1128,7 +1135,7 @@ int sparx5_port_init(struct sparx5 *sparx5, pcsinst, PCS10G_BR_PCS_SD_CFG(0)); - if (sparx5_port_is_25g(port->portno)) { + if (ops->is_port_25g(port->portno)) { /* Handle Signal Detect in 25G PCS */ spx5_wr(DEV25G_PCS25G_SD_CFG_SD_POL_SET(sd_pol) | DEV25G_PCS25G_SD_CFG_SD_SEL_SET(sd_sel) | @@ -1345,3 +1352,8 @@ int sparx5_port_qos_default_set(const struct sparx5_port *port, return 0; } + +int sparx5_get_internal_port(struct sparx5 *sparx5, int port) +{ + return sparx5->data->consts->n_ports + port; +} diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_port.h b/drivers/net/ethernet/microchip/sparx5/sparx5_port.h index 607c4ff1df6b..9b9bcc6834bc 100644 --- a/drivers/net/ethernet/microchip/sparx5/sparx5_port.h +++ b/drivers/net/ethernet/microchip/sparx5/sparx5_port.h @@ -40,25 +40,29 @@ static inline bool sparx5_port_is_25g(int portno) return portno >= 56 && 
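/* Port capability checks, device indexing and port muxing are now dispatched
 * through a per-platform ops table. An illustrative subset of struct
 * sparx5_ops, with signatures inferred from the call sites above:
 */
struct sparx5_ops {
	bool (*is_port_2g5)(int portno);
	bool (*is_port_5g)(int portno);
	bool (*is_port_10g)(int portno);
	bool (*is_port_25g)(int portno);
	u32  (*get_port_dev_index)(struct sparx5 *sparx5, int port);
	u32  (*get_port_dev_bit)(struct sparx5 *sparx5, int port);
	int  (*set_port_mux)(struct sparx5 *sparx5, struct sparx5_port *port,
			     struct sparx5_port_config *conf);
	struct sparx5_sdlb_group *(*get_sdlb_group)(int idx);
	u32  (*get_hsch_max_group_rate)(int grp);
};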
portno <= 63; } -static inline u32 sparx5_to_high_dev(int port) +static inline u32 sparx5_to_high_dev(struct sparx5 *sparx5, int port) { - if (sparx5_port_is_5g(port)) + const struct sparx5_ops *ops = sparx5->data->ops; + + if (ops->is_port_5g(port)) return TARGET_DEV5G; - if (sparx5_port_is_10g(port)) + if (ops->is_port_10g(port)) return TARGET_DEV10G; return TARGET_DEV25G; } -static inline u32 sparx5_to_pcs_dev(int port) +static inline u32 sparx5_to_pcs_dev(struct sparx5 *sparx5, int port) { - if (sparx5_port_is_5g(port)) + const struct sparx5_ops *ops = sparx5->data->ops; + + if (ops->is_port_5g(port)) return TARGET_PCS5G_BR; - if (sparx5_port_is_10g(port)) + if (ops->is_port_10g(port)) return TARGET_PCS10G_BR; return TARGET_PCS25G_BR; } -static inline int sparx5_port_dev_index(int port) +static inline u32 sparx5_port_dev_mapping(struct sparx5 *sparx5, int port) { if (sparx5_port_is_2g5(port)) return port; @@ -70,6 +74,11 @@ static inline int sparx5_port_dev_index(int port) return (port - 56); } +static inline u32 sparx5_port_dev_index(struct sparx5 *sparx5, int port) +{ + return sparx5->data->ops->get_port_dev_index(sparx5, port); +} + int sparx5_port_init(struct sparx5 *sparx5, struct sparx5_port *spx5_port, struct sparx5_port_config *conf); diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_psfp.c b/drivers/net/ethernet/microchip/sparx5/sparx5_psfp.c index 8dee1ab1fa75..cd4f42c3f7eb 100644 --- a/drivers/net/ethernet/microchip/sparx5/sparx5_psfp.c +++ b/drivers/net/ethernet/microchip/sparx5/sparx5_psfp.c @@ -20,36 +20,40 @@ static struct sparx5_pool_entry sparx5_psfp_sg_pool[SPX5_PSFP_SG_CNT]; /* Pool of available stream filters */ static struct sparx5_pool_entry sparx5_psfp_sf_pool[SPX5_PSFP_SF_CNT]; -static int sparx5_psfp_sf_get(u32 *id) +static int sparx5_psfp_sf_get(struct sparx5 *sparx5, u32 *id) { - return sparx5_pool_get(sparx5_psfp_sf_pool, SPX5_PSFP_SF_CNT, id); + return sparx5_pool_get(sparx5_psfp_sf_pool, + sparx5->data->consts->n_filters, id); } -static int sparx5_psfp_sf_put(u32 id) +static int sparx5_psfp_sf_put(struct sparx5 *sparx5, u32 id) { - return sparx5_pool_put(sparx5_psfp_sf_pool, SPX5_PSFP_SF_CNT, id); + return sparx5_pool_put(sparx5_psfp_sf_pool, + sparx5->data->consts->n_filters, id); } -static int sparx5_psfp_sg_get(u32 idx, u32 *id) +static int sparx5_psfp_sg_get(struct sparx5 *sparx5, u32 idx, u32 *id) { - return sparx5_pool_get_with_idx(sparx5_psfp_sg_pool, SPX5_PSFP_SG_CNT, - idx, id); + return sparx5_pool_get_with_idx(sparx5_psfp_sg_pool, + sparx5->data->consts->n_gates, idx, id); } -static int sparx5_psfp_sg_put(u32 id) +static int sparx5_psfp_sg_put(struct sparx5 *sparx5, u32 id) { - return sparx5_pool_put(sparx5_psfp_sg_pool, SPX5_PSFP_SG_CNT, id); + return sparx5_pool_put(sparx5_psfp_sg_pool, + sparx5->data->consts->n_gates, id); } -static int sparx5_psfp_fm_get(u32 idx, u32 *id) +static int sparx5_psfp_fm_get(struct sparx5 *sparx5, u32 idx, u32 *id) { - return sparx5_pool_get_with_idx(sparx5_psfp_fm_pool, SPX5_SDLB_CNT, idx, - id); + return sparx5_pool_get_with_idx(sparx5_psfp_fm_pool, + sparx5->data->consts->n_sdlbs, idx, id); } -static int sparx5_psfp_fm_put(u32 id) +static int sparx5_psfp_fm_put(struct sparx5 *sparx5, u32 id) { - return sparx5_pool_put(sparx5_psfp_fm_pool, SPX5_SDLB_CNT, id); + return sparx5_pool_put(sparx5_psfp_fm_pool, + sparx5->data->consts->n_sdlbs, id); } u32 sparx5_psfp_isdx_get_sf(struct sparx5 *sparx5, u32 isdx) @@ -205,7 +209,7 @@ int sparx5_psfp_sf_add(struct sparx5 *sparx5, const struct sparx5_psfp_sf *sf, { 
int ret; - ret = sparx5_psfp_sf_get(id); + ret = sparx5_psfp_sf_get(sparx5, id); if (ret < 0) return ret; @@ -220,7 +224,7 @@ int sparx5_psfp_sf_del(struct sparx5 *sparx5, u32 id) sparx5_psfp_sf_set(sparx5, id, &sf); - return sparx5_psfp_sf_put(id); + return sparx5_psfp_sf_put(sparx5, id); } int sparx5_psfp_sg_add(struct sparx5 *sparx5, u32 uidx, @@ -229,7 +233,7 @@ int sparx5_psfp_sg_add(struct sparx5 *sparx5, u32 uidx, ktime_t basetime; int ret; - ret = sparx5_psfp_sg_get(uidx, id); + ret = sparx5_psfp_sg_get(sparx5, uidx, id); if (ret < 0) return ret; /* Was already in use, no need to reconfigure */ @@ -253,7 +257,7 @@ int sparx5_psfp_sg_del(struct sparx5 *sparx5, u32 id) const struct sparx5_psfp_sg sg = { 0 }; int ret; - ret = sparx5_psfp_sg_put(id); + ret = sparx5_psfp_sg_put(sparx5, id); if (ret < 0) return ret; /* Stream gate still in use ? */ @@ -270,7 +274,7 @@ int sparx5_psfp_fm_add(struct sparx5 *sparx5, u32 uidx, int ret; /* Get flow meter */ - ret = sparx5_psfp_fm_get(uidx, &fm->pol.idx); + ret = sparx5_psfp_fm_get(sparx5, uidx, &fm->pol.idx); if (ret < 0) return ret; /* Was already in use, no need to reconfigure */ @@ -303,7 +307,7 @@ int sparx5_psfp_fm_del(struct sparx5 *sparx5, u32 id) if (ret < 0) return ret; - ret = sparx5_psfp_fm_put(id); + ret = sparx5_psfp_fm_put(sparx5, id); if (ret < 0) return ret; /* Do not reset flow-meter if still in use. */ @@ -315,11 +319,12 @@ int sparx5_psfp_fm_del(struct sparx5 *sparx5, u32 id) void sparx5_psfp_init(struct sparx5 *sparx5) { + const struct sparx5_ops *ops = sparx5->data->ops; const struct sparx5_sdlb_group *group; int i; - for (i = 0; i < SPX5_SDLB_GROUP_CNT; i++) { - group = &sdlb_groups[i]; + for (i = 0; i < sparx5->data->consts->n_lb_groups; i++) { + group = ops->get_sdlb_group(i); sparx5_sdlb_group_init(sparx5, group->max_rate, group->min_burst, group->frame_size, i); } diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_ptp.c b/drivers/net/ethernet/microchip/sparx5/sparx5_ptp.c index 5a932460db58..9b15e44f9e64 100644 --- a/drivers/net/ethernet/microchip/sparx5/sparx5_ptp.c +++ b/drivers/net/ethernet/microchip/sparx5/sparx5_ptp.c @@ -274,6 +274,7 @@ static void sparx5_get_hwtimestamp(struct sparx5 *sparx5, u32 nsec) { /* Read current PTP time to get seconds */ + const struct sparx5_consts *consts = sparx5->data->consts; unsigned long flags; u32 curr_nsec; @@ -285,10 +286,10 @@ static void sparx5_get_hwtimestamp(struct sparx5 *sparx5, PTP_PTP_PIN_CFG_PTP_PIN_ACTION | PTP_PTP_PIN_CFG_PTP_PIN_DOM | PTP_PTP_PIN_CFG_PTP_PIN_SYNC, - sparx5, PTP_PTP_PIN_CFG(TOD_ACC_PIN)); + sparx5, PTP_PTP_PIN_CFG(consts->tod_pin)); - ts->tv_sec = spx5_rd(sparx5, PTP_PTP_TOD_SEC_LSB(TOD_ACC_PIN)); - curr_nsec = spx5_rd(sparx5, PTP_PTP_TOD_NSEC(TOD_ACC_PIN)); + ts->tv_sec = spx5_rd(sparx5, PTP_PTP_TOD_SEC_LSB(consts->tod_pin)); + curr_nsec = spx5_rd(sparx5, PTP_PTP_TOD_NSEC(consts->tod_pin)); ts->tv_nsec = nsec; @@ -440,8 +441,11 @@ static int sparx5_ptp_settime64(struct ptp_clock_info *ptp, { struct sparx5_phc *phc = container_of(ptp, struct sparx5_phc, info); struct sparx5 *sparx5 = phc->sparx5; + const struct sparx5_consts *consts; unsigned long flags; + consts = sparx5->data->consts; + spin_lock_irqsave(&sparx5->ptp_clock_lock, flags); /* Must be in IDLE mode before the time can be loaded */ @@ -451,14 +455,14 @@ static int sparx5_ptp_settime64(struct ptp_clock_info *ptp, PTP_PTP_PIN_CFG_PTP_PIN_ACTION | PTP_PTP_PIN_CFG_PTP_PIN_DOM | PTP_PTP_PIN_CFG_PTP_PIN_SYNC, - sparx5, PTP_PTP_PIN_CFG(TOD_ACC_PIN)); + sparx5, 
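/* The PSFP pools themselves stay statically sized for the largest platform
 * (SPX5_PSFP_SF_CNT and friends); the per-platform counts passed in above
 * only bound how far the allocator searches, so smaller platforms use a
 * prefix of each pool. Assumed shape of the walk behind sparx5_pool_get():
 */
static int sparx5_pool_get(struct sparx5_pool_entry *pool, int size, u32 *id)
{
	int i;

	/* claim the first free entry within the platform's range */
	for (i = 0; i < size; i++) {
		if (!pool[i].ref_cnt) {
			pool[i].ref_cnt = 1;
			*id = i;
			return i;
		}
	}

	return -ENOSPC;
}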
PTP_PTP_PIN_CFG(consts->tod_pin)); /* Set new value */ spx5_wr(PTP_PTP_TOD_SEC_MSB_PTP_TOD_SEC_MSB_SET(upper_32_bits(ts->tv_sec)), - sparx5, PTP_PTP_TOD_SEC_MSB(TOD_ACC_PIN)); + sparx5, PTP_PTP_TOD_SEC_MSB(consts->tod_pin)); spx5_wr(lower_32_bits(ts->tv_sec), - sparx5, PTP_PTP_TOD_SEC_LSB(TOD_ACC_PIN)); - spx5_wr(ts->tv_nsec, sparx5, PTP_PTP_TOD_NSEC(TOD_ACC_PIN)); + sparx5, PTP_PTP_TOD_SEC_LSB(consts->tod_pin)); + spx5_wr(ts->tv_nsec, sparx5, PTP_PTP_TOD_NSEC(consts->tod_pin)); /* Apply new values */ spx5_rmw(PTP_PTP_PIN_CFG_PTP_PIN_ACTION_SET(PTP_PIN_ACTION_LOAD) | @@ -467,7 +471,7 @@ static int sparx5_ptp_settime64(struct ptp_clock_info *ptp, PTP_PTP_PIN_CFG_PTP_PIN_ACTION | PTP_PTP_PIN_CFG_PTP_PIN_DOM | PTP_PTP_PIN_CFG_PTP_PIN_SYNC, - sparx5, PTP_PTP_PIN_CFG(TOD_ACC_PIN)); + sparx5, PTP_PTP_PIN_CFG(consts->tod_pin)); spin_unlock_irqrestore(&sparx5->ptp_clock_lock, flags); @@ -478,10 +482,13 @@ int sparx5_ptp_gettime64(struct ptp_clock_info *ptp, struct timespec64 *ts) { struct sparx5_phc *phc = container_of(ptp, struct sparx5_phc, info); struct sparx5 *sparx5 = phc->sparx5; + const struct sparx5_consts *consts; unsigned long flags; time64_t s; s64 ns; + consts = sparx5->data->consts; + spin_lock_irqsave(&sparx5->ptp_clock_lock, flags); spx5_rmw(PTP_PTP_PIN_CFG_PTP_PIN_ACTION_SET(PTP_PIN_ACTION_SAVE) | @@ -490,12 +497,12 @@ int sparx5_ptp_gettime64(struct ptp_clock_info *ptp, struct timespec64 *ts) PTP_PTP_PIN_CFG_PTP_PIN_ACTION | PTP_PTP_PIN_CFG_PTP_PIN_DOM | PTP_PTP_PIN_CFG_PTP_PIN_SYNC, - sparx5, PTP_PTP_PIN_CFG(TOD_ACC_PIN)); + sparx5, PTP_PTP_PIN_CFG(consts->tod_pin)); - s = spx5_rd(sparx5, PTP_PTP_TOD_SEC_MSB(TOD_ACC_PIN)); + s = spx5_rd(sparx5, PTP_PTP_TOD_SEC_MSB(consts->tod_pin)); s <<= 32; - s |= spx5_rd(sparx5, PTP_PTP_TOD_SEC_LSB(TOD_ACC_PIN)); - ns = spx5_rd(sparx5, PTP_PTP_TOD_NSEC(TOD_ACC_PIN)); + s |= spx5_rd(sparx5, PTP_PTP_TOD_SEC_LSB(consts->tod_pin)); + ns = spx5_rd(sparx5, PTP_PTP_TOD_NSEC(consts->tod_pin)); ns &= PTP_PTP_TOD_NSEC_PTP_TOD_NSEC; spin_unlock_irqrestore(&sparx5->ptp_clock_lock, flags); @@ -515,6 +522,9 @@ static int sparx5_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta) { struct sparx5_phc *phc = container_of(ptp, struct sparx5_phc, info); struct sparx5 *sparx5 = phc->sparx5; + const struct sparx5_consts *consts; + + consts = sparx5->data->consts; if (delta > -(NSEC_PER_SEC / 2) && delta < (NSEC_PER_SEC / 2)) { unsigned long flags; @@ -528,10 +538,10 @@ static int sparx5_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta) PTP_PTP_PIN_CFG_PTP_PIN_ACTION | PTP_PTP_PIN_CFG_PTP_PIN_DOM | PTP_PTP_PIN_CFG_PTP_PIN_SYNC, - sparx5, PTP_PTP_PIN_CFG(TOD_ACC_PIN)); + sparx5, PTP_PTP_PIN_CFG(consts->tod_pin)); spx5_wr(PTP_PTP_TOD_NSEC_PTP_TOD_NSEC_SET(delta), - sparx5, PTP_PTP_TOD_NSEC(TOD_ACC_PIN)); + sparx5, PTP_PTP_TOD_NSEC(consts->tod_pin)); /* Adjust time with the value of PTP_TOD_NSEC */ spx5_rmw(PTP_PTP_PIN_CFG_PTP_PIN_ACTION_SET(PTP_PIN_ACTION_DELTA) | @@ -540,7 +550,7 @@ static int sparx5_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta) PTP_PTP_PIN_CFG_PTP_PIN_ACTION | PTP_PTP_PIN_CFG_PTP_PIN_DOM | PTP_PTP_PIN_CFG_PTP_PIN_SYNC, - sparx5, PTP_PTP_PIN_CFG(TOD_ACC_PIN)); + sparx5, PTP_PTP_PIN_CFG(consts->tod_pin)); spin_unlock_irqrestore(&sparx5->ptp_clock_lock, flags); } else { @@ -630,7 +640,7 @@ int sparx5_ptp_init(struct sparx5 *sparx5) /* Enable master counters */ spx5_wr(PTP_PTP_DOM_CFG_PTP_ENA_SET(0x7), sparx5, PTP_PTP_DOM_CFG); - for (i = 0; i < SPX5_PORTS; i++) { + for (i = 0; i < sparx5->data->consts->n_ports; i++) { port = sparx5->ports[i]; if 
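/* TOD_ACC_PIN was a fixed pin index; the PTP pin reserved for time-of-day
 * register access is now platform data. The read side, as in
 * sparx5_ptp_gettime64() above:
 */
const struct sparx5_consts *consts = sparx5->data->consts;

s = spx5_rd(sparx5, PTP_PTP_TOD_SEC_MSB(consts->tod_pin));
s <<= 32;
s |= spx5_rd(sparx5, PTP_PTP_TOD_SEC_LSB(consts->tod_pin));
ns = spx5_rd(sparx5, PTP_PTP_TOD_NSEC(consts->tod_pin));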
(!port) continue; @@ -646,7 +656,7 @@ void sparx5_ptp_deinit(struct sparx5 *sparx5) struct sparx5_port *port; int i; - for (i = 0; i < SPX5_PORTS; i++) { + for (i = 0; i < sparx5->data->consts->n_ports; i++) { port = sparx5->ports[i]; if (!port) continue; diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_qos.c b/drivers/net/ethernet/microchip/sparx5/sparx5_qos.c index 5f34febaee6b..d065f8c40d37 100644 --- a/drivers/net/ethernet/microchip/sparx5/sparx5_qos.c +++ b/drivers/net/ethernet/microchip/sparx5/sparx5_qos.c @@ -74,6 +74,11 @@ static const u32 spx5_hsch_max_group_rate[SPX5_HSCH_LEAK_GRP_CNT] = { 26214200 /* 26.214 Gbps */ }; +u32 sparx5_get_hsch_max_group_rate(int grp) +{ + return spx5_hsch_max_group_rate[grp]; +} + static struct sparx5_layer layers[SPX5_HSCH_LAYER_CNT]; static u32 sparx5_lg_get_leak_time(struct sparx5 *sparx5, u32 layer, u32 group) @@ -385,6 +390,7 @@ static int sparx5_dwrr_conf_set(struct sparx5_port *port, static int sparx5_leak_groups_init(struct sparx5 *sparx5) { + const struct sparx5_ops *ops = sparx5->data->ops; struct sparx5_layer *layer; u32 sys_clk_per_100ps; struct sparx5_lg *lg; @@ -397,7 +403,7 @@ static int sparx5_leak_groups_init(struct sparx5 *sparx5) layer = &layers[i]; for (ii = 0; ii < SPX5_HSCH_LEAK_GRP_CNT; ii++) { lg = &layer->leak_groups[ii]; - lg->max_rate = spx5_hsch_max_group_rate[ii]; + lg->max_rate = ops->get_hsch_max_group_rate(i); /* Calculate the leak time in us, to serve a maximum * rate of 'max_rate' for this group diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_qos.h b/drivers/net/ethernet/microchip/sparx5/sparx5_qos.h index ced35033a6c5..1231a80335d7 100644 --- a/drivers/net/ethernet/microchip/sparx5/sparx5_qos.h +++ b/drivers/net/ethernet/microchip/sparx5/sparx5_qos.h @@ -79,4 +79,6 @@ int sparx5_tc_ets_add(struct sparx5_port *port, int sparx5_tc_ets_del(struct sparx5_port *port); +u32 sparx5_get_hsch_max_group_rate(int grp); + #endif /* __SPARX5_QOS_H__ */ diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_regs.c b/drivers/net/ethernet/microchip/sparx5/sparx5_regs.c new file mode 100644 index 000000000000..1db212ce3df7 --- /dev/null +++ b/drivers/net/ethernet/microchip/sparx5/sparx5_regs.c @@ -0,0 +1,219 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* Microchip Sparx5 Switch driver + * + * Copyright (c) 2024 Microchip Technology Inc. + */ + +/* This file is autogenerated by cml-utils 2024-09-24 14:02:24 +0200. 
+ * Commit ID: 9d07b8d19363f3cd3590ddb3f7a2e2768e16524b + */ + +#include "sparx5_regs.h" + +const unsigned int sparx5_tsize[TSIZE_LAST] = { + [TC_DEV10G] = 12, + [TC_DEV2G5] = 65, + [TC_DEV5G] = 13, + [TC_PCS10G_BR] = 12, + [TC_PCS5G_BR] = 13, +}; + +const unsigned int sparx5_raddr[RADDR_LAST] = { + [RA_CPU_PROC_CTRL] = 176, + [RA_GCB_SOFT_RST] = 8, + [RA_GCB_HW_SGPIO_TO_SD_MAP_CFG] = 24, +}; + +const unsigned int sparx5_rcnt[RCNT_LAST] = { + [RC_ANA_AC_OWN_UPSID] = 3, + [RC_ANA_ACL_VCAP_S2_CFG] = 70, + [RC_ANA_ACL_OWN_UPSID] = 3, + [RC_ANA_CL_OWN_UPSID] = 3, + [RC_ANA_L2_OWN_UPSID] = 3, + [RC_ASM_PORT_CFG] = 67, + [RC_DSM_BUF_CFG] = 67, + [RC_DSM_DEV_TX_STOP_WM_CFG] = 67, + [RC_DSM_RX_PAUSE_CFG] = 67, + [RC_DSM_MAC_CFG] = 67, + [RC_DSM_MAC_ADDR_BASE_HIGH_CFG] = 65, + [RC_DSM_MAC_ADDR_BASE_LOW_CFG] = 65, + [RC_DSM_TAXI_CAL_CFG] = 9, + [RC_GCB_HW_SGPIO_TO_SD_MAP_CFG] = 65, + [RC_HSCH_PORT_MODE] = 70, + [RC_QFWD_SWITCH_PORT_MODE] = 70, + [RC_QSYS_PAUSE_CFG] = 70, + [RC_QSYS_ATOP] = 70, + [RC_QSYS_FWD_PRESSURE] = 70, + [RC_QSYS_CAL_AUTO] = 7, + [RC_REW_OWN_UPSID] = 3, + [RC_REW_RTAG_ETAG_CTRL] = 70, +}; + +const unsigned int sparx5_gaddr[GADDR_LAST] = { + [GA_ANA_AC_RAM_CTRL] = 839108, + [GA_ANA_AC_PS_COMMON] = 894472, + [GA_ANA_AC_MIRROR_PROBE] = 893696, + [GA_ANA_AC_SRC] = 849920, + [GA_ANA_AC_PGID] = 786432, + [GA_ANA_AC_TSN_SF] = 839136, + [GA_ANA_AC_TSN_SF_CFG] = 839680, + [GA_ANA_AC_TSN_SF_STATUS] = 839072, + [GA_ANA_AC_SG_ACCESS] = 839140, + [GA_ANA_AC_SG_CONFIG] = 851584, + [GA_ANA_AC_SG_STATUS] = 839088, + [GA_ANA_AC_SG_STATUS_STICKY] = 839152, + [GA_ANA_AC_STAT_GLOBAL_CFG_PORT] = 851552, + [GA_ANA_AC_STAT_CNT_CFG_PORT] = 843776, + [GA_ANA_AC_STAT_GLOBAL_CFG_ACL] = 893792, + [GA_ANA_ACL_COMMON] = 32768, + [GA_ANA_ACL_KEY_SEL] = 34200, + [GA_ANA_ACL_CNT_B] = 16384, + [GA_ANA_ACL_STICKY] = 36408, + [GA_ANA_AC_POL_POL_ALL_CFG] = 75968, + [GA_ANA_AC_POL_COMMON_BDLB] = 79048, + [GA_ANA_AC_POL_COMMON_BUM_SLB] = 79056, + [GA_ANA_AC_SDLB_LBGRP_TBL] = 295468, + [GA_ANA_CL_PORT] = 131072, + [GA_ANA_CL_COMMON] = 166912, + [GA_ANA_L2_COMMON] = 566024, + [GA_ANA_L3_COMMON] = 493632, + [GA_ANA_L3_VLAN_ARP_L3MC_STICKY] = 491460, + [GA_ASM_CFG] = 33280, + [GA_ASM_PFC_TIMER_CFG] = 34716, + [GA_ASM_LBK_WM_CFG] = 34744, + [GA_ASM_LBK_MISC_CFG] = 34756, + [GA_ASM_RAM_CTRL] = 34832, + [GA_EACL_ES2_KEY_SELECT_PROFILE] = 149504, + [GA_EACL_CNT_TBL] = 122880, + [GA_EACL_POL_CFG] = 150608, + [GA_EACL_ES2_STICKY] = 118696, + [GA_EACL_RAM_CTRL] = 118736, + [GA_GCB_SIO_CTRL] = 876, + [GA_HSCH_HSCH_DWRR] = 162816, + [GA_HSCH_HSCH_MISC] = 163104, + [GA_HSCH_HSCH_LEAK_LISTS] = 161664, + [GA_HSCH_SYSTEM] = 184000, + [GA_HSCH_MMGT] = 162368, + [GA_HSCH_TAS_CONFIG] = 162384, + [GA_PTP_PTP_CFG] = 320, + [GA_PTP_PTP_TOD_DOMAINS] = 336, + [GA_PTP_PHASE_DETECTOR_CTRL] = 420, + [GA_QSYS_CALCFG] = 2304, + [GA_QSYS_RAM_CTRL] = 2344, + [GA_REW_COMMON] = 387264, + [GA_REW_PORT] = 360448, + [GA_REW_VOE_PORT_LM_CNT] = 393216, + [GA_REW_RAM_CTRL] = 378696, + [GA_VOP_RAM_CTRL] = 279176, + [GA_XQS_SYSTEM] = 6768, + [GA_XQS_QLIMIT_SHR] = 7936, +}; + +const unsigned int sparx5_gcnt[GCNT_LAST] = { + [GC_ANA_AC_SRC] = 102, + [GC_ANA_AC_PGID] = 3290, + [GC_ANA_AC_TSN_SF_CFG] = 1024, + [GC_ANA_AC_STAT_CNT_CFG_PORT] = 70, + [GC_ANA_ACL_KEY_SEL] = 134, + [GC_ANA_ACL_CNT_A] = 4096, + [GC_ANA_ACL_CNT_B] = 4096, + [GC_ANA_AC_SDLB_LBGRP_TBL] = 10, + [GC_ANA_AC_SDLB_LBSET_TBL] = 4616, + [GC_ANA_CL_PORT] = 70, + [GC_ANA_L2_ISDX_LIMIT] = 1536, + [GC_ANA_L2_ISDX] = 4096, + [GC_ANA_L3_VLAN] = 5120, + [GC_ASM_DEV_STATISTICS] = 65, + 
[GC_EACL_ES2_KEY_SELECT_PROFILE] = 138, + [GC_EACL_CNT_TBL] = 2048, + [GC_GCB_SIO_CTRL] = 3, + [GC_HSCH_HSCH_CFG] = 5040, + [GC_HSCH_HSCH_DWRR] = 72, + [GC_PTP_PTP_PINS] = 5, + [GC_PTP_PHASE_DETECTOR_CTRL] = 5, + [GC_REW_PORT] = 70, + [GC_REW_VOE_PORT_LM_CNT] = 520, +}; + +const unsigned int sparx5_gsize[GSIZE_LAST] = { + [GW_ANA_AC_SRC] = 16, + [GW_ANA_L2_COMMON] = 700, + [GW_ASM_CFG] = 1088, + [GW_CPU_CPU_REGS] = 204, + [GW_FDMA_FDMA] = 428, + [GW_GCB_CHIP_REGS] = 424, + [GW_HSCH_TAS_CONFIG] = 12, + [GW_PTP_PHASE_DETECTOR_CTRL] = 8, + [GW_QSYS_PAUSE_CFG] = 1128, +}; + +const unsigned int sparx5_fpos[FPOS_LAST] = { + [FP_CPU_PROC_CTRL_AARCH64_MODE_ENA] = 12, + [FP_CPU_PROC_CTRL_L2_RST_INVALIDATE_DIS] = 11, + [FP_CPU_PROC_CTRL_L1_RST_INVALIDATE_DIS] = 10, + [FP_CPU_PROC_CTRL_BE_EXCEP_MODE] = 9, + [FP_CPU_PROC_CTRL_VINITHI] = 8, + [FP_CPU_PROC_CTRL_CFGTE] = 7, + [FP_CPU_PROC_CTRL_CP15S_DISABLE] = 6, + [FP_CPU_PROC_CTRL_PROC_CRYPTO_DISABLE] = 5, + [FP_CPU_PROC_CTRL_L2_FLUSH_REQ] = 1, + [FP_FDMA_CH_CFG_CH_XTR_STATUS_MODE] = 7, + [FP_FDMA_CH_CFG_CH_INTR_DB_EOF_ONLY] = 6, + [FP_FDMA_CH_CFG_CH_INJ_PORT] = 5, + [FP_PTP_PTP_PIN_CFG_PTP_PIN_ACTION] = 26, + [FP_PTP_PTP_PIN_CFG_PTP_PIN_SYNC] = 24, + [FP_PTP_PTP_PIN_CFG_PTP_PIN_INV_POL] = 23, + [FP_PTP_PHAD_CTRL_PHAD_ENA] = 7, + [FP_PTP_PHAD_CTRL_PHAD_FAILED] = 6, +}; + +const unsigned int sparx5_fsize[FSIZE_LAST] = { + [FW_ANA_AC_PROBE_PORT_CFG_PROBE_PORT_MASK] = 32, + [FW_ANA_AC_SRC_CFG_PORT_MASK] = 32, + [FW_ANA_AC_PGID_CFG_PORT_MASK] = 32, + [FW_ANA_AC_TSN_SF_PORT_NUM] = 9, + [FW_ANA_AC_TSN_SF_CFG_TSN_SGID] = 10, + [FW_ANA_AC_TSN_SF_STATUS_TSN_SFID] = 10, + [FW_ANA_AC_SG_ACCESS_CTRL_SGID] = 10, + [FW_ANA_AC_PORT_SGE_CFG_MASK] = 16, + [FW_ANA_AC_SDLB_XLB_START_LBSET_START] = 13, + [FW_ANA_AC_SDLB_LBGRP_MISC_THRES_SHIFT] = 5, + [FW_ANA_AC_SDLB_LBGRP_STATE_TBL_PUP_LBSET_NEXT] = 13, + [FW_ANA_AC_SDLB_XLB_NEXT_LBSET_NEXT] = 13, + [FW_ANA_AC_SDLB_XLB_NEXT_LBGRP] = 4, + [FW_ANA_AC_SDLB_INH_LBSET_ADDR_INH_LBSET_ADDR] = 13, + [FW_ANA_L2_AUTO_LRN_CFG_AUTO_LRN_ENA] = 32, + [FW_ANA_L2_DLB_CFG_DLB_IDX] = 13, + [FW_ANA_L2_TSN_CFG_TSN_SFID] = 10, + [FW_ANA_L3_VLAN_MASK_CFG_VLAN_PORT_MASK] = 32, + [FW_FDMA_CH_CFG_CH_DCB_DB_CNT] = 4, + [FW_GCB_HW_SGPIO_TO_SD_MAP_CFG_SGPIO_TO_SD_SEL] = 9, + [FW_HSCH_SE_CFG_SE_DWRR_CNT] = 7, + [FW_HSCH_SE_CONNECT_SE_LEAK_LINK] = 16, + [FW_HSCH_SE_DLB_SENSE_SE_DLB_DPORT] = 7, + [FW_HSCH_HSCH_CFG_CFG_CFG_SE_IDX] = 13, + [FW_HSCH_HSCH_LEAK_CFG_LEAK_FIRST] = 16, + [FW_HSCH_FLUSH_CTRL_FLUSH_PORT] = 7, + [FW_HSCH_FLUSH_CTRL_FLUSH_HIER] = 16, + [FW_LRN_COMMON_ACCESS_CTRL_CPU_ACCESS_DIRECT_ROW] = 14, + [FW_LRN_MAC_ACCESS_CFG_3_MAC_ENTRY_ISDX_LIMIT_IDX] = 11, + [FW_LRN_AUTOAGE_CFG_2_NEXT_ROW] = 14, + [FW_PTP_PTP_PIN_INTR_INTR_PTP] = 5, + [FW_PTP_PTP_PIN_INTR_ENA_INTR_PTP_ENA] = 5, + [FW_PTP_PTP_INTR_IDENT_INTR_PTP_IDENT] = 5, + [FW_PTP_PTP_PIN_CFG_PTP_PIN_SELECT] = 2, + [FW_QFWD_FRAME_COPY_CFG_FRMC_PORT_VAL] = 7, + [FW_QRES_RES_CFG_WM_HIGH] = 12, + [FW_QRES_RES_STAT_MAXUSE] = 21, + [FW_QRES_RES_STAT_CUR_INUSE] = 21, + [FW_QSYS_PAUSE_CFG_PAUSE_START] = 12, + [FW_QSYS_PAUSE_CFG_PAUSE_STOP] = 12, + [FW_QSYS_ATOP_ATOP] = 12, + [FW_QSYS_ATOP_TOT_CFG_ATOP_TOT] = 12, + [FW_REW_RTAG_ETAG_CTRL_IPE_TBL] = 7, + [FW_XQS_STAT_CFG_STAT_VIEW] = 13, + [FW_XQS_QLIMIT_SHR_TOP_CFG_QLIMIT_SHR_TOP] = 15, + [FW_XQS_QLIMIT_SHR_ATOP_CFG_QLIMIT_SHR_ATOP] = 15, + [FW_XQS_QLIMIT_SHR_CTOP_CFG_QLIMIT_SHR_CTOP] = 15, + [FW_XQS_QLIMIT_SHR_QLIM_CFG_QLIMIT_SHR_QLIM] = 15, +}; diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_regs.h 
b/drivers/net/ethernet/microchip/sparx5/sparx5_regs.h new file mode 100644 index 000000000000..c4e8b581c1f3 --- /dev/null +++ b/drivers/net/ethernet/microchip/sparx5/sparx5_regs.h @@ -0,0 +1,244 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* Microchip Sparx5 Switch driver + * + * Copyright (c) 2024 Microchip Technology Inc. + */ + +/* This file is autogenerated by cml-utils 2024-09-24 14:02:24 +0200. + * Commit ID: 9d07b8d19363f3cd3590ddb3f7a2e2768e16524b + */ + +#ifndef _SPARX5_REGS_H_ +#define _SPARX5_REGS_H_ + +/* These enumerated values are used to index the platform specific structs + * containing the addresses, counts, size and positions, of register groups, + * registers and fields. + */ + +enum sparx5_tsize_enum { + TC_DEV10G, + TC_DEV2G5, + TC_DEV5G, + TC_PCS10G_BR, + TC_PCS5G_BR, + TSIZE_LAST, +}; + +enum sparx5_raddr_enum { + RA_CPU_PROC_CTRL, + RA_GCB_SOFT_RST, + RA_GCB_HW_SGPIO_TO_SD_MAP_CFG, + RADDR_LAST, +}; + +enum sparx5_rcnt_enum { + RC_ANA_AC_OWN_UPSID, + RC_ANA_ACL_VCAP_S2_CFG, + RC_ANA_ACL_OWN_UPSID, + RC_ANA_CL_OWN_UPSID, + RC_ANA_L2_OWN_UPSID, + RC_ASM_PORT_CFG, + RC_DSM_BUF_CFG, + RC_DSM_DEV_TX_STOP_WM_CFG, + RC_DSM_RX_PAUSE_CFG, + RC_DSM_MAC_CFG, + RC_DSM_MAC_ADDR_BASE_HIGH_CFG, + RC_DSM_MAC_ADDR_BASE_LOW_CFG, + RC_DSM_TAXI_CAL_CFG, + RC_GCB_HW_SGPIO_TO_SD_MAP_CFG, + RC_HSCH_PORT_MODE, + RC_QFWD_SWITCH_PORT_MODE, + RC_QSYS_PAUSE_CFG, + RC_QSYS_ATOP, + RC_QSYS_FWD_PRESSURE, + RC_QSYS_CAL_AUTO, + RC_REW_OWN_UPSID, + RC_REW_RTAG_ETAG_CTRL, + RCNT_LAST, +}; + +enum sparx5_gaddr_enum { + GA_ANA_AC_RAM_CTRL, + GA_ANA_AC_PS_COMMON, + GA_ANA_AC_MIRROR_PROBE, + GA_ANA_AC_SRC, + GA_ANA_AC_PGID, + GA_ANA_AC_TSN_SF, + GA_ANA_AC_TSN_SF_CFG, + GA_ANA_AC_TSN_SF_STATUS, + GA_ANA_AC_SG_ACCESS, + GA_ANA_AC_SG_CONFIG, + GA_ANA_AC_SG_STATUS, + GA_ANA_AC_SG_STATUS_STICKY, + GA_ANA_AC_STAT_GLOBAL_CFG_PORT, + GA_ANA_AC_STAT_CNT_CFG_PORT, + GA_ANA_AC_STAT_GLOBAL_CFG_ACL, + GA_ANA_ACL_COMMON, + GA_ANA_ACL_KEY_SEL, + GA_ANA_ACL_CNT_B, + GA_ANA_ACL_STICKY, + GA_ANA_AC_POL_POL_ALL_CFG, + GA_ANA_AC_POL_COMMON_BDLB, + GA_ANA_AC_POL_COMMON_BUM_SLB, + GA_ANA_AC_SDLB_LBGRP_TBL, + GA_ANA_CL_PORT, + GA_ANA_CL_COMMON, + GA_ANA_L2_COMMON, + GA_ANA_L3_COMMON, + GA_ANA_L3_VLAN_ARP_L3MC_STICKY, + GA_ASM_CFG, + GA_ASM_PFC_TIMER_CFG, + GA_ASM_LBK_WM_CFG, + GA_ASM_LBK_MISC_CFG, + GA_ASM_RAM_CTRL, + GA_EACL_ES2_KEY_SELECT_PROFILE, + GA_EACL_CNT_TBL, + GA_EACL_POL_CFG, + GA_EACL_ES2_STICKY, + GA_EACL_RAM_CTRL, + GA_GCB_SIO_CTRL, + GA_HSCH_HSCH_DWRR, + GA_HSCH_HSCH_MISC, + GA_HSCH_HSCH_LEAK_LISTS, + GA_HSCH_SYSTEM, + GA_HSCH_MMGT, + GA_HSCH_TAS_CONFIG, + GA_PTP_PTP_CFG, + GA_PTP_PTP_TOD_DOMAINS, + GA_PTP_PHASE_DETECTOR_CTRL, + GA_QSYS_CALCFG, + GA_QSYS_RAM_CTRL, + GA_REW_COMMON, + GA_REW_PORT, + GA_REW_VOE_PORT_LM_CNT, + GA_REW_RAM_CTRL, + GA_VOP_RAM_CTRL, + GA_XQS_SYSTEM, + GA_XQS_QLIMIT_SHR, + GADDR_LAST, +}; + +enum sparx5_gcnt_enum { + GC_ANA_AC_SRC, + GC_ANA_AC_PGID, + GC_ANA_AC_TSN_SF_CFG, + GC_ANA_AC_STAT_CNT_CFG_PORT, + GC_ANA_ACL_KEY_SEL, + GC_ANA_ACL_CNT_A, + GC_ANA_ACL_CNT_B, + GC_ANA_AC_SDLB_LBGRP_TBL, + GC_ANA_AC_SDLB_LBSET_TBL, + GC_ANA_CL_PORT, + GC_ANA_L2_ISDX_LIMIT, + GC_ANA_L2_ISDX, + GC_ANA_L3_VLAN, + GC_ASM_DEV_STATISTICS, + GC_EACL_ES2_KEY_SELECT_PROFILE, + GC_EACL_CNT_TBL, + GC_GCB_SIO_CTRL, + GC_HSCH_HSCH_CFG, + GC_HSCH_HSCH_DWRR, + GC_PTP_PTP_PINS, + GC_PTP_PHASE_DETECTOR_CTRL, + GC_REW_PORT, + GC_REW_VOE_PORT_LM_CNT, + GCNT_LAST, +}; + +enum sparx5_gsize_enum { + GW_ANA_AC_SRC, + GW_ANA_L2_COMMON, + GW_ASM_CFG, + GW_CPU_CPU_REGS, + GW_FDMA_FDMA, + GW_GCB_CHIP_REGS, + 
GW_HSCH_TAS_CONFIG, + GW_PTP_PHASE_DETECTOR_CTRL, + GW_QSYS_PAUSE_CFG, + GSIZE_LAST, +}; + +enum sparx5_fpos_enum { + FP_CPU_PROC_CTRL_AARCH64_MODE_ENA, + FP_CPU_PROC_CTRL_L2_RST_INVALIDATE_DIS, + FP_CPU_PROC_CTRL_L1_RST_INVALIDATE_DIS, + FP_CPU_PROC_CTRL_BE_EXCEP_MODE, + FP_CPU_PROC_CTRL_VINITHI, + FP_CPU_PROC_CTRL_CFGTE, + FP_CPU_PROC_CTRL_CP15S_DISABLE, + FP_CPU_PROC_CTRL_PROC_CRYPTO_DISABLE, + FP_CPU_PROC_CTRL_L2_FLUSH_REQ, + FP_FDMA_CH_CFG_CH_XTR_STATUS_MODE, + FP_FDMA_CH_CFG_CH_INTR_DB_EOF_ONLY, + FP_FDMA_CH_CFG_CH_INJ_PORT, + FP_PTP_PTP_PIN_CFG_PTP_PIN_ACTION, + FP_PTP_PTP_PIN_CFG_PTP_PIN_SYNC, + FP_PTP_PTP_PIN_CFG_PTP_PIN_INV_POL, + FP_PTP_PHAD_CTRL_PHAD_ENA, + FP_PTP_PHAD_CTRL_PHAD_FAILED, + FPOS_LAST, +}; + +enum sparx5_fsize_enum { + FW_ANA_AC_PROBE_PORT_CFG_PROBE_PORT_MASK, + FW_ANA_AC_SRC_CFG_PORT_MASK, + FW_ANA_AC_PGID_CFG_PORT_MASK, + FW_ANA_AC_TSN_SF_PORT_NUM, + FW_ANA_AC_TSN_SF_CFG_TSN_SGID, + FW_ANA_AC_TSN_SF_STATUS_TSN_SFID, + FW_ANA_AC_SG_ACCESS_CTRL_SGID, + FW_ANA_AC_PORT_SGE_CFG_MASK, + FW_ANA_AC_SDLB_XLB_START_LBSET_START, + FW_ANA_AC_SDLB_LBGRP_MISC_THRES_SHIFT, + FW_ANA_AC_SDLB_LBGRP_STATE_TBL_PUP_LBSET_NEXT, + FW_ANA_AC_SDLB_XLB_NEXT_LBSET_NEXT, + FW_ANA_AC_SDLB_XLB_NEXT_LBGRP, + FW_ANA_AC_SDLB_INH_LBSET_ADDR_INH_LBSET_ADDR, + FW_ANA_L2_AUTO_LRN_CFG_AUTO_LRN_ENA, + FW_ANA_L2_DLB_CFG_DLB_IDX, + FW_ANA_L2_TSN_CFG_TSN_SFID, + FW_ANA_L3_VLAN_MASK_CFG_VLAN_PORT_MASK, + FW_FDMA_CH_CFG_CH_DCB_DB_CNT, + FW_GCB_HW_SGPIO_TO_SD_MAP_CFG_SGPIO_TO_SD_SEL, + FW_HSCH_SE_CFG_SE_DWRR_CNT, + FW_HSCH_SE_CONNECT_SE_LEAK_LINK, + FW_HSCH_SE_DLB_SENSE_SE_DLB_DPORT, + FW_HSCH_HSCH_CFG_CFG_CFG_SE_IDX, + FW_HSCH_HSCH_LEAK_CFG_LEAK_FIRST, + FW_HSCH_FLUSH_CTRL_FLUSH_PORT, + FW_HSCH_FLUSH_CTRL_FLUSH_HIER, + FW_LRN_COMMON_ACCESS_CTRL_CPU_ACCESS_DIRECT_ROW, + FW_LRN_MAC_ACCESS_CFG_3_MAC_ENTRY_ISDX_LIMIT_IDX, + FW_LRN_AUTOAGE_CFG_2_NEXT_ROW, + FW_PTP_PTP_PIN_INTR_INTR_PTP, + FW_PTP_PTP_PIN_INTR_ENA_INTR_PTP_ENA, + FW_PTP_PTP_INTR_IDENT_INTR_PTP_IDENT, + FW_PTP_PTP_PIN_CFG_PTP_PIN_SELECT, + FW_QFWD_FRAME_COPY_CFG_FRMC_PORT_VAL, + FW_QRES_RES_CFG_WM_HIGH, + FW_QRES_RES_STAT_MAXUSE, + FW_QRES_RES_STAT_CUR_INUSE, + FW_QSYS_PAUSE_CFG_PAUSE_START, + FW_QSYS_PAUSE_CFG_PAUSE_STOP, + FW_QSYS_ATOP_ATOP, + FW_QSYS_ATOP_TOT_CFG_ATOP_TOT, + FW_REW_RTAG_ETAG_CTRL_IPE_TBL, + FW_XQS_STAT_CFG_STAT_VIEW, + FW_XQS_QLIMIT_SHR_TOP_CFG_QLIMIT_SHR_TOP, + FW_XQS_QLIMIT_SHR_ATOP_CFG_QLIMIT_SHR_ATOP, + FW_XQS_QLIMIT_SHR_CTOP_CFG_QLIMIT_SHR_CTOP, + FW_XQS_QLIMIT_SHR_QLIM_CFG_QLIMIT_SHR_QLIM, + FSIZE_LAST, +}; + +extern const unsigned int sparx5_tsize[TSIZE_LAST]; +extern const unsigned int sparx5_raddr[RADDR_LAST]; +extern const unsigned int sparx5_rcnt[RCNT_LAST]; +extern const unsigned int sparx5_gaddr[GADDR_LAST]; +extern const unsigned int sparx5_gcnt[GCNT_LAST]; +extern const unsigned int sparx5_gsize[GSIZE_LAST]; +extern const unsigned int sparx5_fpos[FPOS_LAST]; +extern const unsigned int sparx5_fsize[FSIZE_LAST]; + +#endif /* _SPARX5_REGS_H_ */ diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_sdlb.c b/drivers/net/ethernet/microchip/sparx5/sparx5_sdlb.c index f5267218caeb..df1d15600aad 100644 --- a/drivers/net/ethernet/microchip/sparx5/sparx5_sdlb.c +++ b/drivers/net/ethernet/microchip/sparx5/sparx5_sdlb.c @@ -20,6 +20,11 @@ struct sparx5_sdlb_group sdlb_groups[SPX5_SDLB_GROUP_CNT] = { { 5000000ULL, 8192 / 8, 64 } /* 5 M */ }; +struct sparx5_sdlb_group *sparx5_get_sdlb_group(int idx) +{ + return &sdlb_groups[idx]; +} + int sparx5_sdlb_clk_hz_get(struct sparx5 *sparx5) { u32 clk_per_100ps; @@ -178,14 +183,15 
@@ static int sparx5_sdlb_group_get_count(struct sparx5 *sparx5, u32 group) int sparx5_sdlb_group_get_by_rate(struct sparx5 *sparx5, u32 rate, u32 burst) { + const struct sparx5_ops *ops = sparx5->data->ops; const struct sparx5_sdlb_group *group; u64 rate_bps; int i, count; rate_bps = rate * 1000; - for (i = SPX5_SDLB_GROUP_CNT - 1; i >= 0; i--) { - group = &sdlb_groups[i]; + for (i = sparx5->data->consts->n_lb_groups - 1; i >= 0; i--) { + group = ops->get_sdlb_group(i); count = sparx5_sdlb_group_get_count(sparx5, i); @@ -208,7 +214,7 @@ int sparx5_sdlb_group_get_by_index(struct sparx5 *sparx5, u32 idx, u32 *group) u32 itr, next; int i; - for (i = 0; i < SPX5_SDLB_GROUP_CNT; i++) { + for (i = 0; i < sparx5->data->consts->n_lb_groups; i++) { if (sparx5_sdlb_group_is_empty(sparx5, i)) continue; @@ -303,11 +309,12 @@ int sparx5_sdlb_group_del(struct sparx5 *sparx5, u32 group, u32 idx) void sparx5_sdlb_group_init(struct sparx5 *sparx5, u64 max_rate, u32 min_burst, u32 frame_size, u32 idx) { + const struct sparx5_ops *ops = sparx5->data->ops; u32 thres_shift, mask = 0x01, power = 0; struct sparx5_sdlb_group *group; u64 max_token; - group = &sdlb_groups[idx]; + group = ops->get_sdlb_group(idx); /* Number of positions to right-shift LB's threshold value. */ while ((min_burst & mask) == 0) { diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_switchdev.c b/drivers/net/ethernet/microchip/sparx5/sparx5_switchdev.c index 0b4abc3eb53d..bc9ecb9392cd 100644 --- a/drivers/net/ethernet/microchip/sparx5/sparx5_switchdev.c +++ b/drivers/net/ethernet/microchip/sparx5/sparx5_switchdev.c @@ -32,24 +32,34 @@ static int sparx5_port_attr_pre_bridge_flags(struct sparx5_port *port, static void sparx5_port_update_mcast_ip_flood(struct sparx5_port *port, bool flood_flag) { bool should_flood = flood_flag || port->is_mrouter; + struct sparx5 *sparx5 = port->sparx5; int pgid; - for (pgid = PGID_IPV4_MC_DATA; pgid <= PGID_IPV6_MC_CTRL; pgid++) + for (pgid = sparx5_get_pgid(sparx5, PGID_IPV4_MC_DATA); + pgid <= sparx5_get_pgid(sparx5, PGID_IPV6_MC_CTRL); pgid++) sparx5_pgid_update_mask(port, pgid, should_flood); } static void sparx5_port_attr_bridge_flags(struct sparx5_port *port, struct switchdev_brport_flags flags) { + struct sparx5 *sparx5 = port->sparx5; + if (flags.mask & BR_MCAST_FLOOD) { - sparx5_pgid_update_mask(port, PGID_MC_FLOOD, !!(flags.val & BR_MCAST_FLOOD)); + sparx5_pgid_update_mask(port, + sparx5_get_pgid(sparx5, PGID_MC_FLOOD), + !!(flags.val & BR_MCAST_FLOOD)); sparx5_port_update_mcast_ip_flood(port, !!(flags.val & BR_MCAST_FLOOD)); } if (flags.mask & BR_FLOOD) - sparx5_pgid_update_mask(port, PGID_UC_FLOOD, !!(flags.val & BR_FLOOD)); + sparx5_pgid_update_mask(port, + sparx5_get_pgid(sparx5, PGID_UC_FLOOD), + !!(flags.val & BR_FLOOD)); if (flags.mask & BR_BCAST_FLOOD) - sparx5_pgid_update_mask(port, PGID_BCAST, !!(flags.val & BR_BCAST_FLOOD)); + sparx5_pgid_update_mask(port, + sparx5_get_pgid(sparx5, PGID_BCAST), + !!(flags.val & BR_BCAST_FLOOD)); } static void sparx5_attr_stp_state_set(struct sparx5_port *port, @@ -219,7 +229,8 @@ static void sparx5_port_bridge_leave(struct sparx5_port *port, port->vid = NULL_VID; /* Forward frames to CPU */ - sparx5_mact_learn(sparx5, PGID_CPU, port->ndev->dev_addr, 0); + sparx5_mact_learn(sparx5, sparx5_get_pgid(sparx5, PGID_CPU), + port->ndev->dev_addr, 0); /* Port enters in host more therefore restore mc list */ __dev_mc_sync(port->ndev, sparx5_mc_sync, sparx5_mc_unsync); @@ -254,7 +265,8 @@ static int sparx5_port_add_addr(struct net_device *dev, bool up) u16 
vid = port->pvid; if (up) - sparx5_mact_learn(sparx5, PGID_CPU, port->ndev->dev_addr, vid); + sparx5_mact_learn(sparx5, sparx5_get_pgid(sparx5, PGID_CPU), + port->ndev->dev_addr, vid); else sparx5_mact_forget(sparx5, port->ndev->dev_addr, vid); @@ -330,7 +342,8 @@ static void sparx5_switchdev_bridge_fdb_event_work(struct work_struct *work) switch (switchdev_work->event) { case SWITCHDEV_FDB_ADD_TO_DEVICE: if (host_addr) - sparx5_add_mact_entry(sparx5, dev, PGID_CPU, + sparx5_add_mact_entry(sparx5, dev, + sparx5_get_pgid(sparx5, PGID_CPU), fdb_info->addr, vid); else sparx5_add_mact_entry(sparx5, port->ndev, port->portno, @@ -418,8 +431,8 @@ static int sparx5_handle_port_vlan_add(struct net_device *dev, switchdev_blocking_nb); /* Flood broadcast to CPU */ - sparx5_mact_learn(sparx5, PGID_BCAST, dev->broadcast, - v->vid); + sparx5_mact_learn(sparx5, sparx5_get_pgid(sparx5, PGID_BCAST), + dev->broadcast, v->vid); return 0; } @@ -547,7 +560,7 @@ static int sparx5_handle_port_mdb_add(struct net_device *dev, /* Add any mrouter ports to the new entry */ if (is_new && ether_addr_is_ip_mcast(v->addr)) - for (i = 0; i < SPX5_PORTS; i++) + for (i = 0; i < spx5->data->consts->n_ports; i++) if (spx5->ports[i] && spx5->ports[i]->is_mrouter) sparx5_pgid_update_mask(spx5->ports[i], entry->pgid_idx, diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_tc.c b/drivers/net/ethernet/microchip/sparx5/sparx5_tc.c index e80f3166db7d..28b2514c8330 100644 --- a/drivers/net/ethernet/microchip/sparx5/sparx5_tc.c +++ b/drivers/net/ethernet/microchip/sparx5/sparx5_tc.c @@ -60,8 +60,8 @@ static int sparx5_tc_setup_block(struct net_device *ndev, cb, ndev, ndev, false); } -static void sparx5_tc_get_layer_and_idx(u32 parent, u32 portno, u32 *layer, - u32 *idx) +static void sparx5_tc_get_layer_and_idx(struct sparx5 *sparx5, u32 parent, + u32 portno, u32 *layer, u32 *idx) { if (parent == TC_H_ROOT) { *layer = 2; @@ -90,8 +90,8 @@ static int sparx5_tc_setup_qdisc_tbf(struct net_device *ndev, struct sparx5_port *port = netdev_priv(ndev); u32 layer, se_idx; - sparx5_tc_get_layer_and_idx(qopt->parent, port->portno, &layer, - &se_idx); + sparx5_tc_get_layer_and_idx(port->sparx5, qopt->parent, port->portno, + &layer, &se_idx); switch (qopt->command) { case TC_TBF_REPLACE: diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_tc_flower.c b/drivers/net/ethernet/microchip/sparx5/sparx5_tc_flower.c index 8d67d9f24c76..c3bbed140554 100644 --- a/drivers/net/ethernet/microchip/sparx5/sparx5_tc_flower.c +++ b/drivers/net/ethernet/microchip/sparx5/sparx5_tc_flower.c @@ -785,7 +785,9 @@ static int sparx5_tc_flower_psfp_setup(struct sparx5 *sparx5, * allocate a stream gate that is always open. 
*/ if (sg_idx < 0) { - sg_idx = sparx5_pool_idx_to_id(SPX5_PSFP_SG_OPEN); + /* Always-open stream gate is always the last */ + sg_idx = sparx5_pool_idx_to_id(sparx5->data->consts->n_gates - + 1); sg->ipv = 0; /* Disabled */ sg->cycletime = SPX5_PSFP_SG_CYCLE_TIME_DEFAULT; sg->num_entries = 1; diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_vlan.c b/drivers/net/ethernet/microchip/sparx5/sparx5_vlan.c index ac001ae59a38..d42097aa60a0 100644 --- a/drivers/net/ethernet/microchip/sparx5/sparx5_vlan.c +++ b/drivers/net/ethernet/microchip/sparx5/sparx5_vlan.c @@ -16,8 +16,10 @@ static int sparx5_vlant_set_mask(struct sparx5 *sparx5, u16 vid) /* Output mask to respective registers */ spx5_wr(mask[0], sparx5, ANA_L3_VLAN_MASK_CFG(vid)); - spx5_wr(mask[1], sparx5, ANA_L3_VLAN_MASK_CFG1(vid)); - spx5_wr(mask[2], sparx5, ANA_L3_VLAN_MASK_CFG2(vid)); + if (is_sparx5(sparx5)) { + spx5_wr(mask[1], sparx5, ANA_L3_VLAN_MASK_CFG1(vid)); + spx5_wr(mask[2], sparx5, ANA_L3_VLAN_MASK_CFG2(vid)); + } return 0; } @@ -141,15 +143,19 @@ void sparx5_pgid_update_mask(struct sparx5_port *port, int pgid, bool enable) void sparx5_pgid_clear(struct sparx5 *spx5, int pgid) { spx5_wr(0, spx5, ANA_AC_PGID_CFG(pgid)); - spx5_wr(0, spx5, ANA_AC_PGID_CFG1(pgid)); - spx5_wr(0, spx5, ANA_AC_PGID_CFG2(pgid)); + if (is_sparx5(spx5)) { + spx5_wr(0, spx5, ANA_AC_PGID_CFG1(pgid)); + spx5_wr(0, spx5, ANA_AC_PGID_CFG2(pgid)); + } } void sparx5_pgid_read_mask(struct sparx5 *spx5, int pgid, u32 portmask[3]) { portmask[0] = spx5_rd(spx5, ANA_AC_PGID_CFG(pgid)); - portmask[1] = spx5_rd(spx5, ANA_AC_PGID_CFG1(pgid)); - portmask[2] = spx5_rd(spx5, ANA_AC_PGID_CFG2(pgid)); + if (is_sparx5(spx5)) { + portmask[1] = spx5_rd(spx5, ANA_AC_PGID_CFG1(pgid)); + portmask[2] = spx5_rd(spx5, ANA_AC_PGID_CFG2(pgid)); + } } void sparx5_update_fwd(struct sparx5 *sparx5) @@ -162,26 +168,33 @@ void sparx5_update_fwd(struct sparx5 *sparx5) bitmap_to_arr32(mask, sparx5->bridge_fwd_mask, SPX5_PORTS); /* Update flood masks */ - for (port = PGID_UC_FLOOD; port <= PGID_BCAST; port++) { + for (port = sparx5_get_pgid(sparx5, PGID_UC_FLOOD); + port <= sparx5_get_pgid(sparx5, PGID_BCAST); port++) { spx5_wr(mask[0], sparx5, ANA_AC_PGID_CFG(port)); - spx5_wr(mask[1], sparx5, ANA_AC_PGID_CFG1(port)); - spx5_wr(mask[2], sparx5, ANA_AC_PGID_CFG2(port)); + if (is_sparx5(sparx5)) { + spx5_wr(mask[1], sparx5, ANA_AC_PGID_CFG1(port)); + spx5_wr(mask[2], sparx5, ANA_AC_PGID_CFG2(port)); + } } /* Update SRC masks */ - for (port = 0; port < SPX5_PORTS; port++) { + for (port = 0; port < sparx5->data->consts->n_ports; port++) { if (test_bit(port, sparx5->bridge_fwd_mask)) { /* Allow to send to all bridged but self */ bitmap_copy(workmask, sparx5->bridge_fwd_mask, SPX5_PORTS); clear_bit(port, workmask); bitmap_to_arr32(mask, workmask, SPX5_PORTS); spx5_wr(mask[0], sparx5, ANA_AC_SRC_CFG(port)); - spx5_wr(mask[1], sparx5, ANA_AC_SRC_CFG1(port)); - spx5_wr(mask[2], sparx5, ANA_AC_SRC_CFG2(port)); + if (is_sparx5(sparx5)) { + spx5_wr(mask[1], sparx5, ANA_AC_SRC_CFG1(port)); + spx5_wr(mask[2], sparx5, ANA_AC_SRC_CFG2(port)); + } } else { spx5_wr(0, sparx5, ANA_AC_SRC_CFG(port)); - spx5_wr(0, sparx5, ANA_AC_SRC_CFG1(port)); - spx5_wr(0, sparx5, ANA_AC_SRC_CFG2(port)); + if (is_sparx5(sparx5)) { + spx5_wr(0, sparx5, ANA_AC_SRC_CFG1(port)); + spx5_wr(0, sparx5, ANA_AC_SRC_CFG2(port)); + } } } @@ -192,8 +205,10 @@ void sparx5_update_fwd(struct sparx5 *sparx5) /* Apply learning mask */ spx5_wr(mask[0], sparx5, ANA_L2_AUTO_LRN_CFG); - spx5_wr(mask[1], sparx5, ANA_L2_AUTO_LRN_CFG1); 
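/* Editor's sketch of the pattern applied throughout sparx5_vlan.c above:
 * Sparx5 has up to 65 ports, so a port mask spans three 32-bit registers
 * (CFG/CFG1/CFG2); smaller derivatives fit in the first register alone, and
 * the upper two do not exist there. Assumed helper, not driver code:
 */
#include <linux/bitmap.h>

static void spx5_sketch_write_pgid_mask(struct sparx5 *sparx5, int pgid,
					const unsigned long *ports)
{
	u32 mask[3] = {};

	bitmap_to_arr32(mask, ports, SPX5_PORTS);
	spx5_wr(mask[0], sparx5, ANA_AC_PGID_CFG(pgid));
	if (is_sparx5(sparx5)) {
		/* The upper mask words exist only on the 65-port chip */
		spx5_wr(mask[1], sparx5, ANA_AC_PGID_CFG1(pgid));
		spx5_wr(mask[2], sparx5, ANA_AC_PGID_CFG2(pgid));
	}
}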
- spx5_wr(mask[2], sparx5, ANA_L2_AUTO_LRN_CFG2); + if (is_sparx5(sparx5)) { + spx5_wr(mask[1], sparx5, ANA_L2_AUTO_LRN_CFG1); + spx5_wr(mask[2], sparx5, ANA_L2_AUTO_LRN_CFG2); + } } void sparx5_vlan_port_apply(struct sparx5 *sparx5, diff --git a/drivers/net/ethernet/microsoft/mana/gdma_main.c b/drivers/net/ethernet/microsoft/mana/gdma_main.c index ca4ed58f1206..e97af7ac2bb2 100644 --- a/drivers/net/ethernet/microsoft/mana/gdma_main.c +++ b/drivers/net/ethernet/microsoft/mana/gdma_main.c @@ -1,6 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* Copyright (c) 2021, Microsoft Corporation. */ +#include <linux/debugfs.h> #include <linux/module.h> #include <linux/pci.h> #include <linux/utsname.h> @@ -8,6 +9,8 @@ #include <net/mana/mana.h> +struct dentry *mana_debugfs_root; + static u32 mana_gd_r32(struct gdma_context *g, u64 offset) { return readl(g->bar0_va + offset); @@ -1516,6 +1519,12 @@ static int mana_gd_probe(struct pci_dev *pdev, const struct pci_device_id *ent) gc->bar0_va = bar0_va; gc->dev = &pdev->dev; + if (gc->is_pf) + gc->mana_pci_debugfs = debugfs_create_dir("0", mana_debugfs_root); + else + gc->mana_pci_debugfs = debugfs_create_dir(pci_slot_name(pdev->slot), + mana_debugfs_root); + err = mana_gd_setup(pdev); if (err) goto unmap_bar; @@ -1529,6 +1538,13 @@ static int mana_gd_probe(struct pci_dev *pdev, const struct pci_device_id *ent) cleanup_gd: mana_gd_cleanup(pdev); unmap_bar: + /* + * at this point we know that the other debugfs child dir/files + * are either not yet created or are already cleaned up. + * The pci debugfs folder clean-up now, will only be cleaning up + * adapter-MTU file and apc->mana_pci_debugfs folder. + */ + debugfs_remove_recursive(gc->mana_pci_debugfs); pci_iounmap(pdev, bar0_va); free_gc: pci_set_drvdata(pdev, NULL); @@ -1549,6 +1565,8 @@ static void mana_gd_remove(struct pci_dev *pdev) mana_gd_cleanup(pdev); + debugfs_remove_recursive(gc->mana_pci_debugfs); + pci_iounmap(pdev, gc->bar0_va); vfree(gc); @@ -1600,6 +1618,8 @@ static void mana_gd_shutdown(struct pci_dev *pdev) mana_gd_cleanup(pdev); + debugfs_remove_recursive(gc->mana_pci_debugfs); + pci_disable_device(pdev); } @@ -1619,7 +1639,28 @@ static struct pci_driver mana_driver = { .shutdown = mana_gd_shutdown, }; -module_pci_driver(mana_driver); +static int __init mana_driver_init(void) +{ + int err; + + mana_debugfs_root = debugfs_create_dir("mana", NULL); + + err = pci_register_driver(&mana_driver); + if (err) + debugfs_remove(mana_debugfs_root); + + return err; +} + +static void __exit mana_driver_exit(void) +{ + debugfs_remove(mana_debugfs_root); + + pci_unregister_driver(&mana_driver); +} + +module_init(mana_driver_init); +module_exit(mana_driver_exit); MODULE_DEVICE_TABLE(pci, mana_id_table); diff --git a/drivers/net/ethernet/microsoft/mana/mana_en.c b/drivers/net/ethernet/microsoft/mana/mana_en.c index c47266d1c7c2..57ac732e7707 100644 --- a/drivers/net/ethernet/microsoft/mana/mana_en.c +++ b/drivers/net/ethernet/microsoft/mana/mana_en.c @@ -3,6 +3,7 @@ #include <uapi/linux/bpf.h> +#include <linux/debugfs.h> #include <linux/inetdevice.h> #include <linux/etherdevice.h> #include <linux/ethtool.h> @@ -30,6 +31,21 @@ static void mana_adev_idx_free(int idx) ida_free(&mana_adev_ida, idx); } +static ssize_t mana_dbg_q_read(struct file *filp, char __user *buf, size_t count, + loff_t *pos) +{ + struct gdma_queue *gdma_q = filp->private_data; + + return simple_read_from_buffer(buf, count, pos, gdma_q->queue_mem_ptr, + gdma_q->queue_size); +} + +static const struct file_operations 
mana_dbg_q_fops = { + .owner = THIS_MODULE, + .open = simple_open, + .read = mana_dbg_q_read, +}; + /* Microsoft Azure Network Adapter (MANA) functions */ static int mana_open(struct net_device *ndev) @@ -721,6 +737,13 @@ static const struct net_device_ops mana_devops = { static void mana_cleanup_port_context(struct mana_port_context *apc) { + /* + * At this point all dirs/files under the vport directory + * have already been cleaned up. + * We are sure the apc->mana_port_debugfs removal will not + * cause any freed memory access issues. + */ + debugfs_remove(apc->mana_port_debugfs); kfree(apc->rxqs); apc->rxqs = NULL; } @@ -943,6 +966,8 @@ static int mana_query_device_cfg(struct mana_context *ac, u32 proto_major_ver, else gc->adapter_mtu = ETH_FRAME_LEN; + debugfs_create_u16("adapter-MTU", 0400, gc->mana_pci_debugfs, &gc->adapter_mtu); + return 0; } @@ -1228,6 +1253,8 @@ static void mana_destroy_eq(struct mana_context *ac) if (!ac->eqs) return; + debugfs_remove_recursive(ac->mana_eqs_debugfs); + for (i = 0; i < gc->max_num_queues; i++) { eq = ac->eqs[i].eq; if (!eq) @@ -1240,6 +1267,18 @@ static void mana_destroy_eq(struct mana_context *ac) ac->eqs = NULL; } +static void mana_create_eq_debugfs(struct mana_context *ac, int i) +{ + struct mana_eq *eq = &ac->eqs[i]; + char eqnum[32]; + + sprintf(eqnum, "eq%d", i); + eq->mana_eq_debugfs = debugfs_create_dir(eqnum, ac->mana_eqs_debugfs); + debugfs_create_u32("head", 0400, eq->mana_eq_debugfs, &eq->eq->head); + debugfs_create_u32("tail", 0400, eq->mana_eq_debugfs, &eq->eq->tail); + debugfs_create_file("eq_dump", 0400, eq->mana_eq_debugfs, eq->eq, &mana_dbg_q_fops); +} + static int mana_create_eq(struct mana_context *ac) { struct gdma_dev *gd = ac->gdma_dev; @@ -1260,11 +1299,14 @@ static int mana_create_eq(struct mana_context *ac) spec.eq.context = ac->eqs; spec.eq.log2_throttle_limit = LOG2_EQ_THROTTLE; + ac->mana_eqs_debugfs = debugfs_create_dir("EQs", gc->mana_pci_debugfs); + for (i = 0; i < gc->max_num_queues; i++) { spec.eq.msix_index = (i + 1) % gc->num_msix_usable; err = mana_gd_create_mana_eq(gd, &spec, &ac->eqs[i].eq); if (err) goto out; + mana_create_eq_debugfs(ac, i); } return 0; @@ -1871,6 +1913,8 @@ static void mana_destroy_txq(struct mana_port_context *apc) return; for (i = 0; i < apc->num_queues; i++) { + debugfs_remove_recursive(apc->tx_qp[i].mana_tx_debugfs); + napi = &apc->tx_qp[i].tx_cq.napi; if (apc->tx_qp[i].txq.napi_initialized) { napi_synchronize(napi); @@ -1889,6 +1933,31 @@ static void mana_destroy_txq(struct mana_port_context *apc) apc->tx_qp = NULL; } +static void mana_create_txq_debugfs(struct mana_port_context *apc, int idx) +{ + struct mana_tx_qp *tx_qp = &apc->tx_qp[idx]; + char qnum[32]; + + sprintf(qnum, "TX-%d", idx); + tx_qp->mana_tx_debugfs = debugfs_create_dir(qnum, apc->mana_port_debugfs); + debugfs_create_u32("sq_head", 0400, tx_qp->mana_tx_debugfs, + &tx_qp->txq.gdma_sq->head); + debugfs_create_u32("sq_tail", 0400, tx_qp->mana_tx_debugfs, + &tx_qp->txq.gdma_sq->tail); + debugfs_create_u32("sq_pend_skb_qlen", 0400, tx_qp->mana_tx_debugfs, + &tx_qp->txq.pending_skbs.qlen); + debugfs_create_u32("cq_head", 0400, tx_qp->mana_tx_debugfs, + &tx_qp->tx_cq.gdma_cq->head); + debugfs_create_u32("cq_tail", 0400, tx_qp->mana_tx_debugfs, + &tx_qp->tx_cq.gdma_cq->tail); + debugfs_create_u32("cq_budget", 0400, tx_qp->mana_tx_debugfs, + &tx_qp->tx_cq.budget); + debugfs_create_file("txq_dump", 0400, tx_qp->mana_tx_debugfs, + tx_qp->txq.gdma_sq, &mana_dbg_q_fops); + debugfs_create_file("cq_dump", 0400, tx_qp->mana_tx_debugfs, +
tx_qp->tx_cq.gdma_cq, &mana_dbg_q_fops); +} + static int mana_create_txq(struct mana_port_context *apc, struct net_device *net) { @@ -2000,6 +2069,8 @@ static int mana_create_txq(struct mana_port_context *apc, gc->cq_table[cq->gdma_id] = cq->gdma_cq; + mana_create_txq_debugfs(apc, i); + netif_napi_add_tx(net, &cq->napi, mana_poll); napi_enable(&cq->napi); txq->napi_initialized = true; @@ -2027,6 +2098,8 @@ static void mana_destroy_rxq(struct mana_port_context *apc, if (!rxq) return; + debugfs_remove_recursive(rxq->mana_rx_debugfs); + napi = &rxq->rx_cq.napi; if (napi_initialized) { @@ -2308,6 +2381,28 @@ out: return NULL; } +static void mana_create_rxq_debugfs(struct mana_port_context *apc, int idx) +{ + struct mana_rxq *rxq; + char qnum[32]; + + rxq = apc->rxqs[idx]; + + sprintf(qnum, "RX-%d", idx); + rxq->mana_rx_debugfs = debugfs_create_dir(qnum, apc->mana_port_debugfs); + debugfs_create_u32("rq_head", 0400, rxq->mana_rx_debugfs, &rxq->gdma_rq->head); + debugfs_create_u32("rq_tail", 0400, rxq->mana_rx_debugfs, &rxq->gdma_rq->tail); + debugfs_create_u32("rq_nbuf", 0400, rxq->mana_rx_debugfs, &rxq->num_rx_buf); + debugfs_create_u32("cq_head", 0400, rxq->mana_rx_debugfs, + &rxq->rx_cq.gdma_cq->head); + debugfs_create_u32("cq_tail", 0400, rxq->mana_rx_debugfs, + &rxq->rx_cq.gdma_cq->tail); + debugfs_create_u32("cq_budget", 0400, rxq->mana_rx_debugfs, &rxq->rx_cq.budget); + debugfs_create_file("rxq_dump", 0400, rxq->mana_rx_debugfs, rxq->gdma_rq, &mana_dbg_q_fops); + debugfs_create_file("cq_dump", 0400, rxq->mana_rx_debugfs, rxq->rx_cq.gdma_cq, + &mana_dbg_q_fops); +} + static int mana_add_rx_queues(struct mana_port_context *apc, struct net_device *ndev) { @@ -2326,6 +2421,8 @@ static int mana_add_rx_queues(struct mana_port_context *apc, u64_stats_init(&rxq->stats.syncp); apc->rxqs[i] = rxq; + + mana_create_rxq_debugfs(apc, i); } apc->default_rxobj = apc->rxqs[0]->rxobj; @@ -2518,14 +2615,19 @@ void mana_query_gf_stats(struct mana_port_context *apc) static int mana_init_port(struct net_device *ndev) { struct mana_port_context *apc = netdev_priv(ndev); + struct gdma_dev *gd = apc->ac->gdma_dev; u32 max_txq, max_rxq, max_queues; int port_idx = apc->port_idx; + struct gdma_context *gc; + char vport[32]; int err; err = mana_init_port_context(apc); if (err) return err; + gc = gd->gdma_context; + err = mana_query_vport_cfg(apc, port_idx, &max_txq, &max_rxq, &apc->indir_table_sz); if (err) { @@ -2542,7 +2644,8 @@ static int mana_init_port(struct net_device *ndev) apc->num_queues = apc->max_queues; eth_hw_addr_set(ndev, apc->mac_addr); - + sprintf(vport, "vport%d", port_idx); + apc->mana_port_debugfs = debugfs_create_dir(vport, gc->mana_pci_debugfs); return 0; reset_apc: diff --git a/drivers/net/ethernet/microsoft/mana/mana_ethtool.c b/drivers/net/ethernet/microsoft/mana/mana_ethtool.c index dc3864377538..349f11bf8e64 100644 --- a/drivers/net/ethernet/microsoft/mana/mana_ethtool.c +++ b/drivers/net/ethernet/microsoft/mana/mana_ethtool.c @@ -443,6 +443,15 @@ out: return err; } +static int mana_get_link_ksettings(struct net_device *ndev, + struct ethtool_link_ksettings *cmd) +{ + cmd->base.duplex = DUPLEX_FULL; + cmd->base.port = PORT_OTHER; + + return 0; +} + const struct ethtool_ops mana_ethtool_ops = { .get_ethtool_stats = mana_get_ethtool_stats, .get_sset_count = mana_get_sset_count, @@ -456,4 +465,6 @@ const struct ethtool_ops mana_ethtool_ops = { .set_channels = mana_set_channels, .get_ringparam = mana_get_ringparam, .set_ringparam = mana_set_ringparam, + .get_link_ksettings = 
mana_get_link_ksettings, + .get_link = ethtool_op_get_link, }; diff --git a/drivers/net/ethernet/moxa/moxart_ether.c b/drivers/net/ethernet/moxa/moxart_ether.c index 96dc69e7141f..8bd60168624a 100644 --- a/drivers/net/ethernet/moxa/moxart_ether.c +++ b/drivers/net/ethernet/moxa/moxart_ether.c @@ -576,7 +576,7 @@ MODULE_DEVICE_TABLE(of, moxart_mac_match); static struct platform_driver moxart_mac_driver = { .probe = moxart_mac_probe, - .remove_new = moxart_remove, + .remove = moxart_remove, .driver = { .name = "moxart-ethernet", .of_match_table = moxart_mac_match, diff --git a/drivers/net/ethernet/mscc/ocelot_vsc7514.c b/drivers/net/ethernet/mscc/ocelot_vsc7514.c index c09dd2e3343c..055b55651a49 100644 --- a/drivers/net/ethernet/mscc/ocelot_vsc7514.c +++ b/drivers/net/ethernet/mscc/ocelot_vsc7514.c @@ -416,7 +416,7 @@ static void mscc_ocelot_remove(struct platform_device *pdev) static struct platform_driver mscc_ocelot_driver = { .probe = mscc_ocelot_probe, - .remove_new = mscc_ocelot_remove, + .remove = mscc_ocelot_remove, .driver = { .name = "ocelot-switch", .of_match_table = mscc_ocelot_match, diff --git a/drivers/net/ethernet/natsemi/jazzsonic.c b/drivers/net/ethernet/natsemi/jazzsonic.c index 2b6e097df28f..6d29d2e1fa7c 100644 --- a/drivers/net/ethernet/natsemi/jazzsonic.c +++ b/drivers/net/ethernet/natsemi/jazzsonic.c @@ -241,7 +241,7 @@ static void jazz_sonic_device_remove(struct platform_device *pdev) static struct platform_driver jazz_sonic_driver = { .probe = jazz_sonic_probe, - .remove_new = jazz_sonic_device_remove, + .remove = jazz_sonic_device_remove, .driver = { .name = jazz_sonic_string, }, diff --git a/drivers/net/ethernet/natsemi/macsonic.c b/drivers/net/ethernet/natsemi/macsonic.c index 2fc63860dbdb..a740e24a9759 100644 --- a/drivers/net/ethernet/natsemi/macsonic.c +++ b/drivers/net/ethernet/natsemi/macsonic.c @@ -545,7 +545,7 @@ static void mac_sonic_platform_remove(struct platform_device *pdev) static struct platform_driver mac_sonic_platform_driver = { .probe = mac_sonic_platform_probe, - .remove_new = mac_sonic_platform_remove, + .remove = mac_sonic_platform_remove, .driver = { .name = "macsonic", }, diff --git a/drivers/net/ethernet/natsemi/ns83820.c b/drivers/net/ethernet/natsemi/ns83820.c index 998586872599..bea969dfa536 100644 --- a/drivers/net/ethernet/natsemi/ns83820.c +++ b/drivers/net/ethernet/natsemi/ns83820.c @@ -2090,7 +2090,7 @@ static int ns83820_init_one(struct pci_dev *pci_dev, */ /* Ramit : 1024 DMA is not a good idea, it ends up banging * some DELL and COMPAQ SMP systems - * Turn on ALP, only we are accpeting Jumbo Packets */ + * Turn on ALP, only we are accepting Jumbo Packets */ writel(RXCFG_AEP | RXCFG_ARP | RXCFG_AIRL | RXCFG_RX_FD | RXCFG_STRIPCRC //| RXCFG_ALP diff --git a/drivers/net/ethernet/natsemi/xtsonic.c b/drivers/net/ethernet/natsemi/xtsonic.c index 8943e7244310..c01a4cb5dc0f 100644 --- a/drivers/net/ethernet/natsemi/xtsonic.c +++ b/drivers/net/ethernet/natsemi/xtsonic.c @@ -264,7 +264,7 @@ static void xtsonic_device_remove(struct platform_device *pdev) static struct platform_driver xtsonic_driver = { .probe = xtsonic_probe, - .remove_new = xtsonic_device_remove, + .remove = xtsonic_device_remove, .driver = { .name = xtsonic_string, }, diff --git a/drivers/net/ethernet/neterion/s2io.c b/drivers/net/ethernet/neterion/s2io.c index f235e76e4ce9..f8016dc25e0a 100644 --- a/drivers/net/ethernet/neterion/s2io.c +++ b/drivers/net/ethernet/neterion/s2io.c @@ -8523,7 +8523,7 @@ static pci_ers_result_t s2io_io_error_detected(struct pci_dev *pdev, * 
@pdev: Pointer to PCI device * * Restart the card from scratch, as if from a cold-boot. - * At this point, the card has exprienced a hard reset, + * At this point, the card has experienced a hard reset, * followed by fixups by BIOS, and has its config space * set up identically to what it was at cold boot. */ diff --git a/drivers/net/ethernet/ni/nixge.c b/drivers/net/ethernet/ni/nixge.c index 2aa4ad9cf96e..230d5ff99dd7 100644 --- a/drivers/net/ethernet/ni/nixge.c +++ b/drivers/net/ethernet/ni/nixge.c @@ -1415,7 +1415,7 @@ static void nixge_remove(struct platform_device *pdev) static struct platform_driver nixge_driver = { .probe = nixge_probe, - .remove_new = nixge_remove, + .remove = nixge_remove, .driver = { .name = "nixge", .of_match_table = nixge_dt_ids, diff --git a/drivers/net/ethernet/nxp/lpc_eth.c b/drivers/net/ethernet/nxp/lpc_eth.c index dd3e58a1319c..8b9a3e3bba30 100644 --- a/drivers/net/ethernet/nxp/lpc_eth.c +++ b/drivers/net/ethernet/nxp/lpc_eth.c @@ -1503,7 +1503,7 @@ MODULE_DEVICE_TABLE(of, lpc_eth_match); static struct platform_driver lpc_eth_driver = { .probe = lpc_eth_drv_probe, - .remove_new = lpc_eth_drv_remove, + .remove = lpc_eth_drv_remove, #ifdef CONFIG_PM .suspend = lpc_eth_drv_suspend, .resume = lpc_eth_drv_resume, diff --git a/drivers/net/ethernet/qlogic/qed/qed_debug.c b/drivers/net/ethernet/qlogic/qed/qed_debug.c index f67be4b8ad43..464a72afb758 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_debug.c +++ b/drivers/net/ethernet/qlogic/qed/qed_debug.c @@ -2873,6 +2873,7 @@ static u32 qed_grc_dump_ctx_data(struct qed_hwfn *p_hwfn, false, SPLIT_TYPE_NONE, 0); } + cond_resched(); } return offset; diff --git a/drivers/net/ethernet/qlogic/qed/qed_hw.c b/drivers/net/ethernet/qlogic/qed/qed_hw.c index 6263f847b6b9..9e5f0dbc8a07 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_hw.c +++ b/drivers/net/ethernet/qlogic/qed/qed_hw.c @@ -596,6 +596,7 @@ static int qed_dmae_operation_wait(struct qed_hwfn *p_hwfn) barrier(); while (*p_hwfn->dmae_info.p_completion_word != DMAE_COMPLETION_VAL) { udelay(DMAE_MIN_WAIT_TIME); + cond_resched(); if (++wait_cnt > wait_cnt_limit) { DP_NOTICE(p_hwfn->cdev, "Timed-out waiting for operation to complete. Completion word is 0x%08x expected 0x%08x.\n", diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.c b/drivers/net/ethernet/qlogic/qed/qed_mcp.c index 16e6bd466143..26a714bfad4e 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_mcp.c +++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.c @@ -459,12 +459,11 @@ static void qed_mcp_print_cpu_info(struct qed_hwfn *p_hwfn, static int _qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, - struct qed_mcp_mb_params *p_mb_params, - u32 max_retries, u32 usecs) + struct qed_mcp_mb_params *p_mb_params) { - u32 cnt = 0, msecs = DIV_ROUND_UP(usecs, 1000); struct qed_mcp_cmd_elem *p_cmd_elem; u16 seq_num; + u32 cnt = 0; int rc = 0; /* Wait until the mailbox is non-occupied */ @@ -488,12 +487,13 @@ _qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn, spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock); if (QED_MB_FLAGS_IS_SET(p_mb_params, CAN_SLEEP)) - msleep(msecs); + usleep_range(QED_MCP_RESP_ITER_US, + QED_MCP_RESP_ITER_US * 2); else - udelay(usecs); - } while (++cnt < max_retries); + udelay(QED_MCP_RESP_ITER_US); + } while (++cnt < QED_DRV_MB_MAX_RETRIES); - if (cnt >= max_retries) { + if (cnt >= QED_DRV_MB_MAX_RETRIES) { DP_NOTICE(p_hwfn, "The MFW mailbox is occupied by an uncompleted command. 
Failed to send command 0x%08x [param 0x%08x].\n", p_mb_params->cmd, p_mb_params->param); @@ -520,9 +520,10 @@ _qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn, */ if (QED_MB_FLAGS_IS_SET(p_mb_params, CAN_SLEEP)) - msleep(msecs); + usleep_range(QED_MCP_RESP_ITER_US, + QED_MCP_RESP_ITER_US * 2); else - udelay(usecs); + udelay(QED_MCP_RESP_ITER_US); spin_lock_bh(&p_hwfn->mcp_info->cmd_lock); @@ -536,9 +537,9 @@ _qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn, goto err; spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock); - } while (++cnt < max_retries); + } while (++cnt < QED_DRV_MB_MAX_RETRIES); - if (cnt >= max_retries) { + if (cnt >= QED_DRV_MB_MAX_RETRIES) { DP_NOTICE(p_hwfn, "The MFW failed to respond to command 0x%08x [param 0x%08x].\n", p_mb_params->cmd, p_mb_params->param); @@ -564,7 +565,8 @@ _qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn, "MFW mailbox: response 0x%08x param 0x%08x [after %d.%03d ms]\n", p_mb_params->mcp_resp, p_mb_params->mcp_param, - (cnt * usecs) / 1000, (cnt * usecs) % 1000); + (cnt * QED_MCP_RESP_ITER_US) / 1000, + (cnt * QED_MCP_RESP_ITER_US) % 1000); /* Clear the sequence number from the MFW response */ p_mb_params->mcp_resp &= FW_MSG_CODE_MASK; @@ -581,8 +583,6 @@ static int qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn, struct qed_mcp_mb_params *p_mb_params) { size_t union_data_size = sizeof(union drv_union_data); - u32 max_retries = QED_DRV_MB_MAX_RETRIES; - u32 usecs = QED_MCP_RESP_ITER_US; /* MCP not initialized */ if (!qed_mcp_is_init(p_hwfn)) { @@ -606,13 +606,7 @@ static int qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn, return -EINVAL; } - if (QED_MB_FLAGS_IS_SET(p_mb_params, CAN_SLEEP)) { - max_retries = DIV_ROUND_UP(max_retries, 1000); - usecs *= 1000; - } - - return _qed_mcp_cmd_and_union(p_hwfn, p_ptt, p_mb_params, max_retries, - usecs); + return _qed_mcp_cmd_and_union(p_hwfn, p_ptt, p_mb_params); } static int _qed_mcp_cmd(struct qed_hwfn *p_hwfn, @@ -3085,20 +3079,13 @@ int qed_mcp_nvm_read(struct qed_dev *cdev, u32 addr, u8 *p_buf, u32 len) DRV_MB_PARAM_NVM_LEN_OFFSET), &resp, &resp_param, &read_len, - (u32 *)(p_buf + offset), false); + (u32 *)(p_buf + offset), true); if (rc || (resp != FW_MSG_CODE_NVM_OK)) { DP_NOTICE(cdev, "MCP command rc = %d\n", rc); break; } - /* This can be a lengthy process, and it's possible scheduler - * isn't preemptible. Sleep a bit to prevent CPU hogging. 
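/* Editor's sketch: the qed_mcp rework above drops the pre-scaled
 * max_retries/usecs arguments and the ad-hoc "sleep every 4 KiB" throttling
 * in favour of one fixed iteration delay, sleeping only when the caller set
 * the CAN_SLEEP flag. A condensed, assumption-labelled form of that loop
 * (QED_MCP_RESP_ITER_US and QED_DRV_MB_MAX_RETRIES are the driver's
 * constants; the helper itself is illustrative):
 */
#include <linux/delay.h>

static int qed_sketch_poll(bool can_sleep, bool (*done)(void *ctx), void *ctx)
{
	u32 cnt = 0;

	do {
		if (done(ctx))
			return 0;
		if (can_sleep)			/* process context: yield */
			usleep_range(QED_MCP_RESP_ITER_US,
				     QED_MCP_RESP_ITER_US * 2);
		else				/* atomic context: spin */
			udelay(QED_MCP_RESP_ITER_US);
	} while (++cnt < QED_DRV_MB_MAX_RETRIES);

	return -EBUSY;
}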
- */ - if (bytes_left % 0x1000 < - (bytes_left - read_len) % 0x1000) - usleep_range(1000, 2000); - offset += read_len; bytes_left -= read_len; } diff --git a/drivers/net/ethernet/qualcomm/emac/emac-sgmii.c b/drivers/net/ethernet/qualcomm/emac/emac-sgmii.c index e4bc18009d08..a508ebc4b206 100644 --- a/drivers/net/ethernet/qualcomm/emac/emac-sgmii.c +++ b/drivers/net/ethernet/qualcomm/emac/emac-sgmii.c @@ -293,6 +293,11 @@ static struct sgmii_ops qdf2400_ops = { }; #endif +struct emac_match_data { + struct sgmii_ops **sgmii_ops; + struct device *target_device; +}; + static int emac_sgmii_acpi_match(struct device *dev, void *data) { #ifdef CONFIG_ACPI @@ -303,7 +308,7 @@ static int emac_sgmii_acpi_match(struct device *dev, void *data) {} }; const struct acpi_device_id *id = acpi_match_device(match_table, dev); - struct sgmii_ops **ops = data; + struct emac_match_data *match_data = data; if (id) { acpi_handle handle = ACPI_HANDLE(dev); @@ -324,10 +329,12 @@ static int emac_sgmii_acpi_match(struct device *dev, void *data) switch (hrv) { case 1: - *ops = &qdf2432_ops; + *match_data->sgmii_ops = &qdf2432_ops; + match_data->target_device = dev; return 1; case 2: - *ops = &qdf2400_ops; + *match_data->sgmii_ops = &qdf2400_ops; + match_data->target_device = dev; return 1; } } @@ -356,16 +363,21 @@ int emac_sgmii_config(struct platform_device *pdev, struct emac_adapter *adpt) int ret; if (has_acpi_companion(&pdev->dev)) { + struct emac_match_data match_data = { + .sgmii_ops = &phy->sgmii_ops, + .target_device = NULL, + }; struct device *dev; - dev = device_find_child(&pdev->dev, &phy->sgmii_ops, - emac_sgmii_acpi_match); + device_for_each_child(&pdev->dev, &match_data, emac_sgmii_acpi_match); + dev = match_data.target_device; if (!dev) { dev_warn(&pdev->dev, "cannot find internal phy node\n"); return 0; } + get_device(dev); sgmii_pdev = to_platform_device(dev); } else { const struct of_device_id *match; diff --git a/drivers/net/ethernet/qualcomm/emac/emac.c b/drivers/net/ethernet/qualcomm/emac/emac.c index 99d4647bf245..699a8afc214a 100644 --- a/drivers/net/ethernet/qualcomm/emac/emac.c +++ b/drivers/net/ethernet/qualcomm/emac/emac.c @@ -760,7 +760,7 @@ static void emac_shutdown(struct platform_device *pdev) static struct platform_driver emac_platform_driver = { .probe = emac_probe, - .remove_new = emac_remove, + .remove = emac_remove, .driver = { .name = "qcom-emac", .of_match_table = emac_dt_match, diff --git a/drivers/net/ethernet/qualcomm/qca_debug.c b/drivers/net/ethernet/qualcomm/qca_debug.c index ad06da0fdaa0..13deb3da4a64 100644 --- a/drivers/net/ethernet/qualcomm/qca_debug.c +++ b/drivers/net/ethernet/qualcomm/qca_debug.c @@ -98,8 +98,8 @@ qcaspi_info_show(struct seq_file *s, void *what) seq_printf(s, "IRQ : %d\n", qca->spi_dev->irq); - seq_printf(s, "INTR : %lx\n", - qca->intr); + seq_printf(s, "FLAGS : %lx\n", + qca->flags); seq_printf(s, "SPI max speed : %lu\n", (unsigned long)qca->spi_dev->max_speed_hz); diff --git a/drivers/net/ethernet/qualcomm/qca_spi.c b/drivers/net/ethernet/qualcomm/qca_spi.c index 8f7ce6b51a1c..ef9c02b000e4 100644 --- a/drivers/net/ethernet/qualcomm/qca_spi.c +++ b/drivers/net/ethernet/qualcomm/qca_spi.c @@ -35,7 +35,8 @@ #define MAX_DMA_BURST_LEN 5000 -#define SPI_INTR 0 +#define SPI_INTR 0 +#define SPI_RESET 1 /* Modules parameters */ #define QCASPI_CLK_SPEED_MIN 1000000 @@ -495,7 +496,7 @@ qcaspi_qca7k_sync(struct qcaspi *qca, int event) if (qca->sync == QCASPI_SYNC_READY) qca->stats.bad_signature++; - qca->sync = QCASPI_SYNC_UNKNOWN; + set_bit(SPI_RESET, 
&qca->flags); netdev_dbg(qca->net_dev, "sync: got CPU on, but signature was invalid, restart\n"); return; } else { @@ -505,12 +506,17 @@ qcaspi_qca7k_sync(struct qcaspi *qca, int event) if (wrbuf_space != QCASPI_HW_BUF_LEN) { netdev_dbg(qca->net_dev, "sync: got CPU on, but wrbuf not empty. reset!\n"); qca->sync = QCASPI_SYNC_UNKNOWN; + qca->stats.buf_avail_err++; } else { netdev_dbg(qca->net_dev, "sync: got CPU on, now in sync\n"); qca->sync = QCASPI_SYNC_READY; return; } } + } else { + /* Handle reset only on QCASPI_EVENT_UPDATE */ + if (test_and_clear_bit(SPI_RESET, &qca->flags)) + qca->sync = QCASPI_SYNC_UNKNOWN; } switch (qca->sync) { @@ -521,7 +527,7 @@ qcaspi_qca7k_sync(struct qcaspi *qca, int event) qcaspi_read_register(qca, SPI_REG_SIGNATURE, &signature); if (signature != QCASPI_GOOD_SIGNATURE) { - qca->sync = QCASPI_SYNC_UNKNOWN; + set_bit(SPI_RESET, &qca->flags); qca->stats.bad_signature++; netdev_dbg(qca->net_dev, "sync: bad signature, restart\n"); /* don't reset right away */ @@ -552,7 +558,7 @@ qcaspi_qca7k_sync(struct qcaspi *qca, int event) qca->reset_count); if (qca->reset_count >= QCASPI_RESET_TIMEOUT) { /* reset did not seem to take place, try again */ - qca->sync = QCASPI_SYNC_UNKNOWN; + set_bit(SPI_RESET, &qca->flags); qca->stats.reset_timeout++; netdev_dbg(qca->net_dev, "sync: reset timeout, restarting process.\n"); } @@ -581,14 +587,14 @@ qcaspi_spi_thread(void *data) continue; } - if (!test_bit(SPI_INTR, &qca->intr) && + if (!qca->flags && !qca->txr.skb[qca->txr.head]) schedule(); set_current_state(TASK_RUNNING); netdev_dbg(qca->net_dev, "have work to do. int: %lu, tx_skb: %p\n", - qca->intr, + qca->flags, qca->txr.skb[qca->txr.head]); qcaspi_qca7k_sync(qca, QCASPI_EVENT_UPDATE); @@ -602,7 +608,7 @@ qcaspi_spi_thread(void *data) msleep(QCASPI_QCA7K_REBOOT_TIME_MS); } - if (test_and_clear_bit(SPI_INTR, &qca->intr)) { + if (test_and_clear_bit(SPI_INTR, &qca->flags)) { start_spi_intr_handling(qca, &intr_cause); if (intr_cause & SPI_INT_CPU_ON) { @@ -627,7 +633,7 @@ qcaspi_spi_thread(void *data) /* restart sync */ netdev_dbg(qca->net_dev, "===> rdbuf error!\n"); qca->stats.read_buf_err++; - qca->sync = QCASPI_SYNC_UNKNOWN; + set_bit(SPI_RESET, &qca->flags); continue; } @@ -635,7 +641,7 @@ qcaspi_spi_thread(void *data) /* restart sync */ netdev_dbg(qca->net_dev, "===> wrbuf error!\n"); qca->stats.write_buf_err++; - qca->sync = QCASPI_SYNC_UNKNOWN; + set_bit(SPI_RESET, &qca->flags); continue; } @@ -664,7 +670,7 @@ qcaspi_intr_handler(int irq, void *data) { struct qcaspi *qca = data; - set_bit(SPI_INTR, &qca->intr); + set_bit(SPI_INTR, &qca->flags); if (qca->spi_thread) wake_up_process(qca->spi_thread); @@ -680,7 +686,7 @@ qcaspi_netdev_open(struct net_device *dev) if (!qca) return -EINVAL; - set_bit(SPI_INTR, &qca->intr); + set_bit(SPI_INTR, &qca->flags); qca->sync = QCASPI_SYNC_UNKNOWN; qcafrm_fsm_init_spi(&qca->frm_handle); @@ -799,7 +805,7 @@ qcaspi_netdev_tx_timeout(struct net_device *dev, unsigned int txqueue) jiffies, jiffies - dev_trans_start(dev)); qca->net_dev->stats.tx_errors++; /* Trigger tx queue flush and QCA7000 reset */ - qca->sync = QCASPI_SYNC_UNKNOWN; + set_bit(SPI_RESET, &qca->flags); if (qca->spi_thread) wake_up_process(qca->spi_thread); diff --git a/drivers/net/ethernet/qualcomm/qca_spi.h b/drivers/net/ethernet/qualcomm/qca_spi.h index 8f4808695e82..7ba5c9e2f61c 100644 --- a/drivers/net/ethernet/qualcomm/qca_spi.h +++ b/drivers/net/ethernet/qualcomm/qca_spi.h @@ -81,7 +81,7 @@ struct qcaspi { struct qcafrm_handle frm_handle; struct sk_buff *rx_skb; - 
unsigned long intr; + unsigned long flags; u16 reset_count; #ifdef CONFIG_DEBUG_FS diff --git a/drivers/net/ethernet/realtek/r8169_firmware.c b/drivers/net/ethernet/realtek/r8169_firmware.c index ed6e721b1555..bf055078a855 100644 --- a/drivers/net/ethernet/realtek/r8169_firmware.c +++ b/drivers/net/ethernet/realtek/r8169_firmware.c @@ -215,7 +215,7 @@ int rtl_fw_request_firmware(struct rtl_fw *rtl_fw) { int rc; - rc = request_firmware(&rtl_fw->fw, rtl_fw->fw_name, rtl_fw->dev); + rc = firmware_request_nowarn(&rtl_fw->fw, rtl_fw->fw_name, rtl_fw->dev); if (rc < 0) goto out; @@ -227,7 +227,7 @@ int rtl_fw_request_firmware(struct rtl_fw *rtl_fw) return 0; out: - dev_err(rtl_fw->dev, "Unable to load firmware %s (%d)\n", - rtl_fw->fw_name, rc); + dev_warn(rtl_fw->dev, "Unable to load firmware %s (%d)\n", + rtl_fw->fw_name, rc); return rc; } diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c index 713a89bb21e9..79e7b223bd5b 100644 --- a/drivers/net/ethernet/realtek/r8169_main.c +++ b/drivers/net/ethernet/realtek/r8169_main.c @@ -16,6 +16,7 @@ #include <linux/clk.h> #include <linux/delay.h> #include <linux/ethtool.h> +#include <linux/hwmon.h> #include <linux/phy.h> #include <linux/if_vlan.h> #include <linux/in.h> @@ -617,7 +618,6 @@ struct rtl8169_tc_offsets { }; enum rtl_flag { - RTL_FLAG_TASK_ENABLED = 0, RTL_FLAG_TASK_RESET_PENDING, RTL_FLAG_TASK_RESET_NO_QUEUE_WAKE, RTL_FLAG_TASK_TX_TIMEOUT, @@ -1346,40 +1346,19 @@ static void rtl8168ep_stop_cmac(struct rtl8169_private *tp) RTL_W8(tp, IBCR0, RTL_R8(tp, IBCR0) & ~0x01); } -static void rtl_dash_loop_wait(struct rtl8169_private *tp, - const struct rtl_cond *c, - unsigned long usecs, int n, bool high) -{ - if (!tp->dash_enabled) - return; - rtl_loop_wait(tp, c, usecs, n, high); -} - -static void rtl_dash_loop_wait_high(struct rtl8169_private *tp, - const struct rtl_cond *c, - unsigned long d, int n) -{ - rtl_dash_loop_wait(tp, c, d, n, true); -} - -static void rtl_dash_loop_wait_low(struct rtl8169_private *tp, - const struct rtl_cond *c, - unsigned long d, int n) -{ - rtl_dash_loop_wait(tp, c, d, n, false); -} - static void rtl8168dp_driver_start(struct rtl8169_private *tp) { r8168dp_oob_notify(tp, OOB_CMD_DRIVER_START); - rtl_dash_loop_wait_high(tp, &rtl_dp_ocp_read_cond, 10000, 10); + if (tp->dash_enabled) + rtl_loop_wait_high(tp, &rtl_dp_ocp_read_cond, 10000, 10); } static void rtl8168ep_driver_start(struct rtl8169_private *tp) { r8168ep_ocp_write(tp, 0x01, 0x180, OOB_CMD_DRIVER_START); r8168ep_ocp_write(tp, 0x01, 0x30, r8168ep_ocp_read(tp, 0x30) | 0x01); - rtl_dash_loop_wait_high(tp, &rtl_ep_ocp_read_cond, 10000, 30); + if (tp->dash_enabled) + rtl_loop_wait_high(tp, &rtl_ep_ocp_read_cond, 10000, 30); } static void rtl8168_driver_start(struct rtl8169_private *tp) @@ -1393,7 +1372,8 @@ static void rtl8168_driver_start(struct rtl8169_private *tp) static void rtl8168dp_driver_stop(struct rtl8169_private *tp) { r8168dp_oob_notify(tp, OOB_CMD_DRIVER_STOP); - rtl_dash_loop_wait_low(tp, &rtl_dp_ocp_read_cond, 10000, 10); + if (tp->dash_enabled) + rtl_loop_wait_low(tp, &rtl_dp_ocp_read_cond, 10000, 10); } static void rtl8168ep_driver_stop(struct rtl8169_private *tp) @@ -1401,7 +1381,8 @@ static void rtl8168ep_driver_stop(struct rtl8169_private *tp) rtl8168ep_stop_cmac(tp); r8168ep_ocp_write(tp, 0x01, 0x180, OOB_CMD_DRIVER_STOP); r8168ep_ocp_write(tp, 0x01, 0x30, r8168ep_ocp_read(tp, 0x30) | 0x01); - rtl_dash_loop_wait_low(tp, &rtl_ep_ocp_read_cond, 10000, 10); + if (tp->dash_enabled) + 
rtl_loop_wait_low(tp, &rtl_ep_ocp_read_cond, 10000, 10); } static void rtl8168_driver_stop(struct rtl8169_private *tp) @@ -2160,6 +2141,19 @@ static void rtl8169_get_ringparam(struct net_device *dev, data->tx_pending = NUM_TX_DESC; } +static void rtl8169_get_pause_stats(struct net_device *dev, + struct ethtool_pause_stats *pause_stats) +{ + struct rtl8169_private *tp = netdev_priv(dev); + + if (!rtl_is_8125(tp)) + return; + + rtl8169_update_counters(tp); + pause_stats->tx_pause_frames = le32_to_cpu(tp->counters->tx_pause_on); + pause_stats->rx_pause_frames = le32_to_cpu(tp->counters->rx_pause_on); +} + static void rtl8169_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *data) { @@ -2186,6 +2180,69 @@ static int rtl8169_set_pauseparam(struct net_device *dev, return 0; } +static void rtl8169_get_eth_mac_stats(struct net_device *dev, + struct ethtool_eth_mac_stats *mac_stats) +{ + struct rtl8169_private *tp = netdev_priv(dev); + + rtl8169_update_counters(tp); + + mac_stats->FramesTransmittedOK = + le64_to_cpu(tp->counters->tx_packets); + mac_stats->SingleCollisionFrames = + le32_to_cpu(tp->counters->tx_one_collision); + mac_stats->MultipleCollisionFrames = + le32_to_cpu(tp->counters->tx_multi_collision); + mac_stats->FramesReceivedOK = + le64_to_cpu(tp->counters->rx_packets); + mac_stats->AlignmentErrors = + le16_to_cpu(tp->counters->align_errors); + mac_stats->FramesLostDueToIntMACXmitError = + le64_to_cpu(tp->counters->tx_errors); + mac_stats->BroadcastFramesReceivedOK = + le64_to_cpu(tp->counters->rx_broadcast); + mac_stats->MulticastFramesReceivedOK = + le32_to_cpu(tp->counters->rx_multicast); + + if (!rtl_is_8125(tp)) + return; + + mac_stats->AlignmentErrors = + le32_to_cpu(tp->counters->align_errors32); + mac_stats->OctetsTransmittedOK = + le64_to_cpu(tp->counters->tx_octets); + mac_stats->LateCollisions = + le32_to_cpu(tp->counters->tx_late_collision); + mac_stats->FramesAbortedDueToXSColls = + le32_to_cpu(tp->counters->tx_aborted32); + mac_stats->OctetsReceivedOK = + le64_to_cpu(tp->counters->rx_octets); + mac_stats->FramesLostDueToIntMACRcvError = + le32_to_cpu(tp->counters->rx_mac_error); + mac_stats->MulticastFramesXmittedOK = + le64_to_cpu(tp->counters->tx_multicast64); + mac_stats->BroadcastFramesXmittedOK = + le64_to_cpu(tp->counters->tx_broadcast64); + mac_stats->MulticastFramesReceivedOK = + le64_to_cpu(tp->counters->rx_multicast64); + mac_stats->FrameTooLongErrors = + le32_to_cpu(tp->counters->rx_frame_too_long); +} + +static void rtl8169_get_eth_ctrl_stats(struct net_device *dev, + struct ethtool_eth_ctrl_stats *ctrl_stats) +{ + struct rtl8169_private *tp = netdev_priv(dev); + + if (!rtl_is_8125(tp)) + return; + + rtl8169_update_counters(tp); + + ctrl_stats->UnsupportedOpcodesReceived = + le32_to_cpu(tp->counters->rx_unknown_opcode); +} + static const struct ethtool_ops rtl8169_ethtool_ops = { .supported_coalesce_params = ETHTOOL_COALESCE_USECS | ETHTOOL_COALESCE_MAX_FRAMES, @@ -2207,8 +2264,11 @@ static const struct ethtool_ops rtl8169_ethtool_ops = { .get_link_ksettings = phy_ethtool_get_link_ksettings, .set_link_ksettings = phy_ethtool_set_link_ksettings, .get_ringparam = rtl8169_get_ringparam, + .get_pause_stats = rtl8169_get_pause_stats, .get_pauseparam = rtl8169_get_pauseparam, .set_pauseparam = rtl8169_set_pauseparam, + .get_eth_mac_stats = rtl8169_get_eth_mac_stats, + .get_eth_ctrl_stats = rtl8169_get_eth_ctrl_stats, }; static enum mac_version rtl8169_get_mac_version(u16 xid, bool gmii) @@ -2423,11 +2483,9 @@ u16 
rtl8168h_2_get_adc_bias_ioffset(struct rtl8169_private *tp) static void rtl_schedule_task(struct rtl8169_private *tp, enum rtl_flag flag) { - if (!test_bit(RTL_FLAG_TASK_ENABLED, tp->wk.flags)) - return; - set_bit(flag, tp->wk.flags); - schedule_work(&tp->wk.work); + if (!schedule_work(&tp->wk.work)) + clear_bit(flag, tp->wk.flags); } static void rtl8169_init_phy(struct rtl8169_private *tp) @@ -3893,6 +3951,9 @@ static void rtl_hw_start_8125(struct rtl8169_private *tp) break; } + /* enable extended tally counter */ + r8168_mac_ocp_modify(tp, 0xea84, 0, BIT(1) | BIT(0)); + rtl_hw_config(tp); } @@ -4233,8 +4294,8 @@ static unsigned int rtl8125_quirk_udp_padto(struct rtl8169_private *tp, { unsigned int padto = 0, len = skb->len; - if (rtl_is_8125(tp) && len < 128 + RTL_MIN_PATCH_LEN && - rtl_skb_is_udp(skb) && skb_transport_header_was_set(skb)) { + if (len < 128 + RTL_MIN_PATCH_LEN && rtl_skb_is_udp(skb) && + skb_transport_header_was_set(skb)) { unsigned int trans_data_len = skb_tail_pointer(skb) - skb_transport_header(skb); @@ -4258,9 +4319,15 @@ static unsigned int rtl8125_quirk_udp_padto(struct rtl8169_private *tp, static unsigned int rtl_quirk_packet_padto(struct rtl8169_private *tp, struct sk_buff *skb) { - unsigned int padto; + unsigned int padto = 0; - padto = rtl8125_quirk_udp_padto(tp, skb); + switch (tp->mac_version) { + case RTL_GIGA_MAC_VER_61 ... RTL_GIGA_MAC_VER_63: + padto = rtl8125_quirk_udp_padto(tp, skb); + break; + default: + break; + } switch (tp->mac_version) { case RTL_GIGA_MAC_VER_34: @@ -4712,11 +4779,6 @@ static void rtl_task(struct work_struct *work) container_of(work, struct rtl8169_private, wk.work); int ret; - rtnl_lock(); - - if (!test_bit(RTL_FLAG_TASK_ENABLED, tp->wk.flags)) - goto out_unlock; - if (test_and_clear_bit(RTL_FLAG_TASK_TX_TIMEOUT, tp->wk.flags)) { /* if chip isn't accessible, reset bus to revive it */ if (RTL_R32(tp, TxConfig) == ~0) { @@ -4724,7 +4786,7 @@ static void rtl_task(struct work_struct *work) if (ret < 0) { netdev_err(tp->dev, "Can't reset secondary PCI bus, detach NIC\n"); netif_device_detach(tp->dev); - goto out_unlock; + return; } } @@ -4743,8 +4805,6 @@ reset: } else if (test_and_clear_bit(RTL_FLAG_TASK_RESET_NO_QUEUE_WAKE, tp->wk.flags)) { rtl_reset_work(tp); } -out_unlock: - rtnl_unlock(); } static int rtl8169_poll(struct napi_struct *napi, int budget) @@ -4771,11 +4831,7 @@ static void r8169_phylink_handler(struct net_device *ndev) if (netif_carrier_ok(ndev)) { rtl_link_chg_patch(tp); pm_request_resume(d); - netif_wake_queue(tp->dev); } else { - /* In few cases rx is broken after link-down otherwise */ - if (rtl_is_8125(tp)) - rtl_schedule_task(tp, RTL_FLAG_TASK_RESET_NO_QUEUE_WAKE); pm_runtime_idle(d); } @@ -4806,6 +4862,7 @@ static int r8169_phy_connect(struct rtl8169_private *tp) static void rtl8169_down(struct rtl8169_private *tp) { + disable_work_sync(&tp->wk.work); /* Clear all task flags */ bitmap_zero(tp->wk.flags, RTL_FLAG_MAX); @@ -4834,7 +4891,7 @@ static void rtl8169_up(struct rtl8169_private *tp) phy_resume(tp->phydev); rtl8169_init_phy(tp); napi_enable(&tp->napi); - set_bit(RTL_FLAG_TASK_ENABLED, tp->wk.flags); + enable_work(&tp->wk.work); rtl_reset_work(tp); phy_start(tp->phydev); @@ -4851,8 +4908,6 @@ static int rtl8169_close(struct net_device *dev) rtl8169_down(tp); rtl8169_rx_clear(tp); - cancel_work(&tp->wk.work); - free_irq(tp->irq, tp); phy_disconnect(tp->phydev); @@ -5085,7 +5140,7 @@ static void rtl_remove_one(struct pci_dev *pdev) if (pci_dev_run_wake(pdev)) pm_runtime_get_noresume(&pdev->dev); - 
cancel_work_sync(&tp->wk.work); + disable_work_sync(&tp->wk.work); if (IS_ENABLED(CONFIG_R8169_LEDS)) r8169_remove_leds(tp->leds); @@ -5365,6 +5420,43 @@ static bool rtl_aspm_is_safe(struct rtl8169_private *tp) return false; } +static umode_t r8169_hwmon_is_visible(const void *drvdata, + enum hwmon_sensor_types type, + u32 attr, int channel) +{ + return 0444; +} + +static int r8169_hwmon_read(struct device *dev, enum hwmon_sensor_types type, + u32 attr, int channel, long *val) +{ + struct rtl8169_private *tp = dev_get_drvdata(dev); + int val_raw; + + val_raw = phy_read_paged(tp->phydev, 0xbd8, 0x12) & 0x3ff; + if (val_raw >= 512) + val_raw -= 1024; + + *val = 1000 * val_raw / 2; + + return 0; +} + +static const struct hwmon_ops r8169_hwmon_ops = { + .is_visible = r8169_hwmon_is_visible, + .read = r8169_hwmon_read, +}; + +static const struct hwmon_channel_info * const r8169_hwmon_info[] = { + HWMON_CHANNEL_INFO(temp, HWMON_T_INPUT), + NULL +}; + +static const struct hwmon_chip_info r8169_hwmon_chip_info = { + .ops = &r8169_hwmon_ops, + .info = r8169_hwmon_info, +}; + static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) { struct rtl8169_private *tp; @@ -5462,6 +5554,7 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) tp->irq = pci_irq_vector(pdev, 0); INIT_WORK(&tp->wk.work, rtl_task); + disable_work(&tp->wk.work); rtl_init_mac_address(tp); @@ -5487,11 +5580,6 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) dev->features |= dev->hw_features; - /* There has been a number of reports that using SG/TSO results in - * tx timeouts. However for a lot of people SG/TSO works fine. - * Therefore disable both features by default, but allow users to - * enable them. Use at own risk! - */ if (rtl_chip_supports_csum_v2(tp)) { dev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6; netif_set_tso_max_size(dev, RTL_GSO_MAX_SIZE_V2); @@ -5502,6 +5590,17 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) netif_set_tso_max_segs(dev, RTL_GSO_MAX_SEGS_V1); } + /* There has been a number of reports that using SG/TSO results in + * tx timeouts. However for a lot of people SG/TSO works fine. + * It's not fully clear which chip versions are affected. Vendor + * drivers enable SG/TSO for certain chip versions per default, + * let's mimic this here. On other chip versions users can + * use ethtool to enable SG/TSO, use at own risk! 
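r8169_hwmon_read() above turns the PHY's raw reading into millidegrees
Celsius: the value is a 10-bit two's-complement field with a resolution of
0.5 degrees per LSB. The same arithmetic as a standalone helper (the helper
name and the sample values are illustrative only, not from a datasheet):

::

    /* raw = 0x050 ->   80 ->   80 * 1000 / 2 =  40000 (40.0 degC)
     * raw = 0x3ff -> 1023 -> 1023 - 1024 = -1 ->  -500 (-0.5 degC)
     */
    static long foo_decode_temp_mdegc(int raw)
    {
            int val = raw & 0x3ff;          /* keep the 10-bit field */

            if (val >= 512)                 /* sign bit set */
                    val -= 1024;

            return 1000L * val / 2;         /* 0.5 degC per LSB */
    }

The is_visible callback returning 0444 unconditionally just marks the single
temp1_input attribute world-readable.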
+ */ + if (tp->mac_version >= RTL_GIGA_MAC_VER_46 && + tp->mac_version != RTL_GIGA_MAC_VER_61) + dev->features |= dev->hw_features; + dev->hw_features |= NETIF_F_RXALL; dev->hw_features |= NETIF_F_RXFCS; @@ -5539,6 +5638,12 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) if (rc) return rc; + /* The temperature sensor is available from RTL8125B */ + if (IS_REACHABLE(CONFIG_HWMON) && tp->mac_version >= RTL_GIGA_MAC_VER_63) + /* ignore errors */ + devm_hwmon_device_register_with_info(&pdev->dev, "nic_temp", tp, + &r8169_hwmon_chip_info, + NULL); rc = register_netdev(dev); if (rc) return rc; diff --git a/drivers/net/ethernet/realtek/r8169_phy_config.c b/drivers/net/ethernet/realtek/r8169_phy_config.c index cf29b1208482..d504abba7565 100644 --- a/drivers/net/ethernet/realtek/r8169_phy_config.c +++ b/drivers/net/ethernet/realtek/r8169_phy_config.c @@ -99,7 +99,6 @@ static void rtl8125a_config_eee_phy(struct phy_device *phydev) static void rtl8125b_config_eee_phy(struct phy_device *phydev) { - phy_modify_paged(phydev, 0xa6d, 0x12, 0x0001, 0x0000); phy_modify_paged(phydev, 0xa6d, 0x14, 0x0010, 0x0000); phy_modify_paged(phydev, 0xa42, 0x14, 0x0080, 0x0000); phy_modify_paged(phydev, 0xa4a, 0x11, 0x0200, 0x0000); diff --git a/drivers/net/ethernet/renesas/ravb.h b/drivers/net/ethernet/renesas/ravb.h index a7de5cf6b317..7b48060c250b 100644 --- a/drivers/net/ethernet/renesas/ravb.h +++ b/drivers/net/ethernet/renesas/ravb.h @@ -998,6 +998,8 @@ enum CSR1_BIT { CSR1_TDHD = 0x08000000, }; +#define CSR1_CSUM_ENABLE (CSR1_TTCP4 | CSR1_TUDP4 | CSR1_TTCP6 | CSR1_TUDP6) + enum CSR2_BIT { CSR2_RIP4 = 0x00000001, CSR2_RTCP4 = 0x00000010, @@ -1012,6 +1014,9 @@ enum CSR2_BIT { CSR2_RDHD = 0x08000000, }; +#define CSR2_CSUM_ENABLE (CSR2_RTCP4 | CSR2_RUDP4 | CSR2_RICMP4 | \ + CSR2_RTCP6 | CSR2_RUDP6 | CSR2_RICMP6) + #define DBAT_ENTRY_NUM 22 #define RX_QUEUE_OFFSET 4 #define NUM_RX_QUEUE 2 @@ -1050,6 +1055,7 @@ struct ravb_hw_info { size_t gstrings_size; netdev_features_t net_hw_features; netdev_features_t net_features; + netdev_features_t vlan_features; int stats_len; u32 tccr_mask; u32 tx_max_frame_size; diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c index 907af4651c55..ac0f093f647a 100644 --- a/drivers/net/ethernet/renesas/ravb_main.c +++ b/drivers/net/ethernet/renesas/ravb_main.c @@ -504,11 +504,10 @@ static void ravb_csum_init_gbeth(struct net_device *ndev) ndev->features &= ~NETIF_F_RXCSUM; } else { if (tx_enable) - ravb_write(ndev, CSR1_TIP4 | CSR1_TTCP4 | CSR1_TUDP4, CSR1); + ravb_write(ndev, CSR1_CSUM_ENABLE, CSR1); if (rx_enable) - ravb_write(ndev, CSR2_RIP4 | CSR2_RTCP4 | CSR2_RUDP4 | CSR2_RICMP4, - CSR2); + ravb_write(ndev, CSR2_CSUM_ENABLE, CSR2); } done: @@ -750,38 +749,34 @@ static void ravb_get_tx_tstamp(struct net_device *ndev) static void ravb_rx_csum_gbeth(struct sk_buff *skb) { struct skb_shared_info *shinfo = skb_shinfo(skb); - __wsum csum_ip_hdr, csum_proto; - skb_frag_t *last_frag; - u8 *hw_csum; + size_t csum_len; + u16 *hw_csum; - /* The hardware checksum status is contained in sizeof(__sum16) * 2 = 4 - * bytes appended to packet data. First 2 bytes is ip header checksum - * and last 2 bytes is protocol checksum. + /* The hardware checksum status is contained in 4 bytes appended to + * packet data. + * + * For ipv4, the first 2 bytes are the ip header checksum status. We can + * ignore this as it will always be re-checked in inet_gro_receive().
+ * + * The last 2 bytes are the protocol checksum status which will be zero + * if the checksum has been validated. */ - if (unlikely(skb->len < sizeof(__sum16) * 2)) + csum_len = sizeof(*hw_csum) * 2; + if (unlikely(skb->len < csum_len)) return; if (skb_is_nonlinear(skb)) { - last_frag = &shinfo->frags[shinfo->nr_frags - 1]; - hw_csum = skb_frag_address(last_frag) + - skb_frag_size(last_frag); + skb_frag_t *last_frag = &shinfo->frags[shinfo->nr_frags - 1]; + + hw_csum = (u16 *)(skb_frag_address(last_frag) + + skb_frag_size(last_frag)); + skb_frag_size_sub(last_frag, csum_len); } else { - hw_csum = skb_tail_pointer(skb); + hw_csum = (u16 *)skb_tail_pointer(skb); + skb_trim(skb, skb->len - csum_len); } - hw_csum -= sizeof(__sum16); - csum_proto = csum_unfold((__force __sum16)get_unaligned_le16(hw_csum)); - - hw_csum -= sizeof(__sum16); - csum_ip_hdr = csum_unfold((__force __sum16)get_unaligned_le16(hw_csum)); - - if (skb_is_nonlinear(skb)) - skb_frag_size_sub(last_frag, 2 * sizeof(__sum16)); - else - skb_trim(skb, skb->len - 2 * sizeof(__sum16)); - - /* TODO: IPV6 Rx checksum */ - if (skb->protocol == htons(ETH_P_IP) && !csum_ip_hdr && !csum_proto) + if (!get_unaligned(--hw_csum)) skb->ip_summed = CHECKSUM_UNNECESSARY; } @@ -2067,32 +2062,44 @@ out_unlock: static bool ravb_can_tx_csum_gbeth(struct sk_buff *skb) { - struct iphdr *ip = ip_hdr(skb); + u16 net_protocol = ntohs(skb->protocol); + u8 inner_protocol; - /* TODO: Need to add support for VLAN tag 802.1Q */ - if (skb_vlan_tag_present(skb)) - return false; + /* GbEth IP can calculate the checksum if: + * - there are zero or one VLAN headers with TPID=0x8100 + * - the network protocol is IPv4 or IPv6 + * - the transport protocol is TCP, UDP or ICMP + * - the packet is not fragmented + */ - /* TODO: Need to add hardware checksum for IPv6 */ - if (skb->protocol != htons(ETH_P_IP)) - return false; + if (net_protocol == ETH_P_8021Q) { + struct vlan_hdr vhdr, *vh; - switch (ip->protocol) { - case IPPROTO_TCP: - break; - case IPPROTO_UDP: - /* If the checksum value in the UDP header field is 0, TOE does - * not calculate checksum for UDP part of this frame as it is - * optional function as per standards. 
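The rewritten ravb_rx_csum_gbeth() above treats the 4-byte trailer as two
16-bit status words and only inspects the last one. Reduced to the
linear-skb half of the pattern (hypothetical name; the driver additionally
handles nonlinear skbs by shrinking the last fragment instead):

::

    static void foo_rx_csum_trailer(struct sk_buff *skb)
    {
            u16 *hw_csum;

            if (unlikely(skb->len < 2 * sizeof(*hw_csum)))
                    return;

            /* point past the trailer, then trim it off the packet */
            hw_csum = (u16 *)skb_tail_pointer(skb);
            skb_trim(skb, skb->len - 2 * sizeof(*hw_csum));

            /* last status word is zero if hardware validated the csum */
            if (!get_unaligned(--hw_csum))
                    skb->ip_summed = CHECKSUM_UNNECESSARY;
    }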
- */ - if (udp_hdr(skb)->check == 0) + vh = skb_header_pointer(skb, ETH_HLEN, sizeof(vhdr), &vhdr); + if (!vh) return false; + + net_protocol = ntohs(vh->h_vlan_encapsulated_proto); + } + + switch (net_protocol) { + case ETH_P_IP: + inner_protocol = ip_hdr(skb)->protocol; + break; + case ETH_P_IPV6: + inner_protocol = ipv6_hdr(skb)->nexthdr; break; default: return false; } - return true; + switch (inner_protocol) { + case IPPROTO_TCP: + case IPPROTO_UDP: + return true; + default: + return false; + } } /* Packet transmit function for Ethernet AVB */ @@ -2530,7 +2537,7 @@ static int ravb_set_features_gbeth(struct net_device *ndev, spin_lock_irqsave(&priv->lock, flags); if (changed & NETIF_F_RXCSUM) { if (features & NETIF_F_RXCSUM) - val = CSR2_RIP4 | CSR2_RTCP4 | CSR2_RUDP4 | CSR2_RICMP4; + val = CSR2_CSUM_ENABLE; else val = 0; @@ -2541,7 +2548,7 @@ static int ravb_set_features_gbeth(struct net_device *ndev, if (changed & NETIF_F_HW_CSUM) { if (features & NETIF_F_HW_CSUM) - val = CSR1_TIP4 | CSR1_TTCP4 | CSR1_TUDP4; + val = CSR1_CSUM_ENABLE; else val = 0; @@ -2778,6 +2785,7 @@ static const struct ravb_hw_info gbeth_hw_info = { .gstrings_size = sizeof(ravb_gstrings_stats_gbeth), .net_hw_features = NETIF_F_RXCSUM | NETIF_F_HW_CSUM, .net_features = NETIF_F_RXCSUM | NETIF_F_HW_CSUM, + .vlan_features = NETIF_F_RXCSUM | NETIF_F_HW_CSUM, .stats_len = ARRAY_SIZE(ravb_gstrings_stats_gbeth), .tccr_mask = TCCR_TSRQ0, .tx_max_frame_size = 1522, @@ -2920,6 +2928,7 @@ static int ravb_probe(struct platform_device *pdev) ndev->features = info->net_features; ndev->hw_features = info->net_hw_features; + ndev->vlan_features = info->vlan_features; error = reset_control_deassert(rstc); if (error) @@ -3290,7 +3299,7 @@ static const struct dev_pm_ops ravb_dev_pm_ops = { static struct platform_driver ravb_driver = { .probe = ravb_probe, - .remove_new = ravb_remove, + .remove = ravb_remove, .driver = { .name = "ravb", .pm = pm_ptr(&ravb_dev_pm_ops), diff --git a/drivers/net/ethernet/renesas/rswitch.c b/drivers/net/ethernet/renesas/rswitch.c index b80aa27a7214..8d18dae4d8fb 100644 --- a/drivers/net/ethernet/renesas/rswitch.c +++ b/drivers/net/ethernet/renesas/rswitch.c @@ -2188,7 +2188,7 @@ static DEFINE_SIMPLE_DEV_PM_OPS(renesas_eth_sw_pm_ops, renesas_eth_sw_suspend, static struct platform_driver renesas_eth_sw_driver_platform = { .probe = renesas_eth_sw_probe, - .remove_new = renesas_eth_sw_remove, + .remove = renesas_eth_sw_remove, .driver = { .name = "renesas_eth_sw", .pm = pm_sleep_ptr(&renesas_eth_sw_pm_ops), diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c index 7a25903e35c3..8887b8921009 100644 --- a/drivers/net/ethernet/renesas/sh_eth.c +++ b/drivers/net/ethernet/renesas/sh_eth.c @@ -3560,7 +3560,7 @@ MODULE_DEVICE_TABLE(platform, sh_eth_id_table); static struct platform_driver sh_eth_driver = { .probe = sh_eth_drv_probe, - .remove_new = sh_eth_drv_remove, + .remove = sh_eth_drv_remove, .id_table = sh_eth_id_table, .driver = { .name = CARDNAME, diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_platform.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_platform.c index e6e130dbe1de..2eccc7617507 100644 --- a/drivers/net/ethernet/samsung/sxgbe/sxgbe_platform.c +++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_platform.c @@ -224,7 +224,7 @@ MODULE_DEVICE_TABLE(of, sxgbe_dt_ids); static struct platform_driver sxgbe_platform_driver = { .probe = sxgbe_platform_probe, - .remove_new = sxgbe_platform_remove, + .remove = sxgbe_platform_remove, .driver = { .name = 
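ravb_can_tx_csum_gbeth() above has to look one VLAN header deep before it
knows the real network protocol, and skb_header_pointer() is used because
that header may sit outside the skb's linear area (it copies into the
caller's buffer in that case). The peek in isolation, as a hypothetical
helper:

::

    static __be16 foo_encapsulated_proto(const struct sk_buff *skb)
    {
            struct vlan_hdr vhdr, *vh;

            if (skb->protocol != htons(ETH_P_8021Q))
                    return skb->protocol;

            vh = skb_header_pointer(skb, ETH_HLEN, sizeof(vhdr), &vhdr);

            /* 0 is never a valid ethertype here; callers treat it as
             * "cannot offload"
             */
            return vh ? vh->h_vlan_encapsulated_proto : 0;
    }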
SXGBE_RESOURCE_NAME, .pm = &sxgbe_platform_pm_ops, diff --git a/drivers/net/ethernet/seeq/sgiseeq.c b/drivers/net/ethernet/seeq/sgiseeq.c index 76356dadf233..7967a0ee320b 100644 --- a/drivers/net/ethernet/seeq/sgiseeq.c +++ b/drivers/net/ethernet/seeq/sgiseeq.c @@ -832,7 +832,7 @@ static void sgiseeq_remove(struct platform_device *pdev) static struct platform_driver sgiseeq_driver = { .probe = sgiseeq_probe, - .remove_new = sgiseeq_remove, + .remove = sgiseeq_remove, .driver = { .name = "sgiseeq", } diff --git a/drivers/net/ethernet/sfc/ef100_rx.c b/drivers/net/ethernet/sfc/ef100_rx.c index 83d9db71d7d7..44dc75feb162 100644 --- a/drivers/net/ethernet/sfc/ef100_rx.c +++ b/drivers/net/ethernet/sfc/ef100_rx.c @@ -134,6 +134,9 @@ void __ef100_rx_packet(struct efx_channel *channel) goto free_rx_buffer; } + ++rx_queue->rx_packets; + rx_queue->rx_bytes += rx_buf->len; + efx_rx_packet_gro(channel, rx_buf, channel->rx_pkt_n_frags, eh, csum); goto out; @@ -149,8 +152,6 @@ static void ef100_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index) struct efx_channel *channel = efx_rx_queue_channel(rx_queue); struct efx_nic *efx = rx_queue->efx; - ++rx_queue->rx_packets; - netif_vdbg(efx, rx_status, efx->net_dev, "RX queue %d received id %x\n", efx_rx_queue_index(rx_queue), index); diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c index 36b3b57e2055..90bb7db15519 100644 --- a/drivers/net/ethernet/sfc/efx.c +++ b/drivers/net/ethernet/sfc/efx.c @@ -22,6 +22,7 @@ #include "net_driver.h" #include <net/gre.h> #include <net/udp_tunnel.h> +#include <net/netdev_queues.h> #include "efx.h" #include "efx_common.h" #include "efx_channels.h" @@ -626,6 +627,113 @@ static const struct net_device_ops efx_netdev_ops = { .ndo_bpf = efx_xdp }; +static void efx_get_queue_stats_rx(struct net_device *net_dev, int idx, + struct netdev_queue_stats_rx *stats) +{ + struct efx_nic *efx = efx_netdev_priv(net_dev); + struct efx_rx_queue *rx_queue; + struct efx_channel *channel; + + channel = efx_get_channel(efx, idx); + rx_queue = efx_channel_get_rx_queue(channel); + /* Count only packets since last time datapath was started */ + stats->packets = rx_queue->rx_packets - rx_queue->old_rx_packets; + stats->bytes = rx_queue->rx_bytes - rx_queue->old_rx_bytes; + stats->hw_drops = efx_get_queue_stat_rx_hw_drops(channel) - + channel->old_n_rx_hw_drops; + stats->hw_drop_overruns = channel->n_rx_nodesc_trunc - + channel->old_n_rx_hw_drop_overruns; +} + +static void efx_get_queue_stats_tx(struct net_device *net_dev, int idx, + struct netdev_queue_stats_tx *stats) +{ + struct efx_nic *efx = efx_netdev_priv(net_dev); + struct efx_tx_queue *tx_queue; + struct efx_channel *channel; + + channel = efx_get_tx_channel(efx, idx); + stats->packets = 0; + stats->bytes = 0; + stats->hw_gso_packets = 0; + stats->hw_gso_wire_packets = 0; + efx_for_each_channel_tx_queue(tx_queue, channel) { + stats->packets += tx_queue->complete_packets - + tx_queue->old_complete_packets; + stats->bytes += tx_queue->complete_bytes - + tx_queue->old_complete_bytes; + /* Note that, unlike stats->packets and stats->bytes, + * these count TXes enqueued, rather than completed, + * which may not be what users expect. 
+ */ + stats->hw_gso_packets += tx_queue->tso_bursts - + tx_queue->old_tso_bursts; + stats->hw_gso_wire_packets += tx_queue->tso_packets - + tx_queue->old_tso_packets; + } +} + +static void efx_get_base_stats(struct net_device *net_dev, + struct netdev_queue_stats_rx *rx, + struct netdev_queue_stats_tx *tx) +{ + struct efx_nic *efx = efx_netdev_priv(net_dev); + struct efx_tx_queue *tx_queue; + struct efx_rx_queue *rx_queue; + struct efx_channel *channel; + + rx->packets = 0; + rx->bytes = 0; + rx->hw_drops = 0; + rx->hw_drop_overruns = 0; + tx->packets = 0; + tx->bytes = 0; + tx->hw_gso_packets = 0; + tx->hw_gso_wire_packets = 0; + + /* Count all packets on non-core queues, and packets before last + * datapath start on core queues. + */ + efx_for_each_channel(channel, efx) { + rx_queue = efx_channel_get_rx_queue(channel); + if (channel->channel >= net_dev->real_num_rx_queues) { + rx->packets += rx_queue->rx_packets; + rx->bytes += rx_queue->rx_bytes; + rx->hw_drops += efx_get_queue_stat_rx_hw_drops(channel); + rx->hw_drop_overruns += channel->n_rx_nodesc_trunc; + } else { + rx->packets += rx_queue->old_rx_packets; + rx->bytes += rx_queue->old_rx_bytes; + rx->hw_drops += channel->old_n_rx_hw_drops; + rx->hw_drop_overruns += channel->old_n_rx_hw_drop_overruns; + } + efx_for_each_channel_tx_queue(tx_queue, channel) { + if (channel->channel < efx->tx_channel_offset || + channel->channel >= efx->tx_channel_offset + + net_dev->real_num_tx_queues) { + tx->packets += tx_queue->complete_packets; + tx->bytes += tx_queue->complete_bytes; + tx->hw_gso_packets += tx_queue->tso_bursts; + tx->hw_gso_wire_packets += tx_queue->tso_packets; + } else { + tx->packets += tx_queue->old_complete_packets; + tx->bytes += tx_queue->old_complete_bytes; + tx->hw_gso_packets += tx_queue->old_tso_bursts; + tx->hw_gso_wire_packets += tx_queue->old_tso_packets; + } + /* Include XDP TX in device-wide stats */ + tx->packets += tx_queue->complete_xdp_packets; + tx->bytes += tx_queue->complete_xdp_bytes; + } + } +} + +static const struct netdev_stat_ops efx_stat_ops = { + .get_queue_stats_rx = efx_get_queue_stats_rx, + .get_queue_stats_tx = efx_get_queue_stats_tx, + .get_base_stats = efx_get_base_stats, +}; + static int efx_xdp_setup_prog(struct efx_nic *efx, struct bpf_prog *prog) { struct bpf_prog *old_prog; @@ -716,6 +824,7 @@ static int efx_register_netdev(struct efx_nic *efx) net_dev->watchdog_timeo = 5 * HZ; net_dev->irq = efx->pci_dev->irq; net_dev->netdev_ops = &efx_netdev_ops; + net_dev->stat_ops = &efx_stat_ops; if (efx_nic_rev(efx) >= EFX_REV_HUNT_A0) net_dev->priv_flags |= IFF_UNICAST_FLT; net_dev->ethtool_ops = &efx_ethtool_ops; diff --git a/drivers/net/ethernet/sfc/efx_channels.c b/drivers/net/ethernet/sfc/efx_channels.c index f1723a6fb082..06b4f52713ef 100644 --- a/drivers/net/ethernet/sfc/efx_channels.c +++ b/drivers/net/ethernet/sfc/efx_channels.c @@ -1100,6 +1100,10 @@ void efx_start_channels(struct efx_nic *efx) atomic_inc(&efx->active_queues); } + /* reset per-queue stats */ + channel->old_n_rx_hw_drops = efx_get_queue_stat_rx_hw_drops(channel); + channel->old_n_rx_hw_drop_overruns = channel->n_rx_nodesc_trunc; + efx_for_each_channel_rx_queue(rx_queue, channel) { efx_init_rx_queue(rx_queue); atomic_inc(&efx->active_queues); @@ -1209,6 +1213,8 @@ static int efx_process_channel(struct efx_channel *channel, int budget) tx_queue->pkts_compl, tx_queue->bytes_compl); } + tx_queue->complete_packets += tx_queue->pkts_compl; + tx_queue->complete_bytes += tx_queue->bytes_compl; } /* Receive any packets we queued up 
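The sfc changes above implement the netlink per-queue statistics interface.
Its contract is that base + sum(per-queue) stays monotonic, which is why the
queue callbacks report deltas against a snapshot taken at the last datapath
(re)start while efx_get_base_stats() folds in everything else. A skeleton of
the wiring, with hypothetical "foo" names:

::

    static void foo_get_queue_stats_rx(struct net_device *dev, int idx,
                                       struct netdev_queue_stats_rx *stats)
    {
            struct foo_rxq *rxq = foo_get_rxq(dev, idx);

            /* delta since the queue was last (re)started */
            stats->packets = rxq->packets - rxq->packets_at_start;
            stats->bytes = rxq->bytes - rxq->bytes_at_start;
    }

    static void foo_get_base_stats(struct net_device *dev,
                                   struct netdev_queue_stats_rx *rx,
                                   struct netdev_queue_stats_tx *tx)
    {
            struct foo_priv *priv = netdev_priv(dev);

            /* pre-restart totals plus queues not visible to userspace */
            rx->packets = priv->base_rx_packets;
            rx->bytes = priv->base_rx_bytes;
            tx->packets = priv->base_tx_packets;
            tx->bytes = priv->base_tx_bytes;
    }

    static const struct netdev_stat_ops foo_stat_ops = {
            .get_queue_stats_rx = foo_get_queue_stats_rx,
            .get_base_stats = foo_get_base_stats,
    };

As in the efx hunk, the ops are hooked up once at registration time via
net_dev->stat_ops, and any netdev netlink qstats consumer picks them up.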
*/ diff --git a/drivers/net/ethernet/sfc/efx_channels.h b/drivers/net/ethernet/sfc/efx_channels.h index 46b702648721..547cf94014a3 100644 --- a/drivers/net/ethernet/sfc/efx_channels.h +++ b/drivers/net/ethernet/sfc/efx_channels.h @@ -43,6 +43,13 @@ struct efx_channel *efx_copy_channel(const struct efx_channel *old_channel); void efx_start_channels(struct efx_nic *efx); void efx_stop_channels(struct efx_nic *efx); +static inline u64 efx_get_queue_stat_rx_hw_drops(struct efx_channel *channel) +{ + return channel->n_rx_eth_crc_err + channel->n_rx_frm_trunc + + channel->n_rx_overlength + channel->n_rx_nodesc_trunc + + channel->n_rx_mport_bad; +} + void efx_init_napi_channel(struct efx_channel *channel); void efx_init_napi(struct efx_nic *efx); void efx_fini_napi_channel(struct efx_channel *channel); diff --git a/drivers/net/ethernet/sfc/ethtool_common.c b/drivers/net/ethernet/sfc/ethtool_common.c index 6ded44b86052..ae32e08540fa 100644 --- a/drivers/net/ethernet/sfc/ethtool_common.c +++ b/drivers/net/ethernet/sfc/ethtool_common.c @@ -75,7 +75,6 @@ static const struct efx_sw_stat_desc efx_sw_stat_desc[] = { EFX_ETHTOOL_UINT_TXQ_STAT(pio_packets), EFX_ETHTOOL_UINT_TXQ_STAT(cb_packets), EFX_ETHTOOL_ATOMIC_NIC_ERROR_STAT(rx_reset), - EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_tobe_disc), EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_ip_hdr_chksum_err), EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_tcp_udp_chksum_err), EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_inner_ip_hdr_chksum_err), @@ -83,8 +82,8 @@ static const struct efx_sw_stat_desc efx_sw_stat_desc[] = { EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_outer_ip_hdr_chksum_err), EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_outer_tcp_udp_chksum_err), EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_eth_crc_err), - EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_mcast_mismatch), EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_frm_trunc), + EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_overlength), EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_merge_events), EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_merge_packets), EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_xdp_drops), diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h index b85c51cbe7f9..b54662d32f55 100644 --- a/drivers/net/ethernet/sfc/net_driver.h +++ b/drivers/net/ethernet/sfc/net_driver.h @@ -193,6 +193,12 @@ struct efx_tx_buffer { * @initialised: Has hardware queue been initialised? * @timestamping: Is timestamping enabled for this channel? * @xdp_tx: Is this an XDP tx queue? + * @old_complete_packets: Value of @complete_packets as of last + * efx_init_tx_queue() + * @old_complete_bytes: Value of @complete_bytes as of last + * efx_init_tx_queue() + * @old_tso_bursts: Value of @tso_bursts as of last efx_init_tx_queue() + * @old_tso_packets: Value of @tso_packets as of last efx_init_tx_queue() * @read_count: Current read pointer. * This is the number of buffers that have been removed from both rings. * @old_write_count: The value of @write_count when last checked. @@ -202,6 +208,20 @@ struct efx_tx_buffer { * avoid cache-line ping-pong between the xmit path and the * completion path. * @merge_events: Number of TX merged completion events + * @bytes_compl: Number of bytes completed during this NAPI poll + * (efx_process_channel()). For BQL. + * @pkts_compl: Number of packets completed during this NAPI poll. + * @complete_packets: Number of packets completed since this struct was + * created. Only counts SKB packets, not XDP TX (it accumulates + * the same values that are reported to BQL). + * @complete_bytes: Number of bytes completed since this struct was + * created. 
For TSO, counts the superframe size, not the sizes of + * generated frames on the wire (i.e. the headers are only counted + * once) + * @complete_xdp_packets: Number of XDP TX packets completed since this + * struct was created. + * @complete_xdp_bytes: Number of XDP TX bytes completed since this + * struct was created. * @completed_timestamp_major: Top part of the most recent tx timestamp. * @completed_timestamp_minor: Low part of the most recent tx timestamp. * @insert_count: Current insert pointer @@ -232,6 +252,7 @@ struct efx_tx_buffer { * @xmit_pending: Are any packets waiting to be pushed to the NIC * @cb_packets: Number of times the TX copybreak feature has been used * @notify_count: Count of notified descriptors to the NIC + * @tx_packets: Number of packets sent since this struct was created * @empty_read_count: If the completion path has seen the queue as empty * and the transmission path has not yet checked this, the value of * @read_count bitwise-added to %EFX_EMPTY_COUNT_VALID; otherwise 0. @@ -255,6 +276,10 @@ struct efx_tx_queue { bool initialised; bool timestamping; bool xdp_tx; + unsigned long old_complete_packets; + unsigned long old_complete_bytes; + unsigned int old_tso_bursts; + unsigned int old_tso_packets; /* Members used mainly on the completion path */ unsigned int read_count ____cacheline_aligned_in_smp; @@ -262,6 +287,10 @@ struct efx_tx_queue { unsigned int merge_events; unsigned int bytes_compl; unsigned int pkts_compl; + unsigned long complete_packets; + unsigned long complete_bytes; + unsigned long complete_xdp_packets; + unsigned long complete_xdp_bytes; u32 completed_timestamp_major; u32 completed_timestamp_minor; @@ -370,6 +399,10 @@ struct efx_rx_page_state { * @recycle_count: RX buffer recycle counter. * @slow_fill: Timer used to defer efx_nic_generate_fill_event(). * @grant_work: workitem used to grant credits to the MAE if @grant_credits + * @rx_packets: Number of packets received since this struct was created + * @rx_bytes: Number of bytes received since this struct was created + * @old_rx_packets: Value of @rx_packets as of last efx_init_rx_queue() + * @old_rx_bytes: Value of @rx_bytes as of last efx_init_rx_queue() * @xdp_rxq_info: XDP specific RX queue information. * @xdp_rxq_info_valid: Is xdp_rxq_info valid data?. 
*/ @@ -406,6 +439,9 @@ struct efx_rx_queue { struct work_struct grant_work; /* Statistics to supplement MAC stats */ unsigned long rx_packets; + unsigned long rx_bytes; + unsigned long old_rx_packets; + unsigned long old_rx_bytes; struct xdp_rxq_info xdp_rxq_info; bool xdp_rxq_info_valid; }; @@ -451,10 +487,8 @@ enum efx_sync_events_state { * @filter_work: Work item for efx_filter_rfs_expire() * @rps_flow_id: Flow IDs of filters allocated for accelerated RFS, * indexed by filter ID - * @n_rx_tobe_disc: Count of RX_TOBE_DISC errors * @n_rx_ip_hdr_chksum_err: Count of RX IP header checksum errors * @n_rx_tcp_udp_chksum_err: Count of RX TCP and UDP checksum errors - * @n_rx_mcast_mismatch: Count of unmatched multicast frames * @n_rx_frm_trunc: Count of RX_FRM_TRUNC errors * @n_rx_overlength: Count of RX_OVERLENGTH errors * @n_skbuff_leaks: Count of skbuffs leaked due to RX overrun @@ -468,6 +502,10 @@ enum efx_sync_events_state { * @n_rx_xdp_redirect: Count of RX packets redirected to a different NIC by XDP * @n_rx_mport_bad: Count of RX packets dropped because their ingress mport was * not recognised + * @old_n_rx_hw_drops: Count of all RX packets dropped for any reason as of last + * efx_start_channels() + * @old_n_rx_hw_drop_overruns: Value of @n_rx_nodesc_trunc as of last + * efx_start_channels() * @rx_pkt_n_frags: Number of fragments in next packet to be delivered by * __efx_rx_packet(), or zero if there is none * @rx_pkt_index: Ring index of first buffer for next packet to be delivered @@ -511,7 +549,6 @@ struct efx_channel { u32 *rps_flow_id; #endif - unsigned int n_rx_tobe_disc; unsigned int n_rx_ip_hdr_chksum_err; unsigned int n_rx_tcp_udp_chksum_err; unsigned int n_rx_outer_ip_hdr_chksum_err; @@ -519,7 +556,6 @@ struct efx_channel { unsigned int n_rx_inner_ip_hdr_chksum_err; unsigned int n_rx_inner_tcp_udp_chksum_err; unsigned int n_rx_eth_crc_err; - unsigned int n_rx_mcast_mismatch; unsigned int n_rx_frm_trunc; unsigned int n_rx_overlength; unsigned int n_skbuff_leaks; @@ -532,6 +568,9 @@ struct efx_channel { unsigned int n_rx_xdp_redirect; unsigned int n_rx_mport_bad; + unsigned int old_n_rx_hw_drops; + unsigned int old_n_rx_hw_drop_overruns; + unsigned int rx_pkt_n_frags; unsigned int rx_pkt_index; diff --git a/drivers/net/ethernet/sfc/rx.c b/drivers/net/ethernet/sfc/rx.c index f77a2d3ef37e..ffca82207e47 100644 --- a/drivers/net/ethernet/sfc/rx.c +++ b/drivers/net/ethernet/sfc/rx.c @@ -125,8 +125,6 @@ void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index, struct efx_channel *channel = efx_rx_queue_channel(rx_queue); struct efx_rx_buffer *rx_buf; - rx_queue->rx_packets++; - rx_buf = efx_rx_buffer(rx_queue, index); rx_buf->flags |= flags; @@ -394,6 +392,9 @@ void __efx_rx_packet(struct efx_channel *channel) goto out; } + rx_queue->rx_packets++; + rx_queue->rx_bytes += rx_buf->len; + if (!efx_do_xdp(efx, channel, rx_buf, &eh)) goto out; diff --git a/drivers/net/ethernet/sfc/rx_common.c b/drivers/net/ethernet/sfc/rx_common.c index 0b7dc75c40f9..ab358fe13e1d 100644 --- a/drivers/net/ethernet/sfc/rx_common.c +++ b/drivers/net/ethernet/sfc/rx_common.c @@ -241,6 +241,9 @@ void efx_init_rx_queue(struct efx_rx_queue *rx_queue) rx_queue->page_recycle_failed = 0; rx_queue->page_recycle_full = 0; + rx_queue->old_rx_packets = rx_queue->rx_packets; + rx_queue->old_rx_bytes = rx_queue->rx_bytes; + /* Initialise limit fields */ max_fill = efx->rxq_entries - EFX_RXD_HEAD_ROOM; max_trigger = diff --git a/drivers/net/ethernet/sfc/tx.c b/drivers/net/ethernet/sfc/tx.c index 
fe2d476028e7..822ec6564b2d 100644 --- a/drivers/net/ethernet/sfc/tx.c +++ b/drivers/net/ethernet/sfc/tx.c @@ -553,6 +553,7 @@ netdev_tx_t efx_hard_start_xmit(struct sk_buff *skb, void efx_xmit_done_single(struct efx_tx_queue *tx_queue) { + unsigned int xdp_pkts_compl = 0, xdp_bytes_compl = 0; unsigned int pkts_compl = 0, bytes_compl = 0; unsigned int efv_pkts_compl = 0; unsigned int read_ptr; @@ -577,7 +578,8 @@ void efx_xmit_done_single(struct efx_tx_queue *tx_queue) if (buffer->flags & EFX_TX_BUF_SKB) finished = true; efx_dequeue_buffer(tx_queue, buffer, &pkts_compl, &bytes_compl, - &efv_pkts_compl); + &efv_pkts_compl, &xdp_pkts_compl, + &xdp_bytes_compl); ++tx_queue->read_count; read_ptr = tx_queue->read_count & tx_queue->ptr_mask; @@ -585,6 +587,8 @@ void efx_xmit_done_single(struct efx_tx_queue *tx_queue) tx_queue->pkts_compl += pkts_compl; tx_queue->bytes_compl += bytes_compl; + tx_queue->complete_xdp_packets += xdp_pkts_compl; + tx_queue->complete_xdp_bytes += xdp_bytes_compl; EFX_WARN_ON_PARANOID(pkts_compl + efv_pkts_compl != 1); diff --git a/drivers/net/ethernet/sfc/tx_common.c b/drivers/net/ethernet/sfc/tx_common.c index 2adb132b2f7e..a22a0d634ffc 100644 --- a/drivers/net/ethernet/sfc/tx_common.c +++ b/drivers/net/ethernet/sfc/tx_common.c @@ -86,6 +86,11 @@ void efx_init_tx_queue(struct efx_tx_queue *tx_queue) tx_queue->completed_timestamp_major = 0; tx_queue->completed_timestamp_minor = 0; + tx_queue->old_complete_packets = tx_queue->complete_packets; + tx_queue->old_complete_bytes = tx_queue->complete_bytes; + tx_queue->old_tso_bursts = tx_queue->tso_bursts; + tx_queue->old_tso_packets = tx_queue->tso_packets; + tx_queue->xdp_tx = efx_channel_is_xdp_tx(tx_queue->channel); tx_queue->tso_version = 0; @@ -109,12 +114,14 @@ void efx_fini_tx_queue(struct efx_tx_queue *tx_queue) /* Free any buffers left in the ring */ while (tx_queue->read_count != tx_queue->write_count) { + unsigned int xdp_pkts_compl = 0, xdp_bytes_compl = 0; unsigned int pkts_compl = 0, bytes_compl = 0; unsigned int efv_pkts_compl = 0; buffer = &tx_queue->buffer[tx_queue->read_count & tx_queue->ptr_mask]; efx_dequeue_buffer(tx_queue, buffer, &pkts_compl, &bytes_compl, - &efv_pkts_compl); + &efv_pkts_compl, &xdp_pkts_compl, + &xdp_bytes_compl); ++tx_queue->read_count; } @@ -150,7 +157,9 @@ void efx_dequeue_buffer(struct efx_tx_queue *tx_queue, struct efx_tx_buffer *buffer, unsigned int *pkts_compl, unsigned int *bytes_compl, - unsigned int *efv_pkts_compl) + unsigned int *efv_pkts_compl, + unsigned int *xdp_pkts, + unsigned int *xdp_bytes) { if (buffer->unmap_len) { struct device *dma_dev = &tx_queue->efx->pci_dev->dev; @@ -195,6 +204,10 @@ void efx_dequeue_buffer(struct efx_tx_queue *tx_queue, tx_queue->queue, tx_queue->read_count); } else if (buffer->flags & EFX_TX_BUF_XDP) { xdp_return_frame_rx_napi(buffer->xdpf); + if (xdp_pkts) + (*xdp_pkts)++; + if (xdp_bytes) + (*xdp_bytes) += buffer->xdpf->len; } buffer->len = 0; @@ -210,7 +223,9 @@ static void efx_dequeue_buffers(struct efx_tx_queue *tx_queue, unsigned int index, unsigned int *pkts_compl, unsigned int *bytes_compl, - unsigned int *efv_pkts_compl) + unsigned int *efv_pkts_compl, + unsigned int *xdp_pkts, + unsigned int *xdp_bytes) { struct efx_nic *efx = tx_queue->efx; unsigned int stop_index, read_ptr; @@ -230,7 +245,7 @@ static void efx_dequeue_buffers(struct efx_tx_queue *tx_queue, } efx_dequeue_buffer(tx_queue, buffer, pkts_compl, bytes_compl, - efv_pkts_compl); + efv_pkts_compl, xdp_pkts, xdp_bytes); ++tx_queue->read_count; read_ptr = 
tx_queue->read_count & tx_queue->ptr_mask; @@ -253,15 +268,18 @@ void efx_xmit_done_check_empty(struct efx_tx_queue *tx_queue) int efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index) { unsigned int fill_level, pkts_compl = 0, bytes_compl = 0; + unsigned int xdp_pkts_compl = 0, xdp_bytes_compl = 0; unsigned int efv_pkts_compl = 0; struct efx_nic *efx = tx_queue->efx; EFX_WARN_ON_ONCE_PARANOID(index > tx_queue->ptr_mask); efx_dequeue_buffers(tx_queue, index, &pkts_compl, &bytes_compl, - &efv_pkts_compl); + &efv_pkts_compl, &xdp_pkts_compl, &xdp_bytes_compl); tx_queue->pkts_compl += pkts_compl; tx_queue->bytes_compl += bytes_compl; + tx_queue->complete_xdp_packets += xdp_pkts_compl; + tx_queue->complete_xdp_bytes += xdp_bytes_compl; if (pkts_compl + efv_pkts_compl > 1) ++tx_queue->merge_events; @@ -290,6 +308,8 @@ int efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index) void efx_enqueue_unwind(struct efx_tx_queue *tx_queue, unsigned int insert_count) { + unsigned int xdp_bytes_compl = 0; + unsigned int xdp_pkts_compl = 0; unsigned int efv_pkts_compl = 0; struct efx_tx_buffer *buffer; unsigned int bytes_compl = 0; @@ -300,7 +320,8 @@ void efx_enqueue_unwind(struct efx_tx_queue *tx_queue, --tx_queue->insert_count; buffer = __efx_tx_queue_get_insert_buffer(tx_queue); efx_dequeue_buffer(tx_queue, buffer, &pkts_compl, &bytes_compl, - &efv_pkts_compl); + &efv_pkts_compl, &xdp_pkts_compl, + &xdp_bytes_compl); } } diff --git a/drivers/net/ethernet/sfc/tx_common.h b/drivers/net/ethernet/sfc/tx_common.h index 1e9f42938aac..039eefafba23 100644 --- a/drivers/net/ethernet/sfc/tx_common.h +++ b/drivers/net/ethernet/sfc/tx_common.h @@ -20,7 +20,9 @@ void efx_dequeue_buffer(struct efx_tx_queue *tx_queue, struct efx_tx_buffer *buffer, unsigned int *pkts_compl, unsigned int *bytes_compl, - unsigned int *efv_pkts_compl); + unsigned int *efv_pkts_compl, + unsigned int *xdp_pkts, + unsigned int *xdp_bytes); static inline bool efx_tx_buffer_in_use(struct efx_tx_buffer *buffer) { diff --git a/drivers/net/ethernet/sgi/ioc3-eth.c b/drivers/net/ethernet/sgi/ioc3-eth.c index 98d0b561a057..4535579018c9 100644 --- a/drivers/net/ethernet/sgi/ioc3-eth.c +++ b/drivers/net/ethernet/sgi/ioc3-eth.c @@ -1273,7 +1273,7 @@ static void ioc3_set_multicast_list(struct net_device *dev) static struct platform_driver ioc3eth_driver = { .probe = ioc3eth_probe, - .remove_new = ioc3eth_remove, + .remove = ioc3eth_remove, .driver = { .name = "ioc3-eth", } diff --git a/drivers/net/ethernet/sgi/meth.c b/drivers/net/ethernet/sgi/meth.c index 18b6f93d875e..f7c3a5a766b7 100644 --- a/drivers/net/ethernet/sgi/meth.c +++ b/drivers/net/ethernet/sgi/meth.c @@ -864,7 +864,7 @@ static void meth_remove(struct platform_device *pdev) static struct platform_driver meth_driver = { .probe = meth_probe, - .remove_new = meth_remove, + .remove = meth_remove, .driver = { .name = "meth", } diff --git a/drivers/net/ethernet/smsc/smc91x.c b/drivers/net/ethernet/smsc/smc91x.c index a5e23e2da90f..9d1a83a5fa7e 100644 --- a/drivers/net/ethernet/smsc/smc91x.c +++ b/drivers/net/ethernet/smsc/smc91x.c @@ -2475,7 +2475,7 @@ static const struct dev_pm_ops smc_drv_pm_ops = { static struct platform_driver smc_driver = { .probe = smc_drv_probe, - .remove_new = smc_drv_remove, + .remove = smc_drv_remove, .driver = { .name = CARDNAME, .pm = &smc_drv_pm_ops, diff --git a/drivers/net/ethernet/smsc/smsc911x.c b/drivers/net/ethernet/smsc/smsc911x.c index 74f1ccc96459..f539813878f5 100644 --- a/drivers/net/ethernet/smsc/smsc911x.c +++ 
b/drivers/net/ethernet/smsc/smsc911x.c @@ -2667,7 +2667,7 @@ MODULE_DEVICE_TABLE(acpi, smsc911x_acpi_match); static struct platform_driver smsc911x_driver = { .probe = smsc911x_drv_probe, - .remove_new = smsc911x_drv_remove, + .remove = smsc911x_drv_remove, .driver = { .name = SMSC_CHIPNAME, .pm = SMSC911X_PM_OPS, diff --git a/drivers/net/ethernet/socionext/netsec.c b/drivers/net/ethernet/socionext/netsec.c index 5ab8b81b84e6..dc99821c6226 100644 --- a/drivers/net/ethernet/socionext/netsec.c +++ b/drivers/net/ethernet/socionext/netsec.c @@ -2211,7 +2211,7 @@ MODULE_DEVICE_TABLE(acpi, netsec_acpi_ids); static struct platform_driver netsec_driver = { .probe = netsec_probe, - .remove_new = netsec_remove, + .remove = netsec_remove, .driver = { .name = "netsec", .pm = &netsec_pm_ops, diff --git a/drivers/net/ethernet/socionext/sni_ave.c b/drivers/net/ethernet/socionext/sni_ave.c index eed24e67c5a6..66b3549636f8 100644 --- a/drivers/net/ethernet/socionext/sni_ave.c +++ b/drivers/net/ethernet/socionext/sni_ave.c @@ -1974,7 +1974,7 @@ MODULE_DEVICE_TABLE(of, of_ave_match); static struct platform_driver ave_driver = { .probe = ave_probe, - .remove_new = ave_remove, + .remove = ave_remove, .driver = { .name = "ave", .pm = AVE_PM_OPS, diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-anarion.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-anarion.c index 643ee6d8d4dd..ef99ef3f1ab4 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-anarion.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-anarion.c @@ -135,7 +135,7 @@ MODULE_DEVICE_TABLE(of, anarion_dwmac_match); static struct platform_driver anarion_dwmac_driver = { .probe = anarion_dwmac_probe, - .remove_new = stmmac_pltfr_remove, + .remove = stmmac_pltfr_remove, .driver = { .name = "anarion-dwmac", .pm = &stmmac_pltfr_pm_ops, diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c index ec924c6c76c6..83290e707df5 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c @@ -479,7 +479,7 @@ MODULE_DEVICE_TABLE(of, dwc_eth_dwmac_match); static struct platform_driver dwc_eth_dwmac_driver = { .probe = dwc_eth_dwmac_probe, - .remove_new = dwc_eth_dwmac_remove, + .remove = dwc_eth_dwmac_remove, .driver = { .name = "dwc-eth-dwmac", .pm = &stmmac_pltfr_pm_ops, diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c index 6b65420e11b5..641f3cd019a3 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c @@ -422,7 +422,7 @@ MODULE_DEVICE_TABLE(of, imx_dwmac_match); static struct platform_driver imx_dwmac_driver = { .probe = imx_dwmac_probe, - .remove_new = stmmac_pltfr_remove, + .remove = stmmac_pltfr_remove, .driver = { .name = "imx-dwmac", .pm = &stmmac_pltfr_pm_ops, diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-ingenic.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-ingenic.c index 19c93b998fb3..066783d66422 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-ingenic.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-ingenic.c @@ -370,7 +370,7 @@ MODULE_DEVICE_TABLE(of, ingenic_mac_of_matches); static struct platform_driver ingenic_mac_driver = { .probe = ingenic_mac_probe, - .remove_new = stmmac_pltfr_remove, + .remove = stmmac_pltfr_remove, .driver = { .name = "ingenic-mac", .pm = pm_ptr(&ingenic_mac_pm_ops), diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel-plat.c 
b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel-plat.c index d68f0c4e7835..230e79658c54 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel-plat.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel-plat.c @@ -167,7 +167,7 @@ static void intel_eth_plat_remove(struct platform_device *pdev) static struct platform_driver intel_eth_plat_driver = { .probe = intel_eth_plat_probe, - .remove_new = intel_eth_plat_remove, + .remove = intel_eth_plat_remove, .driver = { .name = "intel-eth-plat", .pm = &stmmac_pltfr_pm_ops, diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c index 83ad7c7935e3..48acba5eb178 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c @@ -451,7 +451,7 @@ static struct phylink_pcs *intel_mgbe_select_pcs(struct stmmac_priv *priv, * should always be an XPCS. The original code would always * return this if present. */ - return &priv->hw->xpcs->pcs; + return xpcs_to_phylink_pcs(priv->hw->xpcs); } static int intel_mgbe_common_data(struct pci_dev *pdev, diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c index 4ba15873d5b1..61227dcf56dc 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c @@ -499,7 +499,7 @@ MODULE_DEVICE_TABLE(of, ipq806x_gmac_dwmac_match); static struct platform_driver ipq806x_gmac_dwmac_driver = { .probe = ipq806x_gmac_probe, - .remove_new = stmmac_pltfr_remove, + .remove = stmmac_pltfr_remove, .driver = { .name = "ipq806x-gmac-dwmac", .pm = &stmmac_pltfr_pm_ops, diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-lpc18xx.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-lpc18xx.c index 4c810d8f5bea..22653ffd2a04 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-lpc18xx.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-lpc18xx.c @@ -72,7 +72,7 @@ MODULE_DEVICE_TABLE(of, lpc18xx_dwmac_match); static struct platform_driver lpc18xx_dwmac_driver = { .probe = lpc18xx_dwmac_probe, - .remove_new = stmmac_pltfr_remove, + .remove = stmmac_pltfr_remove, .driver = { .name = "lpc18xx-dwmac", .pm = &stmmac_pltfr_pm_ops, diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-mediatek.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-mediatek.c index 2a9132d6d743..f8ca81675407 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-mediatek.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-mediatek.c @@ -699,7 +699,7 @@ MODULE_DEVICE_TABLE(of, mediatek_dwmac_match); static struct platform_driver mediatek_dwmac_driver = { .probe = mediatek_dwmac_probe, - .remove_new = mediatek_dwmac_remove, + .remove = mediatek_dwmac_remove, .driver = { .name = "dwmac-mediatek", .pm = &stmmac_pltfr_pm_ops, diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson.c index a16bfa9089ea..5469fa1b429e 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson.c @@ -78,7 +78,7 @@ MODULE_DEVICE_TABLE(of, meson6_dwmac_match); static struct platform_driver meson6_dwmac_driver = { .probe = meson6_dwmac_probe, - .remove_new = stmmac_pltfr_remove, + .remove = stmmac_pltfr_remove, .driver = { .name = "meson6-dwmac", .pm = &stmmac_pltfr_pm_ops, diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c index b23944aa344e..9c2d62d133ad 100644 --- 
a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c @@ -520,7 +520,7 @@ MODULE_DEVICE_TABLE(of, meson8b_dwmac_match); static struct platform_driver meson8b_dwmac_driver = { .probe = meson8b_dwmac_probe, - .remove_new = stmmac_pltfr_remove, + .remove = stmmac_pltfr_remove, .driver = { .name = "meson8b-dwmac", .pm = &stmmac_pltfr_pm_ops, diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c index 50073bdade46..8cb374668b74 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c @@ -2073,7 +2073,7 @@ MODULE_DEVICE_TABLE(of, rk_gmac_dwmac_match); static struct platform_driver rk_gmac_dwmac_driver = { .probe = rk_gmac_probe, - .remove_new = rk_gmac_remove, + .remove = rk_gmac_remove, .driver = { .name = "rk_gmac-dwmac", .pm = &rk_gmac_pm_ops, diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-rzn1.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-rzn1.c index 59a7bd560f96..13634965bc19 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-rzn1.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-rzn1.c @@ -80,7 +80,7 @@ MODULE_DEVICE_TABLE(of, rzn1_dwmac_match); static struct platform_driver rzn1_dwmac_driver = { .probe = rzn1_dwmac_probe, - .remove_new = stmmac_pltfr_remove, + .remove = stmmac_pltfr_remove, .driver = { .name = "rzn1-dwmac", .of_match_table = rzn1_dwmac_match, diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c index fdb4c773ec98..0745117d5872 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c @@ -582,7 +582,7 @@ MODULE_DEVICE_TABLE(of, socfpga_dwmac_match); static struct platform_driver socfpga_dwmac_driver = { .probe = socfpga_dwmac_probe, - .remove_new = stmmac_pltfr_remove, + .remove = stmmac_pltfr_remove, .driver = { .name = "socfpga-dwmac", .pm = &socfpga_dwmac_pm_ops, diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-starfive.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-starfive.c index 4e1076faee0c..421666279dd3 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-starfive.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-starfive.c @@ -176,7 +176,7 @@ MODULE_DEVICE_TABLE(of, starfive_dwmac_match); static struct platform_driver starfive_dwmac_driver = { .probe = starfive_dwmac_probe, - .remove_new = stmmac_pltfr_remove, + .remove = stmmac_pltfr_remove, .driver = { .name = "starfive-dwmac", .pm = &stmmac_pltfr_pm_ops, diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c index 4445cddc4cbe..a6ff02d905a9 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c @@ -358,7 +358,7 @@ MODULE_DEVICE_TABLE(of, sti_dwmac_match); static struct platform_driver sti_dwmac_driver = { .probe = sti_dwmac_probe, - .remove_new = sti_dwmac_remove, + .remove = sti_dwmac_remove, .driver = { .name = "sti-dwmac", .pm = &sti_dwmac_pm_ops, diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c index c1732955a697..1e8bac665cc9 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c @@ -675,7 +675,7 @@ MODULE_DEVICE_TABLE(of, stm32_dwmac_match); static struct platform_driver stm32_dwmac_driver = { .probe = stm32_dwmac_probe, - .remove_new = 
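The .remove_new -> .remove renames running through these platform drivers
are purely mechanical: .remove_new was a transitional name introduced while
the platform remove callback migrated from returning int to returning void,
and with the migration finished the canonical member is .remove again. The
end state, sketched for a hypothetical driver:

::

    static void foo_remove(struct platform_device *pdev)
    {
            struct foo_priv *priv = platform_get_drvdata(pdev);

            foo_teardown(priv);     /* void: remove must not fail */
    }

    static struct platform_driver foo_driver = {
            .probe = foo_probe,
            .remove = foo_remove,   /* void-returning callback */
            .driver = {
                    .name = "foo",
            },
    };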
stm32_dwmac_remove, + .remove = stm32_dwmac_remove, .driver = { .name = "stm32-dwmac", .pm = &stm32_dwmac_pm_ops, diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c index 4a0ae92b3055..4b7b2582a120 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c @@ -1343,7 +1343,7 @@ MODULE_DEVICE_TABLE(of, sun8i_dwmac_match); static struct platform_driver sun8i_dwmac_driver = { .probe = sun8i_dwmac_probe, - .remove_new = sun8i_dwmac_remove, + .remove = sun8i_dwmac_remove, .shutdown = sun8i_dwmac_shutdown, .driver = { .name = "dwmac-sun8i", diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c index 2653a9f0958c..9ae318436c4a 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c @@ -172,7 +172,7 @@ MODULE_DEVICE_TABLE(of, sun7i_dwmac_match); static struct platform_driver sun7i_dwmac_driver = { .probe = sun7i_gmac_probe, - .remove_new = stmmac_pltfr_remove, + .remove = stmmac_pltfr_remove, .driver = { .name = "sun7i-dwmac", .pm = &stmmac_pltfr_pm_ops, diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-tegra.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-tegra.c index 6fdd94c8919e..3827997d2132 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-tegra.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-tegra.c @@ -381,7 +381,7 @@ static SIMPLE_DEV_PM_OPS(tegra_mgbe_pm_ops, tegra_mgbe_suspend, tegra_mgbe_resum static struct platform_driver tegra_mgbe_driver = { .probe = tegra_mgbe_probe, - .remove_new = tegra_mgbe_remove, + .remove = tegra_mgbe_remove, .driver = { .name = "tegra-mgbe", .pm = &tegra_mgbe_pm_ops, diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-visconti.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-visconti.c index a5a5cfa989c6..eccf7f537467 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-visconti.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-visconti.c @@ -268,7 +268,7 @@ MODULE_DEVICE_TABLE(of, visconti_eth_dwmac_match); static struct platform_driver visconti_eth_dwmac_driver = { .probe = visconti_eth_dwmac_probe, - .remove_new = visconti_eth_dwmac_remove, + .remove = visconti_eth_dwmac_remove, .driver = { .name = "visconti-eth-dwmac", .of_match_table = visconti_eth_dwmac_match, diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4.h b/drivers/net/ethernet/stmicro/stmmac/dwmac4.h index 93a78fd0737b..28fff6cab812 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac4.h +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4.h @@ -44,6 +44,7 @@ #define GMAC_MDIO_DATA 0x00000204 #define GMAC_GPIO_STATUS 0x0000020C #define GMAC_ARP_ADDR 0x00000210 +#define GMAC_EXT_CFG1 0x00000238 #define GMAC_ADDR_HIGH(reg) (0x300 + reg * 8) #define GMAC_ADDR_LOW(reg) (0x304 + reg * 8) #define GMAC_L3L4_CTRL(reg) (0x900 + (reg) * 0x30) @@ -284,6 +285,10 @@ enum power_event { #define GMAC_HW_FEAT_DVLAN BIT(5) #define GMAC_HW_FEAT_NRVF GENMASK(2, 0) +/* MAC extended config 1 */ +#define GMAC_CONFIG1_SAVE_EN BIT(24) +#define GMAC_CONFIG1_SPLM(v) FIELD_PREP(GENMASK(9, 8), v) + /* GMAC GPIO Status reg */ #define GMAC_GPO0 BIT(16) #define GMAC_GPO1 BIT(17) diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c index e99401bcc1f8..a5fb31eb0192 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c @@ -118,6 +118,8 @@ 
static int dwmac4_wrback_get_rx_status(struct stmmac_extra_stats *x, x->ipv4_pkt_rcvd++; if (rdes1 & RDES1_IPV6_HEADER) x->ipv6_pkt_rcvd++; + if (rdes1 & RDES1_IP_PAYLOAD_ERROR) + x->ip_payload_err++; if (message_type == RDES_EXT_NO_PTP) x->no_ptp_rx_msg_type_ext++; diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.h b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.h index 6da070ccd737..1ce6f43d545a 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.h +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.h @@ -95,7 +95,7 @@ #define RDES1_IPV4_HEADER BIT(4) #define RDES1_IPV6_HEADER BIT(5) #define RDES1_IP_CSUM_BYPASSED BIT(6) -#define RDES1_IP_CSUM_ERROR BIT(7) +#define RDES1_IP_PAYLOAD_ERROR BIT(7) #define RDES1_PTP_MSG_TYPE_MASK GENMASK(11, 8) #define RDES1_PTP_PACKET_TYPE BIT(12) #define RDES1_PTP_VER BIT(13) diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c index e0165358c4ac..7c895e0ae71f 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c @@ -526,6 +526,11 @@ static void dwmac4_enable_sph(struct stmmac_priv *priv, void __iomem *ioaddr, value |= GMAC_CONFIG_HDSMS_256; /* Segment max 256 bytes */ writel(value, ioaddr + GMAC_EXT_CONFIG); + value = readl(ioaddr + GMAC_EXT_CFG1); + value |= GMAC_CONFIG1_SPLM(1); /* Split mode set to L2OFST */ + value |= GMAC_CONFIG1_SAVE_EN; /* Enable Split AV mode */ + writel(value, ioaddr + GMAC_EXT_CFG1); + value = readl(ioaddr + DMA_CHAN_CONTROL(dwmac4_addrs, chan)); if (en) value |= DMA_CONTROL_SPH; diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c index 03f90676b3ad..0c7d81ddd440 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c @@ -500,23 +500,22 @@ int stmmac_pcs_setup(struct net_device *ndev) struct fwnode_handle *devnode, *pcsnode; struct dw_xpcs *xpcs = NULL; struct stmmac_priv *priv; - int addr, mode, ret; + int addr, ret; priv = netdev_priv(ndev); - mode = priv->plat->phy_interface; devnode = priv->plat->port_node; if (priv->plat->pcs_init) { ret = priv->plat->pcs_init(priv); } else if (fwnode_property_present(devnode, "pcs-handle")) { pcsnode = fwnode_find_reference(devnode, "pcs-handle", 0); - xpcs = xpcs_create_fwnode(pcsnode, mode); + xpcs = xpcs_create_fwnode(pcsnode); fwnode_handle_put(pcsnode); ret = PTR_ERR_OR_ZERO(xpcs); } else if (priv->plat->mdio_bus_data && priv->plat->mdio_bus_data->pcs_mask) { addr = ffs(priv->plat->mdio_bus_data->pcs_mask) - 1; - xpcs = xpcs_create_mdiodev(priv->mii, addr, mode); + xpcs = xpcs_create_mdiodev(priv->mii, addr); ret = PTR_ERR_OR_ZERO(xpcs); } else { return 0; diff --git a/drivers/net/ethernet/sun/niu.c b/drivers/net/ethernet/sun/niu.c index 41a27ae58ced..df6d35d41b97 100644 --- a/drivers/net/ethernet/sun/niu.c +++ b/drivers/net/ethernet/sun/niu.c @@ -10182,7 +10182,7 @@ static struct platform_driver niu_of_driver = { .of_match_table = niu_match, }, .probe = niu_of_probe, - .remove_new = niu_of_remove, + .remove = niu_of_remove, }; #endif /* CONFIG_SPARC64 */ diff --git a/drivers/net/ethernet/sun/sunbmac.c b/drivers/net/ethernet/sun/sunbmac.c index 16c86b13c185..bbb3a6ca19ed 100644 --- a/drivers/net/ethernet/sun/sunbmac.c +++ b/drivers/net/ethernet/sun/sunbmac.c @@ -1272,7 +1272,7 @@ static struct platform_driver bigmac_sbus_driver = { .of_match_table = bigmac_sbus_match, }, .probe = bigmac_sbus_probe, - .remove_new 
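The dwmac-intel and stmmac_mdio hunks above track an xpcs API change: the
PHY interface mode argument is gone from the creation helpers, and callers
obtain the phylink PCS through an accessor instead of dereferencing struct
dw_xpcs themselves. Roughly (hypothetical "foo" wrapper, error handling as
in the stmmac code):

::

    static int foo_pcs_setup(struct foo_priv *priv, int addr)
    {
            struct dw_xpcs *xpcs;

            xpcs = xpcs_create_mdiodev(priv->mii, addr);
            if (IS_ERR(xpcs))
                    return PTR_ERR(xpcs);

            priv->xpcs = xpcs;
            return 0;
    }

    static struct phylink_pcs *foo_select_pcs(struct foo_priv *priv)
    {
            /* opaque accessor replaces &priv->xpcs->pcs */
            return xpcs_to_phylink_pcs(priv->xpcs);
    }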
= bigmac_sbus_remove, + .remove = bigmac_sbus_remove, }; module_platform_driver(bigmac_sbus_driver); diff --git a/drivers/net/ethernet/sun/sunqe.c b/drivers/net/ethernet/sun/sunqe.c index aedd13c94225..2920341b14a0 100644 --- a/drivers/net/ethernet/sun/sunqe.c +++ b/drivers/net/ethernet/sun/sunqe.c @@ -965,7 +965,7 @@ static struct platform_driver qec_sbus_driver = { .of_match_table = qec_sbus_match, }, .probe = qec_sbus_probe, - .remove_new = qec_sbus_remove, + .remove = qec_sbus_remove, }; static int __init qec_init(void) diff --git a/drivers/net/ethernet/sunplus/spl2sw_driver.c b/drivers/net/ethernet/sunplus/spl2sw_driver.c index 391a1bc7f446..721d8ed3f302 100644 --- a/drivers/net/ethernet/sunplus/spl2sw_driver.c +++ b/drivers/net/ethernet/sunplus/spl2sw_driver.c @@ -549,7 +549,7 @@ MODULE_DEVICE_TABLE(of, spl2sw_of_match); static struct platform_driver spl2sw_driver = { .probe = spl2sw_probe, - .remove_new = spl2sw_remove, + .remove = spl2sw_remove, .driver = { .name = "sp7021_emac", .of_match_table = spl2sw_of_match, diff --git a/drivers/net/ethernet/ti/am65-cpsw-nuss.c b/drivers/net/ethernet/ti/am65-cpsw-nuss.c index 0520e9f4bea7..6201a09fa5f0 100644 --- a/drivers/net/ethernet/ti/am65-cpsw-nuss.c +++ b/drivers/net/ethernet/ti/am65-cpsw-nuss.c @@ -1031,9 +1031,7 @@ static int am65_cpsw_run_xdp(struct am65_cpsw_rx_flow *flow, int desc_idx, int cpu, int *len) { struct am65_cpsw_common *common = flow->common; - struct am65_cpsw_ndev_priv *ndev_priv; struct net_device *ndev = port->ndev; - struct am65_cpsw_ndev_stats *stats; int ret = AM65_CPSW_XDP_CONSUMED; struct am65_cpsw_tx_chn *tx_chn; struct netdev_queue *netif_txq; @@ -1051,9 +1049,6 @@ static int am65_cpsw_run_xdp(struct am65_cpsw_rx_flow *flow, /* XDP prog might have changed packet data and boundaries */ *len = xdp->data_end - xdp->data; - ndev_priv = netdev_priv(ndev); - stats = this_cpu_ptr(ndev_priv->stats); - switch (act) { case XDP_PASS: ret = AM65_CPSW_XDP_PASS; @@ -1073,20 +1068,14 @@ static int am65_cpsw_run_xdp(struct am65_cpsw_rx_flow *flow, if (err) goto drop; - u64_stats_update_begin(&stats->syncp); - stats->rx_bytes += *len; - stats->rx_packets++; - u64_stats_update_end(&stats->syncp); + dev_sw_netstats_tx_add(ndev, 1, *len); ret = AM65_CPSW_XDP_CONSUMED; goto out; case XDP_REDIRECT: if (unlikely(xdp_do_redirect(ndev, xdp, prog))) goto drop; - u64_stats_update_begin(&stats->syncp); - stats->rx_bytes += *len; - stats->rx_packets++; - u64_stats_update_end(&stats->syncp); + dev_sw_netstats_rx_add(ndev, *len); ret = AM65_CPSW_XDP_REDIRECT; goto out; default: @@ -1147,7 +1136,6 @@ static int am65_cpsw_nuss_rx_packets(struct am65_cpsw_rx_flow *flow, u32 buf_dma_len, pkt_len, port_id = 0, csum_info; struct am65_cpsw_common *common = flow->common; struct am65_cpsw_ndev_priv *ndev_priv; - struct am65_cpsw_ndev_stats *stats; struct cppi5_host_desc_t *desc_rx; struct device *dev = common->dev; struct page *page, *new_page; @@ -1233,12 +1221,7 @@ static int am65_cpsw_nuss_rx_packets(struct am65_cpsw_rx_flow *flow, am65_cpsw_nuss_rx_csum(skb, csum_info); napi_gro_receive(&flow->napi_rx, skb); - stats = this_cpu_ptr(ndev_priv->stats); - - u64_stats_update_begin(&stats->syncp); - stats->rx_packets++; - stats->rx_bytes += pkt_len; - u64_stats_update_end(&stats->syncp); + dev_sw_netstats_rx_add(ndev, pkt_len); allocate: new_page = page_pool_dev_alloc_pages(flow->page_pool); @@ -1321,10 +1304,7 @@ static struct sk_buff * am65_cpsw_nuss_tx_compl_packet_skb(struct am65_cpsw_tx_chn *tx_chn, dma_addr_t desc_dma) { - struct 
am65_cpsw_ndev_priv *ndev_priv; - struct am65_cpsw_ndev_stats *stats; struct cppi5_host_desc_t *desc_tx; - struct net_device *ndev; struct sk_buff *skb; void **swdata; @@ -1334,16 +1314,9 @@ am65_cpsw_nuss_tx_compl_packet_skb(struct am65_cpsw_tx_chn *tx_chn, skb = *(swdata); am65_cpsw_nuss_xmit_free(tx_chn, desc_tx); - ndev = skb->dev; - am65_cpts_tx_timestamp(tx_chn->common->cpts, skb); - ndev_priv = netdev_priv(ndev); - stats = this_cpu_ptr(ndev_priv->stats); - u64_stats_update_begin(&stats->syncp); - stats->tx_packets++; - stats->tx_bytes += skb->len; - u64_stats_update_end(&stats->syncp); + dev_sw_netstats_tx_add(skb->dev, 1, skb->len); return skb; } @@ -1354,8 +1327,6 @@ am65_cpsw_nuss_tx_compl_packet_xdp(struct am65_cpsw_common *common, dma_addr_t desc_dma, struct net_device **ndev) { - struct am65_cpsw_ndev_priv *ndev_priv; - struct am65_cpsw_ndev_stats *stats; struct cppi5_host_desc_t *desc_tx; struct am65_cpsw_port *port; struct xdp_frame *xdpf; @@ -1369,15 +1340,9 @@ am65_cpsw_nuss_tx_compl_packet_xdp(struct am65_cpsw_common *common, am65_cpsw_nuss_xmit_free(tx_chn, desc_tx); port = am65_common_get_port(common, port_id); + dev_sw_netstats_tx_add(port->ndev, 1, xdpf->len); *ndev = port->ndev; - ndev_priv = netdev_priv(*ndev); - stats = this_cpu_ptr(ndev_priv->stats); - u64_stats_update_begin(&stats->syncp); - stats->tx_packets++; - stats->tx_bytes += xdpf->len; - u64_stats_update_end(&stats->syncp); - return xdpf; } @@ -1899,31 +1864,7 @@ static int am65_cpsw_nuss_ndo_slave_ioctl(struct net_device *ndev, static void am65_cpsw_nuss_ndo_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats) { - struct am65_cpsw_ndev_priv *ndev_priv = netdev_priv(dev); - unsigned int start; - int cpu; - - for_each_possible_cpu(cpu) { - struct am65_cpsw_ndev_stats *cpu_stats; - u64 rx_packets; - u64 rx_bytes; - u64 tx_packets; - u64 tx_bytes; - - cpu_stats = per_cpu_ptr(ndev_priv->stats, cpu); - do { - start = u64_stats_fetch_begin(&cpu_stats->syncp); - rx_packets = cpu_stats->rx_packets; - rx_bytes = cpu_stats->rx_bytes; - tx_packets = cpu_stats->tx_packets; - tx_bytes = cpu_stats->tx_bytes; - } while (u64_stats_fetch_retry(&cpu_stats->syncp, start)); - - stats->rx_packets += rx_packets; - stats->rx_bytes += rx_bytes; - stats->tx_packets += tx_packets; - stats->tx_bytes += tx_bytes; - } + dev_fetch_sw_netstats(stats, dev->tstats); stats->rx_errors = dev->stats.rx_errors; stats->rx_dropped = dev->stats.rx_dropped; @@ -2710,13 +2651,6 @@ of_node_put: return ret; } -static void am65_cpsw_pcpu_stats_free(void *data) -{ - struct am65_cpsw_ndev_stats __percpu *stats = data; - - free_percpu(stats); -} - static void am65_cpsw_nuss_phylink_cleanup(struct am65_cpsw_common *common) { struct am65_cpsw_port *port; @@ -2736,7 +2670,6 @@ am65_cpsw_nuss_init_port_ndev(struct am65_cpsw_common *common, u32 port_idx) struct device *dev = common->dev; struct am65_cpsw_port *port; struct phylink *phylink; - int ret; port = &common->ports[port_idx]; @@ -2829,21 +2762,13 @@ am65_cpsw_nuss_init_port_ndev(struct am65_cpsw_common *common, u32 port_idx) if (common->pdata.quirks & AM65_CPSW_QUIRK_I2027_NO_TX_CSUM) port->ndev->features &= ~NETIF_F_HW_CSUM; - ndev_priv->stats = netdev_alloc_pcpu_stats(struct am65_cpsw_ndev_stats); - if (!ndev_priv->stats) - return -ENOMEM; - - ret = devm_add_action_or_reset(dev, am65_cpsw_pcpu_stats_free, - ndev_priv->stats); - if (ret) - dev_err(dev, "failed to add percpu stat free action %d\n", ret); - + port->ndev->pcpu_stat_type = NETDEV_PCPU_STAT_TSTATS; port->xdp_prog = NULL; if 
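The am65-cpsw conversion above replaces a hand-rolled per-CPU stats block
with the core tstats infrastructure, which removes all of the
u64_stats_sync boilerplate. The whole pattern, shown for a hypothetical
driver:

::

    static void foo_setup_stats(struct net_device *ndev)
    {
            /* before register_netdev(): the core then allocates and
             * frees ndev->tstats itself
             */
            ndev->pcpu_stat_type = NETDEV_PCPU_STAT_TSTATS;
    }

    static void foo_rx_complete(struct net_device *ndev, unsigned int len)
    {
            dev_sw_netstats_rx_add(ndev, len);      /* lockless, per CPU */
    }

    static void foo_tx_complete(struct net_device *ndev, struct sk_buff *skb)
    {
            dev_sw_netstats_tx_add(ndev, 1, skb->len);
    }

    static void foo_get_stats64(struct net_device *ndev,
                                struct rtnl_link_stats64 *stats)
    {
            /* folds every CPU's counters into one snapshot */
            dev_fetch_sw_netstats(stats, ndev->tstats);
    }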
(!common->dma_ndev) common->dma_ndev = port->ndev; - return ret; + return 0; } static int am65_cpsw_nuss_init_ndevs(struct am65_cpsw_common *common) @@ -3448,7 +3373,8 @@ static const struct am65_cpsw_pdata j7200_cpswxg_pdata = { .quirks = 0, .ale_dev_id = "am64-cpswxg", .fdqring_mode = K3_RINGACC_RING_MODE_RING, - .extra_modes = BIT(PHY_INTERFACE_MODE_QSGMII) | BIT(PHY_INTERFACE_MODE_SGMII), + .extra_modes = BIT(PHY_INTERFACE_MODE_QSGMII) | BIT(PHY_INTERFACE_MODE_SGMII) | + BIT(PHY_INTERFACE_MODE_USXGMII), }; static const struct am65_cpsw_pdata j721e_cpswxg_pdata = { @@ -3500,7 +3426,7 @@ static int am65_cpsw_nuss_probe(struct platform_device *pdev) struct resource *res; struct clk *clk; int ale_entries; - u64 id_temp; + __be64 id_temp; int ret, i; common = devm_kzalloc(dev, sizeof(struct am65_cpsw_common), GFP_KERNEL); @@ -3773,7 +3699,7 @@ static struct platform_driver am65_cpsw_nuss_driver = { .pm = &am65_cpsw_nuss_dev_pm_ops, }, .probe = am65_cpsw_nuss_probe, - .remove_new = am65_cpsw_nuss_remove, + .remove = am65_cpsw_nuss_remove, }; module_platform_driver(am65_cpsw_nuss_driver); diff --git a/drivers/net/ethernet/ti/am65-cpsw-nuss.h b/drivers/net/ethernet/ti/am65-cpsw-nuss.h index dc8d544230dc..3f3e353dfe88 100644 --- a/drivers/net/ethernet/ti/am65-cpsw-nuss.h +++ b/drivers/net/ethernet/ti/am65-cpsw-nuss.h @@ -180,18 +180,9 @@ struct am65_cpsw_common { u32 *ale_context; }; -struct am65_cpsw_ndev_stats { - u64 tx_packets; - u64 tx_bytes; - u64 rx_packets; - u64 rx_bytes; - struct u64_stats_sync syncp; -}; - struct am65_cpsw_ndev_priv { u32 msg_enable; struct am65_cpsw_port *port; - struct am65_cpsw_ndev_stats __percpu *stats; bool offload_fwd_mark; /* Serialize access to MAC Merge state between ethtool requests * and link state updates diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c index c0a5abd8d9a8..4ef8cf6ea135 100644 --- a/drivers/net/ethernet/ti/cpsw.c +++ b/drivers/net/ethernet/ti/cpsw.c @@ -1802,7 +1802,7 @@ static struct platform_driver cpsw_driver = { .of_match_table = cpsw_of_mtable, }, .probe = cpsw_probe, - .remove_new = cpsw_remove, + .remove = cpsw_remove, }; module_platform_driver(cpsw_driver); diff --git a/drivers/net/ethernet/ti/cpsw_ale.c b/drivers/net/ethernet/ti/cpsw_ale.c index 8d02d2b21429..d361caa80d05 100644 --- a/drivers/net/ethernet/ti/cpsw_ale.c +++ b/drivers/net/ethernet/ti/cpsw_ale.c @@ -162,27 +162,39 @@ static inline void cpsw_ale_set_field(u32 *ale_entry, u32 start, u32 bits, ale_entry[idx] |= (value << start); } -#define DEFINE_ALE_FIELD(name, start, bits) \ +#define DEFINE_ALE_FIELD_GET(name, start, bits) \ static inline int cpsw_ale_get_##name(u32 *ale_entry) \ { \ return cpsw_ale_get_field(ale_entry, start, bits); \ -} \ +} + +#define DEFINE_ALE_FIELD_SET(name, start, bits) \ static inline void cpsw_ale_set_##name(u32 *ale_entry, u32 value) \ { \ cpsw_ale_set_field(ale_entry, start, bits, value); \ } -#define DEFINE_ALE_FIELD1(name, start) \ +#define DEFINE_ALE_FIELD(name, start, bits) \ +DEFINE_ALE_FIELD_GET(name, start, bits) \ +DEFINE_ALE_FIELD_SET(name, start, bits) + +#define DEFINE_ALE_FIELD1_GET(name, start) \ static inline int cpsw_ale_get_##name(u32 *ale_entry, u32 bits) \ { \ return cpsw_ale_get_field(ale_entry, start, bits); \ -} \ +} + +#define DEFINE_ALE_FIELD1_SET(name, start) \ static inline void cpsw_ale_set_##name(u32 *ale_entry, u32 value, \ u32 bits) \ { \ cpsw_ale_set_field(ale_entry, start, bits, value); \ } +#define DEFINE_ALE_FIELD1(name, start) \ +DEFINE_ALE_FIELD1_GET(name, start) \ 
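
A note on the stats conversion above: the am65-cpsw hunks drop the driver-private struct am65_cpsw_ndev_stats and its u64_stats_sync bookkeeping in favour of the core's per-CPU sw_netstats. A minimal sketch of the resulting driver pattern (the foo_* name is hypothetical)::

        /* Before register_netdev(): ask the core to allocate and free
         * dev->tstats (struct pcpu_sw_netstats) on the driver's behalf.
         */
        ndev->pcpu_stat_type = NETDEV_PCPU_STAT_TSTATS;

        /* On TX completion, one helper replaces the open-coded
         * u64_stats_update_begin()/update/u64_stats_update_end() dance:
         */
        dev_sw_netstats_tx_add(skb->dev, 1, skb->len);

        /* And .ndo_get_stats64 becomes a single call that sums
         * packets/bytes across all possible CPUs:
         */
        static void foo_get_stats(struct net_device *dev,
                                  struct rtnl_link_stats64 *stats)
        {
                dev_fetch_sw_netstats(stats, dev->tstats);
        }
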
+DEFINE_ALE_FIELD1_SET(name, start) + enum { ALE_ENT_VID_MEMBER_LIST = 0, ALE_ENT_VID_UNREG_MCAST_MSK, @@ -238,14 +250,14 @@ static const struct ale_entry_fld vlan_entry_k3_cpswxg[] = { DEFINE_ALE_FIELD(entry_type, 60, 2) DEFINE_ALE_FIELD(vlan_id, 48, 12) -DEFINE_ALE_FIELD(mcast_state, 62, 2) +DEFINE_ALE_FIELD_SET(mcast_state, 62, 2) DEFINE_ALE_FIELD1(port_mask, 66) DEFINE_ALE_FIELD(super, 65, 1) DEFINE_ALE_FIELD(ucast_type, 62, 2) -DEFINE_ALE_FIELD1(port_num, 66) -DEFINE_ALE_FIELD(blocked, 65, 1) -DEFINE_ALE_FIELD(secure, 64, 1) -DEFINE_ALE_FIELD(mcast, 40, 1) +DEFINE_ALE_FIELD1_SET(port_num, 66) +DEFINE_ALE_FIELD_SET(blocked, 65, 1) +DEFINE_ALE_FIELD_SET(secure, 64, 1) +DEFINE_ALE_FIELD_GET(mcast, 40, 1) #define NU_VLAN_UNREG_MCAST_IDX 1 diff --git a/drivers/net/ethernet/ti/cpsw_new.c b/drivers/net/ethernet/ti/cpsw_new.c index 557cc71b9dd2..a98bcc5eb566 100644 --- a/drivers/net/ethernet/ti/cpsw_new.c +++ b/drivers/net/ethernet/ti/cpsw_new.c @@ -2127,7 +2127,7 @@ static struct platform_driver cpsw_driver = { .of_match_table = cpsw_of_mtable, }, .probe = cpsw_probe, - .remove_new = cpsw_remove, + .remove = cpsw_remove, }; module_platform_driver(cpsw_driver); diff --git a/drivers/net/ethernet/ti/davinci_emac.c b/drivers/net/ethernet/ti/davinci_emac.c index b0950a318c42..ed8116fb05e9 100644 --- a/drivers/net/ethernet/ti/davinci_emac.c +++ b/drivers/net/ethernet/ti/davinci_emac.c @@ -2070,7 +2070,7 @@ static struct platform_driver davinci_emac_driver = { .of_match_table = davinci_emac_of_match, }, .probe = davinci_emac_probe, - .remove_new = davinci_emac_remove, + .remove = davinci_emac_remove, }; /** diff --git a/drivers/net/ethernet/ti/davinci_mdio.c b/drivers/net/ethernet/ti/davinci_mdio.c index 8e07d4a1b6ba..68507126be8e 100644 --- a/drivers/net/ethernet/ti/davinci_mdio.c +++ b/drivers/net/ethernet/ti/davinci_mdio.c @@ -760,7 +760,7 @@ static struct platform_driver davinci_mdio_driver = { .of_match_table = of_match_ptr(davinci_mdio_of_mtable), }, .probe = davinci_mdio_probe, - .remove_new = davinci_mdio_remove, + .remove = davinci_mdio_remove, }; static int __init davinci_mdio_init(void) diff --git a/drivers/net/ethernet/ti/icssg/icssg_prueth.c b/drivers/net/ethernet/ti/icssg/icssg_prueth.c index 5c20ceb164df..0556910938fa 100644 --- a/drivers/net/ethernet/ti/icssg/icssg_prueth.c +++ b/drivers/net/ethernet/ti/icssg/icssg_prueth.c @@ -1646,7 +1646,7 @@ MODULE_DEVICE_TABLE(of, prueth_dt_match); static struct platform_driver prueth_driver = { .probe = prueth_probe, - .remove_new = prueth_remove, + .remove = prueth_remove, .driver = { .name = "icssg-prueth", .of_match_table = prueth_dt_match, diff --git a/drivers/net/ethernet/ti/icssg/icssg_prueth_sr1.c b/drivers/net/ethernet/ti/icssg/icssg_prueth_sr1.c index 292f04d29f4f..5024f0647a0d 100644 --- a/drivers/net/ethernet/ti/icssg/icssg_prueth_sr1.c +++ b/drivers/net/ethernet/ti/icssg/icssg_prueth_sr1.c @@ -1215,7 +1215,7 @@ MODULE_DEVICE_TABLE(of, prueth_dt_match); static struct platform_driver prueth_driver = { .probe = prueth_probe, - .remove_new = prueth_remove, + .remove = prueth_remove, .driver = { .name = "icssg-prueth-sr1", .of_match_table = prueth_dt_match, diff --git a/drivers/net/ethernet/ti/netcp_core.c b/drivers/net/ethernet/ti/netcp_core.c index 11b90e1da0c6..857820657bac 100644 --- a/drivers/net/ethernet/ti/netcp_core.c +++ b/drivers/net/ethernet/ti/netcp_core.c @@ -2270,7 +2270,7 @@ static struct platform_driver netcp_driver = { .of_match_table = of_match, }, .probe = netcp_probe, - .remove_new = netcp_remove, + .remove = 
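
The cpsw_ale.c refactor earlier in this chunk splits the combined accessor macros into independent _GET and _SET halves, so a field that is only ever read, or only ever written, generates just the one static inline it needs instead of a dead twin: mcast_state, port_num, blocked and secure become set-only, mcast becomes get-only, and DEFINE_ALE_FIELD()/DEFINE_ALE_FIELD1() remain as wrappers for fields used in both directions. The shape in miniature, with hypothetical names and helpers::

        #define DEFINE_FIELD_GET(name, start, bits)                     \
        static inline int get_##name(u32 *entry)                        \
        {                                                               \
                return get_field(entry, start, bits);                   \
        }

        #define DEFINE_FIELD_SET(name, start, bits)                     \
        static inline void set_##name(u32 *entry, u32 value)            \
        {                                                               \
                set_field(entry, start, bits, value);                   \
        }

        DEFINE_FIELD_GET(mcast, 40, 1)    /* read-only: no unused setter  */
        DEFINE_FIELD_SET(blocked, 65, 1)  /* write-only: no unused getter */
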
netcp_remove, }; module_platform_driver(netcp_driver); diff --git a/drivers/net/ethernet/tundra/tsi108_eth.c b/drivers/net/ethernet/tundra/tsi108_eth.c index 554aff7c8f3b..c6957e3b7f0f 100644 --- a/drivers/net/ethernet/tundra/tsi108_eth.c +++ b/drivers/net/ethernet/tundra/tsi108_eth.c @@ -1676,7 +1676,7 @@ static void tsi108_ether_remove(struct platform_device *pdev) static struct platform_driver tsi_eth_driver = { .probe = tsi108_init_one, - .remove_new = tsi108_ether_remove, + .remove = tsi108_ether_remove, .driver = { .name = "tsi-ethernet", }, diff --git a/drivers/net/ethernet/via/via-rhine.c b/drivers/net/ethernet/via/via-rhine.c index e80c02948801..894911f3d560 100644 --- a/drivers/net/ethernet/via/via-rhine.c +++ b/drivers/net/ethernet/via/via-rhine.c @@ -2570,7 +2570,7 @@ static struct pci_driver rhine_driver_pci = { static struct platform_driver rhine_driver_platform = { .probe = rhine_init_one_platform, - .remove_new = rhine_remove_one_platform, + .remove = rhine_remove_one_platform, .driver = { .name = DRV_NAME, .of_match_table = rhine_of_tbl, diff --git a/drivers/net/ethernet/via/via-velocity.c b/drivers/net/ethernet/via/via-velocity.c index 55fff4d0d380..dd4a07c97eee 100644 --- a/drivers/net/ethernet/via/via-velocity.c +++ b/drivers/net/ethernet/via/via-velocity.c @@ -3247,7 +3247,7 @@ static struct pci_driver velocity_pci_driver = { static struct platform_driver velocity_platform_driver = { .probe = velocity_platform_probe, - .remove_new = velocity_platform_remove, + .remove = velocity_platform_remove, .driver = { .name = "via-velocity", .of_match_table = velocity_of_ids, diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c index 67b61afdde96..a0e4920b4761 100644 --- a/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c @@ -122,7 +122,7 @@ static int txgbe_pcs_write(struct mii_bus *bus, int addr, int devnum, int regnum static int txgbe_mdio_pcs_init(struct txgbe *txgbe) { struct mii_bus *mii_bus; - struct dw_xpcs *xpcs; + struct phylink_pcs *pcs; struct pci_dev *pdev; struct wx *wx; int ret = 0; @@ -147,11 +147,11 @@ static int txgbe_mdio_pcs_init(struct txgbe *txgbe) if (ret) return ret; - xpcs = xpcs_create_mdiodev(mii_bus, 0, PHY_INTERFACE_MODE_10GBASER); - if (IS_ERR(xpcs)) - return PTR_ERR(xpcs); + pcs = xpcs_create_pcs_mdiodev(mii_bus, 0); + if (IS_ERR(pcs)) + return PTR_ERR(pcs); - txgbe->xpcs = xpcs; + txgbe->pcs = pcs; return 0; } @@ -163,7 +163,7 @@ static struct phylink_pcs *txgbe_phylink_mac_select(struct phylink_config *confi struct txgbe *txgbe = wx->priv; if (interface == PHY_INTERFACE_MODE_10GBASER) - return &txgbe->xpcs->pcs; + return txgbe->pcs; return NULL; } @@ -302,7 +302,7 @@ irqreturn_t txgbe_link_irq_handler(int irq, void *data) status = rd32(wx, TXGBE_CFG_PORT_ST); up = !!(status & TXGBE_CFG_PORT_ST_LINK_UP); - phylink_pcs_change(&txgbe->xpcs->pcs, up); + phylink_pcs_change(txgbe->pcs, up); return IRQ_HANDLED; } @@ -578,7 +578,7 @@ static int txgbe_clock_register(struct txgbe *txgbe) if (IS_ERR(clk)) return PTR_ERR(clk); - clock = clkdev_create(clk, NULL, clk_name); + clock = clkdev_create(clk, NULL, "%s", clk_name); if (!clock) { clk_unregister(clk); return -ENOMEM; @@ -778,7 +778,7 @@ err_unregister_clk: err_destroy_phylink: phylink_destroy(wx->phylink); err_destroy_xpcs: - xpcs_destroy(txgbe->xpcs); + xpcs_destroy_pcs(txgbe->pcs); err_unregister_swnode: software_node_unregister_node_group(txgbe->nodes.group); @@ -798,6 +798,6 @@ void 
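
The txgbe conversion in the hunk above (continuing below) swaps the driver's struct dw_xpcs * for an opaque struct phylink_pcs * obtained from the xpcs_create_pcs_mdiodev()/xpcs_destroy_pcs() pair introduced by this series, so the driver stops reaching into dw_xpcs internals (&txgbe->xpcs->pcs). Every use site reduces to the generic handle, roughly::

        pcs = xpcs_create_pcs_mdiodev(mii_bus, 0);
        if (IS_ERR(pcs))
                return PTR_ERR(pcs);
        txgbe->pcs = pcs;                   /* opaque from here on   */

        phylink_pcs_change(txgbe->pcs, up); /* link IRQ notification */
        xpcs_destroy_pcs(txgbe->pcs);       /* teardown              */
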
txgbe_remove_phy(struct txgbe *txgbe) clkdev_drop(txgbe->clock); clk_unregister(txgbe->clk); phylink_destroy(txgbe->wx->phylink); - xpcs_destroy(txgbe->xpcs); + xpcs_destroy_pcs(txgbe->pcs); software_node_unregister_node_group(txgbe->nodes.group); } diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h b/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h index 959102c4c379..cc3a7b62fe9e 100644 --- a/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h @@ -329,7 +329,7 @@ struct txgbe { struct wx *wx; struct txgbe_nodes nodes; struct txgbe_irq misc; - struct dw_xpcs *xpcs; + struct phylink_pcs *pcs; struct platform_device *sfp_dev; struct platform_device *i2c_dev; struct clk_lookup *clock; diff --git a/drivers/net/ethernet/wiznet/w5100.c b/drivers/net/ethernet/wiznet/w5100.c index b26fd15c25ae..b77f096eaf99 100644 --- a/drivers/net/ethernet/wiznet/w5100.c +++ b/drivers/net/ethernet/wiznet/w5100.c @@ -1271,6 +1271,6 @@ static struct platform_driver w5100_mmio_driver = { .pm = &w5100_pm_ops, }, .probe = w5100_mmio_probe, - .remove_new = w5100_mmio_remove, + .remove = w5100_mmio_remove, }; module_platform_driver(w5100_mmio_driver); diff --git a/drivers/net/ethernet/wiznet/w5300.c b/drivers/net/ethernet/wiznet/w5300.c index f165616f36fe..3e711dea3b2c 100644 --- a/drivers/net/ethernet/wiznet/w5300.c +++ b/drivers/net/ethernet/wiznet/w5300.c @@ -681,7 +681,7 @@ static struct platform_driver w5300_driver = { .pm = &w5300_pm_ops, }, .probe = w5300_probe, - .remove_new = w5300_remove, + .remove = w5300_remove, }; module_platform_driver(w5300_driver); diff --git a/drivers/net/ethernet/xilinx/ll_temac_main.c b/drivers/net/ethernet/xilinx/ll_temac_main.c index 1072e2210aed..edb36ff07a0c 100644 --- a/drivers/net/ethernet/xilinx/ll_temac_main.c +++ b/drivers/net/ethernet/xilinx/ll_temac_main.c @@ -1649,7 +1649,7 @@ MODULE_DEVICE_TABLE(of, temac_of_match); static struct platform_driver temac_driver = { .probe = temac_probe, - .remove_new = temac_remove, + .remove = temac_remove, .driver = { .name = "xilinx_temac", .of_match_table = temac_of_match, diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c index d940853acc0b..273ec5f70005 100644 --- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c +++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c @@ -2999,7 +2999,7 @@ static DEFINE_SIMPLE_DEV_PM_OPS(axienet_pm_ops, static struct platform_driver axienet_driver = { .probe = axienet_probe, - .remove_new = axienet_remove, + .remove = axienet_remove, .shutdown = axienet_shutdown, .driver = { .name = "xilinx_axienet", diff --git a/drivers/net/ethernet/xilinx/xilinx_emaclite.c b/drivers/net/ethernet/xilinx/xilinx_emaclite.c index 940452d0a4d2..ecf47107146d 100644 --- a/drivers/net/ethernet/xilinx/xilinx_emaclite.c +++ b/drivers/net/ethernet/xilinx/xilinx_emaclite.c @@ -7,6 +7,7 @@ * Copyright (c) 2007 - 2013 Xilinx, Inc. 
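
The xilinx_emaclite rework beginning here (see the hunks below) is a straight conversion to managed resources: devm_alloc_etherdev() ties the net_device lifetime to the platform device, the newly supported optional clock comes from devm_clk_get_optional_enabled(), and errors funnel through dev_err_probe(), which is what lets the error label and both free_netdev() calls disappear. The probe skeleton, with driver specifics trimmed and foo_priv hypothetical::

        ndev = devm_alloc_etherdev(dev, sizeof(struct foo_priv));
        if (!ndev)
                return -ENOMEM;         /* no free_netdev() on any path */

        clkin = devm_clk_get_optional_enabled(dev, NULL);
        if (IS_ERR(clkin))
                return dev_err_probe(dev, PTR_ERR(clkin),
                                     "Failed to get and enable clock\n");
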
*/ +#include <linux/clk.h> #include <linux/module.h> #include <linux/platform_device.h> #include <linux/uaccess.h> @@ -1091,13 +1092,14 @@ static int xemaclite_of_probe(struct platform_device *ofdev) struct net_device *ndev = NULL; struct net_local *lp = NULL; struct device *dev = &ofdev->dev; + struct clk *clkin; int rc = 0; dev_info(dev, "Device Tree Probing\n"); /* Create an ethernet device instance */ - ndev = alloc_etherdev(sizeof(struct net_local)); + ndev = devm_alloc_etherdev(dev, sizeof(struct net_local)); if (!ndev) return -ENOMEM; @@ -1110,15 +1112,13 @@ static int xemaclite_of_probe(struct platform_device *ofdev) /* Get IRQ for the device */ rc = platform_get_irq(ofdev, 0); if (rc < 0) - goto error; + return rc; ndev->irq = rc; lp->base_addr = devm_platform_get_and_ioremap_resource(ofdev, 0, &res); - if (IS_ERR(lp->base_addr)) { - rc = PTR_ERR(lp->base_addr); - goto error; - } + if (IS_ERR(lp->base_addr)) + return PTR_ERR(lp->base_addr); ndev->mem_start = res->start; ndev->mem_end = res->end; @@ -1129,6 +1129,11 @@ static int xemaclite_of_probe(struct platform_device *ofdev) lp->tx_ping_pong = get_bool(ofdev, "xlnx,tx-ping-pong"); lp->rx_ping_pong = get_bool(ofdev, "xlnx,rx-ping-pong"); + clkin = devm_clk_get_optional_enabled(&ofdev->dev, NULL); + if (IS_ERR(clkin)) + return dev_err_probe(&ofdev->dev, PTR_ERR(clkin), + "Failed to get and enable clock from Device Tree\n"); + rc = of_get_ethdev_address(ofdev->dev.of_node, ndev); if (rc) { dev_warn(dev, "No MAC address found, using random\n"); @@ -1167,8 +1172,6 @@ static int xemaclite_of_probe(struct platform_device *ofdev) put_node: of_node_put(lp->phy_node); -error: - free_netdev(ndev); return rc; } @@ -1197,8 +1200,6 @@ static void xemaclite_of_remove(struct platform_device *of_dev) of_node_put(lp->phy_node); lp->phy_node = NULL; - - free_netdev(ndev); } #ifdef CONFIG_NET_POLL_CONTROLLER @@ -1257,7 +1258,7 @@ static struct platform_driver xemaclite_of_driver = { .of_match_table = xemaclite_of_match, }, .probe = xemaclite_of_probe, - .remove_new = xemaclite_of_remove, + .remove = xemaclite_of_remove, }; module_platform_driver(xemaclite_of_driver); diff --git a/drivers/net/ethernet/xscale/ixp4xx_eth.c b/drivers/net/ethernet/xscale/ixp4xx_eth.c index aef316278eb4..a2ab1c150822 100644 --- a/drivers/net/ethernet/xscale/ixp4xx_eth.c +++ b/drivers/net/ethernet/xscale/ixp4xx_eth.c @@ -1619,7 +1619,7 @@ static struct platform_driver ixp4xx_eth_driver = { .of_match_table = of_match_ptr(ixp4xx_eth_of_match), }, .probe = ixp4xx_eth_probe, - .remove_new = ixp4xx_eth_remove, + .remove = ixp4xx_eth_remove, }; module_platform_driver(ixp4xx_eth_driver); diff --git a/drivers/net/fjes/fjes_main.c b/drivers/net/fjes/fjes_main.c index fad5b6564464..4a4ed2ccf72f 100644 --- a/drivers/net/fjes/fjes_main.c +++ b/drivers/net/fjes/fjes_main.c @@ -1466,7 +1466,7 @@ static struct platform_driver fjes_driver = { .name = DRV_NAME, }, .probe = fjes_probe, - .remove_new = fjes_remove, + .remove = fjes_remove, }; static acpi_status diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c index 7f611c74eb62..2f29b1386b1c 100644 --- a/drivers/net/geneve.c +++ b/drivers/net/geneve.c @@ -827,7 +827,7 @@ static int geneve_xmit_skb(struct sk_buff *skb, struct net_device *dev, __be16 sport; int err; - if (!skb_vlan_inet_prepare(skb, inner_proto_inherit)) + if (skb_vlan_inet_prepare(skb, inner_proto_inherit)) return -EINVAL; if (!gs4) @@ -937,7 +937,7 @@ static int geneve6_xmit_skb(struct sk_buff *skb, struct net_device *dev, __be16 sport; int err; - if 
(!skb_vlan_inet_prepare(skb, inner_proto_inherit)) + if (skb_vlan_inet_prepare(skb, inner_proto_inherit)) return -EINVAL; if (!gs6) diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c index 2b6ec979a62f..9afb08dbc350 100644 --- a/drivers/net/hyperv/netvsc.c +++ b/drivers/net/hyperv/netvsc.c @@ -712,8 +712,13 @@ void netvsc_device_remove(struct hv_device *device) for (i = 0; i < net_device->num_chn; i++) { /* See also vmbus_reset_channel_cb(). */ /* only disable enabled NAPI channel */ - if (i < ndev->real_num_rx_queues) + if (i < ndev->real_num_rx_queues) { + netif_queue_set_napi(ndev, i, NETDEV_QUEUE_TYPE_TX, + NULL); + netif_queue_set_napi(ndev, i, NETDEV_QUEUE_TYPE_RX, + NULL); napi_disable(&net_device->chan_table[i].napi); + } netif_napi_del(&net_device->chan_table[i].napi); } @@ -1787,6 +1792,10 @@ struct netvsc_device *netvsc_device_add(struct hv_device *device, netdev_dbg(ndev, "hv_netvsc channel opened successfully\n"); napi_enable(&net_device->chan_table[0].napi); + netif_queue_set_napi(ndev, 0, NETDEV_QUEUE_TYPE_RX, + &net_device->chan_table[0].napi); + netif_queue_set_napi(ndev, 0, NETDEV_QUEUE_TYPE_TX, + &net_device->chan_table[0].napi); /* Connect with the NetVsp */ ret = netvsc_connect_vsp(device, net_device, device_info); @@ -1805,6 +1814,8 @@ struct netvsc_device *netvsc_device_add(struct hv_device *device, close: RCU_INIT_POINTER(net_device_ctx->nvdev, NULL); + netif_queue_set_napi(ndev, 0, NETDEV_QUEUE_TYPE_TX, NULL); + netif_queue_set_napi(ndev, 0, NETDEV_QUEUE_TYPE_RX, NULL); napi_disable(&net_device->chan_table[0].napi); /* Now, we can close the channel safely */ diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c index 23180f7b67b6..d6c4abfc3a28 100644 --- a/drivers/net/hyperv/netvsc_drv.c +++ b/drivers/net/hyperv/netvsc_drv.c @@ -1557,7 +1557,7 @@ static void netvsc_get_ethtool_stats(struct net_device *dev, data[i++] = xdp_tx; } - pcpu_sum = kvmalloc_array(num_possible_cpus(), + pcpu_sum = kvmalloc_array(nr_cpu_ids, sizeof(struct netvsc_ethtool_pcpu_stats), GFP_KERNEL); if (!pcpu_sum) diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c index ecc2128ca9b7..c0ceeef4fcd8 100644 --- a/drivers/net/hyperv/rndis_filter.c +++ b/drivers/net/hyperv/rndis_filter.c @@ -1269,10 +1269,15 @@ static void netvsc_sc_open(struct vmbus_channel *new_sc) ret = vmbus_open(new_sc, netvsc_ring_bytes, netvsc_ring_bytes, NULL, 0, netvsc_channel_cb, nvchan); - if (ret == 0) + if (ret == 0) { napi_enable(&nvchan->napi); - else + netif_queue_set_napi(ndev, chn_index, NETDEV_QUEUE_TYPE_RX, + &nvchan->napi); + netif_queue_set_napi(ndev, chn_index, NETDEV_QUEUE_TYPE_TX, + &nvchan->napi); + } else { netdev_notice(ndev, "sub channel open failed: %d\n", ret); + } if (atomic_inc_return(&nvscdev->open_chn) == nvscdev->num_chn) wake_up(&nvscdev->subchan_open); diff --git a/drivers/net/ieee802154/fakelb.c b/drivers/net/ieee802154/fakelb.c index 2930141d7dd2..e11d8eda85ea 100644 --- a/drivers/net/ieee802154/fakelb.c +++ b/drivers/net/ieee802154/fakelb.c @@ -235,7 +235,7 @@ static struct platform_device *ieee802154fake_dev; static struct platform_driver ieee802154fake_driver = { .probe = fakelb_probe, - .remove_new = fakelb_remove, + .remove = fakelb_remove, .driver = { .name = "ieee802154fakelb", }, diff --git a/drivers/net/ieee802154/mac802154_hwsim.c b/drivers/net/ieee802154/mac802154_hwsim.c index 2c2483bbe780..1cab20b5a885 100644 --- a/drivers/net/ieee802154/mac802154_hwsim.c +++ b/drivers/net/ieee802154/mac802154_hwsim.c @@ 
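
The hv_netvsc hunks above register each channel's NAPI context with its RX and TX queues so the netdev netlink API can report the queue-to-NAPI mapping. The discipline is symmetric: publish with netif_queue_set_napi() only after napi_enable(), and clear back to NULL before napi_disable(). In sketch form, with hypothetical chan/qidx names::

        napi_enable(&chan->napi);
        netif_queue_set_napi(ndev, qidx, NETDEV_QUEUE_TYPE_RX, &chan->napi);
        netif_queue_set_napi(ndev, qidx, NETDEV_QUEUE_TYPE_TX, &chan->napi);

        /* ... and teardown mirrors it in reverse: */
        netif_queue_set_napi(ndev, qidx, NETDEV_QUEUE_TYPE_TX, NULL);
        netif_queue_set_napi(ndev, qidx, NETDEV_QUEUE_TYPE_RX, NULL);
        napi_disable(&chan->napi);
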
-1047,7 +1047,7 @@ static void hwsim_remove(struct platform_device *pdev) static struct platform_driver mac802154hwsim_driver = { .probe = hwsim_probe, - .remove_new = hwsim_remove, + .remove = hwsim_remove, .driver = { .name = "mac802154_hwsim", }, diff --git a/drivers/net/ipa/ipa_main.c b/drivers/net/ipa/ipa_main.c index 5f3dd5a2dcf4..f25f6e2cf58c 100644 --- a/drivers/net/ipa/ipa_main.c +++ b/drivers/net/ipa/ipa_main.c @@ -1012,7 +1012,7 @@ static const struct attribute_group *ipa_attribute_groups[] = { static struct platform_driver ipa_driver = { .probe = ipa_probe, - .remove_new = ipa_remove, + .remove = ipa_remove, .shutdown = ipa_remove, .driver = { .name = "ipa", diff --git a/drivers/net/ipvlan/ipvlan_l3s.c b/drivers/net/ipvlan/ipvlan_l3s.c index d5b05e803219..b4ef386bdb1b 100644 --- a/drivers/net/ipvlan/ipvlan_l3s.c +++ b/drivers/net/ipvlan/ipvlan_l3s.c @@ -2,6 +2,8 @@ /* Copyright (c) 2014 Mahesh Bandewar <maheshb@google.com> */ +#include <net/ip.h> + #include "ipvlan.h" static unsigned int ipvlan_netid __read_mostly; @@ -48,11 +50,11 @@ static struct sk_buff *ipvlan_l3_rcv(struct net_device *dev, switch (proto) { case AF_INET: { - struct iphdr *ip4h = ip_hdr(skb); + const struct iphdr *ip4h = ip_hdr(skb); int err; err = ip_route_input_noref(skb, ip4h->daddr, ip4h->saddr, - ip4h->tos, sdev); + ip4h_dscp(ip4h), sdev); if (unlikely(err)) goto out; break; diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c index cf18e66de142..edbd5afcec41 100644 --- a/drivers/net/macvlan.c +++ b/drivers/net/macvlan.c @@ -1117,7 +1117,7 @@ static void macvlan_dev_poll_controller(struct net_device *dev) return; } -static int macvlan_dev_netpoll_setup(struct net_device *dev, struct netpoll_info *npinfo) +static int macvlan_dev_netpoll_setup(struct net_device *dev) { struct macvlan_dev *vlan = netdev_priv(dev); struct net_device *real_dev = vlan->lowerdev; diff --git a/drivers/net/mdio/mdio-aspeed.c b/drivers/net/mdio/mdio-aspeed.c index c2170650415c..e55be6dc9ae7 100644 --- a/drivers/net/mdio/mdio-aspeed.c +++ b/drivers/net/mdio/mdio-aspeed.c @@ -198,7 +198,7 @@ static struct platform_driver aspeed_mdio_driver = { .of_match_table = aspeed_mdio_of_match, }, .probe = aspeed_mdio_probe, - .remove_new = aspeed_mdio_remove, + .remove = aspeed_mdio_remove, }; module_platform_driver(aspeed_mdio_driver); diff --git a/drivers/net/mdio/mdio-bcm-iproc.c b/drivers/net/mdio/mdio-bcm-iproc.c index 5a2d26c6afdc..91690b496793 100644 --- a/drivers/net/mdio/mdio-bcm-iproc.c +++ b/drivers/net/mdio/mdio-bcm-iproc.c @@ -208,7 +208,7 @@ static struct platform_driver iproc_mdio_driver = { #endif }, .probe = iproc_mdio_probe, - .remove_new = iproc_mdio_remove, + .remove = iproc_mdio_remove, }; module_platform_driver(iproc_mdio_driver); diff --git a/drivers/net/mdio/mdio-bcm-unimac.c b/drivers/net/mdio/mdio-bcm-unimac.c index b7bc70586ee0..074d96328f41 100644 --- a/drivers/net/mdio/mdio-bcm-unimac.c +++ b/drivers/net/mdio/mdio-bcm-unimac.c @@ -355,7 +355,7 @@ static struct platform_driver unimac_mdio_driver = { .pm = &unimac_mdio_pm_ops, }, .probe = unimac_mdio_probe, - .remove_new = unimac_mdio_remove, + .remove = unimac_mdio_remove, }; module_platform_driver(unimac_mdio_driver); diff --git a/drivers/net/mdio/mdio-gpio.c b/drivers/net/mdio/mdio-gpio.c index 82088741debd..1cfd538b5105 100644 --- a/drivers/net/mdio/mdio-gpio.c +++ b/drivers/net/mdio/mdio-gpio.c @@ -207,7 +207,7 @@ MODULE_DEVICE_TABLE(of, mdio_gpio_of_match); static struct platform_driver mdio_gpio_driver = { .probe = mdio_gpio_probe, - .remove_new = 
mdio_gpio_remove, + .remove = mdio_gpio_remove, .driver = { .name = "mdio-gpio", .of_match_table = mdio_gpio_of_match, diff --git a/drivers/net/mdio/mdio-hisi-femac.c b/drivers/net/mdio/mdio-hisi-femac.c index 6703f626ee83..d78a1dc36cfd 100644 --- a/drivers/net/mdio/mdio-hisi-femac.c +++ b/drivers/net/mdio/mdio-hisi-femac.c @@ -136,7 +136,7 @@ MODULE_DEVICE_TABLE(of, hisi_femac_mdio_dt_ids); static struct platform_driver hisi_femac_mdio_driver = { .probe = hisi_femac_mdio_probe, - .remove_new = hisi_femac_mdio_remove, + .remove = hisi_femac_mdio_remove, .driver = { .name = "hisi-femac-mdio", .of_match_table = hisi_femac_mdio_dt_ids, diff --git a/drivers/net/mdio/mdio-ipq4019.c b/drivers/net/mdio/mdio-ipq4019.c index 9d8f43b28aac..dd3ed2d6430b 100644 --- a/drivers/net/mdio/mdio-ipq4019.c +++ b/drivers/net/mdio/mdio-ipq4019.c @@ -391,7 +391,7 @@ MODULE_DEVICE_TABLE(of, ipq4019_mdio_dt_ids); static struct platform_driver ipq4019_mdio_driver = { .probe = ipq4019_mdio_probe, - .remove_new = ipq4019_mdio_remove, + .remove = ipq4019_mdio_remove, .driver = { .name = "ipq4019-mdio", .of_match_table = ipq4019_mdio_dt_ids, diff --git a/drivers/net/mdio/mdio-ipq8064.c b/drivers/net/mdio/mdio-ipq8064.c index f71b6e1c66e4..6253a9ab8b69 100644 --- a/drivers/net/mdio/mdio-ipq8064.c +++ b/drivers/net/mdio/mdio-ipq8064.c @@ -162,7 +162,7 @@ MODULE_DEVICE_TABLE(of, ipq8064_mdio_dt_ids); static struct platform_driver ipq8064_mdio_driver = { .probe = ipq8064_mdio_probe, - .remove_new = ipq8064_mdio_remove, + .remove = ipq8064_mdio_remove, .driver = { .name = "ipq8064-mdio", .of_match_table = ipq8064_mdio_dt_ids, diff --git a/drivers/net/mdio/mdio-moxart.c b/drivers/net/mdio/mdio-moxart.c index d35af8cd7c4d..9853be6f0f22 100644 --- a/drivers/net/mdio/mdio-moxart.c +++ b/drivers/net/mdio/mdio-moxart.c @@ -171,7 +171,7 @@ MODULE_DEVICE_TABLE(of, moxart_mdio_dt_ids); static struct platform_driver moxart_mdio_driver = { .probe = moxart_mdio_probe, - .remove_new = moxart_mdio_remove, + .remove = moxart_mdio_remove, .driver = { .name = "moxart-mdio", .of_match_table = moxart_mdio_dt_ids, diff --git a/drivers/net/mdio/mdio-mscc-miim.c b/drivers/net/mdio/mdio-mscc-miim.c index 62c47e0dd142..944efd33da6d 100644 --- a/drivers/net/mdio/mdio-mscc-miim.c +++ b/drivers/net/mdio/mdio-mscc-miim.c @@ -377,7 +377,7 @@ MODULE_DEVICE_TABLE(of, mscc_miim_match); static struct platform_driver mscc_miim_driver = { .probe = mscc_miim_probe, - .remove_new = mscc_miim_remove, + .remove = mscc_miim_remove, .driver = { .name = "mscc-miim", .of_match_table = mscc_miim_match, diff --git a/drivers/net/mdio/mdio-mux-bcm-iproc.c b/drivers/net/mdio/mdio-mux-bcm-iproc.c index 1ce7d67ba72e..8ba0917a930a 100644 --- a/drivers/net/mdio/mdio-mux-bcm-iproc.c +++ b/drivers/net/mdio/mdio-mux-bcm-iproc.c @@ -342,7 +342,7 @@ static struct platform_driver mdiomux_iproc_driver = { .pm = &mdio_mux_iproc_pm_ops, }, .probe = mdio_mux_iproc_probe, - .remove_new = mdio_mux_iproc_remove, + .remove = mdio_mux_iproc_remove, }; module_platform_driver(mdiomux_iproc_driver); diff --git a/drivers/net/mdio/mdio-mux-bcm6368.c b/drivers/net/mdio/mdio-mux-bcm6368.c index 1b77e0e3e6e1..476f8b72d020 100644 --- a/drivers/net/mdio/mdio-mux-bcm6368.c +++ b/drivers/net/mdio/mdio-mux-bcm6368.c @@ -173,7 +173,7 @@ static struct platform_driver bcm6368_mdiomux_driver = { .of_match_table = bcm6368_mdiomux_ids, }, .probe = bcm6368_mdiomux_probe, - .remove_new = bcm6368_mdiomux_remove, + .remove = bcm6368_mdiomux_remove, }; module_platform_driver(bcm6368_mdiomux_driver); diff --git 
a/drivers/net/mdio/mdio-mux-gpio.c b/drivers/net/mdio/mdio-mux-gpio.c index 38fb031f8979..ef77bd1abae9 100644 --- a/drivers/net/mdio/mdio-mux-gpio.c +++ b/drivers/net/mdio/mdio-mux-gpio.c @@ -86,7 +86,7 @@ static struct platform_driver mdio_mux_gpio_driver = { .of_match_table = mdio_mux_gpio_match, }, .probe = mdio_mux_gpio_probe, - .remove_new = mdio_mux_gpio_remove, + .remove = mdio_mux_gpio_remove, }; module_platform_driver(mdio_mux_gpio_driver); diff --git a/drivers/net/mdio/mdio-mux-meson-g12a.c b/drivers/net/mdio/mdio-mux-meson-g12a.c index 754b0f2cf15b..08d6a6c93fb8 100644 --- a/drivers/net/mdio/mdio-mux-meson-g12a.c +++ b/drivers/net/mdio/mdio-mux-meson-g12a.c @@ -348,7 +348,7 @@ static void g12a_mdio_mux_remove(struct platform_device *pdev) static struct platform_driver g12a_mdio_mux_driver = { .probe = g12a_mdio_mux_probe, - .remove_new = g12a_mdio_mux_remove, + .remove = g12a_mdio_mux_remove, .driver = { .name = "g12a-mdio_mux", .of_match_table = g12a_mdio_mux_match, diff --git a/drivers/net/mdio/mdio-mux-meson-gxl.c b/drivers/net/mdio/mdio-mux-meson-gxl.c index 89554021b5cc..00c66240136b 100644 --- a/drivers/net/mdio/mdio-mux-meson-gxl.c +++ b/drivers/net/mdio/mdio-mux-meson-gxl.c @@ -149,7 +149,7 @@ static void gxl_mdio_mux_remove(struct platform_device *pdev) static struct platform_driver gxl_mdio_mux_driver = { .probe = gxl_mdio_mux_probe, - .remove_new = gxl_mdio_mux_remove, + .remove = gxl_mdio_mux_remove, .driver = { .name = "gxl-mdio-mux", .of_match_table = gxl_mdio_mux_match, diff --git a/drivers/net/mdio/mdio-mux-mmioreg.c b/drivers/net/mdio/mdio-mux-mmioreg.c index b70e6d1ad429..9c4b1efd0d53 100644 --- a/drivers/net/mdio/mdio-mux-mmioreg.c +++ b/drivers/net/mdio/mdio-mux-mmioreg.c @@ -180,7 +180,7 @@ static struct platform_driver mdio_mux_mmioreg_driver = { .of_match_table = mdio_mux_mmioreg_match, }, .probe = mdio_mux_mmioreg_probe, - .remove_new = mdio_mux_mmioreg_remove, + .remove = mdio_mux_mmioreg_remove, }; module_platform_driver(mdio_mux_mmioreg_driver); diff --git a/drivers/net/mdio/mdio-mux-multiplexer.c b/drivers/net/mdio/mdio-mux-multiplexer.c index 569b13383191..8e11960fc539 100644 --- a/drivers/net/mdio/mdio-mux-multiplexer.c +++ b/drivers/net/mdio/mdio-mux-multiplexer.c @@ -107,7 +107,7 @@ static struct platform_driver mdio_mux_multiplexer_driver = { .of_match_table = mdio_mux_multiplexer_match, }, .probe = mdio_mux_multiplexer_probe, - .remove_new = mdio_mux_multiplexer_remove, + .remove = mdio_mux_multiplexer_remove, }; module_platform_driver(mdio_mux_multiplexer_driver); diff --git a/drivers/net/mdio/mdio-octeon.c b/drivers/net/mdio/mdio-octeon.c index 037a38cfed56..2beb83154d39 100644 --- a/drivers/net/mdio/mdio-octeon.c +++ b/drivers/net/mdio/mdio-octeon.c @@ -104,7 +104,7 @@ static struct platform_driver octeon_mdiobus_driver = { .of_match_table = octeon_mdiobus_match, }, .probe = octeon_mdiobus_probe, - .remove_new = octeon_mdiobus_remove, + .remove = octeon_mdiobus_remove, }; module_platform_driver(octeon_mdiobus_driver); diff --git a/drivers/net/mdio/mdio-sun4i.c b/drivers/net/mdio/mdio-sun4i.c index 4511bcc73b36..ad1edadc5a08 100644 --- a/drivers/net/mdio/mdio-sun4i.c +++ b/drivers/net/mdio/mdio-sun4i.c @@ -164,7 +164,7 @@ MODULE_DEVICE_TABLE(of, sun4i_mdio_dt_ids); static struct platform_driver sun4i_mdio_driver = { .probe = sun4i_mdio_probe, - .remove_new = sun4i_mdio_remove, + .remove = sun4i_mdio_remove, .driver = { .name = "sun4i-mdio", .of_match_table = sun4i_mdio_dt_ids, diff --git a/drivers/net/mdio/mdio-thunder.c 
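
The many one-line .remove_new = to .remove = conversions running through this section are mechanical: struct platform_driver's .remove callback now carries the void-returning prototype that .remove_new was introduced to stage, so drivers move back to the canonical member name with no change to the callback body. The shape, with hypothetical foo_* names::

        static void foo_remove(struct platform_device *pdev)
        {
                /* resource teardown; nothing useful to return, errors
                 * on the remove path can only be logged
                 */
        }

        static struct platform_driver foo_driver = {
                .probe  = foo_probe,
                .remove = foo_remove,   /* was .remove_new transitionally */
        };
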
b/drivers/net/mdio/mdio-thunder.c index 6067d96b2b7b..1e1aa72b1eff 100644 --- a/drivers/net/mdio/mdio-thunder.c +++ b/drivers/net/mdio/mdio-thunder.c @@ -23,7 +23,6 @@ static int thunder_mdiobus_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { struct device_node *node; - struct fwnode_handle *fwn; struct thunder_mdiobus_nexus *nexus; int err; int i; @@ -54,7 +53,7 @@ static int thunder_mdiobus_pci_probe(struct pci_dev *pdev, } i = 0; - device_for_each_child_node(&pdev->dev, fwn) { + device_for_each_child_node_scoped(&pdev->dev, fwn) { struct resource r; struct mii_bus *mii_bus; struct cavium_mdiobus *bus; @@ -106,7 +105,6 @@ static int thunder_mdiobus_pci_probe(struct pci_dev *pdev, if (i >= ARRAY_SIZE(nexus->buses)) break; } - fwnode_handle_put(fwn); return 0; err_release_regions: diff --git a/drivers/net/mdio/mdio-xgene.c b/drivers/net/mdio/mdio-xgene.c index 2772a3098543..a8f91a4b7fed 100644 --- a/drivers/net/mdio/mdio-xgene.c +++ b/drivers/net/mdio/mdio-xgene.c @@ -441,7 +441,7 @@ static struct platform_driver xgene_mdio_driver = { .acpi_match_table = ACPI_PTR(xgene_mdio_acpi_match), }, .probe = xgene_mdio_probe, - .remove_new = xgene_mdio_remove, + .remove = xgene_mdio_remove, }; module_platform_driver(xgene_mdio_driver); diff --git a/drivers/net/netconsole.c b/drivers/net/netconsole.c index de20928f7402..4ea44a2f48f7 100644 --- a/drivers/net/netconsole.c +++ b/drivers/net/netconsole.c @@ -1058,102 +1058,105 @@ static struct notifier_block netconsole_netdev_notifier = { .notifier_call = netconsole_netdev_event, }; -/** - * send_ext_msg_udp - send extended log message to target - * @nt: target to send message to - * @msg: extended log message to send - * @msg_len: length of message - * - * Transfer extended log @msg to @nt. If @msg is longer than - * MAX_PRINT_CHUNK, it'll be split and transmitted in multiple chunks with - * ncfrag header field added to identify them. 
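
In the mdio-thunder probe above, device_for_each_child_node_scoped() replaces the manual fwnode iteration: the scoped variant declares the handle itself and drops its reference automatically on every way out of the loop body, which is why the trailing fwnode_handle_put() is deleted rather than moved. A sketch, with setup_child() as a hypothetical per-child helper::

        device_for_each_child_node_scoped(&pdev->dev, fwn) {
                if (setup_child(fwn))
                        break;  /* reference dropped automatically, even
                                 * on break or early return
                                 */
        }
        /* no fwnode_handle_put() needed after the loop */
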
- */ -static void send_ext_msg_udp(struct netconsole_target *nt, const char *msg, - int msg_len) +static void send_msg_no_fragmentation(struct netconsole_target *nt, + const char *msg, + int msg_len, + int release_len) { static char buf[MAX_PRINT_CHUNK]; /* protected by target_list_lock */ - const char *header, *body; - int offset = 0; - int header_len, body_len; - const char *msg_ready = msg; + const char *userdata = NULL; const char *release; - int release_len = 0; - int userdata_len = 0; - char *userdata = NULL; #ifdef CONFIG_NETCONSOLE_DYNAMIC userdata = nt->userdata_complete; - userdata_len = nt->userdata_length; #endif - if (nt->release) { + if (release_len) { release = init_utsname()->release; - release_len = strlen(release) + 1; + + scnprintf(buf, MAX_PRINT_CHUNK, "%s,%s", release, msg); + msg_len += release_len; + } else { + memcpy(buf, msg, msg_len); } - if (msg_len + release_len + userdata_len <= MAX_PRINT_CHUNK) { - /* No fragmentation needed */ - if (nt->release) { - scnprintf(buf, MAX_PRINT_CHUNK, "%s,%s", release, msg); - msg_len += release_len; - } else { - memcpy(buf, msg, msg_len); - } + if (userdata) + msg_len += scnprintf(&buf[msg_len], + MAX_PRINT_CHUNK - msg_len, + "%s", userdata); - if (userdata) - msg_len += scnprintf(&buf[msg_len], - MAX_PRINT_CHUNK - msg_len, - "%s", userdata); + netpoll_send_udp(&nt->np, buf, msg_len); +} - msg_ready = buf; - netpoll_send_udp(&nt->np, msg_ready, msg_len); - return; - } +static void append_release(char *buf) +{ + const char *release; - /* need to insert extra header fields, detect header and body */ - header = msg; - body = memchr(msg, ';', msg_len); - if (WARN_ON_ONCE(!body)) - return; + release = init_utsname()->release; + scnprintf(buf, MAX_PRINT_CHUNK, "%s,", release); +} - header_len = body - header; - body_len = msg_len - header_len - 1; - body++; +static void send_fragmented_body(struct netconsole_target *nt, char *buf, + const char *msgbody, int header_len, + int msgbody_len) +{ + const char *userdata = NULL; + int body_len, offset = 0; + int userdata_len = 0; - /* - * Transfer multiple chunks with the following extra header. - * "ncfrag=<byte-offset>/<total-bytes>" +#ifdef CONFIG_NETCONSOLE_DYNAMIC + userdata = nt->userdata_complete; + userdata_len = nt->userdata_length; +#endif + + /* body_len represents the number of bytes that will be sent. This is + * bigger than MAX_PRINT_CHUNK, thus, it will be split in multiple + * packets */ - if (nt->release) - scnprintf(buf, MAX_PRINT_CHUNK, "%s,", release); - memcpy(buf + release_len, header, header_len); - header_len += release_len; + body_len = msgbody_len + userdata_len; - while (offset < body_len + userdata_len) { + /* In each iteration of the while loop below, we send a packet + * containing the header and a portion of the body. The body is + * composed of two parts: msgbody and userdata. We keep track of how + * many bytes have been sent so far using the offset variable, which + * ranges from 0 to the total length of the body. 
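
To make the loop below concrete, take MAX_PRINT_CHUNK = 1000 and, say, header_len = 100, msgbody_len = 1500 and userdata_len = 300, so body_len = 1800. Each packet re-emits the persisted header plus a fresh ",ncfrag=<offset>/<total>;" field, then fills whatever remains of the chunk, msgbody first, userdata after::

        iteration 1: ",ncfrag=0/1800;"    885 bytes of msgbody
        iteration 2: ",ncfrag=885/1800;"  remaining 615 bytes of msgbody,
                                          then 268 bytes of userdata in
                                          the same packet
        iteration 3: ",ncfrag=1768/1800;" final 32 bytes of userdata

(The per-iteration byte counts assume those example sizes; they follow from this_chunk = min(msgbody_len - offset, MAX_PRINT_CHUNK - this_header), with this_header growing as the printed offset widens.)
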
+ */ + while (offset < body_len) { int this_header = header_len; + bool msgbody_written = false; int this_offset = 0; int this_chunk = 0; this_header += scnprintf(buf + this_header, - sizeof(buf) - this_header, + MAX_PRINT_CHUNK - this_header, ",ncfrag=%d/%d;", offset, - body_len + userdata_len); + body_len); - /* Not all body data has been written yet */ - if (offset < body_len) { - this_chunk = min(body_len - offset, + /* Not all msgbody data has been written yet */ + if (offset < msgbody_len) { + this_chunk = min(msgbody_len - offset, MAX_PRINT_CHUNK - this_header); if (WARN_ON_ONCE(this_chunk <= 0)) return; - memcpy(buf + this_header, body + offset, this_chunk); + memcpy(buf + this_header, msgbody + offset, this_chunk); this_offset += this_chunk; } - /* Body is fully written and there is pending userdata to write, - * append userdata in this chunk + + /* msgbody was finally written, either in the previous + * messages and/or in the current buf. Time to write + * the userdata. */ - if (offset + this_offset >= body_len && - offset + this_offset < userdata_len + body_len) { - int sent_userdata = (offset + this_offset) - body_len; + msgbody_written |= offset + this_offset >= msgbody_len; + + /* Msg body is fully written and there is pending userdata to + * write, append userdata in this chunk + */ + if (msgbody_written && offset + this_offset < body_len) { + /* Track how much user data was already sent. First + * time here, sent_userdata is zero + */ + int sent_userdata = (offset + this_offset) - msgbody_len; + /* offset of bytes used in current buf */ int preceding_bytes = this_chunk + this_header; if (WARN_ON_ONCE(sent_userdata < 0)) @@ -1180,6 +1183,70 @@ static void send_ext_msg_udp(struct netconsole_target *nt, const char *msg, } } +static void send_msg_fragmented(struct netconsole_target *nt, + const char *msg, + int msg_len, + int release_len) +{ + static char buf[MAX_PRINT_CHUNK]; /* protected by target_list_lock */ + int header_len, msgbody_len; + const char *msgbody; + + /* need to insert extra header fields, detect header and msgbody */ + msgbody = memchr(msg, ';', msg_len); + if (WARN_ON_ONCE(!msgbody)) + return; + + header_len = msgbody - msg; + msgbody_len = msg_len - header_len - 1; + msgbody++; + + /* + * Transfer multiple chunks with the following extra header. + * "ncfrag=<byte-offset>/<total-bytes>" + */ + if (release_len) + append_release(buf); + + /* Copy the header into the buffer */ + memcpy(buf + release_len, msg, header_len); + header_len += release_len; + + /* for now on, the header will be persisted, and the msgbody + * will be replaced + */ + send_fragmented_body(nt, buf, msgbody, header_len, msgbody_len); +} + +/** + * send_ext_msg_udp - send extended log message to target + * @nt: target to send message to + * @msg: extended log message to send + * @msg_len: length of message + * + * Transfer extended log @msg to @nt. If @msg is longer than + * MAX_PRINT_CHUNK, it'll be split and transmitted in multiple chunks with + * ncfrag header field added to identify them. 
+ */ +static void send_ext_msg_udp(struct netconsole_target *nt, const char *msg, + int msg_len) +{ + int userdata_len = 0; + int release_len = 0; + +#ifdef CONFIG_NETCONSOLE_DYNAMIC + userdata_len = nt->userdata_length; +#endif + + if (nt->release) + release_len = strlen(init_utsname()->release) + 1; + + if (msg_len + release_len + userdata_len <= MAX_PRINT_CHUNK) + return send_msg_no_fragmentation(nt, msg, msg_len, release_len); + + return send_msg_fragmented(nt, msg, msg_len, release_len); +} + static void write_ext_msg(struct console *con, const char *msg, unsigned int len) { diff --git a/drivers/net/netdevsim/ethtool.c b/drivers/net/netdevsim/ethtool.c index 1436905bc106..5fe1eaef99b5 100644 --- a/drivers/net/netdevsim/ethtool.c +++ b/drivers/net/netdevsim/ethtool.c @@ -103,8 +103,10 @@ nsim_set_channels(struct net_device *dev, struct ethtool_channels *ch) struct netdevsim *ns = netdev_priv(dev); int err; + mutex_lock(&dev->lock); err = netif_set_real_num_queues(dev, ch->combined_count, ch->combined_count); + mutex_unlock(&dev->lock); if (err) return err; diff --git a/drivers/net/netdevsim/ipsec.c b/drivers/net/netdevsim/ipsec.c index f0d58092e7e9..88187dd4eb2d 100644 --- a/drivers/net/netdevsim/ipsec.c +++ b/drivers/net/netdevsim/ipsec.c @@ -39,10 +39,14 @@ static ssize_t nsim_dbg_netdev_ops_read(struct file *filp, if (!sap->used) continue; - p += scnprintf(p, bufsize - (p - buf), - "sa[%i] %cx ipaddr=0x%08x %08x %08x %08x\n", - i, (sap->rx ? 'r' : 't'), sap->ipaddr[0], - sap->ipaddr[1], sap->ipaddr[2], sap->ipaddr[3]); + if (sap->xs->props.family == AF_INET6) + p += scnprintf(p, bufsize - (p - buf), + "sa[%i] %cx ipaddr=%pI6c\n", + i, (sap->rx ? 'r' : 't'), &sap->ipaddr); + else + p += scnprintf(p, bufsize - (p - buf), + "sa[%i] %cx ipaddr=%pI4\n", + i, (sap->rx ? 
'r' : 't'), &sap->ipaddr[3]); p += scnprintf(p, bufsize - (p - buf), "sa[%i] spi=0x%08x proto=0x%x salt=0x%08x crypt=%d\n", i, be32_to_cpu(sap->xs->id.spi), @@ -176,14 +180,13 @@ static int nsim_ipsec_add_sa(struct xfrm_state *xs, return ret; } - if (xs->xso.dir == XFRM_DEV_OFFLOAD_IN) { + if (xs->xso.dir == XFRM_DEV_OFFLOAD_IN) sa.rx = true; - if (xs->props.family == AF_INET6) - memcpy(sa.ipaddr, &xs->id.daddr.a6, 16); - else - memcpy(&sa.ipaddr[3], &xs->id.daddr.a4, 4); - } + if (xs->props.family == AF_INET6) + memcpy(sa.ipaddr, &xs->id.daddr.a6, 16); + else + memcpy(&sa.ipaddr[3], &xs->id.daddr.a4, 4); /* the preparations worked, so save the info */ memcpy(&ipsec->sa[sa_idx], &sa, sizeof(sa)); diff --git a/drivers/net/netdevsim/macsec.c b/drivers/net/netdevsim/macsec.c index aa007b1e4b78..bdc8020d588e 100644 --- a/drivers/net/netdevsim/macsec.c +++ b/drivers/net/netdevsim/macsec.c @@ -46,7 +46,7 @@ static int nsim_macsec_add_secy(struct macsec_context *ctx) return -ENOSPC; } - netdev_dbg(ctx->netdev, "%s: adding new secy with sci %08llx at index %d\n", + netdev_dbg(ctx->netdev, "%s: adding new secy with sci %016llx at index %d\n", __func__, sci_to_cpu(ctx->secy->sci), idx); ns->macsec.nsim_secy[idx].used = true; ns->macsec.nsim_secy[idx].nsim_rxsc_count = 0; @@ -63,12 +63,12 @@ static int nsim_macsec_upd_secy(struct macsec_context *ctx) idx = nsim_macsec_find_secy(ns, ctx->secy->sci); if (idx < 0) { - netdev_err(ctx->netdev, "%s: sci %08llx not found in secy table\n", + netdev_err(ctx->netdev, "%s: sci %016llx not found in secy table\n", __func__, sci_to_cpu(ctx->secy->sci)); return -ENOENT; } - netdev_dbg(ctx->netdev, "%s: updating secy with sci %08llx at index %d\n", + netdev_dbg(ctx->netdev, "%s: updating secy with sci %016llx at index %d\n", __func__, sci_to_cpu(ctx->secy->sci), idx); return 0; @@ -81,12 +81,12 @@ static int nsim_macsec_del_secy(struct macsec_context *ctx) idx = nsim_macsec_find_secy(ns, ctx->secy->sci); if (idx < 0) { - netdev_err(ctx->netdev, "%s: sci %08llx not found in secy table\n", + netdev_err(ctx->netdev, "%s: sci %016llx not found in secy table\n", __func__, sci_to_cpu(ctx->secy->sci)); return -ENOENT; } - netdev_dbg(ctx->netdev, "%s: removing SecY with SCI %08llx at index %d\n", + netdev_dbg(ctx->netdev, "%s: removing SecY with SCI %016llx at index %d\n", __func__, sci_to_cpu(ctx->secy->sci), idx); ns->macsec.nsim_secy[idx].used = false; @@ -104,7 +104,7 @@ static int nsim_macsec_add_rxsc(struct macsec_context *ctx) idx = nsim_macsec_find_secy(ns, ctx->secy->sci); if (idx < 0) { - netdev_err(ctx->netdev, "%s: sci %08llx not found in secy table\n", + netdev_err(ctx->netdev, "%s: sci %016llx not found in secy table\n", __func__, sci_to_cpu(ctx->secy->sci)); return -ENOENT; } @@ -122,7 +122,7 @@ static int nsim_macsec_add_rxsc(struct macsec_context *ctx) netdev_err(ctx->netdev, "%s: nsim_rxsc_count not full but all RXSCs used\n", __func__); - netdev_dbg(ctx->netdev, "%s: adding new rxsc with sci %08llx at index %d\n", + netdev_dbg(ctx->netdev, "%s: adding new rxsc with sci %016llx at index %d\n", __func__, sci_to_cpu(ctx->rx_sc->sci), idx); secy->nsim_rxsc[idx].used = true; secy->nsim_rxsc[idx].sci = ctx->rx_sc->sci; @@ -139,7 +139,7 @@ static int nsim_macsec_upd_rxsc(struct macsec_context *ctx) idx = nsim_macsec_find_secy(ns, ctx->secy->sci); if (idx < 0) { - netdev_err(ctx->netdev, "%s: sci %08llx not found in secy table\n", + netdev_err(ctx->netdev, "%s: sci %016llx not found in secy table\n", __func__, sci_to_cpu(ctx->secy->sci)); return -ENOENT; } @@ 
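
The netdevsim macsec change repeated through this file is a printf-width fix: an SCI is a 64-bit value, and "%08llx" only pads to eight digits, so SCIs whose upper half is zero print shorter than full-width ones and log lines cannot be compared column by column. "%016llx" always renders all sixteen hex digits. A self-contained illustration::

        #include <stdio.h>

        int main(void)
        {
                unsigned long long sci = 0x563412345678ULL;

                printf("%08llx\n", sci);  /* "563412345678"     (12 digits) */
                printf("%016llx\n", sci); /* "0000563412345678" (full width) */
                return 0;
        }
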
-147,12 +147,12 @@ static int nsim_macsec_upd_rxsc(struct macsec_context *ctx) idx = nsim_macsec_find_rxsc(secy, ctx->rx_sc->sci); if (idx < 0) { - netdev_err(ctx->netdev, "%s: sci %08llx not found in RXSC table\n", + netdev_err(ctx->netdev, "%s: sci %016llx not found in RXSC table\n", __func__, sci_to_cpu(ctx->rx_sc->sci)); return -ENOENT; } - netdev_dbg(ctx->netdev, "%s: updating RXSC with sci %08llx at index %d\n", + netdev_dbg(ctx->netdev, "%s: updating RXSC with sci %016llx at index %d\n", __func__, sci_to_cpu(ctx->rx_sc->sci), idx); return 0; @@ -166,7 +166,7 @@ static int nsim_macsec_del_rxsc(struct macsec_context *ctx) idx = nsim_macsec_find_secy(ns, ctx->secy->sci); if (idx < 0) { - netdev_err(ctx->netdev, "%s: sci %08llx not found in secy table\n", + netdev_err(ctx->netdev, "%s: sci %016llx not found in secy table\n", __func__, sci_to_cpu(ctx->secy->sci)); return -ENOENT; } @@ -174,12 +174,12 @@ static int nsim_macsec_del_rxsc(struct macsec_context *ctx) idx = nsim_macsec_find_rxsc(secy, ctx->rx_sc->sci); if (idx < 0) { - netdev_err(ctx->netdev, "%s: sci %08llx not found in RXSC table\n", + netdev_err(ctx->netdev, "%s: sci %016llx not found in RXSC table\n", __func__, sci_to_cpu(ctx->rx_sc->sci)); return -ENOENT; } - netdev_dbg(ctx->netdev, "%s: removing RXSC with sci %08llx at index %d\n", + netdev_dbg(ctx->netdev, "%s: removing RXSC with sci %016llx at index %d\n", __func__, sci_to_cpu(ctx->rx_sc->sci), idx); secy->nsim_rxsc[idx].used = false; @@ -197,7 +197,7 @@ static int nsim_macsec_add_rxsa(struct macsec_context *ctx) idx = nsim_macsec_find_secy(ns, ctx->secy->sci); if (idx < 0) { - netdev_err(ctx->netdev, "%s: sci %08llx not found in secy table\n", + netdev_err(ctx->netdev, "%s: sci %016llx not found in secy table\n", __func__, sci_to_cpu(ctx->secy->sci)); return -ENOENT; } @@ -205,12 +205,12 @@ static int nsim_macsec_add_rxsa(struct macsec_context *ctx) idx = nsim_macsec_find_rxsc(secy, ctx->sa.rx_sa->sc->sci); if (idx < 0) { - netdev_err(ctx->netdev, "%s: sci %08llx not found in RXSC table\n", + netdev_err(ctx->netdev, "%s: sci %016llx not found in RXSC table\n", __func__, sci_to_cpu(ctx->sa.rx_sa->sc->sci)); return -ENOENT; } - netdev_dbg(ctx->netdev, "%s: RXSC with sci %08llx, AN %u\n", + netdev_dbg(ctx->netdev, "%s: RXSC with sci %016llx, AN %u\n", __func__, sci_to_cpu(ctx->sa.rx_sa->sc->sci), ctx->sa.assoc_num); return 0; @@ -224,7 +224,7 @@ static int nsim_macsec_upd_rxsa(struct macsec_context *ctx) idx = nsim_macsec_find_secy(ns, ctx->secy->sci); if (idx < 0) { - netdev_err(ctx->netdev, "%s: sci %08llx not found in secy table\n", + netdev_err(ctx->netdev, "%s: sci %016llx not found in secy table\n", __func__, sci_to_cpu(ctx->secy->sci)); return -ENOENT; } @@ -232,12 +232,12 @@ static int nsim_macsec_upd_rxsa(struct macsec_context *ctx) idx = nsim_macsec_find_rxsc(secy, ctx->sa.rx_sa->sc->sci); if (idx < 0) { - netdev_err(ctx->netdev, "%s: sci %08llx not found in RXSC table\n", + netdev_err(ctx->netdev, "%s: sci %016llx not found in RXSC table\n", __func__, sci_to_cpu(ctx->sa.rx_sa->sc->sci)); return -ENOENT; } - netdev_dbg(ctx->netdev, "%s: RXSC with sci %08llx, AN %u\n", + netdev_dbg(ctx->netdev, "%s: RXSC with sci %016llx, AN %u\n", __func__, sci_to_cpu(ctx->sa.rx_sa->sc->sci), ctx->sa.assoc_num); return 0; @@ -251,7 +251,7 @@ static int nsim_macsec_del_rxsa(struct macsec_context *ctx) idx = nsim_macsec_find_secy(ns, ctx->secy->sci); if (idx < 0) { - netdev_err(ctx->netdev, "%s: sci %08llx not found in secy table\n", + netdev_err(ctx->netdev, "%s: sci %016llx not 
found in secy table\n", __func__, sci_to_cpu(ctx->secy->sci)); return -ENOENT; } @@ -259,12 +259,12 @@ static int nsim_macsec_del_rxsa(struct macsec_context *ctx) idx = nsim_macsec_find_rxsc(secy, ctx->sa.rx_sa->sc->sci); if (idx < 0) { - netdev_err(ctx->netdev, "%s: sci %08llx not found in RXSC table\n", + netdev_err(ctx->netdev, "%s: sci %016llx not found in RXSC table\n", __func__, sci_to_cpu(ctx->sa.rx_sa->sc->sci)); return -ENOENT; } - netdev_dbg(ctx->netdev, "%s: RXSC with sci %08llx, AN %u\n", + netdev_dbg(ctx->netdev, "%s: RXSC with sci %016llx, AN %u\n", __func__, sci_to_cpu(ctx->sa.rx_sa->sc->sci), ctx->sa.assoc_num); return 0; @@ -277,12 +277,12 @@ static int nsim_macsec_add_txsa(struct macsec_context *ctx) idx = nsim_macsec_find_secy(ns, ctx->secy->sci); if (idx < 0) { - netdev_err(ctx->netdev, "%s: sci %08llx not found in secy table\n", + netdev_err(ctx->netdev, "%s: sci %016llx not found in secy table\n", __func__, sci_to_cpu(ctx->secy->sci)); return -ENOENT; } - netdev_dbg(ctx->netdev, "%s: SECY with sci %08llx, AN %u\n", + netdev_dbg(ctx->netdev, "%s: SECY with sci %016llx, AN %u\n", __func__, sci_to_cpu(ctx->secy->sci), ctx->sa.assoc_num); return 0; @@ -295,12 +295,12 @@ static int nsim_macsec_upd_txsa(struct macsec_context *ctx) idx = nsim_macsec_find_secy(ns, ctx->secy->sci); if (idx < 0) { - netdev_err(ctx->netdev, "%s: sci %08llx not found in secy table\n", + netdev_err(ctx->netdev, "%s: sci %016llx not found in secy table\n", __func__, sci_to_cpu(ctx->secy->sci)); return -ENOENT; } - netdev_dbg(ctx->netdev, "%s: SECY with sci %08llx, AN %u\n", + netdev_dbg(ctx->netdev, "%s: SECY with sci %016llx, AN %u\n", __func__, sci_to_cpu(ctx->secy->sci), ctx->sa.assoc_num); return 0; @@ -313,12 +313,12 @@ static int nsim_macsec_del_txsa(struct macsec_context *ctx) idx = nsim_macsec_find_secy(ns, ctx->secy->sci); if (idx < 0) { - netdev_err(ctx->netdev, "%s: sci %08llx not found in secy table\n", + netdev_err(ctx->netdev, "%s: sci %016llx not found in secy table\n", __func__, sci_to_cpu(ctx->secy->sci)); return -ENOENT; } - netdev_dbg(ctx->netdev, "%s: SECY with sci %08llx, AN %u\n", + netdev_dbg(ctx->netdev, "%s: SECY with sci %016llx, AN %u\n", __func__, sci_to_cpu(ctx->secy->sci), ctx->sa.assoc_num); return 0; diff --git a/drivers/net/netdevsim/netdev.c b/drivers/net/netdevsim/netdev.c index 017a6102be0a..cad85bb0cf54 100644 --- a/drivers/net/netdevsim/netdev.c +++ b/drivers/net/netdevsim/netdev.c @@ -22,6 +22,7 @@ #include <net/netdev_queues.h> #include <net/page_pool/helpers.h> #include <net/netlink.h> +#include <net/net_shaper.h> #include <net/pkt_cls.h> #include <net/rtnetlink.h> #include <net/udp_tunnel.h> @@ -475,6 +476,43 @@ static int nsim_stop(struct net_device *dev) return 0; } +static int nsim_shaper_set(struct net_shaper_binding *binding, + const struct net_shaper *shaper, + struct netlink_ext_ack *extack) +{ + return 0; +} + +static int nsim_shaper_del(struct net_shaper_binding *binding, + const struct net_shaper_handle *handle, + struct netlink_ext_ack *extack) +{ + return 0; +} + +static int nsim_shaper_group(struct net_shaper_binding *binding, + int leaves_count, + const struct net_shaper *leaves, + const struct net_shaper *root, + struct netlink_ext_ack *extack) +{ + return 0; +} + +static void nsim_shaper_cap(struct net_shaper_binding *binding, + enum net_shaper_scope scope, + unsigned long *flags) +{ + *flags = ULONG_MAX; +} + +static const struct net_shaper_ops nsim_shaper_ops = { + .set = nsim_shaper_set, + .delete = nsim_shaper_del, + .group = 
nsim_shaper_group, + .capabilities = nsim_shaper_cap, +}; + static const struct net_device_ops nsim_netdev_ops = { .ndo_start_xmit = nsim_start_xmit, .ndo_set_rx_mode = nsim_set_rx_mode, @@ -496,6 +534,7 @@ static const struct net_device_ops nsim_netdev_ops = { .ndo_bpf = nsim_bpf, .ndo_open = nsim_open, .ndo_stop = nsim_stop, + .net_shaper_ops = &nsim_shaper_ops, }; static const struct net_device_ops nsim_vf_netdev_ops = { diff --git a/drivers/net/netkit.c b/drivers/net/netkit.c index 059269557d92..cd8360b9bbde 100644 --- a/drivers/net/netkit.c +++ b/drivers/net/netkit.c @@ -20,6 +20,7 @@ struct netkit { struct net_device __rcu *peer; struct bpf_mprog_entry __rcu *active; enum netkit_action policy; + enum netkit_scrub scrub; struct bpf_mprog_bundle bundle; /* Needed in slow-path */ @@ -50,12 +51,24 @@ netkit_run(const struct bpf_mprog_entry *entry, struct sk_buff *skb, return ret; } -static void netkit_prep_forward(struct sk_buff *skb, bool xnet) +static void netkit_xnet(struct sk_buff *skb) { - skb_scrub_packet(skb, xnet); skb->priority = 0; + skb->mark = 0; +} + +static void netkit_prep_forward(struct sk_buff *skb, + bool xnet, bool xnet_scrub) +{ + skb_scrub_packet(skb, false); nf_skip_egress(skb, true); skb_reset_mac_header(skb); + if (!xnet) + return; + ipvs_reset(skb); + skb_clear_tstamp(skb); + if (xnet_scrub) + netkit_xnet(skb); } static struct netkit *netkit_priv(const struct net_device *dev) @@ -80,7 +93,8 @@ static netdev_tx_t netkit_xmit(struct sk_buff *skb, struct net_device *dev) !pskb_may_pull(skb, ETH_HLEN) || skb_orphan_frags(skb, GFP_ATOMIC))) goto drop; - netkit_prep_forward(skb, !net_eq(dev_net(dev), dev_net(peer))); + netkit_prep_forward(skb, !net_eq(dev_net(dev), dev_net(peer)), + nk->scrub); eth_skb_pkt_type(skb, peer); skb->dev = peer; entry = rcu_dereference(nk->active); @@ -297,20 +311,6 @@ static int netkit_check_policy(int policy, struct nlattr *tb, } } -static int netkit_check_mode(int mode, struct nlattr *tb, - struct netlink_ext_ack *extack) -{ - switch (mode) { - case NETKIT_L2: - case NETKIT_L3: - return 0; - default: - NL_SET_ERR_MSG_ATTR(extack, tb, - "Provided device mode can only be L2 or L3"); - return -EINVAL; - } -} - static int netkit_validate(struct nlattr *tb[], struct nlattr *data[], struct netlink_ext_ack *extack) { @@ -332,8 +332,10 @@ static int netkit_new_link(struct net *src_net, struct net_device *dev, struct netlink_ext_ack *extack) { struct nlattr *peer_tb[IFLA_MAX + 1], **tbp = tb, *attr; - enum netkit_action default_prim = NETKIT_PASS; - enum netkit_action default_peer = NETKIT_PASS; + enum netkit_action policy_prim = NETKIT_PASS; + enum netkit_action policy_peer = NETKIT_PASS; + enum netkit_scrub scrub_prim = NETKIT_SCRUB_DEFAULT; + enum netkit_scrub scrub_peer = NETKIT_SCRUB_DEFAULT; enum netkit_mode mode = NETKIT_L3; unsigned char ifname_assign_type; struct ifinfomsg *ifmp = NULL; @@ -344,13 +346,8 @@ static int netkit_new_link(struct net *src_net, struct net_device *dev, int err; if (data) { - if (data[IFLA_NETKIT_MODE]) { - attr = data[IFLA_NETKIT_MODE]; - mode = nla_get_u32(attr); - err = netkit_check_mode(mode, attr, extack); - if (err < 0) - return err; - } + if (data[IFLA_NETKIT_MODE]) + mode = nla_get_u32(data[IFLA_NETKIT_MODE]); if (data[IFLA_NETKIT_PEER_INFO]) { attr = data[IFLA_NETKIT_PEER_INFO]; ifmp = nla_data(attr); @@ -362,17 +359,21 @@ static int netkit_new_link(struct net *src_net, struct net_device *dev, return err; tbp = peer_tb; } + if (data[IFLA_NETKIT_SCRUB]) + scrub_prim = nla_get_u32(data[IFLA_NETKIT_SCRUB]); 
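
Just above, netkit_check_mode() is deleted because the equivalent validation moves into the netlink attribute policy later in these hunks: NLA_POLICY_MAX(NLA_U32, NETKIT_L3) makes the core reject out-of-range modes before the handler ever runs (with a generic extack message replacing the custom one), and the new scrub attributes get the same treatment capped at NETKIT_SCRUB_DEFAULT. The pattern in isolation, with a hypothetical attribute::

        static const struct nla_policy foo_policy[IFLA_FOO_MAX + 1] = {
                /* core-validated range check: no open-coded switch needed */
                [IFLA_FOO_MODE] = NLA_POLICY_MAX(NLA_U32, FOO_MODE_MAX),
        };
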
+ if (data[IFLA_NETKIT_PEER_SCRUB]) + scrub_peer = nla_get_u32(data[IFLA_NETKIT_PEER_SCRUB]); if (data[IFLA_NETKIT_POLICY]) { attr = data[IFLA_NETKIT_POLICY]; - default_prim = nla_get_u32(attr); - err = netkit_check_policy(default_prim, attr, extack); + policy_prim = nla_get_u32(attr); + err = netkit_check_policy(policy_prim, attr, extack); if (err < 0) return err; } if (data[IFLA_NETKIT_PEER_POLICY]) { attr = data[IFLA_NETKIT_PEER_POLICY]; - default_peer = nla_get_u32(attr); - err = netkit_check_policy(default_peer, attr, extack); + policy_peer = nla_get_u32(attr); + err = netkit_check_policy(policy_peer, attr, extack); if (err < 0) return err; } @@ -409,7 +410,8 @@ static int netkit_new_link(struct net *src_net, struct net_device *dev, nk = netkit_priv(peer); nk->primary = false; - nk->policy = default_peer; + nk->policy = policy_peer; + nk->scrub = scrub_peer; nk->mode = mode; bpf_mprog_bundle_init(&nk->bundle); @@ -434,7 +436,8 @@ static int netkit_new_link(struct net *src_net, struct net_device *dev, nk = netkit_priv(dev); nk->primary = true; - nk->policy = default_prim; + nk->policy = policy_prim; + nk->scrub = scrub_prim; nk->mode = mode; bpf_mprog_bundle_init(&nk->bundle); @@ -874,6 +877,18 @@ static int netkit_change_link(struct net_device *dev, struct nlattr *tb[], return -EACCES; } + if (data[IFLA_NETKIT_SCRUB]) { + NL_SET_ERR_MSG_ATTR(extack, data[IFLA_NETKIT_SCRUB], + "netkit scrubbing cannot be changed after device creation"); + return -EACCES; + } + + if (data[IFLA_NETKIT_PEER_SCRUB]) { + NL_SET_ERR_MSG_ATTR(extack, data[IFLA_NETKIT_PEER_SCRUB], + "netkit scrubbing cannot be changed after device creation"); + return -EACCES; + } + if (data[IFLA_NETKIT_PEER_INFO]) { NL_SET_ERR_MSG_ATTR(extack, data[IFLA_NETKIT_PEER_INFO], "netkit peer info cannot be changed after device creation"); @@ -908,8 +923,10 @@ static size_t netkit_get_size(const struct net_device *dev) { return nla_total_size(sizeof(u32)) + /* IFLA_NETKIT_POLICY */ nla_total_size(sizeof(u32)) + /* IFLA_NETKIT_PEER_POLICY */ - nla_total_size(sizeof(u8)) + /* IFLA_NETKIT_PRIMARY */ + nla_total_size(sizeof(u32)) + /* IFLA_NETKIT_SCRUB */ + nla_total_size(sizeof(u32)) + /* IFLA_NETKIT_PEER_SCRUB */ nla_total_size(sizeof(u32)) + /* IFLA_NETKIT_MODE */ + nla_total_size(sizeof(u8)) + /* IFLA_NETKIT_PRIMARY */ 0; } @@ -924,11 +941,15 @@ static int netkit_fill_info(struct sk_buff *skb, const struct net_device *dev) return -EMSGSIZE; if (nla_put_u32(skb, IFLA_NETKIT_MODE, nk->mode)) return -EMSGSIZE; + if (nla_put_u32(skb, IFLA_NETKIT_SCRUB, nk->scrub)) + return -EMSGSIZE; if (peer) { nk = netkit_priv(peer); if (nla_put_u32(skb, IFLA_NETKIT_PEER_POLICY, nk->policy)) return -EMSGSIZE; + if (nla_put_u32(skb, IFLA_NETKIT_PEER_SCRUB, nk->scrub)) + return -EMSGSIZE; } return 0; @@ -936,9 +957,11 @@ static int netkit_fill_info(struct sk_buff *skb, const struct net_device *dev) static const struct nla_policy netkit_policy[IFLA_NETKIT_MAX + 1] = { [IFLA_NETKIT_PEER_INFO] = { .len = sizeof(struct ifinfomsg) }, + [IFLA_NETKIT_MODE] = NLA_POLICY_MAX(NLA_U32, NETKIT_L3), [IFLA_NETKIT_POLICY] = { .type = NLA_U32 }, - [IFLA_NETKIT_MODE] = { .type = NLA_U32 }, [IFLA_NETKIT_PEER_POLICY] = { .type = NLA_U32 }, + [IFLA_NETKIT_SCRUB] = NLA_POLICY_MAX(NLA_U32, NETKIT_SCRUB_DEFAULT), + [IFLA_NETKIT_PEER_SCRUB] = NLA_POLICY_MAX(NLA_U32, NETKIT_SCRUB_DEFAULT), [IFLA_NETKIT_PRIMARY] = { .type = NLA_REJECT, .reject_message = "Primary attribute is read-only" }, }; diff --git a/drivers/net/pcs/pcs-rzn1-miic.c b/drivers/net/pcs/pcs-rzn1-miic.c index 
d0a722d43368..61944574d087 100644 --- a/drivers/net/pcs/pcs-rzn1-miic.c +++ b/drivers/net/pcs/pcs-rzn1-miic.c @@ -552,7 +552,7 @@ static struct platform_driver miic_driver = { .of_match_table = miic_of_mtable, }, .probe = miic_probe, - .remove_new = miic_remove, + .remove = miic_remove, }; module_platform_driver(miic_driver); diff --git a/drivers/net/pcs/pcs-xpcs-nxp.c b/drivers/net/pcs/pcs-xpcs-nxp.c index d16fc58cd48d..e8efe94cf4ec 100644 --- a/drivers/net/pcs/pcs-xpcs-nxp.c +++ b/drivers/net/pcs/pcs-xpcs-nxp.c @@ -152,26 +152,18 @@ static int nxp_sja1110_pma_config(struct dw_xpcs *xpcs, /* Enable TX and RX PLLs and circuits. * Release reset of PMA to enable data flow to/from PCS. */ - ret = xpcs_read(xpcs, MDIO_MMD_VEND2, SJA1110_POWERDOWN_ENABLE); - if (ret < 0) - return ret; - - val = ret & ~(SJA1110_TXPLL_PD | SJA1110_TXPD | SJA1110_RXCH_PD | - SJA1110_RXBIAS_PD | SJA1110_RESET_SER_EN | - SJA1110_RESET_SER | SJA1110_RESET_DES); - val |= SJA1110_RXPKDETEN | SJA1110_RCVEN; - - ret = xpcs_write(xpcs, MDIO_MMD_VEND2, SJA1110_POWERDOWN_ENABLE, val); + ret = xpcs_modify(xpcs, MDIO_MMD_VEND2, SJA1110_POWERDOWN_ENABLE, + SJA1110_TXPLL_PD | SJA1110_TXPD | SJA1110_RXCH_PD | + SJA1110_RXBIAS_PD | SJA1110_RESET_SER_EN | + SJA1110_RESET_SER | SJA1110_RESET_DES | + SJA1110_RXPKDETEN | SJA1110_RCVEN, + SJA1110_RXPKDETEN | SJA1110_RCVEN); if (ret < 0) return ret; /* Program continuous-time linear equalizer (CTLE) settings. */ - ret = xpcs_write(xpcs, MDIO_MMD_VEND2, SJA1110_RX_CDR_CTLE, - rx_cdr_ctle); - if (ret < 0) - return ret; - - return 0; + return xpcs_write(xpcs, MDIO_MMD_VEND2, SJA1110_RX_CDR_CTLE, + rx_cdr_ctle); } int nxp_sja1110_sgmii_pma_config(struct dw_xpcs *xpcs) diff --git a/drivers/net/pcs/pcs-xpcs-wx.c b/drivers/net/pcs/pcs-xpcs-wx.c index 5f5cd3596cb8..fc52f7aa5f59 100644 --- a/drivers/net/pcs/pcs-xpcs-wx.c +++ b/drivers/net/pcs/pcs-xpcs-wx.c @@ -46,25 +46,23 @@ #define TXGBE_VCO_CAL_LD0 0x72 #define TXGBE_VCO_CAL_REF0 0x76 -static int txgbe_read_pma(struct dw_xpcs *xpcs, int reg) +static int txgbe_write_pma(struct dw_xpcs *xpcs, int reg, u16 val) { - return xpcs_read(xpcs, MDIO_MMD_PMAPMD, TXGBE_PMA_MMD + reg); + return xpcs_write(xpcs, MDIO_MMD_PMAPMD, TXGBE_PMA_MMD + reg, val); } -static int txgbe_write_pma(struct dw_xpcs *xpcs, int reg, u16 val) +static int txgbe_modify_pma(struct dw_xpcs *xpcs, int reg, u16 mask, u16 set) { - return xpcs_write(xpcs, MDIO_MMD_PMAPMD, TXGBE_PMA_MMD + reg, val); + return xpcs_modify(xpcs, MDIO_MMD_PMAPMD, TXGBE_PMA_MMD + reg, mask, + set); } static void txgbe_pma_config_10gbaser(struct dw_xpcs *xpcs) { - int val; - txgbe_write_pma(xpcs, TXGBE_MPLLA_CTL0, 0x21); txgbe_write_pma(xpcs, TXGBE_MPLLA_CTL3, 0); - val = txgbe_read_pma(xpcs, TXGBE_TX_GENCTL1); - val = u16_replace_bits(val, 0x5, TXGBE_TX_GENCTL1_VBOOST_LVL); - txgbe_write_pma(xpcs, TXGBE_TX_GENCTL1, val); + txgbe_modify_pma(xpcs, TXGBE_TX_GENCTL1, TXGBE_TX_GENCTL1_VBOOST_LVL, + FIELD_PREP(TXGBE_TX_GENCTL1_VBOOST_LVL, 0x5)); txgbe_write_pma(xpcs, TXGBE_MISC_CTL0, TXGBE_MISC_CTL0_PLL | TXGBE_MISC_CTL0_CR_PARA_SEL | TXGBE_MISC_CTL0_RX_VREF(0xF)); txgbe_write_pma(xpcs, TXGBE_VCO_CAL_LD0, 0x549); @@ -78,38 +76,29 @@ static void txgbe_pma_config_10gbaser(struct dw_xpcs *xpcs) txgbe_write_pma(xpcs, TXGBE_RX_EQ_CTL0, TXGBE_RX_EQ_CTL0_CTLE_POLE(2) | TXGBE_RX_EQ_CTL0_CTLE_BOOST(5)); - val = txgbe_read_pma(xpcs, TXGBE_RX_EQ_ATTN_CTL); - val &= ~TXGBE_RX_EQ_ATTN_LVL0; - txgbe_write_pma(xpcs, TXGBE_RX_EQ_ATTN_CTL, val); + txgbe_modify_pma(xpcs, TXGBE_RX_EQ_ATTN_CTL, TXGBE_RX_EQ_ATTN_LVL0, 0); 
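The same conversion recurs throughout this file and in pcs-xpcs.c below: each read/mask/write triplet collapses into one modify call, with FIELD_PREP() placing field-relative values such as the 0x5 VBOOST level. As a rough sketch of what the helper wraps, built only on the xpcs_read()/xpcs_write() accessors this driver already uses (the real xpcs_modify() added in pcs-xpcs.c simply delegates to mdiodev_c45_modify(), which performs the read and write under the MDIO bus lock):

    /* Open-coded read-modify-write equivalent of xpcs_modify(); illustrative
     * only - the in-tree helper defers to mdiodev_c45_modify() so that both
     * accesses happen under the same MDIO bus lock.
     */
    static int example_xpcs_modify(struct dw_xpcs *xpcs, int dev, u32 reg,
                                   u16 mask, u16 set)
    {
            int ret = xpcs_read(xpcs, dev, reg);

            if (ret < 0)
                    return ret;

            /* clear the masked bits, then OR in the new field value */
            return xpcs_write(xpcs, dev, reg, (ret & ~mask) | set);
    }

Beyond shaving lines, routing every RMW through one helper keeps the read/write pairing in a single place, so a locking or error-handling fix lands everywhere at once.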
txgbe_write_pma(xpcs, TXGBE_DFE_TAP_CTL0, 0xBE); - val = txgbe_read_pma(xpcs, TXGBE_AFE_DFE_ENABLE); - val &= ~(TXGBE_DFE_EN_0 | TXGBE_AFE_EN_0); - txgbe_write_pma(xpcs, TXGBE_AFE_DFE_ENABLE, val); - val = txgbe_read_pma(xpcs, TXGBE_RX_EQ_CTL4); - val &= ~TXGBE_RX_EQ_CTL4_CONT_ADAPT0; - txgbe_write_pma(xpcs, TXGBE_RX_EQ_CTL4, val); + txgbe_modify_pma(xpcs, TXGBE_AFE_DFE_ENABLE, + TXGBE_DFE_EN_0 | TXGBE_AFE_EN_0, 0); + txgbe_modify_pma(xpcs, TXGBE_RX_EQ_CTL4, TXGBE_RX_EQ_CTL4_CONT_ADAPT0, + 0); } static void txgbe_pma_config_1g(struct dw_xpcs *xpcs) { - int val; - - val = txgbe_read_pma(xpcs, TXGBE_TX_GENCTL1); - val = u16_replace_bits(val, 0x5, TXGBE_TX_GENCTL1_VBOOST_LVL); - val &= ~TXGBE_TX_GENCTL1_VBOOST_EN0; - txgbe_write_pma(xpcs, TXGBE_TX_GENCTL1, val); + txgbe_modify_pma(xpcs, TXGBE_TX_GENCTL1, + TXGBE_TX_GENCTL1_VBOOST_LVL | + TXGBE_TX_GENCTL1_VBOOST_EN0, + FIELD_PREP(TXGBE_TX_GENCTL1_VBOOST_LVL, 0x5)); txgbe_write_pma(xpcs, TXGBE_MISC_CTL0, TXGBE_MISC_CTL0_PLL | TXGBE_MISC_CTL0_CR_PARA_SEL | TXGBE_MISC_CTL0_RX_VREF(0xF)); txgbe_write_pma(xpcs, TXGBE_RX_EQ_CTL0, TXGBE_RX_EQ_CTL0_VGA1_GAIN(7) | TXGBE_RX_EQ_CTL0_VGA2_GAIN(7) | TXGBE_RX_EQ_CTL0_CTLE_BOOST(6)); - val = txgbe_read_pma(xpcs, TXGBE_RX_EQ_ATTN_CTL); - val &= ~TXGBE_RX_EQ_ATTN_LVL0; - txgbe_write_pma(xpcs, TXGBE_RX_EQ_ATTN_CTL, val); + txgbe_modify_pma(xpcs, TXGBE_RX_EQ_ATTN_CTL, TXGBE_RX_EQ_ATTN_LVL0, 0); txgbe_write_pma(xpcs, TXGBE_DFE_TAP_CTL0, 0); - val = txgbe_read_pma(xpcs, TXGBE_RX_GEN_CTL3); - val = u16_replace_bits(val, 0x4, TXGBE_RX_GEN_CTL3_LOS_TRSHLD0); - txgbe_write_pma(xpcs, TXGBE_RX_GEN_CTL3, val); + txgbe_modify_pma(xpcs, TXGBE_RX_GEN_CTL3, TXGBE_RX_GEN_CTL3_LOS_TRSHLD0, + FIELD_PREP(TXGBE_RX_GEN_CTL3_LOS_TRSHLD0, 0x4)); txgbe_write_pma(xpcs, TXGBE_MPLLA_CTL0, 0x20); txgbe_write_pma(xpcs, TXGBE_MPLLA_CTL3, 0x46); @@ -172,7 +161,7 @@ static bool txgbe_xpcs_mode_quirk(struct dw_xpcs *xpcs) int txgbe_xpcs_switch_mode(struct dw_xpcs *xpcs, phy_interface_t interface) { - int val, ret; + int ret; switch (interface) { case PHY_INTERFACE_MODE_10GBASER: @@ -194,9 +183,8 @@ int txgbe_xpcs_switch_mode(struct dw_xpcs *xpcs, phy_interface_t interface) if (interface == PHY_INTERFACE_MODE_10GBASER) { xpcs_write(xpcs, MDIO_MMD_PCS, MDIO_CTRL2, MDIO_PCS_CTRL2_10GBR); - val = xpcs_read(xpcs, MDIO_MMD_PMAPMD, MDIO_CTRL1); - val |= MDIO_CTRL1_SPEED10G; - xpcs_write(xpcs, MDIO_MMD_PMAPMD, MDIO_CTRL1, val); + xpcs_modify(xpcs, MDIO_MMD_PMAPMD, MDIO_CTRL1, + MDIO_CTRL1_SPEED10G, MDIO_CTRL1_SPEED10G); txgbe_pma_config_10gbaser(xpcs); } else { xpcs_write(xpcs, MDIO_MMD_PCS, MDIO_CTRL2, MDIO_PCS_CTRL2_10GBX); diff --git a/drivers/net/pcs/pcs-xpcs.c b/drivers/net/pcs/pcs-xpcs.c index 82463f9d50c8..7246a910728d 100644 --- a/drivers/net/pcs/pcs-xpcs.c +++ b/drivers/net/pcs/pcs-xpcs.c @@ -107,49 +107,9 @@ static const int xpcs_2500basex_features[] = { __ETHTOOL_LINK_MODE_MASK_NBITS, }; -static const phy_interface_t xpcs_usxgmii_interfaces[] = { - PHY_INTERFACE_MODE_USXGMII, -}; - -static const phy_interface_t xpcs_10gkr_interfaces[] = { - PHY_INTERFACE_MODE_10GKR, -}; - -static const phy_interface_t xpcs_xlgmii_interfaces[] = { - PHY_INTERFACE_MODE_XLGMII, -}; - -static const phy_interface_t xpcs_10gbaser_interfaces[] = { - PHY_INTERFACE_MODE_10GBASER, -}; - -static const phy_interface_t xpcs_sgmii_interfaces[] = { - PHY_INTERFACE_MODE_SGMII, -}; - -static const phy_interface_t xpcs_1000basex_interfaces[] = { - PHY_INTERFACE_MODE_1000BASEX, -}; - -static const phy_interface_t xpcs_2500basex_interfaces[] = { - 
PHY_INTERFACE_MODE_2500BASEX, -}; - -enum { - DW_XPCS_USXGMII, - DW_XPCS_10GKR, - DW_XPCS_XLGMII, - DW_XPCS_10GBASER, - DW_XPCS_SGMII, - DW_XPCS_1000BASEX, - DW_XPCS_2500BASEX, - DW_XPCS_INTERFACE_MAX, -}; - struct dw_xpcs_compat { + phy_interface_t interface; const int *supported; - const phy_interface_t *interface; - int num_interfaces; int an_mode; int (*pma_config)(struct dw_xpcs *xpcs); }; @@ -161,26 +121,28 @@ struct dw_xpcs_desc { }; static const struct dw_xpcs_compat * -xpcs_find_compat(const struct dw_xpcs_desc *desc, phy_interface_t interface) +xpcs_find_compat(struct dw_xpcs *xpcs, phy_interface_t interface) { - int i, j; + const struct dw_xpcs_compat *compat; - for (i = 0; i < DW_XPCS_INTERFACE_MAX; i++) { - const struct dw_xpcs_compat *compat = &desc->compat[i]; - - for (j = 0; j < compat->num_interfaces; j++) - if (compat->interface[j] == interface) - return compat; - } + for (compat = xpcs->desc->compat; compat->supported; compat++) + if (compat->interface == interface) + return compat; return NULL; } +struct phylink_pcs *xpcs_to_phylink_pcs(struct dw_xpcs *xpcs) +{ + return &xpcs->pcs; +} +EXPORT_SYMBOL_GPL(xpcs_to_phylink_pcs); + int xpcs_get_an_mode(struct dw_xpcs *xpcs, phy_interface_t interface) { const struct dw_xpcs_compat *compat; - compat = xpcs_find_compat(xpcs->desc, interface); + compat = xpcs_find_compat(xpcs, interface); if (!compat) return -ENODEV; @@ -213,6 +175,11 @@ int xpcs_write(struct dw_xpcs *xpcs, int dev, u32 reg, u16 val) return mdiodev_c45_write(xpcs->mdiodev, dev, reg, val); } +int xpcs_modify(struct dw_xpcs *xpcs, int dev, u32 reg, u16 mask, u16 set) +{ + return mdiodev_c45_modify(xpcs->mdiodev, dev, reg, mask, set); +} + static int xpcs_modify_changed(struct dw_xpcs *xpcs, int dev, u32 reg, u16 mask, u16 set) { @@ -230,6 +197,12 @@ static int xpcs_write_vendor(struct dw_xpcs *xpcs, int dev, int reg, return xpcs_write(xpcs, dev, DW_VENDOR | reg, val); } +static int xpcs_modify_vendor(struct dw_xpcs *xpcs, int dev, int reg, u16 mask, + u16 set) +{ + return xpcs_modify(xpcs, dev, DW_VENDOR | reg, mask, set); +} + int xpcs_read_vpcs(struct dw_xpcs *xpcs, int reg) { return xpcs_read_vendor(xpcs, MDIO_MMD_PCS, reg); @@ -240,20 +213,22 @@ int xpcs_write_vpcs(struct dw_xpcs *xpcs, int reg, u16 val) return xpcs_write_vendor(xpcs, MDIO_MMD_PCS, reg, val); } +static int xpcs_modify_vpcs(struct dw_xpcs *xpcs, int reg, u16 mask, u16 val) +{ + return xpcs_modify_vendor(xpcs, MDIO_MMD_PCS, reg, mask, val); +} + static int xpcs_poll_reset(struct dw_xpcs *xpcs, int dev) { - /* Poll until the reset bit clears (50ms per retry == 0.6 sec) */ - unsigned int retries = 12; - int ret; + int ret, val; - do { - msleep(50); - ret = xpcs_read(xpcs, dev, MDIO_CTRL1); - if (ret < 0) - return ret; - } while (ret & MDIO_CTRL1_RESET && --retries); + ret = read_poll_timeout(xpcs_read, val, + val < 0 || !(val & BMCR_RESET), + 50000, 600000, true, xpcs, dev, MII_BMCR); + if (val < 0) + ret = val; - return (ret & MDIO_CTRL1_RESET) ? 
-ETIMEDOUT : 0; + return ret; } static int xpcs_soft_reset(struct dw_xpcs *xpcs, @@ -275,7 +250,7 @@ static int xpcs_soft_reset(struct dw_xpcs *xpcs, return -EINVAL; } - ret = xpcs_write(xpcs, dev, MDIO_CTRL1, MDIO_CTRL1_RESET); + ret = xpcs_write(xpcs, dev, MII_BMCR, BMCR_RESET); if (ret < 0) return ret; @@ -336,7 +311,7 @@ static int xpcs_read_fault_c73(struct dw_xpcs *xpcs, return 0; } -static void xpcs_config_usxgmii(struct dw_xpcs *xpcs, int speed) +static void xpcs_link_up_usxgmii(struct dw_xpcs *xpcs, int speed) { int ret, speed_sel; @@ -364,37 +339,25 @@ static void xpcs_config_usxgmii(struct dw_xpcs *xpcs, int speed) return; } - ret = xpcs_read_vpcs(xpcs, MDIO_CTRL1); - if (ret < 0) - goto out; - - ret = xpcs_write_vpcs(xpcs, MDIO_CTRL1, ret | DW_USXGMII_EN); - if (ret < 0) - goto out; - - ret = xpcs_read(xpcs, MDIO_MMD_VEND2, MDIO_CTRL1); - if (ret < 0) - goto out; - - ret &= ~DW_USXGMII_SS_MASK; - ret |= speed_sel | DW_USXGMII_FULL; - - ret = xpcs_write(xpcs, MDIO_MMD_VEND2, MDIO_CTRL1, ret); + ret = xpcs_modify_vpcs(xpcs, MDIO_CTRL1, DW_USXGMII_EN, DW_USXGMII_EN); if (ret < 0) goto out; - ret = xpcs_read_vpcs(xpcs, MDIO_CTRL1); + ret = xpcs_modify(xpcs, MDIO_MMD_VEND2, MII_BMCR, DW_USXGMII_SS_MASK, + speed_sel | DW_USXGMII_FULL); if (ret < 0) goto out; - ret = xpcs_write_vpcs(xpcs, MDIO_CTRL1, ret | DW_USXGMII_RST); + ret = xpcs_modify_vpcs(xpcs, MDIO_CTRL1, DW_USXGMII_RST, + DW_USXGMII_RST); if (ret < 0) goto out; return; out: - pr_err("%s: XPCS access returned %pe\n", __func__, ERR_PTR(ret)); + dev_err(&xpcs->mdiodev->dev, "%s: XPCS access returned %pe\n", + __func__, ERR_PTR(ret)); } static int _xpcs_config_aneg_c73(struct dw_xpcs *xpcs, @@ -451,13 +414,9 @@ static int xpcs_config_aneg_c73(struct dw_xpcs *xpcs, if (ret < 0) return ret; - ret = xpcs_read(xpcs, MDIO_MMD_AN, MDIO_CTRL1); - if (ret < 0) - return ret; - - ret |= MDIO_AN_CTRL1_ENABLE | MDIO_AN_CTRL1_RESTART; - - return xpcs_write(xpcs, MDIO_MMD_AN, MDIO_CTRL1, ret); + return xpcs_modify(xpcs, MDIO_MMD_AN, MDIO_CTRL1, + MDIO_AN_CTRL1_ENABLE | MDIO_AN_CTRL1_RESTART, + MDIO_AN_CTRL1_ENABLE | MDIO_AN_CTRL1_RESTART); } static int xpcs_aneg_done_c73(struct dw_xpcs *xpcs, @@ -592,7 +551,7 @@ static int xpcs_validate(struct phylink_pcs *pcs, unsigned long *supported, int i; xpcs = phylink_pcs_to_xpcs(pcs); - compat = xpcs_find_compat(xpcs->desc, state->interface); + compat = xpcs_find_compat(xpcs, state->interface); if (!compat) return -EINVAL; @@ -610,62 +569,72 @@ static int xpcs_validate(struct phylink_pcs *pcs, unsigned long *supported, void xpcs_get_interfaces(struct dw_xpcs *xpcs, unsigned long *interfaces) { - int i, j; - - for (i = 0; i < DW_XPCS_INTERFACE_MAX; i++) { - const struct dw_xpcs_compat *compat = &xpcs->desc->compat[i]; + const struct dw_xpcs_compat *compat; - for (j = 0; j < compat->num_interfaces; j++) - __set_bit(compat->interface[j], interfaces); - } + for (compat = xpcs->desc->compat; compat->supported; compat++) + __set_bit(compat->interface, interfaces); } EXPORT_SYMBOL_GPL(xpcs_get_interfaces); int xpcs_config_eee(struct dw_xpcs *xpcs, int mult_fact_100ns, int enable) { + u16 mask, val; int ret; - ret = xpcs_read(xpcs, MDIO_MMD_VEND2, DW_VR_MII_EEE_MCTRL0); - if (ret < 0) - return ret; + mask = DW_VR_MII_EEE_LTX_EN | DW_VR_MII_EEE_LRX_EN | + DW_VR_MII_EEE_TX_QUIET_EN | DW_VR_MII_EEE_RX_QUIET_EN | + DW_VR_MII_EEE_TX_EN_CTRL | DW_VR_MII_EEE_RX_EN_CTRL | + DW_VR_MII_EEE_MULT_FACT_100NS; - if (enable) { - /* Enable EEE */ - ret = DW_VR_MII_EEE_LTX_EN | DW_VR_MII_EEE_LRX_EN | + if (enable) + val = 
DW_VR_MII_EEE_LTX_EN | DW_VR_MII_EEE_LRX_EN | DW_VR_MII_EEE_TX_QUIET_EN | DW_VR_MII_EEE_RX_QUIET_EN | DW_VR_MII_EEE_TX_EN_CTRL | DW_VR_MII_EEE_RX_EN_CTRL | - mult_fact_100ns << DW_VR_MII_EEE_MULT_FACT_100NS_SHIFT; - } else { - ret &= ~(DW_VR_MII_EEE_LTX_EN | DW_VR_MII_EEE_LRX_EN | - DW_VR_MII_EEE_TX_QUIET_EN | DW_VR_MII_EEE_RX_QUIET_EN | - DW_VR_MII_EEE_TX_EN_CTRL | DW_VR_MII_EEE_RX_EN_CTRL | - DW_VR_MII_EEE_MULT_FACT_100NS); - } + FIELD_PREP(DW_VR_MII_EEE_MULT_FACT_100NS, + mult_fact_100ns); + else + val = 0; - ret = xpcs_write(xpcs, MDIO_MMD_VEND2, DW_VR_MII_EEE_MCTRL0, ret); + ret = xpcs_modify(xpcs, MDIO_MMD_VEND2, DW_VR_MII_EEE_MCTRL0, mask, + val); if (ret < 0) return ret; - ret = xpcs_read(xpcs, MDIO_MMD_VEND2, DW_VR_MII_EEE_MCTRL1); - if (ret < 0) - return ret; + return xpcs_modify(xpcs, MDIO_MMD_VEND2, DW_VR_MII_EEE_MCTRL1, + DW_VR_MII_EEE_TRN_LPI, + enable ? DW_VR_MII_EEE_TRN_LPI : 0); +} +EXPORT_SYMBOL_GPL(xpcs_config_eee); - if (enable) - ret |= DW_VR_MII_EEE_TRN_LPI; - else - ret &= ~DW_VR_MII_EEE_TRN_LPI; +static void xpcs_pre_config(struct phylink_pcs *pcs, phy_interface_t interface) +{ + struct dw_xpcs *xpcs = phylink_pcs_to_xpcs(pcs); + const struct dw_xpcs_compat *compat; + int ret; + + if (!xpcs->need_reset) + return; - return xpcs_write(xpcs, MDIO_MMD_VEND2, DW_VR_MII_EEE_MCTRL1, ret); + compat = xpcs_find_compat(xpcs, interface); + if (!compat) { + dev_err(&xpcs->mdiodev->dev, "unsupported interface %s\n", + phy_modes(interface)); + return; + } + + ret = xpcs_soft_reset(xpcs, compat); + if (ret) + dev_err(&xpcs->mdiodev->dev, "soft reset failed: %pe\n", + ERR_PTR(ret)); + + xpcs->need_reset = false; } -EXPORT_SYMBOL_GPL(xpcs_config_eee); static int xpcs_config_aneg_c37_sgmii(struct dw_xpcs *xpcs, unsigned int neg_mode) { int ret, mdio_ctrl, tx_conf; - - if (xpcs->info.pma == WX_TXGBE_XPCS_PMA_10G_ID) - xpcs_write_vpcs(xpcs, DW_VR_XS_PCS_DIG_CTRL1, DW_CL37_BP | DW_EN_VSMMD1); + u16 mask, val; /* For AN for C37 SGMII mode, the settings are :- * 1) VR_MII_MMD_CTRL Bit(12) [AN_ENABLE] = 0b (Disable SGMII AN in case @@ -677,63 +646,60 @@ static int xpcs_config_aneg_c37_sgmii(struct dw_xpcs *xpcs, * speed/duplex mode change by HW after SGMII AN complete) * 5) VR_MII_MMD_CTRL Bit(12) [AN_ENABLE] = 1b (Enable SGMII AN) * + * Note that VR_MII_MMD_CTRL is MII_BMCR. + * * Note: Since it is MAC side SGMII, there is no need to set * SR_MII_AN_ADV. MAC side SGMII receives AN Tx Config from * PHY about the link state change after C28 AN is completed * between PHY and Link Partner. There is also no need to * trigger AN restart for MAC-side SGMII. 
*/ - mdio_ctrl = xpcs_read(xpcs, MDIO_MMD_VEND2, DW_VR_MII_MMD_CTRL); + mdio_ctrl = xpcs_read(xpcs, MDIO_MMD_VEND2, MII_BMCR); if (mdio_ctrl < 0) return mdio_ctrl; - if (mdio_ctrl & AN_CL37_EN) { - ret = xpcs_write(xpcs, MDIO_MMD_VEND2, DW_VR_MII_MMD_CTRL, - mdio_ctrl & ~AN_CL37_EN); + if (mdio_ctrl & BMCR_ANENABLE) { + ret = xpcs_write(xpcs, MDIO_MMD_VEND2, MII_BMCR, + mdio_ctrl & ~BMCR_ANENABLE); if (ret < 0) return ret; } - ret = xpcs_read(xpcs, MDIO_MMD_VEND2, DW_VR_MII_AN_CTRL); - if (ret < 0) - return ret; + mask = DW_VR_MII_PCS_MODE_MASK | DW_VR_MII_TX_CONFIG_MASK; + val = FIELD_PREP(DW_VR_MII_PCS_MODE_MASK, + DW_VR_MII_PCS_MODE_C37_SGMII); - ret &= ~(DW_VR_MII_PCS_MODE_MASK | DW_VR_MII_TX_CONFIG_MASK); - ret |= (DW_VR_MII_PCS_MODE_C37_SGMII << - DW_VR_MII_AN_CTRL_PCS_MODE_SHIFT & - DW_VR_MII_PCS_MODE_MASK); if (xpcs->info.pma == WX_TXGBE_XPCS_PMA_10G_ID) { - ret |= DW_VR_MII_AN_CTRL_8BIT; + mask |= DW_VR_MII_AN_CTRL_8BIT; + val |= DW_VR_MII_AN_CTRL_8BIT; /* Hardware requires it to be PHY side SGMII */ tx_conf = DW_VR_MII_TX_CONFIG_PHY_SIDE_SGMII; } else { tx_conf = DW_VR_MII_TX_CONFIG_MAC_SIDE_SGMII; } - ret |= tx_conf << DW_VR_MII_AN_CTRL_TX_CONFIG_SHIFT & - DW_VR_MII_TX_CONFIG_MASK; - ret = xpcs_write(xpcs, MDIO_MMD_VEND2, DW_VR_MII_AN_CTRL, ret); - if (ret < 0) - return ret; - ret = xpcs_read(xpcs, MDIO_MMD_VEND2, DW_VR_MII_DIG_CTRL1); + val |= FIELD_PREP(DW_VR_MII_TX_CONFIG_MASK, tx_conf); + + ret = xpcs_modify(xpcs, MDIO_MMD_VEND2, DW_VR_MII_AN_CTRL, mask, val); if (ret < 0) return ret; + mask = DW_VR_MII_DIG_CTRL1_MAC_AUTO_SW; if (neg_mode == PHYLINK_PCS_NEG_INBAND_ENABLED) - ret |= DW_VR_MII_DIG_CTRL1_MAC_AUTO_SW; - else - ret &= ~DW_VR_MII_DIG_CTRL1_MAC_AUTO_SW; + val = DW_VR_MII_DIG_CTRL1_MAC_AUTO_SW; - if (xpcs->info.pma == WX_TXGBE_XPCS_PMA_10G_ID) - ret |= DW_VR_MII_DIG_CTRL1_PHY_MODE_CTRL; + if (xpcs->info.pma == WX_TXGBE_XPCS_PMA_10G_ID) { + mask |= DW_VR_MII_DIG_CTRL1_PHY_MODE_CTRL; + val |= DW_VR_MII_DIG_CTRL1_PHY_MODE_CTRL; + } - ret = xpcs_write(xpcs, MDIO_MMD_VEND2, DW_VR_MII_DIG_CTRL1, ret); + ret = xpcs_modify(xpcs, MDIO_MMD_VEND2, DW_VR_MII_DIG_CTRL1, mask, val); if (ret < 0) return ret; if (neg_mode == PHYLINK_PCS_NEG_INBAND_ENABLED) - ret = xpcs_write(xpcs, MDIO_MMD_VEND2, DW_VR_MII_MMD_CTRL, - mdio_ctrl | AN_CL37_EN); + ret = xpcs_write(xpcs, MDIO_MMD_VEND2, MII_BMCR, + mdio_ctrl | BMCR_ANENABLE); return ret; } @@ -745,34 +711,36 @@ static int xpcs_config_aneg_c37_1000basex(struct dw_xpcs *xpcs, phy_interface_t interface = PHY_INTERFACE_MODE_1000BASEX; int ret, mdio_ctrl, adv; bool changed = 0; - - if (xpcs->info.pma == WX_TXGBE_XPCS_PMA_10G_ID) - xpcs_write_vpcs(xpcs, DW_VR_XS_PCS_DIG_CTRL1, DW_CL37_BP | DW_EN_VSMMD1); + u16 mask, val; /* According to Chap 7.12, to set 1000BASE-X C37 AN, AN must * be disabled first:- * 1) VR_MII_MMD_CTRL Bit(12)[AN_ENABLE] = 0b * 2) VR_MII_AN_CTRL Bit(2:1)[PCS_MODE] = 00b (1000BASE-X C37) + * + * Note that VR_MII_MMD_CTRL is MII_BMCR. 
*/ - mdio_ctrl = xpcs_read(xpcs, MDIO_MMD_VEND2, DW_VR_MII_MMD_CTRL); + mdio_ctrl = xpcs_read(xpcs, MDIO_MMD_VEND2, MII_BMCR); if (mdio_ctrl < 0) return mdio_ctrl; - if (mdio_ctrl & AN_CL37_EN) { - ret = xpcs_write(xpcs, MDIO_MMD_VEND2, DW_VR_MII_MMD_CTRL, - mdio_ctrl & ~AN_CL37_EN); + if (mdio_ctrl & BMCR_ANENABLE) { + ret = xpcs_write(xpcs, MDIO_MMD_VEND2, MII_BMCR, + mdio_ctrl & ~BMCR_ANENABLE); if (ret < 0) return ret; } - ret = xpcs_read(xpcs, MDIO_MMD_VEND2, DW_VR_MII_AN_CTRL); - if (ret < 0) - return ret; + mask = DW_VR_MII_PCS_MODE_MASK; + val = FIELD_PREP(DW_VR_MII_PCS_MODE_MASK, + DW_VR_MII_PCS_MODE_C37_1000BASEX); + + if (!xpcs->pcs.poll) { + mask |= DW_VR_MII_AN_INTR_EN; + val |= DW_VR_MII_AN_INTR_EN; + } - ret &= ~DW_VR_MII_PCS_MODE_MASK; - if (!xpcs->pcs.poll) - ret |= DW_VR_MII_AN_INTR_EN; - ret = xpcs_write(xpcs, MDIO_MMD_VEND2, DW_VR_MII_AN_CTRL, ret); + ret = xpcs_modify(xpcs, MDIO_MMD_VEND2, DW_VR_MII_AN_CTRL, mask, val); if (ret < 0) return ret; @@ -796,8 +764,8 @@ static int xpcs_config_aneg_c37_1000basex(struct dw_xpcs *xpcs, return ret; if (neg_mode == PHYLINK_PCS_NEG_INBAND_ENABLED) { - ret = xpcs_write(xpcs, MDIO_MMD_VEND2, DW_VR_MII_MMD_CTRL, - mdio_ctrl | AN_CL37_EN); + ret = xpcs_write(xpcs, MDIO_MMD_VEND2, MII_BMCR, + mdio_ctrl | BMCR_ANENABLE); if (ret < 0) return ret; } @@ -809,31 +777,26 @@ static int xpcs_config_2500basex(struct dw_xpcs *xpcs) { int ret; - ret = xpcs_read(xpcs, MDIO_MMD_VEND2, DW_VR_MII_DIG_CTRL1); - if (ret < 0) - return ret; - ret |= DW_VR_MII_DIG_CTRL1_2G5_EN; - ret &= ~DW_VR_MII_DIG_CTRL1_MAC_AUTO_SW; - ret = xpcs_write(xpcs, MDIO_MMD_VEND2, DW_VR_MII_DIG_CTRL1, ret); + ret = xpcs_modify(xpcs, MDIO_MMD_VEND2, DW_VR_MII_DIG_CTRL1, + DW_VR_MII_DIG_CTRL1_2G5_EN | + DW_VR_MII_DIG_CTRL1_MAC_AUTO_SW, + DW_VR_MII_DIG_CTRL1_2G5_EN); if (ret < 0) return ret; - ret = xpcs_read(xpcs, MDIO_MMD_VEND2, DW_VR_MII_MMD_CTRL); - if (ret < 0) - return ret; - ret &= ~AN_CL37_EN; - ret |= SGMII_SPEED_SS6; - ret &= ~SGMII_SPEED_SS13; - return xpcs_write(xpcs, MDIO_MMD_VEND2, DW_VR_MII_MMD_CTRL, ret); + return xpcs_modify(xpcs, MDIO_MMD_VEND2, MII_BMCR, + BMCR_ANENABLE | BMCR_SPEED1000 | BMCR_SPEED100, + BMCR_SPEED1000); } -int xpcs_do_config(struct dw_xpcs *xpcs, phy_interface_t interface, - const unsigned long *advertising, unsigned int neg_mode) +static int xpcs_do_config(struct dw_xpcs *xpcs, phy_interface_t interface, + const unsigned long *advertising, + unsigned int neg_mode) { const struct dw_xpcs_compat *compat; int ret; - compat = xpcs_find_compat(xpcs->desc, interface); + compat = xpcs_find_compat(xpcs, interface); if (!compat) return -ENODEV; @@ -841,6 +804,14 @@ int xpcs_do_config(struct dw_xpcs *xpcs, phy_interface_t interface, ret = txgbe_xpcs_switch_mode(xpcs, interface); if (ret) return ret; + + /* Wangxun devices need backplane CL37 AN enabled for + * SGMII and 1000base-X + */ + if (interface == PHY_INTERFACE_MODE_SGMII || + interface == PHY_INTERFACE_MODE_1000BASEX) + xpcs_write_vpcs(xpcs, DW_VR_XS_PCS_DIG_CTRL1, + DW_CL37_BP | DW_EN_VSMMD1); } switch (compat->an_mode) { @@ -881,7 +852,6 @@ int xpcs_do_config(struct dw_xpcs *xpcs, phy_interface_t interface, return 0; } -EXPORT_SYMBOL_GPL(xpcs_do_config); static int xpcs_config(struct phylink_pcs *pcs, unsigned int neg_mode, phy_interface_t interface, @@ -989,8 +959,7 @@ static int xpcs_get_state_c37_sgmii(struct dw_xpcs *xpcs, state->link = true; - speed_value = (ret & DW_VR_MII_AN_STS_C37_ANSGM_SP) >> - DW_VR_MII_AN_STS_C37_ANSGM_SP_SHIFT; + speed_value = 
FIELD_GET(DW_VR_MII_AN_STS_C37_ANSGM_SP, ret); if (speed_value == DW_VR_MII_C37_ANSGM_SP_1000) state->speed = SPEED_1000; else if (speed_value == DW_VR_MII_C37_ANSGM_SP_100) @@ -1007,14 +976,14 @@ static int xpcs_get_state_c37_sgmii(struct dw_xpcs *xpcs, state->link = true; - speed = xpcs_read(xpcs, MDIO_MMD_VEND2, MDIO_CTRL1); + speed = xpcs_read(xpcs, MDIO_MMD_VEND2, MII_BMCR); if (speed < 0) return speed; - speed &= SGMII_SPEED_SS13 | SGMII_SPEED_SS6; - if (speed == SGMII_SPEED_SS6) + speed &= BMCR_SPEED100 | BMCR_SPEED1000; + if (speed == BMCR_SPEED1000) state->speed = SPEED_1000; - else if (speed == SGMII_SPEED_SS13) + else if (speed == BMCR_SPEED100) state->speed = SPEED_100; else if (speed == 0) state->speed = SPEED_10; @@ -1023,9 +992,9 @@ static int xpcs_get_state_c37_sgmii(struct dw_xpcs *xpcs, if (duplex < 0) return duplex; - if (duplex & DW_FULL_DUPLEX) + if (duplex & ADVERTISE_1000XFULL) state->duplex = DUPLEX_FULL; - else if (duplex & DW_HALF_DUPLEX) + else if (duplex & ADVERTISE_1000XHALF) state->duplex = DUPLEX_HALF; xpcs_write(xpcs, MDIO_MMD_VEND2, DW_VR_MII_AN_INTR_STS, 0); @@ -1074,13 +1043,13 @@ static int xpcs_get_state_2500basex(struct dw_xpcs *xpcs, { int ret; - ret = xpcs_read(xpcs, MDIO_MMD_VEND2, DW_VR_MII_MMD_STS); + ret = xpcs_read(xpcs, MDIO_MMD_VEND2, MII_BMSR); if (ret < 0) { state->link = 0; return ret; } - state->link = !!(ret & DW_VR_MII_MMD_STS_LINK_STS); + state->link = !!(ret & BMSR_LSTATUS); if (!state->link) return 0; @@ -1098,7 +1067,7 @@ static void xpcs_get_state(struct phylink_pcs *pcs, const struct dw_xpcs_compat *compat; int ret; - compat = xpcs_find_compat(xpcs->desc, state->interface); + compat = xpcs_find_compat(xpcs, state->interface); if (!compat) return; @@ -1108,108 +1077,94 @@ static void xpcs_get_state(struct phylink_pcs *pcs, break; case DW_AN_C73: ret = xpcs_get_state_c73(xpcs, state, compat); - if (ret) { - pr_err("xpcs_get_state_c73 returned %pe\n", - ERR_PTR(ret)); - return; - } + if (ret) + dev_err(&xpcs->mdiodev->dev, "%s returned %pe\n", + "xpcs_get_state_c73", ERR_PTR(ret)); break; case DW_AN_C37_SGMII: ret = xpcs_get_state_c37_sgmii(xpcs, state); - if (ret) { - pr_err("xpcs_get_state_c37_sgmii returned %pe\n", - ERR_PTR(ret)); - } + if (ret) + dev_err(&xpcs->mdiodev->dev, "%s returned %pe\n", + "xpcs_get_state_c37_sgmii", ERR_PTR(ret)); break; case DW_AN_C37_1000BASEX: ret = xpcs_get_state_c37_1000basex(xpcs, state); - if (ret) { - pr_err("xpcs_get_state_c37_1000basex returned %pe\n", - ERR_PTR(ret)); - } + if (ret) + dev_err(&xpcs->mdiodev->dev, "%s returned %pe\n", + "xpcs_get_state_c37_1000basex", ERR_PTR(ret)); break; case DW_2500BASEX: ret = xpcs_get_state_2500basex(xpcs, state); - if (ret) { - pr_err("xpcs_get_state_2500basex returned %pe\n", - ERR_PTR(ret)); - } + if (ret) + dev_err(&xpcs->mdiodev->dev, "%s returned %pe\n", + "xpcs_get_state_2500basex", ERR_PTR(ret)); break; default: return; } } -static void xpcs_link_up_sgmii(struct dw_xpcs *xpcs, unsigned int neg_mode, - int speed, int duplex) +static void xpcs_link_up_sgmii_1000basex(struct dw_xpcs *xpcs, + unsigned int neg_mode, + phy_interface_t interface, + int speed, int duplex) { - int val, ret; + int ret; if (neg_mode == PHYLINK_PCS_NEG_INBAND_ENABLED) return; - val = mii_bmcr_encode_fixed(speed, duplex); - ret = xpcs_write(xpcs, MDIO_MMD_VEND2, MDIO_CTRL1, val); - if (ret) - pr_err("%s: xpcs_write returned %pe\n", __func__, ERR_PTR(ret)); -} - -static void xpcs_link_up_1000basex(struct dw_xpcs *xpcs, unsigned int neg_mode, - int speed, int duplex) -{ - int 
val, ret; - - if (neg_mode == PHYLINK_PCS_NEG_INBAND_ENABLED) - return; + if (interface == PHY_INTERFACE_MODE_1000BASEX) { + if (speed != SPEED_1000) { + dev_err(&xpcs->mdiodev->dev, + "%s: speed %dMbps not supported\n", + __func__, speed); + return; + } - switch (speed) { - case SPEED_1000: - val = BMCR_SPEED1000; - break; - case SPEED_100: - case SPEED_10: - default: - pr_err("%s: speed = %d\n", __func__, speed); - return; + if (duplex != DUPLEX_FULL) + dev_err(&xpcs->mdiodev->dev, + "%s: half duplex not supported\n", + __func__); } - if (duplex == DUPLEX_FULL) - val |= BMCR_FULLDPLX; - else - pr_err("%s: half duplex not supported\n", __func__); - - ret = xpcs_write(xpcs, MDIO_MMD_VEND2, MDIO_CTRL1, val); + ret = xpcs_write(xpcs, MDIO_MMD_VEND2, MII_BMCR, + mii_bmcr_encode_fixed(speed, duplex)); if (ret) - pr_err("%s: xpcs_write returned %pe\n", __func__, ERR_PTR(ret)); + dev_err(&xpcs->mdiodev->dev, "%s: xpcs_write returned %pe\n", + __func__, ERR_PTR(ret)); } -void xpcs_link_up(struct phylink_pcs *pcs, unsigned int neg_mode, - phy_interface_t interface, int speed, int duplex) +static void xpcs_link_up(struct phylink_pcs *pcs, unsigned int neg_mode, + phy_interface_t interface, int speed, int duplex) { struct dw_xpcs *xpcs = phylink_pcs_to_xpcs(pcs); - if (interface == PHY_INTERFACE_MODE_USXGMII) - return xpcs_config_usxgmii(xpcs, speed); - if (interface == PHY_INTERFACE_MODE_SGMII) - return xpcs_link_up_sgmii(xpcs, neg_mode, speed, duplex); - if (interface == PHY_INTERFACE_MODE_1000BASEX) - return xpcs_link_up_1000basex(xpcs, neg_mode, speed, duplex); + switch (interface) { + case PHY_INTERFACE_MODE_USXGMII: + xpcs_link_up_usxgmii(xpcs, speed); + break; + + case PHY_INTERFACE_MODE_SGMII: + case PHY_INTERFACE_MODE_1000BASEX: + xpcs_link_up_sgmii_1000basex(xpcs, neg_mode, interface, speed, + duplex); + break; + + default: + break; + } } -EXPORT_SYMBOL_GPL(xpcs_link_up); static void xpcs_an_restart(struct phylink_pcs *pcs) { struct dw_xpcs *xpcs = phylink_pcs_to_xpcs(pcs); - int ret; - ret = xpcs_read(xpcs, MDIO_MMD_VEND2, MDIO_CTRL1); - if (ret >= 0) { - ret |= BMCR_ANRESTART; - xpcs_write(xpcs, MDIO_MMD_VEND2, MDIO_CTRL1, ret); - } + xpcs_modify(xpcs, MDIO_MMD_VEND2, MII_BMCR, BMCR_ANRESTART, + BMCR_ANRESTART); } -static int xpcs_get_id(struct dw_xpcs *xpcs) +static int xpcs_read_ids(struct dw_xpcs *xpcs) { int ret; u32 id; @@ -1275,76 +1230,62 @@ static int xpcs_get_id(struct dw_xpcs *xpcs) return 0; } -static const struct dw_xpcs_compat synopsys_xpcs_compat[DW_XPCS_INTERFACE_MAX] = { - [DW_XPCS_USXGMII] = { +static const struct dw_xpcs_compat synopsys_xpcs_compat[] = { + { + .interface = PHY_INTERFACE_MODE_USXGMII, .supported = xpcs_usxgmii_features, - .interface = xpcs_usxgmii_interfaces, - .num_interfaces = ARRAY_SIZE(xpcs_usxgmii_interfaces), .an_mode = DW_AN_C73, - }, - [DW_XPCS_10GKR] = { + }, { + .interface = PHY_INTERFACE_MODE_10GKR, .supported = xpcs_10gkr_features, - .interface = xpcs_10gkr_interfaces, - .num_interfaces = ARRAY_SIZE(xpcs_10gkr_interfaces), .an_mode = DW_AN_C73, - }, - [DW_XPCS_XLGMII] = { + }, { + .interface = PHY_INTERFACE_MODE_XLGMII, .supported = xpcs_xlgmii_features, - .interface = xpcs_xlgmii_interfaces, - .num_interfaces = ARRAY_SIZE(xpcs_xlgmii_interfaces), .an_mode = DW_AN_C73, - }, - [DW_XPCS_10GBASER] = { + }, { + .interface = PHY_INTERFACE_MODE_10GBASER, .supported = xpcs_10gbaser_features, - .interface = xpcs_10gbaser_interfaces, - .num_interfaces = ARRAY_SIZE(xpcs_10gbaser_interfaces), .an_mode = DW_10GBASER, - }, - [DW_XPCS_SGMII] = { + }, { + 
.interface = PHY_INTERFACE_MODE_SGMII, .supported = xpcs_sgmii_features, - .interface = xpcs_sgmii_interfaces, - .num_interfaces = ARRAY_SIZE(xpcs_sgmii_interfaces), .an_mode = DW_AN_C37_SGMII, - }, - [DW_XPCS_1000BASEX] = { + }, { + .interface = PHY_INTERFACE_MODE_1000BASEX, .supported = xpcs_1000basex_features, - .interface = xpcs_1000basex_interfaces, - .num_interfaces = ARRAY_SIZE(xpcs_1000basex_interfaces), .an_mode = DW_AN_C37_1000BASEX, - }, - [DW_XPCS_2500BASEX] = { + }, { + .interface = PHY_INTERFACE_MODE_2500BASEX, .supported = xpcs_2500basex_features, - .interface = xpcs_2500basex_interfaces, - .num_interfaces = ARRAY_SIZE(xpcs_2500basex_interfaces), .an_mode = DW_2500BASEX, - }, + }, { + } }; -static const struct dw_xpcs_compat nxp_sja1105_xpcs_compat[DW_XPCS_INTERFACE_MAX] = { - [DW_XPCS_SGMII] = { +static const struct dw_xpcs_compat nxp_sja1105_xpcs_compat[] = { + { + .interface = PHY_INTERFACE_MODE_SGMII, .supported = xpcs_sgmii_features, - .interface = xpcs_sgmii_interfaces, - .num_interfaces = ARRAY_SIZE(xpcs_sgmii_interfaces), .an_mode = DW_AN_C37_SGMII, .pma_config = nxp_sja1105_sgmii_pma_config, - }, + }, { + } }; -static const struct dw_xpcs_compat nxp_sja1110_xpcs_compat[DW_XPCS_INTERFACE_MAX] = { - [DW_XPCS_SGMII] = { +static const struct dw_xpcs_compat nxp_sja1110_xpcs_compat[] = { + { + .interface = PHY_INTERFACE_MODE_SGMII, .supported = xpcs_sgmii_features, - .interface = xpcs_sgmii_interfaces, - .num_interfaces = ARRAY_SIZE(xpcs_sgmii_interfaces), .an_mode = DW_AN_C37_SGMII, .pma_config = nxp_sja1110_sgmii_pma_config, - }, - [DW_XPCS_2500BASEX] = { + }, { + .interface = PHY_INTERFACE_MODE_2500BASEX, .supported = xpcs_2500basex_features, - .interface = xpcs_2500basex_interfaces, - .num_interfaces = ARRAY_SIZE(xpcs_2500basex_interfaces), .an_mode = DW_2500BASEX, .pma_config = nxp_sja1110_2500basex_pma_config, - }, + }, { + } }; static const struct dw_xpcs_desc xpcs_desc_list[] = { @@ -1365,12 +1306,33 @@ static const struct dw_xpcs_desc xpcs_desc_list[] = { static const struct phylink_pcs_ops xpcs_phylink_ops = { .pcs_validate = xpcs_validate, + .pcs_pre_config = xpcs_pre_config, .pcs_config = xpcs_config, .pcs_get_state = xpcs_get_state, .pcs_an_restart = xpcs_an_restart, .pcs_link_up = xpcs_link_up, }; +static int xpcs_identify(struct dw_xpcs *xpcs) +{ + int i, ret; + + ret = xpcs_read_ids(xpcs); + if (ret < 0) + return ret; + + for (i = 0; i < ARRAY_SIZE(xpcs_desc_list); i++) { + const struct dw_xpcs_desc *entry = &xpcs_desc_list[i]; + + if ((xpcs->info.pcs & entry->mask) == entry->id) { + xpcs->desc = entry; + return 0; + } + } + + return -ENODEV; +} + static struct dw_xpcs *xpcs_create_data(struct mdio_device *mdiodev) { struct dw_xpcs *xpcs; @@ -1427,7 +1389,6 @@ static void xpcs_clear_clks(struct dw_xpcs *xpcs) static int xpcs_init_id(struct dw_xpcs *xpcs) { const struct dw_xpcs_info *info; - int i, ret; info = dev_get_platdata(&xpcs->mdiodev->dev); if (!info) { @@ -1437,45 +1398,10 @@ static int xpcs_init_id(struct dw_xpcs *xpcs) xpcs->info = *info; } - ret = xpcs_get_id(xpcs); - if (ret < 0) - return ret; - - for (i = 0; i < ARRAY_SIZE(xpcs_desc_list); i++) { - const struct dw_xpcs_desc *desc = &xpcs_desc_list[i]; - - if ((xpcs->info.pcs & desc->mask) != desc->id) - continue; - - xpcs->desc = desc; - - break; - } - - if (!xpcs->desc) - return -ENODEV; - - return 0; + return xpcs_identify(xpcs); } -static int xpcs_init_iface(struct dw_xpcs *xpcs, phy_interface_t interface) -{ - const struct dw_xpcs_compat *compat; - - compat = xpcs_find_compat(xpcs->desc, 
interface); - if (!compat) - return -EINVAL; - - if (xpcs->info.pma == WX_TXGBE_XPCS_PMA_10G_ID) { - xpcs->pcs.poll = false; - return 0; - } - - return xpcs_soft_reset(xpcs, compat); -} - -static struct dw_xpcs *xpcs_create(struct mdio_device *mdiodev, - phy_interface_t interface) +static struct dw_xpcs *xpcs_create(struct mdio_device *mdiodev) { struct dw_xpcs *xpcs; int ret; @@ -1492,9 +1418,10 @@ static struct dw_xpcs *xpcs_create(struct mdio_device *mdiodev, if (ret) goto out_clear_clks; - ret = xpcs_init_iface(xpcs, interface); - if (ret) - goto out_clear_clks; + if (xpcs->info.pma == WX_TXGBE_XPCS_PMA_10G_ID) + xpcs->pcs.poll = false; + else + xpcs->need_reset = true; return xpcs; @@ -1511,14 +1438,12 @@ out_free_data: * xpcs_create_mdiodev() - create a DW xPCS instance with the MDIO @addr * @bus: pointer to the MDIO-bus descriptor for the device to be looked at * @addr: device MDIO-bus ID - * @interface: requested PHY interface * * Return: a pointer to the DW XPCS handle if successful, otherwise -ENODEV if * the PCS device couldn't be found on the bus and other negative errno related * to the data allocation and MDIO-bus communications. */ -struct dw_xpcs *xpcs_create_mdiodev(struct mii_bus *bus, int addr, - phy_interface_t interface) +struct dw_xpcs *xpcs_create_mdiodev(struct mii_bus *bus, int addr) { struct mdio_device *mdiodev; struct dw_xpcs *xpcs; @@ -1527,7 +1452,7 @@ struct dw_xpcs *xpcs_create_mdiodev(struct mii_bus *bus, int addr, if (IS_ERR(mdiodev)) return ERR_CAST(mdiodev); - xpcs = xpcs_create(mdiodev, interface); + xpcs = xpcs_create(mdiodev); /* xpcs_create() has taken a refcount on the mdiodev if it was * successful. If xpcs_create() fails, this will free the mdio @@ -1541,10 +1466,21 @@ } EXPORT_SYMBOL_GPL(xpcs_create_mdiodev); +struct phylink_pcs *xpcs_create_pcs_mdiodev(struct mii_bus *bus, int addr) +{ + struct dw_xpcs *xpcs; + + xpcs = xpcs_create_mdiodev(bus, addr); + if (IS_ERR(xpcs)) + return ERR_CAST(xpcs); + + return &xpcs->pcs; +} +EXPORT_SYMBOL_GPL(xpcs_create_pcs_mdiodev); + /** * xpcs_create_fwnode() - Create a DW xPCS instance from @fwnode * @fwnode: fwnode handle pointing to the DW XPCS device - * @interface: requested PHY interface * * Return: a pointer to the DW XPCS handle if successful, otherwise -ENODEV if * the fwnode device is unavailable or the PCS device couldn't be found on the @@ -1552,8 +1488,7 @@ EXPORT_SYMBOL_GPL(xpcs_create_mdiodev); * other negative errno related to the data allocations and MDIO-bus * communications. */ -struct dw_xpcs *xpcs_create_fwnode(struct fwnode_handle *fwnode, - phy_interface_t interface) +struct dw_xpcs *xpcs_create_fwnode(struct fwnode_handle *fwnode) { struct mdio_device *mdiodev; struct dw_xpcs *xpcs; @@ -1565,7 +1500,7 @@ struct dw_xpcs *xpcs_create_fwnode(struct fwnode_handle *fwnode, if (!mdiodev) return ERR_PTR(-EPROBE_DEFER); - xpcs = xpcs_create(mdiodev, interface); + xpcs = xpcs_create(mdiodev); /* xpcs_create() has taken a refcount on the mdiodev if it was * successful.
If xpcs_create() fails, this will free the mdio @@ -1590,5 +1525,11 @@ void xpcs_destroy(struct dw_xpcs *xpcs) } EXPORT_SYMBOL_GPL(xpcs_destroy); +void xpcs_destroy_pcs(struct phylink_pcs *pcs) +{ + xpcs_destroy(phylink_pcs_to_xpcs(pcs)); +} +EXPORT_SYMBOL_GPL(xpcs_destroy_pcs); + MODULE_DESCRIPTION("Synopsys DesignWare XPCS library"); MODULE_LICENSE("GPL v2"); diff --git a/drivers/net/pcs/pcs-xpcs.h b/drivers/net/pcs/pcs-xpcs.h index fa05adfae220..adc5a0b3c883 100644 --- a/drivers/net/pcs/pcs-xpcs.h +++ b/drivers/net/pcs/pcs-xpcs.h @@ -54,14 +54,9 @@ /* Clause 37 Defines */ /* VR MII MMD registers offsets */ -#define DW_VR_MII_MMD_CTRL 0x0000 -#define DW_VR_MII_MMD_STS 0x0001 -#define DW_VR_MII_MMD_STS_LINK_STS BIT(2) #define DW_VR_MII_DIG_CTRL1 0x8000 #define DW_VR_MII_AN_CTRL 0x8001 #define DW_VR_MII_AN_INTR_STS 0x8002 -/* Enable 2.5G Mode */ -#define DW_VR_MII_DIG_CTRL1_2G5_EN BIT(2) /* EEE Mode Control Register */ #define DW_VR_MII_EEE_MCTRL0 0x8006 #define DW_VR_MII_EEE_MCTRL1 0x800b @@ -69,6 +64,7 @@ /* VR_MII_DIG_CTRL1 */ #define DW_VR_MII_DIG_CTRL1_MAC_AUTO_SW BIT(9) +#define DW_VR_MII_DIG_CTRL1_2G5_EN BIT(2) #define DW_VR_MII_DIG_CTRL1_PHY_MODE_CTRL BIT(0) /* VR_MII_DIG_CTRL2 */ @@ -77,11 +73,9 @@ /* VR_MII_AN_CTRL */ #define DW_VR_MII_AN_CTRL_8BIT BIT(8) -#define DW_VR_MII_AN_CTRL_TX_CONFIG_SHIFT 3 #define DW_VR_MII_TX_CONFIG_MASK BIT(3) #define DW_VR_MII_TX_CONFIG_PHY_SIDE_SGMII 0x1 #define DW_VR_MII_TX_CONFIG_MAC_SIDE_SGMII 0x0 -#define DW_VR_MII_AN_CTRL_PCS_MODE_SHIFT 1 #define DW_VR_MII_PCS_MODE_MASK GENMASK(2, 1) #define DW_VR_MII_PCS_MODE_C37_1000BASEX 0x0 #define DW_VR_MII_PCS_MODE_C37_SGMII 0x2 @@ -90,22 +84,12 @@ /* VR_MII_AN_INTR_STS */ #define DW_VR_MII_AN_STS_C37_ANCMPLT_INTR BIT(0) #define DW_VR_MII_AN_STS_C37_ANSGM_FD BIT(1) -#define DW_VR_MII_AN_STS_C37_ANSGM_SP_SHIFT 2 #define DW_VR_MII_AN_STS_C37_ANSGM_SP GENMASK(3, 2) #define DW_VR_MII_C37_ANSGM_SP_10 0x0 #define DW_VR_MII_C37_ANSGM_SP_100 0x1 #define DW_VR_MII_C37_ANSGM_SP_1000 0x2 #define DW_VR_MII_C37_ANSGM_SP_LNKSTS BIT(4) -/* SR MII MMD Control defines */ -#define AN_CL37_EN BIT(12) /* Enable Clause 37 auto-nego */ -#define SGMII_SPEED_SS13 BIT(13) /* SGMII speed along with SS6 */ -#define SGMII_SPEED_SS6 BIT(6) /* SGMII speed along with SS13 */ - -/* SR MII MMD AN Advertisement defines */ -#define DW_HALF_DUPLEX BIT(6) -#define DW_FULL_DUPLEX BIT(5) - /* VR MII EEE Control 0 defines */ #define DW_VR_MII_EEE_LTX_EN BIT(0) /* LPI Tx Enable */ #define DW_VR_MII_EEE_LRX_EN BIT(1) /* LPI Rx Enable */ @@ -114,7 +98,6 @@ #define DW_VR_MII_EEE_TX_EN_CTRL BIT(4) /* Tx Control Enable */ #define DW_VR_MII_EEE_RX_EN_CTRL BIT(7) /* Rx Control Enable */ -#define DW_VR_MII_EEE_MULT_FACT_100NS_SHIFT 8 #define DW_VR_MII_EEE_MULT_FACT_100NS GENMASK(11, 8) /* VR MII EEE Control 1 defines */ @@ -123,8 +106,27 @@ #define DW_XPCS_INFO_DECLARE(_name, _pcs, _pma) \ static const struct dw_xpcs_info _name = { .pcs = _pcs, .pma = _pma } +struct dw_xpcs_desc; + +enum dw_xpcs_clock { + DW_XPCS_CORE_CLK, + DW_XPCS_PAD_CLK, + DW_XPCS_NUM_CLKS, +}; + +struct dw_xpcs { + struct dw_xpcs_info info; + const struct dw_xpcs_desc *desc; + struct mdio_device *mdiodev; + struct clk_bulk_data clks[DW_XPCS_NUM_CLKS]; + struct phylink_pcs pcs; + phy_interface_t interface; + bool need_reset; +}; + int xpcs_read(struct dw_xpcs *xpcs, int dev, u32 reg); int xpcs_write(struct dw_xpcs *xpcs, int dev, u32 reg, u16 val); +int xpcs_modify(struct dw_xpcs *xpcs, int dev, u32 reg, u16 mask, u16 set); int xpcs_read_vpcs(struct dw_xpcs *xpcs, int reg); int 
xpcs_write_vpcs(struct dw_xpcs *xpcs, int reg, u16 val); int nxp_sja1105_sgmii_pma_config(struct dw_xpcs *xpcs); diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig index 01b235b3bb7e..ee3ea0b56d48 100644 --- a/drivers/net/phy/Kconfig +++ b/drivers/net/phy/Kconfig @@ -292,8 +292,8 @@ config MICREL_PHY config MICROCHIP_T1S_PHY tristate "Microchip 10BASE-T1S Ethernet PHYs" help - Currently supports the LAN8670/1/2 Rev.B1 and LAN8650/1 Rev.B0 Internal - PHYs. + Currently supports the LAN8670/1/2 Rev.B1/C1/C2 and + LAN8650/1 Rev.B0/B1 Internal PHYs. config MICROCHIP_PHY tristate "Microchip PHYs" diff --git a/drivers/net/phy/aquantia/aquantia.h b/drivers/net/phy/aquantia/aquantia.h index 2465345081f8..0c78bfabace5 100644 --- a/drivers/net/phy/aquantia/aquantia.h +++ b/drivers/net/phy/aquantia/aquantia.h @@ -177,6 +177,7 @@ static const struct aqr107_hw_stat aqr107_hw_stats[] = { struct aqr107_priv { u64 sgmii_stats[AQR107_SGMII_STAT_SZ]; unsigned long leds_active_low; + unsigned long leds_active_high; }; #if IS_REACHABLE(CONFIG_HWMON) diff --git a/drivers/net/phy/aquantia/aquantia_leds.c b/drivers/net/phy/aquantia/aquantia_leds.c index 201c8df93fad..00ad2313fed3 100644 --- a/drivers/net/phy/aquantia/aquantia_leds.c +++ b/drivers/net/phy/aquantia/aquantia_leds.c @@ -121,13 +121,13 @@ int aqr_phy_led_active_low_set(struct phy_device *phydev, int index, bool enable { return phy_modify_mmd(phydev, MDIO_MMD_VEND1, AQR_LED_DRIVE(index), VEND1_GLOBAL_LED_DRIVE_VDD, - enable ? VEND1_GLOBAL_LED_DRIVE_VDD : 0); + enable ? 0 : VEND1_GLOBAL_LED_DRIVE_VDD); } int aqr_phy_led_polarity_set(struct phy_device *phydev, int index, unsigned long modes) { + bool force_active_low = false, force_active_high = false; struct aqr107_priv *priv = phydev->priv; - bool active_low = false; u32 mode; if (index >= AQR_MAX_LEDS) @@ -136,7 +136,10 @@ int aqr_phy_led_polarity_set(struct phy_device *phydev, int index, unsigned long for_each_set_bit(mode, &modes, __PHY_LED_MODES_NUM) { switch (mode) { case PHY_LED_ACTIVE_LOW: - active_low = true; + force_active_low = true; + break; + case PHY_LED_ACTIVE_HIGH: + force_active_high = true; break; default: return -EINVAL; @@ -144,8 +147,14 @@ int aqr_phy_led_polarity_set(struct phy_device *phydev, int index, unsigned long } /* Save LED driver vdd state to restore on SW reset */ - if (active_low) + if (force_active_low) priv->leds_active_low |= BIT(index); - return aqr_phy_led_active_low_set(phydev, index, active_low); + if (force_active_high) + priv->leds_active_high |= BIT(index); + + if (force_active_high || force_active_low) + return aqr_phy_led_active_low_set(phydev, index, force_active_low); + + unreachable(); } diff --git a/drivers/net/phy/aquantia/aquantia_main.c b/drivers/net/phy/aquantia/aquantia_main.c index c33a5ef34ba0..38d0dd5c80a4 100644 --- a/drivers/net/phy/aquantia/aquantia_main.c +++ b/drivers/net/phy/aquantia/aquantia_main.c @@ -11,6 +11,7 @@ #include <linux/module.h> #include <linux/delay.h> #include <linux/bitfield.h> +#include <linux/of.h> #include <linux/phy.h> #include "aquantia.h" @@ -41,6 +42,7 @@ #define MDIO_PHYXS_VEND_IF_STATUS_TYPE_XAUI 4 #define MDIO_PHYXS_VEND_IF_STATUS_TYPE_SGMII 6 #define MDIO_PHYXS_VEND_IF_STATUS_TYPE_RXAUI 7 +#define MDIO_PHYXS_VEND_IF_STATUS_TYPE_OFF 9 #define MDIO_PHYXS_VEND_IF_STATUS_TYPE_OCSGMII 10 #define MDIO_AN_VEND_PROV 0xc400 @@ -71,6 +73,11 @@ #define MDIO_AN_TX_VEND_INT_MASK2 0xd401 #define MDIO_AN_TX_VEND_INT_MASK2_LINK BIT(0) +#define PMAPMD_RSVD_VEND_PROV 0xe400 +#define PMAPMD_RSVD_VEND_PROV_MDI_CONF 
GENMASK(1, 0) +#define PMAPMD_RSVD_VEND_PROV_MDI_REVERSE BIT(0) +#define PMAPMD_RSVD_VEND_PROV_MDI_FORCE BIT(1) + #define MDIO_AN_RX_LP_STAT1 0xe820 #define MDIO_AN_RX_LP_STAT1_1000BASET_FULL BIT(15) #define MDIO_AN_RX_LP_STAT1_1000BASET_HALF BIT(14) @@ -342,9 +349,19 @@ static int aqr107_read_status(struct phy_device *phydev) if (!phydev->link || phydev->autoneg == AUTONEG_DISABLE) return 0; - val = phy_read_mmd(phydev, MDIO_MMD_PHYXS, MDIO_PHYXS_VEND_IF_STATUS); - if (val < 0) - return val; + /* + * The status register is not immediately correct on line side link up. + * Poll periodically until it reflects the correct ON state. + * Only fail on a read error; on timeout, the type field is left in the + * OFF state, which is handled below. + */ + ret = phy_read_mmd_poll_timeout(phydev, MDIO_MMD_PHYXS, + MDIO_PHYXS_VEND_IF_STATUS, val, + (FIELD_GET(MDIO_PHYXS_VEND_IF_STATUS_TYPE_MASK, val) != + MDIO_PHYXS_VEND_IF_STATUS_TYPE_OFF), + AQR107_OP_IN_PROG_SLEEP, + AQR107_OP_IN_PROG_TIMEOUT, false); + if (ret && ret != -ETIMEDOUT) + return ret; switch (FIELD_GET(MDIO_PHYXS_VEND_IF_STATUS_TYPE_MASK, val)) { case MDIO_PHYXS_VEND_IF_STATUS_TYPE_KR: @@ -371,7 +388,9 @@ case MDIO_PHYXS_VEND_IF_STATUS_TYPE_OCSGMII: phydev->interface = PHY_INTERFACE_MODE_2500BASEX; break; + case MDIO_PHYXS_VEND_IF_STATUS_TYPE_OFF: default: + phydev->link = false; phydev->interface = PHY_INTERFACE_MODE_NA; break; } @@ -485,10 +504,33 @@ static void aqr107_chip_info(struct phy_device *phydev) fw_major, fw_minor, build_id, prov_id); } +static int aqr107_config_mdi(struct phy_device *phydev) +{ + struct device_node *np = phydev->mdio.dev.of_node; + u32 mdi_conf; + int ret; + + ret = of_property_read_u32(np, "marvell,mdi-cfg-order", &mdi_conf); + + /* Do nothing in case property "marvell,mdi-cfg-order" is not present */ + if (ret == -EINVAL || ret == -ENOSYS) + return 0; + + if (ret) + return ret; + + if (mdi_conf & ~PMAPMD_RSVD_VEND_PROV_MDI_REVERSE) + return -EINVAL; + + return phy_modify_mmd(phydev, MDIO_MMD_PMAPMD, PMAPMD_RSVD_VEND_PROV, + PMAPMD_RSVD_VEND_PROV_MDI_CONF, + mdi_conf | PMAPMD_RSVD_VEND_PROV_MDI_FORCE); +} + static int aqr107_config_init(struct phy_device *phydev) { struct aqr107_priv *priv = phydev->priv; - u32 led_active_low; + u32 led_idx; int ret; /* Check that the PHY interface type is compatible */ @@ -514,9 +556,19 @@ static int aqr107_config_init(struct phy_device *phydev) if (ret) return ret; + ret = aqr107_config_mdi(phydev); + if (ret) + return ret; + /* Restore LED polarity state after reset */ - for_each_set_bit(led_active_low, &priv->leds_active_low, AQR_MAX_LEDS) { - ret = aqr_phy_led_active_low_set(phydev, led_active_low, true); + for_each_set_bit(led_idx, &priv->leds_active_low, AQR_MAX_LEDS) { + ret = aqr_phy_led_active_low_set(phydev, led_idx, true); + if (ret) + return ret; + } + + for_each_set_bit(led_idx, &priv->leds_active_high, AQR_MAX_LEDS) { + ret = aqr_phy_led_active_low_set(phydev, led_idx, false); if (ret) return ret; } diff --git a/drivers/net/phy/intel-xway.c b/drivers/net/phy/intel-xway.c index 3c032868ef04..b672c55a7a4e 100644 --- a/drivers/net/phy/intel-xway.c +++ b/drivers/net/phy/intel-xway.c @@ -151,6 +151,13 @@ #define XWAY_MMD_LED3H 0x01E8 #define XWAY_MMD_LED3L 0x01E9 +#define XWAY_GPHY_MAX_LEDS 3 +#define XWAY_GPHY_LED_INV(idx) BIT(12 + (idx)) +#define XWAY_GPHY_LED_EN(idx) BIT(8 + (idx)) +#define XWAY_GPHY_LED_DA(idx) BIT(idx) +#define XWAY_MMD_LEDxH(idx) (XWAY_MMD_LED0H + 2 * (idx)) +#define XWAY_MMD_LEDxL(idx) (XWAY_MMD_LED0L + 2 * (idx)) + #define
PHY_ID_PHY11G_1_3 0x030260D1 #define PHY_ID_PHY22F_1_3 0x030260E1 #define PHY_ID_PHY11G_1_4 0xD565A400 @@ -229,20 +236,12 @@ static int xway_gphy_rgmii_init(struct phy_device *phydev) XWAY_MDIO_MIICTRL_TXSKEW_MASK, val); } -static int xway_gphy_config_init(struct phy_device *phydev) +static int xway_gphy_init_leds(struct phy_device *phydev) { int err; u32 ledxh; u32 ledxl; - /* Mask all interrupts */ - err = phy_write(phydev, XWAY_MDIO_IMASK, 0); - if (err) - return err; - - /* Clear all pending interrupts */ - phy_read(phydev, XWAY_MDIO_ISTAT); - /* Ensure that integrated led function is enabled for all leds */ err = phy_write(phydev, XWAY_MDIO_LED, XWAY_MDIO_LED_LED0_EN | @@ -276,6 +275,26 @@ static int xway_gphy_config_init(struct phy_device *phydev) phy_write_mmd(phydev, MDIO_MMD_VEND2, XWAY_MMD_LED2H, ledxh); phy_write_mmd(phydev, MDIO_MMD_VEND2, XWAY_MMD_LED2L, ledxl); + return 0; +} + +static int xway_gphy_config_init(struct phy_device *phydev) +{ + struct device_node *np = phydev->mdio.dev.of_node; + int err; + + /* Mask all interrupts */ + err = phy_write(phydev, XWAY_MDIO_IMASK, 0); + if (err) + return err; + + /* Use default LED configuration if 'leds' node isn't defined */ + if (!of_get_child_by_name(np, "leds")) + xway_gphy_init_leds(phydev); + + /* Clear all pending interrupts */ + phy_read(phydev, XWAY_MDIO_ISTAT); + err = xway_gphy_rgmii_init(phydev); if (err) return err; @@ -347,6 +366,172 @@ static irqreturn_t xway_gphy_handle_interrupt(struct phy_device *phydev) return IRQ_HANDLED; } +static int xway_gphy_led_brightness_set(struct phy_device *phydev, + u8 index, enum led_brightness value) +{ + int ret; + + if (index >= XWAY_GPHY_MAX_LEDS) + return -EINVAL; + + /* clear EN and set manual LED state */ + ret = phy_modify(phydev, XWAY_MDIO_LED, + ((value == LED_OFF) ? XWAY_GPHY_LED_EN(index) : 0) | + XWAY_GPHY_LED_DA(index), + (value == LED_OFF) ? 0 : XWAY_GPHY_LED_DA(index)); + if (ret) + return ret; + + /* clear HW LED setup */ + if (value == LED_OFF) { + ret = phy_write_mmd(phydev, MDIO_MMD_VEND2, XWAY_MMD_LEDxH(index), 0); + if (ret) + return ret; + + return phy_write_mmd(phydev, MDIO_MMD_VEND2, XWAY_MMD_LEDxL(index), 0); + } else { + return 0; + } +} + +static const unsigned long supported_triggers = (BIT(TRIGGER_NETDEV_LINK) | + BIT(TRIGGER_NETDEV_LINK_10) | + BIT(TRIGGER_NETDEV_LINK_100) | + BIT(TRIGGER_NETDEV_LINK_1000) | + BIT(TRIGGER_NETDEV_RX) | + BIT(TRIGGER_NETDEV_TX)); + +static int xway_gphy_led_hw_is_supported(struct phy_device *phydev, u8 index, + unsigned long rules) +{ + if (index >= XWAY_GPHY_MAX_LEDS) + return -EINVAL; + + /* activity triggers are not possible without combination with a link + * trigger. 
+ */ + if (rules & (BIT(TRIGGER_NETDEV_RX) | BIT(TRIGGER_NETDEV_TX)) && + !(rules & (BIT(TRIGGER_NETDEV_LINK) | + BIT(TRIGGER_NETDEV_LINK_10) | + BIT(TRIGGER_NETDEV_LINK_100) | + BIT(TRIGGER_NETDEV_LINK_1000)))) + return -EOPNOTSUPP; + + /* All other combinations of the supported triggers are allowed */ + if (rules & ~supported_triggers) + return -EOPNOTSUPP; + + return 0; +} + +static int xway_gphy_led_hw_control_get(struct phy_device *phydev, u8 index, + unsigned long *rules) +{ + int lval, hval; + + if (index >= XWAY_GPHY_MAX_LEDS) + return -EINVAL; + + hval = phy_read_mmd(phydev, MDIO_MMD_VEND2, XWAY_MMD_LEDxH(index)); + if (hval < 0) + return hval; + + lval = phy_read_mmd(phydev, MDIO_MMD_VEND2, XWAY_MMD_LEDxL(index)); + if (lval < 0) + return lval; + + if (hval & XWAY_MMD_LEDxH_CON_LINK10) + *rules |= BIT(TRIGGER_NETDEV_LINK_10); + + if (hval & XWAY_MMD_LEDxH_CON_LINK100) + *rules |= BIT(TRIGGER_NETDEV_LINK_100); + + if (hval & XWAY_MMD_LEDxH_CON_LINK1000) + *rules |= BIT(TRIGGER_NETDEV_LINK_1000); + + if ((hval & XWAY_MMD_LEDxH_CON_LINK10) && + (hval & XWAY_MMD_LEDxH_CON_LINK100) && + (hval & XWAY_MMD_LEDxH_CON_LINK1000)) + *rules |= BIT(TRIGGER_NETDEV_LINK); + + if (lval & XWAY_MMD_LEDxL_PULSE_TXACT) + *rules |= BIT(TRIGGER_NETDEV_TX); + + if (lval & XWAY_MMD_LEDxL_PULSE_RXACT) + *rules |= BIT(TRIGGER_NETDEV_RX); + + return 0; +} + +static int xway_gphy_led_hw_control_set(struct phy_device *phydev, u8 index, + unsigned long rules) +{ + u16 hval = 0, lval = 0; + int ret; + + if (index >= XWAY_GPHY_MAX_LEDS) + return -EINVAL; + + if (rules & BIT(TRIGGER_NETDEV_LINK) || + rules & BIT(TRIGGER_NETDEV_LINK_10)) + hval |= XWAY_MMD_LEDxH_CON_LINK10; + + if (rules & BIT(TRIGGER_NETDEV_LINK) || + rules & BIT(TRIGGER_NETDEV_LINK_100)) + hval |= XWAY_MMD_LEDxH_CON_LINK100; + + if (rules & BIT(TRIGGER_NETDEV_LINK) || + rules & BIT(TRIGGER_NETDEV_LINK_1000)) + hval |= XWAY_MMD_LEDxH_CON_LINK1000; + + if (rules & BIT(TRIGGER_NETDEV_TX)) + lval |= XWAY_MMD_LEDxL_PULSE_TXACT; + + if (rules & BIT(TRIGGER_NETDEV_RX)) + lval |= XWAY_MMD_LEDxL_PULSE_RXACT; + + ret = phy_write_mmd(phydev, MDIO_MMD_VEND2, XWAY_MMD_LEDxH(index), hval); + if (ret) + return ret; + + ret = phy_write_mmd(phydev, MDIO_MMD_VEND2, XWAY_MMD_LEDxL(index), lval); + if (ret) + return ret; + + return phy_set_bits(phydev, XWAY_MDIO_LED, XWAY_GPHY_LED_EN(index)); +} + +static int xway_gphy_led_polarity_set(struct phy_device *phydev, int index, + unsigned long modes) +{ + bool force_active_low = false, force_active_high = false; + u32 mode; + + if (index >= XWAY_GPHY_MAX_LEDS) + return -EINVAL; + + for_each_set_bit(mode, &modes, __PHY_LED_MODES_NUM) { + switch (mode) { + case PHY_LED_ACTIVE_LOW: + force_active_low = true; + break; + case PHY_LED_ACTIVE_HIGH: + force_active_high = true; + break; + default: + return -EINVAL; + } + } + + if (force_active_low) + return phy_set_bits(phydev, XWAY_MDIO_LED, XWAY_GPHY_LED_INV(index)); + + if (force_active_high) + return phy_clear_bits(phydev, XWAY_MDIO_LED, XWAY_GPHY_LED_INV(index)); + + unreachable(); +} + static struct phy_driver xway_gphy[] = { { .phy_id = PHY_ID_PHY11G_1_3, @@ -359,6 +544,11 @@ static struct phy_driver xway_gphy[] = { .config_intr = xway_gphy_config_intr, .suspend = genphy_suspend, .resume = genphy_resume, + .led_brightness_set = xway_gphy_led_brightness_set, + .led_hw_is_supported = xway_gphy_led_hw_is_supported, + .led_hw_control_get = xway_gphy_led_hw_control_get, + .led_hw_control_set = xway_gphy_led_hw_control_set, + .led_polarity_set = xway_gphy_led_polarity_set, }, { 
.phy_id = PHY_ID_PHY22F_1_3, .phy_id_mask = 0xffffffff, @@ -370,6 +560,11 @@ static struct phy_driver xway_gphy[] = { .config_intr = xway_gphy_config_intr, .suspend = genphy_suspend, .resume = genphy_resume, + .led_brightness_set = xway_gphy_led_brightness_set, + .led_hw_is_supported = xway_gphy_led_hw_is_supported, + .led_hw_control_get = xway_gphy_led_hw_control_get, + .led_hw_control_set = xway_gphy_led_hw_control_set, + .led_polarity_set = xway_gphy_led_polarity_set, }, { .phy_id = PHY_ID_PHY11G_1_4, .phy_id_mask = 0xffffffff, @@ -381,6 +576,11 @@ static struct phy_driver xway_gphy[] = { .config_intr = xway_gphy_config_intr, .suspend = genphy_suspend, .resume = genphy_resume, + .led_brightness_set = xway_gphy_led_brightness_set, + .led_hw_is_supported = xway_gphy_led_hw_is_supported, + .led_hw_control_get = xway_gphy_led_hw_control_get, + .led_hw_control_set = xway_gphy_led_hw_control_set, + .led_polarity_set = xway_gphy_led_polarity_set, }, { .phy_id = PHY_ID_PHY22F_1_4, .phy_id_mask = 0xffffffff, @@ -392,6 +592,11 @@ static struct phy_driver xway_gphy[] = { .config_intr = xway_gphy_config_intr, .suspend = genphy_suspend, .resume = genphy_resume, + .led_brightness_set = xway_gphy_led_brightness_set, + .led_hw_is_supported = xway_gphy_led_hw_is_supported, + .led_hw_control_get = xway_gphy_led_hw_control_get, + .led_hw_control_set = xway_gphy_led_hw_control_set, + .led_polarity_set = xway_gphy_led_polarity_set, }, { .phy_id = PHY_ID_PHY11G_1_5, .phy_id_mask = 0xffffffff, @@ -402,6 +607,11 @@ static struct phy_driver xway_gphy[] = { .config_intr = xway_gphy_config_intr, .suspend = genphy_suspend, .resume = genphy_resume, + .led_brightness_set = xway_gphy_led_brightness_set, + .led_hw_is_supported = xway_gphy_led_hw_is_supported, + .led_hw_control_get = xway_gphy_led_hw_control_get, + .led_hw_control_set = xway_gphy_led_hw_control_set, + .led_polarity_set = xway_gphy_led_polarity_set, }, { .phy_id = PHY_ID_PHY22F_1_5, .phy_id_mask = 0xffffffff, @@ -412,6 +622,11 @@ static struct phy_driver xway_gphy[] = { .config_intr = xway_gphy_config_intr, .suspend = genphy_suspend, .resume = genphy_resume, + .led_brightness_set = xway_gphy_led_brightness_set, + .led_hw_is_supported = xway_gphy_led_hw_is_supported, + .led_hw_control_get = xway_gphy_led_hw_control_get, + .led_hw_control_set = xway_gphy_led_hw_control_set, + .led_polarity_set = xway_gphy_led_polarity_set, }, { .phy_id = PHY_ID_PHY11G_VR9_1_1, .phy_id_mask = 0xffffffff, @@ -422,6 +637,11 @@ static struct phy_driver xway_gphy[] = { .config_intr = xway_gphy_config_intr, .suspend = genphy_suspend, .resume = genphy_resume, + .led_brightness_set = xway_gphy_led_brightness_set, + .led_hw_is_supported = xway_gphy_led_hw_is_supported, + .led_hw_control_get = xway_gphy_led_hw_control_get, + .led_hw_control_set = xway_gphy_led_hw_control_set, + .led_polarity_set = xway_gphy_led_polarity_set, }, { .phy_id = PHY_ID_PHY22F_VR9_1_1, .phy_id_mask = 0xffffffff, @@ -432,6 +652,11 @@ static struct phy_driver xway_gphy[] = { .config_intr = xway_gphy_config_intr, .suspend = genphy_suspend, .resume = genphy_resume, + .led_brightness_set = xway_gphy_led_brightness_set, + .led_hw_is_supported = xway_gphy_led_hw_is_supported, + .led_hw_control_get = xway_gphy_led_hw_control_get, + .led_hw_control_set = xway_gphy_led_hw_control_set, + .led_polarity_set = xway_gphy_led_polarity_set, }, { .phy_id = PHY_ID_PHY11G_VR9_1_2, .phy_id_mask = 0xffffffff, @@ -442,6 +667,11 @@ static struct phy_driver xway_gphy[] = { .config_intr = xway_gphy_config_intr, .suspend = 
genphy_suspend, .resume = genphy_resume, + .led_brightness_set = xway_gphy_led_brightness_set, + .led_hw_is_supported = xway_gphy_led_hw_is_supported, + .led_hw_control_get = xway_gphy_led_hw_control_get, + .led_hw_control_set = xway_gphy_led_hw_control_set, + .led_polarity_set = xway_gphy_led_polarity_set, }, { .phy_id = PHY_ID_PHY22F_VR9_1_2, .phy_id_mask = 0xffffffff, @@ -452,6 +682,11 @@ static struct phy_driver xway_gphy[] = { .config_intr = xway_gphy_config_intr, .suspend = genphy_suspend, .resume = genphy_resume, + .led_brightness_set = xway_gphy_led_brightness_set, + .led_hw_is_supported = xway_gphy_led_hw_is_supported, + .led_hw_control_get = xway_gphy_led_hw_control_get, + .led_hw_control_set = xway_gphy_led_hw_control_set, + .led_polarity_set = xway_gphy_led_polarity_set, }, }; module_phy_driver(xway_gphy); diff --git a/drivers/net/phy/marvell-88q2xxx.c b/drivers/net/phy/marvell-88q2xxx.c index c812f16eaa3a..5107f58338af 100644 --- a/drivers/net/phy/marvell-88q2xxx.c +++ b/drivers/net/phy/marvell-88q2xxx.c @@ -101,6 +101,22 @@ struct mmd_val { u16 val; }; +static const struct mmd_val mv88q2110_init_seq0[] = { + { MDIO_MMD_PCS, 0xffe4, 0x07b5 }, + { MDIO_MMD_PCS, 0xffe4, 0x06b6 }, +}; + +static const struct mmd_val mv88q2110_init_seq1[] = { + { MDIO_MMD_PCS, 0xffde, 0x402f }, + { MDIO_MMD_PCS, 0xfe34, 0x4040 }, + { MDIO_MMD_PCS, 0xfe2a, 0x3c1d }, + { MDIO_MMD_PCS, 0xfe34, 0x0040 }, + { MDIO_MMD_AN, 0x8032, 0x0064 }, + { MDIO_MMD_AN, 0x8031, 0x0a01 }, + { MDIO_MMD_AN, 0x8031, 0x0c01 }, + { MDIO_MMD_PCS, 0xffdb, 0x0010 }, +}; + static const struct mmd_val mv88q222x_revb0_init_seq0[] = { { MDIO_MMD_PCS, 0x8033, 0x6801 }, { MDIO_MMD_AN, MDIO_AN_T1_CTRL, 0x0 }, @@ -174,20 +190,54 @@ static const struct mmd_val mv88q222x_revb1_revb2_init_seq1[] = { { MDIO_MMD_PCS, 0xfe11, 0x1105 }, }; +static int mv88q2xxx_write_mmd_vals(struct phy_device *phydev, + const struct mmd_val *vals, size_t len) +{ + int ret; + + for (; len; vals++, len--) { + ret = phy_write_mmd(phydev, vals->devad, vals->regnum, + vals->val); + if (ret < 0) + return ret; + } + + return 0; +} + static int mv88q2xxx_soft_reset(struct phy_device *phydev) { int ret; int val; - ret = phy_write_mmd(phydev, MDIO_MMD_PCS, - MDIO_PCS_1000BT1_CTRL, MDIO_PCS_1000BT1_CTRL_RESET); + /* Enable RESET of DCL */ + if (phydev->autoneg == AUTONEG_ENABLE || phydev->speed == SPEED_1000) { + ret = phy_write_mmd(phydev, MDIO_MMD_PCS, 0xfe1b, 0x48); + if (ret < 0) + return ret; + } + + ret = phy_write_mmd(phydev, MDIO_MMD_PCS, MDIO_PCS_1000BT1_CTRL, + MDIO_PCS_1000BT1_CTRL_RESET); if (ret < 0) return ret; - return phy_read_mmd_poll_timeout(phydev, MDIO_MMD_PCS, - MDIO_PCS_1000BT1_CTRL, val, - !(val & MDIO_PCS_1000BT1_CTRL_RESET), - 50000, 600000, true); + ret = phy_read_mmd_poll_timeout(phydev, MDIO_MMD_PCS, + MDIO_PCS_1000BT1_CTRL, val, + !(val & MDIO_PCS_1000BT1_CTRL_RESET), + 50000, 600000, true); + if (ret < 0) + return ret; + + ret = phy_write_mmd(phydev, MDIO_MMD_PCS, 0xffe4, 0xc); + if (ret < 0) + return ret; + + /* Disable RESET of DCL */ + if (phydev->autoneg == AUTONEG_ENABLE || phydev->speed == SPEED_1000) + return phy_write_mmd(phydev, MDIO_MMD_PCS, 0xfe1b, 0x58); + + return 0; } static int mv88q2xxx_read_link_gbit(struct phy_device *phydev) @@ -390,15 +440,6 @@ static int mv88q2xxx_get_features(struct phy_device *phydev) if (ret) return ret; - /* The PHY signalizes it supports autonegotiation. Unfortunately, so - * far it was not possible to get a link even when following the init - * sequence provided by Marvell. 
Disable it for now until a proper - * workaround is found or a new PHY revision is released. - */ - if (phydev->drv->phy_id == MARVELL_PHY_ID_88Q2110) - linkmode_clear_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, - phydev->supported); - return 0; } @@ -705,60 +746,37 @@ static int mv88q2xxx_probe(struct phy_device *phydev) return mv88q2xxx_hwmon_probe(phydev); } -static int mv88q222x_soft_reset(struct phy_device *phydev) +static int mv88q2110_config_init(struct phy_device *phydev) { int ret; - /* Enable RESET of DCL */ - if (phydev->autoneg == AUTONEG_ENABLE || phydev->speed == SPEED_1000) { - ret = phy_write_mmd(phydev, MDIO_MMD_PCS, 0xfe1b, 0x48); - if (ret < 0) - return ret; - } - - ret = phy_write_mmd(phydev, MDIO_MMD_PCS, MDIO_PCS_1000BT1_CTRL, - MDIO_PCS_1000BT1_CTRL_RESET); + ret = mv88q2xxx_write_mmd_vals(phydev, mv88q2110_init_seq0, + ARRAY_SIZE(mv88q2110_init_seq0)); if (ret < 0) return ret; - ret = phy_write_mmd(phydev, MDIO_MMD_PCS, 0xffe4, 0xc); + usleep_range(5000, 10000); + + ret = mv88q2xxx_write_mmd_vals(phydev, mv88q2110_init_seq1, + ARRAY_SIZE(mv88q2110_init_seq1)); if (ret < 0) return ret; - /* Disable RESET of DCL */ - if (phydev->autoneg == AUTONEG_ENABLE || phydev->speed == SPEED_1000) - return phy_write_mmd(phydev, MDIO_MMD_PCS, 0xfe1b, 0x58); - - return 0; -} - -static int mv88q222x_write_mmd_vals(struct phy_device *phydev, - const struct mmd_val *vals, size_t len) -{ - int ret; - - for (; len; vals++, len--) { - ret = phy_write_mmd(phydev, vals->devad, vals->regnum, - vals->val); - if (ret < 0) - return ret; - } - - return 0; + return mv88q2xxx_config_init(phydev); } static int mv88q222x_revb0_config_init(struct phy_device *phydev) { int ret; - ret = mv88q222x_write_mmd_vals(phydev, mv88q222x_revb0_init_seq0, + ret = mv88q2xxx_write_mmd_vals(phydev, mv88q222x_revb0_init_seq0, ARRAY_SIZE(mv88q222x_revb0_init_seq0)); if (ret < 0) return ret; usleep_range(5000, 10000); - ret = mv88q222x_write_mmd_vals(phydev, mv88q222x_revb0_init_seq1, + ret = mv88q2xxx_write_mmd_vals(phydev, mv88q222x_revb0_init_seq1, ARRAY_SIZE(mv88q222x_revb0_init_seq1)); if (ret < 0) return ret; @@ -772,17 +790,17 @@ static int mv88q222x_revb1_revb2_config_init(struct phy_device *phydev) int ret; if (is_rev_b1) - ret = mv88q222x_write_mmd_vals(phydev, mv88q222x_revb1_init_seq0, + ret = mv88q2xxx_write_mmd_vals(phydev, mv88q222x_revb1_init_seq0, ARRAY_SIZE(mv88q222x_revb1_init_seq0)); else - ret = mv88q222x_write_mmd_vals(phydev, mv88q222x_revb2_init_seq0, + ret = mv88q2xxx_write_mmd_vals(phydev, mv88q222x_revb2_init_seq0, ARRAY_SIZE(mv88q222x_revb2_init_seq0)); if (ret < 0) return ret; usleep_range(3000, 5000); - ret = mv88q222x_write_mmd_vals(phydev, mv88q222x_revb1_revb2_init_seq1, + ret = mv88q2xxx_write_mmd_vals(phydev, mv88q222x_revb1_revb2_init_seq1, ARRAY_SIZE(mv88q222x_revb1_revb2_init_seq1)); if (ret < 0) return ret; @@ -888,7 +906,7 @@ static struct phy_driver mv88q2xxx_driver[] = { .name = "mv88q2110", .get_features = mv88q2xxx_get_features, .config_aneg = mv88q2xxx_config_aneg, - .config_init = mv88q2xxx_config_init, + .config_init = mv88q2110_config_init, .read_status = mv88q2xxx_read_status, .soft_reset = mv88q2xxx_soft_reset, .set_loopback = genphy_c45_loopback, @@ -906,7 +924,7 @@ static struct phy_driver mv88q2xxx_driver[] = { .aneg_done = genphy_c45_aneg_done, .config_init = mv88q222x_config_init, .read_status = mv88q2xxx_read_status, - .soft_reset = mv88q222x_soft_reset, + .soft_reset = mv88q2xxx_soft_reset, .config_intr = mv88q2xxx_config_intr, .handle_interrupt = 
mv88q2xxx_handle_interrupt, .set_loopback = genphy_c45_loopback, diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c index 9964bf3dea2f..28aec37acd2c 100644 --- a/drivers/net/phy/marvell.c +++ b/drivers/net/phy/marvell.c @@ -176,6 +176,7 @@ #define MII_M1011_PHY_STATUS_FULLDUPLEX 0x2000 #define MII_M1011_PHY_STATUS_RESOLVED 0x0800 #define MII_M1011_PHY_STATUS_LINK 0x0400 +#define MII_M1011_PHY_STATUS_MDIX BIT(6) #define MII_88E3016_PHY_SPEC_CTRL 0x10 #define MII_88E3016_DISABLE_SCRAMBLER 0x0200 @@ -1722,6 +1723,19 @@ static int marvell_read_status_page(struct phy_device *phydev, int page) phydev->duplex = DUPLEX_UNKNOWN; phydev->port = fiber ? PORT_FIBRE : PORT_TP; + if (fiber) { + phydev->mdix = ETH_TP_MDI_INVALID; + } else { + /* The MDI-X state is set regardless of Autoneg being enabled + * and reflects forced MDI-X state as well as auto resolution + */ + if (status & MII_M1011_PHY_STATUS_RESOLVED) + phydev->mdix = status & MII_M1011_PHY_STATUS_MDIX ? + ETH_TP_MDI_X : ETH_TP_MDI; + else + phydev->mdix = ETH_TP_MDI_INVALID; + } + if (phydev->autoneg == AUTONEG_ENABLE) err = marvell_read_status_page_an(phydev, fiber, status); else diff --git a/drivers/net/phy/mediatek-ge-soc.c b/drivers/net/phy/mediatek-ge-soc.c index f4f9412d0cd7..a931832b1418 100644 --- a/drivers/net/phy/mediatek-ge-soc.c +++ b/drivers/net/phy/mediatek-ge-soc.c @@ -110,7 +110,7 @@ #define MTK_PHY_CR_TX_AMP_OFFSET_D_MASK GENMASK(6, 0) #define MTK_PHY_RG_AD_CAL_COMP 0x17a -#define MTK_PHY_AD_CAL_COMP_OUT_SHIFT (8) +#define MTK_PHY_AD_CAL_COMP_OUT_MASK GENMASK(8, 8) #define MTK_PHY_RG_AD_CAL_CLK 0x17b #define MTK_PHY_DA_CAL_CLK BIT(0) @@ -342,7 +342,8 @@ static int cal_cycle(struct phy_device *phydev, int devad, ret = phy_read_mmd_poll_timeout(phydev, MDIO_MMD_VEND1, MTK_PHY_RG_AD_CAL_CLK, reg_val, reg_val & MTK_PHY_DA_CAL_CLK, 500, - ANALOG_INTERNAL_OPERATION_MAX_US, false); + ANALOG_INTERNAL_OPERATION_MAX_US, + false); if (ret) { phydev_err(phydev, "Calibration cycle timeout\n"); return ret; @@ -350,8 +351,10 @@ static int cal_cycle(struct phy_device *phydev, int devad, phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1, MTK_PHY_RG_AD_CALIN, MTK_PHY_DA_CALIN_FLAG); - ret = phy_read_mmd(phydev, MDIO_MMD_VEND1, MTK_PHY_RG_AD_CAL_COMP) >> - MTK_PHY_AD_CAL_COMP_OUT_SHIFT; + ret = phy_read_mmd(phydev, MDIO_MMD_VEND1, MTK_PHY_RG_AD_CAL_COMP); + if (ret < 0) + return ret; + ret = FIELD_GET(MTK_PHY_AD_CAL_COMP_OUT_MASK, ret); phydev_dbg(phydev, "cal_val: 0x%x, ret: %d\n", cal_val, ret); return ret; @@ -408,16 +411,17 @@ static int tx_offset_cal_efuse(struct phy_device *phydev, u32 *buf) static int tx_amp_fill_result(struct phy_device *phydev, u16 *buf) { - int i; - int bias[16] = {}; - const int vals_9461[16] = { 7, 1, 4, 7, - 7, 1, 4, 7, - 7, 1, 4, 7, - 7, 1, 4, 7 }; const int vals_9481[16] = { 10, 6, 6, 10, 10, 6, 6, 10, 10, 6, 6, 10, 10, 6, 6, 10 }; + const int vals_9461[16] = { 7, 1, 4, 7, + 7, 1, 4, 7, + 7, 1, 4, 7, + 7, 1, 4, 7 }; + int bias[16] = {}; + int i; + switch (phydev->drv->phy_id) { case MTK_GPHY_ID_MT7981: /* We add some calibration to efuse values @@ -440,40 +444,72 @@ static int tx_amp_fill_result(struct phy_device *phydev, u16 *buf) } phy_modify_mmd(phydev, MDIO_MMD_VEND1, MTK_PHY_TXVLD_DA_RG, - MTK_PHY_DA_TX_I2MPB_A_GBE_MASK, (buf[0] + bias[0]) << 10); + MTK_PHY_DA_TX_I2MPB_A_GBE_MASK, + FIELD_PREP(MTK_PHY_DA_TX_I2MPB_A_GBE_MASK, + buf[0] + bias[0])); phy_modify_mmd(phydev, MDIO_MMD_VEND1, MTK_PHY_TXVLD_DA_RG, - MTK_PHY_DA_TX_I2MPB_A_TBT_MASK, buf[0] + bias[1]); + MTK_PHY_DA_TX_I2MPB_A_TBT_MASK, + 
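The MediaTek conversion above replaces an open-coded shift with GENMASK()/FIELD_GET()/FIELD_PREP(), so each field's position lives in a single definition, and it also adds the previously missing error check on the phy_read_mmd() return value before the field is extracted. A self-contained sketch of the bitfield idiom (mask name and values are made up):

	#include <linux/bitfield.h>

	#define DEMO_SPEED_MASK	GENMASK(10, 9)	/* two-bit field at bits 10:9 */

	static u16 demo_roundtrip(u16 reg)
	{
		u16 spd = FIELD_GET(DEMO_SPEED_MASK, reg);	/* 0x0600 -> 0x3 */

		return FIELD_PREP(DEMO_SPEED_MASK, spd);	/* 0x3 -> 0x0600 */
	}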
FIELD_PREP(MTK_PHY_DA_TX_I2MPB_A_TBT_MASK, + buf[0] + bias[1])); phy_modify_mmd(phydev, MDIO_MMD_VEND1, MTK_PHY_TX_I2MPB_TEST_MODE_A2, - MTK_PHY_DA_TX_I2MPB_A_HBT_MASK, (buf[0] + bias[2]) << 10); + MTK_PHY_DA_TX_I2MPB_A_HBT_MASK, + FIELD_PREP(MTK_PHY_DA_TX_I2MPB_A_HBT_MASK, + buf[0] + bias[2])); phy_modify_mmd(phydev, MDIO_MMD_VEND1, MTK_PHY_TX_I2MPB_TEST_MODE_A2, - MTK_PHY_DA_TX_I2MPB_A_TST_MASK, buf[0] + bias[3]); + MTK_PHY_DA_TX_I2MPB_A_TST_MASK, + FIELD_PREP(MTK_PHY_DA_TX_I2MPB_A_TST_MASK, + buf[0] + bias[3])); phy_modify_mmd(phydev, MDIO_MMD_VEND1, MTK_PHY_TX_I2MPB_TEST_MODE_B1, - MTK_PHY_DA_TX_I2MPB_B_GBE_MASK, (buf[1] + bias[4]) << 8); + MTK_PHY_DA_TX_I2MPB_B_GBE_MASK, + FIELD_PREP(MTK_PHY_DA_TX_I2MPB_B_GBE_MASK, + buf[1] + bias[4])); phy_modify_mmd(phydev, MDIO_MMD_VEND1, MTK_PHY_TX_I2MPB_TEST_MODE_B1, - MTK_PHY_DA_TX_I2MPB_B_TBT_MASK, buf[1] + bias[5]); + MTK_PHY_DA_TX_I2MPB_B_TBT_MASK, + FIELD_PREP(MTK_PHY_DA_TX_I2MPB_B_TBT_MASK, + buf[1] + bias[5])); phy_modify_mmd(phydev, MDIO_MMD_VEND1, MTK_PHY_TX_I2MPB_TEST_MODE_B2, - MTK_PHY_DA_TX_I2MPB_B_HBT_MASK, (buf[1] + bias[6]) << 8); + MTK_PHY_DA_TX_I2MPB_B_HBT_MASK, + FIELD_PREP(MTK_PHY_DA_TX_I2MPB_B_HBT_MASK, + buf[1] + bias[6])); phy_modify_mmd(phydev, MDIO_MMD_VEND1, MTK_PHY_TX_I2MPB_TEST_MODE_B2, - MTK_PHY_DA_TX_I2MPB_B_TST_MASK, buf[1] + bias[7]); + MTK_PHY_DA_TX_I2MPB_B_TST_MASK, + FIELD_PREP(MTK_PHY_DA_TX_I2MPB_B_TST_MASK, + buf[1] + bias[7])); phy_modify_mmd(phydev, MDIO_MMD_VEND1, MTK_PHY_TX_I2MPB_TEST_MODE_C1, - MTK_PHY_DA_TX_I2MPB_C_GBE_MASK, (buf[2] + bias[8]) << 8); + MTK_PHY_DA_TX_I2MPB_C_GBE_MASK, + FIELD_PREP(MTK_PHY_DA_TX_I2MPB_C_GBE_MASK, + buf[2] + bias[8])); phy_modify_mmd(phydev, MDIO_MMD_VEND1, MTK_PHY_TX_I2MPB_TEST_MODE_C1, - MTK_PHY_DA_TX_I2MPB_C_TBT_MASK, buf[2] + bias[9]); + MTK_PHY_DA_TX_I2MPB_C_TBT_MASK, + FIELD_PREP(MTK_PHY_DA_TX_I2MPB_C_TBT_MASK, + buf[2] + bias[9])); phy_modify_mmd(phydev, MDIO_MMD_VEND1, MTK_PHY_TX_I2MPB_TEST_MODE_C2, - MTK_PHY_DA_TX_I2MPB_C_HBT_MASK, (buf[2] + bias[10]) << 8); + MTK_PHY_DA_TX_I2MPB_C_HBT_MASK, + FIELD_PREP(MTK_PHY_DA_TX_I2MPB_C_HBT_MASK, + buf[2] + bias[10])); phy_modify_mmd(phydev, MDIO_MMD_VEND1, MTK_PHY_TX_I2MPB_TEST_MODE_C2, - MTK_PHY_DA_TX_I2MPB_C_TST_MASK, buf[2] + bias[11]); + MTK_PHY_DA_TX_I2MPB_C_TST_MASK, + FIELD_PREP(MTK_PHY_DA_TX_I2MPB_C_TST_MASK, + buf[2] + bias[11])); phy_modify_mmd(phydev, MDIO_MMD_VEND1, MTK_PHY_TX_I2MPB_TEST_MODE_D1, - MTK_PHY_DA_TX_I2MPB_D_GBE_MASK, (buf[3] + bias[12]) << 8); + MTK_PHY_DA_TX_I2MPB_D_GBE_MASK, + FIELD_PREP(MTK_PHY_DA_TX_I2MPB_D_GBE_MASK, + buf[3] + bias[12])); phy_modify_mmd(phydev, MDIO_MMD_VEND1, MTK_PHY_TX_I2MPB_TEST_MODE_D1, - MTK_PHY_DA_TX_I2MPB_D_TBT_MASK, buf[3] + bias[13]); + MTK_PHY_DA_TX_I2MPB_D_TBT_MASK, + FIELD_PREP(MTK_PHY_DA_TX_I2MPB_D_TBT_MASK, + buf[3] + bias[13])); phy_modify_mmd(phydev, MDIO_MMD_VEND1, MTK_PHY_TX_I2MPB_TEST_MODE_D2, - MTK_PHY_DA_TX_I2MPB_D_HBT_MASK, (buf[3] + bias[14]) << 8); + MTK_PHY_DA_TX_I2MPB_D_HBT_MASK, + FIELD_PREP(MTK_PHY_DA_TX_I2MPB_D_HBT_MASK, + buf[3] + bias[14])); phy_modify_mmd(phydev, MDIO_MMD_VEND1, MTK_PHY_TX_I2MPB_TEST_MODE_D2, - MTK_PHY_DA_TX_I2MPB_D_TST_MASK, buf[3] + bias[15]); + MTK_PHY_DA_TX_I2MPB_D_TST_MASK, + FIELD_PREP(MTK_PHY_DA_TX_I2MPB_D_TST_MASK, + buf[3] + bias[15])); return 0; } @@ -662,7 +698,8 @@ static int tx_vcm_cal_sw(struct phy_device *phydev, u8 rg_txreserve_x) goto restore; /* We calibrate TX-VCM in different logic. Check upper index and then - * lower index. If this calibration is valid, apply lower index's result. + * lower index. 
If this calibration is valid, apply lower index's + * result. */ ret = upper_ret - lower_ret; if (ret == 1) { @@ -691,7 +728,8 @@ static int tx_vcm_cal_sw(struct phy_device *phydev, u8 rg_txreserve_x) } else if (upper_idx == TXRESERVE_MAX && upper_ret == 0 && lower_ret == 0) { ret = 0; - phydev_warn(phydev, "TX-VCM SW cal result at high margin 0x%x\n", + phydev_warn(phydev, + "TX-VCM SW cal result at high margin 0x%x\n", upper_idx); } else { ret = -EINVAL; @@ -795,7 +833,8 @@ static void mt7981_phy_finetune(struct phy_device *phydev) /* TR_OPEN_LOOP_EN = 1, lpf_x_average = 9 */ phy_modify_mmd(phydev, MDIO_MMD_VEND1, MTK_PHY_RG_DEV1E_REG234, - MTK_PHY_TR_OPEN_LOOP_EN_MASK | MTK_PHY_LPF_X_AVERAGE_MASK, + MTK_PHY_TR_OPEN_LOOP_EN_MASK | + MTK_PHY_LPF_X_AVERAGE_MASK, BIT(0) | FIELD_PREP(MTK_PHY_LPF_X_AVERAGE_MASK, 0x9)); /* rg_tr_lpf_cnt_val = 512 */ @@ -864,7 +903,8 @@ static void mt7988_phy_finetune(struct phy_device *phydev) /* TR_OPEN_LOOP_EN = 1, lpf_x_average = 10 */ phy_modify_mmd(phydev, MDIO_MMD_VEND1, MTK_PHY_RG_DEV1E_REG234, - MTK_PHY_TR_OPEN_LOOP_EN_MASK | MTK_PHY_LPF_X_AVERAGE_MASK, + MTK_PHY_TR_OPEN_LOOP_EN_MASK | + MTK_PHY_LPF_X_AVERAGE_MASK, BIT(0) | FIELD_PREP(MTK_PHY_LPF_X_AVERAGE_MASK, 0xa)); /* rg_tr_lpf_cnt_val = 1023 */ @@ -976,7 +1016,8 @@ static void mt798x_phy_eee(struct phy_device *phydev) phy_restore_page(phydev, MTK_PHY_PAGE_STANDARD, 0); phy_select_page(phydev, MTK_PHY_PAGE_EXTENDED_3); - __phy_modify(phydev, MTK_PHY_LPI_REG_14, MTK_PHY_LPI_WAKE_TIMER_1000_MASK, + __phy_modify(phydev, MTK_PHY_LPI_REG_14, + MTK_PHY_LPI_WAKE_TIMER_1000_MASK, FIELD_PREP(MTK_PHY_LPI_WAKE_TIMER_1000_MASK, 0x19c)); __phy_modify(phydev, MTK_PHY_LPI_REG_1c, MTK_PHY_SMI_DET_ON_THRESH_MASK, @@ -986,7 +1027,8 @@ static void mt798x_phy_eee(struct phy_device *phydev) phy_modify_mmd(phydev, MDIO_MMD_VEND1, MTK_PHY_RG_LPI_PCS_DSP_CTRL_REG122, MTK_PHY_LPI_NORM_MSE_HI_THRESH1000_MASK, - FIELD_PREP(MTK_PHY_LPI_NORM_MSE_HI_THRESH1000_MASK, 0xff)); + FIELD_PREP(MTK_PHY_LPI_NORM_MSE_HI_THRESH1000_MASK, + 0xff)); } static int cal_sw(struct phy_device *phydev, enum CAL_ITEM cal_item, @@ -1069,10 +1111,10 @@ static int start_cal(struct phy_device *phydev, enum CAL_ITEM cal_item, static int mt798x_phy_calibration(struct phy_device *phydev) { + struct nvmem_cell *cell; int ret = 0; - u32 *buf; size_t len; - struct nvmem_cell *cell; + u32 *buf; cell = nvmem_cell_get(&phydev->mdio.dev, "phy-cal-data"); if (IS_ERR(cell)) { @@ -1146,7 +1188,8 @@ static int mt798x_phy_hw_led_on_set(struct phy_device *phydev, u8 index, (index ? 16 : 0), &priv->led_state); if (changed) return phy_modify_mmd(phydev, MDIO_MMD_VEND2, index ? - MTK_PHY_LED1_ON_CTRL : MTK_PHY_LED0_ON_CTRL, + MTK_PHY_LED1_ON_CTRL : + MTK_PHY_LED0_ON_CTRL, MTK_PHY_LED_ON_MASK, on ? MTK_PHY_LED_ON_FORCE_ON : 0); else @@ -1156,7 +1199,8 @@ static int mt798x_phy_hw_led_on_set(struct phy_device *phydev, u8 index, static int mt798x_phy_hw_led_blink_set(struct phy_device *phydev, u8 index, bool blinking) { - unsigned int bit_blink = MTK_PHY_LED_STATE_FORCE_BLINK + (index ? 16 : 0); + unsigned int bit_blink = MTK_PHY_LED_STATE_FORCE_BLINK + + (index ? 16 : 0); struct mtk_socphy_priv *priv = phydev->priv; bool changed; @@ -1169,8 +1213,10 @@ static int mt798x_phy_hw_led_blink_set(struct phy_device *phydev, u8 index, (index ? 16 : 0), &priv->led_state); if (changed) return phy_write_mmd(phydev, MDIO_MMD_VEND2, index ? - MTK_PHY_LED1_BLINK_CTRL : MTK_PHY_LED0_BLINK_CTRL, - blinking ? 
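For context on the LED lines being re-wrapped here: both MediaTek LEDs share one unsigned long priv->led_state, LED0 in the low 16 bits and LED1 in the same flags shifted up by 16, which is why every bit position is computed as FLAG + (index ? 16 : 0). A simplified sketch of the bookkeeping (the test_and_set_bit() form below is an assumption for illustration, not the driver's exact helper):

	static bool demo_mark_blink(struct mtk_socphy_priv *priv, u8 index)
	{
		/* bit index of this LED's force-blink flag; +16 selects LED1 */
		unsigned int bit_blink = MTK_PHY_LED_STATE_FORCE_BLINK +
					 (index ? 16 : 0);

		/* old bit already set means no register write is needed */
		return !test_and_set_bit(bit_blink, &priv->led_state);
	}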
MTK_PHY_LED_BLINK_FORCE_BLINK : 0); + MTK_PHY_LED1_BLINK_CTRL : + MTK_PHY_LED0_BLINK_CTRL, + blinking ? + MTK_PHY_LED_BLINK_FORCE_BLINK : 0); else return 0; } @@ -1210,14 +1256,15 @@ static int mt798x_phy_led_brightness_set(struct phy_device *phydev, return mt798x_phy_hw_led_on_set(phydev, index, (value != LED_OFF)); } -static const unsigned long supported_triggers = (BIT(TRIGGER_NETDEV_FULL_DUPLEX) | - BIT(TRIGGER_NETDEV_HALF_DUPLEX) | - BIT(TRIGGER_NETDEV_LINK) | - BIT(TRIGGER_NETDEV_LINK_10) | - BIT(TRIGGER_NETDEV_LINK_100) | - BIT(TRIGGER_NETDEV_LINK_1000) | - BIT(TRIGGER_NETDEV_RX) | - BIT(TRIGGER_NETDEV_TX)); +static const unsigned long supported_triggers = + BIT(TRIGGER_NETDEV_FULL_DUPLEX) | + BIT(TRIGGER_NETDEV_HALF_DUPLEX) | + BIT(TRIGGER_NETDEV_LINK) | + BIT(TRIGGER_NETDEV_LINK_10) | + BIT(TRIGGER_NETDEV_LINK_100) | + BIT(TRIGGER_NETDEV_LINK_1000) | + BIT(TRIGGER_NETDEV_RX) | + BIT(TRIGGER_NETDEV_TX); static int mt798x_phy_led_hw_is_supported(struct phy_device *phydev, u8 index, unsigned long rules) @@ -1235,7 +1282,8 @@ static int mt798x_phy_led_hw_is_supported(struct phy_device *phydev, u8 index, static int mt798x_phy_led_hw_control_get(struct phy_device *phydev, u8 index, unsigned long *rules) { - unsigned int bit_blink = MTK_PHY_LED_STATE_FORCE_BLINK + (index ? 16 : 0); + unsigned int bit_blink = MTK_PHY_LED_STATE_FORCE_BLINK + + (index ? 16 : 0); unsigned int bit_netdev = MTK_PHY_LED_STATE_NETDEV + (index ? 16 : 0); unsigned int bit_on = MTK_PHY_LED_STATE_FORCE_ON + (index ? 16 : 0); struct mtk_socphy_priv *priv = phydev->priv; @@ -1256,8 +1304,8 @@ static int mt798x_phy_led_hw_control_get(struct phy_device *phydev, u8 index, if (blink < 0) return -EIO; - if ((on & (MTK_PHY_LED_ON_LINK | MTK_PHY_LED_ON_FDX | MTK_PHY_LED_ON_HDX | - MTK_PHY_LED_ON_LINKDOWN)) || + if ((on & (MTK_PHY_LED_ON_LINK | MTK_PHY_LED_ON_FDX | + MTK_PHY_LED_ON_HDX | MTK_PHY_LED_ON_LINKDOWN)) || (blink & (MTK_PHY_LED_BLINK_RX | MTK_PHY_LED_BLINK_TX))) set_bit(bit_netdev, &priv->led_state); else @@ -1331,17 +1379,23 @@ static int mt798x_phy_led_hw_control_set(struct phy_device *phydev, u8 index, if (rules & BIT(TRIGGER_NETDEV_RX)) { blink |= (on & MTK_PHY_LED_ON_LINK) ? - (((on & MTK_PHY_LED_ON_LINK10) ? MTK_PHY_LED_BLINK_10RX : 0) | - ((on & MTK_PHY_LED_ON_LINK100) ? MTK_PHY_LED_BLINK_100RX : 0) | - ((on & MTK_PHY_LED_ON_LINK1000) ? MTK_PHY_LED_BLINK_1000RX : 0)) : + (((on & MTK_PHY_LED_ON_LINK10) ? + MTK_PHY_LED_BLINK_10RX : 0) | + ((on & MTK_PHY_LED_ON_LINK100) ? + MTK_PHY_LED_BLINK_100RX : 0) | + ((on & MTK_PHY_LED_ON_LINK1000) ? + MTK_PHY_LED_BLINK_1000RX : 0)) : MTK_PHY_LED_BLINK_RX; } if (rules & BIT(TRIGGER_NETDEV_TX)) { blink |= (on & MTK_PHY_LED_ON_LINK) ? - (((on & MTK_PHY_LED_ON_LINK10) ? MTK_PHY_LED_BLINK_10TX : 0) | - ((on & MTK_PHY_LED_ON_LINK100) ? MTK_PHY_LED_BLINK_100TX : 0) | - ((on & MTK_PHY_LED_ON_LINK1000) ? MTK_PHY_LED_BLINK_1000TX : 0)) : + (((on & MTK_PHY_LED_ON_LINK10) ? + MTK_PHY_LED_BLINK_10TX : 0) | + ((on & MTK_PHY_LED_ON_LINK100) ? + MTK_PHY_LED_BLINK_100TX : 0) | + ((on & MTK_PHY_LED_ON_LINK1000) ? 
+ MTK_PHY_LED_BLINK_1000TX : 0)) : MTK_PHY_LED_BLINK_TX; } @@ -1398,7 +1452,8 @@ static int mt7988_phy_fix_leds_polarities(struct phy_device *phydev) /* Only now setup pinctrl to avoid bogus blinking */ pinctrl = devm_pinctrl_get_select(&phydev->mdio.dev, "gbe-led"); if (IS_ERR(pinctrl)) - dev_err(&phydev->mdio.bus->dev, "Failed to setup PHY LED pinctrl\n"); + dev_err(&phydev->mdio.bus->dev, + "Failed to setup PHY LED pinctrl\n"); return 0; } @@ -1415,7 +1470,7 @@ static int mt7988_phy_probe_shared(struct phy_device *phydev) * LED_C and LED_D respectively. At the same time those pins are used to * bootstrap configuration of the reference clock source (LED_A), * DRAM DDRx16b x2/x1 (LED_B) and boot device (LED_C, LED_D). - * In practise this is done using a LED and a resistor pulling the pin + * In practice this is done using a LED and a resistor pulling the pin * either to GND or to VIO. * The detected value at boot time is accessible at run-time using the * TPBANK0 register located in the gpio base of the pinctrl, in order diff --git a/drivers/net/phy/microchip_t1.c b/drivers/net/phy/microchip_t1.c index a5ef8fe50704..71d6050b2833 100644 --- a/drivers/net/phy/microchip_t1.c +++ b/drivers/net/phy/microchip_t1.c @@ -6,6 +6,7 @@ #include <linux/delay.h> #include <linux/mii.h> #include <linux/phy.h> +#include <linux/sort.h> #include <linux/ethtool.h> #include <linux/ethtool_netlink.h> #include <linux/bitfield.h> @@ -226,6 +227,47 @@ #define MICROCHIP_CABLE_MAX_TIME_DIFF \ (MICROCHIP_CABLE_MIN_TIME_DIFF + MICROCHIP_CABLE_TIME_MARGIN) +#define LAN887X_INT_STS 0xf000 +#define LAN887X_INT_MSK 0xf001 +#define LAN887X_INT_MSK_T1_PHY_INT_MSK BIT(2) +#define LAN887X_INT_MSK_LINK_UP_MSK BIT(1) +#define LAN887X_INT_MSK_LINK_DOWN_MSK BIT(0) + +#define LAN887X_MX_CHIP_TOP_LINK_MSK (LAN887X_INT_MSK_LINK_UP_MSK |\ + LAN887X_INT_MSK_LINK_DOWN_MSK) + +#define LAN887X_MX_CHIP_TOP_ALL_MSK (LAN887X_INT_MSK_T1_PHY_INT_MSK |\ + LAN887X_MX_CHIP_TOP_LINK_MSK) + +#define LAN887X_COEFF_PWR_DN_CONFIG_100 0x0404 +#define LAN887X_COEFF_PWR_DN_CONFIG_100_V 0x16d6 +#define LAN887X_SQI_CONFIG_100 0x042e +#define LAN887X_SQI_CONFIG_100_V 0x9572 +#define LAN887X_SQI_MSE_100 0x483 + +#define LAN887X_POKE_PEEK_100 0x040d +#define LAN887X_POKE_PEEK_100_EN BIT(0) + +#define LAN887X_COEFF_MOD_CONFIG 0x080d +#define LAN887X_COEFF_MOD_CONFIG_DCQ_COEFF_EN BIT(8) + +#define LAN887X_DCQ_SQI_STATUS 0x08b2 + +/* SQI raw samples count */ +#define SQI_SAMPLES 200 + +/* Samples percentage considered for SQI calculation */ +#define SQI_INLINERS_PERCENT 60 + +/* Samples count considered for SQI calculation */ +#define SQI_INLIERS_NUM (SQI_SAMPLES * SQI_INLINERS_PERCENT / 100) + +/* Start offset of samples */ +#define SQI_INLIERS_START ((SQI_SAMPLES - SQI_INLIERS_NUM) / 2) + +/* End offset of samples */ +#define SQI_INLIERS_END (SQI_INLIERS_START + SQI_INLIERS_NUM) + #define DRIVER_AUTHOR "Nisar Sayed <nisar.sayed@microchip.com>" #define DRIVER_DESC "Microchip LAN87XX/LAN937x/LAN887x T1 PHY driver" @@ -1474,6 +1516,49 @@ static void lan887x_get_strings(struct phy_device *phydev, u8 *data) ethtool_puts(&data, lan887x_hw_stats[i].string); } +static int lan887x_config_intr(struct phy_device *phydev) +{ + int rc; + + if (phydev->interrupts == PHY_INTERRUPT_ENABLED) { + /* Clear the interrupt status before enabling interrupts */ + rc = phy_read_mmd(phydev, MDIO_MMD_VEND1, LAN887X_INT_STS); + if (rc < 0) + return rc; + + /* Unmask for enabling interrupt */ + rc = phy_write_mmd(phydev, MDIO_MMD_VEND1, LAN887X_INT_MSK, + 
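On the LAN887x interrupt plumbing added in this hunk: LAN887X_INT_MSK is a mask register, so enabling interrupts writes the complement of LAN887X_MX_CHIP_TOP_ALL_MSK (unmasking only the link up/down and T1 PHY sources), while disabling writes GENMASK(15, 0) to mask every source. The LAN887X_INT_STS reads on both paths rely on the status register clearing on read, so no stale event can fire the moment the lines are unmasked.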
(u16)~LAN887X_MX_CHIP_TOP_ALL_MSK); + } else { + rc = phy_write_mmd(phydev, MDIO_MMD_VEND1, LAN887X_INT_MSK, + GENMASK(15, 0)); + if (rc < 0) + return rc; + + rc = phy_read_mmd(phydev, MDIO_MMD_VEND1, LAN887X_INT_STS); + } + + return rc < 0 ? rc : 0; +} + +static irqreturn_t lan887x_handle_interrupt(struct phy_device *phydev) +{ + int irq_status; + + irq_status = phy_read_mmd(phydev, MDIO_MMD_VEND1, LAN887X_INT_STS); + if (irq_status < 0) { + phy_error(phydev); + return IRQ_NONE; + } + + if (irq_status & LAN887X_MX_CHIP_TOP_LINK_MSK) { + phy_trigger_machine(phydev); + return IRQ_HANDLED; + } + + return IRQ_NONE; +} + static int lan887x_cd_reset(struct phy_device *phydev, enum cable_diag_state cd_done) { @@ -1504,6 +1589,10 @@ static int lan887x_cd_reset(struct phy_device *phydev, if (rc < 0) return rc; + rc = lan887x_config_intr(phydev); + if (rc < 0) + return rc; + rc = lan887x_phy_reconfig(phydev); if (rc < 0) return rc; @@ -1830,6 +1919,145 @@ static int lan887x_cable_test_get_status(struct phy_device *phydev, return lan887x_cable_test_report(phydev); } +/* Compare block to sort in ascending order */ +static int sqi_compare(const void *a, const void *b) +{ + return *(u16 *)a - *(u16 *)b; +} + +static int lan887x_get_sqi_100M(struct phy_device *phydev) +{ + u16 rawtable[SQI_SAMPLES]; + u32 sqiavg = 0; + u8 sqinum = 0; + int rc, i; + + /* Configuration of SQI 100M */ + rc = phy_write_mmd(phydev, MDIO_MMD_VEND1, + LAN887X_COEFF_PWR_DN_CONFIG_100, + LAN887X_COEFF_PWR_DN_CONFIG_100_V); + if (rc < 0) + return rc; + + rc = phy_write_mmd(phydev, MDIO_MMD_VEND1, LAN887X_SQI_CONFIG_100, + LAN887X_SQI_CONFIG_100_V); + if (rc < 0) + return rc; + + rc = phy_read_mmd(phydev, MDIO_MMD_VEND1, LAN887X_SQI_CONFIG_100); + if (rc != LAN887X_SQI_CONFIG_100_V) + return -EINVAL; + + rc = phy_modify_mmd(phydev, MDIO_MMD_VEND1, LAN887X_POKE_PEEK_100, + LAN887X_POKE_PEEK_100_EN, + LAN887X_POKE_PEEK_100_EN); + if (rc < 0) + return rc; + + /* Required before reading register + * otherwise it will return high value + */ + msleep(50); + + /* Link check before raw readings */ + rc = genphy_c45_read_link(phydev); + if (rc < 0) + return rc; + + if (!phydev->link) + return -ENETDOWN; + + /* Get 200 SQI raw readings */ + for (i = 0; i < SQI_SAMPLES; i++) { + rc = phy_write_mmd(phydev, MDIO_MMD_VEND1, + LAN887X_POKE_PEEK_100, + LAN887X_POKE_PEEK_100_EN); + if (rc < 0) + return rc; + + rc = phy_read_mmd(phydev, MDIO_MMD_VEND1, + LAN887X_SQI_MSE_100); + if (rc < 0) + return rc; + + rawtable[i] = (u16)rc; + } + + /* Link check after raw readings */ + rc = genphy_c45_read_link(phydev); + if (rc < 0) + return rc; + + if (!phydev->link) + return -ENETDOWN; + + /* Sort SQI raw readings in ascending order */ + sort(rawtable, SQI_SAMPLES, sizeof(u16), sqi_compare, NULL); + + /* Keep inliers and discard outliers */ + for (i = SQI_INLIERS_START; i < SQI_INLIERS_END; i++) + sqiavg += rawtable[i]; + + /* Handle invalid samples */ + if (sqiavg != 0) { + /* Get SQI average */ + sqiavg /= SQI_INLIERS_NUM; + + if (sqiavg < 75) + sqinum = 7; + else if (sqiavg < 94) + sqinum = 6; + else if (sqiavg < 119) + sqinum = 5; + else if (sqiavg < 150) + sqinum = 4; + else if (sqiavg < 189) + sqinum = 3; + else if (sqiavg < 237) + sqinum = 2; + else if (sqiavg < 299) + sqinum = 1; + else + sqinum = 0; + } + + return sqinum; +} + +static int lan887x_get_sqi(struct phy_device *phydev) +{ + int rc, val; + + if (phydev->speed != SPEED_1000 && + phydev->speed != SPEED_100) + return -ENETDOWN; + + if (phydev->speed == SPEED_100) + return 
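The 100BASE-T1 SQI path in lan887x_get_sqi_100M() is a classic trimmed mean. With SQI_SAMPLES = 200 and SQI_INLINERS_PERCENT = 60, SQI_INLIERS_NUM works out to 120, SQI_INLIERS_START to 40 and SQI_INLIERS_END to 160, so after the sort() the 40 smallest and 40 largest MSE readings are discarded and only the central 120 are averaged. The threshold ladder then maps the mean onto the 0-7 scale, where a lower mean squared error means a cleaner line: SQI 7 for an average below 75, down to SQI 0 for 299 and above.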
lan887x_get_sqi_100M(phydev); + + /* Writing DCQ_COEFF_EN to trigger a SQI read */ + rc = phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, + LAN887X_COEFF_MOD_CONFIG, + LAN887X_COEFF_MOD_CONFIG_DCQ_COEFF_EN); + if (rc < 0) + return rc; + + /* Wait for DCQ done */ + rc = phy_read_mmd_poll_timeout(phydev, MDIO_MMD_VEND1, + LAN887X_COEFF_MOD_CONFIG, val, ((val & + LAN887X_COEFF_MOD_CONFIG_DCQ_COEFF_EN) != + LAN887X_COEFF_MOD_CONFIG_DCQ_COEFF_EN), + 10, 200, true); + if (rc < 0) + return rc; + + rc = phy_read_mmd(phydev, MDIO_MMD_VEND1, LAN887X_DCQ_SQI_STATUS); + if (rc < 0) + return rc; + + return FIELD_GET(T1_DCQ_SQI_MSK, rc); +} + static struct phy_driver microchip_t1_phy_driver[] = { { PHY_ID_MATCH_MODEL(PHY_ID_LAN87XX), @@ -1881,6 +2109,10 @@ static struct phy_driver microchip_t1_phy_driver[] = { .read_status = genphy_c45_read_status, .cable_test_start = lan887x_cable_test_start, .cable_test_get_status = lan887x_cable_test_get_status, + .config_intr = lan887x_config_intr, + .handle_interrupt = lan887x_handle_interrupt, + .get_sqi = lan887x_get_sqi, + .get_sqi_max = lan87xx_get_sqi_max, } }; diff --git a/drivers/net/phy/microchip_t1s.c b/drivers/net/phy/microchip_t1s.c index 3614839a8e51..75d291154b4c 100644 --- a/drivers/net/phy/microchip_t1s.c +++ b/drivers/net/phy/microchip_t1s.c @@ -3,8 +3,8 @@ * Driver for Microchip 10BASE-T1S PHYs * * Support: Microchip Phys: - * lan8670/1/2 Rev.B1 - * lan8650/1 Rev.B0 Internal PHYs + * lan8670/1/2 Rev.B1/C1/C2 + * lan8650/1 Rev.B0/B1 Internal PHYs */ #include <linux/kernel.h> @@ -12,7 +12,10 @@ #include <linux/phy.h> #define PHY_ID_LAN867X_REVB1 0x0007C162 -#define PHY_ID_LAN865X_REVB0 0x0007C1B3 +#define PHY_ID_LAN867X_REVC1 0x0007C164 +#define PHY_ID_LAN867X_REVC2 0x0007C165 +/* Both Rev.B0 and B1 clause 22 PHYID's are same due to B1 chip limitation */ +#define PHY_ID_LAN865X_REVB 0x0007C1B3 #define LAN867X_REG_STS2 0x0019 @@ -23,6 +26,12 @@ #define LAN865X_REG_CFGPARAM_CTRL 0x00DA #define LAN865X_REG_STS2 0x0019 +/* Collision Detector Control 0 Register */ +#define LAN86XX_REG_COL_DET_CTRL0 0x0087 +#define COL_DET_CTRL0_ENABLE_BIT_MASK BIT(15) +#define COL_DET_ENABLE BIT(15) +#define COL_DET_DISABLE 0x0000 + #define LAN865X_CFGPARAM_READ_ENABLE BIT(1) /* The arrays below are pulled from the following table from AN1699 @@ -59,29 +68,45 @@ static const u16 lan867x_revb1_fixup_masks[12] = { 0x0600, 0x7F00, 0x2000, 0xFFFF, }; -/* LAN865x Rev.B0 configuration parameters from AN1760 */ -static const u32 lan865x_revb0_fixup_registers[28] = { - 0x0091, 0x0081, 0x0043, 0x0044, - 0x0045, 0x0053, 0x0054, 0x0055, - 0x0040, 0x0050, 0x00D0, 0x00E9, - 0x00F5, 0x00F4, 0x00F8, 0x00F9, +/* LAN865x Rev.B0/B1 configuration parameters from AN1760 + * As per the Configuration Application Note AN1760 published in the below link, + * https://www.microchip.com/en-us/application-notes/an1760 + * Revision F (DS60001760G - June 2024) + */ +static const u32 lan865x_revb_fixup_registers[17] = { + 0x00D0, 0x00E0, 0x00E9, 0x00F5, + 0x00F4, 0x00F8, 0x00F9, 0x0081, + 0x0091, 0x0043, 0x0044, 0x0045, + 0x0053, 0x0054, 0x0055, 0x0040, + 0x0050, +}; + +static const u16 lan865x_revb_fixup_values[17] = { + 0x3F31, 0xC000, 0x9E50, 0x1CF8, + 0xC020, 0xB900, 0x4E53, 0x0080, + 0x9660, 0x00FF, 0xFFFF, 0x0000, + 0x00FF, 0xFFFF, 0x0000, 0x0002, + 0x0002, +}; + +static const u16 lan865x_revb_fixup_cfg_regs[2] = { + 0x0084, 0x008A, +}; + +static const u32 lan865x_revb_sqi_fixup_regs[12] = { 0x00B0, 0x00B1, 0x00B2, 0x00B3, 0x00B4, 0x00B5, 0x00B6, 0x00B7, 0x00B8, 0x00B9, 0x00BA, 0x00BB, }; -static const 
u16 lan865x_revb0_fixup_values[28] = { - 0x9660, 0x00C0, 0x00FF, 0xFFFF, - 0x0000, 0x00FF, 0xFFFF, 0x0000, - 0x0002, 0x0002, 0x5F21, 0x9E50, - 0x1CF8, 0xC020, 0x9B00, 0x4E53, +static const u16 lan865x_revb_sqi_fixup_values[12] = { 0x0103, 0x0910, 0x1D26, 0x002A, 0x0103, 0x070D, 0x1720, 0x0027, 0x0509, 0x0E13, 0x1C25, 0x002B, }; -static const u16 lan865x_revb0_fixup_cfg_regs[5] = { - 0x0084, 0x008A, 0x00AD, 0x00AE, 0x00AF +static const u16 lan865x_revb_sqi_fixup_cfg_regs[3] = { + 0x00AD, 0x00AE, 0x00AF, }; /* Pulled from AN1760 describing 'indirect read' @@ -92,7 +117,7 @@ static const u16 lan865x_revb0_fixup_cfg_regs[5] = { * * 0x4 refers to memory map selector 4, which maps to MDIO_MMD_VEND2 */ -static int lan865x_revb0_indirect_read(struct phy_device *phydev, u16 addr) +static int lan865x_revb_indirect_read(struct phy_device *phydev, u16 addr) { int ret; @@ -112,15 +137,18 @@ static int lan865x_revb0_indirect_read(struct phy_device *phydev, u16 addr) /* This is pulled straight from AN1760 from 'calculation of offset 1' & * 'calculation of offset 2' */ -static int lan865x_generate_cfg_offsets(struct phy_device *phydev, s8 offsets[2]) +static int lan865x_generate_cfg_offsets(struct phy_device *phydev, s8 offsets[]) { const u16 fixup_regs[2] = {0x0004, 0x0008}; int ret; for (int i = 0; i < ARRAY_SIZE(fixup_regs); i++) { - ret = lan865x_revb0_indirect_read(phydev, fixup_regs[i]); + ret = lan865x_revb_indirect_read(phydev, fixup_regs[i]); if (ret < 0) return ret; + + /* 5-bit signed value, sign extend */ + ret &= GENMASK(4, 0); if (ret & BIT(4)) offsets[i] = ret | 0xE0; else @@ -130,13 +158,15 @@ static int lan865x_generate_cfg_offsets(struct phy_device *phydev, s8 offsets[2] return 0; } -static int lan865x_read_cfg_params(struct phy_device *phydev, u16 cfg_params[]) +static int lan865x_read_cfg_params(struct phy_device *phydev, + const u16 cfg_regs[], u16 cfg_params[], + u8 count) { int ret; - for (int i = 0; i < ARRAY_SIZE(lan865x_revb0_fixup_cfg_regs); i++) { + for (int i = 0; i < count; i++) { ret = phy_read_mmd(phydev, MDIO_MMD_VEND2, - lan865x_revb0_fixup_cfg_regs[i]); + cfg_regs[i]); if (ret < 0) return ret; cfg_params[i] = (u16)ret; @@ -145,13 +175,14 @@ static int lan865x_read_cfg_params(struct phy_device *phydev, u16 cfg_params[]) return 0; } -static int lan865x_write_cfg_params(struct phy_device *phydev, u16 cfg_params[]) +static int lan865x_write_cfg_params(struct phy_device *phydev, + const u16 cfg_regs[], u16 cfg_params[], + u8 count) { int ret; - for (int i = 0; i < ARRAY_SIZE(lan865x_revb0_fixup_cfg_regs); i++) { - ret = phy_write_mmd(phydev, MDIO_MMD_VEND2, - lan865x_revb0_fixup_cfg_regs[i], + for (int i = 0; i < count; i++) { + ret = phy_write_mmd(phydev, MDIO_MMD_VEND2, cfg_regs[i], cfg_params[i]); if (ret) return ret; @@ -160,60 +191,90 @@ static int lan865x_write_cfg_params(struct phy_device *phydev, u16 cfg_params[]) return 0; } -static int lan865x_setup_cfgparam(struct phy_device *phydev) +static int lan865x_setup_cfgparam(struct phy_device *phydev, s8 offsets[]) { - u16 cfg_params[ARRAY_SIZE(lan865x_revb0_fixup_cfg_regs)]; - u16 cfg_results[5]; - s8 offsets[2]; + u16 cfg_results[ARRAY_SIZE(lan865x_revb_fixup_cfg_regs)]; + u16 cfg_params[ARRAY_SIZE(lan865x_revb_fixup_cfg_regs)]; int ret; - ret = lan865x_generate_cfg_offsets(phydev, offsets); + ret = lan865x_read_cfg_params(phydev, lan865x_revb_fixup_cfg_regs, + cfg_params, ARRAY_SIZE(cfg_params)); if (ret) return ret; - ret = lan865x_read_cfg_params(phydev, cfg_params); + cfg_results[0] = FIELD_PREP(GENMASK(15, 10), 9 + 
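Two details in the LAN865x offset handling deserve a note. First, the added masking in lan865x_generate_cfg_offsets() treats the indirect readback as a 5-bit two's-complement value and widens it to s8 by hand; the patch's own logic, annotated with an assumed readback:

	/* e.g. a raw readback of 0x1e (0b11110) is -2 in 5-bit two's complement */
	ret &= GENMASK(4, 0);			/* keep bits 4:0 -> 0x1e */
	if (ret & BIT(4))			/* sign bit is set */
		offsets[i] = ret | 0xE0;	/* widen: 0x1e | 0xe0 = 0xfe = -2 */
	else
		offsets[i] = ret;

Second, the cfg_results rework continuing below is a quiet bug fix: the old code OR'ed FIELD_PREP(GENMASK(15, 4), ...) on top of a GENMASK(15, 10) field, so the two encodings overlapped in bits 15:10. The new code narrows the second field to GENMASK(9, 4) and hard-codes the low bits to 0x03, keeping the values disjoint; with offsets[0] == 0 that is (9 << 10) | (14 << 4) | 0x03 = 0x24e3.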
offsets[0]) | + FIELD_PREP(GENMASK(9, 4), 14 + offsets[0]) | + 0x03; + cfg_results[1] = FIELD_PREP(GENMASK(15, 10), 40 + offsets[1]); + + return lan865x_write_cfg_params(phydev, lan865x_revb_fixup_cfg_regs, + cfg_results, ARRAY_SIZE(cfg_results)); +} + +static int lan865x_setup_sqi_cfgparam(struct phy_device *phydev, s8 offsets[]) +{ + u16 cfg_results[ARRAY_SIZE(lan865x_revb_sqi_fixup_cfg_regs)]; + u16 cfg_params[ARRAY_SIZE(lan865x_revb_sqi_fixup_cfg_regs)]; + int ret; + + ret = lan865x_read_cfg_params(phydev, lan865x_revb_sqi_fixup_cfg_regs, + cfg_params, ARRAY_SIZE(cfg_params)); if (ret) return ret; - cfg_results[0] = (cfg_params[0] & 0x000F) | - FIELD_PREP(GENMASK(15, 10), 9 + offsets[0]) | - FIELD_PREP(GENMASK(15, 4), 14 + offsets[0]); - cfg_results[1] = (cfg_params[1] & 0x03FF) | - FIELD_PREP(GENMASK(15, 10), 40 + offsets[1]); - cfg_results[2] = (cfg_params[2] & 0xC0C0) | - FIELD_PREP(GENMASK(15, 8), 5 + offsets[0]) | - (9 + offsets[0]); - cfg_results[3] = (cfg_params[3] & 0xC0C0) | - FIELD_PREP(GENMASK(15, 8), 9 + offsets[0]) | - (14 + offsets[0]); - cfg_results[4] = (cfg_params[4] & 0xC0C0) | - FIELD_PREP(GENMASK(15, 8), 17 + offsets[0]) | - (22 + offsets[0]); - - return lan865x_write_cfg_params(phydev, cfg_results); + cfg_results[0] = FIELD_PREP(GENMASK(13, 8), 5 + offsets[0]) | + (9 + offsets[0]); + cfg_results[1] = FIELD_PREP(GENMASK(13, 8), 9 + offsets[0]) | + (14 + offsets[0]); + cfg_results[2] = FIELD_PREP(GENMASK(13, 8), 17 + offsets[0]) | + (22 + offsets[0]); + + return lan865x_write_cfg_params(phydev, lan865x_revb_sqi_fixup_cfg_regs, + cfg_results, ARRAY_SIZE(cfg_results)); } -static int lan865x_revb0_config_init(struct phy_device *phydev) +static int lan865x_revb_config_init(struct phy_device *phydev) { + s8 offsets[2]; int ret; /* Reference to AN1760 * https://ww1.microchip.com/downloads/aemDocuments/documents/AIS/ProductDocuments/SupportingCollateral/AN-LAN8650-1-Configuration-60001760.pdf */ - for (int i = 0; i < ARRAY_SIZE(lan865x_revb0_fixup_registers); i++) { + ret = lan865x_generate_cfg_offsets(phydev, offsets); + if (ret) + return ret; + + for (int i = 0; i < ARRAY_SIZE(lan865x_revb_fixup_registers); i++) { ret = phy_write_mmd(phydev, MDIO_MMD_VEND2, - lan865x_revb0_fixup_registers[i], - lan865x_revb0_fixup_values[i]); + lan865x_revb_fixup_registers[i], + lan865x_revb_fixup_values[i]); if (ret) return ret; + + if (i == 1) { + ret = lan865x_setup_cfgparam(phydev, offsets); + if (ret) + return ret; + } } - /* Function to calculate and write the configuration parameters in the - * 0x0084, 0x008A, 0x00AD, 0x00AE and 0x00AF registers (from AN1760) - */ - return lan865x_setup_cfgparam(phydev); + + ret = lan865x_setup_sqi_cfgparam(phydev, offsets); + if (ret) + return ret; + + for (int i = 0; i < ARRAY_SIZE(lan865x_revb_sqi_fixup_regs); i++) { + ret = phy_write_mmd(phydev, MDIO_MMD_VEND2, + lan865x_revb_sqi_fixup_regs[i], + lan865x_revb_sqi_fixup_values[i]); + if (ret) + return ret; + } + + return 0; } -static int lan867x_revb1_config_init(struct phy_device *phydev) +static int lan867x_check_reset_complete(struct phy_device *phydev) { int err; @@ -235,6 +296,69 @@ static int lan867x_revb1_config_init(struct phy_device *phydev) } } + return 0; +} + +static int lan867x_revc_config_init(struct phy_device *phydev) +{ + s8 offsets[2]; + int ret; + + ret = lan867x_check_reset_complete(phydev); + if (ret) + return ret; + + ret = lan865x_generate_cfg_offsets(phydev, offsets); + if (ret) + return ret; + + /* LAN867x Rev.C1/C2 configuration settings are equal to the first 9 + * 
configuration settings and all the sqi fixup settings from LAN865x + * Rev.B0/B1. So the same fixup registers and values from LAN865x + * Rev.B0/B1 are used for LAN867x Rev.C1/C2 to avoid duplication. + * Refer to the links below for the comparison. + * https://www.microchip.com/en-us/application-notes/an1760 + * Revision F (DS60001760G - June 2024) + * https://www.microchip.com/en-us/application-notes/an1699 + * Revision E (DS60001699F - June 2024) + */ + for (int i = 0; i < 9; i++) { + ret = phy_write_mmd(phydev, MDIO_MMD_VEND2, + lan865x_revb_fixup_registers[i], + lan865x_revb_fixup_values[i]); + if (ret) + return ret; + + if (i == 1) { + ret = lan865x_setup_cfgparam(phydev, offsets); + if (ret) + return ret; + } + } + + ret = lan865x_setup_sqi_cfgparam(phydev, offsets); + if (ret) + return ret; + + for (int i = 0; i < ARRAY_SIZE(lan865x_revb_sqi_fixup_regs); i++) { + ret = phy_write_mmd(phydev, MDIO_MMD_VEND2, + lan865x_revb_sqi_fixup_regs[i], + lan865x_revb_sqi_fixup_values[i]); + if (ret) + return ret; + } + + return 0; +} + +static int lan867x_revb1_config_init(struct phy_device *phydev) +{ + int err; + + err = lan867x_check_reset_complete(phydev); + if (err) + return err; + /* Reference to AN1699 * https://ww1.microchip.com/downloads/aemDocuments/documents/AIS/ProductDocuments/SupportingCollateral/AN-LAN8670-1-2-config-60001699.pdf * AN1699 says Read, Modify, Write, but the Write is not required if the @@ -253,6 +377,36 @@ static int lan867x_revb1_config_init(struct phy_device *phydev) return 0; } +/* As per LAN8650/1 Rev.B0/B1 AN1760 (Revision F (DS60001760G - June 2024)) and + * LAN8670/1/2 Rev.C1/C2 AN1699 (Revision E (DS60001699F - June 2024)), under + * normal operation, the device should be operated in PLCA mode. Disabling + * collision detection is recommended to allow the device to operate in noisy + * environments or when reflections and other inherent transmission line + * distortion cause poor signal quality. Collision detection must be re-enabled + * if the device is configured to operate in CSMA/CD mode. 
+ * + * AN1760: https://www.microchip.com/en-us/application-notes/an1760 + * AN1699: https://www.microchip.com/en-us/application-notes/an1699 + */ +static int lan86xx_plca_set_cfg(struct phy_device *phydev, + const struct phy_plca_cfg *plca_cfg) +{ + int ret; + + ret = genphy_c45_plca_set_cfg(phydev, plca_cfg); + if (ret) + return ret; + + if (plca_cfg->enabled) + return phy_modify_mmd(phydev, MDIO_MMD_VEND2, + LAN86XX_REG_COL_DET_CTRL0, + COL_DET_CTRL0_ENABLE_BIT_MASK, + COL_DET_DISABLE); + + return phy_modify_mmd(phydev, MDIO_MMD_VEND2, LAN86XX_REG_COL_DET_CTRL0, + COL_DET_CTRL0_ENABLE_BIT_MASK, COL_DET_ENABLE); +} + static int lan86xx_read_status(struct phy_device *phydev) { /* The phy has some limitations, namely: @@ -308,15 +462,35 @@ static struct phy_driver microchip_t1s_driver[] = { .get_plca_status = genphy_c45_plca_get_status, }, { - PHY_ID_MATCH_EXACT(PHY_ID_LAN865X_REVB0), - .name = "LAN865X Rev.B0 Internal Phy", + PHY_ID_MATCH_EXACT(PHY_ID_LAN867X_REVC1), + .name = "LAN867X Rev.C1", .features = PHY_BASIC_T1S_P2MP_FEATURES, - .config_init = lan865x_revb0_config_init, + .config_init = lan867x_revc_config_init, + .read_status = lan86xx_read_status, + .get_plca_cfg = genphy_c45_plca_get_cfg, + .set_plca_cfg = lan86xx_plca_set_cfg, + .get_plca_status = genphy_c45_plca_get_status, + }, + { + PHY_ID_MATCH_EXACT(PHY_ID_LAN867X_REVC2), + .name = "LAN867X Rev.C2", + .features = PHY_BASIC_T1S_P2MP_FEATURES, + .config_init = lan867x_revc_config_init, + .read_status = lan86xx_read_status, + .get_plca_cfg = genphy_c45_plca_get_cfg, + .set_plca_cfg = lan86xx_plca_set_cfg, + .get_plca_status = genphy_c45_plca_get_status, + }, + { + PHY_ID_MATCH_EXACT(PHY_ID_LAN865X_REVB), + .name = "LAN865X Rev.B0/B1 Internal Phy", + .features = PHY_BASIC_T1S_P2MP_FEATURES, + .config_init = lan865x_revb_config_init, .read_status = lan86xx_read_status, .read_mmd = lan865x_phy_read_mmd, .write_mmd = lan865x_phy_write_mmd, .get_plca_cfg = genphy_c45_plca_get_cfg, - .set_plca_cfg = genphy_c45_plca_set_cfg, + .set_plca_cfg = lan86xx_plca_set_cfg, .get_plca_status = genphy_c45_plca_get_status, }, }; @@ -325,7 +499,9 @@ module_phy_driver(microchip_t1s_driver); static struct mdio_device_id __maybe_unused tbl[] = { { PHY_ID_MATCH_EXACT(PHY_ID_LAN867X_REVB1) }, - { PHY_ID_MATCH_EXACT(PHY_ID_LAN865X_REVB0) }, + { PHY_ID_MATCH_EXACT(PHY_ID_LAN867X_REVC1) }, + { PHY_ID_MATCH_EXACT(PHY_ID_LAN867X_REVC2) }, + { PHY_ID_MATCH_EXACT(PHY_ID_LAN865X_REVB) }, { } }; diff --git a/drivers/net/phy/mxl-gpy.c b/drivers/net/phy/mxl-gpy.c index e5f8ac4b4604..db3c1f72b407 100644 --- a/drivers/net/phy/mxl-gpy.c +++ b/drivers/net/phy/mxl-gpy.c @@ -38,6 +38,7 @@ #define PHY_MIISTAT 0x18 /* MII state */ #define PHY_IMASK 0x19 /* interrupt mask */ #define PHY_ISTAT 0x1A /* interrupt status */ +#define PHY_LED 0x1B /* LEDs */ #define PHY_FWV 0x1E /* firmware version */ #define PHY_MIISTAT_SPD_MASK GENMASK(2, 0) @@ -61,6 +62,11 @@ PHY_IMASK_ADSC | \ PHY_IMASK_ANC) +#define GPY_MAX_LEDS 4 +#define PHY_LED_POLARITY(idx) BIT(12 + (idx)) +#define PHY_LED_HWCONTROL(idx) BIT(8 + (idx)) +#define PHY_LED_ON(idx) BIT(idx) + #define PHY_FWV_REL_MASK BIT(15) #define PHY_FWV_MAJOR_MASK GENMASK(11, 8) #define PHY_FWV_MINOR_MASK GENMASK(7, 0) @@ -72,6 +78,23 @@ #define PHY_MDI_MDI_X_CD 0x1 #define PHY_MDI_MDI_X_CROSS 0x0 +/* LED */ +#define VSPEC1_LED(idx) (1 + (idx)) +#define VSPEC1_LED_BLINKS GENMASK(15, 12) +#define VSPEC1_LED_PULSE GENMASK(11, 8) +#define VSPEC1_LED_CON GENMASK(7, 4) +#define VSPEC1_LED_BLINKF GENMASK(3, 0) + +#define VSPEC1_LED_LINK10 
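Worth spelling out the inversion in lan86xx_plca_set_cfg() above: after the generic genphy_c45_plca_set_cfg() call, plca_cfg->enabled writes COL_DET_DISABLE (0x0000) into the COL_DET_CTRL0_ENABLE_BIT_MASK field, i.e. turning PLCA on switches collision detection off, while turning PLCA off (plain CSMA/CD) writes COL_DET_ENABLE (BIT(15)) to arm it again, exactly as the AN1760/AN1699 guidance quoted in the comment requires.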
BIT(0) +#define VSPEC1_LED_LINK100 BIT(1) +#define VSPEC1_LED_LINK1000 BIT(2) +#define VSPEC1_LED_LINK2500 BIT(3) + +#define VSPEC1_LED_TXACT BIT(0) +#define VSPEC1_LED_RXACT BIT(1) +#define VSPEC1_LED_COL BIT(2) +#define VSPEC1_LED_NO_CON BIT(3) + /* SGMII */ #define VSPEC1_SGMII_CTRL 0x08 #define VSPEC1_SGMII_CTRL_ANEN BIT(12) /* Aneg enable */ @@ -835,6 +858,165 @@ static int gpy115_loopback(struct phy_device *phydev, bool enable) return genphy_soft_reset(phydev); } +static int gpy_led_brightness_set(struct phy_device *phydev, + u8 index, enum led_brightness value) +{ + int ret; + + if (index >= GPY_MAX_LEDS) + return -EINVAL; + + /* clear HWCONTROL and set manual LED state */ + ret = phy_modify(phydev, PHY_LED, + ((value == LED_OFF) ? PHY_LED_HWCONTROL(index) : 0) | + PHY_LED_ON(index), + (value == LED_OFF) ? 0 : PHY_LED_ON(index)); + if (ret) + return ret; + + /* ToDo: set PWM brightness */ + + /* clear HW LED setup */ + if (value == LED_OFF) + return phy_write_mmd(phydev, MDIO_MMD_VEND1, VSPEC1_LED(index), 0); + else + return 0; +} + +static const unsigned long supported_triggers = (BIT(TRIGGER_NETDEV_LINK) | + BIT(TRIGGER_NETDEV_LINK_10) | + BIT(TRIGGER_NETDEV_LINK_100) | + BIT(TRIGGER_NETDEV_LINK_1000) | + BIT(TRIGGER_NETDEV_LINK_2500) | + BIT(TRIGGER_NETDEV_RX) | + BIT(TRIGGER_NETDEV_TX)); + +static int gpy_led_hw_is_supported(struct phy_device *phydev, u8 index, + unsigned long rules) +{ + if (index >= GPY_MAX_LEDS) + return -EINVAL; + + /* All combinations of the supported triggers are allowed */ + if (rules & ~supported_triggers) + return -EOPNOTSUPP; + + return 0; +} + +static int gpy_led_hw_control_get(struct phy_device *phydev, u8 index, + unsigned long *rules) +{ + int val; + + if (index >= GPY_MAX_LEDS) + return -EINVAL; + + val = phy_read_mmd(phydev, MDIO_MMD_VEND1, VSPEC1_LED(index)); + if (val < 0) + return val; + + if (FIELD_GET(VSPEC1_LED_CON, val) & VSPEC1_LED_LINK10) + *rules |= BIT(TRIGGER_NETDEV_LINK_10); + + if (FIELD_GET(VSPEC1_LED_CON, val) & VSPEC1_LED_LINK100) + *rules |= BIT(TRIGGER_NETDEV_LINK_100); + + if (FIELD_GET(VSPEC1_LED_CON, val) & VSPEC1_LED_LINK1000) + *rules |= BIT(TRIGGER_NETDEV_LINK_1000); + + if (FIELD_GET(VSPEC1_LED_CON, val) & VSPEC1_LED_LINK2500) + *rules |= BIT(TRIGGER_NETDEV_LINK_2500); + + if (FIELD_GET(VSPEC1_LED_CON, val) == (VSPEC1_LED_LINK10 | + VSPEC1_LED_LINK100 | + VSPEC1_LED_LINK1000 | + VSPEC1_LED_LINK2500)) + *rules |= BIT(TRIGGER_NETDEV_LINK); + + if (FIELD_GET(VSPEC1_LED_PULSE, val) & VSPEC1_LED_TXACT) + *rules |= BIT(TRIGGER_NETDEV_TX); + + if (FIELD_GET(VSPEC1_LED_PULSE, val) & VSPEC1_LED_RXACT) + *rules |= BIT(TRIGGER_NETDEV_RX); + + return 0; +} + +static int gpy_led_hw_control_set(struct phy_device *phydev, u8 index, + unsigned long rules) +{ + u16 val = 0; + int ret; + + if (index >= GPY_MAX_LEDS) + return -EINVAL; + + if (rules & BIT(TRIGGER_NETDEV_LINK) || + rules & BIT(TRIGGER_NETDEV_LINK_10)) + val |= FIELD_PREP(VSPEC1_LED_CON, VSPEC1_LED_LINK10); + + if (rules & BIT(TRIGGER_NETDEV_LINK) || + rules & BIT(TRIGGER_NETDEV_LINK_100)) + val |= FIELD_PREP(VSPEC1_LED_CON, VSPEC1_LED_LINK100); + + if (rules & BIT(TRIGGER_NETDEV_LINK) || + rules & BIT(TRIGGER_NETDEV_LINK_1000)) + val |= FIELD_PREP(VSPEC1_LED_CON, VSPEC1_LED_LINK1000); + + if (rules & BIT(TRIGGER_NETDEV_LINK) || + rules & BIT(TRIGGER_NETDEV_LINK_2500)) + val |= FIELD_PREP(VSPEC1_LED_CON, VSPEC1_LED_LINK2500); + + if (rules & BIT(TRIGGER_NETDEV_TX)) + val |= FIELD_PREP(VSPEC1_LED_PULSE, VSPEC1_LED_TXACT); + + if (rules & BIT(TRIGGER_NETDEV_RX)) + val |= 
FIELD_PREP(VSPEC1_LED_PULSE, VSPEC1_LED_RXACT); + + /* allow RX/TX pulse without link indication */ + if ((rules & BIT(TRIGGER_NETDEV_TX) || rules & BIT(TRIGGER_NETDEV_RX)) && + !(val & VSPEC1_LED_CON)) + val |= FIELD_PREP(VSPEC1_LED_PULSE, VSPEC1_LED_NO_CON) | VSPEC1_LED_CON; + + ret = phy_write_mmd(phydev, MDIO_MMD_VEND1, VSPEC1_LED(index), val); + if (ret) + return ret; + + return phy_set_bits(phydev, PHY_LED, PHY_LED_HWCONTROL(index)); +} + +static int gpy_led_polarity_set(struct phy_device *phydev, int index, + unsigned long modes) +{ + bool force_active_low = false, force_active_high = false; + u32 mode; + + if (index >= GPY_MAX_LEDS) + return -EINVAL; + + for_each_set_bit(mode, &modes, __PHY_LED_MODES_NUM) { + switch (mode) { + case PHY_LED_ACTIVE_LOW: + force_active_low = true; + break; + case PHY_LED_ACTIVE_HIGH: + force_active_high = true; + break; + default: + return -EINVAL; + } + } + + if (force_active_low) + return phy_set_bits(phydev, PHY_LED, PHY_LED_POLARITY(index)); + + if (force_active_high) + return phy_clear_bits(phydev, PHY_LED, PHY_LED_POLARITY(index)); + + unreachable(); +} + static struct phy_driver gpy_drivers[] = { { PHY_ID_MATCH_MODEL(PHY_ID_GPY2xx), @@ -852,6 +1034,11 @@ static struct phy_driver gpy_drivers[] = { .set_wol = gpy_set_wol, .get_wol = gpy_get_wol, .set_loopback = gpy_loopback, + .led_brightness_set = gpy_led_brightness_set, + .led_hw_is_supported = gpy_led_hw_is_supported, + .led_hw_control_get = gpy_led_hw_control_get, + .led_hw_control_set = gpy_led_hw_control_set, + .led_polarity_set = gpy_led_polarity_set, }, { .phy_id = PHY_ID_GPY115B, @@ -870,6 +1057,11 @@ static struct phy_driver gpy_drivers[] = { .set_wol = gpy_set_wol, .get_wol = gpy_get_wol, .set_loopback = gpy115_loopback, + .led_brightness_set = gpy_led_brightness_set, + .led_hw_is_supported = gpy_led_hw_is_supported, + .led_hw_control_get = gpy_led_hw_control_get, + .led_hw_control_set = gpy_led_hw_control_set, + .led_polarity_set = gpy_led_polarity_set, }, { PHY_ID_MATCH_MODEL(PHY_ID_GPY115C), @@ -887,6 +1079,11 @@ static struct phy_driver gpy_drivers[] = { .set_wol = gpy_set_wol, .get_wol = gpy_get_wol, .set_loopback = gpy115_loopback, + .led_brightness_set = gpy_led_brightness_set, + .led_hw_is_supported = gpy_led_hw_is_supported, + .led_hw_control_get = gpy_led_hw_control_get, + .led_hw_control_set = gpy_led_hw_control_set, + .led_polarity_set = gpy_led_polarity_set, }, { .phy_id = PHY_ID_GPY211B, @@ -905,6 +1102,11 @@ static struct phy_driver gpy_drivers[] = { .set_wol = gpy_set_wol, .get_wol = gpy_get_wol, .set_loopback = gpy_loopback, + .led_brightness_set = gpy_led_brightness_set, + .led_hw_is_supported = gpy_led_hw_is_supported, + .led_hw_control_get = gpy_led_hw_control_get, + .led_hw_control_set = gpy_led_hw_control_set, + .led_polarity_set = gpy_led_polarity_set, }, { PHY_ID_MATCH_MODEL(PHY_ID_GPY211C), @@ -922,6 +1124,11 @@ static struct phy_driver gpy_drivers[] = { .set_wol = gpy_set_wol, .get_wol = gpy_get_wol, .set_loopback = gpy_loopback, + .led_brightness_set = gpy_led_brightness_set, + .led_hw_is_supported = gpy_led_hw_is_supported, + .led_hw_control_get = gpy_led_hw_control_get, + .led_hw_control_set = gpy_led_hw_control_set, + .led_polarity_set = gpy_led_polarity_set, }, { .phy_id = PHY_ID_GPY212B, @@ -940,6 +1147,11 @@ static struct phy_driver gpy_drivers[] = { .set_wol = gpy_set_wol, .get_wol = gpy_get_wol, .set_loopback = gpy_loopback, + .led_brightness_set = gpy_led_brightness_set, + .led_hw_is_supported = gpy_led_hw_is_supported, + .led_hw_control_get = 
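On the unreachable() at the end of gpy_led_polarity_set() above: the loop rejects any mode other than PHY_LED_ACTIVE_LOW and PHY_LED_ACTIVE_HIGH with -EINVAL, and the core only invokes the led_polarity_set op when at least one polarity mode was actually parsed from the devicetree (see the of_phy_led() hunk further down), so one of the two force_* flags is necessarily set by the time both ifs have run. Active-low maps to setting PHY_LED_POLARITY(index), active-high to clearing it.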
gpy_led_hw_control_get, + .led_hw_control_set = gpy_led_hw_control_set, + .led_polarity_set = gpy_led_polarity_set, }, { PHY_ID_MATCH_MODEL(PHY_ID_GPY212C), @@ -957,6 +1169,11 @@ static struct phy_driver gpy_drivers[] = { .set_wol = gpy_set_wol, .get_wol = gpy_get_wol, .set_loopback = gpy_loopback, + .led_brightness_set = gpy_led_brightness_set, + .led_hw_is_supported = gpy_led_hw_is_supported, + .led_hw_control_get = gpy_led_hw_control_get, + .led_hw_control_set = gpy_led_hw_control_set, + .led_polarity_set = gpy_led_polarity_set, }, { .phy_id = PHY_ID_GPY215B, @@ -975,6 +1192,11 @@ static struct phy_driver gpy_drivers[] = { .set_wol = gpy_set_wol, .get_wol = gpy_get_wol, .set_loopback = gpy_loopback, + .led_brightness_set = gpy_led_brightness_set, + .led_hw_is_supported = gpy_led_hw_is_supported, + .led_hw_control_get = gpy_led_hw_control_get, + .led_hw_control_set = gpy_led_hw_control_set, + .led_polarity_set = gpy_led_polarity_set, }, { PHY_ID_MATCH_MODEL(PHY_ID_GPY215C), @@ -992,6 +1214,11 @@ static struct phy_driver gpy_drivers[] = { .set_wol = gpy_set_wol, .get_wol = gpy_get_wol, .set_loopback = gpy_loopback, + .led_brightness_set = gpy_led_brightness_set, + .led_hw_is_supported = gpy_led_hw_is_supported, + .led_hw_control_get = gpy_led_hw_control_get, + .led_hw_control_set = gpy_led_hw_control_set, + .led_polarity_set = gpy_led_polarity_set, }, { PHY_ID_MATCH_MODEL(PHY_ID_GPY241B), diff --git a/drivers/net/phy/nxp-c45-tja11xx.c b/drivers/net/phy/nxp-c45-tja11xx.c index 5af5ade4fc64..7e328c2a29a4 100644 --- a/drivers/net/phy/nxp-c45-tja11xx.c +++ b/drivers/net/phy/nxp-c45-tja11xx.c @@ -10,6 +10,7 @@ #include <linux/kernel.h> #include <linux/mii.h> #include <linux/module.h> +#include <linux/of.h> #include <linux/phy.h> #include <linux/processor.h> #include <linux/property.h> @@ -185,6 +186,8 @@ #define NXP_C45_SKB_CB(skb) ((struct nxp_c45_skb_cb *)(skb)->cb) +#define TJA11XX_REVERSE_MODE BIT(0) + struct nxp_c45_phy; struct nxp_c45_skb_cb { @@ -1510,6 +1513,8 @@ static int nxp_c45_get_delays(struct phy_device *phydev) static int nxp_c45_set_phy_mode(struct phy_device *phydev) { + struct nxp_c45_phy *priv = phydev->priv; + u16 basic_config; int ret; ret = phy_read_mmd(phydev, MDIO_MMD_VEND1, VEND1_ABILITIES); @@ -1561,8 +1566,15 @@ static int nxp_c45_set_phy_mode(struct phy_device *phydev) phydev_err(phydev, "rmii mode not supported\n"); return -EINVAL; } + + basic_config = MII_BASIC_CONFIG_RMII; + + /* This is not PHY_INTERFACE_MODE_REVRMII */ + if (priv->flags & TJA11XX_REVERSE_MODE) + basic_config |= MII_BASIC_CONFIG_REV; + phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_MII_BASIC_CONFIG, - MII_BASIC_CONFIG_RMII); + basic_config); break; case PHY_INTERFACE_MODE_SGMII: if (!(ret & SGMII_ABILITY)) { @@ -1623,6 +1635,20 @@ static int nxp_c45_get_features(struct phy_device *phydev) return genphy_c45_pma_read_abilities(phydev); } +static int nxp_c45_parse_dt(struct phy_device *phydev) +{ + struct device_node *node = phydev->mdio.dev.of_node; + struct nxp_c45_phy *priv = phydev->priv; + + if (!IS_ENABLED(CONFIG_OF_MDIO)) + return 0; + + if (of_property_read_bool(node, "nxp,rmii-refclk-out")) + priv->flags |= TJA11XX_REVERSE_MODE; + + return 0; +} + static int nxp_c45_probe(struct phy_device *phydev) { struct nxp_c45_phy *priv; @@ -1642,6 +1668,8 @@ static int nxp_c45_probe(struct phy_device *phydev) phydev->priv = priv; + nxp_c45_parse_dt(phydev); + mutex_init(&priv->ptp_lock); phy_abilities = phy_read_mmd(phydev, MDIO_MMD_VEND1, diff --git a/drivers/net/phy/nxp-c45-tja11xx.h 
b/drivers/net/phy/nxp-c45-tja11xx.h index f364fca68f0b..8b5fc383752b 100644 --- a/drivers/net/phy/nxp-c45-tja11xx.h +++ b/drivers/net/phy/nxp-c45-tja11xx.h @@ -28,6 +28,7 @@ struct nxp_c45_phy { int extts_index; bool extts; struct nxp_c45_macsec *macsec; + u32 flags; }; #if IS_ENABLED(CONFIG_MACSEC) diff --git a/drivers/net/phy/phy-core.c b/drivers/net/phy/phy-core.c index 1f98b6a96c15..4e8db12d6092 100644 --- a/drivers/net/phy/phy-core.c +++ b/drivers/net/phy/phy-core.c @@ -413,6 +413,39 @@ void of_set_phy_eee_broken(struct phy_device *phydev) } /** + * of_set_phy_timing_role - Set the master/slave mode of the PHY + * + * @phydev: The phy_device struct + * + * Set master/slave configuration of the PHY based on the device tree. + */ +void of_set_phy_timing_role(struct phy_device *phydev) +{ + struct device_node *node = phydev->mdio.dev.of_node; + const char *master; + + if (!IS_ENABLED(CONFIG_OF_MDIO)) + return; + + if (!node) + return; + + if (of_property_read_string(node, "timing-role", &master)) + return; + + if (strcmp(master, "forced-master") == 0) + phydev->master_slave_set = MASTER_SLAVE_CFG_MASTER_FORCE; + else if (strcmp(master, "forced-slave") == 0) + phydev->master_slave_set = MASTER_SLAVE_CFG_SLAVE_FORCE; + else if (strcmp(master, "preferred-master") == 0) + phydev->master_slave_set = MASTER_SLAVE_CFG_MASTER_PREFERRED; + else if (strcmp(master, "preferred-slave") == 0) + phydev->master_slave_set = MASTER_SLAVE_CFG_SLAVE_PREFERRED; + else + phydev_warn(phydev, "Unknown master-slave mode %s\n", master); +} + +/** * phy_resolve_aneg_pause - Determine pause autoneg results * * @phydev: The phy_device struct diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c index 499797646580..563497a3274c 100644 --- a/drivers/net/phy/phy_device.c +++ b/drivers/net/phy/phy_device.c @@ -3358,11 +3358,17 @@ static int of_phy_led(struct phy_device *phydev, if (index > U8_MAX) return -EINVAL; + if (of_property_read_bool(led, "active-high")) + set_bit(PHY_LED_ACTIVE_HIGH, &modes); if (of_property_read_bool(led, "active-low")) set_bit(PHY_LED_ACTIVE_LOW, &modes); if (of_property_read_bool(led, "inactive-high-impedance")) set_bit(PHY_LED_INACTIVE_HIGH_IMPEDANCE, &modes); + + if (WARN_ON(modes & BIT(PHY_LED_ACTIVE_LOW) && + modes & BIT(PHY_LED_ACTIVE_HIGH))) + return -EINVAL; + if (modes) { /* Return error if asked to set polarity modes but not supported */ if (!phydev->drv->led_polarity_set) @@ -3421,6 +3427,16 @@ static int of_phy_leds(struct phy_device *phydev) if (!leds) return 0; + /* Check if the PHY driver has at least one OP to + * set the LEDs. + */ + if (!(phydev->drv->led_brightness_set || + phydev->drv->led_blink_set || + phydev->drv->led_hw_control_set)) { + phydev_dbg(phydev, "ignoring leds node defined with no PHY driver support\n"); + goto exit; + } + for_each_available_child_of_node_scoped(leds, led) { err = of_phy_led(phydev, led); if (err) { @@ -3430,6 +3446,7 @@ static int of_phy_leds(struct phy_device *phydev) } } +exit: of_node_put(leds); return 0; } @@ -3609,6 +3626,9 @@ static int phy_probe(struct device *dev) */ of_set_phy_eee_broken(phydev); + /* Get master/slave strap overrides */ + of_set_phy_timing_role(phydev); + /* The Pause Frame bits indicate that the PHY can support passing * pause frames. During autonegotiation, the PHYs will determine if * they should allow pause frames to pass. 
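The new of_set_phy_timing_role() above maps the devicetree timing-role string onto the master/slave request, so a board DTS can pin the role with, e.g., timing-role = "forced-master"; in the PHY node (the other strings accepted by the parser being "forced-slave", "preferred-master" and "preferred-slave", which appear to match the generic ethernet-controller binding). An unrecognized string only logs a warning and leaves the negotiated default in place, and the helper is called from phy_probe() right after the EEE quirk handling, as the hunk above shows.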
The MAC driver should then diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c index 4309317de3d1..b5870f8666ac 100644 --- a/drivers/net/phy/phylink.c +++ b/drivers/net/phy/phylink.c @@ -79,7 +79,6 @@ struct phylink { unsigned int pcs_state; bool mac_link_dropped; - bool using_mac_select_pcs; struct sfp_bus *sfp_bus; bool sfp_may_have_phy; @@ -599,15 +598,8 @@ static unsigned long phylink_get_capabilities(phy_interface_t interface, * max speed at full duplex. */ if (mac_capabilities & - phylink_cap_from_speed_duplex(max_speed, DUPLEX_FULL)) { - /* Although a duplex-matching phy might exist, we - * conservatively remove these modes because the MAC - * will not be aware of the half-duplex nature of the - * link. - */ + phylink_cap_from_speed_duplex(max_speed, DUPLEX_FULL)) matched_caps = GENMASK(__fls(caps), __fls(MAC_10HD)); - matched_caps &= ~(MAC_1000HD | MAC_100HD | MAC_10HD); - } break; } case RATE_MATCH_CRS: @@ -656,17 +648,15 @@ static int phylink_validate_mac_and_pcs(struct phylink *pl, unsigned long *supported, struct phylink_link_state *state) { + struct phylink_pcs *pcs = NULL; unsigned long capabilities; - struct phylink_pcs *pcs; int ret; /* Get the PCS for this interface mode */ - if (pl->using_mac_select_pcs) { + if (pl->mac_ops->mac_select_pcs) { pcs = pl->mac_ops->mac_select_pcs(pl->config, state->interface); if (IS_ERR(pcs)) return PTR_ERR(pcs); - } else { - pcs = pl->pcs; } if (pcs) { @@ -1182,7 +1172,7 @@ static void phylink_major_config(struct phylink *pl, bool restart, state->interface, state->advertising); - if (pl->using_mac_select_pcs) { + if (pl->mac_ops->mac_select_pcs) { pcs = pl->mac_ops->mac_select_pcs(pl->config, state->interface); if (IS_ERR(pcs)) { phylink_err(pl, @@ -1191,7 +1181,7 @@ static void phylink_major_config(struct phylink *pl, bool restart, return; } - pcs_changed = pcs && pl->pcs != pcs; + pcs_changed = pl->pcs != pcs; } phylink_pcs_poll_stop(pl); @@ -1698,7 +1688,6 @@ struct phylink *phylink_create(struct phylink_config *config, phy_interface_t iface, const struct phylink_mac_ops *mac_ops) { - bool using_mac_select_pcs = false; struct phylink *pl; int ret; @@ -1709,11 +1698,6 @@ struct phylink *phylink_create(struct phylink_config *config, return ERR_PTR(-EINVAL); } - if (mac_ops->mac_select_pcs && - mac_ops->mac_select_pcs(config, PHY_INTERFACE_MODE_NA) != - ERR_PTR(-EOPNOTSUPP)) - using_mac_select_pcs = true; - pl = kzalloc(sizeof(*pl), GFP_KERNEL); if (!pl) return ERR_PTR(-ENOMEM); @@ -1732,7 +1716,6 @@ struct phylink *phylink_create(struct phylink_config *config, return ERR_PTR(-EINVAL); } - pl->using_mac_select_pcs = using_mac_select_pcs; pl->phy_state.interface = iface; pl->link_interface = iface; if (iface == PHY_INTERFACE_MODE_MOCA) diff --git a/drivers/net/phy/realtek.c b/drivers/net/phy/realtek.c index 166f6a728373..f65d7f1f348e 100644 --- a/drivers/net/phy/realtek.c +++ b/drivers/net/phy/realtek.c @@ -80,18 +80,22 @@ #define RTL822X_VND2_GANLPAR 0xa414 -#define RTL822X_VND2_PHYSR 0xa434 - #define RTL8366RB_POWER_SAVE 0x15 #define RTL8366RB_POWER_SAVE_ON BIT(12) #define RTL9000A_GINMR 0x14 #define RTL9000A_GINMR_LINK_STATUS BIT(4) -#define RTLGEN_SPEED_MASK 0x0630 +#define RTL_VND2_PHYSR 0xa434 +#define RTL_VND2_PHYSR_DUPLEX BIT(3) +#define RTL_VND2_PHYSR_SPEEDL GENMASK(5, 4) +#define RTL_VND2_PHYSR_SPEEDH GENMASK(10, 9) +#define RTL_VND2_PHYSR_MASTER BIT(11) +#define RTL_VND2_PHYSR_SPEED_MASK (RTL_VND2_PHYSR_SPEEDL | RTL_VND2_PHYSR_SPEEDH) #define RTL_GENERIC_PHYID 0x001cc800 #define RTL_8211FVD_PHYID 0x001cc878 +#define 
RTL_8221B 0x001cc840 #define RTL_8221B_VB_CG 0x001cc849 #define RTL_8221B_VN_CG 0x001cc84a #define RTL_8251B 0x001cc862 @@ -660,9 +664,18 @@ static int rtl8366rb_config_init(struct phy_device *phydev) } /* get actual speed to cover the downshift case */ -static void rtlgen_decode_speed(struct phy_device *phydev, int val) +static void rtlgen_decode_physr(struct phy_device *phydev, int val) { - switch (val & RTLGEN_SPEED_MASK) { + /* bit 3 + * 0: Half Duplex + * 1: Full Duplex + */ + if (val & RTL_VND2_PHYSR_DUPLEX) + phydev->duplex = DUPLEX_FULL; + else + phydev->duplex = DUPLEX_HALF; + + switch (val & RTL_VND2_PHYSR_SPEED_MASK) { case 0x0000: phydev->speed = SPEED_10; break; @@ -684,6 +697,19 @@ static void rtlgen_decode_speed(struct phy_device *phydev, int val) default: break; } + + /* bit 11 + * 0: Slave Mode + * 1: Master Mode + */ + if (phydev->speed >= 1000) { + if (val & RTL_VND2_PHYSR_MASTER) + phydev->master_slave_state = MASTER_SLAVE_STATE_MASTER; + else + phydev->master_slave_state = MASTER_SLAVE_STATE_SLAVE; + } else { + phydev->master_slave_state = MASTER_SLAVE_STATE_UNSUPPORTED; + } } static int rtlgen_read_status(struct phy_device *phydev) @@ -701,7 +727,7 @@ static int rtlgen_read_status(struct phy_device *phydev) if (val < 0) return val; - rtlgen_decode_speed(phydev, val); + rtlgen_decode_physr(phydev, val); return 0; } @@ -924,17 +950,25 @@ static void rtl822xb_update_interface(struct phy_device *phydev) static int rtl822x_read_status(struct phy_device *phydev) { - if (phydev->autoneg == AUTONEG_ENABLE) { - int lpadv = phy_read_paged(phydev, 0xa5d, 0x13); + int lpadv, ret; - if (lpadv < 0) - return lpadv; + ret = rtlgen_read_status(phydev); + if (ret < 0) + return ret; - mii_10gbt_stat_mod_linkmode_lpa_t(phydev->lp_advertising, - lpadv); + if (phydev->autoneg == AUTONEG_DISABLE || + !phydev->autoneg_complete) { + mii_10gbt_stat_mod_linkmode_lpa_t(phydev->lp_advertising, 0); + return 0; } - return rtlgen_read_status(phydev); + lpadv = phy_read_paged(phydev, 0xa5d, 0x13); + if (lpadv < 0) + return lpadv; + + mii_10gbt_stat_mod_linkmode_lpa_t(phydev->lp_advertising, lpadv); + + return 0; } static int rtl822xb_read_status(struct phy_device *phydev) @@ -993,6 +1027,10 @@ static int rtl822x_c45_read_status(struct phy_device *phydev) if (ret < 0) return ret; + if (phydev->autoneg == AUTONEG_DISABLE || + !genphy_c45_aneg_done(phydev)) + mii_stat1000_mod_linkmode_lpa_t(phydev->lp_advertising, 0); + /* Vendor register as C45 has no standardized support for 1000BaseT */ if (phydev->autoneg == AUTONEG_ENABLE) { val = phy_read_mmd(phydev, MDIO_MMD_VEND2, @@ -1007,11 +1045,11 @@ static int rtl822x_c45_read_status(struct phy_device *phydev) return 0; /* Read actual speed from vendor register. */ - val = phy_read_mmd(phydev, MDIO_MMD_VEND2, RTL822X_VND2_PHYSR); + val = phy_read_mmd(phydev, MDIO_MMD_VEND2, RTL_VND2_PHYSR); if (val < 0) return val; - rtlgen_decode_speed(phydev, val); + rtlgen_decode_physr(phydev, val); return 0; } @@ -1040,6 +1078,23 @@ static bool rtlgen_supports_2_5gbps(struct phy_device *phydev) return val >= 0 && val & MDIO_PMA_SPEED_2_5G; } +/* On internal PHYs, MMD reads over C22 always return 0. * Check an MMD register which is known to be non-zero.
+ */ +static bool rtlgen_supports_mmd(struct phy_device *phydev) +{ + int val; + + phy_lock_mdio_bus(phydev); + __phy_write(phydev, MII_MMD_CTRL, MDIO_MMD_PCS); + __phy_write(phydev, MII_MMD_DATA, MDIO_PCS_EEE_ABLE); + __phy_write(phydev, MII_MMD_CTRL, MDIO_MMD_PCS | MII_MMD_CTRL_NOINCR); + val = __phy_read(phydev, MII_MMD_DATA); + phy_unlock_mdio_bus(phydev); + + return val > 0; +} + static int rtlgen_match_phy_device(struct phy_device *phydev) { return phydev->phy_id == RTL_GENERIC_PHYID && @@ -1049,7 +1104,8 @@ static int rtlgen_match_phy_device(struct phy_device *phydev) static int rtl8226_match_phy_device(struct phy_device *phydev) { return phydev->phy_id == RTL_GENERIC_PHYID && - rtlgen_supports_2_5gbps(phydev); + rtlgen_supports_2_5gbps(phydev) && + rtlgen_supports_mmd(phydev); } static int rtlgen_is_c45_match(struct phy_device *phydev, unsigned int id, @@ -1061,6 +1117,11 @@ static int rtlgen_is_c45_match(struct phy_device *phydev, unsigned int id, return !is_c45 && (id == phydev->phy_id); } +static int rtl8221b_match_phy_device(struct phy_device *phydev) +{ + return phydev->phy_id == RTL_8221B && rtlgen_supports_mmd(phydev); +} + static int rtl8221b_vb_cg_c22_match_phy_device(struct phy_device *phydev) { return rtlgen_is_c45_match(phydev, RTL_8221B_VB_CG, false); @@ -1081,9 +1142,22 @@ static int rtl8221b_vn_cg_c45_match_phy_device(struct phy_device *phydev) return rtlgen_is_c45_match(phydev, RTL_8221B_VN_CG, true); } -static int rtl8251b_c22_match_phy_device(struct phy_device *phydev) +static int rtl_internal_nbaset_match_phy_device(struct phy_device *phydev) { - return rtlgen_is_c45_match(phydev, RTL_8251B, false); + if (phydev->is_c45) + return false; + + switch (phydev->phy_id) { + case RTL_GENERIC_PHYID: + case RTL_8221B: + case RTL_8251B: + case 0x001cc841: + break; + default: + return false; + } + + return rtlgen_supports_2_5gbps(phydev) && !rtlgen_supports_mmd(phydev); } static int rtl8251b_c45_match_phy_device(struct phy_device *phydev) @@ -1345,10 +1419,8 @@ static struct phy_driver realtek_drvs[] = { .resume = rtlgen_resume, .read_page = rtl821x_read_page, .write_page = rtl821x_write_page, - .read_mmd = rtl822x_read_mmd, - .write_mmd = rtl822x_write_mmd, }, { - PHY_ID_MATCH_EXACT(0x001cc840), + .match_phy_device = rtl8221b_match_phy_device, .name = "RTL8226B_RTL8221B 2.5Gbps PHY", .get_features = rtl822x_get_features, .config_aneg = rtl822x_config_aneg, @@ -1359,8 +1431,6 @@ static struct phy_driver realtek_drvs[] = { .resume = rtlgen_resume, .read_page = rtl821x_read_page, .write_page = rtl821x_write_page, - .read_mmd = rtl822x_read_mmd, - .write_mmd = rtl822x_write_mmd, }, { PHY_ID_MATCH_EXACT(0x001cc838), .name = "RTL8226-CG 2.5Gbps PHY", @@ -1438,8 +1508,9 @@ static struct phy_driver realtek_drvs[] = { .read_page = rtl821x_read_page, .write_page = rtl821x_write_page, }, { - .match_phy_device = rtl8251b_c22_match_phy_device, - .name = "RTL8126A-internal 5Gbps PHY", + .match_phy_device = rtl_internal_nbaset_match_phy_device, + .name = "Realtek Internal NBASE-T PHY", + .flags = PHY_IS_INTERNAL, .get_features = rtl822x_get_features, .config_aneg = rtl822x_config_aneg, .read_status = rtl822x_read_status, diff --git a/drivers/net/phy/sfp.c b/drivers/net/phy/sfp.c index a5684ef5884b..7dbcbf0a4ee2 100644 --- a/drivers/net/phy/sfp.c +++ b/drivers/net/phy/sfp.c @@ -466,7 +466,8 @@ static void sfp_quirk_ubnt_uf_instant(const struct sfp_eeprom_id *id, static const struct sfp_quirk sfp_quirks[] = { // Alcatel Lucent G-010S-P can operate at 2500base-X, but incorrectly // report 
2500MBd NRZ in their EEPROM - SFP_QUIRK_M("ALCATELLUCENT", "G010SP", sfp_quirk_2500basex), + SFP_QUIRK("ALCATELLUCENT", "G010SP", sfp_quirk_2500basex, + sfp_fixup_ignore_tx_fault), // Alcatel Lucent G-010S-A can operate at 2500base-X, but report 3.2GBd // NRZ in their EEPROM @@ -3146,7 +3147,7 @@ static void sfp_shutdown(struct platform_device *pdev) static struct platform_driver sfp_driver = { .probe = sfp_probe, - .remove_new = sfp_remove, + .remove = sfp_remove, .shutdown = sfp_shutdown, .driver = { .name = "sfp", diff --git a/drivers/net/phy/smsc.c b/drivers/net/phy/smsc.c index 150aea7c9c36..e1853599d9ba 100644 --- a/drivers/net/phy/smsc.c +++ b/drivers/net/phy/smsc.c @@ -627,12 +627,13 @@ int smsc_phy_probe(struct phy_device *phydev) phydev->priv = priv; /* Make clk optional to keep DTB backward compatibility. */ - refclk = devm_clk_get_optional_enabled(dev, NULL); + refclk = devm_clk_get_optional_enabled_with_rate(dev, NULL, + 50 * 1000 * 1000); if (IS_ERR(refclk)) return dev_err_probe(dev, PTR_ERR(refclk), "Failed to request clock\n"); - return clk_set_rate(refclk, 50 * 1000 * 1000); + return 0; } EXPORT_SYMBOL_GPL(smsc_phy_probe); diff --git a/drivers/net/team/team_core.c b/drivers/net/team/team_core.c index 18191d5a8bd4..a1b27b69f010 100644 --- a/drivers/net/team/team_core.c +++ b/drivers/net/team/team_core.c @@ -1946,8 +1946,7 @@ static void team_netpoll_cleanup(struct net_device *dev) mutex_unlock(&team->lock); } -static int team_netpoll_setup(struct net_device *dev, - struct netpoll_info *npifo) +static int team_netpoll_setup(struct net_device *dev) { struct team *team = netdev_priv(dev); struct team_port *port; diff --git a/drivers/net/tun.c b/drivers/net/tun.c index 9a0f6eb32016..d7a865ef370b 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c @@ -71,7 +71,7 @@ #include <linux/bpf_trace.h> #include <linux/mutex.h> #include <linux/ieee802154.h> -#include <linux/if_ltalk.h> +#include <uapi/linux/if_ltalk.h> #include <uapi/linux/if_fddi.h> #include <uapi/linux/if_hippi.h> #include <uapi/linux/if_fc.h> diff --git a/drivers/net/usb/sr9700.c b/drivers/net/usb/sr9700.c index cb7d2f798fb4..091bc2aca7e8 100644 --- a/drivers/net/usb/sr9700.c +++ b/drivers/net/usb/sr9700.c @@ -177,9 +177,9 @@ static int sr9700_get_eeprom(struct net_device *netdev, static int sr_mdio_read(struct net_device *netdev, int phy_id, int loc) { struct usbnet *dev = netdev_priv(netdev); - __le16 res; + int err, res; + __le16 word; int rc = 0; - int err; if (phy_id) { netdev_dbg(netdev, "Only internal phy supported\n"); @@ -197,14 +197,14 @@ static int sr_mdio_read(struct net_device *netdev, int phy_id, int loc) if (value & NSR_LINKST) rc = 1; } - err = sr_share_read_word(dev, 1, loc, &res); + err = sr_share_read_word(dev, 1, loc, &word); if (err < 0) return err; if (rc == 1) - res = le16_to_cpu(res) | BMSR_LSTATUS; + res = le16_to_cpu(word) | BMSR_LSTATUS; else - res = le16_to_cpu(res) & ~BMSR_LSTATUS; + res = le16_to_cpu(word) & ~BMSR_LSTATUS; netdev_dbg(netdev, "sr_mdio_read() phy_id=0x%02x, loc=0x%02x, returns=0x%04x\n", phy_id, loc, res); diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c index b70654c7ad34..6793fa09f9d1 100644 --- a/drivers/net/vmxnet3/vmxnet3_drv.c +++ b/drivers/net/vmxnet3/vmxnet3_drv.c @@ -201,6 +201,14 @@ vmxnet3_check_link(struct vmxnet3_adapter *adapter, bool affectTxQueue) adapter->link_speed = ret >> 16; if (ret & 1) { /* Link is up. */ + /* + * From vmxnet3 v9, the hypervisor reports the speed in Gbps. 
+ * Convert the speed to Mbps before reporting it to the kernel. + * Max link speed supported is 10000G. + */ + if (VMXNET3_VERSION_GE_9(adapter) && + adapter->link_speed < 10000) + adapter->link_speed = adapter->link_speed * 1000; netdev_info(adapter->netdev, "NIC Link is Up %d Mbps\n", adapter->link_speed); netif_carrier_on(adapter->netdev); diff --git a/drivers/net/vxlan/vxlan_core.c b/drivers/net/vxlan/vxlan_core.c index 6e9a3795846a..841b59d1c1c2 100644 --- a/drivers/net/vxlan/vxlan_core.c +++ b/drivers/net/vxlan/vxlan_core.c @@ -1435,11 +1435,11 @@ errout: /* Watch incoming packets to learn mapping between Ethernet address * and Tunnel endpoint. - * Return true if packet is bogus and should be dropped. */ -static bool vxlan_snoop(struct net_device *dev, - union vxlan_addr *src_ip, const u8 *src_mac, - u32 src_ifindex, __be32 vni) +static enum skb_drop_reason vxlan_snoop(struct net_device *dev, + union vxlan_addr *src_ip, + const u8 *src_mac, u32 src_ifindex, + __be32 vni) { struct vxlan_dev *vxlan = netdev_priv(dev); struct vxlan_fdb *f; @@ -1447,7 +1447,7 @@ static bool vxlan_snoop(struct net_device *dev, /* Ignore packets from invalid src-address */ if (!is_valid_ether_addr(src_mac)) - return true; + return SKB_DROP_REASON_MAC_INVALID_SOURCE; #if IS_ENABLED(CONFIG_IPV6) if (src_ip->sa.sa_family == AF_INET6 && @@ -1461,15 +1461,15 @@ static bool vxlan_snoop(struct net_device *dev, if (likely(vxlan_addr_equal(&rdst->remote_ip, src_ip) && rdst->remote_ifindex == ifindex)) - return false; + return SKB_NOT_DROPPED_YET; /* Don't migrate static entries, drop packets */ if (f->state & (NUD_PERMANENT | NUD_NOARP)) - return true; + return SKB_DROP_REASON_VXLAN_ENTRY_EXISTS; /* Don't override an fdb with nexthop with a learnt entry */ if (rcu_access_pointer(f->nh)) - return true; + return SKB_DROP_REASON_VXLAN_ENTRY_EXISTS; if (net_ratelimit()) netdev_info(dev, @@ -1497,7 +1497,7 @@ static bool vxlan_snoop(struct net_device *dev, spin_unlock(&vxlan->hash_lock[hash_index]); } - return false; + return SKB_NOT_DROPPED_YET; } static bool __vxlan_sock_release_prep(struct vxlan_sock *vs) @@ -1551,9 +1551,11 @@ static void vxlan_sock_release(struct vxlan_dev *vxlan) #endif } -static bool vxlan_remcsum(struct vxlanhdr *unparsed, - struct sk_buff *skb, u32 vxflags) +static enum skb_drop_reason vxlan_remcsum(struct vxlanhdr *unparsed, + struct sk_buff *skb, + u32 vxflags) { + enum skb_drop_reason reason; size_t start, offset; if (!(unparsed->vx_flags & VXLAN_HF_RCO) || skb->remcsum_offload) @@ -1562,15 +1564,17 @@ static bool vxlan_remcsum(struct vxlanhdr *unparsed, start = vxlan_rco_start(unparsed->vx_vni); offset = start + vxlan_rco_offset(unparsed->vx_vni); - if (!pskb_may_pull(skb, offset + sizeof(u16))) - return false; + reason = pskb_may_pull_reason(skb, offset + sizeof(u16)); + if (reason) + return reason; skb_remcsum_process(skb, (void *)(vxlan_hdr(skb) + 1), start, offset, !!(vxflags & VXLAN_F_REMCSUM_NOPARTIAL)); out: unparsed->vx_flags &= ~VXLAN_HF_RCO; unparsed->vx_vni &= VXLAN_VNI_MASK; - return true; + + return SKB_NOT_DROPPED_YET; } static void vxlan_parse_gbp_hdr(struct vxlanhdr *unparsed, @@ -1604,9 +1608,9 @@ out: unparsed->vx_flags &= ~VXLAN_GBP_USED_BITS; } -static bool vxlan_set_mac(struct vxlan_dev *vxlan, - struct vxlan_sock *vs, - struct sk_buff *skb, __be32 vni) +static enum skb_drop_reason vxlan_set_mac(struct vxlan_dev *vxlan, + struct vxlan_sock *vs, + struct sk_buff *skb, __be32 vni) { union vxlan_addr saddr; u32 ifindex = skb->dev->ifindex; @@ -1617,7 +1621,7 @@ static bool 
vxlan_set_mac(struct vxlan_dev *vxlan, /* Ignore packet loops (and multicast echo) */ if (ether_addr_equal(eth_hdr(skb)->h_source, vxlan->dev->dev_addr)) - return false; + return SKB_DROP_REASON_LOCAL_MAC; /* Get address from the outer IP header */ if (vxlan_get_sk_family(vs) == AF_INET) { @@ -1630,11 +1634,11 @@ static bool vxlan_set_mac(struct vxlan_dev *vxlan, #endif } - if ((vxlan->cfg.flags & VXLAN_F_LEARN) && - vxlan_snoop(skb->dev, &saddr, eth_hdr(skb)->h_source, ifindex, vni)) - return false; + if (!(vxlan->cfg.flags & VXLAN_F_LEARN)) + return SKB_NOT_DROPPED_YET; - return true; + return vxlan_snoop(skb->dev, &saddr, eth_hdr(skb)->h_source, + ifindex, vni); } static bool vxlan_ecn_decapsulate(struct vxlan_sock *vs, void *oiph, @@ -1671,13 +1675,15 @@ static int vxlan_rcv(struct sock *sk, struct sk_buff *skb) struct vxlan_metadata _md; struct vxlan_metadata *md = &_md; __be16 protocol = htons(ETH_P_TEB); + enum skb_drop_reason reason; bool raw_proto = false; void *oiph; __be32 vni = 0; int nh; /* Need UDP and VXLAN header to be present */ - if (!pskb_may_pull(skb, VXLAN_HLEN)) + reason = pskb_may_pull_reason(skb, VXLAN_HLEN); + if (reason) goto drop; unparsed = *vxlan_hdr(skb); @@ -1686,6 +1692,7 @@ static int vxlan_rcv(struct sock *sk, struct sk_buff *skb) netdev_dbg(skb->dev, "invalid vxlan flags=%#x vni=%#x\n", ntohl(vxlan_hdr(skb)->vx_flags), ntohl(vxlan_hdr(skb)->vx_vni)); + reason = SKB_DROP_REASON_VXLAN_INVALID_HDR; /* Return non vxlan pkt */ goto drop; } @@ -1699,8 +1706,10 @@ static int vxlan_rcv(struct sock *sk, struct sk_buff *skb) vni = vxlan_vni(vxlan_hdr(skb)->vx_vni); vxlan = vxlan_vs_find_vni(vs, skb->dev->ifindex, vni, &vninode); - if (!vxlan) + if (!vxlan) { + reason = SKB_DROP_REASON_VXLAN_VNI_NOT_FOUND; goto drop; + } /* For backwards compatibility, only allow reserved fields to be * used by VXLAN extensions if explicitly requested. @@ -1713,12 +1722,16 @@ static int vxlan_rcv(struct sock *sk, struct sk_buff *skb) } if (__iptunnel_pull_header(skb, VXLAN_HLEN, protocol, raw_proto, - !net_eq(vxlan->net, dev_net(vxlan->dev)))) + !net_eq(vxlan->net, dev_net(vxlan->dev)))) { + reason = SKB_DROP_REASON_NOMEM; goto drop; + } - if (vs->flags & VXLAN_F_REMCSUM_RX) - if (unlikely(!vxlan_remcsum(&unparsed, skb, vs->flags))) + if (vs->flags & VXLAN_F_REMCSUM_RX) { + reason = vxlan_remcsum(&unparsed, skb, vs->flags); + if (unlikely(reason)) goto drop; + } if (vxlan_collect_metadata(vs)) { IP_TUNNEL_DECLARE_FLAGS(flags) = { }; @@ -1728,8 +1741,10 @@ static int vxlan_rcv(struct sock *sk, struct sk_buff *skb) tun_dst = udp_tun_rx_dst(skb, vxlan_get_sk_family(vs), flags, key32_to_tunnel_id(vni), sizeof(*md)); - if (!tun_dst) + if (!tun_dst) { + reason = SKB_DROP_REASON_NOMEM; goto drop; + } md = ip_tunnel_info_opts(&tun_dst->u.tun_info); @@ -1753,11 +1768,13 @@ static int vxlan_rcv(struct sock *sk, struct sk_buff *skb) * is more robust and provides a little more security in * adding extensions to VXLAN. 
*/ + reason = SKB_DROP_REASON_VXLAN_INVALID_HDR; goto drop; } if (!raw_proto) { - if (!vxlan_set_mac(vxlan, vs, skb, vni)) + reason = vxlan_set_mac(vxlan, vs, skb, vni); + if (reason) goto drop; } else { skb_reset_mac_header(skb); @@ -1773,7 +1790,8 @@ static int vxlan_rcv(struct sock *sk, struct sk_buff *skb) skb_reset_network_header(skb); - if (!pskb_inet_may_pull(skb)) { + reason = pskb_inet_may_pull_reason(skb); + if (reason) { DEV_STATS_INC(vxlan->dev, rx_length_errors); DEV_STATS_INC(vxlan->dev, rx_errors); vxlan_vnifilter_count(vxlan, vni, vninode, @@ -1785,6 +1803,7 @@ static int vxlan_rcv(struct sock *sk, struct sk_buff *skb) oiph = skb->head + nh; if (!vxlan_ecn_decapsulate(vs, oiph, skb)) { + reason = SKB_DROP_REASON_IP_TUNNEL_ECN; DEV_STATS_INC(vxlan->dev, rx_frame_errors); DEV_STATS_INC(vxlan->dev, rx_errors); vxlan_vnifilter_count(vxlan, vni, vninode, @@ -1799,6 +1818,7 @@ static int vxlan_rcv(struct sock *sk, struct sk_buff *skb) dev_core_stats_rx_dropped_inc(vxlan->dev); vxlan_vnifilter_count(vxlan, vni, vninode, VXLAN_VNI_STATS_RX_DROPS, 0); + reason = SKB_DROP_REASON_DEV_READY; goto drop; } @@ -1811,8 +1831,9 @@ static int vxlan_rcv(struct sock *sk, struct sk_buff *skb) return 0; drop: + reason = reason ?: SKB_DROP_REASON_NOT_SPECIFIED; /* Consume bad packet */ - kfree_skb(skb); + kfree_skb_reason(skb, reason); return 0; } @@ -2268,7 +2289,7 @@ static void vxlan_encap_bypass(struct sk_buff *skb, struct vxlan_dev *src_vxlan, rcu_read_lock(); dev = skb->dev; if (unlikely(!(dev->flags & IFF_UP))) { - kfree_skb(skb); + kfree_skb_reason(skb, SKB_DROP_REASON_DEV_READY); goto drop; } @@ -2319,7 +2340,7 @@ static int encap_bypass_if_local(struct sk_buff *skb, struct net_device *dev, DEV_STATS_INC(dev, tx_errors); vxlan_vnifilter_count(vxlan, vni, NULL, VXLAN_VNI_STATS_TX_ERRORS, 0); - kfree_skb(skb); + kfree_skb_reason(skb, SKB_DROP_REASON_VXLAN_VNI_NOT_FOUND); return -ENOENT; } @@ -2352,13 +2373,16 @@ void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev, bool use_cache; bool udp_sum = false; bool xnet = !net_eq(vxlan->net, dev_net(vxlan->dev)); + enum skb_drop_reason reason; bool no_eth_encap; __be32 vni = 0; no_eth_encap = flags & VXLAN_F_GPE && skb->protocol != htons(ETH_P_TEB); - if (!skb_vlan_inet_prepare(skb, no_eth_encap)) + reason = skb_vlan_inet_prepare(skb, no_eth_encap); + if (reason) goto drop; + reason = SKB_DROP_REASON_NOT_SPECIFIED; old_iph = ip_hdr(skb); info = skb_tunnel_info(skb); @@ -2462,6 +2486,7 @@ void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev, tos, use_cache ? dst_cache : NULL); if (IS_ERR(rt)) { err = PTR_ERR(rt); + reason = SKB_DROP_REASON_IP_OUTNOROUTES; goto tx_error; } @@ -2513,8 +2538,10 @@ void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev, ttl = ttl ? 
: ip4_dst_hoplimit(&rt->dst); err = vxlan_build_skb(skb, ndst, sizeof(struct iphdr), vni, md, flags, udp_sum); - if (err < 0) + if (err < 0) { + reason = SKB_DROP_REASON_NOMEM; goto tx_error; + } udp_tunnel_xmit_skb(rt, sock4->sock->sk, skb, saddr, pkey->u.ipv4.dst, tos, ttl, df, @@ -2534,6 +2561,7 @@ void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev, if (IS_ERR(ndst)) { err = PTR_ERR(ndst); ndst = NULL; + reason = SKB_DROP_REASON_IP_OUTNOROUTES; goto tx_error; } @@ -2574,8 +2602,10 @@ void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev, skb_scrub_packet(skb, xnet); err = vxlan_build_skb(skb, ndst, sizeof(struct ipv6hdr), vni, md, flags, udp_sum); - if (err < 0) + if (err < 0) { + reason = SKB_DROP_REASON_NOMEM; goto tx_error; + } udp_tunnel6_xmit_skb(ndst, sock6->sock->sk, skb, dev, &saddr, &pkey->u.ipv6.dst, tos, ttl, @@ -2590,7 +2620,7 @@ out_unlock: drop: dev_core_stats_tx_dropped_inc(dev); vxlan_vnifilter_count(vxlan, vni, NULL, VXLAN_VNI_STATS_TX_DROPS, 0); - dev_kfree_skb(skb); + kfree_skb_reason(skb, reason); return; tx_error: @@ -2602,7 +2632,7 @@ tx_error: dst_release(ndst); DEV_STATS_INC(dev, tx_errors); vxlan_vnifilter_count(vxlan, vni, NULL, VXLAN_VNI_STATS_TX_ERRORS, 0); - kfree_skb(skb); + kfree_skb_reason(skb, reason); } static void vxlan_xmit_nh(struct sk_buff *skb, struct net_device *dev, @@ -2708,7 +2738,7 @@ static netdev_tx_t vxlan_xmit(struct sk_buff *skb, struct net_device *dev) if (info && info->mode & IP_TUNNEL_INFO_TX) vxlan_xmit_one(skb, dev, vni, NULL, false); else - kfree_skb(skb); + kfree_skb_reason(skb, SKB_DROP_REASON_TUNNEL_TXINFO); return NETDEV_TX_OK; } } @@ -2771,7 +2801,7 @@ static netdev_tx_t vxlan_xmit(struct sk_buff *skb, struct net_device *dev) dev_core_stats_tx_dropped_inc(dev); vxlan_vnifilter_count(vxlan, vni, NULL, VXLAN_VNI_STATS_TX_DROPS, 0); - kfree_skb(skb); + kfree_skb_reason(skb, SKB_DROP_REASON_VXLAN_NO_REMOTE); return NETDEV_TX_OK; } } @@ -2794,7 +2824,7 @@ static netdev_tx_t vxlan_xmit(struct sk_buff *skb, struct net_device *dev) if (fdst) vxlan_xmit_one(skb, dev, vni, fdst, did_rsc); else - kfree_skb(skb); + kfree_skb_reason(skb, SKB_DROP_REASON_VXLAN_NO_REMOTE); } return NETDEV_TX_OK; diff --git a/drivers/net/vxlan/vxlan_mdb.c b/drivers/net/vxlan/vxlan_mdb.c index 60eb95a06d55..8735891ee128 100644 --- a/drivers/net/vxlan/vxlan_mdb.c +++ b/drivers/net/vxlan/vxlan_mdb.c @@ -284,7 +284,7 @@ int vxlan_mdb_dump(struct net_device *dev, struct sk_buff *skb, ASSERT_RTNL(); - NL_ASSERT_DUMP_CTX_FITS(struct vxlan_mdb_dump_ctx); + NL_ASSERT_CTX_FITS(struct vxlan_mdb_dump_ctx); nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq, RTM_NEWMDB, sizeof(*bpm), @@ -1712,7 +1712,7 @@ netdev_tx_t vxlan_mdb_xmit(struct vxlan_dev *vxlan, vxlan_xmit_one(skb, vxlan->dev, src_vni, rcu_dereference(fremote->rd), false); else - kfree_skb(skb); + kfree_skb_reason(skb, SKB_DROP_REASON_VXLAN_NO_REMOTE); return NETDEV_TX_OK; } diff --git a/drivers/net/wan/framer/pef2256/pef2256.c b/drivers/net/wan/framer/pef2256/pef2256.c index 413a3c1d15bb..1e4c8e85d598 100644 --- a/drivers/net/wan/framer/pef2256/pef2256.c +++ b/drivers/net/wan/framer/pef2256/pef2256.c @@ -863,7 +863,7 @@ static struct platform_driver pef2256_driver = { .of_match_table = pef2256_id_table, }, .probe = pef2256_probe, - .remove_new = pef2256_remove, + .remove = pef2256_remove, }; module_platform_driver(pef2256_driver); diff --git a/drivers/net/wan/fsl_qmc_hdlc.c b/drivers/net/wan/fsl_qmc_hdlc.c index 8fcfbde31a1c..8976dea8e17e 100644 --- 
a/drivers/net/wan/fsl_qmc_hdlc.c +++ b/drivers/net/wan/fsl_qmc_hdlc.c @@ -799,7 +799,7 @@ static struct platform_driver qmc_hdlc_driver = { .of_match_table = qmc_hdlc_id_table, }, .probe = qmc_hdlc_probe, - .remove_new = qmc_hdlc_remove, + .remove = qmc_hdlc_remove, }; module_platform_driver(qmc_hdlc_driver); diff --git a/drivers/net/wan/fsl_ucc_hdlc.c b/drivers/net/wan/fsl_ucc_hdlc.c index 605e70f7baac..f999798a5612 100644 --- a/drivers/net/wan/fsl_ucc_hdlc.c +++ b/drivers/net/wan/fsl_ucc_hdlc.c @@ -1290,7 +1290,7 @@ MODULE_DEVICE_TABLE(of, fsl_ucc_hdlc_of_match); static struct platform_driver ucc_hdlc_driver = { .probe = ucc_hdlc_probe, - .remove_new = ucc_hdlc_remove, + .remove = ucc_hdlc_remove, .driver = { .name = DRV_NAME, .pm = HDLC_PM_OPS, diff --git a/drivers/net/wan/ixp4xx_hss.c b/drivers/net/wan/ixp4xx_hss.c index 931c5ca79ea5..720c5dc889ea 100644 --- a/drivers/net/wan/ixp4xx_hss.c +++ b/drivers/net/wan/ixp4xx_hss.c @@ -1534,7 +1534,7 @@ static void ixp4xx_hss_remove(struct platform_device *pdev) static struct platform_driver ixp4xx_hss_driver = { .driver.name = DRV_NAME, .probe = ixp4xx_hss_probe, - .remove_new = ixp4xx_hss_remove, + .remove = ixp4xx_hss_remove, }; module_platform_driver(ixp4xx_hss_driver); diff --git a/drivers/net/wwan/qcom_bam_dmux.c b/drivers/net/wwan/qcom_bam_dmux.c index 5dcb9a84a12e..64dab8b57611 100644 --- a/drivers/net/wwan/qcom_bam_dmux.c +++ b/drivers/net/wwan/qcom_bam_dmux.c @@ -896,7 +896,7 @@ MODULE_DEVICE_TABLE(of, bam_dmux_of_match); static struct platform_driver bam_dmux_driver = { .probe = bam_dmux_probe, - .remove_new = bam_dmux_remove, + .remove = bam_dmux_remove, .driver = { .name = "bam-dmux", .pm = &bam_dmux_pm_ops, diff --git a/drivers/ptp/Kconfig b/drivers/ptp/Kconfig index 604541dcb320..e98c9767e0ef 100644 --- a/drivers/ptp/Kconfig +++ b/drivers/ptp/Kconfig @@ -131,6 +131,19 @@ config PTP_1588_CLOCK_KVM To compile this driver as a module, choose M here: the module will be called ptp_kvm. +config PTP_1588_CLOCK_VMCLOCK + tristate "Virtual machine PTP clock" + depends on X86_TSC || ARM_ARCH_TIMER + depends on PTP_1588_CLOCK && ACPI && ARCH_SUPPORTS_INT128 + default y + help + This driver adds support for using a virtual precision clock + advertised by the hypervisor. This clock is only useful in virtual + machines where such a device is present. + + To compile this driver as a module, choose M here: the module + will be called ptp_vmclock. + config PTP_1588_CLOCK_IDT82P33 tristate "IDT 82P33xxx PTP clock" depends on PTP_1588_CLOCK && I2C diff --git a/drivers/ptp/Makefile b/drivers/ptp/Makefile index 68bf02078053..01b5cd91eb61 100644 --- a/drivers/ptp/Makefile +++ b/drivers/ptp/Makefile @@ -11,6 +11,7 @@ obj-$(CONFIG_PTP_1588_CLOCK_DTE) += ptp_dte.o obj-$(CONFIG_PTP_1588_CLOCK_INES) += ptp_ines.o obj-$(CONFIG_PTP_1588_CLOCK_PCH) += ptp_pch.o obj-$(CONFIG_PTP_1588_CLOCK_KVM) += ptp_kvm.o +obj-$(CONFIG_PTP_1588_CLOCK_VMCLOCK) += ptp_vmclock.o obj-$(CONFIG_PTP_1588_CLOCK_QORIQ) += ptp-qoriq.o ptp-qoriq-y += ptp_qoriq.o ptp-qoriq-$(CONFIG_DEBUG_FS) += ptp_qoriq_debugfs.o diff --git a/drivers/ptp/ptp_vmclock.c b/drivers/ptp/ptp_vmclock.c new file mode 100644 index 000000000000..cdca8a3ad1aa --- /dev/null +++ b/drivers/ptp/ptp_vmclock.c @@ -0,0 +1,615 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Virtual PTP 1588 clock for use with LM-safe VMclock device. + * + * Copyright © 2024 Amazon.com, Inc. or its affiliates. 
+ */ + +#include <linux/acpi.h> +#include <linux/device.h> +#include <linux/err.h> +#include <linux/file.h> +#include <linux/fs.h> +#include <linux/init.h> +#include <linux/kernel.h> +#include <linux/miscdevice.h> +#include <linux/mm.h> +#include <linux/module.h> +#include <linux/platform_device.h> +#include <linux/slab.h> + +#include <uapi/linux/vmclock-abi.h> + +#include <linux/ptp_clock_kernel.h> + +#ifdef CONFIG_X86 +#include <asm/pvclock.h> +#include <asm/kvmclock.h> +#endif + +#ifdef CONFIG_KVM_GUEST +#define SUPPORT_KVMCLOCK +#endif + +static DEFINE_IDA(vmclock_ida); + +ACPI_MODULE_NAME("vmclock"); + +struct vmclock_state { + struct resource res; + struct vmclock_abi *clk; + struct miscdevice miscdev; + struct ptp_clock_info ptp_clock_info; + struct ptp_clock *ptp_clock; + enum clocksource_ids cs_id, sys_cs_id; + int index; + char *name; +}; + +#define VMCLOCK_MAX_WAIT ms_to_ktime(100) + +/* Require at least the flags field to be present. All else can be optional. */ +#define VMCLOCK_MIN_SIZE offsetof(struct vmclock_abi, pad) + +#define VMCLOCK_FIELD_PRESENT(_c, _f) \ + (le32_to_cpu((_c)->size) >= (offsetof(struct vmclock_abi, _f) + \ + sizeof((_c)->_f))) + +/* + * Multiply a 64-bit count by a 64-bit tick 'period' in units of seconds >> 64 + * and add the fractional second part of the reference time. + * + * The result is a 128-bit value, the top 64 bits of which are seconds, and + * the low 64 bits are (seconds >> 64). + */ +static uint64_t mul_u64_u64_shr_add_u64(uint64_t *res_hi, uint64_t delta, + uint64_t period, uint8_t shift, + uint64_t frac_sec) +{ + unsigned __int128 res = (unsigned __int128)delta * period; + + res >>= shift; + res += frac_sec; + *res_hi = res >> 64; + return (uint64_t)res; +} + +static bool tai_adjust(struct vmclock_abi *clk, uint64_t *sec) +{ + if (likely(clk->time_type == VMCLOCK_TIME_UTC)) + return true; + + if (clk->time_type == VMCLOCK_TIME_TAI && + (le64_to_cpu(clk->flags) & VMCLOCK_FLAG_TAI_OFFSET_VALID)) { + if (sec) + *sec += (int16_t)le16_to_cpu(clk->tai_offset_sec); + return true; + } + return false; +} + +static int vmclock_get_crosststamp(struct vmclock_state *st, + struct ptp_system_timestamp *sts, + struct system_counterval_t *system_counter, + struct timespec64 *tspec) +{ + ktime_t deadline = ktime_add(ktime_get(), VMCLOCK_MAX_WAIT); + struct system_time_snapshot systime_snapshot; + uint64_t cycle, delta, seq, frac_sec; + +#ifdef CONFIG_X86 + /* + * We'd expect the hypervisor to know this and to report the clock + * status as VMCLOCK_STATUS_UNRELIABLE. But be paranoid. + */ + if (check_tsc_unstable()) + return -EINVAL; +#endif + + while (1) { + seq = le32_to_cpu(st->clk->seq_count) & ~1ULL; + + /* + * This pairs with a write barrier in the hypervisor + * which populates this structure. + */ + virt_rmb(); + + if (st->clk->clock_status == VMCLOCK_STATUS_UNRELIABLE) + return -EINVAL; + + /* + * When invoked for gettimex64(), fill in the pre/post system + * times. The simple case is when system time is based on the + * same counter as st->cs_id, in which case all three times + * will be derived from the *same* counter value. + * + * If the system isn't using the same counter, then the value + * from ktime_get_snapshot() will still be used as pre_ts, and + * ptp_read_system_postts() is called to populate postts after + * calling get_cycles(). + * + * The conversion to timespec64 happens further down, outside + * the seq_count loop. 
+ */ + if (sts) { + ktime_get_snapshot(&systime_snapshot); + if (systime_snapshot.cs_id == st->cs_id) { + cycle = systime_snapshot.cycles; + } else { + cycle = get_cycles(); + ptp_read_system_postts(sts); + } + } else { + cycle = get_cycles(); + } + + delta = cycle - le64_to_cpu(st->clk->counter_value); + + frac_sec = mul_u64_u64_shr_add_u64(&tspec->tv_sec, delta, + le64_to_cpu(st->clk->counter_period_frac_sec), + st->clk->counter_period_shift, + le64_to_cpu(st->clk->time_frac_sec)); + tspec->tv_nsec = mul_u64_u64_shr(frac_sec, NSEC_PER_SEC, 64); + tspec->tv_sec += le64_to_cpu(st->clk->time_sec); + + if (!tai_adjust(st->clk, &tspec->tv_sec)) + return -EINVAL; + + /* + * This pairs with a write barrier in the hypervisor + * which populates this structure. + */ + virt_rmb(); + if (seq == le32_to_cpu(st->clk->seq_count)) + break; + + if (ktime_after(ktime_get(), deadline)) + return -ETIMEDOUT; + } + + if (system_counter) { + system_counter->cycles = cycle; + system_counter->cs_id = st->cs_id; + } + + if (sts) { + sts->pre_ts = ktime_to_timespec64(systime_snapshot.real); + if (systime_snapshot.cs_id == st->cs_id) + sts->post_ts = sts->pre_ts; + } + + return 0; +} + +#ifdef SUPPORT_KVMCLOCK +/* + * In the case where the system is using the KVM clock for timekeeping, convert + * the TSC value into a KVM clock time in order to return a paired reading that + * get_device_system_crosststamp() can cope with. + */ +static int vmclock_get_crosststamp_kvmclock(struct vmclock_state *st, + struct ptp_system_timestamp *sts, + struct system_counterval_t *system_counter, + struct timespec64 *tspec) +{ + struct pvclock_vcpu_time_info *pvti = this_cpu_pvti(); + unsigned int pvti_ver; + int ret; + + preempt_disable_notrace(); + + do { + pvti_ver = pvclock_read_begin(pvti); + + ret = vmclock_get_crosststamp(st, sts, system_counter, tspec); + if (ret) + break; + + system_counter->cycles = __pvclock_read_cycles(pvti, + system_counter->cycles); + system_counter->cs_id = CSID_X86_KVM_CLK; + + /* + * This retry should never really happen; if the TSC is + * stable and reliable enough across vCPUs that it is sane + * for the hypervisor to expose a VMCLOCK device which uses + * it as the reference counter, then the KVM clock should be + * in 'master clock mode' and basically never changed. But + * the KVM clock is a fickle and often broken thing, so do + * it "properly" just in case. + */ + } while (pvclock_read_retry(pvti, pvti_ver)); + + preempt_enable_notrace(); + + return ret; +} +#endif + +static int ptp_vmclock_get_time_fn(ktime_t *device_time, + struct system_counterval_t *system_counter, + void *ctx) +{ + struct vmclock_state *st = ctx; + struct timespec64 tspec; + int ret; + +#ifdef SUPPORT_KVMCLOCK + if (READ_ONCE(st->sys_cs_id) == CSID_X86_KVM_CLK) + ret = vmclock_get_crosststamp_kvmclock(st, NULL, system_counter, + &tspec); + else +#endif + ret = vmclock_get_crosststamp(st, NULL, system_counter, &tspec); + + if (!ret) + *device_time = timespec64_to_ktime(tspec); + + return ret; +} + +static int ptp_vmclock_getcrosststamp(struct ptp_clock_info *ptp, + struct system_device_crosststamp *xtstamp) +{ + struct vmclock_state *st = container_of(ptp, struct vmclock_state, + ptp_clock_info); + int ret = get_device_system_crosststamp(ptp_vmclock_get_time_fn, st, + NULL, xtstamp); +#ifdef SUPPORT_KVMCLOCK + /* + * On x86, the KVM clock may be used for the system time. We can + * actually convert a TSC reading to that, and return a paired + * timestamp that get_device_system_crosststamp() *can* handle. 
+ */ + if (ret == -ENODEV) { + struct system_time_snapshot systime_snapshot; + + ktime_get_snapshot(&systime_snapshot); + + if (systime_snapshot.cs_id == CSID_X86_TSC || + systime_snapshot.cs_id == CSID_X86_KVM_CLK) { + WRITE_ONCE(st->sys_cs_id, systime_snapshot.cs_id); + ret = get_device_system_crosststamp(ptp_vmclock_get_time_fn, + st, NULL, xtstamp); + } + } +#endif + return ret; +} + +/* + * PTP clock operations + */ + +static int ptp_vmclock_adjfine(struct ptp_clock_info *ptp, long delta) +{ + return -EOPNOTSUPP; +} + +static int ptp_vmclock_adjtime(struct ptp_clock_info *ptp, s64 delta) +{ + return -EOPNOTSUPP; +} + +static int ptp_vmclock_settime(struct ptp_clock_info *ptp, + const struct timespec64 *ts) +{ + return -EOPNOTSUPP; +} + +static int ptp_vmclock_gettimex(struct ptp_clock_info *ptp, struct timespec64 *ts, + struct ptp_system_timestamp *sts) +{ + struct vmclock_state *st = container_of(ptp, struct vmclock_state, + ptp_clock_info); + + return vmclock_get_crosststamp(st, sts, NULL, ts); +} + +static int ptp_vmclock_enable(struct ptp_clock_info *ptp, + struct ptp_clock_request *rq, int on) +{ + return -EOPNOTSUPP; +} + +static const struct ptp_clock_info ptp_vmclock_info = { + .owner = THIS_MODULE, + .max_adj = 0, + .n_ext_ts = 0, + .n_pins = 0, + .pps = 0, + .adjfine = ptp_vmclock_adjfine, + .adjtime = ptp_vmclock_adjtime, + .gettimex64 = ptp_vmclock_gettimex, + .settime64 = ptp_vmclock_settime, + .enable = ptp_vmclock_enable, + .getcrosststamp = ptp_vmclock_getcrosststamp, +}; + +static struct ptp_clock *vmclock_ptp_register(struct device *dev, + struct vmclock_state *st) +{ + enum clocksource_ids cs_id; + + if (IS_ENABLED(CONFIG_ARM64) && + st->clk->counter_id == VMCLOCK_COUNTER_ARM_VCNT) { + /* Can we check it's the virtual counter? 
*/ + cs_id = CSID_ARM_ARCH_COUNTER; + } else if (IS_ENABLED(CONFIG_X86) && + st->clk->counter_id == VMCLOCK_COUNTER_X86_TSC) { + cs_id = CSID_X86_TSC; + } else { + return NULL; + } + + /* Only UTC, or TAI with offset */ + if (!tai_adjust(st->clk, NULL)) { + dev_info(dev, "vmclock does not provide unambiguous UTC\n"); + return NULL; + } + + st->sys_cs_id = cs_id; + st->cs_id = cs_id; + st->ptp_clock_info = ptp_vmclock_info; + strscpy(st->ptp_clock_info.name, st->name); + + return ptp_clock_register(&st->ptp_clock_info, dev); +} + +static int vmclock_miscdev_mmap(struct file *fp, struct vm_area_struct *vma) +{ + struct vmclock_state *st = container_of(fp->private_data, + struct vmclock_state, miscdev); + + if ((vma->vm_flags & (VM_READ|VM_WRITE)) != VM_READ) + return -EROFS; + + if (vma->vm_end - vma->vm_start != PAGE_SIZE || vma->vm_pgoff) + return -EINVAL; + + if (io_remap_pfn_range(vma, vma->vm_start, + st->res.start >> PAGE_SHIFT, PAGE_SIZE, + vma->vm_page_prot)) + return -EAGAIN; + + return 0; +} + +static ssize_t vmclock_miscdev_read(struct file *fp, char __user *buf, + size_t count, loff_t *ppos) +{ + struct vmclock_state *st = container_of(fp->private_data, + struct vmclock_state, miscdev); + ktime_t deadline = ktime_add(ktime_get(), VMCLOCK_MAX_WAIT); + size_t max_count; + uint32_t seq; + + if (*ppos >= PAGE_SIZE) + return 0; + + max_count = PAGE_SIZE - *ppos; + if (count > max_count) + count = max_count; + + while (1) { + seq = le32_to_cpu(st->clk->seq_count) & ~1U; + /* Pairs with hypervisor wmb */ + virt_rmb(); + + if (copy_to_user(buf, ((char *)st->clk) + *ppos, count)) + return -EFAULT; + + /* Pairs with hypervisor wmb */ + virt_rmb(); + if (seq == le32_to_cpu(st->clk->seq_count)) + break; + + if (ktime_after(ktime_get(), deadline)) + return -ETIMEDOUT; + } + + *ppos += count; + return count; +} + +static const struct file_operations vmclock_miscdev_fops = { + .mmap = vmclock_miscdev_mmap, + .read = vmclock_miscdev_read, +}; + +/* module operations */ + +static void vmclock_remove(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct vmclock_state *st = dev_get_drvdata(dev); + + if (st->ptp_clock) + ptp_clock_unregister(st->ptp_clock); + + if (st->miscdev.minor != MISC_DYNAMIC_MINOR) + misc_deregister(&st->miscdev); +} + +static acpi_status vmclock_acpi_resources(struct acpi_resource *ares, void *data) +{ + struct vmclock_state *st = data; + struct resource_win win; + struct resource *res = &win.res; + + if (ares->type == ACPI_RESOURCE_TYPE_END_TAG) + return AE_OK; + + /* There can be only one */ + if (resource_type(&st->res) == IORESOURCE_MEM) + return AE_ERROR; + + if (acpi_dev_resource_memory(ares, res) || + acpi_dev_resource_address_space(ares, &win)) { + + if (resource_type(res) != IORESOURCE_MEM || + resource_size(res) < sizeof(st->clk)) + return AE_ERROR; + + st->res = *res; + return AE_OK; + } + + return AE_ERROR; +} + +static int vmclock_probe_acpi(struct device *dev, struct vmclock_state *st) +{ + struct acpi_device *adev = ACPI_COMPANION(dev); + acpi_status status; + + /* + * This should never happen as this function is only called when + * has_acpi_companion(dev) is true, but the logic is sufficiently + * complex that Coverity can't see the tautology. 
+ */ + if (!adev) + return -ENODEV; + + status = acpi_walk_resources(adev->handle, METHOD_NAME__CRS, + vmclock_acpi_resources, st); + if (ACPI_FAILURE(status) || resource_type(&st->res) != IORESOURCE_MEM) { + dev_err(dev, "failed to get resources\n"); + return -ENODEV; + } + + return 0; +} + +static void vmclock_put_idx(void *data) +{ + struct vmclock_state *st = data; + + ida_free(&vmclock_ida, st->index); +} + +static int vmclock_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct vmclock_state *st; + int ret; + + st = devm_kzalloc(dev, sizeof(*st), GFP_KERNEL); + if (!st) + return -ENOMEM; + + if (has_acpi_companion(dev)) + ret = vmclock_probe_acpi(dev, st); + else + ret = -EINVAL; /* Only ACPI for now */ + + if (ret) { + dev_info(dev, "Failed to obtain physical address: %d\n", ret); + goto out; + } + + if (resource_size(&st->res) < VMCLOCK_MIN_SIZE) { + dev_info(dev, "Region too small (0x%llx)\n", + resource_size(&st->res)); + ret = -EINVAL; + goto out; + } + st->clk = devm_memremap(dev, st->res.start, resource_size(&st->res), + MEMREMAP_WB | MEMREMAP_DEC); + if (IS_ERR(st->clk)) { + ret = PTR_ERR(st->clk); + dev_info(dev, "failed to map shared memory\n"); + st->clk = NULL; + goto out; + } + + if (le32_to_cpu(st->clk->magic) != VMCLOCK_MAGIC || + le32_to_cpu(st->clk->size) > resource_size(&st->res) || + le16_to_cpu(st->clk->version) != 1) { + dev_info(dev, "vmclock magic fields invalid\n"); + ret = -EINVAL; + goto out; + } + + ret = ida_alloc(&vmclock_ida, GFP_KERNEL); + if (ret < 0) + goto out; + + st->index = ret; + ret = devm_add_action_or_reset(&pdev->dev, vmclock_put_idx, st); + if (ret) + goto out; + + st->name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "vmclock%d", st->index); + if (!st->name) { + ret = -ENOMEM; + goto out; + } + + /* + * If the structure is big enough, it can be mapped to userspace. + * Theoretically a guest OS even using larger pages could still + * use 4KiB PTEs to map smaller MMIO regions like this, but let's + * cross that bridge if/when we come to it. + */ + if (le32_to_cpu(st->clk->size) >= PAGE_SIZE) { + st->miscdev.minor = MISC_DYNAMIC_MINOR; + st->miscdev.fops = &vmclock_miscdev_fops; + st->miscdev.name = st->name; + + ret = misc_register(&st->miscdev); + if (ret) + goto out; + } + + /* If there is valid clock information, register a PTP clock */ + if (VMCLOCK_FIELD_PRESENT(st->clk, time_frac_sec)) { + /* Can return a silent NULL, or an error. */ + st->ptp_clock = vmclock_ptp_register(dev, st); + if (IS_ERR(st->ptp_clock)) { + ret = PTR_ERR(st->ptp_clock); + st->ptp_clock = NULL; + vmclock_remove(pdev); + goto out; + } + } + + if (!st->miscdev.minor && !st->ptp_clock) { + /* Neither miscdev nor PTP registered */ + dev_info(dev, "vmclock: Neither miscdev nor PTP available; not registering\n"); + ret = -ENODEV; + goto out; + } + + dev_info(dev, "%s: registered %s%s%s\n", st->name, + st->miscdev.minor ? "miscdev" : "", + (st->miscdev.minor && st->ptp_clock) ? ", " : "", + st->ptp_clock ? 
"PTP" : ""); + + dev_set_drvdata(dev, st); + + out: + return ret; +} + +static const struct acpi_device_id vmclock_acpi_ids[] = { + { "AMZNC10C", 0 }, + {} +}; +MODULE_DEVICE_TABLE(acpi, vmclock_acpi_ids); + +static struct platform_driver vmclock_platform_driver = { + .probe = vmclock_probe, + .remove_new = vmclock_remove, + .driver = { + .name = "vmclock", + .acpi_match_table = vmclock_acpi_ids, + }, +}; + +module_platform_driver(vmclock_platform_driver) + +MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org>"); +MODULE_DESCRIPTION("PTP clock using VMCLOCK"); +MODULE_LICENSE("GPL"); diff --git a/include/linux/avf/virtchnl.h b/include/linux/avf/virtchnl.h index f41395264dca..13a11f3c09b8 100644 --- a/include/linux/avf/virtchnl.h +++ b/include/linux/avf/virtchnl.h @@ -89,6 +89,9 @@ enum virtchnl_rx_hsplit { VIRTCHNL_RX_HSPLIT_SPLIT_SCTP = 8, }; +enum virtchnl_bw_limit_type { + VIRTCHNL_BW_SHAPER = 0, +}; /* END GENERIC DEFINES */ /* Opcodes for VF-PF communication. These are placed in the v_opcode field @@ -151,6 +154,11 @@ enum virtchnl_ops { VIRTCHNL_OP_DISABLE_VLAN_STRIPPING_V2 = 55, VIRTCHNL_OP_ENABLE_VLAN_INSERTION_V2 = 56, VIRTCHNL_OP_DISABLE_VLAN_INSERTION_V2 = 57, + /* opcode 57 - 65 are reserved */ + VIRTCHNL_OP_GET_QOS_CAPS = 66, + /* opcode 68 through 111 are reserved */ + VIRTCHNL_OP_CONFIG_QUEUE_BW = 112, + VIRTCHNL_OP_CONFIG_QUANTA = 113, VIRTCHNL_OP_MAX, }; @@ -261,6 +269,7 @@ VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_vsi_resource); #define VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC BIT(26) #define VIRTCHNL_VF_OFFLOAD_ADV_RSS_PF BIT(27) #define VIRTCHNL_VF_OFFLOAD_FDIR_PF BIT(28) +#define VIRTCHNL_VF_OFFLOAD_QOS BIT(29) #define VF_BASE_MODE_OFFLOADS (VIRTCHNL_VF_OFFLOAD_L2 | \ VIRTCHNL_VF_OFFLOAD_VLAN | \ @@ -1416,6 +1425,86 @@ struct virtchnl_fdir_del { VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_fdir_del); +struct virtchnl_shaper_bw { + /* Unit is Kbps */ + u32 committed; + u32 peak; +}; + +VIRTCHNL_CHECK_STRUCT_LEN(8, virtchnl_shaper_bw); + +/* VIRTCHNL_OP_GET_QOS_CAPS + * VF sends this message to get its QoS Caps, such as + * TC number, Arbiter and Bandwidth. 
+ */ +struct virtchnl_qos_cap_elem { + u8 tc_num; + u8 tc_prio; +#define VIRTCHNL_ABITER_STRICT 0 +#define VIRTCHNL_ABITER_ETS 2 + u8 arbiter; +#define VIRTCHNL_STRICT_WEIGHT 1 + u8 weight; + enum virtchnl_bw_limit_type type; + union { + struct virtchnl_shaper_bw shaper; + u8 pad2[32]; + }; +}; + +VIRTCHNL_CHECK_STRUCT_LEN(40, virtchnl_qos_cap_elem); + +struct virtchnl_qos_cap_list { + u16 vsi_id; + u16 num_elem; + struct virtchnl_qos_cap_elem cap[]; +}; + +VIRTCHNL_CHECK_STRUCT_LEN(4, virtchnl_qos_cap_list); +#define virtchnl_qos_cap_list_LEGACY_SIZEOF 44 + +/* VIRTCHNL_OP_CONFIG_QUEUE_BW */ +struct virtchnl_queue_bw { + u16 queue_id; + u8 tc; + u8 pad; + struct virtchnl_shaper_bw shaper; +}; + +VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_queue_bw); + +struct virtchnl_queues_bw_cfg { + u16 vsi_id; + u16 num_queues; + struct virtchnl_queue_bw cfg[]; +}; + +VIRTCHNL_CHECK_STRUCT_LEN(4, virtchnl_queues_bw_cfg); +#define virtchnl_queues_bw_cfg_LEGACY_SIZEOF 16 + +enum virtchnl_queue_type { + VIRTCHNL_QUEUE_TYPE_TX = 0, + VIRTCHNL_QUEUE_TYPE_RX = 1, +}; + +/* structure to specify a chunk of contiguous queues */ +struct virtchnl_queue_chunk { + /* see enum virtchnl_queue_type */ + s32 type; + u16 start_queue_id; + u16 num_queues; +}; + +VIRTCHNL_CHECK_STRUCT_LEN(8, virtchnl_queue_chunk); + +struct virtchnl_quanta_cfg { + u16 quanta_size; + u16 pad; + struct virtchnl_queue_chunk queue_select; +}; + +VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_quanta_cfg); + #define __vss_byone(p, member, count, old) \ (struct_size(p, member, count) + (old - 1 - struct_size(p, member, 0))) @@ -1438,6 +1527,8 @@ VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_fdir_del); __vss(virtchnl_vlan_filter_list_v2, __vss_byelem, p, m, c), \ __vss(virtchnl_tc_info, __vss_byelem, p, m, c), \ __vss(virtchnl_rdma_qvlist_info, __vss_byelem, p, m, c), \ + __vss(virtchnl_qos_cap_list, __vss_byelem, p, m, c), \ + __vss(virtchnl_queues_bw_cfg, __vss_byelem, p, m, c), \ __vss(virtchnl_rss_key, __vss_byone, p, m, c), \ __vss(virtchnl_rss_lut, __vss_byone, p, m, c)) @@ -1637,6 +1728,35 @@ virtchnl_vc_validate_vf_msg(struct virtchnl_version_info *ver, u32 v_opcode, case VIRTCHNL_OP_DISABLE_VLAN_INSERTION_V2: valid_len = sizeof(struct virtchnl_vlan_setting); break; + case VIRTCHNL_OP_GET_QOS_CAPS: + break; + case VIRTCHNL_OP_CONFIG_QUEUE_BW: + valid_len = virtchnl_queues_bw_cfg_LEGACY_SIZEOF; + if (msglen >= valid_len) { + struct virtchnl_queues_bw_cfg *q_bw = + (struct virtchnl_queues_bw_cfg *)msg; + + valid_len = virtchnl_struct_size(q_bw, cfg, + q_bw->num_queues); + if (q_bw->num_queues == 0) { + err_msg_format = true; + break; + } + } + break; + case VIRTCHNL_OP_CONFIG_QUANTA: + valid_len = sizeof(struct virtchnl_quanta_cfg); + if (msglen >= valid_len) { + struct virtchnl_quanta_cfg *q_quanta = + (struct virtchnl_quanta_cfg *)msg; + + if (q_quanta->quanta_size == 0 || + q_quanta->queue_select.num_queues == 0) { + err_msg_format = true; + break; + } + } + break; /* These are always errors coming from the VF. 
*/ case VIRTCHNL_OP_EVENT: case VIRTCHNL_OP_UNKNOWN: diff --git a/include/linux/bpf-cgroup.h b/include/linux/bpf-cgroup.h index ce91d9b2acb9..f0f219271daf 100644 --- a/include/linux/bpf-cgroup.h +++ b/include/linux/bpf-cgroup.h @@ -209,7 +209,7 @@ static inline bool cgroup_bpf_sock_enabled(struct sock *sk, int __ret = 0; \ if (cgroup_bpf_enabled(CGROUP_INET_EGRESS) && sk) { \ typeof(sk) __sk = sk_to_full_sk(sk); \ - if (sk_fullsock(__sk) && __sk == skb_to_full_sk(skb) && \ + if (__sk && __sk == skb_to_full_sk(skb) && \ cgroup_bpf_sock_enabled(__sk, CGROUP_INET_EGRESS)) \ __ret = __cgroup_bpf_run_filter_skb(__sk, skb, \ CGROUP_INET_EGRESS); \ diff --git a/include/linux/if_ltalk.h b/include/linux/if_ltalk.h deleted file mode 100644 index 4cc1c0b77870..000000000000 --- a/include/linux/if_ltalk.h +++ /dev/null @@ -1,8 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifndef __LINUX_LTALK_H -#define __LINUX_LTALK_H - -#include <uapi/linux/if_ltalk.h> - -extern struct net_device *alloc_ltalkdev(int sizeof_priv); -#endif diff --git a/include/linux/inetdevice.h b/include/linux/inetdevice.h index cb5280e6cc21..d9c690c8c80b 100644 --- a/include/linux/inetdevice.h +++ b/include/linux/inetdevice.h @@ -141,7 +141,7 @@ static inline void ipv4_devconf_setall(struct in_device *in_dev) ARP_EVICT_NOCARRIER) struct in_ifaddr { - struct hlist_node hash; + struct hlist_node addr_lst; struct in_ifaddr __rcu *ifa_next; struct in_device *ifa_dev; struct rcu_head rcu_head; diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h index e23c692a34c7..fc7e6153b73d 100644 --- a/include/linux/mlx5/driver.h +++ b/include/linux/mlx5/driver.h @@ -45,7 +45,6 @@ #include <linux/workqueue.h> #include <linux/mempool.h> #include <linux/interrupt.h> -#include <linux/idr.h> #include <linux/notifier.h> #include <linux/refcount.h> #include <linux/auxiliary_bus.h> @@ -474,36 +473,6 @@ struct mlx5_core_sriov { u16 max_ec_vfs; }; -struct mlx5_fc_pool { - struct mlx5_core_dev *dev; - struct mutex pool_lock; /* protects pool lists */ - struct list_head fully_used; - struct list_head partially_used; - struct list_head unused; - int available_fcs; - int used_fcs; - int threshold; -}; - -struct mlx5_fc_stats { - spinlock_t counters_idr_lock; /* protects counters_idr */ - struct idr counters_idr; - struct list_head counters; - struct llist_head addlist; - struct llist_head dellist; - - struct workqueue_struct *wq; - struct delayed_work work; - unsigned long next_query; - unsigned long sampling_interval; /* jiffies */ - u32 *bulk_query_out; - int bulk_query_len; - size_t num_counters; - bool bulk_query_alloc_failed; - unsigned long next_bulk_query_alloc; - struct mlx5_fc_pool fc_pool; -}; - struct mlx5_events; struct mlx5_mpfs; struct mlx5_eswitch; @@ -630,7 +599,7 @@ struct mlx5_priv { struct mlx5_devcom_comp_dev *hca_devcom_comp; struct mlx5_fw_reset *fw_reset; struct mlx5_core_roce roce; - struct mlx5_fc_stats fc_stats; + struct mlx5_fc_stats *fc_stats; struct mlx5_rl_table rl_table; struct mlx5_ft_pool *ft_pool; diff --git a/include/linux/mlx5/fs.h b/include/linux/mlx5/fs.h index b744e554f014..438db888bde0 100644 --- a/include/linux/mlx5/fs.h +++ b/include/linux/mlx5/fs.h @@ -298,9 +298,6 @@ int mlx5_modify_rule_destination(struct mlx5_flow_handle *handler, struct mlx5_fc *mlx5_fc_create(struct mlx5_core_dev *dev, bool aging); -/* As mlx5_fc_create() but doesn't queue stats refresh thread. 
*/ -struct mlx5_fc *mlx5_fc_create_ex(struct mlx5_core_dev *dev, bool aging); - void mlx5_fc_destroy(struct mlx5_core_dev *dev, struct mlx5_fc *counter); u64 mlx5_fc_query_lastuse(struct mlx5_fc *counter); void mlx5_fc_query_cached(struct mlx5_fc *counter, diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h index 96d369112bfa..c79ba6197673 100644 --- a/include/linux/mlx5/mlx5_ifc.h +++ b/include/linux/mlx5/mlx5_ifc.h @@ -4105,11 +4105,47 @@ enum { ELEMENT_TYPE_CAP_MASK_QUEUE_GROUP = 1 << 4, }; +enum { + TSAR_ELEMENT_TSAR_TYPE_DWRR = 0x0, + TSAR_ELEMENT_TSAR_TYPE_ROUND_ROBIN = 0x1, + TSAR_ELEMENT_TSAR_TYPE_ETS = 0x2, +}; + +enum { + TSAR_TYPE_CAP_MASK_DWRR = 1 << 0, + TSAR_TYPE_CAP_MASK_ROUND_ROBIN = 1 << 1, + TSAR_TYPE_CAP_MASK_ETS = 1 << 2, +}; + +struct mlx5_ifc_tsar_element_bits { + u8 reserved_at_0[0x8]; + u8 tsar_type[0x8]; + u8 reserved_at_10[0x10]; +}; + +struct mlx5_ifc_vport_element_bits { + u8 reserved_at_0[0x10]; + u8 vport_number[0x10]; +}; + +struct mlx5_ifc_vport_tc_element_bits { + u8 traffic_class[0x4]; + u8 reserved_at_4[0xc]; + u8 vport_number[0x10]; +}; + +union mlx5_ifc_element_attributes_bits { + struct mlx5_ifc_tsar_element_bits tsar; + struct mlx5_ifc_vport_element_bits vport; + struct mlx5_ifc_vport_tc_element_bits vport_tc; + u8 reserved_at_0[0x20]; +}; + struct mlx5_ifc_scheduling_context_bits { u8 element_type[0x8]; u8 reserved_at_8[0x18]; - u8 element_attributes[0x20]; + union mlx5_ifc_element_attributes_bits element_attributes; u8 parent_element_id[0x20]; @@ -4798,35 +4834,6 @@ struct mlx5_ifc_register_loopback_control_bits { u8 reserved_at_20[0x60]; }; -struct mlx5_ifc_vport_tc_element_bits { - u8 traffic_class[0x4]; - u8 reserved_at_4[0xc]; - u8 vport_number[0x10]; -}; - -struct mlx5_ifc_vport_element_bits { - u8 reserved_at_0[0x10]; - u8 vport_number[0x10]; -}; - -enum { - TSAR_ELEMENT_TSAR_TYPE_DWRR = 0x0, - TSAR_ELEMENT_TSAR_TYPE_ROUND_ROBIN = 0x1, - TSAR_ELEMENT_TSAR_TYPE_ETS = 0x2, -}; - -enum { - TSAR_TYPE_CAP_MASK_DWRR = 1 << 0, - TSAR_TYPE_CAP_MASK_ROUND_ROBIN = 1 << 1, - TSAR_TYPE_CAP_MASK_ETS = 1 << 2, -}; - -struct mlx5_ifc_tsar_element_bits { - u8 reserved_at_0[0x8]; - u8 tsar_type[0x8]; - u8 reserved_at_10[0x10]; -}; - enum { MLX5_TEARDOWN_HCA_OUT_FORCE_STATE_SUCCESS = 0x0, MLX5_TEARDOWN_HCA_OUT_FORCE_STATE_FAIL = 0x1, diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 8896705ccd63..389f92e4d980 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -343,6 +343,15 @@ struct gro_list { #define GRO_HASH_BUCKETS 8 /* + * Structure for per-NAPI config + */ +struct napi_config { + u64 gro_flush_timeout; + u32 defer_hard_irqs; + unsigned int napi_id; +}; + +/* * Structure for NAPI scheduling similar to tasklet but with weighting */ struct napi_struct { @@ -373,10 +382,14 @@ struct napi_struct { unsigned int napi_id; struct hrtimer timer; struct task_struct *thread; + unsigned long gro_flush_timeout; + u32 defer_hard_irqs; /* control-path-only fields follow */ struct list_head dev_list; struct hlist_node napi_hash_node; int irq; + int index; + struct napi_config *config; }; enum { @@ -1412,8 +1425,7 @@ struct net_device_ops { __be16 proto, u16 vid); #ifdef CONFIG_NET_POLL_CONTROLLER void (*ndo_poll_controller)(struct net_device *dev); - int (*ndo_netpoll_setup)(struct net_device *dev, - struct netpoll_info *info); + int (*ndo_netpoll_setup)(struct net_device *dev); void (*ndo_netpoll_cleanup)(struct net_device *dev); #endif int (*ndo_set_vf_mac)(struct net_device *dev, @@ -1603,6 +1615,14 
@@ struct net_device_ops { int (*ndo_hwtstamp_set)(struct net_device *dev, struct kernel_hwtstamp_config *kernel_config, struct netlink_ext_ack *extack); + +#if IS_ENABLED(CONFIG_NET_SHAPER) + /** + * @net_shaper_ops: Device shaping offload operations + * see include/net/net_shapers.h + */ + const struct net_shaper_ops *net_shaper_ops; +#endif }; /** @@ -1858,9 +1878,6 @@ enum netdev_reg_state { * allocated at register_netdev() time * @real_num_rx_queues: Number of RX queues currently active in device * @xdp_prog: XDP sockets filter program pointer - * @gro_flush_timeout: timeout for GRO layer in NAPI - * @napi_defer_hard_irqs: If not zero, provides a counter that would - * allow to avoid NIC hard IRQ, on busy queues. * * @rx_handler: handler for received packets * @rx_handler_data: XXX: need comments on this one @@ -2009,6 +2026,13 @@ enum netdev_reg_state { * @dpll_pin: Pointer to the SyncE source pin of a DPLL subsystem, * where the clock is recovered. * + * @max_pacing_offload_horizon: max EDT offload horizon in nsec. + * @napi_config: An array of napi_config structures containing per-NAPI + * settings. + * @gro_flush_timeout: timeout for GRO layer in NAPI + * @napi_defer_hard_irqs: If not zero, provides a counter that would + * allow to avoid NIC hard IRQ, on busy queues. + * * FIXME: cleanup struct net_device such that network protocol info * moves out. */ @@ -2074,8 +2098,6 @@ struct net_device { int ifindex; unsigned int real_num_rx_queues; struct netdev_rx_queue *_rx; - unsigned long gro_flush_timeout; - u32 napi_defer_hard_irqs; unsigned int gro_max_size; unsigned int gro_ipv4_max_size; rx_handler_func_t __rcu *rx_handler; @@ -2209,6 +2231,9 @@ struct net_device { /* Protocol-specific pointers */ struct in_device __rcu *ip_ptr; + /** @fib_nh_head: nexthops associated with this netdev */ + struct hlist_head fib_nh_head; + #if IS_ENABLED(CONFIG_VLAN_8021Q) struct vlan_info __rcu *vlan_info; #endif @@ -2399,6 +2424,24 @@ struct net_device { /** @irq_moder: dim parameters used if IS_ENABLED(CONFIG_DIMLIB). */ struct dim_irq_moder *irq_moder; + u64 max_pacing_offload_horizon; + struct napi_config *napi_config; + unsigned long gro_flush_timeout; + u32 napi_defer_hard_irqs; + + /** + * @lock: protects @net_shaper_hierarchy, feel free to use for other + * netdev-scope protection. Ordering: take after rtnl_lock. + */ + struct mutex lock; + +#if IS_ENABLED(CONFIG_NET_SHAPER) + /** + * @net_shaper_hierarchy: data tracking the current shaper status + * see include/net/net_shapers.h + */ + struct net_shaper_hierarchy *net_shaper_hierarchy; +#endif u8 priv[] ____cacheline_aligned __counted_by(priv_len); } ____cacheline_aligned; @@ -2649,6 +2692,22 @@ netif_napi_add_tx_weight(struct net_device *dev, } /** + * netif_napi_add_config - initialize a NAPI context with persistent config + * @dev: network device + * @napi: NAPI context + * @poll: polling function + * @index: the NAPI index + */ +static inline void +netif_napi_add_config(struct net_device *dev, struct napi_struct *napi, + int (*poll)(struct napi_struct *, int), int index) +{ + napi->index = index; + napi->config = &dev->napi_config[index]; + netif_napi_add_weight(dev, napi, poll, NAPI_POLL_WEIGHT); +} + +/** * netif_napi_add_tx() - initialize a NAPI context to be used for Tx only * @dev: network device * @napi: NAPI context @@ -3470,7 +3529,7 @@ static inline void netdev_tx_sent_queue(struct netdev_queue *dev_queue, * because in netdev_tx_completed_queue we update the dql_completed * before checking the XOFF flag. 
*/ - smp_mb(); + smp_mb__after_atomic(); /* check again in case another CPU has just made room avail */ if (unlikely(dql_avail(&dev_queue->dql) >= 0)) diff --git a/include/linux/netlink.h b/include/linux/netlink.h index b332c2048c75..a3ca198a3a9e 100644 --- a/include/linux/netlink.h +++ b/include/linux/netlink.h @@ -34,6 +34,7 @@ struct netlink_skb_parms { #define NETLINK_CB(skb) (*(struct netlink_skb_parms*)&((skb)->cb)) #define NETLINK_CREDS(skb) (&NETLINK_CB((skb)).creds) +#define NETLINK_CTX_SIZE 48 void netlink_table_grab(void); @@ -293,7 +294,7 @@ struct netlink_callback { int flags; bool strict_check; union { - u8 ctx[48]; + u8 ctx[NETLINK_CTX_SIZE]; /* args is deprecated. Cast a struct over ctx instead * for proper type safety. @@ -302,7 +303,7 @@ struct netlink_callback { }; }; -#define NL_ASSERT_DUMP_CTX_FITS(type_name) \ +#define NL_ASSERT_CTX_FITS(type_name) \ BUILD_BUG_ON(sizeof(type_name) > \ sizeof_field(struct netlink_callback, ctx)) diff --git a/include/linux/packing.h b/include/linux/packing.h index 8d6571feb95d..5d36dcd06f60 100644 --- a/include/linux/packing.h +++ b/include/linux/packing.h @@ -17,33 +17,13 @@ enum packing_op { UNPACK, }; -/** - * packing - Convert numbers (currently u64) between a packed and an unpacked - * format. Unpacked means laid out in memory in the CPU's native - * understanding of integers, while packed means anything else that - * requires translation. - * - * @pbuf: Pointer to a buffer holding the packed value. - * @uval: Pointer to an u64 holding the unpacked value. - * @startbit: The index (in logical notation, compensated for quirks) where - * the packed value starts within pbuf. Must be larger than, or - * equal to, endbit. - * @endbit: The index (in logical notation, compensated for quirks) where - * the packed value ends within pbuf. Must be smaller than, or equal - * to, startbit. - * @op: If PACK, then uval will be treated as const pointer and copied (packed) - * into pbuf, between startbit and endbit. - * If UNPACK, then pbuf will be treated as const pointer and the logical - * value between startbit and endbit will be copied (unpacked) to uval. - * @quirks: A bit mask of QUIRK_LITTLE_ENDIAN, QUIRK_LSW32_IS_FIRST and - * QUIRK_MSB_ON_THE_RIGHT. - * - * Return: 0 on success, EINVAL or ERANGE if called incorrectly. Assuming - * correct usage, return code may be discarded. - * If op is PACK, pbuf is modified. - * If op is UNPACK, uval is modified. 
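With the kernel-doc above going away, the replacement API deserves a usage note: pack() and unpack() split the old bidirectional packing() call by direction and take size_t bit offsets. A minimal sketch of a round trip over a 4-byte buffer with no quirks (the field placement is arbitrary)::

  #include <linux/packing.h>

  static int foo_pack_example(void)
  {
          u8 buf[4] = {};
          u64 val;
          int err;

          /* Store 0xAB in logical bits 15..8 of the buffer. */
          err = pack(buf, 0xAB, 15, 8, sizeof(buf), 0);
          if (err)
                  return err;

          /* Read the same field back; val ends up as 0xAB. */
          err = unpack(buf, &val, 15, 8, sizeof(buf), 0);
          return err;
  }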
- */ int packing(void *pbuf, u64 *uval, int startbit, int endbit, size_t pbuflen, enum packing_op op, u8 quirks); +int pack(void *pbuf, u64 uval, size_t startbit, size_t endbit, size_t pbuflen, + u8 quirks); + +int unpack(const void *pbuf, u64 *uval, size_t startbit, size_t endbit, + size_t pbuflen, u8 quirks); + #endif diff --git a/include/linux/pcs/pcs-xpcs.h b/include/linux/pcs/pcs-xpcs.h index b4a4eb6c8866..b5b5d17998b8 100644 --- a/include/linux/pcs/pcs-xpcs.h +++ b/include/linux/pcs/pcs-xpcs.h @@ -21,8 +21,6 @@ #define DW_AN_C37_1000BASEX 4 #define DW_10GBASER 5 -struct dw_xpcs_desc; - enum dw_xpcs_pcs_id { DW_XPCS_ID_NATIVE = 0, NXP_SJA1105_XPCS_ID = 0x00000010, @@ -48,33 +46,18 @@ struct dw_xpcs_info { u32 pma; }; -enum dw_xpcs_clock { - DW_XPCS_CORE_CLK, - DW_XPCS_PAD_CLK, - DW_XPCS_NUM_CLKS, -}; - -struct dw_xpcs { - struct dw_xpcs_info info; - const struct dw_xpcs_desc *desc; - struct mdio_device *mdiodev; - struct clk_bulk_data clks[DW_XPCS_NUM_CLKS]; - struct phylink_pcs pcs; - phy_interface_t interface; -}; +struct dw_xpcs; +struct phylink_pcs *xpcs_to_phylink_pcs(struct dw_xpcs *xpcs); int xpcs_get_an_mode(struct dw_xpcs *xpcs, phy_interface_t interface); -void xpcs_link_up(struct phylink_pcs *pcs, unsigned int neg_mode, - phy_interface_t interface, int speed, int duplex); -int xpcs_do_config(struct dw_xpcs *xpcs, phy_interface_t interface, - const unsigned long *advertising, unsigned int neg_mode); void xpcs_get_interfaces(struct dw_xpcs *xpcs, unsigned long *interfaces); int xpcs_config_eee(struct dw_xpcs *xpcs, int mult_fact_100ns, int enable); -struct dw_xpcs *xpcs_create_mdiodev(struct mii_bus *bus, int addr, - phy_interface_t interface); -struct dw_xpcs *xpcs_create_fwnode(struct fwnode_handle *fwnode, - phy_interface_t interface); +struct dw_xpcs *xpcs_create_mdiodev(struct mii_bus *bus, int addr); +struct dw_xpcs *xpcs_create_fwnode(struct fwnode_handle *fwnode); void xpcs_destroy(struct dw_xpcs *xpcs); +struct phylink_pcs *xpcs_create_pcs_mdiodev(struct mii_bus *bus, int addr); +void xpcs_destroy_pcs(struct phylink_pcs *pcs); + #endif /* __LINUX_PCS_XPCS_H */ diff --git a/include/linux/phy.h b/include/linux/phy.h index a98bc91a0cde..bf0eb4e5d35c 100644 --- a/include/linux/phy.h +++ b/include/linux/phy.h @@ -877,8 +877,9 @@ struct phy_plca_status { /* Modes for PHY LED configuration */ enum phy_led_modes { - PHY_LED_ACTIVE_LOW = 0, - PHY_LED_INACTIVE_HIGH_IMPEDANCE = 1, + PHY_LED_ACTIVE_HIGH = 0, + PHY_LED_ACTIVE_LOW = 1, + PHY_LED_INACTIVE_HIGH_IMPEDANCE = 2, /* keep it last */ __PHY_LED_MODES_NUM, @@ -1260,6 +1261,7 @@ size_t phy_speeds(unsigned int *speeds, size_t size, unsigned long *mask); void of_set_phy_supported(struct phy_device *phydev); void of_set_phy_eee_broken(struct phy_device *phydev); +void of_set_phy_timing_role(struct phy_device *phydev); int phy_speed_down_core(struct phy_device *phydev); /** diff --git a/include/linux/rtnetlink.h b/include/linux/rtnetlink.h index a7da7dfc06a2..8468a4ce8510 100644 --- a/include/linux/rtnetlink.h +++ b/include/linux/rtnetlink.h @@ -7,7 +7,6 @@ #include <linux/netdevice.h> #include <linux/wait.h> #include <linux/refcount.h> -#include <linux/cleanup.h> #include <uapi/linux/rtnetlink.h> extern int rtnetlink_send(struct sk_buff *skb, struct net *net, u32 pid, u32 group, int echo); @@ -47,13 +46,15 @@ extern int rtnl_is_locked(void); extern int rtnl_lock_killable(void); extern bool refcount_dec_and_rtnl_lock(refcount_t *r); -DEFINE_LOCK_GUARD_0(rtnl, rtnl_lock(), rtnl_unlock()) - extern wait_queue_head_t 
netdev_unregistering_wq; extern atomic_t dev_unreg_count; extern struct rw_semaphore pernet_ops_rwsem; extern struct rw_semaphore net_rwsem; +#define ASSERT_RTNL() \ + WARN_ONCE(!rtnl_is_locked(), \ + "RTNL: assertion failed at %s (%d)\n", __FILE__, __LINE__) + #ifdef CONFIG_PROVE_LOCKING extern bool lockdep_rtnl_is_held(void); #else @@ -95,6 +96,64 @@ static inline bool lockdep_rtnl_is_held(void) #define rcu_replace_pointer_rtnl(rp, p) \ rcu_replace_pointer(rp, p, lockdep_rtnl_is_held()) +#ifdef CONFIG_DEBUG_NET_SMALL_RTNL +void __rtnl_net_lock(struct net *net); +void __rtnl_net_unlock(struct net *net); +void rtnl_net_lock(struct net *net); +void rtnl_net_unlock(struct net *net); +int rtnl_net_lock_cmp_fn(const struct lockdep_map *a, const struct lockdep_map *b); + +bool rtnl_net_is_locked(struct net *net); + +#define ASSERT_RTNL_NET(net) \ + WARN_ONCE(!rtnl_net_is_locked(net), \ + "RTNL_NET: assertion failed at %s (%d)\n", \ + __FILE__, __LINE__) + +bool lockdep_rtnl_net_is_held(struct net *net); + +#define rcu_dereference_rtnl_net(net, p) \ + rcu_dereference_check(p, lockdep_rtnl_net_is_held(net)) +#define rtnl_net_dereference(net, p) \ + rcu_dereference_protected(p, lockdep_rtnl_net_is_held(net)) +#define rcu_replace_pointer_rtnl_net(net, rp, p) \ + rcu_replace_pointer(rp, p, lockdep_rtnl_net_is_held(net)) +#else +static inline void __rtnl_net_lock(struct net *net) {} +static inline void __rtnl_net_unlock(struct net *net) {} + +static inline void rtnl_net_lock(struct net *net) +{ + rtnl_lock(); +} + +static inline void rtnl_net_unlock(struct net *net) +{ + rtnl_unlock(); +} + +static inline void ASSERT_RTNL_NET(struct net *net) +{ + ASSERT_RTNL(); +} + +static inline void *rcu_dereference_rtnl_net(struct net *net, void *p) +{ + return rcu_dereference_rtnl(p); +} + +static inline void *rtnl_net_dereference(struct net *net, void *p) +{ + return rtnl_dereference(p); +} + +static inline void *rcu_replace_pointer_rtnl_net(struct net *net, + void *rp, void *p) +{ + return rcu_replace_pointer_rtnl(rp, p); +} +#endif + static inline struct netdev_queue *dev_ingress_queue(struct net_device *dev) { return rtnl_dereference(dev->ingress_queue); @@ -122,10 +181,6 @@ void rtnetlink_init(void); void __rtnl_unlock(void); void rtnl_kfree_skbs(struct sk_buff *head, struct sk_buff *tail); -#define ASSERT_RTNL() \ - WARN_ONCE(!rtnl_is_locked(), \ - "RTNL: assertion failed at %s (%d)\n", __FILE__, __LINE__) - extern int ndo_dflt_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb, struct net_device *dev, diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 39f1d16f3628..48f1e0fa2a13 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -3130,9 +3130,15 @@ static inline int skb_inner_network_offset(const struct sk_buff *skb) return skb_inner_network_header(skb) - skb->data; } +static inline enum skb_drop_reason +pskb_network_may_pull_reason(struct sk_buff *skb, unsigned int len) +{ + return pskb_may_pull_reason(skb, skb_network_offset(skb) + len); +} + static inline int pskb_network_may_pull(struct sk_buff *skb, unsigned int len) { - return pskb_may_pull(skb, skb_network_offset(skb) + len); + return pskb_network_may_pull_reason(skb, len) == SKB_NOT_DROPPED_YET; } /* diff --git a/include/net/act_api.h b/include/net/act_api.h index 77ee0c657e2c..404df8557f6a 100644 --- a/include/net/act_api.h +++ b/include/net/act_api.h @@ -219,7 +219,6 @@ struct tc_action *tcf_action_init_1(struct net *net, struct tcf_proto *tp, int tcf_action_dump(struct sk_buff *skb, struct tc_action 
*actions[], int bind, int ref, bool terse); int tcf_action_dump_old(struct sk_buff *skb, struct tc_action *a, int, int); -int tcf_action_dump_1(struct sk_buff *skb, struct tc_action *a, int, int); static inline void tcf_action_update_bstats(struct tc_action *a, struct sk_buff *skb) diff --git a/include/net/caif/cfsrvl.h b/include/net/caif/cfsrvl.h index 5ee7b322e18b..a000dc45f966 100644 --- a/include/net/caif/cfsrvl.h +++ b/include/net/caif/cfsrvl.h @@ -40,7 +40,6 @@ void cfsrvl_init(struct cfsrvl *service, struct dev_info *dev_info, bool supports_flowctrl); bool cfsrvl_ready(struct cfsrvl *service, int *err); -u8 cfsrvl_getphyid(struct cflayer *layer); static inline void cfsrvl_get(struct cflayer *layr) { diff --git a/include/net/dropreason-core.h b/include/net/dropreason-core.h index 4748680e8c88..d59bb96c5a02 100644 --- a/include/net/dropreason-core.h +++ b/include/net/dropreason-core.h @@ -92,6 +92,14 @@ FN(PACKET_SOCK_ERROR) \ FN(TC_CHAIN_NOTFOUND) \ FN(TC_RECLASSIFY_LOOP) \ + FN(VXLAN_INVALID_HDR) \ + FN(VXLAN_VNI_NOT_FOUND) \ + FN(MAC_INVALID_SOURCE) \ + FN(VXLAN_ENTRY_EXISTS) \ + FN(VXLAN_NO_REMOTE) \ + FN(IP_TUNNEL_ECN) \ + FN(TUNNEL_TXINFO) \ + FN(LOCAL_MAC) \ FNe(MAX) /** @@ -419,6 +427,38 @@ enum skb_drop_reason { */ SKB_DROP_REASON_TC_RECLASSIFY_LOOP, /** + * @SKB_DROP_REASON_VXLAN_INVALID_HDR: VXLAN header is invalid. E.g.: + * 1) reserved fields are not zero + * 2) "I" flag is not set + */ + SKB_DROP_REASON_VXLAN_INVALID_HDR, + /** @SKB_DROP_REASON_VXLAN_VNI_NOT_FOUND: no VXLAN device found for VNI */ + SKB_DROP_REASON_VXLAN_VNI_NOT_FOUND, + /** @SKB_DROP_REASON_MAC_INVALID_SOURCE: source mac is invalid */ + SKB_DROP_REASON_MAC_INVALID_SOURCE, + /** + * @SKB_DROP_REASON_VXLAN_ENTRY_EXISTS: trying to migrate a static + * entry or an entry pointing to a nexthop. + */ + SKB_DROP_REASON_VXLAN_ENTRY_EXISTS, + /** @SKB_DROP_REASON_VXLAN_NO_REMOTE: no remote found for xmit */ + SKB_DROP_REASON_VXLAN_NO_REMOTE, + /** + * @SKB_DROP_REASON_IP_TUNNEL_ECN: skb is dropped according to + * RFC 6040 4.2, see __INET_ECN_decapsulate() for detail. + */ + SKB_DROP_REASON_IP_TUNNEL_ECN, + /** + * @SKB_DROP_REASON_TUNNEL_TXINFO: packet without necessary metadata + * reached a device which is in "external" mode. + */ + SKB_DROP_REASON_TUNNEL_TXINFO, + /** + * @SKB_DROP_REASON_LOCAL_MAC: the source MAC address is equal to + * the MAC address of the local netdev. 
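These new reasons are consumed through the usual kfree_skb_reason() pattern; a sketch of the VNI-lookup failure case in a VXLAN-style receive path (the function and lookup helper are illustrative, not the actual vxlan code)::

  struct net_device *foo_lookup_vni(__be32 vni); /* hypothetical lookup */

  static int foo_vxlan_rcv(struct sk_buff *skb, __be32 vni)
  {
          enum skb_drop_reason reason;

          if (!foo_lookup_vni(vni)) {
                  reason = SKB_DROP_REASON_VXLAN_VNI_NOT_FOUND;
                  goto drop;
          }
          /* ... deliver to the resolved device ... */
          return 0;

  drop:
          /* The reason shows up in the kfree_skb tracepoint and drop
           * monitor instead of an opaque kfree_skb().
           */
          kfree_skb_reason(skb, reason);
          return -ENOENT;
  }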
+ */ + SKB_DROP_REASON_LOCAL_MAC, + /** * @SKB_DROP_REASON_MAX: the maximum of core drop reasons, which * shouldn't be used as a real 'reason' - only for tracing code gen */ diff --git a/include/net/dsa.h b/include/net/dsa.h index d7a6c2930277..72ae65e7246a 100644 --- a/include/net/dsa.h +++ b/include/net/dsa.h @@ -885,21 +885,6 @@ struct dsa_switch_ops { */ void (*phylink_get_caps)(struct dsa_switch *ds, int port, struct phylink_config *config); - struct phylink_pcs *(*phylink_mac_select_pcs)(struct dsa_switch *ds, - int port, - phy_interface_t iface); - void (*phylink_mac_config)(struct dsa_switch *ds, int port, - unsigned int mode, - const struct phylink_link_state *state); - void (*phylink_mac_link_down)(struct dsa_switch *ds, int port, - unsigned int mode, - phy_interface_t interface); - void (*phylink_mac_link_up)(struct dsa_switch *ds, int port, - unsigned int mode, - phy_interface_t interface, - struct phy_device *phydev, - int speed, int duplex, - bool tx_pause, bool rx_pause); void (*phylink_fixed_state)(struct dsa_switch *ds, int port, struct phylink_link_state *state); /* diff --git a/include/net/fib_notifier.h b/include/net/fib_notifier.h index 6d59221ff05a..48aad6128fea 100644 --- a/include/net/fib_notifier.h +++ b/include/net/fib_notifier.h @@ -28,7 +28,7 @@ enum fib_event_type { struct fib_notifier_ops { int family; struct list_head list; - unsigned int (*fib_seq_read)(struct net *net); + unsigned int (*fib_seq_read)(const struct net *net); int (*fib_dump)(struct net *net, struct notifier_block *nb, struct netlink_ext_ack *extack); struct module *owner; diff --git a/include/net/fib_rules.h b/include/net/fib_rules.h index d17855c52ef9..04383d90a1e3 100644 --- a/include/net/fib_rules.h +++ b/include/net/fib_rules.h @@ -176,7 +176,7 @@ int fib_default_rule_add(struct fib_rules_ops *, u32 pref, u32 table); bool fib_rule_matchall(const struct fib_rule *rule); int fib_rules_dump(struct net *net, struct notifier_block *nb, int family, struct netlink_ext_ack *extack); -unsigned int fib_rules_seq_read(struct net *net, int family); +unsigned int fib_rules_seq_read(const struct net *net, int family); int fib_nl_newrule(struct sk_buff *skb, struct nlmsghdr *nlh, struct netlink_ext_ack *extack); diff --git a/include/net/genetlink.h b/include/net/genetlink.h index c1d91f1d20f6..d096cc6352de 100644 --- a/include/net/genetlink.h +++ b/include/net/genetlink.h @@ -124,7 +124,8 @@ struct genl_family { * @genlhdr: generic netlink message header * @attrs: netlink attributes * @_net: network namespace - * @user_ptr: user pointers + * @ctx: storage space for the use by the family + * @user_ptr: user pointers (deprecated, use ctx instead) * @extack: extended ACK report struct */ struct genl_info { @@ -135,7 +136,10 @@ struct genl_info { struct genlmsghdr * genlhdr; struct nlattr ** attrs; possible_net_t _net; - void * user_ptr[2]; + union { + u8 ctx[NETLINK_CTX_SIZE]; + void * user_ptr[2]; + }; struct netlink_ext_ack *extack; }; diff --git a/include/net/inet_connection_sock.h b/include/net/inet_connection_sock.h index c0deaafebfdc..3c82fad904d4 100644 --- a/include/net/inet_connection_sock.h +++ b/include/net/inet_connection_sock.h @@ -197,12 +197,12 @@ static inline void inet_csk_clear_xmit_timer(struct sock *sk, const int what) struct inet_connection_sock *icsk = inet_csk(sk); if (what == ICSK_TIME_RETRANS || what == ICSK_TIME_PROBE0) { - icsk->icsk_pending = 0; + smp_store_release(&icsk->icsk_pending, 0); #ifdef INET_CSK_CLEAR_TIMERS sk_stop_timer(sk, &icsk->icsk_retransmit_timer); #endif } 
else if (what == ICSK_TIME_DACK) { - icsk->icsk_ack.pending = 0; + smp_store_release(&icsk->icsk_ack.pending, 0); icsk->icsk_ack.retry = 0; #ifdef INET_CSK_CLEAR_TIMERS sk_stop_timer(sk, &icsk->icsk_delack_timer); @@ -229,11 +229,12 @@ static inline void inet_csk_reset_xmit_timer(struct sock *sk, const int what, if (what == ICSK_TIME_RETRANS || what == ICSK_TIME_PROBE0 || what == ICSK_TIME_LOSS_PROBE || what == ICSK_TIME_REO_TIMEOUT) { - icsk->icsk_pending = what; + smp_store_release(&icsk->icsk_pending, what); icsk->icsk_timeout = jiffies + when; sk_reset_timer(sk, &icsk->icsk_retransmit_timer, icsk->icsk_timeout); } else if (what == ICSK_TIME_DACK) { - icsk->icsk_ack.pending |= ICSK_ACK_TIMER; + smp_store_release(&icsk->icsk_ack.pending, + icsk->icsk_ack.pending | ICSK_ACK_TIMER); icsk->icsk_ack.timeout = jiffies + when; sk_reset_timer(sk, &icsk->icsk_delack_timer, icsk->icsk_ack.timeout); } else { diff --git a/include/net/inet_sock.h b/include/net/inet_sock.h index 394c3b66065e..56d8bc5593d3 100644 --- a/include/net/inet_sock.h +++ b/include/net/inet_sock.h @@ -174,6 +174,7 @@ struct inet_cork { __s16 tos; char priority; __u16 gso_size; + u32 ts_opt_id; u64 transmit_time; u32 mark; }; @@ -241,7 +242,8 @@ struct inet_sock { struct inet_cork_full cork; }; -#define IPCORK_OPT 1 /* ip-options has been held in ipcork.opt */ +#define IPCORK_OPT 1 /* ip-options has been held in ipcork.opt */ +#define IPCORK_TS_OPT_ID 2 /* ts_opt_id field is valid, overriding sk_tskey */ enum { INET_FLAGS_PKTINFO = 0, @@ -319,8 +321,10 @@ static inline unsigned long inet_cmsg_flags(const struct inet_sock *inet) static inline struct sock *sk_to_full_sk(struct sock *sk) { #ifdef CONFIG_INET - if (sk && sk->sk_state == TCP_NEW_SYN_RECV) + if (sk && READ_ONCE(sk->sk_state) == TCP_NEW_SYN_RECV) sk = inet_reqsk(sk)->rsk_listener; + if (sk && READ_ONCE(sk->sk_state) == TCP_TIME_WAIT) + sk = NULL; #endif return sk; } @@ -329,8 +333,10 @@ static inline struct sock *sk_to_full_sk(struct sock *sk) static inline const struct sock *sk_const_to_full_sk(const struct sock *sk) { #ifdef CONFIG_INET - if (sk && sk->sk_state == TCP_NEW_SYN_RECV) + if (sk && READ_ONCE(sk->sk_state) == TCP_NEW_SYN_RECV) sk = ((const struct request_sock *)sk)->rsk_listener; + if (sk && READ_ONCE(sk->sk_state) == TCP_TIME_WAIT) + sk = NULL; #endif return sk; } diff --git a/include/net/ip.h b/include/net/ip.h index d92d3bc3ec0e..0e548c1f2a0e 100644 --- a/include/net/ip.h +++ b/include/net/ip.h @@ -288,7 +288,8 @@ static inline __u8 ip_reply_arg_flowi_flags(const struct ip_reply_arg *arg) return (arg->flags & IP_REPLY_ARG_NOSRCCHECK) ? 
FLOWI_FLAG_ANYSRC : 0; } -void ip_send_unicast_reply(struct sock *sk, struct sk_buff *skb, +void ip_send_unicast_reply(struct sock *sk, const struct sock *orig_sk, + struct sk_buff *skb, const struct ip_options *sopt, __be32 daddr, __be32 saddr, const struct ip_reply_arg *arg, @@ -424,6 +425,11 @@ int ip_decrease_ttl(struct iphdr *iph) return --iph->ttl; } +static inline dscp_t ip4h_dscp(const struct iphdr *ip4h) +{ + return inet_dsfield_to_dscp(ip4h->tos); +} + static inline int ip_mtu_locked(const struct dst_entry *dst) { const struct rtable *rt = dst_rtable(dst); @@ -684,6 +690,11 @@ static inline unsigned int ipv4_addr_hash(__be32 ip) return (__force unsigned int) ip; } +static inline u32 __ipv4_addr_hash(const __be32 ip, const u32 initval) +{ + return jhash_1word((__force u32)ip, initval); +} + static inline u32 ipv4_portaddr_hash(const struct net *net, __be32 saddr, unsigned int port) diff --git a/include/net/ip6_fib.h b/include/net/ip6_fib.h index 6cb867ce4878..7c87873ae211 100644 --- a/include/net/ip6_fib.h +++ b/include/net/ip6_fib.h @@ -394,7 +394,7 @@ struct fib6_table { struct fib6_node tb6_root; struct inet_peer_base tb6_peers; unsigned int flags; - unsigned int fib_seq; + unsigned int fib_seq; /* writes protected by rtnl_mutex */ struct hlist_head tb6_gc_hlist; /* GC candidates */ #define RT6_TABLE_HAS_DFLT_ROUTER BIT(0) }; @@ -563,7 +563,7 @@ int call_fib6_notifiers(struct net *net, enum fib_event_type event_type, int __net_init fib6_notifier_init(struct net *net); void __net_exit fib6_notifier_exit(struct net *net); -unsigned int fib6_tables_seq_read(struct net *net); +unsigned int fib6_tables_seq_read(const struct net *net); int fib6_tables_dump(struct net *net, struct notifier_block *nb, struct netlink_ext_ack *extack); @@ -632,7 +632,7 @@ void fib6_rules_cleanup(void); bool fib6_rule_default(const struct fib_rule *rule); int fib6_rules_dump(struct net *net, struct notifier_block *nb, struct netlink_ext_ack *extack); -unsigned int fib6_rules_seq_read(struct net *net); +unsigned int fib6_rules_seq_read(const struct net *net); static inline bool fib6_rules_early_flow_dissect(struct net *net, struct sk_buff *skb, @@ -676,7 +676,7 @@ static inline int fib6_rules_dump(struct net *net, struct notifier_block *nb, { return 0; } -static inline unsigned int fib6_rules_seq_read(struct net *net) +static inline unsigned int fib6_rules_seq_read(const struct net *net) { return 0; } diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h index 967e4dc555fa..b6e44f4eaa4c 100644 --- a/include/net/ip_fib.h +++ b/include/net/ip_fib.h @@ -347,7 +347,7 @@ static inline int fib4_rules_dump(struct net *net, struct notifier_block *nb, return 0; } -static inline unsigned int fib4_rules_seq_read(struct net *net) +static inline unsigned int fib4_rules_seq_read(const struct net *net) { return 0; } @@ -411,7 +411,7 @@ static inline bool fib4_has_custom_rules(const struct net *net) bool fib4_rule_default(const struct fib_rule *rule); int fib4_rules_dump(struct net *net, struct notifier_block *nb, struct netlink_ext_ack *extack); -unsigned int fib4_rules_seq_read(struct net *net); +unsigned int fib4_rules_seq_read(const struct net *net); static inline bool fib4_rules_early_flow_dissect(struct net *net, struct sk_buff *skb, @@ -449,8 +449,9 @@ int fib_gw_from_via(struct fib_config *cfg, struct nlattr *nla, __be32 fib_compute_spec_dst(struct sk_buff *skb); bool fib_info_nh_uses_dev(struct fib_info *fi, const struct net_device *dev); int fib_validate_source(struct sk_buff *skb, __be32 src, __be32 dst, - 
u8 tos, int oif, struct net_device *dev, + dscp_t dscp, int oif, struct net_device *dev, struct in_device *idev, u32 *itag); + #ifdef CONFIG_IP_ROUTE_CLASSID static inline int fib_num_tclassid_users(struct net *net) { diff --git a/include/net/ip_tunnels.h b/include/net/ip_tunnels.h index 6194fbb564c6..4e4f9e24c9c1 100644 --- a/include/net/ip_tunnels.h +++ b/include/net/ip_tunnels.h @@ -439,7 +439,8 @@ int ip_tunnel_encap_del_ops(const struct ip_tunnel_encap_ops *op, int ip_tunnel_encap_setup(struct ip_tunnel *t, struct ip_tunnel_encap *ipencap); -static inline bool pskb_inet_may_pull(struct sk_buff *skb) +static inline enum skb_drop_reason +pskb_inet_may_pull_reason(struct sk_buff *skb) { int nhlen; @@ -456,16 +457,22 @@ static inline bool pskb_inet_may_pull(struct sk_buff *skb) nhlen = 0; } - return pskb_network_may_pull(skb, nhlen); + return pskb_network_may_pull_reason(skb, nhlen); +} + +static inline bool pskb_inet_may_pull(struct sk_buff *skb) +{ + return pskb_inet_may_pull_reason(skb) == SKB_NOT_DROPPED_YET; } /* Variant of pskb_inet_may_pull(). */ -static inline bool skb_vlan_inet_prepare(struct sk_buff *skb, - bool inner_proto_inherit) +static inline enum skb_drop_reason +skb_vlan_inet_prepare(struct sk_buff *skb, bool inner_proto_inherit) { int nhlen = 0, maclen = inner_proto_inherit ? 0 : ETH_HLEN; __be16 type = skb->protocol; + enum skb_drop_reason reason; /* Essentially this is skb_protocol(skb, true) * And we get MAC len. @@ -486,11 +493,13 @@ static inline bool skb_vlan_inet_prepare(struct sk_buff *skb, /* For ETH_P_IPV6/ETH_P_IP we make sure to pull * a base network header in skb->head. */ - if (!pskb_may_pull(skb, maclen + nhlen)) - return false; + reason = pskb_may_pull_reason(skb, maclen + nhlen); + if (reason) + return reason; skb_set_network_header(skb, maclen); - return true; + + return SKB_NOT_DROPPED_YET; } static inline int ip_encap_hlen(struct ip_tunnel_encap *e) diff --git a/include/net/l3mdev.h b/include/net/l3mdev.h index 031c661aa14d..2d6141f28b53 100644 --- a/include/net/l3mdev.h +++ b/include/net/l3mdev.h @@ -78,7 +78,7 @@ static inline int l3mdev_master_ifindex_by_index(struct net *net, int ifindex) struct net_device *dev; int rc = 0; - if (likely(ifindex)) { + if (ifindex) { rcu_read_lock(); dev = dev_get_by_index_rcu(net, ifindex); diff --git a/include/net/mana/gdma.h b/include/net/mana/gdma.h index de47fa533b15..90f56656b572 100644 --- a/include/net/mana/gdma.h +++ b/include/net/mana/gdma.h @@ -267,7 +267,8 @@ struct gdma_event { struct gdma_queue; struct mana_eq { - struct gdma_queue *eq; + struct gdma_queue *eq; + struct dentry *mana_eq_debugfs; }; typedef void gdma_eq_callback(void *context, struct gdma_queue *q, @@ -365,6 +366,7 @@ struct gdma_irq_context { struct gdma_context { struct device *dev; + struct dentry *mana_pci_debugfs; /* Per-vPort max number of queues */ unsigned int max_num_queues; @@ -878,5 +880,7 @@ int mana_gd_send_request(struct gdma_context *gc, u32 req_len, const void *req, u32 resp_len, void *resp); int mana_gd_destroy_dma_region(struct gdma_context *gc, u64 dma_region_handle); +void mana_register_debugfs(void); +void mana_unregister_debugfs(void); #endif /* _GDMA_H */ diff --git a/include/net/mana/mana.h b/include/net/mana/mana.h index f2a5200d8a0f..0d00b24eacaf 100644 --- a/include/net/mana/mana.h +++ b/include/net/mana/mana.h @@ -43,7 +43,7 @@ enum TRI_STATE { * size beyond this value gets rejected by __alloc_page() call. 
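Returning to the ip_tunnels.h changes above: the _reason variants let a tunnel transmit path report exactly why a malformed inner packet was dropped, rather than collapsing every failure into a boolean. A sketch assuming a hypothetical foo tunnel driver::

  static netdev_tx_t foo_tunnel_xmit(struct sk_buff *skb,
                                     struct net_device *dev)
  {
          enum skb_drop_reason reason;

          reason = pskb_inet_may_pull_reason(skb);
          if (reason != SKB_NOT_DROPPED_YET) {
                  /* e.g. SKB_DROP_REASON_PKT_TOO_SMALL propagated from
                   * the failed header pull.
                   */
                  kfree_skb_reason(skb, reason);
                  return NETDEV_TX_OK;
          }

          /* ... build and push the encapsulation header ... */
          return NETDEV_TX_OK;
  }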
*/ #define MAX_RX_BUFFERS_PER_QUEUE 8192 -#define DEF_RX_BUFFERS_PER_QUEUE 512 +#define DEF_RX_BUFFERS_PER_QUEUE 1024 #define MIN_RX_BUFFERS_PER_QUEUE 128 /* This max value for TX buffers is derived as the maximum allocatable @@ -350,6 +350,7 @@ struct mana_rxq { int xdp_rc; /* XDP redirect return code */ struct page_pool *page_pool; + struct dentry *mana_rx_debugfs; /* MUST BE THE LAST MEMBER: * Each receive buffer has an associated mana_recv_buf_oob. @@ -363,6 +364,8 @@ struct mana_tx_qp { struct mana_cq tx_cq; mana_handle_t tx_object; + + struct dentry *mana_tx_debugfs; }; struct mana_ethtool_stats { @@ -407,6 +410,7 @@ struct mana_context { u16 num_ports; struct mana_eq *eqs; + struct dentry *mana_eqs_debugfs; struct net_device *ports[MAX_PORTS_IN_MANA_DEV]; }; @@ -468,6 +472,9 @@ struct mana_port_context { bool port_st_save; /* Saved port state */ struct mana_ethtool_stats eth_stats; + + /* Debugfs */ + struct dentry *mana_port_debugfs; }; netdev_tx_t mana_start_xmit(struct sk_buff *skb, struct net_device *ndev); @@ -494,6 +501,7 @@ int mana_pre_alloc_rxbufs(struct mana_port_context *apc, int mtu, int num_queues void mana_pre_dealloc_rxbufs(struct mana_port_context *apc); extern const struct ethtool_ops mana_ethtool_ops; +extern struct dentry *mana_debugfs_root; /* A CQ can be created not associated with any EQ */ #define GDMA_CQ_NO_EQ 0xffff diff --git a/include/net/neighbour.h b/include/net/neighbour.h index a44f262a7384..3887ed9e5026 100644 --- a/include/net/neighbour.h +++ b/include/net/neighbour.h @@ -239,7 +239,6 @@ struct neigh_table { enum { NEIGH_ARP_TABLE = 0, NEIGH_ND_TABLE = 1, - NEIGH_DN_TABLE = 2, NEIGH_NR_TABLES, NEIGH_LINK_TABLE = NEIGH_NR_TABLES /* Pseudo table for neigh_xmit */ }; diff --git a/include/net/net_namespace.h b/include/net/net_namespace.h index e67b483cc8bb..873c0f9fdac6 100644 --- a/include/net/net_namespace.h +++ b/include/net/net_namespace.h @@ -188,6 +188,10 @@ struct net { #if IS_ENABLED(CONFIG_SMC) struct netns_smc smc; #endif +#ifdef CONFIG_DEBUG_NET_SMALL_RTNL + /* Move to a better place when the config guard is removed. */ + struct mutex rtnl_mutex; +#endif } __randomize_layout; #include <linux/seq_file_net.h> diff --git a/include/net/net_shaper.h b/include/net/net_shaper.h new file mode 100644 index 000000000000..5c3f49b52fe9 --- /dev/null +++ b/include/net/net_shaper.h @@ -0,0 +1,120 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ + +#ifndef _NET_SHAPER_H_ +#define _NET_SHAPER_H_ + +#include <linux/types.h> + +#include <uapi/linux/net_shaper.h> + +struct net_device; +struct devlink; +struct netlink_ext_ack; + +enum net_shaper_binding_type { + NET_SHAPER_BINDING_TYPE_NETDEV, + /* NET_SHAPER_BINDING_TYPE_DEVLINK_PORT */ +}; + +struct net_shaper_binding { + enum net_shaper_binding_type type; + union { + struct net_device *netdev; + struct devlink *devlink; + }; +}; + +struct net_shaper_handle { + enum net_shaper_scope scope; + u32 id; +}; + +/** + * struct net_shaper - represents a shaping node on the NIC H/W + * zeroed fields are considered not set.
+ * @parent: Unique identifier for the shaper parent, usually implied + * @handle: Unique identifier for this shaper + * @metric: Specifies whether the rate limit refers to PPS or BPS + * @bw_min: Minimum guaranteed rate for this shaper + * @bw_max: Maximum peak rate allowed for this shaper + * @burst: Maximum burst for the peak rate of this shaper + * @priority: Scheduling priority for this shaper + * @weight: Scheduling weight for this shaper + */ +struct net_shaper { + struct net_shaper_handle parent; + struct net_shaper_handle handle; + enum net_shaper_metric metric; + u64 bw_min; + u64 bw_max; + u64 burst; + u32 priority; + u32 weight; + + /* private: */ + u32 leaves; /* accounted only for NODE scope */ + struct rcu_head rcu; +}; + +/** + * struct net_shaper_ops - Operations on device H/W shapers + * + * The operations apply to both net_device and devlink objects. + * The initial shaping configuration at device initialization is empty: + * it does not constrain the rate in any way. + * The network core keeps track of the applied user-configuration in + * the net_device or devlink structure. + * The operations are serialized via a per-device lock. + * + * Devices not supporting any kind of nesting should not provide the + * @group operation. + * + * Each shaper is uniquely identified within the device with a 'handle' + * comprising the shaper scope and a scope-specific id. + */ +struct net_shaper_ops { + /** + * @group: create the specified shapers' scheduling group + * + * Nest the @leaves shapers under the @node shaper. + * All the shapers belong to the device specified by @binding. + * The size of the @leaves array is specified by @leaves_count. + * Create both the @leaves and the @node shapers; or, if they already + * exist, link them together in the desired way. + * @leaves scope must be NET_SHAPER_SCOPE_QUEUE. + */ + int (*group)(struct net_shaper_binding *binding, int leaves_count, + const struct net_shaper *leaves, + const struct net_shaper *node, + struct netlink_ext_ack *extack); + + /** + * @set: Updates the specified shaper + * + * Updates or creates the @shaper on the device specified by @binding. + */ + int (*set)(struct net_shaper_binding *binding, + const struct net_shaper *shaper, + struct netlink_ext_ack *extack); + + /** + * @delete: Removes the specified shaper + * + * Removes the shaper configuration as identified by the given @handle + * on the device specified by @binding, restoring the default behavior. + */ + int (*delete)(struct net_shaper_binding *binding, + const struct net_shaper_handle *handle, + struct netlink_ext_ack *extack); + + /** + * @capabilities: get the shaper features supported by the device + * + * Fills the bitmask @cap with the supported capabilities for the + * specified @scope and device specified by @binding.
+ */ + void (*capabilities)(struct net_shaper_binding *binding, + enum net_shaper_scope scope, unsigned long *cap); +}; + +#endif diff --git a/include/net/netns/core.h b/include/net/netns/core.h index 78214f1b43a2..9b36f0ff0c20 100644 --- a/include/net/netns/core.h +++ b/include/net/netns/core.h @@ -15,6 +15,7 @@ struct netns_core { int sysctl_somaxconn; int sysctl_optmem_max; u8 sysctl_txrehash; + u8 sysctl_tstamp_allow_data; #ifdef CONFIG_PROC_FS struct prot_inuse __percpu *prot_inuse; diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h index 276f622f3516..3c014170e001 100644 --- a/include/net/netns/ipv4.h +++ b/include/net/netns/ipv4.h @@ -76,6 +76,8 @@ struct netns_ipv4 { __cacheline_group_begin(netns_ipv4_read_rx); u8 sysctl_ip_early_demux; u8 sysctl_tcp_early_demux; + u8 sysctl_tcp_l3mdev_accept; + /* 3 bytes hole, try to pack */ int sysctl_tcp_reordering; int sysctl_tcp_rmem[3]; __cacheline_group_end(netns_ipv4_read_rx); @@ -151,9 +153,6 @@ struct netns_ipv4 { u8 sysctl_fwmark_reflect; u8 sysctl_tcp_fwmark_accept; -#ifdef CONFIG_NET_L3_MASTER_DEV - u8 sysctl_tcp_l3mdev_accept; -#endif u8 sysctl_tcp_mtu_probing; int sysctl_tcp_mtu_probe_floor; int sysctl_tcp_base_mss; @@ -263,12 +262,14 @@ struct netns_ipv4 { #endif struct fib_notifier_ops *notifier_ops; - unsigned int fib_seq; /* protected by rtnl_mutex */ + unsigned int fib_seq; /* writes protected by rtnl_mutex */ struct fib_notifier_ops *ipmr_notifier_ops; unsigned int ipmr_seq; /* protected by rtnl_mutex */ atomic_t rt_genid; siphash_key_t ip_id_key; + struct hlist_head *inet_addr_lst; + struct delayed_work addr_chk_work; }; #endif diff --git a/include/net/phonet/pn_dev.h b/include/net/phonet/pn_dev.h index e9dc8dca5817..37a3e83531c6 100644 --- a/include/net/phonet/pn_dev.h +++ b/include/net/phonet/pn_dev.h @@ -11,13 +11,13 @@ #define PN_DEV_H #include <linux/list.h> -#include <linux/mutex.h> +#include <linux/spinlock.h> struct net; struct phonet_device_list { struct list_head list; - struct mutex lock; + spinlock_t lock; }; struct phonet_device_list *phonet_device_list(struct net *net); @@ -38,11 +38,11 @@ int phonet_address_add(struct net_device *dev, u8 addr); int phonet_address_del(struct net_device *dev, u8 addr); u8 phonet_address_get(struct net_device *dev, u8 addr); int phonet_address_lookup(struct net *net, u8 addr); -void phonet_address_notify(int event, struct net_device *dev, u8 addr); +void phonet_address_notify(struct net *net, int event, u32 ifindex, u8 addr); int phonet_route_add(struct net_device *dev, u8 daddr); int phonet_route_del(struct net_device *dev, u8 daddr); -void rtm_phonet_notify(int event, struct net_device *dev, u8 dst); +void rtm_phonet_notify(struct net *net, int event, u32 ifindex, u8 dst); struct net_device *phonet_route_get_rcu(struct net *net, u8 daddr); struct net_device *phonet_route_output(struct net *net, u8 daddr); diff --git a/include/net/route.h b/include/net/route.h index 1789f1e6640b..586e59f7ed8a 100644 --- a/include/net/route.h +++ b/include/net/route.h @@ -198,22 +198,23 @@ static inline struct rtable *ip_route_output_gre(struct net *net, struct flowi4 fl4->fl4_gre_key = gre_key; return ip_route_output_key(net, fl4); } + int ip_mc_validate_source(struct sk_buff *skb, __be32 daddr, __be32 saddr, - u8 tos, struct net_device *dev, + dscp_t dscp, struct net_device *dev, struct in_device *in_dev, u32 *itag); -int ip_route_input_noref(struct sk_buff *skb, __be32 dst, __be32 src, - u8 tos, struct net_device *devin); -int ip_route_use_hint(struct sk_buff *skb, __be32 dst, 
__be32 src, - u8 tos, struct net_device *devin, +int ip_route_input_noref(struct sk_buff *skb, __be32 daddr, __be32 saddr, + dscp_t dscp, struct net_device *dev); +int ip_route_use_hint(struct sk_buff *skb, __be32 daddr, __be32 saddr, + dscp_t dscp, struct net_device *dev, const struct sk_buff *hint); static inline int ip_route_input(struct sk_buff *skb, __be32 dst, __be32 src, - u8 tos, struct net_device *devin) + dscp_t dscp, struct net_device *devin) { int err; rcu_read_lock(); - err = ip_route_input_noref(skb, dst, src, tos, devin); + err = ip_route_input_noref(skb, dst, src, dscp, devin); if (!err) { skb_dst_force(skb); if (!skb_dst(skb)) diff --git a/include/net/rtnetlink.h b/include/net/rtnetlink.h index 2d3eb7cb4dff..e0d9a8eae6b6 100644 --- a/include/net/rtnetlink.h +++ b/include/net/rtnetlink.h @@ -3,6 +3,7 @@ #define __NET_RTNETLINK_H #include <linux/rtnetlink.h> +#include <linux/srcu.h> #include <net/netlink.h> typedef int (*rtnl_doit_func)(struct sk_buff *, struct nlmsghdr *, @@ -29,6 +30,16 @@ static inline enum rtnl_kinds rtnl_msgtype_kind(int msgtype) return msgtype & RTNL_KIND_MASK; } +/** + * struct rtnl_msg_handler - rtnetlink message type and handlers + * + * @owner: NULL for built-in, THIS_MODULE for module + * @protocol: Protocol family or PF_UNSPEC + * @msgtype: rtnetlink message type + * @doit: Function pointer called for each request message + * @dumpit: Function pointer called for each dump request (NLM_F_DUMP) message + * @flags: rtnl_link_flags to modify behaviour of doit/dumpit functions + */ struct rtnl_msg_handler { struct module *owner; int protocol; @@ -38,11 +49,6 @@ struct rtnl_msg_handler { int flags; }; -void rtnl_register(int protocol, int msgtype, - rtnl_doit_func, rtnl_dumpit_func, unsigned int flags); -int rtnl_register_module(struct module *owner, int protocol, int msgtype, - rtnl_doit_func, rtnl_dumpit_func, unsigned int flags); -int rtnl_unregister(int protocol, int msgtype); void rtnl_unregister_all(int protocol); int __rtnl_register_many(const struct rtnl_msg_handler *handlers, int n); @@ -64,7 +70,8 @@ static inline int rtnl_msg_family(const struct nlmsghdr *nlh) /** * struct rtnl_link_ops - rtnetlink link operations * - * @list: Used internally + * @list: Used internally, protected by RTNL and SRCU + * @srcu: Used internally * @kind: Identifier * @netns_refund: Physical device, move to init_net on netns exit * @maxtype: Highest device specific netlink attribute number @@ -95,6 +102,7 @@ static inline int rtnl_msg_family(const struct nlmsghdr *nlh) */ struct rtnl_link_ops { struct list_head list; + struct srcu_struct srcu; const char *kind; @@ -164,7 +172,8 @@ void rtnl_link_unregister(struct rtnl_link_ops *ops); /** * struct rtnl_af_ops - rtnetlink address family operations * - * @list: Used internally + * @list: Used internally, protected by RTNL and SRCU + * @srcu: Used internally * @family: Address family * @fill_link_af: Function to fill IFLA_AF_SPEC with address family * specific netlink attributes. 
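The rtnl_msg_handler table above replaces the removed rtnl_register()/rtnl_register_module() calls with a single array-based registration that can fail as a whole. A sketch of how a protocol might use it; RTM_NEWFOO/RTM_GETFOO and the handler functions stand in for real rtnetlink message types::

  static int foo_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
                      struct netlink_ext_ack *extack);
  static int foo_dumpit(struct sk_buff *skb, struct netlink_callback *cb);

  static const struct rtnl_msg_handler foo_rtnl_msg_handlers[] = {
          {.owner = THIS_MODULE, .protocol = PF_UNSPEC,
           .msgtype = RTM_NEWFOO, .doit = foo_doit},
          {.owner = THIS_MODULE, .protocol = PF_UNSPEC,
           .msgtype = RTM_GETFOO, .dumpit = foo_dumpit},
  };

  static int __init foo_init(void)
  {
          /* Registers every entry, or none of them on failure. */
          return __rtnl_register_many(foo_rtnl_msg_handlers,
                                      ARRAY_SIZE(foo_rtnl_msg_handlers));
  }

Cleanup on module unload would go through the matching unregister-many helper from the same series, which is not shown in this hunk.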
@@ -177,6 +186,8 @@ void rtnl_link_unregister(struct rtnl_link_ops *ops); */ struct rtnl_af_ops { struct list_head list; + struct srcu_struct srcu; + int family; int (*fill_link_af)(struct sk_buff *skb, @@ -196,7 +207,7 @@ struct rtnl_af_ops { size_t (*get_stats_af_size)(const struct net_device *dev); }; -void rtnl_af_register(struct rtnl_af_ops *ops); +int rtnl_af_register(struct rtnl_af_ops *ops); void rtnl_af_unregister(struct rtnl_af_ops *ops); struct net *rtnl_link_get_net(struct net *src_net, struct nlattr *tb[]); diff --git a/include/net/sock.h b/include/net/sock.h index f29c14448938..7464e9f9f47c 100644 --- a/include/net/sock.h +++ b/include/net/sock.h @@ -956,6 +956,12 @@ enum sock_flags { }; #define SK_FLAGS_TIMESTAMP ((1UL << SOCK_TIMESTAMP) | (1UL << SOCK_TIMESTAMPING_RX_SOFTWARE)) +/* + * The highest bit of sk_tsflags is reserved for kernel-internal + * SOCKCM_FLAG_TS_OPT_ID. There is a check in core/sock.c to control that + * SOF_TIMESTAMPING* values do not reach this reserved area + */ +#define SOCKCM_FLAG_TS_OPT_ID BIT(31) static inline void sock_copy_flags(struct sock *nsk, const struct sock *osk) { @@ -1754,6 +1760,15 @@ void sock_efree(struct sk_buff *skb); #ifdef CONFIG_INET void sock_edemux(struct sk_buff *skb); void sock_pfree(struct sk_buff *skb); + +static inline void skb_set_owner_edemux(struct sk_buff *skb, struct sock *sk) +{ + skb_orphan(skb); + if (refcount_inc_not_zero(&sk->sk_refcnt)) { + skb->sk = sk; + skb->destructor = sock_edemux; + } +} #else #define sock_edemux sock_efree #endif @@ -1798,6 +1813,7 @@ struct sockcm_cookie { u64 transmit_time; u32 mark; u32 tsflags; + u32 ts_opt_id; }; static inline void sockcm_init(struct sockcm_cookie *sockc, @@ -2655,39 +2671,48 @@ static inline void sock_recv_cmsgs(struct msghdr *msg, struct sock *sk, sock_write_timestamp(sk, 0); } -void __sock_tx_timestamp(__u16 tsflags, __u8 *tx_flags); +void __sock_tx_timestamp(__u32 tsflags, __u8 *tx_flags); /** * _sock_tx_timestamp - checks whether the outgoing packet is to be time stamped * @sk: socket sending this packet - * @tsflags: timestamping flags to use + * @sockc: pointer to socket cmsg cookie to get timestamping info * @tx_flags: completed with instructions for time stamping * @tskey: filled in with next sk_tskey (not for TCP, which uses seqno) * * Note: callers should take care of initial ``*tx_flags`` value (usually 0) */ -static inline void _sock_tx_timestamp(struct sock *sk, __u16 tsflags, +static inline void _sock_tx_timestamp(struct sock *sk, + const struct sockcm_cookie *sockc, __u8 *tx_flags, __u32 *tskey) { + __u32 tsflags = sockc->tsflags; + if (unlikely(tsflags)) { __sock_tx_timestamp(tsflags, tx_flags); if (tsflags & SOF_TIMESTAMPING_OPT_ID && tskey && - tsflags & SOF_TIMESTAMPING_TX_RECORD_MASK) - *tskey = atomic_inc_return(&sk->sk_tskey) - 1; + tsflags & SOF_TIMESTAMPING_TX_RECORD_MASK) { + if (tsflags & SOCKCM_FLAG_TS_OPT_ID) + *tskey = sockc->ts_opt_id; + else + *tskey = atomic_inc_return(&sk->sk_tskey) - 1; + } } if (unlikely(sock_flag(sk, SOCK_WIFI_STATUS))) *tx_flags |= SKBTX_WIFI_STATUS; } -static inline void sock_tx_timestamp(struct sock *sk, __u16 tsflags, +static inline void sock_tx_timestamp(struct sock *sk, + const struct sockcm_cookie *sockc, __u8 *tx_flags) { - _sock_tx_timestamp(sk, tsflags, tx_flags, NULL); + _sock_tx_timestamp(sk, sockc, tx_flags, NULL); } -static inline void skb_setup_tx_timestamp(struct sk_buff *skb, __u16 tsflags) +static inline void skb_setup_tx_timestamp(struct sk_buff *skb, + const struct sockcm_cookie *sockc) { - 
_sock_tx_timestamp(skb->sk, tsflags, &skb_shinfo(skb)->tx_flags, + _sock_tx_timestamp(skb->sk, sockc, &skb_shinfo(skb)->tx_flags, &skb_shinfo(skb)->tskey); } @@ -2791,6 +2816,16 @@ static inline bool sk_listener(const struct sock *sk) return (1 << sk->sk_state) & (TCPF_LISTEN | TCPF_NEW_SYN_RECV); } +/* This helper checks if a socket is a LISTEN or NEW_SYN_RECV or TIME_WAIT + * TCP SYNACK messages can be attached to LISTEN or NEW_SYN_RECV (depending on SYNCOOKIE) + * TCP RST and ACK can be attached to TIME_WAIT. + */ +static inline bool sk_listener_or_tw(const struct sock *sk) +{ + return (1 << READ_ONCE(sk->sk_state)) & + (TCPF_LISTEN | TCPF_NEW_SYN_RECV | TCPF_TIME_WAIT); +} + void sock_enable_timestamp(struct sock *sk, enum sock_flags flag); int sock_recv_errqueue(struct sock *sk, struct msghdr *msg, int len, int level, int type); @@ -2815,8 +2850,6 @@ void sk_get_meminfo(const struct sock *sk, u32 *meminfo); extern __u32 sysctl_wmem_max; extern __u32 sysctl_rmem_max; -extern int sysctl_tstamp_allow_data; - extern __u32 sysctl_wmem_default; extern __u32 sysctl_rmem_default; diff --git a/include/net/tcp.h b/include/net/tcp.h index d1948d357dad..739a9fb83d0c 100644 --- a/include/net/tcp.h +++ b/include/net/tcp.h @@ -2442,7 +2442,7 @@ static inline s64 tcp_rto_delta_us(const struct sock *sk) return rto_time_stamp_us - tcp_sk(sk)->tcp_mstamp; } else { WARN_ONCE(1, - "rtx queue emtpy: " + "rtx queue empty: " "out:%u sacked:%u lost:%u retrans:%u " "tlp_high_seq:%u sk_state:%u ca_state:%u " "advmss:%u mss_cache:%u pmtu:%u\n", diff --git a/include/net/xdp_sock_drv.h b/include/net/xdp_sock_drv.h index 0a5dca2b2b3f..40085afd9160 100644 --- a/include/net/xdp_sock_drv.h +++ b/include/net/xdp_sock_drv.h @@ -126,8 +126,8 @@ static inline void xsk_buff_free(struct xdp_buff *xdp) if (likely(!xdp_buff_has_frags(xdp))) goto out; - list_for_each_entry_safe(pos, tmp, xskb_list, xskb_list_node) { - list_del(&pos->xskb_list_node); + list_for_each_entry_safe(pos, tmp, xskb_list, list_node) { + list_del(&pos->list_node); xp_free(pos); } @@ -140,7 +140,7 @@ static inline void xsk_buff_add_frag(struct xdp_buff *xdp) { struct xdp_buff_xsk *frag = container_of(xdp, struct xdp_buff_xsk, xdp); - list_add_tail(&frag->xskb_list_node, &frag->pool->xskb_list); + list_add_tail(&frag->list_node, &frag->pool->xskb_list); } static inline struct xdp_buff *xsk_buff_get_frag(struct xdp_buff *first) @@ -150,9 +150,9 @@ static inline struct xdp_buff *xsk_buff_get_frag(struct xdp_buff *first) struct xdp_buff_xsk *frag; frag = list_first_entry_or_null(&xskb->pool->xskb_list, - struct xdp_buff_xsk, xskb_list_node); + struct xdp_buff_xsk, list_node); if (frag) { - list_del(&frag->xskb_list_node); + list_del(&frag->list_node); ret = &frag->xdp; } @@ -163,7 +163,7 @@ static inline void xsk_buff_del_tail(struct xdp_buff *tail) { struct xdp_buff_xsk *xskb = container_of(tail, struct xdp_buff_xsk, xdp); - list_del(&xskb->xskb_list_node); + list_del(&xskb->list_node); } static inline struct xdp_buff *xsk_buff_get_tail(struct xdp_buff *first) @@ -172,7 +172,7 @@ static inline struct xdp_buff *xsk_buff_get_tail(struct xdp_buff *first) struct xdp_buff_xsk *frag; frag = list_last_entry(&xskb->pool->xskb_list, struct xdp_buff_xsk, - xskb_list_node); + list_node); return &frag->xdp; } diff --git a/include/net/xsk_buff_pool.h b/include/net/xsk_buff_pool.h index bacb33f1e3e5..bb03cee716b3 100644 --- a/include/net/xsk_buff_pool.h +++ b/include/net/xsk_buff_pool.h @@ -28,9 +28,7 @@ struct xdp_buff_xsk { dma_addr_t dma; dma_addr_t frame_dma; struct 
xsk_buff_pool *pool; - u64 orig_addr; - struct list_head free_list_node; - struct list_head xskb_list_node; + struct list_head list_node; }; #define XSK_CHECK_PRIV_TYPE(t) BUILD_BUG_ON(sizeof(t) > offsetofend(struct xdp_buff_xsk, cb)) @@ -78,6 +76,7 @@ struct xsk_buff_pool { u32 chunk_size; u32 chunk_shift; u32 frame_len; + u32 xdp_zc_max_segs; u8 tx_metadata_len; /* inherited from umem */ u8 cached_need_wakeup; bool uses_need_wakeup; @@ -120,7 +119,6 @@ void xp_free(struct xdp_buff_xsk *xskb); static inline void xp_init_xskb_addr(struct xdp_buff_xsk *xskb, struct xsk_buff_pool *pool, u64 addr) { - xskb->orig_addr = addr; xskb->xdp.data_hard_start = pool->addrs + addr + pool->headroom; } @@ -222,14 +220,19 @@ static inline void xp_release(struct xdp_buff_xsk *xskb) xskb->pool->free_heads[xskb->pool->free_heads_cnt++] = xskb; } -static inline u64 xp_get_handle(struct xdp_buff_xsk *xskb) +static inline u64 xp_get_handle(struct xdp_buff_xsk *xskb, + struct xsk_buff_pool *pool) { - u64 offset = xskb->xdp.data - xskb->xdp.data_hard_start; + u64 orig_addr = xskb->xdp.data - pool->addrs; + u64 offset; - offset += xskb->pool->headroom; - if (!xskb->pool->unaligned) - return xskb->orig_addr + offset; - return xskb->orig_addr + (offset << XSK_UNALIGNED_BUF_OFFSET_SHIFT); + if (!pool->unaligned) + return orig_addr; + + offset = xskb->xdp.data - xskb->xdp.data_hard_start; + orig_addr -= offset; + offset += pool->headroom; + return orig_addr + (offset << XSK_UNALIGNED_BUF_OFFSET_SHIFT); } static inline bool xp_tx_metadata_enabled(const struct xsk_buff_pool *pool) diff --git a/include/uapi/asm-generic/socket.h b/include/uapi/asm-generic/socket.h index 3b4e3e815602..deacfd6dd197 100644 --- a/include/uapi/asm-generic/socket.h +++ b/include/uapi/asm-generic/socket.h @@ -141,6 +141,8 @@ #define SCM_DEVMEM_DMABUF SO_DEVMEM_DMABUF #define SO_DEVMEM_DONTNEED 80 +#define SCM_TS_OPT_ID 81 + #if !defined(__KERNEL__) #if __BITS_PER_LONG == 64 || (defined(__x86_64__) && defined(__ILP32__)) diff --git a/include/uapi/linux/batadv_packet.h b/include/uapi/linux/batadv_packet.h index 6e25753015df..439132a819ea 100644 --- a/include/uapi/linux/batadv_packet.h +++ b/include/uapi/linux/batadv_packet.h @@ -9,6 +9,7 @@ #include <asm/byteorder.h> #include <linux/if_ether.h> +#include <linux/stddef.h> #include <linux/types.h> /** @@ -593,19 +594,6 @@ struct batadv_tvlv_gateway_data { }; /** - * struct batadv_tvlv_tt_data - tt data propagated through the tt tvlv container - * @flags: translation table flags (see batadv_tt_data_flags) - * @ttvn: translation table version number - * @num_vlan: number of announced VLANs. In the TVLV this struct is followed by - * one batadv_tvlv_tt_vlan_data object per announced vlan - */ -struct batadv_tvlv_tt_data { - __u8 flags; - __u8 ttvn; - __be16 num_vlan; -}; - -/** * struct batadv_tvlv_tt_vlan_data - vlan specific tt data propagated through * the tt tvlv container * @crc: crc32 checksum of the entries belonging to this vlan @@ -619,6 +607,21 @@ struct batadv_tvlv_tt_vlan_data { }; /** + * struct batadv_tvlv_tt_data - tt data propagated through the tt tvlv container + * @flags: translation table flags (see batadv_tt_data_flags) + * @ttvn: translation table version number + * @num_vlan: number of announced VLANs. 
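The SCM_TS_OPT_ID control message added above is the userspace half of the SOCKCM_FLAG_TS_OPT_ID/ts_opt_id plumbing: the application picks the key that comes back with the timestamp (in ee_data on the error queue), instead of relying on the kernel's auto-incremented sk_tskey. A hedged userspace sketch, assuming SO_TIMESTAMPING was already enabled with SOF_TIMESTAMPING_OPT_ID plus a TX recording flag::

  #include <stdint.h>
  #include <string.h>
  #include <sys/socket.h>
  #include <sys/uio.h>

  #ifndef SCM_TS_OPT_ID
  #define SCM_TS_OPT_ID 81        /* from the uapi change above */
  #endif

  static ssize_t send_with_ts_id(int fd, const void *buf, size_t len,
                                 uint32_t ts_id)
  {
          char control[CMSG_SPACE(sizeof(ts_id))] = {0};
          struct iovec iov = {
                  .iov_base = (void *)buf,
                  .iov_len = len,
          };
          struct msghdr msg = {
                  .msg_iov = &iov,
                  .msg_iovlen = 1,
                  .msg_control = control,
                  .msg_controllen = sizeof(control),
          };
          struct cmsghdr *cmsg = CMSG_FIRSTHDR(&msg);

          cmsg->cmsg_level = SOL_SOCKET;
          cmsg->cmsg_type = SCM_TS_OPT_ID;
          cmsg->cmsg_len = CMSG_LEN(sizeof(ts_id));
          memcpy(CMSG_DATA(cmsg), &ts_id, sizeof(ts_id));

          return sendmsg(fd, &msg, 0);
  }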
In the TVLV this struct is followed by + * one batadv_tvlv_tt_vlan_data object per announced vlan + * @vlan_data: array of batadv_tvlv_tt_vlan_data objects + */ +struct batadv_tvlv_tt_data { + __u8 flags; + __u8 ttvn; + __be16 num_vlan; + struct batadv_tvlv_tt_vlan_data vlan_data[] __counted_by_be(num_vlan); +}; + +/** * struct batadv_tvlv_tt_change - translation table diff data * @flags: status indicators concerning the non-mesh client (see * batadv_tt_client_flags) diff --git a/include/uapi/linux/if_link.h b/include/uapi/linux/if_link.h index 6dc258993b17..8516c1ccd57a 100644 --- a/include/uapi/linux/if_link.h +++ b/include/uapi/linux/if_link.h @@ -377,6 +377,7 @@ enum { IFLA_GSO_IPV4_MAX_SIZE, IFLA_GRO_IPV4_MAX_SIZE, IFLA_DPLL_PIN, + IFLA_MAX_PACING_OFFLOAD_HORIZON, __IFLA_MAX }; @@ -1292,6 +1293,19 @@ enum netkit_mode { NETKIT_L3, }; +/* NETKIT_SCRUB_NONE leaves clearing skb->{mark,priority} up to + * the BPF program if attached. This also means the latter can + * consume the two fields if they were populated earlier. + * + * NETKIT_SCRUB_DEFAULT zeroes skb->{mark,priority} fields before + * invoking the attached BPF program when the peer device resides + * in a different network namespace. This is the default behavior. + */ +enum netkit_scrub { + NETKIT_SCRUB_NONE, + NETKIT_SCRUB_DEFAULT, +}; + enum { IFLA_NETKIT_UNSPEC, IFLA_NETKIT_PEER_INFO, @@ -1299,6 +1313,8 @@ enum { IFLA_NETKIT_POLICY, IFLA_NETKIT_PEER_POLICY, IFLA_NETKIT_MODE, + IFLA_NETKIT_SCRUB, + IFLA_NETKIT_PEER_SCRUB, __IFLA_NETKIT_MAX, }; #define IFLA_NETKIT_MAX (__IFLA_NETKIT_MAX - 1) diff --git a/include/uapi/linux/net_shaper.h b/include/uapi/linux/net_shaper.h new file mode 100644 index 000000000000..d8834b59f7d7 --- /dev/null +++ b/include/uapi/linux/net_shaper.h @@ -0,0 +1,95 @@ +/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause) */ +/* Do not edit directly, auto-generated from: */ +/* Documentation/netlink/specs/net_shaper.yaml */ +/* YNL-GEN uapi header */ + +#ifndef _UAPI_LINUX_NET_SHAPER_H +#define _UAPI_LINUX_NET_SHAPER_H + +#define NET_SHAPER_FAMILY_NAME "net-shaper" +#define NET_SHAPER_FAMILY_VERSION 1 + +/** + * enum net_shaper_scope - Defines the shaper @id interpretation. + * @NET_SHAPER_SCOPE_UNSPEC: The scope is not specified. + * @NET_SHAPER_SCOPE_NETDEV: The main shaper for the given network device. + * @NET_SHAPER_SCOPE_QUEUE: The shaper is attached to the given device queue, + * the @id represents the queue number. + * @NET_SHAPER_SCOPE_NODE: The shaper allows grouping of queues or other node + * shapers; can be nested in either @netdev shapers or other @node shapers, + * allowing placement in any location of the scheduling tree, except leaves + * and root. + */ +enum net_shaper_scope { + NET_SHAPER_SCOPE_UNSPEC, + NET_SHAPER_SCOPE_NETDEV, + NET_SHAPER_SCOPE_QUEUE, + NET_SHAPER_SCOPE_NODE, + + /* private: */ + __NET_SHAPER_SCOPE_MAX, + NET_SHAPER_SCOPE_MAX = (__NET_SHAPER_SCOPE_MAX - 1) +}; + +/** + * enum net_shaper_metric - Different metric supported by the shaper. + * @NET_SHAPER_METRIC_BPS: Shaper operates on a bits per second basis. + * @NET_SHAPER_METRIC_PPS: Shaper operates on a packets per second basis. 
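On the driver side, this uAPI is reached through the struct net_shaper_ops hook in net_device_ops shown earlier. A sketch of a hypothetical driver that only supports per-queue maximum rates; foo_hw_set_queue_rate() is an assumed hardware helper, and interpreting bw_max under the BPS metric is this sketch's choice::

  #include <linux/netdevice.h>
  #include <linux/netlink.h>
  #include <net/net_shaper.h>

  int foo_hw_set_queue_rate(struct net_device *dev, u32 queue, u64 bps);

  static int foo_shaper_set(struct net_shaper_binding *binding,
                            const struct net_shaper *shaper,
                            struct netlink_ext_ack *extack)
  {
          if (shaper->handle.scope != NET_SHAPER_SCOPE_QUEUE) {
                  NL_SET_ERR_MSG(extack, "only queue scope is supported");
                  return -EOPNOTSUPP;
          }
          return foo_hw_set_queue_rate(binding->netdev,
                                       shaper->handle.id, shaper->bw_max);
  }

  static int foo_shaper_delete(struct net_shaper_binding *binding,
                               const struct net_shaper_handle *handle,
                               struct netlink_ext_ack *extack)
  {
          /* Rate 0 restores the unshaped default for this queue. */
          return foo_hw_set_queue_rate(binding->netdev, handle->id, 0);
  }

  static const struct net_shaper_ops foo_shaper_ops = {
          .set    = foo_shaper_set,
          .delete = foo_shaper_delete,
  };

  static const struct net_device_ops foo_netdev_ops = {
          /* ... regular ndo_* hooks ... */
  #if IS_ENABLED(CONFIG_NET_SHAPER)
          .net_shaper_ops = &foo_shaper_ops,
  #endif
  };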
+ */ +enum net_shaper_metric { + NET_SHAPER_METRIC_BPS, + NET_SHAPER_METRIC_PPS, +}; + +enum { + NET_SHAPER_A_HANDLE = 1, + NET_SHAPER_A_METRIC, + NET_SHAPER_A_BW_MIN, + NET_SHAPER_A_BW_MAX, + NET_SHAPER_A_BURST, + NET_SHAPER_A_PRIORITY, + NET_SHAPER_A_WEIGHT, + NET_SHAPER_A_IFINDEX, + NET_SHAPER_A_PARENT, + NET_SHAPER_A_LEAVES, + + __NET_SHAPER_A_MAX, + NET_SHAPER_A_MAX = (__NET_SHAPER_A_MAX - 1) +}; + +enum { + NET_SHAPER_A_HANDLE_SCOPE = 1, + NET_SHAPER_A_HANDLE_ID, + + __NET_SHAPER_A_HANDLE_MAX, + NET_SHAPER_A_HANDLE_MAX = (__NET_SHAPER_A_HANDLE_MAX - 1) +}; + +enum { + NET_SHAPER_A_CAPS_IFINDEX = 1, + NET_SHAPER_A_CAPS_SCOPE, + NET_SHAPER_A_CAPS_SUPPORT_METRIC_BPS, + NET_SHAPER_A_CAPS_SUPPORT_METRIC_PPS, + NET_SHAPER_A_CAPS_SUPPORT_NESTING, + NET_SHAPER_A_CAPS_SUPPORT_BW_MIN, + NET_SHAPER_A_CAPS_SUPPORT_BW_MAX, + NET_SHAPER_A_CAPS_SUPPORT_BURST, + NET_SHAPER_A_CAPS_SUPPORT_PRIORITY, + NET_SHAPER_A_CAPS_SUPPORT_WEIGHT, + + __NET_SHAPER_A_CAPS_MAX, + NET_SHAPER_A_CAPS_MAX = (__NET_SHAPER_A_CAPS_MAX - 1) +}; + +enum { + NET_SHAPER_CMD_GET = 1, + NET_SHAPER_CMD_SET, + NET_SHAPER_CMD_DELETE, + NET_SHAPER_CMD_GROUP, + NET_SHAPER_CMD_CAP_GET, + + __NET_SHAPER_CMD_MAX, + NET_SHAPER_CMD_MAX = (__NET_SHAPER_CMD_MAX - 1) +}; + +#endif /* _UAPI_LINUX_NET_SHAPER_H */ diff --git a/include/uapi/linux/netdev.h b/include/uapi/linux/netdev.h index 7c308f04e7a0..e3ebb49f60d2 100644 --- a/include/uapi/linux/netdev.h +++ b/include/uapi/linux/netdev.h @@ -122,6 +122,8 @@ enum { NETDEV_A_NAPI_ID, NETDEV_A_NAPI_IRQ, NETDEV_A_NAPI_PID, + NETDEV_A_NAPI_DEFER_HARD_IRQS, + NETDEV_A_NAPI_GRO_FLUSH_TIMEOUT, __NETDEV_A_NAPI_MAX, NETDEV_A_NAPI_MAX = (__NETDEV_A_NAPI_MAX - 1) @@ -199,6 +201,7 @@ enum { NETDEV_CMD_NAPI_GET, NETDEV_CMD_QSTATS_GET, NETDEV_CMD_BIND_RX, + NETDEV_CMD_NAPI_SET, __NETDEV_CMD_MAX, NETDEV_CMD_MAX = (__NETDEV_CMD_MAX - 1) diff --git a/include/uapi/linux/pkt_sched.h b/include/uapi/linux/pkt_sched.h index a3cd0c2dc995..25a9a47001cd 100644 --- a/include/uapi/linux/pkt_sched.h +++ b/include/uapi/linux/pkt_sched.h @@ -836,6 +836,8 @@ enum { TCA_FQ_WEIGHTS, /* Weights for each band */ + TCA_FQ_OFFLOAD_HORIZON, /* dequeue paced packets within this horizon immediately (us units) */ + __TCA_FQ_MAX }; diff --git a/include/uapi/linux/udp.h b/include/uapi/linux/udp.h index 1a0fe8b151fb..d85d671deed3 100644 --- a/include/uapi/linux/udp.h +++ b/include/uapi/linux/udp.h @@ -31,7 +31,7 @@ struct udphdr { #define UDP_CORK 1 /* Never send partially complete segments */ #define UDP_ENCAP 100 /* Set the socket to accept encapsulated packets */ #define UDP_NO_CHECK6_TX 101 /* Disable sending checksum for UDP6X */ -#define UDP_NO_CHECK6_RX 102 /* Disable accpeting checksum for UDP6 */ +#define UDP_NO_CHECK6_RX 102 /* Disable accepting checksum for UDP6 */ #define UDP_SEGMENT 103 /* Set GSO segmentation size */ #define UDP_GRO 104 /* This socket can receive UDP GRO packets */ diff --git a/include/uapi/linux/vmclock-abi.h b/include/uapi/linux/vmclock-abi.h new file mode 100644 index 000000000000..2d99b29ac44a --- /dev/null +++ b/include/uapi/linux/vmclock-abi.h @@ -0,0 +1,182 @@ +/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-2-Clause) */ + +/* + * This structure provides a vDSO-style clock to VM guests, exposing the + * relationship (or lack thereof) between the CPU clock (TSC, timebase, arch + * counter, etc.) and real time. It is designed to address the problem of + * live migration, which other clock enlightenments do not. 
+ * + * When a guest is live migrated, this affects the clock in two ways. + * + * First, even between identical hosts the actual frequency of the underlying + * counter will change within the tolerances of its specification (typically + * ±50PPM, or 4 seconds a day). This frequency also varies over time on the + * same host, but can be tracked by NTP as it generally varies slowly. With + * live migration there is a step change in the frequency, with no warning. + * + * Second, there may be a step change in the value of the counter itself, as + * its accuracy is limited by the precision of the NTP synchronization on the + * source and destination hosts. + * + * So any calibration (NTP, PTP, etc.) which the guest has done on the source + * host before migration is invalid, and needs to be redone on the new host. + * + * In its most basic mode, this structure provides only an indication to the + * guest that live migration has occurred. This allows the guest to know that + * its clock is invalid and take remedial action. For applications that need + * reliable accurate timestamps (e.g. distributed databases), the structure + * can be mapped all the way to userspace. This allows the application to see + * directly for itself that the clock is disrupted and take appropriate + * action, even when using a vDSO-style method to get the time instead of a + * system call. + * + * In its more advanced mode, this structure can also be used to expose the + * precise relationship of the CPU counter to real time, as calibrated by the + * host. This means that userspace applications can have accurate time + * immediately after live migration, rather than having to pause operations + * and wait for NTP to recover. This mode does, of course, rely on the + * counter being reliable and consistent across CPUs. + * + * Note that this must be true UTC, never with smeared leap seconds. If a + * guest wishes to construct a smeared clock, it can do so. Presenting a + * smeared clock through this interface would be problematic because it + * actually messes with the apparent counter *period*. A linear smearing + * of 1 ms per second would effectively tweak the counter period by 1000PPM + * at the start/end of the smearing period, while a sinusoidal smear would + * basically be impossible to represent. + * + * This structure is offered with the intent that it be adopted into the + * nascent virtio-rtc standard, as a virtio-rtc that does not address the live + * migration problem seems a little less than fit for purpose. For that + * reason, certain fields use precisely the same numeric definitions as in + * the virtio-rtc proposal. The structure can also be exposed through an ACPI + * device with the CID "VMCLOCK", modelled on the "VMGENID" device except for + * the fact that it uses a real _CRS to convey the address of the structure + * (which should be a full page, to allow for mapping directly to userspace).
+ */

+#ifndef __VMCLOCK_ABI_H__
+#define __VMCLOCK_ABI_H__
+
+#include <linux/types.h>
+
+struct vmclock_abi {
+	/* CONSTANT FIELDS */
+	__le32 magic;
+#define VMCLOCK_MAGIC	0x4b4c4356 /* "VCLK" */
+	__le32 size;		/* Size of region containing this structure */
+	__le16 version;	/* 1 */
+	__u8 counter_id; /* Matches VIRTIO_RTC_COUNTER_xxx except INVALID */
+#define VMCLOCK_COUNTER_ARM_VCNT	0
+#define VMCLOCK_COUNTER_X86_TSC		1
+#define VMCLOCK_COUNTER_INVALID		0xff
+	__u8 time_type; /* Matches VIRTIO_RTC_TYPE_xxx */
+#define VMCLOCK_TIME_UTC			0	/* Since 1970-01-01 00:00:00z */
+#define VMCLOCK_TIME_TAI			1	/* Since 1970-01-01 00:00:00z */
+#define VMCLOCK_TIME_MONOTONIC			2	/* Since undefined epoch */
+#define VMCLOCK_TIME_INVALID_SMEARED		3	/* Not supported */
+#define VMCLOCK_TIME_INVALID_MAYBE_SMEARED	4	/* Not supported */
+
+	/* NON-CONSTANT FIELDS PROTECTED BY SEQCOUNT LOCK */
+	__le32 seq_count;	 /* Low bit means an update is in progress */
+	/*
+	 * This field changes to another non-repeating value when the CPU
+	 * counter is disrupted, for example on live migration. This lets
+	 * the guest know that it should discard any calibration it has
+	 * performed of the counter against external sources (NTP/PTP/etc.).
+	 */
+	__le64 disruption_marker;
+	__le64 flags;
+	/* Indicates that the tai_offset_sec field is valid */
+#define VMCLOCK_FLAG_TAI_OFFSET_VALID		(1 << 0)
+	/*
+	 * Optionally used to notify guests of pending maintenance events.
+	 * A guest which provides latency-sensitive services may wish to
+	 * remove itself from service if an event is coming up. Two flags
+	 * indicate the approximate imminence of the event.
+	 */
+#define VMCLOCK_FLAG_DISRUPTION_SOON		(1 << 1) /* About a day */
+#define VMCLOCK_FLAG_DISRUPTION_IMMINENT	(1 << 2) /* About an hour */
+#define VMCLOCK_FLAG_PERIOD_ESTERROR_VALID	(1 << 3)
+#define VMCLOCK_FLAG_PERIOD_MAXERROR_VALID	(1 << 4)
+#define VMCLOCK_FLAG_TIME_ESTERROR_VALID	(1 << 5)
+#define VMCLOCK_FLAG_TIME_MAXERROR_VALID	(1 << 6)
+	/*
+	 * If the MONOTONIC flag is set then (other than leap seconds) it is
+	 * guaranteed that the time calculated according to this structure at
+	 * any given moment shall never appear to be later than the time
+	 * calculated via the structure at any *later* moment.
+	 *
+	 * In particular, a timestamp based on a counter reading taken
+	 * immediately after setting the low bit of seq_count (and the
+	 * associated memory barrier), using the previously-valid time and
+	 * period fields, shall never be later than a timestamp based on
+	 * a counter reading taken immediately before *clearing* the low
+	 * bit again after the update, using the about-to-be-valid fields.
+	 */
+#define VMCLOCK_FLAG_TIME_MONOTONIC		(1 << 7)
+
+	__u8 pad[2];
+	__u8 clock_status;
+#define VMCLOCK_STATUS_UNKNOWN		0
+#define VMCLOCK_STATUS_INITIALIZING	1
+#define VMCLOCK_STATUS_SYNCHRONIZED	2
+#define VMCLOCK_STATUS_FREERUNNING	3
+#define VMCLOCK_STATUS_UNRELIABLE	4
+
+	/*
+	 * The time exposed through this device is never smeared. This field
+	 * corresponds to the 'subtype' field in virtio-rtc, which indicates
+	 * the smearing method. However, in this case it provides a *hint* to
+	 * the guest operating system, such that *if* the guest OS wants to
+	 * provide its users with an alternative clock which does not follow
+	 * UTC, it may do so in a fashion consistent with the other systems
+	 * in the nearby environment.
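+	 *
+	 * As an example of one common convention (a sketch, not mandated
+	 * by this structure), a "noon linear" smear amortizes a positive
+	 * leap second linearly across the 24 hours from the noon before
+	 * the leap to the noon after it:
+	 *
+	 *	smeared_time = utc_time - (t_elapsed / 86400) * 1s
+	 *
+	 * where t_elapsed is the time since the smear window opened.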
+ */ + __u8 leap_second_smearing_hint; /* Matches VIRTIO_RTC_SUBTYPE_xxx */ +#define VMCLOCK_SMEARING_STRICT 0 +#define VMCLOCK_SMEARING_NOON_LINEAR 1 +#define VMCLOCK_SMEARING_UTC_SLS 2 + __le16 tai_offset_sec; /* Actually two's complement signed */ + __u8 leap_indicator; + /* + * This field is based on the VIRTIO_RTC_LEAP_xxx values as defined + * in the current draft of virtio-rtc, but since smearing cannot be + * used with the shared memory device, some values are not used. + * + * The _POST_POS and _POST_NEG values allow the guest to perform + * its own smearing during the day or so after a leap second when + * such smearing may need to continue being applied for a leap + * second which is now theoretically "historical". + */ +#define VMCLOCK_LEAP_NONE 0x00 /* No known nearby leap second */ +#define VMCLOCK_LEAP_PRE_POS 0x01 /* Positive leap second at EOM */ +#define VMCLOCK_LEAP_PRE_NEG 0x02 /* Negative leap second at EOM */ +#define VMCLOCK_LEAP_POS 0x03 /* Set during 23:59:60 second */ +#define VMCLOCK_LEAP_POST_POS 0x04 +#define VMCLOCK_LEAP_POST_NEG 0x05 + + /* Bit shift for counter_period_frac_sec and its error rate */ + __u8 counter_period_shift; + /* + * Paired values of counter and UTC at a given point in time. + */ + __le64 counter_value; + /* + * Counter period, and error margin of same. The unit of these + * fields is 1/2^(64 + counter_period_shift) of a second. + */ + __le64 counter_period_frac_sec; + __le64 counter_period_esterror_rate_frac_sec; + __le64 counter_period_maxerror_rate_frac_sec; + + /* + * Time according to time_type field above. + */ + __le64 time_sec; /* Seconds since time_type epoch */ + __le64 time_frac_sec; /* Units of 1/2^64 of a second */ + __le64 time_esterror_nanosec; + __le64 time_maxerror_nanosec; +}; + +#endif /* __VMCLOCK_ABI_H__ */ diff --git a/lib/Kconfig b/lib/Kconfig index b38849af6f13..50d85f38b569 100644 --- a/lib/Kconfig +++ b/lib/Kconfig @@ -40,6 +40,18 @@ config PACKING When in doubt, say N. +config PACKING_KUNIT_TEST + tristate "KUnit tests for packing library" if !KUNIT_ALL_TESTS + depends on PACKING && KUNIT + default KUNIT_ALL_TESTS + help + This builds KUnit tests for the packing library. + + For more information on KUnit and unit tests in general, + please refer to the KUnit documentation in Documentation/dev-tools/kunit/. + + When in doubt, say N. + config BITREVERSE tristate diff --git a/lib/Makefile b/lib/Makefile index 773adf88af41..811ba12c8cd0 100644 --- a/lib/Makefile +++ b/lib/Makefile @@ -154,6 +154,7 @@ obj-$(CONFIG_DEBUG_OBJECTS) += debugobjects.o obj-$(CONFIG_BITREVERSE) += bitrev.o obj-$(CONFIG_LINEAR_RANGES) += linear_ranges.o obj-$(CONFIG_PACKING) += packing.o +obj-$(CONFIG_PACKING_KUNIT_TEST) += packing_test.o obj-$(CONFIG_CRC_CCITT) += crc-ccitt.o obj-$(CONFIG_CRC16) += crc16.o obj-$(CONFIG_CRC_T10DIF)+= crc-t10dif.o diff --git a/lib/packing.c b/lib/packing.c index 3f656167c17e..793942745e34 100644 --- a/lib/packing.c +++ b/lib/packing.c @@ -9,52 +9,53 @@ #include <linux/types.h> #include <linux/bitrev.h> -static int get_le_offset(int offset) +/** + * calculate_box_addr - Determine physical location of byte in buffer + * @box: Index of byte within buffer seen as a logical big-endian big number + * @len: Size of buffer in bytes + * @quirks: mask of QUIRK_LSW32_IS_FIRST and QUIRK_LITTLE_ENDIAN + * + * Function interprets the buffer as a @len byte sized big number, and returns + * the physical offset of the @box logical octet within it. Internally, it + * treats the big number as groups of 4 bytes. 
If @len is not a multiple of 4, + * the last group may be shorter. + * + * @QUIRK_LSW32_IS_FIRST gives the ordering of groups of 4 octets relative to + * each other. If set, the most significant group of 4 octets is last in the + * buffer (and may be truncated if @len is not a multiple of 4). + * + * @QUIRK_LITTLE_ENDIAN gives the ordering of bytes within each group of 4. + * If set, the most significant byte is last in the group. If @len takes the + * form of 4k+3, the last group will only be able to represent 24 bits, and its + * most significant octet is byte 2. + * + * Return: the physical offset into the buffer corresponding to the logical box. + */ +static size_t calculate_box_addr(size_t box, size_t len, u8 quirks) { - int closest_multiple_of_4; + size_t offset_of_group, offset_in_group, this_group = box / 4; + size_t group_size; - closest_multiple_of_4 = (offset / 4) * 4; - offset -= closest_multiple_of_4; - return closest_multiple_of_4 + (3 - offset); -} + if (quirks & QUIRK_LSW32_IS_FIRST) + offset_of_group = this_group * 4; + else + offset_of_group = len - ((this_group + 1) * 4); -static int get_reverse_lsw32_offset(int offset, size_t len) -{ - int closest_multiple_of_4; - int word_index; - - word_index = offset / 4; - closest_multiple_of_4 = word_index * 4; - offset -= closest_multiple_of_4; - word_index = (len / 4) - word_index - 1; - return word_index * 4 + offset; -} + group_size = min(4, len - offset_of_group); -static void adjust_for_msb_right_quirk(u64 *to_write, int *box_start_bit, - int *box_end_bit, u8 *box_mask) -{ - int box_bit_width = *box_start_bit - *box_end_bit + 1; - int new_box_start_bit, new_box_end_bit; - - *to_write >>= *box_end_bit; - *to_write = bitrev8(*to_write) >> (8 - box_bit_width); - *to_write <<= *box_end_bit; - - new_box_end_bit = box_bit_width - *box_start_bit - 1; - new_box_start_bit = box_bit_width - *box_end_bit - 1; - *box_mask = GENMASK_ULL(new_box_start_bit, new_box_end_bit); - *box_start_bit = new_box_start_bit; - *box_end_bit = new_box_end_bit; + if (quirks & QUIRK_LITTLE_ENDIAN) + offset_in_group = box - this_group * 4; + else + offset_in_group = group_size - (box - this_group * 4) - 1; + + return offset_of_group + offset_in_group; } /** - * packing - Convert numbers (currently u64) between a packed and an unpacked - * format. Unpacked means laid out in memory in the CPU's native - * understanding of integers, while packed means anything else that - * requires translation. + * pack - Pack u64 number into bitfield of buffer. * * @pbuf: Pointer to a buffer holding the packed value. - * @uval: Pointer to an u64 holding the unpacked value. + * @uval: CPU-readable unpacked value to pack. * @startbit: The index (in logical notation, compensated for quirks) where * the packed value starts within pbuf. Must be larger than, or * equal to, endbit. @@ -62,79 +63,179 @@ static void adjust_for_msb_right_quirk(u64 *to_write, int *box_start_bit, * the packed value ends within pbuf. Must be smaller than, or equal * to, startbit. * @pbuflen: The length in bytes of the packed buffer pointed to by @pbuf. - * @op: If PACK, then uval will be treated as const pointer and copied (packed) - * into pbuf, between startbit and endbit. - * If UNPACK, then pbuf will be treated as const pointer and the logical - * value between startbit and endbit will be copied (unpacked) to uval. * @quirks: A bit mask of QUIRK_LITTLE_ENDIAN, QUIRK_LSW32_IS_FIRST and * QUIRK_MSB_ON_THE_RIGHT. * * Return: 0 on success, EINVAL or ERANGE if called incorrectly. 
Assuming - * correct usage, return code may be discarded. - * If op is PACK, pbuf is modified. - * If op is UNPACK, uval is modified. + * correct usage, return code may be discarded. The @pbuf memory will + * be modified on success. */ -int packing(void *pbuf, u64 *uval, int startbit, int endbit, size_t pbuflen, - enum packing_op op, u8 quirks) +int pack(void *pbuf, u64 uval, size_t startbit, size_t endbit, size_t pbuflen, + u8 quirks) { - /* Number of bits for storing "uval" - * also width of the field to access in the pbuf - */ - u64 value_width; /* Logical byte indices corresponding to the * start and end of the field. */ int plogical_first_u8, plogical_last_u8, box; + /* width of the field to access in the pbuf */ + u64 value_width; - /* startbit is expected to be larger than endbit */ - if (startbit < endbit) + /* startbit is expected to be larger than endbit, and both are + * expected to be within the logically addressable range of the buffer. + */ + if (unlikely(startbit < endbit || startbit >= BITS_PER_BYTE * pbuflen)) /* Invalid function call */ return -EINVAL; value_width = startbit - endbit + 1; - if (value_width > 64) + if (unlikely(value_width > 64)) return -ERANGE; /* Check if "uval" fits in "value_width" bits. * If value_width is 64, the check will fail, but any * 64-bit uval will surely fit. */ - if (op == PACK && value_width < 64 && (*uval >= (1ull << value_width))) + if (unlikely(value_width < 64 && uval >= (1ull << value_width))) /* Cannot store "uval" inside "value_width" bits. * Truncating "uval" is most certainly not desirable, * so simply erroring out is appropriate. */ return -ERANGE; + /* Iterate through an idealistic view of the pbuf as an u64 with + * no quirks, u8 by u8 (aligned at u8 boundaries), from high to low + * logical bit significance. "box" denotes the current logical u8. + */ + plogical_first_u8 = startbit / BITS_PER_BYTE; + plogical_last_u8 = endbit / BITS_PER_BYTE; + + for (box = plogical_first_u8; box >= plogical_last_u8; box--) { + /* Bit indices into the currently accessed 8-bit box */ + size_t box_start_bit, box_end_bit, box_addr; + u8 box_mask; + /* Corresponding bits from the unpacked u64 parameter */ + size_t proj_start_bit, proj_end_bit; + u64 proj_mask; + u64 pval; + + /* This u8 may need to be accessed in its entirety + * (from bit 7 to bit 0), or not, depending on the + * input arguments startbit and endbit. + */ + if (box == plogical_first_u8) + box_start_bit = startbit % BITS_PER_BYTE; + else + box_start_bit = 7; + if (box == plogical_last_u8) + box_end_bit = endbit % BITS_PER_BYTE; + else + box_end_bit = 0; + + /* We have determined the box bit start and end. + * Now we calculate where this (masked) u8 box would fit + * in the unpacked (CPU-readable) u64 - the u8 box's + * projection onto the unpacked u64. Though the + * box is u8, the projection is u64 because it may fall + * anywhere within the unpacked u64. + */ + proj_start_bit = ((box * BITS_PER_BYTE) + box_start_bit) - endbit; + proj_end_bit = ((box * BITS_PER_BYTE) + box_end_bit) - endbit; + proj_mask = GENMASK_ULL(proj_start_bit, proj_end_bit); + box_mask = GENMASK(box_start_bit, box_end_bit); + + /* Determine the offset of the u8 box inside the pbuf, + * adjusted for quirks. The adjusted box_addr will be used for + * effective addressing inside the pbuf (so it's not + * logical any longer). 
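+		 *
+		 * For example, with a 16-byte buffer and no quirks, box 0
+		 * (the least significant logical octet) resolves to
+		 * physical offset 15, while with QUIRK_LSW32_IS_FIRST it
+		 * resolves to offset 3.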
+ */ + box_addr = calculate_box_addr(box, pbuflen, quirks); + + /* Write to pbuf, read from uval */ + pval = uval & proj_mask; + pval >>= proj_end_bit; + pval <<= box_end_bit; + + if (quirks & QUIRK_MSB_ON_THE_RIGHT) { + pval = bitrev8(pval); + box_mask = bitrev8(box_mask); + } + + ((u8 *)pbuf)[box_addr] &= ~box_mask; + ((u8 *)pbuf)[box_addr] |= pval; + } + return 0; +} +EXPORT_SYMBOL(pack); + +/** + * unpack - Unpack u64 number from packed buffer. + * + * @pbuf: Pointer to a buffer holding the packed value. + * @uval: Pointer to an u64 holding the unpacked value. + * @startbit: The index (in logical notation, compensated for quirks) where + * the packed value starts within pbuf. Must be larger than, or + * equal to, endbit. + * @endbit: The index (in logical notation, compensated for quirks) where + * the packed value ends within pbuf. Must be smaller than, or equal + * to, startbit. + * @pbuflen: The length in bytes of the packed buffer pointed to by @pbuf. + * @quirks: A bit mask of QUIRK_LITTLE_ENDIAN, QUIRK_LSW32_IS_FIRST and + * QUIRK_MSB_ON_THE_RIGHT. + * + * Return: 0 on success, EINVAL or ERANGE if called incorrectly. Assuming + * correct usage, return code may be discarded. The @uval will be + * modified on success. + */ +int unpack(const void *pbuf, u64 *uval, size_t startbit, size_t endbit, + size_t pbuflen, u8 quirks) +{ + /* Logical byte indices corresponding to the + * start and end of the field. + */ + int plogical_first_u8, plogical_last_u8, box; + /* width of the field to access in the pbuf */ + u64 value_width; + + /* startbit is expected to be larger than endbit, and both are + * expected to be within the logically addressable range of the buffer. + */ + if (unlikely(startbit < endbit || startbit >= BITS_PER_BYTE * pbuflen)) + /* Invalid function call */ + return -EINVAL; + + value_width = startbit - endbit + 1; + if (unlikely(value_width > 64)) + return -ERANGE; + /* Initialize parameter */ - if (op == UNPACK) - *uval = 0; + *uval = 0; /* Iterate through an idealistic view of the pbuf as an u64 with * no quirks, u8 by u8 (aligned at u8 boundaries), from high to low * logical bit significance. "box" denotes the current logical u8. */ - plogical_first_u8 = startbit / 8; - plogical_last_u8 = endbit / 8; + plogical_first_u8 = startbit / BITS_PER_BYTE; + plogical_last_u8 = endbit / BITS_PER_BYTE; for (box = plogical_first_u8; box >= plogical_last_u8; box--) { /* Bit indices into the currently accessed 8-bit box */ - int box_start_bit, box_end_bit, box_addr; + size_t box_start_bit, box_end_bit, box_addr; u8 box_mask; /* Corresponding bits from the unpacked u64 parameter */ - int proj_start_bit, proj_end_bit; + size_t proj_start_bit, proj_end_bit; u64 proj_mask; + u64 pval; /* This u8 may need to be accessed in its entirety * (from bit 7 to bit 0), or not, depending on the * input arguments startbit and endbit. */ if (box == plogical_first_u8) - box_start_bit = startbit % 8; + box_start_bit = startbit % BITS_PER_BYTE; else box_start_bit = 7; if (box == plogical_last_u8) - box_end_bit = endbit % 8; + box_end_bit = endbit % BITS_PER_BYTE; else box_end_bit = 0; @@ -145,57 +246,72 @@ int packing(void *pbuf, u64 *uval, int startbit, int endbit, size_t pbuflen, * box is u8, the projection is u64 because it may fall * anywhere within the unpacked u64. 
*/ - proj_start_bit = ((box * 8) + box_start_bit) - endbit; - proj_end_bit = ((box * 8) + box_end_bit) - endbit; + proj_start_bit = ((box * BITS_PER_BYTE) + box_start_bit) - endbit; + proj_end_bit = ((box * BITS_PER_BYTE) + box_end_bit) - endbit; proj_mask = GENMASK_ULL(proj_start_bit, proj_end_bit); - box_mask = GENMASK_ULL(box_start_bit, box_end_bit); + box_mask = GENMASK(box_start_bit, box_end_bit); /* Determine the offset of the u8 box inside the pbuf, * adjusted for quirks. The adjusted box_addr will be used for * effective addressing inside the pbuf (so it's not * logical any longer). */ - box_addr = pbuflen - box - 1; - if (quirks & QUIRK_LITTLE_ENDIAN) - box_addr = get_le_offset(box_addr); - if (quirks & QUIRK_LSW32_IS_FIRST) - box_addr = get_reverse_lsw32_offset(box_addr, - pbuflen); - - if (op == UNPACK) { - u64 pval; - - /* Read from pbuf, write to uval */ - pval = ((u8 *)pbuf)[box_addr] & box_mask; - if (quirks & QUIRK_MSB_ON_THE_RIGHT) - adjust_for_msb_right_quirk(&pval, - &box_start_bit, - &box_end_bit, - &box_mask); - - pval >>= box_end_bit; - pval <<= proj_end_bit; - *uval &= ~proj_mask; - *uval |= pval; - } else { - u64 pval; - - /* Write to pbuf, read from uval */ - pval = (*uval) & proj_mask; - pval >>= proj_end_bit; - if (quirks & QUIRK_MSB_ON_THE_RIGHT) - adjust_for_msb_right_quirk(&pval, - &box_start_bit, - &box_end_bit, - &box_mask); - - pval <<= box_end_bit; - ((u8 *)pbuf)[box_addr] &= ~box_mask; - ((u8 *)pbuf)[box_addr] |= pval; - } + box_addr = calculate_box_addr(box, pbuflen, quirks); + + /* Read from pbuf, write to uval */ + pval = ((u8 *)pbuf)[box_addr]; + + if (quirks & QUIRK_MSB_ON_THE_RIGHT) + pval = bitrev8(pval); + + pval &= box_mask; + + pval >>= box_end_bit; + pval <<= proj_end_bit; + *uval &= ~proj_mask; + *uval |= pval; } return 0; } +EXPORT_SYMBOL(unpack); + +/** + * packing - Convert numbers (currently u64) between a packed and an unpacked + * format. Unpacked means laid out in memory in the CPU's native + * understanding of integers, while packed means anything else that + * requires translation. + * + * @pbuf: Pointer to a buffer holding the packed value. + * @uval: Pointer to an u64 holding the unpacked value. + * @startbit: The index (in logical notation, compensated for quirks) where + * the packed value starts within pbuf. Must be larger than, or + * equal to, endbit. + * @endbit: The index (in logical notation, compensated for quirks) where + * the packed value ends within pbuf. Must be smaller than, or equal + * to, startbit. + * @pbuflen: The length in bytes of the packed buffer pointed to by @pbuf. + * @op: If PACK, then uval will be treated as const pointer and copied (packed) + * into pbuf, between startbit and endbit. + * If UNPACK, then pbuf will be treated as const pointer and the logical + * value between startbit and endbit will be copied (unpacked) to uval. + * @quirks: A bit mask of QUIRK_LITTLE_ENDIAN, QUIRK_LSW32_IS_FIRST and + * QUIRK_MSB_ON_THE_RIGHT. + * + * Note: this is deprecated, prefer to use pack() or unpack() in new code. + * + * Return: 0 on success, EINVAL or ERANGE if called incorrectly. Assuming + * correct usage, return code may be discarded. + * If op is PACK, pbuf is modified. + * If op is UNPACK, uval is modified. 
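+ *
+ * As a minimal usage sketch, packing the value 0x1234 into bits 23..8 of
+ * a zeroed 3-byte buffer with no quirks:
+ *
+ *	u8 buf[3] = {};
+ *
+ *	pack(buf, 0x1234, 23, 8, sizeof(buf), 0);
+ *
+ * leaves buf holding { 0x12, 0x34, 0x00 }.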
+ */ +int packing(void *pbuf, u64 *uval, int startbit, int endbit, size_t pbuflen, + enum packing_op op, u8 quirks) +{ + if (op == PACK) + return pack(pbuf, *uval, startbit, endbit, pbuflen, quirks); + + return unpack(pbuf, uval, startbit, endbit, pbuflen, quirks); +} EXPORT_SYMBOL(packing); MODULE_DESCRIPTION("Generic bitfield packing and unpacking"); diff --git a/lib/packing_test.c b/lib/packing_test.c new file mode 100644 index 000000000000..b38ea43c03fd --- /dev/null +++ b/lib/packing_test.c @@ -0,0 +1,413 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2024, Vladimir Oltean <olteanv@gmail.com> + * Copyright (c) 2024, Intel Corporation. + */ +#include <kunit/test.h> +#include <linux/packing.h> + +struct packing_test_case { + const char *desc; + const u8 *pbuf; + size_t pbuf_size; + u64 uval; + size_t start_bit; + size_t end_bit; + u8 quirks; +}; + +#define NO_QUIRKS 0 + +/** + * PBUF - Initialize .pbuf and .pbuf_size + * @array: elements of constant physical buffer + * + * Initializes the .pbuf and .pbuf_size fields of a struct packing_test_case + * with a constant array of the specified elements. + */ +#define PBUF(array...) \ + .pbuf = (const u8[]){ array }, \ + .pbuf_size = sizeof((const u8 []){ array }) + +static const struct packing_test_case cases[] = { + /* These tests pack and unpack a magic 64-bit value + * (0xcafedeadbeefcafe) at a fixed logical offset (32) within an + * otherwise zero array of 128 bits (16 bytes). They test all possible + * bit layouts of the 128 bit buffer. + */ + { + .desc = "no quirks, 16 bytes", + PBUF(0x00, 0x00, 0x00, 0x00, 0xca, 0xfe, 0xde, 0xad, + 0xbe, 0xef, 0xca, 0xfe, 0x00, 0x00, 0x00, 0x00), + .uval = 0xcafedeadbeefcafe, + .start_bit = 95, + .end_bit = 32, + .quirks = NO_QUIRKS, + }, + { + .desc = "lsw32 first, 16 bytes", + PBUF(0x00, 0x00, 0x00, 0x00, 0xbe, 0xef, 0xca, 0xfe, + 0xca, 0xfe, 0xde, 0xad, 0x00, 0x00, 0x00, 0x00), + .uval = 0xcafedeadbeefcafe, + .start_bit = 95, + .end_bit = 32, + .quirks = QUIRK_LSW32_IS_FIRST, + }, + { + .desc = "little endian words, 16 bytes", + PBUF(0x00, 0x00, 0x00, 0x00, 0xad, 0xde, 0xfe, 0xca, + 0xfe, 0xca, 0xef, 0xbe, 0x00, 0x00, 0x00, 0x00), + .uval = 0xcafedeadbeefcafe, + .start_bit = 95, + .end_bit = 32, + .quirks = QUIRK_LITTLE_ENDIAN, + }, + { + .desc = "lsw32 first + little endian words, 16 bytes", + PBUF(0x00, 0x00, 0x00, 0x00, 0xfe, 0xca, 0xef, 0xbe, + 0xad, 0xde, 0xfe, 0xca, 0x00, 0x00, 0x00, 0x00), + .uval = 0xcafedeadbeefcafe, + .start_bit = 95, + .end_bit = 32, + .quirks = QUIRK_LSW32_IS_FIRST | QUIRK_LITTLE_ENDIAN, + }, + { + .desc = "msb right, 16 bytes", + PBUF(0x00, 0x00, 0x00, 0x00, 0x53, 0x7f, 0x7b, 0xb5, + 0x7d, 0xf7, 0x53, 0x7f, 0x00, 0x00, 0x00, 0x00), + .uval = 0xcafedeadbeefcafe, + .start_bit = 95, + .end_bit = 32, + .quirks = QUIRK_MSB_ON_THE_RIGHT, + }, + { + .desc = "msb right + lsw32 first, 16 bytes", + PBUF(0x00, 0x00, 0x00, 0x00, 0x7d, 0xf7, 0x53, 0x7f, + 0x53, 0x7f, 0x7b, 0xb5, 0x00, 0x00, 0x00, 0x00), + .uval = 0xcafedeadbeefcafe, + .start_bit = 95, + .end_bit = 32, + .quirks = QUIRK_MSB_ON_THE_RIGHT | QUIRK_LSW32_IS_FIRST, + }, + { + .desc = "msb right + little endian words, 16 bytes", + PBUF(0x00, 0x00, 0x00, 0x00, 0xb5, 0x7b, 0x7f, 0x53, + 0x7f, 0x53, 0xf7, 0x7d, 0x00, 0x00, 0x00, 0x00), + .uval = 0xcafedeadbeefcafe, + .start_bit = 95, + .end_bit = 32, + .quirks = QUIRK_MSB_ON_THE_RIGHT | QUIRK_LITTLE_ENDIAN, + }, + { + .desc = "msb right + lsw32 first + little endian words, 16 bytes", + PBUF(0x00, 0x00, 0x00, 0x00, 0x7f, 0x53, 0xf7, 0x7d, + 0xb5, 0x7b, 0x7f, 0x53, 
0x00, 0x00, 0x00, 0x00), + .uval = 0xcafedeadbeefcafe, + .start_bit = 95, + .end_bit = 32, + .quirks = QUIRK_MSB_ON_THE_RIGHT | QUIRK_LSW32_IS_FIRST | QUIRK_LITTLE_ENDIAN, + }, + /* These tests pack and unpack a magic 64-bit value + * (0xcafedeadbeefcafe) at a fixed logical offset (32) within an + * otherwise zero array of varying size from 18 bytes to 24 bytes. + */ + { + .desc = "no quirks, 18 bytes", + PBUF(0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xca, 0xfe, + 0xde, 0xad, 0xbe, 0xef, 0xca, 0xfe, 0x00, 0x00, + 0x00, 0x00), + .uval = 0xcafedeadbeefcafe, + .start_bit = 95, + .end_bit = 32, + .quirks = NO_QUIRKS, + }, + { + .desc = "no quirks, 19 bytes", + PBUF(0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xca, + 0xfe, 0xde, 0xad, 0xbe, 0xef, 0xca, 0xfe, 0x00, + 0x00, 0x00, 0x00), + .uval = 0xcafedeadbeefcafe, + .start_bit = 95, + .end_bit = 32, + .quirks = NO_QUIRKS, + }, + { + .desc = "no quirks, 20 bytes", + PBUF(0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0xca, 0xfe, 0xde, 0xad, 0xbe, 0xef, 0xca, 0xfe, + 0x00, 0x00, 0x00, 0x00), + .uval = 0xcafedeadbeefcafe, + .start_bit = 95, + .end_bit = 32, + .quirks = NO_QUIRKS, + }, + { + .desc = "no quirks, 22 bytes", + PBUF(0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0xca, 0xfe, 0xde, 0xad, 0xbe, 0xef, + 0xca, 0xfe, 0x00, 0x00, 0x00, 0x00), + .uval = 0xcafedeadbeefcafe, + .start_bit = 95, + .end_bit = 32, + .quirks = NO_QUIRKS, + }, + { + .desc = "no quirks, 24 bytes", + PBUF(0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0xca, 0xfe, 0xde, 0xad, + 0xbe, 0xef, 0xca, 0xfe, 0x00, 0x00, 0x00, 0x00), + .uval = 0xcafedeadbeefcafe, + .start_bit = 95, + .end_bit = 32, + .quirks = NO_QUIRKS, + }, + { + .desc = "lsw32 first + little endian words, 18 bytes", + PBUF(0x00, 0x00, 0x00, 0x00, 0xfe, 0xca, 0xef, 0xbe, + 0xad, 0xde, 0xfe, 0xca, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00), + .uval = 0xcafedeadbeefcafe, + .start_bit = 95, + .end_bit = 32, + .quirks = QUIRK_LSW32_IS_FIRST | QUIRK_LITTLE_ENDIAN, + }, + { + .desc = "lsw32 first + little endian words, 19 bytes", + PBUF(0x00, 0x00, 0x00, 0x00, 0xfe, 0xca, 0xef, 0xbe, + 0xad, 0xde, 0xfe, 0xca, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00), + .uval = 0xcafedeadbeefcafe, + .start_bit = 95, + .end_bit = 32, + .quirks = QUIRK_LSW32_IS_FIRST | QUIRK_LITTLE_ENDIAN, + }, + { + .desc = "lsw32 first + little endian words, 20 bytes", + PBUF(0x00, 0x00, 0x00, 0x00, 0xfe, 0xca, 0xef, 0xbe, + 0xad, 0xde, 0xfe, 0xca, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00), + .uval = 0xcafedeadbeefcafe, + .start_bit = 95, + .end_bit = 32, + .quirks = QUIRK_LSW32_IS_FIRST | QUIRK_LITTLE_ENDIAN, + }, + { + .desc = "lsw32 first + little endian words, 22 bytes", + PBUF(0x00, 0x00, 0x00, 0x00, 0xfe, 0xca, 0xef, 0xbe, + 0xad, 0xde, 0xfe, 0xca, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00), + .uval = 0xcafedeadbeefcafe, + .start_bit = 95, + .end_bit = 32, + .quirks = QUIRK_LSW32_IS_FIRST | QUIRK_LITTLE_ENDIAN, + }, + { + .desc = "lsw32 first + little endian words, 24 bytes", + PBUF(0x00, 0x00, 0x00, 0x00, 0xfe, 0xca, 0xef, 0xbe, + 0xad, 0xde, 0xfe, 0xca, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00), + .uval = 0xcafedeadbeefcafe, + .start_bit = 95, + .end_bit = 32, + .quirks = QUIRK_LSW32_IS_FIRST | QUIRK_LITTLE_ENDIAN, + }, + /* These tests pack and unpack a magic 64-bit value + * (0x1122334455667788) at an odd starting bit (43) within an + * otherwise zero array of 128 bits (16 bytes). They test all possible + * bit layouts of the 128 bit buffer. 
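+	 *
+	 * The expected bytes follow from shifting the value into place: the
+	 * field spans logical bits 106..43, so in the quirk-free layout,
+	 * buffer byte 3 (logical bits 103..96) holds bits 60..53 of
+	 * 0x1122334455667788, which is 0x89.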
+ */ + { + .desc = "no quirks, 16 bytes, non-aligned", + PBUF(0x00, 0x00, 0x00, 0x89, 0x11, 0x9a, 0x22, 0xab, + 0x33, 0xbc, 0x40, 0x00, 0x00, 0x00, 0x00, 0x00), + .uval = 0x1122334455667788, + .start_bit = 106, + .end_bit = 43, + .quirks = NO_QUIRKS, + }, + { + .desc = "lsw32 first, 16 bytes, non-aligned", + PBUF(0x00, 0x00, 0x00, 0x00, 0x33, 0xbc, 0x40, 0x00, + 0x11, 0x9a, 0x22, 0xab, 0x00, 0x00, 0x00, 0x89), + .uval = 0x1122334455667788, + .start_bit = 106, + .end_bit = 43, + .quirks = QUIRK_LSW32_IS_FIRST, + }, + { + .desc = "little endian words, 16 bytes, non-aligned", + PBUF(0x89, 0x00, 0x00, 0x00, 0xab, 0x22, 0x9a, 0x11, + 0x00, 0x40, 0xbc, 0x33, 0x00, 0x00, 0x00, 0x00), + .uval = 0x1122334455667788, + .start_bit = 106, + .end_bit = 43, + .quirks = QUIRK_LITTLE_ENDIAN, + }, + { + .desc = "lsw32 first + little endian words, 16 bytes, non-aligned", + PBUF(0x00, 0x00, 0x00, 0x00, 0x00, 0x40, 0xbc, 0x33, + 0xab, 0x22, 0x9a, 0x11, 0x89, 0x00, 0x00, 0x00), + .uval = 0x1122334455667788, + .start_bit = 106, + .end_bit = 43, + .quirks = QUIRK_LSW32_IS_FIRST | QUIRK_LITTLE_ENDIAN, + }, + { + .desc = "msb right, 16 bytes, non-aligned", + PBUF(0x00, 0x00, 0x00, 0x91, 0x88, 0x59, 0x44, 0xd5, + 0xcc, 0x3d, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00), + .uval = 0x1122334455667788, + .start_bit = 106, + .end_bit = 43, + .quirks = QUIRK_MSB_ON_THE_RIGHT, + }, + { + .desc = "msb right + lsw32 first, 16 bytes, non-aligned", + PBUF(0x00, 0x00, 0x00, 0x00, 0xcc, 0x3d, 0x02, 0x00, + 0x88, 0x59, 0x44, 0xd5, 0x00, 0x00, 0x00, 0x91), + .uval = 0x1122334455667788, + .start_bit = 106, + .end_bit = 43, + .quirks = QUIRK_MSB_ON_THE_RIGHT | QUIRK_LSW32_IS_FIRST, + }, + { + .desc = "msb right + little endian words, 16 bytes, non-aligned", + PBUF(0x91, 0x00, 0x00, 0x00, 0xd5, 0x44, 0x59, 0x88, + 0x00, 0x02, 0x3d, 0xcc, 0x00, 0x00, 0x00, 0x00), + .uval = 0x1122334455667788, + .start_bit = 106, + .end_bit = 43, + .quirks = QUIRK_MSB_ON_THE_RIGHT | QUIRK_LITTLE_ENDIAN, + }, + { + .desc = "msb right + lsw32 first + little endian words, 16 bytes, non-aligned", + PBUF(0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x3d, 0xcc, + 0xd5, 0x44, 0x59, 0x88, 0x91, 0x00, 0x00, 0x00), + .uval = 0x1122334455667788, + .start_bit = 106, + .end_bit = 43, + .quirks = QUIRK_MSB_ON_THE_RIGHT | QUIRK_LSW32_IS_FIRST | QUIRK_LITTLE_ENDIAN, + }, + /* These tests pack and unpack a u64 with all bits set + * (0xffffffffffffffff) at an odd starting bit (43) within an + * otherwise zero array of 128 bits (16 bytes). They test all possible + * bit layouts of the 128 bit buffer. 
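+	 *
+	 * With every value bit set, the edge bytes can be read off directly:
+	 * in the quirk-free layout, byte 2 carries field bits 106..104
+	 * (0x07) and byte 10 carries field bits 47..43 (0xf8).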
+ */ + { + .desc = "no quirks, 16 bytes, non-aligned, 0xff", + PBUF(0x00, 0x00, 0x07, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xf8, 0x00, 0x00, 0x00, 0x00, 0x00), + .uval = 0xffffffffffffffff, + .start_bit = 106, + .end_bit = 43, + .quirks = NO_QUIRKS, + }, + { + .desc = "lsw32 first, 16 bytes, non-aligned, 0xff", + PBUF(0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xf8, 0x00, + 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x07, 0xff), + .uval = 0xffffffffffffffff, + .start_bit = 106, + .end_bit = 43, + .quirks = QUIRK_LSW32_IS_FIRST, + }, + { + .desc = "little endian words, 16 bytes, non-aligned, 0xff", + PBUF(0xff, 0x07, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, + 0x00, 0xf8, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00), + .uval = 0xffffffffffffffff, + .start_bit = 106, + .end_bit = 43, + .quirks = QUIRK_LITTLE_ENDIAN, + }, + { + .desc = "lsw32 first + little endian words, 16 bytes, non-aligned, 0xff", + PBUF(0x00, 0x00, 0x00, 0x00, 0x00, 0xf8, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0x07, 0x00, 0x00), + .uval = 0xffffffffffffffff, + .start_bit = 106, + .end_bit = 43, + .quirks = QUIRK_LSW32_IS_FIRST | QUIRK_LITTLE_ENDIAN, + }, + { + .desc = "msb right, 16 bytes, non-aligned, 0xff", + PBUF(0x00, 0x00, 0xe0, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0x1f, 0x00, 0x00, 0x00, 0x00, 0x00), + .uval = 0xffffffffffffffff, + .start_bit = 106, + .end_bit = 43, + .quirks = QUIRK_MSB_ON_THE_RIGHT, + }, + { + .desc = "msb right + lsw32 first, 16 bytes, non-aligned, 0xff", + PBUF(0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0x1f, 0x00, + 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0xe0, 0xff), + .uval = 0xffffffffffffffff, + .start_bit = 106, + .end_bit = 43, + .quirks = QUIRK_MSB_ON_THE_RIGHT | QUIRK_LSW32_IS_FIRST, + }, + { + .desc = "msb right + little endian words, 16 bytes, non-aligned, 0xff", + PBUF(0xff, 0xe0, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, + 0x00, 0x1f, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00), + .uval = 0xffffffffffffffff, + .start_bit = 106, + .end_bit = 43, + .quirks = QUIRK_MSB_ON_THE_RIGHT | QUIRK_LITTLE_ENDIAN, + }, + { + .desc = "msb right + lsw32 first + little endian words, 16 bytes, non-aligned, 0xff", + PBUF(0x00, 0x00, 0x00, 0x00, 0x00, 0x1f, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xe0, 0x00, 0x00), + .uval = 0xffffffffffffffff, + .start_bit = 106, + .end_bit = 43, + .quirks = QUIRK_MSB_ON_THE_RIGHT | QUIRK_LSW32_IS_FIRST | QUIRK_LITTLE_ENDIAN, + }, +}; + +KUNIT_ARRAY_PARAM_DESC(packing, cases, desc); + +static void packing_test_pack(struct kunit *test) +{ + const struct packing_test_case *params = test->param_value; + u8 *pbuf; + int err; + + pbuf = kunit_kzalloc(test, params->pbuf_size, GFP_KERNEL); + KUNIT_ASSERT_NOT_NULL(test, pbuf); + + err = pack(pbuf, params->uval, params->start_bit, params->end_bit, + params->pbuf_size, params->quirks); + + KUNIT_EXPECT_EQ_MSG(test, err, 0, "pack() returned %pe\n", ERR_PTR(err)); + KUNIT_EXPECT_MEMEQ(test, pbuf, params->pbuf, params->pbuf_size); +} + +static void packing_test_unpack(struct kunit *test) +{ + const struct packing_test_case *params = test->param_value; + u64 uval; + int err; + + err = unpack(params->pbuf, &uval, params->start_bit, params->end_bit, + params->pbuf_size, params->quirks); + KUNIT_EXPECT_EQ_MSG(test, err, 0, "unpack() returned %pe\n", ERR_PTR(err)); + KUNIT_EXPECT_EQ(test, uval, params->uval); +} + +static struct kunit_case packing_test_cases[] = { + KUNIT_CASE_PARAM(packing_test_pack, packing_gen_params), + KUNIT_CASE_PARAM(packing_test_unpack, packing_gen_params), + {}, +}; + +static struct kunit_suite packing_test_suite = { + .name = "packing", + 
.test_cases = packing_test_cases, +}; + +kunit_test_suite(packing_test_suite); + +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("KUnit tests for packing library"); diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c index 458040e8a0e0..91d134961357 100644 --- a/net/8021q/vlan_dev.c +++ b/net/8021q/vlan_dev.c @@ -725,7 +725,7 @@ static void vlan_dev_poll_controller(struct net_device *dev) return; } -static int vlan_dev_netpoll_setup(struct net_device *dev, struct netpoll_info *npinfo) +static int vlan_dev_netpoll_setup(struct net_device *dev) { struct vlan_dev_priv *vlan = vlan_dev_priv(dev); struct net_device *real_dev = vlan->real_dev; diff --git a/net/Kconfig b/net/Kconfig index a629f92dc86b..c3fca69a7c83 100644 --- a/net/Kconfig +++ b/net/Kconfig @@ -72,6 +72,9 @@ config NET_DEVMEM depends on GENERIC_ALLOCATOR depends on PAGE_POOL +config NET_SHAPER + bool + menu "Networking options" source "net/packet/Kconfig" diff --git a/net/Kconfig.debug b/net/Kconfig.debug index 5e3fffe707dd..277fab8c4d77 100644 --- a/net/Kconfig.debug +++ b/net/Kconfig.debug @@ -24,3 +24,18 @@ config DEBUG_NET help Enable extra sanity checks in networking. This is mostly used by fuzzers, but is safe to select. + +config DEBUG_NET_SMALL_RTNL + bool "Add extra per-netns mutex inside RTNL" + depends on DEBUG_KERNEL && NET && LOCK_DEBUGGING_SUPPORT + select PROVE_LOCKING + default n + help + rtnl_lock() is being replaced with rtnl_net_lock() that + acquires the global RTNL and a small per-netns RTNL mutex. + + During the conversion, rtnl_net_lock() just adds an extra + mutex in every RTNL scope and slows down the operations. + + Once the conversion completes, rtnl_lock() will be removed + and rtnetlink will gain per-netns scalability. diff --git a/net/Makefile b/net/Makefile index 65bb8c72a35e..60ed5190eda8 100644 --- a/net/Makefile +++ b/net/Makefile @@ -79,3 +79,4 @@ obj-$(CONFIG_XDP_SOCKETS) += xdp/ obj-$(CONFIG_MPTCP) += mptcp/ obj-$(CONFIG_MCTP) += mctp/ obj-$(CONFIG_NET_HANDSHAKE) += handshake/ +obj-$(CONFIG_NET_SHAPER) += shaper/ diff --git a/net/appletalk/Makefile b/net/appletalk/Makefile index 33164d972d37..152312a15180 100644 --- a/net/appletalk/Makefile +++ b/net/appletalk/Makefile @@ -5,6 +5,6 @@ obj-$(CONFIG_ATALK) += appletalk.o -appletalk-y := aarp.o ddp.o dev.o +appletalk-y := aarp.o ddp.o appletalk-$(CONFIG_PROC_FS) += atalk_proc.o appletalk-$(CONFIG_SYSCTL) += sysctl_net_atalk.o diff --git a/net/appletalk/dev.c b/net/appletalk/dev.c deleted file mode 100644 index 284c8e585533..000000000000 --- a/net/appletalk/dev.c +++ /dev/null @@ -1,46 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * Moved here from drivers/net/net_init.c, which is: - * Written 1993,1994,1995 by Donald Becker. - */ - -#include <linux/errno.h> -#include <linux/module.h> -#include <linux/netdevice.h> -#include <linux/if_arp.h> -#include <linux/if_ltalk.h> - -static void ltalk_setup(struct net_device *dev) -{ - /* Fill in the fields of the device structure with localtalk-generic values. */ - - dev->type = ARPHRD_LOCALTLK; - dev->hard_header_len = LTALK_HLEN; - dev->mtu = LTALK_MTU; - dev->addr_len = LTALK_ALEN; - dev->tx_queue_len = 10; - - dev->broadcast[0] = 0xFF; - - dev->flags = IFF_BROADCAST|IFF_MULTICAST|IFF_NOARP; -} - -/** - * alloc_ltalkdev - Allocates and sets up an localtalk device - * @sizeof_priv: Size of additional driver-private structure to be allocated - * for this localtalk device - * - * Fill in the fields of the device structure with localtalk-generic - * values. 
Basically does everything except registering the device. - * - * Constructs a new net device, complete with a private data area of - * size @sizeof_priv. A 32-byte (not bit) alignment is enforced for - * this private data area. - */ - -struct net_device *alloc_ltalkdev(int sizeof_priv) -{ - return alloc_netdev(sizeof_priv, "lt%d", NET_NAME_UNKNOWN, - ltalk_setup); -} -EXPORT_SYMBOL(alloc_ltalkdev); diff --git a/net/batman-adv/bat_iv_ogm.c b/net/batman-adv/bat_iv_ogm.c index 74b49c35ddc1..07ae5dd1f150 100644 --- a/net/batman-adv/bat_iv_ogm.c +++ b/net/batman-adv/bat_iv_ogm.c @@ -36,6 +36,7 @@ #include <linux/spinlock.h> #include <linux/stddef.h> #include <linux/string.h> +#include <linux/string_choices.h> #include <linux/types.h> #include <linux/workqueue.h> #include <net/genetlink.h> @@ -371,8 +372,7 @@ static void batadv_iv_ogm_send_to_if(struct batadv_forw_packet *forw_packet, batadv_ogm_packet->orig, ntohl(batadv_ogm_packet->seqno), batadv_ogm_packet->tq, batadv_ogm_packet->ttl, - ((batadv_ogm_packet->flags & BATADV_DIRECTLINK) ? - "on" : "off"), + str_on_off(batadv_ogm_packet->flags & BATADV_DIRECTLINK), hard_iface->net_dev->name, hard_iface->net_dev->dev_addr); diff --git a/net/batman-adv/bridge_loop_avoidance.c b/net/batman-adv/bridge_loop_avoidance.c index 5f46ca3d4bb8..449faf5a5487 100644 --- a/net/batman-adv/bridge_loop_avoidance.c +++ b/net/batman-adv/bridge_loop_avoidance.c @@ -33,6 +33,7 @@ #include <linux/sprintf.h> #include <linux/stddef.h> #include <linux/string.h> +#include <linux/string_choices.h> #include <linux/workqueue.h> #include <net/arp.h> #include <net/genetlink.h> @@ -1946,16 +1947,15 @@ bool batadv_bla_rx(struct batadv_priv *bat_priv, struct sk_buff *skb, claim = batadv_claim_hash_find(bat_priv, &search_claim); if (!claim) { + bool local = batadv_is_my_client(bat_priv, ethhdr->h_source, vid); + /* possible optimization: race for a claim */ /* No claim exists yet, claim it for us! */ batadv_dbg(BATADV_DBG_BLA, bat_priv, "%s(): Unclaimed MAC %pM found. Claim it. Local: %s\n", - __func__, ethhdr->h_source, - batadv_is_my_client(bat_priv, - ethhdr->h_source, vid) ? - "yes" : "no"); + __func__, ethhdr->h_source, str_yes_no(local)); batadv_handle_claim(bat_priv, primary_if, primary_if->net_dev->dev_addr, ethhdr->h_source, vid); diff --git a/net/batman-adv/main.h b/net/batman-adv/main.h index 3d4c36ae2e1a..97ea71a052f8 100644 --- a/net/batman-adv/main.h +++ b/net/batman-adv/main.h @@ -13,7 +13,7 @@ #define BATADV_DRIVER_DEVICE "batman-adv" #ifndef BATADV_SOURCE_VERSION -#define BATADV_SOURCE_VERSION "2024.2" +#define BATADV_SOURCE_VERSION "2024.3" #endif /* B.A.T.M.A.N. 
parameters */ diff --git a/net/batman-adv/translation-table.c b/net/batman-adv/translation-table.c index 2243cec18ecc..b44c382226a1 100644 --- a/net/batman-adv/translation-table.c +++ b/net/batman-adv/translation-table.c @@ -28,6 +28,7 @@ #include <linux/net.h> #include <linux/netdevice.h> #include <linux/netlink.h> +#include <linux/overflow.h> #include <linux/rculist.h> #include <linux/rcupdate.h> #include <linux/skbuff.h> @@ -209,20 +210,6 @@ batadv_tt_global_hash_find(struct batadv_priv *bat_priv, const u8 *addr, } /** - * batadv_tt_local_entry_free_rcu() - free the tt_local_entry - * @rcu: rcu pointer of the tt_local_entry - */ -static void batadv_tt_local_entry_free_rcu(struct rcu_head *rcu) -{ - struct batadv_tt_local_entry *tt_local_entry; - - tt_local_entry = container_of(rcu, struct batadv_tt_local_entry, - common.rcu); - - kmem_cache_free(batadv_tl_cache, tt_local_entry); -} - -/** * batadv_tt_local_entry_release() - release tt_local_entry from lists and queue * for free after rcu grace period * @ref: kref pointer of the nc_node @@ -236,7 +223,7 @@ static void batadv_tt_local_entry_release(struct kref *ref) batadv_softif_vlan_put(tt_local_entry->vlan); - call_rcu(&tt_local_entry->common.rcu, batadv_tt_local_entry_free_rcu); + kfree_rcu(tt_local_entry, common.rcu); } /** @@ -255,20 +242,6 @@ batadv_tt_local_entry_put(struct batadv_tt_local_entry *tt_local_entry) } /** - * batadv_tt_global_entry_free_rcu() - free the tt_global_entry - * @rcu: rcu pointer of the tt_global_entry - */ -static void batadv_tt_global_entry_free_rcu(struct rcu_head *rcu) -{ - struct batadv_tt_global_entry *tt_global_entry; - - tt_global_entry = container_of(rcu, struct batadv_tt_global_entry, - common.rcu); - - kmem_cache_free(batadv_tg_cache, tt_global_entry); -} - -/** * batadv_tt_global_entry_release() - release tt_global_entry from lists and * queue for free after rcu grace period * @ref: kref pointer of the nc_node @@ -282,7 +255,7 @@ void batadv_tt_global_entry_release(struct kref *ref) batadv_tt_global_del_orig_list(tt_global_entry); - call_rcu(&tt_global_entry->common.rcu, batadv_tt_global_entry_free_rcu); + kfree_rcu(tt_global_entry, common.rcu); } /** @@ -408,19 +381,6 @@ static void batadv_tt_global_size_dec(struct batadv_orig_node *orig_node, } /** - * batadv_tt_orig_list_entry_free_rcu() - free the orig_entry - * @rcu: rcu pointer of the orig_entry - */ -static void batadv_tt_orig_list_entry_free_rcu(struct rcu_head *rcu) -{ - struct batadv_tt_orig_list_entry *orig_entry; - - orig_entry = container_of(rcu, struct batadv_tt_orig_list_entry, rcu); - - kmem_cache_free(batadv_tt_orig_cache, orig_entry); -} - -/** * batadv_tt_orig_list_entry_release() - release tt orig entry from lists and * queue for free after rcu grace period * @ref: kref pointer of the tt orig entry @@ -433,7 +393,7 @@ static void batadv_tt_orig_list_entry_release(struct kref *ref) refcount); batadv_orig_node_put(orig_entry->orig_node); - call_rcu(&orig_entry->rcu, batadv_tt_orig_list_entry_free_rcu); + kfree_rcu(orig_entry, rcu); } /** @@ -856,8 +816,7 @@ batadv_tt_prepare_tvlv_global_data(struct batadv_orig_node *orig_node, num_entries += atomic_read(&vlan->tt.num_entries); } - change_offset = sizeof(**tt_data); - change_offset += num_vlan * sizeof(*tt_vlan); + change_offset = struct_size(*tt_data, vlan_data, num_vlan); /* if tt_len is negative, allocate the space needed by the full table */ if (*tt_len < 0) @@ -876,7 +835,7 @@ batadv_tt_prepare_tvlv_global_data(struct batadv_orig_node *orig_node, (*tt_data)->ttvn = 
atomic_read(&orig_node->last_ttvn); (*tt_data)->num_vlan = htons(num_vlan); - tt_vlan = (struct batadv_tvlv_tt_vlan_data *)(*tt_data + 1); + tt_vlan = (*tt_data)->vlan_data; hlist_for_each_entry(vlan, &orig_node->vlan_list, list) { tt_vlan->vid = htons(vlan->vid); tt_vlan->crc = htonl(vlan->tt.crc); @@ -936,8 +895,7 @@ batadv_tt_prepare_tvlv_local_data(struct batadv_priv *bat_priv, total_entries += vlan_entries; } - change_offset = sizeof(**tt_data); - change_offset += num_vlan * sizeof(*tt_vlan); + change_offset = struct_size(*tt_data, vlan_data, num_vlan); /* if tt_len is negative, allocate the space needed by the full table */ if (*tt_len < 0) @@ -956,7 +914,7 @@ batadv_tt_prepare_tvlv_local_data(struct batadv_priv *bat_priv, (*tt_data)->ttvn = atomic_read(&bat_priv->tt.vn); (*tt_data)->num_vlan = htons(num_vlan); - tt_vlan = (struct batadv_tvlv_tt_vlan_data *)(*tt_data + 1); + tt_vlan = (*tt_data)->vlan_data; hlist_for_each_entry(vlan, &bat_priv->softif_vlan_list, list) { vlan_entries = atomic_read(&vlan->tt.num_entries); if (vlan_entries < 1) @@ -2916,7 +2874,6 @@ static bool batadv_send_tt_request(struct batadv_priv *bat_priv, { struct batadv_tvlv_tt_data *tvlv_tt_data = NULL; struct batadv_tt_req_node *tt_req_node = NULL; - struct batadv_tvlv_tt_vlan_data *tt_vlan_req; struct batadv_hard_iface *primary_if; bool ret = false; int i, size; @@ -2932,7 +2889,7 @@ static bool batadv_send_tt_request(struct batadv_priv *bat_priv, if (!tt_req_node) goto out; - size = sizeof(*tvlv_tt_data) + sizeof(*tt_vlan_req) * num_vlan; + size = struct_size(tvlv_tt_data, vlan_data, num_vlan); tvlv_tt_data = kzalloc(size, GFP_ATOMIC); if (!tvlv_tt_data) goto out; @@ -2944,12 +2901,10 @@ static bool batadv_send_tt_request(struct batadv_priv *bat_priv, /* send all the CRCs within the request. 
This is needed by intermediate * nodes to ensure they have the correct table before replying */ - tt_vlan_req = (struct batadv_tvlv_tt_vlan_data *)(tvlv_tt_data + 1); for (i = 0; i < num_vlan; i++) { - tt_vlan_req->vid = tt_vlan->vid; - tt_vlan_req->crc = tt_vlan->crc; + tvlv_tt_data->vlan_data[i].vid = tt_vlan->vid; + tvlv_tt_data->vlan_data[i].crc = tt_vlan->crc; - tt_vlan_req++; tt_vlan++; } @@ -3001,7 +2956,6 @@ static bool batadv_send_other_tt_response(struct batadv_priv *bat_priv, struct batadv_orig_node *res_dst_orig_node = NULL; struct batadv_tvlv_tt_change *tt_change; struct batadv_tvlv_tt_data *tvlv_tt_data = NULL; - struct batadv_tvlv_tt_vlan_data *tt_vlan; bool ret = false, full_table; u8 orig_ttvn, req_ttvn; u16 tvlv_len; @@ -3024,10 +2978,9 @@ static bool batadv_send_other_tt_response(struct batadv_priv *bat_priv, orig_ttvn = (u8)atomic_read(&req_dst_orig_node->last_ttvn); req_ttvn = tt_data->ttvn; - tt_vlan = (struct batadv_tvlv_tt_vlan_data *)(tt_data + 1); /* this node doesn't have the requested data */ if (orig_ttvn != req_ttvn || - !batadv_tt_global_check_crc(req_dst_orig_node, tt_vlan, + !batadv_tt_global_check_crc(req_dst_orig_node, tt_data->vlan_data, ntohs(tt_data->num_vlan))) goto out; @@ -3370,7 +3323,6 @@ static void batadv_handle_tt_response(struct batadv_priv *bat_priv, struct batadv_orig_node *orig_node = NULL; struct batadv_tvlv_tt_change *tt_change; u8 *tvlv_ptr = (u8 *)tt_data; - u16 change_offset; batadv_dbg(BATADV_DBG_TT, bat_priv, "Received TT_RESPONSE from %pM for ttvn %d t_size: %d [%c]\n", @@ -3383,10 +3335,7 @@ static void batadv_handle_tt_response(struct batadv_priv *bat_priv, spin_lock_bh(&orig_node->tt_lock); - change_offset = sizeof(struct batadv_tvlv_tt_vlan_data); - change_offset *= ntohs(tt_data->num_vlan); - change_offset += sizeof(*tt_data); - tvlv_ptr += change_offset; + tvlv_ptr += struct_size(tt_data, vlan_data, ntohs(tt_data->num_vlan)); tt_change = (struct batadv_tvlv_tt_change *)tvlv_ptr; if (tt_data->flags & BATADV_TT_FULL_TABLE) { @@ -3985,10 +3934,10 @@ static void batadv_tt_tvlv_ogm_handler_v1(struct batadv_priv *bat_priv, u8 flags, void *tvlv_value, u16 tvlv_value_len) { - struct batadv_tvlv_tt_vlan_data *tt_vlan; struct batadv_tvlv_tt_change *tt_change; struct batadv_tvlv_tt_data *tt_data; u16 num_entries, num_vlan; + size_t flex_size; if (tvlv_value_len < sizeof(*tt_data)) return; @@ -3998,17 +3947,18 @@ static void batadv_tt_tvlv_ogm_handler_v1(struct batadv_priv *bat_priv, num_vlan = ntohs(tt_data->num_vlan); - if (tvlv_value_len < sizeof(*tt_vlan) * num_vlan) + flex_size = flex_array_size(tt_data, vlan_data, num_vlan); + if (tvlv_value_len < flex_size) return; - tt_vlan = (struct batadv_tvlv_tt_vlan_data *)(tt_data + 1); - tt_change = (struct batadv_tvlv_tt_change *)(tt_vlan + num_vlan); - tvlv_value_len -= sizeof(*tt_vlan) * num_vlan; + tt_change = (struct batadv_tvlv_tt_change *)((void *)tt_data + + flex_size); + tvlv_value_len -= flex_size; num_entries = batadv_tt_entries(tvlv_value_len); - batadv_tt_update_orig(bat_priv, orig, tt_vlan, num_vlan, tt_change, - num_entries, tt_data->ttvn); + batadv_tt_update_orig(bat_priv, orig, tt_data->vlan_data, num_vlan, + tt_change, num_entries, tt_data->ttvn); } /** @@ -4039,8 +3989,8 @@ static int batadv_tt_tvlv_unicast_handler_v1(struct batadv_priv *bat_priv, tt_data = tvlv_value; tvlv_value_len -= sizeof(*tt_data); - tt_vlan_len = sizeof(struct batadv_tvlv_tt_vlan_data); - tt_vlan_len *= ntohs(tt_data->num_vlan); + tt_vlan_len = flex_array_size(tt_data, vlan_data, + 
ntohs(tt_data->num_vlan)); if (tvlv_value_len < tt_vlan_len) return NET_RX_SUCCESS; diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c index ba437c6f6ee5..18e89e764f3b 100644 --- a/net/bluetooth/l2cap_sock.c +++ b/net/bluetooth/l2cap_sock.c @@ -1886,6 +1886,7 @@ static struct sock *l2cap_sock_alloc(struct net *net, struct socket *sock, chan = l2cap_chan_create(); if (!chan) { sk_free(sk); + sock->sk = NULL; return NULL; } diff --git a/net/bluetooth/rfcomm/sock.c b/net/bluetooth/rfcomm/sock.c index f48250e3f2e1..355e1a1698f5 100644 --- a/net/bluetooth/rfcomm/sock.c +++ b/net/bluetooth/rfcomm/sock.c @@ -274,13 +274,13 @@ static struct sock *rfcomm_sock_alloc(struct net *net, struct socket *sock, struct rfcomm_dlc *d; struct sock *sk; - sk = bt_sock_alloc(net, sock, &rfcomm_proto, proto, prio, kern); - if (!sk) + d = rfcomm_dlc_alloc(prio); + if (!d) return NULL; - d = rfcomm_dlc_alloc(prio); - if (!d) { - sk_free(sk); + sk = bt_sock_alloc(net, sock, &rfcomm_proto, proto, prio, kern); + if (!sk) { + rfcomm_dlc_free(d); return NULL; } diff --git a/net/bridge/br_device.c b/net/bridge/br_device.c index 26b79feb385d..0ab4613aa07a 100644 --- a/net/bridge/br_device.c +++ b/net/bridge/br_device.c @@ -328,7 +328,7 @@ int br_netpoll_enable(struct net_bridge_port *p) return __br_netpoll_enable(p); } -static int br_netpoll_setup(struct net_device *dev, struct netpoll_info *ni) +static int br_netpoll_setup(struct net_device *dev) { struct net_bridge *br = netdev_priv(dev); struct net_bridge_port *p; diff --git a/net/bridge/br_fdb.c b/net/bridge/br_fdb.c index 642b8ccaae8e..1cd7bade9b3b 100644 --- a/net/bridge/br_fdb.c +++ b/net/bridge/br_fdb.c @@ -73,13 +73,6 @@ static inline int has_expired(const struct net_bridge *br, time_before_eq(fdb->updated + hold_time(br), jiffies); } -static void fdb_rcu_free(struct rcu_head *head) -{ - struct net_bridge_fdb_entry *ent - = container_of(head, struct net_bridge_fdb_entry, rcu); - kmem_cache_free(br_fdb_cache, ent); -} - static int fdb_to_nud(const struct net_bridge *br, const struct net_bridge_fdb_entry *fdb) { @@ -329,7 +322,7 @@ static void fdb_delete(struct net_bridge *br, struct net_bridge_fdb_entry *f, if (test_and_clear_bit(BR_FDB_DYNAMIC_LEARNED, &f->flags)) atomic_dec(&br->fdb_n_learned); fdb_notify(br, f, RTM_DELNEIGH, swdev_notify); - call_rcu(&f->rcu, fdb_rcu_free); + kfree_rcu(f, rcu); } /* Delete a local entry if no other port had the same address. 
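The batman-adv and bridge hunks above apply two recurring cleanups: open-coded
size arithmetic on structures with trailing flexible arrays becomes
struct_size()/flex_array_size(), and call_rcu() callbacks whose only job is to
free the object collapse into kfree_rcu(). A minimal sketch of both patterns,
using a hypothetical struct demo rather than any type from the patch:

#include <linux/overflow.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/types.h>

struct demo {
	struct rcu_head rcu;
	u16 num_items;
	u32 items[];			/* trailing flexible array */
};

/* struct_size() computes sizeof(struct demo) plus n trailing elements,
 * with overflow checking, replacing sizeof(*d) + n * sizeof(u32).
 */
static struct demo *demo_alloc(u16 n)
{
	struct demo *d = kzalloc(struct_size(d, items, n), GFP_KERNEL);

	if (d)
		d->num_items = n;
	return d;
}

/* flex_array_size() covers just the trailing array, e.g. for copies. */
static size_t demo_payload_bytes(const struct demo *d)
{
	return flex_array_size(d, items, d->num_items);
}

/* When an RCU callback would only free the object, kfree_rcu() replaces
 * the callback/call_rcu() pair; its second argument names the embedded
 * rcu_head member.
 */
static void demo_free(struct demo *d)
{
	kfree_rcu(d, rcu);
}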
diff --git a/net/bridge/br_netfilter_hooks.c b/net/bridge/br_netfilter_hooks.c index 1d458e9da660..17a5f5923d61 100644 --- a/net/bridge/br_netfilter_hooks.c +++ b/net/bridge/br_netfilter_hooks.c @@ -370,9 +370,9 @@ br_nf_ipv4_daddr_was_changed(const struct sk_buff *skb, */ static int br_nf_pre_routing_finish(struct net *net, struct sock *sk, struct sk_buff *skb) { - struct net_device *dev = skb->dev, *br_indev; - struct iphdr *iph = ip_hdr(skb); struct nf_bridge_info *nf_bridge = nf_bridge_info_get(skb); + struct net_device *dev = skb->dev, *br_indev; + const struct iphdr *iph = ip_hdr(skb); struct rtable *rt; int err; @@ -390,7 +390,9 @@ static int br_nf_pre_routing_finish(struct net *net, struct sock *sk, struct sk_ } nf_bridge->in_prerouting = 0; if (br_nf_ipv4_daddr_was_changed(skb, nf_bridge)) { - if ((err = ip_route_input(skb, iph->daddr, iph->saddr, iph->tos, dev))) { + err = ip_route_input(skb, iph->daddr, iph->saddr, + ip4h_dscp(iph), dev); + if (err) { struct in_device *in_dev = __in_dev_get_rcu(dev); /* If err equals -EHOSTUNREACH the error is due to a diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c index 6b97ae47f855..3e0f47203f2a 100644 --- a/net/bridge/br_netlink.c +++ b/net/bridge/br_netlink.c @@ -1924,7 +1924,9 @@ int __init br_netlink_init(void) if (err) goto out; - rtnl_af_register(&br_af_ops); + err = rtnl_af_register(&br_af_ops); + if (err) + goto out_vlan; err = rtnl_link_register(&br_link_ops); if (err) @@ -1934,6 +1936,8 @@ int __init br_netlink_init(void) out_af: rtnl_af_unregister(&br_af_ops); +out_vlan: + br_vlan_rtnl_uninit(); out: return err; } diff --git a/net/caif/cfsrvl.c b/net/caif/cfsrvl.c index 9cef9496a707..171fa32ada85 100644 --- a/net/caif/cfsrvl.c +++ b/net/caif/cfsrvl.c @@ -183,12 +183,6 @@ bool cfsrvl_ready(struct cfsrvl *service, int *err) return true; } -u8 cfsrvl_getphyid(struct cflayer *layer) -{ - struct cfsrvl *servl = container_obj(layer); - return servl->dev_info.id; -} - bool cfsrvl_phyid_match(struct cflayer *layer, int phyid) { struct cfsrvl *servl = container_obj(layer); diff --git a/net/can/af_can.c b/net/can/af_can.c index 707576eeeb58..01f3fbb3b67d 100644 --- a/net/can/af_can.c +++ b/net/can/af_can.c @@ -171,6 +171,7 @@ static int can_create(struct net *net, struct socket *sock, int protocol, /* release sk on errors */ sock_orphan(sk); sock_put(sk); + sock->sk = NULL; } errout: diff --git a/net/can/gw.c b/net/can/gw.c index 37528826935e..ef93293c1fae 100644 --- a/net/can/gw.c +++ b/net/can/gw.c @@ -1265,6 +1265,15 @@ static struct pernet_operations cangw_pernet_ops = { .exit_batch = cangw_pernet_exit_batch, }; +static const struct rtnl_msg_handler cgw_rtnl_msg_handlers[] __initconst_or_module = { + {.owner = THIS_MODULE, .protocol = PF_CAN, .msgtype = RTM_NEWROUTE, + .doit = cgw_create_job}, + {.owner = THIS_MODULE, .protocol = PF_CAN, .msgtype = RTM_DELROUTE, + .doit = cgw_remove_job}, + {.owner = THIS_MODULE, .protocol = PF_CAN, .msgtype = RTM_GETROUTE, + .dumpit = cgw_dump_jobs}, +}; + static __init int cgw_module_init(void) { int ret; @@ -1290,27 +1299,13 @@ static __init int cgw_module_init(void) if (ret) goto out_register_notifier; - ret = rtnl_register_module(THIS_MODULE, PF_CAN, RTM_GETROUTE, - NULL, cgw_dump_jobs, 0); - if (ret) - goto out_rtnl_register1; - - ret = rtnl_register_module(THIS_MODULE, PF_CAN, RTM_NEWROUTE, - cgw_create_job, NULL, 0); - if (ret) - goto out_rtnl_register2; - ret = rtnl_register_module(THIS_MODULE, PF_CAN, RTM_DELROUTE, - cgw_remove_job, NULL, 0); + ret = 
rtnl_register_many(cgw_rtnl_msg_handlers); if (ret) - goto out_rtnl_register3; + goto out_rtnl_register; return 0; -out_rtnl_register3: - rtnl_unregister(PF_CAN, RTM_NEWROUTE); -out_rtnl_register2: - rtnl_unregister(PF_CAN, RTM_GETROUTE); -out_rtnl_register1: +out_rtnl_register: unregister_netdevice_notifier(¬ifier); out_register_notifier: kmem_cache_destroy(cgw_cache); diff --git a/net/can/raw.c b/net/can/raw.c index 00533f64d69d..255c0a8f39d6 100644 --- a/net/can/raw.c +++ b/net/can/raw.c @@ -966,7 +966,7 @@ static int raw_sendmsg(struct socket *sock, struct msghdr *msg, size_t size) skb->mark = READ_ONCE(sk->sk_mark); skb->tstamp = sockc.transmit_time; - skb_setup_tx_timestamp(skb, sockc.tsflags); + skb_setup_tx_timestamp(skb, &sockc); err = can_send(skb, ro->loopback); diff --git a/net/core/Makefile b/net/core/Makefile index c3ebbaf9c81e..5a72a87ee0f1 100644 --- a/net/core/Makefile +++ b/net/core/Makefile @@ -45,3 +45,4 @@ obj-$(CONFIG_BPF_SYSCALL) += bpf_sk_storage.o obj-$(CONFIG_OF) += of_net.o obj-$(CONFIG_NET_TEST) += net_test.o obj-$(CONFIG_NET_DEVMEM) += devmem.o +obj-$(CONFIG_DEBUG_NET_SMALL_RTNL) += rtnl_net_debug.o diff --git a/net/core/dev.c b/net/core/dev.c index ea5fbcd133ae..c682173a7642 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -2949,6 +2949,8 @@ int netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq) if (dev->num_tc) netif_setup_tc(dev, txq); + net_shaper_set_real_num_tx_queues(dev, txq); + dev_qdisc_change_real_num_tx(dev, txq); dev->real_num_tx_queues = txq; @@ -6230,12 +6232,12 @@ bool napi_complete_done(struct napi_struct *n, int work_done) if (work_done) { if (n->gro_bitmask) - timeout = READ_ONCE(n->dev->gro_flush_timeout); - n->defer_hard_irqs_count = READ_ONCE(n->dev->napi_defer_hard_irqs); + timeout = napi_get_gro_flush_timeout(n); + n->defer_hard_irqs_count = napi_get_defer_hard_irqs(n); } if (n->defer_hard_irqs_count > 0) { n->defer_hard_irqs_count--; - timeout = READ_ONCE(n->dev->gro_flush_timeout); + timeout = napi_get_gro_flush_timeout(n); if (timeout) ret = false; } @@ -6369,8 +6371,8 @@ static void busy_poll_stop(struct napi_struct *napi, void *have_poll_lock, bpf_net_ctx = bpf_net_ctx_set(&__bpf_net_ctx); if (flags & NAPI_F_PREFER_BUSY_POLL) { - napi->defer_hard_irqs_count = READ_ONCE(napi->dev->napi_defer_hard_irqs); - timeout = READ_ONCE(napi->dev->gro_flush_timeout); + napi->defer_hard_irqs_count = napi_get_defer_hard_irqs(napi); + timeout = napi_get_gro_flush_timeout(napi); if (napi->defer_hard_irqs_count && timeout) { hrtimer_start(&napi->timer, ns_to_ktime(timeout), HRTIMER_MODE_REL_PINNED); skip_schedule = true; @@ -6503,6 +6505,23 @@ EXPORT_SYMBOL(napi_busy_loop); #endif /* CONFIG_NET_RX_BUSY_POLL */ +static void __napi_hash_add_with_id(struct napi_struct *napi, + unsigned int napi_id) +{ + napi->napi_id = napi_id; + hlist_add_head_rcu(&napi->napi_hash_node, + &napi_hash[napi->napi_id % HASH_SIZE(napi_hash)]); +} + +static void napi_hash_add_with_id(struct napi_struct *napi, + unsigned int napi_id) +{ + spin_lock(&napi_hash_lock); + WARN_ON_ONCE(napi_by_id(napi_id)); + __napi_hash_add_with_id(napi, napi_id); + spin_unlock(&napi_hash_lock); +} + static void napi_hash_add(struct napi_struct *napi) { if (test_bit(NAPI_STATE_NO_BUSY_POLL, &napi->state)) @@ -6515,10 +6534,8 @@ static void napi_hash_add(struct napi_struct *napi) if (unlikely(++napi_gen_id < MIN_NAPI_ID)) napi_gen_id = MIN_NAPI_ID; } while (napi_by_id(napi_gen_id)); - napi->napi_id = napi_gen_id; - hlist_add_head_rcu(&napi->napi_hash_node, - 
&napi_hash[napi->napi_id % HASH_SIZE(napi_hash)]); + __napi_hash_add_with_id(napi, napi_gen_id); spin_unlock(&napi_hash_lock); } @@ -6641,6 +6658,28 @@ void netif_queue_set_napi(struct net_device *dev, unsigned int queue_index, } EXPORT_SYMBOL(netif_queue_set_napi); +static void napi_restore_config(struct napi_struct *n) +{ + n->defer_hard_irqs = n->config->defer_hard_irqs; + n->gro_flush_timeout = n->config->gro_flush_timeout; + /* a NAPI ID might be stored in the config, if so use it. if not, use + * napi_hash_add to generate one for us. It will be saved to the config + * in napi_disable. + */ + if (n->config->napi_id) + napi_hash_add_with_id(n, n->config->napi_id); + else + napi_hash_add(n); +} + +static void napi_save_config(struct napi_struct *n) +{ + n->config->defer_hard_irqs = n->defer_hard_irqs; + n->config->gro_flush_timeout = n->gro_flush_timeout; + n->config->napi_id = n->napi_id; + napi_hash_del(n); +} + void netif_napi_add_weight(struct net_device *dev, struct napi_struct *napi, int (*poll)(struct napi_struct *, int), int weight) { @@ -6668,7 +6707,13 @@ void netif_napi_add_weight(struct net_device *dev, struct napi_struct *napi, set_bit(NAPI_STATE_SCHED, &napi->state); set_bit(NAPI_STATE_NPSVC, &napi->state); list_add_rcu(&napi->dev_list, &dev->napi_list); - napi_hash_add(napi); + + /* default settings from sysfs are applied to all NAPIs. any per-NAPI + * configuration will be loaded in napi_enable + */ + napi_set_defer_hard_irqs(napi, READ_ONCE(dev->napi_defer_hard_irqs)); + napi_set_gro_flush_timeout(napi, READ_ONCE(dev->gro_flush_timeout)); + napi_get_frags_check(napi); /* Create kthread for this napi if dev->threaded is set. * Clear dev->threaded if kthread creation failed so that @@ -6700,6 +6745,11 @@ void napi_disable(struct napi_struct *n) hrtimer_cancel(&n->timer); + if (n->config) + napi_save_config(n); + else + napi_hash_del(n); + clear_bit(NAPI_STATE_DISABLE, &n->state); } EXPORT_SYMBOL(napi_disable); @@ -6715,6 +6765,11 @@ void napi_enable(struct napi_struct *n) { unsigned long new, val = READ_ONCE(n->state); + if (n->config) + napi_restore_config(n); + else + napi_hash_add(n); + do { BUG_ON(!test_bit(NAPI_STATE_SCHED, &val)); @@ -6744,7 +6799,11 @@ void __netif_napi_del(struct napi_struct *napi) if (!test_and_clear_bit(NAPI_STATE_LISTED, &napi->state)) return; - napi_hash_del(napi); + if (napi->config) { + napi->index = -1; + napi->config = NULL; + } + list_del_rcu(&napi->dev_list); napi_free_frags(napi); @@ -11056,8 +11115,8 @@ void netdev_sw_irq_coalesce_default_on(struct net_device *dev) WARN_ON(dev->reg_state == NETREG_REGISTERED); if (!IS_ENABLED(CONFIG_PREEMPT_RT)) { - dev->gro_flush_timeout = 20000; - dev->napi_defer_hard_irqs = 1; + netdev_set_gro_flush_timeout(dev, 20000); + netdev_set_defer_hard_irqs(dev, 1); } } EXPORT_SYMBOL_GPL(netdev_sw_irq_coalesce_default_on); @@ -11081,6 +11140,8 @@ struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name, unsigned int txqs, unsigned int rxqs) { struct net_device *dev; + size_t napi_config_sz; + unsigned int maxqs; BUG_ON(strlen(name) >= sizeof(dev->name)); @@ -11094,6 +11155,8 @@ struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name, return NULL; } + maxqs = max(txqs, rxqs); + dev = kvzalloc(struct_size(dev, priv, sizeof_priv), GFP_KERNEL_ACCOUNT | __GFP_RETRY_MAYFAIL); if (!dev) @@ -11147,6 +11210,8 @@ struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name, hash_init(dev->qdisc_hash); #endif + mutex_init(&dev->lock); + dev->priv_flags = IFF_XMIT_DST_RELEASE | 
IFF_XMIT_DST_RELEASE_PERM; setup(dev); @@ -11168,6 +11233,11 @@ struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name, if (!dev->ethtool) goto free_all; + napi_config_sz = array_size(maxqs, sizeof(*dev->napi_config)); + dev->napi_config = kvzalloc(napi_config_sz, GFP_KERNEL_ACCOUNT); + if (!dev->napi_config) + goto free_all; + strscpy(dev->name, name); dev->name_assign_type = name_assign_type; dev->group = INIT_NETDEV_GROUP; @@ -11217,6 +11287,8 @@ void free_netdev(struct net_device *dev) return; } + mutex_destroy(&dev->lock); + kfree(dev->ethtool); netif_free_tx_queues(dev); netif_free_rx_queues(dev); @@ -11229,6 +11301,8 @@ void free_netdev(struct net_device *dev) list_for_each_entry_safe(p, n, &dev->napi_list, dev_list) netif_napi_del(p); + kvfree(dev->napi_config); + ref_tracker_dir_exit(&dev->refcnt_tracker); #ifdef CONFIG_PCPU_DEV_REFCNT free_percpu(dev->pcpu_refcnt); @@ -11426,6 +11500,8 @@ void unregister_netdevice_many_notify(struct list_head *head, mutex_destroy(&dev->ethtool->rss_lock); + net_shaper_flush_netdev(dev); + if (skb) rtmsg_ifinfo_send(skb, dev, GFP_KERNEL, portid, nlh); @@ -11994,8 +12070,6 @@ static void __init net_dev_struct_check(void) CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_rx, ifindex); CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_rx, real_num_rx_queues); CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_rx, _rx); - CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_rx, gro_flush_timeout); - CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_rx, napi_defer_hard_irqs); CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_rx, gro_max_size); CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_rx, gro_ipv4_max_size); CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_rx, rx_handler); @@ -12007,7 +12081,7 @@ static void __init net_dev_struct_check(void) #ifdef CONFIG_NET_XGRESS CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_rx, tcx_ingress); #endif - CACHELINE_ASSERT_GROUP_SIZE(struct net_device, net_device_read_rx, 104); + CACHELINE_ASSERT_GROUP_SIZE(struct net_device, net_device_read_rx, 92); } /* diff --git a/net/core/dev.h b/net/core/dev.h index 5654325c5b71..7881bced70a9 100644 --- a/net/core/dev.h +++ b/net/core/dev.h @@ -35,6 +35,16 @@ void dev_addr_flush(struct net_device *dev); int dev_addr_init(struct net_device *dev); void dev_addr_check(struct net_device *dev); +#if IS_ENABLED(CONFIG_NET_SHAPER) +void net_shaper_flush_netdev(struct net_device *dev); +void net_shaper_set_real_num_tx_queues(struct net_device *dev, + unsigned int txq); +#else +static inline void net_shaper_flush_netdev(struct net_device *dev) {} +static inline void net_shaper_set_real_num_tx_queues(struct net_device *dev, + unsigned int txq) {} +#endif + /* sysctls not referred to from outside net/core/ */ extern int netdev_unregister_timeout_secs; extern int weight_p; @@ -138,6 +148,94 @@ static inline void netif_set_gro_ipv4_max_size(struct net_device *dev, WRITE_ONCE(dev->gro_ipv4_max_size, size); } +/** + * napi_get_defer_hard_irqs - get the NAPI's defer_hard_irqs + * @n: napi struct to get the defer_hard_irqs field from + * + * Return: the per-NAPI value of the defer_hard_irqs field.
+ */ +static inline u32 napi_get_defer_hard_irqs(const struct napi_struct *n) +{ + return READ_ONCE(n->defer_hard_irqs); +} + +/** + * napi_set_defer_hard_irqs - set the defer_hard_irqs for a napi + * @n: napi_struct to set the defer_hard_irqs field + * @defer: the value the field should be set to + */ +static inline void napi_set_defer_hard_irqs(struct napi_struct *n, u32 defer) +{ + WRITE_ONCE(n->defer_hard_irqs, defer); +} + +/** + * netdev_set_defer_hard_irqs - set defer_hard_irqs for all NAPIs of a netdev + * @netdev: the net_device for which all NAPIs will have defer_hard_irqs set + * @defer: the defer_hard_irqs value to set + */ +static inline void netdev_set_defer_hard_irqs(struct net_device *netdev, + u32 defer) +{ + unsigned int count = max(netdev->num_rx_queues, + netdev->num_tx_queues); + struct napi_struct *napi; + int i; + + WRITE_ONCE(netdev->napi_defer_hard_irqs, defer); + list_for_each_entry(napi, &netdev->napi_list, dev_list) + napi_set_defer_hard_irqs(napi, defer); + + for (i = 0; i < count; i++) + netdev->napi_config[i].defer_hard_irqs = defer; +} + +/** + * napi_get_gro_flush_timeout - get the gro_flush_timeout + * @n: napi struct to get the gro_flush_timeout from + * + * Return: the per-NAPI value of the gro_flush_timeout field. + */ +static inline unsigned long +napi_get_gro_flush_timeout(const struct napi_struct *n) +{ + return READ_ONCE(n->gro_flush_timeout); +} + +/** + * napi_set_gro_flush_timeout - set the gro_flush_timeout for a napi + * @n: napi struct to set the gro_flush_timeout + * @timeout: timeout value to set + * + * napi_set_gro_flush_timeout sets the per-NAPI gro_flush_timeout + */ +static inline void napi_set_gro_flush_timeout(struct napi_struct *n, + unsigned long timeout) +{ + WRITE_ONCE(n->gro_flush_timeout, timeout); +} + +/** + * netdev_set_gro_flush_timeout - set gro_flush_timeout of a netdev's NAPIs + * @netdev: the net_device for which all NAPIs will have gro_flush_timeout set + * @timeout: the timeout value to set + */ +static inline void netdev_set_gro_flush_timeout(struct net_device *netdev, + unsigned long timeout) +{ + unsigned int count = max(netdev->num_rx_queues, + netdev->num_tx_queues); + struct napi_struct *napi; + int i; + + WRITE_ONCE(netdev->gro_flush_timeout, timeout); + list_for_each_entry(napi, &netdev->napi_list, dev_list) + napi_set_gro_flush_timeout(napi, timeout); + + for (i = 0; i < count; i++) + netdev->napi_config[i].gro_flush_timeout = timeout; +} + int rps_cpumask_housekeeping(struct cpumask *mask); #if defined(CONFIG_DEBUG_NET) && defined(CONFIG_BPF_SYSCALL) diff --git a/net/core/fib_notifier.c b/net/core/fib_notifier.c index fc96259807b6..5cdca49b1d7c 100644 --- a/net/core/fib_notifier.c +++ b/net/core/fib_notifier.c @@ -43,7 +43,6 @@ static unsigned int fib_seq_sum(struct net *net) struct fib_notifier_ops *ops; unsigned int fib_seq = 0; - rtnl_lock(); rcu_read_lock(); list_for_each_entry_rcu(ops, &fn_net->fib_notifier_ops, list) { if (!try_module_get(ops->owner)) @@ -52,7 +51,6 @@ static unsigned int fib_seq_sum(struct net *net) module_put(ops->owner); } rcu_read_unlock(); - rtnl_unlock(); return fib_seq; } diff --git a/net/core/fib_rules.c b/net/core/fib_rules.c index 154a2681f55c..d0de9677f450 100644 --- a/net/core/fib_rules.c +++ b/net/core/fib_rules.c @@ -101,7 +101,8 @@ static void notify_rule_change(int event, struct fib_rule *rule, struct fib_rules_ops *ops, struct nlmsghdr *nlh, u32 pid); -static struct fib_rules_ops *lookup_rules_ops(struct net *net, int family) +static struct fib_rules_ops 
*lookup_rules_ops(const struct net *net, + int family) { struct fib_rules_ops *ops; @@ -370,7 +371,9 @@ static int call_fib_rule_notifiers(struct net *net, .rule = rule, }; - ops->fib_rules_seq++; + ASSERT_RTNL(); + /* Paired with READ_ONCE() in fib_rules_seq() */ + WRITE_ONCE(ops->fib_rules_seq, ops->fib_rules_seq + 1); return call_fib_notifiers(net, event_type, &info.info); } @@ -397,17 +400,16 @@ int fib_rules_dump(struct net *net, struct notifier_block *nb, int family, } EXPORT_SYMBOL_GPL(fib_rules_dump); -unsigned int fib_rules_seq_read(struct net *net, int family) +unsigned int fib_rules_seq_read(const struct net *net, int family) { unsigned int fib_rules_seq; struct fib_rules_ops *ops; - ASSERT_RTNL(); - ops = lookup_rules_ops(net, family); if (!ops) return 0; - fib_rules_seq = ops->fib_rules_seq; + /* Paired with WRITE_ONCE() in call_fib_rule_notifiers() */ + fib_rules_seq = READ_ONCE(ops->fib_rules_seq); rules_ops_put(ops); return fib_rules_seq; @@ -1289,13 +1291,18 @@ static struct pernet_operations fib_rules_net_ops = { .exit = fib_rules_net_exit, }; +static const struct rtnl_msg_handler fib_rules_rtnl_msg_handlers[] __initconst = { + {.msgtype = RTM_NEWRULE, .doit = fib_nl_newrule}, + {.msgtype = RTM_DELRULE, .doit = fib_nl_delrule}, + {.msgtype = RTM_GETRULE, .dumpit = fib_nl_dumprule, + .flags = RTNL_FLAG_DUMP_UNLOCKED}, +}; + static int __init fib_rules_init(void) { int err; - rtnl_register(PF_UNSPEC, RTM_NEWRULE, fib_nl_newrule, NULL, 0); - rtnl_register(PF_UNSPEC, RTM_DELRULE, fib_nl_delrule, NULL, 0); - rtnl_register(PF_UNSPEC, RTM_GETRULE, NULL, fib_nl_dumprule, - RTNL_FLAG_DUMP_UNLOCKED); + + rtnl_register_many(fib_rules_rtnl_msg_handlers); err = register_pernet_subsys(&fib_rules_net_ops); if (err < 0) @@ -1310,9 +1317,7 @@ static int __init fib_rules_init(void) fail_unregister: unregister_pernet_subsys(&fib_rules_net_ops); fail: - rtnl_unregister(PF_UNSPEC, RTM_NEWRULE); - rtnl_unregister(PF_UNSPEC, RTM_DELRULE); - rtnl_unregister(PF_UNSPEC, RTM_GETRULE); + rtnl_unregister_many(fib_rules_rtnl_msg_handlers); return err; } diff --git a/net/core/filter.c b/net/core/filter.c index cb272b35d484..58761263176c 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -5140,6 +5140,17 @@ static u64 __bpf_get_netns_cookie(struct sock *sk) return net->net_cookie; } +BPF_CALL_1(bpf_get_netns_cookie, struct sk_buff *, skb) +{ + return __bpf_get_netns_cookie(skb && skb->sk ? skb->sk : NULL); +} + +static const struct bpf_func_proto bpf_get_netns_cookie_proto = { + .func = bpf_get_netns_cookie, + .ret_type = RET_INTEGER, + .arg1_type = ARG_PTR_TO_CTX_OR_NULL, +}; + BPF_CALL_1(bpf_get_netns_cookie_sock, struct sock *, ctx) { return __bpf_get_netns_cookie(ctx); @@ -6780,8 +6791,6 @@ __bpf_sk_lookup(struct sk_buff *skb, struct bpf_sock_tuple *tuple, u32 len, /* sk_to_full_sk() may return (sk)->rsk_listener, so make sure the original sk * sock refcnt is decremented to prevent a request_sock leak. */ - if (!sk_fullsock(sk2)) - sk2 = NULL; if (sk2 != sk) { sock_gen_put(sk); /* Ensure there is no need to bump sk2 refcnt */ @@ -6828,8 +6837,6 @@ bpf_sk_lookup(struct sk_buff *skb, struct bpf_sock_tuple *tuple, u32 len, /* sk_to_full_sk() may return (sk)->rsk_listener, so make sure the original sk * sock refcnt is decremented to prevent a request_sock leak. 
*/ - if (!sk_fullsock(sk2)) - sk2 = NULL; if (sk2 != sk) { sock_gen_put(sk); /* Ensure there is no need to bump sk2 refcnt */ @@ -7278,7 +7285,7 @@ BPF_CALL_1(bpf_get_listener_sock, struct sock *, sk) { sk = sk_to_full_sk(sk); - if (sk->sk_state == TCP_LISTEN && sock_flag(sk, SOCK_RCU_FREE)) + if (sk && sk->sk_state == TCP_LISTEN && sock_flag(sk, SOCK_RCU_FREE)) return (unsigned long)sk; return (unsigned long)NULL; @@ -8211,6 +8218,8 @@ tc_cls_act_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) return &bpf_skb_under_cgroup_proto; case BPF_FUNC_get_socket_cookie: return &bpf_get_socket_cookie_proto; + case BPF_FUNC_get_netns_cookie: + return &bpf_get_netns_cookie_proto; case BPF_FUNC_get_socket_uid: return &bpf_get_socket_uid_proto; case BPF_FUNC_fib_lookup: @@ -10243,10 +10252,6 @@ static u32 xdp_convert_ctx_access(enum bpf_access_type type, } \ } while (0) -#define SOCK_ADDR_LOAD_OR_STORE_NESTED_FIELD(S, NS, F, NF, TF) \ - SOCK_ADDR_LOAD_OR_STORE_NESTED_FIELD_SIZE_OFF( \ - S, NS, F, NF, BPF_FIELD_SIZEOF(NS, NF), 0, TF) - static u32 sock_addr_convert_ctx_access(enum bpf_access_type type, const struct bpf_insn *si, struct bpf_insn *insn_buf, diff --git a/net/core/lwt_bpf.c b/net/core/lwt_bpf.c index 1a14f915b7a4..e0ca24a58810 100644 --- a/net/core/lwt_bpf.c +++ b/net/core/lwt_bpf.c @@ -10,6 +10,7 @@ #include <linux/bpf.h> #include <net/lwtunnel.h> #include <net/gre.h> +#include <net/ip.h> #include <net/ip6_route.h> #include <net/ipv6_stubs.h> #include <net/inet_dscp.h> @@ -91,12 +92,12 @@ static int bpf_lwt_input_reroute(struct sk_buff *skb) if (skb->protocol == htons(ETH_P_IP)) { struct net_device *dev = skb_dst(skb)->dev; - struct iphdr *iph = ip_hdr(skb); + const struct iphdr *iph = ip_hdr(skb); dev_hold(dev); skb_dst_drop(skb); err = ip_route_input_noref(skb, iph->daddr, iph->saddr, - iph->tos, dev); + ip4h_dscp(iph), dev); dev_put(dev); } else if (skb->protocol == htons(ETH_P_IPV6)) { skb_dst_drop(skb); diff --git a/net/core/neighbour.c b/net/core/neighbour.c index 77b819cd995b..395ae1626eef 100644 --- a/net/core/neighbour.c +++ b/net/core/neighbour.c @@ -3886,17 +3886,18 @@ EXPORT_SYMBOL(neigh_sysctl_unregister); #endif /* CONFIG_SYSCTL */ +static const struct rtnl_msg_handler neigh_rtnl_msg_handlers[] __initconst = { + {.msgtype = RTM_NEWNEIGH, .doit = neigh_add}, + {.msgtype = RTM_DELNEIGH, .doit = neigh_delete}, + {.msgtype = RTM_GETNEIGH, .doit = neigh_get, .dumpit = neigh_dump_info, + .flags = RTNL_FLAG_DUMP_UNLOCKED}, + {.msgtype = RTM_GETNEIGHTBL, .dumpit = neightbl_dump_info}, + {.msgtype = RTM_SETNEIGHTBL, .doit = neightbl_set}, +}; + static int __init neigh_init(void) { - rtnl_register(PF_UNSPEC, RTM_NEWNEIGH, neigh_add, NULL, 0); - rtnl_register(PF_UNSPEC, RTM_DELNEIGH, neigh_delete, NULL, 0); - rtnl_register(PF_UNSPEC, RTM_GETNEIGH, neigh_get, neigh_dump_info, - RTNL_FLAG_DUMP_UNLOCKED); - - rtnl_register(PF_UNSPEC, RTM_GETNEIGHTBL, NULL, neightbl_dump_info, - 0); - rtnl_register(PF_UNSPEC, RTM_SETNEIGHTBL, neightbl_set, NULL, 0); - + rtnl_register_many(neigh_rtnl_msg_handlers); return 0; } diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c index 05cf5347f25e..2d9afc6e2161 100644 --- a/net/core/net-sysfs.c +++ b/net/core/net-sysfs.c @@ -409,7 +409,7 @@ NETDEVICE_SHOW_RW(tx_queue_len, fmt_dec); static int change_gro_flush_timeout(struct net_device *dev, unsigned long val) { - WRITE_ONCE(dev->gro_flush_timeout, val); + netdev_set_gro_flush_timeout(dev, val); return 0; } @@ -429,7 +429,7 @@ static int change_napi_defer_hard_irqs(struct net_device *dev, 
unsigned long val if (val > S32_MAX) return -ERANGE; - WRITE_ONCE(dev->napi_defer_hard_irqs, val); + netdev_set_defer_hard_irqs(dev, (u32)val); return 0; } diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c index e39479f1c9a4..809b48c0a528 100644 --- a/net/core/net_namespace.c +++ b/net/core/net_namespace.c @@ -317,6 +317,7 @@ static __net_init void preinit_net_sysctl(struct net *net) */ net->core.sysctl_optmem_max = 128 * 1024; net->core.sysctl_txrehash = SOCK_TXREHASH_ENABLED; + net->core.sysctl_tstamp_allow_data = 1; } /* init code that must occur even if setup_net() is not called. */ @@ -334,6 +335,12 @@ static __net_init void preinit_net(struct net *net, struct user_namespace *user_ idr_init(&net->netns_ids); spin_lock_init(&net->nsid_lock); mutex_init(&net->ipv4.ra_mutex); + +#ifdef CONFIG_DEBUG_NET_SMALL_RTNL + mutex_init(&net->rtnl_mutex); + lock_set_cmp_fn(&net->rtnl_mutex, rtnl_net_lock_cmp_fn, NULL); +#endif + preinit_net_sysctl(net); } @@ -1153,13 +1160,23 @@ static void __init netns_ipv4_struct_check(void) CACHELINE_ASSERT_GROUP_MEMBER(struct netns_ipv4, netns_ipv4_read_rx, sysctl_tcp_early_demux); CACHELINE_ASSERT_GROUP_MEMBER(struct netns_ipv4, netns_ipv4_read_rx, + sysctl_tcp_l3mdev_accept); + CACHELINE_ASSERT_GROUP_MEMBER(struct netns_ipv4, netns_ipv4_read_rx, sysctl_tcp_reordering); CACHELINE_ASSERT_GROUP_MEMBER(struct netns_ipv4, netns_ipv4_read_rx, sysctl_tcp_rmem); - CACHELINE_ASSERT_GROUP_SIZE(struct netns_ipv4, netns_ipv4_read_rx, 18); + CACHELINE_ASSERT_GROUP_SIZE(struct netns_ipv4, netns_ipv4_read_rx, 22); } #endif +static const struct rtnl_msg_handler net_ns_rtnl_msg_handlers[] __initconst = { + {.msgtype = RTM_NEWNSID, .doit = rtnl_net_newid, + .flags = RTNL_FLAG_DOIT_UNLOCKED}, + {.msgtype = RTM_GETNSID, .doit = rtnl_net_getid, + .dumpit = rtnl_net_dumpid, + .flags = RTNL_FLAG_DOIT_UNLOCKED | RTNL_FLAG_DUMP_UNLOCKED}, +}; + void __init net_ns_init(void) { struct net_generic *ng; @@ -1197,11 +1214,7 @@ void __init net_ns_init(void) if (register_pernet_subsys(&net_ns_ops)) panic("Could not register network namespace subsystems"); - rtnl_register(PF_UNSPEC, RTM_NEWNSID, rtnl_net_newid, NULL, - RTNL_FLAG_DOIT_UNLOCKED); - rtnl_register(PF_UNSPEC, RTM_GETNSID, rtnl_net_getid, rtnl_net_dumpid, - RTNL_FLAG_DOIT_UNLOCKED | - RTNL_FLAG_DUMP_UNLOCKED); + rtnl_register_many(net_ns_rtnl_msg_handlers); } static void free_exit_list(struct pernet_operations *ops, struct list_head *net_exit_list) diff --git a/net/core/netdev-genl-gen.c b/net/core/netdev-genl-gen.c index b28424ae06d5..21de7e10be16 100644 --- a/net/core/netdev-genl-gen.c +++ b/net/core/netdev-genl-gen.c @@ -14,12 +14,16 @@ /* Integer value ranges */ static const struct netlink_range_validation netdev_a_page_pool_id_range = { .min = 1ULL, - .max = 4294967295ULL, + .max = U32_MAX, }; static const struct netlink_range_validation netdev_a_page_pool_ifindex_range = { .min = 1ULL, - .max = 2147483647ULL, + .max = S32_MAX, +}; + +static const struct netlink_range_validation netdev_a_napi_defer_hard_irqs_range = { + .max = S32_MAX, }; /* Common nested types */ @@ -87,6 +91,13 @@ static const struct nla_policy netdev_bind_rx_nl_policy[NETDEV_A_DMABUF_FD + 1] [NETDEV_A_DMABUF_QUEUES] = NLA_POLICY_NESTED(netdev_queue_id_nl_policy), }; +/* NETDEV_CMD_NAPI_SET - do */ +static const struct nla_policy netdev_napi_set_nl_policy[NETDEV_A_NAPI_GRO_FLUSH_TIMEOUT + 1] = { + [NETDEV_A_NAPI_ID] = { .type = NLA_U32, }, + [NETDEV_A_NAPI_DEFER_HARD_IRQS] = NLA_POLICY_FULL_RANGE(NLA_U32, 
&netdev_a_napi_defer_hard_irqs_range), + [NETDEV_A_NAPI_GRO_FLUSH_TIMEOUT] = { .type = NLA_UINT, }, +}; + /* Ops table for netdev */ static const struct genl_split_ops netdev_nl_ops[] = { { @@ -171,6 +182,13 @@ static const struct genl_split_ops netdev_nl_ops[] = { .maxattr = NETDEV_A_DMABUF_FD, .flags = GENL_ADMIN_PERM | GENL_CMD_CAP_DO, }, + { + .cmd = NETDEV_CMD_NAPI_SET, + .doit = netdev_nl_napi_set_doit, + .policy = netdev_napi_set_nl_policy, + .maxattr = NETDEV_A_NAPI_GRO_FLUSH_TIMEOUT, + .flags = GENL_ADMIN_PERM | GENL_CMD_CAP_DO, + }, }; static const struct genl_multicast_group netdev_nl_mcgrps[] = { diff --git a/net/core/netdev-genl-gen.h b/net/core/netdev-genl-gen.h index 8cda334fd042..e09dd7539ff2 100644 --- a/net/core/netdev-genl-gen.h +++ b/net/core/netdev-genl-gen.h @@ -33,6 +33,7 @@ int netdev_nl_napi_get_dumpit(struct sk_buff *skb, struct netlink_callback *cb); int netdev_nl_qstats_get_dumpit(struct sk_buff *skb, struct netlink_callback *cb); int netdev_nl_bind_rx_doit(struct sk_buff *skb, struct genl_info *info); +int netdev_nl_napi_set_doit(struct sk_buff *skb, struct genl_info *info); enum { NETDEV_NLGRP_MGMT, diff --git a/net/core/netdev-genl.c b/net/core/netdev-genl.c index 1cb954f2d39e..b49c3b4e5fbe 100644 --- a/net/core/netdev-genl.c +++ b/net/core/netdev-genl.c @@ -24,7 +24,7 @@ struct netdev_nl_dump_ctx { static struct netdev_nl_dump_ctx *netdev_dump_ctx(struct netlink_callback *cb) { - NL_ASSERT_DUMP_CTX_FITS(struct netdev_nl_dump_ctx); + NL_ASSERT_CTX_FITS(struct netdev_nl_dump_ctx); return (struct netdev_nl_dump_ctx *)cb->ctx; } @@ -161,6 +161,8 @@ static int netdev_nl_napi_fill_one(struct sk_buff *rsp, struct napi_struct *napi, const struct genl_info *info) { + unsigned long gro_flush_timeout; + u32 napi_defer_hard_irqs; void *hdr; pid_t pid; @@ -189,6 +191,16 @@ netdev_nl_napi_fill_one(struct sk_buff *rsp, struct napi_struct *napi, goto nla_put_failure; } + napi_defer_hard_irqs = napi_get_defer_hard_irqs(napi); + if (nla_put_s32(rsp, NETDEV_A_NAPI_DEFER_HARD_IRQS, + napi_defer_hard_irqs)) + goto nla_put_failure; + + gro_flush_timeout = napi_get_gro_flush_timeout(napi); + if (nla_put_uint(rsp, NETDEV_A_NAPI_GRO_FLUSH_TIMEOUT, + gro_flush_timeout)) + goto nla_put_failure; + genlmsg_end(rsp, hdr); return 0; @@ -292,6 +304,51 @@ int netdev_nl_napi_get_dumpit(struct sk_buff *skb, struct netlink_callback *cb) } static int +netdev_nl_napi_set_config(struct napi_struct *napi, struct genl_info *info) +{ + u64 gro_flush_timeout = 0; + u32 defer = 0; + + if (info->attrs[NETDEV_A_NAPI_DEFER_HARD_IRQS]) { + defer = nla_get_u32(info->attrs[NETDEV_A_NAPI_DEFER_HARD_IRQS]); + napi_set_defer_hard_irqs(napi, defer); + } + + if (info->attrs[NETDEV_A_NAPI_GRO_FLUSH_TIMEOUT]) { + gro_flush_timeout = nla_get_uint(info->attrs[NETDEV_A_NAPI_GRO_FLUSH_TIMEOUT]); + napi_set_gro_flush_timeout(napi, gro_flush_timeout); + } + + return 0; +} + +int netdev_nl_napi_set_doit(struct sk_buff *skb, struct genl_info *info) +{ + struct napi_struct *napi; + unsigned int napi_id; + int err; + + if (GENL_REQ_ATTR_CHECK(info, NETDEV_A_NAPI_ID)) + return -EINVAL; + + napi_id = nla_get_u32(info->attrs[NETDEV_A_NAPI_ID]); + + rtnl_lock(); + + napi = napi_by_id(napi_id); + if (napi) { + err = netdev_nl_napi_set_config(napi, info); + } else { + NL_SET_BAD_ATTR(info->extack, info->attrs[NETDEV_A_NAPI_ID]); + err = -ENOENT; + } + + rtnl_unlock(); + + return err; +} + +static int netdev_nl_queue_fill_one(struct sk_buff *rsp, struct net_device *netdev, u32 q_idx, u32 q_type, const struct genl_info *info) { 
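The conversions in net/can/gw.c, net/core/fib_rules.c and net/core/neighbour.c above all follow the same table-driven shape: every rtnetlink message a subsystem handles is described once in a const array of struct rtnl_msg_handler and registered with a single rtnl_register_many() call. A minimal sketch of the module-side pattern is given below; the foo_* names and the PF_CAN/RTM_GETROUTE pairing are hypothetical stand-ins, and only the rtnl_msg_handler fields and the rtnl_register_many()/rtnl_unregister_many() pair are taken from this series.

	#include <linux/module.h>
	#include <net/rtnetlink.h>

	/* Hypothetical dump handler; a real one would fill @skb. */
	static int foo_dump_routes(struct sk_buff *skb, struct netlink_callback *cb)
	{
		return 0;
	}

	static const struct rtnl_msg_handler foo_rtnl_msg_handlers[] __initconst_or_module = {
		{.owner = THIS_MODULE, .protocol = PF_CAN, .msgtype = RTM_GETROUTE,
		 .dumpit = foo_dump_routes},
	};

	static int __init foo_init(void)
	{
		/* With a non-NULL .owner, a failure part-way through the
		 * array unwinds the registrations already made and returns
		 * an error; built-in callers (.owner == NULL) panic instead,
		 * so they may ignore the return value.
		 */
		return rtnl_register_many(foo_rtnl_msg_handlers);
	}

	static void __exit foo_exit(void)
	{
		rtnl_unregister_many(foo_rtnl_msg_handlers);
	}

	module_init(foo_init);
	module_exit(foo_exit);
	MODULE_LICENSE("GPL");

Keeping the unwind logic inside __rtnl_register_many() is what lets callers such as cgw_module_init() above drop their chains of out_rtnl_registerN error labels.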
diff --git a/net/core/netpoll.c b/net/core/netpoll.c index aa49b92e9194..94b7f07a952f 100644 --- a/net/core/netpoll.c +++ b/net/core/netpoll.c @@ -641,7 +641,7 @@ int __netpoll_setup(struct netpoll *np, struct net_device *ndev) ops = ndev->netdev_ops; if (ops->ndo_netpoll_setup) { - err = ops->ndo_netpoll_setup(ndev, npinfo); + err = ops->ndo_netpoll_setup(ndev); if (err) goto free_npinfo; } diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c index e30e7ea0207d..194a81e5f608 100644 --- a/net/core/rtnetlink.c +++ b/net/core/rtnetlink.c @@ -179,6 +179,76 @@ bool lockdep_rtnl_is_held(void) EXPORT_SYMBOL(lockdep_rtnl_is_held); #endif /* #ifdef CONFIG_PROVE_LOCKING */ +#ifdef CONFIG_DEBUG_NET_SMALL_RTNL +void __rtnl_net_lock(struct net *net) +{ + ASSERT_RTNL(); + + mutex_lock(&net->rtnl_mutex); +} +EXPORT_SYMBOL(__rtnl_net_lock); + +void __rtnl_net_unlock(struct net *net) +{ + ASSERT_RTNL(); + + mutex_unlock(&net->rtnl_mutex); +} +EXPORT_SYMBOL(__rtnl_net_unlock); + +void rtnl_net_lock(struct net *net) +{ + rtnl_lock(); + __rtnl_net_lock(net); +} +EXPORT_SYMBOL(rtnl_net_lock); + +void rtnl_net_unlock(struct net *net) +{ + __rtnl_net_unlock(net); + rtnl_unlock(); +} +EXPORT_SYMBOL(rtnl_net_unlock); + +static int rtnl_net_cmp_locks(const struct net *net_a, const struct net *net_b) +{ + if (net_eq(net_a, net_b)) + return 0; + + /* always init_net first */ + if (net_eq(net_a, &init_net)) + return -1; + + if (net_eq(net_b, &init_net)) + return 1; + + /* otherwise lock in ascending order */ + return net_a < net_b ? -1 : 1; +} + +int rtnl_net_lock_cmp_fn(const struct lockdep_map *a, const struct lockdep_map *b) +{ + const struct net *net_a, *net_b; + + net_a = container_of(a, struct net, rtnl_mutex.dep_map); + net_b = container_of(b, struct net, rtnl_mutex.dep_map); + + return rtnl_net_cmp_locks(net_a, net_b); +} + +bool rtnl_net_is_locked(struct net *net) +{ + return rtnl_is_locked() && mutex_is_locked(&net->rtnl_mutex); +} +EXPORT_SYMBOL(rtnl_net_is_locked); + +bool lockdep_rtnl_net_is_held(struct net *net) +{ + return lockdep_rtnl_is_held() && lockdep_is_held(&net->rtnl_mutex); +} +EXPORT_SYMBOL(lockdep_rtnl_net_is_held); +#endif + static struct rtnl_link __rcu *__rcu *rtnl_msg_handlers[RTNL_FAMILY_MAX + 1]; static inline int rtm_msgindex(int msgtype) @@ -269,64 +339,13 @@ unlock: } /** - * rtnl_register_module - Register a rtnetlink message type - * - * @owner: module registering the hook (THIS_MODULE) - * @protocol: Protocol family or PF_UNSPEC - * @msgtype: rtnetlink message type - * @doit: Function pointer called for each request message - * @dumpit: Function pointer called for each dump request (NLM_F_DUMP) message - * @flags: rtnl_link_flags to modify behaviour of doit/dumpit functions - * - * Like rtnl_register, but for use by removable modules. 
- */ -int rtnl_register_module(struct module *owner, - int protocol, int msgtype, - rtnl_doit_func doit, rtnl_dumpit_func dumpit, - unsigned int flags) -{ - return rtnl_register_internal(owner, protocol, msgtype, - doit, dumpit, flags); -} -EXPORT_SYMBOL_GPL(rtnl_register_module); - -/** - * rtnl_register - Register a rtnetlink message type - * @protocol: Protocol family or PF_UNSPEC - * @msgtype: rtnetlink message type - * @doit: Function pointer called for each request message - * @dumpit: Function pointer called for each dump request (NLM_F_DUMP) message - * @flags: rtnl_link_flags to modify behaviour of doit/dumpit functions - * - * Registers the specified function pointers (at least one of them has - * to be non-NULL) to be called whenever a request message for the - * specified protocol family and message type is received. - * - * The special protocol family PF_UNSPEC may be used to define fallback - * function pointers for the case when no entry for the specific protocol - * family exists. - */ -void rtnl_register(int protocol, int msgtype, - rtnl_doit_func doit, rtnl_dumpit_func dumpit, - unsigned int flags) -{ - int err; - - err = rtnl_register_internal(NULL, protocol, msgtype, doit, dumpit, - flags); - if (err) - pr_err("Unable to register rtnetlink message handler, " - "protocol = %d, message type = %d\n", protocol, msgtype); -} - -/** * rtnl_unregister - Unregister a rtnetlink message type * @protocol: Protocol family or PF_UNSPEC * @msgtype: rtnetlink message type * * Returns 0 on success or a negative error code. */ -int rtnl_unregister(int protocol, int msgtype) +static int rtnl_unregister(int protocol, int msgtype) { struct rtnl_link __rcu **tab; struct rtnl_link *link; @@ -349,7 +368,6 @@ int rtnl_unregister(int protocol, int msgtype) return 0; } -EXPORT_SYMBOL_GPL(rtnl_unregister); /** * rtnl_unregister_all - Unregister all rtnetlink message type of a protocol @@ -384,6 +402,26 @@ void rtnl_unregister_all(int protocol) } EXPORT_SYMBOL_GPL(rtnl_unregister_all); +/** + * __rtnl_register_many - Register rtnetlink message types + * @handlers: Array of struct rtnl_msg_handlers + * @n: The length of @handlers + * + * Registers the specified function pointers (at least one of them has + * to be non-NULL) to be called whenever a request message for the + * specified protocol family and message type is received. + * + * The special protocol family PF_UNSPEC may be used to define fallback + * function pointers for the case when no entry for the specific protocol + * family exists. + * + * When one element of @handlers fails to register, + * 1) built-in: panics. + * 2) modules : the previous successful registrations are unwound + * and an error is returned. + * + * Use rtnl_register_many().
+ */ int __rtnl_register_many(const struct rtnl_msg_handler *handlers, int n) { const struct rtnl_msg_handler *handler; @@ -394,6 +432,10 @@ int __rtnl_register_many(const struct rtnl_msg_handler *handlers, int n) handler->msgtype, handler->doit, handler->dumpit, handler->flags); if (err) { + if (!handler->owner) + panic("Unable to register rtnetlink message " + "handlers, %pS\n", handlers); + __rtnl_unregister_many(handlers, i); break; } @@ -415,15 +457,29 @@ EXPORT_SYMBOL_GPL(__rtnl_unregister_many); static LIST_HEAD(link_ops); -static const struct rtnl_link_ops *rtnl_link_ops_get(const char *kind) +static struct rtnl_link_ops *rtnl_link_ops_get(const char *kind, int *srcu_index) { - const struct rtnl_link_ops *ops; + struct rtnl_link_ops *ops; + + rcu_read_lock(); - list_for_each_entry(ops, &link_ops, list) { - if (!strcmp(ops->kind, kind)) - return ops; + list_for_each_entry_rcu(ops, &link_ops, list) { + if (!strcmp(ops->kind, kind)) { + *srcu_index = srcu_read_lock(&ops->srcu); + goto unlock; + } } - return NULL; + + ops = NULL; +unlock: + rcu_read_unlock(); + + return ops; +} + +static void rtnl_link_ops_put(struct rtnl_link_ops *ops, int srcu_index) +{ + srcu_read_unlock(&ops->srcu, srcu_index); } /** @@ -438,8 +494,16 @@ static const struct rtnl_link_ops *rtnl_link_ops_get(const char *kind) */ int __rtnl_link_register(struct rtnl_link_ops *ops) { - if (rtnl_link_ops_get(ops->kind)) - return -EEXIST; + struct rtnl_link_ops *tmp; + int err; + + /* When RTNL is removed, add lock for link_ops. */ + ASSERT_RTNL(); + + list_for_each_entry(tmp, &link_ops, list) { + if (!strcmp(ops->kind, tmp->kind)) + return -EEXIST; + } /* The check for alloc/setup is here because if ops * does not have that filled up, it is not possible @@ -449,7 +513,12 @@ int __rtnl_link_register(struct rtnl_link_ops *ops) if ((ops->alloc || ops->setup) && !ops->dellink) ops->dellink = unregister_netdevice_queue; - list_add_tail(&ops->list, &link_ops); + err = init_srcu_struct(&ops->srcu); + if (err) + return err; + + list_add_tail_rcu(&ops->list, &link_ops); + return 0; } EXPORT_SYMBOL_GPL(__rtnl_link_register); @@ -500,10 +569,12 @@ void __rtnl_link_unregister(struct rtnl_link_ops *ops) { struct net *net; - for_each_net(net) { + list_del_rcu(&ops->list); + synchronize_srcu(&ops->srcu); + cleanup_srcu_struct(&ops->srcu); + + for_each_net(net) __rtnl_kill_links(net, ops); - } - list_del(&ops->list); } EXPORT_SYMBOL_GPL(__rtnl_link_unregister); @@ -595,18 +666,31 @@ static size_t rtnl_link_get_size(const struct net_device *dev) static LIST_HEAD(rtnl_af_ops); -static const struct rtnl_af_ops *rtnl_af_lookup(const int family) +static struct rtnl_af_ops *rtnl_af_lookup(const int family, int *srcu_index) { - const struct rtnl_af_ops *ops; + struct rtnl_af_ops *ops; ASSERT_RTNL(); - list_for_each_entry(ops, &rtnl_af_ops, list) { - if (ops->family == family) - return ops; + rcu_read_lock(); + + list_for_each_entry_rcu(ops, &rtnl_af_ops, list) { + if (ops->family == family) { + *srcu_index = srcu_read_lock(&ops->srcu); + goto unlock; + } } - return NULL; + ops = NULL; +unlock: + rcu_read_unlock(); + + return ops; +} + +static void rtnl_af_put(struct rtnl_af_ops *ops, int srcu_index) +{ + srcu_read_unlock(&ops->srcu, srcu_index); } /** @@ -615,11 +699,18 @@ static const struct rtnl_af_ops *rtnl_af_lookup(const int family) * * Returns 0 on success or a negative error code. 
*/ -void rtnl_af_register(struct rtnl_af_ops *ops) +int rtnl_af_register(struct rtnl_af_ops *ops) { + int err = init_srcu_struct(&ops->srcu); + + if (err) + return err; + rtnl_lock(); list_add_tail_rcu(&ops->list, &rtnl_af_ops); rtnl_unlock(); + + return 0; } EXPORT_SYMBOL_GPL(rtnl_af_register); @@ -634,6 +725,8 @@ void rtnl_af_unregister(struct rtnl_af_ops *ops) rtnl_unlock(); synchronize_rcu(); + synchronize_srcu(&ops->srcu); + cleanup_srcu_struct(&ops->srcu); } EXPORT_SYMBOL_GPL(rtnl_af_unregister); @@ -1147,6 +1240,7 @@ static noinline size_t if_nlmsg_size(const struct net_device *dev, + nla_total_size(MAX_ADDR_LEN) /* IFLA_PERM_ADDRESS */ + rtnl_devlink_port_size(dev) + rtnl_dpll_pin_size(dev) + + nla_total_size(8) /* IFLA_MAX_PACING_OFFLOAD_HORIZON */ + 0; } @@ -1896,6 +1990,8 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb, READ_ONCE(dev->tso_max_size)) || nla_put_u32(skb, IFLA_TSO_MAX_SEGS, READ_ONCE(dev->tso_max_segs)) || + nla_put_uint(skb, IFLA_MAX_PACING_OFFLOAD_HORIZON, + READ_ONCE(dev->max_pacing_offload_horizon)) || #ifdef CONFIG_RPS nla_put_u32(skb, IFLA_NUM_RX_QUEUES, READ_ONCE(dev->num_rx_queues)) || @@ -2004,6 +2100,7 @@ nla_put_failure: } static const struct nla_policy ifla_policy[IFLA_MAX+1] = { + [IFLA_UNSPEC] = { .strict_start_type = IFLA_DPLL_PIN }, [IFLA_IFNAME] = { .type = NLA_STRING, .len = IFNAMSIZ-1 }, [IFLA_ADDRESS] = { .type = NLA_BINARY, .len = MAX_ADDR_LEN }, [IFLA_BROADCAST] = { .type = NLA_BINARY, .len = MAX_ADDR_LEN }, @@ -2112,10 +2209,11 @@ static const struct nla_policy ifla_xdp_policy[IFLA_XDP_MAX + 1] = { [IFLA_XDP_PROG_ID] = { .type = NLA_U32 }, }; -static const struct rtnl_link_ops *linkinfo_to_kind_ops(const struct nlattr *nla) +static struct rtnl_link_ops *linkinfo_to_kind_ops(const struct nlattr *nla, + int *ops_srcu_index) { - const struct rtnl_link_ops *ops = NULL; struct nlattr *linfo[IFLA_INFO_MAX + 1]; + struct rtnl_link_ops *ops = NULL; if (nla_parse_nested_deprecated(linfo, IFLA_INFO_MAX, nla, ifla_info_policy, NULL) < 0) return NULL; @@ -2124,7 +2222,7 @@ static const struct rtnl_link_ops *linkinfo_to_kind_ops(const struct nlattr *nla char kind[MODULE_NAME_LEN]; nla_strscpy(kind, linfo[IFLA_INFO_KIND], sizeof(kind)); - ops = rtnl_link_ops_get(kind); + ops = rtnl_link_ops_get(kind, ops_srcu_index); } return ops; @@ -2244,8 +2342,8 @@ static int rtnl_valid_dump_ifinfo_req(const struct nlmsghdr *nlh, static int rtnl_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb) { - const struct rtnl_link_ops *kind_ops = NULL; struct netlink_ext_ack *extack = cb->extack; + struct rtnl_link_ops *kind_ops = NULL; const struct nlmsghdr *nlh = cb->nlh; struct net *net = sock_net(skb->sk); unsigned int flags = NLM_F_MULTI; @@ -2256,6 +2354,7 @@ static int rtnl_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb) struct net *tgt_net = net; u32 ext_filter_mask = 0; struct net_device *dev; + int ops_srcu_index; int master_idx = 0; int netnsid = -1; int err, i; @@ -2289,7 +2388,7 @@ static int rtnl_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb) master_idx = nla_get_u32(tb[i]); break; case IFLA_LINKINFO: - kind_ops = linkinfo_to_kind_ops(tb[i]); + kind_ops = linkinfo_to_kind_ops(tb[i], &ops_srcu_index); break; default: if (cb->strict_check) { @@ -2315,6 +2414,10 @@ walk_entries: if (err < 0) break; } + + if (kind_ops) + rtnl_link_ops_put(kind_ops, ops_srcu_index); + cb->seq = tgt_net->dev_base_seq; nl_dump_check_consistent(cb, nlmsg_hdr(skb)); if (netnsid >= 0) @@ -2496,20 +2599,24 @@ static int validate_linkmsg(struct 
net_device *dev, struct nlattr *tb[], int rem, err; nla_for_each_nested(af, tb[IFLA_AF_SPEC], rem) { - const struct rtnl_af_ops *af_ops; + struct rtnl_af_ops *af_ops; + int af_ops_srcu_index; - af_ops = rtnl_af_lookup(nla_type(af)); + af_ops = rtnl_af_lookup(nla_type(af), &af_ops_srcu_index); if (!af_ops) return -EAFNOSUPPORT; if (!af_ops->set_link_af) - return -EOPNOTSUPP; - - if (af_ops->validate_link_af) { + err = -EOPNOTSUPP; + else if (af_ops->validate_link_af) err = af_ops->validate_link_af(dev, af, extack); - if (err < 0) - return err; - } + else + err = 0; + + rtnl_af_put(af_ops, af_ops_srcu_index); + + if (err < 0) + return err; } } @@ -2800,8 +2907,8 @@ static int do_set_proto_down(struct net_device *dev, #define DO_SETLINK_MODIFIED 0x01 /* notify flag means notify + modified. */ #define DO_SETLINK_NOTIFY 0x03 -static int do_setlink(const struct sk_buff *skb, - struct net_device *dev, struct ifinfomsg *ifm, +static int do_setlink(const struct sk_buff *skb, struct net_device *dev, + struct net *tgt_net, struct ifinfomsg *ifm, struct netlink_ext_ack *extack, struct nlattr **tb, int status) { @@ -2809,32 +2916,28 @@ static int do_setlink(const struct sk_buff *skb, char ifname[IFNAMSIZ]; int err; + err = validate_linkmsg(dev, tb, extack); + if (err < 0) + goto errout; + if (tb[IFLA_IFNAME]) nla_strscpy(ifname, tb[IFLA_IFNAME], IFNAMSIZ); else ifname[0] = '\0'; - if (tb[IFLA_NET_NS_PID] || tb[IFLA_NET_NS_FD] || tb[IFLA_TARGET_NETNSID]) { + if (!net_eq(tgt_net, dev_net(dev))) { const char *pat = ifname[0] ? ifname : NULL; - struct net *net; int new_ifindex; - net = rtnl_link_get_net_capable(skb, dev_net(dev), - tb, CAP_NET_ADMIN); - if (IS_ERR(net)) { - err = PTR_ERR(net); - goto errout; - } - if (tb[IFLA_NEW_IFINDEX]) new_ifindex = nla_get_s32(tb[IFLA_NEW_IFINDEX]); else new_ifindex = 0; - err = __dev_change_net_namespace(dev, net, pat, new_ifindex); - put_net(net); + err = __dev_change_net_namespace(dev, tgt_net, pat, new_ifindex); if (err) goto errout; + status |= DO_SETLINK_MODIFIED; } @@ -3093,11 +3196,18 @@ static int do_setlink(const struct sk_buff *skb, int rem; nla_for_each_nested(af, tb[IFLA_AF_SPEC], rem) { - const struct rtnl_af_ops *af_ops; + struct rtnl_af_ops *af_ops; + int af_ops_srcu_index; - BUG_ON(!(af_ops = rtnl_af_lookup(nla_type(af)))); + af_ops = rtnl_af_lookup(nla_type(af), &af_ops_srcu_index); + if (!af_ops) { + err = -EAFNOSUPPORT; + goto errout; + } err = af_ops->set_link_af(dev, af, extack); + rtnl_af_put(af_ops, af_ops_srcu_index); + if (err < 0) goto errout; @@ -3194,11 +3304,12 @@ static struct net_device *rtnl_dev_get(struct net *net, static int rtnl_setlink(struct sk_buff *skb, struct nlmsghdr *nlh, struct netlink_ext_ack *extack) { + struct ifinfomsg *ifm = nlmsg_data(nlh); struct net *net = sock_net(skb->sk); - struct ifinfomsg *ifm; - struct net_device *dev; - int err; struct nlattr *tb[IFLA_MAX+1]; + struct net_device *dev = NULL; + struct net *tgt_net; + int err; err = nlmsg_parse_deprecated(nlh, sizeof(*ifm), tb, IFLA_MAX, ifla_policy, extack); @@ -3209,25 +3320,25 @@ static int rtnl_setlink(struct sk_buff *skb, struct nlmsghdr *nlh, if (err < 0) goto errout; - err = -EINVAL; - ifm = nlmsg_data(nlh); + tgt_net = rtnl_link_get_net_capable(skb, net, tb, CAP_NET_ADMIN); + if (IS_ERR(tgt_net)) { + err = PTR_ERR(tgt_net); + goto errout; + } + if (ifm->ifi_index > 0) dev = __dev_get_by_index(net, ifm->ifi_index); else if (tb[IFLA_IFNAME] || tb[IFLA_ALT_IFNAME]) dev = rtnl_dev_get(net, tb); else - goto errout; + err = -EINVAL; - if (dev == NULL) { + if 
(dev) + err = do_setlink(skb, dev, tgt_net, ifm, extack, tb, 0); + else if (!err) err = -ENODEV; - goto errout; - } - - err = validate_linkmsg(dev, tb, extack); - if (err < 0) - goto errout; - err = do_setlink(skb, dev, ifm, extack, tb, 0); + put_net(tgt_net); errout: return err; } @@ -3287,14 +3398,14 @@ EXPORT_SYMBOL_GPL(rtnl_delete_link); static int rtnl_dellink(struct sk_buff *skb, struct nlmsghdr *nlh, struct netlink_ext_ack *extack) { + struct ifinfomsg *ifm = nlmsg_data(nlh); struct net *net = sock_net(skb->sk); u32 portid = NETLINK_CB(skb).portid; - struct net *tgt_net = net; - struct net_device *dev = NULL; - struct ifinfomsg *ifm; struct nlattr *tb[IFLA_MAX+1]; - int err; + struct net_device *dev = NULL; + struct net *tgt_net = net; int netnsid = -1; + int err; err = nlmsg_parse_deprecated(nlh, sizeof(*ifm), tb, IFLA_MAX, ifla_policy, extack); @@ -3312,27 +3423,20 @@ static int rtnl_dellink(struct sk_buff *skb, struct nlmsghdr *nlh, return PTR_ERR(tgt_net); } - err = -EINVAL; - ifm = nlmsg_data(nlh); if (ifm->ifi_index > 0) dev = __dev_get_by_index(tgt_net, ifm->ifi_index); else if (tb[IFLA_IFNAME] || tb[IFLA_ALT_IFNAME]) dev = rtnl_dev_get(tgt_net, tb); + + if (dev) + err = rtnl_delete_link(dev, portid, nlh); + else if (ifm->ifi_index > 0 || tb[IFLA_IFNAME] || tb[IFLA_ALT_IFNAME]) + err = -ENODEV; else if (tb[IFLA_GROUP]) err = rtnl_group_dellink(tgt_net, nla_get_u32(tb[IFLA_GROUP])); else - goto out; - - if (!dev) { - if (tb[IFLA_IFNAME] || tb[IFLA_ALT_IFNAME] || ifm->ifi_index > 0) - err = -ENODEV; - - goto out; - } - - err = rtnl_delete_link(dev, portid, nlh); + err = -EINVAL; -out: if (netnsid >= 0) put_net(tgt_net); @@ -3459,21 +3563,90 @@ struct net_device *rtnl_create_link(struct net *net, const char *ifname, } EXPORT_SYMBOL(rtnl_create_link); +struct rtnl_newlink_tbs { + struct nlattr *tb[IFLA_MAX + 1]; + struct nlattr *linkinfo[IFLA_INFO_MAX + 1]; + struct nlattr *attr[RTNL_MAX_TYPE + 1]; + struct nlattr *slave_attr[RTNL_SLAVE_MAX_TYPE + 1]; +}; + +static int rtnl_changelink(const struct sk_buff *skb, struct nlmsghdr *nlh, + const struct rtnl_link_ops *ops, + struct net_device *dev, struct net *tgt_net, + struct rtnl_newlink_tbs *tbs, + struct nlattr **data, + struct netlink_ext_ack *extack) +{ + struct nlattr ** const linkinfo = tbs->linkinfo; + struct nlattr ** const tb = tbs->tb; + int status = 0; + int err; + + if (nlh->nlmsg_flags & NLM_F_EXCL) + return -EEXIST; + + if (nlh->nlmsg_flags & NLM_F_REPLACE) + return -EOPNOTSUPP; + + if (linkinfo[IFLA_INFO_DATA]) { + if (!ops || ops != dev->rtnl_link_ops || !ops->changelink) + return -EOPNOTSUPP; + + err = ops->changelink(dev, tb, data, extack); + if (err < 0) + return err; + + status |= DO_SETLINK_NOTIFY; + } + + if (linkinfo[IFLA_INFO_SLAVE_DATA]) { + const struct rtnl_link_ops *m_ops = NULL; + struct nlattr **slave_data = NULL; + struct net_device *master_dev; + + master_dev = netdev_master_upper_dev_get(dev); + if (master_dev) + m_ops = master_dev->rtnl_link_ops; + + if (!m_ops || !m_ops->slave_changelink) + return -EOPNOTSUPP; + + if (m_ops->slave_maxtype > RTNL_SLAVE_MAX_TYPE) + return -EINVAL; + + if (m_ops->slave_maxtype) { + err = nla_parse_nested_deprecated(tbs->slave_attr, + m_ops->slave_maxtype, + linkinfo[IFLA_INFO_SLAVE_DATA], + m_ops->slave_policy, extack); + if (err < 0) + return err; + + slave_data = tbs->slave_attr; + } + + err = m_ops->slave_changelink(master_dev, dev, tb, slave_data, extack); + if (err < 0) + return err; + + status |= DO_SETLINK_NOTIFY; + } + + return do_setlink(skb, dev, tgt_net, 
nlmsg_data(nlh), extack, tb, status); +} + static int rtnl_group_changelink(const struct sk_buff *skb, - struct net *net, int group, - struct ifinfomsg *ifm, - struct netlink_ext_ack *extack, - struct nlattr **tb) + struct net *net, struct net *tgt_net, + int group, struct ifinfomsg *ifm, + struct netlink_ext_ack *extack, + struct nlattr **tb) { struct net_device *dev, *aux; int err; for_each_netdev_safe(net, dev, aux) { if (dev->group == group) { - err = validate_linkmsg(dev, tb, extack); - if (err < 0) - return err; - err = do_setlink(skb, dev, ifm, extack, tb, 0); + err = do_setlink(skb, dev, tgt_net, ifm, extack, tb, 0); if (err < 0) return err; } @@ -3484,6 +3657,7 @@ static int rtnl_group_changelink(const struct sk_buff *skb, static int rtnl_newlink_create(struct sk_buff *skb, struct ifinfomsg *ifm, const struct rtnl_link_ops *ops, + struct net *tgt_net, struct net *link_net, const struct nlmsghdr *nlh, struct nlattr **tb, struct nlattr **data, struct netlink_ext_ack *extack) @@ -3491,7 +3665,6 @@ static int rtnl_newlink_create(struct sk_buff *skb, struct ifinfomsg *ifm, unsigned char name_assign_type = NET_NAME_USER; struct net *net = sock_net(skb->sk); u32 portid = NETLINK_CB(skb).portid; - struct net *dest_net, *link_net; struct net_device *dev; char ifname[IFNAMSIZ]; int err; @@ -3506,27 +3679,7 @@ static int rtnl_newlink_create(struct sk_buff *skb, struct ifinfomsg *ifm, name_assign_type = NET_NAME_ENUM; } - dest_net = rtnl_link_get_net_capable(skb, net, tb, CAP_NET_ADMIN); - if (IS_ERR(dest_net)) - return PTR_ERR(dest_net); - - if (tb[IFLA_LINK_NETNSID]) { - int id = nla_get_s32(tb[IFLA_LINK_NETNSID]); - - link_net = get_net_ns_by_id(dest_net, id); - if (!link_net) { - NL_SET_ERR_MSG(extack, "Unknown network namespace id"); - err = -EINVAL; - goto out; - } - err = -EPERM; - if (!netlink_ns_capable(skb, link_net->user_ns, CAP_NET_ADMIN)) - goto out; - } else { - link_net = NULL; - } - - dev = rtnl_create_link(link_net ? : dest_net, ifname, + dev = rtnl_create_link(link_net ? 
: tgt_net, ifname, name_assign_type, ops, tb, extack); if (IS_ERR(dev)) { err = PTR_ERR(dev); @@ -3548,7 +3701,7 @@ static int rtnl_newlink_create(struct sk_buff *skb, struct ifinfomsg *ifm, if (err < 0) goto out_unregister; if (link_net) { - err = dev_change_net_namespace(dev, dest_net, ifname); + err = dev_change_net_namespace(dev, tgt_net, ifname); if (err < 0) goto out_unregister; } @@ -3558,9 +3711,6 @@ static int rtnl_newlink_create(struct sk_buff *skb, struct ifinfomsg *ifm, goto out_unregister; } out: - if (link_net) - put_net(link_net); - put_net(dest_net); return err; out_unregister: if (ops->newlink) { @@ -3574,41 +3724,18 @@ out_unregister: goto out; } -struct rtnl_newlink_tbs { - struct nlattr *tb[IFLA_MAX + 1]; - struct nlattr *attr[RTNL_MAX_TYPE + 1]; - struct nlattr *slave_attr[RTNL_SLAVE_MAX_TYPE + 1]; -}; - static int __rtnl_newlink(struct sk_buff *skb, struct nlmsghdr *nlh, + const struct rtnl_link_ops *ops, + struct net *tgt_net, struct net *link_net, struct rtnl_newlink_tbs *tbs, + struct nlattr **data, struct netlink_ext_ack *extack) { - struct nlattr *linkinfo[IFLA_INFO_MAX + 1]; struct nlattr ** const tb = tbs->tb; - const struct rtnl_link_ops *m_ops; - struct net_device *master_dev; struct net *net = sock_net(skb->sk); - const struct rtnl_link_ops *ops; - struct nlattr **slave_data; - char kind[MODULE_NAME_LEN]; struct net_device *dev; struct ifinfomsg *ifm; - struct nlattr **data; bool link_specified; - int err; - -#ifdef CONFIG_MODULES -replay: -#endif - err = nlmsg_parse_deprecated(nlh, sizeof(*ifm), tb, IFLA_MAX, - ifla_policy, extack); - if (err < 0) - return err; - - err = rtnl_ensure_unique_netns(tb, extack, false); - if (err < 0) - return err; ifm = nlmsg_data(nlh); if (ifm->ifi_index > 0) { @@ -3625,151 +3752,135 @@ replay: dev = NULL; } - master_dev = NULL; - m_ops = NULL; - if (dev) { - master_dev = netdev_master_upper_dev_get(dev); - if (master_dev) - m_ops = master_dev->rtnl_link_ops; + if (dev) + return rtnl_changelink(skb, nlh, ops, dev, tgt_net, tbs, data, extack); + + if (!(nlh->nlmsg_flags & NLM_F_CREATE)) { + /* No dev found and NLM_F_CREATE not set. 
Requested dev does not exist, + * or it's for a group + */ + if (link_specified || !tb[IFLA_GROUP]) + return -ENODEV; + + return rtnl_group_changelink(skb, net, tgt_net, + nla_get_u32(tb[IFLA_GROUP]), + ifm, extack, tb); } + if (tb[IFLA_MAP] || tb[IFLA_PROTINFO]) + return -EOPNOTSUPP; + + if (!ops) { + NL_SET_ERR_MSG(extack, "Unknown device type"); + return -EOPNOTSUPP; + } + + return rtnl_newlink_create(skb, ifm, ops, tgt_net, link_net, nlh, tb, data, extack); +} + +static int rtnl_newlink(struct sk_buff *skb, struct nlmsghdr *nlh, + struct netlink_ext_ack *extack) +{ + struct nlattr **tb, **linkinfo, **data = NULL; + struct net *tgt_net, *link_net = NULL; + struct rtnl_link_ops *ops = NULL; + struct rtnl_newlink_tbs *tbs; + int ops_srcu_index; + int ret; + + tbs = kmalloc(sizeof(*tbs), GFP_KERNEL); + if (!tbs) + return -ENOMEM; + + tb = tbs->tb; + ret = nlmsg_parse_deprecated(nlh, sizeof(struct ifinfomsg), tb, + IFLA_MAX, ifla_policy, extack); + if (ret < 0) + goto free; + + ret = rtnl_ensure_unique_netns(tb, extack, false); + if (ret < 0) + goto free; + + linkinfo = tbs->linkinfo; if (tb[IFLA_LINKINFO]) { - err = nla_parse_nested_deprecated(linkinfo, IFLA_INFO_MAX, + ret = nla_parse_nested_deprecated(linkinfo, IFLA_INFO_MAX, tb[IFLA_LINKINFO], ifla_info_policy, NULL); - if (err < 0) - return err; - } else - memset(linkinfo, 0, sizeof(linkinfo)); + if (ret < 0) + goto free; + } else { + memset(linkinfo, 0, sizeof(tbs->linkinfo)); + } if (linkinfo[IFLA_INFO_KIND]) { + char kind[MODULE_NAME_LEN]; + nla_strscpy(kind, linkinfo[IFLA_INFO_KIND], sizeof(kind)); - ops = rtnl_link_ops_get(kind); - } else { - kind[0] = '\0'; - ops = NULL; + ops = rtnl_link_ops_get(kind, &ops_srcu_index); +#ifdef CONFIG_MODULES + if (!ops) { + __rtnl_unlock(); + request_module("rtnl-link-%s", kind); + rtnl_lock(); + ops = rtnl_link_ops_get(kind, &ops_srcu_index); + } +#endif } - data = NULL; if (ops) { if (ops->maxtype > RTNL_MAX_TYPE) return -EINVAL; if (ops->maxtype && linkinfo[IFLA_INFO_DATA]) { - err = nla_parse_nested_deprecated(tbs->attr, ops->maxtype, + ret = nla_parse_nested_deprecated(tbs->attr, ops->maxtype, linkinfo[IFLA_INFO_DATA], ops->policy, extack); - if (err < 0) - return err; + if (ret < 0) + goto put_ops; + data = tbs->attr; } + if (ops->validate) { - err = ops->validate(tb, data, extack); - if (err < 0) - return err; + ret = ops->validate(tb, data, extack); + if (ret < 0) + goto put_ops; } } - slave_data = NULL; - if (m_ops) { - if (m_ops->slave_maxtype > RTNL_SLAVE_MAX_TYPE) - return -EINVAL; - - if (m_ops->slave_maxtype && - linkinfo[IFLA_INFO_SLAVE_DATA]) { - err = nla_parse_nested_deprecated(tbs->slave_attr, - m_ops->slave_maxtype, - linkinfo[IFLA_INFO_SLAVE_DATA], - m_ops->slave_policy, - extack); - if (err < 0) - return err; - slave_data = tbs->slave_attr; - } + tgt_net = rtnl_link_get_net_capable(skb, sock_net(skb->sk), tb, CAP_NET_ADMIN); + if (IS_ERR(tgt_net)) { + ret = PTR_ERR(tgt_net); + goto put_ops; } - if (dev) { - int status = 0; - - if (nlh->nlmsg_flags & NLM_F_EXCL) - return -EEXIST; - if (nlh->nlmsg_flags & NLM_F_REPLACE) - return -EOPNOTSUPP; - - err = validate_linkmsg(dev, tb, extack); - if (err < 0) - return err; - - if (linkinfo[IFLA_INFO_DATA]) { - if (!ops || ops != dev->rtnl_link_ops || - !ops->changelink) - return -EOPNOTSUPP; - - err = ops->changelink(dev, tb, data, extack); - if (err < 0) - return err; - status |= DO_SETLINK_NOTIFY; - } - - if (linkinfo[IFLA_INFO_SLAVE_DATA]) { - if (!m_ops || !m_ops->slave_changelink) - return -EOPNOTSUPP; + if 
(tb[IFLA_LINK_NETNSID]) { + int id = nla_get_s32(tb[IFLA_LINK_NETNSID]); - err = m_ops->slave_changelink(master_dev, dev, tb, - slave_data, extack); - if (err < 0) - return err; - status |= DO_SETLINK_NOTIFY; + link_net = get_net_ns_by_id(tgt_net, id); + if (!link_net) { + NL_SET_ERR_MSG(extack, "Unknown network namespace id"); + ret = -EINVAL; + goto put_net; } - return do_setlink(skb, dev, ifm, extack, tb, status); - } - - if (!(nlh->nlmsg_flags & NLM_F_CREATE)) { - /* No dev found and NLM_F_CREATE not set. Requested dev does not exist, - * or it's for a group - */ - if (link_specified) - return -ENODEV; - if (tb[IFLA_GROUP]) - return rtnl_group_changelink(skb, net, - nla_get_u32(tb[IFLA_GROUP]), - ifm, extack, tb); - return -ENODEV; - } - - if (tb[IFLA_MAP] || tb[IFLA_PROTINFO]) - return -EOPNOTSUPP; - - if (!ops) { -#ifdef CONFIG_MODULES - if (kind[0]) { - __rtnl_unlock(); - request_module("rtnl-link-%s", kind); - rtnl_lock(); - ops = rtnl_link_ops_get(kind); - if (ops) - goto replay; + if (!netlink_ns_capable(skb, link_net->user_ns, CAP_NET_ADMIN)) { + ret = -EPERM; + goto put_net; } -#endif - NL_SET_ERR_MSG(extack, "Unknown device type"); - return -EOPNOTSUPP; } - return rtnl_newlink_create(skb, ifm, ops, nlh, tb, data, extack); -} + ret = __rtnl_newlink(skb, nlh, ops, tgt_net, link_net, tbs, data, extack); -static int rtnl_newlink(struct sk_buff *skb, struct nlmsghdr *nlh, - struct netlink_ext_ack *extack) -{ - struct rtnl_newlink_tbs *tbs; - int ret; - - tbs = kmalloc(sizeof(*tbs), GFP_KERNEL); - if (!tbs) - return -ENOMEM; - - ret = __rtnl_newlink(skb, nlh, tbs, extack); +put_net: + if (link_net) + put_net(link_net); + put_net(tgt_net); +put_ops: + if (ops) + rtnl_link_ops_put(ops, ops_srcu_index); +free: kfree(tbs); return ret; } @@ -6198,7 +6309,7 @@ static int rtnl_mdb_dump(struct sk_buff *skb, struct netlink_callback *cb) int idx, s_idx; int err; - NL_ASSERT_DUMP_CTX_FITS(struct rtnl_mdb_dump_ctx); + NL_ASSERT_CTX_FITS(struct rtnl_mdb_dump_ctx); if (cb->strict_check) { err = rtnl_mdb_valid_dump_req(cb->nlh, cb->extack); @@ -6765,6 +6876,38 @@ static struct pernet_operations rtnetlink_net_ops = { .exit = rtnetlink_net_exit, }; +static const struct rtnl_msg_handler rtnetlink_rtnl_msg_handlers[] __initconst = { + {.msgtype = RTM_NEWLINK, .doit = rtnl_newlink}, + {.msgtype = RTM_DELLINK, .doit = rtnl_dellink}, + {.msgtype = RTM_GETLINK, .doit = rtnl_getlink, + .dumpit = rtnl_dump_ifinfo, .flags = RTNL_FLAG_DUMP_SPLIT_NLM_DONE}, + {.msgtype = RTM_SETLINK, .doit = rtnl_setlink}, + {.msgtype = RTM_GETADDR, .dumpit = rtnl_dump_all}, + {.msgtype = RTM_GETROUTE, .dumpit = rtnl_dump_all}, + {.msgtype = RTM_GETNETCONF, .dumpit = rtnl_dump_all}, + {.msgtype = RTM_GETSTATS, .doit = rtnl_stats_get, + .dumpit = rtnl_stats_dump}, + {.msgtype = RTM_SETSTATS, .doit = rtnl_stats_set}, + {.msgtype = RTM_NEWLINKPROP, .doit = rtnl_newlinkprop}, + {.msgtype = RTM_DELLINKPROP, .doit = rtnl_dellinkprop}, + {.protocol = PF_BRIDGE, .msgtype = RTM_GETLINK, + .dumpit = rtnl_bridge_getlink}, + {.protocol = PF_BRIDGE, .msgtype = RTM_DELLINK, + .doit = rtnl_bridge_dellink}, + {.protocol = PF_BRIDGE, .msgtype = RTM_SETLINK, + .doit = rtnl_bridge_setlink}, + {.protocol = PF_BRIDGE, .msgtype = RTM_NEWNEIGH, .doit = rtnl_fdb_add}, + {.protocol = PF_BRIDGE, .msgtype = RTM_DELNEIGH, .doit = rtnl_fdb_del, + .flags = RTNL_FLAG_BULK_DEL_SUPPORTED}, + {.protocol = PF_BRIDGE, .msgtype = RTM_GETNEIGH, .doit = rtnl_fdb_get, + .dumpit = rtnl_fdb_dump}, + {.protocol = PF_BRIDGE, .msgtype = RTM_NEWMDB, .doit = rtnl_mdb_add}, 
+ {.protocol = PF_BRIDGE, .msgtype = RTM_DELMDB, .doit = rtnl_mdb_del, + .flags = RTNL_FLAG_BULK_DEL_SUPPORTED}, + {.protocol = PF_BRIDGE, .msgtype = RTM_GETMDB, .doit = rtnl_mdb_get, + .dumpit = rtnl_mdb_dump}, +}; + void __init rtnetlink_init(void) { if (register_pernet_subsys(&rtnetlink_net_ops)) @@ -6772,34 +6915,5 @@ void __init rtnetlink_init(void) register_netdevice_notifier(&rtnetlink_dev_notifier); - rtnl_register(PF_UNSPEC, RTM_GETLINK, rtnl_getlink, - rtnl_dump_ifinfo, RTNL_FLAG_DUMP_SPLIT_NLM_DONE); - rtnl_register(PF_UNSPEC, RTM_SETLINK, rtnl_setlink, NULL, 0); - rtnl_register(PF_UNSPEC, RTM_NEWLINK, rtnl_newlink, NULL, 0); - rtnl_register(PF_UNSPEC, RTM_DELLINK, rtnl_dellink, NULL, 0); - - rtnl_register(PF_UNSPEC, RTM_GETADDR, NULL, rtnl_dump_all, 0); - rtnl_register(PF_UNSPEC, RTM_GETROUTE, NULL, rtnl_dump_all, 0); - rtnl_register(PF_UNSPEC, RTM_GETNETCONF, NULL, rtnl_dump_all, 0); - - rtnl_register(PF_UNSPEC, RTM_NEWLINKPROP, rtnl_newlinkprop, NULL, 0); - rtnl_register(PF_UNSPEC, RTM_DELLINKPROP, rtnl_dellinkprop, NULL, 0); - - rtnl_register(PF_BRIDGE, RTM_NEWNEIGH, rtnl_fdb_add, NULL, 0); - rtnl_register(PF_BRIDGE, RTM_DELNEIGH, rtnl_fdb_del, NULL, - RTNL_FLAG_BULK_DEL_SUPPORTED); - rtnl_register(PF_BRIDGE, RTM_GETNEIGH, rtnl_fdb_get, rtnl_fdb_dump, 0); - - rtnl_register(PF_BRIDGE, RTM_GETLINK, NULL, rtnl_bridge_getlink, 0); - rtnl_register(PF_BRIDGE, RTM_DELLINK, rtnl_bridge_dellink, NULL, 0); - rtnl_register(PF_BRIDGE, RTM_SETLINK, rtnl_bridge_setlink, NULL, 0); - - rtnl_register(PF_UNSPEC, RTM_GETSTATS, rtnl_stats_get, rtnl_stats_dump, - 0); - rtnl_register(PF_UNSPEC, RTM_SETSTATS, rtnl_stats_set, NULL, 0); - - rtnl_register(PF_BRIDGE, RTM_GETMDB, rtnl_mdb_get, rtnl_mdb_dump, 0); - rtnl_register(PF_BRIDGE, RTM_NEWMDB, rtnl_mdb_add, NULL, 0); - rtnl_register(PF_BRIDGE, RTM_DELMDB, rtnl_mdb_del, NULL, - RTNL_FLAG_BULK_DEL_SUPPORTED); + rtnl_register_many(rtnetlink_rtnl_msg_handlers); } diff --git a/net/core/rtnl_net_debug.c b/net/core/rtnl_net_debug.c new file mode 100644 index 000000000000..f406045cbd0e --- /dev/null +++ b/net/core/rtnl_net_debug.c @@ -0,0 +1,125 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* Copyright Amazon.com Inc. or its affiliates. 
*/ + +#include <linux/init.h> +#include <linux/netdevice.h> +#include <linux/notifier.h> +#include <linux/rtnetlink.h> +#include <net/net_namespace.h> +#include <net/netns/generic.h> + +static int rtnl_net_debug_event(struct notifier_block *nb, + unsigned long event, void *ptr) +{ + struct net_device *dev = netdev_notifier_info_to_dev(ptr); + struct net *net = dev_net(dev); + enum netdev_cmd cmd = event; + + /* Keep enum and don't add default to trigger -Werror=switch */ + switch (cmd) { + case NETDEV_UP: + case NETDEV_DOWN: + case NETDEV_REBOOT: + case NETDEV_CHANGE: + case NETDEV_REGISTER: + case NETDEV_UNREGISTER: + case NETDEV_CHANGEMTU: + case NETDEV_CHANGEADDR: + case NETDEV_PRE_CHANGEADDR: + case NETDEV_GOING_DOWN: + case NETDEV_CHANGENAME: + case NETDEV_FEAT_CHANGE: + case NETDEV_BONDING_FAILOVER: + case NETDEV_PRE_UP: + case NETDEV_PRE_TYPE_CHANGE: + case NETDEV_POST_TYPE_CHANGE: + case NETDEV_POST_INIT: + case NETDEV_PRE_UNINIT: + case NETDEV_RELEASE: + case NETDEV_NOTIFY_PEERS: + case NETDEV_JOIN: + case NETDEV_CHANGEUPPER: + case NETDEV_RESEND_IGMP: + case NETDEV_PRECHANGEMTU: + case NETDEV_CHANGEINFODATA: + case NETDEV_BONDING_INFO: + case NETDEV_PRECHANGEUPPER: + case NETDEV_CHANGELOWERSTATE: + case NETDEV_UDP_TUNNEL_PUSH_INFO: + case NETDEV_UDP_TUNNEL_DROP_INFO: + case NETDEV_CHANGE_TX_QUEUE_LEN: + case NETDEV_CVLAN_FILTER_PUSH_INFO: + case NETDEV_CVLAN_FILTER_DROP_INFO: + case NETDEV_SVLAN_FILTER_PUSH_INFO: + case NETDEV_SVLAN_FILTER_DROP_INFO: + case NETDEV_OFFLOAD_XSTATS_ENABLE: + case NETDEV_OFFLOAD_XSTATS_DISABLE: + case NETDEV_OFFLOAD_XSTATS_REPORT_USED: + case NETDEV_OFFLOAD_XSTATS_REPORT_DELTA: + case NETDEV_XDP_FEAT_CHANGE: + ASSERT_RTNL(); + break; + + /* Once an event fully supports RTNL_NET, move it here + * and remove "if (0)" below. + * + * case NETDEV_XXX: + * ASSERT_RTNL_NET(net); + * break; + */ + } + + /* Just to avoid unused-variable error for dev and net. 
*/ + if (0) + ASSERT_RTNL_NET(net); + + return NOTIFY_DONE; +} + +static int rtnl_net_debug_net_id; + +static int __net_init rtnl_net_debug_net_init(struct net *net) +{ + struct notifier_block *nb; + + nb = net_generic(net, rtnl_net_debug_net_id); + nb->notifier_call = rtnl_net_debug_event; + + return register_netdevice_notifier_net(net, nb); +} + +static void __net_exit rtnl_net_debug_net_exit(struct net *net) +{ + struct notifier_block *nb; + + nb = net_generic(net, rtnl_net_debug_net_id); + unregister_netdevice_notifier_net(net, nb); +} + +static struct pernet_operations rtnl_net_debug_net_ops __net_initdata = { + .init = rtnl_net_debug_net_init, + .exit = rtnl_net_debug_net_exit, + .id = &rtnl_net_debug_net_id, + .size = sizeof(struct notifier_block), +}; + +static struct notifier_block rtnl_net_debug_block = { + .notifier_call = rtnl_net_debug_event, +}; + +static int __init rtnl_net_debug_init(void) +{ + int ret; + + ret = register_pernet_device(&rtnl_net_debug_net_ops); + if (ret) + return ret; + + ret = register_netdevice_notifier(&rtnl_net_debug_block); + if (ret) + unregister_pernet_subsys(&rtnl_net_debug_net_ops); + + return ret; +} + +subsys_initcall(rtnl_net_debug_init); diff --git a/net/core/skbuff.c b/net/core/skbuff.c index 74149dc4ee31..00afeb90c23a 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c @@ -5506,7 +5506,7 @@ static bool skb_may_tx_timestamp(struct sock *sk, bool tsonly) { bool ret; - if (likely(READ_ONCE(sysctl_tstamp_allow_data) || tsonly)) + if (likely(tsonly || READ_ONCE(sock_net(sk)->core.sysctl_tstamp_allow_data))) return true; read_lock_bh(&sk->sk_callback_lock); diff --git a/net/core/sock.c b/net/core/sock.c index 039be95c40cf..7f398bd07fb7 100644 --- a/net/core/sock.c +++ b/net/core/sock.c @@ -286,8 +286,6 @@ EXPORT_SYMBOL(sysctl_rmem_max); __u32 sysctl_wmem_default __read_mostly = SK_WMEM_MAX; __u32 sysctl_rmem_default __read_mostly = SK_RMEM_MAX; -int sysctl_tstamp_allow_data __read_mostly = 1; - DEFINE_STATIC_KEY_FALSE(memalloc_socks_key); EXPORT_SYMBOL_GPL(memalloc_socks_key); @@ -822,14 +820,11 @@ EXPORT_SYMBOL(sock_set_sndtimeo); static void __sock_set_timestamps(struct sock *sk, bool val, bool new, bool ns) { + sock_valbool_flag(sk, SOCK_RCVTSTAMP, val); + sock_valbool_flag(sk, SOCK_RCVTSTAMPNS, val && ns); if (val) { sock_valbool_flag(sk, SOCK_TSTAMP_NEW, new); - sock_valbool_flag(sk, SOCK_RCVTSTAMPNS, ns); - sock_set_flag(sk, SOCK_RCVTSTAMP); sock_enable_timestamp(sk, SOCK_TIMESTAMP); - } else { - sock_reset_flag(sk, SOCK_RCVTSTAMP); - sock_reset_flag(sk, SOCK_RCVTSTAMPNS); } } @@ -2594,14 +2589,11 @@ void __sock_wfree(struct sk_buff *skb) void skb_set_owner_w(struct sk_buff *skb, struct sock *sk) { skb_orphan(skb); - skb->sk = sk; #ifdef CONFIG_INET - if (unlikely(!sk_fullsock(sk))) { - skb->destructor = sock_edemux; - sock_hold(sk); - return; - } + if (unlikely(!sk_fullsock(sk))) + return skb_set_owner_edemux(skb, sk); #endif + skb->sk = sk; skb->destructor = sock_wfree; skb_set_hash_from_sk(skb, sk); /* @@ -2899,6 +2891,8 @@ int __sock_cmsg_send(struct sock *sk, struct cmsghdr *cmsg, { u32 tsflags; + BUILD_BUG_ON(SOF_TIMESTAMPING_LAST == (1 << 31)); + switch (cmsg->cmsg_type) { case SO_MARK: if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_RAW) && @@ -2927,6 +2921,17 @@ int __sock_cmsg_send(struct sock *sk, struct cmsghdr *cmsg, return -EINVAL; sockc->transmit_time = get_unaligned((u64 *)CMSG_DATA(cmsg)); break; + case SCM_TS_OPT_ID: + if (sk_is_tcp(sk)) + return -EINVAL; + tsflags = READ_ONCE(sk->sk_tsflags); + if (!(tsflags & 
SOF_TIMESTAMPING_OPT_ID)) + return -EINVAL; + if (cmsg->cmsg_len != CMSG_LEN(sizeof(u32))) + return -EINVAL; + sockc->ts_opt_id = *(u32 *)CMSG_DATA(cmsg); + sockc->tsflags |= SOCKCM_FLAG_TS_OPT_ID; + break; /* SCM_RIGHTS and SCM_CREDENTIALS are semantically in SOL_UNIX. */ case SCM_RIGHTS: case SCM_CREDENTIALS: @@ -3819,9 +3824,6 @@ void sk_common_release(struct sock *sk) sk->sk_prot->unhash(sk); - if (sk->sk_socket) - sk->sk_socket->sk = NULL; - /* * In this point socket cannot receive new packets, but it is possible * that some packets are in flight because some CPU runs receiver and diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c index 86a2476678c4..cb8d32e5c14e 100644 --- a/net/core/sysctl_net_core.c +++ b/net/core/sysctl_net_core.c @@ -51,29 +51,45 @@ int sysctl_devconf_inherit_init_net __read_mostly; EXPORT_SYMBOL(sysctl_devconf_inherit_init_net); #if IS_ENABLED(CONFIG_NET_FLOW_LIMIT) || IS_ENABLED(CONFIG_RPS) -static void dump_cpumask(void *buffer, size_t *lenp, loff_t *ppos, - struct cpumask *mask) +static int dump_cpumask(void *buffer, size_t *lenp, loff_t *ppos, + struct cpumask *mask) { - char kbuf[128]; + char *kbuf; int len; if (*ppos || !*lenp) { *lenp = 0; - return; + return 0; + } + + /* CPUs are displayed as a hex bitmap + a comma between each groups of 8 + * nibbles (except the last one which has a newline instead). + * Guesstimate the buffer size at the group granularity level. + */ + len = min(DIV_ROUND_UP(nr_cpumask_bits, 32) * (8 + 1), *lenp); + kbuf = kmalloc(len, GFP_KERNEL); + if (!kbuf) { + *lenp = 0; + return -ENOMEM; } - len = min(sizeof(kbuf) - 1, *lenp); len = scnprintf(kbuf, len, "%*pb", cpumask_pr_args(mask)); if (!len) { *lenp = 0; - return; + goto free_buf; } - if (len < *lenp) - kbuf[len++] = '\n'; + /* scnprintf writes a trailing null char not counted in the returned + * length, override it with a newline. + */ + kbuf[len++] = '\n'; memcpy(buffer, kbuf, len); *lenp = len; *ppos += len; + +free_buf: + kfree(kbuf); + return 0; } #endif @@ -117,8 +133,8 @@ static int rps_default_mask_sysctl(const struct ctl_table *table, int write, if (err) goto done; } else { - dump_cpumask(buffer, lenp, ppos, - net->core.rps_default_mask ? : cpu_none_mask); + err = dump_cpumask(buffer, lenp, ppos, + net->core.rps_default_mask ? 
: cpu_none_mask); } done: @@ -247,7 +263,7 @@ write_unlock: } rcu_read_unlock(); - dump_cpumask(buffer, lenp, ppos, mask); + ret = dump_cpumask(buffer, lenp, ppos, mask); } done: @@ -491,15 +507,6 @@ static struct ctl_table net_core_table[] = { .mode = 0644, .proc_handler = proc_dointvec, }, - { - .procname = "tstamp_allow_data", - .data = &sysctl_tstamp_allow_data, - .maxlen = sizeof(int), - .mode = 0644, - .proc_handler = proc_dointvec_minmax, - .extra1 = SYSCTL_ZERO, - .extra2 = SYSCTL_ONE - }, #ifdef CONFIG_RPS { .procname = "rps_sock_flow_entries", @@ -665,6 +672,15 @@ static struct ctl_table netns_core_table[] = { .extra2 = SYSCTL_ONE, .proc_handler = proc_dou8vec_minmax, }, + { + .procname = "tstamp_allow_data", + .data = &init_net.core.sysctl_tstamp_allow_data, + .maxlen = sizeof(u8), + .mode = 0644, + .proc_handler = proc_dou8vec_minmax, + .extra1 = SYSCTL_ZERO, + .extra2 = SYSCTL_ONE + }, /* sysctl_core_net_init() will set the values after this * to readonly in network namespaces */ diff --git a/net/dcb/dcbnl.c b/net/dcb/dcbnl.c index 2e6b8c8fd2de..03eb1d941fca 100644 --- a/net/dcb/dcbnl.c +++ b/net/dcb/dcbnl.c @@ -2408,6 +2408,11 @@ static struct notifier_block dcbnl_nb __read_mostly = { .notifier_call = dcbnl_netdevice_event, }; +static const struct rtnl_msg_handler dcbnl_rtnl_msg_handlers[] __initconst = { + {.msgtype = RTM_GETDCB, .doit = dcb_doit}, + {.msgtype = RTM_SETDCB, .doit = dcb_doit}, +}; + static int __init dcbnl_init(void) { int err; @@ -2416,8 +2421,7 @@ static int __init dcbnl_init(void) if (err) return err; - rtnl_register(PF_UNSPEC, RTM_GETDCB, dcb_doit, NULL, 0); - rtnl_register(PF_UNSPEC, RTM_SETDCB, dcb_doit, NULL, 0); + rtnl_register_many(dcbnl_rtnl_msg_handlers); return 0; } diff --git a/net/devlink/devl_internal.h b/net/devlink/devl_internal.h index c7a8e13f917c..a9f064ab9ed9 100644 --- a/net/devlink/devl_internal.h +++ b/net/devlink/devl_internal.h @@ -166,7 +166,7 @@ int devlink_nl_dumpit(struct sk_buff *msg, struct netlink_callback *cb, static inline struct devlink_nl_dump_state * devlink_dump_state(struct netlink_callback *cb) { - NL_ASSERT_DUMP_CTX_FITS(struct devlink_nl_dump_state); + NL_ASSERT_CTX_FITS(struct devlink_nl_dump_state); return (struct devlink_nl_dump_state *)cb->ctx; } diff --git a/net/dsa/dsa.c b/net/dsa/dsa.c index 1664547deffd..5a7c0e565a89 100644 --- a/net/dsa/dsa.c +++ b/net/dsa/dsa.c @@ -1505,14 +1505,6 @@ static int dsa_switch_probe(struct dsa_switch *ds) if (!ds->num_ports) return -EINVAL; - if (ds->phylink_mac_ops) { - if (ds->ops->phylink_mac_select_pcs || - ds->ops->phylink_mac_config || - ds->ops->phylink_mac_link_down || - ds->ops->phylink_mac_link_up) - return -EINVAL; - } - if (np) { err = dsa_switch_parse_of(ds, np); if (err) diff --git a/net/dsa/port.c b/net/dsa/port.c index 25258b33e59e..ee0aaec4c8e0 100644 --- a/net/dsa/port.c +++ b/net/dsa/port.c @@ -1575,44 +1575,16 @@ void dsa_port_set_tag_protocol(struct dsa_port *cpu_dp, cpu_dp->tag_ops = tag_ops; } -static struct phylink_pcs * -dsa_port_phylink_mac_select_pcs(struct phylink_config *config, - phy_interface_t interface) -{ - struct dsa_port *dp = dsa_phylink_to_port(config); - struct phylink_pcs *pcs = ERR_PTR(-EOPNOTSUPP); - struct dsa_switch *ds = dp->ds; - - if (ds->ops->phylink_mac_select_pcs) - pcs = ds->ops->phylink_mac_select_pcs(ds, dp->index, interface); - - return pcs; -} - static void dsa_port_phylink_mac_config(struct phylink_config *config, unsigned int mode, const struct phylink_link_state *state) { - struct dsa_port *dp = 
dsa_phylink_to_port(config); - struct dsa_switch *ds = dp->ds; - - if (!ds->ops->phylink_mac_config) - return; - - ds->ops->phylink_mac_config(ds, dp->index, mode, state); } static void dsa_port_phylink_mac_link_down(struct phylink_config *config, unsigned int mode, phy_interface_t interface) { - struct dsa_port *dp = dsa_phylink_to_port(config); - struct dsa_switch *ds = dp->ds; - - if (!ds->ops->phylink_mac_link_down) - return; - - ds->ops->phylink_mac_link_down(ds, dp->index, mode, interface); } static void dsa_port_phylink_mac_link_up(struct phylink_config *config, @@ -1622,18 +1594,9 @@ static void dsa_port_phylink_mac_link_up(struct phylink_config *config, int speed, int duplex, bool tx_pause, bool rx_pause) { - struct dsa_port *dp = dsa_phylink_to_port(config); - struct dsa_switch *ds = dp->ds; - - if (!ds->ops->phylink_mac_link_up) - return; - - ds->ops->phylink_mac_link_up(ds, dp->index, mode, interface, phydev, - speed, duplex, tx_pause, rx_pause); } static const struct phylink_mac_ops dsa_port_phylink_mac_ops = { - .mac_select_pcs = dsa_port_phylink_mac_select_pcs, .mac_config = dsa_port_phylink_mac_config, .mac_link_down = dsa_port_phylink_mac_link_down, .mac_link_up = dsa_port_phylink_mac_link_up, @@ -1871,9 +1834,6 @@ static void dsa_shared_port_link_down(struct dsa_port *dp) if (ds->phylink_mac_ops && ds->phylink_mac_ops->mac_link_down) ds->phylink_mac_ops->mac_link_down(&dp->pl_config, MLO_AN_FIXED, PHY_INTERFACE_MODE_NA); - else if (ds->ops->phylink_mac_link_down) - ds->ops->phylink_mac_link_down(ds, dp->index, MLO_AN_FIXED, - PHY_INTERFACE_MODE_NA); } int dsa_shared_port_link_register_of(struct dsa_port *dp) diff --git a/net/dsa/user.c b/net/dsa/user.c index 64f660d2334b..91a1fa5f8ab0 100644 --- a/net/dsa/user.c +++ b/net/dsa/user.c @@ -1308,8 +1308,7 @@ static int dsa_user_set_pauseparam(struct net_device *dev, } #ifdef CONFIG_NET_POLL_CONTROLLER -static int dsa_user_netpoll_setup(struct net_device *dev, - struct netpoll_info *ni) +static int dsa_user_netpoll_setup(struct net_device *dev) { struct net_device *conduit = dsa_user_to_conduit(dev); struct dsa_user_priv *p = netdev_priv(dev); diff --git a/net/ethtool/cmis.h b/net/ethtool/cmis.h index 3e7c293af78c..1e790413db0e 100644 --- a/net/ethtool/cmis.h +++ b/net/ethtool/cmis.h @@ -1,6 +1,7 @@ /* SPDX-License-Identifier: GPL-2.0-only */ #define ETHTOOL_CMIS_CDB_LPL_MAX_PL_LENGTH 120 +#define ETHTOOL_CMIS_CDB_EPL_MAX_PL_LENGTH 2048 #define ETHTOOL_CMIS_CDB_CMD_PAGE 0x9F #define ETHTOOL_CMIS_CDB_PAGE_I2C_ADDR 0x50 @@ -23,6 +24,7 @@ enum ethtool_cmis_cdb_cmd_id { ETHTOOL_CMIS_CDB_CMD_FW_MANAGMENT_FEATURES = 0x0041, ETHTOOL_CMIS_CDB_CMD_START_FW_DOWNLOAD = 0x0101, ETHTOOL_CMIS_CDB_CMD_WRITE_FW_BLOCK_LPL = 0x0103, + ETHTOOL_CMIS_CDB_CMD_WRITE_FW_BLOCK_EPL = 0x0104, ETHTOOL_CMIS_CDB_CMD_COMPLETE_FW_DOWNLOAD = 0x0107, ETHTOOL_CMIS_CDB_CMD_RUN_FW_IMAGE = 0x0109, ETHTOOL_CMIS_CDB_CMD_COMMIT_FW_IMAGE = 0x010A, @@ -38,6 +40,7 @@ enum ethtool_cmis_cdb_cmd_id { * @resv1: Added to match the CMIS standard request continuity. * @resv2: Added to match the CMIS standard request continuity. * @payload: Payload for the CDB commands. + * @epl: Extended payload for the CDB commands. */ struct ethtool_cmis_cdb_request { __be16 id; @@ -49,6 +52,7 @@ struct ethtool_cmis_cdb_request { u8 resv2; u8 payload[ETHTOOL_CMIS_CDB_LPL_MAX_PL_LENGTH]; ); + u8 *epl; /* Everything above this field checksummed. 
*/ }; #define CDB_F_COMPLETION_VALID BIT(0) @@ -96,13 +100,15 @@ struct ethtool_cmis_cdb_rpl { u8 payload[ETHTOOL_CMIS_CDB_LPL_MAX_PL_LENGTH]; }; -u32 ethtool_cmis_get_max_payload_size(u8 num_of_byte_octs); +u32 ethtool_cmis_get_max_lpl_size(u8 num_of_byte_octs); +u32 ethtool_cmis_get_max_epl_size(u8 num_of_byte_octs); void ethtool_cmis_cdb_compose_args(struct ethtool_cmis_cdb_cmd_args *args, - enum ethtool_cmis_cdb_cmd_id cmd, u8 *pl, - u8 lpl_len, u16 max_duration, - u8 read_write_len_ext, u16 msleep_pre_rpl, - u8 rpl_exp_len, u8 flags); + enum ethtool_cmis_cdb_cmd_id cmd, u8 *lpl, + u8 lpl_len, u8 *epl, u16 epl_len, + u16 max_duration, u8 read_write_len_ext, + u16 msleep_pre_rpl, u8 rpl_exp_len, + u8 flags); void ethtool_cmis_cdb_check_completion_flag(u8 cmis_rev, u8 *flags); diff --git a/net/ethtool/cmis_cdb.c b/net/ethtool/cmis_cdb.c index 4d5581147952..d159dc121bde 100644 --- a/net/ethtool/cmis_cdb.c +++ b/net/ethtool/cmis_cdb.c @@ -11,25 +11,41 @@ * min(i, 15) byte octets where i specifies the allowable additional number of * byte octets in a READ or a WRITE. */ -u32 ethtool_cmis_get_max_payload_size(u8 num_of_byte_octs) +u32 ethtool_cmis_get_max_lpl_size(u8 num_of_byte_octs) { return 8 * (1 + min_t(u8, num_of_byte_octs, 15)); } +/* For accessing the EPL field on page 9Fh, the allowable length extension is + * min(i, 255) byte octets where i specifies the allowable additional number of + * byte octets in a READ or a WRITE. + */ +u32 ethtool_cmis_get_max_epl_size(u8 num_of_byte_octs) +{ + return 8 * (1 + min_t(u8, num_of_byte_octs, 255)); +} + void ethtool_cmis_cdb_compose_args(struct ethtool_cmis_cdb_cmd_args *args, - enum ethtool_cmis_cdb_cmd_id cmd, u8 *pl, - u8 lpl_len, u16 max_duration, - u8 read_write_len_ext, u16 msleep_pre_rpl, - u8 rpl_exp_len, u8 flags) + enum ethtool_cmis_cdb_cmd_id cmd, u8 *lpl, + u8 lpl_len, u8 *epl, u16 epl_len, + u16 max_duration, u8 read_write_len_ext, + u16 msleep_pre_rpl, u8 rpl_exp_len, u8 flags) { args->req.id = cpu_to_be16(cmd); args->req.lpl_len = lpl_len; - if (pl) - memcpy(args->req.payload, pl, args->req.lpl_len); + if (lpl) { + memcpy(args->req.payload, lpl, args->req.lpl_len); + args->read_write_len_ext = + ethtool_cmis_get_max_lpl_size(read_write_len_ext); + } + if (epl) { + args->req.epl_len = cpu_to_be16(epl_len); + args->req.epl = epl; + args->read_write_len_ext = + ethtool_cmis_get_max_epl_size(read_write_len_ext); + } args->max_duration = max_duration; - args->read_write_len_ext = - ethtool_cmis_get_max_payload_size(read_write_len_ext); args->msleep_pre_rpl = msleep_pre_rpl; args->rpl_exp_len = rpl_exp_len; args->flags = flags; @@ -183,7 +199,7 @@ cmis_cdb_validate_password(struct ethtool_cmis_cdb *cdb, } ethtool_cmis_cdb_compose_args(&args, ETHTOOL_CMIS_CDB_CMD_QUERY_STATUS, - (u8 *)&qs_pl, sizeof(qs_pl), 0, + (u8 *)&qs_pl, sizeof(qs_pl), NULL, 0, 0, cdb->read_write_len_ext, 1000, sizeof(*rpl), CDB_F_COMPLETION_VALID | CDB_F_STATUS_VALID); @@ -245,8 +261,9 @@ static int cmis_cdb_module_features_get(struct ethtool_cmis_cdb *cdb, ethtool_cmis_cdb_check_completion_flag(cdb->cmis_rev, &flags); ethtool_cmis_cdb_compose_args(&args, ETHTOOL_CMIS_CDB_CMD_MODULE_FEATURES, - NULL, 0, 0, cdb->read_write_len_ext, - 1000, sizeof(*rpl), flags); + NULL, 0, NULL, 0, 0, + cdb->read_write_len_ext, 1000, + sizeof(*rpl), flags); err = ethtool_cmis_cdb_execute_cmd(dev, &args); if (err < 0) { @@ -546,6 +563,49 @@ __ethtool_cmis_cdb_execute_cmd(struct net_device *dev, return err; } +#define CMIS_CDB_EPL_PAGE_START 0xA0 +#define CMIS_CDB_EPL_PAGE_END 0xAF 
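
For orientation: the EPL window described by the page defines above and the offset defines that follow is sixteen pages (0xA0 through 0xAF), each exposing only the upper half of its 256-byte page (offsets 128-255). That yields 16 * 128 = 2048 bytes per pass, which matches the new ETHTOOL_CMIS_CDB_EPL_MAX_PL_LENGTH added to cmis.h above. A standalone sketch of that arithmetic (illustrative only, not kernel code):

/* Capacity of one EPL write window, from the constants above and below. */
#include <assert.h>

static unsigned int epl_window_bytes(void)
{
	unsigned int pages    = 0xaf - 0xa0 + 1;	/* 16 EPL pages   */
	unsigned int per_page = 255 - 128 + 1;		/* 128 bytes each */

	return pages * per_page;			/* 2048 bytes     */
}

int main(void)
{
	assert(epl_window_bytes() == 2048);
	return 0;
}

This is why ethtool_cmis_cdb_execute_epl_cmd() below walks pages and offsets in a nested loop: one EPL payload of up to 2048 bytes is scattered across the sixteen firmware-block pages before the command itself is kicked off.
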
+#define CMIS_CDB_EPL_FW_BLOCK_OFFSET_START 128 +#define CMIS_CDB_EPL_FW_BLOCK_OFFSET_END 255 + +static int +ethtool_cmis_cdb_execute_epl_cmd(struct net_device *dev, + struct ethtool_cmis_cdb_cmd_args *args, + struct ethtool_module_eeprom *page_data) +{ + u16 epl_len = be16_to_cpu(args->req.epl_len); + u32 bytes_written = 0; + u8 page; + int err; + + for (page = CMIS_CDB_EPL_PAGE_START; + page <= CMIS_CDB_EPL_PAGE_END && bytes_written < epl_len; page++) { + u16 offset = CMIS_CDB_EPL_FW_BLOCK_OFFSET_START; + + while (offset <= CMIS_CDB_EPL_FW_BLOCK_OFFSET_END && + bytes_written < epl_len) { + u32 bytes_left = epl_len - bytes_written; + u16 space_left, bytes_to_write; + + space_left = CMIS_CDB_EPL_FW_BLOCK_OFFSET_END - offset + 1; + bytes_to_write = min_t(u16, bytes_left, + min_t(u16, space_left, + args->read_write_len_ext)); + + err = __ethtool_cmis_cdb_execute_cmd(dev, page_data, + page, offset, + bytes_to_write, + args->req.epl + bytes_written); + if (err < 0) + return err; + + offset += bytes_to_write; + bytes_written += bytes_to_write; + } + } + return 0; +} + static u8 cmis_cdb_calc_checksum(const void *data, size_t size) { const u8 *bytes = (const u8 *)data; @@ -567,7 +627,9 @@ int ethtool_cmis_cdb_execute_cmd(struct net_device *dev, int err; args->req.chk_code = - cmis_cdb_calc_checksum(&args->req, sizeof(args->req)); + cmis_cdb_calc_checksum(&args->req, + offsetof(struct ethtool_cmis_cdb_request, + epl)); if (args->req.lpl_len > args->read_write_len_ext) { args->err_msg = "LPL length is longer than CDB read write length extension allows"; @@ -589,6 +651,12 @@ int ethtool_cmis_cdb_execute_cmd(struct net_device *dev, if (err < 0) return err; + if (args->req.epl_len) { + err = ethtool_cmis_cdb_execute_epl_cmd(dev, args, &page_data); + if (err < 0) + return err; + } + offset = CMIS_CDB_CMD_ID_OFFSET + offsetof(struct ethtool_cmis_cdb_request, id); err = __ethtool_cmis_cdb_execute_cmd(dev, &page_data, diff --git a/net/ethtool/cmis_fw_update.c b/net/ethtool/cmis_fw_update.c index 655ff5224ffa..48aef6220f00 100644 --- a/net/ethtool/cmis_fw_update.c +++ b/net/ethtool/cmis_fw_update.c @@ -9,6 +9,7 @@ struct cmis_fw_update_fw_mng_features { u8 start_cmd_payload_size; + u8 write_mechanism; u16 max_duration_start; u16 max_duration_write; u16 max_duration_complete; @@ -36,7 +37,9 @@ struct cmis_cdb_fw_mng_features_rpl { }; enum cmis_cdb_fw_write_mechanism { + CMIS_CDB_FW_WRITE_MECHANISM_NONE = 0x00, CMIS_CDB_FW_WRITE_MECHANISM_LPL = 0x01, + CMIS_CDB_FW_WRITE_MECHANISM_EPL = 0x10, CMIS_CDB_FW_WRITE_MECHANISM_BOTH = 0x11, }; @@ -54,7 +57,8 @@ cmis_fw_update_fw_mng_features_get(struct ethtool_cmis_cdb *cdb, ethtool_cmis_cdb_check_completion_flag(cdb->cmis_rev, &flags); ethtool_cmis_cdb_compose_args(&args, ETHTOOL_CMIS_CDB_CMD_FW_MANAGMENT_FEATURES, - NULL, 0, cdb->max_completion_time, + NULL, 0, NULL, 0, + cdb->max_completion_time, cdb->read_write_len_ext, 1000, sizeof(*rpl), flags); @@ -67,10 +71,9 @@ cmis_fw_update_fw_mng_features_get(struct ethtool_cmis_cdb *cdb, } rpl = (struct cmis_cdb_fw_mng_features_rpl *)args.req.payload; - if (!(rpl->write_mechanism == CMIS_CDB_FW_WRITE_MECHANISM_LPL || - rpl->write_mechanism == CMIS_CDB_FW_WRITE_MECHANISM_BOTH)) { + if (rpl->write_mechanism == CMIS_CDB_FW_WRITE_MECHANISM_NONE) { ethnl_module_fw_flash_ntf_err(dev, ntf_params, - "Write LPL is not supported", + "CDB write mechanism is not supported", NULL); return -EOPNOTSUPP; } @@ -82,6 +85,10 @@ cmis_fw_update_fw_mng_features_get(struct ethtool_cmis_cdb *cdb, */ cdb->read_write_len_ext = 
rpl->read_write_len_ext; fw_mng->start_cmd_payload_size = rpl->start_cmd_payload_size; + fw_mng->write_mechanism = + rpl->write_mechanism == CMIS_CDB_FW_WRITE_MECHANISM_LPL ? + CMIS_CDB_FW_WRITE_MECHANISM_LPL : + CMIS_CDB_FW_WRITE_MECHANISM_EPL; fw_mng->max_duration_start = be16_to_cpu(rpl->max_duration_start); fw_mng->max_duration_write = be16_to_cpu(rpl->max_duration_write); fw_mng->max_duration_complete = be16_to_cpu(rpl->max_duration_complete); @@ -122,7 +129,7 @@ cmis_fw_update_start_download(struct ethtool_cmis_cdb *cdb, ethtool_cmis_cdb_compose_args(&args, ETHTOOL_CMIS_CDB_CMD_START_FW_DOWNLOAD, - (u8 *)&pl, lpl_len, + (u8 *)&pl, lpl_len, NULL, 0, fw_mng->max_duration_start, cdb->read_write_len_ext, 1000, 0, CDB_F_COMPLETION_VALID | CDB_F_STATUS_VALID); @@ -148,9 +155,9 @@ struct cmis_cdb_write_fw_block_lpl_pl { }; static int -cmis_fw_update_write_image(struct ethtool_cmis_cdb *cdb, - struct ethtool_cmis_fw_update_params *fw_update, - struct cmis_fw_update_fw_mng_features *fw_mng) +cmis_fw_update_write_image_lpl(struct ethtool_cmis_cdb *cdb, + struct ethtool_cmis_fw_update_params *fw_update, + struct cmis_fw_update_fw_mng_features *fw_mng) { u8 start = fw_mng->start_cmd_payload_size; u32 offset, max_block_size, max_lpl_len; @@ -158,7 +165,7 @@ cmis_fw_update_write_image(struct ethtool_cmis_cdb *cdb, int err; max_lpl_len = min_t(u32, - ethtool_cmis_get_max_payload_size(cdb->read_write_len_ext), + ethtool_cmis_get_max_lpl_size(cdb->read_write_len_ext), ETHTOOL_CMIS_CDB_LPL_MAX_PL_LENGTH); max_block_size = max_lpl_len - sizeof_field(struct cmis_cdb_write_fw_block_lpl_pl, @@ -183,7 +190,7 @@ cmis_fw_update_write_image(struct ethtool_cmis_cdb *cdb, ethtool_cmis_cdb_compose_args(&args, ETHTOOL_CMIS_CDB_CMD_WRITE_FW_BLOCK_LPL, - (u8 *)&pl, lpl_len, + (u8 *)&pl, lpl_len, NULL, 0, fw_mng->max_duration_write, cdb->read_write_len_ext, 1, 0, CDB_F_COMPLETION_VALID | CDB_F_STATUS_VALID); @@ -201,6 +208,67 @@ cmis_fw_update_write_image(struct ethtool_cmis_cdb *cdb, return 0; } +struct cmis_cdb_write_fw_block_epl_pl { + u8 fw_block[ETHTOOL_CMIS_CDB_EPL_MAX_PL_LENGTH]; +}; + +static int +cmis_fw_update_write_image_epl(struct ethtool_cmis_cdb *cdb, + struct ethtool_cmis_fw_update_params *fw_update, + struct cmis_fw_update_fw_mng_features *fw_mng) +{ + u8 start = fw_mng->start_cmd_payload_size; + u32 image_size = fw_update->fw->size; + u32 offset, lpl_len; + int err; + + lpl_len = sizeof_field(struct cmis_cdb_write_fw_block_lpl_pl, + block_address); + + for (offset = start; offset < image_size; + offset += ETHTOOL_CMIS_CDB_EPL_MAX_PL_LENGTH) { + struct cmis_cdb_write_fw_block_lpl_pl lpl = { + .block_address = cpu_to_be32(offset - start), + }; + struct cmis_cdb_write_fw_block_epl_pl *epl; + struct ethtool_cmis_cdb_cmd_args args = {}; + u32 epl_len; + + ethnl_module_fw_flash_ntf_in_progress(fw_update->dev, + &fw_update->ntf_params, + offset - start, + image_size); + + epl_len = min_t(u32, ETHTOOL_CMIS_CDB_EPL_MAX_PL_LENGTH, + image_size - offset); + epl = kmalloc_array(epl_len, sizeof(u8), GFP_KERNEL); + if (!epl) + return -ENOMEM; + + memcpy(epl->fw_block, &fw_update->fw->data[offset], epl_len); + + ethtool_cmis_cdb_compose_args(&args, + ETHTOOL_CMIS_CDB_CMD_WRITE_FW_BLOCK_EPL, + (u8 *)&lpl, lpl_len, (u8 *)epl, + epl_len, + fw_mng->max_duration_write, + cdb->read_write_len_ext, 1, 0, + CDB_F_COMPLETION_VALID | CDB_F_STATUS_VALID); + + err = ethtool_cmis_cdb_execute_cmd(fw_update->dev, &args); + kfree(epl); + if (err < 0) { + ethnl_module_fw_flash_ntf_err(fw_update->dev, + &fw_update->ntf_params, + 
"Write FW block EPL command failed", + args.err_msg); + return err; + } + } + + return 0; +} + static int cmis_fw_update_complete_download(struct ethtool_cmis_cdb *cdb, struct net_device *dev, @@ -212,7 +280,8 @@ cmis_fw_update_complete_download(struct ethtool_cmis_cdb *cdb, ethtool_cmis_cdb_compose_args(&args, ETHTOOL_CMIS_CDB_CMD_COMPLETE_FW_DOWNLOAD, - NULL, 0, fw_mng->max_duration_complete, + NULL, 0, NULL, 0, + fw_mng->max_duration_complete, cdb->read_write_len_ext, 1000, 0, CDB_F_COMPLETION_VALID | CDB_F_STATUS_VALID); @@ -236,9 +305,15 @@ cmis_fw_update_download_image(struct ethtool_cmis_cdb *cdb, if (err < 0) return err; - err = cmis_fw_update_write_image(cdb, fw_update, fw_mng); - if (err < 0) - return err; + if (fw_mng->write_mechanism == CMIS_CDB_FW_WRITE_MECHANISM_LPL) { + err = cmis_fw_update_write_image_lpl(cdb, fw_update, fw_mng); + if (err < 0) + return err; + } else { + err = cmis_fw_update_write_image_epl(cdb, fw_update, fw_mng); + if (err < 0) + return err; + } err = cmis_fw_update_complete_download(cdb, fw_update->dev, fw_mng, &fw_update->ntf_params); @@ -294,7 +369,7 @@ cmis_fw_update_run_image(struct ethtool_cmis_cdb *cdb, struct net_device *dev, int err; ethtool_cmis_cdb_compose_args(&args, ETHTOOL_CMIS_CDB_CMD_RUN_FW_IMAGE, - (u8 *)&pl, sizeof(pl), + (u8 *)&pl, sizeof(pl), NULL, 0, cdb->max_completion_time, cdb->read_write_len_ext, 1000, 0, CDB_F_MODULE_STATE_VALID); @@ -326,7 +401,8 @@ cmis_fw_update_commit_image(struct ethtool_cmis_cdb *cdb, ethtool_cmis_cdb_compose_args(&args, ETHTOOL_CMIS_CDB_CMD_COMMIT_FW_IMAGE, - NULL, 0, cdb->max_completion_time, + NULL, 0, NULL, 0, + cdb->max_completion_time, cdb->read_write_len_ext, 1000, 0, CDB_F_COMPLETION_VALID | CDB_F_STATUS_VALID); diff --git a/net/ethtool/common.c b/net/ethtool/common.c index dd345efa114b..0d62363dbd9d 100644 --- a/net/ethtool/common.c +++ b/net/ethtool/common.c @@ -684,6 +684,54 @@ int ethtool_check_max_channel(struct net_device *dev, return 0; } +int ethtool_check_rss_ctx_busy(struct net_device *dev, u32 rss_context) +{ + const struct ethtool_ops *ops = dev->ethtool_ops; + struct ethtool_rxnfc *info; + int rc, i, rule_cnt; + + if (!ops->get_rxnfc) + return 0; + + rule_cnt = ethtool_get_rxnfc_rule_count(dev); + if (!rule_cnt) + return 0; + + if (rule_cnt < 0) + return -EINVAL; + + info = kvzalloc(struct_size(info, rule_locs, rule_cnt), GFP_KERNEL); + if (!info) + return -ENOMEM; + + info->cmd = ETHTOOL_GRXCLSRLALL; + info->rule_cnt = rule_cnt; + rc = ops->get_rxnfc(dev, info, info->rule_locs); + if (rc) + goto out_free; + + for (i = 0; i < rule_cnt; i++) { + struct ethtool_rxnfc rule_info = { + .cmd = ETHTOOL_GRXCLSRULE, + .fs.location = info->rule_locs[i], + }; + + rc = ops->get_rxnfc(dev, &rule_info, NULL); + if (rc) + goto out_free; + + if (rule_info.fs.flow_type & FLOW_RSS && + rule_info.rss_context == rss_context) { + rc = -EBUSY; + goto out_free; + } + } + +out_free: + kvfree(info); + return rc; +} + int ethtool_check_ops(const struct ethtool_ops *ops) { if (WARN_ON(ops->set_coalesce && !ops->supported_coalesce_params)) diff --git a/net/ethtool/common.h b/net/ethtool/common.h index d55d5201b085..4a2de3ce7354 100644 --- a/net/ethtool/common.h +++ b/net/ethtool/common.h @@ -47,6 +47,7 @@ bool convert_legacy_settings_to_link_ksettings( int ethtool_check_max_channel(struct net_device *dev, struct ethtool_channels channels, struct genl_info *info); +int ethtool_check_rss_ctx_busy(struct net_device *dev, u32 rss_context); int __ethtool_get_ts_info(struct net_device *dev, struct kernel_ethtool_ts_info 
*info); extern const struct ethtool_phy_ops *ethtool_phy_ops; diff --git a/net/ethtool/ioctl.c b/net/ethtool/ioctl.c index 65cfe76dafbe..5cc131cdb1bc 100644 --- a/net/ethtool/ioctl.c +++ b/net/ethtool/ioctl.c @@ -1462,6 +1462,13 @@ static noinline_for_stack int ethtool_set_rxfh(struct net_device *dev, mutex_lock(&dev->ethtool->rss_lock); locked = true; } + + if (rxfh.rss_context && rxfh_dev.rss_delete) { + ret = ethtool_check_rss_ctx_busy(dev, rxfh.rss_context); + if (ret) + goto out; + } + if (create) { if (rxfh_dev.rss_delete) { ret = -EINVAL; @@ -1505,6 +1512,7 @@ static noinline_for_stack int ethtool_set_rxfh(struct net_device *dev, extack); /* Make sure driver populates defaults */ WARN_ON_ONCE(!ret && !rxfh_dev.key && + ops->rxfh_per_ctx_key && !memchr_inv(ethtool_rxfh_context_key(ctx), 0, ctx->key_size)); } else if (rxfh_dev.rss_delete) { diff --git a/net/ethtool/rss.c b/net/ethtool/rss.c index e07386275e14..7cb106b590ab 100644 --- a/net/ethtool/rss.c +++ b/net/ethtool/rss.c @@ -224,7 +224,7 @@ struct rss_nl_dump_ctx { static struct rss_nl_dump_ctx *rss_dump_ctx(struct netlink_callback *cb) { - NL_ASSERT_DUMP_CTX_FITS(struct rss_nl_dump_ctx); + NL_ASSERT_CTX_FITS(struct rss_nl_dump_ctx); return (struct rss_nl_dump_ctx *)cb->ctx; } diff --git a/net/hsr/hsr_netlink.c b/net/hsr/hsr_netlink.c index f6ff0b61e08a..6f09b9512484 100644 --- a/net/hsr/hsr_netlink.c +++ b/net/hsr/hsr_netlink.c @@ -128,9 +128,9 @@ static void hsr_dellink(struct net_device *dev, struct list_head *head) { struct hsr_priv *hsr = netdev_priv(dev); - del_timer_sync(&hsr->prune_timer); - del_timer_sync(&hsr->prune_proxy_timer); - del_timer_sync(&hsr->announce_timer); + timer_delete_sync(&hsr->prune_timer); + timer_delete_sync(&hsr->prune_proxy_timer); + timer_delete_sync(&hsr->announce_timer); timer_delete_sync(&hsr->announce_proxy_timer); hsr_debugfs_term(hsr); diff --git a/net/ieee802154/socket.c b/net/ieee802154/socket.c index 990a83455dcf..18d267921bb5 100644 --- a/net/ieee802154/socket.c +++ b/net/ieee802154/socket.c @@ -1043,19 +1043,21 @@ static int ieee802154_create(struct net *net, struct socket *sock, if (sk->sk_prot->hash) { rc = sk->sk_prot->hash(sk); - if (rc) { - sk_common_release(sk); - goto out; - } + if (rc) + goto out_sk_release; } if (sk->sk_prot->init) { rc = sk->sk_prot->init(sk); if (rc) - sk_common_release(sk); + goto out_sk_release; } out: return rc; +out_sk_release: + sk_common_release(sk); + sock->sk = NULL; + goto out; } static const struct net_proto_family ieee802154_family_ops = { diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c index b24d74616637..8095e82de808 100644 --- a/net/ipv4/af_inet.c +++ b/net/ipv4/af_inet.c @@ -376,32 +376,30 @@ lookup_protocol: inet->inet_sport = htons(inet->inet_num); /* Add to protocol hash chains. 
*/ err = sk->sk_prot->hash(sk); - if (err) { - sk_common_release(sk); - goto out; - } + if (err) + goto out_sk_release; } if (sk->sk_prot->init) { err = sk->sk_prot->init(sk); - if (err) { - sk_common_release(sk); - goto out; - } + if (err) + goto out_sk_release; } if (!kern) { err = BPF_CGROUP_RUN_PROG_INET_SOCK(sk); - if (err) { - sk_common_release(sk); - goto out; - } + if (err) + goto out_sk_release; } out: return err; out_rcu_unlock: rcu_read_unlock(); goto out; +out_sk_release: + sk_common_release(sk); + sock->sk = NULL; + goto out; } diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c index 7cf5f7d0d0de..5f859d01cbbe 100644 --- a/net/ipv4/devinet.c +++ b/net/ipv4/devinet.c @@ -119,11 +119,9 @@ struct inet_fill_args { #define IN4_ADDR_HSIZE_SHIFT 8 #define IN4_ADDR_HSIZE (1U << IN4_ADDR_HSIZE_SHIFT) -static struct hlist_head inet_addr_lst[IN4_ADDR_HSIZE]; - static u32 inet_addr_hash(const struct net *net, __be32 addr) { - u32 val = (__force u32) addr ^ net_hash_mix(net); + u32 val = __ipv4_addr_hash(addr, net_hash_mix(net)); return hash_32(val, IN4_ADDR_HSIZE_SHIFT); } @@ -133,13 +131,13 @@ static void inet_hash_insert(struct net *net, struct in_ifaddr *ifa) u32 hash = inet_addr_hash(net, ifa->ifa_local); ASSERT_RTNL(); - hlist_add_head_rcu(&ifa->hash, &inet_addr_lst[hash]); + hlist_add_head_rcu(&ifa->addr_lst, &net->ipv4.inet_addr_lst[hash]); } static void inet_hash_remove(struct in_ifaddr *ifa) { ASSERT_RTNL(); - hlist_del_init_rcu(&ifa->hash); + hlist_del_init_rcu(&ifa->addr_lst); } /** @@ -186,9 +184,8 @@ struct in_ifaddr *inet_lookup_ifaddr_rcu(struct net *net, __be32 addr) u32 hash = inet_addr_hash(net, addr); struct in_ifaddr *ifa; - hlist_for_each_entry_rcu(ifa, &inet_addr_lst[hash], hash) - if (ifa->ifa_local == addr && - net_eq(dev_net(ifa->ifa_dev->dev), net)) + hlist_for_each_entry_rcu(ifa, &net->ipv4.inet_addr_lst[hash], addr_lst) + if (ifa->ifa_local == addr) return ifa; return NULL; @@ -227,7 +224,7 @@ static struct in_ifaddr *inet_alloc_ifa(struct in_device *in_dev) in_dev_hold(in_dev); ifa->ifa_dev = in_dev; - INIT_HLIST_NODE(&ifa->hash); + INIT_HLIST_NODE(&ifa->addr_lst); return ifa; } @@ -499,15 +496,12 @@ static void inet_del_ifa(struct in_device *in_dev, __inet_del_ifa(in_dev, ifap, destroy, NULL, 0); } -static void check_lifetime(struct work_struct *work); - -static DECLARE_DELAYED_WORK(check_lifetime_work, check_lifetime); - static int __inet_insert_ifa(struct in_ifaddr *ifa, struct nlmsghdr *nlh, u32 portid, struct netlink_ext_ack *extack) { struct in_ifaddr __rcu **last_primary, **ifap; struct in_device *in_dev = ifa->ifa_dev; + struct net *net = dev_net(in_dev->dev); struct in_validator_info ivi; struct in_ifaddr *ifa1; int ret; @@ -576,8 +570,8 @@ static int __inet_insert_ifa(struct in_ifaddr *ifa, struct nlmsghdr *nlh, inet_hash_insert(dev_net(in_dev->dev), ifa); - cancel_delayed_work(&check_lifetime_work); - queue_delayed_work(system_power_efficient_wq, &check_lifetime_work, 0); + cancel_delayed_work(&net->ipv4.addr_chk_work); + queue_delayed_work(system_power_efficient_wq, &net->ipv4.addr_chk_work, 0); /* Send message first, then call notifier. 
Notifier will trigger FIB update, so that @@ -723,16 +717,19 @@ static void check_lifetime(struct work_struct *work) unsigned long now, next, next_sec, next_sched; struct in_ifaddr *ifa; struct hlist_node *n; + struct net *net; int i; + net = container_of(to_delayed_work(work), struct net, ipv4.addr_chk_work); now = jiffies; next = round_jiffies_up(now + ADDR_CHECK_FREQUENCY); for (i = 0; i < IN4_ADDR_HSIZE; i++) { + struct hlist_head *head = &net->ipv4.inet_addr_lst[i]; bool change_needed = false; rcu_read_lock(); - hlist_for_each_entry_rcu(ifa, &inet_addr_lst[i], hash) { + hlist_for_each_entry_rcu(ifa, head, addr_lst) { unsigned long age, tstamp; u32 preferred_lft; u32 valid_lft; @@ -770,7 +767,7 @@ static void check_lifetime(struct work_struct *work) if (!change_needed) continue; rtnl_lock(); - hlist_for_each_entry_safe(ifa, n, &inet_addr_lst[i], hash) { + hlist_for_each_entry_safe(ifa, n, head, addr_lst) { unsigned long age; if (ifa->ifa_flags & IFA_F_PERMANENT) @@ -819,8 +816,8 @@ static void check_lifetime(struct work_struct *work) if (time_before(next_sched, now + ADDRCONF_TIMER_FUZZ_MAX)) next_sched = now + ADDRCONF_TIMER_FUZZ_MAX; - queue_delayed_work(system_power_efficient_wq, &check_lifetime_work, - next_sched - now); + queue_delayed_work(system_power_efficient_wq, &net->ipv4.addr_chk_work, + next_sched - now); } static void set_ifa_lifetime(struct in_ifaddr *ifa, __u32 valid_lft, @@ -1017,9 +1014,9 @@ static int inet_rtm_newaddr(struct sk_buff *skb, struct nlmsghdr *nlh, ifa->ifa_proto = new_proto; set_ifa_lifetime(ifa, valid_lft, prefered_lft); - cancel_delayed_work(&check_lifetime_work); + cancel_delayed_work(&net->ipv4.addr_chk_work); queue_delayed_work(system_power_efficient_wq, - &check_lifetime_work, 0); + &net->ipv4.addr_chk_work, 0); rtmsg_ifa(RTM_NEWADDR, ifa, nlh, NETLINK_CB(skb).portid); } return 0; @@ -2678,14 +2675,21 @@ static struct ctl_table ctl_forward_entry[] = { static __net_init int devinet_init_net(struct net *net) { - int err; - struct ipv4_devconf *all, *dflt; #ifdef CONFIG_SYSCTL - struct ctl_table *tbl; struct ctl_table_header *forw_hdr; + struct ctl_table *tbl; #endif + struct ipv4_devconf *all, *dflt; + int err; + int i; err = -ENOMEM; + net->ipv4.inet_addr_lst = kmalloc_array(IN4_ADDR_HSIZE, + sizeof(struct hlist_head), + GFP_KERNEL); + if (!net->ipv4.inet_addr_lst) + goto err_alloc_hash; + all = kmemdup(&ipv4_devconf, sizeof(ipv4_devconf), GFP_KERNEL); if (!all) goto err_alloc_all; @@ -2746,6 +2750,11 @@ static __net_init int devinet_init_net(struct net *net) net->ipv4.forw_hdr = forw_hdr; #endif + for (i = 0; i < IN4_ADDR_HSIZE; i++) + INIT_HLIST_HEAD(&net->ipv4.inet_addr_lst[i]); + + INIT_DEFERRABLE_WORK(&net->ipv4.addr_chk_work, check_lifetime); + net->ipv4.devconf_all = all; net->ipv4.devconf_dflt = dflt; return 0; @@ -2763,6 +2772,8 @@ err_alloc_ctl: err_alloc_dflt: kfree(all); err_alloc_all: + kfree(net->ipv4.inet_addr_lst); +err_alloc_hash: return err; } @@ -2770,7 +2781,11 @@ static __net_exit void devinet_exit_net(struct net *net) { #ifdef CONFIG_SYSCTL const struct ctl_table *tbl; +#endif + cancel_delayed_work_sync(&net->ipv4.addr_chk_work); + +#ifdef CONFIG_SYSCTL tbl = net->ipv4.forw_hdr->ctl_table_arg; unregister_net_sysctl_table(net->ipv4.forw_hdr); __devinet_sysctl_unregister(net, net->ipv4.devconf_dflt, @@ -2781,6 +2796,7 @@ static __net_exit void devinet_exit_net(struct net *net) #endif kfree(net->ipv4.devconf_dflt); kfree(net->ipv4.devconf_all); + kfree(net->ipv4.inet_addr_lst); } static __net_initdata struct pernet_operations 
devinet_ops = { @@ -2796,25 +2812,23 @@ static struct rtnl_af_ops inet_af_ops __read_mostly = { .set_link_af = inet_set_link_af, }; +static const struct rtnl_msg_handler devinet_rtnl_msg_handlers[] __initconst = { + {.protocol = PF_INET, .msgtype = RTM_NEWADDR, .doit = inet_rtm_newaddr}, + {.protocol = PF_INET, .msgtype = RTM_DELADDR, .doit = inet_rtm_deladdr}, + {.protocol = PF_INET, .msgtype = RTM_GETADDR, .dumpit = inet_dump_ifaddr, + .flags = RTNL_FLAG_DUMP_UNLOCKED | RTNL_FLAG_DUMP_SPLIT_NLM_DONE}, + {.protocol = PF_INET, .msgtype = RTM_GETNETCONF, + .doit = inet_netconf_get_devconf, .dumpit = inet_netconf_dump_devconf, + .flags = RTNL_FLAG_DOIT_UNLOCKED | RTNL_FLAG_DUMP_UNLOCKED}, +}; + void __init devinet_init(void) { - int i; - - for (i = 0; i < IN4_ADDR_HSIZE; i++) - INIT_HLIST_HEAD(&inet_addr_lst[i]); - register_pernet_subsys(&devinet_ops); register_netdevice_notifier(&ip_netdev_notifier); - queue_delayed_work(system_power_efficient_wq, &check_lifetime_work, 0); - - rtnl_af_register(&inet_af_ops); + if (rtnl_af_register(&inet_af_ops)) + panic("Unable to register inet_af_ops\n"); - rtnl_register(PF_INET, RTM_NEWADDR, inet_rtm_newaddr, NULL, 0); - rtnl_register(PF_INET, RTM_DELADDR, inet_rtm_deladdr, NULL, 0); - rtnl_register(PF_INET, RTM_GETADDR, NULL, inet_dump_ifaddr, - RTNL_FLAG_DUMP_UNLOCKED | RTNL_FLAG_DUMP_SPLIT_NLM_DONE); - rtnl_register(PF_INET, RTM_GETNETCONF, inet_netconf_get_devconf, - inet_netconf_dump_devconf, - RTNL_FLAG_DOIT_UNLOCKED | RTNL_FLAG_DUMP_UNLOCKED); + rtnl_register_many(devinet_rtnl_msg_handlers); } diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c index 793e6781399a..53bd26315df5 100644 --- a/net/ipv4/fib_frontend.c +++ b/net/ipv4/fib_frontend.c @@ -342,7 +342,7 @@ EXPORT_SYMBOL_GPL(fib_info_nh_uses_dev); * called with rcu_read_lock() */ static int __fib_validate_source(struct sk_buff *skb, __be32 src, __be32 dst, - u8 tos, int oif, struct net_device *dev, + dscp_t dscp, int oif, struct net_device *dev, int rpf, struct in_device *idev, u32 *itag) { struct net *net = dev_net(dev); @@ -357,7 +357,7 @@ static int __fib_validate_source(struct sk_buff *skb, __be32 src, __be32 dst, fl4.flowi4_iif = oif ? : LOOPBACK_IFINDEX; fl4.daddr = src; fl4.saddr = dst; - fl4.flowi4_tos = tos; + fl4.flowi4_tos = inet_dscp_to_dsfield(dscp); fl4.flowi4_scope = RT_SCOPE_UNIVERSE; fl4.flowi4_tun_key.tun_id = 0; fl4.flowi4_flags = 0; @@ -419,7 +419,7 @@ e_rpf: /* Ignore rp_filter for packets protected by IPsec. */ int fib_validate_source(struct sk_buff *skb, __be32 src, __be32 dst, - u8 tos, int oif, struct net_device *dev, + dscp_t dscp, int oif, struct net_device *dev, struct in_device *idev, u32 *itag) { int r = secpath_exists(skb) ? 
0 : IN_DEV_RPFILTER(idev); @@ -448,7 +448,8 @@ ok: } full_check: - return __fib_validate_source(skb, src, dst, tos, oif, dev, r, idev, itag); + return __fib_validate_source(skb, src, dst, dscp, oif, dev, r, idev, + itag); } static inline __be32 sk_extract_addr(struct sockaddr *addr) @@ -1648,6 +1649,15 @@ static struct pernet_operations fib_net_ops = { .exit_batch = fib_net_exit_batch, }; +static const struct rtnl_msg_handler fib_rtnl_msg_handlers[] __initconst = { + {.protocol = PF_INET, .msgtype = RTM_NEWROUTE, + .doit = inet_rtm_newroute}, + {.protocol = PF_INET, .msgtype = RTM_DELROUTE, + .doit = inet_rtm_delroute}, + {.protocol = PF_INET, .msgtype = RTM_GETROUTE, .dumpit = inet_dump_fib, + .flags = RTNL_FLAG_DUMP_UNLOCKED | RTNL_FLAG_DUMP_SPLIT_NLM_DONE}, +}; + void __init ip_fib_init(void) { fib_trie_init(); @@ -1657,8 +1667,5 @@ void __init ip_fib_init(void) register_netdevice_notifier(&fib_netdev_notifier); register_inetaddr_notifier(&fib_inetaddr_notifier); - rtnl_register(PF_INET, RTM_NEWROUTE, inet_rtm_newroute, NULL, 0); - rtnl_register(PF_INET, RTM_DELROUTE, inet_rtm_delroute, NULL, 0); - rtnl_register(PF_INET, RTM_GETROUTE, NULL, inet_dump_fib, - RTNL_FLAG_DUMP_UNLOCKED | RTNL_FLAG_DUMP_SPLIT_NLM_DONE); + rtnl_register_many(fib_rtnl_msg_handlers); } diff --git a/net/ipv4/fib_notifier.c b/net/ipv4/fib_notifier.c index 0e23ade74493..b1551c26554b 100644 --- a/net/ipv4/fib_notifier.c +++ b/net/ipv4/fib_notifier.c @@ -22,15 +22,15 @@ int call_fib4_notifiers(struct net *net, enum fib_event_type event_type, ASSERT_RTNL(); info->family = AF_INET; - net->ipv4.fib_seq++; + /* Paired with READ_ONCE() in fib4_seq_read() */ + WRITE_ONCE(net->ipv4.fib_seq, net->ipv4.fib_seq + 1); return call_fib_notifiers(net, event_type, info); } -static unsigned int fib4_seq_read(struct net *net) +static unsigned int fib4_seq_read(const struct net *net) { - ASSERT_RTNL(); - - return net->ipv4.fib_seq + fib4_rules_seq_read(net); + /* Paired with WRITE_ONCE() in call_fib4_notifiers() */ + return READ_ONCE(net->ipv4.fib_seq) + fib4_rules_seq_read(net); } static int fib4_dump(struct net *net, struct notifier_block *nb, diff --git a/net/ipv4/fib_rules.c b/net/ipv4/fib_rules.c index b07292d50ee7..8325224ef072 100644 --- a/net/ipv4/fib_rules.c +++ b/net/ipv4/fib_rules.c @@ -74,7 +74,7 @@ int fib4_rules_dump(struct net *net, struct notifier_block *nb, return fib_rules_dump(net, nb, AF_INET, extack); } -unsigned int fib4_rules_seq_read(struct net *net) +unsigned int fib4_rules_seq_read(const struct net *net) { return fib_rules_seq_read(net, AF_INET); } diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c index ba2df3d2ac15..d2cee5c314f5 100644 --- a/net/ipv4/fib_semantics.c +++ b/net/ipv4/fib_semantics.c @@ -50,17 +50,12 @@ #include "fib_lookup.h" -static DEFINE_SPINLOCK(fib_info_lock); static struct hlist_head *fib_info_hash; static struct hlist_head *fib_info_laddrhash; static unsigned int fib_info_hash_size; static unsigned int fib_info_hash_bits; static unsigned int fib_info_cnt; -#define DEVINDEX_HASHBITS 8 -#define DEVINDEX_HASHSIZE (1U << DEVINDEX_HASHBITS) -static struct hlist_head fib_info_devhash[DEVINDEX_HASHSIZE]; - /* for_nexthops and change_nexthops only used when nexthop object * is not set in a fib_info. The logic within can reference fib_nh. 
*/ @@ -260,12 +255,11 @@ EXPORT_SYMBOL_GPL(free_fib_info); void fib_release_info(struct fib_info *fi) { - spin_lock_bh(&fib_info_lock); + ASSERT_RTNL(); if (fi && refcount_dec_and_test(&fi->fib_treeref)) { hlist_del(&fi->fib_hash); - /* Paired with READ_ONCE() in fib_create_info(). */ - WRITE_ONCE(fib_info_cnt, fib_info_cnt - 1); + fib_info_cnt--; if (fi->fib_prefsrc) hlist_del(&fi->fib_lhash); @@ -275,14 +269,13 @@ void fib_release_info(struct fib_info *fi) change_nexthops(fi) { if (!nexthop_nh->fib_nh_dev) continue; - hlist_del(&nexthop_nh->nh_hash); + hlist_del_rcu(&nexthop_nh->nh_hash); } endfor_nexthops(fi) } /* Paired with READ_ONCE() from fib_table_lookup() */ WRITE_ONCE(fi->fib_dead, 1); fib_info_put(fi); } - spin_unlock_bh(&fib_info_lock); } static inline int nh_comp(struct fib_info *fi, struct fib_info *ofi) @@ -322,17 +315,9 @@ static inline int nh_comp(struct fib_info *fi, struct fib_info *ofi) return 0; } -static inline unsigned int fib_devindex_hashfn(unsigned int val) -{ - return hash_32(val, DEVINDEX_HASHBITS); -} - -static struct hlist_head * -fib_info_devhash_bucket(const struct net_device *dev) +static struct hlist_head *fib_nh_head(struct net_device *dev) { - u32 val = net_hash_mix(dev_net(dev)) ^ dev->ifindex; - - return &fib_info_devhash[fib_devindex_hashfn(val)]; + return &dev->fib_nh_head; } static unsigned int fib_info_hashfn_1(int init_val, u8 protocol, u8 scope, @@ -347,11 +332,10 @@ static unsigned int fib_info_hashfn_1(int init_val, u8 protocol, u8 scope, return val; } -static unsigned int fib_info_hashfn_result(unsigned int val) +static unsigned int fib_info_hashfn_result(const struct net *net, + unsigned int val) { - unsigned int mask = (fib_info_hash_size - 1); - - return (val ^ (val >> 7) ^ (val >> 12)) & mask; + return hash_32(val ^ net_hash_mix(net), fib_info_hash_bits); } static inline unsigned int fib_info_hashfn(struct fib_info *fi) @@ -363,14 +347,14 @@ static inline unsigned int fib_info_hashfn(struct fib_info *fi) fi->fib_priority); if (fi->nh) { - val ^= fib_devindex_hashfn(fi->nh->id); + val ^= fi->nh->id; } else { for_nexthops(fi) { - val ^= fib_devindex_hashfn(nh->fib_nh_oif); + val ^= nh->fib_nh_oif; } endfor_nexthops(fi) } - return fib_info_hashfn_result(val); + return fib_info_hashfn_result(fi->fib_net, val); } /* no metrics, only nexthop id */ @@ -381,11 +365,11 @@ static struct fib_info *fib_find_info_nh(struct net *net, struct fib_info *fi; unsigned int hash; - hash = fib_info_hashfn_1(fib_devindex_hashfn(cfg->fc_nh_id), + hash = fib_info_hashfn_1(cfg->fc_nh_id, cfg->fc_protocol, cfg->fc_scope, (__force u32)cfg->fc_prefsrc, cfg->fc_priority); - hash = fib_info_hashfn_result(hash); + hash = fib_info_hashfn_result(net, hash); head = &fib_info_hash[hash]; hlist_for_each_entry(fi, head, fib_hash) { @@ -437,28 +421,23 @@ static struct fib_info *fib_find_info(struct fib_info *nfi) } /* Check, that the gateway is already configured. - * Used only by redirect accept routine. 
+ * Used only by redirect accept routine, under rcu_read_lock(); */ int ip_fib_check_default(__be32 gw, struct net_device *dev) { struct hlist_head *head; struct fib_nh *nh; - spin_lock(&fib_info_lock); - - head = fib_info_devhash_bucket(dev); + head = fib_nh_head(dev); - hlist_for_each_entry(nh, head, nh_hash) { - if (nh->fib_nh_dev == dev && - nh->fib_nh_gw4 == gw && + hlist_for_each_entry_rcu(nh, head, nh_hash) { + DEBUG_NET_WARN_ON_ONCE(nh->fib_nh_dev != dev); + if (nh->fib_nh_gw4 == gw && !(nh->fib_nh_flags & RTNH_F_DEAD)) { - spin_unlock(&fib_info_lock); return 0; } } - spin_unlock(&fib_info_lock); - return -1; } @@ -1277,7 +1256,7 @@ static void fib_info_hash_move(struct hlist_head *new_info_hash, unsigned int old_size = fib_info_hash_size; unsigned int i; - spin_lock_bh(&fib_info_lock); + ASSERT_RTNL(); old_info_hash = fib_info_hash; old_laddrhash = fib_info_laddrhash; fib_info_hash_size = new_size; @@ -1314,8 +1293,6 @@ static void fib_info_hash_move(struct hlist_head *new_info_hash, } } - spin_unlock_bh(&fib_info_lock); - kvfree(old_info_hash); kvfree(old_laddrhash); } @@ -1391,6 +1368,7 @@ struct fib_info *fib_create_info(struct fib_config *cfg, int nhs = 1; struct net *net = cfg->fc_nlinfo.nl_net; + ASSERT_RTNL(); if (cfg->fc_type > RTN_MAX) goto err_inval; @@ -1433,8 +1411,7 @@ struct fib_info *fib_create_info(struct fib_config *cfg, err = -ENOBUFS; - /* Paired with WRITE_ONCE() in fib_release_info() */ - if (READ_ONCE(fib_info_cnt) >= fib_info_hash_size) { + if (fib_info_cnt >= fib_info_hash_size) { unsigned int new_size = fib_info_hash_size << 1; struct hlist_head *new_info_hash; struct hlist_head *new_laddrhash; @@ -1593,7 +1570,7 @@ link_it: refcount_set(&fi->fib_treeref, 1); refcount_set(&fi->fib_clntref, 1); - spin_lock_bh(&fib_info_lock); + fib_info_cnt++; hlist_add_head(&fi->fib_hash, &fib_info_hash[fib_info_hashfn(fi)]); @@ -1611,11 +1588,10 @@ link_it: if (!nexthop_nh->fib_nh_dev) continue; - head = fib_info_devhash_bucket(nexthop_nh->fib_nh_dev); - hlist_add_head(&nexthop_nh->nh_hash, head); + head = fib_nh_head(nexthop_nh->fib_nh_dev); + hlist_add_head_rcu(&nexthop_nh->nh_hash, head); } endfor_nexthops(fi) } - spin_unlock_bh(&fib_info_lock); return fi; err_inval: @@ -1965,12 +1941,12 @@ void fib_nhc_update_mtu(struct fib_nh_common *nhc, u32 new, u32 orig) void fib_sync_mtu(struct net_device *dev, u32 orig_mtu) { - struct hlist_head *head = fib_info_devhash_bucket(dev); + struct hlist_head *head = fib_nh_head(dev); struct fib_nh *nh; hlist_for_each_entry(nh, head, nh_hash) { - if (nh->fib_nh_dev == dev) - fib_nhc_update_mtu(&nh->nh_common, dev->mtu, orig_mtu); + DEBUG_NET_WARN_ON_ONCE(nh->fib_nh_dev != dev); + fib_nhc_update_mtu(&nh->nh_common, dev->mtu, orig_mtu); } } @@ -1984,7 +1960,7 @@ void fib_sync_mtu(struct net_device *dev, u32 orig_mtu) */ int fib_sync_down_dev(struct net_device *dev, unsigned long event, bool force) { - struct hlist_head *head = fib_info_devhash_bucket(dev); + struct hlist_head *head = fib_nh_head(dev); struct fib_info *prev_fi = NULL; int scope = RT_SCOPE_NOWHERE; struct fib_nh *nh; @@ -1998,7 +1974,8 @@ int fib_sync_down_dev(struct net_device *dev, unsigned long event, bool force) int dead; BUG_ON(!fi->fib_nhs); - if (nh->fib_nh_dev != dev || fi == prev_fi) + DEBUG_NET_WARN_ON_ONCE(nh->fib_nh_dev != dev); + if (fi == prev_fi) continue; prev_fi = fi; dead = 0; @@ -2148,7 +2125,7 @@ int fib_sync_up(struct net_device *dev, unsigned char nh_flags) } prev_fi = NULL; - head = fib_info_devhash_bucket(dev); + head = fib_nh_head(dev); ret = 0; 
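
As background for these fib_sync_*() hunks: once nexthops hang off a per-device list head instead of a shared hash bucket keyed by (net, ifindex), every entry on the list belongs to that device by construction, so the old `nh->fib_nh_dev != dev` filter can be demoted to a debug assertion, and readers such as ip_fib_check_default() above can walk the list under RCU rather than a spinlock. A minimal sketch of the pattern with simplified stand-in types (not the kernel's fib structures):

/* Illustrative only: per-device list written under RTNL, read under RCU. */
#include <linux/rculist.h>
#include <linux/types.h>

struct demo_dev {
	struct hlist_head nh_head;	/* plays the role of dev->fib_nh_head */
};

struct demo_nh {
	struct hlist_node nh_hash;
	bool dead;
};

/* Writer side, serialized by RTNL in the real code. */
static void demo_nh_link(struct demo_dev *dev, struct demo_nh *nh)
{
	hlist_add_head_rcu(&nh->nh_hash, &dev->nh_head);
}

/* Reader side; caller holds rcu_read_lock(). No dev comparison needed:
 * everything on dev->nh_head belongs to dev by construction.
 */
static bool demo_nh_any_alive(struct demo_dev *dev)
{
	struct demo_nh *nh;

	hlist_for_each_entry_rcu(nh, &dev->nh_head, nh_hash) {
		if (!nh->dead)
			return true;
	}
	return false;
}

The loop that follows is the same reader shape, still under RTNL here, which is why the dev check survives only as DEBUG_NET_WARN_ON_ONCE().
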
hlist_for_each_entry(nh, head, nh_hash) { @@ -2156,7 +2133,8 @@ int fib_sync_up(struct net_device *dev, unsigned char nh_flags) int alive; BUG_ON(!fi->fib_nhs); - if (nh->fib_nh_dev != dev || fi == prev_fi) + DEBUG_NET_WARN_ON_ONCE(nh->fib_nh_dev != dev); + if (fi == prev_fi) continue; prev_fi = fi; diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c index 09e31757e96c..161f5526b86c 100644 --- a/net/ipv4/fib_trie.c +++ b/net/ipv4/fib_trie.c @@ -292,15 +292,9 @@ static const int inflate_threshold = 50; static const int halve_threshold_root = 15; static const int inflate_threshold_root = 30; -static void __alias_free_mem(struct rcu_head *head) -{ - struct fib_alias *fa = container_of(head, struct fib_alias, rcu); - kmem_cache_free(fn_alias_kmem, fa); -} - static inline void alias_free_mem_rcu(struct fib_alias *fa) { - call_rcu(&fa->rcu, __alias_free_mem); + kfree_rcu(fa, rcu); } #define TNODE_VMALLOC_MAX \ diff --git a/net/ipv4/fou_nl.c b/net/ipv4/fou_nl.c index 98b90107b5ab..3d9614609b2d 100644 --- a/net/ipv4/fou_nl.c +++ b/net/ipv4/fou_nl.c @@ -12,7 +12,7 @@ /* Global operation policy for fou */ const struct nla_policy fou_nl_policy[FOU_ATTR_IFINDEX + 1] = { - [FOU_ATTR_PORT] = { .type = NLA_U16, }, + [FOU_ATTR_PORT] = { .type = NLA_BE16, }, [FOU_ATTR_AF] = { .type = NLA_U8, }, [FOU_ATTR_IPPROTO] = { .type = NLA_U8, }, [FOU_ATTR_TYPE] = { .type = NLA_U8, }, @@ -21,7 +21,7 @@ const struct nla_policy fou_nl_policy[FOU_ATTR_IFINDEX + 1] = { [FOU_ATTR_LOCAL_V6] = { .len = 16, }, [FOU_ATTR_PEER_V4] = { .type = NLA_U32, }, [FOU_ATTR_PEER_V6] = { .len = 16, }, - [FOU_ATTR_PEER_PORT] = { .type = NLA_U16, }, + [FOU_ATTR_PEER_PORT] = { .type = NLA_BE16, }, [FOU_ATTR_IFINDEX] = { .type = NLA_S32, }, }; diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c index e1384e7331d8..23664434922e 100644 --- a/net/ipv4/icmp.c +++ b/net/ipv4/icmp.c @@ -478,13 +478,11 @@ static struct net_device *icmp_get_route_lookup_dev(struct sk_buff *skb) return route_lookup_dev; } -static struct rtable *icmp_route_lookup(struct net *net, - struct flowi4 *fl4, +static struct rtable *icmp_route_lookup(struct net *net, struct flowi4 *fl4, struct sk_buff *skb_in, - const struct iphdr *iph, - __be32 saddr, u8 tos, u32 mark, - int type, int code, - struct icmp_bxm *param) + const struct iphdr *iph, __be32 saddr, + dscp_t dscp, u32 mark, int type, + int code, struct icmp_bxm *param) { struct net_device *route_lookup_dev; struct dst_entry *dst, *dst2; @@ -498,7 +496,7 @@ static struct rtable *icmp_route_lookup(struct net *net, fl4->saddr = saddr; fl4->flowi4_mark = mark; fl4->flowi4_uid = sock_net_uid(net, NULL); - fl4->flowi4_tos = tos & INET_DSCP_MASK; + fl4->flowi4_tos = inet_dscp_to_dsfield(dscp); fl4->flowi4_proto = IPPROTO_ICMP; fl4->fl4_icmp_type = type; fl4->fl4_icmp_code = code; @@ -547,7 +545,7 @@ static struct rtable *icmp_route_lookup(struct net *net, orefdst = skb_in->_skb_refdst; /* save old refdst */ skb_dst_set(skb_in, NULL); err = ip_route_input(skb_in, fl4_dec.daddr, fl4_dec.saddr, - tos, rt2->dst.dev); + dscp, rt2->dst.dev); dst_release(&rt2->dst); rt2 = skb_rtable(skb_in); @@ -741,8 +739,9 @@ void __icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info, ipc.opt = &icmp_param.replyopts.opt; ipc.sockc.mark = mark; - rt = icmp_route_lookup(net, &fl4, skb_in, iph, saddr, tos, mark, - type, code, &icmp_param); + rt = icmp_route_lookup(net, &fl4, skb_in, iph, saddr, + inet_dsfield_to_dscp(tos), mark, type, code, + &icmp_param); if (IS_ERR(rt)) goto out_unlock; diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c 
index 9bf09de6a2e7..6a238398acc9 100644 --- a/net/ipv4/igmp.c +++ b/net/ipv4/igmp.c @@ -1437,16 +1437,32 @@ static void ip_mc_hash_remove(struct in_device *in_dev, static void ____ip_mc_inc_group(struct in_device *in_dev, __be32 addr, unsigned int mode, gfp_t gfp) { + struct ip_mc_list __rcu **mc_hash; struct ip_mc_list *im; ASSERT_RTNL(); - for_each_pmc_rtnl(in_dev, im) { - if (im->multiaddr == addr) { - im->users++; - ip_mc_add_src(in_dev, &addr, mode, 0, NULL, 0); - goto out; + mc_hash = rtnl_dereference(in_dev->mc_hash); + if (mc_hash) { + u32 hash = hash_32((__force u32)addr, MC_HASH_SZ_LOG); + + for (im = rtnl_dereference(mc_hash[hash]); + im; + im = rtnl_dereference(im->next_hash)) { + if (im->multiaddr == addr) + break; } + } else { + for_each_pmc_rtnl(in_dev, im) { + if (im->multiaddr == addr) + break; + } + } + + if (im) { + im->users++; + ip_mc_add_src(in_dev, &addr, mode, 0, NULL, 0); + goto out; } im = kzalloc(sizeof(*im), gfp); diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c index 2b698f8419fe..491c2c6b683e 100644 --- a/net/ipv4/inet_connection_sock.c +++ b/net/ipv4/inet_connection_sock.c @@ -775,7 +775,8 @@ void inet_csk_clear_xmit_timers(struct sock *sk) { struct inet_connection_sock *icsk = inet_csk(sk); - icsk->icsk_pending = icsk->icsk_ack.pending = 0; + smp_store_release(&icsk->icsk_pending, 0); + smp_store_release(&icsk->icsk_ack.pending, 0); sk_stop_timer(sk, &icsk->icsk_retransmit_timer); sk_stop_timer(sk, &icsk->icsk_delack_timer); @@ -790,7 +791,8 @@ void inet_csk_clear_xmit_timers_sync(struct sock *sk) /* ongoing timer handlers need to acquire socket lock. */ sock_not_owned_by_me(sk); - icsk->icsk_pending = icsk->icsk_ack.pending = 0; + smp_store_release(&icsk->icsk_pending, 0); + smp_store_release(&icsk->icsk_ack.pending, 0); sk_stop_timer_sync(sk, &icsk->icsk_retransmit_timer); sk_stop_timer_sync(sk, &icsk->icsk_delack_timer); diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c index 67639309163d..321acc8abf17 100644 --- a/net/ipv4/inet_diag.c +++ b/net/ipv4/inet_diag.c @@ -247,6 +247,7 @@ int inet_sk_diag_fill(struct sock *sk, struct inet_connection_sock *icsk, struct nlmsghdr *nlh; struct nlattr *attr; void *info = NULL; + u8 icsk_pending; int protocol; cb_data = cb->data; @@ -307,14 +308,15 @@ int inet_sk_diag_fill(struct sock *sk, struct inet_connection_sock *icsk, goto out; } - if (icsk->icsk_pending == ICSK_TIME_RETRANS || - icsk->icsk_pending == ICSK_TIME_REO_TIMEOUT || - icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) { + icsk_pending = smp_load_acquire(&icsk->icsk_pending); + if (icsk_pending == ICSK_TIME_RETRANS || + icsk_pending == ICSK_TIME_REO_TIMEOUT || + icsk_pending == ICSK_TIME_LOSS_PROBE) { r->idiag_timer = 1; r->idiag_retrans = icsk->icsk_retransmits; r->idiag_expires = jiffies_delta_to_msecs(icsk->icsk_timeout - jiffies); - } else if (icsk->icsk_pending == ICSK_TIME_PROBE0) { + } else if (icsk_pending == ICSK_TIME_PROBE0) { r->idiag_timer = 4; r->idiag_retrans = icsk->icsk_probes_out; r->idiag_expires = diff --git a/net/ipv4/inetpeer.c b/net/ipv4/inetpeer.c index 5bd759963451..5ab56f4cb529 100644 --- a/net/ipv4/inetpeer.c +++ b/net/ipv4/inetpeer.c @@ -128,11 +128,6 @@ static struct inet_peer *lookup(const struct inetpeer_addr *daddr, return NULL; } -static void inetpeer_free_rcu(struct rcu_head *head) -{ - kmem_cache_free(peer_cachep, container_of(head, struct inet_peer, rcu)); -} - /* perform garbage collect on all items stacked during a lookup */ static void inet_peer_gc(struct inet_peer_base *base, struct 
inet_peer *gc_stack[], @@ -168,7 +163,7 @@ static void inet_peer_gc(struct inet_peer_base *base, if (p) { rb_erase(&p->rb_node, &base->rb_root); base->total--; - call_rcu(&p->rcu, inetpeer_free_rcu); + kfree_rcu(p, rcu); } } } @@ -242,7 +237,7 @@ void inet_putpeer(struct inet_peer *p) WRITE_ONCE(p->dtime, (__u32)jiffies); if (refcount_dec_and_test(&p->refcnt)) - call_rcu(&p->rcu, inetpeer_free_rcu); + kfree_rcu(p, rcu); } EXPORT_SYMBOL_GPL(inet_putpeer); diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c index a92664a5ef2e..48e2810f1f27 100644 --- a/net/ipv4/ip_fragment.c +++ b/net/ipv4/ip_fragment.c @@ -175,8 +175,8 @@ static void ip_expire(struct timer_list *t) /* skb has no dst, perform route lookup again */ iph = ip_hdr(head); - err = ip_route_input_noref(head, iph->daddr, iph->saddr, - iph->tos, head->dev); + err = ip_route_input_noref(head, iph->daddr, iph->saddr, ip4h_dscp(iph), + head->dev); if (err) goto out; diff --git a/net/ipv4/ip_input.c b/net/ipv4/ip_input.c index b6e7d4921309..89bb63da6852 100644 --- a/net/ipv4/ip_input.c +++ b/net/ipv4/ip_input.c @@ -325,8 +325,8 @@ static int ip_rcv_finish_core(struct net *net, struct sock *sk, drop_reason = SKB_DROP_REASON_NOT_SPECIFIED; if (ip_can_use_hint(skb, iph, hint)) { - err = ip_route_use_hint(skb, iph->daddr, iph->saddr, iph->tos, - dev, hint); + err = ip_route_use_hint(skb, iph->daddr, iph->saddr, + ip4h_dscp(iph), dev, hint); if (unlikely(err)) goto drop_error; } @@ -363,7 +363,7 @@ static int ip_rcv_finish_core(struct net *net, struct sock *sk, */ if (!skb_valid_dst(skb)) { err = ip_route_input_noref(skb, iph->daddr, iph->saddr, - iph->tos, dev); + ip4h_dscp(iph), dev); if (unlikely(err)) goto drop_error; } else { diff --git a/net/ipv4/ip_options.c b/net/ipv4/ip_options.c index 68aedb8877b9..81e86e5defee 100644 --- a/net/ipv4/ip_options.c +++ b/net/ipv4/ip_options.c @@ -617,7 +617,8 @@ int ip_options_rcv_srr(struct sk_buff *skb, struct net_device *dev) orefdst = skb->_skb_refdst; skb_dst_set(skb, NULL); - err = ip_route_input(skb, nexthop, iph->saddr, iph->tos, dev); + err = ip_route_input(skb, nexthop, iph->saddr, ip4h_dscp(iph), + dev); rt2 = skb_rtable(skb); if (err || (rt2->rt_type != RTN_UNICAST && rt2->rt_type != RTN_LOCAL)) { skb_dst_drop(skb); diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c index 49811c9281d4..0065b1996c94 100644 --- a/net/ipv4/ip_output.c +++ b/net/ipv4/ip_output.c @@ -973,7 +973,7 @@ static int __ip_append_data(struct sock *sk, unsigned int maxfraglen, fragheaderlen, maxnonfragsize; int csummode = CHECKSUM_NONE; struct rtable *rt = dst_rtable(cork->dst); - bool paged, hold_tskey, extra_uref = false; + bool paged, hold_tskey = false, extra_uref = false; unsigned int wmem_alloc_delta = 0; u32 tskey = 0; @@ -1049,10 +1049,15 @@ static int __ip_append_data(struct sock *sk, cork->length += length; - hold_tskey = cork->tx_flags & SKBTX_ANY_TSTAMP && - READ_ONCE(sk->sk_tsflags) & SOF_TIMESTAMPING_OPT_ID; - if (hold_tskey) - tskey = atomic_inc_return(&sk->sk_tskey) - 1; + if (cork->tx_flags & SKBTX_ANY_TSTAMP && + READ_ONCE(sk->sk_tsflags) & SOF_TIMESTAMPING_OPT_ID) { + if (cork->flags & IPCORK_TS_OPT_ID) { + tskey = cork->ts_opt_id; + } else { + tskey = atomic_inc_return(&sk->sk_tskey) - 1; + hold_tskey = true; + } + } /* So, what's going on in the loop below? 
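These ip4h_dscp() conversions continue the tree-wide move from a raw u8 tos to the sparse-checked dscp_t type, so a dsfield with ECN bits set can no longer reach a FIB lookup unmasked. Roughly, the helpers involved (see include/net/inet_dscp.h)::

    #include <net/flow.h>
    #include <net/inet_dscp.h>
    #include <net/ip.h>

    /* dscp_t is a __bitwise u8 holding only the six DSCP bits,
     * i.e. tos & INET_DSCP_MASK; sparse flags any direct mixing with
     * plain dsfield values. Conversions live at the boundaries: */
    static void fill_flow_tos(const struct iphdr *iph, struct flowi4 *fl4)
    {
            dscp_t dscp = ip4h_dscp(iph);   /* masks off the ECN bits */

            fl4->flowi4_tos = inet_dscp_to_dsfield(dscp);
    }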
* @@ -1326,7 +1331,11 @@ static int ip_setup_cork(struct sock *sk, struct inet_cork *cork, cork->priority = ipc->priority; cork->transmit_time = ipc->sockc.transmit_time; cork->tx_flags = 0; - sock_tx_timestamp(sk, ipc->sockc.tsflags, &cork->tx_flags); + sock_tx_timestamp(sk, &ipc->sockc, &cork->tx_flags); + if (ipc->sockc.tsflags & SOCKCM_FLAG_TS_OPT_ID) { + cork->flags |= IPCORK_TS_OPT_ID; + cork->ts_opt_id = ipc->sockc.ts_opt_id; + } return 0; } @@ -1587,7 +1596,8 @@ static int ip_reply_glue_bits(void *dptr, char *to, int offset, * Generic function to send a packet as reply to another packet. * Used to send some TCP resets/acks so far. */ -void ip_send_unicast_reply(struct sock *sk, struct sk_buff *skb, +void ip_send_unicast_reply(struct sock *sk, const struct sock *orig_sk, + struct sk_buff *skb, const struct ip_options *sopt, __be32 daddr, __be32 saddr, const struct ip_reply_arg *arg, @@ -1653,6 +1663,8 @@ void ip_send_unicast_reply(struct sock *sk, struct sk_buff *skb, arg->csumoffset) = csum_fold(csum_add(nskb->csum, arg->csum)); nskb->ip_summed = CHECKSUM_NONE; + if (orig_sk) + skb_set_owner_edemux(nskb, (struct sock *)orig_sk); if (transmit_time) nskb->tstamp_type = SKB_CLOCK_MONOTONIC; if (txhash) diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c index 089864c6a35e..b4fc443481ce 100644 --- a/net/ipv4/ipmr.c +++ b/net/ipv4/ipmr.c @@ -288,7 +288,7 @@ static int ipmr_rules_dump(struct net *net, struct notifier_block *nb, return fib_rules_dump(net, nb, RTNL_FAMILY_IPMR, extack); } -static unsigned int ipmr_rules_seq_read(struct net *net) +static unsigned int ipmr_rules_seq_read(const struct net *net) { return fib_rules_seq_read(net, RTNL_FAMILY_IPMR); } @@ -346,7 +346,7 @@ static int ipmr_rules_dump(struct net *net, struct notifier_block *nb, return 0; } -static unsigned int ipmr_rules_seq_read(struct net *net) +static unsigned int ipmr_rules_seq_read(const struct net *net) { return 0; } @@ -3035,11 +3035,9 @@ static const struct net_protocol pim_protocol = { }; #endif -static unsigned int ipmr_seq_read(struct net *net) +static unsigned int ipmr_seq_read(const struct net *net) { - ASSERT_RTNL(); - - return net->ipv4.ipmr_seq + ipmr_rules_seq_read(net); + return READ_ONCE(net->ipv4.ipmr_seq) + ipmr_rules_seq_read(net); } static int ipmr_dump(struct net *net, struct notifier_block *nb, @@ -3139,6 +3137,17 @@ static struct pernet_operations ipmr_net_ops = { .exit_batch = ipmr_net_exit_batch, }; +static const struct rtnl_msg_handler ipmr_rtnl_msg_handlers[] __initconst = { + {.protocol = RTNL_FAMILY_IPMR, .msgtype = RTM_GETLINK, + .dumpit = ipmr_rtm_dumplink}, + {.protocol = RTNL_FAMILY_IPMR, .msgtype = RTM_NEWROUTE, + .doit = ipmr_rtm_route}, + {.protocol = RTNL_FAMILY_IPMR, .msgtype = RTM_DELROUTE, + .doit = ipmr_rtm_route}, + {.protocol = RTNL_FAMILY_IPMR, .msgtype = RTM_GETROUTE, + .doit = ipmr_rtm_getroute, .dumpit = ipmr_rtm_dumproute}, +}; + int __init ip_mr_init(void) { int err; @@ -3159,15 +3168,8 @@ int __init ip_mr_init(void) goto add_proto_fail; } #endif - rtnl_register(RTNL_FAMILY_IPMR, RTM_GETROUTE, - ipmr_rtm_getroute, ipmr_rtm_dumproute, 0); - rtnl_register(RTNL_FAMILY_IPMR, RTM_NEWROUTE, - ipmr_rtm_route, NULL, 0); - rtnl_register(RTNL_FAMILY_IPMR, RTM_DELROUTE, - ipmr_rtm_route, NULL, 0); - - rtnl_register(RTNL_FAMILY_IPMR, RTM_GETLINK, - NULL, ipmr_rtm_dumplink, 0); + rtnl_register_many(ipmr_rtnl_msg_handlers); + return 0; #ifdef CONFIG_IP_PIMSM_V2 diff --git a/net/ipv4/nexthop.c b/net/ipv4/nexthop.c index 93aaea0006ba..570e450e008c 100644 --- a/net/ipv4/nexthop.c +++ 
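The new IPCORK_TS_OPT_ID path above lets a sender supply the OPT_ID timestamp key for one corked send instead of consuming the per-socket sk_tskey counter; only the counter path still needs hold_tskey so an error can decrement it again. From userspace this is driven by the SCM_TS_OPT_ID control message. A hypothetical usage sketch, assuming a socket already configured via SO_TIMESTAMPING with SOF_TIMESTAMPING_OPT_ID set, and headers recent enough to define SCM_TS_OPT_ID::

    #include <stdint.h>
    #include <string.h>
    #include <sys/socket.h>

    static ssize_t send_with_ts_key(int fd, const void *buf, size_t len,
                                    uint32_t key)
    {
            char control[CMSG_SPACE(sizeof(key))] = { 0 };
            struct iovec iov = {
                    .iov_base = (void *)buf,
                    .iov_len = len,
            };
            struct msghdr msg = {
                    .msg_iov = &iov,
                    .msg_iovlen = 1,
                    .msg_control = control,
                    .msg_controllen = sizeof(control),
            };
            struct cmsghdr *cmsg = CMSG_FIRSTHDR(&msg);

            /* The kernel uses the given key verbatim for this call's
             * tx timestamps instead of the sk_tskey counter. */
            cmsg->cmsg_level = SOL_SOCKET;
            cmsg->cmsg_type = SCM_TS_OPT_ID;
            cmsg->cmsg_len = CMSG_LEN(sizeof(key));
            memcpy(CMSG_DATA(cmsg), &key, sizeof(key));

            return sendmsg(fd, &msg, 0);
    }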
b/net/ipv4/nexthop.c @@ -4042,25 +4042,30 @@ static struct pernet_operations nexthop_net_ops = { .exit_batch_rtnl = nexthop_net_exit_batch_rtnl, }; +static const struct rtnl_msg_handler nexthop_rtnl_msg_handlers[] __initconst = { + {.msgtype = RTM_NEWNEXTHOP, .doit = rtm_new_nexthop}, + {.msgtype = RTM_DELNEXTHOP, .doit = rtm_del_nexthop}, + {.msgtype = RTM_GETNEXTHOP, .doit = rtm_get_nexthop, + .dumpit = rtm_dump_nexthop}, + {.msgtype = RTM_GETNEXTHOPBUCKET, .doit = rtm_get_nexthop_bucket, + .dumpit = rtm_dump_nexthop_bucket}, + {.protocol = PF_INET, .msgtype = RTM_NEWNEXTHOP, + .doit = rtm_new_nexthop}, + {.protocol = PF_INET, .msgtype = RTM_GETNEXTHOP, + .dumpit = rtm_dump_nexthop}, + {.protocol = PF_INET6, .msgtype = RTM_NEWNEXTHOP, + .doit = rtm_new_nexthop}, + {.protocol = PF_INET6, .msgtype = RTM_GETNEXTHOP, + .dumpit = rtm_dump_nexthop}, +}; + static int __init nexthop_init(void) { register_pernet_subsys(&nexthop_net_ops); register_netdevice_notifier(&nh_netdev_notifier); - rtnl_register(PF_UNSPEC, RTM_NEWNEXTHOP, rtm_new_nexthop, NULL, 0); - rtnl_register(PF_UNSPEC, RTM_DELNEXTHOP, rtm_del_nexthop, NULL, 0); - rtnl_register(PF_UNSPEC, RTM_GETNEXTHOP, rtm_get_nexthop, - rtm_dump_nexthop, 0); - - rtnl_register(PF_INET, RTM_NEWNEXTHOP, rtm_new_nexthop, NULL, 0); - rtnl_register(PF_INET, RTM_GETNEXTHOP, NULL, rtm_dump_nexthop, 0); - - rtnl_register(PF_INET6, RTM_NEWNEXTHOP, rtm_new_nexthop, NULL, 0); - rtnl_register(PF_INET6, RTM_GETNEXTHOP, NULL, rtm_dump_nexthop, 0); - - rtnl_register(PF_UNSPEC, RTM_GETNEXTHOPBUCKET, rtm_get_nexthop_bucket, - rtm_dump_nexthop_bucket, 0); + rtnl_register_many(nexthop_rtnl_msg_handlers); return 0; } diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c index 474dfd263c8b..0e9e01967ec9 100644 --- a/net/ipv4/raw.c +++ b/net/ipv4/raw.c @@ -370,7 +370,7 @@ static int raw_send_hdrinc(struct sock *sk, struct flowi4 *fl4, skb->ip_summed = CHECKSUM_NONE; - skb_setup_tx_timestamp(skb, sockc->tsflags); + skb_setup_tx_timestamp(skb, sockc); if (flags & MSG_CONFIRM) skb_set_dst_pending_confirm(skb, 1); diff --git a/net/ipv4/route.c b/net/ipv4/route.c index 723ac9181558..18a08b4f4a5a 100644 --- a/net/ipv4/route.c +++ b/net/ipv4/route.c @@ -1666,7 +1666,7 @@ EXPORT_SYMBOL(rt_dst_clone); /* called in rcu_read_lock() section */ int ip_mc_validate_source(struct sk_buff *skb, __be32 daddr, __be32 saddr, - u8 tos, struct net_device *dev, + dscp_t dscp, struct net_device *dev, struct in_device *in_dev, u32 *itag) { int err; @@ -1687,8 +1687,8 @@ int ip_mc_validate_source(struct sk_buff *skb, __be32 daddr, __be32 saddr, ip_hdr(skb)->protocol != IPPROTO_IGMP) return -EINVAL; } else { - err = fib_validate_source(skb, saddr, 0, tos, 0, dev, - in_dev, itag); + err = fib_validate_source(skb, saddr, 0, dscp, 0, dev, in_dev, + itag); if (err < 0) return err; } @@ -1697,7 +1697,7 @@ int ip_mc_validate_source(struct sk_buff *skb, __be32 daddr, __be32 saddr, /* called in rcu_read_lock() section */ static int ip_route_input_mc(struct sk_buff *skb, __be32 daddr, __be32 saddr, - u8 tos, struct net_device *dev, int our) + dscp_t dscp, struct net_device *dev, int our) { struct in_device *in_dev = __in_dev_get_rcu(dev); unsigned int flags = RTCF_MULTICAST; @@ -1705,7 +1705,8 @@ static int ip_route_input_mc(struct sk_buff *skb, __be32 daddr, __be32 saddr, u32 itag = 0; int err; - err = ip_mc_validate_source(skb, daddr, saddr, tos, dev, in_dev, &itag); + err = ip_mc_validate_source(skb, daddr, saddr, dscp, dev, in_dev, + &itag); if (err) return err; @@ -1764,10 +1765,9 @@ static void 
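As with ipmr just above, nexthop_init() now hands one const table to rtnl_register_many() instead of issuing a string of void rtnl_register() calls whose failures could only be logged, never handled. A sketch of the conversion shape, handlers hypothetical; modules that also unregister must use __initconst_or_module (as phonet and ip6mr do below) so the table outlives .init.text::

    #include <net/rtnetlink.h>

    static const struct rtnl_msg_handler foo_rtnl_msg_handlers[] __initconst = {
            {.msgtype = RTM_GETROUTE, .doit = foo_rtm_get,
             .dumpit = foo_rtm_dump, .flags = RTNL_FLAG_DOIT_UNLOCKED},
            {.protocol = PF_INET, .msgtype = RTM_NEWROUTE,
             .doit = foo_rtm_new},
    };

    static int __init foo_init(void)
    {
            /* Registration is all-or-nothing: on failure, entries
             * registered so far are unwound before returning. */
            return rtnl_register_many(foo_rtnl_msg_handlers);
    }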
ip_handle_martian_source(struct net_device *dev, } /* called in rcu_read_lock() section */ -static int __mkroute_input(struct sk_buff *skb, - const struct fib_result *res, - struct in_device *in_dev, - __be32 daddr, __be32 saddr, u32 tos) +static int __mkroute_input(struct sk_buff *skb, const struct fib_result *res, + struct in_device *in_dev, __be32 daddr, + __be32 saddr, dscp_t dscp) { struct fib_nh_common *nhc = FIB_RES_NHC(*res); struct net_device *dev = nhc->nhc_dev; @@ -1785,7 +1785,7 @@ static int __mkroute_input(struct sk_buff *skb, return -EINVAL; } - err = fib_validate_source(skb, saddr, daddr, tos, FIB_RES_OIF(*res), + err = fib_validate_source(skb, saddr, daddr, dscp, FIB_RES_OIF(*res), in_dev->dev, in_dev, &itag); if (err < 0) { ip_handle_martian_source(in_dev->dev, in_dev, skb, daddr, @@ -2112,11 +2112,9 @@ int fib_multipath_hash(const struct net *net, const struct flowi4 *fl4, } #endif /* CONFIG_IP_ROUTE_MULTIPATH */ -static int ip_mkroute_input(struct sk_buff *skb, - struct fib_result *res, - struct in_device *in_dev, - __be32 daddr, __be32 saddr, u32 tos, - struct flow_keys *hkeys) +static int ip_mkroute_input(struct sk_buff *skb, struct fib_result *res, + struct in_device *in_dev, __be32 daddr, + __be32 saddr, dscp_t dscp, struct flow_keys *hkeys) { #ifdef CONFIG_IP_ROUTE_MULTIPATH if (res->fi && fib_info_num_path(res->fi) > 1) { @@ -2128,7 +2126,7 @@ static int ip_mkroute_input(struct sk_buff *skb, #endif /* create a routing cache entry */ - return __mkroute_input(skb, res, in_dev, daddr, saddr, tos); + return __mkroute_input(skb, res, in_dev, daddr, saddr, dscp); } /* Implements all the saddr-related checks as ip_route_input_slow(), @@ -2136,7 +2134,7 @@ static int ip_mkroute_input(struct sk_buff *skb, * Uses the provided hint instead of performing a route lookup. 
*/ int ip_route_use_hint(struct sk_buff *skb, __be32 daddr, __be32 saddr, - u8 tos, struct net_device *dev, + dscp_t dscp, struct net_device *dev, const struct sk_buff *hint) { struct in_device *in_dev = __in_dev_get_rcu(dev); @@ -2160,8 +2158,8 @@ int ip_route_use_hint(struct sk_buff *skb, __be32 daddr, __be32 saddr, if (rt->rt_type != RTN_LOCAL) goto skip_validate_source; - tos &= INET_DSCP_MASK; - err = fib_validate_source(skb, saddr, daddr, tos, 0, dev, in_dev, &tag); + err = fib_validate_source(skb, saddr, daddr, dscp, 0, dev, in_dev, + &tag); if (err < 0) goto martian_source; @@ -2201,7 +2199,7 @@ static struct net_device *ip_rt_get_dev(struct net *net, */ static int ip_route_input_slow(struct sk_buff *skb, __be32 daddr, __be32 saddr, - u8 tos, struct net_device *dev, + dscp_t dscp, struct net_device *dev, struct fib_result *res) { struct in_device *in_dev = __in_dev_get_rcu(dev); @@ -2266,7 +2264,7 @@ static int ip_route_input_slow(struct sk_buff *skb, __be32 daddr, __be32 saddr, fl4.flowi4_oif = 0; fl4.flowi4_iif = dev->ifindex; fl4.flowi4_mark = skb->mark; - fl4.flowi4_tos = tos; + fl4.flowi4_tos = inet_dscp_to_dsfield(dscp); fl4.flowi4_scope = RT_SCOPE_UNIVERSE; fl4.flowi4_flags = 0; fl4.daddr = daddr; @@ -2299,8 +2297,8 @@ static int ip_route_input_slow(struct sk_buff *skb, __be32 daddr, __be32 saddr, } if (res->type == RTN_LOCAL) { - err = fib_validate_source(skb, saddr, daddr, tos, - 0, dev, in_dev, &itag); + err = fib_validate_source(skb, saddr, daddr, dscp, 0, dev, + in_dev, &itag); if (err < 0) goto martian_source; goto local_input; @@ -2314,7 +2312,7 @@ static int ip_route_input_slow(struct sk_buff *skb, __be32 daddr, __be32 saddr, goto martian_destination; make_route: - err = ip_mkroute_input(skb, res, in_dev, daddr, saddr, tos, flkeys); + err = ip_mkroute_input(skb, res, in_dev, daddr, saddr, dscp, flkeys); out: return err; brd_input: @@ -2322,8 +2320,8 @@ brd_input: goto e_inval; if (!ipv4_is_zeronet(saddr)) { - err = fib_validate_source(skb, saddr, 0, tos, 0, dev, - in_dev, &itag); + err = fib_validate_source(skb, saddr, 0, dscp, 0, dev, in_dev, + &itag); if (err < 0) goto martian_source; } @@ -2415,7 +2413,8 @@ martian_source: /* called with rcu_read_lock held */ static int ip_route_input_rcu(struct sk_buff *skb, __be32 daddr, __be32 saddr, - u8 tos, struct net_device *dev, struct fib_result *res) + dscp_t dscp, struct net_device *dev, + struct fib_result *res) { /* Multicast recognition logic is moved from route cache to here. 
* The problem was that too many Ethernet cards have broken/missing @@ -2455,24 +2454,23 @@ static int ip_route_input_rcu(struct sk_buff *skb, __be32 daddr, __be32 saddr, IN_DEV_MFORWARD(in_dev)) #endif ) { - err = ip_route_input_mc(skb, daddr, saddr, - tos, dev, our); + err = ip_route_input_mc(skb, daddr, saddr, dscp, dev, + our); } return err; } - return ip_route_input_slow(skb, daddr, saddr, tos, dev, res); + return ip_route_input_slow(skb, daddr, saddr, dscp, dev, res); } int ip_route_input_noref(struct sk_buff *skb, __be32 daddr, __be32 saddr, - u8 tos, struct net_device *dev) + dscp_t dscp, struct net_device *dev) { struct fib_result res; int err; - tos &= INET_DSCP_MASK; rcu_read_lock(); - err = ip_route_input_rcu(skb, daddr, saddr, tos, dev, &res); + err = ip_route_input_rcu(skb, daddr, saddr, dscp, dev, &res); rcu_read_unlock(); return err; @@ -3286,8 +3284,8 @@ static int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh, skb->dev = dev; skb->mark = mark; err = ip_route_input_rcu(skb, dst, src, - rtm->rtm_tos & INET_DSCP_MASK, dev, - &res); + inet_dsfield_to_dscp(rtm->rtm_tos), + dev, &res); rt = skb_rtable(skb); if (err == 0 && rt->dst.error) @@ -3634,6 +3632,11 @@ static __net_initdata struct pernet_operations ipv4_inetpeer_ops = { struct ip_rt_acct __percpu *ip_rt_acct __read_mostly; #endif /* CONFIG_IP_ROUTE_CLASSID */ +static const struct rtnl_msg_handler ip_rt_rtnl_msg_handlers[] __initconst = { + {.protocol = PF_INET, .msgtype = RTM_GETROUTE, + .doit = inet_rtm_getroute, .flags = RTNL_FLAG_DOIT_UNLOCKED}, +}; + int __init ip_rt_init(void) { void *idents_hash; @@ -3691,8 +3694,7 @@ int __init ip_rt_init(void) xfrm_init(); xfrm4_init(); #endif - rtnl_register(PF_INET, RTM_GETROUTE, inet_rtm_getroute, NULL, - RTNL_FLAG_DOIT_UNLOCKED); + rtnl_register_many(ip_rt_rtnl_msg_handlers); #ifdef CONFIG_SYSCTL register_pernet_subsys(&sysctl_route_ops); diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index 4f77bd862e95..82cc4a5633ce 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c @@ -477,15 +477,16 @@ void tcp_init_sock(struct sock *sk) } EXPORT_SYMBOL(tcp_init_sock); -static void tcp_tx_timestamp(struct sock *sk, u16 tsflags) +static void tcp_tx_timestamp(struct sock *sk, struct sockcm_cookie *sockc) { struct sk_buff *skb = tcp_write_queue_tail(sk); + u32 tsflags = sockc->tsflags; if (tsflags && skb) { struct skb_shared_info *shinfo = skb_shinfo(skb); struct tcp_skb_cb *tcb = TCP_SKB_CB(skb); - sock_tx_timestamp(sk, tsflags, &shinfo->tx_flags); + sock_tx_timestamp(sk, sockc, &shinfo->tx_flags); if (tsflags & SOF_TIMESTAMPING_TX_ACK) tcb->txstamp_ack = 1; if (tsflags & SOF_TIMESTAMPING_TX_RECORD_MASK) @@ -1321,7 +1322,7 @@ wait_for_space: out: if (copied) { - tcp_tx_timestamp(sk, sockc.tsflags); + tcp_tx_timestamp(sk, &sockc); tcp_push(sk, flags, mss_now, tp->nonagle, size_goal); } out_nopush: diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index 5afe5e57c89b..9d3dd101ea71 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c @@ -907,7 +907,7 @@ static void tcp_v4_send_reset(const struct sock *sk, struct sk_buff *skb, ctl_sk->sk_mark = 0; ctl_sk->sk_priority = 0; } - ip_send_unicast_reply(ctl_sk, + ip_send_unicast_reply(ctl_sk, sk, skb, &TCP_SKB_CB(skb)->header.h4.opt, ip_hdr(skb)->saddr, ip_hdr(skb)->daddr, &arg, arg.iov[0].iov_len, @@ -1021,7 +1021,7 @@ static void tcp_v4_send_ack(const struct sock *sk, ctl_sk->sk_priority = (sk->sk_state == TCP_TIME_WAIT) ? 
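tcp_v4_send_reset()/tcp_v4_send_ack() now pass the original socket down so ip_send_unicast_reply() can attach it to the reply skb with skb_set_owner_edemux(), giving early demux a socket to steer by on the return path. The cast drops const because the reply paths only hold a const sk. A minimal sketch of the call-site pattern, assuming (as the tcp_make_synack hunk below suggests) that edemux ownership is a lightweight association rather than the wmem-charging skb_set_owner_w()::

    #include <net/sock.h>

    static void reply_attach_sk(struct sk_buff *skb, const struct sock *sk)
    {
            if (sk)
                    skb_set_owner_edemux(skb, (struct sock *)sk);
    }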
inet_twsk(sk)->tw_priority : READ_ONCE(sk->sk_priority); transmit_time = tcp_transmit_time(sk); - ip_send_unicast_reply(ctl_sk, + ip_send_unicast_reply(ctl_sk, sk, skb, &TCP_SKB_CB(skb)->header.h4.opt, ip_hdr(skb)->saddr, ip_hdr(skb)->daddr, &arg, arg.iov[0].iov_len, @@ -2900,15 +2900,17 @@ static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i) __be32 src = inet->inet_rcv_saddr; __u16 destp = ntohs(inet->inet_dport); __u16 srcp = ntohs(inet->inet_sport); + u8 icsk_pending; int rx_queue; int state; - if (icsk->icsk_pending == ICSK_TIME_RETRANS || - icsk->icsk_pending == ICSK_TIME_REO_TIMEOUT || - icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) { + icsk_pending = smp_load_acquire(&icsk->icsk_pending); + if (icsk_pending == ICSK_TIME_RETRANS || + icsk_pending == ICSK_TIME_REO_TIMEOUT || + icsk_pending == ICSK_TIME_LOSS_PROBE) { timer_active = 1; timer_expires = icsk->icsk_timeout; - } else if (icsk->icsk_pending == ICSK_TIME_PROBE0) { + } else if (icsk_pending == ICSK_TIME_PROBE0) { timer_active = 4; timer_expires = icsk->icsk_timeout; } else if (timer_pending(&sk->sk_timer)) { diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c index 68804fd01daf..054244ce5117 100644 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c @@ -2957,7 +2957,7 @@ void tcp_send_loss_probe(struct sock *sk) WARN_ONCE(tp->packets_out, "invalid inflight: %u state %u cwnd %u mss %d\n", tp->packets_out, sk->sk_state, tcp_snd_cwnd(tp), mss); - inet_csk(sk)->icsk_pending = 0; + smp_store_release(&inet_csk(sk)->icsk_pending, 0); return; } @@ -2990,7 +2990,7 @@ probe_sent: NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPLOSSPROBES); /* Reset s.t. tcp_rearm_rto will restart timer from now */ - inet_csk(sk)->icsk_pending = 0; + smp_store_release(&inet_csk(sk)->icsk_pending, 0); rearm_timer: tcp_rearm_rto(sk); } @@ -3728,7 +3728,7 @@ struct sk_buff *tcp_make_synack(const struct sock *sk, struct dst_entry *dst, switch (synack_type) { case TCP_SYNACK_NORMAL: - skb_set_owner_w(skb, req_to_sk(req)); + skb_set_owner_edemux(skb, req_to_sk(req)); break; case TCP_SYNACK_COOKIE: /* Under synflood, we do not attach skb to a socket, @@ -4131,7 +4131,10 @@ int tcp_connect(struct sock *sk) if (unlikely(!buff)) return -ENOBUFS; - tcp_init_nondata_skb(buff, tp->write_seq++, TCPHDR_SYN); + /* SYN eats a sequence byte, write_seq updated by + * tcp_connect_queue_skb(). + */ + tcp_init_nondata_skb(buff, tp->write_seq, TCPHDR_SYN); tcp_mstamp_refresh(tp); tp->retrans_stamp = tcp_time_stamp_ts(tp); tcp_connect_queue_skb(sk, buff); @@ -4221,7 +4224,8 @@ void tcp_send_delayed_ack(struct sock *sk) if (!time_before(timeout, icsk->icsk_ack.timeout)) timeout = icsk->icsk_ack.timeout; } - icsk->icsk_ack.pending |= ICSK_ACK_SCHED | ICSK_ACK_TIMER; + smp_store_release(&icsk->icsk_ack.pending, + icsk->icsk_ack.pending | ICSK_ACK_SCHED | ICSK_ACK_TIMER); icsk->icsk_ack.timeout = timeout; sk_reset_timer(sk, &icsk->icsk_delack_timer, timeout); } diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c index 79064580c8c0..b412ed88ccd9 100644 --- a/net/ipv4/tcp_timer.c +++ b/net/ipv4/tcp_timer.c @@ -361,6 +361,14 @@ static void tcp_delack_timer(struct timer_list *t) from_timer(icsk, t, icsk_delack_timer); struct sock *sk = &icsk->icsk_inet.sk; + /* Avoid taking socket spinlock if there is no ACK to send. + * The compressed_ack check is racy, but a separate hrtimer + * will take care of it eventually. 
+ */ + if (!(smp_load_acquire(&icsk->icsk_ack.pending) & ICSK_ACK_TIMER) && + !READ_ONCE(tcp_sk(sk)->compressed_ack)) + goto out; + bh_lock_sock(sk); if (!sock_owned_by_user(sk)) { tcp_delack_timer_handler(sk); @@ -371,6 +379,7 @@ static void tcp_delack_timer(struct timer_list *t) sock_hold(sk); } bh_unlock_sock(sk); +out: sock_put(sk); } @@ -701,11 +710,11 @@ void tcp_write_timer_handler(struct sock *sk) tcp_send_loss_probe(sk); break; case ICSK_TIME_RETRANS: - icsk->icsk_pending = 0; + smp_store_release(&icsk->icsk_pending, 0); tcp_retransmit_timer(sk); break; case ICSK_TIME_PROBE0: - icsk->icsk_pending = 0; + smp_store_release(&icsk->icsk_pending, 0); tcp_probe_timer(sk); break; } @@ -717,6 +726,10 @@ static void tcp_write_timer(struct timer_list *t) from_timer(icsk, t, icsk_retransmit_timer); struct sock *sk = &icsk->icsk_inet.sk; + /* Avoid locking the socket when there is no pending event. */ + if (!smp_load_acquire(&icsk->icsk_pending)) + goto out; + bh_lock_sock(sk); if (!sock_owned_by_user(sk)) { tcp_write_timer_handler(sk); @@ -726,6 +739,7 @@ static void tcp_write_timer(struct timer_list *t) sock_hold(sk); } bh_unlock_sock(sk); +out: sock_put(sk); } @@ -851,6 +865,7 @@ static enum hrtimer_restart tcp_compressed_ack_kick(struct hrtimer *timer) * LINUX_MIB_TCPACKCOMPRESSED accurate. */ tp->compressed_ack--; + tcp_mstamp_refresh(tp); tcp_send_ack(sk); } } else { diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c index 2849b273b131..0e24916b39d4 100644 --- a/net/ipv4/udp.c +++ b/net/ipv4/udp.c @@ -100,6 +100,7 @@ #include <net/net_namespace.h> #include <net/icmp.h> #include <net/inet_hashtables.h> +#include <net/ip.h> #include <net/ip_tunnels.h> #include <net/route.h> #include <net/checksum.h> @@ -115,7 +116,6 @@ #include <net/addrconf.h> #include <net/udp_tunnel.h> #include <net/gro.h> -#include <net/inet_dscp.h> #if IS_ENABLED(CONFIG_IPV6) #include <net/ipv6_stubs.h> #endif @@ -2621,7 +2621,7 @@ int udp_v4_early_demux(struct sk_buff *skb) if (!inet_sk(sk)->inet_daddr && in_dev) return ip_mc_validate_source(skb, iph->daddr, iph->saddr, - iph->tos & INET_DSCP_MASK, + ip4h_dscp(iph), skb->dev, in_dev, &itag); } return 0; diff --git a/net/ipv4/xfrm4_input.c b/net/ipv4/xfrm4_input.c index a620618cc568..b5b06323cfd9 100644 --- a/net/ipv4/xfrm4_input.c +++ b/net/ipv4/xfrm4_input.c @@ -33,7 +33,7 @@ static inline int xfrm4_rcv_encap_finish(struct net *net, struct sock *sk, const struct iphdr *iph = ip_hdr(skb); if (ip_route_input_noref(skb, iph->daddr, iph->saddr, - iph->tos, skb->dev)) + ip4h_dscp(iph), skb->dev)) goto drop; } diff --git a/net/ipv4/xfrm4_protocol.c b/net/ipv4/xfrm4_protocol.c index b146ce88c5d0..4ee624d8e66f 100644 --- a/net/ipv4/xfrm4_protocol.c +++ b/net/ipv4/xfrm4_protocol.c @@ -76,7 +76,7 @@ int xfrm4_rcv_encap(struct sk_buff *skb, int nexthdr, __be32 spi, const struct iphdr *iph = ip_hdr(skb); if (ip_route_input_noref(skb, iph->daddr, iph->saddr, - iph->tos, skb->dev)) + ip4h_dscp(iph), skb->dev)) goto drop; } diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c index 94dceac52884..d0a99710d65d 100644 --- a/net/ipv6/addrconf.c +++ b/net/ipv6/addrconf.c @@ -1016,7 +1016,7 @@ ipv6_link_dev_addr(struct inet6_dev *idev, struct inet6_ifaddr *ifp) static u32 inet6_addr_hash(const struct net *net, const struct in6_addr *addr) { - u32 val = ipv6_addr_hash(addr) ^ net_hash_mix(net); + u32 val = __ipv6_addr_jhash(addr, net_hash_mix(net)); return hash_32(val, IN6_ADDR_HSIZE_SHIFT); } @@ -7406,6 +7406,27 @@ static struct rtnl_af_ops inet6_ops __read_mostly = { .set_link_af = 
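The tcp_delack_timer()/tcp_write_timer() hunks above add a lockless bailout before bh_lock_sock(). The shape of that fast path, names hypothetical: a racy nonzero read only costs one spurious lock round-trip, while a racy zero read means the event was already cleared by the release store, so skipping straight to sock_put() is safe::

    #include <linux/timer.h>
    #include <net/sock.h>

    struct foo_sock {
            struct sock sk;
            struct timer_list timer;
            u8 pending;
    };

    static void foo_timer_handler(struct sock *sk);

    static void foo_timer(struct timer_list *t)
    {
            struct foo_sock *fs = from_timer(fs, t, timer);
            struct sock *sk = &fs->sk;

            /* Pairs with smp_store_release(&fs->pending, 0) in the
             * handler; no socket lock taken when nothing is armed. */
            if (!smp_load_acquire(&fs->pending))
                    goto out;

            bh_lock_sock(sk);
            if (!sock_owned_by_user(sk))
                    foo_timer_handler(sk);
            bh_unlock_sock(sk);
    out:
            sock_put(sk);
    }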
inet6_set_link_af, }; +static const struct rtnl_msg_handler addrconf_rtnl_msg_handlers[] __initconst_or_module = { + {.owner = THIS_MODULE, .protocol = PF_INET6, .msgtype = RTM_GETLINK, + .dumpit = inet6_dump_ifinfo, .flags = RTNL_FLAG_DUMP_UNLOCKED}, + {.owner = THIS_MODULE, .protocol = PF_INET6, .msgtype = RTM_NEWADDR, + .doit = inet6_rtm_newaddr}, + {.owner = THIS_MODULE, .protocol = PF_INET6, .msgtype = RTM_DELADDR, + .doit = inet6_rtm_deladdr}, + {.owner = THIS_MODULE, .protocol = PF_INET6, .msgtype = RTM_GETADDR, + .doit = inet6_rtm_getaddr, .dumpit = inet6_dump_ifaddr, + .flags = RTNL_FLAG_DOIT_UNLOCKED | RTNL_FLAG_DUMP_UNLOCKED}, + {.owner = THIS_MODULE, .protocol = PF_INET6, .msgtype = RTM_GETMULTICAST, + .dumpit = inet6_dump_ifmcaddr, + .flags = RTNL_FLAG_DUMP_UNLOCKED}, + {.owner = THIS_MODULE, .protocol = PF_INET6, .msgtype = RTM_GETANYCAST, + .dumpit = inet6_dump_ifacaddr, + .flags = RTNL_FLAG_DUMP_UNLOCKED}, + {.owner = THIS_MODULE, .protocol = PF_INET6, .msgtype = RTM_GETNETCONF, + .doit = inet6_netconf_get_devconf, .dumpit = inet6_netconf_dump_devconf, + .flags = RTNL_FLAG_DOIT_UNLOCKED | RTNL_FLAG_DUMP_UNLOCKED}, +}; + /* * Init / cleanup code */ @@ -7447,44 +7468,14 @@ int __init addrconf_init(void) addrconf_verify(&init_net); - rtnl_af_register(&inet6_ops); + err = rtnl_af_register(&inet6_ops); + if (err) + goto erraf; - err = rtnl_register_module(THIS_MODULE, PF_INET6, RTM_GETLINK, - NULL, inet6_dump_ifinfo, RTNL_FLAG_DUMP_UNLOCKED); - if (err < 0) + err = rtnl_register_many(addrconf_rtnl_msg_handlers); + if (err) goto errout; - err = rtnl_register_module(THIS_MODULE, PF_INET6, RTM_NEWADDR, - inet6_rtm_newaddr, NULL, 0); - if (err < 0) - goto errout; - err = rtnl_register_module(THIS_MODULE, PF_INET6, RTM_DELADDR, - inet6_rtm_deladdr, NULL, 0); - if (err < 0) - goto errout; - err = rtnl_register_module(THIS_MODULE, PF_INET6, RTM_GETADDR, - inet6_rtm_getaddr, inet6_dump_ifaddr, - RTNL_FLAG_DOIT_UNLOCKED | - RTNL_FLAG_DUMP_UNLOCKED); - if (err < 0) - goto errout; - err = rtnl_register_module(THIS_MODULE, PF_INET6, RTM_GETMULTICAST, - NULL, inet6_dump_ifmcaddr, - RTNL_FLAG_DUMP_UNLOCKED); - if (err < 0) - goto errout; - err = rtnl_register_module(THIS_MODULE, PF_INET6, RTM_GETANYCAST, - NULL, inet6_dump_ifacaddr, - RTNL_FLAG_DUMP_UNLOCKED); - if (err < 0) - goto errout; - err = rtnl_register_module(THIS_MODULE, PF_INET6, RTM_GETNETCONF, - inet6_netconf_get_devconf, - inet6_netconf_dump_devconf, - RTNL_FLAG_DOIT_UNLOCKED | - RTNL_FLAG_DUMP_UNLOCKED); - if (err < 0) - goto errout; err = ipv6_addr_label_rtnl_register(); if (err < 0) goto errout; @@ -7493,6 +7484,7 @@ int __init addrconf_init(void) errout: rtnl_unregister_all(PF_INET6); rtnl_af_unregister(&inet6_ops); +erraf: unregister_netdevice_notifier(&ipv6_dev_notf); errlo: destroy_workqueue(addrconf_wq); diff --git a/net/ipv6/addrlabel.c b/net/ipv6/addrlabel.c index acd70b5992a7..ab054f329e12 100644 --- a/net/ipv6/addrlabel.c +++ b/net/ipv6/addrlabel.c @@ -634,23 +634,17 @@ static int ip6addrlbl_get(struct sk_buff *in_skb, struct nlmsghdr *nlh, return err; } +static const struct rtnl_msg_handler ipv6_adddr_label_rtnl_msg_handlers[] __initconst_or_module = { + {.owner = THIS_MODULE, .protocol = PF_INET6, .msgtype = RTM_NEWADDRLABEL, + .doit = ip6addrlbl_newdel, .flags = RTNL_FLAG_DOIT_UNLOCKED}, + {.owner = THIS_MODULE, .protocol = PF_INET6, .msgtype = RTM_DELADDRLABEL, + .doit = ip6addrlbl_newdel, .flags = RTNL_FLAG_DOIT_UNLOCKED}, + {.owner = THIS_MODULE, .protocol = PF_INET6, .msgtype = RTM_GETADDRLABEL, + .doit = 
ip6addrlbl_get, .dumpit = ip6addrlbl_dump, + .flags = RTNL_FLAG_DOIT_UNLOCKED | RTNL_FLAG_DUMP_UNLOCKED}, +}; + int __init ipv6_addr_label_rtnl_register(void) { - int ret; - - ret = rtnl_register_module(THIS_MODULE, PF_INET6, RTM_NEWADDRLABEL, - ip6addrlbl_newdel, - NULL, RTNL_FLAG_DOIT_UNLOCKED); - if (ret < 0) - return ret; - ret = rtnl_register_module(THIS_MODULE, PF_INET6, RTM_DELADDRLABEL, - ip6addrlbl_newdel, - NULL, RTNL_FLAG_DOIT_UNLOCKED); - if (ret < 0) - return ret; - ret = rtnl_register_module(THIS_MODULE, PF_INET6, RTM_GETADDRLABEL, - ip6addrlbl_get, - ip6addrlbl_dump, RTNL_FLAG_DOIT_UNLOCKED | - RTNL_FLAG_DUMP_UNLOCKED); - return ret; + return rtnl_register_many(ipv6_adddr_label_rtnl_msg_handlers); } diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c index ba69b86f1c7d..f60ec8b0f8ea 100644 --- a/net/ipv6/af_inet6.c +++ b/net/ipv6/af_inet6.c @@ -252,31 +252,29 @@ lookup_protocol: */ inet->inet_sport = htons(inet->inet_num); err = sk->sk_prot->hash(sk); - if (err) { - sk_common_release(sk); - goto out; - } + if (err) + goto out_sk_release; } if (sk->sk_prot->init) { err = sk->sk_prot->init(sk); - if (err) { - sk_common_release(sk); - goto out; - } + if (err) + goto out_sk_release; } if (!kern) { err = BPF_CGROUP_RUN_PROG_INET_SOCK(sk); - if (err) { - sk_common_release(sk); - goto out; - } + if (err) + goto out_sk_release; } out: return err; out_rcu_unlock: rcu_read_unlock(); goto out; +out_sk_release: + sk_common_release(sk); + sock->sk = NULL; + goto out; } static int __inet6_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len, diff --git a/net/ipv6/anycast.c b/net/ipv6/anycast.c index 0627c4c18d1a..562cace50ca9 100644 --- a/net/ipv6/anycast.c +++ b/net/ipv6/anycast.c @@ -49,9 +49,10 @@ static DEFINE_SPINLOCK(acaddr_hash_lock); static int ipv6_dev_ac_dec(struct net_device *dev, const struct in6_addr *addr); -static u32 inet6_acaddr_hash(struct net *net, const struct in6_addr *addr) +static u32 inet6_acaddr_hash(const struct net *net, + const struct in6_addr *addr) { - u32 val = ipv6_addr_hash(addr) ^ net_hash_mix(net); + u32 val = __ipv6_addr_jhash(addr, net_hash_mix(net)); return hash_32(val, IN6_ADDR_HSIZE_SHIFT); } diff --git a/net/ipv6/fib6_notifier.c b/net/ipv6/fib6_notifier.c index f87ae33e1d01..949b72610df7 100644 --- a/net/ipv6/fib6_notifier.c +++ b/net/ipv6/fib6_notifier.c @@ -22,7 +22,7 @@ int call_fib6_notifiers(struct net *net, enum fib_event_type event_type, return call_fib_notifiers(net, event_type, info); } -static unsigned int fib6_seq_read(struct net *net) +static unsigned int fib6_seq_read(const struct net *net) { return fib6_tables_seq_read(net) + fib6_rules_seq_read(net); } diff --git a/net/ipv6/fib6_rules.c b/net/ipv6/fib6_rules.c index 04a9ed5e8310..c85c1627cb16 100644 --- a/net/ipv6/fib6_rules.c +++ b/net/ipv6/fib6_rules.c @@ -56,7 +56,7 @@ int fib6_rules_dump(struct net *net, struct notifier_block *nb, return fib_rules_dump(net, nb, AF_INET6, extack); } -unsigned int fib6_rules_seq_read(struct net *net) +unsigned int fib6_rules_seq_read(const struct net *net) { return fib_rules_seq_read(net, AF_INET6); } diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c index eb111d20615c..6383263bfd04 100644 --- a/net/ipv6/ip6_fib.c +++ b/net/ipv6/ip6_fib.c @@ -198,16 +198,9 @@ static void node_free_immediate(struct net *net, struct fib6_node *fn) net->ipv6.rt6_stats->fib_nodes--; } -static void node_free_rcu(struct rcu_head *head) -{ - struct fib6_node *fn = container_of(head, struct fib6_node, rcu); - - kmem_cache_free(fib6_node_kmem, fn); -} - static 
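inet6_acaddr_hash() gets the same treatment as inet6_addr_hash() earlier in this series: rather than xor-ing net_hash_mix() into an unseeded fold of the address, the netns mix becomes the jhash seed, so bucket placement is keyed per namespace instead of merely offset, and addresses crafted to fold to equal values no longer pile into one chain. The resulting shape::

    #include <linux/hash.h>
    #include <net/addrconf.h>
    #include <net/ipv6.h>
    #include <net/netns/hash.h>

    static u32 addr_bucket(const struct net *net, const struct in6_addr *addr)
    {
            /* jhash over all four address words, netns mix as seed */
            u32 val = __ipv6_addr_jhash(addr, net_hash_mix(net));

            return hash_32(val, IN6_ADDR_HSIZE_SHIFT);
    }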
void node_free(struct net *net, struct fib6_node *fn) { - call_rcu(&fn->rcu, node_free_rcu); + kfree_rcu(fn, rcu); net->ipv6.rt6_stats->fib_nodes--; } @@ -345,17 +338,17 @@ static void __net_init fib6_tables_init(struct net *net) #endif -unsigned int fib6_tables_seq_read(struct net *net) +unsigned int fib6_tables_seq_read(const struct net *net) { unsigned int h, fib_seq = 0; rcu_read_lock(); for (h = 0; h < FIB6_TABLE_HASHSZ; h++) { - struct hlist_head *head = &net->ipv6.fib_table_hash[h]; - struct fib6_table *tb; + const struct hlist_head *head = &net->ipv6.fib_table_hash[h]; + const struct fib6_table *tb; hlist_for_each_entry_rcu(tb, head, tb6_hlist) - fib_seq += tb->fib_seq; + fib_seq += READ_ONCE(tb->fib_seq); } rcu_read_unlock(); @@ -400,7 +393,7 @@ int call_fib6_entry_notifiers(struct net *net, .rt = rt, }; - rt->fib6_table->fib_seq++; + WRITE_ONCE(rt->fib6_table->fib_seq, rt->fib6_table->fib_seq + 1); return call_fib6_notifiers(net, event_type, &info.info); } @@ -416,7 +409,7 @@ int call_fib6_multipath_entry_notifiers(struct net *net, .nsiblings = nsiblings, }; - rt->fib6_table->fib_seq++; + WRITE_ONCE(rt->fib6_table->fib_seq, rt->fib6_table->fib_seq + 1); return call_fib6_notifiers(net, event_type, &info.info); } @@ -427,7 +420,7 @@ int call_fib6_entry_notifiers_replace(struct net *net, struct fib6_info *rt) .nsiblings = rt->fib6_nsiblings, }; - rt->fib6_table->fib_seq++; + WRITE_ONCE(rt->fib6_table->fib_seq, rt->fib6_table->fib_seq + 1); return call_fib6_notifiers(net, FIB_EVENT_ENTRY_REPLACE, &info.info); } @@ -2500,6 +2493,12 @@ static struct pernet_operations fib6_net_ops = { .exit = fib6_net_exit, }; +static const struct rtnl_msg_handler fib6_rtnl_msg_handlers[] __initconst_or_module = { + {.owner = THIS_MODULE, .protocol = PF_INET6, .msgtype = RTM_GETROUTE, + .dumpit = inet6_dump_fib, + .flags = RTNL_FLAG_DUMP_UNLOCKED | RTNL_FLAG_DUMP_SPLIT_NLM_DONE}, +}; + int __init fib6_init(void) { int ret = -ENOMEM; @@ -2513,9 +2512,7 @@ int __init fib6_init(void) if (ret) goto out_kmem_cache_create; - ret = rtnl_register_module(THIS_MODULE, PF_INET6, RTM_GETROUTE, NULL, - inet6_dump_fib, RTNL_FLAG_DUMP_UNLOCKED | - RTNL_FLAG_DUMP_SPLIT_NLM_DONE); + ret = rtnl_register_many(fib6_rtnl_msg_handlers); if (ret) goto out_unregister_subsys; diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c index f26841f1490f..f7b4608bb316 100644 --- a/net/ipv6/ip6_output.c +++ b/net/ipv6/ip6_output.c @@ -127,7 +127,7 @@ static int ip6_finish_output2(struct net *net, struct sock *sk, struct sk_buff * nexthop = rt6_nexthop(dst_rt6_info(dst), daddr); neigh = __ipv6_neigh_lookup_noref(dev, nexthop); - if (unlikely(IS_ERR_OR_NULL(neigh))) { + if (IS_ERR_OR_NULL(neigh)) { if (unlikely(!neigh)) neigh = __neigh_create(&nd_tbl, nexthop, dev, false); if (IS_ERR(neigh)) { @@ -1401,8 +1401,11 @@ static int ip6_setup_cork(struct sock *sk, struct inet_cork_full *cork, cork->base.gso_size = ipc6->gso_size; cork->base.tx_flags = 0; cork->base.mark = ipc6->sockc.mark; - sock_tx_timestamp(sk, ipc6->sockc.tsflags, &cork->base.tx_flags); - + sock_tx_timestamp(sk, &ipc6->sockc, &cork->base.tx_flags); + if (ipc6->sockc.tsflags & SOCKCM_FLAG_TS_OPT_ID) { + cork->base.flags |= IPCORK_TS_OPT_ID; + cork->base.ts_opt_id = ipc6->sockc.ts_opt_id; + } cork->base.length = 0; cork->base.transmit_time = ipc6->sockc.transmit_time; @@ -1433,7 +1436,7 @@ static int __ip6_append_data(struct sock *sk, bool zc = false; u32 tskey = 0; struct rt6_info *rt = dst_rt6_info(cork->dst); - bool paged, hold_tskey, extra_uref = false; + bool paged, 
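fib_seq writers still run under RTNL, so a plain increment cannot lose updates; the WRITE_ONCE()/READ_ONCE() pairs exist because fib6_tables_seq_read() may now run without RTNL (note the ASSERT_RTNL() dropped from ipmr_seq_read() earlier) and the annotations mark those reads as intentionally lockless. Reduced to its essentials, with a hypothetical table type::

    struct foo_table {
            unsigned int fib_seq;
    };

    /* Writer side, RTNL held: serialized, annotated for KCSAN. */
    static void foo_table_bump(struct foo_table *t)
    {
            WRITE_ONCE(t->fib_seq, t->fib_seq + 1);
    }

    /* Reader side, possibly without RTNL: a single marked load. */
    static unsigned int foo_table_seq(const struct foo_table *t)
    {
            return READ_ONCE(t->fib_seq);
    }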
hold_tskey = false, extra_uref = false; struct ipv6_txoptions *opt = v6_cork->opt; int csummode = CHECKSUM_NONE; unsigned int maxnonfragsize, headersize; @@ -1543,10 +1546,15 @@ emsgsize: flags &= ~MSG_SPLICE_PAGES; } - hold_tskey = cork->tx_flags & SKBTX_ANY_TSTAMP && - READ_ONCE(sk->sk_tsflags) & SOF_TIMESTAMPING_OPT_ID; - if (hold_tskey) - tskey = atomic_inc_return(&sk->sk_tskey) - 1; + if (cork->tx_flags & SKBTX_ANY_TSTAMP && + READ_ONCE(sk->sk_tsflags) & SOF_TIMESTAMPING_OPT_ID) { + if (cork->flags & IPCORK_TS_OPT_ID) { + tskey = cork->ts_opt_id; + } else { + tskey = atomic_inc_return(&sk->sk_tskey) - 1; + hold_tskey = true; + } + } /* * Let's try using as much space as possible. diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c index b60e13c42bca..48fd53b98972 100644 --- a/net/ipv6/ip6_tunnel.c +++ b/net/ipv6/ip6_tunnel.c @@ -630,8 +630,8 @@ ip4ip6_err(struct sk_buff *skb, struct inet6_skb_parm *opt, } skb_dst_set(skb2, &rt->dst); } else { - if (ip_route_input(skb2, eiph->daddr, eiph->saddr, eiph->tos, - skb2->dev) || + if (ip_route_input(skb2, eiph->daddr, eiph->saddr, + ip4h_dscp(eiph), skb2->dev) || skb_dst(skb2)->dev->type != ARPHRD_TUNNEL6) goto out; } diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c index 2ce4ae0d8dc3..8add0f45aa52 100644 --- a/net/ipv6/ip6mr.c +++ b/net/ipv6/ip6mr.c @@ -276,7 +276,7 @@ static int ip6mr_rules_dump(struct net *net, struct notifier_block *nb, return fib_rules_dump(net, nb, RTNL_FAMILY_IP6MR, extack); } -static unsigned int ip6mr_rules_seq_read(struct net *net) +static unsigned int ip6mr_rules_seq_read(const struct net *net) { return fib_rules_seq_read(net, RTNL_FAMILY_IP6MR); } @@ -335,7 +335,7 @@ static int ip6mr_rules_dump(struct net *net, struct notifier_block *nb, return 0; } -static unsigned int ip6mr_rules_seq_read(struct net *net) +static unsigned int ip6mr_rules_seq_read(const struct net *net) { return 0; } @@ -1260,11 +1260,9 @@ static int ip6mr_device_event(struct notifier_block *this, return NOTIFY_DONE; } -static unsigned int ip6mr_seq_read(struct net *net) +static unsigned int ip6mr_seq_read(const struct net *net) { - ASSERT_RTNL(); - - return net->ipv6.ipmr_seq + ip6mr_rules_seq_read(net); + return READ_ONCE(net->ipv6.ipmr_seq) + ip6mr_rules_seq_read(net); } static int ip6mr_dump(struct net *net, struct notifier_block *nb, @@ -1369,6 +1367,12 @@ static struct pernet_operations ip6mr_net_ops = { .exit_batch = ip6mr_net_exit_batch, }; +static const struct rtnl_msg_handler ip6mr_rtnl_msg_handlers[] __initconst_or_module = { + {.owner = THIS_MODULE, .protocol = RTNL_FAMILY_IP6MR, + .msgtype = RTM_GETROUTE, + .doit = ip6mr_rtm_getroute, .dumpit = ip6mr_rtm_dumproute}, +}; + int __init ip6_mr_init(void) { int err; @@ -1391,9 +1395,8 @@ int __init ip6_mr_init(void) goto add_proto_fail; } #endif - err = rtnl_register_module(THIS_MODULE, RTNL_FAMILY_IP6MR, RTM_GETROUTE, - ip6mr_rtm_getroute, ip6mr_rtm_dumproute, 0); - if (err == 0) + err = rtnl_register_many(ip6mr_rtnl_msg_handlers); + if (!err) return 0; #ifdef CONFIG_IPV6_PIMSM_V2 @@ -1408,9 +1411,9 @@ reg_pernet_fail: return err; } -void ip6_mr_cleanup(void) +void __init ip6_mr_cleanup(void) { - rtnl_unregister(RTNL_FAMILY_IP6MR, RTM_GETROUTE); + rtnl_unregister_many(ip6mr_rtnl_msg_handlers); #ifdef CONFIG_IPV6_PIMSM_V2 inet6_del_protocol(&pim6_protocol, IPPROTO_PIM); #endif diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c index 608fa9d05b55..8476a3944a88 100644 --- a/net/ipv6/raw.c +++ b/net/ipv6/raw.c @@ -629,7 +629,7 @@ static int rawv6_send_hdrinc(struct sock *sk, struct msghdr 
*msg, int length, skb->ip_summed = CHECKSUM_NONE; - skb_setup_tx_timestamp(skb, sockc->tsflags); + skb_setup_tx_timestamp(skb, sockc); if (flags & MSG_CONFIRM) skb_set_dst_pending_confirm(skb, 1); diff --git a/net/ipv6/route.c b/net/ipv6/route.c index b4251915585f..d7ce5cf2017a 100644 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c @@ -6680,6 +6680,15 @@ static void bpf_iter_unregister(void) #endif #endif +static const struct rtnl_msg_handler ip6_route_rtnl_msg_handlers[] __initconst_or_module = { + {.owner = THIS_MODULE, .protocol = PF_INET6, .msgtype = RTM_NEWROUTE, + .doit = inet6_rtm_newroute}, + {.owner = THIS_MODULE, .protocol = PF_INET6, .msgtype = RTM_DELROUTE, + .doit = inet6_rtm_delroute}, + {.owner = THIS_MODULE, .protocol = PF_INET6, .msgtype = RTM_GETROUTE, + .doit = inet6_rtm_getroute, .flags = RTNL_FLAG_DOIT_UNLOCKED}, +}; + int __init ip6_route_init(void) { int ret; @@ -6722,19 +6731,7 @@ int __init ip6_route_init(void) if (ret) goto fib6_rules_init; - ret = rtnl_register_module(THIS_MODULE, PF_INET6, RTM_NEWROUTE, - inet6_rtm_newroute, NULL, 0); - if (ret < 0) - goto out_register_late_subsys; - - ret = rtnl_register_module(THIS_MODULE, PF_INET6, RTM_DELROUTE, - inet6_rtm_delroute, NULL, 0); - if (ret < 0) - goto out_register_late_subsys; - - ret = rtnl_register_module(THIS_MODULE, PF_INET6, RTM_GETROUTE, - inet6_rtm_getroute, NULL, - RTNL_FLAG_DOIT_UNLOCKED); + ret = rtnl_register_many(ip6_route_rtnl_msg_handlers); if (ret < 0) goto out_register_late_subsys; diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c index d71ab4e1efe1..597920061a3a 100644 --- a/net/ipv6/tcp_ipv6.c +++ b/net/ipv6/tcp_ipv6.c @@ -967,6 +967,9 @@ static void tcp_v6_send_response(const struct sock *sk, struct sk_buff *skb, u32 } if (sk) { + /* unconstify the socket only to attach it to buff with care. 
*/ + skb_set_owner_edemux(buff, (struct sock *)sk); + if (sk->sk_state == TCP_TIME_WAIT) mark = inet_twsk(sk)->tw_mark; else @@ -2177,6 +2180,7 @@ static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i) const struct tcp_sock *tp = tcp_sk(sp); const struct inet_connection_sock *icsk = inet_csk(sp); const struct fastopen_queue *fastopenq = &icsk->icsk_accept_queue.fastopenq; + u8 icsk_pending; int rx_queue; int state; @@ -2185,12 +2189,13 @@ static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i) destp = ntohs(inet->inet_dport); srcp = ntohs(inet->inet_sport); - if (icsk->icsk_pending == ICSK_TIME_RETRANS || - icsk->icsk_pending == ICSK_TIME_REO_TIMEOUT || - icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) { + icsk_pending = smp_load_acquire(&icsk->icsk_pending); + if (icsk_pending == ICSK_TIME_RETRANS || + icsk_pending == ICSK_TIME_REO_TIMEOUT || + icsk_pending == ICSK_TIME_LOSS_PROBE) { timer_active = 1; timer_expires = icsk->icsk_timeout; - } else if (icsk->icsk_pending == ICSK_TIME_PROBE0) { + } else if (icsk_pending == ICSK_TIME_PROBE0) { timer_active = 4; timer_expires = icsk->icsk_timeout; } else if (timer_pending(&sp->sk_timer)) { diff --git a/net/kcm/kcmsock.c b/net/kcm/kcmsock.c index d4118c796290..24aec295a51c 100644 --- a/net/kcm/kcmsock.c +++ b/net/kcm/kcmsock.c @@ -1584,14 +1584,6 @@ static int kcm_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) return err; } -static void free_mux(struct rcu_head *rcu) -{ - struct kcm_mux *mux = container_of(rcu, - struct kcm_mux, rcu); - - kmem_cache_free(kcm_muxp, mux); -} - static void release_mux(struct kcm_mux *mux) { struct kcm_net *knet = mux->knet; @@ -1619,7 +1611,7 @@ static void release_mux(struct kcm_mux *mux) knet->count--; mutex_unlock(&knet->mutex); - call_rcu(&mux->rcu, free_mux); + kfree_rcu(mux, rcu); } static void kcm_done(struct kcm_sock *kcm) diff --git a/net/mctp/device.c b/net/mctp/device.c index 85cc5f31f1e7..3d75b919995d 100644 --- a/net/mctp/device.c +++ b/net/mctp/device.c @@ -535,14 +535,20 @@ int __init mctp_device_init(void) int err; register_netdevice_notifier(&mctp_dev_nb); - rtnl_af_register(&mctp_af_ops); + + err = rtnl_af_register(&mctp_af_ops); + if (err) + goto err_notifier; err = rtnl_register_many(mctp_device_rtnl_msg_handlers); - if (err) { - rtnl_af_unregister(&mctp_af_ops); - unregister_netdevice_notifier(&mctp_dev_nb); - } + if (err) + goto err_af; + return 0; +err_af: + rtnl_af_unregister(&mctp_af_ops); +err_notifier: + unregister_netdevice_notifier(&mctp_dev_nb); return err; } diff --git a/net/mpls/af_mpls.c b/net/mpls/af_mpls.c index df62638b6498..1f63b32d76d6 100644 --- a/net/mpls/af_mpls.c +++ b/net/mpls/af_mpls.c @@ -1664,7 +1664,7 @@ static int nla_put_via(struct sk_buff *skb, u8 table, const void *addr, int alen) { static const int table_to_family[NEIGH_NR_TABLES + 1] = { - AF_INET, AF_INET6, AF_DECnet, AF_PACKET, + AF_INET, AF_INET6, AF_PACKET, }; struct nlattr *nla; struct rtvia *via; @@ -2753,7 +2753,9 @@ static int __init mpls_init(void) dev_add_pack(&mpls_packet_type); - rtnl_af_register(&mpls_af_ops); + err = rtnl_af_register(&mpls_af_ops); + if (err) + goto out_unregister_dev_type; err = rtnl_register_many(mpls_rtnl_msg_handlers); if (err) @@ -2773,6 +2775,7 @@ out_unregister_rtnl: rtnl_unregister_many(mpls_rtnl_msg_handlers); out_unregister_rtnl_af: rtnl_af_unregister(&mpls_af_ops); +out_unregister_dev_type: dev_remove_pack(&mpls_packet_type); out_unregister_pernet: unregister_pernet_subsys(&mpls_net_ops); diff --git a/net/mptcp/mptcp_pm_gen.c 
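mctp_device_init() above (and mpls_init() just below) now checks rtnl_af_register(), whose return value was previously ignored, and unwinds in strict reverse order through labelled exits. The canonical ladder, names hypothetical::

    #include <linux/netdevice.h>
    #include <net/rtnetlink.h>

    static struct notifier_block foo_notifier;  /* placeholder */
    static struct rtnl_af_ops foo_af_ops;       /* placeholder */

    static int foo_dump(struct sk_buff *skb, struct netlink_callback *cb);

    static const struct rtnl_msg_handler foo_handlers[] __initconst = {
            {.msgtype = RTM_GETROUTE, .dumpit = foo_dump},
    };

    static int __init foo_init(void)
    {
            int err;

            err = register_netdevice_notifier(&foo_notifier);
            if (err)
                    return err;

            err = rtnl_af_register(&foo_af_ops);
            if (err)
                    goto err_notifier;

            err = rtnl_register_many(foo_handlers);
            if (err)
                    goto err_af;

            return 0;

    err_af:
            rtnl_af_unregister(&foo_af_ops);
    err_notifier:
            unregister_netdevice_notifier(&foo_notifier);
            return err;
    }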
b/net/mptcp/mptcp_pm_gen.c index c30a2a90a192..5a6b2b4510d3 100644 --- a/net/mptcp/mptcp_pm_gen.c +++ b/net/mptcp/mptcp_pm_gen.c @@ -14,7 +14,7 @@ const struct nla_policy mptcp_pm_address_nl_policy[MPTCP_PM_ADDR_ATTR_IF_IDX + 1] = { [MPTCP_PM_ADDR_ATTR_FAMILY] = { .type = NLA_U16, }, [MPTCP_PM_ADDR_ATTR_ID] = { .type = NLA_U8, }, - [MPTCP_PM_ADDR_ATTR_ADDR4] = { .type = NLA_U32, }, + [MPTCP_PM_ADDR_ATTR_ADDR4] = { .type = NLA_BE32, }, [MPTCP_PM_ADDR_ATTR_ADDR6] = NLA_POLICY_EXACT_LEN(16), [MPTCP_PM_ADDR_ATTR_PORT] = { .type = NLA_U16, }, [MPTCP_PM_ADDR_ATTR_FLAGS] = { .type = NLA_U32, }, diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c index 6d0e201c3eb2..1f5c63eb21f0 100644 --- a/net/mptcp/protocol.c +++ b/net/mptcp/protocol.c @@ -3522,7 +3522,8 @@ static void schedule_3rdack_retransmission(struct sock *ssk) timeout += jiffies; WARN_ON_ONCE(icsk->icsk_ack.pending & ICSK_ACK_TIMER); - icsk->icsk_ack.pending |= ICSK_ACK_SCHED | ICSK_ACK_TIMER; + smp_store_release(&icsk->icsk_ack.pending, + icsk->icsk_ack.pending | ICSK_ACK_SCHED | ICSK_ACK_TIMER); icsk->icsk_ack.timeout = timeout; sk_reset_timer(ssk, &icsk->icsk_delack_timer, timeout); } diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c index 6a1239433830..36168f8b6efa 100644 --- a/net/netfilter/nf_conntrack_netlink.c +++ b/net/netfilter/nf_conntrack_netlink.c @@ -3870,7 +3870,7 @@ static int __init ctnetlink_init(void) { int ret; - NL_ASSERT_DUMP_CTX_FITS(struct ctnetlink_list_dump_ctx); + NL_ASSERT_CTX_FITS(struct ctnetlink_list_dump_ctx); ret = nfnetlink_subsys_register(&ctnl_subsys); if (ret < 0) { diff --git a/net/netlink/genetlink.c b/net/netlink/genetlink.c index 07ad65774fe2..104732d34543 100644 --- a/net/netlink/genetlink.c +++ b/net/netlink/genetlink.c @@ -997,7 +997,7 @@ static int genl_start(struct netlink_callback *cb) info->info.attrs = attrs; genl_info_net_set(&info->info, sock_net(cb->skb->sk)); info->info.extack = cb->extack; - memset(&info->info.user_ptr, 0, sizeof(info->info.user_ptr)); + memset(&info->info.ctx, 0, sizeof(info->info.ctx)); cb->data = info; if (ops->start) { @@ -1104,7 +1104,7 @@ static int genl_family_rcv_msg_doit(const struct genl_family *family, info.attrs = attrbuf; info.extack = extack; genl_info_net_set(&info, net); - memset(&info.user_ptr, 0, sizeof(info.user_ptr)); + memset(&info.ctx, 0, sizeof(info.ctx)); if (ops->pre_doit) { err = ops->pre_doit(ops, skb, &info); diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c index a705ec214254..886c0dd47b66 100644 --- a/net/packet/af_packet.c +++ b/net/packet/af_packet.c @@ -1846,21 +1846,22 @@ static int fanout_add(struct sock *sk, struct fanout_args *args) err = -EINVAL; spin_lock(&po->bind_lock); - if (packet_sock_flag(po, PACKET_SOCK_RUNNING) && + if (po->num && match->type == type && match->prot_hook.type == po->prot_hook.type && match->prot_hook.dev == po->prot_hook.dev) { err = -ENOSPC; if (refcount_read(&match->sk_ref) < match->max_num_members) { - __dev_remove_pack(&po->prot_hook); - /* Paired with packet_setsockopt(PACKET_FANOUT_DATA) */ WRITE_ONCE(po->fanout, match); po->rollover = rollover; rollover = NULL; refcount_set(&match->sk_ref, refcount_read(&match->sk_ref) + 1); - __fanout_link(sk, po); + if (packet_sock_flag(po, PACKET_SOCK_RUNNING)) { + __dev_remove_pack(&po->prot_hook); + __fanout_link(sk, po); + } err = 0; } } @@ -2118,7 +2119,7 @@ retry: skb->priority = READ_ONCE(sk->sk_priority); skb->mark = READ_ONCE(sk->sk_mark); skb_set_delivery_type_by_clockid(skb, sockc.transmit_time, 
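MPTCP_PM_ADDR_ATTR_ADDR4 joins the fou ports converted earlier: attributes the code reads with nla_get_be16()/nla_get_be32() are now typed NLA_BE16/NLA_BE32 in the policy. The payload size is unchanged; what improves is that policy dumps advertise the byte order to userspace, and byte-order-aware range validation (NLA_POLICY_MAX_BE() and friends, where needed) becomes possible. Sketch with hypothetical attributes::

    #include <net/netlink.h>

    enum {
            FOO_ATTR_UNSPEC,
            FOO_ATTR_PORT,          /* __be16 on the wire */
            FOO_ATTR_ADDR4,         /* __be32, an IPv4 address */
            __FOO_ATTR_MAX,
    };

    static const struct nla_policy foo_nl_policy[__FOO_ATTR_MAX] = {
            [FOO_ATTR_PORT]  = { .type = NLA_BE16, },
            [FOO_ATTR_ADDR4] = { .type = NLA_BE32, },
    };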
sk->sk_clockid); - skb_setup_tx_timestamp(skb, sockc.tsflags); + skb_setup_tx_timestamp(skb, &sockc); if (unlikely(extra_len == 4)) skb->no_fcs = 1; @@ -2650,7 +2651,7 @@ static int tpacket_fill_skb(struct packet_sock *po, struct sk_buff *skb, skb->priority = READ_ONCE(po->sk.sk_priority); skb->mark = READ_ONCE(po->sk.sk_mark); skb_set_delivery_type_by_clockid(skb, sockc->transmit_time, po->sk.sk_clockid); - skb_setup_tx_timestamp(skb, sockc->tsflags); + skb_setup_tx_timestamp(skb, sockc); skb_zcopy_set_nouarg(skb, ph.raw); skb_reserve(skb, hlen); @@ -3115,7 +3116,7 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len) goto out_free; } - skb_setup_tx_timestamp(skb, sockc.tsflags); + skb_setup_tx_timestamp(skb, &sockc); if (!vnet_hdr.gso_type && (len > dev->mtu + reserve + extra_len) && !packet_extra_vlan_len_allowed(dev, skb)) { @@ -3421,17 +3422,17 @@ static int packet_create(struct net *net, struct socket *sock, int protocol, if (sock->type == SOCK_PACKET) sock->ops = &packet_ops_spkt; + po = pkt_sk(sk); + err = packet_alloc_pending(po); + if (err) + goto out_sk_free; + sock_init_data(sock, sk); - po = pkt_sk(sk); init_completion(&po->skb_completion); sk->sk_family = PF_PACKET; po->num = proto; - err = packet_alloc_pending(po); - if (err) - goto out2; - packet_cached_dev_reset(po); sk->sk_destruct = packet_sock_destruct; @@ -3463,7 +3464,7 @@ static int packet_create(struct net *net, struct socket *sock, int protocol, sock_prot_inuse_add(net, &packet_proto, 1); return 0; -out2: +out_sk_free: sk_free(sk); out: return err; diff --git a/net/phonet/pn_dev.c b/net/phonet/pn_dev.c index cde671d29d5d..19234d664c4f 100644 --- a/net/phonet/pn_dev.c +++ b/net/phonet/pn_dev.c @@ -22,7 +22,7 @@ #include <net/phonet/pn_dev.h> struct phonet_routes { - struct mutex lock; + spinlock_t lock; struct net_device __rcu *table[64]; }; @@ -54,7 +54,7 @@ static struct phonet_device *__phonet_device_alloc(struct net_device *dev) pnd->netdev = dev; bitmap_zero(pnd->addrs, 64); - BUG_ON(!mutex_is_locked(&pndevs->lock)); + lockdep_assert_held(&pndevs->lock); list_add_rcu(&pnd->list, &pndevs->list); return pnd; } @@ -64,7 +64,8 @@ static struct phonet_device *__phonet_get(struct net_device *dev) struct phonet_device_list *pndevs = phonet_device_list(dev_net(dev)); struct phonet_device *pnd; - BUG_ON(!mutex_is_locked(&pndevs->lock)); + lockdep_assert_held(&pndevs->lock); + list_for_each_entry(pnd, &pndevs->list, list) { if (pnd->netdev == dev) return pnd; @@ -91,17 +92,22 @@ static void phonet_device_destroy(struct net_device *dev) ASSERT_RTNL(); - mutex_lock(&pndevs->lock); + spin_lock(&pndevs->lock); + pnd = __phonet_get(dev); if (pnd) list_del_rcu(&pnd->list); - mutex_unlock(&pndevs->lock); + + spin_unlock(&pndevs->lock); if (pnd) { + struct net *net = dev_net(dev); + u32 ifindex = dev->ifindex; u8 addr; for_each_set_bit(addr, pnd->addrs, 64) - phonet_address_notify(RTM_DELADDR, dev, addr); + phonet_address_notify(net, RTM_DELADDR, ifindex, addr); + kfree(pnd); } } @@ -133,7 +139,8 @@ int phonet_address_add(struct net_device *dev, u8 addr) struct phonet_device *pnd; int err = 0; - mutex_lock(&pndevs->lock); + spin_lock(&pndevs->lock); + /* Find or create Phonet-specific device data */ pnd = __phonet_get(dev); if (pnd == NULL) @@ -142,7 +149,9 @@ int phonet_address_add(struct net_device *dev, u8 addr) err = -ENOMEM; else if (test_and_set_bit(addr >> 2, pnd->addrs)) err = -EEXIST; - mutex_unlock(&pndevs->lock); + + spin_unlock(&pndevs->lock); + return err; } @@ -152,7 +161,8 @@ int 
phonet_address_del(struct net_device *dev, u8 addr) struct phonet_device *pnd; int err = 0; - mutex_lock(&pndevs->lock); + spin_lock(&pndevs->lock); + pnd = __phonet_get(dev); if (!pnd || !test_and_clear_bit(addr >> 2, pnd->addrs)) { err = -EADDRNOTAVAIL; @@ -161,7 +171,8 @@ int phonet_address_del(struct net_device *dev, u8 addr) list_del_rcu(&pnd->list); else pnd = NULL; - mutex_unlock(&pndevs->lock); + + spin_unlock(&pndevs->lock); if (pnd) kfree_rcu(pnd, rcu); @@ -244,32 +255,39 @@ static int phonet_device_autoconf(struct net_device *dev) ret = phonet_address_add(dev, req.ifr_phonet_autoconf.device); if (ret) return ret; - phonet_address_notify(RTM_NEWADDR, dev, - req.ifr_phonet_autoconf.device); + + phonet_address_notify(dev_net(dev), RTM_NEWADDR, dev->ifindex, + req.ifr_phonet_autoconf.device); return 0; } static void phonet_route_autodel(struct net_device *dev) { - struct phonet_net *pnn = phonet_pernet(dev_net(dev)); - unsigned int i; + struct net *net = dev_net(dev); DECLARE_BITMAP(deleted, 64); + u32 ifindex = dev->ifindex; + struct phonet_net *pnn; + unsigned int i; + + pnn = phonet_pernet(net); /* Remove left-over Phonet routes */ bitmap_zero(deleted, 64); - mutex_lock(&pnn->routes.lock); - for (i = 0; i < 64; i++) + + spin_lock(&pnn->routes.lock); + for (i = 0; i < 64; i++) { if (rcu_access_pointer(pnn->routes.table[i]) == dev) { RCU_INIT_POINTER(pnn->routes.table[i], NULL); set_bit(i, deleted); } - mutex_unlock(&pnn->routes.lock); + } + spin_unlock(&pnn->routes.lock); if (bitmap_empty(deleted, 64)) return; /* short-circuit RCU */ synchronize_rcu(); for_each_set_bit(i, deleted, 64) { - rtm_phonet_notify(RTM_DELROUTE, dev, i); + rtm_phonet_notify(net, RTM_DELROUTE, ifindex, i); dev_put(dev); } } @@ -309,8 +327,8 @@ static int __net_init phonet_init_net(struct net *net) return -ENOMEM; INIT_LIST_HEAD(&pnn->pndevs.list); - mutex_init(&pnn->pndevs.lock); - mutex_init(&pnn->routes.lock); + spin_lock_init(&pnn->pndevs.lock); + spin_lock_init(&pnn->routes.lock); return 0; } @@ -360,13 +378,15 @@ int phonet_route_add(struct net_device *dev, u8 daddr) int err = -EEXIST; daddr = daddr >> 2; - mutex_lock(&routes->lock); + + spin_lock(&routes->lock); if (routes->table[daddr] == NULL) { rcu_assign_pointer(routes->table[daddr], dev); dev_hold(dev); err = 0; } - mutex_unlock(&routes->lock); + spin_unlock(&routes->lock); + return err; } @@ -376,12 +396,13 @@ int phonet_route_del(struct net_device *dev, u8 daddr) struct phonet_routes *routes = &pnn->routes; daddr = daddr >> 2; - mutex_lock(&routes->lock); + + spin_lock(&routes->lock); if (rcu_access_pointer(routes->table[daddr]) == dev) RCU_INIT_POINTER(routes->table[daddr], NULL); else dev = NULL; - mutex_unlock(&routes->lock); + spin_unlock(&routes->lock); if (!dev) return -ENOENT; diff --git a/net/phonet/pn_netlink.c b/net/phonet/pn_netlink.c index 894e5c72d6bf..ca1f04e4a2d9 100644 --- a/net/phonet/pn_netlink.c +++ b/net/phonet/pn_netlink.c @@ -19,10 +19,10 @@ /* Device address handling */ -static int fill_addr(struct sk_buff *skb, struct net_device *dev, u8 addr, +static int fill_addr(struct sk_buff *skb, u32 ifindex, u8 addr, u32 portid, u32 seq, int event); -void phonet_address_notify(int event, struct net_device *dev, u8 addr) +void phonet_address_notify(struct net *net, int event, u32 ifindex, u8 addr) { struct sk_buff *skb; int err = -ENOBUFS; @@ -31,17 +31,18 @@ void phonet_address_notify(int event, struct net_device *dev, u8 addr) nla_total_size(1), GFP_KERNEL); if (skb == NULL) goto errout; - err = fill_addr(skb, dev, addr, 0, 0, 
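The phonet mutex becomes a spinlock because the doit handlers converted below now resolve the device under rcu_read_lock(), where sleeping locks are off limits; every critical section here is a short bitmap or list update anyway. The lockdep_assert_held() swap is also worth noting: unlike BUG_ON(!mutex_is_locked()), it verifies that the current context holds the lock, warns instead of killing the machine, and compiles away entirely without lockdep. A condensed sketch::

    #include <linux/list.h>
    #include <linux/spinlock.h>

    struct foo_dev_list {
            struct list_head head;
            spinlock_t lock;
    };

    static void foo_list_insert(struct foo_dev_list *l, struct list_head *e)
    {
            lockdep_assert_held(&l->lock);  /* caller locks, we check */
            list_add_rcu(e, &l->head);
    }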
event); + + err = fill_addr(skb, ifindex, addr, 0, 0, event); if (err < 0) { WARN_ON(err == -EMSGSIZE); kfree_skb(skb); goto errout; } - rtnl_notify(skb, dev_net(dev), 0, - RTNLGRP_PHONET_IFADDR, NULL, GFP_KERNEL); + + rtnl_notify(skb, net, 0, RTNLGRP_PHONET_IFADDR, NULL, GFP_KERNEL); return; errout: - rtnl_set_sk_err(dev_net(dev), RTNLGRP_PHONET_IFADDR, err); + rtnl_set_sk_err(net, RTNLGRP_PHONET_IFADDR, err); } static const struct nla_policy ifa_phonet_policy[IFA_MAX+1] = { @@ -64,8 +65,6 @@ static int addr_doit(struct sk_buff *skb, struct nlmsghdr *nlh, if (!netlink_capable(skb, CAP_SYS_ADMIN)) return -EPERM; - ASSERT_RTNL(); - err = nlmsg_parse_deprecated(nlh, sizeof(*ifm), tb, IFA_MAX, ifa_phonet_policy, extack); if (err < 0) @@ -79,21 +78,29 @@ static int addr_doit(struct sk_buff *skb, struct nlmsghdr *nlh, /* Phonet addresses only have 6 high-order bits */ return -EINVAL; - dev = __dev_get_by_index(net, ifm->ifa_index); - if (dev == NULL) + rcu_read_lock(); + + dev = dev_get_by_index_rcu(net, ifm->ifa_index); + if (!dev) { + rcu_read_unlock(); return -ENODEV; + } if (nlh->nlmsg_type == RTM_NEWADDR) err = phonet_address_add(dev, pnaddr); else err = phonet_address_del(dev, pnaddr); + + rcu_read_unlock(); + if (!err) - phonet_address_notify(nlh->nlmsg_type, dev, pnaddr); + phonet_address_notify(net, nlh->nlmsg_type, ifm->ifa_index, pnaddr); + return err; } -static int fill_addr(struct sk_buff *skb, struct net_device *dev, u8 addr, - u32 portid, u32 seq, int event) +static int fill_addr(struct sk_buff *skb, u32 ifindex, u8 addr, + u32 portid, u32 seq, int event) { struct ifaddrmsg *ifm; struct nlmsghdr *nlh; @@ -107,7 +114,7 @@ static int fill_addr(struct sk_buff *skb, struct net_device *dev, u8 addr, ifm->ifa_prefixlen = 0; ifm->ifa_flags = IFA_F_PERMANENT; ifm->ifa_scope = RT_SCOPE_LINK; - ifm->ifa_index = dev->ifindex; + ifm->ifa_index = ifindex; if (nla_put_u8(skb, IFA_LOCAL, addr)) goto nla_put_failure; nlmsg_end(skb, nlh); @@ -120,14 +127,17 @@ nla_put_failure: static int getaddr_dumpit(struct sk_buff *skb, struct netlink_callback *cb) { + int addr_idx = 0, addr_start_idx = cb->args[1]; + int dev_idx = 0, dev_start_idx = cb->args[0]; struct phonet_device_list *pndevs; struct phonet_device *pnd; - int dev_idx = 0, dev_start_idx = cb->args[0]; - int addr_idx = 0, addr_start_idx = cb->args[1]; + int err = 0; pndevs = phonet_device_list(sock_net(skb->sk)); + rcu_read_lock(); list_for_each_entry_rcu(pnd, &pndevs->list, list) { + DECLARE_BITMAP(addrs, 64); u8 addr; if (dev_idx > dev_start_idx) @@ -136,29 +146,32 @@ static int getaddr_dumpit(struct sk_buff *skb, struct netlink_callback *cb) continue; addr_idx = 0; - for_each_set_bit(addr, pnd->addrs, 64) { + memcpy(addrs, pnd->addrs, sizeof(pnd->addrs)); + + for_each_set_bit(addr, addrs, 64) { if (addr_idx++ < addr_start_idx) continue; - if (fill_addr(skb, pnd->netdev, addr << 2, - NETLINK_CB(cb->skb).portid, - cb->nlh->nlmsg_seq, RTM_NEWADDR) < 0) + err = fill_addr(skb, READ_ONCE(pnd->netdev->ifindex), + addr << 2, NETLINK_CB(cb->skb).portid, + cb->nlh->nlmsg_seq, RTM_NEWADDR); + if (err < 0) goto out; } } - out: rcu_read_unlock(); + cb->args[0] = dev_idx; cb->args[1] = addr_idx; - return skb->len; + return err; } /* Routes handling */ -static int fill_route(struct sk_buff *skb, struct net_device *dev, u8 dst, - u32 portid, u32 seq, int event) +static int fill_route(struct sk_buff *skb, u32 ifindex, u8 dst, + u32 portid, u32 seq, int event) { struct rtmsg *rtm; struct nlmsghdr *nlh; @@ -177,8 +190,7 @@ static int fill_route(struct 
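addr_doit() above (and route_doit() below) drop ASSERT_RTNL() in favour of an RCU device lookup, which is what allows registering them with RTNL_FLAG_DOIT_UNLOCKED at the bottom of the file; the notification then works from (net, ifindex) alone, since the device pointer must not leave the RCU section. The pattern, with hypothetical foo_apply()/foo_notify() helpers::

    #include <linux/netdevice.h>

    static int foo_apply(struct net_device *dev, u8 addr);
    static void foo_notify(struct net *net, int msgtype, u32 ifindex, u8 addr);

    static int foo_doit(struct net *net, u32 ifindex, u8 addr, int msgtype)
    {
            struct net_device *dev;
            int err;

            rcu_read_lock();
            dev = dev_get_by_index_rcu(net, ifindex);
            if (!dev) {
                    rcu_read_unlock();
                    return -ENODEV;
            }
            err = foo_apply(dev, addr);     /* short and non-sleeping */
            rcu_read_unlock();

            if (!err)
                    foo_notify(net, msgtype, ifindex, addr);
            return err;
    }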
sk_buff *skb, struct net_device *dev, u8 dst, rtm->rtm_scope = RT_SCOPE_UNIVERSE; rtm->rtm_type = RTN_UNICAST; rtm->rtm_flags = 0; - if (nla_put_u8(skb, RTA_DST, dst) || - nla_put_u32(skb, RTA_OIF, READ_ONCE(dev->ifindex))) + if (nla_put_u8(skb, RTA_DST, dst) || nla_put_u32(skb, RTA_OIF, ifindex)) goto nla_put_failure; nlmsg_end(skb, nlh); return 0; @@ -188,7 +200,7 @@ nla_put_failure: return -EMSGSIZE; } -void rtm_phonet_notify(int event, struct net_device *dev, u8 dst) +void rtm_phonet_notify(struct net *net, int event, u32 ifindex, u8 dst) { struct sk_buff *skb; int err = -ENOBUFS; @@ -197,17 +209,18 @@ void rtm_phonet_notify(int event, struct net_device *dev, u8 dst) nla_total_size(1) + nla_total_size(4), GFP_KERNEL); if (skb == NULL) goto errout; - err = fill_route(skb, dev, dst, 0, 0, event); + + err = fill_route(skb, ifindex, dst, 0, 0, event); if (err < 0) { WARN_ON(err == -EMSGSIZE); kfree_skb(skb); goto errout; } - rtnl_notify(skb, dev_net(dev), 0, - RTNLGRP_PHONET_ROUTE, NULL, GFP_KERNEL); + + rtnl_notify(skb, net, 0, RTNLGRP_PHONET_ROUTE, NULL, GFP_KERNEL); return; errout: - rtnl_set_sk_err(dev_net(dev), RTNLGRP_PHONET_ROUTE, err); + rtnl_set_sk_err(net, RTNLGRP_PHONET_ROUTE, err); } static const struct nla_policy rtm_phonet_policy[RTA_MAX+1] = { @@ -222,6 +235,7 @@ static int route_doit(struct sk_buff *skb, struct nlmsghdr *nlh, struct nlattr *tb[RTA_MAX+1]; struct net_device *dev; struct rtmsg *rtm; + u32 ifindex; int err; u8 dst; @@ -231,8 +245,6 @@ static int route_doit(struct sk_buff *skb, struct nlmsghdr *nlh, if (!netlink_capable(skb, CAP_SYS_ADMIN)) return -EPERM; - ASSERT_RTNL(); - err = nlmsg_parse_deprecated(nlh, sizeof(*rtm), tb, RTA_MAX, rtm_phonet_policy, extack); if (err < 0) @@ -247,16 +259,26 @@ static int route_doit(struct sk_buff *skb, struct nlmsghdr *nlh, if (dst & 3) /* Phonet addresses only have 6 high-order bits */ return -EINVAL; - dev = __dev_get_by_index(net, nla_get_u32(tb[RTA_OIF])); - if (dev == NULL) + ifindex = nla_get_u32(tb[RTA_OIF]); + + rcu_read_lock(); + + dev = dev_get_by_index_rcu(net, ifindex); + if (!dev) { + rcu_read_unlock(); return -ENODEV; + } if (nlh->nlmsg_type == RTM_NEWROUTE) err = phonet_route_add(dev, dst); else err = phonet_route_del(dev, dst); + + rcu_read_unlock(); + if (!err) - rtm_phonet_notify(nlh->nlmsg_type, dev, dst); + rtm_phonet_notify(net, nlh->nlmsg_type, ifindex, dst); + return err; } @@ -273,7 +295,7 @@ static int route_dumpit(struct sk_buff *skb, struct netlink_callback *cb) if (!dev) continue; - err = fill_route(skb, dev, addr << 2, + err = fill_route(skb, READ_ONCE(dev->ifindex), addr << 2, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq, RTM_NEWROUTE); if (err < 0) @@ -286,13 +308,18 @@ static int route_dumpit(struct sk_buff *skb, struct netlink_callback *cb) } static const struct rtnl_msg_handler phonet_rtnl_msg_handlers[] __initdata_or_module = { - {THIS_MODULE, PF_PHONET, RTM_NEWADDR, addr_doit, NULL, 0}, - {THIS_MODULE, PF_PHONET, RTM_DELADDR, addr_doit, NULL, 0}, - {THIS_MODULE, PF_PHONET, RTM_GETADDR, NULL, getaddr_dumpit, 0}, - {THIS_MODULE, PF_PHONET, RTM_NEWROUTE, route_doit, NULL, 0}, - {THIS_MODULE, PF_PHONET, RTM_DELROUTE, route_doit, NULL, 0}, - {THIS_MODULE, PF_PHONET, RTM_GETROUTE, NULL, route_dumpit, - RTNL_FLAG_DUMP_UNLOCKED}, + {.owner = THIS_MODULE, .protocol = PF_PHONET, .msgtype = RTM_NEWADDR, + .doit = addr_doit, .flags = RTNL_FLAG_DOIT_UNLOCKED}, + {.owner = THIS_MODULE, .protocol = PF_PHONET, .msgtype = RTM_DELADDR, + .doit = addr_doit, .flags = RTNL_FLAG_DOIT_UNLOCKED}, + {.owner = 
THIS_MODULE, .protocol = PF_PHONET, .msgtype = RTM_GETADDR, + .dumpit = getaddr_dumpit, .flags = RTNL_FLAG_DUMP_UNLOCKED}, + {.owner = THIS_MODULE, .protocol = PF_PHONET, .msgtype = RTM_NEWROUTE, + .doit = route_doit, .flags = RTNL_FLAG_DOIT_UNLOCKED}, + {.owner = THIS_MODULE, .protocol = PF_PHONET, .msgtype = RTM_DELROUTE, + .doit = route_doit, .flags = RTNL_FLAG_DOIT_UNLOCKED}, + {.owner = THIS_MODULE, .protocol = PF_PHONET, .msgtype = RTM_GETROUTE, + .dumpit = route_dumpit, .flags = RTNL_FLAG_DUMP_UNLOCKED}, }; int __init phonet_netlink_register(void) diff --git a/net/rds/ib_rdma.c b/net/rds/ib_rdma.c index 8f070ee7e742..d1cfceeff133 100644 --- a/net/rds/ib_rdma.c +++ b/net/rds/ib_rdma.c @@ -40,10 +40,6 @@ #include "rds.h" struct workqueue_struct *rds_ib_mr_wq; -struct rds_ib_dereg_odp_mr { - struct work_struct work; - struct ib_mr *mr; -}; static void rds_ib_odp_mr_worker(struct work_struct *work); diff --git a/net/sched/act_api.c b/net/sched/act_api.c index eecad65fec92..839790043256 100644 --- a/net/sched/act_api.c +++ b/net/sched/act_api.c @@ -504,6 +504,50 @@ nla_put_failure: return -1; } +static int +tcf_action_dump_1(struct sk_buff *skb, struct tc_action *a, int bind, int ref) +{ + unsigned char *b = skb_tail_pointer(skb); + struct nlattr *nest; + int err = -EINVAL; + u32 flags; + + if (tcf_action_dump_terse(skb, a, false)) + goto nla_put_failure; + + if (a->hw_stats != TCA_ACT_HW_STATS_ANY && + nla_put_bitfield32(skb, TCA_ACT_HW_STATS, + a->hw_stats, TCA_ACT_HW_STATS_ANY)) + goto nla_put_failure; + + if (a->used_hw_stats_valid && + nla_put_bitfield32(skb, TCA_ACT_USED_HW_STATS, + a->used_hw_stats, TCA_ACT_HW_STATS_ANY)) + goto nla_put_failure; + + flags = a->tcfa_flags & TCA_ACT_FLAGS_USER_MASK; + if (flags && + nla_put_bitfield32(skb, TCA_ACT_FLAGS, + flags, flags)) + goto nla_put_failure; + + if (nla_put_u32(skb, TCA_ACT_IN_HW_COUNT, a->in_hw_count)) + goto nla_put_failure; + + nest = nla_nest_start_noflag(skb, TCA_ACT_OPTIONS); + if (nest == NULL) + goto nla_put_failure; + err = tcf_action_dump_old(skb, a, bind, ref); + if (err > 0) { + nla_nest_end(skb, nest); + return err; + } + +nla_put_failure: + nlmsg_trim(skb, b); + return -1; +} + static int tcf_dump_walker(struct tcf_idrinfo *idrinfo, struct sk_buff *skb, struct netlink_callback *cb) { @@ -1190,51 +1234,6 @@ tcf_action_dump_old(struct sk_buff *skb, struct tc_action *a, int bind, int ref) return a->ops->dump(skb, a, bind, ref); } -int -tcf_action_dump_1(struct sk_buff *skb, struct tc_action *a, int bind, int ref) -{ - int err = -EINVAL; - unsigned char *b = skb_tail_pointer(skb); - struct nlattr *nest; - u32 flags; - - if (tcf_action_dump_terse(skb, a, false)) - goto nla_put_failure; - - if (a->hw_stats != TCA_ACT_HW_STATS_ANY && - nla_put_bitfield32(skb, TCA_ACT_HW_STATS, - a->hw_stats, TCA_ACT_HW_STATS_ANY)) - goto nla_put_failure; - - if (a->used_hw_stats_valid && - nla_put_bitfield32(skb, TCA_ACT_USED_HW_STATS, - a->used_hw_stats, TCA_ACT_HW_STATS_ANY)) - goto nla_put_failure; - - flags = a->tcfa_flags & TCA_ACT_FLAGS_USER_MASK; - if (flags && - nla_put_bitfield32(skb, TCA_ACT_FLAGS, - flags, flags)) - goto nla_put_failure; - - if (nla_put_u32(skb, TCA_ACT_IN_HW_COUNT, a->in_hw_count)) - goto nla_put_failure; - - nest = nla_nest_start_noflag(skb, TCA_ACT_OPTIONS); - if (nest == NULL) - goto nla_put_failure; - err = tcf_action_dump_old(skb, a, bind, ref); - if (err > 0) { - nla_nest_end(skb, nest); - return err; - } - -nla_put_failure: - nlmsg_trim(skb, b); - return -1; -} -EXPORT_SYMBOL(tcf_action_dump_1); - int 
tcf_action_dump(struct sk_buff *skb, struct tc_action *actions[], int bind, int ref, bool terse) { @@ -2264,13 +2263,16 @@ out_module_put: return skb->len; } +static const struct rtnl_msg_handler tc_action_rtnl_msg_handlers[] __initconst = { + {.msgtype = RTM_NEWACTION, .doit = tc_ctl_action}, + {.msgtype = RTM_DELACTION, .doit = tc_ctl_action}, + {.msgtype = RTM_GETACTION, .doit = tc_ctl_action, + .dumpit = tc_dump_action}, +}; + static int __init tc_action_init(void) { - rtnl_register(PF_UNSPEC, RTM_NEWACTION, tc_ctl_action, NULL, 0); - rtnl_register(PF_UNSPEC, RTM_DELACTION, tc_ctl_action, NULL, 0); - rtnl_register(PF_UNSPEC, RTM_GETACTION, tc_ctl_action, tc_dump_action, - 0); - + rtnl_register_many(tc_action_rtnl_msg_handlers); return 0; } diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c index 17d97bbe890f..7637f979d689 100644 --- a/net/sched/cls_api.c +++ b/net/sched/cls_api.c @@ -4055,6 +4055,19 @@ static struct pernet_operations tcf_net_ops = { .size = sizeof(struct tcf_net), }; +static const struct rtnl_msg_handler tc_filter_rtnl_msg_handlers[] __initconst = { + {.msgtype = RTM_NEWTFILTER, .doit = tc_new_tfilter, + .flags = RTNL_FLAG_DOIT_UNLOCKED}, + {.msgtype = RTM_DELTFILTER, .doit = tc_del_tfilter, + .flags = RTNL_FLAG_DOIT_UNLOCKED}, + {.msgtype = RTM_GETTFILTER, .doit = tc_get_tfilter, + .dumpit = tc_dump_tfilter, .flags = RTNL_FLAG_DOIT_UNLOCKED}, + {.msgtype = RTM_NEWCHAIN, .doit = tc_ctl_chain}, + {.msgtype = RTM_DELCHAIN, .doit = tc_ctl_chain}, + {.msgtype = RTM_GETCHAIN, .doit = tc_ctl_chain, + .dumpit = tc_dump_chain}, +}; + static int __init tc_filter_init(void) { int err; @@ -4068,17 +4081,7 @@ static int __init tc_filter_init(void) goto err_register_pernet_subsys; xa_init_flags(&tcf_exts_miss_cookies_xa, XA_FLAGS_ALLOC1); - - rtnl_register(PF_UNSPEC, RTM_NEWTFILTER, tc_new_tfilter, NULL, - RTNL_FLAG_DOIT_UNLOCKED); - rtnl_register(PF_UNSPEC, RTM_DELTFILTER, tc_del_tfilter, NULL, - RTNL_FLAG_DOIT_UNLOCKED); - rtnl_register(PF_UNSPEC, RTM_GETTFILTER, tc_get_tfilter, - tc_dump_tfilter, RTNL_FLAG_DOIT_UNLOCKED); - rtnl_register(PF_UNSPEC, RTM_NEWCHAIN, tc_ctl_chain, NULL, 0); - rtnl_register(PF_UNSPEC, RTM_DELCHAIN, tc_ctl_chain, NULL, 0); - rtnl_register(PF_UNSPEC, RTM_GETCHAIN, tc_ctl_chain, - tc_dump_chain, 0); + rtnl_register_many(tc_filter_rtnl_msg_handlers); return 0; diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c index 2eefa4783879..da2da2ab858b 100644 --- a/net/sched/sch_api.c +++ b/net/sched/sch_api.c @@ -2420,6 +2420,17 @@ static struct pernet_operations psched_net_ops = { DEFINE_STATIC_KEY_FALSE(tc_skip_wrapper); #endif +static const struct rtnl_msg_handler psched_rtnl_msg_handlers[] __initconst = { + {.msgtype = RTM_NEWQDISC, .doit = tc_modify_qdisc}, + {.msgtype = RTM_DELQDISC, .doit = tc_get_qdisc}, + {.msgtype = RTM_GETQDISC, .doit = tc_get_qdisc, + .dumpit = tc_dump_qdisc}, + {.msgtype = RTM_NEWTCLASS, .doit = tc_ctl_tclass}, + {.msgtype = RTM_DELTCLASS, .doit = tc_ctl_tclass}, + {.msgtype = RTM_GETTCLASS, .doit = tc_ctl_tclass, + .dumpit = tc_dump_tclass}, +}; + static int __init pktsched_init(void) { int err; @@ -2438,14 +2449,7 @@ static int __init pktsched_init(void) register_qdisc(&mq_qdisc_ops); register_qdisc(&noqueue_qdisc_ops); - rtnl_register(PF_UNSPEC, RTM_NEWQDISC, tc_modify_qdisc, NULL, 0); - rtnl_register(PF_UNSPEC, RTM_DELQDISC, tc_get_qdisc, NULL, 0); - rtnl_register(PF_UNSPEC, RTM_GETQDISC, tc_get_qdisc, tc_dump_qdisc, - 0); - rtnl_register(PF_UNSPEC, RTM_NEWTCLASS, tc_ctl_tclass, NULL, 0); - rtnl_register(PF_UNSPEC, 
RTM_DELTCLASS, tc_ctl_tclass, NULL, 0); - rtnl_register(PF_UNSPEC, RTM_GETTCLASS, tc_ctl_tclass, tc_dump_tclass, - 0); + rtnl_register_many(psched_rtnl_msg_handlers); tc_wrapper_init(); diff --git a/net/sched/sch_cbs.c b/net/sched/sch_cbs.c index 939425da1895..8c9a0400c862 100644 --- a/net/sched/sch_cbs.c +++ b/net/sched/sch_cbs.c @@ -310,7 +310,7 @@ static void cbs_set_port_rate(struct net_device *dev, struct cbs_sched_data *q) { struct ethtool_link_ksettings ecmd; int speed = SPEED_10; - int port_rate; + s64 port_rate; int err; err = __ethtool_get_link_ksettings(dev, &ecmd); diff --git a/net/sched/sch_fq.c b/net/sched/sch_fq.c index 19a49af5a9e5..a97638bef6da 100644 --- a/net/sched/sch_fq.c +++ b/net/sched/sch_fq.c @@ -111,6 +111,7 @@ struct fq_perband_flows { struct fq_sched_data { /* Read mostly cache line */ + u64 offload_horizon; u32 quantum; u32 initial_quantum; u32 flow_refill_delay; @@ -299,7 +300,7 @@ static void fq_gc(struct fq_sched_data *q, } /* Fast path can be used if : - * 1) Packet tstamp is in the past. + * 1) Packet tstamp is in the past, or within the pacing offload horizon. * 2) FQ qlen == 0 OR * (no flow is currently eligible for transmit, * AND fast path queue has less than 8 packets) @@ -314,7 +315,7 @@ static bool fq_fastpath_check(const struct Qdisc *sch, struct sk_buff *skb, const struct fq_sched_data *q = qdisc_priv(sch); const struct sock *sk; - if (fq_skb_cb(skb)->time_to_send > now) + if (fq_skb_cb(skb)->time_to_send > now + q->offload_horizon) return false; if (sch->q.qlen != 0) { @@ -361,8 +362,9 @@ static struct fq_flow *fq_classify(struct Qdisc *sch, struct sk_buff *skb, * 3) We do not want to rate limit them (eg SYNFLOOD attack), * especially if the listener set SO_MAX_PACING_RATE * 4) We pretend they are orphaned + * TCP can also associate TIME_WAIT sockets with RST or ACK packets. */ - if (!sk || sk_listener(sk)) { + if (!sk || sk_listener_or_tw(sk)) { unsigned long hash = skb_get_hash(skb) & q->orphan_mask; /* By forcing low order bit to 1, we make sure to not @@ -595,15 +597,18 @@ static void fq_check_throttled(struct fq_sched_data *q, u64 now) unsigned long sample; struct rb_node *p; - if (q->time_next_delayed_flow > now) + if (q->time_next_delayed_flow > now + q->offload_horizon) return; /* Update unthrottle latency EWMA. * This is cheap and can help diagnosing timer/latency problems. 
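The new TCA_FQ_OFFLOAD_HORIZON knob widens every "may this packet go now?" comparison in fq by q->offload_horizon, so packets whose departure time falls inside that window are released early and a NIC that paces in hardware finishes the job. A minimal userspace sketch of that eligibility test, assuming nanosecond timestamps (names and values here are illustrative, not the kernel API):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical stand-in for fq's per-packet departure-time check. */
    static bool fq_may_release(uint64_t time_to_send_ns, uint64_t now_ns,
                               uint64_t offload_horizon_ns)
    {
            /* With a zero horizon this degenerates to time_to_send <= now;
             * a non-zero horizon hands the final pacing to the NIC.
             */
            return time_to_send_ns <= now_ns + offload_horizon_ns;
    }

    int main(void)
    {
            uint64_t now = 1000000000ULL;  /* t = 1s */
            uint64_t horizon = 500000ULL;  /* 500 us offloaded to the NIC */

            printf("past due:        %d\n", fq_may_release(now - 1, now, horizon));
            printf("inside horizon:  %d\n", fq_may_release(now + 400000, now, horizon));
            printf("still throttled: %d\n", fq_may_release(now + 600000, now, horizon));
            return 0;
    }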
*/ sample = (unsigned long)(now - q->time_next_delayed_flow); - q->unthrottle_latency_ns -= q->unthrottle_latency_ns >> 3; - q->unthrottle_latency_ns += sample >> 3; + if ((long)sample > 0) { + q->unthrottle_latency_ns -= q->unthrottle_latency_ns >> 3; + q->unthrottle_latency_ns += sample >> 3; + } + now += q->offload_horizon; q->time_next_delayed_flow = ~0ULL; while ((p = rb_first(&q->delayed)) != NULL) { @@ -687,7 +692,7 @@ begin: u64 time_next_packet = max_t(u64, fq_skb_cb(skb)->time_to_send, f->time_next_packet); - if (now < time_next_packet) { + if (now + q->offload_horizon < time_next_packet) { head->first = f->next; f->time_next_packet = time_next_packet; fq_flow_set_throttled(q, f); @@ -925,6 +930,7 @@ static const struct nla_policy fq_policy[TCA_FQ_MAX + 1] = { [TCA_FQ_HORIZON_DROP] = { .type = NLA_U8 }, [TCA_FQ_PRIOMAP] = NLA_POLICY_EXACT_LEN(sizeof(struct tc_prio_qopt)), [TCA_FQ_WEIGHTS] = NLA_POLICY_EXACT_LEN(FQ_BANDS * sizeof(s32)), + [TCA_FQ_OFFLOAD_HORIZON] = { .type = NLA_U32 }, }; /* compress a u8 array with all elems <= 3 to an array of 2-bit fields */ @@ -1100,6 +1106,17 @@ static int fq_change(struct Qdisc *sch, struct nlattr *opt, WRITE_ONCE(q->horizon_drop, nla_get_u8(tb[TCA_FQ_HORIZON_DROP])); + if (tb[TCA_FQ_OFFLOAD_HORIZON]) { + u64 offload_horizon = (u64)NSEC_PER_USEC * + nla_get_u32(tb[TCA_FQ_OFFLOAD_HORIZON]); + + if (offload_horizon <= qdisc_dev(sch)->max_pacing_offload_horizon) { + WRITE_ONCE(q->offload_horizon, offload_horizon); + } else { + NL_SET_ERR_MSG_MOD(extack, "invalid offload_horizon"); + err = -EINVAL; + } + } if (!err) { sch_tree_unlock(sch); @@ -1183,6 +1200,7 @@ static int fq_dump(struct Qdisc *sch, struct sk_buff *skb) .bands = FQ_BANDS, }; struct nlattr *opts; + u64 offload_horizon; u64 ce_threshold; s32 weights[3]; u64 horizon; @@ -1199,6 +1217,9 @@ static int fq_dump(struct Qdisc *sch, struct sk_buff *skb) horizon = READ_ONCE(q->horizon); do_div(horizon, NSEC_PER_USEC); + offload_horizon = READ_ONCE(q->offload_horizon); + do_div(offload_horizon, NSEC_PER_USEC); + if (nla_put_u32(skb, TCA_FQ_PLIMIT, READ_ONCE(sch->limit)) || nla_put_u32(skb, TCA_FQ_FLOW_PLIMIT, @@ -1224,6 +1245,7 @@ static int fq_dump(struct Qdisc *sch, struct sk_buff *skb) nla_put_u32(skb, TCA_FQ_TIMER_SLACK, READ_ONCE(q->timer_slack)) || nla_put_u32(skb, TCA_FQ_HORIZON, (u32)horizon) || + nla_put_u32(skb, TCA_FQ_OFFLOAD_HORIZON, (u32)offload_horizon) || nla_put_u8(skb, TCA_FQ_HORIZON_DROP, READ_ONCE(q->horizon_drop))) goto nla_put_failure; diff --git a/net/sched/sch_sfq.c b/net/sched/sch_sfq.c index 3b9245a3c767..a4b8296a2fa1 100644 --- a/net/sched/sch_sfq.c +++ b/net/sched/sch_sfq.c @@ -77,12 +77,6 @@ #define SFQ_EMPTY_SLOT 0xffff #define SFQ_DEFAULT_HASH_DIVISOR 1024 -/* We use 16 bits to store allot, and want to handle packets up to 64K - * Scale allot by 8 (1<<3) so that no overflow occurs. 
- */ -#define SFQ_ALLOT_SHIFT 3 -#define SFQ_ALLOT_SIZE(X) DIV_ROUND_UP(X, 1 << SFQ_ALLOT_SHIFT) - /* This type should contain at least SFQ_MAX_DEPTH + 1 + SFQ_MAX_FLOWS values */ typedef u16 sfq_index; @@ -104,7 +98,7 @@ struct sfq_slot { sfq_index next; /* next slot in sfq RR chain */ struct sfq_head dep; /* anchor in dep[] chains */ unsigned short hash; /* hash value (index in ht[]) */ - short allot; /* credit for this slot */ + int allot; /* credit for this slot */ unsigned int backlog; struct red_vars vars; @@ -120,7 +114,6 @@ struct sfq_sched_data { siphash_key_t perturbation; u8 cur_depth; /* depth of longest slot */ u8 flags; - unsigned short scaled_quantum; /* SFQ_ALLOT_SIZE(quantum) */ struct tcf_proto __rcu *filter_list; struct tcf_block *block; sfq_index *ht; /* Hash table ('divisor' slots) */ @@ -456,7 +449,7 @@ enqueue: */ q->tail = slot; /* We could use a bigger initial quantum for new flows */ - slot->allot = q->scaled_quantum; + slot->allot = q->quantum; } if (++sch->q.qlen <= q->limit) return NET_XMIT_SUCCESS; @@ -493,7 +486,7 @@ next_slot: slot = &q->slots[a]; if (slot->allot <= 0) { q->tail = slot; - slot->allot += q->scaled_quantum; + slot->allot += q->quantum; goto next_slot; } skb = slot_dequeue_head(slot); @@ -512,7 +505,7 @@ next_slot: } q->tail->next = next_a; } else { - slot->allot -= SFQ_ALLOT_SIZE(qdisc_pkt_len(skb)); + slot->allot -= qdisc_pkt_len(skb); } return skb; } @@ -595,7 +588,7 @@ drop: q->tail->next = x; } q->tail = slot; - slot->allot = q->scaled_quantum; + slot->allot = q->quantum; } } sch->q.qlen -= dropped; @@ -628,7 +621,8 @@ static void sfq_perturbation(struct timer_list *t) rcu_read_unlock(); } -static int sfq_change(struct Qdisc *sch, struct nlattr *opt) +static int sfq_change(struct Qdisc *sch, struct nlattr *opt, + struct netlink_ext_ack *extack) { struct sfq_sched_data *q = qdisc_priv(sch); struct tc_sfq_qopt *ctl = nla_data(opt); @@ -646,14 +640,10 @@ static int sfq_change(struct Qdisc *sch, struct nlattr *opt) (!is_power_of_2(ctl->divisor) || ctl->divisor > 65536)) return -EINVAL; - /* slot->allot is a short, make sure quantum is not too big. 
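For context on the sch_sfq hunks above and below: allot used to be a 16-bit short holding credit pre-scaled by 1 << 3 to dodge overflow with 64K packets; widening it to an int lets the qdisc account raw bytes and delete the scaling macros. A toy comparison of the two accounting schemes (a sketch assuming the removed SFQ_ALLOT_SHIFT of 3, not the kernel code itself):

    #include <stdio.h>

    #define OLD_ALLOT_SHIFT 3  /* the removed SFQ_ALLOT_SHIFT */

    /* DIV_ROUND_UP(x, 8), as the removed SFQ_ALLOT_SIZE() computed it. */
    static int old_scale(int bytes)
    {
            return (bytes + (1 << OLD_ALLOT_SHIFT) - 1) >> OLD_ALLOT_SHIFT;
    }

    int main(void)
    {
            int quantum = 1514;  /* per-round credit: typically one MTU */
            int pkt_len = 1000;

            /* Old scheme: 16-bit credit in coarse 8-byte units. */
            short old_allot = old_scale(quantum) - old_scale(pkt_len);

            /* New scheme: 32-bit credit, exact byte accounting. */
            int new_allot = quantum - pkt_len;

            printf("old credit left: %d units (~%d bytes)\n",
                   old_allot, old_allot << OLD_ALLOT_SHIFT);
            printf("new credit left: %d bytes\n", new_allot);
            return 0;
    }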
*/ - if (ctl->quantum) { - unsigned int scaled = SFQ_ALLOT_SIZE(ctl->quantum); - - if (scaled <= 0 || scaled > SHRT_MAX) - return -EINVAL; + if ((int)ctl->quantum < 0) { + NL_SET_ERR_MSG_MOD(extack, "invalid quantum"); + return -EINVAL; } - if (ctl_v1 && !red_check_params(ctl_v1->qth_min, ctl_v1->qth_max, ctl_v1->Wlog, ctl_v1->Scell_log, NULL)) return -EINVAL; @@ -663,10 +653,8 @@ static int sfq_change(struct Qdisc *sch, struct nlattr *opt) return -ENOMEM; } sch_tree_lock(sch); - if (ctl->quantum) { + if (ctl->quantum) q->quantum = ctl->quantum; - q->scaled_quantum = SFQ_ALLOT_SIZE(q->quantum); - } WRITE_ONCE(q->perturb_period, ctl->perturb_period * HZ); if (ctl->flows) q->maxflows = min_t(u32, ctl->flows, SFQ_MAX_FLOWS); @@ -762,12 +750,11 @@ static int sfq_init(struct Qdisc *sch, struct nlattr *opt, q->divisor = SFQ_DEFAULT_HASH_DIVISOR; q->maxflows = SFQ_DEFAULT_FLOWS; q->quantum = psched_mtu(qdisc_dev(sch)); - q->scaled_quantum = SFQ_ALLOT_SIZE(q->quantum); q->perturb_period = 0; get_random_bytes(&q->perturbation, sizeof(q->perturbation)); if (opt) { - int err = sfq_change(sch, opt); + int err = sfq_change(sch, opt, extack); if (err) return err; } @@ -878,7 +865,7 @@ static int sfq_dump_class_stats(struct Qdisc *sch, unsigned long cl, if (idx != SFQ_EMPTY_SLOT) { const struct sfq_slot *slot = &q->slots[idx]; - xstats.allot = slot->allot << SFQ_ALLOT_SHIFT; + xstats.allot = slot->allot; qs.qlen = slot->qlen; qs.backlog = slot->backlog; } diff --git a/net/shaper/Makefile b/net/shaper/Makefile new file mode 100644 index 000000000000..54af7169a331 --- /dev/null +++ b/net/shaper/Makefile @@ -0,0 +1,8 @@ +# SPDX-License-Identifier: GPL-2.0-only +# +# Makefile for the net shaper infrastructure. +# +# Copyright (c) 2024, Red Hat, Inc. +# + +obj-y += shaper.o shaper_nl_gen.o diff --git a/net/shaper/shaper.c b/net/shaper/shaper.c new file mode 100644 index 000000000000..15463062fe7b --- /dev/null +++ b/net/shaper/shaper.c @@ -0,0 +1,1438 @@ +// SPDX-License-Identifier: GPL-2.0-or-later + +#include <linux/bits.h> +#include <linux/bitfield.h> +#include <linux/idr.h> +#include <linux/kernel.h> +#include <linux/netdevice.h> +#include <linux/netlink.h> +#include <linux/skbuff.h> +#include <linux/xarray.h> +#include <net/devlink.h> +#include <net/net_shaper.h> + +#include "shaper_nl_gen.h" + +#include "../core/dev.h" + +#define NET_SHAPER_SCOPE_SHIFT 26 +#define NET_SHAPER_ID_MASK GENMASK(NET_SHAPER_SCOPE_SHIFT - 1, 0) +#define NET_SHAPER_SCOPE_MASK GENMASK(31, NET_SHAPER_SCOPE_SHIFT) + +#define NET_SHAPER_ID_UNSPEC NET_SHAPER_ID_MASK + +struct net_shaper_hierarchy { + struct xarray shapers; +}; + +struct net_shaper_nl_ctx { + struct net_shaper_binding binding; + netdevice_tracker dev_tracker; + unsigned long start_index; +}; + +static struct net_shaper_binding *net_shaper_binding_from_ctx(void *ctx) +{ + return &((struct net_shaper_nl_ctx *)ctx)->binding; +} + +static void net_shaper_lock(struct net_shaper_binding *binding) +{ + switch (binding->type) { + case NET_SHAPER_BINDING_TYPE_NETDEV: + mutex_lock(&binding->netdev->lock); + break; + } +} + +static void net_shaper_unlock(struct net_shaper_binding *binding) +{ + switch (binding->type) { + case NET_SHAPER_BINDING_TYPE_NETDEV: + mutex_unlock(&binding->netdev->lock); + break; + } +} + +static struct net_shaper_hierarchy * +net_shaper_hierarchy(struct net_shaper_binding *binding) +{ + /* Pairs with WRITE_ONCE() in net_shaper_hierarchy_setup. 
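The READ_ONCE()/WRITE_ONCE() pairing noted above lets lockless readers see the hierarchy pointer without tearing while writers publish it under the net shaper lock. A rough userspace analogy using C11 atomics (the kernel relies on the ONCE macros plus the lock for ordering, not on stdatomic):

    #include <stdatomic.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct hierarchy { int nshapers; };

    /* One-shot published pointer; stays NULL until the first setup. */
    static _Atomic(struct hierarchy *) published;

    static struct hierarchy *hierarchy_get(void)
    {
            /* Analogue of READ_ONCE(): one single, non-torn load. */
            return atomic_load_explicit(&published, memory_order_acquire);
    }

    static struct hierarchy *hierarchy_setup(void)
    {
            struct hierarchy *h = hierarchy_get();

            if (h)
                    return h;
            h = calloc(1, sizeof(*h));
            if (!h)
                    return NULL;
            /* Analogue of WRITE_ONCE(): publish the fully built object. */
            atomic_store_explicit(&published, h, memory_order_release);
            return h;
    }

    int main(void)
    {
            printf("before setup: %p\n", (void *)hierarchy_get());
            printf("after setup:  %p\n", (void *)hierarchy_setup());
            return 0;
    }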
*/ + if (binding->type == NET_SHAPER_BINDING_TYPE_NETDEV) + return READ_ONCE(binding->netdev->net_shaper_hierarchy); + + /* No other type supported yet. */ + return NULL; +} + +static const struct net_shaper_ops * +net_shaper_ops(struct net_shaper_binding *binding) +{ + if (binding->type == NET_SHAPER_BINDING_TYPE_NETDEV) + return binding->netdev->netdev_ops->net_shaper_ops; + + /* No other type supported yet. */ + return NULL; +} + +/* Count the number of [multi] attributes of the given type. */ +static int net_shaper_list_len(struct genl_info *info, int type) +{ + struct nlattr *attr; + int rem, cnt = 0; + + nla_for_each_attr_type(attr, type, genlmsg_data(info->genlhdr), + genlmsg_len(info->genlhdr), rem) + cnt++; + return cnt; +} + +static int net_shaper_handle_size(void) +{ + return nla_total_size(nla_total_size(sizeof(u32)) + + nla_total_size(sizeof(u32))); +} + +static int net_shaper_fill_binding(struct sk_buff *msg, + const struct net_shaper_binding *binding, + u32 type) +{ + /* Should never happen, as currently only NETDEV is supported. */ + if (WARN_ON_ONCE(binding->type != NET_SHAPER_BINDING_TYPE_NETDEV)) + return -EINVAL; + + if (nla_put_u32(msg, type, binding->netdev->ifindex)) + return -EMSGSIZE; + + return 0; +} + +static int net_shaper_fill_handle(struct sk_buff *msg, + const struct net_shaper_handle *handle, + u32 type) +{ + struct nlattr *handle_attr; + + if (handle->scope == NET_SHAPER_SCOPE_UNSPEC) + return 0; + + handle_attr = nla_nest_start(msg, type); + if (!handle_attr) + return -EMSGSIZE; + + if (nla_put_u32(msg, NET_SHAPER_A_HANDLE_SCOPE, handle->scope) || + (handle->scope >= NET_SHAPER_SCOPE_QUEUE && + nla_put_u32(msg, NET_SHAPER_A_HANDLE_ID, handle->id))) + goto handle_nest_cancel; + + nla_nest_end(msg, handle_attr); + return 0; + +handle_nest_cancel: + nla_nest_cancel(msg, handle_attr); + return -EMSGSIZE; +} + +static int +net_shaper_fill_one(struct sk_buff *msg, + const struct net_shaper_binding *binding, + const struct net_shaper *shaper, + const struct genl_info *info) +{ + void *hdr; + + hdr = genlmsg_iput(msg, info); + if (!hdr) + return -EMSGSIZE; + + if (net_shaper_fill_binding(msg, binding, NET_SHAPER_A_IFINDEX) || + net_shaper_fill_handle(msg, &shaper->parent, + NET_SHAPER_A_PARENT) || + net_shaper_fill_handle(msg, &shaper->handle, + NET_SHAPER_A_HANDLE) || + ((shaper->bw_min || shaper->bw_max || shaper->burst) && + nla_put_u32(msg, NET_SHAPER_A_METRIC, shaper->metric)) || + (shaper->bw_min && + nla_put_uint(msg, NET_SHAPER_A_BW_MIN, shaper->bw_min)) || + (shaper->bw_max && + nla_put_uint(msg, NET_SHAPER_A_BW_MAX, shaper->bw_max)) || + (shaper->burst && + nla_put_uint(msg, NET_SHAPER_A_BURST, shaper->burst)) || + (shaper->priority && + nla_put_u32(msg, NET_SHAPER_A_PRIORITY, shaper->priority)) || + (shaper->weight && + nla_put_u32(msg, NET_SHAPER_A_WEIGHT, shaper->weight))) + goto nla_put_failure; + + genlmsg_end(msg, hdr); + + return 0; + +nla_put_failure: + genlmsg_cancel(msg, hdr); + return -EMSGSIZE; +} + +/* Initialize the context fetching the relevant device and + * acquiring a reference to it. 
+ */ +static int net_shaper_ctx_setup(const struct genl_info *info, int type, + struct net_shaper_nl_ctx *ctx) +{ + struct net *ns = genl_info_net(info); + struct net_device *dev; + int ifindex; + + if (GENL_REQ_ATTR_CHECK(info, type)) + return -EINVAL; + + ifindex = nla_get_u32(info->attrs[type]); + dev = netdev_get_by_index(ns, ifindex, &ctx->dev_tracker, GFP_KERNEL); + if (!dev) { + NL_SET_BAD_ATTR(info->extack, info->attrs[type]); + return -ENOENT; + } + + if (!dev->netdev_ops->net_shaper_ops) { + NL_SET_BAD_ATTR(info->extack, info->attrs[type]); + netdev_put(dev, &ctx->dev_tracker); + return -EOPNOTSUPP; + } + + ctx->binding.type = NET_SHAPER_BINDING_TYPE_NETDEV; + ctx->binding.netdev = dev; + return 0; +} + +static void net_shaper_ctx_cleanup(struct net_shaper_nl_ctx *ctx) +{ + if (ctx->binding.type == NET_SHAPER_BINDING_TYPE_NETDEV) + netdev_put(ctx->binding.netdev, &ctx->dev_tracker); +} + +static u32 net_shaper_handle_to_index(const struct net_shaper_handle *handle) +{ + return FIELD_PREP(NET_SHAPER_SCOPE_MASK, handle->scope) | + FIELD_PREP(NET_SHAPER_ID_MASK, handle->id); +} + +static void net_shaper_index_to_handle(u32 index, + struct net_shaper_handle *handle) +{ + handle->scope = FIELD_GET(NET_SHAPER_SCOPE_MASK, index); + handle->id = FIELD_GET(NET_SHAPER_ID_MASK, index); +} + +static void net_shaper_default_parent(const struct net_shaper_handle *handle, + struct net_shaper_handle *parent) +{ + switch (handle->scope) { + case NET_SHAPER_SCOPE_UNSPEC: + case NET_SHAPER_SCOPE_NETDEV: + case __NET_SHAPER_SCOPE_MAX: + parent->scope = NET_SHAPER_SCOPE_UNSPEC; + break; + + case NET_SHAPER_SCOPE_QUEUE: + case NET_SHAPER_SCOPE_NODE: + parent->scope = NET_SHAPER_SCOPE_NETDEV; + break; + } + parent->id = 0; +} + +/* + * MARK_0 is already in use due to XA_FLAGS_ALLOC, can't reuse such flag as + * it's cleared by xa_store(). + */ +#define NET_SHAPER_NOT_VALID XA_MARK_1 + +static struct net_shaper * +net_shaper_lookup(struct net_shaper_binding *binding, + const struct net_shaper_handle *handle) +{ + struct net_shaper_hierarchy *hierarchy = net_shaper_hierarchy(binding); + u32 index = net_shaper_handle_to_index(handle); + + if (!hierarchy || xa_get_mark(&hierarchy->shapers, index, + NET_SHAPER_NOT_VALID)) + return NULL; + + return xa_load(&hierarchy->shapers, index); +} + +/* Allocate on demand the per device shaper's hierarchy container. + * Called under the net shaper lock + */ +static struct net_shaper_hierarchy * +net_shaper_hierarchy_setup(struct net_shaper_binding *binding) +{ + struct net_shaper_hierarchy *hierarchy = net_shaper_hierarchy(binding); + + if (hierarchy) + return hierarchy; + + hierarchy = kmalloc(sizeof(*hierarchy), GFP_KERNEL); + if (!hierarchy) + return NULL; + + /* The flag is required for ID allocation */ + xa_init_flags(&hierarchy->shapers, XA_FLAGS_ALLOC); + + switch (binding->type) { + case NET_SHAPER_BINDING_TYPE_NETDEV: + /* Pairs with READ_ONCE in net_shaper_hierarchy. */ + WRITE_ONCE(binding->netdev->net_shaper_hierarchy, hierarchy); + break; + } + return hierarchy; +} + +/* Prepare the hierarchy container to actually insert the given shaper, doing + * in advance the needed allocations. 
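net_shaper_handle_to_index()/net_shaper_index_to_handle() above pack a handle into one xarray index: scope in the top 6 bits, id in the low 26 (NET_SHAPER_SCOPE_SHIFT). A self-contained round-trip sketch of the same packing; the concrete scope value used is only an example:

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    #define SCOPE_SHIFT 26
    #define ID_MASK     ((1u << SCOPE_SHIFT) - 1)  /* GENMASK(25, 0) */

    struct handle { uint32_t scope, id; };

    static uint32_t handle_to_index(struct handle h)
    {
            return (h.scope << SCOPE_SHIFT) | (h.id & ID_MASK);
    }

    static struct handle index_to_handle(uint32_t index)
    {
            return (struct handle){ .scope = index >> SCOPE_SHIFT,
                                    .id = index & ID_MASK };
    }

    int main(void)
    {
            struct handle h = { .scope = 3, .id = 42 };  /* scope value assumed */
            struct handle back = index_to_handle(handle_to_index(h));

            assert(back.scope == h.scope && back.id == h.id);
            printf("index=0x%08x scope=%u id=%u\n",
                   handle_to_index(h), back.scope, back.id);
            return 0;
    }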
+ */
+static int net_shaper_pre_insert(struct net_shaper_binding *binding,
+                                 struct net_shaper_handle *handle,
+                                 struct netlink_ext_ack *extack)
+{
+        struct net_shaper_hierarchy *hierarchy = net_shaper_hierarchy(binding);
+        struct net_shaper *prev, *cur;
+        bool id_allocated = false;
+        int ret, index;
+
+        if (!hierarchy)
+                return -ENOMEM;
+
+        index = net_shaper_handle_to_index(handle);
+        cur = xa_load(&hierarchy->shapers, index);
+        if (cur)
+                return 0;
+
+        /* Allocate a new id, if needed. */
+        if (handle->scope == NET_SHAPER_SCOPE_NODE &&
+            handle->id == NET_SHAPER_ID_UNSPEC) {
+                u32 min, max;
+
+                handle->id = NET_SHAPER_ID_MASK - 1;
+                max = net_shaper_handle_to_index(handle);
+                handle->id = 0;
+                min = net_shaper_handle_to_index(handle);
+
+                ret = xa_alloc(&hierarchy->shapers, &index, NULL,
+                               XA_LIMIT(min, max), GFP_KERNEL);
+                if (ret < 0) {
+                        NL_SET_ERR_MSG(extack, "Can't allocate new id for NODE shaper");
+                        return ret;
+                }
+
+                net_shaper_index_to_handle(index, handle);
+                id_allocated = true;
+        }
+
+        cur = kzalloc(sizeof(*cur), GFP_KERNEL);
+        if (!cur) {
+                ret = -ENOMEM;
+                goto free_id;
+        }
+
+        /* Mark the 'tentative' shaper inside the hierarchy container.
+         * xa_set_mark is a no-op if the previous store fails.
+         */
+        xa_lock(&hierarchy->shapers);
+        prev = __xa_store(&hierarchy->shapers, index, cur, GFP_KERNEL);
+        __xa_set_mark(&hierarchy->shapers, index, NET_SHAPER_NOT_VALID);
+        xa_unlock(&hierarchy->shapers);
+        if (xa_err(prev)) {
+                NL_SET_ERR_MSG(extack, "Can't insert shaper into device store");
+                kfree_rcu(cur, rcu);
+                ret = xa_err(prev);
+                goto free_id;
+        }
+        return 0;
+
+free_id:
+        if (id_allocated)
+                xa_erase(&hierarchy->shapers, index);
+        return ret;
+}
+
+/* Commit the tentative insert with the actual values.
+ * Must be called only after a successful net_shaper_pre_insert().
+ */
+static void net_shaper_commit(struct net_shaper_binding *binding,
+                              int nr_shapers, const struct net_shaper *shapers)
+{
+        struct net_shaper_hierarchy *hierarchy = net_shaper_hierarchy(binding);
+        struct net_shaper *cur;
+        int index;
+        int i;
+
+        xa_lock(&hierarchy->shapers);
+        for (i = 0; i < nr_shapers; ++i) {
+                index = net_shaper_handle_to_index(&shapers[i].handle);
+
+                cur = xa_load(&hierarchy->shapers, index);
+                if (WARN_ON_ONCE(!cur))
+                        continue;
+
+                /* Successful update: drop the tentative mark
+                 * and update the hierarchy container.
+                 */
+                __xa_clear_mark(&hierarchy->shapers, index,
+                                NET_SHAPER_NOT_VALID);
+                *cur = shapers[i];
+        }
+        xa_unlock(&hierarchy->shapers);
+}
+
+/* Rollback all the tentative inserts from the hierarchy.
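The pre-insert/commit/rollback trio implements a small two-phase commit: placeholders enter the xarray marked NET_SHAPER_NOT_VALID, and the mark is cleared only after the driver accepts the whole batch. A toy userspace analogy, with a plain table and a 'tentative' flag standing in for the xarray and its mark:

    #include <stdbool.h>
    #include <stdio.h>
    #include <string.h>

    #define SLOTS 8

    struct entry { bool used, tentative; int value; };
    static struct entry table[SLOTS];

    static int pre_insert(int idx)
    {
            if (idx < 0 || idx >= SLOTS || table[idx].used)
                    return -1;
            table[idx] = (struct entry){ .used = true, .tentative = true };
            return 0;
    }

    static void commit(int idx, int value)
    {
            table[idx].tentative = false;  /* analogue of __xa_clear_mark() */
            table[idx].value = value;
    }

    static void rollback(void)
    {
            for (int i = 0; i < SLOTS; i++)
                    if (table[i].tentative)  /* drop only the placeholders */
                            memset(&table[i], 0, sizeof(table[i]));
    }

    int main(void)
    {
            pre_insert(1);
            pre_insert(2);
            commit(1, 100);  /* slot 1 accepted by the "driver" */
            rollback();      /* slot 2 never confirmed: erased */
            printf("slot1 used=%d value=%d, slot2 used=%d\n",
                   table[1].used, table[1].value, table[2].used);
            return 0;
    }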
+ */
+static void net_shaper_rollback(struct net_shaper_binding *binding)
+{
+        struct net_shaper_hierarchy *hierarchy = net_shaper_hierarchy(binding);
+        struct net_shaper *cur;
+        unsigned long index;
+
+        if (!hierarchy)
+                return;
+
+        xa_lock(&hierarchy->shapers);
+        xa_for_each_marked(&hierarchy->shapers, index, cur,
+                           NET_SHAPER_NOT_VALID) {
+                __xa_erase(&hierarchy->shapers, index);
+                kfree(cur);
+        }
+        xa_unlock(&hierarchy->shapers);
+}
+
+static int net_shaper_parse_handle(const struct nlattr *attr,
+                                   const struct genl_info *info,
+                                   struct net_shaper_handle *handle)
+{
+        struct nlattr *tb[NET_SHAPER_A_HANDLE_MAX + 1];
+        struct nlattr *id_attr;
+        u32 id = 0;
+        int ret;
+
+        ret = nla_parse_nested(tb, NET_SHAPER_A_HANDLE_MAX, attr,
+                               net_shaper_handle_nl_policy, info->extack);
+        if (ret < 0)
+                return ret;
+
+        if (NL_REQ_ATTR_CHECK(info->extack, attr, tb,
+                              NET_SHAPER_A_HANDLE_SCOPE))
+                return -EINVAL;
+
+        handle->scope = nla_get_u32(tb[NET_SHAPER_A_HANDLE_SCOPE]);
+
+        /* The default id for NODE scope shapers is an invalid one
+         * to help the 'group' operation discriminate between new
+         * NODE shaper creation (ID_UNSPEC) and reuse of an existing
+         * shaper (any other value).
+         */
+        id_attr = tb[NET_SHAPER_A_HANDLE_ID];
+        if (id_attr)
+                id = nla_get_u32(id_attr);
+        else if (handle->scope == NET_SHAPER_SCOPE_NODE)
+                id = NET_SHAPER_ID_UNSPEC;
+
+        handle->id = id;
+        return 0;
+}
+
+static int net_shaper_validate_caps(struct net_shaper_binding *binding,
+                                    struct nlattr **tb,
+                                    const struct genl_info *info,
+                                    struct net_shaper *shaper)
+{
+        const struct net_shaper_ops *ops = net_shaper_ops(binding);
+        struct nlattr *bad = NULL;
+        unsigned long caps = 0;
+
+        ops->capabilities(binding, shaper->handle.scope, &caps);
+
+        if (tb[NET_SHAPER_A_PRIORITY] &&
+            !(caps & BIT(NET_SHAPER_A_CAPS_SUPPORT_PRIORITY)))
+                bad = tb[NET_SHAPER_A_PRIORITY];
+        if (tb[NET_SHAPER_A_WEIGHT] &&
+            !(caps & BIT(NET_SHAPER_A_CAPS_SUPPORT_WEIGHT)))
+                bad = tb[NET_SHAPER_A_WEIGHT];
+        if (tb[NET_SHAPER_A_BW_MIN] &&
+            !(caps & BIT(NET_SHAPER_A_CAPS_SUPPORT_BW_MIN)))
+                bad = tb[NET_SHAPER_A_BW_MIN];
+        if (tb[NET_SHAPER_A_BW_MAX] &&
+            !(caps & BIT(NET_SHAPER_A_CAPS_SUPPORT_BW_MAX)))
+                bad = tb[NET_SHAPER_A_BW_MAX];
+        if (tb[NET_SHAPER_A_BURST] &&
+            !(caps & BIT(NET_SHAPER_A_CAPS_SUPPORT_BURST)))
+                bad = tb[NET_SHAPER_A_BURST];
+
+        if (!caps)
+                bad = tb[NET_SHAPER_A_HANDLE];
+
+        if (bad) {
+                NL_SET_BAD_ATTR(info->extack, bad);
+                return -EOPNOTSUPP;
+        }
+
+        if (shaper->handle.scope == NET_SHAPER_SCOPE_QUEUE &&
+            binding->type == NET_SHAPER_BINDING_TYPE_NETDEV &&
+            shaper->handle.id >= binding->netdev->real_num_tx_queues) {
+                NL_SET_ERR_MSG_FMT(info->extack,
+                                   "Nonexistent queue id %d, max %d",
+                                   shaper->handle.id,
+                                   binding->netdev->real_num_tx_queues);
+                return -ENOENT;
+        }
+
+        /* The metric is really used only if there is *any* rate-related
+         * setting, either in the current attribute set or in pre-existing
+         * values.
+         */
+        if (shaper->burst || shaper->bw_min || shaper->bw_max) {
+                u32 metric_cap = NET_SHAPER_A_CAPS_SUPPORT_METRIC_BPS +
+                                 shaper->metric;
+
+                /* The metric test can fail even when the user did not
+                 * specify the METRIC attribute. Pointing to a rate-related
+                 * attribute would be confusing, as the attribute itself
+                 * could indeed be supported, with a different metric.
+                 * Be more specific.
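Capability validation reduces to bit tests against the per-scope mask the driver reports, and the metric check offsets a base capability bit by the requested metric value. A compact sketch with assumed bit positions (the real values come from the generated net_shaper uapi enum):

    #include <stdio.h>

    #define BIT(n) (1UL << (n))

    /* Assumed bit layout, for illustration only. */
    enum { CAP_METRIC_BPS, CAP_METRIC_PPS, CAP_NESTING, CAP_BW_MAX };

    static int validate(unsigned long caps, int metric, int wants_bw_max)
    {
            if (wants_bw_max && !(caps & BIT(CAP_BW_MAX)))
                    return -1;  /* attribute unsupported for this scope */
            /* Base bit + metric value, as in validate_caps() above. */
            if (wants_bw_max && !(caps & BIT(CAP_METRIC_BPS + metric)))
                    return -2;  /* rate given with an unsupported metric */
            return 0;
    }

    int main(void)
    {
            unsigned long queue_caps = BIT(CAP_METRIC_BPS) | BIT(CAP_BW_MAX);

            printf("bps rate: %d\n", validate(queue_caps, 0, 1)); /* 0: ok */
            printf("pps rate: %d\n", validate(queue_caps, 1, 1)); /* -2 */
            printf("no rate:  %d\n", validate(queue_caps, 0, 0)); /* 0: ok */
            return 0;
    }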
+                 */
+                if (!(caps & BIT(metric_cap))) {
+                        NL_SET_ERR_MSG_FMT(info->extack, "Bad metric %d",
+                                           shaper->metric);
+                        return -EOPNOTSUPP;
+                }
+        }
+        return 0;
+}
+
+static int net_shaper_parse_info(struct net_shaper_binding *binding,
+                                 struct nlattr **tb,
+                                 const struct genl_info *info,
+                                 struct net_shaper *shaper,
+                                 bool *exists)
+{
+        struct net_shaper *old;
+        int ret;
+
+        /* The shaper handle is the only mandatory attribute. */
+        if (NL_REQ_ATTR_CHECK(info->extack, NULL, tb, NET_SHAPER_A_HANDLE))
+                return -EINVAL;
+
+        ret = net_shaper_parse_handle(tb[NET_SHAPER_A_HANDLE], info,
+                                      &shaper->handle);
+        if (ret)
+                return ret;
+
+        if (shaper->handle.scope == NET_SHAPER_SCOPE_UNSPEC) {
+                NL_SET_BAD_ATTR(info->extack, tb[NET_SHAPER_A_HANDLE]);
+                return -EINVAL;
+        }
+
+        /* Fetch the existing hierarchy, if any, so that user-provided info
+         * will incrementally update the existing shaper configuration.
+         */
+        old = net_shaper_lookup(binding, &shaper->handle);
+        if (old)
+                *shaper = *old;
+        *exists = !!old;
+
+        if (tb[NET_SHAPER_A_METRIC])
+                shaper->metric = nla_get_u32(tb[NET_SHAPER_A_METRIC]);
+
+        if (tb[NET_SHAPER_A_BW_MIN])
+                shaper->bw_min = nla_get_uint(tb[NET_SHAPER_A_BW_MIN]);
+
+        if (tb[NET_SHAPER_A_BW_MAX])
+                shaper->bw_max = nla_get_uint(tb[NET_SHAPER_A_BW_MAX]);
+
+        if (tb[NET_SHAPER_A_BURST])
+                shaper->burst = nla_get_uint(tb[NET_SHAPER_A_BURST]);
+
+        if (tb[NET_SHAPER_A_PRIORITY])
+                shaper->priority = nla_get_u32(tb[NET_SHAPER_A_PRIORITY]);
+
+        if (tb[NET_SHAPER_A_WEIGHT])
+                shaper->weight = nla_get_u32(tb[NET_SHAPER_A_WEIGHT]);
+
+        ret = net_shaper_validate_caps(binding, tb, info, shaper);
+        if (ret < 0)
+                return ret;
+
+        return 0;
+}
+
+static int net_shaper_validate_nesting(struct net_shaper_binding *binding,
+                                       const struct net_shaper *shaper,
+                                       struct netlink_ext_ack *extack)
+{
+        const struct net_shaper_ops *ops = net_shaper_ops(binding);
+        unsigned long caps = 0;
+
+        ops->capabilities(binding, shaper->handle.scope, &caps);
+        if (!(caps & BIT(NET_SHAPER_A_CAPS_SUPPORT_NESTING))) {
+                NL_SET_ERR_MSG_FMT(extack,
+                                   "Nesting not supported for scope %d",
+                                   shaper->handle.scope);
+                return -EOPNOTSUPP;
+        }
+        return 0;
+}
+
+/* Fetch the existing leaf and update it with the user-provided
+ * attributes.
+ */
+static int net_shaper_parse_leaf(struct net_shaper_binding *binding,
+                                 const struct nlattr *attr,
+                                 const struct genl_info *info,
+                                 const struct net_shaper *node,
+                                 struct net_shaper *shaper)
+{
+        struct nlattr *tb[NET_SHAPER_A_WEIGHT + 1];
+        bool exists;
+        int ret;
+
+        ret = nla_parse_nested(tb, NET_SHAPER_A_WEIGHT, attr,
+                               net_shaper_leaf_info_nl_policy, info->extack);
+        if (ret < 0)
+                return ret;
+
+        ret = net_shaper_parse_info(binding, tb, info, shaper, &exists);
+        if (ret < 0)
+                return ret;
+
+        if (shaper->handle.scope != NET_SHAPER_SCOPE_QUEUE) {
+                NL_SET_BAD_ATTR(info->extack, tb[NET_SHAPER_A_HANDLE]);
+                return -EINVAL;
+        }
+
+        if (node->handle.scope == NET_SHAPER_SCOPE_NODE) {
+                ret = net_shaper_validate_nesting(binding, shaper,
+                                                  info->extack);
+                if (ret < 0)
+                        return ret;
+        }
+
+        if (!exists)
+                net_shaper_default_parent(&shaper->handle, &shaper->parent);
+        return 0;
+}
+
+/* Like net_shaper_parse_info(), but additionally allows the user to specify
+ * the shaper's parent handle.
+ */ +static int net_shaper_parse_node(struct net_shaper_binding *binding, + struct nlattr **tb, + const struct genl_info *info, + struct net_shaper *shaper) +{ + bool exists; + int ret; + + ret = net_shaper_parse_info(binding, tb, info, shaper, &exists); + if (ret) + return ret; + + if (shaper->handle.scope != NET_SHAPER_SCOPE_NODE && + shaper->handle.scope != NET_SHAPER_SCOPE_NETDEV) { + NL_SET_BAD_ATTR(info->extack, tb[NET_SHAPER_A_HANDLE]); + return -EINVAL; + } + + if (tb[NET_SHAPER_A_PARENT]) { + ret = net_shaper_parse_handle(tb[NET_SHAPER_A_PARENT], info, + &shaper->parent); + if (ret) + return ret; + + if (shaper->parent.scope != NET_SHAPER_SCOPE_NODE && + shaper->parent.scope != NET_SHAPER_SCOPE_NETDEV) { + NL_SET_BAD_ATTR(info->extack, tb[NET_SHAPER_A_PARENT]); + return -EINVAL; + } + } + return 0; +} + +static int net_shaper_generic_pre(struct genl_info *info, int type) +{ + struct net_shaper_nl_ctx *ctx = (struct net_shaper_nl_ctx *)info->ctx; + + BUILD_BUG_ON(sizeof(*ctx) > sizeof(info->ctx)); + + return net_shaper_ctx_setup(info, type, ctx); +} + +int net_shaper_nl_pre_doit(const struct genl_split_ops *ops, + struct sk_buff *skb, struct genl_info *info) +{ + return net_shaper_generic_pre(info, NET_SHAPER_A_IFINDEX); +} + +static void net_shaper_generic_post(struct genl_info *info) +{ + net_shaper_ctx_cleanup((struct net_shaper_nl_ctx *)info->ctx); +} + +void net_shaper_nl_post_doit(const struct genl_split_ops *ops, + struct sk_buff *skb, struct genl_info *info) +{ + net_shaper_generic_post(info); +} + +int net_shaper_nl_pre_dumpit(struct netlink_callback *cb) +{ + struct net_shaper_nl_ctx *ctx = (struct net_shaper_nl_ctx *)cb->ctx; + const struct genl_info *info = genl_info_dump(cb); + + return net_shaper_ctx_setup(info, NET_SHAPER_A_IFINDEX, ctx); +} + +int net_shaper_nl_post_dumpit(struct netlink_callback *cb) +{ + net_shaper_ctx_cleanup((struct net_shaper_nl_ctx *)cb->ctx); + return 0; +} + +int net_shaper_nl_cap_pre_doit(const struct genl_split_ops *ops, + struct sk_buff *skb, struct genl_info *info) +{ + return net_shaper_generic_pre(info, NET_SHAPER_A_CAPS_IFINDEX); +} + +void net_shaper_nl_cap_post_doit(const struct genl_split_ops *ops, + struct sk_buff *skb, struct genl_info *info) +{ + net_shaper_generic_post(info); +} + +int net_shaper_nl_cap_pre_dumpit(struct netlink_callback *cb) +{ + struct net_shaper_nl_ctx *ctx = (struct net_shaper_nl_ctx *)cb->ctx; + + return net_shaper_ctx_setup(genl_info_dump(cb), + NET_SHAPER_A_CAPS_IFINDEX, ctx); +} + +int net_shaper_nl_cap_post_dumpit(struct netlink_callback *cb) +{ + struct net_shaper_nl_ctx *ctx = (struct net_shaper_nl_ctx *)cb->ctx; + + net_shaper_ctx_cleanup(ctx); + return 0; +} + +int net_shaper_nl_get_doit(struct sk_buff *skb, struct genl_info *info) +{ + struct net_shaper_binding *binding; + struct net_shaper_handle handle; + struct net_shaper *shaper; + struct sk_buff *msg; + int ret; + + if (GENL_REQ_ATTR_CHECK(info, NET_SHAPER_A_HANDLE)) + return -EINVAL; + + binding = net_shaper_binding_from_ctx(info->ctx); + ret = net_shaper_parse_handle(info->attrs[NET_SHAPER_A_HANDLE], info, + &handle); + if (ret < 0) + return ret; + + msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); + if (!msg) + return -ENOMEM; + + rcu_read_lock(); + shaper = net_shaper_lookup(binding, &handle); + if (!shaper) { + NL_SET_BAD_ATTR(info->extack, + info->attrs[NET_SHAPER_A_HANDLE]); + rcu_read_unlock(); + ret = -ENOENT; + goto free_msg; + } + + ret = net_shaper_fill_one(msg, binding, shaper, info); + rcu_read_unlock(); + if (ret) + goto 
free_msg; + + ret = genlmsg_reply(msg, info); + if (ret) + goto free_msg; + + return 0; + +free_msg: + nlmsg_free(msg); + return ret; +} + +int net_shaper_nl_get_dumpit(struct sk_buff *skb, + struct netlink_callback *cb) +{ + struct net_shaper_nl_ctx *ctx = (struct net_shaper_nl_ctx *)cb->ctx; + const struct genl_info *info = genl_info_dump(cb); + struct net_shaper_hierarchy *hierarchy; + struct net_shaper_binding *binding; + struct net_shaper *shaper; + int ret = 0; + + /* Don't error out dumps performed before any set operation. */ + binding = net_shaper_binding_from_ctx(ctx); + hierarchy = net_shaper_hierarchy(binding); + if (!hierarchy) + return 0; + + rcu_read_lock(); + for (; (shaper = xa_find(&hierarchy->shapers, &ctx->start_index, + U32_MAX, XA_PRESENT)); ctx->start_index++) { + ret = net_shaper_fill_one(skb, binding, shaper, info); + if (ret) + break; + } + rcu_read_unlock(); + + return ret; +} + +int net_shaper_nl_set_doit(struct sk_buff *skb, struct genl_info *info) +{ + struct net_shaper_hierarchy *hierarchy; + struct net_shaper_binding *binding; + const struct net_shaper_ops *ops; + struct net_shaper_handle handle; + struct net_shaper shaper = {}; + bool exists; + int ret; + + binding = net_shaper_binding_from_ctx(info->ctx); + + net_shaper_lock(binding); + ret = net_shaper_parse_info(binding, info->attrs, info, &shaper, + &exists); + if (ret) + goto unlock; + + if (!exists) + net_shaper_default_parent(&shaper.handle, &shaper.parent); + + hierarchy = net_shaper_hierarchy_setup(binding); + if (!hierarchy) { + ret = -ENOMEM; + goto unlock; + } + + /* The 'set' operation can't create node-scope shapers. */ + handle = shaper.handle; + if (handle.scope == NET_SHAPER_SCOPE_NODE && + !net_shaper_lookup(binding, &handle)) { + ret = -ENOENT; + goto unlock; + } + + ret = net_shaper_pre_insert(binding, &handle, info->extack); + if (ret) + goto unlock; + + ops = net_shaper_ops(binding); + ret = ops->set(binding, &shaper, info->extack); + if (ret) { + net_shaper_rollback(binding); + goto unlock; + } + + net_shaper_commit(binding, 1, &shaper); + +unlock: + net_shaper_unlock(binding); + return ret; +} + +static int __net_shaper_delete(struct net_shaper_binding *binding, + struct net_shaper *shaper, + struct netlink_ext_ack *extack) +{ + struct net_shaper_hierarchy *hierarchy = net_shaper_hierarchy(binding); + struct net_shaper_handle parent_handle, handle = shaper->handle; + const struct net_shaper_ops *ops = net_shaper_ops(binding); + int ret; + +again: + parent_handle = shaper->parent; + + ret = ops->delete(binding, &handle, extack); + if (ret < 0) + return ret; + + xa_erase(&hierarchy->shapers, net_shaper_handle_to_index(&handle)); + kfree_rcu(shaper, rcu); + + /* Eventually delete the parent, if it is left over with no leaves. */ + if (parent_handle.scope == NET_SHAPER_SCOPE_NODE) { + shaper = net_shaper_lookup(binding, &parent_handle); + if (shaper && !--shaper->leaves) { + handle = parent_handle; + goto again; + } + } + return 0; +} + +static int net_shaper_handle_cmp(const struct net_shaper_handle *a, + const struct net_shaper_handle *b) +{ + /* Must avoid holes in struct net_shaper_handle. 
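net_shaper_handle_cmp() can compare whole handles with memcmp() only because two u32 members pack with no padding holes; the BUILD_BUG_ON pins that invariant down at compile time. The same idea in standalone form, with _Static_assert playing the BUILD_BUG_ON role:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    struct handle { uint32_t scope; uint32_t id; };

    /* Two u32s leave no holes, so byte-wise comparison is sound; with
     * padding, indeterminate bytes could make equal handles compare unequal.
     */
    _Static_assert(sizeof(struct handle) == 8, "padding would break memcmp");

    static int handle_cmp(const struct handle *a, const struct handle *b)
    {
            return memcmp(a, b, sizeof(*a));
    }

    int main(void)
    {
            struct handle a = { 2, 7 }, b = { 2, 7 }, c = { 2, 8 };

            printf("a==b: %d, a==c: %d\n",
                   !handle_cmp(&a, &b), !handle_cmp(&a, &c));
            return 0;
    }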
*/
+        BUILD_BUG_ON(sizeof(*a) != 8);
+
+        return memcmp(a, b, sizeof(*a));
+}
+
+static int net_shaper_parent_from_leaves(int leaves_count,
+                                         const struct net_shaper *leaves,
+                                         struct net_shaper *node,
+                                         struct netlink_ext_ack *extack)
+{
+        struct net_shaper_handle parent = leaves[0].parent;
+        int i;
+
+        for (i = 1; i < leaves_count; ++i) {
+                if (net_shaper_handle_cmp(&leaves[i].parent, &parent)) {
+                        NL_SET_ERR_MSG_FMT(extack, "All the leaf shapers must have the same old parent");
+                        return -EINVAL;
+                }
+        }
+
+        node->parent = parent;
+        return 0;
+}
+
+static int __net_shaper_group(struct net_shaper_binding *binding,
+                              bool update_node, int leaves_count,
+                              struct net_shaper *leaves,
+                              struct net_shaper *node,
+                              struct netlink_ext_ack *extack)
+{
+        const struct net_shaper_ops *ops = net_shaper_ops(binding);
+        struct net_shaper_handle leaf_handle;
+        struct net_shaper *parent = NULL;
+        bool new_node = false;
+        int i, ret;
+
+        if (node->handle.scope == NET_SHAPER_SCOPE_NODE) {
+                new_node = node->handle.id == NET_SHAPER_ID_UNSPEC;
+
+                if (!new_node && !net_shaper_lookup(binding, &node->handle)) {
+                        /* The related attribute is not available when
+                         * reaching here from the delete() op.
+                         */
+                        NL_SET_ERR_MSG_FMT(extack, "Node shaper %d:%d does not exist",
+                                           node->handle.scope, node->handle.id);
+                        return -ENOENT;
+                }
+
+                /* When unspecified, the node parent scope is inherited from
+                 * the leaves.
+                 */
+                if (node->parent.scope == NET_SHAPER_SCOPE_UNSPEC) {
+                        ret = net_shaper_parent_from_leaves(leaves_count,
+                                                            leaves, node,
+                                                            extack);
+                        if (ret)
+                                return ret;
+                }
+
+        } else {
+                net_shaper_default_parent(&node->handle, &node->parent);
+        }
+
+        if (node->parent.scope == NET_SHAPER_SCOPE_NODE) {
+                parent = net_shaper_lookup(binding, &node->parent);
+                if (!parent) {
+                        NL_SET_ERR_MSG_FMT(extack, "Node parent shaper %d:%d does not exist",
+                                           node->parent.scope, node->parent.id);
+                        return -ENOENT;
+                }
+
+                ret = net_shaper_validate_nesting(binding, node, extack);
+                if (ret < 0)
+                        return ret;
+        }
+
+        if (update_node) {
+                /* For a newly created node scope shaper, the following will
+                 * update the handle, due to id allocation.
+                 */
+                ret = net_shaper_pre_insert(binding, &node->handle, extack);
+                if (ret)
+                        return ret;
+        }
+
+        for (i = 0; i < leaves_count; ++i) {
+                leaf_handle = leaves[i].handle;
+
+                ret = net_shaper_pre_insert(binding, &leaf_handle, extack);
+                if (ret)
+                        goto rollback;
+
+                if (!net_shaper_handle_cmp(&leaves[i].parent, &node->handle))
+                        continue;
+
+                /* The leaf shapers will be nested under the node; update the
+                 * linking accordingly.
+                 */
+                leaves[i].parent = node->handle;
+                node->leaves++;
+        }
+
+        ret = ops->group(binding, leaves_count, leaves, node, extack);
+        if (ret < 0)
+                goto rollback;
+
+        /* The node's parent gains a new leaf only when the node itself
+         * is created by this group operation.
+         */
+        if (new_node && parent)
+                parent->leaves++;
+        if (update_node)
+                net_shaper_commit(binding, 1, node);
+        net_shaper_commit(binding, leaves_count, leaves);
+        return 0;
+
+rollback:
+        net_shaper_rollback(binding);
+        return ret;
+}
+
+static int net_shaper_pre_del_node(struct net_shaper_binding *binding,
+                                   const struct net_shaper *shaper,
+                                   struct netlink_ext_ack *extack)
+{
+        struct net_shaper_hierarchy *hierarchy = net_shaper_hierarchy(binding);
+        struct net_shaper *cur, *leaves, node = {};
+        int ret, leaves_count = 0;
+        unsigned long index;
+        bool update_node;
+
+        if (!shaper->leaves)
+                return 0;
+
+        /* Fetch the new node information.
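As the code below shows, deleting a NODE shaper is not a plain erase: its leaves are first re-grouped under the node's own parent so the hierarchy stays connected. A toy sketch of that re-parenting step (integer ids are illustrative; the kernel works on handles):

    #include <stdio.h>

    struct shaper { int id, parent; };

    /* Re-link every leaf of 'node' to the node's own parent, mirroring
     * what net_shaper_pre_del_node() achieves via __net_shaper_group().
     */
    static void reparent_leaves(struct shaper *leaves, int n,
                                const struct shaper *node)
    {
            for (int i = 0; i < n; i++)
                    if (leaves[i].parent == node->id)
                            leaves[i].parent = node->parent;
    }

    int main(void)
    {
            struct shaper node = { .id = 10, .parent = 0 /* "netdev" */ };
            struct shaper leaves[] = { { 1, 10 }, { 2, 10 }, { 3, 0 } };

            reparent_leaves(leaves, 3, &node);
            for (int i = 0; i < 3; i++)
                    printf("leaf %d -> parent %d\n",
                           leaves[i].id, leaves[i].parent);
            return 0;
    }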
+         */
+        node.handle = shaper->parent;
+        cur = net_shaper_lookup(binding, &node.handle);
+        if (cur) {
+                node = *cur;
+        } else {
+                /* A NODE scope shaper can be nested only under the NETDEV
+                 * scope shaper without creating the latter; this check may
+                 * fail only if the data is in an inconsistent state.
+                 */
+                if (WARN_ON_ONCE(node.handle.scope != NET_SHAPER_SCOPE_NETDEV))
+                        return -EINVAL;
+        }
+
+        leaves = kcalloc(shaper->leaves, sizeof(struct net_shaper),
+                         GFP_KERNEL);
+        if (!leaves)
+                return -ENOMEM;
+
+        /* Build the leaves array. */
+        xa_for_each(&hierarchy->shapers, index, cur) {
+                if (net_shaper_handle_cmp(&cur->parent, &shaper->handle))
+                        continue;
+
+                if (WARN_ON_ONCE(leaves_count == shaper->leaves)) {
+                        ret = -EINVAL;
+                        goto free;
+                }
+
+                leaves[leaves_count++] = *cur;
+        }
+
+        /* When re-linking to the netdev shaper, avoid the implicit creation
+         * of a new node, which would be surprising since the user is
+         * performing a delete operation.
+         */
+        update_node = node.handle.scope != NET_SHAPER_SCOPE_NETDEV;
+        ret = __net_shaper_group(binding, update_node, leaves_count,
+                                 leaves, &node, extack);
+
+free:
+        kfree(leaves);
+        return ret;
+}
+
+int net_shaper_nl_delete_doit(struct sk_buff *skb, struct genl_info *info)
+{
+        struct net_shaper_hierarchy *hierarchy;
+        struct net_shaper_binding *binding;
+        struct net_shaper_handle handle;
+        struct net_shaper *shaper;
+        int ret;
+
+        if (GENL_REQ_ATTR_CHECK(info, NET_SHAPER_A_HANDLE))
+                return -EINVAL;
+
+        binding = net_shaper_binding_from_ctx(info->ctx);
+
+        net_shaper_lock(binding);
+        ret = net_shaper_parse_handle(info->attrs[NET_SHAPER_A_HANDLE], info,
+                                      &handle);
+        if (ret)
+                goto unlock;
+
+        hierarchy = net_shaper_hierarchy(binding);
+        if (!hierarchy) {
+                ret = -ENOENT;
+                goto unlock;
+        }
+
+        shaper = net_shaper_lookup(binding, &handle);
+        if (!shaper) {
+                ret = -ENOENT;
+                goto unlock;
+        }
+
+        if (handle.scope == NET_SHAPER_SCOPE_NODE) {
+                ret = net_shaper_pre_del_node(binding, shaper, info->extack);
+                if (ret)
+                        goto unlock;
+        }
+
+        ret = __net_shaper_delete(binding, shaper, info->extack);
+
+unlock:
+        net_shaper_unlock(binding);
+        return ret;
+}
+
+static int net_shaper_group_send_reply(struct net_shaper_binding *binding,
+                                       const struct net_shaper_handle *handle,
+                                       struct genl_info *info,
+                                       struct sk_buff *msg)
+{
+        void *hdr;
+
+        hdr = genlmsg_iput(msg, info);
+        if (!hdr)
+                goto free_msg;
+
+        if (net_shaper_fill_binding(msg, binding, NET_SHAPER_A_IFINDEX) ||
+            net_shaper_fill_handle(msg, handle, NET_SHAPER_A_HANDLE))
+                goto free_msg;
+
+        genlmsg_end(msg, hdr);
+
+        return genlmsg_reply(msg, info);
+
+free_msg:
+        /* Should never happen, as msg is pre-allocated with enough space. */
+        WARN_ONCE(true, "calculated message payload length (%d)",
+                  net_shaper_handle_size());
+        nlmsg_free(msg);
+        return -EMSGSIZE;
+}
+
+int net_shaper_nl_group_doit(struct sk_buff *skb, struct genl_info *info)
+{
+        struct net_shaper **old_nodes, *leaves, node = {};
+        struct net_shaper_hierarchy *hierarchy;
+        struct net_shaper_binding *binding;
+        int i, ret, rem, leaves_count;
+        int old_nodes_count = 0;
+        struct sk_buff *msg;
+        struct nlattr *attr;
+
+        if (GENL_REQ_ATTR_CHECK(info, NET_SHAPER_A_LEAVES))
+                return -EINVAL;
+
+        binding = net_shaper_binding_from_ctx(info->ctx);
+
+        /* The group operation is optional.
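group is the only optional driver callback here: the doit probes the ops table and bails out with EOPNOTSUPP before even taking the lock. The usual optional-ops pattern, in a standalone sketch:

    #include <errno.h>
    #include <stdio.h>

    struct shaper_ops {
            int (*set)(int handle);
            int (*group)(int node);  /* optional: may be NULL */
    };

    static int do_group(const struct shaper_ops *ops, int node)
    {
            if (!ops->group)
                    return -EOPNOTSUPP;  /* same contract as the doit above */
            return ops->group(node);
    }

    static int set_only(int handle) { (void)handle; return 0; }

    int main(void)
    {
            struct shaper_ops ops = { .set = set_only };  /* no .group */

            printf("group -> %d (-EOPNOTSUPP is %d)\n",
                   do_group(&ops, 1), -EOPNOTSUPP);
            return 0;
    }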
*/ + if (!net_shaper_ops(binding)->group) + return -EOPNOTSUPP; + + net_shaper_lock(binding); + leaves_count = net_shaper_list_len(info, NET_SHAPER_A_LEAVES); + if (!leaves_count) { + NL_SET_BAD_ATTR(info->extack, + info->attrs[NET_SHAPER_A_LEAVES]); + ret = -EINVAL; + goto unlock; + } + + leaves = kcalloc(leaves_count, sizeof(struct net_shaper) + + sizeof(struct net_shaper *), GFP_KERNEL); + if (!leaves) { + ret = -ENOMEM; + goto unlock; + } + old_nodes = (void *)&leaves[leaves_count]; + + ret = net_shaper_parse_node(binding, info->attrs, info, &node); + if (ret) + goto free_leaves; + + i = 0; + nla_for_each_attr_type(attr, NET_SHAPER_A_LEAVES, + genlmsg_data(info->genlhdr), + genlmsg_len(info->genlhdr), rem) { + if (WARN_ON_ONCE(i >= leaves_count)) + goto free_leaves; + + ret = net_shaper_parse_leaf(binding, attr, info, + &node, &leaves[i]); + if (ret) + goto free_leaves; + i++; + } + + /* Prepare the msg reply in advance, to avoid device operation + * rollback on allocation failure. + */ + msg = genlmsg_new(net_shaper_handle_size(), GFP_KERNEL); + if (!msg) + goto free_leaves; + + hierarchy = net_shaper_hierarchy_setup(binding); + if (!hierarchy) { + ret = -ENOMEM; + goto free_msg; + } + + /* Record the node shapers that this group() operation can make + * childless for later cleanup. + */ + for (i = 0; i < leaves_count; i++) { + if (leaves[i].parent.scope == NET_SHAPER_SCOPE_NODE && + net_shaper_handle_cmp(&leaves[i].parent, &node.handle)) { + struct net_shaper *tmp; + + tmp = net_shaper_lookup(binding, &leaves[i].parent); + if (!tmp) + continue; + + old_nodes[old_nodes_count++] = tmp; + } + } + + ret = __net_shaper_group(binding, true, leaves_count, leaves, &node, + info->extack); + if (ret) + goto free_msg; + + /* Check if we need to delete any node left alone by the new leaves + * linkage. + */ + for (i = 0; i < old_nodes_count; ++i) { + struct net_shaper *tmp = old_nodes[i]; + + if (--tmp->leaves > 0) + continue; + + /* Errors here are not fatal: the grouping operation is + * completed, and user-space can still explicitly clean-up + * left-over nodes. 
+ */ + __net_shaper_delete(binding, tmp, info->extack); + } + + ret = net_shaper_group_send_reply(binding, &node.handle, info, msg); + if (ret) + GENL_SET_ERR_MSG_FMT(info, "Can't send reply"); + +free_leaves: + kfree(leaves); + +unlock: + net_shaper_unlock(binding); + return ret; + +free_msg: + kfree_skb(msg); + goto free_leaves; +} + +static int +net_shaper_cap_fill_one(struct sk_buff *msg, + struct net_shaper_binding *binding, + enum net_shaper_scope scope, unsigned long flags, + const struct genl_info *info) +{ + unsigned long cur; + void *hdr; + + hdr = genlmsg_iput(msg, info); + if (!hdr) + return -EMSGSIZE; + + if (net_shaper_fill_binding(msg, binding, NET_SHAPER_A_CAPS_IFINDEX) || + nla_put_u32(msg, NET_SHAPER_A_CAPS_SCOPE, scope)) + goto nla_put_failure; + + for (cur = NET_SHAPER_A_CAPS_SUPPORT_METRIC_BPS; + cur <= NET_SHAPER_A_CAPS_MAX; ++cur) { + if (flags & BIT(cur) && nla_put_flag(msg, cur)) + goto nla_put_failure; + } + + genlmsg_end(msg, hdr); + + return 0; + +nla_put_failure: + genlmsg_cancel(msg, hdr); + return -EMSGSIZE; +} + +int net_shaper_nl_cap_get_doit(struct sk_buff *skb, struct genl_info *info) +{ + struct net_shaper_binding *binding; + const struct net_shaper_ops *ops; + enum net_shaper_scope scope; + unsigned long flags = 0; + struct sk_buff *msg; + int ret; + + if (GENL_REQ_ATTR_CHECK(info, NET_SHAPER_A_CAPS_SCOPE)) + return -EINVAL; + + binding = net_shaper_binding_from_ctx(info->ctx); + scope = nla_get_u32(info->attrs[NET_SHAPER_A_CAPS_SCOPE]); + ops = net_shaper_ops(binding); + ops->capabilities(binding, scope, &flags); + if (!flags) + return -EOPNOTSUPP; + + msg = genlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); + if (!msg) + return -ENOMEM; + + ret = net_shaper_cap_fill_one(msg, binding, scope, flags, info); + if (ret) + goto free_msg; + + ret = genlmsg_reply(msg, info); + if (ret) + goto free_msg; + return 0; + +free_msg: + nlmsg_free(msg); + return ret; +} + +int net_shaper_nl_cap_get_dumpit(struct sk_buff *skb, + struct netlink_callback *cb) +{ + const struct genl_info *info = genl_info_dump(cb); + struct net_shaper_binding *binding; + const struct net_shaper_ops *ops; + enum net_shaper_scope scope; + int ret; + + binding = net_shaper_binding_from_ctx(cb->ctx); + ops = net_shaper_ops(binding); + for (scope = 0; scope <= NET_SHAPER_SCOPE_MAX; ++scope) { + unsigned long flags = 0; + + ops->capabilities(binding, scope, &flags); + if (!flags) + continue; + + ret = net_shaper_cap_fill_one(skb, binding, scope, flags, + info); + if (ret) + return ret; + } + + return 0; +} + +static void net_shaper_flush(struct net_shaper_binding *binding) +{ + struct net_shaper_hierarchy *hierarchy = net_shaper_hierarchy(binding); + struct net_shaper *cur; + unsigned long index; + + if (!hierarchy) + return; + + net_shaper_lock(binding); + xa_lock(&hierarchy->shapers); + xa_for_each(&hierarchy->shapers, index, cur) { + __xa_erase(&hierarchy->shapers, index); + kfree(cur); + } + xa_unlock(&hierarchy->shapers); + net_shaper_unlock(binding); + + kfree(hierarchy); +} + +void net_shaper_flush_netdev(struct net_device *dev) +{ + struct net_shaper_binding binding = { + .type = NET_SHAPER_BINDING_TYPE_NETDEV, + .netdev = dev, + }; + + net_shaper_flush(&binding); +} + +void net_shaper_set_real_num_tx_queues(struct net_device *dev, + unsigned int txq) +{ + struct net_shaper_hierarchy *hierarchy; + struct net_shaper_binding binding; + int i; + + binding.type = NET_SHAPER_BINDING_TYPE_NETDEV; + binding.netdev = dev; + hierarchy = net_shaper_hierarchy(&binding); + if (!hierarchy) + return; + + 
/* Only drivers implementing shaper support ensure
+         * the lock is acquired in advance.
+         */
+        lockdep_assert_held(&dev->lock);
+
+        /* Take action only when decreasing the tx queue number. */
+        for (i = txq; i < dev->real_num_tx_queues; ++i) {
+                struct net_shaper_handle handle, parent_handle;
+                struct net_shaper *shaper;
+                u32 index;
+
+                handle.scope = NET_SHAPER_SCOPE_QUEUE;
+                handle.id = i;
+                shaper = net_shaper_lookup(&binding, &handle);
+                if (!shaper)
+                        continue;
+
+                /* Don't touch the H/W for the queue shaper; the driver has
+                 * already deleted the queue and related resources.
+                 */
+                parent_handle = shaper->parent;
+                index = net_shaper_handle_to_index(&handle);
+                xa_erase(&hierarchy->shapers, index);
+                kfree_rcu(shaper, rcu);
+
+                /* The recursion on the parent does the full job. */
+                if (parent_handle.scope != NET_SHAPER_SCOPE_NODE)
+                        continue;
+
+                shaper = net_shaper_lookup(&binding, &parent_handle);
+                if (shaper && !--shaper->leaves)
+                        __net_shaper_delete(&binding, shaper, NULL);
+        }
+}
+
+static int __init shaper_init(void)
+{
+        return genl_register_family(&net_shaper_nl_family);
+}
+
+subsys_initcall(shaper_init);
diff --git a/net/shaper/shaper_nl_gen.c b/net/shaper/shaper_nl_gen.c
new file mode 100644
index 000000000000..204c8ae8c7b1
--- /dev/null
+++ b/net/shaper/shaper_nl_gen.c
@@ -0,0 +1,154 @@
+// SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause)
+/* Do not edit directly, auto-generated from: */
+/* Documentation/netlink/specs/net_shaper.yaml */
+/* YNL-GEN kernel source */
+
+#include <net/netlink.h>
+#include <net/genetlink.h>
+
+#include "shaper_nl_gen.h"
+
+#include <uapi/linux/net_shaper.h>
+
+/* Common nested types */
+const struct nla_policy net_shaper_handle_nl_policy[NET_SHAPER_A_HANDLE_ID + 1] = {
+        [NET_SHAPER_A_HANDLE_SCOPE] = NLA_POLICY_MAX(NLA_U32, 3),
+        [NET_SHAPER_A_HANDLE_ID] = { .type = NLA_U32, },
+};
+
+const struct nla_policy net_shaper_leaf_info_nl_policy[NET_SHAPER_A_WEIGHT + 1] = {
+        [NET_SHAPER_A_HANDLE] = NLA_POLICY_NESTED(net_shaper_handle_nl_policy),
+        [NET_SHAPER_A_PRIORITY] = { .type = NLA_U32, },
+        [NET_SHAPER_A_WEIGHT] = { .type = NLA_U32, },
+};
+
+/* NET_SHAPER_CMD_GET - do */
+static const struct nla_policy net_shaper_get_do_nl_policy[NET_SHAPER_A_IFINDEX + 1] = {
+        [NET_SHAPER_A_IFINDEX] = { .type = NLA_U32, },
+        [NET_SHAPER_A_HANDLE] = NLA_POLICY_NESTED(net_shaper_handle_nl_policy),
+};
+
+/* NET_SHAPER_CMD_GET - dump */
+static const struct nla_policy net_shaper_get_dump_nl_policy[NET_SHAPER_A_IFINDEX + 1] = {
+        [NET_SHAPER_A_IFINDEX] = { .type = NLA_U32, },
+};
+
+/* NET_SHAPER_CMD_SET - do */
+static const struct nla_policy net_shaper_set_nl_policy[NET_SHAPER_A_IFINDEX + 1] = {
+        [NET_SHAPER_A_IFINDEX] = { .type = NLA_U32, },
+        [NET_SHAPER_A_HANDLE] = NLA_POLICY_NESTED(net_shaper_handle_nl_policy),
+        [NET_SHAPER_A_METRIC] = NLA_POLICY_MAX(NLA_U32, 1),
+        [NET_SHAPER_A_BW_MIN] = { .type = NLA_UINT, },
+        [NET_SHAPER_A_BW_MAX] = { .type = NLA_UINT, },
+        [NET_SHAPER_A_BURST] = { .type = NLA_UINT, },
+        [NET_SHAPER_A_PRIORITY] = { .type = NLA_U32, },
+        [NET_SHAPER_A_WEIGHT] = { .type = NLA_U32, },
+};
+
+/* NET_SHAPER_CMD_DELETE - do */
+static const struct nla_policy net_shaper_delete_nl_policy[NET_SHAPER_A_IFINDEX + 1] = {
+        [NET_SHAPER_A_IFINDEX] = { .type = NLA_U32, },
+        [NET_SHAPER_A_HANDLE] = NLA_POLICY_NESTED(net_shaper_handle_nl_policy),
+};
+
+/* NET_SHAPER_CMD_GROUP - do */
+static const struct nla_policy net_shaper_group_nl_policy[NET_SHAPER_A_LEAVES + 1] = {
+        [NET_SHAPER_A_IFINDEX] = { .type = NLA_U32, },
+
[NET_SHAPER_A_PARENT] = NLA_POLICY_NESTED(net_shaper_handle_nl_policy), + [NET_SHAPER_A_HANDLE] = NLA_POLICY_NESTED(net_shaper_handle_nl_policy), + [NET_SHAPER_A_METRIC] = NLA_POLICY_MAX(NLA_U32, 1), + [NET_SHAPER_A_BW_MIN] = { .type = NLA_UINT, }, + [NET_SHAPER_A_BW_MAX] = { .type = NLA_UINT, }, + [NET_SHAPER_A_BURST] = { .type = NLA_UINT, }, + [NET_SHAPER_A_PRIORITY] = { .type = NLA_U32, }, + [NET_SHAPER_A_WEIGHT] = { .type = NLA_U32, }, + [NET_SHAPER_A_LEAVES] = NLA_POLICY_NESTED(net_shaper_leaf_info_nl_policy), +}; + +/* NET_SHAPER_CMD_CAP_GET - do */ +static const struct nla_policy net_shaper_cap_get_do_nl_policy[NET_SHAPER_A_CAPS_SCOPE + 1] = { + [NET_SHAPER_A_CAPS_IFINDEX] = { .type = NLA_U32, }, + [NET_SHAPER_A_CAPS_SCOPE] = NLA_POLICY_MAX(NLA_U32, 3), +}; + +/* NET_SHAPER_CMD_CAP_GET - dump */ +static const struct nla_policy net_shaper_cap_get_dump_nl_policy[NET_SHAPER_A_CAPS_IFINDEX + 1] = { + [NET_SHAPER_A_CAPS_IFINDEX] = { .type = NLA_U32, }, +}; + +/* Ops table for net_shaper */ +static const struct genl_split_ops net_shaper_nl_ops[] = { + { + .cmd = NET_SHAPER_CMD_GET, + .pre_doit = net_shaper_nl_pre_doit, + .doit = net_shaper_nl_get_doit, + .post_doit = net_shaper_nl_post_doit, + .policy = net_shaper_get_do_nl_policy, + .maxattr = NET_SHAPER_A_IFINDEX, + .flags = GENL_CMD_CAP_DO, + }, + { + .cmd = NET_SHAPER_CMD_GET, + .start = net_shaper_nl_pre_dumpit, + .dumpit = net_shaper_nl_get_dumpit, + .done = net_shaper_nl_post_dumpit, + .policy = net_shaper_get_dump_nl_policy, + .maxattr = NET_SHAPER_A_IFINDEX, + .flags = GENL_CMD_CAP_DUMP, + }, + { + .cmd = NET_SHAPER_CMD_SET, + .pre_doit = net_shaper_nl_pre_doit, + .doit = net_shaper_nl_set_doit, + .post_doit = net_shaper_nl_post_doit, + .policy = net_shaper_set_nl_policy, + .maxattr = NET_SHAPER_A_IFINDEX, + .flags = GENL_ADMIN_PERM | GENL_CMD_CAP_DO, + }, + { + .cmd = NET_SHAPER_CMD_DELETE, + .pre_doit = net_shaper_nl_pre_doit, + .doit = net_shaper_nl_delete_doit, + .post_doit = net_shaper_nl_post_doit, + .policy = net_shaper_delete_nl_policy, + .maxattr = NET_SHAPER_A_IFINDEX, + .flags = GENL_ADMIN_PERM | GENL_CMD_CAP_DO, + }, + { + .cmd = NET_SHAPER_CMD_GROUP, + .pre_doit = net_shaper_nl_pre_doit, + .doit = net_shaper_nl_group_doit, + .post_doit = net_shaper_nl_post_doit, + .policy = net_shaper_group_nl_policy, + .maxattr = NET_SHAPER_A_LEAVES, + .flags = GENL_ADMIN_PERM | GENL_CMD_CAP_DO, + }, + { + .cmd = NET_SHAPER_CMD_CAP_GET, + .pre_doit = net_shaper_nl_cap_pre_doit, + .doit = net_shaper_nl_cap_get_doit, + .post_doit = net_shaper_nl_cap_post_doit, + .policy = net_shaper_cap_get_do_nl_policy, + .maxattr = NET_SHAPER_A_CAPS_SCOPE, + .flags = GENL_CMD_CAP_DO, + }, + { + .cmd = NET_SHAPER_CMD_CAP_GET, + .start = net_shaper_nl_cap_pre_dumpit, + .dumpit = net_shaper_nl_cap_get_dumpit, + .done = net_shaper_nl_cap_post_dumpit, + .policy = net_shaper_cap_get_dump_nl_policy, + .maxattr = NET_SHAPER_A_CAPS_IFINDEX, + .flags = GENL_CMD_CAP_DUMP, + }, +}; + +struct genl_family net_shaper_nl_family __ro_after_init = { + .name = NET_SHAPER_FAMILY_NAME, + .version = NET_SHAPER_FAMILY_VERSION, + .netnsok = true, + .parallel_ops = true, + .module = THIS_MODULE, + .split_ops = net_shaper_nl_ops, + .n_split_ops = ARRAY_SIZE(net_shaper_nl_ops), +}; diff --git a/net/shaper/shaper_nl_gen.h b/net/shaper/shaper_nl_gen.h new file mode 100644 index 000000000000..cb7f9026fc23 --- /dev/null +++ b/net/shaper/shaper_nl_gen.h @@ -0,0 +1,44 @@ +/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause) */ +/* Do not edit directly, 
auto-generated from: */ +/* Documentation/netlink/specs/net_shaper.yaml */ +/* YNL-GEN kernel header */ + +#ifndef _LINUX_NET_SHAPER_GEN_H +#define _LINUX_NET_SHAPER_GEN_H + +#include <net/netlink.h> +#include <net/genetlink.h> + +#include <uapi/linux/net_shaper.h> + +/* Common nested types */ +extern const struct nla_policy net_shaper_handle_nl_policy[NET_SHAPER_A_HANDLE_ID + 1]; +extern const struct nla_policy net_shaper_leaf_info_nl_policy[NET_SHAPER_A_WEIGHT + 1]; + +int net_shaper_nl_pre_doit(const struct genl_split_ops *ops, + struct sk_buff *skb, struct genl_info *info); +int net_shaper_nl_cap_pre_doit(const struct genl_split_ops *ops, + struct sk_buff *skb, struct genl_info *info); +void +net_shaper_nl_post_doit(const struct genl_split_ops *ops, struct sk_buff *skb, + struct genl_info *info); +void +net_shaper_nl_cap_post_doit(const struct genl_split_ops *ops, + struct sk_buff *skb, struct genl_info *info); +int net_shaper_nl_pre_dumpit(struct netlink_callback *cb); +int net_shaper_nl_cap_pre_dumpit(struct netlink_callback *cb); +int net_shaper_nl_post_dumpit(struct netlink_callback *cb); +int net_shaper_nl_cap_post_dumpit(struct netlink_callback *cb); + +int net_shaper_nl_get_doit(struct sk_buff *skb, struct genl_info *info); +int net_shaper_nl_get_dumpit(struct sk_buff *skb, struct netlink_callback *cb); +int net_shaper_nl_set_doit(struct sk_buff *skb, struct genl_info *info); +int net_shaper_nl_delete_doit(struct sk_buff *skb, struct genl_info *info); +int net_shaper_nl_group_doit(struct sk_buff *skb, struct genl_info *info); +int net_shaper_nl_cap_get_doit(struct sk_buff *skb, struct genl_info *info); +int net_shaper_nl_cap_get_dumpit(struct sk_buff *skb, + struct netlink_callback *cb); + +extern struct genl_family net_shaper_nl_family; + +#endif /* _LINUX_NET_SHAPER_GEN_H */ diff --git a/net/smc/smc.h b/net/smc/smc.h index ad77d6b6b8d3..78ae10d06ed2 100644 --- a/net/smc/smc.h +++ b/net/smc/smc.h @@ -278,7 +278,7 @@ struct smc_connection { */ u64 peer_token; /* SMC-D token of peer */ u8 killed : 1; /* abnormal termination */ - u8 freed : 1; /* normal termiation */ + u8 freed : 1; /* normal termination */ u8 out_of_sync : 1; /* out of sync with peer */ }; diff --git a/net/smc/smc_clc.h b/net/smc/smc_clc.h index 5625fda2960b..5fd6f5b8ef03 100644 --- a/net/smc/smc_clc.h +++ b/net/smc/smc_clc.h @@ -156,7 +156,7 @@ struct smc_clc_msg_proposal_prefix { /* prefix part of clc proposal message*/ } __aligned(4); struct smc_clc_msg_smcd { /* SMC-D GID information */ - struct smc_clc_smcd_gid_chid ism; /* ISM native GID+CHID of requestor */ + struct smc_clc_smcd_gid_chid ism; /* ISM native GID+CHID of requester */ __be16 v2_ext_offset; /* SMC Version 2 Extension Offset */ u8 vendor_oui[3]; /* vendor organizationally unique identifier */ u8 vendor_exp_options[5]; diff --git a/net/smc/smc_core.c b/net/smc/smc_core.c index 4e694860ece4..500952c2e67b 100644 --- a/net/smc/smc_core.c +++ b/net/smc/smc_core.c @@ -2321,7 +2321,7 @@ static struct smc_buf_desc *smcr_new_buf_create(struct smc_link_group *lgr, } if (lgr->buf_type == SMCR_PHYS_CONT_BUFS) goto out; - fallthrough; // try virtually continguous buf + fallthrough; // try virtually contiguous buf case SMCR_VIRT_CONT_BUFS: buf_desc->order = get_order(bufsize); buf_desc->cpu_addr = vzalloc(PAGE_SIZE << buf_desc->order); diff --git a/net/smc/smc_core.h b/net/smc/smc_core.h index 0db4e5f79ac4..69b54ecd6503 100644 --- a/net/smc/smc_core.h +++ b/net/smc/smc_core.h @@ -30,7 +30,7 @@ */ #define SMC_CONN_PER_LGR_PREFER 255 /* Preferred connections 
per link group used for * SMC-R v2.1 and later negotiation, vendors or - * distrubutions may modify it to a value between + * distributions may modify it to a value between * 16-255 as needed. */ @@ -181,7 +181,7 @@ struct smc_link { */ #define SMC_LINKS_PER_LGR_MAX_PREFER 2 /* Preferred max links per link group used for * SMC-R v2.1 and later negotiation, vendors or - * distrubutions may modify it to a value between + * distributions may modify it to a value between * 1-2 as needed. */ diff --git a/net/socket.c b/net/socket.c index 042451f01c65..9a8e4452b9b2 100644 --- a/net/socket.c +++ b/net/socket.c @@ -687,7 +687,7 @@ void sock_release(struct socket *sock) } EXPORT_SYMBOL(sock_release); -void __sock_tx_timestamp(__u16 tsflags, __u8 *tx_flags) +void __sock_tx_timestamp(__u32 tsflags, __u8 *tx_flags) { u8 flags = *tx_flags; @@ -1576,9 +1576,9 @@ int __sock_create(struct net *net, int family, int type, int protocol, err = pf->create(net, sock, protocol, kern); if (err < 0) { /* ->create should release the allocated sock->sk object on error - * but it may leave the dangling pointer + * and make sure sock->sk is set to NULL to avoid use-after-free */ - sock->sk = NULL; + DEBUG_NET_WARN_ON_ONCE(sock->sk); goto out_module_put; } diff --git a/net/xdp/xsk.c b/net/xdp/xsk.c index 1140b2a120ca..7d7e37f53708 100644 --- a/net/xdp/xsk.c +++ b/net/xdp/xsk.c @@ -141,7 +141,7 @@ static int __xsk_rcv_zc(struct xdp_sock *xs, struct xdp_buff_xsk *xskb, u32 len, u64 addr; int err; - addr = xp_get_handle(xskb); + addr = xp_get_handle(xskb, xskb->pool); err = xskq_prod_reserve_desc(xs->rx, addr, len, flags); if (err) { xs->rx_queue_full++; @@ -171,14 +171,14 @@ static int xsk_rcv_zc(struct xdp_sock *xs, struct xdp_buff *xdp, u32 len) return 0; xskb_list = &xskb->pool->xskb_list; - list_for_each_entry_safe(pos, tmp, xskb_list, xskb_list_node) { + list_for_each_entry_safe(pos, tmp, xskb_list, list_node) { if (list_is_singular(xskb_list)) contd = 0; len = pos->xdp.data_end - pos->xdp.data; err = __xsk_rcv_zc(xs, pos, len, contd); if (err) goto err; - list_del(&pos->xskb_list_node); + list_del(&pos->list_node); } return 0; @@ -527,34 +527,34 @@ static int xsk_wakeup(struct xdp_sock *xs, u8 flags) return dev->netdev_ops->ndo_xsk_wakeup(dev, xs->queue_id, flags); } -static int xsk_cq_reserve_addr_locked(struct xdp_sock *xs, u64 addr) +static int xsk_cq_reserve_addr_locked(struct xsk_buff_pool *pool, u64 addr) { unsigned long flags; int ret; - spin_lock_irqsave(&xs->pool->cq_lock, flags); - ret = xskq_prod_reserve_addr(xs->pool->cq, addr); - spin_unlock_irqrestore(&xs->pool->cq_lock, flags); + spin_lock_irqsave(&pool->cq_lock, flags); + ret = xskq_prod_reserve_addr(pool->cq, addr); + spin_unlock_irqrestore(&pool->cq_lock, flags); return ret; } -static void xsk_cq_submit_locked(struct xdp_sock *xs, u32 n) +static void xsk_cq_submit_locked(struct xsk_buff_pool *pool, u32 n) { unsigned long flags; - spin_lock_irqsave(&xs->pool->cq_lock, flags); - xskq_prod_submit_n(xs->pool->cq, n); - spin_unlock_irqrestore(&xs->pool->cq_lock, flags); + spin_lock_irqsave(&pool->cq_lock, flags); + xskq_prod_submit_n(pool->cq, n); + spin_unlock_irqrestore(&pool->cq_lock, flags); } -static void xsk_cq_cancel_locked(struct xdp_sock *xs, u32 n) +static void xsk_cq_cancel_locked(struct xsk_buff_pool *pool, u32 n) { unsigned long flags; - spin_lock_irqsave(&xs->pool->cq_lock, flags); - xskq_prod_cancel_n(xs->pool->cq, n); - spin_unlock_irqrestore(&xs->pool->cq_lock, flags); + spin_lock_irqsave(&pool->cq_lock, flags); + 
xskq_prod_cancel_n(pool->cq, n); + spin_unlock_irqrestore(&pool->cq_lock, flags); } static u32 xsk_get_num_desc(struct sk_buff *skb) @@ -571,7 +571,7 @@ static void xsk_destruct_skb(struct sk_buff *skb) *compl->tx_timestamp = ktime_get_tai_fast_ns(); } - xsk_cq_submit_locked(xdp_sk(skb->sk), xsk_get_num_desc(skb)); + xsk_cq_submit_locked(xdp_sk(skb->sk)->pool, xsk_get_num_desc(skb)); sock_wfree(skb); } @@ -587,7 +587,7 @@ static void xsk_consume_skb(struct sk_buff *skb) struct xdp_sock *xs = xdp_sk(skb->sk); skb->destructor = sock_wfree; - xsk_cq_cancel_locked(xs, xsk_get_num_desc(skb)); + xsk_cq_cancel_locked(xs->pool, xsk_get_num_desc(skb)); /* Free skb without triggering the perf drop trace */ consume_skb(skb); xs->skb = NULL; @@ -765,7 +765,7 @@ free_err: xskq_cons_release(xs->tx); } else { /* Let application retry */ - xsk_cq_cancel_locked(xs, 1); + xsk_cq_cancel_locked(xs->pool, 1); } return ERR_PTR(err); @@ -802,7 +802,7 @@ static int __xsk_generic_xmit(struct sock *sk) * if there is space in it. This avoids having to implement * any buffering in the Tx path. */ - if (xsk_cq_reserve_addr_locked(xs, desc.addr)) + if (xsk_cq_reserve_addr_locked(xs->pool, desc.addr)) goto out; skb = xsk_build_skb(xs, &desc); diff --git a/net/xdp/xsk_buff_pool.c b/net/xdp/xsk_buff_pool.c index 521a2938e50a..ae71da7d2cd6 100644 --- a/net/xdp/xsk_buff_pool.c +++ b/net/xdp/xsk_buff_pool.c @@ -101,8 +101,7 @@ struct xsk_buff_pool *xp_create_and_assign_umem(struct xdp_sock *xs, xskb = &pool->heads[i]; xskb->pool = pool; xskb->xdp.frame_sz = umem->chunk_size - umem->headroom; - INIT_LIST_HEAD(&xskb->free_list_node); - INIT_LIST_HEAD(&xskb->xskb_list_node); + INIT_LIST_HEAD(&xskb->list_node); if (pool->unaligned) pool->free_heads[i] = xskb; else @@ -230,6 +229,7 @@ int xp_assign_dev(struct xsk_buff_pool *pool, goto err_unreg_xsk; } pool->umem->zc = true; + pool->xdp_zc_max_segs = netdev->xdp_zc_max_segs; return 0; err_unreg_xsk: @@ -417,8 +417,10 @@ static int xp_init_dma_info(struct xsk_buff_pool *pool, struct xsk_dma_map *dma_ for (i = 0; i < pool->heads_cnt; i++) { struct xdp_buff_xsk *xskb = &pool->heads[i]; + u64 orig_addr; - xp_init_xskb_dma(xskb, pool, dma_map->dma_pages, xskb->orig_addr); + orig_addr = xskb->xdp.data_hard_start - pool->addrs - pool->headroom; + xp_init_xskb_dma(xskb, pool, dma_map->dma_pages, orig_addr); } } @@ -501,6 +503,22 @@ static bool xp_check_aligned(struct xsk_buff_pool *pool, u64 *addr) return *addr < pool->addrs_cnt; } +static struct xdp_buff_xsk *xp_get_xskb(struct xsk_buff_pool *pool, u64 addr) +{ + struct xdp_buff_xsk *xskb; + + if (pool->unaligned) { + xskb = pool->free_heads[--pool->free_heads_cnt]; + xp_init_xskb_addr(xskb, pool, addr); + if (pool->dma_pages) + xp_init_xskb_dma(xskb, pool, pool->dma_pages, addr); + } else { + xskb = &pool->heads[xp_aligned_extract_idx(pool, addr)]; + } + + return xskb; +} + static struct xdp_buff_xsk *__xp_alloc(struct xsk_buff_pool *pool) { struct xdp_buff_xsk *xskb; @@ -526,14 +544,7 @@ static struct xdp_buff_xsk *__xp_alloc(struct xsk_buff_pool *pool) break; } - if (pool->unaligned) { - xskb = pool->free_heads[--pool->free_heads_cnt]; - xp_init_xskb_addr(xskb, pool, addr); - if (pool->dma_pages) - xp_init_xskb_dma(xskb, pool, pool->dma_pages, addr); - } else { - xskb = &pool->heads[xp_aligned_extract_idx(pool, addr)]; - } + xskb = xp_get_xskb(pool, addr); xskq_cons_release(pool->fq); return xskb; @@ -550,8 +561,8 @@ struct xdp_buff *xp_alloc(struct xsk_buff_pool *pool) } else { pool->free_list_cnt--; xskb = 
list_first_entry(&pool->free_list, struct xdp_buff_xsk, - free_list_node); - list_del_init(&xskb->free_list_node); + list_node); + list_del_init(&xskb->list_node); } xskb->xdp.data = xskb->xdp.data_hard_start + XDP_PACKET_HEADROOM; @@ -591,14 +602,7 @@ static u32 xp_alloc_new_from_fq(struct xsk_buff_pool *pool, struct xdp_buff **xd continue; } - if (pool->unaligned) { - xskb = pool->free_heads[--pool->free_heads_cnt]; - xp_init_xskb_addr(xskb, pool, addr); - if (pool->dma_pages) - xp_init_xskb_dma(xskb, pool, pool->dma_pages, addr); - } else { - xskb = &pool->heads[xp_aligned_extract_idx(pool, addr)]; - } + xskb = xp_get_xskb(pool, addr); *xdp = &xskb->xdp; xdp++; @@ -617,8 +621,8 @@ static u32 xp_alloc_reused(struct xsk_buff_pool *pool, struct xdp_buff **xdp, u3 i = nb_entries; while (i--) { - xskb = list_first_entry(&pool->free_list, struct xdp_buff_xsk, free_list_node); - list_del_init(&xskb->free_list_node); + xskb = list_first_entry(&pool->free_list, struct xdp_buff_xsk, list_node); + list_del_init(&xskb->list_node); *xdp = &xskb->xdp; xdp++; @@ -688,11 +692,11 @@ EXPORT_SYMBOL(xp_can_alloc); void xp_free(struct xdp_buff_xsk *xskb) { - if (!list_empty(&xskb->free_list_node)) + if (!list_empty(&xskb->list_node)) return; xskb->pool->free_list_cnt++; - list_add(&xskb->free_list_node, &xskb->pool->free_list); + list_add(&xskb->list_node, &xskb->pool->free_list); } EXPORT_SYMBOL(xp_free); diff --git a/net/xdp/xsk_queue.h b/net/xdp/xsk_queue.h index 406b20dfee8d..46d87e961ad6 100644 --- a/net/xdp/xsk_queue.h +++ b/net/xdp/xsk_queue.h @@ -260,7 +260,7 @@ u32 xskq_cons_read_desc_batch(struct xsk_queue *q, struct xsk_buff_pool *pool, nr_frags = 0; } else { nr_frags++; - if (nr_frags == pool->netdev->xdp_zc_max_segs) { + if (nr_frags == pool->xdp_zc_max_segs) { nr_frags = 0; break; } diff --git a/rust/kernel/net/phy.rs b/rust/kernel/net/phy.rs index 910ce867480a..801907fba199 100644 --- a/rust/kernel/net/phy.rs +++ b/rust/kernel/net/phy.rs @@ -848,9 +848,7 @@ impl DeviceMask { /// } /// }; /// -/// #[cfg(MODULE)] -/// #[no_mangle] -/// static __mod_mdio__phydev_device_table: [::kernel::bindings::mdio_device_id; 2] = [ +/// const _DEVICE_TABLE: [::kernel::bindings::mdio_device_id; 2] = [ /// ::kernel::bindings::mdio_device_id { /// phy_id: 0x00000001, /// phy_id_mask: 0xffffffff, @@ -860,6 +858,9 @@ impl DeviceMask { /// phy_id_mask: 0, /// }, /// ]; +/// #[cfg(MODULE)] +/// #[no_mangle] +/// static __mod_mdio__phydev_device_table: [::kernel::bindings::mdio_device_id; 2] = _DEVICE_TABLE; /// ``` #[macro_export] macro_rules! module_phy_driver { @@ -871,9 +872,7 @@ macro_rules! module_phy_driver { (@device_table [$($dev:expr),+]) => { // SAFETY: C will not read off the end of this constant since the last element is zero. - #[cfg(MODULE)] - #[no_mangle] - static __mod_mdio__phydev_device_table: [$crate::bindings::mdio_device_id; + const _DEVICE_TABLE: [$crate::bindings::mdio_device_id; $crate::module_phy_driver!(@count_devices $($dev),+) + 1] = [ $($dev.mdio_device_id()),+, $crate::bindings::mdio_device_id { @@ -881,6 +880,11 @@ macro_rules! 
module_phy_driver { phy_id_mask: 0 } ]; + + #[cfg(MODULE)] + #[no_mangle] + static __mod_mdio__phydev_device_table: [$crate::bindings::mdio_device_id; + $crate::module_phy_driver!(@count_devices $($dev),+) + 1] = _DEVICE_TABLE; }; (drivers: [$($driver:ident),+ $(,)?], device_table: [$($dev:expr),+ $(,)?], $($f:tt)*) => { diff --git a/tools/include/uapi/asm-generic/socket.h b/tools/include/uapi/asm-generic/socket.h index 54d9c8bf7c55..281df9139d2b 100644 --- a/tools/include/uapi/asm-generic/socket.h +++ b/tools/include/uapi/asm-generic/socket.h @@ -124,6 +124,8 @@ #define SO_PASSPIDFD 76 #define SO_PEERPIDFD 77 +#define SCM_TS_OPT_ID 78 + #if !defined(__KERNEL__) #if __BITS_PER_LONG == 64 || (defined(__x86_64__) && defined(__ILP32__)) diff --git a/tools/include/uapi/linux/if_link.h b/tools/include/uapi/linux/if_link.h index f0d71b2a3f1e..8516c1ccd57a 100644 --- a/tools/include/uapi/linux/if_link.h +++ b/tools/include/uapi/linux/if_link.h @@ -377,6 +377,7 @@ enum { IFLA_GSO_IPV4_MAX_SIZE, IFLA_GRO_IPV4_MAX_SIZE, IFLA_DPLL_PIN, + IFLA_MAX_PACING_OFFLOAD_HORIZON, __IFLA_MAX }; @@ -461,6 +462,286 @@ enum in6_addr_gen_mode { /* Bridge section */ +/** + * DOC: Bridge enum definition + * + * Please *note* that the timer values in the following section are expected + * in clock_t format, which is seconds multiplied by USER_HZ (generally + * defined as 100). + * + * @IFLA_BR_FORWARD_DELAY + * The bridge forwarding delay is the time spent in LISTENING state + * (before moving to LEARNING) and in LEARNING state (before moving + * to FORWARDING). Only relevant if STP is enabled. + * + * The valid values are between (2 * USER_HZ) and (30 * USER_HZ). + * The default value is (15 * USER_HZ). + * + * @IFLA_BR_HELLO_TIME + * The time between hello packets sent by the bridge, when it is a root + * bridge or a designated bridge. Only relevant if STP is enabled. + * + * The valid values are between (1 * USER_HZ) and (10 * USER_HZ). + * The default value is (2 * USER_HZ). + * + * @IFLA_BR_MAX_AGE + * The hello packet timeout is the time until another bridge in the + * spanning tree is assumed to be dead, after reception of its last hello + * message. Only relevant if STP is enabled. + * + * The valid values are between (6 * USER_HZ) and (40 * USER_HZ). + * The default value is (20 * USER_HZ). + * + * @IFLA_BR_AGEING_TIME + * Configure the bridge's FDB entries aging time. It is the time a MAC + * address will be kept in the FDB after a packet has been received from + * that address. After this time has passed, entries are cleaned up. + * Allow values outside the 802.1 standard specification for special cases: + * + * * 0 - entry never ages (all permanent) + * * 1 - entry disappears (no persistence) + * + * The default value is (300 * USER_HZ). + * + * @IFLA_BR_STP_STATE + * Turn spanning tree protocol on (*IFLA_BR_STP_STATE* > 0) or off + * (*IFLA_BR_STP_STATE* == 0) for this bridge. + * + * The default value is 0 (disabled). + * + * @IFLA_BR_PRIORITY + * Set this bridge's spanning tree priority, used during STP root bridge + * election. + * + * The valid values are between 0 and 65535. + * + * @IFLA_BR_VLAN_FILTERING + * Turn VLAN filtering on (*IFLA_BR_VLAN_FILTERING* > 0) or off + * (*IFLA_BR_VLAN_FILTERING* == 0). When disabled, the bridge will not + * consider the VLAN tag when handling packets. + * + * The default value is 0 (disabled). + * + * @IFLA_BR_VLAN_PROTOCOL + * Set the protocol used for VLAN filtering. + * + * The valid values are 0x8100(802.1Q) or 0x88A8(802.1AD). 
The default value
+ * is 0x8100(802.1Q).
+ *
+ * @IFLA_BR_GROUP_FWD_MASK
+ * The group forwarding mask. This is the bitmask that is applied to
+ * decide whether to forward incoming frames destined to link-local
+ * addresses (of the form 01:80:C2:00:00:0X).
+ *
+ * The default value is 0, which means the bridge does not forward any
+ * link-local frames coming on this port.
+ *
+ * @IFLA_BR_ROOT_ID
+ * The bridge root id, read only.
+ *
+ * @IFLA_BR_BRIDGE_ID
+ * The bridge id, read only.
+ *
+ * @IFLA_BR_ROOT_PORT
+ * The bridge root port, read only.
+ *
+ * @IFLA_BR_ROOT_PATH_COST
+ * The bridge root path cost, read only.
+ *
+ * @IFLA_BR_TOPOLOGY_CHANGE
+ * The bridge topology change, read only.
+ *
+ * @IFLA_BR_TOPOLOGY_CHANGE_DETECTED
+ * The bridge topology change detected, read only.
+ *
+ * @IFLA_BR_HELLO_TIMER
+ * The bridge hello timer, read only.
+ *
+ * @IFLA_BR_TCN_TIMER
+ * The bridge tcn timer, read only.
+ *
+ * @IFLA_BR_TOPOLOGY_CHANGE_TIMER
+ * The bridge topology change timer, read only.
+ *
+ * @IFLA_BR_GC_TIMER
+ * The bridge gc timer, read only.
+ *
+ * @IFLA_BR_GROUP_ADDR
+ * Set the MAC address of the multicast group this bridge uses for STP.
+ * The address must be a link-local address in standard Ethernet MAC address
+ * format. It is an address of the form 01:80:C2:00:00:0X, with X in [0, 4..f].
+ *
+ * The default value is 0.
+ *
+ * @IFLA_BR_FDB_FLUSH
+ * Flush the bridge's dynamic FDB entries.
+ *
+ * @IFLA_BR_MCAST_ROUTER
+ * Set the bridge's multicast router if IGMP snooping is enabled.
+ * The valid values are:
+ *
+ * * 0 - disabled.
+ * * 1 - automatic (queried).
+ * * 2 - permanently enabled.
+ *
+ * The default value is 1.
+ *
+ * @IFLA_BR_MCAST_SNOOPING
+ * Turn multicast snooping on (*IFLA_BR_MCAST_SNOOPING* > 0) or off
+ * (*IFLA_BR_MCAST_SNOOPING* == 0).
+ *
+ * The default value is 1.
+ *
+ * @IFLA_BR_MCAST_QUERY_USE_IFADDR
+ * If enabled, use the bridge's own IP address as the source address for IGMP
+ * queries (*IFLA_BR_MCAST_QUERY_USE_IFADDR* > 0) or the default of 0.0.0.0
+ * (*IFLA_BR_MCAST_QUERY_USE_IFADDR* == 0).
+ *
+ * The default value is 0 (disabled).
+ *
+ * @IFLA_BR_MCAST_QUERIER
+ * Enable (*IFLA_BR_MCAST_QUERIER* > 0) or disable
+ * (*IFLA_BR_MCAST_QUERIER* == 0) the IGMP querier, i.e. the sending of
+ * multicast queries by the bridge.
+ *
+ * The default value is 0 (disabled).
+ *
+ * @IFLA_BR_MCAST_HASH_ELASTICITY
+ * Set the multicast database hash elasticity. It is the maximum chain length
+ * in the multicast hash table. This attribute is *deprecated* and the value
+ * is always 16.
+ *
+ * @IFLA_BR_MCAST_HASH_MAX
+ * Set the maximum size of the multicast hash table.
+ *
+ * The default value is 4096; the value must be a power of 2.
+ *
+ * @IFLA_BR_MCAST_LAST_MEMBER_CNT
+ * The Last Member Query Count is the number of Group-Specific Queries
+ * sent before the router assumes there are no local members. The Last
+ * Member Query Count is also the number of Group-and-Source-Specific
+ * Queries sent before the router assumes there are no listeners for a
+ * particular source.
+ *
+ * The default value is 2.
+ *
+ * @IFLA_BR_MCAST_STARTUP_QUERY_CNT
+ * The Startup Query Count is the number of Queries sent out on startup,
+ * separated by the Startup Query Interval.
+ *
+ * The default value is 2.
+ *
+ * @IFLA_BR_MCAST_LAST_MEMBER_INTVL
+ * The Last Member Query Interval is the Max Response Time inserted into
+ * Group-Specific Queries sent in response to Leave Group messages, and
+ * is also the amount of time between Group-Specific Query messages.
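+ *
+ * For example, with the typical USER_HZ of 100, an interval value of 100
+ * corresponds to one second.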
+ *
+ * The default value is (1 * USER_HZ).
+ *
+ * @IFLA_BR_MCAST_MEMBERSHIP_INTVL
+ * The interval after which the bridge will leave a group, if no membership
+ * reports for this group are received.
+ *
+ * The default value is (260 * USER_HZ).
+ *
+ * @IFLA_BR_MCAST_QUERIER_INTVL
+ * The interval between queries sent by other routers. If no queries are
+ * seen after this delay has passed, the bridge will start to send its own
+ * queries (as if *IFLA_BR_MCAST_QUERIER* was enabled).
+ *
+ * The default value is (255 * USER_HZ).
+ *
+ * @IFLA_BR_MCAST_QUERY_INTVL
+ * The Query Interval is the interval between General Queries sent by
+ * the Querier.
+ *
+ * The default value is (125 * USER_HZ). The minimum value is (1 * USER_HZ).
+ *
+ * @IFLA_BR_MCAST_QUERY_RESPONSE_INTVL
+ * The Max Response Time used to calculate the Max Resp Code inserted
+ * into the periodic General Queries.
+ *
+ * The default value is (10 * USER_HZ).
+ *
+ * @IFLA_BR_MCAST_STARTUP_QUERY_INTVL
+ * The interval between queries in the startup phase.
+ *
+ * The default value is (125 * USER_HZ) / 4. The minimum value is (1 * USER_HZ).
+ *
+ * @IFLA_BR_NF_CALL_IPTABLES
+ * Enable (*NF_CALL_IPTABLES* > 0) or disable (*NF_CALL_IPTABLES* == 0)
+ * iptables hooks on the bridge.
+ *
+ * The default value is 0 (disabled).
+ *
+ * @IFLA_BR_NF_CALL_IP6TABLES
+ * Enable (*NF_CALL_IP6TABLES* > 0) or disable (*NF_CALL_IP6TABLES* == 0)
+ * ip6tables hooks on the bridge.
+ *
+ * The default value is 0 (disabled).
+ *
+ * @IFLA_BR_NF_CALL_ARPTABLES
+ * Enable (*NF_CALL_ARPTABLES* > 0) or disable (*NF_CALL_ARPTABLES* == 0)
+ * arptables hooks on the bridge.
+ *
+ * The default value is 0 (disabled).
+ *
+ * @IFLA_BR_VLAN_DEFAULT_PVID
+ * VLAN ID applied to untagged and priority-tagged incoming packets.
+ *
+ * The default value is 1. Setting to the special value 0 makes all ports of
+ * this bridge not have a PVID by default, which means that they will
+ * not accept VLAN-untagged traffic.
+ *
+ * @IFLA_BR_PAD
+ * Bridge attribute padding type for netlink message.
+ *
+ * @IFLA_BR_VLAN_STATS_ENABLED
+ * Enable (*IFLA_BR_VLAN_STATS_ENABLED* == 1) or disable
+ * (*IFLA_BR_VLAN_STATS_ENABLED* == 0) per-VLAN stats accounting.
+ *
+ * The default value is 0 (disabled).
+ *
+ * @IFLA_BR_MCAST_STATS_ENABLED
+ * Enable (*IFLA_BR_MCAST_STATS_ENABLED* > 0) or disable
+ * (*IFLA_BR_MCAST_STATS_ENABLED* == 0) multicast (IGMP/MLD) stats
+ * accounting.
+ *
+ * The default value is 0 (disabled).
+ *
+ * @IFLA_BR_MCAST_IGMP_VERSION
+ * Set the IGMP version.
+ *
+ * The valid values are 2 and 3. The default value is 2.
+ *
+ * @IFLA_BR_MCAST_MLD_VERSION
+ * Set the MLD version.
+ *
+ * The valid values are 1 and 2. The default value is 1.
+ *
+ * @IFLA_BR_VLAN_STATS_PER_PORT
+ * Enable (*IFLA_BR_VLAN_STATS_PER_PORT* == 1) or disable
+ * (*IFLA_BR_VLAN_STATS_PER_PORT* == 0) per-VLAN per-port stats accounting.
+ * Can be changed only when there are no port VLANs configured.
+ *
+ * The default value is 0 (disabled).
+ *
+ * @IFLA_BR_MULTI_BOOLOPT
+ * The multi_boolopt is used to control new boolean options to avoid adding
+ * new netlink attributes. You can look at ``enum br_boolopt_id`` for those
+ * options.
+ *
+ * @IFLA_BR_MCAST_QUERIER_STATE
+ * Bridge mcast querier states, read only.
+ *
+ * @IFLA_BR_FDB_N_LEARNED
+ * The number of dynamically learned FDB entries for the current bridge,
+ * read only.
+ *
+ * @IFLA_BR_FDB_MAX_LEARNED
+ * Set the maximum number of dynamically learned FDB entries for the
+ * current bridge.
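+ *
+ * The current number of dynamically learned entries is reported by the
+ * read-only *IFLA_BR_FDB_N_LEARNED* attribute.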
+ */
 enum {
	IFLA_BR_UNSPEC,
	IFLA_BR_FORWARD_DELAY,
@@ -510,6 +791,8 @@ enum {
	IFLA_BR_VLAN_STATS_PER_PORT,
	IFLA_BR_MULTI_BOOLOPT,
	IFLA_BR_MCAST_QUERIER_STATE,
+	IFLA_BR_FDB_N_LEARNED,
+	IFLA_BR_FDB_MAX_LEARNED,
	__IFLA_BR_MAX,
 };
@@ -520,11 +803,252 @@ struct ifla_bridge_id {
	__u8 addr[6]; /* ETH_ALEN */
 };
+/**
+ * DOC: Bridge mode enum definition
+ *
+ * @BRIDGE_MODE_HAIRPIN
+ * Controls whether traffic may be sent back out of the port on which it
+ * was received. This option is also called reflective relay mode, and is
+ * used to support basic VEPA (Virtual Ethernet Port Aggregator)
+ * capabilities. By default, this flag is turned off and the bridge will
+ * not forward traffic back out of the receiving port.
+ */
 enum {
	BRIDGE_MODE_UNSPEC,
	BRIDGE_MODE_HAIRPIN,
 };
+/**
+ * DOC: Bridge port enum definition
+ *
+ * @IFLA_BRPORT_STATE
+ * The operational state of the port. The valid values are:
+ *
+ * * 0 - port is in STP *DISABLED* state. Make this port completely
+ *   inactive for STP. This is also called BPDU filter and could be used
+ *   to disable STP on an untrusted port, like a leaf virtual device.
+ *   Traffic forwarding is also stopped on this port.
+ * * 1 - port is in STP *LISTENING* state. Only valid if STP is enabled
+ *   on the bridge. In this state the port listens for STP BPDUs and
+ *   drops all other traffic frames.
+ * * 2 - port is in STP *LEARNING* state. Only valid if STP is enabled on
+ *   the bridge. In this state the port will accept traffic only for the
+ *   purpose of updating MAC address tables.
+ * * 3 - port is in STP *FORWARDING* state. Port is fully active.
+ * * 4 - port is in STP *BLOCKING* state. Only valid if STP is enabled on
+ *   the bridge. This state is used during the STP election process.
+ *   In this state, the port will only process STP BPDUs.
+ *
+ * @IFLA_BRPORT_PRIORITY
+ * The STP port priority. The valid values are between 0 and 255.
+ *
+ * @IFLA_BRPORT_COST
+ * The STP path cost of the port. The valid values are between 1 and 65535.
+ *
+ * @IFLA_BRPORT_MODE
+ * Set the bridge port mode. See *BRIDGE_MODE_HAIRPIN* for more details.
+ *
+ * @IFLA_BRPORT_GUARD
+ * Controls whether STP BPDUs will be processed by the bridge port. By
+ * default, the flag is turned off to allow BPDU processing. Turning this
+ * flag on will disable the bridge port if an STP BPDU packet is received.
+ *
+ * If the bridge has Spanning Tree enabled, hostile devices on the network
+ * may send BPDUs on a port and cause network failure. Setting *guard on*
+ * will detect and stop this by disabling the port. The port will be
+ * restarted if the link is brought down, or removed and reattached.
+ *
+ * @IFLA_BRPORT_PROTECT
+ * Controls whether a given port is allowed to become a root port or not.
+ * Only used when STP is enabled on the bridge. By default the flag is off.
+ *
+ * This feature is also called root port guard. If a BPDU is received from a
+ * leaf (edge) port, it should not be elected as root port. This could
+ * be used if using STP on a bridge and the downstream bridges are not fully
+ * trusted; this prevents a hostile guest from rerouting traffic.
+ *
+ * @IFLA_BRPORT_FAST_LEAVE
+ * This flag allows the bridge to immediately stop multicast traffic
+ * forwarding on a port that receives an IGMP Leave message. It is only used
+ * when IGMP snooping is enabled on the bridge. By default the flag is off.
+ *
+ * @IFLA_BRPORT_LEARNING
+ * Controls whether a given port will learn *source* MAC addresses from
+ * received traffic or not. 
Also controls whether dynamic FDB entries
+ * (which can also be added by software) will be refreshed by incoming
+ * traffic. By default this flag is on.
+ *
+ * @IFLA_BRPORT_UNICAST_FLOOD
+ * Controls whether unicast traffic for which there is no FDB entry will
+ * be flooded towards this port. By default this flag is on.
+ *
+ * @IFLA_BRPORT_PROXYARP
+ * Enable proxy ARP on this port.
+ *
+ * @IFLA_BRPORT_LEARNING_SYNC
+ * Controls whether a given port will sync MAC addresses learned on the
+ * device port to the bridge FDB.
+ *
+ * @IFLA_BRPORT_PROXYARP_WIFI
+ * Enable proxy ARP on this port, meeting the extended requirements of the
+ * IEEE 802.11 and Hotspot 2.0 specifications.
+ *
+ * @IFLA_BRPORT_ROOT_ID
+ *
+ * @IFLA_BRPORT_BRIDGE_ID
+ *
+ * @IFLA_BRPORT_DESIGNATED_PORT
+ *
+ * @IFLA_BRPORT_DESIGNATED_COST
+ *
+ * @IFLA_BRPORT_ID
+ *
+ * @IFLA_BRPORT_NO
+ *
+ * @IFLA_BRPORT_TOPOLOGY_CHANGE_ACK
+ *
+ * @IFLA_BRPORT_CONFIG_PENDING
+ *
+ * @IFLA_BRPORT_MESSAGE_AGE_TIMER
+ *
+ * @IFLA_BRPORT_FORWARD_DELAY_TIMER
+ *
+ * @IFLA_BRPORT_HOLD_TIMER
+ *
+ * @IFLA_BRPORT_FLUSH
+ * Flush the bridge port's dynamic FDB entries.
+ *
+ * @IFLA_BRPORT_MULTICAST_ROUTER
+ * Configure the port's multicast router presence. A port with
+ * a multicast router will receive all multicast traffic.
+ * The valid values are:
+ *
+ * * 0 - disable multicast routers on this port
+ * * 1 - let the system detect the presence of routers (default)
+ * * 2 - permanently enable multicast traffic forwarding on this port
+ * * 3 - enable multicast routers temporarily on this port, not depending
+ *     on incoming queries.
+ *
+ * @IFLA_BRPORT_PAD
+ *
+ * @IFLA_BRPORT_MCAST_FLOOD
+ * Controls whether a given port will flood multicast traffic for which
+ * there is no MDB entry. By default this flag is on.
+ *
+ * @IFLA_BRPORT_MCAST_TO_UCAST
+ * Controls whether a given port will replicate packets using unicast
+ * instead of multicast. By default this flag is off.
+ *
+ * This is done by copying the packet per host and changing the multicast
+ * destination MAC to a unicast one accordingly.
+ *
+ * *mcast_to_unicast* works on top of the multicast snooping feature of the
+ * bridge, which means unicast copies are only delivered to hosts that
+ * previously signaled interest via IGMP/MLD reports.
+ *
+ * This feature is intended for interface types which have a more reliable
+ * and/or efficient way to deliver unicast packets than broadcast ones
+ * (e.g. WiFi).
+ *
+ * However, it should only be enabled on interfaces where no IGMPv2/MLDv1
+ * report suppression takes place. The IGMP/MLD report suppression issue is
+ * usually overcome by the network daemon (supplicant) enabling AP isolation
+ * and by that separating all STAs.
+ *
+ * Delivery of STA-to-STA IP multicast is made possible again by enabling
+ * and utilizing the bridge hairpin mode, which considers the incoming port
+ * as a potential outgoing port, too (see *BRIDGE_MODE_HAIRPIN* option).
+ * Hairpin mode is performed after multicast snooping, therefore reports
+ * are only delivered to STAs running a multicast router.
+ *
+ * @IFLA_BRPORT_VLAN_TUNNEL
+ * Controls whether VLAN-to-tunnel mapping is enabled on the port.
+ * By default this flag is off.
+ *
+ * @IFLA_BRPORT_BCAST_FLOOD
+ * Controls flooding of broadcast traffic on the given port. By default
+ * this flag is on.
+ *
+ * @IFLA_BRPORT_GROUP_FWD_MASK
+ * Set the group forward mask. 
This is a bitmask that is applied to
+ * decide whether to forward incoming frames destined to link-local
+ * addresses. The addresses are of the form 01:80:C2:00:00:0X. The mask
+ * defaults to 0, which means the bridge does not forward any link-local
+ * frames coming on this port.
+ *
+ * @IFLA_BRPORT_NEIGH_SUPPRESS
+ * Controls whether neighbor discovery (ARP and ND) proxy and suppression
+ * is enabled on the port. By default this flag is off.
+ *
+ * @IFLA_BRPORT_ISOLATED
+ * Controls whether a given port will be isolated, which means it will be
+ * able to communicate with non-isolated ports only. By default this
+ * flag is off.
+ *
+ * @IFLA_BRPORT_BACKUP_PORT
+ * Set a backup port. If the port loses carrier, all traffic will be
+ * redirected to the configured backup port. Set the value to 0 to disable
+ * it.
+ *
+ * @IFLA_BRPORT_MRP_RING_OPEN
+ *
+ * @IFLA_BRPORT_MRP_IN_OPEN
+ *
+ * @IFLA_BRPORT_MCAST_EHT_HOSTS_LIMIT
+ * The per-port limit on the number of tracked EHT hosts. The default value
+ * is 512. Setting it to 0 is not allowed.
+ *
+ * @IFLA_BRPORT_MCAST_EHT_HOSTS_CNT
+ * The current number of tracked hosts, read only.
+ *
+ * @IFLA_BRPORT_LOCKED
+ * Controls whether a port will be locked, meaning that hosts behind the
+ * port will not be able to communicate through the port unless an FDB
+ * entry with the unit's MAC address is in the FDB. The common use case is
+ * that hosts are allowed access through authentication with the IEEE 802.1X
+ * protocol or based on whitelists. By default this flag is off.
+ *
+ * Please note that secure 802.1X deployments should always use the
+ * *BR_BOOLOPT_NO_LL_LEARN* flag, to not permit the bridge to populate its
+ * FDB based on link-local (EAPOL) traffic received on the port.
+ *
+ * @IFLA_BRPORT_MAB
+ * Controls whether a port will use MAC Authentication Bypass (MAB), a
+ * technique through which select MAC addresses may be allowed on a locked
+ * port, without using 802.1X authentication. Packets with an unknown source
+ * MAC address generate a "locked" FDB entry on the incoming bridge port.
+ * The common use case is for user space to react to these bridge FDB
+ * notifications and optionally replace the locked FDB entry with a normal
+ * one, allowing traffic to pass for whitelisted MAC addresses.
+ *
+ * Setting this flag also requires *IFLA_BRPORT_LOCKED* and
+ * *IFLA_BRPORT_LEARNING*. *IFLA_BRPORT_LOCKED* ensures that unauthorized
+ * data packets are dropped, and *IFLA_BRPORT_LEARNING* allows the dynamic
+ * FDB entries installed by user space (as replacements for the locked FDB
+ * entries) to be refreshed and/or aged out.
+ *
+ * @IFLA_BRPORT_MCAST_N_GROUPS
+ *
+ * @IFLA_BRPORT_MCAST_MAX_GROUPS
+ * Sets the maximum number of MDB entries that can be registered for a
+ * given port. Attempts to register more MDB entries at the port than this
+ * limit allows will be rejected, whether they are done through netlink
+ * (e.g. the bridge tool), or IGMP or MLD membership reports. Setting a
+ * limit of 0 disables the limit. The default value is 0.
+ *
+ * @IFLA_BRPORT_NEIGH_VLAN_SUPPRESS
+ * Controls whether neighbor discovery (ARP and ND) proxy and suppression is
+ * enabled for a given port. By default this flag is off.
+ *
+ * Note that this option only takes effect when *IFLA_BRPORT_NEIGH_SUPPRESS*
+ * is enabled for a given port.
+ *
+ * @IFLA_BRPORT_BACKUP_NHID
+ * The FDB nexthop object ID to attach to packets being redirected to a
+ * backup port that has VLAN tunnel mapping enabled (via the
+ * *IFLA_BRPORT_VLAN_TUNNEL* option). 
Setting a value of 0 (default) has + * the effect of not attaching any ID. + */ enum { IFLA_BRPORT_UNSPEC, IFLA_BRPORT_STATE, /* Spanning tree state */ @@ -769,6 +1293,19 @@ enum netkit_mode { NETKIT_L3, }; +/* NETKIT_SCRUB_NONE leaves clearing skb->{mark,priority} up to + * the BPF program if attached. This also means the latter can + * consume the two fields if they were populated earlier. + * + * NETKIT_SCRUB_DEFAULT zeroes skb->{mark,priority} fields before + * invoking the attached BPF program when the peer device resides + * in a different network namespace. This is the default behavior. + */ +enum netkit_scrub { + NETKIT_SCRUB_NONE, + NETKIT_SCRUB_DEFAULT, +}; + enum { IFLA_NETKIT_UNSPEC, IFLA_NETKIT_PEER_INFO, @@ -776,6 +1313,8 @@ enum { IFLA_NETKIT_POLICY, IFLA_NETKIT_PEER_POLICY, IFLA_NETKIT_MODE, + IFLA_NETKIT_SCRUB, + IFLA_NETKIT_PEER_SCRUB, __IFLA_NETKIT_MAX, }; #define IFLA_NETKIT_MAX (__IFLA_NETKIT_MAX - 1) @@ -854,6 +1393,7 @@ enum { IFLA_VXLAN_DF, IFLA_VXLAN_VNIFILTER, /* only applicable with COLLECT_METADATA mode */ IFLA_VXLAN_LOCALBYPASS, + IFLA_VXLAN_LABEL_POLICY, /* IPv6 flow label policy; ifla_vxlan_label_policy */ __IFLA_VXLAN_MAX }; #define IFLA_VXLAN_MAX (__IFLA_VXLAN_MAX - 1) @@ -871,6 +1411,13 @@ enum ifla_vxlan_df { VXLAN_DF_MAX = __VXLAN_DF_END - 1, }; +enum ifla_vxlan_label_policy { + VXLAN_LABEL_FIXED = 0, + VXLAN_LABEL_INHERIT = 1, + __VXLAN_LABEL_END, + VXLAN_LABEL_MAX = __VXLAN_LABEL_END - 1, +}; + /* GENEVE section */ enum { IFLA_GENEVE_UNSPEC, @@ -935,6 +1482,8 @@ enum { IFLA_GTP_ROLE, IFLA_GTP_CREATE_SOCKETS, IFLA_GTP_RESTART_COUNT, + IFLA_GTP_LOCAL, + IFLA_GTP_LOCAL6, __IFLA_GTP_MAX, }; #define IFLA_GTP_MAX (__IFLA_GTP_MAX - 1) @@ -1240,6 +1789,7 @@ enum { IFLA_HSR_PROTOCOL, /* Indicate different protocol than * HSR. For example PRP. 
*/ + IFLA_HSR_INTERLINK, /* HSR interlink network device */ __IFLA_HSR_MAX, }; @@ -1417,7 +1967,9 @@ enum { enum { IFLA_DSA_UNSPEC, - IFLA_DSA_MASTER, + IFLA_DSA_CONDUIT, + /* Deprecated, use IFLA_DSA_CONDUIT instead */ + IFLA_DSA_MASTER = IFLA_DSA_CONDUIT, __IFLA_DSA_MAX, }; diff --git a/tools/include/uapi/linux/netdev.h b/tools/include/uapi/linux/netdev.h index 7c308f04e7a0..e3ebb49f60d2 100644 --- a/tools/include/uapi/linux/netdev.h +++ b/tools/include/uapi/linux/netdev.h @@ -122,6 +122,8 @@ enum { NETDEV_A_NAPI_ID, NETDEV_A_NAPI_IRQ, NETDEV_A_NAPI_PID, + NETDEV_A_NAPI_DEFER_HARD_IRQS, + NETDEV_A_NAPI_GRO_FLUSH_TIMEOUT, __NETDEV_A_NAPI_MAX, NETDEV_A_NAPI_MAX = (__NETDEV_A_NAPI_MAX - 1) @@ -199,6 +201,7 @@ enum { NETDEV_CMD_NAPI_GET, NETDEV_CMD_QSTATS_GET, NETDEV_CMD_BIND_RX, + NETDEV_CMD_NAPI_SET, __NETDEV_CMD_MAX, NETDEV_CMD_MAX = (__NETDEV_CMD_MAX - 1) diff --git a/tools/net/ynl/cli.py b/tools/net/ynl/cli.py index b8481f401376..9e95016b85b3 100755 --- a/tools/net/ynl/cli.py +++ b/tools/net/ynl/cli.py @@ -5,6 +5,7 @@ import argparse import json import pprint import time +import signal from lib import YnlFamily, Netlink, NlError @@ -17,6 +18,8 @@ class YnlEncoder(json.JSONEncoder): return list(obj) return json.JSONEncoder.default(self, obj) +def handle_timeout(sig, frame): + exit(0) def main(): description = """ @@ -81,7 +84,8 @@ def main(): ynl.ntf_subscribe(args.ntf) if args.sleep: - time.sleep(args.sleep) + signal.signal(signal.SIGALRM, handle_timeout) + signal.alarm(args.sleep) if args.list_ops: for op_name, op in ynl.ops.items(): @@ -106,8 +110,8 @@ def main(): exit(1) if args.ntf: - ynl.check_ntf() - output(ynl.async_msg_queue) + for msg in ynl.check_ntf(): + output(msg) if __name__ == "__main__": diff --git a/tools/net/ynl/lib/ynl.py b/tools/net/ynl/lib/ynl.py index c22c22bf2cb7..92f85698c50e 100644 --- a/tools/net/ynl/lib/ynl.py +++ b/tools/net/ynl/lib/ynl.py @@ -12,6 +12,8 @@ import sys import yaml import ipaddress import uuid +import queue +import time from .nlspec import SpecFamily @@ -489,7 +491,7 @@ class YnlFamily(SpecFamily): self.sock.setsockopt(Netlink.SOL_NETLINK, Netlink.NETLINK_GET_STRICT_CHK, 1) self.async_msg_ids = set() - self.async_msg_queue = [] + self.async_msg_queue = queue.Queue() for msg in self.msgs.values(): if msg.is_async: @@ -903,32 +905,39 @@ class YnlFamily(SpecFamily): msg['name'] = op['name'] msg['msg'] = attrs - self.async_msg_queue.append(msg) + self.async_msg_queue.put(msg) - def check_ntf(self): + def check_ntf(self, interval=0.1): while True: try: reply = self.sock.recv(self._recv_size, socket.MSG_DONTWAIT) - except BlockingIOError: - return + nms = NlMsgs(reply) + self._recv_dbg_print(reply, nms) + for nl_msg in nms: + if nl_msg.error: + print("Netlink error in ntf!?", os.strerror(-nl_msg.error)) + print(nl_msg) + continue + if nl_msg.done: + print("Netlink done while checking for ntf!?") + continue - nms = NlMsgs(reply) - self._recv_dbg_print(reply, nms) - for nl_msg in nms: - if nl_msg.error: - print("Netlink error in ntf!?", os.strerror(-nl_msg.error)) - print(nl_msg) - continue - if nl_msg.done: - print("Netlink done while checking for ntf!?") - continue + decoded = self.nlproto.decode(self, nl_msg, None) + if decoded.cmd() not in self.async_msg_ids: + print("Unexpected msg id while checking for ntf", decoded) + continue - decoded = self.nlproto.decode(self, nl_msg, None) - if decoded.cmd() not in self.async_msg_ids: - print("Unexpected msg id done while checking for ntf", decoded) - continue + self.handle_ntf(decoded) + except 
BlockingIOError: + pass - self.handle_ntf(decoded) + try: + yield self.async_msg_queue.get_nowait() + except queue.Empty: + try: + time.sleep(interval) + except KeyboardInterrupt: + return def operation_do_attributes(self, name): """ diff --git a/tools/net/ynl/ynl-gen-c.py b/tools/net/ynl/ynl-gen-c.py index 717530bc9c52..1a825b4081b2 100755 --- a/tools/net/ynl/ynl-gen-c.py +++ b/tools/net/ynl/ynl-gen-c.py @@ -80,11 +80,21 @@ class Type(SpecAttr): value = self.checks.get(limit, default) if value is None: return value - elif value in self.family.consts: + if isinstance(value, int): + return value + if value in self.family.consts: + raise Exception("Resolving family constants not implemented, yet") + return limit_to_number(value) + + def get_limit_str(self, limit, default=None, suffix=''): + value = self.checks.get(limit, default) + if value is None: + return '' + if isinstance(value, int): + return str(value) + suffix + if value in self.family.consts: return c_upper(f"{self.family['name']}-{value}") - if not isinstance(value, int): - value = limit_to_number(value) - return value + return c_upper(value) def resolve(self): if 'name-prefix' in self.attr: @@ -157,7 +167,10 @@ class Type(SpecAttr): return '{ .type = ' + policy + ', }' def attr_policy(self, cw): - policy = c_upper('nla-' + self.attr['type']) + policy = f'NLA_{c_upper(self.type)}' + if self.attr.get('byte-order') == 'big-endian': + if self.type in {'u16', 'u32'}: + policy = f'NLA_BE{self.type[1:]}' spec = self._attr_policy(policy) cw.p(f"\t[{self.enum_name}] = {spec},") @@ -358,11 +371,11 @@ class TypeScalar(Type): elif 'full-range' in self.checks: return f"NLA_POLICY_FULL_RANGE({policy}, &{c_lower(self.enum_name)}_range)" elif 'range' in self.checks: - return f"NLA_POLICY_RANGE({policy}, {self.get_limit('min')}, {self.get_limit('max')})" + return f"NLA_POLICY_RANGE({policy}, {self.get_limit_str('min')}, {self.get_limit_str('max')})" elif 'min' in self.checks: - return f"NLA_POLICY_MIN({policy}, {self.get_limit('min')})" + return f"NLA_POLICY_MIN({policy}, {self.get_limit_str('min')})" elif 'max' in self.checks: - return f"NLA_POLICY_MAX({policy}, {self.get_limit('max')})" + return f"NLA_POLICY_MAX({policy}, {self.get_limit_str('max')})" return super()._attr_policy(policy) def _attr_typol(self): @@ -413,11 +426,11 @@ class TypeString(Type): def _attr_policy(self, policy): if 'exact-len' in self.checks: - mem = 'NLA_POLICY_EXACT_LEN(' + str(self.get_limit('exact-len')) + ')' + mem = 'NLA_POLICY_EXACT_LEN(' + self.get_limit_str('exact-len') + ')' else: mem = '{ .type = ' + policy if 'max-len' in self.checks: - mem += ', .len = ' + str(self.get_limit('max-len')) + mem += ', .len = ' + self.get_limit_str('max-len') mem += ', }' return mem @@ -464,17 +477,22 @@ class TypeBinary(Type): return f'.type = YNL_PT_BINARY,' def _attr_policy(self, policy): - if 'exact-len' in self.checks: - mem = 'NLA_POLICY_EXACT_LEN(' + str(self.get_limit('exact-len')) + ')' + if len(self.checks) == 0: + pass + elif len(self.checks) == 1: + check_name = list(self.checks)[0] + if check_name not in {'exact-len', 'min-len'}: + raise Exception('Unsupported check for binary type: ' + check_name) else: - mem = '{ ' - if len(self.checks) == 1 and 'min-len' in self.checks: - mem += '.len = ' + str(self.get_limit('min-len')) - elif len(self.checks) == 0: - mem += '.type = NLA_BINARY' - else: - raise Exception('One or more of binary type checks not implemented, yet') - mem += ', }' + raise Exception('More than one check for binary type not implemented, yet') + + if 
len(self.checks) == 0: + mem = '{ .type = NLA_BINARY, }' + elif 'exact-len' in self.checks: + mem = 'NLA_POLICY_EXACT_LEN(' + self.get_limit_str('exact-len') + ')' + elif 'min-len' in self.checks: + mem = '{ .len = ' + self.get_limit_str('min-len') + ', }' + return mem def attr_put(self, ri, var): @@ -2161,9 +2179,9 @@ def print_kernel_policy_ranges(family, cw): cw.block_start(line=f'static const struct netlink_range_validation{sign} {c_lower(attr.enum_name)}_range =') members = [] if 'min' in attr.checks: - members.append(('min', str(attr.get_limit('min')) + suffix)) + members.append(('min', attr.get_limit_str('min', suffix=suffix))) if 'max' in attr.checks: - members.append(('max', str(attr.get_limit('max')) + suffix)) + members.append(('max', attr.get_limit_str('max', suffix=suffix))) cw.write_struct_init(members) cw.block_end(line=';') cw.nl() diff --git a/tools/testing/selftests/bpf/network_helpers.h b/tools/testing/selftests/bpf/network_helpers.h index c72c16e1aff8..5764155b6d25 100644 --- a/tools/testing/selftests/bpf/network_helpers.h +++ b/tools/testing/selftests/bpf/network_helpers.h @@ -1,6 +1,7 @@ /* SPDX-License-Identifier: GPL-2.0 */ #ifndef __NETWORK_HELPERS_H #define __NETWORK_HELPERS_H +#include <arpa/inet.h> #include <sys/socket.h> #include <sys/types.h> #include <linux/types.h> diff --git a/tools/testing/selftests/bpf/prog_tests/mptcp.c b/tools/testing/selftests/bpf/prog_tests/mptcp.c index d2ca32fa3b21..be3cad2aff77 100644 --- a/tools/testing/selftests/bpf/prog_tests/mptcp.c +++ b/tools/testing/selftests/bpf/prog_tests/mptcp.c @@ -5,12 +5,17 @@ #include <linux/const.h> #include <netinet/in.h> #include <test_progs.h> +#include <unistd.h> #include "cgroup_helpers.h" #include "network_helpers.h" #include "mptcp_sock.skel.h" #include "mptcpify.skel.h" +#include "mptcp_subflow.skel.h" #define NS_TEST "mptcp_ns" +#define ADDR_1 "10.0.1.1" +#define ADDR_2 "10.0.1.2" +#define PORT_1 10001 #ifndef IPPROTO_MPTCP #define IPPROTO_MPTCP 262 @@ -335,10 +340,126 @@ fail: close(cgroup_fd); } +static int endpoint_init(char *flags) +{ + SYS(fail, "ip -net %s link add veth1 type veth peer name veth2", NS_TEST); + SYS(fail, "ip -net %s addr add %s/24 dev veth1", NS_TEST, ADDR_1); + SYS(fail, "ip -net %s link set dev veth1 up", NS_TEST); + SYS(fail, "ip -net %s addr add %s/24 dev veth2", NS_TEST, ADDR_2); + SYS(fail, "ip -net %s link set dev veth2 up", NS_TEST); + if (SYS_NOFAIL("ip -net %s mptcp endpoint add %s %s", NS_TEST, ADDR_2, flags)) { + printf("'ip mptcp' not supported, skip this test.\n"); + test__skip(); + goto fail; + } + + return 0; +fail: + return -1; +} + +static void wait_for_new_subflows(int fd) +{ + socklen_t len; + u8 subflows; + int err, i; + + len = sizeof(subflows); + /* Wait max 5 sec for new subflows to be created */ + for (i = 0; i < 50; i++) { + err = getsockopt(fd, SOL_MPTCP, MPTCP_INFO, &subflows, &len); + if (!err && subflows > 0) + break; + + usleep(100000); /* 0.1s */ + } +} + +static void run_subflow(void) +{ + int server_fd, client_fd, err; + char new[TCP_CA_NAME_MAX]; + char cc[TCP_CA_NAME_MAX]; + unsigned int mark; + socklen_t len; + + server_fd = start_mptcp_server(AF_INET, ADDR_1, PORT_1, 0); + if (!ASSERT_OK_FD(server_fd, "start_mptcp_server")) + return; + + client_fd = connect_to_fd(server_fd, 0); + if (!ASSERT_OK_FD(client_fd, "connect_to_fd")) + goto close_server; + + send_byte(client_fd); + wait_for_new_subflows(client_fd); + + len = sizeof(mark); + err = getsockopt(client_fd, SOL_SOCKET, SO_MARK, &mark, &len); + if (ASSERT_OK(err, 
"getsockopt(client_fd, SO_MARK)")) + ASSERT_EQ(mark, 0, "mark"); + + len = sizeof(new); + err = getsockopt(client_fd, SOL_TCP, TCP_CONGESTION, new, &len); + if (ASSERT_OK(err, "getsockopt(client_fd, TCP_CONGESTION)")) { + get_msk_ca_name(cc); + ASSERT_STREQ(new, cc, "cc"); + } + + close(client_fd); +close_server: + close(server_fd); +} + +static void test_subflow(void) +{ + struct mptcp_subflow *skel; + struct nstoken *nstoken; + int cgroup_fd; + + cgroup_fd = test__join_cgroup("/mptcp_subflow"); + if (!ASSERT_OK_FD(cgroup_fd, "join_cgroup: mptcp_subflow")) + return; + + skel = mptcp_subflow__open_and_load(); + if (!ASSERT_OK_PTR(skel, "skel_open_load: mptcp_subflow")) + goto close_cgroup; + + skel->bss->pid = getpid(); + + skel->links.mptcp_subflow = + bpf_program__attach_cgroup(skel->progs.mptcp_subflow, cgroup_fd); + if (!ASSERT_OK_PTR(skel->links.mptcp_subflow, "attach mptcp_subflow")) + goto skel_destroy; + + skel->links._getsockopt_subflow = + bpf_program__attach_cgroup(skel->progs._getsockopt_subflow, cgroup_fd); + if (!ASSERT_OK_PTR(skel->links._getsockopt_subflow, "attach _getsockopt_subflow")) + goto skel_destroy; + + nstoken = create_netns(); + if (!ASSERT_OK_PTR(nstoken, "create_netns: mptcp_subflow")) + goto skel_destroy; + + if (endpoint_init("subflow") < 0) + goto close_netns; + + run_subflow(); + +close_netns: + cleanup_netns(nstoken); +skel_destroy: + mptcp_subflow__destroy(skel); +close_cgroup: + close(cgroup_fd); +} + void test_mptcp(void) { if (test__start_subtest("base")) test_base(); if (test__start_subtest("mptcpify")) test_mptcpify(); + if (test__start_subtest("subflow")) + test_subflow(); } diff --git a/tools/testing/selftests/bpf/prog_tests/netns_cookie.c b/tools/testing/selftests/bpf/prog_tests/netns_cookie.c index 71d8f3ba7d6b..ac3c3c097c0e 100644 --- a/tools/testing/selftests/bpf/prog_tests/netns_cookie.c +++ b/tools/testing/selftests/bpf/prog_tests/netns_cookie.c @@ -8,12 +8,16 @@ #define SO_NETNS_COOKIE 71 #endif +#define loopback 1 + static int duration; void test_netns_cookie(void) { + LIBBPF_OPTS(bpf_prog_attach_opts, opta); + LIBBPF_OPTS(bpf_prog_detach_opts, optd); int server_fd = -1, client_fd = -1, cgroup_fd = -1; - int err, val, ret, map, verdict; + int err, val, ret, map, verdict, tc_fd; struct netns_cookie_prog *skel; uint64_t cookie_expected_value; socklen_t vallen = sizeof(cookie_expected_value); @@ -38,36 +42,47 @@ void test_netns_cookie(void) if (!ASSERT_OK(err, "prog_attach")) goto done; + tc_fd = bpf_program__fd(skel->progs.get_netns_cookie_tcx); + err = bpf_prog_attach_opts(tc_fd, loopback, BPF_TCX_INGRESS, &opta); + if (!ASSERT_OK(err, "prog_attach")) + goto done; + server_fd = start_server(AF_INET6, SOCK_STREAM, "::1", 0, 0); if (CHECK(server_fd < 0, "start_server", "errno %d\n", errno)) - goto done; + goto cleanup_tc; client_fd = connect_to_fd(server_fd, 0); if (CHECK(client_fd < 0, "connect_to_fd", "errno %d\n", errno)) - goto done; + goto cleanup_tc; ret = send(client_fd, send_msg, sizeof(send_msg), 0); if (CHECK(ret != sizeof(send_msg), "send(msg)", "ret:%d\n", ret)) - goto done; + goto cleanup_tc; err = bpf_map_lookup_elem(bpf_map__fd(skel->maps.sockops_netns_cookies), &client_fd, &val); if (!ASSERT_OK(err, "map_lookup(sockops_netns_cookies)")) - goto done; + goto cleanup_tc; err = getsockopt(client_fd, SOL_SOCKET, SO_NETNS_COOKIE, &cookie_expected_value, &vallen); if (!ASSERT_OK(err, "getsockopt")) - goto done; + goto cleanup_tc; ASSERT_EQ(val, cookie_expected_value, "cookie_value"); err = 
bpf_map_lookup_elem(bpf_map__fd(skel->maps.sk_msg_netns_cookies), &client_fd, &val); if (!ASSERT_OK(err, "map_lookup(sk_msg_netns_cookies)")) - goto done; + goto cleanup_tc; ASSERT_EQ(val, cookie_expected_value, "cookie_value"); + ASSERT_EQ(skel->bss->tcx_init_netns_cookie, cookie_expected_value, "cookie_value"); + ASSERT_EQ(skel->bss->tcx_netns_cookie, cookie_expected_value, "cookie_value"); + +cleanup_tc: + err = bpf_prog_detach_opts(tc_fd, loopback, BPF_TCX_INGRESS, &optd); + ASSERT_OK(err, "prog_detach"); done: if (server_fd != -1) diff --git a/tools/testing/selftests/bpf/prog_tests/tc_netkit.c b/tools/testing/selftests/bpf/prog_tests/tc_netkit.c index b9135720024c..151a4210028f 100644 --- a/tools/testing/selftests/bpf/prog_tests/tc_netkit.c +++ b/tools/testing/selftests/bpf/prog_tests/tc_netkit.c @@ -14,7 +14,9 @@ #include "netlink_helpers.h" #include "tc_helpers.h" -#define ICMP_ECHO 8 +#define MARK 42 +#define PRIO 0xeb9f +#define ICMP_ECHO 8 struct icmphdr { __u8 type; @@ -33,7 +35,7 @@ struct iplink_req { }; static int create_netkit(int mode, int policy, int peer_policy, int *ifindex, - bool same_netns) + bool same_netns, int scrub, int peer_scrub) { struct rtnl_handle rth = { .fd = -1 }; struct iplink_req req = {}; @@ -58,6 +60,8 @@ static int create_netkit(int mode, int policy, int peer_policy, int *ifindex, data = addattr_nest(&req.n, sizeof(req), IFLA_INFO_DATA); addattr32(&req.n, sizeof(req), IFLA_NETKIT_POLICY, policy); addattr32(&req.n, sizeof(req), IFLA_NETKIT_PEER_POLICY, peer_policy); + addattr32(&req.n, sizeof(req), IFLA_NETKIT_SCRUB, scrub); + addattr32(&req.n, sizeof(req), IFLA_NETKIT_PEER_SCRUB, peer_scrub); addattr32(&req.n, sizeof(req), IFLA_NETKIT_MODE, mode); addattr_nest_end(&req.n, data); addattr_nest_end(&req.n, linkinfo); @@ -118,9 +122,9 @@ static void destroy_netkit(void) static int __send_icmp(__u32 dest) { + int sock, ret, mark = MARK, prio = PRIO; struct sockaddr_in addr; struct icmphdr icmp; - int sock, ret; ret = write_sysctl("/proc/sys/net/ipv4/ping_group_range", "0 0"); if (!ASSERT_OK(ret, "write_sysctl(net.ipv4.ping_group_range)")) @@ -135,6 +139,15 @@ static int __send_icmp(__u32 dest) if (!ASSERT_OK(ret, "setsockopt(SO_BINDTODEVICE)")) goto out; + ret = setsockopt(sock, SOL_SOCKET, SO_MARK, &mark, sizeof(mark)); + if (!ASSERT_OK(ret, "setsockopt(SO_MARK)")) + goto out; + + ret = setsockopt(sock, SOL_SOCKET, SO_PRIORITY, + &prio, sizeof(prio)); + if (!ASSERT_OK(ret, "setsockopt(SO_PRIORITY)")) + goto out; + memset(&addr, 0, sizeof(addr)); addr.sin_family = AF_INET; addr.sin_addr.s_addr = htonl(dest); @@ -171,7 +184,8 @@ void serial_test_tc_netkit_basic(void) int err, ifindex; err = create_netkit(NETKIT_L2, NETKIT_PASS, NETKIT_PASS, - &ifindex, false); + &ifindex, false, NETKIT_SCRUB_DEFAULT, + NETKIT_SCRUB_DEFAULT); if (err) return; @@ -285,7 +299,8 @@ static void serial_test_tc_netkit_multi_links_target(int mode, int target) int err, ifindex; err = create_netkit(mode, NETKIT_PASS, NETKIT_PASS, - &ifindex, false); + &ifindex, false, NETKIT_SCRUB_DEFAULT, + NETKIT_SCRUB_DEFAULT); if (err) return; @@ -413,7 +428,8 @@ static void serial_test_tc_netkit_multi_opts_target(int mode, int target) int err, ifindex; err = create_netkit(mode, NETKIT_PASS, NETKIT_PASS, - &ifindex, false); + &ifindex, false, NETKIT_SCRUB_DEFAULT, + NETKIT_SCRUB_DEFAULT); if (err) return; @@ -527,7 +543,8 @@ void serial_test_tc_netkit_device(void) int err, ifindex, ifindex2; err = create_netkit(NETKIT_L3, NETKIT_PASS, NETKIT_PASS, - &ifindex, true); + &ifindex, true, 
NETKIT_SCRUB_DEFAULT, + NETKIT_SCRUB_DEFAULT); if (err) return; @@ -638,7 +655,8 @@ static void serial_test_tc_netkit_neigh_links_target(int mode, int target) int err, ifindex; err = create_netkit(mode, NETKIT_PASS, NETKIT_PASS, - &ifindex, false); + &ifindex, false, NETKIT_SCRUB_DEFAULT, + NETKIT_SCRUB_DEFAULT); if (err) return; @@ -715,7 +733,8 @@ static void serial_test_tc_netkit_pkt_type_mode(int mode) struct bpf_link *link; err = create_netkit(mode, NETKIT_PASS, NETKIT_PASS, - &ifindex, true); + &ifindex, true, NETKIT_SCRUB_DEFAULT, + NETKIT_SCRUB_DEFAULT); if (err) return; @@ -779,3 +798,60 @@ void serial_test_tc_netkit_pkt_type(void) serial_test_tc_netkit_pkt_type_mode(NETKIT_L2); serial_test_tc_netkit_pkt_type_mode(NETKIT_L3); } + +static void serial_test_tc_netkit_scrub_type(int scrub) +{ + LIBBPF_OPTS(bpf_netkit_opts, optl); + struct test_tc_link *skel; + struct bpf_link *link; + int err, ifindex; + + err = create_netkit(NETKIT_L2, NETKIT_PASS, NETKIT_PASS, + &ifindex, false, scrub, scrub); + if (err) + return; + + skel = test_tc_link__open(); + if (!ASSERT_OK_PTR(skel, "skel_open")) + goto cleanup; + + ASSERT_EQ(bpf_program__set_expected_attach_type(skel->progs.tc8, + BPF_NETKIT_PRIMARY), 0, "tc8_attach_type"); + + err = test_tc_link__load(skel); + if (!ASSERT_OK(err, "skel_load")) + goto cleanup; + + assert_mprog_count_ifindex(ifindex, BPF_NETKIT_PRIMARY, 0); + assert_mprog_count_ifindex(ifindex, BPF_NETKIT_PEER, 0); + + ASSERT_EQ(skel->bss->seen_tc8, false, "seen_tc8"); + + link = bpf_program__attach_netkit(skel->progs.tc8, ifindex, &optl); + if (!ASSERT_OK_PTR(link, "link_attach")) + goto cleanup; + + skel->links.tc8 = link; + + assert_mprog_count_ifindex(ifindex, BPF_NETKIT_PRIMARY, 1); + assert_mprog_count_ifindex(ifindex, BPF_NETKIT_PEER, 0); + + tc_skel_reset_all_seen(skel); + ASSERT_EQ(send_icmp(), 0, "icmp_pkt"); + + ASSERT_EQ(skel->bss->seen_tc8, true, "seen_tc8"); + ASSERT_EQ(skel->bss->mark, scrub == NETKIT_SCRUB_NONE ? MARK : 0, "mark"); + ASSERT_EQ(skel->bss->prio, scrub == NETKIT_SCRUB_NONE ? 
PRIO : 0, "prio"); +cleanup: + test_tc_link__destroy(skel); + + assert_mprog_count_ifindex(ifindex, BPF_NETKIT_PRIMARY, 0); + assert_mprog_count_ifindex(ifindex, BPF_NETKIT_PEER, 0); + destroy_netkit(); +} + +void serial_test_tc_netkit_scrub(void) +{ + serial_test_tc_netkit_scrub_type(NETKIT_SCRUB_DEFAULT); + serial_test_tc_netkit_scrub_type(NETKIT_SCRUB_NONE); +} diff --git a/tools/testing/selftests/bpf/prog_tests/xdp_cpumap_attach.c b/tools/testing/selftests/bpf/prog_tests/xdp_cpumap_attach.c index 481626a875d1..c7f74f068e78 100644 --- a/tools/testing/selftests/bpf/prog_tests/xdp_cpumap_attach.c +++ b/tools/testing/selftests/bpf/prog_tests/xdp_cpumap_attach.c @@ -2,35 +2,41 @@ #include <uapi/linux/bpf.h> #include <linux/if_link.h> #include <test_progs.h> +#include <network_helpers.h> #include "test_xdp_with_cpumap_frags_helpers.skel.h" #include "test_xdp_with_cpumap_helpers.skel.h" #define IFINDEX_LO 1 +#define TEST_NS "cpu_attach_ns" static void test_xdp_with_cpumap_helpers(void) { - struct test_xdp_with_cpumap_helpers *skel; + struct test_xdp_with_cpumap_helpers *skel = NULL; struct bpf_prog_info info = {}; __u32 len = sizeof(info); struct bpf_cpumap_val val = { .qsize = 192, }; - int err, prog_fd, map_fd; + int err, prog_fd, prog_redir_fd, map_fd; + struct nstoken *nstoken = NULL; __u32 idx = 0; + SYS(out_close, "ip netns add %s", TEST_NS); + nstoken = open_netns(TEST_NS); + if (!ASSERT_OK_PTR(nstoken, "open_netns")) + goto out_close; + SYS(out_close, "ip link set dev lo up"); + skel = test_xdp_with_cpumap_helpers__open_and_load(); if (!ASSERT_OK_PTR(skel, "test_xdp_with_cpumap_helpers__open_and_load")) return; - prog_fd = bpf_program__fd(skel->progs.xdp_redir_prog); - err = bpf_xdp_attach(IFINDEX_LO, prog_fd, XDP_FLAGS_SKB_MODE, NULL); + prog_redir_fd = bpf_program__fd(skel->progs.xdp_redir_prog); + err = bpf_xdp_attach(IFINDEX_LO, prog_redir_fd, XDP_FLAGS_SKB_MODE, NULL); if (!ASSERT_OK(err, "Generic attach of program with 8-byte CPUMAP")) goto out_close; - err = bpf_xdp_detach(IFINDEX_LO, XDP_FLAGS_SKB_MODE, NULL); - ASSERT_OK(err, "XDP program detach"); - prog_fd = bpf_program__fd(skel->progs.xdp_dummy_cm); map_fd = bpf_map__fd(skel->maps.cpu_map); err = bpf_prog_get_info_by_fd(prog_fd, &info, &len); @@ -45,6 +51,26 @@ static void test_xdp_with_cpumap_helpers(void) ASSERT_OK(err, "Read cpumap entry"); ASSERT_EQ(info.id, val.bpf_prog.id, "Match program id to cpumap entry prog_id"); + /* send a packet to trigger any potential bugs in there */ + char data[10] = {}; + DECLARE_LIBBPF_OPTS(bpf_test_run_opts, opts, + .data_in = &data, + .data_size_in = 10, + .flags = BPF_F_TEST_XDP_LIVE_FRAMES, + .repeat = 1, + ); + err = bpf_prog_test_run_opts(prog_redir_fd, &opts); + ASSERT_OK(err, "XDP test run"); + + /* wait for the packets to be flushed, then check that redirect has been + * performed + */ + kern_sync_rcu(); + ASSERT_NEQ(skel->bss->redirect_count, 0, "redirected packets"); + + err = bpf_xdp_detach(IFINDEX_LO, XDP_FLAGS_SKB_MODE, NULL); + ASSERT_OK(err, "XDP program detach"); + /* can not attach BPF_XDP_CPUMAP program to a device */ err = bpf_xdp_attach(IFINDEX_LO, prog_fd, XDP_FLAGS_SKB_MODE, NULL); if (!ASSERT_NEQ(err, 0, "Attach of BPF_XDP_CPUMAP program")) @@ -65,6 +91,8 @@ static void test_xdp_with_cpumap_helpers(void) ASSERT_NEQ(err, 0, "Add BPF_XDP program with frags to cpumap entry"); out_close: + close_netns(nstoken); + SYS_NOFAIL("ip netns del %s", TEST_NS); test_xdp_with_cpumap_helpers__destroy(skel); } @@ -111,7 +139,7 @@ out_close: 
test_xdp_with_cpumap_frags_helpers__destroy(skel); } -void serial_test_xdp_cpumap_attach(void) +void test_xdp_cpumap_attach(void) { if (test__start_subtest("CPUMAP with programs in entries")) test_xdp_with_cpumap_helpers(); diff --git a/tools/testing/selftests/bpf/progs/mptcp_bpf.h b/tools/testing/selftests/bpf/progs/mptcp_bpf.h new file mode 100644 index 000000000000..3b188ccdcc40 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/mptcp_bpf.h @@ -0,0 +1,42 @@ +/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */ +#ifndef __MPTCP_BPF_H__ +#define __MPTCP_BPF_H__ + +#include "bpf_experimental.h" + +/* list helpers from include/linux/list.h */ +static inline int list_is_head(const struct list_head *list, + const struct list_head *head) +{ + return list == head; +} + +#define list_entry(ptr, type, member) \ + container_of(ptr, type, member) + +#define list_first_entry(ptr, type, member) \ + list_entry((ptr)->next, type, member) + +#define list_next_entry(pos, member) \ + list_entry((pos)->member.next, typeof(*(pos)), member) + +#define list_entry_is_head(pos, head, member) \ + list_is_head(&pos->member, (head)) + +/* small difference: 'can_loop' has been added in the conditions */ +#define list_for_each_entry(pos, head, member) \ + for (pos = list_first_entry(head, typeof(*pos), member); \ + !list_entry_is_head(pos, head, member) && can_loop; \ + pos = list_next_entry(pos, member)) + +/* mptcp helpers from protocol.h */ +#define mptcp_for_each_subflow(__msk, __subflow) \ + list_for_each_entry(__subflow, &((__msk)->conn_list), node) + +static __always_inline struct sock * +mptcp_subflow_tcp_sock(const struct mptcp_subflow_context *subflow) +{ + return subflow->tcp_sock; +} + +#endif diff --git a/tools/testing/selftests/bpf/progs/mptcp_subflow.c b/tools/testing/selftests/bpf/progs/mptcp_subflow.c new file mode 100644 index 000000000000..70302477e326 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/mptcp_subflow.c @@ -0,0 +1,128 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2020, Tessares SA. 
*/ +/* Copyright (c) 2024, Kylin Software */ + +/* vmlinux.h, bpf_helpers.h and other 'define' */ +#include "bpf_tracing_net.h" +#include "mptcp_bpf.h" + +char _license[] SEC("license") = "GPL"; + +char cc[TCP_CA_NAME_MAX] = "reno"; +int pid; + +/* Associate a subflow counter to each token */ +struct { + __uint(type, BPF_MAP_TYPE_HASH); + __uint(key_size, sizeof(__u32)); + __uint(value_size, sizeof(__u32)); + __uint(max_entries, 100); +} mptcp_sf SEC(".maps"); + +SEC("sockops") +int mptcp_subflow(struct bpf_sock_ops *skops) +{ + __u32 init = 1, key, mark, *cnt; + struct mptcp_sock *msk; + struct bpf_sock *sk; + int err; + + if (skops->op != BPF_SOCK_OPS_TCP_CONNECT_CB) + return 1; + + sk = skops->sk; + if (!sk) + return 1; + + msk = bpf_skc_to_mptcp_sock(sk); + if (!msk) + return 1; + + key = msk->token; + cnt = bpf_map_lookup_elem(&mptcp_sf, &key); + if (cnt) { + /* A new subflow is added to an existing MPTCP connection */ + __sync_fetch_and_add(cnt, 1); + mark = *cnt; + } else { + /* A new MPTCP connection is just initiated and this is its primary subflow */ + bpf_map_update_elem(&mptcp_sf, &key, &init, BPF_ANY); + mark = init; + } + + /* Set the mark of the subflow's socket based on appearance order */ + err = bpf_setsockopt(skops, SOL_SOCKET, SO_MARK, &mark, sizeof(mark)); + if (err < 0) + return 1; + if (mark == 2) + err = bpf_setsockopt(skops, SOL_TCP, TCP_CONGESTION, cc, TCP_CA_NAME_MAX); + + return 1; +} + +static int _check_getsockopt_subflow_mark(struct mptcp_sock *msk, struct bpf_sockopt *ctx) +{ + struct mptcp_subflow_context *subflow; + int i = 0; + + mptcp_for_each_subflow(msk, subflow) { + struct sock *ssk; + + ssk = mptcp_subflow_tcp_sock(bpf_core_cast(subflow, + struct mptcp_subflow_context)); + + if (ssk->sk_mark != ++i) { + ctx->retval = -2; + break; + } + } + + return 1; +} + +static int _check_getsockopt_subflow_cc(struct mptcp_sock *msk, struct bpf_sockopt *ctx) +{ + struct mptcp_subflow_context *subflow; + + mptcp_for_each_subflow(msk, subflow) { + struct inet_connection_sock *icsk; + struct sock *ssk; + + ssk = mptcp_subflow_tcp_sock(bpf_core_cast(subflow, + struct mptcp_subflow_context)); + icsk = bpf_core_cast(ssk, struct inet_connection_sock); + + if (ssk->sk_mark == 2 && + __builtin_memcmp(icsk->icsk_ca_ops->name, cc, TCP_CA_NAME_MAX)) { + ctx->retval = -2; + break; + } + } + + return 1; +} + +SEC("cgroup/getsockopt") +int _getsockopt_subflow(struct bpf_sockopt *ctx) +{ + struct bpf_sock *sk = ctx->sk; + struct mptcp_sock *msk; + + if (bpf_get_current_pid_tgid() >> 32 != pid) + return 1; + + if (!sk || sk->protocol != IPPROTO_MPTCP || + (!(ctx->level == SOL_SOCKET && ctx->optname == SO_MARK) && + !(ctx->level == SOL_TCP && ctx->optname == TCP_CONGESTION))) + return 1; + + msk = bpf_core_cast(sk, struct mptcp_sock); + if (msk->pm.subflows != 1) { + ctx->retval = -1; + return 1; + } + + if (ctx->optname == SO_MARK) + return _check_getsockopt_subflow_mark(msk, ctx); + return _check_getsockopt_subflow_cc(msk, ctx); +} diff --git a/tools/testing/selftests/bpf/progs/netns_cookie_prog.c b/tools/testing/selftests/bpf/progs/netns_cookie_prog.c index aeff3a4f9287..c6edf8dbefeb 100644 --- a/tools/testing/selftests/bpf/progs/netns_cookie_prog.c +++ b/tools/testing/selftests/bpf/progs/netns_cookie_prog.c @@ -27,6 +27,8 @@ struct { __type(value, __u64); } sock_map SEC(".maps"); +int tcx_init_netns_cookie, tcx_netns_cookie; + SEC("sockops") int get_netns_cookie_sockops(struct bpf_sock_ops *ctx) { @@ -81,4 +83,12 @@ int get_netns_cookie_sk_msg(struct sk_msg_md *msg) return 1; } 
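[Editor's note] The tcx program added just below is the kernel-side half of a round-trip check: the netns cookies it records via bpf_get_netns_cookie(), once on NULL and once on the skb, are later compared by the test against what user space reads directly from the socket. A minimal sketch of that user-space read, reusing the SO_NETNS_COOKIE fallback value the test itself defines (71); the helper name is illustrative:

#include <errno.h>
#include <stdint.h>
#include <sys/socket.h>

#ifndef SO_NETNS_COOKIE
#define SO_NETNS_COOKIE 71	/* same fallback as the selftest */
#endif

/* Fetch the cookie of the netns owning this socket; the sockops,
 * sk_msg and tcx programs should all observe the same value.
 */
static int read_netns_cookie(int fd, uint64_t *cookie)
{
	socklen_t len = sizeof(*cookie);

	if (getsockopt(fd, SOL_SOCKET, SO_NETNS_COOKIE, cookie, &len))
		return -errno;
	return 0;
}
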
+SEC("tcx/ingress") +int get_netns_cookie_tcx(struct __sk_buff *skb) +{ + tcx_init_netns_cookie = bpf_get_netns_cookie(NULL); + tcx_netns_cookie = bpf_get_netns_cookie(skb); + return TCX_PASS; +} + char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/test_tc_link.c b/tools/testing/selftests/bpf/progs/test_tc_link.c index ab3eae3d6af8..10d825928499 100644 --- a/tools/testing/selftests/bpf/progs/test_tc_link.c +++ b/tools/testing/selftests/bpf/progs/test_tc_link.c @@ -18,6 +18,7 @@ bool seen_tc4; bool seen_tc5; bool seen_tc6; bool seen_tc7; +bool seen_tc8; bool set_type; @@ -25,6 +26,8 @@ bool seen_eth; bool seen_host; bool seen_mcast; +int mark, prio; + SEC("tc/ingress") int tc1(struct __sk_buff *skb) { @@ -100,3 +103,12 @@ out: seen_tc7 = true; return TCX_PASS; } + +SEC("tc/egress") +int tc8(struct __sk_buff *skb) +{ + seen_tc8 = true; + mark = skb->mark; + prio = skb->priority; + return TCX_PASS; +} diff --git a/tools/testing/selftests/bpf/progs/test_xdp_with_cpumap_helpers.c b/tools/testing/selftests/bpf/progs/test_xdp_with_cpumap_helpers.c index 20ec6723df18..3619239b01b7 100644 --- a/tools/testing/selftests/bpf/progs/test_xdp_with_cpumap_helpers.c +++ b/tools/testing/selftests/bpf/progs/test_xdp_with_cpumap_helpers.c @@ -12,10 +12,12 @@ struct { __uint(max_entries, 4); } cpu_map SEC(".maps"); +__u32 redirect_count = 0; + SEC("xdp") int xdp_redir_prog(struct xdp_md *ctx) { - return bpf_redirect_map(&cpu_map, 1, 0); + return bpf_redirect_map(&cpu_map, 0, 0); } SEC("xdp") @@ -27,6 +29,9 @@ int xdp_dummy_prog(struct xdp_md *ctx) SEC("xdp/cpumap") int xdp_dummy_cm(struct xdp_md *ctx) { + if (bpf_get_smp_processor_id() == 0) + redirect_count++; + if (ctx->ingress_ifindex == IFINDEX_LO) return XDP_DROP; diff --git a/tools/testing/selftests/drivers/net/Makefile b/tools/testing/selftests/drivers/net/Makefile index 39fb97a8c1df..0fec8f9801ad 100644 --- a/tools/testing/selftests/drivers/net/Makefile +++ b/tools/testing/selftests/drivers/net/Makefile @@ -9,6 +9,7 @@ TEST_PROGS := \ ping.py \ queues.py \ stats.py \ + shaper.py \ # end of TEST_PROGS include ../../lib.mk diff --git a/tools/testing/selftests/drivers/net/hw/rss_ctx.py b/tools/testing/selftests/drivers/net/hw/rss_ctx.py index 9d7adb3cf33b..29995586993c 100755 --- a/tools/testing/selftests/drivers/net/hw/rss_ctx.py +++ b/tools/testing/selftests/drivers/net/hw/rss_ctx.py @@ -6,7 +6,7 @@ import random from lib.py import ksft_run, ksft_pr, ksft_exit, ksft_eq, ksft_ne, ksft_ge, ksft_lt from lib.py import NetDrvEpEnv from lib.py import EthtoolFamily, NetdevFamily -from lib.py import KsftSkipEx +from lib.py import KsftSkipEx, KsftFailEx from lib.py import rand_port from lib.py import ethtool, ip, defer, GenerateTraffic, CmdExitFailure @@ -606,6 +606,33 @@ def test_rss_context_overlap2(cfg): test_rss_context_overlap(cfg, True) +def test_delete_rss_context_busy(cfg): + """ + Test that deletion returns -EBUSY when an rss context is being used + by an ntuple filter. 
+ """ + + require_ntuple(cfg) + + # create additional rss context + ctx_id = ethtool_create(cfg, "-X", "context new") + ctx_deleter = defer(ethtool, f"-X {cfg.ifname} context {ctx_id} delete") + + # utilize context from ntuple filter + port = rand_port() + flow = f"flow-type tcp{cfg.addr_ipver} dst-port {port} context {ctx_id}" + ntuple_id = ethtool_create(cfg, "-N", flow) + defer(ethtool, f"-N {cfg.ifname} delete {ntuple_id}") + + # attempt to delete in-use context + try: + ctx_deleter.exec_only() + ctx_deleter.cancel() + raise KsftFailEx(f"deleted context {ctx_id} used by rule {ntuple_id}") + except CmdExitFailure: + pass + + def main() -> None: with NetDrvEpEnv(__file__, nsim_test=False) as cfg: cfg.ethnl = EthtoolFamily() @@ -616,7 +643,8 @@ def main() -> None: test_rss_context, test_rss_context4, test_rss_context32, test_rss_context_dump, test_rss_context_queue_reconfigure, test_rss_context_overlap, test_rss_context_overlap2, - test_rss_context_out_of_order, test_rss_context4_create_with_cfg], + test_rss_context_out_of_order, test_rss_context4_create_with_cfg, + test_delete_rss_context_busy], args=(cfg, )) ksft_exit() diff --git a/tools/testing/selftests/drivers/net/mlxsw/devlink_trap_policer.sh b/tools/testing/selftests/drivers/net/mlxsw/devlink_trap_policer.sh index 0bd5ffc218ac..29a672c2270f 100755 --- a/tools/testing/selftests/drivers/net/mlxsw/devlink_trap_policer.sh +++ b/tools/testing/selftests/drivers/net/mlxsw/devlink_trap_policer.sh @@ -45,63 +45,52 @@ source $lib_dir/devlink_lib.sh h1_create() { simple_if_init $h1 192.0.2.1/24 + defer simple_if_fini $h1 192.0.2.1/24 + mtu_set $h1 10000 + defer mtu_restore $h1 ip -4 route add default vrf v$h1 nexthop via 192.0.2.2 -} - -h1_destroy() -{ - ip -4 route del default vrf v$h1 nexthop via 192.0.2.2 - - mtu_restore $h1 - simple_if_fini $h1 192.0.2.1/24 + defer ip -4 route del default vrf v$h1 nexthop via 192.0.2.2 } h2_create() { simple_if_init $h2 198.51.100.1/24 + defer simple_if_fini $h2 198.51.100.1/24 + mtu_set $h2 10000 + defer mtu_restore $h2 ip -4 route add default vrf v$h2 nexthop via 198.51.100.2 -} - -h2_destroy() -{ - ip -4 route del default vrf v$h2 nexthop via 198.51.100.2 - - mtu_restore $h2 - simple_if_fini $h2 198.51.100.1/24 + defer ip -4 route del default vrf v$h2 nexthop via 198.51.100.2 } router_create() { ip link set dev $rp1 up + defer ip link set dev $rp1 down + ip link set dev $rp2 up + defer ip link set dev $rp2 down __addr_add_del $rp1 add 192.0.2.2/24 + defer __addr_add_del $rp1 del 192.0.2.2/24 + __addr_add_del $rp2 add 198.51.100.2/24 + defer __addr_add_del $rp2 del 198.51.100.2/24 + mtu_set $rp1 10000 + defer mtu_restore $rp1 + mtu_set $rp2 10000 + defer mtu_restore $rp2 ip -4 route add blackhole 198.51.100.100 + defer ip -4 route del blackhole 198.51.100.100 devlink trap set $DEVLINK_DEV trap blackhole_route action trap -} - -router_destroy() -{ - devlink trap set $DEVLINK_DEV trap blackhole_route action drop - - ip -4 route del blackhole 198.51.100.100 - - mtu_restore $rp2 - mtu_restore $rp1 - __addr_add_del $rp2 del 198.51.100.2/24 - __addr_add_del $rp1 del 192.0.2.2/24 - - ip link set dev $rp2 down - ip link set dev $rp1 down + defer devlink trap set $DEVLINK_DEV trap blackhole_route action drop } setup_prepare() @@ -114,7 +103,11 @@ setup_prepare() rp1_mac=$(mac_get $rp1) + # Reload to ensure devlink-trap settings are back to default. 
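[Editor's note] From here on, the mlxsw scripts systematically replace paired *_create()/*_destroy() functions with a defer statement registered immediately after each setup command, so teardown runs automatically, in reverse order, when the test exits. A loose C analogue of the same pairing, using the GCC/Clang cleanup attribute (a sketch, not anything from the patch):

#include <stdio.h>

/* Run a cleanup function when the variable leaves scope, mimicking
 * how "defer <cmd>" pairs teardown with the setup line above it.
 */
static void put_resource(int *res)
{
	printf("cleanup resource %d\n", *res);
}

#define DEFER_CLEANUP __attribute__((cleanup(put_resource)))

int demo(void)
{
	DEFER_CLEANUP int a = 1;	/* cleaned up second (LIFO) */
	DEFER_CLEANUP int b = 2;	/* cleaned up first */

	return 0;	/* both cleanups run here, b then a */
}

The payoff is visible throughout these hunks: the *_destroy() mirror functions disappear entirely, and a setup step can no longer be added without its matching teardown sitting on the next line.
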
+ defer devlink_reload + vrf_prepare + defer vrf_cleanup h1_create h2_create @@ -122,21 +115,6 @@ setup_prepare() router_create } -cleanup() -{ - pre_cleanup - - router_destroy - - h2_destroy - h1_destroy - - vrf_cleanup - - # Reload to ensure devlink-trap settings are back to default. - devlink_reload -} - rate_limits_test() { RET=0 @@ -214,7 +192,10 @@ __rate_test() # by the policer. Make sure measured received rate is about 1000 pps log_info "=== Tx rate: Highest, Policer rate: 1000 pps ===" + defer_scope_push + start_traffic $h1 192.0.2.1 198.51.100.100 $rp1_mac + defer stop_traffic $! sleep 5 # Take measurements when rate is stable @@ -229,13 +210,16 @@ __rate_test() check_err $? "Expected non-zero policer drop rate, got 0" log_info "Measured policer drop rate of $drop_rate pps" - stop_traffic + defer_scope_pop # Send packets at a rate of 1000 pps and make sure they are not dropped # by the policer log_info "=== Tx rate: 1000 pps, Policer rate: 1000 pps ===" + defer_scope_push + start_traffic $h1 192.0.2.1 198.51.100.100 $rp1_mac -d 1msec + defer stop_traffic $! sleep 5 # Take measurements when rate is stable @@ -244,7 +228,7 @@ __rate_test() check_err $? "Expected zero policer drop rate, got a drop rate of $drop_rate pps" log_info "Measured policer drop rate of $drop_rate pps" - stop_traffic + defer_scope_pop # Unbind the policer and send packets at highest possible rate. Make # sure they are not dropped by the policer and that the measured @@ -253,7 +237,10 @@ __rate_test() devlink trap group set $DEVLINK_DEV group l3_drops nopolicer + defer_scope_push + start_traffic $h1 192.0.2.1 198.51.100.100 $rp1_mac + defer stop_traffic $! rate=$(trap_rate_get) (( rate > 1000 )) @@ -265,7 +252,7 @@ __rate_test() check_err $? "Expected zero policer drop rate, got a drop rate of $drop_rate pps" log_info "Measured policer drop rate of $drop_rate pps" - stop_traffic + defer_scope_pop log_test "Trap policer rate" } diff --git a/tools/testing/selftests/drivers/net/mlxsw/qos_ets_strict.sh b/tools/testing/selftests/drivers/net/mlxsw/qos_ets_strict.sh index fee74f215cec..d5b6f2cc9a29 100755 --- a/tools/testing/selftests/drivers/net/mlxsw/qos_ets_strict.sh +++ b/tools/testing/selftests/drivers/net/mlxsw/qos_ets_strict.sh @@ -58,65 +58,62 @@ source qos_lib.sh h1_create() { simple_if_init $h1 + defer simple_if_fini $h1 + mtu_set $h1 10000 + defer mtu_restore $h1 vlan_create $h1 111 v$h1 192.0.2.33/28 + defer vlan_destroy $h1 111 ip link set dev $h1.111 type vlan egress-qos-map 0:1 } -h1_destroy() -{ - vlan_destroy $h1 111 - - mtu_restore $h1 - simple_if_fini $h1 -} - h2_create() { simple_if_init $h2 + defer simple_if_fini $h2 + mtu_set $h2 10000 + defer mtu_restore $h2 vlan_create $h2 222 v$h2 192.0.2.65/28 + defer vlan_destroy $h2 222 ip link set dev $h2.222 type vlan egress-qos-map 0:2 } -h2_destroy() -{ - vlan_destroy $h2 222 - - mtu_restore $h2 - simple_if_fini $h2 -} - h3_create() { simple_if_init $h3 + defer simple_if_fini $h3 + mtu_set $h3 10000 + defer mtu_restore $h3 vlan_create $h3 111 v$h3 192.0.2.34/28 - vlan_create $h3 222 v$h3 192.0.2.66/28 -} - -h3_destroy() -{ - vlan_destroy $h3 222 - vlan_destroy $h3 111 + defer vlan_destroy $h3 111 - mtu_restore $h3 - simple_if_fini $h3 + vlan_create $h3 222 v$h3 192.0.2.66/28 + defer vlan_destroy $h3 222 } switch_create() { ip link set dev $swp1 up + defer ip link set dev $swp1 down + mtu_set $swp1 10000 + defer mtu_restore $swp1 ip link set dev $swp2 up + defer ip link set dev $swp2 down + mtu_set $swp2 10000 + defer mtu_restore $swp2 # prio n -> TC n, 
strict scheduling lldptool -T -i $swp3 -V ETS-CFG up2tc=0:0,1:1,2:2,3:3,4:4,5:5,6:6,7:7 + defer lldptool -T -i $swp3 -V ETS-CFG up2tc=0:0,1:0,2:0,3:0,4:0,5:0,6:0,7:0 + lldptool -T -i $swp3 -V ETS-CFG tsa=$( )"0:strict,"$( )"1:strict,"$( @@ -129,85 +126,90 @@ switch_create() sleep 1 ip link set dev $swp3 up + defer ip link set dev $swp3 down + mtu_set $swp3 10000 + defer mtu_restore $swp3 + tc qdisc replace dev $swp3 root handle 101: tbf rate 1gbit \ burst 128K limit 1G + defer tc qdisc del dev $swp3 root handle 101: vlan_create $swp1 111 + defer vlan_destroy $swp1 111 + vlan_create $swp2 222 + defer vlan_destroy $swp2 222 + vlan_create $swp3 111 + defer vlan_destroy $swp3 111 + vlan_create $swp3 222 + defer vlan_destroy $swp3 222 ip link add name br111 type bridge vlan_filtering 0 + defer ip link del dev br111 ip link set dev br111 addrgenmode none + ip link set dev br111 up + defer ip link set dev br111 down + ip link set dev $swp1.111 master br111 + defer ip link set dev $swp1.111 nomaster + ip link set dev $swp3.111 master br111 + defer ip link set dev $swp3.111 nomaster ip link add name br222 type bridge vlan_filtering 0 + defer ip link del dev br222 ip link set dev br222 addrgenmode none + ip link set dev br222 up + defer ip link set dev br222 down + ip link set dev $swp2.222 master br222 + defer ip link set dev $swp2.222 nomaster + ip link set dev $swp3.222 master br222 + defer ip link set dev $swp3.222 nomaster # Make sure that ingress quotas are smaller than egress so that there is # room for both streams of traffic to be admitted to shared buffer. devlink_pool_size_thtype_save 0 devlink_pool_size_thtype_set 0 dynamic 10000000 + defer devlink_pool_size_thtype_restore 0 + devlink_pool_size_thtype_save 4 devlink_pool_size_thtype_set 4 dynamic 10000000 + defer devlink_pool_size_thtype_restore 4 devlink_port_pool_th_save $swp1 0 devlink_port_pool_th_set $swp1 0 6 + defer devlink_port_pool_th_restore $swp1 0 + devlink_tc_bind_pool_th_save $swp1 1 ingress devlink_tc_bind_pool_th_set $swp1 1 ingress 0 6 + defer devlink_tc_bind_pool_th_restore $swp1 1 ingress devlink_port_pool_th_save $swp2 0 devlink_port_pool_th_set $swp2 0 6 + defer devlink_port_pool_th_restore $swp2 0 + devlink_tc_bind_pool_th_save $swp2 2 ingress devlink_tc_bind_pool_th_set $swp2 2 ingress 0 6 + defer devlink_tc_bind_pool_th_restore $swp2 2 ingress devlink_tc_bind_pool_th_save $swp3 1 egress devlink_tc_bind_pool_th_set $swp3 1 egress 4 7 + defer devlink_tc_bind_pool_th_restore $swp3 1 egress + devlink_tc_bind_pool_th_save $swp3 2 egress devlink_tc_bind_pool_th_set $swp3 2 egress 4 7 + defer devlink_tc_bind_pool_th_restore $swp3 2 egress + devlink_port_pool_th_save $swp3 4 devlink_port_pool_th_set $swp3 4 7 -} - -switch_destroy() -{ - devlink_port_pool_th_restore $swp3 4 - devlink_tc_bind_pool_th_restore $swp3 2 egress - devlink_tc_bind_pool_th_restore $swp3 1 egress - - devlink_tc_bind_pool_th_restore $swp2 2 ingress - devlink_port_pool_th_restore $swp2 0 - - devlink_tc_bind_pool_th_restore $swp1 1 ingress - devlink_port_pool_th_restore $swp1 0 - - devlink_pool_size_thtype_restore 4 - devlink_pool_size_thtype_restore 0 - - ip link del dev br222 - ip link del dev br111 - - vlan_destroy $swp3 222 - vlan_destroy $swp3 111 - vlan_destroy $swp2 222 - vlan_destroy $swp1 111 - - tc qdisc del dev $swp3 root handle 101: - mtu_restore $swp3 - ip link set dev $swp3 down - lldptool -T -i $swp3 -V ETS-CFG up2tc=0:0,1:0,2:0,3:0,4:0,5:0,6:0,7:0 - - mtu_restore $swp2 - ip link set dev $swp2 down - - mtu_restore $swp1 - ip link set dev 
$swp1 down + defer devlink_port_pool_th_restore $swp3 4 } setup_prepare() @@ -224,6 +226,7 @@ setup_prepare() h3mac=$(mac_get $h3) vrf_prepare + defer vrf_cleanup h1_create h2_create @@ -231,18 +234,6 @@ setup_prepare() switch_create } -cleanup() -{ - pre_cleanup - - switch_destroy - h3_destroy - h2_destroy - h1_destroy - - vrf_cleanup -} - ping_ipv4() { ping_test $h1 192.0.2.34 " from H1" @@ -261,21 +252,38 @@ rel() " } +__run_hi_measure_rate() +{ + local what=$1; shift + local -a uc_rate + + start_traffic $h2.222 192.0.2.65 192.0.2.66 $h3mac + defer stop_traffic $! + + uc_rate=($(measure_rate $swp2 $h3 rx_octets_prio_2 "$what")) + check_err $? "Could not get high enough $what ingress rate" + + echo ${uc_rate[@]} +} + +run_hi_measure_rate() +{ + in_defer_scope __run_hi_measure_rate "$@" +} + test_ets_strict() { RET=0 # Run high-prio traffic on its own. - start_traffic $h2.222 192.0.2.65 192.0.2.66 $h3mac local -a rate_2 - rate_2=($(measure_rate $swp2 $h3 rx_octets_prio_2 "prio 2")) - check_err $? "Could not get high enough prio-2 ingress rate" + rate_2=($(run_hi_measure_rate "prio 2")) local rate_2_in=${rate_2[0]} local rate_2_eg=${rate_2[1]} - stop_traffic # $h2.222 # Start low-prio stream. start_traffic $h1.111 192.0.2.33 192.0.2.34 $h3mac + defer stop_traffic $! local -a rate_1 rate_1=($(measure_rate $swp1 $h3 rx_octets_prio_1 "prio 1")) @@ -290,14 +298,9 @@ test_ets_strict() check_err $(bc <<< "$rel21 > 105") # Start the high-prio stream--now both streams run. - start_traffic $h2.222 192.0.2.65 192.0.2.66 $h3mac - rate_3=($(measure_rate $swp2 $h3 rx_octets_prio_2 "prio 2 w/ 1")) - check_err $? "Could not get high enough prio-2 ingress rate with prio-1" + rate_3=($(run_hi_measure_rate "prio 2+1")) local rate_3_in=${rate_3[0]} local rate_3_eg=${rate_3[1]} - stop_traffic # $h2.222 - - stop_traffic # $h1.111 # High-prio should have about the same throughput whether or not # low-prio is in the system. diff --git a/tools/testing/selftests/drivers/net/mlxsw/qos_max_descriptors.sh b/tools/testing/selftests/drivers/net/mlxsw/qos_max_descriptors.sh index 5ac4f795e333..2b5d2c2751d5 100755 --- a/tools/testing/selftests/drivers/net/mlxsw/qos_max_descriptors.sh +++ b/tools/testing/selftests/drivers/net/mlxsw/qos_max_descriptors.sh @@ -69,127 +69,103 @@ mlxsw_only_on_spectrum 2+ || exit h1_create() { simple_if_init $h1 + defer simple_if_fini $h1 vlan_create $h1 111 v$h1 192.0.2.33/28 + defer vlan_destroy $h1 111 ip link set dev $h1.111 type vlan egress-qos-map 0:1 } -h1_destroy() -{ - vlan_destroy $h1 111 - - simple_if_fini $h1 -} - h2_create() { simple_if_init $h2 + defer simple_if_fini $h2 vlan_create $h2 111 v$h2 192.0.2.34/28 -} - -h2_destroy() -{ - vlan_destroy $h2 111 - - simple_if_fini $h2 + defer vlan_destroy $h2 111 } switch_create() { # pools # ----- + # devlink_pool_size_thtype_restore needs to be done first so that we can + # reset the various limits to values that are only valid for the + # original static / dynamic setting. 
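[Editor's note] The comment above is about ordering: plain defer runs cleanups strictly LIFO, but the pool-size/threshold-type restore must happen before the per-port threshold restores, which is what the defer_prio calls just below appear to arrange (their exact semantics live in the selftest library, so treat this as an assumption). A sketch of that two-track idea, with all names invented for illustration:

/* Hypothetical two-track cleanup: "prio" entries run before the
 * regular LIFO entries when the scope exits. None of these names
 * belong to the selftest library's actual API.
 */
typedef void (*cleanup_fn)(void);

static cleanup_fn prio_track[16], norm_track[16];
static int nprio, nnorm;

static void defer_norm(cleanup_fn fn) { norm_track[nnorm++] = fn; }
static void defer_prio(cleanup_fn fn) { prio_track[nprio++] = fn; }

static void run_cleanups(void)
{
	while (nprio)
		prio_track[--nprio]();	/* priority restores first */
	while (nnorm)
		norm_track[--nnorm]();	/* then the regular LIFO track */
}
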
devlink_pool_size_thtype_save 1 - devlink_pool_size_thtype_save 6 - - devlink_port_pool_th_save $swp1 1 - devlink_port_pool_th_save $swp2 6 - - devlink_tc_bind_pool_th_save $swp1 1 ingress - devlink_tc_bind_pool_th_save $swp2 1 egress - devlink_pool_size_thtype_set 1 dynamic $MAX_POOL_SIZE + defer_prio devlink_pool_size_thtype_restore 1 + + devlink_pool_size_thtype_save 6 devlink_pool_size_thtype_set 6 static $MAX_POOL_SIZE + defer_prio devlink_pool_size_thtype_restore 6 # $swp1 # ----- ip link set dev $swp1 up + defer ip link set dev $swp1 down + vlan_create $swp1 111 + defer vlan_destroy $swp1 111 ip link set dev $swp1.111 type vlan ingress-qos-map 0:0 1:1 + devlink_port_pool_th_save $swp1 1 devlink_port_pool_th_set $swp1 1 16 + defer devlink_tc_bind_pool_th_restore $swp1 1 ingress + + devlink_tc_bind_pool_th_save $swp1 1 ingress devlink_tc_bind_pool_th_set $swp1 1 ingress 1 16 + defer devlink_port_pool_th_restore $swp1 1 tc qdisc replace dev $swp1 root handle 1: \ ets bands 8 strict 8 priomap 7 6 + defer tc qdisc del dev $swp1 root + dcb buffer set dev $swp1 prio-buffer all:0 1:1 + defer dcb buffer set dev $swp1 prio-buffer all:0 # $swp2 # ----- ip link set dev $swp2 up + defer ip link set dev $swp2 down + vlan_create $swp2 111 + defer vlan_destroy $swp2 111 ip link set dev $swp2.111 type vlan egress-qos-map 0:0 1:1 + devlink_port_pool_th_save $swp2 6 devlink_port_pool_th_set $swp2 6 $MAX_POOL_SIZE + defer devlink_tc_bind_pool_th_restore $swp2 1 egress + + devlink_tc_bind_pool_th_save $swp2 1 egress devlink_tc_bind_pool_th_set $swp2 1 egress 6 $MAX_POOL_SIZE + defer devlink_port_pool_th_restore $swp2 6 tc qdisc replace dev $swp2 root handle 1: tbf rate $SHAPER_RATE \ burst 128K limit 500M + defer tc qdisc del dev $swp2 root + tc qdisc replace dev $swp2 parent 1:1 handle 11: \ ets bands 8 strict 8 priomap 7 6 + defer tc qdisc del dev $swp2 parent 1:1 handle 11: # bridge # ------ ip link add name br1 type bridge vlan_filtering 0 + defer ip link del dev br1 + ip link set dev $swp1.111 master br1 + defer ip link set dev $swp1.111 nomaster + ip link set dev br1 up + defer ip link set dev br1 down ip link set dev $swp2.111 master br1 -} - -switch_destroy() -{ - # Do this first so that we can reset the limits to values that are only - # valid for the original static / dynamic setting. 
- devlink_pool_size_thtype_restore 6 - devlink_pool_size_thtype_restore 1 - - # bridge - # ------ - - ip link set dev $swp2.111 nomaster - - ip link set dev br1 down - ip link set dev $swp1.111 nomaster - ip link del dev br1 - - # $swp2 - # ----- - - tc qdisc del dev $swp2 parent 1:1 handle 11: - tc qdisc del dev $swp2 root - - devlink_tc_bind_pool_th_restore $swp2 1 egress - devlink_port_pool_th_restore $swp2 6 - - vlan_destroy $swp2 111 - ip link set dev $swp2 down - - # $swp1 - # ----- - - dcb buffer set dev $swp1 prio-buffer all:0 - tc qdisc del dev $swp1 root - - devlink_tc_bind_pool_th_restore $swp1 1 ingress - devlink_port_pool_th_restore $swp1 1 - - vlan_destroy $swp1 111 - ip link set dev $swp1 down + defer ip link set dev $swp2.111 nomaster } setup_prepare() @@ -203,23 +179,13 @@ setup_prepare() h2mac=$(mac_get $h2) vrf_prepare + defer vrf_cleanup h1_create h2_create switch_create } -cleanup() -{ - pre_cleanup - - switch_destroy - h2_destroy - h1_destroy - - vrf_cleanup -} - ping_ipv4() { ping_test $h1 192.0.2.34 " h1->h2" @@ -251,6 +217,7 @@ max_descriptors() log_info "Send many small packets, packet size = $pktsize bytes" start_traffic_pktsize $pktsize $h1.111 192.0.2.33 192.0.2.34 $h2mac + defer stop_traffic $! # Sleep to wait for congestion. sleep 5 @@ -268,9 +235,6 @@ max_descriptors() check_err $(bc <<< "$perc_used < $exp_perc_used") \ "Expected > $exp_perc_used% of descriptors, handle $perc_used%" - stop_traffic - sleep 1 - log_test "Maximum descriptors usage. The percentage used is $perc_used%" } diff --git a/tools/testing/selftests/drivers/net/mlxsw/qos_mc_aware.sh b/tools/testing/selftests/drivers/net/mlxsw/qos_mc_aware.sh index 6d892de43fa8..cd4a5c21360c 100755 --- a/tools/testing/selftests/drivers/net/mlxsw/qos_mc_aware.sh +++ b/tools/testing/selftests/drivers/net/mlxsw/qos_mc_aware.sh @@ -73,122 +73,114 @@ source qos_lib.sh h1_create() { simple_if_init $h1 192.0.2.65/28 - mtu_set $h1 10000 -} + defer simple_if_fini $h1 192.0.2.65/28 -h1_destroy() -{ - mtu_restore $h1 - simple_if_fini $h1 192.0.2.65/28 + mtu_set $h1 10000 + defer mtu_restore $h1 } h2_create() { simple_if_init $h2 + defer simple_if_fini $h2 + mtu_set $h2 10000 + defer mtu_restore $h2 vlan_create $h2 111 v$h2 192.0.2.129/28 + defer vlan_destroy $h2 111 ip link set dev $h2.111 type vlan egress-qos-map 0:1 } -h2_destroy() -{ - vlan_destroy $h2 111 - - mtu_restore $h2 - simple_if_fini $h2 -} - h3_create() { simple_if_init $h3 192.0.2.66/28 + defer simple_if_fini $h3 192.0.2.66/28 + mtu_set $h3 10000 + defer mtu_restore $h3 vlan_create $h3 111 v$h3 192.0.2.130/28 -} - -h3_destroy() -{ - vlan_destroy $h3 111 - - mtu_restore $h3 - simple_if_fini $h3 192.0.2.66/28 + defer vlan_destroy $h3 111 } switch_create() { ip link set dev $swp1 up + defer ip link set dev $swp1 down + mtu_set $swp1 10000 + defer mtu_restore $swp1 ip link set dev $swp2 up + defer ip link set dev $swp2 down + mtu_set $swp2 10000 + defer mtu_restore $swp2 ip link set dev $swp3 up + defer ip link set dev $swp3 down + mtu_set $swp3 10000 + defer mtu_restore $swp3 vlan_create $swp2 111 + defer vlan_destroy $swp2 111 + vlan_create $swp3 111 + defer vlan_destroy $swp3 111 tc qdisc replace dev $swp3 root handle 3: tbf rate 1gbit \ burst 128K limit 1G + defer tc qdisc del dev $swp3 root handle 3: + tc qdisc replace dev $swp3 parent 3:3 handle 33: \ prio bands 8 priomap 7 7 7 7 7 7 7 7 + defer tc qdisc del dev $swp3 parent 3:3 handle 33: ip link add name br1 type bridge vlan_filtering 0 + defer ip link del dev br1 ip link set dev br1 addrgenmode 
none ip link set dev br1 up + ip link set dev $swp1 master br1 + defer ip link set dev $swp1 nomaster + ip link set dev $swp3 master br1 + defer ip link set dev $swp3 nomaster ip link add name br111 type bridge vlan_filtering 0 + defer ip link del dev br111 ip link set dev br111 addrgenmode none ip link set dev br111 up + ip link set dev $swp2.111 master br111 + defer ip link set dev $swp2.111 nomaster + ip link set dev $swp3.111 master br111 + defer ip link set dev $swp3.111 nomaster # Make sure that ingress quotas are smaller than egress so that there is # room for both streams of traffic to be admitted to shared buffer. devlink_port_pool_th_save $swp1 0 devlink_port_pool_th_set $swp1 0 5 + defer devlink_port_pool_th_restore $swp1 0 + devlink_tc_bind_pool_th_save $swp1 0 ingress devlink_tc_bind_pool_th_set $swp1 0 ingress 0 5 + defer devlink_tc_bind_pool_th_restore $swp1 0 ingress devlink_port_pool_th_save $swp2 0 devlink_port_pool_th_set $swp2 0 5 + defer devlink_port_pool_th_restore $swp2 0 + devlink_tc_bind_pool_th_save $swp2 1 ingress devlink_tc_bind_pool_th_set $swp2 1 ingress 0 5 + defer devlink_tc_bind_pool_th_restore $swp2 1 ingress devlink_port_pool_th_save $swp3 4 devlink_port_pool_th_set $swp3 4 12 -} - -switch_destroy() -{ - devlink_port_pool_th_restore $swp3 4 - - devlink_tc_bind_pool_th_restore $swp2 1 ingress - devlink_port_pool_th_restore $swp2 0 - - devlink_tc_bind_pool_th_restore $swp1 0 ingress - devlink_port_pool_th_restore $swp1 0 - - ip link del dev br111 - ip link del dev br1 - - tc qdisc del dev $swp3 parent 3:3 handle 33: - tc qdisc del dev $swp3 root handle 3: - - vlan_destroy $swp3 111 - vlan_destroy $swp2 111 - - mtu_restore $swp3 - ip link set dev $swp3 down - - mtu_restore $swp2 - ip link set dev $swp2 down - - mtu_restore $swp1 - ip link set dev $swp1 down + defer devlink_port_pool_th_restore $swp3 4 } setup_prepare() @@ -205,6 +197,7 @@ setup_prepare() h3mac=$(mac_get $h3) vrf_prepare + defer vrf_cleanup h1_create h2_create @@ -212,45 +205,45 @@ setup_prepare() switch_create } -cleanup() +ping_ipv4() { - pre_cleanup + ping_test $h2 192.0.2.130 +} - switch_destroy - h3_destroy - h2_destroy - h1_destroy +__run_uc_measure_rate() +{ + local what=$1; shift + local -a uc_rate + + start_traffic $h2.111 192.0.2.129 192.0.2.130 $h3mac + defer stop_traffic $! + + uc_rate=($(measure_rate $swp2 $h3 rx_octets_prio_1 "$what")) + check_err $? "Could not get high enough $what ingress rate" - vrf_cleanup + echo ${uc_rate[@]} } -ping_ipv4() +run_uc_measure_rate() { - ping_test $h2 192.0.2.130 + in_defer_scope __run_uc_measure_rate "$@" } test_mc_aware() { RET=0 - local -a uc_rate - start_traffic $h2.111 192.0.2.129 192.0.2.130 $h3mac - uc_rate=($(measure_rate $swp2 $h3 rx_octets_prio_1 "UC-only")) - check_err $? "Could not get high enough UC-only ingress rate" - stop_traffic + local -a uc_rate=($(run_uc_measure_rate "UC-only")) local ucth1=${uc_rate[1]} start_traffic $h1 192.0.2.65 bc bc + defer stop_traffic $! local d0=$(date +%s) local t0=$(ethtool_stats_get $h3 rx_octets_prio_0) local u0=$(ethtool_stats_get $swp1 rx_octets_prio_0) - local -a uc_rate_2 - start_traffic $h2.111 192.0.2.129 192.0.2.130 $h3mac - uc_rate_2=($(measure_rate $swp2 $h3 rx_octets_prio_1 "UC+MC")) - check_err $? 
"Could not get high enough UC+MC ingress rate" - stop_traffic + local -a uc_rate_2=($(run_uc_measure_rate "UC+MC")) local ucth2=${uc_rate_2[1]} local d1=$(date +%s) @@ -272,8 +265,6 @@ test_mc_aware() local mc_ir=$(rate $u0 $u1 $interval) local mc_er=$(rate $t0 $t1 $interval) - stop_traffic - log_test "UC performance under MC overload" echo "UC-only throughput $(humanize $ucth1)" @@ -297,6 +288,7 @@ test_uc_aware() RET=0 start_traffic $h2.111 192.0.2.129 192.0.2.130 $h3mac + defer stop_traffic $! local d0=$(date +%s) local t0=$(ethtool_stats_get $h3 rx_octets_prio_1) @@ -326,8 +318,6 @@ test_uc_aware() ((attempts == passes)) check_err $? - stop_traffic - log_test "MC performance under UC overload" echo " ingress UC throughput $(humanize ${uc_ir})" echo " egress UC throughput $(humanize ${uc_er})" diff --git a/tools/testing/selftests/drivers/net/mlxsw/rtnetlink.sh b/tools/testing/selftests/drivers/net/mlxsw/rtnetlink.sh index 893a693ad805..45a569618424 100755 --- a/tools/testing/selftests/drivers/net/mlxsw/rtnetlink.sh +++ b/tools/testing/selftests/drivers/net/mlxsw/rtnetlink.sh @@ -186,10 +186,7 @@ bridge_vlan_flags_test() # If we did not handle references correctly, then this should produce a # trace - devlink dev reload "$DEVLINK_DEV" - - # Allow netdevices to be re-created following the reload - sleep 20 + devlink_reload log_test "bridge vlan flags" } @@ -923,12 +920,9 @@ devlink_reload_test() # devlink reload can be performed without errors RET=0 - devlink dev reload "$DEVLINK_DEV" - check_err $? "devlink reload failed" + devlink_reload log_test "devlink reload - last test" - - sleep 20 } trap cleanup EXIT diff --git a/tools/testing/selftests/drivers/net/mlxsw/sch_ets.sh b/tools/testing/selftests/drivers/net/mlxsw/sch_ets.sh index 139175fd03e7..4aaceb6b2b60 100755 --- a/tools/testing/selftests/drivers/net/mlxsw/sch_ets.sh +++ b/tools/testing/selftests/drivers/net/mlxsw/sch_ets.sh @@ -21,6 +21,7 @@ switch_create() # Create a bottleneck so that the DWRR process can kick in. tc qdisc replace dev $swp2 root handle 3: tbf rate 1gbit \ burst 128K limit 1G + defer tc qdisc del dev $swp2 root handle 3: ets_switch_create @@ -30,16 +31,27 @@ switch_create() # for the DWRR process. devlink_port_pool_th_save $swp1 0 devlink_port_pool_th_set $swp1 0 12 + defer devlink_port_pool_th_restore $swp1 0 + devlink_tc_bind_pool_th_save $swp1 0 ingress devlink_tc_bind_pool_th_set $swp1 0 ingress 0 12 + defer devlink_tc_bind_pool_th_restore $swp1 0 ingress + devlink_port_pool_th_save $swp2 4 devlink_port_pool_th_set $swp2 4 12 + defer devlink_port_pool_th_restore $swp2 4 + devlink_tc_bind_pool_th_save $swp2 7 egress devlink_tc_bind_pool_th_set $swp2 7 egress 4 5 + defer devlink_tc_bind_pool_th_restore $swp2 7 egress + devlink_tc_bind_pool_th_save $swp2 6 egress devlink_tc_bind_pool_th_set $swp2 6 egress 4 5 + defer devlink_tc_bind_pool_th_restore $swp2 6 egress + devlink_tc_bind_pool_th_save $swp2 5 egress devlink_tc_bind_pool_th_set $swp2 5 egress 4 5 + defer devlink_tc_bind_pool_th_restore $swp2 5 egress # Note: sch_ets_core.sh uses VLAN ingress-qos-map to assign packet # priorities at $swp1 based on their 802.1p headers. ingress-qos-map is @@ -47,20 +59,6 @@ switch_create() # 1:1, which is the mapping currently hard-coded by the driver. 
} -switch_destroy() -{ - devlink_tc_bind_pool_th_restore $swp2 5 egress - devlink_tc_bind_pool_th_restore $swp2 6 egress - devlink_tc_bind_pool_th_restore $swp2 7 egress - devlink_port_pool_th_restore $swp2 4 - devlink_tc_bind_pool_th_restore $swp1 0 ingress - devlink_port_pool_th_restore $swp1 0 - - ets_switch_destroy - - tc qdisc del dev $swp2 root handle 3: -} - # Callback from sch_ets_tests.sh collect_stats() { diff --git a/tools/testing/selftests/drivers/net/mlxsw/sch_red_core.sh b/tools/testing/selftests/drivers/net/mlxsw/sch_red_core.sh index 299e06a5808c..537d6baa77b7 100644 --- a/tools/testing/selftests/drivers/net/mlxsw/sch_red_core.sh +++ b/tools/testing/selftests/drivers/net/mlxsw/sch_red_core.sh @@ -75,6 +75,18 @@ source $lib_dir/lib.sh source $lib_dir/devlink_lib.sh source mlxsw_lib.sh +stop_traffic_sleep() +{ + local pid=$1; shift + + # Issuing a kill still leaves a bunch of packets lingering in the + # buffers. This traffic then arrives at the point where a follow-up test + # is already running, and can confuse the test. Therefore sleep after + # stopping traffic to flush any leftover packets. + stop_traffic "$pid" + sleep 1 +} + ipaddr() { local host=$1; shift @@ -89,39 +101,31 @@ host_create() local host=$1; shift simple_if_init $dev + defer simple_if_fini $dev + mtu_set $dev 10000 + defer mtu_restore $dev vlan_create $dev 10 v$dev $(ipaddr $host 10)/28 + defer vlan_destroy $dev 10 ip link set dev $dev.10 type vlan egress 0:0 vlan_create $dev 11 v$dev $(ipaddr $host 11)/28 + defer vlan_destroy $dev 11 ip link set dev $dev.11 type vlan egress 0:1 } -host_destroy() -{ - local dev=$1; shift - - vlan_destroy $dev 11 - vlan_destroy $dev 10 - mtu_restore $dev - simple_if_fini $dev -} - h1_create() { host_create $h1 1 } -h1_destroy() -{ - host_destroy $h1 -} - h2_create() { host_create $h2 2 + tc qdisc add dev $h2 clsact + defer tc qdisc del dev $h2 clsact # Some of the tests in this suite use multicast traffic. As this traffic # enters BR2_10 resp. BR2_11, it is flooded to all other ports. Thus @@ -137,15 +141,9 @@ h2_create() # Prevent this by adding a shaper which limits the traffic in $h2 to # 1Gbps. 
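[Editor's note] stop_traffic_sleep, added at the top of sch_red_core.sh above, exists because killing the traffic generator still leaves packets queued in the switch buffers, and those leftovers can land in the middle of the next test. Roughly the same shape in C, assuming the generator runs as a background child identified by PID (a sketch of the idea, not the library helper):

#include <signal.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>

/* Stop a background traffic generator, then give already-queued
 * packets a second to drain before the next test starts measuring.
 */
static void stop_traffic_and_drain(pid_t pid)
{
	kill(pid, SIGTERM);
	waitpid(pid, NULL, 0);
	sleep(1);	/* flush leftover packets out of the buffers */
}
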
- tc qdisc replace dev $h2 root handle 10: tbf rate 1gbit \ + tc qdisc replace dev $h2 root handle 10: tbf rate 200mbit \ burst 128K limit 1G -} - -h2_destroy() -{ - tc qdisc del dev $h2 root handle 10: - tc qdisc del dev $h2 clsact - host_destroy $h2 + defer tc qdisc del dev $h2 root handle 10: } h3_create() @@ -153,40 +151,54 @@ h3_create() host_create $h3 3 } -h3_destroy() -{ - host_destroy $h3 -} - switch_create() { local intf local vlan ip link add dev br1_10 type bridge + defer ip link del dev br1_10 + ip link add dev br1_11 type bridge + defer ip link del dev br1_11 ip link add dev br2_10 type bridge + defer ip link del dev br2_10 + ip link add dev br2_11 type bridge + defer ip link del dev br2_11 for intf in $swp1 $swp2 $swp3 $swp4 $swp5; do ip link set dev $intf up + defer ip link set dev $intf down + mtu_set $intf 10000 + defer mtu_restore $intf done for intf in $swp1 $swp4; do for vlan in 10 11; do vlan_create $intf $vlan + defer vlan_destroy $intf $vlan + ip link set dev $intf.$vlan master br1_$vlan + defer ip link set dev $intf.$vlan nomaster + ip link set dev $intf.$vlan up + defer ip link set dev $intf.$vlan up done done for intf in $swp2 $swp3 $swp5; do for vlan in 10 11; do vlan_create $intf $vlan + defer vlan_destroy $intf $vlan + ip link set dev $intf.$vlan master br2_$vlan + defer ip link set dev $intf.$vlan nomaster + ip link set dev $intf.$vlan up + defer ip link set dev $intf.$vlan up done done @@ -199,51 +211,27 @@ switch_create() done for intf in $swp3 $swp4; do - tc qdisc replace dev $intf root handle 1: tbf rate 1gbit \ + tc qdisc replace dev $intf root handle 1: tbf rate 200mbit \ burst 128K limit 1G + defer tc qdisc del dev $intf root handle 1: done ip link set dev br1_10 up + defer ip link set dev br1_10 down + ip link set dev br1_11 up + defer ip link set dev br1_11 down + ip link set dev br2_10 up + defer ip link set dev br2_10 down + ip link set dev br2_11 up + defer ip link set dev br2_11 down local size=$(devlink_pool_size_thtype 0 | cut -d' ' -f 1) devlink_port_pool_th_save $swp3 8 devlink_port_pool_th_set $swp3 8 $size -} - -switch_destroy() -{ - local intf - local vlan - - devlink_port_pool_th_restore $swp3 8 - - ip link set dev br2_11 down - ip link set dev br2_10 down - ip link set dev br1_11 down - ip link set dev br1_10 down - - for intf in $swp4 $swp3; do - tc qdisc del dev $intf root handle 1: - done - - for intf in $swp5 $swp3 $swp2 $swp4 $swp1; do - for vlan in 11 10; do - ip link set dev $intf.$vlan down - ip link set dev $intf.$vlan nomaster - vlan_destroy $intf $vlan - done - - mtu_restore $intf - ip link set dev $intf down - done - - ip link del dev br2_11 - ip link del dev br2_10 - ip link del dev br1_11 - ip link del dev br1_10 + defer devlink_port_pool_th_restore $swp3 8 } setup_prepare() @@ -263,6 +251,7 @@ setup_prepare() h3_mac=$(mac_get $h3) vrf_prepare + defer vrf_cleanup h1_create h2_create @@ -270,18 +259,6 @@ setup_prepare() switch_create } -cleanup() -{ - pre_cleanup - - switch_destroy - h3_destroy - h2_destroy - h1_destroy - - vrf_cleanup -} - ping_ipv4() { ping_test $h1.10 $(ipaddr 3 10) " from host 1, vlan 10" @@ -372,6 +349,7 @@ build_backlog() local i=0 while :; do + sleep 1 local cur=$(busywait 1100 until_counter_is "> $cur" \ get_qdisc_backlog $vlan) local diff=$((size - cur)) @@ -449,6 +427,7 @@ __do_ecn_test() start_tcp_traffic $h1.$vlan $(ipaddr 1 $vlan) $(ipaddr 3 $vlan) \ $h3_mac tos=0x01 + defer stop_traffic_sleep $! 
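[Editor's note] The TCP streams in __do_ecn_test above are started with tos=0x01, i.e. the ECT(1) ECN codepoint, which is what allows a congested RED/ECN qdisc to mark packets CE instead of early-dropping them. Setting the same codepoint from C on an IPv4 socket would look like this (sketch):

#include <netinet/in.h>
#include <netinet/ip.h>
#include <sys/socket.h>

/* Mark outgoing IPv4 packets as ECN-capable, ECT(1) = 0x01, so an
 * ECN-enabled qdisc can set CE rather than early-drop on congestion.
 */
static int set_ect1(int fd)
{
	int tos = 0x01;

	return setsockopt(fd, IPPROTO_IP, IP_TOS, &tos, sizeof(tos));
}

The UDP flows used elsewhere in the same tests deliberately omit this, which is why they are expected to be early-dropped rather than marked.
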
sleep 1 ecn_test_common "$name" "$get_nmarked" $vlan $limit @@ -460,9 +439,6 @@ __do_ecn_test() build_backlog $vlan $((2 * limit)) udp >/dev/null check_fail $? "UDP traffic went into backlog instead of being early-dropped" log_test "TC $((vlan - 10)): $name backlog > limit: UDP early-dropped" - - stop_traffic - sleep 1 } do_ecn_test() @@ -470,7 +446,8 @@ do_ecn_test() local vlan=$1; shift local limit=$1; shift - __do_ecn_test get_nmarked "$vlan" "$limit" + in_defer_scope \ + __do_ecn_test get_nmarked "$vlan" "$limit" } do_ecn_test_perband() @@ -479,10 +456,11 @@ do_ecn_test_perband() local limit=$1; shift mlxsw_only_on_spectrum 3+ || return - __do_ecn_test get_qdisc_nmarked "$vlan" "$limit" "per-band ECN" + in_defer_scope \ + __do_ecn_test get_qdisc_nmarked "$vlan" "$limit" "per-band ECN" } -do_ecn_nodrop_test() +__do_ecn_nodrop_test() { local vlan=$1; shift local limit=$1; shift @@ -490,6 +468,7 @@ do_ecn_nodrop_test() start_tcp_traffic $h1.$vlan $(ipaddr 1 $vlan) $(ipaddr 3 $vlan) \ $h3_mac tos=0x01 + defer stop_traffic_sleep $! sleep 1 ecn_test_common "$name" get_nmarked $vlan $limit @@ -501,12 +480,15 @@ do_ecn_nodrop_test() build_backlog $vlan $((2 * limit)) udp >/dev/null check_err $? "UDP traffic was early-dropped instead of getting into backlog" log_test "TC $((vlan - 10)): $name backlog > limit: UDP not dropped" +} - stop_traffic - sleep 1 +do_ecn_nodrop_test() +{ + in_defer_scope \ + __do_ecn_nodrop_test "$@" } -do_red_test() +__do_red_test() { local vlan=$1; shift local limit=$1; shift @@ -517,6 +499,7 @@ do_red_test() # is above limit. start_tcp_traffic $h1.$vlan $(ipaddr 1 $vlan) $(ipaddr 3 $vlan) \ $h3_mac tos=0x01 + defer stop_traffic_sleep $! # Pushing below the queue limit should work. RET=0 @@ -532,17 +515,21 @@ do_red_test() check_fail $? "Traffic went into backlog instead of being early-dropped" pct=$(check_marking get_nmarked $vlan "== 0") check_err $? "backlog $backlog / $limit Got $pct% marked packets, expected == 0." + backlog=$(get_qdisc_backlog $vlan) local diff=$((limit - backlog)) pct=$((100 * diff / limit)) - ((-10 <= pct && pct <= 10)) - check_err $? "backlog $backlog / $limit expected <= 10% distance" + ((-15 <= pct && pct <= 15)) + check_err $? "backlog $backlog / $limit expected <= 15% distance" log_test "TC $((vlan - 10)): RED backlog > limit" +} - stop_traffic - sleep 1 +do_red_test() +{ + in_defer_scope \ + __do_red_test "$@" } -do_mc_backlog_test() +__do_mc_backlog_test() { local vlan=$1; shift local limit=$1; shift @@ -552,7 +539,10 @@ do_mc_backlog_test() RET=0 start_tcp_traffic $h1.$vlan $(ipaddr 1 $vlan) $(ipaddr 3 $vlan) bc + defer stop_traffic_sleep $! + start_tcp_traffic $h2.$vlan $(ipaddr 2 $vlan) $(ipaddr 3 $vlan) bc + defer stop_traffic_sleep $! qbl=$(busywait 5000 until_counter_is ">= 500000" \ get_qdisc_backlog $vlan) @@ -565,13 +555,16 @@ do_mc_backlog_test() get_mc_transmit_queue $vlan) check_err $? "MC backlog reported by qdisc not visible in ethtool" - stop_traffic - stop_traffic - log_test "TC $((vlan - 10)): Qdisc reports MC backlog" } -do_mark_test() +do_mc_backlog_test() +{ + in_defer_scope \ + __do_mc_backlog_test "$@" +} + +__do_mark_test() { local vlan=$1; shift local limit=$1; shift @@ -586,6 +579,7 @@ do_mark_test() start_tcp_traffic $h1.$vlan $(ipaddr 1 $vlan) $(ipaddr 3 $vlan) \ $h3_mac tos=0x01 + defer stop_traffic_sleep $! # Create a bit of a backlog and observe no mirroring due to marks. 
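[Editor's note] __do_red_test above also loosens the backlog accuracy check from a ±10% to a ±15% relative distance from the configured limit. The check is plain percentage arithmetic; written out in C:

#include <stdbool.h>

/* True if the measured backlog is within tol percent of the RED
 * limit, mirroring the shell check ((-15 <= pct && pct <= 15)).
 */
static bool backlog_close_enough(long backlog, long limit, int tol)
{
	long pct = 100 * (limit - backlog) / limit;

	return -tol <= pct && pct <= tol;
}
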
qevent_rule_install_$subtest @@ -600,7 +594,7 @@ do_mark_test() # Above limit, everything should be mirrored, we should see lots of # packets. build_backlog $vlan $((3 * limit / 2)) tcp tos=0x01 >/dev/null - busywait_for_counter 1100 +10000 \ + busywait_for_counter 1100 +2500 \ $fetch_counter > /dev/null check_err_fail "$should_fail" $? "ECN-marked packets $subtest'd" @@ -615,12 +609,15 @@ do_mark_test() else log_test "TC $((vlan - 10)): marked packets $subtest'd" fi +} - stop_traffic - sleep 1 +do_mark_test() +{ + in_defer_scope \ + __do_mark_test "$@" } -do_drop_test() +__do_drop_test() { local vlan=$1; shift local limit=$1; shift @@ -635,6 +632,7 @@ do_drop_test() RET=0 start_traffic $h1.$vlan $(ipaddr 1 $vlan) $(ipaddr 3 $vlan) $h3_mac + defer stop_traffic_sleep $! # Create a bit of a backlog and observe no mirroring due to drops. qevent_rule_install_$subtest @@ -651,25 +649,30 @@ do_drop_test() build_backlog $vlan $((3 * limit / 2)) udp >/dev/null base=$($fetch_counter) - send_packets $vlan udp 11 + send_packets $vlan udp 100 - now=$(busywait 1100 until_counter_is ">= $((base + 10))" $fetch_counter) - check_err $? "Dropped packets not observed: 11 expected, $((now - base)) seen" + now=$(busywait 1100 until_counter_is ">= $((base + 95))" $fetch_counter) + check_err $? "${trigger}ped packets not observed: 100 expected, $((now - base)) seen" # When no extra traffic is injected, there should be no mirroring. - busywait 1100 until_counter_is ">= $((base + 20))" $fetch_counter >/dev/null + busywait 1100 until_counter_is ">= $((base + 110))" \ + $fetch_counter >/dev/null check_fail $? "Spurious packets observed" # When the rule is uninstalled, there should be no mirroring. qevent_rule_uninstall_$subtest - send_packets $vlan udp 11 - busywait 1100 until_counter_is ">= $((base + 20))" $fetch_counter >/dev/null - check_fail $? "Spurious packets observed after uninstall" + send_packets $vlan udp 100 + now=$(busywait 1100 until_counter_is ">= $((base + 110))" \ + $fetch_counter) + check_fail $? "$((now - base)) spurious packets observed after uninstall" log_test "TC $((vlan - 10)): ${trigger}ped packets $subtest'd" +} - stop_traffic - sleep 1 +do_drop_test() +{ + in_defer_scope \ + __do_drop_test "$@" } qevent_rule_install_mirror() diff --git a/tools/testing/selftests/drivers/net/mlxsw/sch_red_ets.sh b/tools/testing/selftests/drivers/net/mlxsw/sch_red_ets.sh index 8ecddafa79b3..8902a115d9cd 100755 --- a/tools/testing/selftests/drivers/net/mlxsw/sch_red_ets.sh +++ b/tools/testing/selftests/drivers/net/mlxsw/sch_red_ets.sh @@ -20,8 +20,8 @@ source sch_red_core.sh # $BACKLOG2 are far enough not to overlap, so that we can assume that if we do # see (do not see) marking, it is actually due to the configuration of that one # TC, and not due to configuration of the other TC leaking over. 
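[Editor's note] __do_drop_test above moves from 11 injected packets to 100, accepting the run once the counter has advanced by at least 95 and treating an advance reaching 110 or more as spurious traffic; the wider margins make the pass/fail bands far less sensitive to a single stray packet. The resulting acceptance window, spelled out in C:

#include <stdbool.h>

/* Accept the counter delta if it is consistent with 100 injected
 * packets: at least 95 observed, and the counter never reaching a
 * delta of 110, which the test treats as spurious mirroring.
 */
static bool drop_counter_ok(long base, long now)
{
	long delta = now - base;

	return delta >= 95 && delta < 110;
}
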
-BACKLOG1=200000 -BACKLOG2=500000 +BACKLOG1=400000 +BACKLOG2=1000000 install_root_qdisc() { @@ -35,7 +35,7 @@ install_qdisc_tc0() tc qdisc add dev $swp3 parent 10:8 handle 108: red \ limit 1000000 min $BACKLOG1 max $((BACKLOG1 + 1)) \ - probability 1.0 avpkt 8000 burst 38 "${args[@]}" + probability 1.0 avpkt 8000 burst 51 "${args[@]}" } install_qdisc_tc1() @@ -44,7 +44,7 @@ install_qdisc_tc1() tc qdisc add dev $swp3 parent 10:7 handle 107: red \ limit 1000000 min $BACKLOG2 max $((BACKLOG2 + 1)) \ - probability 1.0 avpkt 8000 burst 63 "${args[@]}" + probability 1.0 avpkt 8000 burst 126 "${args[@]}" } install_qdisc() @@ -80,36 +80,34 @@ uninstall_qdisc() ecn_test() { install_qdisc ecn + defer uninstall_qdisc do_ecn_test 10 $BACKLOG1 do_ecn_test 11 $BACKLOG2 - - uninstall_qdisc } ecn_test_perband() { install_qdisc ecn + defer uninstall_qdisc do_ecn_test_perband 10 $BACKLOG1 do_ecn_test_perband 11 $BACKLOG2 - - uninstall_qdisc } ecn_nodrop_test() { install_qdisc ecn nodrop + defer uninstall_qdisc do_ecn_nodrop_test 10 $BACKLOG1 do_ecn_nodrop_test 11 $BACKLOG2 - - uninstall_qdisc } red_test() { install_qdisc + defer uninstall_qdisc # Make sure that we get the non-zero value if there is any. local cur=$(busywait 1100 until_counter_is "> 0" \ @@ -120,50 +118,44 @@ red_test() do_red_test 10 $BACKLOG1 do_red_test 11 $BACKLOG2 - - uninstall_qdisc } mc_backlog_test() { install_qdisc + defer uninstall_qdisc # Note that the backlog numbers here do not correspond to RED # configuration, but are arbitrary. do_mc_backlog_test 10 $BACKLOG1 do_mc_backlog_test 11 $BACKLOG2 - - uninstall_qdisc } red_mirror_test() { install_qdisc qevent early_drop block 10 + defer uninstall_qdisc do_drop_mirror_test 10 $BACKLOG1 early_drop do_drop_mirror_test 11 $BACKLOG2 early_drop - - uninstall_qdisc } red_trap_test() { install_qdisc qevent early_drop block 10 + defer uninstall_qdisc do_drop_trap_test 10 $BACKLOG1 early_drop do_drop_trap_test 11 $BACKLOG2 early_drop - - uninstall_qdisc } ecn_mirror_test() { install_qdisc ecn qevent mark block 10 + defer uninstall_qdisc do_mark_mirror_test 10 $BACKLOG1 do_mark_mirror_test 11 $BACKLOG2 - - uninstall_qdisc } bail_on_lldpad "configure DCB" "configure Qdiscs" diff --git a/tools/testing/selftests/drivers/net/mlxsw/sch_red_root.sh b/tools/testing/selftests/drivers/net/mlxsw/sch_red_root.sh index 159108d02895..e9043771787b 100755 --- a/tools/testing/selftests/drivers/net/mlxsw/sch_red_root.sh +++ b/tools/testing/selftests/drivers/net/mlxsw/sch_red_root.sh @@ -32,45 +32,51 @@ uninstall_qdisc() ecn_test() { install_qdisc ecn + defer uninstall_qdisc + do_ecn_test 10 $BACKLOG - uninstall_qdisc } ecn_test_perband() { install_qdisc ecn + defer uninstall_qdisc + do_ecn_test_perband 10 $BACKLOG - uninstall_qdisc } ecn_nodrop_test() { install_qdisc ecn nodrop + defer uninstall_qdisc + do_ecn_nodrop_test 10 $BACKLOG - uninstall_qdisc } red_test() { install_qdisc + defer uninstall_qdisc + do_red_test 10 $BACKLOG - uninstall_qdisc } mc_backlog_test() { install_qdisc + defer uninstall_qdisc + # Note that the backlog value here does not correspond to RED # configuration, but is arbitrary. 
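[Editor's note] The burst values updated above track the doubled backlogs via the rule of thumb from tc-red(8), burst = (min + min + max) / (3 * avpkt), rounded up: with avpkt 8000, min 400000 yields 51 and min 1000000 yields 126, exactly the new numbers in install_qdisc_tc0/tc1. A quick check of that arithmetic:

#include <stdio.h>

/* tc-red(8) suggests burst = (min + min + max) / (3 * avpkt);
 * round up so the token bucket is never undersized.
 */
static long red_burst(long min, long max, long avpkt)
{
	return (2 * min + max + 3 * avpkt - 1) / (3 * avpkt);
}

int main(void)
{
	/* prints "51 126", matching the updated qdisc parameters */
	printf("%ld %ld\n",
	       red_burst(400000, 400001, 8000),
	       red_burst(1000000, 1000001, 8000));
	return 0;
}
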
do_mc_backlog_test 10 $BACKLOG - uninstall_qdisc } red_mirror_test() { install_qdisc qevent early_drop block 10 + defer uninstall_qdisc + do_drop_mirror_test 10 $BACKLOG - uninstall_qdisc } bail_on_lldpad "configure DCB" "configure Qdiscs" diff --git a/tools/testing/selftests/drivers/net/shaper.py b/tools/testing/selftests/drivers/net/shaper.py new file mode 100755 index 000000000000..11310f19bfa0 --- /dev/null +++ b/tools/testing/selftests/drivers/net/shaper.py @@ -0,0 +1,461 @@ +#!/usr/bin/env python3 +# SPDX-License-Identifier: GPL-2.0 + +from lib.py import ksft_run, ksft_exit, ksft_eq, ksft_true, KsftSkipEx +from lib.py import EthtoolFamily, NetshaperFamily +from lib.py import NetDrvEnv +from lib.py import NlError +from lib.py import cmd + +def get_shapers(cfg, nl_shaper) -> None: + try: + shapers = nl_shaper.get({'ifindex': cfg.ifindex}, dump=True) + except NlError as e: + if e.error == 95: + raise KsftSkipEx("shapers not supported by the device") + raise + + # Default configuration: no shapers configured. + ksft_eq(len(shapers), 0) + +def get_caps(cfg, nl_shaper) -> None: + try: + caps = nl_shaper.cap_get({'ifindex': cfg.ifindex}, dump=True) + except NlError as e: + if e.error == 95: + raise KsftSkipEx("shapers not supported by the device") + raise + + # Each device implementing shaper support must support some + # features in at least a scope. + ksft_true(len(caps)> 0) + +def set_qshapers(cfg, nl_shaper) -> None: + try: + caps = nl_shaper.cap_get({'ifindex': cfg.ifindex, + 'scope':'queue'}) + except NlError as e: + if e.error == 95: + raise KsftSkipEx("shapers not supported by the device") + raise + if not 'support-bw-max' in caps or not 'support-metric-bps' in caps: + raise KsftSkipEx("device does not support queue scope shapers with bw_max and metric bps") + + cfg.queues = True; + netnl = EthtoolFamily() + channels = netnl.channels_get({'header': {'dev-index': cfg.ifindex}}) + if channels['combined-count'] == 0: + cfg.rx_type = 'rx' + cfg.nr_queues = channels['rx-count'] + else: + cfg.rx_type = 'combined' + cfg.nr_queues = channels['combined-count'] + if cfg.nr_queues < 3: + raise KsftSkipEx(f"device does not support enough queues min 3 found {cfg.nr_queues}") + + nl_shaper.set({'ifindex': cfg.ifindex, + 'handle': {'scope': 'queue', 'id': 1}, + 'metric': 'bps', + 'bw-max': 10000}) + nl_shaper.set({'ifindex': cfg.ifindex, + 'handle': {'scope': 'queue', 'id': 2}, + 'metric': 'bps', + 'bw-max': 20000}) + + # Querying a specific shaper not yet configured must fail. 
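[Editor's note] Throughout the new shaper.py above, an NlError carrying error code 95 (EOPNOTSUPP on Linux) is converted into a test skip rather than a failure, since net-shaper support is optional per device. The same convention expressed in C, where err is assumed to be a negative errno value and KSFT_SKIP is the standard kselftest exit code for a skipped test:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

#define KSFT_SKIP 4	/* kselftest exit code for a skipped test */

/* Treat EOPNOTSUPP (95 on Linux) as "feature absent, skip the
 * test", and any other nonzero error as a real failure.
 */
static void check_shaper_err(int err)
{
	if (err == -EOPNOTSUPP) {
		fprintf(stderr, "shapers not supported by the device\n");
		exit(KSFT_SKIP);
	}
	if (err)
		exit(EXIT_FAILURE);
}
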
+ raised = False + try: + shaper_q0 = nl_shaper.get({'ifindex': cfg.ifindex, + 'handle': {'scope': 'queue', 'id': 0}}) + except (NlError): + raised = True + ksft_eq(raised, True) + + shaper_q1 = nl_shaper.get({'ifindex': cfg.ifindex, + 'handle': {'scope': 'queue', 'id': 1}}) + ksft_eq(shaper_q1, {'ifindex': cfg.ifindex, + 'parent': {'scope': 'netdev'}, + 'handle': {'scope': 'queue', 'id': 1}, + 'metric': 'bps', + 'bw-max': 10000}) + + shapers = nl_shaper.get({'ifindex': cfg.ifindex}, dump=True) + ksft_eq(shapers, [{'ifindex': cfg.ifindex, + 'parent': {'scope': 'netdev'}, + 'handle': {'scope': 'queue', 'id': 1}, + 'metric': 'bps', + 'bw-max': 10000}, + {'ifindex': cfg.ifindex, + 'parent': {'scope': 'netdev'}, + 'handle': {'scope': 'queue', 'id': 2}, + 'metric': 'bps', + 'bw-max': 20000}]) + +def del_qshapers(cfg, nl_shaper) -> None: + if not cfg.queues: + raise KsftSkipEx("queue shapers not supported by device, skipping delete") + + nl_shaper.delete({'ifindex': cfg.ifindex, + 'handle': {'scope': 'queue', 'id': 2}}) + nl_shaper.delete({'ifindex': cfg.ifindex, + 'handle': {'scope': 'queue', 'id': 1}}) + shapers = nl_shaper.get({'ifindex': cfg.ifindex}, dump=True) + ksft_eq(len(shapers), 0) + +def set_nshapers(cfg, nl_shaper) -> None: + # Check required features. + try: + caps = nl_shaper.cap_get({'ifindex': cfg.ifindex, + 'scope':'netdev'}) + except NlError as e: + if e.error == 95: + raise KsftSkipEx("shapers not supported by the device") + raise + if not 'support-bw-max' in caps or not 'support-metric-bps' in caps: + raise KsftSkipEx("device does not support nested netdev scope shapers with weight") + + cfg.netdev = True; + nl_shaper.set({'ifindex': cfg.ifindex, + 'handle': {'scope': 'netdev', 'id': 0}, + 'bw-max': 100000}) + + shapers = nl_shaper.get({'ifindex': cfg.ifindex}, dump=True) + ksft_eq(shapers, [{'ifindex': cfg.ifindex, + 'handle': {'scope': 'netdev'}, + 'metric': 'bps', + 'bw-max': 100000}]) + +def del_nshapers(cfg, nl_shaper) -> None: + if not cfg.netdev: + raise KsftSkipEx("netdev shaper not supported by device, skipping delete") + + nl_shaper.delete({'ifindex': cfg.ifindex, + 'handle': {'scope': 'netdev'}}) + shapers = nl_shaper.get({'ifindex': cfg.ifindex}, dump=True) + ksft_eq(len(shapers), 0) + +def basic_groups(cfg, nl_shaper) -> None: + if not cfg.netdev: + raise KsftSkipEx("netdev shaper not supported by the device") + if cfg.nr_queues < 3: + raise KsftSkipEx(f"netdev does not have enough queues min 3 reported {cfg.nr_queues}") + + try: + caps = nl_shaper.cap_get({'ifindex': cfg.ifindex, + 'scope':'queue'}) + except NlError as e: + if e.error == 95: + raise KsftSkipEx("shapers not supported by the device") + raise + if not 'support-weight' in caps: + raise KsftSkipEx("device does not support queue scope shapers with weight") + + node_handle = nl_shaper.group({ + 'ifindex': cfg.ifindex, + 'leaves':[{'handle': {'scope': 'queue', 'id': 1}, + 'weight': 1}, + {'handle': {'scope': 'queue', 'id': 2}, + 'weight': 2}], + 'handle': {'scope':'netdev'}, + 'metric': 'bps', + 'bw-max': 10000}) + ksft_eq(node_handle, {'ifindex': cfg.ifindex, + 'handle': {'scope': 'netdev'}}) + + shaper = nl_shaper.get({'ifindex': cfg.ifindex, + 'handle': {'scope': 'queue', 'id': 1}}) + ksft_eq(shaper, {'ifindex': cfg.ifindex, + 'parent': {'scope': 'netdev'}, + 'handle': {'scope': 'queue', 'id': 1}, + 'weight': 1 }) + + nl_shaper.delete({'ifindex': cfg.ifindex, + 'handle': {'scope': 'queue', 'id': 2}}) + nl_shaper.delete({'ifindex': cfg.ifindex, + 'handle': {'scope': 'queue', 'id': 1}}) + + # Deleting 
all the leaves shaper does not affect the node one + # when the latter has 'netdev' scope. + shapers = nl_shaper.get({'ifindex': cfg.ifindex}, dump=True) + ksft_eq(len(shapers), 1) + + nl_shaper.delete({'ifindex': cfg.ifindex, + 'handle': {'scope': 'netdev'}}) + +def qgroups(cfg, nl_shaper) -> None: + if cfg.nr_queues < 4: + raise KsftSkipEx(f"netdev does not have enough queues min 4 reported {cfg.nr_queues}") + try: + caps = nl_shaper.cap_get({'ifindex': cfg.ifindex, + 'scope':'node'}) + except NlError as e: + if e.error == 95: + raise KsftSkipEx("shapers not supported by the device") + raise + if not 'support-bw-max' in caps or not 'support-metric-bps' in caps: + raise KsftSkipEx("device does not support node scope shapers with bw_max and metric bps") + try: + caps = nl_shaper.cap_get({'ifindex': cfg.ifindex, + 'scope':'queue'}) + except NlError as e: + if e.error == 95: + raise KsftSkipEx("shapers not supported by the device") + raise + if not 'support-nesting' in caps or not 'support-weight' in caps or not 'support-metric-bps' in caps: + raise KsftSkipEx("device does not support nested queue scope shapers with weight") + + cfg.groups = True; + node_handle = nl_shaper.group({ + 'ifindex': cfg.ifindex, + 'leaves':[{'handle': {'scope': 'queue', 'id': 1}, + 'weight': 3}, + {'handle': {'scope': 'queue', 'id': 2}, + 'weight': 2}], + 'handle': {'scope':'node'}, + 'metric': 'bps', + 'bw-max': 10000}) + node_id = node_handle['handle']['id'] + + shaper = nl_shaper.get({'ifindex': cfg.ifindex, + 'handle': {'scope': 'queue', 'id': 1}}) + ksft_eq(shaper, {'ifindex': cfg.ifindex, + 'parent': {'scope': 'node', 'id': node_id}, + 'handle': {'scope': 'queue', 'id': 1}, + 'weight': 3}) + shaper = nl_shaper.get({'ifindex': cfg.ifindex, + 'handle': {'scope': 'node', 'id': node_id}}) + ksft_eq(shaper, {'ifindex': cfg.ifindex, + 'handle': {'scope': 'node', 'id': node_id}, + 'parent': {'scope': 'netdev'}, + 'metric': 'bps', + 'bw-max': 10000}) + + # Grouping to a specified, not existing node scope shaper must fail + raised = False + try: + nl_shaper.group({ + 'ifindex': cfg.ifindex, + 'leaves':[{'handle': {'scope': 'queue', 'id': 3}, + 'weight': 3}], + 'handle': {'scope':'node', 'id': node_id + 1}, + 'metric': 'bps', + 'bw-max': 10000}) + + except (NlError): + raised = True + ksft_eq(raised, True) + + # Add to an existing node + node_handle = nl_shaper.group({ + 'ifindex': cfg.ifindex, + 'leaves':[{'handle': {'scope': 'queue', 'id': 3}, + 'weight': 4}], + 'handle': {'scope':'node', 'id': node_id}}) + ksft_eq(node_handle, {'ifindex': cfg.ifindex, + 'handle': {'scope': 'node', 'id': node_id}}) + + shaper = nl_shaper.get({'ifindex': cfg.ifindex, + 'handle': {'scope': 'queue', 'id': 3}}) + ksft_eq(shaper, {'ifindex': cfg.ifindex, + 'parent': {'scope': 'node', 'id': node_id}, + 'handle': {'scope': 'queue', 'id': 3}, + 'weight': 4}) + + nl_shaper.delete({'ifindex': cfg.ifindex, + 'handle': {'scope': 'queue', 'id': 2}}) + nl_shaper.delete({'ifindex': cfg.ifindex, + 'handle': {'scope': 'queue', 'id': 1}}) + + # Deleting a non empty node will move the leaves downstream. + nl_shaper.delete({'ifindex': cfg.ifindex, + 'handle': {'scope': 'node', 'id': node_id}}) + shapers = nl_shaper.get({'ifindex': cfg.ifindex}, dump=True) + ksft_eq(shapers, [{'ifindex': cfg.ifindex, + 'parent': {'scope': 'netdev'}, + 'handle': {'scope': 'queue', 'id': 3}, + 'weight': 4}]) + + # Finish and verify the complete cleanup. 
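+    # (Per the dump above, queue 3 was re-parented to the netdev scope when
+    # its node was deleted; removing it should leave no shapers behind.)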
+ nl_shaper.delete({'ifindex': cfg.ifindex, + 'handle': {'scope': 'queue', 'id': 3}}) + shapers = nl_shaper.get({'ifindex': cfg.ifindex}, dump=True) + ksft_eq(len(shapers), 0) + +def delegation(cfg, nl_shaper) -> None: + if not cfg.groups: + raise KsftSkipEx("device does not support node scope") + try: + caps = nl_shaper.cap_get({'ifindex': cfg.ifindex, + 'scope':'node'}) + except NlError as e: + if e.error == 95: + raise KsftSkipEx("node scope shapers not supported by the device") + raise + if not 'support-nesting' in caps: + raise KsftSkipEx("device does not support node scope shapers nesting") + + node_handle = nl_shaper.group({ + 'ifindex': cfg.ifindex, + 'leaves':[{'handle': {'scope': 'queue', 'id': 1}, + 'weight': 3}, + {'handle': {'scope': 'queue', 'id': 2}, + 'weight': 2}, + {'handle': {'scope': 'queue', 'id': 3}, + 'weight': 1}], + 'handle': {'scope':'node'}, + 'metric': 'bps', + 'bw-max': 10000}) + node_id = node_handle['handle']['id'] + + # Create the nested node and validate the hierarchy + nested_node_handle = nl_shaper.group({ + 'ifindex': cfg.ifindex, + 'leaves':[{'handle': {'scope': 'queue', 'id': 1}, + 'weight': 3}, + {'handle': {'scope': 'queue', 'id': 2}, + 'weight': 2}], + 'handle': {'scope':'node'}, + 'metric': 'bps', + 'bw-max': 5000}) + nested_node_id = nested_node_handle['handle']['id'] + ksft_true(nested_node_id != node_id) + shapers = nl_shaper.get({'ifindex': cfg.ifindex}, dump=True) + ksft_eq(shapers, [{'ifindex': cfg.ifindex, + 'parent': {'scope': 'node', 'id': nested_node_id}, + 'handle': {'scope': 'queue', 'id': 1}, + 'weight': 3}, + {'ifindex': cfg.ifindex, + 'parent': {'scope': 'node', 'id': nested_node_id}, + 'handle': {'scope': 'queue', 'id': 2}, + 'weight': 2}, + {'ifindex': cfg.ifindex, + 'parent': {'scope': 'node', 'id': node_id}, + 'handle': {'scope': 'queue', 'id': 3}, + 'weight': 1}, + {'ifindex': cfg.ifindex, + 'parent': {'scope': 'netdev'}, + 'handle': {'scope': 'node', 'id': node_id}, + 'metric': 'bps', + 'bw-max': 10000}, + {'ifindex': cfg.ifindex, + 'parent': {'scope': 'node', 'id': node_id}, + 'handle': {'scope': 'node', 'id': nested_node_id}, + 'metric': 'bps', + 'bw-max': 5000}]) + + # Deleting a non empty node will move the leaves downstream. + nl_shaper.delete({'ifindex': cfg.ifindex, + 'handle': {'scope': 'node', 'id': nested_node_id}}) + shapers = nl_shaper.get({'ifindex': cfg.ifindex}, dump=True) + ksft_eq(shapers, [{'ifindex': cfg.ifindex, + 'parent': {'scope': 'node', 'id': node_id}, + 'handle': {'scope': 'queue', 'id': 1}, + 'weight': 3}, + {'ifindex': cfg.ifindex, + 'parent': {'scope': 'node', 'id': node_id}, + 'handle': {'scope': 'queue', 'id': 2}, + 'weight': 2}, + {'ifindex': cfg.ifindex, + 'parent': {'scope': 'node', 'id': node_id}, + 'handle': {'scope': 'queue', 'id': 3}, + 'weight': 1}, + {'ifindex': cfg.ifindex, + 'parent': {'scope': 'netdev'}, + 'handle': {'scope': 'node', 'id': node_id}, + 'metric': 'bps', + 'bw-max': 10000}]) + + # Final cleanup. 
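+    # (All three leaves now sit directly under the surviving node; once the
+    # last leaf is gone the node scope shaper is presumably dropped as well,
+    # which the empty dump below verifies.)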
+ for i in range(1, 4): + nl_shaper.delete({'ifindex': cfg.ifindex, + 'handle': {'scope': 'queue', 'id': i}}) + shapers = nl_shaper.get({'ifindex': cfg.ifindex}, dump=True) + ksft_eq(len(shapers), 0) + +def queue_update(cfg, nl_shaper) -> None: + if cfg.nr_queues < 4: + raise KsftSkipEx(f"netdev does not have enough queues min 4 reported {cfg.nr_queues}") + if not cfg.queues: + raise KsftSkipEx("device does not support queue scope") + + for i in range(3): + nl_shaper.set({'ifindex': cfg.ifindex, + 'handle': {'scope': 'queue', 'id': i}, + 'metric': 'bps', + 'bw-max': (i + 1) * 1000}) + # Delete a channel, with no shapers configured on top of the related + # queue: no changes expected + cmd(f"ethtool -L {cfg.dev['ifname']} {cfg.rx_type} 3", timeout=10) + shapers = nl_shaper.get({'ifindex': cfg.ifindex}, dump=True) + ksft_eq(shapers, [{'ifindex': cfg.ifindex, + 'parent': {'scope': 'netdev'}, + 'handle': {'scope': 'queue', 'id': 0}, + 'metric': 'bps', + 'bw-max': 1000}, + {'ifindex': cfg.ifindex, + 'parent': {'scope': 'netdev'}, + 'handle': {'scope': 'queue', 'id': 1}, + 'metric': 'bps', + 'bw-max': 2000}, + {'ifindex': cfg.ifindex, + 'parent': {'scope': 'netdev'}, + 'handle': {'scope': 'queue', 'id': 2}, + 'metric': 'bps', + 'bw-max': 3000}]) + + # Delete a channel, with a shaper configured on top of the related + # queue: the shaper must be deleted, too + cmd(f"ethtool -L {cfg.dev['ifname']} {cfg.rx_type} 2", timeout=10) + + shapers = nl_shaper.get({'ifindex': cfg.ifindex}, dump=True) + ksft_eq(shapers, [{'ifindex': cfg.ifindex, + 'parent': {'scope': 'netdev'}, + 'handle': {'scope': 'queue', 'id': 0}, + 'metric': 'bps', + 'bw-max': 1000}, + {'ifindex': cfg.ifindex, + 'parent': {'scope': 'netdev'}, + 'handle': {'scope': 'queue', 'id': 1}, + 'metric': 'bps', + 'bw-max': 2000}]) + + # Restore the original channels number, no expected changes + cmd(f"ethtool -L {cfg.dev['ifname']} {cfg.rx_type} {cfg.nr_queues}", timeout=10) + shapers = nl_shaper.get({'ifindex': cfg.ifindex}, dump=True) + ksft_eq(shapers, [{'ifindex': cfg.ifindex, + 'parent': {'scope': 'netdev'}, + 'handle': {'scope': 'queue', 'id': 0}, + 'metric': 'bps', + 'bw-max': 1000}, + {'ifindex': cfg.ifindex, + 'parent': {'scope': 'netdev'}, + 'handle': {'scope': 'queue', 'id': 1}, + 'metric': 'bps', + 'bw-max': 2000}]) + + # Final cleanup. 
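+    # (Only queues 0 and 1 still carry shapers: queue 2's shaper was removed
+    # together with its channel and was not re-created when the channel
+    # count was restored.)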
+ for i in range(0, 2): + nl_shaper.delete({'ifindex': cfg.ifindex, + 'handle': {'scope': 'queue', 'id': i}}) + +def main() -> None: + with NetDrvEnv(__file__, queue_count=4) as cfg: + cfg.queues = False + cfg.netdev = False + cfg.groups = False + cfg.nr_queues = 0 + ksft_run([get_shapers, + get_caps, + set_qshapers, + del_qshapers, + set_nshapers, + del_nshapers, + basic_groups, + qgroups, + delegation, + queue_update], args=(cfg, NetshaperFamily())) + ksft_exit() + + +if __name__ == "__main__": + main() diff --git a/tools/testing/selftests/net/Makefile b/tools/testing/selftests/net/Makefile index 649f1fe0dc46..26a4883a65c9 100644 --- a/tools/testing/selftests/net/Makefile +++ b/tools/testing/selftests/net/Makefile @@ -98,7 +98,6 @@ TEST_PROGS += vlan_hw_filter.sh TEST_PROGS += bpf_offload.py # YNL files, must be before "include ..lib.mk" -EXTRA_CLEAN += $(OUTPUT)/libynl.a YNL_GEN_FILES := ncdevmem TEST_GEN_FILES += $(YNL_GEN_FILES) diff --git a/tools/testing/selftests/net/forwarding/lib.sh b/tools/testing/selftests/net/forwarding/lib.sh index c992e385159c..89c25f72b10c 100644 --- a/tools/testing/selftests/net/forwarding/lib.sh +++ b/tools/testing/selftests/net/forwarding/lib.sh @@ -1403,10 +1403,17 @@ tests_run() local current_test for current_test in ${TESTS:-$ALL_TESTS}; do - $current_test + in_defer_scope \ + $current_test done } +cleanup() +{ + pre_cleanup + defer_scopes_cleanup +} + multipath_eval() { local desc="$1" @@ -1761,8 +1768,10 @@ start_tcp_traffic() stop_traffic() { + local pid=${1-%%}; shift + # Suppress noise from killing mausezahn. - { kill %% && wait %%; } 2>/dev/null + { kill $pid && wait $pid; } 2>/dev/null } declare -A cappid diff --git a/tools/testing/selftests/net/forwarding/sch_ets.sh b/tools/testing/selftests/net/forwarding/sch_ets.sh index e60c8b4818cc..1f6f53e284b5 100755 --- a/tools/testing/selftests/net/forwarding/sch_ets.sh +++ b/tools/testing/selftests/net/forwarding/sch_ets.sh @@ -24,15 +24,10 @@ switch_create() # Create a bottleneck so that the DWRR process can kick in. 
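+	# (TBF caps the egress rate below link speed so that the scheduler
+	# stacked on top of it, rather than the wire, decides how bandwidth is
+	# shared; 'burst' is the token bucket size and 'latency' bounds the
+	# queueing delay.)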
tc qdisc add dev $swp2 root handle 1: tbf \ rate 1Gbit burst 1Mbit latency 100ms + defer tc qdisc del dev $swp2 root PARENT="parent 1:" } -switch_destroy() -{ - ets_switch_destroy - tc qdisc del dev $swp2 root -} - # Callback from sch_ets_tests.sh collect_stats() { diff --git a/tools/testing/selftests/net/forwarding/sch_ets_core.sh b/tools/testing/selftests/net/forwarding/sch_ets_core.sh index f906fcc66572..8f9922c695b0 100644 --- a/tools/testing/selftests/net/forwarding/sch_ets_core.sh +++ b/tools/testing/selftests/net/forwarding/sch_ets_core.sh @@ -166,44 +166,32 @@ h1_create() local i; simple_if_init $h1 + defer simple_if_fini $h1 + mtu_set $h1 9900 + defer mtu_restore $h1 + for i in {0..2}; do vlan_create $h1 1$i v$h1 $(sip $i)/28 + defer vlan_destroy $h1 1$i ip link set dev $h1.1$i type vlan egress 0:$i done } -h1_destroy() -{ - local i - - for i in {0..2}; do - vlan_destroy $h1 1$i - done - mtu_restore $h1 - simple_if_fini $h1 -} - h2_create() { local i simple_if_init $h2 - mtu_set $h2 9900 - for i in {0..2}; do - vlan_create $h2 1$i v$h2 $(dip $i)/28 - done -} + defer simple_if_fini $h2 -h2_destroy() -{ - local i + mtu_set $h2 9900 + defer mtu_restore $h2 for i in {0..2}; do - vlan_destroy $h2 1$i + vlan_create $h2 1$i v$h2 $(dip $i)/28 + defer vlan_destroy $h2 1$i done - mtu_restore $h2 - simple_if_fini $h2 } ets_switch_create() @@ -211,44 +199,45 @@ ets_switch_create() local i ip link set dev $swp1 up + defer ip link set dev $swp1 down + mtu_set $swp1 9900 + defer mtu_restore $swp1 ip link set dev $swp2 up + defer ip link set dev $swp2 down + mtu_set $swp2 9900 + defer mtu_restore $swp2 for i in {0..2}; do vlan_create $swp1 1$i + defer vlan_destroy $swp1 1$i ip link set dev $swp1.1$i type vlan ingress 0:0 1:1 2:2 vlan_create $swp2 1$i + defer vlan_destroy $swp2 1$i ip link add dev br1$i type bridge + defer ip link del dev br1$i + ip link set dev $swp1.1$i master br1$i + defer ip link set dev $swp1.1$i nomaster + ip link set dev $swp2.1$i master br1$i + defer ip link set dev $swp2.1$i nomaster ip link set dev br1$i up - ip link set dev $swp1.1$i up - ip link set dev $swp2.1$i up - done -} + defer ip link set dev br1$i down -ets_switch_destroy() -{ - local i - - ets_delete_qdisc + ip link set dev $swp1.1$i up + defer ip link set dev $swp1.1$i down - for i in {0..2}; do - ip link del dev br1$i - vlan_destroy $swp2 1$i - vlan_destroy $swp1 1$i + ip link set dev $swp2.1$i up + defer ip link set dev $swp2.1$i down done - mtu_restore $swp2 - ip link set dev $swp2 down - - mtu_restore $swp1 - ip link set dev $swp1 down + defer ets_delete_qdisc } setup_prepare() @@ -263,23 +252,13 @@ setup_prepare() hut=$h2 vrf_prepare + defer vrf_cleanup h1_create h2_create switch_create } -cleanup() -{ - pre_cleanup - - switch_destroy - h2_destroy - h1_destroy - - vrf_cleanup -} - ping_ipv4() { ping_test $h1.10 $(dip 0) " vlan 10" diff --git a/tools/testing/selftests/net/forwarding/sch_ets_tests.sh b/tools/testing/selftests/net/forwarding/sch_ets_tests.sh index f9d26a7911bb..08240d3e3c87 100644 --- a/tools/testing/selftests/net/forwarding/sch_ets_tests.sh +++ b/tools/testing/selftests/net/forwarding/sch_ets_tests.sh @@ -90,6 +90,7 @@ __ets_dwrr_test() for stream in ${streams[@]}; do ets_start_traffic $stream + defer stop_traffic $! 
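+		# ($! is the PID of the stream just started in the background;
+		# passing it to stop_traffic lets each of the concurrent
+		# streams be torn down independently.)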
done sleep 10 @@ -120,25 +121,24 @@ __ets_dwrr_test() ${d[0]} ${d[$i]} fi done - - for stream in ${streams[@]}; do - stop_traffic - done } ets_dwrr_test_012() { - __ets_dwrr_test 0 1 2 + in_defer_scope \ + __ets_dwrr_test 0 1 2 } ets_dwrr_test_01() { - __ets_dwrr_test 0 1 + in_defer_scope \ + __ets_dwrr_test 0 1 } ets_dwrr_test_12() { - __ets_dwrr_test 1 2 + in_defer_scope \ + __ets_dwrr_test 1 2 } ets_qdisc_setup() diff --git a/tools/testing/selftests/net/forwarding/sch_red.sh b/tools/testing/selftests/net/forwarding/sch_red.sh index 17f28644568e..af166662b78a 100755 --- a/tools/testing/selftests/net/forwarding/sch_red.sh +++ b/tools/testing/selftests/net/forwarding/sch_red.sh @@ -53,71 +53,63 @@ PKTSZ=1400 h1_create() { simple_if_init $h1 192.0.2.1/28 + defer simple_if_fini $h1 192.0.2.1/28 + mtu_set $h1 10000 + defer mtu_restore $h1 + tc qdisc replace dev $h1 root handle 1: tbf \ rate 10Mbit burst 10K limit 1M -} - -h1_destroy() -{ - tc qdisc del dev $h1 root - mtu_restore $h1 - simple_if_fini $h1 192.0.2.1/28 + defer tc qdisc del dev $h1 root } h2_create() { simple_if_init $h2 192.0.2.2/28 - mtu_set $h2 10000 -} + defer simple_if_fini $h2 192.0.2.2/28 -h2_destroy() -{ - mtu_restore $h2 - simple_if_fini $h2 192.0.2.2/28 + mtu_set $h2 10000 + defer mtu_restore $h2 } h3_create() { simple_if_init $h3 192.0.2.3/28 - mtu_set $h3 10000 -} + defer simple_if_fini $h3 192.0.2.3/28 -h3_destroy() -{ - mtu_restore $h3 - simple_if_fini $h3 192.0.2.3/28 + mtu_set $h3 10000 + defer mtu_restore $h3 } switch_create() { ip link add dev br up type bridge + defer ip link del dev br + ip link set dev $swp1 up master br + defer ip link set dev $swp1 down nomaster + ip link set dev $swp2 up master br + defer ip link set dev $swp2 down nomaster + ip link set dev $swp3 up master br + defer ip link set dev $swp3 down nomaster mtu_set $swp1 10000 + defer mtu_restore $h1 + mtu_set $swp2 10000 + defer mtu_restore $h2 + mtu_set $swp3 10000 + defer mtu_restore $h3 tc qdisc replace dev $swp3 root handle 1: tbf \ rate 10Mbit burst 10K limit 1M - ip link add name _drop_test up type dummy -} + defer tc qdisc del dev $swp3 root -switch_destroy() -{ - ip link del dev _drop_test - tc qdisc del dev $swp3 root - - mtu_restore $h3 - mtu_restore $h2 - mtu_restore $h1 - - ip link set dev $swp3 down nomaster - ip link set dev $swp2 down nomaster - ip link set dev $swp1 down nomaster - ip link del dev br + ip link add name _drop_test up type dummy + defer ip link del dev _drop_test } setup_prepare() @@ -134,6 +126,7 @@ setup_prepare() h3_mac=$(mac_get $h3) vrf_prepare + defer vrf_cleanup h1_create h2_create @@ -141,18 +134,6 @@ setup_prepare() switch_create } -cleanup() -{ - pre_cleanup - - switch_destroy - h3_destroy - h2_destroy - h1_destroy - - vrf_cleanup -} - ping_ipv4() { ping_test $h1 192.0.2.3 " from host 1" @@ -287,6 +268,7 @@ do_ecn_test() $MZ $h1 -p $PKTSZ -A 192.0.2.1 -B 192.0.2.3 -c 0 \ -a own -b $h3_mac -t tcp -q tos=0x01 & + defer stop_traffic $! sleep 1 ecn_test_common "$name" $limit @@ -298,9 +280,6 @@ do_ecn_test() build_backlog $((2 * limit)) udp >/dev/null check_fail $? "UDP traffic went into backlog instead of being early-dropped" log_test "$name backlog > limit: UDP early-dropped" - - stop_traffic - sleep 1 } do_ecn_nodrop_test() @@ -310,6 +289,7 @@ do_ecn_nodrop_test() $MZ $h1 -p $PKTSZ -A 192.0.2.1 -B 192.0.2.3 -c 0 \ -a own -b $h3_mac -t tcp -q tos=0x01 & + defer stop_traffic $! 
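+	# (Give the backgrounded TCP stream a moment to start before the
+	# backlog checks below.)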
sleep 1 ecn_test_common "$name" $limit @@ -321,9 +301,6 @@ do_ecn_nodrop_test() build_backlog $((2 * limit)) udp >/dev/null check_err $? "UDP traffic was early-dropped instead of getting into backlog" log_test "$name backlog > limit: UDP not dropped" - - stop_traffic - sleep 1 } do_red_test() @@ -336,6 +313,7 @@ do_red_test() # is above limit. $MZ $h1 -p $PKTSZ -A 192.0.2.1 -B 192.0.2.3 -c 0 \ -a own -b $h3_mac -t tcp -q tos=0x01 & + defer stop_traffic $! # Pushing below the queue limit should work. RET=0 @@ -352,9 +330,6 @@ do_red_test() pct=$(check_marking "== 0") check_err $? "backlog $backlog / $limit Got $pct% marked packets, expected == 0." log_test "RED backlog > limit" - - stop_traffic - sleep 1 } do_red_qevent_test() @@ -369,6 +344,7 @@ do_red_qevent_test() $MZ $h1 -p $PKTSZ -A 192.0.2.1 -B 192.0.2.3 -c 0 \ -a own -b $h3_mac -t udp -q & + defer stop_traffic $! sleep 1 tc filter add block 10 pref 1234 handle 102 matchall skip_hw \ @@ -396,9 +372,6 @@ do_red_qevent_test() check_err $? "Dropped packets still observed: 0 expected, $((now - base)) seen" log_test "RED early_dropped packets mirrored" - - stop_traffic - sleep 1 } do_ecn_qevent_test() @@ -410,6 +383,7 @@ do_ecn_qevent_test() $MZ $h1 -p $PKTSZ -A 192.0.2.1 -B 192.0.2.3 -c 0 \ -a own -b $h3_mac -t tcp -q tos=0x01 & + defer stop_traffic $! sleep 1 tc filter add block 10 pref 1234 handle 102 matchall skip_hw \ @@ -428,9 +402,6 @@ do_ecn_qevent_test() tc filter del block 10 pref 1234 handle 102 matchall log_test "ECN marked packets mirrored" - - stop_traffic - sleep 1 } install_qdisc() @@ -451,36 +422,36 @@ uninstall_qdisc() ecn_test() { install_qdisc ecn + defer uninstall_qdisc xfail_on_slow do_ecn_test $BACKLOG - uninstall_qdisc } ecn_nodrop_test() { install_qdisc ecn nodrop + defer uninstall_qdisc xfail_on_slow do_ecn_nodrop_test $BACKLOG - uninstall_qdisc } red_test() { install_qdisc + defer uninstall_qdisc xfail_on_slow do_red_test $BACKLOG - uninstall_qdisc } red_qevent_test() { install_qdisc qevent early_drop block 10 + defer uninstall_qdisc xfail_on_slow do_red_qevent_test $BACKLOG - uninstall_qdisc } ecn_qevent_test() { install_qdisc ecn qevent mark block 10 + defer uninstall_qdisc xfail_on_slow do_ecn_qevent_test $BACKLOG - uninstall_qdisc } trap cleanup EXIT diff --git a/tools/testing/selftests/net/forwarding/sch_tbf_core.sh b/tools/testing/selftests/net/forwarding/sch_tbf_core.sh index 9cd884d4a5de..ec309a5086bc 100644 --- a/tools/testing/selftests/net/forwarding/sch_tbf_core.sh +++ b/tools/testing/selftests/net/forwarding/sch_tbf_core.sh @@ -60,68 +60,65 @@ host_create() local host=$1; shift simple_if_init $dev + defer simple_if_fini $dev + mtu_set $dev 10000 + defer mtu_restore $dev vlan_create $dev 10 v$dev $(ipaddr $host 10)/28 + defer vlan_destroy $dev 10 ip link set dev $dev.10 type vlan egress 0:0 vlan_create $dev 11 v$dev $(ipaddr $host 11)/28 + defer vlan_destroy $dev 11 ip link set dev $dev.11 type vlan egress 0:1 } -host_destroy() -{ - local dev=$1; shift - - vlan_destroy $dev 11 - vlan_destroy $dev 10 - mtu_restore $dev - simple_if_fini $dev -} - h1_create() { host_create $h1 1 } -h1_destroy() -{ - host_destroy $h1 -} - h2_create() { host_create $h2 2 tc qdisc add dev $h2 clsact + defer tc qdisc del dev $h2 clsact + tc filter add dev $h2 ingress pref 1010 prot 802.1q \ flower $TCFLAGS vlan_id 10 action pass tc filter add dev $h2 ingress pref 1011 prot 802.1q \ flower $TCFLAGS vlan_id 11 action pass } -h2_destroy() -{ - tc qdisc del dev $h2 clsact - host_destroy $h2 -} - switch_create() { local intf local 
vlan ip link add dev br10 type bridge + defer ip link del dev br10 + ip link add dev br11 type bridge + defer ip link del dev br11 for intf in $swp1 $swp2; do ip link set dev $intf up + defer ip link set dev $intf down + mtu_set $intf 10000 + defer mtu_restore $intf for vlan in 10 11; do vlan_create $intf $vlan + defer vlan_destroy $intf $vlan + ip link set dev $intf.$vlan master br$vlan + defer ip link set dev $intf.$vlan nomaster + ip link set dev $intf.$vlan up + defer ip link set dev $intf.$vlan down done done @@ -130,34 +127,10 @@ switch_create() done ip link set dev br10 up - ip link set dev br11 up -} - -switch_destroy() -{ - local intf - local vlan - - # A test may have been interrupted mid-run, with Qdisc installed. Delete - # it here. - tc qdisc del dev $swp2 root 2>/dev/null - - ip link set dev br11 down - ip link set dev br10 down + defer ip link set dev br10 down - for intf in $swp2 $swp1; do - for vlan in 11 10; do - ip link set dev $intf.$vlan down - ip link set dev $intf.$vlan nomaster - vlan_destroy $intf $vlan - done - - mtu_restore $intf - ip link set dev $intf down - done - - ip link del dev br11 - ip link del dev br10 + ip link set dev br11 up + defer ip link set dev br11 down } setup_prepare() @@ -177,23 +150,13 @@ setup_prepare() h2_mac=$(mac_get $h2) vrf_prepare + defer vrf_cleanup h1_create h2_create switch_create } -cleanup() -{ - pre_cleanup - - switch_destroy - h2_destroy - h1_destroy - - vrf_cleanup -} - ping_ipv4() { ping_test $h1.10 $(ipaddr 2 10) " vlan 10" @@ -207,18 +170,18 @@ tbf_get_counter() tc_rule_stats_get $h2 10$vlan ingress .bytes } -do_tbf_test() +__tbf_test() { local vlan=$1; shift local mbit=$1; shift start_traffic $h1.$vlan $(ipaddr 1 $vlan) $(ipaddr 2 $vlan) $h2_mac + defer stop_traffic $! sleep 5 # Wait for the burst to dwindle local t2=$(busywait_for_counter 1000 +1 tbf_get_counter $vlan) sleep 10 local t3=$(tbf_get_counter $vlan) - stop_traffic RET=0 @@ -231,3 +194,9 @@ do_tbf_test() log_test "TC $((vlan - 10)): TBF rate ${mbit}Mbit" } + +do_tbf_test() +{ + in_defer_scope \ + __tbf_test "$@" +} diff --git a/tools/testing/selftests/net/forwarding/sch_tbf_etsprio.sh b/tools/testing/selftests/net/forwarding/sch_tbf_etsprio.sh index df9bcd6a811a..c182a04282bc 100644 --- a/tools/testing/selftests/net/forwarding/sch_tbf_etsprio.sh +++ b/tools/testing/selftests/net/forwarding/sch_tbf_etsprio.sh @@ -30,8 +30,9 @@ tbf_test() # This test is used for both ETS and PRIO. Even though we only need two # bands, PRIO demands a minimum of three. 
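+	# (The first argument of $QDISC is the band count; "priomap 2 1 0"
+	# steers priorities 0..2 into bands 2..0, band 0 having the highest
+	# precedence.)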
tc qdisc add dev $swp2 root handle 10: $QDISC 3 priomap 2 1 0 + defer tc qdisc del dev $swp2 root + tbf_test_one 128K - tc qdisc del dev $swp2 root } tbf_root_test() @@ -42,6 +43,8 @@ tbf_root_test() tc qdisc replace dev $swp2 root handle 1: \ tbf rate 400Mbit burst $bs limit 1M + defer tc qdisc del dev $swp2 root + tc qdisc replace dev $swp2 parent 1:1 handle 10: \ $QDISC 3 priomap 2 1 0 tc qdisc replace dev $swp2 parent 10:3 handle 103: \ @@ -53,8 +56,6 @@ tbf_root_test() do_tbf_test 10 400 $bs do_tbf_test 11 400 $bs - - tc qdisc del dev $swp2 root } if type -t sch_tbf_pre_hook >/dev/null; then diff --git a/tools/testing/selftests/net/forwarding/sch_tbf_root.sh b/tools/testing/selftests/net/forwarding/sch_tbf_root.sh index 96c997be0d03..9f20320f8d84 100755 --- a/tools/testing/selftests/net/forwarding/sch_tbf_root.sh +++ b/tools/testing/selftests/net/forwarding/sch_tbf_root.sh @@ -14,13 +14,14 @@ tbf_test_one() tc qdisc replace dev $swp2 root handle 108: tbf \ rate 400Mbit burst $bs limit 1M + defer tc qdisc del dev $swp2 root + do_tbf_test 10 400 $bs } tbf_test() { tbf_test_one 128K - tc qdisc del dev $swp2 root } if type -t sch_tbf_pre_hook >/dev/null; then diff --git a/tools/testing/selftests/net/ioam6.sh b/tools/testing/selftests/net/ioam6.sh index 12491850ae98..845c26dd01a9 100755 --- a/tools/testing/selftests/net/ioam6.sh +++ b/tools/testing/selftests/net/ioam6.sh @@ -3,119 +3,106 @@ # # Author: Justin Iurman <justin.iurman@uliege.be> # -# This script evaluates the IOAM insertion for IPv6 by checking the IOAM data -# consistency directly inside packets on the receiver side. Tests are divided -# into three categories: OUTPUT (evaluates the IOAM processing by the sender), -# INPUT (evaluates the IOAM processing by a receiver) and GLOBAL (evaluates -# wider use cases that do not fall into the other two categories). Both OUTPUT -# and INPUT tests only use a two-node topology (alpha and beta), while GLOBAL -# tests use the entire three-node topology (alpha, beta, gamma). Each test is -# documented inside its own handler in the code below. +# This script evaluates IOAM for IPv6 by checking local IOAM configurations and +# IOAM data inside packets. There are three categories of tests: LOCAL, OUTPUT, +# and INPUT. The former (LOCAL) checks all IOAM related configurations locally +# without sending packets. OUTPUT tests verify the processing of an IOAM +# encapsulating node, while INPUT tests verify the processing of an IOAM transit +# node. Both OUTPUT and INPUT tests send packets. Each test is documented inside +# its own handler. # -# An IOAM domain is configured from Alpha to Gamma but not on the reverse path. -# When either Beta or Gamma is the destination (depending on the test category), -# Alpha adds an IOAM option (Pre-allocated Trace) inside a Hop-by-hop. +# The topology used for OUTPUT and INPUT tests is made of three nodes: +# - Alpha (the IOAM encapsulating node) +# - Beta (the IOAM transit node) +# - Gamma (the receiver) ** # +# An IOAM domain is configured from Alpha to Beta, but not on the reverse path. +# Alpha adds an IOAM option (Pre-allocated Trace) inside a Hop-by-hop. # -# +-------------------+ +-------------------+ -# | | | | -# | Alpha netns | | Gamma netns | -# | | | | -# | +-------------+ | | +-------------+ | -# | | veth0 | | | | veth0 | | -# | | db01::2/64 | | | | db02::2/64 | | -# | +-------------+ | | +-------------+ | -# | . | | . | -# +-------------------+ +-------------------+ -# . . -# . . -# . . 
-# +----------------------------------------------------+ -# | . . | -# | +-------------+ +-------------+ | -# | | veth0 | | veth1 | | -# | | db01::1/64 | ................ | db02::1/64 | | -# | +-------------+ +-------------+ | -# | | -# | Beta netns | -# | | -# +----------------------------------------------------+ +# ** Gamma is required because ioam6_parser.c uses a packet socket and we need +# to see IOAM data inserted by the very last node (Beta), which would happen +# _after_ we get a copy of the packet on Beta. Note that using an +# IPv6 raw socket with IPV6_RECVHOPOPTS on Beta would not be enough: we also +# need to access the IPv6 header to check some fields (e.g., source and +# destination addresses), which is not possible in that case. As a +# consequence, we need Gamma as a receiver to run ioam6_parser.c which uses a +# packet socket. # # +# +-----------------------+ +-----------------------+ +# | | | | +# | Alpha netns | | Gamma netns | +# | | | | +# | +-------------------+ | | +-------------------+ | +# | | veth0 | | | | veth0 | | +# | | 2001:db8:1::2/64 | | | | 2001:db8:2::2/64 | | +# | +-------------------+ | | +-------------------+ | +# | . | | . | +# +-----------.-----------+ +-----------.-----------+ +# . . +# . . +# . . +# +-----------.----------------------------------.-----------+ +# | . . | +# | +-------------------+ +-------------------+ | +# | | veth0 | | veth1 | | +# | | 2001:db8:1::1/64 | ............ | 2001:db8:2::1/64 | | +# | +-------------------+ +-------------------+ | +# | | +# | Beta netns | +# | | +# +----------------------------------------------------------+ # -# ============================================================= -# | Alpha - IOAM configuration | -# +===========================================================+ -# | Node ID | 1 | -# +-----------------------------------------------------------+ -# | Node Wide ID | 11111111 | -# +-----------------------------------------------------------+ -# | Ingress ID | 0xffff (default value) | -# +-----------------------------------------------------------+ -# | Ingress Wide ID | 0xffffffff (default value) | -# +-----------------------------------------------------------+ -# | Egress ID | 101 | -# +-----------------------------------------------------------+ -# | Egress Wide ID | 101101 | -# +-----------------------------------------------------------+ -# | Namespace Data | 0xdeadbee0 | -# +-----------------------------------------------------------+ -# | Namespace Wide Data | 0xcafec0caf00dc0de | -# +-----------------------------------------------------------+ -# | Schema ID | 777 | -# +-----------------------------------------------------------+ -# | Schema Data | something that will be 4n-aligned | -# +-----------------------------------------------------------+ # # -# ============================================================= -# | Beta - IOAM configuration | -# +===========================================================+ -# | Node ID | 2 | -# +-----------------------------------------------------------+ -# | Node Wide ID | 22222222 | -# +-----------------------------------------------------------+ -# | Ingress ID | 201 | -# +-----------------------------------------------------------+ -# | Ingress Wide ID | 201201 | -# +-----------------------------------------------------------+ -# | Egress ID | 202 | -# +-----------------------------------------------------------+ -# | Egress Wide ID | 202202 | -# +-----------------------------------------------------------+ -# | Namespace Data | 0xdeadbee1 | -# 
+-----------------------------------------------------------+ -# | Namespace Wide Data | 0xcafec0caf11dc0de | -# +-----------------------------------------------------------+ -# | Schema ID | 666 | -# +-----------------------------------------------------------+ -# | Schema Data | Hello there -Obi | -# +-----------------------------------------------------------+ +# +==========================================================+ +# | Alpha - IOAM configuration | +# +=====================+====================================+ +# | Node ID | 1 | +# +---------------------+------------------------------------+ +# | Node Wide ID | 11111111 | +# +---------------------+------------------------------------+ +# | Ingress ID | 0xffff (default value) | +# +---------------------+------------------------------------+ +# | Ingress Wide ID | 0xffffffff (default value) | +# +---------------------+------------------------------------+ +# | Egress ID | 101 | +# +---------------------+------------------------------------+ +# | Egress Wide ID | 101101 | +# +---------------------+------------------------------------+ +# | Namespace Data | 0xdeadbeef | +# +---------------------+------------------------------------+ +# | Namespace Wide Data | 0xcafec0caf00dc0de | +# +---------------------+------------------------------------+ +# | Schema ID | 777 | +# +---------------------+------------------------------------+ +# | Schema Data | something that will be 4n-aligned | +# +---------------------+------------------------------------+ # # -# ============================================================= -# | Gamma - IOAM configuration | -# +===========================================================+ -# | Node ID | 3 | -# +-----------------------------------------------------------+ -# | Node Wide ID | 33333333 | -# +-----------------------------------------------------------+ -# | Ingress ID | 301 | -# +-----------------------------------------------------------+ -# | Ingress Wide ID | 301301 | -# +-----------------------------------------------------------+ -# | Egress ID | 0xffff (default value) | -# +-----------------------------------------------------------+ -# | Egress Wide ID | 0xffffffff (default value) | -# +-----------------------------------------------------------+ -# | Namespace Data | 0xdeadbee2 | -# +-----------------------------------------------------------+ -# | Namespace Wide Data | 0xcafec0caf22dc0de | -# +-----------------------------------------------------------+ -# | Schema ID | 0xffffff (= None) | -# +-----------------------------------------------------------+ -# | Schema Data | | -# +-----------------------------------------------------------+ +# +==========================================================+ +# | Beta - IOAM configuration | +# +=====================+====================================+ +# | Node ID | 2 | +# +---------------------+------------------------------------+ +# | Node Wide ID | 22222222 | +# +---------------------+------------------------------------+ +# | Ingress ID | 201 | +# +---------------------+------------------------------------+ +# | Ingress Wide ID | 201201 | +# +---------------------+------------------------------------+ +# | Egress ID | 202 | +# +---------------------+------------------------------------+ +# | Egress Wide ID | 202202 | +# +---------------------+------------------------------------+ +# | Namespace Data | 0xffffffff (default value) | +# +---------------------+------------------------------------+ +# | Namespace Wide Data | 0xffffffffffffffff 
(default value) | +# +---------------------+------------------------------------+ +# | Schema ID | 0xffffff (= None) | +# +---------------------+------------------------------------+ +# | Schema Data | | +# +---------------------+------------------------------------+ source lib.sh @@ -128,64 +115,69 @@ source lib.sh ################################################################################ ALPHA=( - 1 # ID - 11111111 # Wide ID - 0xffff # Ingress ID - 0xffffffff # Ingress Wide ID - 101 # Egress ID - 101101 # Egress Wide ID - 0xdeadbee0 # Namespace Data - 0xcafec0caf00dc0de # Namespace Wide Data - 777 # Schema ID (0xffffff = None) - "something that will be 4n-aligned" # Schema Data + 1 # ID + 11111111 # Wide ID + 0xffff # Ingress ID (default value) + 0xffffffff # Ingress Wide ID (default value) + 101 # Egress ID + 101101 # Egress Wide ID + 0xdeadbeef # Namespace Data + 0xcafec0caf00dc0de # Namespace Wide Data + 777 # Schema ID + "something that will be 4n-aligned" # Schema Data ) BETA=( - 2 - 22222222 - 201 - 201201 - 202 - 202202 - 0xdeadbee1 - 0xcafec0caf11dc0de - 666 - "Hello there -Obi" + 2 # ID + 22222222 # Wide ID + 201 # Ingress ID + 201201 # Ingress Wide ID + 202 # Egress ID + 202202 # Egress Wide ID + 0xffffffff # Namespace Data (empty value) + 0xffffffffffffffff # Namespace Wide Data (empty value) + 0xffffff # Schema ID (empty value) + "" # Schema Data (empty value) ) -GAMMA=( - 3 - 33333333 - 301 - 301301 - 0xffff - 0xffffffff - 0xdeadbee2 - 0xcafec0caf22dc0de - 0xffffff - "" -) +TESTS_LOCAL=" + local_sysctl_ioam_id + local_sysctl_ioam_id_wide + local_sysctl_ioam_intf_id + local_sysctl_ioam_intf_id_wide + local_sysctl_ioam_intf_enabled + local_ioam_namespace + local_ioam_schema + local_ioam_schema_namespace + local_route_ns + local_route_tunsrc + local_route_tundst + local_route_trace_type + local_route_trace_size + local_route_trace_type_bits + local_route_trace_size_values +" TESTS_OUTPUT=" - out_undef_ns - out_no_room - out_bits - out_full_supp_trace + output_undef_ns + output_no_room + output_no_room_oss + output_bits + output_sizes + output_full_supp_trace " TESTS_INPUT=" - in_undef_ns - in_no_room - in_oflag - in_bits - in_full_supp_trace + input_undef_ns + input_no_room + input_no_room_oss + input_disabled + input_oflag + input_bits + input_sizes + input_full_supp_trace " -TESTS_GLOBAL=" - fwd_full_supp_trace -" - - ################################################################################ # # # LIBRARY # @@ -194,66 +186,64 @@ TESTS_GLOBAL=" check_kernel_compatibility() { - setup_ns ioam_tmp_node - ip link add name veth0 netns $ioam_tmp_node type veth \ - peer name veth1 netns $ioam_tmp_node + setup_ns ioam_tmp_node &>/dev/null + local ret=$? - ip -netns $ioam_tmp_node link set veth0 up - ip -netns $ioam_tmp_node link set veth1 up + ip link add name veth0 netns $ioam_tmp_node type veth \ + peer name veth1 netns $ioam_tmp_node &>/dev/null + ret=$((ret + $?)) - ip -netns $ioam_tmp_node ioam namespace add 0 - ns_ad=$? + ip -netns $ioam_tmp_node link set veth0 up &>/dev/null + ret=$((ret + $?)) - ip -netns $ioam_tmp_node ioam namespace show | grep -q "namespace 0" - ns_sh=$? + ip -netns $ioam_tmp_node link set veth1 up &>/dev/null + ret=$((ret + $?)) - if [[ $ns_ad != 0 || $ns_sh != 0 ]] + if [ $ret != 0 ] then - echo "SKIP: kernel version probably too old, missing ioam support" - ip link del veth0 2>/dev/null || true - cleanup_ns $ioam_tmp_node || true + echo "SKIP: Setup failed." 
+ cleanup_ns $ioam_tmp_node exit $ksft_skip fi - ip -netns $ioam_tmp_node route add db02::/64 encap ioam6 mode inline \ - trace prealloc type 0x800000 ns 0 size 4 dev veth0 - tr_ad=$? + ip -netns $ioam_tmp_node route add 2001:db8:2::/64 \ + encap ioam6 trace prealloc type 0x800000 ns 0 size 4 dev veth0 &>/dev/null + ret=$? - ip -netns $ioam_tmp_node -6 route | grep -q "encap ioam6" - tr_sh=$? + ip -netns $ioam_tmp_node -6 route 2>/dev/null | grep -q "encap ioam6" + ret=$((ret + $?)) - if [[ $tr_ad != 0 || $tr_sh != 0 ]] + if [ $ret != 0 ] then - echo "SKIP: cannot attach an ioam trace to a route, did you compile" \ - "without CONFIG_IPV6_IOAM6_LWTUNNEL?" - ip link del veth0 2>/dev/null || true - cleanup_ns $ioam_tmp_node || true + echo "SKIP: Cannot attach an IOAM trace to a route. Was your kernel" \ + "compiled without CONFIG_IPV6_IOAM6_LWTUNNEL? Are you running an" \ + "old kernel? Are you using an old version of iproute2?" + cleanup_ns $ioam_tmp_node exit $ksft_skip fi - ip link del veth0 2>/dev/null || true - cleanup_ns $ioam_tmp_node || true + cleanup_ns $ioam_tmp_node - lsmod | grep -q "ip6_tunnel" + lsmod 2>/dev/null | grep -q "ip6_tunnel" ip6tnl_loaded=$? - if [ $ip6tnl_loaded = 0 ] + if [ $ip6tnl_loaded == 0 ] then encap_tests=0 else modprobe ip6_tunnel &>/dev/null - lsmod | grep -q "ip6_tunnel" + lsmod 2>/dev/null | grep -q "ip6_tunnel" encap_tests=$? if [ $encap_tests != 0 ] then - ip a | grep -q "ip6tnl0" + ip a 2>/dev/null | grep -q "ip6tnl0" encap_tests=$? if [ $encap_tests != 0 ] then echo "Note: ip6_tunnel not found neither as a module nor inside the" \ - "kernel, tests that require it (encap mode) will be omitted" + "kernel. Any tests that require it will be skipped." fi fi fi @@ -261,477 +251,1400 @@ check_kernel_compatibility() cleanup() { - ip link del ioam-veth-alpha 2>/dev/null || true - ip link del ioam-veth-gamma 2>/dev/null || true - - cleanup_ns $ioam_node_alpha $ioam_node_beta $ioam_node_gamma || true + cleanup_ns $ioam_node_alpha $ioam_node_beta $ioam_node_gamma if [ $ip6tnl_loaded != 0 ] then - modprobe -r ip6_tunnel 2>/dev/null || true + modprobe -r ip6_tunnel &>/dev/null fi } setup() { - setup_ns ioam_node_alpha ioam_node_beta ioam_node_gamma + setup_ns ioam_node_alpha ioam_node_beta ioam_node_gamma &>/dev/null ip link add name ioam-veth-alpha netns $ioam_node_alpha type veth \ - peer name ioam-veth-betaL netns $ioam_node_beta + peer name ioam-veth-betaL netns $ioam_node_beta &>/dev/null ip link add name ioam-veth-betaR netns $ioam_node_beta type veth \ - peer name ioam-veth-gamma netns $ioam_node_gamma - - ip -netns $ioam_node_alpha link set ioam-veth-alpha name veth0 - ip -netns $ioam_node_beta link set ioam-veth-betaL name veth0 - ip -netns $ioam_node_beta link set ioam-veth-betaR name veth1 - ip -netns $ioam_node_gamma link set ioam-veth-gamma name veth0 - - ip -netns $ioam_node_alpha addr add db01::2/64 dev veth0 - ip -netns $ioam_node_alpha link set veth0 up - ip -netns $ioam_node_alpha link set lo up - ip -netns $ioam_node_alpha route add db02::/64 via db01::1 dev veth0 - ip -netns $ioam_node_alpha route del db01::/64 - ip -netns $ioam_node_alpha route add db01::/64 dev veth0 - - ip -netns $ioam_node_beta addr add db01::1/64 dev veth0 - ip -netns $ioam_node_beta addr add db02::1/64 dev veth1 - ip -netns $ioam_node_beta link set veth0 up - ip -netns $ioam_node_beta link set veth1 up - ip -netns $ioam_node_beta link set lo up - - ip -netns $ioam_node_gamma addr add db02::2/64 dev veth0 - ip -netns $ioam_node_gamma link set veth0 up - ip -netns 
$ioam_node_gamma link set lo up - ip -netns $ioam_node_gamma route add db01::/64 via db02::1 dev veth0 - - # - IOAM config - - ip netns exec $ioam_node_alpha sysctl -wq net.ipv6.ioam6_id=${ALPHA[0]} - ip netns exec $ioam_node_alpha sysctl -wq net.ipv6.ioam6_id_wide=${ALPHA[1]} - ip netns exec $ioam_node_alpha sysctl -wq net.ipv6.conf.veth0.ioam6_id=${ALPHA[4]} - ip netns exec $ioam_node_alpha sysctl -wq net.ipv6.conf.veth0.ioam6_id_wide=${ALPHA[5]} - ip -netns $ioam_node_alpha ioam namespace add 123 data ${ALPHA[6]} wide ${ALPHA[7]} - ip -netns $ioam_node_alpha ioam schema add ${ALPHA[8]} "${ALPHA[9]}" - ip -netns $ioam_node_alpha ioam namespace set 123 schema ${ALPHA[8]} - - ip netns exec $ioam_node_beta sysctl -wq net.ipv6.conf.all.forwarding=1 - ip netns exec $ioam_node_beta sysctl -wq net.ipv6.ioam6_id=${BETA[0]} - ip netns exec $ioam_node_beta sysctl -wq net.ipv6.ioam6_id_wide=${BETA[1]} - ip netns exec $ioam_node_beta sysctl -wq net.ipv6.conf.veth0.ioam6_enabled=1 - ip netns exec $ioam_node_beta sysctl -wq net.ipv6.conf.veth0.ioam6_id=${BETA[2]} - ip netns exec $ioam_node_beta sysctl -wq net.ipv6.conf.veth0.ioam6_id_wide=${BETA[3]} - ip netns exec $ioam_node_beta sysctl -wq net.ipv6.conf.veth1.ioam6_id=${BETA[4]} - ip netns exec $ioam_node_beta sysctl -wq net.ipv6.conf.veth1.ioam6_id_wide=${BETA[5]} - ip -netns $ioam_node_beta ioam namespace add 123 data ${BETA[6]} wide ${BETA[7]} - ip -netns $ioam_node_beta ioam schema add ${BETA[8]} "${BETA[9]}" - ip -netns $ioam_node_beta ioam namespace set 123 schema ${BETA[8]} - - ip netns exec $ioam_node_gamma sysctl -wq net.ipv6.ioam6_id=${GAMMA[0]} - ip netns exec $ioam_node_gamma sysctl -wq net.ipv6.ioam6_id_wide=${GAMMA[1]} - ip netns exec $ioam_node_gamma sysctl -wq net.ipv6.conf.veth0.ioam6_enabled=1 - ip netns exec $ioam_node_gamma sysctl -wq net.ipv6.conf.veth0.ioam6_id=${GAMMA[2]} - ip netns exec $ioam_node_gamma sysctl -wq net.ipv6.conf.veth0.ioam6_id_wide=${GAMMA[3]} - ip -netns $ioam_node_gamma ioam namespace add 123 data ${GAMMA[6]} wide ${GAMMA[7]} + peer name ioam-veth-gamma netns $ioam_node_gamma &>/dev/null + + ip -netns $ioam_node_alpha link set ioam-veth-alpha name veth0 &>/dev/null + ip -netns $ioam_node_beta link set ioam-veth-betaL name veth0 &>/dev/null + ip -netns $ioam_node_beta link set ioam-veth-betaR name veth1 &>/dev/null + ip -netns $ioam_node_gamma link set ioam-veth-gamma name veth0 &>/dev/null + + ip -netns $ioam_node_alpha addr add 2001:db8:1::50/64 dev veth0 &>/dev/null + ip -netns $ioam_node_alpha addr add 2001:db8:1::2/64 dev veth0 &>/dev/null + ip -netns $ioam_node_alpha link set veth0 up &>/dev/null + ip -netns $ioam_node_alpha link set lo up &>/dev/null + ip -netns $ioam_node_alpha route add 2001:db8:2::/64 \ + via 2001:db8:1::1 dev veth0 &>/dev/null + + ip -netns $ioam_node_beta addr add 2001:db8:1::1/64 dev veth0 &>/dev/null + ip -netns $ioam_node_beta addr add 2001:db8:2::1/64 dev veth1 &>/dev/null + ip -netns $ioam_node_beta link set veth0 up &>/dev/null + ip -netns $ioam_node_beta link set veth1 up &>/dev/null + ip -netns $ioam_node_beta link set lo up &>/dev/null + + ip -netns $ioam_node_gamma addr add 2001:db8:2::2/64 dev veth0 &>/dev/null + ip -netns $ioam_node_gamma link set veth0 up &>/dev/null + ip -netns $ioam_node_gamma link set lo up &>/dev/null + ip -netns $ioam_node_gamma route add 2001:db8:1::/64 \ + via 2001:db8:2::1 dev veth0 &>/dev/null + + # - Alpha: IOAM config - + ip netns exec $ioam_node_alpha \ + sysctl -wq net.ipv6.ioam6_id=${ALPHA[0]} &>/dev/null + ip netns exec $ioam_node_alpha 
\ + sysctl -wq net.ipv6.ioam6_id_wide=${ALPHA[1]} &>/dev/null + ip netns exec $ioam_node_alpha \ + sysctl -wq net.ipv6.conf.veth0.ioam6_id=${ALPHA[4]} &>/dev/null + ip netns exec $ioam_node_alpha \ + sysctl -wq net.ipv6.conf.veth0.ioam6_id_wide=${ALPHA[5]} &>/dev/null + ip -netns $ioam_node_alpha \ + ioam namespace add 123 data ${ALPHA[6]} wide ${ALPHA[7]} &>/dev/null + ip -netns $ioam_node_alpha \ + ioam schema add ${ALPHA[8]} "${ALPHA[9]}" &>/dev/null + ip -netns $ioam_node_alpha \ + ioam namespace set 123 schema ${ALPHA[8]} &>/dev/null + + # - Beta: IOAM config - + ip netns exec $ioam_node_beta \ + sysctl -wq net.ipv6.conf.all.forwarding=1 &>/dev/null + ip netns exec $ioam_node_beta \ + sysctl -wq net.ipv6.ioam6_id=${BETA[0]} &>/dev/null + ip netns exec $ioam_node_beta \ + sysctl -wq net.ipv6.ioam6_id_wide=${BETA[1]} &>/dev/null + ip netns exec $ioam_node_beta \ + sysctl -wq net.ipv6.conf.veth0.ioam6_enabled=1 &>/dev/null + ip netns exec $ioam_node_beta \ + sysctl -wq net.ipv6.conf.veth0.ioam6_id=${BETA[2]} &>/dev/null + ip netns exec $ioam_node_beta \ + sysctl -wq net.ipv6.conf.veth0.ioam6_id_wide=${BETA[3]} &>/dev/null + ip netns exec $ioam_node_beta \ + sysctl -wq net.ipv6.conf.veth1.ioam6_id=${BETA[4]} &>/dev/null + ip netns exec $ioam_node_beta \ + sysctl -wq net.ipv6.conf.veth1.ioam6_id_wide=${BETA[5]} &>/dev/null + ip -netns $ioam_node_beta ioam namespace add 123 &>/dev/null sleep 1 - ip netns exec $ioam_node_alpha ping6 -c 5 -W 1 db02::2 &>/dev/null + ip netns exec $ioam_node_alpha ping6 -c 5 -W 1 2001:db8:2::2 &>/dev/null if [ $? != 0 ] then - echo "Setup FAILED" - cleanup &>/dev/null - exit 0 + echo "SKIP: Setup failed." + cleanup + exit $ksft_skip fi } log_test_passed() { - local desc=$1 - printf "TEST: %-60s [ OK ]\n" "${desc}" + printf " - TEST: %-57s [ OK ]\n" "$1" + npassed=$((npassed+1)) } -log_test_failed() +log_test_skipped() { - local desc=$1 - printf "TEST: %-60s [FAIL]\n" "${desc}" + printf " - TEST: %-57s [SKIP]\n" "$1" + nskipped=$((nskipped+1)) } -log_results() +log_test_failed() { - echo "- Tests passed: ${npassed}" - echo "- Tests failed: ${nfailed}" + printf " - TEST: %-57s [FAIL]\n" "$1" + nfailed=$((nfailed+1)) } run_test() { local name=$1 local desc=$2 - local node_src=$3 - local node_dst=$4 - local ip6_dst=$5 - local trace_type=$6 - local ioam_ns=$7 - local type=$8 - - ip netns exec $node_dst ./ioam6_parser $name $trace_type $ioam_ns $type & + local ip6_src=$3 + local trace_type=$4 + local trace_size=$5 + local ioam_ns=$6 + local type=$7 + + ip netns exec $ioam_node_gamma \ + ./ioam6_parser veth0 $name $ip6_src 2001:db8:2::2 \ + $trace_type $trace_size $ioam_ns $type & local spid=$! sleep 0.1 - ip netns exec $node_src ping6 -t 64 -c 1 -W 1 $ip6_dst &>/dev/null + ip netns exec $ioam_node_alpha ping6 -t 64 -c 1 -W 1 2001:db8:2::2 &>/dev/null if [ $? != 0 ] then - nfailed=$((nfailed+1)) log_test_failed "${desc}" kill -2 $spid &>/dev/null else wait $spid - if [ $? = 0 ] - then - npassed=$((npassed+1)) - log_test_passed "${desc}" - else - nfailed=$((nfailed+1)) - log_test_failed "${desc}" - fi + [ $? 
== 0 ] && log_test_passed "${desc}" || log_test_failed "${desc}" fi } run() { + local test + + echo + printf "+" + printf "%0.s-" {1..72} + printf "+" + echo + printf "| %-28s LOCAL tests %-29s |" echo - printf "%0.s-" {1..74} + printf "+" + printf "%0.s-" {1..72} + printf "+" echo - echo "OUTPUT tests" - printf "%0.s-" {1..74} + + echo + echo "Global config" + for test in $TESTS_LOCAL + do + $test + done + + echo + echo "Inline mode" + for test in $TESTS_LOCAL + do + $test "inline" + done + + echo + echo "Encap mode" + for test in $TESTS_LOCAL + do + $test "encap" + done + + echo + printf "+" + printf "%0.s-" {1..72} + printf "+" + echo + printf "| %-28s OUTPUT tests %-28s |" + echo + printf "+" + printf "%0.s-" {1..72} + printf "+" echo # set OUTPUT settings - ip netns exec $ioam_node_beta sysctl -wq net.ipv6.conf.veth0.ioam6_enabled=0 + ip netns exec $ioam_node_beta \ + sysctl -wq net.ipv6.conf.veth0.ioam6_enabled=0 &>/dev/null - for t in $TESTS_OUTPUT + echo + echo "Inline mode" + for test in $TESTS_OUTPUT do - $t "inline" - [ $encap_tests = 0 ] && $t "encap" + $test "inline" done - # clean OUTPUT settings - ip netns exec $ioam_node_beta sysctl -wq net.ipv6.conf.veth0.ioam6_enabled=1 - ip -netns $ioam_node_alpha route change db01::/64 dev veth0 + echo + echo "Encap mode" + for test in $TESTS_OUTPUT + do + $test "encap" + done + echo + echo "Encap mode (with tunsrc)" + for test in $TESTS_OUTPUT + do + $test "encap" "tunsrc" + done + + # clean OUTPUT settings + ip netns exec $ioam_node_beta \ + sysctl -wq net.ipv6.conf.veth0.ioam6_enabled=1 &>/dev/null echo - printf "%0.s-" {1..74} + printf "+" + printf "%0.s-" {1..72} + printf "+" echo - echo "INPUT tests" - printf "%0.s-" {1..74} + printf "| %-28s INPUT tests %-29s |" + echo + printf "+" + printf "%0.s-" {1..72} + printf "+" echo # set INPUT settings - ip -netns $ioam_node_alpha ioam namespace del 123 + ip -netns $ioam_node_alpha ioam namespace del 123 &>/dev/null - for t in $TESTS_INPUT + echo + echo "Inline mode" + for test in $TESTS_INPUT do - $t "inline" - [ $encap_tests = 0 ] && $t "encap" + $test "inline" + done + + echo + echo "Encap mode" + for test in $TESTS_INPUT + do + $test "encap" done # clean INPUT settings - ip -netns $ioam_node_alpha ioam namespace add 123 \ - data ${ALPHA[6]} wide ${ALPHA[7]} - ip -netns $ioam_node_alpha ioam namespace set 123 schema ${ALPHA[8]} - ip -netns $ioam_node_alpha route change db01::/64 dev veth0 + ip -netns $ioam_node_alpha \ + ioam namespace add 123 data ${ALPHA[6]} wide ${ALPHA[7]} &>/dev/null + ip -netns $ioam_node_alpha \ + ioam namespace set 123 schema ${ALPHA[8]} &>/dev/null echo - printf "%0.s-" {1..74} + printf "+" + printf "%0.s-" {1..72} + printf "+" echo - echo "GLOBAL tests" - printf "%0.s-" {1..74} + printf "| %-30s Results %-31s |" + echo + printf "+" + printf "%0.s-" {1..72} + printf "+" echo - for t in $TESTS_GLOBAL - do - $t "inline" - [ $encap_tests = 0 ] && $t "encap" - done - echo - log_results + echo "- Passed: ${npassed}" + echo "- Skipped: ${nskipped}" + echo "- Failed: ${nfailed}" + echo } bit2type=( 0x800000 0x400000 0x200000 0x100000 0x080000 0x040000 0x020000 0x010000 0x008000 0x004000 0x002000 0x001000 0x000800 0x000400 0x000200 0x000100 - 0x000080 0x000040 0x000020 0x000010 0x000008 0x000004 0x000002 + 0x000080 0x000040 0x000020 0x000010 0x000008 0x000004 0x000002 0x000001 ) -bit2size=( 4 4 4 4 4 4 4 4 8 8 8 4 4 4 4 4 4 4 4 4 4 4 4 ) +bit2size=( 4 4 4 4 4 4 4 4 8 8 8 4 4 4 4 4 4 4 4 4 4 4 4 0 ) 
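+# Each bit of an IOAM trace type selects one data field: bit2type maps a bit
+# index to its mask, bit2size to the field size in octets (bit 22, the schema
+# bit, additionally grows with the attached schema, which the tests below
+# account for). A pre-allocated trace must be sized as the sum of the selected
+# fields. Illustrative helper only (hypothetical, not used by the tests):
+#
+#	trace_size_for_type()
+#	{
+#		local type=$1 total=0 i
+#
+#		for i in {0..23}; do
+#			[ $((type & bit2type[i])) -ne 0 ] && \
+#				total=$((total + bit2size[i]))
+#		done
+#		echo $total
+#	}
+#
+#	trace_size_for_type 0xC00000   # bits 0-1 set -> 4 + 4 = 8 octets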
################################################################################ # # -# OUTPUT tests # +# LOCAL tests # # # -# Two nodes (sender/receiver), IOAM disabled on ingress for the receiver. # ################################################################################ -out_undef_ns() +local_sysctl_ioam_id() +{ + ############################################################################## + # Make sure the sysctl "net.ipv6.ioam6_id" works as expected. # + ############################################################################## + local desc="Sysctl net.ipv6.ioam6_id" + + [ ! -z $1 ] && return + + ip netns exec $ioam_node_alpha \ + sysctl net.ipv6.ioam6_id 2>/dev/null | grep -wq ${ALPHA[0]} + + [ $? == 0 ] && log_test_passed "${desc}" || log_test_failed "${desc}" +} + +local_sysctl_ioam_id_wide() { ############################################################################## - # Make sure that the encap node won't fill the trace if the chosen IOAM # - # namespace is not configured locally. # + # Make sure the sysctl "net.ipv6.ioam6_id_wide" works as expected. # ############################################################################## - local desc="Unknown IOAM namespace" + local desc="Sysctl net.ipv6.ioam6_id_wide" - [ "$1" = "encap" ] && mode="$1 tundst db01::1" || mode="$1" - [ "$1" = "encap" ] && ip -netns $ioam_node_beta link set ip6tnl0 up + [ ! -z $1 ] && return - ip -netns $ioam_node_alpha route change db01::/64 encap ioam6 mode $mode \ - trace prealloc type 0x800000 ns 0 size 4 dev veth0 + ip netns exec $ioam_node_alpha \ + sysctl net.ipv6.ioam6_id_wide 2>/dev/null | grep -wq ${ALPHA[1]} - run_test ${FUNCNAME[0]} "${desc} ($1 mode)" $ioam_node_alpha $ioam_node_beta \ - db01::1 0x800000 0 $1 + [ $? == 0 ] && log_test_passed "${desc}" || log_test_failed "${desc}" +} - [ "$1" = "encap" ] && ip -netns $ioam_node_beta link set ip6tnl0 down +local_sysctl_ioam_intf_id() +{ + ############################################################################## + # Make sure the sysctl "net.ipv6.conf.XX.ioam6_id" works as expected. # + ############################################################################## + local desc="Sysctl net.ipv6.conf.XX.ioam6_id" + + [ ! -z $1 ] && return + + ip netns exec $ioam_node_alpha \ + sysctl net.ipv6.conf.veth0.ioam6_id 2>/dev/null | grep -wq ${ALPHA[4]} + + [ $? == 0 ] && log_test_passed "${desc}" || log_test_failed "${desc}" } -out_no_room() +local_sysctl_ioam_intf_id_wide() { ############################################################################## - # Make sure that the encap node won't fill the trace and will set the # - # Overflow flag since there is no room enough for its data. # + # Make sure the sysctl "net.ipv6.conf.XX.ioam6_id_wide" works as expected. # ############################################################################## - local desc="Missing trace room" + local desc="Sysctl net.ipv6.conf.XX.ioam6_id_wide" + + [ ! -z $1 ] && return + + ip netns exec $ioam_node_alpha \ + sysctl net.ipv6.conf.veth0.ioam6_id_wide 2>/dev/null | grep -wq ${ALPHA[5]} + + [ $? == 0 ] && log_test_passed "${desc}" || log_test_failed "${desc}" +} - [ "$1" = "encap" ] && mode="$1 tundst db01::1" || mode="$1" - [ "$1" = "encap" ] && ip -netns $ioam_node_beta link set ip6tnl0 up +local_sysctl_ioam_intf_enabled() +{ + ############################################################################## + # Make sure the sysctl "net.ipv6.conf.XX.ioam6_enabled" works as expected. 
# + ############################################################################## + local desc="Sysctl net.ipv6.conf.XX.ioam6_enabled" - ip -netns $ioam_node_alpha route change db01::/64 encap ioam6 mode $mode \ - trace prealloc type 0xc00000 ns 123 size 4 dev veth0 + [ ! -z $1 ] && return - run_test ${FUNCNAME[0]} "${desc} ($1 mode)" $ioam_node_alpha $ioam_node_beta \ - db01::1 0xc00000 123 $1 + ip netns exec $ioam_node_beta \ + sysctl net.ipv6.conf.veth0.ioam6_enabled 2>/dev/null | grep -wq 1 - [ "$1" = "encap" ] && ip -netns $ioam_node_beta link set ip6tnl0 down + [ $? == 0 ] && log_test_passed "${desc}" || log_test_failed "${desc}" } -out_bits() +local_ioam_namespace() { ############################################################################## - # Make sure that, for each trace type bit, the encap node will either: # - # (i) fill the trace with its data when it is a supported bit # - # (ii) not fill the trace with its data when it is an unsupported bit # + # Make sure the creation of an IOAM Namespace works as expected. # ############################################################################## - local desc="Trace type with bit <n> only" + local desc="Create an IOAM Namespace" - local tmp=${bit2size[22]} - bit2size[22]=$(( $tmp + ${#ALPHA[9]} + ((4 - (${#ALPHA[9]} % 4)) % 4) )) + [ ! -z $1 ] && return - [ "$1" = "encap" ] && mode="$1 tundst db01::1" || mode="$1" - [ "$1" = "encap" ] && ip -netns $ioam_node_beta link set ip6tnl0 up + ip -netns $ioam_node_alpha \ + ioam namespace show 2>/dev/null | grep -wq 123 + local ret=$? - for i in {0..22} - do - ip -netns $ioam_node_alpha route change db01::/64 encap ioam6 mode $mode \ - trace prealloc type ${bit2type[$i]} ns 123 size ${bit2size[$i]} \ - dev veth0 &>/dev/null + ip -netns $ioam_node_alpha \ + ioam namespace show 2>/dev/null | grep -wq ${ALPHA[6]} + ret=$((ret + $?)) - local cmd_res=$? - local descr="${desc/<n>/$i}" + ip -netns $ioam_node_alpha \ + ioam namespace show 2>/dev/null | grep -wq ${ALPHA[7]} + ret=$((ret + $?)) + + [ $ret == 0 ] && log_test_passed "${desc}" || log_test_failed "${desc}" +} + +local_ioam_schema() +{ + ############################################################################## + # Make sure the creation of an IOAM Schema works as expected. # + ############################################################################## + local desc="Create an IOAM Schema" + + [ ! -z $1 ] && return + + ip -netns $ioam_node_alpha \ + ioam schema show 2>/dev/null | grep -wq ${ALPHA[8]} + local ret=$? + + local sc_data=$( + for i in `seq 0 $((${#ALPHA[9]}-1))` + do + chr=${ALPHA[9]:i:1} + printf "%x " "'${chr}" + done + ) + + ip -netns $ioam_node_alpha \ + ioam schema show 2>/dev/null | grep -q "$sc_data" + ret=$((ret + $?)) + + [ $ret == 0 ] && log_test_passed "${desc}" || log_test_failed "${desc}" +} + +local_ioam_schema_namespace() +{ + ############################################################################## + # Make sure the binding of a Schema to a Namespace works as expected. # + ############################################################################## + local desc="Bind an IOAM Schema to an IOAM Namespace" + + [ ! -z $1 ] && return + + ip -netns $ioam_node_alpha \ + ioam namespace show 2>/dev/null | grep -wq ${ALPHA[8]} + local ret=$? 
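A note on the schema-data check in local_ioam_schema above: the quote in printf "%x " "'${chr}" relies on the POSIX rule that a numeric conversion whose argument begins with a quote yields the code point of the following character, so the loop renders the schema string as the space-separated hex bytes that "ip ioam schema show" prints. A standalone sketch of the same trick (to_hex is a hypothetical helper, not part of the script):

    to_hex() {
        local s=$1 i
        for ((i = 0; i < ${#s}; i++))
        do
            printf "%x " "'${s:i:1}"   # 'c -> numeric (ASCII) value of c
        done
        echo
    }
    to_hex "abc"   # prints: 61 62 63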
+ + ip -netns $ioam_node_alpha \ + ioam schema show 2>/dev/null | grep -wq 123 + ret=$((ret + $?)) + + [ $ret == 0 ] && log_test_passed "${desc}" || log_test_failed "${desc}" +} + +local_route_ns() +{ + ############################################################################## + # Make sure the Namespace-ID is always provided, whatever the mode. # + ############################################################################## + local desc="Mandatory Namespace-ID" + local mode + + [ -z $1 ] && return + + [ "$1" == "encap" ] && mode="$1 tundst 2001:db8:2::2" || mode="$1" + + ip -netns $ioam_node_alpha \ + route change 2001:db8:2::/64 \ + encap ioam6 mode $mode trace prealloc type 0x800000 size 4 \ + via 2001:db8:1::1 dev veth0 &>/dev/null + local ret1=$? + + ip -netns $ioam_node_alpha \ + route change 2001:db8:2::/64 \ + encap ioam6 mode $mode trace prealloc type 0x800000 ns 0 size 4 \ + via 2001:db8:1::1 dev veth0 &>/dev/null + local ret2=$? + + [[ $ret1 == 0 || $ret2 != 0 ]] && log_test_failed "${desc}" \ + || log_test_passed "${desc}" + + ip -netns $ioam_node_alpha \ + route change 2001:db8:2::/64 via 2001:db8:1::1 dev veth0 &>/dev/null +} + +local_route_tunsrc() +{ + ############################################################################## + # Make sure the Tunnel Source is only (and possibly) used with encap mode. # + ############################################################################## + local desc + local mode + local mode_tunsrc - if [[ $i -ge 12 && $i -le 21 ]] + [ -z $1 ] && return + + if [ "$1" == "encap" ] + then + desc="Optional Tunnel Source" + mode="$1 tundst 2001:db8:2::2" + mode_tunsrc="$1 tunsrc 2001:db8:1::50 tundst 2001:db8:2::2" + else + desc="Unneeded Tunnel Source" + mode="$1" + mode_tunsrc="$1 tunsrc 2001:db8:1::50" + fi + + ip -netns $ioam_node_alpha \ + route change 2001:db8:2::/64 \ + encap ioam6 mode $mode trace prealloc type 0x800000 ns 0 size 4 \ + via 2001:db8:1::1 dev veth0 &>/dev/null + local ret1=$? + + ip -netns $ioam_node_alpha \ + route change 2001:db8:2::/64 \ + encap ioam6 mode $mode_tunsrc trace prealloc type 0x800000 ns 0 size 4 \ + via 2001:db8:1::1 dev veth0 &>/dev/null + local ret2=$? + + if [ "$1" == "encap" ] + then + [[ $ret1 != 0 || $ret2 != 0 ]] && log_test_failed "${desc}" \ + || log_test_passed "${desc}" + else + [[ $ret1 != 0 || $ret2 == 0 ]] && log_test_failed "${desc}" \ + || log_test_passed "${desc}" + fi + + ip -netns $ioam_node_alpha \ + route change 2001:db8:2::/64 via 2001:db8:1::1 dev veth0 &>/dev/null +} + +local_route_tundst() +{ + ############################################################################## + # Make sure the Tunnel Destination is only (and always) used with encap mode.# + ############################################################################## + local desc + + [ -z $1 ] && return + + [ "$1" == "encap" ] && desc="Mandatory Tunnel Destination" \ + || desc="Unneeded Tunnel Destination" + + local mode="$1" + local mode_tundst="$1 tundst 2001:db8:2::2" + + ip -netns $ioam_node_alpha \ + route change 2001:db8:2::/64 \ + encap ioam6 mode $mode trace prealloc type 0x800000 ns 0 size 4 \ + via 2001:db8:1::1 dev veth0 &>/dev/null + local ret1=$? + + ip -netns $ioam_node_alpha \ + route change 2001:db8:2::/64 \ + encap ioam6 mode $mode_tundst trace prealloc type 0x800000 ns 0 size 4 \ + via 2001:db8:1::1 dev veth0 &>/dev/null + local ret2=$? 
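The local_route_* tests above and below all share one pattern: issue one ip route change that the kernel must reject and one that it must accept, record both exit codes, and fail the test if either has the wrong sign. A condensed sketch of that pattern for the inline/tundst case, under the same topology (check_rejected and check_accepted are hypothetical helpers):

    check_rejected() { ! "$@" &>/dev/null; }
    check_accepted() {   "$@" &>/dev/null; }

    # Inline mode must refuse a tundst, and must work without one.
    check_rejected ip -netns "$ioam_node_alpha" route change 2001:db8:2::/64 \
            encap ioam6 mode inline tundst 2001:db8:2::2 \
            trace prealloc type 0x800000 ns 0 size 4 \
            via 2001:db8:1::1 dev veth0 &&
    check_accepted ip -netns "$ioam_node_alpha" route change 2001:db8:2::/64 \
            encap ioam6 mode inline \
            trace prealloc type 0x800000 ns 0 size 4 \
            via 2001:db8:1::1 dev veth0 &&
        log_test_passed "Unneeded Tunnel Destination" ||
        log_test_failed "Unneeded Tunnel Destination"
    # (the real tests then restore the plain route via 2001:db8:1::1)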
+ + if [ "$1" == "encap" ] + then + [[ $ret1 == 0 || $ret2 != 0 ]] && log_test_failed "${desc}" \ + || log_test_passed "${desc}" + else + [[ $ret1 != 0 || $ret2 == 0 ]] && log_test_failed "${desc}" \ + || log_test_passed "${desc}" + fi + + ip -netns $ioam_node_alpha \ + route change 2001:db8:2::/64 via 2001:db8:1::1 dev veth0 &>/dev/null +} + +local_route_trace_type() +{ + ############################################################################## + # Make sure the Trace Type is always provided, whatever the mode. # + ############################################################################## + local desc="Mandatory Trace Type" + local mode + + [ -z $1 ] && return + + [ "$1" == "encap" ] && mode="$1 tundst 2001:db8:2::2" || mode="$1" + + ip -netns $ioam_node_alpha \ + route change 2001:db8:2::/64 \ + encap ioam6 mode $mode trace prealloc ns 0 size 4 \ + via 2001:db8:1::1 dev veth0 &>/dev/null + local ret1=$? + + ip -netns $ioam_node_alpha \ + route change 2001:db8:2::/64 \ + encap ioam6 mode $mode trace prealloc type 0x800000 ns 0 size 4 \ + via 2001:db8:1::1 dev veth0 &>/dev/null + local ret2=$? + + [[ $ret1 == 0 || $ret2 != 0 ]] && log_test_failed "${desc}" \ + || log_test_passed "${desc}" + + ip -netns $ioam_node_alpha \ + route change 2001:db8:2::/64 via 2001:db8:1::1 dev veth0 &>/dev/null +} + +local_route_trace_size() +{ + ############################################################################## + # Make sure the Trace Size is always provided, whatever the mode. # + ############################################################################## + local desc="Mandatory Trace Size" + local mode + + [ -z $1 ] && return + + [ "$1" == "encap" ] && mode="$1 tundst 2001:db8:2::2" || mode="$1" + + ip -netns $ioam_node_alpha \ + route change 2001:db8:2::/64 \ + encap ioam6 mode $mode trace prealloc type 0x800000 ns 0 \ + via 2001:db8:1::1 dev veth0 &>/dev/null + local ret1=$? + + ip -netns $ioam_node_alpha \ + route change 2001:db8:2::/64 \ + encap ioam6 mode $mode trace prealloc type 0x800000 ns 0 size 4 \ + via 2001:db8:1::1 dev veth0 &>/dev/null + local ret2=$? + + [[ $ret1 == 0 || $ret2 != 0 ]] && log_test_failed "${desc}" \ + || log_test_passed "${desc}" + + ip -netns $ioam_node_alpha \ + route change 2001:db8:2::/64 via 2001:db8:1::1 dev veth0 &>/dev/null +} + +local_route_trace_type_bits() +{ + ############################################################################## + # Make sure only allowed bits (0-11 and 22) are accepted. # + ############################################################################## + local desc="Trace Type bits" + local mode + + [ -z $1 ] && return + + [ "$1" == "encap" ] && mode="$1 tundst 2001:db8:2::2" || mode="$1" + + local i + for i in {0..23} + do + ip -netns $ioam_node_alpha \ + route change 2001:db8:2::/64 \ + encap ioam6 mode $mode trace prealloc type ${bit2type[$i]} ns 0 size 4 \ + via 2001:db8:1::1 dev veth0 &>/dev/null + + if [[ ($? == 0 && (($i -ge 12 && $i -le 21) || $i == 23)) || + ($? 
!= 0 && (($i -ge 0 && $i -le 11) || $i == 22)) ]] then - if [ $cmd_res != 0 ] - then - npassed=$((npassed+1)) - log_test_passed "$descr ($1 mode)" - else - nfailed=$((nfailed+1)) - log_test_failed "$descr ($1 mode)" - fi - else - run_test "out_bit$i" "$descr ($1 mode)" $ioam_node_alpha \ - $ioam_node_beta db01::1 ${bit2type[$i]} 123 $1 + local err=1 + break fi done - [ "$1" = "encap" ] && ip -netns $ioam_node_beta link set ip6tnl0 down + [ -z $err ] && log_test_passed "${desc}" || log_test_failed "${desc}" - bit2size[22]=$tmp + ip -netns $ioam_node_alpha \ + route change 2001:db8:2::/64 via 2001:db8:1::1 dev veth0 &>/dev/null } -out_full_supp_trace() +local_route_trace_size_values() { ############################################################################## - # Make sure that the encap node will correctly fill a full trace. Be careful,# - # "full trace" here does NOT mean all bits (only supported ones). # + # Make sure only allowed sizes (multiples of four in [4,244]) are accepted. # ############################################################################## - local desc="Full supported trace" + local desc="Trace Size values" + local mode - [ "$1" = "encap" ] && mode="$1 tundst db01::1" || mode="$1" - [ "$1" = "encap" ] && ip -netns $ioam_node_beta link set ip6tnl0 up + [ -z $1 ] && return - ip -netns $ioam_node_alpha route change db01::/64 encap ioam6 mode $mode \ - trace prealloc type 0xfff002 ns 123 size 100 dev veth0 + [ "$1" == "encap" ] && mode="$1 tundst 2001:db8:2::2" || mode="$1" - run_test ${FUNCNAME[0]} "${desc} ($1 mode)" $ioam_node_alpha $ioam_node_beta \ - db01::1 0xfff002 123 $1 + # we also try the next multiple of four after the MAX to check it's refused + local i + for i in {0..248} + do + ip -netns $ioam_node_alpha \ + route change 2001:db8:2::/64 \ + encap ioam6 mode $mode trace prealloc type 0x800000 ns 0 size $i \ + via 2001:db8:1::1 dev veth0 &>/dev/null - [ "$1" = "encap" ] && ip -netns $ioam_node_beta link set ip6tnl0 down + if [[ ($? == 0 && ($i == 0 || $i == 248 || $(( $i % 4 )) != 0)) || + ($? != 0 && $i != 0 && $i != 248 && $(( $i % 4 )) == 0) ]] + then + local err=1 + break + fi + done + + [ -z $err ] && log_test_passed "${desc}" || log_test_failed "${desc}" + + ip -netns $ioam_node_alpha \ + route change 2001:db8:2::/64 via 2001:db8:1::1 dev veth0 &>/dev/null } ################################################################################ # # -# INPUT tests # +# OUTPUT tests # # # -# Two nodes (sender/receiver), the sender MUST NOT fill the trace upon # -# insertion -> the IOAM namespace configured on the sender is removed # -# and is used in the inserted trace to force the sender not to fill it. # ################################################################################ -in_undef_ns() +output_undef_ns() { ############################################################################## - # Make sure that the receiving node won't fill the trace if the related IOAM # - # namespace is not configured locally. # + # Make sure an IOAM encapsulating node does NOT fill the trace when the # + # corresponding IOAM Namespace-ID is not configured locally. 
# ############################################################################## - local desc="Unknown IOAM namespace" + local desc="Unknown IOAM Namespace-ID" + local ns=0 + local tr_type=0x800000 + local tr_size=4 + local mode="$1" + local saddr="2001:db8:1::2" + + if [ "$1" == "encap" ] + then + if [ $encap_tests != 0 ] + then + log_test_skipped "${desc}" + return + fi + + if [ "$2" == "tunsrc" ] + then + saddr="2001:db8:1::50" + mode+=" tunsrc 2001:db8:1::50" + fi + + mode+=" tundst 2001:db8:2::2" + ip -netns $ioam_node_gamma link set ip6tnl0 up &>/dev/null + fi - [ "$1" = "encap" ] && mode="$1 tundst db01::1" || mode="$1" - [ "$1" = "encap" ] && ip -netns $ioam_node_beta link set ip6tnl0 up + ip -netns $ioam_node_alpha \ + route change 2001:db8:2::/64 \ + encap ioam6 mode $mode trace prealloc type $tr_type ns $ns size $tr_size \ + via 2001:db8:1::1 dev veth0 &>/dev/null - ip -netns $ioam_node_alpha route change db01::/64 encap ioam6 mode $mode \ - trace prealloc type 0x800000 ns 0 size 4 dev veth0 + if [ $? == 0 ] + then + run_test ${FUNCNAME[0]} "${desc}" $saddr $tr_type $tr_size $ns $1 + else + log_test_failed "${desc}" + fi - run_test ${FUNCNAME[0]} "${desc} ($1 mode)" $ioam_node_alpha $ioam_node_beta \ - db01::1 0x800000 0 $1 + ip -netns $ioam_node_alpha \ + route change 2001:db8:2::/64 via 2001:db8:1::1 dev veth0 &>/dev/null - [ "$1" = "encap" ] && ip -netns $ioam_node_beta link set ip6tnl0 down + [ "$1" == "encap" ] && ip -netns $ioam_node_gamma \ + link set ip6tnl0 down &>/dev/null } -in_no_room() +output_no_room() { ############################################################################## - # Make sure that the receiving node won't fill the trace and will set the # - # Overflow flag if there is no room enough for its data. # + # Make sure an IOAM encapsulating node does NOT fill the trace AND sets the # + # Overflow flag when there is not enough room for its data. # ############################################################################## - local desc="Missing trace room" + local desc="Missing room for data" + local ns=123 + local tr_type=0xc00000 + local tr_size=4 + local mode="$1" + local saddr="2001:db8:1::2" + + if [ "$1" == "encap" ] + then + if [ $encap_tests != 0 ] + then + log_test_skipped "${desc}" + return + fi - [ "$1" = "encap" ] && mode="$1 tundst db01::1" || mode="$1" - [ "$1" = "encap" ] && ip -netns $ioam_node_beta link set ip6tnl0 up + if [ "$2" == "tunsrc" ] + then + saddr="2001:db8:1::50" + mode+=" tunsrc 2001:db8:1::50" + fi - ip -netns $ioam_node_alpha route change db01::/64 encap ioam6 mode $mode \ - trace prealloc type 0xc00000 ns 123 size 4 dev veth0 + mode+=" tundst 2001:db8:2::2" + ip -netns $ioam_node_gamma link set ip6tnl0 up &>/dev/null + fi - run_test ${FUNCNAME[0]} "${desc} ($1 mode)" $ioam_node_alpha $ioam_node_beta \ - db01::1 0xc00000 123 $1 + ip -netns $ioam_node_alpha \ + route change 2001:db8:2::/64 \ + encap ioam6 mode $mode trace prealloc type $tr_type ns $ns size $tr_size \ + via 2001:db8:1::1 dev veth0 &>/dev/null - [ "$1" = "encap" ] && ip -netns $ioam_node_beta link set ip6tnl0 down + if [ $? 
== 0 ] + then + run_test ${FUNCNAME[0]} "${desc}" $saddr $tr_type $tr_size $ns $1 + else + log_test_failed "${desc}" + fi + + ip -netns $ioam_node_alpha \ + route change 2001:db8:2::/64 via 2001:db8:1::1 dev veth0 &>/dev/null + + [ "$1" == "encap" ] && ip -netns $ioam_node_gamma \ + link set ip6tnl0 down &>/dev/null } -in_bits() +output_no_room_oss() { ############################################################################## - # Make sure that, for each trace type bit, the receiving node will either: # - # (i) fill the trace with its data when it is a supported bit # - # (ii) not fill the trace with its data when it is an unsupported bit # + # Make sure an IOAM encapsulating node does NOT fill the trace AND sets the # + # Overflow flag when there is not enough room for the Opaque State Snapshot. # ############################################################################## - local desc="Trace type with bit <n> only" + local desc="Missing room for Opaque State Snapshot" + local ns=123 + local tr_type=0x000002 + local tr_size=4 + local mode="$1" + local saddr="2001:db8:1::2" + + if [ "$1" == "encap" ] + then + if [ $encap_tests != 0 ] + then + log_test_skipped "${desc}" + return + fi - local tmp=${bit2size[22]} - bit2size[22]=$(( $tmp + ${#BETA[9]} + ((4 - (${#BETA[9]} % 4)) % 4) )) + if [ "$2" == "tunsrc" ] + then + saddr="2001:db8:1::50" + mode+=" tunsrc 2001:db8:1::50" + fi + + mode+=" tundst 2001:db8:2::2" + ip -netns $ioam_node_gamma link set ip6tnl0 up &>/dev/null + fi + + ip -netns $ioam_node_alpha \ + route change 2001:db8:2::/64 \ + encap ioam6 mode $mode trace prealloc type $tr_type ns $ns size $tr_size \ + via 2001:db8:1::1 dev veth0 &>/dev/null + + if [ $? == 0 ] + then + run_test ${FUNCNAME[0]} "${desc}" $saddr $tr_type $tr_size $ns $1 + else + log_test_failed "${desc}" + fi + + ip -netns $ioam_node_alpha \ + route change 2001:db8:2::/64 via 2001:db8:1::1 dev veth0 &>/dev/null + + [ "$1" == "encap" ] && ip -netns $ioam_node_gamma \ + link set ip6tnl0 down &>/dev/null +} + +output_bits() +{ + ############################################################################## + # Make sure an IOAM encapsulating node implements all supported bits by # + # checking it correctly fills the trace with its data. 
# + ############################################################################## + local desc="Trace Type with supported bit <n> only" + local ns=123 + local mode="$1" + local saddr="2001:db8:1::2" - [ "$1" = "encap" ] && mode="$1 tundst db01::1" || mode="$1" - [ "$1" = "encap" ] && ip -netns $ioam_node_beta link set ip6tnl0 up + if [ "$1" == "encap" ] + then + if [ "$2" == "tunsrc" ] + then + saddr="2001:db8:1::50" + mode+=" tunsrc 2001:db8:1::50" + fi + + mode+=" tundst 2001:db8:2::2" + ip -netns $ioam_node_gamma link set ip6tnl0 up &>/dev/null + fi + + local tmp=${bit2size[22]} + bit2size[22]=$(( $tmp + ${#ALPHA[9]} + ((4 - (${#ALPHA[9]} % 4)) % 4) )) + local i for i in {0..11} {22..22} do - ip -netns $ioam_node_alpha route change db01::/64 encap ioam6 mode $mode \ - trace prealloc type ${bit2type[$i]} ns 123 size ${bit2size[$i]} \ - dev veth0 + local descr="${desc/<n>/$i}" + + if [[ "$1" == "encap" && $encap_tests != 0 ]] + then + log_test_skipped "${descr}" + continue + fi - run_test "in_bit$i" "${desc/<n>/$i} ($1 mode)" $ioam_node_alpha \ - $ioam_node_beta db01::1 ${bit2type[$i]} 123 $1 + ip -netns $ioam_node_alpha \ + route change 2001:db8:2::/64 \ + encap ioam6 mode $mode trace prealloc \ + type ${bit2type[$i]} ns $ns size ${bit2size[$i]} \ + via 2001:db8:1::1 dev veth0 &>/dev/null + + if [ $? == 0 ] + then + run_test "output_bit$i" "${descr}" $saddr \ + ${bit2type[$i]} ${bit2size[$i]} $ns $1 + else + log_test_failed "${descr}" + fi done - [ "$1" = "encap" ] && ip -netns $ioam_node_beta link set ip6tnl0 down + ip -netns $ioam_node_alpha \ + route change 2001:db8:2::/64 via 2001:db8:1::1 dev veth0 &>/dev/null + + [ "$1" == "encap" ] && ip -netns $ioam_node_gamma \ + link set ip6tnl0 down &>/dev/null bit2size[22]=$tmp } -in_oflag() +output_sizes() { ############################################################################## - # Make sure that the receiving node won't fill the trace since the Overflow # - # flag is set. # + # Make sure an IOAM encapsulating node allocates supported sizes correctly. # ############################################################################## - local desc="Overflow flag is set" + local desc="Trace Size of <n> bytes" + local ns=0 + local tr_type=0x800000 + local mode="$1" + local saddr="2001:db8:1::2" - # Exception: - # Here, we need the sender to set the Overflow flag. For that, we will add - # back the IOAM namespace that was previously configured on the sender. 
- ip -netns $ioam_node_alpha ioam namespace add 123 + if [ "$1" == "encap" ] + then + if [ "$2" == "tunsrc" ] + then + saddr="2001:db8:1::50" + mode+=" tunsrc 2001:db8:1::50" + fi - [ "$1" = "encap" ] && mode="$1 tundst db01::1" || mode="$1" - [ "$1" = "encap" ] && ip -netns $ioam_node_beta link set ip6tnl0 up + mode+=" tundst 2001:db8:2::2" + ip -netns $ioam_node_gamma link set ip6tnl0 up &>/dev/null + fi - ip -netns $ioam_node_alpha route change db01::/64 encap ioam6 mode $mode \ - trace prealloc type 0xc00000 ns 123 size 4 dev veth0 + local i + for i in $(seq 4 4 244) + do + local descr="${desc/<n>/$i}" - run_test ${FUNCNAME[0]} "${desc} ($1 mode)" $ioam_node_alpha $ioam_node_beta \ - db01::1 0xc00000 123 $1 + if [[ "$1" == "encap" && $encap_tests != 0 ]] + then + log_test_skipped "${descr}" + continue + fi - [ "$1" = "encap" ] && ip -netns $ioam_node_beta link set ip6tnl0 down + ip -netns $ioam_node_alpha \ + route change 2001:db8:2::/64 \ + encap ioam6 mode $mode trace prealloc type $tr_type ns $ns size $i \ + via 2001:db8:1::1 dev veth0 &>/dev/null - # And we clean the exception for this test to get things back to normal for - # other INPUT tests - ip -netns $ioam_node_alpha ioam namespace del 123 + if [ $? == 0 ] + then + run_test "output_size$i" "${descr}" $saddr $tr_type $i $ns $1 + else + log_test_failed "${descr}" + fi + done + + ip -netns $ioam_node_alpha \ + route change 2001:db8:2::/64 via 2001:db8:1::1 dev veth0 &>/dev/null + + [ "$1" == "encap" ] && ip -netns $ioam_node_gamma \ + link set ip6tnl0 down &>/dev/null } -in_full_supp_trace() +output_full_supp_trace() { ############################################################################## - # Make sure that the receiving node will correctly fill a full trace. Be # - # careful, "full trace" here does NOT mean all bits (only supported ones). # + # Make sure an IOAM encapsulating node correctly fills a trace when all # + # supported bits are set. # ############################################################################## local desc="Full supported trace" + local ns=123 + local tr_type=0xfff002 + local tr_size + local mode="$1" + local saddr="2001:db8:1::2" - [ "$1" = "encap" ] && mode="$1 tundst db01::1" || mode="$1" - [ "$1" = "encap" ] && ip -netns $ioam_node_beta link set ip6tnl0 up + if [ "$1" == "encap" ] + then + if [ $encap_tests != 0 ] + then + log_test_skipped "${desc}" + return + fi - ip -netns $ioam_node_alpha route change db01::/64 encap ioam6 mode $mode \ - trace prealloc type 0xfff002 ns 123 size 80 dev veth0 + if [ "$2" == "tunsrc" ] + then + saddr="2001:db8:1::50" + mode+=" tunsrc 2001:db8:1::50" + fi - run_test ${FUNCNAME[0]} "${desc} ($1 mode)" $ioam_node_alpha $ioam_node_beta \ - db01::1 0xfff002 123 $1 + mode+=" tundst 2001:db8:2::2" + ip -netns $ioam_node_gamma link set ip6tnl0 up &>/dev/null + fi - [ "$1" = "encap" ] && ip -netns $ioam_node_beta link set ip6tnl0 down + local i + tr_size=$(( ${#ALPHA[9]} + ((4 - (${#ALPHA[9]} % 4)) % 4) )) + for i in {0..11} {22..22} + do + tr_size=$((tr_size + bit2size[$i])) + done + + ip -netns $ioam_node_alpha \ + route change 2001:db8:2::/64 \ + encap ioam6 mode $mode trace prealloc type $tr_type ns $ns size $tr_size \ + via 2001:db8:1::1 dev veth0 &>/dev/null + + if [ $? 
== 0 ] + then + run_test ${FUNCNAME[0]} "${desc}" $saddr $tr_type $tr_size $ns $1 + else + log_test_failed "${desc}" + fi + + ip -netns $ioam_node_alpha \ + route change 2001:db8:2::/64 via 2001:db8:1::1 dev veth0 &>/dev/null + + [ "$1" == "encap" ] && ip -netns $ioam_node_gamma \ + link set ip6tnl0 down &>/dev/null } ################################################################################ # # -# GLOBAL tests # +# INPUT tests # # # -# Three nodes (sender/router/receiver), IOAM fully enabled on every node. # ################################################################################ -fwd_full_supp_trace() +input_undef_ns() +{ + ############################################################################## + # Make sure an IOAM node does NOT fill the trace when the corresponding IOAM # + # Namespace-ID is not configured locally. # + ############################################################################## + local desc="Unknown IOAM Namespace-ID" + local ns=0 + local tr_type=0x800000 + local tr_size=4 + local mode="$1" + + if [ "$1" == "encap" ] + then + if [ $encap_tests != 0 ] + then + log_test_skipped "${desc}" + return + fi + + mode+=" tundst 2001:db8:2::2" + ip -netns $ioam_node_gamma link set ip6tnl0 up &>/dev/null + fi + + ip -netns $ioam_node_alpha \ + route change 2001:db8:2::/64 \ + encap ioam6 mode $mode trace prealloc type $tr_type ns $ns size $tr_size \ + via 2001:db8:1::1 dev veth0 &>/dev/null + + if [ $? == 0 ] + then + run_test ${FUNCNAME[0]} "${desc}" 2001:db8:1::2 $tr_type $tr_size $ns $1 + else + log_test_failed "${desc}" + fi + + ip -netns $ioam_node_alpha \ + route change 2001:db8:2::/64 via 2001:db8:1::1 dev veth0 &>/dev/null + + [ "$1" == "encap" ] && ip -netns $ioam_node_gamma \ + link set ip6tnl0 down &>/dev/null +} + +input_no_room() +{ + ############################################################################## + # Make sure an IOAM node does NOT fill the trace AND sets the Overflow flag # + # when there is not enough room for its data. # + ############################################################################## + local desc="Missing room for data" + local ns=123 + local tr_type=0xc00000 + local tr_size=4 + local mode="$1" + + if [ "$1" == "encap" ] + then + if [ $encap_tests != 0 ] + then + log_test_skipped "${desc}" + return + fi + + mode+=" tundst 2001:db8:2::2" + ip -netns $ioam_node_gamma link set ip6tnl0 up &>/dev/null + fi + + ip -netns $ioam_node_alpha \ + route change 2001:db8:2::/64 \ + encap ioam6 mode $mode trace prealloc type $tr_type ns $ns size $tr_size \ + via 2001:db8:1::1 dev veth0 &>/dev/null + + if [ $? == 0 ] + then + run_test ${FUNCNAME[0]} "${desc}" 2001:db8:1::2 $tr_type $tr_size $ns $1 + else + log_test_failed "${desc}" + fi + + ip -netns $ioam_node_alpha \ + route change 2001:db8:2::/64 via 2001:db8:1::1 dev veth0 &>/dev/null + + [ "$1" == "encap" ] && ip -netns $ioam_node_gamma \ + link set ip6tnl0 down &>/dev/null +} + +input_no_room_oss() +{ + ############################################################################## + # Make sure an IOAM node does NOT fill the trace AND sets the Overflow flag # + # when there is not enough room for the Opaque State Snapshot. 
# + ############################################################################## + local desc="Missing room for Opaque State Snapshot" + local ns=123 + local tr_type=0x000002 + local tr_size=4 + local mode="$1" + + if [ "$1" == "encap" ] + then + if [ $encap_tests != 0 ] + then + log_test_skipped "${desc}" + return + fi + + mode+=" tundst 2001:db8:2::2" + ip -netns $ioam_node_gamma link set ip6tnl0 up &>/dev/null + fi + + ip -netns $ioam_node_alpha \ + route change 2001:db8:2::/64 \ + encap ioam6 mode $mode trace prealloc type $tr_type ns $ns size $tr_size \ + via 2001:db8:1::1 dev veth0 &>/dev/null + + if [ $? == 0 ] + then + run_test ${FUNCNAME[0]} "${desc}" 2001:db8:1::2 $tr_type $tr_size $ns $1 + else + log_test_failed "${desc}" + fi + + ip -netns $ioam_node_alpha \ + route change 2001:db8:2::/64 via 2001:db8:1::1 dev veth0 &>/dev/null + + [ "$1" == "encap" ] && ip -netns $ioam_node_gamma \ + link set ip6tnl0 down &>/dev/null +} + +input_disabled() +{ + ############################################################################## + # Make sure an IOAM node does NOT fill the trace when IOAM is not enabled on # + # the corresponding (ingress) interface. # + ############################################################################## + local desc="IOAM disabled on ingress interface" + local ns=123 + local tr_type=0x800000 + local tr_size=4 + local mode="$1" + + if [ "$1" == "encap" ] + then + if [ $encap_tests != 0 ] + then + log_test_skipped "${desc}" + return + fi + + mode+=" tundst 2001:db8:2::2" + ip -netns $ioam_node_gamma link set ip6tnl0 up &>/dev/null + fi + + # Exception: disable IOAM on ingress interface + ip netns exec $ioam_node_beta \ + sysctl -wq net.ipv6.conf.veth0.ioam6_enabled=0 &>/dev/null + local ret=$? + + ip -netns $ioam_node_alpha \ + route change 2001:db8:2::/64 \ + encap ioam6 mode $mode trace prealloc type $tr_type ns $ns size $tr_size \ + via 2001:db8:1::1 dev veth0 &>/dev/null + ret=$((ret + $?)) + + if [ $ret == 0 ] + then + run_test ${FUNCNAME[0]} "${desc}" 2001:db8:1::2 $tr_type $tr_size $ns $1 + else + log_test_failed "${desc}" + fi + + # Clean Exception + ip netns exec $ioam_node_beta \ + sysctl -wq net.ipv6.conf.veth0.ioam6_enabled=1 &>/dev/null + + ip -netns $ioam_node_alpha \ + route change 2001:db8:2::/64 via 2001:db8:1::1 dev veth0 &>/dev/null + + [ "$1" == "encap" ] && ip -netns $ioam_node_gamma \ + link set ip6tnl0 down &>/dev/null +} + +input_oflag() +{ + ############################################################################## + # Make sure an IOAM node does NOT fill the trace when the Overflow flag is # + # set. # + ############################################################################## + local desc="Overflow flag is set" + local ns=123 + local tr_type=0xc00000 + local tr_size=4 + local mode="$1" + + if [ "$1" == "encap" ] + then + if [ $encap_tests != 0 ] + then + log_test_skipped "${desc}" + return + fi + + mode+=" tundst 2001:db8:2::2" + ip -netns $ioam_node_gamma link set ip6tnl0 up &>/dev/null + fi + + # Exception: + # Here, we need the sender to set the Overflow flag. For that, we will add + # back the IOAM namespace that was previously configured on the sender. + ip -netns $ioam_node_alpha ioam namespace add 123 &>/dev/null + local ret=$? 
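The Exception above works because of simple room arithmetic: Trace Type 0xc00000 selects bits 0 and 1, so every node must insert bit2size[0] + bit2size[1] = 8 bytes of data, yet "size 4" preallocates a single 4-byte word. A sketch of the check the encapsulating node effectively performs (nodelen and remlen are counted in 4-byte words, as in the trace header):

    nodelen=$(( (bit2size[0] + bit2size[1]) / 4 ))   # 2 words per node
    remlen=$(( 4 / 4 ))                              # size 4 -> 1 free word
    if (( remlen < nodelen ))
    then
        echo "no room: set the Overflow flag, leave the trace untouched"
    fi

With the Overflow flag already set by alpha, beta (the node under test) must not touch the trace either, which is what ioam6_parser verifies for input_oflag: overflow still set, remlen still 1.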
+ + ip -netns $ioam_node_alpha \ + route change 2001:db8:2::/64 \ + encap ioam6 mode $mode trace prealloc type $tr_type ns $ns size $tr_size \ + via 2001:db8:1::1 dev veth0 &>/dev/null + ret=$((ret + $?)) + + if [ $ret == 0 ] + then + run_test ${FUNCNAME[0]} "${desc}" 2001:db8:1::2 $tr_type $tr_size $ns $1 + else + log_test_failed "${desc}" + fi + + # Clean Exception + ip -netns $ioam_node_alpha ioam namespace del 123 &>/dev/null + + ip -netns $ioam_node_alpha \ + route change 2001:db8:2::/64 via 2001:db8:1::1 dev veth0 &>/dev/null + + [ "$1" == "encap" ] && ip -netns $ioam_node_gamma \ + link set ip6tnl0 down &>/dev/null +} + +input_bits() +{ + ############################################################################## + # Make sure an IOAM node implements all supported bits by checking it # + # correctly fills the trace with its data. # + ############################################################################## + local desc="Trace Type with supported bit <n> only" + local ns=123 + local mode="$1" + + if [ "$1" == "encap" ] + then + mode+=" tundst 2001:db8:2::2" + ip -netns $ioam_node_gamma link set ip6tnl0 up &>/dev/null + fi + + local tmp=${bit2size[22]} + bit2size[22]=$(( $tmp + ${#BETA[9]} + ((4 - (${#BETA[9]} % 4)) % 4) )) + + local i + for i in {0..11} {22..22} + do + local descr="${desc/<n>/$i}" + + if [[ "$1" == "encap" && $encap_tests != 0 ]] + then + log_test_skipped "${descr}" + continue + fi + + ip -netns $ioam_node_alpha \ + route change 2001:db8:2::/64 \ + encap ioam6 mode $mode trace prealloc \ + type ${bit2type[$i]} ns $ns size ${bit2size[$i]} \ + via 2001:db8:1::1 dev veth0 &>/dev/null + + if [ $? == 0 ] + then + run_test "input_bit$i" "${descr}" 2001:db8:1::2 \ + ${bit2type[$i]} ${bit2size[$i]} $ns $1 + else + log_test_failed "${descr}" + fi + done + + ip -netns $ioam_node_alpha \ + route change 2001:db8:2::/64 via 2001:db8:1::1 dev veth0 &>/dev/null + + [ "$1" == "encap" ] && ip -netns $ioam_node_gamma \ + link set ip6tnl0 down &>/dev/null + + bit2size[22]=$tmp +} + +input_sizes() { ############################################################################## - # Make sure that all three nodes correctly filled the full supported trace # - # by checking that the trace data is consistent with the predefined config. # + # Make sure an IOAM node handles all supported sizes correctly. # ############################################################################## - local desc="Forward - Full supported trace" + local desc="Trace Size of <n> bytes" + local ns=123 + local tr_type=0x800000 + local mode="$1" + + if [ "$1" == "encap" ] + then + mode+=" tundst 2001:db8:2::2" + ip -netns $ioam_node_gamma link set ip6tnl0 up &>/dev/null + fi - [ "$1" = "encap" ] && mode="$1 tundst db02::2" || mode="$1" - [ "$1" = "encap" ] && ip -netns $ioam_node_gamma link set ip6tnl0 up + local i + for i in $(seq 4 4 244) + do + local descr="${desc/<n>/$i}" - ip -netns $ioam_node_alpha route change db02::/64 encap ioam6 mode $mode \ - trace prealloc type 0xfff002 ns 123 size 244 via db01::1 dev veth0 + if [[ "$1" == "encap" && $encap_tests != 0 ]] + then + log_test_skipped "${descr}" + continue + fi - run_test ${FUNCNAME[0]} "${desc} ($1 mode)" $ioam_node_alpha $ioam_node_gamma \ - db02::2 0xfff002 123 $1 + ip -netns $ioam_node_alpha \ + route change 2001:db8:2::/64 \ + encap ioam6 mode $mode trace prealloc type $tr_type ns $ns size $i \ + via 2001:db8:1::1 dev veth0 &>/dev/null - [ "$1" = "encap" ] && ip -netns $ioam_node_gamma link set ip6tnl0 down + if [ $? 
== 0 ] + then + run_test "input_size$i" "${descr}" 2001:db8:1::2 $tr_type $i $ns $1 + else + log_test_failed "${descr}" + fi + done + + ip -netns $ioam_node_alpha \ + route change 2001:db8:2::/64 via 2001:db8:1::1 dev veth0 &>/dev/null + + [ "$1" == "encap" ] && ip -netns $ioam_node_gamma \ + link set ip6tnl0 down &>/dev/null +} + +input_full_supp_trace() +{ + ############################################################################## + # Make sure an IOAM node correctly fills a trace when all supported bits are # + # set. # + ############################################################################## + local desc="Full supported trace" + local ns=123 + local tr_type=0xfff002 + local tr_size + local mode="$1" + + if [ "$1" == "encap" ] + then + if [ $encap_tests != 0 ] + then + log_test_skipped "${desc}" + return + fi + + mode+=" tundst 2001:db8:2::2" + ip -netns $ioam_node_gamma link set ip6tnl0 up &>/dev/null + fi + + local i + tr_size=$(( ${#BETA[9]} + ((4 - (${#BETA[9]} % 4)) % 4) )) + for i in {0..11} {22..22} + do + tr_size=$((tr_size + bit2size[$i])) + done + + ip -netns $ioam_node_alpha \ + route change 2001:db8:2::/64 \ + encap ioam6 mode $mode trace prealloc type $tr_type ns $ns size $tr_size \ + via 2001:db8:1::1 dev veth0 &>/dev/null + + if [ $? == 0 ] + then + run_test ${FUNCNAME[0]} "${desc}" 2001:db8:1::2 $tr_type $tr_size $ns $1 + else + log_test_failed "${desc}" + fi + + ip -netns $ioam_node_alpha \ + route change 2001:db8:2::/64 via 2001:db8:1::1 dev veth0 &>/dev/null + + [ "$1" == "encap" ] && ip -netns $ioam_node_gamma \ + link set ip6tnl0 down &>/dev/null } @@ -742,30 +1655,29 @@ fwd_full_supp_trace() ################################################################################ npassed=0 +nskipped=0 nfailed=0 if [ "$(id -u)" -ne 0 ] then - echo "SKIP: Need root privileges" + echo "SKIP: Need root privileges." exit $ksft_skip fi if [ ! -x "$(command -v ip)" ] then - echo "SKIP: Could not run test without ip tool" - exit $ksft_skip -fi - -ip ioam &>/dev/null -if [ $? = 1 ] -then - echo "SKIP: iproute2 too old, missing ioam command" + echo "SKIP: Could not run test without ip tool." 
exit $ksft_skip fi check_kernel_compatibility - -cleanup &>/dev/null setup run -cleanup &>/dev/null +cleanup + +if [ $nfailed != 0 ] +then + exit $ksft_fail +fi + +exit $ksft_pass diff --git a/tools/testing/selftests/net/ioam6_parser.c b/tools/testing/selftests/net/ioam6_parser.c index 895e5bb5044b..de4b5c9e8a74 100644 --- a/tools/testing/selftests/net/ioam6_parser.c +++ b/tools/testing/selftests/net/ioam6_parser.c @@ -8,8 +8,10 @@ #include <errno.h> #include <limits.h> #include <linux/const.h> +#include <linux/if_ether.h> #include <linux/ioam6.h> #include <linux/ipv6.h> +#include <stdbool.h> #include <stdlib.h> #include <string.h> #include <unistd.h> @@ -40,7 +42,7 @@ static struct ioam_config node1 = { .egr_id = 101, .ingr_wide = 0xffffffff, /* default value */ .egr_wide = 101101, - .ns_data = 0xdeadbee0, + .ns_data = 0xdeadbeef, .ns_wide = 0xcafec0caf00dc0de, .sc_id = 777, .sc_data = "something that will be 4n-aligned", @@ -54,33 +56,22 @@ static struct ioam_config node2 = { .egr_id = 202, .ingr_wide = 201201, .egr_wide = 202202, - .ns_data = 0xdeadbee1, - .ns_wide = 0xcafec0caf11dc0de, - .sc_id = 666, - .sc_data = "Hello there -Obi", - .hlim = 63, -}; - -static struct ioam_config node3 = { - .id = 3, - .wide = 33333333, - .ingr_id = 301, - .egr_id = 0xffff, /* default value */ - .ingr_wide = 301301, - .egr_wide = 0xffffffff, /* default value */ - .ns_data = 0xdeadbee2, - .ns_wide = 0xcafec0caf22dc0de, + .ns_data = 0xffffffff, /* default value */ + .ns_wide = 0xffffffffffffffff, /* default value */ .sc_id = 0xffffff, /* default value */ .sc_data = NULL, - .hlim = 62, + .hlim = 63, }; enum { /********** * OUTPUT * **********/ + __TEST_OUT_MIN, + TEST_OUT_UNDEF_NS, TEST_OUT_NO_ROOM, + TEST_OUT_NO_ROOM_OSS, TEST_OUT_BIT0, TEST_OUT_BIT1, TEST_OUT_BIT2, @@ -94,13 +85,80 @@ enum { TEST_OUT_BIT10, TEST_OUT_BIT11, TEST_OUT_BIT22, + TEST_OUT_SIZE4, + TEST_OUT_SIZE8, + TEST_OUT_SIZE12, + TEST_OUT_SIZE16, + TEST_OUT_SIZE20, + TEST_OUT_SIZE24, + TEST_OUT_SIZE28, + TEST_OUT_SIZE32, + TEST_OUT_SIZE36, + TEST_OUT_SIZE40, + TEST_OUT_SIZE44, + TEST_OUT_SIZE48, + TEST_OUT_SIZE52, + TEST_OUT_SIZE56, + TEST_OUT_SIZE60, + TEST_OUT_SIZE64, + TEST_OUT_SIZE68, + TEST_OUT_SIZE72, + TEST_OUT_SIZE76, + TEST_OUT_SIZE80, + TEST_OUT_SIZE84, + TEST_OUT_SIZE88, + TEST_OUT_SIZE92, + TEST_OUT_SIZE96, + TEST_OUT_SIZE100, + TEST_OUT_SIZE104, + TEST_OUT_SIZE108, + TEST_OUT_SIZE112, + TEST_OUT_SIZE116, + TEST_OUT_SIZE120, + TEST_OUT_SIZE124, + TEST_OUT_SIZE128, + TEST_OUT_SIZE132, + TEST_OUT_SIZE136, + TEST_OUT_SIZE140, + TEST_OUT_SIZE144, + TEST_OUT_SIZE148, + TEST_OUT_SIZE152, + TEST_OUT_SIZE156, + TEST_OUT_SIZE160, + TEST_OUT_SIZE164, + TEST_OUT_SIZE168, + TEST_OUT_SIZE172, + TEST_OUT_SIZE176, + TEST_OUT_SIZE180, + TEST_OUT_SIZE184, + TEST_OUT_SIZE188, + TEST_OUT_SIZE192, + TEST_OUT_SIZE196, + TEST_OUT_SIZE200, + TEST_OUT_SIZE204, + TEST_OUT_SIZE208, + TEST_OUT_SIZE212, + TEST_OUT_SIZE216, + TEST_OUT_SIZE220, + TEST_OUT_SIZE224, + TEST_OUT_SIZE228, + TEST_OUT_SIZE232, + TEST_OUT_SIZE236, + TEST_OUT_SIZE240, + TEST_OUT_SIZE244, TEST_OUT_FULL_SUPP_TRACE, + __TEST_OUT_MAX, + /********* * INPUT * *********/ + __TEST_IN_MIN, + TEST_IN_UNDEF_NS, TEST_IN_NO_ROOM, + TEST_IN_NO_ROOM_OSS, + TEST_IN_DISABLED, TEST_IN_OFLAG, TEST_IN_BIT0, TEST_IN_BIT1, @@ -115,36 +173,107 @@ enum { TEST_IN_BIT10, TEST_IN_BIT11, TEST_IN_BIT22, + TEST_IN_SIZE4, + TEST_IN_SIZE8, + TEST_IN_SIZE12, + TEST_IN_SIZE16, + TEST_IN_SIZE20, + TEST_IN_SIZE24, + TEST_IN_SIZE28, + TEST_IN_SIZE32, + TEST_IN_SIZE36, + TEST_IN_SIZE40, + TEST_IN_SIZE44, + 
TEST_IN_SIZE48, + TEST_IN_SIZE52, + TEST_IN_SIZE56, + TEST_IN_SIZE60, + TEST_IN_SIZE64, + TEST_IN_SIZE68, + TEST_IN_SIZE72, + TEST_IN_SIZE76, + TEST_IN_SIZE80, + TEST_IN_SIZE84, + TEST_IN_SIZE88, + TEST_IN_SIZE92, + TEST_IN_SIZE96, + TEST_IN_SIZE100, + TEST_IN_SIZE104, + TEST_IN_SIZE108, + TEST_IN_SIZE112, + TEST_IN_SIZE116, + TEST_IN_SIZE120, + TEST_IN_SIZE124, + TEST_IN_SIZE128, + TEST_IN_SIZE132, + TEST_IN_SIZE136, + TEST_IN_SIZE140, + TEST_IN_SIZE144, + TEST_IN_SIZE148, + TEST_IN_SIZE152, + TEST_IN_SIZE156, + TEST_IN_SIZE160, + TEST_IN_SIZE164, + TEST_IN_SIZE168, + TEST_IN_SIZE172, + TEST_IN_SIZE176, + TEST_IN_SIZE180, + TEST_IN_SIZE184, + TEST_IN_SIZE188, + TEST_IN_SIZE192, + TEST_IN_SIZE196, + TEST_IN_SIZE200, + TEST_IN_SIZE204, + TEST_IN_SIZE208, + TEST_IN_SIZE212, + TEST_IN_SIZE216, + TEST_IN_SIZE220, + TEST_IN_SIZE224, + TEST_IN_SIZE228, + TEST_IN_SIZE232, + TEST_IN_SIZE236, + TEST_IN_SIZE240, + TEST_IN_SIZE244, TEST_IN_FULL_SUPP_TRACE, - /********** - * GLOBAL * - **********/ - TEST_FWD_FULL_SUPP_TRACE, + __TEST_IN_MAX, __TEST_MAX, }; -static int check_ioam_header(int tid, struct ioam6_trace_hdr *ioam6h, - __u32 trace_type, __u16 ioam_ns) +static int check_header(int tid, struct ioam6_trace_hdr *trace, + __u32 trace_type, __u8 trace_size, __u16 ioam_ns) { - if (__be16_to_cpu(ioam6h->namespace_id) != ioam_ns || - __be32_to_cpu(ioam6h->type_be32) != (trace_type << 8)) + if (__be16_to_cpu(trace->namespace_id) != ioam_ns || + __be32_to_cpu(trace->type_be32) != (trace_type << 8)) return 1; switch (tid) { case TEST_OUT_UNDEF_NS: case TEST_IN_UNDEF_NS: - return ioam6h->overflow || - ioam6h->nodelen != 1 || - ioam6h->remlen != 1; + case TEST_IN_DISABLED: + return trace->overflow == 1 || + trace->nodelen != 1 || + trace->remlen != 1; case TEST_OUT_NO_ROOM: case TEST_IN_NO_ROOM: case TEST_IN_OFLAG: - return !ioam6h->overflow || - ioam6h->nodelen != 2 || - ioam6h->remlen != 1; + return trace->overflow == 0 || + trace->nodelen != 2 || + trace->remlen != 1; + + case TEST_OUT_NO_ROOM_OSS: + return trace->overflow == 0 || + trace->nodelen != 0 || + trace->remlen != 1; + + case TEST_IN_NO_ROOM_OSS: + case TEST_OUT_BIT22: + case TEST_IN_BIT22: + return trace->overflow == 1 || + trace->nodelen != 0 || + trace->remlen != 0; case TEST_OUT_BIT0: case TEST_IN_BIT0: @@ -164,9 +293,9 @@ static int check_ioam_header(int tid, struct ioam6_trace_hdr *ioam6h, case TEST_IN_BIT7: case TEST_OUT_BIT11: case TEST_IN_BIT11: - return ioam6h->overflow || - ioam6h->nodelen != 1 || - ioam6h->remlen; + return trace->overflow == 1 || + trace->nodelen != 1 || + trace->remlen != 0; case TEST_OUT_BIT8: case TEST_IN_BIT8: @@ -174,22 +303,145 @@ static int check_ioam_header(int tid, struct ioam6_trace_hdr *ioam6h, case TEST_IN_BIT9: case TEST_OUT_BIT10: case TEST_IN_BIT10: - return ioam6h->overflow || - ioam6h->nodelen != 2 || - ioam6h->remlen; - - case TEST_OUT_BIT22: - case TEST_IN_BIT22: - return ioam6h->overflow || - ioam6h->nodelen || - ioam6h->remlen; + return trace->overflow == 1 || + trace->nodelen != 2 || + trace->remlen != 0; + + case TEST_OUT_SIZE4: + case TEST_OUT_SIZE8: + case TEST_OUT_SIZE12: + case TEST_OUT_SIZE16: + case TEST_OUT_SIZE20: + case TEST_OUT_SIZE24: + case TEST_OUT_SIZE28: + case TEST_OUT_SIZE32: + case TEST_OUT_SIZE36: + case TEST_OUT_SIZE40: + case TEST_OUT_SIZE44: + case TEST_OUT_SIZE48: + case TEST_OUT_SIZE52: + case TEST_OUT_SIZE56: + case TEST_OUT_SIZE60: + case TEST_OUT_SIZE64: + case TEST_OUT_SIZE68: + case TEST_OUT_SIZE72: + case TEST_OUT_SIZE76: + case TEST_OUT_SIZE80: + case 
TEST_OUT_SIZE84: + case TEST_OUT_SIZE88: + case TEST_OUT_SIZE92: + case TEST_OUT_SIZE96: + case TEST_OUT_SIZE100: + case TEST_OUT_SIZE104: + case TEST_OUT_SIZE108: + case TEST_OUT_SIZE112: + case TEST_OUT_SIZE116: + case TEST_OUT_SIZE120: + case TEST_OUT_SIZE124: + case TEST_OUT_SIZE128: + case TEST_OUT_SIZE132: + case TEST_OUT_SIZE136: + case TEST_OUT_SIZE140: + case TEST_OUT_SIZE144: + case TEST_OUT_SIZE148: + case TEST_OUT_SIZE152: + case TEST_OUT_SIZE156: + case TEST_OUT_SIZE160: + case TEST_OUT_SIZE164: + case TEST_OUT_SIZE168: + case TEST_OUT_SIZE172: + case TEST_OUT_SIZE176: + case TEST_OUT_SIZE180: + case TEST_OUT_SIZE184: + case TEST_OUT_SIZE188: + case TEST_OUT_SIZE192: + case TEST_OUT_SIZE196: + case TEST_OUT_SIZE200: + case TEST_OUT_SIZE204: + case TEST_OUT_SIZE208: + case TEST_OUT_SIZE212: + case TEST_OUT_SIZE216: + case TEST_OUT_SIZE220: + case TEST_OUT_SIZE224: + case TEST_OUT_SIZE228: + case TEST_OUT_SIZE232: + case TEST_OUT_SIZE236: + case TEST_OUT_SIZE240: + case TEST_OUT_SIZE244: + return trace->overflow == 1 || + trace->nodelen != 1 || + trace->remlen != trace_size / 4; + + case TEST_IN_SIZE4: + case TEST_IN_SIZE8: + case TEST_IN_SIZE12: + case TEST_IN_SIZE16: + case TEST_IN_SIZE20: + case TEST_IN_SIZE24: + case TEST_IN_SIZE28: + case TEST_IN_SIZE32: + case TEST_IN_SIZE36: + case TEST_IN_SIZE40: + case TEST_IN_SIZE44: + case TEST_IN_SIZE48: + case TEST_IN_SIZE52: + case TEST_IN_SIZE56: + case TEST_IN_SIZE60: + case TEST_IN_SIZE64: + case TEST_IN_SIZE68: + case TEST_IN_SIZE72: + case TEST_IN_SIZE76: + case TEST_IN_SIZE80: + case TEST_IN_SIZE84: + case TEST_IN_SIZE88: + case TEST_IN_SIZE92: + case TEST_IN_SIZE96: + case TEST_IN_SIZE100: + case TEST_IN_SIZE104: + case TEST_IN_SIZE108: + case TEST_IN_SIZE112: + case TEST_IN_SIZE116: + case TEST_IN_SIZE120: + case TEST_IN_SIZE124: + case TEST_IN_SIZE128: + case TEST_IN_SIZE132: + case TEST_IN_SIZE136: + case TEST_IN_SIZE140: + case TEST_IN_SIZE144: + case TEST_IN_SIZE148: + case TEST_IN_SIZE152: + case TEST_IN_SIZE156: + case TEST_IN_SIZE160: + case TEST_IN_SIZE164: + case TEST_IN_SIZE168: + case TEST_IN_SIZE172: + case TEST_IN_SIZE176: + case TEST_IN_SIZE180: + case TEST_IN_SIZE184: + case TEST_IN_SIZE188: + case TEST_IN_SIZE192: + case TEST_IN_SIZE196: + case TEST_IN_SIZE200: + case TEST_IN_SIZE204: + case TEST_IN_SIZE208: + case TEST_IN_SIZE212: + case TEST_IN_SIZE216: + case TEST_IN_SIZE220: + case TEST_IN_SIZE224: + case TEST_IN_SIZE228: + case TEST_IN_SIZE232: + case TEST_IN_SIZE236: + case TEST_IN_SIZE240: + case TEST_IN_SIZE244: + return trace->overflow == 1 || + trace->nodelen != 1 || + trace->remlen != (trace_size / 4) - trace->nodelen; case TEST_OUT_FULL_SUPP_TRACE: case TEST_IN_FULL_SUPP_TRACE: - case TEST_FWD_FULL_SUPP_TRACE: - return ioam6h->overflow || - ioam6h->nodelen != 15 || - ioam6h->remlen; + return trace->overflow == 1 || + trace->nodelen != 15 || + trace->remlen != 0; default: break; @@ -198,167 +450,137 @@ static int check_ioam_header(int tid, struct ioam6_trace_hdr *ioam6h, return 1; } -static int check_ioam6_data(__u8 **p, struct ioam6_trace_hdr *ioam6h, - const struct ioam_config cnf) +static int check_data(struct ioam6_trace_hdr *trace, __u8 trace_size, + const struct ioam_config cnf, bool is_output) { - unsigned int len; + unsigned int len, i; __u8 aligned; __u64 raw64; __u32 raw32; + __u8 *p; - if (ioam6h->type.bit0) { - raw32 = __be32_to_cpu(*((__u32 *)*p)); - if (cnf.hlim != (raw32 >> 24) || cnf.id != (raw32 & 0xffffff)) - return 1; - *p += sizeof(__u32); - } - - if (ioam6h->type.bit1) { - 
raw32 = __be32_to_cpu(*((__u32 *)*p)); - if (cnf.ingr_id != (raw32 >> 16) || - cnf.egr_id != (raw32 & 0xffff)) - return 1; - *p += sizeof(__u32); - } - - if (ioam6h->type.bit2) - *p += sizeof(__u32); - - if (ioam6h->type.bit3) - *p += sizeof(__u32); - - if (ioam6h->type.bit4) { - if (__be32_to_cpu(*((__u32 *)*p)) != 0xffffffff) - return 1; - *p += sizeof(__u32); - } - - if (ioam6h->type.bit5) { - if (__be32_to_cpu(*((__u32 *)*p)) != cnf.ns_data) - return 1; - *p += sizeof(__u32); - } - - if (ioam6h->type.bit6) - *p += sizeof(__u32); + if (trace->type.bit12 | trace->type.bit13 | trace->type.bit14 | + trace->type.bit15 | trace->type.bit16 | trace->type.bit17 | + trace->type.bit18 | trace->type.bit19 | trace->type.bit20 | + trace->type.bit21 | trace->type.bit23) + return 1; - if (ioam6h->type.bit7) { - if (__be32_to_cpu(*((__u32 *)*p)) != 0xffffffff) + for (i = 0; i < trace->remlen * 4; i++) { + if (trace->data[i] != 0) return 1; - *p += sizeof(__u32); } - if (ioam6h->type.bit8) { - raw64 = __be64_to_cpu(*((__u64 *)*p)); - if (cnf.hlim != (raw64 >> 56) || - cnf.wide != (raw64 & 0xffffffffffffff)) - return 1; - *p += sizeof(__u64); - } + if (trace->remlen * 4 == trace_size) + return 0; - if (ioam6h->type.bit9) { - if (__be32_to_cpu(*((__u32 *)*p)) != cnf.ingr_wide) - return 1; - *p += sizeof(__u32); + p = trace->data + trace->remlen * 4; - if (__be32_to_cpu(*((__u32 *)*p)) != cnf.egr_wide) + if (trace->type.bit0) { + raw32 = __be32_to_cpu(*((__u32 *)p)); + if (cnf.hlim != (raw32 >> 24) || cnf.id != (raw32 & 0xffffff)) return 1; - *p += sizeof(__u32); + p += sizeof(__u32); } - if (ioam6h->type.bit10) { - if (__be64_to_cpu(*((__u64 *)*p)) != cnf.ns_wide) + if (trace->type.bit1) { + raw32 = __be32_to_cpu(*((__u32 *)p)); + if (cnf.ingr_id != (raw32 >> 16) || + cnf.egr_id != (raw32 & 0xffff)) return 1; - *p += sizeof(__u64); + p += sizeof(__u32); } - if (ioam6h->type.bit11) { - if (__be32_to_cpu(*((__u32 *)*p)) != 0xffffffff) + if (trace->type.bit2) { + raw32 = __be32_to_cpu(*((__u32 *)p)); + if ((is_output && raw32 != 0xffffffff) || + (!is_output && (raw32 == 0 || raw32 == 0xffffffff))) return 1; - *p += sizeof(__u32); + p += sizeof(__u32); } - if (ioam6h->type.bit12) { - if (__be32_to_cpu(*((__u32 *)*p)) != 0xffffffff) + if (trace->type.bit3) { + raw32 = __be32_to_cpu(*((__u32 *)p)); + if ((is_output && raw32 != 0xffffffff) || + (!is_output && (raw32 == 0 || raw32 == 0xffffffff))) return 1; - *p += sizeof(__u32); + p += sizeof(__u32); } - if (ioam6h->type.bit13) { - if (__be32_to_cpu(*((__u32 *)*p)) != 0xffffffff) + if (trace->type.bit4) { + if (__be32_to_cpu(*((__u32 *)p)) != 0xffffffff) return 1; - *p += sizeof(__u32); + p += sizeof(__u32); } - if (ioam6h->type.bit14) { - if (__be32_to_cpu(*((__u32 *)*p)) != 0xffffffff) + if (trace->type.bit5) { + if (__be32_to_cpu(*((__u32 *)p)) != cnf.ns_data) return 1; - *p += sizeof(__u32); + p += sizeof(__u32); } - if (ioam6h->type.bit15) { - if (__be32_to_cpu(*((__u32 *)*p)) != 0xffffffff) + if (trace->type.bit6) { + if (__be32_to_cpu(*((__u32 *)p)) == 0xffffffff) return 1; - *p += sizeof(__u32); + p += sizeof(__u32); } - if (ioam6h->type.bit16) { - if (__be32_to_cpu(*((__u32 *)*p)) != 0xffffffff) + if (trace->type.bit7) { + if (__be32_to_cpu(*((__u32 *)p)) != 0xffffffff) return 1; - *p += sizeof(__u32); + p += sizeof(__u32); } - if (ioam6h->type.bit17) { - if (__be32_to_cpu(*((__u32 *)*p)) != 0xffffffff) + if (trace->type.bit8) { + raw64 = __be64_to_cpu(*((__u64 *)p)); + if (cnf.hlim != (raw64 >> 56) || + cnf.wide != (raw64 & 0xffffffffffffff)) return 1; 
- *p += sizeof(__u32); + p += sizeof(__u64); } - if (ioam6h->type.bit18) { - if (__be32_to_cpu(*((__u32 *)*p)) != 0xffffffff) + if (trace->type.bit9) { + if (__be32_to_cpu(*((__u32 *)p)) != cnf.ingr_wide) return 1; - *p += sizeof(__u32); - } + p += sizeof(__u32); - if (ioam6h->type.bit19) { - if (__be32_to_cpu(*((__u32 *)*p)) != 0xffffffff) + if (__be32_to_cpu(*((__u32 *)p)) != cnf.egr_wide) return 1; - *p += sizeof(__u32); + p += sizeof(__u32); } - if (ioam6h->type.bit20) { - if (__be32_to_cpu(*((__u32 *)*p)) != 0xffffffff) + if (trace->type.bit10) { + if (__be64_to_cpu(*((__u64 *)p)) != cnf.ns_wide) return 1; - *p += sizeof(__u32); + p += sizeof(__u64); } - if (ioam6h->type.bit21) { - if (__be32_to_cpu(*((__u32 *)*p)) != 0xffffffff) + if (trace->type.bit11) { + if (__be32_to_cpu(*((__u32 *)p)) != 0xffffffff) return 1; - *p += sizeof(__u32); + p += sizeof(__u32); } - if (ioam6h->type.bit22) { + if (trace->type.bit22) { len = cnf.sc_data ? strlen(cnf.sc_data) : 0; aligned = cnf.sc_data ? __ALIGN_KERNEL(len, 4) : 0; - raw32 = __be32_to_cpu(*((__u32 *)*p)); + raw32 = __be32_to_cpu(*((__u32 *)p)); if (aligned != (raw32 >> 24) * 4 || cnf.sc_id != (raw32 & 0xffffff)) return 1; - *p += sizeof(__u32); + p += sizeof(__u32); if (cnf.sc_data) { - if (strncmp((char *)*p, cnf.sc_data, len)) + if (strncmp((char *)p, cnf.sc_data, len)) return 1; - *p += len; + p += len; aligned -= len; while (aligned--) { - if (**p != '\0') + if (*p != '\0') return 1; - *p += sizeof(__u8); + p += sizeof(__u8); } } } @@ -366,151 +588,351 @@ static int check_ioam6_data(__u8 **p, struct ioam6_trace_hdr *ioam6h, return 0; } -static int check_ioam_header_and_data(int tid, struct ioam6_trace_hdr *ioam6h, - __u32 trace_type, __u16 ioam_ns) +static int check_ioam_trace(int tid, struct ioam6_trace_hdr *trace, + __u32 trace_type, __u8 trace_size, __u16 ioam_ns) { - __u8 *p; - - if (check_ioam_header(tid, ioam6h, trace_type, ioam_ns)) + if (check_header(tid, trace, trace_type, trace_size, ioam_ns)) return 1; - p = ioam6h->data + ioam6h->remlen * 4; - - switch (tid) { - case TEST_OUT_BIT0: - case TEST_OUT_BIT1: - case TEST_OUT_BIT2: - case TEST_OUT_BIT3: - case TEST_OUT_BIT4: - case TEST_OUT_BIT5: - case TEST_OUT_BIT6: - case TEST_OUT_BIT7: - case TEST_OUT_BIT8: - case TEST_OUT_BIT9: - case TEST_OUT_BIT10: - case TEST_OUT_BIT11: - case TEST_OUT_BIT22: - case TEST_OUT_FULL_SUPP_TRACE: - return check_ioam6_data(&p, ioam6h, node1); - - case TEST_IN_BIT0: - case TEST_IN_BIT1: - case TEST_IN_BIT2: - case TEST_IN_BIT3: - case TEST_IN_BIT4: - case TEST_IN_BIT5: - case TEST_IN_BIT6: - case TEST_IN_BIT7: - case TEST_IN_BIT8: - case TEST_IN_BIT9: - case TEST_IN_BIT10: - case TEST_IN_BIT11: - case TEST_IN_BIT22: - case TEST_IN_FULL_SUPP_TRACE: - { - __u32 tmp32 = node2.egr_wide; - __u16 tmp16 = node2.egr_id; - int res; - - node2.egr_id = 0xffff; - node2.egr_wide = 0xffffffff; + if (tid > __TEST_OUT_MIN && tid < __TEST_OUT_MAX) + return check_data(trace, trace_size, node1, true); - res = check_ioam6_data(&p, ioam6h, node2); - - node2.egr_id = tmp16; - node2.egr_wide = tmp32; - - return res; - } - - case TEST_FWD_FULL_SUPP_TRACE: - if (check_ioam6_data(&p, ioam6h, node3)) - return 1; - if (check_ioam6_data(&p, ioam6h, node2)) - return 1; - return check_ioam6_data(&p, ioam6h, node1); - - default: - break; - } + if (tid > __TEST_IN_MIN && tid < __TEST_IN_MAX) + return check_data(trace, trace_size, node2, false); return 1; } static int str2id(const char *tname) { - if (!strcmp("out_undef_ns", tname)) + if (!strcmp("output_undef_ns", tname)) return 
TEST_OUT_UNDEF_NS; - if (!strcmp("out_no_room", tname)) + if (!strcmp("output_no_room", tname)) return TEST_OUT_NO_ROOM; - if (!strcmp("out_bit0", tname)) + if (!strcmp("output_no_room_oss", tname)) + return TEST_OUT_NO_ROOM_OSS; + if (!strcmp("output_bit0", tname)) return TEST_OUT_BIT0; - if (!strcmp("out_bit1", tname)) + if (!strcmp("output_bit1", tname)) return TEST_OUT_BIT1; - if (!strcmp("out_bit2", tname)) + if (!strcmp("output_bit2", tname)) return TEST_OUT_BIT2; - if (!strcmp("out_bit3", tname)) + if (!strcmp("output_bit3", tname)) return TEST_OUT_BIT3; - if (!strcmp("out_bit4", tname)) + if (!strcmp("output_bit4", tname)) return TEST_OUT_BIT4; - if (!strcmp("out_bit5", tname)) + if (!strcmp("output_bit5", tname)) return TEST_OUT_BIT5; - if (!strcmp("out_bit6", tname)) + if (!strcmp("output_bit6", tname)) return TEST_OUT_BIT6; - if (!strcmp("out_bit7", tname)) + if (!strcmp("output_bit7", tname)) return TEST_OUT_BIT7; - if (!strcmp("out_bit8", tname)) + if (!strcmp("output_bit8", tname)) return TEST_OUT_BIT8; - if (!strcmp("out_bit9", tname)) + if (!strcmp("output_bit9", tname)) return TEST_OUT_BIT9; - if (!strcmp("out_bit10", tname)) + if (!strcmp("output_bit10", tname)) return TEST_OUT_BIT10; - if (!strcmp("out_bit11", tname)) + if (!strcmp("output_bit11", tname)) return TEST_OUT_BIT11; - if (!strcmp("out_bit22", tname)) + if (!strcmp("output_bit22", tname)) return TEST_OUT_BIT22; - if (!strcmp("out_full_supp_trace", tname)) + if (!strcmp("output_size4", tname)) + return TEST_OUT_SIZE4; + if (!strcmp("output_size8", tname)) + return TEST_OUT_SIZE8; + if (!strcmp("output_size12", tname)) + return TEST_OUT_SIZE12; + if (!strcmp("output_size16", tname)) + return TEST_OUT_SIZE16; + if (!strcmp("output_size20", tname)) + return TEST_OUT_SIZE20; + if (!strcmp("output_size24", tname)) + return TEST_OUT_SIZE24; + if (!strcmp("output_size28", tname)) + return TEST_OUT_SIZE28; + if (!strcmp("output_size32", tname)) + return TEST_OUT_SIZE32; + if (!strcmp("output_size36", tname)) + return TEST_OUT_SIZE36; + if (!strcmp("output_size40", tname)) + return TEST_OUT_SIZE40; + if (!strcmp("output_size44", tname)) + return TEST_OUT_SIZE44; + if (!strcmp("output_size48", tname)) + return TEST_OUT_SIZE48; + if (!strcmp("output_size52", tname)) + return TEST_OUT_SIZE52; + if (!strcmp("output_size56", tname)) + return TEST_OUT_SIZE56; + if (!strcmp("output_size60", tname)) + return TEST_OUT_SIZE60; + if (!strcmp("output_size64", tname)) + return TEST_OUT_SIZE64; + if (!strcmp("output_size68", tname)) + return TEST_OUT_SIZE68; + if (!strcmp("output_size72", tname)) + return TEST_OUT_SIZE72; + if (!strcmp("output_size76", tname)) + return TEST_OUT_SIZE76; + if (!strcmp("output_size80", tname)) + return TEST_OUT_SIZE80; + if (!strcmp("output_size84", tname)) + return TEST_OUT_SIZE84; + if (!strcmp("output_size88", tname)) + return TEST_OUT_SIZE88; + if (!strcmp("output_size92", tname)) + return TEST_OUT_SIZE92; + if (!strcmp("output_size96", tname)) + return TEST_OUT_SIZE96; + if (!strcmp("output_size100", tname)) + return TEST_OUT_SIZE100; + if (!strcmp("output_size104", tname)) + return TEST_OUT_SIZE104; + if (!strcmp("output_size108", tname)) + return TEST_OUT_SIZE108; + if (!strcmp("output_size112", tname)) + return TEST_OUT_SIZE112; + if (!strcmp("output_size116", tname)) + return TEST_OUT_SIZE116; + if (!strcmp("output_size120", tname)) + return TEST_OUT_SIZE120; + if (!strcmp("output_size124", tname)) + return TEST_OUT_SIZE124; + if (!strcmp("output_size128", tname)) + return TEST_OUT_SIZE128; + 
if (!strcmp("output_size132", tname)) + return TEST_OUT_SIZE132; + if (!strcmp("output_size136", tname)) + return TEST_OUT_SIZE136; + if (!strcmp("output_size140", tname)) + return TEST_OUT_SIZE140; + if (!strcmp("output_size144", tname)) + return TEST_OUT_SIZE144; + if (!strcmp("output_size148", tname)) + return TEST_OUT_SIZE148; + if (!strcmp("output_size152", tname)) + return TEST_OUT_SIZE152; + if (!strcmp("output_size156", tname)) + return TEST_OUT_SIZE156; + if (!strcmp("output_size160", tname)) + return TEST_OUT_SIZE160; + if (!strcmp("output_size164", tname)) + return TEST_OUT_SIZE164; + if (!strcmp("output_size168", tname)) + return TEST_OUT_SIZE168; + if (!strcmp("output_size172", tname)) + return TEST_OUT_SIZE172; + if (!strcmp("output_size176", tname)) + return TEST_OUT_SIZE176; + if (!strcmp("output_size180", tname)) + return TEST_OUT_SIZE180; + if (!strcmp("output_size184", tname)) + return TEST_OUT_SIZE184; + if (!strcmp("output_size188", tname)) + return TEST_OUT_SIZE188; + if (!strcmp("output_size192", tname)) + return TEST_OUT_SIZE192; + if (!strcmp("output_size196", tname)) + return TEST_OUT_SIZE196; + if (!strcmp("output_size200", tname)) + return TEST_OUT_SIZE200; + if (!strcmp("output_size204", tname)) + return TEST_OUT_SIZE204; + if (!strcmp("output_size208", tname)) + return TEST_OUT_SIZE208; + if (!strcmp("output_size212", tname)) + return TEST_OUT_SIZE212; + if (!strcmp("output_size216", tname)) + return TEST_OUT_SIZE216; + if (!strcmp("output_size220", tname)) + return TEST_OUT_SIZE220; + if (!strcmp("output_size224", tname)) + return TEST_OUT_SIZE224; + if (!strcmp("output_size228", tname)) + return TEST_OUT_SIZE228; + if (!strcmp("output_size232", tname)) + return TEST_OUT_SIZE232; + if (!strcmp("output_size236", tname)) + return TEST_OUT_SIZE236; + if (!strcmp("output_size240", tname)) + return TEST_OUT_SIZE240; + if (!strcmp("output_size244", tname)) + return TEST_OUT_SIZE244; + if (!strcmp("output_full_supp_trace", tname)) return TEST_OUT_FULL_SUPP_TRACE; - if (!strcmp("in_undef_ns", tname)) + if (!strcmp("input_undef_ns", tname)) return TEST_IN_UNDEF_NS; - if (!strcmp("in_no_room", tname)) + if (!strcmp("input_no_room", tname)) return TEST_IN_NO_ROOM; - if (!strcmp("in_oflag", tname)) + if (!strcmp("input_no_room_oss", tname)) + return TEST_IN_NO_ROOM_OSS; + if (!strcmp("input_disabled", tname)) + return TEST_IN_DISABLED; + if (!strcmp("input_oflag", tname)) return TEST_IN_OFLAG; - if (!strcmp("in_bit0", tname)) + if (!strcmp("input_bit0", tname)) return TEST_IN_BIT0; - if (!strcmp("in_bit1", tname)) + if (!strcmp("input_bit1", tname)) return TEST_IN_BIT1; - if (!strcmp("in_bit2", tname)) + if (!strcmp("input_bit2", tname)) return TEST_IN_BIT2; - if (!strcmp("in_bit3", tname)) + if (!strcmp("input_bit3", tname)) return TEST_IN_BIT3; - if (!strcmp("in_bit4", tname)) + if (!strcmp("input_bit4", tname)) return TEST_IN_BIT4; - if (!strcmp("in_bit5", tname)) + if (!strcmp("input_bit5", tname)) return TEST_IN_BIT5; - if (!strcmp("in_bit6", tname)) + if (!strcmp("input_bit6", tname)) return TEST_IN_BIT6; - if (!strcmp("in_bit7", tname)) + if (!strcmp("input_bit7", tname)) return TEST_IN_BIT7; - if (!strcmp("in_bit8", tname)) + if (!strcmp("input_bit8", tname)) return TEST_IN_BIT8; - if (!strcmp("in_bit9", tname)) + if (!strcmp("input_bit9", tname)) return TEST_IN_BIT9; - if (!strcmp("in_bit10", tname)) + if (!strcmp("input_bit10", tname)) return TEST_IN_BIT10; - if (!strcmp("in_bit11", tname)) + if (!strcmp("input_bit11", tname)) return TEST_IN_BIT11; - if 
(!strcmp("in_bit22", tname)) + if (!strcmp("input_bit22", tname)) return TEST_IN_BIT22; - if (!strcmp("in_full_supp_trace", tname)) + if (!strcmp("input_size4", tname)) + return TEST_IN_SIZE4; + if (!strcmp("input_size8", tname)) + return TEST_IN_SIZE8; + if (!strcmp("input_size12", tname)) + return TEST_IN_SIZE12; + if (!strcmp("input_size16", tname)) + return TEST_IN_SIZE16; + if (!strcmp("input_size20", tname)) + return TEST_IN_SIZE20; + if (!strcmp("input_size24", tname)) + return TEST_IN_SIZE24; + if (!strcmp("input_size28", tname)) + return TEST_IN_SIZE28; + if (!strcmp("input_size32", tname)) + return TEST_IN_SIZE32; + if (!strcmp("input_size36", tname)) + return TEST_IN_SIZE36; + if (!strcmp("input_size40", tname)) + return TEST_IN_SIZE40; + if (!strcmp("input_size44", tname)) + return TEST_IN_SIZE44; + if (!strcmp("input_size48", tname)) + return TEST_IN_SIZE48; + if (!strcmp("input_size52", tname)) + return TEST_IN_SIZE52; + if (!strcmp("input_size56", tname)) + return TEST_IN_SIZE56; + if (!strcmp("input_size60", tname)) + return TEST_IN_SIZE60; + if (!strcmp("input_size64", tname)) + return TEST_IN_SIZE64; + if (!strcmp("input_size68", tname)) + return TEST_IN_SIZE68; + if (!strcmp("input_size72", tname)) + return TEST_IN_SIZE72; + if (!strcmp("input_size76", tname)) + return TEST_IN_SIZE76; + if (!strcmp("input_size80", tname)) + return TEST_IN_SIZE80; + if (!strcmp("input_size84", tname)) + return TEST_IN_SIZE84; + if (!strcmp("input_size88", tname)) + return TEST_IN_SIZE88; + if (!strcmp("input_size92", tname)) + return TEST_IN_SIZE92; + if (!strcmp("input_size96", tname)) + return TEST_IN_SIZE96; + if (!strcmp("input_size100", tname)) + return TEST_IN_SIZE100; + if (!strcmp("input_size104", tname)) + return TEST_IN_SIZE104; + if (!strcmp("input_size108", tname)) + return TEST_IN_SIZE108; + if (!strcmp("input_size112", tname)) + return TEST_IN_SIZE112; + if (!strcmp("input_size116", tname)) + return TEST_IN_SIZE116; + if (!strcmp("input_size120", tname)) + return TEST_IN_SIZE120; + if (!strcmp("input_size124", tname)) + return TEST_IN_SIZE124; + if (!strcmp("input_size128", tname)) + return TEST_IN_SIZE128; + if (!strcmp("input_size132", tname)) + return TEST_IN_SIZE132; + if (!strcmp("input_size136", tname)) + return TEST_IN_SIZE136; + if (!strcmp("input_size140", tname)) + return TEST_IN_SIZE140; + if (!strcmp("input_size144", tname)) + return TEST_IN_SIZE144; + if (!strcmp("input_size148", tname)) + return TEST_IN_SIZE148; + if (!strcmp("input_size152", tname)) + return TEST_IN_SIZE152; + if (!strcmp("input_size156", tname)) + return TEST_IN_SIZE156; + if (!strcmp("input_size160", tname)) + return TEST_IN_SIZE160; + if (!strcmp("input_size164", tname)) + return TEST_IN_SIZE164; + if (!strcmp("input_size168", tname)) + return TEST_IN_SIZE168; + if (!strcmp("input_size172", tname)) + return TEST_IN_SIZE172; + if (!strcmp("input_size176", tname)) + return TEST_IN_SIZE176; + if (!strcmp("input_size180", tname)) + return TEST_IN_SIZE180; + if (!strcmp("input_size184", tname)) + return TEST_IN_SIZE184; + if (!strcmp("input_size188", tname)) + return TEST_IN_SIZE188; + if (!strcmp("input_size192", tname)) + return TEST_IN_SIZE192; + if (!strcmp("input_size196", tname)) + return TEST_IN_SIZE196; + if (!strcmp("input_size200", tname)) + return TEST_IN_SIZE200; + if (!strcmp("input_size204", tname)) + return TEST_IN_SIZE204; + if (!strcmp("input_size208", tname)) + return TEST_IN_SIZE208; + if (!strcmp("input_size212", tname)) + return TEST_IN_SIZE212; + if 
(!strcmp("input_size216", tname)) + return TEST_IN_SIZE216; + if (!strcmp("input_size220", tname)) + return TEST_IN_SIZE220; + if (!strcmp("input_size224", tname)) + return TEST_IN_SIZE224; + if (!strcmp("input_size228", tname)) + return TEST_IN_SIZE228; + if (!strcmp("input_size232", tname)) + return TEST_IN_SIZE232; + if (!strcmp("input_size236", tname)) + return TEST_IN_SIZE236; + if (!strcmp("input_size240", tname)) + return TEST_IN_SIZE240; + if (!strcmp("input_size244", tname)) + return TEST_IN_SIZE244; + if (!strcmp("input_full_supp_trace", tname)) return TEST_IN_FULL_SUPP_TRACE; - if (!strcmp("fwd_full_supp_trace", tname)) - return TEST_FWD_FULL_SUPP_TRACE; return -1; } +static int ipv6_addr_equal(const struct in6_addr *a1, const struct in6_addr *a2) +{ + return ((a1->s6_addr32[0] ^ a2->s6_addr32[0]) | + (a1->s6_addr32[1] ^ a2->s6_addr32[1]) | + (a1->s6_addr32[2] ^ a2->s6_addr32[2]) | + (a1->s6_addr32[3] ^ a2->s6_addr32[3])) == 0; +} + static int get_u32(__u32 *val, const char *arg, int base) { unsigned long res; @@ -555,119 +977,124 @@ static int get_u16(__u16 *val, const char *arg, int base) return 0; } -static int (*func[__TEST_MAX])(int, struct ioam6_trace_hdr *, __u32, __u16) = { - [TEST_OUT_UNDEF_NS] = check_ioam_header, - [TEST_OUT_NO_ROOM] = check_ioam_header, - [TEST_OUT_BIT0] = check_ioam_header_and_data, - [TEST_OUT_BIT1] = check_ioam_header_and_data, - [TEST_OUT_BIT2] = check_ioam_header_and_data, - [TEST_OUT_BIT3] = check_ioam_header_and_data, - [TEST_OUT_BIT4] = check_ioam_header_and_data, - [TEST_OUT_BIT5] = check_ioam_header_and_data, - [TEST_OUT_BIT6] = check_ioam_header_and_data, - [TEST_OUT_BIT7] = check_ioam_header_and_data, - [TEST_OUT_BIT8] = check_ioam_header_and_data, - [TEST_OUT_BIT9] = check_ioam_header_and_data, - [TEST_OUT_BIT10] = check_ioam_header_and_data, - [TEST_OUT_BIT11] = check_ioam_header_and_data, - [TEST_OUT_BIT22] = check_ioam_header_and_data, - [TEST_OUT_FULL_SUPP_TRACE] = check_ioam_header_and_data, - [TEST_IN_UNDEF_NS] = check_ioam_header, - [TEST_IN_NO_ROOM] = check_ioam_header, - [TEST_IN_OFLAG] = check_ioam_header, - [TEST_IN_BIT0] = check_ioam_header_and_data, - [TEST_IN_BIT1] = check_ioam_header_and_data, - [TEST_IN_BIT2] = check_ioam_header_and_data, - [TEST_IN_BIT3] = check_ioam_header_and_data, - [TEST_IN_BIT4] = check_ioam_header_and_data, - [TEST_IN_BIT5] = check_ioam_header_and_data, - [TEST_IN_BIT6] = check_ioam_header_and_data, - [TEST_IN_BIT7] = check_ioam_header_and_data, - [TEST_IN_BIT8] = check_ioam_header_and_data, - [TEST_IN_BIT9] = check_ioam_header_and_data, - [TEST_IN_BIT10] = check_ioam_header_and_data, - [TEST_IN_BIT11] = check_ioam_header_and_data, - [TEST_IN_BIT22] = check_ioam_header_and_data, - [TEST_IN_FULL_SUPP_TRACE] = check_ioam_header_and_data, - [TEST_FWD_FULL_SUPP_TRACE] = check_ioam_header_and_data, -}; +static int get_u8(__u8 *val, const char *arg, int base) +{ + unsigned long res; + char *ptr; + + if (!arg || !*arg) + return -1; + res = strtoul(arg, &ptr, base); + + if (!ptr || ptr == arg || *ptr) + return -1; + + if (res == ULONG_MAX && errno == ERANGE) + return -1; + + if (res > 0xFFUL) + return -1; + + *val = res; + return 0; +} int main(int argc, char **argv) { - int fd, size, hoplen, tid, ret = 1, on = 1; - struct ioam6_hdr *opt; - struct cmsghdr *cmsg; - struct msghdr msg; - struct iovec iov; - __u8 buffer[512]; + __u8 buffer[512], *ptr, nexthdr, tr_size; + struct ioam6_trace_hdr *trace; + unsigned int hoplen, ret = 1; + struct ipv6_hopopt_hdr *hbh; + int fd, size, testname_id; + struct in6_addr 
src, dst;
+	struct ioam6_hdr *ioam6;
+	struct timeval timeout;
+	struct ipv6hdr *ipv6;
 	__u32 tr_type;
 	__u16 ioam_ns;
-	__u8 *ptr;

-	if (argc != 5)
+	if (argc != 9)
 		goto out;

-	tid = str2id(argv[1]);
-	if (tid < 0 || !func[tid])
-		goto out;
+	testname_id = str2id(argv[2]);

-	if (get_u32(&tr_type, argv[2], 16) ||
-	    get_u16(&ioam_ns, argv[3], 0))
+	if (testname_id < 0 ||
+	    inet_pton(AF_INET6, argv[3], &src) != 1 ||
+	    inet_pton(AF_INET6, argv[4], &dst) != 1 ||
+	    get_u32(&tr_type, argv[5], 16) ||
+	    get_u8(&tr_size, argv[6], 0) ||
+	    get_u16(&ioam_ns, argv[7], 0))
 		goto out;

-	fd = socket(PF_INET6, SOCK_RAW,
-		    !strcmp(argv[4], "encap") ? IPPROTO_IPV6 : IPPROTO_ICMPV6);
+	nexthdr = (!strcmp(argv[8], "encap") ? IPPROTO_IPV6 : IPPROTO_ICMPV6);
+
+	hoplen = sizeof(*hbh);
+	hoplen += 2;              // 2-byte padding for alignment
+	hoplen += sizeof(*ioam6); // IOAM option header
+	hoplen += sizeof(*trace); // IOAM trace header
+	hoplen += tr_size;        // IOAM trace size
+	hoplen += (tr_size % 8);  // optional padding
+
+	fd = socket(AF_PACKET, SOCK_DGRAM, __cpu_to_be16(ETH_P_IPV6));
 	if (fd < 0)
 		goto out;

-	setsockopt(fd, IPPROTO_IPV6, IPV6_RECVHOPOPTS, &on, sizeof(on));
+	if (setsockopt(fd, SOL_SOCKET, SO_BINDTODEVICE,
+		       argv[1], strlen(argv[1])))
+		goto close;

-	iov.iov_len = 1;
-	iov.iov_base = malloc(CMSG_SPACE(sizeof(buffer)));
-	if (!iov.iov_base)
+	timeout.tv_sec = 1;
+	timeout.tv_usec = 0;
+	if (setsockopt(fd, SOL_SOCKET, SO_RCVTIMEO,
+		       (const char *)&timeout, sizeof(timeout)))
 		goto close;
 recv:
-	memset(&msg, 0, sizeof(msg));
-	msg.msg_iov = &iov;
-	msg.msg_iovlen = 1;
-	msg.msg_control = buffer;
-	msg.msg_controllen = CMSG_SPACE(sizeof(buffer));
-
-	size = recvmsg(fd, &msg, 0);
+	size = recv(fd, buffer, sizeof(buffer), 0);
 	if (size <= 0)
 		goto close;

-	for (cmsg = CMSG_FIRSTHDR(&msg); cmsg; cmsg = CMSG_NXTHDR(&msg, cmsg)) {
-		if (cmsg->cmsg_level != IPPROTO_IPV6 ||
-		    cmsg->cmsg_type != IPV6_HOPOPTS ||
-		    cmsg->cmsg_len < sizeof(struct ipv6_hopopt_hdr))
-			continue;
+	ipv6 = (struct ipv6hdr *)buffer;
+
+	/* Skip packets that do not have the expected src/dst address or that
+	 * do not have a Hop-by-Hop header.
+	 */
+	if (!ipv6_addr_equal(&ipv6->saddr, &src) ||
+	    !ipv6_addr_equal(&ipv6->daddr, &dst) ||
+	    ipv6->nexthdr != IPPROTO_HOPOPTS)
+		goto recv;
+
+	/* Check the Hop-by-Hop's Next Header and size. */
+	hbh = (struct ipv6_hopopt_hdr *)(buffer + sizeof(*ipv6));
+	if (hbh->nexthdr != nexthdr || hbh->hdrlen != (hoplen >> 3) - 1)
+		goto close;

-		ptr = (__u8 *)CMSG_DATA(cmsg);
+	/* Check that we have the 2-byte padding for alignment. */
+	ptr = (__u8 *)hbh + sizeof(*hbh);
+	if (ptr[0] != IPV6_TLV_PADN || ptr[1] != 0)
+		goto close;

-		hoplen = (ptr[1] + 1) << 3;
-		ptr += sizeof(struct ipv6_hopopt_hdr);
+	/* Check that we now have the IOAM option. */
+	ptr += 2;
+	if (ptr[0] != IPV6_TLV_IOAM)
+		goto close;

-		while (hoplen > 0) {
-			opt = (struct ioam6_hdr *)ptr;
+	/* Check its size and the IOAM option type. */
+	ioam6 = (struct ioam6_hdr *)ptr;
+	if (ioam6->opt_len != sizeof(*ioam6) - 2 + sizeof(*trace) + tr_size ||
+	    ioam6->type != IOAM6_TYPE_PREALLOC)
+		goto close;

-			if (opt->opt_type == IPV6_TLV_IOAM &&
-			    opt->type == IOAM6_TYPE_PREALLOC) {
-				ptr += sizeof(*opt);
-				ret = func[tid](tid,
-						(struct ioam6_trace_hdr *)ptr,
-						tr_type, ioam_ns);
-				goto close;
-			}
+	trace = (struct ioam6_trace_hdr *)(ptr + sizeof(*ioam6));

-			ptr += opt->opt_len + 2;
-			hoplen -= opt->opt_len + 2;
-		}
-	}
+	/* Check the optional trailing 4-byte padding. */
+	ptr = (__u8 *)trace + sizeof(*trace) + tr_size;
+	if (tr_size % 8 && (ptr[0] != IPV6_TLV_PADN || ptr[1] != 2 ||
+	    ptr[2] != 0 || ptr[3] != 0))
+		goto close;

-	goto recv;
+	/* Check the IOAM header and data. */
+	ret = check_ioam_trace(testname_id, trace, tr_type, tr_size, ioam_ns);

 close:
-	free(iov.iov_base);
 	close(fd);
 out:
 	return ret;
diff --git a/tools/testing/selftests/net/lib.sh b/tools/testing/selftests/net/lib.sh
index be8707bfb46e..c8991cc6bf28 100644
--- a/tools/testing/selftests/net/lib.sh
+++ b/tools/testing/selftests/net/lib.sh
@@ -1,6 +1,9 @@
 #!/bin/bash
 # SPDX-License-Identifier: GPL-2.0

+net_dir=$(dirname "$(readlink -e "${BASH_SOURCE[0]}")")
+source "$net_dir/lib/sh/defer.sh"
+
 ##############################################################################
 # Defines
diff --git a/tools/testing/selftests/net/lib/Makefile b/tools/testing/selftests/net/lib/Makefile
index 82c3264b115e..18b9443454a9 100644
--- a/tools/testing/selftests/net/lib/Makefile
+++ b/tools/testing/selftests/net/lib/Makefile
@@ -10,6 +10,6 @@ TEST_FILES += ../../../../net/ynl

 TEST_GEN_FILES += csum

-TEST_INCLUDES := $(wildcard py/*.py)
+TEST_INCLUDES := $(wildcard py/*.py sh/*.sh)

 include ../../lib.mk
diff --git a/tools/testing/selftests/net/lib/csum.c b/tools/testing/selftests/net/lib/csum.c
index e0a34e5e8dd5..27437590eeb5 100644
--- a/tools/testing/selftests/net/lib/csum.c
+++ b/tools/testing/selftests/net/lib/csum.c
@@ -675,22 +675,20 @@ static int recv_verify_packet_ipv6(void *nh, int len)
 {
 	struct ipv6hdr *ip6h = nh;
 	uint16_t proto = cfg_encap ? IPPROTO_UDP : cfg_proto;
-	uint16_t ip_len;
+	uint16_t payload_len;

 	if (len < sizeof(*ip6h) || ip6h->nexthdr != proto)
 		return -1;

-	ip_len = ntohs(ip6h->payload_len);
-	if (ip_len > len - sizeof(*ip6h))
+	payload_len = ntohs(ip6h->payload_len);
+	if (payload_len > len - sizeof(*ip6h))
 		return -1;

-	len = ip_len;
 	iph_addr_p = &ip6h->saddr;
-
 	if (proto == IPPROTO_TCP)
-		return recv_verify_packet_tcp(ip6h + 1, len);
+		return recv_verify_packet_tcp(ip6h + 1, payload_len);
 	else
-		return recv_verify_packet_udp(ip6h + 1, len);
+		return recv_verify_packet_udp(ip6h + 1, payload_len);
 }

 /* return whether auxdata includes TP_STATUS_CSUM_VALID */
diff --git a/tools/testing/selftests/net/lib/py/__init__.py b/tools/testing/selftests/net/lib/py/__init__.py
index b6d498d125fe..54d8f5eba810 100644
--- a/tools/testing/selftests/net/lib/py/__init__.py
+++ b/tools/testing/selftests/net/lib/py/__init__.py
@@ -6,3 +6,4 @@ from .netns import NetNS
 from .nsim import *
 from .utils import *
 from .ynl import NlError, YnlFamily, EthtoolFamily, NetdevFamily, RtnlFamily
+from .ynl import NetshaperFamily
diff --git a/tools/testing/selftests/net/lib/py/ynl.py b/tools/testing/selftests/net/lib/py/ynl.py
index 1ace58370c06..a0d689d58c57 100644
--- a/tools/testing/selftests/net/lib/py/ynl.py
+++ b/tools/testing/selftests/net/lib/py/ynl.py
@@ -47,3 +47,8 @@ class NetdevFamily(YnlFamily):
     def __init__(self):
         super().__init__((SPEC_PATH / Path('netdev.yaml')).as_posix(),
                          schema='')
+
+class NetshaperFamily(YnlFamily):
+    def __init__(self):
+        super().__init__((SPEC_PATH / Path('net_shaper.yaml')).as_posix(),
+                         schema='')
diff --git a/tools/testing/selftests/net/lib/sh/defer.sh b/tools/testing/selftests/net/lib/sh/defer.sh
new file mode 100644
index 000000000000..082f5d38321b
--- /dev/null
+++ b/tools/testing/selftests/net/lib/sh/defer.sh
@@ -0,0 +1,115 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0

+# map[(scope_id,track,cleanup_id) -> cleanup_command]
+# track={d=default | p=priority}
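+#
+# Illustrative usage sketch ("dummy0" is a hypothetical device, not part of
+# this library): a test pushes a scope, registers a cleanup right after
+# allocating each resource, then pops the scope to run the cleanups in
+# reverse order of registration:
+#
+#	defer_scope_push
+#	ip link add dev dummy0 type dummy
+#	defer ip link del dev dummy0
+#	...			# test body
+#	defer_scope_pop		# runs "ip link del dev dummy0"
+#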
+declare -A __DEFER__JOBS + +# map[(scope_id,track) -> # cleanup_commands] +declare -A __DEFER__NJOBS + +# scope_id of the topmost scope. +__DEFER__SCOPE_ID=0 + +__defer__ndefer_key() +{ + local track=$1; shift + + echo $__DEFER__SCOPE_ID,$track +} + +__defer__defer_key() +{ + local track=$1; shift + local defer_ix=$1; shift + + echo $__DEFER__SCOPE_ID,$track,$defer_ix +} + +__defer__ndefers() +{ + local track=$1; shift + + echo ${__DEFER__NJOBS[$(__defer__ndefer_key $track)]} +} + +__defer__run() +{ + local track=$1; shift + local defer_ix=$1; shift + local defer_key=$(__defer__defer_key $track $defer_ix) + + ${__DEFER__JOBS[$defer_key]} + unset __DEFER__JOBS[$defer_key] +} + +__defer__schedule() +{ + local track=$1; shift + local ndefers=$(__defer__ndefers $track) + local ndefers_key=$(__defer__ndefer_key $track) + local defer_key=$(__defer__defer_key $track $ndefers) + local defer="$@" + + __DEFER__JOBS[$defer_key]="$defer" + __DEFER__NJOBS[$ndefers_key]=$((ndefers + 1)) +} + +__defer__scope_wipe() +{ + __DEFER__NJOBS[$(__defer__ndefer_key d)]=0 + __DEFER__NJOBS[$(__defer__ndefer_key p)]=0 +} + +defer_scope_push() +{ + ((__DEFER__SCOPE_ID++)) + __defer__scope_wipe +} + +defer_scope_pop() +{ + local defer_ix + + for ((defer_ix=$(__defer__ndefers p); defer_ix-->0; )); do + __defer__run p $defer_ix + done + + for ((defer_ix=$(__defer__ndefers d); defer_ix-->0; )); do + __defer__run d $defer_ix + done + + __defer__scope_wipe + ((__DEFER__SCOPE_ID--)) +} + +defer() +{ + __defer__schedule d "$@" +} + +defer_prio() +{ + __defer__schedule p "$@" +} + +defer_scopes_cleanup() +{ + while ((__DEFER__SCOPE_ID >= 0)); do + defer_scope_pop + done +} + +in_defer_scope() +{ + local ret + + defer_scope_push + "$@" + ret=$? + defer_scope_pop + + return $ret +} + +__defer__scope_wipe diff --git a/tools/testing/selftests/net/psock_fanout.c b/tools/testing/selftests/net/psock_fanout.c index 4f31e92ebd96..84c524357075 100644 --- a/tools/testing/selftests/net/psock_fanout.c +++ b/tools/testing/selftests/net/psock_fanout.c @@ -48,6 +48,7 @@ #include <string.h> #include <sys/mman.h> #include <sys/socket.h> +#include <sys/ioctl.h> #include <sys/stat.h> #include <sys/types.h> #include <unistd.h> @@ -59,6 +60,33 @@ static uint32_t cfg_max_num_members; +static void loopback_set_up_down(int state_up) +{ + struct ifreq ifreq = {}; + int fd, err; + + fd = socket(AF_PACKET, SOCK_RAW, 0); + if (fd < 0) { + perror("socket loopback"); + exit(1); + } + strcpy(ifreq.ifr_name, "lo"); + err = ioctl(fd, SIOCGIFFLAGS, &ifreq); + if (err) { + perror("SIOCGIFFLAGS"); + exit(1); + } + if (state_up != !!(ifreq.ifr_flags & IFF_UP)) { + ifreq.ifr_flags ^= IFF_UP; + err = ioctl(fd, SIOCSIFFLAGS, &ifreq); + if (err) { + perror("SIOCSIFFLAGS"); + exit(1); + } + } + close(fd); +} + /* Open a socket in a given fanout mode. * @return -1 if mode is bad, a valid socket otherwise */ static int sock_fanout_open(uint16_t typeflags, uint16_t group_id) @@ -251,6 +279,41 @@ static int sock_fanout_read(int fds[], char *rings[], const int expect[]) return 0; } +/* Test that creating/joining a fanout group fails for unbound socket without + * a specified protocol + */ +static void test_unbound_fanout(void) +{ + int val, fd0, fd1, err; + + fprintf(stderr, "test: unbound fanout\n"); + fd0 = socket(PF_PACKET, SOCK_RAW, 0); + if (fd0 < 0) { + perror("socket packet"); + exit(1); + } + /* Try to create a new fanout group. Should fail. 
*/ + val = (PACKET_FANOUT_HASH << 16) | 1; + err = setsockopt(fd0, SOL_PACKET, PACKET_FANOUT, &val, sizeof(val)); + if (!err) { + fprintf(stderr, "ERROR: unbound socket fanout create\n"); + exit(1); + } + fd1 = sock_fanout_open(PACKET_FANOUT_HASH, 1); + if (fd1 == -1) { + fprintf(stderr, "ERROR: failed to open HASH socket\n"); + exit(1); + } + /* Try to join an existing fanout group. Should fail. */ + err = setsockopt(fd0, SOL_PACKET, PACKET_FANOUT, &val, sizeof(val)); + if (!err) { + fprintf(stderr, "ERROR: unbound socket fanout join\n"); + exit(1); + } + close(fd0); + close(fd1); +} + /* Test illegal mode + flag combination */ static void test_control_single(void) { @@ -264,17 +327,22 @@ static void test_control_single(void) } /* Test illegal group with different modes or flags */ -static void test_control_group(void) +static void test_control_group(int toggle) { int fds[2]; - fprintf(stderr, "test: control multiple sockets\n"); + if (toggle) + fprintf(stderr, "test: control multiple sockets with link down toggle\n"); + else + fprintf(stderr, "test: control multiple sockets\n"); fds[0] = sock_fanout_open(PACKET_FANOUT_HASH, 0); if (fds[0] == -1) { fprintf(stderr, "ERROR: failed to open HASH socket\n"); exit(1); } + if (toggle) + loopback_set_up_down(0); if (sock_fanout_open(PACKET_FANOUT_HASH | PACKET_FANOUT_FLAG_DEFRAG, 0) != -1) { fprintf(stderr, "ERROR: joined group with wrong flag defrag\n"); @@ -294,6 +362,8 @@ static void test_control_group(void) fprintf(stderr, "ERROR: failed to join group\n"); exit(1); } + if (toggle) + loopback_set_up_down(1); if (close(fds[1]) || close(fds[0])) { fprintf(stderr, "ERROR: closing sockets\n"); exit(1); @@ -488,8 +558,10 @@ int main(int argc, char **argv) const int expect_uniqueid[2][2] = { { 20, 20}, { 20, 20 } }; int port_off = 2, tries = 20, ret; + test_unbound_fanout(); test_control_single(); - test_control_group(); + test_control_group(0); + test_control_group(1); test_control_group_max_num_members(); test_unique_fanout_group_ids(); diff --git a/tools/testing/selftests/net/rtnetlink.sh b/tools/testing/selftests/net/rtnetlink.sh index bdf6f10d0558..87dce3efe31e 100755 --- a/tools/testing/selftests/net/rtnetlink.sh +++ b/tools/testing/selftests/net/rtnetlink.sh @@ -809,10 +809,10 @@ kci_test_ipsec_offload() # does driver have correct offload info run_cmd diff $sysfsf - << EOF SA count=2 tx=3 -sa[0] tx ipaddr=0x00000000 00000000 00000000 00000000 +sa[0] tx ipaddr=$dstip sa[0] spi=0x00000009 proto=0x32 salt=0x61626364 crypt=1 sa[0] key=0x34333231 38373635 32313039 36353433 -sa[1] rx ipaddr=0x00000000 00000000 00000000 037ba8c0 +sa[1] rx ipaddr=$srcip sa[1] spi=0x00000009 proto=0x32 salt=0x61626364 crypt=1 sa[1] key=0x34333231 38373635 32313039 36353433 EOF diff --git a/tools/testing/selftests/net/tcp_ao/lib/aolib.h b/tools/testing/selftests/net/tcp_ao/lib/aolib.h index db44e77428dd..5db2f65cddc4 100644 --- a/tools/testing/selftests/net/tcp_ao/lib/aolib.h +++ b/tools/testing/selftests/net/tcp_ao/lib/aolib.h @@ -46,6 +46,7 @@ static inline char *test_snprintf(const char *fmt, va_list vargs) va_copy(tmp, vargs); n = vsnprintf(ret, size, fmt, tmp); + va_end(tmp); if (n < 0) return NULL; diff --git a/tools/testing/selftests/net/tls.c b/tools/testing/selftests/net/tls.c index f27a12d2a2c9..1a706d03bb6b 100644 --- a/tools/testing/selftests/net/tls.c +++ b/tools/testing/selftests/net/tls.c @@ -266,6 +266,25 @@ TEST_F(tls_basic, bad_cipher) EXPECT_EQ(setsockopt(self->fd, SOL_TLS, TLS_TX, &tls12, sizeof(struct tls12_crypto_info_aes_gcm_128)), -1); } 
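+/* With the record sequence number preloaded to all-ones, sending even a
+ * single record would wrap the 64-bit counter. The kernel is expected to
+ * refuse the send with EBADMSG rather than wrap around and reuse a nonce.
+ */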
+TEST_F(tls_basic, recseq_wrap) +{ + struct tls_crypto_info_keys tls12; + char const *test_str = "test_read"; + int send_len = 10; + + if (self->notls) + SKIP(return, "no TLS support"); + + tls_crypto_info_init(TLS_1_2_VERSION, TLS_CIPHER_AES_GCM_128, &tls12); + memset(&tls12.aes128.rec_seq, 0xff, sizeof(tls12.aes128.rec_seq)); + + ASSERT_EQ(setsockopt(self->fd, SOL_TLS, TLS_TX, &tls12, tls12.len), 0); + ASSERT_EQ(setsockopt(self->cfd, SOL_TLS, TLS_RX, &tls12, tls12.len), 0); + + EXPECT_EQ(send(self->fd, test_str, send_len, 0), -1); + EXPECT_EQ(errno, EBADMSG); +} + FIXTURE(tls) { int fd, cfd; diff --git a/tools/testing/selftests/net/txtimestamp.c b/tools/testing/selftests/net/txtimestamp.c index d626f22f9550..dae91eb97d69 100644 --- a/tools/testing/selftests/net/txtimestamp.c +++ b/tools/testing/selftests/net/txtimestamp.c @@ -77,6 +77,8 @@ static bool cfg_epollet; static bool cfg_do_listen; static uint16_t dest_port = 9000; static bool cfg_print_nsec; +static uint32_t ts_opt_id; +static bool cfg_use_cmsg_opt_id; static struct sockaddr_in daddr; static struct sockaddr_in6 daddr6; @@ -136,12 +138,13 @@ static void validate_key(int tskey, int tstype) /* compare key for each subsequent request * must only test for one type, the first one requested */ - if (saved_tskey == -1) + if (saved_tskey == -1 || cfg_use_cmsg_opt_id) saved_tskey_type = tstype; else if (saved_tskey_type != tstype) return; stepsize = cfg_proto == SOCK_STREAM ? cfg_payload_len : 1; + stepsize = cfg_use_cmsg_opt_id ? 0 : stepsize; if (tskey != saved_tskey + stepsize) { fprintf(stderr, "ERROR: key %d, expected %d\n", tskey, saved_tskey + stepsize); @@ -484,7 +487,7 @@ static void fill_header_udp(void *p, bool is_ipv4) static void do_test(int family, unsigned int report_opt) { - char control[CMSG_SPACE(sizeof(uint32_t))]; + char control[2 * CMSG_SPACE(sizeof(uint32_t))]; struct sockaddr_ll laddr; unsigned int sock_opt; struct cmsghdr *cmsg; @@ -624,18 +627,32 @@ static void do_test(int family, unsigned int report_opt) msg.msg_iov = &iov; msg.msg_iovlen = 1; - if (cfg_use_cmsg) { + if (cfg_use_cmsg || cfg_use_cmsg_opt_id) { memset(control, 0, sizeof(control)); msg.msg_control = control; - msg.msg_controllen = sizeof(control); + msg.msg_controllen = cfg_use_cmsg * CMSG_SPACE(sizeof(uint32_t)); + msg.msg_controllen += cfg_use_cmsg_opt_id * CMSG_SPACE(sizeof(uint32_t)); - cmsg = CMSG_FIRSTHDR(&msg); - cmsg->cmsg_level = SOL_SOCKET; - cmsg->cmsg_type = SO_TIMESTAMPING; - cmsg->cmsg_len = CMSG_LEN(sizeof(uint32_t)); + cmsg = NULL; + if (cfg_use_cmsg) { + cmsg = CMSG_FIRSTHDR(&msg); + cmsg->cmsg_level = SOL_SOCKET; + cmsg->cmsg_type = SO_TIMESTAMPING; + cmsg->cmsg_len = CMSG_LEN(sizeof(uint32_t)); + + *((uint32_t *)CMSG_DATA(cmsg)) = report_opt; + } + if (cfg_use_cmsg_opt_id) { + cmsg = cmsg ? 
CMSG_NXTHDR(&msg, cmsg) : CMSG_FIRSTHDR(&msg); + cmsg->cmsg_level = SOL_SOCKET; + cmsg->cmsg_type = SCM_TS_OPT_ID; + cmsg->cmsg_len = CMSG_LEN(sizeof(uint32_t)); + + *((uint32_t *)CMSG_DATA(cmsg)) = ts_opt_id; + saved_tskey = ts_opt_id; + } - *((uint32_t *) CMSG_DATA(cmsg)) = report_opt; } val = sendmsg(fd, &msg, 0); @@ -685,6 +702,7 @@ static void __attribute__((noreturn)) usage(const char *filepath) " -L listen on hostname and port\n" " -n: set no-payload option\n" " -N: print timestamps and durations in nsec (instead of usec)\n" + " -o N: use SCM_TS_OPT_ID control message to provide N as tskey\n" " -p N: connect to port N\n" " -P: use PF_PACKET\n" " -r: use raw\n" @@ -705,7 +723,7 @@ static void parse_opt(int argc, char **argv) int c; while ((c = getopt(argc, argv, - "46bc:CeEFhIl:LnNp:PrRS:t:uv:V:x")) != -1) { + "46bc:CeEFhIl:LnNo:p:PrRS:t:uv:V:x")) != -1) { switch (c) { case '4': do_ipv6 = 0; @@ -746,6 +764,10 @@ static void parse_opt(int argc, char **argv) case 'N': cfg_print_nsec = true; break; + case 'o': + ts_opt_id = strtoul(optarg, NULL, 10); + cfg_use_cmsg_opt_id = true; + break; case 'p': dest_port = strtoul(optarg, NULL, 10); break; @@ -803,6 +825,8 @@ static void parse_opt(int argc, char **argv) error(1, 0, "cannot ask for pktinfo over pf_packet"); if (cfg_busy_poll && cfg_use_epoll) error(1, 0, "pass epoll or busy_poll, not both"); + if (cfg_proto == SOCK_STREAM && cfg_use_cmsg_opt_id) + error(1, 0, "TCP sockets don't support SCM_TS_OPT_ID"); if (optind != argc - 1) error(1, 0, "missing required hostname argument"); diff --git a/tools/testing/selftests/net/txtimestamp.sh b/tools/testing/selftests/net/txtimestamp.sh index 25baca4b148e..fe4649bb8786 100755 --- a/tools/testing/selftests/net/txtimestamp.sh +++ b/tools/testing/selftests/net/txtimestamp.sh @@ -37,11 +37,13 @@ run_test_v4v6() { run_test_tcpudpraw() { local -r args=$@ - run_test_v4v6 ${args} # tcp - run_test_v4v6 ${args} -u # udp - run_test_v4v6 ${args} -r # raw - run_test_v4v6 ${args} -R # raw (IPPROTO_RAW) - run_test_v4v6 ${args} -P # pf_packet + run_test_v4v6 ${args} # tcp + run_test_v4v6 ${args} -u # udp + run_test_v4v6 ${args} -u -o 42 # udp with fixed tskey + run_test_v4v6 ${args} -r # raw + run_test_v4v6 ${args} -r -o 42 # raw + run_test_v4v6 ${args} -R # raw (IPPROTO_RAW) + run_test_v4v6 ${args} -P # pf_packet } run_test_all() { diff --git a/tools/testing/selftests/net/ynl.mk b/tools/testing/selftests/net/ynl.mk index 1ef24119def0..d43afe243779 100644 --- a/tools/testing/selftests/net/ynl.mk +++ b/tools/testing/selftests/net/ynl.mk @@ -9,6 +9,8 @@ # YNL_GEN_FILES: TEST_GEN_FILES which need YNL YNL_OUTPUTS := $(patsubst %,$(OUTPUT)/%,$(YNL_GEN_FILES)) +YNL_SPECS := \ + $(patsubst %,$(top_srcdir)/Documentation/netlink/specs/%.yaml,$(YNL_GENS)) $(YNL_OUTPUTS): $(OUTPUT)/libynl.a $(YNL_OUTPUTS): CFLAGS += \ @@ -16,10 +18,20 @@ $(YNL_OUTPUTS): CFLAGS += \ -I$(top_srcdir)/tools/net/ynl/lib/ \ -I$(top_srcdir)/tools/net/ynl/generated/ -$(OUTPUT)/libynl.a: +# Make sure we rebuild libynl if user added a new family. We can't easily +# depend on the contents of a variable so create a fake file with a hash. 
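+# Hypothetical example: YNL_GENS="ethtool netdev" and
+# YNL_GENS="ethtool netdev dpll" hash to different .libynl-<hash>.sig
+# names, so editing the family list invalidates the signature file and
+# forces a libynl.a rebuild on its own.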
+YNL_GENS_HASH := $(shell echo $(YNL_GENS) | sha1sum | cut -c1-8) +$(OUTPUT)/.libynl-$(YNL_GENS_HASH).sig: + $(Q)rm -f $(OUTPUT)/.libynl-*.sig + $(Q)touch $(OUTPUT)/.libynl-$(YNL_GENS_HASH).sig + +$(OUTPUT)/libynl.a: $(YNL_SPECS) $(OUTPUT)/.libynl-$(YNL_GENS_HASH).sig + $(Q)rm -f $(top_srcdir)/tools/net/ynl/libynl.a $(Q)$(MAKE) -C $(top_srcdir)/tools/net/ynl GENS="$(YNL_GENS)" libynl.a $(Q)cp $(top_srcdir)/tools/net/ynl/libynl.a $(OUTPUT)/libynl.a EXTRA_CLEAN += \ $(top_srcdir)/tools/net/ynl/lib/__pycache__ \ - $(top_srcdir)/tools/net/ynl/lib/*.[ado] + $(top_srcdir)/tools/net/ynl/lib/*.[ado] \ + $(OUTPUT)/.libynl-*.sig \ + $(OUTPUT)/libynl.a diff --git a/tools/testing/selftests/ptp/testptp.c b/tools/testing/selftests/ptp/testptp.c index 011252fe238c..58064151f2c8 100644 --- a/tools/testing/selftests/ptp/testptp.c +++ b/tools/testing/selftests/ptp/testptp.c @@ -146,6 +146,7 @@ static void usage(char *progname) " -T val set the ptp clock time to 'val' seconds\n" " -x val get an extended ptp clock time with the desired number of samples (up to %d)\n" " -X get a ptp clock cross timestamp\n" + " -y val pre/post tstamp timebase to use {realtime|monotonic|monotonic-raw}\n" " -z test combinations of rising/falling external time stamp flags\n", progname, PTP_MAX_SAMPLES); } @@ -189,6 +190,7 @@ int main(int argc, char *argv[]) int seconds = 0; int settime = 0; int channel = -1; + clockid_t ext_clockid = CLOCK_REALTIME; int64_t t1, t2, tp; int64_t interval, offset; @@ -198,7 +200,7 @@ int main(int argc, char *argv[]) progname = strrchr(argv[0], '/'); progname = progname ? 1+progname : argv[0]; - while (EOF != (c = getopt(argc, argv, "cd:e:f:F:ghH:i:k:lL:n:o:p:P:sSt:T:w:x:Xz"))) { + while (EOF != (c = getopt(argc, argv, "cd:e:f:F:ghH:i:k:lL:n:o:p:P:sSt:T:w:x:Xy:z"))) { switch (c) { case 'c': capabilities = 1; @@ -278,6 +280,21 @@ int main(int argc, char *argv[]) case 'X': getcross = 1; break; + case 'y': + if (!strcasecmp(optarg, "realtime")) + ext_clockid = CLOCK_REALTIME; + else if (!strcasecmp(optarg, "monotonic")) + ext_clockid = CLOCK_MONOTONIC; + else if (!strcasecmp(optarg, "monotonic-raw")) + ext_clockid = CLOCK_MONOTONIC_RAW; + else { + fprintf(stderr, + "type needs to be realtime, monotonic or monotonic-raw; was given %s\n", + optarg); + return -1; + } + break; + case 'z': flagtest = 1; break; @@ -566,6 +583,7 @@ int main(int argc, char *argv[]) } soe->n_samples = getextended; + soe->clockid = ext_clockid; if (ioctl(fd, PTP_SYS_OFFSET_EXTENDED, soe)) { perror("PTP_SYS_OFFSET_EXTENDED"); @@ -574,12 +592,46 @@ int main(int argc, char *argv[]) getextended); for (i = 0; i < getextended; i++) { - printf("sample #%2d: system time before: %lld.%09u\n", - i, soe->ts[i][0].sec, soe->ts[i][0].nsec); + switch (ext_clockid) { + case CLOCK_REALTIME: + printf("sample #%2d: real time before: %lld.%09u\n", + i, soe->ts[i][0].sec, + soe->ts[i][0].nsec); + break; + case CLOCK_MONOTONIC: + printf("sample #%2d: monotonic time before: %lld.%09u\n", + i, soe->ts[i][0].sec, + soe->ts[i][0].nsec); + break; + case CLOCK_MONOTONIC_RAW: + printf("sample #%2d: monotonic-raw time before: %lld.%09u\n", + i, soe->ts[i][0].sec, + soe->ts[i][0].nsec); + break; + default: + break; + } printf(" phc time: %lld.%09u\n", soe->ts[i][1].sec, soe->ts[i][1].nsec); - printf(" system time after: %lld.%09u\n", - soe->ts[i][2].sec, soe->ts[i][2].nsec); + switch (ext_clockid) { + case CLOCK_REALTIME: + printf(" real time after: %lld.%09u\n", + soe->ts[i][2].sec, + soe->ts[i][2].nsec); + break; + case CLOCK_MONOTONIC: + printf(" monotonic 
 time after: %lld.%09u\n",
+				       soe->ts[i][2].sec,
+				       soe->ts[i][2].nsec);
+				break;
+			case CLOCK_MONOTONIC_RAW:
+				printf(" monotonic-raw time after: %lld.%09u\n",
+				       soe->ts[i][2].sec,
+				       soe->ts[i][2].nsec);
+				break;
+			default:
+				break;
+			}
 		}
 	}