author    Linus Torvalds <torvalds@linux-foundation.org>  2021-04-29 21:57:23 +0300
committer Linus Torvalds <torvalds@linux-foundation.org>  2021-04-29 21:57:23 +0300
commit    9d31d2338950293ec19d9b095fbaa9030899dcb4
tree      e688040d0557c24a2eeb9f6c9c223d949f6f7ef9 /drivers/net/ethernet/mellanox
parent    635de956a7f5a6ffcb04f29d70630c64c717b56b
parent    4a52dd8fefb45626dace70a63c0738dbd83b7edb
Merge tag 'net-next-5.13' of git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net-next
Pull networking updates from Jakub Kicinski:
 "Core:

   - bpf:
       - allow bpf programs calling kernel functions (initially to reuse TCP congestion control implementations)
       - enable task local storage for tracing programs - remove the need to store per-task state in hash maps, and allow tracing programs access to task local storage previously added for BPF_LSM
       - add bpf_for_each_map_elem() helper, allowing programs to walk all map elements in a more robust and easier to verify fashion (sketched after the shortlog below)
       - sockmap: support UDP and cross-protocol BPF_SK_SKB_VERDICT redirection
       - lpm: add support for batched ops in LPM trie
       - add BTF_KIND_FLOAT support - mostly to allow use of BTF on s390 which has floats in its header files
       - improve BPF syscall documentation and extend the use of kdoc parsing scripts we already employ for bpf-helpers
       - libbpf, bpftool: support static linking of BPF ELF files
       - improve support for encapsulation of L2 packets

   - xdp: restructure redirect actions to avoid a runtime lookup, improving performance by 4-8% in microbenchmarks

   - xsk: build skb by page (aka generic zerocopy xmit) - improve performance of software AF_XDP path by 33% for devices which don't need headers in the linear skb part (e.g. virtio)

   - nexthop: resilient next-hop groups - improve path stability on next-hops group changes (incl. offload for mlxsw)

   - ipv6: segment routing: add support for IPv4 decapsulation

   - icmp: add support for RFC 8335 extended PROBE messages

   - inet: use bigger hash table for IP ID generation

   - tcp: deal better with delayed TX completions - make sure we don't give up on fast TCP retransmissions only because driver is slow in reporting that it completed transmitting the original

   - tcp: reorder tcp_congestion_ops for better cache locality

   - mptcp:
       - add sockopt support for common TCP options
       - add support for common TCP msg flags
       - include multiple address ids in RM_ADDR
       - add reset option support for resetting one subflow

   - udp: GRO L4 improvements - improve 'forward' / 'frag_list' co-existence with UDP tunnel GRO, allowing the first to take place correctly even for encapsulated UDP traffic

   - micro-optimize dev_gro_receive() and flow dissection, avoid retpoline overhead on VLAN and TEB GRO

   - use less memory for sysctls, add a new sysctl type, to allow using u8 instead of "int" and "long" and shrink networking sysctls

   - veth: allow GRO without XDP - this allows aggregating UDP packets before handing them off to routing, bridge, OvS, etc.

   - allow specifying ifindex when device is moved to another namespace

   - netfilter:
       - nft_socket: add support for cgroupsv2
       - nftables: add catch-all set element - special element used to define a default action in case normal lookup missed
       - use net_generic infra in many modules to avoid allocating per-ns memory unnecessarily

   - xps: improve the xps handling to avoid potential out-of-bound accesses and use-after-free when XPS change race with other re-configuration under traffic

   - add a config knob to turn off per-cpu netdev refcnt to catch underflows in testing

  Device APIs:

   - add WWAN subsystem to organize the WWAN interfaces better and hopefully start driving towards more unified and vendor-independent APIs

   - ethtool:
       - add interface for reading IEEE MIB stats (incl. mlx5 and bnxt support)
       - allow network drivers to dump arbitrary SFP EEPROM data, current offset+length API was a poor fit for modern SFP which define EEPROM in terms of pages (incl. mlx5 support)

   - act_police, flow_offload: add support for packet-per-second policing (incl. offload for nfp)

   - psample: add additional metadata attributes like transit delay for packets sampled from switch HW (and corresponding egress and policy-based sampling in the mlxsw driver)

   - dsa: improve support for sandwiched LAGs with bridge and DSA

   - netfilter:
       - flowtable: use direct xmit in topologies with IP forwarding, bridging, vlans etc.
       - nftables: counter hardware offload support

   - Bluetooth:
       - improvements for firmware download w/ Intel devices
       - add support for reading AOSP vendor capabilities
       - add support for virtio transport driver

   - mac80211:
       - allow concurrent monitor iface and ethernet rx decap
       - set priority and queue mapping for injected frames

   - phy: add support for Clause-45 PHY Loopback

   - pci/iov: add sysfs MSI-X vector assignment interface to distribute MSI-X resources to VFs (incl. mlx5 support)

  New hardware/drivers:

   - dsa: mv88e6xxx: add support for Marvell mv88e6393x - 11-port Ethernet switch with 8x 1-Gigabit Ethernet and 3x 10-Gigabit interfaces.

   - dsa: support for legacy Broadcom tags used on BCM5325, BCM5365 and BCM63xx switches

   - Microchip KSZ8863 and KSZ8873; 3x 10/100Mbps Ethernet switches

   - ath11k: support for QCN9074, an 802.11ax device

   - Bluetooth: Broadcom BCM4330 and BCM4334

   - phy: Marvell 88X2222 transceiver support

   - mdio: add BCM6368 MDIO mux bus controller

   - r8152: support RTL8153 and RTL8156 (USB Ethernet) chips

   - mana: driver for Microsoft Azure Network Adapter (MANA)

   - Actions Semi Owl Ethernet MAC

   - can: driver for ETAS ES58X CAN/USB interfaces

  Pure driver changes:

   - add XDP support to: enetc, igc, stmmac

   - add AF_XDP support to: stmmac

   - virtio:
       - page_to_skb() use build_skb when there's sufficient tailroom (21% improvement for 1000B UDP frames)
       - support XDP even without dedicated Tx queues - share the Tx queues with the stack when necessary

   - mlx5:
       - flow rules: add support for mirroring with conntrack, matching on ICMP, GTP, flex filters and more
       - support packet sampling with flow offloads
       - persist uplink representor netdev across eswitch mode changes
       - allow coexistence of CQE compression and HW time-stamping
       - add ethtool extended link error state reporting

   - ice, iavf: support flow filters, UDP Segmentation Offload

   - dpaa2-switch:
       - move the driver out of staging
       - add spanning tree (STP) support
       - add rx copybreak support
       - add tc flower hardware offload on ingress traffic

   - ionic:
       - implement Rx page reuse
       - support HW PTP time-stamping

   - octeon: support TC hardware offloads - flower matching on ingress and egress rate limiting

   - stmmac:
       - add RX frame steering based on VLAN priority in tc flower
       - support frame preemption (FPE)
       - intel: add cross time-stamping freq difference adjustment

   - ocelot:
       - support forwarding of MRP frames in HW
       - support multiple bridges
       - support PTP Sync one-step timestamping

   - dsa: mv88e6xxx, dpaa2-switch: offload bridge port flags like learning, flooding etc.

   - ipa: add IPA v4.5, v4.9 and v4.11 support (Qualcomm SDX55, SM8350, SC7280 SoCs)

   - mt7601u: enable TDLS support

   - mt76:
       - add support for 802.3 rx frames (mt7915/mt7615)
       - mt7915 flash pre-calibration support
       - mt7921/mt7663 runtime power management fixes"

* tag 'net-next-5.13' of git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net-next: (2451 commits)
  net: selftest: fix build issue if INET is disabled
  net: netrom: nr_in: Remove redundant assignment to ns
  net: tun: Remove redundant assignment to ret
  net: phy: marvell: add downshift support for M88E1240
  net: dsa: ksz: Make reg_mib_cnt a u8 as it never exceeds 255
  net/sched: act_ct: Remove redundant ct get and check
  icmp: standardize naming of RFC 8335 PROBE constants
  bpf, selftests: Update array map tests for per-cpu batched ops
  bpf: Add batched ops support for percpu array
  bpf: Implement formatted output helpers with bstr_printf
  seq_file: Add a seq_bprintf function
  sfc: adjust efx->xdp_tx_queue_count with the real number of initialized queues
  net:nfc:digital: Fix a double free in digital_tg_recv_dep_req
  net: fix a concurrency bug in l2tp_tunnel_register()
  net/smc: Remove redundant assignment to rc
  mpls: Remove redundant assignment to err
  llc2: Remove redundant assignment to rc
  net/tls: Remove redundant initialization of record
  rds: Remove redundant assignment to nr_sig
  dt-bindings: net: mdio-gpio: add compatible for microchip,mdio-smi0
  ...
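For the bpf_for_each_map_elem() item above, a minimal sketch of the BPF-program side. This is hedged: it assumes a libbpf recent enough to carry the new helper definition in bpf_helpers.h, and the map, section and function names are illustrative, not taken from this merge.

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct {
	__uint(type, BPF_MAP_TYPE_ARRAY);
	__uint(max_entries, 64);
	__type(key, __u32);
	__type(value, __u64);
} counters SEC(".maps");

/* Callback invoked once per map element; returning 1 stops the walk,
 * returning 0 continues it. */
static long sum_elem(struct bpf_map *map, __u32 *key, __u64 *val, void *ctx)
{
	*(__u64 *)ctx += *val;
	return 0;
}

SEC("tracepoint/syscalls/sys_enter_getpid")
int sum_counters(void *args)
{
	__u64 total = 0;

	/* Verifier-checked iteration over all elements -- no manually
	 * bounded loop over keys needed. */
	bpf_for_each_map_elem(&counters, sum_elem, &total, 0);
	return 0;
}

char LICENSE[] SEC("license") = "GPL";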
Diffstat (limited to 'drivers/net/ethernet/mellanox')
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/cmd.c | 69
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/mlx4.h | 1
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/Kconfig | 12
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/Makefile | 12
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/cmd.c | 32
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/dev.c | 3
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/devlink.c | 72
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/devlink.h | 1
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c | 5
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en.h | 57
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en/devlink.c | 43
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en/devlink.h | 6
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en/fs.h | 60
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en/fs_tt_redirect.c | 605
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en/fs_tt_redirect.h | 26
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en/health.c | 2
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en/params.c | 548
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en/params.h | 53
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c | 399
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en/ptp.h | 32
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en/qos.c | 4
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c | 91
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c | 183
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c | 27
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c | 54
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en/tc_priv.h | 1
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.h | 10
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c | 1
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en/trap.c | 203
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c | 15
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c | 56
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h | 7
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c | 4
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h | 11
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c | 131
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_txrx.h | 20
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c | 9
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.h | 14
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c | 99
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_common.c | 27
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c | 34
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c | 434
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_fs.c | 147
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 1149
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_rep.c | 309
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_rep.h | 10
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_rx.c | 6
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_stats.c | 292
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_stats.h | 15
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_tc.c | 225
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_tx.c | 13
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c | 5
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/eq.c | 27
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/esw/acl/egress_lgcy.c | 2
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/esw/acl/egress_ofld.c | 4
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/esw/acl/helper.c | 8
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/esw/acl/helper.h | 2
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ingress_lgcy.c | 2
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ingress_ofld.c | 4
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c | 9
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/esw/indir_table.h | 6
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/esw/legacy.c | 510
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/esw/legacy.h | 22
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/esw/sample.c | 585
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/esw/sample.h | 42
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/esw/vporttbl.c | 140
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/eswitch.c | 979
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/eswitch.h | 277
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c | 724
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_termtbl.c | 14
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c | 9
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/fs_core.c | 102
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/fs_core.h | 7
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c | 16
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c | 13
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/health.c | 6
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c | 38
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c | 1
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/lag.c | 4
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/lag_mp.c | 2
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c | 25
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/lib/clock.h | 11
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/lib/crypto.c | 2
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/lib/eq.h | 15
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.c | 52
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.h | 6
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/lib/gid.c | 15
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/lib/mlx5.h | 9
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/main.c | 149
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h | 20
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c | 2
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c | 74
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/port.c | 110
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/rdma.c | 1
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/rl.c | 139
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.c | 12
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.h | 2
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/sf/dev/driver.c | 14
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c | 43
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/sf/hw_table.c | 269
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/sf/priv.h | 9
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/sriov.c | 48
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c | 242
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c | 70
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c | 256
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c | 11
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c | 65
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c | 145
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.h | 31
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v0.c | 368
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.c | 289
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/steering/dr_table.c | 4
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h | 205
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5_ifc_dr.h | 16
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/vport.c | 14
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/wq.c | 5
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/core.c | 6
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/core.h | 21
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c | 131
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h | 11
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/core_thermal.c | 27
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/pci.c | 55
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/pci_hw.h | 71
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/reg.h | 130
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum.c | 215
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum.h | 76
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c | 25
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_actions.c | 83
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c | 21
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c | 129
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum_flow.c | 2
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c | 23
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.c | 10
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.h | 3
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum_matchall.c | 245
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.h | 1
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum_nve_vxlan.c | 15
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c | 453
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c | 682
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h | 12
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c | 21
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum_span.h | 16
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c | 79
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c | 213
144 files changed, 10657 insertions, 4066 deletions
diff --git a/drivers/net/ethernet/mellanox/mlx4/cmd.c b/drivers/net/ethernet/mellanox/mlx4/cmd.c
index c678344d22a2..8d751383530b 100644
--- a/drivers/net/ethernet/mellanox/mlx4/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx4/cmd.c
@@ -2241,43 +2241,52 @@ void mlx4_master_comm_channel(struct work_struct *work)
struct mlx4_priv *priv =
container_of(mfunc, struct mlx4_priv, mfunc);
struct mlx4_dev *dev = &priv->dev;
- __be32 *bit_vec;
+ u32 lbit_vec[COMM_CHANNEL_BIT_ARRAY_SIZE];
+ u32 nmbr_bits;
u32 comm_cmd;
- u32 vec;
- int i, j, slave;
+ int i, slave;
int toggle;
+ bool first = true;
int served = 0;
int reported = 0;
u32 slt;
- bit_vec = master->comm_arm_bit_vector;
- for (i = 0; i < COMM_CHANNEL_BIT_ARRAY_SIZE; i++) {
- vec = be32_to_cpu(bit_vec[i]);
- for (j = 0; j < 32; j++) {
- if (!(vec & (1 << j)))
- continue;
- ++reported;
- slave = (i * 32) + j;
- comm_cmd = swab32(readl(
- &mfunc->comm[slave].slave_write));
- slt = swab32(readl(&mfunc->comm[slave].slave_read))
- >> 31;
- toggle = comm_cmd >> 31;
- if (toggle != slt) {
- if (master->slave_state[slave].comm_toggle
- != slt) {
- pr_info("slave %d out of sync. read toggle %d, state toggle %d. Resynching.\n",
- slave, slt,
- master->slave_state[slave].comm_toggle);
- master->slave_state[slave].comm_toggle =
- slt;
- }
- mlx4_master_do_cmd(dev, slave,
- comm_cmd >> 16 & 0xff,
- comm_cmd & 0xffff, toggle);
- ++served;
+ for (i = 0; i < COMM_CHANNEL_BIT_ARRAY_SIZE; i++)
+ lbit_vec[i] = be32_to_cpu(master->comm_arm_bit_vector[i]);
+ nmbr_bits = dev->persist->num_vfs + 1;
+ if (++master->next_slave >= nmbr_bits)
+ master->next_slave = 0;
+ slave = master->next_slave;
+ while (true) {
+ slave = find_next_bit((const unsigned long *)&lbit_vec, nmbr_bits, slave);
+ if (!first && slave >= master->next_slave)
+ break;
+ if (slave == nmbr_bits) {
+ if (!first)
+ break;
+ first = false;
+ slave = 0;
+ continue;
+ }
+ ++reported;
+ comm_cmd = swab32(readl(&mfunc->comm[slave].slave_write));
+ slt = swab32(readl(&mfunc->comm[slave].slave_read)) >> 31;
+ toggle = comm_cmd >> 31;
+ if (toggle != slt) {
+ if (master->slave_state[slave].comm_toggle
+ != slt) {
+ pr_info("slave %d out of sync. read toggle %d, state toggle %d. Resynching.\n",
+ slave, slt,
+ master->slave_state[slave].comm_toggle);
+ master->slave_state[slave].comm_toggle =
+ slt;
}
+ mlx4_master_do_cmd(dev, slave,
+ comm_cmd >> 16 & 0xff,
+ comm_cmd & 0xffff, toggle);
+ ++served;
}
+ slave++;
}
if (reported && reported != served)
@@ -2389,6 +2398,8 @@ int mlx4_multi_func_init(struct mlx4_dev *dev)
if (!priv->mfunc.master.vf_oper)
goto err_comm_oper;
+ priv->mfunc.master.next_slave = 0;
+
for (i = 0; i < dev->num_slaves; ++i) {
vf_admin = &priv->mfunc.master.vf_admin[i];
vf_oper = &priv->mfunc.master.vf_oper[i];
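The two cmd.c hunks above replace the fixed 0..COMM_CHANNEL_BIT_ARRAY_SIZE scan of the comm-channel bit vector with a round-robin walk that resumes just after the last slave served (the position is kept in the new next_slave field, zeroed in mlx4_multi_func_init()), so a busy low-numbered VF can no longer starve higher-numbered ones. A stand-alone user-space sketch of the same wraparound scan, with find_next_set() as a simplified stand-in for the kernel's find_next_bit() and all names hypothetical:

#include <stdio.h>

#define NBITS 8

/* Stand-in for the kernel's find_next_bit(): index of the first set bit
 * at or after 'start', or 'nbits' if there is none. */
static unsigned int find_next_set(unsigned long vec, unsigned int nbits,
				  unsigned int start)
{
	for (unsigned int i = start; i < nbits; i++)
		if (vec & (1UL << i))
			return i;
	return nbits;
}

/* Serve every set bit exactly once, starting just after *next and
 * wrapping around -- the fairness pattern the hunk introduces. */
static void serve_round_robin(unsigned long vec, unsigned int nbits,
			      unsigned int *next)
{
	unsigned int slave;
	int first = 1;

	if (++(*next) >= nbits)
		*next = 0;
	slave = *next;
	while (1) {
		slave = find_next_set(vec, nbits, slave);
		if (!first && slave >= *next)
			break;		/* wrapped back to the start point */
		if (slave == nbits) {	/* ran off the end: wrap once */
			if (!first)
				break;
			first = 0;
			slave = 0;
			continue;
		}
		printf("serving slave %u\n", slave);
		slave++;
	}
}

int main(void)
{
	unsigned long armed = 0xB2;	/* slaves 1, 4, 5 and 7 armed */
	unsigned int next = 3;		/* previous pass ended at slave 3 */

	serve_round_robin(armed, NBITS, &next);	/* serves 4, 5, 7, 1 */
	return 0;
}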
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
index 64bed7ac3836..6ccf340660d9 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
@@ -603,6 +603,7 @@ struct mlx4_mfunc_master_ctx {
struct mlx4_slave_event_eq slave_eq;
struct mutex gen_eqe_mutex[MLX4_MFUNC_MAX];
struct mlx4_qos_manager qos_ctl[MLX4_MAX_PORTS + 1];
+ u32 next_slave; /* mlx4_master_comm_channel */
};
struct mlx4_mfunc {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
index 9d623e38d783..461a43f338e6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
+++ b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
@@ -104,6 +104,18 @@ config MLX5_TC_CT
If unsure, set to Y
+config MLX5_TC_SAMPLE
+ bool "MLX5 TC sample offload support"
+ depends on MLX5_CLS_ACT
+ default y
+ help
+ Say Y here if you want to support offloading sample rules via tc
+ sample action.
+ If set to N, will not be able to configure tc rules with sample
+ action.
+
+ If unsure, set to Y
+
config MLX5_CORE_EN_DCB
bool "Data Center Bridging (DCB) Support"
default y
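The new MLX5_TC_SAMPLE option gates offload of tc's sample action (implemented by esw/sample.c in the diffstat above). Assuming iproute2's tc, a rule of roughly this shape exercises it; the device name and sample group are illustrative:

    tc filter add dev enp6s0f0_0 ingress protocol ip flower skip_sw action sample rate 1000 group 5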
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Makefile b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
index 8cb2625472c3..a1223e904190 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/Makefile
+++ b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
@@ -27,7 +27,7 @@ mlx5_core-$(CONFIG_MLX5_CORE_EN) += en_main.o en_common.o en_fs.o en_ethtool.o \
en_selftest.o en/port.o en/monitor_stats.o en/health.o \
en/reporter_tx.o en/reporter_rx.o en/params.o en/xsk/pool.o \
en/xsk/setup.o en/xsk/rx.o en/xsk/tx.o en/devlink.o en/ptp.o \
- en/qos.o en/trap.o
+ en/qos.o en/trap.o en/fs_tt_redirect.o
#
# Netdev extra
@@ -37,9 +37,10 @@ mlx5_core-$(CONFIG_MLX5_EN_RXNFC) += en_fs_ethtool.o
mlx5_core-$(CONFIG_MLX5_CORE_EN_DCB) += en_dcbnl.o en/port_buffer.o
mlx5_core-$(CONFIG_PCI_HYPERV_INTERFACE) += en/hv_vhca_stats.o
mlx5_core-$(CONFIG_MLX5_ESWITCH) += lag_mp.o lib/geneve.o lib/port_tun.o \
- en_rep.o en/rep/bond.o en/mod_hdr.o
+ en_rep.o en/rep/bond.o en/mod_hdr.o \
+ en/mapping.o
mlx5_core-$(CONFIG_MLX5_CLS_ACT) += en_tc.o en/rep/tc.o en/rep/neigh.o \
- en/mapping.o lib/fs_chains.o en/tc_tun.o \
+ lib/fs_chains.o en/tc_tun.o \
esw/indir_table.o en/tc_tun_encap.o \
en/tc_tun_vxlan.o en/tc_tun_gre.o en/tc_tun_geneve.o \
en/tc_tun_mplsoudp.o diag/en_tc_tracepoint.o
@@ -49,11 +50,12 @@ mlx5_core-$(CONFIG_MLX5_TC_CT) += en/tc_ct.o
# Core extra
#
mlx5_core-$(CONFIG_MLX5_ESWITCH) += eswitch.o eswitch_offloads.o eswitch_offloads_termtbl.o \
- ecpf.o rdma.o
+ ecpf.o rdma.o esw/legacy.o
mlx5_core-$(CONFIG_MLX5_ESWITCH) += esw/acl/helper.o \
esw/acl/egress_lgcy.o esw/acl/egress_ofld.o \
esw/acl/ingress_lgcy.o esw/acl/ingress_ofld.o \
- esw/devlink_port.o
+ esw/devlink_port.o esw/vporttbl.o
+mlx5_core-$(CONFIG_MLX5_TC_SAMPLE) += esw/sample.o
mlx5_core-$(CONFIG_MLX5_MPFS) += lib/mpfs.o
mlx5_core-$(CONFIG_VXLAN) += lib/vxlan.o
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
index e8cecd50558d..9d79c5ec31e9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
@@ -263,15 +263,15 @@ static int verify_signature(struct mlx5_cmd_work_ent *ent)
return 0;
}
-static void dump_buf(void *buf, int size, int data_only, int offset)
+static void dump_buf(void *buf, int size, int data_only, int offset, int idx)
{
__be32 *p = buf;
int i;
for (i = 0; i < size; i += 16) {
- pr_debug("%03x: %08x %08x %08x %08x\n", offset, be32_to_cpu(p[0]),
- be32_to_cpu(p[1]), be32_to_cpu(p[2]),
- be32_to_cpu(p[3]));
+ pr_debug("cmd[%d]: %03x: %08x %08x %08x %08x\n", idx, offset,
+ be32_to_cpu(p[0]), be32_to_cpu(p[1]),
+ be32_to_cpu(p[2]), be32_to_cpu(p[3]));
p += 4;
offset += 16;
}
@@ -802,39 +802,41 @@ static void dump_command(struct mlx5_core_dev *dev,
int dump_len;
int i;
+ mlx5_core_dbg(dev, "cmd[%d]: start dump\n", ent->idx);
data_only = !!(mlx5_core_debug_mask & (1 << MLX5_CMD_DATA));
if (data_only)
mlx5_core_dbg_mask(dev, 1 << MLX5_CMD_DATA,
- "dump command data %s(0x%x) %s\n",
- mlx5_command_str(op), op,
+ "cmd[%d]: dump command data %s(0x%x) %s\n",
+ ent->idx, mlx5_command_str(op), op,
input ? "INPUT" : "OUTPUT");
else
- mlx5_core_dbg(dev, "dump command %s(0x%x) %s\n",
- mlx5_command_str(op), op,
+ mlx5_core_dbg(dev, "cmd[%d]: dump command %s(0x%x) %s\n",
+ ent->idx, mlx5_command_str(op), op,
input ? "INPUT" : "OUTPUT");
if (data_only) {
if (input) {
- dump_buf(ent->lay->in, sizeof(ent->lay->in), 1, offset);
+ dump_buf(ent->lay->in, sizeof(ent->lay->in), 1, offset, ent->idx);
offset += sizeof(ent->lay->in);
} else {
- dump_buf(ent->lay->out, sizeof(ent->lay->out), 1, offset);
+ dump_buf(ent->lay->out, sizeof(ent->lay->out), 1, offset, ent->idx);
offset += sizeof(ent->lay->out);
}
} else {
- dump_buf(ent->lay, sizeof(*ent->lay), 0, offset);
+ dump_buf(ent->lay, sizeof(*ent->lay), 0, offset, ent->idx);
offset += sizeof(*ent->lay);
}
for (i = 0; i < n && next; i++) {
if (data_only) {
dump_len = min_t(int, MLX5_CMD_DATA_BLOCK_SIZE, msg->len - offset);
- dump_buf(next->buf, dump_len, 1, offset);
+ dump_buf(next->buf, dump_len, 1, offset, ent->idx);
offset += MLX5_CMD_DATA_BLOCK_SIZE;
} else {
- mlx5_core_dbg(dev, "command block:\n");
- dump_buf(next->buf, sizeof(struct mlx5_cmd_prot_block), 0, offset);
+ mlx5_core_dbg(dev, "cmd[%d]: command block:\n", ent->idx);
+ dump_buf(next->buf, sizeof(struct mlx5_cmd_prot_block), 0, offset,
+ ent->idx);
offset += sizeof(struct mlx5_cmd_prot_block);
}
next = next->next;
@@ -842,6 +844,8 @@ static void dump_command(struct mlx5_core_dev *dev,
if (data_only)
pr_debug("\n");
+
+ mlx5_core_dbg(dev, "cmd[%d]: end dump\n", ent->idx);
}
static u16 msg_to_opcode(struct mlx5_cmd_msg *in)
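For reference, the row format the patch introduces above -- command index, then byte offset, then four big-endian 32-bit words -- in a stand-alone user-space sketch, with ntohl() standing in for be32_to_cpu() and all names hypothetical:

#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>

/* Print 'size' bytes of 'buf' as rows of four big-endian words, each
 * row prefixed with the command index and byte offset, mirroring the
 * "cmd[%d]: %03x: ..." format added by the patch. */
static void dump_buf_demo(const void *buf, int size, int offset, int idx)
{
	const uint32_t *p = buf;

	for (int i = 0; i < size; i += 16) {
		printf("cmd[%d]: %03x: %08x %08x %08x %08x\n", idx, offset,
		       ntohl(p[0]), ntohl(p[1]), ntohl(p[2]), ntohl(p[3]));
		p += 4;
		offset += 16;
	}
}

int main(void)
{
	uint32_t lay[8] = { htonl(0xdeadbeef), htonl(0x01020304) };

	dump_buf_demo(lay, sizeof(lay), 0, 3);
	return 0;
}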
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/dev.c b/drivers/net/ethernet/mellanox/mlx5/core/dev.c
index 9153c9bda96f..a9166cd85013 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/dev.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/dev.c
@@ -58,9 +58,6 @@ static bool is_eth_supported(struct mlx5_core_dev *dev)
if (!IS_ENABLED(CONFIG_MLX5_CORE_EN))
return false;
- if (is_eth_rep_supported(dev))
- return false;
-
if (MLX5_CAP_GEN(dev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
return false;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/devlink.c b/drivers/net/ethernet/mellanox/mlx5/core/devlink.c
index d0f9d3cee97d..44c458443428 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/devlink.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/devlink.c
@@ -137,18 +137,18 @@ static int mlx5_devlink_reload_down(struct devlink *devlink, bool netns_change,
* unregistering devlink instance while holding devlink_mutext.
* Hence, do not support reload.
*/
- NL_SET_ERR_MSG_MOD(extack, "reload is unsupported when SFs are allocated\n");
+ NL_SET_ERR_MSG_MOD(extack, "reload is unsupported when SFs are allocated");
return -EOPNOTSUPP;
}
if (mlx5_lag_is_active(dev)) {
- NL_SET_ERR_MSG_MOD(extack, "reload is unsupported in Lag mode\n");
+ NL_SET_ERR_MSG_MOD(extack, "reload is unsupported in Lag mode");
return -EOPNOTSUPP;
}
switch (action) {
case DEVLINK_RELOAD_ACTION_DRIVER_REINIT:
- mlx5_unload_one(dev, false);
+ mlx5_unload_one(dev);
return 0;
case DEVLINK_RELOAD_ACTION_FW_ACTIVATE:
if (limit == DEVLINK_RELOAD_LIMIT_NO_RESET)
@@ -170,13 +170,13 @@ static int mlx5_devlink_reload_up(struct devlink *devlink, enum devlink_reload_a
*actions_performed = BIT(action);
switch (action) {
case DEVLINK_RELOAD_ACTION_DRIVER_REINIT:
- return mlx5_load_one(dev, false);
+ return mlx5_load_one(dev);
case DEVLINK_RELOAD_ACTION_FW_ACTIVATE:
if (limit == DEVLINK_RELOAD_LIMIT_NO_RESET)
break;
/* On fw_activate action, also driver is reloaded and reinit performed */
*actions_performed |= BIT(DEVLINK_RELOAD_ACTION_DRIVER_REINIT);
- return mlx5_load_one(dev, false);
+ return mlx5_load_one(dev);
default:
/* Unsupported action should not get to this function */
WARN_ON(1);
@@ -461,6 +461,50 @@ static int mlx5_devlink_large_group_num_validate(struct devlink *devlink, u32 id
return 0;
}
+
+static int mlx5_devlink_esw_port_metadata_set(struct devlink *devlink, u32 id,
+ struct devlink_param_gset_ctx *ctx)
+{
+ struct mlx5_core_dev *dev = devlink_priv(devlink);
+
+ if (!MLX5_ESWITCH_MANAGER(dev))
+ return -EOPNOTSUPP;
+
+ return mlx5_esw_offloads_vport_metadata_set(dev->priv.eswitch, ctx->val.vbool);
+}
+
+static int mlx5_devlink_esw_port_metadata_get(struct devlink *devlink, u32 id,
+ struct devlink_param_gset_ctx *ctx)
+{
+ struct mlx5_core_dev *dev = devlink_priv(devlink);
+
+ if (!MLX5_ESWITCH_MANAGER(dev))
+ return -EOPNOTSUPP;
+
+ ctx->val.vbool = mlx5_eswitch_vport_match_metadata_enabled(dev->priv.eswitch);
+ return 0;
+}
+
+static int mlx5_devlink_esw_port_metadata_validate(struct devlink *devlink, u32 id,
+ union devlink_param_value val,
+ struct netlink_ext_ack *extack)
+{
+ struct mlx5_core_dev *dev = devlink_priv(devlink);
+ u8 esw_mode;
+
+ if (!MLX5_ESWITCH_MANAGER(dev)) {
+ NL_SET_ERR_MSG_MOD(extack, "E-Switch is unsupported");
+ return -EOPNOTSUPP;
+ }
+ esw_mode = mlx5_eswitch_mode(dev);
+ if (esw_mode == MLX5_ESWITCH_OFFLOADS) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "E-Switch must either disabled or non switchdev mode");
+ return -EBUSY;
+ }
+ return 0;
+}
+
#endif
static int mlx5_devlink_enable_remote_dev_reset_set(struct devlink *devlink, u32 id,
@@ -495,6 +539,12 @@ static const struct devlink_param mlx5_devlink_params[] = {
BIT(DEVLINK_PARAM_CMODE_DRIVERINIT),
NULL, NULL,
mlx5_devlink_large_group_num_validate),
+ DEVLINK_PARAM_DRIVER(MLX5_DEVLINK_PARAM_ID_ESW_PORT_METADATA,
+ "esw_port_metadata", DEVLINK_PARAM_TYPE_BOOL,
+ BIT(DEVLINK_PARAM_CMODE_RUNTIME),
+ mlx5_devlink_esw_port_metadata_get,
+ mlx5_devlink_esw_port_metadata_set,
+ mlx5_devlink_esw_port_metadata_validate),
#endif
DEVLINK_PARAM_GENERIC(ENABLE_REMOTE_DEV_RESET, BIT(DEVLINK_PARAM_CMODE_RUNTIME),
mlx5_devlink_enable_remote_dev_reset_get,
@@ -524,6 +574,18 @@ static void mlx5_devlink_set_params_init_values(struct devlink *devlink)
devlink_param_driverinit_value_set(devlink,
MLX5_DEVLINK_PARAM_ID_ESW_LARGE_GROUP_NUM,
value);
+
+ if (MLX5_ESWITCH_MANAGER(dev)) {
+ if (mlx5_esw_vport_match_metadata_supported(dev->priv.eswitch)) {
+ dev->priv.eswitch->flags |= MLX5_ESWITCH_VPORT_MATCH_METADATA;
+ value.vbool = true;
+ } else {
+ value.vbool = false;
+ }
+ devlink_param_driverinit_value_set(devlink,
+ MLX5_DEVLINK_PARAM_ID_ESW_PORT_METADATA,
+ value);
+ }
#endif
}
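The esw_port_metadata knob registered above is a runtime devlink parameter: the validate callback rejects changes while the E-Switch is in switchdev mode, and the driverinit block seeds the default from mlx5_esw_vport_match_metadata_supported(). Assuming iproute2's devlink tool, it can be read or toggled as follows (PCI address illustrative):

    devlink dev param show pci/0000:06:00.0 name esw_port_metadata
    devlink dev param set pci/0000:06:00.0 name esw_port_metadata value false cmode runtime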
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/devlink.h b/drivers/net/ethernet/mellanox/mlx5/core/devlink.h
index eff107dad922..7318d44b774b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/devlink.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/devlink.h
@@ -10,6 +10,7 @@ enum mlx5_devlink_param_id {
MLX5_DEVLINK_PARAM_ID_BASE = DEVLINK_PARAM_GENERIC_ID_MAX,
MLX5_DEVLINK_PARAM_ID_FLOW_STEERING_MODE,
MLX5_DEVLINK_PARAM_ID_ESW_LARGE_GROUP_NUM,
+ MLX5_DEVLINK_PARAM_ID_ESW_PORT_METADATA,
};
struct mlx5_trap_ctx {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c b/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c
index 2eb022ad7fd0..01a1d02dcf15 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c
@@ -1100,7 +1100,7 @@ int mlx5_fw_tracer_reload(struct mlx5_fw_tracer *tracer)
int err;
if (IS_ERR_OR_NULL(tracer))
- return -EINVAL;
+ return 0;
dev = tracer->dev;
mlx5_fw_tracer_cleanup(tracer);
@@ -1126,8 +1126,7 @@ static int fw_tracer_event(struct notifier_block *nb, unsigned long action, void
switch (eqe->sub_type) {
case MLX5_TRACER_SUBTYPE_OWNERSHIP_CHANGE:
- if (test_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state))
- queue_work(tracer->work_queue, &tracer->ownership_change_work);
+ queue_work(tracer->work_queue, &tracer->ownership_change_work);
break;
case MLX5_TRACER_SUBTYPE_TRACES_AVAILABLE:
if (likely(tracer->str_db.loaded))
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index bc6f77ea0a31..b636d63358d2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -269,6 +269,7 @@ struct mlx5e_params {
struct mlx5e_xsk *xsk;
unsigned int sw_mtu;
int hard_mtu;
+ bool ptp_rx;
};
enum {
@@ -324,9 +325,9 @@ enum {
MLX5E_SQ_STATE_RECOVERING,
MLX5E_SQ_STATE_IPSEC,
MLX5E_SQ_STATE_AM,
- MLX5E_SQ_STATE_TLS,
MLX5E_SQ_STATE_VLAN_NEED_L2_INLINE,
MLX5E_SQ_STATE_PENDING_XSK_TX,
+ MLX5E_SQ_STATE_PENDING_TLS_RX_RESYNC,
};
struct mlx5e_tx_mpwqe {
@@ -499,6 +500,8 @@ struct mlx5e_xdpsq {
struct mlx5e_channel *channel;
} ____cacheline_aligned_in_smp;
+struct mlx5e_ktls_resync_resp;
+
struct mlx5e_icosq {
/* data path */
u16 cc;
@@ -518,6 +521,7 @@ struct mlx5e_icosq {
u32 sqn;
u16 reserved_room;
unsigned long state;
+ struct mlx5e_ktls_resync_resp *ktls_resync;
/* control path */
struct mlx5_wq_ctrl wq_ctrl;
@@ -708,11 +712,11 @@ struct mlx5e_channel {
int cpu;
};
-struct mlx5e_port_ptp;
+struct mlx5e_ptp;
struct mlx5e_channels {
struct mlx5e_channel **c;
- struct mlx5e_port_ptp *port_ptp;
+ struct mlx5e_ptp *ptp;
unsigned int num;
struct mlx5e_params params;
};
@@ -727,10 +731,11 @@ struct mlx5e_channel_stats {
struct mlx5e_xdpsq_stats xsksq;
} ____cacheline_aligned_in_smp;
-struct mlx5e_port_ptp_stats {
+struct mlx5e_ptp_stats {
struct mlx5e_ch_stats ch;
struct mlx5e_sq_stats sq[MLX5E_MAX_NUM_TC];
struct mlx5e_ptp_cq_stats cq[MLX5E_MAX_NUM_TC];
+ struct mlx5e_rq_stats rq;
} ____cacheline_aligned_in_smp;
enum {
@@ -837,6 +842,7 @@ struct mlx5e_priv {
struct mlx5e_tir inner_indir_tir[MLX5E_NUM_INDIR_TIRS];
struct mlx5e_tir direct_tir[MLX5E_MAX_NUM_CHANNELS];
struct mlx5e_tir xsk_tir[MLX5E_MAX_NUM_CHANNELS];
+ struct mlx5e_tir ptp_tir;
struct mlx5e_rss_params rss_params;
u32 tx_rates[MLX5E_MAX_NUM_SQS];
@@ -856,10 +862,11 @@ struct mlx5e_priv {
struct mlx5e_stats stats;
struct mlx5e_channel_stats channel_stats[MLX5E_MAX_NUM_CHANNELS];
struct mlx5e_channel_stats trap_stats;
- struct mlx5e_port_ptp_stats port_ptp_stats;
+ struct mlx5e_ptp_stats ptp_stats;
u16 max_nch;
u8 max_opened_tc;
- bool port_ptp_opened;
+ bool tx_ptp_opened;
+ bool rx_ptp_opened;
struct hwtstamp_config tstamp;
u16 q_counter;
u16 drop_rq_q_counter;
@@ -882,7 +889,6 @@ struct mlx5e_priv {
#endif
struct devlink_health_reporter *tx_reporter;
struct devlink_health_reporter *rx_reporter;
- struct devlink_port dl_port;
struct mlx5e_xsk xsk;
#if IS_ENABLED(CONFIG_PCI_HYPERV_INTERFACE)
struct mlx5e_hv_vhca_stats_agent stats_agent;
@@ -916,13 +922,12 @@ struct mlx5e_profile {
const struct mlx5e_rx_handlers *rx_handlers;
int max_tc;
u8 rq_groups;
+ bool rx_ptp_support;
};
void mlx5e_build_ptys2ethtool_map(void);
bool mlx5e_check_fragmented_striding_rq_cap(struct mlx5_core_dev *mdev);
-bool mlx5e_striding_rq_possible(struct mlx5_core_dev *mdev,
- struct mlx5e_params *params);
void mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats);
void mlx5e_fold_sw_stats64(struct mlx5e_priv *priv, struct rtnl_link_stats64 *s);
@@ -965,9 +970,9 @@ struct mlx5e_tirc_config mlx5e_tirc_get_default_config(enum mlx5e_traffic_types
struct mlx5e_xsk_param;
struct mlx5e_rq_param;
-int mlx5e_open_rq(struct mlx5e_channel *c, struct mlx5e_params *params,
- struct mlx5e_rq_param *param, struct mlx5e_xsk_param *xsk,
- struct xsk_buff_pool *xsk_pool, struct mlx5e_rq *rq);
+int mlx5e_open_rq(struct mlx5e_params *params, struct mlx5e_rq_param *param,
+ struct mlx5e_xsk_param *xsk, int node,
+ struct mlx5e_rq *rq);
int mlx5e_wait_for_min_rx_wqes(struct mlx5e_rq *rq, int wait_time);
void mlx5e_deactivate_rq(struct mlx5e_rq *rq);
void mlx5e_close_rq(struct mlx5e_rq *rq);
@@ -1013,27 +1018,20 @@ int fn##_ctx(struct mlx5e_priv *priv, void *context) \
return fn(priv); \
}
int mlx5e_safe_reopen_channels(struct mlx5e_priv *priv);
-int mlx5e_safe_switch_channels(struct mlx5e_priv *priv,
- struct mlx5e_channels *new_chs,
- mlx5e_fp_preactivate preactivate,
- void *context);
+int mlx5e_safe_switch_params(struct mlx5e_priv *priv,
+ struct mlx5e_params *new_params,
+ mlx5e_fp_preactivate preactivate,
+ void *context, bool reset);
int mlx5e_update_tx_netdev_queues(struct mlx5e_priv *priv);
int mlx5e_num_channels_changed(struct mlx5e_priv *priv);
int mlx5e_num_channels_changed_ctx(struct mlx5e_priv *priv, void *context);
void mlx5e_activate_priv_channels(struct mlx5e_priv *priv);
void mlx5e_deactivate_priv_channels(struct mlx5e_priv *priv);
+int mlx5e_ptp_rx_manage_fs_ctx(struct mlx5e_priv *priv, void *ctx);
void mlx5e_build_default_indir_rqt(u32 *indirection_rqt, int len,
int num_channels);
-void mlx5e_reset_tx_moderation(struct mlx5e_params *params, u8 cq_period_mode);
-void mlx5e_reset_rx_moderation(struct mlx5e_params *params, u8 cq_period_mode);
-void mlx5e_set_tx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode);
-void mlx5e_set_rx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode);
-
-void mlx5e_set_rq_type(struct mlx5_core_dev *mdev, struct mlx5e_params *params);
-void mlx5e_init_rq_type_params(struct mlx5_core_dev *mdev,
- struct mlx5e_params *params);
int mlx5e_modify_rq_state(struct mlx5e_rq *rq, int curr_state, int next_state);
void mlx5e_activate_rq(struct mlx5e_rq *rq);
void mlx5e_deactivate_rq(struct mlx5e_rq *rq);
@@ -1092,10 +1090,10 @@ int mlx5e_create_indirect_rqt(struct mlx5e_priv *priv);
int mlx5e_create_indirect_tirs(struct mlx5e_priv *priv, bool inner_ttc);
void mlx5e_destroy_indirect_tirs(struct mlx5e_priv *priv);
-int mlx5e_create_direct_rqts(struct mlx5e_priv *priv, struct mlx5e_tir *tirs);
-void mlx5e_destroy_direct_rqts(struct mlx5e_priv *priv, struct mlx5e_tir *tirs);
-int mlx5e_create_direct_tirs(struct mlx5e_priv *priv, struct mlx5e_tir *tirs);
-void mlx5e_destroy_direct_tirs(struct mlx5e_priv *priv, struct mlx5e_tir *tirs);
+int mlx5e_create_direct_rqts(struct mlx5e_priv *priv, struct mlx5e_tir *tirs, int n);
+void mlx5e_destroy_direct_rqts(struct mlx5e_priv *priv, struct mlx5e_tir *tirs, int n);
+int mlx5e_create_direct_tirs(struct mlx5e_priv *priv, struct mlx5e_tir *tirs, int n);
+void mlx5e_destroy_direct_tirs(struct mlx5e_priv *priv, struct mlx5e_tir *tirs, int n);
void mlx5e_destroy_rqt(struct mlx5e_priv *priv, struct mlx5e_rqt *rqt);
int mlx5e_create_tis(struct mlx5_core_dev *mdev, void *in, u32 *tisn);
@@ -1176,10 +1174,9 @@ void mlx5e_detach_netdev(struct mlx5e_priv *priv);
void mlx5e_destroy_netdev(struct mlx5e_priv *priv);
int mlx5e_netdev_change_profile(struct mlx5e_priv *priv,
const struct mlx5e_profile *new_profile, void *new_ppriv);
+void mlx5e_netdev_attach_nic_profile(struct mlx5e_priv *priv);
void mlx5e_set_netdev_mtu_boundaries(struct mlx5e_priv *priv);
void mlx5e_build_nic_params(struct mlx5e_priv *priv, struct mlx5e_xsk *xsk, u16 mtu);
-void mlx5e_build_rq_params(struct mlx5_core_dev *mdev,
- struct mlx5e_params *params);
void mlx5e_build_rss_params(struct mlx5e_rss_params *rss_params,
u16 num_channels);
void mlx5e_rx_dim_work(struct work_struct *work);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/devlink.c b/drivers/net/ethernet/mellanox/mlx5/core/en/devlink.c
index a69c62d72d16..0dd7615e5931 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/devlink.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/devlink.c
@@ -2,37 +2,70 @@
/* Copyright (c) 2020, Mellanox Technologies inc. All rights reserved. */
#include "en/devlink.h"
+#include "eswitch.h"
+
+static void
+mlx5e_devlink_get_port_parent_id(struct mlx5_core_dev *dev, struct netdev_phys_item_id *ppid)
+{
+ u64 parent_id;
+
+ parent_id = mlx5_query_nic_system_image_guid(dev);
+ ppid->id_len = sizeof(parent_id);
+ memcpy(ppid->id, &parent_id, sizeof(parent_id));
+}
int mlx5e_devlink_port_register(struct mlx5e_priv *priv)
{
struct devlink *devlink = priv_to_devlink(priv->mdev);
struct devlink_port_attrs attrs = {};
+ struct netdev_phys_item_id ppid = {};
+ struct devlink_port *dl_port;
+ unsigned int dl_port_index;
if (mlx5_core_is_pf(priv->mdev)) {
attrs.flavour = DEVLINK_PORT_FLAVOUR_PHYSICAL;
attrs.phys.port_number = PCI_FUNC(priv->mdev->pdev->devfn);
+ if (MLX5_ESWITCH_MANAGER(priv->mdev)) {
+ mlx5e_devlink_get_port_parent_id(priv->mdev, &ppid);
+ memcpy(attrs.switch_id.id, ppid.id, ppid.id_len);
+ attrs.switch_id.id_len = ppid.id_len;
+ }
+ dl_port_index = mlx5_esw_vport_to_devlink_port_index(priv->mdev,
+ MLX5_VPORT_UPLINK);
} else {
attrs.flavour = DEVLINK_PORT_FLAVOUR_VIRTUAL;
+ dl_port_index = mlx5_esw_vport_to_devlink_port_index(priv->mdev, 0);
}
- devlink_port_attrs_set(&priv->dl_port, &attrs);
+ dl_port = mlx5e_devlink_get_dl_port(priv);
+ memset(dl_port, 0, sizeof(*dl_port));
+ devlink_port_attrs_set(dl_port, &attrs);
- return devlink_port_register(devlink, &priv->dl_port, 1);
+ return devlink_port_register(devlink, dl_port, dl_port_index);
}
void mlx5e_devlink_port_type_eth_set(struct mlx5e_priv *priv)
{
- devlink_port_type_eth_set(&priv->dl_port, priv->netdev);
+ struct devlink_port *dl_port = mlx5e_devlink_get_dl_port(priv);
+
+ devlink_port_type_eth_set(dl_port, priv->netdev);
}
void mlx5e_devlink_port_unregister(struct mlx5e_priv *priv)
{
- devlink_port_unregister(&priv->dl_port);
+ struct devlink_port *dl_port = mlx5e_devlink_get_dl_port(priv);
+
+ if (dl_port->registered)
+ devlink_port_unregister(dl_port);
}
struct devlink_port *mlx5e_get_devlink_port(struct net_device *dev)
{
struct mlx5e_priv *priv = netdev_priv(dev);
+ struct devlink_port *port;
- return &priv->dl_port;
+ port = mlx5e_devlink_get_dl_port(priv);
+ if (port->registered)
+ return port;
+ return NULL;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/devlink.h b/drivers/net/ethernet/mellanox/mlx5/core/en/devlink.h
index 83123a801adc..10b50feb9883 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/devlink.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/devlink.h
@@ -12,4 +12,10 @@ void mlx5e_devlink_port_unregister(struct mlx5e_priv *priv);
void mlx5e_devlink_port_type_eth_set(struct mlx5e_priv *priv);
struct devlink_port *mlx5e_get_devlink_port(struct net_device *dev);
+static inline struct devlink_port *
+mlx5e_devlink_get_dl_port(struct mlx5e_priv *priv)
+{
+ return &priv->mdev->mlx5e_res.dl_port;
+}
+
#endif
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h b/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h
index a16297e7e2ac..1d5ce07b83f4 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h
@@ -29,6 +29,7 @@ struct mlx5e_tc_table {
struct netdev_net_notifier netdevice_nn;
struct mlx5_tc_ct_priv *ct;
+ struct mapping_ctx *mapping;
};
struct mlx5e_flow_table {
@@ -49,18 +50,10 @@ struct mlx5e_promisc_table {
struct mlx5_flow_handle *rule;
};
-struct mlx5e_vlan_table {
- struct mlx5e_flow_table ft;
- DECLARE_BITMAP(active_cvlans, VLAN_N_VID);
- DECLARE_BITMAP(active_svlans, VLAN_N_VID);
- struct mlx5_flow_handle *active_cvlans_rule[VLAN_N_VID];
- struct mlx5_flow_handle *active_svlans_rule[VLAN_N_VID];
- struct mlx5_flow_handle *untagged_rule;
- struct mlx5_flow_handle *any_cvlan_rule;
- struct mlx5_flow_handle *any_svlan_rule;
- struct mlx5_flow_handle *trap_rule;
- bool cvlan_filter_disabled;
-};
+/* Forward declaration and APIs to get private fields of vlan_table */
+struct mlx5e_vlan_table;
+unsigned long *mlx5e_vlan_get_active_svlans(struct mlx5e_vlan_table *vlan);
+struct mlx5_flow_table *mlx5e_vlan_get_flowtable(struct mlx5e_vlan_table *vlan);
struct mlx5e_l2_table {
struct mlx5e_flow_table ft;
@@ -137,11 +130,13 @@ enum {
MLX5E_L2_FT_LEVEL,
MLX5E_TTC_FT_LEVEL,
MLX5E_INNER_TTC_FT_LEVEL,
+ MLX5E_FS_TT_UDP_FT_LEVEL = MLX5E_INNER_TTC_FT_LEVEL + 1,
+ MLX5E_FS_TT_ANY_FT_LEVEL = MLX5E_INNER_TTC_FT_LEVEL + 1,
#ifdef CONFIG_MLX5_EN_TLS
- MLX5E_ACCEL_FS_TCP_FT_LEVEL,
+ MLX5E_ACCEL_FS_TCP_FT_LEVEL = MLX5E_INNER_TTC_FT_LEVEL + 1,
#endif
#ifdef CONFIG_MLX5_EN_ARFS
- MLX5E_ARFS_FT_LEVEL,
+ MLX5E_ARFS_FT_LEVEL = MLX5E_INNER_TTC_FT_LEVEL + 1,
#endif
#ifdef CONFIG_MLX5_EN_IPSEC
MLX5E_ACCEL_FS_ESP_FT_LEVEL = MLX5E_INNER_TTC_FT_LEVEL + 1,
@@ -198,31 +193,7 @@ static inline int mlx5e_ethtool_get_rxnfc(struct net_device *dev,
#endif /* CONFIG_MLX5_EN_RXNFC */
#ifdef CONFIG_MLX5_EN_ARFS
-#define ARFS_HASH_SHIFT BITS_PER_BYTE
-#define ARFS_HASH_SIZE BIT(BITS_PER_BYTE)
-
-struct arfs_table {
- struct mlx5e_flow_table ft;
- struct mlx5_flow_handle *default_rule;
- struct hlist_head rules_hash[ARFS_HASH_SIZE];
-};
-
-enum arfs_type {
- ARFS_IPV4_TCP,
- ARFS_IPV6_TCP,
- ARFS_IPV4_UDP,
- ARFS_IPV6_UDP,
- ARFS_NUM_TYPES,
-};
-
-struct mlx5e_arfs_tables {
- struct arfs_table arfs_tables[ARFS_NUM_TYPES];
- /* Protect aRFS rules list */
- spinlock_t arfs_lock;
- struct list_head rules;
- int last_filter_id;
- struct workqueue_struct *wq;
-};
+struct mlx5e_arfs_tables;
int mlx5e_arfs_create_tables(struct mlx5e_priv *priv);
void mlx5e_arfs_destroy_tables(struct mlx5e_priv *priv);
@@ -241,6 +212,10 @@ static inline int mlx5e_arfs_disable(struct mlx5e_priv *priv) { return -EOPNOTSU
struct mlx5e_accel_fs_tcp;
#endif
+struct mlx5e_fs_udp;
+struct mlx5e_fs_any;
+struct mlx5e_ptp_fs;
+
struct mlx5e_flow_steering {
struct mlx5_flow_namespace *ns;
struct mlx5_flow_namespace *egress_ns;
@@ -249,16 +224,19 @@ struct mlx5e_flow_steering {
#endif
struct mlx5e_tc_table tc;
struct mlx5e_promisc_table promisc;
- struct mlx5e_vlan_table vlan;
+ struct mlx5e_vlan_table *vlan;
struct mlx5e_l2_table l2;
struct mlx5e_ttc_table ttc;
struct mlx5e_ttc_table inner_ttc;
#ifdef CONFIG_MLX5_EN_ARFS
- struct mlx5e_arfs_tables arfs;
+ struct mlx5e_arfs_tables *arfs;
#endif
#ifdef CONFIG_MLX5_EN_TLS
struct mlx5e_accel_fs_tcp *accel_tcp;
#endif
+ struct mlx5e_fs_udp *udp;
+ struct mlx5e_fs_any *any;
+ struct mlx5e_ptp_fs *ptp_fs;
};
struct ttc_params {
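The fs.h hunk above hides mlx5e_vlan_table and mlx5e_arfs_tables behind forward declarations plus accessors such as mlx5e_vlan_get_active_svlans(), so other files depend only on the handle, never on the layout. A minimal sketch of that opaque-handle pattern (hypothetical names; the definition and accessor would normally live alone in one .c file, with only the declarations in the header):

#include <stdio.h>
#include <stdlib.h>

/* Public view: the type is only forward-declared, so callers cannot
 * touch fields directly and the layout may change freely. */
struct vlan_table;
unsigned long *vlan_get_active(struct vlan_table *v);

/* Private definition, normally hidden in a single .c file. */
struct vlan_table {
	unsigned long active[64];
};

unsigned long *vlan_get_active(struct vlan_table *v)
{
	return v->active;
}

int main(void)
{
	struct vlan_table *v = calloc(1, sizeof(*v));

	vlan_get_active(v)[0] = 1;	/* access only via the accessor */
	printf("%lu\n", vlan_get_active(v)[0]);
	free(v);
	return 0;
}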
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/fs_tt_redirect.c b/drivers/net/ethernet/mellanox/mlx5/core/en/fs_tt_redirect.c
new file mode 100644
index 000000000000..909faa6c89d7
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/fs_tt_redirect.c
@@ -0,0 +1,605 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2021, Mellanox Technologies inc. All rights reserved. */
+
+#include <linux/netdevice.h>
+#include "en/fs_tt_redirect.h"
+#include "fs_core.h"
+
+enum fs_udp_type {
+ FS_IPV4_UDP,
+ FS_IPV6_UDP,
+ FS_UDP_NUM_TYPES,
+};
+
+struct mlx5e_fs_udp {
+ struct mlx5e_flow_table tables[FS_UDP_NUM_TYPES];
+ struct mlx5_flow_handle *default_rules[FS_UDP_NUM_TYPES];
+ int ref_cnt;
+};
+
+struct mlx5e_fs_any {
+ struct mlx5e_flow_table table;
+ struct mlx5_flow_handle *default_rule;
+ int ref_cnt;
+};
+
+static char *fs_udp_type2str(enum fs_udp_type i)
+{
+ switch (i) {
+ case FS_IPV4_UDP:
+ return "UDP v4";
+ default: /* FS_IPV6_UDP */
+ return "UDP v6";
+ }
+}
+
+static enum mlx5e_traffic_types fs_udp2tt(enum fs_udp_type i)
+{
+ switch (i) {
+ case FS_IPV4_UDP:
+ return MLX5E_TT_IPV4_UDP;
+ default: /* FS_IPV6_UDP */
+ return MLX5E_TT_IPV6_UDP;
+ }
+}
+
+static enum fs_udp_type tt2fs_udp(enum mlx5e_traffic_types i)
+{
+ switch (i) {
+ case MLX5E_TT_IPV4_UDP:
+ return FS_IPV4_UDP;
+ case MLX5E_TT_IPV6_UDP:
+ return FS_IPV6_UDP;
+ default:
+ return FS_UDP_NUM_TYPES;
+ }
+}
+
+void mlx5e_fs_tt_redirect_del_rule(struct mlx5_flow_handle *rule)
+{
+ mlx5_del_flow_rules(rule);
+}
+
+static void fs_udp_set_dport_flow(struct mlx5_flow_spec *spec, enum fs_udp_type type,
+ u16 udp_dport)
+{
+ spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
+ MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.ip_protocol);
+ MLX5_SET(fte_match_param, spec->match_value, outer_headers.ip_protocol, IPPROTO_UDP);
+ MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.ip_version);
+ MLX5_SET(fte_match_param, spec->match_value, outer_headers.ip_version,
+ type == FS_IPV4_UDP ? 4 : 6);
+ MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.udp_dport);
+ MLX5_SET(fte_match_param, spec->match_value, outer_headers.udp_dport, udp_dport);
+}
+
+struct mlx5_flow_handle *
+mlx5e_fs_tt_redirect_udp_add_rule(struct mlx5e_priv *priv,
+ enum mlx5e_traffic_types ttc_type,
+ u32 tir_num, u16 d_port)
+{
+ enum fs_udp_type type = tt2fs_udp(ttc_type);
+ struct mlx5_flow_destination dest = {};
+ struct mlx5_flow_table *ft = NULL;
+ MLX5_DECLARE_FLOW_ACT(flow_act);
+ struct mlx5_flow_handle *rule;
+ struct mlx5_flow_spec *spec;
+ struct mlx5e_fs_udp *fs_udp;
+ int err;
+
+ if (type == FS_UDP_NUM_TYPES)
+ return ERR_PTR(-EINVAL);
+
+ spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
+ if (!spec)
+ return ERR_PTR(-ENOMEM);
+
+ fs_udp = priv->fs.udp;
+ ft = fs_udp->tables[type].t;
+
+ fs_udp_set_dport_flow(spec, type, d_port);
+ dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
+ dest.tir_num = tir_num;
+
+ rule = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
+ kvfree(spec);
+
+ if (IS_ERR(rule)) {
+ err = PTR_ERR(rule);
+ netdev_err(priv->netdev, "%s: add %s rule failed, err %d\n",
+ __func__, fs_udp_type2str(type), err);
+ }
+ return rule;
+}
+
+static int fs_udp_add_default_rule(struct mlx5e_priv *priv, enum fs_udp_type type)
+{
+ struct mlx5e_flow_table *fs_udp_t;
+ struct mlx5_flow_destination dest;
+ MLX5_DECLARE_FLOW_ACT(flow_act);
+ struct mlx5_flow_handle *rule;
+ struct mlx5e_fs_udp *fs_udp;
+ int err;
+
+ fs_udp = priv->fs.udp;
+ fs_udp_t = &fs_udp->tables[type];
+
+ dest = mlx5e_ttc_get_default_dest(priv, fs_udp2tt(type));
+ rule = mlx5_add_flow_rules(fs_udp_t->t, NULL, &flow_act, &dest, 1);
+ if (IS_ERR(rule)) {
+ err = PTR_ERR(rule);
+ netdev_err(priv->netdev,
+ "%s: add default rule failed, fs type=%d, err %d\n",
+ __func__, type, err);
+ return err;
+ }
+
+ fs_udp->default_rules[type] = rule;
+ return 0;
+}
+
+#define MLX5E_FS_UDP_NUM_GROUPS (2)
+#define MLX5E_FS_UDP_GROUP1_SIZE (BIT(16))
+#define MLX5E_FS_UDP_GROUP2_SIZE (BIT(0))
+#define MLX5E_FS_UDP_TABLE_SIZE (MLX5E_FS_UDP_GROUP1_SIZE +\
+ MLX5E_FS_UDP_GROUP2_SIZE)
+static int fs_udp_create_groups(struct mlx5e_flow_table *ft, enum fs_udp_type type)
+{
+ int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
+ void *outer_headers_c;
+ int ix = 0;
+ u32 *in;
+ int err;
+ u8 *mc;
+
+ ft->g = kcalloc(MLX5E_FS_UDP_NUM_GROUPS, sizeof(*ft->g), GFP_KERNEL);
+ in = kvzalloc(inlen, GFP_KERNEL);
+ if (!in || !ft->g) {
+ kfree(ft->g);
+ kvfree(in);
+ return -ENOMEM;
+ }
+
+ mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
+ outer_headers_c = MLX5_ADDR_OF(fte_match_param, mc, outer_headers);
+ MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, outer_headers_c, ip_protocol);
+ MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, outer_headers_c, ip_version);
+
+ switch (type) {
+ case FS_IPV4_UDP:
+ case FS_IPV6_UDP:
+ MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, outer_headers_c, udp_dport);
+ break;
+ default:
+ err = -EINVAL;
+ goto out;
+ }
+ /* Match on udp protocol, Ipv4/6 and dport */
+ MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
+ MLX5_SET_CFG(in, start_flow_index, ix);
+ ix += MLX5E_FS_UDP_GROUP1_SIZE;
+ MLX5_SET_CFG(in, end_flow_index, ix - 1);
+ ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
+ if (IS_ERR(ft->g[ft->num_groups]))
+ goto err;
+ ft->num_groups++;
+
+ /* Default Flow Group */
+ memset(in, 0, inlen);
+ MLX5_SET_CFG(in, start_flow_index, ix);
+ ix += MLX5E_FS_UDP_GROUP2_SIZE;
+ MLX5_SET_CFG(in, end_flow_index, ix - 1);
+ ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
+ if (IS_ERR(ft->g[ft->num_groups]))
+ goto err;
+ ft->num_groups++;
+
+ kvfree(in);
+ return 0;
+
+err:
+ err = PTR_ERR(ft->g[ft->num_groups]);
+ ft->g[ft->num_groups] = NULL;
+out:
+ kvfree(in);
+
+ return err;
+}
+
+static int fs_udp_create_table(struct mlx5e_priv *priv, enum fs_udp_type type)
+{
+ struct mlx5e_flow_table *ft = &priv->fs.udp->tables[type];
+ struct mlx5_flow_table_attr ft_attr = {};
+ int err;
+
+ ft->num_groups = 0;
+
+ ft_attr.max_fte = MLX5E_FS_UDP_TABLE_SIZE;
+ ft_attr.level = MLX5E_FS_TT_UDP_FT_LEVEL;
+ ft_attr.prio = MLX5E_NIC_PRIO;
+
+ ft->t = mlx5_create_flow_table(priv->fs.ns, &ft_attr);
+ if (IS_ERR(ft->t)) {
+ err = PTR_ERR(ft->t);
+ ft->t = NULL;
+ return err;
+ }
+
+ netdev_dbg(priv->netdev, "Created fs %s table id %u level %u\n",
+ fs_udp_type2str(type), ft->t->id, ft->t->level);
+
+ err = fs_udp_create_groups(ft, type);
+ if (err)
+ goto err;
+
+ err = fs_udp_add_default_rule(priv, type);
+ if (err)
+ goto err;
+
+ return 0;
+
+err:
+ mlx5e_destroy_flow_table(ft);
+ return err;
+}
+
+static void fs_udp_destroy_table(struct mlx5e_fs_udp *fs_udp, int i)
+{
+ if (IS_ERR_OR_NULL(fs_udp->tables[i].t))
+ return;
+
+ mlx5_del_flow_rules(fs_udp->default_rules[i]);
+ mlx5e_destroy_flow_table(&fs_udp->tables[i]);
+ fs_udp->tables[i].t = NULL;
+}
+
+static int fs_udp_disable(struct mlx5e_priv *priv)
+{
+ int err, i;
+
+ for (i = 0; i < FS_UDP_NUM_TYPES; i++) {
+ /* Modify ttc rules destination to point back to the indir TIRs */
+ err = mlx5e_ttc_fwd_default_dest(priv, fs_udp2tt(i));
+ if (err) {
+ netdev_err(priv->netdev,
+ "%s: modify ttc[%d] default destination failed, err(%d)\n",
+ __func__, fs_udp2tt(i), err);
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+static int fs_udp_enable(struct mlx5e_priv *priv)
+{
+ struct mlx5_flow_destination dest = {};
+ int err, i;
+
+ dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
+ for (i = 0; i < FS_UDP_NUM_TYPES; i++) {
+ dest.ft = priv->fs.udp->tables[i].t;
+
+ /* Modify ttc rules destination to point on the accel_fs FTs */
+ err = mlx5e_ttc_fwd_dest(priv, fs_udp2tt(i), &dest);
+ if (err) {
+ netdev_err(priv->netdev,
+ "%s: modify ttc[%d] destination to accel failed, err(%d)\n",
+ __func__, fs_udp2tt(i), err);
+ return err;
+ }
+ }
+ return 0;
+}
+
+void mlx5e_fs_tt_redirect_udp_destroy(struct mlx5e_priv *priv)
+{
+ struct mlx5e_fs_udp *fs_udp = priv->fs.udp;
+ int i;
+
+ if (!fs_udp)
+ return;
+
+ if (--fs_udp->ref_cnt)
+ return;
+
+ fs_udp_disable(priv);
+
+ for (i = 0; i < FS_UDP_NUM_TYPES; i++)
+ fs_udp_destroy_table(fs_udp, i);
+
+ kfree(fs_udp);
+ priv->fs.udp = NULL;
+}
+
+int mlx5e_fs_tt_redirect_udp_create(struct mlx5e_priv *priv)
+{
+ int i, err;
+
+ if (priv->fs.udp) {
+ priv->fs.udp->ref_cnt++;
+ return 0;
+ }
+
+ priv->fs.udp = kzalloc(sizeof(*priv->fs.udp), GFP_KERNEL);
+ if (!priv->fs.udp)
+ return -ENOMEM;
+
+ for (i = 0; i < FS_UDP_NUM_TYPES; i++) {
+ err = fs_udp_create_table(priv, i);
+ if (err)
+ goto err_destroy_tables;
+ }
+
+ err = fs_udp_enable(priv);
+ if (err)
+ goto err_destroy_tables;
+
+ priv->fs.udp->ref_cnt = 1;
+
+ return 0;
+
+err_destroy_tables:
+ while (--i >= 0)
+ fs_udp_destroy_table(priv->fs.udp, i);
+
+ kfree(priv->fs.udp);
+ priv->fs.udp = NULL;
+ return err;
+}
+
+static void fs_any_set_ethertype_flow(struct mlx5_flow_spec *spec, u16 ether_type)
+{
+ spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
+ MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.ethertype);
+ MLX5_SET(fte_match_param, spec->match_value, outer_headers.ethertype, ether_type);
+}
+
+struct mlx5_flow_handle *
+mlx5e_fs_tt_redirect_any_add_rule(struct mlx5e_priv *priv,
+ u32 tir_num, u16 ether_type)
+{
+ struct mlx5_flow_destination dest = {};
+ struct mlx5_flow_table *ft = NULL;
+ MLX5_DECLARE_FLOW_ACT(flow_act);
+ struct mlx5_flow_handle *rule;
+ struct mlx5_flow_spec *spec;
+ struct mlx5e_fs_any *fs_any;
+ int err;
+
+ spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
+ if (!spec)
+ return ERR_PTR(-ENOMEM);
+
+ fs_any = priv->fs.any;
+ ft = fs_any->table.t;
+
+ fs_any_set_ethertype_flow(spec, ether_type);
+ dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
+ dest.tir_num = tir_num;
+
+ rule = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
+ kvfree(spec);
+
+ if (IS_ERR(rule)) {
+ err = PTR_ERR(rule);
+ netdev_err(priv->netdev, "%s: add ANY rule failed, err %d\n",
+ __func__, err);
+ }
+ return rule;
+}
+
+static int fs_any_add_default_rule(struct mlx5e_priv *priv)
+{
+ struct mlx5e_flow_table *fs_any_t;
+ struct mlx5_flow_destination dest;
+ MLX5_DECLARE_FLOW_ACT(flow_act);
+ struct mlx5_flow_handle *rule;
+ struct mlx5e_fs_any *fs_any;
+ int err;
+
+ fs_any = priv->fs.any;
+ fs_any_t = &fs_any->table;
+
+ dest = mlx5e_ttc_get_default_dest(priv, MLX5E_TT_ANY);
+ rule = mlx5_add_flow_rules(fs_any_t->t, NULL, &flow_act, &dest, 1);
+ if (IS_ERR(rule)) {
+ err = PTR_ERR(rule);
+ netdev_err(priv->netdev,
+ "%s: add default rule failed, fs type=ANY, err %d\n",
+ __func__, err);
+ return err;
+ }
+
+ fs_any->default_rule = rule;
+ return 0;
+}
+
+#define MLX5E_FS_ANY_NUM_GROUPS (2)
+#define MLX5E_FS_ANY_GROUP1_SIZE (BIT(16))
+#define MLX5E_FS_ANY_GROUP2_SIZE (BIT(0))
+#define MLX5E_FS_ANY_TABLE_SIZE (MLX5E_FS_ANY_GROUP1_SIZE +\
+ MLX5E_FS_ANY_GROUP2_SIZE)
+
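+/* Group 1 matches on the 16-bit ethertype, hence up to BIT(16) rules;
+ * group 2 holds the single catch-all default rule.
+ */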
+static int fs_any_create_groups(struct mlx5e_flow_table *ft)
+{
+ int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
+ void *outer_headers_c;
+ int ix = 0;
+ u32 *in;
+ int err;
+ u8 *mc;
+
+ ft->g = kcalloc(MLX5E_FS_ANY_NUM_GROUPS, sizeof(*ft->g), GFP_KERNEL);
+ in = kvzalloc(inlen, GFP_KERNEL);
+ if (!in || !ft->g) {
+ kfree(ft->g);
+ kvfree(in);
+ return -ENOMEM;
+ }
+
+ /* Match on ethertype */
+ mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
+ outer_headers_c = MLX5_ADDR_OF(fte_match_param, mc, outer_headers);
+ MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, outer_headers_c, ethertype);
+ MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
+ MLX5_SET_CFG(in, start_flow_index, ix);
+ ix += MLX5E_FS_ANY_GROUP1_SIZE;
+ MLX5_SET_CFG(in, end_flow_index, ix - 1);
+ ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
+ if (IS_ERR(ft->g[ft->num_groups]))
+ goto err;
+ ft->num_groups++;
+
+ /* Default Flow Group */
+ memset(in, 0, inlen);
+ MLX5_SET_CFG(in, start_flow_index, ix);
+ ix += MLX5E_FS_ANY_GROUP2_SIZE;
+ MLX5_SET_CFG(in, end_flow_index, ix - 1);
+ ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
+ if (IS_ERR(ft->g[ft->num_groups]))
+ goto err;
+ ft->num_groups++;
+
+ kvfree(in);
+ return 0;
+
+err:
+ err = PTR_ERR(ft->g[ft->num_groups]);
+ ft->g[ft->num_groups] = NULL;
+ kvfree(in);
+
+ return err;
+}
+
+static int fs_any_create_table(struct mlx5e_priv *priv)
+{
+ struct mlx5e_flow_table *ft = &priv->fs.any->table;
+ struct mlx5_flow_table_attr ft_attr = {};
+ int err;
+
+ ft->num_groups = 0;
+
+ ft_attr.max_fte = MLX5E_FS_ANY_TABLE_SIZE;
+ ft_attr.level = MLX5E_FS_TT_ANY_FT_LEVEL;
+ ft_attr.prio = MLX5E_NIC_PRIO;
+
+ ft->t = mlx5_create_flow_table(priv->fs.ns, &ft_attr);
+ if (IS_ERR(ft->t)) {
+ err = PTR_ERR(ft->t);
+ ft->t = NULL;
+ return err;
+ }
+
+ netdev_dbg(priv->netdev, "Created fs ANY table id %u level %u\n",
+ ft->t->id, ft->t->level);
+
+ err = fs_any_create_groups(ft);
+ if (err)
+ goto err;
+
+ err = fs_any_add_default_rule(priv);
+ if (err)
+ goto err;
+
+ return 0;
+
+err:
+ mlx5e_destroy_flow_table(ft);
+ return err;
+}
+
+static int fs_any_disable(struct mlx5e_priv *priv)
+{
+ int err;
+
+ /* Modify ttc rules' destination to point back to the indir TIRs */
+ err = mlx5e_ttc_fwd_default_dest(priv, MLX5E_TT_ANY);
+ if (err) {
+ netdev_err(priv->netdev,
+ "%s: modify ttc[%d] default destination failed, err(%d)\n",
+ __func__, MLX5E_TT_ANY, err);
+ return err;
+ }
+ return 0;
+}
+
+static int fs_any_enable(struct mlx5e_priv *priv)
+{
+ struct mlx5_flow_destination dest = {};
+ int err;
+
+ dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
+ dest.ft = priv->fs.any->table.t;
+
+ /* Modify ttc rules' destination to point to the accel_fs FTs */
+ err = mlx5e_ttc_fwd_dest(priv, MLX5E_TT_ANY, &dest);
+ if (err) {
+ netdev_err(priv->netdev,
+ "%s: modify ttc[%d] destination to accel failed, err(%d)\n",
+ __func__, MLX5E_TT_ANY, err);
+ return err;
+ }
+ return 0;
+}
+
+static void fs_any_destroy_table(struct mlx5e_fs_any *fs_any)
+{
+ if (IS_ERR_OR_NULL(fs_any->table.t))
+ return;
+
+ mlx5_del_flow_rules(fs_any->default_rule);
+ mlx5e_destroy_flow_table(&fs_any->table);
+ fs_any->table.t = NULL;
+}
+
+void mlx5e_fs_tt_redirect_any_destroy(struct mlx5e_priv *priv)
+{
+ struct mlx5e_fs_any *fs_any = priv->fs.any;
+
+ if (!fs_any)
+ return;
+
+ if (--fs_any->ref_cnt)
+ return;
+
+ fs_any_disable(priv);
+
+ fs_any_destroy_table(fs_any);
+
+ kfree(fs_any);
+ priv->fs.any = NULL;
+}
+
+int mlx5e_fs_tt_redirect_any_create(struct mlx5e_priv *priv)
+{
+ int err;
+
+ if (priv->fs.any) {
+ priv->fs.any->ref_cnt++;
+ return 0;
+ }
+
+ priv->fs.any = kzalloc(sizeof(*priv->fs.any), GFP_KERNEL);
+ if (!priv->fs.any)
+ return -ENOMEM;
+
+ err = fs_any_create_table(priv);
+ if (err)
+ return err;
+
+ err = fs_any_enable(priv);
+ if (err)
+ goto err_destroy_table;
+
+ priv->fs.any->ref_cnt = 1;
+
+ return 0;
+
+err_destroy_table:
+ fs_any_destroy_table(priv->fs.any);
+
+ kfree(priv->fs.any);
+ priv->fs.any = NULL;
+ return err;
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/fs_tt_redirect.h b/drivers/net/ethernet/mellanox/mlx5/core/en/fs_tt_redirect.h
new file mode 100644
index 000000000000..8385df24eb99
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/fs_tt_redirect.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2021 Mellanox Technologies. */
+
+#ifndef __MLX5E_FS_TT_REDIRECT_H__
+#define __MLX5E_FS_TT_REDIRECT_H__
+
+#include "en.h"
+#include "en/fs.h"
+
+void mlx5e_fs_tt_redirect_del_rule(struct mlx5_flow_handle *rule);
+
+/* UDP traffic type redirect */
+struct mlx5_flow_handle *
+mlx5e_fs_tt_redirect_udp_add_rule(struct mlx5e_priv *priv,
+ enum mlx5e_traffic_types ttc_type,
+ u32 tir_num, u16 d_port);
+void mlx5e_fs_tt_redirect_udp_destroy(struct mlx5e_priv *priv);
+int mlx5e_fs_tt_redirect_udp_create(struct mlx5e_priv *priv);
+
+/* ANY traffic type redirect */
+struct mlx5_flow_handle *
+mlx5e_fs_tt_redirect_any_add_rule(struct mlx5e_priv *priv,
+ u32 tir_num, u16 ether_type);
+void mlx5e_fs_tt_redirect_any_destroy(struct mlx5e_priv *priv);
+int mlx5e_fs_tt_redirect_any_create(struct mlx5e_priv *priv);
+#endif
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/health.c b/drivers/net/ethernet/mellanox/mlx5/core/en/health.c
index 84e501e057b4..6f4e6c34b2a2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/health.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/health.c
@@ -128,7 +128,7 @@ int mlx5e_health_eq_diag_fmsg(struct mlx5_eq_comp *eq, struct devlink_fmsg *fmsg
if (err)
return err;
- err = devlink_fmsg_u32_pair_put(fmsg, "size", eq->core.nent);
+ err = devlink_fmsg_u32_pair_put(fmsg, "size", eq_get_size(&eq->core));
if (err)
return err;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/params.c b/drivers/net/ethernet/mellanox/mlx5/core/en/params.c
index 36381a2ed5a5..f410c1268422 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/params.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/params.c
@@ -3,10 +3,13 @@
#include "en/params.h"
#include "en/txrx.h"
-#include "en_accel/tls_rxtx.h"
+#include "en/port.h"
+#include "en_accel/en_accel.h"
+#include "accel/ipsec.h"
+#include "fpga/ipsec.h"
-static inline bool mlx5e_rx_is_xdp(struct mlx5e_params *params,
- struct mlx5e_xsk_param *xsk)
+static bool mlx5e_rx_is_xdp(struct mlx5e_params *params,
+ struct mlx5e_xsk_param *xsk)
{
return params->xdp_prog || xsk;
}
@@ -37,8 +40,8 @@ u32 mlx5e_rx_get_min_frag_sz(struct mlx5e_params *params,
return linear_rq_headroom + hw_mtu;
}
-u32 mlx5e_rx_get_linear_frag_sz(struct mlx5e_params *params,
- struct mlx5e_xsk_param *xsk)
+static u32 mlx5e_rx_get_linear_frag_sz(struct mlx5e_params *params,
+ struct mlx5e_xsk_param *xsk)
{
u32 frag_sz = mlx5e_rx_get_min_frag_sz(params, xsk);
@@ -87,30 +90,39 @@ bool mlx5e_rx_is_linear_skb(struct mlx5e_params *params,
return !params->lro_en && linear_frag_sz <= PAGE_SIZE;
}
-#define MLX5_MAX_MPWQE_LOG_WQE_STRIDE_SZ ((BIT(__mlx5_bit_sz(wq, log_wqe_stride_size)) - 1) + \
- MLX5_MPWQE_LOG_STRIDE_SZ_BASE)
-bool mlx5e_rx_mpwqe_is_linear_skb(struct mlx5_core_dev *mdev,
- struct mlx5e_params *params,
- struct mlx5e_xsk_param *xsk)
+bool mlx5e_verify_rx_mpwqe_strides(struct mlx5_core_dev *mdev,
+ u8 log_stride_sz, u8 log_num_strides)
{
- u32 linear_frag_sz = mlx5e_rx_get_linear_frag_sz(params, xsk);
- s8 signed_log_num_strides_param;
- u8 log_num_strides;
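+ /* The strides must exactly cover one WQE: the log sizes have to sum
+ * to MLX5_MPWRQ_LOG_WQE_SZ.
+ */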
+ if (log_stride_sz + log_num_strides != MLX5_MPWRQ_LOG_WQE_SZ)
+ return false;
- if (!mlx5e_rx_is_linear_skb(params, xsk))
+ if (log_stride_sz < MLX5_MPWQE_LOG_STRIDE_SZ_BASE ||
+ log_stride_sz > MLX5_MPWQE_LOG_STRIDE_SZ_MAX)
return false;
- if (order_base_2(linear_frag_sz) > MLX5_MAX_MPWQE_LOG_WQE_STRIDE_SZ)
+ if (log_num_strides > MLX5_MPWQE_LOG_NUM_STRIDES_MAX)
return false;
if (MLX5_CAP_GEN(mdev, ext_stride_num_range))
- return true;
+ return log_num_strides >= MLX5_MPWQE_LOG_NUM_STRIDES_EXT_BASE;
+
+ return log_num_strides >= MLX5_MPWQE_LOG_NUM_STRIDES_BASE;
+}
- log_num_strides = MLX5_MPWRQ_LOG_WQE_SZ - order_base_2(linear_frag_sz);
- signed_log_num_strides_param =
- (s8)log_num_strides - MLX5_MPWQE_LOG_NUM_STRIDES_BASE;
+bool mlx5e_rx_mpwqe_is_linear_skb(struct mlx5_core_dev *mdev,
+ struct mlx5e_params *params,
+ struct mlx5e_xsk_param *xsk)
+{
+ s8 log_num_strides;
+ u8 log_stride_sz;
- return signed_log_num_strides_param >= 0;
+ if (!mlx5e_rx_is_linear_skb(params, xsk))
+ return false;
+
+ log_stride_sz = order_base_2(mlx5e_rx_get_linear_frag_sz(params, xsk));
+ log_num_strides = MLX5_MPWRQ_LOG_WQE_SZ - log_stride_sz;
+
+ return mlx5e_verify_rx_mpwqe_strides(mdev, log_stride_sz, log_num_strides);
}
u8 mlx5e_mpwqe_get_log_rq_size(struct mlx5e_params *params,
@@ -172,17 +184,505 @@ u16 mlx5e_calc_sq_stop_room(struct mlx5_core_dev *mdev, struct mlx5e_params *par
return stop_room;
}
-int mlx5e_validate_params(struct mlx5e_priv *priv, struct mlx5e_params *params)
+int mlx5e_validate_params(struct mlx5_core_dev *mdev, struct mlx5e_params *params)
{
size_t sq_size = 1 << params->log_sq_size;
u16 stop_room;
- stop_room = mlx5e_calc_sq_stop_room(priv->mdev, params);
+ stop_room = mlx5e_calc_sq_stop_room(mdev, params);
if (stop_room >= sq_size) {
- netdev_err(priv->netdev, "Stop room %u is bigger than the SQ size %zu\n",
- stop_room, sq_size);
+ mlx5_core_err(mdev, "Stop room %u is bigger than the SQ size %zu\n",
+ stop_room, sq_size);
return -EINVAL;
}
return 0;
}
+
+static struct dim_cq_moder mlx5e_get_def_tx_moderation(u8 cq_period_mode)
+{
+ struct dim_cq_moder moder;
+
+ moder.cq_period_mode = cq_period_mode;
+ moder.pkts = MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_PKTS;
+ moder.usec = MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC;
+ if (cq_period_mode == MLX5_CQ_PERIOD_MODE_START_FROM_CQE)
+ moder.usec = MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC_FROM_CQE;
+
+ return moder;
+}
+
+static struct dim_cq_moder mlx5e_get_def_rx_moderation(u8 cq_period_mode)
+{
+ struct dim_cq_moder moder;
+
+ moder.cq_period_mode = cq_period_mode;
+ moder.pkts = MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_PKTS;
+ moder.usec = MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC;
+ if (cq_period_mode == MLX5_CQ_PERIOD_MODE_START_FROM_CQE)
+ moder.usec = MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC_FROM_CQE;
+
+ return moder;
+}
+
+static u8 mlx5_to_net_dim_cq_period_mode(u8 cq_period_mode)
+{
+ return cq_period_mode == MLX5_CQ_PERIOD_MODE_START_FROM_CQE ?
+ DIM_CQ_PERIOD_MODE_START_FROM_CQE :
+ DIM_CQ_PERIOD_MODE_START_FROM_EQE;
+}
+
+void mlx5e_reset_tx_moderation(struct mlx5e_params *params, u8 cq_period_mode)
+{
+ if (params->tx_dim_enabled) {
+ u8 dim_period_mode = mlx5_to_net_dim_cq_period_mode(cq_period_mode);
+
+ params->tx_cq_moderation = net_dim_get_def_tx_moderation(dim_period_mode);
+ } else {
+ params->tx_cq_moderation = mlx5e_get_def_tx_moderation(cq_period_mode);
+ }
+}
+
+void mlx5e_reset_rx_moderation(struct mlx5e_params *params, u8 cq_period_mode)
+{
+ if (params->rx_dim_enabled) {
+ u8 dim_period_mode = mlx5_to_net_dim_cq_period_mode(cq_period_mode);
+
+ params->rx_cq_moderation = net_dim_get_def_rx_moderation(dim_period_mode);
+ } else {
+ params->rx_cq_moderation = mlx5e_get_def_rx_moderation(cq_period_mode);
+ }
+}
+
+void mlx5e_set_tx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode)
+{
+ mlx5e_reset_tx_moderation(params, cq_period_mode);
+ MLX5E_SET_PFLAG(params, MLX5E_PFLAG_TX_CQE_BASED_MODER,
+ params->tx_cq_moderation.cq_period_mode ==
+ MLX5_CQ_PERIOD_MODE_START_FROM_CQE);
+}
+
+void mlx5e_set_rx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode)
+{
+ mlx5e_reset_rx_moderation(params, cq_period_mode);
+ MLX5E_SET_PFLAG(params, MLX5E_PFLAG_RX_CQE_BASED_MODER,
+ params->rx_cq_moderation.cq_period_mode ==
+ MLX5_CQ_PERIOD_MODE_START_FROM_CQE);
+}
+
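+/* Treat PCIe as the bottleneck when the port speed exceeds
+ * MLX5E_SLOW_PCI_RATIO times the available PCI bandwidth; Striding RQ is
+ * then not preferred (see mlx5e_build_rq_params()).
+ */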
+bool slow_pci_heuristic(struct mlx5_core_dev *mdev)
+{
+ u32 link_speed = 0;
+ u32 pci_bw = 0;
+
+ mlx5e_port_max_linkspeed(mdev, &link_speed);
+ pci_bw = pcie_bandwidth_available(mdev->pdev, NULL, NULL, NULL);
+ mlx5_core_dbg_once(mdev, "Max link speed = %d, PCI BW = %d\n",
+ link_speed, pci_bw);
+
+#define MLX5E_SLOW_PCI_RATIO (2)
+
+ return link_speed && pci_bw &&
+ link_speed > MLX5E_SLOW_PCI_RATIO * pci_bw;
+}
+
+bool mlx5e_striding_rq_possible(struct mlx5_core_dev *mdev,
+ struct mlx5e_params *params)
+{
+ if (!mlx5e_check_fragmented_striding_rq_cap(mdev))
+ return false;
+
+ if (mlx5_fpga_is_ipsec_device(mdev))
+ return false;
+
+ if (params->xdp_prog) {
+ /* XSK params are not considered here. If striding RQ is in use,
+ * and an XSK is being opened, mlx5e_rx_mpwqe_is_linear_skb will
+ * be called with the known XSK params.
+ */
+ if (!mlx5e_rx_mpwqe_is_linear_skb(mdev, params, NULL))
+ return false;
+ }
+
+ return true;
+}
+
+void mlx5e_init_rq_type_params(struct mlx5_core_dev *mdev,
+ struct mlx5e_params *params)
+{
+ params->log_rq_mtu_frames = is_kdump_kernel() ?
+ MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE :
+ MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE;
+
+ mlx5_core_info(mdev, "MLX5E: StrdRq(%d) RqSz(%ld) StrdSz(%ld) RxCqeCmprss(%d)\n",
+ params->rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ,
+ params->rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ ?
+ BIT(mlx5e_mpwqe_get_log_rq_size(params, NULL)) :
+ BIT(params->log_rq_mtu_frames),
+ BIT(mlx5e_mpwqe_get_log_stride_size(mdev, params, NULL)),
+ MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS));
+}
+
+void mlx5e_set_rq_type(struct mlx5_core_dev *mdev, struct mlx5e_params *params)
+{
+ params->rq_wq_type = mlx5e_striding_rq_possible(mdev, params) &&
+ MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_STRIDING_RQ) ?
+ MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ :
+ MLX5_WQ_TYPE_CYCLIC;
+}
+
+void mlx5e_build_rq_params(struct mlx5_core_dev *mdev,
+ struct mlx5e_params *params)
+{
+ /* Prefer Striding RQ, unless any of the following holds:
+ * - Striding RQ configuration is not possible/supported.
+ * - Slow PCI heuristic.
+ * - Legacy RQ would use linear SKB while Striding RQ would use non-linear.
+ *
+ * No XSK params: checking the availability of striding RQ in general.
+ */
+ if (!slow_pci_heuristic(mdev) &&
+ mlx5e_striding_rq_possible(mdev, params) &&
+ (mlx5e_rx_mpwqe_is_linear_skb(mdev, params, NULL) ||
+ !mlx5e_rx_is_linear_skb(params, NULL)))
+ MLX5E_SET_PFLAG(params, MLX5E_PFLAG_RX_STRIDING_RQ, true);
+ mlx5e_set_rq_type(mdev, params);
+ mlx5e_init_rq_type_params(mdev, params);
+}
+
+/* Build queue parameters */
+
+void mlx5e_build_create_cq_param(struct mlx5e_create_cq_param *ccp, struct mlx5e_channel *c)
+{
+ *ccp = (struct mlx5e_create_cq_param) {
+ .napi = &c->napi,
+ .ch_stats = c->stats,
+ .node = cpu_to_node(c->cpu),
+ .ix = c->ix,
+ };
+}
+
+#define DEFAULT_FRAG_SIZE (2048)
+
+static void mlx5e_build_rq_frags_info(struct mlx5_core_dev *mdev,
+ struct mlx5e_params *params,
+ struct mlx5e_xsk_param *xsk,
+ struct mlx5e_rq_frags_info *info)
+{
+ u32 byte_count = MLX5E_SW2HW_MTU(params, params->sw_mtu);
+ int frag_size_max = DEFAULT_FRAG_SIZE;
+ u32 buf_size = 0;
+ int i;
+
+ if (mlx5_fpga_is_ipsec_device(mdev))
+ byte_count += MLX5E_METADATA_ETHER_LEN;
+
+ if (mlx5e_rx_is_linear_skb(params, xsk)) {
+ int frag_stride;
+
+ frag_stride = mlx5e_rx_get_linear_frag_sz(params, xsk);
+ frag_stride = roundup_pow_of_two(frag_stride);
+
+ info->arr[0].frag_size = byte_count;
+ info->arr[0].frag_stride = frag_stride;
+ info->num_frags = 1;
+ info->wqe_bulk = PAGE_SIZE / frag_stride;
+ goto out;
+ }
+
+ if (byte_count > PAGE_SIZE +
+ (MLX5E_MAX_RX_FRAGS - 1) * frag_size_max)
+ frag_size_max = PAGE_SIZE;
+
+ i = 0;
+ while (buf_size < byte_count) {
+ int frag_size = byte_count - buf_size;
+
+ if (i < MLX5E_MAX_RX_FRAGS - 1)
+ frag_size = min(frag_size, frag_size_max);
+
+ info->arr[i].frag_size = frag_size;
+ info->arr[i].frag_stride = roundup_pow_of_two(frag_size);
+
+ buf_size += frag_size;
+ i++;
+ }
+ info->num_frags = i;
+ /* number of different wqes sharing a page */
+ info->wqe_bulk = 1 + (info->num_frags % 2);
+
+out:
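+ /* Never post fewer than 8 WQEs at a time. */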
+ info->wqe_bulk = max_t(u8, info->wqe_bulk, 8);
+ info->log_num_frags = order_base_2(info->num_frags);
+}
+
+static u8 mlx5e_get_rqwq_log_stride(u8 wq_type, int ndsegs)
+{
+ int sz = sizeof(struct mlx5_wqe_data_seg) * ndsegs;
+
+ switch (wq_type) {
+ case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
+ sz += sizeof(struct mlx5e_rx_wqe_ll);
+ break;
+ default: /* MLX5_WQ_TYPE_CYCLIC */
+ sz += sizeof(struct mlx5e_rx_wqe_cyc);
+ }
+
+ return order_base_2(sz);
+}
+
+static void mlx5e_build_common_cq_param(struct mlx5_core_dev *mdev,
+ struct mlx5e_cq_param *param)
+{
+ void *cqc = param->cqc;
+
+ MLX5_SET(cqc, cqc, uar_page, mdev->priv.uar->index);
+ if (MLX5_CAP_GEN(mdev, cqe_128_always) && cache_line_size() >= 128)
+ MLX5_SET(cqc, cqc, cqe_sz, CQE_STRIDE_128_PAD);
+}
+
+static void mlx5e_build_rx_cq_param(struct mlx5_core_dev *mdev,
+ struct mlx5e_params *params,
+ struct mlx5e_xsk_param *xsk,
+ struct mlx5e_cq_param *param)
+{
+ bool hw_stridx = false;
+ void *cqc = param->cqc;
+ u8 log_cq_size;
+
+ switch (params->rq_wq_type) {
+ case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
+ log_cq_size = mlx5e_mpwqe_get_log_rq_size(params, xsk) +
+ mlx5e_mpwqe_get_log_num_strides(mdev, params, xsk);
+ hw_stridx = MLX5_CAP_GEN(mdev, mini_cqe_resp_stride_index);
+ break;
+ default: /* MLX5_WQ_TYPE_CYCLIC */
+ log_cq_size = params->log_rq_mtu_frames;
+ }
+
+ MLX5_SET(cqc, cqc, log_cq_size, log_cq_size);
+ if (MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS)) {
+ MLX5_SET(cqc, cqc, mini_cqe_res_format, hw_stridx ?
+ MLX5_CQE_FORMAT_CSUM_STRIDX : MLX5_CQE_FORMAT_CSUM);
+ MLX5_SET(cqc, cqc, cqe_comp_en, 1);
+ }
+
+ mlx5e_build_common_cq_param(mdev, param);
+ param->cq_period_mode = params->rx_cq_moderation.cq_period_mode;
+}
+
+int mlx5e_build_rq_param(struct mlx5_core_dev *mdev,
+ struct mlx5e_params *params,
+ struct mlx5e_xsk_param *xsk,
+ u16 q_counter,
+ struct mlx5e_rq_param *param)
+{
+ void *rqc = param->rqc;
+ void *wq = MLX5_ADDR_OF(rqc, rqc, wq);
+ int ndsegs = 1;
+
+ switch (params->rq_wq_type) {
+ case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ: {
+ u8 log_wqe_num_of_strides = mlx5e_mpwqe_get_log_num_strides(mdev, params, xsk);
+ u8 log_wqe_stride_size = mlx5e_mpwqe_get_log_stride_size(mdev, params, xsk);
+
+ if (!mlx5e_verify_rx_mpwqe_strides(mdev, log_wqe_stride_size,
+ log_wqe_num_of_strides)) {
+ mlx5_core_err(mdev,
+ "Bad RX MPWQE params: log_stride_size %u, log_num_strides %u\n",
+ log_wqe_stride_size, log_wqe_num_of_strides);
+ return -EINVAL;
+ }
+
+ MLX5_SET(wq, wq, log_wqe_num_of_strides,
+ log_wqe_num_of_strides - MLX5_MPWQE_LOG_NUM_STRIDES_BASE);
+ MLX5_SET(wq, wq, log_wqe_stride_size,
+ log_wqe_stride_size - MLX5_MPWQE_LOG_STRIDE_SZ_BASE);
+ MLX5_SET(wq, wq, log_wq_sz, mlx5e_mpwqe_get_log_rq_size(params, xsk));
+ break;
+ }
+ default: /* MLX5_WQ_TYPE_CYCLIC */
+ MLX5_SET(wq, wq, log_wq_sz, params->log_rq_mtu_frames);
+ mlx5e_build_rq_frags_info(mdev, params, xsk, &param->frags_info);
+ ndsegs = param->frags_info.num_frags;
+ }
+
+ MLX5_SET(wq, wq, wq_type, params->rq_wq_type);
+ MLX5_SET(wq, wq, end_padding_mode, MLX5_WQ_END_PAD_MODE_ALIGN);
+ MLX5_SET(wq, wq, log_wq_stride,
+ mlx5e_get_rqwq_log_stride(params->rq_wq_type, ndsegs));
+ MLX5_SET(wq, wq, pd, mdev->mlx5e_res.hw_objs.pdn);
+ MLX5_SET(rqc, rqc, counter_set_id, q_counter);
+ MLX5_SET(rqc, rqc, vsd, params->vlan_strip_disable);
+ MLX5_SET(rqc, rqc, scatter_fcs, params->scatter_fcs_en);
+
+ param->wq.buf_numa_node = dev_to_node(mlx5_core_dma_dev(mdev));
+ mlx5e_build_rx_cq_param(mdev, params, xsk, &param->cqp);
+
+ return 0;
+}
+
+void mlx5e_build_drop_rq_param(struct mlx5_core_dev *mdev,
+ u16 q_counter,
+ struct mlx5e_rq_param *param)
+{
+ void *rqc = param->rqc;
+ void *wq = MLX5_ADDR_OF(rqc, rqc, wq);
+
+ MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_CYCLIC);
+ MLX5_SET(wq, wq, log_wq_stride,
+ mlx5e_get_rqwq_log_stride(MLX5_WQ_TYPE_CYCLIC, 1));
+ MLX5_SET(rqc, rqc, counter_set_id, q_counter);
+
+ param->wq.buf_numa_node = dev_to_node(mlx5_core_dma_dev(mdev));
+}
+
+void mlx5e_build_tx_cq_param(struct mlx5_core_dev *mdev,
+ struct mlx5e_params *params,
+ struct mlx5e_cq_param *param)
+{
+ void *cqc = param->cqc;
+
+ MLX5_SET(cqc, cqc, log_cq_size, params->log_sq_size);
+
+ mlx5e_build_common_cq_param(mdev, param);
+ param->cq_period_mode = params->tx_cq_moderation.cq_period_mode;
+}
+
+void mlx5e_build_sq_param_common(struct mlx5_core_dev *mdev,
+ struct mlx5e_sq_param *param)
+{
+ void *sqc = param->sqc;
+ void *wq = MLX5_ADDR_OF(sqc, sqc, wq);
+
+ MLX5_SET(wq, wq, log_wq_stride, ilog2(MLX5_SEND_WQE_BB));
+ MLX5_SET(wq, wq, pd, mdev->mlx5e_res.hw_objs.pdn);
+
+ param->wq.buf_numa_node = dev_to_node(mlx5_core_dma_dev(mdev));
+}
+
+void mlx5e_build_sq_param(struct mlx5_core_dev *mdev,
+ struct mlx5e_params *params,
+ struct mlx5e_sq_param *param)
+{
+ void *sqc = param->sqc;
+ void *wq = MLX5_ADDR_OF(sqc, sqc, wq);
+ bool allow_swp;
+
+ allow_swp = mlx5_geneve_tx_allowed(mdev) ||
+ !!MLX5_IPSEC_DEV(mdev);
+ mlx5e_build_sq_param_common(mdev, param);
+ MLX5_SET(wq, wq, log_wq_sz, params->log_sq_size);
+ MLX5_SET(sqc, sqc, allow_swp, allow_swp);
+ param->is_mpw = MLX5E_GET_PFLAG(params, MLX5E_PFLAG_SKB_TX_MPWQE);
+ param->stop_room = mlx5e_calc_sq_stop_room(mdev, params);
+ mlx5e_build_tx_cq_param(mdev, params, &param->cqp);
+}
+
+static void mlx5e_build_ico_cq_param(struct mlx5_core_dev *mdev,
+ u8 log_wq_size,
+ struct mlx5e_cq_param *param)
+{
+ void *cqc = param->cqc;
+
+ MLX5_SET(cqc, cqc, log_cq_size, log_wq_size);
+
+ mlx5e_build_common_cq_param(mdev, param);
+
+ param->cq_period_mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
+}
+
+static u8 mlx5e_get_rq_log_wq_sz(void *rqc)
+{
+ void *wq = MLX5_ADDR_OF(rqc, rqc, wq);
+
+ return MLX5_GET(wq, wq, log_wq_sz);
+}
+
+static u8 mlx5e_build_icosq_log_wq_sz(struct mlx5e_params *params,
+ struct mlx5e_rq_param *rqp)
+{
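+ /* Striding RQ posts one UMR WQE per RQ WQE, so the ICOSQ must be able
+ * to hold a UMR WQE for every entry of the RQ ring.
+ */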
+ switch (params->rq_wq_type) {
+ case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
+ return max_t(u8, MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE,
+ order_base_2(MLX5E_UMR_WQEBBS) +
+ mlx5e_get_rq_log_wq_sz(rqp->rqc));
+ default: /* MLX5_WQ_TYPE_CYCLIC */
+ return MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE;
+ }
+}
+
+static u8 mlx5e_build_async_icosq_log_wq_sz(struct mlx5_core_dev *mdev)
+{
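+ /* kTLS RX posts its resync work on the async ICOSQ, so it needs the
+ * default (larger) size; the minimum suffices otherwise.
+ */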
+ if (mlx5_accel_is_ktls_rx(mdev))
+ return MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE;
+
+ return MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE;
+}
+
+static void mlx5e_build_icosq_param(struct mlx5_core_dev *mdev,
+ u8 log_wq_size,
+ struct mlx5e_sq_param *param)
+{
+ void *sqc = param->sqc;
+ void *wq = MLX5_ADDR_OF(sqc, sqc, wq);
+
+ mlx5e_build_sq_param_common(mdev, param);
+
+ MLX5_SET(wq, wq, log_wq_sz, log_wq_size);
+ MLX5_SET(sqc, sqc, reg_umr, MLX5_CAP_ETH(mdev, reg_umr_sq));
+ mlx5e_build_ico_cq_param(mdev, log_wq_size, &param->cqp);
+}
+
+static void mlx5e_build_async_icosq_param(struct mlx5_core_dev *mdev,
+ u8 log_wq_size,
+ struct mlx5e_sq_param *param)
+{
+ void *sqc = param->sqc;
+ void *wq = MLX5_ADDR_OF(sqc, sqc, wq);
+
+ mlx5e_build_sq_param_common(mdev, param);
+ param->stop_room = mlx5e_stop_room_for_wqe(1); /* for XSK NOP */
+ param->is_tls = mlx5_accel_is_ktls_rx(mdev);
+ if (param->is_tls)
+ param->stop_room += mlx5e_stop_room_for_wqe(1); /* for TLS RX resync NOP */
+ MLX5_SET(sqc, sqc, reg_umr, MLX5_CAP_ETH(mdev, reg_umr_sq));
+ MLX5_SET(wq, wq, log_wq_sz, log_wq_size);
+ mlx5e_build_ico_cq_param(mdev, log_wq_size, &param->cqp);
+}
+
+void mlx5e_build_xdpsq_param(struct mlx5_core_dev *mdev,
+ struct mlx5e_params *params,
+ struct mlx5e_sq_param *param)
+{
+ void *sqc = param->sqc;
+ void *wq = MLX5_ADDR_OF(sqc, sqc, wq);
+
+ mlx5e_build_sq_param_common(mdev, param);
+ MLX5_SET(wq, wq, log_wq_sz, params->log_sq_size);
+ param->is_mpw = MLX5E_GET_PFLAG(params, MLX5E_PFLAG_XDP_TX_MPWQE);
+ mlx5e_build_tx_cq_param(mdev, params, &param->cqp);
+}
+
+int mlx5e_build_channel_param(struct mlx5_core_dev *mdev,
+ struct mlx5e_params *params,
+ u16 q_counter,
+ struct mlx5e_channel_param *cparam)
+{
+ u8 icosq_log_wq_sz, async_icosq_log_wq_sz;
+ int err;
+
+ err = mlx5e_build_rq_param(mdev, params, NULL, q_counter, &cparam->rq);
+ if (err)
+ return err;
+
+ icosq_log_wq_sz = mlx5e_build_icosq_log_wq_sz(params, &cparam->rq);
+ async_icosq_log_wq_sz = mlx5e_build_async_icosq_log_wq_sz(mdev);
+
+ mlx5e_build_sq_param(mdev, params, &cparam->txq_sq);
+ mlx5e_build_xdpsq_param(mdev, params, &cparam->xdp_sq);
+ mlx5e_build_icosq_param(mdev, icosq_log_wq_sz, &cparam->icosq);
+ mlx5e_build_async_icosq_param(mdev, async_icosq_log_wq_sz, &cparam->async_icosq);
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/params.h b/drivers/net/ethernet/mellanox/mlx5/core/en/params.h
index ea2cfb04b31a..e9593f5f0661 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/params.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/params.h
@@ -30,6 +30,7 @@ struct mlx5e_sq_param {
u32 sqc[MLX5_ST_SZ_DW(sqc)];
struct mlx5_wq_param wq;
bool is_mpw;
+ bool is_tls;
u16 stop_room;
};
@@ -84,12 +85,23 @@ static inline bool mlx5e_qid_validate(const struct mlx5e_profile *profile,
/* Parameter calculations */
+void mlx5e_reset_tx_moderation(struct mlx5e_params *params, u8 cq_period_mode);
+void mlx5e_reset_rx_moderation(struct mlx5e_params *params, u8 cq_period_mode);
+void mlx5e_set_tx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode);
+void mlx5e_set_rx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode);
+
+bool slow_pci_heuristic(struct mlx5_core_dev *mdev);
+bool mlx5e_striding_rq_possible(struct mlx5_core_dev *mdev, struct mlx5e_params *params);
+void mlx5e_build_rq_params(struct mlx5_core_dev *mdev, struct mlx5e_params *params);
+void mlx5e_set_rq_type(struct mlx5_core_dev *mdev, struct mlx5e_params *params);
+void mlx5e_init_rq_type_params(struct mlx5_core_dev *mdev, struct mlx5e_params *params);
+
+bool mlx5e_verify_rx_mpwqe_strides(struct mlx5_core_dev *mdev,
+ u8 log_stride_sz, u8 log_num_strides);
u16 mlx5e_get_linear_rq_headroom(struct mlx5e_params *params,
struct mlx5e_xsk_param *xsk);
u32 mlx5e_rx_get_min_frag_sz(struct mlx5e_params *params,
struct mlx5e_xsk_param *xsk);
-u32 mlx5e_rx_get_linear_frag_sz(struct mlx5e_params *params,
- struct mlx5e_xsk_param *xsk);
u8 mlx5e_mpwqe_log_pkts_per_wqe(struct mlx5e_params *params,
struct mlx5e_xsk_param *xsk);
bool mlx5e_rx_is_linear_skb(struct mlx5e_params *params,
@@ -112,32 +124,31 @@ u16 mlx5e_get_rq_headroom(struct mlx5_core_dev *mdev,
/* Build queue parameters */
void mlx5e_build_create_cq_param(struct mlx5e_create_cq_param *ccp, struct mlx5e_channel *c);
-void mlx5e_build_rq_param(struct mlx5e_priv *priv,
- struct mlx5e_params *params,
- struct mlx5e_xsk_param *xsk,
- struct mlx5e_rq_param *param);
-void mlx5e_build_sq_param_common(struct mlx5e_priv *priv,
+int mlx5e_build_rq_param(struct mlx5_core_dev *mdev,
+ struct mlx5e_params *params,
+ struct mlx5e_xsk_param *xsk,
+ u16 q_counter,
+ struct mlx5e_rq_param *param);
+void mlx5e_build_drop_rq_param(struct mlx5_core_dev *mdev,
+ u16 q_counter,
+ struct mlx5e_rq_param *param);
+void mlx5e_build_sq_param_common(struct mlx5_core_dev *mdev,
struct mlx5e_sq_param *param);
-void mlx5e_build_sq_param(struct mlx5e_priv *priv, struct mlx5e_params *params,
+void mlx5e_build_sq_param(struct mlx5_core_dev *mdev,
+ struct mlx5e_params *params,
struct mlx5e_sq_param *param);
-void mlx5e_build_rx_cq_param(struct mlx5e_priv *priv,
- struct mlx5e_params *params,
- struct mlx5e_xsk_param *xsk,
- struct mlx5e_cq_param *param);
-void mlx5e_build_tx_cq_param(struct mlx5e_priv *priv,
+void mlx5e_build_tx_cq_param(struct mlx5_core_dev *mdev,
struct mlx5e_params *params,
struct mlx5e_cq_param *param);
-void mlx5e_build_ico_cq_param(struct mlx5e_priv *priv,
- u8 log_wq_size,
- struct mlx5e_cq_param *param);
-void mlx5e_build_icosq_param(struct mlx5e_priv *priv,
- u8 log_wq_size,
- struct mlx5e_sq_param *param);
-void mlx5e_build_xdpsq_param(struct mlx5e_priv *priv,
+void mlx5e_build_xdpsq_param(struct mlx5_core_dev *mdev,
struct mlx5e_params *params,
struct mlx5e_sq_param *param);
+int mlx5e_build_channel_param(struct mlx5_core_dev *mdev,
+ struct mlx5e_params *params,
+ u16 q_counter,
+ struct mlx5e_channel_param *cparam);
u16 mlx5e_calc_sq_stop_room(struct mlx5_core_dev *mdev, struct mlx5e_params *params);
-int mlx5e_validate_params(struct mlx5e_priv *priv, struct mlx5e_params *params);
+int mlx5e_validate_params(struct mlx5_core_dev *mdev, struct mlx5e_params *params);
#endif /* __MLX5_EN_PARAMS_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c b/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c
index d57b6f06382f..d907c1acd4d5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c
@@ -1,8 +1,26 @@
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
// Copyright (c) 2020 Mellanox Technologies
+#include <linux/ptp_classify.h>
#include "en/ptp.h"
#include "en/txrx.h"
+#include "en/params.h"
+#include "en/fs_tt_redirect.h"
+
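+/* Steering rules that redirect PTP traffic to the PTP RQ: PTP over L2
+ * (ETH_P_1588) and PTP event messages over UDPv4/UDPv6 (PTP_EV_PORT).
+ */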
+struct mlx5e_ptp_fs {
+ struct mlx5_flow_handle *l2_rule;
+ struct mlx5_flow_handle *udp_v4_rule;
+ struct mlx5_flow_handle *udp_v6_rule;
+ bool valid;
+};
+
+#define MLX5E_PTP_CHANNEL_IX 0
+
+struct mlx5e_ptp_params {
+ struct mlx5e_params params;
+ struct mlx5e_sq_param txq_sq_param;
+ struct mlx5e_rq_param rq_param;
+};
struct mlx5e_skb_cb_hwtstamp {
ktime_t cqe_hwtstamp;
@@ -116,9 +134,9 @@ static bool mlx5e_ptp_poll_ts_cq(struct mlx5e_cq *cq, int budget)
static int mlx5e_ptp_napi_poll(struct napi_struct *napi, int budget)
{
- struct mlx5e_port_ptp *c = container_of(napi, struct mlx5e_port_ptp,
- napi);
+ struct mlx5e_ptp *c = container_of(napi, struct mlx5e_ptp, napi);
struct mlx5e_ch_stats *ch_stats = c->stats;
+ struct mlx5e_rq *rq = &c->rq;
bool busy = false;
int work_done = 0;
int i;
@@ -127,9 +145,19 @@ static int mlx5e_ptp_napi_poll(struct napi_struct *napi, int budget)
ch_stats->poll++;
- for (i = 0; i < c->num_tc; i++) {
- busy |= mlx5e_poll_tx_cq(&c->ptpsq[i].txqsq.cq, budget);
- busy |= mlx5e_ptp_poll_ts_cq(&c->ptpsq[i].ts_cq, budget);
+ if (test_bit(MLX5E_PTP_STATE_TX, c->state)) {
+ for (i = 0; i < c->num_tc; i++) {
+ busy |= mlx5e_poll_tx_cq(&c->ptpsq[i].txqsq.cq, budget);
+ busy |= mlx5e_ptp_poll_ts_cq(&c->ptpsq[i].ts_cq, budget);
+ }
+ }
+ if (test_bit(MLX5E_PTP_STATE_RX, c->state) && likely(budget)) {
+ work_done = mlx5e_poll_rx_cq(&rq->cq, budget);
+ busy |= work_done == budget;
+ busy |= INDIRECT_CALL_2(rq->post_wqes,
+ mlx5e_post_rx_mpwqes,
+ mlx5e_post_rx_wqes,
+ rq);
}
if (busy) {
@@ -142,10 +170,14 @@ static int mlx5e_ptp_napi_poll(struct napi_struct *napi, int budget)
ch_stats->arm++;
- for (i = 0; i < c->num_tc; i++) {
- mlx5e_cq_arm(&c->ptpsq[i].txqsq.cq);
- mlx5e_cq_arm(&c->ptpsq[i].ts_cq);
+ if (test_bit(MLX5E_PTP_STATE_TX, c->state)) {
+ for (i = 0; i < c->num_tc; i++) {
+ mlx5e_cq_arm(&c->ptpsq[i].txqsq.cq);
+ mlx5e_cq_arm(&c->ptpsq[i].ts_cq);
+ }
}
+ if (test_bit(MLX5E_PTP_STATE_RX, c->state))
+ mlx5e_cq_arm(&rq->cq);
out:
rcu_read_unlock();
@@ -153,7 +185,7 @@ out:
return work_done;
}
-static int mlx5e_ptp_alloc_txqsq(struct mlx5e_port_ptp *c, int txq_ix,
+static int mlx5e_ptp_alloc_txqsq(struct mlx5e_ptp *c, int txq_ix,
struct mlx5e_params *params,
struct mlx5e_sq_param *param,
struct mlx5e_txqsq *sq, int tc,
@@ -172,20 +204,18 @@ static int mlx5e_ptp_alloc_txqsq(struct mlx5e_port_ptp *c, int txq_ix,
sq->netdev = c->netdev;
sq->priv = c->priv;
sq->mdev = mdev;
- sq->ch_ix = c->ix;
+ sq->ch_ix = MLX5E_PTP_CHANNEL_IX;
sq->txq_ix = txq_ix;
- sq->uar_map = mdev->mlx5e_res.bfreg.map;
+ sq->uar_map = mdev->mlx5e_res.hw_objs.bfreg.map;
sq->min_inline_mode = params->tx_min_inline_mode;
sq->hw_mtu = MLX5E_SW2HW_MTU(params, params->sw_mtu);
- sq->stats = &c->priv->port_ptp_stats.sq[tc];
+ sq->stats = &c->priv->ptp_stats.sq[tc];
sq->ptpsq = ptpsq;
INIT_WORK(&sq->recover_work, mlx5e_tx_err_cqe_work);
if (!MLX5_CAP_ETH(mdev, wqe_vlan_insert))
set_bit(MLX5E_SQ_STATE_VLAN_NEED_L2_INLINE, &sq->state);
sq->stop_room = param->stop_room;
- sq->ptp_cyc2time = mlx5_is_real_time_sq(mdev) ?
- mlx5_real_time_cyc2time :
- mlx5_timecounter_cyc2time;
+ sq->ptp_cyc2time = mlx5_sq_ts_translator(mdev);
node = dev_to_node(mlx5_core_dma_dev(mdev));
@@ -243,7 +273,7 @@ static void mlx5e_ptp_free_traffic_db(struct mlx5e_skb_fifo *skb_fifo)
kvfree(skb_fifo->fifo);
}
-static int mlx5e_ptp_open_txqsq(struct mlx5e_port_ptp *c, u32 tisn,
+static int mlx5e_ptp_open_txqsq(struct mlx5e_ptp *c, u32 tisn,
int txq_ix, struct mlx5e_ptp_params *cparams,
int tc, struct mlx5e_ptpsq *ptpsq)
{
@@ -293,7 +323,7 @@ static void mlx5e_ptp_close_txqsq(struct mlx5e_ptpsq *ptpsq)
mlx5e_free_txqsq(sq);
}
-static int mlx5e_ptp_open_txqsqs(struct mlx5e_port_ptp *c,
+static int mlx5e_ptp_open_txqsqs(struct mlx5e_ptp *c,
struct mlx5e_ptp_params *cparams)
{
struct mlx5e_params *params = &cparams->params;
@@ -321,7 +351,7 @@ close_txqsq:
return err;
}
-static void mlx5e_ptp_close_txqsqs(struct mlx5e_port_ptp *c)
+static void mlx5e_ptp_close_txqsqs(struct mlx5e_ptp *c)
{
int tc;
@@ -329,8 +359,8 @@ static void mlx5e_ptp_close_txqsqs(struct mlx5e_port_ptp *c)
mlx5e_ptp_close_txqsq(&c->ptpsq[tc]);
}
-static int mlx5e_ptp_open_cqs(struct mlx5e_port_ptp *c,
- struct mlx5e_ptp_params *cparams)
+static int mlx5e_ptp_open_tx_cqs(struct mlx5e_ptp *c,
+ struct mlx5e_ptp_params *cparams)
{
struct mlx5e_params *params = &cparams->params;
struct mlx5e_create_cq_param ccp = {};
@@ -342,7 +372,7 @@ static int mlx5e_ptp_open_cqs(struct mlx5e_port_ptp *c,
ccp.node = dev_to_node(mlx5_core_dma_dev(c->mdev));
ccp.ch_stats = c->stats;
ccp.napi = &c->napi;
- ccp.ix = c->ix;
+ ccp.ix = MLX5E_PTP_CHANNEL_IX;
cq_param = &cparams->txq_sq_param.cqp;
@@ -362,7 +392,7 @@ static int mlx5e_ptp_open_cqs(struct mlx5e_port_ptp *c,
if (err)
goto out_err_ts_cq;
- ptpsq->cq_stats = &c->priv->port_ptp_stats.cq[tc];
+ ptpsq->cq_stats = &c->priv->ptp_stats.cq[tc];
}
return 0;
@@ -378,7 +408,25 @@ out_err_txqsq_cq:
return err;
}
-static void mlx5e_ptp_close_cqs(struct mlx5e_port_ptp *c)
+static int mlx5e_ptp_open_rx_cq(struct mlx5e_ptp *c,
+ struct mlx5e_ptp_params *cparams)
+{
+ struct mlx5e_create_cq_param ccp = {};
+ struct dim_cq_moder ptp_moder = {};
+ struct mlx5e_cq_param *cq_param;
+ struct mlx5e_cq *cq = &c->rq.cq;
+
+ ccp.node = dev_to_node(mlx5_core_dma_dev(c->mdev));
+ ccp.ch_stats = c->stats;
+ ccp.napi = &c->napi;
+ ccp.ix = MLX5E_PTP_CHANNEL_IX;
+
+ cq_param = &cparams->rq_param.cqp;
+
+ return mlx5e_open_cq(c->priv, ptp_moder, cq_param, &ccp, cq);
+}
+
+static void mlx5e_ptp_close_tx_cqs(struct mlx5e_ptp *c)
{
int tc;
@@ -389,22 +437,36 @@ static void mlx5e_ptp_close_cqs(struct mlx5e_port_ptp *c)
mlx5e_close_cq(&c->ptpsq[tc].txqsq.cq);
}
-static void mlx5e_ptp_build_sq_param(struct mlx5e_priv *priv,
+static void mlx5e_ptp_build_sq_param(struct mlx5_core_dev *mdev,
struct mlx5e_params *params,
struct mlx5e_sq_param *param)
{
void *sqc = param->sqc;
void *wq;
- mlx5e_build_sq_param_common(priv, param);
+ mlx5e_build_sq_param_common(mdev, param);
wq = MLX5_ADDR_OF(sqc, sqc, wq);
MLX5_SET(wq, wq, log_wq_sz, params->log_sq_size);
param->stop_room = mlx5e_stop_room_for_wqe(MLX5_SEND_WQE_MAX_WQEBBS);
- mlx5e_build_tx_cq_param(priv, params, &param->cqp);
+ mlx5e_build_tx_cq_param(mdev, params, &param->cqp);
+}
+
+static void mlx5e_ptp_build_rq_param(struct mlx5_core_dev *mdev,
+ struct net_device *netdev,
+ u16 q_counter,
+ struct mlx5e_ptp_params *ptp_params)
+{
+ struct mlx5e_rq_param *rq_params = &ptp_params->rq_param;
+ struct mlx5e_params *params = &ptp_params->params;
+
+ params->rq_wq_type = MLX5_WQ_TYPE_CYCLIC;
+ mlx5e_init_rq_type_params(mdev, params);
+ params->sw_mtu = netdev->max_mtu;
+ mlx5e_build_rq_param(mdev, params, NULL, q_counter, rq_params);
}
-static void mlx5e_ptp_build_params(struct mlx5e_port_ptp *c,
+static void mlx5e_ptp_build_params(struct mlx5e_ptp *c,
struct mlx5e_ptp_params *cparams,
struct mlx5e_params *orig)
{
@@ -417,52 +479,193 @@ static void mlx5e_ptp_build_params(struct mlx5e_port_ptp *c,
params->num_tc = orig->num_tc;
/* SQ */
- params->log_sq_size = orig->log_sq_size;
-
- mlx5e_ptp_build_sq_param(c->priv, params, &cparams->txq_sq_param);
+ if (test_bit(MLX5E_PTP_STATE_TX, c->state)) {
+ params->log_sq_size = orig->log_sq_size;
+ mlx5e_ptp_build_sq_param(c->mdev, params, &cparams->txq_sq_param);
+ }
+ if (test_bit(MLX5E_PTP_STATE_RX, c->state))
+ mlx5e_ptp_build_rq_param(c->mdev, c->netdev, c->priv->q_counter, cparams);
}
-static int mlx5e_ptp_open_queues(struct mlx5e_port_ptp *c,
- struct mlx5e_ptp_params *cparams)
+static int mlx5e_init_ptp_rq(struct mlx5e_ptp *c, struct mlx5e_params *params,
+ struct mlx5e_rq *rq)
{
+ struct mlx5_core_dev *mdev = c->mdev;
+ struct mlx5e_priv *priv = c->priv;
int err;
- err = mlx5e_ptp_open_cqs(c, cparams);
+ rq->wq_type = params->rq_wq_type;
+ rq->pdev = mdev->device;
+ rq->netdev = priv->netdev;
+ rq->priv = priv;
+ rq->clock = &mdev->clock;
+ rq->tstamp = &priv->tstamp;
+ rq->mdev = mdev;
+ rq->hw_mtu = MLX5E_SW2HW_MTU(params, params->sw_mtu);
+ rq->stats = &c->priv->ptp_stats.rq;
+ rq->ptp_cyc2time = mlx5_rq_ts_translator(mdev);
+ err = mlx5e_rq_set_handlers(rq, params, false);
if (err)
return err;
- err = mlx5e_ptp_open_txqsqs(c, cparams);
+ return xdp_rxq_info_reg(&rq->xdp_rxq, rq->netdev, rq->ix, 0);
+}
+
+static int mlx5e_ptp_open_rq(struct mlx5e_ptp *c, struct mlx5e_params *params,
+ struct mlx5e_rq_param *rq_param)
+{
+ int node = dev_to_node(c->mdev->device);
+ int err;
+
+ err = mlx5e_init_ptp_rq(c, params, &c->rq);
if (err)
- goto close_cqs;
+ return err;
+
+ return mlx5e_open_rq(params, rq_param, NULL, node, &c->rq);
+}
+
+static int mlx5e_ptp_open_queues(struct mlx5e_ptp *c,
+ struct mlx5e_ptp_params *cparams)
+{
+ int err;
+ if (test_bit(MLX5E_PTP_STATE_TX, c->state)) {
+ err = mlx5e_ptp_open_tx_cqs(c, cparams);
+ if (err)
+ return err;
+
+ err = mlx5e_ptp_open_txqsqs(c, cparams);
+ if (err)
+ goto close_tx_cqs;
+ }
+ if (test_bit(MLX5E_PTP_STATE_RX, c->state)) {
+ err = mlx5e_ptp_open_rx_cq(c, cparams);
+ if (err)
+ goto close_txqsq;
+
+ err = mlx5e_ptp_open_rq(c, &cparams->params, &cparams->rq_param);
+ if (err)
+ goto close_rx_cq;
+ }
return 0;
-close_cqs:
- mlx5e_ptp_close_cqs(c);
+close_rx_cq:
+ if (test_bit(MLX5E_PTP_STATE_RX, c->state))
+ mlx5e_close_cq(&c->rq.cq);
+close_txqsq:
+ if (test_bit(MLX5E_PTP_STATE_TX, c->state))
+ mlx5e_ptp_close_txqsqs(c);
+close_tx_cqs:
+ if (test_bit(MLX5E_PTP_STATE_TX, c->state))
+ mlx5e_ptp_close_tx_cqs(c);
return err;
}
-static void mlx5e_ptp_close_queues(struct mlx5e_port_ptp *c)
+static void mlx5e_ptp_close_queues(struct mlx5e_ptp *c)
{
- mlx5e_ptp_close_txqsqs(c);
- mlx5e_ptp_close_cqs(c);
+ if (test_bit(MLX5E_PTP_STATE_RX, c->state)) {
+ mlx5e_close_rq(&c->rq);
+ mlx5e_close_cq(&c->rq.cq);
+ }
+ if (test_bit(MLX5E_PTP_STATE_TX, c->state)) {
+ mlx5e_ptp_close_txqsqs(c);
+ mlx5e_ptp_close_tx_cqs(c);
+ }
}
-int mlx5e_port_ptp_open(struct mlx5e_priv *priv, struct mlx5e_params *params,
- u8 lag_port, struct mlx5e_port_ptp **cp)
+static int mlx5e_ptp_set_state(struct mlx5e_ptp *c, struct mlx5e_params *params)
+{
+ if (MLX5E_GET_PFLAG(params, MLX5E_PFLAG_TX_PORT_TS))
+ __set_bit(MLX5E_PTP_STATE_TX, c->state);
+
+ if (params->ptp_rx)
+ __set_bit(MLX5E_PTP_STATE_RX, c->state);
+
+ return bitmap_empty(c->state, MLX5E_PTP_STATE_NUM_STATES) ? -EINVAL : 0;
+}
+
+static void mlx5e_ptp_rx_unset_fs(struct mlx5e_priv *priv)
+{
+ struct mlx5e_ptp_fs *ptp_fs = priv->fs.ptp_fs;
+
+ if (!ptp_fs->valid)
+ return;
+
+ mlx5e_fs_tt_redirect_del_rule(ptp_fs->l2_rule);
+ mlx5e_fs_tt_redirect_any_destroy(priv);
+
+ mlx5e_fs_tt_redirect_del_rule(ptp_fs->udp_v6_rule);
+ mlx5e_fs_tt_redirect_del_rule(ptp_fs->udp_v4_rule);
+ mlx5e_fs_tt_redirect_udp_destroy(priv);
+ ptp_fs->valid = false;
+}
+
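+/* Install the UDPv4, UDPv6 and L2 redirect rules in order, unwinding on
+ * failure; "valid" makes set/unset idempotent.
+ */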
+static int mlx5e_ptp_rx_set_fs(struct mlx5e_priv *priv)
+{
+ struct mlx5e_ptp_fs *ptp_fs = priv->fs.ptp_fs;
+ struct mlx5_flow_handle *rule;
+ u32 tirn = priv->ptp_tir.tirn;
+ int err;
+
+ if (ptp_fs->valid)
+ return 0;
+
+ err = mlx5e_fs_tt_redirect_udp_create(priv);
+ if (err)
+ goto out_free;
+
+ rule = mlx5e_fs_tt_redirect_udp_add_rule(priv, MLX5E_TT_IPV4_UDP,
+ tirn, PTP_EV_PORT);
+ if (IS_ERR(rule)) {
+ err = PTR_ERR(rule);
+ goto out_destroy_fs_udp;
+ }
+ ptp_fs->udp_v4_rule = rule;
+
+ rule = mlx5e_fs_tt_redirect_udp_add_rule(priv, MLX5E_TT_IPV6_UDP,
+ tirn, PTP_EV_PORT);
+ if (IS_ERR(rule)) {
+ err = PTR_ERR(rule);
+ goto out_destroy_udp_v4_rule;
+ }
+ ptp_fs->udp_v6_rule = rule;
+
+ err = mlx5e_fs_tt_redirect_any_create(priv);
+ if (err)
+ goto out_destroy_udp_v6_rule;
+
+ rule = mlx5e_fs_tt_redirect_any_add_rule(priv, tirn, ETH_P_1588);
+ if (IS_ERR(rule)) {
+ err = PTR_ERR(rule);
+ goto out_destroy_fs_any;
+ }
+ ptp_fs->l2_rule = rule;
+ ptp_fs->valid = true;
+
+ return 0;
+
+out_destroy_fs_any:
+ mlx5e_fs_tt_redirect_any_destroy(priv);
+out_destroy_udp_v6_rule:
+ mlx5e_fs_tt_redirect_del_rule(ptp_fs->udp_v6_rule);
+out_destroy_udp_v4_rule:
+ mlx5e_fs_tt_redirect_del_rule(ptp_fs->udp_v4_rule);
+out_destroy_fs_udp:
+ mlx5e_fs_tt_redirect_udp_destroy(priv);
+out_free:
+ return err;
+}
+
+int mlx5e_ptp_open(struct mlx5e_priv *priv, struct mlx5e_params *params,
+ u8 lag_port, struct mlx5e_ptp **cp)
{
struct net_device *netdev = priv->netdev;
struct mlx5_core_dev *mdev = priv->mdev;
struct mlx5e_ptp_params *cparams;
- struct mlx5e_port_ptp *c;
- unsigned int irq;
+ struct mlx5e_ptp *c;
int err;
- int eqn;
- err = mlx5_vector2eqn(priv->mdev, 0, &eqn, &irq);
- if (err)
- return err;
c = kvzalloc_node(sizeof(*c), GFP_KERNEL, dev_to_node(mlx5_core_dma_dev(mdev)));
cparams = kvzalloc(sizeof(*cparams), GFP_KERNEL);
@@ -472,14 +675,17 @@ int mlx5e_port_ptp_open(struct mlx5e_priv *priv, struct mlx5e_params *params,
c->priv = priv;
c->mdev = priv->mdev;
c->tstamp = &priv->tstamp;
- c->ix = 0;
c->pdev = mlx5_core_dma_dev(priv->mdev);
c->netdev = priv->netdev;
- c->mkey_be = cpu_to_be32(priv->mdev->mlx5e_res.mkey.key);
+ c->mkey_be = cpu_to_be32(priv->mdev->mlx5e_res.hw_objs.mkey.key);
c->num_tc = params->num_tc;
- c->stats = &priv->port_ptp_stats.ch;
+ c->stats = &priv->ptp_stats.ch;
c->lag_port = lag_port;
+ err = mlx5e_ptp_set_state(c, params);
+ if (err)
+ goto err_free;
+
netif_napi_add(netdev, &c->napi, mlx5e_ptp_napi_poll, 64);
mlx5e_ptp_build_params(c, cparams, params);
@@ -488,6 +694,9 @@ int mlx5e_port_ptp_open(struct mlx5e_priv *priv, struct mlx5e_params *params,
if (unlikely(err))
goto err_napi_del;
+ if (test_bit(MLX5E_PTP_STATE_RX, c->state))
+ priv->rx_ptp_opened = true;
+
*cp = c;
kvfree(cparams);
@@ -496,13 +705,13 @@ int mlx5e_port_ptp_open(struct mlx5e_priv *priv, struct mlx5e_params *params,
err_napi_del:
netif_napi_del(&c->napi);
-
+err_free:
kvfree(cparams);
kvfree(c);
return err;
}
-void mlx5e_port_ptp_close(struct mlx5e_port_ptp *c)
+void mlx5e_ptp_close(struct mlx5e_ptp *c)
{
mlx5e_ptp_close_queues(c);
netif_napi_del(&c->napi);
@@ -510,22 +719,94 @@ void mlx5e_port_ptp_close(struct mlx5e_port_ptp *c)
kvfree(c);
}
-void mlx5e_ptp_activate_channel(struct mlx5e_port_ptp *c)
+void mlx5e_ptp_activate_channel(struct mlx5e_ptp *c)
{
int tc;
napi_enable(&c->napi);
- for (tc = 0; tc < c->num_tc; tc++)
- mlx5e_activate_txqsq(&c->ptpsq[tc].txqsq);
+ if (test_bit(MLX5E_PTP_STATE_TX, c->state)) {
+ for (tc = 0; tc < c->num_tc; tc++)
+ mlx5e_activate_txqsq(&c->ptpsq[tc].txqsq);
+ }
+ if (test_bit(MLX5E_PTP_STATE_RX, c->state)) {
+ mlx5e_ptp_rx_set_fs(c->priv);
+ mlx5e_activate_rq(&c->rq);
+ }
}
-void mlx5e_ptp_deactivate_channel(struct mlx5e_port_ptp *c)
+void mlx5e_ptp_deactivate_channel(struct mlx5e_ptp *c)
{
int tc;
- for (tc = 0; tc < c->num_tc; tc++)
- mlx5e_deactivate_txqsq(&c->ptpsq[tc].txqsq);
+ if (test_bit(MLX5E_PTP_STATE_RX, c->state))
+ mlx5e_deactivate_rq(&c->rq);
+
+ if (test_bit(MLX5E_PTP_STATE_TX, c->state)) {
+ for (tc = 0; tc < c->num_tc; tc++)
+ mlx5e_deactivate_txqsq(&c->ptpsq[tc].txqsq);
+ }
napi_disable(&c->napi);
}
+
+int mlx5e_ptp_get_rqn(struct mlx5e_ptp *c, u32 *rqn)
+{
+ if (!c || !test_bit(MLX5E_PTP_STATE_RX, c->state))
+ return -EINVAL;
+
+ *rqn = c->rq.rqn;
+ return 0;
+}
+
+int mlx5e_ptp_alloc_rx_fs(struct mlx5e_priv *priv)
+{
+ struct mlx5e_ptp_fs *ptp_fs;
+
+ if (!priv->profile->rx_ptp_support)
+ return 0;
+
+ ptp_fs = kzalloc(sizeof(*ptp_fs), GFP_KERNEL);
+ if (!ptp_fs)
+ return -ENOMEM;
+
+ priv->fs.ptp_fs = ptp_fs;
+ return 0;
+}
+
+void mlx5e_ptp_free_rx_fs(struct mlx5e_priv *priv)
+{
+ struct mlx5e_ptp_fs *ptp_fs = priv->fs.ptp_fs;
+
+ if (!priv->profile->rx_ptp_support)
+ return;
+
+ mlx5e_ptp_rx_unset_fs(priv);
+ kfree(ptp_fs);
+}
+
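+/* Toggle the PTP RX steering rules at runtime (e.g. when the RX filter
+ * configuration changes), with sanity checks against the PTP channel state.
+ */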
+int mlx5e_ptp_rx_manage_fs(struct mlx5e_priv *priv, bool set)
+{
+ struct mlx5e_ptp *c = priv->channels.ptp;
+
+ if (!priv->profile->rx_ptp_support)
+ return 0;
+
+ if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
+ return 0;
+
+ if (set) {
+ if (!c || !test_bit(MLX5E_PTP_STATE_RX, c->state)) {
+ netdev_WARN_ONCE(priv->netdev, "Don't try to add PTP RX-FS rules");
+ return -EINVAL;
+ }
+ return mlx5e_ptp_rx_set_fs(priv);
+ }
+ /* set == false */
+ if (c && test_bit(MLX5E_PTP_STATE_RX, c->state)) {
+ netdev_WARN_ONCE(priv->netdev, "Don't try to remove PTP RX-FS rules");
+ return -EINVAL;
+ }
+ mlx5e_ptp_rx_unset_fs(priv);
+ return 0;
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.h b/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.h
index 90c98ea63b7f..ab935cce952b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.h
@@ -5,7 +5,6 @@
#define __MLX5_EN_PTP_H__
#include "en.h"
-#include "en/params.h"
#include "en_stats.h"
struct mlx5e_ptpsq {
@@ -17,9 +16,16 @@ struct mlx5e_ptpsq {
struct mlx5e_ptp_cq_stats *cq_stats;
};
-struct mlx5e_port_ptp {
+enum {
+ MLX5E_PTP_STATE_TX,
+ MLX5E_PTP_STATE_RX,
+ MLX5E_PTP_STATE_NUM_STATES,
+};
+
+struct mlx5e_ptp {
/* data path */
struct mlx5e_ptpsq ptpsq[MLX5E_MAX_NUM_TC];
+ struct mlx5e_rq rq;
struct napi_struct napi;
struct device *pdev;
struct net_device *netdev;
@@ -34,20 +40,18 @@ struct mlx5e_port_ptp {
struct mlx5e_priv *priv;
struct mlx5_core_dev *mdev;
struct hwtstamp_config *tstamp;
- DECLARE_BITMAP(state, MLX5E_CHANNEL_NUM_STATES);
- int ix;
-};
-
-struct mlx5e_ptp_params {
- struct mlx5e_params params;
- struct mlx5e_sq_param txq_sq_param;
+ DECLARE_BITMAP(state, MLX5E_PTP_STATE_NUM_STATES);
};
-int mlx5e_port_ptp_open(struct mlx5e_priv *priv, struct mlx5e_params *params,
- u8 lag_port, struct mlx5e_port_ptp **cp);
-void mlx5e_port_ptp_close(struct mlx5e_port_ptp *c);
-void mlx5e_ptp_activate_channel(struct mlx5e_port_ptp *c);
-void mlx5e_ptp_deactivate_channel(struct mlx5e_port_ptp *c);
+int mlx5e_ptp_open(struct mlx5e_priv *priv, struct mlx5e_params *params,
+ u8 lag_port, struct mlx5e_ptp **cp);
+void mlx5e_ptp_close(struct mlx5e_ptp *c);
+void mlx5e_ptp_activate_channel(struct mlx5e_ptp *c);
+void mlx5e_ptp_deactivate_channel(struct mlx5e_ptp *c);
+int mlx5e_ptp_get_rqn(struct mlx5e_ptp *c, u32 *rqn);
+int mlx5e_ptp_alloc_rx_fs(struct mlx5e_priv *priv);
+void mlx5e_ptp_free_rx_fs(struct mlx5e_priv *priv);
+int mlx5e_ptp_rx_manage_fs(struct mlx5e_priv *priv, bool set);
enum {
MLX5E_SKB_CB_CQE_HWTSTAMP = BIT(0),
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/qos.c b/drivers/net/ethernet/mellanox/mlx5/core/en/qos.c
index 12d7ad061237..5efe3278b0f6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/qos.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/qos.c
@@ -232,8 +232,8 @@ static int mlx5e_open_qos_sq(struct mlx5e_priv *priv, struct mlx5e_channels *chs
memset(&param_sq, 0, sizeof(param_sq));
memset(&param_cq, 0, sizeof(param_cq));
- mlx5e_build_sq_param(priv, params, &param_sq);
- mlx5e_build_tx_cq_param(priv, params, &param_cq);
+ mlx5e_build_sq_param(priv->mdev, params, &param_sq);
+ mlx5e_build_tx_cq_param(priv->mdev, params, &param_cq);
err = mlx5e_open_cq(priv, params->tx_cq_moderation, &param_cq, &ccp, &sq->cq);
if (err)
goto err_free_sq;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c
index 065126370acd..6cdc52d50a48 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c
@@ -17,6 +17,7 @@
#include "en/mapping.h"
#include "en/tc_tun.h"
#include "lib/port_tun.h"
+#include "esw/sample.h"
struct mlx5e_rep_indr_block_priv {
struct net_device *netdev;
@@ -169,6 +170,9 @@ static int mlx5e_rep_setup_tc_cb(enum tc_setup_type type, void *type_data,
unsigned long flags = MLX5_TC_FLAG(INGRESS) | MLX5_TC_FLAG(ESW_OFFLOAD);
struct mlx5e_priv *priv = cb_priv;
+ if (!priv->netdev || !netif_device_present(priv->netdev))
+ return -EOPNOTSUPP;
+
switch (type) {
case TC_SETUP_CLSFLOWER:
return mlx5e_rep_setup_tc_cls_flower(priv, type_data, flags);
@@ -321,6 +325,9 @@ mlx5e_rep_indr_offload(struct net_device *netdev,
struct mlx5e_priv *priv = netdev_priv(indr_priv->rpriv->netdev);
int err = 0;
+ if (!netif_device_present(indr_priv->rpriv->netdev))
+ return -EOPNOTSUPP;
+
switch (flower->command) {
case FLOW_CLS_REPLACE:
err = mlx5e_configure_flower(netdev, priv, flower, flags);
@@ -605,27 +612,50 @@ static bool mlx5e_restore_tunnel(struct mlx5e_priv *priv, struct sk_buff *skb,
return true;
}
+
+static bool mlx5e_restore_skb(struct sk_buff *skb, u32 chain, u32 reg_c1,
+ struct mlx5e_tc_update_priv *tc_priv)
+{
+ struct mlx5e_priv *priv = netdev_priv(skb->dev);
+ u32 tunnel_id = reg_c1 >> ESW_TUN_OFFSET;
+
+ if (chain) {
+ struct mlx5_rep_uplink_priv *uplink_priv;
+ struct mlx5e_rep_priv *uplink_rpriv;
+ struct tc_skb_ext *tc_skb_ext;
+ struct mlx5_eswitch *esw;
+ u32 zone_restore_id;
+
+ tc_skb_ext = skb_ext_add(skb, TC_SKB_EXT);
+ if (!tc_skb_ext) {
+ WARN_ON(1);
+ return false;
+ }
+ tc_skb_ext->chain = chain;
+ zone_restore_id = reg_c1 & ESW_ZONE_ID_MASK;
+ esw = priv->mdev->priv.eswitch;
+ uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
+ uplink_priv = &uplink_rpriv->uplink_priv;
+ if (!mlx5e_tc_ct_restore_flow(uplink_priv->ct_priv, skb,
+ zone_restore_id))
+ return false;
+ }
+ return mlx5e_restore_tunnel(priv, skb, tc_priv, tunnel_id);
+}
#endif /* CONFIG_NET_TC_SKB_EXT */
bool mlx5e_rep_tc_update_skb(struct mlx5_cqe64 *cqe,
struct sk_buff *skb,
struct mlx5e_tc_update_priv *tc_priv)
{
-#if IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
- u32 chain = 0, reg_c0, reg_c1, tunnel_id, zone_restore_id;
- struct mlx5_rep_uplink_priv *uplink_priv;
- struct mlx5e_rep_priv *uplink_rpriv;
- struct tc_skb_ext *tc_skb_ext;
+ struct mlx5_mapped_obj mapped_obj;
struct mlx5_eswitch *esw;
struct mlx5e_priv *priv;
+ u32 reg_c0, reg_c1;
int err;
reg_c0 = (be32_to_cpu(cqe->sop_drop_qpn) & MLX5E_TC_FLOW_ID_MASK);
- if (reg_c0 == MLX5_FS_DEFAULT_FLOW_TAG)
- reg_c0 = 0;
- reg_c1 = be32_to_cpu(cqe->ft_metadata);
-
- if (!reg_c0)
+ if (!reg_c0 || reg_c0 == MLX5_FS_DEFAULT_FLOW_TAG)
return true;
/* If reg_c0 is not equal to the default flow tag then skb->mark
@@ -633,38 +663,33 @@ bool mlx5e_rep_tc_update_skb(struct mlx5_cqe64 *cqe,
*/
skb->mark = 0;
+ reg_c1 = be32_to_cpu(cqe->ft_metadata);
+
priv = netdev_priv(skb->dev);
esw = priv->mdev->priv.eswitch;
-
- err = mlx5_get_chain_for_tag(esw_chains(esw), reg_c0, &chain);
+ err = mapping_find(esw->offloads.reg_c0_obj_pool, reg_c0, &mapped_obj);
if (err) {
netdev_dbg(priv->netdev,
- "Couldn't find chain for chain tag: %d, err: %d\n",
+ "Couldn't find mapped object for reg_c0: %d, err: %d\n",
reg_c0, err);
return false;
}
- if (chain) {
- tc_skb_ext = skb_ext_add(skb, TC_SKB_EXT);
- if (!tc_skb_ext) {
- WARN_ON(1);
- return false;
- }
-
- tc_skb_ext->chain = chain;
-
- zone_restore_id = reg_c1 & ESW_ZONE_ID_MASK;
-
- uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
- uplink_priv = &uplink_rpriv->uplink_priv;
- if (!mlx5e_tc_ct_restore_flow(uplink_priv->ct_priv, skb,
- zone_restore_id))
- return false;
- }
-
- tunnel_id = reg_c1 >> ESW_TUN_OFFSET;
- return mlx5e_restore_tunnel(priv, skb, tc_priv, tunnel_id);
+#if IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
+ if (mapped_obj.type == MLX5_MAPPED_OBJ_CHAIN)
+ return mlx5e_restore_skb(skb, mapped_obj.chain, reg_c1, tc_priv);
#endif /* CONFIG_NET_TC_SKB_EXT */
+#if IS_ENABLED(CONFIG_MLX5_TC_SAMPLE)
+ if (mapped_obj.type == MLX5_MAPPED_OBJ_SAMPLE) {
+ mlx5_esw_sample_skb(skb, &mapped_obj);
+ return false;
+ }
+#endif /* CONFIG_MLX5_TC_SAMPLE */
+ if (mapped_obj.type != MLX5_MAPPED_OBJ_SAMPLE &&
+ mapped_obj.type != MLX5_MAPPED_OBJ_CHAIN) {
+ netdev_dbg(priv->netdev, "Invalid mapped object type: %d\n", mapped_obj.type);
+ return false;
+ }
return true;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c
index d80bbd17e5f8..0eb125316fe2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c
@@ -4,6 +4,8 @@
#include "health.h"
#include "params.h"
#include "txrx.h"
+#include "devlink.h"
+#include "ptp.h"
static int mlx5e_query_rq_state(struct mlx5_core_dev *dev, u32 rqn, u8 *state)
{
@@ -229,8 +231,9 @@ static int mlx5e_reporter_icosq_diagnose(struct mlx5e_icosq *icosq, u8 hw_state,
return mlx5e_health_fmsg_named_obj_nest_end(fmsg);
}
-static int mlx5e_rx_reporter_build_diagnose_output(struct mlx5e_rq *rq,
- struct devlink_fmsg *fmsg)
+static int
+mlx5e_rx_reporter_build_diagnose_output_rq_common(struct mlx5e_rq *rq,
+ struct devlink_fmsg *fmsg)
{
u16 wqe_counter;
int wqes_sz;
@@ -246,14 +249,6 @@ static int mlx5e_rx_reporter_build_diagnose_output(struct mlx5e_rq *rq,
wq_head = mlx5e_rqwq_get_head(rq);
wqe_counter = mlx5e_rqwq_get_wqe_counter(rq);
- err = devlink_fmsg_obj_nest_start(fmsg);
- if (err)
- return err;
-
- err = devlink_fmsg_u32_pair_put(fmsg, "channel ix", rq->ix);
- if (err)
- return err;
-
err = devlink_fmsg_u32_pair_put(fmsg, "rqn", rq->rqn);
if (err)
return err;
@@ -299,61 +294,155 @@ static int mlx5e_rx_reporter_build_diagnose_output(struct mlx5e_rq *rq,
return err;
}
- err = devlink_fmsg_obj_nest_end(fmsg);
+ return 0;
+}
+
+static int mlx5e_rx_reporter_build_diagnose_output(struct mlx5e_rq *rq,
+ struct devlink_fmsg *fmsg)
+{
+ int err;
+
+ err = devlink_fmsg_obj_nest_start(fmsg);
if (err)
return err;
- return 0;
+ err = devlink_fmsg_u32_pair_put(fmsg, "channel ix", rq->ix);
+ if (err)
+ return err;
+
+ err = mlx5e_rx_reporter_build_diagnose_output_rq_common(rq, fmsg);
+ if (err)
+ return err;
+
+ return devlink_fmsg_obj_nest_end(fmsg);
}
-static int mlx5e_rx_reporter_diagnose(struct devlink_health_reporter *reporter,
- struct devlink_fmsg *fmsg,
- struct netlink_ext_ack *extack)
+static int mlx5e_rx_reporter_diagnose_generic_rq(struct mlx5e_rq *rq,
+ struct devlink_fmsg *fmsg)
{
- struct mlx5e_priv *priv = devlink_health_reporter_priv(reporter);
- struct mlx5e_params *params = &priv->channels.params;
- struct mlx5e_rq *generic_rq;
+ struct mlx5e_priv *priv = rq->priv;
+ struct mlx5e_params *params;
u32 rq_stride, rq_sz;
- int i, err = 0;
-
- mutex_lock(&priv->state_lock);
-
- if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
- goto unlock;
+ bool real_time;
+ int err;
- generic_rq = &priv->channels.c[0]->rq;
- rq_sz = mlx5e_rqwq_get_size(generic_rq);
+ params = &priv->channels.params;
+ rq_sz = mlx5e_rqwq_get_size(rq);
+ real_time = mlx5_is_real_time_rq(priv->mdev);
rq_stride = BIT(mlx5e_mpwqe_get_log_stride_size(priv->mdev, params, NULL));
- err = mlx5e_health_fmsg_named_obj_nest_start(fmsg, "Common config");
- if (err)
- goto unlock;
-
err = mlx5e_health_fmsg_named_obj_nest_start(fmsg, "RQ");
if (err)
- goto unlock;
+ return err;
err = devlink_fmsg_u8_pair_put(fmsg, "type", params->rq_wq_type);
if (err)
- goto unlock;
+ return err;
err = devlink_fmsg_u64_pair_put(fmsg, "stride size", rq_stride);
if (err)
- goto unlock;
+ return err;
err = devlink_fmsg_u32_pair_put(fmsg, "size", rq_sz);
if (err)
- goto unlock;
+ return err;
- err = mlx5e_health_cq_common_diag_fmsg(&generic_rq->cq, fmsg);
+ err = devlink_fmsg_string_pair_put(fmsg, "ts_format", real_time ? "RT" : "FRC");
if (err)
- goto unlock;
+ return err;
- err = mlx5e_health_fmsg_named_obj_nest_end(fmsg);
+ err = mlx5e_health_cq_common_diag_fmsg(&rq->cq, fmsg);
+ if (err)
+ return err;
+
+ return mlx5e_health_fmsg_named_obj_nest_end(fmsg);
+}
+
+static int
+mlx5e_rx_reporter_diagnose_common_ptp_config(struct mlx5e_priv *priv, struct mlx5e_ptp *ptp_ch,
+ struct devlink_fmsg *fmsg)
+{
+ int err;
+
+ err = mlx5e_health_fmsg_named_obj_nest_start(fmsg, "PTP");
+ if (err)
+ return err;
+
+ err = devlink_fmsg_u32_pair_put(fmsg, "filter_type", priv->tstamp.rx_filter);
if (err)
+ return err;
+
+ err = mlx5e_rx_reporter_diagnose_generic_rq(&ptp_ch->rq, fmsg);
+ if (err)
+ return err;
+
+ return mlx5e_health_fmsg_named_obj_nest_end(fmsg);
+}
+
+static int
+mlx5e_rx_reporter_diagnose_common_config(struct devlink_health_reporter *reporter,
+ struct devlink_fmsg *fmsg)
+{
+ struct mlx5e_priv *priv = devlink_health_reporter_priv(reporter);
+ struct mlx5e_rq *generic_rq = &priv->channels.c[0]->rq;
+ struct mlx5e_ptp *ptp_ch = priv->channels.ptp;
+ int err;
+
+ err = mlx5e_health_fmsg_named_obj_nest_start(fmsg, "Common config");
+ if (err)
+ return err;
+
+ err = mlx5e_rx_reporter_diagnose_generic_rq(generic_rq, fmsg);
+ if (err)
+ return err;
+
+ if (ptp_ch && test_bit(MLX5E_PTP_STATE_RX, ptp_ch->state)) {
+ err = mlx5e_rx_reporter_diagnose_common_ptp_config(priv, ptp_ch, fmsg);
+ if (err)
+ return err;
+ }
+
+ return mlx5e_health_fmsg_named_obj_nest_end(fmsg);
+}
+
+static int mlx5e_rx_reporter_build_diagnose_output_ptp_rq(struct mlx5e_rq *rq,
+ struct devlink_fmsg *fmsg)
+{
+ int err;
+
+ err = devlink_fmsg_obj_nest_start(fmsg);
+ if (err)
+ return err;
+
+ err = devlink_fmsg_string_pair_put(fmsg, "channel", "ptp");
+ if (err)
+ return err;
+
+ err = mlx5e_rx_reporter_build_diagnose_output_rq_common(rq, fmsg);
+ if (err)
+ return err;
+
+ err = devlink_fmsg_obj_nest_end(fmsg);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+static int mlx5e_rx_reporter_diagnose(struct devlink_health_reporter *reporter,
+ struct devlink_fmsg *fmsg,
+ struct netlink_ext_ack *extack)
+{
+ struct mlx5e_priv *priv = devlink_health_reporter_priv(reporter);
+ struct mlx5e_ptp *ptp_ch = priv->channels.ptp;
+ int i, err = 0;
+
+ mutex_lock(&priv->state_lock);
+
+ if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
goto unlock;
- err = mlx5e_health_fmsg_named_obj_nest_end(fmsg);
+ err = mlx5e_rx_reporter_diagnose_common_config(reporter, fmsg);
if (err)
goto unlock;
@@ -368,9 +457,12 @@ static int mlx5e_rx_reporter_diagnose(struct devlink_health_reporter *reporter,
if (err)
goto unlock;
}
+ if (ptp_ch && test_bit(MLX5E_PTP_STATE_RX, ptp_ch->state)) {
+ err = mlx5e_rx_reporter_build_diagnose_output_ptp_rq(&ptp_ch->rq, fmsg);
+ if (err)
+ goto unlock;
+ }
err = devlink_fmsg_arr_pair_nest_end(fmsg);
- if (err)
- goto unlock;
unlock:
mutex_unlock(&priv->state_lock);
return err;
@@ -502,6 +594,7 @@ static int mlx5e_rx_reporter_dump_rq(struct mlx5e_priv *priv, struct devlink_fms
static int mlx5e_rx_reporter_dump_all_rqs(struct mlx5e_priv *priv,
struct devlink_fmsg *fmsg)
{
+ struct mlx5e_ptp *ptp_ch = priv->channels.ptp;
struct mlx5_rsc_key key = {};
int i, err;
@@ -534,6 +627,12 @@ static int mlx5e_rx_reporter_dump_all_rqs(struct mlx5e_priv *priv,
return err;
}
+ if (ptp_ch && test_bit(MLX5E_PTP_STATE_RX, ptp_ch->state)) {
+ err = mlx5e_health_queue_dump(priv, fmsg, ptp_ch->rq.rqn, "PTP RQ");
+ if (err)
+ return err;
+ }
+
return devlink_fmsg_arr_pair_nest_end(fmsg);
}
@@ -615,9 +714,10 @@ static const struct devlink_health_reporter_ops mlx5_rx_reporter_ops = {
void mlx5e_reporter_rx_create(struct mlx5e_priv *priv)
{
+ struct devlink_port *dl_port = mlx5e_devlink_get_dl_port(priv);
struct devlink_health_reporter *reporter;
- reporter = devlink_port_health_reporter_create(&priv->dl_port, &mlx5_rx_reporter_ops,
+ reporter = devlink_port_health_reporter_create(dl_port, &mlx5_rx_reporter_ops,
MLX5E_REPORTER_RX_GRACEFUL_PERIOD, priv);
if (IS_ERR(reporter)) {
netdev_warn(priv->netdev, "Failed to create rx reporter, err = %ld\n",
@@ -633,4 +733,5 @@ void mlx5e_reporter_rx_destroy(struct mlx5e_priv *priv)
return;
devlink_port_health_reporter_destroy(priv->rx_reporter);
+ priv->rx_reporter = NULL;
}
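
The hunks above factor the per-RQ dump into mlx5e_rx_reporter_build_diagnose_output_rq_common() so the PTP RQ can be reported with exactly the same fields, differing only in its "channel" tag. A minimal sketch of the resulting fmsg nesting (helper name hypothetical; the devlink_fmsg_* calls are the int-returning API of this kernel):

static int dump_rq_example(struct devlink_fmsg *fmsg, u32 rqn, bool ptp)
{
	int err;

	err = devlink_fmsg_obj_nest_start(fmsg);
	if (err)
		return err;

	/* only the channel tag differs between regular and PTP RQs */
	err = ptp ? devlink_fmsg_string_pair_put(fmsg, "channel", "ptp") :
		    devlink_fmsg_u32_pair_put(fmsg, "channel ix", 0);
	if (err)
		return err;

	/* shared per-RQ fields follow, as in the common helper above */
	err = devlink_fmsg_u32_pair_put(fmsg, "rqn", rqn);
	if (err)
		return err;

	return devlink_fmsg_obj_nest_end(fmsg);
}
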
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c
index d7275c84313e..9d361efd5ff7 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c
@@ -3,6 +3,7 @@
#include "health.h"
#include "en/ptp.h"
+#include "en/devlink.h"
static int mlx5e_wait_for_sq_flush(struct mlx5e_txqsq *sq)
{
@@ -256,12 +257,14 @@ mlx5e_tx_reporter_diagnose_generic_txqsq(struct devlink_fmsg *fmsg,
struct mlx5e_txqsq *txqsq)
{
u32 sq_stride, sq_sz;
+ bool real_time;
int err;
err = mlx5e_health_fmsg_named_obj_nest_start(fmsg, "SQ");
if (err)
return err;
+ real_time = mlx5_is_real_time_sq(txqsq->mdev);
sq_sz = mlx5_wq_cyc_get_size(&txqsq->wq);
sq_stride = MLX5_SEND_WQE_BB;
@@ -273,6 +276,10 @@ mlx5e_tx_reporter_diagnose_generic_txqsq(struct devlink_fmsg *fmsg,
if (err)
return err;
+ err = devlink_fmsg_string_pair_put(fmsg, "ts_format", real_time ? "RT" : "FRC");
+ if (err)
+ return err;
+
err = mlx5e_health_cq_common_diag_fmsg(&txqsq->cq, fmsg);
if (err)
return err;
@@ -303,6 +310,7 @@ mlx5e_tx_reporter_diagnose_common_config(struct devlink_health_reporter *reporte
{
struct mlx5e_priv *priv = devlink_health_reporter_priv(reporter);
struct mlx5e_txqsq *generic_sq = priv->txq2sq[0];
+ struct mlx5e_ptp *ptp_ch = priv->channels.ptp;
struct mlx5e_ptpsq *generic_ptpsq;
int err;
@@ -314,12 +322,11 @@ mlx5e_tx_reporter_diagnose_common_config(struct devlink_health_reporter *reporte
if (err)
return err;
- generic_ptpsq = priv->channels.port_ptp ?
- &priv->channels.port_ptp->ptpsq[0] :
- NULL;
- if (!generic_ptpsq)
+ if (!ptp_ch || !test_bit(MLX5E_PTP_STATE_TX, ptp_ch->state))
goto out;
+ generic_ptpsq = &ptp_ch->ptpsq[0];
+
err = mlx5e_health_fmsg_named_obj_nest_start(fmsg, "PTP");
if (err)
return err;
@@ -345,7 +352,7 @@ static int mlx5e_tx_reporter_diagnose(struct devlink_health_reporter *reporter,
struct netlink_ext_ack *extack)
{
struct mlx5e_priv *priv = devlink_health_reporter_priv(reporter);
- struct mlx5e_port_ptp *ptp_ch = priv->channels.port_ptp;
+ struct mlx5e_ptp *ptp_ch = priv->channels.ptp;
int i, tc, err = 0;
@@ -374,7 +381,7 @@ static int mlx5e_tx_reporter_diagnose(struct devlink_health_reporter *reporter,
}
}
- if (!ptp_ch)
+ if (!ptp_ch || !test_bit(MLX5E_PTP_STATE_TX, ptp_ch->state))
goto close_sqs_nest;
for (tc = 0; tc < priv->channels.params.num_tc; tc++) {
@@ -459,7 +466,7 @@ static int mlx5e_tx_reporter_dump_sq(struct mlx5e_priv *priv, struct devlink_fms
static int mlx5e_tx_reporter_dump_all_sqs(struct mlx5e_priv *priv,
struct devlink_fmsg *fmsg)
{
- struct mlx5e_port_ptp *ptp_ch = priv->channels.port_ptp;
+ struct mlx5e_ptp *ptp_ch = priv->channels.ptp;
struct mlx5_rsc_key key = {};
int i, tc, err;
@@ -496,7 +503,7 @@ static int mlx5e_tx_reporter_dump_all_sqs(struct mlx5e_priv *priv,
}
}
- if (ptp_ch) {
+ if (ptp_ch && test_bit(MLX5E_PTP_STATE_TX, ptp_ch->state)) {
for (tc = 0; tc < priv->channels.params.num_tc; tc++) {
struct mlx5e_txqsq *sq = &ptp_ch->ptpsq[tc].txqsq;
@@ -572,9 +579,10 @@ static const struct devlink_health_reporter_ops mlx5_tx_reporter_ops = {
void mlx5e_reporter_tx_create(struct mlx5e_priv *priv)
{
+ struct devlink_port *dl_port = mlx5e_devlink_get_dl_port(priv);
struct devlink_health_reporter *reporter;
- reporter = devlink_port_health_reporter_create(&priv->dl_port, &mlx5_tx_reporter_ops,
+ reporter = devlink_port_health_reporter_create(dl_port, &mlx5_tx_reporter_ops,
MLX5_REPORTER_TX_GRACEFUL_PERIOD, priv);
if (IS_ERR(reporter)) {
netdev_warn(priv->netdev,
@@ -591,4 +599,5 @@ void mlx5e_reporter_tx_destroy(struct mlx5e_priv *priv)
return;
devlink_port_health_reporter_destroy(priv->tx_reporter);
+ priv->tx_reporter = NULL;
}
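
Both reporters now clear their priv pointer after destroying the reporter. Since the reporters can be torn down and re-created (for example across devlink reload), a stale pointer would let the "already gone" guard pass on freed memory. A hedged sketch of the pattern this enables:

void reporter_destroy_example(struct mlx5e_priv *priv)
{
	if (!priv->rx_reporter)		/* already destroyed or never created */
		return;
	devlink_port_health_reporter_destroy(priv->rx_reporter);
	priv->rx_reporter = NULL;	/* later calls become harmless no-ops */
}
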
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
index 68e54cc1cd16..5da5e5323a44 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
@@ -29,6 +29,8 @@
#define MLX5_CT_STATE_TRK_BIT BIT(2)
#define MLX5_CT_STATE_NAT_BIT BIT(3)
#define MLX5_CT_STATE_REPLY_BIT BIT(4)
+#define MLX5_CT_STATE_RELATED_BIT BIT(5)
+#define MLX5_CT_STATE_INVALID_BIT BIT(6)
#define MLX5_FTE_ID_BITS (mlx5e_tc_attr_to_reg_mappings[FTEID_TO_REG].mlen * 8)
#define MLX5_FTE_ID_MAX GENMASK(MLX5_FTE_ID_BITS - 1, 0)
@@ -717,7 +719,7 @@ mlx5_tc_ct_entry_add_rule(struct mlx5_tc_ct_priv *ct_priv,
zone_rule->nat = nat;
- spec = kzalloc(sizeof(*spec), GFP_KERNEL);
+ spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
if (!spec)
return -ENOMEM;
@@ -759,7 +761,7 @@ mlx5_tc_ct_entry_add_rule(struct mlx5_tc_ct_priv *ct_priv,
zone_rule->attr = attr;
- kfree(spec);
+ kvfree(spec);
ct_dbg("Offloaded ct entry rule in zone %d", entry->tuple.zone);
return 0;
@@ -771,7 +773,7 @@ err_rule:
err_mod_hdr:
kfree(attr);
err_attr:
- kfree(spec);
+ kvfree(spec);
return err;
}
@@ -1229,8 +1231,8 @@ mlx5_tc_ct_match_add(struct mlx5_tc_ct_priv *priv,
struct mlx5_ct_attr *ct_attr,
struct netlink_ext_ack *extack)
{
+ bool trk, est, untrk, unest, new, rpl, unrpl, rel, unrel, inv, uninv;
struct flow_rule *rule = flow_cls_offload_flow_rule(f);
- bool trk, est, untrk, unest, new, rpl, unrpl;
struct flow_dissector_key_ct *mask, *key;
u32 ctstate = 0, ctstate_mask = 0;
u16 ct_state_on, ct_state_off;
@@ -1258,7 +1260,9 @@ mlx5_tc_ct_match_add(struct mlx5_tc_ct_priv *priv,
if (ct_state_mask & ~(TCA_FLOWER_KEY_CT_FLAGS_TRACKED |
TCA_FLOWER_KEY_CT_FLAGS_ESTABLISHED |
TCA_FLOWER_KEY_CT_FLAGS_NEW |
- TCA_FLOWER_KEY_CT_FLAGS_REPLY)) {
+ TCA_FLOWER_KEY_CT_FLAGS_REPLY |
+ TCA_FLOWER_KEY_CT_FLAGS_RELATED |
+ TCA_FLOWER_KEY_CT_FLAGS_INVALID)) {
NL_SET_ERR_MSG_MOD(extack,
"only ct_state trk, est, new and rpl are supported for offload");
return -EOPNOTSUPP;
@@ -1270,9 +1274,13 @@ mlx5_tc_ct_match_add(struct mlx5_tc_ct_priv *priv,
new = ct_state_on & TCA_FLOWER_KEY_CT_FLAGS_NEW;
est = ct_state_on & TCA_FLOWER_KEY_CT_FLAGS_ESTABLISHED;
rpl = ct_state_on & TCA_FLOWER_KEY_CT_FLAGS_REPLY;
+ rel = ct_state_on & TCA_FLOWER_KEY_CT_FLAGS_RELATED;
+ inv = ct_state_on & TCA_FLOWER_KEY_CT_FLAGS_INVALID;
untrk = ct_state_off & TCA_FLOWER_KEY_CT_FLAGS_TRACKED;
unest = ct_state_off & TCA_FLOWER_KEY_CT_FLAGS_ESTABLISHED;
unrpl = ct_state_off & TCA_FLOWER_KEY_CT_FLAGS_REPLY;
+ unrel = ct_state_off & TCA_FLOWER_KEY_CT_FLAGS_RELATED;
+ uninv = ct_state_off & TCA_FLOWER_KEY_CT_FLAGS_INVALID;
ctstate |= trk ? MLX5_CT_STATE_TRK_BIT : 0;
ctstate |= est ? MLX5_CT_STATE_ESTABLISHED_BIT : 0;
@@ -1280,6 +1288,20 @@ mlx5_tc_ct_match_add(struct mlx5_tc_ct_priv *priv,
ctstate_mask |= (untrk || trk) ? MLX5_CT_STATE_TRK_BIT : 0;
ctstate_mask |= (unest || est) ? MLX5_CT_STATE_ESTABLISHED_BIT : 0;
ctstate_mask |= (unrpl || rpl) ? MLX5_CT_STATE_REPLY_BIT : 0;
+ ctstate_mask |= unrel ? MLX5_CT_STATE_RELATED_BIT : 0;
+ ctstate_mask |= uninv ? MLX5_CT_STATE_INVALID_BIT : 0;
+
+ if (rel) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "matching on ct_state +rel isn't supported");
+ return -EOPNOTSUPP;
+ }
+
+ if (inv) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "matching on ct_state +inv isn't supported");
+ return -EOPNOTSUPP;
+ }
if (new) {
NL_SET_ERR_MSG_MOD(extack,
@@ -1562,6 +1584,14 @@ mlx5_tc_ct_free_pre_ct_tables(struct mlx5_ct_ft *ft)
mlx5_tc_ct_free_pre_ct(ft, &ft->pre_ct);
}
+/* To avoid a false lock dependency warning, set the ct_entries_ht lock
+ * class apart from the lock class of the other hashtables involved: when
+ * the last flow of a group is deleted and the group itself is then deleted,
+ * we get into del_sw_flow_group(), which calls rhashtable_destroy() on
+ * fg->ftes_hash and takes that table's ht->mutex, which is a different
+ * mutex from the ht->mutex here.
+ */
+static struct lock_class_key ct_entries_ht_lock_key;
+
static struct mlx5_ct_ft *
mlx5_tc_ct_add_ft_cb(struct mlx5_tc_ct_priv *ct_priv, u16 zone,
struct nf_flowtable *nf_ft)
@@ -1596,6 +1626,8 @@ mlx5_tc_ct_add_ft_cb(struct mlx5_tc_ct_priv *ct_priv, u16 zone,
if (err)
goto err_init;
+ lockdep_set_class(&ft->ct_entries_ht.mutex, &ct_entries_ht_lock_key);
+
err = rhashtable_insert_fast(&ct_priv->zone_ht, &ft->node,
zone_params);
if (err)
@@ -1697,10 +1729,10 @@ __mlx5_tc_ct_flow_offload(struct mlx5_tc_ct_priv *ct_priv,
struct mlx5_ct_ft *ft;
u32 fte_id = 1;
- post_ct_spec = kzalloc(sizeof(*post_ct_spec), GFP_KERNEL);
+ post_ct_spec = kvzalloc(sizeof(*post_ct_spec), GFP_KERNEL);
ct_flow = kzalloc(sizeof(*ct_flow), GFP_KERNEL);
if (!post_ct_spec || !ct_flow) {
- kfree(post_ct_spec);
+ kvfree(post_ct_spec);
kfree(ct_flow);
return ERR_PTR(-ENOMEM);
}
@@ -1810,6 +1842,10 @@ __mlx5_tc_ct_flow_offload(struct mlx5_tc_ct_priv *ct_priv,
ct_flow->post_ct_attr->prio = 0;
ct_flow->post_ct_attr->ft = ct_priv->post_ct;
+ /* Splits were handled before CT */
+ if (ct_priv->ns_type == MLX5_FLOW_NAMESPACE_FDB)
+ ct_flow->post_ct_attr->esw_attr->split_count = 0;
+
ct_flow->post_ct_attr->inner_match_level = MLX5_MATCH_NONE;
ct_flow->post_ct_attr->outer_match_level = MLX5_MATCH_NONE;
ct_flow->post_ct_attr->action &= ~(MLX5_FLOW_CONTEXT_ACTION_DECAP);
@@ -1835,7 +1871,7 @@ __mlx5_tc_ct_flow_offload(struct mlx5_tc_ct_priv *ct_priv,
attr->ct_attr.ct_flow = ct_flow;
dealloc_mod_hdr_actions(&pre_mod_acts);
- kfree(post_ct_spec);
+ kvfree(post_ct_spec);
return rule;
@@ -1856,7 +1892,7 @@ err_alloc_pre:
err_idr:
mlx5_tc_ct_del_ft_cb(ct_priv, ft);
err_ft:
- kfree(post_ct_spec);
+ kvfree(post_ct_spec);
kfree(ct_flow);
netdev_warn(priv->netdev, "Failed to offload ct flow, err %d\n", err);
return ERR_PTR(err);
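
Several tc_ct.c hunks above switch mlx5_flow_spec allocations from kzalloc()/kfree() to kvzalloc()/kvfree(). The spec is large (on the order of a kilobyte), so a physically contiguous allocation can fail under fragmentation; kvzalloc() tries kmalloc() first and falls back to vmalloc(). A minimal sketch of the contract:

static int build_spec_example(void)
{
	struct mlx5_flow_spec *spec;

	/* kvzalloc() may return kmalloc or vmalloc memory; the one rule
	 * is that the buffer must be released with kvfree(), never kfree()
	 */
	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec)
		return -ENOMEM;

	/* ... fill spec->match_criteria / spec->match_value here ... */

	kvfree(spec);
	return 0;
}
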
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_priv.h b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_priv.h
index c223591ffc22..d1599b7b944b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_priv.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_priv.h
@@ -27,6 +27,7 @@ enum {
MLX5E_TC_FLOW_FLAG_L3_TO_L2_DECAP = MLX5E_TC_FLOW_BASE + 8,
MLX5E_TC_FLOW_FLAG_TUN_RX = MLX5E_TC_FLOW_BASE + 9,
MLX5E_TC_FLOW_FLAG_FAILED = MLX5E_TC_FLOW_BASE + 10,
+ MLX5E_TC_FLOW_FLAG_SAMPLE = MLX5E_TC_FLOW_BASE + 11,
};
struct mlx5e_tc_flow_parse_attr {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.h b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.h
index e1271998b937..9350ca05ce65 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.h
@@ -83,10 +83,12 @@ int mlx5e_tc_tun_update_header_ipv6(struct mlx5e_priv *priv,
static inline int
mlx5e_tc_tun_create_header_ipv6(struct mlx5e_priv *priv,
struct net_device *mirred_dev,
- struct mlx5e_encap_entry *e) { return -EOPNOTSUPP; }
-int mlx5e_tc_tun_update_header_ipv6(struct mlx5e_priv *priv,
- struct net_device *mirred_dev,
- struct mlx5e_encap_entry *e)
+ struct mlx5e_encap_entry *e)
+{ return -EOPNOTSUPP; }
+static inline int
+mlx5e_tc_tun_update_header_ipv6(struct mlx5e_priv *priv,
+ struct net_device *mirred_dev,
+ struct mlx5e_encap_entry *e)
{ return -EOPNOTSUPP; }
#endif
int mlx5e_tc_tun_route_lookup(struct mlx5e_priv *priv,
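
The tc_tun.h hunk fixes a header bug: the compiled-out stub for mlx5e_tc_tun_update_header_ipv6() lacked "static inline", making it an external function definition, so every translation unit including the header emitted its own copy and the link failed with multiple-definition errors. A minimal illustration of the correct pattern (feature and function names hypothetical):

struct net_device;

#ifdef CONFIG_EXAMPLE_TUN
int example_update_header(struct net_device *dev);	/* real body in a .c file */
#else
static inline int example_update_header(struct net_device *dev)
{ return -EOPNOTSUPP; }	/* static inline: no external symbol emitted */
#endif
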
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c
index 9f16ad2c0710..593503bc4d07 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c
@@ -2,6 +2,7 @@
/* Copyright (c) 2021 Mellanox Technologies. */
#include <net/fib_notifier.h>
+#include <net/nexthop.h>
#include "tc_tun_encap.h"
#include "en_tc.h"
#include "tc_tun.h"
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/trap.c b/drivers/net/ethernet/mellanox/mlx5/core/en/trap.c
index 37fc1d77ded7..86ab4e864fe6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/trap.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/trap.c
@@ -30,172 +30,62 @@ static int mlx5e_trap_napi_poll(struct napi_struct *napi, int budget)
return work_done;
}
-static int mlx5e_alloc_trap_rq(struct mlx5e_priv *priv, struct mlx5e_rq_param *rqp,
- struct mlx5e_rq_stats *stats, struct mlx5e_params *params,
- struct mlx5e_ch_stats *ch_stats,
+static void mlx5e_init_trap_rq(struct mlx5e_trap *t, struct mlx5e_params *params,
struct mlx5e_rq *rq)
{
- void *rqc_wq = MLX5_ADDR_OF(rqc, rqp->rqc, wq);
- struct mlx5_core_dev *mdev = priv->mdev;
- struct page_pool_params pp_params = {};
- int node = dev_to_node(mdev->device);
- u32 pool_size;
- int wq_sz;
- int err;
- int i;
-
- rqp->wq.db_numa_node = node;
-
- rq->wq_type = params->rq_wq_type;
- rq->pdev = mdev->device;
- rq->netdev = priv->netdev;
- rq->mdev = mdev;
- rq->priv = priv;
- rq->stats = stats;
- rq->clock = &mdev->clock;
- rq->tstamp = &priv->tstamp;
- rq->hw_mtu = MLX5E_SW2HW_MTU(params, params->sw_mtu);
-
+ struct mlx5_core_dev *mdev = t->mdev;
+ struct mlx5e_priv *priv = t->priv;
+
+ rq->wq_type = params->rq_wq_type;
+ rq->pdev = mdev->device;
+ rq->netdev = priv->netdev;
+ rq->priv = priv;
+ rq->clock = &mdev->clock;
+ rq->tstamp = &priv->tstamp;
+ rq->mdev = mdev;
+ rq->hw_mtu = MLX5E_SW2HW_MTU(params, params->sw_mtu);
+ rq->stats = &priv->trap_stats.rq;
+ rq->ptp_cyc2time = mlx5_rq_ts_translator(mdev);
xdp_rxq_info_unused(&rq->xdp_rxq);
-
- rq->buff.map_dir = DMA_FROM_DEVICE;
- rq->buff.headroom = mlx5e_get_rq_headroom(mdev, params, NULL);
- pool_size = 1 << params->log_rq_mtu_frames;
-
- err = mlx5_wq_cyc_create(mdev, &rqp->wq, rqc_wq, &rq->wqe.wq, &rq->wq_ctrl);
- if (err)
- return err;
-
- rq->wqe.wq.db = &rq->wqe.wq.db[MLX5_RCV_DBR];
-
- wq_sz = mlx5_wq_cyc_get_size(&rq->wqe.wq);
-
- rq->wqe.info = rqp->frags_info;
- rq->buff.frame0_sz = rq->wqe.info.arr[0].frag_stride;
- rq->wqe.frags = kvzalloc_node(array_size(sizeof(*rq->wqe.frags),
- (wq_sz << rq->wqe.info.log_num_frags)),
- GFP_KERNEL, node);
- if (!rq->wqe.frags) {
- err = -ENOMEM;
- goto err_wq_cyc_destroy;
- }
-
- err = mlx5e_init_di_list(rq, wq_sz, node);
- if (err)
- goto err_free_frags;
-
- rq->mkey_be = cpu_to_be32(priv->mdev->mlx5e_res.mkey.key);
-
mlx5e_rq_set_trap_handlers(rq, params);
-
- /* Create a page_pool and register it with rxq */
- pp_params.order = 0;
- pp_params.flags = 0; /* No-internal DMA mapping in page_pool */
- pp_params.pool_size = pool_size;
- pp_params.nid = node;
- pp_params.dev = mdev->device;
- pp_params.dma_dir = rq->buff.map_dir;
-
- /* page_pool can be used even when there is no rq->xdp_prog,
- * given page_pool does not handle DMA mapping there is no
- * required state to clear. And page_pool gracefully handle
- * elevated refcnt.
- */
- rq->page_pool = page_pool_create(&pp_params);
- if (IS_ERR(rq->page_pool)) {
- err = PTR_ERR(rq->page_pool);
- rq->page_pool = NULL;
- goto err_free_di_list;
- }
- for (i = 0; i < wq_sz; i++) {
- struct mlx5e_rx_wqe_cyc *wqe =
- mlx5_wq_cyc_get_wqe(&rq->wqe.wq, i);
- int f;
-
- for (f = 0; f < rq->wqe.info.num_frags; f++) {
- u32 frag_size = rq->wqe.info.arr[f].frag_size |
- MLX5_HW_START_PADDING;
-
- wqe->data[f].byte_count = cpu_to_be32(frag_size);
- wqe->data[f].lkey = rq->mkey_be;
- }
- /* check if num_frags is not a pow of two */
- if (rq->wqe.info.num_frags < (1 << rq->wqe.info.log_num_frags)) {
- wqe->data[f].byte_count = 0;
- wqe->data[f].lkey = cpu_to_be32(MLX5_INVALID_LKEY);
- wqe->data[f].addr = 0;
- }
- }
- return 0;
-
-err_free_di_list:
- mlx5e_free_di_list(rq);
-err_free_frags:
- kvfree(rq->wqe.frags);
-err_wq_cyc_destroy:
- mlx5_wq_destroy(&rq->wq_ctrl);
-
- return err;
}
-static void mlx5e_free_trap_rq(struct mlx5e_rq *rq)
-{
- page_pool_destroy(rq->page_pool);
- mlx5e_free_di_list(rq);
- kvfree(rq->wqe.frags);
- mlx5_wq_destroy(&rq->wq_ctrl);
-}
-
-static int mlx5e_open_trap_rq(struct mlx5e_priv *priv, struct napi_struct *napi,
- struct mlx5e_rq_stats *stats, struct mlx5e_params *params,
- struct mlx5e_rq_param *rq_param,
- struct mlx5e_ch_stats *ch_stats,
- struct mlx5e_rq *rq)
+static int mlx5e_open_trap_rq(struct mlx5e_priv *priv, struct mlx5e_trap *t)
{
+ struct mlx5e_rq_param *rq_param = &t->rq_param;
struct mlx5_core_dev *mdev = priv->mdev;
struct mlx5e_create_cq_param ccp = {};
struct dim_cq_moder trap_moder = {};
- struct mlx5e_cq *cq = &rq->cq;
+ struct mlx5e_rq *rq = &t->rq;
+ int node;
int err;
- ccp.node = dev_to_node(mdev->device);
- ccp.ch_stats = ch_stats;
- ccp.napi = napi;
+ node = dev_to_node(mdev->device);
+
+ ccp.node = node;
+ ccp.ch_stats = t->stats;
+ ccp.napi = &t->napi;
ccp.ix = 0;
- err = mlx5e_open_cq(priv, trap_moder, &rq_param->cqp, &ccp, cq);
+ err = mlx5e_open_cq(priv, trap_moder, &rq_param->cqp, &ccp, &rq->cq);
if (err)
return err;
- err = mlx5e_alloc_trap_rq(priv, rq_param, stats, params, ch_stats, rq);
+ mlx5e_init_trap_rq(t, &t->params, rq);
+ err = mlx5e_open_rq(&t->params, rq_param, NULL, node, rq);
if (err)
goto err_destroy_cq;
- err = mlx5e_create_rq(rq, rq_param);
- if (err)
- goto err_free_rq;
-
- err = mlx5e_modify_rq_state(rq, MLX5_RQC_STATE_RST, MLX5_RQC_STATE_RDY);
- if (err)
- goto err_destroy_rq;
-
return 0;
-err_destroy_rq:
- mlx5e_destroy_rq(rq);
- mlx5e_free_rx_descs(rq);
-err_free_rq:
- mlx5e_free_trap_rq(rq);
err_destroy_cq:
- mlx5e_close_cq(cq);
+ mlx5e_close_cq(&rq->cq);
return err;
}
static void mlx5e_close_trap_rq(struct mlx5e_rq *rq)
{
- mlx5e_destroy_rq(rq);
- mlx5e_free_rx_descs(rq);
- mlx5e_free_trap_rq(rq);
+ mlx5e_close_rq(rq);
mlx5e_close_cq(&rq->cq);
}
@@ -213,7 +103,7 @@ static int mlx5e_create_trap_direct_rq_tir(struct mlx5_core_dev *mdev, struct ml
return -ENOMEM;
tirc = MLX5_ADDR_OF(create_tir_in, in, ctx);
- MLX5_SET(tirc, tirc, transport_domain, mdev->mlx5e_res.td.tdn);
+ MLX5_SET(tirc, tirc, transport_domain, mdev->mlx5e_res.hw_objs.td.tdn);
MLX5_SET(tirc, tirc, rx_hash_fn, MLX5_RX_HASH_FN_NONE);
MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_DIRECT);
MLX5_SET(tirc, tirc, inline_rqn, rqn);
@@ -228,24 +118,16 @@ static void mlx5e_destroy_trap_direct_rq_tir(struct mlx5_core_dev *mdev, struct
mlx5e_destroy_tir(mdev, tir);
}
-static void mlx5e_activate_trap_rq(struct mlx5e_rq *rq)
-{
- set_bit(MLX5E_RQ_STATE_ENABLED, &rq->state);
-}
-
-static void mlx5e_deactivate_trap_rq(struct mlx5e_rq *rq)
-{
- clear_bit(MLX5E_RQ_STATE_ENABLED, &rq->state);
-}
-
-static void mlx5e_build_trap_params(struct mlx5e_priv *priv, struct mlx5e_trap *t)
+static void mlx5e_build_trap_params(struct mlx5_core_dev *mdev,
+ int max_mtu, u16 q_counter,
+ struct mlx5e_trap *t)
{
struct mlx5e_params *params = &t->params;
params->rq_wq_type = MLX5_WQ_TYPE_CYCLIC;
- mlx5e_init_rq_type_params(priv->mdev, params);
- params->sw_mtu = priv->netdev->max_mtu;
- mlx5e_build_rq_param(priv, params, NULL, &t->rq_param);
+ mlx5e_init_rq_type_params(mdev, params);
+ params->sw_mtu = max_mtu;
+ mlx5e_build_rq_param(mdev, params, NULL, q_counter, &t->rq_param);
}
static struct mlx5e_trap *mlx5e_open_trap(struct mlx5e_priv *priv)
@@ -259,23 +141,19 @@ static struct mlx5e_trap *mlx5e_open_trap(struct mlx5e_priv *priv)
if (!t)
return ERR_PTR(-ENOMEM);
- mlx5e_build_trap_params(priv, t);
+ mlx5e_build_trap_params(priv->mdev, netdev->max_mtu, priv->q_counter, t);
t->priv = priv;
t->mdev = priv->mdev;
t->tstamp = &priv->tstamp;
t->pdev = mlx5_core_dma_dev(priv->mdev);
t->netdev = priv->netdev;
- t->mkey_be = cpu_to_be32(priv->mdev->mlx5e_res.mkey.key);
+ t->mkey_be = cpu_to_be32(priv->mdev->mlx5e_res.hw_objs.mkey.key);
t->stats = &priv->trap_stats.ch;
netif_napi_add(netdev, &t->napi, mlx5e_trap_napi_poll, 64);
- err = mlx5e_open_trap_rq(priv, &t->napi,
- &priv->trap_stats.rq,
- &t->params, &t->rq_param,
- &priv->trap_stats.ch,
- &t->rq);
+ err = mlx5e_open_trap_rq(priv, t);
if (unlikely(err))
goto err_napi_del;
@@ -304,15 +182,14 @@ void mlx5e_close_trap(struct mlx5e_trap *trap)
static void mlx5e_activate_trap(struct mlx5e_trap *trap)
{
napi_enable(&trap->napi);
- mlx5e_activate_trap_rq(&trap->rq);
- napi_schedule(&trap->napi);
+ mlx5e_activate_rq(&trap->rq);
}
void mlx5e_deactivate_trap(struct mlx5e_priv *priv)
{
struct mlx5e_trap *trap = priv->en_trap;
- mlx5e_deactivate_trap_rq(&trap->rq);
+ mlx5e_deactivate_rq(&trap->rq);
napi_disable(&trap->napi);
}
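
The trap.c rewrite deletes the hand-rolled RQ allocation (page pool, frag arrays, WQE init) and instead fills only the software fields in mlx5e_init_trap_rq(), then defers to the generic mlx5e_open_rq(), which now takes params, rq_param and a NUMA node directly rather than a channel. A hedged sketch of the resulting lifecycle, with error handling trimmed:

static int trap_rq_open_example(struct mlx5e_trap *t)
{
	int node = dev_to_node(t->mdev->device);

	/* software-only field setup, no hardware commands issued */
	mlx5e_init_trap_rq(t, &t->params, &t->rq);

	/* common path: allocate, create the RQ and move it RST -> RDY;
	 * teardown is the matching generic mlx5e_close_rq()
	 */
	return mlx5e_open_rq(&t->params, &t->rq_param, NULL, node, &t->rq);
}
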
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
index 2e3e78b0f333..2f0df5cc1a2d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
@@ -500,7 +500,7 @@ int mlx5e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
{
struct mlx5e_priv *priv = netdev_priv(dev);
struct mlx5e_xdpsq *sq;
- int drops = 0;
+ int nxmit = 0;
int sq_num;
int i;
@@ -529,11 +529,8 @@ int mlx5e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
xdptxd.dma_addr = dma_map_single(sq->pdev, xdptxd.data,
xdptxd.len, DMA_TO_DEVICE);
- if (unlikely(dma_mapping_error(sq->pdev, xdptxd.dma_addr))) {
- xdp_return_frame_rx_napi(xdpf);
- drops++;
- continue;
- }
+ if (unlikely(dma_mapping_error(sq->pdev, xdptxd.dma_addr)))
+ break;
xdpi.mode = MLX5E_XDP_XMIT_MODE_FRAME;
xdpi.frame.xdpf = xdpf;
@@ -544,9 +541,9 @@ int mlx5e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
if (unlikely(!ret)) {
dma_unmap_single(sq->pdev, xdptxd.dma_addr,
xdptxd.len, DMA_TO_DEVICE);
- xdp_return_frame_rx_napi(xdpf);
- drops++;
+ break;
}
+ nxmit++;
}
if (flags & XDP_XMIT_FLUSH) {
@@ -555,7 +552,7 @@ int mlx5e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
mlx5e_xmit_xdp_doorbell(sq);
}
- return n - drops;
+ return nxmit;
}
void mlx5e_xdp_rx_poll_complete(struct mlx5e_rq *rq)
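
The xdp.c hunks convert mlx5e_xdp_xmit() to the updated ndo_xdp_xmit contract: instead of freeing failed frames locally and returning "n - drops", the driver consumes frames in order, stops at the first failure, and returns how many were accepted; ownership of the unsent tail stays with the caller. A hedged sketch of that contract (struct example_sq and example_try_enqueue() are hypothetical):

static int example_xdp_xmit(struct example_sq *sq, int n,
			    struct xdp_frame **frames)
{
	int nxmit = 0;
	int i;

	for (i = 0; i < n; i++) {
		if (!example_try_enqueue(sq, frames[i]))
			break;	/* do not free: caller owns frames[nxmit..n) */
		nxmit++;
	}
	return nxmit;	/* number of frames actually accepted */
}
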
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c
index f4bce1365639..a8315f166696 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c
@@ -35,13 +35,59 @@ bool mlx5e_validate_xsk_param(struct mlx5e_params *params,
}
}
-static void mlx5e_build_xsk_cparam(struct mlx5e_priv *priv,
+static void mlx5e_build_xsk_cparam(struct mlx5_core_dev *mdev,
struct mlx5e_params *params,
struct mlx5e_xsk_param *xsk,
+ u16 q_counter,
struct mlx5e_channel_param *cparam)
{
- mlx5e_build_rq_param(priv, params, xsk, &cparam->rq);
- mlx5e_build_xdpsq_param(priv, params, &cparam->xdp_sq);
+ mlx5e_build_rq_param(mdev, params, xsk, q_counter, &cparam->rq);
+ mlx5e_build_xdpsq_param(mdev, params, &cparam->xdp_sq);
+}
+
+static int mlx5e_init_xsk_rq(struct mlx5e_channel *c,
+ struct mlx5e_params *params,
+ struct xsk_buff_pool *pool,
+ struct mlx5e_xsk_param *xsk,
+ struct mlx5e_rq *rq)
+{
+ struct mlx5_core_dev *mdev = c->mdev;
+ int rq_xdp_ix;
+ int err;
+
+ rq->wq_type = params->rq_wq_type;
+ rq->pdev = c->pdev;
+ rq->netdev = c->netdev;
+ rq->priv = c->priv;
+ rq->tstamp = c->tstamp;
+ rq->clock = &mdev->clock;
+ rq->icosq = &c->icosq;
+ rq->ix = c->ix;
+ rq->mdev = mdev;
+ rq->hw_mtu = MLX5E_SW2HW_MTU(params, params->sw_mtu);
+ rq->xdpsq = &c->rq_xdpsq;
+ rq->xsk_pool = pool;
+ rq->stats = &c->priv->channel_stats[c->ix].xskrq;
+ rq->ptp_cyc2time = mlx5_rq_ts_translator(mdev);
+ rq_xdp_ix = c->ix + params->num_channels * MLX5E_RQ_GROUP_XSK;
+ err = mlx5e_rq_set_handlers(rq, params, xsk);
+ if (err)
+ return err;
+
+ return xdp_rxq_info_reg(&rq->xdp_rxq, rq->netdev, rq_xdp_ix, 0);
+}
+
+static int mlx5e_open_xsk_rq(struct mlx5e_channel *c, struct mlx5e_params *params,
+ struct mlx5e_rq_param *rq_params, struct xsk_buff_pool *pool,
+ struct mlx5e_xsk_param *xsk)
+{
+ int err;
+
+ err = mlx5e_init_xsk_rq(c, params, pool, xsk, &c->xskrq);
+ if (err)
+ return err;
+
+ return mlx5e_open_rq(params, rq_params, xsk, cpu_to_node(c->cpu), &c->xskrq);
}
int mlx5e_open_xsk(struct mlx5e_priv *priv, struct mlx5e_params *params,
@@ -61,14 +107,14 @@ int mlx5e_open_xsk(struct mlx5e_priv *priv, struct mlx5e_params *params,
if (!cparam)
return -ENOMEM;
- mlx5e_build_xsk_cparam(priv, params, xsk, cparam);
+ mlx5e_build_xsk_cparam(priv->mdev, params, xsk, priv->q_counter, cparam);
err = mlx5e_open_cq(c->priv, params->rx_cq_moderation, &cparam->rq.cqp, &ccp,
&c->xskrq.cq);
if (unlikely(err))
goto err_free_cparam;
- err = mlx5e_open_rq(c, params, &cparam->rq, xsk, pool, &c->xskrq);
+ err = mlx5e_open_xsk_rq(c, params, &cparam->rq, pool, xsk);
if (unlikely(err))
goto err_close_rx_cq;
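
As with the trap queue, the XSK RQ now gets a small init helper that fills the software fields, including registering xdp_rxq_info, before handing off to the generic mlx5e_open_rq(). A hedged sketch of the ordering (init_rq_fields_example() is hypothetical):

static int open_special_rq_example(struct mlx5e_channel *c,
				   struct mlx5e_params *params,
				   struct mlx5e_rq_param *rqp,
				   struct mlx5e_rq *rq)
{
	int err;

	/* 1) software-only init, including xdp_rxq_info registration */
	err = init_rq_fields_example(c, params, rq);
	if (err)
		return err;

	/* 2) common hardware path: create the RQ and move it RST -> RDY */
	return mlx5e_open_rq(params, rqp, NULL, cpu_to_node(c->cpu), rq);
}
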
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h
index cc0efac7b812..00af0b831a28 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h
@@ -123,11 +123,10 @@ static inline bool mlx5e_accel_tx_begin(struct net_device *dev,
mlx5e_udp_gso_handle_tx_skb(skb);
#ifdef CONFIG_MLX5_EN_TLS
- if (test_bit(MLX5E_SQ_STATE_TLS, &sq->state)) {
- /* May send SKBs and WQEs. */
+ /* May send SKBs and WQEs. */
+ if (mlx5e_tls_skb_offloaded(skb))
if (unlikely(!mlx5e_tls_handle_tx_skb(dev, sq, skb, &state->tls)))
return false;
- }
#endif
#ifdef CONFIG_MLX5_EN_IPSEC
@@ -186,7 +185,7 @@ static inline void mlx5e_accel_tx_finish(struct mlx5e_txqsq *sq,
struct mlx5_wqe_inline_seg *inlseg)
{
#ifdef CONFIG_MLX5_EN_TLS
- mlx5e_tls_handle_tx_wqe(sq, &wqe->ctrl, &state->tls);
+ mlx5e_tls_handle_tx_wqe(&wqe->ctrl, &state->tls);
#endif
#ifdef CONFIG_MLX5_EN_IPSEC
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c
index 381a9c8c9da9..34119ce92031 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c
@@ -60,7 +60,7 @@ static int rx_err_add_rule(struct mlx5e_priv *priv,
struct mlx5_flow_spec *spec;
int err = 0;
- spec = kzalloc(sizeof(*spec), GFP_KERNEL);
+ spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
if (!spec)
return -ENOMEM;
@@ -101,7 +101,7 @@ out:
if (err)
mlx5_modify_header_dealloc(mdev, modify_hdr);
out_spec:
- kfree(spec);
+ kvfree(spec);
return err;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h
index baa58b62e8df..aaa579bf9a39 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h
@@ -12,6 +12,9 @@ void mlx5e_ktls_build_netdev(struct mlx5e_priv *priv);
int mlx5e_ktls_init_rx(struct mlx5e_priv *priv);
void mlx5e_ktls_cleanup_rx(struct mlx5e_priv *priv);
int mlx5e_ktls_set_feature_rx(struct net_device *netdev, bool enable);
+struct mlx5e_ktls_resync_resp *
+mlx5e_ktls_rx_resync_create_resp_list(void);
+void mlx5e_ktls_rx_resync_destroy_resp_list(struct mlx5e_ktls_resync_resp *resp_list);
#else
static inline void mlx5e_ktls_build_netdev(struct mlx5e_priv *priv)
@@ -33,6 +36,14 @@ static inline int mlx5e_ktls_set_feature_rx(struct net_device *netdev, bool enab
return -EOPNOTSUPP;
}
+static inline struct mlx5e_ktls_resync_resp *
+mlx5e_ktls_rx_resync_create_resp_list(void)
+{
+ return ERR_PTR(-EOPNOTSUPP);
+}
+
+static inline void
+mlx5e_ktls_rx_resync_destroy_resp_list(struct mlx5e_ktls_resync_resp *resp_list) {}
#endif
#endif /* __MLX5E_TLS_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c
index 19d22a63313f..4e58fade7a60 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c
@@ -56,6 +56,7 @@ struct mlx5e_ktls_offload_context_rx {
/* resync */
struct mlx5e_ktls_rx_resync_ctx resync;
+ struct list_head list;
};
static bool mlx5e_ktls_priv_rx_put(struct mlx5e_ktls_offload_context_rx *priv_rx)
@@ -72,6 +73,32 @@ static void mlx5e_ktls_priv_rx_get(struct mlx5e_ktls_offload_context_rx *priv_rx
refcount_inc(&priv_rx->resync.refcnt);
}
+struct mlx5e_ktls_resync_resp {
+ /* protects list changes */
+ spinlock_t lock;
+ struct list_head list;
+};
+
+void mlx5e_ktls_rx_resync_destroy_resp_list(struct mlx5e_ktls_resync_resp *resp_list)
+{
+ kvfree(resp_list);
+}
+
+struct mlx5e_ktls_resync_resp *
+mlx5e_ktls_rx_resync_create_resp_list(void)
+{
+ struct mlx5e_ktls_resync_resp *resp_list;
+
+ resp_list = kvzalloc(sizeof(*resp_list), GFP_KERNEL);
+ if (!resp_list)
+ return ERR_PTR(-ENOMEM);
+
+ INIT_LIST_HEAD(&resp_list->list);
+ spin_lock_init(&resp_list->lock);
+
+ return resp_list;
+}
+
static int mlx5e_ktls_create_tir(struct mlx5_core_dev *mdev, u32 *tirn, u32 rqtn)
{
int err, inlen;
@@ -85,7 +112,7 @@ static int mlx5e_ktls_create_tir(struct mlx5_core_dev *mdev, u32 *tirn, u32 rqtn
tirc = MLX5_ADDR_OF(create_tir_in, in, ctx);
- MLX5_SET(tirc, tirc, transport_domain, mdev->mlx5e_res.td.tdn);
+ MLX5_SET(tirc, tirc, transport_domain, mdev->mlx5e_res.hw_objs.td.tdn);
MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_INDIRECT);
MLX5_SET(tirc, tirc, rx_hash_fn, MLX5_RX_HASH_FN_INVERTED_XOR8);
MLX5_SET(tirc, tirc, indirect_table, rqtn);
@@ -119,8 +146,7 @@ out:
complete(&priv_rx->add_ctx);
}
-static void accel_rule_init(struct accel_rule *rule, struct mlx5e_priv *priv,
- struct sock *sk)
+static void accel_rule_init(struct accel_rule *rule, struct mlx5e_priv *priv)
{
INIT_WORK(&rule->work, accel_rule_handle_work);
rule->priv = priv;
@@ -359,33 +385,32 @@ static void resync_init(struct mlx5e_ktls_rx_resync_ctx *resync,
/* Function can be called with the refcount being either elevated or not.
* It does not affect the refcount.
*/
-static int resync_handle_seq_match(struct mlx5e_ktls_offload_context_rx *priv_rx,
- struct mlx5e_channel *c)
+static void resync_handle_seq_match(struct mlx5e_ktls_offload_context_rx *priv_rx,
+ struct mlx5e_channel *c)
{
struct tls12_crypto_info_aes_gcm_128 *info = &priv_rx->crypto_info;
- struct mlx5_wqe_ctrl_seg *cseg;
+ struct mlx5e_ktls_resync_resp *ktls_resync;
struct mlx5e_icosq *sq;
- int err;
+ bool trigger_poll;
memcpy(info->rec_seq, &priv_rx->resync.sw_rcd_sn_be, sizeof(info->rec_seq));
- err = 0;
sq = &c->async_icosq;
- spin_lock_bh(&c->async_icosq_lock);
+ ktls_resync = sq->ktls_resync;
- cseg = post_static_params(sq, priv_rx);
- if (IS_ERR(cseg)) {
- priv_rx->rq_stats->tls_resync_res_skip++;
- err = PTR_ERR(cseg);
- goto unlock;
- }
- /* Do not increment priv_rx refcnt, CQE handling is empty */
- mlx5e_notify_hw(&sq->wq, sq->pc, sq->uar_map, cseg);
- priv_rx->rq_stats->tls_resync_res_ok++;
-unlock:
- spin_unlock_bh(&c->async_icosq_lock);
+ spin_lock_bh(&ktls_resync->lock);
+ list_add_tail(&priv_rx->list, &ktls_resync->list);
+ trigger_poll = !test_and_set_bit(MLX5E_SQ_STATE_PENDING_TLS_RX_RESYNC, &sq->state);
+ spin_unlock_bh(&ktls_resync->lock);
- return err;
+ if (!trigger_poll)
+ return;
+
+ if (!napi_if_scheduled_mark_missed(&c->napi)) {
+ spin_lock_bh(&c->async_icosq_lock);
+ mlx5e_trigger_irq(sq);
+ spin_unlock_bh(&c->async_icosq_lock);
+ }
}
/* Function can be called with the refcount being either elevated or not.
@@ -618,7 +643,7 @@ int mlx5e_ktls_add_rx(struct net_device *netdev, struct sock *sk,
init_completion(&priv_rx->add_ctx);
- accel_rule_init(&priv_rx->rule, priv, sk);
+ accel_rule_init(&priv_rx->rule, priv);
resync = &priv_rx->resync;
resync_init(resync, priv);
tls_offload_ctx_rx(tls_ctx)->resync_async = &resync->core;
@@ -676,3 +701,65 @@ void mlx5e_ktls_del_rx(struct net_device *netdev, struct tls_context *tls_ctx)
*/
mlx5e_ktls_priv_rx_put(priv_rx);
}
+
+bool mlx5e_ktls_rx_handle_resync_list(struct mlx5e_channel *c, int budget)
+{
+ struct mlx5e_ktls_offload_context_rx *priv_rx, *tmp;
+ struct mlx5e_ktls_resync_resp *ktls_resync;
+ struct mlx5_wqe_ctrl_seg *db_cseg;
+ struct mlx5e_icosq *sq;
+ LIST_HEAD(local_list);
+ int i, j;
+
+ sq = &c->async_icosq;
+
+ if (unlikely(!test_bit(MLX5E_SQ_STATE_ENABLED, &sq->state)))
+ return false;
+
+ ktls_resync = sq->ktls_resync;
+ db_cseg = NULL;
+ i = 0;
+
+ spin_lock(&ktls_resync->lock);
+ list_for_each_entry_safe(priv_rx, tmp, &ktls_resync->list, list) {
+ list_move(&priv_rx->list, &local_list);
+ if (++i == budget)
+ break;
+ }
+ if (list_empty(&ktls_resync->list))
+ clear_bit(MLX5E_SQ_STATE_PENDING_TLS_RX_RESYNC, &sq->state);
+ spin_unlock(&ktls_resync->lock);
+
+ spin_lock(&c->async_icosq_lock);
+ for (j = 0; j < i; j++) {
+ struct mlx5_wqe_ctrl_seg *cseg;
+
+ priv_rx = list_first_entry(&local_list,
+ struct mlx5e_ktls_offload_context_rx,
+ list);
+ cseg = post_static_params(sq, priv_rx);
+ if (IS_ERR(cseg))
+ break;
+ list_del(&priv_rx->list);
+ db_cseg = cseg;
+ }
+ if (db_cseg)
+ mlx5e_notify_hw(&sq->wq, sq->pc, sq->uar_map, db_cseg);
+ spin_unlock(&c->async_icosq_lock);
+
+ priv_rx->rq_stats->tls_resync_res_ok += j;
+
+ if (!list_empty(&local_list)) {
+ /* This happens only if ICOSQ is full.
+ * There is no need to mark busy or explicitly ask for a NAPI cycle,
+ * it will be triggered by the outstanding ICOSQ completions.
+ */
+ spin_lock(&ktls_resync->lock);
+ list_splice(&local_list, &ktls_resync->list);
+ set_bit(MLX5E_SQ_STATE_PENDING_TLS_RX_RESYNC, &sq->state);
+ spin_unlock(&ktls_resync->lock);
+ priv_rx->rq_stats->tls_resync_res_retry++;
+ }
+
+ return i == budget;
+}
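
The ktls_rx.c change stops posting static params directly from the resync context: resync_handle_seq_match() now queues the offload context on a per-icosq list, sets a pending bit and kicks NAPI, and mlx5e_ktls_rx_handle_resync_list() drains up to `budget` entries under the icosq lock, ringing the doorbell once for the whole batch. A hedged sketch of that splice-and-batch pattern (entry type hypothetical):

struct work_item_example {	/* hypothetical list entry */
	struct list_head list;
};

static int drain_example(struct list_head *shared, spinlock_t *lock,
			 int budget)
{
	struct work_item_example *it, *tmp;
	LIST_HEAD(local);
	int n = 0;

	/* producers append under the same short spinlock */
	spin_lock(lock);
	list_for_each_entry_safe(it, tmp, shared, list) {
		list_move(&it->list, &local);
		if (++n == budget)
			break;
	}
	spin_unlock(lock);

	/* post every entry on @local, then issue a single doorbell for
	 * the batch, as the one mlx5e_notify_hw() call above does
	 */
	return n;
}
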
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_txrx.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_txrx.h
index ee04e916fa21..8f79335057dc 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_txrx.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_txrx.h
@@ -40,6 +40,14 @@ mlx5e_ktls_tx_try_handle_resync_dump_comp(struct mlx5e_txqsq *sq,
}
return false;
}
+
+bool mlx5e_ktls_rx_handle_resync_list(struct mlx5e_channel *c, int budget);
+
+static inline bool
+mlx5e_ktls_rx_pending_resync_list(struct mlx5e_channel *c, int budget)
+{
+ return budget && test_bit(MLX5E_SQ_STATE_PENDING_TLS_RX_RESYNC, &c->async_icosq.state);
+}
#else
static inline bool
mlx5e_ktls_tx_try_handle_resync_dump_comp(struct mlx5e_txqsq *sq,
@@ -49,6 +57,18 @@ mlx5e_ktls_tx_try_handle_resync_dump_comp(struct mlx5e_txqsq *sq,
return false;
}
+static inline bool
+mlx5e_ktls_rx_handle_resync_list(struct mlx5e_channel *c, int budget)
+{
+ return false;
+}
+
+static inline bool
+mlx5e_ktls_rx_pending_resync_list(struct mlx5e_channel *c, int budget)
+{
+ return false;
+}
+
#endif /* CONFIG_MLX5_EN_TLS */
#endif /* __MLX5E_TLS_TXRX_H__ */

diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c
index 2b51d3222ca1..82dc09aaa7fc 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c
@@ -263,9 +263,6 @@ bool mlx5e_tls_handle_tx_skb(struct net_device *netdev, struct mlx5e_txqsq *sq,
int datalen;
u32 skb_seq;
- if (!skb->sk || !tls_is_sk_tx_device_offloaded(skb->sk))
- return true;
-
datalen = skb->len - (skb_transport_offset(skb) + tcp_hdrlen(skb));
if (!datalen)
return true;
@@ -301,12 +298,6 @@ err_out:
return false;
}
-void mlx5e_tls_handle_tx_wqe(struct mlx5e_txqsq *sq, struct mlx5_wqe_ctrl_seg *cseg,
- struct mlx5e_accel_tx_tls_state *state)
-{
- cseg->tis_tir_num = cpu_to_be32(state->tls_tisn << 8);
-}
-
static int tls_update_resync_sn(struct net_device *netdev,
struct sk_buff *skb,
struct mlx5e_tls_metadata *mdata)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.h
index 9923132c9440..0ca0a023fb8d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.h
@@ -47,8 +47,18 @@ u16 mlx5e_tls_get_stop_room(struct mlx5_core_dev *mdev, struct mlx5e_params *par
bool mlx5e_tls_handle_tx_skb(struct net_device *netdev, struct mlx5e_txqsq *sq,
struct sk_buff *skb, struct mlx5e_accel_tx_tls_state *state);
-void mlx5e_tls_handle_tx_wqe(struct mlx5e_txqsq *sq, struct mlx5_wqe_ctrl_seg *cseg,
- struct mlx5e_accel_tx_tls_state *state);
+
+static inline bool mlx5e_tls_skb_offloaded(struct sk_buff *skb)
+{
+ return skb->sk && tls_is_sk_tx_device_offloaded(skb->sk);
+}
+
+static inline void
+mlx5e_tls_handle_tx_wqe(struct mlx5_wqe_ctrl_seg *cseg,
+ struct mlx5e_accel_tx_tls_state *state)
+{
+ cseg->tis_tir_num = cpu_to_be32(state->tls_tisn << 8);
+}
void mlx5e_tls_handle_rx_skb_metadata(struct mlx5e_rq *rq, struct sk_buff *skb,
u32 *cqe_bcnt);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
index 39475f6565c7..5cd466ec6492 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
@@ -36,6 +36,32 @@
#include <linux/ipv6.h>
#include "en.h"
+#define ARFS_HASH_SHIFT BITS_PER_BYTE
+#define ARFS_HASH_SIZE BIT(BITS_PER_BYTE)
+
+struct arfs_table {
+ struct mlx5e_flow_table ft;
+ struct mlx5_flow_handle *default_rule;
+ struct hlist_head rules_hash[ARFS_HASH_SIZE];
+};
+
+enum arfs_type {
+ ARFS_IPV4_TCP,
+ ARFS_IPV6_TCP,
+ ARFS_IPV4_UDP,
+ ARFS_IPV6_UDP,
+ ARFS_NUM_TYPES,
+};
+
+struct mlx5e_arfs_tables {
+ struct arfs_table arfs_tables[ARFS_NUM_TYPES];
+ /* Protect aRFS rules list */
+ spinlock_t arfs_lock;
+ struct list_head rules;
+ int last_filter_id;
+ struct workqueue_struct *wq;
+};
+
struct arfs_tuple {
__be16 etype;
u8 ip_proto;
@@ -121,7 +147,7 @@ int mlx5e_arfs_enable(struct mlx5e_priv *priv)
dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
for (i = 0; i < ARFS_NUM_TYPES; i++) {
- dest.ft = priv->fs.arfs.arfs_tables[i].ft.t;
+ dest.ft = priv->fs.arfs->arfs_tables[i].ft.t;
/* Modify ttc rules destination to point on the aRFS FTs */
err = mlx5e_ttc_fwd_dest(priv, arfs_get_tt(i), &dest);
if (err) {
@@ -141,25 +167,31 @@ static void arfs_destroy_table(struct arfs_table *arfs_t)
mlx5e_destroy_flow_table(&arfs_t->ft);
}
-void mlx5e_arfs_destroy_tables(struct mlx5e_priv *priv)
+static void _mlx5e_cleanup_tables(struct mlx5e_priv *priv)
{
int i;
- if (!(priv->netdev->hw_features & NETIF_F_NTUPLE))
- return;
-
arfs_del_rules(priv);
- destroy_workqueue(priv->fs.arfs.wq);
+ destroy_workqueue(priv->fs.arfs->wq);
for (i = 0; i < ARFS_NUM_TYPES; i++) {
- if (!IS_ERR_OR_NULL(priv->fs.arfs.arfs_tables[i].ft.t))
- arfs_destroy_table(&priv->fs.arfs.arfs_tables[i]);
+ if (!IS_ERR_OR_NULL(priv->fs.arfs->arfs_tables[i].ft.t))
+ arfs_destroy_table(&priv->fs.arfs->arfs_tables[i]);
}
}
+void mlx5e_arfs_destroy_tables(struct mlx5e_priv *priv)
+{
+ if (!(priv->netdev->hw_features & NETIF_F_NTUPLE))
+ return;
+
+ _mlx5e_cleanup_tables(priv);
+ kvfree(priv->fs.arfs);
+}
+
static int arfs_add_default_rule(struct mlx5e_priv *priv,
enum arfs_type type)
{
- struct arfs_table *arfs_t = &priv->fs.arfs.arfs_tables[type];
+ struct arfs_table *arfs_t = &priv->fs.arfs->arfs_tables[type];
struct mlx5e_tir *tir = priv->indir_tir;
struct mlx5_flow_destination dest = {};
MLX5_DECLARE_FLOW_ACT(flow_act);
@@ -290,7 +322,7 @@ out:
static int arfs_create_table(struct mlx5e_priv *priv,
enum arfs_type type)
{
- struct mlx5e_arfs_tables *arfs = &priv->fs.arfs;
+ struct mlx5e_arfs_tables *arfs = priv->fs.arfs;
struct mlx5e_flow_table *ft = &arfs->arfs_tables[type].ft;
struct mlx5_flow_table_attr ft_attr = {};
int err;
@@ -330,20 +362,27 @@ int mlx5e_arfs_create_tables(struct mlx5e_priv *priv)
if (!(priv->netdev->hw_features & NETIF_F_NTUPLE))
return 0;
- spin_lock_init(&priv->fs.arfs.arfs_lock);
- INIT_LIST_HEAD(&priv->fs.arfs.rules);
- priv->fs.arfs.wq = create_singlethread_workqueue("mlx5e_arfs");
- if (!priv->fs.arfs.wq)
+ priv->fs.arfs = kvzalloc(sizeof(*priv->fs.arfs), GFP_KERNEL);
+ if (!priv->fs.arfs)
return -ENOMEM;
+ spin_lock_init(&priv->fs.arfs->arfs_lock);
+ INIT_LIST_HEAD(&priv->fs.arfs->rules);
+ priv->fs.arfs->wq = create_singlethread_workqueue("mlx5e_arfs");
+ if (!priv->fs.arfs->wq)
+ goto err;
+
for (i = 0; i < ARFS_NUM_TYPES; i++) {
err = arfs_create_table(priv, i);
if (err)
- goto err;
+ goto err_des;
}
return 0;
+
+err_des:
+ _mlx5e_cleanup_tables(priv);
err:
- mlx5e_arfs_destroy_tables(priv);
+ kvfree(priv->fs.arfs);
return err;
}
@@ -353,13 +392,13 @@ static void arfs_may_expire_flow(struct mlx5e_priv *priv)
{
struct arfs_rule *arfs_rule;
struct hlist_node *htmp;
+ HLIST_HEAD(del_list);
int quota = 0;
int i;
int j;
- HLIST_HEAD(del_list);
- spin_lock_bh(&priv->fs.arfs.arfs_lock);
- mlx5e_for_each_arfs_rule(arfs_rule, htmp, priv->fs.arfs.arfs_tables, i, j) {
+ spin_lock_bh(&priv->fs.arfs->arfs_lock);
+ mlx5e_for_each_arfs_rule(arfs_rule, htmp, priv->fs.arfs->arfs_tables, i, j) {
if (!work_pending(&arfs_rule->arfs_work) &&
rps_may_expire_flow(priv->netdev,
arfs_rule->rxq, arfs_rule->flow_id,
@@ -370,7 +409,7 @@ static void arfs_may_expire_flow(struct mlx5e_priv *priv)
break;
}
}
- spin_unlock_bh(&priv->fs.arfs.arfs_lock);
+ spin_unlock_bh(&priv->fs.arfs->arfs_lock);
hlist_for_each_entry_safe(arfs_rule, htmp, &del_list, hlist) {
if (arfs_rule->rule)
mlx5_del_flow_rules(arfs_rule->rule);
@@ -383,16 +422,16 @@ static void arfs_del_rules(struct mlx5e_priv *priv)
{
struct hlist_node *htmp;
struct arfs_rule *rule;
+ HLIST_HEAD(del_list);
int i;
int j;
- HLIST_HEAD(del_list);
- spin_lock_bh(&priv->fs.arfs.arfs_lock);
- mlx5e_for_each_arfs_rule(rule, htmp, priv->fs.arfs.arfs_tables, i, j) {
+ spin_lock_bh(&priv->fs.arfs->arfs_lock);
+ mlx5e_for_each_arfs_rule(rule, htmp, priv->fs.arfs->arfs_tables, i, j) {
hlist_del_init(&rule->hlist);
hlist_add_head(&rule->hlist, &del_list);
}
- spin_unlock_bh(&priv->fs.arfs.arfs_lock);
+ spin_unlock_bh(&priv->fs.arfs->arfs_lock);
hlist_for_each_entry_safe(rule, htmp, &del_list, hlist) {
cancel_work_sync(&rule->arfs_work);
@@ -436,7 +475,7 @@ static struct arfs_table *arfs_get_table(struct mlx5e_arfs_tables *arfs,
static struct mlx5_flow_handle *arfs_add_rule(struct mlx5e_priv *priv,
struct arfs_rule *arfs_rule)
{
- struct mlx5e_arfs_tables *arfs = &priv->fs.arfs;
+ struct mlx5e_arfs_tables *arfs = priv->fs.arfs;
struct arfs_tuple *tuple = &arfs_rule->tuple;
struct mlx5_flow_handle *rule = NULL;
struct mlx5_flow_destination dest = {};
@@ -554,9 +593,9 @@ static void arfs_handle_work(struct work_struct *work)
mutex_lock(&priv->state_lock);
if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) {
- spin_lock_bh(&priv->fs.arfs.arfs_lock);
+ spin_lock_bh(&priv->fs.arfs->arfs_lock);
hlist_del(&arfs_rule->hlist);
- spin_unlock_bh(&priv->fs.arfs.arfs_lock);
+ spin_unlock_bh(&priv->fs.arfs->arfs_lock);
mutex_unlock(&priv->state_lock);
kfree(arfs_rule);
@@ -609,7 +648,7 @@ static struct arfs_rule *arfs_alloc_rule(struct mlx5e_priv *priv,
tuple->dst_port = fk->ports.dst;
rule->flow_id = flow_id;
- rule->filter_id = priv->fs.arfs.last_filter_id++ % RPS_NO_FILTER;
+ rule->filter_id = priv->fs.arfs->last_filter_id++ % RPS_NO_FILTER;
hlist_add_head(&rule->hlist,
arfs_hash_bucket(arfs_t, tuple->src_port,
@@ -653,7 +692,7 @@ int mlx5e_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
u16 rxq_index, u32 flow_id)
{
struct mlx5e_priv *priv = netdev_priv(dev);
- struct mlx5e_arfs_tables *arfs = &priv->fs.arfs;
+ struct mlx5e_arfs_tables *arfs = priv->fs.arfs;
struct arfs_table *arfs_t;
struct arfs_rule *arfs_rule;
struct flow_keys fk;
@@ -687,7 +726,7 @@ int mlx5e_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
return -ENOMEM;
}
}
- queue_work(priv->fs.arfs.wq, &arfs_rule->arfs_work);
+ queue_work(priv->fs.arfs->wq, &arfs_rule->arfs_work);
spin_unlock_bh(&arfs->arfs_lock);
return arfs_rule->filter_id;
}
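
The en_arfs.c changes move struct mlx5e_arfs_tables out of the embedded priv->fs struct and behind a pointer allocated with kvzalloc() at table-creation time. Teardown is split so the create error path and the public destroy free exactly the same state through one shared helper. A hedged sketch of that symmetry (all names hypothetical):

struct ctx_example {
	struct workqueue_struct *wq;
};

static void cleanup_example(struct ctx_example *ctx)
{
	/* shared by the create error path and the destroy path */
	destroy_workqueue(ctx->wq);
}

static int create_example(struct ctx_example **out)
{
	struct ctx_example *ctx = kvzalloc(sizeof(*ctx), GFP_KERNEL);

	if (!ctx)
		return -ENOMEM;
	ctx->wq = create_singlethread_workqueue("example");
	if (!ctx->wq) {
		kvfree(ctx);	/* nothing else to unwind yet */
		return -ENOMEM;
	}
	*out = ctx;
	return 0;
}
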
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_common.c b/drivers/net/ethernet/mellanox/mlx5/core/en_common.c
index a6cf008057b5..8c166ee56d8b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_common.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_common.c
@@ -38,15 +38,16 @@
int mlx5e_create_tir(struct mlx5_core_dev *mdev, struct mlx5e_tir *tir, u32 *in)
{
+ struct mlx5e_hw_objs *res = &mdev->mlx5e_res.hw_objs;
int err;
err = mlx5_core_create_tir(mdev, in, &tir->tirn);
if (err)
return err;
- mutex_lock(&mdev->mlx5e_res.td.list_lock);
- list_add(&tir->list, &mdev->mlx5e_res.td.tirs_list);
- mutex_unlock(&mdev->mlx5e_res.td.list_lock);
+ mutex_lock(&res->td.list_lock);
+ list_add(&tir->list, &res->td.tirs_list);
+ mutex_unlock(&res->td.list_lock);
return 0;
}
@@ -54,10 +55,12 @@ int mlx5e_create_tir(struct mlx5_core_dev *mdev, struct mlx5e_tir *tir, u32 *in)
void mlx5e_destroy_tir(struct mlx5_core_dev *mdev,
struct mlx5e_tir *tir)
{
- mutex_lock(&mdev->mlx5e_res.td.list_lock);
+ struct mlx5e_hw_objs *res = &mdev->mlx5e_res.hw_objs;
+
+ mutex_lock(&res->td.list_lock);
mlx5_core_destroy_tir(mdev, tir->tirn);
list_del(&tir->list);
- mutex_unlock(&mdev->mlx5e_res.td.list_lock);
+ mutex_unlock(&res->td.list_lock);
}
void mlx5e_mkey_set_relaxed_ordering(struct mlx5_core_dev *mdev, void *mkc)
@@ -99,7 +102,7 @@ static int mlx5e_create_mkey(struct mlx5_core_dev *mdev, u32 pdn,
int mlx5e_create_mdev_resources(struct mlx5_core_dev *mdev)
{
- struct mlx5e_resources *res = &mdev->mlx5e_res;
+ struct mlx5e_hw_objs *res = &mdev->mlx5e_res.hw_objs;
int err;
err = mlx5_core_alloc_pd(mdev, &res->pdn);
@@ -126,8 +129,8 @@ int mlx5e_create_mdev_resources(struct mlx5_core_dev *mdev)
goto err_destroy_mkey;
}
- INIT_LIST_HEAD(&mdev->mlx5e_res.td.tirs_list);
- mutex_init(&mdev->mlx5e_res.td.list_lock);
+ INIT_LIST_HEAD(&res->td.tirs_list);
+ mutex_init(&res->td.list_lock);
return 0;
@@ -142,7 +145,7 @@ err_dealloc_pd:
void mlx5e_destroy_mdev_resources(struct mlx5_core_dev *mdev)
{
- struct mlx5e_resources *res = &mdev->mlx5e_res;
+ struct mlx5e_hw_objs *res = &mdev->mlx5e_res.hw_objs;
mlx5_free_bfreg(mdev, &res->bfreg);
mlx5_core_destroy_mkey(mdev, &res->mkey);
@@ -180,8 +183,8 @@ int mlx5e_refresh_tirs(struct mlx5e_priv *priv, bool enable_uc_lb,
MLX5_SET(modify_tir_in, in, bitmask.self_lb_en, 1);
- mutex_lock(&mdev->mlx5e_res.td.list_lock);
- list_for_each_entry(tir, &mdev->mlx5e_res.td.tirs_list, list) {
+ mutex_lock(&mdev->mlx5e_res.hw_objs.td.list_lock);
+ list_for_each_entry(tir, &mdev->mlx5e_res.hw_objs.td.tirs_list, list) {
tirn = tir->tirn;
err = mlx5_core_modify_tir(mdev, tirn, in);
if (err)
@@ -192,7 +195,7 @@ out:
kvfree(in);
if (err)
netdev_err(priv->netdev, "refresh tir(0x%x) failed, %d\n", tirn, err);
- mutex_unlock(&mdev->mlx5e_res.td.list_lock);
+ mutex_unlock(&mdev->mlx5e_res.hw_objs.td.list_lock);
return err;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
index f23c67575073..a4c8d8d00d5a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
@@ -1149,35 +1149,23 @@ static int mlx5e_update_trust_state_hw(struct mlx5e_priv *priv, void *context)
static int mlx5e_set_trust_state(struct mlx5e_priv *priv, u8 trust_state)
{
- struct mlx5e_channels new_channels = {};
- bool reset_channels = true;
- bool opened;
- int err = 0;
+ struct mlx5e_params new_params;
+ bool reset = true;
+ int err;
mutex_lock(&priv->state_lock);
- new_channels.params = priv->channels.params;
- mlx5e_params_calc_trust_tx_min_inline_mode(priv->mdev, &new_channels.params,
+ new_params = priv->channels.params;
+ mlx5e_params_calc_trust_tx_min_inline_mode(priv->mdev, &new_params,
trust_state);
- opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
- if (!opened)
- reset_channels = false;
-
/* Skip if tx_min_inline is the same */
- if (new_channels.params.tx_min_inline_mode ==
- priv->channels.params.tx_min_inline_mode)
- reset_channels = false;
-
- if (reset_channels) {
- err = mlx5e_safe_switch_channels(priv, &new_channels,
- mlx5e_update_trust_state_hw,
- &trust_state);
- } else {
- err = mlx5e_update_trust_state_hw(priv, &trust_state);
- if (!err && !opened)
- priv->channels.params = new_channels.params;
- }
+ if (new_params.tx_min_inline_mode == priv->channels.params.tx_min_inline_mode)
+ reset = false;
+
+ err = mlx5e_safe_switch_params(priv, &new_params,
+ mlx5e_update_trust_state_hw,
+ &trust_state, reset);
mutex_unlock(&priv->state_lock);
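
This dcbnl hunk, and the ethtool hunks below, drop the open-coded "if channels are closed, just store the params; otherwise switch channels" logic. mlx5e_safe_switch_params() now takes the new params plus a `reset` flag and centralizes that decision, so each caller shrinks to computing new_params and whether a channel reset is needed. A hedged sketch of the resulting caller shape (the apply/needs-reset helpers are hypothetical):

static int set_knob_example(struct mlx5e_priv *priv, u8 value)
{
	struct mlx5e_params new_params;
	bool reset;
	int err;

	mutex_lock(&priv->state_lock);
	new_params = priv->channels.params;
	apply_knob_example(&new_params, value);
	reset = knob_needs_reset_example(priv, &new_params);

	/* handles the closed-channels case internally when reset is true */
	err = mlx5e_safe_switch_params(priv, &new_params, NULL, NULL, reset);
	mutex_unlock(&priv->state_lock);
	return err;
}
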
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
index 53802e18af90..8360289813f0 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
@@ -34,6 +34,7 @@
#include "en/port.h"
#include "en/params.h"
#include "en/xsk/pool.h"
+#include "en/ptp.h"
#include "lib/clock.h"
void mlx5e_ethtool_get_drvinfo(struct mlx5e_priv *priv,
@@ -325,7 +326,7 @@ static void mlx5e_get_ringparam(struct net_device *dev,
int mlx5e_ethtool_set_ringparam(struct mlx5e_priv *priv,
struct ethtool_ringparam *param)
{
- struct mlx5e_channels new_channels = {};
+ struct mlx5e_params new_params;
u8 log_rq_size;
u8 log_sq_size;
int err = 0;
@@ -364,20 +365,15 @@ int mlx5e_ethtool_set_ringparam(struct mlx5e_priv *priv,
mutex_lock(&priv->state_lock);
- new_channels.params = priv->channels.params;
- new_channels.params.log_rq_mtu_frames = log_rq_size;
- new_channels.params.log_sq_size = log_sq_size;
+ new_params = priv->channels.params;
+ new_params.log_rq_mtu_frames = log_rq_size;
+ new_params.log_sq_size = log_sq_size;
- err = mlx5e_validate_params(priv, &new_channels.params);
+ err = mlx5e_validate_params(priv->mdev, &new_params);
if (err)
goto unlock;
- if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) {
- priv->channels.params = new_channels.params;
- goto unlock;
- }
-
- err = mlx5e_safe_switch_channels(priv, &new_channels, NULL, NULL);
+ err = mlx5e_safe_switch_params(priv, &new_params, NULL, NULL, true);
unlock:
mutex_unlock(&priv->state_lock);
@@ -422,8 +418,9 @@ int mlx5e_ethtool_set_channels(struct mlx5e_priv *priv,
{
struct mlx5e_params *cur_params = &priv->channels.params;
unsigned int count = ch->combined_count;
- struct mlx5e_channels new_channels = {};
+ struct mlx5e_params new_params;
bool arfs_enabled;
+ bool opened;
int err = 0;
if (!count) {
@@ -458,28 +455,18 @@ int mlx5e_ethtool_set_channels(struct mlx5e_priv *priv,
goto out;
}
- new_channels.params = *cur_params;
- new_channels.params.num_channels = count;
-
- if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) {
- struct mlx5e_params old_params;
+ new_params = *cur_params;
+ new_params.num_channels = count;
- old_params = *cur_params;
- *cur_params = new_channels.params;
- err = mlx5e_num_channels_changed(priv);
- if (err)
- *cur_params = old_params;
-
- goto out;
- }
+ opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
- arfs_enabled = priv->netdev->features & NETIF_F_NTUPLE;
+ arfs_enabled = opened && (priv->netdev->features & NETIF_F_NTUPLE);
if (arfs_enabled)
mlx5e_arfs_disable(priv);
/* Switch to new channels, set new parameters and close old ones */
- err = mlx5e_safe_switch_channels(priv, &new_channels,
- mlx5e_num_channels_changed_ctx, NULL);
+ err = mlx5e_safe_switch_params(priv, &new_params,
+ mlx5e_num_channels_changed_ctx, NULL, true);
if (arfs_enabled) {
int err2 = mlx5e_arfs_enable(priv);
@@ -574,8 +561,9 @@ int mlx5e_ethtool_set_coalesce(struct mlx5e_priv *priv,
{
struct dim_cq_moder *rx_moder, *tx_moder;
struct mlx5_core_dev *mdev = priv->mdev;
- struct mlx5e_channels new_channels = {};
+ struct mlx5e_params new_params;
bool reset_rx, reset_tx;
+ bool reset = true;
int err = 0;
if (!MLX5_CAP_GEN(mdev, cq_moderation))
@@ -596,51 +584,47 @@ int mlx5e_ethtool_set_coalesce(struct mlx5e_priv *priv,
}
mutex_lock(&priv->state_lock);
- new_channels.params = priv->channels.params;
+ new_params = priv->channels.params;
- rx_moder = &new_channels.params.rx_cq_moderation;
+ rx_moder = &new_params.rx_cq_moderation;
rx_moder->usec = coal->rx_coalesce_usecs;
rx_moder->pkts = coal->rx_max_coalesced_frames;
- new_channels.params.rx_dim_enabled = !!coal->use_adaptive_rx_coalesce;
+ new_params.rx_dim_enabled = !!coal->use_adaptive_rx_coalesce;
- tx_moder = &new_channels.params.tx_cq_moderation;
+ tx_moder = &new_params.tx_cq_moderation;
tx_moder->usec = coal->tx_coalesce_usecs;
tx_moder->pkts = coal->tx_max_coalesced_frames;
- new_channels.params.tx_dim_enabled = !!coal->use_adaptive_tx_coalesce;
+ new_params.tx_dim_enabled = !!coal->use_adaptive_tx_coalesce;
reset_rx = !!coal->use_adaptive_rx_coalesce != priv->channels.params.rx_dim_enabled;
reset_tx = !!coal->use_adaptive_tx_coalesce != priv->channels.params.tx_dim_enabled;
if (reset_rx) {
- u8 mode = MLX5E_GET_PFLAG(&new_channels.params,
+ u8 mode = MLX5E_GET_PFLAG(&new_params,
MLX5E_PFLAG_RX_CQE_BASED_MODER);
- mlx5e_reset_rx_moderation(&new_channels.params, mode);
+ mlx5e_reset_rx_moderation(&new_params, mode);
}
if (reset_tx) {
- u8 mode = MLX5E_GET_PFLAG(&new_channels.params,
+ u8 mode = MLX5E_GET_PFLAG(&new_params,
MLX5E_PFLAG_TX_CQE_BASED_MODER);
- mlx5e_reset_tx_moderation(&new_channels.params, mode);
+ mlx5e_reset_tx_moderation(&new_params, mode);
}
- if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) {
- priv->channels.params = new_channels.params;
- goto out;
- }
-
- if (!reset_rx && !reset_tx) {
+ /* If DIM state hasn't changed, it's possible to modify interrupt
+ * moderation parameters on the fly, even if the channels are open.
+ */
+ if (!reset_rx && !reset_tx && test_bit(MLX5E_STATE_OPENED, &priv->state)) {
if (!coal->use_adaptive_rx_coalesce)
mlx5e_set_priv_channels_rx_coalesce(priv, coal);
if (!coal->use_adaptive_tx_coalesce)
mlx5e_set_priv_channels_tx_coalesce(priv, coal);
- priv->channels.params = new_channels.params;
- goto out;
+ reset = false;
}
- err = mlx5e_safe_switch_channels(priv, &new_channels, NULL, NULL);
+ err = mlx5e_safe_switch_params(priv, &new_params, NULL, NULL, reset);
-out:
mutex_unlock(&priv->state_lock);
return err;
}
@@ -1601,6 +1585,14 @@ static int mlx5e_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
return mlx5_set_port_wol(mdev, mlx5_wol_mode);
}
+static void mlx5e_get_fec_stats(struct net_device *netdev,
+ struct ethtool_fec_stats *fec_stats)
+{
+ struct mlx5e_priv *priv = netdev_priv(netdev);
+
+ mlx5e_stats_fec_get(priv, fec_stats);
+}
+
static int mlx5e_get_fecparam(struct net_device *netdev,
struct ethtool_fecparam *fecparam)
{
@@ -1769,6 +1761,49 @@ static int mlx5e_get_module_eeprom(struct net_device *netdev,
return 0;
}
+static int mlx5e_get_module_eeprom_by_page(struct net_device *netdev,
+ const struct ethtool_module_eeprom *page_data,
+ struct netlink_ext_ack *extack)
+{
+ struct mlx5e_priv *priv = netdev_priv(netdev);
+ struct mlx5_module_eeprom_query_params query;
+ struct mlx5_core_dev *mdev = priv->mdev;
+ u8 *data = page_data->data;
+ int size_read;
+ int i = 0;
+
+ if (!page_data->length)
+ return -EINVAL;
+
+ memset(data, 0, page_data->length);
+
+ query.offset = page_data->offset;
+ query.i2c_address = page_data->i2c_address;
+ query.bank = page_data->bank;
+ query.page = page_data->page;
+ while (i < page_data->length) {
+ query.size = page_data->length - i;
+ size_read = mlx5_query_module_eeprom_by_page(mdev, &query, data + i);
+
+ /* Done reading, return how many bytes were read */
+ if (!size_read)
+ return i;
+
+ if (size_read == -EINVAL)
+ return -EINVAL;
+ if (size_read < 0) {
+ netdev_err(priv->netdev, "%s: mlx5_query_module_eeprom_by_page failed:0x%x\n",
+ __func__, size_read);
+ return i;
+ }
+
+ i += size_read;
+ query.offset += size_read;
+ }
+
+ return i;
+}
+
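/* Usage note: this callback backs the ethtool netlink MODULE_EEPROM_GET
 * request. Returning fewer bytes than requested is legitimate (a short
 * read terminates the loop above and the byte count read so far is
 * returned). Assuming an ethtool binary new enough to speak the paged
 * interface, a page dump would look roughly like:
 *
 *	$ ethtool -m eth0 page 0x01 offset 128 length 128
 */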
int mlx5e_ethtool_flash_device(struct mlx5e_priv *priv,
struct ethtool_flash *flash)
{
@@ -1808,7 +1843,7 @@ static int set_pflag_cqe_based_moder(struct net_device *netdev, bool enable,
{
struct mlx5e_priv *priv = netdev_priv(netdev);
struct mlx5_core_dev *mdev = priv->mdev;
- struct mlx5e_channels new_channels = {};
+ struct mlx5e_params new_params;
bool mode_changed;
u8 cq_period_mode, current_cq_period_mode;
@@ -1827,18 +1862,13 @@ static int set_pflag_cqe_based_moder(struct net_device *netdev, bool enable,
if (!mode_changed)
return 0;
- new_channels.params = priv->channels.params;
+ new_params = priv->channels.params;
if (is_rx_cq)
- mlx5e_set_rx_cq_mode_params(&new_channels.params, cq_period_mode);
+ mlx5e_set_rx_cq_mode_params(&new_params, cq_period_mode);
else
- mlx5e_set_tx_cq_mode_params(&new_channels.params, cq_period_mode);
+ mlx5e_set_tx_cq_mode_params(&new_params, cq_period_mode);
- if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) {
- priv->channels.params = new_channels.params;
- return 0;
- }
-
- return mlx5e_safe_switch_channels(priv, &new_channels, NULL, NULL);
+ return mlx5e_safe_switch_params(priv, &new_params, NULL, NULL, true);
}
static int set_pflag_tx_cqe_based_moder(struct net_device *netdev, bool enable)
@@ -1854,7 +1884,7 @@ static int set_pflag_rx_cqe_based_moder(struct net_device *netdev, bool enable)
int mlx5e_modify_rx_cqe_compression_locked(struct mlx5e_priv *priv, bool new_val)
{
bool curr_val = MLX5E_GET_PFLAG(&priv->channels.params, MLX5E_PFLAG_RX_CQE_COMPRESS);
- struct mlx5e_channels new_channels = {};
+ struct mlx5e_params new_params;
int err = 0;
if (!MLX5_CAP_GEN(priv->mdev, cqe_compression))
@@ -1863,15 +1893,16 @@ int mlx5e_modify_rx_cqe_compression_locked(struct mlx5e_priv *priv, bool new_val
if (curr_val == new_val)
return 0;
- new_channels.params = priv->channels.params;
- MLX5E_SET_PFLAG(&new_channels.params, MLX5E_PFLAG_RX_CQE_COMPRESS, new_val);
-
- if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) {
- priv->channels.params = new_channels.params;
- return 0;
- }
+ new_params = priv->channels.params;
+ MLX5E_SET_PFLAG(&new_params, MLX5E_PFLAG_RX_CQE_COMPRESS, new_val);
+ if (priv->tstamp.rx_filter != HWTSTAMP_FILTER_NONE)
+ new_params.ptp_rx = new_val;
- err = mlx5e_safe_switch_channels(priv, &new_channels, NULL, NULL);
+ if (new_params.ptp_rx == priv->channels.params.ptp_rx)
+ err = mlx5e_safe_switch_params(priv, &new_params, NULL, NULL, true);
+ else
+ err = mlx5e_safe_switch_params(priv, &new_params, mlx5e_ptp_rx_manage_fs_ctx,
+ &new_params.ptp_rx, true);
if (err)
return err;
@@ -1892,11 +1923,6 @@ static int set_pflag_rx_cqe_compress(struct net_device *netdev,
if (!MLX5_CAP_GEN(mdev, cqe_compression))
return -EOPNOTSUPP;
- if (enable && priv->tstamp.rx_filter != HWTSTAMP_FILTER_NONE) {
- netdev_err(netdev, "Can't enable cqe compression while timestamping is enabled.\n");
- return -EINVAL;
- }
-
err = mlx5e_modify_rx_cqe_compression_locked(priv, enable);
if (err)
return err;
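/* The timestamping restriction removed above is handled inside
 * mlx5e_modify_rx_cqe_compression_locked() instead: when an RX
 * timestamping filter is active, toggling compression also toggles
 * new_params.ptp_rx, steering timestamped traffic to a dedicated PTP RQ
 * whose CQEs are never compressed. The key lines from that function:
 *
 *	if (priv->tstamp.rx_filter != HWTSTAMP_FILTER_NONE)
 *		new_params.ptp_rx = new_val;
 */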
@@ -1910,7 +1936,7 @@ static int set_pflag_rx_striding_rq(struct net_device *netdev, bool enable)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
struct mlx5_core_dev *mdev = priv->mdev;
- struct mlx5e_channels new_channels = {};
+ struct mlx5e_params new_params;
if (enable) {
if (!mlx5e_check_fragmented_striding_rq_cap(mdev))
@@ -1922,17 +1948,12 @@ static int set_pflag_rx_striding_rq(struct net_device *netdev, bool enable)
return -EINVAL;
}
- new_channels.params = priv->channels.params;
-
- MLX5E_SET_PFLAG(&new_channels.params, MLX5E_PFLAG_RX_STRIDING_RQ, enable);
- mlx5e_set_rq_type(mdev, &new_channels.params);
+ new_params = priv->channels.params;
- if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) {
- priv->channels.params = new_channels.params;
- return 0;
- }
+ MLX5E_SET_PFLAG(&new_params, MLX5E_PFLAG_RX_STRIDING_RQ, enable);
+ mlx5e_set_rq_type(mdev, &new_params);
- return mlx5e_safe_switch_channels(priv, &new_channels, NULL, NULL);
+ return mlx5e_safe_switch_params(priv, &new_params, NULL, NULL, true);
}
static int set_pflag_rx_no_csum_complete(struct net_device *netdev, bool enable)
@@ -1961,23 +1982,16 @@ static int set_pflag_tx_mpwqe_common(struct net_device *netdev, u32 flag, bool e
{
struct mlx5e_priv *priv = netdev_priv(netdev);
struct mlx5_core_dev *mdev = priv->mdev;
- struct mlx5e_channels new_channels = {};
- int err;
+ struct mlx5e_params new_params;
if (enable && !MLX5_CAP_ETH(mdev, enhanced_multi_pkt_send_wqe))
return -EOPNOTSUPP;
- new_channels.params = priv->channels.params;
-
- MLX5E_SET_PFLAG(&new_channels.params, flag, enable);
+ new_params = priv->channels.params;
- if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) {
- priv->channels.params = new_channels.params;
- return 0;
- }
+ MLX5E_SET_PFLAG(&new_params, flag, enable);
- err = mlx5e_safe_switch_channels(priv, &new_channels, NULL, NULL);
- return err;
+ return mlx5e_safe_switch_params(priv, &new_params, NULL, NULL, true);
}
static int set_pflag_xdp_tx_mpwqe(struct net_device *netdev, bool enable)
@@ -1994,7 +2008,7 @@ static int set_pflag_tx_port_ts(struct net_device *netdev, bool enable)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
struct mlx5_core_dev *mdev = priv->mdev;
- struct mlx5e_channels new_channels = {};
+ struct mlx5e_params new_params;
int err;
if (!MLX5_CAP_GEN(mdev, ts_cqe_to_dest_cqn))
@@ -2010,29 +2024,17 @@ static int set_pflag_tx_port_ts(struct net_device *netdev, bool enable)
return -EINVAL;
}
- new_channels.params = priv->channels.params;
- MLX5E_SET_PFLAG(&new_channels.params, MLX5E_PFLAG_TX_PORT_TS, enable);
+ new_params = priv->channels.params;
+ MLX5E_SET_PFLAG(&new_params, MLX5E_PFLAG_TX_PORT_TS, enable);
/* No need to verify SQ stop room as
* ptpsq.txqsq.stop_room <= generic_sq->stop_room, and both
* have the same log_sq_size.
*/
- if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) {
- struct mlx5e_params old_params;
-
- old_params = priv->channels.params;
- priv->channels.params = new_channels.params;
- err = mlx5e_num_channels_changed(priv);
- if (err)
- priv->channels.params = old_params;
- goto out;
- }
-
- err = mlx5e_safe_switch_channels(priv, &new_channels,
- mlx5e_num_channels_changed_ctx, NULL);
-out:
+ err = mlx5e_safe_switch_params(priv, &new_params,
+ mlx5e_num_channels_changed_ctx, NULL, true);
if (!err)
- priv->port_ptp_opened = true;
+ priv->tx_ptp_opened = true;
return err;
}
@@ -2123,12 +2125,216 @@ int mlx5e_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
return mlx5e_ethtool_set_rxnfc(dev, cmd);
}
+static int query_port_status_opcode(struct mlx5_core_dev *mdev, u32 *status_opcode)
+{
+ struct mlx5_ifc_pddr_troubleshooting_page_bits *pddr_troubleshooting_page;
+ u32 in[MLX5_ST_SZ_DW(pddr_reg)] = {};
+ u32 out[MLX5_ST_SZ_DW(pddr_reg)];
+ int err;
+
+ MLX5_SET(pddr_reg, in, local_port, 1);
+ MLX5_SET(pddr_reg, in, page_select,
+ MLX5_PDDR_REG_PAGE_SELECT_TROUBLESHOOTING_INFO_PAGE);
+
+ pddr_troubleshooting_page = MLX5_ADDR_OF(pddr_reg, in, page_data);
+ MLX5_SET(pddr_troubleshooting_page, pddr_troubleshooting_page,
+ group_opcode, MLX5_PDDR_REG_TRBLSH_GROUP_OPCODE_MONITOR);
+ err = mlx5_core_access_reg(mdev, in, sizeof(in), out,
+ sizeof(out), MLX5_REG_PDDR, 0, 0);
+ if (err)
+ return err;
+
+ pddr_troubleshooting_page = MLX5_ADDR_OF(pddr_reg, out, page_data);
+ *status_opcode = MLX5_GET(pddr_troubleshooting_page, pddr_troubleshooting_page,
+ status_opcode);
+ return 0;
+}
+
+struct mlx5e_ethtool_link_ext_state_opcode_mapping {
+ u32 status_opcode;
+ enum ethtool_link_ext_state link_ext_state;
+ u8 link_ext_substate;
+};
+
+static const struct mlx5e_ethtool_link_ext_state_opcode_mapping
+mlx5e_link_ext_state_opcode_map[] = {
+ /* States relating to the autonegotiation or issues therein */
+ {2, ETHTOOL_LINK_EXT_STATE_AUTONEG,
+ ETHTOOL_LINK_EXT_SUBSTATE_AN_NO_PARTNER_DETECTED},
+ {3, ETHTOOL_LINK_EXT_STATE_AUTONEG,
+ ETHTOOL_LINK_EXT_SUBSTATE_AN_ACK_NOT_RECEIVED},
+ {4, ETHTOOL_LINK_EXT_STATE_AUTONEG,
+ ETHTOOL_LINK_EXT_SUBSTATE_AN_NEXT_PAGE_EXCHANGE_FAILED},
+ {36, ETHTOOL_LINK_EXT_STATE_AUTONEG,
+ ETHTOOL_LINK_EXT_SUBSTATE_AN_NO_PARTNER_DETECTED_FORCE_MODE},
+ {38, ETHTOOL_LINK_EXT_STATE_AUTONEG,
+ ETHTOOL_LINK_EXT_SUBSTATE_AN_FEC_MISMATCH_DURING_OVERRIDE},
+ {39, ETHTOOL_LINK_EXT_STATE_AUTONEG,
+ ETHTOOL_LINK_EXT_SUBSTATE_AN_NO_HCD},
+
+ /* Failure during link training */
+ {5, ETHTOOL_LINK_EXT_STATE_LINK_TRAINING_FAILURE,
+ ETHTOOL_LINK_EXT_SUBSTATE_LT_KR_FRAME_LOCK_NOT_ACQUIRED},
+ {6, ETHTOOL_LINK_EXT_STATE_LINK_TRAINING_FAILURE,
+ ETHTOOL_LINK_EXT_SUBSTATE_LT_KR_LINK_INHIBIT_TIMEOUT},
+ {7, ETHTOOL_LINK_EXT_STATE_LINK_TRAINING_FAILURE,
+ ETHTOOL_LINK_EXT_SUBSTATE_LT_KR_LINK_PARTNER_DID_NOT_SET_RECEIVER_READY},
+ {8, ETHTOOL_LINK_EXT_STATE_LINK_TRAINING_FAILURE, 0},
+ {14, ETHTOOL_LINK_EXT_STATE_LINK_TRAINING_FAILURE,
+ ETHTOOL_LINK_EXT_SUBSTATE_LT_REMOTE_FAULT},
+
+ /* Logical mismatch in physical coding sublayer or forward error correction sublayer */
+ {9, ETHTOOL_LINK_EXT_STATE_LINK_LOGICAL_MISMATCH,
+ ETHTOOL_LINK_EXT_SUBSTATE_LLM_PCS_DID_NOT_ACQUIRE_BLOCK_LOCK},
+ {10, ETHTOOL_LINK_EXT_STATE_LINK_LOGICAL_MISMATCH,
+ ETHTOOL_LINK_EXT_SUBSTATE_LLM_PCS_DID_NOT_ACQUIRE_AM_LOCK},
+ {11, ETHTOOL_LINK_EXT_STATE_LINK_LOGICAL_MISMATCH,
+ ETHTOOL_LINK_EXT_SUBSTATE_LLM_PCS_DID_NOT_GET_ALIGN_STATUS},
+ {12, ETHTOOL_LINK_EXT_STATE_LINK_LOGICAL_MISMATCH,
+ ETHTOOL_LINK_EXT_SUBSTATE_LLM_FC_FEC_IS_NOT_LOCKED},
+ {13, ETHTOOL_LINK_EXT_STATE_LINK_LOGICAL_MISMATCH,
+ ETHTOOL_LINK_EXT_SUBSTATE_LLM_RS_FEC_IS_NOT_LOCKED},
+
+ /* Signal integrity issues */
+ {15, ETHTOOL_LINK_EXT_STATE_BAD_SIGNAL_INTEGRITY, 0},
+ {17, ETHTOOL_LINK_EXT_STATE_BAD_SIGNAL_INTEGRITY,
+ ETHTOOL_LINK_EXT_SUBSTATE_BSI_LARGE_NUMBER_OF_PHYSICAL_ERRORS},
+ {42, ETHTOOL_LINK_EXT_STATE_BAD_SIGNAL_INTEGRITY,
+ ETHTOOL_LINK_EXT_SUBSTATE_BSI_UNSUPPORTED_RATE},
+
+ /* No cable connected */
+ {1024, ETHTOOL_LINK_EXT_STATE_NO_CABLE, 0},
+
+ /* Failure is related to cable, e.g., unsupported cable */
+ {16, ETHTOOL_LINK_EXT_STATE_CABLE_ISSUE,
+ ETHTOOL_LINK_EXT_SUBSTATE_CI_UNSUPPORTED_CABLE},
+ {20, ETHTOOL_LINK_EXT_STATE_CABLE_ISSUE,
+ ETHTOOL_LINK_EXT_SUBSTATE_CI_UNSUPPORTED_CABLE},
+ {29, ETHTOOL_LINK_EXT_STATE_CABLE_ISSUE,
+ ETHTOOL_LINK_EXT_SUBSTATE_CI_UNSUPPORTED_CABLE},
+ {1025, ETHTOOL_LINK_EXT_STATE_CABLE_ISSUE,
+ ETHTOOL_LINK_EXT_SUBSTATE_CI_UNSUPPORTED_CABLE},
+ {1029, ETHTOOL_LINK_EXT_STATE_CABLE_ISSUE,
+ ETHTOOL_LINK_EXT_SUBSTATE_CI_UNSUPPORTED_CABLE},
+ {1031, ETHTOOL_LINK_EXT_STATE_CABLE_ISSUE, 0},
+
+ /* Failure is related to EEPROM, e.g., failure during reading or parsing the data */
+ {1027, ETHTOOL_LINK_EXT_STATE_EEPROM_ISSUE, 0},
+
+ /* Failure during calibration algorithm */
+ {23, ETHTOOL_LINK_EXT_STATE_CALIBRATION_FAILURE, 0},
+
+ /* The hardware is not able to provide the power required from cable or module */
+ {1032, ETHTOOL_LINK_EXT_STATE_POWER_BUDGET_EXCEEDED, 0},
+
+ /* The module is overheated */
+ {1030, ETHTOOL_LINK_EXT_STATE_OVERHEAT, 0},
+};
+
+static void
+mlx5e_set_link_ext_state(struct mlx5e_ethtool_link_ext_state_opcode_mapping
+ link_ext_state_mapping,
+ struct ethtool_link_ext_state_info *link_ext_state_info)
+{
+ switch (link_ext_state_mapping.link_ext_state) {
+ case ETHTOOL_LINK_EXT_STATE_AUTONEG:
+ link_ext_state_info->autoneg =
+ link_ext_state_mapping.link_ext_substate;
+ break;
+ case ETHTOOL_LINK_EXT_STATE_LINK_TRAINING_FAILURE:
+ link_ext_state_info->link_training =
+ link_ext_state_mapping.link_ext_substate;
+ break;
+ case ETHTOOL_LINK_EXT_STATE_LINK_LOGICAL_MISMATCH:
+ link_ext_state_info->link_logical_mismatch =
+ link_ext_state_mapping.link_ext_substate;
+ break;
+ case ETHTOOL_LINK_EXT_STATE_BAD_SIGNAL_INTEGRITY:
+ link_ext_state_info->bad_signal_integrity =
+ link_ext_state_mapping.link_ext_substate;
+ break;
+ case ETHTOOL_LINK_EXT_STATE_CABLE_ISSUE:
+ link_ext_state_info->cable_issue =
+ link_ext_state_mapping.link_ext_substate;
+ break;
+ default:
+ break;
+ }
+
+ link_ext_state_info->link_ext_state = link_ext_state_mapping.link_ext_state;
+}
+
+static int
+mlx5e_get_link_ext_state(struct net_device *dev,
+ struct ethtool_link_ext_state_info *link_ext_state_info)
+{
+ struct mlx5e_ethtool_link_ext_state_opcode_mapping link_ext_state_mapping;
+ struct mlx5e_priv *priv = netdev_priv(dev);
+ u32 status_opcode = 0;
+ int i;
+
+ /* Exit without data if the interface state is OK, since no extended data is
+ * available in such a case
+ */
+ if (netif_carrier_ok(dev))
+ return -ENODATA;
+
+ if (query_port_status_opcode(priv->mdev, &status_opcode) ||
+ !status_opcode)
+ return -ENODATA;
+
+ for (i = 0; i < ARRAY_SIZE(mlx5e_link_ext_state_opcode_map); i++) {
+ link_ext_state_mapping = mlx5e_link_ext_state_opcode_map[i];
+ if (link_ext_state_mapping.status_opcode == status_opcode) {
+ mlx5e_set_link_ext_state(link_ext_state_mapping,
+ link_ext_state_info);
+ return 0;
+ }
+ }
+
+ return -ENODATA;
+}
+
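/* Returning -ENODATA tells the ethtool link-state code that no extended
 * information is available; the request still succeeds, merely without
 * the extra attributes. With a sufficiently recent ethtool binary the
 * mapped state appears next to the carrier status, roughly:
 *
 *	$ ethtool eth0 | tail -1
 *	Link detected: no (Cable issue, Unsupported cable)
 */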
+static void mlx5e_get_eth_phy_stats(struct net_device *netdev,
+ struct ethtool_eth_phy_stats *phy_stats)
+{
+ struct mlx5e_priv *priv = netdev_priv(netdev);
+
+ mlx5e_stats_eth_phy_get(priv, phy_stats);
+}
+
+static void mlx5e_get_eth_mac_stats(struct net_device *netdev,
+ struct ethtool_eth_mac_stats *mac_stats)
+{
+ struct mlx5e_priv *priv = netdev_priv(netdev);
+
+ mlx5e_stats_eth_mac_get(priv, mac_stats);
+}
+
+static void mlx5e_get_eth_ctrl_stats(struct net_device *netdev,
+ struct ethtool_eth_ctrl_stats *ctrl_stats)
+{
+ struct mlx5e_priv *priv = netdev_priv(netdev);
+
+ mlx5e_stats_eth_ctrl_get(priv, ctrl_stats);
+}
+
+static void mlx5e_get_rmon_stats(struct net_device *netdev,
+ struct ethtool_rmon_stats *rmon_stats,
+ const struct ethtool_rmon_hist_range **ranges)
+{
+ struct mlx5e_priv *priv = netdev_priv(netdev);
+
+ mlx5e_stats_rmon_get(priv, rmon_stats, ranges);
+}
+
const struct ethtool_ops mlx5e_ethtool_ops = {
.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
ETHTOOL_COALESCE_MAX_FRAMES |
ETHTOOL_COALESCE_USE_ADAPTIVE,
.get_drvinfo = mlx5e_get_drvinfo,
.get_link = ethtool_op_get_link,
+ .get_link_ext_state = mlx5e_get_link_ext_state,
.get_strings = mlx5e_get_strings,
.get_sset_count = mlx5e_get_sset_count,
.get_ethtool_stats = mlx5e_get_ethtool_stats,
@@ -2157,12 +2363,18 @@ const struct ethtool_ops mlx5e_ethtool_ops = {
.set_wol = mlx5e_set_wol,
.get_module_info = mlx5e_get_module_info,
.get_module_eeprom = mlx5e_get_module_eeprom,
+ .get_module_eeprom_by_page = mlx5e_get_module_eeprom_by_page,
.flash_device = mlx5e_flash_device,
.get_priv_flags = mlx5e_get_priv_flags,
.set_priv_flags = mlx5e_set_priv_flags,
.self_test = mlx5e_self_test,
.get_msglevel = mlx5e_get_msglevel,
.set_msglevel = mlx5e_set_msglevel,
+ .get_fec_stats = mlx5e_get_fec_stats,
.get_fecparam = mlx5e_get_fecparam,
.set_fecparam = mlx5e_set_fecparam,
+ .get_eth_phy_stats = mlx5e_get_eth_phy_stats,
+ .get_eth_mac_stats = mlx5e_get_eth_mac_stats,
+ .get_eth_ctrl_stats = mlx5e_get_eth_ctrl_stats,
+ .get_rmon_stats = mlx5e_get_rmon_stats,
};
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
index 16ce7756ac43..0d571a0c76d9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
@@ -36,7 +36,9 @@
#include <linux/tcp.h>
#include <linux/mlx5/fs.h>
#include "en.h"
+#include "en_rep.h"
#include "lib/mpfs.h"
+#include "en/ptp.h"
static int mlx5e_add_l2_flow_rule(struct mlx5e_priv *priv,
struct mlx5e_l2_rule *ai, int type);
@@ -106,6 +108,29 @@ static void mlx5e_del_l2_from_hash(struct mlx5e_l2_hash_node *hn)
kfree(hn);
}
+struct mlx5e_vlan_table {
+ struct mlx5e_flow_table ft;
+ DECLARE_BITMAP(active_cvlans, VLAN_N_VID);
+ DECLARE_BITMAP(active_svlans, VLAN_N_VID);
+ struct mlx5_flow_handle *active_cvlans_rule[VLAN_N_VID];
+ struct mlx5_flow_handle *active_svlans_rule[VLAN_N_VID];
+ struct mlx5_flow_handle *untagged_rule;
+ struct mlx5_flow_handle *any_cvlan_rule;
+ struct mlx5_flow_handle *any_svlan_rule;
+ struct mlx5_flow_handle *trap_rule;
+ bool cvlan_filter_disabled;
+};
+
+unsigned long *mlx5e_vlan_get_active_svlans(struct mlx5e_vlan_table *vlan)
+{
+ return vlan->active_svlans;
+}
+
+struct mlx5_flow_table *mlx5e_vlan_get_flowtable(struct mlx5e_vlan_table *vlan)
+{
+ return vlan->ft.t;
+}
+
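/* struct mlx5e_vlan_table is now opaque outside en_fs.c: other files
 * hold only a pointer and go through the accessors above. A consumer
 * sketch, with an assumed caller context and a hypothetical vid:
 *
 *	unsigned long *svlans = mlx5e_vlan_get_active_svlans(priv->fs.vlan);
 *
 *	if (test_bit(vid, svlans))
 *		handle_svlan(vid);	(hypothetical helper)
 */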
static int mlx5e_vport_context_update_vlans(struct mlx5e_priv *priv)
{
struct net_device *ndev = priv->netdev;
@@ -117,7 +142,7 @@ static int mlx5e_vport_context_update_vlans(struct mlx5e_priv *priv)
int i;
list_size = 0;
- for_each_set_bit(vlan, priv->fs.vlan.active_cvlans, VLAN_N_VID)
+ for_each_set_bit(vlan, priv->fs.vlan->active_cvlans, VLAN_N_VID)
list_size++;
max_list_size = 1 << MLX5_CAP_GEN(priv->mdev, log_max_vlan_list);
@@ -134,7 +159,7 @@ static int mlx5e_vport_context_update_vlans(struct mlx5e_priv *priv)
return -ENOMEM;
i = 0;
- for_each_set_bit(vlan, priv->fs.vlan.active_cvlans, VLAN_N_VID) {
+ for_each_set_bit(vlan, priv->fs.vlan->active_cvlans, VLAN_N_VID) {
if (i >= list_size)
break;
vlans[i++] = vlan;
@@ -161,7 +186,7 @@ static int __mlx5e_add_vlan_rule(struct mlx5e_priv *priv,
enum mlx5e_vlan_rule_type rule_type,
u16 vid, struct mlx5_flow_spec *spec)
{
- struct mlx5_flow_table *ft = priv->fs.vlan.ft.t;
+ struct mlx5_flow_table *ft = priv->fs.vlan->ft.t;
struct mlx5_flow_destination dest = {};
struct mlx5_flow_handle **rule_p;
MLX5_DECLARE_FLOW_ACT(flow_act);
@@ -178,24 +203,24 @@ static int __mlx5e_add_vlan_rule(struct mlx5e_priv *priv,
* disabled in match value means both S & C tags
* don't exist (untagged of both)
*/
- rule_p = &priv->fs.vlan.untagged_rule;
+ rule_p = &priv->fs.vlan->untagged_rule;
MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
outer_headers.cvlan_tag);
break;
case MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID:
- rule_p = &priv->fs.vlan.any_cvlan_rule;
+ rule_p = &priv->fs.vlan->any_cvlan_rule;
MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
outer_headers.cvlan_tag);
MLX5_SET(fte_match_param, spec->match_value, outer_headers.cvlan_tag, 1);
break;
case MLX5E_VLAN_RULE_TYPE_ANY_STAG_VID:
- rule_p = &priv->fs.vlan.any_svlan_rule;
+ rule_p = &priv->fs.vlan->any_svlan_rule;
MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
outer_headers.svlan_tag);
MLX5_SET(fte_match_param, spec->match_value, outer_headers.svlan_tag, 1);
break;
case MLX5E_VLAN_RULE_TYPE_MATCH_STAG_VID:
- rule_p = &priv->fs.vlan.active_svlans_rule[vid];
+ rule_p = &priv->fs.vlan->active_svlans_rule[vid];
MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
outer_headers.svlan_tag);
MLX5_SET(fte_match_param, spec->match_value, outer_headers.svlan_tag, 1);
@@ -205,7 +230,7 @@ static int __mlx5e_add_vlan_rule(struct mlx5e_priv *priv,
vid);
break;
default: /* MLX5E_VLAN_RULE_TYPE_MATCH_CTAG_VID */
- rule_p = &priv->fs.vlan.active_cvlans_rule[vid];
+ rule_p = &priv->fs.vlan->active_cvlans_rule[vid];
MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
outer_headers.cvlan_tag);
MLX5_SET(fte_match_param, spec->match_value, outer_headers.cvlan_tag, 1);
@@ -255,33 +280,33 @@ static void mlx5e_del_vlan_rule(struct mlx5e_priv *priv,
{
switch (rule_type) {
case MLX5E_VLAN_RULE_TYPE_UNTAGGED:
- if (priv->fs.vlan.untagged_rule) {
- mlx5_del_flow_rules(priv->fs.vlan.untagged_rule);
- priv->fs.vlan.untagged_rule = NULL;
+ if (priv->fs.vlan->untagged_rule) {
+ mlx5_del_flow_rules(priv->fs.vlan->untagged_rule);
+ priv->fs.vlan->untagged_rule = NULL;
}
break;
case MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID:
- if (priv->fs.vlan.any_cvlan_rule) {
- mlx5_del_flow_rules(priv->fs.vlan.any_cvlan_rule);
- priv->fs.vlan.any_cvlan_rule = NULL;
+ if (priv->fs.vlan->any_cvlan_rule) {
+ mlx5_del_flow_rules(priv->fs.vlan->any_cvlan_rule);
+ priv->fs.vlan->any_cvlan_rule = NULL;
}
break;
case MLX5E_VLAN_RULE_TYPE_ANY_STAG_VID:
- if (priv->fs.vlan.any_svlan_rule) {
- mlx5_del_flow_rules(priv->fs.vlan.any_svlan_rule);
- priv->fs.vlan.any_svlan_rule = NULL;
+ if (priv->fs.vlan->any_svlan_rule) {
+ mlx5_del_flow_rules(priv->fs.vlan->any_svlan_rule);
+ priv->fs.vlan->any_svlan_rule = NULL;
}
break;
case MLX5E_VLAN_RULE_TYPE_MATCH_STAG_VID:
- if (priv->fs.vlan.active_svlans_rule[vid]) {
- mlx5_del_flow_rules(priv->fs.vlan.active_svlans_rule[vid]);
- priv->fs.vlan.active_svlans_rule[vid] = NULL;
+ if (priv->fs.vlan->active_svlans_rule[vid]) {
+ mlx5_del_flow_rules(priv->fs.vlan->active_svlans_rule[vid]);
+ priv->fs.vlan->active_svlans_rule[vid] = NULL;
}
break;
case MLX5E_VLAN_RULE_TYPE_MATCH_CTAG_VID:
- if (priv->fs.vlan.active_cvlans_rule[vid]) {
- mlx5_del_flow_rules(priv->fs.vlan.active_cvlans_rule[vid]);
- priv->fs.vlan.active_cvlans_rule[vid] = NULL;
+ if (priv->fs.vlan->active_cvlans_rule[vid]) {
+ mlx5_del_flow_rules(priv->fs.vlan->active_cvlans_rule[vid]);
+ priv->fs.vlan->active_cvlans_rule[vid] = NULL;
}
mlx5e_vport_context_update_vlans(priv);
break;
@@ -328,27 +353,27 @@ mlx5e_add_trap_rule(struct mlx5_flow_table *ft, int trap_id, int tir_num)
int mlx5e_add_vlan_trap(struct mlx5e_priv *priv, int trap_id, int tir_num)
{
- struct mlx5_flow_table *ft = priv->fs.vlan.ft.t;
+ struct mlx5_flow_table *ft = priv->fs.vlan->ft.t;
struct mlx5_flow_handle *rule;
int err;
rule = mlx5e_add_trap_rule(ft, trap_id, tir_num);
if (IS_ERR(rule)) {
err = PTR_ERR(rule);
- priv->fs.vlan.trap_rule = NULL;
+ priv->fs.vlan->trap_rule = NULL;
netdev_err(priv->netdev, "%s: add VLAN trap rule failed, err %d\n",
__func__, err);
return err;
}
- priv->fs.vlan.trap_rule = rule;
+ priv->fs.vlan->trap_rule = rule;
return 0;
}
void mlx5e_remove_vlan_trap(struct mlx5e_priv *priv)
{
- if (priv->fs.vlan.trap_rule) {
- mlx5_del_flow_rules(priv->fs.vlan.trap_rule);
- priv->fs.vlan.trap_rule = NULL;
+ if (priv->fs.vlan->trap_rule) {
+ mlx5_del_flow_rules(priv->fs.vlan->trap_rule);
+ priv->fs.vlan->trap_rule = NULL;
}
}
@@ -380,10 +405,10 @@ void mlx5e_remove_mac_trap(struct mlx5e_priv *priv)
void mlx5e_enable_cvlan_filter(struct mlx5e_priv *priv)
{
- if (!priv->fs.vlan.cvlan_filter_disabled)
+ if (!priv->fs.vlan->cvlan_filter_disabled)
return;
- priv->fs.vlan.cvlan_filter_disabled = false;
+ priv->fs.vlan->cvlan_filter_disabled = false;
if (priv->netdev->flags & IFF_PROMISC)
return;
mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID, 0);
@@ -391,10 +416,10 @@ void mlx5e_enable_cvlan_filter(struct mlx5e_priv *priv)
void mlx5e_disable_cvlan_filter(struct mlx5e_priv *priv)
{
- if (priv->fs.vlan.cvlan_filter_disabled)
+ if (priv->fs.vlan->cvlan_filter_disabled)
return;
- priv->fs.vlan.cvlan_filter_disabled = true;
+ priv->fs.vlan->cvlan_filter_disabled = true;
if (priv->netdev->flags & IFF_PROMISC)
return;
mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID, 0);
@@ -404,11 +429,11 @@ static int mlx5e_vlan_rx_add_cvid(struct mlx5e_priv *priv, u16 vid)
{
int err;
- set_bit(vid, priv->fs.vlan.active_cvlans);
+ set_bit(vid, priv->fs.vlan->active_cvlans);
err = mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_CTAG_VID, vid);
if (err)
- clear_bit(vid, priv->fs.vlan.active_cvlans);
+ clear_bit(vid, priv->fs.vlan->active_cvlans);
return err;
}
@@ -418,11 +443,11 @@ static int mlx5e_vlan_rx_add_svid(struct mlx5e_priv *priv, u16 vid)
struct net_device *netdev = priv->netdev;
int err;
- set_bit(vid, priv->fs.vlan.active_svlans);
+ set_bit(vid, priv->fs.vlan->active_svlans);
err = mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_STAG_VID, vid);
if (err) {
- clear_bit(vid, priv->fs.vlan.active_svlans);
+ clear_bit(vid, priv->fs.vlan->active_svlans);
return err;
}
@@ -435,6 +460,9 @@ int mlx5e_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
{
struct mlx5e_priv *priv = netdev_priv(dev);
+ if (mlx5e_is_uplink_rep(priv))
+ return 0; /* no vlan table for uplink rep */
+
if (be16_to_cpu(proto) == ETH_P_8021Q)
return mlx5e_vlan_rx_add_cvid(priv, vid);
else if (be16_to_cpu(proto) == ETH_P_8021AD)
@@ -447,11 +475,14 @@ int mlx5e_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid)
{
struct mlx5e_priv *priv = netdev_priv(dev);
+ if (mlx5e_is_uplink_rep(priv))
+ return 0; /* no vlan table for uplink rep */
+
if (be16_to_cpu(proto) == ETH_P_8021Q) {
- clear_bit(vid, priv->fs.vlan.active_cvlans);
+ clear_bit(vid, priv->fs.vlan->active_cvlans);
mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_CTAG_VID, vid);
} else if (be16_to_cpu(proto) == ETH_P_8021AD) {
- clear_bit(vid, priv->fs.vlan.active_svlans);
+ clear_bit(vid, priv->fs.vlan->active_svlans);
mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_STAG_VID, vid);
netdev_update_features(dev);
}
@@ -465,14 +496,14 @@ static void mlx5e_add_vlan_rules(struct mlx5e_priv *priv)
mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_UNTAGGED, 0);
- for_each_set_bit(i, priv->fs.vlan.active_cvlans, VLAN_N_VID) {
+ for_each_set_bit(i, priv->fs.vlan->active_cvlans, VLAN_N_VID) {
mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_CTAG_VID, i);
}
- for_each_set_bit(i, priv->fs.vlan.active_svlans, VLAN_N_VID)
+ for_each_set_bit(i, priv->fs.vlan->active_svlans, VLAN_N_VID)
mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_STAG_VID, i);
- if (priv->fs.vlan.cvlan_filter_disabled)
+ if (priv->fs.vlan->cvlan_filter_disabled)
mlx5e_add_any_vid_rules(priv);
}
@@ -482,11 +513,11 @@ static void mlx5e_del_vlan_rules(struct mlx5e_priv *priv)
mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_UNTAGGED, 0);
- for_each_set_bit(i, priv->fs.vlan.active_cvlans, VLAN_N_VID) {
+ for_each_set_bit(i, priv->fs.vlan->active_cvlans, VLAN_N_VID) {
mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_CTAG_VID, i);
}
- for_each_set_bit(i, priv->fs.vlan.active_svlans, VLAN_N_VID)
+ for_each_set_bit(i, priv->fs.vlan->active_svlans, VLAN_N_VID)
mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_STAG_VID, i);
WARN_ON_ONCE(!(test_bit(MLX5E_STATE_DESTROYING, &priv->state)));
@@ -496,7 +527,7 @@ static void mlx5e_del_vlan_rules(struct mlx5e_priv *priv)
/* must be called after DESTROY bit is set and
* set_rx_mode is called and flushed
*/
- if (priv->fs.vlan.cvlan_filter_disabled)
+ if (priv->fs.vlan->cvlan_filter_disabled)
mlx5e_del_any_vid_rules(priv);
}
@@ -1684,10 +1715,15 @@ static int mlx5e_create_vlan_table_groups(struct mlx5e_flow_table *ft)
static int mlx5e_create_vlan_table(struct mlx5e_priv *priv)
{
- struct mlx5e_flow_table *ft = &priv->fs.vlan.ft;
struct mlx5_flow_table_attr ft_attr = {};
+ struct mlx5e_flow_table *ft;
int err;
+ priv->fs.vlan = kvzalloc(sizeof(*priv->fs.vlan), GFP_KERNEL);
+ if (!priv->fs.vlan)
+ return -ENOMEM;
+
+ ft = &priv->fs.vlan->ft;
ft->num_groups = 0;
ft_attr.max_fte = MLX5E_VLAN_TABLE_SIZE;
@@ -1695,12 +1731,11 @@ static int mlx5e_create_vlan_table(struct mlx5e_priv *priv)
ft_attr.prio = MLX5E_NIC_PRIO;
ft->t = mlx5_create_flow_table(priv->fs.ns, &ft_attr);
-
if (IS_ERR(ft->t)) {
err = PTR_ERR(ft->t);
- ft->t = NULL;
- return err;
+ goto err_free_t;
}
+
ft->g = kcalloc(MLX5E_NUM_VLAN_GROUPS, sizeof(*ft->g), GFP_KERNEL);
if (!ft->g) {
err = -ENOMEM;
@@ -1719,7 +1754,9 @@ err_free_g:
kfree(ft->g);
err_destroy_vlan_table:
mlx5_destroy_flow_table(ft->t);
- ft->t = NULL;
+err_free_t:
+ kvfree(priv->fs.vlan);
+ priv->fs.vlan = NULL;
return err;
}
@@ -1727,7 +1764,8 @@ err_destroy_vlan_table:
static void mlx5e_destroy_vlan_table(struct mlx5e_priv *priv)
{
mlx5e_del_vlan_rules(priv);
- mlx5e_destroy_flow_table(&priv->fs.vlan.ft);
+ mlx5e_destroy_flow_table(&priv->fs.vlan->ft);
+ kvfree(priv->fs.vlan);
}
int mlx5e_create_flow_steering(struct mlx5e_priv *priv)
@@ -1785,10 +1823,16 @@ int mlx5e_create_flow_steering(struct mlx5e_priv *priv)
goto err_destroy_l2_table;
}
+ err = mlx5e_ptp_alloc_rx_fs(priv);
+ if (err)
+ goto err_destroy_vlan_table;
+
mlx5e_ethtool_init_steering(priv);
return 0;
+err_destroy_vlan_table:
+ mlx5e_destroy_vlan_table(priv);
err_destroy_l2_table:
mlx5e_destroy_l2_table(priv);
err_destroy_ttc_table:
@@ -1803,6 +1847,7 @@ err_destroy_arfs_tables:
void mlx5e_destroy_flow_steering(struct mlx5e_priv *priv)
{
+ mlx5e_ptp_free_rx_fs(priv);
mlx5e_destroy_vlan_table(priv);
mlx5e_destroy_l2_table(priv);
mlx5e_destroy_ttc_table(priv, &priv->fs.ttc);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index 5db63b9f3b70..bca832cdc4cb 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -87,51 +87,6 @@ bool mlx5e_check_fragmented_striding_rq_cap(struct mlx5_core_dev *mdev)
return true;
}
-void mlx5e_init_rq_type_params(struct mlx5_core_dev *mdev,
- struct mlx5e_params *params)
-{
- params->log_rq_mtu_frames = is_kdump_kernel() ?
- MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE :
- MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE;
-
- mlx5_core_info(mdev, "MLX5E: StrdRq(%d) RqSz(%ld) StrdSz(%ld) RxCqeCmprss(%d)\n",
- params->rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ,
- params->rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ ?
- BIT(mlx5e_mpwqe_get_log_rq_size(params, NULL)) :
- BIT(params->log_rq_mtu_frames),
- BIT(mlx5e_mpwqe_get_log_stride_size(mdev, params, NULL)),
- MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS));
-}
-
-bool mlx5e_striding_rq_possible(struct mlx5_core_dev *mdev,
- struct mlx5e_params *params)
-{
- if (!mlx5e_check_fragmented_striding_rq_cap(mdev))
- return false;
-
- if (mlx5_fpga_is_ipsec_device(mdev))
- return false;
-
- if (params->xdp_prog) {
- /* XSK params are not considered here. If striding RQ is in use,
- * and an XSK is being opened, mlx5e_rx_mpwqe_is_linear_skb will
- * be called with the known XSK params.
- */
- if (!mlx5e_rx_mpwqe_is_linear_skb(mdev, params, NULL))
- return false;
- }
-
- return true;
-}
-
-void mlx5e_set_rq_type(struct mlx5_core_dev *mdev, struct mlx5e_params *params)
-{
- params->rq_wq_type = mlx5e_striding_rq_possible(mdev, params) &&
- MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_STRIDING_RQ) ?
- MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ :
- MLX5_WQ_TYPE_CYCLIC;
-}
-
void mlx5e_update_carrier(struct mlx5e_priv *priv)
{
struct mlx5_core_dev *mdev = priv->mdev;
@@ -259,18 +214,17 @@ static inline void mlx5e_build_umr_wqe(struct mlx5e_rq *rq,
ucseg->mkey_mask = cpu_to_be64(MLX5_MKEY_MASK_FREE);
}
-static int mlx5e_rq_alloc_mpwqe_info(struct mlx5e_rq *rq,
- struct mlx5e_channel *c)
+static int mlx5e_rq_alloc_mpwqe_info(struct mlx5e_rq *rq, int node)
{
int wq_sz = mlx5_wq_ll_get_size(&rq->mpwqe.wq);
rq->mpwqe.info = kvzalloc_node(array_size(wq_sz,
sizeof(*rq->mpwqe.info)),
- GFP_KERNEL, cpu_to_node(c->cpu));
+ GFP_KERNEL, node);
if (!rq->mpwqe.info)
return -ENOMEM;
- mlx5e_build_umr_wqe(rq, &c->icosq, &rq->mpwqe.umr_wqe);
+ mlx5e_build_umr_wqe(rq, rq->icosq, &rq->mpwqe.umr_wqe);
return 0;
}
@@ -302,7 +256,7 @@ static int mlx5e_create_umr_mkey(struct mlx5_core_dev *mdev,
MLX5_SET(mkc, mkc, access_mode_1_0, MLX5_MKC_ACCESS_MODE_MTT);
mlx5e_mkey_set_relaxed_ordering(mdev, mkc);
MLX5_SET(mkc, mkc, qpn, 0xffffff);
- MLX5_SET(mkc, mkc, pd, mdev->mlx5e_res.pdn);
+ MLX5_SET(mkc, mkc, pd, mdev->mlx5e_res.hw_objs.pdn);
MLX5_SET64(mkc, mkc, len, npages << page_shift);
MLX5_SET(mkc, mkc, translations_octword_size,
MLX5_MTT_OCTW(npages));
@@ -419,58 +373,53 @@ static void mlx5e_free_mpwqe_rq_drop_page(struct mlx5e_rq *rq)
__free_page(rq->wqe_overflow.page);
}
-static int mlx5e_alloc_rq(struct mlx5e_channel *c,
- struct mlx5e_params *params,
+static int mlx5e_init_rxq_rq(struct mlx5e_channel *c, struct mlx5e_params *params,
+ struct mlx5e_rq *rq)
+{
+ struct mlx5_core_dev *mdev = c->mdev;
+ int err;
+
+ rq->wq_type = params->rq_wq_type;
+ rq->pdev = c->pdev;
+ rq->netdev = c->netdev;
+ rq->priv = c->priv;
+ rq->tstamp = c->tstamp;
+ rq->clock = &mdev->clock;
+ rq->icosq = &c->icosq;
+ rq->ix = c->ix;
+ rq->mdev = mdev;
+ rq->hw_mtu = MLX5E_SW2HW_MTU(params, params->sw_mtu);
+ rq->xdpsq = &c->rq_xdpsq;
+ rq->stats = &c->priv->channel_stats[c->ix].rq;
+ rq->ptp_cyc2time = mlx5_rq_ts_translator(mdev);
+ err = mlx5e_rq_set_handlers(rq, params, NULL);
+ if (err)
+ return err;
+
+ return xdp_rxq_info_reg(&rq->xdp_rxq, rq->netdev, rq->ix, 0);
+}
+
+static int mlx5e_alloc_rq(struct mlx5e_params *params,
struct mlx5e_xsk_param *xsk,
- struct xsk_buff_pool *xsk_pool,
struct mlx5e_rq_param *rqp,
- struct mlx5e_rq *rq)
+ int node, struct mlx5e_rq *rq)
{
struct page_pool_params pp_params = { 0 };
- struct mlx5_core_dev *mdev = c->mdev;
+ struct mlx5_core_dev *mdev = rq->mdev;
void *rqc = rqp->rqc;
void *rqc_wq = MLX5_ADDR_OF(rqc, rqc, wq);
- u32 rq_xdp_ix;
u32 pool_size;
int wq_sz;
int err;
int i;
- rqp->wq.db_numa_node = cpu_to_node(c->cpu);
-
- rq->wq_type = params->rq_wq_type;
- rq->pdev = c->pdev;
- rq->netdev = c->netdev;
- rq->priv = c->priv;
- rq->tstamp = c->tstamp;
- rq->clock = &mdev->clock;
- rq->icosq = &c->icosq;
- rq->ix = c->ix;
- rq->mdev = mdev;
- rq->hw_mtu = MLX5E_SW2HW_MTU(params, params->sw_mtu);
- rq->xdpsq = &c->rq_xdpsq;
- rq->xsk_pool = xsk_pool;
- rq->ptp_cyc2time = mlx5_is_real_time_rq(mdev) ?
- mlx5_real_time_cyc2time :
- mlx5_timecounter_cyc2time;
-
- if (rq->xsk_pool)
- rq->stats = &c->priv->channel_stats[c->ix].xskrq;
- else
- rq->stats = &c->priv->channel_stats[c->ix].rq;
+ rqp->wq.db_numa_node = node;
INIT_WORK(&rq->recover_work, mlx5e_rq_err_cqe_work);
if (params->xdp_prog)
bpf_prog_inc(params->xdp_prog);
RCU_INIT_POINTER(rq->xdp_prog, params->xdp_prog);
- rq_xdp_ix = rq->ix;
- if (xsk)
- rq_xdp_ix += params->num_channels * MLX5E_RQ_GROUP_XSK;
- err = xdp_rxq_info_reg(&rq->xdp_rxq, rq->netdev, rq_xdp_ix, 0);
- if (err < 0)
- goto err_rq_xdp_prog;
-
rq->buff.map_dir = params->xdp_prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE;
rq->buff.headroom = mlx5e_get_rq_headroom(mdev, params, xsk);
pool_size = 1 << params->log_rq_mtu_frames;
@@ -480,7 +429,7 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c,
err = mlx5_wq_ll_create(mdev, &rqp->wq, rqc_wq, &rq->mpwqe.wq,
&rq->wq_ctrl);
if (err)
- goto err_rq_xdp;
+ goto err_rq_xdp_prog;
err = mlx5e_alloc_mpwqe_rq_drop_page(rq);
if (err)
@@ -504,7 +453,7 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c,
goto err_rq_drop_page;
rq->mkey_be = cpu_to_be32(rq->umr_mkey.key);
- err = mlx5e_rq_alloc_mpwqe_info(rq, c);
+ err = mlx5e_rq_alloc_mpwqe_info(rq, node);
if (err)
goto err_rq_mkey;
break;
@@ -512,7 +461,7 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c,
err = mlx5_wq_cyc_create(mdev, &rqp->wq, rqc_wq, &rq->wqe.wq,
&rq->wq_ctrl);
if (err)
- goto err_rq_xdp;
+ goto err_rq_xdp_prog;
rq->wqe.wq.db = &rq->wqe.wq.db[MLX5_RCV_DBR];
@@ -524,23 +473,19 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c,
rq->wqe.frags =
kvzalloc_node(array_size(sizeof(*rq->wqe.frags),
(wq_sz << rq->wqe.info.log_num_frags)),
- GFP_KERNEL, cpu_to_node(c->cpu));
+ GFP_KERNEL, node);
if (!rq->wqe.frags) {
err = -ENOMEM;
goto err_rq_wq_destroy;
}
- err = mlx5e_init_di_list(rq, wq_sz, cpu_to_node(c->cpu));
+ err = mlx5e_init_di_list(rq, wq_sz, node);
if (err)
goto err_rq_frags;
- rq->mkey_be = c->mkey_be;
+ rq->mkey_be = cpu_to_be32(mdev->mlx5e_res.hw_objs.mkey.key);
}
- err = mlx5e_rq_set_handlers(rq, params, xsk);
- if (err)
- goto err_free_by_rq_type;
-
if (xsk) {
err = xdp_rxq_info_reg_mem_model(&rq->xdp_rxq,
MEM_TYPE_XSK_BUFF_POOL, NULL);
@@ -550,8 +495,8 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c,
pp_params.order = 0;
pp_params.flags = 0; /* No internal DMA mapping in page_pool */
pp_params.pool_size = pool_size;
- pp_params.nid = cpu_to_node(c->cpu);
- pp_params.dev = c->pdev;
+ pp_params.nid = node;
+ pp_params.dev = rq->pdev;
pp_params.dma_dir = rq->buff.map_dir;
/* page_pool can be used even when there is no rq->xdp_prog,
@@ -565,8 +510,9 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c,
rq->page_pool = NULL;
goto err_free_by_rq_type;
}
- err = xdp_rxq_info_reg_mem_model(&rq->xdp_rxq,
- MEM_TYPE_PAGE_POOL, rq->page_pool);
+ if (xdp_rxq_info_is_reg(&rq->xdp_rxq))
+ err = xdp_rxq_info_reg_mem_model(&rq->xdp_rxq,
+ MEM_TYPE_PAGE_POOL, rq->page_pool);
}
if (err)
goto err_free_by_rq_type;
@@ -635,8 +581,6 @@ err_rq_frags:
}
err_rq_wq_destroy:
mlx5_wq_destroy(&rq->wq_ctrl);
-err_rq_xdp:
- xdp_rxq_info_unreg(&rq->xdp_rxq);
err_rq_xdp_prog:
if (params->xdp_prog)
bpf_prog_put(params->xdp_prog);
@@ -649,10 +593,12 @@ static void mlx5e_free_rq(struct mlx5e_rq *rq)
struct bpf_prog *old_prog;
int i;
- old_prog = rcu_dereference_protected(rq->xdp_prog,
- lockdep_is_held(&rq->priv->state_lock));
- if (old_prog)
- bpf_prog_put(old_prog);
+ if (xdp_rxq_info_is_reg(&rq->xdp_rxq)) {
+ old_prog = rcu_dereference_protected(rq->xdp_prog,
+ lockdep_is_held(&rq->priv->state_lock));
+ if (old_prog)
+ bpf_prog_put(old_prog);
+ }
switch (rq->wq_type) {
case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
@@ -888,13 +834,14 @@ void mlx5e_free_rx_descs(struct mlx5e_rq *rq)
}
-int mlx5e_open_rq(struct mlx5e_channel *c, struct mlx5e_params *params,
- struct mlx5e_rq_param *param, struct mlx5e_xsk_param *xsk,
- struct xsk_buff_pool *xsk_pool, struct mlx5e_rq *rq)
+int mlx5e_open_rq(struct mlx5e_params *params, struct mlx5e_rq_param *param,
+ struct mlx5e_xsk_param *xsk, int node,
+ struct mlx5e_rq *rq)
{
+ struct mlx5_core_dev *mdev = rq->mdev;
int err;
- err = mlx5e_alloc_rq(c, params, xsk, xsk_pool, param, rq);
+ err = mlx5e_alloc_rq(params, xsk, param, node, rq);
if (err)
return err;
@@ -906,28 +853,28 @@ int mlx5e_open_rq(struct mlx5e_channel *c, struct mlx5e_params *params,
if (err)
goto err_destroy_rq;
- if (mlx5e_is_tls_on(c->priv) && !mlx5_accel_is_ktls_device(c->mdev))
- __set_bit(MLX5E_RQ_STATE_FPGA_TLS, &c->rq.state); /* must be FPGA */
+ if (mlx5e_is_tls_on(rq->priv) && !mlx5_accel_is_ktls_device(mdev))
+ __set_bit(MLX5E_RQ_STATE_FPGA_TLS, &rq->state); /* must be FPGA */
- if (MLX5_CAP_ETH(c->mdev, cqe_checksum_full))
- __set_bit(MLX5E_RQ_STATE_CSUM_FULL, &c->rq.state);
+ if (MLX5_CAP_ETH(mdev, cqe_checksum_full))
+ __set_bit(MLX5E_RQ_STATE_CSUM_FULL, &rq->state);
if (params->rx_dim_enabled)
- __set_bit(MLX5E_RQ_STATE_AM, &c->rq.state);
+ __set_bit(MLX5E_RQ_STATE_AM, &rq->state);
/* We disable csum_complete when XDP is enabled since
* XDP programs might manipulate packets which will render
* skb->checksum incorrect.
*/
- if (MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_NO_CSUM_COMPLETE) || c->xdp)
- __set_bit(MLX5E_RQ_STATE_NO_CSUM_COMPLETE, &c->rq.state);
+ if (MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_NO_CSUM_COMPLETE) || params->xdp_prog)
+ __set_bit(MLX5E_RQ_STATE_NO_CSUM_COMPLETE, &rq->state);
/* For CQE compression on striding RQ, use stride index provided by
* HW if capability is supported.
*/
if (MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_STRIDING_RQ) &&
- MLX5_CAP_GEN(c->mdev, mini_cqe_resp_stride_index))
- __set_bit(MLX5E_RQ_STATE_MINI_CQE_HW_STRIDX, &c->rq.state);
+ MLX5_CAP_GEN(mdev, mini_cqe_resp_stride_index))
+ __set_bit(MLX5E_RQ_STATE_MINI_CQE_HW_STRIDX, &rq->state);
return 0;
@@ -942,7 +889,10 @@ err_free_rq:
void mlx5e_activate_rq(struct mlx5e_rq *rq)
{
set_bit(MLX5E_RQ_STATE_ENABLED, &rq->state);
- mlx5e_trigger_irq(rq->icosq);
+ if (rq->icosq)
+ mlx5e_trigger_irq(rq->icosq);
+ else
+ napi_schedule(rq->cq.napi);
}
void mlx5e_deactivate_rq(struct mlx5e_rq *rq)
@@ -954,7 +904,8 @@ void mlx5e_deactivate_rq(struct mlx5e_rq *rq)
void mlx5e_close_rq(struct mlx5e_rq *rq)
{
cancel_work_sync(&rq->dim.work);
- cancel_work_sync(&rq->icosq->recover_work);
+ if (rq->icosq)
+ cancel_work_sync(&rq->icosq->recover_work);
cancel_work_sync(&rq->recover_work);
mlx5e_destroy_rq(rq);
mlx5e_free_rx_descs(rq);
@@ -1019,7 +970,7 @@ static int mlx5e_alloc_xdpsq(struct mlx5e_channel *c,
sq->pdev = c->pdev;
sq->mkey_be = c->mkey_be;
sq->channel = c;
- sq->uar_map = mdev->mlx5e_res.bfreg.map;
+ sq->uar_map = mdev->mlx5e_res.hw_objs.bfreg.map;
sq->min_inline_mode = params->tx_min_inline_mode;
sq->hw_mtu = MLX5E_SW2HW_MTU(params, params->sw_mtu);
sq->xsk_pool = xsk_pool;
@@ -1090,7 +1041,7 @@ static int mlx5e_alloc_icosq(struct mlx5e_channel *c,
int err;
sq->channel = c;
- sq->uar_map = mdev->mlx5e_res.bfreg.map;
+ sq->uar_map = mdev->mlx5e_res.hw_objs.bfreg.map;
sq->reserved_room = param->stop_room;
param->wq.db_numa_node = cpu_to_node(c->cpu);
@@ -1175,7 +1126,7 @@ static int mlx5e_alloc_txqsq(struct mlx5e_channel *c,
sq->priv = c->priv;
sq->ch_ix = c->ix;
sq->txq_ix = txq_ix;
- sq->uar_map = mdev->mlx5e_res.bfreg.map;
+ sq->uar_map = mdev->mlx5e_res.hw_objs.bfreg.map;
sq->min_inline_mode = params->tx_min_inline_mode;
sq->hw_mtu = MLX5E_SW2HW_MTU(params, params->sw_mtu);
INIT_WORK(&sq->recover_work, mlx5e_tx_err_cqe_work);
@@ -1183,14 +1134,10 @@ static int mlx5e_alloc_txqsq(struct mlx5e_channel *c,
set_bit(MLX5E_SQ_STATE_VLAN_NEED_L2_INLINE, &sq->state);
if (MLX5_IPSEC_DEV(c->priv->mdev))
set_bit(MLX5E_SQ_STATE_IPSEC, &sq->state);
- if (mlx5_accel_is_tls_device(c->priv->mdev))
- set_bit(MLX5E_SQ_STATE_TLS, &sq->state);
if (param->is_mpw)
set_bit(MLX5E_SQ_STATE_MPWQE, &sq->state);
sq->stop_room = param->stop_room;
- sq->ptp_cyc2time = mlx5_is_real_time_sq(mdev) ?
- mlx5_real_time_cyc2time :
- mlx5_timecounter_cyc2time;
+ sq->ptp_cyc2time = mlx5_sq_ts_translator(mdev);
param->wq.db_numa_node = cpu_to_node(c->cpu);
err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, wq, &sq->wq_ctrl);
@@ -1258,7 +1205,7 @@ static int mlx5e_create_sq(struct mlx5_core_dev *mdev,
MLX5_SET(sqc, sqc, flush_in_error_en, 1);
MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_CYCLIC);
- MLX5_SET(wq, wq, uar_page, mdev->mlx5e_res.bfreg.index);
+ MLX5_SET(wq, wq, uar_page, mdev->mlx5e_res.hw_objs.bfreg.index);
MLX5_SET(wq, wq, log_wq_pg_sz, csp->wq_ctrl->buf.page_shift -
MLX5_ADAPTER_PAGE_SHIFT);
MLX5_SET64(wq, wq, dbr_addr, csp->wq_ctrl->db.dma);
@@ -1462,8 +1409,17 @@ int mlx5e_open_icosq(struct mlx5e_channel *c, struct mlx5e_params *params,
if (err)
goto err_free_icosq;
+ if (param->is_tls) {
+ sq->ktls_resync = mlx5e_ktls_rx_resync_create_resp_list();
+ if (IS_ERR(sq->ktls_resync)) {
+ err = PTR_ERR(sq->ktls_resync);
+ goto err_destroy_icosq;
+ }
+ }
return 0;
+err_destroy_icosq:
+ mlx5e_destroy_sq(c->mdev, sq->sqn);
err_free_icosq:
mlx5e_free_icosq(sq);
@@ -1485,6 +1441,8 @@ void mlx5e_close_icosq(struct mlx5e_icosq *sq)
{
struct mlx5e_channel *c = sq->channel;
+ if (sq->ktls_resync)
+ mlx5e_ktls_rx_resync_destroy_resp_list(sq->ktls_resync);
mlx5e_destroy_sq(c->mdev, sq->sqn);
mlx5e_free_icosq_descs(sq);
mlx5e_free_icosq(sq);
@@ -1861,14 +1819,16 @@ static int mlx5e_set_tx_maxrate(struct net_device *dev, int index, u32 rate)
return err;
}
-void mlx5e_build_create_cq_param(struct mlx5e_create_cq_param *ccp, struct mlx5e_channel *c)
+static int mlx5e_open_rxq_rq(struct mlx5e_channel *c, struct mlx5e_params *params,
+ struct mlx5e_rq_param *rq_params)
{
- *ccp = (struct mlx5e_create_cq_param) {
- .napi = &c->napi,
- .ch_stats = c->stats,
- .node = cpu_to_node(c->cpu),
- .ix = c->ix,
- };
+ int err;
+
+ err = mlx5e_init_rxq_rq(c, params, &c->rq);
+ if (err)
+ return err;
+
+ return mlx5e_open_rq(params, rq_params, NULL, cpu_to_node(c->cpu), &c->rq);
}
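/* The init/open split above is what allows an RQ to exist outside a
 * regular channel: mlx5e_init_rxq_rq() does the channel wiring, while
 * mlx5e_alloc_rq()/mlx5e_open_rq() consume only params and a NUMA node.
 * A channel-less user (the PTP RX queue in this series) can run its own
 * init step and then reuse the same open path, roughly:
 *
 *	err = init_special_rq(priv, params, &rq);	(hypothetical init)
 *	if (!err)
 *		err = mlx5e_open_rq(params, rq_param, NULL, node, &rq);
 */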
static int mlx5e_open_queues(struct mlx5e_channel *c,
@@ -1931,7 +1891,7 @@ static int mlx5e_open_queues(struct mlx5e_channel *c,
goto err_close_sqs;
}
- err = mlx5e_open_rq(c, params, &cparam->rq, NULL, NULL, &c->rq);
+ err = mlx5e_open_rxq_rq(c, params, &cparam->rq);
if (err)
goto err_close_xdp_sq;
@@ -2033,7 +1993,7 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
c->cpu = cpu;
c->pdev = mlx5_core_dma_dev(priv->mdev);
c->netdev = priv->netdev;
- c->mkey_be = cpu_to_be32(priv->mdev->mlx5e_res.mkey.key);
+ c->mkey_be = cpu_to_be32(priv->mdev->mlx5e_res.hw_objs.mkey.key);
c->num_tc = params->num_tc;
c->xdp = !!params->xdp_prog;
c->stats = &priv->channel_stats[ix].ch;
@@ -2112,314 +2072,6 @@ static void mlx5e_close_channel(struct mlx5e_channel *c)
kvfree(c);
}
-#define DEFAULT_FRAG_SIZE (2048)
-
-static void mlx5e_build_rq_frags_info(struct mlx5_core_dev *mdev,
- struct mlx5e_params *params,
- struct mlx5e_xsk_param *xsk,
- struct mlx5e_rq_frags_info *info)
-{
- u32 byte_count = MLX5E_SW2HW_MTU(params, params->sw_mtu);
- int frag_size_max = DEFAULT_FRAG_SIZE;
- u32 buf_size = 0;
- int i;
-
- if (mlx5_fpga_is_ipsec_device(mdev))
- byte_count += MLX5E_METADATA_ETHER_LEN;
-
- if (mlx5e_rx_is_linear_skb(params, xsk)) {
- int frag_stride;
-
- frag_stride = mlx5e_rx_get_linear_frag_sz(params, xsk);
- frag_stride = roundup_pow_of_two(frag_stride);
-
- info->arr[0].frag_size = byte_count;
- info->arr[0].frag_stride = frag_stride;
- info->num_frags = 1;
- info->wqe_bulk = PAGE_SIZE / frag_stride;
- goto out;
- }
-
- if (byte_count > PAGE_SIZE +
- (MLX5E_MAX_RX_FRAGS - 1) * frag_size_max)
- frag_size_max = PAGE_SIZE;
-
- i = 0;
- while (buf_size < byte_count) {
- int frag_size = byte_count - buf_size;
-
- if (i < MLX5E_MAX_RX_FRAGS - 1)
- frag_size = min(frag_size, frag_size_max);
-
- info->arr[i].frag_size = frag_size;
- info->arr[i].frag_stride = roundup_pow_of_two(frag_size);
-
- buf_size += frag_size;
- i++;
- }
- info->num_frags = i;
- /* number of different wqes sharing a page */
- info->wqe_bulk = 1 + (info->num_frags % 2);
-
-out:
- info->wqe_bulk = max_t(u8, info->wqe_bulk, 8);
- info->log_num_frags = order_base_2(info->num_frags);
-}
-
-static inline u8 mlx5e_get_rqwq_log_stride(u8 wq_type, int ndsegs)
-{
- int sz = sizeof(struct mlx5_wqe_data_seg) * ndsegs;
-
- switch (wq_type) {
- case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
- sz += sizeof(struct mlx5e_rx_wqe_ll);
- break;
- default: /* MLX5_WQ_TYPE_CYCLIC */
- sz += sizeof(struct mlx5e_rx_wqe_cyc);
- }
-
- return order_base_2(sz);
-}
-
-static u8 mlx5e_get_rq_log_wq_sz(void *rqc)
-{
- void *wq = MLX5_ADDR_OF(rqc, rqc, wq);
-
- return MLX5_GET(wq, wq, log_wq_sz);
-}
-
-void mlx5e_build_rq_param(struct mlx5e_priv *priv,
- struct mlx5e_params *params,
- struct mlx5e_xsk_param *xsk,
- struct mlx5e_rq_param *param)
-{
- struct mlx5_core_dev *mdev = priv->mdev;
- void *rqc = param->rqc;
- void *wq = MLX5_ADDR_OF(rqc, rqc, wq);
- int ndsegs = 1;
-
- switch (params->rq_wq_type) {
- case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
- MLX5_SET(wq, wq, log_wqe_num_of_strides,
- mlx5e_mpwqe_get_log_num_strides(mdev, params, xsk) -
- MLX5_MPWQE_LOG_NUM_STRIDES_BASE);
- MLX5_SET(wq, wq, log_wqe_stride_size,
- mlx5e_mpwqe_get_log_stride_size(mdev, params, xsk) -
- MLX5_MPWQE_LOG_STRIDE_SZ_BASE);
- MLX5_SET(wq, wq, log_wq_sz, mlx5e_mpwqe_get_log_rq_size(params, xsk));
- break;
- default: /* MLX5_WQ_TYPE_CYCLIC */
- MLX5_SET(wq, wq, log_wq_sz, params->log_rq_mtu_frames);
- mlx5e_build_rq_frags_info(mdev, params, xsk, &param->frags_info);
- ndsegs = param->frags_info.num_frags;
- }
-
- MLX5_SET(wq, wq, wq_type, params->rq_wq_type);
- MLX5_SET(wq, wq, end_padding_mode, MLX5_WQ_END_PAD_MODE_ALIGN);
- MLX5_SET(wq, wq, log_wq_stride,
- mlx5e_get_rqwq_log_stride(params->rq_wq_type, ndsegs));
- MLX5_SET(wq, wq, pd, mdev->mlx5e_res.pdn);
- MLX5_SET(rqc, rqc, counter_set_id, priv->q_counter);
- MLX5_SET(rqc, rqc, vsd, params->vlan_strip_disable);
- MLX5_SET(rqc, rqc, scatter_fcs, params->scatter_fcs_en);
-
- param->wq.buf_numa_node = dev_to_node(mlx5_core_dma_dev(mdev));
- mlx5e_build_rx_cq_param(priv, params, xsk, &param->cqp);
-}
-
-static void mlx5e_build_drop_rq_param(struct mlx5e_priv *priv,
- struct mlx5e_rq_param *param)
-{
- struct mlx5_core_dev *mdev = priv->mdev;
- void *rqc = param->rqc;
- void *wq = MLX5_ADDR_OF(rqc, rqc, wq);
-
- MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_CYCLIC);
- MLX5_SET(wq, wq, log_wq_stride,
- mlx5e_get_rqwq_log_stride(MLX5_WQ_TYPE_CYCLIC, 1));
- MLX5_SET(rqc, rqc, counter_set_id, priv->drop_rq_q_counter);
-
- param->wq.buf_numa_node = dev_to_node(mlx5_core_dma_dev(mdev));
-}
-
-void mlx5e_build_sq_param_common(struct mlx5e_priv *priv,
- struct mlx5e_sq_param *param)
-{
- void *sqc = param->sqc;
- void *wq = MLX5_ADDR_OF(sqc, sqc, wq);
-
- MLX5_SET(wq, wq, log_wq_stride, ilog2(MLX5_SEND_WQE_BB));
- MLX5_SET(wq, wq, pd, priv->mdev->mlx5e_res.pdn);
-
- param->wq.buf_numa_node = dev_to_node(mlx5_core_dma_dev(priv->mdev));
-}
-
-void mlx5e_build_sq_param(struct mlx5e_priv *priv, struct mlx5e_params *params,
- struct mlx5e_sq_param *param)
-{
- void *sqc = param->sqc;
- void *wq = MLX5_ADDR_OF(sqc, sqc, wq);
- bool allow_swp;
-
- allow_swp = mlx5_geneve_tx_allowed(priv->mdev) ||
- !!MLX5_IPSEC_DEV(priv->mdev);
- mlx5e_build_sq_param_common(priv, param);
- MLX5_SET(wq, wq, log_wq_sz, params->log_sq_size);
- MLX5_SET(sqc, sqc, allow_swp, allow_swp);
- param->is_mpw = MLX5E_GET_PFLAG(params, MLX5E_PFLAG_SKB_TX_MPWQE);
- param->stop_room = mlx5e_calc_sq_stop_room(priv->mdev, params);
- mlx5e_build_tx_cq_param(priv, params, &param->cqp);
-}
-
-static void mlx5e_build_common_cq_param(struct mlx5e_priv *priv,
- struct mlx5e_cq_param *param)
-{
- void *cqc = param->cqc;
-
- MLX5_SET(cqc, cqc, uar_page, priv->mdev->priv.uar->index);
- if (MLX5_CAP_GEN(priv->mdev, cqe_128_always) && cache_line_size() >= 128)
- MLX5_SET(cqc, cqc, cqe_sz, CQE_STRIDE_128_PAD);
-}
-
-void mlx5e_build_rx_cq_param(struct mlx5e_priv *priv,
- struct mlx5e_params *params,
- struct mlx5e_xsk_param *xsk,
- struct mlx5e_cq_param *param)
-{
- struct mlx5_core_dev *mdev = priv->mdev;
- bool hw_stridx = false;
- void *cqc = param->cqc;
- u8 log_cq_size;
-
- switch (params->rq_wq_type) {
- case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
- log_cq_size = mlx5e_mpwqe_get_log_rq_size(params, xsk) +
- mlx5e_mpwqe_get_log_num_strides(mdev, params, xsk);
- hw_stridx = MLX5_CAP_GEN(mdev, mini_cqe_resp_stride_index);
- break;
- default: /* MLX5_WQ_TYPE_CYCLIC */
- log_cq_size = params->log_rq_mtu_frames;
- }
-
- MLX5_SET(cqc, cqc, log_cq_size, log_cq_size);
- if (MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS)) {
- MLX5_SET(cqc, cqc, mini_cqe_res_format, hw_stridx ?
- MLX5_CQE_FORMAT_CSUM_STRIDX : MLX5_CQE_FORMAT_CSUM);
- MLX5_SET(cqc, cqc, cqe_comp_en, 1);
- }
-
- mlx5e_build_common_cq_param(priv, param);
- param->cq_period_mode = params->rx_cq_moderation.cq_period_mode;
-}
-
-void mlx5e_build_tx_cq_param(struct mlx5e_priv *priv,
- struct mlx5e_params *params,
- struct mlx5e_cq_param *param)
-{
- void *cqc = param->cqc;
-
- MLX5_SET(cqc, cqc, log_cq_size, params->log_sq_size);
-
- mlx5e_build_common_cq_param(priv, param);
- param->cq_period_mode = params->tx_cq_moderation.cq_period_mode;
-}
-
-void mlx5e_build_ico_cq_param(struct mlx5e_priv *priv,
- u8 log_wq_size,
- struct mlx5e_cq_param *param)
-{
- void *cqc = param->cqc;
-
- MLX5_SET(cqc, cqc, log_cq_size, log_wq_size);
-
- mlx5e_build_common_cq_param(priv, param);
-
- param->cq_period_mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
-}
-
-void mlx5e_build_icosq_param(struct mlx5e_priv *priv,
- u8 log_wq_size,
- struct mlx5e_sq_param *param)
-{
- void *sqc = param->sqc;
- void *wq = MLX5_ADDR_OF(sqc, sqc, wq);
-
- mlx5e_build_sq_param_common(priv, param);
-
- MLX5_SET(wq, wq, log_wq_sz, log_wq_size);
- MLX5_SET(sqc, sqc, reg_umr, MLX5_CAP_ETH(priv->mdev, reg_umr_sq));
- mlx5e_build_ico_cq_param(priv, log_wq_size, &param->cqp);
-}
-
-static void mlx5e_build_async_icosq_param(struct mlx5e_priv *priv,
- struct mlx5e_params *params,
- u8 log_wq_size,
- struct mlx5e_sq_param *param)
-{
- void *sqc = param->sqc;
- void *wq = MLX5_ADDR_OF(sqc, sqc, wq);
-
- mlx5e_build_sq_param_common(priv, param);
-
- /* async_icosq is used by XSK only if xdp_prog is active */
- if (params->xdp_prog)
- param->stop_room = mlx5e_stop_room_for_wqe(1); /* for XSK NOP */
- MLX5_SET(sqc, sqc, reg_umr, MLX5_CAP_ETH(priv->mdev, reg_umr_sq));
- MLX5_SET(wq, wq, log_wq_sz, log_wq_size);
- mlx5e_build_ico_cq_param(priv, log_wq_size, &param->cqp);
-}
-
-void mlx5e_build_xdpsq_param(struct mlx5e_priv *priv,
- struct mlx5e_params *params,
- struct mlx5e_sq_param *param)
-{
- void *sqc = param->sqc;
- void *wq = MLX5_ADDR_OF(sqc, sqc, wq);
-
- mlx5e_build_sq_param_common(priv, param);
- MLX5_SET(wq, wq, log_wq_sz, params->log_sq_size);
- param->is_mpw = MLX5E_GET_PFLAG(params, MLX5E_PFLAG_XDP_TX_MPWQE);
- mlx5e_build_tx_cq_param(priv, params, &param->cqp);
-}
-
-static u8 mlx5e_build_icosq_log_wq_sz(struct mlx5e_params *params,
- struct mlx5e_rq_param *rqp)
-{
- switch (params->rq_wq_type) {
- case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
- return max_t(u8, MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE,
- order_base_2(MLX5E_UMR_WQEBBS) +
- mlx5e_get_rq_log_wq_sz(rqp->rqc));
- default: /* MLX5_WQ_TYPE_CYCLIC */
- return MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE;
- }
-}
-
-static u8 mlx5e_build_async_icosq_log_wq_sz(struct net_device *netdev)
-{
- if (netdev->hw_features & NETIF_F_HW_TLS_RX)
- return MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE;
-
- return MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE;
-}
-
-static void mlx5e_build_channel_param(struct mlx5e_priv *priv,
- struct mlx5e_params *params,
- struct mlx5e_channel_param *cparam)
-{
- u8 icosq_log_wq_sz, async_icosq_log_wq_sz;
-
- mlx5e_build_rq_param(priv, params, NULL, &cparam->rq);
-
- icosq_log_wq_sz = mlx5e_build_icosq_log_wq_sz(params, &cparam->rq);
- async_icosq_log_wq_sz = mlx5e_build_async_icosq_log_wq_sz(priv->netdev);
-
- mlx5e_build_sq_param(priv, params, &cparam->txq_sq);
- mlx5e_build_xdpsq_param(priv, params, &cparam->xdp_sq);
- mlx5e_build_icosq_param(priv, icosq_log_wq_sz, &cparam->icosq);
- mlx5e_build_async_icosq_param(priv, params, async_icosq_log_wq_sz, &cparam->async_icosq);
-}
-
int mlx5e_open_channels(struct mlx5e_priv *priv,
struct mlx5e_channels *chs)
{
@@ -2434,7 +2086,10 @@ int mlx5e_open_channels(struct mlx5e_priv *priv,
if (!chs->c || !cparam)
goto err_free;
- mlx5e_build_channel_param(priv, &chs->params, cparam);
+ err = mlx5e_build_channel_param(priv->mdev, &chs->params, priv->q_counter, cparam);
+ if (err)
+ goto err_free;
+
for (i = 0; i < chs->num; i++) {
struct xsk_buff_pool *xsk_pool = NULL;
@@ -2446,9 +2101,8 @@ int mlx5e_open_channels(struct mlx5e_priv *priv,
goto err_close_channels;
}
- if (MLX5E_GET_PFLAG(&chs->params, MLX5E_PFLAG_TX_PORT_TS)) {
- err = mlx5e_port_ptp_open(priv, &chs->params, chs->c[0]->lag_port,
- &chs->port_ptp);
+ if (MLX5E_GET_PFLAG(&chs->params, MLX5E_PFLAG_TX_PORT_TS) || chs->params.ptp_rx) {
+ err = mlx5e_ptp_open(priv, &chs->params, chs->c[0]->lag_port, &chs->ptp);
if (err)
goto err_close_channels;
}
@@ -2462,8 +2116,8 @@ int mlx5e_open_channels(struct mlx5e_priv *priv,
return 0;
err_close_ptp:
- if (chs->port_ptp)
- mlx5e_port_ptp_close(chs->port_ptp);
+ if (chs->ptp)
+ mlx5e_ptp_close(chs->ptp);
err_close_channels:
for (i--; i >= 0; i--)
@@ -2483,8 +2137,8 @@ static void mlx5e_activate_channels(struct mlx5e_channels *chs)
for (i = 0; i < chs->num; i++)
mlx5e_activate_channel(chs->c[i]);
- if (chs->port_ptp)
- mlx5e_ptp_activate_channel(chs->port_ptp);
+ if (chs->ptp)
+ mlx5e_ptp_activate_channel(chs->ptp);
}
#define MLX5E_RQ_WQES_TIMEOUT 20000 /* msecs */
@@ -2511,8 +2165,8 @@ static void mlx5e_deactivate_channels(struct mlx5e_channels *chs)
{
int i;
- if (chs->port_ptp)
- mlx5e_ptp_deactivate_channel(chs->port_ptp);
+ if (chs->ptp)
+ mlx5e_ptp_deactivate_channel(chs->ptp);
for (i = 0; i < chs->num; i++)
mlx5e_deactivate_channel(chs->c[i]);
@@ -2522,11 +2176,10 @@ void mlx5e_close_channels(struct mlx5e_channels *chs)
{
int i;
- if (chs->port_ptp) {
- mlx5e_port_ptp_close(chs->port_ptp);
- chs->port_ptp = NULL;
+ if (chs->ptp) {
+ mlx5e_ptp_close(chs->ptp);
+ chs->ptp = NULL;
}
-
for (i = 0; i < chs->num; i++)
mlx5e_close_channel(chs->c[i]);
@@ -2582,12 +2235,12 @@ int mlx5e_create_indirect_rqt(struct mlx5e_priv *priv)
return err;
}
-int mlx5e_create_direct_rqts(struct mlx5e_priv *priv, struct mlx5e_tir *tirs)
+int mlx5e_create_direct_rqts(struct mlx5e_priv *priv, struct mlx5e_tir *tirs, int n)
{
int err;
int ix;
- for (ix = 0; ix < priv->max_nch; ix++) {
+ for (ix = 0; ix < n; ix++) {
err = mlx5e_create_rqt(priv, 1 /*size */, &tirs[ix].rqt);
if (unlikely(err))
goto err_destroy_rqts;
@@ -2603,11 +2256,11 @@ err_destroy_rqts:
return err;
}
-void mlx5e_destroy_direct_rqts(struct mlx5e_priv *priv, struct mlx5e_tir *tirs)
+void mlx5e_destroy_direct_rqts(struct mlx5e_priv *priv, struct mlx5e_tir *tirs, int n)
{
int i;
- for (i = 0; i < priv->max_nch; i++)
+ for (i = 0; i < n; i++)
mlx5e_destroy_rqt(priv, &tirs[i].rqt);
}
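
The explicit count argument lets these helpers manage both the per-channel arrays (max_nch entries) and the new single-entry PTP TIR. A minimal sketch of the two call shapes, assuming the extended signatures above; the wrapper function name is hypothetical:

/* Sketch only: example_create_rqts() is a hypothetical wrapper
 * illustrating the new "n" argument; error unwind abbreviated.
 */
static int example_create_rqts(struct mlx5e_priv *priv)
{
	int err;

	/* one direct RQT per channel, as before */
	err = mlx5e_create_direct_rqts(priv, priv->direct_tir, priv->max_nch);
	if (err)
		return err;

	/* a single RQT for the PTP-RX special channel */
	err = mlx5e_create_direct_rqts(priv, &priv->ptp_tir, 1);
	if (err)
		mlx5e_destroy_direct_rqts(priv, priv->direct_tir, priv->max_nch);

	return err;
}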
@@ -2690,7 +2343,8 @@ static u32 mlx5e_get_direct_rqn(struct mlx5e_priv *priv, int ix,
}
static void mlx5e_redirect_rqts(struct mlx5e_priv *priv,
- struct mlx5e_redirect_rqt_param rrp)
+ struct mlx5e_redirect_rqt_param rrp,
+ struct mlx5e_redirect_rqt_param *ptp_rrp)
{
u32 rqtn;
int ix;
@@ -2716,11 +2370,17 @@ static void mlx5e_redirect_rqts(struct mlx5e_priv *priv,
rqtn = priv->direct_tir[ix].rqt.rqtn;
mlx5e_redirect_rqt(priv, rqtn, 1, direct_rrp);
}
+ if (ptp_rrp) {
+ rqtn = priv->ptp_tir.rqt.rqtn;
+ mlx5e_redirect_rqt(priv, rqtn, 1, *ptp_rrp);
+ }
}
static void mlx5e_redirect_rqts_to_channels(struct mlx5e_priv *priv,
struct mlx5e_channels *chs)
{
+ bool rx_ptp_support = priv->profile->rx_ptp_support;
+ struct mlx5e_redirect_rqt_param *ptp_rrp_p = NULL;
struct mlx5e_redirect_rqt_param rrp = {
.is_rss = true,
{
@@ -2730,12 +2390,22 @@ static void mlx5e_redirect_rqts_to_channels(struct mlx5e_priv *priv,
}
},
};
+ struct mlx5e_redirect_rqt_param ptp_rrp;
+
+ if (rx_ptp_support) {
+ u32 ptp_rqn;
- mlx5e_redirect_rqts(priv, rrp);
+ ptp_rrp.is_rss = false;
+ ptp_rrp.rqn = mlx5e_ptp_get_rqn(priv->channels.ptp, &ptp_rqn) ?
+ priv->drop_rq.rqn : ptp_rqn;
+ ptp_rrp_p = &ptp_rrp;
+ }
+ mlx5e_redirect_rqts(priv, rrp, ptp_rrp_p);
}
static void mlx5e_redirect_rqts_to_drop(struct mlx5e_priv *priv)
{
+ bool rx_ptp_support = priv->profile->rx_ptp_support;
struct mlx5e_redirect_rqt_param drop_rrp = {
.is_rss = false,
{
@@ -2743,7 +2413,7 @@ static void mlx5e_redirect_rqts_to_drop(struct mlx5e_priv *priv)
},
};
- mlx5e_redirect_rqts(priv, drop_rrp);
+ mlx5e_redirect_rqts(priv, drop_rrp, rx_ptp_support ? &drop_rrp : NULL);
}
static const struct mlx5e_tirc_config tirc_default_config[MLX5E_NUM_INDIR_TIRS] = {
@@ -3032,6 +2702,8 @@ static int mlx5e_update_netdev_queues(struct mlx5e_priv *priv)
nch = priv->channels.params.num_channels;
ntc = priv->channels.params.num_tc;
num_rxqs = nch * priv->profile->rq_groups;
+ if (priv->channels.params.ptp_rx)
+ num_rxqs++;
mlx5e_netdev_set_tcs(netdev, nch, ntc);
@@ -3117,11 +2789,14 @@ static void mlx5e_build_txq_maps(struct mlx5e_priv *priv)
}
}
- if (!priv->channels.port_ptp)
+ if (!priv->channels.ptp)
+ return;
+
+ if (!test_bit(MLX5E_PTP_STATE_TX, priv->channels.ptp->state))
return;
for (tc = 0; tc < num_tc; tc++) {
- struct mlx5e_port_ptp *c = priv->channels.port_ptp;
+ struct mlx5e_ptp *c = priv->channels.ptp;
struct mlx5e_txqsq *sq = &c->ptpsq[tc].txqsq;
priv->txq2sq[sq->txq_ix] = sq;
@@ -3172,6 +2847,29 @@ void mlx5e_deactivate_priv_channels(struct mlx5e_priv *priv)
mlx5e_deactivate_channels(&priv->channels);
}
+static int mlx5e_switch_priv_params(struct mlx5e_priv *priv,
+ struct mlx5e_params *new_params,
+ mlx5e_fp_preactivate preactivate,
+ void *context)
+{
+ struct mlx5e_params old_params;
+
+ old_params = priv->channels.params;
+ priv->channels.params = *new_params;
+
+ if (preactivate) {
+ int err;
+
+ err = preactivate(priv, context);
+ if (err) {
+ priv->channels.params = old_params;
+ return err;
+ }
+ }
+
+ return 0;
+}
+
static int mlx5e_switch_priv_channels(struct mlx5e_priv *priv,
struct mlx5e_channels *new_chs,
mlx5e_fp_preactivate preactivate,
@@ -3214,35 +2912,32 @@ out:
return err;
}
-int mlx5e_safe_switch_channels(struct mlx5e_priv *priv,
- struct mlx5e_channels *new_chs,
- mlx5e_fp_preactivate preactivate,
- void *context)
+int mlx5e_safe_switch_params(struct mlx5e_priv *priv,
+ struct mlx5e_params *params,
+ mlx5e_fp_preactivate preactivate,
+ void *context, bool reset)
{
+ struct mlx5e_channels new_chs = {};
int err;
- err = mlx5e_open_channels(priv, new_chs);
+ reset &= test_bit(MLX5E_STATE_OPENED, &priv->state);
+ if (!reset)
+ return mlx5e_switch_priv_params(priv, params, preactivate, context);
+
+ new_chs.params = *params;
+ err = mlx5e_open_channels(priv, &new_chs);
if (err)
return err;
-
- err = mlx5e_switch_priv_channels(priv, new_chs, preactivate, context);
+ err = mlx5e_switch_priv_channels(priv, &new_chs, preactivate, context);
if (err)
- goto err_close;
-
- return 0;
-
-err_close:
- mlx5e_close_channels(new_chs);
+ mlx5e_close_channels(&new_chs);
return err;
}
int mlx5e_safe_reopen_channels(struct mlx5e_priv *priv)
{
- struct mlx5e_channels new_channels = {};
-
- new_channels.params = priv->channels.params;
- return mlx5e_safe_switch_channels(priv, &new_channels, NULL, NULL);
+ return mlx5e_safe_switch_params(priv, &priv->channels.params, NULL, NULL, true);
}
void mlx5e_timestamp_init(struct mlx5e_priv *priv)
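
The hunk above is the heart of the refactor: callers stop building a struct mlx5e_channels themselves and instead hand a modified struct mlx5e_params copy to mlx5e_safe_switch_params(), which either swaps the params in place (device closed, or no reset required) or opens a fresh set of channels and switches over. A sketch of the migrated caller pattern; the toggled field "some_knob" is hypothetical:

/* Sketch of the new caller pattern, assuming the signatures above.
 * "some_knob" is a hypothetical params field used for illustration.
 */
static int example_set_some_knob(struct mlx5e_priv *priv, bool enable)
{
	struct mlx5e_params new_params;
	int err;

	mutex_lock(&priv->state_lock);

	new_params = priv->channels.params;	/* copy current params */
	new_params.some_knob = enable;

	/* With reset=true: if the netdev is open, channels are rebuilt;
	 * otherwise only the params are swapped and the preactivate
	 * callback (NULL here) would still be invoked.
	 */
	err = mlx5e_safe_switch_params(priv, &new_params, NULL, NULL, true);

	mutex_unlock(&priv->state_lock);
	return err;
}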
@@ -3395,7 +3090,7 @@ int mlx5e_open_drop_rq(struct mlx5e_priv *priv,
struct mlx5e_cq *cq = &drop_rq->cq;
int err;
- mlx5e_build_drop_rq_param(priv, &rq_param);
+ mlx5e_build_drop_rq_param(mdev, priv->drop_rq_q_counter, &rq_param);
err = mlx5e_alloc_drop_cq(priv, cq, &cq_param);
if (err)
@@ -3443,10 +3138,10 @@ int mlx5e_create_tis(struct mlx5_core_dev *mdev, void *in, u32 *tisn)
{
void *tisc = MLX5_ADDR_OF(create_tis_in, in, ctx);
- MLX5_SET(tisc, tisc, transport_domain, mdev->mlx5e_res.td.tdn);
+ MLX5_SET(tisc, tisc, transport_domain, mdev->mlx5e_res.hw_objs.td.tdn);
if (MLX5_GET(tisc, tisc, tls_en))
- MLX5_SET(tisc, tisc, pd, mdev->mlx5e_res.pdn);
+ MLX5_SET(tisc, tisc, pd, mdev->mlx5e_res.hw_objs.pdn);
if (mlx5_lag_is_lacp_owner(mdev))
MLX5_SET(tisc, tisc, strict_lag_tx_port_affinity, 1);
@@ -3516,7 +3211,7 @@ static void mlx5e_cleanup_nic_tx(struct mlx5e_priv *priv)
static void mlx5e_build_indir_tir_ctx_common(struct mlx5e_priv *priv,
u32 rqtn, u32 *tirc)
{
- MLX5_SET(tirc, tirc, transport_domain, priv->mdev->mlx5e_res.td.tdn);
+ MLX5_SET(tirc, tirc, transport_domain, priv->mdev->mlx5e_res.hw_objs.td.tdn);
MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_INDIRECT);
MLX5_SET(tirc, tirc, indirect_table, rqtn);
MLX5_SET(tirc, tirc, tunneled_offload_en,
@@ -3608,7 +3303,7 @@ err_destroy_inner_tirs:
return err;
}
-int mlx5e_create_direct_tirs(struct mlx5e_priv *priv, struct mlx5e_tir *tirs)
+int mlx5e_create_direct_tirs(struct mlx5e_priv *priv, struct mlx5e_tir *tirs, int n)
{
struct mlx5e_tir *tir;
void *tirc;
@@ -3622,7 +3317,7 @@ int mlx5e_create_direct_tirs(struct mlx5e_priv *priv, struct mlx5e_tir *tirs)
if (!in)
return -ENOMEM;
- for (ix = 0; ix < priv->max_nch; ix++) {
+ for (ix = 0; ix < n; ix++) {
memset(in, 0, inlen);
tir = &tirs[ix];
tirc = MLX5_ADDR_OF(create_tir_in, in, ctx);
@@ -3660,11 +3355,11 @@ void mlx5e_destroy_indirect_tirs(struct mlx5e_priv *priv)
mlx5e_destroy_tir(priv->mdev, &priv->inner_indir_tir[i]);
}
-void mlx5e_destroy_direct_tirs(struct mlx5e_priv *priv, struct mlx5e_tir *tirs)
+void mlx5e_destroy_direct_tirs(struct mlx5e_priv *priv, struct mlx5e_tir *tirs, int n)
{
int i;
- for (i = 0; i < priv->max_nch; i++)
+ for (i = 0; i < n; i++)
mlx5e_destroy_tir(priv->mdev, &tirs[i]);
}
@@ -3699,7 +3394,7 @@ static int mlx5e_modify_channels_vsd(struct mlx5e_channels *chs, bool vsd)
static int mlx5e_setup_tc_mqprio(struct mlx5e_priv *priv,
struct tc_mqprio_qopt *mqprio)
{
- struct mlx5e_channels new_channels = {};
+ struct mlx5e_params new_params;
u8 tc = mqprio->num_tc;
int err = 0;
@@ -3718,23 +3413,11 @@ static int mlx5e_setup_tc_mqprio(struct mlx5e_priv *priv,
goto out;
}
- new_channels.params = priv->channels.params;
- new_channels.params.num_tc = tc ? tc : 1;
-
- if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) {
- struct mlx5e_params old_params;
+ new_params = priv->channels.params;
+ new_params.num_tc = tc ? tc : 1;
- old_params = priv->channels.params;
- priv->channels.params = new_channels.params;
- err = mlx5e_num_channels_changed(priv);
- if (err)
- priv->channels.params = old_params;
-
- goto out;
- }
-
- err = mlx5e_safe_switch_channels(priv, &new_channels,
- mlx5e_num_channels_changed_ctx, NULL);
+ err = mlx5e_safe_switch_params(priv, &new_params,
+ mlx5e_num_channels_changed_ctx, NULL, true);
out:
priv->max_opened_tc = max_t(u8, priv->max_opened_tc,
@@ -3791,8 +3474,16 @@ static int mlx5e_setup_tc(struct net_device *dev, enum tc_setup_type type,
void *type_data)
{
struct mlx5e_priv *priv = netdev_priv(dev);
+ bool tc_unbind = false;
int err;
+ if (type == TC_SETUP_BLOCK &&
+ ((struct flow_block_offload *)type_data)->command == FLOW_BLOCK_UNBIND)
+ tc_unbind = true;
+
+ if (!netif_device_present(dev) && !tc_unbind)
+ return -ENODEV;
+
switch (type) {
case TC_SETUP_BLOCK: {
struct flow_block_offload *f = type_data;
@@ -3837,15 +3528,22 @@ void mlx5e_fold_sw_stats64(struct mlx5e_priv *priv, struct rtnl_link_stats64 *s)
s->tx_dropped += sq_stats->dropped;
}
}
- if (priv->port_ptp_opened) {
+ if (priv->tx_ptp_opened) {
for (i = 0; i < priv->max_opened_tc; i++) {
- struct mlx5e_sq_stats *sq_stats = &priv->port_ptp_stats.sq[i];
+ struct mlx5e_sq_stats *sq_stats = &priv->ptp_stats.sq[i];
s->tx_packets += sq_stats->packets;
s->tx_bytes += sq_stats->bytes;
s->tx_dropped += sq_stats->dropped;
}
}
+ if (priv->rx_ptp_opened) {
+ struct mlx5e_rq_stats *rq_stats = &priv->ptp_stats.rq;
+
+ s->rx_packets += rq_stats->packets;
+ s->rx_bytes += rq_stats->bytes;
+ s->multicast += rq_stats->mcast_packets;
+ }
}
void
@@ -3854,6 +3552,9 @@ mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats)
struct mlx5e_priv *priv = netdev_priv(dev);
struct mlx5e_pport_stats *pstats = &priv->stats.pport;
+ if (!netif_device_present(dev))
+ return;
+
/* In switchdev mode, monitor counters doesn't monitor
* rx/tx stats of 802_3. The update stats mechanism
* should keep the 802_3 layout counters updated
@@ -3895,11 +3596,19 @@ mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats)
stats->tx_errors = stats->tx_aborted_errors + stats->tx_carrier_errors;
}
+static void mlx5e_nic_set_rx_mode(struct mlx5e_priv *priv)
+{
+ if (mlx5e_is_uplink_rep(priv))
+ return; /* no rx mode for uplink rep */
+
+ queue_work(priv->wq, &priv->set_rx_mode_work);
+}
+
static void mlx5e_set_rx_mode(struct net_device *dev)
{
struct mlx5e_priv *priv = netdev_priv(dev);
- queue_work(priv->wq, &priv->set_rx_mode_work);
+ mlx5e_nic_set_rx_mode(priv);
}
static int mlx5e_set_mac(struct net_device *netdev, void *addr)
@@ -3914,7 +3623,7 @@ static int mlx5e_set_mac(struct net_device *netdev, void *addr)
ether_addr_copy(netdev->dev_addr, saddr->sa_data);
netif_addr_unlock_bh(netdev);
- queue_work(priv->wq, &priv->set_rx_mode_work);
+ mlx5e_nic_set_rx_mode(priv);
return 0;
}
@@ -3933,10 +3642,10 @@ static int set_feature_lro(struct net_device *netdev, bool enable)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
struct mlx5_core_dev *mdev = priv->mdev;
- struct mlx5e_channels new_channels = {};
struct mlx5e_params *cur_params;
+ struct mlx5e_params new_params;
+ bool reset = true;
int err = 0;
- bool reset;
mutex_lock(&priv->state_lock);
@@ -3954,30 +3663,17 @@ static int set_feature_lro(struct net_device *netdev, bool enable)
goto out;
}
- reset = test_bit(MLX5E_STATE_OPENED, &priv->state);
+ new_params = *cur_params;
+ new_params.lro_en = enable;
- new_channels.params = *cur_params;
- new_channels.params.lro_en = enable;
-
- if (cur_params->rq_wq_type != MLX5_WQ_TYPE_CYCLIC) {
+ if (cur_params->rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ) {
if (mlx5e_rx_mpwqe_is_linear_skb(mdev, cur_params, NULL) ==
- mlx5e_rx_mpwqe_is_linear_skb(mdev, &new_channels.params, NULL))
+ mlx5e_rx_mpwqe_is_linear_skb(mdev, &new_params, NULL))
reset = false;
}
- if (!reset) {
- struct mlx5e_params old_params;
-
- old_params = *cur_params;
- *cur_params = new_channels.params;
- err = mlx5e_modify_tirs_lro(priv);
- if (err)
- *cur_params = old_params;
- goto out;
- }
-
- err = mlx5e_safe_switch_channels(priv, &new_channels,
- mlx5e_modify_tirs_lro_ctx, NULL);
+ err = mlx5e_safe_switch_params(priv, &new_params,
+ mlx5e_modify_tirs_lro_ctx, NULL, reset);
out:
mutex_unlock(&priv->state_lock);
return err;
@@ -4136,7 +3832,8 @@ static netdev_features_t mlx5e_fix_features(struct net_device *netdev,
mutex_lock(&priv->state_lock);
params = &priv->channels.params;
- if (!bitmap_empty(priv->fs.vlan.active_svlans, VLAN_N_VID)) {
+ if (!priv->fs.vlan ||
+ !bitmap_empty(mlx5e_vlan_get_active_svlans(priv->fs.vlan), VLAN_N_VID)) {
/* HW strips the outer C-tag header, this is a problem
* for S-tag traffic.
*/
@@ -4205,26 +3902,23 @@ int mlx5e_change_mtu(struct net_device *netdev, int new_mtu,
mlx5e_fp_preactivate preactivate)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
- struct mlx5e_channels new_channels = {};
+ struct mlx5e_params new_params;
struct mlx5e_params *params;
+ bool reset = true;
int err = 0;
- bool reset;
mutex_lock(&priv->state_lock);
params = &priv->channels.params;
- reset = !params->lro_en;
- reset = reset && test_bit(MLX5E_STATE_OPENED, &priv->state);
-
- new_channels.params = *params;
- new_channels.params.sw_mtu = new_mtu;
- err = mlx5e_validate_params(priv, &new_channels.params);
+ new_params = *params;
+ new_params.sw_mtu = new_mtu;
+ err = mlx5e_validate_params(priv->mdev, &new_params);
if (err)
goto out;
if (params->xdp_prog &&
- !mlx5e_rx_is_linear_skb(&new_channels.params, NULL)) {
+ !mlx5e_rx_is_linear_skb(&new_params, NULL)) {
netdev_err(netdev, "MTU(%d) > %d is not allowed while XDP enabled\n",
new_mtu, mlx5e_xdp_max_mtu(params, NULL));
err = -EINVAL;
@@ -4233,47 +3927,34 @@ int mlx5e_change_mtu(struct net_device *netdev, int new_mtu,
if (priv->xsk.refcnt &&
!mlx5e_xsk_validate_mtu(netdev, &priv->channels,
- &new_channels.params, priv->mdev)) {
+ &new_params, priv->mdev)) {
err = -EINVAL;
goto out;
}
+ if (params->lro_en)
+ reset = false;
+
if (params->rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ) {
- bool is_linear = mlx5e_rx_mpwqe_is_linear_skb(priv->mdev,
- &new_channels.params,
- NULL);
+ bool is_linear_old = mlx5e_rx_mpwqe_is_linear_skb(priv->mdev, params, NULL);
+ bool is_linear_new = mlx5e_rx_mpwqe_is_linear_skb(priv->mdev,
+ &new_params, NULL);
u8 ppw_old = mlx5e_mpwqe_log_pkts_per_wqe(params, NULL);
- u8 ppw_new = mlx5e_mpwqe_log_pkts_per_wqe(&new_channels.params, NULL);
+ u8 ppw_new = mlx5e_mpwqe_log_pkts_per_wqe(&new_params, NULL);
- /* If XSK is active, XSK RQs are linear. */
- is_linear |= priv->xsk.refcnt;
-
- /* Always reset in linear mode - hw_mtu is used in data path. */
- reset = reset && (is_linear || (ppw_old != ppw_new));
- }
-
- if (!reset) {
- unsigned int old_mtu = params->sw_mtu;
-
- params->sw_mtu = new_mtu;
- if (preactivate) {
- err = preactivate(priv, NULL);
- if (err) {
- params->sw_mtu = old_mtu;
- goto out;
- }
- }
- netdev->mtu = params->sw_mtu;
- goto out;
+ /* Always reset in linear mode - hw_mtu is used in data path.
+ * Check that the mode was non-linear and didn't change.
+ * If XSK is active, XSK RQs are linear.
+ */
+ if (!is_linear_old && !is_linear_new && !priv->xsk.refcnt &&
+ ppw_old == ppw_new)
+ reset = false;
}
- err = mlx5e_safe_switch_channels(priv, &new_channels, preactivate, NULL);
- if (err)
- goto out;
-
- netdev->mtu = new_channels.params.sw_mtu;
+ err = mlx5e_safe_switch_params(priv, &new_params, preactivate, NULL, reset);
out:
+ netdev->mtu = params->sw_mtu;
mutex_unlock(&priv->state_lock);
return err;
}
@@ -4283,9 +3964,18 @@ static int mlx5e_change_nic_mtu(struct net_device *netdev, int new_mtu)
return mlx5e_change_mtu(netdev, new_mtu, mlx5e_set_dev_port_mtu_ctx);
}
+int mlx5e_ptp_rx_manage_fs_ctx(struct mlx5e_priv *priv, void *ctx)
+{
+ bool set = *(bool *)ctx;
+
+ return mlx5e_ptp_rx_manage_fs(priv, set);
+}
+
int mlx5e_hwstamp_set(struct mlx5e_priv *priv, struct ifreq *ifr)
{
+ struct mlx5e_params new_params;
struct hwtstamp_config config;
+ bool rx_cqe_compress_def;
int err;
if (!MLX5_CAP_GEN(priv->mdev, device_frequency_khz) ||
@@ -4305,11 +3995,13 @@ int mlx5e_hwstamp_set(struct mlx5e_priv *priv, struct ifreq *ifr)
}
mutex_lock(&priv->state_lock);
+ new_params = priv->channels.params;
+ rx_cqe_compress_def = priv->channels.params.rx_cqe_compress_def;
+
/* RX HW timestamp */
switch (config.rx_filter) {
case HWTSTAMP_FILTER_NONE:
- /* Reset CQE compression to Admin default */
- mlx5e_modify_rx_cqe_compression_locked(priv, priv->channels.params.rx_cqe_compress_def);
+ new_params.ptp_rx = false;
break;
case HWTSTAMP_FILTER_ALL:
case HWTSTAMP_FILTER_SOME:
@@ -4326,15 +4018,7 @@ int mlx5e_hwstamp_set(struct mlx5e_priv *priv, struct ifreq *ifr)
case HWTSTAMP_FILTER_PTP_V2_SYNC:
case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
case HWTSTAMP_FILTER_NTP_ALL:
- /* Disable CQE compression */
- if (MLX5E_GET_PFLAG(&priv->channels.params, MLX5E_PFLAG_RX_CQE_COMPRESS))
- netdev_warn(priv->netdev, "Disabling RX cqe compression\n");
- err = mlx5e_modify_rx_cqe_compression_locked(priv, false);
- if (err) {
- netdev_err(priv->netdev, "Failed disabling cqe compression err=%d\n", err);
- mutex_unlock(&priv->state_lock);
- return err;
- }
+ new_params.ptp_rx = rx_cqe_compress_def;
config.rx_filter = HWTSTAMP_FILTER_ALL;
break;
default:
@@ -4342,6 +4026,16 @@ int mlx5e_hwstamp_set(struct mlx5e_priv *priv, struct ifreq *ifr)
return -ERANGE;
}
+ if (new_params.ptp_rx == priv->channels.params.ptp_rx)
+ goto out;
+
+ err = mlx5e_safe_switch_params(priv, &new_params, mlx5e_ptp_rx_manage_fs_ctx,
+ &new_params.ptp_rx, true);
+ if (err) {
+ mutex_unlock(&priv->state_lock);
+ return err;
+ }
+out:
memcpy(&priv->tstamp, &config, sizeof(config));
mutex_unlock(&priv->state_lock);
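
Note the context argument in the hunk above: &new_params.ptp_rx points into a stack local, which is safe because mlx5e_safe_switch_params() invokes the preactivate callback synchronously, after the new params are in place. A condensed sketch of the same pattern; the function name is hypothetical:

/* Sketch, assuming mlx5e_ptp_rx_manage_fs_ctx() and
 * mlx5e_safe_switch_params() as defined above.
 */
static int example_toggle_ptp_rx(struct mlx5e_priv *priv, bool enable)
{
	struct mlx5e_params new_params = priv->channels.params;

	new_params.ptp_rx = enable;
	/* ctx is dereferenced only inside the synchronous call chain,
	 * so pointing it at the stack-local new_params is fine.
	 */
	return mlx5e_safe_switch_params(priv, &new_params,
					mlx5e_ptp_rx_manage_fs_ctx,
					&new_params.ptp_rx, true);
}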
@@ -4452,6 +4146,9 @@ static int mlx5e_set_vf_link_state(struct net_device *dev, int vf,
struct mlx5e_priv *priv = netdev_priv(dev);
struct mlx5_core_dev *mdev = priv->mdev;
+ if (mlx5e_is_uplink_rep(priv))
+ return -EOPNOTSUPP;
+
return mlx5_eswitch_set_vport_state(mdev->priv.eswitch, vf + 1,
mlx5_ifla_link2vport(link_state));
}
@@ -4463,6 +4160,9 @@ int mlx5e_get_vf_config(struct net_device *dev,
struct mlx5_core_dev *mdev = priv->mdev;
int err;
+ if (!netif_device_present(dev))
+ return -EOPNOTSUPP;
+
err = mlx5_eswitch_get_vport_config(mdev->priv.eswitch, vf + 1, ivi);
if (err)
return err;
@@ -4479,6 +4179,32 @@ int mlx5e_get_vf_stats(struct net_device *dev,
return mlx5_eswitch_get_vport_stats(mdev->priv.eswitch, vf + 1,
vf_stats);
}
+
+static bool
+mlx5e_has_offload_stats(const struct net_device *dev, int attr_id)
+{
+ struct mlx5e_priv *priv = netdev_priv(dev);
+
+ if (!netif_device_present(dev))
+ return false;
+
+ if (!mlx5e_is_uplink_rep(priv))
+ return false;
+
+ return mlx5e_rep_has_offload_stats(dev, attr_id);
+}
+
+static int
+mlx5e_get_offload_stats(int attr_id, const struct net_device *dev,
+ void *sp)
+{
+ struct mlx5e_priv *priv = netdev_priv(dev);
+
+ if (!mlx5e_is_uplink_rep(priv))
+ return -EOPNOTSUPP;
+
+ return mlx5e_rep_get_offload_stats(attr_id, dev, sp);
+}
#endif
static bool mlx5e_tunnel_proto_supported_tx(struct mlx5_core_dev *mdev, u8 proto_type)
@@ -4622,7 +4348,7 @@ static void mlx5e_tx_timeout(struct net_device *dev, unsigned int txqueue)
static int mlx5e_xdp_allowed(struct mlx5e_priv *priv, struct bpf_prog *prog)
{
struct net_device *netdev = priv->netdev;
- struct mlx5e_channels new_channels = {};
+ struct mlx5e_params new_params;
if (priv->channels.params.lro_en) {
netdev_warn(netdev, "can't set XDP while LRO is on, disable LRO first\n");
@@ -4635,16 +4361,16 @@ static int mlx5e_xdp_allowed(struct mlx5e_priv *priv, struct bpf_prog *prog)
return -EINVAL;
}
- new_channels.params = priv->channels.params;
- new_channels.params.xdp_prog = prog;
+ new_params = priv->channels.params;
+ new_params.xdp_prog = prog;
/* No XSK params: AF_XDP can't be enabled yet at the point of setting
* the XDP program.
*/
- if (!mlx5e_rx_is_linear_skb(&new_channels.params, NULL)) {
+ if (!mlx5e_rx_is_linear_skb(&new_params, NULL)) {
netdev_warn(netdev, "XDP is not allowed with MTU(%d) > %d\n",
- new_channels.params.sw_mtu,
- mlx5e_xdp_max_mtu(&new_channels.params, NULL));
+ new_params.sw_mtu,
+ mlx5e_xdp_max_mtu(&new_params, NULL));
return -EINVAL;
}
@@ -4664,9 +4390,10 @@ static void mlx5e_rq_replace_xdp_prog(struct mlx5e_rq *rq, struct bpf_prog *prog
static int mlx5e_xdp_set(struct net_device *netdev, struct bpf_prog *prog)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
+ struct mlx5e_params new_params;
struct bpf_prog *old_prog;
- bool reset, was_opened;
int err = 0;
+ bool reset;
int i;
mutex_lock(&priv->state_lock);
@@ -4677,46 +4404,29 @@ static int mlx5e_xdp_set(struct net_device *netdev, struct bpf_prog *prog)
goto unlock;
}
- was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
/* no need for full reset when exchanging programs */
reset = (!priv->channels.params.xdp_prog || !prog);
- if (was_opened && !reset)
- /* num_channels is invariant here, so we can take the
- * batched reference right upfront.
- */
- bpf_prog_add(prog, priv->channels.num);
-
- if (was_opened && reset) {
- struct mlx5e_channels new_channels = {};
-
- new_channels.params = priv->channels.params;
- new_channels.params.xdp_prog = prog;
- mlx5e_set_rq_type(priv->mdev, &new_channels.params);
- old_prog = priv->channels.params.xdp_prog;
+ new_params = priv->channels.params;
+ new_params.xdp_prog = prog;
+ if (reset)
+ mlx5e_set_rq_type(priv->mdev, &new_params);
+ old_prog = priv->channels.params.xdp_prog;
- err = mlx5e_safe_switch_channels(priv, &new_channels, NULL, NULL);
- if (err)
- goto unlock;
- } else {
- /* exchange programs, extra prog reference we got from caller
- * as long as we don't fail from this point onwards.
- */
- old_prog = xchg(&priv->channels.params.xdp_prog, prog);
- }
+ err = mlx5e_safe_switch_params(priv, &new_params, NULL, NULL, reset);
+ if (err)
+ goto unlock;
if (old_prog)
bpf_prog_put(old_prog);
- if (!was_opened && reset) /* change RQ type according to priv->xdp_prog */
- mlx5e_set_rq_type(priv->mdev, &priv->channels.params);
-
- if (!was_opened || reset)
+ if (!test_bit(MLX5E_STATE_OPENED, &priv->state) || reset)
goto unlock;
/* exchanging programs w/o reset, we update ref counts on behalf
* of the channels RQs here.
*/
+ bpf_prog_add(prog, priv->channels.num);
for (i = 0; i < priv->channels.num; i++) {
struct mlx5e_channel *c = priv->channels.c[i];
@@ -4837,6 +4547,8 @@ const struct net_device_ops mlx5e_netdev_ops = {
.ndo_get_vf_config = mlx5e_get_vf_config,
.ndo_set_vf_link_state = mlx5e_set_vf_link_state,
.ndo_get_vf_stats = mlx5e_get_vf_stats,
+ .ndo_has_offload_stats = mlx5e_has_offload_stats,
+ .ndo_get_offload_stats = mlx5e_get_offload_stats,
#endif
.ndo_get_devlink_port = mlx5e_get_devlink_port,
};
@@ -4850,93 +4562,6 @@ void mlx5e_build_default_indir_rqt(u32 *indirection_rqt, int len,
indirection_rqt[i] = i % num_channels;
}
-static bool slow_pci_heuristic(struct mlx5_core_dev *mdev)
-{
- u32 link_speed = 0;
- u32 pci_bw = 0;
-
- mlx5e_port_max_linkspeed(mdev, &link_speed);
- pci_bw = pcie_bandwidth_available(mdev->pdev, NULL, NULL, NULL);
- mlx5_core_dbg_once(mdev, "Max link speed = %d, PCI BW = %d\n",
- link_speed, pci_bw);
-
-#define MLX5E_SLOW_PCI_RATIO (2)
-
- return link_speed && pci_bw &&
- link_speed > MLX5E_SLOW_PCI_RATIO * pci_bw;
-}
-
-static struct dim_cq_moder mlx5e_get_def_tx_moderation(u8 cq_period_mode)
-{
- struct dim_cq_moder moder;
-
- moder.cq_period_mode = cq_period_mode;
- moder.pkts = MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_PKTS;
- moder.usec = MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC;
- if (cq_period_mode == MLX5_CQ_PERIOD_MODE_START_FROM_CQE)
- moder.usec = MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC_FROM_CQE;
-
- return moder;
-}
-
-static struct dim_cq_moder mlx5e_get_def_rx_moderation(u8 cq_period_mode)
-{
- struct dim_cq_moder moder;
-
- moder.cq_period_mode = cq_period_mode;
- moder.pkts = MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_PKTS;
- moder.usec = MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC;
- if (cq_period_mode == MLX5_CQ_PERIOD_MODE_START_FROM_CQE)
- moder.usec = MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC_FROM_CQE;
-
- return moder;
-}
-
-static u8 mlx5_to_net_dim_cq_period_mode(u8 cq_period_mode)
-{
- return cq_period_mode == MLX5_CQ_PERIOD_MODE_START_FROM_CQE ?
- DIM_CQ_PERIOD_MODE_START_FROM_CQE :
- DIM_CQ_PERIOD_MODE_START_FROM_EQE;
-}
-
-void mlx5e_reset_tx_moderation(struct mlx5e_params *params, u8 cq_period_mode)
-{
- if (params->tx_dim_enabled) {
- u8 dim_period_mode = mlx5_to_net_dim_cq_period_mode(cq_period_mode);
-
- params->tx_cq_moderation = net_dim_get_def_tx_moderation(dim_period_mode);
- } else {
- params->tx_cq_moderation = mlx5e_get_def_tx_moderation(cq_period_mode);
- }
-}
-
-void mlx5e_reset_rx_moderation(struct mlx5e_params *params, u8 cq_period_mode)
-{
- if (params->rx_dim_enabled) {
- u8 dim_period_mode = mlx5_to_net_dim_cq_period_mode(cq_period_mode);
-
- params->rx_cq_moderation = net_dim_get_def_rx_moderation(dim_period_mode);
- } else {
- params->rx_cq_moderation = mlx5e_get_def_rx_moderation(cq_period_mode);
- }
-}
-
-void mlx5e_set_tx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode)
-{
- mlx5e_reset_tx_moderation(params, cq_period_mode);
- MLX5E_SET_PFLAG(params, MLX5E_PFLAG_TX_CQE_BASED_MODER,
- params->tx_cq_moderation.cq_period_mode ==
- MLX5_CQ_PERIOD_MODE_START_FROM_CQE);
-}
-
-void mlx5e_set_rx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode)
-{
- mlx5e_reset_rx_moderation(params, cq_period_mode);
- MLX5E_SET_PFLAG(params, MLX5E_PFLAG_RX_CQE_BASED_MODER,
- params->rx_cq_moderation.cq_period_mode ==
- MLX5_CQ_PERIOD_MODE_START_FROM_CQE);
-}
-
static u32 mlx5e_choose_lro_timeout(struct mlx5_core_dev *mdev, u32 wanted_timeout)
{
int i;
@@ -4949,25 +4574,6 @@ static u32 mlx5e_choose_lro_timeout(struct mlx5_core_dev *mdev, u32 wanted_timeo
return MLX5_CAP_ETH(mdev, lro_timer_supported_periods[i]);
}
-void mlx5e_build_rq_params(struct mlx5_core_dev *mdev,
- struct mlx5e_params *params)
-{
- /* Prefer Striding RQ, unless any of the following holds:
- * - Striding RQ configuration is not possible/supported.
- * - Slow PCI heuristic.
- * - Legacy RQ would use linear SKB while Striding RQ would use non-linear.
- *
- * No XSK params: checking the availability of striding RQ in general.
- */
- if (!slow_pci_heuristic(mdev) &&
- mlx5e_striding_rq_possible(mdev, params) &&
- (mlx5e_rx_mpwqe_is_linear_skb(mdev, params, NULL) ||
- !mlx5e_rx_is_linear_skb(params, NULL)))
- MLX5E_SET_PFLAG(params, MLX5E_PFLAG_RX_STRIDING_RQ, true);
- mlx5e_set_rq_type(mdev, params);
- mlx5e_init_rq_type_params(mdev, params);
-}
-
void mlx5e_build_rss_params(struct mlx5e_rss_params *rss_params,
u16 num_channels)
{
@@ -5283,6 +4889,7 @@ static int mlx5e_nic_init(struct mlx5_core_dev *mdev,
struct net_device *netdev)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
+ struct devlink_port *dl_port;
int err;
mlx5e_build_nic_params(priv, &priv->xsk, netdev->mtu);
@@ -5298,19 +4905,19 @@ static int mlx5e_nic_init(struct mlx5_core_dev *mdev,
if (err)
mlx5_core_err(mdev, "TLS initialization failed, %d\n", err);
- err = mlx5e_devlink_port_register(priv);
- if (err)
- mlx5_core_err(mdev, "mlx5e_devlink_port_register failed, %d\n", err);
-
- mlx5e_health_create_reporters(priv);
+ dl_port = mlx5e_devlink_get_dl_port(priv);
+ if (dl_port->registered)
+ mlx5e_health_create_reporters(priv);
return 0;
}
static void mlx5e_nic_cleanup(struct mlx5e_priv *priv)
{
- mlx5e_health_destroy_reporters(priv);
- mlx5e_devlink_port_unregister(priv);
+ struct devlink_port *dl_port = mlx5e_devlink_get_dl_port(priv);
+
+ if (dl_port->registered)
+ mlx5e_health_destroy_reporters(priv);
mlx5e_tls_cleanup(priv);
mlx5e_ipsec_cleanup(priv);
}
@@ -5318,6 +4925,7 @@ static void mlx5e_nic_cleanup(struct mlx5e_priv *priv)
static int mlx5e_init_nic_rx(struct mlx5e_priv *priv)
{
struct mlx5_core_dev *mdev = priv->mdev;
+ u16 max_nch = priv->max_nch;
int err;
mlx5e_create_q_counters(priv);
@@ -5332,7 +4940,7 @@ static int mlx5e_init_nic_rx(struct mlx5e_priv *priv)
if (err)
goto err_close_drop_rq;
- err = mlx5e_create_direct_rqts(priv, priv->direct_tir);
+ err = mlx5e_create_direct_rqts(priv, priv->direct_tir, max_nch);
if (err)
goto err_destroy_indirect_rqts;
@@ -5340,22 +4948,30 @@ static int mlx5e_init_nic_rx(struct mlx5e_priv *priv)
if (err)
goto err_destroy_direct_rqts;
- err = mlx5e_create_direct_tirs(priv, priv->direct_tir);
+ err = mlx5e_create_direct_tirs(priv, priv->direct_tir, max_nch);
if (err)
goto err_destroy_indirect_tirs;
- err = mlx5e_create_direct_rqts(priv, priv->xsk_tir);
+ err = mlx5e_create_direct_rqts(priv, priv->xsk_tir, max_nch);
if (unlikely(err))
goto err_destroy_direct_tirs;
- err = mlx5e_create_direct_tirs(priv, priv->xsk_tir);
+ err = mlx5e_create_direct_tirs(priv, priv->xsk_tir, max_nch);
if (unlikely(err))
goto err_destroy_xsk_rqts;
+ err = mlx5e_create_direct_rqts(priv, &priv->ptp_tir, 1);
+ if (err)
+ goto err_destroy_xsk_tirs;
+
+ err = mlx5e_create_direct_tirs(priv, &priv->ptp_tir, 1);
+ if (err)
+ goto err_destroy_ptp_rqt;
+
err = mlx5e_create_flow_steering(priv);
if (err) {
mlx5_core_warn(mdev, "create flow steering failed, %d\n", err);
- goto err_destroy_xsk_tirs;
+ goto err_destroy_ptp_direct_tir;
}
err = mlx5e_tc_nic_init(priv);
@@ -5376,16 +4992,20 @@ err_tc_nic_cleanup:
mlx5e_tc_nic_cleanup(priv);
err_destroy_flow_steering:
mlx5e_destroy_flow_steering(priv);
+err_destroy_ptp_direct_tir:
+ mlx5e_destroy_direct_tirs(priv, &priv->ptp_tir, 1);
+err_destroy_ptp_rqt:
+ mlx5e_destroy_direct_rqts(priv, &priv->ptp_tir, 1);
err_destroy_xsk_tirs:
- mlx5e_destroy_direct_tirs(priv, priv->xsk_tir);
+ mlx5e_destroy_direct_tirs(priv, priv->xsk_tir, max_nch);
err_destroy_xsk_rqts:
- mlx5e_destroy_direct_rqts(priv, priv->xsk_tir);
+ mlx5e_destroy_direct_rqts(priv, priv->xsk_tir, max_nch);
err_destroy_direct_tirs:
- mlx5e_destroy_direct_tirs(priv, priv->direct_tir);
+ mlx5e_destroy_direct_tirs(priv, priv->direct_tir, max_nch);
err_destroy_indirect_tirs:
mlx5e_destroy_indirect_tirs(priv);
err_destroy_direct_rqts:
- mlx5e_destroy_direct_rqts(priv, priv->direct_tir);
+ mlx5e_destroy_direct_rqts(priv, priv->direct_tir, max_nch);
err_destroy_indirect_rqts:
mlx5e_destroy_rqt(priv, &priv->indir_rqt);
err_close_drop_rq:
@@ -5397,14 +5017,18 @@ err_destroy_q_counters:
static void mlx5e_cleanup_nic_rx(struct mlx5e_priv *priv)
{
+ u16 max_nch = priv->max_nch;
+
mlx5e_accel_cleanup_rx(priv);
mlx5e_tc_nic_cleanup(priv);
mlx5e_destroy_flow_steering(priv);
- mlx5e_destroy_direct_tirs(priv, priv->xsk_tir);
- mlx5e_destroy_direct_rqts(priv, priv->xsk_tir);
- mlx5e_destroy_direct_tirs(priv, priv->direct_tir);
+ mlx5e_destroy_direct_tirs(priv, &priv->ptp_tir, 1);
+ mlx5e_destroy_direct_rqts(priv, &priv->ptp_tir, 1);
+ mlx5e_destroy_direct_tirs(priv, priv->xsk_tir, max_nch);
+ mlx5e_destroy_direct_rqts(priv, priv->xsk_tir, max_nch);
+ mlx5e_destroy_direct_tirs(priv, priv->direct_tir, max_nch);
mlx5e_destroy_indirect_tirs(priv);
- mlx5e_destroy_direct_rqts(priv, priv->direct_tir);
+ mlx5e_destroy_direct_rqts(priv, priv->direct_tir, max_nch);
mlx5e_destroy_rqt(priv, &priv->indir_rqt);
mlx5e_close_drop_rq(&priv->drop_rq);
mlx5e_destroy_q_counters(priv);
@@ -5450,7 +5074,7 @@ static void mlx5e_nic_enable(struct mlx5e_priv *priv)
return;
mlx5e_dcbnl_init_app(priv);
- queue_work(priv->wq, &priv->set_rx_mode_work);
+ mlx5e_nic_set_rx_mode(priv);
rtnl_lock();
if (netif_running(netdev))
@@ -5473,7 +5097,7 @@ static void mlx5e_nic_disable(struct mlx5e_priv *priv)
netif_device_detach(priv->netdev);
rtnl_unlock();
- queue_work(priv->wq, &priv->set_rx_mode_work);
+ mlx5e_nic_set_rx_mode(priv);
mlx5e_hv_vhca_stats_destroy(priv);
if (mlx5e_monitor_counter_supported(priv))
@@ -5512,6 +5136,7 @@ static const struct mlx5e_profile mlx5e_nic_profile = {
.rq_groups = MLX5E_NUM_RQ_GROUPS(XSK),
.stats_grps = mlx5e_nic_stats_grps,
.stats_grps_num = mlx5e_nic_stats_grps_num,
+ .rx_ptp_support = true,
};
/* mlx5e generic netdev management API (move to en_common.c) */
@@ -5746,6 +5371,11 @@ rollback:
return err;
}
+void mlx5e_netdev_attach_nic_profile(struct mlx5e_priv *priv)
+{
+ mlx5e_netdev_change_profile(priv, &mlx5e_nic_profile, NULL);
+}
+
void mlx5e_destroy_netdev(struct mlx5e_priv *priv)
{
struct net_device *netdev = priv->netdev;
@@ -5828,10 +5458,17 @@ static int mlx5e_probe(struct auxiliary_device *adev,
priv->profile = profile;
priv->ppriv = NULL;
+
+ err = mlx5e_devlink_port_register(priv);
+ if (err) {
+ mlx5_core_err(mdev, "mlx5e_devlink_port_register failed, %d\n", err);
+ goto err_destroy_netdev;
+ }
+
err = profile->init(mdev, netdev);
if (err) {
mlx5_core_err(mdev, "mlx5e_nic_profile init failed, %d\n", err);
- goto err_destroy_netdev;
+ goto err_devlink_cleanup;
}
err = mlx5e_resume(adev);
@@ -5849,12 +5486,15 @@ static int mlx5e_probe(struct auxiliary_device *adev,
mlx5e_devlink_port_type_eth_set(priv);
mlx5e_dcbnl_init_app(priv);
+ mlx5_uplink_netdev_set(mdev, netdev);
return 0;
err_resume:
mlx5e_suspend(adev, state);
err_profile_cleanup:
profile->cleanup(priv);
+err_devlink_cleanup:
+ mlx5e_devlink_port_unregister(priv);
err_destroy_netdev:
mlx5e_destroy_netdev(priv);
return err;
@@ -5869,6 +5509,7 @@ static void mlx5e_remove(struct auxiliary_device *adev)
unregister_netdev(priv->netdev);
mlx5e_suspend(adev, state);
priv->profile->cleanup(priv);
+ mlx5e_devlink_port_unregister(priv);
mlx5e_destroy_netdev(priv);
}
@@ -5894,18 +5535,18 @@ int mlx5e_init(void)
mlx5e_ipsec_build_inverse_table();
mlx5e_build_ptys2ethtool_map();
- ret = mlx5e_rep_init();
+ ret = auxiliary_driver_register(&mlx5e_driver);
if (ret)
return ret;
- ret = auxiliary_driver_register(&mlx5e_driver);
+ ret = mlx5e_rep_init();
if (ret)
- mlx5e_rep_cleanup();
+ auxiliary_driver_unregister(&mlx5e_driver);
return ret;
}
void mlx5e_cleanup(void)
{
- auxiliary_driver_unregister(&mlx5e_driver);
mlx5e_rep_cleanup();
+ auxiliary_driver_unregister(&mlx5e_driver);
}
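
The init/cleanup hunks flip the bring-up order so the rep layer registers after the auxiliary driver, and teardown mirrors it in reverse; the error path likewise unwinds only what was already registered. A generic sketch of the idiom, with hypothetical register_a()/register_b() (and their unregister counterparts) standing in for the two calls:

/* Sketch of the ordering idiom; register_a()/register_b() are
 * hypothetical stand-ins for auxiliary_driver_register() and
 * mlx5e_rep_init() above.
 */
int example_init(void)
{
	int ret;

	ret = register_a();
	if (ret)
		return ret;

	ret = register_b();
	if (ret)
		unregister_a();		/* unwind in reverse order */
	return ret;
}

void example_cleanup(void)
{
	unregister_b();			/* reverse of example_init() */
	unregister_a();
}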
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
index 8d39bfee84a9..34eb1118670f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
@@ -40,17 +40,19 @@
#include "eswitch.h"
#include "en.h"
#include "en_rep.h"
+#include "en/params.h"
#include "en/txrx.h"
#include "en_tc.h"
#include "en/rep/tc.h"
#include "en/rep/neigh.h"
+#include "en/devlink.h"
#include "fs_core.h"
#include "lib/mlx5.h"
#define CREATE_TRACE_POINTS
#include "diag/en_rep_tracepoint.h"
#define MLX5E_REP_PARAMS_DEF_LOG_SQ_SIZE \
- max(0x7, MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE)
+ max(0x7, MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE)
#define MLX5E_REP_PARAMS_DEF_NUM_CHANNELS 1
static const char mlx5e_rep_driver_name[] = "mlx5e_rep";
@@ -69,16 +71,6 @@ static void mlx5e_rep_get_drvinfo(struct net_device *dev,
fw_rev_sub(mdev), mdev->board_id);
}
-static void mlx5e_uplink_rep_get_drvinfo(struct net_device *dev,
- struct ethtool_drvinfo *drvinfo)
-{
- struct mlx5e_priv *priv = netdev_priv(dev);
-
- mlx5e_rep_get_drvinfo(dev, drvinfo);
- strlcpy(drvinfo->bus_info, pci_name(priv->mdev->pdev),
- sizeof(drvinfo->bus_info));
-}
-
static const struct counter_desc sw_rep_stats_desc[] = {
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_packets) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_bytes) },
@@ -285,46 +277,6 @@ static u32 mlx5e_rep_get_rxfh_indir_size(struct net_device *netdev)
return mlx5e_ethtool_get_rxfh_indir_size(priv);
}
-static void mlx5e_uplink_rep_get_pause_stats(struct net_device *netdev,
- struct ethtool_pause_stats *stats)
-{
- struct mlx5e_priv *priv = netdev_priv(netdev);
-
- mlx5e_stats_pause_get(priv, stats);
-}
-
-static void mlx5e_uplink_rep_get_pauseparam(struct net_device *netdev,
- struct ethtool_pauseparam *pauseparam)
-{
- struct mlx5e_priv *priv = netdev_priv(netdev);
-
- mlx5e_ethtool_get_pauseparam(priv, pauseparam);
-}
-
-static int mlx5e_uplink_rep_set_pauseparam(struct net_device *netdev,
- struct ethtool_pauseparam *pauseparam)
-{
- struct mlx5e_priv *priv = netdev_priv(netdev);
-
- return mlx5e_ethtool_set_pauseparam(priv, pauseparam);
-}
-
-static int mlx5e_uplink_rep_get_link_ksettings(struct net_device *netdev,
- struct ethtool_link_ksettings *link_ksettings)
-{
- struct mlx5e_priv *priv = netdev_priv(netdev);
-
- return mlx5e_ethtool_get_link_ksettings(priv, link_ksettings);
-}
-
-static int mlx5e_uplink_rep_set_link_ksettings(struct net_device *netdev,
- const struct ethtool_link_ksettings *link_ksettings)
-{
- struct mlx5e_priv *priv = netdev_priv(netdev);
-
- return mlx5e_ethtool_set_link_ksettings(priv, link_ksettings);
-}
-
static const struct ethtool_ops mlx5e_rep_ethtool_ops = {
.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
ETHTOOL_COALESCE_MAX_FRAMES |
@@ -344,34 +296,6 @@ static const struct ethtool_ops mlx5e_rep_ethtool_ops = {
.get_rxfh_indir_size = mlx5e_rep_get_rxfh_indir_size,
};
-static const struct ethtool_ops mlx5e_uplink_rep_ethtool_ops = {
- .supported_coalesce_params = ETHTOOL_COALESCE_USECS |
- ETHTOOL_COALESCE_MAX_FRAMES |
- ETHTOOL_COALESCE_USE_ADAPTIVE,
- .get_drvinfo = mlx5e_uplink_rep_get_drvinfo,
- .get_link = ethtool_op_get_link,
- .get_strings = mlx5e_rep_get_strings,
- .get_sset_count = mlx5e_rep_get_sset_count,
- .get_ethtool_stats = mlx5e_rep_get_ethtool_stats,
- .get_ringparam = mlx5e_rep_get_ringparam,
- .set_ringparam = mlx5e_rep_set_ringparam,
- .get_channels = mlx5e_rep_get_channels,
- .set_channels = mlx5e_rep_set_channels,
- .get_coalesce = mlx5e_rep_get_coalesce,
- .set_coalesce = mlx5e_rep_set_coalesce,
- .get_link_ksettings = mlx5e_uplink_rep_get_link_ksettings,
- .set_link_ksettings = mlx5e_uplink_rep_set_link_ksettings,
- .get_rxfh_key_size = mlx5e_rep_get_rxfh_key_size,
- .get_rxfh_indir_size = mlx5e_rep_get_rxfh_indir_size,
- .get_rxfh = mlx5e_get_rxfh,
- .set_rxfh = mlx5e_set_rxfh,
- .get_rxnfc = mlx5e_get_rxnfc,
- .set_rxnfc = mlx5e_set_rxnfc,
- .get_pause_stats = mlx5e_uplink_rep_get_pause_stats,
- .get_pauseparam = mlx5e_uplink_rep_get_pauseparam,
- .set_pauseparam = mlx5e_uplink_rep_set_pauseparam,
-};
-
static void mlx5e_sqs2vport_stop(struct mlx5_eswitch *esw,
struct mlx5_eswitch_rep *rep)
{
@@ -411,8 +335,7 @@ static int mlx5e_sqs2vport_start(struct mlx5_eswitch *esw,
}
/* Add re-inject rule to the PF/representor sqs */
- flow_rule = mlx5_eswitch_add_send_to_vport_rule(esw,
- rep->vport,
+ flow_rule = mlx5_eswitch_add_send_to_vport_rule(esw, rep,
sqns_array[i]);
if (IS_ERR(flow_rule)) {
err = PTR_ERR(flow_rule);
@@ -522,7 +445,7 @@ bool mlx5e_is_uplink_rep(struct mlx5e_priv *priv)
return (rep->vport == MLX5_VPORT_UPLINK);
}
-static bool mlx5e_rep_has_offload_stats(const struct net_device *dev, int attr_id)
+bool mlx5e_rep_has_offload_stats(const struct net_device *dev, int attr_id)
{
switch (attr_id) {
case IFLA_OFFLOAD_XSTATS_CPU_HIT:
@@ -542,8 +465,8 @@ mlx5e_get_sw_stats64(const struct net_device *dev,
return 0;
}
-static int mlx5e_rep_get_offload_stats(int attr_id, const struct net_device *dev,
- void *sp)
+int mlx5e_rep_get_offload_stats(int attr_id, const struct net_device *dev,
+ void *sp)
{
switch (attr_id) {
case IFLA_OFFLOAD_XSTATS_CPU_HIT:
@@ -568,34 +491,6 @@ static int mlx5e_rep_change_mtu(struct net_device *netdev, int new_mtu)
return mlx5e_change_mtu(netdev, new_mtu, NULL);
}
-static int mlx5e_uplink_rep_change_mtu(struct net_device *netdev, int new_mtu)
-{
- return mlx5e_change_mtu(netdev, new_mtu, mlx5e_set_dev_port_mtu_ctx);
-}
-
-static int mlx5e_uplink_rep_set_mac(struct net_device *netdev, void *addr)
-{
- struct sockaddr *saddr = addr;
-
- if (!is_valid_ether_addr(saddr->sa_data))
- return -EADDRNOTAVAIL;
-
- ether_addr_copy(netdev->dev_addr, saddr->sa_data);
- return 0;
-}
-
-static int mlx5e_uplink_rep_set_vf_vlan(struct net_device *dev, int vf, u16 vlan, u8 qos,
- __be16 vlan_proto)
-{
- netdev_warn_once(dev, "legacy vf vlan setting isn't supported in switchdev mode\n");
-
- if (vlan != 0)
- return -EOPNOTSUPP;
-
- /* allow setting 0-vid for compatibility with libvirt */
- return 0;
-}
-
static struct devlink_port *mlx5e_rep_get_devlink_port(struct net_device *netdev)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
@@ -641,29 +536,10 @@ static const struct net_device_ops mlx5e_netdev_ops_rep = {
.ndo_change_carrier = mlx5e_rep_change_carrier,
};
-static const struct net_device_ops mlx5e_netdev_ops_uplink_rep = {
- .ndo_open = mlx5e_open,
- .ndo_stop = mlx5e_close,
- .ndo_start_xmit = mlx5e_xmit,
- .ndo_set_mac_address = mlx5e_uplink_rep_set_mac,
- .ndo_setup_tc = mlx5e_rep_setup_tc,
- .ndo_get_devlink_port = mlx5e_rep_get_devlink_port,
- .ndo_get_stats64 = mlx5e_get_stats,
- .ndo_has_offload_stats = mlx5e_rep_has_offload_stats,
- .ndo_get_offload_stats = mlx5e_rep_get_offload_stats,
- .ndo_change_mtu = mlx5e_uplink_rep_change_mtu,
- .ndo_features_check = mlx5e_features_check,
- .ndo_set_vf_mac = mlx5e_set_vf_mac,
- .ndo_set_vf_rate = mlx5e_set_vf_rate,
- .ndo_get_vf_config = mlx5e_get_vf_config,
- .ndo_get_vf_stats = mlx5e_get_vf_stats,
- .ndo_set_vf_vlan = mlx5e_uplink_rep_set_vf_vlan,
- .ndo_set_features = mlx5e_set_features,
-};
-
bool mlx5e_eswitch_uplink_rep(struct net_device *netdev)
{
- return netdev->netdev_ops == &mlx5e_netdev_ops_uplink_rep;
+ return netdev->netdev_ops == &mlx5e_netdev_ops &&
+ mlx5e_is_uplink_rep(netdev_priv(netdev));
}
bool mlx5e_eswitch_vf_rep(struct net_device *netdev)
@@ -713,26 +589,15 @@ static void mlx5e_build_rep_params(struct net_device *netdev)
}
static void mlx5e_build_rep_netdev(struct net_device *netdev,
- struct mlx5_core_dev *mdev,
- struct mlx5_eswitch_rep *rep)
+ struct mlx5_core_dev *mdev)
{
SET_NETDEV_DEV(netdev, mdev->device);
- if (rep->vport == MLX5_VPORT_UPLINK) {
- netdev->netdev_ops = &mlx5e_netdev_ops_uplink_rep;
- /* we want a persistent mac for the uplink rep */
- mlx5_query_mac_address(mdev, netdev->dev_addr);
- netdev->ethtool_ops = &mlx5e_uplink_rep_ethtool_ops;
- mlx5e_dcbnl_build_rep_netdev(netdev);
- } else {
- netdev->netdev_ops = &mlx5e_netdev_ops_rep;
- eth_hw_addr_random(netdev);
- netdev->ethtool_ops = &mlx5e_rep_ethtool_ops;
- }
+ netdev->netdev_ops = &mlx5e_netdev_ops_rep;
+ eth_hw_addr_random(netdev);
+ netdev->ethtool_ops = &mlx5e_rep_ethtool_ops;
netdev->watchdog_timeo = 15 * HZ;
- netdev->features |= NETIF_F_NETNS_LOCAL;
-
#if IS_ENABLED(CONFIG_MLX5_CLS_ACT)
netdev->hw_features |= NETIF_F_HW_TC;
#endif
@@ -744,12 +609,9 @@ static void mlx5e_build_rep_netdev(struct net_device *netdev,
netdev->hw_features |= NETIF_F_TSO6;
netdev->hw_features |= NETIF_F_RXCSUM;
- if (rep->vport == MLX5_VPORT_UPLINK)
- netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX;
- else
- netdev->features |= NETIF_F_VLAN_CHALLENGED;
-
netdev->features |= netdev->hw_features;
+ netdev->features |= NETIF_F_VLAN_CHALLENGED;
+ netdev->features |= NETIF_F_NETNS_LOCAL;
}
static int mlx5e_init_rep(struct mlx5_core_dev *mdev,
@@ -890,6 +752,7 @@ int mlx5e_rep_bond_update(struct mlx5e_priv *priv, bool cleanup)
static int mlx5e_init_rep_rx(struct mlx5e_priv *priv)
{
struct mlx5_core_dev *mdev = priv->mdev;
+ u16 max_nch = priv->max_nch;
int err;
mlx5e_init_l2_addr(priv);
@@ -904,7 +767,7 @@ static int mlx5e_init_rep_rx(struct mlx5e_priv *priv)
if (err)
goto err_close_drop_rq;
- err = mlx5e_create_direct_rqts(priv, priv->direct_tir);
+ err = mlx5e_create_direct_rqts(priv, priv->direct_tir, max_nch);
if (err)
goto err_destroy_indirect_rqts;
@@ -912,7 +775,7 @@ static int mlx5e_init_rep_rx(struct mlx5e_priv *priv)
if (err)
goto err_destroy_direct_rqts;
- err = mlx5e_create_direct_tirs(priv, priv->direct_tir);
+ err = mlx5e_create_direct_tirs(priv, priv->direct_tir, max_nch);
if (err)
goto err_destroy_indirect_tirs;
@@ -937,11 +800,11 @@ err_destroy_root_ft:
err_destroy_ttc_table:
mlx5e_destroy_ttc_table(priv, &priv->fs.ttc);
err_destroy_direct_tirs:
- mlx5e_destroy_direct_tirs(priv, priv->direct_tir);
+ mlx5e_destroy_direct_tirs(priv, priv->direct_tir, max_nch);
err_destroy_indirect_tirs:
mlx5e_destroy_indirect_tirs(priv);
err_destroy_direct_rqts:
- mlx5e_destroy_direct_rqts(priv, priv->direct_tir);
+ mlx5e_destroy_direct_rqts(priv, priv->direct_tir, max_nch);
err_destroy_indirect_rqts:
mlx5e_destroy_rqt(priv, &priv->indir_rqt);
err_close_drop_rq:
@@ -951,13 +814,15 @@ err_close_drop_rq:
static void mlx5e_cleanup_rep_rx(struct mlx5e_priv *priv)
{
+ u16 max_nch = priv->max_nch;
+
mlx5e_ethtool_cleanup_steering(priv);
rep_vport_rx_rule_destroy(priv);
mlx5e_destroy_rep_root_ft(priv);
mlx5e_destroy_ttc_table(priv, &priv->fs.ttc);
- mlx5e_destroy_direct_tirs(priv, priv->direct_tir);
+ mlx5e_destroy_direct_tirs(priv, priv->direct_tir, max_nch);
mlx5e_destroy_indirect_tirs(priv);
- mlx5e_destroy_direct_rqts(priv, priv->direct_tir);
+ mlx5e_destroy_direct_rqts(priv, priv->direct_tir, max_nch);
mlx5e_destroy_rqt(priv, &priv->indir_rqt);
mlx5e_close_drop_rq(&priv->drop_rq);
}
@@ -1116,6 +981,14 @@ static void mlx5e_uplink_rep_enable(struct mlx5e_priv *priv)
mlx5e_dcbnl_initialize(priv);
mlx5e_dcbnl_init_app(priv);
mlx5e_rep_neigh_init(rpriv);
+
+ netdev->wanted_features |= NETIF_F_HW_TC;
+
+ rtnl_lock();
+ if (netif_running(netdev))
+ mlx5e_open(netdev);
+ netif_device_attach(netdev);
+ rtnl_unlock();
}
static void mlx5e_uplink_rep_disable(struct mlx5e_priv *priv)
@@ -1123,6 +996,12 @@ static void mlx5e_uplink_rep_disable(struct mlx5e_priv *priv)
struct mlx5e_rep_priv *rpriv = priv->ppriv;
struct mlx5_core_dev *mdev = priv->mdev;
+ rtnl_lock();
+ if (netif_running(priv->netdev))
+ mlx5e_close(priv->netdev);
+ netif_device_detach(priv->netdev);
+ rtnl_unlock();
+
mlx5e_rep_neigh_cleanup(rpriv);
mlx5e_dcbnl_delete_app(priv);
mlx5_notifier_unregister(mdev, &priv->events_nb);
@@ -1183,6 +1062,7 @@ static const struct mlx5e_profile mlx5e_rep_profile = {
.rq_groups = MLX5E_NUM_RQ_GROUPS(REGULAR),
.stats_grps = mlx5e_rep_stats_grps,
.stats_grps_num = mlx5e_rep_stats_grps_num,
+ .rx_ptp_support = false,
};
static const struct mlx5e_profile mlx5e_uplink_rep_profile = {
@@ -1199,33 +1079,65 @@ static const struct mlx5e_profile mlx5e_uplink_rep_profile = {
.update_carrier = mlx5e_update_carrier,
.rx_handlers = &mlx5e_rx_handlers_rep,
.max_tc = MLX5E_MAX_NUM_TC,
- .rq_groups = MLX5E_NUM_RQ_GROUPS(REGULAR),
+ /* XSK is needed so we can replace profile with NIC netdev */
+ .rq_groups = MLX5E_NUM_RQ_GROUPS(XSK),
.stats_grps = mlx5e_ul_rep_stats_grps,
.stats_grps_num = mlx5e_ul_rep_stats_grps_num,
+ .rx_ptp_support = false,
};
/* e-Switch vport representors */
static int
-mlx5e_vport_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep)
+mlx5e_vport_uplink_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep)
+{
+ struct mlx5e_priv *priv = netdev_priv(mlx5_uplink_netdev_get(dev));
+ struct mlx5e_rep_priv *rpriv = mlx5e_rep_to_rep_priv(rep);
+ struct devlink_port *dl_port;
+ int err;
+
+ rpriv->netdev = priv->netdev;
+
+ err = mlx5e_netdev_change_profile(priv, &mlx5e_uplink_rep_profile,
+ rpriv);
+ if (err)
+ return err;
+
+ dl_port = mlx5_esw_offloads_devlink_port(dev->priv.eswitch, rpriv->rep->vport);
+ if (dl_port)
+ devlink_port_type_eth_set(dl_port, rpriv->netdev);
+
+ return 0;
+}
+
+static void
+mlx5e_vport_uplink_rep_unload(struct mlx5e_rep_priv *rpriv)
+{
+ struct net_device *netdev = rpriv->netdev;
+ struct devlink_port *dl_port;
+ struct mlx5_core_dev *dev;
+ struct mlx5e_priv *priv;
+
+ priv = netdev_priv(netdev);
+ dev = priv->mdev;
+
+ dl_port = mlx5_esw_offloads_devlink_port(dev->priv.eswitch, rpriv->rep->vport);
+ if (dl_port)
+ devlink_port_type_clear(dl_port);
+ mlx5e_netdev_attach_nic_profile(priv);
+}
+
+static int
+mlx5e_vport_vf_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep)
{
+ struct mlx5e_rep_priv *rpriv = mlx5e_rep_to_rep_priv(rep);
const struct mlx5e_profile *profile;
- struct mlx5e_rep_priv *rpriv;
struct devlink_port *dl_port;
struct net_device *netdev;
struct mlx5e_priv *priv;
unsigned int txqs, rxqs;
int nch, err;
- rpriv = kzalloc(sizeof(*rpriv), GFP_KERNEL);
- if (!rpriv)
- return -ENOMEM;
-
- /* rpriv->rep to be looked up when profile->init() is called */
- rpriv->rep = rep;
-
- profile = (rep->vport == MLX5_VPORT_UPLINK) ?
- &mlx5e_uplink_rep_profile : &mlx5e_rep_profile;
-
+ profile = &mlx5e_rep_profile;
nch = mlx5e_get_max_num_channels(dev);
txqs = nch * profile->max_tc;
rxqs = nch * profile->rq_groups;
@@ -1234,21 +1146,11 @@ mlx5e_vport_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep)
mlx5_core_warn(dev,
"Failed to create representor netdev for vport %d\n",
rep->vport);
- kfree(rpriv);
return -EINVAL;
}
- mlx5e_build_rep_netdev(netdev, dev, rep);
-
+ mlx5e_build_rep_netdev(netdev, dev);
rpriv->netdev = netdev;
- rep->rep_data[REP_ETH].priv = rpriv;
- INIT_LIST_HEAD(&rpriv->vport_sqs_list);
-
- if (rep->vport == MLX5_VPORT_UPLINK) {
- err = mlx5e_create_mdev_resources(dev);
- if (err)
- goto err_destroy_netdev;
- }
priv = netdev_priv(netdev);
priv->profile = profile;
@@ -1256,7 +1158,7 @@ mlx5e_vport_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep)
err = profile->init(dev, netdev);
if (err) {
netdev_warn(netdev, "rep profile init failed, %d\n", err);
- goto err_destroy_mdev_resources;
+ goto err_destroy_netdev;
}
err = mlx5e_attach_netdev(netdev_priv(netdev));
@@ -1286,13 +1188,34 @@ err_detach_netdev:
err_cleanup_profile:
priv->profile->cleanup(priv);
-err_destroy_mdev_resources:
- if (rep->vport == MLX5_VPORT_UPLINK)
- mlx5e_destroy_mdev_resources(dev);
-
err_destroy_netdev:
mlx5e_destroy_netdev(netdev_priv(netdev));
- kfree(rpriv);
+ return err;
+}
+
+static int
+mlx5e_vport_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep)
+{
+ struct mlx5e_rep_priv *rpriv;
+ int err;
+
+ rpriv = kzalloc(sizeof(*rpriv), GFP_KERNEL);
+ if (!rpriv)
+ return -ENOMEM;
+
+ /* rpriv->rep to be looked up when profile->init() is called */
+ rpriv->rep = rep;
+ rep->rep_data[REP_ETH].priv = rpriv;
+ INIT_LIST_HEAD(&rpriv->vport_sqs_list);
+
+ if (rep->vport == MLX5_VPORT_UPLINK)
+ err = mlx5e_vport_uplink_rep_load(dev, rep);
+ else
+ err = mlx5e_vport_vf_rep_load(dev, rep);
+
+ if (err)
+ kfree(rpriv);
+
return err;
}
@@ -1306,15 +1229,19 @@ mlx5e_vport_rep_unload(struct mlx5_eswitch_rep *rep)
struct devlink_port *dl_port;
void *ppriv = priv->ppriv;
+ if (rep->vport == MLX5_VPORT_UPLINK) {
+ mlx5e_vport_uplink_rep_unload(rpriv);
+ goto free_ppriv;
+ }
+
dl_port = mlx5_esw_offloads_devlink_port(dev->priv.eswitch, rpriv->rep->vport);
if (dl_port)
devlink_port_type_clear(dl_port);
unregister_netdev(netdev);
mlx5e_detach_netdev(priv);
priv->profile->cleanup(priv);
- if (rep->vport == MLX5_VPORT_UPLINK)
- mlx5e_destroy_mdev_resources(priv->mdev);
mlx5e_destroy_netdev(priv);
+free_ppriv:
kfree(ppriv); /* mlx5e_rep_priv */
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h
index d1696404cca9..22585015c7a7 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h
@@ -89,6 +89,7 @@ struct mlx5_rep_uplink_priv {
struct mapping_ctx *tunnel_enc_opts_mapping;
struct mlx5_tc_ct_priv *ct_priv;
+ struct mlx5_esw_psample *esw_psample;
/* support eswitch vports bonding */
struct mlx5e_rep_bond *bond;
@@ -220,6 +221,10 @@ void mlx5e_rep_bond_unslave(struct mlx5_eswitch *esw,
const struct net_device *lag_dev);
int mlx5e_rep_bond_update(struct mlx5e_priv *priv, bool cleanup);
+bool mlx5e_rep_has_offload_stats(const struct net_device *dev, int attr_id);
+int mlx5e_rep_get_offload_stats(int attr_id, const struct net_device *dev,
+ void *sp);
+
bool mlx5e_is_uplink_rep(struct mlx5e_priv *priv);
int mlx5e_add_sqs_fwd_rules(struct mlx5e_priv *priv);
void mlx5e_remove_sqs_fwd_rules(struct mlx5e_priv *priv);
@@ -240,6 +245,11 @@ static inline int mlx5e_add_sqs_fwd_rules(struct mlx5e_priv *priv) { return 0; }
static inline void mlx5e_remove_sqs_fwd_rules(struct mlx5e_priv *priv) {}
static inline int mlx5e_rep_init(void) { return 0; };
static inline void mlx5e_rep_cleanup(void) {};
+static inline bool mlx5e_rep_has_offload_stats(const struct net_device *dev,
+ int attr_id) { return false; }
+static inline int mlx5e_rep_get_offload_stats(int attr_id,
+ const struct net_device *dev,
+ void *sp) { return -EOPNOTSUPP; }
#endif
static inline bool mlx5e_is_vport_rep(struct mlx5e_priv *priv)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
index 249d8905e644..f90894eea9e0 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -52,6 +52,7 @@
#include "en/health.h"
#include "en/params.h"
#include "devlink.h"
+#include "en/devlink.h"
static struct sk_buff *
mlx5e_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
@@ -669,6 +670,7 @@ int mlx5e_poll_ico_cq(struct mlx5e_cq *cq)
get_cqe_opcode(cqe));
mlx5e_dump_error_cqe(&sq->cq, sq->sqn,
(struct mlx5_err_cqe *)cqe);
+ mlx5_wq_cyc_wqe_dump(&sq->wq, ci, wi->num_wqebbs);
if (!test_and_set_bit(MLX5E_SQ_STATE_RECOVERING, &sq->state))
queue_work(cq->priv->wq, &sq->recover_work);
break;
@@ -1822,6 +1824,7 @@ static void mlx5e_trap_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe
struct mlx5e_priv *priv = netdev_priv(rq->netdev);
struct mlx5_wq_cyc *wq = &rq->wqe.wq;
struct mlx5e_wqe_frag_info *wi;
+ struct devlink_port *dl_port;
struct sk_buff *skb;
u32 cqe_bcnt;
u16 trap_id;
@@ -1844,7 +1847,8 @@ static void mlx5e_trap_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe
mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb);
skb_push(skb, ETH_HLEN);
- mlx5_devlink_trap_report(rq->mdev, trap_id, skb, &priv->dl_port);
+ dl_port = mlx5e_devlink_get_dl_port(priv);
+ mlx5_devlink_trap_report(rq->mdev, trap_id, skb, dl_port);
dev_kfree_skb_any(skb);
free_wqe:
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
index 88a01c59ce61..e4f5b6395148 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
@@ -184,6 +184,7 @@ static const struct counter_desc sw_stats_desc[] = {
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_resync_req_end) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_resync_req_skip) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_resync_res_ok) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_resync_res_retry) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_resync_res_skip) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_err) },
#endif
@@ -344,6 +345,7 @@ static void mlx5e_stats_grp_sw_update_stats_rq_stats(struct mlx5e_sw_stats *s,
s->rx_tls_resync_req_end += rq_stats->tls_resync_req_end;
s->rx_tls_resync_req_skip += rq_stats->tls_resync_req_skip;
s->rx_tls_resync_res_ok += rq_stats->tls_resync_res_ok;
+ s->rx_tls_resync_res_retry += rq_stats->tls_resync_res_retry;
s->rx_tls_resync_res_skip += rq_stats->tls_resync_res_skip;
s->rx_tls_err += rq_stats->tls_err;
#endif
@@ -401,13 +403,21 @@ static void mlx5e_stats_grp_sw_update_stats_ptp(struct mlx5e_priv *priv,
{
int i;
- if (!priv->port_ptp_opened)
+ if (!priv->tx_ptp_opened && !priv->rx_ptp_opened)
return;
- mlx5e_stats_grp_sw_update_stats_ch_stats(s, &priv->port_ptp_stats.ch);
+ mlx5e_stats_grp_sw_update_stats_ch_stats(s, &priv->ptp_stats.ch);
- for (i = 0; i < priv->max_opened_tc; i++) {
- mlx5e_stats_grp_sw_update_stats_sq(s, &priv->port_ptp_stats.sq[i]);
+ if (priv->tx_ptp_opened) {
+ for (i = 0; i < priv->max_opened_tc; i++) {
+ mlx5e_stats_grp_sw_update_stats_sq(s, &priv->ptp_stats.sq[i]);
+
+ /* https://gcc.gnu.org/bugzilla/show_bug.cgi?id=92657 */
+ barrier();
+ }
+ }
+ if (priv->rx_ptp_opened) {
+ mlx5e_stats_grp_sw_update_stats_rq_stats(s, &priv->ptp_stats.rq);
/* https://gcc.gnu.org/bugzilla/show_bug.cgi?id=92657 */
barrier();
@@ -760,35 +770,112 @@ static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(802_3)
mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
}
-#define MLX5E_READ_CTR64_BE_F(ptr, c) \
+#define MLX5E_READ_CTR64_BE_F(ptr, set, c) \
be64_to_cpu(*(__be64 *)((char *)ptr + \
MLX5_BYTE_OFF(ppcnt_reg, \
- counter_set.eth_802_3_cntrs_grp_data_layout.c##_high)))
+ counter_set.set.c##_high)))
-void mlx5e_stats_pause_get(struct mlx5e_priv *priv,
- struct ethtool_pause_stats *pause_stats)
+static int mlx5e_stats_get_ieee(struct mlx5_core_dev *mdev,
+ u32 *ppcnt_ieee_802_3)
{
- u32 ppcnt_ieee_802_3[MLX5_ST_SZ_DW(ppcnt_reg)];
- struct mlx5_core_dev *mdev = priv->mdev;
u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {};
int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
if (!MLX5_BASIC_PPCNT_SUPPORTED(mdev))
- return;
+ return -EOPNOTSUPP;
MLX5_SET(ppcnt_reg, in, local_port, 1);
MLX5_SET(ppcnt_reg, in, grp, MLX5_IEEE_802_3_COUNTERS_GROUP);
- mlx5_core_access_reg(mdev, in, sz, ppcnt_ieee_802_3,
- sz, MLX5_REG_PPCNT, 0, 0);
+ return mlx5_core_access_reg(mdev, in, sz, ppcnt_ieee_802_3,
+ sz, MLX5_REG_PPCNT, 0, 0);
+}
+
+void mlx5e_stats_pause_get(struct mlx5e_priv *priv,
+ struct ethtool_pause_stats *pause_stats)
+{
+ u32 ppcnt_ieee_802_3[MLX5_ST_SZ_DW(ppcnt_reg)];
+ struct mlx5_core_dev *mdev = priv->mdev;
+
+ if (mlx5e_stats_get_ieee(mdev, ppcnt_ieee_802_3))
+ return;
pause_stats->tx_pause_frames =
MLX5E_READ_CTR64_BE_F(ppcnt_ieee_802_3,
+ eth_802_3_cntrs_grp_data_layout,
a_pause_mac_ctrl_frames_transmitted);
pause_stats->rx_pause_frames =
MLX5E_READ_CTR64_BE_F(ppcnt_ieee_802_3,
+ eth_802_3_cntrs_grp_data_layout,
a_pause_mac_ctrl_frames_received);
}
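
MLX5E_READ_CTR64_BE_F() now takes the counter-set layout as its second argument, so one macro serves the 802.3, RFC 2863 and RFC 2819 groups alike; each PPCNT counter occupies two 32-bit words and the macro performs a single big-endian 64-bit read starting at the counter's _high field. A sketch of reading one counter through the shared helper; the wrapper name is hypothetical, while a_frames_received_ok is a real 802.3 counter used further down:

/* Sketch, assuming mlx5e_stats_get_ieee() and the reworked macro above. */
static u64 example_read_frames_ok(struct mlx5e_priv *priv)
{
	u32 ppcnt[MLX5_ST_SZ_DW(ppcnt_reg)];

	if (mlx5e_stats_get_ieee(priv->mdev, ppcnt))
		return 0;	/* PPCNT group not supported / read failed */

	return MLX5E_READ_CTR64_BE_F(ppcnt,
				     eth_802_3_cntrs_grp_data_layout,
				     a_frames_received_ok);
}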
+void mlx5e_stats_eth_phy_get(struct mlx5e_priv *priv,
+ struct ethtool_eth_phy_stats *phy_stats)
+{
+ u32 ppcnt_ieee_802_3[MLX5_ST_SZ_DW(ppcnt_reg)];
+ struct mlx5_core_dev *mdev = priv->mdev;
+
+ if (mlx5e_stats_get_ieee(mdev, ppcnt_ieee_802_3))
+ return;
+
+ phy_stats->SymbolErrorDuringCarrier =
+ MLX5E_READ_CTR64_BE_F(ppcnt_ieee_802_3,
+ eth_802_3_cntrs_grp_data_layout,
+ a_symbol_error_during_carrier);
+}
+
+void mlx5e_stats_eth_mac_get(struct mlx5e_priv *priv,
+ struct ethtool_eth_mac_stats *mac_stats)
+{
+ u32 ppcnt_ieee_802_3[MLX5_ST_SZ_DW(ppcnt_reg)];
+ struct mlx5_core_dev *mdev = priv->mdev;
+
+ if (mlx5e_stats_get_ieee(mdev, ppcnt_ieee_802_3))
+ return;
+
+#define RD(name) \
+ MLX5E_READ_CTR64_BE_F(ppcnt_ieee_802_3, \
+ eth_802_3_cntrs_grp_data_layout, \
+ name)
+
+ mac_stats->FramesTransmittedOK = RD(a_frames_transmitted_ok);
+ mac_stats->FramesReceivedOK = RD(a_frames_received_ok);
+ mac_stats->FrameCheckSequenceErrors = RD(a_frame_check_sequence_errors);
+ mac_stats->OctetsTransmittedOK = RD(a_octets_transmitted_ok);
+ mac_stats->OctetsReceivedOK = RD(a_octets_received_ok);
+ mac_stats->MulticastFramesXmittedOK = RD(a_multicast_frames_xmitted_ok);
+ mac_stats->BroadcastFramesXmittedOK = RD(a_broadcast_frames_xmitted_ok);
+ mac_stats->MulticastFramesReceivedOK = RD(a_multicast_frames_received_ok);
+ mac_stats->BroadcastFramesReceivedOK = RD(a_broadcast_frames_received_ok);
+ mac_stats->InRangeLengthErrors = RD(a_in_range_length_errors);
+ mac_stats->OutOfRangeLengthField = RD(a_out_of_range_length_field);
+ mac_stats->FrameTooLongErrors = RD(a_frame_too_long_errors);
+#undef RD
+}
+
+void mlx5e_stats_eth_ctrl_get(struct mlx5e_priv *priv,
+ struct ethtool_eth_ctrl_stats *ctrl_stats)
+{
+ u32 ppcnt_ieee_802_3[MLX5_ST_SZ_DW(ppcnt_reg)];
+ struct mlx5_core_dev *mdev = priv->mdev;
+
+ if (mlx5e_stats_get_ieee(mdev, ppcnt_ieee_802_3))
+ return;
+
+ ctrl_stats->MACControlFramesTransmitted =
+ MLX5E_READ_CTR64_BE_F(ppcnt_ieee_802_3,
+ eth_802_3_cntrs_grp_data_layout,
+ a_mac_control_frames_transmitted);
+ ctrl_stats->MACControlFramesReceived =
+ MLX5E_READ_CTR64_BE_F(ppcnt_ieee_802_3,
+ eth_802_3_cntrs_grp_data_layout,
+ a_mac_control_frames_received);
+ ctrl_stats->UnsupportedOpcodesReceived =
+ MLX5E_READ_CTR64_BE_F(ppcnt_ieee_802_3,
+ eth_802_3_cntrs_grp_data_layout,
+ a_unsupported_opcodes_received);
+}
+
#define PPORT_2863_OFF(c) \
MLX5_BYTE_OFF(ppcnt_reg, \
counter_set.eth_2863_cntrs_grp_data_layout.c##_high)
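
With the extra counter-set argument, MLX5E_READ_CTR64_BE_F() can address any group in the PPCNT layout instead of being hard-wired to the 802.3 set. For illustration (a sketch of the expansion, not part of the patch), a call such as:

    MLX5E_READ_CTR64_BE_F(out, eth_2819_cntrs_grp_data_layout,
                          ether_stats_fragments)

expands roughly to:

    be64_to_cpu(*(__be64 *)((char *)out +
            MLX5_BYTE_OFF(ppcnt_reg,
                    counter_set.eth_2819_cntrs_grp_data_layout.ether_stats_fragments_high)));
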
@@ -900,6 +987,59 @@ static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(2819)
mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
}
+static const struct ethtool_rmon_hist_range mlx5e_rmon_ranges[] = {
+ { 0, 64 },
+ { 65, 127 },
+ { 128, 255 },
+ { 256, 511 },
+ { 512, 1023 },
+ { 1024, 1518 },
+ { 1519, 2047 },
+ { 2048, 4095 },
+ { 4096, 8191 },
+ { 8192, 10239 },
+ {}
+};
+
+void mlx5e_stats_rmon_get(struct mlx5e_priv *priv,
+ struct ethtool_rmon_stats *rmon,
+ const struct ethtool_rmon_hist_range **ranges)
+{
+ u32 ppcnt_RFC_2819_counters[MLX5_ST_SZ_DW(ppcnt_reg)];
+ struct mlx5_core_dev *mdev = priv->mdev;
+ u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {0};
+ int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
+
+ MLX5_SET(ppcnt_reg, in, local_port, 1);
+ MLX5_SET(ppcnt_reg, in, grp, MLX5_RFC_2819_COUNTERS_GROUP);
+ if (mlx5_core_access_reg(mdev, in, sz, ppcnt_RFC_2819_counters,
+ sz, MLX5_REG_PPCNT, 0, 0))
+ return;
+
+#define RD(name) \
+ MLX5E_READ_CTR64_BE_F(ppcnt_RFC_2819_counters, \
+ eth_2819_cntrs_grp_data_layout, \
+ name)
+
+ rmon->undersize_pkts = RD(ether_stats_undersize_pkts);
+ rmon->fragments = RD(ether_stats_fragments);
+ rmon->jabbers = RD(ether_stats_jabbers);
+
+ rmon->hist[0] = RD(ether_stats_pkts64octets);
+ rmon->hist[1] = RD(ether_stats_pkts65to127octets);
+ rmon->hist[2] = RD(ether_stats_pkts128to255octets);
+ rmon->hist[3] = RD(ether_stats_pkts256to511octets);
+ rmon->hist[4] = RD(ether_stats_pkts512to1023octets);
+ rmon->hist[5] = RD(ether_stats_pkts1024to1518octets);
+ rmon->hist[6] = RD(ether_stats_pkts1519to2047octets);
+ rmon->hist[7] = RD(ether_stats_pkts2048to4095octets);
+ rmon->hist[8] = RD(ether_stats_pkts4096to8191octets);
+ rmon->hist[9] = RD(ether_stats_pkts8192to10239octets);
+#undef RD
+
+ *ranges = mlx5e_rmon_ranges;
+}
+
#define PPORT_PHY_STATISTICAL_OFF(c) \
MLX5_BYTE_OFF(ppcnt_reg, \
counter_set.phys_layer_statistical_cntrs.c##_high)
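
Each entry of mlx5e_rmon_ranges describes the bucket stored at the same index of rmon->hist[], with the empty {0, 0} entry terminating the list. A minimal consumer sketch (hypothetical, for illustration only; the ethtool core does the real pairing):

    const struct ethtool_rmon_hist_range *r = mlx5e_rmon_ranges;
    int i;

    for (i = 0; r[i].high; i++)   /* the {0, 0} sentinel ends the walk */
            pr_info("rx %u-%u octets: %llu packets\n",
                    r[i].low, r[i].high, rmon->hist[i]);
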
@@ -1007,6 +1147,29 @@ static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(phy)
mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
}
+void mlx5e_stats_fec_get(struct mlx5e_priv *priv,
+ struct ethtool_fec_stats *fec_stats)
+{
+ u32 ppcnt_phy_statistical[MLX5_ST_SZ_DW(ppcnt_reg)];
+ struct mlx5_core_dev *mdev = priv->mdev;
+ u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {0};
+ int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
+
+ if (!MLX5_CAP_PCAM_FEATURE(mdev, ppcnt_statistical_group))
+ return;
+
+ MLX5_SET(ppcnt_reg, in, local_port, 1);
+ MLX5_SET(ppcnt_reg, in, grp, MLX5_PHYSICAL_LAYER_STATISTICAL_GROUP);
+ if (mlx5_core_access_reg(mdev, in, sz, ppcnt_phy_statistical,
+ sz, MLX5_REG_PPCNT, 0, 0))
+ return;
+
+ fec_stats->corrected_bits.total =
+ MLX5E_READ_CTR64_BE_F(ppcnt_phy_statistical,
+ phys_layer_statistical_cntrs,
+ phy_corrected_bits);
+}
+
#define PPORT_ETH_EXT_OFF(c) \
MLX5_BYTE_OFF(ppcnt_reg, \
counter_set.eth_extended_cntrs_grp_data_layout.c##_high)
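
mlx5e_stats_fec_get() fills in only the total corrected-bits counter, and only when the PCAM statistical group is available; the remaining FEC fields stay untouched, so ethtool reports them as unsupported. With a userspace ethtool new enough to know the standard stats groups, these counters should be reachable with invocations along these lines (illustrative):

    ethtool -I --show-fec eth0
    ethtool -S eth0 --groups eth-phy eth-mac eth-ctrl rmon
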
@@ -1621,6 +1784,7 @@ static const struct counter_desc rq_stats_desc[] = {
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_resync_req_end) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_resync_req_skip) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_resync_res_ok) },
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_resync_res_retry) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_resync_res_skip) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_err) },
#endif
@@ -1751,6 +1915,38 @@ static const struct counter_desc ptp_cq_stats_desc[] = {
{ MLX5E_DECLARE_PTP_CQ_STAT(struct mlx5e_ptp_cq_stats, abort_abs_diff_ns) },
};
+static const struct counter_desc ptp_rq_stats_desc[] = {
+ { MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, packets) },
+ { MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, bytes) },
+ { MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, csum_complete) },
+ { MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, csum_complete_tail) },
+ { MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, csum_complete_tail_slow) },
+ { MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, csum_unnecessary) },
+ { MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, csum_unnecessary_inner) },
+ { MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, csum_none) },
+ { MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, xdp_drop) },
+ { MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, xdp_redirect) },
+ { MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, lro_packets) },
+ { MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, lro_bytes) },
+ { MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, ecn_mark) },
+ { MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, removed_vlan_packets) },
+ { MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, wqe_err) },
+ { MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, mpwqe_filler_cqes) },
+ { MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, mpwqe_filler_strides) },
+ { MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, oversize_pkts_sw_drop) },
+ { MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, buff_alloc_err) },
+ { MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, cqe_compress_blks) },
+ { MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, cqe_compress_pkts) },
+ { MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, cache_reuse) },
+ { MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, cache_full) },
+ { MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, cache_empty) },
+ { MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, cache_busy) },
+ { MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, cache_waive) },
+ { MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, congst_umr) },
+ { MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, arfs_err) },
+ { MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, recover) },
+};
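+/* Note (illustrative): these descriptors reuse the generic mlx5e_rq_stats
+ * layout under the "ptp_rq%d_" prefix, so the single PTP receive queue
+ * surfaces in ethtool -S with a fixed channel index, e.g. ptp_rq0_packets,
+ * ptp_rq0_bytes, and so on.
+ */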
+
static const struct counter_desc qos_sq_stats_desc[] = {
{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, packets) },
{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, bytes) },
@@ -1795,6 +1991,7 @@ static const struct counter_desc qos_sq_stats_desc[] = {
#define NUM_PTP_SQ_STATS ARRAY_SIZE(ptp_sq_stats_desc)
#define NUM_PTP_CH_STATS ARRAY_SIZE(ptp_ch_stats_desc)
#define NUM_PTP_CQ_STATS ARRAY_SIZE(ptp_cq_stats_desc)
+#define NUM_PTP_RQ_STATS ARRAY_SIZE(ptp_rq_stats_desc)
#define NUM_QOS_SQ_STATS ARRAY_SIZE(qos_sq_stats_desc)
static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(qos)
@@ -1841,32 +2038,46 @@ static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(qos) { return; }
static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(ptp)
{
- return priv->port_ptp_opened ?
- NUM_PTP_CH_STATS +
- ((NUM_PTP_SQ_STATS + NUM_PTP_CQ_STATS) * priv->max_opened_tc) :
- 0;
+ int num = NUM_PTP_CH_STATS;
+
+ if (!priv->tx_ptp_opened && !priv->rx_ptp_opened)
+ return 0;
+
+ if (priv->tx_ptp_opened)
+ num += (NUM_PTP_SQ_STATS + NUM_PTP_CQ_STATS) * priv->max_opened_tc;
+ if (priv->rx_ptp_opened)
+ num += NUM_PTP_RQ_STATS;
+
+ return num;
}
static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(ptp)
{
int i, tc;
- if (!priv->port_ptp_opened)
+ if (!priv->tx_ptp_opened && !priv->rx_ptp_opened)
return idx;
for (i = 0; i < NUM_PTP_CH_STATS; i++)
sprintf(data + (idx++) * ETH_GSTRING_LEN,
ptp_ch_stats_desc[i].format);
- for (tc = 0; tc < priv->max_opened_tc; tc++)
- for (i = 0; i < NUM_PTP_SQ_STATS; i++)
- sprintf(data + (idx++) * ETH_GSTRING_LEN,
- ptp_sq_stats_desc[i].format, tc);
+ if (priv->tx_ptp_opened) {
+ for (tc = 0; tc < priv->max_opened_tc; tc++)
+ for (i = 0; i < NUM_PTP_SQ_STATS; i++)
+ sprintf(data + (idx++) * ETH_GSTRING_LEN,
+ ptp_sq_stats_desc[i].format, tc);
- for (tc = 0; tc < priv->max_opened_tc; tc++)
- for (i = 0; i < NUM_PTP_CQ_STATS; i++)
+ for (tc = 0; tc < priv->max_opened_tc; tc++)
+ for (i = 0; i < NUM_PTP_CQ_STATS; i++)
+ sprintf(data + (idx++) * ETH_GSTRING_LEN,
+ ptp_cq_stats_desc[i].format, tc);
+ }
+ if (priv->rx_ptp_opened) {
+ for (i = 0; i < NUM_PTP_RQ_STATS; i++)
sprintf(data + (idx++) * ETH_GSTRING_LEN,
- ptp_cq_stats_desc[i].format, tc);
+ ptp_rq_stats_desc[i].format, MLX5E_PTP_CHANNEL_IX);
+ }
return idx;
}
@@ -1874,26 +2085,33 @@ static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(ptp)
{
int i, tc;
- if (!priv->port_ptp_opened)
+ if (!priv->tx_ptp_opened && !priv->rx_ptp_opened)
return idx;
for (i = 0; i < NUM_PTP_CH_STATS; i++)
data[idx++] =
- MLX5E_READ_CTR64_CPU(&priv->port_ptp_stats.ch,
+ MLX5E_READ_CTR64_CPU(&priv->ptp_stats.ch,
ptp_ch_stats_desc, i);
- for (tc = 0; tc < priv->max_opened_tc; tc++)
- for (i = 0; i < NUM_PTP_SQ_STATS; i++)
- data[idx++] =
- MLX5E_READ_CTR64_CPU(&priv->port_ptp_stats.sq[tc],
- ptp_sq_stats_desc, i);
+ if (priv->tx_ptp_opened) {
+ for (tc = 0; tc < priv->max_opened_tc; tc++)
+ for (i = 0; i < NUM_PTP_SQ_STATS; i++)
+ data[idx++] =
+ MLX5E_READ_CTR64_CPU(&priv->ptp_stats.sq[tc],
+ ptp_sq_stats_desc, i);
- for (tc = 0; tc < priv->max_opened_tc; tc++)
- for (i = 0; i < NUM_PTP_CQ_STATS; i++)
+ for (tc = 0; tc < priv->max_opened_tc; tc++)
+ for (i = 0; i < NUM_PTP_CQ_STATS; i++)
+ data[idx++] =
+ MLX5E_READ_CTR64_CPU(&priv->ptp_stats.cq[tc],
+ ptp_cq_stats_desc, i);
+ }
+ if (priv->rx_ptp_opened) {
+ for (i = 0; i < NUM_PTP_RQ_STATS; i++)
data[idx++] =
- MLX5E_READ_CTR64_CPU(&priv->port_ptp_stats.cq[tc],
- ptp_cq_stats_desc, i);
-
+ MLX5E_READ_CTR64_CPU(&priv->ptp_stats.rq,
+ ptp_rq_stats_desc, i);
+ }
return idx;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
index adf9b7b8b712..139e59f30db0 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
@@ -54,6 +54,7 @@
#define MLX5E_DECLARE_PTP_TX_STAT(type, fld) "ptp_tx%d_"#fld, offsetof(type, fld)
#define MLX5E_DECLARE_PTP_CH_STAT(type, fld) "ptp_ch_"#fld, offsetof(type, fld)
#define MLX5E_DECLARE_PTP_CQ_STAT(type, fld) "ptp_cq%d_"#fld, offsetof(type, fld)
+#define MLX5E_DECLARE_PTP_RQ_STAT(type, fld) "ptp_rq%d_"#fld, offsetof(type, fld)
#define MLX5E_DECLARE_QOS_TX_STAT(type, fld) "qos_tx%d_"#fld, offsetof(type, fld)
@@ -113,6 +114,18 @@ void mlx5e_stats_update_ndo_stats(struct mlx5e_priv *priv);
void mlx5e_stats_pause_get(struct mlx5e_priv *priv,
struct ethtool_pause_stats *pause_stats);
+void mlx5e_stats_fec_get(struct mlx5e_priv *priv,
+ struct ethtool_fec_stats *fec_stats);
+
+void mlx5e_stats_eth_phy_get(struct mlx5e_priv *priv,
+ struct ethtool_eth_phy_stats *phy_stats);
+void mlx5e_stats_eth_mac_get(struct mlx5e_priv *priv,
+ struct ethtool_eth_mac_stats *mac_stats);
+void mlx5e_stats_eth_ctrl_get(struct mlx5e_priv *priv,
+ struct ethtool_eth_ctrl_stats *ctrl_stats);
+void mlx5e_stats_rmon_get(struct mlx5e_priv *priv,
+ struct ethtool_rmon_stats *rmon,
+ const struct ethtool_rmon_hist_range **ranges);
/* Concrete NIC Stats */
@@ -206,6 +219,7 @@ struct mlx5e_sw_stats {
u64 rx_tls_resync_req_end;
u64 rx_tls_resync_req_skip;
u64 rx_tls_resync_res_ok;
+ u64 rx_tls_resync_res_retry;
u64 rx_tls_resync_res_skip;
u64 rx_tls_err;
#endif
@@ -336,6 +350,7 @@ struct mlx5e_rq_stats {
u64 tls_resync_req_end;
u64 tls_resync_req_skip;
u64 tls_resync_res_ok;
+ u64 tls_resync_res_retry;
u64 tls_resync_res_skip;
u64 tls_err;
#endif
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index d675107d9eca..47a9c49b25fd 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -47,6 +47,7 @@
#include <net/tc_act/tc_pedit.h>
#include <net/tc_act/tc_csum.h>
#include <net/tc_act/tc_mpls.h>
+#include <net/psample.h>
#include <net/arp.h>
#include <net/ipv6_stubs.h>
#include <net/bareudp.h>
@@ -65,6 +66,7 @@
#include "en/mod_hdr.h"
#include "en/tc_priv.h"
#include "en/tc_tun_encap.h"
+#include "esw/sample.h"
#include "lib/devcom.h"
#include "lib/geneve.h"
#include "lib/fs_chains.h"
@@ -221,6 +223,25 @@ get_ct_priv(struct mlx5e_priv *priv)
return priv->fs.tc.ct;
}
+#if IS_ENABLED(CONFIG_MLX5_TC_SAMPLE)
+static struct mlx5_esw_psample *
+get_sample_priv(struct mlx5e_priv *priv)
+{
+ struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+ struct mlx5_rep_uplink_priv *uplink_priv;
+ struct mlx5e_rep_priv *uplink_rpriv;
+
+ if (is_mdev_switchdev_mode(priv->mdev)) {
+ uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
+ uplink_priv = &uplink_rpriv->uplink_priv;
+
+ return uplink_priv->esw_psample;
+ }
+
+ return NULL;
+}
+#endif
+
struct mlx5_flow_handle *
mlx5_tc_rule_insert(struct mlx5e_priv *priv,
struct mlx5_flow_spec *spec,
@@ -445,11 +466,15 @@ static void mlx5e_hairpin_destroy_transport(struct mlx5e_hairpin *hp)
mlx5_core_dealloc_transport_domain(hp->func_mdev, hp->tdn);
}
-static void mlx5e_hairpin_fill_rqt_rqns(struct mlx5e_hairpin *hp, void *rqtc)
+static int mlx5e_hairpin_fill_rqt_rqns(struct mlx5e_hairpin *hp, void *rqtc)
{
- u32 indirection_rqt[MLX5E_INDIR_RQT_SIZE], rqn;
struct mlx5e_priv *priv = hp->func_priv;
int i, ix, sz = MLX5E_INDIR_RQT_SIZE;
+ u32 *indirection_rqt, rqn;
+
+ indirection_rqt = kcalloc(sz, sizeof(*indirection_rqt), GFP_KERNEL);
+ if (!indirection_rqt)
+ return -ENOMEM;
mlx5e_build_default_indir_rqt(indirection_rqt, sz,
hp->num_channels);
@@ -462,6 +487,9 @@ static void mlx5e_hairpin_fill_rqt_rqns(struct mlx5e_hairpin *hp, void *rqtc)
rqn = hp->pair->rqn[ix];
MLX5_SET(rqtc, rqtc, rq_num[i], rqn);
}
+
+ kfree(indirection_rqt);
+ return 0;
}
static int mlx5e_hairpin_create_indirect_rqt(struct mlx5e_hairpin *hp)
@@ -482,12 +510,15 @@ static int mlx5e_hairpin_create_indirect_rqt(struct mlx5e_hairpin *hp)
MLX5_SET(rqtc, rqtc, rqt_actual_size, sz);
MLX5_SET(rqtc, rqtc, rqt_max_size, sz);
- mlx5e_hairpin_fill_rqt_rqns(hp, rqtc);
+ err = mlx5e_hairpin_fill_rqt_rqns(hp, rqtc);
+ if (err)
+ goto out;
err = mlx5_core_create_rqt(mdev, in, inlen, &hp->indir_rqt.rqtn);
if (!err)
hp->indir_rqt.enabled = true;
+out:
kvfree(in);
return err;
}
@@ -896,7 +927,7 @@ mlx5e_add_offloaded_nic_rule(struct mlx5e_priv *priv,
if (IS_ERR(dest[dest_ix].ft))
return ERR_CAST(dest[dest_ix].ft);
} else {
- dest[dest_ix].ft = priv->fs.vlan.ft.t;
+ dest[dest_ix].ft = mlx5e_vlan_get_flowtable(priv->fs.vlan);
}
dest_ix++;
}
@@ -1077,19 +1108,27 @@ mlx5e_tc_offload_fdb_rules(struct mlx5_eswitch *esw,
if (flow_flag_test(flow, CT)) {
mod_hdr_acts = &attr->parse_attr->mod_hdr_acts;
- return mlx5_tc_ct_flow_offload(get_ct_priv(flow->priv),
+ rule = mlx5_tc_ct_flow_offload(get_ct_priv(flow->priv),
flow, spec, attr,
mod_hdr_acts);
+#if IS_ENABLED(CONFIG_MLX5_TC_SAMPLE)
+ } else if (flow_flag_test(flow, SAMPLE)) {
+ rule = mlx5_esw_sample_offload(get_sample_priv(flow->priv), spec, attr);
+#endif
+ } else {
+ rule = mlx5_eswitch_add_offloaded_rule(esw, spec, attr);
}
- rule = mlx5_eswitch_add_offloaded_rule(esw, spec, attr);
if (IS_ERR(rule))
return rule;
if (attr->esw_attr->split_count) {
flow->rule[1] = mlx5_eswitch_add_fwd_rule(esw, spec, attr);
if (IS_ERR(flow->rule[1])) {
- mlx5_eswitch_del_offloaded_rule(esw, rule, attr);
+ if (flow_flag_test(flow, CT))
+ mlx5_tc_ct_delete_flow(get_ct_priv(flow->priv), flow, attr);
+ else
+ mlx5_eswitch_del_offloaded_rule(esw, rule, attr);
return flow->rule[1];
}
}
@@ -1111,6 +1150,13 @@ void mlx5e_tc_unoffload_fdb_rules(struct mlx5_eswitch *esw,
return;
}
+#if IS_ENABLED(CONFIG_MLX5_TC_SAMPLE)
+ if (flow_flag_test(flow, SAMPLE)) {
+ mlx5_esw_sample_unoffload(get_sample_priv(flow->priv), flow->rule[0], attr);
+ return;
+ }
+#endif
+
if (attr->esw_attr->split_count)
mlx5_eswitch_del_fwd_rule(esw, flow->rule[1], attr);
@@ -1467,6 +1513,7 @@ static void mlx5e_tc_del_fdb_flow(struct mlx5e_priv *priv,
if (flow_flag_test(flow, L3_TO_L2_DECAP))
mlx5e_detach_decap(priv, flow);
+ kfree(flow->attr->esw_attr->sample);
kfree(flow->attr);
}
@@ -1950,6 +1997,10 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
misc_parameters);
void *misc_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
misc_parameters);
+ void *misc_c_3 = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
+ misc_parameters_3);
+ void *misc_v_3 = MLX5_ADDR_OF(fte_match_param, spec->match_value,
+ misc_parameters_3);
struct flow_rule *rule = flow_cls_offload_flow_rule(f);
struct flow_dissector *dissector = rule->match.dissector;
u16 addr_type = 0;
@@ -1979,6 +2030,7 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
BIT(FLOW_DISSECTOR_KEY_CT) |
BIT(FLOW_DISSECTOR_KEY_ENC_IP) |
BIT(FLOW_DISSECTOR_KEY_ENC_OPTS) |
+ BIT(FLOW_DISSECTOR_KEY_ICMP) |
BIT(FLOW_DISSECTOR_KEY_MPLS))) {
NL_SET_ERR_MSG_MOD(extack, "Unsupported key");
netdev_dbg(priv->netdev, "Unsupported key used: 0x%x\n",
@@ -2298,7 +2350,49 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
if (match.mask->flags)
*match_level = MLX5_MATCH_L4;
}
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ICMP)) {
+ struct flow_match_icmp match;
+ flow_rule_match_icmp(rule, &match);
+ switch (ip_proto) {
+ case IPPROTO_ICMP:
+ if (!(MLX5_CAP_GEN(priv->mdev, flex_parser_protocols) &
+ MLX5_FLEX_PROTO_ICMP))
+ return -EOPNOTSUPP;
+ MLX5_SET(fte_match_set_misc3, misc_c_3, icmp_type,
+ match.mask->type);
+ MLX5_SET(fte_match_set_misc3, misc_v_3, icmp_type,
+ match.key->type);
+ MLX5_SET(fte_match_set_misc3, misc_c_3, icmp_code,
+ match.mask->code);
+ MLX5_SET(fte_match_set_misc3, misc_v_3, icmp_code,
+ match.key->code);
+ break;
+ case IPPROTO_ICMPV6:
+ if (!(MLX5_CAP_GEN(priv->mdev, flex_parser_protocols) &
+ MLX5_FLEX_PROTO_ICMPV6))
+ return -EOPNOTSUPP;
+ MLX5_SET(fte_match_set_misc3, misc_c_3, icmpv6_type,
+ match.mask->type);
+ MLX5_SET(fte_match_set_misc3, misc_v_3, icmpv6_type,
+ match.key->type);
+ MLX5_SET(fte_match_set_misc3, misc_c_3, icmpv6_code,
+ match.mask->code);
+ MLX5_SET(fte_match_set_misc3, misc_v_3, icmpv6_code,
+ match.key->code);
+ break;
+ default:
+ NL_SET_ERR_MSG_MOD(extack,
+ "Code and type matching only supported for ICMP and ICMPv6");
+ netdev_err(priv->netdev,
+ "Code and type matching only supported for ICMP and ICMPv6\n");
+ return -EINVAL;
+ }
+ if (match.mask->code || match.mask->type) {
+ *match_level = MLX5_MATCH_L4;
+ spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_3;
+ }
+ }
/* Currently supported only for MPLS over UDP */
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_MPLS) &&
!netif_is_bareudp(filter_dev)) {
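
With ICMP type/code wired into misc_parameters_3, a flower rule along these lines (illustrative only) becomes offloadable, provided the device's flex parser advertises the matching MLX5_FLEX_PROTO_* capability bit:

    tc filter add dev eth0 ingress protocol ip flower \
            ip_proto icmp type 8 code 0 skip_sw action drop
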
@@ -3014,7 +3108,8 @@ static bool actions_match_supported(struct mlx5e_priv *priv,
actions = flow->attr->action;
if (mlx5e_is_eswitch_flow(flow)) {
- if (flow->attr->esw_attr->split_count && ct_flow) {
+ if (flow->attr->esw_attr->split_count && ct_flow &&
+ !MLX5_CAP_GEN(flow->attr->esw_attr->in_mdev, reg_c_preserve)) {
/* All registers used by ct are cleared when using
* split rules.
*/
@@ -3052,6 +3147,13 @@ static bool same_hw_devs(struct mlx5e_priv *priv, struct mlx5e_priv *peer_priv)
return (fsystem_guid == psystem_guid);
}
+static bool same_vf_reps(struct mlx5e_priv *priv,
+ struct net_device *out_dev)
+{
+ return mlx5e_eswitch_vf_rep(priv->netdev) &&
+ priv->netdev == out_dev;
+}
+
static int add_vlan_rewrite_action(struct mlx5e_priv *priv, int namespace,
const struct flow_action_entry *act,
struct mlx5e_tc_flow_parse_attr *parse_attr,
@@ -3561,6 +3663,7 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv,
bool ft_flow = mlx5e_is_ft_flow(flow);
const struct flow_action_entry *act;
struct mlx5_esw_flow_attr *esw_attr;
+ struct mlx5_sample_attr sample = {};
bool encap = false, decap = false;
u32 action = attr->action;
int err, i, if_count = 0;
@@ -3737,6 +3840,12 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv,
return -EOPNOTSUPP;
}
+ if (same_vf_reps(priv, out_dev)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "can't forward from a VF to itself");
+ return -EOPNOTSUPP;
+ }
+
out_priv = netdev_priv(out_dev);
rpriv = out_priv->ppriv;
esw_attr->dests[esw_attr->out_count].rep = rpriv->rep;
@@ -3809,11 +3918,27 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv,
attr->dest_chain = act->chain_index;
break;
case FLOW_ACTION_CT:
+ if (flow_flag_test(flow, SAMPLE)) {
+ NL_SET_ERR_MSG_MOD(extack, "Sample action with connection tracking is not supported");
+ return -EOPNOTSUPP;
+ }
err = mlx5_tc_ct_parse_action(get_ct_priv(priv), attr, act, extack);
if (err)
return err;
flow_flag_set(flow, CT);
+ esw_attr->split_count = esw_attr->out_count;
+ break;
+ case FLOW_ACTION_SAMPLE:
+ if (flow_flag_test(flow, CT)) {
+ NL_SET_ERR_MSG_MOD(extack, "Sample action with connection tracking is not supported");
+ return -EOPNOTSUPP;
+ }
+ sample.rate = act->sample.rate;
+ sample.group_num = act->sample.psample_group->group_num;
+ if (act->sample.truncate)
+ sample.trunc_size = act->sample.trunc_size;
+ flow_flag_set(flow, SAMPLE);
break;
default:
NL_SET_ERR_MSG_MOD(extack, "The offload action is not supported");
@@ -3876,11 +4001,6 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv,
return -EOPNOTSUPP;
}
- if (attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
- NL_SET_ERR_MSG_MOD(extack,
- "Mirroring goto chain rules isn't supported");
- return -EOPNOTSUPP;
- }
attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
}
@@ -3898,6 +4018,16 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv,
return -EOPNOTSUPP;
}
+ /* Allocate sample attribute only when there is a sample action and
+ * no errors after parsing.
+ */
+ if (flow_flag_test(flow, SAMPLE)) {
+ esw_attr->sample = kzalloc(sizeof(*esw_attr->sample), GFP_KERNEL);
+ if (!esw_attr->sample)
+ return -ENOMEM;
+ *esw_attr->sample = sample;
+ }
+
return 0;
}
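
The sample attribute is staged on the stack during parsing and copied to a heap allocation only after the whole action list has been accepted, so the earlier error returns cannot leak it. Note also that sample and CT are rejected in combination above; the two features compete for the scarce metadata registers. A rule of this shape (illustrative) exercises the path, with sampled copies delivered to userspace over the psample netlink channel:

    tc filter add dev eth0 ingress protocol ip flower skip_sw \
            action sample rate 100 group 5 trunc 128 \
            action mirred egress redirect dev eth1
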
@@ -4300,6 +4430,11 @@ int mlx5e_configure_flower(struct net_device *dev, struct mlx5e_priv *priv,
struct mlx5e_tc_flow *flow;
int err = 0;
+ if (!mlx5_esw_hold(priv->mdev))
+ return -EAGAIN;
+
+ mlx5_esw_get(priv->mdev);
+
rcu_read_lock();
flow = rhashtable_lookup(tc_ht, &f->cookie, tc_ht_params);
if (flow) {
@@ -4337,11 +4472,14 @@ rcu_unlock:
if (err)
goto err_free;
+ mlx5_esw_release(priv->mdev);
return 0;
err_free:
mlx5e_flow_put(priv, flow);
out:
+ mlx5_esw_put(priv->mdev);
+ mlx5_esw_release(priv->mdev);
return err;
}
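
The eswitch referencing in mlx5e_configure_flower() pairs two mechanisms, as reflected above and in mlx5e_delete_flower() below (sketch):

    mlx5_esw_hold()    /* block eswitch mode changes while the rule is added */
    mlx5_esw_get()     /* pin the current mode for the flow's lifetime */
    ...
    mlx5_esw_release() /* add path finished; mode changes may proceed */
    mlx5_esw_put()     /* dropped on flow deletion, or on the error path */
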
@@ -4381,6 +4519,7 @@ int mlx5e_delete_flower(struct net_device *dev, struct mlx5e_priv *priv,
trace_mlx5e_delete_flower(f);
mlx5e_flow_put(priv, flow);
+ mlx5_esw_put(priv->mdev);
return 0;
errout:
@@ -4516,6 +4655,10 @@ static int scan_tc_matchall_fdb_actions(struct mlx5e_priv *priv,
flow_action_for_each(i, act, flow_action) {
switch (act->id) {
case FLOW_ACTION_POLICE:
+ if (act->police.rate_pkt_ps) {
+ NL_SET_ERR_MSG_MOD(extack, "QoS offload not support packets per second");
+ return -EOPNOTSUPP;
+ }
err = apply_police_params(priv, act->police.rate_bytes_ps, extack);
if (err)
return err;
@@ -4650,6 +4793,7 @@ int mlx5e_tc_nic_init(struct mlx5e_priv *priv)
{
struct mlx5e_tc_table *tc = &priv->fs.tc;
struct mlx5_core_dev *dev = priv->mdev;
+ struct mapping_ctx *chains_mapping;
struct mlx5_chains_attr attr = {};
int err;
@@ -4664,15 +4808,22 @@ int mlx5e_tc_nic_init(struct mlx5e_priv *priv)
lockdep_set_class(&tc->ht.mutex, &tc_ht_lock_key);
- if (MLX5_CAP_FLOWTABLE_NIC_RX(priv->mdev, ignore_flow_level)) {
+ chains_mapping = mapping_create(sizeof(struct mlx5_mapped_obj),
+ MLX5E_TC_TABLE_CHAIN_TAG_MASK, true);
+ if (IS_ERR(chains_mapping)) {
+ err = PTR_ERR(chains_mapping);
+ goto err_mapping;
+ }
+ tc->mapping = chains_mapping;
+
+ if (MLX5_CAP_FLOWTABLE_NIC_RX(priv->mdev, ignore_flow_level))
attr.flags = MLX5_CHAINS_AND_PRIOS_SUPPORTED |
MLX5_CHAINS_IGNORE_FLOW_LEVEL_SUPPORTED;
- attr.max_restore_tag = MLX5E_TC_TABLE_CHAIN_TAG_MASK;
- }
attr.ns = MLX5_FLOW_NAMESPACE_KERNEL;
attr.max_ft_sz = mlx5e_tc_nic_get_ft_size(dev);
attr.max_grp_num = MLX5E_TC_TABLE_NUM_GROUPS;
- attr.default_ft = priv->fs.vlan.ft.t;
+ attr.default_ft = mlx5e_vlan_get_flowtable(priv->fs.vlan);
+ attr.mapping = chains_mapping;
tc->chains = mlx5_chains_create(dev, &attr);
if (IS_ERR(tc->chains)) {
@@ -4682,10 +4833,6 @@ int mlx5e_tc_nic_init(struct mlx5e_priv *priv)
tc->ct = mlx5_tc_ct_init(priv, tc->chains, &priv->fs.tc.mod_hdr,
MLX5_FLOW_NAMESPACE_KERNEL);
- if (IS_ERR(tc->ct)) {
- err = PTR_ERR(tc->ct);
- goto err_ct;
- }
tc->netdevice_nb.notifier_call = mlx5e_tc_netdev_event;
err = register_netdevice_notifier_dev_net(priv->netdev,
@@ -4701,9 +4848,10 @@ int mlx5e_tc_nic_init(struct mlx5e_priv *priv)
err_reg:
mlx5_tc_ct_clean(tc->ct);
-err_ct:
mlx5_chains_destroy(tc->chains);
err_chains:
+ mapping_destroy(chains_mapping);
+err_mapping:
rhashtable_destroy(&tc->ht);
return err;
}
@@ -4738,6 +4886,7 @@ void mlx5e_tc_nic_cleanup(struct mlx5e_priv *priv)
mutex_destroy(&tc->t_lock);
mlx5_tc_ct_clean(tc->ct);
+ mapping_destroy(tc->mapping);
mlx5_chains_destroy(tc->chains);
}
@@ -4760,8 +4909,10 @@ int mlx5e_tc_esw_init(struct rhashtable *tc_ht)
esw_chains(esw),
&esw->offloads.mod_hdr,
MLX5_FLOW_NAMESPACE_FDB);
- if (IS_ERR(uplink_priv->ct_priv))
- goto err_ct;
+
+#if IS_ENABLED(CONFIG_MLX5_TC_SAMPLE)
+ uplink_priv->esw_psample = mlx5_esw_sample_init(netdev_priv(priv->netdev));
+#endif
mapping = mapping_create(sizeof(struct tunnel_match_key),
TUNNEL_INFO_BITS_MASK, true);
@@ -4800,8 +4951,10 @@ err_ht_init:
err_enc_opts_mapping:
mapping_destroy(uplink_priv->tunnel_mapping);
err_tun_mapping:
+#if IS_ENABLED(CONFIG_MLX5_TC_SAMPLE)
+ mlx5_esw_sample_cleanup(uplink_priv->esw_psample);
+#endif
mlx5_tc_ct_clean(uplink_priv->ct_priv);
-err_ct:
netdev_warn(priv->netdev,
"Failed to initialize tc (eswitch), err: %d", err);
return err;
@@ -4819,6 +4972,9 @@ void mlx5e_tc_esw_cleanup(struct rhashtable *tc_ht)
mapping_destroy(uplink_priv->tunnel_enc_opts_mapping);
mapping_destroy(uplink_priv->tunnel_mapping);
+#if IS_ENABLED(CONFIG_MLX5_TC_SAMPLE)
+ mlx5_esw_sample_cleanup(uplink_priv->esw_psample);
+#endif
mlx5_tc_ct_clean(uplink_priv->ct_priv);
}
@@ -4874,9 +5030,17 @@ static int mlx5e_setup_tc_cls_flower(struct mlx5e_priv *priv,
int mlx5e_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
void *cb_priv)
{
- unsigned long flags = MLX5_TC_FLAG(INGRESS) | MLX5_TC_FLAG(NIC_OFFLOAD);
+ unsigned long flags = MLX5_TC_FLAG(INGRESS);
struct mlx5e_priv *priv = cb_priv;
+ if (!priv->netdev || !netif_device_present(priv->netdev))
+ return -EOPNOTSUPP;
+
+ if (mlx5e_is_uplink_rep(priv))
+ flags |= MLX5_TC_FLAG(ESW_OFFLOAD);
+ else
+ flags |= MLX5_TC_FLAG(NIC_OFFLOAD);
+
switch (type) {
case TC_SETUP_CLSFLOWER:
return mlx5e_setup_tc_cls_flower(priv, type_data, flags);
@@ -4892,6 +5056,7 @@ bool mlx5e_tc_update_skb(struct mlx5_cqe64 *cqe,
u32 chain = 0, chain_tag, reg_b, zone_restore_id;
struct mlx5e_priv *priv = netdev_priv(skb->dev);
struct mlx5e_tc_table *tc = &priv->fs.tc;
+ struct mlx5_mapped_obj mapped_obj;
struct tc_skb_ext *tc_skb_ext;
int err;
@@ -4899,7 +5064,7 @@ bool mlx5e_tc_update_skb(struct mlx5_cqe64 *cqe,
chain_tag = reg_b & MLX5E_TC_TABLE_CHAIN_TAG_MASK;
- err = mlx5_get_chain_for_tag(nic_chains(priv), chain_tag, &chain);
+ err = mapping_find(tc->mapping, chain_tag, &mapped_obj);
if (err) {
netdev_dbg(priv->netdev,
"Couldn't find chain for chain tag: %d, err: %d\n",
@@ -4907,7 +5072,8 @@ bool mlx5e_tc_update_skb(struct mlx5_cqe64 *cqe,
return false;
}
- if (chain) {
+ if (mapped_obj.type == MLX5_MAPPED_OBJ_CHAIN) {
+ chain = mapped_obj.chain;
tc_skb_ext = skb_ext_add(skb, TC_SKB_EXT);
if (WARN_ON(!tc_skb_ext))
return false;
@@ -4920,6 +5086,9 @@ bool mlx5e_tc_update_skb(struct mlx5_cqe64 *cqe,
if (!mlx5e_tc_ct_restore_flow(tc->ct, skb,
zone_restore_id))
return false;
+ } else {
+ netdev_dbg(priv->netdev, "Invalid mapped object type: %d\n", mapped_obj.type);
+ return false;
}
#endif /* CONFIG_NET_TC_SKB_EXT */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
index bdbffe484fce..8ba62671f5f1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
@@ -133,6 +133,8 @@ u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb,
/* Sync with mlx5e_update_num_tc_x_num_ch - avoid refetching. */
num_tc_x_num_ch = READ_ONCE(priv->num_tc_x_num_ch);
if (unlikely(dev->real_num_tx_queues > num_tc_x_num_ch)) {
+ struct mlx5e_ptp *ptp_channel;
+
/* Order maj_id before defcls - pairs with mlx5e_htb_root_add. */
u16 htb_maj_id = smp_load_acquire(&priv->htb.maj_id);
@@ -142,10 +144,11 @@ u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb,
return txq_ix;
}
- if (unlikely(priv->channels.port_ptp))
- if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
- mlx5e_use_ptpsq(skb))
- return mlx5e_select_ptpsq(dev, skb);
+ ptp_channel = READ_ONCE(priv->channels.ptp);
+ if (unlikely(ptp_channel) &&
+ test_bit(MLX5E_PTP_STATE_TX, ptp_channel->state) &&
+ mlx5e_use_ptpsq(skb))
+ return mlx5e_select_ptpsq(dev, skb);
txq_ix = netdev_pick_tx(dev, skb, NULL);
/* Fix netdev_pick_tx() not to choose ptp_channel and HTB txqs.
@@ -576,7 +579,7 @@ static void mlx5e_tx_mpwqe_session_start(struct mlx5e_txqsq *sq,
pi = mlx5e_txqsq_get_next_pi(sq, MLX5E_TX_MPW_MAX_WQEBBS);
wqe = MLX5E_TX_FETCH_WQE(sq, pi);
- prefetchw(wqe->data);
+ net_prefetchw(wqe->data);
*session = (struct mlx5e_tx_mpwqe) {
.wqe = wqe,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
index d54da3797c30..833be29170a1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
@@ -36,6 +36,7 @@
#include "en/xdp.h"
#include "en/xsk/rx.h"
#include "en/xsk/tx.h"
+#include "en_accel/ktls_txrx.h"
static inline bool mlx5e_channel_no_affinity_change(struct mlx5e_channel *c)
{
@@ -171,6 +172,10 @@ int mlx5e_napi_poll(struct napi_struct *napi, int budget)
*/
clear_bit(MLX5E_SQ_STATE_PENDING_XSK_TX, &c->async_icosq.state);
+ /* Keep after async ICOSQ CQ poll */
+ if (unlikely(mlx5e_ktls_rx_pending_resync_list(c, budget)))
+ busy |= mlx5e_ktls_rx_handle_resync_list(c, budget);
+
busy |= INDIRECT_CALL_2(rq->post_wqes,
mlx5e_post_rx_mpwqes,
mlx5e_post_rx_wqes,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
index 1fa9c18563da..77c0ca655975 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
@@ -271,7 +271,7 @@ static void init_eq_buf(struct mlx5_eq *eq)
struct mlx5_eqe *eqe;
int i;
- for (i = 0; i < eq->nent; i++) {
+ for (i = 0; i < eq_get_size(eq); i++) {
eqe = get_eqe(eq, i);
eqe->owner = MLX5_EQE_OWNER_INIT_VAL;
}
@@ -281,8 +281,10 @@ static int
create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq,
struct mlx5_eq_param *param)
{
+ u8 log_eq_size = order_base_2(param->nent + MLX5_NUM_SPARE_EQE);
struct mlx5_cq_table *cq_table = &eq->cq_table;
u32 out[MLX5_ST_SZ_DW(create_eq_out)] = {0};
+ u8 log_eq_stride = ilog2(MLX5_EQE_SIZE);
struct mlx5_priv *priv = &dev->priv;
u8 vecidx = param->irq_index;
__be64 *pas;
@@ -297,16 +299,18 @@ create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq,
spin_lock_init(&cq_table->lock);
INIT_RADIX_TREE(&cq_table->tree, GFP_ATOMIC);
- eq->nent = roundup_pow_of_two(param->nent + MLX5_NUM_SPARE_EQE);
eq->cons_index = 0;
- err = mlx5_buf_alloc(dev, eq->nent * MLX5_EQE_SIZE, &eq->buf);
+
+ err = mlx5_frag_buf_alloc_node(dev, wq_get_byte_sz(log_eq_size, log_eq_stride),
+ &eq->frag_buf, dev->priv.numa_node);
if (err)
return err;
+ mlx5_init_fbc(eq->frag_buf.frags, log_eq_stride, log_eq_size, &eq->fbc);
init_eq_buf(eq);
inlen = MLX5_ST_SZ_BYTES(create_eq_in) +
- MLX5_FLD_SZ_BYTES(create_eq_in, pas[0]) * eq->buf.npages;
+ MLX5_FLD_SZ_BYTES(create_eq_in, pas[0]) * eq->frag_buf.npages;
in = kvzalloc(inlen, GFP_KERNEL);
if (!in) {
@@ -315,7 +319,7 @@ create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq,
}
pas = (__be64 *)MLX5_ADDR_OF(create_eq_in, in, pas);
- mlx5_fill_page_array(&eq->buf, pas);
+ mlx5_fill_page_frag_array(&eq->frag_buf, pas);
MLX5_SET(create_eq_in, in, opcode, MLX5_CMD_OP_CREATE_EQ);
if (!param->mask[0] && MLX5_CAP_GEN(dev, log_max_uctx))
@@ -326,11 +330,11 @@ create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq,
param->mask[i]);
eqc = MLX5_ADDR_OF(create_eq_in, in, eq_context_entry);
- MLX5_SET(eqc, eqc, log_eq_size, ilog2(eq->nent));
+ MLX5_SET(eqc, eqc, log_eq_size, eq->fbc.log_sz);
MLX5_SET(eqc, eqc, uar_page, priv->uar->index);
MLX5_SET(eqc, eqc, intr, vecidx);
MLX5_SET(eqc, eqc, log_page_size,
- eq->buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT);
+ eq->frag_buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT);
err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
if (err)
@@ -356,7 +360,7 @@ err_in:
kvfree(in);
err_buf:
- mlx5_buf_free(dev, &eq->buf);
+ mlx5_frag_buf_free(dev, &eq->frag_buf);
return err;
}
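
The EQ buffer moves from one physically contiguous allocation to a fragmented, NUMA-local one, but the sizing math is preserved: for example, with param->nent + MLX5_NUM_SPARE_EQE = 1152, order_base_2() gives log_eq_size = 11, i.e. a 2048-entry queue, exactly what the old roundup_pow_of_two() produced, while wq_get_byte_sz(11, ilog2(MLX5_EQE_SIZE)) supplies the equivalent byte count to the frag allocator.
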
@@ -413,7 +417,7 @@ static int destroy_unmap_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq)
eq->eqn);
synchronize_irq(eq->irqn);
- mlx5_buf_free(dev, &eq->buf);
+ mlx5_frag_buf_free(dev, &eq->frag_buf);
return err;
}
@@ -764,10 +768,11 @@ EXPORT_SYMBOL(mlx5_eq_destroy_generic);
struct mlx5_eqe *mlx5_eq_get_eqe(struct mlx5_eq *eq, u32 cc)
{
u32 ci = eq->cons_index + cc;
+ u32 nent = eq_get_size(eq);
struct mlx5_eqe *eqe;
- eqe = get_eqe(eq, ci & (eq->nent - 1));
- eqe = ((eqe->owner & 1) ^ !!(ci & eq->nent)) ? NULL : eqe;
+ eqe = get_eqe(eq, ci & (nent - 1));
+ eqe = ((eqe->owner & 1) ^ !!(ci & nent)) ? NULL : eqe;
/* Make sure we read EQ entry contents after we've
* checked the ownership bit.
*/
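
The ownership test itself is unchanged, only rebased onto eq_get_size(): hardware flips each EQE's owner bit on every pass over the queue, so an entry is valid only while the bit's parity matches the pass parity. For example, with nent = 2048 and ci = 2050, the driver is on its second pass (ci & nent is non-zero), so get_eqe(eq, 2) is consumed only if its owner bit is set; otherwise the entry still belongs to hardware and NULL is returned.
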
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/egress_lgcy.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/egress_lgcy.c
index 3e19b1721303..0399a396d166 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/egress_lgcy.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/egress_lgcy.c
@@ -96,7 +96,7 @@ int esw_acl_egress_lgcy_setup(struct mlx5_eswitch *esw,
}
if (!vport->egress.acl) {
- vport->egress.acl = esw_acl_table_create(esw, vport->vport,
+ vport->egress.acl = esw_acl_table_create(esw, vport,
MLX5_FLOW_NAMESPACE_ESW_EGRESS,
table_size);
if (IS_ERR(vport->egress.acl)) {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/egress_ofld.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/egress_ofld.c
index 26b37a0f8762..505bf811984a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/egress_ofld.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/egress_ofld.c
@@ -148,7 +148,7 @@ static void esw_acl_egress_ofld_groups_destroy(struct mlx5_vport *vport)
esw_acl_egress_vlan_grp_destroy(vport);
}
-static bool esw_acl_egress_needed(const struct mlx5_eswitch *esw, u16 vport_num)
+static bool esw_acl_egress_needed(struct mlx5_eswitch *esw, u16 vport_num)
{
return mlx5_eswitch_is_vf_vport(esw, vport_num) || mlx5_esw_is_sf_vport(esw, vport_num);
}
@@ -171,7 +171,7 @@ int esw_acl_egress_ofld_setup(struct mlx5_eswitch *esw, struct mlx5_vport *vport
table_size++;
if (MLX5_CAP_GEN(esw->dev, prio_tag_required))
table_size++;
- vport->egress.acl = esw_acl_table_create(esw, vport->vport,
+ vport->egress.acl = esw_acl_table_create(esw, vport,
MLX5_FLOW_NAMESPACE_ESW_EGRESS, table_size);
if (IS_ERR(vport->egress.acl)) {
err = PTR_ERR(vport->egress.acl);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/helper.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/helper.c
index 4a369669e51e..45b839116212 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/helper.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/helper.c
@@ -6,14 +6,14 @@
#include "helper.h"
struct mlx5_flow_table *
-esw_acl_table_create(struct mlx5_eswitch *esw, u16 vport_num, int ns, int size)
+esw_acl_table_create(struct mlx5_eswitch *esw, struct mlx5_vport *vport, int ns, int size)
{
struct mlx5_flow_table_attr ft_attr = {};
struct mlx5_core_dev *dev = esw->dev;
struct mlx5_flow_namespace *root_ns;
struct mlx5_flow_table *acl;
int acl_supported;
- int vport_index;
+ u16 vport_num;
int err;
acl_supported = (ns == MLX5_FLOW_NAMESPACE_ESW_INGRESS) ?
@@ -23,11 +23,11 @@ esw_acl_table_create(struct mlx5_eswitch *esw, u16 vport_num, int ns, int size)
if (!acl_supported)
return ERR_PTR(-EOPNOTSUPP);
+ vport_num = vport->vport;
esw_debug(dev, "Create vport[%d] %s ACL table\n", vport_num,
ns == MLX5_FLOW_NAMESPACE_ESW_INGRESS ? "ingress" : "egress");
- vport_index = mlx5_eswitch_vport_num_to_index(esw, vport_num);
- root_ns = mlx5_get_flow_vport_acl_namespace(dev, ns, vport_index);
+ root_ns = mlx5_get_flow_vport_acl_namespace(dev, ns, vport->index);
if (!root_ns) {
esw_warn(dev, "Failed to get E-Switch root namespace for vport (%d)\n",
vport_num);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/helper.h b/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/helper.h
index 8dc4cab66a71..a47063fab57e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/helper.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/helper.h
@@ -8,7 +8,7 @@
/* General acl helper functions */
struct mlx5_flow_table *
-esw_acl_table_create(struct mlx5_eswitch *esw, u16 vport_num, int ns, int size);
+esw_acl_table_create(struct mlx5_eswitch *esw, struct mlx5_vport *vport, int ns, int size);
/* Egress acl helper functions */
void esw_acl_egress_table_destroy(struct mlx5_vport *vport);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ingress_lgcy.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ingress_lgcy.c
index d64fad2823e7..f75b86abaf1c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ingress_lgcy.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ingress_lgcy.c
@@ -177,7 +177,7 @@ int esw_acl_ingress_lgcy_setup(struct mlx5_eswitch *esw,
}
if (!vport->ingress.acl) {
- vport->ingress.acl = esw_acl_table_create(esw, vport->vport,
+ vport->ingress.acl = esw_acl_table_create(esw, vport,
MLX5_FLOW_NAMESPACE_ESW_INGRESS,
table_size);
if (IS_ERR(vport->ingress.acl)) {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ingress_ofld.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ingress_ofld.c
index 548c005ea633..39e948bc1204 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ingress_ofld.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ingress_ofld.c
@@ -7,7 +7,7 @@
#include "ofld.h"
static bool
-esw_acl_ingress_prio_tag_enabled(const struct mlx5_eswitch *esw,
+esw_acl_ingress_prio_tag_enabled(struct mlx5_eswitch *esw,
const struct mlx5_vport *vport)
{
return (MLX5_CAP_GEN(esw->dev, prio_tag_required) &&
@@ -255,7 +255,7 @@ int esw_acl_ingress_ofld_setup(struct mlx5_eswitch *esw,
if (esw_acl_ingress_prio_tag_enabled(esw, vport))
num_ftes++;
- vport->ingress.acl = esw_acl_table_create(esw, vport->vport,
+ vport->ingress.acl = esw_acl_table_create(esw, vport,
MLX5_FLOW_NAMESPACE_ESW_INGRESS,
num_ftes);
if (IS_ERR(vport->ingress.acl)) {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c
index cb1e181f4c6a..1703384eca95 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c
@@ -14,8 +14,7 @@ mlx5_esw_get_port_parent_id(struct mlx5_core_dev *dev, struct netdev_phys_item_i
memcpy(ppid->id, &parent_id, sizeof(parent_id));
}
-static bool
-mlx5_esw_devlink_port_supported(const struct mlx5_eswitch *esw, u16 vport_num)
+static bool mlx5_esw_devlink_port_supported(struct mlx5_eswitch *esw, u16 vport_num)
{
return vport_num == MLX5_VPORT_UPLINK ||
(mlx5_core_is_ecpf(esw->dev) && vport_num == MLX5_VPORT_PF) ||
@@ -120,11 +119,11 @@ struct devlink_port *mlx5_esw_offloads_devlink_port(struct mlx5_eswitch *esw, u1
struct mlx5_vport *vport;
vport = mlx5_eswitch_get_vport(esw, vport_num);
- return vport->dl_port;
+ return IS_ERR(vport) ? ERR_CAST(vport) : vport->dl_port;
}
int mlx5_esw_devlink_sf_port_register(struct mlx5_eswitch *esw, struct devlink_port *dl_port,
- u16 vport_num, u32 sfnum)
+ u16 vport_num, u32 controller, u32 sfnum)
{
struct mlx5_core_dev *dev = esw->dev;
struct netdev_phys_item_id ppid = {};
@@ -142,7 +141,7 @@ int mlx5_esw_devlink_sf_port_register(struct mlx5_eswitch *esw, struct devlink_p
mlx5_esw_get_port_parent_id(dev, &ppid);
memcpy(dl_port->attrs.switch_id.id, &ppid.id[0], ppid.id_len);
dl_port->attrs.switch_id.id_len = ppid.id_len;
- devlink_port_attrs_pci_sf_set(dl_port, 0, pfnum, sfnum);
+ devlink_port_attrs_pci_sf_set(dl_port, controller, pfnum, sfnum, !!controller);
devlink = priv_to_devlink(dev);
dl_port_index = mlx5_esw_vport_to_devlink_port_index(dev, vport_num);
err = devlink_port_register(devlink, dl_port, dl_port_index);
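
Plumbing the controller number into devlink_port_attrs_pci_sf_set(), with the external flag derived from it, lets SF ports spawned for an external controller be told apart from local ones. A port listing might then look like this (illustrative output, field layout per the devlink-port documentation):

    pci/0000:06:00.0/32768: type eth netdev eth6 flavour pcisf controller 1 pfnum 0 sfnum 77 external true
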
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/indir_table.h b/drivers/net/ethernet/mellanox/mlx5/core/esw/indir_table.h
index cb9eafd1b4ee..21d56b49d14b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/indir_table.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/indir_table.h
@@ -30,13 +30,13 @@ mlx5_esw_indir_table_decap_vport(struct mlx5_flow_attr *attr);
#else
/* indir API stubs */
-struct mlx5_esw_indir_table *
+static inline struct mlx5_esw_indir_table *
mlx5_esw_indir_table_init(void)
{
return NULL;
}
-void
+static inline void
mlx5_esw_indir_table_destroy(struct mlx5_esw_indir_table *indir)
{
}
@@ -57,7 +57,7 @@ mlx5_esw_indir_table_put(struct mlx5_eswitch *esw,
{
}
-bool
+static inline bool
mlx5_esw_indir_table_needed(struct mlx5_eswitch *esw,
struct mlx5_flow_attr *attr,
u16 vport_num,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/legacy.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/legacy.c
new file mode 100644
index 000000000000..d9041b16611d
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/legacy.c
@@ -0,0 +1,510 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2021 Mellanox Technologies Ltd */
+
+#include <linux/etherdevice.h>
+#include <linux/mlx5/driver.h>
+#include <linux/mlx5/mlx5_ifc.h>
+#include <linux/mlx5/vport.h>
+#include <linux/mlx5/fs.h>
+#include "esw/acl/lgcy.h"
+#include "esw/legacy.h"
+#include "mlx5_core.h"
+#include "eswitch.h"
+#include "fs_core.h"
+
+enum {
+ LEGACY_VEPA_PRIO = 0,
+ LEGACY_FDB_PRIO,
+};
+
+static int esw_create_legacy_vepa_table(struct mlx5_eswitch *esw)
+{
+ struct mlx5_flow_table_attr ft_attr = {};
+ struct mlx5_core_dev *dev = esw->dev;
+ struct mlx5_flow_namespace *root_ns;
+ struct mlx5_flow_table *fdb;
+ int err;
+
+ root_ns = mlx5_get_fdb_sub_ns(dev, 0);
+ if (!root_ns) {
+ esw_warn(dev, "Failed to get FDB flow namespace\n");
+ return -EOPNOTSUPP;
+ }
+
+ /* num FTE 2, num FG 2 */
+ ft_attr.prio = LEGACY_VEPA_PRIO;
+ ft_attr.max_fte = 2;
+ ft_attr.autogroup.max_num_groups = 2;
+ fdb = mlx5_create_auto_grouped_flow_table(root_ns, &ft_attr);
+ if (IS_ERR(fdb)) {
+ err = PTR_ERR(fdb);
+ esw_warn(dev, "Failed to create VEPA FDB err %d\n", err);
+ return err;
+ }
+ esw->fdb_table.legacy.vepa_fdb = fdb;
+
+ return 0;
+}
+
+static void esw_destroy_legacy_fdb_table(struct mlx5_eswitch *esw)
+{
+ esw_debug(esw->dev, "Destroy FDB Table\n");
+ if (!esw->fdb_table.legacy.fdb)
+ return;
+
+ if (esw->fdb_table.legacy.promisc_grp)
+ mlx5_destroy_flow_group(esw->fdb_table.legacy.promisc_grp);
+ if (esw->fdb_table.legacy.allmulti_grp)
+ mlx5_destroy_flow_group(esw->fdb_table.legacy.allmulti_grp);
+ if (esw->fdb_table.legacy.addr_grp)
+ mlx5_destroy_flow_group(esw->fdb_table.legacy.addr_grp);
+ mlx5_destroy_flow_table(esw->fdb_table.legacy.fdb);
+
+ esw->fdb_table.legacy.fdb = NULL;
+ esw->fdb_table.legacy.addr_grp = NULL;
+ esw->fdb_table.legacy.allmulti_grp = NULL;
+ esw->fdb_table.legacy.promisc_grp = NULL;
+ atomic64_set(&esw->user_count, 0);
+}
+
+static int esw_create_legacy_fdb_table(struct mlx5_eswitch *esw)
+{
+ int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
+ struct mlx5_flow_table_attr ft_attr = {};
+ struct mlx5_core_dev *dev = esw->dev;
+ struct mlx5_flow_namespace *root_ns;
+ struct mlx5_flow_table *fdb;
+ struct mlx5_flow_group *g;
+ void *match_criteria;
+ int table_size;
+ u32 *flow_group_in;
+ u8 *dmac;
+ int err = 0;
+
+ esw_debug(dev, "Create FDB log_max_size(%d)\n",
+ MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size));
+
+ root_ns = mlx5_get_fdb_sub_ns(dev, 0);
+ if (!root_ns) {
+ esw_warn(dev, "Failed to get FDB flow namespace\n");
+ return -EOPNOTSUPP;
+ }
+
+ flow_group_in = kvzalloc(inlen, GFP_KERNEL);
+ if (!flow_group_in)
+ return -ENOMEM;
+
+ table_size = BIT(MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size));
+ ft_attr.max_fte = table_size;
+ ft_attr.prio = LEGACY_FDB_PRIO;
+ fdb = mlx5_create_flow_table(root_ns, &ft_attr);
+ if (IS_ERR(fdb)) {
+ err = PTR_ERR(fdb);
+ esw_warn(dev, "Failed to create FDB Table err %d\n", err);
+ goto out;
+ }
+ esw->fdb_table.legacy.fdb = fdb;
+
+ /* Addresses group : Full match unicast/multicast addresses */
+ MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
+ MLX5_MATCH_OUTER_HEADERS);
+ match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);
+ dmac = MLX5_ADDR_OF(fte_match_param, match_criteria, outer_headers.dmac_47_16);
+ MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
+ /* Preserve 2 entries for allmulti and promisc rules */
+ MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, table_size - 3);
+ eth_broadcast_addr(dmac);
+ g = mlx5_create_flow_group(fdb, flow_group_in);
+ if (IS_ERR(g)) {
+ err = PTR_ERR(g);
+ esw_warn(dev, "Failed to create flow group err(%d)\n", err);
+ goto out;
+ }
+ esw->fdb_table.legacy.addr_grp = g;
+
+ /* Allmulti group : One rule that forwards any mcast traffic */
+ MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
+ MLX5_MATCH_OUTER_HEADERS);
+ MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, table_size - 2);
+ MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, table_size - 2);
+ eth_zero_addr(dmac);
+ dmac[0] = 0x01;
+ g = mlx5_create_flow_group(fdb, flow_group_in);
+ if (IS_ERR(g)) {
+ err = PTR_ERR(g);
+ esw_warn(dev, "Failed to create allmulti flow group err(%d)\n", err);
+ goto out;
+ }
+ esw->fdb_table.legacy.allmulti_grp = g;
+
+ /* Promiscuous group :
+ * One rule that forwards all unmatched traffic from previous groups
+ */
+ eth_zero_addr(dmac);
+ MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
+ MLX5_MATCH_MISC_PARAMETERS);
+ MLX5_SET_TO_ONES(fte_match_param, match_criteria, misc_parameters.source_port);
+ MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, table_size - 1);
+ MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, table_size - 1);
+ g = mlx5_create_flow_group(fdb, flow_group_in);
+ if (IS_ERR(g)) {
+ err = PTR_ERR(g);
+ esw_warn(dev, "Failed to create promisc flow group err(%d)\n", err);
+ goto out;
+ }
+ esw->fdb_table.legacy.promisc_grp = g;
+
+out:
+ if (err)
+ esw_destroy_legacy_fdb_table(esw);
+
+ kvfree(flow_group_in);
+ return err;
+}
+
+static void esw_destroy_legacy_vepa_table(struct mlx5_eswitch *esw)
+{
+ esw_debug(esw->dev, "Destroy VEPA Table\n");
+ if (!esw->fdb_table.legacy.vepa_fdb)
+ return;
+
+ mlx5_destroy_flow_table(esw->fdb_table.legacy.vepa_fdb);
+ esw->fdb_table.legacy.vepa_fdb = NULL;
+}
+
+static int esw_create_legacy_table(struct mlx5_eswitch *esw)
+{
+ int err;
+
+ memset(&esw->fdb_table.legacy, 0, sizeof(struct legacy_fdb));
+ atomic64_set(&esw->user_count, 0);
+
+ err = esw_create_legacy_vepa_table(esw);
+ if (err)
+ return err;
+
+ err = esw_create_legacy_fdb_table(esw);
+ if (err)
+ esw_destroy_legacy_vepa_table(esw);
+
+ return err;
+}
+
+static void esw_cleanup_vepa_rules(struct mlx5_eswitch *esw)
+{
+ if (esw->fdb_table.legacy.vepa_uplink_rule)
+ mlx5_del_flow_rules(esw->fdb_table.legacy.vepa_uplink_rule);
+
+ if (esw->fdb_table.legacy.vepa_star_rule)
+ mlx5_del_flow_rules(esw->fdb_table.legacy.vepa_star_rule);
+
+ esw->fdb_table.legacy.vepa_uplink_rule = NULL;
+ esw->fdb_table.legacy.vepa_star_rule = NULL;
+}
+
+static void esw_destroy_legacy_table(struct mlx5_eswitch *esw)
+{
+ esw_cleanup_vepa_rules(esw);
+ esw_destroy_legacy_fdb_table(esw);
+ esw_destroy_legacy_vepa_table(esw);
+}
+
+#define MLX5_LEGACY_SRIOV_VPORT_EVENTS (MLX5_VPORT_UC_ADDR_CHANGE | \
+ MLX5_VPORT_MC_ADDR_CHANGE | \
+ MLX5_VPORT_PROMISC_CHANGE)
+
+int esw_legacy_enable(struct mlx5_eswitch *esw)
+{
+ struct mlx5_vport *vport;
+ unsigned long i;
+ int ret;
+
+ ret = esw_create_legacy_table(esw);
+ if (ret)
+ return ret;
+
+ mlx5_esw_for_each_vf_vport(esw, i, vport, esw->esw_funcs.num_vfs)
+ vport->info.link_state = MLX5_VPORT_ADMIN_STATE_AUTO;
+
+ ret = mlx5_eswitch_enable_pf_vf_vports(esw, MLX5_LEGACY_SRIOV_VPORT_EVENTS);
+ if (ret)
+ esw_destroy_legacy_table(esw);
+ return ret;
+}
+
+void esw_legacy_disable(struct mlx5_eswitch *esw)
+{
+ struct esw_mc_addr *mc_promisc;
+
+ mlx5_eswitch_disable_pf_vf_vports(esw);
+
+ mc_promisc = &esw->mc_promisc;
+ if (mc_promisc->uplink_rule)
+ mlx5_del_flow_rules(mc_promisc->uplink_rule);
+
+ esw_destroy_legacy_table(esw);
+}
+
+static int _mlx5_eswitch_set_vepa_locked(struct mlx5_eswitch *esw,
+ u8 setting)
+{
+ struct mlx5_flow_destination dest = {};
+ struct mlx5_flow_act flow_act = {};
+ struct mlx5_flow_handle *flow_rule;
+ struct mlx5_flow_spec *spec;
+ int err = 0;
+ void *misc;
+
+ if (!setting) {
+ esw_cleanup_vepa_rules(esw);
+ return 0;
+ }
+
+ if (esw->fdb_table.legacy.vepa_uplink_rule)
+ return 0;
+
+ spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
+ if (!spec)
+ return -ENOMEM;
+
+ /* Uplink rule forwards uplink traffic to the FDB */
+ misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
+ MLX5_SET(fte_match_set_misc, misc, source_port, MLX5_VPORT_UPLINK);
+
+ misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
+ MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);
+
+ spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;
+ dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
+ dest.ft = esw->fdb_table.legacy.fdb;
+ flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
+ flow_rule = mlx5_add_flow_rules(esw->fdb_table.legacy.vepa_fdb, spec,
+ &flow_act, &dest, 1);
+ if (IS_ERR(flow_rule)) {
+ err = PTR_ERR(flow_rule);
+ goto out;
+ } else {
+ esw->fdb_table.legacy.vepa_uplink_rule = flow_rule;
+ }
+
+ /* Star rule to forward all traffic to uplink vport */
+ memset(&dest, 0, sizeof(dest));
+ dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
+ dest.vport.num = MLX5_VPORT_UPLINK;
+ flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
+ flow_rule = mlx5_add_flow_rules(esw->fdb_table.legacy.vepa_fdb, NULL,
+ &flow_act, &dest, 1);
+ if (IS_ERR(flow_rule)) {
+ err = PTR_ERR(flow_rule);
+ goto out;
+ } else {
+ esw->fdb_table.legacy.vepa_star_rule = flow_rule;
+ }
+
+out:
+ kvfree(spec);
+ if (err)
+ esw_cleanup_vepa_rules(esw);
+ return err;
+}
+
+int mlx5_eswitch_set_vepa(struct mlx5_eswitch *esw, u8 setting)
+{
+ int err = 0;
+
+ if (!esw)
+ return -EOPNOTSUPP;
+
+ if (!mlx5_esw_allowed(esw))
+ return -EPERM;
+
+ mutex_lock(&esw->state_lock);
+ if (esw->mode != MLX5_ESWITCH_LEGACY) {
+ err = -EOPNOTSUPP;
+ goto out;
+ }
+
+ err = _mlx5_eswitch_set_vepa_locked(esw, setting);
+
+out:
+ mutex_unlock(&esw->state_lock);
+ return err;
+}
+
+int mlx5_eswitch_get_vepa(struct mlx5_eswitch *esw, u8 *setting)
+{
+ if (!esw)
+ return -EOPNOTSUPP;
+
+ if (!mlx5_esw_allowed(esw))
+ return -EPERM;
+
+ if (esw->mode != MLX5_ESWITCH_LEGACY)
+ return -EOPNOTSUPP;
+
+ *setting = esw->fdb_table.legacy.vepa_uplink_rule ? 1 : 0;
+ return 0;
+}
+
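+/* Usage note (illustrative, not part of the patch): these two entry points
+ * back the VEPA toggle exposed through the bridge netlink API. In VEPA mode
+ * the uplink rule steers traffic arriving from the uplink into the legacy
+ * FDB, while the star rule reflects everything else back out the uplink, so
+ * VM-to-VM traffic hairpins through the adjacent switch. With iproute2 the
+ * knob should be reachable as, e.g.:
+ *
+ *   bridge link set dev <uplink netdev> hwmode vepa
+ */
+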
+int esw_legacy_vport_acl_setup(struct mlx5_eswitch *esw, struct mlx5_vport *vport)
+{
+ int ret;
+
+ /* Only non-manager vports need ACLs in legacy mode */
+ if (mlx5_esw_is_manager_vport(esw, vport->vport))
+ return 0;
+
+ ret = esw_acl_ingress_lgcy_setup(esw, vport);
+ if (ret)
+ goto ingress_err;
+
+ ret = esw_acl_egress_lgcy_setup(esw, vport);
+ if (ret)
+ goto egress_err;
+
+ return 0;
+
+egress_err:
+ esw_acl_ingress_lgcy_cleanup(esw, vport);
+ingress_err:
+ return ret;
+}
+
+void esw_legacy_vport_acl_cleanup(struct mlx5_eswitch *esw, struct mlx5_vport *vport)
+{
+ if (mlx5_esw_is_manager_vport(esw, vport->vport))
+ return;
+
+ esw_acl_egress_lgcy_cleanup(esw, vport);
+ esw_acl_ingress_lgcy_cleanup(esw, vport);
+}
+
+int mlx5_esw_query_vport_drop_stats(struct mlx5_core_dev *dev,
+ struct mlx5_vport *vport,
+ struct mlx5_vport_drop_stats *stats)
+{
+ u64 rx_discard_vport_down, tx_discard_vport_down;
+ struct mlx5_eswitch *esw = dev->priv.eswitch;
+ u64 bytes = 0;
+ int err = 0;
+
+ if (esw->mode != MLX5_ESWITCH_LEGACY)
+ return 0;
+
+ mutex_lock(&esw->state_lock);
+ if (!vport->enabled)
+ goto unlock;
+
+ if (!IS_ERR_OR_NULL(vport->egress.legacy.drop_counter))
+ mlx5_fc_query(dev, vport->egress.legacy.drop_counter,
+ &stats->rx_dropped, &bytes);
+
+ if (vport->ingress.legacy.drop_counter)
+ mlx5_fc_query(dev, vport->ingress.legacy.drop_counter,
+ &stats->tx_dropped, &bytes);
+
+ if (!MLX5_CAP_GEN(dev, receive_discard_vport_down) &&
+ !MLX5_CAP_GEN(dev, transmit_discard_vport_down))
+ goto unlock;
+
+ err = mlx5_query_vport_down_stats(dev, vport->vport, 1,
+ &rx_discard_vport_down,
+ &tx_discard_vport_down);
+ if (err)
+ goto unlock;
+
+ if (MLX5_CAP_GEN(dev, receive_discard_vport_down))
+ stats->rx_dropped += rx_discard_vport_down;
+ if (MLX5_CAP_GEN(dev, transmit_discard_vport_down))
+ stats->tx_dropped += tx_discard_vport_down;
+
+unlock:
+ mutex_unlock(&esw->state_lock);
+ return err;
+}
+
+int mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw,
+ u16 vport, u16 vlan, u8 qos)
+{
+ u8 set_flags = 0;
+ int err = 0;
+
+ if (!mlx5_esw_allowed(esw))
+ return -EPERM;
+
+ if (vlan || qos)
+ set_flags = SET_VLAN_STRIP | SET_VLAN_INSERT;
+
+ mutex_lock(&esw->state_lock);
+ if (esw->mode != MLX5_ESWITCH_LEGACY) {
+ if (!vlan)
+ goto unlock; /* compatibility with libvirt */
+
+ err = -EOPNOTSUPP;
+ goto unlock;
+ }
+
+ err = __mlx5_eswitch_set_vport_vlan(esw, vport, vlan, qos, set_flags);
+
+unlock:
+ mutex_unlock(&esw->state_lock);
+ return err;
+}
+
+int mlx5_eswitch_set_vport_spoofchk(struct mlx5_eswitch *esw,
+ u16 vport, bool spoofchk)
+{
+ struct mlx5_vport *evport = mlx5_eswitch_get_vport(esw, vport);
+ bool pschk;
+ int err = 0;
+
+ if (!mlx5_esw_allowed(esw))
+ return -EPERM;
+ if (IS_ERR(evport))
+ return PTR_ERR(evport);
+
+ mutex_lock(&esw->state_lock);
+ if (esw->mode != MLX5_ESWITCH_LEGACY) {
+ err = -EOPNOTSUPP;
+ goto unlock;
+ }
+ pschk = evport->info.spoofchk;
+ evport->info.spoofchk = spoofchk;
+ if (pschk && !is_valid_ether_addr(evport->info.mac))
+ mlx5_core_warn(esw->dev,
+ "Spoofchk in set while MAC is invalid, vport(%d)\n",
+ evport->vport);
+ if (evport->enabled && esw->mode == MLX5_ESWITCH_LEGACY)
+ err = esw_acl_ingress_lgcy_setup(esw, evport);
+ if (err)
+ evport->info.spoofchk = pschk;
+
+unlock:
+ mutex_unlock(&esw->state_lock);
+ return err;
+}
+
+int mlx5_eswitch_set_vport_trust(struct mlx5_eswitch *esw,
+ u16 vport, bool setting)
+{
+ struct mlx5_vport *evport = mlx5_eswitch_get_vport(esw, vport);
+ int err = 0;
+
+ if (!mlx5_esw_allowed(esw))
+ return -EPERM;
+ if (IS_ERR(evport))
+ return PTR_ERR(evport);
+
+ mutex_lock(&esw->state_lock);
+ if (esw->mode != MLX5_ESWITCH_LEGACY) {
+ err = -EOPNOTSUPP;
+ goto unlock;
+ }
+ evport->info.trusted = setting;
+ if (evport->enabled)
+ esw_vport_change_handle_locked(evport);
+
+unlock:
+ mutex_unlock(&esw->state_lock);
+ return err;
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/legacy.h b/drivers/net/ethernet/mellanox/mlx5/core/esw/legacy.h
new file mode 100644
index 000000000000..e0820bb72b57
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/legacy.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2021 Mellanox Technologies Ltd */
+
+#ifndef __MLX5_ESW_LEGACY_H__
+#define __MLX5_ESW_LEGACY_H__
+
+#define MLX5_LEGACY_SRIOV_VPORT_EVENTS (MLX5_VPORT_UC_ADDR_CHANGE | \
+ MLX5_VPORT_MC_ADDR_CHANGE | \
+ MLX5_VPORT_PROMISC_CHANGE)
+
+struct mlx5_eswitch;
+
+int esw_legacy_enable(struct mlx5_eswitch *esw);
+void esw_legacy_disable(struct mlx5_eswitch *esw);
+
+int esw_legacy_vport_acl_setup(struct mlx5_eswitch *esw, struct mlx5_vport *vport);
+void esw_legacy_vport_acl_cleanup(struct mlx5_eswitch *esw, struct mlx5_vport *vport);
+
+int mlx5_esw_query_vport_drop_stats(struct mlx5_core_dev *dev,
+ struct mlx5_vport *vport,
+ struct mlx5_vport_drop_stats *stats);
+#endif
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/sample.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/sample.c
new file mode 100644
index 000000000000..794012c5c476
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/sample.c
@@ -0,0 +1,585 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2021 Mellanox Technologies. */
+
+#include <linux/skbuff.h>
+#include <net/psample.h>
+#include "en/mapping.h"
+#include "esw/sample.h"
+#include "eswitch.h"
+#include "en_tc.h"
+#include "fs_core.h"
+
+#define MLX5_ESW_VPORT_TBL_SIZE_SAMPLE (64 * 1024)
+
+static const struct esw_vport_tbl_namespace mlx5_esw_vport_tbl_sample_ns = {
+ .max_fte = MLX5_ESW_VPORT_TBL_SIZE_SAMPLE,
+ .max_num_groups = 0, /* default num of groups */
+ .flags = MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT | MLX5_FLOW_TABLE_TUNNEL_EN_DECAP,
+};
+
+struct mlx5_esw_psample {
+ struct mlx5e_priv *priv;
+ struct mlx5_flow_table *termtbl;
+ struct mlx5_flow_handle *termtbl_rule;
+ DECLARE_HASHTABLE(hashtbl, 8);
+ struct mutex ht_lock; /* protect hashtbl */
+ DECLARE_HASHTABLE(restore_hashtbl, 8);
+ struct mutex restore_lock; /* protect restore_hashtbl */
+};
+
+struct mlx5_sampler {
+ struct hlist_node hlist;
+ u32 sampler_id;
+ u32 sample_ratio;
+ u32 sample_table_id;
+ u32 default_table_id;
+ int count;
+};
+
+struct mlx5_sample_flow {
+ struct mlx5_sampler *sampler;
+ struct mlx5_sample_restore *restore;
+ struct mlx5_flow_attr *pre_attr;
+ struct mlx5_flow_handle *pre_rule;
+ struct mlx5_flow_handle *rule;
+};
+
+struct mlx5_sample_restore {
+ struct hlist_node hlist;
+ struct mlx5_modify_hdr *modify_hdr;
+ struct mlx5_flow_handle *rule;
+ u32 obj_id;
+ int count;
+};
+
+static int
+sampler_termtbl_create(struct mlx5_esw_psample *esw_psample)
+{
+ struct mlx5_core_dev *dev = esw_psample->priv->mdev;
+ struct mlx5_eswitch *esw = dev->priv.eswitch;
+ struct mlx5_flow_table_attr ft_attr = {};
+ struct mlx5_flow_destination dest = {};
+ struct mlx5_flow_namespace *root_ns;
+ struct mlx5_flow_act act = {};
+ int err;
+
+ if (!MLX5_CAP_ESW_FLOWTABLE_FDB(dev, termination_table)) {
+ mlx5_core_warn(dev, "termination table is not supported\n");
+ return -EOPNOTSUPP;
+ }
+
+ root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_FDB);
+ if (!root_ns) {
+ mlx5_core_warn(dev, "failed to get FDB flow namespace\n");
+ return -EOPNOTSUPP;
+ }
+
+ ft_attr.flags = MLX5_FLOW_TABLE_TERMINATION | MLX5_FLOW_TABLE_UNMANAGED;
+ ft_attr.autogroup.max_num_groups = 1;
+ ft_attr.prio = FDB_SLOW_PATH;
+ ft_attr.max_fte = 1;
+ ft_attr.level = 1;
+ esw_psample->termtbl = mlx5_create_auto_grouped_flow_table(root_ns, &ft_attr);
+ if (IS_ERR(esw_psample->termtbl)) {
+ err = PTR_ERR(esw_psample->termtbl);
+ mlx5_core_warn(dev, "failed to create termtbl, err: %d\n", err);
+ return err;
+ }
+
+ act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
+ dest.vport.num = esw->manager_vport;
+ esw_psample->termtbl_rule = mlx5_add_flow_rules(esw_psample->termtbl, NULL, &act, &dest, 1);
+ if (IS_ERR(esw_psample->termtbl_rule)) {
+ err = PTR_ERR(esw_psample->termtbl_rule);
+ mlx5_core_warn(dev, "failed to create termtbl rule, err: %d\n", err);
+ mlx5_destroy_flow_table(esw_psample->termtbl);
+ return err;
+ }
+
+ return 0;
+}
+
+static void
+sampler_termtbl_destroy(struct mlx5_esw_psample *esw_psample)
+{
+ mlx5_del_flow_rules(esw_psample->termtbl_rule);
+ mlx5_destroy_flow_table(esw_psample->termtbl);
+}
+
+static int
+sampler_obj_create(struct mlx5_core_dev *mdev, struct mlx5_sampler *sampler)
+{
+ u32 in[MLX5_ST_SZ_DW(create_sampler_obj_in)] = {};
+ u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)];
+ u64 general_obj_types;
+ void *obj;
+ int err;
+
+ general_obj_types = MLX5_CAP_GEN_64(mdev, general_obj_types);
+ if (!(general_obj_types & MLX5_HCA_CAP_GENERAL_OBJECT_TYPES_SAMPLER))
+ return -EOPNOTSUPP;
+ if (!MLX5_CAP_ESW_FLOWTABLE_FDB(mdev, ignore_flow_level))
+ return -EOPNOTSUPP;
+
+ obj = MLX5_ADDR_OF(create_sampler_obj_in, in, sampler_object);
+ MLX5_SET(sampler_obj, obj, table_type, FS_FT_FDB);
+ MLX5_SET(sampler_obj, obj, ignore_flow_level, 1);
+ MLX5_SET(sampler_obj, obj, level, 1);
+ MLX5_SET(sampler_obj, obj, sample_ratio, sampler->sample_ratio);
+ MLX5_SET(sampler_obj, obj, sample_table_id, sampler->sample_table_id);
+ MLX5_SET(sampler_obj, obj, default_table_id, sampler->default_table_id);
+ MLX5_SET(general_obj_in_cmd_hdr, in, opcode, MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
+ MLX5_SET(general_obj_in_cmd_hdr, in, obj_type, MLX5_GENERAL_OBJECT_TYPES_SAMPLER);
+
+ err = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
+ if (!err)
+ sampler->sampler_id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
+
+ return err;
+}
+
+static void
+sampler_obj_destroy(struct mlx5_core_dev *mdev, u32 sampler_id)
+{
+ u32 in[MLX5_ST_SZ_DW(general_obj_in_cmd_hdr)] = {};
+ u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)];
+
+ MLX5_SET(general_obj_in_cmd_hdr, in, opcode, MLX5_CMD_OP_DESTROY_GENERAL_OBJECT);
+ MLX5_SET(general_obj_in_cmd_hdr, in, obj_type, MLX5_GENERAL_OBJECT_TYPES_SAMPLER);
+ MLX5_SET(general_obj_in_cmd_hdr, in, obj_id, sampler_id);
+
+ mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
+}
+
+static u32
+sampler_hash(u32 sample_ratio, u32 default_table_id)
+{
+ return jhash_2words(sample_ratio, default_table_id, 0);
+}
+
+static int
+sampler_cmp(u32 sample_ratio1, u32 default_table_id1, u32 sample_ratio2, u32 default_table_id2)
+{
+ return sample_ratio1 != sample_ratio2 || default_table_id1 != default_table_id2;
+}
+
+static struct mlx5_sampler *
+sampler_get(struct mlx5_esw_psample *esw_psample, u32 sample_ratio, u32 default_table_id)
+{
+ struct mlx5_sampler *sampler;
+ u32 hash_key;
+ int err;
+
+ mutex_lock(&esw_psample->ht_lock);
+ hash_key = sampler_hash(sample_ratio, default_table_id);
+ hash_for_each_possible(esw_psample->hashtbl, sampler, hlist, hash_key)
+ if (!sampler_cmp(sampler->sample_ratio, sampler->default_table_id,
+ sample_ratio, default_table_id))
+ goto add_ref;
+
+ sampler = kzalloc(sizeof(*sampler), GFP_KERNEL);
+ if (!sampler) {
+ err = -ENOMEM;
+ goto err_alloc;
+ }
+
+ sampler->sample_table_id = esw_psample->termtbl->id;
+ sampler->default_table_id = default_table_id;
+ sampler->sample_ratio = sample_ratio;
+
+ err = sampler_obj_create(esw_psample->priv->mdev, sampler);
+ if (err)
+ goto err_create;
+
+ hash_add(esw_psample->hashtbl, &sampler->hlist, hash_key);
+
+add_ref:
+ sampler->count++;
+ mutex_unlock(&esw_psample->ht_lock);
+ return sampler;
+
+err_create:
+ kfree(sampler);
+err_alloc:
+ mutex_unlock(&esw_psample->ht_lock);
+ return ERR_PTR(err);
+}
+
+static void
+sampler_put(struct mlx5_esw_psample *esw_psample, struct mlx5_sampler *sampler)
+{
+ mutex_lock(&esw_psample->ht_lock);
+ if (--sampler->count == 0) {
+ hash_del(&sampler->hlist);
+ sampler_obj_destroy(esw_psample->priv->mdev, sampler->sampler_id);
+ kfree(sampler);
+ }
+ mutex_unlock(&esw_psample->ht_lock);
+}
+
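sampler_get()/sampler_put() above are an instance of a common sharing pattern: look up under a mutex, allocate on miss, take a reference on hit, destroy on the last put. A compilable userspace sketch of the same pattern, with a plain linked list standing in for the kernel hashtable (all names illustrative, nothing here is an mlx5 symbol):

#include <pthread.h>
#include <stdlib.h>

struct obj {
	struct obj *next;
	unsigned int key;
	int count;
};

static struct obj *head;
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

static struct obj *obj_get(unsigned int key)
{
	struct obj *o;

	pthread_mutex_lock(&lock);
	for (o = head; o; o = o->next)
		if (o->key == key)
			goto add_ref;	/* share the existing object */

	o = calloc(1, sizeof(*o));
	if (!o) {
		pthread_mutex_unlock(&lock);
		return NULL;
	}
	o->key = key;
	o->next = head;
	head = o;
add_ref:
	o->count++;
	pthread_mutex_unlock(&lock);
	return o;
}

static void obj_put(struct obj *o)
{
	struct obj **p;

	pthread_mutex_lock(&lock);
	if (--o->count == 0) {
		for (p = &head; *p; p = &(*p)->next) {
			if (*p == o) {
				*p = o->next;	/* unlink on last put */
				break;
			}
		}
		free(o);
	}
	pthread_mutex_unlock(&lock);
}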
+static struct mlx5_modify_hdr *
+sample_metadata_rule_get(struct mlx5_core_dev *mdev, u32 obj_id)
+{
+ struct mlx5e_tc_mod_hdr_acts mod_acts = {};
+ struct mlx5_modify_hdr *modify_hdr;
+ int err;
+
+ err = mlx5e_tc_match_to_reg_set(mdev, &mod_acts, MLX5_FLOW_NAMESPACE_FDB,
+ CHAIN_TO_REG, obj_id);
+ if (err)
+ goto err_set_regc0;
+
+ modify_hdr = mlx5_modify_header_alloc(mdev, MLX5_FLOW_NAMESPACE_FDB,
+ mod_acts.num_actions,
+ mod_acts.actions);
+ if (IS_ERR(modify_hdr)) {
+ err = PTR_ERR(modify_hdr);
+ goto err_modify_hdr;
+ }
+
+ dealloc_mod_hdr_actions(&mod_acts);
+ return modify_hdr;
+
+err_modify_hdr:
+ dealloc_mod_hdr_actions(&mod_acts);
+err_set_regc0:
+ return ERR_PTR(err);
+}
+
+static struct mlx5_sample_restore *
+sample_restore_get(struct mlx5_esw_psample *esw_psample, u32 obj_id)
+{
+ struct mlx5_core_dev *mdev = esw_psample->priv->mdev;
+ struct mlx5_eswitch *esw = mdev->priv.eswitch;
+ struct mlx5_sample_restore *restore;
+ struct mlx5_modify_hdr *modify_hdr;
+ int err;
+
+ mutex_lock(&esw_psample->restore_lock);
+ hash_for_each_possible(esw_psample->restore_hashtbl, restore, hlist, obj_id)
+ if (restore->obj_id == obj_id)
+ goto add_ref;
+
+ restore = kzalloc(sizeof(*restore), GFP_KERNEL);
+ if (!restore) {
+ err = -ENOMEM;
+ goto err_alloc;
+ }
+ restore->obj_id = obj_id;
+
+ modify_hdr = sample_metadata_rule_get(mdev, obj_id);
+ if (IS_ERR(modify_hdr)) {
+ err = PTR_ERR(modify_hdr);
+ goto err_modify_hdr;
+ }
+ restore->modify_hdr = modify_hdr;
+
+ restore->rule = esw_add_restore_rule(esw, obj_id);
+ if (IS_ERR(restore->rule)) {
+ err = PTR_ERR(restore->rule);
+ goto err_restore;
+ }
+
+ hash_add(esw_psample->restore_hashtbl, &restore->hlist, obj_id);
+add_ref:
+ restore->count++;
+ mutex_unlock(&esw_psample->restore_lock);
+ return restore;
+
+err_restore:
+ mlx5_modify_header_dealloc(mdev, restore->modify_hdr);
+err_modify_hdr:
+ kfree(restore);
+err_alloc:
+ mutex_unlock(&esw_psample->restore_lock);
+ return ERR_PTR(err);
+}
+
+static void
+sample_restore_put(struct mlx5_esw_psample *esw_psample, struct mlx5_sample_restore *restore)
+{
+ mutex_lock(&esw_psample->restore_lock);
+ if (--restore->count == 0)
+ hash_del(&restore->hlist);
+ mutex_unlock(&esw_psample->restore_lock);
+
+ if (!restore->count) {
+ mlx5_del_flow_rules(restore->rule);
+ mlx5_modify_header_dealloc(esw_psample->priv->mdev, restore->modify_hdr);
+ kfree(restore);
+ }
+}
+
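sample_restore_put() above differs from sampler_put() in one detail: the entry is unlinked under the lock but torn down after it is dropped. That is safe because an unlinked entry is unreachable, and it lets the teardown (which may sleep) run unlocked. The shape reduced to its essentials, reusing the obj/lock names from the sketch above; unlink_obj() and destroy_obj() are hypothetical helpers:

static void unlink_obj(struct obj *o);		/* hypothetical: remove from the list */
static void destroy_obj(struct obj *o);		/* hypothetical: may sleep */

static void obj_put_unlocked_teardown(struct obj *o)
{
	int last;

	pthread_mutex_lock(&lock);
	last = (--o->count == 0);
	if (last)
		unlink_obj(o);		/* now unreachable by lookups */
	pthread_mutex_unlock(&lock);

	if (last)
		destroy_obj(o);		/* teardown runs without the lock */
}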
+void mlx5_esw_sample_skb(struct sk_buff *skb, struct mlx5_mapped_obj *mapped_obj)
+{
+ u32 trunc_size = mapped_obj->sample.trunc_size;
+ struct psample_group psample_group = {};
+ struct psample_metadata md = {};
+
+ md.trunc_size = trunc_size ? min(trunc_size, skb->len) : skb->len;
+ md.in_ifindex = skb->dev->ifindex;
+ psample_group.group_num = mapped_obj->sample.group_id;
+ psample_group.net = &init_net;
+ skb_push(skb, skb->mac_len);
+
+ psample_sample_packet(&psample_group, skb, mapped_obj->sample.rate, &md);
+}
+
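mlx5_esw_sample_skb() above applies a simple truncation rule: report at most trunc_size bytes when truncation was requested, otherwise the whole packet. The same rule as a standalone hypothetical helper:

static unsigned int sampled_len(unsigned int trunc_size, unsigned int pkt_len)
{
	if (trunc_size)
		return trunc_size < pkt_len ? trunc_size : pkt_len;
	return pkt_len;		/* no truncation requested */
}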
+/* For the following typical flow table:
+ *
+ * +-------------------------------+
+ * + original flow table +
+ * +-------------------------------+
+ * + original match +
+ * +-------------------------------+
+ * + sample action + other actions +
+ * +-------------------------------+
+ *
+ * We translate the tc filter with sample action to the following HW model:
+ *
+ * +---------------------+
+ * + original flow table +
+ * +---------------------+
+ * + original match +
+ * +---------------------+
+ * |
+ * v
+ * +------------------------------------------------+
+ * + Flow Sampler Object +
+ * +------------------------------------------------+
+ * + sample ratio +
+ * +------------------------------------------------+
+ * + sample table id | default table id +
+ * +------------------------------------------------+
+ * | |
+ * v v
+ * +-----------------------------+ +----------------------------------------+
+ * + sample table + + default table per <vport, chain, prio> +
+ * +-----------------------------+ +----------------------------------------+
+ * + forward to management vport + + original match +
+ * +-----------------------------+ +----------------------------------------+
+ * + other actions +
+ * +----------------------------------------+
+ */
+struct mlx5_flow_handle *
+mlx5_esw_sample_offload(struct mlx5_esw_psample *esw_psample,
+ struct mlx5_flow_spec *spec,
+ struct mlx5_flow_attr *attr)
+{
+ struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
+ struct mlx5_vport_tbl_attr per_vport_tbl_attr;
+ struct mlx5_esw_flow_attr *pre_esw_attr;
+ struct mlx5_mapped_obj restore_obj = {};
+ struct mlx5_sample_flow *sample_flow;
+ struct mlx5_sample_attr *sample_attr;
+ struct mlx5_flow_table *default_tbl;
+ struct mlx5_flow_attr *pre_attr;
+ struct mlx5_eswitch *esw;
+ u32 obj_id;
+ int err;
+
+ if (IS_ERR_OR_NULL(esw_psample))
+ return ERR_PTR(-EOPNOTSUPP);
+
+ /* If the slow path flag is set, e.g. when the neigh is invalid for
+ * encap, don't offload the sample action.
+ */
+ esw = esw_psample->priv->mdev->priv.eswitch;
+ if (attr->flags & MLX5_ESW_ATTR_FLAG_SLOW_PATH)
+ return mlx5_eswitch_add_offloaded_rule(esw, spec, attr);
+
+ sample_flow = kzalloc(sizeof(*sample_flow), GFP_KERNEL);
+ if (!sample_flow)
+ return ERR_PTR(-ENOMEM);
+ esw_attr->sample->sample_flow = sample_flow;
+
+ /* Allocate a default table per vport, chain and prio. Otherwise, there is
+ * only one default table for the same sampler object. Rules with different
+ * prio and chain may overlap. For the CT sample action, a per-vport default
+ * table is needed to restore the metadata.
+ */
+ per_vport_tbl_attr.chain = attr->chain;
+ per_vport_tbl_attr.prio = attr->prio;
+ per_vport_tbl_attr.vport = esw_attr->in_rep->vport;
+ per_vport_tbl_attr.vport_ns = &mlx5_esw_vport_tbl_sample_ns;
+ default_tbl = mlx5_esw_vporttbl_get(esw, &per_vport_tbl_attr);
+ if (IS_ERR(default_tbl)) {
+ err = PTR_ERR(default_tbl);
+ goto err_default_tbl;
+ }
+
+ /* Perform the original matches on the default table.
+ * Offload all actions except the sample action.
+ */
+ esw_attr->sample->sample_default_tbl = default_tbl;
+ /* When offloading sample and encap action, if there is no valid
+ * neigh data struct, a slow path rule is offloaded first. Source
+ * port metadata match is set at that time. A per vport table is
+ * already allocated. No need to match it again. So clear the source
+ * port metadata match.
+ */
+ mlx5_eswitch_clear_rule_source_port(esw, spec);
+ sample_flow->rule = mlx5_eswitch_add_offloaded_rule(esw, spec, attr);
+ if (IS_ERR(sample_flow->rule)) {
+ err = PTR_ERR(sample_flow->rule);
+ goto err_offload_rule;
+ }
+
+ /* Create sampler object. */
+ sample_flow->sampler = sampler_get(esw_psample, esw_attr->sample->rate, default_tbl->id);
+ if (IS_ERR(sample_flow->sampler)) {
+ err = PTR_ERR(sample_flow->sampler);
+ goto err_sampler;
+ }
+
+ /* Create an ID that maps the reg_c0 value to the sample object. */
+ restore_obj.type = MLX5_MAPPED_OBJ_SAMPLE;
+ restore_obj.sample.group_id = esw_attr->sample->group_num;
+ restore_obj.sample.rate = esw_attr->sample->rate;
+ restore_obj.sample.trunc_size = esw_attr->sample->trunc_size;
+ err = mapping_add(esw->offloads.reg_c0_obj_pool, &restore_obj, &obj_id);
+ if (err)
+ goto err_obj_id;
+ esw_attr->sample->restore_obj_id = obj_id;
+
+ /* Create sample restore context. */
+ sample_flow->restore = sample_restore_get(esw_psample, obj_id);
+ if (IS_ERR(sample_flow->restore)) {
+ err = PTR_ERR(sample_flow->restore);
+ goto err_sample_restore;
+ }
+
+ /* Perform the original matches on the original table. Offload the
+ * sample action. The destination is the sampler object.
+ */
+ pre_attr = mlx5_alloc_flow_attr(MLX5_FLOW_NAMESPACE_FDB);
+ if (!pre_attr) {
+ err = -ENOMEM;
+ goto err_alloc_flow_attr;
+ }
+ sample_attr = kzalloc(sizeof(*sample_attr), GFP_KERNEL);
+ if (!sample_attr) {
+ err = -ENOMEM;
+ goto err_alloc_sample_attr;
+ }
+ pre_esw_attr = pre_attr->esw_attr;
+ pre_attr->action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST | MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
+ pre_attr->modify_hdr = sample_flow->restore->modify_hdr;
+ pre_attr->flags = MLX5_ESW_ATTR_FLAG_SAMPLE;
+ pre_attr->chain = attr->chain;
+ pre_attr->prio = attr->prio;
+ pre_esw_attr->sample = sample_attr;
+ pre_esw_attr->sample->sampler_id = sample_flow->sampler->sampler_id;
+ pre_esw_attr->in_mdev = esw_attr->in_mdev;
+ pre_esw_attr->in_rep = esw_attr->in_rep;
+ sample_flow->pre_rule = mlx5_eswitch_add_offloaded_rule(esw, spec, pre_attr);
+ if (IS_ERR(sample_flow->pre_rule)) {
+ err = PTR_ERR(sample_flow->pre_rule);
+ goto err_pre_offload_rule;
+ }
+ sample_flow->pre_attr = pre_attr;
+
+ return sample_flow->rule;
+
+err_pre_offload_rule:
+ kfree(sample_attr);
+err_alloc_sample_attr:
+ kfree(pre_attr);
+err_alloc_flow_attr:
+ sample_restore_put(esw_psample, sample_flow->restore);
+err_sample_restore:
+ mapping_remove(esw->offloads.reg_c0_obj_pool, obj_id);
+err_obj_id:
+ sampler_put(esw_psample, sample_flow->sampler);
+err_sampler:
+ /* For sample offload, the rule is added in default_tbl, so there is
+ * no need to call mlx5_esw_chains_put_table().
+ */
+ attr->prio = 0;
+ attr->chain = 0;
+ mlx5_eswitch_del_offloaded_rule(esw, sample_flow->rule, attr);
+err_offload_rule:
+ mlx5_esw_vporttbl_put(esw, &per_vport_tbl_attr);
+err_default_tbl:
+ return ERR_PTR(err);
+}
+
+void
+mlx5_esw_sample_unoffload(struct mlx5_esw_psample *esw_psample,
+ struct mlx5_flow_handle *rule,
+ struct mlx5_flow_attr *attr)
+{
+ struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
+ struct mlx5_sample_flow *sample_flow;
+ struct mlx5_vport_tbl_attr tbl_attr;
+ struct mlx5_flow_attr *pre_attr;
+ struct mlx5_eswitch *esw;
+
+ if (IS_ERR_OR_NULL(esw_psample))
+ return;
+
+ /* If the slow path flag is set, the sample action is not offloaded,
+ * so there is no sample rule to delete.
+ */
+ esw = esw_psample->priv->mdev->priv.eswitch;
+ if (attr->flags & MLX5_ESW_ATTR_FLAG_SLOW_PATH) {
+ mlx5_eswitch_del_offloaded_rule(esw, rule, attr);
+ return;
+ }
+
+ sample_flow = esw_attr->sample->sample_flow;
+ pre_attr = sample_flow->pre_attr;
+ memset(pre_attr, 0, sizeof(*pre_attr));
+ mlx5_eswitch_del_offloaded_rule(esw, sample_flow->pre_rule, pre_attr);
+ mlx5_eswitch_del_offloaded_rule(esw, sample_flow->rule, attr);
+
+ sample_restore_put(esw_psample, sample_flow->restore);
+ mapping_remove(esw->offloads.reg_c0_obj_pool, esw_attr->sample->restore_obj_id);
+ sampler_put(esw_psample, sample_flow->sampler);
+ tbl_attr.chain = attr->chain;
+ tbl_attr.prio = attr->prio;
+ tbl_attr.vport = esw_attr->in_rep->vport;
+ tbl_attr.vport_ns = &mlx5_esw_vport_tbl_sample_ns;
+ mlx5_esw_vporttbl_put(esw, &tbl_attr);
+
+ kfree(pre_attr->esw_attr->sample);
+ kfree(pre_attr);
+ kfree(sample_flow);
+}
+
+struct mlx5_esw_psample *
+mlx5_esw_sample_init(struct mlx5e_priv *priv)
+{
+ struct mlx5_esw_psample *esw_psample;
+ int err;
+
+ esw_psample = kzalloc(sizeof(*esw_psample), GFP_KERNEL);
+ if (!esw_psample)
+ return ERR_PTR(-ENOMEM);
+ esw_psample->priv = priv;
+ err = sampler_termtbl_create(esw_psample);
+ if (err)
+ goto err_termtbl;
+
+ mutex_init(&esw_psample->ht_lock);
+ mutex_init(&esw_psample->restore_lock);
+
+ return esw_psample;
+
+err_termtbl:
+ kfree(esw_psample);
+ return ERR_PTR(err);
+}
+
+void
+mlx5_esw_sample_cleanup(struct mlx5_esw_psample *esw_psample)
+{
+ if (IS_ERR_OR_NULL(esw_psample))
+ return;
+
+ mutex_destroy(&esw_psample->restore_lock);
+ mutex_destroy(&esw_psample->ht_lock);
+ sampler_termtbl_destroy(esw_psample);
+ kfree(esw_psample);
+}
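Taken together, the four entry points give the tc offload code a simple lifecycle. A hedged usage sketch follows; in the real driver, init/cleanup are tied to the representor's lifetime while offload/unoffload run per flow, and priv, spec and attr are assumed to come from the existing offload path:

static int sample_lifecycle(struct mlx5e_priv *priv,
			    struct mlx5_flow_spec *spec,
			    struct mlx5_flow_attr *attr)
{
	struct mlx5_esw_psample *ps;
	struct mlx5_flow_handle *rule;

	ps = mlx5_esw_sample_init(priv);
	if (IS_ERR(ps))
		return PTR_ERR(ps);

	rule = mlx5_esw_sample_offload(ps, spec, attr);
	if (IS_ERR(rule)) {
		mlx5_esw_sample_cleanup(ps);
		return PTR_ERR(rule);
	}

	/* ... sampled copies reach userspace via mlx5_esw_sample_skb() ... */

	mlx5_esw_sample_unoffload(ps, rule, attr);
	mlx5_esw_sample_cleanup(ps);
	return 0;
}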
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/sample.h b/drivers/net/ethernet/mellanox/mlx5/core/esw/sample.h
new file mode 100644
index 000000000000..2a3f4be10030
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/sample.h
@@ -0,0 +1,42 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2021 Mellanox Technologies. */
+
+#ifndef __MLX5_EN_TC_SAMPLE_H__
+#define __MLX5_EN_TC_SAMPLE_H__
+
+#include "en.h"
+#include "eswitch.h"
+
+struct mlx5e_priv;
+struct mlx5_flow_attr;
+struct mlx5_esw_psample;
+
+struct mlx5_sample_attr {
+ u32 group_num;
+ u32 rate;
+ u32 trunc_size;
+ u32 restore_obj_id;
+ u32 sampler_id;
+ struct mlx5_flow_table *sample_default_tbl;
+ struct mlx5_sample_flow *sample_flow;
+};
+
+void mlx5_esw_sample_skb(struct sk_buff *skb, struct mlx5_mapped_obj *mapped_obj);
+
+struct mlx5_flow_handle *
+mlx5_esw_sample_offload(struct mlx5_esw_psample *sample_priv,
+ struct mlx5_flow_spec *spec,
+ struct mlx5_flow_attr *attr);
+
+void
+mlx5_esw_sample_unoffload(struct mlx5_esw_psample *sample_priv,
+ struct mlx5_flow_handle *rule,
+ struct mlx5_flow_attr *attr);
+
+struct mlx5_esw_psample *
+mlx5_esw_sample_init(struct mlx5e_priv *priv);
+
+void
+mlx5_esw_sample_cleanup(struct mlx5_esw_psample *esw_psample);
+
+#endif /* __MLX5_EN_TC_SAMPLE_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/vporttbl.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/vporttbl.c
new file mode 100644
index 000000000000..9e72118f2e4c
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/vporttbl.c
@@ -0,0 +1,140 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+// Copyright (c) 2021 Mellanox Technologies.
+
+#include "eswitch.h"
+
+/* This struct is used as a key to the hash table, and we need it to be
+ * packed so the hash result is consistent.
+ */
+struct mlx5_vport_key {
+ u32 chain;
+ u16 prio;
+ u16 vport;
+ u16 vhca_id;
+ const struct esw_vport_tbl_namespace *vport_ns;
+} __packed;
+
+struct mlx5_vport_table {
+ struct hlist_node hlist;
+ struct mlx5_flow_table *fdb;
+ u32 num_rules;
+ struct mlx5_vport_key key;
+};
+
+static struct mlx5_flow_table *
+esw_vport_tbl_create(struct mlx5_eswitch *esw, struct mlx5_flow_namespace *ns,
+ const struct esw_vport_tbl_namespace *vport_ns)
+{
+ struct mlx5_flow_table_attr ft_attr = {};
+ struct mlx5_flow_table *fdb;
+
+ if (vport_ns->max_num_groups)
+ ft_attr.autogroup.max_num_groups = vport_ns->max_num_groups;
+ else
+ ft_attr.autogroup.max_num_groups = esw->params.large_group_num;
+ ft_attr.max_fte = vport_ns->max_fte;
+ ft_attr.prio = FDB_PER_VPORT;
+ ft_attr.flags = vport_ns->flags;
+ fdb = mlx5_create_auto_grouped_flow_table(ns, &ft_attr);
+ if (IS_ERR(fdb)) {
+ esw_warn(esw->dev, "Failed to create per vport FDB Table err %ld\n",
+ PTR_ERR(fdb));
+ }
+
+ return fdb;
+}
+
+static u32 flow_attr_to_vport_key(struct mlx5_eswitch *esw,
+ struct mlx5_vport_tbl_attr *attr,
+ struct mlx5_vport_key *key)
+{
+ key->vport = attr->vport;
+ key->chain = attr->chain;
+ key->prio = attr->prio;
+ key->vhca_id = MLX5_CAP_GEN(esw->dev, vhca_id);
+ key->vport_ns = attr->vport_ns;
+ return jhash(key, sizeof(*key), 0);
+}
+
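The __packed annotation on mlx5_vport_key matters because jhash() above runs over the raw struct bytes: without it, compiler padding would contain indeterminate bytes, and two logically identical keys could hash or memcmp() differently. A runnable illustration of the size difference (not driver code):

#include <stdio.h>

struct unpacked_key { unsigned int chain; unsigned short prio; };	/* tail-padded */
struct packed_key {
	unsigned int chain;
	unsigned short prio;
} __attribute__((packed));						/* no padding */

int main(void)
{
	printf("unpacked: %zu bytes, packed: %zu bytes\n",
	       sizeof(struct unpacked_key), sizeof(struct packed_key));
	return 0;
}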
+/* caller must hold vports.lock */
+static struct mlx5_vport_table *
+esw_vport_tbl_lookup(struct mlx5_eswitch *esw, struct mlx5_vport_key *skey, u32 key)
+{
+ struct mlx5_vport_table *e;
+
+ hash_for_each_possible(esw->fdb_table.offloads.vports.table, e, hlist, key)
+ if (!memcmp(&e->key, skey, sizeof(*skey)))
+ return e;
+
+ return NULL;
+}
+
+struct mlx5_flow_table *
+mlx5_esw_vporttbl_get(struct mlx5_eswitch *esw, struct mlx5_vport_tbl_attr *attr)
+{
+ struct mlx5_core_dev *dev = esw->dev;
+ struct mlx5_flow_namespace *ns;
+ struct mlx5_flow_table *fdb;
+ struct mlx5_vport_table *e;
+ struct mlx5_vport_key skey;
+ u32 hkey;
+
+ mutex_lock(&esw->fdb_table.offloads.vports.lock);
+ hkey = flow_attr_to_vport_key(esw, attr, &skey);
+ e = esw_vport_tbl_lookup(esw, &skey, hkey);
+ if (e) {
+ e->num_rules++;
+ goto out;
+ }
+
+ e = kzalloc(sizeof(*e), GFP_KERNEL);
+ if (!e) {
+ fdb = ERR_PTR(-ENOMEM);
+ goto err_alloc;
+ }
+
+ ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_FDB);
+ if (!ns) {
+ esw_warn(dev, "Failed to get FDB namespace\n");
+ fdb = ERR_PTR(-ENOENT);
+ goto err_ns;
+ }
+
+ fdb = esw_vport_tbl_create(esw, ns, attr->vport_ns);
+ if (IS_ERR(fdb))
+ goto err_ns;
+
+ e->fdb = fdb;
+ e->num_rules = 1;
+ e->key = skey;
+ hash_add(esw->fdb_table.offloads.vports.table, &e->hlist, hkey);
+out:
+ mutex_unlock(&esw->fdb_table.offloads.vports.lock);
+ return e->fdb;
+
+err_ns:
+ kfree(e);
+err_alloc:
+ mutex_unlock(&esw->fdb_table.offloads.vports.lock);
+ return fdb;
+}
+
+void
+mlx5_esw_vporttbl_put(struct mlx5_eswitch *esw, struct mlx5_vport_tbl_attr *attr)
+{
+ struct mlx5_vport_table *e;
+ struct mlx5_vport_key key;
+ u32 hkey;
+
+ mutex_lock(&esw->fdb_table.offloads.vports.lock);
+ hkey = flow_attr_to_vport_key(esw, attr, &key);
+ e = esw_vport_tbl_lookup(esw, &key, hkey);
+ if (!e || --e->num_rules)
+ goto out;
+
+ hash_del(&e->hlist);
+ mlx5_destroy_flow_table(e->fdb);
+ kfree(e);
+out:
+ mutex_unlock(&esw->fdb_table.offloads.vports.lock);
+}
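The get/put pair above must be called with identically filled attributes, since those fields form the hash key. A hypothetical caller; my_vport_ns stands for a namespace definition like mlx5_esw_vport_tbl_sample_ns earlier in this series:

static int use_shared_vport_tbl(struct mlx5_eswitch *esw, u16 vport_num,
				u32 chain, u16 prio)
{
	struct mlx5_vport_tbl_attr attr = {
		.chain	  = chain,
		.prio	  = prio,
		.vport	  = vport_num,
		.vport_ns = &my_vport_ns,	/* assumed namespace definition */
	};
	struct mlx5_flow_table *fdb;

	fdb = mlx5_esw_vporttbl_get(esw, &attr);
	if (IS_ERR(fdb))
		return PTR_ERR(fdb);

	/* ... install rules in fdb; the table is shared and refcounted ... */

	mlx5_esw_vporttbl_put(esw, &attr);	/* destroyed on last put */
	return 0;
}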
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
index aba17835465b..570f2280823c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
@@ -36,6 +36,7 @@
#include <linux/mlx5/vport.h>
#include <linux/mlx5/fs.h>
#include "esw/acl/lgcy.h"
+#include "esw/legacy.h"
#include "mlx5_core.h"
#include "lib/eq.h"
#include "eswitch.h"
@@ -61,9 +62,6 @@ struct vport_addr {
bool mc_promisc;
};
-static void esw_destroy_legacy_fdb_table(struct mlx5_eswitch *esw);
-static void esw_cleanup_vepa_rules(struct mlx5_eswitch *esw);
-
static int mlx5_eswitch_check(const struct mlx5_core_dev *dev)
{
if (MLX5_CAP_GEN(dev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
@@ -90,20 +88,17 @@ struct mlx5_eswitch *mlx5_devlink_eswitch_get(struct devlink *devlink)
struct mlx5_vport *__must_check
mlx5_eswitch_get_vport(struct mlx5_eswitch *esw, u16 vport_num)
{
- u16 idx;
+ struct mlx5_vport *vport;
if (!esw || !MLX5_CAP_GEN(esw->dev, vport_group_manager))
return ERR_PTR(-EPERM);
- idx = mlx5_eswitch_vport_num_to_index(esw, vport_num);
-
- if (idx > esw->total_vports - 1) {
- esw_debug(esw->dev, "vport out of range: num(0x%x), idx(0x%x)\n",
- vport_num, idx);
+ vport = xa_load(&esw->vports, vport_num);
+ if (!vport) {
+ esw_debug(esw->dev, "vport out of range: num(0x%x)\n", vport_num);
return ERR_PTR(-EINVAL);
}
-
- return &esw->vports[idx];
+ return vport;
}
static int arm_vport_context_events_cmd(struct mlx5_core_dev *dev, u16 vport,
@@ -278,224 +273,6 @@ esw_fdb_set_vport_promisc_rule(struct mlx5_eswitch *esw, u16 vport)
return __esw_fdb_set_vport_rule(esw, vport, true, mac_c, mac_v);
}
-enum {
- LEGACY_VEPA_PRIO = 0,
- LEGACY_FDB_PRIO,
-};
-
-static int esw_create_legacy_vepa_table(struct mlx5_eswitch *esw)
-{
- struct mlx5_flow_table_attr ft_attr = {};
- struct mlx5_core_dev *dev = esw->dev;
- struct mlx5_flow_namespace *root_ns;
- struct mlx5_flow_table *fdb;
- int err;
-
- root_ns = mlx5_get_fdb_sub_ns(dev, 0);
- if (!root_ns) {
- esw_warn(dev, "Failed to get FDB flow namespace\n");
- return -EOPNOTSUPP;
- }
-
- /* num FTE 2, num FG 2 */
- ft_attr.prio = LEGACY_VEPA_PRIO;
- ft_attr.max_fte = 2;
- ft_attr.autogroup.max_num_groups = 2;
- fdb = mlx5_create_auto_grouped_flow_table(root_ns, &ft_attr);
- if (IS_ERR(fdb)) {
- err = PTR_ERR(fdb);
- esw_warn(dev, "Failed to create VEPA FDB err %d\n", err);
- return err;
- }
- esw->fdb_table.legacy.vepa_fdb = fdb;
-
- return 0;
-}
-
-static int esw_create_legacy_fdb_table(struct mlx5_eswitch *esw)
-{
- int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
- struct mlx5_flow_table_attr ft_attr = {};
- struct mlx5_core_dev *dev = esw->dev;
- struct mlx5_flow_namespace *root_ns;
- struct mlx5_flow_table *fdb;
- struct mlx5_flow_group *g;
- void *match_criteria;
- int table_size;
- u32 *flow_group_in;
- u8 *dmac;
- int err = 0;
-
- esw_debug(dev, "Create FDB log_max_size(%d)\n",
- MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size));
-
- root_ns = mlx5_get_fdb_sub_ns(dev, 0);
- if (!root_ns) {
- esw_warn(dev, "Failed to get FDB flow namespace\n");
- return -EOPNOTSUPP;
- }
-
- flow_group_in = kvzalloc(inlen, GFP_KERNEL);
- if (!flow_group_in)
- return -ENOMEM;
-
- table_size = BIT(MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size));
- ft_attr.max_fte = table_size;
- ft_attr.prio = LEGACY_FDB_PRIO;
- fdb = mlx5_create_flow_table(root_ns, &ft_attr);
- if (IS_ERR(fdb)) {
- err = PTR_ERR(fdb);
- esw_warn(dev, "Failed to create FDB Table err %d\n", err);
- goto out;
- }
- esw->fdb_table.legacy.fdb = fdb;
-
- /* Addresses group : Full match unicast/multicast addresses */
- MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
- MLX5_MATCH_OUTER_HEADERS);
- match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);
- dmac = MLX5_ADDR_OF(fte_match_param, match_criteria, outer_headers.dmac_47_16);
- MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
- /* Preserve 2 entries for allmulti and promisc rules*/
- MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, table_size - 3);
- eth_broadcast_addr(dmac);
- g = mlx5_create_flow_group(fdb, flow_group_in);
- if (IS_ERR(g)) {
- err = PTR_ERR(g);
- esw_warn(dev, "Failed to create flow group err(%d)\n", err);
- goto out;
- }
- esw->fdb_table.legacy.addr_grp = g;
-
- /* Allmulti group : One rule that forwards any mcast traffic */
- MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
- MLX5_MATCH_OUTER_HEADERS);
- MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, table_size - 2);
- MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, table_size - 2);
- eth_zero_addr(dmac);
- dmac[0] = 0x01;
- g = mlx5_create_flow_group(fdb, flow_group_in);
- if (IS_ERR(g)) {
- err = PTR_ERR(g);
- esw_warn(dev, "Failed to create allmulti flow group err(%d)\n", err);
- goto out;
- }
- esw->fdb_table.legacy.allmulti_grp = g;
-
- /* Promiscuous group :
- * One rule that forward all unmatched traffic from previous groups
- */
- eth_zero_addr(dmac);
- MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
- MLX5_MATCH_MISC_PARAMETERS);
- MLX5_SET_TO_ONES(fte_match_param, match_criteria, misc_parameters.source_port);
- MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, table_size - 1);
- MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, table_size - 1);
- g = mlx5_create_flow_group(fdb, flow_group_in);
- if (IS_ERR(g)) {
- err = PTR_ERR(g);
- esw_warn(dev, "Failed to create promisc flow group err(%d)\n", err);
- goto out;
- }
- esw->fdb_table.legacy.promisc_grp = g;
-
-out:
- if (err)
- esw_destroy_legacy_fdb_table(esw);
-
- kvfree(flow_group_in);
- return err;
-}
-
-static void esw_destroy_legacy_vepa_table(struct mlx5_eswitch *esw)
-{
- esw_debug(esw->dev, "Destroy VEPA Table\n");
- if (!esw->fdb_table.legacy.vepa_fdb)
- return;
-
- mlx5_destroy_flow_table(esw->fdb_table.legacy.vepa_fdb);
- esw->fdb_table.legacy.vepa_fdb = NULL;
-}
-
-static void esw_destroy_legacy_fdb_table(struct mlx5_eswitch *esw)
-{
- esw_debug(esw->dev, "Destroy FDB Table\n");
- if (!esw->fdb_table.legacy.fdb)
- return;
-
- if (esw->fdb_table.legacy.promisc_grp)
- mlx5_destroy_flow_group(esw->fdb_table.legacy.promisc_grp);
- if (esw->fdb_table.legacy.allmulti_grp)
- mlx5_destroy_flow_group(esw->fdb_table.legacy.allmulti_grp);
- if (esw->fdb_table.legacy.addr_grp)
- mlx5_destroy_flow_group(esw->fdb_table.legacy.addr_grp);
- mlx5_destroy_flow_table(esw->fdb_table.legacy.fdb);
-
- esw->fdb_table.legacy.fdb = NULL;
- esw->fdb_table.legacy.addr_grp = NULL;
- esw->fdb_table.legacy.allmulti_grp = NULL;
- esw->fdb_table.legacy.promisc_grp = NULL;
-}
-
-static int esw_create_legacy_table(struct mlx5_eswitch *esw)
-{
- int err;
-
- memset(&esw->fdb_table.legacy, 0, sizeof(struct legacy_fdb));
-
- err = esw_create_legacy_vepa_table(esw);
- if (err)
- return err;
-
- err = esw_create_legacy_fdb_table(esw);
- if (err)
- esw_destroy_legacy_vepa_table(esw);
-
- return err;
-}
-
-static void esw_destroy_legacy_table(struct mlx5_eswitch *esw)
-{
- esw_cleanup_vepa_rules(esw);
- esw_destroy_legacy_fdb_table(esw);
- esw_destroy_legacy_vepa_table(esw);
-}
-
-#define MLX5_LEGACY_SRIOV_VPORT_EVENTS (MLX5_VPORT_UC_ADDR_CHANGE | \
- MLX5_VPORT_MC_ADDR_CHANGE | \
- MLX5_VPORT_PROMISC_CHANGE)
-
-static int esw_legacy_enable(struct mlx5_eswitch *esw)
-{
- struct mlx5_vport *vport;
- int ret, i;
-
- ret = esw_create_legacy_table(esw);
- if (ret)
- return ret;
-
- mlx5_esw_for_each_vf_vport(esw, i, vport, esw->esw_funcs.num_vfs)
- vport->info.link_state = MLX5_VPORT_ADMIN_STATE_AUTO;
-
- ret = mlx5_eswitch_enable_pf_vf_vports(esw, MLX5_LEGACY_SRIOV_VPORT_EVENTS);
- if (ret)
- esw_destroy_legacy_table(esw);
- return ret;
-}
-
-static void esw_legacy_disable(struct mlx5_eswitch *esw)
-{
- struct esw_mc_addr *mc_promisc;
-
- mlx5_eswitch_disable_pf_vf_vports(esw);
-
- mc_promisc = &esw->mc_promisc;
- if (mc_promisc->uplink_rule)
- mlx5_del_flow_rules(mc_promisc->uplink_rule);
-
- esw_destroy_legacy_table(esw);
-}
-
/* E-Switch vport UC/MC lists management */
typedef int (*vport_addr_action)(struct mlx5_eswitch *esw,
struct vport_addr *vaddr);
@@ -565,9 +342,10 @@ static void update_allmulti_vports(struct mlx5_eswitch *esw,
{
u8 *mac = vaddr->node.addr;
struct mlx5_vport *vport;
- u16 i, vport_num;
+ unsigned long i;
+ u16 vport_num;
- mlx5_esw_for_all_vports(esw, i, vport) {
+ mlx5_esw_for_each_vport(esw, i, vport) {
struct hlist_head *vport_hash = vport->mc_list;
struct vport_addr *iter_vaddr =
l2addr_hash_find(vport_hash,
@@ -917,7 +695,7 @@ static void esw_update_vport_rx_mode(struct mlx5_eswitch *esw,
(promisc_all || promisc_mc));
}
-static void esw_vport_change_handle_locked(struct mlx5_vport *vport)
+void esw_vport_change_handle_locked(struct mlx5_vport *vport)
{
struct mlx5_core_dev *dev = vport->dev;
struct mlx5_eswitch *esw = dev->priv.eswitch;
@@ -1141,6 +919,8 @@ int mlx5_esw_modify_vport_rate(struct mlx5_eswitch *esw, u16 vport_num,
struct mlx5_vport *vport;
vport = mlx5_eswitch_get_vport(esw, vport_num);
+ if (IS_ERR(vport))
+ return PTR_ERR(vport);
if (!vport->qos.enabled)
return -EOPNOTSUPP;
@@ -1166,56 +946,20 @@ static void node_guid_gen_from_mac(u64 *node_guid, const u8 *mac)
((u8 *)node_guid)[0] = mac[5];
}
-static int esw_vport_create_legacy_acl_tables(struct mlx5_eswitch *esw,
- struct mlx5_vport *vport)
-{
- int ret;
-
- /* Only non manager vports need ACL in legacy mode */
- if (mlx5_esw_is_manager_vport(esw, vport->vport))
- return 0;
-
- ret = esw_acl_ingress_lgcy_setup(esw, vport);
- if (ret)
- goto ingress_err;
-
- ret = esw_acl_egress_lgcy_setup(esw, vport);
- if (ret)
- goto egress_err;
-
- return 0;
-
-egress_err:
- esw_acl_ingress_lgcy_cleanup(esw, vport);
-ingress_err:
- return ret;
-}
-
static int esw_vport_setup_acl(struct mlx5_eswitch *esw,
struct mlx5_vport *vport)
{
if (esw->mode == MLX5_ESWITCH_LEGACY)
- return esw_vport_create_legacy_acl_tables(esw, vport);
+ return esw_legacy_vport_acl_setup(esw, vport);
else
return esw_vport_create_offloads_acl_tables(esw, vport);
}
-static void esw_vport_destroy_legacy_acl_tables(struct mlx5_eswitch *esw,
- struct mlx5_vport *vport)
-
-{
- if (mlx5_esw_is_manager_vport(esw, vport->vport))
- return;
-
- esw_acl_egress_lgcy_cleanup(esw, vport);
- esw_acl_ingress_lgcy_cleanup(esw, vport);
-}
-
static void esw_vport_cleanup_acl(struct mlx5_eswitch *esw,
struct mlx5_vport *vport)
{
if (esw->mode == MLX5_ESWITCH_LEGACY)
- esw_vport_destroy_legacy_acl_tables(esw, vport);
+ esw_legacy_vport_acl_cleanup(esw, vport);
else
esw_vport_destroy_offloads_acl_tables(esw, vport);
}
@@ -1231,7 +975,7 @@ static int esw_vport_setup(struct mlx5_eswitch *esw, struct mlx5_vport *vport)
return err;
/* Attach vport to the eswitch rate limiter */
- esw_vport_enable_qos(esw, vport, vport->info.max_rate, vport->qos.bw_share);
+ esw_vport_enable_qos(esw, vport, vport->qos.max_rate, vport->qos.bw_share);
if (mlx5_esw_is_manager_vport(esw, vport_num))
return 0;
@@ -1279,6 +1023,8 @@ int mlx5_esw_vport_enable(struct mlx5_eswitch *esw, u16 vport_num,
int ret;
vport = mlx5_eswitch_get_vport(esw, vport_num);
+ if (IS_ERR(vport))
+ return PTR_ERR(vport);
mutex_lock(&esw->state_lock);
WARN_ON(vport->enabled);
@@ -1326,6 +1072,8 @@ void mlx5_esw_vport_disable(struct mlx5_eswitch *esw, u16 vport_num)
struct mlx5_vport *vport;
vport = mlx5_eswitch_get_vport(esw, vport_num);
+ if (IS_ERR(vport))
+ return;
mutex_lock(&esw->state_lock);
if (!vport->enabled)
@@ -1382,15 +1130,9 @@ const u32 *mlx5_esw_query_functions(struct mlx5_core_dev *dev)
{
int outlen = MLX5_ST_SZ_BYTES(query_esw_functions_out);
u32 in[MLX5_ST_SZ_DW(query_esw_functions_in)] = {};
- u16 max_sf_vports;
u32 *out;
int err;
- max_sf_vports = mlx5_sf_max_functions(dev);
- /* Device interface is array of 64-bits */
- if (max_sf_vports)
- outlen += DIV_ROUND_UP(max_sf_vports, BITS_PER_TYPE(__be64)) * sizeof(__be64);
-
out = kvzalloc(outlen, GFP_KERNEL);
if (!out)
return ERR_PTR(-ENOMEM);
@@ -1431,7 +1173,7 @@ static void mlx5_eswitch_event_handlers_unregister(struct mlx5_eswitch *esw)
static void mlx5_eswitch_clear_vf_vports_info(struct mlx5_eswitch *esw)
{
struct mlx5_vport *vport;
- int i;
+ unsigned long i;
mlx5_esw_for_each_vf_vport(esw, i, vport, esw->esw_funcs.num_vfs) {
memset(&vport->qos, 0, sizeof(vport->qos));
@@ -1441,8 +1183,6 @@ static void mlx5_eswitch_clear_vf_vports_info(struct mlx5_eswitch *esw)
}
/* Public E-Switch API */
-#define ESW_ALLOWED(esw) ((esw) && MLX5_ESWITCH_MANAGER((esw)->dev))
-
int mlx5_eswitch_load_vport(struct mlx5_eswitch *esw, u16 vport_num,
enum mlx5_eswitch_vport_event enabled_events)
{
@@ -1471,20 +1211,25 @@ void mlx5_eswitch_unload_vport(struct mlx5_eswitch *esw, u16 vport_num)
void mlx5_eswitch_unload_vf_vports(struct mlx5_eswitch *esw, u16 num_vfs)
{
- int i;
+ struct mlx5_vport *vport;
+ unsigned long i;
- mlx5_esw_for_each_vf_vport_num_reverse(esw, i, num_vfs)
- mlx5_eswitch_unload_vport(esw, i);
+ mlx5_esw_for_each_vf_vport(esw, i, vport, num_vfs) {
+ if (!vport->enabled)
+ continue;
+ mlx5_eswitch_unload_vport(esw, vport->vport);
+ }
}
int mlx5_eswitch_load_vf_vports(struct mlx5_eswitch *esw, u16 num_vfs,
enum mlx5_eswitch_vport_event enabled_events)
{
+ struct mlx5_vport *vport;
+ unsigned long i;
int err;
- int i;
- mlx5_esw_for_each_vf_vport_num(esw, i, num_vfs) {
- err = mlx5_eswitch_load_vport(esw, i, enabled_events);
+ mlx5_esw_for_each_vf_vport(esw, i, vport, num_vfs) {
+ err = mlx5_eswitch_load_vport(esw, vport->vport, enabled_events);
if (err)
goto vf_err;
}
@@ -1492,7 +1237,7 @@ int mlx5_eswitch_load_vf_vports(struct mlx5_eswitch *esw, u16 num_vfs,
return 0;
vf_err:
- mlx5_eswitch_unload_vf_vports(esw, i - 1);
+ mlx5_eswitch_unload_vf_vports(esw, num_vfs);
return err;
}
@@ -1625,6 +1370,47 @@ static void mlx5_esw_mode_change_notify(struct mlx5_eswitch *esw, u16 mode)
blocking_notifier_call_chain(&esw->n_head, 0, &info);
}
+static int mlx5_esw_acls_ns_init(struct mlx5_eswitch *esw)
+{
+ struct mlx5_core_dev *dev = esw->dev;
+ int total_vports;
+ int err;
+
+ total_vports = mlx5_eswitch_get_total_vports(dev);
+
+ if (MLX5_CAP_ESW_EGRESS_ACL(dev, ft_support)) {
+ err = mlx5_fs_egress_acls_init(dev, total_vports);
+ if (err)
+ return err;
+ } else {
+ esw_warn(dev, "engress ACL is not supported by FW\n");
+ }
+
+ if (MLX5_CAP_ESW_INGRESS_ACL(dev, ft_support)) {
+ err = mlx5_fs_ingress_acls_init(dev, total_vports);
+ if (err)
+ goto err;
+ } else {
+ esw_warn(dev, "ingress ACL is not supported by FW\n");
+ }
+ return 0;
+
+err:
+ if (MLX5_CAP_ESW_EGRESS_ACL(dev, ft_support))
+ mlx5_fs_egress_acls_cleanup(dev);
+ return err;
+}
+
+static void mlx5_esw_acls_ns_cleanup(struct mlx5_eswitch *esw)
+{
+ struct mlx5_core_dev *dev = esw->dev;
+
+ if (MLX5_CAP_ESW_INGRESS_ACL(dev, ft_support))
+ mlx5_fs_ingress_acls_cleanup(dev);
+ if (MLX5_CAP_ESW_EGRESS_ACL(dev, ft_support))
+ mlx5_fs_egress_acls_cleanup(dev);
+}
+
/**
* mlx5_eswitch_enable_locked - Enable eswitch
* @esw: Pointer to eswitch
@@ -1653,14 +1439,12 @@ int mlx5_eswitch_enable_locked(struct mlx5_eswitch *esw, int mode, int num_vfs)
return -EOPNOTSUPP;
}
- if (!MLX5_CAP_ESW_INGRESS_ACL(esw->dev, ft_support))
- esw_warn(esw->dev, "ingress ACL is not supported by FW\n");
-
- if (!MLX5_CAP_ESW_EGRESS_ACL(esw->dev, ft_support))
- esw_warn(esw->dev, "engress ACL is not supported by FW\n");
-
mlx5_eswitch_get_devlink_param(esw);
+ err = mlx5_esw_acls_ns_init(esw);
+ if (err)
+ return err;
+
mlx5_eswitch_update_num_of_vfs(esw, num_vfs);
esw_create_tsar(esw);
@@ -1696,6 +1480,7 @@ abort:
mlx5_rescan_drivers(esw->dev);
esw_destroy_tsar(esw);
+ mlx5_esw_acls_ns_cleanup(esw);
return err;
}
@@ -1711,10 +1496,10 @@ int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int num_vfs)
{
int ret;
- if (!ESW_ALLOWED(esw))
+ if (!mlx5_esw_allowed(esw))
return 0;
- mutex_lock(&esw->mode_lock);
+ down_write(&esw->mode_lock);
if (esw->mode == MLX5_ESWITCH_NONE) {
ret = mlx5_eswitch_enable_locked(esw, MLX5_ESWITCH_LEGACY, num_vfs);
} else {
@@ -1726,7 +1511,7 @@ int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int num_vfs)
if (!ret)
esw->esw_funcs.num_vfs = num_vfs;
}
- mutex_unlock(&esw->mode_lock);
+ up_write(&esw->mode_lock);
return ret;
}
@@ -1764,6 +1549,7 @@ void mlx5_eswitch_disable_locked(struct mlx5_eswitch *esw, bool clear_vf)
mlx5_rescan_drivers(esw->dev);
esw_destroy_tsar(esw);
+ mlx5_esw_acls_ns_cleanup(esw);
if (clear_vf)
mlx5_eswitch_clear_vf_vports_info(esw);
@@ -1771,33 +1557,170 @@ void mlx5_eswitch_disable_locked(struct mlx5_eswitch *esw, bool clear_vf)
void mlx5_eswitch_disable(struct mlx5_eswitch *esw, bool clear_vf)
{
- if (!ESW_ALLOWED(esw))
+ if (!mlx5_esw_allowed(esw))
return;
- mutex_lock(&esw->mode_lock);
+ down_write(&esw->mode_lock);
mlx5_eswitch_disable_locked(esw, clear_vf);
esw->esw_funcs.num_vfs = 0;
- mutex_unlock(&esw->mode_lock);
+ up_write(&esw->mode_lock);
+}
+
+static int mlx5_query_hca_cap_host_pf(struct mlx5_core_dev *dev, void *out)
+{
+ u16 opmod = (MLX5_CAP_GENERAL << 1) | (HCA_CAP_OPMOD_GET_MAX & 0x01);
+ u8 in[MLX5_ST_SZ_BYTES(query_hca_cap_in)] = {};
+
+ MLX5_SET(query_hca_cap_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_CAP);
+ MLX5_SET(query_hca_cap_in, in, op_mod, opmod);
+ MLX5_SET(query_hca_cap_in, in, function_id, MLX5_VPORT_PF);
+ MLX5_SET(query_hca_cap_in, in, other_function, true);
+ return mlx5_cmd_exec_inout(dev, query_hca_cap, in, out);
+}
+
+int mlx5_esw_sf_max_hpf_functions(struct mlx5_core_dev *dev, u16 *max_sfs, u16 *sf_base_id)
+{
+ int query_out_sz = MLX5_ST_SZ_BYTES(query_hca_cap_out);
+ void *query_ctx;
+ void *hca_caps;
+ int err;
+
+ if (!mlx5_core_is_ecpf(dev)) {
+ *max_sfs = 0;
+ return 0;
+ }
+
+ query_ctx = kzalloc(query_out_sz, GFP_KERNEL);
+ if (!query_ctx)
+ return -ENOMEM;
+
+ err = mlx5_query_hca_cap_host_pf(dev, query_ctx);
+ if (err)
+ goto out_free;
+
+ hca_caps = MLX5_ADDR_OF(query_hca_cap_out, query_ctx, capability);
+ *max_sfs = MLX5_GET(cmd_hca_cap, hca_caps, max_num_sf);
+ *sf_base_id = MLX5_GET(cmd_hca_cap, hca_caps, sf_base_id);
+
+out_free:
+ kfree(query_ctx);
+ return err;
+}
+
+static int mlx5_esw_vport_alloc(struct mlx5_eswitch *esw, struct mlx5_core_dev *dev,
+ int index, u16 vport_num)
+{
+ struct mlx5_vport *vport;
+ int err;
+
+ vport = kzalloc(sizeof(*vport), GFP_KERNEL);
+ if (!vport)
+ return -ENOMEM;
+
+ vport->dev = esw->dev;
+ vport->vport = vport_num;
+ vport->index = index;
+ vport->info.link_state = MLX5_VPORT_ADMIN_STATE_AUTO;
+ INIT_WORK(&vport->vport_change_handler, esw_vport_change_handler);
+ err = xa_insert(&esw->vports, vport_num, vport, GFP_KERNEL);
+ if (err)
+ goto insert_err;
+
+ esw->total_vports++;
+ return 0;
+
+insert_err:
+ kfree(vport);
+ return err;
+}
+
+static void mlx5_esw_vport_free(struct mlx5_eswitch *esw, struct mlx5_vport *vport)
+{
+ xa_erase(&esw->vports, vport->vport);
+ kfree(vport);
+}
+
+static void mlx5_esw_vports_cleanup(struct mlx5_eswitch *esw)
+{
+ struct mlx5_vport *vport;
+ unsigned long i;
+
+ mlx5_esw_for_each_vport(esw, i, vport)
+ mlx5_esw_vport_free(esw, vport);
+ xa_destroy(&esw->vports);
+}
+
+static int mlx5_esw_vports_init(struct mlx5_eswitch *esw)
+{
+ struct mlx5_core_dev *dev = esw->dev;
+ u16 max_host_pf_sfs;
+ u16 base_sf_num;
+ int idx = 0;
+ int err;
+ int i;
+
+ xa_init(&esw->vports);
+
+ err = mlx5_esw_vport_alloc(esw, dev, idx, MLX5_VPORT_PF);
+ if (err)
+ goto err;
+ if (esw->first_host_vport == MLX5_VPORT_PF)
+ xa_set_mark(&esw->vports, idx, MLX5_ESW_VPT_HOST_FN);
+ idx++;
+
+ for (i = 0; i < mlx5_core_max_vfs(dev); i++) {
+ err = mlx5_esw_vport_alloc(esw, dev, idx, idx);
+ if (err)
+ goto err;
+ xa_set_mark(&esw->vports, idx, MLX5_ESW_VPT_VF);
+ xa_set_mark(&esw->vports, idx, MLX5_ESW_VPT_HOST_FN);
+ idx++;
+ }
+ base_sf_num = mlx5_sf_start_function_id(dev);
+ for (i = 0; i < mlx5_sf_max_functions(dev); i++) {
+ err = mlx5_esw_vport_alloc(esw, dev, idx, base_sf_num + i);
+ if (err)
+ goto err;
+ xa_set_mark(&esw->vports, base_sf_num + i, MLX5_ESW_VPT_SF);
+ idx++;
+ }
+
+ err = mlx5_esw_sf_max_hpf_functions(dev, &max_host_pf_sfs, &base_sf_num);
+ if (err)
+ goto err;
+ for (i = 0; i < max_host_pf_sfs; i++) {
+ err = mlx5_esw_vport_alloc(esw, dev, idx, base_sf_num + i);
+ if (err)
+ goto err;
+ xa_set_mark(&esw->vports, base_sf_num + i, MLX5_ESW_VPT_SF);
+ idx++;
+ }
+
+ if (mlx5_ecpf_vport_exists(dev)) {
+ err = mlx5_esw_vport_alloc(esw, dev, idx, MLX5_VPORT_ECPF);
+ if (err)
+ goto err;
+ idx++;
+ }
+ err = mlx5_esw_vport_alloc(esw, dev, idx, MLX5_VPORT_UPLINK);
+ if (err)
+ goto err;
+ return 0;
+
+err:
+ mlx5_esw_vports_cleanup(esw);
+ return err;
}
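mlx5_esw_vports_init() above leans on three xarray idioms: xa_insert() keyed directly by the sparse vport number, xa_set_mark() to classify entries (VF, SF, host function), and marked iteration. A minimal kernel-style sketch of those calls outside any mlx5 context; the payload is assumed to be an ordinary kernel pointer:

#include <linux/xarray.h>

static DEFINE_XARRAY(tbl);

static int xarray_demo(void *payload)
{
	void *entry;
	unsigned long index;
	int err;

	err = xa_insert(&tbl, 42, payload, GFP_KERNEL);	/* fails if occupied */
	if (err)
		return err;

	xa_set_mark(&tbl, 42, XA_MARK_0);	/* e.g. "this is a VF vport" */

	xa_for_each_marked(&tbl, index, entry, XA_MARK_0)
		;	/* visits only entries carrying the mark */

	xa_erase(&tbl, 42);
	return 0;
}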
int mlx5_eswitch_init(struct mlx5_core_dev *dev)
{
struct mlx5_eswitch *esw;
- struct mlx5_vport *vport;
- int total_vports;
- int err, i;
+ int err;
if (!MLX5_VPORT_MANAGER(dev))
return 0;
- total_vports = mlx5_eswitch_get_total_vports(dev);
-
- esw_info(dev,
- "Total vports %d, per vport: max uc(%d) max mc(%d)\n",
- total_vports,
- MLX5_MAX_UC_PER_VPORT(dev),
- MLX5_MAX_MC_PER_VPORT(dev));
-
esw = kzalloc(sizeof(*esw), GFP_KERNEL);
if (!esw)
return -ENOMEM;
@@ -1812,18 +1735,13 @@ int mlx5_eswitch_init(struct mlx5_core_dev *dev)
goto abort;
}
- esw->vports = kcalloc(total_vports, sizeof(struct mlx5_vport),
- GFP_KERNEL);
- if (!esw->vports) {
- err = -ENOMEM;
+ err = mlx5_esw_vports_init(esw);
+ if (err)
goto abort;
- }
-
- esw->total_vports = total_vports;
err = esw_offloads_init_reps(esw);
if (err)
- goto abort;
+ goto reps_err;
mutex_init(&esw->offloads.encap_tbl_lock);
hash_init(esw->offloads.encap_tbl);
@@ -1834,15 +1752,7 @@ int mlx5_eswitch_init(struct mlx5_core_dev *dev)
ida_init(&esw->offloads.vport_metadata_ida);
xa_init_flags(&esw->offloads.vhca_map, XA_FLAGS_ALLOC);
mutex_init(&esw->state_lock);
- mutex_init(&esw->mode_lock);
-
- mlx5_esw_for_all_vports(esw, i, vport) {
- vport->vport = mlx5_eswitch_index_to_vport_num(esw, i);
- vport->info.link_state = MLX5_VPORT_ADMIN_STATE_AUTO;
- vport->dev = dev;
- INIT_WORK(&vport->vport_change_handler,
- esw_vport_change_handler);
- }
+ init_rwsem(&esw->mode_lock);
esw->enabled_vports = 0;
esw->mode = MLX5_ESWITCH_NONE;
@@ -1850,12 +1760,19 @@ int mlx5_eswitch_init(struct mlx5_core_dev *dev)
dev->priv.eswitch = esw;
BLOCKING_INIT_NOTIFIER_HEAD(&esw->n_head);
+
+ esw_info(dev,
+ "Total vports %d, per vport: max uc(%d) max mc(%d)\n",
+ esw->total_vports,
+ MLX5_MAX_UC_PER_VPORT(dev),
+ MLX5_MAX_MC_PER_VPORT(dev));
return 0;
+
+reps_err:
+ mlx5_esw_vports_cleanup(esw);
abort:
if (esw->work_queue)
destroy_workqueue(esw->work_queue);
- esw_offloads_cleanup_reps(esw);
- kfree(esw->vports);
kfree(esw);
return err;
}
@@ -1869,8 +1786,6 @@ void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw)
esw->dev->priv.eswitch = NULL;
destroy_workqueue(esw->work_queue);
- esw_offloads_cleanup_reps(esw);
- mutex_destroy(&esw->mode_lock);
mutex_destroy(&esw->state_lock);
WARN_ON(!xa_empty(&esw->offloads.vhca_map));
xa_destroy(&esw->offloads.vhca_map);
@@ -1878,7 +1793,8 @@ void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw)
mlx5e_mod_hdr_tbl_destroy(&esw->offloads.mod_hdr);
mutex_destroy(&esw->offloads.encap_tbl_lock);
mutex_destroy(&esw->offloads.decap_tbl_lock);
- kfree(esw->vports);
+ esw_offloads_cleanup_reps(esw);
+ mlx5_esw_vports_cleanup(esw);
kfree(esw);
}
@@ -1937,8 +1853,29 @@ int mlx5_eswitch_set_vport_mac(struct mlx5_eswitch *esw,
return err;
}
+static bool mlx5_esw_check_port_type(struct mlx5_eswitch *esw, u16 vport_num, xa_mark_t mark)
+{
+ struct mlx5_vport *vport;
+
+ vport = mlx5_eswitch_get_vport(esw, vport_num);
+ if (IS_ERR(vport))
+ return false;
+
+ return xa_get_mark(&esw->vports, vport_num, mark);
+}
+
+bool mlx5_eswitch_is_vf_vport(struct mlx5_eswitch *esw, u16 vport_num)
+{
+ return mlx5_esw_check_port_type(esw, vport_num, MLX5_ESW_VPT_VF);
+}
+
+bool mlx5_esw_is_sf_vport(struct mlx5_eswitch *esw, u16 vport_num)
+{
+ return mlx5_esw_check_port_type(esw, vport_num, MLX5_ESW_VPT_SF);
+}
+
static bool
-is_port_function_supported(const struct mlx5_eswitch *esw, u16 vport_num)
+is_port_function_supported(struct mlx5_eswitch *esw, u16 vport_num)
{
return vport_num == MLX5_VPORT_PF ||
mlx5_eswitch_is_vf_vport(esw, vport_num) ||
@@ -2023,7 +1960,7 @@ int mlx5_eswitch_set_vport_state(struct mlx5_eswitch *esw,
int other_vport = 1;
int err = 0;
- if (!ESW_ALLOWED(esw))
+ if (!mlx5_esw_allowed(esw))
return -EPERM;
if (IS_ERR(evport))
return PTR_ERR(evport);
@@ -2034,6 +1971,10 @@ int mlx5_eswitch_set_vport_state(struct mlx5_eswitch *esw,
vport = 0;
}
mutex_lock(&esw->state_lock);
+ if (esw->mode != MLX5_ESWITCH_LEGACY) {
+ err = -EOPNOTSUPP;
+ goto unlock;
+ }
err = mlx5_modify_vport_admin_state(esw->dev, opmod, vport, other_vport, link_state);
if (err) {
@@ -2067,8 +2008,8 @@ int mlx5_eswitch_get_vport_config(struct mlx5_eswitch *esw,
ivi->qos = evport->info.qos;
ivi->spoofchk = evport->info.spoofchk;
ivi->trusted = evport->info.trusted;
- ivi->min_tx_rate = evport->info.min_rate;
- ivi->max_tx_rate = evport->info.max_rate;
+ ivi->min_tx_rate = evport->qos.min_rate;
+ ivi->max_tx_rate = evport->qos.max_rate;
mutex_unlock(&esw->state_lock);
return 0;
@@ -2101,196 +2042,17 @@ int __mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw,
return err;
}
-int mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw,
- u16 vport, u16 vlan, u8 qos)
-{
- u8 set_flags = 0;
- int err;
-
- if (!ESW_ALLOWED(esw))
- return -EPERM;
-
- if (vlan || qos)
- set_flags = SET_VLAN_STRIP | SET_VLAN_INSERT;
-
- mutex_lock(&esw->state_lock);
- err = __mlx5_eswitch_set_vport_vlan(esw, vport, vlan, qos, set_flags);
- mutex_unlock(&esw->state_lock);
-
- return err;
-}
-
-int mlx5_eswitch_set_vport_spoofchk(struct mlx5_eswitch *esw,
- u16 vport, bool spoofchk)
-{
- struct mlx5_vport *evport = mlx5_eswitch_get_vport(esw, vport);
- bool pschk;
- int err = 0;
-
- if (!ESW_ALLOWED(esw))
- return -EPERM;
- if (IS_ERR(evport))
- return PTR_ERR(evport);
-
- mutex_lock(&esw->state_lock);
- pschk = evport->info.spoofchk;
- evport->info.spoofchk = spoofchk;
- if (pschk && !is_valid_ether_addr(evport->info.mac))
- mlx5_core_warn(esw->dev,
- "Spoofchk in set while MAC is invalid, vport(%d)\n",
- evport->vport);
- if (evport->enabled && esw->mode == MLX5_ESWITCH_LEGACY)
- err = esw_acl_ingress_lgcy_setup(esw, evport);
- if (err)
- evport->info.spoofchk = pschk;
- mutex_unlock(&esw->state_lock);
-
- return err;
-}
-
-static void esw_cleanup_vepa_rules(struct mlx5_eswitch *esw)
-{
- if (esw->fdb_table.legacy.vepa_uplink_rule)
- mlx5_del_flow_rules(esw->fdb_table.legacy.vepa_uplink_rule);
-
- if (esw->fdb_table.legacy.vepa_star_rule)
- mlx5_del_flow_rules(esw->fdb_table.legacy.vepa_star_rule);
-
- esw->fdb_table.legacy.vepa_uplink_rule = NULL;
- esw->fdb_table.legacy.vepa_star_rule = NULL;
-}
-
-static int _mlx5_eswitch_set_vepa_locked(struct mlx5_eswitch *esw,
- u8 setting)
-{
- struct mlx5_flow_destination dest = {};
- struct mlx5_flow_act flow_act = {};
- struct mlx5_flow_handle *flow_rule;
- struct mlx5_flow_spec *spec;
- int err = 0;
- void *misc;
-
- if (!setting) {
- esw_cleanup_vepa_rules(esw);
- return 0;
- }
-
- if (esw->fdb_table.legacy.vepa_uplink_rule)
- return 0;
-
- spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
- if (!spec)
- return -ENOMEM;
-
- /* Uplink rule forward uplink traffic to FDB */
- misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
- MLX5_SET(fte_match_set_misc, misc, source_port, MLX5_VPORT_UPLINK);
-
- misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
- MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);
-
- spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;
- dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
- dest.ft = esw->fdb_table.legacy.fdb;
- flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
- flow_rule = mlx5_add_flow_rules(esw->fdb_table.legacy.vepa_fdb, spec,
- &flow_act, &dest, 1);
- if (IS_ERR(flow_rule)) {
- err = PTR_ERR(flow_rule);
- goto out;
- } else {
- esw->fdb_table.legacy.vepa_uplink_rule = flow_rule;
- }
-
- /* Star rule to forward all traffic to uplink vport */
- memset(&dest, 0, sizeof(dest));
- dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
- dest.vport.num = MLX5_VPORT_UPLINK;
- flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
- flow_rule = mlx5_add_flow_rules(esw->fdb_table.legacy.vepa_fdb, NULL,
- &flow_act, &dest, 1);
- if (IS_ERR(flow_rule)) {
- err = PTR_ERR(flow_rule);
- goto out;
- } else {
- esw->fdb_table.legacy.vepa_star_rule = flow_rule;
- }
-
-out:
- kvfree(spec);
- if (err)
- esw_cleanup_vepa_rules(esw);
- return err;
-}
-
-int mlx5_eswitch_set_vepa(struct mlx5_eswitch *esw, u8 setting)
-{
- int err = 0;
-
- if (!esw)
- return -EOPNOTSUPP;
-
- if (!ESW_ALLOWED(esw))
- return -EPERM;
-
- mutex_lock(&esw->state_lock);
- if (esw->mode != MLX5_ESWITCH_LEGACY) {
- err = -EOPNOTSUPP;
- goto out;
- }
-
- err = _mlx5_eswitch_set_vepa_locked(esw, setting);
-
-out:
- mutex_unlock(&esw->state_lock);
- return err;
-}
-
-int mlx5_eswitch_get_vepa(struct mlx5_eswitch *esw, u8 *setting)
-{
- if (!esw)
- return -EOPNOTSUPP;
-
- if (!ESW_ALLOWED(esw))
- return -EPERM;
-
- if (esw->mode != MLX5_ESWITCH_LEGACY)
- return -EOPNOTSUPP;
-
- *setting = esw->fdb_table.legacy.vepa_uplink_rule ? 1 : 0;
- return 0;
-}
-
-int mlx5_eswitch_set_vport_trust(struct mlx5_eswitch *esw,
- u16 vport, bool setting)
-{
- struct mlx5_vport *evport = mlx5_eswitch_get_vport(esw, vport);
-
- if (!ESW_ALLOWED(esw))
- return -EPERM;
- if (IS_ERR(evport))
- return PTR_ERR(evport);
-
- mutex_lock(&esw->state_lock);
- evport->info.trusted = setting;
- if (evport->enabled)
- esw_vport_change_handle_locked(evport);
- mutex_unlock(&esw->state_lock);
-
- return 0;
-}
-
static u32 calculate_vports_min_rate_divider(struct mlx5_eswitch *esw)
{
u32 fw_max_bw_share = MLX5_CAP_QOS(esw->dev, max_tsar_bw_share);
struct mlx5_vport *evport;
u32 max_guarantee = 0;
- int i;
+ unsigned long i;
- mlx5_esw_for_all_vports(esw, i, evport) {
- if (!evport->enabled || evport->info.min_rate < max_guarantee)
+ mlx5_esw_for_each_vport(esw, i, evport) {
+ if (!evport->enabled || evport->qos.min_rate < max_guarantee)
continue;
- max_guarantee = evport->info.min_rate;
+ max_guarantee = evport->qos.min_rate;
}
if (max_guarantee)
@@ -2305,15 +2067,15 @@ static int normalize_vports_min_rate(struct mlx5_eswitch *esw)
struct mlx5_vport *evport;
u32 vport_max_rate;
u32 vport_min_rate;
+ unsigned long i;
u32 bw_share;
int err;
- int i;
- mlx5_esw_for_all_vports(esw, i, evport) {
+ mlx5_esw_for_each_vport(esw, i, evport) {
if (!evport->enabled)
continue;
- vport_min_rate = evport->info.min_rate;
- vport_max_rate = evport->info.max_rate;
+ vport_min_rate = evport->qos.min_rate;
+ vport_max_rate = evport->qos.max_rate;
bw_share = 0;
if (divider)
@@ -2345,7 +2107,7 @@ int mlx5_eswitch_set_vport_rate(struct mlx5_eswitch *esw, u16 vport,
bool max_rate_supported;
int err = 0;
- if (!ESW_ALLOWED(esw))
+ if (!mlx5_esw_allowed(esw))
return -EPERM;
if (IS_ERR(evport))
return PTR_ERR(evport);
@@ -2360,68 +2122,24 @@ int mlx5_eswitch_set_vport_rate(struct mlx5_eswitch *esw, u16 vport,
mutex_lock(&esw->state_lock);
- if (min_rate == evport->info.min_rate)
+ if (min_rate == evport->qos.min_rate)
goto set_max_rate;
- previous_min_rate = evport->info.min_rate;
- evport->info.min_rate = min_rate;
+ previous_min_rate = evport->qos.min_rate;
+ evport->qos.min_rate = min_rate;
err = normalize_vports_min_rate(esw);
if (err) {
- evport->info.min_rate = previous_min_rate;
+ evport->qos.min_rate = previous_min_rate;
goto unlock;
}
set_max_rate:
- if (max_rate == evport->info.max_rate)
+ if (max_rate == evport->qos.max_rate)
goto unlock;
err = esw_vport_qos_config(esw, evport, max_rate, evport->qos.bw_share);
if (!err)
- evport->info.max_rate = max_rate;
-
-unlock:
- mutex_unlock(&esw->state_lock);
- return err;
-}
-
-static int mlx5_eswitch_query_vport_drop_stats(struct mlx5_core_dev *dev,
- struct mlx5_vport *vport,
- struct mlx5_vport_drop_stats *stats)
-{
- struct mlx5_eswitch *esw = dev->priv.eswitch;
- u64 rx_discard_vport_down, tx_discard_vport_down;
- u64 bytes = 0;
- int err = 0;
-
- if (esw->mode != MLX5_ESWITCH_LEGACY)
- return 0;
-
- mutex_lock(&esw->state_lock);
- if (!vport->enabled)
- goto unlock;
-
- if (!IS_ERR_OR_NULL(vport->egress.legacy.drop_counter))
- mlx5_fc_query(dev, vport->egress.legacy.drop_counter,
- &stats->rx_dropped, &bytes);
-
- if (vport->ingress.legacy.drop_counter)
- mlx5_fc_query(dev, vport->ingress.legacy.drop_counter,
- &stats->tx_dropped, &bytes);
-
- if (!MLX5_CAP_GEN(dev, receive_discard_vport_down) &&
- !MLX5_CAP_GEN(dev, transmit_discard_vport_down))
- goto unlock;
-
- err = mlx5_query_vport_down_stats(dev, vport->vport, 1,
- &rx_discard_vport_down,
- &tx_discard_vport_down);
- if (err)
- goto unlock;
-
- if (MLX5_CAP_GEN(dev, receive_discard_vport_down))
- stats->rx_dropped += rx_discard_vport_down;
- if (MLX5_CAP_GEN(dev, transmit_discard_vport_down))
- stats->tx_dropped += tx_discard_vport_down;
+ evport->qos.max_rate = max_rate;
unlock:
mutex_unlock(&esw->state_lock);
@@ -2495,7 +2213,7 @@ int mlx5_eswitch_get_vport_stats(struct mlx5_eswitch *esw,
vf_stats->broadcast =
MLX5_GET_CTR(out, received_eth_broadcast.packets);
- err = mlx5_eswitch_query_vport_drop_stats(esw->dev, vport, &stats);
+ err = mlx5_esw_query_vport_drop_stats(esw->dev, vport, &stats);
if (err)
goto free_out;
vf_stats->rx_dropped = stats.rx_dropped;
@@ -2510,7 +2228,7 @@ u8 mlx5_eswitch_mode(struct mlx5_core_dev *dev)
{
struct mlx5_eswitch *esw = dev->priv.eswitch;
- return ESW_ALLOWED(esw) ? esw->mode : MLX5_ESWITCH_NONE;
+ return mlx5_esw_allowed(esw) ? esw->mode : MLX5_ESWITCH_NONE;
}
EXPORT_SYMBOL_GPL(mlx5_eswitch_mode);
@@ -2520,7 +2238,7 @@ mlx5_eswitch_get_encap_mode(const struct mlx5_core_dev *dev)
struct mlx5_eswitch *esw;
esw = dev->priv.eswitch;
- return ESW_ALLOWED(esw) ? esw->offloads.encap :
+ return mlx5_esw_allowed(esw) ? esw->offloads.encap :
DEVLINK_ESWITCH_ENCAP_MODE_NONE;
}
EXPORT_SYMBOL(mlx5_eswitch_get_encap_mode);
@@ -2552,3 +2270,110 @@ void mlx5_esw_event_notifier_unregister(struct mlx5_eswitch *esw, struct notifie
{
blocking_notifier_chain_unregister(&esw->n_head, nb);
}
+
+/**
+ * mlx5_esw_hold() - Try to take a read lock on esw mode lock.
+ * @mdev: mlx5 core device.
+ *
+ * Should be called by esw resources callers.
+ *
+ * Return: true if the mode lock was taken for read (or the device has no
+ * eswitch to protect), false otherwise.
+ */
+bool mlx5_esw_hold(struct mlx5_core_dev *mdev)
+{
+ struct mlx5_eswitch *esw = mdev->priv.eswitch;
+
+	/* e.g. a VF has no eswitch, so there is nothing to lock */
+ if (!mlx5_esw_allowed(esw))
+ return true;
+
+ if (down_read_trylock(&esw->mode_lock) != 0)
+ return true;
+
+ return false;
+}
+
+/**
+ * mlx5_esw_release() - Release a read lock on esw mode lock.
+ * @mdev: mlx5 core device.
+ */
+void mlx5_esw_release(struct mlx5_core_dev *mdev)
+{
+ struct mlx5_eswitch *esw = mdev->priv.eswitch;
+
+ if (mlx5_esw_allowed(esw))
+ up_read(&esw->mode_lock);
+}
+
+/**
+ * mlx5_esw_get() - Increase esw user count.
+ * @mdev: mlx5 core device.
+ */
+void mlx5_esw_get(struct mlx5_core_dev *mdev)
+{
+ struct mlx5_eswitch *esw = mdev->priv.eswitch;
+
+ if (mlx5_esw_allowed(esw))
+ atomic64_inc(&esw->user_count);
+}
+
+/**
+ * mlx5_esw_put() - Decrease esw user count.
+ * @mdev: mlx5 core device.
+ */
+void mlx5_esw_put(struct mlx5_core_dev *mdev)
+{
+ struct mlx5_eswitch *esw = mdev->priv.eswitch;
+
+ if (mlx5_esw_allowed(esw))
+ atomic64_dec_if_positive(&esw->user_count);
+}
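
Taken together, these helpers form a two-stage guard for eswitch resource callers: take the mode lock for read to rule out an in-flight mode change, pin the eswitch by bumping user_count so a later mode change is refused with -EBUSY, then drop the read lock and do the (possibly long) work unlocked. A minimal sketch of that pattern — the function and callee names are illustrative, not part of this patch:

	/* Sketch: guard resource setup against a concurrent mode change */
	static int example_esw_resource_create(struct mlx5_core_dev *mdev)
	{
		int err;

		if (!mlx5_esw_hold(mdev))	/* read-lock esw->mode_lock */
			return -EBUSY;

		mlx5_esw_get(mdev);	/* user_count++: mode change now fails -EBUSY */
		mlx5_esw_release(mdev);	/* long-running work proceeds unlocked */

		err = example_do_setup(mdev);	/* hypothetical setup step */
		if (err)
			mlx5_esw_put(mdev);	/* unpin on failure */
		return err;
	}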
+
+/**
+ * mlx5_esw_try_lock() - Take a write lock on esw mode lock.
+ * @esw: eswitch device.
+ *
+ * Should be called by esw mode change routine.
+ *
+ * Return:
+ * * >= 0 - esw mode if successfully locked and refcount is 0.
+ * * -EBUSY - refcount is not 0.
+ * * -EINVAL - In the middle of switching mode or lock is already held.
+ */
+int mlx5_esw_try_lock(struct mlx5_eswitch *esw)
+{
+ if (down_write_trylock(&esw->mode_lock) == 0)
+ return -EINVAL;
+
+ if (atomic64_read(&esw->user_count) > 0) {
+ up_write(&esw->mode_lock);
+ return -EBUSY;
+ }
+
+ return esw->mode;
+}
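
The mode-change side pairs this with mlx5_esw_unlock(); the devlink mode-set handler later in this patch follows exactly this shape, condensed here:

	err = mlx5_esw_try_lock(esw);
	if (err < 0)
		return err;	/* -EINVAL: lock contended, -EBUSY: users pinned */
	cur_mlx5_mode = err;	/* mode value, write lock now held */
	/* ... perform the mode switch ... */
	mlx5_esw_unlock(esw);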
+
+/**
+ * mlx5_esw_unlock() - Release write lock on esw mode lock.
+ * @esw: eswitch device.
+ */
+void mlx5_esw_unlock(struct mlx5_eswitch *esw)
+{
+ up_write(&esw->mode_lock);
+}
+
+/**
+ * mlx5_eswitch_get_total_vports() - Get total vports of the eswitch.
+ *
+ * @dev: Pointer to core device.
+ *
+ * Return: total number of eswitch vports, or 0 when the device has no eswitch.
+ */
+u16 mlx5_eswitch_get_total_vports(const struct mlx5_core_dev *dev)
+{
+ struct mlx5_eswitch *esw;
+
+ esw = dev->priv.eswitch;
+ return mlx5_esw_allowed(esw) ? esw->total_vports : 0;
+}
+EXPORT_SYMBOL_GPL(mlx5_eswitch_get_total_vports);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
index fdf5c8c05c1b..64ccb2bc0b58 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
@@ -46,6 +46,24 @@
#include "lib/fs_chains.h"
#include "sf/sf.h"
#include "en/tc_ct.h"
+#include "esw/sample.h"
+
+enum mlx5_mapped_obj_type {
+ MLX5_MAPPED_OBJ_CHAIN,
+ MLX5_MAPPED_OBJ_SAMPLE,
+};
+
+struct mlx5_mapped_obj {
+ enum mlx5_mapped_obj_type type;
+ union {
+ u32 chain;
+ struct {
+ u32 group_id;
+ u32 rate;
+ u32 trunc_size;
+ } sample;
+ };
+};
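
The tagged union lets one reg_c0 mapping pool carry both chain-restore data and sampler metadata. A producer fills the type plus the matching member and registers it with the pool this patch creates in esw_offloads_enable() — a sketch, with group_id/rate/trunc_size standing in for real sample-action parameters:

	struct mlx5_mapped_obj obj = {};
	u32 id;
	int err;

	obj.type = MLX5_MAPPED_OBJ_SAMPLE;
	obj.sample.group_id = group_id;
	obj.sample.rate = rate;
	obj.sample.trunc_size = trunc_size;
	/* the returned id fits within the reg_c0 user-data bits */
	err = mapping_add(esw->offloads.reg_c0_obj_pool, &obj, &id);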
#ifdef CONFIG_MLX5_ESWITCH
@@ -118,13 +136,11 @@ struct mlx5_vport_drop_stats {
struct mlx5_vport_info {
u8 mac[ETH_ALEN];
u16 vlan;
- u8 qos;
u64 node_guid;
int link_state;
- u32 min_rate;
- u32 max_rate;
- bool spoofchk;
- bool trusted;
+ u8 qos;
+ u8 spoofchk: 1;
+ u8 trusted: 1;
};
/* Vport context events */
@@ -136,7 +152,6 @@ enum mlx5_eswitch_vport_event {
struct mlx5_vport {
struct mlx5_core_dev *dev;
- int vport;
struct hlist_head uc_list[MLX5_L2_ADDR_HASH_SIZE];
struct hlist_head mc_list[MLX5_L2_ADDR_HASH_SIZE];
struct mlx5_flow_handle *promisc_rule;
@@ -154,10 +169,14 @@ struct mlx5_vport {
bool enabled;
u32 esw_tsar_ix;
u32 bw_share;
+ u32 min_rate;
+ u32 max_rate;
} qos;
+ u16 vport;
bool enabled;
enum mlx5_eswitch_vport_event enabled_events;
+ int index;
struct devlink_port *dl_port;
};
@@ -206,10 +225,11 @@ struct mlx5_esw_offload {
struct mlx5_flow_table *ft_offloads_restore;
struct mlx5_flow_group *restore_group;
struct mlx5_modify_hdr *restore_copy_hdr_id;
+ struct mapping_ctx *reg_c0_obj_pool;
struct mlx5_flow_table *ft_offloads;
struct mlx5_flow_group *vport_rx_group;
- struct mlx5_eswitch_rep *vport_reps;
+ struct xarray vport_reps;
struct list_head peer_flows;
struct mutex peer_mutex;
struct mutex encap_tbl_lock; /* protects encap_tbl */
@@ -259,7 +279,7 @@ struct mlx5_eswitch {
struct esw_mc_addr mc_promisc;
/* end of legacy */
struct workqueue_struct *work_queue;
- struct mlx5_vport *vports;
+ struct xarray vports;
u32 flags;
int total_vports;
int enabled_vports;
@@ -271,7 +291,8 @@ struct mlx5_eswitch {
/* Protects eswitch mode change that occurs via one or more
* user commands, i.e. sriov state change, devlink commands.
*/
- struct mutex mode_lock;
+ struct rw_semaphore mode_lock;
+ atomic64_t user_count;
struct {
bool enabled;
@@ -294,6 +315,8 @@ int esw_offloads_enable(struct mlx5_eswitch *esw);
void esw_offloads_cleanup_reps(struct mlx5_eswitch *esw);
int esw_offloads_init_reps(struct mlx5_eswitch *esw);
+bool mlx5_esw_vport_match_metadata_supported(const struct mlx5_eswitch *esw);
+int mlx5_esw_offloads_vport_metadata_set(struct mlx5_eswitch *esw, bool enable);
u32 mlx5_esw_match_metadata_alloc(struct mlx5_eswitch *esw);
void mlx5_esw_match_metadata_free(struct mlx5_eswitch *esw, u32 metadata);
@@ -356,6 +379,9 @@ void
mlx5_eswitch_termtbl_put(struct mlx5_eswitch *esw,
struct mlx5_termtbl_handle *tt);
+void
+mlx5_eswitch_clear_rule_source_port(struct mlx5_eswitch *esw, struct mlx5_flow_spec *spec);
+
struct mlx5_flow_handle *
mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
struct mlx5_flow_spec *spec,
@@ -403,6 +429,7 @@ enum {
MLX5_ESW_ATTR_FLAG_SLOW_PATH = BIT(1),
MLX5_ESW_ATTR_FLAG_NO_IN_PORT = BIT(2),
MLX5_ESW_ATTR_FLAG_SRC_REWRITE = BIT(3),
+ MLX5_ESW_ATTR_FLAG_SAMPLE = BIT(4),
};
struct mlx5_esw_flow_attr {
@@ -427,6 +454,7 @@ struct mlx5_esw_flow_attr {
} dests[MLX5_MAX_FLOW_FWD_VPORTS];
struct mlx5_rx_tun_attr *rx_tun_attr;
struct mlx5_pkt_reformat *decap_pkt_reformat;
+ struct mlx5_sample_attr *sample;
};
int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode,
@@ -494,6 +522,11 @@ const u32 *mlx5_esw_query_functions(struct mlx5_core_dev *dev);
#define esw_debug(dev, format, ...) \
mlx5_core_dbg_mask(dev, MLX5_DEBUG_ESWITCH_MASK, format, ##__VA_ARGS__)
+static inline bool mlx5_esw_allowed(const struct mlx5_eswitch *esw)
+{
+ return esw && MLX5_ESWITCH_MANAGER(esw->dev);
+}
+
/* The returned number is valid only when the dev is eswitch manager. */
static inline u16 mlx5_eswitch_manager_vport(struct mlx5_core_dev *dev)
{
@@ -513,94 +546,11 @@ static inline u16 mlx5_eswitch_first_host_vport_num(struct mlx5_core_dev *dev)
MLX5_VPORT_PF : MLX5_VPORT_FIRST_VF;
}
-static inline int mlx5_esw_sf_start_idx(const struct mlx5_eswitch *esw)
-{
- /* PF and VF vports indices start from 0 to max_vfs */
- return MLX5_VPORT_PF_PLACEHOLDER + mlx5_core_max_vfs(esw->dev);
-}
-
-static inline int mlx5_esw_sf_end_idx(const struct mlx5_eswitch *esw)
-{
- return mlx5_esw_sf_start_idx(esw) + mlx5_sf_max_functions(esw->dev);
-}
-
-static inline int
-mlx5_esw_sf_vport_num_to_index(const struct mlx5_eswitch *esw, u16 vport_num)
-{
- return vport_num - mlx5_sf_start_function_id(esw->dev) +
- MLX5_VPORT_PF_PLACEHOLDER + mlx5_core_max_vfs(esw->dev);
-}
-
-static inline u16
-mlx5_esw_sf_vport_index_to_num(const struct mlx5_eswitch *esw, int idx)
-{
- return mlx5_sf_start_function_id(esw->dev) + idx -
- (MLX5_VPORT_PF_PLACEHOLDER + mlx5_core_max_vfs(esw->dev));
-}
-
-static inline bool
-mlx5_esw_is_sf_vport(const struct mlx5_eswitch *esw, u16 vport_num)
-{
- return mlx5_sf_supported(esw->dev) &&
- vport_num >= mlx5_sf_start_function_id(esw->dev) &&
- (vport_num < (mlx5_sf_start_function_id(esw->dev) +
- mlx5_sf_max_functions(esw->dev)));
-}
-
static inline bool mlx5_eswitch_is_funcs_handler(const struct mlx5_core_dev *dev)
{
return mlx5_core_is_ecpf_esw_manager(dev);
}
-static inline int mlx5_eswitch_uplink_idx(struct mlx5_eswitch *esw)
-{
- /* Uplink always locate at the last element of the array.*/
- return esw->total_vports - 1;
-}
-
-static inline int mlx5_eswitch_ecpf_idx(struct mlx5_eswitch *esw)
-{
- return esw->total_vports - 2;
-}
-
-static inline int mlx5_eswitch_vport_num_to_index(struct mlx5_eswitch *esw,
- u16 vport_num)
-{
- if (vport_num == MLX5_VPORT_ECPF) {
- if (!mlx5_ecpf_vport_exists(esw->dev))
- esw_warn(esw->dev, "ECPF vport doesn't exist!\n");
- return mlx5_eswitch_ecpf_idx(esw);
- }
-
- if (vport_num == MLX5_VPORT_UPLINK)
- return mlx5_eswitch_uplink_idx(esw);
-
- if (mlx5_esw_is_sf_vport(esw, vport_num))
- return mlx5_esw_sf_vport_num_to_index(esw, vport_num);
-
- /* PF and VF vports start from 0 to max_vfs */
- return vport_num;
-}
-
-static inline u16 mlx5_eswitch_index_to_vport_num(struct mlx5_eswitch *esw,
- int index)
-{
- if (index == mlx5_eswitch_ecpf_idx(esw) &&
- mlx5_ecpf_vport_exists(esw->dev))
- return MLX5_VPORT_ECPF;
-
- if (index == mlx5_eswitch_uplink_idx(esw))
- return MLX5_VPORT_UPLINK;
-
- /* SF vports indices are after VFs and before ECPF */
- if (mlx5_sf_supported(esw->dev) &&
- index > mlx5_core_max_vfs(esw->dev))
- return mlx5_esw_sf_vport_index_to_num(esw, index);
-
- /* PF and VF vports start from 0 to max_vfs */
- return index;
-}
-
static inline unsigned int
mlx5_esw_vport_to_devlink_port_index(const struct mlx5_core_dev *dev,
u16 vport_num)
@@ -617,82 +567,42 @@ mlx5_esw_devlink_port_index_to_vport_num(unsigned int dl_port_index)
/* TODO: This mlx5e_tc function shouldn't be called by eswitch */
void mlx5e_tc_clean_fdb_peer_flows(struct mlx5_eswitch *esw);
-/* The vport getter/iterator are only valid after esw->total_vports
- * and vport->vport are initialized in mlx5_eswitch_init.
+/* Each mark identifies an eswitch vport type.
+ * MLX5_ESW_VPT_HOST_FN identifies both PF and VF ports using
+ * a single mark.
+ * MLX5_ESW_VPT_VF identifies an SRIOV VF vport.
+ * MLX5_ESW_VPT_SF identifies an SF vport.
*/
-#define mlx5_esw_for_all_vports(esw, i, vport) \
- for ((i) = MLX5_VPORT_PF; \
- (vport) = &(esw)->vports[i], \
- (i) < (esw)->total_vports; (i)++)
-
-#define mlx5_esw_for_all_vports_reverse(esw, i, vport) \
- for ((i) = (esw)->total_vports - 1; \
- (vport) = &(esw)->vports[i], \
- (i) >= MLX5_VPORT_PF; (i)--)
-
-#define mlx5_esw_for_each_vf_vport(esw, i, vport, nvfs) \
- for ((i) = MLX5_VPORT_FIRST_VF; \
- (vport) = &(esw)->vports[(i)], \
- (i) <= (nvfs); (i)++)
-
-#define mlx5_esw_for_each_vf_vport_reverse(esw, i, vport, nvfs) \
- for ((i) = (nvfs); \
- (vport) = &(esw)->vports[(i)], \
- (i) >= MLX5_VPORT_FIRST_VF; (i)--)
-
-/* The rep getter/iterator are only valid after esw->total_vports
- * and vport->vport are initialized in mlx5_eswitch_init.
+#define MLX5_ESW_VPT_HOST_FN XA_MARK_0
+#define MLX5_ESW_VPT_VF XA_MARK_1
+#define MLX5_ESW_VPT_SF XA_MARK_2
+
+/* The vport iterator is valid only after the vports are initialized in mlx5_eswitch_init.
+ * Borrows the idea of xa_for_each_marked(), adding support for a desired last element.
*/
-#define mlx5_esw_for_all_reps(esw, i, rep) \
- for ((i) = MLX5_VPORT_PF; \
- (rep) = &(esw)->offloads.vport_reps[i], \
- (i) < (esw)->total_vports; (i)++)
-
-#define mlx5_esw_for_each_vf_rep(esw, i, rep, nvfs) \
- for ((i) = MLX5_VPORT_FIRST_VF; \
- (rep) = &(esw)->offloads.vport_reps[i], \
- (i) <= (nvfs); (i)++)
-
-#define mlx5_esw_for_each_vf_rep_reverse(esw, i, rep, nvfs) \
- for ((i) = (nvfs); \
- (rep) = &(esw)->offloads.vport_reps[i], \
- (i) >= MLX5_VPORT_FIRST_VF; (i)--)
-
-#define mlx5_esw_for_each_vf_vport_num(esw, vport, nvfs) \
- for ((vport) = MLX5_VPORT_FIRST_VF; (vport) <= (nvfs); (vport)++)
-
-#define mlx5_esw_for_each_vf_vport_num_reverse(esw, vport, nvfs) \
- for ((vport) = (nvfs); (vport) >= MLX5_VPORT_FIRST_VF; (vport)--)
-
-/* Includes host PF (vport 0) if it's not esw manager. */
-#define mlx5_esw_for_each_host_func_rep(esw, i, rep, nvfs) \
- for ((i) = (esw)->first_host_vport; \
- (rep) = &(esw)->offloads.vport_reps[i], \
- (i) <= (nvfs); (i)++)
-
-#define mlx5_esw_for_each_host_func_rep_reverse(esw, i, rep, nvfs) \
- for ((i) = (nvfs); \
- (rep) = &(esw)->offloads.vport_reps[i], \
- (i) >= (esw)->first_host_vport; (i)--)
-
-#define mlx5_esw_for_each_host_func_vport(esw, vport, nvfs) \
- for ((vport) = (esw)->first_host_vport; \
- (vport) <= (nvfs); (vport)++)
-
-#define mlx5_esw_for_each_host_func_vport_reverse(esw, vport, nvfs) \
- for ((vport) = (nvfs); \
- (vport) >= (esw)->first_host_vport; (vport)--)
-
-#define mlx5_esw_for_each_sf_rep(esw, i, rep) \
- for ((i) = mlx5_esw_sf_start_idx(esw); \
- (rep) = &(esw)->offloads.vport_reps[(i)], \
- (i) < mlx5_esw_sf_end_idx(esw); (i++))
+
+#define mlx5_esw_for_each_vport(esw, index, vport) \
+ xa_for_each(&((esw)->vports), index, vport)
+
+#define mlx5_esw_for_each_entry_marked(xa, index, entry, last, filter) \
+ for (index = 0, entry = xa_find(xa, &index, last, filter); \
+ entry; entry = xa_find_after(xa, &index, last, filter))
+
+#define mlx5_esw_for_each_vport_marked(esw, index, vport, last, filter) \
+ mlx5_esw_for_each_entry_marked(&((esw)->vports), index, vport, last, filter)
+
+#define mlx5_esw_for_each_vf_vport(esw, index, vport, last) \
+ mlx5_esw_for_each_vport_marked(esw, index, vport, last, MLX5_ESW_VPT_VF)
+
+#define mlx5_esw_for_each_host_func_vport(esw, index, vport, last) \
+ mlx5_esw_for_each_vport_marked(esw, index, vport, last, MLX5_ESW_VPT_HOST_FN)
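
With the vports xarray and the marks above, iterating e.g. the VF vports up to num_vfs collapses to the pattern used throughout the rest of this patch:

	struct mlx5_vport *vport;
	unsigned long i;

	mlx5_esw_for_each_vf_vport(esw, i, vport, esw->esw_funcs.num_vfs) {
		/* vport->vport is the HW vport number, vport->index its slot */
		example_handle_vport(esw, vport->vport);	/* illustrative callee */
	}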
struct mlx5_eswitch *mlx5_devlink_eswitch_get(struct devlink *devlink);
struct mlx5_vport *__must_check
mlx5_eswitch_get_vport(struct mlx5_eswitch *esw, u16 vport_num);
-bool mlx5_eswitch_is_vf_vport(const struct mlx5_eswitch *esw, u16 vport_num);
+bool mlx5_eswitch_is_vf_vport(struct mlx5_eswitch *esw, u16 vport_num);
+bool mlx5_esw_is_sf_vport(struct mlx5_eswitch *esw, u16 vport_num);
int mlx5_esw_funcs_changed_handler(struct notifier_block *nb, unsigned long type, void *data);
@@ -712,13 +622,26 @@ void
esw_vport_destroy_offloads_acl_tables(struct mlx5_eswitch *esw,
struct mlx5_vport *vport);
-int mlx5_esw_vport_tbl_get(struct mlx5_eswitch *esw);
-void mlx5_esw_vport_tbl_put(struct mlx5_eswitch *esw);
+struct esw_vport_tbl_namespace {
+ int max_fte;
+ int max_num_groups;
+ u32 flags;
+};
+
+struct mlx5_vport_tbl_attr {
+ u16 chain;
+ u16 prio;
+ u16 vport;
+ const struct esw_vport_tbl_namespace *vport_ns;
+};
+
+struct mlx5_flow_table *
+mlx5_esw_vporttbl_get(struct mlx5_eswitch *esw, struct mlx5_vport_tbl_attr *attr);
+void
+mlx5_esw_vporttbl_put(struct mlx5_eswitch *esw, struct mlx5_vport_tbl_attr *attr);
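
Callers now describe the desired per-vport table entirely through the attribute, including the namespace sizing; the mirror tables later in this patch use it as follows (condensed):

	struct mlx5_vport_tbl_attr attr = {
		.chain = 0,
		.prio = 1,
		.vport = vport->vport,
		.vport_ns = &mlx5_esw_vport_tbl_mirror_ns,
	};
	struct mlx5_flow_table *fdb;

	fdb = mlx5_esw_vporttbl_get(esw, &attr);	/* takes a reference */
	if (IS_ERR(fdb))
		return PTR_ERR(fdb);
	/* ... add rules to fdb ... */
	mlx5_esw_vporttbl_put(esw, &attr);		/* drops the reference */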
struct mlx5_flow_handle *
esw_add_restore_rule(struct mlx5_eswitch *esw, u32 tag);
-u32
-esw_get_max_restore_tag(struct mlx5_eswitch *esw);
int esw_offloads_load_rep(struct mlx5_eswitch *esw, u16 vport_num);
void esw_offloads_unload_rep(struct mlx5_eswitch *esw, u16 vport_num);
@@ -739,12 +662,13 @@ void mlx5_esw_offloads_devlink_port_unregister(struct mlx5_eswitch *esw, u16 vpo
struct devlink_port *mlx5_esw_offloads_devlink_port(struct mlx5_eswitch *esw, u16 vport_num);
int mlx5_esw_devlink_sf_port_register(struct mlx5_eswitch *esw, struct devlink_port *dl_port,
- u16 vport_num, u32 sfnum);
+ u16 vport_num, u32 controller, u32 sfnum);
void mlx5_esw_devlink_sf_port_unregister(struct mlx5_eswitch *esw, u16 vport_num);
int mlx5_esw_offloads_sf_vport_enable(struct mlx5_eswitch *esw, struct devlink_port *dl_port,
- u16 vport_num, u32 sfnum);
+ u16 vport_num, u32 controller, u32 sfnum);
void mlx5_esw_offloads_sf_vport_disable(struct mlx5_eswitch *esw, u16 vport_num);
+int mlx5_esw_sf_max_hpf_functions(struct mlx5_core_dev *dev, u16 *max_sfs, u16 *sf_base_id);
int mlx5_esw_vport_vhca_id_set(struct mlx5_eswitch *esw, u16 vport_num);
void mlx5_esw_vport_vhca_id_clear(struct mlx5_eswitch *esw, u16 vport_num);
@@ -761,6 +685,18 @@ struct mlx5_esw_event_info {
int mlx5_esw_event_notifier_register(struct mlx5_eswitch *esw, struct notifier_block *n);
void mlx5_esw_event_notifier_unregister(struct mlx5_eswitch *esw, struct notifier_block *n);
+
+bool mlx5_esw_hold(struct mlx5_core_dev *dev);
+void mlx5_esw_release(struct mlx5_core_dev *dev);
+void mlx5_esw_get(struct mlx5_core_dev *dev);
+void mlx5_esw_put(struct mlx5_core_dev *dev);
+int mlx5_esw_try_lock(struct mlx5_eswitch *esw);
+void mlx5_esw_unlock(struct mlx5_eswitch *esw);
+
+void esw_vport_change_handle_locked(struct mlx5_vport *vport);
+
+bool mlx5_esw_offloads_controller_valid(const struct mlx5_eswitch *esw, u32 controller);
+
#else /* CONFIG_MLX5_ESWITCH */
/* eswitch API stubs */
static inline int mlx5_eswitch_init(struct mlx5_core_dev *dev) { return 0; }
@@ -781,6 +717,13 @@ esw_add_restore_rule(struct mlx5_eswitch *esw, u32 tag)
{
return ERR_PTR(-EOPNOTSUPP);
}
+
+static inline unsigned int
+mlx5_esw_vport_to_devlink_port_index(const struct mlx5_core_dev *dev,
+ u16 vport_num)
+{
+ return vport_num;
+}
#endif /* CONFIG_MLX5_ESWITCH */
#endif /* __MLX5_ESWITCH_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
index d4a2f8d1ee9f..db1e74280e57 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@ -40,7 +40,6 @@
#include "eswitch.h"
#include "esw/indir_table.h"
#include "esw/acl/ofld.h"
-#include "esw/indir_table.h"
#include "rdma.h"
#include "en.h"
#include "fs_core.h"
@@ -48,6 +47,17 @@
#include "lib/eq.h"
#include "lib/fs_chains.h"
#include "en_tc.h"
+#include "en/mapping.h"
+
+#define mlx5_esw_for_each_rep(esw, i, rep) \
+ xa_for_each(&((esw)->offloads.vport_reps), i, rep)
+
+#define mlx5_esw_for_each_sf_rep(esw, i, rep) \
+ xa_for_each_marked(&((esw)->offloads.vport_reps), i, rep, MLX5_ESW_VPT_SF)
+
+#define mlx5_esw_for_each_vf_rep(esw, index, rep) \
+ mlx5_esw_for_each_entry_marked(&((esw)->offloads.vport_reps), index, \
+ rep, (esw)->esw_funcs.num_vfs, MLX5_ESW_VPT_VF)
/* There are two match-all miss flows, one for unicast dst mac and
* one for multicast.
@@ -55,192 +65,19 @@
#define MLX5_ESW_MISS_FLOWS (2)
#define UPLINK_REP_INDEX 0
-/* Per vport tables */
-
-#define MLX5_ESW_VPORT_TABLE_SIZE 128
-
-/* This struct is used as a key to the hash table and we need it to be packed
- * so hash result is consistent
- */
-struct mlx5_vport_key {
- u32 chain;
- u16 prio;
- u16 vport;
- u16 vhca_id;
-} __packed;
-
-struct mlx5_vport_tbl_attr {
- u16 chain;
- u16 prio;
- u16 vport;
-};
-
-struct mlx5_vport_table {
- struct hlist_node hlist;
- struct mlx5_flow_table *fdb;
- u32 num_rules;
- struct mlx5_vport_key key;
-};
-
+#define MLX5_ESW_VPORT_TBL_SIZE 128
#define MLX5_ESW_VPORT_TBL_NUM_GROUPS 4
-static struct mlx5_flow_table *
-esw_vport_tbl_create(struct mlx5_eswitch *esw, struct mlx5_flow_namespace *ns)
-{
- struct mlx5_flow_table_attr ft_attr = {};
- struct mlx5_flow_table *fdb;
-
- ft_attr.autogroup.max_num_groups = MLX5_ESW_VPORT_TBL_NUM_GROUPS;
- ft_attr.max_fte = MLX5_ESW_VPORT_TABLE_SIZE;
- ft_attr.prio = FDB_PER_VPORT;
- fdb = mlx5_create_auto_grouped_flow_table(ns, &ft_attr);
- if (IS_ERR(fdb)) {
- esw_warn(esw->dev, "Failed to create per vport FDB Table err %ld\n",
- PTR_ERR(fdb));
- }
-
- return fdb;
-}
-
-static u32 flow_attr_to_vport_key(struct mlx5_eswitch *esw,
- struct mlx5_vport_tbl_attr *attr,
- struct mlx5_vport_key *key)
-{
- key->vport = attr->vport;
- key->chain = attr->chain;
- key->prio = attr->prio;
- key->vhca_id = MLX5_CAP_GEN(esw->dev, vhca_id);
- return jhash(key, sizeof(*key), 0);
-}
-
-/* caller must hold vports.lock */
-static struct mlx5_vport_table *
-esw_vport_tbl_lookup(struct mlx5_eswitch *esw, struct mlx5_vport_key *skey, u32 key)
-{
- struct mlx5_vport_table *e;
-
- hash_for_each_possible(esw->fdb_table.offloads.vports.table, e, hlist, key)
- if (!memcmp(&e->key, skey, sizeof(*skey)))
- return e;
-
- return NULL;
-}
-
-static void
-esw_vport_tbl_put(struct mlx5_eswitch *esw, struct mlx5_vport_tbl_attr *attr)
-{
- struct mlx5_vport_table *e;
- struct mlx5_vport_key key;
- u32 hkey;
-
- mutex_lock(&esw->fdb_table.offloads.vports.lock);
- hkey = flow_attr_to_vport_key(esw, attr, &key);
- e = esw_vport_tbl_lookup(esw, &key, hkey);
- if (!e || --e->num_rules)
- goto out;
-
- hash_del(&e->hlist);
- mlx5_destroy_flow_table(e->fdb);
- kfree(e);
-out:
- mutex_unlock(&esw->fdb_table.offloads.vports.lock);
-}
-
-static struct mlx5_flow_table *
-esw_vport_tbl_get(struct mlx5_eswitch *esw, struct mlx5_vport_tbl_attr *attr)
-{
- struct mlx5_core_dev *dev = esw->dev;
- struct mlx5_flow_namespace *ns;
- struct mlx5_flow_table *fdb;
- struct mlx5_vport_table *e;
- struct mlx5_vport_key skey;
- u32 hkey;
-
- mutex_lock(&esw->fdb_table.offloads.vports.lock);
- hkey = flow_attr_to_vport_key(esw, attr, &skey);
- e = esw_vport_tbl_lookup(esw, &skey, hkey);
- if (e) {
- e->num_rules++;
- goto out;
- }
-
- e = kzalloc(sizeof(*e), GFP_KERNEL);
- if (!e) {
- fdb = ERR_PTR(-ENOMEM);
- goto err_alloc;
- }
-
- ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_FDB);
- if (!ns) {
- esw_warn(dev, "Failed to get FDB namespace\n");
- fdb = ERR_PTR(-ENOENT);
- goto err_ns;
- }
-
- fdb = esw_vport_tbl_create(esw, ns);
- if (IS_ERR(fdb))
- goto err_ns;
-
- e->fdb = fdb;
- e->num_rules = 1;
- e->key = skey;
- hash_add(esw->fdb_table.offloads.vports.table, &e->hlist, hkey);
-out:
- mutex_unlock(&esw->fdb_table.offloads.vports.lock);
- return e->fdb;
-
-err_ns:
- kfree(e);
-err_alloc:
- mutex_unlock(&esw->fdb_table.offloads.vports.lock);
- return fdb;
-}
-
-int mlx5_esw_vport_tbl_get(struct mlx5_eswitch *esw)
-{
- struct mlx5_vport_tbl_attr attr;
- struct mlx5_flow_table *fdb;
- struct mlx5_vport *vport;
- int i;
-
- attr.chain = 0;
- attr.prio = 1;
- mlx5_esw_for_all_vports(esw, i, vport) {
- attr.vport = vport->vport;
- fdb = esw_vport_tbl_get(esw, &attr);
- if (IS_ERR(fdb))
- goto out;
- }
- return 0;
-
-out:
- mlx5_esw_vport_tbl_put(esw);
- return PTR_ERR(fdb);
-}
-
-void mlx5_esw_vport_tbl_put(struct mlx5_eswitch *esw)
-{
- struct mlx5_vport_tbl_attr attr;
- struct mlx5_vport *vport;
- int i;
-
- attr.chain = 0;
- attr.prio = 1;
- mlx5_esw_for_all_vports(esw, i, vport) {
- attr.vport = vport->vport;
- esw_vport_tbl_put(esw, &attr);
- }
-}
-
-/* End: Per vport tables */
+static const struct esw_vport_tbl_namespace mlx5_esw_vport_tbl_mirror_ns = {
+ .max_fte = MLX5_ESW_VPORT_TBL_SIZE,
+ .max_num_groups = MLX5_ESW_VPORT_TBL_NUM_GROUPS,
+ .flags = 0,
+};
static struct mlx5_eswitch_rep *mlx5_eswitch_get_rep(struct mlx5_eswitch *esw,
u16 vport_num)
{
- int idx = mlx5_eswitch_vport_num_to_index(esw, vport_num);
-
- WARN_ON(idx > esw->total_vports - 1);
- return &esw->offloads.vport_reps[idx];
+ return xa_load(&esw->offloads.vport_reps, vport_num);
}
static void
@@ -256,6 +93,26 @@ mlx5_eswitch_set_rule_flow_source(struct mlx5_eswitch *esw,
MLX5_FLOW_CONTEXT_FLOW_SOURCE_LOCAL_VPORT;
}
+/* Strictly, only the upper 16 bits of reg c0 need to be cleared, but the lower 16 bits
+ * are not used later in the pipeline either, so clear the whole register for simplicity.
+ */
+void
+mlx5_eswitch_clear_rule_source_port(struct mlx5_eswitch *esw, struct mlx5_flow_spec *spec)
+{
+ if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
+ void *misc2;
+
+ misc2 = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters_2);
+ MLX5_SET(fte_match_set_misc2, misc2, metadata_reg_c_0, 0);
+
+ misc2 = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters_2);
+ MLX5_SET(fte_match_set_misc2, misc2, metadata_reg_c_0, 0);
+
+ if (!memchr_inv(misc2, 0, MLX5_ST_SZ_BYTES(fte_match_set_misc2)))
+ spec->match_criteria_enable &= ~MLX5_MATCH_MISC_PARAMETERS_2;
+ }
+}
+
static void
mlx5_eswitch_set_rule_source_port(struct mlx5_eswitch *esw,
struct mlx5_flow_spec *spec,
@@ -327,6 +184,19 @@ esw_cleanup_decap_indir(struct mlx5_eswitch *esw,
}
static int
+esw_setup_sampler_dest(struct mlx5_flow_destination *dest,
+ struct mlx5_flow_act *flow_act,
+ struct mlx5_esw_flow_attr *esw_attr,
+ int i)
+{
+ flow_act->flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
+ dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_SAMPLER;
+ dest[i].sampler_id = esw_attr->sample->sampler_id;
+
+ return 0;
+}
+
+static int
esw_setup_ft_dest(struct mlx5_flow_destination *dest,
struct mlx5_flow_act *flow_act,
struct mlx5_eswitch *esw,
@@ -561,7 +431,10 @@ esw_setup_dests(struct mlx5_flow_destination *dest,
esw_src_port_rewrite_supported(esw))
attr->flags |= MLX5_ESW_ATTR_FLAG_SRC_REWRITE;
- if (attr->dest_ft) {
+ if (attr->flags & MLX5_ESW_ATTR_FLAG_SAMPLE) {
+ esw_setup_sampler_dest(dest, flow_act, esw_attr, *i);
+ (*i)++;
+ } else if (attr->dest_ft) {
esw_setup_ft_dest(dest, flow_act, esw, attr, spec, *i);
(*i)++;
} else if (attr->flags & MLX5_ESW_ATTR_FLAG_SLOW_PATH) {
@@ -664,12 +537,16 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
flow_act.modify_hdr = attr->modify_hdr;
- if (split) {
+ /* esw_attr->sample is allocated only when there is a sample action */
+ if (esw_attr->sample && esw_attr->sample->sample_default_tbl) {
+ fdb = esw_attr->sample->sample_default_tbl;
+ } else if (split) {
fwd_attr.chain = attr->chain;
fwd_attr.prio = attr->prio;
fwd_attr.vport = esw_attr->in_rep->vport;
+ fwd_attr.vport_ns = &mlx5_esw_vport_tbl_mirror_ns;
- fdb = esw_vport_tbl_get(esw, &fwd_attr);
+ fdb = mlx5_esw_vporttbl_get(esw, &fwd_attr);
} else {
if (attr->chain || attr->prio)
fdb = mlx5_chains_get_table(chains, attr->chain,
@@ -701,7 +578,7 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
err_add_rule:
if (split)
- esw_vport_tbl_put(esw, &fwd_attr);
+ mlx5_esw_vporttbl_put(esw, &fwd_attr);
else if (attr->chain || attr->prio)
mlx5_chains_put_table(chains, attr->chain, attr->prio, 0);
err_esw_get:
@@ -734,7 +611,8 @@ mlx5_eswitch_add_fwd_rule(struct mlx5_eswitch *esw,
fwd_attr.chain = attr->chain;
fwd_attr.prio = attr->prio;
fwd_attr.vport = esw_attr->in_rep->vport;
- fwd_fdb = esw_vport_tbl_get(esw, &fwd_attr);
+ fwd_attr.vport_ns = &mlx5_esw_vport_tbl_mirror_ns;
+ fwd_fdb = mlx5_esw_vporttbl_get(esw, &fwd_attr);
if (IS_ERR(fwd_fdb)) {
rule = ERR_CAST(fwd_fdb);
goto err_get_fwd;
@@ -779,7 +657,7 @@ mlx5_eswitch_add_fwd_rule(struct mlx5_eswitch *esw,
return rule;
err_chain_src_rewrite:
esw_put_dest_tables_loop(esw, attr, 0, i);
- esw_vport_tbl_put(esw, &fwd_attr);
+ mlx5_esw_vporttbl_put(esw, &fwd_attr);
err_get_fwd:
mlx5_chains_put_table(chains, attr->chain, attr->prio, 0);
err_get_fast:
@@ -814,15 +692,16 @@ __mlx5_eswitch_del_rule(struct mlx5_eswitch *esw,
fwd_attr.chain = attr->chain;
fwd_attr.prio = attr->prio;
fwd_attr.vport = esw_attr->in_rep->vport;
+ fwd_attr.vport_ns = &mlx5_esw_vport_tbl_mirror_ns;
}
if (fwd_rule) {
- esw_vport_tbl_put(esw, &fwd_attr);
+ mlx5_esw_vporttbl_put(esw, &fwd_attr);
mlx5_chains_put_table(chains, attr->chain, attr->prio, 0);
esw_put_dest_tables_loop(esw, attr, 0, esw_attr->split_count);
} else {
if (split)
- esw_vport_tbl_put(esw, &fwd_attr);
+ mlx5_esw_vporttbl_put(esw, &fwd_attr);
else if (attr->chain || attr->prio)
mlx5_chains_put_table(chains, attr->chain, attr->prio, 0);
esw_cleanup_dests(esw, attr);
@@ -848,10 +727,11 @@ mlx5_eswitch_del_fwd_rule(struct mlx5_eswitch *esw,
static int esw_set_global_vlan_pop(struct mlx5_eswitch *esw, u8 val)
{
struct mlx5_eswitch_rep *rep;
- int i, err = 0;
+ unsigned long i;
+ int err = 0;
esw_debug(esw->dev, "%s applying global %s policy\n", __func__, val ? "pop" : "none");
- mlx5_esw_for_each_host_func_rep(esw, i, rep, esw->esw_funcs.num_vfs) {
+ mlx5_esw_for_each_host_func_vport(esw, i, rep, esw->esw_funcs.num_vfs) {
if (atomic_read(&rep->rep_data[REP_ETH].state) != REP_LOADED)
continue;
@@ -1043,7 +923,8 @@ out:
}
struct mlx5_flow_handle *
-mlx5_eswitch_add_send_to_vport_rule(struct mlx5_eswitch *esw, u16 vport,
+mlx5_eswitch_add_send_to_vport_rule(struct mlx5_eswitch *on_esw,
+ struct mlx5_eswitch_rep *rep,
u32 sqn)
{
struct mlx5_flow_act flow_act = {0};
@@ -1061,21 +942,30 @@ mlx5_eswitch_add_send_to_vport_rule(struct mlx5_eswitch *esw, u16 vport,
misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
MLX5_SET(fte_match_set_misc, misc, source_sqn, sqn);
/* source vport is the esw manager */
- MLX5_SET(fte_match_set_misc, misc, source_port, esw->manager_vport);
+ MLX5_SET(fte_match_set_misc, misc, source_port, rep->esw->manager_vport);
+ if (MLX5_CAP_ESW(on_esw->dev, merged_eswitch))
+ MLX5_SET(fte_match_set_misc, misc, source_eswitch_owner_vhca_id,
+ MLX5_CAP_GEN(rep->esw->dev, vhca_id));
misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_sqn);
MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);
+ if (MLX5_CAP_ESW(on_esw->dev, merged_eswitch))
+ MLX5_SET_TO_ONES(fte_match_set_misc, misc,
+ source_eswitch_owner_vhca_id);
spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;
dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
- dest.vport.num = vport;
+ dest.vport.num = rep->vport;
+ dest.vport.vhca_id = MLX5_CAP_GEN(rep->esw->dev, vhca_id);
+ dest.vport.flags |= MLX5_FLOW_DEST_VPORT_VHCA_ID;
flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
- flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb,
+ flow_rule = mlx5_add_flow_rules(on_esw->fdb_table.offloads.slow_fdb,
spec, &flow_act, &dest, 1);
if (IS_ERR(flow_rule))
- esw_warn(esw->dev, "FDB: Failed to add send to vport rule err %ld\n", PTR_ERR(flow_rule));
+ esw_warn(on_esw->dev, "FDB: Failed to add send to vport rule err %ld\n",
+ PTR_ERR(flow_rule));
out:
kvfree(spec);
return flow_rule;
@@ -1090,13 +980,13 @@ void mlx5_eswitch_del_send_to_vport_rule(struct mlx5_flow_handle *rule)
static void mlx5_eswitch_del_send_to_vport_meta_rules(struct mlx5_eswitch *esw)
{
struct mlx5_flow_handle **flows = esw->fdb_table.offloads.send_to_vport_meta_rules;
- int i = 0, num_vfs = esw->esw_funcs.num_vfs, vport_num;
+ int i = 0, num_vfs = esw->esw_funcs.num_vfs;
if (!num_vfs || !flows)
return;
- mlx5_esw_for_each_vf_vport_num(esw, vport_num, num_vfs)
- mlx5_del_flow_rules(flows[i++]);
+ for (i = 0; i < num_vfs; i++)
+ mlx5_del_flow_rules(flows[i]);
kvfree(flows);
}
@@ -1104,12 +994,15 @@ static void mlx5_eswitch_del_send_to_vport_meta_rules(struct mlx5_eswitch *esw)
static int
mlx5_eswitch_add_send_to_vport_meta_rules(struct mlx5_eswitch *esw)
{
- int num_vfs, vport_num, rule_idx = 0, err = 0;
struct mlx5_flow_destination dest = {};
struct mlx5_flow_act flow_act = {0};
+ int num_vfs, rule_idx = 0, err = 0;
struct mlx5_flow_handle *flow_rule;
struct mlx5_flow_handle **flows;
struct mlx5_flow_spec *spec;
+ struct mlx5_vport *vport;
+ unsigned long i;
+ u16 vport_num;
num_vfs = esw->esw_funcs.num_vfs;
flows = kvzalloc(num_vfs * sizeof(*flows), GFP_KERNEL);
@@ -1133,7 +1026,8 @@ mlx5_eswitch_add_send_to_vport_meta_rules(struct mlx5_eswitch *esw)
dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
- mlx5_esw_for_each_vf_vport_num(esw, vport_num, num_vfs) {
+ mlx5_esw_for_each_vf_vport(esw, i, vport, num_vfs) {
+ vport_num = vport->vport;
MLX5_SET(fte_match_param, spec->match_value, misc_parameters_2.metadata_reg_c_0,
mlx5_eswitch_get_vport_metadata_for_match(esw, vport_num));
dest.vport.num = vport_num;
@@ -1275,12 +1169,14 @@ static int esw_add_fdb_peer_miss_rules(struct mlx5_eswitch *esw,
struct mlx5_flow_destination dest = {};
struct mlx5_flow_act flow_act = {0};
struct mlx5_flow_handle **flows;
- struct mlx5_flow_handle *flow;
- struct mlx5_flow_spec *spec;
/* total vports is the same for both e-switches */
int nvports = esw->total_vports;
+ struct mlx5_flow_handle *flow;
+ struct mlx5_flow_spec *spec;
+ struct mlx5_vport *vport;
+ unsigned long i;
void *misc;
- int err, i;
+ int err;
spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
if (!spec)
@@ -1299,6 +1195,7 @@ static int esw_add_fdb_peer_miss_rules(struct mlx5_eswitch *esw,
misc_parameters);
if (mlx5_core_is_ecpf_esw_manager(esw->dev)) {
+ vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_PF);
esw_set_peer_miss_rule_source_port(esw, peer_dev->priv.eswitch,
spec, MLX5_VPORT_PF);
@@ -1308,10 +1205,11 @@ static int esw_add_fdb_peer_miss_rules(struct mlx5_eswitch *esw,
err = PTR_ERR(flow);
goto add_pf_flow_err;
}
- flows[MLX5_VPORT_PF] = flow;
+ flows[vport->index] = flow;
}
if (mlx5_ecpf_vport_exists(esw->dev)) {
+ vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_ECPF);
MLX5_SET(fte_match_set_misc, misc, source_port, MLX5_VPORT_ECPF);
flow = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb,
spec, &flow_act, &dest, 1);
@@ -1319,13 +1217,13 @@ static int esw_add_fdb_peer_miss_rules(struct mlx5_eswitch *esw,
err = PTR_ERR(flow);
goto add_ecpf_flow_err;
}
- flows[mlx5_eswitch_ecpf_idx(esw)] = flow;
+ flows[vport->index] = flow;
}
- mlx5_esw_for_each_vf_vport_num(esw, i, mlx5_core_max_vfs(esw->dev)) {
+ mlx5_esw_for_each_vf_vport(esw, i, vport, mlx5_core_max_vfs(esw->dev)) {
esw_set_peer_miss_rule_source_port(esw,
peer_dev->priv.eswitch,
- spec, i);
+ spec, vport->vport);
flow = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb,
spec, &flow_act, &dest, 1);
@@ -1333,7 +1231,7 @@ static int esw_add_fdb_peer_miss_rules(struct mlx5_eswitch *esw,
err = PTR_ERR(flow);
goto add_vf_flow_err;
}
- flows[i] = flow;
+ flows[vport->index] = flow;
}
esw->fdb_table.offloads.peer_miss_rules = flows;
@@ -1342,15 +1240,20 @@ static int esw_add_fdb_peer_miss_rules(struct mlx5_eswitch *esw,
return 0;
add_vf_flow_err:
- nvports = --i;
- mlx5_esw_for_each_vf_vport_num_reverse(esw, i, nvports)
- mlx5_del_flow_rules(flows[i]);
-
- if (mlx5_ecpf_vport_exists(esw->dev))
- mlx5_del_flow_rules(flows[mlx5_eswitch_ecpf_idx(esw)]);
+ mlx5_esw_for_each_vf_vport(esw, i, vport, mlx5_core_max_vfs(esw->dev)) {
+ if (!flows[vport->index])
+ continue;
+ mlx5_del_flow_rules(flows[vport->index]);
+ }
+ if (mlx5_ecpf_vport_exists(esw->dev)) {
+ vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_ECPF);
+ mlx5_del_flow_rules(flows[vport->index]);
+ }
add_ecpf_flow_err:
- if (mlx5_core_is_ecpf_esw_manager(esw->dev))
- mlx5_del_flow_rules(flows[MLX5_VPORT_PF]);
+ if (mlx5_core_is_ecpf_esw_manager(esw->dev)) {
+ vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_PF);
+ mlx5_del_flow_rules(flows[vport->index]);
+ }
add_pf_flow_err:
esw_warn(esw->dev, "FDB: Failed to add peer miss flow rule err %d\n", err);
kvfree(flows);
@@ -1362,20 +1265,23 @@ alloc_flows_err:
static void esw_del_fdb_peer_miss_rules(struct mlx5_eswitch *esw)
{
struct mlx5_flow_handle **flows;
- int i;
+ struct mlx5_vport *vport;
+ unsigned long i;
flows = esw->fdb_table.offloads.peer_miss_rules;
- mlx5_esw_for_each_vf_vport_num_reverse(esw, i,
- mlx5_core_max_vfs(esw->dev))
- mlx5_del_flow_rules(flows[i]);
+ mlx5_esw_for_each_vf_vport(esw, i, vport, mlx5_core_max_vfs(esw->dev))
+ mlx5_del_flow_rules(flows[vport->index]);
- if (mlx5_ecpf_vport_exists(esw->dev))
- mlx5_del_flow_rules(flows[mlx5_eswitch_ecpf_idx(esw)]);
-
- if (mlx5_core_is_ecpf_esw_manager(esw->dev))
- mlx5_del_flow_rules(flows[MLX5_VPORT_PF]);
+ if (mlx5_ecpf_vport_exists(esw->dev)) {
+ vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_ECPF);
+ mlx5_del_flow_rules(flows[vport->index]);
+ }
+ if (mlx5_core_is_ecpf_esw_manager(esw->dev)) {
+ vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_PF);
+ mlx5_del_flow_rules(flows[vport->index]);
+ }
kvfree(flows);
}
@@ -1453,14 +1359,14 @@ esw_add_restore_rule(struct mlx5_eswitch *esw, u32 tag)
if (!mlx5_eswitch_reg_c1_loopback_supported(esw))
return ERR_PTR(-EOPNOTSUPP);
- spec = kzalloc(sizeof(*spec), GFP_KERNEL);
+ spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
if (!spec)
return ERR_PTR(-ENOMEM);
misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
misc_parameters_2);
MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0,
- ESW_CHAIN_TAG_METADATA_MASK);
+ ESW_REG_C0_USER_DATA_METADATA_MASK);
misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
misc_parameters_2);
MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0, tag);
@@ -1476,7 +1382,7 @@ esw_add_restore_rule(struct mlx5_eswitch *esw, u32 tag)
dest.ft = esw->offloads.ft_offloads;
flow_rule = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
- kfree(spec);
+ kvfree(spec);
if (IS_ERR(flow_rule))
esw_warn(esw->dev,
@@ -1486,12 +1392,6 @@ esw_add_restore_rule(struct mlx5_eswitch *esw, u32 tag)
return flow_rule;
}
-u32
-esw_get_max_restore_tag(struct mlx5_eswitch *esw)
-{
- return ESW_CHAIN_TAG_METADATA_MASK;
-}
-
#define MAX_PF_SQ 256
#define MAX_SQ_NVPORTS 32
@@ -1521,6 +1421,44 @@ static void esw_set_flow_group_source_port(struct mlx5_eswitch *esw,
}
#if IS_ENABLED(CONFIG_MLX5_CLS_ACT)
+static void esw_vport_tbl_put(struct mlx5_eswitch *esw)
+{
+ struct mlx5_vport_tbl_attr attr;
+ struct mlx5_vport *vport;
+ unsigned long i;
+
+ attr.chain = 0;
+ attr.prio = 1;
+ mlx5_esw_for_each_vport(esw, i, vport) {
+ attr.vport = vport->vport;
+ attr.vport_ns = &mlx5_esw_vport_tbl_mirror_ns;
+ mlx5_esw_vporttbl_put(esw, &attr);
+ }
+}
+
+static int esw_vport_tbl_get(struct mlx5_eswitch *esw)
+{
+ struct mlx5_vport_tbl_attr attr;
+ struct mlx5_flow_table *fdb;
+ struct mlx5_vport *vport;
+ unsigned long i;
+
+ attr.chain = 0;
+ attr.prio = 1;
+ mlx5_esw_for_each_vport(esw, i, vport) {
+ attr.vport = vport->vport;
+ attr.vport_ns = &mlx5_esw_vport_tbl_mirror_ns;
+ fdb = mlx5_esw_vporttbl_get(esw, &attr);
+ if (IS_ERR(fdb))
+ goto out;
+ }
+ return 0;
+
+out:
+ esw_vport_tbl_put(esw);
+ return PTR_ERR(fdb);
+}
+
#define fdb_modify_header_fwd_to_table_supported(esw) \
(MLX5_CAP_ESW_FLOWTABLE((esw)->dev, fdb_modify_header_fwd_to_table))
static void esw_init_chains_offload_flags(struct mlx5_eswitch *esw, u32 *flags)
@@ -1570,7 +1508,7 @@ esw_chains_create(struct mlx5_eswitch *esw, struct mlx5_flow_table *miss_fdb)
attr.max_ft_sz = fdb_max;
attr.max_grp_num = esw->params.large_group_num;
attr.default_ft = miss_fdb;
- attr.max_restore_tag = esw_get_max_restore_tag(esw);
+ attr.mapping = esw->offloads.reg_c0_obj_pool;
chains = mlx5_chains_create(dev, &attr);
if (IS_ERR(chains)) {
@@ -1598,7 +1536,7 @@ esw_chains_create(struct mlx5_eswitch *esw, struct mlx5_flow_table *miss_fdb)
/* Open level 1 for split fdb rules now if prios isn't supported */
if (!mlx5_chains_prios_supported(chains)) {
- err = mlx5_esw_vport_tbl_get(esw);
+ err = esw_vport_tbl_get(esw);
if (err)
goto level_1_err;
}
@@ -1622,7 +1560,7 @@ static void
esw_chains_destroy(struct mlx5_eswitch *esw, struct mlx5_fs_chains *chains)
{
if (!mlx5_chains_prios_supported(chains))
- mlx5_esw_vport_tbl_put(esw);
+ esw_vport_tbl_put(esw);
mlx5_chains_put_table(chains, 0, 1, 0);
mlx5_chains_put_table(chains, mlx5_chains_get_nf_ft_chain(chains), 1, 0);
mlx5_chains_destroy(chains);
@@ -1709,6 +1647,12 @@ static int esw_create_offloads_fdb_tables(struct mlx5_eswitch *esw)
MLX5_SET_TO_ONES(fte_match_param, match_criteria, misc_parameters.source_sqn);
MLX5_SET_TO_ONES(fte_match_param, match_criteria, misc_parameters.source_port);
+ if (MLX5_CAP_ESW(esw->dev, merged_eswitch)) {
+ MLX5_SET_TO_ONES(fte_match_param, match_criteria,
+ misc_parameters.source_eswitch_owner_vhca_id);
+ MLX5_SET(create_flow_group_in, flow_group_in,
+ source_eswitch_owner_vhca_id_valid, 1);
+ }
ix = esw->total_vports * MAX_SQ_NVPORTS + MAX_PF_SQ;
MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
@@ -1865,6 +1809,7 @@ static void esw_destroy_offloads_fdb_tables(struct mlx5_eswitch *esw)
/* Holds true only as long as DMFS is the default */
mlx5_flow_namespace_set_mode(esw->fdb_table.offloads.ns,
MLX5_FLOW_STEERING_MODE_DMFS);
+ atomic64_set(&esw->user_count, 0);
}
static int esw_create_offloads_table(struct mlx5_eswitch *esw)
@@ -1988,12 +1933,12 @@ out:
return flow_rule;
}
-
-static int mlx5_eswitch_inline_mode_get(const struct mlx5_eswitch *esw, u8 *mode)
+static int mlx5_eswitch_inline_mode_get(struct mlx5_eswitch *esw, u8 *mode)
{
u8 prev_mlx5_mode, mlx5_mode = MLX5_INLINE_MODE_L2;
struct mlx5_core_dev *dev = esw->dev;
- int vport;
+ struct mlx5_vport *vport;
+ unsigned long i;
if (!MLX5_CAP_GEN(dev, vport_group_manager))
return -EOPNOTSUPP;
@@ -2014,8 +1959,8 @@ static int mlx5_eswitch_inline_mode_get(const struct mlx5_eswitch *esw, u8 *mode
query_vports:
mlx5_query_nic_vport_min_inline(dev, esw->first_host_vport, &prev_mlx5_mode);
- mlx5_esw_for_each_host_func_vport(esw, vport, esw->esw_funcs.num_vfs) {
- mlx5_query_nic_vport_min_inline(dev, vport, &mlx5_mode);
+ mlx5_esw_for_each_host_func_vport(esw, i, vport, esw->esw_funcs.num_vfs) {
+ mlx5_query_nic_vport_min_inline(dev, vport->vport, &mlx5_mode);
if (prev_mlx5_mode != mlx5_mode)
return -EINVAL;
prev_mlx5_mode = mlx5_mode;
@@ -2067,7 +2012,7 @@ static int esw_create_restore_table(struct mlx5_eswitch *esw)
goto out_free;
}
- ft_attr.max_fte = 1 << ESW_CHAIN_TAG_METADATA_BITS;
+ ft_attr.max_fte = 1 << ESW_REG_C0_USER_DATA_METADATA_BITS;
ft = mlx5_create_flow_table(ns, &ft_attr);
if (IS_ERR(ft)) {
err = PTR_ERR(ft);
@@ -2082,7 +2027,7 @@ static int esw_create_restore_table(struct mlx5_eswitch *esw)
misc_parameters_2);
MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0,
- ESW_CHAIN_TAG_METADATA_MASK);
+ ESW_REG_C0_USER_DATA_METADATA_MASK);
MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index,
ft_attr.max_fte - 1);
@@ -2158,34 +2103,82 @@ static int esw_offloads_start(struct mlx5_eswitch *esw,
return err;
}
-void esw_offloads_cleanup_reps(struct mlx5_eswitch *esw)
+static void mlx5_esw_offloads_rep_mark_set(struct mlx5_eswitch *esw,
+ struct mlx5_eswitch_rep *rep,
+ xa_mark_t mark)
{
- kfree(esw->offloads.vport_reps);
+ bool mark_set;
+
+ /* Copy the mark from vport to its rep */
+ mark_set = xa_get_mark(&esw->vports, rep->vport, mark);
+ if (mark_set)
+ xa_set_mark(&esw->offloads.vport_reps, rep->vport, mark);
}
-int esw_offloads_init_reps(struct mlx5_eswitch *esw)
+static int mlx5_esw_offloads_rep_init(struct mlx5_eswitch *esw, const struct mlx5_vport *vport)
{
- int total_vports = esw->total_vports;
struct mlx5_eswitch_rep *rep;
- int vport_index;
- u8 rep_type;
+ int rep_type;
+ int err;
- esw->offloads.vport_reps = kcalloc(total_vports,
- sizeof(struct mlx5_eswitch_rep),
- GFP_KERNEL);
- if (!esw->offloads.vport_reps)
+ rep = kzalloc(sizeof(*rep), GFP_KERNEL);
+ if (!rep)
return -ENOMEM;
- mlx5_esw_for_all_reps(esw, vport_index, rep) {
- rep->vport = mlx5_eswitch_index_to_vport_num(esw, vport_index);
- rep->vport_index = vport_index;
+ rep->vport = vport->vport;
+ rep->vport_index = vport->index;
+ for (rep_type = 0; rep_type < NUM_REP_TYPES; rep_type++)
+ atomic_set(&rep->rep_data[rep_type].state, REP_UNREGISTERED);
- for (rep_type = 0; rep_type < NUM_REP_TYPES; rep_type++)
- atomic_set(&rep->rep_data[rep_type].state,
- REP_UNREGISTERED);
- }
+ err = xa_insert(&esw->offloads.vport_reps, rep->vport, rep, GFP_KERNEL);
+ if (err)
+ goto insert_err;
+ mlx5_esw_offloads_rep_mark_set(esw, rep, MLX5_ESW_VPT_HOST_FN);
+ mlx5_esw_offloads_rep_mark_set(esw, rep, MLX5_ESW_VPT_VF);
+ mlx5_esw_offloads_rep_mark_set(esw, rep, MLX5_ESW_VPT_SF);
return 0;
+
+insert_err:
+ kfree(rep);
+ return err;
+}
+
+static void mlx5_esw_offloads_rep_cleanup(struct mlx5_eswitch *esw,
+ struct mlx5_eswitch_rep *rep)
+{
+ xa_erase(&esw->offloads.vport_reps, rep->vport);
+ kfree(rep);
+}
+
+void esw_offloads_cleanup_reps(struct mlx5_eswitch *esw)
+{
+ struct mlx5_eswitch_rep *rep;
+ unsigned long i;
+
+ mlx5_esw_for_each_rep(esw, i, rep)
+ mlx5_esw_offloads_rep_cleanup(esw, rep);
+ xa_destroy(&esw->offloads.vport_reps);
+}
+
+int esw_offloads_init_reps(struct mlx5_eswitch *esw)
+{
+ struct mlx5_vport *vport;
+ unsigned long i;
+ int err;
+
+ xa_init(&esw->offloads.vport_reps);
+
+ mlx5_esw_for_each_vport(esw, i, vport) {
+ err = mlx5_esw_offloads_rep_init(esw, vport);
+ if (err)
+ goto err;
+ }
+ return 0;
+
+err:
+ esw_offloads_cleanup_reps(esw);
+ return err;
}
static void __esw_offloads_unload_rep(struct mlx5_eswitch *esw,
@@ -2199,7 +2192,7 @@ static void __esw_offloads_unload_rep(struct mlx5_eswitch *esw,
static void __unload_reps_sf_vport(struct mlx5_eswitch *esw, u8 rep_type)
{
struct mlx5_eswitch_rep *rep;
- int i;
+ unsigned long i;
mlx5_esw_for_each_sf_rep(esw, i, rep)
__esw_offloads_unload_rep(esw, rep, rep_type);
@@ -2208,11 +2201,11 @@ static void __unload_reps_sf_vport(struct mlx5_eswitch *esw, u8 rep_type)
static void __unload_reps_all_vport(struct mlx5_eswitch *esw, u8 rep_type)
{
struct mlx5_eswitch_rep *rep;
- int i;
+ unsigned long i;
__unload_reps_sf_vport(esw, rep_type);
- mlx5_esw_for_each_vf_rep_reverse(esw, i, rep, esw->esw_funcs.num_vfs)
+ mlx5_esw_for_each_vf_rep(esw, i, rep)
__esw_offloads_unload_rep(esw, rep, rep_type);
if (mlx5_ecpf_vport_exists(esw->dev)) {
@@ -2270,9 +2263,11 @@ int esw_offloads_load_rep(struct mlx5_eswitch *esw, u16 vport_num)
if (esw->mode != MLX5_ESWITCH_OFFLOADS)
return 0;
- err = mlx5_esw_offloads_devlink_port_register(esw, vport_num);
- if (err)
- return err;
+ if (vport_num != MLX5_VPORT_UPLINK) {
+ err = mlx5_esw_offloads_devlink_port_register(esw, vport_num);
+ if (err)
+ return err;
+ }
err = mlx5_esw_offloads_rep_load(esw, vport_num);
if (err)
@@ -2280,7 +2275,8 @@ int esw_offloads_load_rep(struct mlx5_eswitch *esw, u16 vport_num)
return err;
load_err:
- mlx5_esw_offloads_devlink_port_unregister(esw, vport_num);
+ if (vport_num != MLX5_VPORT_UPLINK)
+ mlx5_esw_offloads_devlink_port_unregister(esw, vport_num);
return err;
}
@@ -2290,7 +2286,9 @@ void esw_offloads_unload_rep(struct mlx5_eswitch *esw, u16 vport_num)
return;
mlx5_esw_offloads_rep_unload(esw, vport_num);
- mlx5_esw_offloads_devlink_port_unregister(esw, vport_num);
+
+ if (vport_num != MLX5_VPORT_UPLINK)
+ mlx5_esw_offloads_devlink_port_unregister(esw, vport_num);
}
#define ESW_OFFLOADS_DEVCOM_PAIR (0)
@@ -2299,13 +2297,8 @@ void esw_offloads_unload_rep(struct mlx5_eswitch *esw, u16 vport_num)
static int mlx5_esw_offloads_pair(struct mlx5_eswitch *esw,
struct mlx5_eswitch *peer_esw)
{
- int err;
-
- err = esw_add_fdb_peer_miss_rules(esw, peer_esw->dev);
- if (err)
- return err;
- return 0;
+ return esw_add_fdb_peer_miss_rules(esw, peer_esw->dev);
}
static void mlx5_esw_offloads_unpair(struct mlx5_eswitch *esw)
@@ -2430,8 +2423,7 @@ static void esw_offloads_devcom_cleanup(struct mlx5_eswitch *esw)
mlx5_devcom_unregister_component(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
}
-static bool
-esw_check_vport_match_metadata_supported(const struct mlx5_eswitch *esw)
+bool mlx5_esw_vport_match_metadata_supported(const struct mlx5_eswitch *esw)
{
if (!MLX5_CAP_ESW(esw->dev, esw_uplink_ingress_acl))
return false;
@@ -2500,25 +2492,25 @@ static void esw_offloads_vport_metadata_cleanup(struct mlx5_eswitch *esw,
static void esw_offloads_metadata_uninit(struct mlx5_eswitch *esw)
{
struct mlx5_vport *vport;
- int i;
+ unsigned long i;
if (!mlx5_eswitch_vport_match_metadata_enabled(esw))
return;
- mlx5_esw_for_all_vports_reverse(esw, i, vport)
+ mlx5_esw_for_each_vport(esw, i, vport)
esw_offloads_vport_metadata_cleanup(esw, vport);
}
static int esw_offloads_metadata_init(struct mlx5_eswitch *esw)
{
struct mlx5_vport *vport;
+ unsigned long i;
int err;
- int i;
if (!mlx5_eswitch_vport_match_metadata_enabled(esw))
return 0;
- mlx5_esw_for_all_vports(esw, i, vport) {
+ mlx5_esw_for_each_vport(esw, i, vport) {
err = esw_offloads_vport_metadata_setup(esw, vport);
if (err)
goto metadata_err;
@@ -2531,6 +2523,28 @@ metadata_err:
return err;
}
+int mlx5_esw_offloads_vport_metadata_set(struct mlx5_eswitch *esw, bool enable)
+{
+ int err = 0;
+
+ down_write(&esw->mode_lock);
+ if (esw->mode != MLX5_ESWITCH_NONE) {
+ err = -EBUSY;
+ goto done;
+ }
+ if (!mlx5_esw_vport_match_metadata_supported(esw)) {
+ err = -EOPNOTSUPP;
+ goto done;
+ }
+ if (enable)
+ esw->flags |= MLX5_ESWITCH_VPORT_MATCH_METADATA;
+ else
+ esw->flags &= ~MLX5_ESWITCH_VPORT_MATCH_METADATA;
+done:
+ up_write(&esw->mode_lock);
+ return err;
+}
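
This gives user space a safe switch for vport match metadata while the eswitch is down; a hedged sketch of a caller (in-tree the knob is wired through a devlink driver parameter, the wrapper below is illustrative):

	static int example_set_port_metadata(struct mlx5_core_dev *dev, bool enable)
	{
		struct mlx5_eswitch *esw = dev->priv.eswitch;

		if (!mlx5_esw_allowed(esw))
			return -EOPNOTSUPP;
		/* Returns -EBUSY unless esw->mode == MLX5_ESWITCH_NONE */
		return mlx5_esw_offloads_vport_metadata_set(esw, enable);
	}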
+
int
esw_vport_create_offloads_acl_tables(struct mlx5_eswitch *esw,
struct mlx5_vport *vport)
@@ -2565,6 +2579,9 @@ static int esw_create_uplink_offloads_acl_tables(struct mlx5_eswitch *esw)
struct mlx5_vport *vport;
vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_UPLINK);
+ if (IS_ERR(vport))
+ return PTR_ERR(vport);
+
return esw_vport_create_offloads_acl_tables(esw, vport);
}
@@ -2573,6 +2590,9 @@ static void esw_destroy_uplink_offloads_acl_tables(struct mlx5_eswitch *esw)
struct mlx5_vport *vport;
vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_UPLINK);
+ if (IS_ERR(vport))
+ return;
+
esw_vport_destroy_offloads_acl_tables(esw, vport);
}
@@ -2584,6 +2604,7 @@ static int esw_offloads_steering_init(struct mlx5_eswitch *esw)
memset(&esw->fdb_table.offloads, 0, sizeof(struct offloads_fdb));
mutex_init(&esw->fdb_table.offloads.vports.lock);
hash_init(esw->fdb_table.offloads.vports.table);
+ atomic64_set(&esw->user_count, 0);
indir = mlx5_esw_indir_table_init();
if (IS_ERR(indir)) {
@@ -2726,10 +2747,25 @@ static int mlx5_esw_host_number_init(struct mlx5_eswitch *esw)
return 0;
}
+bool mlx5_esw_offloads_controller_valid(const struct mlx5_eswitch *esw, u32 controller)
+{
+ /* Local controller is always valid */
+ if (controller == 0)
+ return true;
+
+ if (!mlx5_core_is_ecpf_esw_manager(esw->dev))
+ return false;
+
+	/* The device numbers external hosts from zero, while controller numbers start at one */
+ return (controller == esw->offloads.host_number + 1);
+}
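
Worked through: on an ECPF eswitch manager whose external host reports host_number 0, controllers 0 (local) and 1 (that host) are valid and everything else is rejected; on a plain PF only controller 0 passes. As assertions (illustrative only):

	/* Assuming an ECPF eswitch manager with offloads.host_number == 0 */
	WARN_ON(!mlx5_esw_offloads_controller_valid(esw, 0));	/* local */
	WARN_ON(!mlx5_esw_offloads_controller_valid(esw, 1));	/* external host */
	WARN_ON(mlx5_esw_offloads_controller_valid(esw, 2));	/* unknown */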
+
int esw_offloads_enable(struct mlx5_eswitch *esw)
{
+ struct mapping_ctx *reg_c0_obj_pool;
struct mlx5_vport *vport;
- int err, i;
+ unsigned long i;
+ int err;
if (MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, reformat) &&
MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, decap))
@@ -2744,9 +2780,6 @@ int esw_offloads_enable(struct mlx5_eswitch *esw)
if (err)
goto err_metadata;
- if (esw_check_vport_match_metadata_supported(esw))
- esw->flags |= MLX5_ESWITCH_VPORT_MATCH_METADATA;
-
err = esw_offloads_metadata_init(esw);
if (err)
goto err_metadata;
@@ -2755,6 +2788,15 @@ int esw_offloads_enable(struct mlx5_eswitch *esw)
if (err)
goto err_vport_metadata;
+ reg_c0_obj_pool = mapping_create(sizeof(struct mlx5_mapped_obj),
+ ESW_REG_C0_USER_DATA_METADATA_MASK,
+ true);
+ if (IS_ERR(reg_c0_obj_pool)) {
+ err = PTR_ERR(reg_c0_obj_pool);
+ goto err_pool;
+ }
+ esw->offloads.reg_c0_obj_pool = reg_c0_obj_pool;
+
err = esw_offloads_steering_init(esw);
if (err)
goto err_steering_init;
@@ -2781,11 +2823,12 @@ err_vports:
err_uplink:
esw_offloads_steering_cleanup(esw);
err_steering_init:
+ mapping_destroy(reg_c0_obj_pool);
+err_pool:
esw_set_passing_vport_metadata(esw, false);
err_vport_metadata:
esw_offloads_metadata_uninit(esw);
err_metadata:
- esw->flags &= ~MLX5_ESWITCH_VPORT_MATCH_METADATA;
mlx5_rdma_disable_roce(esw->dev);
mutex_destroy(&esw->offloads.termtbl_mutex);
return err;
@@ -2819,8 +2862,8 @@ void esw_offloads_disable(struct mlx5_eswitch *esw)
esw_offloads_unload_rep(esw, MLX5_VPORT_UPLINK);
esw_set_passing_vport_metadata(esw, false);
esw_offloads_steering_cleanup(esw);
+ mapping_destroy(esw->offloads.reg_c0_obj_pool);
esw_offloads_metadata_uninit(esw);
- esw->flags &= ~MLX5_ESWITCH_VPORT_MATCH_METADATA;
mlx5_rdma_disable_roce(esw->dev);
mutex_destroy(&esw->offloads.termtbl_mutex);
esw->offloads.encap = DEVLINK_ESWITCH_ENCAP_MODE_NONE;
@@ -2925,8 +2968,14 @@ int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode,
if (esw_mode_from_devlink(mode, &mlx5_mode))
return -EINVAL;
- mutex_lock(&esw->mode_lock);
- cur_mlx5_mode = esw->mode;
+ err = mlx5_esw_try_lock(esw);
+ if (err < 0) {
+ NL_SET_ERR_MSG_MOD(extack, "Can't change mode, E-Switch is busy");
+ return err;
+ }
+ cur_mlx5_mode = err;
+ err = 0;
+
if (cur_mlx5_mode == mlx5_mode)
goto unlock;
@@ -2938,7 +2987,7 @@ int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode,
err = -EINVAL;
unlock:
- mutex_unlock(&esw->mode_lock);
+ mlx5_esw_unlock(esw);
return err;
}
@@ -2951,14 +3000,45 @@ int mlx5_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode)
if (IS_ERR(esw))
return PTR_ERR(esw);
- mutex_lock(&esw->mode_lock);
+ down_write(&esw->mode_lock);
err = eswitch_devlink_esw_mode_check(esw);
if (err)
goto unlock;
err = esw_mode_to_devlink(esw->mode, mode);
unlock:
- mutex_unlock(&esw->mode_lock);
+ up_write(&esw->mode_lock);
+ return err;
+}
+
+static int mlx5_esw_vports_inline_set(struct mlx5_eswitch *esw, u8 mlx5_mode,
+ struct netlink_ext_ack *extack)
+{
+ struct mlx5_core_dev *dev = esw->dev;
+ struct mlx5_vport *vport;
+ u16 err_vport_num = 0;
+ unsigned long i;
+ int err = 0;
+
+ mlx5_esw_for_each_host_func_vport(esw, i, vport, esw->esw_funcs.num_vfs) {
+ err = mlx5_modify_nic_vport_min_inline(dev, vport->vport, mlx5_mode);
+ if (err) {
+ err_vport_num = vport->vport;
+ NL_SET_ERR_MSG_MOD(extack,
+ "Failed to set min inline on vport");
+ goto revert_inline_mode;
+ }
+ }
+ return 0;
+
+revert_inline_mode:
+ mlx5_esw_for_each_host_func_vport(esw, i, vport, esw->esw_funcs.num_vfs) {
+ if (vport->vport == err_vport_num)
+ break;
+ mlx5_modify_nic_vport_min_inline(dev,
+ vport->vport,
+ esw->offloads.inline_mode);
+ }
return err;
}
@@ -2966,15 +3046,15 @@ int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode,
struct netlink_ext_ack *extack)
{
struct mlx5_core_dev *dev = devlink_priv(devlink);
- int err, vport, num_vport;
struct mlx5_eswitch *esw;
u8 mlx5_mode;
+ int err;
esw = mlx5_devlink_eswitch_get(devlink);
if (IS_ERR(esw))
return PTR_ERR(esw);
- mutex_lock(&esw->mode_lock);
+ down_write(&esw->mode_lock);
err = eswitch_devlink_esw_mode_check(esw);
if (err)
goto out;
@@ -3003,27 +3083,16 @@ int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode,
if (err)
goto out;
- mlx5_esw_for_each_host_func_vport(esw, vport, esw->esw_funcs.num_vfs) {
- err = mlx5_modify_nic_vport_min_inline(dev, vport, mlx5_mode);
- if (err) {
- NL_SET_ERR_MSG_MOD(extack,
- "Failed to set min inline on vport");
- goto revert_inline_mode;
- }
- }
+ err = mlx5_esw_vports_inline_set(esw, mlx5_mode, extack);
+ if (err)
+ goto out;
esw->offloads.inline_mode = mlx5_mode;
- mutex_unlock(&esw->mode_lock);
+ up_write(&esw->mode_lock);
return 0;
-revert_inline_mode:
- num_vport = --vport;
- mlx5_esw_for_each_host_func_vport_reverse(esw, vport, num_vport)
- mlx5_modify_nic_vport_min_inline(dev,
- vport,
- esw->offloads.inline_mode);
out:
- mutex_unlock(&esw->mode_lock);
+ up_write(&esw->mode_lock);
return err;
}
@@ -3036,14 +3105,14 @@ int mlx5_devlink_eswitch_inline_mode_get(struct devlink *devlink, u8 *mode)
if (IS_ERR(esw))
return PTR_ERR(esw);
- mutex_lock(&esw->mode_lock);
+ down_write(&esw->mode_lock);
err = eswitch_devlink_esw_mode_check(esw);
if (err)
goto unlock;
err = esw_inline_mode_to_devlink(esw->offloads.inline_mode, mode);
unlock:
- mutex_unlock(&esw->mode_lock);
+ up_write(&esw->mode_lock);
return err;
}
@@ -3059,7 +3128,7 @@ int mlx5_devlink_eswitch_encap_mode_set(struct devlink *devlink,
if (IS_ERR(esw))
return PTR_ERR(esw);
- mutex_lock(&esw->mode_lock);
+ down_write(&esw->mode_lock);
err = eswitch_devlink_esw_mode_check(esw);
if (err)
goto unlock;
@@ -3105,7 +3174,7 @@ int mlx5_devlink_eswitch_encap_mode_set(struct devlink *devlink,
}
unlock:
- mutex_unlock(&esw->mode_lock);
+ up_write(&esw->mode_lock);
return err;
}
@@ -3120,14 +3189,14 @@ int mlx5_devlink_eswitch_encap_mode_get(struct devlink *devlink,
return PTR_ERR(esw);
- mutex_lock(&esw->mode_lock);
+ down_write(&esw->mode_lock);
err = eswitch_devlink_esw_mode_check(esw);
if (err)
goto unlock;
*encap = esw->offloads.encap;
unlock:
- mutex_unlock(&esw->mode_lock);
+ up_write(&esw->mode_lock);
return 0;
}
@@ -3152,11 +3221,12 @@ void mlx5_eswitch_register_vport_reps(struct mlx5_eswitch *esw,
{
struct mlx5_eswitch_rep_data *rep_data;
struct mlx5_eswitch_rep *rep;
- int i;
+ unsigned long i;
esw->offloads.rep_ops[rep_type] = ops;
- mlx5_esw_for_all_reps(esw, i, rep) {
- if (likely(mlx5_eswitch_vport_has_rep(esw, i))) {
+ mlx5_esw_for_each_rep(esw, i, rep) {
+ if (likely(mlx5_eswitch_vport_has_rep(esw, rep->vport))) {
+ rep->esw = esw;
rep_data = &rep->rep_data[rep_type];
atomic_set(&rep_data->state, REP_REGISTERED);
}
@@ -3167,12 +3237,12 @@ EXPORT_SYMBOL(mlx5_eswitch_register_vport_reps);
void mlx5_eswitch_unregister_vport_reps(struct mlx5_eswitch *esw, u8 rep_type)
{
struct mlx5_eswitch_rep *rep;
- int i;
+ unsigned long i;
if (esw->mode == MLX5_ESWITCH_OFFLOADS)
__unload_reps_all_vport(esw, rep_type);
- mlx5_esw_for_all_reps(esw, i, rep)
+ mlx5_esw_for_each_rep(esw, i, rep)
atomic_set(&rep->rep_data[rep_type].state, REP_UNREGISTERED);
}
EXPORT_SYMBOL(mlx5_eswitch_unregister_vport_reps);
@@ -3213,12 +3283,6 @@ struct mlx5_eswitch_rep *mlx5_eswitch_vport_rep(struct mlx5_eswitch *esw,
}
EXPORT_SYMBOL(mlx5_eswitch_vport_rep);
-bool mlx5_eswitch_is_vf_vport(const struct mlx5_eswitch *esw, u16 vport_num)
-{
- return vport_num >= MLX5_VPORT_FIRST_VF &&
- vport_num <= esw->dev->priv.sriov.max_vfs;
-}
-
bool mlx5_eswitch_reg_c1_loopback_enabled(const struct mlx5_eswitch *esw)
{
return !!(esw->flags & MLX5_ESWITCH_REG_C1_LOOPBACK_ENABLED);
@@ -3244,7 +3308,7 @@ u32 mlx5_eswitch_get_vport_metadata_for_match(struct mlx5_eswitch *esw,
EXPORT_SYMBOL(mlx5_eswitch_get_vport_metadata_for_match);
int mlx5_esw_offloads_sf_vport_enable(struct mlx5_eswitch *esw, struct devlink_port *dl_port,
- u16 vport_num, u32 sfnum)
+ u16 vport_num, u32 controller, u32 sfnum)
{
int err;
@@ -3252,7 +3316,7 @@ int mlx5_esw_offloads_sf_vport_enable(struct mlx5_eswitch *esw, struct devlink_p
if (err)
return err;
- err = mlx5_esw_devlink_sf_port_register(esw, dl_port, vport_num, sfnum);
+ err = mlx5_esw_devlink_sf_port_register(esw, dl_port, vport_num, controller, sfnum);
if (err)
goto devlink_err;
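
Across these hunks esw->mode_lock changes from a mutex to a read-write semaphore: the mode get/set paths take it exclusively with down_write()/up_write(), and the mode-set path releases it through the mlx5_esw_unlock() wrapper. A minimal sketch of the resulting shape, assuming the struct layout and helper names implied by the diff:

#include <linux/rwsem.h>

struct esw_stub {
	struct rw_semaphore mode_lock;	/* was: struct mutex */
};

/* writers (devlink mode changes) take the lock exclusively */
static void esw_mode_lock(struct esw_stub *esw)
{
	down_write(&esw->mode_lock);
}

static void esw_mode_unlock(struct esw_stub *esw)
{
	up_write(&esw->mode_lock);
}

Paths that only need the mode to stay stable can later use down_read()/up_read() on the same semaphore, which a mutex could not express.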
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_termtbl.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_termtbl.c
index ec679560a95d..a81ece94f599 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_termtbl.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_termtbl.c
@@ -83,14 +83,16 @@ mlx5_eswitch_termtbl_create(struct mlx5_core_dev *dev,
ft_attr.autogroup.max_num_groups = 1;
tt->termtbl = mlx5_create_auto_grouped_flow_table(root_ns, &ft_attr);
if (IS_ERR(tt->termtbl)) {
- esw_warn(dev, "Failed to create termination table\n");
+ esw_warn(dev, "Failed to create termination table (error %ld)\n",
+ PTR_ERR(tt->termtbl));
return -EOPNOTSUPP;
}
tt->rule = mlx5_add_flow_rules(tt->termtbl, NULL, flow_act,
&tt->dest, 1);
if (IS_ERR(tt->rule)) {
- esw_warn(dev, "Failed to create termination table rule\n");
+ esw_warn(dev, "Failed to create termination table rule (error %ld)\n",
+ PTR_ERR(tt->rule));
goto add_flow_err;
}
return 0;
@@ -140,10 +142,9 @@ mlx5_eswitch_termtbl_get_create(struct mlx5_eswitch *esw,
memcpy(&tt->flow_act, flow_act, sizeof(*flow_act));
err = mlx5_eswitch_termtbl_create(esw->dev, tt, flow_act);
- if (err) {
- esw_warn(esw->dev, "Failed to create termination table\n");
+ if (err)
goto tt_create_err;
- }
+
hash_add(esw->offloads.termtbl_tbl, &tt->termtbl_hlist, hash_key);
tt_add_ref:
tt->ref_count++;
@@ -282,7 +283,8 @@ mlx5_eswitch_add_termtbl_rule(struct mlx5_eswitch *esw,
tt = mlx5_eswitch_termtbl_get_create(esw, &term_tbl_act,
&dest[i], attr);
if (IS_ERR(tt)) {
- esw_warn(esw->dev, "Failed to create termination table\n");
+ esw_warn(esw->dev, "Failed to get termination table (error %ld)\n",
+ PTR_ERR(tt));
goto revert_changes;
}
attr->dests[num_vport_dests].termtbl = tt;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c b/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c
index 22bee4990232..0bba92cf5dc0 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c
@@ -707,7 +707,7 @@ static void *mlx5_fpga_ipsec_create_sa_ctx(struct mlx5_core_dev *mdev,
}
if (accel_xfrm->attrs.action == MLX5_ACCEL_ESP_ACTION_DECRYPT) {
- err = ida_simple_get(&fipsec->halloc, 1, 0, GFP_KERNEL);
+ err = ida_alloc_min(&fipsec->halloc, 1, GFP_KERNEL);
if (err < 0) {
context = ERR_PTR(err);
goto exists;
@@ -758,7 +758,7 @@ delete_hash:
unlock_hash:
mutex_unlock(&fipsec->sa_hash_lock);
if (accel_xfrm->attrs.action == MLX5_ACCEL_ESP_ACTION_DECRYPT)
- ida_simple_remove(&fipsec->halloc, sa_ctx->sa_handle);
+ ida_free(&fipsec->halloc, sa_ctx->sa_handle);
exists:
mutex_unlock(&fpga_xfrm->lock);
kfree(sa_ctx);
@@ -850,9 +850,9 @@ mlx5_fpga_ipsec_release_sa_ctx(struct mlx5_fpga_ipsec_sa_ctx *sa_ctx)
return;
}
- if (sa_ctx->fpga_xfrm->accel_xfrm.attrs.action &
+ if (sa_ctx->fpga_xfrm->accel_xfrm.attrs.action ==
MLX5_ACCEL_ESP_ACTION_DECRYPT)
- ida_simple_remove(&fipsec->halloc, sa_ctx->sa_handle);
+ ida_free(&fipsec->halloc, sa_ctx->sa_handle);
mutex_lock(&fipsec->sa_hash_lock);
WARN_ON(rhashtable_remove_fast(&fipsec->sa_hash, &sa_ctx->hash,
@@ -1085,6 +1085,7 @@ static int fpga_ipsec_fs_create_fte(struct mlx5_flow_root_namespace *ns,
rule->ctx = mlx5_fpga_ipsec_fs_create_sa_ctx(dev, fte, is_egress);
if (IS_ERR(rule->ctx)) {
int err = PTR_ERR(rule->ctx);
+
kfree(rule);
return err;
}
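
This file, fs_core.c and lib/gid.c below all move from the deprecated ida_simple_*() wrappers to the ida_alloc_*()/ida_free() API. The bound semantics differ: ida_simple_get(ida, start, end, gfp) excludes end (and treats end == 0 as unlimited), while ida_alloc_max()/ida_alloc_range() take an inclusive maximum, which is why the conversions subtract one (fg->max_ftes - 1 in fs_core.c, start + count - 1 in gid.c). A minimal sketch of the replacement pairs:

#include <linux/idr.h>

static DEFINE_IDA(example_ida);

static int example_alloc_handle(void)
{
	/* was: ida_simple_get(&example_ida, 1, 0, GFP_KERNEL),
	 * i.e. id >= 1 with no upper bound
	 */
	return ida_alloc_min(&example_ida, 1, GFP_KERNEL);
}

static int example_alloc_bounded(unsigned int max_entries)
{
	/* was: ida_simple_get(&example_ida, 0, max_entries, GFP_KERNEL);
	 * ida_alloc_max() takes an inclusive maximum
	 */
	return ida_alloc_max(&example_ida, max_entries - 1, GFP_KERNEL);
}

static void example_free_handle(int id)
{
	ida_free(&example_ida, id);	/* was: ida_simple_remove() */
}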
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
index 66ad599bd488..f74d2c834037 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
@@ -105,7 +105,7 @@
#define ETHTOOL_PRIO_NUM_LEVELS 1
#define ETHTOOL_NUM_PRIOS 11
#define ETHTOOL_MIN_LEVEL (KERNEL_MIN_LEVEL + ETHTOOL_NUM_PRIOS)
-/* Promiscuous, Vlan, mac, ttc, inner ttc, {aRFS/accel and esp/esp_err} */
+/* Promiscuous, Vlan, mac, ttc, inner ttc, {UDP/ANY/aRFS/accel/{esp, esp_err}} */
#define KERNEL_NIC_PRIO_NUM_LEVELS 7
#define KERNEL_NIC_NUM_PRIOS 1
/* One more level for tc */
@@ -590,7 +590,7 @@ static void del_sw_fte(struct fs_node *node)
&fte->hash,
rhash_fte);
WARN_ON(err);
- ida_simple_remove(&fg->fte_allocator, fte->index - fg->start_index);
+ ida_free(&fg->fte_allocator, fte->index - fg->start_index);
kmem_cache_free(steering->ftes_cache, fte);
}
@@ -640,7 +640,7 @@ static int insert_fte(struct mlx5_flow_group *fg, struct fs_fte *fte)
int index;
int ret;
- index = ida_simple_get(&fg->fte_allocator, 0, fg->max_ftes, GFP_KERNEL);
+ index = ida_alloc_max(&fg->fte_allocator, fg->max_ftes - 1, GFP_KERNEL);
if (index < 0)
return index;
@@ -656,7 +656,7 @@ static int insert_fte(struct mlx5_flow_group *fg, struct fs_fte *fte)
return 0;
err_ida_remove:
- ida_simple_remove(&fg->fte_allocator, index);
+ ida_free(&fg->fte_allocator, index);
return ret;
}
@@ -2229,17 +2229,21 @@ struct mlx5_flow_namespace *mlx5_get_flow_vport_acl_namespace(struct mlx5_core_d
{
struct mlx5_flow_steering *steering = dev->priv.steering;
- if (!steering || vport >= mlx5_eswitch_get_total_vports(dev))
+ if (!steering)
return NULL;
switch (type) {
case MLX5_FLOW_NAMESPACE_ESW_EGRESS:
+ if (vport >= steering->esw_egress_acl_vports)
+ return NULL;
if (steering->esw_egress_root_ns &&
steering->esw_egress_root_ns[vport])
return &steering->esw_egress_root_ns[vport]->ns;
else
return NULL;
case MLX5_FLOW_NAMESPACE_ESW_INGRESS:
+ if (vport >= steering->esw_ingress_acl_vports)
+ return NULL;
if (steering->esw_ingress_root_ns &&
steering->esw_ingress_root_ns[vport])
return &steering->esw_ingress_root_ns[vport]->ns;
@@ -2395,14 +2399,12 @@ static int init_root_tree(struct mlx5_flow_steering *steering,
struct init_tree_node *init_node,
struct fs_node *fs_parent_node)
{
- int i;
- struct mlx5_flow_namespace *fs_ns;
int err;
+ int i;
- fs_get_obj(fs_ns, fs_parent_node);
for (i = 0; i < init_node->ar_size; i++) {
err = init_root_tree_recursive(steering, &init_node->children[i],
- &fs_ns->node,
+ fs_parent_node,
init_node, i);
if (err)
return err;
@@ -2573,43 +2575,11 @@ static void cleanup_root_ns(struct mlx5_flow_root_namespace *root_ns)
clean_tree(&root_ns->ns.node);
}
-static void cleanup_egress_acls_root_ns(struct mlx5_core_dev *dev)
-{
- struct mlx5_flow_steering *steering = dev->priv.steering;
- int i;
-
- if (!steering->esw_egress_root_ns)
- return;
-
- for (i = 0; i < mlx5_eswitch_get_total_vports(dev); i++)
- cleanup_root_ns(steering->esw_egress_root_ns[i]);
-
- kfree(steering->esw_egress_root_ns);
- steering->esw_egress_root_ns = NULL;
-}
-
-static void cleanup_ingress_acls_root_ns(struct mlx5_core_dev *dev)
-{
- struct mlx5_flow_steering *steering = dev->priv.steering;
- int i;
-
- if (!steering->esw_ingress_root_ns)
- return;
-
- for (i = 0; i < mlx5_eswitch_get_total_vports(dev); i++)
- cleanup_root_ns(steering->esw_ingress_root_ns[i]);
-
- kfree(steering->esw_ingress_root_ns);
- steering->esw_ingress_root_ns = NULL;
-}
-
void mlx5_cleanup_fs(struct mlx5_core_dev *dev)
{
struct mlx5_flow_steering *steering = dev->priv.steering;
cleanup_root_ns(steering->root_ns);
- cleanup_egress_acls_root_ns(dev);
- cleanup_ingress_acls_root_ns(dev);
cleanup_root_ns(steering->fdb_root_ns);
steering->fdb_root_ns = NULL;
kfree(steering->fdb_sub_ns);
@@ -2854,10 +2824,9 @@ static int init_ingress_acl_root_ns(struct mlx5_flow_steering *steering, int vpo
return PTR_ERR_OR_ZERO(prio);
}
-static int init_egress_acls_root_ns(struct mlx5_core_dev *dev)
+int mlx5_fs_egress_acls_init(struct mlx5_core_dev *dev, int total_vports)
{
struct mlx5_flow_steering *steering = dev->priv.steering;
- int total_vports = mlx5_eswitch_get_total_vports(dev);
int err;
int i;
@@ -2873,7 +2842,7 @@ static int init_egress_acls_root_ns(struct mlx5_core_dev *dev)
if (err)
goto cleanup_root_ns;
}
-
+ steering->esw_egress_acl_vports = total_vports;
return 0;
cleanup_root_ns:
@@ -2884,10 +2853,24 @@ cleanup_root_ns:
return err;
}
-static int init_ingress_acls_root_ns(struct mlx5_core_dev *dev)
+void mlx5_fs_egress_acls_cleanup(struct mlx5_core_dev *dev)
+{
+ struct mlx5_flow_steering *steering = dev->priv.steering;
+ int i;
+
+ if (!steering->esw_egress_root_ns)
+ return;
+
+ for (i = 0; i < steering->esw_egress_acl_vports; i++)
+ cleanup_root_ns(steering->esw_egress_root_ns[i]);
+
+ kfree(steering->esw_egress_root_ns);
+ steering->esw_egress_root_ns = NULL;
+}
+
+int mlx5_fs_ingress_acls_init(struct mlx5_core_dev *dev, int total_vports)
{
struct mlx5_flow_steering *steering = dev->priv.steering;
- int total_vports = mlx5_eswitch_get_total_vports(dev);
int err;
int i;
@@ -2903,7 +2886,7 @@ static int init_ingress_acls_root_ns(struct mlx5_core_dev *dev)
if (err)
goto cleanup_root_ns;
}
-
+ steering->esw_ingress_acl_vports = total_vports;
return 0;
cleanup_root_ns:
@@ -2914,6 +2897,21 @@ cleanup_root_ns:
return err;
}
+void mlx5_fs_ingress_acls_cleanup(struct mlx5_core_dev *dev)
+{
+ struct mlx5_flow_steering *steering = dev->priv.steering;
+ int i;
+
+ if (!steering->esw_ingress_root_ns)
+ return;
+
+ for (i = 0; i < steering->esw_ingress_acl_vports; i++)
+ cleanup_root_ns(steering->esw_ingress_root_ns[i]);
+
+ kfree(steering->esw_ingress_root_ns);
+ steering->esw_ingress_root_ns = NULL;
+}
+
static int init_egress_root_ns(struct mlx5_flow_steering *steering)
{
int err;
@@ -2976,16 +2974,6 @@ int mlx5_init_fs(struct mlx5_core_dev *dev)
if (err)
goto err;
}
- if (MLX5_CAP_ESW_EGRESS_ACL(dev, ft_support)) {
- err = init_egress_acls_root_ns(dev);
- if (err)
- goto err;
- }
- if (MLX5_CAP_ESW_INGRESS_ACL(dev, ft_support)) {
- err = init_ingress_acls_root_ns(dev);
- if (err)
- goto err;
- }
}
if (MLX5_CAP_FLOWTABLE_SNIFFER_RX(dev, ft_support)) {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
index b24a9849c45e..e577a2c424af 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
@@ -129,6 +129,8 @@ struct mlx5_flow_steering {
struct mlx5_flow_root_namespace *rdma_rx_root_ns;
struct mlx5_flow_root_namespace *rdma_tx_root_ns;
struct mlx5_flow_root_namespace *egress_root_ns;
+ int esw_egress_acl_vports;
+ int esw_ingress_acl_vports;
};
struct fs_node {
@@ -287,6 +289,11 @@ int mlx5_flow_namespace_set_mode(struct mlx5_flow_namespace *ns,
int mlx5_init_fs(struct mlx5_core_dev *dev);
void mlx5_cleanup_fs(struct mlx5_core_dev *dev);
+int mlx5_fs_egress_acls_init(struct mlx5_core_dev *dev, int total_vports);
+void mlx5_fs_egress_acls_cleanup(struct mlx5_core_dev *dev);
+int mlx5_fs_ingress_acls_init(struct mlx5_core_dev *dev, int total_vports);
+void mlx5_fs_ingress_acls_cleanup(struct mlx5_core_dev *dev);
+
#define fs_get_obj(v, _node) {v = container_of((_node), typeof(*v), node); }
#define fs_list_for_each_entry(pos, root) \
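
The ACL namespace arrays are now sized by their caller instead of mlx5_eswitch_get_total_vports(), and lookups in mlx5_get_flow_vport_acl_namespace() bounds-check against the recorded esw_egress_acl_vports/esw_ingress_acl_vports. A sketch of the expected caller pairing on the eswitch side (the real call sites live in the eswitch code and pass the number of vports it actually enables):

static int example_esw_acls_init(struct mlx5_core_dev *dev, int total_vports)
{
	int err;

	err = mlx5_fs_egress_acls_init(dev, total_vports);
	if (err)
		return err;

	err = mlx5_fs_ingress_acls_init(dev, total_vports);
	if (err) {
		mlx5_fs_egress_acls_cleanup(dev);
		return err;
	}
	return 0;
}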
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c
index f43caefd07a1..18e5aec14641 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c
@@ -497,13 +497,13 @@ static struct mlx5_fc_bulk *mlx5_fc_bulk_create(struct mlx5_core_dev *dev)
alloc_bitmask = MLX5_CAP_GEN(dev, flow_counter_bulk_alloc);
bulk_len = alloc_bitmask > 0 ? MLX5_FC_BULK_NUM_FCS(alloc_bitmask) : 1;
- bulk = kzalloc(sizeof(*bulk) + bulk_len * sizeof(struct mlx5_fc),
- GFP_KERNEL);
+ bulk = kvzalloc(sizeof(*bulk) + bulk_len * sizeof(struct mlx5_fc),
+ GFP_KERNEL);
if (!bulk)
goto err_alloc_bulk;
- bulk->bitmask = kcalloc(BITS_TO_LONGS(bulk_len), sizeof(unsigned long),
- GFP_KERNEL);
+ bulk->bitmask = kvcalloc(BITS_TO_LONGS(bulk_len), sizeof(unsigned long),
+ GFP_KERNEL);
if (!bulk->bitmask)
goto err_alloc_bitmask;
@@ -521,9 +521,9 @@ static struct mlx5_fc_bulk *mlx5_fc_bulk_create(struct mlx5_core_dev *dev)
return bulk;
err_mlx5_cmd_bulk_alloc:
- kfree(bulk->bitmask);
+ kvfree(bulk->bitmask);
err_alloc_bitmask:
- kfree(bulk);
+ kvfree(bulk);
err_alloc_bulk:
return ERR_PTR(err);
}
@@ -537,8 +537,8 @@ mlx5_fc_bulk_destroy(struct mlx5_core_dev *dev, struct mlx5_fc_bulk *bulk)
}
mlx5_cmd_fc_free(dev, bulk->base_id);
- kfree(bulk->bitmask);
- kfree(bulk);
+ kvfree(bulk->bitmask);
+ kvfree(bulk);
return 0;
}
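
A flow-counter bulk holds up to MLX5_FC_BULK_NUM_FCS() counters, so the allocation can be large; kvzalloc()/kvcalloc() try kmalloc first and fall back to vmalloc when physically contiguous pages are not available, and kvfree() releases memory from either path. A minimal sketch of the idiom with a hypothetical structure:

#include <linux/mm.h>
#include <linux/overflow.h>
#include <linux/slab.h>

struct big_table {
	size_t n;
	u64 entries[];
};

static struct big_table *big_table_alloc(size_t n)
{
	struct big_table *t;

	t = kvzalloc(struct_size(t, entries, n), GFP_KERNEL);
	if (t)
		t->n = n;
	return t;
}

static void big_table_free(struct big_table *t)
{
	kvfree(t);	/* correct for both kmalloc and vmalloc backing */
}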
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c b/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c
index f9042e147c7f..d5d57630015f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c
@@ -104,7 +104,7 @@ static void mlx5_fw_reset_complete_reload(struct mlx5_core_dev *dev)
if (test_bit(MLX5_FW_RESET_FLAGS_PENDING_COMP, &fw_reset->reset_flags)) {
complete(&fw_reset->done);
} else {
- mlx5_load_one(dev, false);
+ mlx5_load_one(dev);
devlink_remote_reload_actions_performed(priv_to_devlink(dev), 0,
BIT(DEVLINK_RELOAD_ACTION_DRIVER_REINIT) |
BIT(DEVLINK_RELOAD_ACTION_FW_ACTIVATE));
@@ -119,7 +119,7 @@ static void mlx5_sync_reset_reload_work(struct work_struct *work)
int err;
mlx5_enter_error_state(dev, true);
- mlx5_unload_one(dev, false);
+ mlx5_unload_one(dev);
err = mlx5_health_wait_pci_up(dev);
if (err)
mlx5_core_err(dev, "reset reload flow aborted, PCI reads still not working\n");
@@ -199,16 +199,11 @@ static void mlx5_fw_live_patch_event(struct work_struct *work)
struct mlx5_fw_reset *fw_reset = container_of(work, struct mlx5_fw_reset,
fw_live_patch_work);
struct mlx5_core_dev *dev = fw_reset->dev;
- struct mlx5_fw_tracer *tracer;
mlx5_core_info(dev, "Live patch updated firmware version: %d.%d.%d\n", fw_rev_maj(dev),
fw_rev_min(dev), fw_rev_sub(dev));
- tracer = dev->tracer;
- if (IS_ERR_OR_NULL(tracer))
- return;
-
- if (mlx5_fw_tracer_reload(tracer))
+ if (mlx5_fw_tracer_reload(dev->tracer))
mlx5_core_err(dev, "Failed to reload FW tracer\n");
}
@@ -342,7 +337,7 @@ static void mlx5_sync_reset_now_event(struct work_struct *work)
}
mlx5_enter_error_state(dev, true);
- mlx5_unload_one(dev, false);
+ mlx5_unload_one(dev);
done:
fw_reset->ret = err;
mlx5_fw_reset_complete_reload(dev);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/health.c b/drivers/net/ethernet/mellanox/mlx5/core/health.c
index 0c32c485eb58..9ff163c5bcde 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/health.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/health.c
@@ -335,12 +335,12 @@ static int mlx5_health_try_recover(struct mlx5_core_dev *dev)
return -EIO;
}
mlx5_core_err(dev, "starting health recovery flow\n");
- mlx5_recover_device(dev);
- if (!test_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state) ||
- mlx5_health_check_fatal_sensors(dev)) {
+ if (mlx5_recover_device(dev) || mlx5_health_check_fatal_sensors(dev)) {
mlx5_core_err(dev, "health recovery failed\n");
return -EIO;
}
+
+ mlx5_core_info(dev, "health recovery succeeded\n");
return 0;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
index 6f7cef47e04c..612a7f69366d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
@@ -33,6 +33,7 @@
#include <rdma/ib_verbs.h>
#include <linux/mlx5/fs.h>
#include "en.h"
+#include "en/params.h"
#include "ipoib.h"
#define IB_DEFAULT_Q_KEY 0xb1b
@@ -372,6 +373,7 @@ static void mlx5i_destroy_flow_steering(struct mlx5e_priv *priv)
static int mlx5i_init_rx(struct mlx5e_priv *priv)
{
struct mlx5_core_dev *mdev = priv->mdev;
+ u16 max_nch = priv->max_nch;
int err;
mlx5e_create_q_counters(priv);
@@ -386,7 +388,7 @@ static int mlx5i_init_rx(struct mlx5e_priv *priv)
if (err)
goto err_close_drop_rq;
- err = mlx5e_create_direct_rqts(priv, priv->direct_tir);
+ err = mlx5e_create_direct_rqts(priv, priv->direct_tir, max_nch);
if (err)
goto err_destroy_indirect_rqts;
@@ -394,7 +396,7 @@ static int mlx5i_init_rx(struct mlx5e_priv *priv)
if (err)
goto err_destroy_direct_rqts;
- err = mlx5e_create_direct_tirs(priv, priv->direct_tir);
+ err = mlx5e_create_direct_tirs(priv, priv->direct_tir, max_nch);
if (err)
goto err_destroy_indirect_tirs;
@@ -405,11 +407,11 @@ static int mlx5i_init_rx(struct mlx5e_priv *priv)
return 0;
err_destroy_direct_tirs:
- mlx5e_destroy_direct_tirs(priv, priv->direct_tir);
+ mlx5e_destroy_direct_tirs(priv, priv->direct_tir, max_nch);
err_destroy_indirect_tirs:
mlx5e_destroy_indirect_tirs(priv);
err_destroy_direct_rqts:
- mlx5e_destroy_direct_rqts(priv, priv->direct_tir);
+ mlx5e_destroy_direct_rqts(priv, priv->direct_tir, max_nch);
err_destroy_indirect_rqts:
mlx5e_destroy_rqt(priv, &priv->indir_rqt);
err_close_drop_rq:
@@ -421,10 +423,12 @@ err_destroy_q_counters:
static void mlx5i_cleanup_rx(struct mlx5e_priv *priv)
{
+ u16 max_nch = priv->max_nch;
+
mlx5i_destroy_flow_steering(priv);
- mlx5e_destroy_direct_tirs(priv, priv->direct_tir);
+ mlx5e_destroy_direct_tirs(priv, priv->direct_tir, max_nch);
mlx5e_destroy_indirect_tirs(priv);
- mlx5e_destroy_direct_rqts(priv, priv->direct_tir);
+ mlx5e_destroy_direct_rqts(priv, priv->direct_tir, max_nch);
mlx5e_destroy_rqt(priv, &priv->indir_rqt);
mlx5e_close_drop_rq(&priv->drop_rq);
mlx5e_destroy_q_counters(priv);
@@ -469,6 +473,7 @@ static const struct mlx5e_profile mlx5i_nic_profile = {
.rq_groups = MLX5E_NUM_RQ_GROUPS(REGULAR),
.stats_grps = mlx5i_stats_grps,
.stats_grps_num = mlx5i_stats_grps_num,
+ .rx_ptp_support = false,
};
/* mlx5i netdev NDOs */
@@ -476,28 +481,19 @@ static const struct mlx5e_profile mlx5i_nic_profile = {
static int mlx5i_change_mtu(struct net_device *netdev, int new_mtu)
{
struct mlx5e_priv *priv = mlx5i_epriv(netdev);
- struct mlx5e_channels new_channels = {};
- struct mlx5e_params *params;
+ struct mlx5e_params new_params;
int err = 0;
mutex_lock(&priv->state_lock);
- params = &priv->channels.params;
-
- if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) {
- params->sw_mtu = new_mtu;
- netdev->mtu = params->sw_mtu;
- goto out;
- }
-
- new_channels.params = *params;
- new_channels.params.sw_mtu = new_mtu;
+ new_params = priv->channels.params;
+ new_params.sw_mtu = new_mtu;
- err = mlx5e_safe_switch_channels(priv, &new_channels, NULL, NULL);
+ err = mlx5e_safe_switch_params(priv, &new_params, NULL, NULL, true);
if (err)
goto out;
- netdev->mtu = new_channels.params.sw_mtu;
+ netdev->mtu = new_params.sw_mtu;
out:
mutex_unlock(&priv->state_lock);
@@ -710,7 +706,7 @@ static void mlx5_rdma_netdev_free(struct net_device *netdev)
static bool mlx5_is_sub_interface(struct mlx5_core_dev *mdev)
{
- return mdev->mlx5e_res.pdn != 0;
+ return mdev->mlx5e_res.hw_objs.pdn != 0;
}
static const struct mlx5e_profile *mlx5_get_profile(struct mlx5_core_dev *mdev)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c
index 3d0a18a0bed4..18ee21b06a00 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c
@@ -350,6 +350,7 @@ static const struct mlx5e_profile mlx5i_pkey_nic_profile = {
.rx_handlers = &mlx5i_rx_handlers,
.max_tc = MLX5I_MAX_NUM_TC,
.rq_groups = MLX5E_NUM_RQ_GROUPS(REGULAR),
+ .rx_ptp_support = false,
};
const struct mlx5e_profile *mlx5i_pkey_get_profile(void)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag.c b/drivers/net/ethernet/mellanox/mlx5/core/lag.c
index 83a05371e2aa..b8748390335f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag.c
@@ -603,8 +603,6 @@ void mlx5_lag_add(struct mlx5_core_dev *dev, struct net_device *netdev)
if (err)
mlx5_core_err(dev, "Failed to init multipath lag err=%d\n",
err);
-
- return;
}
/* Must be called with intf_mutex held */
@@ -768,7 +766,7 @@ int mlx5_lag_query_cong_counters(struct mlx5_core_dev *dev,
spin_lock(&lag_lock);
ldev = mlx5_lag_dev_get(dev);
- if (ldev && __mlx5_lag_is_roce(ldev)) {
+ if (ldev && __mlx5_lag_is_active(ldev)) {
num_ports = MLX5_MAX_PORTS;
mdev[MLX5_LAG_P1] = ldev->pf[MLX5_LAG_P1].dev;
mdev[MLX5_LAG_P2] = ldev->pf[MLX5_LAG_P2].dev;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag_mp.c b/drivers/net/ethernet/mellanox/mlx5/core/lag_mp.c
index 88e58ac902de..2c41a6920264 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag_mp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag_mp.c
@@ -35,7 +35,7 @@ bool mlx5_lag_is_multipath(struct mlx5_core_dev *dev)
}
/**
- * Set lag port affinity
+ * mlx5_lag_set_port_affinity
*
* @ldev: lag device
* @port: lag port to direct traffic to
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
index 1e7f26b240de..ce696d523493 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
@@ -645,16 +645,19 @@ static int mlx5_get_pps_pin_mode(struct mlx5_clock *clock, u8 pin)
return PTP_PF_NONE;
}
-static int mlx5_init_pin_config(struct mlx5_clock *clock)
+static void mlx5_init_pin_config(struct mlx5_clock *clock)
{
int i;
+ if (!clock->ptp_info.n_pins)
+ return;
+
clock->ptp_info.pin_config =
kcalloc(clock->ptp_info.n_pins,
sizeof(*clock->ptp_info.pin_config),
GFP_KERNEL);
if (!clock->ptp_info.pin_config)
- return -ENOMEM;
+ return;
clock->ptp_info.enable = mlx5_ptp_enable;
clock->ptp_info.verify = mlx5_ptp_verify;
clock->ptp_info.pps = 1;
@@ -667,8 +670,6 @@ static int mlx5_init_pin_config(struct mlx5_clock *clock)
clock->ptp_info.pin_config[i].func = mlx5_get_pps_pin_mode(clock, i);
clock->ptp_info.pin_config[i].chan = 0;
}
-
- return 0;
}
static void mlx5_get_pps_caps(struct mlx5_core_dev *mdev)
@@ -859,6 +860,17 @@ static void mlx5_init_timer_clock(struct mlx5_core_dev *mdev)
}
}
+static void mlx5_init_pps(struct mlx5_core_dev *mdev)
+{
+ struct mlx5_clock *clock = &mdev->clock;
+
+ if (!MLX5_PPS_CAP(mdev))
+ return;
+
+ mlx5_get_pps_caps(mdev);
+ mlx5_init_pin_config(clock);
+}
+
void mlx5_init_clock(struct mlx5_core_dev *mdev)
{
struct mlx5_clock *clock = &mdev->clock;
@@ -876,10 +888,7 @@ void mlx5_init_clock(struct mlx5_core_dev *mdev)
clock->ptp_info = mlx5_ptp_clock_info;
/* Initialize 1PPS data structures */
- if (MLX5_PPS_CAP(mdev))
- mlx5_get_pps_caps(mdev);
- if (clock->ptp_info.n_pins)
- mlx5_init_pin_config(clock);
+ mlx5_init_pps(mdev);
clock->ptp = ptp_clock_register(&clock->ptp_info,
&mdev->pdev->dev);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.h b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.h
index a12c7da618a7..ceae6bc378e0 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.h
@@ -105,4 +105,15 @@ static inline ktime_t mlx5_real_time_cyc2time(struct mlx5_clock *clock,
}
#endif
+static inline cqe_ts_to_ns mlx5_rq_ts_translator(struct mlx5_core_dev *mdev)
+{
+ return mlx5_is_real_time_rq(mdev) ? mlx5_real_time_cyc2time :
+ mlx5_timecounter_cyc2time;
+}
+
+static inline cqe_ts_to_ns mlx5_sq_ts_translator(struct mlx5_core_dev *mdev)
+{
+ return mlx5_is_real_time_sq(mdev) ? mlx5_real_time_cyc2time :
+ mlx5_timecounter_cyc2time;
+}
#endif
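
These helpers resolve the CQE-timestamp conversion function once, so the per-packet path calls through a function pointer instead of re-checking the real-time-clock capability on every completion. Illustrative use, assuming the cqe_ts_to_ns typedef takes (struct mlx5_clock *, u64); in practice the pointer is stored in the queue at init time rather than resolved per call:

static ktime_t example_cqe_ts_to_ns(struct mlx5_core_dev *mdev, u64 cqe_ts)
{
	cqe_ts_to_ns translate = mlx5_rq_ts_translator(mdev);

	return translate(&mdev->clock, cqe_ts);
}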
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/crypto.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/crypto.c
index 57eb91bcbca7..e995f8378df7 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/crypto.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/crypto.c
@@ -46,7 +46,7 @@ int mlx5_create_encryption_key(struct mlx5_core_dev *mdev,
MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
MLX5_SET(general_obj_in_cmd_hdr, in, obj_type,
MLX5_GENERAL_OBJECT_TYPES_ENCRYPTION_KEY);
- MLX5_SET(encryption_key_obj, obj, pd, mdev->mlx5e_res.pdn);
+ MLX5_SET(encryption_key_obj, obj, pd, mdev->mlx5e_res.hw_objs.pdn);
err = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
if (!err)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/eq.h b/drivers/net/ethernet/mellanox/mlx5/core/lib/eq.h
index 81f2cc4ca1da..f607a3858ef5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/eq.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/eq.h
@@ -22,15 +22,15 @@ struct mlx5_cq_table {
};
struct mlx5_eq {
+ struct mlx5_frag_buf_ctrl fbc;
+ struct mlx5_frag_buf frag_buf;
struct mlx5_core_dev *dev;
struct mlx5_cq_table cq_table;
__be32 __iomem *doorbell;
u32 cons_index;
- struct mlx5_frag_buf buf;
unsigned int vecidx;
unsigned int irqn;
u8 eqn;
- int nent;
struct mlx5_rsc_debug *dbg;
};
@@ -47,16 +47,21 @@ struct mlx5_eq_comp {
struct list_head list;
};
+static inline u32 eq_get_size(struct mlx5_eq *eq)
+{
+ return eq->fbc.sz_m1 + 1;
+}
+
static inline struct mlx5_eqe *get_eqe(struct mlx5_eq *eq, u32 entry)
{
- return mlx5_buf_offset(&eq->buf, entry * MLX5_EQE_SIZE);
+ return mlx5_frag_buf_get_wqe(&eq->fbc, entry);
}
static inline struct mlx5_eqe *next_eqe_sw(struct mlx5_eq *eq)
{
- struct mlx5_eqe *eqe = get_eqe(eq, eq->cons_index & (eq->nent - 1));
+ struct mlx5_eqe *eqe = get_eqe(eq, eq->cons_index & eq->fbc.sz_m1);
- return ((eqe->owner & 1) ^ !!(eq->cons_index & eq->nent)) ? NULL : eqe;
+ return (eqe->owner ^ (eq->cons_index >> eq->fbc.log_sz)) & 1 ? NULL : eqe;
}
static inline void eq_update_ci(struct mlx5_eq *eq, int arm)
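
The ownership test in next_eqe_sw() changes form but not meaning for contiguous buffers: with nent = 2^log_sz entries, !!(cons_index & nent) and (cons_index >> log_sz) & 1 are the same phase bit, which flips each time the consumer wraps the queue. The new expression also holds for fragmented buffers, where sz_m1 and log_sz come from the frag-buf control block. A standalone illustration of the check:

#include <stdbool.h>
#include <stdint.h>

/* An EQE is software-owned when its owner bit matches the consumer's
 * current phase, i.e. bit log_sz of the consumer index.
 */
static bool eqe_is_sw_owned(uint8_t owner, uint32_t cons_index, uint8_t log_sz)
{
	return ((owner ^ (cons_index >> log_sz)) & 1) == 0;
}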
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.c
index 381325b4a863..00ef10a1a9f8 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.c
@@ -7,15 +7,11 @@
#include "lib/fs_chains.h"
#include "en/mapping.h"
-#include "mlx5_core.h"
#include "fs_core.h"
-#include "eswitch.h"
-#include "en.h"
#include "en_tc.h"
#define chains_lock(chains) ((chains)->lock)
#define chains_ht(chains) ((chains)->chains_ht)
-#define chains_mapping(chains) ((chains)->chains_mapping)
#define prios_ht(chains) ((chains)->prios_ht)
#define ft_pool_left(chains) ((chains)->ft_left)
#define tc_default_ft(chains) ((chains)->tc_default_ft)
@@ -300,7 +296,7 @@ create_chain_restore(struct fs_chain *chain)
!mlx5_chains_prios_supported(chains))
return 0;
- err = mapping_add(chains_mapping(chains), &chain->chain, &index);
+ err = mlx5_chains_get_chain_mapping(chains, chain->chain, &index);
if (err)
return err;
if (index == MLX5_FS_DEFAULT_FLOW_TAG) {
@@ -310,10 +306,8 @@ create_chain_restore(struct fs_chain *chain)
*
* This case isn't possible with MLX5_FS_DEFAULT_FLOW_TAG = 0.
*/
- err = mapping_add(chains_mapping(chains),
- &chain->chain, &index);
- mapping_remove(chains_mapping(chains),
- MLX5_FS_DEFAULT_FLOW_TAG);
+ err = mlx5_chains_get_chain_mapping(chains, chain->chain, &index);
+ mapping_remove(chains->chains_mapping, MLX5_FS_DEFAULT_FLOW_TAG);
if (err)
return err;
}
@@ -361,7 +355,7 @@ err_mod_hdr:
mlx5_del_flow_rules(chain->restore_rule);
err_rule:
/* Datapath can't find this mapping, so we can safely remove it */
- mapping_remove(chains_mapping(chains), chain->id);
+ mapping_remove(chains->chains_mapping, chain->id);
return err;
}
@@ -376,7 +370,7 @@ static void destroy_chain_restore(struct fs_chain *chain)
mlx5_del_flow_rules(chain->restore_rule);
mlx5_modify_header_dealloc(chains->dev, chain->miss_modify_hdr);
- mapping_remove(chains_mapping(chains), chain->id);
+ mapping_remove(chains->chains_mapping, chain->id);
}
static struct fs_chain *
@@ -797,7 +791,6 @@ static struct mlx5_fs_chains *
mlx5_chains_init(struct mlx5_core_dev *dev, struct mlx5_chains_attr *attr)
{
struct mlx5_fs_chains *chains_priv;
- struct mapping_ctx *mapping;
u32 max_flow_counter;
int err;
@@ -816,6 +809,7 @@ mlx5_chains_init(struct mlx5_core_dev *dev, struct mlx5_chains_attr *attr)
chains_priv->flags = attr->flags;
chains_priv->ns = attr->ns;
chains_priv->group_num = attr->max_grp_num;
+ chains_priv->chains_mapping = attr->mapping;
tc_default_ft(chains_priv) = tc_end_ft(chains_priv) = attr->default_ft;
mlx5_core_info(dev, "Supported tc offload range - chains: %u, prios: %u\n",
@@ -832,20 +826,10 @@ mlx5_chains_init(struct mlx5_core_dev *dev, struct mlx5_chains_attr *attr)
if (err)
goto init_prios_ht_err;
- mapping = mapping_create(sizeof(u32), attr->max_restore_tag,
- true);
- if (IS_ERR(mapping)) {
- err = PTR_ERR(mapping);
- goto mapping_err;
- }
- chains_mapping(chains_priv) = mapping;
-
mutex_init(&chains_lock(chains_priv));
return chains_priv;
-mapping_err:
- rhashtable_destroy(&prios_ht(chains_priv));
init_prios_ht_err:
rhashtable_destroy(&chains_ht(chains_priv));
init_chains_ht_err:
@@ -857,7 +841,6 @@ static void
mlx5_chains_cleanup(struct mlx5_fs_chains *chains)
{
mutex_destroy(&chains_lock(chains));
- mapping_destroy(chains_mapping(chains));
rhashtable_destroy(&prios_ht(chains));
rhashtable_destroy(&chains_ht(chains));
@@ -884,25 +867,18 @@ int
mlx5_chains_get_chain_mapping(struct mlx5_fs_chains *chains, u32 chain,
u32 *chain_mapping)
{
- return mapping_add(chains_mapping(chains), &chain, chain_mapping);
+ struct mapping_ctx *ctx = chains->chains_mapping;
+ struct mlx5_mapped_obj mapped_obj = {};
+
+ mapped_obj.type = MLX5_MAPPED_OBJ_CHAIN;
+ mapped_obj.chain = chain;
+ return mapping_add(ctx, &mapped_obj, chain_mapping);
}
int
mlx5_chains_put_chain_mapping(struct mlx5_fs_chains *chains, u32 chain_mapping)
{
- return mapping_remove(chains_mapping(chains), chain_mapping);
-}
-
-int mlx5_get_chain_for_tag(struct mlx5_fs_chains *chains, u32 tag,
- u32 *chain)
-{
- int err;
+ struct mapping_ctx *ctx = chains->chains_mapping;
- err = mapping_find(chains_mapping(chains), tag, chain);
- if (err) {
- mlx5_core_warn(chains->dev, "Can't find chain for tag: %d\n", tag);
- return -ENOENT;
- }
-
- return 0;
+ return mapping_remove(ctx, chain_mapping);
}
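
The chains code no longer creates its own mapping context; it receives a shared one through mlx5_chains_attr::mapping (see the header change below) and stores typed objects in it, so a restore tag can describe more than a bare chain id. A sketch of the add side, with the object type abridged to the chain variant used here:

struct mapped_obj_stub {
	int type;	/* e.g. MLX5_MAPPED_OBJ_CHAIN */
	u32 chain;
};

static int example_chain_mapping_add(struct mapping_ctx *ctx, u32 chain,
				     u32 *id)
{
	struct mapped_obj_stub obj = {
		.type  = 0,	/* stands in for MLX5_MAPPED_OBJ_CHAIN */
		.chain = chain,
	};

	return mapping_add(ctx, &obj, id);
}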
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.h b/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.h
index 6d5be31b05dd..e96f345e7dae 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.h
@@ -7,6 +7,7 @@
#include <linux/mlx5/fs.h>
struct mlx5_fs_chains;
+struct mlx5_mapped_obj;
enum mlx5_chains_flags {
MLX5_CHAINS_AND_PRIOS_SUPPORTED = BIT(0),
@@ -20,7 +21,7 @@ struct mlx5_chains_attr {
u32 max_ft_sz;
u32 max_grp_num;
struct mlx5_flow_table *default_ft;
- u32 max_restore_tag;
+ struct mapping_ctx *mapping;
};
#if IS_ENABLED(CONFIG_MLX5_CLS_ACT)
@@ -63,9 +64,6 @@ struct mlx5_fs_chains *
mlx5_chains_create(struct mlx5_core_dev *dev, struct mlx5_chains_attr *attr);
void mlx5_chains_destroy(struct mlx5_fs_chains *chains);
-int
-mlx5_get_chain_for_tag(struct mlx5_fs_chains *chains, u32 tag, u32 *chain);
-
void
mlx5_chains_set_end_ft(struct mlx5_fs_chains *chains,
struct mlx5_flow_table *ft);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/gid.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/gid.c
index a68738c8f4bc..3f9869c7e326 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/gid.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/gid.c
@@ -55,10 +55,6 @@ void mlx5_cleanup_reserved_gids(struct mlx5_core_dev *dev)
int mlx5_core_reserve_gids(struct mlx5_core_dev *dev, unsigned int count)
{
- if (test_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state)) {
- mlx5_core_err(dev, "Cannot reserve GIDs when interfaces are up\n");
- return -EPERM;
- }
if (dev->roce.reserved_gids.start < count) {
mlx5_core_warn(dev, "GID table exhausted attempting to reserve %d more GIDs\n",
count);
@@ -79,7 +75,6 @@ int mlx5_core_reserve_gids(struct mlx5_core_dev *dev, unsigned int count)
void mlx5_core_unreserve_gids(struct mlx5_core_dev *dev, unsigned int count)
{
- WARN(test_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state), "Unreserving GIDs when interfaces are up");
WARN(count > dev->roce.reserved_gids.count, "Unreserving %u GIDs when only %u reserved",
count, dev->roce.reserved_gids.count);
@@ -93,12 +88,12 @@ void mlx5_core_unreserve_gids(struct mlx5_core_dev *dev, unsigned int count)
int mlx5_core_reserved_gid_alloc(struct mlx5_core_dev *dev, int *gid_index)
{
int end = dev->roce.reserved_gids.start +
- dev->roce.reserved_gids.count;
+ dev->roce.reserved_gids.count - 1;
int index = 0;
- index = ida_simple_get(&dev->roce.reserved_gids.ida,
- dev->roce.reserved_gids.start, end,
- GFP_KERNEL);
+ index = ida_alloc_range(&dev->roce.reserved_gids.ida,
+ dev->roce.reserved_gids.start, end,
+ GFP_KERNEL);
if (index < 0)
return index;
@@ -110,7 +105,7 @@ int mlx5_core_reserved_gid_alloc(struct mlx5_core_dev *dev, int *gid_index)
void mlx5_core_reserved_gid_free(struct mlx5_core_dev *dev, int gid_index)
{
mlx5_core_dbg(dev, "Freeing reserved GID %u\n", gid_index);
- ida_simple_remove(&dev->roce.reserved_gids.ida, gid_index);
+ ida_free(&dev->roce.reserved_gids.ida, gid_index);
}
unsigned int mlx5_core_reserved_gids_count(struct mlx5_core_dev *dev)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/mlx5.h b/drivers/net/ethernet/mellanox/mlx5/core/lib/mlx5.h
index d046db7bb047..2f536c5d30b1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/mlx5.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/mlx5.h
@@ -95,4 +95,13 @@ static inline struct net *mlx5_core_net(struct mlx5_core_dev *dev)
return devlink_net(priv_to_devlink(dev));
}
+static inline void mlx5_uplink_netdev_set(struct mlx5_core_dev *mdev, struct net_device *netdev)
+{
+ mdev->mlx5e_res.uplink_netdev = netdev;
+}
+
+static inline struct net_device *mlx5_uplink_netdev_get(struct mlx5_core_dev *mdev)
+{
+ return mdev->mlx5e_res.uplink_netdev;
+}
#endif
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index c568896cfb23..c114365eb126 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -571,6 +571,10 @@ static int handle_hca_cap(struct mlx5_core_dev *dev, void *set_ctx)
mlx5_vhca_state_cap_handle(dev, set_hca_cap);
+ if (MLX5_CAP_GEN_MAX(dev, num_total_dynamic_vf_msix))
+ MLX5_SET(cmd_hca_cap, set_hca_cap, num_total_dynamic_vf_msix,
+ MLX5_CAP_GEN_MAX(dev, num_total_dynamic_vf_msix));
+
return set_caps(dev, set_ctx, MLX5_SET_HCA_CAP_OP_MOD_GENERAL_DEVICE);
}
@@ -1235,7 +1239,7 @@ static void mlx5_unload(struct mlx5_core_dev *dev)
mlx5_put_uars_page(dev, dev->priv.uar);
}
-int mlx5_load_one(struct mlx5_core_dev *dev, bool boot)
+int mlx5_init_one(struct mlx5_core_dev *dev)
{
int err = 0;
@@ -1247,16 +1251,14 @@ int mlx5_load_one(struct mlx5_core_dev *dev, bool boot)
/* remove any previous indication of internal error */
dev->state = MLX5_DEVICE_STATE_UP;
- err = mlx5_function_setup(dev, boot);
+ err = mlx5_function_setup(dev, true);
if (err)
goto err_function;
- if (boot) {
- err = mlx5_init_once(dev);
- if (err) {
- mlx5_core_err(dev, "sw objs init failed\n");
- goto function_teardown;
- }
+ err = mlx5_init_once(dev);
+ if (err) {
+ mlx5_core_err(dev, "sw objs init failed\n");
+ goto function_teardown;
}
err = mlx5_load(dev);
@@ -1265,16 +1267,11 @@ int mlx5_load_one(struct mlx5_core_dev *dev, bool boot)
set_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state);
- if (boot) {
- err = mlx5_devlink_register(priv_to_devlink(dev), dev->device);
- if (err)
- goto err_devlink_reg;
-
- err = mlx5_register_device(dev);
- } else {
- err = mlx5_attach_device(dev);
- }
+ err = mlx5_devlink_register(priv_to_devlink(dev), dev->device);
+ if (err)
+ goto err_devlink_reg;
+ err = mlx5_register_device(dev);
if (err)
goto err_register;
@@ -1282,16 +1279,14 @@ int mlx5_load_one(struct mlx5_core_dev *dev, bool boot)
return 0;
err_register:
- if (boot)
- mlx5_devlink_unregister(priv_to_devlink(dev));
+ mlx5_devlink_unregister(priv_to_devlink(dev));
err_devlink_reg:
clear_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state);
mlx5_unload(dev);
err_load:
- if (boot)
- mlx5_cleanup_once(dev);
+ mlx5_cleanup_once(dev);
function_teardown:
- mlx5_function_teardown(dev, boot);
+ mlx5_function_teardown(dev, true);
err_function:
dev->state = MLX5_DEVICE_STATE_INTERNAL_ERROR;
out:
@@ -1299,33 +1294,84 @@ out:
return err;
}
-void mlx5_unload_one(struct mlx5_core_dev *dev, bool cleanup)
+void mlx5_uninit_one(struct mlx5_core_dev *dev)
{
mutex_lock(&dev->intf_state_mutex);
- if (cleanup) {
- mlx5_unregister_device(dev);
- mlx5_devlink_unregister(priv_to_devlink(dev));
- } else {
- mlx5_detach_device(dev);
- }
+ mlx5_unregister_device(dev);
+ mlx5_devlink_unregister(priv_to_devlink(dev));
if (!test_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state)) {
mlx5_core_warn(dev, "%s: interface is down, NOP\n",
__func__);
- if (cleanup)
- mlx5_cleanup_once(dev);
+ mlx5_cleanup_once(dev);
goto out;
}
clear_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state);
+ mlx5_unload(dev);
+ mlx5_cleanup_once(dev);
+ mlx5_function_teardown(dev, true);
+out:
+ mutex_unlock(&dev->intf_state_mutex);
+}
+int mlx5_load_one(struct mlx5_core_dev *dev)
+{
+ int err = 0;
+
+ mutex_lock(&dev->intf_state_mutex);
+ if (test_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state)) {
+ mlx5_core_warn(dev, "interface is up, NOP\n");
+ goto out;
+ }
+ /* remove any previous indication of internal error */
+ dev->state = MLX5_DEVICE_STATE_UP;
+
+ err = mlx5_function_setup(dev, false);
+ if (err)
+ goto err_function;
+
+ err = mlx5_load(dev);
+ if (err)
+ goto err_load;
+
+ set_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state);
+
+ err = mlx5_attach_device(dev);
+ if (err)
+ goto err_attach;
+
+ mutex_unlock(&dev->intf_state_mutex);
+ return 0;
+
+err_attach:
+ clear_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state);
mlx5_unload(dev);
+err_load:
+ mlx5_function_teardown(dev, false);
+err_function:
+ dev->state = MLX5_DEVICE_STATE_INTERNAL_ERROR;
+out:
+ mutex_unlock(&dev->intf_state_mutex);
+ return err;
+}
- if (cleanup)
- mlx5_cleanup_once(dev);
+void mlx5_unload_one(struct mlx5_core_dev *dev)
+{
+ mutex_lock(&dev->intf_state_mutex);
- mlx5_function_teardown(dev, cleanup);
+ mlx5_detach_device(dev);
+
+ if (!test_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state)) {
+ mlx5_core_warn(dev, "%s: interface is down, NOP\n",
+ __func__);
+ goto out;
+ }
+
+ clear_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state);
+ mlx5_unload(dev);
+ mlx5_function_teardown(dev, false);
out:
mutex_unlock(&dev->intf_state_mutex);
}
@@ -1397,7 +1443,7 @@ void mlx5_mdev_uninit(struct mlx5_core_dev *dev)
mutex_destroy(&dev->intf_state_mutex);
}
-static int init_one(struct pci_dev *pdev, const struct pci_device_id *id)
+static int probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
{
struct mlx5_core_dev *dev;
struct devlink *devlink;
@@ -1433,11 +1479,11 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *id)
goto pci_init_err;
}
- err = mlx5_load_one(dev, true);
+ err = mlx5_init_one(dev);
if (err) {
- mlx5_core_err(dev, "mlx5_load_one failed with error code %d\n",
+ mlx5_core_err(dev, "mlx5_init_one failed with error code %d\n",
err);
- goto err_load_one;
+ goto err_init_one;
}
err = mlx5_crdump_enable(dev);
@@ -1449,7 +1495,7 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *id)
devlink_reload_enable(devlink);
return 0;
-err_load_one:
+err_init_one:
mlx5_pci_close(dev);
pci_init_err:
mlx5_mdev_uninit(dev);
@@ -1469,7 +1515,7 @@ static void remove_one(struct pci_dev *pdev)
devlink_reload_disable(devlink);
mlx5_crdump_disable(dev);
mlx5_drain_health_wq(dev);
- mlx5_unload_one(dev, true);
+ mlx5_uninit_one(dev);
mlx5_pci_close(dev);
mlx5_mdev_uninit(dev);
mlx5_adev_idx_free(dev->priv.adev_idx);
@@ -1485,7 +1531,7 @@ static pci_ers_result_t mlx5_pci_err_detected(struct pci_dev *pdev,
mlx5_enter_error_state(dev, false);
mlx5_error_sw_reset(dev);
- mlx5_unload_one(dev, false);
+ mlx5_unload_one(dev);
mlx5_drain_health_wq(dev);
mlx5_pci_disable_device(dev);
@@ -1555,7 +1601,7 @@ static void mlx5_pci_resume(struct pci_dev *pdev)
mlx5_core_info(dev, "%s was called\n", __func__);
- err = mlx5_load_one(dev, false);
+ err = mlx5_load_one(dev);
if (err)
mlx5_core_err(dev, "%s: mlx5_load_one failed with error code: %d\n",
__func__, err);
@@ -1627,7 +1673,7 @@ static void shutdown(struct pci_dev *pdev)
mlx5_core_info(dev, "Shutdown was called\n");
err = mlx5_try_fast_unload(dev);
if (err)
- mlx5_unload_one(dev, false);
+ mlx5_unload_one(dev);
mlx5_pci_disable_device(dev);
}
@@ -1635,7 +1681,7 @@ static int mlx5_suspend(struct pci_dev *pdev, pm_message_t state)
{
struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
- mlx5_unload_one(dev, false);
+ mlx5_unload_one(dev);
return 0;
}
@@ -1644,7 +1690,7 @@ static int mlx5_resume(struct pci_dev *pdev)
{
struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
- return mlx5_load_one(dev, false);
+ return mlx5_load_one(dev);
}
static const struct pci_device_id mlx5_core_pci_table[] = {
@@ -1676,26 +1722,31 @@ MODULE_DEVICE_TABLE(pci, mlx5_core_pci_table);
void mlx5_disable_device(struct mlx5_core_dev *dev)
{
mlx5_error_sw_reset(dev);
- mlx5_unload_one(dev, false);
+ mlx5_unload_one(dev);
}
-void mlx5_recover_device(struct mlx5_core_dev *dev)
+int mlx5_recover_device(struct mlx5_core_dev *dev)
{
+ int ret = -EIO;
+
mlx5_pci_disable_device(dev);
if (mlx5_pci_slot_reset(dev->pdev) == PCI_ERS_RESULT_RECOVERED)
- mlx5_pci_resume(dev->pdev);
+ ret = mlx5_load_one(dev);
+ return ret;
}
static struct pci_driver mlx5_core_driver = {
.name = KBUILD_MODNAME,
.id_table = mlx5_core_pci_table,
- .probe = init_one,
+ .probe = probe_one,
.remove = remove_one,
.suspend = mlx5_suspend,
.resume = mlx5_resume,
.shutdown = shutdown,
.err_handler = &mlx5_err_handler,
.sriov_configure = mlx5_core_sriov_configure,
+ .sriov_get_vf_total_msix = mlx5_sriov_get_vf_total_msix,
+ .sriov_set_msix_vec_count = mlx5_core_sriov_set_msix_vec_count,
};
static void mlx5_core_verify_params(void)
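
The boot/cleanup flags are gone: probe and remove now use the mlx5_init_one()/mlx5_uninit_one() pair, which also creates the software objects and registers devlink and the device, while reset, suspend/resume and error recovery use the lighter mlx5_load_one()/mlx5_unload_one() pair that only reloads and re-attaches. A sketch of how the lighter pair composes in a recovery-style flow:

static int example_reset_flow(struct mlx5_core_dev *dev)
{
	mlx5_unload_one(dev);	/* detach, unload, function teardown */

	/* firmware reset / PCI recovery would happen here */

	return mlx5_load_one(dev);	/* function setup, load, attach */
}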
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
index efe403c7e354..a22b706eebd3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
@@ -134,12 +134,13 @@ void mlx5_error_sw_reset(struct mlx5_core_dev *dev);
u32 mlx5_health_check_fatal_sensors(struct mlx5_core_dev *dev);
int mlx5_health_wait_pci_up(struct mlx5_core_dev *dev);
void mlx5_disable_device(struct mlx5_core_dev *dev);
-void mlx5_recover_device(struct mlx5_core_dev *dev);
+int mlx5_recover_device(struct mlx5_core_dev *dev);
int mlx5_sriov_init(struct mlx5_core_dev *dev);
void mlx5_sriov_cleanup(struct mlx5_core_dev *dev);
int mlx5_sriov_attach(struct mlx5_core_dev *dev);
void mlx5_sriov_detach(struct mlx5_core_dev *dev);
int mlx5_core_sriov_configure(struct pci_dev *dev, int num_vfs);
+int mlx5_core_sriov_set_msix_vec_count(struct pci_dev *vf, int msix_vec_count);
int mlx5_core_enable_hca(struct mlx5_core_dev *dev, u16 func_id);
int mlx5_core_disable_hca(struct mlx5_core_dev *dev, u16 func_id);
int mlx5_create_scheduling_element_cmd(struct mlx5_core_dev *dev, u8 hierarchy,
@@ -174,6 +175,11 @@ int mlx5_irq_attach_nb(struct mlx5_irq_table *irq_table, int vecidx,
struct notifier_block *nb);
int mlx5_irq_detach_nb(struct mlx5_irq_table *irq_table, int vecidx,
struct notifier_block *nb);
+
+int mlx5_set_msix_vec_count(struct mlx5_core_dev *dev, int function_id,
+ int msix_vec_count);
+int mlx5_get_default_msix_vec_count(struct mlx5_core_dev *dev, int num_vfs);
+
struct cpumask *
mlx5_irq_get_affinity_mask(struct mlx5_irq_table *irq_table, int vecidx);
struct cpu_rmap *mlx5_irq_get_rmap(struct mlx5_irq_table *table);
@@ -267,10 +273,18 @@ static inline bool mlx5_core_is_sf(const struct mlx5_core_dev *dev)
int mlx5_mdev_init(struct mlx5_core_dev *dev, int profile_idx);
void mlx5_mdev_uninit(struct mlx5_core_dev *dev);
-void mlx5_unload_one(struct mlx5_core_dev *dev, bool cleanup);
-int mlx5_load_one(struct mlx5_core_dev *dev, bool boot);
+int mlx5_init_one(struct mlx5_core_dev *dev);
+void mlx5_uninit_one(struct mlx5_core_dev *dev);
+void mlx5_unload_one(struct mlx5_core_dev *dev);
+int mlx5_load_one(struct mlx5_core_dev *dev);
int mlx5_vport_get_other_func_cap(struct mlx5_core_dev *dev, u16 function_id, void *out);
void mlx5_events_work_enqueue(struct mlx5_core_dev *dev, struct work_struct *work);
+static inline u32 mlx5_sriov_get_vf_total_msix(struct pci_dev *pdev)
+{
+ struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
+
+ return MLX5_CAP_GEN_MAX(dev, num_total_dynamic_vf_msix);
+}
#endif /* __MLX5_CORE_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
index c0656d4782e1..110c0837f95b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
@@ -61,7 +61,7 @@ struct fw_page {
u32 function;
unsigned long bitmask;
struct list_head list;
- unsigned free_count;
+ unsigned int free_count;
};
enum {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c b/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
index a61e09aff152..1f907df5b3a2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
@@ -61,6 +61,79 @@ static struct mlx5_irq *mlx5_irq_get(struct mlx5_core_dev *dev, int vecidx)
return &irq_table->irq[vecidx];
}
+/**
+ * mlx5_get_default_msix_vec_count - Get the default number of MSI-X vectors
+ * to be assigned to each VF.
+ * @dev: PF to work on
+ * @num_vfs: Number of enabled VFs
+ */
+int mlx5_get_default_msix_vec_count(struct mlx5_core_dev *dev, int num_vfs)
+{
+ int num_vf_msix, min_msix, max_msix;
+
+ num_vf_msix = MLX5_CAP_GEN_MAX(dev, num_total_dynamic_vf_msix);
+ if (!num_vf_msix)
+ return 0;
+
+ min_msix = MLX5_CAP_GEN(dev, min_dynamic_vf_msix_table_size);
+ max_msix = MLX5_CAP_GEN(dev, max_dynamic_vf_msix_table_size);
+
+ /* Limit maximum number of MSI-X vectors so the default configuration
+ * has some available in the pool. This will allow the user to increase
+ * the number of vectors in a VF without having to first size-down other
+ * VFs.
+ */
+ return max(min(num_vf_msix / num_vfs, max_msix / 2), min_msix);
+}
+
+/**
+ * mlx5_set_msix_vec_count - Set dynamically allocated MSI-X on the VF
+ * @dev: PF to work on
+ * @function_id: Internal PCI VF function ID
+ * @msix_vec_count: Number of MSI-X vectors to set
+ */
+int mlx5_set_msix_vec_count(struct mlx5_core_dev *dev, int function_id,
+ int msix_vec_count)
+{
+ int sz = MLX5_ST_SZ_BYTES(set_hca_cap_in);
+ int num_vf_msix, min_msix, max_msix;
+ void *hca_cap, *cap;
+ int ret;
+
+ num_vf_msix = MLX5_CAP_GEN_MAX(dev, num_total_dynamic_vf_msix);
+ if (!num_vf_msix)
+ return 0;
+
+ if (!MLX5_CAP_GEN(dev, vport_group_manager) || !mlx5_core_is_pf(dev))
+ return -EOPNOTSUPP;
+
+ min_msix = MLX5_CAP_GEN(dev, min_dynamic_vf_msix_table_size);
+ max_msix = MLX5_CAP_GEN(dev, max_dynamic_vf_msix_table_size);
+
+ if (msix_vec_count < min_msix)
+ return -EINVAL;
+
+ if (msix_vec_count > max_msix)
+ return -EOVERFLOW;
+
+ hca_cap = kzalloc(sz, GFP_KERNEL);
+ if (!hca_cap)
+ return -ENOMEM;
+
+ cap = MLX5_ADDR_OF(set_hca_cap_in, hca_cap, capability);
+ MLX5_SET(cmd_hca_cap, cap, dynamic_msix_table_size, msix_vec_count);
+
+ MLX5_SET(set_hca_cap_in, hca_cap, opcode, MLX5_CMD_OP_SET_HCA_CAP);
+ MLX5_SET(set_hca_cap_in, hca_cap, other_function, 1);
+ MLX5_SET(set_hca_cap_in, hca_cap, function_id, function_id);
+
+ MLX5_SET(set_hca_cap_in, hca_cap, op_mod,
+ MLX5_SET_HCA_CAP_OP_MOD_GENERAL_DEVICE << 1);
+ ret = mlx5_cmd_exec_in(dev, set_hca_cap, hca_cap);
+ kfree(hca_cap);
+ return ret;
+}
+
int mlx5_irq_attach_nb(struct mlx5_irq_table *irq_table, int vecidx,
struct notifier_block *nb)
{
@@ -94,7 +167,6 @@ static void irq_set_name(char *name, int vecidx)
snprintf(name, MLX5_MAX_IRQ_NAME, "mlx5_comp%d",
vecidx - MLX5_IRQ_VEC_COMP_BASE);
- return;
}
static int request_irqs(struct mlx5_core_dev *dev, int nvec)
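
Worked numbers for the default above, using assumed capability values: with num_total_dynamic_vf_msix = 1024, min = 2, max = 64 and 16 enabled VFs, the fair share is 1024 / 16 = 64, the pool cap is 64 / 2 = 32, and the result is max(min(64, 32), 2) = 32, leaving half the table free for later increases. A standalone version of the arithmetic:

#include <stdio.h>

static int default_msix(int num_vf_msix, int num_vfs, int min_msix,
			int max_msix)
{
	int v = num_vf_msix / num_vfs;	/* fair share per VF */

	if (v > max_msix / 2)		/* keep headroom in the pool */
		v = max_msix / 2;
	if (v < min_msix)		/* never below the device minimum */
		v = min_msix;
	return v;
}

int main(void)
{
	printf("%d\n", default_msix(1024, 16, 2, 64));	/* prints 32 */
	return 0;
}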
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/port.c b/drivers/net/ethernet/mellanox/mlx5/core/port.c
index 4bb219565c58..1ef2b6a848c1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/port.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/port.c
@@ -353,69 +353,123 @@ static void mlx5_sfp_eeprom_params_set(u16 *i2c_addr, int *page_num, u16 *offset
*offset -= MLX5_EEPROM_PAGE_LENGTH;
}
-int mlx5_query_module_eeprom(struct mlx5_core_dev *dev,
- u16 offset, u16 size, u8 *data)
+static int mlx5_query_mcia(struct mlx5_core_dev *dev,
+ struct mlx5_module_eeprom_query_params *params, u8 *data)
{
- int module_num, status, err, page_num = 0;
u32 in[MLX5_ST_SZ_DW(mcia_reg)] = {};
u32 out[MLX5_ST_SZ_DW(mcia_reg)];
- u16 i2c_addr = 0;
- u8 module_id;
+ int status, err;
void *ptr;
+ u16 size;
- err = mlx5_query_module_num(dev, &module_num);
+ size = min_t(int, params->size, MLX5_EEPROM_MAX_BYTES);
+
+ MLX5_SET(mcia_reg, in, l, 0);
+ MLX5_SET(mcia_reg, in, size, size);
+ MLX5_SET(mcia_reg, in, module, params->module_number);
+ MLX5_SET(mcia_reg, in, device_address, params->offset);
+ MLX5_SET(mcia_reg, in, page_number, params->page);
+ MLX5_SET(mcia_reg, in, i2c_device_address, params->i2c_address);
+
+ err = mlx5_core_access_reg(dev, in, sizeof(in), out,
+ sizeof(out), MLX5_REG_MCIA, 0, 0);
if (err)
return err;
- err = mlx5_query_module_id(dev, module_num, &module_id);
+ status = MLX5_GET(mcia_reg, out, status);
+ if (status) {
+ mlx5_core_err(dev, "query_mcia_reg failed: status: 0x%x\n",
+ status);
+ return -EIO;
+ }
+
+ ptr = MLX5_ADDR_OF(mcia_reg, out, dword_0);
+ memcpy(data, ptr, size);
+
+ return size;
+}
+
+int mlx5_query_module_eeprom(struct mlx5_core_dev *dev,
+ u16 offset, u16 size, u8 *data)
+{
+ struct mlx5_module_eeprom_query_params query = {0};
+ u8 module_id;
+ int err;
+
+ err = mlx5_query_module_num(dev, &query.module_number);
+ if (err)
+ return err;
+
+ err = mlx5_query_module_id(dev, query.module_number, &module_id);
if (err)
return err;
+
+ query.offset = offset;
switch (module_id) {
case MLX5_MODULE_ID_SFP:
- mlx5_sfp_eeprom_params_set(&i2c_addr, &page_num, &offset);
+ mlx5_sfp_eeprom_params_set(&query.i2c_address, &query.page, &query.offset);
break;
case MLX5_MODULE_ID_QSFP:
case MLX5_MODULE_ID_QSFP_PLUS:
case MLX5_MODULE_ID_QSFP28:
- mlx5_qsfp_eeprom_params_set(&i2c_addr, &page_num, &offset);
+ mlx5_qsfp_eeprom_params_set(&query.i2c_address, &query.page, &query.offset);
break;
default:
mlx5_core_err(dev, "Module ID not recognized: 0x%x\n", module_id);
return -EINVAL;
}
- if (offset + size > MLX5_EEPROM_PAGE_LENGTH)
+ if (query.offset + size > MLX5_EEPROM_PAGE_LENGTH)
/* Cross-page read, read until offset 256 in low page */
- size -= offset + size - MLX5_EEPROM_PAGE_LENGTH;
+ size -= query.offset + size - MLX5_EEPROM_PAGE_LENGTH;
- size = min_t(int, size, MLX5_EEPROM_MAX_BYTES);
+ query.size = size;
- MLX5_SET(mcia_reg, in, l, 0);
- MLX5_SET(mcia_reg, in, module, module_num);
- MLX5_SET(mcia_reg, in, i2c_device_address, i2c_addr);
- MLX5_SET(mcia_reg, in, page_number, page_num);
- MLX5_SET(mcia_reg, in, device_address, offset);
- MLX5_SET(mcia_reg, in, size, size);
+ return mlx5_query_mcia(dev, &query, data);
+}
+EXPORT_SYMBOL_GPL(mlx5_query_module_eeprom);
- err = mlx5_core_access_reg(dev, in, sizeof(in), out,
- sizeof(out), MLX5_REG_MCIA, 0, 0);
+int mlx5_query_module_eeprom_by_page(struct mlx5_core_dev *dev,
+ struct mlx5_module_eeprom_query_params *params,
+ u8 *data)
+{
+ u8 module_id;
+ int err;
+
+ err = mlx5_query_module_num(dev, &params->module_number);
if (err)
return err;
- status = MLX5_GET(mcia_reg, out, status);
- if (status) {
- mlx5_core_err(dev, "query_mcia_reg failed: status: 0x%x\n",
- status);
- return -EIO;
+ err = mlx5_query_module_id(dev, params->module_number, &module_id);
+ if (err)
+ return err;
+
+ switch (module_id) {
+ case MLX5_MODULE_ID_SFP:
+ if (params->page > 0)
+ return -EINVAL;
+ break;
+ case MLX5_MODULE_ID_QSFP:
+ case MLX5_MODULE_ID_QSFP28:
+ case MLX5_MODULE_ID_QSFP_PLUS:
+ if (params->page > 3)
+ return -EINVAL;
+ break;
+ case MLX5_MODULE_ID_DSFP:
+ break;
+ default:
+ mlx5_core_err(dev, "Module ID not recognized: 0x%x\n", module_id);
+ return -EINVAL;
}
- ptr = MLX5_ADDR_OF(mcia_reg, out, dword_0);
- memcpy(data, ptr, size);
+ if (params->i2c_address != MLX5_I2C_ADDR_HIGH &&
+ params->i2c_address != MLX5_I2C_ADDR_LOW) {
+ mlx5_core_err(dev, "I2C address not recognized: 0x%x\n", params->i2c_address);
+ return -EINVAL;
+ }
- return size;
+ return mlx5_query_mcia(dev, params, data);
}
-EXPORT_SYMBOL_GPL(mlx5_query_module_eeprom);
+EXPORT_SYMBOL_GPL(mlx5_query_module_eeprom_by_page);
static int mlx5_query_port_pvlc(struct mlx5_core_dev *dev, u32 *pvlc,
int pvlc_size, u8 local_port)
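
mlx5_query_module_eeprom_by_page() exposes the page-addressed read that the new ethtool EEPROM interface expects, after validating the page number per module type and the I2C address. A hedged sketch of a caller; the field names come from the params struct used above, and the wrapper itself is illustrative:

static int example_read_module_page(struct mlx5_core_dev *mdev, u32 page,
				    u16 offset, u16 len, u8 *data)
{
	struct mlx5_module_eeprom_query_params params = {
		.offset      = offset,
		.size        = len,
		.page        = page,
		.i2c_address = MLX5_I2C_ADDR_LOW,
	};

	/* returns bytes read on success, negative errno on failure */
	return mlx5_query_module_eeprom_by_page(mdev, &params, data);
}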
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/rdma.c b/drivers/net/ethernet/mellanox/mlx5/core/rdma.c
index 8e0dddc6383f..441b5453acae 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/rdma.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/rdma.c
@@ -180,5 +180,4 @@ del_roce_addr:
mlx5_rdma_del_roce_addr(dev);
disable_roce:
mlx5_nic_vport_disable_roce(dev);
- return;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/rl.c b/drivers/net/ethernet/mellanox/mlx5/core/rl.c
index 99039c47ef33..7161220afe30 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/rl.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/rl.c
@@ -117,6 +117,9 @@ static struct mlx5_rl_entry *find_rl_entry(struct mlx5_rl_table *table,
bool empty_found = false;
int i;
+ lockdep_assert_held(&table->rl_lock);
+ WARN_ON(!table->rl_entry);
+
for (i = 0; i < table->max_size; i++) {
if (dedicated) {
if (!table->rl_entry[i].refcount)
@@ -172,38 +175,103 @@ bool mlx5_rl_are_equal(struct mlx5_rate_limit *rl_0,
}
EXPORT_SYMBOL(mlx5_rl_are_equal);
+static int mlx5_rl_table_get(struct mlx5_rl_table *table)
+{
+ int i;
+
+ lockdep_assert_held(&table->rl_lock);
+
+ if (table->rl_entry) {
+ table->refcount++;
+ return 0;
+ }
+
+ table->rl_entry = kcalloc(table->max_size, sizeof(struct mlx5_rl_entry),
+ GFP_KERNEL);
+ if (!table->rl_entry)
+ return -ENOMEM;
+
+ /* The index represents the index in HW rate limit table
+ * Index 0 is reserved for unlimited rate
+ */
+ for (i = 0; i < table->max_size; i++)
+ table->rl_entry[i].index = i + 1;
+
+ table->refcount++;
+ return 0;
+}
+
+static void mlx5_rl_table_put(struct mlx5_rl_table *table)
+{
+ lockdep_assert_held(&table->rl_lock);
+ if (--table->refcount)
+ return;
+
+ kfree(table->rl_entry);
+ table->rl_entry = NULL;
+}
+
+static void mlx5_rl_table_free(struct mlx5_core_dev *dev, struct mlx5_rl_table *table)
+{
+ int i;
+
+ if (!table->rl_entry)
+ return;
+
+ /* Clear all configured rates */
+ for (i = 0; i < table->max_size; i++)
+ if (table->rl_entry[i].refcount)
+ mlx5_set_pp_rate_limit_cmd(dev, &table->rl_entry[i], false);
+ kfree(table->rl_entry);
+}
+
+static void mlx5_rl_entry_get(struct mlx5_rl_entry *entry)
+{
+ entry->refcount++;
+}
+
+static void
+mlx5_rl_entry_put(struct mlx5_core_dev *dev, struct mlx5_rl_entry *entry)
+{
+ entry->refcount--;
+ if (!entry->refcount)
+ mlx5_set_pp_rate_limit_cmd(dev, entry, false);
+}
+
int mlx5_rl_add_rate_raw(struct mlx5_core_dev *dev, void *rl_in, u16 uid,
bool dedicated_entry, u16 *index)
{
struct mlx5_rl_table *table = &dev->priv.rl_table;
struct mlx5_rl_entry *entry;
- int err = 0;
u32 rate;
+ int err;
- rate = MLX5_GET(set_pp_rate_limit_context, rl_in, rate_limit);
- mutex_lock(&table->rl_lock);
+ if (!table->max_size)
+ return -EOPNOTSUPP;
+ rate = MLX5_GET(set_pp_rate_limit_context, rl_in, rate_limit);
if (!rate || !mlx5_rl_is_in_range(dev, rate)) {
mlx5_core_err(dev, "Invalid rate: %u, should be %u to %u\n",
rate, table->min_rate, table->max_rate);
- err = -EINVAL;
- goto out;
+ return -EINVAL;
}
+ mutex_lock(&table->rl_lock);
+ err = mlx5_rl_table_get(table);
+ if (err)
+ goto out;
+
entry = find_rl_entry(table, rl_in, uid, dedicated_entry);
if (!entry) {
mlx5_core_err(dev, "Max number of %u rates reached\n",
table->max_size);
err = -ENOSPC;
- goto out;
+ goto rl_err;
}
- if (entry->refcount) {
- /* rate already configured */
- entry->refcount++;
- } else {
+ if (!entry->refcount) {
+ /* new rate limit */
memcpy(entry->rl_raw, rl_in, sizeof(entry->rl_raw));
entry->uid = uid;
- /* new rate limit */
err = mlx5_set_pp_rate_limit_cmd(dev, entry, true);
if (err) {
mlx5_core_err(
@@ -214,14 +282,18 @@ int mlx5_rl_add_rate_raw(struct mlx5_core_dev *dev, void *rl_in, u16 uid,
burst_upper_bound),
MLX5_GET(set_pp_rate_limit_context, rl_in,
typical_packet_size));
- goto out;
+ goto rl_err;
}
- entry->refcount = 1;
entry->dedicated = dedicated_entry;
}
+ mlx5_rl_entry_get(entry);
*index = entry->index;
+ mutex_unlock(&table->rl_lock);
+ return 0;
+rl_err:
+ mlx5_rl_table_put(table);
out:
mutex_unlock(&table->rl_lock);
return err;
@@ -235,10 +307,8 @@ void mlx5_rl_remove_rate_raw(struct mlx5_core_dev *dev, u16 index)
mutex_lock(&table->rl_lock);
entry = &table->rl_entry[index - 1];
- entry->refcount--;
- if (!entry->refcount)
- /* need to remove rate */
- mlx5_set_pp_rate_limit_cmd(dev, entry, false);
+ mlx5_rl_entry_put(dev, entry);
+ mlx5_rl_table_put(table);
mutex_unlock(&table->rl_lock);
}
EXPORT_SYMBOL(mlx5_rl_remove_rate_raw);
@@ -286,12 +356,8 @@ void mlx5_rl_remove_rate(struct mlx5_core_dev *dev, struct mlx5_rate_limit *rl)
rl->rate, rl->max_burst_sz, rl->typical_pkt_sz);
goto out;
}
-
- entry->refcount--;
- if (!entry->refcount)
- /* need to remove rate */
- mlx5_set_pp_rate_limit_cmd(dev, entry, false);
-
+ mlx5_rl_entry_put(dev, entry);
+ mlx5_rl_table_put(table);
out:
mutex_unlock(&table->rl_lock);
}
@@ -300,31 +366,19 @@ EXPORT_SYMBOL(mlx5_rl_remove_rate);
int mlx5_init_rl_table(struct mlx5_core_dev *dev)
{
struct mlx5_rl_table *table = &dev->priv.rl_table;
- int i;
- mutex_init(&table->rl_lock);
if (!MLX5_CAP_GEN(dev, qos) || !MLX5_CAP_QOS(dev, packet_pacing)) {
table->max_size = 0;
return 0;
}
+ mutex_init(&table->rl_lock);
+
/* First entry is reserved for unlimited rate */
table->max_size = MLX5_CAP_QOS(dev, packet_pacing_rate_table_size) - 1;
table->max_rate = MLX5_CAP_QOS(dev, packet_pacing_max_rate);
table->min_rate = MLX5_CAP_QOS(dev, packet_pacing_min_rate);
- table->rl_entry = kcalloc(table->max_size, sizeof(struct mlx5_rl_entry),
- GFP_KERNEL);
- if (!table->rl_entry)
- return -ENOMEM;
-
- /* The index represents the index in HW rate limit table
- * Index 0 is reserved for unlimited rate
- */
- for (i = 0; i < table->max_size; i++)
- table->rl_entry[i].index = i + 1;
-
- /* Index 0 is reserved */
mlx5_core_info(dev, "Rate limit: %u rates are supported, range: %uMbps to %uMbps\n",
table->max_size,
table->min_rate >> 10,
@@ -336,13 +390,10 @@ int mlx5_init_rl_table(struct mlx5_core_dev *dev)
void mlx5_cleanup_rl_table(struct mlx5_core_dev *dev)
{
struct mlx5_rl_table *table = &dev->priv.rl_table;
- int i;
- /* Clear all configured rates */
- for (i = 0; i < table->max_size; i++)
- if (table->rl_entry[i].refcount)
- mlx5_set_pp_rate_limit_cmd(dev, &table->rl_entry[i],
- false);
+ if (!MLX5_CAP_GEN(dev, qos) || !MLX5_CAP_QOS(dev, packet_pacing))
+ return;
- kfree(dev->priv.rl_table.rl_entry);
+ mlx5_rl_table_free(dev, table);
+ mutex_destroy(&table->rl_lock);
}
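The rework above makes the rate-limit entry array lazily allocated and reference-counted: the first configured rate allocates table->rl_entry through mlx5_rl_table_get(), every further rate takes another table reference, and mlx5_rl_table_put() frees the array once the last rate is removed. A standalone illustration of the pattern (generic C, not mlx5 code; the caller is assumed to hold the table lock, as the lockdep assertions require):

	#include <stdlib.h>

	struct rl_table {
		int refcount;
		int *entries;
		int max_size;
	};

	/* First user allocates the array; later users just take a reference. */
	static int rl_table_get(struct rl_table *t)
	{
		if (t->entries) {
			t->refcount++;
			return 0;
		}
		t->entries = calloc(t->max_size, sizeof(*t->entries));
		if (!t->entries)
			return -1;
		t->refcount++;
		return 0;
	}

	/* Dropping the last reference frees the array, so an idle table costs no memory. */
	static void rl_table_put(struct rl_table *t)
	{
		if (--t->refcount)
			return;
		free(t->entries);
		t->entries = NULL;
	}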
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.c b/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.c
index 90b524c59f3c..6a0c6f965ad1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.c
@@ -148,9 +148,19 @@ mlx5_sf_dev_state_change_handler(struct notifier_block *nb, unsigned long event_
struct mlx5_sf_dev_table *table = container_of(nb, struct mlx5_sf_dev_table, nb);
const struct mlx5_vhca_state_event *event = data;
struct mlx5_sf_dev *sf_dev;
+ u16 max_functions;
u16 sf_index;
+ u16 base_id;
+
+ max_functions = mlx5_sf_max_functions(table->dev);
+ if (!max_functions)
+ return 0;
+
+ base_id = MLX5_CAP_GEN(table->dev, sf_base_id);
+ if (event->function_id < base_id || event->function_id >= (base_id + max_functions))
+ return 0;
- sf_index = event->function_id - MLX5_CAP_GEN(table->dev, sf_base_id);
+ sf_index = event->function_id - base_id;
sf_dev = xa_load(&table->devices, sf_index);
switch (event->new_vhca_state) {
case MLX5_VHCA_STATE_ALLOCATED:

diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.h b/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.h
index 4de02902aef1..149fd9e698cf 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.h
@@ -47,7 +47,7 @@ static inline void mlx5_sf_driver_unregister(void)
static inline bool mlx5_sf_dev_allocated(const struct mlx5_core_dev *dev)
{
- return 0;
+ return false;
}
#endif
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/driver.c b/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/driver.c
index c4bf555c25ea..42c8ee03fe3e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/driver.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/driver.c
@@ -41,14 +41,15 @@ static int mlx5_sf_dev_probe(struct auxiliary_device *adev, const struct auxilia
goto remap_err;
}
- err = mlx5_load_one(mdev, true);
+ err = mlx5_init_one(mdev);
if (err) {
- mlx5_core_warn(mdev, "mlx5_load_one err=%d\n", err);
- goto load_one_err;
+ mlx5_core_warn(mdev, "mlx5_init_one err=%d\n", err);
+ goto init_one_err;
}
+ devlink_reload_enable(devlink);
return 0;
-load_one_err:
+init_one_err:
iounmap(mdev->iseg);
remap_err:
mlx5_mdev_uninit(mdev);
@@ -63,7 +64,8 @@ static void mlx5_sf_dev_remove(struct auxiliary_device *adev)
struct devlink *devlink;
devlink = priv_to_devlink(sf_dev->mdev);
- mlx5_unload_one(sf_dev->mdev, true);
+ devlink_reload_disable(devlink);
+ mlx5_uninit_one(sf_dev->mdev);
iounmap(sf_dev->mdev->iseg);
mlx5_mdev_uninit(sf_dev->mdev);
mlx5_devlink_free(devlink);
@@ -73,7 +75,7 @@ static void mlx5_sf_dev_shutdown(struct auxiliary_device *adev)
{
struct mlx5_sf_dev *sf_dev = container_of(adev, struct mlx5_sf_dev, adev);
- mlx5_unload_one(sf_dev->mdev, false);
+ mlx5_unload_one(sf_dev->mdev);
}
static const struct auxiliary_device_id mlx5_sf_dev_id_table[] = {
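Probe now stops at mlx5_init_one() and turns devlink reload on only once the SF is fully initialized; remove mirrors it by disabling reload before mlx5_uninit_one(), so a concurrent reload cannot race with teardown. A condensed sketch of the pairing, using the function names from the hunks above:

	/* Condensed sketch: enable reload last on probe, disable it first on remove. */
	static int sf_bringup(struct mlx5_core_dev *mdev, struct devlink *devlink)
	{
		int err = mlx5_init_one(mdev);

		if (err)
			return err;
		devlink_reload_enable(devlink);		/* only once fully up */
		return 0;
	}

	static void sf_teardown(struct mlx5_core_dev *mdev, struct devlink *devlink)
	{
		devlink_reload_disable(devlink);	/* block reload before teardown */
		mlx5_uninit_one(mdev);
	}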
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c b/drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c
index c2ba41bb7a70..a8e73c9ed1ea 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c
@@ -12,6 +12,7 @@
struct mlx5_sf {
struct devlink_port dl_port;
unsigned int port_index;
+ u32 controller;
u16 id;
u16 hw_fn_id;
u16 hw_state;
@@ -58,7 +59,8 @@ static void mlx5_sf_id_erase(struct mlx5_sf_table *table, struct mlx5_sf *sf)
}
static struct mlx5_sf *
-mlx5_sf_alloc(struct mlx5_sf_table *table, u32 sfnum, struct netlink_ext_ack *extack)
+mlx5_sf_alloc(struct mlx5_sf_table *table, struct mlx5_eswitch *esw,
+ u32 controller, u32 sfnum, struct netlink_ext_ack *extack)
{
unsigned int dl_port_index;
struct mlx5_sf *sf;
@@ -66,7 +68,12 @@ mlx5_sf_alloc(struct mlx5_sf_table *table, u32 sfnum, struct netlink_ext_ack *ex
int id_err;
int err;
- id_err = mlx5_sf_hw_table_sf_alloc(table->dev, sfnum);
+ if (!mlx5_esw_offloads_controller_valid(esw, controller)) {
+ NL_SET_ERR_MSG_MOD(extack, "Invalid controller number");
+ return ERR_PTR(-EINVAL);
+ }
+
+ id_err = mlx5_sf_hw_table_sf_alloc(table->dev, controller, sfnum);
if (id_err < 0) {
err = id_err;
goto id_err;
@@ -78,11 +85,12 @@ mlx5_sf_alloc(struct mlx5_sf_table *table, u32 sfnum, struct netlink_ext_ack *ex
goto alloc_err;
}
sf->id = id_err;
- hw_fn_id = mlx5_sf_sw_to_hw_id(table->dev, sf->id);
+ hw_fn_id = mlx5_sf_sw_to_hw_id(table->dev, controller, sf->id);
dl_port_index = mlx5_esw_vport_to_devlink_port_index(table->dev, hw_fn_id);
sf->port_index = dl_port_index;
sf->hw_fn_id = hw_fn_id;
sf->hw_state = MLX5_VHCA_STATE_ALLOCATED;
+ sf->controller = controller;
err = mlx5_sf_id_insert(table, sf);
if (err)
@@ -93,7 +101,7 @@ mlx5_sf_alloc(struct mlx5_sf_table *table, u32 sfnum, struct netlink_ext_ack *ex
insert_err:
kfree(sf);
alloc_err:
- mlx5_sf_hw_table_sf_free(table->dev, id_err);
+ mlx5_sf_hw_table_sf_free(table->dev, controller, id_err);
id_err:
if (err == -EEXIST)
NL_SET_ERR_MSG_MOD(extack, "SF already exists. Choose a different sfnum");
@@ -103,7 +111,7 @@ id_err:
static void mlx5_sf_free(struct mlx5_sf_table *table, struct mlx5_sf *sf)
{
mlx5_sf_id_erase(table, sf);
- mlx5_sf_hw_table_sf_free(table->dev, sf->id);
+ mlx5_sf_hw_table_sf_free(table->dev, sf->controller, sf->id);
kfree(sf);
}
@@ -270,15 +278,14 @@ static int mlx5_sf_add(struct mlx5_core_dev *dev, struct mlx5_sf_table *table,
{
struct mlx5_eswitch *esw = dev->priv.eswitch;
struct mlx5_sf *sf;
- u16 hw_fn_id;
int err;
- sf = mlx5_sf_alloc(table, new_attr->sfnum, extack);
+ sf = mlx5_sf_alloc(table, esw, new_attr->controller, new_attr->sfnum, extack);
if (IS_ERR(sf))
return PTR_ERR(sf);
- hw_fn_id = mlx5_sf_sw_to_hw_id(dev, sf->id);
- err = mlx5_esw_offloads_sf_vport_enable(esw, &sf->dl_port, hw_fn_id, new_attr->sfnum);
+ err = mlx5_esw_offloads_sf_vport_enable(esw, &sf->dl_port, sf->hw_fn_id,
+ new_attr->controller, new_attr->sfnum);
if (err)
goto esw_err;
*new_port_index = sf->port_index;
@@ -307,7 +314,8 @@ mlx5_sf_new_check_attr(struct mlx5_core_dev *dev, const struct devlink_port_new_
"User must provide unique sfnum. Driver does not support auto assignment");
return -EOPNOTSUPP;
}
- if (new_attr->controller_valid && new_attr->controller) {
+ if (new_attr->controller_valid && new_attr->controller &&
+ !mlx5_core_is_ecpf_esw_manager(dev)) {
NL_SET_ERR_MSG_MOD(extack, "External controller is unsupported");
return -EOPNOTSUPP;
}
@@ -353,10 +361,10 @@ static void mlx5_sf_dealloc(struct mlx5_sf_table *table, struct mlx5_sf *sf)
* firmware gives confirmation that it is detached by the driver.
*/
mlx5_cmd_sf_disable_hca(table->dev, sf->hw_fn_id);
- mlx5_sf_hw_table_sf_deferred_free(table->dev, sf->id);
+ mlx5_sf_hw_table_sf_deferred_free(table->dev, sf->controller, sf->id);
kfree(sf);
} else {
- mlx5_sf_hw_table_sf_deferred_free(table->dev, sf->id);
+ mlx5_sf_hw_table_sf_deferred_free(table->dev, sf->controller, sf->id);
kfree(sf);
}
}
@@ -438,9 +446,6 @@ sf_err:
static void mlx5_sf_table_enable(struct mlx5_sf_table *table)
{
- if (!mlx5_sf_max_functions(table->dev))
- return;
-
init_completion(&table->disable_complete);
refcount_set(&table->refcount, 1);
}
@@ -463,9 +468,6 @@ static void mlx5_sf_deactivate_all(struct mlx5_sf_table *table)
static void mlx5_sf_table_disable(struct mlx5_sf_table *table)
{
- if (!mlx5_sf_max_functions(table->dev))
- return;
-
if (!refcount_read(&table->refcount))
return;
@@ -492,14 +494,15 @@ static int mlx5_sf_esw_event(struct notifier_block *nb, unsigned long event, voi
break;
default:
break;
- };
+ }
return 0;
}
static bool mlx5_sf_table_supported(const struct mlx5_core_dev *dev)
{
- return dev->priv.eswitch && MLX5_ESWITCH_MANAGER(dev) && mlx5_sf_supported(dev);
+ return dev->priv.eswitch && MLX5_ESWITCH_MANAGER(dev) &&
+ mlx5_sf_hw_table_supported(dev);
}
int mlx5_sf_table_init(struct mlx5_core_dev *dev)
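An SF is now identified by the (controller, sfnum) pair: controller 0 is the local host, and a non-zero controller number is accepted only when this function manages the embedded-CPU eswitch, since only the ECPF can provision SFs on behalf of an external host. The gate, condensed from the mlx5_sf_new_check_attr() hunk above:

	/* Condensed from the hunk above: external controllers are valid only on
	 * an ECPF eswitch manager. */
	if (new_attr->controller_valid && new_attr->controller &&
	    !mlx5_core_is_ecpf_esw_manager(dev))
		return -EOPNOTSUPP;	/* "External controller is unsupported" */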
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/hw_table.c b/drivers/net/ethernet/mellanox/mlx5/core/sf/hw_table.c
index a5a0f60bef66..ef5f892aafad 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/sf/hw_table.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/hw_table.c
@@ -5,8 +5,10 @@
#include "priv.h"
#include "sf.h"
#include "mlx5_ifc_vhca_event.h"
+#include "ecpf.h"
#include "vhca_event.h"
#include "mlx5_core.h"
+#include "eswitch.h"
struct mlx5_sf_hw {
u32 usr_sfnum;
@@ -14,60 +16,114 @@ struct mlx5_sf_hw {
u8 pending_delete: 1;
};
+struct mlx5_sf_hwc_table {
+ struct mlx5_sf_hw *sfs;
+ int max_fn;
+ u16 start_fn_id;
+};
+
+enum mlx5_sf_hwc_index {
+ MLX5_SF_HWC_LOCAL,
+ MLX5_SF_HWC_EXTERNAL,
+ MLX5_SF_HWC_MAX,
+};
+
struct mlx5_sf_hw_table {
struct mlx5_core_dev *dev;
- struct mlx5_sf_hw *sfs;
- int max_local_functions;
struct mutex table_lock; /* Serializes sf deletion and vhca state change handler. */
struct notifier_block vhca_nb;
+ struct mlx5_sf_hwc_table hwc[MLX5_SF_HWC_MAX];
};
-u16 mlx5_sf_sw_to_hw_id(const struct mlx5_core_dev *dev, u16 sw_id)
+static struct mlx5_sf_hwc_table *
+mlx5_sf_controller_to_hwc(struct mlx5_core_dev *dev, u32 controller)
{
- return sw_id + mlx5_sf_start_function_id(dev);
+ int idx = !!controller;
+
+ return &dev->priv.sf_hw_table->hwc[idx];
}
-static u16 mlx5_sf_hw_to_sw_id(const struct mlx5_core_dev *dev, u16 hw_id)
+u16 mlx5_sf_sw_to_hw_id(struct mlx5_core_dev *dev, u32 controller, u16 sw_id)
{
- return hw_id - mlx5_sf_start_function_id(dev);
+ struct mlx5_sf_hwc_table *hwc;
+
+ hwc = mlx5_sf_controller_to_hwc(dev, controller);
+ return hwc->start_fn_id + sw_id;
}
-int mlx5_sf_hw_table_sf_alloc(struct mlx5_core_dev *dev, u32 usr_sfnum)
+static u16 mlx5_sf_hw_to_sw_id(struct mlx5_sf_hwc_table *hwc, u16 hw_id)
+{
+ return hw_id - hwc->start_fn_id;
+}
+
+static struct mlx5_sf_hwc_table *
+mlx5_sf_table_fn_to_hwc(struct mlx5_sf_hw_table *table, u16 fn_id)
{
- struct mlx5_sf_hw_table *table = dev->priv.sf_hw_table;
- int sw_id = -ENOSPC;
- u16 hw_fn_id;
- int err;
int i;
- if (!table->max_local_functions)
- return -EOPNOTSUPP;
+ for (i = 0; i < ARRAY_SIZE(table->hwc); i++) {
+ if (table->hwc[i].max_fn &&
+ fn_id >= table->hwc[i].start_fn_id &&
+ fn_id < (table->hwc[i].start_fn_id + table->hwc[i].max_fn))
+ return &table->hwc[i];
+ }
+ return NULL;
+}
+
+static int mlx5_sf_hw_table_id_alloc(struct mlx5_sf_hw_table *table, u32 controller,
+ u32 usr_sfnum)
+{
+ struct mlx5_sf_hwc_table *hwc;
+ int i;
+
+ hwc = mlx5_sf_controller_to_hwc(table->dev, controller);
+ if (!hwc->sfs)
+ return -ENOSPC;
- mutex_lock(&table->table_lock);
/* Check if sf with same sfnum already exists or not. */
- for (i = 0; i < table->max_local_functions; i++) {
- if (table->sfs[i].allocated && table->sfs[i].usr_sfnum == usr_sfnum) {
- err = -EEXIST;
- goto exist_err;
- }
+ for (i = 0; i < hwc->max_fn; i++) {
+ if (hwc->sfs[i].allocated && hwc->sfs[i].usr_sfnum == usr_sfnum)
+ return -EEXIST;
}
-
/* Find the free entry and allocate the entry from the array */
- for (i = 0; i < table->max_local_functions; i++) {
- if (!table->sfs[i].allocated) {
- table->sfs[i].usr_sfnum = usr_sfnum;
- table->sfs[i].allocated = true;
- sw_id = i;
- break;
+ for (i = 0; i < hwc->max_fn; i++) {
+ if (!hwc->sfs[i].allocated) {
+ hwc->sfs[i].usr_sfnum = usr_sfnum;
+ hwc->sfs[i].allocated = true;
+ return i;
}
}
- if (sw_id == -ENOSPC) {
- err = -ENOSPC;
+ return -ENOSPC;
+}
+
+static void mlx5_sf_hw_table_id_free(struct mlx5_sf_hw_table *table, u32 controller, int id)
+{
+ struct mlx5_sf_hwc_table *hwc;
+
+ hwc = mlx5_sf_controller_to_hwc(table->dev, controller);
+ hwc->sfs[id].allocated = false;
+ hwc->sfs[id].pending_delete = false;
+}
+
+int mlx5_sf_hw_table_sf_alloc(struct mlx5_core_dev *dev, u32 controller, u32 usr_sfnum)
+{
+ struct mlx5_sf_hw_table *table = dev->priv.sf_hw_table;
+ u16 hw_fn_id;
+ int sw_id;
+ int err;
+
+ if (!table)
+ return -EOPNOTSUPP;
+
+ mutex_lock(&table->table_lock);
+ sw_id = mlx5_sf_hw_table_id_alloc(table, controller, usr_sfnum);
+ if (sw_id < 0) {
+ err = sw_id;
goto exist_err;
}
- hw_fn_id = mlx5_sf_sw_to_hw_id(table->dev, sw_id);
- err = mlx5_cmd_alloc_sf(table->dev, hw_fn_id);
+ hw_fn_id = mlx5_sf_sw_to_hw_id(dev, controller, sw_id);
+ err = mlx5_cmd_alloc_sf(dev, hw_fn_id);
if (err)
goto err;
@@ -75,101 +131,161 @@ int mlx5_sf_hw_table_sf_alloc(struct mlx5_core_dev *dev, u32 usr_sfnum)
if (err)
goto vhca_err;
+ if (controller) {
+ /* If this SF is for an external controller, the SF manager
+ * needs to arm the firmware to receive its events.
+ */
+ err = mlx5_vhca_event_arm(dev, hw_fn_id);
+ if (err)
+ goto vhca_err;
+ }
+
mutex_unlock(&table->table_lock);
return sw_id;
vhca_err:
- mlx5_cmd_dealloc_sf(table->dev, hw_fn_id);
+ mlx5_cmd_dealloc_sf(dev, hw_fn_id);
err:
- table->sfs[i].allocated = false;
+ mlx5_sf_hw_table_id_free(table, controller, sw_id);
exist_err:
mutex_unlock(&table->table_lock);
return err;
}
-static void _mlx5_sf_hw_id_free(struct mlx5_core_dev *dev, u16 id)
+void mlx5_sf_hw_table_sf_free(struct mlx5_core_dev *dev, u32 controller, u16 id)
{
struct mlx5_sf_hw_table *table = dev->priv.sf_hw_table;
u16 hw_fn_id;
- hw_fn_id = mlx5_sf_sw_to_hw_id(table->dev, id);
- mlx5_cmd_dealloc_sf(table->dev, hw_fn_id);
- table->sfs[id].allocated = false;
- table->sfs[id].pending_delete = false;
+ mutex_lock(&table->table_lock);
+ hw_fn_id = mlx5_sf_sw_to_hw_id(dev, controller, id);
+ mlx5_cmd_dealloc_sf(dev, hw_fn_id);
+ mlx5_sf_hw_table_id_free(table, controller, id);
+ mutex_unlock(&table->table_lock);
}
-void mlx5_sf_hw_table_sf_free(struct mlx5_core_dev *dev, u16 id)
+static void mlx5_sf_hw_table_hwc_sf_free(struct mlx5_core_dev *dev,
+ struct mlx5_sf_hwc_table *hwc, int idx)
{
- struct mlx5_sf_hw_table *table = dev->priv.sf_hw_table;
-
- mutex_lock(&table->table_lock);
- _mlx5_sf_hw_id_free(dev, id);
- mutex_unlock(&table->table_lock);
+ mlx5_cmd_dealloc_sf(dev, hwc->start_fn_id + idx);
+ hwc->sfs[idx].allocated = false;
+ hwc->sfs[idx].pending_delete = false;
}
-void mlx5_sf_hw_table_sf_deferred_free(struct mlx5_core_dev *dev, u16 id)
+void mlx5_sf_hw_table_sf_deferred_free(struct mlx5_core_dev *dev, u32 controller, u16 id)
{
struct mlx5_sf_hw_table *table = dev->priv.sf_hw_table;
u32 out[MLX5_ST_SZ_DW(query_vhca_state_out)] = {};
+ struct mlx5_sf_hwc_table *hwc;
u16 hw_fn_id;
u8 state;
int err;
- hw_fn_id = mlx5_sf_sw_to_hw_id(dev, id);
+ hw_fn_id = mlx5_sf_sw_to_hw_id(dev, controller, id);
+ hwc = mlx5_sf_controller_to_hwc(dev, controller);
mutex_lock(&table->table_lock);
err = mlx5_cmd_query_vhca_state(dev, hw_fn_id, out, sizeof(out));
if (err)
goto err;
state = MLX5_GET(query_vhca_state_out, out, vhca_state_context.vhca_state);
if (state == MLX5_VHCA_STATE_ALLOCATED) {
- mlx5_cmd_dealloc_sf(table->dev, hw_fn_id);
- table->sfs[id].allocated = false;
+ mlx5_cmd_dealloc_sf(dev, hw_fn_id);
+ hwc->sfs[id].allocated = false;
} else {
- table->sfs[id].pending_delete = true;
+ hwc->sfs[id].pending_delete = true;
}
err:
mutex_unlock(&table->table_lock);
}
-static void mlx5_sf_hw_dealloc_all(struct mlx5_sf_hw_table *table)
+static void mlx5_sf_hw_table_hwc_dealloc_all(struct mlx5_core_dev *dev,
+ struct mlx5_sf_hwc_table *hwc)
{
int i;
- for (i = 0; i < table->max_local_functions; i++) {
- if (table->sfs[i].allocated)
- _mlx5_sf_hw_id_free(table->dev, i);
+ for (i = 0; i < hwc->max_fn; i++) {
+ if (hwc->sfs[i].allocated)
+ mlx5_sf_hw_table_hwc_sf_free(dev, hwc, i);
}
}
+static void mlx5_sf_hw_table_dealloc_all(struct mlx5_sf_hw_table *table)
+{
+ mlx5_sf_hw_table_hwc_dealloc_all(table->dev, &table->hwc[MLX5_SF_HWC_EXTERNAL]);
+ mlx5_sf_hw_table_hwc_dealloc_all(table->dev, &table->hwc[MLX5_SF_HWC_LOCAL]);
+}
+
+static int mlx5_sf_hw_table_hwc_init(struct mlx5_sf_hwc_table *hwc, u16 max_fn, u16 base_id)
+{
+ struct mlx5_sf_hw *sfs;
+
+ if (!max_fn)
+ return 0;
+
+ sfs = kcalloc(max_fn, sizeof(*sfs), GFP_KERNEL);
+ if (!sfs)
+ return -ENOMEM;
+
+ hwc->sfs = sfs;
+ hwc->max_fn = max_fn;
+ hwc->start_fn_id = base_id;
+ return 0;
+}
+
+static void mlx5_sf_hw_table_hwc_cleanup(struct mlx5_sf_hwc_table *hwc)
+{
+ kfree(hwc->sfs);
+}
+
int mlx5_sf_hw_table_init(struct mlx5_core_dev *dev)
{
struct mlx5_sf_hw_table *table;
- struct mlx5_sf_hw *sfs;
- int max_functions;
+ u16 max_ext_fn = 0;
+ u16 ext_base_id;
+ u16 max_fn = 0;
+ u16 base_id;
+ int err;
- if (!mlx5_sf_supported(dev) || !mlx5_vhca_event_supported(dev))
+ if (!mlx5_vhca_event_supported(dev))
+ return 0;
+
+ if (mlx5_sf_supported(dev))
+ max_fn = mlx5_sf_max_functions(dev);
+
+ err = mlx5_esw_sf_max_hpf_functions(dev, &max_ext_fn, &ext_base_id);
+ if (err)
+ return err;
+
+ if (!max_fn && !max_ext_fn)
return 0;
- max_functions = mlx5_sf_max_functions(dev);
table = kzalloc(sizeof(*table), GFP_KERNEL);
if (!table)
return -ENOMEM;
- sfs = kcalloc(max_functions, sizeof(*sfs), GFP_KERNEL);
- if (!sfs)
- goto table_err;
-
mutex_init(&table->table_lock);
table->dev = dev;
- table->sfs = sfs;
- table->max_local_functions = max_functions;
dev->priv.sf_hw_table = table;
- mlx5_core_dbg(dev, "SF HW table: max sfs = %d\n", max_functions);
+
+ base_id = mlx5_sf_start_function_id(dev);
+ err = mlx5_sf_hw_table_hwc_init(&table->hwc[MLX5_SF_HWC_LOCAL], max_fn, base_id);
+ if (err)
+ goto table_err;
+
+ err = mlx5_sf_hw_table_hwc_init(&table->hwc[MLX5_SF_HWC_EXTERNAL],
+ max_ext_fn, ext_base_id);
+ if (err)
+ goto ext_err;
+
+ mlx5_core_dbg(dev, "SF HW table: max sfs = %d, ext sfs = %d\n", max_fn, max_ext_fn);
return 0;
+ext_err:
+ mlx5_sf_hw_table_hwc_cleanup(&table->hwc[MLX5_SF_HWC_LOCAL]);
table_err:
+ mutex_destroy(&table->table_lock);
kfree(table);
- return -ENOMEM;
+ return err;
}
void mlx5_sf_hw_table_cleanup(struct mlx5_core_dev *dev)
@@ -180,7 +296,8 @@ void mlx5_sf_hw_table_cleanup(struct mlx5_core_dev *dev)
return;
mutex_destroy(&table->table_lock);
- kfree(table->sfs);
+ mlx5_sf_hw_table_hwc_cleanup(&table->hwc[MLX5_SF_HWC_EXTERNAL]);
+ mlx5_sf_hw_table_hwc_cleanup(&table->hwc[MLX5_SF_HWC_LOCAL]);
kfree(table);
}
@@ -188,21 +305,26 @@ static int mlx5_sf_hw_vhca_event(struct notifier_block *nb, unsigned long opcode
{
struct mlx5_sf_hw_table *table = container_of(nb, struct mlx5_sf_hw_table, vhca_nb);
const struct mlx5_vhca_state_event *event = data;
+ struct mlx5_sf_hwc_table *hwc;
struct mlx5_sf_hw *sf_hw;
u16 sw_id;
if (event->new_vhca_state != MLX5_VHCA_STATE_ALLOCATED)
return 0;
- sw_id = mlx5_sf_hw_to_sw_id(table->dev, event->function_id);
- sf_hw = &table->sfs[sw_id];
+ hwc = mlx5_sf_table_fn_to_hwc(table, event->function_id);
+ if (!hwc)
+ return 0;
+
+ sw_id = mlx5_sf_hw_to_sw_id(hwc, event->function_id);
+ sf_hw = &hwc->sfs[sw_id];
mutex_lock(&table->table_lock);
/* SF driver notified through firmware that SF is finally detached.
* Hence recycle the sf hardware id for reuse.
*/
if (sf_hw->allocated && sf_hw->pending_delete)
- _mlx5_sf_hw_id_free(table->dev, sw_id);
+ mlx5_sf_hw_table_hwc_sf_free(table->dev, hwc, sw_id);
mutex_unlock(&table->table_lock);
return 0;
}
@@ -215,7 +337,7 @@ int mlx5_sf_hw_table_create(struct mlx5_core_dev *dev)
return 0;
table->vhca_nb.notifier_call = mlx5_sf_hw_vhca_event;
- return mlx5_vhca_event_notifier_register(table->dev, &table->vhca_nb);
+ return mlx5_vhca_event_notifier_register(dev, &table->vhca_nb);
}
void mlx5_sf_hw_table_destroy(struct mlx5_core_dev *dev)
@@ -225,7 +347,12 @@ void mlx5_sf_hw_table_destroy(struct mlx5_core_dev *dev)
if (!table)
return;
- mlx5_vhca_event_notifier_unregister(table->dev, &table->vhca_nb);
+ mlx5_vhca_event_notifier_unregister(dev, &table->vhca_nb);
/* Dealloc SFs whose firmware event has been missed. */
- mlx5_sf_hw_dealloc_all(table);
+ mlx5_sf_hw_table_dealloc_all(table);
+}
+
+bool mlx5_sf_hw_table_supported(const struct mlx5_core_dev *dev)
+{
+ return !!dev->priv.sf_hw_table;
}
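The HW table is now split into per-controller sub-tables (hwc): index 0 holds SFs owned by the local controller, index 1 holds SFs that the ECPF provisions for an external host controller. Any non-zero controller number maps to the external slot via !!controller, and HW function IDs are resolved back to a sub-table by range lookup. A standalone sketch of the two mappings, with the structures trimmed to the fields they use:

	#include <stddef.h>

	struct hwc { int max_fn; unsigned short start_fn_id; };
	struct hw_table { struct hwc hwc[2]; };	/* [0] local, [1] external */

	static struct hwc *controller_to_hwc(struct hw_table *t, unsigned int controller)
	{
		return &t->hwc[!!controller];	/* any external controller -> slot 1 */
	}

	static struct hwc *fn_to_hwc(struct hw_table *t, unsigned short fn_id)
	{
		int i;

		for (i = 0; i < 2; i++) {
			struct hwc *h = &t->hwc[i];

			if (h->max_fn && fn_id >= h->start_fn_id &&
			    fn_id < h->start_fn_id + h->max_fn)
				return h;
		}
		return NULL;	/* event for a function no sub-table covers */
	}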
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/priv.h b/drivers/net/ethernet/mellanox/mlx5/core/sf/priv.h
index cb02a51d0986..7114f3fc335f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/sf/priv.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/priv.h
@@ -12,10 +12,11 @@ int mlx5_cmd_dealloc_sf(struct mlx5_core_dev *dev, u16 function_id);
int mlx5_cmd_sf_enable_hca(struct mlx5_core_dev *dev, u16 func_id);
int mlx5_cmd_sf_disable_hca(struct mlx5_core_dev *dev, u16 func_id);
-u16 mlx5_sf_sw_to_hw_id(const struct mlx5_core_dev *dev, u16 sw_id);
+u16 mlx5_sf_sw_to_hw_id(struct mlx5_core_dev *dev, u32 controller, u16 sw_id);
-int mlx5_sf_hw_table_sf_alloc(struct mlx5_core_dev *dev, u32 usr_sfnum);
-void mlx5_sf_hw_table_sf_free(struct mlx5_core_dev *dev, u16 id);
-void mlx5_sf_hw_table_sf_deferred_free(struct mlx5_core_dev *dev, u16 id);
+int mlx5_sf_hw_table_sf_alloc(struct mlx5_core_dev *dev, u32 controller, u32 usr_sfnum);
+void mlx5_sf_hw_table_sf_free(struct mlx5_core_dev *dev, u32 controller, u16 id);
+void mlx5_sf_hw_table_sf_deferred_free(struct mlx5_core_dev *dev, u32 controller, u16 id);
+bool mlx5_sf_hw_table_supported(const struct mlx5_core_dev *dev);
#endif
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sriov.c b/drivers/net/ethernet/mellanox/mlx5/core/sriov.c
index 3094d20297a9..2338989d4403 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/sriov.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/sriov.c
@@ -71,8 +71,7 @@ static int sriov_restore_guids(struct mlx5_core_dev *dev, int vf)
static int mlx5_device_enable_sriov(struct mlx5_core_dev *dev, int num_vfs)
{
struct mlx5_core_sriov *sriov = &dev->priv.sriov;
- int err;
- int vf;
+ int err, vf, num_msix_count;
if (!MLX5_ESWITCH_MANAGER(dev))
goto enable_vfs_hca;
@@ -85,12 +84,22 @@ static int mlx5_device_enable_sriov(struct mlx5_core_dev *dev, int num_vfs)
}
enable_vfs_hca:
+ num_msix_count = mlx5_get_default_msix_vec_count(dev, num_vfs);
for (vf = 0; vf < num_vfs; vf++) {
err = mlx5_core_enable_hca(dev, vf + 1);
if (err) {
mlx5_core_warn(dev, "failed to enable VF %d (%d)\n", vf, err);
continue;
}
+
+ err = mlx5_set_msix_vec_count(dev, vf + 1, num_msix_count);
+ if (err) {
+ mlx5_core_warn(dev,
+ "failed to set MSI-X vector counts VF %d, err %d\n",
+ vf, err);
+ continue;
+ }
+
sriov->vfs_ctx[vf].enabled = 1;
if (MLX5_CAP_GEN(dev, port_type) == MLX5_CAP_PORT_TYPE_IB) {
err = sriov_restore_guids(dev, vf);
@@ -178,6 +187,41 @@ int mlx5_core_sriov_configure(struct pci_dev *pdev, int num_vfs)
return err ? err : num_vfs;
}
+int mlx5_core_sriov_set_msix_vec_count(struct pci_dev *vf, int msix_vec_count)
+{
+ struct pci_dev *pf = pci_physfn(vf);
+ struct mlx5_core_sriov *sriov;
+ struct mlx5_core_dev *dev;
+ int num_vf_msix, id;
+
+ dev = pci_get_drvdata(pf);
+ num_vf_msix = MLX5_CAP_GEN_MAX(dev, num_total_dynamic_vf_msix);
+ if (!num_vf_msix)
+ return -EOPNOTSUPP;
+
+ if (!msix_vec_count)
+ msix_vec_count =
+ mlx5_get_default_msix_vec_count(dev, pci_num_vf(pf));
+
+ sriov = &dev->priv.sriov;
+
+ /* Reverse translation of the PCI VF function number to the internal
+ * function_id, which appears in the name of the virtfn<N> symlink.
+ */
+ for (id = 0; id < pci_num_vf(pf); id++) {
+ if (!sriov->vfs_ctx[id].enabled)
+ continue;
+
+ if (vf->devfn == pci_iov_virtfn_devfn(pf, id))
+ break;
+ }
+
+ if (id == pci_num_vf(pf) || !sriov->vfs_ctx[id].enabled)
+ return -EINVAL;
+
+ return mlx5_set_msix_vec_count(dev, id + 1, msix_vec_count);
+}
+
int mlx5_sriov_attach(struct mlx5_core_dev *dev)
{
if (!mlx5_core_is_pf(dev) || !pci_num_vf(dev->pdev))
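mlx5_core_sriov_set_msix_vec_count() backs the new per-VF MSI-X vector-count knob: writing 0 restores the default from mlx5_get_default_msix_vec_count(), and the VF's PCI devfn is reverse-mapped to the driver's VF index (the device's function id is index + 1, with id 0 being the PF). A sketch of the reverse lookup, using the in-kernel pci_iov_virtfn_devfn() helper:

	/* Sketch: map a VF's PCI devfn back to its zero-based VF index. */
	static int vf_devfn_to_index(struct pci_dev *pf, struct pci_dev *vf)
	{
		int id;

		for (id = 0; id < pci_num_vf(pf); id++)
			if (vf->devfn == pci_iov_virtfn_devfn(pf, id))
				return id;	/* virtfn<id> owns this devfn */
		return -1;			/* not a VF of this PF */
	}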
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c
index 28a7971cac6a..949879cf2092 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c
@@ -313,8 +313,8 @@ static int dr_action_handle_cs_recalc(struct mlx5dr_domain *dmn,
* table, since there is an *assumption* that in such case FW
* will recalculate the CS.
*/
- if (dest_action->dest_tbl.is_fw_tbl) {
- *final_icm_addr = dest_action->dest_tbl.fw_tbl.rx_icm_addr;
+ if (dest_action->dest_tbl->is_fw_tbl) {
+ *final_icm_addr = dest_action->dest_tbl->fw_tbl.rx_icm_addr;
} else {
mlx5dr_dbg(dmn,
"Destination FT should be terminating when modify TTL is used\n");
@@ -326,8 +326,8 @@ static int dr_action_handle_cs_recalc(struct mlx5dr_domain *dmn,
/* If destination is vport we will get the FW flow table
* that recalculates the CS and forwards to the vport.
*/
- ret = mlx5dr_domain_cache_get_recalc_cs_ft_addr(dest_action->vport.dmn,
- dest_action->vport.caps->num,
+ ret = mlx5dr_domain_cache_get_recalc_cs_ft_addr(dest_action->vport->dmn,
+ dest_action->vport->caps->num,
final_icm_addr);
if (ret) {
mlx5dr_err(dmn, "Failed to get FW cs recalc flow table\n");
@@ -369,6 +369,7 @@ int mlx5dr_actions_build_ste_arr(struct mlx5dr_matcher *matcher,
action_domain = dr_action_get_action_domain(dmn->type, nic_dmn->ste_type);
for (i = 0; i < num_actions; i++) {
+ struct mlx5dr_action_dest_tbl *dest_tbl;
struct mlx5dr_action *action;
int max_actions_type = 1;
u32 action_type;
@@ -382,37 +383,38 @@ int mlx5dr_actions_build_ste_arr(struct mlx5dr_matcher *matcher,
break;
case DR_ACTION_TYP_FT:
dest_action = action;
- if (!action->dest_tbl.is_fw_tbl) {
- if (action->dest_tbl.tbl->dmn != dmn) {
+ dest_tbl = action->dest_tbl;
+ if (!dest_tbl->is_fw_tbl) {
+ if (dest_tbl->tbl->dmn != dmn) {
mlx5dr_err(dmn,
"Destination table belongs to a different domain\n");
goto out_invalid_arg;
}
- if (action->dest_tbl.tbl->level <= matcher->tbl->level) {
+ if (dest_tbl->tbl->level <= matcher->tbl->level) {
mlx5_core_warn_once(dmn->mdev,
"Connecting table to a lower/same level destination table\n");
mlx5dr_dbg(dmn,
"Connecting table at level %d to a destination table at level %d\n",
matcher->tbl->level,
- action->dest_tbl.tbl->level);
+ dest_tbl->tbl->level);
}
attr.final_icm_addr = rx_rule ?
- action->dest_tbl.tbl->rx.s_anchor->chunk->icm_addr :
- action->dest_tbl.tbl->tx.s_anchor->chunk->icm_addr;
+ dest_tbl->tbl->rx.s_anchor->chunk->icm_addr :
+ dest_tbl->tbl->tx.s_anchor->chunk->icm_addr;
} else {
struct mlx5dr_cmd_query_flow_table_details output;
int ret;
/* get the relevant addresses */
- if (!action->dest_tbl.fw_tbl.rx_icm_addr) {
+ if (!action->dest_tbl->fw_tbl.rx_icm_addr) {
ret = mlx5dr_cmd_query_flow_table(dmn->mdev,
- action->dest_tbl.fw_tbl.type,
- action->dest_tbl.fw_tbl.id,
+ dest_tbl->fw_tbl.type,
+ dest_tbl->fw_tbl.id,
&output);
if (!ret) {
- action->dest_tbl.fw_tbl.tx_icm_addr =
+ dest_tbl->fw_tbl.tx_icm_addr =
output.sw_owner_icm_root_1;
- action->dest_tbl.fw_tbl.rx_icm_addr =
+ dest_tbl->fw_tbl.rx_icm_addr =
output.sw_owner_icm_root_0;
} else {
mlx5dr_err(dmn,
@@ -422,50 +424,50 @@ int mlx5dr_actions_build_ste_arr(struct mlx5dr_matcher *matcher,
}
}
attr.final_icm_addr = rx_rule ?
- action->dest_tbl.fw_tbl.rx_icm_addr :
- action->dest_tbl.fw_tbl.tx_icm_addr;
+ dest_tbl->fw_tbl.rx_icm_addr :
+ dest_tbl->fw_tbl.tx_icm_addr;
}
break;
case DR_ACTION_TYP_QP:
mlx5dr_info(dmn, "Domain doesn't support QP\n");
goto out_invalid_arg;
case DR_ACTION_TYP_CTR:
- attr.ctr_id = action->ctr.ctr_id +
- action->ctr.offeset;
+ attr.ctr_id = action->ctr->ctr_id +
+ action->ctr->offeset;
break;
case DR_ACTION_TYP_TAG:
- attr.flow_tag = action->flow_tag;
+ attr.flow_tag = action->flow_tag->flow_tag;
break;
case DR_ACTION_TYP_TNL_L2_TO_L2:
break;
case DR_ACTION_TYP_TNL_L3_TO_L2:
- attr.decap_index = action->rewrite.index;
- attr.decap_actions = action->rewrite.num_of_actions;
+ attr.decap_index = action->rewrite->index;
+ attr.decap_actions = action->rewrite->num_of_actions;
attr.decap_with_vlan =
attr.decap_actions == WITH_VLAN_NUM_HW_ACTIONS;
break;
case DR_ACTION_TYP_MODIFY_HDR:
- attr.modify_index = action->rewrite.index;
- attr.modify_actions = action->rewrite.num_of_actions;
- recalc_cs_required = action->rewrite.modify_ttl &&
+ attr.modify_index = action->rewrite->index;
+ attr.modify_actions = action->rewrite->num_of_actions;
+ recalc_cs_required = action->rewrite->modify_ttl &&
!mlx5dr_ste_supp_ttl_cs_recalc(&dmn->info.caps);
break;
case DR_ACTION_TYP_L2_TO_TNL_L2:
case DR_ACTION_TYP_L2_TO_TNL_L3:
- attr.reformat_size = action->reformat.reformat_size;
- attr.reformat_id = action->reformat.reformat_id;
+ attr.reformat_size = action->reformat->reformat_size;
+ attr.reformat_id = action->reformat->reformat_id;
break;
case DR_ACTION_TYP_VPORT:
- attr.hit_gvmi = action->vport.caps->vhca_gvmi;
+ attr.hit_gvmi = action->vport->caps->vhca_gvmi;
dest_action = action;
if (rx_rule) {
/* Loopback on WIRE vport is not supported */
- if (action->vport.caps->num == WIRE_PORT)
+ if (action->vport->caps->num == WIRE_PORT)
goto out_invalid_arg;
- attr.final_icm_addr = action->vport.caps->icm_address_rx;
+ attr.final_icm_addr = action->vport->caps->icm_address_rx;
} else {
- attr.final_icm_addr = action->vport.caps->icm_address_tx;
+ attr.final_icm_addr = action->vport->caps->icm_address_tx;
}
break;
case DR_ACTION_TYP_POP_VLAN:
@@ -477,7 +479,7 @@ int mlx5dr_actions_build_ste_arr(struct mlx5dr_matcher *matcher,
if (attr.vlans.count == MLX5DR_MAX_VLANS)
return -EINVAL;
- attr.vlans.headers[attr.vlans.count++] = action->push_vlan.vlan_hdr;
+ attr.vlans.headers[attr.vlans.count++] = action->push_vlan->vlan_hdr;
break;
default:
goto out_invalid_arg;
@@ -530,17 +532,37 @@ out_invalid_arg:
return -EINVAL;
}
+static unsigned int action_size[DR_ACTION_TYP_MAX] = {
+ [DR_ACTION_TYP_TNL_L2_TO_L2] = sizeof(struct mlx5dr_action_reformat),
+ [DR_ACTION_TYP_L2_TO_TNL_L2] = sizeof(struct mlx5dr_action_reformat),
+ [DR_ACTION_TYP_TNL_L3_TO_L2] = sizeof(struct mlx5dr_action_rewrite),
+ [DR_ACTION_TYP_L2_TO_TNL_L3] = sizeof(struct mlx5dr_action_reformat),
+ [DR_ACTION_TYP_FT] = sizeof(struct mlx5dr_action_dest_tbl),
+ [DR_ACTION_TYP_CTR] = sizeof(struct mlx5dr_action_ctr),
+ [DR_ACTION_TYP_TAG] = sizeof(struct mlx5dr_action_flow_tag),
+ [DR_ACTION_TYP_MODIFY_HDR] = sizeof(struct mlx5dr_action_rewrite),
+ [DR_ACTION_TYP_VPORT] = sizeof(struct mlx5dr_action_vport),
+ [DR_ACTION_TYP_PUSH_VLAN] = sizeof(struct mlx5dr_action_push_vlan),
+};
+
static struct mlx5dr_action *
dr_action_create_generic(enum mlx5dr_action_type action_type)
{
struct mlx5dr_action *action;
+ int extra_size;
+
+ if (action_type < DR_ACTION_TYP_MAX)
+ extra_size = action_size[action_type];
+ else
+ return NULL;
- action = kzalloc(sizeof(*action), GFP_KERNEL);
+ action = kzalloc(sizeof(*action) + extra_size, GFP_KERNEL);
if (!action)
return NULL;
action->action_type = action_type;
refcount_set(&action->refcount, 1);
+ action->data = action + 1;
return action;
}
@@ -559,10 +581,10 @@ mlx5dr_action_create_dest_table_num(struct mlx5dr_domain *dmn, u32 table_num)
if (!action)
return NULL;
- action->dest_tbl.is_fw_tbl = true;
- action->dest_tbl.fw_tbl.dmn = dmn;
- action->dest_tbl.fw_tbl.id = table_num;
- action->dest_tbl.fw_tbl.type = FS_FT_FDB;
+ action->dest_tbl->is_fw_tbl = true;
+ action->dest_tbl->fw_tbl.dmn = dmn;
+ action->dest_tbl->fw_tbl.id = table_num;
+ action->dest_tbl->fw_tbl.type = FS_FT_FDB;
refcount_inc(&dmn->refcount);
return action;
@@ -579,7 +601,7 @@ mlx5dr_action_create_dest_table(struct mlx5dr_table *tbl)
if (!action)
goto dec_ref;
- action->dest_tbl.tbl = tbl;
+ action->dest_tbl->tbl = tbl;
return action;
@@ -624,12 +646,12 @@ mlx5dr_action_create_mult_dest_tbl(struct mlx5dr_domain *dmn,
case DR_ACTION_TYP_VPORT:
hw_dests[i].vport.flags = MLX5_FLOW_DEST_VPORT_VHCA_ID;
hw_dests[i].type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
- hw_dests[i].vport.num = dest_action->vport.caps->num;
- hw_dests[i].vport.vhca_id = dest_action->vport.caps->vhca_gvmi;
+ hw_dests[i].vport.num = dest_action->vport->caps->num;
+ hw_dests[i].vport.vhca_id = dest_action->vport->caps->vhca_gvmi;
if (reformat_action) {
reformat_req = true;
hw_dests[i].vport.reformat_id =
- reformat_action->reformat.reformat_id;
+ reformat_action->reformat->reformat_id;
ref_actions[num_of_ref++] = reformat_action;
hw_dests[i].vport.flags |= MLX5_FLOW_DEST_VPORT_REFORMAT_ID;
}
@@ -637,10 +659,10 @@ mlx5dr_action_create_mult_dest_tbl(struct mlx5dr_domain *dmn,
case DR_ACTION_TYP_FT:
hw_dests[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
- if (dest_action->dest_tbl.is_fw_tbl)
- hw_dests[i].ft_id = dest_action->dest_tbl.fw_tbl.id;
+ if (dest_action->dest_tbl->is_fw_tbl)
+ hw_dests[i].ft_id = dest_action->dest_tbl->fw_tbl.id;
else
- hw_dests[i].ft_id = dest_action->dest_tbl.tbl->table_id;
+ hw_dests[i].ft_id = dest_action->dest_tbl->tbl->table_id;
break;
default:
@@ -657,8 +679,8 @@ mlx5dr_action_create_mult_dest_tbl(struct mlx5dr_domain *dmn,
hw_dests,
num_of_dests,
reformat_req,
- &action->dest_tbl.fw_tbl.id,
- &action->dest_tbl.fw_tbl.group_id);
+ &action->dest_tbl->fw_tbl.id,
+ &action->dest_tbl->fw_tbl.group_id);
if (ret)
goto free_action;
@@ -667,11 +689,11 @@ mlx5dr_action_create_mult_dest_tbl(struct mlx5dr_domain *dmn,
for (i = 0; i < num_of_ref; i++)
refcount_inc(&ref_actions[i]->refcount);
- action->dest_tbl.is_fw_tbl = true;
- action->dest_tbl.fw_tbl.dmn = dmn;
- action->dest_tbl.fw_tbl.type = FS_FT_FDB;
- action->dest_tbl.fw_tbl.ref_actions = ref_actions;
- action->dest_tbl.fw_tbl.num_of_ref_actions = num_of_ref;
+ action->dest_tbl->is_fw_tbl = true;
+ action->dest_tbl->fw_tbl.dmn = dmn;
+ action->dest_tbl->fw_tbl.type = FS_FT_FDB;
+ action->dest_tbl->fw_tbl.ref_actions = ref_actions;
+ action->dest_tbl->fw_tbl.num_of_ref_actions = num_of_ref;
kfree(hw_dests);
@@ -696,10 +718,10 @@ mlx5dr_action_create_dest_flow_fw_table(struct mlx5dr_domain *dmn,
if (!action)
return NULL;
- action->dest_tbl.is_fw_tbl = 1;
- action->dest_tbl.fw_tbl.type = ft->type;
- action->dest_tbl.fw_tbl.id = ft->id;
- action->dest_tbl.fw_tbl.dmn = dmn;
+ action->dest_tbl->is_fw_tbl = 1;
+ action->dest_tbl->fw_tbl.type = ft->type;
+ action->dest_tbl->fw_tbl.id = ft->id;
+ action->dest_tbl->fw_tbl.dmn = dmn;
refcount_inc(&dmn->refcount);
@@ -715,7 +737,7 @@ mlx5dr_action_create_flow_counter(u32 counter_id)
if (!action)
return NULL;
- action->ctr.ctr_id = counter_id;
+ action->ctr->ctr_id = counter_id;
return action;
}
@@ -728,7 +750,7 @@ struct mlx5dr_action *mlx5dr_action_create_tag(u32 tag_value)
if (!action)
return NULL;
- action->flow_tag = tag_value & 0xffffff;
+ action->flow_tag->flow_tag = tag_value & 0xffffff;
return action;
}
@@ -794,8 +816,8 @@ dr_action_create_reformat_action(struct mlx5dr_domain *dmn,
if (ret)
return ret;
- action->reformat.reformat_id = reformat_id;
- action->reformat.reformat_size = data_sz;
+ action->reformat->reformat_id = reformat_id;
+ action->reformat->reformat_size = data_sz;
return 0;
}
case DR_ACTION_TYP_TNL_L2_TO_L2:
@@ -811,28 +833,28 @@ dr_action_create_reformat_action(struct mlx5dr_domain *dmn,
data, data_sz,
hw_actions,
ACTION_CACHE_LINE_SIZE,
- &action->rewrite.num_of_actions);
+ &action->rewrite->num_of_actions);
if (ret) {
mlx5dr_dbg(dmn, "Failed creating decap l3 action list\n");
return ret;
}
- action->rewrite.chunk = mlx5dr_icm_alloc_chunk(dmn->action_icm_pool,
- DR_CHUNK_SIZE_8);
- if (!action->rewrite.chunk) {
+ action->rewrite->chunk = mlx5dr_icm_alloc_chunk(dmn->action_icm_pool,
+ DR_CHUNK_SIZE_8);
+ if (!action->rewrite->chunk) {
mlx5dr_dbg(dmn, "Failed allocating modify header chunk\n");
return -ENOMEM;
}
- action->rewrite.data = (void *)hw_actions;
- action->rewrite.index = (action->rewrite.chunk->icm_addr -
+ action->rewrite->data = (void *)hw_actions;
+ action->rewrite->index = (action->rewrite->chunk->icm_addr -
dmn->info.caps.hdr_modify_icm_addr) /
ACTION_CACHE_LINE_SIZE;
ret = mlx5dr_send_postsend_action(dmn, action);
if (ret) {
mlx5dr_dbg(dmn, "Writing decap l3 actions to ICM failed\n");
- mlx5dr_icm_free_chunk(action->rewrite.chunk);
+ mlx5dr_icm_free_chunk(action->rewrite->chunk);
return ret;
}
return 0;
@@ -867,7 +889,7 @@ struct mlx5dr_action *mlx5dr_action_create_push_vlan(struct mlx5dr_domain *dmn,
if (!action)
return NULL;
- action->push_vlan.vlan_hdr = vlan_hdr_h;
+ action->push_vlan->vlan_hdr = vlan_hdr_h;
return action;
}
@@ -898,7 +920,7 @@ mlx5dr_action_create_packet_reformat(struct mlx5dr_domain *dmn,
if (!action)
goto dec_ref;
- action->reformat.dmn = dmn;
+ action->reformat->dmn = dmn;
ret = dr_action_create_reformat_action(dmn,
data_sz,
@@ -1104,17 +1126,17 @@ dr_action_modify_check_set_field_limitation(struct mlx5dr_action *action,
const __be64 *sw_action)
{
u16 sw_field = MLX5_GET(set_action_in, sw_action, field);
- struct mlx5dr_domain *dmn = action->rewrite.dmn;
+ struct mlx5dr_domain *dmn = action->rewrite->dmn;
if (sw_field == MLX5_ACTION_IN_FIELD_METADATA_REG_A) {
- action->rewrite.allow_rx = 0;
+ action->rewrite->allow_rx = 0;
if (dmn->type != MLX5DR_DOMAIN_TYPE_NIC_TX) {
mlx5dr_dbg(dmn, "Unsupported field %d for RX/FDB set action\n",
sw_field);
return -EINVAL;
}
} else if (sw_field == MLX5_ACTION_IN_FIELD_METADATA_REG_B) {
- action->rewrite.allow_tx = 0;
+ action->rewrite->allow_tx = 0;
if (dmn->type != MLX5DR_DOMAIN_TYPE_NIC_RX) {
mlx5dr_dbg(dmn, "Unsupported field %d for TX/FDB set action\n",
sw_field);
@@ -1122,7 +1144,7 @@ dr_action_modify_check_set_field_limitation(struct mlx5dr_action *action,
}
}
- if (!action->rewrite.allow_rx && !action->rewrite.allow_tx) {
+ if (!action->rewrite->allow_rx && !action->rewrite->allow_tx) {
mlx5dr_dbg(dmn, "Modify SET actions not supported on both RX and TX\n");
return -EINVAL;
}
@@ -1135,7 +1157,7 @@ dr_action_modify_check_add_field_limitation(struct mlx5dr_action *action,
const __be64 *sw_action)
{
u16 sw_field = MLX5_GET(set_action_in, sw_action, field);
- struct mlx5dr_domain *dmn = action->rewrite.dmn;
+ struct mlx5dr_domain *dmn = action->rewrite->dmn;
if (sw_field != MLX5_ACTION_IN_FIELD_OUT_IP_TTL &&
sw_field != MLX5_ACTION_IN_FIELD_OUT_IPV6_HOPLIMIT &&
@@ -1153,7 +1175,7 @@ static int
dr_action_modify_check_copy_field_limitation(struct mlx5dr_action *action,
const __be64 *sw_action)
{
- struct mlx5dr_domain *dmn = action->rewrite.dmn;
+ struct mlx5dr_domain *dmn = action->rewrite->dmn;
u16 sw_fields[2];
int i;
@@ -1162,14 +1184,14 @@ dr_action_modify_check_copy_field_limitation(struct mlx5dr_action *action,
for (i = 0; i < 2; i++) {
if (sw_fields[i] == MLX5_ACTION_IN_FIELD_METADATA_REG_A) {
- action->rewrite.allow_rx = 0;
+ action->rewrite->allow_rx = 0;
if (dmn->type != MLX5DR_DOMAIN_TYPE_NIC_TX) {
mlx5dr_dbg(dmn, "Unsupported field %d for RX/FDB set action\n",
sw_fields[i]);
return -EINVAL;
}
} else if (sw_fields[i] == MLX5_ACTION_IN_FIELD_METADATA_REG_B) {
- action->rewrite.allow_tx = 0;
+ action->rewrite->allow_tx = 0;
if (dmn->type != MLX5DR_DOMAIN_TYPE_NIC_RX) {
mlx5dr_dbg(dmn, "Unsupported field %d for TX/FDB set action\n",
sw_fields[i]);
@@ -1178,7 +1200,7 @@ dr_action_modify_check_copy_field_limitation(struct mlx5dr_action *action,
}
}
- if (!action->rewrite.allow_rx && !action->rewrite.allow_tx) {
+ if (!action->rewrite->allow_rx && !action->rewrite->allow_tx) {
mlx5dr_dbg(dmn, "Modify copy actions not supported on both RX and TX\n");
return -EINVAL;
}
@@ -1190,7 +1212,7 @@ static int
dr_action_modify_check_field_limitation(struct mlx5dr_action *action,
const __be64 *sw_action)
{
- struct mlx5dr_domain *dmn = action->rewrite.dmn;
+ struct mlx5dr_domain *dmn = action->rewrite->dmn;
u8 action_type;
int ret;
@@ -1239,7 +1261,7 @@ static int dr_actions_convert_modify_header(struct mlx5dr_action *action,
{
const struct mlx5dr_ste_action_modify_field *hw_dst_action_info;
const struct mlx5dr_ste_action_modify_field *hw_src_action_info;
- struct mlx5dr_domain *dmn = action->rewrite.dmn;
+ struct mlx5dr_domain *dmn = action->rewrite->dmn;
int ret, i, hw_idx = 0;
__be64 *sw_action;
__be64 hw_action;
@@ -1249,8 +1271,8 @@ static int dr_actions_convert_modify_header(struct mlx5dr_action *action,
*modify_ttl = false;
- action->rewrite.allow_rx = 1;
- action->rewrite.allow_tx = 1;
+ action->rewrite->allow_rx = 1;
+ action->rewrite->allow_tx = 1;
for (i = 0; i < num_sw_actions; i++) {
sw_action = &sw_actions[i];
@@ -1358,13 +1380,13 @@ static int dr_action_create_modify_action(struct mlx5dr_domain *dmn,
if (ret)
goto free_hw_actions;
- action->rewrite.chunk = chunk;
- action->rewrite.modify_ttl = modify_ttl;
- action->rewrite.data = (u8 *)hw_actions;
- action->rewrite.num_of_actions = num_hw_actions;
- action->rewrite.index = (chunk->icm_addr -
- dmn->info.caps.hdr_modify_icm_addr) /
- ACTION_CACHE_LINE_SIZE;
+ action->rewrite->chunk = chunk;
+ action->rewrite->modify_ttl = modify_ttl;
+ action->rewrite->data = (u8 *)hw_actions;
+ action->rewrite->num_of_actions = num_hw_actions;
+ action->rewrite->index = (chunk->icm_addr -
+ dmn->info.caps.hdr_modify_icm_addr) /
+ ACTION_CACHE_LINE_SIZE;
ret = mlx5dr_send_postsend_action(dmn, action);
if (ret)
@@ -1399,7 +1421,7 @@ mlx5dr_action_create_modify_header(struct mlx5dr_domain *dmn,
if (!action)
goto dec_ref;
- action->rewrite.dmn = dmn;
+ action->rewrite->dmn = dmn;
ret = dr_action_create_modify_action(dmn,
actions_sz,
@@ -1451,8 +1473,8 @@ mlx5dr_action_create_dest_vport(struct mlx5dr_domain *dmn,
if (!action)
return NULL;
- action->vport.dmn = vport_dmn;
- action->vport.caps = vport_cap;
+ action->vport->dmn = vport_dmn;
+ action->vport->caps = vport_cap;
return action;
}
@@ -1464,44 +1486,44 @@ int mlx5dr_action_destroy(struct mlx5dr_action *action)
switch (action->action_type) {
case DR_ACTION_TYP_FT:
- if (action->dest_tbl.is_fw_tbl)
- refcount_dec(&action->dest_tbl.fw_tbl.dmn->refcount);
+ if (action->dest_tbl->is_fw_tbl)
+ refcount_dec(&action->dest_tbl->fw_tbl.dmn->refcount);
else
- refcount_dec(&action->dest_tbl.tbl->refcount);
+ refcount_dec(&action->dest_tbl->tbl->refcount);
- if (action->dest_tbl.is_fw_tbl &&
- action->dest_tbl.fw_tbl.num_of_ref_actions) {
+ if (action->dest_tbl->is_fw_tbl &&
+ action->dest_tbl->fw_tbl.num_of_ref_actions) {
struct mlx5dr_action **ref_actions;
int i;
- ref_actions = action->dest_tbl.fw_tbl.ref_actions;
- for (i = 0; i < action->dest_tbl.fw_tbl.num_of_ref_actions; i++)
+ ref_actions = action->dest_tbl->fw_tbl.ref_actions;
+ for (i = 0; i < action->dest_tbl->fw_tbl.num_of_ref_actions; i++)
refcount_dec(&ref_actions[i]->refcount);
kfree(ref_actions);
- mlx5dr_fw_destroy_md_tbl(action->dest_tbl.fw_tbl.dmn,
- action->dest_tbl.fw_tbl.id,
- action->dest_tbl.fw_tbl.group_id);
+ mlx5dr_fw_destroy_md_tbl(action->dest_tbl->fw_tbl.dmn,
+ action->dest_tbl->fw_tbl.id,
+ action->dest_tbl->fw_tbl.group_id);
}
break;
case DR_ACTION_TYP_TNL_L2_TO_L2:
- refcount_dec(&action->reformat.dmn->refcount);
+ refcount_dec(&action->reformat->dmn->refcount);
break;
case DR_ACTION_TYP_TNL_L3_TO_L2:
- mlx5dr_icm_free_chunk(action->rewrite.chunk);
- refcount_dec(&action->reformat.dmn->refcount);
+ mlx5dr_icm_free_chunk(action->rewrite->chunk);
+ refcount_dec(&action->rewrite->dmn->refcount);
break;
case DR_ACTION_TYP_L2_TO_TNL_L2:
case DR_ACTION_TYP_L2_TO_TNL_L3:
- mlx5dr_cmd_destroy_reformat_ctx((action->reformat.dmn)->mdev,
- action->reformat.reformat_id);
- refcount_dec(&action->reformat.dmn->refcount);
+ mlx5dr_cmd_destroy_reformat_ctx((action->reformat->dmn)->mdev,
+ action->reformat->reformat_id);
+ refcount_dec(&action->reformat->dmn->refcount);
break;
case DR_ACTION_TYP_MODIFY_HDR:
- mlx5dr_icm_free_chunk(action->rewrite.chunk);
- kfree(action->rewrite.data);
- refcount_dec(&action->rewrite.dmn->refcount);
+ mlx5dr_icm_free_chunk(action->rewrite->chunk);
+ kfree(action->rewrite->data);
+ refcount_dec(&action->rewrite->dmn->refcount);
break;
default:
break;
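The per-type members of struct mlx5dr_action were converted from embedded structs to pointers into trailing storage: dr_action_create_generic() now allocates sizeof(*action) plus the payload size looked up in action_size[] with a single kzalloc, and points action->data just past the header. Each action therefore remains one allocation while only the payload for the type actually in use is carried. A standalone illustration of the pattern:

	#include <stdlib.h>

	struct action {
		int type;
		void *data;		/* per-type payload, allocated below */
	};

	/* One allocation covers header plus payload; data points past the header. */
	static struct action *action_alloc(int type, size_t payload_size)
	{
		struct action *a = calloc(1, sizeof(*a) + payload_size);

		if (!a)
			return NULL;
		a->type = type;
		a->data = a + 1;	/* payload starts right after the header */
		return a;
	}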
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c
index 30b0136b5bc7..5970cb8fc0c0 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c
@@ -85,15 +85,53 @@ int mlx5dr_cmd_query_esw_caps(struct mlx5_core_dev *mdev,
return 0;
}
+static int dr_cmd_query_nic_vport_roce_en(struct mlx5_core_dev *mdev,
+ u16 vport, bool *roce_en)
+{
+ u32 out[MLX5_ST_SZ_DW(query_nic_vport_context_out)] = {};
+ u32 in[MLX5_ST_SZ_DW(query_nic_vport_context_in)] = {};
+ int err;
+
+ MLX5_SET(query_nic_vport_context_in, in, opcode,
+ MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT);
+ MLX5_SET(query_nic_vport_context_in, in, vport_number, vport);
+ MLX5_SET(query_nic_vport_context_in, in, other_vport, !!vport);
+
+ err = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
+ if (err)
+ return err;
+
+ *roce_en = MLX5_GET(query_nic_vport_context_out, out,
+ nic_vport_context.roce_en);
+ return 0;
+}
+
int mlx5dr_cmd_query_device(struct mlx5_core_dev *mdev,
struct mlx5dr_cmd_caps *caps)
{
+ bool roce_en;
+ int err;
+
caps->prio_tag_required = MLX5_CAP_GEN(mdev, prio_tag_required);
caps->eswitch_manager = MLX5_CAP_GEN(mdev, eswitch_manager);
caps->gvmi = MLX5_CAP_GEN(mdev, vhca_id);
caps->flex_protocols = MLX5_CAP_GEN(mdev, flex_parser_protocols);
caps->sw_format_ver = MLX5_CAP_GEN(mdev, steering_format_version);
+ if (MLX5_CAP_GEN(mdev, roce)) {
+ err = dr_cmd_query_nic_vport_roce_en(mdev, 0, &roce_en);
+ if (err)
+ return err;
+
+ caps->roce_caps.roce_en = roce_en;
+ caps->roce_caps.fl_rc_qp_when_roce_disabled =
+ MLX5_CAP_ROCE(mdev, fl_rc_qp_when_roce_disabled);
+ caps->roce_caps.fl_rc_qp_when_roce_enabled =
+ MLX5_CAP_ROCE(mdev, fl_rc_qp_when_roce_enabled);
+ }
+
+ caps->isolate_vl_tc = MLX5_CAP_GEN(mdev, isolate_vl_tc_new);
+
if (caps->flex_protocols & MLX5_FLEX_PARSER_ICMP_V4_ENABLED) {
caps->flex_parser_id_icmp_dw0 = MLX5_CAP_GEN(mdev, flex_parser_id_icmp_dw0);
caps->flex_parser_id_icmp_dw1 = MLX5_CAP_GEN(mdev, flex_parser_id_icmp_dw1);
@@ -106,6 +144,34 @@ int mlx5dr_cmd_query_device(struct mlx5_core_dev *mdev,
MLX5_CAP_GEN(mdev, flex_parser_id_icmpv6_dw1);
}
+ if (caps->flex_protocols & MLX5_FLEX_PARSER_GENEVE_TLV_OPTION_0_ENABLED)
+ caps->flex_parser_id_geneve_tlv_option_0 =
+ MLX5_CAP_GEN(mdev, flex_parser_id_geneve_tlv_option_0);
+
+ if (caps->flex_protocols & MLX5_FLEX_PARSER_MPLS_OVER_GRE_ENABLED)
+ caps->flex_parser_id_mpls_over_gre =
+ MLX5_CAP_GEN(mdev, flex_parser_id_outer_first_mpls_over_gre);
+
+ if (caps->flex_protocols & mlx5_FLEX_PARSER_MPLS_OVER_UDP_ENABLED)
+ caps->flex_parser_id_mpls_over_udp =
+ MLX5_CAP_GEN(mdev, flex_parser_id_outer_first_mpls_over_udp_label);
+
+ if (caps->flex_protocols & MLX5_FLEX_PARSER_GTPU_DW_0_ENABLED)
+ caps->flex_parser_id_gtpu_dw_0 =
+ MLX5_CAP_GEN(mdev, flex_parser_id_gtpu_dw_0);
+
+ if (caps->flex_protocols & MLX5_FLEX_PARSER_GTPU_TEID_ENABLED)
+ caps->flex_parser_id_gtpu_teid =
+ MLX5_CAP_GEN(mdev, flex_parser_id_gtpu_teid);
+
+ if (caps->flex_protocols & MLX5_FLEX_PARSER_GTPU_DW_2_ENABLED)
+ caps->flex_parser_id_gtpu_dw_2 =
+ MLX5_CAP_GEN(mdev, flex_parser_id_gtpu_dw_2);
+
+ if (caps->flex_protocols & MLX5_FLEX_PARSER_GTPU_FIRST_EXT_DW_0_ENABLED)
+ caps->flex_parser_id_gtpu_first_ext_dw_0 =
+ MLX5_CAP_GEN(mdev, flex_parser_id_gtpu_first_ext_dw_0);
+
caps->nic_rx_drop_address =
MLX5_CAP64_FLOWTABLE(mdev, sw_steering_nic_rx_action_drop_icm_address);
caps->nic_tx_drop_address =
@@ -287,7 +353,7 @@ int mlx5dr_cmd_create_empty_flow_group(struct mlx5_core_dev *mdev,
u32 *in;
int err;
- in = kzalloc(inlen, GFP_KERNEL);
+ in = kvzalloc(inlen, GFP_KERNEL);
if (!in)
return -ENOMEM;
@@ -302,7 +368,7 @@ int mlx5dr_cmd_create_empty_flow_group(struct mlx5_core_dev *mdev,
*group_id = MLX5_GET(create_flow_group_out, out, group_id);
out:
- kfree(in);
+ kvfree(in);
return err;
}
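The flow-group input buffer moves from kzalloc() to kvzalloc(): MLX5_ST_SZ_BYTES(create_flow_group_in) is large enough that a physically contiguous allocation can fail under memory pressure, and kvzalloc() falls back to vmalloc in that case. The rule the hunk also follows, in brief:

	/* Pairing rule: kvfree() releases either a kmalloc- or a vmalloc-backed
	 * buffer; plain kfree() must not be used for kvzalloc() memory. */
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;
	/* ... build and execute the command ... */
	kvfree(in);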
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c
index 15673cd10039..6f6191d1d5a6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c
@@ -92,15 +92,17 @@ static bool dr_mask_is_tnl_gre_set(struct mlx5dr_match_misc *misc)
misc->gre_k_present || misc->gre_s_present);
}
-#define DR_MASK_IS_OUTER_MPLS_OVER_GRE_UDP_SET(_misc2, gre_udp) ( \
- (_misc2).outer_first_mpls_over_##gre_udp##_label || \
- (_misc2).outer_first_mpls_over_##gre_udp##_exp || \
- (_misc2).outer_first_mpls_over_##gre_udp##_s_bos || \
- (_misc2).outer_first_mpls_over_##gre_udp##_ttl)
-
-#define DR_MASK_IS_TNL_MPLS_SET(_misc2) ( \
- DR_MASK_IS_OUTER_MPLS_OVER_GRE_UDP_SET((_misc2), gre) || \
- DR_MASK_IS_OUTER_MPLS_OVER_GRE_UDP_SET((_misc2), udp))
+#define DR_MASK_IS_OUTER_MPLS_OVER_GRE_SET(_misc) (\
+ (_misc)->outer_first_mpls_over_gre_label || \
+ (_misc)->outer_first_mpls_over_gre_exp || \
+ (_misc)->outer_first_mpls_over_gre_s_bos || \
+ (_misc)->outer_first_mpls_over_gre_ttl)
+
+#define DR_MASK_IS_OUTER_MPLS_OVER_UDP_SET(_misc) (\
+ (_misc)->outer_first_mpls_over_udp_label || \
+ (_misc)->outer_first_mpls_over_udp_exp || \
+ (_misc)->outer_first_mpls_over_udp_s_bos || \
+ (_misc)->outer_first_mpls_over_udp_ttl)
static bool
dr_mask_is_vxlan_gpe_set(struct mlx5dr_match_misc3 *misc3)
@@ -133,6 +135,11 @@ static bool dr_mask_is_tnl_geneve_set(struct mlx5dr_match_misc *misc)
misc->geneve_opt_len;
}
+static bool dr_mask_is_tnl_geneve_tlv_opt(struct mlx5dr_match_misc3 *misc3)
+{
+ return misc3->geneve_tlv_option_0_data;
+}
+
static bool
dr_matcher_supp_tnl_geneve(struct mlx5dr_cmd_caps *caps)
{
@@ -148,6 +155,109 @@ dr_mask_is_tnl_geneve(struct mlx5dr_match_param *mask,
dr_matcher_supp_tnl_geneve(&dmn->info.caps);
}
+static bool dr_mask_is_tnl_gtpu_set(struct mlx5dr_match_misc3 *misc3)
+{
+ return misc3->gtpu_msg_flags || misc3->gtpu_msg_type || misc3->gtpu_teid;
+}
+
+static bool dr_matcher_supp_tnl_gtpu(struct mlx5dr_cmd_caps *caps)
+{
+ return caps->flex_protocols & MLX5_FLEX_PARSER_GTPU_ENABLED;
+}
+
+static bool dr_mask_is_tnl_gtpu(struct mlx5dr_match_param *mask,
+ struct mlx5dr_domain *dmn)
+{
+ return dr_mask_is_tnl_gtpu_set(&mask->misc3) &&
+ dr_matcher_supp_tnl_gtpu(&dmn->info.caps);
+}
+
+static int dr_matcher_supp_tnl_gtpu_dw_0(struct mlx5dr_cmd_caps *caps)
+{
+ return caps->flex_protocols & MLX5_FLEX_PARSER_GTPU_DW_0_ENABLED;
+}
+
+static bool dr_mask_is_tnl_gtpu_dw_0(struct mlx5dr_match_param *mask,
+ struct mlx5dr_domain *dmn)
+{
+ return mask->misc3.gtpu_dw_0 &&
+ dr_matcher_supp_tnl_gtpu_dw_0(&dmn->info.caps);
+}
+
+static int dr_matcher_supp_tnl_gtpu_teid(struct mlx5dr_cmd_caps *caps)
+{
+ return caps->flex_protocols & MLX5_FLEX_PARSER_GTPU_TEID_ENABLED;
+}
+
+static bool dr_mask_is_tnl_gtpu_teid(struct mlx5dr_match_param *mask,
+ struct mlx5dr_domain *dmn)
+{
+ return mask->misc3.gtpu_teid &&
+ dr_matcher_supp_tnl_gtpu_teid(&dmn->info.caps);
+}
+
+static int dr_matcher_supp_tnl_gtpu_dw_2(struct mlx5dr_cmd_caps *caps)
+{
+ return caps->flex_protocols & MLX5_FLEX_PARSER_GTPU_DW_2_ENABLED;
+}
+
+static bool dr_mask_is_tnl_gtpu_dw_2(struct mlx5dr_match_param *mask,
+ struct mlx5dr_domain *dmn)
+{
+ return mask->misc3.gtpu_dw_2 &&
+ dr_matcher_supp_tnl_gtpu_dw_2(&dmn->info.caps);
+}
+
+static int dr_matcher_supp_tnl_gtpu_first_ext(struct mlx5dr_cmd_caps *caps)
+{
+ return caps->flex_protocols & MLX5_FLEX_PARSER_GTPU_FIRST_EXT_DW_0_ENABLED;
+}
+
+static bool dr_mask_is_tnl_gtpu_first_ext(struct mlx5dr_match_param *mask,
+ struct mlx5dr_domain *dmn)
+{
+ return mask->misc3.gtpu_first_ext_dw_0 &&
+ dr_matcher_supp_tnl_gtpu_first_ext(&dmn->info.caps);
+}
+
+static bool dr_mask_is_tnl_gtpu_flex_parser_0(struct mlx5dr_match_param *mask,
+ struct mlx5dr_domain *dmn)
+{
+ struct mlx5dr_cmd_caps *caps = &dmn->info.caps;
+
+ return (dr_is_flex_parser_0_id(caps->flex_parser_id_gtpu_dw_0) &&
+ dr_mask_is_tnl_gtpu_dw_0(mask, dmn)) ||
+ (dr_is_flex_parser_0_id(caps->flex_parser_id_gtpu_teid) &&
+ dr_mask_is_tnl_gtpu_teid(mask, dmn)) ||
+ (dr_is_flex_parser_0_id(caps->flex_parser_id_gtpu_dw_2) &&
+ dr_mask_is_tnl_gtpu_dw_2(mask, dmn)) ||
+ (dr_is_flex_parser_0_id(caps->flex_parser_id_gtpu_first_ext_dw_0) &&
+ dr_mask_is_tnl_gtpu_first_ext(mask, dmn));
+}
+
+static bool dr_mask_is_tnl_gtpu_flex_parser_1(struct mlx5dr_match_param *mask,
+ struct mlx5dr_domain *dmn)
+{
+ struct mlx5dr_cmd_caps *caps = &dmn->info.caps;
+
+ return (dr_is_flex_parser_1_id(caps->flex_parser_id_gtpu_dw_0) &&
+ dr_mask_is_tnl_gtpu_dw_0(mask, dmn)) ||
+ (dr_is_flex_parser_1_id(caps->flex_parser_id_gtpu_teid) &&
+ dr_mask_is_tnl_gtpu_teid(mask, dmn)) ||
+ (dr_is_flex_parser_1_id(caps->flex_parser_id_gtpu_dw_2) &&
+ dr_mask_is_tnl_gtpu_dw_2(mask, dmn)) ||
+ (dr_is_flex_parser_1_id(caps->flex_parser_id_gtpu_first_ext_dw_0) &&
+ dr_mask_is_tnl_gtpu_first_ext(mask, dmn));
+}
+
+static bool dr_mask_is_tnl_gtpu_any(struct mlx5dr_match_param *mask,
+ struct mlx5dr_domain *dmn)
+{
+ return dr_mask_is_tnl_gtpu_flex_parser_0(mask, dmn) ||
+ dr_mask_is_tnl_gtpu_flex_parser_1(mask, dmn) ||
+ dr_mask_is_tnl_gtpu(mask, dmn);
+}
+
static int dr_matcher_supp_icmp_v4(struct mlx5dr_cmd_caps *caps)
{
return (caps->sw_format_ver == MLX5_STEERING_FORMAT_CONNECTX_6DX) ||
@@ -199,6 +309,65 @@ static bool dr_mask_is_gvmi_or_qpn_set(struct mlx5dr_match_misc *misc)
return (misc->source_sqn || misc->source_port);
}
+static bool dr_mask_is_flex_parser_id_0_3_set(u32 flex_parser_id,
+ u32 flex_parser_value)
+{
+ if (flex_parser_id)
+ return flex_parser_id <= DR_STE_MAX_FLEX_0_ID;
+
+ /* A zero id can mean flex parser 0 or an unused field, so the value must be set. */
+ return flex_parser_value;
+}
+
+static bool dr_mask_is_flex_parser_0_3_set(struct mlx5dr_match_misc4 *misc4)
+{
+ return (dr_mask_is_flex_parser_id_0_3_set(misc4->prog_sample_field_id_0,
+ misc4->prog_sample_field_value_0) ||
+ dr_mask_is_flex_parser_id_0_3_set(misc4->prog_sample_field_id_1,
+ misc4->prog_sample_field_value_1) ||
+ dr_mask_is_flex_parser_id_0_3_set(misc4->prog_sample_field_id_2,
+ misc4->prog_sample_field_value_2) ||
+ dr_mask_is_flex_parser_id_0_3_set(misc4->prog_sample_field_id_3,
+ misc4->prog_sample_field_value_3));
+}
+
+static bool dr_mask_is_flex_parser_id_4_7_set(u32 flex_parser_id)
+{
+ return flex_parser_id > DR_STE_MAX_FLEX_0_ID &&
+ flex_parser_id <= DR_STE_MAX_FLEX_1_ID;
+}
+
+static bool dr_mask_is_flex_parser_4_7_set(struct mlx5dr_match_misc4 *misc4)
+{
+ return (dr_mask_is_flex_parser_id_4_7_set(misc4->prog_sample_field_id_0) ||
+ dr_mask_is_flex_parser_id_4_7_set(misc4->prog_sample_field_id_1) ||
+ dr_mask_is_flex_parser_id_4_7_set(misc4->prog_sample_field_id_2) ||
+ dr_mask_is_flex_parser_id_4_7_set(misc4->prog_sample_field_id_3));
+}
+
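Note the asymmetry between the 0-3 and 4-7 helpers above: parser id 0 is itself a valid id, so a zeroed prog_sample_field_id_N is ambiguous and the 0-3 check must fall back to the value mask, while the 4-7 check can trust the id alone. A standalone sketch of that rule, with illustrative names:

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

#define DR_STE_MAX_FLEX_0_ID 3

/* Mirrors dr_mask_is_flex_parser_id_0_3_set(): id 0 also means "unused",
 * so with id 0 the field only counts when its value mask is non-zero. */
static bool id_0_3_set(uint32_t id, uint32_t value)
{
	if (id)
		return id <= DR_STE_MAX_FLEX_0_ID;
	return value != 0;
}

int main(void)
{
	assert(!id_0_3_set(0, 0));	/* field unused */
	assert(id_0_3_set(0, 0xff));	/* parser 0, value masked */
	assert(id_0_3_set(3, 0));	/* parser 3 -> flex parser 0 */
	assert(!id_0_3_set(5, 0));	/* parser 5 -> flex parser 1 range */
	return 0;
}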
+static int dr_matcher_supp_tnl_mpls_over_gre(struct mlx5dr_cmd_caps *caps)
+{
+ return caps->flex_protocols & MLX5_FLEX_PARSER_MPLS_OVER_GRE_ENABLED;
+}
+
+static bool dr_mask_is_tnl_mpls_over_gre(struct mlx5dr_match_param *mask,
+ struct mlx5dr_domain *dmn)
+{
+ return DR_MASK_IS_OUTER_MPLS_OVER_GRE_SET(&mask->misc2) &&
+ dr_matcher_supp_tnl_mpls_over_gre(&dmn->info.caps);
+}
+
+static int dr_matcher_supp_tnl_mpls_over_udp(struct mlx5dr_cmd_caps *caps)
+{
+ return caps->flex_protocols & MLX5_FLEX_PARSER_MPLS_OVER_UDP_ENABLED;
+}
+
+static bool dr_mask_is_tnl_mpls_over_udp(struct mlx5dr_match_param *mask,
+ struct mlx5dr_domain *dmn)
+{
+ return DR_MASK_IS_OUTER_MPLS_OVER_UDP_SET(&mask->misc2) &&
+ dr_matcher_supp_tnl_mpls_over_udp(&dmn->info.caps);
+}
int mlx5dr_matcher_select_builders(struct mlx5dr_matcher *matcher,
struct mlx5dr_matcher_rx_tx *nic_matcher,
enum mlx5dr_ipv outer_ipv,
@@ -251,6 +420,9 @@ static int dr_matcher_set_ste_builders(struct mlx5dr_matcher *matcher,
if (matcher->match_criteria & DR_MATCHER_CRITERIA_MISC3)
mask.misc3 = matcher->mask.misc3;
+ if (matcher->match_criteria & DR_MATCHER_CRITERIA_MISC4)
+ mask.misc4 = matcher->mask.misc4;
+
ret = mlx5dr_ste_build_pre_check(dmn, matcher->match_criteria,
&matcher->mask, NULL);
if (ret)
@@ -321,9 +493,28 @@ static int dr_matcher_set_ste_builders(struct mlx5dr_matcher *matcher,
if (dr_mask_is_tnl_vxlan_gpe(&mask, dmn))
mlx5dr_ste_build_tnl_vxlan_gpe(ste_ctx, &sb[idx++],
&mask, inner, rx);
- else if (dr_mask_is_tnl_geneve(&mask, dmn))
+ else if (dr_mask_is_tnl_geneve(&mask, dmn)) {
mlx5dr_ste_build_tnl_geneve(ste_ctx, &sb[idx++],
&mask, inner, rx);
+ if (dr_mask_is_tnl_geneve_tlv_opt(&mask.misc3))
+ mlx5dr_ste_build_tnl_geneve_tlv_opt(ste_ctx, &sb[idx++],
+ &mask, &dmn->info.caps,
+ inner, rx);
+ } else if (dr_mask_is_tnl_gtpu_any(&mask, dmn)) {
+ if (dr_mask_is_tnl_gtpu_flex_parser_0(&mask, dmn))
+ mlx5dr_ste_build_tnl_gtpu_flex_parser_0(ste_ctx, &sb[idx++],
+ &mask, &dmn->info.caps,
+ inner, rx);
+
+ if (dr_mask_is_tnl_gtpu_flex_parser_1(&mask, dmn))
+ mlx5dr_ste_build_tnl_gtpu_flex_parser_1(ste_ctx, &sb[idx++],
+ &mask, &dmn->info.caps,
+ inner, rx);
+
+ if (dr_mask_is_tnl_gtpu(&mask, dmn))
+ mlx5dr_ste_build_tnl_gtpu(ste_ctx, &sb[idx++],
+ &mask, inner, rx);
+ }
if (DR_MASK_IS_ETH_L4_MISC_SET(mask.misc3, outer))
mlx5dr_ste_build_eth_l4_misc(ste_ctx, &sb[idx++],
@@ -333,17 +524,20 @@ static int dr_matcher_set_ste_builders(struct mlx5dr_matcher *matcher,
mlx5dr_ste_build_mpls(ste_ctx, &sb[idx++],
&mask, inner, rx);
- if (DR_MASK_IS_TNL_MPLS_SET(mask.misc2))
- mlx5dr_ste_build_tnl_mpls(ste_ctx, &sb[idx++],
- &mask, inner, rx);
+ if (dr_mask_is_tnl_mpls_over_gre(&mask, dmn))
+ mlx5dr_ste_build_tnl_mpls_over_gre(ste_ctx, &sb[idx++],
+ &mask, &dmn->info.caps,
+ inner, rx);
+ else if (dr_mask_is_tnl_mpls_over_udp(&mask, dmn))
+ mlx5dr_ste_build_tnl_mpls_over_udp(ste_ctx, &sb[idx++],
+ &mask, &dmn->info.caps,
+ inner, rx);
+
+ if (dr_mask_is_icmp(&mask, dmn))
+ mlx5dr_ste_build_icmp(ste_ctx, &sb[idx++],
+ &mask, &dmn->info.caps,
+ inner, rx);
- if (dr_mask_is_icmp(&mask, dmn)) {
- ret = mlx5dr_ste_build_icmp(ste_ctx, &sb[idx++],
- &mask, &dmn->info.caps,
- inner, rx);
- if (ret)
- return ret;
- }
if (dr_mask_is_tnl_gre_set(&mask.misc))
mlx5dr_ste_build_tnl_gre(ste_ctx, &sb[idx++],
&mask, inner, rx);
@@ -404,10 +598,26 @@ static int dr_matcher_set_ste_builders(struct mlx5dr_matcher *matcher,
mlx5dr_ste_build_mpls(ste_ctx, &sb[idx++],
&mask, inner, rx);
- if (DR_MASK_IS_TNL_MPLS_SET(mask.misc2))
- mlx5dr_ste_build_tnl_mpls(ste_ctx, &sb[idx++],
- &mask, inner, rx);
+ if (dr_mask_is_tnl_mpls_over_gre(&mask, dmn))
+ mlx5dr_ste_build_tnl_mpls_over_gre(ste_ctx, &sb[idx++],
+ &mask, &dmn->info.caps,
+ inner, rx);
+ else if (dr_mask_is_tnl_mpls_over_udp(&mask, dmn))
+ mlx5dr_ste_build_tnl_mpls_over_udp(ste_ctx, &sb[idx++],
+ &mask, &dmn->info.caps,
+ inner, rx);
}
+
+ if (matcher->match_criteria & DR_MATCHER_CRITERIA_MISC4) {
+ if (dr_mask_is_flex_parser_0_3_set(&mask.misc4))
+ mlx5dr_ste_build_flex_parser_0(ste_ctx, &sb[idx++],
+ &mask, false, rx);
+
+ if (dr_mask_is_flex_parser_4_7_set(&mask.misc4))
+ mlx5dr_ste_build_flex_parser_1(ste_ctx, &sb[idx++],
+ &mask, false, rx);
+ }
+
/* Empty matcher, takes all */
if (matcher->match_criteria == DR_MATCHER_CRITERIA_EMPTY)
mlx5dr_ste_build_empty_always_hit(&sb[idx++], rx);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c
index b337d6626bff..43356fad53de 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c
@@ -952,6 +952,17 @@ static bool dr_rule_verify(struct mlx5dr_matcher *matcher,
return false;
}
}
+
+ if (match_criteria & DR_MATCHER_CRITERIA_MISC4) {
+ s_idx = offsetof(struct mlx5dr_match_param, misc4);
+ e_idx = min(s_idx + sizeof(param->misc4), value_size);
+
+ if (!dr_rule_cmp_value_to_mask(mask_p, param_p, s_idx, e_idx)) {
+ mlx5dr_err(matcher->tbl->dmn,
+ "Rule misc4 parameters contains a value not specified by mask\n");
+ return false;
+ }
+ }
return true;
}
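The misc4 block gets the same subsumption check as the other match parameter blocks: every bit set in the rule value must also be set in the matcher mask. dr_rule_cmp_value_to_mask() itself is outside this hunk; the sketch below assumes plain byte-wise semantics.

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Assumed semantics (the real helper is not shown in this hunk):
 * a value bit outside the mask over [s_idx, e_idx) fails the rule. */
static bool value_covered_by_mask(const uint8_t *mask, const uint8_t *value,
				  size_t s_idx, size_t e_idx)
{
	for (size_t i = s_idx; i < e_idx; i++)
		if (value[i] & ~mask[i])
			return false;
	return true;
}

int main(void)
{
	uint8_t mask[4]  = { 0xff, 0x0f, 0x00, 0x00 };
	uint8_t value[4] = { 0x12, 0x03, 0x00, 0x40 }; /* byte 3 escapes mask */

	printf("ok=%d\n", value_covered_by_mask(mask, value, 0, 4)); /* ok=0 */
	return 0;
}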
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c
index 8a6a56f9dc4e..12cf323a5943 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c
@@ -32,6 +32,7 @@ struct dr_qp_rtr_attr {
u8 min_rnr_timer;
u8 sgid_index;
u16 udp_src_port;
+ u8 fl:1;
};
struct dr_qp_rts_attr {
@@ -45,6 +46,7 @@ struct dr_qp_init_attr {
u32 pdn;
u32 max_send_wr;
struct mlx5_uars_page *uar;
+ u8 isolate_vl_tc:1;
};
static int dr_parse_cqe(struct mlx5dr_cq *dr_cq, struct mlx5_cqe64 *cqe64)
@@ -157,6 +159,7 @@ static struct mlx5dr_qp *dr_create_rc_qp(struct mlx5_core_dev *mdev,
qpc = MLX5_ADDR_OF(create_qp_in, in, qpc);
MLX5_SET(qpc, qpc, st, MLX5_QP_ST_RC);
MLX5_SET(qpc, qpc, pm_state, MLX5_QP_PM_MIGRATED);
+ MLX5_SET(qpc, qpc, isolate_vl_tc, attr->isolate_vl_tc);
MLX5_SET(qpc, qpc, pd, attr->pdn);
MLX5_SET(qpc, qpc, uar_page, attr->uar->index);
MLX5_SET(qpc, qpc, log_page_size,
@@ -213,7 +216,7 @@ static void dr_destroy_qp(struct mlx5_core_dev *mdev,
static void dr_cmd_notify_hw(struct mlx5dr_qp *dr_qp, void *ctrl)
{
dma_wmb();
- *dr_qp->wq.sq.db = cpu_to_be32(dr_qp->sq.pc & 0xfffff);
+ *dr_qp->wq.sq.db = cpu_to_be32(dr_qp->sq.pc & 0xffff);
/* After wmb() the hw is aware of new work */
wmb();
@@ -223,7 +226,7 @@ static void dr_cmd_notify_hw(struct mlx5dr_qp *dr_qp, void *ctrl)
static void dr_rdma_segments(struct mlx5dr_qp *dr_qp, u64 remote_addr,
u32 rkey, struct dr_data_seg *data_seg,
- u32 opcode, int nreq)
+ u32 opcode, bool notify_hw)
{
struct mlx5_wqe_raddr_seg *wq_raddr;
struct mlx5_wqe_ctrl_seg *wq_ctrl;
@@ -255,16 +258,16 @@ static void dr_rdma_segments(struct mlx5dr_qp *dr_qp, u64 remote_addr,
dr_qp->sq.wqe_head[idx] = dr_qp->sq.pc++;
- if (nreq)
+ if (notify_hw)
dr_cmd_notify_hw(dr_qp, wq_ctrl);
}
static void dr_post_send(struct mlx5dr_qp *dr_qp, struct postsend_info *send_info)
{
dr_rdma_segments(dr_qp, send_info->remote_addr, send_info->rkey,
- &send_info->write, MLX5_OPCODE_RDMA_WRITE, 0);
+ &send_info->write, MLX5_OPCODE_RDMA_WRITE, false);
dr_rdma_segments(dr_qp, send_info->remote_addr, send_info->rkey,
- &send_info->read, MLX5_OPCODE_RDMA_READ, 1);
+ &send_info->read, MLX5_OPCODE_RDMA_READ, true);
}
/**
@@ -406,7 +409,7 @@ static int dr_get_tbl_copy_details(struct mlx5dr_domain *dmn,
alloc_size = *num_stes * DR_STE_SIZE;
}
- *data = kzalloc(alloc_size, GFP_KERNEL);
+ *data = kvzalloc(alloc_size, GFP_KERNEL);
if (!*data)
return -ENOMEM;
@@ -505,7 +508,7 @@ int mlx5dr_send_postsend_htbl(struct mlx5dr_domain *dmn,
}
out_free:
- kfree(data);
+ kvfree(data);
return ret;
}
@@ -562,7 +565,7 @@ int mlx5dr_send_postsend_formatted_htbl(struct mlx5dr_domain *dmn,
}
out_free:
- kfree(data);
+ kvfree(data);
return ret;
}
@@ -572,12 +575,12 @@ int mlx5dr_send_postsend_action(struct mlx5dr_domain *dmn,
struct postsend_info send_info = {};
int ret;
- send_info.write.addr = (uintptr_t)action->rewrite.data;
- send_info.write.length = action->rewrite.num_of_actions *
+ send_info.write.addr = (uintptr_t)action->rewrite->data;
+ send_info.write.length = action->rewrite->num_of_actions *
DR_MODIFY_ACTION_SIZE;
send_info.write.lkey = 0;
- send_info.remote_addr = action->rewrite.chunk->mr_addr;
- send_info.rkey = action->rewrite.chunk->rkey;
+ send_info.remote_addr = action->rewrite->chunk->mr_addr;
+ send_info.rkey = action->rewrite->chunk->rkey;
ret = dr_postsend_icm_data(dmn, &send_info);
@@ -650,6 +653,7 @@ static int dr_cmd_modify_qp_init2rtr(struct mlx5_core_dev *mdev,
attr->udp_src_port);
MLX5_SET(qpc, qpc, primary_address_path.vhca_port_num, attr->port_num);
+ MLX5_SET(qpc, qpc, primary_address_path.fl, attr->fl);
MLX5_SET(qpc, qpc, min_rnr_nak, 1);
MLX5_SET(init2rtr_qp_in, in, opcode, MLX5_CMD_OP_INIT2RTR_QP);
@@ -658,6 +662,19 @@ static int dr_cmd_modify_qp_init2rtr(struct mlx5_core_dev *mdev,
return mlx5_cmd_exec_in(mdev, init2rtr_qp, in);
}
+static bool dr_send_allow_fl(struct mlx5dr_cmd_caps *caps)
+{
+ /* Check whether RC RoCE QP creation with force loopback is allowed.
+ * There are two separate capability bits for this:
+ * - force loopback when RoCE is enabled
+ * - force loopback when RoCE is disabled
+ */
+ return ((caps->roce_caps.roce_en &&
+ caps->roce_caps.fl_rc_qp_when_roce_enabled) ||
+ (!caps->roce_caps.roce_en &&
+ caps->roce_caps.fl_rc_qp_when_roce_disabled));
+}
+
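A standalone truth-table sketch of the force-loopback decision above (struct and harness are illustrative): the current RoCE state selects which of the two capability bits applies, and the result later decides whether the GID query can be skipped.

#include <stdbool.h>
#include <stdio.h>

struct roce_caps {
	bool roce_en;
	bool fl_rc_qp_when_roce_enabled;
	bool fl_rc_qp_when_roce_disabled;
};

/* Same decision as dr_send_allow_fl(): pick the capability bit that
 * matches the current RoCE state. */
static bool allow_fl(const struct roce_caps *c)
{
	return (c->roce_en && c->fl_rc_qp_when_roce_enabled) ||
	       (!c->roce_en && c->fl_rc_qp_when_roce_disabled);
}

int main(void)
{
	struct roce_caps on  = { .roce_en = true,  .fl_rc_qp_when_roce_enabled = true };
	struct roce_caps off = { .roce_en = false, .fl_rc_qp_when_roce_disabled = false };

	printf("roce on:  fl=%d\n", allow_fl(&on));  /* 1: skip GID query */
	printf("roce off: fl=%d\n", allow_fl(&off)); /* 0: query GID, set sgid_index */
	return 0;
}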
static int dr_prepare_qp_to_rts(struct mlx5dr_domain *dmn)
{
struct mlx5dr_qp *dr_qp = dmn->send_ring->qp;
@@ -676,17 +693,26 @@ static int dr_prepare_qp_to_rts(struct mlx5dr_domain *dmn)
}
/* RTR */
- ret = mlx5dr_cmd_query_gid(dmn->mdev, port, gid_index, &rtr_attr.dgid_attr);
- if (ret)
- return ret;
-
rtr_attr.mtu = mtu;
rtr_attr.qp_num = dr_qp->qpn;
rtr_attr.min_rnr_timer = 12;
rtr_attr.port_num = port;
- rtr_attr.sgid_index = gid_index;
rtr_attr.udp_src_port = dmn->info.caps.roce_min_src_udp;
+ /* If QP creation with force loopback is allowed, then there
+ * is no need for GID index when creating the QP.
+ * Otherwise we query GID attributes and use GID index.
+ */
+ rtr_attr.fl = dr_send_allow_fl(&dmn->info.caps);
+ if (!rtr_attr.fl) {
+ ret = mlx5dr_cmd_query_gid(dmn->mdev, port, gid_index,
+ &rtr_attr.dgid_attr);
+ if (ret)
+ return ret;
+
+ rtr_attr.sgid_index = gid_index;
+ }
+
ret = dr_cmd_modify_qp_init2rtr(dmn->mdev, dr_qp, &rtr_attr);
if (ret) {
mlx5dr_err(dmn, "Failed modify QP init2rtr\n");
@@ -900,6 +926,11 @@ int mlx5dr_send_ring_alloc(struct mlx5dr_domain *dmn)
init_attr.pdn = dmn->pdn;
init_attr.uar = dmn->uar;
init_attr.max_send_wr = QUEUE_SIZE;
+
+ /* Isolated VL is applicable only if force loopback is supported */
+ if (dr_send_allow_fl(&dmn->info.caps))
+ init_attr.isolate_vl_tc = dmn->info.caps.isolate_vl_tc;
+
spin_lock_init(&dmn->send_ring->lock);
dmn->send_ring->qp = dr_create_rc_qp(dmn->mdev, &init_attr);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c
index f49abc7a4b9b..9b1529137cba 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c
@@ -852,6 +852,35 @@ static void dr_ste_copy_mask_misc3(char *mask, struct mlx5dr_match_misc3 *spec)
spec->icmpv4_code = MLX5_GET(fte_match_set_misc3, mask, icmp_code);
spec->icmpv6_type = MLX5_GET(fte_match_set_misc3, mask, icmpv6_type);
spec->icmpv6_code = MLX5_GET(fte_match_set_misc3, mask, icmpv6_code);
+ spec->geneve_tlv_option_0_data =
+ MLX5_GET(fte_match_set_misc3, mask, geneve_tlv_option_0_data);
+ spec->gtpu_msg_flags = MLX5_GET(fte_match_set_misc3, mask, gtpu_msg_flags);
+ spec->gtpu_msg_type = MLX5_GET(fte_match_set_misc3, mask, gtpu_msg_type);
+ spec->gtpu_teid = MLX5_GET(fte_match_set_misc3, mask, gtpu_teid);
+ spec->gtpu_dw_0 = MLX5_GET(fte_match_set_misc3, mask, gtpu_dw_0);
+ spec->gtpu_dw_2 = MLX5_GET(fte_match_set_misc3, mask, gtpu_dw_2);
+ spec->gtpu_first_ext_dw_0 =
+ MLX5_GET(fte_match_set_misc3, mask, gtpu_first_ext_dw_0);
+}
+
+static void dr_ste_copy_mask_misc4(char *mask, struct mlx5dr_match_misc4 *spec)
+{
+ spec->prog_sample_field_id_0 =
+ MLX5_GET(fte_match_set_misc4, mask, prog_sample_field_id_0);
+ spec->prog_sample_field_value_0 =
+ MLX5_GET(fte_match_set_misc4, mask, prog_sample_field_value_0);
+ spec->prog_sample_field_id_1 =
+ MLX5_GET(fte_match_set_misc4, mask, prog_sample_field_id_1);
+ spec->prog_sample_field_value_1 =
+ MLX5_GET(fte_match_set_misc4, mask, prog_sample_field_value_1);
+ spec->prog_sample_field_id_2 =
+ MLX5_GET(fte_match_set_misc4, mask, prog_sample_field_id_2);
+ spec->prog_sample_field_value_2 =
+ MLX5_GET(fte_match_set_misc4, mask, prog_sample_field_value_2);
+ spec->prog_sample_field_id_3 =
+ MLX5_GET(fte_match_set_misc4, mask, prog_sample_field_id_3);
+ spec->prog_sample_field_value_3 =
+ MLX5_GET(fte_match_set_misc4, mask, prog_sample_field_value_3);
}
void mlx5dr_ste_copy_param(u8 match_criteria,
@@ -925,6 +954,20 @@ void mlx5dr_ste_copy_param(u8 match_criteria,
}
dr_ste_copy_mask_misc3(buff, &set_param->misc3);
}
+
+ param_location += sizeof(struct mlx5dr_match_misc3);
+
+ if (match_criteria & DR_MATCHER_CRITERIA_MISC4) {
+ if (mask->match_sz < param_location +
+ sizeof(struct mlx5dr_match_misc4)) {
+ memcpy(tail_param, data + param_location,
+ mask->match_sz - param_location);
+ buff = tail_param;
+ } else {
+ buff = data + param_location;
+ }
+ dr_ste_copy_mask_misc4(buff, &set_param->misc4);
+ }
}
void mlx5dr_ste_build_eth_l2_src_dst(struct mlx5dr_ste_ctx *ste_ctx,
@@ -1051,26 +1094,40 @@ void mlx5dr_ste_build_tnl_gre(struct mlx5dr_ste_ctx *ste_ctx,
ste_ctx->build_tnl_gre_init(sb, mask);
}
-void mlx5dr_ste_build_tnl_mpls(struct mlx5dr_ste_ctx *ste_ctx,
- struct mlx5dr_ste_build *sb,
- struct mlx5dr_match_param *mask,
- bool inner, bool rx)
+void mlx5dr_ste_build_tnl_mpls_over_gre(struct mlx5dr_ste_ctx *ste_ctx,
+ struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask,
+ struct mlx5dr_cmd_caps *caps,
+ bool inner, bool rx)
+{
+ sb->rx = rx;
+ sb->inner = inner;
+ sb->caps = caps;
+ return ste_ctx->build_tnl_mpls_over_gre_init(sb, mask);
+}
+
+void mlx5dr_ste_build_tnl_mpls_over_udp(struct mlx5dr_ste_ctx *ste_ctx,
+ struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask,
+ struct mlx5dr_cmd_caps *caps,
+ bool inner, bool rx)
{
sb->rx = rx;
sb->inner = inner;
- ste_ctx->build_tnl_mpls_init(sb, mask);
+ sb->caps = caps;
+ return ste_ctx->build_tnl_mpls_over_udp_init(sb, mask);
}
-int mlx5dr_ste_build_icmp(struct mlx5dr_ste_ctx *ste_ctx,
- struct mlx5dr_ste_build *sb,
- struct mlx5dr_match_param *mask,
- struct mlx5dr_cmd_caps *caps,
- bool inner, bool rx)
+void mlx5dr_ste_build_icmp(struct mlx5dr_ste_ctx *ste_ctx,
+ struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask,
+ struct mlx5dr_cmd_caps *caps,
+ bool inner, bool rx)
{
sb->rx = rx;
sb->inner = inner;
sb->caps = caps;
- return ste_ctx->build_icmp_init(sb, mask);
+ ste_ctx->build_icmp_init(sb, mask);
}
void mlx5dr_ste_build_general_purpose(struct mlx5dr_ste_ctx *ste_ctx,
@@ -1113,6 +1170,52 @@ void mlx5dr_ste_build_tnl_geneve(struct mlx5dr_ste_ctx *ste_ctx,
ste_ctx->build_tnl_geneve_init(sb, mask);
}
+void mlx5dr_ste_build_tnl_geneve_tlv_opt(struct mlx5dr_ste_ctx *ste_ctx,
+ struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask,
+ struct mlx5dr_cmd_caps *caps,
+ bool inner, bool rx)
+{
+ sb->rx = rx;
+ sb->caps = caps;
+ sb->inner = inner;
+ ste_ctx->build_tnl_geneve_tlv_opt_init(sb, mask);
+}
+
+void mlx5dr_ste_build_tnl_gtpu(struct mlx5dr_ste_ctx *ste_ctx,
+ struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask,
+ bool inner, bool rx)
+{
+ sb->rx = rx;
+ sb->inner = inner;
+ ste_ctx->build_tnl_gtpu_init(sb, mask);
+}
+
+void mlx5dr_ste_build_tnl_gtpu_flex_parser_0(struct mlx5dr_ste_ctx *ste_ctx,
+ struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask,
+ struct mlx5dr_cmd_caps *caps,
+ bool inner, bool rx)
+{
+ sb->rx = rx;
+ sb->caps = caps;
+ sb->inner = inner;
+ ste_ctx->build_tnl_gtpu_flex_parser_0_init(sb, mask);
+}
+
+void mlx5dr_ste_build_tnl_gtpu_flex_parser_1(struct mlx5dr_ste_ctx *ste_ctx,
+ struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask,
+ struct mlx5dr_cmd_caps *caps,
+ bool inner, bool rx)
+{
+ sb->rx = rx;
+ sb->caps = caps;
+ sb->inner = inner;
+ ste_ctx->build_tnl_gtpu_flex_parser_1_init(sb, mask);
+}
+
void mlx5dr_ste_build_register_0(struct mlx5dr_ste_ctx *ste_ctx,
struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask,
@@ -1148,6 +1251,26 @@ void mlx5dr_ste_build_src_gvmi_qpn(struct mlx5dr_ste_ctx *ste_ctx,
ste_ctx->build_src_gvmi_qpn_init(sb, mask);
}
+void mlx5dr_ste_build_flex_parser_0(struct mlx5dr_ste_ctx *ste_ctx,
+ struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask,
+ bool inner, bool rx)
+{
+ sb->rx = rx;
+ sb->inner = inner;
+ ste_ctx->build_flex_parser_0_init(sb, mask);
+}
+
+void mlx5dr_ste_build_flex_parser_1(struct mlx5dr_ste_ctx *ste_ctx,
+ struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask,
+ bool inner, bool rx)
+{
+ sb->rx = rx;
+ sb->inner = inner;
+ ste_ctx->build_flex_parser_1_init(sb, mask);
+}
+
static struct mlx5dr_ste_ctx *mlx5dr_ste_ctx_arr[] = {
[MLX5_STEERING_FORMAT_CONNECTX_5] = &ste_ctx_v0,
[MLX5_STEERING_FORMAT_CONNECTX_6DX] = &ste_ctx_v1,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.h
index 06bcb0ee8f96..992b591bf0c5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.h
@@ -62,6 +62,13 @@
in_out##_first_mpls_ttl); \
} while (0)
+#define DR_STE_SET_FLEX_PARSER_FIELD(tag, fname, caps, spec) do { \
+ u8 parser_id = (caps)->flex_parser_id_##fname; \
+ u8 *parser_ptr = dr_ste_calc_flex_parser_offset(tag, parser_id); \
+ *(__be32 *)parser_ptr = cpu_to_be32((spec)->fname);\
+ (spec)->fname = 0;\
+} while (0)
+
#define DR_STE_IS_OUTER_MPLS_OVER_GRE_SET(_misc) (\
(_misc)->outer_first_mpls_over_gre_label || \
(_misc)->outer_first_mpls_over_gre_exp || \
@@ -86,8 +93,22 @@ enum dr_ste_action_modify_type_l4 {
DR_STE_ACTION_MDFY_TYPE_L4_UDP = 0x2,
};
+enum {
+ HDR_MPLS_OFFSET_LABEL = 12,
+ HDR_MPLS_OFFSET_EXP = 9,
+ HDR_MPLS_OFFSET_S_BOS = 8,
+ HDR_MPLS_OFFSET_TTL = 0,
+};
+
u16 mlx5dr_ste_conv_bit_to_byte_mask(u8 *bit_mask);
+static inline u8 *
+dr_ste_calc_flex_parser_offset(u8 *tag, u8 parser_id)
+{
+ /* Calculate tag byte offset based on flex parser id */
+ return tag + 4 * (3 - (parser_id % 4));
+}
+
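The helper maps a parser id to its dword slot inside the 16-byte STE tag: the id is taken modulo 4, and slot 0 sits at the highest byte offset. A standalone sketch printing the mapping, assuming the same 4-dword tag layout:

#include <stdio.h>

/* Same arithmetic as dr_ste_calc_flex_parser_offset(): four big-endian
 * dwords per tag, parser (id % 4) == 0 landing in the last one. */
int main(void)
{
	for (int id = 0; id < 8; id++)
		printf("parser %d -> tag byte offset %d\n",
		       id, 4 * (3 - (id % 4)));
	return 0;
}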
#define DR_STE_CTX_BUILDER(fname) \
((*build_##fname##_init)(struct mlx5dr_ste_build *sb, \
struct mlx5dr_match_param *mask))
@@ -106,14 +127,22 @@ struct mlx5dr_ste_ctx {
void DR_STE_CTX_BUILDER(mpls);
void DR_STE_CTX_BUILDER(tnl_gre);
void DR_STE_CTX_BUILDER(tnl_mpls);
- int DR_STE_CTX_BUILDER(icmp);
+ void DR_STE_CTX_BUILDER(tnl_mpls_over_gre);
+ void DR_STE_CTX_BUILDER(tnl_mpls_over_udp);
+ void DR_STE_CTX_BUILDER(icmp);
void DR_STE_CTX_BUILDER(general_purpose);
void DR_STE_CTX_BUILDER(eth_l4_misc);
void DR_STE_CTX_BUILDER(tnl_vxlan_gpe);
void DR_STE_CTX_BUILDER(tnl_geneve);
+ void DR_STE_CTX_BUILDER(tnl_geneve_tlv_opt);
void DR_STE_CTX_BUILDER(register_0);
void DR_STE_CTX_BUILDER(register_1);
void DR_STE_CTX_BUILDER(src_gvmi_qpn);
+ void DR_STE_CTX_BUILDER(flex_parser_0);
+ void DR_STE_CTX_BUILDER(flex_parser_1);
+ void DR_STE_CTX_BUILDER(tnl_gtpu);
+ void DR_STE_CTX_BUILDER(tnl_gtpu_flex_parser_0);
+ void DR_STE_CTX_BUILDER(tnl_gtpu_flex_parser_1);
/* Getters and Setters */
void (*ste_init)(u8 *hw_ste_p, u16 lu_type,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v0.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v0.c
index 9ec079247c4b..0757a4e8540e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v0.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v0.c
@@ -331,7 +331,7 @@ static void dr_ste_v0_set_tx_push_vlan(u8 *hw_ste_p, u32 vlan_hdr,
MLX5_SET(ste_sx_transmit, hw_ste_p, action_type,
DR_STE_ACTION_TYPE_PUSH_VLAN);
MLX5_SET(ste_sx_transmit, hw_ste_p, encap_pointer_vlan_data, vlan_hdr);
- /* Due to HW limitation we need to set this bit, otherwise reforamt +
+ /* Due to HW limitation we need to set this bit, otherwise reformat +
* push vlan will not work.
*/
if (go_back)
@@ -1248,32 +1248,29 @@ dr_ste_v0_build_tnl_mpls_tag(struct mlx5dr_match_param *value,
u8 *tag)
{
struct mlx5dr_match_misc2 *misc_2 = &value->misc2;
+ u32 mpls_hdr;
if (DR_STE_IS_OUTER_MPLS_OVER_GRE_SET(misc_2)) {
- DR_STE_SET_TAG(flex_parser_0, tag, parser_3_label,
- misc_2, outer_first_mpls_over_gre_label);
-
- DR_STE_SET_TAG(flex_parser_0, tag, parser_3_exp,
- misc_2, outer_first_mpls_over_gre_exp);
-
- DR_STE_SET_TAG(flex_parser_0, tag, parser_3_s_bos,
- misc_2, outer_first_mpls_over_gre_s_bos);
-
- DR_STE_SET_TAG(flex_parser_0, tag, parser_3_ttl,
- misc_2, outer_first_mpls_over_gre_ttl);
+ mpls_hdr = misc_2->outer_first_mpls_over_gre_label << HDR_MPLS_OFFSET_LABEL;
+ misc_2->outer_first_mpls_over_gre_label = 0;
+ mpls_hdr |= misc_2->outer_first_mpls_over_gre_exp << HDR_MPLS_OFFSET_EXP;
+ misc_2->outer_first_mpls_over_gre_exp = 0;
+ mpls_hdr |= misc_2->outer_first_mpls_over_gre_s_bos << HDR_MPLS_OFFSET_S_BOS;
+ misc_2->outer_first_mpls_over_gre_s_bos = 0;
+ mpls_hdr |= misc_2->outer_first_mpls_over_gre_ttl << HDR_MPLS_OFFSET_TTL;
+ misc_2->outer_first_mpls_over_gre_ttl = 0;
} else {
- DR_STE_SET_TAG(flex_parser_0, tag, parser_3_label,
- misc_2, outer_first_mpls_over_udp_label);
-
- DR_STE_SET_TAG(flex_parser_0, tag, parser_3_exp,
- misc_2, outer_first_mpls_over_udp_exp);
-
- DR_STE_SET_TAG(flex_parser_0, tag, parser_3_s_bos,
- misc_2, outer_first_mpls_over_udp_s_bos);
-
- DR_STE_SET_TAG(flex_parser_0, tag, parser_3_ttl,
- misc_2, outer_first_mpls_over_udp_ttl);
+ mpls_hdr = misc_2->outer_first_mpls_over_udp_label << HDR_MPLS_OFFSET_LABEL;
+ misc_2->outer_first_mpls_over_udp_label = 0;
+ mpls_hdr |= misc_2->outer_first_mpls_over_udp_exp << HDR_MPLS_OFFSET_EXP;
+ misc_2->outer_first_mpls_over_udp_exp = 0;
+ mpls_hdr |= misc_2->outer_first_mpls_over_udp_s_bos << HDR_MPLS_OFFSET_S_BOS;
+ misc_2->outer_first_mpls_over_udp_s_bos = 0;
+ mpls_hdr |= misc_2->outer_first_mpls_over_udp_ttl << HDR_MPLS_OFFSET_TTL;
+ misc_2->outer_first_mpls_over_udp_ttl = 0;
}
+
+ MLX5_SET(ste_flex_parser_0, tag, flex_parser_3, mpls_hdr);
return 0;
}
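The rewritten tag builder packs the four MPLS subfields into one label-stack-entry dword and writes it to flex_parser_3 in a single MLX5_SET, instead of setting bit-fields one by one. A standalone sketch of the packing using the same HDR_MPLS_OFFSET_* shifts; the sample values are arbitrary.

#include <stdint.h>
#include <stdio.h>

enum {
	HDR_MPLS_OFFSET_LABEL = 12,
	HDR_MPLS_OFFSET_EXP   = 9,
	HDR_MPLS_OFFSET_S_BOS = 8,
	HDR_MPLS_OFFSET_TTL   = 0,
};

int main(void)
{
	uint32_t label = 0x12345, exp = 0x5, s_bos = 1, ttl = 64;
	/* 20-bit label | 3-bit exp | 1-bit bottom-of-stack | 8-bit TTL */
	uint32_t mpls_hdr = label << HDR_MPLS_OFFSET_LABEL |
			    exp   << HDR_MPLS_OFFSET_EXP   |
			    s_bos << HDR_MPLS_OFFSET_S_BOS |
			    ttl   << HDR_MPLS_OFFSET_TTL;

	printf("mpls_hdr = 0x%08x\n", (unsigned int)mpls_hdr); /* 0x12345b40 */
	return 0;
}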
@@ -1288,6 +1285,91 @@ dr_ste_v0_build_tnl_mpls_init(struct mlx5dr_ste_build *sb,
sb->ste_build_tag_func = &dr_ste_v0_build_tnl_mpls_tag;
}
+static int
+dr_ste_v0_build_tnl_mpls_over_udp_tag(struct mlx5dr_match_param *value,
+ struct mlx5dr_ste_build *sb,
+ u8 *tag)
+{
+ struct mlx5dr_match_misc2 *misc2 = &value->misc2;
+ u8 *parser_ptr;
+ u8 parser_id;
+ u32 mpls_hdr;
+
+ mpls_hdr = misc2->outer_first_mpls_over_udp_label << HDR_MPLS_OFFSET_LABEL;
+ misc2->outer_first_mpls_over_udp_label = 0;
+ mpls_hdr |= misc2->outer_first_mpls_over_udp_exp << HDR_MPLS_OFFSET_EXP;
+ misc2->outer_first_mpls_over_udp_exp = 0;
+ mpls_hdr |= misc2->outer_first_mpls_over_udp_s_bos << HDR_MPLS_OFFSET_S_BOS;
+ misc2->outer_first_mpls_over_udp_s_bos = 0;
+ mpls_hdr |= misc2->outer_first_mpls_over_udp_ttl << HDR_MPLS_OFFSET_TTL;
+ misc2->outer_first_mpls_over_udp_ttl = 0;
+
+ parser_id = sb->caps->flex_parser_id_mpls_over_udp;
+ parser_ptr = dr_ste_calc_flex_parser_offset(tag, parser_id);
+ *(__be32 *)parser_ptr = cpu_to_be32(mpls_hdr);
+
+ return 0;
+}
+
+static void
+dr_ste_v0_build_tnl_mpls_over_udp_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask)
+{
+ dr_ste_v0_build_tnl_mpls_over_udp_tag(mask, sb, sb->bit_mask);
+ /* STEs with lookup type FLEX_PARSER_{0/1} include
+ * flex parsers_{0-3}/{4-7} respectively.
+ */
+ sb->lu_type = sb->caps->flex_parser_id_mpls_over_udp > DR_STE_MAX_FLEX_0_ID ?
+ DR_STE_V0_LU_TYPE_FLEX_PARSER_1 :
+ DR_STE_V0_LU_TYPE_FLEX_PARSER_0;
+
+ sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
+ sb->ste_build_tag_func = &dr_ste_v0_build_tnl_mpls_over_udp_tag;
+}
+
+static int
+dr_ste_v0_build_tnl_mpls_over_gre_tag(struct mlx5dr_match_param *value,
+ struct mlx5dr_ste_build *sb,
+ u8 *tag)
+{
+ struct mlx5dr_match_misc2 *misc2 = &value->misc2;
+ u8 *parser_ptr;
+ u8 parser_id;
+ u32 mpls_hdr;
+
+ mpls_hdr = misc2->outer_first_mpls_over_gre_label << HDR_MPLS_OFFSET_LABEL;
+ misc2->outer_first_mpls_over_gre_label = 0;
+ mpls_hdr |= misc2->outer_first_mpls_over_gre_exp << HDR_MPLS_OFFSET_EXP;
+ misc2->outer_first_mpls_over_gre_exp = 0;
+ mpls_hdr |= misc2->outer_first_mpls_over_gre_s_bos << HDR_MPLS_OFFSET_S_BOS;
+ misc2->outer_first_mpls_over_gre_s_bos = 0;
+ mpls_hdr |= misc2->outer_first_mpls_over_gre_ttl << HDR_MPLS_OFFSET_TTL;
+ misc2->outer_first_mpls_over_gre_ttl = 0;
+
+ parser_id = sb->caps->flex_parser_id_mpls_over_gre;
+ parser_ptr = dr_ste_calc_flex_parser_offset(tag, parser_id);
+ *(__be32 *)parser_ptr = cpu_to_be32(mpls_hdr);
+
+ return 0;
+}
+
+static void
+dr_ste_v0_build_tnl_mpls_over_gre_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask)
+{
+ dr_ste_v0_build_tnl_mpls_over_gre_tag(mask, sb, sb->bit_mask);
+
+ /* STEs with lookup type FLEX_PARSER_{0/1} include
+ * flex parsers_{0-3}/{4-7} respectively.
+ */
+ sb->lu_type = sb->caps->flex_parser_id_mpls_over_gre > DR_STE_MAX_FLEX_0_ID ?
+ DR_STE_V0_LU_TYPE_FLEX_PARSER_1 :
+ DR_STE_V0_LU_TYPE_FLEX_PARSER_0;
+
+ sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
+ sb->ste_build_tag_func = &dr_ste_v0_build_tnl_mpls_over_gre_tag;
+}
+
#define ICMP_TYPE_OFFSET_FIRST_DW 24
#define ICMP_CODE_OFFSET_FIRST_DW 16
@@ -1300,9 +1382,11 @@ dr_ste_v0_build_icmp_tag(struct mlx5dr_match_param *value,
u32 *icmp_header_data;
int dw0_location;
int dw1_location;
+ u8 *parser_ptr;
u8 *icmp_type;
u8 *icmp_code;
bool is_ipv4;
+ u32 icmp_hdr;
is_ipv4 = DR_MASK_IS_ICMPV4_SET(misc_3);
if (is_ipv4) {
@@ -1319,47 +1403,40 @@ dr_ste_v0_build_icmp_tag(struct mlx5dr_match_param *value,
dw1_location = sb->caps->flex_parser_id_icmpv6_dw1;
}
- switch (dw0_location) {
- case 4:
- MLX5_SET(ste_flex_parser_1, tag, flex_parser_4,
- (*icmp_type << ICMP_TYPE_OFFSET_FIRST_DW) |
- (*icmp_code << ICMP_TYPE_OFFSET_FIRST_DW));
-
- *icmp_type = 0;
- *icmp_code = 0;
- break;
- default:
- return -EINVAL;
- }
+ parser_ptr = dr_ste_calc_flex_parser_offset(tag, dw0_location);
+ icmp_hdr = (*icmp_type << ICMP_TYPE_OFFSET_FIRST_DW) |
+ (*icmp_code << ICMP_CODE_OFFSET_FIRST_DW);
+ *(__be32 *)parser_ptr = cpu_to_be32(icmp_hdr);
+ *icmp_code = 0;
+ *icmp_type = 0;
- switch (dw1_location) {
- case 5:
- MLX5_SET(ste_flex_parser_1, tag, flex_parser_5,
- *icmp_header_data);
- *icmp_header_data = 0;
- break;
- default:
- return -EINVAL;
- }
+ parser_ptr = dr_ste_calc_flex_parser_offset(tag, dw1_location);
+ *(__be32 *)parser_ptr = cpu_to_be32(*icmp_header_data);
+ *icmp_header_data = 0;
return 0;
}
-static int
+static void
dr_ste_v0_build_icmp_init(struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask)
{
- int ret;
+ u8 parser_id;
+ bool is_ipv4;
- ret = dr_ste_v0_build_icmp_tag(mask, sb, sb->bit_mask);
- if (ret)
- return ret;
+ dr_ste_v0_build_icmp_tag(mask, sb, sb->bit_mask);
- sb->lu_type = DR_STE_V0_LU_TYPE_FLEX_PARSER_1;
+ /* STEs with lookup type FLEX_PARSER_{0/1} include
+ * flex parsers_{0-3}/{4-7} respectively.
+ */
+ is_ipv4 = DR_MASK_IS_ICMPV4_SET(&mask->misc3);
+ parser_id = is_ipv4 ? sb->caps->flex_parser_id_icmp_dw0 :
+ sb->caps->flex_parser_id_icmpv6_dw0;
+ sb->lu_type = parser_id > DR_STE_MAX_FLEX_0_ID ?
+ DR_STE_V0_LU_TYPE_FLEX_PARSER_1 :
+ DR_STE_V0_LU_TYPE_FLEX_PARSER_0;
sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
sb->ste_build_tag_func = &dr_ste_v0_build_icmp_tag;
-
- return 0;
}
static int
@@ -1595,6 +1672,185 @@ dr_ste_v0_build_src_gvmi_qpn_init(struct mlx5dr_ste_build *sb,
sb->ste_build_tag_func = &dr_ste_v0_build_src_gvmi_qpn_tag;
}
+static void dr_ste_v0_set_flex_parser(u32 *misc4_field_id,
+ u32 *misc4_field_value,
+ bool *parser_is_used,
+ u8 *tag)
+{
+ u32 id = *misc4_field_id;
+ u8 *parser_ptr;
+
+ if (id >= DR_NUM_OF_FLEX_PARSERS || parser_is_used[id])
+ return;
+
+ parser_is_used[id] = true;
+ parser_ptr = dr_ste_calc_flex_parser_offset(tag, id);
+
+ *(__be32 *)parser_ptr = cpu_to_be32(*misc4_field_value);
+ *misc4_field_id = 0;
+ *misc4_field_value = 0;
+}
+
+static int dr_ste_v0_build_flex_parser_tag(struct mlx5dr_match_param *value,
+ struct mlx5dr_ste_build *sb,
+ u8 *tag)
+{
+ struct mlx5dr_match_misc4 *misc_4_mask = &value->misc4;
+ bool parser_is_used[DR_NUM_OF_FLEX_PARSERS] = {};
+
+ dr_ste_v0_set_flex_parser(&misc_4_mask->prog_sample_field_id_0,
+ &misc_4_mask->prog_sample_field_value_0,
+ parser_is_used, tag);
+
+ dr_ste_v0_set_flex_parser(&misc_4_mask->prog_sample_field_id_1,
+ &misc_4_mask->prog_sample_field_value_1,
+ parser_is_used, tag);
+
+ dr_ste_v0_set_flex_parser(&misc_4_mask->prog_sample_field_id_2,
+ &misc_4_mask->prog_sample_field_value_2,
+ parser_is_used, tag);
+
+ dr_ste_v0_set_flex_parser(&misc_4_mask->prog_sample_field_id_3,
+ &misc_4_mask->prog_sample_field_value_3,
+ parser_is_used, tag);
+
+ return 0;
+}
+
+static void dr_ste_v0_build_flex_parser_0_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask)
+{
+ sb->lu_type = DR_STE_V0_LU_TYPE_FLEX_PARSER_0;
+ dr_ste_v0_build_flex_parser_tag(mask, sb, sb->bit_mask);
+ sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
+ sb->ste_build_tag_func = &dr_ste_v0_build_flex_parser_tag;
+}
+
+static void dr_ste_v0_build_flex_parser_1_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask)
+{
+ sb->lu_type = DR_STE_V0_LU_TYPE_FLEX_PARSER_1;
+ dr_ste_v0_build_flex_parser_tag(mask, sb, sb->bit_mask);
+ sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
+ sb->ste_build_tag_func = &dr_ste_v0_build_flex_parser_tag;
+}
+
+static int
+dr_ste_v0_build_flex_parser_tnl_geneve_tlv_opt_tag(struct mlx5dr_match_param *value,
+ struct mlx5dr_ste_build *sb,
+ u8 *tag)
+{
+ struct mlx5dr_match_misc3 *misc3 = &value->misc3;
+ u8 parser_id = sb->caps->flex_parser_id_geneve_tlv_option_0;
+ u8 *parser_ptr = dr_ste_calc_flex_parser_offset(tag, parser_id);
+
+ MLX5_SET(ste_flex_parser_0, parser_ptr, flex_parser_3,
+ misc3->geneve_tlv_option_0_data);
+ misc3->geneve_tlv_option_0_data = 0;
+
+ return 0;
+}
+
+static void
+dr_ste_v0_build_flex_parser_tnl_geneve_tlv_opt_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask)
+{
+ dr_ste_v0_build_flex_parser_tnl_geneve_tlv_opt_tag(mask, sb, sb->bit_mask);
+
+ /* STEs with lookup type FLEX_PARSER_{0/1} include
+ * flex parsers_{0-3}/{4-7} respectively.
+ */
+ sb->lu_type = sb->caps->flex_parser_id_geneve_tlv_option_0 > DR_STE_MAX_FLEX_0_ID ?
+ DR_STE_V0_LU_TYPE_FLEX_PARSER_1 :
+ DR_STE_V0_LU_TYPE_FLEX_PARSER_0;
+
+ sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
+ sb->ste_build_tag_func = &dr_ste_v0_build_flex_parser_tnl_geneve_tlv_opt_tag;
+}
+
+static int dr_ste_v0_build_flex_parser_tnl_gtpu_tag(struct mlx5dr_match_param *value,
+ struct mlx5dr_ste_build *sb,
+ u8 *tag)
+{
+ struct mlx5dr_match_misc3 *misc3 = &value->misc3;
+
+ DR_STE_SET_TAG(flex_parser_tnl_gtpu, tag,
+ gtpu_msg_flags, misc3,
+ gtpu_msg_flags);
+ DR_STE_SET_TAG(flex_parser_tnl_gtpu, tag,
+ gtpu_msg_type, misc3,
+ gtpu_msg_type);
+ DR_STE_SET_TAG(flex_parser_tnl_gtpu, tag,
+ gtpu_teid, misc3,
+ gtpu_teid);
+
+ return 0;
+}
+
+static void dr_ste_v0_build_flex_parser_tnl_gtpu_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask)
+{
+ dr_ste_v0_build_flex_parser_tnl_gtpu_tag(mask, sb, sb->bit_mask);
+
+ sb->lu_type = DR_STE_V0_LU_TYPE_FLEX_PARSER_TNL_HEADER;
+ sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
+ sb->ste_build_tag_func = &dr_ste_v0_build_flex_parser_tnl_gtpu_tag;
+}
+
+static int
+dr_ste_v0_build_tnl_gtpu_flex_parser_0_tag(struct mlx5dr_match_param *value,
+ struct mlx5dr_ste_build *sb,
+ u8 *tag)
+{
+ if (dr_is_flex_parser_0_id(sb->caps->flex_parser_id_gtpu_dw_0))
+ DR_STE_SET_FLEX_PARSER_FIELD(tag, gtpu_dw_0, sb->caps, &value->misc3);
+ if (dr_is_flex_parser_0_id(sb->caps->flex_parser_id_gtpu_teid))
+ DR_STE_SET_FLEX_PARSER_FIELD(tag, gtpu_teid, sb->caps, &value->misc3);
+ if (dr_is_flex_parser_0_id(sb->caps->flex_parser_id_gtpu_dw_2))
+ DR_STE_SET_FLEX_PARSER_FIELD(tag, gtpu_dw_2, sb->caps, &value->misc3);
+ if (dr_is_flex_parser_0_id(sb->caps->flex_parser_id_gtpu_first_ext_dw_0))
+ DR_STE_SET_FLEX_PARSER_FIELD(tag, gtpu_first_ext_dw_0, sb->caps, &value->misc3);
+ return 0;
+}
+
+static void
+dr_ste_v0_build_tnl_gtpu_flex_parser_0_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask)
+{
+ dr_ste_v0_build_tnl_gtpu_flex_parser_0_tag(mask, sb, sb->bit_mask);
+
+ sb->lu_type = DR_STE_V0_LU_TYPE_FLEX_PARSER_0;
+ sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
+ sb->ste_build_tag_func = &dr_ste_v0_build_tnl_gtpu_flex_parser_0_tag;
+}
+
+static int
+dr_ste_v0_build_tnl_gtpu_flex_parser_1_tag(struct mlx5dr_match_param *value,
+ struct mlx5dr_ste_build *sb,
+ u8 *tag)
+{
+ if (dr_is_flex_parser_1_id(sb->caps->flex_parser_id_gtpu_dw_0))
+ DR_STE_SET_FLEX_PARSER_FIELD(tag, gtpu_dw_0, sb->caps, &value->misc3);
+ if (dr_is_flex_parser_1_id(sb->caps->flex_parser_id_gtpu_teid))
+ DR_STE_SET_FLEX_PARSER_FIELD(tag, gtpu_teid, sb->caps, &value->misc3);
+ if (dr_is_flex_parser_1_id(sb->caps->flex_parser_id_gtpu_dw_2))
+ DR_STE_SET_FLEX_PARSER_FIELD(tag, gtpu_dw_2, sb->caps, &value->misc3);
+ if (dr_is_flex_parser_1_id(sb->caps->flex_parser_id_gtpu_first_ext_dw_0))
+ DR_STE_SET_FLEX_PARSER_FIELD(tag, gtpu_first_ext_dw_0, sb->caps, &value->misc3);
+ return 0;
+}
+
+static void
+dr_ste_v0_build_tnl_gtpu_flex_parser_1_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask)
+{
+ dr_ste_v0_build_tnl_gtpu_flex_parser_1_tag(mask, sb, sb->bit_mask);
+
+ sb->lu_type = DR_STE_V0_LU_TYPE_FLEX_PARSER_1;
+ sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
+ sb->ste_build_tag_func = &dr_ste_v0_build_tnl_gtpu_flex_parser_1_tag;
+}
+
struct mlx5dr_ste_ctx ste_ctx_v0 = {
/* Builders */
.build_eth_l2_src_dst_init = &dr_ste_v0_build_eth_l2_src_dst_init,
@@ -1609,14 +1865,22 @@ struct mlx5dr_ste_ctx ste_ctx_v0 = {
.build_mpls_init = &dr_ste_v0_build_mpls_init,
.build_tnl_gre_init = &dr_ste_v0_build_tnl_gre_init,
.build_tnl_mpls_init = &dr_ste_v0_build_tnl_mpls_init,
+ .build_tnl_mpls_over_udp_init = &dr_ste_v0_build_tnl_mpls_over_udp_init,
+ .build_tnl_mpls_over_gre_init = &dr_ste_v0_build_tnl_mpls_over_gre_init,
.build_icmp_init = &dr_ste_v0_build_icmp_init,
.build_general_purpose_init = &dr_ste_v0_build_general_purpose_init,
.build_eth_l4_misc_init = &dr_ste_v0_build_eth_l4_misc_init,
.build_tnl_vxlan_gpe_init = &dr_ste_v0_build_flex_parser_tnl_vxlan_gpe_init,
.build_tnl_geneve_init = &dr_ste_v0_build_flex_parser_tnl_geneve_init,
+ .build_tnl_geneve_tlv_opt_init = &dr_ste_v0_build_flex_parser_tnl_geneve_tlv_opt_init,
.build_register_0_init = &dr_ste_v0_build_register_0_init,
.build_register_1_init = &dr_ste_v0_build_register_1_init,
.build_src_gvmi_qpn_init = &dr_ste_v0_build_src_gvmi_qpn_init,
+ .build_flex_parser_0_init = &dr_ste_v0_build_flex_parser_0_init,
+ .build_flex_parser_1_init = &dr_ste_v0_build_flex_parser_1_init,
+ .build_tnl_gtpu_init = &dr_ste_v0_build_flex_parser_tnl_gtpu_init,
+ .build_tnl_gtpu_flex_parser_0_init = &dr_ste_v0_build_tnl_gtpu_flex_parser_0_init,
+ .build_tnl_gtpu_flex_parser_1_init = &dr_ste_v0_build_tnl_gtpu_flex_parser_1_init,
/* Getters and Setters */
.ste_init = &dr_ste_v0_init,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.c
index 9143ec326ebf..054c2e2b6554 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.c
@@ -437,21 +437,6 @@ static void dr_ste_v1_set_rx_decap(u8 *hw_ste_p, u8 *s_action)
dr_ste_v1_set_reparse(hw_ste_p);
}
-static void dr_ste_v1_set_rx_decap_l3(u8 *hw_ste_p,
- u8 *s_action,
- u16 decap_actions,
- u32 decap_index)
-{
- MLX5_SET(ste_single_action_modify_list_v1, s_action, action_id,
- DR_STE_V1_ACTION_ID_MODIFY_LIST);
- MLX5_SET(ste_single_action_modify_list_v1, s_action, num_of_modify_actions,
- decap_actions);
- MLX5_SET(ste_single_action_modify_list_v1, s_action, modify_actions_ptr,
- decap_index);
-
- dr_ste_v1_set_reparse(hw_ste_p);
-}
-
static void dr_ste_v1_set_rewrite_actions(u8 *hw_ste_p,
u8 *s_action,
u16 num_of_actions,
@@ -571,9 +556,6 @@ static void dr_ste_v1_set_actions_rx(struct mlx5dr_domain *dmn,
bool allow_ctr = true;
if (action_type_set[DR_ACTION_TYP_TNL_L3_TO_L2]) {
- dr_ste_v1_set_rx_decap_l3(last_ste, action,
- attr->decap_actions,
- attr->decap_index);
dr_ste_v1_set_rewrite_actions(last_ste, action,
attr->decap_actions,
attr->decap_index);
@@ -1324,6 +1306,88 @@ static void dr_ste_v1_build_tnl_mpls_init(struct mlx5dr_ste_build *sb,
sb->ste_build_tag_func = &dr_ste_v1_build_tnl_mpls_tag;
}
+static int dr_ste_v1_build_tnl_mpls_over_udp_tag(struct mlx5dr_match_param *value,
+ struct mlx5dr_ste_build *sb,
+ u8 *tag)
+{
+ struct mlx5dr_match_misc2 *misc2 = &value->misc2;
+ u8 *parser_ptr;
+ u8 parser_id;
+ u32 mpls_hdr;
+
+ mpls_hdr = misc2->outer_first_mpls_over_udp_label << HDR_MPLS_OFFSET_LABEL;
+ misc2->outer_first_mpls_over_udp_label = 0;
+ mpls_hdr |= misc2->outer_first_mpls_over_udp_exp << HDR_MPLS_OFFSET_EXP;
+ misc2->outer_first_mpls_over_udp_exp = 0;
+ mpls_hdr |= misc2->outer_first_mpls_over_udp_s_bos << HDR_MPLS_OFFSET_S_BOS;
+ misc2->outer_first_mpls_over_udp_s_bos = 0;
+ mpls_hdr |= misc2->outer_first_mpls_over_udp_ttl << HDR_MPLS_OFFSET_TTL;
+ misc2->outer_first_mpls_over_udp_ttl = 0;
+
+ parser_id = sb->caps->flex_parser_id_mpls_over_udp;
+ parser_ptr = dr_ste_calc_flex_parser_offset(tag, parser_id);
+ *(__be32 *)parser_ptr = cpu_to_be32(mpls_hdr);
+
+ return 0;
+}
+
+static void dr_ste_v1_build_tnl_mpls_over_udp_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask)
+{
+ dr_ste_v1_build_tnl_mpls_over_udp_tag(mask, sb, sb->bit_mask);
+
+ /* STEs with lookup type FLEX_PARSER_{0/1} include
+ * flex parsers_{0-3}/{4-7} respectively.
+ */
+ sb->lu_type = sb->caps->flex_parser_id_mpls_over_udp > DR_STE_MAX_FLEX_0_ID ?
+ DR_STE_V1_LU_TYPE_FLEX_PARSER_1 :
+ DR_STE_V1_LU_TYPE_FLEX_PARSER_0;
+
+ sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
+ sb->ste_build_tag_func = &dr_ste_v1_build_tnl_mpls_over_udp_tag;
+}
+
+static int dr_ste_v1_build_tnl_mpls_over_gre_tag(struct mlx5dr_match_param *value,
+ struct mlx5dr_ste_build *sb,
+ u8 *tag)
+{
+ struct mlx5dr_match_misc2 *misc2 = &value->misc2;
+ u8 *parser_ptr;
+ u8 parser_id;
+ u32 mpls_hdr;
+
+ mpls_hdr = misc2->outer_first_mpls_over_gre_label << HDR_MPLS_OFFSET_LABEL;
+ misc2->outer_first_mpls_over_gre_label = 0;
+ mpls_hdr |= misc2->outer_first_mpls_over_gre_exp << HDR_MPLS_OFFSET_EXP;
+ misc2->outer_first_mpls_over_gre_exp = 0;
+ mpls_hdr |= misc2->outer_first_mpls_over_gre_s_bos << HDR_MPLS_OFFSET_S_BOS;
+ misc2->outer_first_mpls_over_gre_s_bos = 0;
+ mpls_hdr |= misc2->outer_first_mpls_over_gre_ttl << HDR_MPLS_OFFSET_TTL;
+ misc2->outer_first_mpls_over_gre_ttl = 0;
+
+ parser_id = sb->caps->flex_parser_id_mpls_over_gre;
+ parser_ptr = dr_ste_calc_flex_parser_offset(tag, parser_id);
+ *(__be32 *)parser_ptr = cpu_to_be32(mpls_hdr);
+
+ return 0;
+}
+
+static void dr_ste_v1_build_tnl_mpls_over_gre_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask)
+{
+ dr_ste_v1_build_tnl_mpls_over_gre_tag(mask, sb, sb->bit_mask);
+
+ /* STEs with lookup type FLEX_PARSER_{0/1} include
+ * flex parsers_{0-3}/{4-7} respectively.
+ */
+ sb->lu_type = sb->caps->flex_parser_id_mpls_over_gre > DR_STE_MAX_FLEX_0_ID ?
+ DR_STE_V1_LU_TYPE_FLEX_PARSER_1 :
+ DR_STE_V1_LU_TYPE_FLEX_PARSER_0;
+
+ sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
+ sb->ste_build_tag_func = &dr_ste_v1_build_tnl_mpls_over_gre_tag;
+}
+
static int dr_ste_v1_build_icmp_tag(struct mlx5dr_match_param *value,
struct mlx5dr_ste_build *sb,
u8 *tag)
@@ -1355,16 +1419,14 @@ static int dr_ste_v1_build_icmp_tag(struct mlx5dr_match_param *value,
return 0;
}
-static int dr_ste_v1_build_icmp_init(struct mlx5dr_ste_build *sb,
- struct mlx5dr_match_param *mask)
+static void dr_ste_v1_build_icmp_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask)
{
dr_ste_v1_build_icmp_tag(mask, sb, sb->bit_mask);
sb->lu_type = DR_STE_V1_LU_TYPE_ETHL4_MISC_O;
sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
sb->ste_build_tag_func = &dr_ste_v1_build_icmp_tag;
-
- return 0;
}
static int dr_ste_v1_build_general_purpose_tag(struct mlx5dr_match_param *value,
@@ -1532,6 +1594,7 @@ static void dr_ste_v1_build_src_gvmi_qpn_bit_mask(struct mlx5dr_match_param *val
DR_STE_SET_ONES(src_gvmi_qp_v1, bit_mask, source_gvmi, misc_mask, source_port);
DR_STE_SET_ONES(src_gvmi_qp_v1, bit_mask, source_qp, misc_mask, source_sqn);
+ misc_mask->source_eswitch_owner_vhca_id = 0;
}
static int dr_ste_v1_build_src_gvmi_qpn_tag(struct mlx5dr_match_param *value,
@@ -1588,6 +1651,179 @@ static void dr_ste_v1_build_src_gvmi_qpn_init(struct mlx5dr_ste_build *sb,
sb->ste_build_tag_func = &dr_ste_v1_build_src_gvmi_qpn_tag;
}
+static void dr_ste_v1_set_flex_parser(u32 *misc4_field_id,
+ u32 *misc4_field_value,
+ bool *parser_is_used,
+ u8 *tag)
+{
+ u32 id = *misc4_field_id;
+ u8 *parser_ptr;
+
+ if (id >= DR_NUM_OF_FLEX_PARSERS || parser_is_used[id])
+ return;
+
+ parser_is_used[id] = true;
+ parser_ptr = dr_ste_calc_flex_parser_offset(tag, id);
+
+ *(__be32 *)parser_ptr = cpu_to_be32(*misc4_field_value);
+ *misc4_field_id = 0;
+ *misc4_field_value = 0;
+}
+
+static int dr_ste_v1_build_flex_parser_tag(struct mlx5dr_match_param *value,
+ struct mlx5dr_ste_build *sb,
+ u8 *tag)
+{
+ struct mlx5dr_match_misc4 *misc_4_mask = &value->misc4;
+ bool parser_is_used[DR_NUM_OF_FLEX_PARSERS] = {};
+
+ dr_ste_v1_set_flex_parser(&misc_4_mask->prog_sample_field_id_0,
+ &misc_4_mask->prog_sample_field_value_0,
+ parser_is_used, tag);
+
+ dr_ste_v1_set_flex_parser(&misc_4_mask->prog_sample_field_id_1,
+ &misc_4_mask->prog_sample_field_value_1,
+ parser_is_used, tag);
+
+ dr_ste_v1_set_flex_parser(&misc_4_mask->prog_sample_field_id_2,
+ &misc_4_mask->prog_sample_field_value_2,
+ parser_is_used, tag);
+
+ dr_ste_v1_set_flex_parser(&misc_4_mask->prog_sample_field_id_3,
+ &misc_4_mask->prog_sample_field_value_3,
+ parser_is_used, tag);
+
+ return 0;
+}
+
+static void dr_ste_v1_build_flex_parser_0_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask)
+{
+ sb->lu_type = DR_STE_V1_LU_TYPE_FLEX_PARSER_0;
+ dr_ste_v1_build_flex_parser_tag(mask, sb, sb->bit_mask);
+ sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
+ sb->ste_build_tag_func = &dr_ste_v1_build_flex_parser_tag;
+}
+
+static void dr_ste_v1_build_flex_parser_1_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask)
+{
+ sb->lu_type = DR_STE_V1_LU_TYPE_FLEX_PARSER_1;
+ dr_ste_v1_build_flex_parser_tag(mask, sb, sb->bit_mask);
+ sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
+ sb->ste_build_tag_func = &dr_ste_v1_build_flex_parser_tag;
+}
+
+static int
+dr_ste_v1_build_flex_parser_tnl_geneve_tlv_opt_tag(struct mlx5dr_match_param *value,
+ struct mlx5dr_ste_build *sb,
+ u8 *tag)
+{
+ struct mlx5dr_match_misc3 *misc3 = &value->misc3;
+ u8 parser_id = sb->caps->flex_parser_id_geneve_tlv_option_0;
+ u8 *parser_ptr = dr_ste_calc_flex_parser_offset(tag, parser_id);
+
+ MLX5_SET(ste_flex_parser_0, parser_ptr, flex_parser_3,
+ misc3->geneve_tlv_option_0_data);
+ misc3->geneve_tlv_option_0_data = 0;
+
+ return 0;
+}
+
+static void
+dr_ste_v1_build_flex_parser_tnl_geneve_tlv_opt_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask)
+{
+ dr_ste_v1_build_flex_parser_tnl_geneve_tlv_opt_tag(mask, sb, sb->bit_mask);
+
+ /* STEs with lookup type FLEX_PARSER_{0/1} include
+ * flex parsers_{0-3}/{4-7} respectively.
+ */
+ sb->lu_type = sb->caps->flex_parser_id_geneve_tlv_option_0 > DR_STE_MAX_FLEX_0_ID ?
+ DR_STE_V1_LU_TYPE_FLEX_PARSER_1 :
+ DR_STE_V1_LU_TYPE_FLEX_PARSER_0;
+
+ sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
+ sb->ste_build_tag_func = &dr_ste_v1_build_flex_parser_tnl_geneve_tlv_opt_tag;
+}
+
+static int dr_ste_v1_build_flex_parser_tnl_gtpu_tag(struct mlx5dr_match_param *value,
+ struct mlx5dr_ste_build *sb,
+ u8 *tag)
+{
+ struct mlx5dr_match_misc3 *misc3 = &value->misc3;
+
+ DR_STE_SET_TAG(flex_parser_tnl_gtpu, tag, gtpu_msg_flags, misc3, gtpu_msg_flags);
+ DR_STE_SET_TAG(flex_parser_tnl_gtpu, tag, gtpu_msg_type, misc3, gtpu_msg_type);
+ DR_STE_SET_TAG(flex_parser_tnl_gtpu, tag, gtpu_teid, misc3, gtpu_teid);
+
+ return 0;
+}
+
+static void dr_ste_v1_build_flex_parser_tnl_gtpu_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask)
+{
+ dr_ste_v1_build_flex_parser_tnl_gtpu_tag(mask, sb, sb->bit_mask);
+
+ sb->lu_type = DR_STE_V1_LU_TYPE_FLEX_PARSER_TNL_HEADER;
+ sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
+ sb->ste_build_tag_func = &dr_ste_v1_build_flex_parser_tnl_gtpu_tag;
+}
+
+static int
+dr_ste_v1_build_tnl_gtpu_flex_parser_0_tag(struct mlx5dr_match_param *value,
+ struct mlx5dr_ste_build *sb,
+ u8 *tag)
+{
+ if (dr_is_flex_parser_0_id(sb->caps->flex_parser_id_gtpu_dw_0))
+ DR_STE_SET_FLEX_PARSER_FIELD(tag, gtpu_dw_0, sb->caps, &value->misc3);
+ if (dr_is_flex_parser_0_id(sb->caps->flex_parser_id_gtpu_teid))
+ DR_STE_SET_FLEX_PARSER_FIELD(tag, gtpu_teid, sb->caps, &value->misc3);
+ if (dr_is_flex_parser_0_id(sb->caps->flex_parser_id_gtpu_dw_2))
+ DR_STE_SET_FLEX_PARSER_FIELD(tag, gtpu_dw_2, sb->caps, &value->misc3);
+ if (dr_is_flex_parser_0_id(sb->caps->flex_parser_id_gtpu_first_ext_dw_0))
+ DR_STE_SET_FLEX_PARSER_FIELD(tag, gtpu_first_ext_dw_0, sb->caps, &value->misc3);
+ return 0;
+}
+
+static void
+dr_ste_v1_build_tnl_gtpu_flex_parser_0_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask)
+{
+ dr_ste_v1_build_tnl_gtpu_flex_parser_0_tag(mask, sb, sb->bit_mask);
+
+ sb->lu_type = DR_STE_V1_LU_TYPE_FLEX_PARSER_0;
+ sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
+ sb->ste_build_tag_func = &dr_ste_v1_build_tnl_gtpu_flex_parser_0_tag;
+}
+
+static int
+dr_ste_v1_build_tnl_gtpu_flex_parser_1_tag(struct mlx5dr_match_param *value,
+ struct mlx5dr_ste_build *sb,
+ u8 *tag)
+{
+ if (dr_is_flex_parser_1_id(sb->caps->flex_parser_id_gtpu_dw_0))
+ DR_STE_SET_FLEX_PARSER_FIELD(tag, gtpu_dw_0, sb->caps, &value->misc3);
+ if (dr_is_flex_parser_1_id(sb->caps->flex_parser_id_gtpu_teid))
+ DR_STE_SET_FLEX_PARSER_FIELD(tag, gtpu_teid, sb->caps, &value->misc3);
+ if (dr_is_flex_parser_1_id(sb->caps->flex_parser_id_gtpu_dw_2))
+ DR_STE_SET_FLEX_PARSER_FIELD(tag, gtpu_dw_2, sb->caps, &value->misc3);
+ if (dr_is_flex_parser_1_id(sb->caps->flex_parser_id_gtpu_first_ext_dw_0))
+ DR_STE_SET_FLEX_PARSER_FIELD(tag, gtpu_first_ext_dw_0, sb->caps, &value->misc3);
+ return 0;
+}
+
+static void
+dr_ste_v1_build_tnl_gtpu_flex_parser_1_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask)
+{
+ dr_ste_v1_build_tnl_gtpu_flex_parser_1_tag(mask, sb, sb->bit_mask);
+
+ sb->lu_type = DR_STE_V1_LU_TYPE_FLEX_PARSER_1;
+ sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
+ sb->ste_build_tag_func = &dr_ste_v1_build_tnl_gtpu_flex_parser_1_tag;
+}
+
struct mlx5dr_ste_ctx ste_ctx_v1 = {
/* Builders */
.build_eth_l2_src_dst_init = &dr_ste_v1_build_eth_l2_src_dst_init,
@@ -1602,14 +1838,23 @@ struct mlx5dr_ste_ctx ste_ctx_v1 = {
.build_mpls_init = &dr_ste_v1_build_mpls_init,
.build_tnl_gre_init = &dr_ste_v1_build_tnl_gre_init,
.build_tnl_mpls_init = &dr_ste_v1_build_tnl_mpls_init,
+ .build_tnl_mpls_over_udp_init = &dr_ste_v1_build_tnl_mpls_over_udp_init,
+ .build_tnl_mpls_over_gre_init = &dr_ste_v1_build_tnl_mpls_over_gre_init,
.build_icmp_init = &dr_ste_v1_build_icmp_init,
.build_general_purpose_init = &dr_ste_v1_build_general_purpose_init,
.build_eth_l4_misc_init = &dr_ste_v1_build_eth_l4_misc_init,
.build_tnl_vxlan_gpe_init = &dr_ste_v1_build_flex_parser_tnl_vxlan_gpe_init,
.build_tnl_geneve_init = &dr_ste_v1_build_flex_parser_tnl_geneve_init,
+ .build_tnl_geneve_tlv_opt_init = &dr_ste_v1_build_flex_parser_tnl_geneve_tlv_opt_init,
.build_register_0_init = &dr_ste_v1_build_register_0_init,
.build_register_1_init = &dr_ste_v1_build_register_1_init,
.build_src_gvmi_qpn_init = &dr_ste_v1_build_src_gvmi_qpn_init,
+ .build_flex_parser_0_init = &dr_ste_v1_build_flex_parser_0_init,
+ .build_flex_parser_1_init = &dr_ste_v1_build_flex_parser_1_init,
+ .build_tnl_gtpu_init = &dr_ste_v1_build_flex_parser_tnl_gtpu_init,
+ .build_tnl_gtpu_flex_parser_0_init = &dr_ste_v1_build_tnl_gtpu_flex_parser_0_init,
+ .build_tnl_gtpu_flex_parser_1_init = &dr_ste_v1_build_tnl_gtpu_flex_parser_1_init,
+
/* Getters and Setters */
.ste_init = &dr_ste_v1_init,
.set_next_lu_type = &dr_ste_v1_set_next_lu_type,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_table.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_table.c
index b599b6beb5b9..30ae3cda6d2e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_table.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_table.c
@@ -29,7 +29,7 @@ int mlx5dr_table_set_miss_action(struct mlx5dr_table *tbl,
last_htbl = tbl->rx.s_anchor;
tbl->rx.default_icm_addr = action ?
- action->dest_tbl.tbl->rx.s_anchor->chunk->icm_addr :
+ action->dest_tbl->tbl->rx.s_anchor->chunk->icm_addr :
tbl->rx.nic_dmn->default_icm_addr;
info.type = CONNECT_MISS;
@@ -53,7 +53,7 @@ int mlx5dr_table_set_miss_action(struct mlx5dr_table *tbl,
last_htbl = tbl->tx.s_anchor;
tbl->tx.default_icm_addr = action ?
- action->dest_tbl.tbl->tx.s_anchor->chunk->icm_addr :
+ action->dest_tbl->tbl->tx.s_anchor->chunk->icm_addr :
tbl->tx.nic_dmn->default_icm_addr;
info.type = CONNECT_MISS;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h
index 4af0e4e6a13c..67460c42a99b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h
@@ -12,17 +12,30 @@
#include "mlx5_ifc_dr.h"
#include "mlx5dr.h"
-#define DR_RULE_MAX_STES 17
+#define DR_RULE_MAX_STES 18
#define DR_ACTION_MAX_STES 5
#define WIRE_PORT 0xFFFF
#define DR_STE_SVLAN 0x1
#define DR_STE_CVLAN 0x2
#define DR_SZ_MATCH_PARAM (MLX5_ST_SZ_DW_MATCH_PARAM * 4)
+#define DR_NUM_OF_FLEX_PARSERS 8
+#define DR_STE_MAX_FLEX_0_ID 3
+#define DR_STE_MAX_FLEX_1_ID 7
#define mlx5dr_err(dmn, arg...) mlx5_core_err((dmn)->mdev, ##arg)
#define mlx5dr_info(dmn, arg...) mlx5_core_info((dmn)->mdev, ##arg)
#define mlx5dr_dbg(dmn, arg...) mlx5_core_dbg((dmn)->mdev, ##arg)
+static inline bool dr_is_flex_parser_0_id(u8 parser_id)
+{
+ return parser_id <= DR_STE_MAX_FLEX_0_ID;
+}
+
+static inline bool dr_is_flex_parser_1_id(u8 parser_id)
+{
+ return parser_id > DR_STE_MAX_FLEX_0_ID;
+}
+
enum mlx5dr_icm_chunk_size {
DR_CHUNK_SIZE_1,
DR_CHUNK_SIZE_MIN = DR_CHUNK_SIZE_1, /* keep updated when changing */
@@ -87,7 +100,8 @@ enum mlx5dr_matcher_criteria {
DR_MATCHER_CRITERIA_INNER = 1 << 2,
DR_MATCHER_CRITERIA_MISC2 = 1 << 3,
DR_MATCHER_CRITERIA_MISC3 = 1 << 4,
- DR_MATCHER_CRITERIA_MAX = 1 << 5,
+ DR_MATCHER_CRITERIA_MISC4 = 1 << 5,
+ DR_MATCHER_CRITERIA_MAX = 1 << 6,
};
enum mlx5dr_action_type {
@@ -389,11 +403,21 @@ void mlx5dr_ste_build_tnl_mpls(struct mlx5dr_ste_ctx *ste_ctx,
struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask,
bool inner, bool rx);
-int mlx5dr_ste_build_icmp(struct mlx5dr_ste_ctx *ste_ctx,
- struct mlx5dr_ste_build *sb,
- struct mlx5dr_match_param *mask,
- struct mlx5dr_cmd_caps *caps,
- bool inner, bool rx);
+void mlx5dr_ste_build_tnl_mpls_over_gre(struct mlx5dr_ste_ctx *ste_ctx,
+ struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask,
+ struct mlx5dr_cmd_caps *caps,
+ bool inner, bool rx);
+void mlx5dr_ste_build_tnl_mpls_over_udp(struct mlx5dr_ste_ctx *ste_ctx,
+ struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask,
+ struct mlx5dr_cmd_caps *caps,
+ bool inner, bool rx);
+void mlx5dr_ste_build_icmp(struct mlx5dr_ste_ctx *ste_ctx,
+ struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask,
+ struct mlx5dr_cmd_caps *caps,
+ bool inner, bool rx);
void mlx5dr_ste_build_tnl_vxlan_gpe(struct mlx5dr_ste_ctx *ste_ctx,
struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask,
@@ -402,6 +426,25 @@ void mlx5dr_ste_build_tnl_geneve(struct mlx5dr_ste_ctx *ste_ctx,
struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask,
bool inner, bool rx);
+void mlx5dr_ste_build_tnl_geneve_tlv_opt(struct mlx5dr_ste_ctx *ste_ctx,
+ struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask,
+ struct mlx5dr_cmd_caps *caps,
+ bool inner, bool rx);
+void mlx5dr_ste_build_tnl_gtpu(struct mlx5dr_ste_ctx *ste_ctx,
+ struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask,
+ bool inner, bool rx);
+void mlx5dr_ste_build_tnl_gtpu_flex_parser_0(struct mlx5dr_ste_ctx *ste_ctx,
+ struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask,
+ struct mlx5dr_cmd_caps *caps,
+ bool inner, bool rx);
+void mlx5dr_ste_build_tnl_gtpu_flex_parser_1(struct mlx5dr_ste_ctx *ste_ctx,
+ struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask,
+ struct mlx5dr_cmd_caps *caps,
+ bool inner, bool rx);
void mlx5dr_ste_build_general_purpose(struct mlx5dr_ste_ctx *ste_ctx,
struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask,
@@ -419,6 +462,14 @@ void mlx5dr_ste_build_src_gvmi_qpn(struct mlx5dr_ste_ctx *ste_ctx,
struct mlx5dr_match_param *mask,
struct mlx5dr_domain *dmn,
bool inner, bool rx);
+void mlx5dr_ste_build_flex_parser_0(struct mlx5dr_ste_ctx *ste_ctx,
+ struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask,
+ bool inner, bool rx);
+void mlx5dr_ste_build_flex_parser_1(struct mlx5dr_ste_ctx *ste_ctx,
+ struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask,
+ bool inner, bool rx);
void mlx5dr_ste_build_empty_always_hit(struct mlx5dr_ste_build *sb, bool rx);
/* Actions utils */
@@ -646,7 +697,24 @@ struct mlx5dr_match_misc3 {
u8 icmpv6_type;
u8 icmpv4_code;
u8 icmpv4_type;
- u8 reserved_auto3[0x1c];
+ u32 geneve_tlv_option_0_data;
+ u8 gtpu_msg_flags;
+ u8 gtpu_msg_type;
+ u32 gtpu_teid;
+ u32 gtpu_dw_2;
+ u32 gtpu_first_ext_dw_0;
+ u32 gtpu_dw_0;
+};
+
+struct mlx5dr_match_misc4 {
+ u32 prog_sample_field_value_0;
+ u32 prog_sample_field_id_0;
+ u32 prog_sample_field_value_1;
+ u32 prog_sample_field_id_1;
+ u32 prog_sample_field_value_2;
+ u32 prog_sample_field_id_2;
+ u32 prog_sample_field_value_3;
+ u32 prog_sample_field_id_3;
};
struct mlx5dr_match_param {
@@ -655,6 +723,7 @@ struct mlx5dr_match_param {
struct mlx5dr_match_spec inner;
struct mlx5dr_match_misc2 misc2;
struct mlx5dr_match_misc3 misc3;
+ struct mlx5dr_match_misc4 misc4;
};
#define DR_MASK_IS_ICMPV4_SET(_misc3) ((_misc3)->icmpv4_type || \
@@ -678,6 +747,12 @@ struct mlx5dr_cmd_vport_cap {
u32 num;
};
+struct mlx5dr_roce_cap {
+ u8 roce_en:1;
+ u8 fl_rc_qp_when_roce_disabled:1;
+ u8 fl_rc_qp_when_roce_enabled:1;
+};
+
struct mlx5dr_cmd_caps {
u16 gvmi;
u64 nic_rx_drop_address;
@@ -692,6 +767,13 @@ struct mlx5dr_cmd_caps {
u8 flex_parser_id_icmp_dw1;
u8 flex_parser_id_icmpv6_dw0;
u8 flex_parser_id_icmpv6_dw1;
+ u8 flex_parser_id_geneve_tlv_option_0;
+ u8 flex_parser_id_mpls_over_gre;
+ u8 flex_parser_id_mpls_over_udp;
+ u8 flex_parser_id_gtpu_dw_0;
+ u8 flex_parser_id_gtpu_teid;
+ u8 flex_parser_id_gtpu_dw_2;
+ u8 flex_parser_id_gtpu_first_ext_dw_0;
u8 max_ft_level;
u16 roce_min_src_udp;
u8 num_esw_ports;
@@ -707,6 +789,8 @@ struct mlx5dr_cmd_caps {
struct mlx5dr_esw_caps esw_caps;
struct mlx5dr_cmd_vport_cap *vports_caps;
bool prio_tag_required;
+ struct mlx5dr_roce_cap roce_caps;
+ u8 isolate_vl_tc:1;
};
struct mlx5dr_domain_rx_tx {
@@ -806,53 +890,71 @@ struct mlx5dr_ste_action_modify_field {
u8 l4_type;
};
+struct mlx5dr_action_rewrite {
+ struct mlx5dr_domain *dmn;
+ struct mlx5dr_icm_chunk *chunk;
+ u8 *data;
+ u16 num_of_actions;
+ u32 index;
+ u8 allow_rx:1;
+ u8 allow_tx:1;
+ u8 modify_ttl:1;
+};
+
+struct mlx5dr_action_reformat {
+ struct mlx5dr_domain *dmn;
+ u32 reformat_id;
+ u32 reformat_size;
+};
+
+struct mlx5dr_action_dest_tbl {
+ u8 is_fw_tbl:1;
+ union {
+ struct mlx5dr_table *tbl;
+ struct {
+ struct mlx5dr_domain *dmn;
+ u32 id;
+ u32 group_id;
+ enum fs_flow_table_type type;
+ u64 rx_icm_addr;
+ u64 tx_icm_addr;
+ struct mlx5dr_action **ref_actions;
+ u32 num_of_ref_actions;
+ } fw_tbl;
+ };
+};
+
+struct mlx5dr_action_ctr {
+ u32 ctr_id;
+ u32 offeset;
+};
+
+struct mlx5dr_action_vport {
+ struct mlx5dr_domain *dmn;
+ struct mlx5dr_cmd_vport_cap *caps;
+};
+
+struct mlx5dr_action_push_vlan {
+ u32 vlan_hdr; /* tpid_pcp_dei_vid */
+};
+
+struct mlx5dr_action_flow_tag {
+ u32 flow_tag;
+};
+
struct mlx5dr_action {
enum mlx5dr_action_type action_type;
refcount_t refcount;
+
union {
- struct {
- struct mlx5dr_domain *dmn;
- struct mlx5dr_icm_chunk *chunk;
- u8 *data;
- u16 num_of_actions;
- u32 index;
- u8 allow_rx:1;
- u8 allow_tx:1;
- u8 modify_ttl:1;
- } rewrite;
- struct {
- struct mlx5dr_domain *dmn;
- u32 reformat_id;
- u32 reformat_size;
- } reformat;
- struct {
- u8 is_fw_tbl:1;
- union {
- struct mlx5dr_table *tbl;
- struct {
- struct mlx5dr_domain *dmn;
- u32 id;
- u32 group_id;
- enum fs_flow_table_type type;
- u64 rx_icm_addr;
- u64 tx_icm_addr;
- struct mlx5dr_action **ref_actions;
- u32 num_of_ref_actions;
- } fw_tbl;
- };
- } dest_tbl;
- struct {
- u32 ctr_id;
- u32 offeset;
- } ctr;
- struct {
- struct mlx5dr_domain *dmn;
- struct mlx5dr_cmd_vport_cap *caps;
- } vport;
- struct {
- u32 vlan_hdr; /* tpid_pcp_dei_vid */
- } push_vlan;
- u32 flow_tag;
+ void *data;
+ struct mlx5dr_action_rewrite *rewrite;
+ struct mlx5dr_action_reformat *reformat;
+ struct mlx5dr_action_dest_tbl *dest_tbl;
+ struct mlx5dr_action_ctr *ctr;
+ struct mlx5dr_action_vport *vport;
+ struct mlx5dr_action_push_vlan *push_vlan;
+ struct mlx5dr_action_flow_tag *flow_tag;
};
};
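
The hunk above converts mlx5dr_action's union from embedded sub-structs to pointers, so each action allocates only the variant it actually uses and the union itself stays pointer-sized. A standalone sketch of the size effect (the member layouts here are hypothetical, not the driver's):

#include <stdint.h>
#include <stdio.h>

struct big_member { uint64_t fields[8]; };	/* hypothetical 64-byte variant */
struct small_member { uint32_t id; };		/* hypothetical 4-byte variant */

/* Before: union of embedded structs -- sized by the largest member. */
struct action_embedded {
	int type;
	union {
		struct big_member big;
		struct small_member small;
	};
};

/* After: union of pointers -- pointer-sized union; the sub-struct is
 * allocated separately and only for the variant actually used.
 */
struct action_ptr {
	int type;
	union {
		struct big_member *big;
		struct small_member *small;
	};
};

int main(void)
{
	printf("embedded: %zu bytes, pointer-based: %zu bytes\n",
	       sizeof(struct action_embedded), sizeof(struct action_ptr));
	return 0;
}
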
@@ -1063,6 +1165,7 @@ struct mlx5dr_cmd_qp_create_attr {
u32 sq_wqe_cnt;
u32 rq_wqe_cnt;
u32 rq_wqe_shift;
+ u8 isolate_vl_tc:1;
};
int mlx5dr_cmd_query_gid(struct mlx5_core_dev *mdev, u8 vhca_port_num,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5_ifc_dr.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5_ifc_dr.h
index 83df6df6b459..9643ee647f57 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5_ifc_dr.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5_ifc_dr.h
@@ -434,10 +434,7 @@ struct mlx5_ifc_ste_gre_bits {
};
struct mlx5_ifc_ste_flex_parser_0_bits {
- u8 parser_3_label[0x14];
- u8 parser_3_exp[0x3];
- u8 parser_3_s_bos[0x1];
- u8 parser_3_ttl[0x8];
+ u8 flex_parser_3[0x20];
u8 flex_parser_2[0x20];
@@ -488,6 +485,17 @@ struct mlx5_ifc_ste_flex_parser_tnl_geneve_bits {
u8 reserved_at_40[0x40];
};
+struct mlx5_ifc_ste_flex_parser_tnl_gtpu_bits {
+ u8 reserved_at_0[0x5];
+ u8 gtpu_msg_flags[0x3];
+ u8 gtpu_msg_type[0x8];
+ u8 reserved_at_10[0x10];
+
+ u8 gtpu_teid[0x20];
+
+ u8 reserved_at_40[0x40];
+};
+
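
mlx5_ifc-style structs encode field widths as bit counts in their array sizes, so the GTP-U STE layout added above should sum to the same 128-bit match area as the neighboring tunnel layouts (the geneve struct likewise ends with reserved_at_40[0x40]). A compile-and-run sanity check of that arithmetic (widths copied from the struct; the 128-bit total is inferred from the surrounding layouts):

#include <assert.h>

/* Field widths, in bits, copied from mlx5_ifc_ste_flex_parser_tnl_gtpu_bits. */
enum {
	RSVD_AT_0  = 0x5,
	MSG_FLAGS  = 0x3,
	MSG_TYPE   = 0x8,
	RSVD_AT_10 = 0x10,
	TEID       = 0x20,
	RSVD_AT_40 = 0x40,
};

int main(void)
{
	/* The widths must add up to the full 128-bit area. */
	assert(RSVD_AT_0 + MSG_FLAGS + MSG_TYPE + RSVD_AT_10 +
	       TEID + RSVD_AT_40 == 128);
	return 0;
}
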
struct mlx5_ifc_ste_general_purpose_bits {
u8 general_purpose_lookup_field[0x20];
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vport.c b/drivers/net/ethernet/mellanox/mlx5/core/vport.c
index e05c5c0f3ae1..457ad42eaa2a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/vport.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/vport.c
@@ -1151,20 +1151,6 @@ u64 mlx5_query_nic_system_image_guid(struct mlx5_core_dev *mdev)
}
EXPORT_SYMBOL_GPL(mlx5_query_nic_system_image_guid);
-/**
- * mlx5_eswitch_get_total_vports - Get total vports of the eswitch
- *
- * @dev: Pointer to core device
- *
- * mlx5_eswitch_get_total_vports returns total number of vports for
- * the eswitch.
- */
-u16 mlx5_eswitch_get_total_vports(const struct mlx5_core_dev *dev)
-{
- return MLX5_SPECIAL_VPORTS(dev) + mlx5_core_max_vfs(dev) + mlx5_sf_max_functions(dev);
-}
-EXPORT_SYMBOL_GPL(mlx5_eswitch_get_total_vports);
-
int mlx5_vport_get_other_func_cap(struct mlx5_core_dev *dev, u16 function_id, void *out)
{
u16 opmod = (MLX5_CAP_GENERAL << 1) | (HCA_CAP_OPMOD_GET_MAX & 0x01);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/wq.c b/drivers/net/ethernet/mellanox/mlx5/core/wq.c
index 01f075fac276..3091dd014650 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/wq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/wq.c
@@ -34,11 +34,6 @@
#include "wq.h"
#include "mlx5_core.h"
-static u32 wq_get_byte_sz(u8 log_sz, u8 log_stride)
-{
- return ((u32)1 << log_sz) << log_stride;
-}
-
int mlx5_wq_cyc_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
void *wqc, struct mlx5_wq_cyc *wq,
struct mlx5_wq_ctrl *wq_ctrl)
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.c b/drivers/net/ethernet/mellanox/mlxsw/core.c
index 52fdc34251ba..7e9a7cb31720 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core.c
@@ -1728,7 +1728,7 @@ static int mlxsw_core_health_fw_fatal_dump(struct devlink_health_reporter *repor
return err;
event_id = mlxsw_reg_mfde_event_id_get(mfde_pl);
- err = devlink_fmsg_u8_pair_put(fmsg, "id", event_id);
+ err = devlink_fmsg_u32_pair_put(fmsg, "id", event_id);
if (err)
return err;
switch (event_id) {
@@ -1806,6 +1806,10 @@ static int mlxsw_core_health_fw_fatal_dump(struct devlink_health_reporter *repor
err = devlink_fmsg_u8_pair_put(fmsg, "log_irisc_id", val);
if (err)
return err;
+ val = mlxsw_reg_mfde_log_ip_get(mfde_pl);
+ err = devlink_fmsg_u64_pair_put(fmsg, "log_ip", val);
+ if (err)
+ return err;
} else if (event_id == MLXSW_REG_MFDE_EVENT_ID_KVD_IM_STOP) {
val = mlxsw_reg_mfde_pipes_mask_get(mfde_pl);
err = devlink_fmsg_u32_pair_put(fmsg, "pipes_mask", val);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.h b/drivers/net/ethernet/mellanox/mlxsw/core.h
index 8af7d9d03475..80712dc803d0 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/core.h
@@ -58,6 +58,25 @@ struct mlxsw_tx_info {
bool is_emad;
};
+struct mlxsw_rx_md_info {
+ u32 cookie_index;
+ u32 latency;
+ u32 tx_congestion;
+ union {
+ /* Valid when 'tx_port_valid' is set. */
+ u16 tx_sys_port;
+ u16 tx_lag_id;
+ };
+ u8 tx_lag_port_index; /* Valid when 'tx_port_is_lag' is set. */
+ u8 tx_tc;
+ u8 latency_valid:1,
+ tx_congestion_valid:1,
+ tx_tc_valid:1,
+ tx_port_valid:1,
+ tx_port_is_lag:1,
+ unused:3;
+};
+
bool mlxsw_core_skb_transmit_busy(struct mlxsw_core *mlxsw_core,
const struct mlxsw_tx_info *tx_info);
int mlxsw_core_skb_transmit(struct mlxsw_core *mlxsw_core, struct sk_buff *skb,
@@ -515,7 +534,7 @@ enum mlxsw_devlink_param_id {
struct mlxsw_skb_cb {
union {
struct mlxsw_tx_info tx_info;
- u32 cookie_index; /* Only used during receive */
+ struct mlxsw_rx_md_info rx_md_info;
};
};
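
struct mlxsw_rx_md_info above overlays tx_sys_port and tx_lag_id in an anonymous union and discriminates them with the tx_port_is_lag bit. A minimal userspace sketch of that tagged-union pattern (a simplified stand-in, not the kernel struct itself):

#include <stdint.h>
#include <stdio.h>

struct rx_md_info {
	union {
		uint16_t tx_sys_port;	/* valid when !tx_port_is_lag */
		uint16_t tx_lag_id;	/* valid when tx_port_is_lag */
	};
	uint8_t tx_port_valid:1,
		tx_port_is_lag:1;
};

/* Consumers test the tag bits, never the raw union members. */
static void print_tx_port(const struct rx_md_info *md)
{
	if (!md->tx_port_valid)
		printf("Tx port unknown\n");
	else if (md->tx_port_is_lag)
		printf("Tx LAG %u\n", md->tx_lag_id);
	else
		printf("Tx system port %u\n", md->tx_sys_port);
}

int main(void)
{
	struct rx_md_info md = {
		.tx_lag_id = 3, .tx_port_valid = 1, .tx_port_is_lag = 1,
	};

	print_tx_port(&md);
	return 0;
}
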
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c
index 4d699fe98cb6..78d9c0196f2b 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c
@@ -2007,3 +2007,134 @@ int mlxsw_afa_block_append_l4port(struct mlxsw_afa_block *block, bool is_dport,
return 0;
}
EXPORT_SYMBOL(mlxsw_afa_block_append_l4port);
+
+/* Mirror Sampler Action
+ * ---------------------
+ * The SAMPLER_ACTION is used to mirror packets with a configurable probability (sampling).
+ */
+
+#define MLXSW_AFA_SAMPLER_CODE 0x13
+#define MLXSW_AFA_SAMPLER_SIZE 1
+
+/* afa_sampler_mirror_agent
+ * Mirror (SPAN) agent.
+ */
+MLXSW_ITEM32(afa, sampler, mirror_agent, 0x04, 0, 3);
+
+#define MLXSW_AFA_SAMPLER_RATE_MAX (BIT(24) - 1)
+
+/* afa_sampler_mirror_probability_rate
+ * Mirroring probability.
+ * Valid values are 1 to 2^24 - 1
+ */
+MLXSW_ITEM32(afa, sampler, mirror_probability_rate, 0x08, 0, 24);
+
+static void mlxsw_afa_sampler_pack(char *payload, u8 mirror_agent, u32 rate)
+{
+ mlxsw_afa_sampler_mirror_agent_set(payload, mirror_agent);
+ mlxsw_afa_sampler_mirror_probability_rate_set(payload, rate);
+}
+
+struct mlxsw_afa_sampler {
+ struct mlxsw_afa_resource resource;
+ int span_id;
+ u8 local_port;
+ bool ingress;
+};
+
+static void mlxsw_afa_sampler_destroy(struct mlxsw_afa_block *block,
+ struct mlxsw_afa_sampler *sampler)
+{
+ mlxsw_afa_resource_del(&sampler->resource);
+ block->afa->ops->sampler_del(block->afa->ops_priv, sampler->local_port,
+ sampler->span_id, sampler->ingress);
+ kfree(sampler);
+}
+
+static void mlxsw_afa_sampler_destructor(struct mlxsw_afa_block *block,
+ struct mlxsw_afa_resource *resource)
+{
+ struct mlxsw_afa_sampler *sampler;
+
+ sampler = container_of(resource, struct mlxsw_afa_sampler, resource);
+ mlxsw_afa_sampler_destroy(block, sampler);
+}
+
+static struct mlxsw_afa_sampler *
+mlxsw_afa_sampler_create(struct mlxsw_afa_block *block, u8 local_port,
+ struct psample_group *psample_group, u32 rate,
+ u32 trunc_size, bool truncate, bool ingress,
+ struct netlink_ext_ack *extack)
+{
+ struct mlxsw_afa_sampler *sampler;
+ int err;
+
+ sampler = kzalloc(sizeof(*sampler), GFP_KERNEL);
+ if (!sampler)
+ return ERR_PTR(-ENOMEM);
+
+ err = block->afa->ops->sampler_add(block->afa->ops_priv, local_port,
+ psample_group, rate, trunc_size,
+ truncate, ingress, &sampler->span_id,
+ extack);
+ if (err)
+ goto err_sampler_add;
+
+ sampler->ingress = ingress;
+ sampler->local_port = local_port;
+ sampler->resource.destructor = mlxsw_afa_sampler_destructor;
+ mlxsw_afa_resource_add(block, &sampler->resource);
+ return sampler;
+
+err_sampler_add:
+ kfree(sampler);
+ return ERR_PTR(err);
+}
+
+static int
+mlxsw_afa_block_append_allocated_sampler(struct mlxsw_afa_block *block,
+ u8 mirror_agent, u32 rate)
+{
+ char *act = mlxsw_afa_block_append_action(block, MLXSW_AFA_SAMPLER_CODE,
+ MLXSW_AFA_SAMPLER_SIZE);
+
+ if (IS_ERR(act))
+ return PTR_ERR(act);
+ mlxsw_afa_sampler_pack(act, mirror_agent, rate);
+ return 0;
+}
+
+int mlxsw_afa_block_append_sampler(struct mlxsw_afa_block *block, u8 local_port,
+ struct psample_group *psample_group,
+ u32 rate, u32 trunc_size, bool truncate,
+ bool ingress,
+ struct netlink_ext_ack *extack)
+{
+ struct mlxsw_afa_sampler *sampler;
+ int err;
+
+ if (rate > MLXSW_AFA_SAMPLER_RATE_MAX) {
+ NL_SET_ERR_MSG_MOD(extack, "Sampling rate is too high");
+ return -EINVAL;
+ }
+
+ sampler = mlxsw_afa_sampler_create(block, local_port, psample_group,
+ rate, trunc_size, truncate, ingress,
+ extack);
+ if (IS_ERR(sampler))
+ return PTR_ERR(sampler);
+
+ err = mlxsw_afa_block_append_allocated_sampler(block, sampler->span_id,
+ rate);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Cannot append sampler action");
+ goto err_append_allocated_sampler;
+ }
+
+ return 0;
+
+err_append_allocated_sampler:
+ mlxsw_afa_sampler_destroy(block, sampler);
+ return err;
+}
+EXPORT_SYMBOL(mlxsw_afa_block_append_sampler);
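
The sampler append path above follows the usual acquire-then-commit shape: allocate the hardware-backed resource first, append the action, and unwind with goto labels in reverse order on failure. A self-contained sketch of that error-handling pattern (all names hypothetical):

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

struct sampler { int span_id; };

static struct sampler *sampler_create(void)
{
	struct sampler *s = calloc(1, sizeof(*s));

	if (!s)
		return NULL;
	s->span_id = 42;	/* stands in for the allocated SPAN agent */
	return s;
}

static void sampler_destroy(struct sampler *s)
{
	free(s);
}

static int append_action(int span_id)
{
	return span_id >= 0 ? 0 : -EINVAL;
}

static int block_append_sampler(void)
{
	struct sampler *s = sampler_create();
	int err;

	if (!s)
		return -ENOMEM;

	err = append_action(s->span_id);
	if (err)
		goto err_append;	/* unwind in reverse order */

	return 0;

err_append:
	sampler_destroy(s);
	return err;
}

int main(void)
{
	printf("append: %d\n", block_append_sampler());
	return 0;
}
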
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h
index b652497b1002..b65bf98eb5ab 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h
@@ -30,6 +30,12 @@ struct mlxsw_afa_ops {
u16 *p_policer_index,
struct netlink_ext_ack *extack);
void (*policer_del)(void *priv, u16 policer_index);
+ int (*sampler_add)(void *priv, u8 local_port,
+ struct psample_group *psample_group, u32 rate,
+ u32 trunc_size, bool truncate, bool ingress,
+ int *p_span_id, struct netlink_ext_ack *extack);
+ void (*sampler_del)(void *priv, u8 local_port, int span_id,
+ bool ingress);
bool dummy_first_set;
};
@@ -92,5 +98,10 @@ int mlxsw_afa_block_append_police(struct mlxsw_afa_block *block,
u32 fa_index, u64 rate_bytes_ps, u32 burst,
u16 *p_policer_index,
struct netlink_ext_ack *extack);
+int mlxsw_afa_block_append_sampler(struct mlxsw_afa_block *block, u8 local_port,
+ struct psample_group *psample_group,
+ u32 rate, u32 trunc_size, bool truncate,
+ bool ingress,
+ struct netlink_ext_ack *extack);
#endif
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c b/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c
index bf85ce9835d7..37fb2e1fb278 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c
@@ -19,7 +19,6 @@
#define MLXSW_THERMAL_ASIC_TEMP_NORM 75000 /* 75C */
#define MLXSW_THERMAL_ASIC_TEMP_HIGH 85000 /* 85C */
#define MLXSW_THERMAL_ASIC_TEMP_HOT 105000 /* 105C */
-#define MLXSW_THERMAL_ASIC_TEMP_CRIT 140000 /* 140C */
#define MLXSW_THERMAL_HYSTERESIS_TEMP 5000 /* 5C */
#define MLXSW_THERMAL_MODULE_TEMP_SHIFT (MLXSW_THERMAL_HYSTERESIS_TEMP * 2)
#define MLXSW_THERMAL_ZONE_MAX_NAME 16
@@ -45,7 +44,6 @@ enum mlxsw_thermal_trips {
MLXSW_THERMAL_TEMP_TRIP_NORM,
MLXSW_THERMAL_TEMP_TRIP_HIGH,
MLXSW_THERMAL_TEMP_TRIP_HOT,
- MLXSW_THERMAL_TEMP_TRIP_CRIT,
};
struct mlxsw_thermal_trip {
@@ -75,16 +73,9 @@ static const struct mlxsw_thermal_trip default_thermal_trips[] = {
{ /* Warning */
.type = THERMAL_TRIP_HOT,
.temp = MLXSW_THERMAL_ASIC_TEMP_HOT,
- .hyst = MLXSW_THERMAL_HYSTERESIS_TEMP,
.min_state = MLXSW_THERMAL_MAX_STATE,
.max_state = MLXSW_THERMAL_MAX_STATE,
},
- { /* Critical - soft poweroff */
- .type = THERMAL_TRIP_CRITICAL,
- .temp = MLXSW_THERMAL_ASIC_TEMP_CRIT,
- .min_state = MLXSW_THERMAL_MAX_STATE,
- .max_state = MLXSW_THERMAL_MAX_STATE,
- }
};
#define MLXSW_THERMAL_NUM_TRIPS ARRAY_SIZE(default_thermal_trips)
@@ -154,7 +145,6 @@ mlxsw_thermal_module_trips_reset(struct mlxsw_thermal_module *tz)
tz->trips[MLXSW_THERMAL_TEMP_TRIP_NORM].temp = 0;
tz->trips[MLXSW_THERMAL_TEMP_TRIP_HIGH].temp = 0;
tz->trips[MLXSW_THERMAL_TEMP_TRIP_HOT].temp = 0;
- tz->trips[MLXSW_THERMAL_TEMP_TRIP_CRIT].temp = 0;
}
static int
@@ -183,11 +173,10 @@ mlxsw_thermal_module_trips_update(struct device *dev, struct mlxsw_core *core,
}
/* According to the system thermal requirements, the thermal zones are
- * defined with four trip points. The critical and emergency
+ * defined with three trip points. The critical and emergency
* temperature thresholds, provided by the QSFP module, are set as "active"
- * and "hot" trip points, "normal" and "critical" trip points are
- * derived from "active" and "hot" by subtracting or adding double
- * hysteresis value.
+ * and "hot" trip points, "normal" trip point is derived from "active"
+ * by subtracting double hysteresis value.
*/
if (crit_temp >= MLXSW_THERMAL_MODULE_TEMP_SHIFT)
tz->trips[MLXSW_THERMAL_TEMP_TRIP_NORM].temp = crit_temp -
@@ -196,8 +185,6 @@ mlxsw_thermal_module_trips_update(struct device *dev, struct mlxsw_core *core,
tz->trips[MLXSW_THERMAL_TEMP_TRIP_NORM].temp = crit_temp;
tz->trips[MLXSW_THERMAL_TEMP_TRIP_HIGH].temp = crit_temp;
tz->trips[MLXSW_THERMAL_TEMP_TRIP_HOT].temp = emerg_temp;
- tz->trips[MLXSW_THERMAL_TEMP_TRIP_CRIT].temp = emerg_temp +
- MLXSW_THERMAL_MODULE_TEMP_SHIFT;
return 0;
}
@@ -210,7 +197,7 @@ static void mlxsw_thermal_tz_score_update(struct mlxsw_thermal *thermal,
struct mlxsw_thermal_trip *trip = trips;
unsigned int score, delta, i, shift = 1;
- /* Calculate thermal zone score, if temperature is above the critical
+ /* Calculate thermal zone score, if temperature is above the hot
* threshold score is set to MLXSW_THERMAL_TEMP_SCORE_MAX.
*/
score = MLXSW_THERMAL_TEMP_SCORE_MAX;
@@ -333,8 +320,7 @@ static int mlxsw_thermal_set_trip_temp(struct thermal_zone_device *tzdev,
{
struct mlxsw_thermal *thermal = tzdev->devdata;
- if (trip < 0 || trip >= MLXSW_THERMAL_NUM_TRIPS ||
- temp > MLXSW_THERMAL_ASIC_TEMP_CRIT)
+ if (trip < 0 || trip >= MLXSW_THERMAL_NUM_TRIPS)
return -EINVAL;
thermal->trips[trip].temp = temp;
@@ -502,8 +488,7 @@ mlxsw_thermal_module_trip_temp_set(struct thermal_zone_device *tzdev,
{
struct mlxsw_thermal_module *tz = tzdev->devdata;
- if (trip < 0 || trip >= MLXSW_THERMAL_NUM_TRIPS ||
- temp > tz->trips[MLXSW_THERMAL_TEMP_TRIP_CRIT].temp)
+ if (trip < 0 || trip >= MLXSW_THERMAL_NUM_TRIPS)
return -EINVAL;
tz->trips[trip].temp = temp;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci.c b/drivers/net/ethernet/mellanox/mlxsw/pci.c
index d0052537e627..8e8456811384 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/pci.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/pci.c
@@ -540,6 +540,55 @@ static void mlxsw_pci_cqe_sdq_handle(struct mlxsw_pci *mlxsw_pci,
spin_unlock(&q->lock);
}
+static void mlxsw_pci_cqe_rdq_md_tx_port_init(struct sk_buff *skb,
+ const char *cqe)
+{
+ struct mlxsw_skb_cb *cb = mlxsw_skb_cb(skb);
+
+ if (mlxsw_pci_cqe2_tx_lag_get(cqe)) {
+ cb->rx_md_info.tx_port_is_lag = true;
+ cb->rx_md_info.tx_lag_id = mlxsw_pci_cqe2_tx_lag_id_get(cqe);
+ cb->rx_md_info.tx_lag_port_index =
+ mlxsw_pci_cqe2_tx_lag_subport_get(cqe);
+ } else {
+ cb->rx_md_info.tx_port_is_lag = false;
+ cb->rx_md_info.tx_sys_port =
+ mlxsw_pci_cqe2_tx_system_port_get(cqe);
+ }
+
+ if (cb->rx_md_info.tx_sys_port != MLXSW_PCI_CQE2_TX_PORT_MULTI_PORT &&
+ cb->rx_md_info.tx_sys_port != MLXSW_PCI_CQE2_TX_PORT_INVALID)
+ cb->rx_md_info.tx_port_valid = 1;
+ else
+ cb->rx_md_info.tx_port_valid = 0;
+}
+
+static void mlxsw_pci_cqe_rdq_md_init(struct sk_buff *skb, const char *cqe)
+{
+ struct mlxsw_skb_cb *cb = mlxsw_skb_cb(skb);
+
+ cb->rx_md_info.tx_congestion = mlxsw_pci_cqe2_mirror_cong_get(cqe);
+ if (cb->rx_md_info.tx_congestion != MLXSW_PCI_CQE2_MIRROR_CONG_INVALID)
+ cb->rx_md_info.tx_congestion_valid = 1;
+ else
+ cb->rx_md_info.tx_congestion_valid = 0;
+ cb->rx_md_info.tx_congestion <<= MLXSW_PCI_CQE2_MIRROR_CONG_SHIFT;
+
+ cb->rx_md_info.latency = mlxsw_pci_cqe2_mirror_latency_get(cqe);
+ if (cb->rx_md_info.latency != MLXSW_PCI_CQE2_MIRROR_LATENCY_INVALID)
+ cb->rx_md_info.latency_valid = 1;
+ else
+ cb->rx_md_info.latency_valid = 0;
+
+ cb->rx_md_info.tx_tc = mlxsw_pci_cqe2_mirror_tclass_get(cqe);
+ if (cb->rx_md_info.tx_tc != MLXSW_PCI_CQE2_MIRROR_TCLASS_INVALID)
+ cb->rx_md_info.tx_tc_valid = 1;
+ else
+ cb->rx_md_info.tx_tc_valid = 0;
+
+ mlxsw_pci_cqe_rdq_md_tx_port_init(skb, cqe);
+}
+
static void mlxsw_pci_cqe_rdq_handle(struct mlxsw_pci *mlxsw_pci,
struct mlxsw_pci_queue *q,
u16 consumer_counter_limit,
@@ -581,11 +630,15 @@ static void mlxsw_pci_cqe_rdq_handle(struct mlxsw_pci *mlxsw_pci,
if (mlxsw_pci->max_cqe_ver >= MLXSW_PCI_CQE_V2)
cookie_index = mlxsw_pci_cqe2_user_def_val_orig_pkt_len_get(cqe);
- mlxsw_skb_cb(skb)->cookie_index = cookie_index;
+ mlxsw_skb_cb(skb)->rx_md_info.cookie_index = cookie_index;
} else if (rx_info.trap_id >= MLXSW_TRAP_ID_MIRROR_SESSION0 &&
rx_info.trap_id <= MLXSW_TRAP_ID_MIRROR_SESSION7 &&
mlxsw_pci->max_cqe_ver >= MLXSW_PCI_CQE_V2) {
rx_info.mirror_reason = mlxsw_pci_cqe2_mirror_reason_get(cqe);
+ mlxsw_pci_cqe_rdq_md_init(skb, cqe);
+ } else if (rx_info.trap_id == MLXSW_TRAP_ID_PKT_SAMPLE &&
+ mlxsw_pci->max_cqe_ver >= MLXSW_PCI_CQE_V2) {
+ mlxsw_pci_cqe_rdq_md_tx_port_init(skb, cqe);
}
byte_count = mlxsw_pci_cqe_byte_count_get(cqe);
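
The CQE metadata fields use all-ones sentinels rather than dedicated valid bits, and mlxsw_pci_cqe_rdq_md_init() converts each sentinel into an explicit flag once at decode time so later consumers never compare against magic values. A small sketch of that conversion, using the latency field as the example (simplified types):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define LATENCY_INVALID 0xFFFFFFu	/* sentinel, as in pci_hw.h */

struct md {
	uint32_t latency;
	bool latency_valid;
};

/* Translate the hardware sentinel into an explicit valid flag once. */
static void md_init(struct md *md, uint32_t raw_latency)
{
	md->latency = raw_latency;
	md->latency_valid = raw_latency != LATENCY_INVALID;
}

int main(void)
{
	struct md md;

	md_init(&md, LATENCY_INVALID);
	printf("valid=%d\n", md.latency_valid);
	md_init(&md, 123);
	printf("valid=%d latency=%u\n", md.latency_valid, md.latency);
	return 0;
}
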
diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h b/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h
index a2c1fbd3e0d1..7b531228d6c0 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h
@@ -173,6 +173,15 @@ MLXSW_ITEM32(pci, cqe, wqe_counter, 0x04, 16, 16);
*/
MLXSW_ITEM32(pci, cqe, byte_count, 0x04, 0, 14);
+#define MLXSW_PCI_CQE2_MIRROR_CONG_INVALID 0xFFFF
+
+/* pci_cqe_mirror_cong_high
+ * Congestion level, in units of 8KB, of the egress traffic class of the
+ * original packet that was mirrored to the CPU. Value of 0xFFFF means that the
+ * congestion level is invalid.
+ */
+MLXSW_ITEM32(pci, cqe2, mirror_cong_high, 0x08, 16, 4);
+
/* pci_cqe_trap_id
* Trap ID that captured the packet.
*/
@@ -208,6 +217,59 @@ MLXSW_ITEM32(pci, cqe0, dqn, 0x0C, 1, 5);
MLXSW_ITEM32(pci, cqe12, dqn, 0x0C, 1, 6);
mlxsw_pci_cqe_item_helpers(dqn, 0, 12, 12);
+#define MLXSW_PCI_CQE2_MIRROR_TCLASS_INVALID 0x1F
+
+/* pci_cqe_mirror_tclass
+ * The egress traffic class of the original packet that was mirrored to the
+ * CPU. Value of 0x1F means that the traffic class is invalid.
+ */
+MLXSW_ITEM32(pci, cqe2, mirror_tclass, 0x10, 27, 5);
+
+/* pci_cqe_tx_lag
+ * Set when the Tx port of a packet that is mirrored / sampled to the CPU is a LAG.
+ */
+MLXSW_ITEM32(pci, cqe2, tx_lag, 0x10, 24, 1);
+
+/* pci_cqe_tx_lag_subport
+ * The port index within the LAG of a packet that is mirrored / sampled to the
+ * CPU. Reserved when tx_lag is 0.
+ */
+MLXSW_ITEM32(pci, cqe2, tx_lag_subport, 0x10, 16, 8);
+
+#define MLXSW_PCI_CQE2_TX_PORT_MULTI_PORT 0xFFFE
+#define MLXSW_PCI_CQE2_TX_PORT_INVALID 0xFFFF
+
+/* pci_cqe_tx_lag_id
+ * The Tx LAG ID of the original packet that is mirrored / sampled to the CPU.
+ * Value of 0xFFFE means multi-port. Value of 0xFFFF means that the Tx LAG ID
+ * is invalid. Reserved when tx_lag is 0.
+ */
+MLXSW_ITEM32(pci, cqe2, tx_lag_id, 0x10, 0, 16);
+
+/* pci_cqe_tx_system_port
+ * The Tx port of the original packet that is mirrored / sampled to the CPU.
+ * Value of 0xFFFE means multi-port. Value of 0xFFFF means that the Tx port is
+ * invalid. Reserved when tx_lag is 1.
+ */
+MLXSW_ITEM32(pci, cqe2, tx_system_port, 0x10, 0, 16);
+
+/* pci_cqe_mirror_cong_low
+ * Congestion level, in units of 8KB, of the egress traffic class of the
+ * original packet that was mirrored to the CPU. Value of 0xFFFF means that the
+ * congestion level is invalid.
+ */
+MLXSW_ITEM32(pci, cqe2, mirror_cong_low, 0x14, 20, 12);
+
+#define MLXSW_PCI_CQE2_MIRROR_CONG_SHIFT 13 /* Units of 8KB. */
+
+static inline u16 mlxsw_pci_cqe2_mirror_cong_get(const char *cqe)
+{
+ u16 cong_high = mlxsw_pci_cqe2_mirror_cong_high_get(cqe);
+ u16 cong_low = mlxsw_pci_cqe2_mirror_cong_low_get(cqe);
+
+ return cong_high << 12 | cong_low;
+}
+
/* pci_cqe_user_def_val_orig_pkt_len
* When trap_id is an ACL: User defined value from policy engine action.
*/
@@ -218,6 +280,15 @@ MLXSW_ITEM32(pci, cqe2, user_def_val_orig_pkt_len, 0x14, 0, 20);
*/
MLXSW_ITEM32(pci, cqe2, mirror_reason, 0x18, 24, 8);
+#define MLXSW_PCI_CQE2_MIRROR_LATENCY_INVALID 0xFFFFFF
+
+/* pci_cqe_mirror_latency
+ * End-to-end latency of the original packet that was mirrored to the CPU.
+ * Value of 0xFFFFFF means that the latency is invalid. Units are according to
+ * MOGCR.mirror_latency_units.
+ */
+MLXSW_ITEM32(pci, cqe2, mirror_latency, 0x1C, 8, 24);
+
/* pci_cqe_owner
* Ownership bit.
*/
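
mlxsw_pci_cqe2_mirror_cong_get() above stitches the 16-bit congestion value back together from the split cong_high (bits 15:12) and cong_low (bits 11:0) CQE fields; the later <<13 shift presumably converts the 8KB units to bytes. A runnable check of the reconstruction (sentinel and shift values copied from the header):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define MIRROR_CONG_INVALID 0xFFFF
#define MIRROR_CONG_SHIFT   13		/* 8KB units -> bytes */

/* cong_high carries bits 15:12, cong_low carries bits 11:0. */
static uint16_t mirror_cong_get(uint16_t cong_high, uint16_t cong_low)
{
	return (uint16_t)(cong_high << 12 | cong_low);
}

int main(void)
{
	uint16_t cong = mirror_cong_get(0x3, 0xABC);

	assert(cong == 0x3ABC);
	if (cong != MIRROR_CONG_INVALID)	/* check before scaling */
		printf("congestion: %llu bytes\n",
		       (unsigned long long)cong << MIRROR_CONG_SHIFT);
	return 0;
}
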
diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h
index c4adc7f740d3..900b4bf5bb5b 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/reg.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h
@@ -842,6 +842,14 @@ MLXSW_ITEM32(reg, spvid, local_port, 0x00, 16, 8);
*/
MLXSW_ITEM32(reg, spvid, sub_port, 0x00, 8, 8);
+/* reg_spvid_egr_et_set
+ * When VLAN is pushed at ingress (for untagged packets or for
+ * QinQ push mode) then the EtherType is decided at the egress port.
+ * Reserved when Spectrum-1.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, spvid, egr_et_set, 0x04, 24, 1);
+
/* reg_spvid_et_vlan
* EtherType used for when VLAN is pushed at ingress (for untagged
* packets or for QinQ push mode).
@@ -849,6 +857,7 @@ MLXSW_ITEM32(reg, spvid, sub_port, 0x00, 8, 8);
* 1: ether_type1
* 2: ether_type2 - Reserved when Spectrum-1, supported by Spectrum-2
* Ethertype IDs are configured by SVER.
+ * Reserved when egr_et_set = 1.
* Access: RW
*/
MLXSW_ITEM32(reg, spvid, et_vlan, 0x04, 16, 2);
@@ -2079,6 +2088,41 @@ static inline void mlxsw_reg_spvc_pack(char *payload, u8 local_port, bool et1,
mlxsw_reg_spvc_et0_set(payload, et0);
}
+/* SPEVET - Switch Port Egress VLAN EtherType
+ * ------------------------------------------
+ * The switch port egress VLAN EtherType configures which EtherType to push at
+ * egress for packets incoming through a local port for which 'SPVID.egr_et_set'
+ * is set.
+ */
+#define MLXSW_REG_SPEVET_ID 0x202A
+#define MLXSW_REG_SPEVET_LEN 0x08
+
+MLXSW_REG_DEFINE(spevet, MLXSW_REG_SPEVET_ID, MLXSW_REG_SPEVET_LEN);
+
+/* reg_spevet_local_port
+ * Egress Local port number.
+ * Not supported to CPU port.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, spevet, local_port, 0x00, 16, 8);
+
+/* reg_spevet_et_vlan
+ * Egress EtherType VLAN to push when SPVID.egr_et_set field set for the packet:
+ * 0: ether_type0 - (default)
+ * 1: ether_type1
+ * 2: ether_type2
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, spevet, et_vlan, 0x04, 16, 2);
+
+static inline void mlxsw_reg_spevet_pack(char *payload, u8 local_port,
+ u8 et_vlan)
+{
+ MLXSW_REG_ZERO(spevet, payload);
+ mlxsw_reg_spevet_local_port_set(payload, local_port);
+ mlxsw_reg_spevet_et_vlan_set(payload, et_vlan);
+}
+
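
The SPEVET accessors above are generated by MLXSW_ITEM32(), which performs a read-modify-write of a bit field inside a big-endian register payload. A rough userspace model of what such an accessor does, using et_vlan's offset 0x04 / shift 16 / width 2 from the declaration above (an approximation for illustration, not the kernel macro; htonl/ntohl are POSIX):

#include <arpa/inet.h>	/* htonl/ntohl */
#include <assert.h>
#include <stdint.h>
#include <string.h>

/* Read-modify-write a 'width'-bit field at bit 'shift' of the
 * big-endian 32-bit word at 'byte_off' in a register payload.
 */
static void item32_set(char *payload, unsigned int byte_off,
		       unsigned int shift, unsigned int width, uint32_t val)
{
	uint32_t mask, word;

	mask = (width >= 32 ? 0xFFFFFFFFu : (1u << width) - 1) << shift;
	memcpy(&word, payload + byte_off, sizeof(word));
	word = ntohl(word);
	word = (word & ~mask) | ((val << shift) & mask);
	word = htonl(word);
	memcpy(payload + byte_off, &word, sizeof(word));
}

int main(void)
{
	char payload[8] = {0};

	/* Model SPEVET.et_vlan = 1 (ether_type1). */
	item32_set(payload, 0x04, 16, 2, 1);
	assert((unsigned char)payload[5] == 0x01);	/* bits 17:16, BE */
	return 0;
}
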
/* CWTP - Congestion WRED ECN TClass Profile
* ----------------------------------------
* Configures the profiles for queues of egress port and traffic class
@@ -5637,7 +5681,7 @@ static inline void mlxsw_reg_pspa_pack(char *payload, u8 swid, u8 local_port)
MLXSW_REG_DEFINE(pmaos, MLXSW_REG_PMAOS_ID, MLXSW_REG_PMAOS_LEN);
-/* reg_slot_index
+/* reg_pmaos_slot_index
* Slot index.
* Access: Index
*/
@@ -8086,6 +8130,60 @@ mlxsw_reg_rtdp_ipip4_pack(char *payload, u16 irif,
mlxsw_reg_rtdp_ipip_expected_gre_key_set(payload, expected_gre_key);
}
+/* RATRAD - Router Adjacency Table Activity Dump Register
+ * ------------------------------------------------------
+ * The RATRAD register is used to dump and optionally clear activity bits of
+ * router adjacency table entries.
+ */
+#define MLXSW_REG_RATRAD_ID 0x8022
+#define MLXSW_REG_RATRAD_LEN 0x210
+
+MLXSW_REG_DEFINE(ratrad, MLXSW_REG_RATRAD_ID, MLXSW_REG_RATRAD_LEN);
+
+enum {
+ /* Read activity */
+ MLXSW_REG_RATRAD_OP_READ_ACTIVITY,
+ /* Read and clear activity */
+ MLXSW_REG_RATRAD_OP_READ_CLEAR_ACTIVITY,
+};
+
+/* reg_ratrad_op
+ * Access: Operation
+ */
+MLXSW_ITEM32(reg, ratrad, op, 0x00, 30, 2);
+
+/* reg_ratrad_ecmp_size
+ * ecmp_size is the number of sequential entries starting at adjacency_index.
+ * Valid ranges:
+ * Spectrum-1: 32-64, 512, 1024, 2048, 4096
+ * Spectrum-2/3: 32-128, 256, 512, 1024, 2048, 4096
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, ratrad, ecmp_size, 0x00, 0, 13);
+
+/* reg_ratrad_adjacency_index
+ * Index into the adjacency table.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, ratrad, adjacency_index, 0x04, 0, 24);
+
+/* reg_ratrad_activity_vector
+ * Activity bit per adjacency index.
+ * Bits higher than ecmp_size are reserved.
+ * Access: RO
+ */
+MLXSW_ITEM_BIT_ARRAY(reg, ratrad, activity_vector, 0x10, 0x200, 1);
+
+static inline void mlxsw_reg_ratrad_pack(char *payload, u32 adjacency_index,
+ u16 ecmp_size)
+{
+ MLXSW_REG_ZERO(ratrad, payload);
+ mlxsw_reg_ratrad_op_set(payload,
+ MLXSW_REG_RATRAD_OP_READ_CLEAR_ACTIVITY);
+ mlxsw_reg_ratrad_ecmp_size_set(payload, ecmp_size);
+ mlxsw_reg_ratrad_adjacency_index_set(payload, adjacency_index);
+}
+
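
RATRAD reports one activity bit per adjacency entry in the 0x200-byte activity_vector. A generic sketch of indexing such a packed bit vector (LSB-first within each byte is an assumption here; the kernel's MLXSW_ITEM_BIT_ARRAY accessor defines the authoritative ordering):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define ACTIVITY_VECTOR_LEN 0x200	/* bytes, as in RATRAD */

/* Fetch bit 'i' of a packed bit vector, LSB-first within each byte. */
static int bit_array_get(const uint8_t *buf, unsigned int i)
{
	return (buf[i / 8] >> (i % 8)) & 1;
}

int main(void)
{
	uint8_t vec[ACTIVITY_VECTOR_LEN];

	memset(vec, 0, sizeof(vec));
	vec[1] = 0x01;			/* mark entry 8 active */
	printf("entry 7: %d, entry 8: %d\n",
	       bit_array_get(vec, 7), bit_array_get(vec, 8));
	return 0;
}
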
/* RIGR-V2 - Router Interface Group Register Version 2
* ---------------------------------------------------
* The RIGR_V2 register is used to add, remove and query egress interface list
@@ -9925,15 +10023,28 @@ MLXSW_ITEM32(reg, mpar, enable, 0x04, 31, 1);
*/
MLXSW_ITEM32(reg, mpar, pa_id, 0x04, 0, 4);
+#define MLXSW_REG_MPAR_RATE_MAX 3500000000UL
+
+/* reg_mpar_probability_rate
+ * Sampling rate.
+ * Valid values are: 1 to 3.5*10^9
+ * Value of 1 means "sample all". Default is 1.
+ * Reserved when Spectrum-1.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, mpar, probability_rate, 0x08, 0, 32);
+
static inline void mlxsw_reg_mpar_pack(char *payload, u8 local_port,
enum mlxsw_reg_mpar_i_e i_e,
- bool enable, u8 pa_id)
+ bool enable, u8 pa_id,
+ u32 probability_rate)
{
MLXSW_REG_ZERO(mpar, payload);
mlxsw_reg_mpar_local_port_set(payload, local_port);
mlxsw_reg_mpar_enable_set(payload, enable);
mlxsw_reg_mpar_i_e_set(payload, i_e);
mlxsw_reg_mpar_pa_id_set(payload, pa_id);
+ mlxsw_reg_mpar_probability_rate_set(payload, probability_rate);
}
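
mlxsw_reg_mpar_pack() now carries the probability rate, which the comment bounds to 1..3.5*10^9 with 1 meaning "sample all". A tiny sketch of validating such a rate before packing (hypothetical helper, assuming the usual 1-in-N sampling convention):

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

#define MPAR_RATE_MAX 3500000000UL	/* from MLXSW_REG_MPAR_RATE_MAX */

/* Reject rates outside the documented register bounds. */
static int mpar_rate_validate(uint32_t rate)
{
	if (rate < 1 || rate > MPAR_RATE_MAX)
		return -EINVAL;
	return 0;
}

int main(void)
{
	uint32_t rate = 1000;

	if (!mpar_rate_validate(rate))
		printf("rate %u: ~%.3f%% of packets sampled\n",
		       rate, 100.0 / rate);
	printf("rate 0 -> %d\n", mpar_rate_validate(0));
	return 0;
}
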
/* MGIR - Management General Information Register
@@ -10577,6 +10688,8 @@ MLXSW_ITEM32(reg, mpagr, trigger, 0x00, 0, 4);
*/
MLXSW_ITEM32(reg, mpagr, pa_id, 0x04, 0, 4);
+#define MLXSW_REG_MPAGR_RATE_MAX 3500000000UL
+
/* reg_mpagr_probability_rate
* Sampling rate.
* Valid values are: 1 to 3.5*10^9
@@ -10919,7 +11032,7 @@ MLXSW_REG_DEFINE(mfde, MLXSW_REG_MFDE_ID, MLXSW_REG_MFDE_LEN);
* Which irisc triggered the event
* Access: RO
*/
-MLXSW_ITEM32(reg, mfde, irisc_id, 0x00, 8, 4);
+MLXSW_ITEM32(reg, mfde, irisc_id, 0x00, 24, 8);
enum mlxsw_reg_mfde_event_id {
MLXSW_REG_MFDE_EVENT_ID_CRSPACE_TO = 1,
@@ -10930,7 +11043,7 @@ enum mlxsw_reg_mfde_event_id {
/* reg_mfde_event_id
* Access: RO
*/
-MLXSW_ITEM32(reg, mfde, event_id, 0x00, 0, 8);
+MLXSW_ITEM32(reg, mfde, event_id, 0x00, 0, 16);
enum mlxsw_reg_mfde_method {
MLXSW_REG_MFDE_METHOD_QUERY,
@@ -10979,6 +11092,13 @@ MLXSW_ITEM32(reg, mfde, log_address, 0x10, 0, 32);
*/
MLXSW_ITEM32(reg, mfde, log_id, 0x14, 0, 4);
+/* reg_mfde_log_ip
+ * IP (instruction pointer) that triggered the timeout.
+ * Valid in case event_id == MLXSW_REG_MFDE_EVENT_ID_CRSPACE_TO
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, mfde, log_ip, 0x18, 0, 64);
+
/* reg_mfde_pipes_mask
* Bit per kvh pipe.
* Access: RO
@@ -11995,6 +12115,7 @@ static const struct mlxsw_reg_info *mlxsw_reg_infos[] = {
MLXSW_REG(sfmr),
MLXSW_REG(spvmlr),
MLXSW_REG(spvc),
+ MLXSW_REG(spevet),
MLXSW_REG(cwtp),
MLXSW_REG(cwtpm),
MLXSW_REG(pgcr),
@@ -12047,6 +12168,7 @@ static const struct mlxsw_reg_info *mlxsw_reg_infos[] = {
MLXSW_REG(rtar),
MLXSW_REG(ratr),
MLXSW_REG(rtdp),
+ MLXSW_REG(ratrad),
MLXSW_REG(rdpm),
MLXSW_REG(ricnt),
MLXSW_REG(rrcr),
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
index 1650d9852b5b..bca0354482cb 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
@@ -23,6 +23,8 @@
#include <linux/netlink.h>
#include <linux/jhash.h>
#include <linux/log2.h>
+#include <linux/refcount.h>
+#include <linux/rhashtable.h>
#include <net/switchdev.h>
#include <net/pkt_cls.h>
#include <net/netevent.h>
@@ -45,7 +47,7 @@
#define MLXSW_SP1_FWREV_MAJOR 13
#define MLXSW_SP1_FWREV_MINOR 2008
-#define MLXSW_SP1_FWREV_SUBMINOR 2018
+#define MLXSW_SP1_FWREV_SUBMINOR 2406
#define MLXSW_SP1_FWREV_CAN_RESET_MINOR 1702
static const struct mlxsw_fw_rev mlxsw_sp1_fw_rev = {
@@ -62,7 +64,7 @@ static const struct mlxsw_fw_rev mlxsw_sp1_fw_rev = {
#define MLXSW_SP2_FWREV_MAJOR 29
#define MLXSW_SP2_FWREV_MINOR 2008
-#define MLXSW_SP2_FWREV_SUBMINOR 2018
+#define MLXSW_SP2_FWREV_SUBMINOR 2406
static const struct mlxsw_fw_rev mlxsw_sp2_fw_rev = {
.major = MLXSW_SP2_FWREV_MAJOR,
@@ -77,7 +79,7 @@ static const struct mlxsw_fw_rev mlxsw_sp2_fw_rev = {
#define MLXSW_SP3_FWREV_MAJOR 30
#define MLXSW_SP3_FWREV_MINOR 2008
-#define MLXSW_SP3_FWREV_SUBMINOR 2018
+#define MLXSW_SP3_FWREV_SUBMINOR 2406
static const struct mlxsw_fw_rev mlxsw_sp3_fw_rev = {
.major = MLXSW_SP3_FWREV_MAJOR,
@@ -400,6 +402,22 @@ int mlxsw_sp_ethtype_to_sver_type(u16 ethtype, u8 *p_sver_type)
return 0;
}
+int mlxsw_sp_port_egress_ethtype_set(struct mlxsw_sp_port *mlxsw_sp_port,
+ u16 ethtype)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ char spevet_pl[MLXSW_REG_SPEVET_LEN];
+ u8 sver_type;
+ int err;
+
+ err = mlxsw_sp_ethtype_to_sver_type(ethtype, &sver_type);
+ if (err)
+ return err;
+
+ mlxsw_reg_spevet_pack(spevet_pl, mlxsw_sp_port->local_port, sver_type);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spevet), spevet_pl);
+}
+
static int __mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port,
u16 vid, u16 ethtype)
{
@@ -2212,32 +2230,6 @@ void mlxsw_sp_ptp_receive(struct mlxsw_sp *mlxsw_sp, struct sk_buff *skb,
mlxsw_sp->ptp_ops->receive(mlxsw_sp, skb, local_port);
}
-void mlxsw_sp_sample_receive(struct mlxsw_sp *mlxsw_sp, struct sk_buff *skb,
- u8 local_port)
-{
- struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];
- struct mlxsw_sp_port_sample *sample;
- u32 size;
-
- if (unlikely(!mlxsw_sp_port)) {
- dev_warn_ratelimited(mlxsw_sp->bus_info->dev, "Port %d: sample skb received for non-existent port\n",
- local_port);
- goto out;
- }
-
- rcu_read_lock();
- sample = rcu_dereference(mlxsw_sp_port->sample);
- if (!sample)
- goto out_unlock;
- size = sample->truncate ? sample->trunc_size : skb->len;
- psample_sample_packet(sample->psample_group, skb, size,
- mlxsw_sp_port->dev->ifindex, 0, sample->rate);
-out_unlock:
- rcu_read_unlock();
-out:
- consume_skb(skb);
-}
-
#define MLXSW_SP_RXL_NO_MARK(_trap_id, _action, _trap_group, _is_ctrl) \
MLXSW_RXL(mlxsw_sp_rx_listener_no_mark_func, _trap_id, _action, \
_is_ctrl, SP_##_trap_group, DISCARD)
@@ -2576,6 +2568,147 @@ static const struct mlxsw_sp_ptp_ops mlxsw_sp2_ptp_ops = {
.get_stats = mlxsw_sp2_get_stats,
};
+struct mlxsw_sp_sample_trigger_node {
+ struct mlxsw_sp_sample_trigger trigger;
+ struct mlxsw_sp_sample_params params;
+ struct rhash_head ht_node;
+ struct rcu_head rcu;
+ refcount_t refcount;
+};
+
+static const struct rhashtable_params mlxsw_sp_sample_trigger_ht_params = {
+ .key_offset = offsetof(struct mlxsw_sp_sample_trigger_node, trigger),
+ .head_offset = offsetof(struct mlxsw_sp_sample_trigger_node, ht_node),
+ .key_len = sizeof(struct mlxsw_sp_sample_trigger),
+ .automatic_shrinking = true,
+};
+
+static void
+mlxsw_sp_sample_trigger_key_init(struct mlxsw_sp_sample_trigger *key,
+ const struct mlxsw_sp_sample_trigger *trigger)
+{
+ memset(key, 0, sizeof(*key));
+ key->type = trigger->type;
+ key->local_port = trigger->local_port;
+}
+
+/* RCU read lock must be held */
+struct mlxsw_sp_sample_params *
+mlxsw_sp_sample_trigger_params_lookup(struct mlxsw_sp *mlxsw_sp,
+ const struct mlxsw_sp_sample_trigger *trigger)
+{
+ struct mlxsw_sp_sample_trigger_node *trigger_node;
+ struct mlxsw_sp_sample_trigger key;
+
+ mlxsw_sp_sample_trigger_key_init(&key, trigger);
+ trigger_node = rhashtable_lookup(&mlxsw_sp->sample_trigger_ht, &key,
+ mlxsw_sp_sample_trigger_ht_params);
+ if (!trigger_node)
+ return NULL;
+
+ return &trigger_node->params;
+}
+
+static int
+mlxsw_sp_sample_trigger_node_init(struct mlxsw_sp *mlxsw_sp,
+ const struct mlxsw_sp_sample_trigger *trigger,
+ const struct mlxsw_sp_sample_params *params)
+{
+ struct mlxsw_sp_sample_trigger_node *trigger_node;
+ int err;
+
+ trigger_node = kzalloc(sizeof(*trigger_node), GFP_KERNEL);
+ if (!trigger_node)
+ return -ENOMEM;
+
+ trigger_node->trigger = *trigger;
+ trigger_node->params = *params;
+ refcount_set(&trigger_node->refcount, 1);
+
+ err = rhashtable_insert_fast(&mlxsw_sp->sample_trigger_ht,
+ &trigger_node->ht_node,
+ mlxsw_sp_sample_trigger_ht_params);
+ if (err)
+ goto err_rhashtable_insert;
+
+ return 0;
+
+err_rhashtable_insert:
+ kfree(trigger_node);
+ return err;
+}
+
+static void
+mlxsw_sp_sample_trigger_node_fini(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_sample_trigger_node *trigger_node)
+{
+ rhashtable_remove_fast(&mlxsw_sp->sample_trigger_ht,
+ &trigger_node->ht_node,
+ mlxsw_sp_sample_trigger_ht_params);
+ kfree_rcu(trigger_node, rcu);
+}
+
+int
+mlxsw_sp_sample_trigger_params_set(struct mlxsw_sp *mlxsw_sp,
+ const struct mlxsw_sp_sample_trigger *trigger,
+ const struct mlxsw_sp_sample_params *params,
+ struct netlink_ext_ack *extack)
+{
+ struct mlxsw_sp_sample_trigger_node *trigger_node;
+ struct mlxsw_sp_sample_trigger key;
+
+ ASSERT_RTNL();
+
+ mlxsw_sp_sample_trigger_key_init(&key, trigger);
+
+ trigger_node = rhashtable_lookup_fast(&mlxsw_sp->sample_trigger_ht,
+ &key,
+ mlxsw_sp_sample_trigger_ht_params);
+ if (!trigger_node)
+ return mlxsw_sp_sample_trigger_node_init(mlxsw_sp, &key,
+ params);
+
+ if (trigger_node->trigger.local_port) {
+ NL_SET_ERR_MSG_MOD(extack, "Sampling already enabled on port");
+ return -EINVAL;
+ }
+
+ if (trigger_node->params.psample_group != params->psample_group ||
+ trigger_node->params.truncate != params->truncate ||
+ trigger_node->params.rate != params->rate ||
+ trigger_node->params.trunc_size != params->trunc_size) {
+ NL_SET_ERR_MSG_MOD(extack, "Sampling parameters do not match for an existing sampling trigger");
+ return -EINVAL;
+ }
+
+ refcount_inc(&trigger_node->refcount);
+
+ return 0;
+}
+
+void
+mlxsw_sp_sample_trigger_params_unset(struct mlxsw_sp *mlxsw_sp,
+ const struct mlxsw_sp_sample_trigger *trigger)
+{
+ struct mlxsw_sp_sample_trigger_node *trigger_node;
+ struct mlxsw_sp_sample_trigger key;
+
+ ASSERT_RTNL();
+
+ mlxsw_sp_sample_trigger_key_init(&key, trigger);
+
+ trigger_node = rhashtable_lookup_fast(&mlxsw_sp->sample_trigger_ht,
+ &key,
+ mlxsw_sp_sample_trigger_ht_params);
+ if (!trigger_node)
+ return;
+
+ if (!refcount_dec_and_test(&trigger_node->refcount))
+ return;
+
+ mlxsw_sp_sample_trigger_node_fini(mlxsw_sp, trigger_node);
+}
+
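
The trigger table above implements lookup-or-create with a reference count per key: a second identical request only bumps the refcount, and teardown frees the node when the last reference drops. A self-contained model of that lifecycle, with a linked list standing in for the rhashtable (all names hypothetical):

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

struct trigger_node {
	int key;
	int refcount;
	struct trigger_node *next;
};

static struct trigger_node *head;

static struct trigger_node *lookup(int key)
{
	struct trigger_node *n;

	for (n = head; n; n = n->next)
		if (n->key == key)
			return n;
	return NULL;
}

static int trigger_set(int key)
{
	struct trigger_node *n = lookup(key);

	if (n) {		/* existing trigger: just take a reference */
		n->refcount++;
		return 0;
	}
	n = calloc(1, sizeof(*n));
	if (!n)
		return -ENOMEM;
	n->key = key;
	n->refcount = 1;
	n->next = head;
	head = n;
	return 0;
}

static void trigger_unset(int key)
{
	struct trigger_node *n = lookup(key), **p;

	if (!n || --n->refcount)
		return;
	for (p = &head; *p != n; p = &(*p)->next)
		;
	*p = n->next;		/* last reference gone: unlink and free */
	free(n);
}

int main(void)
{
	trigger_set(1);
	trigger_set(1);
	trigger_unset(1);
	printf("still present: %s\n", lookup(1) ? "yes" : "no");
	trigger_unset(1);
	printf("still present: %s\n", lookup(1) ? "yes" : "no");
	return 0;
}
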
static int mlxsw_sp_netdevice_event(struct notifier_block *unused,
unsigned long event, void *ptr);
@@ -2730,6 +2863,13 @@ static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core,
goto err_port_module_info_init;
}
+ err = rhashtable_init(&mlxsw_sp->sample_trigger_ht,
+ &mlxsw_sp_sample_trigger_ht_params);
+ if (err) {
+ dev_err(mlxsw_sp->bus_info->dev, "Failed to init sampling trigger hashtable\n");
+ goto err_sample_trigger_init;
+ }
+
err = mlxsw_sp_ports_create(mlxsw_sp);
if (err) {
dev_err(mlxsw_sp->bus_info->dev, "Failed to create ports\n");
@@ -2739,6 +2879,8 @@ static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core,
return 0;
err_ports_create:
+ rhashtable_destroy(&mlxsw_sp->sample_trigger_ht);
+err_sample_trigger_init:
mlxsw_sp_port_module_info_fini(mlxsw_sp);
err_port_module_info_init:
mlxsw_sp_dpipe_fini(mlxsw_sp);
@@ -2788,6 +2930,7 @@ static int mlxsw_sp1_init(struct mlxsw_core *mlxsw_core,
{
struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
+ mlxsw_sp->switchdev_ops = &mlxsw_sp1_switchdev_ops;
mlxsw_sp->kvdl_ops = &mlxsw_sp1_kvdl_ops;
mlxsw_sp->afa_ops = &mlxsw_sp1_act_afa_ops;
mlxsw_sp->afk_ops = &mlxsw_sp1_afk_ops;
@@ -2796,7 +2939,6 @@ static int mlxsw_sp1_init(struct mlxsw_core *mlxsw_core,
mlxsw_sp->acl_tcam_ops = &mlxsw_sp1_acl_tcam_ops;
mlxsw_sp->nve_ops_arr = mlxsw_sp1_nve_ops_arr;
mlxsw_sp->mac_mask = mlxsw_sp1_mac_mask;
- mlxsw_sp->rif_ops_arr = mlxsw_sp1_rif_ops_arr;
mlxsw_sp->sb_vals = &mlxsw_sp1_sb_vals;
mlxsw_sp->sb_ops = &mlxsw_sp1_sb_ops;
mlxsw_sp->port_type_speed_ops = &mlxsw_sp1_port_type_speed_ops;
@@ -2804,6 +2946,8 @@ static int mlxsw_sp1_init(struct mlxsw_core *mlxsw_core,
mlxsw_sp->span_ops = &mlxsw_sp1_span_ops;
mlxsw_sp->policer_core_ops = &mlxsw_sp1_policer_core_ops;
mlxsw_sp->trap_ops = &mlxsw_sp1_trap_ops;
+ mlxsw_sp->mall_ops = &mlxsw_sp1_mall_ops;
+ mlxsw_sp->router_ops = &mlxsw_sp1_router_ops;
mlxsw_sp->listeners = mlxsw_sp1_listener;
mlxsw_sp->listeners_count = ARRAY_SIZE(mlxsw_sp1_listener);
mlxsw_sp->lowest_shaper_bs = MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP1;
@@ -2817,6 +2961,7 @@ static int mlxsw_sp2_init(struct mlxsw_core *mlxsw_core,
{
struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
+ mlxsw_sp->switchdev_ops = &mlxsw_sp2_switchdev_ops;
mlxsw_sp->kvdl_ops = &mlxsw_sp2_kvdl_ops;
mlxsw_sp->afa_ops = &mlxsw_sp2_act_afa_ops;
mlxsw_sp->afk_ops = &mlxsw_sp2_afk_ops;
@@ -2825,7 +2970,6 @@ static int mlxsw_sp2_init(struct mlxsw_core *mlxsw_core,
mlxsw_sp->acl_tcam_ops = &mlxsw_sp2_acl_tcam_ops;
mlxsw_sp->nve_ops_arr = mlxsw_sp2_nve_ops_arr;
mlxsw_sp->mac_mask = mlxsw_sp2_mac_mask;
- mlxsw_sp->rif_ops_arr = mlxsw_sp2_rif_ops_arr;
mlxsw_sp->sb_vals = &mlxsw_sp2_sb_vals;
mlxsw_sp->sb_ops = &mlxsw_sp2_sb_ops;
mlxsw_sp->port_type_speed_ops = &mlxsw_sp2_port_type_speed_ops;
@@ -2833,6 +2977,8 @@ static int mlxsw_sp2_init(struct mlxsw_core *mlxsw_core,
mlxsw_sp->span_ops = &mlxsw_sp2_span_ops;
mlxsw_sp->policer_core_ops = &mlxsw_sp2_policer_core_ops;
mlxsw_sp->trap_ops = &mlxsw_sp2_trap_ops;
+ mlxsw_sp->mall_ops = &mlxsw_sp2_mall_ops;
+ mlxsw_sp->router_ops = &mlxsw_sp2_router_ops;
mlxsw_sp->lowest_shaper_bs = MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP2;
return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info, extack);
@@ -2844,6 +2990,7 @@ static int mlxsw_sp3_init(struct mlxsw_core *mlxsw_core,
{
struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
+ mlxsw_sp->switchdev_ops = &mlxsw_sp2_switchdev_ops;
mlxsw_sp->kvdl_ops = &mlxsw_sp2_kvdl_ops;
mlxsw_sp->afa_ops = &mlxsw_sp2_act_afa_ops;
mlxsw_sp->afk_ops = &mlxsw_sp2_afk_ops;
@@ -2852,7 +2999,6 @@ static int mlxsw_sp3_init(struct mlxsw_core *mlxsw_core,
mlxsw_sp->acl_tcam_ops = &mlxsw_sp2_acl_tcam_ops;
mlxsw_sp->nve_ops_arr = mlxsw_sp2_nve_ops_arr;
mlxsw_sp->mac_mask = mlxsw_sp2_mac_mask;
- mlxsw_sp->rif_ops_arr = mlxsw_sp2_rif_ops_arr;
mlxsw_sp->sb_vals = &mlxsw_sp2_sb_vals;
mlxsw_sp->sb_ops = &mlxsw_sp3_sb_ops;
mlxsw_sp->port_type_speed_ops = &mlxsw_sp2_port_type_speed_ops;
@@ -2860,6 +3006,8 @@ static int mlxsw_sp3_init(struct mlxsw_core *mlxsw_core,
mlxsw_sp->span_ops = &mlxsw_sp3_span_ops;
mlxsw_sp->policer_core_ops = &mlxsw_sp2_policer_core_ops;
mlxsw_sp->trap_ops = &mlxsw_sp2_trap_ops;
+ mlxsw_sp->mall_ops = &mlxsw_sp2_mall_ops;
+ mlxsw_sp->router_ops = &mlxsw_sp2_router_ops;
mlxsw_sp->lowest_shaper_bs = MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP3;
return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info, extack);
@@ -2870,6 +3018,7 @@ static void mlxsw_sp_fini(struct mlxsw_core *mlxsw_core)
struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
mlxsw_sp_ports_remove(mlxsw_sp);
+ rhashtable_destroy(&mlxsw_sp->sample_trigger_ht);
mlxsw_sp_port_module_info_fini(mlxsw_sp);
mlxsw_sp_dpipe_fini(mlxsw_sp);
unregister_netdevice_notifier_net(mlxsw_sp_net(mlxsw_sp),
@@ -4283,7 +4432,7 @@ static int mlxsw_sp_netdevice_bridge_event(struct net_device *br_dev,
if (br_vlan_enabled(br_dev)) {
br_vlan_get_proto(br_dev, &proto);
if (proto == ETH_P_8021AD) {
- NL_SET_ERR_MSG_MOD(extack, "Uppers are not supported on top of an 802.1ad bridge");
+ NL_SET_ERR_MSG_MOD(extack, "Upper devices are not supported on top of an 802.1ad bridge");
return -EOPNOTSUPP;
}
}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
index ba28ac7e79bc..f99db88ee884 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
@@ -16,6 +16,7 @@
#include <linux/in6.h>
#include <linux/notifier.h>
#include <linux/net_namespace.h>
+#include <linux/spinlock.h>
#include <net/psample.h>
#include <net/pkt_cls.h>
#include <net/red.h>
@@ -87,10 +88,15 @@ enum mlxsw_sp_rif_type {
MLXSW_SP_RIF_TYPE_MAX,
};
-struct mlxsw_sp_rif_ops;
+struct mlxsw_sp_router_ops;
-extern const struct mlxsw_sp_rif_ops *mlxsw_sp1_rif_ops_arr[];
-extern const struct mlxsw_sp_rif_ops *mlxsw_sp2_rif_ops_arr[];
+extern const struct mlxsw_sp_router_ops mlxsw_sp1_router_ops;
+extern const struct mlxsw_sp_router_ops mlxsw_sp2_router_ops;
+
+struct mlxsw_sp_switchdev_ops;
+
+extern const struct mlxsw_sp_switchdev_ops mlxsw_sp1_switchdev_ops;
+extern const struct mlxsw_sp_switchdev_ops mlxsw_sp2_switchdev_ops;
enum mlxsw_sp_fid_type {
MLXSW_SP_FID_TYPE_8021Q,
@@ -134,6 +140,7 @@ struct mlxsw_sp_ptp_state;
struct mlxsw_sp_ptp_ops;
struct mlxsw_sp_span_ops;
struct mlxsw_sp_qdisc_state;
+struct mlxsw_sp_mall_entry;
struct mlxsw_sp_port_mapping {
u8 module;
@@ -149,6 +156,7 @@ struct mlxsw_sp {
const unsigned char *mac_mask;
struct mlxsw_sp_upper *lags;
struct mlxsw_sp_port_mapping **port_mapping;
+ struct rhashtable sample_trigger_ht;
struct mlxsw_sp_sb *sb;
struct mlxsw_sp_bridge *bridge;
struct mlxsw_sp_router *router;
@@ -165,6 +173,7 @@ struct mlxsw_sp {
struct mlxsw_sp_counter_pool *counter_pool;
struct mlxsw_sp_span *span;
struct mlxsw_sp_trap *trap;
+ const struct mlxsw_sp_switchdev_ops *switchdev_ops;
const struct mlxsw_sp_kvdl_ops *kvdl_ops;
const struct mlxsw_afa_ops *afa_ops;
const struct mlxsw_afk_ops *afk_ops;
@@ -172,7 +181,6 @@ struct mlxsw_sp {
const struct mlxsw_sp_acl_rulei_ops *acl_rulei_ops;
const struct mlxsw_sp_acl_tcam_ops *acl_tcam_ops;
const struct mlxsw_sp_nve_ops **nve_ops_arr;
- const struct mlxsw_sp_rif_ops **rif_ops_arr;
const struct mlxsw_sp_sb_vals *sb_vals;
const struct mlxsw_sp_sb_ops *sb_ops;
const struct mlxsw_sp_port_type_speed_ops *port_type_speed_ops;
@@ -180,6 +188,8 @@ struct mlxsw_sp {
const struct mlxsw_sp_span_ops *span_ops;
const struct mlxsw_sp_policer_core_ops *policer_core_ops;
const struct mlxsw_sp_trap_ops *trap_ops;
+ const struct mlxsw_sp_mall_ops *mall_ops;
+ const struct mlxsw_sp_router_ops *router_ops;
const struct mlxsw_listener *listeners;
size_t listeners_count;
u32 lowest_shaper_bs;
@@ -233,7 +243,18 @@ struct mlxsw_sp_port_pcpu_stats {
u32 tx_dropped;
};
-struct mlxsw_sp_port_sample {
+enum mlxsw_sp_sample_trigger_type {
+ MLXSW_SP_SAMPLE_TRIGGER_TYPE_INGRESS,
+ MLXSW_SP_SAMPLE_TRIGGER_TYPE_EGRESS,
+ MLXSW_SP_SAMPLE_TRIGGER_TYPE_POLICY_ENGINE,
+};
+
+struct mlxsw_sp_sample_trigger {
+ enum mlxsw_sp_sample_trigger_type type;
+ u8 local_port; /* Reserved when trigger type is not ingress / egress. */
+};
+
+struct mlxsw_sp_sample_params {
struct psample_group *psample_group;
u32 trunc_size;
u32 rate;
@@ -303,7 +324,6 @@ struct mlxsw_sp_port {
struct mlxsw_sp_port_xstats xstats;
struct delayed_work update_dw;
} periodic_hw_stats;
- struct mlxsw_sp_port_sample __rcu *sample;
struct list_head vlans_list;
struct mlxsw_sp_port_vlan *default_vlan;
struct mlxsw_sp_qdisc_state *qdisc;
@@ -546,6 +566,17 @@ void mlxsw_sp_hdroom_bufs_reset_sizes(struct mlxsw_sp_port *mlxsw_sp_port,
struct mlxsw_sp_hdroom *hdroom);
int mlxsw_sp_hdroom_configure(struct mlxsw_sp_port *mlxsw_sp_port,
const struct mlxsw_sp_hdroom *hdroom);
+struct mlxsw_sp_sample_params *
+mlxsw_sp_sample_trigger_params_lookup(struct mlxsw_sp *mlxsw_sp,
+ const struct mlxsw_sp_sample_trigger *trigger);
+int
+mlxsw_sp_sample_trigger_params_set(struct mlxsw_sp *mlxsw_sp,
+ const struct mlxsw_sp_sample_trigger *trigger,
+ const struct mlxsw_sp_sample_params *params,
+ struct netlink_ext_ack *extack);
+void
+mlxsw_sp_sample_trigger_params_unset(struct mlxsw_sp *mlxsw_sp,
+ const struct mlxsw_sp_sample_trigger *trigger);
extern const struct mlxsw_sp_sb_vals mlxsw_sp1_sb_vals;
extern const struct mlxsw_sp_sb_vals mlxsw_sp2_sb_vals;
@@ -583,8 +614,6 @@ void mlxsw_sp_rx_listener_no_mark_func(struct sk_buff *skb,
u8 local_port, void *priv);
void mlxsw_sp_ptp_receive(struct mlxsw_sp *mlxsw_sp, struct sk_buff *skb,
u8 local_port);
-void mlxsw_sp_sample_receive(struct mlxsw_sp *mlxsw_sp, struct sk_buff *skb,
- u8 local_port);
int mlxsw_sp_port_speed_get(struct mlxsw_sp_port *mlxsw_sp_port, u32 *speed);
int mlxsw_sp_port_ets_set(struct mlxsw_sp_port *mlxsw_sp_port,
enum mlxsw_reg_qeec_hr hr, u8 index, u8 next_index,
@@ -601,6 +630,8 @@ int mlxsw_sp_port_vp_mode_set(struct mlxsw_sp_port *mlxsw_sp_port, bool enable);
int mlxsw_sp_port_vid_learning_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid,
bool learn_enable);
int mlxsw_sp_ethtype_to_sver_type(u16 ethtype, u8 *p_sver_type);
+int mlxsw_sp_port_egress_ethtype_set(struct mlxsw_sp_port *mlxsw_sp_port,
+ u16 ethtype);
int mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid,
u16 ethtype);
struct mlxsw_sp_port_vlan *
@@ -939,6 +970,12 @@ int mlxsw_sp_acl_rulei_act_count(struct mlxsw_sp *mlxsw_sp,
int mlxsw_sp_acl_rulei_act_fid_set(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_rule_info *rulei,
u16 fid, struct netlink_ext_ack *extack);
+int mlxsw_sp_acl_rulei_act_sample(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_rule_info *rulei,
+ struct mlxsw_sp_flow_block *block,
+ struct psample_group *psample_group, u32 rate,
+ u32 trunc_size, bool truncate,
+ struct netlink_ext_ack *extack);
struct mlxsw_sp_acl_rule;
@@ -1048,6 +1085,19 @@ extern const struct mlxsw_afk_ops mlxsw_sp1_afk_ops;
extern const struct mlxsw_afk_ops mlxsw_sp2_afk_ops;
/* spectrum_matchall.c */
+struct mlxsw_sp_mall_ops {
+ int (*sample_add)(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_mall_entry *mall_entry,
+ struct netlink_ext_ack *extack);
+ void (*sample_del)(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_mall_entry *mall_entry);
+};
+
+extern const struct mlxsw_sp_mall_ops mlxsw_sp1_mall_ops;
+extern const struct mlxsw_sp_mall_ops mlxsw_sp2_mall_ops;
+
enum mlxsw_sp_mall_action_type {
MLXSW_SP_MALL_ACTION_TYPE_MIRROR,
MLXSW_SP_MALL_ACTION_TYPE_SAMPLE,
@@ -1063,6 +1113,11 @@ struct mlxsw_sp_mall_trap_entry {
int span_id;
};
+struct mlxsw_sp_mall_sample_entry {
+ struct mlxsw_sp_sample_params params;
+ int span_id; /* Relevant for Spectrum-2 onwards. */
+};
+
struct mlxsw_sp_mall_entry {
struct list_head list;
unsigned long cookie;
@@ -1072,7 +1127,7 @@ struct mlxsw_sp_mall_entry {
union {
struct mlxsw_sp_mall_mirror_entry mirror;
struct mlxsw_sp_mall_trap_entry trap;
- struct mlxsw_sp_port_sample sample;
+ struct mlxsw_sp_mall_sample_entry sample;
};
struct rcu_head rcu;
};
@@ -1083,7 +1138,8 @@ int mlxsw_sp_mall_replace(struct mlxsw_sp *mlxsw_sp,
void mlxsw_sp_mall_destroy(struct mlxsw_sp_flow_block *block,
struct tc_cls_matchall_offload *f);
int mlxsw_sp_mall_port_bind(struct mlxsw_sp_flow_block *block,
- struct mlxsw_sp_port *mlxsw_sp_port);
+ struct mlxsw_sp_port *mlxsw_sp_port,
+ struct netlink_ext_ack *extack);
void mlxsw_sp_mall_port_unbind(struct mlxsw_sp_flow_block *block,
struct mlxsw_sp_port *mlxsw_sp_port);
int mlxsw_sp_mall_prio_get(struct mlxsw_sp_flow_block *block, u32 chain_index,
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c
index 8cfa03a75374..67cedfa76f78 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c
@@ -688,6 +688,31 @@ int mlxsw_sp_acl_rulei_act_fid_set(struct mlxsw_sp *mlxsw_sp,
return mlxsw_afa_block_append_fid_set(rulei->act_block, fid, extack);
}
+int mlxsw_sp_acl_rulei_act_sample(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_rule_info *rulei,
+ struct mlxsw_sp_flow_block *block,
+ struct psample_group *psample_group, u32 rate,
+ u32 trunc_size, bool truncate,
+ struct netlink_ext_ack *extack)
+{
+ struct mlxsw_sp_flow_block_binding *binding;
+ struct mlxsw_sp_port *mlxsw_sp_port;
+
+ if (!list_is_singular(&block->binding_list)) {
+ NL_SET_ERR_MSG_MOD(extack, "Only a single sampling source is allowed");
+ return -EOPNOTSUPP;
+ }
+ binding = list_first_entry(&block->binding_list,
+ struct mlxsw_sp_flow_block_binding, list);
+ mlxsw_sp_port = binding->mlxsw_sp_port;
+
+ return mlxsw_afa_block_append_sampler(rulei->act_block,
+ mlxsw_sp_port->local_port,
+ psample_group, rate, trunc_size,
+ truncate, binding->ingress,
+ extack);
+}
+
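
The list_is_singular() guard above enforces exactly one bound port per sampling block, since the appended action encodes a single local port. A minimal model of the exactly-one-entry test (the kernel version operates on struct list_head; this uses a NULL-terminated list):

#include <stdbool.h>
#include <stdio.h>

struct node { struct node *next; };

/* True only when the list has exactly one entry. */
static bool list_is_singular(const struct node *head)
{
	return head && !head->next;
}

int main(void)
{
	struct node b = { NULL }, a = { &b };

	printf("%d %d %d\n", list_is_singular(NULL),
	       list_is_singular(&b), list_is_singular(&a));
	return 0;
}
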
struct mlxsw_sp_acl_rule *
mlxsw_sp_acl_rule_create(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_ruleset *ruleset,
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_actions.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_actions.c
index 90372d1c28d4..c72aa38424dc 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_actions.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_actions.c
@@ -192,6 +192,22 @@ static void mlxsw_sp_act_policer_del(void *priv, u16 policer_index)
policer_index);
}
+static int mlxsw_sp1_act_sampler_add(void *priv, u8 local_port,
+ struct psample_group *psample_group,
+ u32 rate, u32 trunc_size, bool truncate,
+ bool ingress, int *p_span_id,
+ struct netlink_ext_ack *extack)
+{
+ NL_SET_ERR_MSG_MOD(extack, "Sampling action is not supported on Spectrum-1");
+ return -EOPNOTSUPP;
+}
+
+static void mlxsw_sp1_act_sampler_del(void *priv, u8 local_port, int span_id,
+ bool ingress)
+{
+ WARN_ON_ONCE(1);
+}
+
const struct mlxsw_afa_ops mlxsw_sp1_act_afa_ops = {
.kvdl_set_add = mlxsw_sp1_act_kvdl_set_add,
.kvdl_set_del = mlxsw_sp_act_kvdl_set_del,
@@ -204,8 +220,73 @@ const struct mlxsw_afa_ops mlxsw_sp1_act_afa_ops = {
.mirror_del = mlxsw_sp_act_mirror_del,
.policer_add = mlxsw_sp_act_policer_add,
.policer_del = mlxsw_sp_act_policer_del,
+ .sampler_add = mlxsw_sp1_act_sampler_add,
+ .sampler_del = mlxsw_sp1_act_sampler_del,
};
+static int mlxsw_sp2_act_sampler_add(void *priv, u8 local_port,
+ struct psample_group *psample_group,
+ u32 rate, u32 trunc_size, bool truncate,
+ bool ingress, int *p_span_id,
+ struct netlink_ext_ack *extack)
+{
+ struct mlxsw_sp_span_agent_parms agent_parms = {
+ .session_id = MLXSW_SP_SPAN_SESSION_ID_SAMPLING,
+ };
+ struct mlxsw_sp_sample_trigger trigger = {
+ .type = MLXSW_SP_SAMPLE_TRIGGER_TYPE_POLICY_ENGINE,
+ };
+ struct mlxsw_sp_sample_params params;
+ struct mlxsw_sp_port *mlxsw_sp_port;
+ struct mlxsw_sp *mlxsw_sp = priv;
+ int err;
+
+ params.psample_group = psample_group;
+ params.trunc_size = trunc_size;
+ params.rate = rate;
+ params.truncate = truncate;
+ err = mlxsw_sp_sample_trigger_params_set(mlxsw_sp, &trigger, &params,
+ extack);
+ if (err)
+ return err;
+
+ err = mlxsw_sp_span_agent_get(mlxsw_sp, p_span_id, &agent_parms);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Failed to get SPAN agent");
+ goto err_span_agent_get;
+ }
+
+ mlxsw_sp_port = mlxsw_sp->ports[local_port];
+ err = mlxsw_sp_span_analyzed_port_get(mlxsw_sp_port, ingress);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Failed to get analyzed port");
+ goto err_analyzed_port_get;
+ }
+
+ return 0;
+
+err_analyzed_port_get:
+ mlxsw_sp_span_agent_put(mlxsw_sp, *p_span_id);
+err_span_agent_get:
+ mlxsw_sp_sample_trigger_params_unset(mlxsw_sp, &trigger);
+ return err;
+}
+
+static void mlxsw_sp2_act_sampler_del(void *priv, u8 local_port, int span_id,
+ bool ingress)
+{
+ struct mlxsw_sp_sample_trigger trigger = {
+ .type = MLXSW_SP_SAMPLE_TRIGGER_TYPE_POLICY_ENGINE,
+ };
+ struct mlxsw_sp_port *mlxsw_sp_port;
+ struct mlxsw_sp *mlxsw_sp = priv;
+
+ mlxsw_sp_port = mlxsw_sp->ports[local_port];
+ mlxsw_sp_span_analyzed_port_put(mlxsw_sp_port, ingress);
+ mlxsw_sp_span_agent_put(mlxsw_sp, span_id);
+ mlxsw_sp_sample_trigger_params_unset(mlxsw_sp, &trigger);
+}
+
const struct mlxsw_afa_ops mlxsw_sp2_act_afa_ops = {
.kvdl_set_add = mlxsw_sp2_act_kvdl_set_add,
.kvdl_set_del = mlxsw_sp_act_kvdl_set_del,
@@ -218,6 +299,8 @@ const struct mlxsw_afa_ops mlxsw_sp2_act_afa_ops = {
.mirror_del = mlxsw_sp_act_mirror_del,
.policer_add = mlxsw_sp_act_policer_add,
.policer_del = mlxsw_sp_act_policer_del,
+ .sampler_add = mlxsw_sp2_act_sampler_add,
+ .sampler_del = mlxsw_sp2_act_sampler_del,
.dummy_first_set = true,
};
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c
index ed81d4fa48ac..1a2fef2a5379 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c
@@ -912,9 +912,8 @@ static u64 mlxsw_sp_dpipe_table_adj_size(struct mlxsw_sp *mlxsw_sp)
u64 size = 0;
mlxsw_sp_nexthop_for_each(nh, mlxsw_sp->router)
- if (mlxsw_sp_nexthop_offload(nh) &&
- !mlxsw_sp_nexthop_group_has_ipip(nh) &&
- !mlxsw_sp_nexthop_is_discard(nh))
+ if (mlxsw_sp_nexthop_is_forward(nh) &&
+ !mlxsw_sp_nexthop_group_has_ipip(nh))
size++;
return size;
}
@@ -1105,9 +1104,8 @@ start_again:
nh_skip = nh_count;
nh_count = 0;
mlxsw_sp_nexthop_for_each(nh, mlxsw_sp->router) {
- if (!mlxsw_sp_nexthop_offload(nh) ||
- mlxsw_sp_nexthop_group_has_ipip(nh) ||
- mlxsw_sp_nexthop_is_discard(nh))
+ if (!mlxsw_sp_nexthop_is_forward(nh) ||
+ mlxsw_sp_nexthop_group_has_ipip(nh))
continue;
if (nh_count < nh_skip)
@@ -1180,6 +1178,7 @@ out:
static int mlxsw_sp_dpipe_table_adj_counters_update(void *priv, bool enable)
{
+ char ratr_pl[MLXSW_REG_RATR_LEN];
struct mlxsw_sp *mlxsw_sp = priv;
struct mlxsw_sp_nexthop *nh;
u32 adj_hash_index = 0;
@@ -1187,9 +1186,8 @@ static int mlxsw_sp_dpipe_table_adj_counters_update(void *priv, bool enable)
u32 adj_size = 0;
mlxsw_sp_nexthop_for_each(nh, mlxsw_sp->router) {
- if (!mlxsw_sp_nexthop_offload(nh) ||
- mlxsw_sp_nexthop_group_has_ipip(nh) ||
- mlxsw_sp_nexthop_is_discard(nh))
+ if (!mlxsw_sp_nexthop_is_forward(nh) ||
+ mlxsw_sp_nexthop_group_has_ipip(nh))
continue;
mlxsw_sp_nexthop_indexes(nh, &adj_index, &adj_size,
@@ -1198,8 +1196,9 @@ static int mlxsw_sp_dpipe_table_adj_counters_update(void *priv, bool enable)
mlxsw_sp_nexthop_counter_alloc(mlxsw_sp, nh);
else
mlxsw_sp_nexthop_counter_free(mlxsw_sp, nh);
- mlxsw_sp_nexthop_update(mlxsw_sp,
- adj_index + adj_hash_index, nh);
+ mlxsw_sp_nexthop_eth_update(mlxsw_sp,
+ adj_index + adj_hash_index, nh,
+ true, ratr_pl);
}
return 0;
}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c
index 078601d31cde..c8061beed6db 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c
@@ -1059,6 +1059,131 @@ mlxsw_sp_get_ts_info(struct net_device *netdev, struct ethtool_ts_info *info)
return mlxsw_sp->ptp_ops->get_ts_info(mlxsw_sp, info);
}
+static void
+mlxsw_sp_get_eth_phy_stats(struct net_device *dev,
+ struct ethtool_eth_phy_stats *phy_stats)
+{
+ char ppcnt_pl[MLXSW_REG_PPCNT_LEN];
+
+ if (mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_IEEE_8023_CNT,
+ 0, ppcnt_pl))
+ return;
+
+ phy_stats->SymbolErrorDuringCarrier =
+ mlxsw_reg_ppcnt_a_symbol_error_during_carrier_get(ppcnt_pl);
+}
+
+static void
+mlxsw_sp_get_eth_mac_stats(struct net_device *dev,
+ struct ethtool_eth_mac_stats *mac_stats)
+{
+ char ppcnt_pl[MLXSW_REG_PPCNT_LEN];
+
+ if (mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_IEEE_8023_CNT,
+ 0, ppcnt_pl))
+ return;
+
+ mac_stats->FramesTransmittedOK =
+ mlxsw_reg_ppcnt_a_frames_transmitted_ok_get(ppcnt_pl);
+ mac_stats->FramesReceivedOK =
+ mlxsw_reg_ppcnt_a_frames_received_ok_get(ppcnt_pl);
+ mac_stats->FrameCheckSequenceErrors =
+ mlxsw_reg_ppcnt_a_frame_check_sequence_errors_get(ppcnt_pl);
+ mac_stats->AlignmentErrors =
+ mlxsw_reg_ppcnt_a_alignment_errors_get(ppcnt_pl);
+ mac_stats->OctetsTransmittedOK =
+ mlxsw_reg_ppcnt_a_octets_transmitted_ok_get(ppcnt_pl);
+ mac_stats->OctetsReceivedOK =
+ mlxsw_reg_ppcnt_a_octets_received_ok_get(ppcnt_pl);
+ mac_stats->MulticastFramesXmittedOK =
+ mlxsw_reg_ppcnt_a_multicast_frames_xmitted_ok_get(ppcnt_pl);
+ mac_stats->BroadcastFramesXmittedOK =
+ mlxsw_reg_ppcnt_a_broadcast_frames_xmitted_ok_get(ppcnt_pl);
+ mac_stats->MulticastFramesReceivedOK =
+ mlxsw_reg_ppcnt_a_multicast_frames_received_ok_get(ppcnt_pl);
+ mac_stats->BroadcastFramesReceivedOK =
+ mlxsw_reg_ppcnt_a_broadcast_frames_received_ok_get(ppcnt_pl);
+ mac_stats->InRangeLengthErrors =
+ mlxsw_reg_ppcnt_a_in_range_length_errors_get(ppcnt_pl);
+ mac_stats->OutOfRangeLengthField =
+ mlxsw_reg_ppcnt_a_out_of_range_length_field_get(ppcnt_pl);
+ mac_stats->FrameTooLongErrors =
+ mlxsw_reg_ppcnt_a_frame_too_long_errors_get(ppcnt_pl);
+}
+
+static void
+mlxsw_sp_get_eth_ctrl_stats(struct net_device *dev,
+ struct ethtool_eth_ctrl_stats *ctrl_stats)
+{
+ char ppcnt_pl[MLXSW_REG_PPCNT_LEN];
+
+ if (mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_IEEE_8023_CNT,
+ 0, ppcnt_pl))
+ return;
+
+ ctrl_stats->MACControlFramesTransmitted =
+ mlxsw_reg_ppcnt_a_mac_control_frames_transmitted_get(ppcnt_pl);
+ ctrl_stats->MACControlFramesReceived =
+ mlxsw_reg_ppcnt_a_mac_control_frames_received_get(ppcnt_pl);
+ ctrl_stats->UnsupportedOpcodesReceived =
+ mlxsw_reg_ppcnt_a_unsupported_opcodes_received_get(ppcnt_pl);
+}
+
+static const struct ethtool_rmon_hist_range mlxsw_rmon_ranges[] = {
+ { 0, 64 },
+ { 65, 127 },
+ { 128, 255 },
+ { 256, 511 },
+ { 512, 1023 },
+ { 1024, 1518 },
+ { 1519, 2047 },
+ { 2048, 4095 },
+ { 4096, 8191 },
+ { 8192, 10239 },
+ {}
+};
+
+static void
+mlxsw_sp_get_rmon_stats(struct net_device *dev,
+ struct ethtool_rmon_stats *rmon,
+ const struct ethtool_rmon_hist_range **ranges)
+{
+ char ppcnt_pl[MLXSW_REG_PPCNT_LEN];
+
+ if (mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_RFC_2819_CNT,
+ 0, ppcnt_pl))
+ return;
+
+ rmon->undersize_pkts =
+ mlxsw_reg_ppcnt_ether_stats_undersize_pkts_get(ppcnt_pl);
+ rmon->oversize_pkts =
+ mlxsw_reg_ppcnt_ether_stats_oversize_pkts_get(ppcnt_pl);
+ rmon->fragments =
+ mlxsw_reg_ppcnt_ether_stats_fragments_get(ppcnt_pl);
+
+ rmon->hist[0] = mlxsw_reg_ppcnt_ether_stats_pkts64octets_get(ppcnt_pl);
+ rmon->hist[1] =
+ mlxsw_reg_ppcnt_ether_stats_pkts65to127octets_get(ppcnt_pl);
+ rmon->hist[2] =
+ mlxsw_reg_ppcnt_ether_stats_pkts128to255octets_get(ppcnt_pl);
+ rmon->hist[3] =
+ mlxsw_reg_ppcnt_ether_stats_pkts256to511octets_get(ppcnt_pl);
+ rmon->hist[4] =
+ mlxsw_reg_ppcnt_ether_stats_pkts512to1023octets_get(ppcnt_pl);
+ rmon->hist[5] =
+ mlxsw_reg_ppcnt_ether_stats_pkts1024to1518octets_get(ppcnt_pl);
+ rmon->hist[6] =
+ mlxsw_reg_ppcnt_ether_stats_pkts1519to2047octets_get(ppcnt_pl);
+ rmon->hist[7] =
+ mlxsw_reg_ppcnt_ether_stats_pkts2048to4095octets_get(ppcnt_pl);
+ rmon->hist[8] =
+ mlxsw_reg_ppcnt_ether_stats_pkts4096to8191octets_get(ppcnt_pl);
+ rmon->hist[9] =
+ mlxsw_reg_ppcnt_ether_stats_pkts8192to10239octets_get(ppcnt_pl);
+
+ *ranges = mlxsw_rmon_ranges;
+}
+
const struct ethtool_ops mlxsw_sp_port_ethtool_ops = {
.cap_link_lanes_supported = true,
.get_drvinfo = mlxsw_sp_port_get_drvinfo,
@@ -1075,6 +1200,10 @@ const struct ethtool_ops mlxsw_sp_port_ethtool_ops = {
.get_module_info = mlxsw_sp_get_module_info,
.get_module_eeprom = mlxsw_sp_get_module_eeprom,
.get_ts_info = mlxsw_sp_get_ts_info,
+ .get_eth_phy_stats = mlxsw_sp_get_eth_phy_stats,
+ .get_eth_mac_stats = mlxsw_sp_get_eth_mac_stats,
+ .get_eth_ctrl_stats = mlxsw_sp_get_eth_ctrl_stats,
+ .get_rmon_stats = mlxsw_sp_get_rmon_stats,
};
struct mlxsw_sp1_port_link_mode {
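
The zero-filled entry at the end of mlxsw_rmon_ranges is the terminator the ethtool core scans for; each rmon->hist[i] slot is described by ranges[i]. A sketch of a driver-side helper built on that convention (the helper is illustrative, not from mlxsw; ETHTOOL_RMON_HIST_MAX is assumed to bound hist[] as in contemporary kernels):

	#include <linux/ethtool.h>

	static void
	example_fill_rmon(struct ethtool_rmon_stats *rmon,
			  const struct ethtool_rmon_hist_range **ranges,
			  const u64 *hw_buckets, unsigned int n,
			  const struct ethtool_rmon_hist_range *tbl)
	{
		unsigned int i;

		for (i = 0; i < n && i < ETHTOOL_RMON_HIST_MAX; i++)
			rmon->hist[i] = hw_buckets[i];
		*ranges = tbl;	/* zero-filled terminator marks the end */
	}
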
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_flow.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_flow.c
index 0456cda33808..9e50c823a354 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_flow.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_flow.c
@@ -71,7 +71,7 @@ static int mlxsw_sp_flow_block_bind(struct mlxsw_sp *mlxsw_sp,
return -EOPNOTSUPP;
}
- err = mlxsw_sp_mall_port_bind(block, mlxsw_sp_port);
+ err = mlxsw_sp_mall_port_bind(block, mlxsw_sp_port, extack);
if (err)
return err;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c
index 41855e58564b..be3791ca6069 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c
@@ -24,6 +24,7 @@ static int mlxsw_sp_flower_parse_actions(struct mlxsw_sp *mlxsw_sp,
const struct flow_action_entry *act;
int mirror_act_count = 0;
int police_act_count = 0;
+ int sample_act_count = 0;
int err, i;
if (!flow_action_has_entries(flow_action))
@@ -190,6 +191,11 @@ static int mlxsw_sp_flower_parse_actions(struct mlxsw_sp *mlxsw_sp,
return -EOPNOTSUPP;
}
+ if (act->police.rate_pkt_ps) {
+ NL_SET_ERR_MSG_MOD(extack, "QoS offload does not support packets per second");
+ return -EOPNOTSUPP;
+ }
+
/* The kernel might adjust the requested burst size so
* that it is not exactly a power of two. Re-adjust it
* here since the hardware only supports burst sizes
@@ -204,6 +210,23 @@ static int mlxsw_sp_flower_parse_actions(struct mlxsw_sp *mlxsw_sp,
return err;
break;
}
+ case FLOW_ACTION_SAMPLE: {
+ if (sample_act_count++) {
+ NL_SET_ERR_MSG_MOD(extack, "Multiple sample actions per rule are not supported");
+ return -EOPNOTSUPP;
+ }
+
+ err = mlxsw_sp_acl_rulei_act_sample(mlxsw_sp, rulei,
+ block,
+ act->sample.psample_group,
+ act->sample.rate,
+ act->sample.trunc_size,
+ act->sample.truncate,
+ extack);
+ if (err)
+ return err;
+ break;
+ }
default:
NL_SET_ERR_MSG_MOD(extack, "Unsupported action");
dev_err(mlxsw_sp->bus_info->dev, "Unsupported action\n");
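
For reference, an act_sample rate of N means that, on average, one of every N matching packets is delivered to the psample group, and truncation caps the sampled payload at trunc_size bytes. A trivial illustrative helper (userspace-style, not kernel code) expressing that probability:

	static double example_sample_probability(unsigned int rate)
	{
		return rate ? 1.0 / (double)rate : 0.0;	/* rate 0: nothing sampled */
	}
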
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.c
index 64a8f838eb53..5facabd86882 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.c
@@ -127,14 +127,16 @@ bool mlxsw_sp_l3addr_is_zero(union mlxsw_sp_l3addr addr)
static int
mlxsw_sp_ipip_nexthop_update_gre4(struct mlxsw_sp *mlxsw_sp, u32 adj_index,
- struct mlxsw_sp_ipip_entry *ipip_entry)
+ struct mlxsw_sp_ipip_entry *ipip_entry,
+ bool force, char *ratr_pl)
{
u16 rif_index = mlxsw_sp_ipip_lb_rif_index(ipip_entry->ol_lb);
__be32 daddr4 = mlxsw_sp_ipip_netdev_daddr4(ipip_entry->ol_dev);
- char ratr_pl[MLXSW_REG_RATR_LEN];
+ enum mlxsw_reg_ratr_op op;
- mlxsw_reg_ratr_pack(ratr_pl, MLXSW_REG_RATR_OP_WRITE_WRITE_ENTRY,
- true, MLXSW_REG_RATR_TYPE_IPIP,
+ op = force ? MLXSW_REG_RATR_OP_WRITE_WRITE_ENTRY :
+ MLXSW_REG_RATR_OP_WRITE_WRITE_ENTRY_ON_ACTIVITY;
+ mlxsw_reg_ratr_pack(ratr_pl, op, true, MLXSW_REG_RATR_TYPE_IPIP,
adj_index, rif_index);
mlxsw_reg_ratr_ipip4_entry_pack(ratr_pl, be32_to_cpu(daddr4));
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.h
index 87bef9880e5e..f0837b42d1d6 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.h
@@ -40,7 +40,8 @@ struct mlxsw_sp_ipip_ops {
enum mlxsw_sp_l3proto ul_proto; /* Underlay. */
int (*nexthop_update)(struct mlxsw_sp *mlxsw_sp, u32 adj_index,
- struct mlxsw_sp_ipip_entry *ipip_entry);
+ struct mlxsw_sp_ipip_entry *ipip_entry,
+ bool force, char *ratr_pl);
bool (*can_offload)(const struct mlxsw_sp *mlxsw_sp,
const struct net_device *ol_dev);
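
The new force flag picks between the two RATR write ops visible in the hunk above; judging by the op names, a forced write always overwrites the adjacency entry, while the ON_ACTIVITY variant only rewrites entries the ASIC marked active, letting periodic refreshes leave idle entries untouched. The selection reduces to:

	static enum mlxsw_reg_ratr_op example_ratr_op(bool force)
	{
		return force ? MLXSW_REG_RATR_OP_WRITE_WRITE_ENTRY :
			       MLXSW_REG_RATR_OP_WRITE_WRITE_ENTRY_ON_ACTIVITY;
	}
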
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_matchall.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_matchall.c
index f30599ad6019..07b371cd9818 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_matchall.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_matchall.c
@@ -24,7 +24,8 @@ mlxsw_sp_mall_entry_find(struct mlxsw_sp_flow_block *block, unsigned long cookie
static int
mlxsw_sp_mall_port_mirror_add(struct mlxsw_sp_port *mlxsw_sp_port,
- struct mlxsw_sp_mall_entry *mall_entry)
+ struct mlxsw_sp_mall_entry *mall_entry,
+ struct netlink_ext_ack *extack)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
struct mlxsw_sp_span_agent_parms agent_parms = {};
@@ -33,28 +34,35 @@ mlxsw_sp_mall_port_mirror_add(struct mlxsw_sp_port *mlxsw_sp_port,
int err;
if (!mall_entry->mirror.to_dev) {
- netdev_err(mlxsw_sp_port->dev, "Could not find requested device\n");
+ NL_SET_ERR_MSG(extack, "Could not find requested device");
return -EINVAL;
}
agent_parms.to_dev = mall_entry->mirror.to_dev;
err = mlxsw_sp_span_agent_get(mlxsw_sp, &mall_entry->mirror.span_id,
&agent_parms);
- if (err)
+ if (err) {
+ NL_SET_ERR_MSG(extack, "Failed to get SPAN agent");
return err;
+ }
err = mlxsw_sp_span_analyzed_port_get(mlxsw_sp_port,
mall_entry->ingress);
- if (err)
+ if (err) {
+ NL_SET_ERR_MSG(extack, "Failed to get analyzed port");
goto err_analyzed_port_get;
+ }
trigger = mall_entry->ingress ? MLXSW_SP_SPAN_TRIGGER_INGRESS :
MLXSW_SP_SPAN_TRIGGER_EGRESS;
parms.span_id = mall_entry->mirror.span_id;
+ parms.probability_rate = 1;
err = mlxsw_sp_span_agent_bind(mlxsw_sp, trigger, mlxsw_sp_port,
&parms);
- if (err)
+ if (err) {
+ NL_SET_ERR_MSG(extack, "Failed to bind SPAN agent");
goto err_agent_bind;
+ }
return 0;
@@ -93,46 +101,64 @@ static int mlxsw_sp_mall_port_sample_set(struct mlxsw_sp_port *mlxsw_sp_port,
static int
mlxsw_sp_mall_port_sample_add(struct mlxsw_sp_port *mlxsw_sp_port,
- struct mlxsw_sp_mall_entry *mall_entry)
+ struct mlxsw_sp_mall_entry *mall_entry,
+ struct netlink_ext_ack *extack)
{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ struct mlxsw_sp_sample_trigger trigger;
int err;
- if (rtnl_dereference(mlxsw_sp_port->sample)) {
- netdev_err(mlxsw_sp_port->dev, "sample already active\n");
- return -EEXIST;
- }
- rcu_assign_pointer(mlxsw_sp_port->sample, &mall_entry->sample);
+ if (mall_entry->ingress)
+ trigger.type = MLXSW_SP_SAMPLE_TRIGGER_TYPE_INGRESS;
+ else
+ trigger.type = MLXSW_SP_SAMPLE_TRIGGER_TYPE_EGRESS;
+ trigger.local_port = mlxsw_sp_port->local_port;
+ err = mlxsw_sp_sample_trigger_params_set(mlxsw_sp, &trigger,
+ &mall_entry->sample.params,
+ extack);
+ if (err)
+ return err;
- err = mlxsw_sp_mall_port_sample_set(mlxsw_sp_port, true,
- mall_entry->sample.rate);
+ err = mlxsw_sp->mall_ops->sample_add(mlxsw_sp, mlxsw_sp_port,
+ mall_entry, extack);
if (err)
goto err_port_sample_set;
return 0;
err_port_sample_set:
- RCU_INIT_POINTER(mlxsw_sp_port->sample, NULL);
+ mlxsw_sp_sample_trigger_params_unset(mlxsw_sp, &trigger);
return err;
}
static void
-mlxsw_sp_mall_port_sample_del(struct mlxsw_sp_port *mlxsw_sp_port)
+mlxsw_sp_mall_port_sample_del(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_mall_entry *mall_entry)
{
- if (!mlxsw_sp_port->sample)
- return;
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ struct mlxsw_sp_sample_trigger trigger;
- mlxsw_sp_mall_port_sample_set(mlxsw_sp_port, false, 1);
- RCU_INIT_POINTER(mlxsw_sp_port->sample, NULL);
+ if (mall_entry->ingress)
+ trigger.type = MLXSW_SP_SAMPLE_TRIGGER_TYPE_INGRESS;
+ else
+ trigger.type = MLXSW_SP_SAMPLE_TRIGGER_TYPE_EGRESS;
+ trigger.local_port = mlxsw_sp_port->local_port;
+
+ mlxsw_sp->mall_ops->sample_del(mlxsw_sp, mlxsw_sp_port, mall_entry);
+ mlxsw_sp_sample_trigger_params_unset(mlxsw_sp, &trigger);
}
static int
mlxsw_sp_mall_port_rule_add(struct mlxsw_sp_port *mlxsw_sp_port,
- struct mlxsw_sp_mall_entry *mall_entry)
+ struct mlxsw_sp_mall_entry *mall_entry,
+ struct netlink_ext_ack *extack)
{
switch (mall_entry->type) {
case MLXSW_SP_MALL_ACTION_TYPE_MIRROR:
- return mlxsw_sp_mall_port_mirror_add(mlxsw_sp_port, mall_entry);
+ return mlxsw_sp_mall_port_mirror_add(mlxsw_sp_port, mall_entry,
+ extack);
case MLXSW_SP_MALL_ACTION_TYPE_SAMPLE:
- return mlxsw_sp_mall_port_sample_add(mlxsw_sp_port, mall_entry);
+ return mlxsw_sp_mall_port_sample_add(mlxsw_sp_port, mall_entry,
+ extack);
default:
WARN_ON(1);
return -EINVAL;
@@ -148,7 +174,7 @@ mlxsw_sp_mall_port_rule_del(struct mlxsw_sp_port *mlxsw_sp_port,
mlxsw_sp_mall_port_mirror_del(mlxsw_sp_port, mall_entry);
break;
case MLXSW_SP_MALL_ACTION_TYPE_SAMPLE:
- mlxsw_sp_mall_port_sample_del(mlxsw_sp_port);
+ mlxsw_sp_mall_port_sample_del(mlxsw_sp_port, mall_entry);
break;
default:
WARN_ON(1);
@@ -212,6 +238,11 @@ int mlxsw_sp_mall_replace(struct mlxsw_sp *mlxsw_sp,
flower_prio_valid = true;
}
+ if (protocol != htons(ETH_P_ALL)) {
+ NL_SET_ERR_MSG(f->common.extack, "matchall rules only supported with 'all' protocol");
+ return -EOPNOTSUPP;
+ }
+
mall_entry = kzalloc(sizeof(*mall_entry), GFP_KERNEL);
if (!mall_entry)
return -ENOMEM;
@@ -219,54 +250,41 @@ int mlxsw_sp_mall_replace(struct mlxsw_sp *mlxsw_sp,
mall_entry->priority = f->common.prio;
mall_entry->ingress = mlxsw_sp_flow_block_is_ingress_bound(block);
+ if (flower_prio_valid && mall_entry->ingress &&
+ mall_entry->priority >= flower_min_prio) {
+ NL_SET_ERR_MSG(f->common.extack, "Failed to add behind existing flower rules");
+ err = -EOPNOTSUPP;
+ goto errout;
+ }
+ if (flower_prio_valid && !mall_entry->ingress &&
+ mall_entry->priority <= flower_max_prio) {
+ NL_SET_ERR_MSG(f->common.extack, "Failed to add in front of existing flower rules");
+ err = -EOPNOTSUPP;
+ goto errout;
+ }
+
act = &f->rule->action.entries[0];
- if (act->id == FLOW_ACTION_MIRRED && protocol == htons(ETH_P_ALL)) {
- if (flower_prio_valid && mall_entry->ingress &&
- mall_entry->priority >= flower_min_prio) {
- NL_SET_ERR_MSG(f->common.extack, "Failed to add behind existing flower rules");
- err = -EOPNOTSUPP;
- goto errout;
- }
- if (flower_prio_valid && !mall_entry->ingress &&
- mall_entry->priority <= flower_max_prio) {
- NL_SET_ERR_MSG(f->common.extack, "Failed to add in front of existing flower rules");
- err = -EOPNOTSUPP;
- goto errout;
- }
+ switch (act->id) {
+ case FLOW_ACTION_MIRRED:
mall_entry->type = MLXSW_SP_MALL_ACTION_TYPE_MIRROR;
mall_entry->mirror.to_dev = act->dev;
- } else if (act->id == FLOW_ACTION_SAMPLE &&
- protocol == htons(ETH_P_ALL)) {
- if (!mall_entry->ingress) {
- NL_SET_ERR_MSG(f->common.extack, "Sample is not supported on egress");
- err = -EOPNOTSUPP;
- goto errout;
- }
- if (flower_prio_valid &&
- mall_entry->priority >= flower_min_prio) {
- NL_SET_ERR_MSG(f->common.extack, "Failed to add behind existing flower rules");
- err = -EOPNOTSUPP;
- goto errout;
- }
- if (act->sample.rate > MLXSW_REG_MPSC_RATE_MAX) {
- NL_SET_ERR_MSG(f->common.extack, "Sample rate not supported");
- err = -EOPNOTSUPP;
- goto errout;
- }
+ break;
+ case FLOW_ACTION_SAMPLE:
mall_entry->type = MLXSW_SP_MALL_ACTION_TYPE_SAMPLE;
- mall_entry->sample.psample_group = act->sample.psample_group;
- mall_entry->sample.truncate = act->sample.truncate;
- mall_entry->sample.trunc_size = act->sample.trunc_size;
- mall_entry->sample.rate = act->sample.rate;
- } else {
+ mall_entry->sample.params.psample_group = act->sample.psample_group;
+ mall_entry->sample.params.truncate = act->sample.truncate;
+ mall_entry->sample.params.trunc_size = act->sample.trunc_size;
+ mall_entry->sample.params.rate = act->sample.rate;
+ break;
+ default:
err = -EOPNOTSUPP;
goto errout;
}
list_for_each_entry(binding, &block->binding_list, list) {
err = mlxsw_sp_mall_port_rule_add(binding->mlxsw_sp_port,
- mall_entry);
+ mall_entry, f->common.extack);
if (err)
goto rollback;
}
@@ -314,13 +332,15 @@ void mlxsw_sp_mall_destroy(struct mlxsw_sp_flow_block *block,
}
int mlxsw_sp_mall_port_bind(struct mlxsw_sp_flow_block *block,
- struct mlxsw_sp_port *mlxsw_sp_port)
+ struct mlxsw_sp_port *mlxsw_sp_port,
+ struct netlink_ext_ack *extack)
{
struct mlxsw_sp_mall_entry *mall_entry;
int err;
list_for_each_entry(mall_entry, &block->mall.list, list) {
- err = mlxsw_sp_mall_port_rule_add(mlxsw_sp_port, mall_entry);
+ err = mlxsw_sp_mall_port_rule_add(mlxsw_sp_port, mall_entry,
+ extack);
if (err)
goto rollback;
}
@@ -355,3 +375,104 @@ int mlxsw_sp_mall_prio_get(struct mlxsw_sp_flow_block *block, u32 chain_index,
*p_max_prio = block->mall.max_prio;
return 0;
}
+
+static int mlxsw_sp1_mall_sample_add(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_mall_entry *mall_entry,
+ struct netlink_ext_ack *extack)
+{
+ u32 rate = mall_entry->sample.params.rate;
+
+ if (!mall_entry->ingress) {
+ NL_SET_ERR_MSG(extack, "Sampling is not supported on egress");
+ return -EOPNOTSUPP;
+ }
+
+ if (rate > MLXSW_REG_MPSC_RATE_MAX) {
+ NL_SET_ERR_MSG(extack, "Unsupported sampling rate");
+ return -EOPNOTSUPP;
+ }
+
+ return mlxsw_sp_mall_port_sample_set(mlxsw_sp_port, true, rate);
+}
+
+static void mlxsw_sp1_mall_sample_del(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_mall_entry *mall_entry)
+{
+ mlxsw_sp_mall_port_sample_set(mlxsw_sp_port, false, 1);
+}
+
+const struct mlxsw_sp_mall_ops mlxsw_sp1_mall_ops = {
+ .sample_add = mlxsw_sp1_mall_sample_add,
+ .sample_del = mlxsw_sp1_mall_sample_del,
+};
+
+static int mlxsw_sp2_mall_sample_add(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_mall_entry *mall_entry,
+ struct netlink_ext_ack *extack)
+{
+ struct mlxsw_sp_span_trigger_parms trigger_parms = {};
+ struct mlxsw_sp_span_agent_parms agent_parms = {
+ .to_dev = NULL, /* Mirror to CPU. */
+ .session_id = MLXSW_SP_SPAN_SESSION_ID_SAMPLING,
+ };
+ u32 rate = mall_entry->sample.params.rate;
+ enum mlxsw_sp_span_trigger span_trigger;
+ int err;
+
+ err = mlxsw_sp_span_agent_get(mlxsw_sp, &mall_entry->sample.span_id,
+ &agent_parms);
+ if (err) {
+ NL_SET_ERR_MSG(extack, "Failed to get SPAN agent");
+ return err;
+ }
+
+ err = mlxsw_sp_span_analyzed_port_get(mlxsw_sp_port,
+ mall_entry->ingress);
+ if (err) {
+ NL_SET_ERR_MSG(extack, "Failed to get analyzed port");
+ goto err_analyzed_port_get;
+ }
+
+ span_trigger = mall_entry->ingress ? MLXSW_SP_SPAN_TRIGGER_INGRESS :
+ MLXSW_SP_SPAN_TRIGGER_EGRESS;
+ trigger_parms.span_id = mall_entry->sample.span_id;
+ trigger_parms.probability_rate = rate;
+ err = mlxsw_sp_span_agent_bind(mlxsw_sp, span_trigger, mlxsw_sp_port,
+ &trigger_parms);
+ if (err) {
+ NL_SET_ERR_MSG(extack, "Failed to bind SPAN agent");
+ goto err_agent_bind;
+ }
+
+ return 0;
+
+err_agent_bind:
+ mlxsw_sp_span_analyzed_port_put(mlxsw_sp_port, mall_entry->ingress);
+err_analyzed_port_get:
+ mlxsw_sp_span_agent_put(mlxsw_sp, mall_entry->sample.span_id);
+ return err;
+}
+
+static void mlxsw_sp2_mall_sample_del(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_mall_entry *mall_entry)
+{
+ struct mlxsw_sp_span_trigger_parms trigger_parms = {};
+ enum mlxsw_sp_span_trigger span_trigger;
+
+ span_trigger = mall_entry->ingress ? MLXSW_SP_SPAN_TRIGGER_INGRESS :
+ MLXSW_SP_SPAN_TRIGGER_EGRESS;
+ trigger_parms.span_id = mall_entry->sample.span_id;
+ mlxsw_sp_span_agent_unbind(mlxsw_sp, span_trigger, mlxsw_sp_port,
+ &trigger_parms);
+ mlxsw_sp_span_analyzed_port_put(mlxsw_sp_port, mall_entry->ingress);
+ mlxsw_sp_span_agent_put(mlxsw_sp, mall_entry->sample.span_id);
+}
+
+const struct mlxsw_sp_mall_ops mlxsw_sp2_mall_ops = {
+ .sample_add = mlxsw_sp2_mall_sample_add,
+ .sample_del = mlxsw_sp2_mall_sample_del,
+};
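
Spectrum-1 thus keeps the MPSC-based sampler with its rate ceiling and ingress-only limitation, while Spectrum-2 samples by binding a SPAN agent that mirrors to the CPU with a per-trigger probability_rate. A hypothetical wiring sketch (the actual assignment lives in spectrum.c, outside this diff):

	static void example_mall_ops_init(struct mlxsw_sp *mlxsw_sp, bool is_sp1)
	{
		/* common matchall code only ever calls through these pointers */
		mlxsw_sp->mall_ops = is_sp1 ? &mlxsw_sp1_mall_ops :
					      &mlxsw_sp2_mall_ops;
	}
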
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.h
index 2796d3659979..d8104fc6c900 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.h
@@ -18,7 +18,6 @@ struct mlxsw_sp_nve_config {
u32 ul_tb_id;
enum mlxsw_sp_l3proto ul_proto;
union mlxsw_sp_l3addr ul_sip;
- u16 ethertype;
};
struct mlxsw_sp_nve {
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve_vxlan.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve_vxlan.c
index 3e2bb22e9ca6..b84bb4b65098 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve_vxlan.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve_vxlan.c
@@ -113,7 +113,6 @@ static void mlxsw_sp_nve_vxlan_config(const struct mlxsw_sp_nve *nve,
config->ul_proto = MLXSW_SP_L3_PROTO_IPV4;
config->ul_sip.addr4 = cfg->saddr.sin.sin_addr.s_addr;
config->udp_dport = cfg->dst_port;
- config->ethertype = params->ethertype;
}
static int __mlxsw_sp_nve_parsing_set(struct mlxsw_sp *mlxsw_sp,
@@ -318,20 +317,14 @@ static bool mlxsw_sp2_nve_vxlan_learning_set(struct mlxsw_sp *mlxsw_sp,
}
static int
-mlxsw_sp2_nve_decap_ethertype_set(struct mlxsw_sp *mlxsw_sp, u16 ethertype)
+mlxsw_sp2_nve_decap_ethertype_set(struct mlxsw_sp *mlxsw_sp)
{
char spvid_pl[MLXSW_REG_SPVID_LEN] = {};
- u8 sver_type;
- int err;
mlxsw_reg_spvid_tport_set(spvid_pl, true);
mlxsw_reg_spvid_local_port_set(spvid_pl,
MLXSW_REG_TUNNEL_PORT_NVE);
- err = mlxsw_sp_ethtype_to_sver_type(ethertype, &sver_type);
- if (err)
- return err;
-
- mlxsw_reg_spvid_et_vlan_set(spvid_pl, sver_type);
+ mlxsw_reg_spvid_egr_et_set_set(spvid_pl, true);
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvid), spvid_pl);
}
@@ -367,7 +360,7 @@ mlxsw_sp2_nve_vxlan_config_set(struct mlxsw_sp *mlxsw_sp,
if (err)
goto err_spvtr_write;
- err = mlxsw_sp2_nve_decap_ethertype_set(mlxsw_sp, config->ethertype);
+ err = mlxsw_sp2_nve_decap_ethertype_set(mlxsw_sp);
if (err)
goto err_decap_ethertype_set;
@@ -392,8 +385,6 @@ static void mlxsw_sp2_nve_vxlan_config_clear(struct mlxsw_sp *mlxsw_sp)
char spvtr_pl[MLXSW_REG_SPVTR_LEN];
char tngcr_pl[MLXSW_REG_TNGCR_LEN];
- /* Set default EtherType */
- mlxsw_sp2_nve_decap_ethertype_set(mlxsw_sp, ETH_P_8021Q);
mlxsw_reg_spvtr_pack(spvtr_pl, true, MLXSW_REG_TUNNEL_PORT_NVE,
MLXSW_REG_SPVTR_IPVID_MODE_IEEE_COMPLIANT_PVID);
mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvtr), spvtr_pl);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c
index fd672c6c9133..04672eb5c7f3 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c
@@ -29,7 +29,6 @@ struct mlxsw_sp_qdisc;
struct mlxsw_sp_qdisc_ops {
enum mlxsw_sp_qdisc_type type;
int (*check_params)(struct mlxsw_sp_port *mlxsw_sp_port,
- struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
void *params);
int (*replace)(struct mlxsw_sp_port *mlxsw_sp_port, u32 handle,
struct mlxsw_sp_qdisc *mlxsw_sp_qdisc, void *params);
@@ -48,11 +47,14 @@ struct mlxsw_sp_qdisc_ops {
*/
void (*unoffload)(struct mlxsw_sp_port *mlxsw_sp_port,
struct mlxsw_sp_qdisc *mlxsw_sp_qdisc, void *params);
+ struct mlxsw_sp_qdisc *(*find_class)(struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
+ u32 parent);
+ unsigned int num_classes;
};
struct mlxsw_sp_qdisc {
u32 handle;
- u8 tclass_num;
+ int tclass_num;
u8 prio_bitmap;
union {
struct red_stats red;
@@ -66,11 +68,13 @@ struct mlxsw_sp_qdisc {
} stats_base;
struct mlxsw_sp_qdisc_ops *ops;
+ struct mlxsw_sp_qdisc *parent;
+ struct mlxsw_sp_qdisc *qdiscs;
+ unsigned int num_classes;
};
struct mlxsw_sp_qdisc_state {
struct mlxsw_sp_qdisc root_qdisc;
- struct mlxsw_sp_qdisc tclass_qdiscs[IEEE_8021QAZ_MAX_TCS];
/* When a PRIO or ETS are added, the invisible FIFOs in their bands are
* created first. When notifications for these FIFOs arrive, it is not
@@ -85,15 +89,55 @@ struct mlxsw_sp_qdisc_state {
*/
u32 future_handle;
bool future_fifos[IEEE_8021QAZ_MAX_TCS];
+ struct mutex lock; /* Protects qdisc state. */
};
static bool
-mlxsw_sp_qdisc_compare(struct mlxsw_sp_qdisc *mlxsw_sp_qdisc, u32 handle,
- enum mlxsw_sp_qdisc_type type)
+mlxsw_sp_qdisc_compare(struct mlxsw_sp_qdisc *mlxsw_sp_qdisc, u32 handle)
+{
+ return mlxsw_sp_qdisc->ops && mlxsw_sp_qdisc->handle == handle;
+}
+
+static struct mlxsw_sp_qdisc *
+mlxsw_sp_qdisc_walk(struct mlxsw_sp_qdisc *qdisc,
+ struct mlxsw_sp_qdisc *(*pre)(struct mlxsw_sp_qdisc *,
+ void *),
+ void *data)
+{
+ struct mlxsw_sp_qdisc *tmp;
+ unsigned int i;
+
+ if (pre) {
+ tmp = pre(qdisc, data);
+ if (tmp)
+ return tmp;
+ }
+
+ if (qdisc->ops) {
+ for (i = 0; i < qdisc->num_classes; i++) {
+ tmp = &qdisc->qdiscs[i];
+ if (qdisc->ops) {
+ tmp = mlxsw_sp_qdisc_walk(tmp, pre, data);
+ if (tmp)
+ return tmp;
+ }
+ }
+ }
+
+ return NULL;
+}
+
+static struct mlxsw_sp_qdisc *
+mlxsw_sp_qdisc_walk_cb_find(struct mlxsw_sp_qdisc *qdisc, void *data)
{
- return mlxsw_sp_qdisc && mlxsw_sp_qdisc->ops &&
- mlxsw_sp_qdisc->ops->type == type &&
- mlxsw_sp_qdisc->handle == handle;
+ u32 parent = *(u32 *)data;
+
+ if (qdisc->ops && TC_H_MAJ(qdisc->handle) == TC_H_MAJ(parent)) {
+ if (qdisc->ops->find_class)
+ return qdisc->ops->find_class(qdisc, parent);
+ }
+
+ return NULL;
}
static struct mlxsw_sp_qdisc *
@@ -101,39 +145,46 @@ mlxsw_sp_qdisc_find(struct mlxsw_sp_port *mlxsw_sp_port, u32 parent,
bool root_only)
{
struct mlxsw_sp_qdisc_state *qdisc_state = mlxsw_sp_port->qdisc;
- int tclass, child_index;
+ if (!qdisc_state)
+ return NULL;
if (parent == TC_H_ROOT)
return &qdisc_state->root_qdisc;
-
- if (root_only || !qdisc_state ||
- !qdisc_state->root_qdisc.ops ||
- TC_H_MAJ(parent) != qdisc_state->root_qdisc.handle ||
- TC_H_MIN(parent) > IEEE_8021QAZ_MAX_TCS)
+ if (root_only)
return NULL;
+ return mlxsw_sp_qdisc_walk(&qdisc_state->root_qdisc,
+ mlxsw_sp_qdisc_walk_cb_find, &parent);
+}
- child_index = TC_H_MIN(parent);
- tclass = MLXSW_SP_PRIO_CHILD_TO_TCLASS(child_index);
- return &qdisc_state->tclass_qdiscs[tclass];
+static struct mlxsw_sp_qdisc *
+mlxsw_sp_qdisc_walk_cb_find_by_handle(struct mlxsw_sp_qdisc *qdisc, void *data)
+{
+ u32 handle = *(u32 *)data;
+
+ if (qdisc->ops && qdisc->handle == handle)
+ return qdisc;
+ return NULL;
}
static struct mlxsw_sp_qdisc *
mlxsw_sp_qdisc_find_by_handle(struct mlxsw_sp_port *mlxsw_sp_port, u32 handle)
{
struct mlxsw_sp_qdisc_state *qdisc_state = mlxsw_sp_port->qdisc;
- int i;
-
- if (qdisc_state->root_qdisc.handle == handle)
- return &qdisc_state->root_qdisc;
- if (qdisc_state->root_qdisc.handle == TC_H_UNSPEC)
+ if (!qdisc_state)
return NULL;
+ return mlxsw_sp_qdisc_walk(&qdisc_state->root_qdisc,
+ mlxsw_sp_qdisc_walk_cb_find_by_handle,
+ &handle);
+}
- for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
- if (qdisc_state->tclass_qdiscs[i].handle == handle)
- return &qdisc_state->tclass_qdiscs[i];
+static void
+mlxsw_sp_qdisc_reduce_parent_backlog(struct mlxsw_sp_qdisc *mlxsw_sp_qdisc)
+{
+ struct mlxsw_sp_qdisc *tmp;
- return NULL;
+ for (tmp = mlxsw_sp_qdisc->parent; tmp; tmp = tmp->parent)
+ tmp->stats_base.backlog -= mlxsw_sp_qdisc->stats_base.backlog;
}
static int
@@ -157,32 +208,48 @@ mlxsw_sp_qdisc_destroy(struct mlxsw_sp_port *mlxsw_sp_port,
err_hdroom = mlxsw_sp_hdroom_configure(mlxsw_sp_port, &hdroom);
}
- if (mlxsw_sp_qdisc->ops && mlxsw_sp_qdisc->ops->destroy)
+ if (!mlxsw_sp_qdisc->ops)
+ return 0;
+
+ mlxsw_sp_qdisc_reduce_parent_backlog(mlxsw_sp_qdisc);
+ if (mlxsw_sp_qdisc->ops->destroy)
err = mlxsw_sp_qdisc->ops->destroy(mlxsw_sp_port,
mlxsw_sp_qdisc);
+ if (mlxsw_sp_qdisc->ops->clean_stats)
+ mlxsw_sp_qdisc->ops->clean_stats(mlxsw_sp_port, mlxsw_sp_qdisc);
mlxsw_sp_qdisc->handle = TC_H_UNSPEC;
mlxsw_sp_qdisc->ops = NULL;
-
+ mlxsw_sp_qdisc->num_classes = 0;
+ kfree(mlxsw_sp_qdisc->qdiscs);
+ mlxsw_sp_qdisc->qdiscs = NULL;
return err_hdroom ?: err;
}
-static int
-mlxsw_sp_qdisc_replace(struct mlxsw_sp_port *mlxsw_sp_port, u32 handle,
- struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
- struct mlxsw_sp_qdisc_ops *ops, void *params)
+static int mlxsw_sp_qdisc_create(struct mlxsw_sp_port *mlxsw_sp_port,
+ u32 handle,
+ struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
+ struct mlxsw_sp_qdisc_ops *ops, void *params)
{
struct mlxsw_sp_qdisc *root_qdisc = &mlxsw_sp_port->qdisc->root_qdisc;
struct mlxsw_sp_hdroom orig_hdroom;
+ unsigned int i;
int err;
- if (mlxsw_sp_qdisc->ops && mlxsw_sp_qdisc->ops->type != ops->type)
- /* In case this location contained a different qdisc of the
- * same type we can override the old qdisc configuration.
- * Otherwise, we need to remove the old qdisc before setting the
- * new one.
- */
- mlxsw_sp_qdisc_destroy(mlxsw_sp_port, mlxsw_sp_qdisc);
+ err = ops->check_params(mlxsw_sp_port, params);
+ if (err)
+ return err;
+
+ if (ops->num_classes) {
+ mlxsw_sp_qdisc->qdiscs = kcalloc(ops->num_classes,
+ sizeof(*mlxsw_sp_qdisc->qdiscs),
+ GFP_KERNEL);
+ if (!mlxsw_sp_qdisc->qdiscs)
+ return -ENOMEM;
+
+ for (i = 0; i < ops->num_classes; i++)
+ mlxsw_sp_qdisc->qdiscs[i].parent = mlxsw_sp_qdisc;
+ }
orig_hdroom = *mlxsw_sp_port->hdroom;
if (root_qdisc == mlxsw_sp_qdisc) {
@@ -198,20 +265,46 @@ mlxsw_sp_qdisc_replace(struct mlxsw_sp_port *mlxsw_sp_port, u32 handle,
goto err_hdroom_configure;
}
- err = ops->check_params(mlxsw_sp_port, mlxsw_sp_qdisc, params);
+ mlxsw_sp_qdisc->num_classes = ops->num_classes;
+ mlxsw_sp_qdisc->ops = ops;
+ mlxsw_sp_qdisc->handle = handle;
+ err = ops->replace(mlxsw_sp_port, handle, mlxsw_sp_qdisc, params);
if (err)
- goto err_bad_param;
+ goto err_replace;
+
+ return 0;
+
+err_replace:
+ mlxsw_sp_qdisc->handle = TC_H_UNSPEC;
+ mlxsw_sp_qdisc->ops = NULL;
+ mlxsw_sp_qdisc->num_classes = 0;
+ mlxsw_sp_hdroom_configure(mlxsw_sp_port, &orig_hdroom);
+err_hdroom_configure:
+ kfree(mlxsw_sp_qdisc->qdiscs);
+ mlxsw_sp_qdisc->qdiscs = NULL;
+ return err;
+}
+
+static int
+mlxsw_sp_qdisc_change(struct mlxsw_sp_port *mlxsw_sp_port, u32 handle,
+ struct mlxsw_sp_qdisc *mlxsw_sp_qdisc, void *params)
+{
+ struct mlxsw_sp_qdisc_ops *ops = mlxsw_sp_qdisc->ops;
+ int err;
+
+ err = ops->check_params(mlxsw_sp_port, params);
+ if (err)
+ goto unoffload;
err = ops->replace(mlxsw_sp_port, handle, mlxsw_sp_qdisc, params);
if (err)
- goto err_config;
+ goto unoffload;
/* Check if the Qdisc changed. That includes a situation where an
* invisible Qdisc replaces another one, or is being added for the
* first time.
*/
- if (mlxsw_sp_qdisc->handle != handle || handle == TC_H_UNSPEC) {
- mlxsw_sp_qdisc->ops = ops;
+ if (mlxsw_sp_qdisc->handle != handle) {
if (ops->clean_stats)
ops->clean_stats(mlxsw_sp_port, mlxsw_sp_qdisc);
}
@@ -219,11 +312,8 @@ mlxsw_sp_qdisc_replace(struct mlxsw_sp_port *mlxsw_sp_port, u32 handle,
mlxsw_sp_qdisc->handle = handle;
return 0;
-err_bad_param:
-err_config:
- mlxsw_sp_hdroom_configure(mlxsw_sp_port, &orig_hdroom);
-err_hdroom_configure:
- if (mlxsw_sp_qdisc->handle == handle && ops->unoffload)
+unoffload:
+ if (ops->unoffload)
ops->unoffload(mlxsw_sp_port, mlxsw_sp_qdisc, params);
mlxsw_sp_qdisc_destroy(mlxsw_sp_port, mlxsw_sp_qdisc);
@@ -231,6 +321,27 @@ err_hdroom_configure:
}
static int
+mlxsw_sp_qdisc_replace(struct mlxsw_sp_port *mlxsw_sp_port, u32 handle,
+ struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
+ struct mlxsw_sp_qdisc_ops *ops, void *params)
+{
+ if (mlxsw_sp_qdisc->ops && mlxsw_sp_qdisc->ops->type != ops->type)
+ /* In case this location contained a different qdisc of the
+ * same type we can override the old qdisc configuration.
+ * Otherwise, we need to remove the old qdisc before setting the
+ * new one.
+ */
+ mlxsw_sp_qdisc_destroy(mlxsw_sp_port, mlxsw_sp_qdisc);
+
+ if (!mlxsw_sp_qdisc->ops)
+ return mlxsw_sp_qdisc_create(mlxsw_sp_port, handle,
+ mlxsw_sp_qdisc, ops, params);
+ else
+ return mlxsw_sp_qdisc_change(mlxsw_sp_port, handle,
+ mlxsw_sp_qdisc, params);
+}
+
+static int
mlxsw_sp_qdisc_get_stats(struct mlxsw_sp_port *mlxsw_sp_port,
struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
struct tc_qopt_offload_stats *stats_ptr)
@@ -295,7 +406,7 @@ mlxsw_sp_qdisc_collect_tc_stats(struct mlxsw_sp_port *mlxsw_sp_port,
u64 *p_tx_bytes, u64 *p_tx_packets,
u64 *p_drops, u64 *p_backlog)
{
- u8 tclass_num = mlxsw_sp_qdisc->tclass_num;
+ int tclass_num = mlxsw_sp_qdisc->tclass_num;
struct mlxsw_sp_port_xstats *xstats;
u64 tx_bytes, tx_packets;
@@ -395,7 +506,7 @@ static void
mlxsw_sp_setup_tc_qdisc_red_clean_stats(struct mlxsw_sp_port *mlxsw_sp_port,
struct mlxsw_sp_qdisc *mlxsw_sp_qdisc)
{
- u8 tclass_num = mlxsw_sp_qdisc->tclass_num;
+ int tclass_num = mlxsw_sp_qdisc->tclass_num;
struct mlxsw_sp_qdisc_stats *stats_base;
struct mlxsw_sp_port_xstats *xstats;
struct red_stats *red_base;
@@ -421,20 +532,12 @@ static int
mlxsw_sp_qdisc_red_destroy(struct mlxsw_sp_port *mlxsw_sp_port,
struct mlxsw_sp_qdisc *mlxsw_sp_qdisc)
{
- struct mlxsw_sp_qdisc_state *qdisc_state = mlxsw_sp_port->qdisc;
- struct mlxsw_sp_qdisc *root_qdisc = &qdisc_state->root_qdisc;
-
- if (root_qdisc != mlxsw_sp_qdisc)
- root_qdisc->stats_base.backlog -=
- mlxsw_sp_qdisc->stats_base.backlog;
-
return mlxsw_sp_tclass_congestion_disable(mlxsw_sp_port,
mlxsw_sp_qdisc->tclass_num);
}
static int
mlxsw_sp_qdisc_red_check_params(struct mlxsw_sp_port *mlxsw_sp_port,
- struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
void *params)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
@@ -467,7 +570,7 @@ mlxsw_sp_qdisc_red_replace(struct mlxsw_sp_port *mlxsw_sp_port, u32 handle,
{
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
struct tc_red_qopt_offload_params *p = params;
- u8 tclass_num = mlxsw_sp_qdisc->tclass_num;
+ int tclass_num = mlxsw_sp_qdisc->tclass_num;
u32 min, max;
u64 prob;
@@ -512,7 +615,7 @@ mlxsw_sp_qdisc_get_red_xstats(struct mlxsw_sp_port *mlxsw_sp_port,
void *xstats_ptr)
{
struct red_stats *xstats_base = &mlxsw_sp_qdisc->xstats_base.red;
- u8 tclass_num = mlxsw_sp_qdisc->tclass_num;
+ int tclass_num = mlxsw_sp_qdisc->tclass_num;
struct mlxsw_sp_port_xstats *xstats;
struct red_stats *res = xstats_ptr;
int early_drops, pdrops;
@@ -536,7 +639,7 @@ mlxsw_sp_qdisc_get_red_stats(struct mlxsw_sp_port *mlxsw_sp_port,
struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
struct tc_qopt_offload_stats *stats_ptr)
{
- u8 tclass_num = mlxsw_sp_qdisc->tclass_num;
+ int tclass_num = mlxsw_sp_qdisc->tclass_num;
struct mlxsw_sp_qdisc_stats *stats_base;
struct mlxsw_sp_port_xstats *xstats;
u64 overlimits;
@@ -553,6 +656,13 @@ mlxsw_sp_qdisc_get_red_stats(struct mlxsw_sp_port *mlxsw_sp_port,
return 0;
}
+static struct mlxsw_sp_qdisc *
+mlxsw_sp_qdisc_leaf_find_class(struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
+ u32 parent)
+{
+ return NULL;
+}
+
#define MLXSW_SP_PORT_DEFAULT_TCLASS 0
static struct mlxsw_sp_qdisc_ops mlxsw_sp_qdisc_ops_red = {
@@ -564,10 +674,11 @@ static struct mlxsw_sp_qdisc_ops mlxsw_sp_qdisc_ops_red = {
.get_stats = mlxsw_sp_qdisc_get_red_stats,
.get_xstats = mlxsw_sp_qdisc_get_red_xstats,
.clean_stats = mlxsw_sp_setup_tc_qdisc_red_clean_stats,
+ .find_class = mlxsw_sp_qdisc_leaf_find_class,
};
-int mlxsw_sp_setup_tc_red(struct mlxsw_sp_port *mlxsw_sp_port,
- struct tc_red_qopt_offload *p)
+static int __mlxsw_sp_setup_tc_red(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct tc_red_qopt_offload *p)
{
struct mlxsw_sp_qdisc *mlxsw_sp_qdisc;
@@ -581,8 +692,7 @@ int mlxsw_sp_setup_tc_red(struct mlxsw_sp_port *mlxsw_sp_port,
&mlxsw_sp_qdisc_ops_red,
&p->set);
- if (!mlxsw_sp_qdisc_compare(mlxsw_sp_qdisc, p->handle,
- MLXSW_SP_QDISC_RED))
+ if (!mlxsw_sp_qdisc_compare(mlxsw_sp_qdisc, p->handle))
return -EOPNOTSUPP;
switch (p->command) {
@@ -599,6 +709,18 @@ int mlxsw_sp_setup_tc_red(struct mlxsw_sp_port *mlxsw_sp_port,
}
}
+int mlxsw_sp_setup_tc_red(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct tc_red_qopt_offload *p)
+{
+ int err;
+
+ mutex_lock(&mlxsw_sp_port->qdisc->lock);
+ err = __mlxsw_sp_setup_tc_red(mlxsw_sp_port, p);
+ mutex_unlock(&mlxsw_sp_port->qdisc->lock);
+
+ return err;
+}
+
static void
mlxsw_sp_setup_tc_qdisc_leaf_clean_stats(struct mlxsw_sp_port *mlxsw_sp_port,
struct mlxsw_sp_qdisc *mlxsw_sp_qdisc)
@@ -622,13 +744,6 @@ static int
mlxsw_sp_qdisc_tbf_destroy(struct mlxsw_sp_port *mlxsw_sp_port,
struct mlxsw_sp_qdisc *mlxsw_sp_qdisc)
{
- struct mlxsw_sp_qdisc_state *qdisc_state = mlxsw_sp_port->qdisc;
- struct mlxsw_sp_qdisc *root_qdisc = &qdisc_state->root_qdisc;
-
- if (root_qdisc != mlxsw_sp_qdisc)
- root_qdisc->stats_base.backlog -=
- mlxsw_sp_qdisc->stats_base.backlog;
-
return mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
MLXSW_REG_QEEC_HR_SUBGROUP,
mlxsw_sp_qdisc->tclass_num, 0,
@@ -678,7 +793,6 @@ mlxsw_sp_qdisc_tbf_rate_kbps(struct tc_tbf_qopt_offload_replace_params *p)
static int
mlxsw_sp_qdisc_tbf_check_params(struct mlxsw_sp_port *mlxsw_sp_port,
- struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
void *params)
{
struct tc_tbf_qopt_offload_replace_params *p = params;
@@ -766,10 +880,11 @@ static struct mlxsw_sp_qdisc_ops mlxsw_sp_qdisc_ops_tbf = {
.destroy = mlxsw_sp_qdisc_tbf_destroy,
.get_stats = mlxsw_sp_qdisc_get_tbf_stats,
.clean_stats = mlxsw_sp_setup_tc_qdisc_leaf_clean_stats,
+ .find_class = mlxsw_sp_qdisc_leaf_find_class,
};
-int mlxsw_sp_setup_tc_tbf(struct mlxsw_sp_port *mlxsw_sp_port,
- struct tc_tbf_qopt_offload *p)
+static int __mlxsw_sp_setup_tc_tbf(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct tc_tbf_qopt_offload *p)
{
struct mlxsw_sp_qdisc *mlxsw_sp_qdisc;
@@ -783,8 +898,7 @@ int mlxsw_sp_setup_tc_tbf(struct mlxsw_sp_port *mlxsw_sp_port,
&mlxsw_sp_qdisc_ops_tbf,
&p->replace_params);
- if (!mlxsw_sp_qdisc_compare(mlxsw_sp_qdisc, p->handle,
- MLXSW_SP_QDISC_TBF))
+ if (!mlxsw_sp_qdisc_compare(mlxsw_sp_qdisc, p->handle))
return -EOPNOTSUPP;
switch (p->command) {
@@ -798,22 +912,20 @@ int mlxsw_sp_setup_tc_tbf(struct mlxsw_sp_port *mlxsw_sp_port,
}
}
-static int
-mlxsw_sp_qdisc_fifo_destroy(struct mlxsw_sp_port *mlxsw_sp_port,
- struct mlxsw_sp_qdisc *mlxsw_sp_qdisc)
+int mlxsw_sp_setup_tc_tbf(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct tc_tbf_qopt_offload *p)
{
- struct mlxsw_sp_qdisc_state *qdisc_state = mlxsw_sp_port->qdisc;
- struct mlxsw_sp_qdisc *root_qdisc = &qdisc_state->root_qdisc;
+ int err;
- if (root_qdisc != mlxsw_sp_qdisc)
- root_qdisc->stats_base.backlog -=
- mlxsw_sp_qdisc->stats_base.backlog;
- return 0;
+ mutex_lock(&mlxsw_sp_port->qdisc->lock);
+ err = __mlxsw_sp_setup_tc_tbf(mlxsw_sp_port, p);
+ mutex_unlock(&mlxsw_sp_port->qdisc->lock);
+
+ return err;
}
static int
mlxsw_sp_qdisc_fifo_check_params(struct mlxsw_sp_port *mlxsw_sp_port,
- struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
void *params)
{
return 0;
@@ -841,25 +953,18 @@ static struct mlxsw_sp_qdisc_ops mlxsw_sp_qdisc_ops_fifo = {
.type = MLXSW_SP_QDISC_FIFO,
.check_params = mlxsw_sp_qdisc_fifo_check_params,
.replace = mlxsw_sp_qdisc_fifo_replace,
- .destroy = mlxsw_sp_qdisc_fifo_destroy,
.get_stats = mlxsw_sp_qdisc_get_fifo_stats,
.clean_stats = mlxsw_sp_setup_tc_qdisc_leaf_clean_stats,
};
-int mlxsw_sp_setup_tc_fifo(struct mlxsw_sp_port *mlxsw_sp_port,
- struct tc_fifo_qopt_offload *p)
+static int __mlxsw_sp_setup_tc_fifo(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct tc_fifo_qopt_offload *p)
{
struct mlxsw_sp_qdisc_state *qdisc_state = mlxsw_sp_port->qdisc;
struct mlxsw_sp_qdisc *mlxsw_sp_qdisc;
- int tclass, child_index;
+ unsigned int band;
u32 parent_handle;
- /* Invisible FIFOs are tracked in future_handle and future_fifos. Make
- * sure that not more than one qdisc is created for a port at a time.
- * RTNL is a simple proxy for that.
- */
- ASSERT_RTNL();
-
mlxsw_sp_qdisc = mlxsw_sp_qdisc_find(mlxsw_sp_port, p->parent, false);
if (!mlxsw_sp_qdisc && p->handle == TC_H_UNSPEC) {
parent_handle = TC_H_MAJ(p->parent);
@@ -872,13 +977,12 @@ int mlxsw_sp_setup_tc_fifo(struct mlxsw_sp_port *mlxsw_sp_port,
qdisc_state->future_handle = parent_handle;
}
- child_index = TC_H_MIN(p->parent);
- tclass = MLXSW_SP_PRIO_CHILD_TO_TCLASS(child_index);
- if (tclass < IEEE_8021QAZ_MAX_TCS) {
+ band = TC_H_MIN(p->parent) - 1;
+ if (band < IEEE_8021QAZ_MAX_TCS) {
if (p->command == TC_FIFO_REPLACE)
- qdisc_state->future_fifos[tclass] = true;
+ qdisc_state->future_fifos[band] = true;
else if (p->command == TC_FIFO_DESTROY)
- qdisc_state->future_fifos[tclass] = false;
+ qdisc_state->future_fifos[band] = false;
}
}
if (!mlxsw_sp_qdisc)
@@ -890,16 +994,12 @@ int mlxsw_sp_setup_tc_fifo(struct mlxsw_sp_port *mlxsw_sp_port,
&mlxsw_sp_qdisc_ops_fifo, NULL);
}
- if (!mlxsw_sp_qdisc_compare(mlxsw_sp_qdisc, p->handle,
- MLXSW_SP_QDISC_FIFO))
+ if (!mlxsw_sp_qdisc_compare(mlxsw_sp_qdisc, p->handle))
return -EOPNOTSUPP;
switch (p->command) {
case TC_FIFO_DESTROY:
- if (p->handle == mlxsw_sp_qdisc->handle)
- return mlxsw_sp_qdisc_destroy(mlxsw_sp_port,
- mlxsw_sp_qdisc);
- return 0;
+ return mlxsw_sp_qdisc_destroy(mlxsw_sp_port, mlxsw_sp_qdisc);
case TC_FIFO_STATS:
return mlxsw_sp_qdisc_get_stats(mlxsw_sp_port, mlxsw_sp_qdisc,
&p->stats);
@@ -910,21 +1010,32 @@ int mlxsw_sp_setup_tc_fifo(struct mlxsw_sp_port *mlxsw_sp_port,
return -EOPNOTSUPP;
}
-static int
-__mlxsw_sp_qdisc_ets_destroy(struct mlxsw_sp_port *mlxsw_sp_port)
+int mlxsw_sp_setup_tc_fifo(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct tc_fifo_qopt_offload *p)
+{
+ int err;
+
+ mutex_lock(&mlxsw_sp_port->qdisc->lock);
+ err = __mlxsw_sp_setup_tc_fifo(mlxsw_sp_port, p);
+ mutex_unlock(&mlxsw_sp_port->qdisc->lock);
+
+ return err;
+}
+
+static int __mlxsw_sp_qdisc_ets_destroy(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_qdisc *mlxsw_sp_qdisc)
{
- struct mlxsw_sp_qdisc_state *qdisc_state = mlxsw_sp_port->qdisc;
int i;
- for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
+ for (i = 0; i < mlxsw_sp_qdisc->num_classes; i++) {
mlxsw_sp_port_prio_tc_set(mlxsw_sp_port, i,
MLXSW_SP_PORT_DEFAULT_TCLASS);
mlxsw_sp_port_ets_set(mlxsw_sp_port,
MLXSW_REG_QEEC_HR_SUBGROUP,
i, 0, false, 0);
mlxsw_sp_qdisc_destroy(mlxsw_sp_port,
- &qdisc_state->tclass_qdiscs[i]);
- qdisc_state->tclass_qdiscs[i].prio_bitmap = 0;
+ &mlxsw_sp_qdisc->qdiscs[i]);
+ mlxsw_sp_qdisc->qdiscs[i].prio_bitmap = 0;
}
return 0;
@@ -934,7 +1045,7 @@ static int
mlxsw_sp_qdisc_prio_destroy(struct mlxsw_sp_port *mlxsw_sp_port,
struct mlxsw_sp_qdisc *mlxsw_sp_qdisc)
{
- return __mlxsw_sp_qdisc_ets_destroy(mlxsw_sp_port);
+ return __mlxsw_sp_qdisc_ets_destroy(mlxsw_sp_port, mlxsw_sp_qdisc);
}
static int
@@ -948,7 +1059,6 @@ __mlxsw_sp_qdisc_ets_check_params(unsigned int nbands)
static int
mlxsw_sp_qdisc_prio_check_params(struct mlxsw_sp_port *mlxsw_sp_port,
- struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
void *params)
{
struct tc_prio_qopt_offload_params *p = params;
@@ -957,8 +1067,9 @@ mlxsw_sp_qdisc_prio_check_params(struct mlxsw_sp_port *mlxsw_sp_port,
}
static int
-__mlxsw_sp_qdisc_ets_replace(struct mlxsw_sp_port *mlxsw_sp_port, u32 handle,
- unsigned int nbands,
+__mlxsw_sp_qdisc_ets_replace(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
+ u32 handle, unsigned int nbands,
const unsigned int *quanta,
const unsigned int *weights,
const u8 *priomap)
@@ -971,7 +1082,7 @@ __mlxsw_sp_qdisc_ets_replace(struct mlxsw_sp_port *mlxsw_sp_port, u32 handle,
for (band = 0; band < nbands; band++) {
tclass = MLXSW_SP_PRIO_BAND_TO_TCLASS(band);
- child_qdisc = &qdisc_state->tclass_qdiscs[tclass];
+ child_qdisc = &mlxsw_sp_qdisc->qdiscs[band];
old_priomap = child_qdisc->prio_bitmap;
child_qdisc->prio_bitmap = 0;
@@ -993,6 +1104,9 @@ __mlxsw_sp_qdisc_ets_replace(struct mlxsw_sp_port *mlxsw_sp_port, u32 handle,
return err;
}
}
+
+ child_qdisc->tclass_num = tclass;
+
if (old_priomap != child_qdisc->prio_bitmap &&
child_qdisc->ops && child_qdisc->ops->clean_stats) {
backlog = child_qdisc->stats_base.backlog;
@@ -1002,7 +1116,7 @@ __mlxsw_sp_qdisc_ets_replace(struct mlxsw_sp_port *mlxsw_sp_port, u32 handle,
}
if (handle == qdisc_state->future_handle &&
- qdisc_state->future_fifos[tclass]) {
+ qdisc_state->future_fifos[band]) {
err = mlxsw_sp_qdisc_replace(mlxsw_sp_port, TC_H_UNSPEC,
child_qdisc,
&mlxsw_sp_qdisc_ops_fifo,
@@ -1013,7 +1127,7 @@ __mlxsw_sp_qdisc_ets_replace(struct mlxsw_sp_port *mlxsw_sp_port, u32 handle,
}
for (; band < IEEE_8021QAZ_MAX_TCS; band++) {
tclass = MLXSW_SP_PRIO_BAND_TO_TCLASS(band);
- child_qdisc = &qdisc_state->tclass_qdiscs[tclass];
+ child_qdisc = &mlxsw_sp_qdisc->qdiscs[band];
child_qdisc->prio_bitmap = 0;
mlxsw_sp_qdisc_destroy(mlxsw_sp_port, child_qdisc);
mlxsw_sp_port_ets_set(mlxsw_sp_port,
@@ -1034,8 +1148,9 @@ mlxsw_sp_qdisc_prio_replace(struct mlxsw_sp_port *mlxsw_sp_port, u32 handle,
struct tc_prio_qopt_offload_params *p = params;
unsigned int zeroes[TCQ_ETS_MAX_BANDS] = {0};
- return __mlxsw_sp_qdisc_ets_replace(mlxsw_sp_port, handle, p->bands,
- zeroes, zeroes, p->priomap);
+ return __mlxsw_sp_qdisc_ets_replace(mlxsw_sp_port, mlxsw_sp_qdisc,
+ handle, p->bands, zeroes,
+ zeroes, p->priomap);
}
static void
@@ -1066,7 +1181,6 @@ mlxsw_sp_qdisc_get_prio_stats(struct mlxsw_sp_port *mlxsw_sp_port,
struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
struct tc_qopt_offload_stats *stats_ptr)
{
- struct mlxsw_sp_qdisc_state *qdisc_state = mlxsw_sp_port->qdisc;
struct mlxsw_sp_qdisc *tc_qdisc;
u64 tx_packets = 0;
u64 tx_bytes = 0;
@@ -1074,8 +1188,8 @@ mlxsw_sp_qdisc_get_prio_stats(struct mlxsw_sp_port *mlxsw_sp_port,
u64 drops = 0;
int i;
- for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
- tc_qdisc = &qdisc_state->tclass_qdiscs[i];
+ for (i = 0; i < mlxsw_sp_qdisc->num_classes; i++) {
+ tc_qdisc = &mlxsw_sp_qdisc->qdiscs[i];
mlxsw_sp_qdisc_collect_tc_stats(mlxsw_sp_port, tc_qdisc,
&tx_bytes, &tx_packets,
&drops, &backlog);
@@ -1112,6 +1226,18 @@ mlxsw_sp_setup_tc_qdisc_prio_clean_stats(struct mlxsw_sp_port *mlxsw_sp_port,
mlxsw_sp_qdisc->stats_base.backlog = 0;
}
+static struct mlxsw_sp_qdisc *
+mlxsw_sp_qdisc_prio_find_class(struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
+ u32 parent)
+{
+ int child_index = TC_H_MIN(parent);
+ int band = child_index - 1;
+
+ if (band < 0 || band >= mlxsw_sp_qdisc->num_classes)
+ return NULL;
+ return &mlxsw_sp_qdisc->qdiscs[band];
+}
+
static struct mlxsw_sp_qdisc_ops mlxsw_sp_qdisc_ops_prio = {
.type = MLXSW_SP_QDISC_PRIO,
.check_params = mlxsw_sp_qdisc_prio_check_params,
@@ -1120,11 +1246,12 @@ static struct mlxsw_sp_qdisc_ops mlxsw_sp_qdisc_ops_prio = {
.destroy = mlxsw_sp_qdisc_prio_destroy,
.get_stats = mlxsw_sp_qdisc_get_prio_stats,
.clean_stats = mlxsw_sp_setup_tc_qdisc_prio_clean_stats,
+ .find_class = mlxsw_sp_qdisc_prio_find_class,
+ .num_classes = IEEE_8021QAZ_MAX_TCS,
};
static int
mlxsw_sp_qdisc_ets_check_params(struct mlxsw_sp_port *mlxsw_sp_port,
- struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
void *params)
{
struct tc_ets_qopt_offload_replace_params *p = params;
@@ -1139,8 +1266,9 @@ mlxsw_sp_qdisc_ets_replace(struct mlxsw_sp_port *mlxsw_sp_port, u32 handle,
{
struct tc_ets_qopt_offload_replace_params *p = params;
- return __mlxsw_sp_qdisc_ets_replace(mlxsw_sp_port, handle, p->bands,
- p->quanta, p->weights, p->priomap);
+ return __mlxsw_sp_qdisc_ets_replace(mlxsw_sp_port, mlxsw_sp_qdisc,
+ handle, p->bands, p->quanta,
+ p->weights, p->priomap);
}
static void
@@ -1158,7 +1286,7 @@ static int
mlxsw_sp_qdisc_ets_destroy(struct mlxsw_sp_port *mlxsw_sp_port,
struct mlxsw_sp_qdisc *mlxsw_sp_qdisc)
{
- return __mlxsw_sp_qdisc_ets_destroy(mlxsw_sp_port);
+ return __mlxsw_sp_qdisc_ets_destroy(mlxsw_sp_port, mlxsw_sp_qdisc);
}
static struct mlxsw_sp_qdisc_ops mlxsw_sp_qdisc_ops_ets = {
@@ -1169,6 +1297,8 @@ static struct mlxsw_sp_qdisc_ops mlxsw_sp_qdisc_ops_ets = {
.destroy = mlxsw_sp_qdisc_ets_destroy,
.get_stats = mlxsw_sp_qdisc_get_prio_stats,
.clean_stats = mlxsw_sp_setup_tc_qdisc_prio_clean_stats,
+ .find_class = mlxsw_sp_qdisc_prio_find_class,
+ .num_classes = IEEE_8021QAZ_MAX_TCS,
};
/* Linux allows linking of Qdiscs to arbitrary classes (so long as the resulting
@@ -1201,12 +1331,10 @@ __mlxsw_sp_qdisc_ets_graft(struct mlxsw_sp_port *mlxsw_sp_port,
struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
u8 band, u32 child_handle)
{
- struct mlxsw_sp_qdisc_state *qdisc_state = mlxsw_sp_port->qdisc;
- int tclass_num = MLXSW_SP_PRIO_BAND_TO_TCLASS(band);
struct mlxsw_sp_qdisc *old_qdisc;
- if (band < IEEE_8021QAZ_MAX_TCS &&
- qdisc_state->tclass_qdiscs[tclass_num].handle == child_handle)
+ if (band < mlxsw_sp_qdisc->num_classes &&
+ mlxsw_sp_qdisc->qdiscs[band].handle == child_handle)
return 0;
if (!child_handle) {
@@ -1224,8 +1352,10 @@ __mlxsw_sp_qdisc_ets_graft(struct mlxsw_sp_port *mlxsw_sp_port,
if (old_qdisc)
mlxsw_sp_qdisc_destroy(mlxsw_sp_port, old_qdisc);
- mlxsw_sp_qdisc_destroy(mlxsw_sp_port,
- &qdisc_state->tclass_qdiscs[tclass_num]);
+ mlxsw_sp_qdisc = mlxsw_sp_qdisc->ops->find_class(mlxsw_sp_qdisc, band);
+ if (!WARN_ON(!mlxsw_sp_qdisc))
+ mlxsw_sp_qdisc_destroy(mlxsw_sp_port, mlxsw_sp_qdisc);
+
return -EOPNOTSUPP;
}
@@ -1238,8 +1368,8 @@ mlxsw_sp_qdisc_prio_graft(struct mlxsw_sp_port *mlxsw_sp_port,
p->band, p->child_handle);
}
-int mlxsw_sp_setup_tc_prio(struct mlxsw_sp_port *mlxsw_sp_port,
- struct tc_prio_qopt_offload *p)
+static int __mlxsw_sp_setup_tc_prio(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct tc_prio_qopt_offload *p)
{
struct mlxsw_sp_qdisc *mlxsw_sp_qdisc;
@@ -1253,8 +1383,7 @@ int mlxsw_sp_setup_tc_prio(struct mlxsw_sp_port *mlxsw_sp_port,
&mlxsw_sp_qdisc_ops_prio,
&p->replace_params);
- if (!mlxsw_sp_qdisc_compare(mlxsw_sp_qdisc, p->handle,
- MLXSW_SP_QDISC_PRIO))
+ if (!mlxsw_sp_qdisc_compare(mlxsw_sp_qdisc, p->handle))
return -EOPNOTSUPP;
switch (p->command) {
@@ -1271,8 +1400,20 @@ int mlxsw_sp_setup_tc_prio(struct mlxsw_sp_port *mlxsw_sp_port,
}
}
-int mlxsw_sp_setup_tc_ets(struct mlxsw_sp_port *mlxsw_sp_port,
- struct tc_ets_qopt_offload *p)
+int mlxsw_sp_setup_tc_prio(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct tc_prio_qopt_offload *p)
+{
+ int err;
+
+ mutex_lock(&mlxsw_sp_port->qdisc->lock);
+ err = __mlxsw_sp_setup_tc_prio(mlxsw_sp_port, p);
+ mutex_unlock(&mlxsw_sp_port->qdisc->lock);
+
+ return err;
+}
+
+static int __mlxsw_sp_setup_tc_ets(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct tc_ets_qopt_offload *p)
{
struct mlxsw_sp_qdisc *mlxsw_sp_qdisc;
@@ -1286,8 +1427,7 @@ int mlxsw_sp_setup_tc_ets(struct mlxsw_sp_port *mlxsw_sp_port,
&mlxsw_sp_qdisc_ops_ets,
&p->replace_params);
- if (!mlxsw_sp_qdisc_compare(mlxsw_sp_qdisc, p->handle,
- MLXSW_SP_QDISC_ETS))
+ if (!mlxsw_sp_qdisc_compare(mlxsw_sp_qdisc, p->handle))
return -EOPNOTSUPP;
switch (p->command) {
@@ -1305,6 +1445,18 @@ int mlxsw_sp_setup_tc_ets(struct mlxsw_sp_port *mlxsw_sp_port,
}
}
+int mlxsw_sp_setup_tc_ets(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct tc_ets_qopt_offload *p)
+{
+ int err;
+
+ mutex_lock(&mlxsw_sp_port->qdisc->lock);
+ err = __mlxsw_sp_setup_tc_ets(mlxsw_sp_port, p);
+ mutex_unlock(&mlxsw_sp_port->qdisc->lock);
+
+ return err;
+}
+
struct mlxsw_sp_qevent_block {
struct list_head binding_list;
struct list_head mall_entry_list;
@@ -1341,6 +1493,7 @@ static int mlxsw_sp_qevent_span_configure(struct mlxsw_sp *mlxsw_sp,
goto err_analyzed_port_get;
trigger_parms.span_id = span_id;
+ trigger_parms.probability_rate = 1;
err = mlxsw_sp_span_agent_bind(mlxsw_sp, qevent_binding->span_trigger, mlxsw_sp_port,
&trigger_parms);
if (err)
@@ -1404,7 +1557,9 @@ static int mlxsw_sp_qevent_trap_configure(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_mall_entry *mall_entry,
struct mlxsw_sp_qevent_binding *qevent_binding)
{
- struct mlxsw_sp_span_agent_parms agent_parms = {};
+ struct mlxsw_sp_span_agent_parms agent_parms = {
+ .session_id = MLXSW_SP_SPAN_SESSION_ID_BUFFER,
+ };
int err;
err = mlxsw_sp_trap_group_policer_hw_id_get(mlxsw_sp,
@@ -1831,22 +1986,20 @@ int mlxsw_sp_setup_tc_block_qevent_early_drop(struct mlxsw_sp_port *mlxsw_sp_por
int mlxsw_sp_tc_qdisc_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
struct mlxsw_sp_qdisc_state *qdisc_state;
- int i;
qdisc_state = kzalloc(sizeof(*qdisc_state), GFP_KERNEL);
if (!qdisc_state)
return -ENOMEM;
+ mutex_init(&qdisc_state->lock);
qdisc_state->root_qdisc.prio_bitmap = 0xff;
qdisc_state->root_qdisc.tclass_num = MLXSW_SP_PORT_DEFAULT_TCLASS;
- for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
- qdisc_state->tclass_qdiscs[i].tclass_num = i;
-
mlxsw_sp_port->qdisc = qdisc_state;
return 0;
}
void mlxsw_sp_tc_qdisc_fini(struct mlxsw_sp_port *mlxsw_sp_port)
{
+ mutex_destroy(&mlxsw_sp_port->qdisc->lock);
kfree(mlxsw_sp_port->qdisc);
}
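
The find_class callbacks above rely on standard TC handle arithmetic: TC_H_MAJ() and TC_H_MIN() (from <linux/pkt_sched.h>) split a 32-bit id such as 8001:3 into major 0x8001 and minor 3, and minor N addresses band N - 1. A worked sketch:

	#include <linux/pkt_sched.h>

	static int example_parent_to_band(u32 parent)
	{
		return (int)TC_H_MIN(parent) - 1;	/* minor 1 is band 0 */
	}
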
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
index eda99d82766a..41259c0004d1 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
@@ -113,6 +113,10 @@ struct mlxsw_sp_rif_ops {
void (*fdb_del)(struct mlxsw_sp_rif *rif, const char *mac);
};
+struct mlxsw_sp_router_ops {
+ int (*init)(struct mlxsw_sp *mlxsw_sp);
+};
+
static struct mlxsw_sp_rif *
mlxsw_sp_rif_find_by_dev(const struct mlxsw_sp *mlxsw_sp,
const struct net_device *dev);
@@ -2662,6 +2666,10 @@ static void mlxsw_sp_router_neigh_event_work(struct work_struct *work)
goto out;
}
+ if (neigh_entry->connected && entry_connected &&
+ !memcmp(neigh_entry->ha, ha, ETH_ALEN))
+ goto out;
+
memcpy(neigh_entry->ha, ha, ETH_ALEN);
mlxsw_sp_neigh_entry_update(mlxsw_sp, neigh_entry, entry_connected);
mlxsw_sp_nexthop_neigh_update(mlxsw_sp, neigh_entry, !entry_connected,
@@ -2842,6 +2850,15 @@ enum mlxsw_sp_nexthop_type {
MLXSW_SP_NEXTHOP_TYPE_IPIP,
};
+enum mlxsw_sp_nexthop_action {
+ /* Nexthop forwards packets to an egress RIF */
+ MLXSW_SP_NEXTHOP_ACTION_FORWARD,
+ /* Nexthop discards packets */
+ MLXSW_SP_NEXTHOP_ACTION_DISCARD,
+ /* Nexthop traps packets */
+ MLXSW_SP_NEXTHOP_ACTION_TRAP,
+};
+
struct mlxsw_sp_nexthop_key {
struct fib_nh *fib_nh;
};
@@ -2862,16 +2879,16 @@ struct mlxsw_sp_nexthop {
int norm_nh_weight;
int num_adj_entries;
struct mlxsw_sp_rif *rif;
- u8 should_offload:1, /* set indicates this neigh is connected and
- * should be put to KVD linear area of this group.
+ u8 should_offload:1, /* set indicates this nexthop should be written
+ * to the adjacency table.
*/
- offloaded:1, /* set in case the neigh is actually put into
- * KVD linear area of this group.
+ offloaded:1, /* set indicates this nexthop was written to the
+ * adjacency table.
*/
- update:1, /* set indicates that MAC of this neigh should be
- * updated in HW
+ update:1; /* set indicates this nexthop should be updated in the
+ * adjacency table (f.e., its MAC changed).
*/
- discard:1; /* nexthop is programmed to discard packets */
+ enum mlxsw_sp_nexthop_action action;
enum mlxsw_sp_nexthop_type type;
union {
struct mlxsw_sp_neigh_entry *neigh_entry;
@@ -2894,7 +2911,9 @@ struct mlxsw_sp_nexthop_group_info {
u16 count;
int sum_norm_weight;
u8 adj_index_valid:1,
- gateway:1; /* routes using the group use a gateway */
+ gateway:1, /* routes using the group use a gateway */
+ is_resilient:1;
+ struct list_head list; /* member in nh_res_grp_list */
struct mlxsw_sp_nexthop nexthops[0];
#define nh_rif nexthops[0].rif
};
@@ -2979,14 +2998,15 @@ struct mlxsw_sp_nexthop *mlxsw_sp_nexthop_next(struct mlxsw_sp_router *router,
return list_next_entry(nh, router_list_node);
}
-bool mlxsw_sp_nexthop_offload(struct mlxsw_sp_nexthop *nh)
+bool mlxsw_sp_nexthop_is_forward(const struct mlxsw_sp_nexthop *nh)
{
- return nh->offloaded;
+ return nh->offloaded && nh->action == MLXSW_SP_NEXTHOP_ACTION_FORWARD;
}
unsigned char *mlxsw_sp_nexthop_ha(struct mlxsw_sp_nexthop *nh)
{
- if (!nh->offloaded)
+ if (nh->type != MLXSW_SP_NEXTHOP_TYPE_ETH ||
+ !mlxsw_sp_nexthop_is_forward(nh))
return NULL;
return nh->neigh_entry->ha;
}
@@ -3036,11 +3056,6 @@ bool mlxsw_sp_nexthop_group_has_ipip(struct mlxsw_sp_nexthop *nh)
return false;
}
-bool mlxsw_sp_nexthop_is_discard(const struct mlxsw_sp_nexthop *nh)
-{
- return nh->discard;
-}
-
static const struct rhashtable_params mlxsw_sp_nexthop_group_vr_ht_params = {
.key_offset = offsetof(struct mlxsw_sp_nexthop_group_vr_entry, key),
.head_offset = offsetof(struct mlxsw_sp_nexthop_group_vr_entry, ht_node),
@@ -3403,20 +3418,38 @@ err_mass_update_vr:
return err;
}
-static int __mlxsw_sp_nexthop_update(struct mlxsw_sp *mlxsw_sp, u32 adj_index,
- struct mlxsw_sp_nexthop *nh)
+static int __mlxsw_sp_nexthop_eth_update(struct mlxsw_sp *mlxsw_sp,
+ u32 adj_index,
+ struct mlxsw_sp_nexthop *nh,
+ bool force, char *ratr_pl)
{
struct mlxsw_sp_neigh_entry *neigh_entry = nh->neigh_entry;
- char ratr_pl[MLXSW_REG_RATR_LEN];
+ enum mlxsw_reg_ratr_op op;
+ u16 rif_index;
- mlxsw_reg_ratr_pack(ratr_pl, MLXSW_REG_RATR_OP_WRITE_WRITE_ENTRY,
- true, MLXSW_REG_RATR_TYPE_ETHERNET,
- adj_index, nh->rif->rif_index);
- if (nh->discard)
+ rif_index = nh->rif ? nh->rif->rif_index :
+ mlxsw_sp->router->lb_rif_index;
+ op = force ? MLXSW_REG_RATR_OP_WRITE_WRITE_ENTRY :
+ MLXSW_REG_RATR_OP_WRITE_WRITE_ENTRY_ON_ACTIVITY;
+ mlxsw_reg_ratr_pack(ratr_pl, op, true, MLXSW_REG_RATR_TYPE_ETHERNET,
+ adj_index, rif_index);
+ switch (nh->action) {
+ case MLXSW_SP_NEXTHOP_ACTION_FORWARD:
+ mlxsw_reg_ratr_eth_entry_pack(ratr_pl, neigh_entry->ha);
+ break;
+ case MLXSW_SP_NEXTHOP_ACTION_DISCARD:
mlxsw_reg_ratr_trap_action_set(ratr_pl,
MLXSW_REG_RATR_TRAP_ACTION_DISCARD_ERRORS);
- else
- mlxsw_reg_ratr_eth_entry_pack(ratr_pl, neigh_entry->ha);
+ break;
+ case MLXSW_SP_NEXTHOP_ACTION_TRAP:
+ mlxsw_reg_ratr_trap_action_set(ratr_pl,
+ MLXSW_REG_RATR_TRAP_ACTION_TRAP);
+ mlxsw_reg_ratr_trap_id_set(ratr_pl, MLXSW_TRAP_ID_RTR_EGRESS0);
+ break;
+ default:
+ WARN_ON_ONCE(1);
+ return -EINVAL;
+ }
if (nh->counter_valid)
mlxsw_reg_ratr_counter_pack(ratr_pl, nh->counter_index, true);
else
@@ -3425,15 +3458,17 @@ static int __mlxsw_sp_nexthop_update(struct mlxsw_sp *mlxsw_sp, u32 adj_index,
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ratr), ratr_pl);
}
-int mlxsw_sp_nexthop_update(struct mlxsw_sp *mlxsw_sp, u32 adj_index,
- struct mlxsw_sp_nexthop *nh)
+int mlxsw_sp_nexthop_eth_update(struct mlxsw_sp *mlxsw_sp, u32 adj_index,
+ struct mlxsw_sp_nexthop *nh, bool force,
+ char *ratr_pl)
{
int i;
for (i = 0; i < nh->num_adj_entries; i++) {
int err;
- err = __mlxsw_sp_nexthop_update(mlxsw_sp, adj_index + i, nh);
+ err = __mlxsw_sp_nexthop_eth_update(mlxsw_sp, adj_index + i,
+ nh, force, ratr_pl);
if (err)
return err;
}
@@ -3443,17 +3478,20 @@ int mlxsw_sp_nexthop_update(struct mlxsw_sp *mlxsw_sp, u32 adj_index,
static int __mlxsw_sp_nexthop_ipip_update(struct mlxsw_sp *mlxsw_sp,
u32 adj_index,
- struct mlxsw_sp_nexthop *nh)
+ struct mlxsw_sp_nexthop *nh,
+ bool force, char *ratr_pl)
{
const struct mlxsw_sp_ipip_ops *ipip_ops;
ipip_ops = mlxsw_sp->router->ipip_ops_arr[nh->ipip_entry->ipipt];
- return ipip_ops->nexthop_update(mlxsw_sp, adj_index, nh->ipip_entry);
+ return ipip_ops->nexthop_update(mlxsw_sp, adj_index, nh->ipip_entry,
+ force, ratr_pl);
}
static int mlxsw_sp_nexthop_ipip_update(struct mlxsw_sp *mlxsw_sp,
u32 adj_index,
- struct mlxsw_sp_nexthop *nh)
+ struct mlxsw_sp_nexthop *nh, bool force,
+ char *ratr_pl)
{
int i;
@@ -3461,7 +3499,7 @@ static int mlxsw_sp_nexthop_ipip_update(struct mlxsw_sp *mlxsw_sp,
int err;
err = __mlxsw_sp_nexthop_ipip_update(mlxsw_sp, adj_index + i,
- nh);
+ nh, force, ratr_pl);
if (err)
return err;
}
@@ -3469,11 +3507,29 @@ static int mlxsw_sp_nexthop_ipip_update(struct mlxsw_sp *mlxsw_sp,
return 0;
}
+static int mlxsw_sp_nexthop_update(struct mlxsw_sp *mlxsw_sp, u32 adj_index,
+ struct mlxsw_sp_nexthop *nh, bool force,
+ char *ratr_pl)
+{
+ /* When action is discard or trap, the nexthop must be
+ * programmed as an Ethernet nexthop.
+ */
+ if (nh->type == MLXSW_SP_NEXTHOP_TYPE_ETH ||
+ nh->action == MLXSW_SP_NEXTHOP_ACTION_DISCARD ||
+ nh->action == MLXSW_SP_NEXTHOP_ACTION_TRAP)
+ return mlxsw_sp_nexthop_eth_update(mlxsw_sp, adj_index, nh,
+ force, ratr_pl);
+ else
+ return mlxsw_sp_nexthop_ipip_update(mlxsw_sp, adj_index, nh,
+ force, ratr_pl);
+}
+
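Note the dispatch rule encoded above: a nexthop whose action is discard or trap is always written as an Ethernet adjacency entry, even if its type is IPIP, because such entries carry no encapsulation and only need a RIF; __mlxsw_sp_nexthop_eth_update() falls back to the loopback RIF when nh->rif is NULL.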
static int
mlxsw_sp_nexthop_group_update(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_nexthop_group_info *nhgi,
bool reallocate)
{
+ char ratr_pl[MLXSW_REG_RATR_LEN];
u32 adj_index = nhgi->adj_index; /* base */
struct mlxsw_sp_nexthop *nh;
int i;
@@ -3489,16 +3545,8 @@ mlxsw_sp_nexthop_group_update(struct mlxsw_sp *mlxsw_sp,
if (nh->update || reallocate) {
int err = 0;
- switch (nh->type) {
- case MLXSW_SP_NEXTHOP_TYPE_ETH:
- err = mlxsw_sp_nexthop_update
- (mlxsw_sp, adj_index, nh);
- break;
- case MLXSW_SP_NEXTHOP_TYPE_IPIP:
- err = mlxsw_sp_nexthop_ipip_update
- (mlxsw_sp, adj_index, nh);
- break;
- }
+ err = mlxsw_sp_nexthop_update(mlxsw_sp, adj_index, nh,
+ true, ratr_pl);
if (err)
return err;
nh->update = 0;
@@ -3524,34 +3572,69 @@ mlxsw_sp_nexthop_fib_entries_update(struct mlxsw_sp *mlxsw_sp,
return 0;
}
-static void mlxsw_sp_adj_grp_size_round_up(u16 *p_adj_grp_size)
+struct mlxsw_sp_adj_grp_size_range {
+ u16 start; /* Inclusive */
+ u16 end; /* Inclusive */
+};
+
+/* Ordered by range start value */
+static const struct mlxsw_sp_adj_grp_size_range
+mlxsw_sp1_adj_grp_size_ranges[] = {
+ { .start = 1, .end = 64 },
+ { .start = 512, .end = 512 },
+ { .start = 1024, .end = 1024 },
+ { .start = 2048, .end = 2048 },
+ { .start = 4096, .end = 4096 },
+};
+
+/* Ordered by range start value */
+static const struct mlxsw_sp_adj_grp_size_range
+mlxsw_sp2_adj_grp_size_ranges[] = {
+ { .start = 1, .end = 128 },
+ { .start = 256, .end = 256 },
+ { .start = 512, .end = 512 },
+ { .start = 1024, .end = 1024 },
+ { .start = 2048, .end = 2048 },
+ { .start = 4096, .end = 4096 },
+};
+
+static void mlxsw_sp_adj_grp_size_round_up(const struct mlxsw_sp *mlxsw_sp,
+ u16 *p_adj_grp_size)
{
- /* Valid sizes for an adjacency group are:
- * 1-64, 512, 1024, 2048 and 4096.
- */
- if (*p_adj_grp_size <= 64)
- return;
- else if (*p_adj_grp_size <= 512)
- *p_adj_grp_size = 512;
- else if (*p_adj_grp_size <= 1024)
- *p_adj_grp_size = 1024;
- else if (*p_adj_grp_size <= 2048)
- *p_adj_grp_size = 2048;
- else
- *p_adj_grp_size = 4096;
+ int i;
+
+ for (i = 0; i < mlxsw_sp->router->adj_grp_size_ranges_count; i++) {
+ const struct mlxsw_sp_adj_grp_size_range *size_range;
+
+ size_range = &mlxsw_sp->router->adj_grp_size_ranges[i];
+
+ if (*p_adj_grp_size >= size_range->start &&
+ *p_adj_grp_size <= size_range->end)
+ return;
+
+ if (*p_adj_grp_size <= size_range->end) {
+ *p_adj_grp_size = size_range->end;
+ return;
+ }
+ }
}
-static void mlxsw_sp_adj_grp_size_round_down(u16 *p_adj_grp_size,
+static void mlxsw_sp_adj_grp_size_round_down(const struct mlxsw_sp *mlxsw_sp,
+ u16 *p_adj_grp_size,
unsigned int alloc_size)
{
- if (alloc_size >= 4096)
- *p_adj_grp_size = 4096;
- else if (alloc_size >= 2048)
- *p_adj_grp_size = 2048;
- else if (alloc_size >= 1024)
- *p_adj_grp_size = 1024;
- else if (alloc_size >= 512)
- *p_adj_grp_size = 512;
+ int i;
+
+ for (i = mlxsw_sp->router->adj_grp_size_ranges_count - 1; i >= 0; i--) {
+ const struct mlxsw_sp_adj_grp_size_range *size_range;
+
+ size_range = &mlxsw_sp->router->adj_grp_size_ranges[i];
+
+ if (alloc_size >= size_range->end) {
+ *p_adj_grp_size = size_range->end;
+ return;
+ }
+ }
}
static int mlxsw_sp_fix_adj_grp_size(struct mlxsw_sp *mlxsw_sp,
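The two rounding helpers above snap a requested group size onto the per-ASIC table of supported sizes. A minimal userspace model of the same logic, using the Spectrum-1 ranges (names below are illustrative only):

#include <stdio.h>

struct size_range { unsigned short start, end; };	/* both inclusive */

static const struct size_range ranges[] = {
	{ 1, 64 }, { 512, 512 }, { 1024, 1024 }, { 2048, 2048 }, { 4096, 4096 },
};
static const int ranges_count = sizeof(ranges) / sizeof(ranges[0]);

/* Round up to the nearest supported size: a size inside a range is kept,
 * a size that falls into a gap snaps to the end of the next range.
 */
static void round_up(unsigned short *size)
{
	int i;

	for (i = 0; i < ranges_count; i++) {
		if (*size >= ranges[i].start && *size <= ranges[i].end)
			return;
		if (*size <= ranges[i].end) {
			*size = ranges[i].end;
			return;
		}
	}
}

/* Round down to the largest range end the allocation can cover. */
static void round_down(unsigned short *size, unsigned int alloc_size)
{
	int i;

	for (i = ranges_count - 1; i >= 0; i--) {
		if (alloc_size >= ranges[i].end) {
			*size = ranges[i].end;
			return;
		}
	}
}

int main(void)
{
	unsigned short s = 70;

	round_up(&s);
	printf("70 rounds up to %u\n", s);	/* gap 65..511 -> 512 */
	round_down(&s, 1500);
	printf("alloc 1500 rounds down to %u\n", s);	/* -> 1024 */
	return 0;
}

So a request for 70 entries lands on 512 (there is no valid size in the 65..511 gap), and if KVDL can only hand back 1500 entries the group is shrunk to 1024, the largest supported size that fits.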
@@ -3563,7 +3646,7 @@ static int mlxsw_sp_fix_adj_grp_size(struct mlxsw_sp *mlxsw_sp,
/* Round up the requested group size to the next size supported
* by the device and make sure the request can be satisfied.
*/
- mlxsw_sp_adj_grp_size_round_up(p_adj_grp_size);
+ mlxsw_sp_adj_grp_size_round_up(mlxsw_sp, p_adj_grp_size);
err = mlxsw_sp_kvdl_alloc_count_query(mlxsw_sp,
MLXSW_SP_KVDL_ENTRY_TYPE_ADJ,
*p_adj_grp_size, &alloc_size);
@@ -3573,7 +3656,7 @@ static int mlxsw_sp_fix_adj_grp_size(struct mlxsw_sp *mlxsw_sp,
* entries than requested. Try to use as much of them as
* possible.
*/
- mlxsw_sp_adj_grp_size_round_down(p_adj_grp_size, alloc_size);
+ mlxsw_sp_adj_grp_size_round_down(mlxsw_sp, p_adj_grp_size, alloc_size);
return 0;
}
@@ -3681,9 +3764,29 @@ mlxsw_sp_nexthop6_group_offload_refresh(struct mlxsw_sp *mlxsw_sp,
}
static void
+mlxsw_sp_nexthop_bucket_offload_refresh(struct mlxsw_sp *mlxsw_sp,
+ const struct mlxsw_sp_nexthop *nh,
+ u16 bucket_index)
+{
+ struct mlxsw_sp_nexthop_group *nh_grp = nh->nhgi->nh_grp;
+ bool offload = false, trap = false;
+
+ if (nh->offloaded) {
+ if (nh->action == MLXSW_SP_NEXTHOP_ACTION_TRAP)
+ trap = true;
+ else
+ offload = true;
+ }
+ nexthop_bucket_set_hw_flags(mlxsw_sp_net(mlxsw_sp), nh_grp->obj.id,
+ bucket_index, offload, trap);
+}
+
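The per-bucket flag refresh above boils down to a small mapping, reported to the nexthop code via nexthop_bucket_set_hw_flags():

	nh->offloaded	nh->action			reported flags
	0		any				none (bucket not programmed)
	1		MLXSW_SP_NEXTHOP_ACTION_TRAP	trap
	1		anything else			offload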
+static void
mlxsw_sp_nexthop_obj_group_offload_refresh(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_nexthop_group *nh_grp)
{
+ int i;
+
/* Do not update the flags if the nexthop group is being destroyed
* since:
* 1. The nexthop object is being deleted, in which case the flags are
@@ -3697,6 +3800,18 @@ mlxsw_sp_nexthop_obj_group_offload_refresh(struct mlxsw_sp *mlxsw_sp,
nexthop_set_hw_flags(mlxsw_sp_net(mlxsw_sp), nh_grp->obj.id,
nh_grp->nhgi->adj_index_valid, false);
+
+ /* Update flags of individual nexthop buckets in case of a resilient
+ * nexthop group.
+ */
+ if (!nh_grp->nhgi->is_resilient)
+ return;
+
+ for (i = 0; i < nh_grp->nhgi->count; i++) {
+ struct mlxsw_sp_nexthop *nh = &nh_grp->nhgi->nexthops[i];
+
+ mlxsw_sp_nexthop_bucket_offload_refresh(mlxsw_sp, nh, i);
+ }
}
static void
@@ -3750,6 +3865,10 @@ mlxsw_sp_nexthop_group_refresh(struct mlxsw_sp *mlxsw_sp,
dev_warn(mlxsw_sp->bus_info->dev, "Failed to update neigh MAC in adjacency table.\n");
goto set_trap;
}
+ /* Flags of individual nexthop buckets might need to be
+ * updated.
+ */
+ mlxsw_sp_nexthop_group_offload_refresh(mlxsw_sp, nh_grp);
return 0;
}
mlxsw_sp_nexthop_group_normalize(nhgi);
@@ -3832,10 +3951,15 @@ set_trap:
static void __mlxsw_sp_nexthop_neigh_update(struct mlxsw_sp_nexthop *nh,
bool removing)
{
- if (!removing)
+ if (!removing) {
+ nh->action = MLXSW_SP_NEXTHOP_ACTION_FORWARD;
nh->should_offload = 1;
- else
+ } else if (nh->nhgi->is_resilient) {
+ nh->action = MLXSW_SP_NEXTHOP_ACTION_TRAP;
+ nh->should_offload = 1;
+ } else {
nh->should_offload = 0;
+ }
nh->update = 1;
}
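This is the behavioural core of resilient-group support: when a neighbour becomes invalid, an ordinary nexthop is simply taken out of the adjacency table, but a bucket of a resilient group must stay put so that the bucket-to-flow mapping is preserved. It is therefore kept offloaded and flipped to the trap action, letting the CPU see the traffic and re-resolve the neighbour.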
@@ -4250,6 +4374,85 @@ static void mlxsw_sp_nexthop_rif_gone_sync(struct mlxsw_sp *mlxsw_sp,
}
}
+static void
+mlxsw_sp_nh_grp_activity_get(struct mlxsw_sp *mlxsw_sp,
+ const struct mlxsw_sp_nexthop_group *nh_grp,
+ unsigned long *activity)
+{
+ char *ratrad_pl;
+ int i, err;
+
+ ratrad_pl = kmalloc(MLXSW_REG_RATRAD_LEN, GFP_KERNEL);
+ if (!ratrad_pl)
+ return;
+
+ mlxsw_reg_ratrad_pack(ratrad_pl, nh_grp->nhgi->adj_index,
+ nh_grp->nhgi->count);
+ err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ratrad), ratrad_pl);
+ if (err)
+ goto out;
+
+ for (i = 0; i < nh_grp->nhgi->count; i++) {
+ if (!mlxsw_reg_ratrad_activity_vector_get(ratrad_pl, i))
+ continue;
+ bitmap_set(activity, i, 1);
+ }
+
+out:
+ kfree(ratrad_pl);
+}
+
+#define MLXSW_SP_NH_GRP_ACTIVITY_UPDATE_INTERVAL 1000 /* ms */
+
+static void
+mlxsw_sp_nh_grp_activity_update(struct mlxsw_sp *mlxsw_sp,
+ const struct mlxsw_sp_nexthop_group *nh_grp)
+{
+ unsigned long *activity;
+
+ activity = bitmap_zalloc(nh_grp->nhgi->count, GFP_KERNEL);
+ if (!activity)
+ return;
+
+ mlxsw_sp_nh_grp_activity_get(mlxsw_sp, nh_grp, activity);
+ nexthop_res_grp_activity_update(mlxsw_sp_net(mlxsw_sp), nh_grp->obj.id,
+ nh_grp->nhgi->count, activity);
+
+ bitmap_free(activity);
+}
+
+static void
+mlxsw_sp_nh_grp_activity_work_schedule(struct mlxsw_sp *mlxsw_sp)
+{
+ unsigned int interval = MLXSW_SP_NH_GRP_ACTIVITY_UPDATE_INTERVAL;
+
+ mlxsw_core_schedule_dw(&mlxsw_sp->router->nh_grp_activity_dw,
+ msecs_to_jiffies(interval));
+}
+
+static void mlxsw_sp_nh_grp_activity_work(struct work_struct *work)
+{
+ struct mlxsw_sp_nexthop_group_info *nhgi;
+ struct mlxsw_sp_router *router;
+ bool reschedule = false;
+
+ router = container_of(work, struct mlxsw_sp_router,
+ nh_grp_activity_dw.work);
+
+ mutex_lock(&router->lock);
+
+ list_for_each_entry(nhgi, &router->nh_res_grp_list, list) {
+ mlxsw_sp_nh_grp_activity_update(router->mlxsw_sp, nhgi->nh_grp);
+ reschedule = true;
+ }
+
+ mutex_unlock(&router->lock);
+
+ if (!reschedule)
+ return;
+ mlxsw_sp_nh_grp_activity_work_schedule(router->mlxsw_sp);
+}
+
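The activity work follows the standard self-rearming delayed-work pattern: poll everything on the list under the router lock and reschedule only while there is something left to poll (the first resilient group to be added arms the work and the last one to be removed cancels it, as seen later in this patch). A generic sketch of the pattern, not tied to mlxsw:

#include <linux/workqueue.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/jiffies.h>

struct poller {
	struct delayed_work dw;
	struct list_head items;	/* guarded by lock */
	struct mutex lock;
};

static void poller_work(struct work_struct *work)
{
	struct poller *p = container_of(work, struct poller, dw.work);
	bool rearm;

	mutex_lock(&p->lock);
	/* ... query and clear hardware activity for each item ... */
	rearm = !list_empty(&p->items);
	mutex_unlock(&p->lock);

	if (rearm)
		schedule_delayed_work(&p->dw, msecs_to_jiffies(1000));
}

The driver uses its own mlxsw_core_schedule_dw() wrapper rather than the generic schedule_delayed_work() shown here.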
static int
mlxsw_sp_nexthop_obj_single_validate(struct mlxsw_sp *mlxsw_sp,
const struct nh_notifier_single_info *nh,
@@ -4268,6 +4471,29 @@ mlxsw_sp_nexthop_obj_single_validate(struct mlxsw_sp *mlxsw_sp,
}
static int
+mlxsw_sp_nexthop_obj_group_entry_validate(struct mlxsw_sp *mlxsw_sp,
+ const struct nh_notifier_single_info *nh,
+ struct netlink_ext_ack *extack)
+{
+ int err;
+
+ err = mlxsw_sp_nexthop_obj_single_validate(mlxsw_sp, nh, extack);
+ if (err)
+ return err;
+
+ /* Device-only nexthops with an IPIP device are programmed as
+ * encapsulating adjacency entries.
+ */
+ if (!nh->gw_family && !nh->is_reject &&
+ !mlxsw_sp_netdev_ipip_type(mlxsw_sp, nh->dev, NULL)) {
+ NL_SET_ERR_MSG_MOD(extack, "Nexthop group entry does not have a gateway");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int
mlxsw_sp_nexthop_obj_group_validate(struct mlxsw_sp *mlxsw_sp,
const struct nh_notifier_grp_info *nh_grp,
struct netlink_ext_ack *extack)
@@ -4284,21 +4510,83 @@ mlxsw_sp_nexthop_obj_group_validate(struct mlxsw_sp *mlxsw_sp,
int err;
nh = &nh_grp->nh_entries[i].nh;
- err = mlxsw_sp_nexthop_obj_single_validate(mlxsw_sp, nh,
- extack);
+ err = mlxsw_sp_nexthop_obj_group_entry_validate(mlxsw_sp, nh,
+ extack);
if (err)
return err;
+ }
- /* Device only nexthops with an IPIP device are programmed as
- * encapsulating adjacency entries.
- */
- if (!nh->gw_family && !nh->is_reject &&
- !mlxsw_sp_netdev_ipip_type(mlxsw_sp, nh->dev, NULL)) {
- NL_SET_ERR_MSG_MOD(extack, "Nexthop group entry does not have a gateway");
- return -EINVAL;
+ return 0;
+}
+
+static int
+mlxsw_sp_nexthop_obj_res_group_size_validate(struct mlxsw_sp *mlxsw_sp,
+ const struct nh_notifier_res_table_info *nh_res_table,
+ struct netlink_ext_ack *extack)
+{
+ unsigned int alloc_size;
+ bool valid_size = false;
+ int err, i;
+
+ if (nh_res_table->num_nh_buckets < 32) {
+ NL_SET_ERR_MSG_MOD(extack, "Minimum number of buckets is 32");
+ return -EINVAL;
+ }
+
+ for (i = 0; i < mlxsw_sp->router->adj_grp_size_ranges_count; i++) {
+ const struct mlxsw_sp_adj_grp_size_range *size_range;
+
+ size_range = &mlxsw_sp->router->adj_grp_size_ranges[i];
+
+ if (nh_res_table->num_nh_buckets >= size_range->start &&
+ nh_res_table->num_nh_buckets <= size_range->end) {
+ valid_size = true;
+ break;
}
}
+ if (!valid_size) {
+ NL_SET_ERR_MSG_MOD(extack, "Invalid number of buckets");
+ return -EINVAL;
+ }
+
+ err = mlxsw_sp_kvdl_alloc_count_query(mlxsw_sp,
+ MLXSW_SP_KVDL_ENTRY_TYPE_ADJ,
+ nh_res_table->num_nh_buckets,
+ &alloc_size);
+ if (err || nh_res_table->num_nh_buckets != alloc_size) {
+ NL_SET_ERR_MSG_MOD(extack, "Number of buckets does not fit allocation size of any KVDL partition");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
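Worked example: with the Spectrum-2 ranges above, a resilient group with 256 buckets passes the range check, 16 buckets fails the minimum-of-32 check, and 300 buckets is rejected because it falls into the unsupported 257..511 gap. The final KVDL query additionally insists on an exact allocation, since every bucket must map 1:1 onto an adjacency entry.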
+static int
+mlxsw_sp_nexthop_obj_res_group_validate(struct mlxsw_sp *mlxsw_sp,
+ const struct nh_notifier_res_table_info *nh_res_table,
+ struct netlink_ext_ack *extack)
+{
+ int err;
+ u16 i;
+
+ err = mlxsw_sp_nexthop_obj_res_group_size_validate(mlxsw_sp,
+ nh_res_table,
+ extack);
+ if (err)
+ return err;
+
+ for (i = 0; i < nh_res_table->num_nh_buckets; i++) {
+ const struct nh_notifier_single_info *nh;
+ int err;
+
+ nh = &nh_res_table->nhs[i];
+ err = mlxsw_sp_nexthop_obj_group_entry_validate(mlxsw_sp, nh,
+ extack);
+ if (err)
+ return err;
+ }
+
return 0;
}
@@ -4306,7 +4594,11 @@ static int mlxsw_sp_nexthop_obj_validate(struct mlxsw_sp *mlxsw_sp,
unsigned long event,
struct nh_notifier_info *info)
{
- if (event != NEXTHOP_EVENT_REPLACE)
+ struct nh_notifier_single_info *nh;
+
+ if (event != NEXTHOP_EVENT_REPLACE &&
+ event != NEXTHOP_EVENT_RES_TABLE_PRE_REPLACE &&
+ event != NEXTHOP_EVENT_BUCKET_REPLACE)
return 0;
switch (info->type) {
@@ -4317,6 +4609,14 @@ static int mlxsw_sp_nexthop_obj_validate(struct mlxsw_sp *mlxsw_sp,
return mlxsw_sp_nexthop_obj_group_validate(mlxsw_sp,
info->nh_grp,
info->extack);
+ case NH_NOTIFIER_INFO_TYPE_RES_TABLE:
+ return mlxsw_sp_nexthop_obj_res_group_validate(mlxsw_sp,
+ info->nh_res_table,
+ info->extack);
+ case NH_NOTIFIER_INFO_TYPE_RES_BUCKET:
+ nh = &info->nh_res_bucket->new_nh;
+ return mlxsw_sp_nexthop_obj_group_entry_validate(mlxsw_sp, nh,
+ info->extack);
default:
NL_SET_ERR_MSG_MOD(info->extack, "Unsupported nexthop type");
return -EOPNOTSUPP;
@@ -4334,6 +4634,7 @@ static bool mlxsw_sp_nexthop_obj_is_gateway(struct mlxsw_sp *mlxsw_sp,
return info->nh->gw_family || info->nh->is_reject ||
mlxsw_sp_netdev_ipip_type(mlxsw_sp, dev, NULL);
case NH_NOTIFIER_INFO_TYPE_GRP:
+ case NH_NOTIFIER_INFO_TYPE_RES_TABLE:
/* Already validated earlier. */
return true;
default:
@@ -4346,7 +4647,7 @@ static void mlxsw_sp_nexthop_obj_blackhole_init(struct mlxsw_sp *mlxsw_sp,
{
u16 lb_rif_index = mlxsw_sp->router->lb_rif_index;
- nh->discard = 1;
+ nh->action = MLXSW_SP_NEXTHOP_ACTION_DISCARD;
nh->should_offload = 1;
/* While nexthops that discard packets do not forward packets
* via an egress RIF, they still need to be programmed using a
@@ -4398,6 +4699,15 @@ mlxsw_sp_nexthop_obj_init(struct mlxsw_sp *mlxsw_sp,
if (nh_obj->is_reject)
mlxsw_sp_nexthop_obj_blackhole_init(mlxsw_sp, nh);
+ /* In a resilient nexthop group, all the nexthops must be written to
+ * the adjacency table, even if they do not have a valid neighbour or
+ * RIF.
+ */
+ if (nh_grp->nhgi->is_resilient && !nh->should_offload) {
+ nh->action = MLXSW_SP_NEXTHOP_ACTION_TRAP;
+ nh->should_offload = 1;
+ }
+
return 0;
err_type_init:
@@ -4409,11 +4719,12 @@ err_type_init:
static void mlxsw_sp_nexthop_obj_fini(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_nexthop *nh)
{
- if (nh->discard)
+ if (nh->action == MLXSW_SP_NEXTHOP_ACTION_DISCARD)
mlxsw_sp_nexthop_obj_blackhole_fini(mlxsw_sp, nh);
mlxsw_sp_nexthop_type_fini(mlxsw_sp, nh);
list_del(&nh->router_list_node);
mlxsw_sp_nexthop_counter_free(mlxsw_sp, nh);
+ nh->should_offload = 0;
}
static int
@@ -4423,6 +4734,7 @@ mlxsw_sp_nexthop_obj_group_info_init(struct mlxsw_sp *mlxsw_sp,
{
struct mlxsw_sp_nexthop_group_info *nhgi;
struct mlxsw_sp_nexthop *nh;
+ bool is_resilient = false;
unsigned int nhs;
int err, i;
@@ -4433,6 +4745,10 @@ mlxsw_sp_nexthop_obj_group_info_init(struct mlxsw_sp *mlxsw_sp,
case NH_NOTIFIER_INFO_TYPE_GRP:
nhs = info->nh_grp->num_nh;
break;
+ case NH_NOTIFIER_INFO_TYPE_RES_TABLE:
+ nhs = info->nh_res_table->num_nh_buckets;
+ is_resilient = true;
+ break;
default:
return -EINVAL;
}
@@ -4443,6 +4759,7 @@ mlxsw_sp_nexthop_obj_group_info_init(struct mlxsw_sp *mlxsw_sp,
nh_grp->nhgi = nhgi;
nhgi->nh_grp = nh_grp;
nhgi->gateway = mlxsw_sp_nexthop_obj_is_gateway(mlxsw_sp, info);
+ nhgi->is_resilient = is_resilient;
nhgi->count = nhs;
for (i = 0; i < nhgi->count; i++) {
struct nh_notifier_single_info *nh_obj;
@@ -4458,6 +4775,10 @@ mlxsw_sp_nexthop_obj_group_info_init(struct mlxsw_sp *mlxsw_sp,
nh_obj = &info->nh_grp->nh_entries[i].nh;
weight = info->nh_grp->nh_entries[i].weight;
break;
+ case NH_NOTIFIER_INFO_TYPE_RES_TABLE:
+ nh_obj = &info->nh_res_table->nhs[i];
+ weight = 1;
+ break;
default:
err = -EINVAL;
goto err_nexthop_obj_init;
@@ -4473,6 +4794,15 @@ mlxsw_sp_nexthop_obj_group_info_init(struct mlxsw_sp *mlxsw_sp,
goto err_group_refresh;
}
+ /* Add resilient nexthop groups to a list so that the activity of their
+ * nexthop buckets will be periodically queried and cleared.
+ */
+ if (nhgi->is_resilient) {
+ if (list_empty(&mlxsw_sp->router->nh_res_grp_list))
+ mlxsw_sp_nh_grp_activity_work_schedule(mlxsw_sp);
+ list_add(&nhgi->list, &mlxsw_sp->router->nh_res_grp_list);
+ }
+
return 0;
err_group_refresh:
@@ -4491,8 +4821,15 @@ mlxsw_sp_nexthop_obj_group_info_fini(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_nexthop_group *nh_grp)
{
struct mlxsw_sp_nexthop_group_info *nhgi = nh_grp->nhgi;
+ struct mlxsw_sp_router *router = mlxsw_sp->router;
int i;
+ if (nhgi->is_resilient) {
+ list_del(&nhgi->list);
+ if (list_empty(&mlxsw_sp->router->nh_res_grp_list))
+ cancel_delayed_work(&router->nh_grp_activity_dw);
+ }
+
for (i = nhgi->count - 1; i >= 0; i--) {
struct mlxsw_sp_nexthop *nh = &nhgi->nexthops[i];
@@ -4685,6 +5022,136 @@ static void mlxsw_sp_nexthop_obj_del(struct mlxsw_sp *mlxsw_sp,
mlxsw_sp_nexthop_obj_group_destroy(mlxsw_sp, nh_grp);
}
+static int mlxsw_sp_nexthop_obj_bucket_query(struct mlxsw_sp *mlxsw_sp,
+ u32 adj_index, char *ratr_pl)
+{
+ MLXSW_REG_ZERO(ratr, ratr_pl);
+ mlxsw_reg_ratr_op_set(ratr_pl, MLXSW_REG_RATR_OP_QUERY_READ);
+ mlxsw_reg_ratr_adjacency_index_low_set(ratr_pl, adj_index);
+ mlxsw_reg_ratr_adjacency_index_high_set(ratr_pl, adj_index >> 16);
+
+ return mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ratr), ratr_pl);
+}
+
+static int mlxsw_sp_nexthop_obj_bucket_compare(char *ratr_pl, char *ratr_pl_new)
+{
+ /* Clear the opcode and activity on both the old and new payload as
+ * they are irrelevant for the comparison.
+ */
+ mlxsw_reg_ratr_op_set(ratr_pl, MLXSW_REG_RATR_OP_QUERY_READ);
+ mlxsw_reg_ratr_a_set(ratr_pl, 0);
+ mlxsw_reg_ratr_op_set(ratr_pl_new, MLXSW_REG_RATR_OP_QUERY_READ);
+ mlxsw_reg_ratr_a_set(ratr_pl_new, 0);
+
+ /* If the contents of the adjacency entry are consistent with the
+ * replacement request, then replacement was successful.
+ */
+ if (!memcmp(ratr_pl, ratr_pl_new, MLXSW_REG_RATR_LEN))
+ return 0;
+
+ return -EINVAL;
+}
+
+static int
+mlxsw_sp_nexthop_obj_bucket_adj_update(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_nexthop *nh,
+ struct nh_notifier_info *info)
+{
+ u16 bucket_index = info->nh_res_bucket->bucket_index;
+ struct netlink_ext_ack *extack = info->extack;
+ bool force = info->nh_res_bucket->force;
+ char ratr_pl_new[MLXSW_REG_RATR_LEN];
+ char ratr_pl[MLXSW_REG_RATR_LEN];
+ u32 adj_index;
+ int err;
+
+ /* No point in trying an atomic replacement if the idle timer interval
+ * is smaller than the interval in which we query and clear activity.
+ */
+ if (!force && info->nh_res_bucket->idle_timer_ms <
+ MLXSW_SP_NH_GRP_ACTIVITY_UPDATE_INTERVAL)
+ force = true;
+
+ adj_index = nh->nhgi->adj_index + bucket_index;
+ err = mlxsw_sp_nexthop_update(mlxsw_sp, adj_index, nh, force, ratr_pl);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Failed to overwrite nexthop bucket");
+ return err;
+ }
+
+ if (!force) {
+ err = mlxsw_sp_nexthop_obj_bucket_query(mlxsw_sp, adj_index,
+ ratr_pl_new);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Failed to query nexthop bucket state after replacement. State might be inconsistent");
+ return err;
+ }
+
+ err = mlxsw_sp_nexthop_obj_bucket_compare(ratr_pl, ratr_pl_new);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Nexthop bucket was not replaced because it was active during replacement");
+ return err;
+ }
+ }
+
+ nh->update = 0;
+ nh->offloaded = 1;
+ mlxsw_sp_nexthop_bucket_offload_refresh(mlxsw_sp, nh, bucket_index);
+
+ return 0;
+}
+
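The sequence above is what makes non-forced bucket replacement atomic with respect to traffic: (1) the entry is written with the write-on-activity opcode, which the hardware only commits if the bucket saw no traffic since its activity bit was last cleared; (2) the entry is read back; (3) the masked payloads are compared, and a mismatch means the bucket was active, so the kernel is told via extack and can retry later. The force path skips the verification, and force is also turned on whenever the configured idle timer is shorter than the 1000 ms activity polling interval, since idleness could never be observed reliably in that case.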
+static int mlxsw_sp_nexthop_obj_bucket_replace(struct mlxsw_sp *mlxsw_sp,
+ struct nh_notifier_info *info)
+{
+ u16 bucket_index = info->nh_res_bucket->bucket_index;
+ struct netlink_ext_ack *extack = info->extack;
+ struct mlxsw_sp_nexthop_group_info *nhgi;
+ struct nh_notifier_single_info *nh_obj;
+ struct mlxsw_sp_nexthop_group *nh_grp;
+ struct mlxsw_sp_nexthop *nh;
+ int err;
+
+ nh_grp = mlxsw_sp_nexthop_obj_group_lookup(mlxsw_sp, info->id);
+ if (!nh_grp) {
+ NL_SET_ERR_MSG_MOD(extack, "Nexthop group was not found");
+ return -EINVAL;
+ }
+
+ nhgi = nh_grp->nhgi;
+
+ if (bucket_index >= nhgi->count) {
+ NL_SET_ERR_MSG_MOD(extack, "Nexthop bucket index out of range");
+ return -EINVAL;
+ }
+
+ nh = &nhgi->nexthops[bucket_index];
+ mlxsw_sp_nexthop_obj_fini(mlxsw_sp, nh);
+
+ nh_obj = &info->nh_res_bucket->new_nh;
+ err = mlxsw_sp_nexthop_obj_init(mlxsw_sp, nh_grp, nh, nh_obj, 1);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Failed to initialize nexthop object for nexthop bucket replacement");
+ goto err_nexthop_obj_init;
+ }
+
+ err = mlxsw_sp_nexthop_obj_bucket_adj_update(mlxsw_sp, nh, info);
+ if (err)
+ goto err_nexthop_obj_bucket_adj_update;
+
+ return 0;
+
+err_nexthop_obj_bucket_adj_update:
+ mlxsw_sp_nexthop_obj_fini(mlxsw_sp, nh);
+err_nexthop_obj_init:
+ nh_obj = &info->nh_res_bucket->old_nh;
+ mlxsw_sp_nexthop_obj_init(mlxsw_sp, nh_grp, nh, nh_obj, 1);
+ /* The old adjacency entry was not overwritten */
+ nh->update = 0;
+ nh->offloaded = 1;
+ return err;
+}
+
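On failure the function rolls back by re-initializing the nexthop from old_nh: since the conditional write did not take effect, the hardware still holds the old adjacency entry, so restoring update = 0 and offloaded = 1 keeps the driver state consistent with it.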
static int mlxsw_sp_nexthop_obj_event(struct notifier_block *nb,
unsigned long event, void *ptr)
{
@@ -4699,8 +5166,6 @@ static int mlxsw_sp_nexthop_obj_event(struct notifier_block *nb,
mutex_lock(&router->lock);
- ASSERT_RTNL();
-
switch (event) {
case NEXTHOP_EVENT_REPLACE:
err = mlxsw_sp_nexthop_obj_new(router->mlxsw_sp, info);
@@ -4708,6 +5173,10 @@ static int mlxsw_sp_nexthop_obj_event(struct notifier_block *nb,
case NEXTHOP_EVENT_DEL:
mlxsw_sp_nexthop_obj_del(router->mlxsw_sp, info);
break;
+ case NEXTHOP_EVENT_BUCKET_REPLACE:
+ err = mlxsw_sp_nexthop_obj_bucket_replace(router->mlxsw_sp,
+ info);
+ break;
default:
break;
}
@@ -7667,7 +8136,7 @@ mlxsw_sp_rif_create(struct mlxsw_sp *mlxsw_sp,
int i, err;
type = mlxsw_sp_dev_rif_type(mlxsw_sp, params->dev);
- ops = mlxsw_sp->rif_ops_arr[type];
+ ops = mlxsw_sp->router->rif_ops_arr[type];
vr = mlxsw_sp_vr_get(mlxsw_sp, tb_id ? : RT_TABLE_MAIN, extack);
if (IS_ERR(vr))
@@ -8865,7 +9334,7 @@ static const struct mlxsw_sp_rif_ops mlxsw_sp1_rif_ipip_lb_ops = {
.deconfigure = mlxsw_sp1_rif_ipip_lb_deconfigure,
};
-const struct mlxsw_sp_rif_ops *mlxsw_sp1_rif_ops_arr[] = {
+static const struct mlxsw_sp_rif_ops *mlxsw_sp1_rif_ops_arr[] = {
[MLXSW_SP_RIF_TYPE_SUBPORT] = &mlxsw_sp_rif_subport_ops,
[MLXSW_SP_RIF_TYPE_VLAN] = &mlxsw_sp_rif_vlan_emu_ops,
[MLXSW_SP_RIF_TYPE_FID] = &mlxsw_sp_rif_fid_ops,
@@ -9050,7 +9519,7 @@ static const struct mlxsw_sp_rif_ops mlxsw_sp2_rif_ipip_lb_ops = {
.deconfigure = mlxsw_sp2_rif_ipip_lb_deconfigure,
};
-const struct mlxsw_sp_rif_ops *mlxsw_sp2_rif_ops_arr[] = {
+static const struct mlxsw_sp_rif_ops *mlxsw_sp2_rif_ops_arr[] = {
[MLXSW_SP_RIF_TYPE_SUBPORT] = &mlxsw_sp_rif_subport_ops,
[MLXSW_SP_RIF_TYPE_VLAN] = &mlxsw_sp_rif_vlan_emu_ops,
[MLXSW_SP_RIF_TYPE_FID] = &mlxsw_sp_rif_fid_ops,
@@ -9302,6 +9771,36 @@ static void mlxsw_sp_lb_rif_fini(struct mlxsw_sp *mlxsw_sp)
mlxsw_sp_router_ul_rif_put(mlxsw_sp, mlxsw_sp->router->lb_rif_index);
}
+static int mlxsw_sp1_router_init(struct mlxsw_sp *mlxsw_sp)
+{
+ size_t size_ranges_count = ARRAY_SIZE(mlxsw_sp1_adj_grp_size_ranges);
+
+ mlxsw_sp->router->rif_ops_arr = mlxsw_sp1_rif_ops_arr;
+ mlxsw_sp->router->adj_grp_size_ranges = mlxsw_sp1_adj_grp_size_ranges;
+ mlxsw_sp->router->adj_grp_size_ranges_count = size_ranges_count;
+
+ return 0;
+}
+
+const struct mlxsw_sp_router_ops mlxsw_sp1_router_ops = {
+ .init = mlxsw_sp1_router_init,
+};
+
+static int mlxsw_sp2_router_init(struct mlxsw_sp *mlxsw_sp)
+{
+ size_t size_ranges_count = ARRAY_SIZE(mlxsw_sp2_adj_grp_size_ranges);
+
+ mlxsw_sp->router->rif_ops_arr = mlxsw_sp2_rif_ops_arr;
+ mlxsw_sp->router->adj_grp_size_ranges = mlxsw_sp2_adj_grp_size_ranges;
+ mlxsw_sp->router->adj_grp_size_ranges_count = size_ranges_count;
+
+ return 0;
+}
+
+const struct mlxsw_sp_router_ops mlxsw_sp2_router_ops = {
+ .init = mlxsw_sp2_router_init,
+};
+
int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp,
struct netlink_ext_ack *extack)
{
@@ -9315,6 +9814,10 @@ int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp,
mlxsw_sp->router = router;
router->mlxsw_sp = mlxsw_sp;
+ err = mlxsw_sp->router_ops->init(mlxsw_sp);
+ if (err)
+ goto err_router_ops_init;
+
err = mlxsw_sp_router_xm_init(mlxsw_sp);
if (err)
goto err_xm_init;
@@ -9328,6 +9831,10 @@ int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp,
if (err)
goto err_ll_op_ctx_init;
+ INIT_LIST_HEAD(&mlxsw_sp->router->nh_res_grp_list);
+ INIT_DELAYED_WORK(&mlxsw_sp->router->nh_grp_activity_dw,
+ mlxsw_sp_nh_grp_activity_work);
+
INIT_LIST_HEAD(&mlxsw_sp->router->nexthop_neighs_list);
err = __mlxsw_sp_router_init(mlxsw_sp);
if (err)
@@ -9451,10 +9958,12 @@ err_ipips_init:
err_rifs_init:
__mlxsw_sp_router_fini(mlxsw_sp);
err_router_init:
+ cancel_delayed_work_sync(&mlxsw_sp->router->nh_grp_activity_dw);
mlxsw_sp_router_ll_op_ctx_fini(router);
err_ll_op_ctx_init:
mlxsw_sp_router_xm_fini(mlxsw_sp);
err_xm_init:
+err_router_ops_init:
mutex_destroy(&mlxsw_sp->router->lock);
kfree(mlxsw_sp->router);
return err;
@@ -9481,6 +9990,7 @@ void mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp)
mlxsw_sp_ipips_fini(mlxsw_sp);
mlxsw_sp_rifs_fini(mlxsw_sp);
__mlxsw_sp_router_fini(mlxsw_sp);
+ cancel_delayed_work_sync(&mlxsw_sp->router->nh_grp_activity_dw);
mlxsw_sp_router_ll_op_ctx_fini(mlxsw_sp->router);
mlxsw_sp_router_xm_fini(mlxsw_sp);
mutex_destroy(&mlxsw_sp->router->lock);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h
index 2875ee8ec537..be7708a375e1 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h
@@ -78,6 +78,10 @@ struct mlxsw_sp_router {
struct mlxsw_sp_fib_entry_op_ctx *ll_op_ctx;
u16 lb_rif_index;
struct mlxsw_sp_router_xm *xm;
+ const struct mlxsw_sp_adj_grp_size_range *adj_grp_size_ranges;
+ size_t adj_grp_size_ranges_count;
+ struct delayed_work nh_grp_activity_dw;
+ struct list_head nh_res_grp_list;
};
struct mlxsw_sp_fib_entry_priv {
@@ -195,20 +199,20 @@ mlxsw_sp_ipip_demote_tunnel_by_saddr(struct mlxsw_sp *mlxsw_sp,
const struct mlxsw_sp_ipip_entry *except);
struct mlxsw_sp_nexthop *mlxsw_sp_nexthop_next(struct mlxsw_sp_router *router,
struct mlxsw_sp_nexthop *nh);
-bool mlxsw_sp_nexthop_offload(struct mlxsw_sp_nexthop *nh);
+bool mlxsw_sp_nexthop_is_forward(const struct mlxsw_sp_nexthop *nh);
unsigned char *mlxsw_sp_nexthop_ha(struct mlxsw_sp_nexthop *nh);
int mlxsw_sp_nexthop_indexes(struct mlxsw_sp_nexthop *nh, u32 *p_adj_index,
u32 *p_adj_size, u32 *p_adj_hash_index);
struct mlxsw_sp_rif *mlxsw_sp_nexthop_rif(struct mlxsw_sp_nexthop *nh);
bool mlxsw_sp_nexthop_group_has_ipip(struct mlxsw_sp_nexthop *nh);
-bool mlxsw_sp_nexthop_is_discard(const struct mlxsw_sp_nexthop *nh);
#define mlxsw_sp_nexthop_for_each(nh, router) \
for (nh = mlxsw_sp_nexthop_next(router, NULL); nh; \
nh = mlxsw_sp_nexthop_next(router, nh))
int mlxsw_sp_nexthop_counter_get(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_nexthop *nh, u64 *p_counter);
-int mlxsw_sp_nexthop_update(struct mlxsw_sp *mlxsw_sp, u32 adj_index,
- struct mlxsw_sp_nexthop *nh);
+int mlxsw_sp_nexthop_eth_update(struct mlxsw_sp *mlxsw_sp, u32 adj_index,
+ struct mlxsw_sp_nexthop *nh, bool force,
+ char *ratr_pl);
void mlxsw_sp_nexthop_counter_alloc(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_nexthop *nh);
void mlxsw_sp_nexthop_counter_free(struct mlxsw_sp *mlxsw_sp,
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c
index 1892cea05ee7..3398cc01e5ec 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c
@@ -186,6 +186,7 @@ mlxsw_sp_span_entry_phys_configure(struct mlxsw_sp_span_entry *span_entry,
/* Create a new port analyzer entry for local_port. */
mlxsw_reg_mpat_pack(mpat_pl, pa_id, local_port, true,
MLXSW_REG_MPAT_SPAN_TYPE_LOCAL_ETH);
+ mlxsw_reg_mpat_session_id_set(mpat_pl, sparms.session_id);
mlxsw_reg_mpat_pide_set(mpat_pl, sparms.policer_enable);
mlxsw_reg_mpat_pid_set(mpat_pl, sparms.policer_id);
@@ -203,6 +204,7 @@ mlxsw_sp_span_entry_deconfigure_common(struct mlxsw_sp_span_entry *span_entry,
int pa_id = span_entry->id;
mlxsw_reg_mpat_pack(mpat_pl, pa_id, local_port, false, span_type);
+ mlxsw_reg_mpat_session_id_set(mpat_pl, span_entry->parms.session_id);
mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpat), mpat_pl);
}
@@ -938,7 +940,8 @@ mlxsw_sp_span_entry_find_by_parms(struct mlxsw_sp *mlxsw_sp,
if (refcount_read(&curr->ref_count) && curr->to_dev == to_dev &&
curr->parms.policer_enable == sparms->policer_enable &&
- curr->parms.policer_id == sparms->policer_id)
+ curr->parms.policer_id == sparms->policer_id &&
+ curr->parms.session_id == sparms->session_id)
return curr;
}
return NULL;
@@ -1085,6 +1088,7 @@ int mlxsw_sp_span_agent_get(struct mlxsw_sp *mlxsw_sp, int *p_span_id,
sparms.policer_id = parms->policer_id;
sparms.policer_enable = parms->policer_enable;
+ sparms.session_id = parms->session_id;
span_entry = mlxsw_sp_span_entry_get(mlxsw_sp, to_dev, ops, sparms);
if (!span_entry)
return -ENOBUFS;
@@ -1227,8 +1231,12 @@ __mlxsw_sp_span_trigger_port_bind(struct mlxsw_sp_span *span,
return -EINVAL;
}
+ if (trigger_entry->parms.probability_rate > MLXSW_REG_MPAR_RATE_MAX)
+ return -EINVAL;
+
mlxsw_reg_mpar_pack(mpar_pl, trigger_entry->local_port, i_e, enable,
- trigger_entry->parms.span_id);
+ trigger_entry->parms.span_id,
+ trigger_entry->parms.probability_rate);
return mlxsw_reg_write(span->mlxsw_sp->core, MLXSW_REG(mpar), mpar_pl);
}
@@ -1362,8 +1370,11 @@ mlxsw_sp2_span_trigger_global_bind(struct mlxsw_sp_span_trigger_entry *
return -EINVAL;
}
+ if (trigger_entry->parms.probability_rate > MLXSW_REG_MPAGR_RATE_MAX)
+ return -EINVAL;
+
mlxsw_reg_mpagr_pack(mpagr_pl, trigger, trigger_entry->parms.span_id,
- 1);
+ trigger_entry->parms.probability_rate);
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpagr), mpagr_pl);
}
@@ -1561,7 +1572,9 @@ int mlxsw_sp_span_agent_bind(struct mlxsw_sp *mlxsw_sp,
trigger,
mlxsw_sp_port);
if (trigger_entry) {
- if (trigger_entry->parms.span_id != parms->span_id)
+ if (trigger_entry->parms.span_id != parms->span_id ||
+ trigger_entry->parms.probability_rate !=
+ parms->probability_rate)
return -EINVAL;
refcount_inc(&trigger_entry->ref_count);
goto out;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.h
index aa1cd409c0e2..efaefd1ae863 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.h
@@ -13,6 +13,19 @@
struct mlxsw_sp;
struct mlxsw_sp_port;
+/* SPAN session identifiers that correspond to MLXSW_TRAP_ID_MIRROR_SESSION<i>
+ * trap identifiers. The session identifier is an attribute of the SPAN agent,
+ * which determines the trap identifier of packets that are mirrored to the
+ * CPU. Packets that are trapped to the CPU for the same logical reason (e.g.,
+ * buffer drops) should use the same session identifier.
+ */
+enum mlxsw_sp_span_session_id {
+ MLXSW_SP_SPAN_SESSION_ID_BUFFER,
+ MLXSW_SP_SPAN_SESSION_ID_SAMPLING,
+
+ __MLXSW_SP_SPAN_SESSION_ID_MAX = 8,
+};
+
struct mlxsw_sp_span_parms {
struct mlxsw_sp_port *dest_port; /* NULL for unoffloaded SPAN. */
unsigned int ttl;
@@ -23,6 +36,7 @@ struct mlxsw_sp_span_parms {
u16 vid;
u16 policer_id;
bool policer_enable;
+ enum mlxsw_sp_span_session_id session_id;
};
enum mlxsw_sp_span_trigger {
@@ -35,12 +49,14 @@ enum mlxsw_sp_span_trigger {
struct mlxsw_sp_span_trigger_parms {
int span_id;
+ u32 probability_rate;
};
struct mlxsw_sp_span_agent_parms {
const struct net_device *to_dev;
u16 policer_id;
bool policer_enable;
+ enum mlxsw_sp_span_session_id session_id;
};
struct mlxsw_sp_span_entry_ops;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
index 23b7e8d6386b..eeccd586e781 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
@@ -98,6 +98,10 @@ struct mlxsw_sp_bridge_ops {
const struct mlxsw_sp_fid *fid);
};
+struct mlxsw_sp_switchdev_ops {
+ void (*init)(struct mlxsw_sp *mlxsw_sp);
+};
+
static int
mlxsw_sp_bridge_port_fdb_flush(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_bridge_port *bridge_port,
@@ -2296,7 +2300,7 @@ mlxsw_sp_bridge_8021ad_vxlan_join(struct mlxsw_sp_bridge_device *bridge_device,
vid, ETH_P_8021AD, extack);
}
-static const struct mlxsw_sp_bridge_ops mlxsw_sp_bridge_8021ad_ops = {
+static const struct mlxsw_sp_bridge_ops mlxsw_sp1_bridge_8021ad_ops = {
.port_join = mlxsw_sp_bridge_8021ad_port_join,
.port_leave = mlxsw_sp_bridge_8021ad_port_leave,
.vxlan_join = mlxsw_sp_bridge_8021ad_vxlan_join,
@@ -2305,6 +2309,53 @@ static const struct mlxsw_sp_bridge_ops mlxsw_sp_bridge_8021ad_ops = {
.fid_vid = mlxsw_sp_bridge_8021q_fid_vid,
};
+static int
+mlxsw_sp2_bridge_8021ad_port_join(struct mlxsw_sp_bridge_device *bridge_device,
+ struct mlxsw_sp_bridge_port *bridge_port,
+ struct mlxsw_sp_port *mlxsw_sp_port,
+ struct netlink_ext_ack *extack)
+{
+ int err;
+
+ /* The EtherType of decapsulated packets is determined at the egress
+ * port to allow 802.1d and 802.1ad bridges with VXLAN devices to
+ * co-exist.
+ */
+ err = mlxsw_sp_port_egress_ethtype_set(mlxsw_sp_port, ETH_P_8021AD);
+ if (err)
+ return err;
+
+ err = mlxsw_sp_bridge_8021ad_port_join(bridge_device, bridge_port,
+ mlxsw_sp_port, extack);
+ if (err)
+ goto err_bridge_8021ad_port_join;
+
+ return 0;
+
+err_bridge_8021ad_port_join:
+ mlxsw_sp_port_egress_ethtype_set(mlxsw_sp_port, ETH_P_8021Q);
+ return err;
+}
+
+static void
+mlxsw_sp2_bridge_8021ad_port_leave(struct mlxsw_sp_bridge_device *bridge_device,
+ struct mlxsw_sp_bridge_port *bridge_port,
+ struct mlxsw_sp_port *mlxsw_sp_port)
+{
+ mlxsw_sp_bridge_8021ad_port_leave(bridge_device, bridge_port,
+ mlxsw_sp_port);
+ mlxsw_sp_port_egress_ethtype_set(mlxsw_sp_port, ETH_P_8021Q);
+}
+
+static const struct mlxsw_sp_bridge_ops mlxsw_sp2_bridge_8021ad_ops = {
+ .port_join = mlxsw_sp2_bridge_8021ad_port_join,
+ .port_leave = mlxsw_sp2_bridge_8021ad_port_leave,
+ .vxlan_join = mlxsw_sp_bridge_8021ad_vxlan_join,
+ .fid_get = mlxsw_sp_bridge_8021q_fid_get,
+ .fid_lookup = mlxsw_sp_bridge_8021q_fid_lookup,
+ .fid_vid = mlxsw_sp_bridge_8021q_fid_vid,
+};
+
int mlxsw_sp_port_bridge_join(struct mlxsw_sp_port *mlxsw_sp_port,
struct net_device *brport_dev,
struct net_device *br_dev,
@@ -2865,7 +2916,8 @@ mlxsw_sp_switchdev_bridge_nve_fdb_event(struct mlxsw_sp_switchdev_event_work *
return;
if (switchdev_work->event == SWITCHDEV_FDB_ADD_TO_DEVICE &&
- !switchdev_work->fdb_info.added_by_user)
+ (!switchdev_work->fdb_info.added_by_user ||
+ switchdev_work->fdb_info.is_local))
return;
if (!netif_running(dev))
@@ -2920,7 +2972,7 @@ static void mlxsw_sp_switchdev_bridge_fdb_event_work(struct work_struct *work)
switch (switchdev_work->event) {
case SWITCHDEV_FDB_ADD_TO_DEVICE:
fdb_info = &switchdev_work->fdb_info;
- if (!fdb_info->added_by_user)
+ if (!fdb_info->added_by_user || fdb_info->is_local)
break;
err = mlxsw_sp_port_fdb_set(mlxsw_sp_port, fdb_info, true);
if (err)
@@ -3535,6 +3587,24 @@ static void mlxsw_sp_fdb_fini(struct mlxsw_sp *mlxsw_sp)
unregister_switchdev_notifier(&mlxsw_sp_switchdev_notifier);
}
+static void mlxsw_sp1_switchdev_init(struct mlxsw_sp *mlxsw_sp)
+{
+ mlxsw_sp->bridge->bridge_8021ad_ops = &mlxsw_sp1_bridge_8021ad_ops;
+}
+
+const struct mlxsw_sp_switchdev_ops mlxsw_sp1_switchdev_ops = {
+ .init = mlxsw_sp1_switchdev_init,
+};
+
+static void mlxsw_sp2_switchdev_init(struct mlxsw_sp *mlxsw_sp)
+{
+ mlxsw_sp->bridge->bridge_8021ad_ops = &mlxsw_sp2_bridge_8021ad_ops;
+}
+
+const struct mlxsw_sp_switchdev_ops mlxsw_sp2_switchdev_ops = {
+ .init = mlxsw_sp2_switchdev_init,
+};
+
int mlxsw_sp_switchdev_init(struct mlxsw_sp *mlxsw_sp)
{
struct mlxsw_sp_bridge *bridge;
@@ -3549,7 +3619,8 @@ int mlxsw_sp_switchdev_init(struct mlxsw_sp *mlxsw_sp)
bridge->bridge_8021q_ops = &mlxsw_sp_bridge_8021q_ops;
bridge->bridge_8021d_ops = &mlxsw_sp_bridge_8021d_ops;
- bridge->bridge_8021ad_ops = &mlxsw_sp_bridge_8021ad_ops;
+
+ mlxsw_sp->switchdev_ops->init(mlxsw_sp);
return mlxsw_sp_fdb_init(mlxsw_sp);
}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c
index 4ef12e3e021a..26d01adbedad 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c
@@ -49,8 +49,14 @@ enum {
#define MLXSW_SP_TRAP_METADATA DEVLINK_TRAP_METADATA_TYPE_F_IN_PORT
enum {
+ /* Packet was mirrored from ingress. */
+ MLXSW_SP_MIRROR_REASON_INGRESS = 1,
+ /* Packet was mirrored from policy engine. */
+ MLXSW_SP_MIRROR_REASON_POLICY_ENGINE = 2,
/* Packet was early dropped. */
MLXSW_SP_MIRROR_REASON_INGRESS_WRED = 9,
+ /* Packet was mirrored from egress. */
+ MLXSW_SP_MIRROR_REASON_EGRESS = 14,
};
static int mlxsw_sp_rx_listener(struct mlxsw_sp *mlxsw_sp, struct sk_buff *skb,
@@ -106,7 +112,7 @@ static void mlxsw_sp_rx_drop_listener(struct sk_buff *skb, u8 local_port,
static void mlxsw_sp_rx_acl_drop_listener(struct sk_buff *skb, u8 local_port,
void *trap_ctx)
{
- u32 cookie_index = mlxsw_skb_cb(skb)->cookie_index;
+ u32 cookie_index = mlxsw_skb_cb(skb)->rx_md_info.cookie_index;
const struct flow_action_cookie *fa_cookie;
struct devlink_port *in_devlink_port;
struct mlxsw_sp_port *mlxsw_sp_port;
@@ -202,21 +208,175 @@ static void mlxsw_sp_rx_ptp_listener(struct sk_buff *skb, u8 local_port,
mlxsw_sp_ptp_receive(mlxsw_sp, skb, local_port);
}
+static struct mlxsw_sp_port *
+mlxsw_sp_sample_tx_port_get(struct mlxsw_sp *mlxsw_sp,
+ const struct mlxsw_rx_md_info *rx_md_info)
+{
+ u8 local_port;
+
+ if (!rx_md_info->tx_port_valid)
+ return NULL;
+
+ if (rx_md_info->tx_port_is_lag)
+ local_port = mlxsw_core_lag_mapping_get(mlxsw_sp->core,
+ rx_md_info->tx_lag_id,
+ rx_md_info->tx_lag_port_index);
+ else
+ local_port = rx_md_info->tx_sys_port;
+
+ if (local_port >= mlxsw_core_max_ports(mlxsw_sp->core))
+ return NULL;
+
+ return mlxsw_sp->ports[local_port];
+}
+
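Egress-sampled packets carry metadata identifying the port they were about to leave through; when that port is a LAG member, the (lag_id, port_index) pair has to be mapped back to a local port before it can be translated into a netdev ifindex for psample.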
+/* The latency units are determined according to MOGCR.mirror_latency_units. It
+ * defaults to 64 nanoseconds.
+ */
+#define MLXSW_SP_MIRROR_LATENCY_SHIFT 6
+
+static void mlxsw_sp_psample_md_init(struct mlxsw_sp *mlxsw_sp,
+ struct psample_metadata *md,
+ struct sk_buff *skb, int in_ifindex,
+ bool truncate, u32 trunc_size)
+{
+ struct mlxsw_rx_md_info *rx_md_info = &mlxsw_skb_cb(skb)->rx_md_info;
+ struct mlxsw_sp_port *mlxsw_sp_port;
+
+ md->trunc_size = truncate ? trunc_size : skb->len;
+ md->in_ifindex = in_ifindex;
+ mlxsw_sp_port = mlxsw_sp_sample_tx_port_get(mlxsw_sp, rx_md_info);
+ md->out_ifindex = mlxsw_sp_port && mlxsw_sp_port->dev ?
+ mlxsw_sp_port->dev->ifindex : 0;
+ md->out_tc_valid = rx_md_info->tx_tc_valid;
+ md->out_tc = rx_md_info->tx_tc;
+ md->out_tc_occ_valid = rx_md_info->tx_congestion_valid;
+ md->out_tc_occ = rx_md_info->tx_congestion;
+ md->latency_valid = rx_md_info->latency_valid;
+ md->latency = rx_md_info->latency;
+ md->latency <<= MLXSW_SP_MIRROR_LATENCY_SHIFT;
+}
+
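The shift converts the hardware latency reading into nanoseconds: with the default MOGCR.mirror_latency_units of 64 ns, a raw value of 100 becomes 100 << 6 = 6400 ns.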
static void mlxsw_sp_rx_sample_listener(struct sk_buff *skb, u8 local_port,
void *trap_ctx)
{
struct mlxsw_sp *mlxsw_sp = devlink_trap_ctx_priv(trap_ctx);
+ struct mlxsw_sp_sample_trigger trigger;
+ struct mlxsw_sp_sample_params *params;
+ struct mlxsw_sp_port *mlxsw_sp_port;
+ struct psample_metadata md = {};
+ int err;
+
+ err = __mlxsw_sp_rx_no_mark_listener(skb, local_port, trap_ctx);
+ if (err)
+ return;
+
+ mlxsw_sp_port = mlxsw_sp->ports[local_port];
+ if (!mlxsw_sp_port)
+ goto out;
+
+ trigger.type = MLXSW_SP_SAMPLE_TRIGGER_TYPE_INGRESS;
+ trigger.local_port = local_port;
+ params = mlxsw_sp_sample_trigger_params_lookup(mlxsw_sp, &trigger);
+ if (!params)
+ goto out;
+
+ /* The psample module expects skb->data to point to the start of the
+ * Ethernet header.
+ */
+ skb_push(skb, ETH_HLEN);
+ mlxsw_sp_psample_md_init(mlxsw_sp, &md, skb,
+ mlxsw_sp_port->dev->ifindex, params->truncate,
+ params->trunc_size);
+ psample_sample_packet(params->psample_group, skb, params->rate, &md);
+out:
+ consume_skb(skb);
+}
+
+static void mlxsw_sp_rx_sample_tx_listener(struct sk_buff *skb, u8 local_port,
+ void *trap_ctx)
+{
+ struct mlxsw_rx_md_info *rx_md_info = &mlxsw_skb_cb(skb)->rx_md_info;
+ struct mlxsw_sp *mlxsw_sp = devlink_trap_ctx_priv(trap_ctx);
+ struct mlxsw_sp_port *mlxsw_sp_port, *mlxsw_sp_port_tx;
+ struct mlxsw_sp_sample_trigger trigger;
+ struct mlxsw_sp_sample_params *params;
+ struct psample_metadata md = {};
+ int err;
+
+ /* Locally generated packets are not reported from the policy engine
+ * trigger, so do not report them from the egress trigger either.
+ */
+ if (local_port == MLXSW_PORT_CPU_PORT)
+ goto out;
+
+ err = __mlxsw_sp_rx_no_mark_listener(skb, local_port, trap_ctx);
+ if (err)
+ return;
+
+ mlxsw_sp_port = mlxsw_sp->ports[local_port];
+ if (!mlxsw_sp_port)
+ goto out;
+
+ /* Packet was sampled from Tx, so we need to retrieve the sample
+ * parameters based on the Tx port and not the Rx port.
+ */
+ mlxsw_sp_port_tx = mlxsw_sp_sample_tx_port_get(mlxsw_sp, rx_md_info);
+ if (!mlxsw_sp_port_tx)
+ goto out;
+
+ trigger.type = MLXSW_SP_SAMPLE_TRIGGER_TYPE_EGRESS;
+ trigger.local_port = mlxsw_sp_port_tx->local_port;
+ params = mlxsw_sp_sample_trigger_params_lookup(mlxsw_sp, &trigger);
+ if (!params)
+ goto out;
+
+ /* The psample module expects skb->data to point to the start of the
+ * Ethernet header.
+ */
+ skb_push(skb, ETH_HLEN);
+ mlxsw_sp_psample_md_init(mlxsw_sp, &md, skb,
+ mlxsw_sp_port->dev->ifindex, params->truncate,
+ params->trunc_size);
+ psample_sample_packet(params->psample_group, skb, params->rate, &md);
+out:
+ consume_skb(skb);
+}
+
+static void mlxsw_sp_rx_sample_acl_listener(struct sk_buff *skb, u8 local_port,
+ void *trap_ctx)
+{
+ struct mlxsw_sp *mlxsw_sp = devlink_trap_ctx_priv(trap_ctx);
+ struct mlxsw_sp_sample_trigger trigger = {
+ .type = MLXSW_SP_SAMPLE_TRIGGER_TYPE_POLICY_ENGINE,
+ };
+ struct mlxsw_sp_sample_params *params;
+ struct mlxsw_sp_port *mlxsw_sp_port;
+ struct psample_metadata md = {};
int err;
err = __mlxsw_sp_rx_no_mark_listener(skb, local_port, trap_ctx);
if (err)
return;
- /* The sample handler expects skb->data to point to the start of the
+ mlxsw_sp_port = mlxsw_sp->ports[local_port];
+ if (!mlxsw_sp_port)
+ goto out;
+
+ params = mlxsw_sp_sample_trigger_params_lookup(mlxsw_sp, &trigger);
+ if (!params)
+ goto out;
+
+ /* The psample module expects skb->data to point to the start of the
* Ethernet header.
*/
skb_push(skb, ETH_HLEN);
- mlxsw_sp_sample_receive(mlxsw_sp, skb, local_port);
+ mlxsw_sp_psample_md_init(mlxsw_sp, &md, skb,
+ mlxsw_sp_port->dev->ifindex, params->truncate,
+ params->trunc_size);
+ psample_sample_packet(params->psample_group, skb, params->rate, &md);
+out:
+ consume_skb(skb);
}
#define MLXSW_SP_TRAP_DROP(_id, _group_id) \
@@ -464,11 +624,6 @@ static const struct mlxsw_sp_trap_group_item mlxsw_sp_trap_group_items_arr[] = {
.priority = 2,
},
{
- .group = DEVLINK_TRAP_GROUP_GENERIC(ACL_SAMPLE, 0),
- .hw_group_id = MLXSW_REG_HTGT_TRAP_GROUP_SP_PKT_SAMPLE,
- .priority = 0,
- },
- {
.group = DEVLINK_TRAP_GROUP_GENERIC(ACL_TRAP, 18),
.hw_group_id = MLXSW_REG_HTGT_TRAP_GROUP_SP_FLOW_LOGGING,
.priority = 4,
@@ -993,14 +1148,6 @@ static const struct mlxsw_sp_trap_item mlxsw_sp_trap_items_arr[] = {
},
},
{
- .trap = MLXSW_SP_TRAP_CONTROL(FLOW_ACTION_SAMPLE, ACL_SAMPLE,
- MIRROR),
- .listeners_arr = {
- MLXSW_RXL(mlxsw_sp_rx_sample_listener, PKT_SAMPLE,
- MIRROR_TO_CPU, false, SP_PKT_SAMPLE, DISCARD),
- },
- },
- {
.trap = MLXSW_SP_TRAP_CONTROL(FLOW_ACTION_TRAP, ACL_TRAP, TRAP),
.listeners_arr = {
MLXSW_SP_RXL_NO_MARK(ACL0, FLOW_LOGGING, TRAP_TO_CPU,
@@ -1709,10 +1856,23 @@ int mlxsw_sp_trap_group_policer_hw_id_get(struct mlxsw_sp *mlxsw_sp, u16 id,
static const struct mlxsw_sp_trap_group_item
mlxsw_sp1_trap_group_items_arr[] = {
+ {
+ .group = DEVLINK_TRAP_GROUP_GENERIC(ACL_SAMPLE, 0),
+ .hw_group_id = MLXSW_REG_HTGT_TRAP_GROUP_SP_PKT_SAMPLE,
+ .priority = 0,
+ },
};
static const struct mlxsw_sp_trap_item
mlxsw_sp1_trap_items_arr[] = {
+ {
+ .trap = MLXSW_SP_TRAP_CONTROL(FLOW_ACTION_SAMPLE, ACL_SAMPLE,
+ MIRROR),
+ .listeners_arr = {
+ MLXSW_RXL(mlxsw_sp_rx_sample_listener, PKT_SAMPLE,
+ MIRROR_TO_CPU, false, SP_PKT_SAMPLE, DISCARD),
+ },
+ },
};
static int
@@ -1749,6 +1909,12 @@ mlxsw_sp2_trap_group_items_arr[] = {
.priority = 0,
.fixed_policer = true,
},
+ {
+ .group = DEVLINK_TRAP_GROUP_GENERIC(ACL_SAMPLE, 0),
+ .hw_group_id = MLXSW_REG_HTGT_TRAP_GROUP_SP_PKT_SAMPLE,
+ .priority = 0,
+ .fixed_policer = true,
+ },
};
static const struct mlxsw_sp_trap_item
@@ -1760,6 +1926,21 @@ mlxsw_sp2_trap_items_arr[] = {
},
.is_source = true,
},
+ {
+ .trap = MLXSW_SP_TRAP_CONTROL(FLOW_ACTION_SAMPLE, ACL_SAMPLE,
+ MIRROR),
+ .listeners_arr = {
+ MLXSW_RXL_MIRROR(mlxsw_sp_rx_sample_listener, 1,
+ SP_PKT_SAMPLE,
+ MLXSW_SP_MIRROR_REASON_INGRESS),
+ MLXSW_RXL_MIRROR(mlxsw_sp_rx_sample_tx_listener, 1,
+ SP_PKT_SAMPLE,
+ MLXSW_SP_MIRROR_REASON_EGRESS),
+ MLXSW_RXL_MIRROR(mlxsw_sp_rx_sample_acl_listener, 1,
+ SP_PKT_SAMPLE,
+ MLXSW_SP_MIRROR_REASON_POLICY_ENGINE),
+ },
+ },
};
static int