author     Linus Torvalds <torvalds@linux-foundation.org>  2020-08-06 06:13:21 +0300
committer  Linus Torvalds <torvalds@linux-foundation.org>  2020-08-06 06:13:21 +0300
commit     47ec5303d73ea344e84f46660fff693c57641386
tree       a2252debab749de29620c43285295d60c4741119 /drivers/net/ethernet/broadcom/bnxt/bnxt.c
parent     8186749621ed6b8fc42644c399e8c755a2b6f630
parent     c1055b76ad00aed0e8b79417080f212d736246b6
download   linux-47ec5303d73ea344e84f46660fff693c57641386.tar.xz
Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net-next
Pull networking updates from David Miller:
1) Support 6Ghz band in ath11k driver, from Rajkumar Manoharan.
2) Support UDP segmentation in core TSO code, from Eric Dumazet.
3) Allow flashing different flash images in cxgb4 driver, from Vishal
Kulkarni.
4) Add drop frames counter and flow status to tc flower offloading,
from Po Liu.
5) Support n-tuple filters in cxgb4, from Vishal Kulkarni.
6) Various new indirect call avoidance, from Eric Dumazet and Brian
Vazquez.
7) Fix BPF verifier failures on 32-bit pointer arithmetic, from
Yonghong Song.
8) Support querying and setting hardware address of a port function via
devlink, use this in mlx5, from Parav Pandit.
9) Support hw ipsec offload on bonding slaves, from Jarod Wilson.
10) Switch qca8k driver over to phylink, from Jonathan McDowell.
11) In bpftool, show list of processes holding BPF FD references to
maps, programs, links, and btf objects. From Andrii Nakryiko.
12) Several conversions over to generic power management, from Vaibhav
Gupta.
13) Add support for SO_KEEPALIVE et al. to bpf_setsockopt(), from Dmitry
Yakunin.
14) Various https url conversions, from Alexander A. Klimov.
15) Timestamping and PHC support for mscc PHY driver, from Antoine
Tenart.
16) Support bpf iterating over tcp and udp sockets, from Yonghong Song.
17) Support 5GBASE-T i40e NICs, from Aleksandr Loktionov.
18) Add kTLS RX HW offload support to mlx5e, from Tariq Toukan.
19) Fix the ->ndo_start_xmit() return type to be netdev_tx_t in several
drivers. From Luc Van Oostenryck.
20) XDP support for xen-netfront, from Denis Kirjanov.
21) Support receive buffer autotuning in MPTCP, from Florian Westphal.
22) Support EF100 chip in sfc driver, from Edward Cree.
23) Add XDP support to mvpp2 driver, from Matteo Croce.
24) Support MPTCP in sock_diag, from Paolo Abeni.
25) Commonize UDP tunnel offloading code by creating udp_tunnel_nic
infrastructure, from Jakub Kicinski.
26) Several pci_ --> dma_ API conversions, from Christophe JAILLET.
27) Add FLOW_ACTION_POLICE support to mlxsw, from Ido Schimmel.
28) Add SK_LOOKUP bpf program type, from Jakub Sitnicki.
29) Refactor a lot of networking socket option handling code in order to
avoid set_fs() calls, from Christoph Hellwig.
30) Add rfc4884 support to icmp code, from Willem de Bruijn.
31) Support TBF offload in dpaa2-eth driver, from Ioana Ciornei.
32) Support XDP_REDIRECT in qede driver, from Alexander Lobakin.
33) Support PCI relaxed ordering in mlx5 driver, from Aya Levin.
34) Support TCP syncookies in MPTCP, from Florian Westphal.
35) Fix several tricky cases of PMTU handling wrt. bridging, from Stefano
Brivio.
* git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net-next: (2056 commits)
net: thunderx: initialize VF's mailbox mutex before first usage
usb: hso: remove bogus check for EINPROGRESS
usb: hso: no complaint about kmalloc failure
hso: fix bailout in error case of probe
ip_tunnel_core: Fix build for archs without _HAVE_ARCH_IPV6_CSUM
selftests/net: relax cpu affinity requirement in msg_zerocopy test
mptcp: be careful on subflow creation
selftests: rtnetlink: make kci_test_encap() return sub-test result
selftests: rtnetlink: correct the final return value for the test
net: dsa: sja1105: use detected device id instead of DT one on mismatch
tipc: set ub->ifindex for local ipv6 address
ipv6: add ipv6_dev_find()
net: openvswitch: silence suspicious RCU usage warning
Revert "vxlan: fix tos value before xmit"
ptp: only allow phase values lower than 1 period
farsync: switch from 'pci_' to 'dma_' API
wan: wanxl: switch from 'pci_' to 'dma_' API
hv_netvsc: do not use VF device if link is down
dpaa2-eth: Fix passing zero to 'PTR_ERR' warning
net: macb: Properly handle phylink on at91sam9x
...
Diffstat (limited to 'drivers/net/ethernet/broadcom/bnxt/bnxt.c')
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt.c  880
1 file changed, 578 insertions(+), 302 deletions(-)
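
The bnxt.c changes below fold the per-ring and per-port statistics DMA buffers into a common struct bnxt_stats_mem and start accumulating the hardware counters, which wrap at 40 or 48 bits depending on the chip, into full 64-bit software counters. A standalone sketch of that carry-on-wrap accumulation, mirroring the bnxt_add_one_ctr() helper added by the patch (the user-space wrapper and the sample values are illustrative only):

#include <stdint.h>
#include <stdio.h>

/* Fold a narrow hardware counter (valid bits selected by @mask) into a
 * 64-bit software counter, adding one full counter period whenever the
 * hardware value wrapped since the previous read.
 */
static void add_one_ctr(uint64_t hw, uint64_t *sw, uint64_t mask)
{
	uint64_t tmp = (*sw & ~mask) | hw;

	if (hw < (*sw & mask))          /* hardware counter wrapped around */
		tmp += mask + 1;
	*sw = tmp;
}

int main(void)
{
	uint64_t mask = (1ULL << 48) - 1;   /* 48-bit counters on P5 chips */
	uint64_t sw = 0;

	add_one_ctr(0xFFFFFFFFFFF0ULL, &sw, mask);  /* near the top of the range */
	add_one_ctr(0x10ULL, &sw, mask);            /* hardware wrapped to a small value */
	printf("accumulated: %#llx\n", (unsigned long long)sw);
	return 0;
}
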
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index 7463a1847ceb..31fb5a28e1c4 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -1614,7 +1614,7 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp, skb_set_hash(skb, tpa_info->rss_hash, tpa_info->hash_type); if ((tpa_info->flags2 & RX_CMP_FLAGS2_META_FORMAT_VLAN) && - (skb->dev->features & NETIF_F_HW_VLAN_CTAG_RX)) { + (skb->dev->features & BNXT_HW_FEATURE_VLAN_ALL_RX)) { u16 vlan_proto = tpa_info->metadata >> RX_CMP_FLAGS2_METADATA_TPID_SFT; u16 vtag = tpa_info->metadata & RX_CMP_FLAGS2_METADATA_TCI_MASK; @@ -1832,7 +1832,7 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr, if ((rxcmp1->rx_cmp_flags2 & cpu_to_le32(RX_CMP_FLAGS2_META_FORMAT_VLAN)) && - (skb->dev->features & NETIF_F_HW_VLAN_CTAG_RX)) { + (skb->dev->features & BNXT_HW_FEATURE_VLAN_ALL_RX)) { u32 meta_data = le32_to_cpu(rxcmp1->rx_cmp_meta_data); u16 vtag = meta_data & RX_CMP_FLAGS2_METADATA_TCI_MASK; u16 vlan_proto = meta_data >> RX_CMP_FLAGS2_METADATA_TPID_SFT; @@ -3546,7 +3546,7 @@ static void bnxt_free_vnic_attributes(struct bnxt *bp) } if (vnic->rss_table) { - dma_free_coherent(&pdev->dev, PAGE_SIZE, + dma_free_coherent(&pdev->dev, vnic->rss_table_size, vnic->rss_table, vnic->rss_table_dma_addr); vnic->rss_table = NULL; @@ -3611,7 +3611,13 @@ vnic_skip_grps: continue; /* Allocate rss table and hash key */ - vnic->rss_table = dma_alloc_coherent(&pdev->dev, PAGE_SIZE, + size = L1_CACHE_ALIGN(HW_HASH_INDEX_SIZE * sizeof(u16)); + if (bp->flags & BNXT_FLAG_CHIP_P5) + size = L1_CACHE_ALIGN(BNXT_MAX_RSS_TABLE_SIZE_P5); + + vnic->rss_table_size = size + HW_HASH_KEY_SIZE; + vnic->rss_table = dma_alloc_coherent(&pdev->dev, + vnic->rss_table_size, &vnic->rss_table_dma_addr, GFP_KERNEL); if (!vnic->rss_table) { @@ -3619,8 +3625,6 @@ vnic_skip_grps: goto out; } - size = L1_CACHE_ALIGN(HW_HASH_INDEX_SIZE * sizeof(u16)); - vnic->rss_hash_key = ((void *)vnic->rss_table) + size; vnic->rss_hash_key_dma_addr = vnic->rss_table_dma_addr + size; } @@ -3707,67 +3711,189 @@ static int bnxt_alloc_hwrm_short_cmd_req(struct bnxt *bp) return 0; } -static void bnxt_free_port_stats(struct bnxt *bp) +static void bnxt_free_stats_mem(struct bnxt *bp, struct bnxt_stats_mem *stats) { - struct pci_dev *pdev = bp->pdev; + kfree(stats->hw_masks); + stats->hw_masks = NULL; + kfree(stats->sw_stats); + stats->sw_stats = NULL; + if (stats->hw_stats) { + dma_free_coherent(&bp->pdev->dev, stats->len, stats->hw_stats, + stats->hw_stats_map); + stats->hw_stats = NULL; + } +} - bp->flags &= ~BNXT_FLAG_PORT_STATS; - bp->flags &= ~BNXT_FLAG_PORT_STATS_EXT; +static int bnxt_alloc_stats_mem(struct bnxt *bp, struct bnxt_stats_mem *stats, + bool alloc_masks) +{ + stats->hw_stats = dma_alloc_coherent(&bp->pdev->dev, stats->len, + &stats->hw_stats_map, GFP_KERNEL); + if (!stats->hw_stats) + return -ENOMEM; - if (bp->hw_rx_port_stats) { - dma_free_coherent(&pdev->dev, bp->hw_port_stats_size, - bp->hw_rx_port_stats, - bp->hw_rx_port_stats_map); - bp->hw_rx_port_stats = NULL; - } + stats->sw_stats = kzalloc(stats->len, GFP_KERNEL); + if (!stats->sw_stats) + goto stats_mem_err; - if (bp->hw_tx_port_stats_ext) { - dma_free_coherent(&pdev->dev, sizeof(struct tx_port_stats_ext), - bp->hw_tx_port_stats_ext, - bp->hw_tx_port_stats_ext_map); - bp->hw_tx_port_stats_ext = NULL; + if (alloc_masks) { + stats->hw_masks = kzalloc(stats->len, GFP_KERNEL); + if (!stats->hw_masks) + goto stats_mem_err; } 
+ return 0; + +stats_mem_err: + bnxt_free_stats_mem(bp, stats); + return -ENOMEM; +} + +static void bnxt_fill_masks(u64 *mask_arr, u64 mask, int count) +{ + int i; + + for (i = 0; i < count; i++) + mask_arr[i] = mask; +} + +static void bnxt_copy_hw_masks(u64 *mask_arr, __le64 *hw_mask_arr, int count) +{ + int i; + + for (i = 0; i < count; i++) + mask_arr[i] = le64_to_cpu(hw_mask_arr[i]); +} + +static int bnxt_hwrm_func_qstat_ext(struct bnxt *bp, + struct bnxt_stats_mem *stats) +{ + struct hwrm_func_qstats_ext_output *resp = bp->hwrm_cmd_resp_addr; + struct hwrm_func_qstats_ext_input req = {0}; + __le64 *hw_masks; + int rc; + + if (!(bp->fw_cap & BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED) || + !(bp->flags & BNXT_FLAG_CHIP_P5)) + return -EOPNOTSUPP; + + bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QSTATS_EXT, -1, -1); + req.flags = FUNC_QSTATS_EXT_REQ_FLAGS_COUNTER_MASK; + mutex_lock(&bp->hwrm_cmd_lock); + rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + if (rc) + goto qstat_exit; - if (bp->hw_rx_port_stats_ext) { - dma_free_coherent(&pdev->dev, sizeof(struct rx_port_stats_ext), - bp->hw_rx_port_stats_ext, - bp->hw_rx_port_stats_ext_map); - bp->hw_rx_port_stats_ext = NULL; + hw_masks = &resp->rx_ucast_pkts; + bnxt_copy_hw_masks(stats->hw_masks, hw_masks, stats->len / 8); + +qstat_exit: + mutex_unlock(&bp->hwrm_cmd_lock); + return rc; +} + +static int bnxt_hwrm_port_qstats(struct bnxt *bp, u8 flags); +static int bnxt_hwrm_port_qstats_ext(struct bnxt *bp, u8 flags); + +static void bnxt_init_stats(struct bnxt *bp) +{ + struct bnxt_napi *bnapi = bp->bnapi[0]; + struct bnxt_cp_ring_info *cpr; + struct bnxt_stats_mem *stats; + __le64 *rx_stats, *tx_stats; + int rc, rx_count, tx_count; + u64 *rx_masks, *tx_masks; + u64 mask; + u8 flags; + + cpr = &bnapi->cp_ring; + stats = &cpr->stats; + rc = bnxt_hwrm_func_qstat_ext(bp, stats); + if (rc) { + if (bp->flags & BNXT_FLAG_CHIP_P5) + mask = (1ULL << 48) - 1; + else + mask = -1ULL; + bnxt_fill_masks(stats->hw_masks, mask, stats->len / 8); } + if (bp->flags & BNXT_FLAG_PORT_STATS) { + stats = &bp->port_stats; + rx_stats = stats->hw_stats; + rx_masks = stats->hw_masks; + rx_count = sizeof(struct rx_port_stats) / 8; + tx_stats = rx_stats + BNXT_TX_PORT_STATS_BYTE_OFFSET / 8; + tx_masks = rx_masks + BNXT_TX_PORT_STATS_BYTE_OFFSET / 8; + tx_count = sizeof(struct tx_port_stats) / 8; + + flags = PORT_QSTATS_REQ_FLAGS_COUNTER_MASK; + rc = bnxt_hwrm_port_qstats(bp, flags); + if (rc) { + mask = (1ULL << 40) - 1; + + bnxt_fill_masks(rx_masks, mask, rx_count); + bnxt_fill_masks(tx_masks, mask, tx_count); + } else { + bnxt_copy_hw_masks(rx_masks, rx_stats, rx_count); + bnxt_copy_hw_masks(tx_masks, tx_stats, tx_count); + bnxt_hwrm_port_qstats(bp, 0); + } + } + if (bp->flags & BNXT_FLAG_PORT_STATS_EXT) { + stats = &bp->rx_port_stats_ext; + rx_stats = stats->hw_stats; + rx_masks = stats->hw_masks; + rx_count = sizeof(struct rx_port_stats_ext) / 8; + stats = &bp->tx_port_stats_ext; + tx_stats = stats->hw_stats; + tx_masks = stats->hw_masks; + tx_count = sizeof(struct tx_port_stats_ext) / 8; + + flags = FUNC_QSTATS_EXT_REQ_FLAGS_COUNTER_MASK; + rc = bnxt_hwrm_port_qstats_ext(bp, flags); + if (rc) { + mask = (1ULL << 40) - 1; - if (bp->hw_pcie_stats) { - dma_free_coherent(&pdev->dev, sizeof(struct pcie_ctx_hw_stats), - bp->hw_pcie_stats, bp->hw_pcie_stats_map); - bp->hw_pcie_stats = NULL; + bnxt_fill_masks(rx_masks, mask, rx_count); + if (tx_stats) + bnxt_fill_masks(tx_masks, mask, tx_count); + } else { + bnxt_copy_hw_masks(rx_masks, rx_stats, rx_count); + if 
(tx_stats) + bnxt_copy_hw_masks(tx_masks, tx_stats, + tx_count); + bnxt_hwrm_port_qstats_ext(bp, 0); + } } } +static void bnxt_free_port_stats(struct bnxt *bp) +{ + bp->flags &= ~BNXT_FLAG_PORT_STATS; + bp->flags &= ~BNXT_FLAG_PORT_STATS_EXT; + + bnxt_free_stats_mem(bp, &bp->port_stats); + bnxt_free_stats_mem(bp, &bp->rx_port_stats_ext); + bnxt_free_stats_mem(bp, &bp->tx_port_stats_ext); +} + static void bnxt_free_ring_stats(struct bnxt *bp) { - struct pci_dev *pdev = bp->pdev; - int size, i; + int i; if (!bp->bnapi) return; - size = bp->hw_ring_stats_size; - for (i = 0; i < bp->cp_nr_rings; i++) { struct bnxt_napi *bnapi = bp->bnapi[i]; struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; - if (cpr->hw_stats) { - dma_free_coherent(&pdev->dev, size, cpr->hw_stats, - cpr->hw_stats_map); - cpr->hw_stats = NULL; - } + bnxt_free_stats_mem(bp, &cpr->stats); } } static int bnxt_alloc_stats(struct bnxt *bp) { u32 size, i; - struct pci_dev *pdev = bp->pdev; + int rc; size = bp->hw_ring_stats_size; @@ -3775,11 +3901,10 @@ static int bnxt_alloc_stats(struct bnxt *bp) struct bnxt_napi *bnapi = bp->bnapi[i]; struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; - cpr->hw_stats = dma_alloc_coherent(&pdev->dev, size, - &cpr->hw_stats_map, - GFP_KERNEL); - if (!cpr->hw_stats) - return -ENOMEM; + cpr->stats.len = size; + rc = bnxt_alloc_stats_mem(bp, &cpr->stats, !i); + if (rc) + return rc; cpr->hw_stats_ctx_id = INVALID_STATS_CTX_ID; } @@ -3787,22 +3912,14 @@ static int bnxt_alloc_stats(struct bnxt *bp) if (BNXT_VF(bp) || bp->chip_num == CHIP_NUM_58700) return 0; - if (bp->hw_rx_port_stats) + if (bp->port_stats.hw_stats) goto alloc_ext_stats; - bp->hw_port_stats_size = sizeof(struct rx_port_stats) + - sizeof(struct tx_port_stats) + 1024; - - bp->hw_rx_port_stats = - dma_alloc_coherent(&pdev->dev, bp->hw_port_stats_size, - &bp->hw_rx_port_stats_map, - GFP_KERNEL); - if (!bp->hw_rx_port_stats) - return -ENOMEM; + bp->port_stats.len = BNXT_PORT_STATS_SIZE; + rc = bnxt_alloc_stats_mem(bp, &bp->port_stats, true); + if (rc) + return rc; - bp->hw_tx_port_stats = (void *)(bp->hw_rx_port_stats + 1) + 512; - bp->hw_tx_port_stats_map = bp->hw_rx_port_stats_map + - sizeof(struct rx_port_stats) + 512; bp->flags |= BNXT_FLAG_PORT_STATS; alloc_ext_stats: @@ -3811,41 +3928,28 @@ alloc_ext_stats: if (!(bp->fw_cap & BNXT_FW_CAP_EXT_STATS_SUPPORTED)) return 0; - if (bp->hw_rx_port_stats_ext) + if (bp->rx_port_stats_ext.hw_stats) goto alloc_tx_ext_stats; - bp->hw_rx_port_stats_ext = - dma_alloc_coherent(&pdev->dev, sizeof(struct rx_port_stats_ext), - &bp->hw_rx_port_stats_ext_map, GFP_KERNEL); - if (!bp->hw_rx_port_stats_ext) + bp->rx_port_stats_ext.len = sizeof(struct rx_port_stats_ext); + rc = bnxt_alloc_stats_mem(bp, &bp->rx_port_stats_ext, true); + /* Extended stats are optional */ + if (rc) return 0; alloc_tx_ext_stats: - if (bp->hw_tx_port_stats_ext) - goto alloc_pcie_stats; + if (bp->tx_port_stats_ext.hw_stats) + return 0; if (bp->hwrm_spec_code >= 0x10902 || (bp->fw_cap & BNXT_FW_CAP_EXT_STATS_SUPPORTED)) { - bp->hw_tx_port_stats_ext = - dma_alloc_coherent(&pdev->dev, - sizeof(struct tx_port_stats_ext), - &bp->hw_tx_port_stats_ext_map, - GFP_KERNEL); + bp->tx_port_stats_ext.len = sizeof(struct tx_port_stats_ext); + rc = bnxt_alloc_stats_mem(bp, &bp->tx_port_stats_ext, true); + /* Extended stats are optional */ + if (rc) + return 0; } bp->flags |= BNXT_FLAG_PORT_STATS_EXT; - -alloc_pcie_stats: - if (bp->hw_pcie_stats || - !(bp->fw_cap & BNXT_FW_CAP_PCIE_STATS_SUPPORTED)) - return 0; - - bp->hw_pcie_stats = - 
dma_alloc_coherent(&pdev->dev, sizeof(struct pcie_ctx_hw_stats), - &bp->hw_pcie_stats_map, GFP_KERNEL); - if (!bp->hw_pcie_stats) - return 0; - - bp->flags |= BNXT_FLAG_PCIE_STATS; return 0; } @@ -3945,6 +4049,8 @@ static void bnxt_free_mem(struct bnxt *bp, bool irq_re_init) bnxt_free_ntp_fltrs(bp, irq_re_init); if (irq_re_init) { bnxt_free_ring_stats(bp); + if (!(bp->fw_cap & BNXT_FW_CAP_PORT_STATS_NO_RESET)) + bnxt_free_port_stats(bp); bnxt_free_ring_grps(bp); bnxt_free_vnics(bp); kfree(bp->tx_ring_map); @@ -4048,6 +4154,7 @@ static int bnxt_alloc_mem(struct bnxt *bp, bool irq_re_init) rc = bnxt_alloc_stats(bp); if (rc) goto alloc_mem_err; + bnxt_init_stats(bp); rc = bnxt_alloc_ntp_fltrs(bp); if (rc) @@ -4513,10 +4620,12 @@ static int bnxt_hwrm_tunnel_dst_port_free(struct bnxt *bp, u8 tunnel_type) switch (tunnel_type) { case TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN: - req.tunnel_dst_port_id = bp->vxlan_fw_dst_port_id; + req.tunnel_dst_port_id = cpu_to_le16(bp->vxlan_fw_dst_port_id); + bp->vxlan_fw_dst_port_id = INVALID_HW_RING_ID; break; case TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE: - req.tunnel_dst_port_id = bp->nge_fw_dst_port_id; + req.tunnel_dst_port_id = cpu_to_le16(bp->nge_fw_dst_port_id); + bp->nge_fw_dst_port_id = INVALID_HW_RING_ID; break; default: break; @@ -4551,10 +4660,11 @@ static int bnxt_hwrm_tunnel_dst_port_alloc(struct bnxt *bp, __be16 port, switch (tunnel_type) { case TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN: - bp->vxlan_fw_dst_port_id = resp->tunnel_dst_port_id; + bp->vxlan_fw_dst_port_id = + le16_to_cpu(resp->tunnel_dst_port_id); break; case TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_GENEVE: - bp->nge_fw_dst_port_id = resp->tunnel_dst_port_id; + bp->nge_fw_dst_port_id = le16_to_cpu(resp->tunnel_dst_port_id); break; default: break; @@ -4834,9 +4944,112 @@ static u16 bnxt_cp_ring_for_tx(struct bnxt *bp, struct bnxt_tx_ring_info *txr) } } +static int bnxt_alloc_rss_indir_tbl(struct bnxt *bp) +{ + int entries; + + if (bp->flags & BNXT_FLAG_CHIP_P5) + entries = BNXT_MAX_RSS_TABLE_ENTRIES_P5; + else + entries = HW_HASH_INDEX_SIZE; + + bp->rss_indir_tbl_entries = entries; + bp->rss_indir_tbl = kmalloc_array(entries, sizeof(*bp->rss_indir_tbl), + GFP_KERNEL); + if (!bp->rss_indir_tbl) + return -ENOMEM; + return 0; +} + +static void bnxt_set_dflt_rss_indir_tbl(struct bnxt *bp) +{ + u16 max_rings, max_entries, pad, i; + + if (!bp->rx_nr_rings) + return; + + if (BNXT_CHIP_TYPE_NITRO_A0(bp)) + max_rings = bp->rx_nr_rings - 1; + else + max_rings = bp->rx_nr_rings; + + max_entries = bnxt_get_rxfh_indir_size(bp->dev); + + for (i = 0; i < max_entries; i++) + bp->rss_indir_tbl[i] = ethtool_rxfh_indir_default(i, max_rings); + + pad = bp->rss_indir_tbl_entries - max_entries; + if (pad) + memset(&bp->rss_indir_tbl[i], 0, pad * sizeof(u16)); +} + +static u16 bnxt_get_max_rss_ring(struct bnxt *bp) +{ + u16 i, tbl_size, max_ring = 0; + + if (!bp->rss_indir_tbl) + return 0; + + tbl_size = bnxt_get_rxfh_indir_size(bp->dev); + for (i = 0; i < tbl_size; i++) + max_ring = max(max_ring, bp->rss_indir_tbl[i]); + return max_ring; +} + +int bnxt_get_nr_rss_ctxs(struct bnxt *bp, int rx_rings) +{ + if (bp->flags & BNXT_FLAG_CHIP_P5) + return DIV_ROUND_UP(rx_rings, BNXT_RSS_TABLE_ENTRIES_P5); + if (BNXT_CHIP_TYPE_NITRO_A0(bp)) + return 2; + return 1; +} + +static void __bnxt_fill_hw_rss_tbl(struct bnxt *bp, struct bnxt_vnic_info *vnic) +{ + bool no_rss = !(vnic->flags & BNXT_VNIC_RSS_FLAG); + u16 i, j; + + /* Fill the RSS indirection table with ring group ids */ + for (i = 0, j = 0; i < 
HW_HASH_INDEX_SIZE; i++) { + if (!no_rss) + j = bp->rss_indir_tbl[i]; + vnic->rss_table[i] = cpu_to_le16(vnic->fw_grp_ids[j]); + } +} + +static void __bnxt_fill_hw_rss_tbl_p5(struct bnxt *bp, + struct bnxt_vnic_info *vnic) +{ + __le16 *ring_tbl = vnic->rss_table; + struct bnxt_rx_ring_info *rxr; + u16 tbl_size, i; + + tbl_size = bnxt_get_rxfh_indir_size(bp->dev); + + for (i = 0; i < tbl_size; i++) { + u16 ring_id, j; + + j = bp->rss_indir_tbl[i]; + rxr = &bp->rx_ring[j]; + + ring_id = rxr->rx_ring_struct.fw_ring_id; + *ring_tbl++ = cpu_to_le16(ring_id); + ring_id = bnxt_cp_ring_for_rx(bp, rxr); + *ring_tbl++ = cpu_to_le16(ring_id); + } +} + +static void bnxt_fill_hw_rss_tbl(struct bnxt *bp, struct bnxt_vnic_info *vnic) +{ + if (bp->flags & BNXT_FLAG_CHIP_P5) + __bnxt_fill_hw_rss_tbl_p5(bp, vnic); + else + __bnxt_fill_hw_rss_tbl(bp, vnic); +} + static int bnxt_hwrm_vnic_set_rss(struct bnxt *bp, u16 vnic_id, bool set_rss) { - u32 i, j, max_rings; struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id]; struct hwrm_vnic_rss_cfg_input req = {0}; @@ -4846,24 +5059,9 @@ static int bnxt_hwrm_vnic_set_rss(struct bnxt *bp, u16 vnic_id, bool set_rss) bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_CFG, -1, -1); if (set_rss) { + bnxt_fill_hw_rss_tbl(bp, vnic); req.hash_type = cpu_to_le32(bp->rss_hash_cfg); req.hash_mode_flags = VNIC_RSS_CFG_REQ_HASH_MODE_FLAGS_DEFAULT; - if (vnic->flags & BNXT_VNIC_RSS_FLAG) { - if (BNXT_CHIP_TYPE_NITRO_A0(bp)) - max_rings = bp->rx_nr_rings - 1; - else - max_rings = bp->rx_nr_rings; - } else { - max_rings = 1; - } - - /* Fill the RSS indirection table with ring group ids */ - for (i = 0, j = 0; i < HW_HASH_INDEX_SIZE; i++, j++) { - if (j == max_rings) - j = 0; - vnic->rss_table[i] = cpu_to_le16(vnic->fw_grp_ids[j]); - } - req.ring_grp_tbl_addr = cpu_to_le64(vnic->rss_table_dma_addr); req.hash_key_tbl_addr = cpu_to_le64(vnic->rss_hash_key_dma_addr); @@ -4875,9 +5073,9 @@ static int bnxt_hwrm_vnic_set_rss(struct bnxt *bp, u16 vnic_id, bool set_rss) static int bnxt_hwrm_vnic_set_rss_p5(struct bnxt *bp, u16 vnic_id, bool set_rss) { struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id]; - u32 i, j, k, nr_ctxs, max_rings = bp->rx_nr_rings; - struct bnxt_rx_ring_info *rxr = &bp->rx_ring[0]; struct hwrm_vnic_rss_cfg_input req = {0}; + dma_addr_t ring_tbl_map; + u32 i, nr_ctxs; bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_CFG, -1, -1); req.vnic_id = cpu_to_le16(vnic->fw_vnic_id); @@ -4885,31 +5083,18 @@ static int bnxt_hwrm_vnic_set_rss_p5(struct bnxt *bp, u16 vnic_id, bool set_rss) hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); return 0; } + bnxt_fill_hw_rss_tbl(bp, vnic); req.hash_type = cpu_to_le32(bp->rss_hash_cfg); req.hash_mode_flags = VNIC_RSS_CFG_REQ_HASH_MODE_FLAGS_DEFAULT; - req.ring_grp_tbl_addr = cpu_to_le64(vnic->rss_table_dma_addr); req.hash_key_tbl_addr = cpu_to_le64(vnic->rss_hash_key_dma_addr); - nr_ctxs = DIV_ROUND_UP(bp->rx_nr_rings, 64); - for (i = 0, k = 0; i < nr_ctxs; i++) { - __le16 *ring_tbl = vnic->rss_table; + ring_tbl_map = vnic->rss_table_dma_addr; + nr_ctxs = bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings); + for (i = 0; i < nr_ctxs; ring_tbl_map += BNXT_RSS_TABLE_SIZE_P5, i++) { int rc; + req.ring_grp_tbl_addr = cpu_to_le64(ring_tbl_map); req.ring_table_pair_index = i; req.rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[i]); - for (j = 0; j < 64; j++) { - u16 ring_id; - - ring_id = rxr->rx_ring_struct.fw_ring_id; - *ring_tbl++ = cpu_to_le16(ring_id); - ring_id = bnxt_cp_ring_for_rx(bp, rxr); - *ring_tbl++ = cpu_to_le16(ring_id); - rxr++; 
- k++; - if (k == max_rings) { - k = 0; - rxr = &bp->rx_ring[0]; - } - } rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); if (rc) return rc; @@ -5147,6 +5332,14 @@ static int bnxt_hwrm_vnic_qcaps(struct bnxt *bp) if (flags & VNIC_QCAPS_RESP_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_CAP) bp->flags |= BNXT_FLAG_ROCE_MIRROR_CAP; + + /* Older P5 fw before EXT_HW_STATS support did not set + * VLAN_STRIP_CAP properly. + */ + if ((flags & VNIC_QCAPS_RESP_FLAGS_VLAN_STRIP_CAP) || + ((bp->flags & BNXT_FLAG_CHIP_P5) && + !(bp->fw_cap & BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED))) + bp->fw_cap |= BNXT_FW_CAP_VLAN_RX_STRIP; bp->max_tpa_v2 = le16_to_cpu(resp->max_aggs_supported); if (bp->max_tpa_v2) bp->hw_ring_stats_size = @@ -6000,6 +6193,21 @@ static int __bnxt_reserve_rings(struct bnxt *bp) rx = rx_rings << 1; cp = sh ? max_t(int, tx, rx_rings) : tx + rx_rings; bp->tx_nr_rings = tx; + + /* If we cannot reserve all the RX rings, reset the RSS map only + * if absolutely necessary + */ + if (rx_rings != bp->rx_nr_rings) { + netdev_warn(bp->dev, "Able to reserve only %d out of %d requested RX rings\n", + rx_rings, bp->rx_nr_rings); + if ((bp->dev->priv_flags & IFF_RXFH_CONFIGURED) && + (bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings) != + bnxt_get_nr_rss_ctxs(bp, rx_rings) || + bnxt_get_max_rss_ring(bp) >= rx_rings)) { + netdev_warn(bp->dev, "RSS table entries reverting to default\n"); + bp->dev->priv_flags &= ~IFF_RXFH_CONFIGURED; + } + } bp->rx_nr_rings = rx_rings; bp->cp_nr_rings = cp; @@ -6353,7 +6561,7 @@ static int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp) struct bnxt_napi *bnapi = bp->bnapi[i]; struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; - req.stats_dma_addr = cpu_to_le64(cpr->hw_stats_map); + req.stats_dma_addr = cpu_to_le64(cpr->stats.hw_stats_map); rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); @@ -6963,7 +7171,7 @@ static int __bnxt_hwrm_func_qcaps(struct bnxt *bp) struct hwrm_func_qcaps_input req = {0}; struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr; struct bnxt_hw_resc *hw_resc = &bp->hw_resc; - u32 flags; + u32 flags, flags_ext; bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCAPS, -1, -1); req.fid = cpu_to_le16(0xffff); @@ -6988,6 +7196,12 @@ static int __bnxt_hwrm_func_qcaps(struct bnxt *bp) bp->fw_cap |= BNXT_FW_CAP_ERROR_RECOVERY; if (flags & FUNC_QCAPS_RESP_FLAGS_ERR_RECOVER_RELOAD) bp->fw_cap |= BNXT_FW_CAP_ERR_RECOVER_RELOAD; + if (!(flags & FUNC_QCAPS_RESP_FLAGS_VLAN_ACCELERATION_TX_DISABLED)) + bp->fw_cap |= BNXT_FW_CAP_VLAN_TX_INSERT; + + flags_ext = le32_to_cpu(resp->flags_ext); + if (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_EXT_HW_STATS_SUPPORTED) + bp->fw_cap |= BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED; bp->tx_push_thresh = 0; if ((flags & FUNC_QCAPS_RESP_FLAGS_PUSH_MODE_SUPPORTED) && @@ -7378,7 +7592,89 @@ int bnxt_hwrm_fw_set_time(struct bnxt *bp) return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); } -static int bnxt_hwrm_port_qstats(struct bnxt *bp) +static void bnxt_add_one_ctr(u64 hw, u64 *sw, u64 mask) +{ + u64 sw_tmp; + + sw_tmp = (*sw & ~mask) | hw; + if (hw < (*sw & mask)) + sw_tmp += mask + 1; + WRITE_ONCE(*sw, sw_tmp); +} + +static void __bnxt_accumulate_stats(__le64 *hw_stats, u64 *sw_stats, u64 *masks, + int count, bool ignore_zero) +{ + int i; + + for (i = 0; i < count; i++) { + u64 hw = le64_to_cpu(READ_ONCE(hw_stats[i])); + + if (ignore_zero && !hw) + continue; + + if (masks[i] == -1ULL) + sw_stats[i] = hw; + else + bnxt_add_one_ctr(hw, &sw_stats[i], masks[i]); + } +} + +static void bnxt_accumulate_stats(struct bnxt_stats_mem 
*stats) +{ + if (!stats->hw_stats) + return; + + __bnxt_accumulate_stats(stats->hw_stats, stats->sw_stats, + stats->hw_masks, stats->len / 8, false); +} + +static void bnxt_accumulate_all_stats(struct bnxt *bp) +{ + struct bnxt_stats_mem *ring0_stats; + bool ignore_zero = false; + int i; + + /* Chip bug. Counter intermittently becomes 0. */ + if (bp->flags & BNXT_FLAG_CHIP_P5) + ignore_zero = true; + + for (i = 0; i < bp->cp_nr_rings; i++) { + struct bnxt_napi *bnapi = bp->bnapi[i]; + struct bnxt_cp_ring_info *cpr; + struct bnxt_stats_mem *stats; + + cpr = &bnapi->cp_ring; + stats = &cpr->stats; + if (!i) + ring0_stats = stats; + __bnxt_accumulate_stats(stats->hw_stats, stats->sw_stats, + ring0_stats->hw_masks, + ring0_stats->len / 8, ignore_zero); + } + if (bp->flags & BNXT_FLAG_PORT_STATS) { + struct bnxt_stats_mem *stats = &bp->port_stats; + __le64 *hw_stats = stats->hw_stats; + u64 *sw_stats = stats->sw_stats; + u64 *masks = stats->hw_masks; + int cnt; + + cnt = sizeof(struct rx_port_stats) / 8; + __bnxt_accumulate_stats(hw_stats, sw_stats, masks, cnt, false); + + hw_stats += BNXT_TX_PORT_STATS_BYTE_OFFSET / 8; + sw_stats += BNXT_TX_PORT_STATS_BYTE_OFFSET / 8; + masks += BNXT_TX_PORT_STATS_BYTE_OFFSET / 8; + cnt = sizeof(struct tx_port_stats) / 8; + __bnxt_accumulate_stats(hw_stats, sw_stats, masks, cnt, false); + } + if (bp->flags & BNXT_FLAG_PORT_STATS_EXT) { + bnxt_accumulate_stats(&bp->rx_port_stats_ext); + bnxt_accumulate_stats(&bp->tx_port_stats_ext); + } +} + +static int bnxt_hwrm_port_qstats(struct bnxt *bp, u8 flags) { struct bnxt_pf_info *pf = &bp->pf; struct hwrm_port_qstats_input req = {0}; @@ -7386,14 +7682,19 @@ static int bnxt_hwrm_port_qstats(struct bnxt *bp) if (!(bp->flags & BNXT_FLAG_PORT_STATS)) return 0; + if (flags && !(bp->fw_cap & BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED)) + return -EOPNOTSUPP; + + req.flags = flags; bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_QSTATS, -1, -1); req.port_id = cpu_to_le16(pf->port_id); - req.tx_stat_host_addr = cpu_to_le64(bp->hw_tx_port_stats_map); - req.rx_stat_host_addr = cpu_to_le64(bp->hw_rx_port_stats_map); + req.tx_stat_host_addr = cpu_to_le64(bp->port_stats.hw_stats_map + + BNXT_TX_PORT_STATS_BYTE_OFFSET); + req.rx_stat_host_addr = cpu_to_le64(bp->port_stats.hw_stats_map); return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); } -static int bnxt_hwrm_port_qstats_ext(struct bnxt *bp) +static int bnxt_hwrm_port_qstats_ext(struct bnxt *bp, u8 flags) { struct hwrm_port_qstats_ext_output *resp = bp->hwrm_cmd_resp_addr; struct hwrm_queue_pri2cos_qcfg_input req2 = {0}; @@ -7405,14 +7706,18 @@ static int bnxt_hwrm_port_qstats_ext(struct bnxt *bp) if (!(bp->flags & BNXT_FLAG_PORT_STATS_EXT)) return 0; + if (flags && !(bp->fw_cap & BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED)) + return -EOPNOTSUPP; + bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_QSTATS_EXT, -1, -1); + req.flags = flags; req.port_id = cpu_to_le16(pf->port_id); req.rx_stat_size = cpu_to_le16(sizeof(struct rx_port_stats_ext)); - req.rx_stat_host_addr = cpu_to_le64(bp->hw_rx_port_stats_ext_map); - tx_stat_size = bp->hw_tx_port_stats_ext ? - sizeof(*bp->hw_tx_port_stats_ext) : 0; + req.rx_stat_host_addr = cpu_to_le64(bp->rx_port_stats_ext.hw_stats_map); + tx_stat_size = bp->tx_port_stats_ext.hw_stats ? 
+ sizeof(struct tx_port_stats_ext) : 0; req.tx_stat_size = cpu_to_le16(tx_stat_size); - req.tx_stat_host_addr = cpu_to_le64(bp->hw_tx_port_stats_ext_map); + req.tx_stat_host_addr = cpu_to_le64(bp->tx_port_stats_ext.hw_stats_map); mutex_lock(&bp->hwrm_cmd_lock); rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); if (!rc) { @@ -7423,6 +7728,9 @@ static int bnxt_hwrm_port_qstats_ext(struct bnxt *bp) bp->fw_rx_stats_ext_size = 0; bp->fw_tx_stats_ext_size = 0; } + if (flags) + goto qstats_done; + if (bp->fw_tx_stats_ext_size <= offsetof(struct tx_port_stats_ext, pfc_pri0_tx_duration_us) / 8) { mutex_unlock(&bp->hwrm_cmd_lock); @@ -7463,31 +7771,14 @@ qstats_done: return rc; } -static int bnxt_hwrm_pcie_qstats(struct bnxt *bp) -{ - struct hwrm_pcie_qstats_input req = {0}; - - if (!(bp->flags & BNXT_FLAG_PCIE_STATS)) - return 0; - - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PCIE_QSTATS, -1, -1); - req.pcie_stat_size = cpu_to_le16(sizeof(struct pcie_ctx_hw_stats)); - req.pcie_stat_host_addr = cpu_to_le64(bp->hw_pcie_stats_map); - return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); -} - static void bnxt_hwrm_free_tunnel_ports(struct bnxt *bp) { - if (bp->vxlan_port_cnt) { + if (bp->vxlan_fw_dst_port_id != INVALID_HW_RING_ID) bnxt_hwrm_tunnel_dst_port_free( bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN); - } - bp->vxlan_port_cnt = 0; - if (bp->nge_port_cnt) { + if (bp->nge_fw_dst_port_id != INVALID_HW_RING_ID) bnxt_hwrm_tunnel_dst_port_free( bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE); - } - bp->nge_port_cnt = 0; } static int bnxt_set_tpa(struct bnxt *bp, bool set_tpa) @@ -7642,7 +7933,7 @@ static int __bnxt_setup_vnic_p5(struct bnxt *bp, u16 vnic_id) { int rc, i, nr_ctxs; - nr_ctxs = DIV_ROUND_UP(bp->rx_nr_rings, 64); + nr_ctxs = bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings); for (i = 0; i < nr_ctxs; i++) { rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic_id, i); if (rc) { @@ -8204,6 +8495,9 @@ int bnxt_reserve_rings(struct bnxt *bp, bool irq_re_init) rc = bnxt_init_int_mode(bp); bnxt_ulp_irq_restart(bp, rc); } + if (!netif_is_rxfh_configured(bp->dev)) + bnxt_set_dflt_rss_indir_tbl(bp); + if (rc) { netdev_err(bp->dev, "ring reservation/IRQ init failure rc: %d\n", rc); return rc; @@ -8498,6 +8792,9 @@ static int bnxt_hwrm_phy_qcaps(struct bnxt *bp) if (BNXT_PF(bp)) bp->fw_cap |= BNXT_FW_CAP_SHARED_PORT_CFG; } + if (resp->flags & PORT_PHY_QCAPS_RESP_FLAGS_CUMULATIVE_COUNTERS_ON_RESET) + bp->fw_cap |= BNXT_FW_CAP_PORT_STATS_NO_RESET; + if (resp->supported_speeds_auto_mode) link_info->support_auto_speeds = le16_to_cpu(resp->supported_speeds_auto_mode); @@ -9202,7 +9499,7 @@ static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init) } if (irq_re_init) - udp_tunnel_get_rx_info(bp->dev); + udp_tunnel_nic_reset_ntf(bp->dev); set_bit(BNXT_STATE_OPEN, &bp->state); bnxt_enable_int(bp); @@ -9500,34 +9797,33 @@ static void bnxt_get_ring_stats(struct bnxt *bp, { int i; - for (i = 0; i < bp->cp_nr_rings; i++) { struct bnxt_napi *bnapi = bp->bnapi[i]; struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; - struct ctx_hw_stats *hw_stats = cpr->hw_stats; + u64 *sw = cpr->stats.sw_stats; - stats->rx_packets += le64_to_cpu(hw_stats->rx_ucast_pkts); - stats->rx_packets += le64_to_cpu(hw_stats->rx_mcast_pkts); - stats->rx_packets += le64_to_cpu(hw_stats->rx_bcast_pkts); + stats->rx_packets += BNXT_GET_RING_STATS64(sw, rx_ucast_pkts); + stats->rx_packets += BNXT_GET_RING_STATS64(sw, rx_mcast_pkts); + stats->rx_packets += BNXT_GET_RING_STATS64(sw, rx_bcast_pkts); - stats->tx_packets += 
le64_to_cpu(hw_stats->tx_ucast_pkts); - stats->tx_packets += le64_to_cpu(hw_stats->tx_mcast_pkts); - stats->tx_packets += le64_to_cpu(hw_stats->tx_bcast_pkts); + stats->tx_packets += BNXT_GET_RING_STATS64(sw, tx_ucast_pkts); + stats->tx_packets += BNXT_GET_RING_STATS64(sw, tx_mcast_pkts); + stats->tx_packets += BNXT_GET_RING_STATS64(sw, tx_bcast_pkts); - stats->rx_bytes += le64_to_cpu(hw_stats->rx_ucast_bytes); - stats->rx_bytes += le64_to_cpu(hw_stats->rx_mcast_bytes); - stats->rx_bytes += le64_to_cpu(hw_stats->rx_bcast_bytes); + stats->rx_bytes += BNXT_GET_RING_STATS64(sw, rx_ucast_bytes); + stats->rx_bytes += BNXT_GET_RING_STATS64(sw, rx_mcast_bytes); + stats->rx_bytes += BNXT_GET_RING_STATS64(sw, rx_bcast_bytes); - stats->tx_bytes += le64_to_cpu(hw_stats->tx_ucast_bytes); - stats->tx_bytes += le64_to_cpu(hw_stats->tx_mcast_bytes); - stats->tx_bytes += le64_to_cpu(hw_stats->tx_bcast_bytes); + stats->tx_bytes += BNXT_GET_RING_STATS64(sw, tx_ucast_bytes); + stats->tx_bytes += BNXT_GET_RING_STATS64(sw, tx_mcast_bytes); + stats->tx_bytes += BNXT_GET_RING_STATS64(sw, tx_bcast_bytes); stats->rx_missed_errors += - le64_to_cpu(hw_stats->rx_discard_pkts); + BNXT_GET_RING_STATS64(sw, rx_discard_pkts); - stats->multicast += le64_to_cpu(hw_stats->rx_mcast_pkts); + stats->multicast += BNXT_GET_RING_STATS64(sw, rx_mcast_pkts); - stats->tx_dropped += le64_to_cpu(hw_stats->tx_drop_pkts); + stats->tx_dropped += BNXT_GET_RING_STATS64(sw, tx_error_pkts); } } @@ -9565,19 +9861,26 @@ bnxt_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats) bnxt_add_prev_stats(bp, stats); if (bp->flags & BNXT_FLAG_PORT_STATS) { - struct rx_port_stats *rx = bp->hw_rx_port_stats; - struct tx_port_stats *tx = bp->hw_tx_port_stats; - - stats->rx_crc_errors = le64_to_cpu(rx->rx_fcs_err_frames); - stats->rx_frame_errors = le64_to_cpu(rx->rx_align_err_frames); - stats->rx_length_errors = le64_to_cpu(rx->rx_undrsz_frames) + - le64_to_cpu(rx->rx_ovrsz_frames) + - le64_to_cpu(rx->rx_runt_frames); - stats->rx_errors = le64_to_cpu(rx->rx_false_carrier_frames) + - le64_to_cpu(rx->rx_jbr_frames); - stats->collisions = le64_to_cpu(tx->tx_total_collisions); - stats->tx_fifo_errors = le64_to_cpu(tx->tx_fifo_underruns); - stats->tx_errors = le64_to_cpu(tx->tx_err); + u64 *rx = bp->port_stats.sw_stats; + u64 *tx = bp->port_stats.sw_stats + + BNXT_TX_PORT_STATS_BYTE_OFFSET / 8; + + stats->rx_crc_errors = + BNXT_GET_RX_PORT_STATS64(rx, rx_fcs_err_frames); + stats->rx_frame_errors = + BNXT_GET_RX_PORT_STATS64(rx, rx_align_err_frames); + stats->rx_length_errors = + BNXT_GET_RX_PORT_STATS64(rx, rx_undrsz_frames) + + BNXT_GET_RX_PORT_STATS64(rx, rx_ovrsz_frames) + + BNXT_GET_RX_PORT_STATS64(rx, rx_runt_frames); + stats->rx_errors = + BNXT_GET_RX_PORT_STATS64(rx, rx_false_carrier_frames) + + BNXT_GET_RX_PORT_STATS64(rx, rx_jbr_frames); + stats->collisions = + BNXT_GET_TX_PORT_STATS64(tx, tx_total_collisions); + stats->tx_fifo_errors = + BNXT_GET_TX_PORT_STATS64(tx, tx_fifo_underruns); + stats->tx_errors = BNXT_GET_TX_PORT_STATS64(tx, tx_err); } clear_bit(BNXT_STATE_READ_STATS, &bp->state); } @@ -9843,24 +10146,16 @@ static netdev_features_t bnxt_fix_features(struct net_device *dev, /* Both CTAG and STAG VLAN accelaration on the RX side have to be * turned on or off together. 
*/ - vlan_features = features & (NETIF_F_HW_VLAN_CTAG_RX | - NETIF_F_HW_VLAN_STAG_RX); - if (vlan_features != (NETIF_F_HW_VLAN_CTAG_RX | - NETIF_F_HW_VLAN_STAG_RX)) { - if (dev->features & NETIF_F_HW_VLAN_CTAG_RX) - features &= ~(NETIF_F_HW_VLAN_CTAG_RX | - NETIF_F_HW_VLAN_STAG_RX); + vlan_features = features & BNXT_HW_FEATURE_VLAN_ALL_RX; + if (vlan_features != BNXT_HW_FEATURE_VLAN_ALL_RX) { + if (dev->features & BNXT_HW_FEATURE_VLAN_ALL_RX) + features &= ~BNXT_HW_FEATURE_VLAN_ALL_RX; else if (vlan_features) - features |= NETIF_F_HW_VLAN_CTAG_RX | - NETIF_F_HW_VLAN_STAG_RX; + features |= BNXT_HW_FEATURE_VLAN_ALL_RX; } #ifdef CONFIG_BNXT_SRIOV - if (BNXT_VF(bp)) { - if (bp->vf.vlan) { - features &= ~(NETIF_F_HW_VLAN_CTAG_RX | - NETIF_F_HW_VLAN_STAG_RX); - } - } + if (BNXT_VF(bp) && bp->vf.vlan) + features &= ~BNXT_HW_FEATURE_VLAN_ALL_RX; #endif return features; } @@ -9883,7 +10178,7 @@ static int bnxt_set_features(struct net_device *dev, netdev_features_t features) if (bp->flags & BNXT_FLAG_NO_AGG_RINGS) flags &= ~BNXT_FLAG_TPA; - if (features & NETIF_F_HW_VLAN_CTAG_RX) + if (features & BNXT_HW_FEATURE_VLAN_ALL_RX) flags |= BNXT_FLAG_STRIP_VLAN; if (features & NETIF_F_NTUPLE) @@ -9931,6 +10226,38 @@ static int bnxt_set_features(struct net_device *dev, netdev_features_t features) return rc; } +int bnxt_dbg_hwrm_rd_reg(struct bnxt *bp, u32 reg_off, u16 num_words, + u32 *reg_buf) +{ + struct hwrm_dbg_read_direct_output *resp = bp->hwrm_cmd_resp_addr; + struct hwrm_dbg_read_direct_input req = {0}; + __le32 *dbg_reg_buf; + dma_addr_t mapping; + int rc, i; + + dbg_reg_buf = dma_alloc_coherent(&bp->pdev->dev, num_words * 4, + &mapping, GFP_KERNEL); + if (!dbg_reg_buf) + return -ENOMEM; + bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_DBG_READ_DIRECT, -1, -1); + req.host_dest_addr = cpu_to_le64(mapping); + req.read_addr = cpu_to_le32(reg_off + CHIMP_REG_VIEW_ADDR); + req.read_len32 = cpu_to_le32(num_words); + mutex_lock(&bp->hwrm_cmd_lock); + rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + if (rc || resp->error_code) { + rc = -EIO; + goto dbg_rd_reg_exit; + } + for (i = 0; i < num_words; i++) + reg_buf[i] = le32_to_cpu(dbg_reg_buf[i]); + +dbg_rd_reg_exit: + mutex_unlock(&bp->hwrm_cmd_lock); + dma_free_coherent(&bp->pdev->dev, num_words * 4, dbg_reg_buf, mapping); + return rc; +} + static int bnxt_dbg_hwrm_ring_info_get(struct bnxt *bp, u8 ring_type, u32 ring_id, u32 *prod, u32 *cons) { @@ -10075,8 +10402,7 @@ static void bnxt_timer(struct timer_list *t) if (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY) bnxt_fw_health_check(bp); - if (bp->link_info.link_up && (bp->flags & BNXT_FLAG_PORT_STATS) && - bp->stats_coal_ticks) { + if (bp->link_info.link_up && bp->stats_coal_ticks) { set_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event); bnxt_queue_sp_work(bp); } @@ -10361,28 +10687,10 @@ static void bnxt_sp_task(struct work_struct *work) bnxt_cfg_ntp_filters(bp); if (test_and_clear_bit(BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT, &bp->sp_event)) bnxt_hwrm_exec_fwd_req(bp); - if (test_and_clear_bit(BNXT_VXLAN_ADD_PORT_SP_EVENT, &bp->sp_event)) { - bnxt_hwrm_tunnel_dst_port_alloc( - bp, bp->vxlan_port, - TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN); - } - if (test_and_clear_bit(BNXT_VXLAN_DEL_PORT_SP_EVENT, &bp->sp_event)) { - bnxt_hwrm_tunnel_dst_port_free( - bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN); - } - if (test_and_clear_bit(BNXT_GENEVE_ADD_PORT_SP_EVENT, &bp->sp_event)) { - bnxt_hwrm_tunnel_dst_port_alloc( - bp, bp->nge_port, - TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE); - } - if 
(test_and_clear_bit(BNXT_GENEVE_DEL_PORT_SP_EVENT, &bp->sp_event)) { - bnxt_hwrm_tunnel_dst_port_free( - bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE); - } if (test_and_clear_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event)) { - bnxt_hwrm_port_qstats(bp); - bnxt_hwrm_port_qstats_ext(bp); - bnxt_hwrm_pcie_qstats(bp); + bnxt_hwrm_port_qstats(bp, 0); + bnxt_hwrm_port_qstats_ext(bp, 0); + bnxt_accumulate_all_stats(bp); } if (test_and_clear_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event)) { @@ -10975,6 +11283,9 @@ static int bnxt_init_board(struct pci_dev *pdev, struct net_device *dev) timer_setup(&bp->timer, bnxt_timer, 0); bp->current_interval = BNXT_TIMER_INTERVAL; + bp->vxlan_fw_dst_port_id = INVALID_HW_RING_ID; + bp->nge_fw_dst_port_id = INVALID_HW_RING_ID; + clear_bit(BNXT_STATE_OPEN, &bp->state); return 0; @@ -11302,84 +11613,33 @@ static void bnxt_cfg_ntp_filters(struct bnxt *bp) #endif /* CONFIG_RFS_ACCEL */ -static void bnxt_udp_tunnel_add(struct net_device *dev, - struct udp_tunnel_info *ti) +static int bnxt_udp_tunnel_sync(struct net_device *netdev, unsigned int table) { - struct bnxt *bp = netdev_priv(dev); - - if (ti->sa_family != AF_INET6 && ti->sa_family != AF_INET) - return; - - if (!netif_running(dev)) - return; + struct bnxt *bp = netdev_priv(netdev); + struct udp_tunnel_info ti; + unsigned int cmd; - switch (ti->type) { - case UDP_TUNNEL_TYPE_VXLAN: - if (bp->vxlan_port_cnt && bp->vxlan_port != ti->port) - return; + udp_tunnel_nic_get_port(netdev, table, 0, &ti); + if (ti.type == UDP_TUNNEL_TYPE_VXLAN) + cmd = TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN; + else + cmd = TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE; - bp->vxlan_port_cnt++; - if (bp->vxlan_port_cnt == 1) { - bp->vxlan_port = ti->port; - set_bit(BNXT_VXLAN_ADD_PORT_SP_EVENT, &bp->sp_event); - bnxt_queue_sp_work(bp); - } - break; - case UDP_TUNNEL_TYPE_GENEVE: - if (bp->nge_port_cnt && bp->nge_port != ti->port) - return; + if (ti.port) + return bnxt_hwrm_tunnel_dst_port_alloc(bp, ti.port, cmd); - bp->nge_port_cnt++; - if (bp->nge_port_cnt == 1) { - bp->nge_port = ti->port; - set_bit(BNXT_GENEVE_ADD_PORT_SP_EVENT, &bp->sp_event); - } - break; - default: - return; - } - - bnxt_queue_sp_work(bp); + return bnxt_hwrm_tunnel_dst_port_free(bp, cmd); } -static void bnxt_udp_tunnel_del(struct net_device *dev, - struct udp_tunnel_info *ti) -{ - struct bnxt *bp = netdev_priv(dev); - - if (ti->sa_family != AF_INET6 && ti->sa_family != AF_INET) - return; - - if (!netif_running(dev)) - return; - - switch (ti->type) { - case UDP_TUNNEL_TYPE_VXLAN: - if (!bp->vxlan_port_cnt || bp->vxlan_port != ti->port) - return; - bp->vxlan_port_cnt--; - - if (bp->vxlan_port_cnt != 0) - return; - - set_bit(BNXT_VXLAN_DEL_PORT_SP_EVENT, &bp->sp_event); - break; - case UDP_TUNNEL_TYPE_GENEVE: - if (!bp->nge_port_cnt || bp->nge_port != ti->port) - return; - bp->nge_port_cnt--; - - if (bp->nge_port_cnt != 0) - return; - - set_bit(BNXT_GENEVE_DEL_PORT_SP_EVENT, &bp->sp_event); - break; - default: - return; - } - - bnxt_queue_sp_work(bp); -} +static const struct udp_tunnel_nic_info bnxt_udp_tunnels = { + .sync_table = bnxt_udp_tunnel_sync, + .flags = UDP_TUNNEL_NIC_INFO_MAY_SLEEP | + UDP_TUNNEL_NIC_INFO_OPEN_ONLY, + .tables = { + { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN, }, + { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_GENEVE, }, + }, +}; static int bnxt_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, struct net_device *dev, u32 filter_mask, @@ -11477,8 +11737,8 @@ static const struct net_device_ops bnxt_netdev_ops = { #ifdef 
CONFIG_RFS_ACCEL .ndo_rx_flow_steer = bnxt_rx_flow_steer, #endif - .ndo_udp_tunnel_add = bnxt_udp_tunnel_add, - .ndo_udp_tunnel_del = bnxt_udp_tunnel_del, + .ndo_udp_tunnel_add = udp_tunnel_nic_add_port, + .ndo_udp_tunnel_del = udp_tunnel_nic_del_port, .ndo_bpf = bnxt_xdp, .ndo_xdp_xmit = bnxt_xdp_xmit, .ndo_bridge_getlink = bnxt_bridge_getlink, @@ -11518,6 +11778,8 @@ static void bnxt_remove_one(struct pci_dev *pdev) bnxt_free_ctx_mem(bp); kfree(bp->ctx); bp->ctx = NULL; + kfree(bp->rss_indir_tbl); + bp->rss_indir_tbl = NULL; bnxt_free_port_stats(bp); free_netdev(dev); } @@ -11966,11 +12228,15 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE | NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_IPXIP4 | NETIF_F_GSO_PARTIAL; + dev->udp_tunnel_nic_info = &bnxt_udp_tunnels; + dev->gso_partial_features = NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_GSO_GRE_CSUM; dev->vlan_features = dev->hw_features | NETIF_F_HIGHDMA; - dev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX | - NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_STAG_TX; + if (bp->fw_cap & BNXT_FW_CAP_VLAN_RX_STRIP) + dev->hw_features |= BNXT_HW_FEATURE_VLAN_ALL_RX; + if (bp->fw_cap & BNXT_FW_CAP_VLAN_TX_INSERT) + dev->hw_features |= BNXT_HW_FEATURE_VLAN_ALL_TX; if (BNXT_SUPPORTS_TPA(bp)) dev->hw_features |= NETIF_F_GRO_HW; dev->features |= dev->hw_features | NETIF_F_HIGHDMA; @@ -12026,7 +12292,7 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) bnxt_fw_init_one_p3(bp); - if (dev->hw_features & NETIF_F_HW_VLAN_CTAG_RX) + if (dev->hw_features & BNXT_HW_FEATURE_VLAN_ALL_RX) bp->flags |= BNXT_FLAG_STRIP_VLAN; rc = bnxt_init_int_mode(bp); @@ -12038,6 +12304,11 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) */ bp->tx_nr_rings_per_tc = bp->tx_nr_rings; + rc = bnxt_alloc_rss_indir_tbl(bp); + if (rc) + goto init_err_pci_clean; + bnxt_set_dflt_rss_indir_tbl(bp); + if (BNXT_PF(bp)) { if (!bnxt_pf_wq) { bnxt_pf_wq = @@ -12047,7 +12318,10 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) goto init_err_pci_clean; } } - bnxt_init_tc(bp); + rc = bnxt_init_tc(bp); + if (rc) + netdev_err(dev, "Failed to initialize TC flower offload, err = %d.\n", + rc); } bnxt_dl_register(bp); @@ -12082,6 +12356,8 @@ init_err_pci_clean: bnxt_free_ctx_mem(bp); kfree(bp->ctx); bp->ctx = NULL; + kfree(bp->rss_indir_tbl); + bp->rss_indir_tbl = NULL; init_err_free: free_netdev(dev); |
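
Besides the statistics rework, the diff above also retires the driver's hand-rolled VXLAN/GENEVE port tracking (vxlan_port_cnt/nge_port_cnt and the deferred sp_event work items) in favor of the common udp_tunnel_nic infrastructure mentioned in item 25 of the pull summary. A condensed view of that registration pattern as it appears in the patch; this is kernel-context code, not a standalone program:

#include <linux/netdevice.h>
#include <net/udp_tunnel.h>

/* One sync callback re-programs whichever table entry changed; the core
 * serializes the calls and replays them when the device is reopened.
 */
static int bnxt_udp_tunnel_sync(struct net_device *netdev, unsigned int table)
{
	struct bnxt *bp = netdev_priv(netdev);
	struct udp_tunnel_info ti;
	unsigned int cmd;

	udp_tunnel_nic_get_port(netdev, table, 0, &ti);
	if (ti.type == UDP_TUNNEL_TYPE_VXLAN)
		cmd = TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN;
	else
		cmd = TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE;

	if (ti.port)            /* a port is configured: program it in firmware */
		return bnxt_hwrm_tunnel_dst_port_alloc(bp, ti.port, cmd);

	return bnxt_hwrm_tunnel_dst_port_free(bp, cmd);   /* otherwise release it */
}

static const struct udp_tunnel_nic_info bnxt_udp_tunnels = {
	.sync_table	= bnxt_udp_tunnel_sync,
	.flags		= UDP_TUNNEL_NIC_INFO_MAY_SLEEP |
			  UDP_TUNNEL_NIC_INFO_OPEN_ONLY,
	.tables		= {
		{ .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN,  },
		{ .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_GENEVE, },
	},
};

/* In bnxt_init_one() the driver then sets
 * dev->udp_tunnel_nic_info = &bnxt_udp_tunnels, and the
 * .ndo_udp_tunnel_add/.ndo_udp_tunnel_del hooks point at the generic
 * udp_tunnel_nic_add_port()/udp_tunnel_nic_del_port() helpers.
 */
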