commit 47ec5303d73ea344e84f46660fff693c57641386 (patch)
Author:     Linus Torvalds <torvalds@linux-foundation.org>  2020-08-06 06:13:21 +0300
Committer:  Linus Torvalds <torvalds@linux-foundation.org>  2020-08-06 06:13:21 +0300
tree    a2252debab749de29620c43285295d60c4741119  /drivers/net/ethernet/broadcom/bnxt
parent  8186749621ed6b8fc42644c399e8c755a2b6f630 (diff)
parent  c1055b76ad00aed0e8b79417080f212d736246b6 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net-next
Pull networking updates from David Miller:

 1) Support 6 GHz band in ath11k driver, from Rajkumar Manoharan.
 2) Support UDP segmentation in core TSO code, from Eric Dumazet.
 3) Allow flashing different flash images in cxgb4 driver, from Vishal Kulkarni.
 4) Add drop frames counter and flow status to tc flower offloading, from Po Liu.
 5) Support n-tuple filters in cxgb4, from Vishal Kulkarni.
 6) Various new indirect call avoidance, from Eric Dumazet and Brian Vazquez.
 7) Fix BPF verifier failures on 32-bit pointer arithmetic, from Yonghong Song.
 8) Support querying and setting hardware address of a port function via devlink, and use this in mlx5, from Parav Pandit.
 9) Support hw ipsec offload on bonding slaves, from Jarod Wilson.
10) Switch qca8k driver over to phylink, from Jonathan McDowell.
11) In bpftool, show list of processes holding BPF FD references to maps, programs, links, and btf objects, from Andrii Nakryiko.
12) Several conversions over to generic power management, from Vaibhav Gupta.
13) Add support for SO_KEEPALIVE et al. to bpf_setsockopt(), from Dmitry Yakunin.
14) Various https url conversions, from Alexander A. Klimov.
15) Timestamping and PHC support for mscc PHY driver, from Antoine Tenart.
16) Support bpf iterating over tcp and udp sockets, from Yonghong Song.
17) Support 5GBASE-T i40e NICs, from Aleksandr Loktionov.
18) Add kTLS RX HW offload support to mlx5e, from Tariq Toukan.
19) Fix the ->ndo_start_xmit() return type to be netdev_tx_t in several drivers, from Luc Van Oostenryck.
20) XDP support for xen-netfront, from Denis Kirjanov.
21) Support receive buffer autotuning in MPTCP, from Florian Westphal.
22) Support EF100 chip in sfc driver, from Edward Cree.
23) Add XDP support to mvpp2 driver, from Matteo Croce.
24) Support MPTCP in sock_diag, from Paolo Abeni.
25) Commonize UDP tunnel offloading code by creating udp_tunnel_nic infrastructure, from Jakub Kicinski.
26) Several pci_ --> dma_ API conversions, from Christophe JAILLET.
27) Add FLOW_ACTION_POLICE support to mlxsw, from Ido Schimmel.
28) Add SK_LOOKUP bpf program type, from Jakub Sitnicki.
29) Refactor a lot of networking socket option handling code in order to avoid set_fs() calls, from Christoph Hellwig.
30) Add rfc4884 support to icmp code, from Willem de Bruijn.
31) Support TBF offload in dpaa2-eth driver, from Ioana Ciornei.
32) Support XDP_REDIRECT in qede driver, from Alexander Lobakin.
33) Support PCI relaxed ordering in mlx5 driver, from Aya Levin.
34) Support TCP syncookies in MPTCP, from Florian Westphal.
35) Fix several tricky cases of PMTU handling wrt. bridging, from Stefano Brivio.
* git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net-next: (2056 commits)
  net: thunderx: initialize VF's mailbox mutex before first usage
  usb: hso: remove bogus check for EINPROGRESS
  usb: hso: no complaint about kmalloc failure
  hso: fix bailout in error case of probe
  ip_tunnel_core: Fix build for archs without _HAVE_ARCH_IPV6_CSUM
  selftests/net: relax cpu affinity requirement in msg_zerocopy test
  mptcp: be careful on subflow creation
  selftests: rtnetlink: make kci_test_encap() return sub-test result
  selftests: rtnetlink: correct the final return value for the test
  net: dsa: sja1105: use detected device id instead of DT one on mismatch
  tipc: set ub->ifindex for local ipv6 address
  ipv6: add ipv6_dev_find()
  net: openvswitch: silence suspicious RCU usage warning
  Revert "vxlan: fix tos value before xmit"
  ptp: only allow phase values lower than 1 period
  farsync: switch from 'pci_' to 'dma_' API
  wan: wanxl: switch from 'pci_' to 'dma_' API
  hv_netvsc: do not use VF device if link is down
  dpaa2-eth: Fix passing zero to 'PTR_ERR' warning
  net: macb: Properly handle phylink on at91sam9x
  ...
Diffstat (limited to 'drivers/net/ethernet/broadcom/bnxt')
 drivers/net/ethernet/broadcom/bnxt/bnxt.c         | 880
 drivers/net/ethernet/broadcom/bnxt/bnxt.h         | 126
 drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c     |   2
 drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c |  15
 drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c | 190
 drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h |   3
 drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h     | 468
 drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c   |   2
 drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c      |  18
 drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c     |   4
 10 files changed, 1166 insertions(+), 542 deletions(-)
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index 7463a1847ceb..31fb5a28e1c4 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -1614,7 +1614,7 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
skb_set_hash(skb, tpa_info->rss_hash, tpa_info->hash_type);
if ((tpa_info->flags2 & RX_CMP_FLAGS2_META_FORMAT_VLAN) &&
- (skb->dev->features & NETIF_F_HW_VLAN_CTAG_RX)) {
+ (skb->dev->features & BNXT_HW_FEATURE_VLAN_ALL_RX)) {
u16 vlan_proto = tpa_info->metadata >>
RX_CMP_FLAGS2_METADATA_TPID_SFT;
u16 vtag = tpa_info->metadata & RX_CMP_FLAGS2_METADATA_TCI_MASK;
@@ -1832,7 +1832,7 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
if ((rxcmp1->rx_cmp_flags2 &
cpu_to_le32(RX_CMP_FLAGS2_META_FORMAT_VLAN)) &&
- (skb->dev->features & NETIF_F_HW_VLAN_CTAG_RX)) {
+ (skb->dev->features & BNXT_HW_FEATURE_VLAN_ALL_RX)) {
u32 meta_data = le32_to_cpu(rxcmp1->rx_cmp_meta_data);
u16 vtag = meta_data & RX_CMP_FLAGS2_METADATA_TCI_MASK;
u16 vlan_proto = meta_data >> RX_CMP_FLAGS2_METADATA_TPID_SFT;
@@ -3546,7 +3546,7 @@ static void bnxt_free_vnic_attributes(struct bnxt *bp)
}
if (vnic->rss_table) {
- dma_free_coherent(&pdev->dev, PAGE_SIZE,
+ dma_free_coherent(&pdev->dev, vnic->rss_table_size,
vnic->rss_table,
vnic->rss_table_dma_addr);
vnic->rss_table = NULL;
@@ -3611,7 +3611,13 @@ vnic_skip_grps:
continue;
/* Allocate rss table and hash key */
- vnic->rss_table = dma_alloc_coherent(&pdev->dev, PAGE_SIZE,
+ size = L1_CACHE_ALIGN(HW_HASH_INDEX_SIZE * sizeof(u16));
+ if (bp->flags & BNXT_FLAG_CHIP_P5)
+ size = L1_CACHE_ALIGN(BNXT_MAX_RSS_TABLE_SIZE_P5);
+
+ vnic->rss_table_size = size + HW_HASH_KEY_SIZE;
+ vnic->rss_table = dma_alloc_coherent(&pdev->dev,
+ vnic->rss_table_size,
&vnic->rss_table_dma_addr,
GFP_KERNEL);
if (!vnic->rss_table) {
@@ -3619,8 +3625,6 @@ vnic_skip_grps:
goto out;
}
- size = L1_CACHE_ALIGN(HW_HASH_INDEX_SIZE * sizeof(u16));
-
vnic->rss_hash_key = ((void *)vnic->rss_table) + size;
vnic->rss_hash_key_dma_addr = vnic->rss_table_dma_addr + size;
}
@@ -3707,67 +3711,189 @@ static int bnxt_alloc_hwrm_short_cmd_req(struct bnxt *bp)
return 0;
}
-static void bnxt_free_port_stats(struct bnxt *bp)
+static void bnxt_free_stats_mem(struct bnxt *bp, struct bnxt_stats_mem *stats)
{
- struct pci_dev *pdev = bp->pdev;
+ kfree(stats->hw_masks);
+ stats->hw_masks = NULL;
+ kfree(stats->sw_stats);
+ stats->sw_stats = NULL;
+ if (stats->hw_stats) {
+ dma_free_coherent(&bp->pdev->dev, stats->len, stats->hw_stats,
+ stats->hw_stats_map);
+ stats->hw_stats = NULL;
+ }
+}
- bp->flags &= ~BNXT_FLAG_PORT_STATS;
- bp->flags &= ~BNXT_FLAG_PORT_STATS_EXT;
+static int bnxt_alloc_stats_mem(struct bnxt *bp, struct bnxt_stats_mem *stats,
+ bool alloc_masks)
+{
+ stats->hw_stats = dma_alloc_coherent(&bp->pdev->dev, stats->len,
+ &stats->hw_stats_map, GFP_KERNEL);
+ if (!stats->hw_stats)
+ return -ENOMEM;
- if (bp->hw_rx_port_stats) {
- dma_free_coherent(&pdev->dev, bp->hw_port_stats_size,
- bp->hw_rx_port_stats,
- bp->hw_rx_port_stats_map);
- bp->hw_rx_port_stats = NULL;
- }
+ stats->sw_stats = kzalloc(stats->len, GFP_KERNEL);
+ if (!stats->sw_stats)
+ goto stats_mem_err;
- if (bp->hw_tx_port_stats_ext) {
- dma_free_coherent(&pdev->dev, sizeof(struct tx_port_stats_ext),
- bp->hw_tx_port_stats_ext,
- bp->hw_tx_port_stats_ext_map);
- bp->hw_tx_port_stats_ext = NULL;
+ if (alloc_masks) {
+ stats->hw_masks = kzalloc(stats->len, GFP_KERNEL);
+ if (!stats->hw_masks)
+ goto stats_mem_err;
}
+ return 0;
+
+stats_mem_err:
+ bnxt_free_stats_mem(bp, stats);
+ return -ENOMEM;
+}
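/* A minimal usage sketch of the new bnxt_stats_mem pairing (illustrative,
 * error handling elided): hw_stats is the DMA-coherent buffer the firmware
 * writes into, sw_stats is the 64-bit software accumulator, and hw_masks
 * holds the per-counter width mask used for wrap extension.
 */
struct bnxt_stats_mem stats = { .len = bp->hw_ring_stats_size };

if (bnxt_alloc_stats_mem(bp, &stats, true))	/* true => also alloc hw_masks */
	return -ENOMEM;
/* ... firmware DMAs counters into stats.hw_stats; accumulate into sw_stats ... */
bnxt_free_stats_mem(bp, &stats);		/* safe on partially set up memory */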
+
+static void bnxt_fill_masks(u64 *mask_arr, u64 mask, int count)
+{
+ int i;
+
+ for (i = 0; i < count; i++)
+ mask_arr[i] = mask;
+}
+
+static void bnxt_copy_hw_masks(u64 *mask_arr, __le64 *hw_mask_arr, int count)
+{
+ int i;
+
+ for (i = 0; i < count; i++)
+ mask_arr[i] = le64_to_cpu(hw_mask_arr[i]);
+}
+
+static int bnxt_hwrm_func_qstat_ext(struct bnxt *bp,
+ struct bnxt_stats_mem *stats)
+{
+ struct hwrm_func_qstats_ext_output *resp = bp->hwrm_cmd_resp_addr;
+ struct hwrm_func_qstats_ext_input req = {0};
+ __le64 *hw_masks;
+ int rc;
+
+ if (!(bp->fw_cap & BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED) ||
+ !(bp->flags & BNXT_FLAG_CHIP_P5))
+ return -EOPNOTSUPP;
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QSTATS_EXT, -1, -1);
+ req.flags = FUNC_QSTATS_EXT_REQ_FLAGS_COUNTER_MASK;
+ mutex_lock(&bp->hwrm_cmd_lock);
+ rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ if (rc)
+ goto qstat_exit;
- if (bp->hw_rx_port_stats_ext) {
- dma_free_coherent(&pdev->dev, sizeof(struct rx_port_stats_ext),
- bp->hw_rx_port_stats_ext,
- bp->hw_rx_port_stats_ext_map);
- bp->hw_rx_port_stats_ext = NULL;
+ hw_masks = &resp->rx_ucast_pkts;
+ bnxt_copy_hw_masks(stats->hw_masks, hw_masks, stats->len / 8);
+
+qstat_exit:
+ mutex_unlock(&bp->hwrm_cmd_lock);
+ return rc;
+}
+
+static int bnxt_hwrm_port_qstats(struct bnxt *bp, u8 flags);
+static int bnxt_hwrm_port_qstats_ext(struct bnxt *bp, u8 flags);
+
+static void bnxt_init_stats(struct bnxt *bp)
+{
+ struct bnxt_napi *bnapi = bp->bnapi[0];
+ struct bnxt_cp_ring_info *cpr;
+ struct bnxt_stats_mem *stats;
+ __le64 *rx_stats, *tx_stats;
+ int rc, rx_count, tx_count;
+ u64 *rx_masks, *tx_masks;
+ u64 mask;
+ u8 flags;
+
+ cpr = &bnapi->cp_ring;
+ stats = &cpr->stats;
+ rc = bnxt_hwrm_func_qstat_ext(bp, stats);
+ if (rc) {
+ if (bp->flags & BNXT_FLAG_CHIP_P5)
+ mask = (1ULL << 48) - 1;
+ else
+ mask = -1ULL;
+ bnxt_fill_masks(stats->hw_masks, mask, stats->len / 8);
}
+ if (bp->flags & BNXT_FLAG_PORT_STATS) {
+ stats = &bp->port_stats;
+ rx_stats = stats->hw_stats;
+ rx_masks = stats->hw_masks;
+ rx_count = sizeof(struct rx_port_stats) / 8;
+ tx_stats = rx_stats + BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
+ tx_masks = rx_masks + BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
+ tx_count = sizeof(struct tx_port_stats) / 8;
+
+ flags = PORT_QSTATS_REQ_FLAGS_COUNTER_MASK;
+ rc = bnxt_hwrm_port_qstats(bp, flags);
+ if (rc) {
+ mask = (1ULL << 40) - 1;
+
+ bnxt_fill_masks(rx_masks, mask, rx_count);
+ bnxt_fill_masks(tx_masks, mask, tx_count);
+ } else {
+ bnxt_copy_hw_masks(rx_masks, rx_stats, rx_count);
+ bnxt_copy_hw_masks(tx_masks, tx_stats, tx_count);
+ bnxt_hwrm_port_qstats(bp, 0);
+ }
+ }
+ if (bp->flags & BNXT_FLAG_PORT_STATS_EXT) {
+ stats = &bp->rx_port_stats_ext;
+ rx_stats = stats->hw_stats;
+ rx_masks = stats->hw_masks;
+ rx_count = sizeof(struct rx_port_stats_ext) / 8;
+ stats = &bp->tx_port_stats_ext;
+ tx_stats = stats->hw_stats;
+ tx_masks = stats->hw_masks;
+ tx_count = sizeof(struct tx_port_stats_ext) / 8;
+
+ flags = FUNC_QSTATS_EXT_REQ_FLAGS_COUNTER_MASK;
+ rc = bnxt_hwrm_port_qstats_ext(bp, flags);
+ if (rc) {
+ mask = (1ULL << 40) - 1;
- if (bp->hw_pcie_stats) {
- dma_free_coherent(&pdev->dev, sizeof(struct pcie_ctx_hw_stats),
- bp->hw_pcie_stats, bp->hw_pcie_stats_map);
- bp->hw_pcie_stats = NULL;
+ bnxt_fill_masks(rx_masks, mask, rx_count);
+ if (tx_stats)
+ bnxt_fill_masks(tx_masks, mask, tx_count);
+ } else {
+ bnxt_copy_hw_masks(rx_masks, rx_stats, rx_count);
+ if (tx_stats)
+ bnxt_copy_hw_masks(tx_masks, tx_stats,
+ tx_count);
+ bnxt_hwrm_port_qstats_ext(bp, 0);
+ }
}
}
+static void bnxt_free_port_stats(struct bnxt *bp)
+{
+ bp->flags &= ~BNXT_FLAG_PORT_STATS;
+ bp->flags &= ~BNXT_FLAG_PORT_STATS_EXT;
+
+ bnxt_free_stats_mem(bp, &bp->port_stats);
+ bnxt_free_stats_mem(bp, &bp->rx_port_stats_ext);
+ bnxt_free_stats_mem(bp, &bp->tx_port_stats_ext);
+}
+
static void bnxt_free_ring_stats(struct bnxt *bp)
{
- struct pci_dev *pdev = bp->pdev;
- int size, i;
+ int i;
if (!bp->bnapi)
return;
- size = bp->hw_ring_stats_size;
-
for (i = 0; i < bp->cp_nr_rings; i++) {
struct bnxt_napi *bnapi = bp->bnapi[i];
struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
- if (cpr->hw_stats) {
- dma_free_coherent(&pdev->dev, size, cpr->hw_stats,
- cpr->hw_stats_map);
- cpr->hw_stats = NULL;
- }
+ bnxt_free_stats_mem(bp, &cpr->stats);
}
}
static int bnxt_alloc_stats(struct bnxt *bp)
{
u32 size, i;
- struct pci_dev *pdev = bp->pdev;
+ int rc;
size = bp->hw_ring_stats_size;
@@ -3775,11 +3901,10 @@ static int bnxt_alloc_stats(struct bnxt *bp)
struct bnxt_napi *bnapi = bp->bnapi[i];
struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
- cpr->hw_stats = dma_alloc_coherent(&pdev->dev, size,
- &cpr->hw_stats_map,
- GFP_KERNEL);
- if (!cpr->hw_stats)
- return -ENOMEM;
+ cpr->stats.len = size;
+ rc = bnxt_alloc_stats_mem(bp, &cpr->stats, !i);
+ if (rc)
+ return rc;
cpr->hw_stats_ctx_id = INVALID_STATS_CTX_ID;
}
@@ -3787,22 +3912,14 @@ static int bnxt_alloc_stats(struct bnxt *bp)
if (BNXT_VF(bp) || bp->chip_num == CHIP_NUM_58700)
return 0;
- if (bp->hw_rx_port_stats)
+ if (bp->port_stats.hw_stats)
goto alloc_ext_stats;
- bp->hw_port_stats_size = sizeof(struct rx_port_stats) +
- sizeof(struct tx_port_stats) + 1024;
-
- bp->hw_rx_port_stats =
- dma_alloc_coherent(&pdev->dev, bp->hw_port_stats_size,
- &bp->hw_rx_port_stats_map,
- GFP_KERNEL);
- if (!bp->hw_rx_port_stats)
- return -ENOMEM;
+ bp->port_stats.len = BNXT_PORT_STATS_SIZE;
+ rc = bnxt_alloc_stats_mem(bp, &bp->port_stats, true);
+ if (rc)
+ return rc;
- bp->hw_tx_port_stats = (void *)(bp->hw_rx_port_stats + 1) + 512;
- bp->hw_tx_port_stats_map = bp->hw_rx_port_stats_map +
- sizeof(struct rx_port_stats) + 512;
bp->flags |= BNXT_FLAG_PORT_STATS;
alloc_ext_stats:
@@ -3811,41 +3928,28 @@ alloc_ext_stats:
if (!(bp->fw_cap & BNXT_FW_CAP_EXT_STATS_SUPPORTED))
return 0;
- if (bp->hw_rx_port_stats_ext)
+ if (bp->rx_port_stats_ext.hw_stats)
goto alloc_tx_ext_stats;
- bp->hw_rx_port_stats_ext =
- dma_alloc_coherent(&pdev->dev, sizeof(struct rx_port_stats_ext),
- &bp->hw_rx_port_stats_ext_map, GFP_KERNEL);
- if (!bp->hw_rx_port_stats_ext)
+ bp->rx_port_stats_ext.len = sizeof(struct rx_port_stats_ext);
+ rc = bnxt_alloc_stats_mem(bp, &bp->rx_port_stats_ext, true);
+ /* Extended stats are optional */
+ if (rc)
return 0;
alloc_tx_ext_stats:
- if (bp->hw_tx_port_stats_ext)
- goto alloc_pcie_stats;
+ if (bp->tx_port_stats_ext.hw_stats)
+ return 0;
if (bp->hwrm_spec_code >= 0x10902 ||
(bp->fw_cap & BNXT_FW_CAP_EXT_STATS_SUPPORTED)) {
- bp->hw_tx_port_stats_ext =
- dma_alloc_coherent(&pdev->dev,
- sizeof(struct tx_port_stats_ext),
- &bp->hw_tx_port_stats_ext_map,
- GFP_KERNEL);
+ bp->tx_port_stats_ext.len = sizeof(struct tx_port_stats_ext);
+ rc = bnxt_alloc_stats_mem(bp, &bp->tx_port_stats_ext, true);
+ /* Extended stats are optional */
+ if (rc)
+ return 0;
}
bp->flags |= BNXT_FLAG_PORT_STATS_EXT;
-
-alloc_pcie_stats:
- if (bp->hw_pcie_stats ||
- !(bp->fw_cap & BNXT_FW_CAP_PCIE_STATS_SUPPORTED))
- return 0;
-
- bp->hw_pcie_stats =
- dma_alloc_coherent(&pdev->dev, sizeof(struct pcie_ctx_hw_stats),
- &bp->hw_pcie_stats_map, GFP_KERNEL);
- if (!bp->hw_pcie_stats)
- return 0;
-
- bp->flags |= BNXT_FLAG_PCIE_STATS;
return 0;
}
@@ -3945,6 +4049,8 @@ static void bnxt_free_mem(struct bnxt *bp, bool irq_re_init)
bnxt_free_ntp_fltrs(bp, irq_re_init);
if (irq_re_init) {
bnxt_free_ring_stats(bp);
+ if (!(bp->fw_cap & BNXT_FW_CAP_PORT_STATS_NO_RESET))
+ bnxt_free_port_stats(bp);
bnxt_free_ring_grps(bp);
bnxt_free_vnics(bp);
kfree(bp->tx_ring_map);
@@ -4048,6 +4154,7 @@ static int bnxt_alloc_mem(struct bnxt *bp, bool irq_re_init)
rc = bnxt_alloc_stats(bp);
if (rc)
goto alloc_mem_err;
+ bnxt_init_stats(bp);
rc = bnxt_alloc_ntp_fltrs(bp);
if (rc)
@@ -4513,10 +4620,12 @@ static int bnxt_hwrm_tunnel_dst_port_free(struct bnxt *bp, u8 tunnel_type)
switch (tunnel_type) {
case TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN:
- req.tunnel_dst_port_id = bp->vxlan_fw_dst_port_id;
+ req.tunnel_dst_port_id = cpu_to_le16(bp->vxlan_fw_dst_port_id);
+ bp->vxlan_fw_dst_port_id = INVALID_HW_RING_ID;
break;
case TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE:
- req.tunnel_dst_port_id = bp->nge_fw_dst_port_id;
+ req.tunnel_dst_port_id = cpu_to_le16(bp->nge_fw_dst_port_id);
+ bp->nge_fw_dst_port_id = INVALID_HW_RING_ID;
break;
default:
break;
@@ -4551,10 +4660,11 @@ static int bnxt_hwrm_tunnel_dst_port_alloc(struct bnxt *bp, __be16 port,
switch (tunnel_type) {
case TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN:
- bp->vxlan_fw_dst_port_id = resp->tunnel_dst_port_id;
+ bp->vxlan_fw_dst_port_id =
+ le16_to_cpu(resp->tunnel_dst_port_id);
break;
case TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_GENEVE:
- bp->nge_fw_dst_port_id = resp->tunnel_dst_port_id;
+ bp->nge_fw_dst_port_id = le16_to_cpu(resp->tunnel_dst_port_id);
break;
default:
break;
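/* Why the conversions above matter (illustrative): HWRM request/response
 * fields are little-endian on the wire, while the bp->*_fw_dst_port_id
 * fields are now plain host-order u16, so the driver must convert at each
 * boundary:
 */
u16 host_id = le16_to_cpu(resp->tunnel_dst_port_id);	/* firmware -> host */
req.tunnel_dst_port_id = cpu_to_le16(host_id);		/* host -> firmware */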
@@ -4834,9 +4944,112 @@ static u16 bnxt_cp_ring_for_tx(struct bnxt *bp, struct bnxt_tx_ring_info *txr)
}
}
+static int bnxt_alloc_rss_indir_tbl(struct bnxt *bp)
+{
+ int entries;
+
+ if (bp->flags & BNXT_FLAG_CHIP_P5)
+ entries = BNXT_MAX_RSS_TABLE_ENTRIES_P5;
+ else
+ entries = HW_HASH_INDEX_SIZE;
+
+ bp->rss_indir_tbl_entries = entries;
+ bp->rss_indir_tbl = kmalloc_array(entries, sizeof(*bp->rss_indir_tbl),
+ GFP_KERNEL);
+ if (!bp->rss_indir_tbl)
+ return -ENOMEM;
+ return 0;
+}
+
+static void bnxt_set_dflt_rss_indir_tbl(struct bnxt *bp)
+{
+ u16 max_rings, max_entries, pad, i;
+
+ if (!bp->rx_nr_rings)
+ return;
+
+ if (BNXT_CHIP_TYPE_NITRO_A0(bp))
+ max_rings = bp->rx_nr_rings - 1;
+ else
+ max_rings = bp->rx_nr_rings;
+
+ max_entries = bnxt_get_rxfh_indir_size(bp->dev);
+
+ for (i = 0; i < max_entries; i++)
+ bp->rss_indir_tbl[i] = ethtool_rxfh_indir_default(i, max_rings);
+
+ pad = bp->rss_indir_tbl_entries - max_entries;
+ if (pad)
+ memset(&bp->rss_indir_tbl[i], 0, pad * sizeof(u16));
+}
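/* The default spread above is a simple modulo: in the kernel,
 * ethtool_rxfh_indir_default(index, n_rx_rings) returns index % n_rx_rings,
 * so the table round-robins packets across the available RX rings:
 */
for (i = 0; i < max_entries; i++)
	bp->rss_indir_tbl[i] = i % max_rings;	/* equivalent expansion */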
+
+static u16 bnxt_get_max_rss_ring(struct bnxt *bp)
+{
+ u16 i, tbl_size, max_ring = 0;
+
+ if (!bp->rss_indir_tbl)
+ return 0;
+
+ tbl_size = bnxt_get_rxfh_indir_size(bp->dev);
+ for (i = 0; i < tbl_size; i++)
+ max_ring = max(max_ring, bp->rss_indir_tbl[i]);
+ return max_ring;
+}
+
+int bnxt_get_nr_rss_ctxs(struct bnxt *bp, int rx_rings)
+{
+ if (bp->flags & BNXT_FLAG_CHIP_P5)
+ return DIV_ROUND_UP(rx_rings, BNXT_RSS_TABLE_ENTRIES_P5);
+ if (BNXT_CHIP_TYPE_NITRO_A0(bp))
+ return 2;
+ return 1;
+}
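/* Worked examples for bnxt_get_nr_rss_ctxs() (illustrative): each P5 RSS
 * context covers BNXT_RSS_TABLE_ENTRIES_P5 (64) ring-table entries.
 *
 *   bnxt_get_nr_rss_ctxs(bp, 64);   // P5: DIV_ROUND_UP(64, 64)  == 1
 *   bnxt_get_nr_rss_ctxs(bp, 100);  // P5: DIV_ROUND_UP(100, 64) == 2
 *   bnxt_get_nr_rss_ctxs(bp, 8);    // Nitro A0: always 2; others: 1
 */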
+
+static void __bnxt_fill_hw_rss_tbl(struct bnxt *bp, struct bnxt_vnic_info *vnic)
+{
+ bool no_rss = !(vnic->flags & BNXT_VNIC_RSS_FLAG);
+ u16 i, j;
+
+ /* Fill the RSS indirection table with ring group ids */
+ for (i = 0, j = 0; i < HW_HASH_INDEX_SIZE; i++) {
+ if (!no_rss)
+ j = bp->rss_indir_tbl[i];
+ vnic->rss_table[i] = cpu_to_le16(vnic->fw_grp_ids[j]);
+ }
+}
+
+static void __bnxt_fill_hw_rss_tbl_p5(struct bnxt *bp,
+ struct bnxt_vnic_info *vnic)
+{
+ __le16 *ring_tbl = vnic->rss_table;
+ struct bnxt_rx_ring_info *rxr;
+ u16 tbl_size, i;
+
+ tbl_size = bnxt_get_rxfh_indir_size(bp->dev);
+
+ for (i = 0; i < tbl_size; i++) {
+ u16 ring_id, j;
+
+ j = bp->rss_indir_tbl[i];
+ rxr = &bp->rx_ring[j];
+
+ ring_id = rxr->rx_ring_struct.fw_ring_id;
+ *ring_tbl++ = cpu_to_le16(ring_id);
+ ring_id = bnxt_cp_ring_for_rx(bp, rxr);
+ *ring_tbl++ = cpu_to_le16(ring_id);
+ }
+}
+
+static void bnxt_fill_hw_rss_tbl(struct bnxt *bp, struct bnxt_vnic_info *vnic)
+{
+ if (bp->flags & BNXT_FLAG_CHIP_P5)
+ __bnxt_fill_hw_rss_tbl_p5(bp, vnic);
+ else
+ __bnxt_fill_hw_rss_tbl(bp, vnic);
+}
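/* Layout implied by __bnxt_fill_hw_rss_tbl_p5() above (illustrative): each
 * logical entry is a pair of __le16 ring ids (RX ring, completion ring),
 * i.e. 4 bytes, which is where the bnxt.h sizing macros come from:
 *
 *   BNXT_RSS_TABLE_SIZE_P5     = 64 entries * 4 bytes = 256 bytes/context
 *   BNXT_MAX_RSS_TABLE_SIZE_P5 = 256 * 8 contexts     = 2048 bytes
 */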
+
static int bnxt_hwrm_vnic_set_rss(struct bnxt *bp, u16 vnic_id, bool set_rss)
{
- u32 i, j, max_rings;
struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
struct hwrm_vnic_rss_cfg_input req = {0};
@@ -4846,24 +5059,9 @@ static int bnxt_hwrm_vnic_set_rss(struct bnxt *bp, u16 vnic_id, bool set_rss)
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_CFG, -1, -1);
if (set_rss) {
+ bnxt_fill_hw_rss_tbl(bp, vnic);
req.hash_type = cpu_to_le32(bp->rss_hash_cfg);
req.hash_mode_flags = VNIC_RSS_CFG_REQ_HASH_MODE_FLAGS_DEFAULT;
- if (vnic->flags & BNXT_VNIC_RSS_FLAG) {
- if (BNXT_CHIP_TYPE_NITRO_A0(bp))
- max_rings = bp->rx_nr_rings - 1;
- else
- max_rings = bp->rx_nr_rings;
- } else {
- max_rings = 1;
- }
-
- /* Fill the RSS indirection table with ring group ids */
- for (i = 0, j = 0; i < HW_HASH_INDEX_SIZE; i++, j++) {
- if (j == max_rings)
- j = 0;
- vnic->rss_table[i] = cpu_to_le16(vnic->fw_grp_ids[j]);
- }
-
req.ring_grp_tbl_addr = cpu_to_le64(vnic->rss_table_dma_addr);
req.hash_key_tbl_addr =
cpu_to_le64(vnic->rss_hash_key_dma_addr);
@@ -4875,9 +5073,9 @@ static int bnxt_hwrm_vnic_set_rss(struct bnxt *bp, u16 vnic_id, bool set_rss)
static int bnxt_hwrm_vnic_set_rss_p5(struct bnxt *bp, u16 vnic_id, bool set_rss)
{
struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
- u32 i, j, k, nr_ctxs, max_rings = bp->rx_nr_rings;
- struct bnxt_rx_ring_info *rxr = &bp->rx_ring[0];
struct hwrm_vnic_rss_cfg_input req = {0};
+ dma_addr_t ring_tbl_map;
+ u32 i, nr_ctxs;
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_CFG, -1, -1);
req.vnic_id = cpu_to_le16(vnic->fw_vnic_id);
@@ -4885,31 +5083,18 @@ static int bnxt_hwrm_vnic_set_rss_p5(struct bnxt *bp, u16 vnic_id, bool set_rss)
hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
return 0;
}
+ bnxt_fill_hw_rss_tbl(bp, vnic);
req.hash_type = cpu_to_le32(bp->rss_hash_cfg);
req.hash_mode_flags = VNIC_RSS_CFG_REQ_HASH_MODE_FLAGS_DEFAULT;
- req.ring_grp_tbl_addr = cpu_to_le64(vnic->rss_table_dma_addr);
req.hash_key_tbl_addr = cpu_to_le64(vnic->rss_hash_key_dma_addr);
- nr_ctxs = DIV_ROUND_UP(bp->rx_nr_rings, 64);
- for (i = 0, k = 0; i < nr_ctxs; i++) {
- __le16 *ring_tbl = vnic->rss_table;
+ ring_tbl_map = vnic->rss_table_dma_addr;
+ nr_ctxs = bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings);
+ for (i = 0; i < nr_ctxs; ring_tbl_map += BNXT_RSS_TABLE_SIZE_P5, i++) {
int rc;
+ req.ring_grp_tbl_addr = cpu_to_le64(ring_tbl_map);
req.ring_table_pair_index = i;
req.rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[i]);
- for (j = 0; j < 64; j++) {
- u16 ring_id;
-
- ring_id = rxr->rx_ring_struct.fw_ring_id;
- *ring_tbl++ = cpu_to_le16(ring_id);
- ring_id = bnxt_cp_ring_for_rx(bp, rxr);
- *ring_tbl++ = cpu_to_le16(ring_id);
- rxr++;
- k++;
- if (k == max_rings) {
- k = 0;
- rxr = &bp->rx_ring[0];
- }
- }
rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
if (rc)
return rc;
@@ -5147,6 +5332,14 @@ static int bnxt_hwrm_vnic_qcaps(struct bnxt *bp)
if (flags &
VNIC_QCAPS_RESP_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_CAP)
bp->flags |= BNXT_FLAG_ROCE_MIRROR_CAP;
+
+ /* Older P5 fw before EXT_HW_STATS support did not set
+ * VLAN_STRIP_CAP properly.
+ */
+ if ((flags & VNIC_QCAPS_RESP_FLAGS_VLAN_STRIP_CAP) ||
+ ((bp->flags & BNXT_FLAG_CHIP_P5) &&
+ !(bp->fw_cap & BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED)))
+ bp->fw_cap |= BNXT_FW_CAP_VLAN_RX_STRIP;
bp->max_tpa_v2 = le16_to_cpu(resp->max_aggs_supported);
if (bp->max_tpa_v2)
bp->hw_ring_stats_size =
@@ -6000,6 +6193,21 @@ static int __bnxt_reserve_rings(struct bnxt *bp)
rx = rx_rings << 1;
cp = sh ? max_t(int, tx, rx_rings) : tx + rx_rings;
bp->tx_nr_rings = tx;
+
+ /* If we cannot reserve all the RX rings, reset the RSS map only
+ * if absolutely necessary
+ */
+ if (rx_rings != bp->rx_nr_rings) {
+ netdev_warn(bp->dev, "Able to reserve only %d out of %d requested RX rings\n",
+ rx_rings, bp->rx_nr_rings);
+ if ((bp->dev->priv_flags & IFF_RXFH_CONFIGURED) &&
+ (bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings) !=
+ bnxt_get_nr_rss_ctxs(bp, rx_rings) ||
+ bnxt_get_max_rss_ring(bp) >= rx_rings)) {
+ netdev_warn(bp->dev, "RSS table entries reverting to default\n");
+ bp->dev->priv_flags &= ~IFF_RXFH_CONFIGURED;
+ }
+ }
bp->rx_nr_rings = rx_rings;
bp->cp_nr_rings = cp;
@@ -6353,7 +6561,7 @@ static int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp)
struct bnxt_napi *bnapi = bp->bnapi[i];
struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
- req.stats_dma_addr = cpu_to_le64(cpr->hw_stats_map);
+ req.stats_dma_addr = cpu_to_le64(cpr->stats.hw_stats_map);
rc = _hwrm_send_message(bp, &req, sizeof(req),
HWRM_CMD_TIMEOUT);
@@ -6963,7 +7171,7 @@ static int __bnxt_hwrm_func_qcaps(struct bnxt *bp)
struct hwrm_func_qcaps_input req = {0};
struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
- u32 flags;
+ u32 flags, flags_ext;
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCAPS, -1, -1);
req.fid = cpu_to_le16(0xffff);
@@ -6988,6 +7196,12 @@ static int __bnxt_hwrm_func_qcaps(struct bnxt *bp)
bp->fw_cap |= BNXT_FW_CAP_ERROR_RECOVERY;
if (flags & FUNC_QCAPS_RESP_FLAGS_ERR_RECOVER_RELOAD)
bp->fw_cap |= BNXT_FW_CAP_ERR_RECOVER_RELOAD;
+ if (!(flags & FUNC_QCAPS_RESP_FLAGS_VLAN_ACCELERATION_TX_DISABLED))
+ bp->fw_cap |= BNXT_FW_CAP_VLAN_TX_INSERT;
+
+ flags_ext = le32_to_cpu(resp->flags_ext);
+ if (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_EXT_HW_STATS_SUPPORTED)
+ bp->fw_cap |= BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED;
bp->tx_push_thresh = 0;
if ((flags & FUNC_QCAPS_RESP_FLAGS_PUSH_MODE_SUPPORTED) &&
@@ -7378,7 +7592,89 @@ int bnxt_hwrm_fw_set_time(struct bnxt *bp)
return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
}
-static int bnxt_hwrm_port_qstats(struct bnxt *bp)
+static void bnxt_add_one_ctr(u64 hw, u64 *sw, u64 mask)
+{
+ u64 sw_tmp;
+
+ sw_tmp = (*sw & ~mask) | hw;
+ if (hw < (*sw & mask))
+ sw_tmp += mask + 1;
+ WRITE_ONCE(*sw, sw_tmp);
+}
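/* Stand-alone model of the wrap extension above (illustrative, not driver
 * code): the low `mask` bits shadow the hardware counter, and a wrap is
 * detected when the new hardware value is below the previously seen one.
 */
#include <stdint.h>

static void add_one_ctr(uint64_t hw, uint64_t *sw, uint64_t mask)
{
	uint64_t sw_tmp = (*sw & ~mask) | hw;

	if (hw < (*sw & mask))		/* hardware wrapped since last read */
		sw_tmp += mask + 1;	/* carry into the software-only bits */
	*sw = sw_tmp;
}

/* With mask = (1ULL << 48) - 1, sw = 0x0001FFFFFFFFFFFF and hw = 5,
 * add_one_ctr() leaves sw == 0x0002000000000005.
 */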
+
+static void __bnxt_accumulate_stats(__le64 *hw_stats, u64 *sw_stats, u64 *masks,
+ int count, bool ignore_zero)
+{
+ int i;
+
+ for (i = 0; i < count; i++) {
+ u64 hw = le64_to_cpu(READ_ONCE(hw_stats[i]));
+
+ if (ignore_zero && !hw)
+ continue;
+
+ if (masks[i] == -1ULL)
+ sw_stats[i] = hw;
+ else
+ bnxt_add_one_ctr(hw, &sw_stats[i], masks[i]);
+ }
+}
+
+static void bnxt_accumulate_stats(struct bnxt_stats_mem *stats)
+{
+ if (!stats->hw_stats)
+ return;
+
+ __bnxt_accumulate_stats(stats->hw_stats, stats->sw_stats,
+ stats->hw_masks, stats->len / 8, false);
+}
+
+static void bnxt_accumulate_all_stats(struct bnxt *bp)
+{
+ struct bnxt_stats_mem *ring0_stats;
+ bool ignore_zero = false;
+ int i;
+
+ /* Chip bug. Counter intermittently becomes 0. */
+ if (bp->flags & BNXT_FLAG_CHIP_P5)
+ ignore_zero = true;
+
+ for (i = 0; i < bp->cp_nr_rings; i++) {
+ struct bnxt_napi *bnapi = bp->bnapi[i];
+ struct bnxt_cp_ring_info *cpr;
+ struct bnxt_stats_mem *stats;
+
+ cpr = &bnapi->cp_ring;
+ stats = &cpr->stats;
+ if (!i)
+ ring0_stats = stats;
+ __bnxt_accumulate_stats(stats->hw_stats, stats->sw_stats,
+ ring0_stats->hw_masks,
+ ring0_stats->len / 8, ignore_zero);
+ }
+ if (bp->flags & BNXT_FLAG_PORT_STATS) {
+ struct bnxt_stats_mem *stats = &bp->port_stats;
+ __le64 *hw_stats = stats->hw_stats;
+ u64 *sw_stats = stats->sw_stats;
+ u64 *masks = stats->hw_masks;
+ int cnt;
+
+ cnt = sizeof(struct rx_port_stats) / 8;
+ __bnxt_accumulate_stats(hw_stats, sw_stats, masks, cnt, false);
+
+ hw_stats += BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
+ sw_stats += BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
+ masks += BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
+ cnt = sizeof(struct tx_port_stats) / 8;
+ __bnxt_accumulate_stats(hw_stats, sw_stats, masks, cnt, false);
+ }
+ if (bp->flags & BNXT_FLAG_PORT_STATS_EXT) {
+ bnxt_accumulate_stats(&bp->rx_port_stats_ext);
+ bnxt_accumulate_stats(&bp->tx_port_stats_ext);
+ }
+}
+
+static int bnxt_hwrm_port_qstats(struct bnxt *bp, u8 flags)
{
struct bnxt_pf_info *pf = &bp->pf;
struct hwrm_port_qstats_input req = {0};
@@ -7386,14 +7682,19 @@ static int bnxt_hwrm_port_qstats(struct bnxt *bp)
if (!(bp->flags & BNXT_FLAG_PORT_STATS))
return 0;
+ if (flags && !(bp->fw_cap & BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED))
+ return -EOPNOTSUPP;
+
+ req.flags = flags;
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_QSTATS, -1, -1);
req.port_id = cpu_to_le16(pf->port_id);
- req.tx_stat_host_addr = cpu_to_le64(bp->hw_tx_port_stats_map);
- req.rx_stat_host_addr = cpu_to_le64(bp->hw_rx_port_stats_map);
+ req.tx_stat_host_addr = cpu_to_le64(bp->port_stats.hw_stats_map +
+ BNXT_TX_PORT_STATS_BYTE_OFFSET);
+ req.rx_stat_host_addr = cpu_to_le64(bp->port_stats.hw_stats_map);
return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
}
-static int bnxt_hwrm_port_qstats_ext(struct bnxt *bp)
+static int bnxt_hwrm_port_qstats_ext(struct bnxt *bp, u8 flags)
{
struct hwrm_port_qstats_ext_output *resp = bp->hwrm_cmd_resp_addr;
struct hwrm_queue_pri2cos_qcfg_input req2 = {0};
@@ -7405,14 +7706,18 @@ static int bnxt_hwrm_port_qstats_ext(struct bnxt *bp)
if (!(bp->flags & BNXT_FLAG_PORT_STATS_EXT))
return 0;
+ if (flags && !(bp->fw_cap & BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED))
+ return -EOPNOTSUPP;
+
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_QSTATS_EXT, -1, -1);
+ req.flags = flags;
req.port_id = cpu_to_le16(pf->port_id);
req.rx_stat_size = cpu_to_le16(sizeof(struct rx_port_stats_ext));
- req.rx_stat_host_addr = cpu_to_le64(bp->hw_rx_port_stats_ext_map);
- tx_stat_size = bp->hw_tx_port_stats_ext ?
- sizeof(*bp->hw_tx_port_stats_ext) : 0;
+ req.rx_stat_host_addr = cpu_to_le64(bp->rx_port_stats_ext.hw_stats_map);
+ tx_stat_size = bp->tx_port_stats_ext.hw_stats ?
+ sizeof(struct tx_port_stats_ext) : 0;
req.tx_stat_size = cpu_to_le16(tx_stat_size);
- req.tx_stat_host_addr = cpu_to_le64(bp->hw_tx_port_stats_ext_map);
+ req.tx_stat_host_addr = cpu_to_le64(bp->tx_port_stats_ext.hw_stats_map);
mutex_lock(&bp->hwrm_cmd_lock);
rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
if (!rc) {
@@ -7423,6 +7728,9 @@ static int bnxt_hwrm_port_qstats_ext(struct bnxt *bp)
bp->fw_rx_stats_ext_size = 0;
bp->fw_tx_stats_ext_size = 0;
}
+ if (flags)
+ goto qstats_done;
+
if (bp->fw_tx_stats_ext_size <=
offsetof(struct tx_port_stats_ext, pfc_pri0_tx_duration_us) / 8) {
mutex_unlock(&bp->hwrm_cmd_lock);
@@ -7463,31 +7771,14 @@ qstats_done:
return rc;
}
-static int bnxt_hwrm_pcie_qstats(struct bnxt *bp)
-{
- struct hwrm_pcie_qstats_input req = {0};
-
- if (!(bp->flags & BNXT_FLAG_PCIE_STATS))
- return 0;
-
- bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PCIE_QSTATS, -1, -1);
- req.pcie_stat_size = cpu_to_le16(sizeof(struct pcie_ctx_hw_stats));
- req.pcie_stat_host_addr = cpu_to_le64(bp->hw_pcie_stats_map);
- return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
-}
-
static void bnxt_hwrm_free_tunnel_ports(struct bnxt *bp)
{
- if (bp->vxlan_port_cnt) {
+ if (bp->vxlan_fw_dst_port_id != INVALID_HW_RING_ID)
bnxt_hwrm_tunnel_dst_port_free(
bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN);
- }
- bp->vxlan_port_cnt = 0;
- if (bp->nge_port_cnt) {
+ if (bp->nge_fw_dst_port_id != INVALID_HW_RING_ID)
bnxt_hwrm_tunnel_dst_port_free(
bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE);
- }
- bp->nge_port_cnt = 0;
}
static int bnxt_set_tpa(struct bnxt *bp, bool set_tpa)
@@ -7642,7 +7933,7 @@ static int __bnxt_setup_vnic_p5(struct bnxt *bp, u16 vnic_id)
{
int rc, i, nr_ctxs;
- nr_ctxs = DIV_ROUND_UP(bp->rx_nr_rings, 64);
+ nr_ctxs = bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings);
for (i = 0; i < nr_ctxs; i++) {
rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic_id, i);
if (rc) {
@@ -8204,6 +8495,9 @@ int bnxt_reserve_rings(struct bnxt *bp, bool irq_re_init)
rc = bnxt_init_int_mode(bp);
bnxt_ulp_irq_restart(bp, rc);
}
+ if (!netif_is_rxfh_configured(bp->dev))
+ bnxt_set_dflt_rss_indir_tbl(bp);
+
if (rc) {
netdev_err(bp->dev, "ring reservation/IRQ init failure rc: %d\n", rc);
return rc;
@@ -8498,6 +8792,9 @@ static int bnxt_hwrm_phy_qcaps(struct bnxt *bp)
if (BNXT_PF(bp))
bp->fw_cap |= BNXT_FW_CAP_SHARED_PORT_CFG;
}
+ if (resp->flags & PORT_PHY_QCAPS_RESP_FLAGS_CUMULATIVE_COUNTERS_ON_RESET)
+ bp->fw_cap |= BNXT_FW_CAP_PORT_STATS_NO_RESET;
+
if (resp->supported_speeds_auto_mode)
link_info->support_auto_speeds =
le16_to_cpu(resp->supported_speeds_auto_mode);
@@ -9202,7 +9499,7 @@ static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
}
if (irq_re_init)
- udp_tunnel_get_rx_info(bp->dev);
+ udp_tunnel_nic_reset_ntf(bp->dev);
set_bit(BNXT_STATE_OPEN, &bp->state);
bnxt_enable_int(bp);
@@ -9500,34 +9797,33 @@ static void bnxt_get_ring_stats(struct bnxt *bp,
{
int i;
-
for (i = 0; i < bp->cp_nr_rings; i++) {
struct bnxt_napi *bnapi = bp->bnapi[i];
struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
- struct ctx_hw_stats *hw_stats = cpr->hw_stats;
+ u64 *sw = cpr->stats.sw_stats;
- stats->rx_packets += le64_to_cpu(hw_stats->rx_ucast_pkts);
- stats->rx_packets += le64_to_cpu(hw_stats->rx_mcast_pkts);
- stats->rx_packets += le64_to_cpu(hw_stats->rx_bcast_pkts);
+ stats->rx_packets += BNXT_GET_RING_STATS64(sw, rx_ucast_pkts);
+ stats->rx_packets += BNXT_GET_RING_STATS64(sw, rx_mcast_pkts);
+ stats->rx_packets += BNXT_GET_RING_STATS64(sw, rx_bcast_pkts);
- stats->tx_packets += le64_to_cpu(hw_stats->tx_ucast_pkts);
- stats->tx_packets += le64_to_cpu(hw_stats->tx_mcast_pkts);
- stats->tx_packets += le64_to_cpu(hw_stats->tx_bcast_pkts);
+ stats->tx_packets += BNXT_GET_RING_STATS64(sw, tx_ucast_pkts);
+ stats->tx_packets += BNXT_GET_RING_STATS64(sw, tx_mcast_pkts);
+ stats->tx_packets += BNXT_GET_RING_STATS64(sw, tx_bcast_pkts);
- stats->rx_bytes += le64_to_cpu(hw_stats->rx_ucast_bytes);
- stats->rx_bytes += le64_to_cpu(hw_stats->rx_mcast_bytes);
- stats->rx_bytes += le64_to_cpu(hw_stats->rx_bcast_bytes);
+ stats->rx_bytes += BNXT_GET_RING_STATS64(sw, rx_ucast_bytes);
+ stats->rx_bytes += BNXT_GET_RING_STATS64(sw, rx_mcast_bytes);
+ stats->rx_bytes += BNXT_GET_RING_STATS64(sw, rx_bcast_bytes);
- stats->tx_bytes += le64_to_cpu(hw_stats->tx_ucast_bytes);
- stats->tx_bytes += le64_to_cpu(hw_stats->tx_mcast_bytes);
- stats->tx_bytes += le64_to_cpu(hw_stats->tx_bcast_bytes);
+ stats->tx_bytes += BNXT_GET_RING_STATS64(sw, tx_ucast_bytes);
+ stats->tx_bytes += BNXT_GET_RING_STATS64(sw, tx_mcast_bytes);
+ stats->tx_bytes += BNXT_GET_RING_STATS64(sw, tx_bcast_bytes);
stats->rx_missed_errors +=
- le64_to_cpu(hw_stats->rx_discard_pkts);
+ BNXT_GET_RING_STATS64(sw, rx_discard_pkts);
- stats->multicast += le64_to_cpu(hw_stats->rx_mcast_pkts);
+ stats->multicast += BNXT_GET_RING_STATS64(sw, rx_mcast_pkts);
- stats->tx_dropped += le64_to_cpu(hw_stats->tx_drop_pkts);
+ stats->tx_dropped += BNXT_GET_RING_STATS64(sw, tx_error_pkts);
}
}
@@ -9565,19 +9861,26 @@ bnxt_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
bnxt_add_prev_stats(bp, stats);
if (bp->flags & BNXT_FLAG_PORT_STATS) {
- struct rx_port_stats *rx = bp->hw_rx_port_stats;
- struct tx_port_stats *tx = bp->hw_tx_port_stats;
-
- stats->rx_crc_errors = le64_to_cpu(rx->rx_fcs_err_frames);
- stats->rx_frame_errors = le64_to_cpu(rx->rx_align_err_frames);
- stats->rx_length_errors = le64_to_cpu(rx->rx_undrsz_frames) +
- le64_to_cpu(rx->rx_ovrsz_frames) +
- le64_to_cpu(rx->rx_runt_frames);
- stats->rx_errors = le64_to_cpu(rx->rx_false_carrier_frames) +
- le64_to_cpu(rx->rx_jbr_frames);
- stats->collisions = le64_to_cpu(tx->tx_total_collisions);
- stats->tx_fifo_errors = le64_to_cpu(tx->tx_fifo_underruns);
- stats->tx_errors = le64_to_cpu(tx->tx_err);
+ u64 *rx = bp->port_stats.sw_stats;
+ u64 *tx = bp->port_stats.sw_stats +
+ BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
+
+ stats->rx_crc_errors =
+ BNXT_GET_RX_PORT_STATS64(rx, rx_fcs_err_frames);
+ stats->rx_frame_errors =
+ BNXT_GET_RX_PORT_STATS64(rx, rx_align_err_frames);
+ stats->rx_length_errors =
+ BNXT_GET_RX_PORT_STATS64(rx, rx_undrsz_frames) +
+ BNXT_GET_RX_PORT_STATS64(rx, rx_ovrsz_frames) +
+ BNXT_GET_RX_PORT_STATS64(rx, rx_runt_frames);
+ stats->rx_errors =
+ BNXT_GET_RX_PORT_STATS64(rx, rx_false_carrier_frames) +
+ BNXT_GET_RX_PORT_STATS64(rx, rx_jbr_frames);
+ stats->collisions =
+ BNXT_GET_TX_PORT_STATS64(tx, tx_total_collisions);
+ stats->tx_fifo_errors =
+ BNXT_GET_TX_PORT_STATS64(tx, tx_fifo_underruns);
+ stats->tx_errors = BNXT_GET_TX_PORT_STATS64(tx, tx_err);
}
clear_bit(BNXT_STATE_READ_STATS, &bp->state);
}
@@ -9843,24 +10146,16 @@ static netdev_features_t bnxt_fix_features(struct net_device *dev,
/* Both CTAG and STAG VLAN accelaration on the RX side have to be
* turned on or off together.
*/
- vlan_features = features & (NETIF_F_HW_VLAN_CTAG_RX |
- NETIF_F_HW_VLAN_STAG_RX);
- if (vlan_features != (NETIF_F_HW_VLAN_CTAG_RX |
- NETIF_F_HW_VLAN_STAG_RX)) {
- if (dev->features & NETIF_F_HW_VLAN_CTAG_RX)
- features &= ~(NETIF_F_HW_VLAN_CTAG_RX |
- NETIF_F_HW_VLAN_STAG_RX);
+ vlan_features = features & BNXT_HW_FEATURE_VLAN_ALL_RX;
+ if (vlan_features != BNXT_HW_FEATURE_VLAN_ALL_RX) {
+ if (dev->features & BNXT_HW_FEATURE_VLAN_ALL_RX)
+ features &= ~BNXT_HW_FEATURE_VLAN_ALL_RX;
else if (vlan_features)
- features |= NETIF_F_HW_VLAN_CTAG_RX |
- NETIF_F_HW_VLAN_STAG_RX;
+ features |= BNXT_HW_FEATURE_VLAN_ALL_RX;
}
#ifdef CONFIG_BNXT_SRIOV
- if (BNXT_VF(bp)) {
- if (bp->vf.vlan) {
- features &= ~(NETIF_F_HW_VLAN_CTAG_RX |
- NETIF_F_HW_VLAN_STAG_RX);
- }
- }
+ if (BNXT_VF(bp) && bp->vf.vlan)
+ features &= ~BNXT_HW_FEATURE_VLAN_ALL_RX;
#endif
return features;
}
@@ -9883,7 +10178,7 @@ static int bnxt_set_features(struct net_device *dev, netdev_features_t features)
if (bp->flags & BNXT_FLAG_NO_AGG_RINGS)
flags &= ~BNXT_FLAG_TPA;
- if (features & NETIF_F_HW_VLAN_CTAG_RX)
+ if (features & BNXT_HW_FEATURE_VLAN_ALL_RX)
flags |= BNXT_FLAG_STRIP_VLAN;
if (features & NETIF_F_NTUPLE)
@@ -9931,6 +10226,38 @@ static int bnxt_set_features(struct net_device *dev, netdev_features_t features)
return rc;
}
+int bnxt_dbg_hwrm_rd_reg(struct bnxt *bp, u32 reg_off, u16 num_words,
+ u32 *reg_buf)
+{
+ struct hwrm_dbg_read_direct_output *resp = bp->hwrm_cmd_resp_addr;
+ struct hwrm_dbg_read_direct_input req = {0};
+ __le32 *dbg_reg_buf;
+ dma_addr_t mapping;
+ int rc, i;
+
+ dbg_reg_buf = dma_alloc_coherent(&bp->pdev->dev, num_words * 4,
+ &mapping, GFP_KERNEL);
+ if (!dbg_reg_buf)
+ return -ENOMEM;
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_DBG_READ_DIRECT, -1, -1);
+ req.host_dest_addr = cpu_to_le64(mapping);
+ req.read_addr = cpu_to_le32(reg_off + CHIMP_REG_VIEW_ADDR);
+ req.read_len32 = cpu_to_le32(num_words);
+ mutex_lock(&bp->hwrm_cmd_lock);
+ rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ if (rc || resp->error_code) {
+ rc = -EIO;
+ goto dbg_rd_reg_exit;
+ }
+ for (i = 0; i < num_words; i++)
+ reg_buf[i] = le32_to_cpu(dbg_reg_buf[i]);
+
+dbg_rd_reg_exit:
+ mutex_unlock(&bp->hwrm_cmd_lock);
+ dma_free_coherent(&bp->pdev->dev, num_words * 4, dbg_reg_buf, mapping);
+ return rc;
+}
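/* Hypothetical caller (not in this patch): read four 32-bit words starting
 * at register offset 0x1000 via the firmware debug channel.  The offset and
 * word count here are made up for illustration.
 */
u32 reg_buf[4];

rc = bnxt_dbg_hwrm_rd_reg(bp, 0x1000, 4, reg_buf);
if (rc)
	netdev_warn(bp->dev, "debug reg read failed: %d\n", rc);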
+
static int bnxt_dbg_hwrm_ring_info_get(struct bnxt *bp, u8 ring_type,
u32 ring_id, u32 *prod, u32 *cons)
{
@@ -10075,8 +10402,7 @@ static void bnxt_timer(struct timer_list *t)
if (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)
bnxt_fw_health_check(bp);
- if (bp->link_info.link_up && (bp->flags & BNXT_FLAG_PORT_STATS) &&
- bp->stats_coal_ticks) {
+ if (bp->link_info.link_up && bp->stats_coal_ticks) {
set_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event);
bnxt_queue_sp_work(bp);
}
@@ -10361,28 +10687,10 @@ static void bnxt_sp_task(struct work_struct *work)
bnxt_cfg_ntp_filters(bp);
if (test_and_clear_bit(BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT, &bp->sp_event))
bnxt_hwrm_exec_fwd_req(bp);
- if (test_and_clear_bit(BNXT_VXLAN_ADD_PORT_SP_EVENT, &bp->sp_event)) {
- bnxt_hwrm_tunnel_dst_port_alloc(
- bp, bp->vxlan_port,
- TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN);
- }
- if (test_and_clear_bit(BNXT_VXLAN_DEL_PORT_SP_EVENT, &bp->sp_event)) {
- bnxt_hwrm_tunnel_dst_port_free(
- bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN);
- }
- if (test_and_clear_bit(BNXT_GENEVE_ADD_PORT_SP_EVENT, &bp->sp_event)) {
- bnxt_hwrm_tunnel_dst_port_alloc(
- bp, bp->nge_port,
- TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE);
- }
- if (test_and_clear_bit(BNXT_GENEVE_DEL_PORT_SP_EVENT, &bp->sp_event)) {
- bnxt_hwrm_tunnel_dst_port_free(
- bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE);
- }
if (test_and_clear_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event)) {
- bnxt_hwrm_port_qstats(bp);
- bnxt_hwrm_port_qstats_ext(bp);
- bnxt_hwrm_pcie_qstats(bp);
+ bnxt_hwrm_port_qstats(bp, 0);
+ bnxt_hwrm_port_qstats_ext(bp, 0);
+ bnxt_accumulate_all_stats(bp);
}
if (test_and_clear_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event)) {
@@ -10975,6 +11283,9 @@ static int bnxt_init_board(struct pci_dev *pdev, struct net_device *dev)
timer_setup(&bp->timer, bnxt_timer, 0);
bp->current_interval = BNXT_TIMER_INTERVAL;
+ bp->vxlan_fw_dst_port_id = INVALID_HW_RING_ID;
+ bp->nge_fw_dst_port_id = INVALID_HW_RING_ID;
+
clear_bit(BNXT_STATE_OPEN, &bp->state);
return 0;
@@ -11302,84 +11613,33 @@ static void bnxt_cfg_ntp_filters(struct bnxt *bp)
#endif /* CONFIG_RFS_ACCEL */
-static void bnxt_udp_tunnel_add(struct net_device *dev,
- struct udp_tunnel_info *ti)
+static int bnxt_udp_tunnel_sync(struct net_device *netdev, unsigned int table)
{
- struct bnxt *bp = netdev_priv(dev);
-
- if (ti->sa_family != AF_INET6 && ti->sa_family != AF_INET)
- return;
-
- if (!netif_running(dev))
- return;
+ struct bnxt *bp = netdev_priv(netdev);
+ struct udp_tunnel_info ti;
+ unsigned int cmd;
- switch (ti->type) {
- case UDP_TUNNEL_TYPE_VXLAN:
- if (bp->vxlan_port_cnt && bp->vxlan_port != ti->port)
- return;
+ udp_tunnel_nic_get_port(netdev, table, 0, &ti);
+ if (ti.type == UDP_TUNNEL_TYPE_VXLAN)
+ cmd = TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN;
+ else
+ cmd = TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE;
- bp->vxlan_port_cnt++;
- if (bp->vxlan_port_cnt == 1) {
- bp->vxlan_port = ti->port;
- set_bit(BNXT_VXLAN_ADD_PORT_SP_EVENT, &bp->sp_event);
- bnxt_queue_sp_work(bp);
- }
- break;
- case UDP_TUNNEL_TYPE_GENEVE:
- if (bp->nge_port_cnt && bp->nge_port != ti->port)
- return;
+ if (ti.port)
+ return bnxt_hwrm_tunnel_dst_port_alloc(bp, ti.port, cmd);
- bp->nge_port_cnt++;
- if (bp->nge_port_cnt == 1) {
- bp->nge_port = ti->port;
- set_bit(BNXT_GENEVE_ADD_PORT_SP_EVENT, &bp->sp_event);
- }
- break;
- default:
- return;
- }
-
- bnxt_queue_sp_work(bp);
+ return bnxt_hwrm_tunnel_dst_port_free(bp, cmd);
}
-static void bnxt_udp_tunnel_del(struct net_device *dev,
- struct udp_tunnel_info *ti)
-{
- struct bnxt *bp = netdev_priv(dev);
-
- if (ti->sa_family != AF_INET6 && ti->sa_family != AF_INET)
- return;
-
- if (!netif_running(dev))
- return;
-
- switch (ti->type) {
- case UDP_TUNNEL_TYPE_VXLAN:
- if (!bp->vxlan_port_cnt || bp->vxlan_port != ti->port)
- return;
- bp->vxlan_port_cnt--;
-
- if (bp->vxlan_port_cnt != 0)
- return;
-
- set_bit(BNXT_VXLAN_DEL_PORT_SP_EVENT, &bp->sp_event);
- break;
- case UDP_TUNNEL_TYPE_GENEVE:
- if (!bp->nge_port_cnt || bp->nge_port != ti->port)
- return;
- bp->nge_port_cnt--;
-
- if (bp->nge_port_cnt != 0)
- return;
-
- set_bit(BNXT_GENEVE_DEL_PORT_SP_EVENT, &bp->sp_event);
- break;
- default:
- return;
- }
-
- bnxt_queue_sp_work(bp);
-}
+static const struct udp_tunnel_nic_info bnxt_udp_tunnels = {
+ .sync_table = bnxt_udp_tunnel_sync,
+ .flags = UDP_TUNNEL_NIC_INFO_MAY_SLEEP |
+ UDP_TUNNEL_NIC_INFO_OPEN_ONLY,
+ .tables = {
+ { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN, },
+ { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_GENEVE, },
+ },
+};
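/* What the flags above buy (summary, per the udp_tunnel_nic infrastructure):
 * UDP_TUNNEL_NIC_INFO_MAY_SLEEP - sync_table is called in process context
 *	and may sleep, which bnxt needs for its blocking HWRM commands;
 * UDP_TUNNEL_NIC_INFO_OPEN_ONLY - the core only programs ports while the
 *	device is up, replacing the netif_running() checks deleted above.
 * With one entry per table, slot 0 of each table always holds the single
 * offloaded VXLAN or GENEVE port.
 */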
static int bnxt_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
struct net_device *dev, u32 filter_mask,
@@ -11477,8 +11737,8 @@ static const struct net_device_ops bnxt_netdev_ops = {
#ifdef CONFIG_RFS_ACCEL
.ndo_rx_flow_steer = bnxt_rx_flow_steer,
#endif
- .ndo_udp_tunnel_add = bnxt_udp_tunnel_add,
- .ndo_udp_tunnel_del = bnxt_udp_tunnel_del,
+ .ndo_udp_tunnel_add = udp_tunnel_nic_add_port,
+ .ndo_udp_tunnel_del = udp_tunnel_nic_del_port,
.ndo_bpf = bnxt_xdp,
.ndo_xdp_xmit = bnxt_xdp_xmit,
.ndo_bridge_getlink = bnxt_bridge_getlink,
@@ -11518,6 +11778,8 @@ static void bnxt_remove_one(struct pci_dev *pdev)
bnxt_free_ctx_mem(bp);
kfree(bp->ctx);
bp->ctx = NULL;
+ kfree(bp->rss_indir_tbl);
+ bp->rss_indir_tbl = NULL;
bnxt_free_port_stats(bp);
free_netdev(dev);
}
@@ -11966,11 +12228,15 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE |
NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_GSO_GRE_CSUM |
NETIF_F_GSO_IPXIP4 | NETIF_F_GSO_PARTIAL;
+ dev->udp_tunnel_nic_info = &bnxt_udp_tunnels;
+
dev->gso_partial_features = NETIF_F_GSO_UDP_TUNNEL_CSUM |
NETIF_F_GSO_GRE_CSUM;
dev->vlan_features = dev->hw_features | NETIF_F_HIGHDMA;
- dev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX |
- NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_STAG_TX;
+ if (bp->fw_cap & BNXT_FW_CAP_VLAN_RX_STRIP)
+ dev->hw_features |= BNXT_HW_FEATURE_VLAN_ALL_RX;
+ if (bp->fw_cap & BNXT_FW_CAP_VLAN_TX_INSERT)
+ dev->hw_features |= BNXT_HW_FEATURE_VLAN_ALL_TX;
if (BNXT_SUPPORTS_TPA(bp))
dev->hw_features |= NETIF_F_GRO_HW;
dev->features |= dev->hw_features | NETIF_F_HIGHDMA;
@@ -12026,7 +12292,7 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
bnxt_fw_init_one_p3(bp);
- if (dev->hw_features & NETIF_F_HW_VLAN_CTAG_RX)
+ if (dev->hw_features & BNXT_HW_FEATURE_VLAN_ALL_RX)
bp->flags |= BNXT_FLAG_STRIP_VLAN;
rc = bnxt_init_int_mode(bp);
@@ -12038,6 +12304,11 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
*/
bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
+ rc = bnxt_alloc_rss_indir_tbl(bp);
+ if (rc)
+ goto init_err_pci_clean;
+ bnxt_set_dflt_rss_indir_tbl(bp);
+
if (BNXT_PF(bp)) {
if (!bnxt_pf_wq) {
bnxt_pf_wq =
@@ -12047,7 +12318,10 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
goto init_err_pci_clean;
}
}
- bnxt_init_tc(bp);
+ rc = bnxt_init_tc(bp);
+ if (rc)
+ netdev_err(dev, "Failed to initialize TC flower offload, err = %d.\n",
+ rc);
}
bnxt_dl_register(bp);
@@ -12082,6 +12356,8 @@ init_err_pci_clean:
bnxt_free_ctx_mem(bp);
kfree(bp->ctx);
bp->ctx = NULL;
+ kfree(bp->rss_indir_tbl);
+ bp->rss_indir_tbl = NULL;
init_err_free:
free_netdev(dev);
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
index 78e2fd63ac3d..5a13eb66beda 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
@@ -919,6 +919,14 @@ struct bnxt_sw_stats {
struct bnxt_cmn_sw_stats cmn;
};
+struct bnxt_stats_mem {
+ u64 *sw_stats;
+ u64 *hw_masks;
+ void *hw_stats;
+ dma_addr_t hw_stats_map;
+ int len;
+};
+
struct bnxt_cp_ring_info {
struct bnxt_napi *bnapi;
u32 cp_raw_cons;
@@ -943,8 +951,7 @@ struct bnxt_cp_ring_info {
dma_addr_t cp_desc_mapping[MAX_CP_PAGES];
- struct ctx_hw_stats *hw_stats;
- dma_addr_t hw_stats_map;
+ struct bnxt_stats_mem stats;
u32 hw_stats_ctx_id;
struct bnxt_sw_stats sw_stats;
@@ -1017,6 +1024,15 @@ struct bnxt_vnic_info {
__le16 *rss_table;
dma_addr_t rss_hash_key_dma_addr;
u64 *rss_hash_key;
+ int rss_table_size;
+#define BNXT_RSS_TABLE_ENTRIES_P5 64
+#define BNXT_RSS_TABLE_SIZE_P5 (BNXT_RSS_TABLE_ENTRIES_P5 * 4)
+#define BNXT_RSS_TABLE_MAX_TBL_P5 8
+#define BNXT_MAX_RSS_TABLE_SIZE_P5 \
+ (BNXT_RSS_TABLE_SIZE_P5 * BNXT_RSS_TABLE_MAX_TBL_P5)
+#define BNXT_MAX_RSS_TABLE_ENTRIES_P5 \
+ (BNXT_RSS_TABLE_ENTRIES_P5 * BNXT_RSS_TABLE_MAX_TBL_P5)
+
u32 rx_mask;
u8 *mc_list;
@@ -1126,6 +1142,50 @@ struct bnxt_ntuple_filter {
#define BNXT_FLTR_UPDATE 1
};
+struct hwrm_port_phy_qcfg_output_compat {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 link;
+ u8 link_signal_mode;
+ __le16 link_speed;
+ u8 duplex_cfg;
+ u8 pause;
+ __le16 support_speeds;
+ __le16 force_link_speed;
+ u8 auto_mode;
+ u8 auto_pause;
+ __le16 auto_link_speed;
+ __le16 auto_link_speed_mask;
+ u8 wirespeed;
+ u8 lpbk;
+ u8 force_pause;
+ u8 module_status;
+ __le32 preemphasis;
+ u8 phy_maj;
+ u8 phy_min;
+ u8 phy_bld;
+ u8 phy_type;
+ u8 media_type;
+ u8 xcvr_pkg_type;
+ u8 eee_config_phy_addr;
+ u8 parallel_detect;
+ __le16 link_partner_adv_speeds;
+ u8 link_partner_adv_auto_mode;
+ u8 link_partner_adv_pause;
+ __le16 adv_eee_link_speed_mask;
+ __le16 link_partner_adv_eee_link_speed_mask;
+ __le32 xcvr_identifier_type_tx_lpi_timer;
+ __le16 fec_cfg;
+ u8 duplex_state;
+ u8 option_flags;
+ char phy_vendor_name[16];
+ char phy_vendor_partnumber[16];
+ u8 unused_0[7];
+ u8 valid;
+};
+
struct bnxt_link_info {
u8 phy_type;
u8 media_type;
@@ -1244,6 +1304,9 @@ struct bnxt_test_info {
char string[BNXT_MAX_TEST][ETH_GSTRING_LEN];
};
+#define CHIMP_REG_VIEW_ADDR \
+ ((bp->flags & BNXT_FLAG_CHIP_P5) ? 0x80000000 : 0xb1000000)
+
#define BNXT_GRCPF_REG_CHIMP_COMM 0x0
#define BNXT_GRCPF_REG_CHIMP_COMM_TRIGGER 0x100
#define BNXT_GRCPF_REG_WINDOW_BASE_OUT 0x400
@@ -1557,7 +1620,6 @@ struct bnxt {
#define BNXT_FLAG_DIM 0x2000000
#define BNXT_FLAG_ROCE_MIRROR_CAP 0x4000000
#define BNXT_FLAG_PORT_STATS_EXT 0x10000000
- #define BNXT_FLAG_PCIE_STATS 0x40000000
#define BNXT_FLAG_ALL_CONFIG_FEATS (BNXT_FLAG_TPA | \
BNXT_FLAG_RFS | \
@@ -1648,6 +1710,8 @@ struct bnxt {
struct bnxt_ring_grp_info *grp_info;
struct bnxt_vnic_info *vnic_info;
int nr_vnics;
+ u16 *rss_indir_tbl;
+ u16 rss_indir_tbl_entries;
u32 rss_hash_cfg;
u16 max_mtu;
@@ -1705,6 +1769,10 @@ struct bnxt {
#define BNXT_FW_CAP_ERR_RECOVER_RELOAD 0x00100000
#define BNXT_FW_CAP_HOT_RESET 0x00200000
#define BNXT_FW_CAP_SHARED_PORT_CFG 0x00400000
+ #define BNXT_FW_CAP_VLAN_RX_STRIP 0x01000000
+ #define BNXT_FW_CAP_VLAN_TX_INSERT 0x02000000
+ #define BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED 0x04000000
+ #define BNXT_FW_CAP_PORT_STATS_NO_RESET 0x10000000
#define BNXT_NEW_RM(bp) ((bp)->fw_cap & BNXT_FW_CAP_NEW_RM)
u32 hwrm_spec_code;
@@ -1719,17 +1787,9 @@ struct bnxt {
dma_addr_t hwrm_cmd_kong_resp_dma_addr;
struct rtnl_link_stats64 net_stats_prev;
- struct rx_port_stats *hw_rx_port_stats;
- struct tx_port_stats *hw_tx_port_stats;
- struct rx_port_stats_ext *hw_rx_port_stats_ext;
- struct tx_port_stats_ext *hw_tx_port_stats_ext;
- struct pcie_ctx_hw_stats *hw_pcie_stats;
- dma_addr_t hw_rx_port_stats_map;
- dma_addr_t hw_tx_port_stats_map;
- dma_addr_t hw_rx_port_stats_ext_map;
- dma_addr_t hw_tx_port_stats_ext_map;
- dma_addr_t hw_pcie_stats_map;
- int hw_port_stats_size;
+ struct bnxt_stats_mem port_stats;
+ struct bnxt_stats_mem rx_port_stats_ext;
+ struct bnxt_stats_mem tx_port_stats_ext;
u16 fw_rx_stats_ext_size;
u16 fw_tx_stats_ext_size;
u16 hw_ring_stats_size;
@@ -1751,12 +1811,8 @@ struct bnxt {
((u64)(maj) << 48 | (u64)(min) << 32 | (u64)(bld) << 16 | (rsv))
#define BNXT_FW_MAJ(bp) ((bp)->fw_ver_code >> 48)
- __be16 vxlan_port;
- u8 vxlan_port_cnt;
- __le16 vxlan_fw_dst_port_id;
- __be16 nge_port;
- u8 nge_port_cnt;
- __le16 nge_fw_dst_port_id;
+ u16 vxlan_fw_dst_port_id;
+ u16 nge_fw_dst_port_id;
u8 port_partition_type;
u8 port_count;
u16 br_mode;
@@ -1776,16 +1832,12 @@ struct bnxt {
#define BNXT_RX_NTP_FLTR_SP_EVENT 1
#define BNXT_LINK_CHNG_SP_EVENT 2
#define BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT 3
-#define BNXT_VXLAN_ADD_PORT_SP_EVENT 4
-#define BNXT_VXLAN_DEL_PORT_SP_EVENT 5
#define BNXT_RESET_TASK_SP_EVENT 6
#define BNXT_RST_RING_SP_EVENT 7
#define BNXT_HWRM_PF_UNLOAD_SP_EVENT 8
#define BNXT_PERIODIC_STATS_SP_EVENT 9
#define BNXT_HWRM_PORT_MODULE_SP_EVENT 10
#define BNXT_RESET_TASK_SILENT_SP_EVENT 11
-#define BNXT_GENEVE_ADD_PORT_SP_EVENT 12
-#define BNXT_GENEVE_DEL_PORT_SP_EVENT 13
#define BNXT_LINK_SPEED_CHNG_SP_EVENT 14
#define BNXT_FLOW_STATS_SP_EVENT 15
#define BNXT_UPDATE_PHY_SP_EVENT 16
@@ -1879,12 +1931,27 @@ struct bnxt {
struct device *hwmon_dev;
};
+#define BNXT_GET_RING_STATS64(sw, counter) \
+ (*((sw) + offsetof(struct ctx_hw_stats, counter) / 8))
+
+#define BNXT_GET_RX_PORT_STATS64(sw, counter) \
+ (*((sw) + offsetof(struct rx_port_stats, counter) / 8))
+
+#define BNXT_GET_TX_PORT_STATS64(sw, counter) \
+ (*((sw) + offsetof(struct tx_port_stats, counter) / 8))
+
+#define BNXT_PORT_STATS_SIZE \
+ (sizeof(struct rx_port_stats) + sizeof(struct tx_port_stats) + 1024)
+
+#define BNXT_TX_PORT_STATS_BYTE_OFFSET \
+ (sizeof(struct rx_port_stats) + 512)
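/* How the accessors index the u64 shadow copy (illustrative): sw_stats
 * mirrors the hardware counter layout, so a counter's array index is its
 * byte offset divided by 8.  For example:
 */
u64 *rx = bp->port_stats.sw_stats;
u64 fcs_errs = BNXT_GET_RX_PORT_STATS64(rx, rx_fcs_err_frames);
/* expands to: *(rx + offsetof(struct rx_port_stats, rx_fcs_err_frames) / 8) */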
+
#define BNXT_RX_STATS_OFFSET(counter) \
(offsetof(struct rx_port_stats, counter) / 8)
#define BNXT_TX_STATS_OFFSET(counter) \
((offsetof(struct tx_port_stats, counter) + \
- sizeof(struct rx_port_stats) + 512) / 8)
+ BNXT_TX_PORT_STATS_BYTE_OFFSET) / 8)
#define BNXT_RX_STATS_EXT_OFFSET(counter) \
(offsetof(struct rx_port_stats_ext, counter) / 8)
@@ -1892,8 +1959,10 @@ struct bnxt {
#define BNXT_TX_STATS_EXT_OFFSET(counter) \
(offsetof(struct tx_port_stats_ext, counter) / 8)
-#define BNXT_PCIE_STATS_OFFSET(counter) \
- (offsetof(struct pcie_ctx_hw_stats, counter) / 8)
+#define BNXT_HW_FEATURE_VLAN_ALL_RX \
+ (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX)
+#define BNXT_HW_FEATURE_VLAN_ALL_TX \
+ (NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_STAG_TX)
#define I2C_DEV_ADDR_A0 0xa0
#define I2C_DEV_ADDR_A2 0xa2
@@ -2028,6 +2097,7 @@ int hwrm_send_message(struct bnxt *, void *, u32, int);
int hwrm_send_message_silent(struct bnxt *, void *, u32, int);
int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp, unsigned long *bmap,
int bmap_size, bool async_only);
+int bnxt_get_nr_rss_ctxs(struct bnxt *bp, int rx_rings);
int bnxt_hwrm_vnic_cfg(struct bnxt *bp, u16 vnic_id);
int __bnxt_hwrm_get_tx_rings(struct bnxt *bp, u16 fid, int *tx_rings);
int bnxt_nq_rings_in_use(struct bnxt *bp);
@@ -2050,6 +2120,8 @@ int bnxt_open_nic(struct bnxt *, bool, bool);
int bnxt_half_open_nic(struct bnxt *bp);
void bnxt_half_close_nic(struct bnxt *bp);
int bnxt_close_nic(struct bnxt *, bool, bool);
+int bnxt_dbg_hwrm_rd_reg(struct bnxt *bp, u32 reg_off, u16 num_words,
+ u32 *reg_buf);
void bnxt_fw_exception(struct bnxt *bp);
void bnxt_fw_reset(struct bnxt *bp);
int bnxt_check_rings(struct bnxt *bp, int tx, int rx, bool sh, int tcs,
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c
index 02b27551d34d..8e90224c43a2 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c
@@ -544,7 +544,7 @@ static int bnxt_dcbnl_ieee_setets(struct net_device *dev, struct ieee_ets *ets)
static int bnxt_dcbnl_ieee_getpfc(struct net_device *dev, struct ieee_pfc *pfc)
{
struct bnxt *bp = netdev_priv(dev);
- __le64 *stats = (__le64 *)bp->hw_rx_port_stats;
+ __le64 *stats = bp->port_stats.hw_stats;
struct ieee_pfc *my_pfc = bp->ieee_pfc;
long rx_off, tx_off;
int i, rc;
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
index a812beb46325..3a854195d5b0 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
@@ -411,6 +411,12 @@ static int bnxt_dl_info_get(struct devlink *dl, struct devlink_info_req *req,
return rc;
}
+ if (strlen(bp->board_serialno)) {
+ rc = devlink_info_board_serial_number_put(req, bp->board_serialno);
+ if (rc)
+ return rc;
+ }
+
sprintf(buf, "%X", bp->chip_num);
rc = devlink_info_version_fixed_put(req,
DEVLINK_INFO_VERSION_GENERIC_ASIC_ID, buf);
@@ -685,6 +691,7 @@ static void bnxt_dl_params_unregister(struct bnxt *bp)
int bnxt_dl_register(struct bnxt *bp)
{
+ struct devlink_port_attrs attrs = {};
struct devlink *dl;
int rc;
@@ -713,9 +720,11 @@ int bnxt_dl_register(struct bnxt *bp)
if (!BNXT_PF(bp))
return 0;
- devlink_port_attrs_set(&bp->dl_port, DEVLINK_PORT_FLAVOUR_PHYSICAL,
- bp->pf.port_id, false, 0, bp->dsn,
- sizeof(bp->dsn));
+ attrs.flavour = DEVLINK_PORT_FLAVOUR_PHYSICAL;
+ attrs.phys.port_number = bp->pf.port_id;
+ memcpy(attrs.switch_id.id, bp->dsn, sizeof(bp->dsn));
+ attrs.switch_id.id_len = sizeof(bp->dsn);
+ devlink_port_attrs_set(&bp->dl_port, &attrs);
rc = devlink_port_register(dl, &bp->dl_port, bp->pf.port_id);
if (rc) {
netdev_err(bp->dev, "devlink_port_register failed\n");
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
index b4aa56dc4f9f..64da654f1038 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
@@ -142,7 +142,7 @@ static const char * const bnxt_ring_rx_stats_str[] = {
"rx_mcast_packets",
"rx_bcast_packets",
"rx_discards",
- "rx_drops",
+ "rx_errors",
"rx_ucast_bytes",
"rx_mcast_bytes",
"rx_bcast_bytes",
@@ -152,8 +152,8 @@ static const char * const bnxt_ring_tx_stats_str[] = {
"tx_ucast_packets",
"tx_mcast_packets",
"tx_bcast_packets",
+ "tx_errors",
"tx_discards",
- "tx_drops",
"tx_ucast_bytes",
"tx_mcast_bytes",
"tx_bcast_bytes",
@@ -293,9 +293,6 @@ static const char * const bnxt_cmn_sw_stats_str[] = {
BNXT_TX_STATS_PRI_ENTRY(counter, 6), \
BNXT_TX_STATS_PRI_ENTRY(counter, 7)
-#define BNXT_PCIE_STATS_ENTRY(counter) \
- { BNXT_PCIE_STATS_OFFSET(counter), __stringify(counter) }
-
enum {
RX_TOTAL_DISCARDS,
TX_TOTAL_DISCARDS,
@@ -454,24 +451,6 @@ static const struct {
BNXT_TX_STATS_PRI_ENTRIES(tx_packets),
};
-static const struct {
- long offset;
- char string[ETH_GSTRING_LEN];
-} bnxt_pcie_stats_arr[] = {
- BNXT_PCIE_STATS_ENTRY(pcie_pl_signal_integrity),
- BNXT_PCIE_STATS_ENTRY(pcie_dl_signal_integrity),
- BNXT_PCIE_STATS_ENTRY(pcie_tl_signal_integrity),
- BNXT_PCIE_STATS_ENTRY(pcie_link_integrity),
- BNXT_PCIE_STATS_ENTRY(pcie_tx_traffic_rate),
- BNXT_PCIE_STATS_ENTRY(pcie_rx_traffic_rate),
- BNXT_PCIE_STATS_ENTRY(pcie_tx_dllp_statistics),
- BNXT_PCIE_STATS_ENTRY(pcie_rx_dllp_statistics),
- BNXT_PCIE_STATS_ENTRY(pcie_equalization_time),
- BNXT_PCIE_STATS_ENTRY(pcie_ltssm_histogram[0]),
- BNXT_PCIE_STATS_ENTRY(pcie_ltssm_histogram[2]),
- BNXT_PCIE_STATS_ENTRY(pcie_recovery_histogram),
-};
-
#define BNXT_NUM_SW_FUNC_STATS ARRAY_SIZE(bnxt_sw_func_stats)
#define BNXT_NUM_PORT_STATS ARRAY_SIZE(bnxt_port_stats_arr)
#define BNXT_NUM_STATS_PRI \
@@ -479,7 +458,6 @@ static const struct {
ARRAY_SIZE(bnxt_rx_pkts_pri_arr) + \
ARRAY_SIZE(bnxt_tx_bytes_pri_arr) + \
ARRAY_SIZE(bnxt_tx_pkts_pri_arr))
-#define BNXT_NUM_PCIE_STATS ARRAY_SIZE(bnxt_pcie_stats_arr)
static int bnxt_get_num_tpa_ring_stats(struct bnxt *bp)
{
@@ -526,9 +504,6 @@ static int bnxt_get_num_stats(struct bnxt *bp)
num_stats += BNXT_NUM_STATS_PRI;
}
- if (bp->flags & BNXT_FLAG_PCIE_STATS)
- num_stats += BNXT_NUM_PCIE_STATS;
-
return num_stats;
}
@@ -584,19 +559,19 @@ static void bnxt_get_ethtool_stats(struct net_device *dev,
for (i = 0; i < bp->cp_nr_rings; i++) {
struct bnxt_napi *bnapi = bp->bnapi[i];
struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
- __le64 *hw_stats = (__le64 *)cpr->hw_stats;
+ u64 *sw_stats = cpr->stats.sw_stats;
u64 *sw;
int k;
if (is_rx_ring(bp, i)) {
for (k = 0; k < NUM_RING_RX_HW_STATS; j++, k++)
- buf[j] = le64_to_cpu(hw_stats[k]);
+ buf[j] = sw_stats[k];
}
if (is_tx_ring(bp, i)) {
k = NUM_RING_RX_HW_STATS;
for (; k < NUM_RING_RX_HW_STATS + NUM_RING_TX_HW_STATS;
j++, k++)
- buf[j] = le64_to_cpu(hw_stats[k]);
+ buf[j] = sw_stats[k];
}
if (!tpa_stats || !is_rx_ring(bp, i))
goto skip_tpa_ring_stats;
@@ -604,7 +579,7 @@ static void bnxt_get_ethtool_stats(struct net_device *dev,
k = NUM_RING_RX_HW_STATS + NUM_RING_TX_HW_STATS;
for (; k < NUM_RING_RX_HW_STATS + NUM_RING_TX_HW_STATS +
tpa_stats; j++, k++)
- buf[j] = le64_to_cpu(hw_stats[k]);
+ buf[j] = sw_stats[k];
skip_tpa_ring_stats:
sw = (u64 *)&cpr->sw_stats.rx;
@@ -618,9 +593,9 @@ skip_tpa_ring_stats:
buf[j] = sw[k];
bnxt_sw_func_stats[RX_TOTAL_DISCARDS].counter +=
- le64_to_cpu(cpr->hw_stats->rx_discard_pkts);
+ BNXT_GET_RING_STATS64(sw_stats, rx_discard_pkts);
bnxt_sw_func_stats[TX_TOTAL_DISCARDS].counter +=
- le64_to_cpu(cpr->hw_stats->tx_discard_pkts);
+ BNXT_GET_RING_STATS64(sw_stats, tx_discard_pkts);
}
for (i = 0; i < BNXT_NUM_SW_FUNC_STATS; i++, j++)
@@ -628,60 +603,50 @@ skip_tpa_ring_stats:
skip_ring_stats:
if (bp->flags & BNXT_FLAG_PORT_STATS) {
- __le64 *port_stats = (__le64 *)bp->hw_rx_port_stats;
+ u64 *port_stats = bp->port_stats.sw_stats;
- for (i = 0; i < BNXT_NUM_PORT_STATS; i++, j++) {
- buf[j] = le64_to_cpu(*(port_stats +
- bnxt_port_stats_arr[i].offset));
- }
+ for (i = 0; i < BNXT_NUM_PORT_STATS; i++, j++)
+ buf[j] = *(port_stats + bnxt_port_stats_arr[i].offset);
}
if (bp->flags & BNXT_FLAG_PORT_STATS_EXT) {
- __le64 *rx_port_stats_ext = (__le64 *)bp->hw_rx_port_stats_ext;
- __le64 *tx_port_stats_ext = (__le64 *)bp->hw_tx_port_stats_ext;
+ u64 *rx_port_stats_ext = bp->rx_port_stats_ext.sw_stats;
+ u64 *tx_port_stats_ext = bp->tx_port_stats_ext.sw_stats;
for (i = 0; i < bp->fw_rx_stats_ext_size; i++, j++) {
- buf[j] = le64_to_cpu(*(rx_port_stats_ext +
- bnxt_port_stats_ext_arr[i].offset));
+ buf[j] = *(rx_port_stats_ext +
+ bnxt_port_stats_ext_arr[i].offset);
}
for (i = 0; i < bp->fw_tx_stats_ext_size; i++, j++) {
- buf[j] = le64_to_cpu(*(tx_port_stats_ext +
- bnxt_tx_port_stats_ext_arr[i].offset));
+ buf[j] = *(tx_port_stats_ext +
+ bnxt_tx_port_stats_ext_arr[i].offset);
}
if (bp->pri2cos_valid) {
for (i = 0; i < 8; i++, j++) {
long n = bnxt_rx_bytes_pri_arr[i].base_off +
bp->pri2cos_idx[i];
- buf[j] = le64_to_cpu(*(rx_port_stats_ext + n));
+ buf[j] = *(rx_port_stats_ext + n);
}
for (i = 0; i < 8; i++, j++) {
long n = bnxt_rx_pkts_pri_arr[i].base_off +
bp->pri2cos_idx[i];
- buf[j] = le64_to_cpu(*(rx_port_stats_ext + n));
+ buf[j] = *(rx_port_stats_ext + n);
}
for (i = 0; i < 8; i++, j++) {
long n = bnxt_tx_bytes_pri_arr[i].base_off +
bp->pri2cos_idx[i];
- buf[j] = le64_to_cpu(*(tx_port_stats_ext + n));
+ buf[j] = *(tx_port_stats_ext + n);
}
for (i = 0; i < 8; i++, j++) {
long n = bnxt_tx_pkts_pri_arr[i].base_off +
bp->pri2cos_idx[i];
- buf[j] = le64_to_cpu(*(tx_port_stats_ext + n));
+ buf[j] = *(tx_port_stats_ext + n);
}
}
}
- if (bp->flags & BNXT_FLAG_PCIE_STATS) {
- __le64 *pcie_stats = (__le64 *)bp->hw_pcie_stats;
-
- for (i = 0; i < BNXT_NUM_PCIE_STATS; i++, j++) {
- buf[j] = le64_to_cpu(*(pcie_stats +
- bnxt_pcie_stats_arr[i].offset));
- }
- }
}
static void bnxt_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
@@ -782,12 +747,6 @@ skip_tpa_stats:
}
}
}
- if (bp->flags & BNXT_FLAG_PCIE_STATS) {
- for (i = 0; i < BNXT_NUM_PCIE_STATS; i++) {
- strcpy(buf, bnxt_pcie_stats_arr[i].string);
- buf += ETH_GSTRING_LEN;
- }
- }
break;
case ETH_SS_TEST:
if (bp->num_tests)
@@ -926,6 +885,13 @@ static int bnxt_set_channels(struct net_device *dev,
return rc;
}
+ if (bnxt_get_nr_rss_ctxs(bp, req_rx_rings) !=
+ bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings) &&
+ (dev->priv_flags & IFF_RXFH_CONFIGURED)) {
+ netdev_warn(dev, "RSS table size change required, RSS table entries must be default to proceed\n");
+ return -EINVAL;
+ }
+
if (netif_running(dev)) {
if (BNXT_PF(bp)) {
/* TODO CHIMP_FW: Send message to all VF's
@@ -1273,8 +1239,12 @@ static int bnxt_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
return rc;
}
-static u32 bnxt_get_rxfh_indir_size(struct net_device *dev)
+u32 bnxt_get_rxfh_indir_size(struct net_device *dev)
{
+ struct bnxt *bp = netdev_priv(dev);
+
+ if (bp->flags & BNXT_FLAG_CHIP_P5)
+ return ALIGN(bp->rx_nr_rings, BNXT_RSS_TABLE_ENTRIES_P5);
return HW_HASH_INDEX_SIZE;
}
@@ -1288,7 +1258,7 @@ static int bnxt_get_rxfh(struct net_device *dev, u32 *indir, u8 *key,
{
struct bnxt *bp = netdev_priv(dev);
struct bnxt_vnic_info *vnic;
- int i = 0;
+ u32 i, tbl_size;
if (hfunc)
*hfunc = ETH_RSS_HASH_TOP;
@@ -1297,9 +1267,10 @@ static int bnxt_get_rxfh(struct net_device *dev, u32 *indir, u8 *key,
return 0;
vnic = &bp->vnic_info[0];
- if (indir && vnic->rss_table) {
- for (i = 0; i < HW_HASH_INDEX_SIZE; i++)
- indir[i] = le16_to_cpu(vnic->rss_table[i]);
+ if (indir && bp->rss_indir_tbl) {
+ tbl_size = bnxt_get_rxfh_indir_size(dev);
+ for (i = 0; i < tbl_size; i++)
+ indir[i] = bp->rss_indir_tbl[i];
}
if (key && vnic->rss_hash_key)
@@ -1308,6 +1279,35 @@ static int bnxt_get_rxfh(struct net_device *dev, u32 *indir, u8 *key,
return 0;
}
+static int bnxt_set_rxfh(struct net_device *dev, const u32 *indir,
+ const u8 *key, const u8 hfunc)
+{
+ struct bnxt *bp = netdev_priv(dev);
+ int rc = 0;
+
+ if (hfunc && hfunc != ETH_RSS_HASH_TOP)
+ return -EOPNOTSUPP;
+
+ if (key)
+ return -EOPNOTSUPP;
+
+ if (indir) {
+ u32 i, pad, tbl_size = bnxt_get_rxfh_indir_size(dev);
+
+ for (i = 0; i < tbl_size; i++)
+ bp->rss_indir_tbl[i] = indir[i];
+ pad = bp->rss_indir_tbl_entries - tbl_size;
+ if (pad)
+ memset(&bp->rss_indir_tbl[i], 0, pad * sizeof(u16));
+ }
+
+ if (netif_running(bp->dev)) {
+ bnxt_close_nic(bp, false, false);
+ rc = bnxt_open_nic(bp, false, false);
+ }
+ return rc;
+}
+
static void bnxt_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
{
@@ -1324,6 +1324,59 @@ static void bnxt_get_drvinfo(struct net_device *dev,
info->regdump_len = 0;
}
+static int bnxt_get_regs_len(struct net_device *dev)
+{
+ struct bnxt *bp = netdev_priv(dev);
+ int reg_len;
+
+ reg_len = BNXT_PXP_REG_LEN;
+
+ if (bp->fw_cap & BNXT_FW_CAP_PCIE_STATS_SUPPORTED)
+ reg_len += sizeof(struct pcie_ctx_hw_stats);
+
+ return reg_len;
+}
+
+static void bnxt_get_regs(struct net_device *dev, struct ethtool_regs *regs,
+ void *_p)
+{
+ struct pcie_ctx_hw_stats *hw_pcie_stats;
+ struct hwrm_pcie_qstats_input req = {0};
+ struct bnxt *bp = netdev_priv(dev);
+ dma_addr_t hw_pcie_stats_addr;
+ int rc;
+
+ regs->version = 0;
+ bnxt_dbg_hwrm_rd_reg(bp, 0, BNXT_PXP_REG_LEN / 4, _p);
+
+ if (!(bp->fw_cap & BNXT_FW_CAP_PCIE_STATS_SUPPORTED))
+ return;
+
+ hw_pcie_stats = dma_alloc_coherent(&bp->pdev->dev,
+ sizeof(*hw_pcie_stats),
+ &hw_pcie_stats_addr, GFP_KERNEL);
+ if (!hw_pcie_stats)
+ return;
+
+ regs->version = 1;
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PCIE_QSTATS, -1, -1);
+ req.pcie_stat_size = cpu_to_le16(sizeof(*hw_pcie_stats));
+ req.pcie_stat_host_addr = cpu_to_le64(hw_pcie_stats_addr);
+ mutex_lock(&bp->hwrm_cmd_lock);
+ rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ if (!rc) {
+ __le64 *src = (__le64 *)hw_pcie_stats;
+ u64 *dst = (u64 *)(_p + BNXT_PXP_REG_LEN);
+ int i;
+
+ for (i = 0; i < sizeof(*hw_pcie_stats) / sizeof(__le64); i++)
+ dst[i] = le64_to_cpu(src[i]);
+ }
+ mutex_unlock(&bp->hwrm_cmd_lock);
+ dma_free_coherent(&bp->pdev->dev, sizeof(*hw_pcie_stats), hw_pcie_stats,
+ hw_pcie_stats_addr);
+}
+
static void bnxt_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
struct bnxt *bp = netdev_priv(dev);
@@ -3599,6 +3652,8 @@ const struct ethtool_ops bnxt_ethtool_ops = {
.get_pauseparam = bnxt_get_pauseparam,
.set_pauseparam = bnxt_set_pauseparam,
.get_drvinfo = bnxt_get_drvinfo,
+ .get_regs_len = bnxt_get_regs_len,
+ .get_regs = bnxt_get_regs,
.get_wol = bnxt_get_wol,
.set_wol = bnxt_set_wol,
.get_coalesce = bnxt_get_coalesce,
@@ -3617,6 +3672,7 @@ const struct ethtool_ops bnxt_ethtool_ops = {
.get_rxfh_indir_size = bnxt_get_rxfh_indir_size,
.get_rxfh_key_size = bnxt_get_rxfh_key_size,
.get_rxfh = bnxt_get_rxfh,
+ .set_rxfh = bnxt_set_rxfh,
.flash_device = bnxt_flash_device,
.get_eeprom_len = bnxt_get_eeprom_len,
.get_eeprom = bnxt_get_eeprom,
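
With set_rxfh wired up above, the indirection table size reported to ethtool is no longer the fixed HW_HASH_INDEX_SIZE on P5 chips: it scales with the RX ring count, rounded up to the hardware table granularity. A small user-space sketch of that rounding, assuming BNXT_RSS_TABLE_ENTRIES_P5 is 64 as in the driver headers of this series:

        /* Sketch of the P5 indirection-table sizing used by
         * bnxt_get_rxfh_indir_size() above; the 64-entry granularity is
         * an assumption taken from the driver headers.
         */
        #include <stdio.h>

        #define BNXT_RSS_TABLE_ENTRIES_P5 64
        #define ALIGN(x, a) (((x) + (a) - 1) & ~((a) - 1))

        int main(void)
        {
                unsigned int rx_rings;

                for (rx_rings = 1; rx_rings <= 130; rx_rings += 43)
                        printf("%u rx rings -> %u indir entries\n", rx_rings,
                               ALIGN(rx_rings, BNXT_RSS_TABLE_ENTRIES_P5));
                return 0;
        }
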
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h
index ce7585ff9e4d..34f44ddfad79 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h
@@ -84,8 +84,11 @@ struct hwrm_dbg_cmn_output {
ETH_RESET_PHY | ETH_RESET_RAM) \
<< ETH_RESET_SHARED_SHIFT)
+#define BNXT_PXP_REG_LEN 0x3110
+
extern const struct ethtool_ops bnxt_ethtool_ops;
+u32 bnxt_get_rxfh_indir_size(struct net_device *dev);
u32 _bnxt_fw_to_ethtool_adv_spds(u16, u8);
u32 bnxt_fw_to_ethtool_speed(u16);
u16 bnxt_get_fw_auto_link_speeds(u32);
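
BNXT_PXP_REG_LEN (0x3110 bytes) fixes the layout of the ethtool register dump: the PXP block always comes first, and when the firmware supports PCIe statistics a struct pcie_ctx_hw_stats follows, with regs->version distinguishing the two shapes. A hedged user-space sketch of a consumer of that blob (layout and version semantics taken from the bnxt_get_regs() hunk above; pcie_stats_of is an illustrative name, not a driver symbol):

        /* Sketch: split a bnxt ethtool register dump. version == 1 means
         * PCIe stats (already converted to CPU-endian u64s by the driver)
         * follow the PXP registers.
         */
        #include <stdint.h>
        #include <stddef.h>

        #define BNXT_PXP_REG_LEN 0x3110

        static const uint64_t *pcie_stats_of(const void *blob, size_t len,
                                             uint32_t version, size_t *count)
        {
                if (version < 1 || len <= BNXT_PXP_REG_LEN)
                        return NULL;
                *count = (len - BNXT_PXP_REG_LEN) / sizeof(uint64_t);
                return (const uint64_t *)((const char *)blob + BNXT_PXP_REG_LEN);
        }
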
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
index 7e9235c8d21e..c4af6bf15e36 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
@@ -169,9 +169,14 @@ struct cmd_nums {
#define HWRM_RING_CMPL_RING_QAGGINT_PARAMS 0x52UL
#define HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS 0x53UL
#define HWRM_RING_AGGINT_QCAPS 0x54UL
+ #define HWRM_RING_SCHQ_ALLOC 0x55UL
+ #define HWRM_RING_SCHQ_CFG 0x56UL
+ #define HWRM_RING_SCHQ_FREE 0x57UL
#define HWRM_RING_RESET 0x5eUL
#define HWRM_RING_GRP_ALLOC 0x60UL
#define HWRM_RING_GRP_FREE 0x61UL
+ #define HWRM_RING_CFG 0x62UL
+ #define HWRM_RING_QCFG 0x63UL
#define HWRM_RESERVED5 0x64UL
#define HWRM_RESERVED6 0x65UL
#define HWRM_VNIC_RSS_COS_LB_CTX_ALLOC 0x70UL
@@ -224,6 +229,7 @@ struct cmd_nums {
#define HWRM_FW_IPC_MAILBOX 0xccUL
#define HWRM_FW_ECN_CFG 0xcdUL
#define HWRM_FW_ECN_QCFG 0xceUL
+ #define HWRM_FW_SECURE_CFG 0xcfUL
#define HWRM_EXEC_FWD_RESP 0xd0UL
#define HWRM_REJECT_FWD_RESP 0xd1UL
#define HWRM_FWD_RESP 0xd2UL
@@ -337,6 +343,7 @@ struct cmd_nums {
#define HWRM_FUNC_VF_BW_QCFG 0x196UL
#define HWRM_FUNC_HOST_PF_IDS_QUERY 0x197UL
#define HWRM_FUNC_QSTATS_EXT 0x198UL
+ #define HWRM_STAT_EXT_CTX_QUERY 0x199UL
#define HWRM_SELFTEST_QLIST 0x200UL
#define HWRM_SELFTEST_EXEC 0x201UL
#define HWRM_SELFTEST_IRQ 0x202UL
@@ -353,24 +360,30 @@ struct cmd_nums {
#define HWRM_TF_VERSION_GET 0x2bdUL
#define HWRM_TF_SESSION_OPEN 0x2c6UL
#define HWRM_TF_SESSION_ATTACH 0x2c7UL
- #define HWRM_TF_SESSION_CLOSE 0x2c8UL
- #define HWRM_TF_SESSION_QCFG 0x2c9UL
- #define HWRM_TF_SESSION_RESC_QCAPS 0x2caUL
- #define HWRM_TF_SESSION_RESC_ALLOC 0x2cbUL
- #define HWRM_TF_SESSION_RESC_FREE 0x2ccUL
- #define HWRM_TF_SESSION_RESC_FLUSH 0x2cdUL
- #define HWRM_TF_TBL_TYPE_GET 0x2d0UL
- #define HWRM_TF_TBL_TYPE_SET 0x2d1UL
- #define HWRM_TF_CTXT_MEM_RGTR 0x2daUL
- #define HWRM_TF_CTXT_MEM_UNRGTR 0x2dbUL
- #define HWRM_TF_EXT_EM_QCAPS 0x2dcUL
- #define HWRM_TF_EXT_EM_OP 0x2ddUL
- #define HWRM_TF_EXT_EM_CFG 0x2deUL
- #define HWRM_TF_EXT_EM_QCFG 0x2dfUL
- #define HWRM_TF_TCAM_SET 0x2eeUL
- #define HWRM_TF_TCAM_GET 0x2efUL
- #define HWRM_TF_TCAM_MOVE 0x2f0UL
- #define HWRM_TF_TCAM_FREE 0x2f1UL
+ #define HWRM_TF_SESSION_REGISTER 0x2c8UL
+ #define HWRM_TF_SESSION_UNREGISTER 0x2c9UL
+ #define HWRM_TF_SESSION_CLOSE 0x2caUL
+ #define HWRM_TF_SESSION_QCFG 0x2cbUL
+ #define HWRM_TF_SESSION_RESC_QCAPS 0x2ccUL
+ #define HWRM_TF_SESSION_RESC_ALLOC 0x2cdUL
+ #define HWRM_TF_SESSION_RESC_FREE 0x2ceUL
+ #define HWRM_TF_SESSION_RESC_FLUSH 0x2cfUL
+ #define HWRM_TF_TBL_TYPE_GET 0x2daUL
+ #define HWRM_TF_TBL_TYPE_SET 0x2dbUL
+ #define HWRM_TF_CTXT_MEM_RGTR 0x2e4UL
+ #define HWRM_TF_CTXT_MEM_UNRGTR 0x2e5UL
+ #define HWRM_TF_EXT_EM_QCAPS 0x2e6UL
+ #define HWRM_TF_EXT_EM_OP 0x2e7UL
+ #define HWRM_TF_EXT_EM_CFG 0x2e8UL
+ #define HWRM_TF_EXT_EM_QCFG 0x2e9UL
+ #define HWRM_TF_EM_INSERT 0x2eaUL
+ #define HWRM_TF_EM_DELETE 0x2ebUL
+ #define HWRM_TF_TCAM_SET 0x2f8UL
+ #define HWRM_TF_TCAM_GET 0x2f9UL
+ #define HWRM_TF_TCAM_MOVE 0x2faUL
+ #define HWRM_TF_TCAM_FREE 0x2fbUL
+ #define HWRM_TF_GLOBAL_CFG_SET 0x2fcUL
+ #define HWRM_TF_GLOBAL_CFG_GET 0x2fdUL
#define HWRM_SV 0x400UL
#define HWRM_DBG_READ_DIRECT 0xff10UL
#define HWRM_DBG_READ_INDIRECT 0xff11UL
@@ -391,6 +404,7 @@ struct cmd_nums {
#define HWRM_DBG_QCAPS 0xff20UL
#define HWRM_DBG_QCFG 0xff21UL
#define HWRM_DBG_CRASHDUMP_MEDIUM_CFG 0xff22UL
+ #define HWRM_NVM_REQ_ARBITRATION 0xffedUL
#define HWRM_NVM_FACTORY_DEFAULTS 0xffeeUL
#define HWRM_NVM_VALIDATE_OPTION 0xffefUL
#define HWRM_NVM_FLUSH 0xfff0UL
@@ -464,8 +478,8 @@ struct hwrm_err_output {
#define HWRM_VERSION_MAJOR 1
#define HWRM_VERSION_MINOR 10
#define HWRM_VERSION_UPDATE 1
-#define HWRM_VERSION_RSVD 33
-#define HWRM_VERSION_STR "1.10.1.33"
+#define HWRM_VERSION_RSVD 54
+#define HWRM_VERSION_STR "1.10.1.54"
/* hwrm_ver_get_input (size:192b/24B) */
struct hwrm_ver_get_input {
@@ -1094,6 +1108,8 @@ struct hwrm_func_vf_cfg_input {
#define FUNC_VF_CFG_REQ_FLAGS_STAT_CTX_ASSETS_TEST 0x20UL
#define FUNC_VF_CFG_REQ_FLAGS_VNIC_ASSETS_TEST 0x40UL
#define FUNC_VF_CFG_REQ_FLAGS_L2_CTX_ASSETS_TEST 0x80UL
+ #define FUNC_VF_CFG_REQ_FLAGS_PPP_PUSH_MODE_ENABLE 0x100UL
+ #define FUNC_VF_CFG_REQ_FLAGS_PPP_PUSH_MODE_DISABLE 0x200UL
__le16 num_rsscos_ctxs;
__le16 num_cmpl_rings;
__le16 num_tx_rings;
@@ -1189,10 +1205,16 @@ struct hwrm_func_qcaps_output {
__le16 max_sp_tx_rings;
u8 unused_0[2];
__le32 flags_ext;
- #define FUNC_QCAPS_RESP_FLAGS_EXT_ECN_MARK_SUPPORTED 0x1UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT_ECN_STATS_SUPPORTED 0x2UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT_EXT_HW_STATS_SUPPORTED 0x4UL
- u8 unused_1[3];
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_ECN_MARK_SUPPORTED 0x1UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_ECN_STATS_SUPPORTED 0x2UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_EXT_HW_STATS_SUPPORTED 0x4UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_HOT_RESET_IF_SUPPORT 0x8UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_PROXY_MODE_SUPPORT 0x10UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_TX_PROXY_SRC_INTF_OVERRIDE_SUPPORT 0x20UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_SCHQ_SUPPORTED 0x40UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_PPP_PUSH_MODE_SUPPORTED 0x80UL
+ u8 max_schqs;
+ u8 unused_1[2];
u8 valid;
};
@@ -1226,6 +1248,8 @@ struct hwrm_func_qcfg_output {
#define FUNC_QCFG_RESP_FLAGS_TRUSTED_VF 0x40UL
#define FUNC_QCFG_RESP_FLAGS_SECURE_MODE_ENABLED 0x80UL
#define FUNC_QCFG_RESP_FLAGS_PREBOOT_LEGACY_L2_RINGS 0x100UL
+ #define FUNC_QCFG_RESP_FLAGS_HOT_RESET_ALLOWED 0x200UL
+ #define FUNC_QCFG_RESP_FLAGS_PPP_PUSH_MODE_ENABLED 0x400UL
u8 mac_address[6];
__le16 pci_id;
__le16 alloc_rsscos_ctx;
@@ -1321,7 +1345,7 @@ struct hwrm_func_qcfg_output {
u8 valid;
};
-/* hwrm_func_cfg_input (size:704b/88B) */
+/* hwrm_func_cfg_input (size:768b/96B) */
struct hwrm_func_cfg_input {
__le16 req_type;
__le16 cmpl_ring;
@@ -1352,30 +1376,35 @@ struct hwrm_func_cfg_input {
#define FUNC_CFG_REQ_FLAGS_NQ_ASSETS_TEST 0x800000UL
#define FUNC_CFG_REQ_FLAGS_TRUSTED_VF_DISABLE 0x1000000UL
#define FUNC_CFG_REQ_FLAGS_PREBOOT_LEGACY_L2_RINGS 0x2000000UL
+ #define FUNC_CFG_REQ_FLAGS_HOT_RESET_IF_EN_DIS 0x4000000UL
+ #define FUNC_CFG_REQ_FLAGS_PPP_PUSH_MODE_ENABLE 0x8000000UL
+ #define FUNC_CFG_REQ_FLAGS_PPP_PUSH_MODE_DISABLE 0x10000000UL
__le32 enables;
- #define FUNC_CFG_REQ_ENABLES_MTU 0x1UL
- #define FUNC_CFG_REQ_ENABLES_MRU 0x2UL
- #define FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS 0x4UL
- #define FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS 0x8UL
- #define FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS 0x10UL
- #define FUNC_CFG_REQ_ENABLES_NUM_RX_RINGS 0x20UL
- #define FUNC_CFG_REQ_ENABLES_NUM_L2_CTXS 0x40UL
- #define FUNC_CFG_REQ_ENABLES_NUM_VNICS 0x80UL
- #define FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS 0x100UL
- #define FUNC_CFG_REQ_ENABLES_DFLT_MAC_ADDR 0x200UL
- #define FUNC_CFG_REQ_ENABLES_DFLT_VLAN 0x400UL
- #define FUNC_CFG_REQ_ENABLES_DFLT_IP_ADDR 0x800UL
- #define FUNC_CFG_REQ_ENABLES_MIN_BW 0x1000UL
- #define FUNC_CFG_REQ_ENABLES_MAX_BW 0x2000UL
- #define FUNC_CFG_REQ_ENABLES_ASYNC_EVENT_CR 0x4000UL
- #define FUNC_CFG_REQ_ENABLES_VLAN_ANTISPOOF_MODE 0x8000UL
- #define FUNC_CFG_REQ_ENABLES_ALLOWED_VLAN_PRIS 0x10000UL
- #define FUNC_CFG_REQ_ENABLES_EVB_MODE 0x20000UL
- #define FUNC_CFG_REQ_ENABLES_NUM_MCAST_FILTERS 0x40000UL
- #define FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS 0x80000UL
- #define FUNC_CFG_REQ_ENABLES_CACHE_LINESIZE 0x100000UL
- #define FUNC_CFG_REQ_ENABLES_NUM_MSIX 0x200000UL
- #define FUNC_CFG_REQ_ENABLES_ADMIN_LINK_STATE 0x400000UL
+ #define FUNC_CFG_REQ_ENABLES_MTU 0x1UL
+ #define FUNC_CFG_REQ_ENABLES_MRU 0x2UL
+ #define FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS 0x4UL
+ #define FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS 0x8UL
+ #define FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS 0x10UL
+ #define FUNC_CFG_REQ_ENABLES_NUM_RX_RINGS 0x20UL
+ #define FUNC_CFG_REQ_ENABLES_NUM_L2_CTXS 0x40UL
+ #define FUNC_CFG_REQ_ENABLES_NUM_VNICS 0x80UL
+ #define FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS 0x100UL
+ #define FUNC_CFG_REQ_ENABLES_DFLT_MAC_ADDR 0x200UL
+ #define FUNC_CFG_REQ_ENABLES_DFLT_VLAN 0x400UL
+ #define FUNC_CFG_REQ_ENABLES_DFLT_IP_ADDR 0x800UL
+ #define FUNC_CFG_REQ_ENABLES_MIN_BW 0x1000UL
+ #define FUNC_CFG_REQ_ENABLES_MAX_BW 0x2000UL
+ #define FUNC_CFG_REQ_ENABLES_ASYNC_EVENT_CR 0x4000UL
+ #define FUNC_CFG_REQ_ENABLES_VLAN_ANTISPOOF_MODE 0x8000UL
+ #define FUNC_CFG_REQ_ENABLES_ALLOWED_VLAN_PRIS 0x10000UL
+ #define FUNC_CFG_REQ_ENABLES_EVB_MODE 0x20000UL
+ #define FUNC_CFG_REQ_ENABLES_NUM_MCAST_FILTERS 0x40000UL
+ #define FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS 0x80000UL
+ #define FUNC_CFG_REQ_ENABLES_CACHE_LINESIZE 0x100000UL
+ #define FUNC_CFG_REQ_ENABLES_NUM_MSIX 0x200000UL
+ #define FUNC_CFG_REQ_ENABLES_ADMIN_LINK_STATE 0x400000UL
+ #define FUNC_CFG_REQ_ENABLES_HOT_RESET_IF_SUPPORT 0x800000UL
+ #define FUNC_CFG_REQ_ENABLES_SCHQ_ID 0x1000000UL
__le16 mtu;
__le16 mru;
__le16 num_rsscos_ctxs;
@@ -1449,6 +1478,8 @@ struct hwrm_func_cfg_input {
#define FUNC_CFG_REQ_OPTIONS_RSVD_MASK 0xf0UL
#define FUNC_CFG_REQ_OPTIONS_RSVD_SFT 4
__le16 num_mcast_filters;
+ __le16 schq_id;
+ u8 unused_0[6];
};
/* hwrm_func_cfg_output (size:128b/16B) */
@@ -1507,7 +1538,7 @@ struct hwrm_func_qstats_output {
u8 valid;
};
-/* hwrm_func_qstats_ext_input (size:192b/24B) */
+/* hwrm_func_qstats_ext_input (size:256b/32B) */
struct hwrm_func_qstats_ext_input {
__le16 req_type;
__le16 cmpl_ring;
@@ -1520,7 +1551,12 @@ struct hwrm_func_qstats_ext_input {
#define FUNC_QSTATS_EXT_REQ_FLAGS_ROCE_ONLY 0x1UL
#define FUNC_QSTATS_EXT_REQ_FLAGS_COUNTER_MASK 0x2UL
#define FUNC_QSTATS_EXT_REQ_FLAGS_LAST FUNC_QSTATS_EXT_REQ_FLAGS_COUNTER_MASK
- u8 unused_0[5];
+ u8 unused_0[1];
+ __le32 enables;
+ #define FUNC_QSTATS_EXT_REQ_ENABLES_SCHQ_ID 0x1UL
+ __le16 schq_id;
+ __le16 traffic_class;
+ u8 unused_1[4];
};
/* hwrm_func_qstats_ext_output (size:1472b/184B) */
@@ -1533,15 +1569,15 @@ struct hwrm_func_qstats_ext_output {
__le64 rx_mcast_pkts;
__le64 rx_bcast_pkts;
__le64 rx_discard_pkts;
- __le64 rx_drop_pkts;
+ __le64 rx_error_pkts;
__le64 rx_ucast_bytes;
__le64 rx_mcast_bytes;
__le64 rx_bcast_bytes;
__le64 tx_ucast_pkts;
__le64 tx_mcast_pkts;
__le64 tx_bcast_pkts;
+ __le64 tx_error_pkts;
__le64 tx_discard_pkts;
- __le64 tx_drop_pkts;
__le64 tx_ucast_bytes;
__le64 tx_mcast_bytes;
__le64 tx_bcast_bytes;
@@ -2376,33 +2412,39 @@ struct hwrm_port_phy_cfg_input {
__le16 target_id;
__le64 resp_addr;
__le32 flags;
- #define PORT_PHY_CFG_REQ_FLAGS_RESET_PHY 0x1UL
- #define PORT_PHY_CFG_REQ_FLAGS_DEPRECATED 0x2UL
- #define PORT_PHY_CFG_REQ_FLAGS_FORCE 0x4UL
- #define PORT_PHY_CFG_REQ_FLAGS_RESTART_AUTONEG 0x8UL
- #define PORT_PHY_CFG_REQ_FLAGS_EEE_ENABLE 0x10UL
- #define PORT_PHY_CFG_REQ_FLAGS_EEE_DISABLE 0x20UL
- #define PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_ENABLE 0x40UL
- #define PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_DISABLE 0x80UL
- #define PORT_PHY_CFG_REQ_FLAGS_FEC_AUTONEG_ENABLE 0x100UL
- #define PORT_PHY_CFG_REQ_FLAGS_FEC_AUTONEG_DISABLE 0x200UL
- #define PORT_PHY_CFG_REQ_FLAGS_FEC_CLAUSE74_ENABLE 0x400UL
- #define PORT_PHY_CFG_REQ_FLAGS_FEC_CLAUSE74_DISABLE 0x800UL
- #define PORT_PHY_CFG_REQ_FLAGS_FEC_CLAUSE91_ENABLE 0x1000UL
- #define PORT_PHY_CFG_REQ_FLAGS_FEC_CLAUSE91_DISABLE 0x2000UL
- #define PORT_PHY_CFG_REQ_FLAGS_FORCE_LINK_DWN 0x4000UL
+ #define PORT_PHY_CFG_REQ_FLAGS_RESET_PHY 0x1UL
+ #define PORT_PHY_CFG_REQ_FLAGS_DEPRECATED 0x2UL
+ #define PORT_PHY_CFG_REQ_FLAGS_FORCE 0x4UL
+ #define PORT_PHY_CFG_REQ_FLAGS_RESTART_AUTONEG 0x8UL
+ #define PORT_PHY_CFG_REQ_FLAGS_EEE_ENABLE 0x10UL
+ #define PORT_PHY_CFG_REQ_FLAGS_EEE_DISABLE 0x20UL
+ #define PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_ENABLE 0x40UL
+ #define PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_DISABLE 0x80UL
+ #define PORT_PHY_CFG_REQ_FLAGS_FEC_AUTONEG_ENABLE 0x100UL
+ #define PORT_PHY_CFG_REQ_FLAGS_FEC_AUTONEG_DISABLE 0x200UL
+ #define PORT_PHY_CFG_REQ_FLAGS_FEC_CLAUSE74_ENABLE 0x400UL
+ #define PORT_PHY_CFG_REQ_FLAGS_FEC_CLAUSE74_DISABLE 0x800UL
+ #define PORT_PHY_CFG_REQ_FLAGS_FEC_CLAUSE91_ENABLE 0x1000UL
+ #define PORT_PHY_CFG_REQ_FLAGS_FEC_CLAUSE91_DISABLE 0x2000UL
+ #define PORT_PHY_CFG_REQ_FLAGS_FORCE_LINK_DWN 0x4000UL
+ #define PORT_PHY_CFG_REQ_FLAGS_FEC_RS544_1XN_ENABLE 0x8000UL
+ #define PORT_PHY_CFG_REQ_FLAGS_FEC_RS544_1XN_DISABLE 0x10000UL
+ #define PORT_PHY_CFG_REQ_FLAGS_FEC_RS544_2XN_ENABLE 0x20000UL
+ #define PORT_PHY_CFG_REQ_FLAGS_FEC_RS544_2XN_DISABLE 0x40000UL
__le32 enables;
- #define PORT_PHY_CFG_REQ_ENABLES_AUTO_MODE 0x1UL
- #define PORT_PHY_CFG_REQ_ENABLES_AUTO_DUPLEX 0x2UL
- #define PORT_PHY_CFG_REQ_ENABLES_AUTO_PAUSE 0x4UL
- #define PORT_PHY_CFG_REQ_ENABLES_AUTO_LINK_SPEED 0x8UL
- #define PORT_PHY_CFG_REQ_ENABLES_AUTO_LINK_SPEED_MASK 0x10UL
- #define PORT_PHY_CFG_REQ_ENABLES_WIRESPEED 0x20UL
- #define PORT_PHY_CFG_REQ_ENABLES_LPBK 0x40UL
- #define PORT_PHY_CFG_REQ_ENABLES_PREEMPHASIS 0x80UL
- #define PORT_PHY_CFG_REQ_ENABLES_FORCE_PAUSE 0x100UL
- #define PORT_PHY_CFG_REQ_ENABLES_EEE_LINK_SPEED_MASK 0x200UL
- #define PORT_PHY_CFG_REQ_ENABLES_TX_LPI_TIMER 0x400UL
+ #define PORT_PHY_CFG_REQ_ENABLES_AUTO_MODE 0x1UL
+ #define PORT_PHY_CFG_REQ_ENABLES_AUTO_DUPLEX 0x2UL
+ #define PORT_PHY_CFG_REQ_ENABLES_AUTO_PAUSE 0x4UL
+ #define PORT_PHY_CFG_REQ_ENABLES_AUTO_LINK_SPEED 0x8UL
+ #define PORT_PHY_CFG_REQ_ENABLES_AUTO_LINK_SPEED_MASK 0x10UL
+ #define PORT_PHY_CFG_REQ_ENABLES_WIRESPEED 0x20UL
+ #define PORT_PHY_CFG_REQ_ENABLES_LPBK 0x40UL
+ #define PORT_PHY_CFG_REQ_ENABLES_PREEMPHASIS 0x80UL
+ #define PORT_PHY_CFG_REQ_ENABLES_FORCE_PAUSE 0x100UL
+ #define PORT_PHY_CFG_REQ_ENABLES_EEE_LINK_SPEED_MASK 0x200UL
+ #define PORT_PHY_CFG_REQ_ENABLES_TX_LPI_TIMER 0x400UL
+ #define PORT_PHY_CFG_REQ_ENABLES_FORCE_PAM4_LINK_SPEED 0x800UL
+ #define PORT_PHY_CFG_REQ_ENABLES_AUTO_PAM4_LINK_SPEED_MASK 0x1000UL
__le16 port_id;
__le16 force_link_speed;
#define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_100MB 0x1UL
@@ -2415,7 +2457,6 @@ struct hwrm_port_phy_cfg_input {
#define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_40GB 0x190UL
#define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_50GB 0x1f4UL
#define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_100GB 0x3e8UL
- #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_200GB 0x7d0UL
#define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_10MB 0xffffUL
#define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_LAST PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_10MB
u8 auto_mode;
@@ -2446,7 +2487,6 @@ struct hwrm_port_phy_cfg_input {
#define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_40GB 0x190UL
#define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_50GB 0x1f4UL
#define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_100GB 0x3e8UL
- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_200GB 0x7d0UL
#define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_10MB 0xffffUL
#define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_LAST PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_10MB
__le16 auto_link_speed_mask;
@@ -2464,7 +2504,6 @@ struct hwrm_port_phy_cfg_input {
#define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_100GB 0x800UL
#define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_10MBHD 0x1000UL
#define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_10MB 0x2000UL
- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_200GB 0x4000UL
u8 wirespeed;
#define PORT_PHY_CFG_REQ_WIRESPEED_OFF 0x0UL
#define PORT_PHY_CFG_REQ_WIRESPEED_ON 0x1UL
@@ -2488,11 +2527,19 @@ struct hwrm_port_phy_cfg_input {
#define PORT_PHY_CFG_REQ_EEE_LINK_SPEED_MASK_RSVD3 0x10UL
#define PORT_PHY_CFG_REQ_EEE_LINK_SPEED_MASK_RSVD4 0x20UL
#define PORT_PHY_CFG_REQ_EEE_LINK_SPEED_MASK_10GB 0x40UL
- u8 unused_2[2];
+ __le16 force_pam4_link_speed;
+ #define PORT_PHY_CFG_REQ_FORCE_PAM4_LINK_SPEED_50GB 0x1f4UL
+ #define PORT_PHY_CFG_REQ_FORCE_PAM4_LINK_SPEED_100GB 0x3e8UL
+ #define PORT_PHY_CFG_REQ_FORCE_PAM4_LINK_SPEED_200GB 0x7d0UL
+ #define PORT_PHY_CFG_REQ_FORCE_PAM4_LINK_SPEED_LAST PORT_PHY_CFG_REQ_FORCE_PAM4_LINK_SPEED_200GB
__le32 tx_lpi_timer;
#define PORT_PHY_CFG_REQ_TX_LPI_TIMER_MASK 0xffffffUL
#define PORT_PHY_CFG_REQ_TX_LPI_TIMER_SFT 0
- __le32 unused_3;
+ __le16 auto_link_pam4_speed_mask;
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_PAM4_SPEED_MASK_50G 0x1UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_PAM4_SPEED_MASK_100G 0x2UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_PAM4_SPEED_MASK_200G 0x4UL
+ u8 unused_2[2];
};
/* hwrm_port_phy_cfg_output (size:128b/16B) */
@@ -2526,7 +2573,7 @@ struct hwrm_port_phy_qcfg_input {
u8 unused_0[6];
};
-/* hwrm_port_phy_qcfg_output (size:768b/96B) */
+/* hwrm_port_phy_qcfg_output (size:832b/104B) */
struct hwrm_port_phy_qcfg_output {
__le16 error_code;
__le16 req_type;
@@ -2537,7 +2584,10 @@ struct hwrm_port_phy_qcfg_output {
#define PORT_PHY_QCFG_RESP_LINK_SIGNAL 0x1UL
#define PORT_PHY_QCFG_RESP_LINK_LINK 0x2UL
#define PORT_PHY_QCFG_RESP_LINK_LAST PORT_PHY_QCFG_RESP_LINK_LINK
- u8 unused_0;
+ u8 link_signal_mode;
+ #define PORT_PHY_QCFG_RESP_LINK_SIGNAL_MODE_NRZ 0x0UL
+ #define PORT_PHY_QCFG_RESP_LINK_SIGNAL_MODE_PAM4 0x1UL
+ #define PORT_PHY_QCFG_RESP_LINK_SIGNAL_MODE_LAST PORT_PHY_QCFG_RESP_LINK_SIGNAL_MODE_PAM4
__le16 link_speed;
#define PORT_PHY_QCFG_RESP_LINK_SPEED_100MB 0x1UL
#define PORT_PHY_QCFG_RESP_LINK_SPEED_1GB 0xaUL
@@ -2574,7 +2624,6 @@ struct hwrm_port_phy_qcfg_output {
#define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_100GB 0x800UL
#define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_10MBHD 0x1000UL
#define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_10MB 0x2000UL
- #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_200GB 0x4000UL
__le16 force_link_speed;
#define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_100MB 0x1UL
#define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_1GB 0xaUL
@@ -2586,7 +2635,6 @@ struct hwrm_port_phy_qcfg_output {
#define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_40GB 0x190UL
#define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_50GB 0x1f4UL
#define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_100GB 0x3e8UL
- #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_200GB 0x7d0UL
#define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_10MB 0xffffUL
#define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_LAST PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_10MB
u8 auto_mode;
@@ -2611,7 +2659,6 @@ struct hwrm_port_phy_qcfg_output {
#define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_40GB 0x190UL
#define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_50GB 0x1f4UL
#define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_100GB 0x3e8UL
- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_200GB 0x7d0UL
#define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_10MB 0xffffUL
#define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_LAST PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_10MB
__le16 auto_link_speed_mask;
@@ -2629,7 +2676,6 @@ struct hwrm_port_phy_qcfg_output {
#define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_100GB 0x800UL
#define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_10MBHD 0x1000UL
#define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_10MB 0x2000UL
- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_200GB 0x4000UL
u8 wirespeed;
#define PORT_PHY_QCFG_RESP_WIRESPEED_OFF 0x0UL
#define PORT_PHY_QCFG_RESP_WIRESPEED_ON 0x1UL
@@ -2763,13 +2809,21 @@ struct hwrm_port_phy_qcfg_output {
#define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_QSFP28 (0x11UL << 24)
#define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_LAST PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_QSFP28
__le16 fec_cfg;
- #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_NONE_SUPPORTED 0x1UL
- #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_AUTONEG_SUPPORTED 0x2UL
- #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_AUTONEG_ENABLED 0x4UL
- #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_CLAUSE74_SUPPORTED 0x8UL
- #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_CLAUSE74_ENABLED 0x10UL
- #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_CLAUSE91_SUPPORTED 0x20UL
- #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_CLAUSE91_ENABLED 0x40UL
+ #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_NONE_SUPPORTED 0x1UL
+ #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_AUTONEG_SUPPORTED 0x2UL
+ #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_AUTONEG_ENABLED 0x4UL
+ #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_CLAUSE74_SUPPORTED 0x8UL
+ #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_CLAUSE74_ENABLED 0x10UL
+ #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_CLAUSE91_SUPPORTED 0x20UL
+ #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_CLAUSE91_ENABLED 0x40UL
+ #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_RS544_1XN_SUPPORTED 0x80UL
+ #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_RS544_1XN_ENABLED 0x100UL
+ #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_RS544_2XN_SUPPORTED 0x200UL
+ #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_RS544_2XN_ENABLED 0x400UL
+ #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_CLAUSE74_ACTIVE 0x800UL
+ #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_CLAUSE91_ACTIVE 0x1000UL
+ #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_RS544_1XN_ACTIVE 0x2000UL
+ #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_RS544_2XN_ACTIVE 0x4000UL
u8 duplex_state;
#define PORT_PHY_QCFG_RESP_DUPLEX_STATE_HALF 0x0UL
#define PORT_PHY_QCFG_RESP_DUPLEX_STATE_FULL 0x1UL
@@ -2778,7 +2832,24 @@ struct hwrm_port_phy_qcfg_output {
#define PORT_PHY_QCFG_RESP_OPTION_FLAGS_MEDIA_AUTO_DETECT 0x1UL
char phy_vendor_name[16];
char phy_vendor_partnumber[16];
- u8 unused_2[7];
+ __le16 support_pam4_speeds;
+ #define PORT_PHY_QCFG_RESP_SUPPORT_PAM4_SPEEDS_50G 0x1UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_PAM4_SPEEDS_100G 0x2UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_PAM4_SPEEDS_200G 0x4UL
+ __le16 force_pam4_link_speed;
+ #define PORT_PHY_QCFG_RESP_FORCE_PAM4_LINK_SPEED_50GB 0x1f4UL
+ #define PORT_PHY_QCFG_RESP_FORCE_PAM4_LINK_SPEED_100GB 0x3e8UL
+ #define PORT_PHY_QCFG_RESP_FORCE_PAM4_LINK_SPEED_200GB 0x7d0UL
+ #define PORT_PHY_QCFG_RESP_FORCE_PAM4_LINK_SPEED_LAST PORT_PHY_QCFG_RESP_FORCE_PAM4_LINK_SPEED_200GB
+ __le16 auto_pam4_link_speed_mask;
+ #define PORT_PHY_QCFG_RESP_AUTO_PAM4_LINK_SPEED_MASK_50G 0x1UL
+ #define PORT_PHY_QCFG_RESP_AUTO_PAM4_LINK_SPEED_MASK_100G 0x2UL
+ #define PORT_PHY_QCFG_RESP_AUTO_PAM4_LINK_SPEED_MASK_200G 0x4UL
+ __le16 link_partner_pam4_adv_speeds;
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_PAM4_ADV_SPEEDS_50GB 0x1UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_PAM4_ADV_SPEEDS_100GB 0x2UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_PAM4_ADV_SPEEDS_200GB 0x4UL
+ u8 unused_0[7];
u8 valid;
};
@@ -3304,19 +3375,20 @@ struct hwrm_port_phy_qcaps_input {
u8 unused_0[6];
};
-/* hwrm_port_phy_qcaps_output (size:192b/24B) */
+/* hwrm_port_phy_qcaps_output (size:256b/32B) */
struct hwrm_port_phy_qcaps_output {
__le16 error_code;
__le16 req_type;
__le16 seq_id;
__le16 resp_len;
u8 flags;
- #define PORT_PHY_QCAPS_RESP_FLAGS_EEE_SUPPORTED 0x1UL
- #define PORT_PHY_QCAPS_RESP_FLAGS_EXTERNAL_LPBK_SUPPORTED 0x2UL
- #define PORT_PHY_QCAPS_RESP_FLAGS_AUTONEG_LPBK_SUPPORTED 0x4UL
- #define PORT_PHY_QCAPS_RESP_FLAGS_SHARED_PHY_CFG_SUPPORTED 0x8UL
- #define PORT_PHY_QCAPS_RESP_FLAGS_RSVD1_MASK 0xf0UL
- #define PORT_PHY_QCAPS_RESP_FLAGS_RSVD1_SFT 4
+ #define PORT_PHY_QCAPS_RESP_FLAGS_EEE_SUPPORTED 0x1UL
+ #define PORT_PHY_QCAPS_RESP_FLAGS_EXTERNAL_LPBK_SUPPORTED 0x2UL
+ #define PORT_PHY_QCAPS_RESP_FLAGS_AUTONEG_LPBK_SUPPORTED 0x4UL
+ #define PORT_PHY_QCAPS_RESP_FLAGS_SHARED_PHY_CFG_SUPPORTED 0x8UL
+ #define PORT_PHY_QCAPS_RESP_FLAGS_CUMULATIVE_COUNTERS_ON_RESET 0x10UL
+ #define PORT_PHY_QCAPS_RESP_FLAGS_RSVD1_MASK 0xe0UL
+ #define PORT_PHY_QCAPS_RESP_FLAGS_RSVD1_SFT 5
u8 port_cnt;
#define PORT_PHY_QCAPS_RESP_PORT_CNT_UNKNOWN 0x0UL
#define PORT_PHY_QCAPS_RESP_PORT_CNT_1 0x1UL
@@ -3339,7 +3411,6 @@ struct hwrm_port_phy_qcaps_output {
#define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_100GB 0x800UL
#define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_10MBHD 0x1000UL
#define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_10MB 0x2000UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_200GB 0x4000UL
__le16 supported_speeds_auto_mode;
#define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_100MBHD 0x1UL
#define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_100MB 0x2UL
@@ -3355,7 +3426,6 @@ struct hwrm_port_phy_qcaps_output {
#define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_100GB 0x800UL
#define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_10MBHD 0x1000UL
#define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_10MB 0x2000UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_200GB 0x4000UL
__le16 supported_speeds_eee_mode;
#define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_EEE_MODE_RSVD1 0x1UL
#define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_EEE_MODE_100MB 0x2UL
@@ -3372,8 +3442,18 @@ struct hwrm_port_phy_qcaps_output {
__le32 valid_tx_lpi_timer_high;
#define PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_HIGH_MASK 0xffffffUL
#define PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_HIGH_SFT 0
- #define PORT_PHY_QCAPS_RESP_VALID_MASK 0xff000000UL
- #define PORT_PHY_QCAPS_RESP_VALID_SFT 24
+ #define PORT_PHY_QCAPS_RESP_RSVD_MASK 0xff000000UL
+ #define PORT_PHY_QCAPS_RESP_RSVD_SFT 24
+ __le16 supported_pam4_speeds_auto_mode;
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_PAM4_SPEEDS_AUTO_MODE_50G 0x1UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_PAM4_SPEEDS_AUTO_MODE_100G 0x2UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_PAM4_SPEEDS_AUTO_MODE_200G 0x4UL
+ __le16 supported_pam4_speeds_force_mode;
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_PAM4_SPEEDS_FORCE_MODE_50G 0x1UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_PAM4_SPEEDS_FORCE_MODE_100G 0x2UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_PAM4_SPEEDS_FORCE_MODE_200G 0x4UL
+ u8 unused_0[3];
+ u8 valid;
};
/* hwrm_port_phy_i2c_read_input (size:320b/40B) */
@@ -3812,7 +3892,7 @@ struct hwrm_queue_qportcfg_input {
u8 unused_0;
};
-/* hwrm_queue_qportcfg_output (size:256b/32B) */
+/* hwrm_queue_qportcfg_output (size:1344b/168B) */
struct hwrm_queue_qportcfg_output {
__le16 error_code;
__le16 req_type;
@@ -3898,6 +3978,49 @@ struct hwrm_queue_qportcfg_output {
#define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_LOSSLESS_NIC 0x3UL
#define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_UNKNOWN 0xffUL
#define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_LAST QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_UNKNOWN
+ u8 unused_0;
+ char qid0_name[16];
+ char qid1_name[16];
+ char qid2_name[16];
+ char qid3_name[16];
+ char qid4_name[16];
+ char qid5_name[16];
+ char qid6_name[16];
+ char qid7_name[16];
+ u8 unused_1[7];
+ u8 valid;
+};
+
+/* hwrm_queue_qcfg_input (size:192b/24B) */
+struct hwrm_queue_qcfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define QUEUE_QCFG_REQ_FLAGS_PATH 0x1UL
+ #define QUEUE_QCFG_REQ_FLAGS_PATH_TX 0x0UL
+ #define QUEUE_QCFG_REQ_FLAGS_PATH_RX 0x1UL
+ #define QUEUE_QCFG_REQ_FLAGS_PATH_LAST QUEUE_QCFG_REQ_FLAGS_PATH_RX
+ __le32 queue_id;
+};
+
+/* hwrm_queue_qcfg_output (size:128b/16B) */
+struct hwrm_queue_qcfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 queue_len;
+ u8 service_profile;
+ #define QUEUE_QCFG_RESP_SERVICE_PROFILE_LOSSY 0x0UL
+ #define QUEUE_QCFG_RESP_SERVICE_PROFILE_LOSSLESS 0x1UL
+ #define QUEUE_QCFG_RESP_SERVICE_PROFILE_UNKNOWN 0xffUL
+ #define QUEUE_QCFG_RESP_SERVICE_PROFILE_LAST QUEUE_QCFG_RESP_SERVICE_PROFILE_UNKNOWN
+ u8 queue_cfg_info;
+ #define QUEUE_QCFG_RESP_QUEUE_CFG_INFO_ASYM_CFG 0x1UL
+ u8 unused_0;
u8 valid;
};
@@ -4938,6 +5061,7 @@ struct hwrm_vnic_cfg_input {
#define VNIC_CFG_REQ_ENABLES_DEFAULT_RX_RING_ID 0x20UL
#define VNIC_CFG_REQ_ENABLES_DEFAULT_CMPL_RING_ID 0x40UL
#define VNIC_CFG_REQ_ENABLES_QUEUE_ID 0x80UL
+ #define VNIC_CFG_REQ_ENABLES_RX_CSUM_V2_MODE 0x100UL
__le16 vnic_id;
__le16 dflt_ring_grp;
__le16 rss_rule;
@@ -4947,7 +5071,12 @@ struct hwrm_vnic_cfg_input {
__le16 default_rx_ring_id;
__le16 default_cmpl_ring_id;
__le16 queue_id;
- u8 unused0[6];
+ u8 rx_csum_v2_mode;
+ #define VNIC_CFG_REQ_RX_CSUM_V2_MODE_DEFAULT 0x0UL
+ #define VNIC_CFG_REQ_RX_CSUM_V2_MODE_ALL_OK 0x1UL
+ #define VNIC_CFG_REQ_RX_CSUM_V2_MODE_MAX 0x2UL
+ #define VNIC_CFG_REQ_RX_CSUM_V2_MODE_LAST VNIC_CFG_REQ_RX_CSUM_V2_MODE_MAX
+ u8 unused0[5];
};
/* hwrm_vnic_cfg_output (size:128b/16B) */
@@ -4989,6 +5118,7 @@ struct hwrm_vnic_qcaps_output {
#define VNIC_QCAPS_RESP_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_CAP 0x40UL
#define VNIC_QCAPS_RESP_FLAGS_OUTERMOST_RSS_CAP 0x80UL
#define VNIC_QCAPS_RESP_FLAGS_COS_ASSIGNMENT_CAP 0x100UL
+ #define VNIC_QCAPS_RESP_FLAGS_RX_CMPL_V2_CAP 0x200UL
__le16 max_aggs_supported;
u8 unused_1[5];
u8 valid;
@@ -5155,15 +5285,18 @@ struct hwrm_vnic_plcmodes_cfg_input {
#define VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV6 0x8UL
#define VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_FCOE 0x10UL
#define VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_ROCE 0x20UL
+ #define VNIC_PLCMODES_CFG_REQ_FLAGS_VIRTIO_PLACEMENT 0x40UL
__le32 enables;
#define VNIC_PLCMODES_CFG_REQ_ENABLES_JUMBO_THRESH_VALID 0x1UL
#define VNIC_PLCMODES_CFG_REQ_ENABLES_HDS_OFFSET_VALID 0x2UL
#define VNIC_PLCMODES_CFG_REQ_ENABLES_HDS_THRESHOLD_VALID 0x4UL
+ #define VNIC_PLCMODES_CFG_REQ_ENABLES_MAX_BDS_VALID 0x8UL
__le32 vnic_id;
__le16 jumbo_thresh;
__le16 hds_offset;
__le16 hds_threshold;
- u8 unused_0[6];
+ __le16 max_bds;
+ u8 unused_0[4];
};
/* hwrm_vnic_plcmodes_cfg_output (size:128b/16B) */
@@ -5231,6 +5364,7 @@ struct hwrm_ring_alloc_input {
#define RING_ALLOC_REQ_ENABLES_RX_RING_ID_VALID 0x40UL
#define RING_ALLOC_REQ_ENABLES_NQ_RING_ID_VALID 0x80UL
#define RING_ALLOC_REQ_ENABLES_RX_BUF_SIZE_VALID 0x100UL
+ #define RING_ALLOC_REQ_ENABLES_SCHQ_ID 0x200UL
u8 ring_type;
#define RING_ALLOC_REQ_RING_TYPE_L2_CMPL 0x0UL
#define RING_ALLOC_REQ_RING_TYPE_TX 0x1UL
@@ -5246,7 +5380,7 @@ struct hwrm_ring_alloc_input {
__le32 fbo;
u8 page_size;
u8 page_tbl_depth;
- u8 unused_1[2];
+ __le16 schq_id;
__le32 length;
__le16 logical_id;
__le16 cmpl_ring_id;
@@ -5344,11 +5478,12 @@ struct hwrm_ring_reset_input {
__le16 target_id;
__le64 resp_addr;
u8 ring_type;
- #define RING_RESET_REQ_RING_TYPE_L2_CMPL 0x0UL
- #define RING_RESET_REQ_RING_TYPE_TX 0x1UL
- #define RING_RESET_REQ_RING_TYPE_RX 0x2UL
- #define RING_RESET_REQ_RING_TYPE_ROCE_CMPL 0x3UL
- #define RING_RESET_REQ_RING_TYPE_LAST RING_RESET_REQ_RING_TYPE_ROCE_CMPL
+ #define RING_RESET_REQ_RING_TYPE_L2_CMPL 0x0UL
+ #define RING_RESET_REQ_RING_TYPE_TX 0x1UL
+ #define RING_RESET_REQ_RING_TYPE_RX 0x2UL
+ #define RING_RESET_REQ_RING_TYPE_ROCE_CMPL 0x3UL
+ #define RING_RESET_REQ_RING_TYPE_RX_RING_GRP 0x6UL
+ #define RING_RESET_REQ_RING_TYPE_LAST RING_RESET_REQ_RING_TYPE_RX_RING_GRP
u8 unused_0;
__le16 ring_id;
u8 unused_1[4];
@@ -5529,6 +5664,7 @@ struct hwrm_ring_grp_free_output {
u8 unused_0[7];
u8 valid;
};
+
#define DEFAULT_FLOW_ID 0xFFFFFFFFUL
#define ROCEV1_FLOW_ID 0xFFFFFFFEUL
#define ROCEV2_FLOW_ID 0xFFFFFFFDUL
@@ -6816,15 +6952,15 @@ struct ctx_hw_stats {
__le64 rx_mcast_pkts;
__le64 rx_bcast_pkts;
__le64 rx_discard_pkts;
- __le64 rx_drop_pkts;
+ __le64 rx_error_pkts;
__le64 rx_ucast_bytes;
__le64 rx_mcast_bytes;
__le64 rx_bcast_bytes;
__le64 tx_ucast_pkts;
__le64 tx_mcast_pkts;
__le64 tx_bcast_pkts;
+ __le64 tx_error_pkts;
__le64 tx_discard_pkts;
- __le64 tx_drop_pkts;
__le64 tx_ucast_bytes;
__le64 tx_mcast_bytes;
__le64 tx_bcast_bytes;
@@ -6840,15 +6976,15 @@ struct ctx_hw_stats_ext {
__le64 rx_mcast_pkts;
__le64 rx_bcast_pkts;
__le64 rx_discard_pkts;
- __le64 rx_drop_pkts;
+ __le64 rx_error_pkts;
__le64 rx_ucast_bytes;
__le64 rx_mcast_bytes;
__le64 rx_bcast_bytes;
__le64 tx_ucast_pkts;
__le64 tx_mcast_pkts;
__le64 tx_bcast_pkts;
+ __le64 tx_error_pkts;
__le64 tx_discard_pkts;
- __le64 tx_drop_pkts;
__le64 tx_ucast_bytes;
__le64 tx_mcast_bytes;
__le64 tx_bcast_bytes;
@@ -6915,7 +7051,9 @@ struct hwrm_stat_ctx_query_input {
__le16 target_id;
__le64 resp_addr;
__le32 stat_ctx_id;
- u8 unused_0[4];
+ u8 flags;
+ #define STAT_CTX_QUERY_REQ_FLAGS_COUNTER_MASK 0x1UL
+ u8 unused_0[3];
};
/* hwrm_stat_ctx_query_output (size:1408b/176B) */
@@ -6948,6 +7086,50 @@ struct hwrm_stat_ctx_query_output {
u8 valid;
};
+/* hwrm_stat_ext_ctx_query_input (size:192b/24B) */
+struct hwrm_stat_ext_ctx_query_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 stat_ctx_id;
+ u8 flags;
+ #define STAT_EXT_CTX_QUERY_REQ_FLAGS_COUNTER_MASK 0x1UL
+ u8 unused_0[3];
+};
+
+/* hwrm_stat_ext_ctx_query_output (size:1472b/184B) */
+struct hwrm_stat_ext_ctx_query_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le64 rx_ucast_pkts;
+ __le64 rx_mcast_pkts;
+ __le64 rx_bcast_pkts;
+ __le64 rx_discard_pkts;
+ __le64 rx_error_pkts;
+ __le64 rx_ucast_bytes;
+ __le64 rx_mcast_bytes;
+ __le64 rx_bcast_bytes;
+ __le64 tx_ucast_pkts;
+ __le64 tx_mcast_pkts;
+ __le64 tx_bcast_pkts;
+ __le64 tx_error_pkts;
+ __le64 tx_discard_pkts;
+ __le64 tx_ucast_bytes;
+ __le64 tx_mcast_bytes;
+ __le64 tx_bcast_bytes;
+ __le64 rx_tpa_eligible_pkt;
+ __le64 rx_tpa_eligible_bytes;
+ __le64 rx_tpa_pkt;
+ __le64 rx_tpa_bytes;
+ __le64 rx_tpa_errors;
+ u8 unused_0[7];
+ u8 valid;
+};
+
/* hwrm_stat_ctx_clr_stats_input (size:192b/24B) */
struct hwrm_stat_ctx_clr_stats_input {
__le16 req_type;
@@ -7497,6 +7679,29 @@ struct hwrm_wol_reason_qcfg_output {
u8 valid;
};
+/* hwrm_dbg_read_direct_input (size:256b/32B) */
+struct hwrm_dbg_read_direct_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le64 host_dest_addr;
+ __le32 read_addr;
+ __le32 read_len32;
+};
+
+/* hwrm_dbg_read_direct_output (size:128b/16B) */
+struct hwrm_dbg_read_direct_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 crc32;
+ u8 unused_0[3];
+ u8 valid;
+};
+
/* coredump_segment_record (size:128b/16B) */
struct coredump_segment_record {
__le16 component_id;
@@ -7507,7 +7712,8 @@ struct coredump_segment_record {
u8 seg_flags;
u8 compress_flags;
#define SFLAG_COMPRESSED_ZLIB 0x1UL
- u8 unused_0[6];
+ u8 unused_0[2];
+ __le32 segment_len;
};
/* hwrm_dbg_coredump_list_input (size:256b/32B) */
@@ -7620,7 +7826,8 @@ struct hwrm_dbg_ring_info_get_input {
#define DBG_RING_INFO_GET_REQ_RING_TYPE_L2_CMPL 0x0UL
#define DBG_RING_INFO_GET_REQ_RING_TYPE_TX 0x1UL
#define DBG_RING_INFO_GET_REQ_RING_TYPE_RX 0x2UL
- #define DBG_RING_INFO_GET_REQ_RING_TYPE_LAST DBG_RING_INFO_GET_REQ_RING_TYPE_RX
+ #define DBG_RING_INFO_GET_REQ_RING_TYPE_NQ 0x3UL
+ #define DBG_RING_INFO_GET_REQ_RING_TYPE_LAST DBG_RING_INFO_GET_REQ_RING_TYPE_NQ
u8 unused_0[3];
__le32 fw_ring_id;
};
@@ -7633,7 +7840,8 @@ struct hwrm_dbg_ring_info_get_output {
__le16 resp_len;
__le32 producer_index;
__le32 consumer_index;
- u8 unused_0[7];
+ __le32 cag_vector_ctrl;
+ u8 unused_0[3];
u8 valid;
};
@@ -7922,6 +8130,7 @@ struct hwrm_nvm_install_update_input {
#define NVM_INSTALL_UPDATE_REQ_FLAGS_ERASE_UNUSED_SPACE 0x1UL
#define NVM_INSTALL_UPDATE_REQ_FLAGS_REMOVE_UNUSED_PKG 0x2UL
#define NVM_INSTALL_UPDATE_REQ_FLAGS_ALLOWED_TO_DEFRAG 0x4UL
+ #define NVM_INSTALL_UPDATE_REQ_FLAGS_VERIFY_ONLY 0x8UL
u8 unused_0[2];
};
@@ -8101,7 +8310,14 @@ struct hwrm_selftest_qlist_output {
char test5_name[32];
char test6_name[32];
char test7_name[32];
- u8 unused_2[7];
+ u8 eyescope_target_BER_support;
+ #define SELFTEST_QLIST_RESP_EYESCOPE_TARGET_BER_SUPPORT_BER_1E8_SUPPORTED 0x0UL
+ #define SELFTEST_QLIST_RESP_EYESCOPE_TARGET_BER_SUPPORT_BER_1E9_SUPPORTED 0x1UL
+ #define SELFTEST_QLIST_RESP_EYESCOPE_TARGET_BER_SUPPORT_BER_1E10_SUPPORTED 0x2UL
+ #define SELFTEST_QLIST_RESP_EYESCOPE_TARGET_BER_SUPPORT_BER_1E11_SUPPORTED 0x3UL
+ #define SELFTEST_QLIST_RESP_EYESCOPE_TARGET_BER_SUPPORT_BER_1E12_SUPPORTED 0x4UL
+ #define SELFTEST_QLIST_RESP_EYESCOPE_TARGET_BER_SUPPORT_LAST SELFTEST_QLIST_RESP_EYESCOPE_TARGET_BER_SUPPORT_BER_1E12_SUPPORTED
+ u8 unused_2[6];
u8 valid;
};
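
Among the spec 1.10.1.54 additions above, the new COUNTER_MASK flag lets the driver ask a statistics context for counter widths (maximum values) instead of live counters, which is what makes wrap detection possible. A minimal sketch of issuing such a query with the driver's existing HWRM plumbing, using helper names as they appear elsewhere in this patch; error handling is elided:

        /* Sketch: request the counter masks for a statistics context
         * rather than the current counter values.
         */
        static int query_stat_ctx_masks(struct bnxt *bp, u32 stat_ctx_id)
        {
                struct hwrm_stat_ctx_query_input req = {0};

                bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_STAT_CTX_QUERY, -1, -1);
                req.stat_ctx_id = cpu_to_le32(stat_ctx_id);
                req.flags = STAT_CTX_QUERY_REQ_FLAGS_COUNTER_MASK;
                return hwrm_send_message(bp, &req, sizeof(req),
                                         HWRM_CMD_TIMEOUT);
        }
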
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
index 392e32c7122a..cc2ee4d0bd18 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
@@ -1029,7 +1029,7 @@ static int bnxt_vf_set_link(struct bnxt *bp, struct bnxt_vf_info *vf)
rc = bnxt_hwrm_exec_fwd_resp(
bp, vf, sizeof(struct hwrm_port_phy_qcfg_input));
} else {
- struct hwrm_port_phy_qcfg_output phy_qcfg_resp;
+ struct hwrm_port_phy_qcfg_output_compat phy_qcfg_resp = {0};
struct hwrm_port_phy_qcfg_input *phy_qcfg_req;
phy_qcfg_req =
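
The switch to hwrm_port_phy_qcfg_output_compat matters because this same update grows hwrm_port_phy_qcfg_output from 96 to 104 bytes (the 768b/96B -> 832b/104B change visible earlier in this diff): a VF running an older driver only reserves the legacy size, so the PF must forward a response in the old layout. An illustrative compile-time check of that size relationship; the 96-byte compat size is an assumption, and the real definition lives in the driver headers:

        /* Illustrative only: the compat layout must stay at the pre-PAM4
         * size so a forwarded response cannot overrun an old VF buffer.
         */
        static_assert(sizeof(struct hwrm_port_phy_qcfg_output_compat) == 96,
                      "compat response must keep the legacy 96-byte layout");
        static_assert(sizeof(struct hwrm_port_phy_qcfg_output) == 104,
                      "1.10.1.54 response grew to 104 bytes with PAM4 fields");
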
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
index 4a11c1e7cc02..5e4429b14b8c 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
@@ -1638,7 +1638,7 @@ static int bnxt_tc_get_flow_stats(struct bnxt *bp,
lastused = flow->lastused;
spin_unlock(&flow->stats_lock);
- flow_stats_update(&tc_flow_cmd->stats, stats.bytes, stats.packets,
+ flow_stats_update(&tc_flow_cmd->stats, stats.bytes, stats.packets, 0,
lastused, FLOW_ACTION_HW_STATS_DELAYED);
return 0;
}
@@ -1888,7 +1888,7 @@ static void bnxt_tc_setup_indr_rel(void *cb_priv)
kfree(priv);
}
-static int bnxt_tc_setup_indr_block(struct net_device *netdev, struct bnxt *bp,
+static int bnxt_tc_setup_indr_block(struct net_device *netdev, struct Qdisc *sch, struct bnxt *bp,
struct flow_block_offload *f, void *data,
void (*cleanup)(struct flow_block_cb *block_cb))
{
@@ -1911,7 +1911,7 @@ static int bnxt_tc_setup_indr_block(struct net_device *netdev, struct bnxt *bp,
block_cb = flow_indr_block_cb_alloc(bnxt_tc_setup_indr_block_cb,
cb_priv, cb_priv,
bnxt_tc_setup_indr_rel, f,
- netdev, data, bp, cleanup);
+ netdev, sch, data, bp, cleanup);
if (IS_ERR(block_cb)) {
list_del(&cb_priv->list);
kfree(cb_priv);
@@ -1946,7 +1946,7 @@ static bool bnxt_is_netdev_indr_offload(struct net_device *netdev)
return netif_is_vxlan(netdev);
}
-static int bnxt_tc_setup_indr_cb(struct net_device *netdev, void *cb_priv,
+static int bnxt_tc_setup_indr_cb(struct net_device *netdev, struct Qdisc *sch, void *cb_priv,
enum tc_setup_type type, void *type_data,
void *data,
void (*cleanup)(struct flow_block_cb *block_cb))
@@ -1956,8 +1956,7 @@ static int bnxt_tc_setup_indr_cb(struct net_device *netdev, void *cb_priv,
switch (type) {
case TC_SETUP_BLOCK:
- return bnxt_tc_setup_indr_block(netdev, cb_priv, type_data, data,
- cleanup);
+ return bnxt_tc_setup_indr_block(netdev, sch, cb_priv, type_data, data, cleanup);
default:
break;
}
@@ -2001,11 +2000,8 @@ int bnxt_init_tc(struct bnxt *bp)
struct bnxt_tc_info *tc_info;
int rc;
- if (bp->hwrm_spec_code < 0x10803) {
- netdev_warn(bp->dev,
- "Firmware does not support TC flower offload.\n");
- return -ENOTSUPP;
- }
+ if (bp->hwrm_spec_code < 0x10803)
+ return 0;
tc_info = kzalloc(sizeof(*tc_info), GFP_KERNEL);
if (!tc_info)
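
The extra 0 passed to flow_stats_update() above fills the helper's new drop-count argument, which bnxt does not track per flow. A sketch of how a driver that does track hardware drops would report them through the widened helper (signature inferred from the call site; see include/net/flow_offload.h at this point in net-next):

        /* Sketch: report per-flow stats including hardware drops; bnxt
         * passes 0 for drops today.
         */
        static void report_flow_stats(struct flow_cls_offload *cmd, u64 bytes,
                                      u64 pkts, u64 drops, u64 lastused)
        {
                flow_stats_update(&cmd->stats, bytes, pkts, drops, lastused,
                                  FLOW_ACTION_HW_STATS_DELAYED);
        }
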
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
index 5e3b4a3b69ea..2704a4709bc7 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
@@ -330,10 +330,6 @@ int bnxt_xdp(struct net_device *dev, struct netdev_bpf *xdp)
case XDP_SETUP_PROG:
rc = bnxt_xdp_set(bp, xdp->prog);
break;
- case XDP_QUERY_PROG:
- xdp->prog_id = bp->xdp_prog ? bp->xdp_prog->aux->id : 0;
- rc = 0;
- break;
default:
rc = -EINVAL;
break;
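
Dropping XDP_QUERY_PROG here is part of a tree-wide cleanup: the core now remembers the attached program itself, so per-driver query handlers like the one removed above are dead code. The equivalent of the removed logic, now done once in the core (a paraphrased sketch, not the exact core code):

        /* Sketch of the generic replacement for the removed driver code:
         * the id is derived from the program the core already tracks.
         */
        static u32 xdp_prog_id(const struct bpf_prog *prog)
        {
                return prog ? prog->aux->id : 0;
        }
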