-rw-r--r--  drivers/infiniband/hw/bnxt_re/hw_counters.c  2
-rw-r--r--  drivers/infiniband/hw/qedr/verbs.c  2
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt.c  494
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt.h  96
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c  2
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c  138
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h  2
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h  468
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c  2
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_chain.c  4
-rw-r--r--  drivers/net/ethernet/sfc/Kconfig  5
-rw-r--r--  drivers/net/ethernet/sfc/Makefile  4
-rw-r--r--  drivers/net/ethernet/sfc/ef10.c  7
-rw-r--r--  drivers/net/ethernet/sfc/ef100.c  541
-rw-r--r--  drivers/net/ethernet/sfc/ef100.h  12
-rw-r--r--  drivers/net/ethernet/sfc/ef100_ethtool.c  24
-rw-r--r--  drivers/net/ethernet/sfc/ef100_ethtool.h  12
-rw-r--r--  drivers/net/ethernet/sfc/ef100_netdev.c  274
-rw-r--r--  drivers/net/ethernet/sfc/ef100_netdev.h  17
-rw-r--r--  drivers/net/ethernet/sfc/ef100_nic.c  619
-rw-r--r--  drivers/net/ethernet/sfc/ef100_nic.h  32
-rw-r--r--  drivers/net/ethernet/sfc/ef100_regs.h  693
-rw-r--r--  drivers/net/ethernet/sfc/ef100_rx.c  31
-rw-r--r--  drivers/net/ethernet/sfc/ef100_rx.h  20
-rw-r--r--  drivers/net/ethernet/sfc/ef100_tx.c  51
-rw-r--r--  drivers/net/ethernet/sfc/ef100_tx.h  22
-rw-r--r--  drivers/net/ethernet/sfc/efx.c  8
-rw-r--r--  drivers/net/ethernet/sfc/efx.h  16
-rw-r--r--  drivers/net/ethernet/sfc/efx_common.c  11
-rw-r--r--  drivers/net/ethernet/sfc/ethtool.c  3
-rw-r--r--  drivers/net/ethernet/sfc/ethtool_common.c  10
-rw-r--r--  drivers/net/ethernet/sfc/ethtool_common.h  3
-rw-r--r--  drivers/net/ethernet/sfc/io.h  16
-rw-r--r--  drivers/net/ethernet/sfc/mcdi.c  2
-rw-r--r--  drivers/net/ethernet/sfc/mcdi.h  4
-rw-r--r--  drivers/net/ethernet/sfc/net_driver.h  18
-rw-r--r--  drivers/net/ethernet/sfc/nic_common.h  6
-rw-r--r--  drivers/net/ethernet/sfc/ptp.c  20
-rw-r--r--  drivers/net/ethernet/sfc/siena.c  3
-rw-r--r--  drivers/net/ethernet/sfc/tx.c  4
-rw-r--r--  drivers/net/ethernet/sfc/tx_common.h  2
-rw-r--r--  drivers/s390/net/ism_drv.c  2
-rw-r--r--  include/uapi/linux/hsr_netlink.h  2
-rw-r--r--  include/uapi/linux/if_link.h  12
-rw-r--r--  net/hsr/Kconfig  35
-rw-r--r--  net/hsr/hsr_debugfs.c  33
-rw-r--r--  net/hsr/hsr_device.c  181
-rw-r--r--  net/hsr/hsr_device.h  2
-rw-r--r--  net/hsr/hsr_forward.c  313
-rw-r--r--  net/hsr/hsr_forward.h  16
-rw-r--r--  net/hsr/hsr_framereg.c  95
-rw-r--r--  net/hsr/hsr_framereg.h  31
-rw-r--r--  net/hsr/hsr_main.c  2
-rw-r--r--  net/hsr/hsr_main.h  120
-rw-r--r--  net/hsr/hsr_netlink.c  38
-rw-r--r--  net/hsr/hsr_netlink.h  2
-rw-r--r--  net/hsr/hsr_slave.c  26
-rw-r--r--  net/hsr/hsr_slave.h  4
-rw-r--r--  net/ipv4/ipmr.c  2
-rw-r--r--  net/mpls/af_mpls.c  17
-rw-r--r--  net/smc/af_smc.c  13
-rw-r--r--  net/smc/smc_clc.h  1
-rw-r--r--  net/smc/smc_core.c  4
63 files changed, 4073 insertions, 578 deletions
diff --git a/drivers/infiniband/hw/bnxt_re/hw_counters.c b/drivers/infiniband/hw/bnxt_re/hw_counters.c
index 3421a0b15983..5f5408cdf008 100644
--- a/drivers/infiniband/hw/bnxt_re/hw_counters.c
+++ b/drivers/infiniband/hw/bnxt_re/hw_counters.c
@@ -132,7 +132,7 @@ int bnxt_re_ib_get_hw_stats(struct ib_device *ibdev,
stats->value[BNXT_RE_RECOVERABLE_ERRORS] =
le64_to_cpu(bnxt_re_stats->tx_bcast_pkts);
stats->value[BNXT_RE_RX_DROPS] =
- le64_to_cpu(bnxt_re_stats->rx_drop_pkts);
+ le64_to_cpu(bnxt_re_stats->rx_error_pkts);
stats->value[BNXT_RE_RX_DISCARDS] =
le64_to_cpu(bnxt_re_stats->rx_discard_pkts);
stats->value[BNXT_RE_RX_PKTS] =
diff --git a/drivers/infiniband/hw/qedr/verbs.c b/drivers/infiniband/hw/qedr/verbs.c
index 49b8a43e3fa2..f5aa85a5377c 100644
--- a/drivers/infiniband/hw/qedr/verbs.c
+++ b/drivers/infiniband/hw/qedr/verbs.c
@@ -1923,7 +1923,7 @@ qedr_roce_create_kernel_qp(struct qedr_dev *dev,
in_params->sq_pbl_ptr = qed_chain_get_pbl_phys(&qp->sq.pbl);
params.intended_use = QED_CHAIN_USE_TO_CONSUME_PRODUCE;
- params.elem_size = n_rq_elems;
+ params.num_elems = n_rq_elems;
params.elem_size = QEDR_RQE_ELEMENT_SIZE;
rc = dev->ops->common->chain_alloc(dev->cdev, &qp->rq.pbl, &params);
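The verbs.c fix above is a classic copy-and-paste bug: params.elem_size was assigned twice, so num_elems was never written and stayed zero when the RQ chain was allocated. A contrived standalone sketch of the before/after pattern, using a stand-in struct rather than the real qed chain-init parameters:

/*
 * Illustrative only: demo_chain_params stands in for the qed chain
 * parameter struct touched by the hunk above.
 */
struct demo_chain_params {
	int num_elems;
	int elem_size;
};

/* Before the fix: the first store is clobbered, num_elems stays 0. */
static void init_buggy(struct demo_chain_params *p, int n_rq_elems, int size)
{
	p->elem_size = n_rq_elems;	/* meant to be p->num_elems */
	p->elem_size = size;
}

/* After the fix: each field receives its intended value. */
static void init_fixed(struct demo_chain_params *p, int n_rq_elems, int size)
{
	p->num_elems = n_rq_elems;
	p->elem_size = size;
}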
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index b4f0c638c467..2622d3c2d766 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -3711,67 +3711,191 @@ static int bnxt_alloc_hwrm_short_cmd_req(struct bnxt *bp)
return 0;
}
-static void bnxt_free_port_stats(struct bnxt *bp)
+static void bnxt_free_stats_mem(struct bnxt *bp, struct bnxt_stats_mem *stats)
{
- struct pci_dev *pdev = bp->pdev;
+ kfree(stats->hw_masks);
+ stats->hw_masks = NULL;
+ kfree(stats->sw_stats);
+ stats->sw_stats = NULL;
+ if (stats->hw_stats) {
+ dma_free_coherent(&bp->pdev->dev, stats->len, stats->hw_stats,
+ stats->hw_stats_map);
+ stats->hw_stats = NULL;
+ }
+}
- bp->flags &= ~BNXT_FLAG_PORT_STATS;
- bp->flags &= ~BNXT_FLAG_PORT_STATS_EXT;
+static int bnxt_alloc_stats_mem(struct bnxt *bp, struct bnxt_stats_mem *stats,
+ bool alloc_masks)
+{
+ stats->hw_stats = dma_alloc_coherent(&bp->pdev->dev, stats->len,
+ &stats->hw_stats_map, GFP_KERNEL);
+ if (!stats->hw_stats)
+ return -ENOMEM;
- if (bp->hw_rx_port_stats) {
- dma_free_coherent(&pdev->dev, bp->hw_port_stats_size,
- bp->hw_rx_port_stats,
- bp->hw_rx_port_stats_map);
- bp->hw_rx_port_stats = NULL;
- }
+ memset(stats->hw_stats, 0, stats->len);
+
+ stats->sw_stats = kzalloc(stats->len, GFP_KERNEL);
+ if (!stats->sw_stats)
+ goto stats_mem_err;
- if (bp->hw_tx_port_stats_ext) {
- dma_free_coherent(&pdev->dev, sizeof(struct tx_port_stats_ext),
- bp->hw_tx_port_stats_ext,
- bp->hw_tx_port_stats_ext_map);
- bp->hw_tx_port_stats_ext = NULL;
+ if (alloc_masks) {
+ stats->hw_masks = kzalloc(stats->len, GFP_KERNEL);
+ if (!stats->hw_masks)
+ goto stats_mem_err;
}
+ return 0;
+
+stats_mem_err:
+ bnxt_free_stats_mem(bp, stats);
+ return -ENOMEM;
+}
+
+static void bnxt_fill_masks(u64 *mask_arr, u64 mask, int count)
+{
+ int i;
+
+ for (i = 0; i < count; i++)
+ mask_arr[i] = mask;
+}
+
+static void bnxt_copy_hw_masks(u64 *mask_arr, __le64 *hw_mask_arr, int count)
+{
+ int i;
+
+ for (i = 0; i < count; i++)
+ mask_arr[i] = le64_to_cpu(hw_mask_arr[i]);
+}
+
+static int bnxt_hwrm_func_qstat_ext(struct bnxt *bp,
+ struct bnxt_stats_mem *stats)
+{
+ struct hwrm_func_qstats_ext_output *resp = bp->hwrm_cmd_resp_addr;
+ struct hwrm_func_qstats_ext_input req = {0};
+ __le64 *hw_masks;
+ int rc;
+
+ if (!(bp->fw_cap & BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED) ||
+ !(bp->flags & BNXT_FLAG_CHIP_P5))
+ return -EOPNOTSUPP;
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QSTATS_EXT, -1, -1);
+ req.flags = FUNC_QSTATS_EXT_REQ_FLAGS_COUNTER_MASK;
+ mutex_lock(&bp->hwrm_cmd_lock);
+ rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ if (rc)
+ goto qstat_exit;
+
+ hw_masks = &resp->rx_ucast_pkts;
+ bnxt_copy_hw_masks(stats->hw_masks, hw_masks, stats->len / 8);
+
+qstat_exit:
+ mutex_unlock(&bp->hwrm_cmd_lock);
+ return rc;
+}
+
+static int bnxt_hwrm_port_qstats(struct bnxt *bp, u8 flags);
+static int bnxt_hwrm_port_qstats_ext(struct bnxt *bp, u8 flags);
- if (bp->hw_rx_port_stats_ext) {
- dma_free_coherent(&pdev->dev, sizeof(struct rx_port_stats_ext),
- bp->hw_rx_port_stats_ext,
- bp->hw_rx_port_stats_ext_map);
- bp->hw_rx_port_stats_ext = NULL;
+static void bnxt_init_stats(struct bnxt *bp)
+{
+ struct bnxt_napi *bnapi = bp->bnapi[0];
+ struct bnxt_cp_ring_info *cpr;
+ struct bnxt_stats_mem *stats;
+ __le64 *rx_stats, *tx_stats;
+ int rc, rx_count, tx_count;
+ u64 *rx_masks, *tx_masks;
+ u64 mask;
+ u8 flags;
+
+ cpr = &bnapi->cp_ring;
+ stats = &cpr->stats;
+ rc = bnxt_hwrm_func_qstat_ext(bp, stats);
+ if (rc) {
+ if (bp->flags & BNXT_FLAG_CHIP_P5)
+ mask = (1ULL << 48) - 1;
+ else
+ mask = -1ULL;
+ bnxt_fill_masks(stats->hw_masks, mask, stats->len / 8);
}
+ if (bp->flags & BNXT_FLAG_PORT_STATS) {
+ stats = &bp->port_stats;
+ rx_stats = stats->hw_stats;
+ rx_masks = stats->hw_masks;
+ rx_count = sizeof(struct rx_port_stats) / 8;
+ tx_stats = rx_stats + BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
+ tx_masks = rx_masks + BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
+ tx_count = sizeof(struct tx_port_stats) / 8;
+
+ flags = PORT_QSTATS_REQ_FLAGS_COUNTER_MASK;
+ rc = bnxt_hwrm_port_qstats(bp, flags);
+ if (rc) {
+ mask = (1ULL << 40) - 1;
- if (bp->hw_pcie_stats) {
- dma_free_coherent(&pdev->dev, sizeof(struct pcie_ctx_hw_stats),
- bp->hw_pcie_stats, bp->hw_pcie_stats_map);
- bp->hw_pcie_stats = NULL;
+ bnxt_fill_masks(rx_masks, mask, rx_count);
+ bnxt_fill_masks(tx_masks, mask, tx_count);
+ } else {
+ bnxt_copy_hw_masks(rx_masks, rx_stats, rx_count);
+ bnxt_copy_hw_masks(tx_masks, tx_stats, tx_count);
+ bnxt_hwrm_port_qstats(bp, 0);
+ }
+ }
+ if (bp->flags & BNXT_FLAG_PORT_STATS_EXT) {
+ stats = &bp->rx_port_stats_ext;
+ rx_stats = stats->hw_stats;
+ rx_masks = stats->hw_masks;
+ rx_count = sizeof(struct rx_port_stats_ext) / 8;
+ stats = &bp->tx_port_stats_ext;
+ tx_stats = stats->hw_stats;
+ tx_masks = stats->hw_masks;
+ tx_count = sizeof(struct tx_port_stats_ext) / 8;
+
+ flags = FUNC_QSTATS_EXT_REQ_FLAGS_COUNTER_MASK;
+ rc = bnxt_hwrm_port_qstats_ext(bp, flags);
+ if (rc) {
+ mask = (1ULL << 40) - 1;
+
+ bnxt_fill_masks(rx_masks, mask, rx_count);
+ if (tx_stats)
+ bnxt_fill_masks(tx_masks, mask, tx_count);
+ } else {
+ bnxt_copy_hw_masks(rx_masks, rx_stats, rx_count);
+ if (tx_stats)
+ bnxt_copy_hw_masks(tx_masks, tx_stats,
+ tx_count);
+ bnxt_hwrm_port_qstats_ext(bp, 0);
+ }
}
}
+static void bnxt_free_port_stats(struct bnxt *bp)
+{
+ bp->flags &= ~BNXT_FLAG_PORT_STATS;
+ bp->flags &= ~BNXT_FLAG_PORT_STATS_EXT;
+
+ bnxt_free_stats_mem(bp, &bp->port_stats);
+ bnxt_free_stats_mem(bp, &bp->rx_port_stats_ext);
+ bnxt_free_stats_mem(bp, &bp->tx_port_stats_ext);
+}
+
static void bnxt_free_ring_stats(struct bnxt *bp)
{
- struct pci_dev *pdev = bp->pdev;
- int size, i;
+ int i;
if (!bp->bnapi)
return;
- size = bp->hw_ring_stats_size;
-
for (i = 0; i < bp->cp_nr_rings; i++) {
struct bnxt_napi *bnapi = bp->bnapi[i];
struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
- if (cpr->hw_stats) {
- dma_free_coherent(&pdev->dev, size, cpr->hw_stats,
- cpr->hw_stats_map);
- cpr->hw_stats = NULL;
- }
+ bnxt_free_stats_mem(bp, &cpr->stats);
}
}
static int bnxt_alloc_stats(struct bnxt *bp)
{
u32 size, i;
- struct pci_dev *pdev = bp->pdev;
+ int rc;
size = bp->hw_ring_stats_size;
@@ -3779,11 +3903,10 @@ static int bnxt_alloc_stats(struct bnxt *bp)
struct bnxt_napi *bnapi = bp->bnapi[i];
struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
- cpr->hw_stats = dma_alloc_coherent(&pdev->dev, size,
- &cpr->hw_stats_map,
- GFP_KERNEL);
- if (!cpr->hw_stats)
- return -ENOMEM;
+ cpr->stats.len = size;
+ rc = bnxt_alloc_stats_mem(bp, &cpr->stats, !i);
+ if (rc)
+ return rc;
cpr->hw_stats_ctx_id = INVALID_STATS_CTX_ID;
}
@@ -3791,22 +3914,14 @@ static int bnxt_alloc_stats(struct bnxt *bp)
if (BNXT_VF(bp) || bp->chip_num == CHIP_NUM_58700)
return 0;
- if (bp->hw_rx_port_stats)
+ if (bp->port_stats.hw_stats)
goto alloc_ext_stats;
- bp->hw_port_stats_size = sizeof(struct rx_port_stats) +
- sizeof(struct tx_port_stats) + 1024;
-
- bp->hw_rx_port_stats =
- dma_alloc_coherent(&pdev->dev, bp->hw_port_stats_size,
- &bp->hw_rx_port_stats_map,
- GFP_KERNEL);
- if (!bp->hw_rx_port_stats)
- return -ENOMEM;
+ bp->port_stats.len = BNXT_PORT_STATS_SIZE;
+ rc = bnxt_alloc_stats_mem(bp, &bp->port_stats, true);
+ if (rc)
+ return rc;
- bp->hw_tx_port_stats = (void *)(bp->hw_rx_port_stats + 1) + 512;
- bp->hw_tx_port_stats_map = bp->hw_rx_port_stats_map +
- sizeof(struct rx_port_stats) + 512;
bp->flags |= BNXT_FLAG_PORT_STATS;
alloc_ext_stats:
@@ -3815,41 +3930,28 @@ alloc_ext_stats:
if (!(bp->fw_cap & BNXT_FW_CAP_EXT_STATS_SUPPORTED))
return 0;
- if (bp->hw_rx_port_stats_ext)
+ if (bp->rx_port_stats_ext.hw_stats)
goto alloc_tx_ext_stats;
- bp->hw_rx_port_stats_ext =
- dma_alloc_coherent(&pdev->dev, sizeof(struct rx_port_stats_ext),
- &bp->hw_rx_port_stats_ext_map, GFP_KERNEL);
- if (!bp->hw_rx_port_stats_ext)
+ bp->rx_port_stats_ext.len = sizeof(struct rx_port_stats_ext);
+ rc = bnxt_alloc_stats_mem(bp, &bp->rx_port_stats_ext, true);
+ /* Extended stats are optional */
+ if (rc)
return 0;
alloc_tx_ext_stats:
- if (bp->hw_tx_port_stats_ext)
- goto alloc_pcie_stats;
+ if (bp->tx_port_stats_ext.hw_stats)
+ return 0;
if (bp->hwrm_spec_code >= 0x10902 ||
(bp->fw_cap & BNXT_FW_CAP_EXT_STATS_SUPPORTED)) {
- bp->hw_tx_port_stats_ext =
- dma_alloc_coherent(&pdev->dev,
- sizeof(struct tx_port_stats_ext),
- &bp->hw_tx_port_stats_ext_map,
- GFP_KERNEL);
+ bp->tx_port_stats_ext.len = sizeof(struct tx_port_stats_ext);
+ rc = bnxt_alloc_stats_mem(bp, &bp->tx_port_stats_ext, true);
+ /* Extended stats are optional */
+ if (rc)
+ return 0;
}
bp->flags |= BNXT_FLAG_PORT_STATS_EXT;
-
-alloc_pcie_stats:
- if (bp->hw_pcie_stats ||
- !(bp->fw_cap & BNXT_FW_CAP_PCIE_STATS_SUPPORTED))
- return 0;
-
- bp->hw_pcie_stats =
- dma_alloc_coherent(&pdev->dev, sizeof(struct pcie_ctx_hw_stats),
- &bp->hw_pcie_stats_map, GFP_KERNEL);
- if (!bp->hw_pcie_stats)
- return 0;
-
- bp->flags |= BNXT_FLAG_PCIE_STATS;
return 0;
}
@@ -3949,6 +4051,8 @@ static void bnxt_free_mem(struct bnxt *bp, bool irq_re_init)
bnxt_free_ntp_fltrs(bp, irq_re_init);
if (irq_re_init) {
bnxt_free_ring_stats(bp);
+ if (!(bp->fw_cap & BNXT_FW_CAP_PORT_STATS_NO_RESET))
+ bnxt_free_port_stats(bp);
bnxt_free_ring_grps(bp);
bnxt_free_vnics(bp);
kfree(bp->tx_ring_map);
@@ -4052,6 +4156,7 @@ static int bnxt_alloc_mem(struct bnxt *bp, bool irq_re_init)
rc = bnxt_alloc_stats(bp);
if (rc)
goto alloc_mem_err;
+ bnxt_init_stats(bp);
rc = bnxt_alloc_ntp_fltrs(bp);
if (rc)
@@ -6458,7 +6563,7 @@ static int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp)
struct bnxt_napi *bnapi = bp->bnapi[i];
struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
- req.stats_dma_addr = cpu_to_le64(cpr->hw_stats_map);
+ req.stats_dma_addr = cpu_to_le64(cpr->stats.hw_stats_map);
rc = _hwrm_send_message(bp, &req, sizeof(req),
HWRM_CMD_TIMEOUT);
@@ -7489,7 +7594,89 @@ int bnxt_hwrm_fw_set_time(struct bnxt *bp)
return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
}
-static int bnxt_hwrm_port_qstats(struct bnxt *bp)
+static void bnxt_add_one_ctr(u64 hw, u64 *sw, u64 mask)
+{
+ u64 sw_tmp;
+
+ sw_tmp = (*sw & ~mask) | hw;
+ if (hw < (*sw & mask))
+ sw_tmp += mask + 1;
+ WRITE_ONCE(*sw, sw_tmp);
+}
+
+static void __bnxt_accumulate_stats(__le64 *hw_stats, u64 *sw_stats, u64 *masks,
+ int count, bool ignore_zero)
+{
+ int i;
+
+ for (i = 0; i < count; i++) {
+ u64 hw = le64_to_cpu(READ_ONCE(hw_stats[i]));
+
+ if (ignore_zero && !hw)
+ continue;
+
+ if (masks[i] == -1ULL)
+ sw_stats[i] = hw;
+ else
+ bnxt_add_one_ctr(hw, &sw_stats[i], masks[i]);
+ }
+}
+
+static void bnxt_accumulate_stats(struct bnxt_stats_mem *stats)
+{
+ if (!stats->hw_stats)
+ return;
+
+ __bnxt_accumulate_stats(stats->hw_stats, stats->sw_stats,
+ stats->hw_masks, stats->len / 8, false);
+}
+
+static void bnxt_accumulate_all_stats(struct bnxt *bp)
+{
+ struct bnxt_stats_mem *ring0_stats;
+ bool ignore_zero = false;
+ int i;
+
+ /* Chip bug. Counter intermittently becomes 0. */
+ if (bp->flags & BNXT_FLAG_CHIP_P5)
+ ignore_zero = true;
+
+ for (i = 0; i < bp->cp_nr_rings; i++) {
+ struct bnxt_napi *bnapi = bp->bnapi[i];
+ struct bnxt_cp_ring_info *cpr;
+ struct bnxt_stats_mem *stats;
+
+ cpr = &bnapi->cp_ring;
+ stats = &cpr->stats;
+ if (!i)
+ ring0_stats = stats;
+ __bnxt_accumulate_stats(stats->hw_stats, stats->sw_stats,
+ ring0_stats->hw_masks,
+ ring0_stats->len / 8, ignore_zero);
+ }
+ if (bp->flags & BNXT_FLAG_PORT_STATS) {
+ struct bnxt_stats_mem *stats = &bp->port_stats;
+ __le64 *hw_stats = stats->hw_stats;
+ u64 *sw_stats = stats->sw_stats;
+ u64 *masks = stats->hw_masks;
+ int cnt;
+
+ cnt = sizeof(struct rx_port_stats) / 8;
+ __bnxt_accumulate_stats(hw_stats, sw_stats, masks, cnt, false);
+
+ hw_stats += BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
+ sw_stats += BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
+ masks += BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
+ cnt = sizeof(struct tx_port_stats) / 8;
+ __bnxt_accumulate_stats(hw_stats, sw_stats, masks, cnt, false);
+ }
+ if (bp->flags & BNXT_FLAG_PORT_STATS_EXT) {
+ bnxt_accumulate_stats(&bp->rx_port_stats_ext);
+ bnxt_accumulate_stats(&bp->tx_port_stats_ext);
+ }
+}
+
+static int bnxt_hwrm_port_qstats(struct bnxt *bp, u8 flags)
{
struct bnxt_pf_info *pf = &bp->pf;
struct hwrm_port_qstats_input req = {0};
@@ -7497,14 +7684,19 @@ static int bnxt_hwrm_port_qstats(struct bnxt *bp)
if (!(bp->flags & BNXT_FLAG_PORT_STATS))
return 0;
+ if (flags && !(bp->fw_cap & BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED))
+ return -EOPNOTSUPP;
+
+ req.flags = flags;
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_QSTATS, -1, -1);
req.port_id = cpu_to_le16(pf->port_id);
- req.tx_stat_host_addr = cpu_to_le64(bp->hw_tx_port_stats_map);
- req.rx_stat_host_addr = cpu_to_le64(bp->hw_rx_port_stats_map);
+ req.tx_stat_host_addr = cpu_to_le64(bp->port_stats.hw_stats_map +
+ BNXT_TX_PORT_STATS_BYTE_OFFSET);
+ req.rx_stat_host_addr = cpu_to_le64(bp->port_stats.hw_stats_map);
return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
}
-static int bnxt_hwrm_port_qstats_ext(struct bnxt *bp)
+static int bnxt_hwrm_port_qstats_ext(struct bnxt *bp, u8 flags)
{
struct hwrm_port_qstats_ext_output *resp = bp->hwrm_cmd_resp_addr;
struct hwrm_queue_pri2cos_qcfg_input req2 = {0};
@@ -7516,14 +7708,18 @@ static int bnxt_hwrm_port_qstats_ext(struct bnxt *bp)
if (!(bp->flags & BNXT_FLAG_PORT_STATS_EXT))
return 0;
+ if (flags && !(bp->fw_cap & BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED))
+ return -EOPNOTSUPP;
+
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_QSTATS_EXT, -1, -1);
+ req.flags = flags;
req.port_id = cpu_to_le16(pf->port_id);
req.rx_stat_size = cpu_to_le16(sizeof(struct rx_port_stats_ext));
- req.rx_stat_host_addr = cpu_to_le64(bp->hw_rx_port_stats_ext_map);
- tx_stat_size = bp->hw_tx_port_stats_ext ?
- sizeof(*bp->hw_tx_port_stats_ext) : 0;
+ req.rx_stat_host_addr = cpu_to_le64(bp->rx_port_stats_ext.hw_stats_map);
+ tx_stat_size = bp->tx_port_stats_ext.hw_stats ?
+ sizeof(struct tx_port_stats_ext) : 0;
req.tx_stat_size = cpu_to_le16(tx_stat_size);
- req.tx_stat_host_addr = cpu_to_le64(bp->hw_tx_port_stats_ext_map);
+ req.tx_stat_host_addr = cpu_to_le64(bp->tx_port_stats_ext.hw_stats_map);
mutex_lock(&bp->hwrm_cmd_lock);
rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
if (!rc) {
@@ -7534,6 +7730,9 @@ static int bnxt_hwrm_port_qstats_ext(struct bnxt *bp)
bp->fw_rx_stats_ext_size = 0;
bp->fw_tx_stats_ext_size = 0;
}
+ if (flags)
+ goto qstats_done;
+
if (bp->fw_tx_stats_ext_size <=
offsetof(struct tx_port_stats_ext, pfc_pri0_tx_duration_us) / 8) {
mutex_unlock(&bp->hwrm_cmd_lock);
@@ -7574,19 +7773,6 @@ qstats_done:
return rc;
}
-static int bnxt_hwrm_pcie_qstats(struct bnxt *bp)
-{
- struct hwrm_pcie_qstats_input req = {0};
-
- if (!(bp->flags & BNXT_FLAG_PCIE_STATS))
- return 0;
-
- bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PCIE_QSTATS, -1, -1);
- req.pcie_stat_size = cpu_to_le16(sizeof(struct pcie_ctx_hw_stats));
- req.pcie_stat_host_addr = cpu_to_le64(bp->hw_pcie_stats_map);
- return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
-}
-
static void bnxt_hwrm_free_tunnel_ports(struct bnxt *bp)
{
if (bp->vxlan_fw_dst_port_id != INVALID_HW_RING_ID)
@@ -8608,6 +8794,9 @@ static int bnxt_hwrm_phy_qcaps(struct bnxt *bp)
if (BNXT_PF(bp))
bp->fw_cap |= BNXT_FW_CAP_SHARED_PORT_CFG;
}
+ if (resp->flags & PORT_PHY_QCAPS_RESP_FLAGS_CUMULATIVE_COUNTERS_ON_RESET)
+ bp->fw_cap |= BNXT_FW_CAP_PORT_STATS_NO_RESET;
+
if (resp->supported_speeds_auto_mode)
link_info->support_auto_speeds =
le16_to_cpu(resp->supported_speeds_auto_mode);
@@ -9610,34 +9799,33 @@ static void bnxt_get_ring_stats(struct bnxt *bp,
{
int i;
-
for (i = 0; i < bp->cp_nr_rings; i++) {
struct bnxt_napi *bnapi = bp->bnapi[i];
struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
- struct ctx_hw_stats *hw_stats = cpr->hw_stats;
+ u64 *sw = cpr->stats.sw_stats;
- stats->rx_packets += le64_to_cpu(hw_stats->rx_ucast_pkts);
- stats->rx_packets += le64_to_cpu(hw_stats->rx_mcast_pkts);
- stats->rx_packets += le64_to_cpu(hw_stats->rx_bcast_pkts);
+ stats->rx_packets += BNXT_GET_RING_STATS64(sw, rx_ucast_pkts);
+ stats->rx_packets += BNXT_GET_RING_STATS64(sw, rx_mcast_pkts);
+ stats->rx_packets += BNXT_GET_RING_STATS64(sw, rx_bcast_pkts);
- stats->tx_packets += le64_to_cpu(hw_stats->tx_ucast_pkts);
- stats->tx_packets += le64_to_cpu(hw_stats->tx_mcast_pkts);
- stats->tx_packets += le64_to_cpu(hw_stats->tx_bcast_pkts);
+ stats->tx_packets += BNXT_GET_RING_STATS64(sw, tx_ucast_pkts);
+ stats->tx_packets += BNXT_GET_RING_STATS64(sw, tx_mcast_pkts);
+ stats->tx_packets += BNXT_GET_RING_STATS64(sw, tx_bcast_pkts);
- stats->rx_bytes += le64_to_cpu(hw_stats->rx_ucast_bytes);
- stats->rx_bytes += le64_to_cpu(hw_stats->rx_mcast_bytes);
- stats->rx_bytes += le64_to_cpu(hw_stats->rx_bcast_bytes);
+ stats->rx_bytes += BNXT_GET_RING_STATS64(sw, rx_ucast_bytes);
+ stats->rx_bytes += BNXT_GET_RING_STATS64(sw, rx_mcast_bytes);
+ stats->rx_bytes += BNXT_GET_RING_STATS64(sw, rx_bcast_bytes);
- stats->tx_bytes += le64_to_cpu(hw_stats->tx_ucast_bytes);
- stats->tx_bytes += le64_to_cpu(hw_stats->tx_mcast_bytes);
- stats->tx_bytes += le64_to_cpu(hw_stats->tx_bcast_bytes);
+ stats->tx_bytes += BNXT_GET_RING_STATS64(sw, tx_ucast_bytes);
+ stats->tx_bytes += BNXT_GET_RING_STATS64(sw, tx_mcast_bytes);
+ stats->tx_bytes += BNXT_GET_RING_STATS64(sw, tx_bcast_bytes);
stats->rx_missed_errors +=
- le64_to_cpu(hw_stats->rx_discard_pkts);
+ BNXT_GET_RING_STATS64(sw, rx_discard_pkts);
- stats->multicast += le64_to_cpu(hw_stats->rx_mcast_pkts);
+ stats->multicast += BNXT_GET_RING_STATS64(sw, rx_mcast_pkts);
- stats->tx_dropped += le64_to_cpu(hw_stats->tx_drop_pkts);
+ stats->tx_dropped += BNXT_GET_RING_STATS64(sw, tx_error_pkts);
}
}
@@ -9675,19 +9863,26 @@ bnxt_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
bnxt_add_prev_stats(bp, stats);
if (bp->flags & BNXT_FLAG_PORT_STATS) {
- struct rx_port_stats *rx = bp->hw_rx_port_stats;
- struct tx_port_stats *tx = bp->hw_tx_port_stats;
-
- stats->rx_crc_errors = le64_to_cpu(rx->rx_fcs_err_frames);
- stats->rx_frame_errors = le64_to_cpu(rx->rx_align_err_frames);
- stats->rx_length_errors = le64_to_cpu(rx->rx_undrsz_frames) +
- le64_to_cpu(rx->rx_ovrsz_frames) +
- le64_to_cpu(rx->rx_runt_frames);
- stats->rx_errors = le64_to_cpu(rx->rx_false_carrier_frames) +
- le64_to_cpu(rx->rx_jbr_frames);
- stats->collisions = le64_to_cpu(tx->tx_total_collisions);
- stats->tx_fifo_errors = le64_to_cpu(tx->tx_fifo_underruns);
- stats->tx_errors = le64_to_cpu(tx->tx_err);
+ u64 *rx = bp->port_stats.sw_stats;
+ u64 *tx = bp->port_stats.sw_stats +
+ BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
+
+ stats->rx_crc_errors =
+ BNXT_GET_RX_PORT_STATS64(rx, rx_fcs_err_frames);
+ stats->rx_frame_errors =
+ BNXT_GET_RX_PORT_STATS64(rx, rx_align_err_frames);
+ stats->rx_length_errors =
+ BNXT_GET_RX_PORT_STATS64(rx, rx_undrsz_frames) +
+ BNXT_GET_RX_PORT_STATS64(rx, rx_ovrsz_frames) +
+ BNXT_GET_RX_PORT_STATS64(rx, rx_runt_frames);
+ stats->rx_errors =
+ BNXT_GET_RX_PORT_STATS64(rx, rx_false_carrier_frames) +
+ BNXT_GET_RX_PORT_STATS64(rx, rx_jbr_frames);
+ stats->collisions =
+ BNXT_GET_TX_PORT_STATS64(tx, tx_total_collisions);
+ stats->tx_fifo_errors =
+ BNXT_GET_TX_PORT_STATS64(tx, tx_fifo_underruns);
+ stats->tx_errors = BNXT_GET_TX_PORT_STATS64(tx, tx_err);
}
clear_bit(BNXT_STATE_READ_STATS, &bp->state);
}
@@ -10033,6 +10228,38 @@ static int bnxt_set_features(struct net_device *dev, netdev_features_t features)
return rc;
}
+int bnxt_dbg_hwrm_rd_reg(struct bnxt *bp, u32 reg_off, u16 num_words,
+ u32 *reg_buf)
+{
+ struct hwrm_dbg_read_direct_output *resp = bp->hwrm_cmd_resp_addr;
+ struct hwrm_dbg_read_direct_input req = {0};
+ __le32 *dbg_reg_buf;
+ dma_addr_t mapping;
+ int rc, i;
+
+ dbg_reg_buf = dma_alloc_coherent(&bp->pdev->dev, num_words * 4,
+ &mapping, GFP_KERNEL);
+ if (!dbg_reg_buf)
+ return -ENOMEM;
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_DBG_READ_DIRECT, -1, -1);
+ req.host_dest_addr = cpu_to_le64(mapping);
+ req.read_addr = cpu_to_le32(reg_off + CHIMP_REG_VIEW_ADDR);
+ req.read_len32 = cpu_to_le32(num_words);
+ mutex_lock(&bp->hwrm_cmd_lock);
+ rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ if (rc || resp->error_code) {
+ rc = -EIO;
+ goto dbg_rd_reg_exit;
+ }
+ for (i = 0; i < num_words; i++)
+ reg_buf[i] = le32_to_cpu(dbg_reg_buf[i]);
+
+dbg_rd_reg_exit:
+ mutex_unlock(&bp->hwrm_cmd_lock);
+ dma_free_coherent(&bp->pdev->dev, num_words * 4, dbg_reg_buf, mapping);
+ return rc;
+}
+
static int bnxt_dbg_hwrm_ring_info_get(struct bnxt *bp, u8 ring_type,
u32 ring_id, u32 *prod, u32 *cons)
{
@@ -10177,8 +10404,7 @@ static void bnxt_timer(struct timer_list *t)
if (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)
bnxt_fw_health_check(bp);
- if (bp->link_info.link_up && (bp->flags & BNXT_FLAG_PORT_STATS) &&
- bp->stats_coal_ticks) {
+ if (bp->link_info.link_up && bp->stats_coal_ticks) {
set_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event);
bnxt_queue_sp_work(bp);
}
@@ -10464,9 +10690,9 @@ static void bnxt_sp_task(struct work_struct *work)
if (test_and_clear_bit(BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT, &bp->sp_event))
bnxt_hwrm_exec_fwd_req(bp);
if (test_and_clear_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event)) {
- bnxt_hwrm_port_qstats(bp);
- bnxt_hwrm_port_qstats_ext(bp);
- bnxt_hwrm_pcie_qstats(bp);
+ bnxt_hwrm_port_qstats(bp, 0);
+ bnxt_hwrm_port_qstats_ext(bp, 0);
+ bnxt_accumulate_all_stats(bp);
}
if (test_and_clear_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event)) {
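The heart of the new scheme in bnxt.c above is bnxt_add_one_ctr(): the hardware reports counters that roll over at (mask + 1) — for example 48-bit ring counters on P5 chips, or 40-bit port counters — and the driver splices each fresh hardware sample into a full-width 64-bit software counter, adding a carry whenever the low bits move backwards. A minimal userspace sketch of that arithmetic (illustrative values; the kernel version also uses WRITE_ONCE() for the final store):

#include <stdint.h>
#include <stdio.h>

/* Extend a counter that wraps at (mask + 1) into the 64-bit value *sw. */
static void add_one_ctr(uint64_t hw, uint64_t *sw, uint64_t mask)
{
	uint64_t sw_tmp;

	sw_tmp = (*sw & ~mask) | hw;	/* splice new low bits into sw */
	if (hw < (*sw & mask))		/* low bits went backwards: a wrap */
		sw_tmp += mask + 1;	/* carry into the preserved high bits */
	*sw = sw_tmp;
}

int main(void)
{
	uint64_t mask = (1ULL << 48) - 1;	/* 48-bit hardware counter */
	uint64_t sw = mask - 5;			/* just below the rollover */

	add_one_ctr(10, &sw, mask);		/* hw wrapped past 2^48 */
	printf("%llu\n", (unsigned long long)sw);	/* 2^48 + 10, not 10 */
	return 0;
}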
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
index 2acd7f958246..5a13eb66beda 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
@@ -919,6 +919,14 @@ struct bnxt_sw_stats {
struct bnxt_cmn_sw_stats cmn;
};
+struct bnxt_stats_mem {
+ u64 *sw_stats;
+ u64 *hw_masks;
+ void *hw_stats;
+ dma_addr_t hw_stats_map;
+ int len;
+};
+
struct bnxt_cp_ring_info {
struct bnxt_napi *bnapi;
u32 cp_raw_cons;
@@ -943,8 +951,7 @@ struct bnxt_cp_ring_info {
dma_addr_t cp_desc_mapping[MAX_CP_PAGES];
- struct ctx_hw_stats *hw_stats;
- dma_addr_t hw_stats_map;
+ struct bnxt_stats_mem stats;
u32 hw_stats_ctx_id;
struct bnxt_sw_stats sw_stats;
@@ -1135,6 +1142,50 @@ struct bnxt_ntuple_filter {
#define BNXT_FLTR_UPDATE 1
};
+struct hwrm_port_phy_qcfg_output_compat {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 link;
+ u8 link_signal_mode;
+ __le16 link_speed;
+ u8 duplex_cfg;
+ u8 pause;
+ __le16 support_speeds;
+ __le16 force_link_speed;
+ u8 auto_mode;
+ u8 auto_pause;
+ __le16 auto_link_speed;
+ __le16 auto_link_speed_mask;
+ u8 wirespeed;
+ u8 lpbk;
+ u8 force_pause;
+ u8 module_status;
+ __le32 preemphasis;
+ u8 phy_maj;
+ u8 phy_min;
+ u8 phy_bld;
+ u8 phy_type;
+ u8 media_type;
+ u8 xcvr_pkg_type;
+ u8 eee_config_phy_addr;
+ u8 parallel_detect;
+ __le16 link_partner_adv_speeds;
+ u8 link_partner_adv_auto_mode;
+ u8 link_partner_adv_pause;
+ __le16 adv_eee_link_speed_mask;
+ __le16 link_partner_adv_eee_link_speed_mask;
+ __le32 xcvr_identifier_type_tx_lpi_timer;
+ __le16 fec_cfg;
+ u8 duplex_state;
+ u8 option_flags;
+ char phy_vendor_name[16];
+ char phy_vendor_partnumber[16];
+ u8 unused_0[7];
+ u8 valid;
+};
+
struct bnxt_link_info {
u8 phy_type;
u8 media_type;
@@ -1253,6 +1304,9 @@ struct bnxt_test_info {
char string[BNXT_MAX_TEST][ETH_GSTRING_LEN];
};
+#define CHIMP_REG_VIEW_ADDR \
+ ((bp->flags & BNXT_FLAG_CHIP_P5) ? 0x80000000 : 0xb1000000)
+
#define BNXT_GRCPF_REG_CHIMP_COMM 0x0
#define BNXT_GRCPF_REG_CHIMP_COMM_TRIGGER 0x100
#define BNXT_GRCPF_REG_WINDOW_BASE_OUT 0x400
@@ -1566,7 +1620,6 @@ struct bnxt {
#define BNXT_FLAG_DIM 0x2000000
#define BNXT_FLAG_ROCE_MIRROR_CAP 0x4000000
#define BNXT_FLAG_PORT_STATS_EXT 0x10000000
- #define BNXT_FLAG_PCIE_STATS 0x40000000
#define BNXT_FLAG_ALL_CONFIG_FEATS (BNXT_FLAG_TPA | \
BNXT_FLAG_RFS | \
@@ -1719,6 +1772,7 @@ struct bnxt {
#define BNXT_FW_CAP_VLAN_RX_STRIP 0x01000000
#define BNXT_FW_CAP_VLAN_TX_INSERT 0x02000000
#define BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED 0x04000000
+ #define BNXT_FW_CAP_PORT_STATS_NO_RESET 0x10000000
#define BNXT_NEW_RM(bp) ((bp)->fw_cap & BNXT_FW_CAP_NEW_RM)
u32 hwrm_spec_code;
@@ -1733,17 +1787,9 @@ struct bnxt {
dma_addr_t hwrm_cmd_kong_resp_dma_addr;
struct rtnl_link_stats64 net_stats_prev;
- struct rx_port_stats *hw_rx_port_stats;
- struct tx_port_stats *hw_tx_port_stats;
- struct rx_port_stats_ext *hw_rx_port_stats_ext;
- struct tx_port_stats_ext *hw_tx_port_stats_ext;
- struct pcie_ctx_hw_stats *hw_pcie_stats;
- dma_addr_t hw_rx_port_stats_map;
- dma_addr_t hw_tx_port_stats_map;
- dma_addr_t hw_rx_port_stats_ext_map;
- dma_addr_t hw_tx_port_stats_ext_map;
- dma_addr_t hw_pcie_stats_map;
- int hw_port_stats_size;
+ struct bnxt_stats_mem port_stats;
+ struct bnxt_stats_mem rx_port_stats_ext;
+ struct bnxt_stats_mem tx_port_stats_ext;
u16 fw_rx_stats_ext_size;
u16 fw_tx_stats_ext_size;
u16 hw_ring_stats_size;
@@ -1885,12 +1931,27 @@ struct bnxt {
struct device *hwmon_dev;
};
+#define BNXT_GET_RING_STATS64(sw, counter) \
+ (*((sw) + offsetof(struct ctx_hw_stats, counter) / 8))
+
+#define BNXT_GET_RX_PORT_STATS64(sw, counter) \
+ (*((sw) + offsetof(struct rx_port_stats, counter) / 8))
+
+#define BNXT_GET_TX_PORT_STATS64(sw, counter) \
+ (*((sw) + offsetof(struct tx_port_stats, counter) / 8))
+
+#define BNXT_PORT_STATS_SIZE \
+ (sizeof(struct rx_port_stats) + sizeof(struct tx_port_stats) + 1024)
+
+#define BNXT_TX_PORT_STATS_BYTE_OFFSET \
+ (sizeof(struct rx_port_stats) + 512)
+
#define BNXT_RX_STATS_OFFSET(counter) \
(offsetof(struct rx_port_stats, counter) / 8)
#define BNXT_TX_STATS_OFFSET(counter) \
((offsetof(struct tx_port_stats, counter) + \
- sizeof(struct rx_port_stats) + 512) / 8)
+ BNXT_TX_PORT_STATS_BYTE_OFFSET) / 8)
#define BNXT_RX_STATS_EXT_OFFSET(counter) \
(offsetof(struct rx_port_stats_ext, counter) / 8)
@@ -1898,9 +1959,6 @@ struct bnxt {
#define BNXT_TX_STATS_EXT_OFFSET(counter) \
(offsetof(struct tx_port_stats_ext, counter) / 8)
-#define BNXT_PCIE_STATS_OFFSET(counter) \
- (offsetof(struct pcie_ctx_hw_stats, counter) / 8)
-
#define BNXT_HW_FEATURE_VLAN_ALL_RX \
(NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX)
#define BNXT_HW_FEATURE_VLAN_ALL_TX \
@@ -2062,6 +2120,8 @@ int bnxt_open_nic(struct bnxt *, bool, bool);
int bnxt_half_open_nic(struct bnxt *bp);
void bnxt_half_close_nic(struct bnxt *bp);
int bnxt_close_nic(struct bnxt *, bool, bool);
+int bnxt_dbg_hwrm_rd_reg(struct bnxt *bp, u32 reg_off, u16 num_words,
+ u32 *reg_buf);
void bnxt_fw_exception(struct bnxt *bp);
void bnxt_fw_reset(struct bnxt *bp);
int bnxt_check_rings(struct bnxt *bp, int tx, int rx, bool sh, int tcs,
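The BNXT_GET_RING_STATS64()/BNXT_GET_RX_PORT_STATS64()/BNXT_GET_TX_PORT_STATS64() macros above work because the accumulated software copy is a flat u64 array laid out exactly like the corresponding hardware stats struct, so offsetof()/8 turns a field name into an array index. A standalone sketch of the idiom, with a made-up three-field struct instead of the real ctx_hw_stats:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct demo_hw_stats {		/* stand-in for ctx_hw_stats */
	uint64_t rx_ucast_pkts;
	uint64_t rx_mcast_pkts;
	uint64_t rx_bcast_pkts;
};

/* Same shape as BNXT_GET_RING_STATS64(): field name -> u64 array slot. */
#define DEMO_GET_STATS64(sw, counter) \
	(*((sw) + offsetof(struct demo_hw_stats, counter) / 8))

int main(void)
{
	uint64_t sw_stats[3] = { 100, 200, 300 };	/* the "sw" mirror */

	/* offsetof(..., rx_mcast_pkts) is 8, so this reads sw_stats[1]. */
	printf("%llu\n",
	       (unsigned long long)DEMO_GET_STATS64(sw_stats, rx_mcast_pkts));
	return 0;
}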
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c
index 02b27551d34d..8e90224c43a2 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c
@@ -544,7 +544,7 @@ static int bnxt_dcbnl_ieee_setets(struct net_device *dev, struct ieee_ets *ets)
static int bnxt_dcbnl_ieee_getpfc(struct net_device *dev, struct ieee_pfc *pfc)
{
struct bnxt *bp = netdev_priv(dev);
- __le64 *stats = (__le64 *)bp->hw_rx_port_stats;
+ __le64 *stats = bp->port_stats.hw_stats;
struct ieee_pfc *my_pfc = bp->ieee_pfc;
long rx_off, tx_off;
int i, rc;
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
index 25252d5beec8..64da654f1038 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
@@ -142,7 +142,7 @@ static const char * const bnxt_ring_rx_stats_str[] = {
"rx_mcast_packets",
"rx_bcast_packets",
"rx_discards",
- "rx_drops",
+ "rx_errors",
"rx_ucast_bytes",
"rx_mcast_bytes",
"rx_bcast_bytes",
@@ -152,8 +152,8 @@ static const char * const bnxt_ring_tx_stats_str[] = {
"tx_ucast_packets",
"tx_mcast_packets",
"tx_bcast_packets",
+ "tx_errors",
"tx_discards",
- "tx_drops",
"tx_ucast_bytes",
"tx_mcast_bytes",
"tx_bcast_bytes",
@@ -293,9 +293,6 @@ static const char * const bnxt_cmn_sw_stats_str[] = {
BNXT_TX_STATS_PRI_ENTRY(counter, 6), \
BNXT_TX_STATS_PRI_ENTRY(counter, 7)
-#define BNXT_PCIE_STATS_ENTRY(counter) \
- { BNXT_PCIE_STATS_OFFSET(counter), __stringify(counter) }
-
enum {
RX_TOTAL_DISCARDS,
TX_TOTAL_DISCARDS,
@@ -454,24 +451,6 @@ static const struct {
BNXT_TX_STATS_PRI_ENTRIES(tx_packets),
};
-static const struct {
- long offset;
- char string[ETH_GSTRING_LEN];
-} bnxt_pcie_stats_arr[] = {
- BNXT_PCIE_STATS_ENTRY(pcie_pl_signal_integrity),
- BNXT_PCIE_STATS_ENTRY(pcie_dl_signal_integrity),
- BNXT_PCIE_STATS_ENTRY(pcie_tl_signal_integrity),
- BNXT_PCIE_STATS_ENTRY(pcie_link_integrity),
- BNXT_PCIE_STATS_ENTRY(pcie_tx_traffic_rate),
- BNXT_PCIE_STATS_ENTRY(pcie_rx_traffic_rate),
- BNXT_PCIE_STATS_ENTRY(pcie_tx_dllp_statistics),
- BNXT_PCIE_STATS_ENTRY(pcie_rx_dllp_statistics),
- BNXT_PCIE_STATS_ENTRY(pcie_equalization_time),
- BNXT_PCIE_STATS_ENTRY(pcie_ltssm_histogram[0]),
- BNXT_PCIE_STATS_ENTRY(pcie_ltssm_histogram[2]),
- BNXT_PCIE_STATS_ENTRY(pcie_recovery_histogram),
-};
-
#define BNXT_NUM_SW_FUNC_STATS ARRAY_SIZE(bnxt_sw_func_stats)
#define BNXT_NUM_PORT_STATS ARRAY_SIZE(bnxt_port_stats_arr)
#define BNXT_NUM_STATS_PRI \
@@ -479,7 +458,6 @@ static const struct {
ARRAY_SIZE(bnxt_rx_pkts_pri_arr) + \
ARRAY_SIZE(bnxt_tx_bytes_pri_arr) + \
ARRAY_SIZE(bnxt_tx_pkts_pri_arr))
-#define BNXT_NUM_PCIE_STATS ARRAY_SIZE(bnxt_pcie_stats_arr)
static int bnxt_get_num_tpa_ring_stats(struct bnxt *bp)
{
@@ -526,9 +504,6 @@ static int bnxt_get_num_stats(struct bnxt *bp)
num_stats += BNXT_NUM_STATS_PRI;
}
- if (bp->flags & BNXT_FLAG_PCIE_STATS)
- num_stats += BNXT_NUM_PCIE_STATS;
-
return num_stats;
}
@@ -584,19 +559,19 @@ static void bnxt_get_ethtool_stats(struct net_device *dev,
for (i = 0; i < bp->cp_nr_rings; i++) {
struct bnxt_napi *bnapi = bp->bnapi[i];
struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
- __le64 *hw_stats = (__le64 *)cpr->hw_stats;
+ u64 *sw_stats = cpr->stats.sw_stats;
u64 *sw;
int k;
if (is_rx_ring(bp, i)) {
for (k = 0; k < NUM_RING_RX_HW_STATS; j++, k++)
- buf[j] = le64_to_cpu(hw_stats[k]);
+ buf[j] = sw_stats[k];
}
if (is_tx_ring(bp, i)) {
k = NUM_RING_RX_HW_STATS;
for (; k < NUM_RING_RX_HW_STATS + NUM_RING_TX_HW_STATS;
j++, k++)
- buf[j] = le64_to_cpu(hw_stats[k]);
+ buf[j] = sw_stats[k];
}
if (!tpa_stats || !is_rx_ring(bp, i))
goto skip_tpa_ring_stats;
@@ -604,7 +579,7 @@ static void bnxt_get_ethtool_stats(struct net_device *dev,
k = NUM_RING_RX_HW_STATS + NUM_RING_TX_HW_STATS;
for (; k < NUM_RING_RX_HW_STATS + NUM_RING_TX_HW_STATS +
tpa_stats; j++, k++)
- buf[j] = le64_to_cpu(hw_stats[k]);
+ buf[j] = sw_stats[k];
skip_tpa_ring_stats:
sw = (u64 *)&cpr->sw_stats.rx;
@@ -618,9 +593,9 @@ skip_tpa_ring_stats:
buf[j] = sw[k];
bnxt_sw_func_stats[RX_TOTAL_DISCARDS].counter +=
- le64_to_cpu(cpr->hw_stats->rx_discard_pkts);
+ BNXT_GET_RING_STATS64(sw_stats, rx_discard_pkts);
bnxt_sw_func_stats[TX_TOTAL_DISCARDS].counter +=
- le64_to_cpu(cpr->hw_stats->tx_discard_pkts);
+ BNXT_GET_RING_STATS64(sw_stats, tx_discard_pkts);
}
for (i = 0; i < BNXT_NUM_SW_FUNC_STATS; i++, j++)
@@ -628,60 +603,50 @@ skip_tpa_ring_stats:
skip_ring_stats:
if (bp->flags & BNXT_FLAG_PORT_STATS) {
- __le64 *port_stats = (__le64 *)bp->hw_rx_port_stats;
+ u64 *port_stats = bp->port_stats.sw_stats;
- for (i = 0; i < BNXT_NUM_PORT_STATS; i++, j++) {
- buf[j] = le64_to_cpu(*(port_stats +
- bnxt_port_stats_arr[i].offset));
- }
+ for (i = 0; i < BNXT_NUM_PORT_STATS; i++, j++)
+ buf[j] = *(port_stats + bnxt_port_stats_arr[i].offset);
}
if (bp->flags & BNXT_FLAG_PORT_STATS_EXT) {
- __le64 *rx_port_stats_ext = (__le64 *)bp->hw_rx_port_stats_ext;
- __le64 *tx_port_stats_ext = (__le64 *)bp->hw_tx_port_stats_ext;
+ u64 *rx_port_stats_ext = bp->rx_port_stats_ext.sw_stats;
+ u64 *tx_port_stats_ext = bp->tx_port_stats_ext.sw_stats;
for (i = 0; i < bp->fw_rx_stats_ext_size; i++, j++) {
- buf[j] = le64_to_cpu(*(rx_port_stats_ext +
- bnxt_port_stats_ext_arr[i].offset));
+ buf[j] = *(rx_port_stats_ext +
+ bnxt_port_stats_ext_arr[i].offset);
}
for (i = 0; i < bp->fw_tx_stats_ext_size; i++, j++) {
- buf[j] = le64_to_cpu(*(tx_port_stats_ext +
- bnxt_tx_port_stats_ext_arr[i].offset));
+ buf[j] = *(tx_port_stats_ext +
+ bnxt_tx_port_stats_ext_arr[i].offset);
}
if (bp->pri2cos_valid) {
for (i = 0; i < 8; i++, j++) {
long n = bnxt_rx_bytes_pri_arr[i].base_off +
bp->pri2cos_idx[i];
- buf[j] = le64_to_cpu(*(rx_port_stats_ext + n));
+ buf[j] = *(rx_port_stats_ext + n);
}
for (i = 0; i < 8; i++, j++) {
long n = bnxt_rx_pkts_pri_arr[i].base_off +
bp->pri2cos_idx[i];
- buf[j] = le64_to_cpu(*(rx_port_stats_ext + n));
+ buf[j] = *(rx_port_stats_ext + n);
}
for (i = 0; i < 8; i++, j++) {
long n = bnxt_tx_bytes_pri_arr[i].base_off +
bp->pri2cos_idx[i];
- buf[j] = le64_to_cpu(*(tx_port_stats_ext + n));
+ buf[j] = *(tx_port_stats_ext + n);
}
for (i = 0; i < 8; i++, j++) {
long n = bnxt_tx_pkts_pri_arr[i].base_off +
bp->pri2cos_idx[i];
- buf[j] = le64_to_cpu(*(tx_port_stats_ext + n));
+ buf[j] = *(tx_port_stats_ext + n);
}
}
}
- if (bp->flags & BNXT_FLAG_PCIE_STATS) {
- __le64 *pcie_stats = (__le64 *)bp->hw_pcie_stats;
-
- for (i = 0; i < BNXT_NUM_PCIE_STATS; i++, j++) {
- buf[j] = le64_to_cpu(*(pcie_stats +
- bnxt_pcie_stats_arr[i].offset));
- }
- }
}
static void bnxt_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
@@ -782,12 +747,6 @@ skip_tpa_stats:
}
}
}
- if (bp->flags & BNXT_FLAG_PCIE_STATS) {
- for (i = 0; i < BNXT_NUM_PCIE_STATS; i++) {
- strcpy(buf, bnxt_pcie_stats_arr[i].string);
- buf += ETH_GSTRING_LEN;
- }
- }
break;
case ETH_SS_TEST:
if (bp->num_tests)
@@ -1365,6 +1324,59 @@ static void bnxt_get_drvinfo(struct net_device *dev,
info->regdump_len = 0;
}
+static int bnxt_get_regs_len(struct net_device *dev)
+{
+ struct bnxt *bp = netdev_priv(dev);
+ int reg_len;
+
+ reg_len = BNXT_PXP_REG_LEN;
+
+ if (bp->fw_cap & BNXT_FW_CAP_PCIE_STATS_SUPPORTED)
+ reg_len += sizeof(struct pcie_ctx_hw_stats);
+
+ return reg_len;
+}
+
+static void bnxt_get_regs(struct net_device *dev, struct ethtool_regs *regs,
+ void *_p)
+{
+ struct pcie_ctx_hw_stats *hw_pcie_stats;
+ struct hwrm_pcie_qstats_input req = {0};
+ struct bnxt *bp = netdev_priv(dev);
+ dma_addr_t hw_pcie_stats_addr;
+ int rc;
+
+ regs->version = 0;
+ bnxt_dbg_hwrm_rd_reg(bp, 0, BNXT_PXP_REG_LEN / 4, _p);
+
+ if (!(bp->fw_cap & BNXT_FW_CAP_PCIE_STATS_SUPPORTED))
+ return;
+
+ hw_pcie_stats = dma_alloc_coherent(&bp->pdev->dev,
+ sizeof(*hw_pcie_stats),
+ &hw_pcie_stats_addr, GFP_KERNEL);
+ if (!hw_pcie_stats)
+ return;
+
+ regs->version = 1;
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PCIE_QSTATS, -1, -1);
+ req.pcie_stat_size = cpu_to_le16(sizeof(*hw_pcie_stats));
+ req.pcie_stat_host_addr = cpu_to_le64(hw_pcie_stats_addr);
+ mutex_lock(&bp->hwrm_cmd_lock);
+ rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ if (!rc) {
+ __le64 *src = (__le64 *)hw_pcie_stats;
+ u64 *dst = (u64 *)(_p + BNXT_PXP_REG_LEN);
+ int i;
+
+ for (i = 0; i < sizeof(*hw_pcie_stats) / sizeof(__le64); i++)
+ dst[i] = le64_to_cpu(src[i]);
+ }
+ mutex_unlock(&bp->hwrm_cmd_lock);
+ dma_free_coherent(&bp->pdev->dev, sizeof(*hw_pcie_stats), hw_pcie_stats,
+ hw_pcie_stats_addr);
+}
+
static void bnxt_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
struct bnxt *bp = netdev_priv(dev);
@@ -3640,6 +3652,8 @@ const struct ethtool_ops bnxt_ethtool_ops = {
.get_pauseparam = bnxt_get_pauseparam,
.set_pauseparam = bnxt_set_pauseparam,
.get_drvinfo = bnxt_get_drvinfo,
+ .get_regs_len = bnxt_get_regs_len,
+ .get_regs = bnxt_get_regs,
.get_wol = bnxt_get_wol,
.set_wol = bnxt_set_wol,
.get_coalesce = bnxt_get_coalesce,
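The new bnxt_get_regs() lays its dump out as BNXT_PXP_REG_LEN bytes of PXP registers followed, when regs->version is 1 (i.e. the HWRM_PCIE_QSTATS query succeeded), by a host-endian copy of struct pcie_ctx_hw_stats. A sketch of how a userspace consumer might walk a dump saved with something like "ethtool -d <iface> raw on > dump.bin" — buffer handling and field interpretation here are illustrative:

#include <stdint.h>
#include <stdio.h>

#define BNXT_PXP_REG_LEN 0x3110	/* must match the kernel's value */

static void parse_bnxt_regs(const void *buf, uint32_t len, uint32_t version)
{
	const uint32_t *pxp = buf;

	printf("first PXP register word: 0x%08x\n", (unsigned)pxp[0]);

	if (version == 1 && len > BNXT_PXP_REG_LEN) {
		/* PCIe counters follow the fixed-size register block. */
		const uint64_t *pcie =
			(const void *)((const char *)buf + BNXT_PXP_REG_LEN);
		uint32_t n = (len - BNXT_PXP_REG_LEN) / 8;

		printf("%u PCIe counter words follow, first = %llu\n",
		       n, (unsigned long long)pcie[0]);
	}
}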
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h
index dddbca1d052c..34f44ddfad79 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h
@@ -84,6 +84,8 @@ struct hwrm_dbg_cmn_output {
ETH_RESET_PHY | ETH_RESET_RAM) \
<< ETH_RESET_SHARED_SHIFT)
+#define BNXT_PXP_REG_LEN 0x3110
+
extern const struct ethtool_ops bnxt_ethtool_ops;
u32 bnxt_get_rxfh_indir_size(struct net_device *dev);
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
index 7e9235c8d21e..c4af6bf15e36 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
@@ -169,9 +169,14 @@ struct cmd_nums {
#define HWRM_RING_CMPL_RING_QAGGINT_PARAMS 0x52UL
#define HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS 0x53UL
#define HWRM_RING_AGGINT_QCAPS 0x54UL
+ #define HWRM_RING_SCHQ_ALLOC 0x55UL
+ #define HWRM_RING_SCHQ_CFG 0x56UL
+ #define HWRM_RING_SCHQ_FREE 0x57UL
#define HWRM_RING_RESET 0x5eUL
#define HWRM_RING_GRP_ALLOC 0x60UL
#define HWRM_RING_GRP_FREE 0x61UL
+ #define HWRM_RING_CFG 0x62UL
+ #define HWRM_RING_QCFG 0x63UL
#define HWRM_RESERVED5 0x64UL
#define HWRM_RESERVED6 0x65UL
#define HWRM_VNIC_RSS_COS_LB_CTX_ALLOC 0x70UL
@@ -224,6 +229,7 @@ struct cmd_nums {
#define HWRM_FW_IPC_MAILBOX 0xccUL
#define HWRM_FW_ECN_CFG 0xcdUL
#define HWRM_FW_ECN_QCFG 0xceUL
+ #define HWRM_FW_SECURE_CFG 0xcfUL
#define HWRM_EXEC_FWD_RESP 0xd0UL
#define HWRM_REJECT_FWD_RESP 0xd1UL
#define HWRM_FWD_RESP 0xd2UL
@@ -337,6 +343,7 @@ struct cmd_nums {
#define HWRM_FUNC_VF_BW_QCFG 0x196UL
#define HWRM_FUNC_HOST_PF_IDS_QUERY 0x197UL
#define HWRM_FUNC_QSTATS_EXT 0x198UL
+ #define HWRM_STAT_EXT_CTX_QUERY 0x199UL
#define HWRM_SELFTEST_QLIST 0x200UL
#define HWRM_SELFTEST_EXEC 0x201UL
#define HWRM_SELFTEST_IRQ 0x202UL
@@ -353,24 +360,30 @@ struct cmd_nums {
#define HWRM_TF_VERSION_GET 0x2bdUL
#define HWRM_TF_SESSION_OPEN 0x2c6UL
#define HWRM_TF_SESSION_ATTACH 0x2c7UL
- #define HWRM_TF_SESSION_CLOSE 0x2c8UL
- #define HWRM_TF_SESSION_QCFG 0x2c9UL
- #define HWRM_TF_SESSION_RESC_QCAPS 0x2caUL
- #define HWRM_TF_SESSION_RESC_ALLOC 0x2cbUL
- #define HWRM_TF_SESSION_RESC_FREE 0x2ccUL
- #define HWRM_TF_SESSION_RESC_FLUSH 0x2cdUL
- #define HWRM_TF_TBL_TYPE_GET 0x2d0UL
- #define HWRM_TF_TBL_TYPE_SET 0x2d1UL
- #define HWRM_TF_CTXT_MEM_RGTR 0x2daUL
- #define HWRM_TF_CTXT_MEM_UNRGTR 0x2dbUL
- #define HWRM_TF_EXT_EM_QCAPS 0x2dcUL
- #define HWRM_TF_EXT_EM_OP 0x2ddUL
- #define HWRM_TF_EXT_EM_CFG 0x2deUL
- #define HWRM_TF_EXT_EM_QCFG 0x2dfUL
- #define HWRM_TF_TCAM_SET 0x2eeUL
- #define HWRM_TF_TCAM_GET 0x2efUL
- #define HWRM_TF_TCAM_MOVE 0x2f0UL
- #define HWRM_TF_TCAM_FREE 0x2f1UL
+ #define HWRM_TF_SESSION_REGISTER 0x2c8UL
+ #define HWRM_TF_SESSION_UNREGISTER 0x2c9UL
+ #define HWRM_TF_SESSION_CLOSE 0x2caUL
+ #define HWRM_TF_SESSION_QCFG 0x2cbUL
+ #define HWRM_TF_SESSION_RESC_QCAPS 0x2ccUL
+ #define HWRM_TF_SESSION_RESC_ALLOC 0x2cdUL
+ #define HWRM_TF_SESSION_RESC_FREE 0x2ceUL
+ #define HWRM_TF_SESSION_RESC_FLUSH 0x2cfUL
+ #define HWRM_TF_TBL_TYPE_GET 0x2daUL
+ #define HWRM_TF_TBL_TYPE_SET 0x2dbUL
+ #define HWRM_TF_CTXT_MEM_RGTR 0x2e4UL
+ #define HWRM_TF_CTXT_MEM_UNRGTR 0x2e5UL
+ #define HWRM_TF_EXT_EM_QCAPS 0x2e6UL
+ #define HWRM_TF_EXT_EM_OP 0x2e7UL
+ #define HWRM_TF_EXT_EM_CFG 0x2e8UL
+ #define HWRM_TF_EXT_EM_QCFG 0x2e9UL
+ #define HWRM_TF_EM_INSERT 0x2eaUL
+ #define HWRM_TF_EM_DELETE 0x2ebUL
+ #define HWRM_TF_TCAM_SET 0x2f8UL
+ #define HWRM_TF_TCAM_GET 0x2f9UL
+ #define HWRM_TF_TCAM_MOVE 0x2faUL
+ #define HWRM_TF_TCAM_FREE 0x2fbUL
+ #define HWRM_TF_GLOBAL_CFG_SET 0x2fcUL
+ #define HWRM_TF_GLOBAL_CFG_GET 0x2fdUL
#define HWRM_SV 0x400UL
#define HWRM_DBG_READ_DIRECT 0xff10UL
#define HWRM_DBG_READ_INDIRECT 0xff11UL
@@ -391,6 +404,7 @@ struct cmd_nums {
#define HWRM_DBG_QCAPS 0xff20UL
#define HWRM_DBG_QCFG 0xff21UL
#define HWRM_DBG_CRASHDUMP_MEDIUM_CFG 0xff22UL
+ #define HWRM_NVM_REQ_ARBITRATION 0xffedUL
#define HWRM_NVM_FACTORY_DEFAULTS 0xffeeUL
#define HWRM_NVM_VALIDATE_OPTION 0xffefUL
#define HWRM_NVM_FLUSH 0xfff0UL
@@ -464,8 +478,8 @@ struct hwrm_err_output {
#define HWRM_VERSION_MAJOR 1
#define HWRM_VERSION_MINOR 10
#define HWRM_VERSION_UPDATE 1
-#define HWRM_VERSION_RSVD 33
-#define HWRM_VERSION_STR "1.10.1.33"
+#define HWRM_VERSION_RSVD 54
+#define HWRM_VERSION_STR "1.10.1.54"
/* hwrm_ver_get_input (size:192b/24B) */
struct hwrm_ver_get_input {
@@ -1094,6 +1108,8 @@ struct hwrm_func_vf_cfg_input {
#define FUNC_VF_CFG_REQ_FLAGS_STAT_CTX_ASSETS_TEST 0x20UL
#define FUNC_VF_CFG_REQ_FLAGS_VNIC_ASSETS_TEST 0x40UL
#define FUNC_VF_CFG_REQ_FLAGS_L2_CTX_ASSETS_TEST 0x80UL
+ #define FUNC_VF_CFG_REQ_FLAGS_PPP_PUSH_MODE_ENABLE 0x100UL
+ #define FUNC_VF_CFG_REQ_FLAGS_PPP_PUSH_MODE_DISABLE 0x200UL
__le16 num_rsscos_ctxs;
__le16 num_cmpl_rings;
__le16 num_tx_rings;
@@ -1189,10 +1205,16 @@ struct hwrm_func_qcaps_output {
__le16 max_sp_tx_rings;
u8 unused_0[2];
__le32 flags_ext;
- #define FUNC_QCAPS_RESP_FLAGS_EXT_ECN_MARK_SUPPORTED 0x1UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT_ECN_STATS_SUPPORTED 0x2UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT_EXT_HW_STATS_SUPPORTED 0x4UL
- u8 unused_1[3];
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_ECN_MARK_SUPPORTED 0x1UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_ECN_STATS_SUPPORTED 0x2UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_EXT_HW_STATS_SUPPORTED 0x4UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_HOT_RESET_IF_SUPPORT 0x8UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_PROXY_MODE_SUPPORT 0x10UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_TX_PROXY_SRC_INTF_OVERRIDE_SUPPORT 0x20UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_SCHQ_SUPPORTED 0x40UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_PPP_PUSH_MODE_SUPPORTED 0x80UL
+ u8 max_schqs;
+ u8 unused_1[2];
u8 valid;
};
@@ -1226,6 +1248,8 @@ struct hwrm_func_qcfg_output {
#define FUNC_QCFG_RESP_FLAGS_TRUSTED_VF 0x40UL
#define FUNC_QCFG_RESP_FLAGS_SECURE_MODE_ENABLED 0x80UL
#define FUNC_QCFG_RESP_FLAGS_PREBOOT_LEGACY_L2_RINGS 0x100UL
+ #define FUNC_QCFG_RESP_FLAGS_HOT_RESET_ALLOWED 0x200UL
+ #define FUNC_QCFG_RESP_FLAGS_PPP_PUSH_MODE_ENABLED 0x400UL
u8 mac_address[6];
__le16 pci_id;
__le16 alloc_rsscos_ctx;
@@ -1321,7 +1345,7 @@ struct hwrm_func_qcfg_output {
u8 valid;
};
-/* hwrm_func_cfg_input (size:704b/88B) */
+/* hwrm_func_cfg_input (size:768b/96B) */
struct hwrm_func_cfg_input {
__le16 req_type;
__le16 cmpl_ring;
@@ -1352,30 +1376,35 @@ struct hwrm_func_cfg_input {
#define FUNC_CFG_REQ_FLAGS_NQ_ASSETS_TEST 0x800000UL
#define FUNC_CFG_REQ_FLAGS_TRUSTED_VF_DISABLE 0x1000000UL
#define FUNC_CFG_REQ_FLAGS_PREBOOT_LEGACY_L2_RINGS 0x2000000UL
+ #define FUNC_CFG_REQ_FLAGS_HOT_RESET_IF_EN_DIS 0x4000000UL
+ #define FUNC_CFG_REQ_FLAGS_PPP_PUSH_MODE_ENABLE 0x8000000UL
+ #define FUNC_CFG_REQ_FLAGS_PPP_PUSH_MODE_DISABLE 0x10000000UL
__le32 enables;
- #define FUNC_CFG_REQ_ENABLES_MTU 0x1UL
- #define FUNC_CFG_REQ_ENABLES_MRU 0x2UL
- #define FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS 0x4UL
- #define FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS 0x8UL
- #define FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS 0x10UL
- #define FUNC_CFG_REQ_ENABLES_NUM_RX_RINGS 0x20UL
- #define FUNC_CFG_REQ_ENABLES_NUM_L2_CTXS 0x40UL
- #define FUNC_CFG_REQ_ENABLES_NUM_VNICS 0x80UL
- #define FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS 0x100UL
- #define FUNC_CFG_REQ_ENABLES_DFLT_MAC_ADDR 0x200UL
- #define FUNC_CFG_REQ_ENABLES_DFLT_VLAN 0x400UL
- #define FUNC_CFG_REQ_ENABLES_DFLT_IP_ADDR 0x800UL
- #define FUNC_CFG_REQ_ENABLES_MIN_BW 0x1000UL
- #define FUNC_CFG_REQ_ENABLES_MAX_BW 0x2000UL
- #define FUNC_CFG_REQ_ENABLES_ASYNC_EVENT_CR 0x4000UL
- #define FUNC_CFG_REQ_ENABLES_VLAN_ANTISPOOF_MODE 0x8000UL
- #define FUNC_CFG_REQ_ENABLES_ALLOWED_VLAN_PRIS 0x10000UL
- #define FUNC_CFG_REQ_ENABLES_EVB_MODE 0x20000UL
- #define FUNC_CFG_REQ_ENABLES_NUM_MCAST_FILTERS 0x40000UL
- #define FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS 0x80000UL
- #define FUNC_CFG_REQ_ENABLES_CACHE_LINESIZE 0x100000UL
- #define FUNC_CFG_REQ_ENABLES_NUM_MSIX 0x200000UL
- #define FUNC_CFG_REQ_ENABLES_ADMIN_LINK_STATE 0x400000UL
+ #define FUNC_CFG_REQ_ENABLES_MTU 0x1UL
+ #define FUNC_CFG_REQ_ENABLES_MRU 0x2UL
+ #define FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS 0x4UL
+ #define FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS 0x8UL
+ #define FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS 0x10UL
+ #define FUNC_CFG_REQ_ENABLES_NUM_RX_RINGS 0x20UL
+ #define FUNC_CFG_REQ_ENABLES_NUM_L2_CTXS 0x40UL
+ #define FUNC_CFG_REQ_ENABLES_NUM_VNICS 0x80UL
+ #define FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS 0x100UL
+ #define FUNC_CFG_REQ_ENABLES_DFLT_MAC_ADDR 0x200UL
+ #define FUNC_CFG_REQ_ENABLES_DFLT_VLAN 0x400UL
+ #define FUNC_CFG_REQ_ENABLES_DFLT_IP_ADDR 0x800UL
+ #define FUNC_CFG_REQ_ENABLES_MIN_BW 0x1000UL
+ #define FUNC_CFG_REQ_ENABLES_MAX_BW 0x2000UL
+ #define FUNC_CFG_REQ_ENABLES_ASYNC_EVENT_CR 0x4000UL
+ #define FUNC_CFG_REQ_ENABLES_VLAN_ANTISPOOF_MODE 0x8000UL
+ #define FUNC_CFG_REQ_ENABLES_ALLOWED_VLAN_PRIS 0x10000UL
+ #define FUNC_CFG_REQ_ENABLES_EVB_MODE 0x20000UL
+ #define FUNC_CFG_REQ_ENABLES_NUM_MCAST_FILTERS 0x40000UL
+ #define FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS 0x80000UL
+ #define FUNC_CFG_REQ_ENABLES_CACHE_LINESIZE 0x100000UL
+ #define FUNC_CFG_REQ_ENABLES_NUM_MSIX 0x200000UL
+ #define FUNC_CFG_REQ_ENABLES_ADMIN_LINK_STATE 0x400000UL
+ #define FUNC_CFG_REQ_ENABLES_HOT_RESET_IF_SUPPORT 0x800000UL
+ #define FUNC_CFG_REQ_ENABLES_SCHQ_ID 0x1000000UL
__le16 mtu;
__le16 mru;
__le16 num_rsscos_ctxs;
@@ -1449,6 +1478,8 @@ struct hwrm_func_cfg_input {
#define FUNC_CFG_REQ_OPTIONS_RSVD_MASK 0xf0UL
#define FUNC_CFG_REQ_OPTIONS_RSVD_SFT 4
__le16 num_mcast_filters;
+ __le16 schq_id;
+ u8 unused_0[6];
};
/* hwrm_func_cfg_output (size:128b/16B) */
@@ -1507,7 +1538,7 @@ struct hwrm_func_qstats_output {
u8 valid;
};
-/* hwrm_func_qstats_ext_input (size:192b/24B) */
+/* hwrm_func_qstats_ext_input (size:256b/32B) */
struct hwrm_func_qstats_ext_input {
__le16 req_type;
__le16 cmpl_ring;
@@ -1520,7 +1551,12 @@ struct hwrm_func_qstats_ext_input {
#define FUNC_QSTATS_EXT_REQ_FLAGS_ROCE_ONLY 0x1UL
#define FUNC_QSTATS_EXT_REQ_FLAGS_COUNTER_MASK 0x2UL
#define FUNC_QSTATS_EXT_REQ_FLAGS_LAST FUNC_QSTATS_EXT_REQ_FLAGS_COUNTER_MASK
- u8 unused_0[5];
+ u8 unused_0[1];
+ __le32 enables;
+ #define FUNC_QSTATS_EXT_REQ_ENABLES_SCHQ_ID 0x1UL
+ __le16 schq_id;
+ __le16 traffic_class;
+ u8 unused_1[4];
};
/* hwrm_func_qstats_ext_output (size:1472b/184B) */
@@ -1533,15 +1569,15 @@ struct hwrm_func_qstats_ext_output {
__le64 rx_mcast_pkts;
__le64 rx_bcast_pkts;
__le64 rx_discard_pkts;
- __le64 rx_drop_pkts;
+ __le64 rx_error_pkts;
__le64 rx_ucast_bytes;
__le64 rx_mcast_bytes;
__le64 rx_bcast_bytes;
__le64 tx_ucast_pkts;
__le64 tx_mcast_pkts;
__le64 tx_bcast_pkts;
+ __le64 tx_error_pkts;
__le64 tx_discard_pkts;
- __le64 tx_drop_pkts;
__le64 tx_ucast_bytes;
__le64 tx_mcast_bytes;
__le64 tx_bcast_bytes;
@@ -2376,33 +2412,39 @@ struct hwrm_port_phy_cfg_input {
__le16 target_id;
__le64 resp_addr;
__le32 flags;
- #define PORT_PHY_CFG_REQ_FLAGS_RESET_PHY 0x1UL
- #define PORT_PHY_CFG_REQ_FLAGS_DEPRECATED 0x2UL
- #define PORT_PHY_CFG_REQ_FLAGS_FORCE 0x4UL
- #define PORT_PHY_CFG_REQ_FLAGS_RESTART_AUTONEG 0x8UL
- #define PORT_PHY_CFG_REQ_FLAGS_EEE_ENABLE 0x10UL
- #define PORT_PHY_CFG_REQ_FLAGS_EEE_DISABLE 0x20UL
- #define PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_ENABLE 0x40UL
- #define PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_DISABLE 0x80UL
- #define PORT_PHY_CFG_REQ_FLAGS_FEC_AUTONEG_ENABLE 0x100UL
- #define PORT_PHY_CFG_REQ_FLAGS_FEC_AUTONEG_DISABLE 0x200UL
- #define PORT_PHY_CFG_REQ_FLAGS_FEC_CLAUSE74_ENABLE 0x400UL
- #define PORT_PHY_CFG_REQ_FLAGS_FEC_CLAUSE74_DISABLE 0x800UL
- #define PORT_PHY_CFG_REQ_FLAGS_FEC_CLAUSE91_ENABLE 0x1000UL
- #define PORT_PHY_CFG_REQ_FLAGS_FEC_CLAUSE91_DISABLE 0x2000UL
- #define PORT_PHY_CFG_REQ_FLAGS_FORCE_LINK_DWN 0x4000UL
+ #define PORT_PHY_CFG_REQ_FLAGS_RESET_PHY 0x1UL
+ #define PORT_PHY_CFG_REQ_FLAGS_DEPRECATED 0x2UL
+ #define PORT_PHY_CFG_REQ_FLAGS_FORCE 0x4UL
+ #define PORT_PHY_CFG_REQ_FLAGS_RESTART_AUTONEG 0x8UL
+ #define PORT_PHY_CFG_REQ_FLAGS_EEE_ENABLE 0x10UL
+ #define PORT_PHY_CFG_REQ_FLAGS_EEE_DISABLE 0x20UL
+ #define PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_ENABLE 0x40UL
+ #define PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_DISABLE 0x80UL
+ #define PORT_PHY_CFG_REQ_FLAGS_FEC_AUTONEG_ENABLE 0x100UL
+ #define PORT_PHY_CFG_REQ_FLAGS_FEC_AUTONEG_DISABLE 0x200UL
+ #define PORT_PHY_CFG_REQ_FLAGS_FEC_CLAUSE74_ENABLE 0x400UL
+ #define PORT_PHY_CFG_REQ_FLAGS_FEC_CLAUSE74_DISABLE 0x800UL
+ #define PORT_PHY_CFG_REQ_FLAGS_FEC_CLAUSE91_ENABLE 0x1000UL
+ #define PORT_PHY_CFG_REQ_FLAGS_FEC_CLAUSE91_DISABLE 0x2000UL
+ #define PORT_PHY_CFG_REQ_FLAGS_FORCE_LINK_DWN 0x4000UL
+ #define PORT_PHY_CFG_REQ_FLAGS_FEC_RS544_1XN_ENABLE 0x8000UL
+ #define PORT_PHY_CFG_REQ_FLAGS_FEC_RS544_1XN_DISABLE 0x10000UL
+ #define PORT_PHY_CFG_REQ_FLAGS_FEC_RS544_2XN_ENABLE 0x20000UL
+ #define PORT_PHY_CFG_REQ_FLAGS_FEC_RS544_2XN_DISABLE 0x40000UL
__le32 enables;
- #define PORT_PHY_CFG_REQ_ENABLES_AUTO_MODE 0x1UL
- #define PORT_PHY_CFG_REQ_ENABLES_AUTO_DUPLEX 0x2UL
- #define PORT_PHY_CFG_REQ_ENABLES_AUTO_PAUSE 0x4UL
- #define PORT_PHY_CFG_REQ_ENABLES_AUTO_LINK_SPEED 0x8UL
- #define PORT_PHY_CFG_REQ_ENABLES_AUTO_LINK_SPEED_MASK 0x10UL
- #define PORT_PHY_CFG_REQ_ENABLES_WIRESPEED 0x20UL
- #define PORT_PHY_CFG_REQ_ENABLES_LPBK 0x40UL
- #define PORT_PHY_CFG_REQ_ENABLES_PREEMPHASIS 0x80UL
- #define PORT_PHY_CFG_REQ_ENABLES_FORCE_PAUSE 0x100UL
- #define PORT_PHY_CFG_REQ_ENABLES_EEE_LINK_SPEED_MASK 0x200UL
- #define PORT_PHY_CFG_REQ_ENABLES_TX_LPI_TIMER 0x400UL
+ #define PORT_PHY_CFG_REQ_ENABLES_AUTO_MODE 0x1UL
+ #define PORT_PHY_CFG_REQ_ENABLES_AUTO_DUPLEX 0x2UL
+ #define PORT_PHY_CFG_REQ_ENABLES_AUTO_PAUSE 0x4UL
+ #define PORT_PHY_CFG_REQ_ENABLES_AUTO_LINK_SPEED 0x8UL
+ #define PORT_PHY_CFG_REQ_ENABLES_AUTO_LINK_SPEED_MASK 0x10UL
+ #define PORT_PHY_CFG_REQ_ENABLES_WIRESPEED 0x20UL
+ #define PORT_PHY_CFG_REQ_ENABLES_LPBK 0x40UL
+ #define PORT_PHY_CFG_REQ_ENABLES_PREEMPHASIS 0x80UL
+ #define PORT_PHY_CFG_REQ_ENABLES_FORCE_PAUSE 0x100UL
+ #define PORT_PHY_CFG_REQ_ENABLES_EEE_LINK_SPEED_MASK 0x200UL
+ #define PORT_PHY_CFG_REQ_ENABLES_TX_LPI_TIMER 0x400UL
+ #define PORT_PHY_CFG_REQ_ENABLES_FORCE_PAM4_LINK_SPEED 0x800UL
+ #define PORT_PHY_CFG_REQ_ENABLES_AUTO_PAM4_LINK_SPEED_MASK 0x1000UL
__le16 port_id;
__le16 force_link_speed;
#define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_100MB 0x1UL
@@ -2415,7 +2457,6 @@ struct hwrm_port_phy_cfg_input {
#define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_40GB 0x190UL
#define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_50GB 0x1f4UL
#define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_100GB 0x3e8UL
- #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_200GB 0x7d0UL
#define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_10MB 0xffffUL
#define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_LAST PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_10MB
u8 auto_mode;
@@ -2446,7 +2487,6 @@ struct hwrm_port_phy_cfg_input {
#define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_40GB 0x190UL
#define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_50GB 0x1f4UL
#define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_100GB 0x3e8UL
- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_200GB 0x7d0UL
#define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_10MB 0xffffUL
#define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_LAST PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_10MB
__le16 auto_link_speed_mask;
@@ -2464,7 +2504,6 @@ struct hwrm_port_phy_cfg_input {
#define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_100GB 0x800UL
#define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_10MBHD 0x1000UL
#define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_10MB 0x2000UL
- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_200GB 0x4000UL
u8 wirespeed;
#define PORT_PHY_CFG_REQ_WIRESPEED_OFF 0x0UL
#define PORT_PHY_CFG_REQ_WIRESPEED_ON 0x1UL
@@ -2488,11 +2527,19 @@ struct hwrm_port_phy_cfg_input {
#define PORT_PHY_CFG_REQ_EEE_LINK_SPEED_MASK_RSVD3 0x10UL
#define PORT_PHY_CFG_REQ_EEE_LINK_SPEED_MASK_RSVD4 0x20UL
#define PORT_PHY_CFG_REQ_EEE_LINK_SPEED_MASK_10GB 0x40UL
- u8 unused_2[2];
+ __le16 force_pam4_link_speed;
+ #define PORT_PHY_CFG_REQ_FORCE_PAM4_LINK_SPEED_50GB 0x1f4UL
+ #define PORT_PHY_CFG_REQ_FORCE_PAM4_LINK_SPEED_100GB 0x3e8UL
+ #define PORT_PHY_CFG_REQ_FORCE_PAM4_LINK_SPEED_200GB 0x7d0UL
+ #define PORT_PHY_CFG_REQ_FORCE_PAM4_LINK_SPEED_LAST PORT_PHY_CFG_REQ_FORCE_PAM4_LINK_SPEED_200GB
__le32 tx_lpi_timer;
#define PORT_PHY_CFG_REQ_TX_LPI_TIMER_MASK 0xffffffUL
#define PORT_PHY_CFG_REQ_TX_LPI_TIMER_SFT 0
- __le32 unused_3;
+ __le16 auto_link_pam4_speed_mask;
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_PAM4_SPEED_MASK_50G 0x1UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_PAM4_SPEED_MASK_100G 0x2UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_PAM4_SPEED_MASK_200G 0x4UL
+ u8 unused_2[2];
};
/* hwrm_port_phy_cfg_output (size:128b/16B) */
@@ -2526,7 +2573,7 @@ struct hwrm_port_phy_qcfg_input {
u8 unused_0[6];
};
-/* hwrm_port_phy_qcfg_output (size:768b/96B) */
+/* hwrm_port_phy_qcfg_output (size:832b/104B) */
struct hwrm_port_phy_qcfg_output {
__le16 error_code;
__le16 req_type;
@@ -2537,7 +2584,10 @@ struct hwrm_port_phy_qcfg_output {
#define PORT_PHY_QCFG_RESP_LINK_SIGNAL 0x1UL
#define PORT_PHY_QCFG_RESP_LINK_LINK 0x2UL
#define PORT_PHY_QCFG_RESP_LINK_LAST PORT_PHY_QCFG_RESP_LINK_LINK
- u8 unused_0;
+ u8 link_signal_mode;
+ #define PORT_PHY_QCFG_RESP_LINK_SIGNAL_MODE_NRZ 0x0UL
+ #define PORT_PHY_QCFG_RESP_LINK_SIGNAL_MODE_PAM4 0x1UL
+ #define PORT_PHY_QCFG_RESP_LINK_SIGNAL_MODE_LAST PORT_PHY_QCFG_RESP_LINK_SIGNAL_MODE_PAM4
__le16 link_speed;
#define PORT_PHY_QCFG_RESP_LINK_SPEED_100MB 0x1UL
#define PORT_PHY_QCFG_RESP_LINK_SPEED_1GB 0xaUL
@@ -2574,7 +2624,6 @@ struct hwrm_port_phy_qcfg_output {
#define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_100GB 0x800UL
#define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_10MBHD 0x1000UL
#define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_10MB 0x2000UL
- #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_200GB 0x4000UL
__le16 force_link_speed;
#define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_100MB 0x1UL
#define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_1GB 0xaUL
@@ -2586,7 +2635,6 @@ struct hwrm_port_phy_qcfg_output {
#define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_40GB 0x190UL
#define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_50GB 0x1f4UL
#define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_100GB 0x3e8UL
- #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_200GB 0x7d0UL
#define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_10MB 0xffffUL
#define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_LAST PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_10MB
u8 auto_mode;
@@ -2611,7 +2659,6 @@ struct hwrm_port_phy_qcfg_output {
#define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_40GB 0x190UL
#define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_50GB 0x1f4UL
#define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_100GB 0x3e8UL
- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_200GB 0x7d0UL
#define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_10MB 0xffffUL
#define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_LAST PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_10MB
__le16 auto_link_speed_mask;
@@ -2629,7 +2676,6 @@ struct hwrm_port_phy_qcfg_output {
#define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_100GB 0x800UL
#define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_10MBHD 0x1000UL
#define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_10MB 0x2000UL
- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_200GB 0x4000UL
u8 wirespeed;
#define PORT_PHY_QCFG_RESP_WIRESPEED_OFF 0x0UL
#define PORT_PHY_QCFG_RESP_WIRESPEED_ON 0x1UL
@@ -2763,13 +2809,21 @@ struct hwrm_port_phy_qcfg_output {
#define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_QSFP28 (0x11UL << 24)
#define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_LAST PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_QSFP28
__le16 fec_cfg;
- #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_NONE_SUPPORTED 0x1UL
- #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_AUTONEG_SUPPORTED 0x2UL
- #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_AUTONEG_ENABLED 0x4UL
- #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_CLAUSE74_SUPPORTED 0x8UL
- #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_CLAUSE74_ENABLED 0x10UL
- #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_CLAUSE91_SUPPORTED 0x20UL
- #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_CLAUSE91_ENABLED 0x40UL
+ #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_NONE_SUPPORTED 0x1UL
+ #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_AUTONEG_SUPPORTED 0x2UL
+ #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_AUTONEG_ENABLED 0x4UL
+ #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_CLAUSE74_SUPPORTED 0x8UL
+ #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_CLAUSE74_ENABLED 0x10UL
+ #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_CLAUSE91_SUPPORTED 0x20UL
+ #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_CLAUSE91_ENABLED 0x40UL
+ #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_RS544_1XN_SUPPORTED 0x80UL
+ #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_RS544_1XN_ENABLED 0x100UL
+ #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_RS544_2XN_SUPPORTED 0x200UL
+ #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_RS544_2XN_ENABLED 0x400UL
+ #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_CLAUSE74_ACTIVE 0x800UL
+ #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_CLAUSE91_ACTIVE 0x1000UL
+ #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_RS544_1XN_ACTIVE 0x2000UL
+ #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_RS544_2XN_ACTIVE 0x4000UL
u8 duplex_state;
#define PORT_PHY_QCFG_RESP_DUPLEX_STATE_HALF 0x0UL
#define PORT_PHY_QCFG_RESP_DUPLEX_STATE_FULL 0x1UL
@@ -2778,7 +2832,24 @@ struct hwrm_port_phy_qcfg_output {
#define PORT_PHY_QCFG_RESP_OPTION_FLAGS_MEDIA_AUTO_DETECT 0x1UL
char phy_vendor_name[16];
char phy_vendor_partnumber[16];
- u8 unused_2[7];
+ __le16 support_pam4_speeds;
+ #define PORT_PHY_QCFG_RESP_SUPPORT_PAM4_SPEEDS_50G 0x1UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_PAM4_SPEEDS_100G 0x2UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_PAM4_SPEEDS_200G 0x4UL
+ __le16 force_pam4_link_speed;
+ #define PORT_PHY_QCFG_RESP_FORCE_PAM4_LINK_SPEED_50GB 0x1f4UL
+ #define PORT_PHY_QCFG_RESP_FORCE_PAM4_LINK_SPEED_100GB 0x3e8UL
+ #define PORT_PHY_QCFG_RESP_FORCE_PAM4_LINK_SPEED_200GB 0x7d0UL
+ #define PORT_PHY_QCFG_RESP_FORCE_PAM4_LINK_SPEED_LAST PORT_PHY_QCFG_RESP_FORCE_PAM4_LINK_SPEED_200GB
+ __le16 auto_pam4_link_speed_mask;
+ #define PORT_PHY_QCFG_RESP_AUTO_PAM4_LINK_SPEED_MASK_50G 0x1UL
+ #define PORT_PHY_QCFG_RESP_AUTO_PAM4_LINK_SPEED_MASK_100G 0x2UL
+ #define PORT_PHY_QCFG_RESP_AUTO_PAM4_LINK_SPEED_MASK_200G 0x4UL
+ __le16 link_partner_pam4_adv_speeds;
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_PAM4_ADV_SPEEDS_50GB 0x1UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_PAM4_ADV_SPEEDS_100GB 0x2UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_PAM4_ADV_SPEEDS_200GB 0x4UL
+ u8 unused_0[7];
u8 valid;
};
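
The reworked fec_cfg field now carries a SUPPORTED/ENABLED/ACTIVE triplet per FEC scheme (Clause 74 Base-R, Clause 91 RS, RS544 1xN/2xN), with ACTIVE reporting what actually negotiated. A minimal userspace sketch decoding one triplet, using the bit values copied from the definitions above; this is an illustration, not driver code:

#include <stdint.h>
#include <stdio.h>

/* Bit values copied from the fec_cfg definitions above. */
#define FEC_CLAUSE74_SUPPORTED	0x8u
#define FEC_CLAUSE74_ENABLED	0x10u
#define FEC_CLAUSE74_ACTIVE	0x800u

static void print_fec_state(uint16_t fec_cfg)
{
	printf("Base-R (Clause 74): supported=%d enabled=%d active=%d\n",
	       !!(fec_cfg & FEC_CLAUSE74_SUPPORTED),
	       !!(fec_cfg & FEC_CLAUSE74_ENABLED),
	       !!(fec_cfg & FEC_CLAUSE74_ACTIVE));
}

int main(void)
{
	print_fec_state(0x818);	/* 0x8 | 0x10 | 0x800: all three set */
	return 0;
}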
@@ -3304,19 +3375,20 @@ struct hwrm_port_phy_qcaps_input {
u8 unused_0[6];
};
-/* hwrm_port_phy_qcaps_output (size:192b/24B) */
+/* hwrm_port_phy_qcaps_output (size:256b/32B) */
struct hwrm_port_phy_qcaps_output {
__le16 error_code;
__le16 req_type;
__le16 seq_id;
__le16 resp_len;
u8 flags;
- #define PORT_PHY_QCAPS_RESP_FLAGS_EEE_SUPPORTED 0x1UL
- #define PORT_PHY_QCAPS_RESP_FLAGS_EXTERNAL_LPBK_SUPPORTED 0x2UL
- #define PORT_PHY_QCAPS_RESP_FLAGS_AUTONEG_LPBK_SUPPORTED 0x4UL
- #define PORT_PHY_QCAPS_RESP_FLAGS_SHARED_PHY_CFG_SUPPORTED 0x8UL
- #define PORT_PHY_QCAPS_RESP_FLAGS_RSVD1_MASK 0xf0UL
- #define PORT_PHY_QCAPS_RESP_FLAGS_RSVD1_SFT 4
+ #define PORT_PHY_QCAPS_RESP_FLAGS_EEE_SUPPORTED 0x1UL
+ #define PORT_PHY_QCAPS_RESP_FLAGS_EXTERNAL_LPBK_SUPPORTED 0x2UL
+ #define PORT_PHY_QCAPS_RESP_FLAGS_AUTONEG_LPBK_SUPPORTED 0x4UL
+ #define PORT_PHY_QCAPS_RESP_FLAGS_SHARED_PHY_CFG_SUPPORTED 0x8UL
+ #define PORT_PHY_QCAPS_RESP_FLAGS_CUMULATIVE_COUNTERS_ON_RESET 0x10UL
+ #define PORT_PHY_QCAPS_RESP_FLAGS_RSVD1_MASK 0xe0UL
+ #define PORT_PHY_QCAPS_RESP_FLAGS_RSVD1_SFT 5
u8 port_cnt;
#define PORT_PHY_QCAPS_RESP_PORT_CNT_UNKNOWN 0x0UL
#define PORT_PHY_QCAPS_RESP_PORT_CNT_1 0x1UL
@@ -3339,7 +3411,6 @@ struct hwrm_port_phy_qcaps_output {
#define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_100GB 0x800UL
#define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_10MBHD 0x1000UL
#define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_10MB 0x2000UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_200GB 0x4000UL
__le16 supported_speeds_auto_mode;
#define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_100MBHD 0x1UL
#define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_100MB 0x2UL
@@ -3355,7 +3426,6 @@ struct hwrm_port_phy_qcaps_output {
#define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_100GB 0x800UL
#define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_10MBHD 0x1000UL
#define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_10MB 0x2000UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_200GB 0x4000UL
__le16 supported_speeds_eee_mode;
#define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_EEE_MODE_RSVD1 0x1UL
#define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_EEE_MODE_100MB 0x2UL
@@ -3372,8 +3442,18 @@ struct hwrm_port_phy_qcaps_output {
__le32 valid_tx_lpi_timer_high;
#define PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_HIGH_MASK 0xffffffUL
#define PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_HIGH_SFT 0
- #define PORT_PHY_QCAPS_RESP_VALID_MASK 0xff000000UL
- #define PORT_PHY_QCAPS_RESP_VALID_SFT 24
+ #define PORT_PHY_QCAPS_RESP_RSVD_MASK 0xff000000UL
+ #define PORT_PHY_QCAPS_RESP_RSVD_SFT 24
+ __le16 supported_pam4_speeds_auto_mode;
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_PAM4_SPEEDS_AUTO_MODE_50G 0x1UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_PAM4_SPEEDS_AUTO_MODE_100G 0x2UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_PAM4_SPEEDS_AUTO_MODE_200G 0x4UL
+ __le16 supported_pam4_speeds_force_mode;
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_PAM4_SPEEDS_FORCE_MODE_50G 0x1UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_PAM4_SPEEDS_FORCE_MODE_100G 0x2UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_PAM4_SPEEDS_FORCE_MODE_200G 0x4UL
+ u8 unused_0[3];
+ u8 valid;
};
/* hwrm_port_phy_i2c_read_input (size:320b/40B) */
@@ -3812,7 +3892,7 @@ struct hwrm_queue_qportcfg_input {
u8 unused_0;
};
-/* hwrm_queue_qportcfg_output (size:256b/32B) */
+/* hwrm_queue_qportcfg_output (size:1344b/168B) */
struct hwrm_queue_qportcfg_output {
__le16 error_code;
__le16 req_type;
@@ -3898,6 +3978,49 @@ struct hwrm_queue_qportcfg_output {
#define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_LOSSLESS_NIC 0x3UL
#define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_UNKNOWN 0xffUL
#define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_LAST QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_UNKNOWN
+ u8 unused_0;
+ char qid0_name[16];
+ char qid1_name[16];
+ char qid2_name[16];
+ char qid3_name[16];
+ char qid4_name[16];
+ char qid5_name[16];
+ char qid6_name[16];
+ char qid7_name[16];
+ u8 unused_1[7];
+ u8 valid;
+};
+
+/* hwrm_queue_qcfg_input (size:192b/24B) */
+struct hwrm_queue_qcfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define QUEUE_QCFG_REQ_FLAGS_PATH 0x1UL
+ #define QUEUE_QCFG_REQ_FLAGS_PATH_TX 0x0UL
+ #define QUEUE_QCFG_REQ_FLAGS_PATH_RX 0x1UL
+ #define QUEUE_QCFG_REQ_FLAGS_PATH_LAST QUEUE_QCFG_REQ_FLAGS_PATH_RX
+ __le32 queue_id;
+};
+
+/* hwrm_queue_qcfg_output (size:128b/16B) */
+struct hwrm_queue_qcfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 queue_len;
+ u8 service_profile;
+ #define QUEUE_QCFG_RESP_SERVICE_PROFILE_LOSSY 0x0UL
+ #define QUEUE_QCFG_RESP_SERVICE_PROFILE_LOSSLESS 0x1UL
+ #define QUEUE_QCFG_RESP_SERVICE_PROFILE_UNKNOWN 0xffUL
+ #define QUEUE_QCFG_RESP_SERVICE_PROFILE_LAST QUEUE_QCFG_RESP_SERVICE_PROFILE_UNKNOWN
+ u8 queue_cfg_info;
+ #define QUEUE_QCFG_RESP_QUEUE_CFG_INFO_ASYM_CFG 0x1UL
+ u8 unused_0;
u8 valid;
};
@@ -4938,6 +5061,7 @@ struct hwrm_vnic_cfg_input {
#define VNIC_CFG_REQ_ENABLES_DEFAULT_RX_RING_ID 0x20UL
#define VNIC_CFG_REQ_ENABLES_DEFAULT_CMPL_RING_ID 0x40UL
#define VNIC_CFG_REQ_ENABLES_QUEUE_ID 0x80UL
+ #define VNIC_CFG_REQ_ENABLES_RX_CSUM_V2_MODE 0x100UL
__le16 vnic_id;
__le16 dflt_ring_grp;
__le16 rss_rule;
@@ -4947,7 +5071,12 @@ struct hwrm_vnic_cfg_input {
__le16 default_rx_ring_id;
__le16 default_cmpl_ring_id;
__le16 queue_id;
- u8 unused0[6];
+ u8 rx_csum_v2_mode;
+ #define VNIC_CFG_REQ_RX_CSUM_V2_MODE_DEFAULT 0x0UL
+ #define VNIC_CFG_REQ_RX_CSUM_V2_MODE_ALL_OK 0x1UL
+ #define VNIC_CFG_REQ_RX_CSUM_V2_MODE_MAX 0x2UL
+ #define VNIC_CFG_REQ_RX_CSUM_V2_MODE_LAST VNIC_CFG_REQ_RX_CSUM_V2_MODE_MAX
+ u8 unused0[5];
};
/* hwrm_vnic_cfg_output (size:128b/16B) */
@@ -4989,6 +5118,7 @@ struct hwrm_vnic_qcaps_output {
#define VNIC_QCAPS_RESP_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_CAP 0x40UL
#define VNIC_QCAPS_RESP_FLAGS_OUTERMOST_RSS_CAP 0x80UL
#define VNIC_QCAPS_RESP_FLAGS_COS_ASSIGNMENT_CAP 0x100UL
+ #define VNIC_QCAPS_RESP_FLAGS_RX_CMPL_V2_CAP 0x200UL
__le16 max_aggs_supported;
u8 unused_1[5];
u8 valid;
@@ -5155,15 +5285,18 @@ struct hwrm_vnic_plcmodes_cfg_input {
#define VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV6 0x8UL
#define VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_FCOE 0x10UL
#define VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_ROCE 0x20UL
+ #define VNIC_PLCMODES_CFG_REQ_FLAGS_VIRTIO_PLACEMENT 0x40UL
__le32 enables;
#define VNIC_PLCMODES_CFG_REQ_ENABLES_JUMBO_THRESH_VALID 0x1UL
#define VNIC_PLCMODES_CFG_REQ_ENABLES_HDS_OFFSET_VALID 0x2UL
#define VNIC_PLCMODES_CFG_REQ_ENABLES_HDS_THRESHOLD_VALID 0x4UL
+ #define VNIC_PLCMODES_CFG_REQ_ENABLES_MAX_BDS_VALID 0x8UL
__le32 vnic_id;
__le16 jumbo_thresh;
__le16 hds_offset;
__le16 hds_threshold;
- u8 unused_0[6];
+ __le16 max_bds;
+ u8 unused_0[4];
};
/* hwrm_vnic_plcmodes_cfg_output (size:128b/16B) */
@@ -5231,6 +5364,7 @@ struct hwrm_ring_alloc_input {
#define RING_ALLOC_REQ_ENABLES_RX_RING_ID_VALID 0x40UL
#define RING_ALLOC_REQ_ENABLES_NQ_RING_ID_VALID 0x80UL
#define RING_ALLOC_REQ_ENABLES_RX_BUF_SIZE_VALID 0x100UL
+ #define RING_ALLOC_REQ_ENABLES_SCHQ_ID 0x200UL
u8 ring_type;
#define RING_ALLOC_REQ_RING_TYPE_L2_CMPL 0x0UL
#define RING_ALLOC_REQ_RING_TYPE_TX 0x1UL
@@ -5246,7 +5380,7 @@ struct hwrm_ring_alloc_input {
__le32 fbo;
u8 page_size;
u8 page_tbl_depth;
- u8 unused_1[2];
+ __le16 schq_id;
__le32 length;
__le16 logical_id;
__le16 cmpl_ring_id;
@@ -5344,11 +5478,12 @@ struct hwrm_ring_reset_input {
__le16 target_id;
__le64 resp_addr;
u8 ring_type;
- #define RING_RESET_REQ_RING_TYPE_L2_CMPL 0x0UL
- #define RING_RESET_REQ_RING_TYPE_TX 0x1UL
- #define RING_RESET_REQ_RING_TYPE_RX 0x2UL
- #define RING_RESET_REQ_RING_TYPE_ROCE_CMPL 0x3UL
- #define RING_RESET_REQ_RING_TYPE_LAST RING_RESET_REQ_RING_TYPE_ROCE_CMPL
+ #define RING_RESET_REQ_RING_TYPE_L2_CMPL 0x0UL
+ #define RING_RESET_REQ_RING_TYPE_TX 0x1UL
+ #define RING_RESET_REQ_RING_TYPE_RX 0x2UL
+ #define RING_RESET_REQ_RING_TYPE_ROCE_CMPL 0x3UL
+ #define RING_RESET_REQ_RING_TYPE_RX_RING_GRP 0x6UL
+ #define RING_RESET_REQ_RING_TYPE_LAST RING_RESET_REQ_RING_TYPE_RX_RING_GRP
u8 unused_0;
__le16 ring_id;
u8 unused_1[4];
@@ -5529,6 +5664,7 @@ struct hwrm_ring_grp_free_output {
u8 unused_0[7];
u8 valid;
};
+
#define DEFAULT_FLOW_ID 0xFFFFFFFFUL
#define ROCEV1_FLOW_ID 0xFFFFFFFEUL
#define ROCEV2_FLOW_ID 0xFFFFFFFDUL
@@ -6816,15 +6952,15 @@ struct ctx_hw_stats {
__le64 rx_mcast_pkts;
__le64 rx_bcast_pkts;
__le64 rx_discard_pkts;
- __le64 rx_drop_pkts;
+ __le64 rx_error_pkts;
__le64 rx_ucast_bytes;
__le64 rx_mcast_bytes;
__le64 rx_bcast_bytes;
__le64 tx_ucast_pkts;
__le64 tx_mcast_pkts;
__le64 tx_bcast_pkts;
+ __le64 tx_error_pkts;
__le64 tx_discard_pkts;
- __le64 tx_drop_pkts;
__le64 tx_ucast_bytes;
__le64 tx_mcast_bytes;
__le64 tx_bcast_bytes;
@@ -6840,15 +6976,15 @@ struct ctx_hw_stats_ext {
__le64 rx_mcast_pkts;
__le64 rx_bcast_pkts;
__le64 rx_discard_pkts;
- __le64 rx_drop_pkts;
+ __le64 rx_error_pkts;
__le64 rx_ucast_bytes;
__le64 rx_mcast_bytes;
__le64 rx_bcast_bytes;
__le64 tx_ucast_pkts;
__le64 tx_mcast_pkts;
__le64 tx_bcast_pkts;
+ __le64 tx_error_pkts;
__le64 tx_discard_pkts;
- __le64 tx_drop_pkts;
__le64 tx_ucast_bytes;
__le64 tx_mcast_bytes;
__le64 tx_bcast_bytes;
@@ -6915,7 +7051,9 @@ struct hwrm_stat_ctx_query_input {
__le16 target_id;
__le64 resp_addr;
__le32 stat_ctx_id;
- u8 unused_0[4];
+ u8 flags;
+ #define STAT_CTX_QUERY_REQ_FLAGS_COUNTER_MASK 0x1UL
+ u8 unused_0[3];
};
/* hwrm_stat_ctx_query_output (size:1408b/176B) */
@@ -6948,6 +7086,50 @@ struct hwrm_stat_ctx_query_output {
u8 valid;
};
+/* hwrm_stat_ext_ctx_query_input (size:192b/24B) */
+struct hwrm_stat_ext_ctx_query_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 stat_ctx_id;
+ u8 flags;
+ #define STAT_EXT_CTX_QUERY_REQ_FLAGS_COUNTER_MASK 0x1UL
+ u8 unused_0[3];
+};
+
+/* hwrm_stat_ext_ctx_query_output (size:1472b/184B) */
+struct hwrm_stat_ext_ctx_query_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le64 rx_ucast_pkts;
+ __le64 rx_mcast_pkts;
+ __le64 rx_bcast_pkts;
+ __le64 rx_discard_pkts;
+ __le64 rx_error_pkts;
+ __le64 rx_ucast_bytes;
+ __le64 rx_mcast_bytes;
+ __le64 rx_bcast_bytes;
+ __le64 tx_ucast_pkts;
+ __le64 tx_mcast_pkts;
+ __le64 tx_bcast_pkts;
+ __le64 tx_error_pkts;
+ __le64 tx_discard_pkts;
+ __le64 tx_ucast_bytes;
+ __le64 tx_mcast_bytes;
+ __le64 tx_bcast_bytes;
+ __le64 rx_tpa_eligible_pkt;
+ __le64 rx_tpa_eligible_bytes;
+ __le64 rx_tpa_pkt;
+ __le64 rx_tpa_bytes;
+ __le64 rx_tpa_errors;
+ u8 unused_0[7];
+ u8 valid;
+};
+
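+
All counters in this response are little-endian 64-bit values and must be converted to CPU order before arithmetic. A minimal userspace sketch of a counter delta; the struct here is a stand-in for illustration, not the driver's type:

#include <endian.h>
#include <stdint.h>
#include <stdio.h>

static uint64_t counter_delta(uint64_t prev_le, uint64_t curr_le)
{
	/* Deltas must be computed in CPU order, not on the raw
	 * little-endian wire values.
	 */
	return le64toh(curr_le) - le64toh(prev_le);
}

int main(void)
{
	uint64_t prev = htole64(100), curr = htole64(250);

	printf("delta = %llu\n",
	       (unsigned long long)counter_delta(prev, curr)); /* 150 */
	return 0;
}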
/* hwrm_stat_ctx_clr_stats_input (size:192b/24B) */
struct hwrm_stat_ctx_clr_stats_input {
__le16 req_type;
@@ -7497,6 +7679,29 @@ struct hwrm_wol_reason_qcfg_output {
u8 valid;
};
+/* hwrm_dbg_read_direct_input (size:256b/32B) */
+struct hwrm_dbg_read_direct_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le64 host_dest_addr;
+ __le32 read_addr;
+ __le32 read_len32;
+};
+
+/* hwrm_dbg_read_direct_output (size:128b/16B) */
+struct hwrm_dbg_read_direct_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 crc32;
+ u8 unused_0[3];
+ u8 valid;
+};
+
/* coredump_segment_record (size:128b/16B) */
struct coredump_segment_record {
__le16 component_id;
@@ -7507,7 +7712,8 @@ struct coredump_segment_record {
u8 seg_flags;
u8 compress_flags;
#define SFLAG_COMPRESSED_ZLIB 0x1UL
- u8 unused_0[6];
+ u8 unused_0[2];
+ __le32 segment_len;
};
/* hwrm_dbg_coredump_list_input (size:256b/32B) */
@@ -7620,7 +7826,8 @@ struct hwrm_dbg_ring_info_get_input {
#define DBG_RING_INFO_GET_REQ_RING_TYPE_L2_CMPL 0x0UL
#define DBG_RING_INFO_GET_REQ_RING_TYPE_TX 0x1UL
#define DBG_RING_INFO_GET_REQ_RING_TYPE_RX 0x2UL
- #define DBG_RING_INFO_GET_REQ_RING_TYPE_LAST DBG_RING_INFO_GET_REQ_RING_TYPE_RX
+ #define DBG_RING_INFO_GET_REQ_RING_TYPE_NQ 0x3UL
+ #define DBG_RING_INFO_GET_REQ_RING_TYPE_LAST DBG_RING_INFO_GET_REQ_RING_TYPE_NQ
u8 unused_0[3];
__le32 fw_ring_id;
};
@@ -7633,7 +7840,8 @@ struct hwrm_dbg_ring_info_get_output {
__le16 resp_len;
__le32 producer_index;
__le32 consumer_index;
- u8 unused_0[7];
+ __le32 cag_vector_ctrl;
+ u8 unused_0[3];
u8 valid;
};
@@ -7922,6 +8130,7 @@ struct hwrm_nvm_install_update_input {
#define NVM_INSTALL_UPDATE_REQ_FLAGS_ERASE_UNUSED_SPACE 0x1UL
#define NVM_INSTALL_UPDATE_REQ_FLAGS_REMOVE_UNUSED_PKG 0x2UL
#define NVM_INSTALL_UPDATE_REQ_FLAGS_ALLOWED_TO_DEFRAG 0x4UL
+ #define NVM_INSTALL_UPDATE_REQ_FLAGS_VERIFY_ONLY 0x8UL
u8 unused_0[2];
};
@@ -8101,7 +8310,14 @@ struct hwrm_selftest_qlist_output {
char test5_name[32];
char test6_name[32];
char test7_name[32];
- u8 unused_2[7];
+ u8 eyescope_target_BER_support;
+ #define SELFTEST_QLIST_RESP_EYESCOPE_TARGET_BER_SUPPORT_BER_1E8_SUPPORTED 0x0UL
+ #define SELFTEST_QLIST_RESP_EYESCOPE_TARGET_BER_SUPPORT_BER_1E9_SUPPORTED 0x1UL
+ #define SELFTEST_QLIST_RESP_EYESCOPE_TARGET_BER_SUPPORT_BER_1E10_SUPPORTED 0x2UL
+ #define SELFTEST_QLIST_RESP_EYESCOPE_TARGET_BER_SUPPORT_BER_1E11_SUPPORTED 0x3UL
+ #define SELFTEST_QLIST_RESP_EYESCOPE_TARGET_BER_SUPPORT_BER_1E12_SUPPORTED 0x4UL
+ #define SELFTEST_QLIST_RESP_EYESCOPE_TARGET_BER_SUPPORT_LAST SELFTEST_QLIST_RESP_EYESCOPE_TARGET_BER_SUPPORT_BER_1E12_SUPPORTED
+ u8 unused_2[6];
u8 valid;
};
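
The eyescope_target_BER_support field reads as an enumeration rather than a bitmask (note the _LAST entry): values 0x0 through 0x4 appear to name the deepest supported target BER, 1e-8 through 1e-12. A hedged userspace sketch of that interpretation, for illustration only:

#include <stdint.h>
#include <stdio.h>

/* Values 0x0..0x4 from the eyescope_target_BER_support definitions
 * above; each value appears to name the deepest supported target BER.
 */
static const char *ber_support_str(uint8_t v)
{
	static const char * const names[] = {
		"1e-8", "1e-9", "1e-10", "1e-11", "1e-12",
	};

	return v < 5 ? names[v] : "unknown";
}

int main(void)
{
	printf("target BER support: %s\n", ber_support_str(0x4)); /* 1e-12 */
	return 0;
}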
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
index 392e32c7122a..cc2ee4d0bd18 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
@@ -1029,7 +1029,7 @@ static int bnxt_vf_set_link(struct bnxt *bp, struct bnxt_vf_info *vf)
rc = bnxt_hwrm_exec_fwd_resp(
bp, vf, sizeof(struct hwrm_port_phy_qcfg_input));
} else {
- struct hwrm_port_phy_qcfg_output phy_qcfg_resp;
+ struct hwrm_port_phy_qcfg_output_compat phy_qcfg_resp = {0};
struct hwrm_port_phy_qcfg_input *phy_qcfg_req;
phy_qcfg_req =
diff --git a/drivers/net/ethernet/qlogic/qed/qed_chain.c b/drivers/net/ethernet/qlogic/qed/qed_chain.c
index f8efd36d66e0..b83d17b14e85 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_chain.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_chain.c
@@ -268,8 +268,10 @@ static int qed_chain_alloc_pbl(struct qed_dev *cdev, struct qed_chain *chain)
chain->pbl.pp_addr_tbl = addr_tbl;
- if (chain->b_external_pbl)
+ if (chain->b_external_pbl) {
+ pbl_virt = chain->pbl_sp.table_virt;
goto alloc_pages;
+ }
size = array_size(page_cnt, sizeof(*pbl_virt));
if (unlikely(size == SIZE_MAX))
diff --git a/drivers/net/ethernet/sfc/Kconfig b/drivers/net/ethernet/sfc/Kconfig
index 81b0f7d3a025..5e37c8313725 100644
--- a/drivers/net/ethernet/sfc/Kconfig
+++ b/drivers/net/ethernet/sfc/Kconfig
@@ -17,7 +17,7 @@ config NET_VENDOR_SOLARFLARE
if NET_VENDOR_SOLARFLARE
config SFC
- tristate "Solarflare SFC9000/SFC9100-family support"
+ tristate "Solarflare SFC9000/SFC9100/EF100-family support"
depends on PCI
select MDIO
select CRC32
@@ -26,6 +26,9 @@ config SFC
This driver supports 10/40-gigabit Ethernet cards based on
the Solarflare SFC9000-family and SFC9100-family controllers.
+ It also supports 10/25/40/100-gigabit Ethernet cards based
+ on the Solarflare EF100 networking IP in Xilinx FPGAs.
+
To compile this driver as a module, choose M here. The module
will be called sfc.
config SFC_MTD
diff --git a/drivers/net/ethernet/sfc/Makefile b/drivers/net/ethernet/sfc/Makefile
index 87d093da22ca..8bd01c429f91 100644
--- a/drivers/net/ethernet/sfc/Makefile
+++ b/drivers/net/ethernet/sfc/Makefile
@@ -4,7 +4,9 @@ sfc-y += efx.o efx_common.o efx_channels.o nic.o \
tx.o tx_common.o tx_tso.o rx.o rx_common.o \
selftest.o ethtool.o ethtool_common.o ptp.o \
mcdi.o mcdi_port.o mcdi_port_common.o \
- mcdi_functions.o mcdi_filters.o mcdi_mon.o
+ mcdi_functions.o mcdi_filters.o mcdi_mon.o \
+ ef100.o ef100_nic.o ef100_netdev.o \
+ ef100_ethtool.o ef100_rx.o ef100_tx.o
sfc-$(CONFIG_SFC_MTD) += mtd.o
sfc-$(CONFIG_SFC_SRIOV) += sriov.o siena_sriov.o ef10_sriov.o
diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c
index fa7229fff2ff..4b0b2cf026a5 100644
--- a/drivers/net/ethernet/sfc/ef10.c
+++ b/drivers/net/ethernet/sfc/ef10.c
@@ -6,6 +6,7 @@
#include "net_driver.h"
#include "rx_common.h"
+#include "tx_common.h"
#include "ef10_regs.h"
#include "io.h"
#include "mcdi.h"
@@ -3977,6 +3978,7 @@ const struct efx_nic_type efx_hunt_a0_vf_nic_type = {
.tx_remove = efx_mcdi_tx_remove,
.tx_write = efx_ef10_tx_write,
.tx_limit_len = efx_ef10_tx_limit_len,
+ .tx_enqueue = __efx_enqueue_skb,
.rx_push_rss_config = efx_mcdi_vf_rx_push_rss_config,
.rx_pull_rss_config = efx_mcdi_rx_pull_rss_config,
.rx_probe = efx_mcdi_rx_probe,
@@ -3984,6 +3986,7 @@ const struct efx_nic_type efx_hunt_a0_vf_nic_type = {
.rx_remove = efx_mcdi_rx_remove,
.rx_write = efx_ef10_rx_write,
.rx_defer_refill = efx_ef10_rx_defer_refill,
+ .rx_packet = __efx_rx_packet,
.ev_probe = efx_mcdi_ev_probe,
.ev_init = efx_ef10_ev_init,
.ev_fini = efx_mcdi_ev_fini,
@@ -4038,6 +4041,7 @@ const struct efx_nic_type efx_hunt_a0_vf_nic_type = {
.rx_hash_key_size = 40,
.check_caps = ef10_check_caps,
.print_additional_fwver = efx_ef10_print_additional_fwver,
+ .sensor_event = efx_mcdi_sensor_event,
};
const struct efx_nic_type efx_hunt_a0_nic_type = {
@@ -4087,6 +4091,7 @@ const struct efx_nic_type efx_hunt_a0_nic_type = {
.tx_remove = efx_mcdi_tx_remove,
.tx_write = efx_ef10_tx_write,
.tx_limit_len = efx_ef10_tx_limit_len,
+ .tx_enqueue = __efx_enqueue_skb,
.rx_push_rss_config = efx_mcdi_pf_rx_push_rss_config,
.rx_pull_rss_config = efx_mcdi_rx_pull_rss_config,
.rx_push_rss_context_config = efx_mcdi_rx_push_rss_context_config,
@@ -4097,6 +4102,7 @@ const struct efx_nic_type efx_hunt_a0_nic_type = {
.rx_remove = efx_mcdi_rx_remove,
.rx_write = efx_ef10_rx_write,
.rx_defer_refill = efx_ef10_rx_defer_refill,
+ .rx_packet = __efx_rx_packet,
.ev_probe = efx_mcdi_ev_probe,
.ev_init = efx_ef10_ev_init,
.ev_fini = efx_mcdi_ev_fini,
@@ -4172,4 +4178,5 @@ const struct efx_nic_type efx_hunt_a0_nic_type = {
.rx_hash_key_size = 40,
.check_caps = ef10_check_caps,
.print_additional_fwver = efx_ef10_print_additional_fwver,
+ .sensor_event = efx_mcdi_sensor_event,
};
diff --git a/drivers/net/ethernet/sfc/ef100.c b/drivers/net/ethernet/sfc/ef100.c
new file mode 100644
index 000000000000..de611c0f94e7
--- /dev/null
+++ b/drivers/net/ethernet/sfc/ef100.c
@@ -0,0 +1,541 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2005-2018 Solarflare Communications Inc.
+ * Copyright 2019-2020 Xilinx Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#include "net_driver.h"
+#include <linux/module.h>
+#include <linux/aer.h>
+#include "efx_common.h"
+#include "efx_channels.h"
+#include "io.h"
+#include "ef100_nic.h"
+#include "ef100_netdev.h"
+#include "ef100_regs.h"
+#include "ef100.h"
+
+#define EFX_EF100_PCI_DEFAULT_BAR 2
+
+/* Number of bytes at the start of a vendor-specific extended capability that
+ * identify it as vendor-specific, i.e. the offset from the value returned by
+ * pci_find_next_ext_capability() to the beginning of the vendor-specific
+ * capability header.
+ */
+#define PCI_EXT_CAP_HDR_LENGTH 4
+
+/* Expected size of a Xilinx continuation address table entry. */
+#define ESE_GZ_CFGBAR_CONT_CAP_MIN_LENGTH 16
+
+struct ef100_func_ctl_window {
+ bool valid;
+ unsigned int bar;
+ u64 offset;
+};
+
+static int ef100_pci_walk_xilinx_table(struct efx_nic *efx, u64 offset,
+ struct ef100_func_ctl_window *result);
+
+/* Number of bytes to offset when reading bit position x with dword accessors. */
+#define ROUND_DOWN_TO_DWORD(x) (((x) & (~31)) >> 3)
+
+#define EXTRACT_BITS(x, lbn, width) \
+ (((x) >> ((lbn) & 31)) & ((1ull << (width)) - 1))
+
+static u32 _ef100_pci_get_bar_bits_with_width(struct efx_nic *efx,
+ int structure_start,
+ int lbn, int width)
+{
+ efx_dword_t dword;
+
+ efx_readd(efx, &dword, structure_start + ROUND_DOWN_TO_DWORD(lbn));
+
+ return EXTRACT_BITS(le32_to_cpu(dword.u32[0]), lbn, width);
+}
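
Worked example of the two macros above: ROUND_DOWN_TO_DWORD converts a bit number to the byte offset of its containing dword, and EXTRACT_BITS pulls `width` bits starting at `lbn` within that dword. A self-contained userspace sketch, for illustration:

#include <stdint.h>
#include <stdio.h>

#define ROUND_DOWN_TO_DWORD(x) (((x) & (~31)) >> 3)
#define EXTRACT_BITS(x, lbn, width) \
	(((x) >> ((lbn) & 31)) & ((1ull << (width)) - 1))

int main(void)
{
	/* A 5-bit field at bit position 37 lives in the dword at byte
	 * offset 4 and starts at bit 5 within it.
	 */
	uint32_t dword = 0x7 << 5;	/* field value 7, shifted into place */

	printf("byte offset = %d\n", ROUND_DOWN_TO_DWORD(37));	/* 4 */
	printf("field = %llu\n",
	       (unsigned long long)EXTRACT_BITS(dword, 37, 5));	/* 7 */
	return 0;
}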
+
+#define ef100_pci_get_bar_bits(efx, entry_location, bitdef) \
+ _ef100_pci_get_bar_bits_with_width(efx, entry_location, \
+ ESF_GZ_CFGBAR_ ## bitdef ## _LBN, \
+ ESF_GZ_CFGBAR_ ## bitdef ## _WIDTH)
+
+static int ef100_pci_parse_ef100_entry(struct efx_nic *efx, int entry_location,
+ struct ef100_func_ctl_window *result)
+{
+ u64 offset = ef100_pci_get_bar_bits(efx, entry_location, EF100_FUNC_CTL_WIN_OFF) <<
+ ESE_GZ_EF100_FUNC_CTL_WIN_OFF_SHIFT;
+ u32 bar = ef100_pci_get_bar_bits(efx, entry_location, EF100_BAR);
+
+ netif_dbg(efx, probe, efx->net_dev,
+ "Found EF100 function control window bar=%d offset=0x%llx\n",
+ bar, offset);
+
+ if (result->valid) {
+ netif_err(efx, probe, efx->net_dev,
+ "Duplicated EF100 table entry.\n");
+ return -EINVAL;
+ }
+
+ if (bar == ESE_GZ_CFGBAR_EF100_BAR_NUM_EXPANSION_ROM ||
+ bar == ESE_GZ_CFGBAR_EF100_BAR_NUM_INVALID) {
+ netif_err(efx, probe, efx->net_dev,
+ "Bad BAR value of %d in Xilinx capabilities EF100 entry.\n",
+ bar);
+ return -EINVAL;
+ }
+
+ result->bar = bar;
+ result->offset = offset;
+ result->valid = true;
+ return 0;
+}
+
+static bool ef100_pci_does_bar_overflow(struct efx_nic *efx, int bar,
+ u64 next_entry)
+{
+ return next_entry + ESE_GZ_CFGBAR_ENTRY_HEADER_SIZE >
+ pci_resource_len(efx->pci_dev, bar);
+}
+
+/* Parse a Xilinx capabilities table entry describing a continuation to a new
+ * sub-table.
+ */
+static int ef100_pci_parse_continue_entry(struct efx_nic *efx, int entry_location,
+ struct ef100_func_ctl_window *result)
+{
+ unsigned int previous_bar;
+ efx_oword_t entry;
+ u64 offset;
+ int rc = 0;
+ u32 bar;
+
+ efx_reado(efx, &entry, entry_location);
+
+ bar = EFX_OWORD_FIELD32(entry, ESF_GZ_CFGBAR_CONT_CAP_BAR);
+
+ offset = EFX_OWORD_FIELD64(entry, ESF_GZ_CFGBAR_CONT_CAP_OFFSET) <<
+ ESE_GZ_CONT_CAP_OFFSET_BYTES_SHIFT;
+
+ previous_bar = efx->mem_bar;
+
+ if (bar == ESE_GZ_VSEC_BAR_NUM_EXPANSION_ROM ||
+ bar == ESE_GZ_VSEC_BAR_NUM_INVALID) {
+ netif_err(efx, probe, efx->net_dev,
+ "Bad BAR value of %d in Xilinx capabilities sub-table.\n",
+ bar);
+ return -EINVAL;
+ }
+
+ if (bar != previous_bar) {
+ efx_fini_io(efx);
+
+ if (ef100_pci_does_bar_overflow(efx, bar, offset)) {
+ netif_err(efx, probe, efx->net_dev,
+ "Xilinx table will overrun BAR[%d] offset=0x%llx\n",
+ bar, offset);
+ return -EINVAL;
+ }
+
+ /* Temporarily map new BAR. */
+ rc = efx_init_io(efx, bar,
+ DMA_BIT_MASK(ESF_GZ_TX_SEND_ADDR_WIDTH),
+ pci_resource_len(efx->pci_dev, bar));
+ if (rc) {
+ netif_err(efx, probe, efx->net_dev,
+ "Mapping new BAR for Xilinx table failed, rc=%d\n", rc);
+ return rc;
+ }
+ }
+
+ rc = ef100_pci_walk_xilinx_table(efx, offset, result);
+ if (rc)
+ return rc;
+
+ if (bar != previous_bar) {
+ efx_fini_io(efx);
+
+ /* Put old BAR back. */
+ rc = efx_init_io(efx, previous_bar,
+ DMA_BIT_MASK(ESF_GZ_TX_SEND_ADDR_WIDTH),
+ pci_resource_len(efx->pci_dev, previous_bar));
+ if (rc) {
+ netif_err(efx, probe, efx->net_dev,
+ "Putting old BAR back failed, rc=%d\n", rc);
+ return rc;
+ }
+ }
+
+ return 0;
+}
+
+/* Iterate over the Xilinx capabilities table in the currently mapped BAR and
+ * call ef100_pci_parse_ef100_entry() on any EF100 entries and
+ * ef100_pci_parse_continue_entry() on any table continuations.
+ */
+static int ef100_pci_walk_xilinx_table(struct efx_nic *efx, u64 offset,
+ struct ef100_func_ctl_window *result)
+{
+ u64 current_entry = offset;
+ int rc = 0;
+
+ while (true) {
+ u32 id = ef100_pci_get_bar_bits(efx, current_entry, ENTRY_FORMAT);
+ u32 last = ef100_pci_get_bar_bits(efx, current_entry, ENTRY_LAST);
+ u32 rev = ef100_pci_get_bar_bits(efx, current_entry, ENTRY_REV);
+ u32 entry_size;
+
+ if (id == ESE_GZ_CFGBAR_ENTRY_LAST)
+ return 0;
+
+ entry_size = ef100_pci_get_bar_bits(efx, current_entry, ENTRY_SIZE);
+
+ netif_dbg(efx, probe, efx->net_dev,
+ "Seen Xilinx table entry 0x%x size 0x%x at 0x%llx in BAR[%d]\n",
+ id, entry_size, current_entry, efx->mem_bar);
+
+ if (entry_size < sizeof(u32) * 2) {
+ netif_err(efx, probe, efx->net_dev,
+ "Xilinx table entry too short len=0x%x\n", entry_size);
+ return -EINVAL;
+ }
+
+ switch (id) {
+ case ESE_GZ_CFGBAR_ENTRY_EF100:
+ if (rev != ESE_GZ_CFGBAR_ENTRY_REV_EF100 ||
+ entry_size < ESE_GZ_CFGBAR_ENTRY_SIZE_EF100) {
+ netif_err(efx, probe, efx->net_dev,
+ "Bad length or rev for EF100 entry in Xilinx capabilities table. entry_size=%d rev=%d.\n",
+ entry_size, rev);
+ return -EINVAL;
+ }
+
+ rc = ef100_pci_parse_ef100_entry(efx, current_entry,
+ result);
+ if (rc)
+ return rc;
+ break;
+ case ESE_GZ_CFGBAR_ENTRY_CONT_CAP_ADDR:
+ if (rev != 0 || entry_size < ESE_GZ_CFGBAR_CONT_CAP_MIN_LENGTH) {
+ netif_err(efx, probe, efx->net_dev,
+ "Bad length or rev for continue entry in Xilinx capabilities table. entry_size=%d rev=%d.\n",
+ entry_size, rev);
+ return -EINVAL;
+ }
+
+ rc = ef100_pci_parse_continue_entry(efx, current_entry, result);
+ if (rc)
+ return rc;
+ break;
+ default:
+ /* Ignore unknown table entries. */
+ break;
+ }
+
+ if (last)
+ return 0;
+
+ current_entry += entry_size;
+
+ if (ef100_pci_does_bar_overflow(efx, efx->mem_bar, current_entry)) {
+ netif_err(efx, probe, efx->net_dev,
+ "Xilinx table overrun at position=0x%llx.\n",
+ current_entry);
+ return -EINVAL;
+ }
+ }
+}
+
+static int _ef100_pci_get_config_bits_with_width(struct efx_nic *efx,
+ int structure_start, int lbn,
+ int width, u32 *result)
+{
+ int rc, pos = structure_start + ROUND_DOWN_TO_DWORD(lbn);
+ u32 temp;
+
+ rc = pci_read_config_dword(efx->pci_dev, pos, &temp);
+ if (rc) {
+ netif_err(efx, probe, efx->net_dev,
+ "Failed to read PCI config dword at %d\n",
+ pos);
+ return rc;
+ }
+
+ *result = EXTRACT_BITS(temp, lbn, width);
+
+ return 0;
+}
+
+#define ef100_pci_get_config_bits(efx, entry_location, bitdef, result) \
+ _ef100_pci_get_config_bits_with_width(efx, entry_location, \
+ ESF_GZ_VSEC_ ## bitdef ## _LBN, \
+ ESF_GZ_VSEC_ ## bitdef ## _WIDTH, result)
+
+/* Call ef100_pci_walk_xilinx_table() for the Xilinx capabilities table pointed
+ * to by this PCI_EXT_CAP_ID_VNDR.
+ */
+static int ef100_pci_parse_xilinx_cap(struct efx_nic *efx, int vndr_cap,
+ bool has_offset_hi,
+ struct ef100_func_ctl_window *result)
+{
+ u32 offset_high = 0;
+ u32 offset_lo = 0;
+ u64 offset = 0;
+ u32 bar = 0;
+ int rc = 0;
+
+ rc = ef100_pci_get_config_bits(efx, vndr_cap, TBL_BAR, &bar);
+ if (rc) {
+ netif_err(efx, probe, efx->net_dev,
+ "Failed to read ESF_GZ_VSEC_TBL_BAR, rc=%d\n",
+ rc);
+ return rc;
+ }
+
+ if (bar == ESE_GZ_CFGBAR_CONT_CAP_BAR_NUM_EXPANSION_ROM ||
+ bar == ESE_GZ_CFGBAR_CONT_CAP_BAR_NUM_INVALID) {
+ netif_err(efx, probe, efx->net_dev,
+ "Bad BAR value of %d in Xilinx capabilities sub-table.\n",
+ bar);
+ return -EINVAL;
+ }
+
+ rc = ef100_pci_get_config_bits(efx, vndr_cap, TBL_OFF_LO, &offset_lo);
+ if (rc) {
+ netif_err(efx, probe, efx->net_dev,
+ "Failed to read ESF_GZ_VSEC_TBL_OFF_LO, rc=%d\n",
+ rc);
+ return rc;
+ }
+
+ /* Get optional extension to 64bit offset. */
+ if (has_offset_hi) {
+ rc = ef100_pci_get_config_bits(efx, vndr_cap, TBL_OFF_HI, &offset_high);
+ if (rc) {
+ netif_err(efx, probe, efx->net_dev,
+ "Failed to read ESF_GZ_VSEC_TBL_OFF_HI, rc=%d\n",
+ rc);
+ return rc;
+ }
+ }
+
+ offset = (((u64)offset_lo) << ESE_GZ_VSEC_TBL_OFF_LO_BYTES_SHIFT) |
+ (((u64)offset_high) << ESE_GZ_VSEC_TBL_OFF_HI_BYTES_SHIFT);
+
+ if (offset > pci_resource_len(efx->pci_dev, bar) - sizeof(u32) * 2) {
+ netif_err(efx, probe, efx->net_dev,
+ "Xilinx table will overrun BAR[%d] offset=0x%llx\n",
+ bar, offset);
+ return -EINVAL;
+ }
+
+ /* Temporarily map BAR. */
+ rc = efx_init_io(efx, bar,
+ DMA_BIT_MASK(ESF_GZ_TX_SEND_ADDR_WIDTH),
+ pci_resource_len(efx->pci_dev, bar));
+ if (rc) {
+ netif_err(efx, probe, efx->net_dev,
+ "efx_init_io failed, rc=%d\n", rc);
+ return rc;
+ }
+
+ rc = ef100_pci_walk_xilinx_table(efx, offset, result);
+
+ /* Unmap temporarily mapped BAR. */
+ efx_fini_io(efx);
+ return rc;
+}
+
+/* Call ef100_pci_parse_ef100_entry() for each Xilinx PCI_EXT_CAP_ID_VNDR
+ * capability.
+ */
+static int ef100_pci_find_func_ctrl_window(struct efx_nic *efx,
+ struct ef100_func_ctl_window *result)
+{
+ int num_xilinx_caps = 0;
+ int cap = 0;
+
+ result->valid = false;
+
+ while ((cap = pci_find_next_ext_capability(efx->pci_dev, cap, PCI_EXT_CAP_ID_VNDR)) != 0) {
+ int vndr_cap = cap + PCI_EXT_CAP_HDR_LENGTH;
+ u32 vsec_ver = 0;
+ u32 vsec_len = 0;
+ u32 vsec_id = 0;
+ int rc = 0;
+
+ num_xilinx_caps++;
+
+ rc = ef100_pci_get_config_bits(efx, vndr_cap, ID, &vsec_id);
+ if (rc) {
+ netif_err(efx, probe, efx->net_dev,
+ "Failed to read ESF_GZ_VSEC_ID, rc=%d\n",
+ rc);
+ return rc;
+ }
+
+ rc = ef100_pci_get_config_bits(efx, vndr_cap, VER, &vsec_ver);
+ if (rc) {
+ netif_err(efx, probe, efx->net_dev,
+ "Failed to read ESF_GZ_VSEC_VER, rc=%d\n",
+ rc);
+ return rc;
+ }
+
+ /* Get length of whole capability - i.e. starting at cap */
+ rc = ef100_pci_get_config_bits(efx, vndr_cap, LEN, &vsec_len);
+ if (rc) {
+ netif_err(efx, probe, efx->net_dev,
+ "Failed to read ESF_GZ_VSEC_LEN, rc=%d\n",
+ rc);
+ return rc;
+ }
+
+ if (vsec_id == ESE_GZ_XILINX_VSEC_ID &&
+ vsec_ver == ESE_GZ_VSEC_VER_XIL_CFGBAR &&
+ vsec_len >= ESE_GZ_VSEC_LEN_MIN) {
+ bool has_offset_hi = (vsec_len >= ESE_GZ_VSEC_LEN_HIGH_OFFT);
+
+ rc = ef100_pci_parse_xilinx_cap(efx, vndr_cap,
+ has_offset_hi, result);
+ if (rc)
+ return rc;
+ }
+ }
+
+ if (num_xilinx_caps && !result->valid) {
+ netif_err(efx, probe, efx->net_dev,
+ "Seen %d Xilinx tables, but no EF100 entry.\n",
+ num_xilinx_caps);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/* Final NIC shutdown
+ * This is called only at module unload (or hotplug removal). A PF can call
+ * this on its VFs to ensure they are unbound first.
+ */
+static void ef100_pci_remove(struct pci_dev *pci_dev)
+{
+ struct efx_nic *efx;
+
+ efx = pci_get_drvdata(pci_dev);
+ if (!efx)
+ return;
+
+ rtnl_lock();
+ dev_close(efx->net_dev);
+ rtnl_unlock();
+
+ /* Unregistering our netdev notifier triggers unbinding of TC indirect
+ * blocks, so we have to do it before PCI removal.
+ */
+ unregister_netdevice_notifier(&efx->netdev_notifier);
+ ef100_remove(efx);
+ efx_fini_io(efx);
+ netif_dbg(efx, drv, efx->net_dev, "shutdown successful\n");
+
+ pci_set_drvdata(pci_dev, NULL);
+ efx_fini_struct(efx);
+ free_netdev(efx->net_dev);
+
+ pci_disable_pcie_error_reporting(pci_dev);
+}
+
+static int ef100_pci_probe(struct pci_dev *pci_dev,
+ const struct pci_device_id *entry)
+{
+ struct ef100_func_ctl_window fcw = { 0 };
+ struct net_device *net_dev;
+ struct efx_nic *efx;
+ int rc;
+
+ /* Allocate and initialise a struct net_device and struct efx_nic */
+ net_dev = alloc_etherdev_mq(sizeof(*efx), EFX_MAX_CORE_TX_QUEUES);
+ if (!net_dev)
+ return -ENOMEM;
+ efx = netdev_priv(net_dev);
+ efx->type = (const struct efx_nic_type *)entry->driver_data;
+
+ pci_set_drvdata(pci_dev, efx);
+ SET_NETDEV_DEV(net_dev, &pci_dev->dev);
+ rc = efx_init_struct(efx, pci_dev, net_dev);
+ if (rc)
+ goto fail;
+
+ efx->vi_stride = EF100_DEFAULT_VI_STRIDE;
+ netif_info(efx, probe, efx->net_dev,
+ "Solarflare EF100 NIC detected\n");
+
+ rc = ef100_pci_find_func_ctrl_window(efx, &fcw);
+ if (rc) {
+ netif_err(efx, probe, efx->net_dev,
+ "Error looking for ef100 function control window, rc=%d\n",
+ rc);
+ goto fail;
+ }
+
+ if (!fcw.valid) {
+ /* Extended capability not found - use defaults. */
+ fcw.bar = EFX_EF100_PCI_DEFAULT_BAR;
+ fcw.offset = 0;
+ fcw.valid = true;
+ }
+
+	if (fcw.offset > pci_resource_len(efx->pci_dev, fcw.bar) - ESE_GZ_FCW_LEN) {
+		netif_err(efx, probe, efx->net_dev,
+			  "Func control window overruns BAR\n");
+		rc = -EIO;
+		goto fail;
+	}
+
+ /* Set up basic I/O (BAR mappings etc) */
+ rc = efx_init_io(efx, fcw.bar,
+ DMA_BIT_MASK(ESF_GZ_TX_SEND_ADDR_WIDTH),
+ pci_resource_len(efx->pci_dev, fcw.bar));
+ if (rc)
+ goto fail;
+
+ efx->reg_base = fcw.offset;
+
+ efx->netdev_notifier.notifier_call = ef100_netdev_event;
+ rc = register_netdevice_notifier(&efx->netdev_notifier);
+ if (rc) {
+ netif_err(efx, probe, efx->net_dev,
+ "Failed to register netdevice notifier, rc=%d\n", rc);
+ goto fail;
+ }
+
+ rc = efx->type->probe(efx);
+ if (rc)
+ goto fail;
+
+ netif_dbg(efx, probe, efx->net_dev, "initialisation successful\n");
+
+ return 0;
+
+fail:
+ ef100_pci_remove(pci_dev);
+ return rc;
+}
+
+/* PCI device ID table */
+static const struct pci_device_id ef100_pci_table[] = {
+ {PCI_DEVICE(PCI_VENDOR_ID_XILINX, 0x0100), /* Riverhead PF */
+ .driver_data = (unsigned long) &ef100_pf_nic_type },
+ {0} /* end of list */
+};
+
+struct pci_driver ef100_pci_driver = {
+ .name = "sfc_ef100",
+ .id_table = ef100_pci_table,
+ .probe = ef100_pci_probe,
+ .remove = ef100_pci_remove,
+ .err_handler = &efx_err_handlers,
+};
+
+MODULE_DEVICE_TABLE(pci, ef100_pci_table);
diff --git a/drivers/net/ethernet/sfc/ef100.h b/drivers/net/ethernet/sfc/ef100.h
new file mode 100644
index 000000000000..a63f0ff6641b
--- /dev/null
+++ b/drivers/net/ethernet/sfc/ef100.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2018 Solarflare Communications Inc.
+ * Copyright 2019-2020 Xilinx Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+extern struct pci_driver ef100_pci_driver;
diff --git a/drivers/net/ethernet/sfc/ef100_ethtool.c b/drivers/net/ethernet/sfc/ef100_ethtool.c
new file mode 100644
index 000000000000..729c425d0f78
--- /dev/null
+++ b/drivers/net/ethernet/sfc/ef100_ethtool.c
@@ -0,0 +1,24 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2018 Solarflare Communications Inc.
+ * Copyright 2019-2020 Xilinx Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include "net_driver.h"
+#include "efx.h"
+#include "mcdi_port_common.h"
+#include "ethtool_common.h"
+#include "ef100_ethtool.h"
+#include "mcdi_functions.h"
+
+/* Ethtool options available
+ */
+const struct ethtool_ops ef100_ethtool_ops = {
+ .get_drvinfo = efx_ethtool_get_drvinfo,
+};
diff --git a/drivers/net/ethernet/sfc/ef100_ethtool.h b/drivers/net/ethernet/sfc/ef100_ethtool.h
new file mode 100644
index 000000000000..6efda72dfc6c
--- /dev/null
+++ b/drivers/net/ethernet/sfc/ef100_ethtool.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2018 Solarflare Communications Inc.
+ * Copyright 2019-2020 Xilinx Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+extern const struct ethtool_ops ef100_ethtool_ops;
diff --git a/drivers/net/ethernet/sfc/ef100_netdev.c b/drivers/net/ethernet/sfc/ef100_netdev.c
new file mode 100644
index 000000000000..4c3caac2c8cc
--- /dev/null
+++ b/drivers/net/ethernet/sfc/ef100_netdev.c
@@ -0,0 +1,274 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2018 Solarflare Communications Inc.
+ * Copyright 2019-2020 Xilinx Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+#include "net_driver.h"
+#include "mcdi_port_common.h"
+#include "mcdi_functions.h"
+#include "efx_common.h"
+#include "efx_channels.h"
+#include "tx_common.h"
+#include "ef100_netdev.h"
+#include "ef100_ethtool.h"
+#include "efx_common.h"
+#include "nic_common.h"
+#include "ef100_nic.h"
+#include "ef100_tx.h"
+#include "ef100_regs.h"
+#include "mcdi_filters.h"
+#include "rx_common.h"
+
+static void ef100_update_name(struct efx_nic *efx)
+{
+ strcpy(efx->name, efx->net_dev->name);
+}
+
+static int ef100_alloc_vis(struct efx_nic *efx, unsigned int *allocated_vis)
+{
+ /* EF100 uses a single TXQ per channel, as all checksum offloading
+ * is configured in the TX descriptor, and there is no TX Pacer for
+ * HIGHPRI queues.
+ */
+ unsigned int tx_vis = efx->n_tx_channels + efx->n_extra_tx_channels;
+ unsigned int rx_vis = efx->n_rx_channels;
+ unsigned int min_vis, max_vis;
+
+ EFX_WARN_ON_PARANOID(efx->tx_queues_per_channel != 1);
+
+ tx_vis += efx->n_xdp_channels * efx->xdp_tx_per_channel;
+
+ max_vis = max(rx_vis, tx_vis);
+ /* Currently don't handle resource starvation and only accept
+ * our maximum needs and no less.
+ */
+ min_vis = max_vis;
+
+ return efx_mcdi_alloc_vis(efx, min_vis, max_vis,
+ NULL, allocated_vis);
+}
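
The VI accounting above counts one TXQ per channel (plus XDP TXQs) against the RX channel count and requests the larger of the two, with no fallback if fewer VIs are available. A small userspace sketch of the arithmetic, with made-up channel counts for illustration:

#include <stdio.h>

int main(void)
{
	unsigned int n_tx_channels = 8, n_extra_tx_channels = 1;
	unsigned int n_xdp_channels = 2, xdp_tx_per_channel = 1;
	unsigned int n_rx_channels = 8;

	unsigned int tx_vis = n_tx_channels + n_extra_tx_channels +
			      n_xdp_channels * xdp_tx_per_channel;
	unsigned int rx_vis = n_rx_channels;
	unsigned int max_vis = tx_vis > rx_vis ? tx_vis : rx_vis;

	printf("tx_vis=%u rx_vis=%u -> request %u VIs\n",
	       tx_vis, rx_vis, max_vis);	/* 11, 8 -> 11 */
	return 0;
}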
+
+static int ef100_remap_bar(struct efx_nic *efx, int max_vis)
+{
+ unsigned int uc_mem_map_size;
+ void __iomem *membase;
+
+ efx->max_vis = max_vis;
+ uc_mem_map_size = PAGE_ALIGN(max_vis * efx->vi_stride);
+
+ /* Extend the original UC mapping of the memory BAR */
+ membase = ioremap(efx->membase_phys, uc_mem_map_size);
+ if (!membase) {
+ netif_err(efx, probe, efx->net_dev,
+ "could not extend memory BAR to %x\n",
+ uc_mem_map_size);
+ return -ENOMEM;
+ }
+ iounmap(efx->membase);
+ efx->membase = membase;
+ return 0;
+}
+
+/* Context: process, rtnl_lock() held.
+ * Note that the kernel will ignore our return code; this method
+ * should really be void.
+ */
+static int ef100_net_stop(struct net_device *net_dev)
+{
+ struct efx_nic *efx = netdev_priv(net_dev);
+
+ netif_dbg(efx, ifdown, efx->net_dev, "closing on CPU %d\n",
+ raw_smp_processor_id());
+
+ netif_stop_queue(net_dev);
+ efx_stop_all(efx);
+ efx_disable_interrupts(efx);
+ efx_clear_interrupt_affinity(efx);
+ efx_nic_fini_interrupt(efx);
+ efx_fini_napi(efx);
+ efx_remove_channels(efx);
+ efx_mcdi_free_vis(efx);
+ efx_remove_interrupts(efx);
+
+ return 0;
+}
+
+/* Context: process, rtnl_lock() held. */
+static int ef100_net_open(struct net_device *net_dev)
+{
+ struct efx_nic *efx = netdev_priv(net_dev);
+ unsigned int allocated_vis;
+ int rc;
+
+ ef100_update_name(efx);
+ netif_dbg(efx, ifup, net_dev, "opening device on CPU %d\n",
+ raw_smp_processor_id());
+
+ rc = efx_check_disabled(efx);
+ if (rc)
+ goto fail;
+
+ rc = efx_probe_interrupts(efx);
+ if (rc)
+ goto fail;
+
+ rc = efx_set_channels(efx);
+ if (rc)
+ goto fail;
+
+ rc = efx_mcdi_free_vis(efx);
+ if (rc)
+ goto fail;
+
+ rc = ef100_alloc_vis(efx, &allocated_vis);
+ if (rc)
+ goto fail;
+
+ rc = efx_probe_channels(efx);
+ if (rc)
+		goto fail;
+
+ rc = ef100_remap_bar(efx, allocated_vis);
+ if (rc)
+ goto fail;
+
+ efx_init_napi(efx);
+
+ rc = efx_nic_init_interrupt(efx);
+ if (rc)
+ goto fail;
+ efx_set_interrupt_affinity(efx);
+
+ rc = efx_enable_interrupts(efx);
+ if (rc)
+ goto fail;
+
+ /* in case the MC rebooted while we were stopped, consume the change
+ * to the warm reboot count
+ */
+ (void) efx_mcdi_poll_reboot(efx);
+
+ efx_start_all(efx);
+
+ /* Link state detection is normally event-driven; we have
+ * to poll now because we could have missed a change
+ */
+ mutex_lock(&efx->mac_lock);
+ if (efx_mcdi_phy_poll(efx))
+ efx_link_status_changed(efx);
+ mutex_unlock(&efx->mac_lock);
+
+ return 0;
+
+fail:
+ ef100_net_stop(net_dev);
+ return rc;
+}
+
+/* Initiate a packet transmission. We use one channel per CPU
+ * (sharing when we have more CPUs than channels).
+ *
+ * Context: non-blocking.
+ * Note that returning anything other than NETDEV_TX_OK will cause the
+ * OS to free the skb.
+ */
+static netdev_tx_t ef100_hard_start_xmit(struct sk_buff *skb,
+ struct net_device *net_dev)
+{
+ struct efx_nic *efx = netdev_priv(net_dev);
+ struct efx_tx_queue *tx_queue;
+ struct efx_channel *channel;
+ int rc;
+
+	channel = efx_get_tx_channel(efx, skb_get_queue_mapping(skb));
+	if (!efx->n_channels || !efx->n_tx_channels || !channel) {
+		netif_stop_queue(net_dev);
+		goto err;
+	}
+	netif_vdbg(efx, tx_queued, efx->net_dev,
+		   "%s len %d data %d channel %d\n", __func__,
+		   skb->len, skb->data_len, channel->channel);
+
+ tx_queue = &channel->tx_queue[0];
+ rc = ef100_enqueue_skb(tx_queue, skb);
+ if (rc == 0)
+ return NETDEV_TX_OK;
+
+err:
+ net_dev->stats.tx_dropped++;
+ return NETDEV_TX_OK;
+}
+
+static const struct net_device_ops ef100_netdev_ops = {
+ .ndo_open = ef100_net_open,
+ .ndo_stop = ef100_net_stop,
+ .ndo_start_xmit = ef100_hard_start_xmit,
+ .ndo_get_phys_port_id = efx_get_phys_port_id,
+ .ndo_get_phys_port_name = efx_get_phys_port_name,
+};
+
+/* Netdev registration
+ */
+int ef100_netdev_event(struct notifier_block *this,
+ unsigned long event, void *ptr)
+{
+ struct efx_nic *efx = container_of(this, struct efx_nic, netdev_notifier);
+ struct net_device *net_dev = netdev_notifier_info_to_dev(ptr);
+
+ if (netdev_priv(net_dev) == efx && event == NETDEV_CHANGENAME)
+ ef100_update_name(efx);
+
+ return NOTIFY_DONE;
+}
+
+int ef100_register_netdev(struct efx_nic *efx)
+{
+ struct net_device *net_dev = efx->net_dev;
+ int rc;
+
+ net_dev->watchdog_timeo = 5 * HZ;
+ net_dev->irq = efx->pci_dev->irq;
+ net_dev->netdev_ops = &ef100_netdev_ops;
+ net_dev->min_mtu = EFX_MIN_MTU;
+ net_dev->max_mtu = EFX_MAX_MTU;
+ net_dev->ethtool_ops = &ef100_ethtool_ops;
+
+ rtnl_lock();
+
+ rc = dev_alloc_name(net_dev, net_dev->name);
+ if (rc < 0)
+ goto fail_locked;
+ ef100_update_name(efx);
+
+ rc = register_netdevice(net_dev);
+ if (rc)
+ goto fail_locked;
+
+ /* Always start with carrier off; PHY events will detect the link */
+ netif_carrier_off(net_dev);
+
+ efx->state = STATE_READY;
+ rtnl_unlock();
+ efx_init_mcdi_logging(efx);
+
+ return 0;
+
+fail_locked:
+ rtnl_unlock();
+ netif_err(efx, drv, efx->net_dev, "could not register net dev\n");
+ return rc;
+}
+
+void ef100_unregister_netdev(struct efx_nic *efx)
+{
+ if (efx_dev_registered(efx)) {
+ efx_fini_mcdi_logging(efx);
+ efx->state = STATE_UNINIT;
+ unregister_netdev(efx->net_dev);
+ }
+}
diff --git a/drivers/net/ethernet/sfc/ef100_netdev.h b/drivers/net/ethernet/sfc/ef100_netdev.h
new file mode 100644
index 000000000000..d40abb7cc086
--- /dev/null
+++ b/drivers/net/ethernet/sfc/ef100_netdev.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2018 Solarflare Communications Inc.
+ * Copyright 2019-2020 Xilinx Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#include <linux/netdevice.h>
+
+int ef100_netdev_event(struct notifier_block *this,
+ unsigned long event, void *ptr);
+int ef100_register_netdev(struct efx_nic *efx);
+void ef100_unregister_netdev(struct efx_nic *efx);
diff --git a/drivers/net/ethernet/sfc/ef100_nic.c b/drivers/net/ethernet/sfc/ef100_nic.c
new file mode 100644
index 000000000000..6a00f2a2dc2b
--- /dev/null
+++ b/drivers/net/ethernet/sfc/ef100_nic.c
@@ -0,0 +1,619 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2018 Solarflare Communications Inc.
+ * Copyright 2019-2020 Xilinx Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#include "ef100_nic.h"
+#include "efx_common.h"
+#include "efx_channels.h"
+#include "io.h"
+#include "selftest.h"
+#include "ef100_regs.h"
+#include "mcdi.h"
+#include "mcdi_pcol.h"
+#include "mcdi_port_common.h"
+#include "mcdi_functions.h"
+#include "mcdi_filters.h"
+#include "ef100_rx.h"
+#include "ef100_tx.h"
+#include "ef100_netdev.h"
+
+#define EF100_MAX_VIS 4096
+#define EF100_NUM_MCDI_BUFFERS 1
+#define MCDI_BUF_LEN (8 + MCDI_CTL_SDU_LEN_MAX)
+
+#define EF100_RESET_PORT ((ETH_RESET_MAC | ETH_RESET_PHY) << ETH_RESET_SHARED_SHIFT)
+
+/* MCDI
+ */
+static u8 *ef100_mcdi_buf(struct efx_nic *efx, u8 bufid, dma_addr_t *dma_addr)
+{
+ struct ef100_nic_data *nic_data = efx->nic_data;
+
+ if (dma_addr)
+ *dma_addr = nic_data->mcdi_buf.dma_addr +
+ bufid * ALIGN(MCDI_BUF_LEN, 256);
+ return nic_data->mcdi_buf.addr + bufid * ALIGN(MCDI_BUF_LEN, 256);
+}
+
+static int ef100_get_warm_boot_count(struct efx_nic *efx)
+{
+ efx_dword_t reg;
+
+ efx_readd(efx, &reg, efx_reg(efx, ER_GZ_MC_SFT_STATUS));
+
+ if (EFX_DWORD_FIELD(reg, EFX_DWORD_0) == 0xffffffff) {
+ netif_err(efx, hw, efx->net_dev, "Hardware unavailable\n");
+ efx->state = STATE_DISABLED;
+ return -ENETDOWN;
+ } else {
+ return EFX_DWORD_FIELD(reg, EFX_WORD_1) == 0xb007 ?
+ EFX_DWORD_FIELD(reg, EFX_WORD_0) : -EIO;
+ }
+}
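
The register decode above: all-ones means the device has gone away, the upper 16 bits must read the 0xb007 ("boot") magic, and the lower 16 bits carry the warm boot count. A self-contained userspace sketch of the same decode, for illustration:

#include <stdint.h>
#include <stdio.h>

static int warm_boot_count(uint32_t reg)
{
	if (reg == 0xffffffffu)
		return -1;	/* hardware unavailable */
	if ((reg >> 16) != 0xb007)
		return -2;	/* firmware not booted */
	return reg & 0xffff;	/* warm boot count */
}

int main(void)
{
	printf("count = %d\n", warm_boot_count(0xb0070003)); /* 3 */
	return 0;
}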
+
+static void ef100_mcdi_request(struct efx_nic *efx,
+ const efx_dword_t *hdr, size_t hdr_len,
+ const efx_dword_t *sdu, size_t sdu_len)
+{
+ dma_addr_t dma_addr;
+ u8 *pdu = ef100_mcdi_buf(efx, 0, &dma_addr);
+
+ memcpy(pdu, hdr, hdr_len);
+ memcpy(pdu + hdr_len, sdu, sdu_len);
+ wmb();
+
+ /* The hardware provides 'low' and 'high' (doorbell) registers
+ * for passing the 64-bit address of an MCDI request to
+ * firmware. However the dwords are swapped by firmware. The
+ * least significant bits of the doorbell are then 0 for all
+ * MCDI requests due to alignment.
+ */
+ _efx_writed(efx, cpu_to_le32((u64)dma_addr >> 32), efx_reg(efx, ER_GZ_MC_DB_LWRD));
+ _efx_writed(efx, cpu_to_le32((u32)dma_addr), efx_reg(efx, ER_GZ_MC_DB_HWRD));
+}
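
The dword swap described in the comment above means the *high* half of the 64-bit DMA address goes to the LWRD register and the low half to the HWRD register. A tiny userspace sketch of the split, for illustration only:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t dma_addr = 0x0000123456789000ull;
	uint32_t lwrd = (uint32_t)(dma_addr >> 32);	/* -> ER_GZ_MC_DB_LWRD */
	uint32_t hwrd = (uint32_t)dma_addr;		/* -> ER_GZ_MC_DB_HWRD */

	printf("LWRD=0x%08x HWRD=0x%08x\n", lwrd, hwrd);
	return 0;
}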
+
+static bool ef100_mcdi_poll_response(struct efx_nic *efx)
+{
+ const efx_dword_t hdr =
+ *(const efx_dword_t *)(ef100_mcdi_buf(efx, 0, NULL));
+
+ rmb();
+ return EFX_DWORD_FIELD(hdr, MCDI_HEADER_RESPONSE);
+}
+
+static void ef100_mcdi_read_response(struct efx_nic *efx,
+ efx_dword_t *outbuf, size_t offset,
+ size_t outlen)
+{
+ const u8 *pdu = ef100_mcdi_buf(efx, 0, NULL);
+
+ memcpy(outbuf, pdu + offset, outlen);
+}
+
+static int ef100_mcdi_poll_reboot(struct efx_nic *efx)
+{
+ struct ef100_nic_data *nic_data = efx->nic_data;
+ int rc;
+
+ rc = ef100_get_warm_boot_count(efx);
+ if (rc < 0) {
+ /* The firmware is presumably in the process of
+ * rebooting. However, we are supposed to report each
+ * reboot just once, so we must only do that once we
+ * can read and store the updated warm boot count.
+ */
+ return 0;
+ }
+
+ if (rc == nic_data->warm_boot_count)
+ return 0;
+
+ nic_data->warm_boot_count = rc;
+
+ return -EIO;
+}
+
+static void ef100_mcdi_reboot_detected(struct efx_nic *efx)
+{
+}
+
+/* MCDI calls
+ */
+static int ef100_get_mac_address(struct efx_nic *efx, u8 *mac_address)
+{
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_MAC_ADDRESSES_OUT_LEN);
+ size_t outlen;
+ int rc;
+
+ BUILD_BUG_ON(MC_CMD_GET_MAC_ADDRESSES_IN_LEN != 0);
+
+ rc = efx_mcdi_rpc(efx, MC_CMD_GET_MAC_ADDRESSES, NULL, 0,
+ outbuf, sizeof(outbuf), &outlen);
+ if (rc)
+ return rc;
+ if (outlen < MC_CMD_GET_MAC_ADDRESSES_OUT_LEN)
+ return -EIO;
+
+ ether_addr_copy(mac_address,
+ MCDI_PTR(outbuf, GET_MAC_ADDRESSES_OUT_MAC_ADDR_BASE));
+ return 0;
+}
+
+static int efx_ef100_init_datapath_caps(struct efx_nic *efx)
+{
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_CAPABILITIES_V7_OUT_LEN);
+ struct ef100_nic_data *nic_data = efx->nic_data;
+ u8 vi_window_mode;
+ size_t outlen;
+ int rc;
+
+ BUILD_BUG_ON(MC_CMD_GET_CAPABILITIES_IN_LEN != 0);
+
+ rc = efx_mcdi_rpc(efx, MC_CMD_GET_CAPABILITIES, NULL, 0,
+ outbuf, sizeof(outbuf), &outlen);
+ if (rc)
+ return rc;
+ if (outlen < MC_CMD_GET_CAPABILITIES_V4_OUT_LEN) {
+ netif_err(efx, drv, efx->net_dev,
+ "unable to read datapath firmware capabilities\n");
+ return -EIO;
+ }
+
+ nic_data->datapath_caps = MCDI_DWORD(outbuf,
+ GET_CAPABILITIES_OUT_FLAGS1);
+ nic_data->datapath_caps2 = MCDI_DWORD(outbuf,
+ GET_CAPABILITIES_V2_OUT_FLAGS2);
+ if (outlen < MC_CMD_GET_CAPABILITIES_V7_OUT_LEN)
+ nic_data->datapath_caps3 = 0;
+ else
+ nic_data->datapath_caps3 = MCDI_DWORD(outbuf,
+ GET_CAPABILITIES_V7_OUT_FLAGS3);
+
+ vi_window_mode = MCDI_BYTE(outbuf,
+ GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE);
+ rc = efx_mcdi_window_mode_to_stride(efx, vi_window_mode);
+ if (rc)
+ return rc;
+
+ if (efx_ef100_has_cap(nic_data->datapath_caps2, TX_TSO_V3))
+ efx->net_dev->features |= NETIF_F_TSO | NETIF_F_TSO6;
+ efx->num_mac_stats = MCDI_WORD(outbuf,
+ GET_CAPABILITIES_V4_OUT_MAC_STATS_NUM_STATS);
+ netif_dbg(efx, probe, efx->net_dev,
+ "firmware reports num_mac_stats = %u\n",
+ efx->num_mac_stats);
+ return 0;
+}
+
+/* Event handling
+ */
+static int ef100_ev_probe(struct efx_channel *channel)
+{
+ /* Allocate an extra descriptor for the QMDA status completion entry */
+ return efx_nic_alloc_buffer(channel->efx, &channel->eventq.buf,
+ (channel->eventq_mask + 2) *
+ sizeof(efx_qword_t),
+ GFP_KERNEL);
+}
+
+static int ef100_ev_init(struct efx_channel *channel)
+{
+ struct ef100_nic_data *nic_data = channel->efx->nic_data;
+
+ /* initial phase is 0 */
+ clear_bit(channel->channel, nic_data->evq_phases);
+
+ return efx_mcdi_ev_init(channel, false, false);
+}
+
+static void ef100_ev_read_ack(struct efx_channel *channel)
+{
+ efx_dword_t evq_prime;
+
+ EFX_POPULATE_DWORD_2(evq_prime,
+ ERF_GZ_EVQ_ID, channel->channel,
+ ERF_GZ_IDX, channel->eventq_read_ptr &
+ channel->eventq_mask);
+
+ efx_writed(channel->efx, &evq_prime,
+ efx_reg(channel->efx, ER_GZ_EVQ_INT_PRIME));
+}
+
+static int ef100_ev_process(struct efx_channel *channel, int quota)
+{
+ struct efx_nic *efx = channel->efx;
+ struct ef100_nic_data *nic_data;
+ bool evq_phase, old_evq_phase;
+ unsigned int read_ptr;
+ efx_qword_t *p_event;
+ int spent = 0;
+ bool ev_phase;
+ int ev_type;
+
+ if (unlikely(!channel->enabled))
+ return 0;
+
+ nic_data = efx->nic_data;
+ evq_phase = test_bit(channel->channel, nic_data->evq_phases);
+ old_evq_phase = evq_phase;
+ read_ptr = channel->eventq_read_ptr;
+ BUILD_BUG_ON(ESF_GZ_EV_RXPKTS_PHASE_LBN != ESF_GZ_EV_TXCMPL_PHASE_LBN);
+
+ while (spent < quota) {
+ p_event = efx_event(channel, read_ptr);
+
+ ev_phase = !!EFX_QWORD_FIELD(*p_event, ESF_GZ_EV_RXPKTS_PHASE);
+ if (ev_phase != evq_phase)
+ break;
+
+ netif_vdbg(efx, drv, efx->net_dev,
+ "processing event on %d " EFX_QWORD_FMT "\n",
+ channel->channel, EFX_QWORD_VAL(*p_event));
+
+ ev_type = EFX_QWORD_FIELD(*p_event, ESF_GZ_E_TYPE);
+
+ switch (ev_type) {
+ case ESE_GZ_EF100_EV_MCDI:
+ efx_mcdi_process_event(channel, p_event);
+ break;
+ case ESE_GZ_EF100_EV_DRIVER:
+ netif_info(efx, drv, efx->net_dev,
+ "Driver initiated event " EFX_QWORD_FMT "\n",
+ EFX_QWORD_VAL(*p_event));
+ break;
+ default:
+ netif_info(efx, drv, efx->net_dev,
+ "Unhandled event " EFX_QWORD_FMT "\n",
+ EFX_QWORD_VAL(*p_event));
+ }
+
+ ++read_ptr;
+ if ((read_ptr & channel->eventq_mask) == 0)
+ evq_phase = !evq_phase;
+ }
+
+ channel->eventq_read_ptr = read_ptr;
+ if (evq_phase != old_evq_phase)
+ change_bit(channel->channel, nic_data->evq_phases);
+
+ return spent;
+}
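
The event queue uses phase-bit validity rather than an index exchange: an entry is valid when its phase bit matches the consumer's expected phase, and the expected phase flips each time the read pointer wraps. A self-contained userspace sketch of the same consumption pattern (ring size 4, made-up event type), for illustration:

#include <stdbool.h>
#include <stdio.h>

#define RING_MASK 3u

struct event { bool phase; int payload; };

static int consume(const struct event *ring, unsigned int *read_ptr,
		   bool *phase, int quota)
{
	int spent = 0;

	while (spent < quota) {
		const struct event *ev = &ring[*read_ptr & RING_MASK];

		if (ev->phase != *phase)
			break;	/* not yet written by the producer */
		printf("event %d\n", ev->payload);
		spent++;
		if ((++(*read_ptr) & RING_MASK) == 0)
			*phase = !*phase;	/* wrapped: flip expected phase */
	}
	return spent;
}

int main(void)
{
	struct event ring[4] = {
		{ true, 0 }, { true, 1 }, { true, 2 }, { false, 3 },
	};
	unsigned int rptr = 0;
	bool phase = true;

	consume(ring, &rptr, &phase, 8); /* prints events 0..2, stops at 3 */
	return 0;
}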
+
+static irqreturn_t ef100_msi_interrupt(int irq, void *dev_id)
+{
+ struct efx_msi_context *context = dev_id;
+ struct efx_nic *efx = context->efx;
+
+ netif_vdbg(efx, intr, efx->net_dev,
+ "IRQ %d on CPU %d\n", irq, raw_smp_processor_id());
+
+ if (likely(READ_ONCE(efx->irq_soft_enabled))) {
+ /* Note test interrupts */
+ if (context->index == efx->irq_level)
+ efx->last_irq_cpu = raw_smp_processor_id();
+
+ /* Schedule processing of the channel */
+ efx_schedule_channel_irq(efx->channel[context->index]);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int ef100_phy_probe(struct efx_nic *efx)
+{
+ struct efx_mcdi_phy_data *phy_data;
+ int rc;
+
+ /* Probe for the PHY */
+ efx->phy_data = kzalloc(sizeof(struct efx_mcdi_phy_data), GFP_KERNEL);
+ if (!efx->phy_data)
+ return -ENOMEM;
+
+ rc = efx_mcdi_get_phy_cfg(efx, efx->phy_data);
+ if (rc)
+ return rc;
+
+ /* Populate driver and ethtool settings */
+ phy_data = efx->phy_data;
+ mcdi_to_ethtool_linkset(phy_data->media, phy_data->supported_cap,
+ efx->link_advertising);
+ efx->fec_config = mcdi_fec_caps_to_ethtool(phy_data->supported_cap,
+ false);
+
+ /* Default to Autonegotiated flow control if the PHY supports it */
+ efx->wanted_fc = EFX_FC_RX | EFX_FC_TX;
+ if (phy_data->supported_cap & (1 << MC_CMD_PHY_CAP_AN_LBN))
+ efx->wanted_fc |= EFX_FC_AUTO;
+ efx_link_set_wanted_fc(efx, efx->wanted_fc);
+
+ /* Push settings to the PHY. Failure is not fatal; the user can try to
+ * fix it using ethtool.
+ */
+ rc = efx_mcdi_port_reconfigure(efx);
+ if (rc && rc != -EPERM)
+ netif_warn(efx, drv, efx->net_dev,
+ "could not initialise PHY settings\n");
+
+ return 0;
+}
+
+/* Other
+ */
+static int ef100_reconfigure_mac(struct efx_nic *efx, bool mtu_only)
+{
+ WARN_ON(!mutex_is_locked(&efx->mac_lock));
+
+ efx_mcdi_filter_sync_rx_mode(efx);
+
+ if (mtu_only && efx_has_cap(efx, SET_MAC_ENHANCED))
+ return efx_mcdi_set_mtu(efx);
+ return efx_mcdi_set_mac(efx);
+}
+
+static enum reset_type ef100_map_reset_reason(enum reset_type reason)
+{
+ if (reason == RESET_TYPE_TX_WATCHDOG)
+ return reason;
+ return RESET_TYPE_DISABLE;
+}
+
+static int ef100_map_reset_flags(u32 *flags)
+{
+ /* Only perform a RESET_TYPE_ALL because we don't support MC_REBOOTs */
+ if ((*flags & EF100_RESET_PORT)) {
+ *flags &= ~EF100_RESET_PORT;
+ return RESET_TYPE_ALL;
+ }
+ if (*flags & ETH_RESET_MGMT) {
+ *flags &= ~ETH_RESET_MGMT;
+ return RESET_TYPE_DISABLE;
+ }
+
+ return -EINVAL;
+}
+
+static int ef100_reset(struct efx_nic *efx, enum reset_type reset_type)
+{
+ int rc;
+
+ dev_close(efx->net_dev);
+
+ if (reset_type == RESET_TYPE_TX_WATCHDOG) {
+ netif_device_attach(efx->net_dev);
+ __clear_bit(reset_type, &efx->reset_pending);
+ rc = dev_open(efx->net_dev, NULL);
+ } else if (reset_type == RESET_TYPE_ALL) {
+ rc = efx_mcdi_reset(efx, reset_type);
+ if (rc)
+ return rc;
+
+ netif_device_attach(efx->net_dev);
+
+ rc = dev_open(efx->net_dev, NULL);
+ } else {
+ rc = 1; /* Leave the device closed */
+ }
+ return rc;
+}
+
+static int efx_ef100_get_phys_port_id(struct efx_nic *efx,
+ struct netdev_phys_item_id *ppid)
+{
+ struct ef100_nic_data *nic_data = efx->nic_data;
+
+ if (!is_valid_ether_addr(nic_data->port_id))
+ return -EOPNOTSUPP;
+
+ ppid->id_len = ETH_ALEN;
+ memcpy(ppid->id, nic_data->port_id, ppid->id_len);
+
+ return 0;
+}
+
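+/* The flag/offset pair is generated by the MCDI_CAPABILITY() and
+ * MCDI_CAPABILITY_OFST() macros via efx_has_cap(); the offset selects
+ * which GET_CAPABILITIES FLAGS word holds the requested bit.
+ */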
+static unsigned int ef100_check_caps(const struct efx_nic *efx,
+ u8 flag, u32 offset)
+{
+ const struct ef100_nic_data *nic_data = efx->nic_data;
+
+ switch (offset) {
+ case MC_CMD_GET_CAPABILITIES_V8_OUT_FLAGS1_OFST:
+ return nic_data->datapath_caps & BIT_ULL(flag);
+ case MC_CMD_GET_CAPABILITIES_V8_OUT_FLAGS2_OFST:
+ return nic_data->datapath_caps2 & BIT_ULL(flag);
+ case MC_CMD_GET_CAPABILITIES_V8_OUT_FLAGS3_OFST:
+ return nic_data->datapath_caps3 & BIT_ULL(flag);
+ default:
+ return 0;
+ }
+}
+
+/* NIC level access functions
+ */
+const struct efx_nic_type ef100_pf_nic_type = {
+ .revision = EFX_REV_EF100,
+ .is_vf = false,
+ .probe = ef100_probe_pf,
+ .mcdi_max_ver = 2,
+ .mcdi_request = ef100_mcdi_request,
+ .mcdi_poll_response = ef100_mcdi_poll_response,
+ .mcdi_read_response = ef100_mcdi_read_response,
+ .mcdi_poll_reboot = ef100_mcdi_poll_reboot,
+ .mcdi_reboot_detected = ef100_mcdi_reboot_detected,
+ .irq_enable_master = efx_port_dummy_op_void,
+ .irq_disable_non_ev = efx_port_dummy_op_void,
+ .push_irq_moderation = efx_channel_dummy_op_void,
+ .min_interrupt_mode = EFX_INT_MODE_MSIX,
+ .map_reset_reason = ef100_map_reset_reason,
+ .map_reset_flags = ef100_map_reset_flags,
+ .reset = ef100_reset,
+
+ .check_caps = ef100_check_caps,
+
+ .ev_probe = ef100_ev_probe,
+ .ev_init = ef100_ev_init,
+ .ev_fini = efx_mcdi_ev_fini,
+ .ev_remove = efx_mcdi_ev_remove,
+ .irq_handle_msi = ef100_msi_interrupt,
+ .ev_process = ef100_ev_process,
+ .ev_read_ack = ef100_ev_read_ack,
+ .tx_probe = ef100_tx_probe,
+ .tx_init = ef100_tx_init,
+ .tx_write = ef100_tx_write,
+ .tx_enqueue = ef100_enqueue_skb,
+ .rx_probe = efx_mcdi_rx_probe,
+ .rx_init = efx_mcdi_rx_init,
+ .rx_remove = efx_mcdi_rx_remove,
+ .rx_write = ef100_rx_write,
+ .rx_packet = __ef100_rx_packet,
+
+ .get_phys_port_id = efx_ef100_get_phys_port_id,
+
+ .reconfigure_mac = ef100_reconfigure_mac,
+
+ /* Per-type bar/size configuration not used on ef100. Location of
+ * registers is defined by extended capabilities.
+ */
+ .mem_bar = NULL,
+ .mem_map_size = NULL,
+
+};
+
+/* NIC probe and remove
+ */
+static int ef100_probe_main(struct efx_nic *efx)
+{
+ unsigned int bar_size = resource_size(&efx->pci_dev->resource[efx->mem_bar]);
+ struct net_device *net_dev = efx->net_dev;
+ struct ef100_nic_data *nic_data;
+ int i, rc;
+
+ if (WARN_ON(bar_size == 0))
+ return -EIO;
+
+ nic_data = kzalloc(sizeof(*nic_data), GFP_KERNEL);
+ if (!nic_data)
+ return -ENOMEM;
+ efx->nic_data = nic_data;
+ nic_data->efx = efx;
+ net_dev->features |= efx->type->offload_features;
+ net_dev->hw_features |= efx->type->offload_features;
+
+ /* we assume later that we can copy from this buffer in dwords */
+ BUILD_BUG_ON(MCDI_CTL_SDU_LEN_MAX_V2 % 4);
+
+ /* MCDI buffers must be 256 byte aligned. */
+ rc = efx_nic_alloc_buffer(efx, &nic_data->mcdi_buf, MCDI_BUF_LEN,
+ GFP_KERNEL);
+ if (rc)
+ goto fail;
+
+ /* Get the MC's warm boot count. In case it's rebooting right
+ * now, be prepared to retry.
+ */
+ i = 0;
+ for (;;) {
+ rc = ef100_get_warm_boot_count(efx);
+ if (rc >= 0)
+ break;
+ if (++i == 5)
+ goto fail;
+ ssleep(1);
+ }
+ nic_data->warm_boot_count = rc;
+
+ /* In case we're recovering from a crash (kexec), we want to
+ * cancel any outstanding request by the previous user of this
+ * function. We send a special message using the least
+ * significant bits of the 'high' (doorbell) register.
+ */
+ _efx_writed(efx, cpu_to_le32(1), efx_reg(efx, ER_GZ_MC_DB_HWRD));
+
+ /* Post-IO section. */
+
+ rc = efx_mcdi_init(efx);
+ if (!rc && efx->mcdi->fn_flags &
+ (1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_NO_ACTIVE_PORT)) {
+ netif_info(efx, probe, efx->net_dev,
+ "No network port on this PCI function");
+ rc = -ENODEV;
+ }
+ if (rc)
+ goto fail;
+ /* Reset (most) configuration for this function */
+ rc = efx_mcdi_reset(efx, RESET_TYPE_ALL);
+ if (rc)
+ goto fail;
+
+ rc = efx_ef100_init_datapath_caps(efx);
+ if (rc < 0)
+ goto fail;
+
+ efx->max_vis = EF100_MAX_VIS;
+
+ rc = efx_mcdi_port_get_number(efx);
+ if (rc < 0)
+ goto fail;
+ efx->port_num = rc;
+
+ rc = ef100_phy_probe(efx);
+ if (rc)
+ goto fail;
+
+ rc = efx_init_channels(efx);
+ if (rc)
+ goto fail;
+
+ rc = ef100_register_netdev(efx);
+ if (rc)
+ goto fail;
+
+ return 0;
+fail:
+ return rc;
+}
+
+int ef100_probe_pf(struct efx_nic *efx)
+{
+ struct net_device *net_dev = efx->net_dev;
+ struct ef100_nic_data *nic_data;
+ int rc = ef100_probe_main(efx);
+
+ if (rc)
+ goto fail;
+
+ nic_data = efx->nic_data;
+ rc = ef100_get_mac_address(efx, net_dev->perm_addr);
+ if (rc)
+ goto fail;
+ /* Assign MAC address */
+ memcpy(net_dev->dev_addr, net_dev->perm_addr, ETH_ALEN);
+ memcpy(nic_data->port_id, net_dev->perm_addr, ETH_ALEN);
+
+ return 0;
+
+fail:
+ return rc;
+}
+
+void ef100_remove(struct efx_nic *efx)
+{
+ struct ef100_nic_data *nic_data = efx->nic_data;
+
+ ef100_unregister_netdev(efx);
+ efx_fini_channels(efx);
+ kfree(efx->phy_data);
+ efx->phy_data = NULL;
+ efx_mcdi_detach(efx);
+ efx_mcdi_fini(efx);
+ if (nic_data)
+ efx_nic_free_buffer(efx, &nic_data->mcdi_buf);
+ kfree(nic_data);
+ efx->nic_data = NULL;
+}
diff --git a/drivers/net/ethernet/sfc/ef100_nic.h b/drivers/net/ethernet/sfc/ef100_nic.h
new file mode 100644
index 000000000000..6367bbb2c9b3
--- /dev/null
+++ b/drivers/net/ethernet/sfc/ef100_nic.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2018 Solarflare Communications Inc.
+ * Copyright 2019-2020 Xilinx Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#include "net_driver.h"
+#include "nic_common.h"
+
+extern const struct efx_nic_type ef100_pf_nic_type;
+
+int ef100_probe_pf(struct efx_nic *efx);
+void ef100_remove(struct efx_nic *efx);
+
+struct ef100_nic_data {
+ struct efx_nic *efx;
+ struct efx_buffer mcdi_buf;
+ u32 datapath_caps;
+ u32 datapath_caps2;
+ u32 datapath_caps3;
+ u16 warm_boot_count;
+ u8 port_id[ETH_ALEN];
+ DECLARE_BITMAP(evq_phases, EFX_MAX_CHANNELS);
+};
+
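+/* Test a capability bit in one of the saved GET_CAPABILITIES flag words,
+ * e.g. efx_ef100_has_cap(nic_data->datapath_caps2, TX_TSO_V3).
+ */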
+#define efx_ef100_has_cap(caps, flag) \
+ (!!((caps) & BIT_ULL(MC_CMD_GET_CAPABILITIES_V4_OUT_ ## flag ## _LBN)))
diff --git a/drivers/net/ethernet/sfc/ef100_regs.h b/drivers/net/ethernet/sfc/ef100_regs.h
new file mode 100644
index 000000000000..710bbdb19885
--- /dev/null
+++ b/drivers/net/ethernet/sfc/ef100_regs.h
@@ -0,0 +1,693 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2018 Solarflare Communications Inc.
+ * Copyright 2019-2020 Xilinx Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#ifndef EFX_EF100_REGS_H
+#define EFX_EF100_REGS_H
+
+/* EF100 hardware architecture definitions have a name prefix following
+ * the format:
+ *
+ * E<type>_<min-rev><max-rev>_
+ *
+ * The following <type> strings are used:
+ *
+ * MMIO register Host memory structure
+ * -------------------------------------------------------------
+ * Address R
+ * Bitfield RF SF
+ * Enumerator FE SE
+ *
+ * <min-rev> is the first revision to which the definition applies:
+ *
+ * G: Riverhead
+ *
+ * If the definition has been changed or removed in later revisions
+ * then <max-rev> is the last revision to which the definition applies;
+ * otherwise it is "Z".
+ */
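+
+/* For example, ERF_GZ_EVQ_ID is an MMIO register bitfield (RF), first
+ * defined for Riverhead (G) and unchanged in later revisions (Z).
+ */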
+
+/**************************************************************************
+ *
+ * EF100 registers and descriptors
+ *
+ **************************************************************************
+ */
+
+/* HW_REV_ID_REG: Hardware revision info register */
+#define ER_GZ_HW_REV_ID 0x00000000
+
+/* NIC_REV_ID: SoftNIC revision info register */
+#define ER_GZ_NIC_REV_ID 0x00000004
+
+/* NIC_MAGIC: Signature register that should contain a well-known value */
+#define ER_GZ_NIC_MAGIC 0x00000008
+#define ERF_GZ_NIC_MAGIC_LBN 0
+#define ERF_GZ_NIC_MAGIC_WIDTH 32
+#define EFE_GZ_NIC_MAGIC_EXPECTED 0xEF100FCB
+
+/* MC_SFT_STATUS: MC soft status */
+#define ER_GZ_MC_SFT_STATUS 0x00000010
+#define ER_GZ_MC_SFT_STATUS_STEP 4
+#define ER_GZ_MC_SFT_STATUS_ROWS 2
+
+/* MC_DB_LWRD_REG: MC doorbell register, low word */
+#define ER_GZ_MC_DB_LWRD 0x00000020
+
+/* MC_DB_HWRD_REG: MC doorbell register, high word */
+#define ER_GZ_MC_DB_HWRD 0x00000024
+
+/* EVQ_INT_PRIME: Prime EVQ */
+#define ER_GZ_EVQ_INT_PRIME 0x00000040
+#define ERF_GZ_IDX_LBN 16
+#define ERF_GZ_IDX_WIDTH 16
+#define ERF_GZ_EVQ_ID_LBN 0
+#define ERF_GZ_EVQ_ID_WIDTH 16
+
+/* INT_AGG_RING_PRIME: Prime interrupt aggregation ring. */
+#define ER_GZ_INT_AGG_RING_PRIME 0x00000048
+/* defined as ERF_GZ_IDX_LBN 16; access=WO reset=0x0 */
+/* defined as ERF_GZ_IDX_WIDTH 16 */
+#define ERF_GZ_RING_ID_LBN 0
+#define ERF_GZ_RING_ID_WIDTH 16
+
+/* EVQ_TMR: EVQ timer control */
+#define ER_GZ_EVQ_TMR 0x00000104
+#define ER_GZ_EVQ_TMR_STEP 65536
+#define ER_GZ_EVQ_TMR_ROWS 1024
+
+/* EVQ_UNSOL_CREDIT_GRANT_SEQ: Grant credits for unsolicited events. */
+#define ER_GZ_EVQ_UNSOL_CREDIT_GRANT_SEQ 0x00000108
+#define ER_GZ_EVQ_UNSOL_CREDIT_GRANT_SEQ_STEP 65536
+#define ER_GZ_EVQ_UNSOL_CREDIT_GRANT_SEQ_ROWS 1024
+
+/* EVQ_DESC_CREDIT_GRANT_SEQ: Grant credits for descriptor proxy events. */
+#define ER_GZ_EVQ_DESC_CREDIT_GRANT_SEQ 0x00000110
+#define ER_GZ_EVQ_DESC_CREDIT_GRANT_SEQ_STEP 65536
+#define ER_GZ_EVQ_DESC_CREDIT_GRANT_SEQ_ROWS 1024
+
+/* RX_RING_DOORBELL: Ring Rx doorbell. */
+#define ER_GZ_RX_RING_DOORBELL 0x00000180
+#define ER_GZ_RX_RING_DOORBELL_STEP 65536
+#define ER_GZ_RX_RING_DOORBELL_ROWS 1024
+#define ERF_GZ_RX_RING_PIDX_LBN 16
+#define ERF_GZ_RX_RING_PIDX_WIDTH 16
+
+/* TX_RING_DOORBELL: Ring Tx doorbell. */
+#define ER_GZ_TX_RING_DOORBELL 0x00000200
+#define ER_GZ_TX_RING_DOORBELL_STEP 65536
+#define ER_GZ_TX_RING_DOORBELL_ROWS 1024
+#define ERF_GZ_TX_RING_PIDX_LBN 16
+#define ERF_GZ_TX_RING_PIDX_WIDTH 16
+
+/* TX_DESC_PUSH: Tx ring descriptor push. Reserved for future use. */
+#define ER_GZ_TX_DESC_PUSH 0x00000210
+#define ER_GZ_TX_DESC_PUSH_STEP 65536
+#define ER_GZ_TX_DESC_PUSH_ROWS 1024
+
+/* THE_TIME: NIC hardware time */
+#define ER_GZ_THE_TIME 0x00000280
+#define ER_GZ_THE_TIME_STEP 65536
+#define ER_GZ_THE_TIME_ROWS 1024
+#define ERF_GZ_THE_TIME_SECS_LBN 32
+#define ERF_GZ_THE_TIME_SECS_WIDTH 32
+#define ERF_GZ_THE_TIME_NANOS_LBN 2
+#define ERF_GZ_THE_TIME_NANOS_WIDTH 30
+#define ERF_GZ_THE_TIME_CLOCK_IN_SYNC_LBN 1
+#define ERF_GZ_THE_TIME_CLOCK_IN_SYNC_WIDTH 1
+#define ERF_GZ_THE_TIME_CLOCK_IS_SET_LBN 0
+#define ERF_GZ_THE_TIME_CLOCK_IS_SET_WIDTH 1
+
+/* PARAMS_TLV_LEN: Size of design parameters area in bytes */
+#define ER_GZ_PARAMS_TLV_LEN 0x00000c00
+#define ER_GZ_PARAMS_TLV_LEN_STEP 65536
+#define ER_GZ_PARAMS_TLV_LEN_ROWS 1024
+
+/* PARAMS_TLV: Design parameters */
+#define ER_GZ_PARAMS_TLV 0x00000c04
+#define ER_GZ_PARAMS_TLV_STEP 65536
+#define ER_GZ_PARAMS_TLV_ROWS 1024
+
+/* EW_EMBEDDED_EVENT */
+#define ESF_GZ_EV_256_EVENT_LBN 0
+#define ESF_GZ_EV_256_EVENT_WIDTH 64
+#define ESE_GZ_EW_EMBEDDED_EVENT_STRUCT_SIZE 64
+
+/* NMMU_PAGESZ_2M_ADDR */
+#define ESF_GZ_NMMU_2M_PAGE_SIZE_ID_LBN 59
+#define ESF_GZ_NMMU_2M_PAGE_SIZE_ID_WIDTH 5
+#define ESE_GZ_NMMU_PAGE_SIZE_2M 9
+#define ESF_GZ_NMMU_2M_PAGE_ID_LBN 21
+#define ESF_GZ_NMMU_2M_PAGE_ID_WIDTH 38
+#define ESF_GZ_NMMU_2M_PAGE_OFFSET_LBN 0
+#define ESF_GZ_NMMU_2M_PAGE_OFFSET_WIDTH 21
+#define ESE_GZ_NMMU_PAGESZ_2M_ADDR_STRUCT_SIZE 64
+
+/* PARAM_TLV */
+#define ESF_GZ_TLV_VALUE_LBN 16
+#define ESF_GZ_TLV_VALUE_WIDTH 8
+#define ESE_GZ_TLV_VALUE_LENMIN 8
+#define ESE_GZ_TLV_VALUE_LENMAX 2040
+#define ESF_GZ_TLV_LEN_LBN 8
+#define ESF_GZ_TLV_LEN_WIDTH 8
+#define ESF_GZ_TLV_TYPE_LBN 0
+#define ESF_GZ_TLV_TYPE_WIDTH 8
+#define ESE_GZ_DP_NMMU_GROUP_SIZE 5
+#define ESE_GZ_DP_EVQ_UNSOL_CREDIT_SEQ_BITS 4
+#define ESE_GZ_DP_TX_EV_NUM_DESCS_BITS 3
+#define ESE_GZ_DP_RX_EV_NUM_PACKETS_BITS 2
+#define ESE_GZ_DP_PARTIAL_TSTAMP_SUB_NANO_BITS 1
+#define ESE_GZ_DP_PAD 0
+#define ESE_GZ_PARAM_TLV_STRUCT_SIZE 24
+
+/* PCI_EXPRESS_XCAP_HDR */
+#define ESF_GZ_PCI_EXPRESS_XCAP_NEXT_LBN 20
+#define ESF_GZ_PCI_EXPRESS_XCAP_NEXT_WIDTH 12
+#define ESF_GZ_PCI_EXPRESS_XCAP_VER_LBN 16
+#define ESF_GZ_PCI_EXPRESS_XCAP_VER_WIDTH 4
+#define ESE_GZ_PCI_EXPRESS_XCAP_VER_VSEC 1
+#define ESF_GZ_PCI_EXPRESS_XCAP_ID_LBN 0
+#define ESF_GZ_PCI_EXPRESS_XCAP_ID_WIDTH 16
+#define ESE_GZ_PCI_EXPRESS_XCAP_ID_VNDR 0xb
+#define ESE_GZ_PCI_EXPRESS_XCAP_HDR_STRUCT_SIZE 32
+
+/* RHEAD_BASE_EVENT */
+#define ESF_GZ_E_TYPE_LBN 60
+#define ESF_GZ_E_TYPE_WIDTH 4
+#define ESE_GZ_EF100_EV_DRIVER 5
+#define ESE_GZ_EF100_EV_MCDI 4
+#define ESE_GZ_EF100_EV_CONTROL 3
+#define ESE_GZ_EF100_EV_TX_TIMESTAMP 2
+#define ESE_GZ_EF100_EV_TX_COMPLETION 1
+#define ESE_GZ_EF100_EV_RX_PKTS 0
+#define ESF_GZ_EV_EVQ_PHASE_LBN 59
+#define ESF_GZ_EV_EVQ_PHASE_WIDTH 1
+#define ESE_GZ_RHEAD_BASE_EVENT_STRUCT_SIZE 64
+
+/* RHEAD_EW_EVENT */
+#define ESF_GZ_EV_256_EV32_PHASE_LBN 255
+#define ESF_GZ_EV_256_EV32_PHASE_WIDTH 1
+#define ESF_GZ_EV_256_EV32_TYPE_LBN 251
+#define ESF_GZ_EV_256_EV32_TYPE_WIDTH 4
+#define ESE_GZ_EF100_EVEW_VIRTQ_DESC 2
+#define ESE_GZ_EF100_EVEW_TXQ_DESC 1
+#define ESE_GZ_EF100_EVEW_64BIT 0
+#define ESE_GZ_RHEAD_EW_EVENT_STRUCT_SIZE 256
+
+/* RX_DESC */
+#define ESF_GZ_RX_BUF_ADDR_LBN 0
+#define ESF_GZ_RX_BUF_ADDR_WIDTH 64
+#define ESE_GZ_RX_DESC_STRUCT_SIZE 64
+
+/* TXQ_DESC_PROXY_EVENT */
+#define ESF_GZ_EV_TXQ_DP_VI_ID_LBN 128
+#define ESF_GZ_EV_TXQ_DP_VI_ID_WIDTH 16
+#define ESF_GZ_EV_TXQ_DP_TXQ_DESC_LBN 0
+#define ESF_GZ_EV_TXQ_DP_TXQ_DESC_WIDTH 128
+#define ESE_GZ_TXQ_DESC_PROXY_EVENT_STRUCT_SIZE 144
+
+/* TX_DESC_TYPE */
+#define ESF_GZ_TX_DESC_TYPE_LBN 124
+#define ESF_GZ_TX_DESC_TYPE_WIDTH 4
+#define ESE_GZ_TX_DESC_TYPE_DESC2CMPT 7
+#define ESE_GZ_TX_DESC_TYPE_MEM2MEM 4
+#define ESE_GZ_TX_DESC_TYPE_SEG 3
+#define ESE_GZ_TX_DESC_TYPE_TSO 2
+#define ESE_GZ_TX_DESC_TYPE_PREFIX 1
+#define ESE_GZ_TX_DESC_TYPE_SEND 0
+#define ESE_GZ_TX_DESC_TYPE_STRUCT_SIZE 128
+
+/* VIRTQ_DESC_PROXY_EVENT */
+#define ESF_GZ_EV_VQ_DP_AVAIL_ENTRY_LBN 144
+#define ESF_GZ_EV_VQ_DP_AVAIL_ENTRY_WIDTH 16
+#define ESF_GZ_EV_VQ_DP_VI_ID_LBN 128
+#define ESF_GZ_EV_VQ_DP_VI_ID_WIDTH 16
+#define ESF_GZ_EV_VQ_DP_VIRTQ_DESC_LBN 0
+#define ESF_GZ_EV_VQ_DP_VIRTQ_DESC_WIDTH 128
+#define ESE_GZ_VIRTQ_DESC_PROXY_EVENT_STRUCT_SIZE 160
+
+/* XIL_CFGBAR_TBL_ENTRY */
+#define ESF_GZ_CFGBAR_CONT_CAP_OFF_HI_LBN 96
+#define ESF_GZ_CFGBAR_CONT_CAP_OFF_HI_WIDTH 32
+#define ESF_GZ_CFGBAR_CONT_CAP_OFFSET_LBN 68
+#define ESF_GZ_CFGBAR_CONT_CAP_OFFSET_WIDTH 60
+#define ESE_GZ_CONT_CAP_OFFSET_BYTES_SHIFT 4
+#define ESF_GZ_CFGBAR_EF100_FUNC_CTL_WIN_OFF_LBN 67
+#define ESF_GZ_CFGBAR_EF100_FUNC_CTL_WIN_OFF_WIDTH 29
+#define ESE_GZ_EF100_FUNC_CTL_WIN_OFF_SHIFT 4
+#define ESF_GZ_CFGBAR_CONT_CAP_OFF_LO_LBN 68
+#define ESF_GZ_CFGBAR_CONT_CAP_OFF_LO_WIDTH 28
+#define ESF_GZ_CFGBAR_CONT_CAP_RSV_LBN 67
+#define ESF_GZ_CFGBAR_CONT_CAP_RSV_WIDTH 1
+#define ESF_GZ_CFGBAR_EF100_BAR_LBN 64
+#define ESF_GZ_CFGBAR_EF100_BAR_WIDTH 3
+#define ESE_GZ_CFGBAR_EF100_BAR_NUM_INVALID 7
+#define ESE_GZ_CFGBAR_EF100_BAR_NUM_EXPANSION_ROM 6
+#define ESF_GZ_CFGBAR_CONT_CAP_BAR_LBN 64
+#define ESF_GZ_CFGBAR_CONT_CAP_BAR_WIDTH 3
+#define ESE_GZ_CFGBAR_CONT_CAP_BAR_NUM_INVALID 7
+#define ESE_GZ_CFGBAR_CONT_CAP_BAR_NUM_EXPANSION_ROM 6
+#define ESF_GZ_CFGBAR_ENTRY_SIZE_LBN 32
+#define ESF_GZ_CFGBAR_ENTRY_SIZE_WIDTH 32
+#define ESE_GZ_CFGBAR_ENTRY_SIZE_EF100 12
+#define ESE_GZ_CFGBAR_ENTRY_HEADER_SIZE 8
+#define ESF_GZ_CFGBAR_ENTRY_LAST_LBN 28
+#define ESF_GZ_CFGBAR_ENTRY_LAST_WIDTH 1
+#define ESF_GZ_CFGBAR_ENTRY_REV_LBN 20
+#define ESF_GZ_CFGBAR_ENTRY_REV_WIDTH 8
+#define ESE_GZ_CFGBAR_ENTRY_REV_EF100 0
+#define ESF_GZ_CFGBAR_ENTRY_FORMAT_LBN 0
+#define ESF_GZ_CFGBAR_ENTRY_FORMAT_WIDTH 20
+#define ESE_GZ_CFGBAR_ENTRY_LAST 0xfffff
+#define ESE_GZ_CFGBAR_ENTRY_CONT_CAP_ADDR 0xffffe
+#define ESE_GZ_CFGBAR_ENTRY_EF100 0xef100
+#define ESE_GZ_XIL_CFGBAR_TBL_ENTRY_STRUCT_SIZE 128
+
+/* XIL_CFGBAR_VSEC */
+#define ESF_GZ_VSEC_TBL_OFF_HI_LBN 64
+#define ESF_GZ_VSEC_TBL_OFF_HI_WIDTH 32
+#define ESE_GZ_VSEC_TBL_OFF_HI_BYTES_SHIFT 32
+#define ESF_GZ_VSEC_TBL_OFF_LO_LBN 36
+#define ESF_GZ_VSEC_TBL_OFF_LO_WIDTH 28
+#define ESE_GZ_VSEC_TBL_OFF_LO_BYTES_SHIFT 4
+#define ESF_GZ_VSEC_TBL_BAR_LBN 32
+#define ESF_GZ_VSEC_TBL_BAR_WIDTH 4
+#define ESE_GZ_VSEC_BAR_NUM_INVALID 7
+#define ESE_GZ_VSEC_BAR_NUM_EXPANSION_ROM 6
+#define ESF_GZ_VSEC_LEN_LBN 20
+#define ESF_GZ_VSEC_LEN_WIDTH 12
+#define ESE_GZ_VSEC_LEN_HIGH_OFFT 16
+#define ESE_GZ_VSEC_LEN_MIN 12
+#define ESF_GZ_VSEC_VER_LBN 16
+#define ESF_GZ_VSEC_VER_WIDTH 4
+#define ESE_GZ_VSEC_VER_XIL_CFGBAR 0
+#define ESF_GZ_VSEC_ID_LBN 0
+#define ESF_GZ_VSEC_ID_WIDTH 16
+#define ESE_GZ_XILINX_VSEC_ID 0x20
+#define ESE_GZ_XIL_CFGBAR_VSEC_STRUCT_SIZE 96
+
+/* rh_egres_hclass */
+#define ESF_GZ_RX_PREFIX_HCLASS_TUN_OUTER_L4_CSUM_LBN 15
+#define ESF_GZ_RX_PREFIX_HCLASS_TUN_OUTER_L4_CSUM_WIDTH 1
+#define ESF_GZ_RX_PREFIX_HCLASS_TUN_OUTER_L3_CLASS_LBN 13
+#define ESF_GZ_RX_PREFIX_HCLASS_TUN_OUTER_L3_CLASS_WIDTH 2
+#define ESF_GZ_RX_PREFIX_HCLASS_NT_OR_INNER_L4_CSUM_LBN 12
+#define ESF_GZ_RX_PREFIX_HCLASS_NT_OR_INNER_L4_CSUM_WIDTH 1
+#define ESF_GZ_RX_PREFIX_HCLASS_NT_OR_INNER_L4_CLASS_LBN 10
+#define ESF_GZ_RX_PREFIX_HCLASS_NT_OR_INNER_L4_CLASS_WIDTH 2
+#define ESF_GZ_RX_PREFIX_HCLASS_NT_OR_INNER_L3_CLASS_LBN 8
+#define ESF_GZ_RX_PREFIX_HCLASS_NT_OR_INNER_L3_CLASS_WIDTH 2
+#define ESF_GZ_RX_PREFIX_HCLASS_TUNNEL_CLASS_LBN 5
+#define ESF_GZ_RX_PREFIX_HCLASS_TUNNEL_CLASS_WIDTH 3
+#define ESF_GZ_RX_PREFIX_HCLASS_L2_N_VLAN_LBN 3
+#define ESF_GZ_RX_PREFIX_HCLASS_L2_N_VLAN_WIDTH 2
+#define ESF_GZ_RX_PREFIX_HCLASS_L2_CLASS_LBN 2
+#define ESF_GZ_RX_PREFIX_HCLASS_L2_CLASS_WIDTH 1
+#define ESF_GZ_RX_PREFIX_HCLASS_L2_STATUS_LBN 0
+#define ESF_GZ_RX_PREFIX_HCLASS_L2_STATUS_WIDTH 2
+#define ESE_GZ_RH_EGRES_HCLASS_STRUCT_SIZE 16
+
+/* sf_driver */
+#define ESF_GZ_DRIVER_E_TYPE_LBN 60
+#define ESF_GZ_DRIVER_E_TYPE_WIDTH 4
+#define ESF_GZ_DRIVER_PHASE_LBN 59
+#define ESF_GZ_DRIVER_PHASE_WIDTH 1
+#define ESF_GZ_DRIVER_DATA_LBN 0
+#define ESF_GZ_DRIVER_DATA_WIDTH 59
+#define ESE_GZ_SF_DRIVER_STRUCT_SIZE 64
+
+/* sf_ev_rsvd */
+#define ESF_GZ_EV_RSVD_TBD_NEXT_LBN 34
+#define ESF_GZ_EV_RSVD_TBD_NEXT_WIDTH 3
+#define ESF_GZ_EV_RSVD_EVENT_GEN_FLAGS_LBN 30
+#define ESF_GZ_EV_RSVD_EVENT_GEN_FLAGS_WIDTH 4
+#define ESF_GZ_EV_RSVD_SRC_QID_LBN 18
+#define ESF_GZ_EV_RSVD_SRC_QID_WIDTH 12
+#define ESF_GZ_EV_RSVD_SEQ_NUM_LBN 2
+#define ESF_GZ_EV_RSVD_SEQ_NUM_WIDTH 16
+#define ESF_GZ_EV_RSVD_TBD_LBN 0
+#define ESF_GZ_EV_RSVD_TBD_WIDTH 2
+#define ESE_GZ_SF_EV_RSVD_STRUCT_SIZE 37
+
+/* sf_flush_evnt */
+#define ESF_GZ_EV_FLSH_E_TYPE_LBN 60
+#define ESF_GZ_EV_FLSH_E_TYPE_WIDTH 4
+#define ESF_GZ_EV_FLSH_PHASE_LBN 59
+#define ESF_GZ_EV_FLSH_PHASE_WIDTH 1
+#define ESF_GZ_EV_FLSH_SUB_TYPE_LBN 53
+#define ESF_GZ_EV_FLSH_SUB_TYPE_WIDTH 6
+#define ESF_GZ_EV_FLSH_RSVD_LBN 10
+#define ESF_GZ_EV_FLSH_RSVD_WIDTH 43
+#define ESF_GZ_EV_FLSH_LABEL_LBN 4
+#define ESF_GZ_EV_FLSH_LABEL_WIDTH 6
+#define ESF_GZ_EV_FLSH_FLUSH_TYPE_LBN 0
+#define ESF_GZ_EV_FLSH_FLUSH_TYPE_WIDTH 4
+#define ESE_GZ_SF_FLUSH_EVNT_STRUCT_SIZE 64
+
+/* sf_rx_pkts */
+#define ESF_GZ_EV_RXPKTS_E_TYPE_LBN 60
+#define ESF_GZ_EV_RXPKTS_E_TYPE_WIDTH 4
+#define ESF_GZ_EV_RXPKTS_PHASE_LBN 59
+#define ESF_GZ_EV_RXPKTS_PHASE_WIDTH 1
+#define ESF_GZ_EV_RXPKTS_RSVD_LBN 22
+#define ESF_GZ_EV_RXPKTS_RSVD_WIDTH 37
+#define ESF_GZ_EV_RXPKTS_Q_LABEL_LBN 16
+#define ESF_GZ_EV_RXPKTS_Q_LABEL_WIDTH 6
+#define ESF_GZ_EV_RXPKTS_NUM_PKT_LBN 0
+#define ESF_GZ_EV_RXPKTS_NUM_PKT_WIDTH 16
+#define ESE_GZ_SF_RX_PKTS_STRUCT_SIZE 64
+
+/* sf_rx_prefix */
+#define ESF_GZ_RX_PREFIX_VLAN_STRIP_TCI_LBN 160
+#define ESF_GZ_RX_PREFIX_VLAN_STRIP_TCI_WIDTH 16
+#define ESF_GZ_RX_PREFIX_CSUM_FRAME_LBN 144
+#define ESF_GZ_RX_PREFIX_CSUM_FRAME_WIDTH 16
+#define ESF_GZ_RX_PREFIX_INGRESS_VPORT_LBN 128
+#define ESF_GZ_RX_PREFIX_INGRESS_VPORT_WIDTH 16
+#define ESF_GZ_RX_PREFIX_USER_MARK_LBN 96
+#define ESF_GZ_RX_PREFIX_USER_MARK_WIDTH 32
+#define ESF_GZ_RX_PREFIX_RSS_HASH_LBN 64
+#define ESF_GZ_RX_PREFIX_RSS_HASH_WIDTH 32
+#define ESF_GZ_RX_PREFIX_PARTIAL_TSTAMP_LBN 32
+#define ESF_GZ_RX_PREFIX_PARTIAL_TSTAMP_WIDTH 32
+#define ESF_GZ_RX_PREFIX_CLASS_LBN 16
+#define ESF_GZ_RX_PREFIX_CLASS_WIDTH 16
+#define ESF_GZ_RX_PREFIX_USER_FLAG_LBN 15
+#define ESF_GZ_RX_PREFIX_USER_FLAG_WIDTH 1
+#define ESF_GZ_RX_PREFIX_RSS_HASH_VALID_LBN 14
+#define ESF_GZ_RX_PREFIX_RSS_HASH_VALID_WIDTH 1
+#define ESF_GZ_RX_PREFIX_LENGTH_LBN 0
+#define ESF_GZ_RX_PREFIX_LENGTH_WIDTH 14
+#define ESE_GZ_SF_RX_PREFIX_STRUCT_SIZE 176
+
+/* sf_rxtx_generic */
+#define ESF_GZ_EV_BARRIER_LBN 167
+#define ESF_GZ_EV_BARRIER_WIDTH 1
+#define ESF_GZ_EV_RSVD_LBN 130
+#define ESF_GZ_EV_RSVD_WIDTH 37
+#define ESF_GZ_EV_DPRXY_LBN 129
+#define ESF_GZ_EV_DPRXY_WIDTH 1
+#define ESF_GZ_EV_VIRTIO_LBN 128
+#define ESF_GZ_EV_VIRTIO_WIDTH 1
+#define ESF_GZ_EV_COUNT_LBN 0
+#define ESF_GZ_EV_COUNT_WIDTH 128
+#define ESE_GZ_SF_RXTX_GENERIC_STRUCT_SIZE 168
+
+/* sf_ts_stamp */
+#define ESF_GZ_EV_TS_E_TYPE_LBN 60
+#define ESF_GZ_EV_TS_E_TYPE_WIDTH 4
+#define ESF_GZ_EV_TS_PHASE_LBN 59
+#define ESF_GZ_EV_TS_PHASE_WIDTH 1
+#define ESF_GZ_EV_TS_RSVD_LBN 56
+#define ESF_GZ_EV_TS_RSVD_WIDTH 3
+#define ESF_GZ_EV_TS_STATUS_LBN 54
+#define ESF_GZ_EV_TS_STATUS_WIDTH 2
+#define ESF_GZ_EV_TS_Q_LABEL_LBN 48
+#define ESF_GZ_EV_TS_Q_LABEL_WIDTH 6
+#define ESF_GZ_EV_TS_DESC_ID_LBN 32
+#define ESF_GZ_EV_TS_DESC_ID_WIDTH 16
+#define ESF_GZ_EV_TS_PARTIAL_STAMP_LBN 0
+#define ESF_GZ_EV_TS_PARTIAL_STAMP_WIDTH 32
+#define ESE_GZ_SF_TS_STAMP_STRUCT_SIZE 64
+
+/* sf_tx_cmplt */
+#define ESF_GZ_EV_TXCMPL_E_TYPE_LBN 60
+#define ESF_GZ_EV_TXCMPL_E_TYPE_WIDTH 4
+#define ESF_GZ_EV_TXCMPL_PHASE_LBN 59
+#define ESF_GZ_EV_TXCMPL_PHASE_WIDTH 1
+#define ESF_GZ_EV_TXCMPL_RSVD_LBN 22
+#define ESF_GZ_EV_TXCMPL_RSVD_WIDTH 37
+#define ESF_GZ_EV_TXCMPL_Q_LABEL_LBN 16
+#define ESF_GZ_EV_TXCMPL_Q_LABEL_WIDTH 6
+#define ESF_GZ_EV_TXCMPL_NUM_DESC_LBN 0
+#define ESF_GZ_EV_TXCMPL_NUM_DESC_WIDTH 16
+#define ESE_GZ_SF_TX_CMPLT_STRUCT_SIZE 64
+
+/* sf_tx_desc2cmpt_dsc_fmt */
+#define ESF_GZ_D2C_TGT_VI_ID_LBN 108
+#define ESF_GZ_D2C_TGT_VI_ID_WIDTH 16
+#define ESF_GZ_D2C_CMPT2_LBN 107
+#define ESF_GZ_D2C_CMPT2_WIDTH 1
+#define ESF_GZ_D2C_ABS_VI_ID_LBN 106
+#define ESF_GZ_D2C_ABS_VI_ID_WIDTH 1
+#define ESF_GZ_D2C_ORDERED_LBN 105
+#define ESF_GZ_D2C_ORDERED_WIDTH 1
+#define ESF_GZ_D2C_SKIP_N_LBN 97
+#define ESF_GZ_D2C_SKIP_N_WIDTH 8
+#define ESF_GZ_D2C_RSVD_LBN 64
+#define ESF_GZ_D2C_RSVD_WIDTH 33
+#define ESF_GZ_D2C_COMPLETION_LBN 0
+#define ESF_GZ_D2C_COMPLETION_WIDTH 64
+#define ESE_GZ_SF_TX_DESC2CMPT_DSC_FMT_STRUCT_SIZE 124
+
+/* sf_tx_mem2mem_dsc_fmt */
+#define ESF_GZ_M2M_ADDR_SPC_EN_LBN 123
+#define ESF_GZ_M2M_ADDR_SPC_EN_WIDTH 1
+#define ESF_GZ_M2M_TRANSLATE_ADDR_LBN 122
+#define ESF_GZ_M2M_TRANSLATE_ADDR_WIDTH 1
+#define ESF_GZ_M2M_RSVD_LBN 120
+#define ESF_GZ_M2M_RSVD_WIDTH 2
+#define ESF_GZ_M2M_ADDR_SPC_LBN 108
+#define ESF_GZ_M2M_ADDR_SPC_WIDTH 12
+#define ESF_GZ_M2M_ADDR_SPC_PASID_LBN 86
+#define ESF_GZ_M2M_ADDR_SPC_PASID_WIDTH 22
+#define ESF_GZ_M2M_ADDR_SPC_MODE_LBN 84
+#define ESF_GZ_M2M_ADDR_SPC_MODE_WIDTH 2
+#define ESF_GZ_M2M_LEN_MINUS_1_LBN 64
+#define ESF_GZ_M2M_LEN_MINUS_1_WIDTH 20
+#define ESF_GZ_M2M_ADDR_LBN 0
+#define ESF_GZ_M2M_ADDR_WIDTH 64
+#define ESE_GZ_SF_TX_MEM2MEM_DSC_FMT_STRUCT_SIZE 124
+
+/* sf_tx_ovr_dsc_fmt */
+#define ESF_GZ_TX_PREFIX_MARK_EN_LBN 123
+#define ESF_GZ_TX_PREFIX_MARK_EN_WIDTH 1
+#define ESF_GZ_TX_PREFIX_INGRESS_MPORT_EN_LBN 122
+#define ESF_GZ_TX_PREFIX_INGRESS_MPORT_EN_WIDTH 1
+#define ESF_GZ_TX_PREFIX_INLINE_CAPSULE_META_LBN 121
+#define ESF_GZ_TX_PREFIX_INLINE_CAPSULE_META_WIDTH 1
+#define ESF_GZ_TX_PREFIX_EGRESS_MPORT_EN_LBN 120
+#define ESF_GZ_TX_PREFIX_EGRESS_MPORT_EN_WIDTH 1
+#define ESF_GZ_TX_PREFIX_RSRVD_LBN 64
+#define ESF_GZ_TX_PREFIX_RSRVD_WIDTH 56
+#define ESF_GZ_TX_PREFIX_EGRESS_MPORT_LBN 48
+#define ESF_GZ_TX_PREFIX_EGRESS_MPORT_WIDTH 16
+#define ESF_GZ_TX_PREFIX_INGRESS_MPORT_LBN 32
+#define ESF_GZ_TX_PREFIX_INGRESS_MPORT_WIDTH 16
+#define ESF_GZ_TX_PREFIX_MARK_LBN 0
+#define ESF_GZ_TX_PREFIX_MARK_WIDTH 32
+#define ESE_GZ_SF_TX_OVR_DSC_FMT_STRUCT_SIZE 124
+
+/* sf_tx_seg_dsc_fmt */
+#define ESF_GZ_TX_SEG_ADDR_SPC_EN_LBN 123
+#define ESF_GZ_TX_SEG_ADDR_SPC_EN_WIDTH 1
+#define ESF_GZ_TX_SEG_TRANSLATE_ADDR_LBN 122
+#define ESF_GZ_TX_SEG_TRANSLATE_ADDR_WIDTH 1
+#define ESF_GZ_TX_SEG_RSVD2_LBN 120
+#define ESF_GZ_TX_SEG_RSVD2_WIDTH 2
+#define ESF_GZ_TX_SEG_ADDR_SPC_LBN 108
+#define ESF_GZ_TX_SEG_ADDR_SPC_WIDTH 12
+#define ESF_GZ_TX_SEG_ADDR_SPC_PASID_LBN 86
+#define ESF_GZ_TX_SEG_ADDR_SPC_PASID_WIDTH 22
+#define ESF_GZ_TX_SEG_ADDR_SPC_MODE_LBN 84
+#define ESF_GZ_TX_SEG_ADDR_SPC_MODE_WIDTH 2
+#define ESF_GZ_TX_SEG_RSVD_LBN 80
+#define ESF_GZ_TX_SEG_RSVD_WIDTH 4
+#define ESF_GZ_TX_SEG_LEN_LBN 64
+#define ESF_GZ_TX_SEG_LEN_WIDTH 16
+#define ESF_GZ_TX_SEG_ADDR_LBN 0
+#define ESF_GZ_TX_SEG_ADDR_WIDTH 64
+#define ESE_GZ_SF_TX_SEG_DSC_FMT_STRUCT_SIZE 124
+
+/* sf_tx_std_dsc_fmt */
+#define ESF_GZ_TX_SEND_VLAN_INSERT_TCI_LBN 108
+#define ESF_GZ_TX_SEND_VLAN_INSERT_TCI_WIDTH 16
+#define ESF_GZ_TX_SEND_VLAN_INSERT_EN_LBN 107
+#define ESF_GZ_TX_SEND_VLAN_INSERT_EN_WIDTH 1
+#define ESF_GZ_TX_SEND_TSTAMP_REQ_LBN 106
+#define ESF_GZ_TX_SEND_TSTAMP_REQ_WIDTH 1
+#define ESF_GZ_TX_SEND_CSO_OUTER_L4_LBN 105
+#define ESF_GZ_TX_SEND_CSO_OUTER_L4_WIDTH 1
+#define ESF_GZ_TX_SEND_CSO_OUTER_L3_LBN 104
+#define ESF_GZ_TX_SEND_CSO_OUTER_L3_WIDTH 1
+#define ESF_GZ_TX_SEND_CSO_INNER_L3_LBN 101
+#define ESF_GZ_TX_SEND_CSO_INNER_L3_WIDTH 3
+#define ESF_GZ_TX_SEND_RSVD_LBN 99
+#define ESF_GZ_TX_SEND_RSVD_WIDTH 2
+#define ESF_GZ_TX_SEND_CSO_PARTIAL_EN_LBN 97
+#define ESF_GZ_TX_SEND_CSO_PARTIAL_EN_WIDTH 2
+#define ESF_GZ_TX_SEND_CSO_PARTIAL_CSUM_W_LBN 92
+#define ESF_GZ_TX_SEND_CSO_PARTIAL_CSUM_W_WIDTH 5
+#define ESF_GZ_TX_SEND_CSO_PARTIAL_START_W_LBN 83
+#define ESF_GZ_TX_SEND_CSO_PARTIAL_START_W_WIDTH 9
+#define ESF_GZ_TX_SEND_NUM_SEGS_LBN 78
+#define ESF_GZ_TX_SEND_NUM_SEGS_WIDTH 5
+#define ESF_GZ_TX_SEND_LEN_LBN 64
+#define ESF_GZ_TX_SEND_LEN_WIDTH 14
+#define ESF_GZ_TX_SEND_ADDR_LBN 0
+#define ESF_GZ_TX_SEND_ADDR_WIDTH 64
+#define ESE_GZ_SF_TX_STD_DSC_FMT_STRUCT_SIZE 124
+
+/* sf_tx_tso_dsc_fmt */
+#define ESF_GZ_TX_TSO_VLAN_INSERT_TCI_LBN 108
+#define ESF_GZ_TX_TSO_VLAN_INSERT_TCI_WIDTH 16
+#define ESF_GZ_TX_TSO_VLAN_INSERT_EN_LBN 107
+#define ESF_GZ_TX_TSO_VLAN_INSERT_EN_WIDTH 1
+#define ESF_GZ_TX_TSO_TSTAMP_REQ_LBN 106
+#define ESF_GZ_TX_TSO_TSTAMP_REQ_WIDTH 1
+#define ESF_GZ_TX_TSO_CSO_OUTER_L4_LBN 105
+#define ESF_GZ_TX_TSO_CSO_OUTER_L4_WIDTH 1
+#define ESF_GZ_TX_TSO_CSO_OUTER_L3_LBN 104
+#define ESF_GZ_TX_TSO_CSO_OUTER_L3_WIDTH 1
+#define ESF_GZ_TX_TSO_CSO_INNER_L3_LBN 101
+#define ESF_GZ_TX_TSO_CSO_INNER_L3_WIDTH 3
+#define ESF_GZ_TX_TSO_RSVD_LBN 94
+#define ESF_GZ_TX_TSO_RSVD_WIDTH 7
+#define ESF_GZ_TX_TSO_CSO_INNER_L4_LBN 93
+#define ESF_GZ_TX_TSO_CSO_INNER_L4_WIDTH 1
+#define ESF_GZ_TX_TSO_INNER_L4_OFF_W_LBN 85
+#define ESF_GZ_TX_TSO_INNER_L4_OFF_W_WIDTH 8
+#define ESF_GZ_TX_TSO_INNER_L3_OFF_W_LBN 77
+#define ESF_GZ_TX_TSO_INNER_L3_OFF_W_WIDTH 8
+#define ESF_GZ_TX_TSO_OUTER_L4_OFF_W_LBN 69
+#define ESF_GZ_TX_TSO_OUTER_L4_OFF_W_WIDTH 8
+#define ESF_GZ_TX_TSO_OUTER_L3_OFF_W_LBN 64
+#define ESF_GZ_TX_TSO_OUTER_L3_OFF_W_WIDTH 5
+#define ESF_GZ_TX_TSO_PAYLOAD_LEN_LBN 42
+#define ESF_GZ_TX_TSO_PAYLOAD_LEN_WIDTH 22
+#define ESF_GZ_TX_TSO_HDR_LEN_W_LBN 34
+#define ESF_GZ_TX_TSO_HDR_LEN_W_WIDTH 8
+#define ESF_GZ_TX_TSO_ED_OUTER_UDP_LEN_LBN 33
+#define ESF_GZ_TX_TSO_ED_OUTER_UDP_LEN_WIDTH 1
+#define ESF_GZ_TX_TSO_ED_INNER_IP_LEN_LBN 32
+#define ESF_GZ_TX_TSO_ED_INNER_IP_LEN_WIDTH 1
+#define ESF_GZ_TX_TSO_ED_OUTER_IP_LEN_LBN 31
+#define ESF_GZ_TX_TSO_ED_OUTER_IP_LEN_WIDTH 1
+#define ESF_GZ_TX_TSO_ED_INNER_IP4_ID_LBN 29
+#define ESF_GZ_TX_TSO_ED_INNER_IP4_ID_WIDTH 2
+#define ESF_GZ_TX_TSO_ED_OUTER_IP4_ID_LBN 27
+#define ESF_GZ_TX_TSO_ED_OUTER_IP4_ID_WIDTH 2
+#define ESF_GZ_TX_TSO_PAYLOAD_NUM_SEGS_LBN 17
+#define ESF_GZ_TX_TSO_PAYLOAD_NUM_SEGS_WIDTH 10
+#define ESF_GZ_TX_TSO_HDR_NUM_SEGS_LBN 14
+#define ESF_GZ_TX_TSO_HDR_NUM_SEGS_WIDTH 3
+#define ESF_GZ_TX_TSO_MSS_LBN 0
+#define ESF_GZ_TX_TSO_MSS_WIDTH 14
+#define ESE_GZ_SF_TX_TSO_DSC_FMT_STRUCT_SIZE 124
+
+/* Enum DESIGN_PARAMS */
+#define ESE_EF100_DP_GZ_RX_MAX_RUNT 17
+#define ESE_EF100_DP_GZ_VI_STRIDES 16
+#define ESE_EF100_DP_GZ_NMMU_PAGE_SIZES 15
+#define ESE_EF100_DP_GZ_EVQ_TIMER_TICK_NANOS 14
+#define ESE_EF100_DP_GZ_MEM2MEM_MAX_LEN 13
+#define ESE_EF100_DP_GZ_COMPAT 12
+#define ESE_EF100_DP_GZ_TSO_MAX_NUM_FRAMES 11
+#define ESE_EF100_DP_GZ_TSO_MAX_PAYLOAD_NUM_SEGS 10
+#define ESE_EF100_DP_GZ_TSO_MAX_PAYLOAD_LEN 9
+#define ESE_EF100_DP_GZ_TXQ_SIZE_GRANULARITY 8
+#define ESE_EF100_DP_GZ_RXQ_SIZE_GRANULARITY 7
+#define ESE_EF100_DP_GZ_TSO_MAX_HDR_NUM_SEGS 6
+#define ESE_EF100_DP_GZ_TSO_MAX_HDR_LEN 5
+#define ESE_EF100_DP_GZ_RX_L4_CSUM_PROTOCOLS 4
+#define ESE_EF100_DP_GZ_NMMU_GROUP_SIZE 3
+#define ESE_EF100_DP_GZ_EVQ_UNSOL_CREDIT_SEQ_BITS 2
+#define ESE_EF100_DP_GZ_PARTIAL_TSTAMP_SUB_NANO_BITS 1
+#define ESE_EF100_DP_GZ_PAD 0
+
+/* Enum DESIGN_PARAM_DEFAULTS */
+#define ESE_EF100_DP_GZ_TSO_MAX_PAYLOAD_LEN_DEFAULT 0x3fffff
+#define ESE_EF100_DP_GZ_TSO_MAX_NUM_FRAMES_DEFAULT 8192
+#define ESE_EF100_DP_GZ_MEM2MEM_MAX_LEN_DEFAULT 8192
+#define ESE_EF100_DP_GZ_RX_L4_CSUM_PROTOCOLS_DEFAULT 0x1106
+#define ESE_EF100_DP_GZ_TSO_MAX_PAYLOAD_NUM_SEGS_DEFAULT 0x3ff
+#define ESE_EF100_DP_GZ_RX_MAX_RUNT_DEFAULT 640
+#define ESE_EF100_DP_GZ_EVQ_TIMER_TICK_NANOS_DEFAULT 512
+#define ESE_EF100_DP_GZ_NMMU_PAGE_SIZES_DEFAULT 512
+#define ESE_EF100_DP_GZ_TSO_MAX_HDR_LEN_DEFAULT 192
+#define ESE_EF100_DP_GZ_RXQ_SIZE_GRANULARITY_DEFAULT 64
+#define ESE_EF100_DP_GZ_TXQ_SIZE_GRANULARITY_DEFAULT 64
+#define ESE_EF100_DP_GZ_NMMU_GROUP_SIZE_DEFAULT 32
+#define ESE_EF100_DP_GZ_VI_STRIDES_DEFAULT 16
+#define ESE_EF100_DP_GZ_EVQ_UNSOL_CREDIT_SEQ_BITS_DEFAULT 7
+#define ESE_EF100_DP_GZ_TSO_MAX_HDR_NUM_SEGS_DEFAULT 4
+#define ESE_EF100_DP_GZ_PARTIAL_TSTAMP_SUB_NANO_BITS_DEFAULT 2
+#define ESE_EF100_DP_GZ_COMPAT_DEFAULT 0
+
+/* Enum HOST_IF_CONSTANTS */
+#define ESE_GZ_FCW_LEN 0x4C
+#define ESE_GZ_RX_PKT_PREFIX_LEN 22
+
+/* Enum PCI_CONSTANTS */
+#define ESE_GZ_PCI_BASE_CONFIG_SPACE_SIZE 256
+#define ESE_GZ_PCI_EXPRESS_XCAP_HDR_SIZE 4
+
+/* Enum RH_HCLASS_L2_CLASS */
+#define ESE_GZ_RH_HCLASS_L2_CLASS_E2_0123VLAN 1
+#define ESE_GZ_RH_HCLASS_L2_CLASS_OTHER 0
+
+/* Enum RH_HCLASS_L2_STATUS */
+#define ESE_GZ_RH_HCLASS_L2_STATUS_RESERVED 3
+#define ESE_GZ_RH_HCLASS_L2_STATUS_FCS_ERR 2
+#define ESE_GZ_RH_HCLASS_L2_STATUS_LEN_ERR 1
+#define ESE_GZ_RH_HCLASS_L2_STATUS_OK 0
+
+/* Enum RH_HCLASS_L3_CLASS */
+#define ESE_GZ_RH_HCLASS_L3_CLASS_OTHER 3
+#define ESE_GZ_RH_HCLASS_L3_CLASS_IP6 2
+#define ESE_GZ_RH_HCLASS_L3_CLASS_IP4BAD 1
+#define ESE_GZ_RH_HCLASS_L3_CLASS_IP4GOOD 0
+
+/* Enum RH_HCLASS_L4_CLASS */
+#define ESE_GZ_RH_HCLASS_L4_CLASS_OTHER 3
+#define ESE_GZ_RH_HCLASS_L4_CLASS_FRAG 2
+#define ESE_GZ_RH_HCLASS_L4_CLASS_UDP 1
+#define ESE_GZ_RH_HCLASS_L4_CLASS_TCP 0
+
+/* Enum RH_HCLASS_L4_CSUM */
+#define ESE_GZ_RH_HCLASS_L4_CSUM_GOOD 1
+#define ESE_GZ_RH_HCLASS_L4_CSUM_BAD_OR_UNKNOWN 0
+
+/* Enum RH_HCLASS_TUNNEL_CLASS */
+#define ESE_GZ_RH_HCLASS_TUNNEL_CLASS_RESERVED_7 7
+#define ESE_GZ_RH_HCLASS_TUNNEL_CLASS_RESERVED_6 6
+#define ESE_GZ_RH_HCLASS_TUNNEL_CLASS_RESERVED_5 5
+#define ESE_GZ_RH_HCLASS_TUNNEL_CLASS_RESERVED_4 4
+#define ESE_GZ_RH_HCLASS_TUNNEL_CLASS_GENEVE 3
+#define ESE_GZ_RH_HCLASS_TUNNEL_CLASS_NVGRE 2
+#define ESE_GZ_RH_HCLASS_TUNNEL_CLASS_VXLAN 1
+#define ESE_GZ_RH_HCLASS_TUNNEL_CLASS_NONE 0
+
+/* Enum TX_DESC_CSO_PARTIAL_EN */
+#define ESE_GZ_TX_DESC_CSO_PARTIAL_EN_TCP 2
+#define ESE_GZ_TX_DESC_CSO_PARTIAL_EN_UDP 1
+#define ESE_GZ_TX_DESC_CSO_PARTIAL_EN_OFF 0
+
+/* Enum TX_DESC_CS_INNER_L3 */
+#define ESE_GZ_TX_DESC_CS_INNER_L3_GENEVE 3
+#define ESE_GZ_TX_DESC_CS_INNER_L3_NVGRE 2
+#define ESE_GZ_TX_DESC_CS_INNER_L3_VXLAN 1
+#define ESE_GZ_TX_DESC_CS_INNER_L3_OFF 0
+
+/* Enum TX_DESC_IP4_ID */
+#define ESE_GZ_TX_DESC_IP4_ID_INC_MOD16 2
+#define ESE_GZ_TX_DESC_IP4_ID_INC_MOD15 1
+#define ESE_GZ_TX_DESC_IP4_ID_NO_OP 0
+/**************************************************************************/
+
+#define ESF_GZ_EV_DEBUG_EVENT_GEN_FLAGS_LBN 44
+#define ESF_GZ_EV_DEBUG_EVENT_GEN_FLAGS_WIDTH 4
+#define ESF_GZ_EV_DEBUG_SRC_QID_LBN 32
+#define ESF_GZ_EV_DEBUG_SRC_QID_WIDTH 12
+#define ESF_GZ_EV_DEBUG_SEQ_NUM_LBN 16
+#define ESF_GZ_EV_DEBUG_SEQ_NUM_WIDTH 16
+
+#endif /* EFX_EF100_REGS_H */
diff --git a/drivers/net/ethernet/sfc/ef100_rx.c b/drivers/net/ethernet/sfc/ef100_rx.c
new file mode 100644
index 000000000000..4223a38f46d3
--- /dev/null
+++ b/drivers/net/ethernet/sfc/ef100_rx.c
@@ -0,0 +1,31 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2005-2019 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#include "net_driver.h"
+#include "ef100_rx.h"
+#include "rx_common.h"
+#include "efx.h"
+
+/* RX stubs */
+
+void ef100_rx_write(struct efx_rx_queue *rx_queue)
+{
+}
+
+void __ef100_rx_packet(struct efx_channel *channel)
+{
+ /* Stub. No RX path yet. Discard the buffer. */
+ struct efx_rx_buffer *rx_buf = efx_rx_buffer(&channel->rx_queue,
+ channel->rx_pkt_index);
+ struct efx_rx_queue *rx_queue = efx_channel_get_rx_queue(channel);
+
+ efx_free_rx_buffers(rx_queue, rx_buf, 1);
+ channel->rx_pkt_n_frags = 0;
+}
diff --git a/drivers/net/ethernet/sfc/ef100_rx.h b/drivers/net/ethernet/sfc/ef100_rx.h
new file mode 100644
index 000000000000..b5dadf741aa0
--- /dev/null
+++ b/drivers/net/ethernet/sfc/ef100_rx.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2019 Solarflare Communications Inc.
+ * Copyright 2019-2020 Xilinx Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#ifndef EFX_EF100_RX_H
+#define EFX_EF100_RX_H
+
+#include "net_driver.h"
+
+void ef100_rx_write(struct efx_rx_queue *rx_queue);
+void __ef100_rx_packet(struct efx_channel *channel);
+
+#endif
diff --git a/drivers/net/ethernet/sfc/ef100_tx.c b/drivers/net/ethernet/sfc/ef100_tx.c
new file mode 100644
index 000000000000..15e646f8c3e0
--- /dev/null
+++ b/drivers/net/ethernet/sfc/ef100_tx.c
@@ -0,0 +1,51 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2018 Solarflare Communications Inc.
+ * Copyright 2019-2020 Xilinx Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#include "net_driver.h"
+#include "tx_common.h"
+#include "nic_common.h"
+#include "ef100_tx.h"
+
+/* TX queue stubs */
+int ef100_tx_probe(struct efx_tx_queue *tx_queue)
+{
+ return 0;
+}
+
+void ef100_tx_init(struct efx_tx_queue *tx_queue)
+{
+ /* must be the inverse of lookup in efx_get_tx_channel */
+ tx_queue->core_txq =
+ netdev_get_tx_queue(tx_queue->efx->net_dev,
+ tx_queue->channel->channel -
+ tx_queue->efx->tx_channel_offset);
+}
+
+void ef100_tx_write(struct efx_tx_queue *tx_queue)
+{
+}
+
+/* Add a socket buffer to a TX queue
+ *
+ * You must hold netif_tx_lock() to call this function.
+ *
+ * There is no TX datapath yet: the queue is stopped, the SKB is freed
+ * and NETDEV_TX_OK is returned since the SKB has been consumed.
+ */
+netdev_tx_t ef100_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
+{
+ /* Stub. No TX path yet. */
+ struct efx_nic *efx = tx_queue->efx;
+
+ netif_stop_queue(efx->net_dev);
+ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
+}
diff --git a/drivers/net/ethernet/sfc/ef100_tx.h b/drivers/net/ethernet/sfc/ef100_tx.h
new file mode 100644
index 000000000000..9a472f7aff43
--- /dev/null
+++ b/drivers/net/ethernet/sfc/ef100_tx.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2019 Solarflare Communications Inc.
+ * Copyright 2019-2020 Xilinx Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#ifndef EFX_EF100_TX_H
+#define EFX_EF100_TX_H
+
+#include "net_driver.h"
+
+int ef100_tx_probe(struct efx_tx_queue *tx_queue);
+void ef100_tx_init(struct efx_tx_queue *tx_queue);
+void ef100_tx_write(struct efx_tx_queue *tx_queue);
+
+netdev_tx_t ef100_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb);
+#endif
diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c
index f16b4f236031..f5aa1bd02f19 100644
--- a/drivers/net/ethernet/sfc/efx.c
+++ b/drivers/net/ethernet/sfc/efx.c
@@ -25,6 +25,7 @@
#include "efx.h"
#include "efx_common.h"
#include "efx_channels.h"
+#include "ef100.h"
#include "rx_common.h"
#include "tx_common.h"
#include "nic.h"
@@ -1360,8 +1361,14 @@ static int __init efx_init_module(void)
if (rc < 0)
goto err_pci;
+ rc = pci_register_driver(&ef100_pci_driver);
+ if (rc < 0)
+ goto err_pci_ef100;
+
return 0;
+ err_pci_ef100:
+ pci_unregister_driver(&efx_pci_driver);
err_pci:
efx_destroy_reset_workqueue();
err_reset:
@@ -1378,6 +1385,7 @@ static void __exit efx_exit_module(void)
{
printk(KERN_INFO "Solarflare NET driver unloading\n");
+ pci_unregister_driver(&ef100_pci_driver);
pci_unregister_driver(&efx_pci_driver);
efx_destroy_reset_workqueue();
#ifdef CONFIG_SFC_SRIOV
diff --git a/drivers/net/ethernet/sfc/efx.h b/drivers/net/ethernet/sfc/efx.h
index e7e7d8d1a07b..a9808e86068d 100644
--- a/drivers/net/ethernet/sfc/efx.h
+++ b/drivers/net/ethernet/sfc/efx.h
@@ -8,7 +8,10 @@
#ifndef EFX_EFX_H
#define EFX_EFX_H
+#include <linux/indirect_call_wrapper.h>
#include "net_driver.h"
+#include "ef100_rx.h"
+#include "ef100_tx.h"
#include "filter.h"
int efx_net_open(struct net_device *net_dev);
@@ -18,13 +21,18 @@ int efx_net_stop(struct net_device *net_dev);
void efx_init_tx_queue_core_txq(struct efx_tx_queue *tx_queue);
netdev_tx_t efx_hard_start_xmit(struct sk_buff *skb,
struct net_device *net_dev);
-netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb);
+netdev_tx_t __efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb);
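+
+/* INDIRECT_CALL_2() compares the method pointer against the two likely
+ * targets so the common case becomes a direct call, avoiding retpoline
+ * overhead on this fast path.
+ */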
+static inline netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
+{
+ return INDIRECT_CALL_2(tx_queue->efx->type->tx_enqueue,
+ ef100_enqueue_skb, __efx_enqueue_skb,
+ tx_queue, skb);
+}
void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index);
void efx_xmit_done_single(struct efx_tx_queue *tx_queue);
int efx_setup_tc(struct net_device *net_dev, enum tc_setup_type type,
void *type_data);
extern unsigned int efx_piobuf_size;
-extern bool efx_separate_tx_channels;
/* RX */
void __efx_rx_packet(struct efx_channel *channel);
@@ -33,7 +41,9 @@ void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
static inline void efx_rx_flush_packet(struct efx_channel *channel)
{
if (channel->rx_pkt_n_frags)
- __efx_rx_packet(channel);
+ INDIRECT_CALL_2(channel->efx->type->rx_packet,
+ __ef100_rx_packet, __efx_rx_packet,
+ channel);
}
/* Maximum number of TCP segments we support for soft-TSO */
diff --git a/drivers/net/ethernet/sfc/efx_common.c b/drivers/net/ethernet/sfc/efx_common.c
index 5667694c6514..26b05ec4e712 100644
--- a/drivers/net/ethernet/sfc/efx_common.c
+++ b/drivers/net/ethernet/sfc/efx_common.c
@@ -814,14 +814,18 @@ fail:
*/
int efx_reset(struct efx_nic *efx, enum reset_type method)
{
+ int rc, rc2 = 0;
bool disabled;
- int rc, rc2;
netif_info(efx, drv, efx->net_dev, "resetting (%s)\n",
RESET_TYPE(method));
efx_device_detach_sync(efx);
- efx_reset_down(efx, method);
+ /* efx_reset_down() grabs locks that prevent recovery on EF100.
+ * EF100 reset is handled in the efx_nic_type callback below.
+ */
+ if (efx_nic_rev(efx) != EFX_REV_EF100)
+ efx_reset_down(efx, method);
rc = efx->type->reset(efx, method);
if (rc) {
@@ -849,7 +853,8 @@ out:
disabled = rc ||
method == RESET_TYPE_DISABLE ||
method == RESET_TYPE_RECOVER_OR_DISABLE;
- rc2 = efx_reset_up(efx, method, !disabled);
+ if (efx_nic_rev(efx) != EFX_REV_EF100)
+ rc2 = efx_reset_up(efx, method, !disabled);
if (rc2) {
disabled = true;
if (!rc)
diff --git a/drivers/net/ethernet/sfc/ethtool.c b/drivers/net/ethernet/sfc/ethtool.c
index 9828516bd82d..4ffda7782f68 100644
--- a/drivers/net/ethernet/sfc/ethtool.c
+++ b/drivers/net/ethernet/sfc/ethtool.c
@@ -221,8 +221,6 @@ static int efx_ethtool_get_ts_info(struct net_device *net_dev,
return 0;
}
-const char *efx_driver_name = KBUILD_MODNAME;
-
const struct ethtool_ops efx_ethtool_ops = {
.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
ETHTOOL_COALESCE_USECS_IRQ |
@@ -232,7 +230,6 @@ const struct ethtool_ops efx_ethtool_ops = {
.get_regs = efx_ethtool_get_regs,
.get_msglevel = efx_ethtool_get_msglevel,
.set_msglevel = efx_ethtool_set_msglevel,
- .nway_reset = efx_ethtool_nway_reset,
.get_link = ethtool_op_get_link,
.get_coalesce = efx_ethtool_get_coalesce,
.set_coalesce = efx_ethtool_set_coalesce,
diff --git a/drivers/net/ethernet/sfc/ethtool_common.c b/drivers/net/ethernet/sfc/ethtool_common.c
index e9a5a66529bf..05ac87807929 100644
--- a/drivers/net/ethernet/sfc/ethtool_common.c
+++ b/drivers/net/ethernet/sfc/ethtool_common.c
@@ -104,7 +104,7 @@ void efx_ethtool_get_drvinfo(struct net_device *net_dev,
{
struct efx_nic *efx = netdev_priv(net_dev);
- strlcpy(info->driver, efx_driver_name, sizeof(info->driver));
+ strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
strlcpy(info->version, EFX_DRIVER_VERSION, sizeof(info->version));
efx_mcdi_print_fwver(efx, info->fw_version,
sizeof(info->fw_version));
@@ -173,14 +173,6 @@ fail:
test->flags |= ETH_TEST_FL_FAILED;
}
-/* Restart autonegotiation */
-int efx_ethtool_nway_reset(struct net_device *net_dev)
-{
- struct efx_nic *efx = netdev_priv(net_dev);
-
- return mdio45_nway_restart(&efx->mdio);
-}
-
void efx_ethtool_get_pauseparam(struct net_device *net_dev,
struct ethtool_pauseparam *pause)
{
diff --git a/drivers/net/ethernet/sfc/ethtool_common.h b/drivers/net/ethernet/sfc/ethtool_common.h
index 3f3aaa92fbb5..659491932101 100644
--- a/drivers/net/ethernet/sfc/ethtool_common.h
+++ b/drivers/net/ethernet/sfc/ethtool_common.h
@@ -11,15 +11,12 @@
#ifndef EFX_ETHTOOL_COMMON_H
#define EFX_ETHTOOL_COMMON_H
-extern const char *efx_driver_name;
-
void efx_ethtool_get_drvinfo(struct net_device *net_dev,
struct ethtool_drvinfo *info);
u32 efx_ethtool_get_msglevel(struct net_device *net_dev);
void efx_ethtool_set_msglevel(struct net_device *net_dev, u32 msg_enable);
void efx_ethtool_self_test(struct net_device *net_dev,
struct ethtool_test *test, u64 *data);
-int efx_ethtool_nway_reset(struct net_device *net_dev);
void efx_ethtool_get_pauseparam(struct net_device *net_dev,
struct ethtool_pauseparam *pause);
int efx_ethtool_set_pauseparam(struct net_device *net_dev,
diff --git a/drivers/net/ethernet/sfc/io.h b/drivers/net/ethernet/sfc/io.h
index c3c011bc6a68..30439cc83a89 100644
--- a/drivers/net/ethernet/sfc/io.h
+++ b/drivers/net/ethernet/sfc/io.h
@@ -75,6 +75,11 @@
#endif
#endif
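+
+/* Translate a register offset into an address relative to the start of
+ * the BAR; reg_base holds the function control window offset on EF100
+ * and is zero on earlier NICs.
+ */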
+static inline u32 efx_reg(struct efx_nic *efx, unsigned int reg)
+{
+ return efx->reg_base + reg;
+}
+
#ifdef EFX_USE_QWORD_IO
static inline void _efx_writeq(struct efx_nic *efx, __le64 value,
unsigned int reg)
@@ -217,8 +222,11 @@ static inline void efx_reado_table(struct efx_nic *efx, efx_oword_t *value,
efx_reado(efx, value, reg + index * sizeof(efx_oword_t));
}
-/* default VI stride (step between per-VI registers) is 8K */
-#define EFX_DEFAULT_VI_STRIDE 0x2000
+/* default VI stride (step between per-VI registers) is 8K on EF10 and
+ * 64K on EF100
+ */
+#define EFX_DEFAULT_VI_STRIDE 0x2000
+#define EF100_DEFAULT_VI_STRIDE 0x10000
/* Calculate offset to page-mapped register */
static inline unsigned int efx_paged_reg(struct efx_nic *efx, unsigned int page,
@@ -265,7 +273,9 @@ _efx_writed_page(struct efx_nic *efx, const efx_dword_t *value,
#define efx_writed_page(efx, value, reg, page) \
_efx_writed_page(efx, value, \
reg + \
- BUILD_BUG_ON_ZERO((reg) != 0x400 && \
+ BUILD_BUG_ON_ZERO((reg) != 0x180 && \
+ (reg) != 0x200 && \
+ (reg) != 0x400 && \
(reg) != 0x420 && \
(reg) != 0x830 && \
(reg) != 0x83c && \
diff --git a/drivers/net/ethernet/sfc/mcdi.c b/drivers/net/ethernet/sfc/mcdi.c
index 6c49740a178e..5467819aef6e 100644
--- a/drivers/net/ethernet/sfc/mcdi.c
+++ b/drivers/net/ethernet/sfc/mcdi.c
@@ -1337,7 +1337,7 @@ void efx_mcdi_process_event(struct efx_channel *channel,
efx_mcdi_process_link_change(efx, event);
break;
case MCDI_EVENT_CODE_SENSOREVT:
- efx_mcdi_sensor_event(efx, event);
+ efx_sensor_event(efx, event);
break;
case MCDI_EVENT_CODE_SCHEDERR:
netif_dbg(efx, hw, efx->net_dev,
diff --git a/drivers/net/ethernet/sfc/mcdi.h b/drivers/net/ethernet/sfc/mcdi.h
index e053adfe82b0..658cf345420d 100644
--- a/drivers/net/ethernet/sfc/mcdi.h
+++ b/drivers/net/ethernet/sfc/mcdi.h
@@ -327,10 +327,10 @@ void efx_mcdi_sensor_event(struct efx_nic *efx, efx_qword_t *ev);
EFX_QWORD_FIELD(_ev, MCDI_EVENT_ ## _field)
#define MCDI_CAPABILITY(field) \
- MC_CMD_GET_CAPABILITIES_V4_OUT_ ## field ## _LBN
+ MC_CMD_GET_CAPABILITIES_V8_OUT_ ## field ## _LBN
#define MCDI_CAPABILITY_OFST(field) \
- MC_CMD_GET_CAPABILITIES_V4_OUT_ ## field ## _OFST
+ MC_CMD_GET_CAPABILITIES_V8_OUT_ ## field ## _OFST
#define efx_has_cap(efx, field) \
efx->type->check_caps(efx, \
diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h
index 786fda559976..9791fac0b649 100644
--- a/drivers/net/ethernet/sfc/net_driver.h
+++ b/drivers/net/ethernet/sfc/net_driver.h
@@ -963,7 +963,9 @@ struct efx_async_filter_insertion {
* @vpd_sn: Serial number read from VPD
* @xdp_rxq_info_failed: Have any of the rx queues failed to initialise their
* xdp_rxq_info structures?
+ * @netdev_notifier: Netdevice notifier.
* @mem_bar: The BAR that is mapped into membase.
+ * @reg_base: Offset from the start of the bar to the function control window.
* @monitor_work: Hardware monitor workitem
* @biu_lock: BIU (bus interface unit) lock
* @last_irq_cpu: Last CPU to handle a possible test interrupt. This
@@ -1141,7 +1143,10 @@ struct efx_nic {
char *vpd_sn;
bool xdp_rxq_info_failed;
+ struct notifier_block netdev_notifier;
+
unsigned int mem_bar;
+ u32 reg_base;
/* The following fields may be written more often */
@@ -1244,6 +1249,7 @@ struct efx_udp_tunnel {
* @tx_init: Initialise TX queue on the NIC
* @tx_remove: Free resources for TX queue
* @tx_write: Write TX descriptors and doorbell
+ * @tx_enqueue: Add an SKB to TX queue
* @rx_push_rss_config: Write RSS hash key and indirection table to the NIC
* @rx_pull_rss_config: Read RSS hash key and indirection table back from the NIC
* @rx_push_rss_context_config: Write RSS hash key and indirection table for
@@ -1255,6 +1261,7 @@ struct efx_udp_tunnel {
* @rx_remove: Free resources for RX queue
* @rx_write: Write RX descriptors and doorbell
* @rx_defer_refill: Generate a refill reminder event
+ * @rx_packet: Receive the queued RX buffer on a channel
* @ev_probe: Allocate resources for event queue
* @ev_init: Initialise event queue on the NIC
* @ev_fini: Deinitialise event queue on the NIC
@@ -1299,6 +1306,7 @@ struct efx_udp_tunnel {
* @udp_tnl_push_ports: Push the list of UDP tunnel ports to the NIC if required.
* @udp_tnl_has_port: Check if a port has been added as UDP tunnel
* @print_additional_fwver: Dump NIC-specific additional FW version info
+ * @sensor_event: Handle a sensor event from MCDI
* @revision: Hardware architecture revision
* @txd_ptr_tbl_base: TX descriptor ring base address
* @rxd_ptr_tbl_base: RX descriptor ring base address
@@ -1379,6 +1387,7 @@ struct efx_nic_type {
void (*tx_init)(struct efx_tx_queue *tx_queue);
void (*tx_remove)(struct efx_tx_queue *tx_queue);
void (*tx_write)(struct efx_tx_queue *tx_queue);
+ netdev_tx_t (*tx_enqueue)(struct efx_tx_queue *tx_queue, struct sk_buff *skb);
unsigned int (*tx_limit_len)(struct efx_tx_queue *tx_queue,
dma_addr_t dma_addr, unsigned int len);
int (*rx_push_rss_config)(struct efx_nic *efx, bool user,
@@ -1396,6 +1405,7 @@ struct efx_nic_type {
void (*rx_remove)(struct efx_rx_queue *rx_queue);
void (*rx_write)(struct efx_rx_queue *rx_queue);
void (*rx_defer_refill)(struct efx_rx_queue *rx_queue);
+ void (*rx_packet)(struct efx_channel *channel);
int (*ev_probe)(struct efx_channel *channel);
int (*ev_init)(struct efx_channel *channel);
void (*ev_fini)(struct efx_channel *channel);
@@ -1470,6 +1480,7 @@ struct efx_nic_type {
bool (*udp_tnl_has_port)(struct efx_nic *efx, __be16 port);
size_t (*print_additional_fwver)(struct efx_nic *efx, char *buf,
size_t len);
+ void (*sensor_event)(struct efx_nic *efx, efx_qword_t *ev);
int revision;
unsigned int txd_ptr_tbl_base;
@@ -1521,6 +1532,13 @@ efx_get_channel(struct efx_nic *efx, unsigned index)
_channel = _channel->channel ? \
(_efx)->channel[_channel->channel - 1] : NULL)
+static inline struct efx_channel *
+efx_get_tx_channel(struct efx_nic *efx, unsigned int index)
+{
+ EFX_WARN_ON_ONCE_PARANOID(index >= efx->n_tx_channels);
+ return efx->channel[efx->tx_channel_offset + index];
+}
+
static inline struct efx_tx_queue *
efx_get_tx_queue(struct efx_nic *efx, unsigned index, unsigned type)
{
diff --git a/drivers/net/ethernet/sfc/nic_common.h b/drivers/net/ethernet/sfc/nic_common.h
index e04b6817cde3..974107354087 100644
--- a/drivers/net/ethernet/sfc/nic_common.h
+++ b/drivers/net/ethernet/sfc/nic_common.h
@@ -225,6 +225,12 @@ void efx_nic_event_test_start(struct efx_channel *channel);
bool efx_nic_event_present(struct efx_channel *channel);
+static inline void efx_sensor_event(struct efx_nic *efx, efx_qword_t *ev)
+{
+ if (efx->type->sensor_event)
+ efx->type->sensor_event(efx, ev);
+}
+
/* Some statistics are computed as A - B where A and B each increase
* linearly with some hardware counter(s) and the counters are read
* asynchronously. If the counters contributing to B are always read
diff --git a/drivers/net/ethernet/sfc/ptp.c b/drivers/net/ethernet/sfc/ptp.c
index 393b7cbac8b2..bea4725a4499 100644
--- a/drivers/net/ethernet/sfc/ptp.c
+++ b/drivers/net/ethernet/sfc/ptp.c
@@ -1154,17 +1154,15 @@ static void efx_ptp_drop_time_expired_events(struct efx_nic *efx)
/* Drop time-expired events */
spin_lock_bh(&ptp->evt_lock);
- if (!list_empty(&ptp->evt_list)) {
- list_for_each_safe(cursor, next, &ptp->evt_list) {
- struct efx_ptp_event_rx *evt;
-
- evt = list_entry(cursor, struct efx_ptp_event_rx,
- link);
- if (time_after(jiffies, evt->expiry)) {
- list_move(&evt->link, &ptp->evt_free_list);
- netif_warn(efx, hw, efx->net_dev,
- "PTP rx event dropped\n");
- }
+ list_for_each_safe(cursor, next, &ptp->evt_list) {
+ struct efx_ptp_event_rx *evt;
+
+ evt = list_entry(cursor, struct efx_ptp_event_rx,
+ link);
+ if (time_after(jiffies, evt->expiry)) {
+ list_move(&evt->link, &ptp->evt_free_list);
+ netif_warn(efx, hw, efx->net_dev,
+ "PTP rx event dropped\n");
}
}
spin_unlock_bh(&ptp->evt_lock);
diff --git a/drivers/net/ethernet/sfc/siena.c b/drivers/net/ethernet/sfc/siena.c
index 219fb3a0c9d0..a7ea630bb5e6 100644
--- a/drivers/net/ethernet/sfc/siena.c
+++ b/drivers/net/ethernet/sfc/siena.c
@@ -1018,6 +1018,7 @@ const struct efx_nic_type siena_a0_nic_type = {
.tx_remove = efx_farch_tx_remove,
.tx_write = efx_farch_tx_write,
.tx_limit_len = efx_farch_tx_limit_len,
+ .tx_enqueue = __efx_enqueue_skb,
.rx_push_rss_config = siena_rx_push_rss_config,
.rx_pull_rss_config = siena_rx_pull_rss_config,
.rx_probe = efx_farch_rx_probe,
@@ -1025,6 +1026,7 @@ const struct efx_nic_type siena_a0_nic_type = {
.rx_remove = efx_farch_rx_remove,
.rx_write = efx_farch_rx_write,
.rx_defer_refill = efx_farch_rx_defer_refill,
+ .rx_packet = __efx_rx_packet,
.ev_probe = efx_farch_ev_probe,
.ev_init = efx_farch_ev_init,
.ev_fini = efx_farch_ev_fini,
@@ -1096,4 +1098,5 @@ const struct efx_nic_type siena_a0_nic_type = {
1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT),
.rx_hash_key_size = 16,
.check_caps = siena_check_caps,
+ .sensor_event = efx_mcdi_sensor_event,
};
diff --git a/drivers/net/ethernet/sfc/tx.c b/drivers/net/ethernet/sfc/tx.c
index 1bcf50ab95d9..727201d5eb24 100644
--- a/drivers/net/ethernet/sfc/tx.c
+++ b/drivers/net/ethernet/sfc/tx.c
@@ -284,7 +284,7 @@ static int efx_enqueue_skb_pio(struct efx_tx_queue *tx_queue,
* Returns NETDEV_TX_OK.
* You must hold netif_tx_lock() to call this function.
*/
-netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
+netdev_tx_t __efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
{
unsigned int old_insert_count = tx_queue->insert_count;
bool xmit_more = netdev_xmit_more();
@@ -503,7 +503,7 @@ netdev_tx_t efx_hard_start_xmit(struct sk_buff *skb,
}
tx_queue = efx_get_tx_queue(efx, index, type);
- return efx_enqueue_skb(tx_queue, skb);
+ return __efx_enqueue_skb(tx_queue, skb);
}
void efx_xmit_done_single(struct efx_tx_queue *tx_queue)
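
The double-underscore rename marks __efx_enqueue_skb() as one implementation behind the NIC-type table: the siena hunk above installs it as .tx_enqueue, leaving EF100 free to supply its own. A hedged sketch of the dispatch this enables (the wrapper name is illustrative, not the driver's exact code):

	/* sketch: xmit resolved through the per-type method */
	static netdev_tx_t efx_tx_dispatch(struct efx_nic *efx,
					   struct efx_tx_queue *tx_queue,
					   struct sk_buff *skb)
	{
		/* Siena/Falcon point this at __efx_enqueue_skb();
		 * EF100 substitutes a handler for its own descriptor format.
		 */
		return efx->type->tx_enqueue(tx_queue, skb);
	}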
diff --git a/drivers/net/ethernet/sfc/tx_common.h b/drivers/net/ethernet/sfc/tx_common.h
index cbe995b024a6..bbab7f248250 100644
--- a/drivers/net/ethernet/sfc/tx_common.h
+++ b/drivers/net/ethernet/sfc/tx_common.h
@@ -40,4 +40,6 @@ int efx_tx_map_data(struct efx_tx_queue *tx_queue, struct sk_buff *skb,
unsigned int efx_tx_max_skb_descs(struct efx_nic *efx);
int efx_tx_tso_fallback(struct efx_tx_queue *tx_queue, struct sk_buff *skb);
+
+extern bool efx_separate_tx_channels;
#endif
diff --git a/drivers/s390/net/ism_drv.c b/drivers/s390/net/ism_drv.c
index c7fade836d83..5fbe9eae84d1 100644
--- a/drivers/s390/net/ism_drv.c
+++ b/drivers/s390/net/ism_drv.c
@@ -231,7 +231,7 @@ static int ism_alloc_dmb(struct ism_dev *ism, struct smcd_dmb *dmb)
bit = find_next_zero_bit(ism->sba_bitmap, ISM_NR_DMBS,
ISM_DMB_BIT_OFFSET);
if (bit == ISM_NR_DMBS)
- return -ENOMEM;
+ return -ENOSPC;
dmb->sba_idx = bit;
}
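
-ENOSPC is the better errno here: the DMB bitmap ran out of slots, not the kernel out of memory, and the SMC hunks further down key the new SMC_CLC_DECL_MAX_DMB decline code off exactly that distinction. The idiom, sketched generically:

	/* sketch: bitmap-backed slot allocator distinguishing 'full' from OOM */
	static int alloc_slot(unsigned long *bitmap, unsigned int nbits)
	{
		unsigned int bit = find_next_zero_bit(bitmap, nbits, 0);

		if (bit == nbits)
			return -ENOSPC;	/* table exhausted, not out of memory */
		set_bit(bit, bitmap);
		return bit;
	}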
diff --git a/include/uapi/linux/hsr_netlink.h b/include/uapi/linux/hsr_netlink.h
index c218ef9c35dd..d540ea9bbef4 100644
--- a/include/uapi/linux/hsr_netlink.h
+++ b/include/uapi/linux/hsr_netlink.h
@@ -17,7 +17,7 @@
/* Generic Netlink HSR family definition
*/
-/* attributes */
+/* attributes for HSR or PRP node */
enum {
HSR_A_UNSPEC,
HSR_A_NODE_ADDR,
diff --git a/include/uapi/linux/if_link.h b/include/uapi/linux/if_link.h
index af8f31987526..63af64646358 100644
--- a/include/uapi/linux/if_link.h
+++ b/include/uapi/linux/if_link.h
@@ -907,7 +907,14 @@ enum {
#define IFLA_IPOIB_MAX (__IFLA_IPOIB_MAX - 1)
-/* HSR section */
+/* HSR/PRP section; both use the same interface */
+
+/* Different redundancy protocols for an hsr device */
+enum {
+ HSR_PROTOCOL_HSR,
+ HSR_PROTOCOL_PRP,
+ HSR_PROTOCOL_MAX,
+};
enum {
IFLA_HSR_UNSPEC,
@@ -917,6 +924,9 @@ enum {
IFLA_HSR_SUPERVISION_ADDR, /* Supervision frame multicast addr */
IFLA_HSR_SEQ_NR,
IFLA_HSR_VERSION, /* HSR version */
+	IFLA_HSR_PROTOCOL,	/* Indicates a protocol other than
+				 * HSR, e.g. PRP.
+				 */
__IFLA_HSR_MAX,
};
diff --git a/net/hsr/Kconfig b/net/hsr/Kconfig
index 8095b034e76e..1b048c17b6c8 100644
--- a/net/hsr/Kconfig
+++ b/net/hsr/Kconfig
@@ -4,24 +4,35 @@
#
config HSR
- tristate "High-availability Seamless Redundancy (HSR)"
+ tristate "High-availability Seamless Redundancy (HSR & PRP)"
help
+ This enables IEC 62439 defined High-availability Seamless
+ Redundancy (HSR) and Parallel Redundancy Protocol (PRP).
+
If you say Y here, then your Linux box will be able to act as a
- DANH ("Doubly attached node implementing HSR"). For this to work,
- your Linux box needs (at least) two physical Ethernet interfaces,
- and it must be connected as a node in a ring network together with
- other HSR capable nodes.
+ DANH ("Doubly attached node implementing HSR") or DANP ("Doubly
+ attached node implementing PRP"). For this to work, your Linux box
+ needs (at least) two physical Ethernet interfaces.
+
+ For DANH, it must be connected as a node in a ring network together
+ with other HSR capable nodes. All Ethernet frames sent over the HSR
+ device will be sent in both directions on the ring (over both slave
+ ports), giving a redundant, instant fail-over network. Each HSR node
+ in the ring acts like a bridge for HSR frames, but filters frames
+ that have been forwarded earlier.
- All Ethernet frames sent over the hsr device will be sent in both
- directions on the ring (over both slave ports), giving a redundant,
- instant fail-over network. Each HSR node in the ring acts like a
- bridge for HSR frames, but filters frames that have been forwarded
- earlier.
+ For DANP, it must be connected as a node connecting to two
+ separate networks over the two slave interfaces. Like HSR, Ethernet
+ frames sent over the PRP device will be sent to both networks giving
+	  a redundant, instant fail-over network. Unlike HSR, PRP networks
+	  can have Singly Attached Nodes (SANs) such as PCs, printers and
+	  bridges, which are able to communicate with DANP nodes.
This code is a "best effort" to comply with the HSR standard as
described in IEC 62439-3:2010 (HSRv0) and IEC 62439-3:2012 (HSRv1),
- but no compliancy tests have been made. Use iproute2 to select
- the version you desire.
+	  and the PRP standard described in IEC 62439-4:2012 (PRP), but no
+	  compliance tests have been made. Use iproute2 to select the protocol
+	  you would like to use.
You need to perform any and all necessary tests yourself before
relying on this code in a safety critical system!
diff --git a/net/hsr/hsr_debugfs.c b/net/hsr/hsr_debugfs.c
index 9787ef11ca71..3b6f675bd55a 100644
--- a/net/hsr/hsr_debugfs.c
+++ b/net/hsr/hsr_debugfs.c
@@ -1,5 +1,5 @@
/*
- * hsr_debugfs code
+ * debugfs code for HSR & PRP
* Copyright (C) 2019 Texas Instruments Incorporated
*
* Author(s):
@@ -24,7 +24,7 @@ static struct dentry *hsr_debugfs_root_dir;
static void print_mac_address(struct seq_file *sfp, unsigned char *mac)
{
- seq_printf(sfp, "%02x:%02x:%02x:%02x:%02x:%02x:",
+ seq_printf(sfp, "%02x:%02x:%02x:%02x:%02x:%02x ",
mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
}
@@ -35,20 +35,32 @@ hsr_node_table_show(struct seq_file *sfp, void *data)
struct hsr_priv *priv = (struct hsr_priv *)sfp->private;
struct hsr_node *node;
- seq_puts(sfp, "Node Table entries\n");
- seq_puts(sfp, "MAC-Address-A, MAC-Address-B, time_in[A], ");
- seq_puts(sfp, "time_in[B], Address-B port\n");
+ seq_printf(sfp, "Node Table entries for (%s) device\n",
+ (priv->prot_version == PRP_V1 ? "PRP" : "HSR"));
+ seq_puts(sfp, "MAC-Address-A, MAC-Address-B, time_in[A], ");
+ seq_puts(sfp, "time_in[B], Address-B port, ");
+ if (priv->prot_version == PRP_V1)
+ seq_puts(sfp, "SAN-A, SAN-B, DAN-P\n");
+ else
+ seq_puts(sfp, "DAN-H\n");
+
rcu_read_lock();
list_for_each_entry_rcu(node, &priv->node_db, mac_list) {
/* skip self node */
if (hsr_addr_is_self(priv, node->macaddress_A))
continue;
print_mac_address(sfp, &node->macaddress_A[0]);
- seq_puts(sfp, " ");
print_mac_address(sfp, &node->macaddress_B[0]);
- seq_printf(sfp, "0x%lx, ", node->time_in[HSR_PT_SLAVE_A]);
- seq_printf(sfp, "0x%lx ", node->time_in[HSR_PT_SLAVE_B]);
- seq_printf(sfp, "0x%x\n", node->addr_B_port);
+ seq_printf(sfp, "%10lx, ", node->time_in[HSR_PT_SLAVE_A]);
+ seq_printf(sfp, "%10lx, ", node->time_in[HSR_PT_SLAVE_B]);
+ seq_printf(sfp, "%14x, ", node->addr_B_port);
+
+ if (priv->prot_version == PRP_V1)
+ seq_printf(sfp, "%5x, %5x, %5x\n",
+ node->san_a, node->san_b,
+ (node->san_a == 0 && node->san_b == 0));
+ else
+ seq_printf(sfp, "%5x\n", 1);
}
rcu_read_unlock();
return 0;
@@ -57,7 +69,8 @@ hsr_node_table_show(struct seq_file *sfp, void *data)
/* hsr_node_table_open - Open the node_table file
*
* Description:
- * This routine opens a debugfs file node_table of specific hsr device
+ * This routine opens a debugfs file node_table of specific hsr
+ * or prp device
*/
static int
hsr_node_table_open(struct inode *inode, struct file *filp)
diff --git a/net/hsr/hsr_device.c b/net/hsr/hsr_device.c
index 8a927b647829..ab953a1a0d6c 100644
--- a/net/hsr/hsr_device.c
+++ b/net/hsr/hsr_device.c
@@ -3,9 +3,8 @@
*
* Author(s):
* 2011-2014 Arvid Brodin, arvid.brodin@alten.se
- *
* This file contains device methods for creating, using and destroying
- * virtual HSR devices.
+ * virtual HSR or PRP devices.
*/
#include <linux/netdevice.h>
@@ -231,66 +230,103 @@ static const struct header_ops hsr_header_ops = {
.parse = eth_header_parse,
};
-static void send_hsr_supervision_frame(struct hsr_port *master,
- u8 type, u8 hsr_ver)
+static struct sk_buff *hsr_init_skb(struct hsr_port *master, u16 proto)
{
+ struct hsr_priv *hsr = master->hsr;
struct sk_buff *skb;
int hlen, tlen;
- struct hsr_tag *hsr_tag;
- struct hsr_sup_tag *hsr_stag;
- struct hsr_sup_payload *hsr_sp;
- unsigned long irqflags;
hlen = LL_RESERVED_SPACE(master->dev);
tlen = master->dev->needed_tailroom;
+	/* skb size is the same for PRP/HSR frames; the only
+	 * difference is that PRP adds a trailer where HSR adds
+	 * a header
+	 */
skb = dev_alloc_skb(sizeof(struct hsr_tag) +
sizeof(struct hsr_sup_tag) +
sizeof(struct hsr_sup_payload) + hlen + tlen);
if (!skb)
- return;
+ return skb;
skb_reserve(skb, hlen);
-
skb->dev = master->dev;
- skb->protocol = htons(hsr_ver ? ETH_P_HSR : ETH_P_PRP);
+ skb->protocol = htons(proto);
skb->priority = TC_PRIO_CONTROL;
- if (dev_hard_header(skb, skb->dev, (hsr_ver ? ETH_P_HSR : ETH_P_PRP),
- master->hsr->sup_multicast_addr,
+ if (dev_hard_header(skb, skb->dev, proto,
+ hsr->sup_multicast_addr,
skb->dev->dev_addr, skb->len) <= 0)
goto out;
+
skb_reset_mac_header(skb);
skb_reset_network_header(skb);
skb_reset_transport_header(skb);
- if (hsr_ver > 0) {
+ return skb;
+out:
+ kfree_skb(skb);
+
+ return NULL;
+}
+
+static void send_hsr_supervision_frame(struct hsr_port *master,
+ unsigned long *interval)
+{
+ struct hsr_priv *hsr = master->hsr;
+ __u8 type = HSR_TLV_LIFE_CHECK;
+ struct hsr_tag *hsr_tag = NULL;
+ struct hsr_sup_payload *hsr_sp;
+ struct hsr_sup_tag *hsr_stag;
+ unsigned long irqflags;
+ struct sk_buff *skb;
+ u16 proto;
+
+ *interval = msecs_to_jiffies(HSR_LIFE_CHECK_INTERVAL);
+ if (hsr->announce_count < 3 && hsr->prot_version == 0) {
+ type = HSR_TLV_ANNOUNCE;
+ *interval = msecs_to_jiffies(HSR_ANNOUNCE_INTERVAL);
+ hsr->announce_count++;
+ }
+
+ if (!hsr->prot_version)
+ proto = ETH_P_PRP;
+ else
+ proto = ETH_P_HSR;
+
+ skb = hsr_init_skb(master, proto);
+ if (!skb) {
+ WARN_ONCE(1, "HSR: Could not send supervision frame\n");
+ return;
+ }
+
+ if (hsr->prot_version > 0) {
hsr_tag = skb_put(skb, sizeof(struct hsr_tag));
hsr_tag->encap_proto = htons(ETH_P_PRP);
set_hsr_tag_LSDU_size(hsr_tag, HSR_V1_SUP_LSDUSIZE);
}
hsr_stag = skb_put(skb, sizeof(struct hsr_sup_tag));
- set_hsr_stag_path(hsr_stag, (hsr_ver ? 0x0 : 0xf));
- set_hsr_stag_HSR_ver(hsr_stag, hsr_ver);
+ set_hsr_stag_path(hsr_stag, (hsr->prot_version ? 0x0 : 0xf));
+ set_hsr_stag_HSR_ver(hsr_stag, hsr->prot_version);
/* From HSRv1 on we have separate supervision sequence numbers. */
spin_lock_irqsave(&master->hsr->seqnr_lock, irqflags);
- if (hsr_ver > 0) {
- hsr_stag->sequence_nr = htons(master->hsr->sup_sequence_nr);
- hsr_tag->sequence_nr = htons(master->hsr->sequence_nr);
- master->hsr->sup_sequence_nr++;
- master->hsr->sequence_nr++;
+ if (hsr->prot_version > 0) {
+ hsr_stag->sequence_nr = htons(hsr->sup_sequence_nr);
+ hsr->sup_sequence_nr++;
+ hsr_tag->sequence_nr = htons(hsr->sequence_nr);
+ hsr->sequence_nr++;
} else {
- hsr_stag->sequence_nr = htons(master->hsr->sequence_nr);
- master->hsr->sequence_nr++;
+ hsr_stag->sequence_nr = htons(hsr->sequence_nr);
+ hsr->sequence_nr++;
}
spin_unlock_irqrestore(&master->hsr->seqnr_lock, irqflags);
hsr_stag->HSR_TLV_type = type;
/* TODO: Why 12 in HSRv0? */
- hsr_stag->HSR_TLV_length =
- hsr_ver ? sizeof(struct hsr_sup_payload) : 12;
+ hsr_stag->HSR_TLV_length = hsr->prot_version ?
+ sizeof(struct hsr_sup_payload) : 12;
/* Payload: MacAddressA */
hsr_sp = skb_put(skb, sizeof(struct hsr_sup_payload));
@@ -300,11 +336,57 @@ static void send_hsr_supervision_frame(struct hsr_port *master,
return;
hsr_forward_skb(skb, master);
+
return;
+}
-out:
- WARN_ONCE(1, "HSR: Could not send supervision frame\n");
- kfree_skb(skb);
+static void send_prp_supervision_frame(struct hsr_port *master,
+ unsigned long *interval)
+{
+ struct hsr_priv *hsr = master->hsr;
+ struct hsr_sup_payload *hsr_sp;
+ struct hsr_sup_tag *hsr_stag;
+ unsigned long irqflags;
+ struct sk_buff *skb;
+ struct prp_rct *rct;
+ u8 *tail;
+
+ skb = hsr_init_skb(master, ETH_P_PRP);
+ if (!skb) {
+ WARN_ONCE(1, "PRP: Could not send supervision frame\n");
+ return;
+ }
+
+ *interval = msecs_to_jiffies(HSR_LIFE_CHECK_INTERVAL);
+ hsr_stag = skb_put(skb, sizeof(struct hsr_sup_tag));
+ set_hsr_stag_path(hsr_stag, (hsr->prot_version ? 0x0 : 0xf));
+ set_hsr_stag_HSR_ver(hsr_stag, (hsr->prot_version ? 1 : 0));
+
+ /* From HSRv1 on we have separate supervision sequence numbers. */
+ spin_lock_irqsave(&master->hsr->seqnr_lock, irqflags);
+ hsr_stag->sequence_nr = htons(hsr->sup_sequence_nr);
+ hsr->sup_sequence_nr++;
+ hsr_stag->HSR_TLV_type = PRP_TLV_LIFE_CHECK_DD;
+ hsr_stag->HSR_TLV_length = sizeof(struct hsr_sup_payload);
+
+ /* Payload: MacAddressA */
+ hsr_sp = skb_put(skb, sizeof(struct hsr_sup_payload));
+ ether_addr_copy(hsr_sp->macaddress_A, master->dev->dev_addr);
+
+ if (skb_put_padto(skb, ETH_ZLEN + HSR_HLEN)) {
+ spin_unlock_irqrestore(&master->hsr->seqnr_lock, irqflags);
+ return;
+ }
+
+ tail = skb_tail_pointer(skb) - HSR_HLEN;
+ rct = (struct prp_rct *)tail;
+ rct->PRP_suffix = htons(ETH_P_PRP);
+ set_prp_LSDU_size(rct, HSR_V1_SUP_LSDUSIZE);
+ rct->sequence_nr = htons(hsr->sequence_nr);
+ hsr->sequence_nr++;
+ spin_unlock_irqrestore(&master->hsr->seqnr_lock, irqflags);
+
+ hsr_forward_skb(skb, master);
}
/* Announce (supervision frame) timer function
@@ -319,19 +401,7 @@ static void hsr_announce(struct timer_list *t)
rcu_read_lock();
master = hsr_port_get_hsr(hsr, HSR_PT_MASTER);
-
- if (hsr->announce_count < 3 && hsr->prot_version == 0) {
- send_hsr_supervision_frame(master, HSR_TLV_ANNOUNCE,
- hsr->prot_version);
- hsr->announce_count++;
-
- interval = msecs_to_jiffies(HSR_ANNOUNCE_INTERVAL);
- } else {
- send_hsr_supervision_frame(master, HSR_TLV_LIFE_CHECK,
- hsr->prot_version);
-
- interval = msecs_to_jiffies(HSR_LIFE_CHECK_INTERVAL);
- }
+ hsr->proto_ops->send_sv_frame(master, &interval);
if (is_admin_up(master->dev))
mod_timer(&hsr->announce_timer, jiffies + interval);
@@ -368,6 +438,24 @@ static struct device_type hsr_type = {
.name = "hsr",
};
+static struct hsr_proto_ops hsr_ops = {
+ .send_sv_frame = send_hsr_supervision_frame,
+ .create_tagged_frame = hsr_create_tagged_frame,
+ .get_untagged_frame = hsr_get_untagged_frame,
+ .fill_frame_info = hsr_fill_frame_info,
+ .invalid_dan_ingress_frame = hsr_invalid_dan_ingress_frame,
+};
+
+static struct hsr_proto_ops prp_ops = {
+ .send_sv_frame = send_prp_supervision_frame,
+ .create_tagged_frame = prp_create_tagged_frame,
+ .get_untagged_frame = prp_get_untagged_frame,
+ .drop_frame = prp_drop_frame,
+ .fill_frame_info = prp_fill_frame_info,
+ .handle_san_frame = prp_handle_san_frame,
+ .update_san_info = prp_update_san_info,
+};
+
void hsr_dev_setup(struct net_device *dev)
{
eth_hw_addr_random(dev);
@@ -427,6 +515,17 @@ int hsr_dev_finalize(struct net_device *hsr_dev, struct net_device *slave[2],
ether_addr_copy(hsr_dev->dev_addr, slave[0]->dev_addr);
+ /* initialize protocol specific functions */
+ if (protocol_version == PRP_V1) {
+		/* For PRP, the most significant 3 bits of lan_id hold
+		 * the net_id (PRP_LAN_ID)
+		 */
+ hsr->net_id = PRP_LAN_ID << 1;
+ hsr->proto_ops = &prp_ops;
+ } else {
+ hsr->proto_ops = &hsr_ops;
+ }
+
/* Make sure we recognize frames from ourselves in hsr_rcv() */
res = hsr_create_self_node(hsr, hsr_dev->dev_addr,
slave[1]->dev_addr);
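
The lan_id arithmetic rewards a worked example: PRP_LAN_ID (0x5) shifted left once seeds net_id with binary 1010, and prp_set_lan_id() in hsr_forward.c ORs in bit 0 per slave port, so frames carry lan_id 0xA (0b1010) on LAN A and 0xB (0b1011) on LAN B in the top nibble of the RCT. A short sketch using the constants from this patch:

	u8  net_id  = PRP_LAN_ID << 1;	/* 0x5 << 1 == 0b1010 */
	int lane_id = net_id | on_lan_b;	/* on_lan_b: 0 for SLAVE_A, 1 for SLAVE_B */

	/* set_prp_lan_id() then stores it in the RCT's top 4 bits:
	 * lan_id_and_LSDU_size = htons((lane_id << 12) | lsdu_size);
	 */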
diff --git a/net/hsr/hsr_device.h b/net/hsr/hsr_device.h
index b8f9262ed101..868373822ee4 100644
--- a/net/hsr/hsr_device.h
+++ b/net/hsr/hsr_device.h
@@ -3,6 +3,8 @@
*
* Author(s):
* 2011-2014 Arvid Brodin, arvid.brodin@alten.se
+ *
+ * include file for HSR and PRP.
*/
#ifndef __HSR_DEVICE_H
diff --git a/net/hsr/hsr_forward.c b/net/hsr/hsr_forward.c
index ab8dca0c0b65..cadfccd7876e 100644
--- a/net/hsr/hsr_forward.c
+++ b/net/hsr/hsr_forward.c
@@ -3,6 +3,8 @@
*
* Author(s):
* 2011-2014 Arvid Brodin, arvid.brodin@alten.se
+ *
+ * Frame router for HSR and PRP.
*/
#include "hsr_forward.h"
@@ -15,18 +17,6 @@
struct hsr_node;
-struct hsr_frame_info {
- struct sk_buff *skb_std;
- struct sk_buff *skb_hsr;
- struct hsr_port *port_rcv;
- struct hsr_node *node_src;
- u16 sequence_nr;
- bool is_supervision;
- bool is_vlan;
- bool is_local_dest;
- bool is_local_exclusive;
-};
-
/* The uses I can see for these HSR supervision frames are:
* 1) Use the frames that are sent after node initialization ("HSR_TLV.Type =
* 22") to reset any sequence_nr counters belonging to that node. Useful if
@@ -74,7 +64,9 @@ static bool is_supervision_frame(struct hsr_priv *hsr, struct sk_buff *skb)
}
if (hsr_sup_tag->HSR_TLV_type != HSR_TLV_ANNOUNCE &&
- hsr_sup_tag->HSR_TLV_type != HSR_TLV_LIFE_CHECK)
+ hsr_sup_tag->HSR_TLV_type != HSR_TLV_LIFE_CHECK &&
+ hsr_sup_tag->HSR_TLV_type != PRP_TLV_LIFE_CHECK_DD &&
+ hsr_sup_tag->HSR_TLV_type != PRP_TLV_LIFE_CHECK_DA)
return false;
if (hsr_sup_tag->HSR_TLV_length != 12 &&
hsr_sup_tag->HSR_TLV_length != sizeof(struct hsr_sup_payload))
@@ -83,8 +75,8 @@ static bool is_supervision_frame(struct hsr_priv *hsr, struct sk_buff *skb)
return true;
}
-static struct sk_buff *create_stripped_skb(struct sk_buff *skb_in,
- struct hsr_frame_info *frame)
+static struct sk_buff *create_stripped_skb_hsr(struct sk_buff *skb_in,
+ struct hsr_frame_info *frame)
{
struct sk_buff *skb;
int copylen;
@@ -112,38 +104,123 @@ static struct sk_buff *create_stripped_skb(struct sk_buff *skb_in,
return skb;
}
-static struct sk_buff *frame_get_stripped_skb(struct hsr_frame_info *frame,
- struct hsr_port *port)
+struct sk_buff *hsr_get_untagged_frame(struct hsr_frame_info *frame,
+ struct hsr_port *port)
{
- if (!frame->skb_std)
- frame->skb_std = create_stripped_skb(frame->skb_hsr, frame);
+ if (!frame->skb_std) {
+ if (frame->skb_hsr) {
+ frame->skb_std =
+ create_stripped_skb_hsr(frame->skb_hsr, frame);
+ } else {
+ /* Unexpected */
+ WARN_ONCE(1, "%s:%d: Unexpected frame received (port_src %s)\n",
+ __FILE__, __LINE__, port->dev->name);
+ return NULL;
+ }
+ }
+
return skb_clone(frame->skb_std, GFP_ATOMIC);
}
+struct sk_buff *prp_get_untagged_frame(struct hsr_frame_info *frame,
+ struct hsr_port *port)
+{
+ if (!frame->skb_std) {
+ if (frame->skb_prp) {
+ /* trim the skb by len - HSR_HLEN to exclude RCT */
+ skb_trim(frame->skb_prp,
+ frame->skb_prp->len - HSR_HLEN);
+ frame->skb_std =
+ __pskb_copy(frame->skb_prp,
+ skb_headroom(frame->skb_prp),
+ GFP_ATOMIC);
+ } else {
+ /* Unexpected */
+ WARN_ONCE(1, "%s:%d: Unexpected frame received (port_src %s)\n",
+ __FILE__, __LINE__, port->dev->name);
+ return NULL;
+ }
+ }
+
+ return skb_clone(frame->skb_std, GFP_ATOMIC);
+}
+
+static void prp_set_lan_id(struct prp_rct *trailer,
+ struct hsr_port *port)
+{
+ int lane_id;
+
+ if (port->type == HSR_PT_SLAVE_A)
+ lane_id = 0;
+ else
+ lane_id = 1;
+
+ /* Add net_id in the upper 3 bits of lane_id */
+ lane_id |= port->hsr->net_id;
+ set_prp_lan_id(trailer, lane_id);
+}
+
+/* Tailroom for PRP rct should have been created before calling this */
+static struct sk_buff *prp_fill_rct(struct sk_buff *skb,
+ struct hsr_frame_info *frame,
+ struct hsr_port *port)
+{
+ struct prp_rct *trailer;
+ int min_size = ETH_ZLEN;
+ int lsdu_size;
+
+ if (!skb)
+ return skb;
+
+ if (frame->is_vlan)
+ min_size = VLAN_ETH_ZLEN;
+
+ if (skb_put_padto(skb, min_size))
+ return NULL;
+
+ trailer = (struct prp_rct *)skb_put(skb, HSR_HLEN);
+ lsdu_size = skb->len - 14;
+ if (frame->is_vlan)
+ lsdu_size -= 4;
+ prp_set_lan_id(trailer, port);
+ set_prp_LSDU_size(trailer, lsdu_size);
+ trailer->sequence_nr = htons(frame->sequence_nr);
+ trailer->PRP_suffix = htons(ETH_P_PRP);
+
+ return skb;
+}
+
+static void hsr_set_path_id(struct hsr_ethhdr *hsr_ethhdr,
+ struct hsr_port *port)
+{
+ int path_id;
+
+ if (port->type == HSR_PT_SLAVE_A)
+ path_id = 0;
+ else
+ path_id = 1;
+
+ set_hsr_tag_path(&hsr_ethhdr->hsr_tag, path_id);
+}
+
static struct sk_buff *hsr_fill_tag(struct sk_buff *skb,
struct hsr_frame_info *frame,
struct hsr_port *port, u8 proto_version)
{
struct hsr_ethhdr *hsr_ethhdr;
- int lane_id;
int lsdu_size;
/* pad to minimum packet size which is 60 + 6 (HSR tag) */
if (skb_put_padto(skb, ETH_ZLEN + HSR_HLEN))
return NULL;
- if (port->type == HSR_PT_SLAVE_A)
- lane_id = 0;
- else
- lane_id = 1;
-
lsdu_size = skb->len - 14;
if (frame->is_vlan)
lsdu_size -= 4;
hsr_ethhdr = (struct hsr_ethhdr *)skb_mac_header(skb);
- set_hsr_tag_path(&hsr_ethhdr->hsr_tag, lane_id);
+ hsr_set_path_id(hsr_ethhdr, port);
set_hsr_tag_LSDU_size(&hsr_ethhdr->hsr_tag, lsdu_size);
hsr_ethhdr->hsr_tag.sequence_nr = htons(frame->sequence_nr);
hsr_ethhdr->hsr_tag.encap_proto = hsr_ethhdr->ethhdr.h_proto;
@@ -153,16 +230,28 @@ static struct sk_buff *hsr_fill_tag(struct sk_buff *skb,
return skb;
}
-static struct sk_buff *create_tagged_skb(struct sk_buff *skb_o,
- struct hsr_frame_info *frame,
- struct hsr_port *port)
+/* If the original frame was an HSR tagged frame, just clone it to be sent
+ * unchanged. Otherwise, create a private frame especially tagged for 'port'.
+ */
+struct sk_buff *hsr_create_tagged_frame(struct hsr_frame_info *frame,
+ struct hsr_port *port)
{
- int movelen;
unsigned char *dst, *src;
struct sk_buff *skb;
+ int movelen;
+
+ if (frame->skb_hsr) {
+ struct hsr_ethhdr *hsr_ethhdr =
+ (struct hsr_ethhdr *)skb_mac_header(frame->skb_hsr);
+
+ /* set the lane id properly */
+ hsr_set_path_id(hsr_ethhdr, port);
+ return skb_clone(frame->skb_hsr, GFP_ATOMIC);
+ }
/* Create the new skb with enough headroom to fit the HSR tag */
- skb = __pskb_copy(skb_o, skb_headroom(skb_o) + HSR_HLEN, GFP_ATOMIC);
+ skb = __pskb_copy(frame->skb_std,
+ skb_headroom(frame->skb_std) + HSR_HLEN, GFP_ATOMIC);
if (!skb)
return NULL;
skb_reset_mac_header(skb);
@@ -185,21 +274,29 @@ static struct sk_buff *create_tagged_skb(struct sk_buff *skb_o,
return hsr_fill_tag(skb, frame, port, port->hsr->prot_version);
}
-/* If the original frame was an HSR tagged frame, just clone it to be sent
- * unchanged. Otherwise, create a private frame especially tagged for 'port'.
- */
-static struct sk_buff *frame_get_tagged_skb(struct hsr_frame_info *frame,
- struct hsr_port *port)
+struct sk_buff *prp_create_tagged_frame(struct hsr_frame_info *frame,
+ struct hsr_port *port)
{
- if (frame->skb_hsr)
- return skb_clone(frame->skb_hsr, GFP_ATOMIC);
+ struct sk_buff *skb;
- if (port->type != HSR_PT_SLAVE_A && port->type != HSR_PT_SLAVE_B) {
- WARN_ONCE(1, "HSR: Bug: trying to create a tagged frame for a non-ring port");
- return NULL;
+ if (frame->skb_prp) {
+ struct prp_rct *trailer = skb_get_PRP_rct(frame->skb_prp);
+
+ if (trailer) {
+ prp_set_lan_id(trailer, port);
+ } else {
+			WARN_ONCE(1, "errored PRP skb");
+ return NULL;
+ }
+ return skb_clone(frame->skb_prp, GFP_ATOMIC);
}
- return create_tagged_skb(frame->skb_std, frame, port);
+ skb = skb_copy_expand(frame->skb_std, 0,
+ skb_tailroom(frame->skb_std) + HSR_HLEN,
+ GFP_ATOMIC);
+ prp_fill_rct(skb, frame, port);
+
+ return skb;
}
static void hsr_deliver_master(struct sk_buff *skb, struct net_device *dev,
@@ -236,9 +333,18 @@ static int hsr_xmit(struct sk_buff *skb, struct hsr_port *port,
return dev_queue_xmit(skb);
}
+bool prp_drop_frame(struct hsr_frame_info *frame, struct hsr_port *port)
+{
+ return ((frame->port_rcv->type == HSR_PT_SLAVE_A &&
+ port->type == HSR_PT_SLAVE_B) ||
+ (frame->port_rcv->type == HSR_PT_SLAVE_B &&
+ port->type == HSR_PT_SLAVE_A));
+}
+
/* Forward the frame through all devices except:
* - Back through the receiving device
* - If it's a HSR frame: through a device where it has passed before
+ * - If it's a PRP frame: through another PRP slave device (no bridge)
* - To the local HSR master only if the frame is directly addressed to it, or
* a non-supervision multicast or broadcast frame.
*
@@ -253,6 +359,7 @@ static void hsr_forward_do(struct hsr_frame_info *frame)
struct sk_buff *skb;
hsr_for_each_port(frame->port_rcv->hsr, port) {
+ struct hsr_priv *hsr = port->hsr;
/* Don't send frame back the way it came */
if (port == frame->port_rcv)
continue;
@@ -265,24 +372,33 @@ static void hsr_forward_do(struct hsr_frame_info *frame)
if (port->type != HSR_PT_MASTER && frame->is_local_exclusive)
continue;
- /* Don't send frame over port where it has been sent before */
- if (hsr_register_frame_out(port, frame->node_src,
+		/* Don't send frame over port where it has been sent before.
+		 * Also, for frames from a SAN, this shouldn't be done.
+		 */
+ if (!frame->is_from_san &&
+ hsr_register_frame_out(port, frame->node_src,
frame->sequence_nr))
continue;
if (frame->is_supervision && port->type == HSR_PT_MASTER) {
- hsr_handle_sup_frame(frame->skb_hsr,
- frame->node_src,
- frame->port_rcv);
+ hsr_handle_sup_frame(frame);
continue;
}
+		/* Check if frame is to be dropped, e.g. for PRP no
+		 * forwarding between slave ports.
+		 */
+ if (hsr->proto_ops->drop_frame &&
+ hsr->proto_ops->drop_frame(frame, port))
+ continue;
+
if (port->type != HSR_PT_MASTER)
- skb = frame_get_tagged_skb(frame, port);
+ skb = hsr->proto_ops->create_tagged_frame(frame, port);
else
- skb = frame_get_stripped_skb(frame, port);
+ skb = hsr->proto_ops->get_untagged_frame(frame, port);
+
if (!skb) {
- /* FIXME: Record the dropped frame? */
+ frame->port_rcv->dev->stats.rx_dropped++;
continue;
}
@@ -313,40 +429,95 @@ static void check_local_dest(struct hsr_priv *hsr, struct sk_buff *skb,
}
}
-static int hsr_fill_frame_info(struct hsr_frame_info *frame,
- struct sk_buff *skb, struct hsr_port *port)
+static void handle_std_frame(struct sk_buff *skb,
+ struct hsr_frame_info *frame)
{
- struct ethhdr *ethhdr;
+ struct hsr_port *port = frame->port_rcv;
+ struct hsr_priv *hsr = port->hsr;
unsigned long irqflags;
+ frame->skb_hsr = NULL;
+ frame->skb_prp = NULL;
+ frame->skb_std = skb;
+
+ if (port->type != HSR_PT_MASTER) {
+ frame->is_from_san = true;
+ } else {
+ /* Sequence nr for the master node */
+ spin_lock_irqsave(&hsr->seqnr_lock, irqflags);
+ frame->sequence_nr = hsr->sequence_nr;
+ hsr->sequence_nr++;
+ spin_unlock_irqrestore(&hsr->seqnr_lock, irqflags);
+ }
+}
+
+void hsr_fill_frame_info(__be16 proto, struct sk_buff *skb,
+ struct hsr_frame_info *frame)
+{
+ if (proto == htons(ETH_P_PRP) ||
+ proto == htons(ETH_P_HSR)) {
+		/* HSR-tagged frame: data or supervision */
+ frame->skb_std = NULL;
+ frame->skb_prp = NULL;
+ frame->skb_hsr = skb;
+ frame->sequence_nr = hsr_get_skb_sequence_nr(skb);
+ return;
+ }
+
+ /* Standard frame or PRP from master port */
+ handle_std_frame(skb, frame);
+}
+
+void prp_fill_frame_info(__be16 proto, struct sk_buff *skb,
+ struct hsr_frame_info *frame)
+{
+ /* Supervision frame */
+ struct prp_rct *rct = skb_get_PRP_rct(skb);
+
+ if (rct &&
+ prp_check_lsdu_size(skb, rct, frame->is_supervision)) {
+ frame->skb_hsr = NULL;
+ frame->skb_std = NULL;
+ frame->skb_prp = skb;
+ frame->sequence_nr = prp_get_skb_sequence_nr(rct);
+ return;
+ }
+ handle_std_frame(skb, frame);
+}
+
+static int fill_frame_info(struct hsr_frame_info *frame,
+ struct sk_buff *skb, struct hsr_port *port)
+{
+ struct hsr_priv *hsr = port->hsr;
+ struct hsr_vlan_ethhdr *vlan_hdr;
+ struct ethhdr *ethhdr;
+ __be16 proto;
+
+ memset(frame, 0, sizeof(*frame));
frame->is_supervision = is_supervision_frame(port->hsr, skb);
- frame->node_src = hsr_get_node(port, skb, frame->is_supervision);
+ frame->node_src = hsr_get_node(port, &hsr->node_db, skb,
+ frame->is_supervision,
+ port->type);
if (!frame->node_src)
return -1; /* Unknown node and !is_supervision, or no mem */
ethhdr = (struct ethhdr *)skb_mac_header(skb);
frame->is_vlan = false;
- if (ethhdr->h_proto == htons(ETH_P_8021Q)) {
+ proto = ethhdr->h_proto;
+
+ if (proto == htons(ETH_P_8021Q))
frame->is_vlan = true;
+
+ if (frame->is_vlan) {
+ vlan_hdr = (struct hsr_vlan_ethhdr *)ethhdr;
+ proto = vlan_hdr->vlanhdr.h_vlan_encapsulated_proto;
/* FIXME: */
netdev_warn_once(skb->dev, "VLAN not yet supported");
}
- if (ethhdr->h_proto == htons(ETH_P_PRP) ||
- ethhdr->h_proto == htons(ETH_P_HSR)) {
- frame->skb_std = NULL;
- frame->skb_hsr = skb;
- frame->sequence_nr = hsr_get_skb_sequence_nr(skb);
- } else {
- frame->skb_std = skb;
- frame->skb_hsr = NULL;
- /* Sequence nr for the master node */
- spin_lock_irqsave(&port->hsr->seqnr_lock, irqflags);
- frame->sequence_nr = port->hsr->sequence_nr;
- port->hsr->sequence_nr++;
- spin_unlock_irqrestore(&port->hsr->seqnr_lock, irqflags);
- }
+ frame->is_from_san = false;
frame->port_rcv = port;
+ hsr->proto_ops->fill_frame_info(proto, skb, frame);
check_local_dest(port->hsr, skb, frame);
return 0;
@@ -363,8 +534,9 @@ void hsr_forward_skb(struct sk_buff *skb, struct hsr_port *port)
goto out_drop;
}
- if (hsr_fill_frame_info(&frame, skb, port) < 0)
+ if (fill_frame_info(&frame, skb, port) < 0)
goto out_drop;
+
hsr_register_frame_in(frame.node_src, port, frame.sequence_nr);
hsr_forward_do(&frame);
/* Gets called for ingress frames as well as egress from master port.
@@ -376,6 +548,7 @@ void hsr_forward_skb(struct sk_buff *skb, struct hsr_port *port)
}
kfree_skb(frame.skb_hsr);
+ kfree_skb(frame.skb_prp);
kfree_skb(frame.skb_std);
return;
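
A note on the LSDU bookkeeping above: prp_fill_rct() pads to the Ethernet minimum, appends the 6-byte RCT, and only then computes lsdu_size = skb->len - 14, so the advertised LSDU size includes the trailer itself. For an untagged frame padded to ETH_ZLEN that means 60 + 6 = 66 bytes of frame, of which 66 - 14 = 52 land in the RCT's low 12 bits; prp_check_lsdu_size() recomputes the same quantity on receive to decide whether an apparent trailer is genuine.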
diff --git a/net/hsr/hsr_forward.h b/net/hsr/hsr_forward.h
index 51a69295566c..618140d484ad 100644
--- a/net/hsr/hsr_forward.h
+++ b/net/hsr/hsr_forward.h
@@ -3,6 +3,8 @@
*
* Author(s):
* 2011-2014 Arvid Brodin, arvid.brodin@alten.se
+ *
+ * include file for HSR and PRP.
*/
#ifndef __HSR_FORWARD_H
@@ -12,5 +14,17 @@
#include "hsr_main.h"
void hsr_forward_skb(struct sk_buff *skb, struct hsr_port *port);
-
+struct sk_buff *prp_create_tagged_frame(struct hsr_frame_info *frame,
+ struct hsr_port *port);
+struct sk_buff *hsr_create_tagged_frame(struct hsr_frame_info *frame,
+ struct hsr_port *port);
+struct sk_buff *hsr_get_untagged_frame(struct hsr_frame_info *frame,
+ struct hsr_port *port);
+struct sk_buff *prp_get_untagged_frame(struct hsr_frame_info *frame,
+ struct hsr_port *port);
+bool prp_drop_frame(struct hsr_frame_info *frame, struct hsr_port *port);
+void prp_fill_frame_info(__be16 proto, struct sk_buff *skb,
+ struct hsr_frame_info *frame);
+void hsr_fill_frame_info(__be16 proto, struct sk_buff *skb,
+ struct hsr_frame_info *frame);
#endif /* __HSR_FORWARD_H */
diff --git a/net/hsr/hsr_framereg.c b/net/hsr/hsr_framereg.c
index 530de24b1fb5..5c97de459905 100644
--- a/net/hsr/hsr_framereg.c
+++ b/net/hsr/hsr_framereg.c
@@ -8,6 +8,7 @@
* interface. A frame is identified by its source MAC address and its HSR
* sequence number. This code keeps track of senders and their sequence numbers
* to allow filtering of duplicate frames, and to detect HSR ring errors.
+ * The same code handles duplicate filtering for PRP as well.
*/
#include <linux/if_ether.h>
@@ -126,6 +127,19 @@ void hsr_del_nodes(struct list_head *node_db)
kfree(node);
}
+void prp_handle_san_frame(bool san, enum hsr_port_type port,
+ struct hsr_node *node)
+{
+ /* Mark if the SAN node is over LAN_A or LAN_B */
+ if (port == HSR_PT_SLAVE_A) {
+ node->san_a = true;
+ return;
+ }
+
+ if (port == HSR_PT_SLAVE_B)
+ node->san_b = true;
+}
+
/* Allocate an hsr_node and add it to node_db. 'addr' is the node's address_A;
* seq_out is used to initialize filtering of outgoing duplicate frames
* originating from the newly added node.
@@ -133,7 +147,8 @@ void hsr_del_nodes(struct list_head *node_db)
static struct hsr_node *hsr_add_node(struct hsr_priv *hsr,
struct list_head *node_db,
unsigned char addr[],
- u16 seq_out)
+ u16 seq_out, bool san,
+ enum hsr_port_type rx_port)
{
struct hsr_node *new_node, *node;
unsigned long now;
@@ -154,6 +169,9 @@ static struct hsr_node *hsr_add_node(struct hsr_priv *hsr,
for (i = 0; i < HSR_PT_PORTS; i++)
new_node->seq_out[i] = seq_out;
+ if (san && hsr->proto_ops->handle_san_frame)
+ hsr->proto_ops->handle_san_frame(san, rx_port, new_node);
+
spin_lock_bh(&hsr->list_lock);
list_for_each_entry_rcu(node, node_db, mac_list,
lockdep_is_held(&hsr->list_lock)) {
@@ -171,15 +189,26 @@ out:
return node;
}
+void prp_update_san_info(struct hsr_node *node, bool is_sup)
+{
+ if (!is_sup)
+ return;
+
+ node->san_a = false;
+ node->san_b = false;
+}
+
/* Get the hsr_node from which 'skb' was sent.
*/
-struct hsr_node *hsr_get_node(struct hsr_port *port, struct sk_buff *skb,
- bool is_sup)
+struct hsr_node *hsr_get_node(struct hsr_port *port, struct list_head *node_db,
+ struct sk_buff *skb, bool is_sup,
+ enum hsr_port_type rx_port)
{
- struct list_head *node_db = &port->hsr->node_db;
struct hsr_priv *hsr = port->hsr;
struct hsr_node *node;
struct ethhdr *ethhdr;
+ struct prp_rct *rct;
+ bool san = false;
u16 seq_out;
if (!skb_mac_header_was_set(skb))
@@ -188,14 +217,21 @@ struct hsr_node *hsr_get_node(struct hsr_port *port, struct sk_buff *skb,
ethhdr = (struct ethhdr *)skb_mac_header(skb);
list_for_each_entry_rcu(node, node_db, mac_list) {
- if (ether_addr_equal(node->macaddress_A, ethhdr->h_source))
+ if (ether_addr_equal(node->macaddress_A, ethhdr->h_source)) {
+ if (hsr->proto_ops->update_san_info)
+ hsr->proto_ops->update_san_info(node, is_sup);
return node;
- if (ether_addr_equal(node->macaddress_B, ethhdr->h_source))
+ }
+ if (ether_addr_equal(node->macaddress_B, ethhdr->h_source)) {
+ if (hsr->proto_ops->update_san_info)
+ hsr->proto_ops->update_san_info(node, is_sup);
return node;
+ }
}
- /* Everyone may create a node entry, connected node to a HSR device. */
-
+	/* Everyone may create a node entry for a node connected to an
+	 * HSR/PRP device.
+	 */
if (ethhdr->h_proto == htons(ETH_P_PRP) ||
ethhdr->h_proto == htons(ETH_P_HSR)) {
/* Use the existing sequence_nr from the tag as starting point
@@ -203,31 +239,47 @@ struct hsr_node *hsr_get_node(struct hsr_port *port, struct sk_buff *skb,
*/
seq_out = hsr_get_skb_sequence_nr(skb) - 1;
} else {
- /* this is called also for frames from master port and
- * so warn only for non master ports
- */
- if (port->type != HSR_PT_MASTER)
- WARN_ONCE(1, "%s: Non-HSR frame\n", __func__);
- seq_out = HSR_SEQNR_START;
+ rct = skb_get_PRP_rct(skb);
+ if (rct && prp_check_lsdu_size(skb, rct, is_sup)) {
+ seq_out = prp_get_skb_sequence_nr(rct);
+ } else {
+ if (rx_port != HSR_PT_MASTER)
+ san = true;
+ seq_out = HSR_SEQNR_START;
+ }
}
- return hsr_add_node(hsr, node_db, ethhdr->h_source, seq_out);
+ return hsr_add_node(hsr, node_db, ethhdr->h_source, seq_out,
+ san, rx_port);
}
/* Use the Supervision frame's info about an eventual macaddress_B for merging
* nodes that has previously had their macaddress_B registered as a separate
* node.
*/
-void hsr_handle_sup_frame(struct sk_buff *skb, struct hsr_node *node_curr,
- struct hsr_port *port_rcv)
+void hsr_handle_sup_frame(struct hsr_frame_info *frame)
{
+ struct hsr_node *node_curr = frame->node_src;
+ struct hsr_port *port_rcv = frame->port_rcv;
struct hsr_priv *hsr = port_rcv->hsr;
struct hsr_sup_payload *hsr_sp;
struct hsr_node *node_real;
+ struct sk_buff *skb = NULL;
struct list_head *node_db;
struct ethhdr *ethhdr;
int i;
+	/* Here either frame->skb_hsr or frame->skb_prp should be
+	 * valid, as a supervision frame always carries the protocol
+	 * header info.
+	 */
+ if (frame->skb_hsr)
+ skb = frame->skb_hsr;
+ else if (frame->skb_prp)
+ skb = frame->skb_prp;
+ if (!skb)
+ return;
+
ethhdr = (struct ethhdr *)skb_mac_header(skb);
/* Leave the ethernet header. */
@@ -248,7 +300,8 @@ void hsr_handle_sup_frame(struct sk_buff *skb, struct hsr_node *node_curr,
if (!node_real)
/* No frame received from AddrA of this node yet */
node_real = hsr_add_node(hsr, node_db, hsr_sp->macaddress_A,
- HSR_SEQNR_START - 1);
+ HSR_SEQNR_START - 1, true,
+ port_rcv->type);
if (!node_real)
goto done; /* No mem */
if (node_real == node_curr)
@@ -274,7 +327,11 @@ void hsr_handle_sup_frame(struct sk_buff *skb, struct hsr_node *node_curr,
kfree_rcu(node_curr, rcu_head);
done:
- skb_push(skb, sizeof(struct hsrv1_ethhdr_sp));
+ /* PRP uses v0 header */
+ if (ethhdr->h_proto == htons(ETH_P_HSR))
+ skb_push(skb, sizeof(struct hsrv1_ethhdr_sp));
+ else
+ skb_push(skb, sizeof(struct hsrv0_ethhdr_sp));
}
/* 'skb' is a frame meant for this host, that is to be passed to upper layers.
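
Seeding seq_out with the tagged frame's sequence number minus one is what lets the first frame from a newly learned node pass the outgoing-duplicate filter, which forwards only when the candidate number is strictly after the last one sent. A hedged sketch of a wraparound-safe version of that comparison (the driver's actual macro may differ):

	/* sketch: 'a is newer than b' over a wrapping 16-bit counter */
	static bool seq_nr_after(u16 a, u16 b)
	{
		return (s16)(a - b) > 0;	/* difference read as signed */
	}
	/* seq_out = first_seq - 1  =>  seq_nr_after(first_seq, seq_out) holds */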
diff --git a/net/hsr/hsr_framereg.h b/net/hsr/hsr_framereg.h
index 0f0fa12b4329..86b43f539f2c 100644
--- a/net/hsr/hsr_framereg.h
+++ b/net/hsr/hsr_framereg.h
@@ -3,6 +3,8 @@
*
* Author(s):
* 2011-2014 Arvid Brodin, arvid.brodin@alten.se
+ *
+ * include file for HSR and PRP.
*/
#ifndef __HSR_FRAMEREG_H
@@ -12,12 +14,26 @@
struct hsr_node;
+struct hsr_frame_info {
+ struct sk_buff *skb_std;
+ struct sk_buff *skb_hsr;
+ struct sk_buff *skb_prp;
+ struct hsr_port *port_rcv;
+ struct hsr_node *node_src;
+ u16 sequence_nr;
+ bool is_supervision;
+ bool is_vlan;
+ bool is_local_dest;
+ bool is_local_exclusive;
+ bool is_from_san;
+};
+
void hsr_del_self_node(struct hsr_priv *hsr);
void hsr_del_nodes(struct list_head *node_db);
-struct hsr_node *hsr_get_node(struct hsr_port *port, struct sk_buff *skb,
- bool is_sup);
-void hsr_handle_sup_frame(struct sk_buff *skb, struct hsr_node *node_curr,
- struct hsr_port *port);
+struct hsr_node *hsr_get_node(struct hsr_port *port, struct list_head *node_db,
+ struct sk_buff *skb, bool is_sup,
+ enum hsr_port_type rx_port);
+void hsr_handle_sup_frame(struct hsr_frame_info *frame);
bool hsr_addr_is_self(struct hsr_priv *hsr, unsigned char *addr);
void hsr_addr_subst_source(struct hsr_node *node, struct sk_buff *skb);
@@ -47,6 +63,10 @@ int hsr_get_node_data(struct hsr_priv *hsr,
int *if2_age,
u16 *if2_seq);
+void prp_handle_san_frame(bool san, enum hsr_port_type port,
+ struct hsr_node *node);
+void prp_update_san_info(struct hsr_node *node, bool is_sup);
+
struct hsr_node {
struct list_head mac_list;
unsigned char macaddress_A[ETH_ALEN];
@@ -55,6 +75,9 @@ struct hsr_node {
enum hsr_port_type addr_B_port;
unsigned long time_in[HSR_PT_PORTS];
bool time_in_stale[HSR_PT_PORTS];
+ /* if the node is a SAN */
+ bool san_a;
+ bool san_b;
u16 seq_out[HSR_PT_PORTS];
struct rcu_head rcu_head;
};
diff --git a/net/hsr/hsr_main.c b/net/hsr/hsr_main.c
index 144da15f0a81..2fd1976e5b1c 100644
--- a/net/hsr/hsr_main.c
+++ b/net/hsr/hsr_main.c
@@ -3,6 +3,8 @@
*
* Author(s):
* 2011-2014 Arvid Brodin, arvid.brodin@alten.se
+ *
+ * Event handling for HSR and PRP devices.
*/
#include <linux/netdevice.h>
diff --git a/net/hsr/hsr_main.h b/net/hsr/hsr_main.h
index f74193465bf5..7dc92ce5a134 100644
--- a/net/hsr/hsr_main.h
+++ b/net/hsr/hsr_main.h
@@ -3,6 +3,8 @@
*
* Author(s):
* 2011-2014 Arvid Brodin, arvid.brodin@alten.se
+ *
+ * include file for HSR and PRP.
*/
#ifndef __HSR_PRIVATE_H
@@ -10,6 +12,7 @@
#include <linux/netdevice.h>
#include <linux/list.h>
+#include <linux/if_vlan.h>
/* Time constants as specified in the HSR specification (IEC-62439-3 2010)
* Table 8.
@@ -33,6 +36,10 @@
#define HSR_TLV_ANNOUNCE 22
#define HSR_TLV_LIFE_CHECK 23
+/* PRP V1 life check for Duplicate discard */
+#define PRP_TLV_LIFE_CHECK_DD 20
+/* PRP V1 life check for Duplicate Accept */
+#define PRP_TLV_LIFE_CHECK_DA 21
/* HSR Tag.
* As defined in IEC-62439-3:2010, the HSR tag is really { ethertype = 0x88FB,
@@ -80,7 +87,12 @@ struct hsr_ethhdr {
struct hsr_tag hsr_tag;
} __packed;
-/* HSR Supervision Frame data types.
+struct hsr_vlan_ethhdr {
+ struct vlan_ethhdr vlanhdr;
+ struct hsr_tag hsr_tag;
+} __packed;
+
+/* HSR/PRP Supervision Frame data types.
* Field names as defined in the IEC:2010 standard for HSR.
*/
struct hsr_sup_tag {
@@ -124,6 +136,34 @@ enum hsr_port_type {
HSR_PT_PORTS, /* This must be the last item in the enum */
};
+/* PRP Redundancy Control Trailer (RCT).
+ * As defined in IEC-62439-4:2012, the PRP RCT is really { sequence Nr,
+ * LAN identifier (LanId), LSDU_size and PRP_suffix = 0x88FB }.
+ *
+ * Field names as defined in the IEC:2012 standard for PRP.
+ */
+struct prp_rct {
+ __be16 sequence_nr;
+ __be16 lan_id_and_LSDU_size;
+ __be16 PRP_suffix;
+} __packed;
+
+static inline u16 get_prp_LSDU_size(struct prp_rct *rct)
+{
+ return ntohs(rct->lan_id_and_LSDU_size) & 0x0FFF;
+}
+
+static inline void set_prp_lan_id(struct prp_rct *rct, u16 lan_id)
+{
+ rct->lan_id_and_LSDU_size = htons((ntohs(rct->lan_id_and_LSDU_size) &
+ 0x0FFF) | (lan_id << 12));
+}
+static inline void set_prp_LSDU_size(struct prp_rct *rct, u16 LSDU_size)
+{
+ rct->lan_id_and_LSDU_size = htons((ntohs(rct->lan_id_and_LSDU_size) &
+ 0xF000) | (LSDU_size & 0x0FFF));
+}
+
struct hsr_port {
struct list_head port_list;
struct net_device *dev;
@@ -131,6 +171,32 @@ struct hsr_port {
enum hsr_port_type type;
};
+/* used by driver internally to differentiate various protocols */
+enum hsr_version {
+ HSR_V0 = 0,
+ HSR_V1,
+ PRP_V1,
+};
+
+struct hsr_frame_info;
+struct hsr_node;
+
+struct hsr_proto_ops {
+ /* format and send supervision frame */
+ void (*send_sv_frame)(struct hsr_port *port, unsigned long *interval);
+ void (*handle_san_frame)(bool san, enum hsr_port_type port,
+ struct hsr_node *node);
+ bool (*drop_frame)(struct hsr_frame_info *frame, struct hsr_port *port);
+ struct sk_buff * (*get_untagged_frame)(struct hsr_frame_info *frame,
+ struct hsr_port *port);
+ struct sk_buff * (*create_tagged_frame)(struct hsr_frame_info *frame,
+ struct hsr_port *port);
+ void (*fill_frame_info)(__be16 proto, struct sk_buff *skb,
+ struct hsr_frame_info *frame);
+ bool (*invalid_dan_ingress_frame)(__be16 protocol);
+ void (*update_san_info)(struct hsr_node *node, bool is_sup);
+};
+
struct hsr_priv {
struct rcu_head rcu_head;
struct list_head ports;
@@ -141,9 +207,16 @@ struct hsr_priv {
int announce_count;
u16 sequence_nr;
u16 sup_sequence_nr; /* For HSRv1 separate seq_nr for supervision */
- u8 prot_version; /* Indicate if HSRv0 or HSRv1. */
+ enum hsr_version prot_version; /* Indicate if HSRv0, HSRv1 or PRPv1 */
spinlock_t seqnr_lock; /* locking for sequence_nr */
spinlock_t list_lock; /* locking for node list */
+ struct hsr_proto_ops *proto_ops;
+#define PRP_LAN_ID	0x5	/* 0b1010 for LAN_A and 0b1011 for LAN_B.
+				 * Bit 0 is set based on SLAVE_A or SLAVE_B
+				 */
+ u8 net_id; /* for PRP, it occupies most significant 3 bits
+ * of lan_id
+ */
unsigned char sup_multicast_addr[ETH_ALEN];
#ifdef CONFIG_DEBUG_FS
struct dentry *node_tbl_root;
@@ -164,6 +237,49 @@ static inline u16 hsr_get_skb_sequence_nr(struct sk_buff *skb)
return ntohs(hsr_ethhdr->hsr_tag.sequence_nr);
}
+static inline struct prp_rct *skb_get_PRP_rct(struct sk_buff *skb)
+{
+ unsigned char *tail = skb_tail_pointer(skb) - HSR_HLEN;
+
+ struct prp_rct *rct = (struct prp_rct *)tail;
+
+ if (rct->PRP_suffix == htons(ETH_P_PRP))
+ return rct;
+
+ return NULL;
+}
+
+/* Assume caller has confirmed this skb is PRP suffixed */
+static inline u16 prp_get_skb_sequence_nr(struct prp_rct *rct)
+{
+ return ntohs(rct->sequence_nr);
+}
+
+static inline u16 get_prp_lan_id(struct prp_rct *rct)
+{
+ return ntohs(rct->lan_id_and_LSDU_size) >> 12;
+}
+
+/* assume there is a valid rct */
+static inline bool prp_check_lsdu_size(struct sk_buff *skb,
+ struct prp_rct *rct,
+ bool is_sup)
+{
+ struct ethhdr *ethhdr;
+ int expected_lsdu_size;
+
+ if (is_sup) {
+ expected_lsdu_size = HSR_V1_SUP_LSDUSIZE;
+ } else {
+ ethhdr = (struct ethhdr *)skb_mac_header(skb);
+ expected_lsdu_size = skb->len - 14;
+ if (ethhdr->h_proto == htons(ETH_P_8021Q))
+ expected_lsdu_size -= 4;
+ }
+
+ return (expected_lsdu_size == get_prp_LSDU_size(rct));
+}
+
#if IS_ENABLED(CONFIG_DEBUG_FS)
void hsr_debugfs_rename(struct net_device *dev);
void hsr_debugfs_init(struct hsr_priv *priv, struct net_device *hsr_dev);
diff --git a/net/hsr/hsr_netlink.c b/net/hsr/hsr_netlink.c
index 6e14b7d22639..06c3cd988760 100644
--- a/net/hsr/hsr_netlink.c
+++ b/net/hsr/hsr_netlink.c
@@ -4,7 +4,7 @@
* Author(s):
* 2011-2014 Arvid Brodin, arvid.brodin@alten.se
*
- * Routines for handling Netlink messages for HSR.
+ * Routines for handling Netlink messages for HSR and PRP.
*/
#include "hsr_netlink.h"
@@ -22,6 +22,7 @@ static const struct nla_policy hsr_policy[IFLA_HSR_MAX + 1] = {
[IFLA_HSR_VERSION] = { .type = NLA_U8 },
[IFLA_HSR_SUPERVISION_ADDR] = { .len = ETH_ALEN },
[IFLA_HSR_SEQ_NR] = { .type = NLA_U16 },
+ [IFLA_HSR_PROTOCOL] = { .type = NLA_U8 },
};
/* Here, it seems a netdevice has already been allocated for us, and the
@@ -31,8 +32,10 @@ static int hsr_newlink(struct net *src_net, struct net_device *dev,
struct nlattr *tb[], struct nlattr *data[],
struct netlink_ext_ack *extack)
{
+ enum hsr_version proto_version;
+ unsigned char multicast_spec;
+ u8 proto = HSR_PROTOCOL_HSR;
struct net_device *link[2];
- unsigned char multicast_spec, hsr_version;
if (!data) {
NL_SET_ERR_MSG_MOD(extack, "No slave devices specified");
@@ -69,18 +72,34 @@ static int hsr_newlink(struct net *src_net, struct net_device *dev,
else
multicast_spec = nla_get_u8(data[IFLA_HSR_MULTICAST_SPEC]);
+ if (data[IFLA_HSR_PROTOCOL])
+ proto = nla_get_u8(data[IFLA_HSR_PROTOCOL]);
+
+ if (proto >= HSR_PROTOCOL_MAX) {
+ NL_SET_ERR_MSG_MOD(extack, "Unsupported protocol\n");
+ return -EINVAL;
+ }
+
if (!data[IFLA_HSR_VERSION]) {
- hsr_version = 0;
+ proto_version = HSR_V0;
} else {
- hsr_version = nla_get_u8(data[IFLA_HSR_VERSION]);
- if (hsr_version > 1) {
+ if (proto == HSR_PROTOCOL_PRP) {
+ NL_SET_ERR_MSG_MOD(extack, "PRP version unsupported\n");
+ return -EINVAL;
+ }
+
+ proto_version = nla_get_u8(data[IFLA_HSR_VERSION]);
+ if (proto_version > HSR_V1) {
NL_SET_ERR_MSG_MOD(extack,
- "Only versions 0..1 are supported");
+ "Only HSR version 0/1 supported\n");
return -EINVAL;
}
}
- return hsr_dev_finalize(dev, link, multicast_spec, hsr_version, extack);
+ if (proto == HSR_PROTOCOL_PRP)
+ proto_version = PRP_V1;
+
+ return hsr_dev_finalize(dev, link, multicast_spec, proto_version, extack);
}
static void hsr_dellink(struct net_device *dev, struct list_head *head)
@@ -102,6 +121,7 @@ static void hsr_dellink(struct net_device *dev, struct list_head *head)
static int hsr_fill_info(struct sk_buff *skb, const struct net_device *dev)
{
struct hsr_priv *hsr = netdev_priv(dev);
+ u8 proto = HSR_PROTOCOL_HSR;
struct hsr_port *port;
port = hsr_port_get_hsr(hsr, HSR_PT_SLAVE_A);
@@ -120,6 +140,10 @@ static int hsr_fill_info(struct sk_buff *skb, const struct net_device *dev)
hsr->sup_multicast_addr) ||
nla_put_u16(skb, IFLA_HSR_SEQ_NR, hsr->sequence_nr))
goto nla_put_failure;
+ if (hsr->prot_version == PRP_V1)
+ proto = HSR_PROTOCOL_PRP;
+ if (nla_put_u8(skb, IFLA_HSR_PROTOCOL, proto))
+ goto nla_put_failure;
return 0;
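
For completeness: from userspace, selecting PRP is just the new attribute at link-creation time. Assuming an iproute2 new enough to expose IFLA_HSR_PROTOCOL (as a 'proto' keyword), the invocation would look something like:

	ip link add name prp0 type hsr slave1 eth1 slave2 eth2 proto 1

where 1 maps to HSR_PROTOCOL_PRP and omitting it keeps the default HSR_PROTOCOL_HSR.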
diff --git a/net/hsr/hsr_netlink.h b/net/hsr/hsr_netlink.h
index 1121bb192a18..501552d9753b 100644
--- a/net/hsr/hsr_netlink.h
+++ b/net/hsr/hsr_netlink.h
@@ -3,6 +3,8 @@
*
* Author(s):
* 2011-2014 Arvid Brodin, arvid.brodin@alten.se
+ *
+ * include file for HSR and PRP.
*/
#ifndef __HSR_NETLINK_H
diff --git a/net/hsr/hsr_slave.c b/net/hsr/hsr_slave.c
index 25b6ffba26cd..36d5fcf09c61 100644
--- a/net/hsr/hsr_slave.c
+++ b/net/hsr/hsr_slave.c
@@ -3,6 +3,8 @@
*
* Author(s):
* 2011-2014 Arvid Brodin, arvid.brodin@alten.se
+ *
+ * Frame handler and other utility functions for HSR and PRP.
*/
#include "hsr_slave.h"
@@ -14,12 +16,22 @@
#include "hsr_forward.h"
#include "hsr_framereg.h"
+bool hsr_invalid_dan_ingress_frame(__be16 protocol)
+{
+ return (protocol != htons(ETH_P_PRP) && protocol != htons(ETH_P_HSR));
+}
+
static rx_handler_result_t hsr_handle_frame(struct sk_buff **pskb)
{
struct sk_buff *skb = *pskb;
struct hsr_port *port;
+ struct hsr_priv *hsr;
__be16 protocol;
+ /* Packets from dev_loopback_xmit() do not have L2 header, bail out */
+ if (unlikely(skb->pkt_type == PACKET_LOOPBACK))
+ return RX_HANDLER_PASS;
+
if (!skb_mac_header_was_set(skb)) {
WARN_ONCE(1, "%s: skb invalid", __func__);
return RX_HANDLER_PASS;
@@ -28,6 +40,7 @@ static rx_handler_result_t hsr_handle_frame(struct sk_buff **pskb)
port = hsr_port_get_rcu(skb->dev);
if (!port)
goto finish_pass;
+ hsr = port->hsr;
if (hsr_addr_is_self(port->hsr, eth_hdr(skb)->h_source)) {
/* Directly kill frames sent by ourselves */
@@ -35,12 +48,23 @@ static rx_handler_result_t hsr_handle_frame(struct sk_buff **pskb)
goto finish_consume;
}
+	/* For HSR, only tagged frames are expected, but for PRP
+	 * there can be untagged frames as well, from Singly
+	 * Attached Nodes (SANs).
+	 */
protocol = eth_hdr(skb)->h_proto;
- if (protocol != htons(ETH_P_PRP) && protocol != htons(ETH_P_HSR))
+ if (hsr->proto_ops->invalid_dan_ingress_frame &&
+ hsr->proto_ops->invalid_dan_ingress_frame(protocol))
goto finish_pass;
skb_push(skb, ETH_HLEN);
+ if (skb_mac_header(skb) != skb->data) {
+ WARN_ONCE(1, "%s:%d: Malformed frame at source port %s)\n",
+ __func__, __LINE__, port->dev->name);
+ goto finish_consume;
+ }
+
hsr_forward_skb(skb, port);
finish_consume:
diff --git a/net/hsr/hsr_slave.h b/net/hsr/hsr_slave.h
index 8953ea279ce9..edc4612bb009 100644
--- a/net/hsr/hsr_slave.h
+++ b/net/hsr/hsr_slave.h
@@ -2,6 +2,8 @@
/* Copyright 2011-2014 Autronica Fire and Security AS
*
* 2011-2014 Arvid Brodin, arvid.brodin@alten.se
+ *
+ * include file for HSR and PRP.
*/
#ifndef __HSR_SLAVE_H
@@ -30,4 +32,6 @@ static inline struct hsr_port *hsr_port_get_rcu(const struct net_device *dev)
rcu_dereference(dev->rx_handler_data) : NULL;
}
+bool hsr_invalid_dan_ingress_frame(__be16 protocol);
+
#endif /* __HSR_SLAVE_H */
diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c
index cdf3a40f9ff5..876fd6ff1ff9 100644
--- a/net/ipv4/ipmr.c
+++ b/net/ipv4/ipmr.c
@@ -1441,7 +1441,7 @@ int ip_mroute_setsockopt(struct sock *sk, int optname, sockptr_t optval,
ret = -EINVAL;
break;
}
- if (copy_from_sockptr(&val, optval, sizeof(val))) {
+ if (copy_from_sockptr(&mfc, optval, sizeof(mfc))) {
ret = -EFAULT;
break;
}
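
The old code copied only sizeof(val) bytes into an int even though this option carries a struct mfcctl, truncating the request; the fix reads the full structure that the handler actually decodes. The rule of thumb, sketched generically:

	/* sketch: copy size must match the object the option decodes into */
	struct mfcctl mfc;

	if (copy_from_sockptr(&mfc, optval, sizeof(mfc)))	/* not sizeof(int) */
		return -EFAULT;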
diff --git a/net/mpls/af_mpls.c b/net/mpls/af_mpls.c
index fd30ea61336e..6fdd0c9f865a 100644
--- a/net/mpls/af_mpls.c
+++ b/net/mpls/af_mpls.c
@@ -1584,21 +1584,10 @@ static int mpls_dev_notify(struct notifier_block *this, unsigned long event,
unsigned int flags;
if (event == NETDEV_REGISTER) {
+ mdev = mpls_add_dev(dev);
+ if (IS_ERR(mdev))
+ return notifier_from_errno(PTR_ERR(mdev));
- /* For now just support Ethernet, IPGRE, IP6GRE, SIT and
- * IPIP devices
- */
- if (dev->type == ARPHRD_ETHER ||
- dev->type == ARPHRD_LOOPBACK ||
- dev->type == ARPHRD_IPGRE ||
- dev->type == ARPHRD_IP6GRE ||
- dev->type == ARPHRD_SIT ||
- dev->type == ARPHRD_TUNNEL ||
- dev->type == ARPHRD_TUNNEL6) {
- mdev = mpls_add_dev(dev);
- if (IS_ERR(mdev))
- return notifier_from_errno(PTR_ERR(mdev));
- }
return NOTIFY_OK;
}
diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c
index 832e36269b10..e7649bbc2b87 100644
--- a/net/smc/af_smc.c
+++ b/net/smc/af_smc.c
@@ -719,8 +719,11 @@ static int smc_connect_ism(struct smc_sock *smc,
}
/* Create send and receive buffers */
- if (smc_buf_create(smc, true))
- return smc_connect_abort(smc, SMC_CLC_DECL_MEM,
+ rc = smc_buf_create(smc, true);
+ if (rc)
+ return smc_connect_abort(smc, (rc == -ENOSPC) ?
+ SMC_CLC_DECL_MAX_DMB :
+ SMC_CLC_DECL_MEM,
ini->cln_first_contact);
smc_conn_save_peer_info(smc, aclc);
@@ -1200,12 +1203,14 @@ static int smc_listen_ism_init(struct smc_sock *new_smc,
}
/* Create send and receive buffers */
- if (smc_buf_create(new_smc, true)) {
+ rc = smc_buf_create(new_smc, true);
+ if (rc) {
if (ini->cln_first_contact == SMC_FIRST_CONTACT)
smc_lgr_cleanup_early(&new_smc->conn);
else
smc_conn_free(&new_smc->conn);
- return SMC_CLC_DECL_MEM;
+ return (rc == -ENOSPC) ? SMC_CLC_DECL_MAX_DMB :
+ SMC_CLC_DECL_MEM;
}
return 0;
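
With smc_buf_create() now propagating the underlying errno, both the connect and listen paths can translate 'DMB table full' into the dedicated decline code while every other failure stays a memory decline. The mapping, condensed into a sketch:

	/* sketch: errno -> CLC decline reason for SMC-D buffer creation */
	static u32 smc_decl_from_rc(int rc)
	{
		return (rc == -ENOSPC) ? SMC_CLC_DECL_MAX_DMB : SMC_CLC_DECL_MEM;
	}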
diff --git a/net/smc/smc_clc.h b/net/smc/smc_clc.h
index 76c2b150d040..cf7b45306f4e 100644
--- a/net/smc/smc_clc.h
+++ b/net/smc/smc_clc.h
@@ -48,6 +48,7 @@
#define SMC_CLC_DECL_NOACTLINK 0x030a0000 /* no active smc-r link in lgr */
#define SMC_CLC_DECL_NOSRVLINK 0x030b0000 /* SMC-R link from srv not found */
#define SMC_CLC_DECL_VERSMISMAT 0x030c0000 /* SMC version mismatch */
+#define SMC_CLC_DECL_MAX_DMB 0x030d0000 /* SMC-D DMB limit exceeded */
#define SMC_CLC_DECL_SYNCERR 0x04000000 /* synchronization error */
#define SMC_CLC_DECL_PEERDECL 0x05000000 /* peer declined during handshake */
#define SMC_CLC_DECL_INTERR 0x09990000 /* internal error */
diff --git a/net/smc/smc_core.c b/net/smc/smc_core.c
index f82a2e599917..b42fa3b00d00 100644
--- a/net/smc/smc_core.c
+++ b/net/smc/smc_core.c
@@ -1614,7 +1614,7 @@ static struct smc_buf_desc *smcd_new_buf_create(struct smc_link_group *lgr,
rc = smc_ism_register_dmb(lgr, bufsize, buf_desc);
if (rc) {
kfree(buf_desc);
- return ERR_PTR(-EAGAIN);
+ return (rc == -ENOMEM) ? ERR_PTR(-EAGAIN) : ERR_PTR(rc);
}
buf_desc->pages = virt_to_page(buf_desc->cpu_addr);
/* CDC header stored in buf. So, pretend it was smaller */
@@ -1688,7 +1688,7 @@ static int __smc_buf_create(struct smc_sock *smc, bool is_smcd, bool is_rmb)
}
if (IS_ERR(buf_desc))
- return -ENOMEM;
+ return PTR_ERR(buf_desc);
if (!is_smcd) {
if (smcr_buf_map_usable_links(lgr, buf_desc, is_rmb)) {