Diffstat (limited to 'drivers/net/ethernet')
-rw-r--r--  drivers/net/ethernet/atheros/atl1c/atl1c_hw.c | 5
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c | 3
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt.c | 208
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt.h | 1
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c | 10
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h | 76
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c | 35
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.h | 15
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c | 9
-rw-r--r--  drivers/net/ethernet/broadcom/genet/bcmgenet.c | 23
-rw-r--r--  drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c | 6
-rw-r--r--  drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c | 2
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c | 18
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c | 3
-rw-r--r--  drivers/net/ethernet/dec/tulip/winbond-840.c | 7
-rw-r--r--  drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c | 52
-rw-r--r--  drivers/net/ethernet/freescale/fec_main.c | 2
-rw-r--r--  drivers/net/ethernet/freescale/fman/mac.c | 1
-rw-r--r--  drivers/net/ethernet/google/gve/gve_main.c | 19
-rw-r--r--  drivers/net/ethernet/google/gve/gve_rx_dqo.c | 7
-rw-r--r--  drivers/net/ethernet/hisilicon/hip04_eth.c | 6
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h | 7
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c | 8
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c | 1
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.c | 36
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.h | 10
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c | 10
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c | 19
-rw-r--r--  drivers/net/ethernet/ibm/ibmvnic.c | 24
-rw-r--r--  drivers/net/ethernet/intel/e1000e/netdev.c | 1
-rw-r--r--  drivers/net/ethernet/intel/fm10k/fm10k_pci.c | 1
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_ethtool.c | 6
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_main.c | 61
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_txrx.c | 49
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_txrx.h | 2
-rw-r--r--  drivers/net/ethernet/intel/iavf/iavf.h | 1
-rw-r--r--  drivers/net/ethernet/intel/iavf/iavf_main.c | 15
-rw-r--r--  drivers/net/ethernet/intel/iavf/iavf_virtchnl.c | 47
-rw-r--r--  drivers/net/ethernet/intel/ice/ice.h | 1
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_main.c | 28
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_ptp.c | 2
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c | 7
-rw-r--r--  drivers/net/ethernet/intel/igb/igb_main.c | 15
-rw-r--r--  drivers/net/ethernet/intel/igc/igc.h | 2
-rw-r--r--  drivers/net/ethernet/intel/igc/igc_main.c | 3
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 4
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c | 5
-rw-r--r--  drivers/net/ethernet/intel/ixgbevf/ipsec.c | 20
-rw-r--r--  drivers/net/ethernet/marvell/mvneta.c | 20
-rw-r--r--  drivers/net/ethernet/marvell/mvpp2/mvpp2.h | 2
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/Makefile | 2
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/cgx.c | 294
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/cgx.h | 10
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/lmac_common.h | 12
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/mbox.h | 58
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/npc.h | 3
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu.c | 17
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu.h | 28
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c | 114
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu_cn10k.c | 200
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c | 93
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c | 48
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c | 56
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c | 58
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c | 29
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h | 10
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h | 3
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu_switch.c | 259
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/Makefile | 2
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c | 87
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/cn10k.h | 3
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c | 17
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h | 18
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/otx2_dmac_flt.c | 173
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c | 7
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c | 229
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c | 31
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c | 2
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h | 1
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c | 12
-rw-r--r--  drivers/net/ethernet/marvell/prestera/prestera_devlink.c | 2
-rw-r--r--  drivers/net/ethernet/marvell/prestera/prestera_switchdev.c | 4
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/main.c | 1
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/cq.c | 1
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/dev.c | 5
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c | 11
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/params.c | 11
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c | 7
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c | 5
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/trap.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 71
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_tc.c | 33
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/eq.c | 20
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.c | 6
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/esw/sample.c | 1
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/eswitch.h | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c | 24
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c | 4
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/fs_core.c | 10
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/health.c | 12
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/lib/eq.h | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/main.c | 12
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h | 5
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c | 10
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c | 4
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v0.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c | 4
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c | 2
-rw-r--r--  drivers/net/ethernet/microchip/sparx5/Kconfig | 2
-rw-r--r--  drivers/net/ethernet/microchip/sparx5/sparx5_mactable.c | 2
-rw-r--r--  drivers/net/ethernet/microchip/sparx5/sparx5_netdev.c | 21
-rw-r--r--  drivers/net/ethernet/moxa/moxart_ether.c | 4
-rw-r--r--  drivers/net/ethernet/mscc/ocelot.c | 1
-rw-r--r--  drivers/net/ethernet/mscc/ocelot_io.c | 16
-rw-r--r--  drivers/net/ethernet/mscc/ocelot_net.c | 9
-rw-r--r--  drivers/net/ethernet/natsemi/natsemi.c | 8
-rw-r--r--  drivers/net/ethernet/neterion/vxge/vxge-main.c | 6
-rw-r--r--  drivers/net/ethernet/netronome/nfp/flower/conntrack.c | 14
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c | 2
-rw-r--r--  drivers/net/ethernet/pensando/ionic/ionic_lif.c | 197
-rw-r--r--  drivers/net/ethernet/pensando/ionic/ionic_lif.h | 11
-rw-r--r--  drivers/net/ethernet/pensando/ionic/ionic_phc.c | 10
-rw-r--r--  drivers/net/ethernet/pensando/ionic/ionic_txrx.c | 41
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_ll2.c | 20
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_rdma.c | 3
-rw-r--r--  drivers/net/ethernet/qlogic/qede/qede.h | 1
-rw-r--r--  drivers/net/ethernet/qlogic/qede/qede_filter.c | 4
-rw-r--r--  drivers/net/ethernet/qlogic/qede/qede_main.c | 8
-rw-r--r--  drivers/net/ethernet/qlogic/qla3xxx.c | 6
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c | 4
-rw-r--r--  drivers/net/ethernet/qualcomm/emac/emac.c | 3
-rw-r--r--  drivers/net/ethernet/realtek/r8169_main.c | 7
-rw-r--r--  drivers/net/ethernet/renesas/ravb.h | 2
-rw-r--r--  drivers/net/ethernet/renesas/ravb_main.c | 2
-rw-r--r--  drivers/net/ethernet/rocker/rocker_main.c | 2
-rw-r--r--  drivers/net/ethernet/rocker/rocker_ofdpa.c | 2
-rw-r--r--  drivers/net/ethernet/sfc/efx_channels.c | 22
-rw-r--r--  drivers/net/ethernet/sis/sis900.c | 7
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c | 9
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c | 2
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac.h | 3
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 1
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c | 8
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c | 41
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c | 56
-rw-r--r--  drivers/net/ethernet/sun/niu.c | 3
-rw-r--r--  drivers/net/ethernet/ti/am65-cpsw-nuss.c | 6
-rw-r--r--  drivers/net/ethernet/ti/am65-cpsw-switchdev.c | 2
-rw-r--r--  drivers/net/ethernet/ti/cpsw_new.c | 7
-rw-r--r--  drivers/net/ethernet/ti/cpsw_priv.h | 4
-rw-r--r--  drivers/net/ethernet/ti/cpsw_switchdev.c | 2
-rw-r--r--  drivers/net/ethernet/ti/tlan.c | 3
-rw-r--r--  drivers/net/ethernet/xscale/ptp_ixp46x.c | 2
153 files changed, 2888 insertions(+), 762 deletions(-)
diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_hw.c b/drivers/net/ethernet/atheros/atl1c/atl1c_hw.c
index 7dff20350865..f19370c33444 100644
--- a/drivers/net/ethernet/atheros/atl1c/atl1c_hw.c
+++ b/drivers/net/ethernet/atheros/atl1c/atl1c_hw.c
@@ -594,6 +594,11 @@ int atl1c_phy_init(struct atl1c_hw *hw)
int ret_val;
u16 mii_bmcr_data = BMCR_RESET;
+ if (hw->nic_type == athr_mt) {
+ hw->phy_configured = true;
+ return 0;
+ }
+
if ((atl1c_read_phy_reg(hw, MII_PHYSID1, &hw->phy_id1) != 0) ||
(atl1c_read_phy_reg(hw, MII_PHYSID2, &hw->phy_id2) != 0)) {
dev_err(&pdev->dev, "Error get phy ID\n");
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index 1a6ec1a12d53..b5d954cb409a 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -2669,7 +2669,8 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
}
/* Allocated memory for FW statistics */
- if (bnx2x_alloc_fw_stats_mem(bp))
+ rc = bnx2x_alloc_fw_stats_mem(bp);
+ if (rc)
LOAD_ERROR_EXIT(bp, load_error0);
/* request pf to initialize status blocks */
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index f56245eeef7b..8a97640cdfe7 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -72,7 +72,8 @@
#include "bnxt_debugfs.h"
#define BNXT_TX_TIMEOUT (5 * HZ)
-#define BNXT_DEF_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_HW)
+#define BNXT_DEF_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_HW | \
+ NETIF_MSG_TX_ERR)
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Broadcom BCM573xx network driver");
@@ -365,6 +366,33 @@ static u16 bnxt_xmit_get_cfa_action(struct sk_buff *skb)
return md_dst->u.port_info.port_id;
}
+static void bnxt_txr_db_kick(struct bnxt *bp, struct bnxt_tx_ring_info *txr,
+ u16 prod)
+{
+ bnxt_db_write(bp, &txr->tx_db, prod);
+ txr->kick_pending = 0;
+}
+
+static bool bnxt_txr_netif_try_stop_queue(struct bnxt *bp,
+ struct bnxt_tx_ring_info *txr,
+ struct netdev_queue *txq)
+{
+ netif_tx_stop_queue(txq);
+
+ /* netif_tx_stop_queue() must be done before checking
+ * tx index in bnxt_tx_avail() below, because in
+ * bnxt_tx_int(), we update tx index before checking for
+ * netif_tx_queue_stopped().
+ */
+ smp_mb();
+ if (bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh) {
+ netif_tx_wake_queue(txq);
+ return false;
+ }
+
+ return true;
+}
+
static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct bnxt *bp = netdev_priv(dev);
@@ -384,6 +412,7 @@ static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
i = skb_get_queue_mapping(skb);
if (unlikely(i >= bp->tx_nr_rings)) {
dev_kfree_skb_any(skb);
+ atomic_long_inc(&dev->tx_dropped);
return NETDEV_TX_OK;
}
@@ -393,8 +422,12 @@ static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
free_size = bnxt_tx_avail(bp, txr);
if (unlikely(free_size < skb_shinfo(skb)->nr_frags + 2)) {
- netif_tx_stop_queue(txq);
- return NETDEV_TX_BUSY;
+ /* We must have raced with NAPI cleanup */
+ if (net_ratelimit() && txr->kick_pending)
+ netif_warn(bp, tx_err, dev,
+ "bnxt: ring busy w/ flush pending!\n");
+ if (bnxt_txr_netif_try_stop_queue(bp, txr, txq))
+ return NETDEV_TX_BUSY;
}
length = skb->len;
@@ -426,7 +459,10 @@ static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
if (ptp && ptp->tx_tstamp_en && !skb_is_gso(skb) &&
atomic_dec_if_positive(&ptp->tx_avail) >= 0) {
- if (!bnxt_ptp_parse(skb, &ptp->tx_seqid)) {
+ if (!bnxt_ptp_parse(skb, &ptp->tx_seqid,
+ &ptp->tx_hdr_off)) {
+ if (vlan_tag_flags)
+ ptp->tx_hdr_off += VLAN_HLEN;
lflags |= cpu_to_le32(TX_BD_FLAGS_STAMP);
skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
} else {
@@ -514,21 +550,16 @@ static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
normal_tx:
if (length < BNXT_MIN_PKT_SIZE) {
pad = BNXT_MIN_PKT_SIZE - length;
- if (skb_pad(skb, pad)) {
+ if (skb_pad(skb, pad))
/* SKB already freed. */
- tx_buf->skb = NULL;
- return NETDEV_TX_OK;
- }
+ goto tx_kick_pending;
length = BNXT_MIN_PKT_SIZE;
}
mapping = dma_map_single(&pdev->dev, skb->data, len, DMA_TO_DEVICE);
- if (unlikely(dma_mapping_error(&pdev->dev, mapping))) {
- dev_kfree_skb_any(skb);
- tx_buf->skb = NULL;
- return NETDEV_TX_OK;
- }
+ if (unlikely(dma_mapping_error(&pdev->dev, mapping)))
+ goto tx_free;
dma_unmap_addr_set(tx_buf, mapping, mapping);
flags = (len << TX_BD_LEN_SHIFT) | TX_BD_TYPE_LONG_TX_BD |
@@ -615,24 +646,17 @@ normal_tx:
txr->tx_prod = prod;
if (!netdev_xmit_more() || netif_xmit_stopped(txq))
- bnxt_db_write(bp, &txr->tx_db, prod);
+ bnxt_txr_db_kick(bp, txr, prod);
+ else
+ txr->kick_pending = 1;
tx_done:
if (unlikely(bnxt_tx_avail(bp, txr) <= MAX_SKB_FRAGS + 1)) {
if (netdev_xmit_more() && !tx_buf->is_push)
- bnxt_db_write(bp, &txr->tx_db, prod);
-
- netif_tx_stop_queue(txq);
+ bnxt_txr_db_kick(bp, txr, prod);
- /* netif_tx_stop_queue() must be done before checking
- * tx index in bnxt_tx_avail() below, because in
- * bnxt_tx_int(), we update tx index before checking for
- * netif_tx_queue_stopped().
- */
- smp_mb();
- if (bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh)
- netif_tx_wake_queue(txq);
+ bnxt_txr_netif_try_stop_queue(bp, txr, txq);
}
return NETDEV_TX_OK;
@@ -645,7 +669,6 @@ tx_dma_error:
/* start back at beginning and unmap skb */
prod = txr->tx_prod;
tx_buf = &txr->tx_buf_ring[prod];
- tx_buf->skb = NULL;
dma_unmap_single(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
skb_headlen(skb), PCI_DMA_TODEVICE);
prod = NEXT_TX(prod);
@@ -659,7 +682,13 @@ tx_dma_error:
PCI_DMA_TODEVICE);
}
+tx_free:
dev_kfree_skb_any(skb);
+tx_kick_pending:
+ if (txr->kick_pending)
+ bnxt_txr_db_kick(bp, txr, txr->tx_prod);
+ txr->tx_buf_ring[txr->tx_prod].skb = NULL;
+ atomic_long_inc(&dev->tx_dropped);
return NETDEV_TX_OK;
}
@@ -729,14 +758,9 @@ next_tx_int:
smp_mb();
if (unlikely(netif_tx_queue_stopped(txq)) &&
- (bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh)) {
- __netif_tx_lock(txq, smp_processor_id());
- if (netif_tx_queue_stopped(txq) &&
- bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh &&
- txr->dev_state != BNXT_DEV_STATE_CLOSING)
- netif_tx_wake_queue(txq);
- __netif_tx_unlock(txq);
- }
+ bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh &&
+ READ_ONCE(txr->dev_state) != BNXT_DEV_STATE_CLOSING)
+ netif_tx_wake_queue(txq);
}
static struct page *__bnxt_alloc_rx_page(struct bnxt *bp, dma_addr_t *mapping,
@@ -1671,11 +1695,16 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
if ((tpa_info->flags2 & RX_CMP_FLAGS2_META_FORMAT_VLAN) &&
(skb->dev->features & BNXT_HW_FEATURE_VLAN_ALL_RX)) {
- u16 vlan_proto = tpa_info->metadata >>
- RX_CMP_FLAGS2_METADATA_TPID_SFT;
+ __be16 vlan_proto = htons(tpa_info->metadata >>
+ RX_CMP_FLAGS2_METADATA_TPID_SFT);
u16 vtag = tpa_info->metadata & RX_CMP_FLAGS2_METADATA_TCI_MASK;
- __vlan_hwaccel_put_tag(skb, htons(vlan_proto), vtag);
+ if (eth_type_vlan(vlan_proto)) {
+ __vlan_hwaccel_put_tag(skb, vlan_proto, vtag);
+ } else {
+ dev_kfree_skb(skb);
+ return NULL;
+ }
}
skb_checksum_none_assert(skb);
@@ -1759,6 +1788,10 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons))
return -EBUSY;
+ /* The valid test of the entry must be done first before
+ * reading any further.
+ */
+ dma_rmb();
prod = rxr->rx_prod;
if (cmp_type == CMP_TYPE_RX_L2_TPA_START_CMP) {
@@ -1897,9 +1930,15 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
(skb->dev->features & BNXT_HW_FEATURE_VLAN_ALL_RX)) {
u32 meta_data = le32_to_cpu(rxcmp1->rx_cmp_meta_data);
u16 vtag = meta_data & RX_CMP_FLAGS2_METADATA_TCI_MASK;
- u16 vlan_proto = meta_data >> RX_CMP_FLAGS2_METADATA_TPID_SFT;
+ __be16 vlan_proto = htons(meta_data >>
+ RX_CMP_FLAGS2_METADATA_TPID_SFT);
- __vlan_hwaccel_put_tag(skb, htons(vlan_proto), vtag);
+ if (eth_type_vlan(vlan_proto)) {
+ __vlan_hwaccel_put_tag(skb, vlan_proto, vtag);
+ } else {
+ dev_kfree_skb(skb);
+ goto next_rx;
+ }
}
skb_checksum_none_assert(skb);
@@ -1975,6 +2014,10 @@ static int bnxt_force_rx_discard(struct bnxt *bp,
if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons))
return -EBUSY;
+ /* The valid test of the entry must be done first before
+ * reading any further.
+ */
+ dma_rmb();
cmp_type = RX_CMP_TYPE(rxcmp);
if (cmp_type == CMP_TYPE_RX_L2_CMP) {
rxcmp1->rx_cmp_cfa_code_errors_v2 |=
@@ -2440,6 +2483,10 @@ static int bnxt_poll_nitroa0(struct napi_struct *napi, int budget)
if (!TX_CMP_VALID(txcmp, raw_cons))
break;
+ /* The valid test of the entry must be done first before
+ * reading any further.
+ */
+ dma_rmb();
if ((TX_CMP_TYPE(txcmp) & 0x30) == 0x10) {
tmp_raw_cons = NEXT_RAW_CMP(raw_cons);
cp_cons = RING_CMP(tmp_raw_cons);
@@ -7563,8 +7610,12 @@ static int __bnxt_hwrm_func_qcaps(struct bnxt *bp)
bp->flags &= ~BNXT_FLAG_WOL_CAP;
if (flags & FUNC_QCAPS_RESP_FLAGS_WOL_MAGICPKT_SUPPORTED)
bp->flags |= BNXT_FLAG_WOL_CAP;
- if (flags & FUNC_QCAPS_RESP_FLAGS_PTP_SUPPORTED)
+ if (flags & FUNC_QCAPS_RESP_FLAGS_PTP_SUPPORTED) {
__bnxt_hwrm_ptp_qcfg(bp);
+ } else {
+ kfree(bp->ptp_cfg);
+ bp->ptp_cfg = NULL;
+ }
} else {
#ifdef CONFIG_BNXT_SRIOV
struct bnxt_vf_info *vf = &bp->vf;
@@ -9110,10 +9161,9 @@ static void bnxt_disable_napi(struct bnxt *bp)
for (i = 0; i < bp->cp_nr_rings; i++) {
struct bnxt_cp_ring_info *cpr = &bp->bnapi[i]->cp_ring;
+ napi_disable(&bp->bnapi[i]->napi);
if (bp->bnapi[i]->rx_ring)
cancel_work_sync(&cpr->dim.work);
-
- napi_disable(&bp->bnapi[i]->napi);
}
}
@@ -9147,9 +9197,11 @@ void bnxt_tx_disable(struct bnxt *bp)
if (bp->tx_ring) {
for (i = 0; i < bp->tx_nr_rings; i++) {
txr = &bp->tx_ring[i];
- txr->dev_state = BNXT_DEV_STATE_CLOSING;
+ WRITE_ONCE(txr->dev_state, BNXT_DEV_STATE_CLOSING);
}
}
+ /* Make sure napi polls see @dev_state change */
+ synchronize_net();
/* Drop carrier first to prevent TX timeout */
netif_carrier_off(bp->dev);
/* Stop all TX queues */
@@ -9163,8 +9215,10 @@ void bnxt_tx_enable(struct bnxt *bp)
for (i = 0; i < bp->tx_nr_rings; i++) {
txr = &bp->tx_ring[i];
- txr->dev_state = 0;
+ WRITE_ONCE(txr->dev_state, 0);
}
+ /* Make sure napi polls see @dev_state change */
+ synchronize_net();
netif_tx_wake_all_queues(bp->dev);
if (bp->link_info.link_up)
netif_carrier_on(bp->dev);
@@ -10123,7 +10177,6 @@ static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
}
}
- bnxt_ptp_start(bp);
rc = bnxt_init_nic(bp, irq_re_init);
if (rc) {
netdev_err(bp->dev, "bnxt_init_nic err: %x\n", rc);
@@ -10197,6 +10250,12 @@ int bnxt_half_open_nic(struct bnxt *bp)
{
int rc = 0;
+ if (test_bit(BNXT_STATE_ABORT_ERR, &bp->state)) {
+ netdev_err(bp->dev, "A previous firmware reset has not completed, aborting half open\n");
+ rc = -ENODEV;
+ goto half_open_err;
+ }
+
rc = bnxt_alloc_mem(bp, false);
if (rc) {
netdev_err(bp->dev, "bnxt_alloc_mem err: %x\n", rc);
@@ -10256,9 +10315,16 @@ static int bnxt_open(struct net_device *dev)
rc = bnxt_hwrm_if_change(bp, true);
if (rc)
return rc;
+
+ if (bnxt_ptp_init(bp)) {
+ netdev_warn(dev, "PTP initialization failed.\n");
+ kfree(bp->ptp_cfg);
+ bp->ptp_cfg = NULL;
+ }
rc = __bnxt_open_nic(bp, true, true);
if (rc) {
bnxt_hwrm_if_change(bp, false);
+ bnxt_ptp_clear(bp);
} else {
if (test_and_clear_bit(BNXT_STATE_FW_RESET_DET, &bp->state)) {
if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
@@ -10349,6 +10415,7 @@ static int bnxt_close(struct net_device *dev)
{
struct bnxt *bp = netdev_priv(dev);
+ bnxt_ptp_clear(bp);
bnxt_hwmon_close(bp);
bnxt_close_nic(bp, true, true);
bnxt_hwrm_shutdown_link(bp);
@@ -10737,6 +10804,9 @@ static bool bnxt_rfs_supported(struct bnxt *bp)
return true;
return false;
}
+ /* 212 firmware is broken for aRFS */
+ if (BNXT_FW_MAJ(bp) == 212)
+ return false;
if (BNXT_PF(bp) && !BNXT_CHIP_TYPE_NITRO_A0(bp))
return true;
if (bp->flags & BNXT_FLAG_NEW_RSS_CAP)
@@ -11335,6 +11405,7 @@ static void bnxt_fw_reset_close(struct bnxt *bp)
bnxt_clear_int_mode(bp);
pci_disable_device(bp->pdev);
}
+ bnxt_ptp_clear(bp);
__bnxt_close_nic(bp, true, false);
bnxt_vf_reps_free(bp);
bnxt_clear_int_mode(bp);
@@ -11959,10 +12030,21 @@ static bool bnxt_fw_reset_timeout(struct bnxt *bp)
(bp->fw_reset_max_dsecs * HZ / 10));
}
+static void bnxt_fw_reset_abort(struct bnxt *bp, int rc)
+{
+ clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
+ if (bp->fw_reset_state != BNXT_FW_RESET_STATE_POLL_VF) {
+ bnxt_ulp_start(bp, rc);
+ bnxt_dl_health_status_update(bp, false);
+ }
+ bp->fw_reset_state = 0;
+ dev_close(bp->dev);
+}
+
static void bnxt_fw_reset_task(struct work_struct *work)
{
struct bnxt *bp = container_of(work, struct bnxt, fw_reset_task.work);
- int rc;
+ int rc = 0;
if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
netdev_err(bp->dev, "bnxt_fw_reset_task() called when not in fw reset mode!\n");
@@ -11992,6 +12074,11 @@ static void bnxt_fw_reset_task(struct work_struct *work)
}
bp->fw_reset_timestamp = jiffies;
rtnl_lock();
+ if (test_bit(BNXT_STATE_ABORT_ERR, &bp->state)) {
+ bnxt_fw_reset_abort(bp, rc);
+ rtnl_unlock();
+ return;
+ }
bnxt_fw_reset_close(bp);
if (bp->fw_cap & BNXT_FW_CAP_ERR_RECOVER_RELOAD) {
bp->fw_reset_state = BNXT_FW_RESET_STATE_POLL_FW_DOWN;
@@ -12039,6 +12126,7 @@ static void bnxt_fw_reset_task(struct work_struct *work)
if (val == 0xffff) {
if (bnxt_fw_reset_timeout(bp)) {
netdev_err(bp->dev, "Firmware reset aborted, PCI config space invalid\n");
+ rc = -ETIMEDOUT;
goto fw_reset_abort;
}
bnxt_queue_fw_reset_work(bp, HZ / 1000);
@@ -12048,6 +12136,7 @@ static void bnxt_fw_reset_task(struct work_struct *work)
clear_bit(BNXT_STATE_FW_FATAL_COND, &bp->state);
if (pci_enable_device(bp->pdev)) {
netdev_err(bp->dev, "Cannot re-enable PCI device\n");
+ rc = -ENODEV;
goto fw_reset_abort;
}
pci_set_master(bp->pdev);
@@ -12074,18 +12163,18 @@ static void bnxt_fw_reset_task(struct work_struct *work)
}
rc = bnxt_open(bp->dev);
if (rc) {
- netdev_err(bp->dev, "bnxt_open_nic() failed\n");
- clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
- dev_close(bp->dev);
+ netdev_err(bp->dev, "bnxt_open() failed during FW reset\n");
+ bnxt_fw_reset_abort(bp, rc);
+ rtnl_unlock();
+ return;
}
bp->fw_reset_state = 0;
/* Make sure fw_reset_state is 0 before clearing the flag */
smp_mb__before_atomic();
clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
- bnxt_ulp_start(bp, rc);
- if (!rc)
- bnxt_reenable_sriov(bp);
+ bnxt_ulp_start(bp, 0);
+ bnxt_reenable_sriov(bp);
bnxt_vf_reps_alloc(bp);
bnxt_vf_reps_open(bp);
bnxt_dl_health_recovery_done(bp);
@@ -12103,12 +12192,8 @@ fw_reset_abort_status:
netdev_err(bp->dev, "fw_health_status 0x%x\n", sts);
}
fw_reset_abort:
- clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
- if (bp->fw_reset_state != BNXT_FW_RESET_STATE_POLL_VF)
- bnxt_dl_health_status_update(bp, false);
- bp->fw_reset_state = 0;
rtnl_lock();
- dev_close(bp->dev);
+ bnxt_fw_reset_abort(bp, rc);
rtnl_unlock();
}
@@ -12662,7 +12747,6 @@ static void bnxt_remove_one(struct pci_dev *pdev)
if (BNXT_PF(bp))
devlink_port_type_clear(&bp->dl_port);
- bnxt_ptp_clear(bp);
pci_disable_pcie_error_reporting(pdev);
unregister_netdev(dev);
clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
@@ -13246,11 +13330,6 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
rc);
}
- if (bnxt_ptp_init(bp)) {
- netdev_warn(dev, "PTP initialization failed.\n");
- kfree(bp->ptp_cfg);
- bp->ptp_cfg = NULL;
- }
bnxt_inv_fw_health_reg(bp);
bnxt_dl_register(bp);
@@ -13436,7 +13515,8 @@ static pci_ers_result_t bnxt_io_error_detected(struct pci_dev *pdev,
if (netif_running(netdev))
bnxt_close(netdev);
- pci_disable_device(pdev);
+ if (pci_is_enabled(pdev))
+ pci_disable_device(pdev);
bnxt_free_ctx_mem(bp);
kfree(bp->ctx);
bp->ctx = NULL;
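The bnxt changes above revolve around the classic lockless stop/wake protocol between the xmit and completion paths. A minimal sketch of that protocol, with hypothetical ring fields and helper names (not the actual bnxt structures), assuming kernel barrier primitives:

/* Minimal sketch of the stop/wake protocol used by
 * bnxt_start_xmit()/bnxt_tx_int() above. Names are hypothetical.
 */
struct tx_ring_sketch {
	unsigned int prod;	/* advanced by the xmit path */
	unsigned int cons;	/* advanced by the completion path */
	unsigned int size;
	int stopped;
};

static unsigned int ring_avail(struct tx_ring_sketch *r)
{
	return r->size - (r->prod - r->cons);
}

/* xmit path: stop first, then re-check, so a completion landing
 * in between cannot leave the queue stopped forever.
 */
static bool try_stop(struct tx_ring_sketch *r, unsigned int thresh)
{
	r->stopped = 1;
	smp_mb();			/* pairs with the barrier below */
	if (ring_avail(r) > thresh) {
		r->stopped = 0;		/* completions freed space: wake */
		return false;
	}
	return true;
}

/* completion path: publish the new consumer index before testing
 * the stopped flag.
 */
static void complete_tx(struct tx_ring_sketch *r, unsigned int done,
			unsigned int thresh)
{
	r->cons += done;
	smp_mb();			/* pairs with the barrier above */
	if (r->stopped && ring_avail(r) > thresh)
		r->stopped = 0;		/* wake the queue */
}

The paired full barriers guarantee that at least one side observes the other's update, so a wake-up cannot be lost between the stop and the final availability check.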
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
index bcf8d00b8c80..ba4e0fc38520 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
@@ -786,6 +786,7 @@ struct bnxt_tx_ring_info {
u16 tx_prod;
u16 tx_cons;
u16 txq_index;
+ u8 kick_pending;
struct bnxt_db_info tx_db;
struct tx_bd *tx_desc_ring[MAX_TX_PAGES];
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c
index 8e90224c43a2..8a68df4d9e59 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c
@@ -433,6 +433,7 @@ static int bnxt_hwrm_queue_dscp2pri_cfg(struct bnxt *bp, struct dcb_app *app,
static int bnxt_ets_validate(struct bnxt *bp, struct ieee_ets *ets, u8 *tc)
{
int total_ets_bw = 0;
+ bool zero = false;
u8 max_tc = 0;
int i;
@@ -453,13 +454,20 @@ static int bnxt_ets_validate(struct bnxt *bp, struct ieee_ets *ets, u8 *tc)
break;
case IEEE_8021QAZ_TSA_ETS:
total_ets_bw += ets->tc_tx_bw[i];
+ zero = zero || !ets->tc_tx_bw[i];
break;
default:
return -ENOTSUPP;
}
}
- if (total_ets_bw > 100)
+ if (total_ets_bw > 100) {
+ netdev_warn(bp->dev, "rejecting ETS config exceeding available bandwidth\n");
return -EINVAL;
+ }
+ if (zero && total_ets_bw == 100) {
+ netdev_warn(bp->dev, "rejecting ETS config starving a TC\n");
+ return -EINVAL;
+ }
if (max_tc >= bp->max_tc)
*tc = bp->max_tc;
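The validation added above rejects two configurations: an over-subscribed total, and a set of ETS TCs summing to exactly 100% in which some TC is assigned 0% and would therefore never be scheduled. The same check in a standalone form (a hypothetical helper over plain arrays, not the driver code):

#include <stdbool.h>

static int validate_ets_bw(const unsigned char *tc_tx_bw, int num_tc)
{
	int total = 0;
	bool zero = false;
	int i;

	for (i = 0; i < num_tc; i++) {
		total += tc_tx_bw[i];
		zero = zero || !tc_tx_bw[i];
	}

	if (total > 100)
		return -1;	/* over-subscribed */
	if (zero && total == 100)
		return -1;	/* some TC gets 0% and is starved */
	return 0;
}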
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
index 3fc6781c5b98..94d07a9f7034 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
@@ -368,6 +368,7 @@ struct cmd_nums {
#define HWRM_FUNC_PTP_TS_QUERY 0x19fUL
#define HWRM_FUNC_PTP_EXT_CFG 0x1a0UL
#define HWRM_FUNC_PTP_EXT_QCFG 0x1a1UL
+ #define HWRM_FUNC_KEY_CTX_ALLOC 0x1a2UL
#define HWRM_SELFTEST_QLIST 0x200UL
#define HWRM_SELFTEST_EXEC 0x201UL
#define HWRM_SELFTEST_IRQ 0x202UL
@@ -531,8 +532,8 @@ struct hwrm_err_output {
#define HWRM_VERSION_MAJOR 1
#define HWRM_VERSION_MINOR 10
#define HWRM_VERSION_UPDATE 2
-#define HWRM_VERSION_RSVD 47
-#define HWRM_VERSION_STR "1.10.2.47"
+#define HWRM_VERSION_RSVD 52
+#define HWRM_VERSION_STR "1.10.2.52"
/* hwrm_ver_get_input (size:192b/24B) */
struct hwrm_ver_get_input {
@@ -585,6 +586,7 @@ struct hwrm_ver_get_output {
#define VER_GET_RESP_DEV_CAPS_CFG_CFA_ADV_FLOW_MGNT_SUPPORTED 0x1000UL
#define VER_GET_RESP_DEV_CAPS_CFG_CFA_TFLIB_SUPPORTED 0x2000UL
#define VER_GET_RESP_DEV_CAPS_CFG_CFA_TRUFLOW_SUPPORTED 0x4000UL
+ #define VER_GET_RESP_DEV_CAPS_CFG_SECURE_BOOT_CAPABLE 0x8000UL
u8 roce_fw_maj_8b;
u8 roce_fw_min_8b;
u8 roce_fw_bld_8b;
@@ -886,7 +888,8 @@ struct hwrm_async_event_cmpl_reset_notify {
#define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_FW_EXCEPTION_FATAL (0x2UL << 8)
#define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_FW_EXCEPTION_NON_FATAL (0x3UL << 8)
#define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_FAST_RESET (0x4UL << 8)
- #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_LAST ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_FAST_RESET
+ #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_FW_ACTIVATION (0x5UL << 8)
+ #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_LAST ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_FW_ACTIVATION
#define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_DELAY_IN_100MS_TICKS_MASK 0xffff0000UL
#define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_DELAY_IN_100MS_TICKS_SFT 16
};
@@ -1236,13 +1239,14 @@ struct hwrm_async_event_cmpl_error_report_base {
u8 timestamp_lo;
__le16 timestamp_hi;
__le32 event_data1;
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_MASK 0xffUL
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_SFT 0
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_RESERVED 0x0UL
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_PAUSE_STORM 0x1UL
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_INVALID_SIGNAL 0x2UL
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_NVM 0x3UL
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_NVM
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_MASK 0xffUL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_RESERVED 0x0UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_PAUSE_STORM 0x1UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_INVALID_SIGNAL 0x2UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_NVM 0x3UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_DOORBELL_DROP_THRESHOLD 0x4UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_DOORBELL_DROP_THRESHOLD
};
/* hwrm_async_event_cmpl_error_report_pause_storm (size:128b/16B) */
@@ -1446,6 +1450,8 @@ struct hwrm_func_vf_cfg_input {
#define FUNC_VF_CFG_REQ_ENABLES_NUM_VNICS 0x200UL
#define FUNC_VF_CFG_REQ_ENABLES_NUM_STAT_CTXS 0x400UL
#define FUNC_VF_CFG_REQ_ENABLES_NUM_HW_RING_GRPS 0x800UL
+ #define FUNC_VF_CFG_REQ_ENABLES_NUM_TX_KEY_CTXS 0x1000UL
+ #define FUNC_VF_CFG_REQ_ENABLES_NUM_RX_KEY_CTXS 0x2000UL
__le16 mtu;
__le16 guest_vlan;
__le16 async_event_cr;
@@ -1469,7 +1475,8 @@ struct hwrm_func_vf_cfg_input {
__le16 num_vnics;
__le16 num_stat_ctxs;
__le16 num_hw_ring_grps;
- u8 unused_0[4];
+ __le16 num_tx_key_ctxs;
+ __le16 num_rx_key_ctxs;
};
/* hwrm_func_vf_cfg_output (size:128b/16B) */
@@ -1493,7 +1500,7 @@ struct hwrm_func_qcaps_input {
u8 unused_0[6];
};
-/* hwrm_func_qcaps_output (size:704b/88B) */
+/* hwrm_func_qcaps_output (size:768b/96B) */
struct hwrm_func_qcaps_output {
__le16 error_code;
__le16 req_type;
@@ -1587,7 +1594,8 @@ struct hwrm_func_qcaps_output {
#define FUNC_QCAPS_RESP_MPC_CHNLS_CAP_TE_CFA 0x4UL
#define FUNC_QCAPS_RESP_MPC_CHNLS_CAP_RE_CFA 0x8UL
#define FUNC_QCAPS_RESP_MPC_CHNLS_CAP_PRIMATE 0x10UL
- u8 unused_1;
+ __le16 max_key_ctxs_alloc;
+ u8 unused_1[7];
u8 valid;
};
@@ -1602,7 +1610,7 @@ struct hwrm_func_qcfg_input {
u8 unused_0[6];
};
-/* hwrm_func_qcfg_output (size:832b/104B) */
+/* hwrm_func_qcfg_output (size:896b/112B) */
struct hwrm_func_qcfg_output {
__le16 error_code;
__le16 req_type;
@@ -1749,11 +1757,13 @@ struct hwrm_func_qcfg_output {
#define FUNC_QCFG_RESP_PARTITION_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
#define FUNC_QCFG_RESP_PARTITION_MAX_BW_BW_VALUE_UNIT_LAST FUNC_QCFG_RESP_PARTITION_MAX_BW_BW_VALUE_UNIT_PERCENT1_100
__le16 host_mtu;
- u8 unused_3;
+ __le16 alloc_tx_key_ctxs;
+ __le16 alloc_rx_key_ctxs;
+ u8 unused_3[5];
u8 valid;
};
-/* hwrm_func_cfg_input (size:832b/104B) */
+/* hwrm_func_cfg_input (size:896b/112B) */
struct hwrm_func_cfg_input {
__le16 req_type;
__le16 cmpl_ring;
@@ -1820,6 +1830,8 @@ struct hwrm_func_cfg_input {
#define FUNC_CFG_REQ_ENABLES_PARTITION_MAX_BW 0x8000000UL
#define FUNC_CFG_REQ_ENABLES_TPID 0x10000000UL
#define FUNC_CFG_REQ_ENABLES_HOST_MTU 0x20000000UL
+ #define FUNC_CFG_REQ_ENABLES_TX_KEY_CTXS 0x40000000UL
+ #define FUNC_CFG_REQ_ENABLES_RX_KEY_CTXS 0x80000000UL
__le16 admin_mtu;
__le16 mru;
__le16 num_rsscos_ctxs;
@@ -1929,6 +1941,9 @@ struct hwrm_func_cfg_input {
#define FUNC_CFG_REQ_PARTITION_MAX_BW_BW_VALUE_UNIT_LAST FUNC_CFG_REQ_PARTITION_MAX_BW_BW_VALUE_UNIT_PERCENT1_100
__be16 tpid;
__le16 host_mtu;
+ __le16 num_tx_key_ctxs;
+ __le16 num_rx_key_ctxs;
+ u8 unused_0[4];
};
/* hwrm_func_cfg_output (size:128b/16B) */
@@ -2099,6 +2114,7 @@ struct hwrm_func_drv_rgtr_input {
#define FUNC_DRV_RGTR_REQ_FLAGS_MASTER_SUPPORT 0x40UL
#define FUNC_DRV_RGTR_REQ_FLAGS_FAST_RESET_SUPPORT 0x80UL
#define FUNC_DRV_RGTR_REQ_FLAGS_RSS_STRICT_HASH_TYPE_SUPPORT 0x100UL
+ #define FUNC_DRV_RGTR_REQ_FLAGS_NPAR_1_2_SUPPORT 0x200UL
__le32 enables;
#define FUNC_DRV_RGTR_REQ_ENABLES_OS_TYPE 0x1UL
#define FUNC_DRV_RGTR_REQ_ENABLES_VER 0x2UL
@@ -2268,7 +2284,7 @@ struct hwrm_func_resource_qcaps_input {
u8 unused_0[6];
};
-/* hwrm_func_resource_qcaps_output (size:448b/56B) */
+/* hwrm_func_resource_qcaps_output (size:512b/64B) */
struct hwrm_func_resource_qcaps_output {
__le16 error_code;
__le16 req_type;
@@ -2300,11 +2316,15 @@ struct hwrm_func_resource_qcaps_output {
__le16 max_tx_scheduler_inputs;
__le16 flags;
#define FUNC_RESOURCE_QCAPS_RESP_FLAGS_MIN_GUARANTEED 0x1UL
+ __le16 min_tx_key_ctxs;
+ __le16 max_tx_key_ctxs;
+ __le16 min_rx_key_ctxs;
+ __le16 max_rx_key_ctxs;
u8 unused_0[5];
u8 valid;
};
-/* hwrm_func_vf_resource_cfg_input (size:448b/56B) */
+/* hwrm_func_vf_resource_cfg_input (size:512b/64B) */
struct hwrm_func_vf_resource_cfg_input {
__le16 req_type;
__le16 cmpl_ring;
@@ -2331,6 +2351,10 @@ struct hwrm_func_vf_resource_cfg_input {
__le16 max_hw_ring_grps;
__le16 flags;
#define FUNC_VF_RESOURCE_CFG_REQ_FLAGS_MIN_GUARANTEED 0x1UL
+ __le16 min_tx_key_ctxs;
+ __le16 max_tx_key_ctxs;
+ __le16 min_rx_key_ctxs;
+ __le16 max_rx_key_ctxs;
u8 unused_0[2];
};
@@ -2348,7 +2372,9 @@ struct hwrm_func_vf_resource_cfg_output {
__le16 reserved_vnics;
__le16 reserved_stat_ctx;
__le16 reserved_hw_ring_grps;
- u8 unused_0[7];
+ __le16 reserved_tx_key_ctxs;
+ __le16 reserved_rx_key_ctxs;
+ u8 unused_0[3];
u8 valid;
};
@@ -4220,7 +4246,7 @@ struct hwrm_port_lpbk_clr_stats_output {
u8 valid;
};
-/* hwrm_port_ts_query_input (size:256b/32B) */
+/* hwrm_port_ts_query_input (size:320b/40B) */
struct hwrm_port_ts_query_input {
__le16 req_type;
__le16 cmpl_ring;
@@ -4238,8 +4264,11 @@ struct hwrm_port_ts_query_input {
__le16 enables;
#define PORT_TS_QUERY_REQ_ENABLES_TS_REQ_TIMEOUT 0x1UL
#define PORT_TS_QUERY_REQ_ENABLES_PTP_SEQ_ID 0x2UL
+ #define PORT_TS_QUERY_REQ_ENABLES_PTP_HDR_OFFSET 0x4UL
__le16 ts_req_timeout;
__le32 ptp_seq_id;
+ __le16 ptp_hdr_offset;
+ u8 unused_1[6];
};
/* hwrm_port_ts_query_output (size:192b/24B) */
@@ -8172,6 +8201,7 @@ struct hwrm_fw_reset_input {
u8 host_idx;
u8 flags;
#define FW_RESET_REQ_FLAGS_RESET_GRACEFUL 0x1UL
+ #define FW_RESET_REQ_FLAGS_FW_ACTIVATION 0x2UL
u8 unused_0[4];
};
@@ -8952,7 +8982,7 @@ struct hwrm_nvm_get_dir_info_output {
u8 valid;
};
-/* hwrm_nvm_write_input (size:384b/48B) */
+/* hwrm_nvm_write_input (size:448b/56B) */
struct hwrm_nvm_write_input {
__le16 req_type;
__le16 cmpl_ring;
@@ -8968,7 +8998,11 @@ struct hwrm_nvm_write_input {
__le16 option;
__le16 flags;
#define NVM_WRITE_REQ_FLAGS_KEEP_ORIG_ACTIVE_IMG 0x1UL
+ #define NVM_WRITE_REQ_FLAGS_BATCH_MODE 0x2UL
+ #define NVM_WRITE_REQ_FLAGS_BATCH_LAST 0x4UL
__le32 dir_item_length;
+ __le32 offset;
+ __le32 len;
__le32 unused_0;
};
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c
index f698b6bd4ff8..81f40ab748f1 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c
@@ -20,7 +20,7 @@
#include "bnxt.h"
#include "bnxt_ptp.h"
-int bnxt_ptp_parse(struct sk_buff *skb, u16 *seq_id)
+int bnxt_ptp_parse(struct sk_buff *skb, u16 *seq_id, u16 *hdr_off)
{
unsigned int ptp_class;
struct ptp_header *hdr;
@@ -34,6 +34,7 @@ int bnxt_ptp_parse(struct sk_buff *skb, u16 *seq_id)
if (!hdr)
return -EINVAL;
+ *hdr_off = (u8 *)hdr - skb->data;
*seq_id = ntohs(hdr->sequence_id);
return 0;
default:
@@ -91,6 +92,7 @@ static int bnxt_hwrm_port_ts_query(struct bnxt *bp, u32 flags, u64 *ts)
PORT_TS_QUERY_REQ_FLAGS_PATH_TX) {
req.enables = cpu_to_le16(BNXT_PTP_QTS_TX_ENABLES);
req.ptp_seq_id = cpu_to_le32(bp->ptp_cfg->tx_seqid);
+ req.ptp_hdr_offset = cpu_to_le16(bp->ptp_cfg->tx_hdr_off);
req.ts_req_timeout = cpu_to_le16(BNXT_PTP_QTS_TIMEOUT);
}
mutex_lock(&bp->hwrm_cmd_lock);
@@ -353,6 +355,12 @@ static long bnxt_ptp_ts_aux_work(struct ptp_clock_info *ptp_info)
bnxt_ptp_get_current_time(bp);
ptp->next_period = now + HZ;
+ if (time_after_eq(now, ptp->next_overflow_check)) {
+ spin_lock_bh(&ptp->ptp_lock);
+ timecounter_read(&ptp->tc);
+ spin_unlock_bh(&ptp->ptp_lock);
+ ptp->next_overflow_check = now + BNXT_PHC_OVERFLOW_PERIOD;
+ }
return HZ;
}
@@ -385,22 +393,6 @@ int bnxt_get_rx_ts_p5(struct bnxt *bp, u64 *ts, u32 pkt_ts)
return 0;
}
-void bnxt_ptp_start(struct bnxt *bp)
-{
- struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
-
- if (!ptp)
- return;
-
- if (bp->flags & BNXT_FLAG_CHIP_P5) {
- spin_lock_bh(&ptp->ptp_lock);
- ptp->current_time = bnxt_refclk_read(bp, NULL);
- WRITE_ONCE(ptp->old_time, ptp->current_time);
- spin_unlock_bh(&ptp->ptp_lock);
- ptp_schedule_worker(ptp->ptp_clock, 0);
- }
-}
-
static const struct ptp_clock_info bnxt_ptp_caps = {
.owner = THIS_MODULE,
.name = "bnxt clock",
@@ -439,6 +431,7 @@ int bnxt_ptp_init(struct bnxt *bp)
ptp->cc.shift = 0;
ptp->cc.mult = 1;
+ ptp->next_overflow_check = jiffies + BNXT_PHC_OVERFLOW_PERIOD;
timecounter_init(&ptp->tc, &ptp->cc, ktime_to_ns(ktime_get_real()));
ptp->ptp_info = bnxt_ptp_caps;
@@ -450,7 +443,13 @@ int bnxt_ptp_init(struct bnxt *bp)
bnxt_unmap_ptp_regs(bp);
return err;
}
-
+ if (bp->flags & BNXT_FLAG_CHIP_P5) {
+ spin_lock_bh(&ptp->ptp_lock);
+ ptp->current_time = bnxt_refclk_read(bp, NULL);
+ WRITE_ONCE(ptp->old_time, ptp->current_time);
+ spin_unlock_bh(&ptp->ptp_lock);
+ ptp_schedule_worker(ptp->ptp_clock, 0);
+ }
return 0;
}
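The 19-hour constant follows from the width of the PHC: a 48-bit nanosecond counter wraps after 2^48 ns, about 78.2 hours, and the timecounter must be sampled at least once per half wrap period (about 39 hours) to detect the overflow, so 19 hours leaves roughly a 2x margin. A quick standalone check of the arithmetic:

#include <stdio.h>

int main(void)
{
	unsigned long long wrap_ns = 1ULL << 48;	/* 48-bit PHC */
	double wrap_h = wrap_ns / 1e9 / 3600.0;		/* ~78.2 hours */

	printf("wrap %.1f h, half period %.1f h, chosen interval 19 h\n",
	       wrap_h, wrap_h / 2.0);			/* 78.2, 39.1 */
	return 0;
}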
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.h
index 6b6245750e20..524f1c272054 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.h
@@ -10,8 +10,8 @@
#ifndef BNXT_PTP_H
#define BNXT_PTP_H
-#define BNXT_PTP_GRC_WIN 5
-#define BNXT_PTP_GRC_WIN_BASE 0x5000
+#define BNXT_PTP_GRC_WIN 6
+#define BNXT_PTP_GRC_WIN_BASE 0x6000
#define BNXT_MAX_PHC_DRIFT 31000000
#define BNXT_LO_TIMER_MASK 0x0000ffffffffUL
@@ -19,7 +19,8 @@
#define BNXT_PTP_QTS_TIMEOUT 1000
#define BNXT_PTP_QTS_TX_ENABLES (PORT_TS_QUERY_REQ_ENABLES_PTP_SEQ_ID | \
- PORT_TS_QUERY_REQ_ENABLES_TS_REQ_TIMEOUT)
+ PORT_TS_QUERY_REQ_ENABLES_TS_REQ_TIMEOUT | \
+ PORT_TS_QUERY_REQ_ENABLES_PTP_HDR_OFFSET)
struct bnxt_ptp_cfg {
struct ptp_clock_info ptp_info;
@@ -32,7 +33,12 @@ struct bnxt_ptp_cfg {
u64 current_time;
u64 old_time;
unsigned long next_period;
+ unsigned long next_overflow_check;
+ /* 48-bit PHC overflows in 78 hours. Check overflow every 19 hours. */
+ #define BNXT_PHC_OVERFLOW_PERIOD (19 * 3600 * HZ)
+
u16 tx_seqid;
+ u16 tx_hdr_off;
struct bnxt *bp;
atomic_t tx_avail;
#define BNXT_MAX_TX_TS 1
@@ -70,12 +76,11 @@ do { \
((dst) = READ_ONCE(src))
#endif
-int bnxt_ptp_parse(struct sk_buff *skb, u16 *seq_id);
+int bnxt_ptp_parse(struct sk_buff *skb, u16 *seq_id, u16 *hdr_off);
int bnxt_hwtstamp_set(struct net_device *dev, struct ifreq *ifr);
int bnxt_hwtstamp_get(struct net_device *dev, struct ifreq *ifr);
int bnxt_get_tx_ts_p5(struct bnxt *bp, struct sk_buff *skb);
int bnxt_get_rx_ts_p5(struct bnxt *bp, u64 *ts, u32 pkt_ts);
-void bnxt_ptp_start(struct bnxt *bp);
int bnxt_ptp_init(struct bnxt *bp);
void bnxt_ptp_clear(struct bnxt *bp);
#endif
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
index a918e374f3c5..187ff643ad2a 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
@@ -479,16 +479,17 @@ struct bnxt_en_dev *bnxt_ulp_probe(struct net_device *dev)
if (!edev)
return ERR_PTR(-ENOMEM);
edev->en_ops = &bnxt_en_ops_tbl;
- if (bp->flags & BNXT_FLAG_ROCEV1_CAP)
- edev->flags |= BNXT_EN_FLAG_ROCEV1_CAP;
- if (bp->flags & BNXT_FLAG_ROCEV2_CAP)
- edev->flags |= BNXT_EN_FLAG_ROCEV2_CAP;
edev->net = dev;
edev->pdev = bp->pdev;
edev->l2_db_size = bp->db_size;
edev->l2_db_size_nc = bp->db_size;
bp->edev = edev;
}
+ edev->flags &= ~BNXT_EN_FLAG_ROCE_CAP;
+ if (bp->flags & BNXT_FLAG_ROCEV1_CAP)
+ edev->flags |= BNXT_EN_FLAG_ROCEV1_CAP;
+ if (bp->flags & BNXT_FLAG_ROCEV2_CAP)
+ edev->flags |= BNXT_EN_FLAG_ROCEV2_CAP;
return bp->edev;
}
EXPORT_SYMBOL(bnxt_ulp_probe);
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
index 41f7f078cd27..db74241935ab 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
@@ -1640,7 +1640,8 @@ static void bcmgenet_power_up(struct bcmgenet_priv *priv,
switch (mode) {
case GENET_POWER_PASSIVE:
- reg &= ~(EXT_PWR_DOWN_DLL | EXT_PWR_DOWN_BIAS);
+ reg &= ~(EXT_PWR_DOWN_DLL | EXT_PWR_DOWN_BIAS |
+ EXT_ENERGY_DET_MASK);
if (GENET_IS_V5(priv)) {
reg &= ~(EXT_PWR_DOWN_PHY_EN |
EXT_PWR_DOWN_PHY_RD |
@@ -3237,15 +3238,21 @@ static void bcmgenet_get_hw_addr(struct bcmgenet_priv *priv,
/* Returns a reusable dma control register value */
static u32 bcmgenet_dma_disable(struct bcmgenet_priv *priv)
{
+ unsigned int i;
u32 reg;
u32 dma_ctrl;
/* disable DMA */
dma_ctrl = 1 << (DESC_INDEX + DMA_RING_BUF_EN_SHIFT) | DMA_EN;
+ for (i = 0; i < priv->hw_params->tx_queues; i++)
+ dma_ctrl |= (1 << (i + DMA_RING_BUF_EN_SHIFT));
reg = bcmgenet_tdma_readl(priv, DMA_CTRL);
reg &= ~dma_ctrl;
bcmgenet_tdma_writel(priv, reg, DMA_CTRL);
+ dma_ctrl = 1 << (DESC_INDEX + DMA_RING_BUF_EN_SHIFT) | DMA_EN;
+ for (i = 0; i < priv->hw_params->rx_queues; i++)
+ dma_ctrl |= (1 << (i + DMA_RING_BUF_EN_SHIFT));
reg = bcmgenet_rdma_readl(priv, DMA_CTRL);
reg &= ~dma_ctrl;
bcmgenet_rdma_writel(priv, reg, DMA_CTRL);
@@ -3292,7 +3299,6 @@ static int bcmgenet_open(struct net_device *dev)
{
struct bcmgenet_priv *priv = netdev_priv(dev);
unsigned long dma_ctrl;
- u32 reg;
int ret;
netif_dbg(priv, ifup, dev, "bcmgenet_open\n");
@@ -3318,12 +3324,6 @@ static int bcmgenet_open(struct net_device *dev)
bcmgenet_set_hw_addr(priv, dev->dev_addr);
- if (priv->internal_phy) {
- reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT);
- reg |= EXT_ENERGY_DET_MASK;
- bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT);
- }
-
/* Disable RX/TX DMA and flush TX queues */
dma_ctrl = bcmgenet_dma_disable(priv);
@@ -4139,7 +4139,6 @@ static int bcmgenet_resume(struct device *d)
struct bcmgenet_priv *priv = netdev_priv(dev);
struct bcmgenet_rxnfc_rule *rule;
unsigned long dma_ctrl;
- u32 reg;
int ret;
if (!netif_running(dev))
@@ -4176,12 +4175,6 @@ static int bcmgenet_resume(struct device *d)
if (rule->state != BCMGENET_RXNFC_STATE_UNUSED)
bcmgenet_hfb_create_rxnfc_filter(priv, rule);
- if (priv->internal_phy) {
- reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT);
- reg |= EXT_ENERGY_DET_MASK;
- bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT);
- }
-
/* Disable RX/TX DMA and flush TX queues */
dma_ctrl = bcmgenet_dma_disable(priv);
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c b/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c
index facde824bcaa..e31a5a397f11 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c
@@ -186,12 +186,6 @@ int bcmgenet_wol_power_down_cfg(struct bcmgenet_priv *priv,
reg |= CMD_RX_EN;
bcmgenet_umac_writel(priv, reg, UMAC_CMD);
- if (priv->hw_params->flags & GENET_HAS_EXT) {
- reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT);
- reg &= ~EXT_ENERGY_DET_MASK;
- bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT);
- }
-
reg = UMAC_IRQ_MPD_R;
if (hfb_enable)
reg |= UMAC_IRQ_HFB_SM | UMAC_IRQ_HFB_MM;
diff --git a/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c b/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c
index 4cddd628d41b..9ed3d1ab2ca5 100644
--- a/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c
+++ b/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c
@@ -420,7 +420,7 @@ static int cn23xx_pf_setup_global_input_regs(struct octeon_device *oct)
* bits 32:47 indicate the PVF num.
*/
for (q_no = 0; q_no < ern; q_no++) {
- reg_val = oct->pcie_port << CN23XX_PKT_INPUT_CTL_MAC_NUM_POS;
+ reg_val = (u64)oct->pcie_port << CN23XX_PKT_INPUT_CTL_MAC_NUM_POS;
/* for VF assigned queues. */
if (q_no < oct->sriov_info.pf_srn) {
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index 9a2b166d651e..dbf9a0e6601d 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -2643,6 +2643,9 @@ static void detach_ulds(struct adapter *adap)
{
unsigned int i;
+ if (!is_uld(adap))
+ return;
+
mutex_lock(&uld_mutex);
list_del(&adap->list_node);
@@ -7141,10 +7144,13 @@ static void remove_one(struct pci_dev *pdev)
*/
destroy_workqueue(adapter->workq);
- if (is_uld(adapter)) {
- detach_ulds(adapter);
- t4_uld_clean_up(adapter);
- }
+ detach_ulds(adapter);
+
+ for_each_port(adapter, i)
+ if (adapter->port[i]->reg_state == NETREG_REGISTERED)
+ unregister_netdev(adapter->port[i]);
+
+ t4_uld_clean_up(adapter);
adap_free_hma_mem(adapter);
@@ -7152,10 +7158,6 @@ static void remove_one(struct pci_dev *pdev)
cxgb4_free_mps_ref_entries(adapter);
- for_each_port(adapter, i)
- if (adapter->port[i]->reg_state == NETREG_REGISTERED)
- unregister_netdev(adapter->port[i]);
-
debugfs_remove_recursive(adapter->debugfs_root);
if (!is_t4(adapter->params.chip))
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c
index 743af9e654aa..17faac715882 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c
@@ -581,6 +581,9 @@ void t4_uld_clean_up(struct adapter *adap)
{
unsigned int i;
+ if (!is_uld(adap))
+ return;
+
mutex_lock(&uld_mutex);
for (i = 0; i < CXGB4_ULD_MAX; i++) {
if (!adap->uld[i].handle)
diff --git a/drivers/net/ethernet/dec/tulip/winbond-840.c b/drivers/net/ethernet/dec/tulip/winbond-840.c
index f6ff1f76eacb..1876f15dd827 100644
--- a/drivers/net/ethernet/dec/tulip/winbond-840.c
+++ b/drivers/net/ethernet/dec/tulip/winbond-840.c
@@ -357,7 +357,7 @@ static int w840_probe1(struct pci_dev *pdev, const struct pci_device_id *ent)
int i, option = find_cnt < MAX_UNITS ? options[find_cnt] : 0;
void __iomem *ioaddr;
- i = pci_enable_device(pdev);
+ i = pcim_enable_device(pdev);
if (i) return i;
pci_set_master(pdev);
@@ -379,7 +379,7 @@ static int w840_probe1(struct pci_dev *pdev, const struct pci_device_id *ent)
ioaddr = pci_iomap(pdev, TULIP_BAR, netdev_res_size);
if (!ioaddr)
- goto err_out_free_res;
+ goto err_out_netdev;
for (i = 0; i < 3; i++)
((__le16 *)dev->dev_addr)[i] = cpu_to_le16(eeprom_read(ioaddr, i));
@@ -458,8 +458,6 @@ static int w840_probe1(struct pci_dev *pdev, const struct pci_device_id *ent)
err_out_cleardev:
pci_iounmap(pdev, ioaddr);
-err_out_free_res:
- pci_release_regions(pdev);
err_out_netdev:
free_netdev (dev);
return -ENODEV;
@@ -1526,7 +1524,6 @@ static void w840_remove1(struct pci_dev *pdev)
if (dev) {
struct netdev_private *np = netdev_priv(dev);
unregister_netdev(dev);
- pci_release_regions(pdev);
pci_iounmap(pdev, np->base_addr);
free_netdev(dev);
}
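pcim_enable_device() is the device-managed (devres) variant of pci_enable_device(): once it has been called, the device is disabled automatically on driver detach, and regions requested afterwards are released the same way, which is why the explicit pci_release_regions() calls above can be dropped. A hedged sketch of the resulting probe shape (function and region names are illustrative):

/* No error path needs pci_release_regions(): devres undoes
 * everything when probe fails or the driver detaches.
 */
static int sketch_probe(struct pci_dev *pdev)
{
	int err;

	err = pcim_enable_device(pdev);	/* managed enable */
	if (err)
		return err;

	pci_set_master(pdev);

	err = pci_request_regions(pdev, "sketch-drv");
	if (err)
		return err;	/* device auto-disabled by devres */

	/* ... map BARs, allocate and register the netdev ... */
	return 0;
}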
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c
index f3d12d0714fb..98cc0133c343 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c
@@ -2770,32 +2770,32 @@ static int dpaa2_switch_ctrl_if_setup(struct ethsw_core *ethsw)
if (err)
return err;
- err = dpaa2_switch_seed_bp(ethsw);
- if (err)
- goto err_free_dpbp;
-
err = dpaa2_switch_alloc_rings(ethsw);
if (err)
- goto err_drain_dpbp;
+ goto err_free_dpbp;
err = dpaa2_switch_setup_dpio(ethsw);
if (err)
goto err_destroy_rings;
+ err = dpaa2_switch_seed_bp(ethsw);
+ if (err)
+ goto err_deregister_dpio;
+
err = dpsw_ctrl_if_enable(ethsw->mc_io, 0, ethsw->dpsw_handle);
if (err) {
dev_err(ethsw->dev, "dpsw_ctrl_if_enable err %d\n", err);
- goto err_deregister_dpio;
+ goto err_drain_dpbp;
}
return 0;
+err_drain_dpbp:
+ dpaa2_switch_drain_bp(ethsw);
err_deregister_dpio:
dpaa2_switch_free_dpio(ethsw);
err_destroy_rings:
dpaa2_switch_destroy_rings(ethsw);
-err_drain_dpbp:
- dpaa2_switch_drain_bp(ethsw);
err_free_dpbp:
dpaa2_switch_free_dpbp(ethsw);
@@ -3038,26 +3038,30 @@ static int dpaa2_switch_port_init(struct ethsw_port_priv *port_priv, u16 port)
return err;
}
-static void dpaa2_switch_takedown(struct fsl_mc_device *sw_dev)
+static void dpaa2_switch_ctrl_if_teardown(struct ethsw_core *ethsw)
+{
+ dpsw_ctrl_if_disable(ethsw->mc_io, 0, ethsw->dpsw_handle);
+ dpaa2_switch_free_dpio(ethsw);
+ dpaa2_switch_destroy_rings(ethsw);
+ dpaa2_switch_drain_bp(ethsw);
+ dpaa2_switch_free_dpbp(ethsw);
+}
+
+static void dpaa2_switch_teardown(struct fsl_mc_device *sw_dev)
{
struct device *dev = &sw_dev->dev;
struct ethsw_core *ethsw = dev_get_drvdata(dev);
int err;
+ dpaa2_switch_ctrl_if_teardown(ethsw);
+
+ destroy_workqueue(ethsw->workqueue);
+
err = dpsw_close(ethsw->mc_io, 0, ethsw->dpsw_handle);
if (err)
dev_warn(dev, "dpsw_close err %d\n", err);
}
-static void dpaa2_switch_ctrl_if_teardown(struct ethsw_core *ethsw)
-{
- dpsw_ctrl_if_disable(ethsw->mc_io, 0, ethsw->dpsw_handle);
- dpaa2_switch_free_dpio(ethsw);
- dpaa2_switch_destroy_rings(ethsw);
- dpaa2_switch_drain_bp(ethsw);
- dpaa2_switch_free_dpbp(ethsw);
-}
-
static int dpaa2_switch_remove(struct fsl_mc_device *sw_dev)
{
struct ethsw_port_priv *port_priv;
@@ -3068,8 +3072,6 @@ static int dpaa2_switch_remove(struct fsl_mc_device *sw_dev)
dev = &sw_dev->dev;
ethsw = dev_get_drvdata(dev);
- dpaa2_switch_ctrl_if_teardown(ethsw);
-
dpaa2_switch_teardown_irqs(sw_dev);
dpsw_disable(ethsw->mc_io, 0, ethsw->dpsw_handle);
@@ -3084,9 +3086,7 @@ static int dpaa2_switch_remove(struct fsl_mc_device *sw_dev)
kfree(ethsw->acls);
kfree(ethsw->ports);
- dpaa2_switch_takedown(sw_dev);
-
- destroy_workqueue(ethsw->workqueue);
+ dpaa2_switch_teardown(sw_dev);
fsl_mc_portal_free(ethsw->mc_io);
@@ -3199,7 +3199,7 @@ static int dpaa2_switch_probe(struct fsl_mc_device *sw_dev)
GFP_KERNEL);
if (!(ethsw->ports)) {
err = -ENOMEM;
- goto err_takedown;
+ goto err_teardown;
}
ethsw->fdbs = kcalloc(ethsw->sw_attr.num_ifs, sizeof(*ethsw->fdbs),
@@ -3270,8 +3270,8 @@ err_free_fdbs:
err_free_ports:
kfree(ethsw->ports);
-err_takedown:
- dpaa2_switch_takedown(sw_dev);
+err_teardown:
+ dpaa2_switch_teardown(sw_dev);
err_free_cmdport:
fsl_mc_portal_free(ethsw->mc_io);
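The dpaa2-switch fix above is an instance of the standard rule that teardown must mirror setup in reverse acquisition order, including on the error paths. The general goto-ladder shape, with hypothetical acquire/release helpers:

/* Resources are acquired A, B, C; the error labels unwind in exactly
 * the reverse order, each releasing only what is actually held.
 */
static int sketch_setup(void)
{
	int err;

	err = acquire_a();
	if (err)
		return err;

	err = acquire_b();
	if (err)
		goto err_release_a;

	err = acquire_c();
	if (err)
		goto err_release_b;

	return 0;

err_release_b:
	release_b();
err_release_a:
	release_a();
	return err;
}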
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index 8aea707a65a7..7e4c4980ced7 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -3843,13 +3843,13 @@ fec_drv_remove(struct platform_device *pdev)
if (of_phy_is_fixed_link(np))
of_phy_deregister_fixed_link(np);
of_node_put(fep->phy_node);
- free_netdev(ndev);
clk_disable_unprepare(fep->clk_ahb);
clk_disable_unprepare(fep->clk_ipg);
pm_runtime_put_noidle(&pdev->dev);
pm_runtime_disable(&pdev->dev);
+ free_netdev(ndev);
return 0;
}
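The reordering matters because the private data returned by netdev_priv() lives inside the net_device allocation itself, so free_netdev() must be the last use of it. Schematically, using the real fec field names from the hunk above:

/* The private area is embedded in the net_device allocation: */
struct net_device *ndev = alloc_etherdev(sizeof(struct fec_enet_private));
struct fec_enet_private *fep = netdev_priv(ndev);	/* points into ndev */

/* remove path: finish every use of fep first ... */
clk_disable_unprepare(fep->clk_ahb);	/* still valid here */
clk_disable_unprepare(fep->clk_ipg);

/* ... and only then free the allocation that contains it */
free_netdev(ndev);			/* fep is dangling from here on */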
diff --git a/drivers/net/ethernet/freescale/fman/mac.c b/drivers/net/ethernet/freescale/fman/mac.c
index 46ecb42f2ef8..d9fc5c456bf3 100644
--- a/drivers/net/ethernet/freescale/fman/mac.c
+++ b/drivers/net/ethernet/freescale/fman/mac.c
@@ -524,6 +524,7 @@ static void setup_memac(struct mac_device *mac_dev)
| SUPPORTED_Autoneg \
| SUPPORTED_Pause \
| SUPPORTED_Asym_Pause \
+ | SUPPORTED_FIBRE \
| SUPPORTED_MII)
static DEFINE_MUTEX(eth_lock);
diff --git a/drivers/net/ethernet/google/gve/gve_main.c b/drivers/net/ethernet/google/gve/gve_main.c
index 867e87af3432..099a2bc5ae67 100644
--- a/drivers/net/ethernet/google/gve/gve_main.c
+++ b/drivers/net/ethernet/google/gve/gve_main.c
@@ -1469,7 +1469,7 @@ static int gve_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
err = pci_enable_device(pdev);
if (err)
- return -ENXIO;
+ return err;
err = pci_request_regions(pdev, "gvnic-cfg");
if (err)
@@ -1477,19 +1477,12 @@ static int gve_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
pci_set_master(pdev);
- err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
+ err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
if (err) {
dev_err(&pdev->dev, "Failed to set dma mask: err=%d\n", err);
goto abort_with_pci_region;
}
- err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
- if (err) {
- dev_err(&pdev->dev,
- "Failed to set consistent dma mask: err=%d\n", err);
- goto abort_with_pci_region;
- }
-
reg_bar = pci_iomap(pdev, GVE_REGISTER_BAR, 0);
if (!reg_bar) {
dev_err(&pdev->dev, "Failed to map pci bar!\n");
@@ -1512,6 +1505,7 @@ static int gve_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
dev = alloc_etherdev_mqs(sizeof(*priv), max_tx_queues, max_rx_queues);
if (!dev) {
dev_err(&pdev->dev, "could not allocate netdev\n");
+ err = -ENOMEM;
goto abort_with_db_bar;
}
SET_NETDEV_DEV(dev, &pdev->dev);
@@ -1565,7 +1559,7 @@ static int gve_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
err = register_netdev(dev);
if (err)
- goto abort_with_wq;
+ goto abort_with_gve_init;
dev_info(&pdev->dev, "GVE version %s\n", gve_version_str);
dev_info(&pdev->dev, "GVE queue format %d\n", (int)priv->queue_format);
@@ -1573,6 +1567,9 @@ static int gve_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
queue_work(priv->gve_wq, &priv->service_task);
return 0;
+abort_with_gve_init:
+ gve_teardown_priv_resources(priv);
+
abort_with_wq:
destroy_workqueue(priv->gve_wq);
@@ -1590,7 +1587,7 @@ abort_with_pci_region:
abort_with_enabled:
pci_disable_device(pdev);
- return -ENXIO;
+ return err;
}
static void gve_remove(struct pci_dev *pdev)
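dma_set_mask_and_coherent() sets both the streaming and the coherent DMA mask in one call, replacing the legacy pci_set_dma_mask()/pci_set_consistent_dma_mask() pair the hunk above removes. A common fallback idiom, shown as a sketch (gve itself requires 64-bit DMA, so the 32-bit fallback here is purely illustrative):

/* Prefer 64-bit DMA, fall back to 32-bit; one call covers both
 * the streaming and the coherent mask.
 */
err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
if (err) {
	err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
	if (err) {
		dev_err(&pdev->dev, "no usable DMA configuration\n");
		goto abort_with_pci_region;
	}
}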
diff --git a/drivers/net/ethernet/google/gve/gve_rx_dqo.c b/drivers/net/ethernet/google/gve/gve_rx_dqo.c
index 77bb8227f89b..8500621b2cd4 100644
--- a/drivers/net/ethernet/google/gve/gve_rx_dqo.c
+++ b/drivers/net/ethernet/google/gve/gve_rx_dqo.c
@@ -566,13 +566,6 @@ static int gve_rx_dqo(struct napi_struct *napi, struct gve_rx_ring *rx,
return 0;
}
- /* Prefetch the payload header. */
- prefetch((char *)buf_state->addr + buf_state->page_info.page_offset);
-#if L1_CACHE_BYTES < 128
- prefetch((char *)buf_state->addr + buf_state->page_info.page_offset +
- L1_CACHE_BYTES);
-#endif
-
if (eop && buf_len <= priv->rx_copybreak) {
rx->skb_head = gve_rx_copy(priv->dev, napi,
&buf_state->page_info, buf_len, 0);
diff --git a/drivers/net/ethernet/hisilicon/hip04_eth.c b/drivers/net/ethernet/hisilicon/hip04_eth.c
index 12f6c2442a7a..e53512f6878a 100644
--- a/drivers/net/ethernet/hisilicon/hip04_eth.c
+++ b/drivers/net/ethernet/hisilicon/hip04_eth.c
@@ -131,7 +131,7 @@
/* buf unit size is cache_line_size, which is 64, so the shift is 6 */
#define PPE_BUF_SIZE_SHIFT 6
#define PPE_TX_BUF_HOLD BIT(31)
-#define CACHE_LINE_MASK 0x3F
+#define SOC_CACHE_LINE_MASK 0x3F
#else
#define PPE_CFG_QOS_VMID_GRP_SHIFT 8
#define PPE_CFG_RX_CTRL_ALIGN_SHIFT 11
@@ -531,8 +531,8 @@ hip04_mac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
#if defined(CONFIG_HI13X1_GMAC)
desc->cfg = (__force u32)cpu_to_be32(TX_CLEAR_WB | TX_FINISH_CACHE_INV
| TX_RELEASE_TO_PPE | priv->port << TX_POOL_SHIFT);
- desc->data_offset = (__force u32)cpu_to_be32(phys & CACHE_LINE_MASK);
- desc->send_addr = (__force u32)cpu_to_be32(phys & ~CACHE_LINE_MASK);
+ desc->data_offset = (__force u32)cpu_to_be32(phys & SOC_CACHE_LINE_MASK);
+ desc->send_addr = (__force u32)cpu_to_be32(phys & ~SOC_CACHE_LINE_MASK);
#else
desc->cfg = (__force u32)cpu_to_be32(TX_CLEAR_WB | TX_FINISH_CACHE_INV);
desc->send_addr = (__force u32)cpu_to_be32(phys);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h b/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h
index 0a6cda309b24..aa86a81c8f4a 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h
@@ -98,6 +98,7 @@ struct hclgevf_mbx_resp_status {
u32 origin_mbx_msg;
bool received_resp;
int resp_status;
+ u16 match_id;
u8 additional_info[HCLGE_MBX_MAX_RESP_DATA_SIZE];
};
@@ -143,7 +144,8 @@ struct hclge_mbx_vf_to_pf_cmd {
u8 mbx_need_resp;
u8 rsv1[1];
u8 msg_len;
- u8 rsv2[3];
+ u8 rsv2;
+ u16 match_id;
struct hclge_vf_to_pf_msg msg;
};
@@ -153,7 +155,8 @@ struct hclge_mbx_pf_to_vf_cmd {
u8 dest_vfid;
u8 rsv[3];
u8 msg_len;
- u8 rsv1[3];
+ u8 rsv1;
+ u16 match_id;
struct hclge_pf_to_vf_msg msg;
};
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
index dd3354a57c62..ebeaf12e409b 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
@@ -9552,13 +9552,17 @@ static int hclge_set_vport_vlan_filter(struct hclge_vport *vport, bool enable)
if (ret)
return ret;
- if (test_bit(HNAE3_DEV_SUPPORT_PORT_VLAN_BYPASS_B, ae_dev->caps))
+ if (test_bit(HNAE3_DEV_SUPPORT_PORT_VLAN_BYPASS_B, ae_dev->caps)) {
ret = hclge_set_port_vlan_filter_bypass(hdev, vport->vport_id,
!enable);
- else if (!vport->vport_id)
+ } else if (!vport->vport_id) {
+ if (test_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, ae_dev->caps))
+ enable = false;
+
ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
HCLGE_FILTER_FE_INGRESS,
enable, 0);
+ }
return ret;
}
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
index e10a2c36b706..c0a478ae9583 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
@@ -47,6 +47,7 @@ static int hclge_gen_resp_to_vf(struct hclge_vport *vport,
resp_pf_to_vf->dest_vfid = vf_to_pf_req->mbx_src_vfid;
resp_pf_to_vf->msg_len = vf_to_pf_req->msg_len;
+ resp_pf_to_vf->match_id = vf_to_pf_req->match_id;
resp_pf_to_vf->msg.code = HCLGE_MBX_PF_VF_RESP;
resp_pf_to_vf->msg.vf_mbx_msg_code = vf_to_pf_req->msg.code;
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.c
index 3b1f84502e36..befa9bcc2f2f 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.c
@@ -5,9 +5,27 @@
#include "hclge_main.h"
#include "hnae3.h"
+static int hclge_ptp_get_cycle(struct hclge_dev *hdev)
+{
+ struct hclge_ptp *ptp = hdev->ptp;
+
+ ptp->cycle.quo = readl(hdev->ptp->io_base + HCLGE_PTP_CYCLE_QUO_REG) &
+ HCLGE_PTP_CYCLE_QUO_MASK;
+ ptp->cycle.numer = readl(hdev->ptp->io_base + HCLGE_PTP_CYCLE_NUM_REG);
+ ptp->cycle.den = readl(hdev->ptp->io_base + HCLGE_PTP_CYCLE_DEN_REG);
+
+ if (ptp->cycle.den == 0) {
+ dev_err(&hdev->pdev->dev, "invalid ptp cycle denominator!\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
static int hclge_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
{
struct hclge_dev *hdev = hclge_ptp_get_hdev(ptp);
+ struct hclge_ptp_cycle *cycle = &hdev->ptp->cycle;
u64 adj_val, adj_base, diff;
unsigned long flags;
bool is_neg = false;
@@ -18,7 +36,7 @@ static int hclge_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
is_neg = true;
}
- adj_base = HCLGE_PTP_CYCLE_ADJ_BASE * HCLGE_PTP_CYCLE_ADJ_UNIT;
+ adj_base = (u64)cycle->quo * (u64)cycle->den + (u64)cycle->numer;
adj_val = adj_base * ppb;
diff = div_u64(adj_val, 1000000000ULL);
@@ -29,16 +47,16 @@ static int hclge_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
/* This clock cycle is defined by three parts: quotient, numerator
* and denominator. For example, 2.5ns, the quotient is 2,
- * denominator is fixed to HCLGE_PTP_CYCLE_ADJ_UNIT, and numerator
- * is 0.5 * HCLGE_PTP_CYCLE_ADJ_UNIT.
+ * denominator is ptp->cycle.den, and numerator
+ * is 0.5 * ptp->cycle.den.
*/
- quo = div_u64_rem(adj_val, HCLGE_PTP_CYCLE_ADJ_UNIT, &numerator);
+ quo = div_u64_rem(adj_val, cycle->den, &numerator);
spin_lock_irqsave(&hdev->ptp->lock, flags);
- writel(quo, hdev->ptp->io_base + HCLGE_PTP_CYCLE_QUO_REG);
+ writel(quo & HCLGE_PTP_CYCLE_QUO_MASK,
+ hdev->ptp->io_base + HCLGE_PTP_CYCLE_QUO_REG);
writel(numerator, hdev->ptp->io_base + HCLGE_PTP_CYCLE_NUM_REG);
- writel(HCLGE_PTP_CYCLE_ADJ_UNIT,
- hdev->ptp->io_base + HCLGE_PTP_CYCLE_DEN_REG);
+ writel(cycle->den, hdev->ptp->io_base + HCLGE_PTP_CYCLE_DEN_REG);
writel(HCLGE_PTP_CYCLE_ADJ_EN,
hdev->ptp->io_base + HCLGE_PTP_CYCLE_CFG_REG);
spin_unlock_irqrestore(&hdev->ptp->lock, flags);
@@ -475,6 +493,10 @@ int hclge_ptp_init(struct hclge_dev *hdev)
ret = hclge_ptp_create_clock(hdev);
if (ret)
return ret;
+
+ ret = hclge_ptp_get_cycle(hdev);
+ if (ret)
+ return ret;
}
ret = hclge_ptp_int_en(hdev, true);
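
The cycle value handled above is a fixed-point fraction: the nominal period in nanoseconds is quo + numer/den, with all three parts now read from registers instead of assumed constants. hclge_ptp_adjfreq() scales the whole quantity (quo * den + numer) by the requested parts-per-billion offset and splits the result back into quotient and remainder with div_u64_rem(). A worked sketch with made-up register values (a 2.5 ns cycle, den = 100000000, speeding up by 50 ppb):

	u64 den = 100000000ULL;
	u64 base = 2ULL * den + 50000000ULL;		/* quo*den + numer = 250000000 */
	u64 diff = div_u64(base * 50, 1000000000ULL);	/* 50 ppb -> 12 */
	u64 adj = base + diff;				/* 250000012 */
	u32 numerator;
	u64 quo = div_u64_rem(adj, den, &numerator);	/* quo = 2, numerator = 50000012 */

The quotient is then masked with HCLGE_PTP_CYCLE_QUO_MASK before being written back, since the QUO register only holds 8 bits.
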
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.h
index 5a202b775471..dbf5f4c08019 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.h
@@ -29,6 +29,7 @@
#define HCLGE_PTP_TIME_ADJ_REG 0x60
#define HCLGE_PTP_TIME_ADJ_EN BIT(0)
#define HCLGE_PTP_CYCLE_QUO_REG 0x64
+#define HCLGE_PTP_CYCLE_QUO_MASK GENMASK(7, 0)
#define HCLGE_PTP_CYCLE_DEN_REG 0x68
#define HCLGE_PTP_CYCLE_NUM_REG 0x6C
#define HCLGE_PTP_CYCLE_CFG_REG 0x70
@@ -37,9 +38,7 @@
#define HCLGE_PTP_CUR_TIME_SEC_L_REG 0x78
#define HCLGE_PTP_CUR_TIME_NSEC_REG 0x7C
-#define HCLGE_PTP_CYCLE_ADJ_BASE 2
#define HCLGE_PTP_CYCLE_ADJ_MAX 500000000
-#define HCLGE_PTP_CYCLE_ADJ_UNIT 100000000
#define HCLGE_PTP_SEC_H_OFFSET 32u
#define HCLGE_PTP_SEC_L_MASK GENMASK(31, 0)
@@ -47,6 +46,12 @@
#define HCLGE_PTP_FLAG_TX_EN 1
#define HCLGE_PTP_FLAG_RX_EN 2
+struct hclge_ptp_cycle {
+ u32 quo;
+ u32 numer;
+ u32 den;
+};
+
struct hclge_ptp {
struct hclge_dev *hdev;
struct ptp_clock *clock;
@@ -58,6 +63,7 @@ struct hclge_ptp {
spinlock_t lock; /* protects ptp registers */
u32 ptp_cfg;
u32 last_tx_seqid;
+ struct hclge_ptp_cycle cycle;
unsigned long tx_start;
unsigned long tx_cnt;
unsigned long tx_skipped;
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
index 52eaf82b7cd7..8784d61e833f 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
@@ -2641,6 +2641,16 @@ static int hclgevf_rss_init_hw(struct hclgevf_dev *hdev)
static int hclgevf_init_vlan_config(struct hclgevf_dev *hdev)
{
+ struct hnae3_handle *nic = &hdev->nic;
+ int ret;
+
+ ret = hclgevf_en_hw_strip_rxvtag(nic, true);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed to enable rx vlan offload, ret = %d\n", ret);
+ return ret;
+ }
+
return hclgevf_set_vlan_filter(&hdev->nic, htons(ETH_P_8021Q), 0,
false);
}
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c
index 9b17735b9f4c..772b2f8acd2e 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c
@@ -13,6 +13,7 @@ static int hclgevf_resp_to_errno(u16 resp_code)
return resp_code ? -resp_code : 0;
}
+#define HCLGEVF_MBX_MATCH_ID_START 1
static void hclgevf_reset_mbx_resp_status(struct hclgevf_dev *hdev)
{
/* this function should be called with mbx_resp.mbx_mutex held
@@ -21,6 +22,10 @@ static void hclgevf_reset_mbx_resp_status(struct hclgevf_dev *hdev)
hdev->mbx_resp.received_resp = false;
hdev->mbx_resp.origin_mbx_msg = 0;
hdev->mbx_resp.resp_status = 0;
+ /* Update match_id and ensure the value of match_id is not zero */
+ hdev->mbx_resp.match_id++;
+ if (hdev->mbx_resp.match_id == 0)
+ hdev->mbx_resp.match_id = HCLGEVF_MBX_MATCH_ID_START;
memset(hdev->mbx_resp.additional_info, 0, HCLGE_MBX_MAX_RESP_DATA_SIZE);
}
@@ -115,6 +120,7 @@ int hclgevf_send_mbx_msg(struct hclgevf_dev *hdev,
if (need_resp) {
mutex_lock(&hdev->mbx_resp.mbx_mutex);
hclgevf_reset_mbx_resp_status(hdev);
+ req->match_id = hdev->mbx_resp.match_id;
status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
if (status) {
dev_err(&hdev->pdev->dev,
@@ -211,6 +217,19 @@ void hclgevf_mbx_handler(struct hclgevf_dev *hdev)
resp->additional_info[i] = *temp;
temp++;
}
+
+ /* If match_id is not zero, the PF supports match_id.
+ * If the match_id is right, the VF got the right
+ * response; otherwise, ignore the response. The driver
+ * will clear hdev->mbx_resp before sending the next
+ * message that needs a response.
+ */
+ if (req->match_id) {
+ if (req->match_id == resp->match_id)
+ resp->received_resp = true;
+ } else {
+ resp->received_resp = true;
+ }
break;
case HCLGE_MBX_LINK_STAT_CHANGE:
case HCLGE_MBX_ASSERTING_RESET:
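
The match_id scheme above is a request/response correlator for the PF-VF mailbox: the VF stamps every request that expects a reply with a non-zero, monotonically increasing id, and only accepts a response carrying the same id; a response with a zero id comes from a PF that predates the field, in which case the VF keeps the old accept-anything behaviour. Condensed, the two halves look like this (locking and the surrounding command flow omitted):

	/* sender, under mbx_resp.mbx_mutex */
	resp->match_id++;
	if (resp->match_id == 0)		/* 0 is reserved for "no id" */
		resp->match_id = HCLGEVF_MBX_MATCH_ID_START;
	req->match_id = resp->match_id;

	/* receiver, on HCLGE_MBX_PF_VF_RESP */
	if (req->match_id == 0 || req->match_id == resp->match_id)
		resp->received_resp = true;	/* otherwise: stale reply, ignore */
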
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index 374a75d4faea..a775c69e4fd7 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -1731,7 +1731,6 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
tx_send_failed++;
tx_dropped++;
ret = NETDEV_TX_OK;
- ibmvnic_tx_scrq_flush(adapter, tx_scrq);
goto out;
}
@@ -1753,6 +1752,7 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
dev_kfree_skb_any(skb);
tx_send_failed++;
tx_dropped++;
+ ibmvnic_tx_scrq_flush(adapter, tx_scrq);
ret = NETDEV_TX_OK;
goto out;
}
@@ -2420,9 +2420,10 @@ out:
static void __ibmvnic_reset(struct work_struct *work)
{
- struct ibmvnic_rwi *rwi;
struct ibmvnic_adapter *adapter;
bool saved_state = false;
+ struct ibmvnic_rwi *tmprwi;
+ struct ibmvnic_rwi *rwi;
unsigned long flags;
u32 reset_state;
int rc = 0;
@@ -2489,7 +2490,7 @@ static void __ibmvnic_reset(struct work_struct *work)
} else {
rc = do_reset(adapter, rwi, reset_state);
}
- kfree(rwi);
+ tmprwi = rwi;
adapter->last_reset_time = jiffies;
if (rc)
@@ -2497,8 +2498,23 @@ static void __ibmvnic_reset(struct work_struct *work)
rwi = get_next_rwi(adapter);
+ /*
+ * If there is another reset queued, free the previous rwi
+ * and process the new reset even if previous reset failed
+ * (the previous reset could have failed because of a fail
+ * over for instance, so process the fail over).
+ *
+ * If there are no resets queued and the previous reset failed,
+ * the adapter would be in an undefined state. So retry the
+ * previous reset as a hard reset.
+ */
+ if (rwi)
+ kfree(tmprwi);
+ else if (rc)
+ rwi = tmprwi;
+
if (rwi && (rwi->reset_reason == VNIC_RESET_FAILOVER ||
- rwi->reset_reason == VNIC_RESET_MOBILITY))
+ rwi->reset_reason == VNIC_RESET_MOBILITY || rc))
adapter->force_reset_recovery = true;
}
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index d150dade06cf..757a54c39eef 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -7664,6 +7664,7 @@ err_flashmap:
err_ioremap:
free_netdev(netdev);
err_alloc_etherdev:
+ pci_disable_pcie_error_reporting(pdev);
pci_release_mem_regions(pdev);
err_pci_reg:
err_dma:
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c
index dbcae92bb18d..adfa2768f024 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c
@@ -2227,6 +2227,7 @@ err_sw_init:
err_ioremap:
free_netdev(netdev);
err_alloc_netdev:
+ pci_disable_pcie_error_reporting(pdev);
pci_release_mem_regions(pdev);
err_pci_reg:
err_dma:
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
index 3e822bad4851..2c9e4eeb7270 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
@@ -980,7 +980,7 @@ static void i40e_get_settings_link_up(struct i40e_hw *hw,
default:
/* if we got here and link is up something bad is afoot */
netdev_info(netdev,
- "WARNING: Link is up but PHY type 0x%x is not recognized.\n",
+ "WARNING: Link is up but PHY type 0x%x is not recognized, or incorrect cable is in use\n",
hw_link_info->phy_type);
}
@@ -5294,6 +5294,10 @@ flags_complete:
dev_warn(&pf->pdev->dev,
"Device configuration forbids SW from starting the LLDP agent.\n");
return -EINVAL;
+ case I40E_AQ_RC_EAGAIN:
+ dev_warn(&pf->pdev->dev,
+ "Stop FW LLDP agent command is still being processed, please try again in a second.\n");
+ return -EBUSY;
default:
dev_warn(&pf->pdev->dev,
"Starting FW LLDP agent failed: error: %s, %s\n",
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index 861e59a350bd..1d1f52756a93 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -4454,11 +4454,10 @@ int i40e_control_wait_tx_q(int seid, struct i40e_pf *pf, int pf_q,
}
/**
- * i40e_vsi_control_tx - Start or stop a VSI's rings
+ * i40e_vsi_enable_tx - Start a VSI's rings
* @vsi: the VSI being configured
- * @enable: start or stop the rings
**/
-static int i40e_vsi_control_tx(struct i40e_vsi *vsi, bool enable)
+static int i40e_vsi_enable_tx(struct i40e_vsi *vsi)
{
struct i40e_pf *pf = vsi->back;
int i, pf_q, ret = 0;
@@ -4467,7 +4466,7 @@ static int i40e_vsi_control_tx(struct i40e_vsi *vsi, bool enable)
for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
ret = i40e_control_wait_tx_q(vsi->seid, pf,
pf_q,
- false /*is xdp*/, enable);
+ false /*is xdp*/, true);
if (ret)
break;
@@ -4476,7 +4475,7 @@ static int i40e_vsi_control_tx(struct i40e_vsi *vsi, bool enable)
ret = i40e_control_wait_tx_q(vsi->seid, pf,
pf_q + vsi->alloc_queue_pairs,
- true /*is xdp*/, enable);
+ true /*is xdp*/, true);
if (ret)
break;
}
@@ -4574,32 +4573,25 @@ int i40e_control_wait_rx_q(struct i40e_pf *pf, int pf_q, bool enable)
}
/**
- * i40e_vsi_control_rx - Start or stop a VSI's rings
+ * i40e_vsi_enable_rx - Start a VSI's rings
* @vsi: the VSI being configured
- * @enable: start or stop the rings
**/
-static int i40e_vsi_control_rx(struct i40e_vsi *vsi, bool enable)
+static int i40e_vsi_enable_rx(struct i40e_vsi *vsi)
{
struct i40e_pf *pf = vsi->back;
int i, pf_q, ret = 0;
pf_q = vsi->base_queue;
for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
- ret = i40e_control_wait_rx_q(pf, pf_q, enable);
+ ret = i40e_control_wait_rx_q(pf, pf_q, true);
if (ret) {
dev_info(&pf->pdev->dev,
- "VSI seid %d Rx ring %d %sable timeout\n",
- vsi->seid, pf_q, (enable ? "en" : "dis"));
+ "VSI seid %d Rx ring %d enable timeout\n",
+ vsi->seid, pf_q);
break;
}
}
- /* Due to HW errata, on Rx disable only, the register can indicate done
- * before it really is. Needs 50ms to be sure
- */
- if (!enable)
- mdelay(50);
-
return ret;
}
@@ -4612,29 +4604,47 @@ int i40e_vsi_start_rings(struct i40e_vsi *vsi)
int ret = 0;
/* do rx first for enable and last for disable */
- ret = i40e_vsi_control_rx(vsi, true);
+ ret = i40e_vsi_enable_rx(vsi);
if (ret)
return ret;
- ret = i40e_vsi_control_tx(vsi, true);
+ ret = i40e_vsi_enable_tx(vsi);
return ret;
}
+#define I40E_DISABLE_TX_GAP_MSEC 50
+
/**
* i40e_vsi_stop_rings - Stop a VSI's rings
* @vsi: the VSI being configured
**/
void i40e_vsi_stop_rings(struct i40e_vsi *vsi)
{
+ struct i40e_pf *pf = vsi->back;
+ int pf_q, err, q_end;
+
/* When port TX is suspended, don't wait */
if (test_bit(__I40E_PORT_SUSPENDED, vsi->back->state))
return i40e_vsi_stop_rings_no_wait(vsi);
- /* do rx first for enable and last for disable
- * Ignore return value, we need to shutdown whatever we can
- */
- i40e_vsi_control_tx(vsi, false);
- i40e_vsi_control_rx(vsi, false);
+ q_end = vsi->base_queue + vsi->num_queue_pairs;
+ for (pf_q = vsi->base_queue; pf_q < q_end; pf_q++)
+ i40e_pre_tx_queue_cfg(&pf->hw, (u32)pf_q, false);
+
+ for (pf_q = vsi->base_queue; pf_q < q_end; pf_q++) {
+ err = i40e_control_wait_rx_q(pf, pf_q, false);
+ if (err)
+ dev_info(&pf->pdev->dev,
+ "VSI seid %d Rx ring %d dissable timeout\n",
+ vsi->seid, pf_q);
+ }
+
+ msleep(I40E_DISABLE_TX_GAP_MSEC);
+ for (pf_q = vsi->base_queue; pf_q < q_end; pf_q++)
+ wr32(&pf->hw, I40E_QTX_ENA(pf_q), 0);
+
+ i40e_vsi_wait_queues_disabled(vsi);
}
/**
@@ -7280,6 +7290,8 @@ static int i40e_validate_mqprio_qopt(struct i40e_vsi *vsi,
}
if (vsi->num_queue_pairs <
(mqprio_qopt->qopt.offset[i] + mqprio_qopt->qopt.count[i])) {
+ dev_err(&vsi->back->pdev->dev,
+ "Failed to create traffic channel, insufficient number of queues.\n");
return -EINVAL;
}
if (sum_max_rate > i40e_get_link_speed(vsi)) {
@@ -13261,6 +13273,7 @@ static const struct net_device_ops i40e_netdev_ops = {
.ndo_poll_controller = i40e_netpoll,
#endif
.ndo_setup_tc = __i40e_setup_tc,
+ .ndo_select_queue = i40e_lan_select_queue,
.ndo_set_features = i40e_set_features,
.ndo_set_vf_mac = i40e_ndo_set_vf_mac,
.ndo_set_vf_vlan = i40e_ndo_set_vf_port_vlan,
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index 38eb8151ee9a..10a83e5385c7 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -3631,6 +3631,55 @@ dma_error:
return -1;
}
+static u16 i40e_swdcb_skb_tx_hash(struct net_device *dev,
+ const struct sk_buff *skb,
+ u16 num_tx_queues)
+{
+ u32 jhash_initval_salt = 0xd631614b;
+ u32 hash;
+
+ if (skb->sk && skb->sk->sk_hash)
+ hash = skb->sk->sk_hash;
+ else
+ hash = (__force u16)skb->protocol ^ skb->hash;
+
+ hash = jhash_1word(hash, jhash_initval_salt);
+
+ return (u16)(((u64)hash * num_tx_queues) >> 32);
+}
+
+u16 i40e_lan_select_queue(struct net_device *netdev,
+ struct sk_buff *skb,
+ struct net_device __always_unused *sb_dev)
+{
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_vsi *vsi = np->vsi;
+ struct i40e_hw *hw;
+ u16 qoffset;
+ u16 qcount;
+ u8 tclass;
+ u16 hash;
+ u8 prio;
+
+ /* is DCB enabled at all? */
+ if (vsi->tc_config.numtc == 1)
+ return netdev_pick_tx(netdev, skb, sb_dev);
+
+ prio = skb->priority;
+ hw = &vsi->back->hw;
+ tclass = hw->local_dcbx_config.etscfg.prioritytable[prio];
+ /* sanity check */
+ if (unlikely(!(vsi->tc_config.enabled_tc & BIT(tclass))))
+ tclass = 0;
+
+ /* select a queue assigned for the given TC */
+ qcount = vsi->tc_config.tc_info[tclass].qcount;
+ hash = i40e_swdcb_skb_tx_hash(netdev, skb, qcount);
+
+ qoffset = vsi->tc_config.tc_info[tclass].qoffset;
+ return qoffset + hash;
+}
+
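
i40e_swdcb_skb_tx_hash() avoids a modulo when folding the 32-bit hash into num_tx_queues: ((u64)hash * n) >> 32 treats the hash as a fraction of 2^32 and scales it into [0, n), the same trick as the kernel's reciprocal_scale() helper. For example:

	u32 hash = 0x80000000;			/* 0.5 as a fraction of 2^32 */
	u16 n = 8;				/* queues in this traffic class */
	u16 q = (u16)(((u64)hash * n) >> 32);	/* 0.5 * 8 = queue 4 */

i40e_lan_select_queue() then adds the traffic class's qoffset so the pick stays inside that TC's queue range.
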
/**
* i40e_xmit_xdp_ring - transmits an XDP buffer to an XDP Tx ring
* @xdpf: data to transmit
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.h b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
index 86fed05b4f19..bfc2845c99d1 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
@@ -451,6 +451,8 @@ static inline unsigned int i40e_rx_pg_order(struct i40e_ring *ring)
bool i40e_alloc_rx_buffers(struct i40e_ring *rxr, u16 cleaned_count);
netdev_tx_t i40e_lan_xmit_frame(struct sk_buff *skb, struct net_device *netdev);
+u16 i40e_lan_select_queue(struct net_device *netdev, struct sk_buff *skb,
+ struct net_device *sb_dev);
void i40e_clean_tx_ring(struct i40e_ring *tx_ring);
void i40e_clean_rx_ring(struct i40e_ring *rx_ring);
int i40e_setup_tx_descriptors(struct i40e_ring *tx_ring);
diff --git a/drivers/net/ethernet/intel/iavf/iavf.h b/drivers/net/ethernet/intel/iavf/iavf.h
index e8bd04100ecd..90793b36126e 100644
--- a/drivers/net/ethernet/intel/iavf/iavf.h
+++ b/drivers/net/ethernet/intel/iavf/iavf.h
@@ -136,6 +136,7 @@ struct iavf_q_vector {
struct iavf_mac_filter {
struct list_head list;
u8 macaddr[ETH_ALEN];
+ bool is_new_mac; /* filter is new, wait for PF decision */
bool remove; /* filter needs to be removed */
bool add; /* filter needs to be added */
};
diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c
index e612c24fa384..606a01ce4073 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_main.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_main.c
@@ -751,6 +751,7 @@ struct iavf_mac_filter *iavf_add_filter(struct iavf_adapter *adapter,
list_add_tail(&f->list, &adapter->mac_filter_list);
f->add = true;
+ f->is_new_mac = true;
adapter->aq_required |= IAVF_FLAG_AQ_ADD_MAC_FILTER;
} else {
f->remove = false;
@@ -1506,11 +1507,6 @@ static int iavf_reinit_interrupt_scheme(struct iavf_adapter *adapter)
set_bit(__IAVF_VSI_DOWN, adapter->vsi.state);
iavf_map_rings_to_vectors(adapter);
-
- if (RSS_AQ(adapter))
- adapter->aq_required |= IAVF_FLAG_AQ_CONFIGURE_RSS;
- else
- err = iavf_init_rss(adapter);
err:
return err;
}
@@ -2200,6 +2196,14 @@ continue_reset:
goto reset_err;
}
+ if (RSS_AQ(adapter)) {
+ adapter->aq_required |= IAVF_FLAG_AQ_CONFIGURE_RSS;
+ } else {
+ err = iavf_init_rss(adapter);
+ if (err)
+ goto reset_err;
+ }
+
adapter->aq_required |= IAVF_FLAG_AQ_GET_CONFIG;
adapter->aq_required |= IAVF_FLAG_AQ_MAP_VECTORS;
@@ -3798,6 +3802,7 @@ static int iavf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
err_ioremap:
free_netdev(netdev);
err_alloc_etherdev:
+ pci_disable_pcie_error_reporting(pdev);
pci_release_regions(pdev);
err_pci_reg:
err_dma:
diff --git a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
index 0eab3c43bdc5..3c735968e1b8 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
@@ -541,6 +541,47 @@ void iavf_del_ether_addrs(struct iavf_adapter *adapter)
}
/**
+ * iavf_mac_add_ok
+ * @adapter: adapter structure
+ *
+ * Submit list of filters based on PF response.
+ **/
+static void iavf_mac_add_ok(struct iavf_adapter *adapter)
+{
+ struct iavf_mac_filter *f, *ftmp;
+
+ spin_lock_bh(&adapter->mac_vlan_list_lock);
+ list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) {
+ f->is_new_mac = false;
+ }
+ spin_unlock_bh(&adapter->mac_vlan_list_lock);
+}
+
+/**
+ * iavf_mac_add_reject
+ * @adapter: adapter structure
+ *
+ * Remove filters from list based on PF response.
+ **/
+static void iavf_mac_add_reject(struct iavf_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+ struct iavf_mac_filter *f, *ftmp;
+
+ spin_lock_bh(&adapter->mac_vlan_list_lock);
+ list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) {
+ if (f->remove && ether_addr_equal(f->macaddr, netdev->dev_addr))
+ f->remove = false;
+
+ if (f->is_new_mac) {
+ list_del(&f->list);
+ kfree(f);
+ }
+ }
+ spin_unlock_bh(&adapter->mac_vlan_list_lock);
+}
+
+/**
* iavf_add_vlans
* @adapter: adapter structure
*
@@ -1492,6 +1533,7 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter,
case VIRTCHNL_OP_ADD_ETH_ADDR:
dev_err(&adapter->pdev->dev, "Failed to add MAC filter, error %s\n",
iavf_stat_str(&adapter->hw, v_retval));
+ iavf_mac_add_reject(adapter);
/* restore administratively set MAC address */
ether_addr_copy(adapter->hw.mac.addr, netdev->dev_addr);
break;
@@ -1639,10 +1681,11 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter,
}
}
switch (v_opcode) {
- case VIRTCHNL_OP_ADD_ETH_ADDR: {
+ case VIRTCHNL_OP_ADD_ETH_ADDR:
+ if (!v_retval)
+ iavf_mac_add_ok(adapter);
if (!ether_addr_equal(netdev->dev_addr, adapter->hw.mac.addr))
ether_addr_copy(netdev->dev_addr, adapter->hw.mac.addr);
- }
break;
case VIRTCHNL_OP_GET_STATS: {
struct iavf_eth_stats *stats =
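
The is_new_mac flag added above implements optimistic filter insertion: a new filter goes on mac_filter_list immediately, but stays marked until the PF answers VIRTCHNL_OP_ADD_ETH_ADDR. On success iavf_mac_add_ok() clears the mark; on failure iavf_mac_add_reject() deletes every still-marked entry, rolling the list back. The lifecycle, condensed:

	/* iavf_add_filter() */
	f->add = true;
	f->is_new_mac = true;	/* tentative until the PF replies */

	/* PF ACK  -> iavf_mac_add_ok():     f->is_new_mac = false;  entry kept */
	/* PF NACK -> iavf_mac_add_reject(): list_del(&f->list); kfree(f);     */
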
diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h
index a450343fbb92..eadcb9958346 100644
--- a/drivers/net/ethernet/intel/ice/ice.h
+++ b/drivers/net/ethernet/intel/ice/ice.h
@@ -234,6 +234,7 @@ enum ice_pf_state {
ICE_VFLR_EVENT_PENDING,
ICE_FLTR_OVERFLOW_PROMISC,
ICE_VF_DIS,
+ ICE_VF_DEINIT_IN_PROGRESS,
ICE_CFG_BUSY,
ICE_SERVICE_SCHED,
ICE_SERVICE_DIS,
diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
index ef8d1815af56..fe2ded775f25 100644
--- a/drivers/net/ethernet/intel/ice/ice_main.c
+++ b/drivers/net/ethernet/intel/ice/ice_main.c
@@ -191,6 +191,14 @@ static int ice_add_mac_to_unsync_list(struct net_device *netdev, const u8 *addr)
struct ice_netdev_priv *np = netdev_priv(netdev);
struct ice_vsi *vsi = np->vsi;
+ /* Under some circumstances, we might receive a request to delete our
+ * own device address from our uc list. Because we store the device
+ * address in the VSI's MAC filter list, we need to ignore such
+ * requests and not delete our device address from this list.
+ */
+ if (ether_addr_equal(addr, netdev->dev_addr))
+ return 0;
+
if (ice_fltr_add_mac_to_list(vsi, &vsi->tmp_unsync_list, addr,
ICE_FWD_TO_VSI))
return -EINVAL;
@@ -4194,6 +4202,11 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent)
struct ice_hw *hw;
int i, err;
+ if (pdev->is_virtfn) {
+ dev_err(dev, "can't probe a virtual function\n");
+ return -EINVAL;
+ }
+
/* this driver uses devres, see
* Documentation/driver-api/driver-model/devres.rst
*/
@@ -5119,7 +5132,7 @@ static int ice_set_mac_address(struct net_device *netdev, void *pi)
return -EADDRNOTAVAIL;
if (ether_addr_equal(netdev->dev_addr, mac)) {
- netdev_warn(netdev, "already using mac %pM\n", mac);
+ netdev_dbg(netdev, "already using mac %pM\n", mac);
return 0;
}
@@ -5130,6 +5143,7 @@ static int ice_set_mac_address(struct net_device *netdev, void *pi)
return -EBUSY;
}
+ netif_addr_lock_bh(netdev);
/* Clean up old MAC filter. Not an error if old filter doesn't exist */
status = ice_fltr_remove_mac(vsi, netdev->dev_addr, ICE_FWD_TO_VSI);
if (status && status != ICE_ERR_DOES_NOT_EXIST) {
@@ -5139,30 +5153,28 @@ static int ice_set_mac_address(struct net_device *netdev, void *pi)
/* Add filter for new MAC. If filter exists, return success */
status = ice_fltr_add_mac(vsi, mac, ICE_FWD_TO_VSI);
- if (status == ICE_ERR_ALREADY_EXISTS) {
+ if (status == ICE_ERR_ALREADY_EXISTS)
/* Although this MAC filter is already present in hardware it's
* possible in some cases (e.g. bonding) that dev_addr was
* modified outside of the driver and needs to be restored back
* to this value.
*/
- memcpy(netdev->dev_addr, mac, netdev->addr_len);
netdev_dbg(netdev, "filter for MAC %pM already exists\n", mac);
- return 0;
- }
-
- /* error if the new filter addition failed */
- if (status)
+ else if (status)
+ /* error if the new filter addition failed */
err = -EADDRNOTAVAIL;
err_update_filters:
if (err) {
netdev_err(netdev, "can't set MAC %pM. filter update failed\n",
mac);
+ netif_addr_unlock_bh(netdev);
return err;
}
/* change the netdev's MAC address */
memcpy(netdev->dev_addr, mac, netdev->addr_len);
+ netif_addr_unlock_bh(netdev);
netdev_dbg(vsi->netdev, "updated MAC address to %pM\n",
netdev->dev_addr);
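
The netif_addr_lock_bh()/netif_addr_unlock_bh() pair added above serializes the filter swap and the dev_addr update against the rx-mode/address-sync paths, which walk and modify the netdev address state under the same lock from BH context. Condensed from the hunk, the locked section has this shape, and every early exit must drop the lock:

	netif_addr_lock_bh(netdev);
	/* remove the old MAC filter, add the new one ... */
	if (err) {
		netif_addr_unlock_bh(netdev);	/* unlock on the error path too */
		return err;
	}
	memcpy(netdev->dev_addr, mac, netdev->addr_len);
	netif_addr_unlock_bh(netdev);
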
diff --git a/drivers/net/ethernet/intel/ice/ice_ptp.c b/drivers/net/ethernet/intel/ice/ice_ptp.c
index 5d5207b56ca9..9e3ddb9b8b51 100644
--- a/drivers/net/ethernet/intel/ice/ice_ptp.c
+++ b/drivers/net/ethernet/intel/ice/ice_ptp.c
@@ -656,7 +656,7 @@ static int ice_ptp_cfg_clkout(struct ice_pf *pf, unsigned int chan,
* maintaining phase
*/
if (start_time < current_time)
- start_time = div64_u64(current_time + NSEC_PER_MSEC - 1,
+ start_time = div64_u64(current_time + NSEC_PER_SEC - 1,
NSEC_PER_SEC) * NSEC_PER_SEC + phase;
start_time -= E810_OUT_PROP_DELAY_NS;
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
index 2826570dab51..e93430ab37f1 100644
--- a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
@@ -615,6 +615,8 @@ void ice_free_vfs(struct ice_pf *pf)
struct ice_hw *hw = &pf->hw;
unsigned int tmp, i;
+ set_bit(ICE_VF_DEINIT_IN_PROGRESS, pf->state);
+
if (!pf->vf)
return;
@@ -680,6 +682,7 @@ void ice_free_vfs(struct ice_pf *pf)
i);
clear_bit(ICE_VF_DIS, pf->state);
+ clear_bit(ICE_VF_DEINIT_IN_PROGRESS, pf->state);
clear_bit(ICE_FLAG_SRIOV_ENA, pf->flags);
}
@@ -4415,6 +4418,10 @@ void ice_vc_process_vf_msg(struct ice_pf *pf, struct ice_rq_event_info *event)
struct device *dev;
int err = 0;
+ /* if de-init is underway, don't process messages from VF */
+ if (test_bit(ICE_VF_DEINIT_IN_PROGRESS, pf->state))
+ return;
+
dev = ice_pf_to_dev(pf);
if (ice_validate_vf_id(pf, vf_id)) {
err = -EINVAL;
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index 7e6435dc7e80..171a7a629b20 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -931,6 +931,7 @@ static void igb_configure_msix(struct igb_adapter *adapter)
**/
static int igb_request_msix(struct igb_adapter *adapter)
{
+ unsigned int num_q_vectors = adapter->num_q_vectors;
struct net_device *netdev = adapter->netdev;
int i, err = 0, vector = 0, free_vector = 0;
@@ -939,7 +940,13 @@ static int igb_request_msix(struct igb_adapter *adapter)
if (err)
goto err_out;
- for (i = 0; i < adapter->num_q_vectors; i++) {
+ if (num_q_vectors > MAX_Q_VECTORS) {
+ num_q_vectors = MAX_Q_VECTORS;
+ dev_warn(&adapter->pdev->dev,
+ "The number of queue vectors (%d) is higher than max allowed (%d)\n",
+ adapter->num_q_vectors, MAX_Q_VECTORS);
+ }
+ for (i = 0; i < num_q_vectors; i++) {
struct igb_q_vector *q_vector = adapter->q_vector[i];
vector++;
@@ -1678,14 +1685,15 @@ static bool is_any_txtime_enabled(struct igb_adapter *adapter)
**/
static void igb_config_tx_modes(struct igb_adapter *adapter, int queue)
{
- struct igb_ring *ring = adapter->tx_ring[queue];
struct net_device *netdev = adapter->netdev;
struct e1000_hw *hw = &adapter->hw;
+ struct igb_ring *ring;
u32 tqavcc, tqavctrl;
u16 value;
WARN_ON(hw->mac.type != e1000_i210);
WARN_ON(queue < 0 || queue > 1);
+ ring = adapter->tx_ring[queue];
/* If any of the Qav features is enabled, configure queues as SR and
* with HIGH PRIO. If none is, then configure them with LOW PRIO and
@@ -3615,6 +3623,7 @@ err_sw_init:
err_ioremap:
free_netdev(netdev);
err_alloc_etherdev:
+ pci_disable_pcie_error_reporting(pdev);
pci_release_mem_regions(pdev);
err_pci_reg:
err_dma:
@@ -4835,6 +4844,8 @@ static void igb_clean_tx_ring(struct igb_ring *tx_ring)
DMA_TO_DEVICE);
}
+ tx_buffer->next_to_watch = NULL;
+
/* move us one more past the eop_desc for start of next pkt */
tx_buffer++;
i++;
diff --git a/drivers/net/ethernet/intel/igc/igc.h b/drivers/net/ethernet/intel/igc/igc.h
index 9e0bbb2e55e3..5901ed9fb545 100644
--- a/drivers/net/ethernet/intel/igc/igc.h
+++ b/drivers/net/ethernet/intel/igc/igc.h
@@ -578,7 +578,7 @@ static inline s32 igc_read_phy_reg(struct igc_hw *hw, u32 offset, u16 *data)
if (hw->phy.ops.read_reg)
return hw->phy.ops.read_reg(hw, offset, data);
- return 0;
+ return -EOPNOTSUPP;
}
void igc_reinit_locked(struct igc_adapter *);
diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c
index 95323095094d..e29aadbc6744 100644
--- a/drivers/net/ethernet/intel/igc/igc_main.c
+++ b/drivers/net/ethernet/intel/igc/igc_main.c
@@ -232,6 +232,8 @@ static void igc_clean_tx_ring(struct igc_ring *tx_ring)
igc_unmap_tx_buffer(tx_ring->dev, tx_buffer);
}
+ tx_buffer->next_to_watch = NULL;
+
/* move us one more past the eop_desc for start of next pkt */
tx_buffer++;
i++;
@@ -6054,6 +6056,7 @@ err_sw_init:
err_ioremap:
free_netdev(netdev);
err_alloc_etherdev:
+ pci_disable_pcie_error_reporting(pdev);
pci_release_mem_regions(pdev);
err_pci_reg:
err_dma:
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index ffff69efd78a..14aea40da50f 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -1825,7 +1825,8 @@ static void ixgbe_dma_sync_frag(struct ixgbe_ring *rx_ring,
struct sk_buff *skb)
{
if (ring_uses_build_skb(rx_ring)) {
- unsigned long offset = (unsigned long)(skb->data) & ~PAGE_MASK;
+ unsigned long mask = (unsigned long)ixgbe_rx_pg_size(rx_ring) - 1;
+ unsigned long offset = (unsigned long)(skb->data) & mask;
dma_sync_single_range_for_cpu(rx_ring->dev,
IXGBE_CB(skb)->dma,
@@ -11067,6 +11068,7 @@ err_ioremap:
disable_dev = !test_and_set_bit(__IXGBE_DISABLED, &adapter->state);
free_netdev(netdev);
err_alloc_etherdev:
+ pci_disable_pcie_error_reporting(pdev);
pci_release_mem_regions(pdev);
err_pci_reg:
err_dma:
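
The ixgbe_dma_sync_frag() fix above matters when the ring's rx pages are larger than the system page: ~PAGE_MASK only recovers the offset within a PAGE_SIZE page, while ixgbe_rx_pg_size(rx_ring) - 1 masks against the actual buffer page. Worked numbers, assuming 4 KB system pages and 8 KB (order-1) rx pages:

	/* skb->data sits 0x1800 into the rx page, i.e. in its second 4 KB half:
	 *
	 *   old: 0x1800 & ~PAGE_MASK  = 0x0800  (4 KB short -> syncs the wrong range)
	 *   new: 0x1800 & (8192 - 1)  = 0x1800  (correct offset)
	 */
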
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
index 96dd1a4f956a..b1d22e4d5ec9 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
@@ -52,8 +52,11 @@ static int ixgbe_xsk_pool_enable(struct ixgbe_adapter *adapter,
/* Kick start the NAPI context so that receiving will start */
err = ixgbe_xsk_wakeup(adapter->netdev, qid, XDP_WAKEUP_RX);
- if (err)
+ if (err) {
+ clear_bit(qid, adapter->af_xdp_zc_qps);
+ xsk_pool_dma_unmap(pool, IXGBE_RX_DMA_ATTR);
return err;
+ }
}
return 0;
diff --git a/drivers/net/ethernet/intel/ixgbevf/ipsec.c b/drivers/net/ethernet/intel/ixgbevf/ipsec.c
index caaea2c920a6..e3e4676af9e4 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ipsec.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ipsec.c
@@ -211,7 +211,7 @@ struct xfrm_state *ixgbevf_ipsec_find_rx_state(struct ixgbevf_ipsec *ipsec,
static int ixgbevf_ipsec_parse_proto_keys(struct xfrm_state *xs,
u32 *mykey, u32 *mysalt)
{
- struct net_device *dev = xs->xso.dev;
+ struct net_device *dev = xs->xso.real_dev;
unsigned char *key_data;
char *alg_name = NULL;
int key_len;
@@ -260,12 +260,15 @@ static int ixgbevf_ipsec_parse_proto_keys(struct xfrm_state *xs,
**/
static int ixgbevf_ipsec_add_sa(struct xfrm_state *xs)
{
- struct net_device *dev = xs->xso.dev;
- struct ixgbevf_adapter *adapter = netdev_priv(dev);
- struct ixgbevf_ipsec *ipsec = adapter->ipsec;
+ struct net_device *dev = xs->xso.real_dev;
+ struct ixgbevf_adapter *adapter;
+ struct ixgbevf_ipsec *ipsec;
u16 sa_idx;
int ret;
+ adapter = netdev_priv(dev);
+ ipsec = adapter->ipsec;
+
if (xs->id.proto != IPPROTO_ESP && xs->id.proto != IPPROTO_AH) {
netdev_err(dev, "Unsupported protocol 0x%04x for IPsec offload\n",
xs->id.proto);
@@ -383,11 +386,14 @@ static int ixgbevf_ipsec_add_sa(struct xfrm_state *xs)
**/
static void ixgbevf_ipsec_del_sa(struct xfrm_state *xs)
{
- struct net_device *dev = xs->xso.dev;
- struct ixgbevf_adapter *adapter = netdev_priv(dev);
- struct ixgbevf_ipsec *ipsec = adapter->ipsec;
+ struct net_device *dev = xs->xso.real_dev;
+ struct ixgbevf_adapter *adapter;
+ struct ixgbevf_ipsec *ipsec;
u16 sa_idx;
+ adapter = netdev_priv(dev);
+ ipsec = adapter->ipsec;
+
if (xs->xso.flags & XFRM_OFFLOAD_INBOUND) {
sa_idx = xs->xso.offload_handle - IXGBE_IPSEC_BASE_RX_INDEX;
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index 361bc4fbe20b..76a7777c746d 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -2299,19 +2299,19 @@ mvneta_swbm_add_rx_fragment(struct mvneta_port *pp,
skb_frag_off_set(frag, pp->rx_offset_correction);
skb_frag_size_set(frag, data_len);
__skb_frag_set_page(frag, page);
-
- /* last fragment */
- if (len == *size) {
- struct skb_shared_info *sinfo;
-
- sinfo = xdp_get_shared_info_from_buff(xdp);
- sinfo->nr_frags = xdp_sinfo->nr_frags;
- memcpy(sinfo->frags, xdp_sinfo->frags,
- sinfo->nr_frags * sizeof(skb_frag_t));
- }
} else {
page_pool_put_full_page(rxq->page_pool, page, true);
}
+
+ /* last fragment */
+ if (len == *size) {
+ struct skb_shared_info *sinfo;
+
+ sinfo = xdp_get_shared_info_from_buff(xdp);
+ sinfo->nr_frags = xdp_sinfo->nr_frags;
+ memcpy(sinfo->frags, xdp_sinfo->frags,
+ sinfo->nr_frags * sizeof(skb_frag_t));
+ }
*size -= len;
}
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2.h b/drivers/net/ethernet/marvell/mvpp2/mvpp2.h
index b9fbc9f000f2..cf8acabb90ac 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2.h
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2.h
@@ -938,7 +938,7 @@ enum mvpp22_ptp_packet_format {
#define MVPP2_BM_COOKIE_POOL_OFFS 8
#define MVPP2_BM_COOKIE_CPU_OFFS 24
-#define MVPP2_BM_SHORT_FRAME_SIZE 704 /* frame size 128 */
+#define MVPP2_BM_SHORT_FRAME_SIZE 736 /* frame size 128 */
#define MVPP2_BM_LONG_FRAME_SIZE 2240 /* frame size 1664 */
#define MVPP2_BM_JUMBO_FRAME_SIZE 10432 /* frame size 9856 */
/* BM short pool packet size
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/Makefile b/drivers/net/ethernet/marvell/octeontx2/af/Makefile
index 1a3455620b38..cc8ac36cf687 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/Makefile
+++ b/drivers/net/ethernet/marvell/octeontx2/af/Makefile
@@ -10,4 +10,4 @@ obj-$(CONFIG_OCTEONTX2_AF) += rvu_af.o
rvu_mbox-y := mbox.o rvu_trace.o
rvu_af-y := cgx.o rvu.o rvu_cgx.o rvu_npa.o rvu_nix.o \
rvu_reg.o rvu_npc.o rvu_debugfs.o ptp.o rvu_npc_fs.o \
- rvu_cpt.o rvu_devlink.o rpm.o rvu_cn10k.o
+ rvu_cpt.o rvu_devlink.o rpm.o rvu_cn10k.o rvu_switch.o
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
index fac6474ad694..544c96c8fe1d 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
@@ -86,6 +86,22 @@ bool is_lmac_valid(struct cgx *cgx, int lmac_id)
return test_bit(lmac_id, &cgx->lmac_bmap);
}
+/* Helper function to get the sequential index
+ * of an enabled LMAC within a CGX
+ */
+static int get_sequence_id_of_lmac(struct cgx *cgx, int lmac_id)
+{
+ int tmp, id = 0;
+
+ for_each_set_bit(tmp, &cgx->lmac_bmap, MAX_LMAC_PER_CGX) {
+ if (tmp == lmac_id)
+ break;
+ id++;
+ }
+
+ return id;
+}
+
struct mac_ops *get_mac_ops(void *cgxd)
{
if (!cgxd)
@@ -211,37 +227,257 @@ static u64 mac2u64 (u8 *mac_addr)
return mac;
}
+static void cfg2mac(u64 cfg, u8 *mac_addr)
+{
+ int i, index = 0;
+
+ for (i = ETH_ALEN - 1; i >= 0; i--, index++)
+ mac_addr[i] = (cfg >> (8 * index)) & 0xFF;
+}
+
int cgx_lmac_addr_set(u8 cgx_id, u8 lmac_id, u8 *mac_addr)
{
struct cgx *cgx_dev = cgx_get_pdata(cgx_id);
+ struct lmac *lmac = lmac_pdata(lmac_id, cgx_dev);
struct mac_ops *mac_ops;
+ int index, id;
u64 cfg;
+ if (!lmac)
+ return -ENODEV;
+
+ /* access mac_ops to know csr_offset */
mac_ops = cgx_dev->mac_ops;
+
/* copy 6 bytes from macaddr */
/* memcpy(&cfg, mac_addr, 6); */
cfg = mac2u64(mac_addr);
- cgx_write(cgx_dev, 0, (CGXX_CMRX_RX_DMAC_CAM0 + (lmac_id * 0x8)),
+ id = get_sequence_id_of_lmac(cgx_dev, lmac_id);
+
+ index = id * lmac->mac_to_index_bmap.max;
+
+ cgx_write(cgx_dev, 0, (CGXX_CMRX_RX_DMAC_CAM0 + (index * 0x8)),
cfg | CGX_DMAC_CAM_ADDR_ENABLE | ((u64)lmac_id << 49));
cfg = cgx_read(cgx_dev, lmac_id, CGXX_CMRX_RX_DMAC_CTL0);
- cfg |= CGX_DMAC_CTL0_CAM_ENABLE;
+ cfg |= (CGX_DMAC_CTL0_CAM_ENABLE | CGX_DMAC_BCAST_MODE |
+ CGX_DMAC_MCAST_MODE);
cgx_write(cgx_dev, lmac_id, CGXX_CMRX_RX_DMAC_CTL0, cfg);
return 0;
}
+u64 cgx_read_dmac_ctrl(void *cgxd, int lmac_id)
+{
+ struct mac_ops *mac_ops;
+ struct cgx *cgx = cgxd;
+
+ if (!cgxd || !is_lmac_valid(cgxd, lmac_id))
+ return 0;
+
+ /* Get mac_ops to know csr offset */
+ mac_ops = cgx->mac_ops;
+
+ return cgx_read(cgxd, lmac_id, CGXX_CMRX_RX_DMAC_CTL0);
+}
+
+u64 cgx_read_dmac_entry(void *cgxd, int index)
+{
+ struct mac_ops *mac_ops;
+ struct cgx *cgx;
+
+ if (!cgxd)
+ return 0;
+
+ cgx = cgxd;
+ mac_ops = cgx->mac_ops;
+ return cgx_read(cgx, 0, (CGXX_CMRX_RX_DMAC_CAM0 + (index * 8)));
+}
+
+int cgx_lmac_addr_add(u8 cgx_id, u8 lmac_id, u8 *mac_addr)
+{
+ struct cgx *cgx_dev = cgx_get_pdata(cgx_id);
+ struct lmac *lmac = lmac_pdata(lmac_id, cgx_dev);
+ struct mac_ops *mac_ops;
+ int index, idx;
+ u64 cfg = 0;
+ int id;
+
+ if (!lmac)
+ return -ENODEV;
+
+ mac_ops = cgx_dev->mac_ops;
+ /* Get available index where entry is to be installed */
+ idx = rvu_alloc_rsrc(&lmac->mac_to_index_bmap);
+ if (idx < 0)
+ return idx;
+
+ id = get_sequence_id_of_lmac(cgx_dev, lmac_id);
+
+ index = id * lmac->mac_to_index_bmap.max + idx;
+
+ cfg = mac2u64(mac_addr);
+ cfg |= CGX_DMAC_CAM_ADDR_ENABLE;
+ cfg |= ((u64)lmac_id << 49);
+ cgx_write(cgx_dev, 0, (CGXX_CMRX_RX_DMAC_CAM0 + (index * 0x8)), cfg);
+
+ cfg = cgx_read(cgx_dev, lmac_id, CGXX_CMRX_RX_DMAC_CTL0);
+ cfg |= (CGX_DMAC_BCAST_MODE | CGX_DMAC_CAM_ACCEPT);
+
+ if (is_multicast_ether_addr(mac_addr)) {
+ cfg &= ~GENMASK_ULL(2, 1);
+ cfg |= CGX_DMAC_MCAST_MODE_CAM;
+ lmac->mcast_filters_count++;
+ } else if (!lmac->mcast_filters_count) {
+ cfg |= CGX_DMAC_MCAST_MODE;
+ }
+
+ cgx_write(cgx_dev, lmac_id, CGXX_CMRX_RX_DMAC_CTL0, cfg);
+
+ return idx;
+}
+
+int cgx_lmac_addr_reset(u8 cgx_id, u8 lmac_id)
+{
+ struct cgx *cgx_dev = cgx_get_pdata(cgx_id);
+ struct lmac *lmac = lmac_pdata(lmac_id, cgx_dev);
+ struct mac_ops *mac_ops;
+ u8 index = 0, id;
+ u64 cfg;
+
+ if (!lmac)
+ return -ENODEV;
+
+ mac_ops = cgx_dev->mac_ops;
+ /* Restore index 0 to its default init value as done during
+ * cgx_lmac_init
+ */
+ set_bit(0, lmac->mac_to_index_bmap.bmap);
+
+ id = get_sequence_id_of_lmac(cgx_dev, lmac_id);
+
+ index = id * lmac->mac_to_index_bmap.max + index;
+ cgx_write(cgx_dev, 0, (CGXX_CMRX_RX_DMAC_CAM0 + (index * 0x8)), 0);
+
+ /* Reset CGXX_CMRX_RX_DMAC_CTL0 register to default state */
+ cfg = cgx_read(cgx_dev, lmac_id, CGXX_CMRX_RX_DMAC_CTL0);
+ cfg &= ~CGX_DMAC_CAM_ACCEPT;
+ cfg |= (CGX_DMAC_BCAST_MODE | CGX_DMAC_MCAST_MODE);
+ cgx_write(cgx_dev, lmac_id, CGXX_CMRX_RX_DMAC_CTL0, cfg);
+
+ return 0;
+}
+
+/* Allows the caller to change the MAC address associated with an
+ * index in the DMAC filter table, including index 0, which is
+ * reserved for the interface MAC address
+ */
+int cgx_lmac_addr_update(u8 cgx_id, u8 lmac_id, u8 *mac_addr, u8 index)
+{
+ struct cgx *cgx_dev = cgx_get_pdata(cgx_id);
+ struct mac_ops *mac_ops;
+ struct lmac *lmac;
+ u64 cfg;
+ int id;
+
+ lmac = lmac_pdata(lmac_id, cgx_dev);
+ if (!lmac)
+ return -ENODEV;
+
+ mac_ops = cgx_dev->mac_ops;
+ /* Validate the index */
+ if (index >= lmac->mac_to_index_bmap.max)
+ return -EINVAL;
+
+ /* ensure index is already set */
+ if (!test_bit(index, lmac->mac_to_index_bmap.bmap))
+ return -EINVAL;
+
+ id = get_sequence_id_of_lmac(cgx_dev, lmac_id);
+
+ index = id * lmac->mac_to_index_bmap.max + index;
+
+ cfg = cgx_read(cgx_dev, 0, (CGXX_CMRX_RX_DMAC_CAM0 + (index * 0x8)));
+ cfg &= ~CGX_RX_DMAC_ADR_MASK;
+ cfg |= mac2u64(mac_addr);
+
+ cgx_write(cgx_dev, 0, (CGXX_CMRX_RX_DMAC_CAM0 + (index * 0x8)), cfg);
+ return 0;
+}
+
+int cgx_lmac_addr_del(u8 cgx_id, u8 lmac_id, u8 index)
+{
+ struct cgx *cgx_dev = cgx_get_pdata(cgx_id);
+ struct lmac *lmac = lmac_pdata(lmac_id, cgx_dev);
+ struct mac_ops *mac_ops;
+ u8 mac[ETH_ALEN];
+ u64 cfg;
+ int id;
+
+ if (!lmac)
+ return -ENODEV;
+
+ mac_ops = cgx_dev->mac_ops;
+ /* Validate the index */
+ if (index >= lmac->mac_to_index_bmap.max)
+ return -EINVAL;
+
+ /* Skip deletion for reserved index i.e. index 0 */
+ if (index == 0)
+ return 0;
+
+ rvu_free_rsrc(&lmac->mac_to_index_bmap, index);
+
+ id = get_sequence_id_of_lmac(cgx_dev, lmac_id);
+
+ index = id * lmac->mac_to_index_bmap.max + index;
+
+ /* Read MAC address to check whether it is ucast or mcast */
+ cfg = cgx_read(cgx_dev, 0, (CGXX_CMRX_RX_DMAC_CAM0 + (index * 0x8)));
+
+ cfg2mac(cfg, mac);
+ if (is_multicast_ether_addr(mac))
+ lmac->mcast_filters_count--;
+
+ if (!lmac->mcast_filters_count) {
+ cfg = cgx_read(cgx_dev, lmac_id, CGXX_CMRX_RX_DMAC_CTL0);
+ cfg &= ~GENMASK_ULL(2, 1);
+ cfg |= CGX_DMAC_MCAST_MODE;
+ cgx_write(cgx_dev, lmac_id, CGXX_CMRX_RX_DMAC_CTL0, cfg);
+ }
+
+ cgx_write(cgx_dev, 0, (CGXX_CMRX_RX_DMAC_CAM0 + (index * 0x8)), 0);
+
+ return 0;
+}
+
+int cgx_lmac_addr_max_entries_get(u8 cgx_id, u8 lmac_id)
+{
+ struct cgx *cgx_dev = cgx_get_pdata(cgx_id);
+ struct lmac *lmac = lmac_pdata(lmac_id, cgx_dev);
+
+ if (lmac)
+ return lmac->mac_to_index_bmap.max;
+
+ return 0;
+}
+
u64 cgx_lmac_addr_get(u8 cgx_id, u8 lmac_id)
{
struct cgx *cgx_dev = cgx_get_pdata(cgx_id);
+ struct lmac *lmac = lmac_pdata(lmac_id, cgx_dev);
struct mac_ops *mac_ops;
+ int index;
u64 cfg;
+ int id;
mac_ops = cgx_dev->mac_ops;
- cfg = cgx_read(cgx_dev, 0, CGXX_CMRX_RX_DMAC_CAM0 + lmac_id * 0x8);
+ id = get_sequence_id_of_lmac(cgx_dev, lmac_id);
+
+ index = id * lmac->mac_to_index_bmap.max;
+
+ cfg = cgx_read(cgx_dev, 0, CGXX_CMRX_RX_DMAC_CAM0 + index * 0x8);
return cfg & CGX_RX_DMAC_ADR_MASK;
}
@@ -297,35 +533,51 @@ int cgx_lmac_internal_loopback(void *cgxd, int lmac_id, bool enable)
void cgx_lmac_promisc_config(int cgx_id, int lmac_id, bool enable)
{
struct cgx *cgx = cgx_get_pdata(cgx_id);
+ struct lmac *lmac;
+ u16 max_dmac;
struct mac_ops *mac_ops;
+ int index, i;
u64 cfg = 0;
+ int id;
if (!cgx)
return;
+ lmac = lmac_pdata(lmac_id, cgx);
+ if (!lmac)
+ return;
+
+ max_dmac = lmac->mac_to_index_bmap.max;
+
+ id = get_sequence_id_of_lmac(cgx, lmac_id);
+
mac_ops = cgx->mac_ops;
if (enable) {
/* Enable promiscuous mode on LMAC */
cfg = cgx_read(cgx, lmac_id, CGXX_CMRX_RX_DMAC_CTL0);
- cfg &= ~(CGX_DMAC_CAM_ACCEPT | CGX_DMAC_MCAST_MODE);
- cfg |= CGX_DMAC_BCAST_MODE;
+ cfg &= ~CGX_DMAC_CAM_ACCEPT;
+ cfg |= (CGX_DMAC_BCAST_MODE | CGX_DMAC_MCAST_MODE);
cgx_write(cgx, lmac_id, CGXX_CMRX_RX_DMAC_CTL0, cfg);
- cfg = cgx_read(cgx, 0,
- (CGXX_CMRX_RX_DMAC_CAM0 + lmac_id * 0x8));
- cfg &= ~CGX_DMAC_CAM_ADDR_ENABLE;
- cgx_write(cgx, 0,
- (CGXX_CMRX_RX_DMAC_CAM0 + lmac_id * 0x8), cfg);
+ for (i = 0; i < max_dmac; i++) {
+ index = id * max_dmac + i;
+ cfg = cgx_read(cgx, 0,
+ (CGXX_CMRX_RX_DMAC_CAM0 + index * 0x8));
+ cfg &= ~CGX_DMAC_CAM_ADDR_ENABLE;
+ cgx_write(cgx, 0,
+ (CGXX_CMRX_RX_DMAC_CAM0 + index * 0x8), cfg);
+ }
} else {
/* Disable promiscuous mode */
cfg = cgx_read(cgx, lmac_id, CGXX_CMRX_RX_DMAC_CTL0);
cfg |= CGX_DMAC_CAM_ACCEPT | CGX_DMAC_MCAST_MODE;
cgx_write(cgx, lmac_id, CGXX_CMRX_RX_DMAC_CTL0, cfg);
- cfg = cgx_read(cgx, 0,
- (CGXX_CMRX_RX_DMAC_CAM0 + lmac_id * 0x8));
- cfg |= CGX_DMAC_CAM_ADDR_ENABLE;
- cgx_write(cgx, 0,
- (CGXX_CMRX_RX_DMAC_CAM0 + lmac_id * 0x8), cfg);
+ for (i = 0; i < max_dmac; i++) {
+ index = id * max_dmac + i;
+ cfg = cgx_read(cgx, 0,
+ (CGXX_CMRX_RX_DMAC_CAM0 + index * 0x8));
+ if ((cfg & CGX_RX_DMAC_ADR_MASK) != 0) {
+ cfg |= CGX_DMAC_CAM_ADDR_ENABLE;
+ cgx_write(cgx, 0,
+ (CGXX_CMRX_RX_DMAC_CAM0 +
+ index * 0x8),
+ cfg);
+ }
+ }
}
}
@@ -1234,6 +1486,15 @@ static int cgx_lmac_init(struct cgx *cgx)
}
lmac->cgx = cgx;
+ lmac->mac_to_index_bmap.max =
+ MAX_DMAC_ENTRIES_PER_CGX / cgx->lmac_count;
+ err = rvu_alloc_bitmap(&lmac->mac_to_index_bmap);
+ if (err)
+ return err;
+
+ /* Reserve first entry for default MAC address */
+ set_bit(0, lmac->mac_to_index_bmap.bmap);
+
init_waitqueue_head(&lmac->wq_cmd_cmplt);
mutex_init(&lmac->cmd_lock);
spin_lock_init(&lmac->event_cb_lock);
@@ -1243,8 +1504,8 @@ static int cgx_lmac_init(struct cgx *cgx)
/* Add reference */
cgx->lmac_idmap[lmac->lmac_id] = lmac;
- cgx->mac_ops->mac_pause_frm_config(cgx, lmac->lmac_id, true);
set_bit(lmac->lmac_id, &cgx->lmac_bmap);
+ cgx->mac_ops->mac_pause_frm_config(cgx, lmac->lmac_id, true);
}
return cgx_lmac_verify_fwi_version(cgx);
@@ -1274,6 +1535,7 @@ static int cgx_lmac_exit(struct cgx *cgx)
continue;
cgx->mac_ops->mac_pause_frm_config(cgx, lmac->lmac_id, false);
cgx_configure_interrupt(cgx, lmac, lmac->lmac_id, true);
+ kfree(lmac->mac_to_index_bmap.bmap);
kfree(lmac->name);
kfree(lmac);
}
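
The new DMAC filter code partitions the CGX-wide CAM (MAX_DMAC_ENTRIES_PER_CGX = 32 entries) evenly across the enabled LMACs, and indexes by the LMAC's sequential position in lmac_bmap (get_sequence_id_of_lmac()) rather than its hardware id, so a sparsely populated CGX leaves no holes. Worked numbers for a fully populated CGX:

	/* 32 CAM entries / 4 enabled LMACs = 8 entries per LMAC
	 * (mac_to_index_bmap.max == 8; bitmap slot 0 of each LMAC is
	 * reserved for the interface MAC address)
	 *
	 * LMAC with sequence id 2, bitmap slot idx 3:
	 *	index = id * max + idx = 2 * 8 + 3 = CAM entry 19
	 */
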
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cgx.h b/drivers/net/ethernet/marvell/octeontx2/af/cgx.h
index 12521262164a..237ba2b56210 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/cgx.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/cgx.h
@@ -23,6 +23,7 @@
#define CGX_ID_MASK 0x7
#define MAX_LMAC_PER_CGX 4
+#define MAX_DMAC_ENTRIES_PER_CGX 32
#define CGX_FIFO_LEN 65536 /* 64K for both Rx & Tx */
#define CGX_OFFSET(x) ((x) * MAX_LMAC_PER_CGX)
@@ -46,10 +47,12 @@
#define CGXX_CMRX_RX_DMAC_CTL0 (0x1F8 + mac_ops->csr_offset)
#define CGX_DMAC_CTL0_CAM_ENABLE BIT_ULL(3)
#define CGX_DMAC_CAM_ACCEPT BIT_ULL(3)
+#define CGX_DMAC_MCAST_MODE_CAM BIT_ULL(2)
#define CGX_DMAC_MCAST_MODE BIT_ULL(1)
#define CGX_DMAC_BCAST_MODE BIT_ULL(0)
#define CGXX_CMRX_RX_DMAC_CAM0 (0x200 + mac_ops->csr_offset)
#define CGX_DMAC_CAM_ADDR_ENABLE BIT_ULL(48)
+#define CGX_DMAC_CAM_ENTRY_LMACID GENMASK_ULL(50, 49)
#define CGXX_CMRX_RX_DMAC_CAM1 0x400
#define CGX_RX_DMAC_ADR_MASK GENMASK_ULL(47, 0)
#define CGXX_CMRX_TX_STAT0 0x700
@@ -139,7 +142,11 @@ int cgx_get_rx_stats(void *cgxd, int lmac_id, int idx, u64 *rx_stat);
int cgx_lmac_rx_tx_enable(void *cgxd, int lmac_id, bool enable);
int cgx_lmac_tx_enable(void *cgxd, int lmac_id, bool enable);
int cgx_lmac_addr_set(u8 cgx_id, u8 lmac_id, u8 *mac_addr);
+int cgx_lmac_addr_reset(u8 cgx_id, u8 lmac_id);
u64 cgx_lmac_addr_get(u8 cgx_id, u8 lmac_id);
+int cgx_lmac_addr_add(u8 cgx_id, u8 lmac_id, u8 *mac_addr);
+int cgx_lmac_addr_del(u8 cgx_id, u8 lmac_id, u8 index);
+int cgx_lmac_addr_max_entries_get(u8 cgx_id, u8 lmac_id);
void cgx_lmac_promisc_config(int cgx_id, int lmac_id, bool enable);
void cgx_lmac_enadis_rx_pause_fwding(void *cgxd, int lmac_id, bool enable);
int cgx_lmac_internal_loopback(void *cgxd, int lmac_id, bool enable);
@@ -165,4 +172,7 @@ u8 cgx_get_lmacid(void *cgxd, u8 lmac_index);
unsigned long cgx_get_lmac_bmap(void *cgxd);
void cgx_lmac_write(int cgx_id, int lmac_id, u64 offset, u64 val);
u64 cgx_lmac_read(int cgx_id, int lmac_id, u64 offset);
+int cgx_lmac_addr_update(u8 cgx_id, u8 lmac_id, u8 *mac_addr, u8 index);
+u64 cgx_read_dmac_ctrl(void *cgxd, int lmac_id);
+u64 cgx_read_dmac_entry(void *cgxd, int index);
#endif /* CGX_H */
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/lmac_common.h b/drivers/net/ethernet/marvell/octeontx2/af/lmac_common.h
index 45706fd87120..a8b7b1c7a1d5 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/lmac_common.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/lmac_common.h
@@ -10,17 +10,19 @@
#include "rvu.h"
#include "cgx.h"
/**
- * struct lmac
+ * struct lmac - per lmac locks and properties
* @wq_cmd_cmplt: waitq to keep the process blocked until cmd completion
* @cmd_lock: Lock to serialize the command interface
* @resp: command response
* @link_info: link related information
+ * @mac_to_index_bmap: MAC address to CGX table index mapping
* @event_cb: callback for linkchange events
* @event_cb_lock: lock for serializing callback with unregister
- * @cmd_pend: flag set before new command is started
- * flag cleared after command response is received
* @cgx: parent cgx port
+ * @mcast_filters_count: Number of multicast filters installed
* @lmac_id: lmac port id
+ * @cmd_pend: flag set before new command is started
+ * flag cleared after command response is received
* @name: lmac port name
*/
struct lmac {
@@ -29,12 +31,14 @@ struct lmac {
struct mutex cmd_lock;
u64 resp;
struct cgx_link_user_info link_info;
+ struct rsrc_bmap mac_to_index_bmap;
struct cgx_event_cb event_cb;
/* lock for serializing callback with unregister */
spinlock_t event_cb_lock;
- bool cmd_pend;
struct cgx *cgx;
+ u8 mcast_filters_count;
u8 lmac_id;
+ bool cmd_pend;
char *name;
};
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
index 770d86262838..f5ec39de026a 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
@@ -134,6 +134,8 @@ M(MSIX_OFFSET, 0x005, msix_offset, msg_req, msix_offset_rsp) \
M(VF_FLR, 0x006, vf_flr, msg_req, msg_rsp) \
M(PTP_OP, 0x007, ptp_op, ptp_req, ptp_rsp) \
M(GET_HW_CAP, 0x008, get_hw_cap, msg_req, get_hw_cap_rsp) \
+M(LMTST_TBL_SETUP, 0x00a, lmtst_tbl_setup, lmtst_tbl_setup_req, \
+ msg_rsp) \
M(SET_VF_PERM, 0x00b, set_vf_perm, set_vf_perm, msg_rsp) \
/* CGX mbox IDs (range 0x200 - 0x3FF) */ \
M(CGX_START_RXTX, 0x200, cgx_start_rxtx, msg_req, msg_rsp) \
@@ -163,7 +165,15 @@ M(CGX_SET_LINK_MODE, 0x214, cgx_set_link_mode, cgx_set_link_mode_req,\
M(CGX_FEATURES_GET, 0x215, cgx_features_get, msg_req, \
cgx_features_info_msg) \
M(RPM_STATS, 0x216, rpm_stats, msg_req, rpm_stats_rsp) \
- /* NPA mbox IDs (range 0x400 - 0x5FF) */ \
+M(CGX_MAC_ADDR_ADD, 0x217, cgx_mac_addr_add, cgx_mac_addr_add_req, \
+ cgx_mac_addr_add_rsp) \
+M(CGX_MAC_ADDR_DEL, 0x218, cgx_mac_addr_del, cgx_mac_addr_del_req, \
+ msg_rsp) \
+M(CGX_MAC_MAX_ENTRIES_GET, 0x219, cgx_mac_max_entries_get, msg_req, \
+ cgx_max_dmac_entries_get_rsp) \
+M(CGX_MAC_ADDR_RESET, 0x21A, cgx_mac_addr_reset, msg_req, msg_rsp) \
+M(CGX_MAC_ADDR_UPDATE, 0x21B, cgx_mac_addr_update, cgx_mac_addr_update_req, \
+ msg_rsp) \
/* NPA mbox IDs (range 0x400 - 0x5FF) */ \
M(NPA_LF_ALLOC, 0x400, npa_lf_alloc, \
npa_lf_alloc_req, npa_lf_alloc_rsp) \
@@ -401,6 +411,38 @@ struct cgx_mac_addr_set_or_get {
u8 mac_addr[ETH_ALEN];
};
+/* Structure for requesting the operation to
+ * add a DMAC filter entry to the CGX interface
+ */
+struct cgx_mac_addr_add_req {
+ struct mbox_msghdr hdr;
+ u8 mac_addr[ETH_ALEN];
+};
+
+/* Structure for the response to the operation to
+ * add a DMAC filter entry to the CGX interface
+ */
+struct cgx_mac_addr_add_rsp {
+ struct mbox_msghdr hdr;
+ u8 index;
+};
+
+/* Structure for requesting the operation to
+ * delete a DMAC filter entry from the CGX interface
+ */
+struct cgx_mac_addr_del_req {
+ struct mbox_msghdr hdr;
+ u8 index;
+};
+
+/* Structure for the response to the operation to
+ * get the maximum number of supported DMAC filter entries
+ */
+struct cgx_max_dmac_entries_get_rsp {
+ struct mbox_msghdr hdr;
+ u8 max_dmac_filters;
+};
+
struct cgx_link_user_info {
uint64_t link_up:1;
uint64_t full_duplex:1;
@@ -499,6 +541,12 @@ struct cgx_set_link_mode_rsp {
int status;
};
+struct cgx_mac_addr_update_req {
+ struct mbox_msghdr hdr;
+ u8 mac_addr[ETH_ALEN];
+ u8 index;
+};
+
#define RVU_LMAC_FEAT_FC BIT_ULL(0) /* pause frames */
#define RVU_LMAC_FEAT_PTP BIT_ULL(1) /* precision time protocol */
#define RVU_MAC_VERSION BIT_ULL(2)
@@ -1278,6 +1326,14 @@ struct set_vf_perm {
u64 flags;
};
+struct lmtst_tbl_setup_req {
+ struct mbox_msghdr hdr;
+ u16 base_pcifunc;
+ u8 use_local_lmt_region;
+ u64 lmt_iova;
+ u64 rsvd[4];
+};
+
/* CPT mailbox error codes
* Range 901 - 1000.
*/
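
The M() entries added above are X-macros: mbox.h lists each message once (name, id, handler suffix, request type, response type), and consumers re-define M to stamp out enums, handler prototypes, and dispatch code from the same list. Roughly, a sketch of the expansion pattern rather than the exact header text:

	enum {
	#define M(_name, _id, _fn_name, _req_type, _rsp_type)	\
		MBOX_MSG_ ## _name = _id,
	MBOX_MESSAGES
	#undef M
	};

	#define M(_name, _id, _fn_name, _req_type, _rsp_type)		\
	int rvu_mbox_handler_ ## _fn_name(struct rvu *rvu,		\
					  struct _req_type *req,	\
					  struct _rsp_type *rsp);
	MBOX_MESSAGES
	#undef M
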
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/npc.h b/drivers/net/ethernet/marvell/octeontx2/af/npc.h
index 19bad9a59c8f..243cf8070e77 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/npc.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/npc.h
@@ -151,7 +151,10 @@ enum npc_kpu_lh_ltype {
* Software assigns pkind for each incoming port such as CGX
* Ethernet interfaces, LBK interfaces, etc.
*/
+#define NPC_UNRESERVED_PKIND_COUNT NPC_RX_VLAN_EXDSA_PKIND
+
enum npc_pkind_type {
+ NPC_RX_LBK_PKIND = 0ULL,
NPC_RX_VLAN_EXDSA_PKIND = 56ULL,
NPC_RX_CHLEN24B_PKIND = 57ULL,
NPC_RX_CPT_HDR_PKIND,
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
index 0b092949d7ac..5fe277e354f7 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
@@ -391,8 +391,10 @@ void rvu_get_pf_numvfs(struct rvu *rvu, int pf, int *numvfs, int *hwvf)
/* Get numVFs attached to this PF and first HWVF */
cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf));
- *numvfs = (cfg >> 12) & 0xFF;
- *hwvf = cfg & 0xFFF;
+ if (numvfs)
+ *numvfs = (cfg >> 12) & 0xFF;
+ if (hwvf)
+ *hwvf = cfg & 0xFFF;
}
static int rvu_get_hwvf(struct rvu *rvu, int pcifunc)
@@ -1314,7 +1316,7 @@ int rvu_mbox_handler_detach_resources(struct rvu *rvu,
return rvu_detach_rsrcs(rvu, detach, detach->hdr.pcifunc);
}
-static int rvu_get_nix_blkaddr(struct rvu *rvu, u16 pcifunc)
+int rvu_get_nix_blkaddr(struct rvu *rvu, u16 pcifunc)
{
struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
int blkaddr = BLKADDR_NIX0, vf;
@@ -2333,6 +2335,7 @@ static void __rvu_flr_handler(struct rvu *rvu, u16 pcifunc)
rvu_blklf_teardown(rvu, pcifunc, BLKADDR_SSOW);
rvu_blklf_teardown(rvu, pcifunc, BLKADDR_SSO);
rvu_blklf_teardown(rvu, pcifunc, BLKADDR_NPA);
+ rvu_reset_lmt_map_tbl(rvu, pcifunc);
rvu_detach_rsrcs(rvu, NULL, pcifunc);
mutex_unlock(&rvu->flr_lock);
}
@@ -2858,6 +2861,12 @@ static int rvu_enable_sriov(struct rvu *rvu)
if (!vfs)
return 0;
+ /* LBK channel number 63 is used for switching packets between
+ * CGX-mapped VFs. Hence limit LBK pairs to 62.
+ */
+ if (vfs > 62)
+ vfs = 62;
+
/* Save VFs number for reference in VF interrupts handlers.
* Since interrupts might start arriving during SRIOV enablement
* ordinary API cannot be used to get number of enabled VFs.
@@ -3000,6 +3009,8 @@ static int rvu_probe(struct pci_dev *pdev, const struct pci_device_id *id)
/* Initialize debugfs */
rvu_dbg_init(rvu);
+ mutex_init(&rvu->rswitch.switch_lock);
+
return 0;
err_dl:
rvu_unregister_dl(rvu);
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
index 9e5d9ba6f01e..91503fb2762c 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
@@ -243,6 +243,7 @@ struct rvu_pfvf {
u8 nix_blkaddr; /* BLKADDR_NIX0/1 assigned to this PF */
u8 nix_rx_intf; /* NIX0_RX/NIX1_RX interface to NPC */
u8 nix_tx_intf; /* NIX0_TX/NIX1_TX interface to NPC */
+ u64 lmt_base_addr; /* Preserving the pcifunc's lmtst base addr */
unsigned long flags;
};
@@ -414,6 +415,16 @@ struct npc_kpu_profile_adapter {
size_t kpus;
};
+#define RVU_SWITCH_LBK_CHAN 63
+
+struct rvu_switch {
+ struct mutex switch_lock; /* Serialize flow installation */
+ u32 used_entries;
+ u16 *entry2pcifunc;
+ u16 mode;
+ u16 start_entry;
+};
+
struct rvu {
void __iomem *afreg_base;
void __iomem *pfreg_base;
@@ -444,6 +455,7 @@ struct rvu {
/* CGX */
#define PF_CGXMAP_BASE 1 /* PF 0 is reserved for RVU PF */
+ u16 cgx_mapped_vfs; /* maximum CGX mapped VFs */
u8 cgx_mapped_pfs;
u8 cgx_cnt_max; /* CGX port count max */
u8 *pf2cgxlmac_map; /* pf to cgx_lmac map */
@@ -476,6 +488,9 @@ struct rvu {
struct rvu_debugfs rvu_dbg;
#endif
struct rvu_devlink *rvu_dl;
+
+ /* RVU switch implementation over NPC with DMAC rules */
+ struct rvu_switch rswitch;
};
static inline void rvu_write64(struct rvu *rvu, u64 block, u64 offset, u64 val)
@@ -656,6 +671,8 @@ void rvu_cgx_enadis_rx_bp(struct rvu *rvu, int pf, bool enable);
int rvu_cgx_start_stop_io(struct rvu *rvu, u16 pcifunc, bool start);
int rvu_cgx_nix_cuml_stats(struct rvu *rvu, void *cgxd, int lmac_id, int index,
int rxtxflag, u64 *stat);
+void rvu_cgx_disable_dmac_entries(struct rvu *rvu, u16 pcifunc);
+
/* NPA APIs */
int rvu_npa_init(struct rvu *rvu);
void rvu_npa_freemem(struct rvu *rvu);
@@ -688,6 +705,7 @@ int nix_aq_context_read(struct rvu *rvu, struct nix_hw *nix_hw,
struct nix_cn10k_aq_enq_req *aq_req,
struct nix_cn10k_aq_enq_rsp *aq_rsp,
u16 pcifunc, u8 ctype, u32 qidx);
+int rvu_get_nix_blkaddr(struct rvu *rvu, u16 pcifunc);
/* NPC APIs */
int rvu_npc_init(struct rvu *rvu);
@@ -741,6 +759,7 @@ void npc_read_mcam_entry(struct rvu *rvu, struct npc_mcam *mcam,
bool is_mac_feature_supported(struct rvu *rvu, int pf, int feature);
u32 rvu_cgx_get_fifolen(struct rvu *rvu);
void *rvu_first_cgx_pdata(struct rvu *rvu);
+int cgxlmac_to_pf(struct rvu *rvu, int cgx_id, int lmac_id);
int npc_get_nixlf_mcam_index(struct npc_mcam *mcam, u16 pcifunc, int nixlf,
int type);
@@ -754,6 +773,9 @@ int rvu_cpt_lf_teardown(struct rvu *rvu, u16 pcifunc, int lf, int slot);
int rvu_set_channels_base(struct rvu *rvu);
void rvu_program_channels(struct rvu *rvu);
+/* CN10K RVU - LMT */
+void rvu_reset_lmt_map_tbl(struct rvu *rvu, u16 pcifunc);
+
#ifdef CONFIG_DEBUG_FS
void rvu_dbg_init(struct rvu *rvu);
void rvu_dbg_exit(struct rvu *rvu);
@@ -761,4 +783,10 @@ void rvu_dbg_exit(struct rvu *rvu);
static inline void rvu_dbg_init(struct rvu *rvu) {}
static inline void rvu_dbg_exit(struct rvu *rvu) {}
#endif
+
+/* RVU Switch */
+void rvu_switch_enable(struct rvu *rvu);
+void rvu_switch_disable(struct rvu *rvu);
+void rvu_switch_update_rules(struct rvu *rvu, u16 pcifunc);
+
#endif /* RVU_H */
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
index 6e2bf4fcd29c..fe99ac4a4dd8 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
@@ -63,7 +63,7 @@ static u16 cgxlmac_to_pfmap(struct rvu *rvu, u8 cgx_id, u8 lmac_id)
return rvu->cgxlmac2pf_map[CGX_OFFSET(cgx_id) + lmac_id];
}
-static int cgxlmac_to_pf(struct rvu *rvu, int cgx_id, int lmac_id)
+int cgxlmac_to_pf(struct rvu *rvu, int cgx_id, int lmac_id)
{
unsigned long pfmap;
@@ -126,6 +126,7 @@ static int rvu_map_cgx_lmac_pf(struct rvu *rvu)
unsigned long lmac_bmap;
int size, free_pkind;
int cgx, lmac, iter;
+ int numvfs, hwvfs;
if (!cgx_cnt_max)
return 0;
@@ -166,6 +167,8 @@ static int rvu_map_cgx_lmac_pf(struct rvu *rvu)
pkind->pfchan_map[free_pkind] = ((pf) & 0x3F) << 16;
rvu_map_cgx_nix_block(rvu, pf, cgx, lmac);
rvu->cgx_mapped_pfs++;
+ rvu_get_pf_numvfs(rvu, pf, &numvfs, &hwvfs);
+ rvu->cgx_mapped_vfs += numvfs;
pf++;
}
}
@@ -454,6 +457,31 @@ int rvu_cgx_config_rxtx(struct rvu *rvu, u16 pcifunc, bool start)
return 0;
}
+void rvu_cgx_disable_dmac_entries(struct rvu *rvu, u16 pcifunc)
+{
+ int pf = rvu_get_pf(pcifunc);
+ int i = 0, lmac_count = 0;
+ u8 max_dmac_filters;
+ u8 cgx_id, lmac_id;
+ void *cgx_dev;
+
+ if (!is_cgx_config_permitted(rvu, pcifunc))
+ return;
+
+ rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
+ cgx_dev = cgx_get_pdata(cgx_id);
+ lmac_count = cgx_get_lmac_cnt(cgx_dev);
+ max_dmac_filters = MAX_DMAC_ENTRIES_PER_CGX / lmac_count;
+
+ for (i = 0; i < max_dmac_filters; i++)
+ cgx_lmac_addr_del(cgx_id, lmac_id, i);
+
+ /* cgx_lmac_addr_del() does not clear the entry at index 0,
+ * so reset it explicitly
+ */
+ cgx_lmac_addr_reset(cgx_id, lmac_id);
+}
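
Worked through with the numbers quoted in otx2_dmac_flt.c later in this patch: a CGX block supports at most 32 DMAC entries and typically carries 4 LMACs, so max_dmac_filters = 32 / 4 = 8 and the loop above deletes filter indices 0-7 for this LMAC before resetting index 0 explicitly.
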
+
int rvu_mbox_handler_cgx_start_rxtx(struct rvu *rvu, struct msg_req *req,
struct msg_rsp *rsp)
{
@@ -557,6 +585,63 @@ int rvu_mbox_handler_cgx_mac_addr_set(struct rvu *rvu,
return 0;
}
+int rvu_mbox_handler_cgx_mac_addr_add(struct rvu *rvu,
+ struct cgx_mac_addr_add_req *req,
+ struct cgx_mac_addr_add_rsp *rsp)
+{
+ int pf = rvu_get_pf(req->hdr.pcifunc);
+ u8 cgx_id, lmac_id;
+ int rc = 0;
+
+ if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
+ return -EPERM;
+
+ rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
+ rc = cgx_lmac_addr_add(cgx_id, lmac_id, req->mac_addr);
+ if (rc >= 0) {
+ rsp->index = rc;
+ return 0;
+ }
+
+ return rc;
+}
+
+int rvu_mbox_handler_cgx_mac_addr_del(struct rvu *rvu,
+ struct cgx_mac_addr_del_req *req,
+ struct msg_rsp *rsp)
+{
+ int pf = rvu_get_pf(req->hdr.pcifunc);
+ u8 cgx_id, lmac_id;
+
+ if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
+ return -EPERM;
+
+ rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
+ return cgx_lmac_addr_del(cgx_id, lmac_id, req->index);
+}
+
+int rvu_mbox_handler_cgx_mac_max_entries_get(struct rvu *rvu,
+ struct msg_req *req,
+ struct cgx_max_dmac_entries_get_rsp
+ *rsp)
+{
+ int pf = rvu_get_pf(req->hdr.pcifunc);
+ u8 cgx_id, lmac_id;
+
+ /* If the msg is received from PFs that are not mapped to CGX
+ * LMACs, or from a VF, then no entries are allocated for DMAC
+ * filters at CGX level. So return zero.
+ */
+ if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc)) {
+ rsp->max_dmac_filters = 0;
+ return 0;
+ }
+
+ rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
+ rsp->max_dmac_filters = cgx_lmac_addr_max_entries_get(cgx_id, lmac_id);
+ return 0;
+}
+
int rvu_mbox_handler_cgx_mac_addr_get(struct rvu *rvu,
struct cgx_mac_addr_set_or_get *req,
struct cgx_mac_addr_set_or_get *rsp)
@@ -953,3 +1038,30 @@ int rvu_mbox_handler_cgx_set_link_mode(struct rvu *rvu,
rsp->status = cgx_set_link_mode(cgxd, req->args, cgx_idx, lmac);
return 0;
}
+
+int rvu_mbox_handler_cgx_mac_addr_reset(struct rvu *rvu, struct msg_req *req,
+ struct msg_rsp *rsp)
+{
+ int pf = rvu_get_pf(req->hdr.pcifunc);
+ u8 cgx_id, lmac_id;
+
+ if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
+ return -EPERM;
+
+ rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
+ return cgx_lmac_addr_reset(cgx_id, lmac_id);
+}
+
+int rvu_mbox_handler_cgx_mac_addr_update(struct rvu *rvu,
+ struct cgx_mac_addr_update_req *req,
+ struct msg_rsp *rsp)
+{
+ int pf = rvu_get_pf(req->hdr.pcifunc);
+ u8 cgx_id, lmac_id;
+
+ if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
+ return -EPERM;
+
+ rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
+ return cgx_lmac_addr_update(cgx_id, lmac_id, req->mac_addr, req->index);
+}
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cn10k.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cn10k.c
index 7d9e71c6965f..8d48b64485c6 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cn10k.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cn10k.c
@@ -10,6 +10,206 @@
#include "cgx.h"
#include "rvu_reg.h"
+/* RVU LMTST */
+#define LMT_TBL_OP_READ 0
+#define LMT_TBL_OP_WRITE 1
+#define LMT_MAP_TABLE_SIZE (128 * 1024)
+#define LMT_MAPTBL_ENTRY_SIZE 16
+
+/* Function to perform operations (read/write) on lmtst map table */
+static int lmtst_map_table_ops(struct rvu *rvu, u32 index, u64 *val,
+ int lmt_tbl_op)
+{
+ void __iomem *lmt_map_base;
+ u64 tbl_base;
+
+ tbl_base = rvu_read64(rvu, BLKADDR_APR, APR_AF_LMT_MAP_BASE);
+
+ lmt_map_base = ioremap_wc(tbl_base, LMT_MAP_TABLE_SIZE);
+ if (!lmt_map_base) {
+ dev_err(rvu->dev, "Failed to set up lmt map table mapping\n");
+ return -ENOMEM;
+ }
+
+ if (lmt_tbl_op == LMT_TBL_OP_READ) {
+ *val = readq(lmt_map_base + index);
+ } else {
+ writeq((*val), (lmt_map_base + index));
+ /* Flushing the AP interceptor cache to make APR_LMT_MAP_ENTRY_S
+ * changes effective. Write 1 to flush; the read acts as a
+ * barrier and sets up a data dependency. Write 0 after the
+ * write of 1 to complete the flush.
+ */
+ rvu_write64(rvu, BLKADDR_APR, APR_AF_LMT_CTL, BIT_ULL(0));
+ rvu_read64(rvu, BLKADDR_APR, APR_AF_LMT_CTL);
+ rvu_write64(rvu, BLKADDR_APR, APR_AF_LMT_CTL, 0x00);
+ }
+
+ iounmap(lmt_map_base);
+ return 0;
+}
+
+static u32 rvu_get_lmtst_tbl_index(struct rvu *rvu, u16 pcifunc)
+{
+ return ((rvu_get_pf(pcifunc) * rvu->hw->total_vfs) +
+ (pcifunc & RVU_PFVF_FUNC_MASK)) * LMT_MAPTBL_ENTRY_SIZE;
+}
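
A worked example of this indexing: each APR_LMT_MAP_ENTRY_S occupies LMT_MAPTBL_ENTRY_SIZE (16) bytes and entries are laid out per PF in strides of hw->total_vfs. With total_vfs = 256, PF2's own entry (func = 0) sits at byte offset (2 * 256 + 0) * 16 = 8192, and its VF1 (func = 2, since the FUNC field is vf_id + 1) at (2 * 256 + 2) * 16 = 8224.
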
+
+static int rvu_get_lmtaddr(struct rvu *rvu, u16 pcifunc,
+ u64 iova, u64 *lmt_addr)
+{
+ u64 pa, val, pf;
+ int err;
+
+ if (!iova) {
+ dev_err(rvu->dev, "%s Requested NULL address for translation\n", __func__);
+ return -EINVAL;
+ }
+
+ rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_SMMU_ADDR_REQ, iova);
+ pf = rvu_get_pf(pcifunc) & 0x1F;
+ val = BIT_ULL(63) | BIT_ULL(14) | BIT_ULL(13) | pf << 8 |
+ ((pcifunc & RVU_PFVF_FUNC_MASK) & 0xFF);
+ rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_SMMU_TXN_REQ, val);
+
+ err = rvu_poll_reg(rvu, BLKADDR_RVUM, RVU_AF_SMMU_ADDR_RSP_STS, BIT_ULL(0), false);
+ if (err) {
+ dev_err(rvu->dev, "%s LMTLINE iova translation failed\n", __func__);
+ return err;
+ }
+ val = rvu_read64(rvu, BLKADDR_RVUM, RVU_AF_SMMU_ADDR_RSP_STS);
+ if (val & ~0x1ULL) {
+ dev_err(rvu->dev, "%s LMTLINE iova translation failed err:%llx\n", __func__, val);
+ return -EIO;
+ }
+ /* PA[51:12] = RVU_AF_SMMU_TLN_FLIT1[60:21]
+ * PA[11:0] = IOVA[11:0]
+ */
+ pa = rvu_read64(rvu, BLKADDR_RVUM, RVU_AF_SMMU_TLN_FLIT1) >> 21;
+ pa &= GENMASK_ULL(39, 0);
+ *lmt_addr = (pa << 12) | (iova & 0xFFF);
+
+ return 0;
+}
+
+static int rvu_update_lmtaddr(struct rvu *rvu, u16 pcifunc, u64 lmt_addr)
+{
+ struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
+ u32 tbl_idx;
+ int err = 0;
+ u64 val;
+
+ /* Read the current lmt addr of pcifunc */
+ tbl_idx = rvu_get_lmtst_tbl_index(rvu, pcifunc);
+ err = lmtst_map_table_ops(rvu, tbl_idx, &val, LMT_TBL_OP_READ);
+ if (err) {
+ dev_err(rvu->dev,
+ "Failed to read LMT map table: index 0x%x err %d\n",
+ tbl_idx, err);
+ return err;
+ }
+
+ /* Store the secondary's lmt base address, as this needs to be
+ * reverted in FLR. Also make sure this default value doesn't
+ * get overwritten on multiple calls to this mailbox.
+ */
+ if (!pfvf->lmt_base_addr)
+ pfvf->lmt_base_addr = val;
+
+ /* Update the LMT table with new addr */
+ err = lmtst_map_table_ops(rvu, tbl_idx, &lmt_addr, LMT_TBL_OP_WRITE);
+ if (err) {
+ dev_err(rvu->dev,
+ "Failed to update LMT map table: index 0x%x err %d\n",
+ tbl_idx, err);
+ return err;
+ }
+ return 0;
+}
+
+int rvu_mbox_handler_lmtst_tbl_setup(struct rvu *rvu,
+ struct lmtst_tbl_setup_req *req,
+ struct msg_rsp *rsp)
+{
+ u64 lmt_addr, val;
+ u32 pri_tbl_idx;
+ int err = 0;
+
+ /* Check if PF_FUNC wants to use its own local memory as LMTLINE
+ * region, if so, convert that IOVA to physical address and
+ * populate LMT table with that address
+ */
+ if (req->use_local_lmt_region) {
+ err = rvu_get_lmtaddr(rvu, req->hdr.pcifunc,
+ req->lmt_iova, &lmt_addr);
+ if (err < 0)
+ return err;
+
+ /* Update the lmt addr for this PFFUNC in the LMT table */
+ err = rvu_update_lmtaddr(rvu, req->hdr.pcifunc, lmt_addr);
+ if (err)
+ return err;
+ }
+
+ /* Reconfigure the lmtst map table in LMT region shared mode,
+ * i.e. make multiple PF_FUNCs share an LMTLINE region. The
+ * primary/base pcifunc (passed as an argument to the mailbox)
+ * is the one whose lmt base address is shared with the
+ * secondary pcifuncs (the callers of this mailbox).
+ */
+ if (req->base_pcifunc) {
+ /* Calculating the LMT table index equivalent to primary
+ * pcifunc.
+ */
+ pri_tbl_idx = rvu_get_lmtst_tbl_index(rvu, req->base_pcifunc);
+
+ /* Read the base lmt addr of the primary pcifunc */
+ err = lmtst_map_table_ops(rvu, pri_tbl_idx, &val,
+ LMT_TBL_OP_READ);
+ if (err) {
+ dev_err(rvu->dev,
+ "Failed to read LMT map table: index 0x%x err %d\n",
+ pri_tbl_idx, err);
+ return err;
+ }
+
+ /* Update the base lmt addr of secondary with primary's base
+ * lmt addr.
+ */
+ err = rvu_update_lmtaddr(rvu, req->hdr.pcifunc, val);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
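
The base_pcifunc path is not exercised by the otx2 changes later in this patch; as a hedged sketch, a secondary pcifunc's driver could adopt a primary's LMT region through this mailbox roughly as follows (the mbox helpers follow the otx2 usage in cn10k.c below, and primary_pcifunc is an illustrative placeholder):

    struct lmtst_tbl_setup_req *req;
    int err;

    mutex_lock(&pfvf->mbox.lock);
    req = otx2_mbox_alloc_msg_lmtst_tbl_setup(&pfvf->mbox);
    if (!req) {
        mutex_unlock(&pfvf->mbox.lock);
        return -ENOMEM;
    }
    /* Share the primary's LMTLINE region instead of passing a
     * local IOVA; primary_pcifunc is illustrative only.
     */
    req->base_pcifunc = primary_pcifunc;
    err = otx2_sync_mbox_msg(&pfvf->mbox);
    mutex_unlock(&pfvf->mbox.lock);
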
+
+/* Reset the pcifunc's lmtst map table entry to its original base address */
+void rvu_reset_lmt_map_tbl(struct rvu *rvu, u16 pcifunc)
+{
+ struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
+ u32 tbl_idx;
+ int err;
+
+ if (is_rvu_otx2(rvu))
+ return;
+
+ if (pfvf->lmt_base_addr) {
+ /* This corresponds to lmt map table index */
+ tbl_idx = rvu_get_lmtst_tbl_index(rvu, pcifunc);
+ /* Revert to the original lmt base addr for the respective
+ * pcifunc.
+ */
+ err = lmtst_map_table_ops(rvu, tbl_idx, &pfvf->lmt_base_addr,
+ LMT_TBL_OP_WRITE);
+ if (err)
+ dev_err(rvu->dev,
+ "Failed to update LMT map table: index 0x%x err %d\n",
+ tbl_idx, err);
+ pfvf->lmt_base_addr = 0;
+ }
+}
+
int rvu_set_channels_base(struct rvu *rvu)
{
struct rvu_hwinfo *hw = rvu->hw;
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
index 3cc3c6fd1d84..9b2dfbf90e51 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
@@ -1971,10 +1971,9 @@ static int cgx_print_stats(struct seq_file *s, int lmac_id)
return err;
}
-static int rvu_dbg_cgx_stat_display(struct seq_file *filp, void *unused)
+static int rvu_dbg_derive_lmacid(struct seq_file *filp, int *lmac_id)
{
struct dentry *current_dir;
- int err, lmac_id;
char *buf;
current_dir = filp->file->f_path.dentry->d_parent;
@@ -1982,17 +1981,87 @@ static int rvu_dbg_cgx_stat_display(struct seq_file *filp, void *unused)
if (!buf)
return -EINVAL;
- err = kstrtoint(buf + 1, 10, &lmac_id);
- if (!err) {
- err = cgx_print_stats(filp, lmac_id);
- if (err)
- return err;
- }
+ return kstrtoint(buf + 1, 10, lmac_id);
+}
+
+static int rvu_dbg_cgx_stat_display(struct seq_file *filp, void *unused)
+{
+ int lmac_id, err;
+
+ err = rvu_dbg_derive_lmacid(filp, &lmac_id);
+ if (!err)
+ return cgx_print_stats(filp, lmac_id);
+
return err;
}
RVU_DEBUG_SEQ_FOPS(cgx_stat, cgx_stat_display, NULL);
+static int cgx_print_dmac_flt(struct seq_file *s, int lmac_id)
+{
+ struct pci_dev *pdev = NULL;
+ void *cgxd = s->private;
+ char *bcast, *mcast;
+ u16 index, domain;
+ u8 dmac[ETH_ALEN];
+ struct rvu *rvu;
+ u64 cfg, mac;
+ int pf;
+
+ rvu = pci_get_drvdata(pci_get_device(PCI_VENDOR_ID_CAVIUM,
+ PCI_DEVID_OCTEONTX2_RVU_AF, NULL));
+ if (!rvu)
+ return -ENODEV;
+
+ pf = cgxlmac_to_pf(rvu, cgx_get_cgxid(cgxd), lmac_id);
+ domain = 2;
+
+ pdev = pci_get_domain_bus_and_slot(domain, pf + 1, 0);
+ if (!pdev)
+ return 0;
+
+ cfg = cgx_read_dmac_ctrl(cgxd, lmac_id);
+ bcast = cfg & CGX_DMAC_BCAST_MODE ? "ACCEPT" : "REJECT";
+ mcast = cfg & CGX_DMAC_MCAST_MODE ? "ACCEPT" : "REJECT";
+
+ seq_puts(s,
+ "PCI dev RVUPF BROADCAST MULTICAST FILTER-MODE\n");
+ seq_printf(s, "%s PF%d %9s %9s",
+ dev_name(&pdev->dev), pf, bcast, mcast);
+ if (cfg & CGX_DMAC_CAM_ACCEPT)
+ seq_printf(s, "%12s\n\n", "UNICAST");
+ else
+ seq_printf(s, "%16s\n\n", "PROMISCUOUS");
+
+ seq_puts(s, "\nDMAC-INDEX ADDRESS\n");
+
+ for (index = 0 ; index < 32 ; index++) {
+ cfg = cgx_read_dmac_entry(cgxd, index);
+ /* Display enabled dmac entries associated with current lmac */
+ if (lmac_id == FIELD_GET(CGX_DMAC_CAM_ENTRY_LMACID, cfg) &&
+ FIELD_GET(CGX_DMAC_CAM_ADDR_ENABLE, cfg)) {
+ mac = FIELD_GET(CGX_RX_DMAC_ADR_MASK, cfg);
+ u64_to_ether_addr(mac, dmac);
+ seq_printf(s, "%7d %pM\n", index, dmac);
+ }
+ }
+
+ return 0;
+}
+
+static int rvu_dbg_cgx_dmac_flt_display(struct seq_file *filp, void *unused)
+{
+ int err, lmac_id;
+
+ err = rvu_dbg_derive_lmacid(filp, &lmac_id);
+ if (!err)
+ return cgx_print_dmac_flt(filp, lmac_id);
+
+ return err;
+}
+
+RVU_DEBUG_SEQ_FOPS(cgx_dmac_flt, cgx_dmac_flt_display, NULL);
+
static void rvu_dbg_cgx_init(struct rvu *rvu)
{
struct mac_ops *mac_ops;
@@ -2029,6 +2098,9 @@ static void rvu_dbg_cgx_init(struct rvu *rvu)
debugfs_create_file("stats", 0600, rvu->rvu_dbg.lmac,
cgx, &rvu_dbg_cgx_stat_fops);
+ debugfs_create_file("mac_filter", 0600,
+ rvu->rvu_dbg.lmac, cgx,
+ &rvu_dbg_cgx_dmac_flt_fops);
}
}
}
@@ -2041,9 +2113,6 @@ static void rvu_print_npc_mcam_info(struct seq_file *s,
int entry_acnt, entry_ecnt;
int cntr_acnt, cntr_ecnt;
- /* Skip PF0 */
- if (!pcifunc)
- return;
rvu_npc_get_mcam_entry_alloc_info(rvu, pcifunc, blkaddr,
&entry_acnt, &entry_ecnt);
rvu_npc_get_mcam_counter_alloc_info(rvu, pcifunc, blkaddr,
@@ -2226,7 +2295,7 @@ static void rvu_dbg_npc_mcam_show_flows(struct seq_file *s,
static void rvu_dbg_npc_mcam_show_action(struct seq_file *s,
struct rvu_npc_mcam_rule *rule)
{
- if (rule->intf == NIX_INTF_TX) {
+ if (is_npc_intf_tx(rule->intf)) {
switch (rule->tx_action.op) {
case NIX_TX_ACTIONOP_DROP:
seq_puts(s, "\taction: Drop\n");
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c
index 10a98bcb7c54..2688186066d9 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c
@@ -1364,6 +1364,44 @@ static void rvu_health_reporters_destroy(struct rvu *rvu)
rvu_nix_health_reporters_destroy(rvu_dl);
}
+static int rvu_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode)
+{
+ struct rvu_devlink *rvu_dl = devlink_priv(devlink);
+ struct rvu *rvu = rvu_dl->rvu;
+ struct rvu_switch *rswitch;
+
+ rswitch = &rvu->rswitch;
+ *mode = rswitch->mode;
+
+ return 0;
+}
+
+static int rvu_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode,
+ struct netlink_ext_ack *extack)
+{
+ struct rvu_devlink *rvu_dl = devlink_priv(devlink);
+ struct rvu *rvu = rvu_dl->rvu;
+ struct rvu_switch *rswitch;
+
+ rswitch = &rvu->rswitch;
+ switch (mode) {
+ case DEVLINK_ESWITCH_MODE_LEGACY:
+ case DEVLINK_ESWITCH_MODE_SWITCHDEV:
+ if (rswitch->mode == mode)
+ return 0;
+ rswitch->mode = mode;
+ if (mode == DEVLINK_ESWITCH_MODE_SWITCHDEV)
+ rvu_switch_enable(rvu);
+ else
+ rvu_switch_disable(rvu);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
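
With these ops registered, the switch is driven from userspace through the standard devlink eswitch interface, e.g. "devlink dev eswitch set pci/0002:01:00.0 mode switchdev" to enable and the same command with "mode legacy" to revert (the PCI address here is illustrative).
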
+
static int rvu_devlink_info_get(struct devlink *devlink, struct devlink_info_req *req,
struct netlink_ext_ack *extack)
{
@@ -1372,6 +1410,8 @@ static int rvu_devlink_info_get(struct devlink *devlink, struct devlink_info_req
static const struct devlink_ops rvu_devlink_ops = {
.info_get = rvu_devlink_info_get,
+ .eswitch_mode_get = rvu_devlink_eswitch_mode_get,
+ .eswitch_mode_set = rvu_devlink_eswitch_mode_set,
};
int rvu_register_dl(struct rvu *rvu)
@@ -1380,14 +1420,9 @@ int rvu_register_dl(struct rvu *rvu)
struct devlink *dl;
int err;
- rvu_dl = kzalloc(sizeof(*rvu_dl), GFP_KERNEL);
- if (!rvu_dl)
- return -ENOMEM;
-
dl = devlink_alloc(&rvu_devlink_ops, sizeof(struct rvu_devlink));
if (!dl) {
dev_warn(rvu->dev, "devlink_alloc failed\n");
- kfree(rvu_dl);
return -ENOMEM;
}
@@ -1395,10 +1430,10 @@ int rvu_register_dl(struct rvu *rvu)
if (err) {
dev_err(rvu->dev, "devlink register failed with error %d\n", err);
devlink_free(dl);
- kfree(rvu_dl);
return err;
}
+ rvu_dl = devlink_priv(dl);
rvu_dl->dl = dl;
rvu_dl->rvu = rvu;
rvu->rvu_dl = rvu_dl;
@@ -1417,5 +1452,4 @@ void rvu_unregister_dl(struct rvu *rvu)
rvu_health_reporters_destroy(rvu);
devlink_unregister(dl);
devlink_free(dl);
- kfree(rvu_dl);
}
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
index d6f8210652c5..4bfbbdf38770 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
@@ -196,11 +196,22 @@ static void nix_rx_sync(struct rvu *rvu, int blkaddr)
{
int err;
- /*Sync all in flight RX packets to LLC/DRAM */
+ /* Sync all in flight RX packets to LLC/DRAM */
rvu_write64(rvu, blkaddr, NIX_AF_RX_SW_SYNC, BIT_ULL(0));
err = rvu_poll_reg(rvu, blkaddr, NIX_AF_RX_SW_SYNC, BIT_ULL(0), true);
if (err)
- dev_err(rvu->dev, "NIX RX software sync failed\n");
+ dev_err(rvu->dev, "SYNC1: NIX RX software sync failed\n");
+
+ /* SW_SYNC ensures all existing transactions are finished and pkts
+ * are written to LLC/DRAM; queues should be torn down only after
+ * a successful SW_SYNC. Due to a HW erratum, in some rare
+ * scenarios an existing transaction might complete after the
+ * SW_SYNC operation. To ensure the operation is fully done, do
+ * the SW_SYNC twice.
+ */
+ rvu_write64(rvu, blkaddr, NIX_AF_RX_SW_SYNC, BIT_ULL(0));
+ err = rvu_poll_reg(rvu, blkaddr, NIX_AF_RX_SW_SYNC, BIT_ULL(0), true);
+ if (err)
+ dev_err(rvu->dev, "SYNC2: NIX RX software sync failed\n");
}
static bool is_valid_txschq(struct rvu *rvu, int blkaddr,
@@ -298,6 +309,7 @@ static int nix_interface_init(struct rvu *rvu, u16 pcifunc, int type, int nixlf)
rvu_nix_chan_lbk(rvu, lbkid, vf + 1);
pfvf->rx_chan_cnt = 1;
pfvf->tx_chan_cnt = 1;
+ rvu_npc_set_pkind(rvu, NPC_RX_LBK_PKIND, pfvf);
rvu_npc_install_promisc_entry(rvu, pcifunc, nixlf,
pfvf->rx_chan_base,
pfvf->rx_chan_cnt);
@@ -346,6 +358,9 @@ static void nix_interface_deinit(struct rvu *rvu, u16 pcifunc, u8 nixlf)
/* Free and disable any MCAM entries used by this NIX LF */
rvu_npc_disable_mcam_entries(rvu, pcifunc, nixlf);
+
+ /* Disable DMAC filters used */
+ rvu_cgx_disable_dmac_entries(rvu, pcifunc);
}
int rvu_mbox_handler_nix_bp_disable(struct rvu *rvu,
@@ -1949,6 +1964,35 @@ static void nix_tl1_default_cfg(struct rvu *rvu, struct nix_hw *nix_hw,
pfvf_map[schq] = TXSCH_SET_FLAG(pfvf_map[schq], NIX_TXSCHQ_CFG_DONE);
}
+static void rvu_nix_tx_tl2_cfg(struct rvu *rvu, int blkaddr,
+ u16 pcifunc, struct nix_txsch *txsch)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ int lbk_link_start, lbk_links;
+ u8 pf = rvu_get_pf(pcifunc);
+ int schq;
+
+ if (!is_pf_cgxmapped(rvu, pf))
+ return;
+
+ lbk_link_start = hw->cgx_links;
+
+ for (schq = 0; schq < txsch->schq.max; schq++) {
+ if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) != pcifunc)
+ continue;
+ /* Enable all LBK links with channel 63 by default so that
+ * packets can be sent to LBK with an NPC TX MCAM rule
+ */
+ lbk_links = hw->lbk_links;
+ while (lbk_links--)
+ rvu_write64(rvu, blkaddr,
+ NIX_AF_TL3_TL2X_LINKX_CFG(schq,
+ lbk_link_start +
+ lbk_links),
+ BIT_ULL(12) | RVU_SWITCH_LBK_CHAN);
+ }
+}
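
For reference, the value written per LBK link above works out to BIT_ULL(12) | 63 = 0x103f; judging from the comment, bit 12 enables the link and the low bits carry the relative channel RVU_SWITCH_LBK_CHAN.
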
+
int rvu_mbox_handler_nix_txschq_cfg(struct rvu *rvu,
struct nix_txschq_config *req,
struct msg_rsp *rsp)
@@ -2037,6 +2081,9 @@ int rvu_mbox_handler_nix_txschq_cfg(struct rvu *rvu,
rvu_write64(rvu, blkaddr, reg, regval);
}
+ rvu_nix_tx_tl2_cfg(rvu, blkaddr, pcifunc,
+ &nix_hw->txsch[NIX_TXSCH_LVL_TL2]);
+
return 0;
}
@@ -3177,6 +3224,8 @@ int rvu_mbox_handler_nix_set_mac_addr(struct rvu *rvu,
if (test_bit(PF_SET_VF_TRUSTED, &pfvf->flags) && from_vf)
ether_addr_copy(pfvf->default_mac, req->mac_addr);
+ rvu_switch_update_rules(rvu, pcifunc);
+
return 0;
}
@@ -3805,7 +3854,6 @@ static void rvu_nix_block_freemem(struct rvu *rvu, int blkaddr,
vlan = &nix_hw->txvlan;
kfree(vlan->rsrc.bmap);
mutex_destroy(&vlan->rsrc_lock);
- devm_kfree(rvu->dev, vlan->entry2pfvf_map);
mcast = &nix_hw->mcast;
qmem_free(rvu->dev, mcast->mce_ctx);
@@ -3846,6 +3894,8 @@ int rvu_mbox_handler_nix_lf_start_rx(struct rvu *rvu, struct msg_req *req,
pfvf = rvu_get_pfvf(rvu, pcifunc);
set_bit(NIXLF_INITIALIZED, &pfvf->flags);
+ rvu_switch_update_rules(rvu, pcifunc);
+
return rvu_cgx_start_stop_io(rvu, pcifunc, true);
}
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
index 3612e0a2cab3..52b255426c22 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
@@ -442,7 +442,8 @@ static void npc_fixup_vf_rule(struct rvu *rvu, struct npc_mcam *mcam,
owner = mcam->entry2pfvf_map[index];
target_func = (entry->action >> 4) & 0xffff;
/* do nothing when target is LBK/PF or owner is not PF */
- if (is_afvf(target_func) || (owner & RVU_PFVF_FUNC_MASK) ||
+ if (is_pffunc_af(owner) || is_afvf(target_func) ||
+ (owner & RVU_PFVF_FUNC_MASK) ||
!(target_func & RVU_PFVF_FUNC_MASK))
return;
@@ -468,6 +469,8 @@ static void npc_config_mcam_entry(struct rvu *rvu, struct npc_mcam *mcam,
{
int bank = npc_get_bank(mcam, index);
int kw = 0, actbank, actindex;
+ u8 tx_intf_mask = ~intf & 0x3;
+ u8 tx_intf = intf;
u64 cam0, cam1;
actbank = bank; /* Save bank id, to set action later on */
@@ -488,12 +491,21 @@ static void npc_config_mcam_entry(struct rvu *rvu, struct npc_mcam *mcam,
*/
for (; bank < (actbank + mcam->banks_per_entry); bank++, kw = kw + 2) {
/* Interface should be set in all banks */
+ if (is_npc_intf_tx(intf)) {
+ /* Last bit must be set and rest don't care
+ * for TX interfaces
+ */
+ tx_intf_mask = 0x1;
+ tx_intf = intf & tx_intf_mask;
+ tx_intf_mask = ~tx_intf & tx_intf_mask;
+ }
+
rvu_write64(rvu, blkaddr,
NPC_AF_MCAMEX_BANKX_CAMX_INTF(index, bank, 1),
- intf);
+ tx_intf);
rvu_write64(rvu, blkaddr,
NPC_AF_MCAMEX_BANKX_CAMX_INTF(index, bank, 0),
- ~intf & 0x3);
+ tx_intf_mask);
/* Set the match key */
npc_get_keyword(entry, kw, &cam0, &cam1);
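
Worked through for a TX interface (is_npc_intf_tx() tests bit 0, so TX interfaces carry odd values): for intf = NIX0_TX, tx_intf = intf & 0x1 = 1 and tx_intf_mask = ~1 & 0x1 = 0, so CAM1 is written with 0x1 and CAM0 with 0x0; bit 0 must be set and bit 1 is a don't-care, letting one TX entry match regardless of which NIX block transmits.
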
@@ -650,6 +662,7 @@ void rvu_npc_install_ucast_entry(struct rvu *rvu, u16 pcifunc,
eth_broadcast_addr((u8 *)&req.mask.dmac);
req.features = BIT_ULL(NPC_DMAC);
req.channel = chan;
+ req.chan_mask = 0xFFFU;
req.intf = pfvf->nix_rx_intf;
req.op = action.op;
req.hdr.pcifunc = 0; /* AF is requester */
@@ -799,6 +812,7 @@ void rvu_npc_install_bcast_match_entry(struct rvu *rvu, u16 pcifunc,
eth_broadcast_addr((u8 *)&req.mask.dmac);
req.features = BIT_ULL(NPC_DMAC);
req.channel = chan;
+ req.chan_mask = 0xFFFU;
req.intf = pfvf->nix_rx_intf;
req.entry = index;
req.hdr.pcifunc = 0; /* AF is requester */
@@ -1707,7 +1721,6 @@ static void npc_parser_profile_init(struct rvu *rvu, int blkaddr)
{
struct rvu_hwinfo *hw = rvu->hw;
int num_pkinds, num_kpus, idx;
- struct npc_pkind *pkind;
/* Disable all KPUs and their entries */
for (idx = 0; idx < hw->npc_kpus; idx++) {
@@ -1725,9 +1738,8 @@ static void npc_parser_profile_init(struct rvu *rvu, int blkaddr)
* Check HW max count to avoid configuring junk or
* writing to unsupported CSR addresses.
*/
- pkind = &hw->pkind;
num_pkinds = rvu->kpu.pkinds;
- num_pkinds = min_t(int, pkind->rsrc.max, num_pkinds);
+ num_pkinds = min_t(int, hw->npc_pkinds, num_pkinds);
for (idx = 0; idx < num_pkinds; idx++)
npc_config_kpuaction(rvu, blkaddr, &rvu->kpu.ikpu[idx], 0, idx, true);
@@ -1745,6 +1757,8 @@ static int npc_mcam_rsrcs_init(struct rvu *rvu, int blkaddr)
int nixlf_count = rvu_get_nixlf_count(rvu);
struct npc_mcam *mcam = &rvu->hw->mcam;
int rsvd, err;
+ u16 index;
+ int cntr;
u64 cfg;
/* Actual number of MCAM entries vary by entry size */
@@ -1845,6 +1859,14 @@ static int npc_mcam_rsrcs_init(struct rvu *rvu, int blkaddr)
if (!mcam->entry2target_pffunc)
goto free_mem;
+ for (index = 0; index < mcam->bmap_entries; index++) {
+ mcam->entry2pfvf_map[index] = NPC_MCAM_INVALID_MAP;
+ mcam->entry2cntr_map[index] = NPC_MCAM_INVALID_MAP;
+ }
+
+ for (cntr = 0; cntr < mcam->counters.max; cntr++)
+ mcam->cntr2pfvf_map[cntr] = NPC_MCAM_INVALID_MAP;
+
mutex_init(&mcam->lock);
return 0;
@@ -1867,7 +1889,8 @@ static void rvu_npc_hw_init(struct rvu *rvu, int blkaddr)
if (npc_const1 & BIT_ULL(63))
npc_const2 = rvu_read64(rvu, blkaddr, NPC_AF_CONST2);
- pkind->rsrc.max = (npc_const1 >> 12) & 0xFFULL;
+ pkind->rsrc.max = NPC_UNRESERVED_PKIND_COUNT;
+ hw->npc_pkinds = (npc_const1 >> 12) & 0xFFULL;
hw->npc_kpu_entries = npc_const1 & 0xFFFULL;
hw->npc_kpus = (npc_const >> 8) & 0x1FULL;
hw->npc_intfs = npc_const & 0xFULL;
@@ -1978,6 +2001,10 @@ int rvu_npc_init(struct rvu *rvu)
err = rvu_alloc_bitmap(&pkind->rsrc);
if (err)
return err;
+ /* Reserve PKIND#0 for LBKs. The power-on reset value of
+ * LBK_CH_PKIND is '0', so there is no need to configure a PKIND
+ * for each LBK separately.
+ */
+ rvu_alloc_rsrc(&pkind->rsrc);
/* Allocate mem for pkind to PF and channel mapping info */
pkind->pfchan_map = devm_kcalloc(rvu->dev, pkind->rsrc.max,
@@ -2562,7 +2589,7 @@ int rvu_mbox_handler_npc_mcam_alloc_entry(struct rvu *rvu,
}
/* Alloc request from PFFUNC with no NIXLF attached should be denied */
- if (!is_nixlf_attached(rvu, pcifunc))
+ if (!is_pffunc_af(pcifunc) && !is_nixlf_attached(rvu, pcifunc))
return NPC_MCAM_ALLOC_DENIED;
return npc_mcam_alloc_entries(mcam, pcifunc, req, rsp);
@@ -2582,7 +2609,7 @@ int rvu_mbox_handler_npc_mcam_free_entry(struct rvu *rvu,
return NPC_MCAM_INVALID_REQ;
/* Free request from PFFUNC with no NIXLF attached, ignore */
- if (!is_nixlf_attached(rvu, pcifunc))
+ if (!is_pffunc_af(pcifunc) && !is_nixlf_attached(rvu, pcifunc))
return NPC_MCAM_INVALID_REQ;
mutex_lock(&mcam->lock);
@@ -2594,7 +2621,7 @@ int rvu_mbox_handler_npc_mcam_free_entry(struct rvu *rvu,
if (rc)
goto exit;
- mcam->entry2pfvf_map[req->entry] = 0;
+ mcam->entry2pfvf_map[req->entry] = NPC_MCAM_INVALID_MAP;
mcam->entry2target_pffunc[req->entry] = 0x0;
npc_mcam_clear_bit(mcam, req->entry);
npc_enable_mcam_entry(rvu, mcam, blkaddr, req->entry, false);
@@ -2679,13 +2706,14 @@ int rvu_mbox_handler_npc_mcam_write_entry(struct rvu *rvu,
else
nix_intf = pfvf->nix_rx_intf;
- if (npc_mcam_verify_channel(rvu, pcifunc, req->intf, channel)) {
+ if (!is_pffunc_af(pcifunc) &&
+ npc_mcam_verify_channel(rvu, pcifunc, req->intf, channel)) {
rc = NPC_MCAM_INVALID_REQ;
goto exit;
}
- if (npc_mcam_verify_pf_func(rvu, &req->entry_data, req->intf,
- pcifunc)) {
+ if (!is_pffunc_af(pcifunc) &&
+ npc_mcam_verify_pf_func(rvu, &req->entry_data, req->intf, pcifunc)) {
rc = NPC_MCAM_INVALID_REQ;
goto exit;
}
@@ -2836,7 +2864,7 @@ int rvu_mbox_handler_npc_mcam_alloc_counter(struct rvu *rvu,
return NPC_MCAM_INVALID_REQ;
/* If the request is from a PFFUNC with no NIXLF attached, ignore */
- if (!is_nixlf_attached(rvu, pcifunc))
+ if (!is_pffunc_af(pcifunc) && !is_nixlf_attached(rvu, pcifunc))
return NPC_MCAM_INVALID_REQ;
/* Since list of allocated counter IDs needs to be sent to requester,
@@ -3081,7 +3109,7 @@ int rvu_mbox_handler_npc_mcam_alloc_and_write_entry(struct rvu *rvu,
if (rc) {
/* Free allocated MCAM entry */
mutex_lock(&mcam->lock);
- mcam->entry2pfvf_map[entry] = 0;
+ mcam->entry2pfvf_map[entry] = NPC_MCAM_INVALID_MAP;
npc_mcam_clear_bit(mcam, entry);
mutex_unlock(&mcam->lock);
return rc;
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c
index 68633145a8b8..5c01cf4a9c5b 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c
@@ -910,14 +910,17 @@ static void rvu_mcam_add_counter_to_rule(struct rvu *rvu, u16 pcifunc,
static void npc_update_rx_entry(struct rvu *rvu, struct rvu_pfvf *pfvf,
struct mcam_entry *entry,
- struct npc_install_flow_req *req, u16 target)
+ struct npc_install_flow_req *req,
+ u16 target, bool pf_set_vfs_mac)
{
+ struct rvu_switch *rswitch = &rvu->rswitch;
struct nix_rx_action action;
- u64 chan_mask;
- chan_mask = req->chan_mask ? req->chan_mask : ~0ULL;
- npc_update_entry(rvu, NPC_CHAN, entry, req->channel, 0, chan_mask, 0,
- NIX_INTF_RX);
+ if (rswitch->mode == DEVLINK_ESWITCH_MODE_SWITCHDEV && pf_set_vfs_mac)
+ req->chan_mask = 0x0; /* Channel is a don't-care */
+
+ npc_update_entry(rvu, NPC_CHAN, entry, req->channel, 0, req->chan_mask,
+ 0, NIX_INTF_RX);
*(u64 *)&action = 0x00;
action.pf_func = target;
@@ -949,9 +952,16 @@ static void npc_update_tx_entry(struct rvu *rvu, struct rvu_pfvf *pfvf,
struct npc_install_flow_req *req, u16 target)
{
struct nix_tx_action action;
+ u64 mask = ~0ULL;
+
+ /* If AF is installing the rule then don't match on PF_FUNC
+ * in the Send Descriptor
+ */
+ if (is_pffunc_af(req->hdr.pcifunc))
+ mask = 0;
npc_update_entry(rvu, NPC_PF_FUNC, entry, (__force u16)htons(target),
- 0, ~0ULL, 0, NIX_INTF_TX);
+ 0, mask, 0, NIX_INTF_TX);
*(u64 *)&action = 0x00;
action.op = req->op;
@@ -1002,7 +1012,7 @@ static int npc_install_flow(struct rvu *rvu, int blkaddr, u16 target,
req->intf);
if (is_npc_intf_rx(req->intf))
- npc_update_rx_entry(rvu, pfvf, entry, req, target);
+ npc_update_rx_entry(rvu, pfvf, entry, req, target, pf_set_vfs_mac);
else
npc_update_tx_entry(rvu, pfvf, entry, req, target);
@@ -1164,7 +1174,9 @@ int rvu_mbox_handler_npc_install_flow(struct rvu *rvu,
if (err)
return err;
- if (npc_mcam_verify_channel(rvu, target, req->intf, req->channel))
+ /* Skip channel validation if AF is installing */
+ if (!is_pffunc_af(req->hdr.pcifunc) &&
+ npc_mcam_verify_channel(rvu, target, req->intf, req->channel))
return -EINVAL;
pfvf = rvu_get_pfvf(rvu, target);
@@ -1180,6 +1192,7 @@ int rvu_mbox_handler_npc_install_flow(struct rvu *rvu,
eth_broadcast_addr((u8 *)&req->mask.dmac);
}
+ /* For TX rules, proceed whether or not a NIXLF is attached */
err = nix_get_nixlf(rvu, target, &nixlf, NULL);
if (err && is_npc_intf_rx(req->intf) && !pf_set_vfs_mac)
return -EINVAL;
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h
index 76837d5e19c6..8b01ef6e2c99 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h
@@ -49,6 +49,11 @@
#define RVU_AF_PFX_VF_BAR4_ADDR (0x5400 | (a) << 4)
#define RVU_AF_PFX_VF_BAR4_CFG (0x5600 | (a) << 4)
#define RVU_AF_PFX_LMTLINE_ADDR (0x5800 | (a) << 4)
+#define RVU_AF_SMMU_ADDR_REQ (0x6000)
+#define RVU_AF_SMMU_TXN_REQ (0x6008)
+#define RVU_AF_SMMU_ADDR_RSP_STS (0x6010)
+#define RVU_AF_SMMU_ADDR_TLN (0x6018)
+#define RVU_AF_SMMU_TLN_FLIT1 (0x6030)
/* Admin function's privileged PF/VF registers */
#define RVU_PRIV_CONST (0x8000000)
@@ -692,4 +697,9 @@
#define LBK_LINK_CFG_ID_MASK GENMASK_ULL(11, 6)
#define LBK_LINK_CFG_BASE_MASK GENMASK_ULL(5, 0)
+/* APR */
+#define APR_AF_LMT_CFG (0x000ull)
+#define APR_AF_LMT_MAP_BASE (0x008ull)
+#define APR_AF_LMT_CTL (0x010ull)
+
#endif /* RVU_REG_H */
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h
index 14aa8e37ea41..5bbe6727d11d 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h
@@ -35,7 +35,8 @@ enum rvu_block_addr_e {
BLKADDR_NDC_NPA0 = 0xeULL,
BLKADDR_NDC_NIX1_RX = 0x10ULL,
BLKADDR_NDC_NIX1_TX = 0x11ULL,
- BLK_COUNT = 0x12ULL,
+ BLKADDR_APR = 0x16ULL,
+ BLK_COUNT = 0x17ULL,
};
/* RVU Block Type Enumeration */
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_switch.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_switch.c
new file mode 100644
index 000000000000..820adf390b8e
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_switch.c
@@ -0,0 +1,259 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell OcteonTx2 RVU Admin Function driver
+ *
+ * Copyright (C) 2021 Marvell.
+ */
+
+#include <linux/bitfield.h>
+#include "rvu.h"
+
+static int rvu_switch_install_rx_rule(struct rvu *rvu, u16 pcifunc,
+ u16 chan_mask)
+{
+ struct npc_install_flow_req req = { 0 };
+ struct npc_install_flow_rsp rsp = { 0 };
+ struct rvu_pfvf *pfvf;
+
+ pfvf = rvu_get_pfvf(rvu, pcifunc);
+ /* If the pcifunc is not initialized then nothing to do.
+ * This same function will be called again via rvu_switch_update_rules
+ * after pcifunc is initialized.
+ */
+ if (!test_bit(NIXLF_INITIALIZED, &pfvf->flags))
+ return 0;
+
+ ether_addr_copy(req.packet.dmac, pfvf->mac_addr);
+ eth_broadcast_addr((u8 *)&req.mask.dmac);
+ req.hdr.pcifunc = 0; /* AF is requester */
+ req.vf = pcifunc;
+ req.features = BIT_ULL(NPC_DMAC);
+ req.channel = pfvf->rx_chan_base;
+ req.chan_mask = chan_mask;
+ req.intf = pfvf->nix_rx_intf;
+ req.op = NIX_RX_ACTION_DEFAULT;
+ req.default_rule = 1;
+
+ return rvu_mbox_handler_npc_install_flow(rvu, &req, &rsp);
+}
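
The chan_mask argument controls how strictly NPC matches the channel: 0xFFF (used by rvu_switch_disable() below) requires an exact match on pfvf->rx_chan_base, while 0x0 turns the channel into a don't-care so the same DMAC rule accepts traffic arriving from LBK or from the wire.
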
+
+static int rvu_switch_install_tx_rule(struct rvu *rvu, u16 pcifunc, u16 entry)
+{
+ struct npc_install_flow_req req = { 0 };
+ struct npc_install_flow_rsp rsp = { 0 };
+ struct rvu_pfvf *pfvf;
+ u8 lbkid;
+
+ pfvf = rvu_get_pfvf(rvu, pcifunc);
+ /* If the pcifunc is not initialized then nothing to do.
+ * This same function will be called again via rvu_switch_update_rules
+ * after pcifunc is initialized.
+ */
+ if (!test_bit(NIXLF_INITIALIZED, &pfvf->flags))
+ return 0;
+
+ lbkid = pfvf->nix_blkaddr == BLKADDR_NIX0 ? 0 : 1;
+ ether_addr_copy(req.packet.dmac, pfvf->mac_addr);
+ eth_broadcast_addr((u8 *)&req.mask.dmac);
+ req.hdr.pcifunc = 0; /* AF is requester */
+ req.vf = pcifunc;
+ req.entry = entry;
+ req.features = BIT_ULL(NPC_DMAC);
+ req.intf = pfvf->nix_tx_intf;
+ req.op = NIX_TX_ACTIONOP_UCAST_CHAN;
+ req.index = (lbkid << 8) | RVU_SWITCH_LBK_CHAN;
+ req.set_cntr = 1;
+
+ return rvu_mbox_handler_npc_install_flow(rvu, &req, &rsp);
+}
+
+static int rvu_switch_install_rules(struct rvu *rvu)
+{
+ struct rvu_switch *rswitch = &rvu->rswitch;
+ u16 start = rswitch->start_entry;
+ struct rvu_hwinfo *hw = rvu->hw;
+ u16 pcifunc, entry = 0;
+ int pf, vf, numvfs;
+ int err;
+
+ for (pf = 1; pf < hw->total_pfs; pf++) {
+ if (!is_pf_cgxmapped(rvu, pf))
+ continue;
+
+ pcifunc = pf << 10;
+ /* rvu_get_nix_blkaddr sets up the corresponding NIX block
+ * address and NIX RX and TX interfaces for a pcifunc.
+ * Generally it is called during a pcifunc's attach, but it
+ * is called here since we are pre-installing rules before
+ * NIXLFs are attached.
+ */
+ rvu_get_nix_blkaddr(rvu, pcifunc);
+
+ /* An MCAM RX rule for a PF/VF already exists as a default
+ * unicast rule installed by AF. Hence change the channel in
+ * those rules to a don't-care so that packets with the required
+ * DMAC, received either from LBK (from other PF/VFs in the
+ * system) or from the wire, are accepted.
+ */
+ err = rvu_switch_install_rx_rule(rvu, pcifunc, 0x0);
+ if (err) {
+ dev_err(rvu->dev, "RX rule for PF%d failed(%d)\n",
+ pf, err);
+ return err;
+ }
+
+ err = rvu_switch_install_tx_rule(rvu, pcifunc, start + entry);
+ if (err) {
+ dev_err(rvu->dev, "TX rule for PF%d failed(%d)\n",
+ pf, err);
+ return err;
+ }
+
+ rswitch->entry2pcifunc[entry++] = pcifunc;
+
+ rvu_get_pf_numvfs(rvu, pf, &numvfs, NULL);
+ for (vf = 0; vf < numvfs; vf++) {
+ pcifunc = pf << 10 | ((vf + 1) & 0x3FF);
+ rvu_get_nix_blkaddr(rvu, pcifunc);
+
+ err = rvu_switch_install_rx_rule(rvu, pcifunc, 0x0);
+ if (err) {
+ dev_err(rvu->dev,
+ "RX rule for PF%dVF%d failed(%d)\n",
+ pf, vf, err);
+ return err;
+ }
+
+ err = rvu_switch_install_tx_rule(rvu, pcifunc,
+ start + entry);
+ if (err) {
+ dev_err(rvu->dev,
+ "TX rule for PF%dVF%d failed(%d)\n",
+ pf, vf, err);
+ return err;
+ }
+
+ rswitch->entry2pcifunc[entry++] = pcifunc;
+ }
+ }
+
+ return 0;
+}
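
The pcifunc encoding used in this loop packs the PF number into bits 15:10 and the function into bits 9:0, where function 0 is the PF itself and VF n maps to n + 1; e.g. the first CGX-mapped PF's VF0 is (1 << 10) | 1 = 0x401.
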
+
+void rvu_switch_enable(struct rvu *rvu)
+{
+ struct npc_mcam_alloc_entry_req alloc_req = { 0 };
+ struct npc_mcam_alloc_entry_rsp alloc_rsp = { 0 };
+ struct npc_delete_flow_req uninstall_req = { 0 };
+ struct npc_mcam_free_entry_req free_req = { 0 };
+ struct rvu_switch *rswitch = &rvu->rswitch;
+ struct msg_rsp rsp;
+ int ret;
+
+ alloc_req.contig = true;
+ alloc_req.count = rvu->cgx_mapped_pfs + rvu->cgx_mapped_vfs;
+ ret = rvu_mbox_handler_npc_mcam_alloc_entry(rvu, &alloc_req,
+ &alloc_rsp);
+ if (ret) {
+ dev_err(rvu->dev,
+ "Unable to allocate MCAM entries\n");
+ goto exit;
+ }
+
+ if (alloc_rsp.count != alloc_req.count) {
+ dev_err(rvu->dev,
+ "Unable to allocate %d MCAM entries, got %d\n",
+ alloc_req.count, alloc_rsp.count);
+ goto free_entries;
+ }
+
+ rswitch->entry2pcifunc = kcalloc(alloc_req.count, sizeof(u16),
+ GFP_KERNEL);
+ if (!rswitch->entry2pcifunc)
+ goto free_entries;
+
+ rswitch->used_entries = alloc_rsp.count;
+ rswitch->start_entry = alloc_rsp.entry;
+
+ ret = rvu_switch_install_rules(rvu);
+ if (ret)
+ goto uninstall_rules;
+
+ return;
+
+uninstall_rules:
+ uninstall_req.start = rswitch->start_entry;
+ uninstall_req.end = rswitch->start_entry + rswitch->used_entries - 1;
+ rvu_mbox_handler_npc_delete_flow(rvu, &uninstall_req, &rsp);
+ kfree(rswitch->entry2pcifunc);
+free_entries:
+ free_req.all = 1;
+ rvu_mbox_handler_npc_mcam_free_entry(rvu, &free_req, &rsp);
+exit:
+ return;
+}
+
+void rvu_switch_disable(struct rvu *rvu)
+{
+ struct npc_delete_flow_req uninstall_req = { 0 };
+ struct npc_mcam_free_entry_req free_req = { 0 };
+ struct rvu_switch *rswitch = &rvu->rswitch;
+ struct rvu_hwinfo *hw = rvu->hw;
+ int pf, vf, numvfs;
+ struct msg_rsp rsp;
+ u16 pcifunc;
+ int err;
+
+ if (!rswitch->used_entries)
+ return;
+
+ for (pf = 1; pf < hw->total_pfs; pf++) {
+ if (!is_pf_cgxmapped(rvu, pf))
+ continue;
+
+ pcifunc = pf << 10;
+ err = rvu_switch_install_rx_rule(rvu, pcifunc, 0xFFF);
+ if (err)
+ dev_err(rvu->dev,
+ "Reverting RX rule for PF%d failed(%d)\n",
+ pf, err);
+
+ rvu_get_pf_numvfs(rvu, pf, &numvfs, NULL);
+ for (vf = 0; vf < numvfs; vf++) {
+ pcifunc = pf << 10 | ((vf + 1) & 0x3FF);
+ err = rvu_switch_install_rx_rule(rvu, pcifunc, 0xFFF);
+ if (err)
+ dev_err(rvu->dev,
+ "Reverting RX rule for PF%dVF%d failed(%d)\n",
+ pf, vf, err);
+ }
+ }
+
+ uninstall_req.start = rswitch->start_entry;
+ uninstall_req.end = rswitch->start_entry + rswitch->used_entries - 1;
+ free_req.all = 1;
+ rvu_mbox_handler_npc_delete_flow(rvu, &uninstall_req, &rsp);
+ rvu_mbox_handler_npc_mcam_free_entry(rvu, &free_req, &rsp);
+ rswitch->used_entries = 0;
+ kfree(rswitch->entry2pcifunc);
+}
+
+void rvu_switch_update_rules(struct rvu *rvu, u16 pcifunc)
+{
+ struct rvu_switch *rswitch = &rvu->rswitch;
+ u32 max = rswitch->used_entries;
+ u16 entry;
+
+ if (!rswitch->used_entries)
+ return;
+
+ for (entry = 0; entry < max; entry++) {
+ if (rswitch->entry2pcifunc[entry] == pcifunc)
+ break;
+ }
+
+ if (entry >= max)
+ return;
+
+ rvu_switch_install_tx_rule(rvu, pcifunc, rswitch->start_entry + entry);
+ rvu_switch_install_rx_rule(rvu, pcifunc, 0x0);
+}
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/Makefile b/drivers/net/ethernet/marvell/octeontx2/nic/Makefile
index 457c94793e63..3254b02205ca 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/Makefile
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/Makefile
@@ -7,7 +7,7 @@ obj-$(CONFIG_OCTEONTX2_PF) += rvu_nicpf.o
obj-$(CONFIG_OCTEONTX2_VF) += rvu_nicvf.o
rvu_nicpf-y := otx2_pf.o otx2_common.o otx2_txrx.o otx2_ethtool.o \
- otx2_ptp.o otx2_flows.o otx2_tc.o cn10k.o
+ otx2_ptp.o otx2_flows.o otx2_tc.o cn10k.o otx2_dmac_flt.o
rvu_nicvf-y := otx2_vf.o
ccflags-y += -I$(srctree)/drivers/net/ethernet/marvell/octeontx2/af
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c
index 1b08896b46d2..184de9466286 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c
@@ -22,69 +22,52 @@ static struct dev_hw_ops cn10k_hw_ops = {
.refill_pool_ptrs = cn10k_refill_pool_ptrs,
};
-int cn10k_pf_lmtst_init(struct otx2_nic *pf)
+int cn10k_lmtst_init(struct otx2_nic *pfvf)
{
- int size, num_lines;
- u64 base;
- if (!test_bit(CN10K_LMTST, &pf->hw.cap_flag)) {
- pf->hw_ops = &otx2_hw_ops;
+ struct lmtst_tbl_setup_req *req;
+ int qcount, err;
+
+ if (!test_bit(CN10K_LMTST, &pfvf->hw.cap_flag)) {
+ pfvf->hw_ops = &otx2_hw_ops;
return 0;
}
- pf->hw_ops = &cn10k_hw_ops;
- base = pci_resource_start(pf->pdev, PCI_MBOX_BAR_NUM) +
- (MBOX_SIZE * (pf->total_vfs + 1));
-
- size = pci_resource_len(pf->pdev, PCI_MBOX_BAR_NUM) -
- (MBOX_SIZE * (pf->total_vfs + 1));
-
- pf->hw.lmt_base = ioremap(base, size);
+ pfvf->hw_ops = &cn10k_hw_ops;
+ qcount = pfvf->hw.max_queues;
+ /* LMTST lines allocation
+ * qcount = num_online_cpus();
+ * NPA = TX + RX + XDP.
+ * NIX = TX * 32 (For Burst SQE flush).
+ */
+ pfvf->tot_lmt_lines = (qcount * 3) + (qcount * 32);
+ pfvf->npa_lmt_lines = qcount * 3;
+ pfvf->nix_lmt_size = LMT_BURST_SIZE * LMT_LINE_SIZE;
- if (!pf->hw.lmt_base) {
- dev_err(pf->dev, "Unable to map PF LMTST region\n");
+ mutex_lock(&pfvf->mbox.lock);
+ req = otx2_mbox_alloc_msg_lmtst_tbl_setup(&pfvf->mbox);
+ if (!req) {
+ mutex_unlock(&pfvf->mbox.lock);
return -ENOMEM;
}
- /* FIXME: Get the num of LMTST lines from LMT table */
- pf->tot_lmt_lines = size / LMT_LINE_SIZE;
- num_lines = (pf->tot_lmt_lines - NIX_LMTID_BASE) /
- pf->hw.tx_queues;
- /* Number of LMT lines per SQ queues */
- pf->nix_lmt_lines = num_lines > 32 ? 32 : num_lines;
-
- pf->nix_lmt_size = pf->nix_lmt_lines * LMT_LINE_SIZE;
- return 0;
-}
+ req->use_local_lmt_region = true;
-int cn10k_vf_lmtst_init(struct otx2_nic *vf)
-{
- int size, num_lines;
-
- if (!test_bit(CN10K_LMTST, &vf->hw.cap_flag)) {
- vf->hw_ops = &otx2_hw_ops;
- return 0;
+ err = qmem_alloc(pfvf->dev, &pfvf->dync_lmt, pfvf->tot_lmt_lines,
+ LMT_LINE_SIZE);
+ if (err) {
+ mutex_unlock(&pfvf->mbox.lock);
+ return err;
}
+ pfvf->hw.lmt_base = (u64 *)pfvf->dync_lmt->base;
+ req->lmt_iova = (u64)pfvf->dync_lmt->iova;
- vf->hw_ops = &cn10k_hw_ops;
- size = pci_resource_len(vf->pdev, PCI_MBOX_BAR_NUM);
- vf->hw.lmt_base = ioremap_wc(pci_resource_start(vf->pdev,
- PCI_MBOX_BAR_NUM),
- size);
- if (!vf->hw.lmt_base) {
- dev_err(vf->dev, "Unable to map VF LMTST region\n");
- return -ENOMEM;
- }
+ err = otx2_sync_mbox_msg(&pfvf->mbox);
+ mutex_unlock(&pfvf->mbox.lock);
- vf->tot_lmt_lines = size / LMT_LINE_SIZE;
- /* LMTST lines per SQ */
- num_lines = (vf->tot_lmt_lines - NIX_LMTID_BASE) /
- vf->hw.tx_queues;
- vf->nix_lmt_lines = num_lines > 32 ? 32 : num_lines;
- vf->nix_lmt_size = vf->nix_lmt_lines * LMT_LINE_SIZE;
return 0;
}
-EXPORT_SYMBOL(cn10k_vf_lmtst_init);
+EXPORT_SYMBOL(cn10k_lmtst_init);
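
As a worked example of the accounting above: with max_queues = 8, the driver reserves 8 * 3 = 24 NPA lines followed by 8 * 32 = 256 NIX burst lines, i.e. 280 LMT lines of 128 bytes (35 KB) allocated through qmem_alloc(); SQ qidx then gets lmt_id = 24 + qidx * 32, as set in cn10k_sq_aq_init() below.
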
int cn10k_sq_aq_init(void *dev, u16 qidx, u16 sqb_aura)
{
@@ -93,9 +76,11 @@ int cn10k_sq_aq_init(void *dev, u16 qidx, u16 sqb_aura)
struct otx2_snd_queue *sq;
sq = &pfvf->qset.sq[qidx];
- sq->lmt_addr = (__force u64 *)((u64)pfvf->hw.nix_lmt_base +
+ sq->lmt_addr = (u64 *)((u64)pfvf->hw.nix_lmt_base +
(qidx * pfvf->nix_lmt_size));
+ sq->lmt_id = pfvf->npa_lmt_lines + (qidx * LMT_BURST_SIZE);
+
/* Get memory to put this msg */
aq = otx2_mbox_alloc_msg_nix_cn10k_aq_enq(&pfvf->mbox);
if (!aq)
@@ -158,15 +143,13 @@ void cn10k_refill_pool_ptrs(void *dev, struct otx2_cq_queue *cq)
void cn10k_sqe_flush(void *dev, struct otx2_snd_queue *sq, int size, int qidx)
{
- struct otx2_nic *pfvf = dev;
- int lmt_id = NIX_LMTID_BASE + (qidx * pfvf->nix_lmt_lines);
u64 val = 0, tar_addr = 0;
/* FIXME: val[0:10] LMT_ID.
* [12:15] no of LMTST - 1 in the burst.
* [19:63] data size of each LMTST in the burst except first.
*/
- val = (lmt_id & 0x7FF);
+ val = (sq->lmt_id & 0x7FF);
/* Target address for LMTST flush tells HW how many 128bit
* words are present.
* tar_addr[6:4] size of first LMTST - 1 in units of 128b.
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.h b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.h
index 71292a4cf1f3..1a1ae334477d 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.h
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.h
@@ -12,8 +12,7 @@
void cn10k_refill_pool_ptrs(void *dev, struct otx2_cq_queue *cq);
void cn10k_sqe_flush(void *dev, struct otx2_snd_queue *sq, int size, int qidx);
int cn10k_sq_aq_init(void *dev, u16 qidx, u16 sqb_aura);
-int cn10k_pf_lmtst_init(struct otx2_nic *pf);
-int cn10k_vf_lmtst_init(struct otx2_nic *vf);
+int cn10k_lmtst_init(struct otx2_nic *pfvf);
int cn10k_free_all_ipolicers(struct otx2_nic *pfvf);
int cn10k_alloc_matchall_ipolicer(struct otx2_nic *pfvf);
int cn10k_free_matchall_ipolicer(struct otx2_nic *pfvf);
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
index cf7875d51d87..70fcc1fd962f 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
@@ -210,6 +210,9 @@ int otx2_set_mac_address(struct net_device *netdev, void *p)
/* update dmac field in vlan offload rule */
if (pfvf->flags & OTX2_FLAG_RX_VLAN_SUPPORT)
otx2_install_rxvlan_offload_flow(pfvf);
+ /* update dmac address in ntuple and DMAC filter list */
+ if (pfvf->flags & OTX2_FLAG_DMACFLTR_SUPPORT)
+ otx2_dmacflt_update_pfmac_flow(pfvf);
} else {
return -EPERM;
}
@@ -921,12 +924,14 @@ static int otx2_cq_init(struct otx2_nic *pfvf, u16 qidx)
aq->cq.drop = RQ_DROP_LVL_CQ(pfvf->hw.rq_skid, cq->cqe_cnt);
aq->cq.drop_ena = 1;
- /* Enable receive CQ backpressure */
- aq->cq.bp_ena = 1;
- aq->cq.bpid = pfvf->bpid[0];
+ if (!is_otx2_lbkvf(pfvf->pdev)) {
+ /* Enable receive CQ backpressure */
+ aq->cq.bp_ena = 1;
+ aq->cq.bpid = pfvf->bpid[0];
- /* Set backpressure level is same as cq pass level */
- aq->cq.bp = RQ_PASS_LVL_CQ(pfvf->hw.rq_skid, qset->rqe_cnt);
+ /* Set the backpressure level the same as the cq pass level */
+ aq->cq.bp = RQ_PASS_LVL_CQ(pfvf->hw.rq_skid, qset->rqe_cnt);
+ }
}
/* Fill AQ info */
@@ -1183,7 +1188,7 @@ static int otx2_aura_init(struct otx2_nic *pfvf, int aura_id,
aq->aura.fc_hyst_bits = 0; /* Store count on all updates */
/* Enable backpressure for RQ aura */
- if (aura_id < pfvf->hw.rqpool_cnt) {
+ if (aura_id < pfvf->hw.rqpool_cnt && !is_otx2_lbkvf(pfvf->pdev)) {
aq->aura.bp_ena = 0;
aq->aura.nix0_bpid = pfvf->bpid[0];
/* Set backpressure level for RQ's Aura */
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
index 234b330f3183..8fd58cd07f50 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
@@ -218,8 +218,8 @@ struct otx2_hw {
unsigned long cap_flag;
#define LMT_LINE_SIZE 128
-#define NIX_LMTID_BASE 72 /* RX + TX + XDP */
- void __iomem *lmt_base;
+#define LMT_BURST_SIZE 32 /* 32 LMTST lines for burst SQE flush */
+ u64 *lmt_base;
u64 *npa_lmt_base;
u64 *nix_lmt_base;
};
@@ -288,6 +288,9 @@ struct otx2_flow_config {
u16 tc_flower_offset;
u16 ntuple_max_flows;
u16 tc_max_flows;
+ u8 dmacflt_max_flows;
+ u8 *bmap_to_dmacindex;
+ unsigned long dmacflt_bmap;
struct list_head flow_list;
};
@@ -329,6 +332,7 @@ struct otx2_nic {
#define OTX2_FLAG_TC_FLOWER_SUPPORT BIT_ULL(11)
#define OTX2_FLAG_TC_MATCHALL_EGRESS_ENABLED BIT_ULL(12)
#define OTX2_FLAG_TC_MATCHALL_INGRESS_ENABLED BIT_ULL(13)
+#define OTX2_FLAG_DMACFLTR_SUPPORT BIT_ULL(14)
u64 flags;
struct otx2_qset qset;
@@ -363,8 +367,9 @@ struct otx2_nic {
/* Block address of NIX either BLKADDR_NIX0 or BLKADDR_NIX1 */
int nix_blkaddr;
/* LMTST Lines info */
+ struct qmem *dync_lmt;
u16 tot_lmt_lines;
- u16 nix_lmt_lines;
+ u16 npa_lmt_lines;
u32 nix_lmt_size;
struct otx2_ptp *ptp;
@@ -833,4 +838,11 @@ int otx2_init_tc(struct otx2_nic *nic);
void otx2_shutdown_tc(struct otx2_nic *nic);
int otx2_setup_tc(struct net_device *netdev, enum tc_setup_type type,
void *type_data);
+/* CGX/RPM DMAC filters support */
+int otx2_dmacflt_get_max_cnt(struct otx2_nic *pf);
+int otx2_dmacflt_add(struct otx2_nic *pf, const u8 *mac, u8 bit_pos);
+int otx2_dmacflt_remove(struct otx2_nic *pf, const u8 *mac, u8 bit_pos);
+int otx2_dmacflt_update(struct otx2_nic *pf, u8 *mac, u8 bit_pos);
+void otx2_dmacflt_reinstall_flows(struct otx2_nic *pf);
+void otx2_dmacflt_update_pfmac_flow(struct otx2_nic *pfvf);
#endif /* OTX2_COMMON_H */
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_dmac_flt.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_dmac_flt.c
new file mode 100644
index 000000000000..383a6b5cb698
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_dmac_flt.c
@@ -0,0 +1,173 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell OcteonTx2 RVU Physical Function ethernet driver
+ *
+ * Copyright (C) 2021 Marvell.
+ */
+
+#include "otx2_common.h"
+
+static int otx2_dmacflt_do_add(struct otx2_nic *pf, const u8 *mac,
+ u8 *dmac_index)
+{
+ struct cgx_mac_addr_add_req *req;
+ struct cgx_mac_addr_add_rsp *rsp;
+ int err;
+
+ mutex_lock(&pf->mbox.lock);
+
+ req = otx2_mbox_alloc_msg_cgx_mac_addr_add(&pf->mbox);
+ if (!req) {
+ mutex_unlock(&pf->mbox.lock);
+ return -ENOMEM;
+ }
+
+ ether_addr_copy(req->mac_addr, mac);
+ err = otx2_sync_mbox_msg(&pf->mbox);
+
+ if (!err) {
+ rsp = (struct cgx_mac_addr_add_rsp *)
+ otx2_mbox_get_rsp(&pf->mbox.mbox, 0, &req->hdr);
+ *dmac_index = rsp->index;
+ }
+
+ mutex_unlock(&pf->mbox.lock);
+ return err;
+}
+
+static int otx2_dmacflt_add_pfmac(struct otx2_nic *pf)
+{
+ struct cgx_mac_addr_set_or_get *req;
+ int err;
+
+ mutex_lock(&pf->mbox.lock);
+
+ req = otx2_mbox_alloc_msg_cgx_mac_addr_set(&pf->mbox);
+ if (!req) {
+ mutex_unlock(&pf->mbox.lock);
+ return -ENOMEM;
+ }
+
+ ether_addr_copy(req->mac_addr, pf->netdev->dev_addr);
+ err = otx2_sync_mbox_msg(&pf->mbox);
+
+ mutex_unlock(&pf->mbox.lock);
+ return err;
+}
+
+int otx2_dmacflt_add(struct otx2_nic *pf, const u8 *mac, u8 bit_pos)
+{
+ u8 *dmacindex;
+
+ /* Store the dmacindex returned by the CGX/RPM driver; it will
+ * be used for macaddr update/remove
+ */
+ dmacindex = &pf->flow_cfg->bmap_to_dmacindex[bit_pos];
+
+ if (ether_addr_equal(mac, pf->netdev->dev_addr))
+ return otx2_dmacflt_add_pfmac(pf);
+ else
+ return otx2_dmacflt_do_add(pf, mac, dmacindex);
+}
+
+static int otx2_dmacflt_do_remove(struct otx2_nic *pfvf, const u8 *mac,
+ u8 dmac_index)
+{
+ struct cgx_mac_addr_del_req *req;
+ int err;
+
+ mutex_lock(&pfvf->mbox.lock);
+ req = otx2_mbox_alloc_msg_cgx_mac_addr_del(&pfvf->mbox);
+ if (!req) {
+ mutex_unlock(&pfvf->mbox.lock);
+ return -ENOMEM;
+ }
+
+ req->index = dmac_index;
+
+ err = otx2_sync_mbox_msg(&pfvf->mbox);
+ mutex_unlock(&pfvf->mbox.lock);
+
+ return err;
+}
+
+static int otx2_dmacflt_remove_pfmac(struct otx2_nic *pf)
+{
+ struct msg_req *req;
+ int err;
+
+ mutex_lock(&pf->mbox.lock);
+ req = otx2_mbox_alloc_msg_cgx_mac_addr_reset(&pf->mbox);
+ if (!req) {
+ mutex_unlock(&pf->mbox.lock);
+ return -ENOMEM;
+ }
+
+ err = otx2_sync_mbox_msg(&pf->mbox);
+
+ mutex_unlock(&pf->mbox.lock);
+ return err;
+}
+
+int otx2_dmacflt_remove(struct otx2_nic *pf, const u8 *mac,
+ u8 bit_pos)
+{
+ u8 dmacindex = pf->flow_cfg->bmap_to_dmacindex[bit_pos];
+
+ if (ether_addr_equal(mac, pf->netdev->dev_addr))
+ return otx2_dmacflt_remove_pfmac(pf);
+ else
+ return otx2_dmacflt_do_remove(pf, mac, dmacindex);
+}
+
+/* CGX/RPM blocks support a maximum of 32 unicast entries.
+ * In a typical configuration the MAC block is associated
+ * with 4 LMACs, so each LMAC gets 8 DMAC entries.
+ */
+int otx2_dmacflt_get_max_cnt(struct otx2_nic *pf)
+{
+ struct cgx_max_dmac_entries_get_rsp *rsp;
+ struct msg_req *msg;
+ int err;
+
+ mutex_lock(&pf->mbox.lock);
+ msg = otx2_mbox_alloc_msg_cgx_mac_max_entries_get(&pf->mbox);
+
+ if (!msg) {
+ mutex_unlock(&pf->mbox.lock);
+ return -ENOMEM;
+ }
+
+ err = otx2_sync_mbox_msg(&pf->mbox);
+ if (err)
+ goto out;
+
+ rsp = (struct cgx_max_dmac_entries_get_rsp *)
+ otx2_mbox_get_rsp(&pf->mbox.mbox, 0, &msg->hdr);
+ pf->flow_cfg->dmacflt_max_flows = rsp->max_dmac_filters;
+
+out:
+ mutex_unlock(&pf->mbox.lock);
+ return err;
+}
+
+int otx2_dmacflt_update(struct otx2_nic *pf, u8 *mac, u8 bit_pos)
+{
+ struct cgx_mac_addr_update_req *req;
+ int rc;
+
+ mutex_lock(&pf->mbox.lock);
+
+ req = otx2_mbox_alloc_msg_cgx_mac_addr_update(&pf->mbox);
+
+ if (!req) {
+ mutex_unlock(&pf->mbox.lock);
+ return -ENOMEM;
+ }
+
+ ether_addr_copy(req->mac_addr, mac);
+ req->index = pf->flow_cfg->bmap_to_dmacindex[bit_pos];
+ rc = otx2_sync_mbox_msg(&pf->mbox);
+
+ mutex_unlock(&pf->mbox.lock);
+ return rc;
+}
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c
index 8df748e0677b..b906a0eb6e0d 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c
@@ -298,15 +298,14 @@ static int otx2_set_channels(struct net_device *dev,
err = otx2_set_real_num_queues(dev, channel->tx_count,
channel->rx_count);
if (err)
- goto fail;
+ return err;
pfvf->hw.rx_queues = channel->rx_count;
pfvf->hw.tx_queues = channel->tx_count;
pfvf->qset.cq_cnt = pfvf->hw.tx_queues + pfvf->hw.rx_queues;
-fail:
if (if_up)
- dev->netdev_ops->ndo_open(dev);
+ err = dev->netdev_ops->ndo_open(dev);
netdev_info(dev, "Setting num Tx rings to %d, Rx rings to %d success\n",
pfvf->hw.tx_queues, pfvf->hw.rx_queues);
@@ -410,7 +409,7 @@ static int otx2_set_ringparam(struct net_device *netdev,
qs->rqe_cnt = rx_count;
if (if_up)
- netdev->netdev_ops->ndo_open(netdev);
+ return netdev->netdev_ops->ndo_open(netdev);
return 0;
}
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c
index 8c97106bdd1c..4d9de525802d 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c
@@ -18,6 +18,12 @@ struct otx2_flow {
bool is_vf;
u8 rss_ctx_id;
int vf;
+ bool dmac_filter;
+};
+
+enum dmac_req {
+ DMAC_ADDR_UPDATE,
+ DMAC_ADDR_DEL
};
static void otx2_clear_ntuple_flow_info(struct otx2_nic *pfvf, struct otx2_flow_config *flow_cfg)
@@ -219,6 +225,22 @@ int otx2_mcam_flow_init(struct otx2_nic *pf)
if (!pf->mac_table)
return -ENOMEM;
+ otx2_dmacflt_get_max_cnt(pf);
+
+ /* No DMAC filters were allocated for this PF */
+ if (!pf->flow_cfg->dmacflt_max_flows)
+ return 0;
+
+ pf->flow_cfg->bmap_to_dmacindex =
+ devm_kzalloc(pf->dev, sizeof(u8) *
+ pf->flow_cfg->dmacflt_max_flows,
+ GFP_KERNEL);
+
+ if (!pf->flow_cfg->bmap_to_dmacindex)
+ return -ENOMEM;
+
+ pf->flags |= OTX2_FLAG_DMACFLTR_SUPPORT;
+
return 0;
}
@@ -280,6 +302,12 @@ int otx2_add_macfilter(struct net_device *netdev, const u8 *mac)
{
struct otx2_nic *pf = netdev_priv(netdev);
+ if (bitmap_weight(&pf->flow_cfg->dmacflt_bmap,
+ pf->flow_cfg->dmacflt_max_flows))
+ netdev_warn(netdev,
+ "Add %pM to CGX/RPM DMAC filters list as well\n",
+ mac);
+
return otx2_do_add_macfilter(pf, mac);
}
@@ -351,12 +379,22 @@ static void otx2_add_flow_to_list(struct otx2_nic *pfvf, struct otx2_flow *flow)
list_add(&flow->list, head);
}
+static int otx2_get_maxflows(struct otx2_flow_config *flow_cfg)
+{
+ if (flow_cfg->nr_flows == flow_cfg->ntuple_max_flows ||
+ bitmap_weight(&flow_cfg->dmacflt_bmap,
+ flow_cfg->dmacflt_max_flows))
+ return flow_cfg->ntuple_max_flows + flow_cfg->dmacflt_max_flows;
+ else
+ return flow_cfg->ntuple_max_flows;
+}
+
int otx2_get_flow(struct otx2_nic *pfvf, struct ethtool_rxnfc *nfc,
u32 location)
{
struct otx2_flow *iter;
- if (location >= pfvf->flow_cfg->ntuple_max_flows)
+ if (location >= otx2_get_maxflows(pfvf->flow_cfg))
return -EINVAL;
list_for_each_entry(iter, &pfvf->flow_cfg->flow_list, list) {
@@ -378,7 +416,7 @@ int otx2_get_all_flows(struct otx2_nic *pfvf, struct ethtool_rxnfc *nfc,
int idx = 0;
int err = 0;
- nfc->data = pfvf->flow_cfg->ntuple_max_flows;
+ nfc->data = otx2_get_maxflows(pfvf->flow_cfg);
while ((!err || err == -ENOENT) && idx < rule_cnt) {
err = otx2_get_flow(pfvf, nfc, location);
if (!err)
@@ -760,6 +798,32 @@ int otx2_prepare_flow_request(struct ethtool_rx_flow_spec *fsp,
return 0;
}
+static int otx2_is_flow_rule_dmacfilter(struct otx2_nic *pfvf,
+ struct ethtool_rx_flow_spec *fsp)
+{
+ struct ethhdr *eth_mask = &fsp->m_u.ether_spec;
+ struct ethhdr *eth_hdr = &fsp->h_u.ether_spec;
+ u64 ring_cookie = fsp->ring_cookie;
+ u32 flow_type;
+
+ if (!(pfvf->flags & OTX2_FLAG_DMACFLTR_SUPPORT))
+ return false;
+
+ flow_type = fsp->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT | FLOW_RSS);
+
+ /* CGX/RPM block DMAC filtering is an allow list, so only treat
+ * the rule as a DMAC filter when its action is something other
+ * than DROP
+ */
+ if (flow_type == ETHER_FLOW && ring_cookie != RX_CLS_FLOW_DISC &&
+ !ethtool_get_flow_spec_ring_vf(ring_cookie)) {
+ if (is_zero_ether_addr(eth_mask->h_dest) &&
+ is_valid_ether_addr(eth_hdr->h_dest))
+ return true;
+ }
+
+ return false;
+}
+
static int otx2_add_flow_msg(struct otx2_nic *pfvf, struct otx2_flow *flow)
{
u64 ring_cookie = flow->flow_spec.ring_cookie;
@@ -818,14 +882,46 @@ static int otx2_add_flow_msg(struct otx2_nic *pfvf, struct otx2_flow *flow)
return err;
}
+static int otx2_add_flow_with_pfmac(struct otx2_nic *pfvf,
+ struct otx2_flow *flow)
+{
+ struct otx2_flow *pf_mac;
+ struct ethhdr *eth_hdr;
+
+ pf_mac = kzalloc(sizeof(*pf_mac), GFP_KERNEL);
+ if (!pf_mac)
+ return -ENOMEM;
+
+ pf_mac->entry = 0;
+ pf_mac->dmac_filter = true;
+ pf_mac->location = pfvf->flow_cfg->ntuple_max_flows;
+ memcpy(&pf_mac->flow_spec, &flow->flow_spec,
+ sizeof(struct ethtool_rx_flow_spec));
+ pf_mac->flow_spec.location = pf_mac->location;
+
+ /* Copy PF mac address */
+ eth_hdr = &pf_mac->flow_spec.h_u.ether_spec;
+ ether_addr_copy(eth_hdr->h_dest, pfvf->netdev->dev_addr);
+
+ /* Install DMAC filter with PF mac address */
+ otx2_dmacflt_add(pfvf, eth_hdr->h_dest, 0);
+
+ otx2_add_flow_to_list(pfvf, pf_mac);
+ pfvf->flow_cfg->nr_flows++;
+ set_bit(0, &pfvf->flow_cfg->dmacflt_bmap);
+
+ return 0;
+}
+
int otx2_add_flow(struct otx2_nic *pfvf, struct ethtool_rxnfc *nfc)
{
struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
struct ethtool_rx_flow_spec *fsp = &nfc->fs;
struct otx2_flow *flow;
+ struct ethhdr *eth_hdr;
bool new = false;
+ int err = 0;
u32 ring;
- int err;
ring = ethtool_get_flow_spec_ring(fsp->ring_cookie);
if (!(pfvf->flags & OTX2_FLAG_NTUPLE_SUPPORT))
@@ -834,16 +930,15 @@ int otx2_add_flow(struct otx2_nic *pfvf, struct ethtool_rxnfc *nfc)
if (ring >= pfvf->hw.rx_queues && fsp->ring_cookie != RX_CLS_FLOW_DISC)
return -EINVAL;
- if (fsp->location >= flow_cfg->ntuple_max_flows)
+ if (fsp->location >= otx2_get_maxflows(flow_cfg))
return -EINVAL;
flow = otx2_find_flow(pfvf, fsp->location);
if (!flow) {
- flow = kzalloc(sizeof(*flow), GFP_ATOMIC);
+ flow = kzalloc(sizeof(*flow), GFP_KERNEL);
if (!flow)
return -ENOMEM;
flow->location = fsp->location;
- flow->entry = flow_cfg->flow_ent[flow->location];
new = true;
}
/* struct copy */
@@ -852,7 +947,54 @@ int otx2_add_flow(struct otx2_nic *pfvf, struct ethtool_rxnfc *nfc)
if (fsp->flow_type & FLOW_RSS)
flow->rss_ctx_id = nfc->rss_context;
- err = otx2_add_flow_msg(pfvf, flow);
+ if (otx2_is_flow_rule_dmacfilter(pfvf, &flow->flow_spec)) {
+ eth_hdr = &flow->flow_spec.h_u.ether_spec;
+
+ /* Sync dmac filter table with updated fields */
+ if (flow->dmac_filter)
+ return otx2_dmacflt_update(pfvf, eth_hdr->h_dest,
+ flow->entry);
+
+ if (bitmap_full(&flow_cfg->dmacflt_bmap,
+ flow_cfg->dmacflt_max_flows)) {
+ netdev_warn(pfvf->netdev,
+ "Can't insert the rule %d as max allowed dmac filters are %d\n",
+ flow->location +
+ flow_cfg->dmacflt_max_flows,
+ flow_cfg->dmacflt_max_flows);
+ err = -EINVAL;
+ if (new)
+ kfree(flow);
+ return err;
+ }
+
+ /* Install PF mac address to DMAC filter list */
+ if (!test_bit(0, &flow_cfg->dmacflt_bmap))
+ otx2_add_flow_with_pfmac(pfvf, flow);
+
+ flow->dmac_filter = true;
+ flow->entry = find_first_zero_bit(&flow_cfg->dmacflt_bmap,
+ flow_cfg->dmacflt_max_flows);
+ fsp->location = flow_cfg->ntuple_max_flows + flow->entry;
+ flow->flow_spec.location = fsp->location;
+ flow->location = fsp->location;
+
+ set_bit(flow->entry, &flow_cfg->dmacflt_bmap);
+ otx2_dmacflt_add(pfvf, eth_hdr->h_dest, flow->entry);
+
+ } else {
+ if (flow->location >= pfvf->flow_cfg->ntuple_max_flows) {
+ netdev_warn(pfvf->netdev,
+ "Can't insert non dmac ntuple rule at %d, allowed range %d-0\n",
+ flow->location,
+ flow_cfg->ntuple_max_flows - 1);
+ err = -EINVAL;
+ } else {
+ flow->entry = flow_cfg->flow_ent[flow->location];
+ err = otx2_add_flow_msg(pfvf, flow);
+ }
+ }
+
if (err) {
if (new)
kfree(flow);
@@ -890,20 +1032,70 @@ static int otx2_remove_flow_msg(struct otx2_nic *pfvf, u16 entry, bool all)
return err;
}
+static void otx2_update_rem_pfmac(struct otx2_nic *pfvf, int req)
+{
+ struct otx2_flow *iter;
+ struct ethhdr *eth_hdr;
+ bool found = false;
+
+ list_for_each_entry(iter, &pfvf->flow_cfg->flow_list, list) {
+ if (iter->dmac_filter && iter->entry == 0) {
+ eth_hdr = &iter->flow_spec.h_u.ether_spec;
+ if (req == DMAC_ADDR_DEL) {
+ otx2_dmacflt_remove(pfvf, eth_hdr->h_dest,
+ 0);
+ clear_bit(0, &pfvf->flow_cfg->dmacflt_bmap);
+ found = true;
+ } else {
+ ether_addr_copy(eth_hdr->h_dest,
+ pfvf->netdev->dev_addr);
+ otx2_dmacflt_update(pfvf, eth_hdr->h_dest, 0);
+ }
+ break;
+ }
+ }
+
+ if (found) {
+ list_del(&iter->list);
+ kfree(iter);
+ pfvf->flow_cfg->nr_flows--;
+ }
+}
+
int otx2_remove_flow(struct otx2_nic *pfvf, u32 location)
{
struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
struct otx2_flow *flow;
int err;
- if (location >= flow_cfg->ntuple_max_flows)
+ if (location >= otx2_get_maxflows(flow_cfg))
return -EINVAL;
flow = otx2_find_flow(pfvf, location);
if (!flow)
return -ENOENT;
- err = otx2_remove_flow_msg(pfvf, flow->entry, false);
+ if (flow->dmac_filter) {
+ struct ethhdr *eth_hdr = &flow->flow_spec.h_u.ether_spec;
+
+ /* users are not allowed to remove the DMAC filter for the interface MAC */
+ if (ether_addr_equal(pfvf->netdev->dev_addr, eth_hdr->h_dest))
+ return -EPERM;
+
+ err = otx2_dmacflt_remove(pfvf, eth_hdr->h_dest,
+ flow->entry);
+ clear_bit(flow->entry, &flow_cfg->dmacflt_bmap);
+ /* If all DMAC filters are removed, delete the filter with the
+ * interface MAC address and configure the CGX/RPM block in
+ * promiscuous mode
+ */
+ if (bitmap_weight(&flow_cfg->dmacflt_bmap,
+ flow_cfg->dmacflt_max_flows) == 1)
+ otx2_update_rem_pfmac(pfvf, DMAC_ADDR_DEL);
+ } else {
+ err = otx2_remove_flow_msg(pfvf, flow->entry, false);
+ }
+
if (err)
return err;
@@ -1100,3 +1292,22 @@ int otx2_enable_rxvlan(struct otx2_nic *pf, bool enable)
mutex_unlock(&pf->mbox.lock);
return rsp_hdr->rc;
}
+
+void otx2_dmacflt_reinstall_flows(struct otx2_nic *pf)
+{
+ struct otx2_flow *iter;
+ struct ethhdr *eth_hdr;
+
+ list_for_each_entry(iter, &pf->flow_cfg->flow_list, list) {
+ if (iter->dmac_filter) {
+ eth_hdr = &iter->flow_spec.h_u.ether_spec;
+ otx2_dmacflt_add(pf, eth_hdr->h_dest,
+ iter->entry);
+ }
+ }
+}
+
+void otx2_dmacflt_update_pfmac_flow(struct otx2_nic *pfvf)
+{
+ otx2_update_rem_pfmac(pfvf, DMAC_ADDR_UPDATE);
+}
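The DMAC filter bookkeeping above reduces to a small bitmap allocator: bit 0 is reserved for the PF MAC, free slots come from find_first_zero_bit(), and DMAC rules occupy ethtool locations starting at ntuple_max_flows. A hedged sketch of the slot lifecycle using the standard bitmap API (helper names are illustrative):

#include <linux/bitmap.h>
#include <linux/errno.h>

/* Illustrative sketch of the DMAC slot lifecycle used above. */
static int example_claim_dmac_slot(unsigned long *bmap, unsigned int max)
{
        unsigned int slot;

        if (bitmap_full(bmap, max))
                return -ENOSPC;         /* every CGX/RPM entry is in use */

        slot = find_first_zero_bit(bmap, max);
        set_bit(slot, bmap);            /* claim the slot */
        return slot;
}

static void example_release_dmac_slot(unsigned long *bmap, unsigned int slot)
{
        clear_bit(slot, bmap);          /* slot may be reused by the next rule */
}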
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
index 59912f73417b..2c24944a4dba 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
@@ -1110,6 +1110,11 @@ static int otx2_cgx_config_loopback(struct otx2_nic *pf, bool enable)
struct msg_req *msg;
int err;
+ if (enable && bitmap_weight(&pf->flow_cfg->dmacflt_bmap,
+ pf->flow_cfg->dmacflt_max_flows))
+ netdev_warn(pf->netdev,
+ "CGX/RPM internal loopback might not work as DMAC filters are active\n");
+
mutex_lock(&pf->mbox.lock);
if (enable)
msg = otx2_mbox_alloc_msg_cgx_intlbk_enable(&pf->mbox);
@@ -1533,10 +1538,10 @@ int otx2_open(struct net_device *netdev)
if (test_bit(CN10K_LMTST, &pf->hw.cap_flag)) {
/* Reserve LMT lines for NPA AURA batch free */
- pf->hw.npa_lmt_base = (__force u64 *)pf->hw.lmt_base;
+ pf->hw.npa_lmt_base = pf->hw.lmt_base;
/* Reserve LMT lines for NIX TX */
- pf->hw.nix_lmt_base = (__force u64 *)((u64)pf->hw.npa_lmt_base +
- (NIX_LMTID_BASE * LMT_LINE_SIZE));
+ pf->hw.nix_lmt_base = (u64 *)((u64)pf->hw.npa_lmt_base +
+ (pf->npa_lmt_lines * LMT_LINE_SIZE));
}
err = otx2_init_hw_resources(pf);
@@ -1644,6 +1649,10 @@ int otx2_open(struct net_device *netdev)
/* Restore pause frame settings */
otx2_config_pause_frm(pf);
+ /* Install DMAC Filters */
+ if (pf->flags & OTX2_FLAG_DMACFLTR_SUPPORT)
+ otx2_dmacflt_reinstall_flows(pf);
+
err = otx2_rxtx_enable(pf, true);
if (err)
goto err_tx_stop_queues;
@@ -1653,6 +1662,7 @@ int otx2_open(struct net_device *netdev)
err_tx_stop_queues:
netif_tx_stop_all_queues(netdev);
netif_carrier_off(netdev);
+ pf->flags |= OTX2_FLAG_INTF_DOWN;
err_free_cints:
otx2_free_cints(pf, qidx);
vec = pci_irq_vector(pf->pdev,
@@ -1680,6 +1690,10 @@ int otx2_stop(struct net_device *netdev)
struct otx2_rss_info *rss;
int qidx, vec, wrk;
+ /* If the DOWN flag is set, resources are already freed */
+ if (pf->flags & OTX2_FLAG_INTF_DOWN)
+ return 0;
+
netif_carrier_off(netdev);
netif_tx_stop_all_queues(netdev);
@@ -2526,7 +2540,7 @@ static int otx2_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (err)
goto err_detach_rsrc;
- err = cn10k_pf_lmtst_init(pf);
+ err = cn10k_lmtst_init(pf);
if (err)
goto err_detach_rsrc;
@@ -2630,8 +2644,8 @@ err_del_mcam_entries:
err_ptp_destroy:
otx2_ptp_destroy(pf);
err_detach_rsrc:
- if (hw->lmt_base)
- iounmap(hw->lmt_base);
+ if (test_bit(CN10K_LMTST, &pf->hw.cap_flag))
+ qmem_free(pf->dev, pf->dync_lmt);
otx2_detach_resources(&pf->mbox);
err_disable_mbox_intr:
otx2_disable_mbox_intr(pf);
@@ -2772,9 +2786,8 @@ static void otx2_remove(struct pci_dev *pdev)
otx2_mcam_flow_del(pf);
otx2_shutdown_tc(pf);
otx2_detach_resources(&pf->mbox);
- if (pf->hw.lmt_base)
- iounmap(pf->hw.lmt_base);
-
+ if (test_bit(CN10K_LMTST, &pf->hw.cap_flag))
+ qmem_free(pf->dev, pf->dync_lmt);
otx2_disable_mbox_intr(pf);
otx2_pfaf_mbox_destroy(pf);
pci_free_irq_vectors(pf->pdev);
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c
index 905fc02a7dfe..972b202b9884 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c
@@ -288,7 +288,7 @@ static int otx2_tc_parse_actions(struct otx2_nic *nic,
struct otx2_nic *priv;
u32 burst, mark = 0;
u8 nr_police = 0;
- bool pps;
+ bool pps = false;
u64 rate;
int i;
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h
index 52486c1f0973..2f144e2cf436 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h
@@ -83,6 +83,7 @@ struct otx2_snd_queue {
u16 num_sqbs;
u16 sqe_thresh;
u8 sqe_per_sqb;
+ u32 lmt_id;
u64 io_addr;
u64 *aura_fc_addr;
u64 *lmt_addr;
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c
index 13a908f75ba0..a8bee5aefec1 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c
@@ -609,7 +609,7 @@ static int otx2vf_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (err)
goto err_detach_rsrc;
- err = cn10k_vf_lmtst_init(vf);
+ err = cn10k_lmtst_init(vf);
if (err)
goto err_detach_rsrc;
@@ -667,8 +667,8 @@ static int otx2vf_probe(struct pci_dev *pdev, const struct pci_device_id *id)
err_unreg_netdev:
unregister_netdev(netdev);
err_detach_rsrc:
- if (hw->lmt_base)
- iounmap(hw->lmt_base);
+ if (test_bit(CN10K_LMTST, &vf->hw.cap_flag))
+ qmem_free(vf->dev, vf->dync_lmt);
otx2_detach_resources(&vf->mbox);
err_disable_mbox_intr:
otx2vf_disable_mbox_intr(vf);
@@ -700,10 +700,8 @@ static void otx2vf_remove(struct pci_dev *pdev)
destroy_workqueue(vf->otx2_wq);
otx2vf_disable_mbox_intr(vf);
otx2_detach_resources(&vf->mbox);
-
- if (vf->hw.lmt_base)
- iounmap(vf->hw.lmt_base);
-
+ if (test_bit(CN10K_LMTST, &vf->hw.cap_flag))
+ qmem_free(vf->dev, vf->dync_lmt);
otx2vf_vfaf_mbox_destroy(vf);
pci_free_irq_vectors(vf->pdev);
pci_set_drvdata(pdev, NULL);
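The same teardown recurs in the PF and VF probe error paths and in remove: on CN10K the LMT region is qmem-allocated by cn10k_lmtst_init(), so cleanup is a capability-gated qmem_free() rather than an iounmap(). The shared shape, sketched (the wrapper name is illustrative):

/* Sketch: free the dynamically allocated LMT region only on silicon
 * that has the CN10K LMTST capability, as in the paths above.
 */
static void example_free_lmtst(struct otx2_nic *priv)
{
        if (test_bit(CN10K_LMTST, &priv->hw.cap_flag))
                qmem_free(priv->dev, priv->dync_lmt);
}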
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_devlink.c b/drivers/net/ethernet/marvell/prestera/prestera_devlink.c
index d12e21db9fd6..fa7a0682ad1e 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera_devlink.c
+++ b/drivers/net/ethernet/marvell/prestera/prestera_devlink.c
@@ -530,6 +530,8 @@ err_trap_register:
prestera_trap = &prestera_trap_items_arr[i];
devlink_traps_unregister(devlink, &prestera_trap->trap, 1);
}
+ devlink_trap_groups_unregister(devlink, prestera_trap_groups_arr,
+ groups_count);
err_groups_register:
kfree(trap_data->trap_items_arr);
err_trap_items_alloc:
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_switchdev.c b/drivers/net/ethernet/marvell/prestera/prestera_switchdev.c
index 0b3e8f2db294..9a309169dbae 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera_switchdev.c
+++ b/drivers/net/ethernet/marvell/prestera/prestera_switchdev.c
@@ -748,7 +748,7 @@ static void
prestera_fdb_offload_notify(struct prestera_port *port,
struct switchdev_notifier_fdb_info *info)
{
- struct switchdev_notifier_fdb_info send_info;
+ struct switchdev_notifier_fdb_info send_info = {};
send_info.addr = info->addr;
send_info.vid = info->vid;
@@ -1123,7 +1123,7 @@ static int prestera_switchdev_blk_event(struct notifier_block *unused,
static void prestera_fdb_event(struct prestera_switch *sw,
struct prestera_event *evt, void *arg)
{
- struct switchdev_notifier_fdb_info info;
+ struct switchdev_notifier_fdb_info info = {};
struct net_device *dev = NULL;
struct prestera_port *port;
struct prestera_lag *lag;
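struct switchdev_notifier_fdb_info keeps growing new fields (e.g. the is_local flag), so stack instances must be zero-initialized or notifier consumers may read garbage; `= {}` zeroes every member the caller does not set. A hedged sketch of a notifying caller (the function name is illustrative):

#include <net/switchdev.h>

/* Hedged sketch: report one offloaded FDB entry. The "= {}" zeroes
 * every field this caller does not set (added_by_user, is_local, ...),
 * so notifier consumers never read uninitialized stack memory.
 */
static void example_notify_fdb(struct net_device *dev,
                               const unsigned char *mac, u16 vid)
{
        struct switchdev_notifier_fdb_info info = {};

        info.addr = mac;
        info.vid = vid;
        info.offloaded = true;
        call_switchdev_notifiers(SWITCHDEV_FDB_OFFLOADED, dev,
                                 &info.info, NULL);
}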
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index 00c84656b2e7..28ac4693da3c 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -3535,6 +3535,7 @@ slave_start:
if (!SRIOV_VALID_STATE(dev->flags)) {
mlx4_err(dev, "Invalid SRIOV state\n");
+ err = -EINVAL;
goto err_close;
}
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cq.c b/drivers/net/ethernet/mellanox/mlx5/core/cq.c
index df3e4938ecdd..360e093874d4 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/cq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/cq.c
@@ -134,6 +134,7 @@ int mlx5_core_create_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
cq->cqn);
cq->uar = dev->priv.uar;
+ cq->irqn = eq->core.irqn;
return 0;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/dev.c b/drivers/net/ethernet/mellanox/mlx5/core/dev.c
index ceebfc20f65e..def2156e50ee 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/dev.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/dev.c
@@ -500,10 +500,7 @@ static int next_phys_dev(struct device *dev, const void *data)
return 1;
}
-/* This function is called with two flows:
- * 1. During initialization of mlx5_core_dev and we don't need to lock it.
- * 2. During LAG configure stage and caller holds &mlx5_intf_mutex.
- */
+/* Must be called with intf_mutex held */
struct mlx5_core_dev *mlx5_get_next_phys_dev(struct mlx5_core_dev *dev)
{
struct auxiliary_device *adev;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c b/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c
index 01a1d02dcf15..3f8a98093f8c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c
@@ -1019,12 +1019,19 @@ int mlx5_fw_tracer_init(struct mlx5_fw_tracer *tracer)
MLX5_NB_INIT(&tracer->nb, fw_tracer_event, DEVICE_TRACER);
mlx5_eq_notifier_register(dev, &tracer->nb);
- mlx5_fw_tracer_start(tracer);
-
+ err = mlx5_fw_tracer_start(tracer);
+ if (err) {
+ mlx5_core_warn(dev, "FWTracer: Failed to start tracer %d\n", err);
+ goto err_notifier_unregister;
+ }
return 0;
+err_notifier_unregister:
+ mlx5_eq_notifier_unregister(dev, &tracer->nb);
+ mlx5_core_destroy_mkey(dev, &tracer->buff.mkey);
err_dealloc_pd:
mlx5_core_dealloc_pd(dev, tracer->buff.pdn);
+ cancel_work_sync(&tracer->read_fw_strings_work);
return err;
}
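The error path above now unwinds strictly in reverse order of setup (notifier, mkey, PD) and also cancels the work item queued earlier in init. The kernel's goto-unwind idiom, reduced to a sketch with hypothetical step/undo helpers:

/* Sketch with hypothetical helpers: each label undoes exactly what
 * completed before the failure, in reverse order of initialization.
 */
static int example_init(struct example_ctx *ctx)
{
        int err;

        err = example_step_a(ctx);
        if (err)
                return err;

        err = example_step_b(ctx);
        if (err)
                goto err_undo_a;

        err = example_step_c(ctx);
        if (err)
                goto err_undo_b;

        return 0;

err_undo_b:
        example_undo_b(ctx);
err_undo_a:
        example_undo_a(ctx);
        return err;
}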
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/params.c b/drivers/net/ethernet/mellanox/mlx5/core/en/params.c
index 150c8e82c738..2cbf18c967f7 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/params.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/params.c
@@ -471,6 +471,15 @@ static void mlx5e_build_rx_cq_param(struct mlx5_core_dev *mdev,
param->cq_period_mode = params->rx_cq_moderation.cq_period_mode;
}
+static u8 rq_end_pad_mode(struct mlx5_core_dev *mdev, struct mlx5e_params *params)
+{
+ bool ro = pcie_relaxed_ordering_enabled(mdev->pdev) &&
+ MLX5_CAP_GEN(mdev, relaxed_ordering_write);
+
+ return ro && params->lro_en ?
+ MLX5_WQ_END_PAD_MODE_NONE : MLX5_WQ_END_PAD_MODE_ALIGN;
+}
+
int mlx5e_build_rq_param(struct mlx5_core_dev *mdev,
struct mlx5e_params *params,
struct mlx5e_xsk_param *xsk,
@@ -508,7 +517,7 @@ int mlx5e_build_rq_param(struct mlx5_core_dev *mdev,
}
MLX5_SET(wq, wq, wq_type, params->rq_wq_type);
- MLX5_SET(wq, wq, end_padding_mode, MLX5_WQ_END_PAD_MODE_ALIGN);
+ MLX5_SET(wq, wq, end_padding_mode, rq_end_pad_mode(mdev, params));
MLX5_SET(wq, wq, log_wq_stride,
mlx5e_get_rqwq_log_stride(params->rq_wq_type, ndsegs));
MLX5_SET(wq, wq, pd, mdev->mlx5e_res.hw_objs.pdn);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c b/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c
index 778e229310a9..efef4adce086 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c
@@ -482,8 +482,11 @@ static void mlx5e_ptp_build_params(struct mlx5e_ptp *c,
params->log_sq_size = orig->log_sq_size;
mlx5e_ptp_build_sq_param(c->mdev, params, &cparams->txq_sq_param);
}
- if (test_bit(MLX5E_PTP_STATE_RX, c->state))
+ /* RQ */
+ if (test_bit(MLX5E_PTP_STATE_RX, c->state)) {
+ params->vlan_strip_disable = orig->vlan_strip_disable;
mlx5e_ptp_build_rq_param(c->mdev, c->netdev, c->priv->q_counter, cparams);
+ }
}
static int mlx5e_init_ptp_rq(struct mlx5e_ptp *c, struct mlx5e_params *params,
@@ -494,7 +497,7 @@ static int mlx5e_init_ptp_rq(struct mlx5e_ptp *c, struct mlx5e_params *params,
int err;
rq->wq_type = params->rq_wq_type;
- rq->pdev = mdev->device;
+ rq->pdev = c->pdev;
rq->netdev = priv->netdev;
rq->priv = priv;
rq->clock = &mdev->clock;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c
index 8f79f04eccd6..1e2d117082d4 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c
@@ -124,6 +124,11 @@ static int mlx5e_route_lookup_ipv4_get(struct mlx5e_priv *priv,
if (IS_ERR(rt))
return PTR_ERR(rt);
+ if (rt->rt_type != RTN_UNICAST) {
+ ret = -ENETUNREACH;
+ goto err_rt_release;
+ }
+
if (mlx5_lag_is_multipath(mdev) && rt->rt_gw_family != AF_INET) {
ret = -ENETUNREACH;
goto err_rt_release;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/trap.c b/drivers/net/ethernet/mellanox/mlx5/core/en/trap.c
index 86ab4e864fe6..7f94508594fb 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/trap.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/trap.c
@@ -37,7 +37,7 @@ static void mlx5e_init_trap_rq(struct mlx5e_trap *t, struct mlx5e_params *params
struct mlx5e_priv *priv = t->priv;
rq->wq_type = params->rq_wq_type;
- rq->pdev = mdev->device;
+ rq->pdev = t->pdev;
rq->netdev = priv->netdev;
rq->priv = priv;
rq->clock = &mdev->clock;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index d09e65557e75..24f919ef9b8e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -1535,15 +1535,9 @@ static int mlx5e_alloc_cq_common(struct mlx5e_priv *priv,
{
struct mlx5_core_dev *mdev = priv->mdev;
struct mlx5_core_cq *mcq = &cq->mcq;
- int eqn_not_used;
- unsigned int irqn;
int err;
u32 i;
- err = mlx5_vector2eqn(mdev, param->eq_ix, &eqn_not_used, &irqn);
- if (err)
- return err;
-
err = mlx5_cqwq_create(mdev, &param->wq, param->cqc, &cq->wq,
&cq->wq_ctrl);
if (err)
@@ -1557,7 +1551,6 @@ static int mlx5e_alloc_cq_common(struct mlx5e_priv *priv,
mcq->vector = param->eq_ix;
mcq->comp = mlx5e_completion_event;
mcq->event = mlx5e_cq_error_event;
- mcq->irqn = irqn;
for (i = 0; i < mlx5_cqwq_get_size(&cq->wq); i++) {
struct mlx5_cqe64 *cqe = mlx5_cqwq_get_wqe(&cq->wq, i);
@@ -1605,11 +1598,10 @@ static int mlx5e_create_cq(struct mlx5e_cq *cq, struct mlx5e_cq_param *param)
void *in;
void *cqc;
int inlen;
- unsigned int irqn_not_used;
int eqn;
int err;
- err = mlx5_vector2eqn(mdev, param->eq_ix, &eqn, &irqn_not_used);
+ err = mlx5_vector2eqn(mdev, param->eq_ix, &eqn);
if (err)
return err;
@@ -1891,30 +1883,30 @@ static int mlx5e_open_queues(struct mlx5e_channel *c,
if (err)
goto err_close_icosq;
+ err = mlx5e_open_rxq_rq(c, params, &cparam->rq);
+ if (err)
+ goto err_close_sqs;
+
if (c->xdp) {
err = mlx5e_open_xdpsq(c, params, &cparam->xdp_sq, NULL,
&c->rq_xdpsq, false);
if (err)
- goto err_close_sqs;
+ goto err_close_rq;
}
- err = mlx5e_open_rxq_rq(c, params, &cparam->rq);
- if (err)
- goto err_close_xdp_sq;
-
err = mlx5e_open_xdpsq(c, params, &cparam->xdp_sq, NULL, &c->xdpsq, true);
if (err)
- goto err_close_rq;
+ goto err_close_xdp_sq;
return 0;
-err_close_rq:
- mlx5e_close_rq(&c->rq);
-
err_close_xdp_sq:
if (c->xdp)
mlx5e_close_xdpsq(&c->rq_xdpsq);
+err_close_rq:
+ mlx5e_close_rq(&c->rq);
+
err_close_sqs:
mlx5e_close_sqs(c);
@@ -1949,9 +1941,9 @@ err_close_async_icosq_cq:
static void mlx5e_close_queues(struct mlx5e_channel *c)
{
mlx5e_close_xdpsq(&c->xdpsq);
- mlx5e_close_rq(&c->rq);
if (c->xdp)
mlx5e_close_xdpsq(&c->rq_xdpsq);
+ mlx5e_close_rq(&c->rq);
mlx5e_close_sqs(c);
mlx5e_close_icosq(&c->icosq);
mlx5e_close_icosq(&c->async_icosq);
@@ -1983,9 +1975,8 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
struct mlx5e_channel *c;
unsigned int irq;
int err;
- int eqn;
- err = mlx5_vector2eqn(priv->mdev, ix, &eqn, &irq);
+ err = mlx5_vector2irqn(priv->mdev, ix, &irq);
if (err)
return err;
@@ -3384,7 +3375,7 @@ static int mlx5e_modify_channels_scatter_fcs(struct mlx5e_channels *chs, bool en
static int mlx5e_modify_channels_vsd(struct mlx5e_channels *chs, bool vsd)
{
- int err = 0;
+ int err;
int i;
for (i = 0; i < chs->num; i++) {
@@ -3392,6 +3383,8 @@ static int mlx5e_modify_channels_vsd(struct mlx5e_channels *chs, bool vsd)
if (err)
return err;
}
+ if (chs->ptp && test_bit(MLX5E_PTP_STATE_RX, chs->ptp->state))
+ return mlx5e_modify_rq_vsd(&chs->ptp->rq, vsd);
return 0;
}
@@ -3829,6 +3822,24 @@ int mlx5e_set_features(struct net_device *netdev, netdev_features_t features)
return 0;
}
+static netdev_features_t mlx5e_fix_uplink_rep_features(struct net_device *netdev,
+ netdev_features_t features)
+{
+ features &= ~NETIF_F_HW_TLS_RX;
+ if (netdev->features & NETIF_F_HW_TLS_RX)
+ netdev_warn(netdev, "Disabling hw_tls_rx, not supported in switchdev mode\n");
+
+ features &= ~NETIF_F_HW_TLS_TX;
+ if (netdev->features & NETIF_F_HW_TLS_TX)
+ netdev_warn(netdev, "Disabling hw_tls_tx, not supported in switchdev mode\n");
+
+ features &= ~NETIF_F_NTUPLE;
+ if (netdev->features & NETIF_F_NTUPLE)
+ netdev_warn(netdev, "Disabling ntuple, not supported in switchdev mode\n");
+
+ return features;
+}
+
static netdev_features_t mlx5e_fix_features(struct net_device *netdev,
netdev_features_t features)
{
@@ -3860,15 +3871,8 @@ static netdev_features_t mlx5e_fix_features(struct net_device *netdev,
netdev_warn(netdev, "Disabling rxhash, not supported when CQE compress is active\n");
}
- if (mlx5e_is_uplink_rep(priv)) {
- features &= ~NETIF_F_HW_TLS_RX;
- if (netdev->features & NETIF_F_HW_TLS_RX)
- netdev_warn(netdev, "Disabling hw_tls_rx, not supported in switchdev mode\n");
-
- features &= ~NETIF_F_HW_TLS_TX;
- if (netdev->features & NETIF_F_HW_TLS_TX)
- netdev_warn(netdev, "Disabling hw_tls_tx, not supported in switchdev mode\n");
- }
+ if (mlx5e_is_uplink_rep(priv))
+ features = mlx5e_fix_uplink_rep_features(netdev, features);
mutex_unlock(&priv->state_lock);
@@ -4859,6 +4863,9 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev)
if (MLX5_CAP_ETH(mdev, scatter_fcs))
netdev->hw_features |= NETIF_F_RXFCS;
+ if (mlx5_qos_is_supported(mdev))
+ netdev->hw_features |= NETIF_F_HW_TC;
+
netdev->features = netdev->hw_features;
/* Defaults */
@@ -4879,8 +4886,6 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev)
netdev->hw_features |= NETIF_F_NTUPLE;
#endif
}
- if (mlx5_qos_is_supported(mdev))
- netdev->features |= NETIF_F_HW_TC;
netdev->features |= NETIF_F_HIGHDMA;
netdev->features |= NETIF_F_HW_VLAN_STAG_FILTER;
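On the last two hunks: moving NETIF_F_HW_TC into hw_features before the `netdev->features = netdev->hw_features` copy keeps the flag enabled by default while making it user-toggleable via ethtool; the old code forced it into features only. The convention, sketched (function name is illustrative):

/* Sketch: hw_features advertises what ethtool may toggle; features is
 * the currently enabled set. Setting the bit before the copy enables
 * it by default without making it mandatory.
 */
static void example_advertise_tc(struct net_device *netdev)
{
        netdev->hw_features |= NETIF_F_HW_TC;
        netdev->features = netdev->hw_features;
}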
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index 629a61e8022f..d273758255c3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -452,12 +452,32 @@ static void mlx5e_detach_mod_hdr(struct mlx5e_priv *priv,
static
struct mlx5_core_dev *mlx5e_hairpin_get_mdev(struct net *net, int ifindex)
{
+ struct mlx5_core_dev *mdev;
struct net_device *netdev;
struct mlx5e_priv *priv;
- netdev = __dev_get_by_index(net, ifindex);
+ netdev = dev_get_by_index(net, ifindex);
+ if (!netdev)
+ return ERR_PTR(-ENODEV);
+
priv = netdev_priv(netdev);
- return priv->mdev;
+ mdev = priv->mdev;
+ dev_put(netdev);
+
+ /* Mirred tc action holds a refcount on the ifindex net_device (see
+ * net/sched/act_mirred.c:tcf_mirred_get_dev). So, it's okay to continue using mdev
+ * after dev_put(netdev), while we're in the context of adding a tc flow.
+ *
+ * The mdev pointer corresponds to the peer/out net_device of a hairpin. It is then
+ * stored in a hairpin object, which exists until all flows that refer to it are
+ * removed.
+ *
+ * On the other hand, after a hairpin object has been created, the peer net_device may
+ * be removed/unbound while there are still some hairpin flows that are using it. This
+ * case is handled by mlx5e_tc_hairpin_update_dead_peer, which is hooked to
+ * NETDEV_UNREGISTER event of the peer net_device.
+ */
+ return mdev;
}
static int mlx5e_hairpin_create_transport(struct mlx5e_hairpin *hp)
@@ -666,6 +686,10 @@ mlx5e_hairpin_create(struct mlx5e_priv *priv, struct mlx5_hairpin_params *params
func_mdev = priv->mdev;
peer_mdev = mlx5e_hairpin_get_mdev(dev_net(priv->netdev), peer_ifindex);
+ if (IS_ERR(peer_mdev)) {
+ err = PTR_ERR(peer_mdev);
+ goto create_pair_err;
+ }
pair = mlx5_core_hairpin_create(func_mdev, peer_mdev, params);
if (IS_ERR(pair)) {
@@ -804,6 +828,11 @@ static int mlx5e_hairpin_flow_add(struct mlx5e_priv *priv,
int err;
peer_mdev = mlx5e_hairpin_get_mdev(dev_net(priv->netdev), peer_ifindex);
+ if (IS_ERR(peer_mdev)) {
+ NL_SET_ERR_MSG_MOD(extack, "invalid ifindex of mirred device");
+ return PTR_ERR(peer_mdev);
+ }
+
if (!MLX5_CAP_GEN(priv->mdev, hairpin) || !MLX5_CAP_GEN(peer_mdev, hairpin)) {
NL_SET_ERR_MSG_MOD(extack, "hairpin is not supported");
return -EOPNOTSUPP;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
index 6e074cc457de..605c8ecc3610 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
@@ -855,8 +855,8 @@ clean:
return err;
}
-int mlx5_vector2eqn(struct mlx5_core_dev *dev, int vector, int *eqn,
- unsigned int *irqn)
+static int vector2eqnirqn(struct mlx5_core_dev *dev, int vector, int *eqn,
+ unsigned int *irqn)
{
struct mlx5_eq_table *table = dev->priv.eq_table;
struct mlx5_eq_comp *eq, *n;
@@ -865,8 +865,10 @@ int mlx5_vector2eqn(struct mlx5_core_dev *dev, int vector, int *eqn,
list_for_each_entry_safe(eq, n, &table->comp_eqs_list, list) {
if (i++ == vector) {
- *eqn = eq->core.eqn;
- *irqn = eq->core.irqn;
+ if (irqn)
+ *irqn = eq->core.irqn;
+ if (eqn)
+ *eqn = eq->core.eqn;
err = 0;
break;
}
@@ -874,8 +876,18 @@ int mlx5_vector2eqn(struct mlx5_core_dev *dev, int vector, int *eqn,
return err;
}
+
+int mlx5_vector2eqn(struct mlx5_core_dev *dev, int vector, int *eqn)
+{
+ return vector2eqnirqn(dev, vector, eqn, NULL);
+}
EXPORT_SYMBOL(mlx5_vector2eqn);
+int mlx5_vector2irqn(struct mlx5_core_dev *dev, int vector, unsigned int *irqn)
+{
+ return vector2eqnirqn(dev, vector, NULL, irqn);
+}
+
unsigned int mlx5_comp_vectors_count(struct mlx5_core_dev *dev)
{
return dev->priv.eq_table->num_comp_eqs;
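With the split above, mlx5_vector2eqn() stays the exported API for EQ numbers while mlx5_vector2irqn() (declared in lib/eq.h) serves core-internal IRQ lookups; vector2eqnirqn() simply skips whichever out-parameter is NULL. An illustrative caller (the function name is hypothetical):

/* Illustrative caller: the exported helper yields the EQ number for a
 * completion vector; the lib/eq.h-only helper yields the Linux IRQ.
 */
static int example_setup_vector(struct mlx5_core_dev *mdev, int vector)
{
        unsigned int irqn;
        int eqn, err;

        err = mlx5_vector2eqn(mdev, vector, &eqn);      /* for CQ create */
        if (err)
                return err;

        err = mlx5_vector2irqn(mdev, vector, &irqn);    /* core-internal */
        if (err)
                return err;

        /* ... use eqn in the CQC, irqn for IRQ-level bookkeeping ... */
        return 0;
}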
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.c
index a6e1d4f78268..69a3630818d7 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.c
@@ -69,7 +69,7 @@ static void
mlx5_esw_bridge_fdb_offload_notify(struct net_device *dev, const unsigned char *addr, u16 vid,
unsigned long val)
{
- struct switchdev_notifier_fdb_info send_info;
+ struct switchdev_notifier_fdb_info send_info = {};
send_info.addr = addr;
send_info.vid = vid;
@@ -579,7 +579,7 @@ static struct mlx5_esw_bridge *mlx5_esw_bridge_create(int ifindex,
xa_init(&bridge->vports);
bridge->ifindex = ifindex;
bridge->refcnt = 1;
- bridge->ageing_time = BR_DEFAULT_AGEING_TIME;
+ bridge->ageing_time = clock_t_to_jiffies(BR_DEFAULT_AGEING_TIME);
list_add(&bridge->list, &br_offloads->bridges);
return bridge;
@@ -1006,7 +1006,7 @@ int mlx5_esw_bridge_ageing_time_set(unsigned long ageing_time, struct mlx5_eswit
if (!vport->bridge)
return -EINVAL;
- vport->bridge->ageing_time = ageing_time;
+ vport->bridge->ageing_time = clock_t_to_jiffies(ageing_time);
return 0;
}
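The bridge ageing time arrives from switchdev in clock_t units (hundredths of a second), so it must be converted with clock_t_to_jiffies() before being compared against jiffies-based timestamps, which is what both hunks above fix. A sketch of the consuming side (the helper name is illustrative):

/* Sketch: decide whether an FDB entry has aged out, comparing a
 * jiffies timestamp against the clock_t-based ageing time.
 */
static bool example_fdb_expired(unsigned long lastuse,
                                unsigned long ageing_clock_t)
{
        unsigned long ageing = clock_t_to_jiffies(ageing_clock_t);

        return time_after(jiffies, lastuse + ageing);
}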
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/sample.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/sample.c
index 794012c5c476..d3ad78aa9d45 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/sample.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/sample.c
@@ -501,6 +501,7 @@ err_sampler:
err_offload_rule:
mlx5_esw_vporttbl_put(esw, &per_vport_tbl_attr);
err_default_tbl:
+ kfree(sample_flow);
return ERR_PTR(err);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
index 48cac5bf606d..d562edf5b0bc 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
@@ -636,7 +636,7 @@ struct esw_vport_tbl_namespace {
};
struct mlx5_vport_tbl_attr {
- u16 chain;
+ u32 chain;
u16 prio;
u16 vport;
const struct esw_vport_tbl_namespace *vport_ns;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
index 7579f3402776..3bb71a186004 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@ -48,6 +48,7 @@
#include "lib/fs_chains.h"
#include "en_tc.h"
#include "en/mapping.h"
+#include "devlink.h"
#define mlx5_esw_for_each_rep(esw, i, rep) \
xa_for_each(&((esw)->offloads.vport_reps), i, rep)
@@ -382,10 +383,11 @@ esw_setup_vport_dest(struct mlx5_flow_destination *dest, struct mlx5_flow_act *f
{
dest[dest_idx].type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
dest[dest_idx].vport.num = esw_attr->dests[attr_idx].rep->vport;
- dest[dest_idx].vport.vhca_id =
- MLX5_CAP_GEN(esw_attr->dests[attr_idx].mdev, vhca_id);
- if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
+ if (MLX5_CAP_ESW(esw->dev, merged_eswitch)) {
+ dest[dest_idx].vport.vhca_id =
+ MLX5_CAP_GEN(esw_attr->dests[attr_idx].mdev, vhca_id);
dest[dest_idx].vport.flags |= MLX5_FLOW_DEST_VPORT_VHCA_ID;
+ }
if (esw_attr->dests[attr_idx].flags & MLX5_ESW_DEST_ENCAP) {
if (pkt_reformat) {
flow_act->action |= MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
@@ -2367,6 +2369,9 @@ static int mlx5_esw_offloads_devcom_event(int event,
switch (event) {
case ESW_OFFLOADS_DEVCOM_PAIR:
+ if (mlx5_get_next_phys_dev(esw->dev) != peer_esw->dev)
+ break;
+
if (mlx5_eswitch_vport_match_metadata_enabled(esw) !=
mlx5_eswitch_vport_match_metadata_enabled(peer_esw))
break;
@@ -2997,12 +3002,19 @@ int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode,
if (cur_mlx5_mode == mlx5_mode)
goto unlock;
- if (mode == DEVLINK_ESWITCH_MODE_SWITCHDEV)
+ if (mode == DEVLINK_ESWITCH_MODE_SWITCHDEV) {
+ if (mlx5_devlink_trap_get_num_active(esw->dev)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Can't change mode while devlink traps are active");
+ err = -EOPNOTSUPP;
+ goto unlock;
+ }
err = esw_offloads_start(esw, extack);
- else if (mode == DEVLINK_ESWITCH_MODE_LEGACY)
+ } else if (mode == DEVLINK_ESWITCH_MODE_LEGACY) {
err = esw_offloads_stop(esw, extack);
- else
+ } else {
err = -EINVAL;
+ }
unlock:
mlx5_esw_unlock(esw);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c b/drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c
index bd66ab2af5b5..d5da4ab65766 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c
@@ -417,7 +417,6 @@ static int mlx5_fpga_conn_create_cq(struct mlx5_fpga_conn *conn, int cq_size)
struct mlx5_wq_param wqp;
struct mlx5_cqe64 *cqe;
int inlen, err, eqn;
- unsigned int irqn;
void *cqc, *in;
__be64 *pas;
u32 i;
@@ -446,7 +445,7 @@ static int mlx5_fpga_conn_create_cq(struct mlx5_fpga_conn *conn, int cq_size)
goto err_cqwq;
}
- err = mlx5_vector2eqn(mdev, smp_processor_id(), &eqn, &irqn);
+ err = mlx5_vector2eqn(mdev, smp_processor_id(), &eqn);
if (err) {
kvfree(in);
goto err_cqwq;
@@ -476,7 +475,6 @@ static int mlx5_fpga_conn_create_cq(struct mlx5_fpga_conn *conn, int cq_size)
*conn->cq.mcq.arm_db = 0;
conn->cq.mcq.vector = 0;
conn->cq.mcq.comp = mlx5_fpga_conn_cq_complete;
- conn->cq.mcq.irqn = irqn;
conn->cq.mcq.uar = fdev->conn_res.uar;
tasklet_setup(&conn->cq.tasklet, mlx5_fpga_conn_cq_tasklet);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
index d7bf0a3e4a52..c0697e1b7118 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
@@ -1024,17 +1024,19 @@ static int connect_fwd_rules(struct mlx5_core_dev *dev,
static int connect_flow_table(struct mlx5_core_dev *dev, struct mlx5_flow_table *ft,
struct fs_prio *prio)
{
- struct mlx5_flow_table *next_ft;
+ struct mlx5_flow_table *next_ft, *first_ft;
int err = 0;
/* Connect_prev_fts and update_root_ft_create are mutually exclusive */
- if (list_empty(&prio->node.children)) {
+ first_ft = list_first_entry_or_null(&prio->node.children,
+ struct mlx5_flow_table, node.list);
+ if (!first_ft || first_ft->level > ft->level) {
err = connect_prev_fts(dev, ft, prio);
if (err)
return err;
- next_ft = find_next_chained_ft(prio);
+ next_ft = first_ft ? first_ft : find_next_chained_ft(prio);
err = connect_fwd_rules(dev, ft, next_ft);
if (err)
return err;
@@ -2120,7 +2122,7 @@ static int disconnect_flow_table(struct mlx5_flow_table *ft)
node.list) == ft))
return 0;
- next_ft = find_next_chained_ft(prio);
+ next_ft = find_next_ft(ft);
err = connect_fwd_rules(dev, next_ft, ft);
if (err)
return err;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/health.c b/drivers/net/ethernet/mellanox/mlx5/core/health.c
index 9ff163c5bcde..9abeb80ffa31 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/health.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/health.c
@@ -626,8 +626,16 @@ static void mlx5_fw_fatal_reporter_err_work(struct work_struct *work)
}
fw_reporter_ctx.err_synd = health->synd;
fw_reporter_ctx.miss_counter = health->miss_counter;
- devlink_health_report(health->fw_fatal_reporter,
- "FW fatal error reported", &fw_reporter_ctx);
+ if (devlink_health_report(health->fw_fatal_reporter,
+ "FW fatal error reported", &fw_reporter_ctx) == -ECANCELED) {
+ /* If recovery wasn't performed, due to grace period,
+ * unload the driver. This ensures that the driver
+ * closes all its resources and it is not subjected to
+ * requests from the kernel.
+ */
+ mlx5_core_err(dev, "Driver is in error state. Unloading\n");
+ mlx5_unload_one(dev);
+ }
}
static const struct devlink_health_reporter_ops mlx5_fw_fatal_reporter_ops = {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/eq.h b/drivers/net/ethernet/mellanox/mlx5/core/lib/eq.h
index 624cedebb510..d3d628b862f3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/eq.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/eq.h
@@ -104,4 +104,6 @@ void mlx5_core_eq_free_irqs(struct mlx5_core_dev *dev);
struct cpu_rmap *mlx5_eq_table_get_rmap(struct mlx5_core_dev *dev);
#endif
+int mlx5_vector2irqn(struct mlx5_core_dev *dev, int vector, unsigned int *irqn);
+
#endif
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index eb1b316560a8..c84ad87c99bb 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -1784,16 +1784,14 @@ static int __init init(void)
if (err)
goto err_sf;
-#ifdef CONFIG_MLX5_CORE_EN
err = mlx5e_init();
- if (err) {
- pci_unregister_driver(&mlx5_core_driver);
- goto err_debug;
- }
-#endif
+ if (err)
+ goto err_en;
return 0;
+err_en:
+ mlx5_sf_driver_unregister();
err_sf:
pci_unregister_driver(&mlx5_core_driver);
err_debug:
@@ -1803,9 +1801,7 @@ err_debug:
static void __exit cleanup(void)
{
-#ifdef CONFIG_MLX5_CORE_EN
mlx5e_cleanup();
-#endif
mlx5_sf_driver_unregister();
pci_unregister_driver(&mlx5_core_driver);
mlx5_unregister_debugfs();
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
index 343807ac2036..da365b8f0141 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
@@ -206,8 +206,13 @@ int mlx5_firmware_flash(struct mlx5_core_dev *dev, const struct firmware *fw,
int mlx5_fw_version_query(struct mlx5_core_dev *dev,
u32 *running_ver, u32 *stored_ver);
+#ifdef CONFIG_MLX5_CORE_EN
int mlx5e_init(void);
void mlx5e_cleanup(void);
+#else
+static inline int mlx5e_init(void) { return 0; }
+static inline void mlx5e_cleanup(void) {}
+#endif
static inline bool mlx5_sriov_is_enabled(struct mlx5_core_dev *dev)
{
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c b/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
index b25f764daa08..3465b363fc2f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
@@ -214,6 +214,7 @@ static struct mlx5_irq *irq_request(struct mlx5_irq_pool *pool, int i)
err = -ENOMEM;
goto err_cpumask;
}
+ irq->pool = pool;
kref_init(&irq->kref);
irq->index = i;
err = xa_err(xa_store(&pool->irqs, irq->index, irq, GFP_KERNEL));
@@ -222,7 +223,6 @@ static struct mlx5_irq *irq_request(struct mlx5_irq_pool *pool, int i)
irq->index, err);
goto err_xa;
}
- irq->pool = pool;
return irq;
err_xa:
free_cpumask_var(irq->mask);
@@ -251,8 +251,11 @@ int mlx5_irq_attach_nb(struct mlx5_irq *irq, struct notifier_block *nb)
int mlx5_irq_detach_nb(struct mlx5_irq *irq, struct notifier_block *nb)
{
+ int err = 0;
+
+ err = atomic_notifier_chain_unregister(&irq->nh, nb);
irq_put(irq);
- return atomic_notifier_chain_unregister(&irq->nh, nb);
+ return err;
}
struct cpumask *mlx5_irq_get_affinity_mask(struct mlx5_irq *irq)
@@ -437,6 +440,7 @@ irq_pool_alloc(struct mlx5_core_dev *dev, int start, int size, char *name,
if (!pool)
return ERR_PTR(-ENOMEM);
pool->dev = dev;
+ mutex_init(&pool->lock);
xa_init_flags(&pool->irqs, XA_FLAGS_ALLOC);
pool->xa_num_irqs.min = start;
pool->xa_num_irqs.max = start + size - 1;
@@ -445,7 +449,6 @@ irq_pool_alloc(struct mlx5_core_dev *dev, int start, int size, char *name,
name);
pool->min_threshold = min_threshold * MLX5_EQ_REFS_PER_IRQ;
pool->max_threshold = max_threshold * MLX5_EQ_REFS_PER_IRQ;
- mutex_init(&pool->lock);
mlx5_core_dbg(dev, "pool->name = %s, pool->size = %d, pool->start = %d",
name, size, start);
return pool;
@@ -459,6 +462,7 @@ static void irq_pool_free(struct mlx5_irq_pool *pool)
xa_for_each(&pool->irqs, index, irq)
irq_release(&irq->kref);
xa_destroy(&pool->irqs);
+ mutex_destroy(&pool->lock);
kvfree(pool);
}
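Both pci_irq.c hunks above are initialize-before-publish fixes: irq->pool must be valid before xa_store() makes the irq reachable, and the pool mutex must exist before the xarray is populated (with mutex_destroy() now paired in irq_pool_free()). The pattern, sketched generically (types and names are illustrative):

#include <linux/xarray.h>

struct example_obj {
        void *owner;
        unsigned long index;
};

/* Sketch: fully initialize an object before storing it where other
 * threads can look it up, so no one observes partial state.
 */
static int example_publish(struct xarray *xa, struct example_obj *obj,
                           void *owner)
{
        obj->owner = owner;     /* set every field first... */
        /* ...then publish; on error the object was never visible */
        return xa_err(xa_store(xa, obj->index, obj, GFP_KERNEL));
}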
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c
index 12cf323a5943..9df0e73d1c35 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c
@@ -749,7 +749,6 @@ static struct mlx5dr_cq *dr_create_cq(struct mlx5_core_dev *mdev,
struct mlx5_cqe64 *cqe;
struct mlx5dr_cq *cq;
int inlen, err, eqn;
- unsigned int irqn;
void *cqc, *in;
__be64 *pas;
int vector;
@@ -782,7 +781,7 @@ static struct mlx5dr_cq *dr_create_cq(struct mlx5_core_dev *mdev,
goto err_cqwq;
vector = raw_smp_processor_id() % mlx5_comp_vectors_count(mdev);
- err = mlx5_vector2eqn(mdev, vector, &eqn, &irqn);
+ err = mlx5_vector2eqn(mdev, vector, &eqn);
if (err) {
kvfree(in);
goto err_cqwq;
@@ -818,7 +817,6 @@ static struct mlx5dr_cq *dr_create_cq(struct mlx5_core_dev *mdev,
*cq->mcq.arm_db = cpu_to_be32(2 << 28);
cq->mcq.vector = 0;
- cq->mcq.irqn = irqn;
cq->mcq.uar = uar;
return cq;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v0.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v0.c
index f1950e4968da..e4dd4eed5aee 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v0.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v0.c
@@ -352,6 +352,7 @@ static void dr_ste_v0_set_rx_decap(u8 *hw_ste_p)
{
MLX5_SET(ste_rx_steering_mult, hw_ste_p, tunneling_action,
DR_STE_TUNL_ACTION_DECAP);
+ MLX5_SET(ste_rx_steering_mult, hw_ste_p, fail_on_error, 1);
}
static void dr_ste_v0_set_rx_pop_vlan(u8 *hw_ste_p)
@@ -365,6 +366,7 @@ static void dr_ste_v0_set_rx_decap_l3(u8 *hw_ste_p, bool vlan)
MLX5_SET(ste_rx_steering_mult, hw_ste_p, tunneling_action,
DR_STE_TUNL_ACTION_L3_DECAP);
MLX5_SET(ste_modify_packet, hw_ste_p, action_description, vlan ? 1 : 0);
+ MLX5_SET(ste_rx_steering_mult, hw_ste_p, fail_on_error, 1);
}
static void dr_ste_v0_set_rewrite_actions(u8 *hw_ste_p, u16 num_of_actions,
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
index 7e221ef01437..f69cbb3852d5 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
@@ -9079,7 +9079,7 @@ mlxsw_sp_rif_fid_fid_get(struct mlxsw_sp_rif *rif,
static void mlxsw_sp_rif_fid_fdb_del(struct mlxsw_sp_rif *rif, const char *mac)
{
- struct switchdev_notifier_fdb_info info;
+ struct switchdev_notifier_fdb_info info = {};
struct net_device *dev;
dev = br_fdb_find_port(rif->dev, mac, 0);
@@ -9127,8 +9127,8 @@ mlxsw_sp_rif_vlan_fid_get(struct mlxsw_sp_rif *rif,
static void mlxsw_sp_rif_vlan_fdb_del(struct mlxsw_sp_rif *rif, const char *mac)
{
+ struct switchdev_notifier_fdb_info info = {};
u16 vid = mlxsw_sp_fid_8021q_vid(rif->fid);
- struct switchdev_notifier_fdb_info info;
struct net_device *br_dev;
struct net_device *dev;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
index c5ef9aa64efe..8f90cd323d5f 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
@@ -2508,7 +2508,7 @@ mlxsw_sp_fdb_call_notifiers(enum switchdev_notifier_type type,
const char *mac, u16 vid,
struct net_device *dev, bool offloaded)
{
- struct switchdev_notifier_fdb_info info;
+ struct switchdev_notifier_fdb_info info = {};
info.addr = mac;
info.vid = vid;
diff --git a/drivers/net/ethernet/microchip/sparx5/Kconfig b/drivers/net/ethernet/microchip/sparx5/Kconfig
index a80419d8d4b5..7bdbb2d09a14 100644
--- a/drivers/net/ethernet/microchip/sparx5/Kconfig
+++ b/drivers/net/ethernet/microchip/sparx5/Kconfig
@@ -2,6 +2,8 @@ config SPARX5_SWITCH
tristate "Sparx5 switch driver"
depends on NET_SWITCHDEV
depends on HAS_IOMEM
+ depends on OF
+ depends on ARCH_SPARX5 || COMPILE_TEST
select PHYLINK
select PHY_SPARX5_SERDES
select RESET_CONTROLLER
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_mactable.c b/drivers/net/ethernet/microchip/sparx5/sparx5_mactable.c
index 0443f66b5550..9a8e4f201eb1 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_mactable.c
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_mactable.c
@@ -277,7 +277,7 @@ static void sparx5_fdb_call_notifiers(enum switchdev_notifier_type type,
const char *mac, u16 vid,
struct net_device *dev, bool offloaded)
{
- struct switchdev_notifier_fdb_info info;
+ struct switchdev_notifier_fdb_info info = {};
info.addr = mac;
info.vid = vid;
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_netdev.c b/drivers/net/ethernet/microchip/sparx5/sparx5_netdev.c
index 9d485a9d1f1f..cb68eaaac881 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_netdev.c
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_netdev.c
@@ -13,19 +13,26 @@
*/
#define VSTAX 73
-static void ifh_encode_bitfield(void *ifh, u64 value, u32 pos, u32 width)
+#define ifh_encode_bitfield(ifh, value, pos, _width) \
+ ({ \
+ u32 width = (_width); \
+ \
+ /* Max width is 5 bytes - 40 bits. In worst case this will
+ * spread over 6 bytes - 48 bits
+ */ \
+ compiletime_assert(width <= 40, \
+ "Unsupported width, must be <= 40"); \
+ __ifh_encode_bitfield((ifh), (value), (pos), width); \
+ })
+
+static void __ifh_encode_bitfield(void *ifh, u64 value, u32 pos, u32 width)
{
u8 *ifh_hdr = ifh;
/* Calculate the Start IFH byte position of this IFH bit position */
u32 byte = (35 - (pos / 8));
/* Calculate the Start bit position in the Start IFH byte */
u32 bit = (pos % 8);
- u64 encode = GENMASK(bit + width - 1, bit) & (value << bit);
-
- /* Max width is 5 bytes - 40 bits. In worst case this will
- * spread over 6 bytes - 48 bits
- */
- compiletime_assert(width <= 40, "Unsupported width, must be <= 40");
+ u64 encode = GENMASK_ULL(bit + width - 1, bit) & (value << bit);
/* The b0-b7 goes into the start IFH byte */
if (encode & 0xFF)
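Two things change in this file: the function body moves behind a macro so compiletime_assert() sees the width while it is still a compile-time constant in the caller, and the mask becomes GENMASK_ULL() because bit + width - 1 can exceed 31 on a u64. A reduced sketch of the same trick (the macro name is illustrative):

#include <linux/bits.h>
#include <linux/compiler.h>

/* Reduced illustration of the wrapper trick above: the width check
 * runs where _width is still a compile-time constant, and the mask
 * uses the 64-bit GENMASK_ULL() since bit positions can pass 31.
 */
#define example_encode(value, pos, _width)                              \
        ({                                                              \
                u32 width = (_width);                                   \
                                                                        \
                compiletime_assert(width <= 40,                         \
                                   "Unsupported width, must be <= 40"); \
                GENMASK_ULL((pos) + width - 1, (pos)) &                 \
                        ((u64)(value) << (pos));                        \
        })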
diff --git a/drivers/net/ethernet/moxa/moxart_ether.c b/drivers/net/ethernet/moxa/moxart_ether.c
index 5249b64f4fc5..49def6934cad 100644
--- a/drivers/net/ethernet/moxa/moxart_ether.c
+++ b/drivers/net/ethernet/moxa/moxart_ether.c
@@ -540,10 +540,8 @@ static int moxart_mac_probe(struct platform_device *pdev)
SET_NETDEV_DEV(ndev, &pdev->dev);
ret = register_netdev(ndev);
- if (ret) {
- free_netdev(ndev);
+ if (ret)
goto init_fail;
- }
netdev_dbg(ndev, "%s: IRQ=%d address=%pM\n",
__func__, ndev->irq, ndev->dev_addr);
diff --git a/drivers/net/ethernet/mscc/ocelot.c b/drivers/net/ethernet/mscc/ocelot.c
index adfb9781799e..2948d731a1c1 100644
--- a/drivers/net/ethernet/mscc/ocelot.c
+++ b/drivers/net/ethernet/mscc/ocelot.c
@@ -1334,6 +1334,7 @@ void ocelot_apply_bridge_fwd_mask(struct ocelot *ocelot)
struct net_device *bond = ocelot_port->bond;
mask = ocelot_get_bridge_fwd_mask(ocelot, bridge);
+ mask |= cpu_fwd_mask;
mask &= ~BIT(port);
if (bond) {
mask &= ~ocelot_get_bond_mask(ocelot, bond,
diff --git a/drivers/net/ethernet/mscc/ocelot_io.c b/drivers/net/ethernet/mscc/ocelot_io.c
index ea4e83410fe4..7390fa3980ec 100644
--- a/drivers/net/ethernet/mscc/ocelot_io.c
+++ b/drivers/net/ethernet/mscc/ocelot_io.c
@@ -21,7 +21,7 @@ u32 __ocelot_read_ix(struct ocelot *ocelot, u32 reg, u32 offset)
ocelot->map[target][reg & REG_MASK] + offset, &val);
return val;
}
-EXPORT_SYMBOL(__ocelot_read_ix);
+EXPORT_SYMBOL_GPL(__ocelot_read_ix);
void __ocelot_write_ix(struct ocelot *ocelot, u32 val, u32 reg, u32 offset)
{
@@ -32,7 +32,7 @@ void __ocelot_write_ix(struct ocelot *ocelot, u32 val, u32 reg, u32 offset)
regmap_write(ocelot->targets[target],
ocelot->map[target][reg & REG_MASK] + offset, val);
}
-EXPORT_SYMBOL(__ocelot_write_ix);
+EXPORT_SYMBOL_GPL(__ocelot_write_ix);
void __ocelot_rmw_ix(struct ocelot *ocelot, u32 val, u32 mask, u32 reg,
u32 offset)
@@ -45,7 +45,7 @@ void __ocelot_rmw_ix(struct ocelot *ocelot, u32 val, u32 mask, u32 reg,
ocelot->map[target][reg & REG_MASK] + offset,
mask, val);
}
-EXPORT_SYMBOL(__ocelot_rmw_ix);
+EXPORT_SYMBOL_GPL(__ocelot_rmw_ix);
u32 ocelot_port_readl(struct ocelot_port *port, u32 reg)
{
@@ -58,7 +58,7 @@ u32 ocelot_port_readl(struct ocelot_port *port, u32 reg)
regmap_read(port->target, ocelot->map[target][reg & REG_MASK], &val);
return val;
}
-EXPORT_SYMBOL(ocelot_port_readl);
+EXPORT_SYMBOL_GPL(ocelot_port_readl);
void ocelot_port_writel(struct ocelot_port *port, u32 val, u32 reg)
{
@@ -69,7 +69,7 @@ void ocelot_port_writel(struct ocelot_port *port, u32 val, u32 reg)
regmap_write(port->target, ocelot->map[target][reg & REG_MASK], val);
}
-EXPORT_SYMBOL(ocelot_port_writel);
+EXPORT_SYMBOL_GPL(ocelot_port_writel);
void ocelot_port_rmwl(struct ocelot_port *port, u32 val, u32 mask, u32 reg)
{
@@ -77,7 +77,7 @@ void ocelot_port_rmwl(struct ocelot_port *port, u32 val, u32 mask, u32 reg)
ocelot_port_writel(port, (cur & (~mask)) | val, reg);
}
-EXPORT_SYMBOL(ocelot_port_rmwl);
+EXPORT_SYMBOL_GPL(ocelot_port_rmwl);
u32 __ocelot_target_read_ix(struct ocelot *ocelot, enum ocelot_target target,
u32 reg, u32 offset)
@@ -128,7 +128,7 @@ int ocelot_regfields_init(struct ocelot *ocelot,
return 0;
}
-EXPORT_SYMBOL(ocelot_regfields_init);
+EXPORT_SYMBOL_GPL(ocelot_regfields_init);
static struct regmap_config ocelot_regmap_config = {
.reg_bits = 32,
@@ -148,4 +148,4 @@ struct regmap *ocelot_regmap_init(struct ocelot *ocelot, struct resource *res)
return devm_regmap_init_mmio(ocelot->dev, regs, &ocelot_regmap_config);
}
-EXPORT_SYMBOL(ocelot_regmap_init);
+EXPORT_SYMBOL_GPL(ocelot_regmap_init);
diff --git a/drivers/net/ethernet/mscc/ocelot_net.c b/drivers/net/ethernet/mscc/ocelot_net.c
index 3e89e34f86d5..e9d260d84bf3 100644
--- a/drivers/net/ethernet/mscc/ocelot_net.c
+++ b/drivers/net/ethernet/mscc/ocelot_net.c
@@ -1298,6 +1298,7 @@ static int ocelot_netdevice_lag_leave(struct net_device *dev,
}
static int ocelot_netdevice_changeupper(struct net_device *dev,
+ struct net_device *brport_dev,
struct netdev_notifier_changeupper_info *info)
{
struct netlink_ext_ack *extack;
@@ -1307,11 +1308,11 @@ static int ocelot_netdevice_changeupper(struct net_device *dev,
if (netif_is_bridge_master(info->upper_dev)) {
if (info->linking)
- err = ocelot_netdevice_bridge_join(dev, dev,
+ err = ocelot_netdevice_bridge_join(dev, brport_dev,
info->upper_dev,
extack);
else
- err = ocelot_netdevice_bridge_leave(dev, dev,
+ err = ocelot_netdevice_bridge_leave(dev, brport_dev,
info->upper_dev);
}
if (netif_is_lag_master(info->upper_dev)) {
@@ -1346,7 +1347,7 @@ ocelot_netdevice_lag_changeupper(struct net_device *dev,
if (ocelot_port->bond != dev)
return NOTIFY_OK;
- err = ocelot_netdevice_changeupper(lower, info);
+ err = ocelot_netdevice_changeupper(lower, dev, info);
if (err)
return notifier_from_errno(err);
}
@@ -1385,7 +1386,7 @@ static int ocelot_netdevice_event(struct notifier_block *unused,
struct netdev_notifier_changeupper_info *info = ptr;
if (ocelot_netdevice_dev_check(dev))
- return ocelot_netdevice_changeupper(dev, info);
+ return ocelot_netdevice_changeupper(dev, dev, info);
if (netif_is_lag_master(dev))
return ocelot_netdevice_lag_changeupper(dev, info);
diff --git a/drivers/net/ethernet/natsemi/natsemi.c b/drivers/net/ethernet/natsemi/natsemi.c
index 51b4b25d15ad..84f7dbe9edff 100644
--- a/drivers/net/ethernet/natsemi/natsemi.c
+++ b/drivers/net/ethernet/natsemi/natsemi.c
@@ -819,7 +819,7 @@ static int natsemi_probe1(struct pci_dev *pdev, const struct pci_device_id *ent)
printk(version);
#endif
- i = pci_enable_device(pdev);
+ i = pcim_enable_device(pdev);
if (i) return i;
/* natsemi has a non-standard PM control register
@@ -852,7 +852,7 @@ static int natsemi_probe1(struct pci_dev *pdev, const struct pci_device_id *ent)
ioaddr = ioremap(iostart, iosize);
if (!ioaddr) {
i = -ENOMEM;
- goto err_ioremap;
+ goto err_pci_request_regions;
}
/* Work around the dropped serial bit. */
@@ -974,9 +974,6 @@ static int natsemi_probe1(struct pci_dev *pdev, const struct pci_device_id *ent)
err_register_netdev:
iounmap(ioaddr);
- err_ioremap:
- pci_release_regions(pdev);
-
err_pci_request_regions:
free_netdev(dev);
return i;
@@ -3241,7 +3238,6 @@ static void natsemi_remove1(struct pci_dev *pdev)
NATSEMI_REMOVE_FILE(pdev, dspcfg_workaround);
unregister_netdev (dev);
- pci_release_regions (pdev);
iounmap(ioaddr);
free_netdev (dev);
}
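pcim_enable_device() switches the device to managed (devres) PCI resources, which is why the explicit pci_release_regions() calls disappear from both the error path and natsemi_remove1(): once the enable is managed, regions requested with pci_request_regions() are released automatically on detach. A reduced sketch (probe body and name are illustrative):

/* Sketch: with a managed enable, failed probes and device removal
 * release PCI regions automatically; no manual unwind is needed.
 */
static int example_probe(struct pci_dev *pdev)
{
        int err;

        err = pcim_enable_device(pdev); /* managed from here on */
        if (err)
                return err;

        return pci_request_regions(pdev, "example");
}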
diff --git a/drivers/net/ethernet/neterion/vxge/vxge-main.c b/drivers/net/ethernet/neterion/vxge/vxge-main.c
index 82eef4c72f01..7abd13e69471 100644
--- a/drivers/net/ethernet/neterion/vxge/vxge-main.c
+++ b/drivers/net/ethernet/neterion/vxge/vxge-main.c
@@ -3512,13 +3512,13 @@ static void vxge_device_unregister(struct __vxge_hw_device *hldev)
kfree(vdev->vpaths);
- /* we are safe to free it now */
- free_netdev(dev);
-
vxge_debug_init(vdev->level_trace, "%s: ethernet device unregistered",
buf);
vxge_debug_entryexit(vdev->level_trace, "%s: %s:%d Exiting...", buf,
__func__, __LINE__);
+
+ /* we are safe to free it now */
+ free_netdev(dev);
}
/*
diff --git a/drivers/net/ethernet/netronome/nfp/flower/conntrack.c b/drivers/net/ethernet/netronome/nfp/flower/conntrack.c
index 273d529d43c2..062bb2db68bf 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/conntrack.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/conntrack.c
@@ -1141,20 +1141,7 @@ int nfp_fl_ct_del_flow(struct nfp_fl_ct_map_entry *ct_map_ent)
nfp_fl_ct_clean_flow_entry(ct_entry);
kfree(ct_map_ent);
- /* If this is the last pre_ct_rule it means that it is
- * very likely that the nft table will be cleaned up next,
- * as this happens on the removal of the last act_ct flow.
- * However we cannot deregister the callback on the removal
- * of the last nft flow as this runs into a deadlock situation.
- * So deregister the callback on removal of the last pre_ct flow
- * and remove any remaining nft flow entries. We also cannot
- * save this state and delete the callback later since the
- * nft table would already have been freed at that time.
- */
if (!zt->pre_ct_count) {
- nf_flow_table_offload_del_cb(zt->nft,
- nfp_fl_ct_handle_nft_flow,
- zt);
zt->nft = NULL;
nfp_fl_ct_clean_nft_entries(zt);
}
@@ -1172,6 +1159,7 @@ int nfp_fl_ct_del_flow(struct nfp_fl_ct_map_entry *ct_map_ent)
nfp_ct_map_params);
nfp_fl_ct_clean_flow_entry(ct_map_ent->ct_entry);
kfree(ct_map_ent);
+ break;
default:
break;
}
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
index 1b482446536d..8803faadd302 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
@@ -286,6 +286,8 @@ nfp_net_get_link_ksettings(struct net_device *netdev,
/* Init to unknowns */
ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE);
+ ethtool_link_ksettings_add_link_mode(cmd, supported, Pause);
+ ethtool_link_ksettings_add_link_mode(cmd, advertising, Pause);
cmd->base.port = PORT_OTHER;
cmd->base.speed = SPEED_UNKNOWN;
cmd->base.duplex = DUPLEX_UNKNOWN;
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_lif.c b/drivers/net/ethernet/pensando/ionic/ionic_lif.c
index af3a5368529c..e795fa63ca12 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_lif.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_lif.c
@@ -29,7 +29,7 @@ static const u8 ionic_qtype_versions[IONIC_QTYPE_MAX] = {
*/
};
-static void ionic_lif_rx_mode(struct ionic_lif *lif, unsigned int rx_mode);
+static void ionic_lif_rx_mode(struct ionic_lif *lif);
static int ionic_lif_addr_add(struct ionic_lif *lif, const u8 *addr);
static int ionic_lif_addr_del(struct ionic_lif *lif, const u8 *addr);
static void ionic_link_status_check(struct ionic_lif *lif);
@@ -53,7 +53,19 @@ static void ionic_dim_work(struct work_struct *work)
cur_moder = net_dim_get_rx_moderation(dim->mode, dim->profile_ix);
qcq = container_of(dim, struct ionic_qcq, dim);
new_coal = ionic_coal_usec_to_hw(qcq->q.lif->ionic, cur_moder.usec);
- qcq->intr.dim_coal_hw = new_coal ? new_coal : 1;
+ new_coal = new_coal ? new_coal : 1;
+
+ if (qcq->intr.dim_coal_hw != new_coal) {
+ unsigned int qi = qcq->cq.bound_q->index;
+ struct ionic_lif *lif = qcq->q.lif;
+
+ qcq->intr.dim_coal_hw = new_coal;
+
+ ionic_intr_coal_init(lif->ionic->idev.intr_ctrl,
+ lif->rxqcqs[qi]->intr.index,
+ qcq->intr.dim_coal_hw);
+ }
+
dim->state = DIM_START_MEASURE;
}
@@ -77,7 +89,7 @@ static void ionic_lif_deferred_work(struct work_struct *work)
switch (w->type) {
case IONIC_DW_TYPE_RX_MODE:
- ionic_lif_rx_mode(lif, w->rx_mode);
+ ionic_lif_rx_mode(lif);
break;
case IONIC_DW_TYPE_RX_ADDR_ADD:
ionic_lif_addr_add(lif, w->addr);
@@ -1301,10 +1313,8 @@ static int ionic_lif_addr_del(struct ionic_lif *lif, const u8 *addr)
return 0;
}
-static int ionic_lif_addr(struct ionic_lif *lif, const u8 *addr, bool add,
- bool can_sleep)
+static int ionic_lif_addr(struct ionic_lif *lif, const u8 *addr, bool add)
{
- struct ionic_deferred_work *work;
unsigned int nmfilters;
unsigned int nufilters;
@@ -1330,97 +1340,46 @@ static int ionic_lif_addr(struct ionic_lif *lif, const u8 *addr, bool add,
lif->nucast--;
}
- if (!can_sleep) {
- work = kzalloc(sizeof(*work), GFP_ATOMIC);
- if (!work)
- return -ENOMEM;
- work->type = add ? IONIC_DW_TYPE_RX_ADDR_ADD :
- IONIC_DW_TYPE_RX_ADDR_DEL;
- memcpy(work->addr, addr, ETH_ALEN);
- netdev_dbg(lif->netdev, "deferred: rx_filter %s %pM\n",
- add ? "add" : "del", addr);
- ionic_lif_deferred_enqueue(&lif->deferred, work);
- } else {
- netdev_dbg(lif->netdev, "rx_filter %s %pM\n",
- add ? "add" : "del", addr);
- if (add)
- return ionic_lif_addr_add(lif, addr);
- else
- return ionic_lif_addr_del(lif, addr);
- }
+ netdev_dbg(lif->netdev, "rx_filter %s %pM\n",
+ add ? "add" : "del", addr);
+ if (add)
+ return ionic_lif_addr_add(lif, addr);
+ else
+ return ionic_lif_addr_del(lif, addr);
return 0;
}
static int ionic_addr_add(struct net_device *netdev, const u8 *addr)
{
- return ionic_lif_addr(netdev_priv(netdev), addr, ADD_ADDR, CAN_SLEEP);
-}
-
-static int ionic_ndo_addr_add(struct net_device *netdev, const u8 *addr)
-{
- return ionic_lif_addr(netdev_priv(netdev), addr, ADD_ADDR, CAN_NOT_SLEEP);
+ return ionic_lif_addr(netdev_priv(netdev), addr, ADD_ADDR);
}
static int ionic_addr_del(struct net_device *netdev, const u8 *addr)
{
- return ionic_lif_addr(netdev_priv(netdev), addr, DEL_ADDR, CAN_SLEEP);
+ return ionic_lif_addr(netdev_priv(netdev), addr, DEL_ADDR);
}
-static int ionic_ndo_addr_del(struct net_device *netdev, const u8 *addr)
+static void ionic_lif_rx_mode(struct ionic_lif *lif)
{
- return ionic_lif_addr(netdev_priv(netdev), addr, DEL_ADDR, CAN_NOT_SLEEP);
-}
-
-static void ionic_lif_rx_mode(struct ionic_lif *lif, unsigned int rx_mode)
-{
- struct ionic_admin_ctx ctx = {
- .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
- .cmd.rx_mode_set = {
- .opcode = IONIC_CMD_RX_MODE_SET,
- .lif_index = cpu_to_le16(lif->index),
- .rx_mode = cpu_to_le16(rx_mode),
- },
- };
+ struct net_device *netdev = lif->netdev;
+ unsigned int nfilters;
+ unsigned int nd_flags;
char buf[128];
- int err;
+ u16 rx_mode;
int i;
#define REMAIN(__x) (sizeof(buf) - (__x))
- i = scnprintf(buf, sizeof(buf), "rx_mode 0x%04x -> 0x%04x:",
- lif->rx_mode, rx_mode);
- if (rx_mode & IONIC_RX_MODE_F_UNICAST)
- i += scnprintf(&buf[i], REMAIN(i), " RX_MODE_F_UNICAST");
- if (rx_mode & IONIC_RX_MODE_F_MULTICAST)
- i += scnprintf(&buf[i], REMAIN(i), " RX_MODE_F_MULTICAST");
- if (rx_mode & IONIC_RX_MODE_F_BROADCAST)
- i += scnprintf(&buf[i], REMAIN(i), " RX_MODE_F_BROADCAST");
- if (rx_mode & IONIC_RX_MODE_F_PROMISC)
- i += scnprintf(&buf[i], REMAIN(i), " RX_MODE_F_PROMISC");
- if (rx_mode & IONIC_RX_MODE_F_ALLMULTI)
- i += scnprintf(&buf[i], REMAIN(i), " RX_MODE_F_ALLMULTI");
- netdev_dbg(lif->netdev, "lif%d %s\n", lif->index, buf);
-
- err = ionic_adminq_post_wait(lif, &ctx);
- if (err)
- netdev_warn(lif->netdev, "set rx_mode 0x%04x failed: %d\n",
- rx_mode, err);
- else
- lif->rx_mode = rx_mode;
-}
+ mutex_lock(&lif->config_lock);
-static void ionic_set_rx_mode(struct net_device *netdev, bool can_sleep)
-{
- struct ionic_lif *lif = netdev_priv(netdev);
- struct ionic_deferred_work *work;
- unsigned int nfilters;
- unsigned int rx_mode;
+ /* grab the flags once for local use */
+ nd_flags = netdev->flags;
rx_mode = IONIC_RX_MODE_F_UNICAST;
- rx_mode |= (netdev->flags & IFF_MULTICAST) ? IONIC_RX_MODE_F_MULTICAST : 0;
- rx_mode |= (netdev->flags & IFF_BROADCAST) ? IONIC_RX_MODE_F_BROADCAST : 0;
- rx_mode |= (netdev->flags & IFF_PROMISC) ? IONIC_RX_MODE_F_PROMISC : 0;
- rx_mode |= (netdev->flags & IFF_ALLMULTI) ? IONIC_RX_MODE_F_ALLMULTI : 0;
+ rx_mode |= (nd_flags & IFF_MULTICAST) ? IONIC_RX_MODE_F_MULTICAST : 0;
+ rx_mode |= (nd_flags & IFF_BROADCAST) ? IONIC_RX_MODE_F_BROADCAST : 0;
+ rx_mode |= (nd_flags & IFF_PROMISC) ? IONIC_RX_MODE_F_PROMISC : 0;
+ rx_mode |= (nd_flags & IFF_ALLMULTI) ? IONIC_RX_MODE_F_ALLMULTI : 0;
/* sync unicast addresses
* next check to see if we're in an overflow state
@@ -1429,49 +1388,83 @@ static void ionic_set_rx_mode(struct net_device *netdev, bool can_sleep)
* we remove our overflow flag and check the netdev flags
* to see if we can disable NIC PROMISC
*/
- if (can_sleep)
- __dev_uc_sync(netdev, ionic_addr_add, ionic_addr_del);
- else
- __dev_uc_sync(netdev, ionic_ndo_addr_add, ionic_ndo_addr_del);
+ __dev_uc_sync(netdev, ionic_addr_add, ionic_addr_del);
nfilters = le32_to_cpu(lif->identity->eth.max_ucast_filters);
if (netdev_uc_count(netdev) + 1 > nfilters) {
rx_mode |= IONIC_RX_MODE_F_PROMISC;
lif->uc_overflow = true;
} else if (lif->uc_overflow) {
lif->uc_overflow = false;
- if (!(netdev->flags & IFF_PROMISC))
+ if (!(nd_flags & IFF_PROMISC))
rx_mode &= ~IONIC_RX_MODE_F_PROMISC;
}
/* same for multicast */
- if (can_sleep)
- __dev_mc_sync(netdev, ionic_addr_add, ionic_addr_del);
- else
- __dev_mc_sync(netdev, ionic_ndo_addr_add, ionic_ndo_addr_del);
+ __dev_mc_sync(netdev, ionic_addr_add, ionic_addr_del);
nfilters = le32_to_cpu(lif->identity->eth.max_mcast_filters);
if (netdev_mc_count(netdev) > nfilters) {
rx_mode |= IONIC_RX_MODE_F_ALLMULTI;
lif->mc_overflow = true;
} else if (lif->mc_overflow) {
lif->mc_overflow = false;
- if (!(netdev->flags & IFF_ALLMULTI))
+ if (!(nd_flags & IFF_ALLMULTI))
rx_mode &= ~IONIC_RX_MODE_F_ALLMULTI;
}
+ i = scnprintf(buf, sizeof(buf), "rx_mode 0x%04x -> 0x%04x:",
+ lif->rx_mode, rx_mode);
+ if (rx_mode & IONIC_RX_MODE_F_UNICAST)
+ i += scnprintf(&buf[i], REMAIN(i), " RX_MODE_F_UNICAST");
+ if (rx_mode & IONIC_RX_MODE_F_MULTICAST)
+ i += scnprintf(&buf[i], REMAIN(i), " RX_MODE_F_MULTICAST");
+ if (rx_mode & IONIC_RX_MODE_F_BROADCAST)
+ i += scnprintf(&buf[i], REMAIN(i), " RX_MODE_F_BROADCAST");
+ if (rx_mode & IONIC_RX_MODE_F_PROMISC)
+ i += scnprintf(&buf[i], REMAIN(i), " RX_MODE_F_PROMISC");
+ if (rx_mode & IONIC_RX_MODE_F_ALLMULTI)
+ i += scnprintf(&buf[i], REMAIN(i), " RX_MODE_F_ALLMULTI");
+ if (rx_mode & IONIC_RX_MODE_F_RDMA_SNIFFER)
+ i += scnprintf(&buf[i], REMAIN(i), " RX_MODE_F_RDMA_SNIFFER");
+ netdev_dbg(netdev, "lif%d %s\n", lif->index, buf);
+
if (lif->rx_mode != rx_mode) {
- if (!can_sleep) {
- work = kzalloc(sizeof(*work), GFP_ATOMIC);
- if (!work) {
- netdev_err(lif->netdev, "rxmode change dropped\n");
- return;
- }
- work->type = IONIC_DW_TYPE_RX_MODE;
- work->rx_mode = rx_mode;
- netdev_dbg(lif->netdev, "deferred: rx_mode\n");
- ionic_lif_deferred_enqueue(&lif->deferred, work);
- } else {
- ionic_lif_rx_mode(lif, rx_mode);
+ struct ionic_admin_ctx ctx = {
+ .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
+ .cmd.rx_mode_set = {
+ .opcode = IONIC_CMD_RX_MODE_SET,
+ .lif_index = cpu_to_le16(lif->index),
+ },
+ };
+ int err;
+
+ ctx.cmd.rx_mode_set.rx_mode = cpu_to_le16(rx_mode);
+ err = ionic_adminq_post_wait(lif, &ctx);
+ if (err)
+ netdev_warn(netdev, "set rx_mode 0x%04x failed: %d\n",
+ rx_mode, err);
+ else
+ lif->rx_mode = rx_mode;
+ }
+
+ mutex_unlock(&lif->config_lock);
+}
+
+static void ionic_set_rx_mode(struct net_device *netdev, bool can_sleep)
+{
+ struct ionic_lif *lif = netdev_priv(netdev);
+ struct ionic_deferred_work *work;
+
+ if (!can_sleep) {
+ work = kzalloc(sizeof(*work), GFP_ATOMIC);
+ if (!work) {
+ netdev_err(lif->netdev, "rxmode change dropped\n");
+ return;
}
+ work->type = IONIC_DW_TYPE_RX_MODE;
+ netdev_dbg(lif->netdev, "deferred: rx_mode\n");
+ ionic_lif_deferred_enqueue(&lif->deferred, work);
+ } else {
+ ionic_lif_rx_mode(lif);
}
}
@@ -3058,6 +3051,7 @@ void ionic_lif_deinit(struct ionic_lif *lif)
ionic_lif_qcq_deinit(lif, lif->notifyqcq);
ionic_lif_qcq_deinit(lif, lif->adminqcq);
+ mutex_destroy(&lif->config_lock);
mutex_destroy(&lif->queue_lock);
ionic_lif_reset(lif);
}
@@ -3185,7 +3179,7 @@ static int ionic_station_set(struct ionic_lif *lif)
*/
if (!ether_addr_equal(ctx.comp.lif_getattr.mac,
netdev->dev_addr))
- ionic_lif_addr(lif, netdev->dev_addr, ADD_ADDR, CAN_SLEEP);
+ ionic_lif_addr(lif, netdev->dev_addr, ADD_ADDR);
} else {
/* Update the netdev mac with the device's mac */
memcpy(addr.sa_data, ctx.comp.lif_getattr.mac, netdev->addr_len);
@@ -3202,7 +3196,7 @@ static int ionic_station_set(struct ionic_lif *lif)
netdev_dbg(lif->netdev, "adding station MAC addr %pM\n",
netdev->dev_addr);
- ionic_lif_addr(lif, netdev->dev_addr, ADD_ADDR, CAN_SLEEP);
+ ionic_lif_addr(lif, netdev->dev_addr, ADD_ADDR);
return 0;
}
@@ -3225,6 +3219,7 @@ int ionic_lif_init(struct ionic_lif *lif)
lif->hw_index = le16_to_cpu(comp.hw_index);
mutex_init(&lif->queue_lock);
+ mutex_init(&lif->config_lock);
/* now that we have the hw_index we can figure out our doorbell page */
lif->dbid_count = le32_to_cpu(lif->ionic->ident.dev.ndbpgs_per_lif);
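A minimal userspace sketch of the rx_mode policy consolidated above: unicast or multicast filter-table overflow falls back to PROMISC/ALLMULTI, and the flag drops again once the table fits. Names, limits, and bit values below are illustrative, not the driver's:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define RX_F_UNICAST   0x01
#define RX_F_MULTICAST 0x02
#define RX_F_PROMISC   0x04
#define RX_F_ALLMULTI  0x08

static uint16_t pick_rx_mode(unsigned int uc_count, unsigned int uc_max,
                             unsigned int mc_count, unsigned int mc_max,
                             bool want_promisc, bool want_allmulti)
{
        uint16_t mode = RX_F_UNICAST | RX_F_MULTICAST;

        /* +1 mirrors the driver reserving one slot for the station MAC */
        if (want_promisc || uc_count + 1 > uc_max)
                mode |= RX_F_PROMISC;
        if (want_allmulti || mc_count > mc_max)
                mode |= RX_F_ALLMULTI;
        return mode;
}

int main(void)
{
        /* 32 learned unicast addresses plus the station MAC overflow a
         * 32-entry table, so PROMISC is forced: prints 0x0007
         */
        printf("0x%04x\n", pick_rx_mode(32, 32, 4, 64, false, false));
        return 0;
}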
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_lif.h b/drivers/net/ethernet/pensando/ionic/ionic_lif.h
index 346506f01715..69ab59fedb6c 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_lif.h
+++ b/drivers/net/ethernet/pensando/ionic/ionic_lif.h
@@ -108,7 +108,6 @@ struct ionic_deferred_work {
struct list_head list;
enum ionic_deferred_work_type type;
union {
- unsigned int rx_mode;
u8 addr[ETH_ALEN];
u8 fw_status;
};
@@ -179,6 +178,7 @@ struct ionic_lif {
unsigned int index;
unsigned int hw_index;
struct mutex queue_lock; /* lock for queue structures */
+ struct mutex config_lock; /* lock for config actions */
spinlock_t adminq_lock; /* lock for AdminQ operations */
struct ionic_qcq *adminqcq;
struct ionic_qcq *notifyqcq;
@@ -199,7 +199,7 @@ struct ionic_lif {
unsigned int nrxq_descs;
u32 rx_copybreak;
u64 rxq_features;
- unsigned int rx_mode;
+ u16 rx_mode;
u64 hw_features;
bool registered;
bool mc_overflow;
@@ -302,7 +302,7 @@ int ionic_lif_identify(struct ionic *ionic, u8 lif_type,
int ionic_lif_size(struct ionic *ionic);
#if IS_ENABLED(CONFIG_PTP_1588_CLOCK)
-int ionic_lif_hwstamp_replay(struct ionic_lif *lif);
+void ionic_lif_hwstamp_replay(struct ionic_lif *lif);
int ionic_lif_hwstamp_set(struct ionic_lif *lif, struct ifreq *ifr);
int ionic_lif_hwstamp_get(struct ionic_lif *lif, struct ifreq *ifr);
ktime_t ionic_lif_phc_ktime(struct ionic_lif *lif, u64 counter);
@@ -311,10 +311,7 @@ void ionic_lif_unregister_phc(struct ionic_lif *lif);
void ionic_lif_alloc_phc(struct ionic_lif *lif);
void ionic_lif_free_phc(struct ionic_lif *lif);
#else
-static inline int ionic_lif_hwstamp_replay(struct ionic_lif *lif)
-{
- return -EOPNOTSUPP;
-}
+static inline void ionic_lif_hwstamp_replay(struct ionic_lif *lif) {}
static inline int ionic_lif_hwstamp_set(struct ionic_lif *lif, struct ifreq *ifr)
{
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_phc.c b/drivers/net/ethernet/pensando/ionic/ionic_phc.c
index a87c87e86aef..6e2403c71608 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_phc.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_phc.c
@@ -188,6 +188,9 @@ int ionic_lif_hwstamp_set(struct ionic_lif *lif, struct ifreq *ifr)
struct hwtstamp_config config;
int err;
+ if (!lif->phc || !lif->phc->ptp)
+ return -EOPNOTSUPP;
+
if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
return -EFAULT;
@@ -203,15 +206,16 @@ int ionic_lif_hwstamp_set(struct ionic_lif *lif, struct ifreq *ifr)
return 0;
}
-int ionic_lif_hwstamp_replay(struct ionic_lif *lif)
+void ionic_lif_hwstamp_replay(struct ionic_lif *lif)
{
int err;
+ if (!lif->phc || !lif->phc->ptp)
+ return;
+
err = ionic_lif_hwstamp_set_ts_config(lif, NULL);
if (err)
netdev_info(lif->netdev, "hwstamp replay failed: %d\n", err);
-
- return err;
}
int ionic_lif_hwstamp_get(struct ionic_lif *lif, struct ifreq *ifr)
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_txrx.c b/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
index 08934888575c..08870190e4d2 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
@@ -274,12 +274,11 @@ static void ionic_rx_clean(struct ionic_queue *q,
}
}
- if (likely(netdev->features & NETIF_F_RXCSUM)) {
- if (comp->csum_flags & IONIC_RXQ_COMP_CSUM_F_CALC) {
- skb->ip_summed = CHECKSUM_COMPLETE;
- skb->csum = (__force __wsum)le16_to_cpu(comp->csum);
- stats->csum_complete++;
- }
+ if (likely(netdev->features & NETIF_F_RXCSUM) &&
+ (comp->csum_flags & IONIC_RXQ_COMP_CSUM_F_CALC)) {
+ skb->ip_summed = CHECKSUM_COMPLETE;
+ skb->csum = (__force __wsum)le16_to_cpu(comp->csum);
+ stats->csum_complete++;
} else {
stats->csum_none++;
}
@@ -451,11 +450,12 @@ void ionic_rx_empty(struct ionic_queue *q)
q->tail_idx = 0;
}
-static void ionic_dim_update(struct ionic_qcq *qcq)
+static void ionic_dim_update(struct ionic_qcq *qcq, int napi_mode)
{
struct dim_sample dim_sample;
struct ionic_lif *lif;
unsigned int qi;
+ u64 pkts, bytes;
if (!qcq->intr.dim_coal_hw)
return;
@@ -463,14 +463,23 @@ static void ionic_dim_update(struct ionic_qcq *qcq)
lif = qcq->q.lif;
qi = qcq->cq.bound_q->index;
- ionic_intr_coal_init(lif->ionic->idev.intr_ctrl,
- lif->rxqcqs[qi]->intr.index,
- qcq->intr.dim_coal_hw);
+ switch (napi_mode) {
+ case IONIC_LIF_F_TX_DIM_INTR:
+ pkts = lif->txqstats[qi].pkts;
+ bytes = lif->txqstats[qi].bytes;
+ break;
+ case IONIC_LIF_F_RX_DIM_INTR:
+ pkts = lif->rxqstats[qi].pkts;
+ bytes = lif->rxqstats[qi].bytes;
+ break;
+ default:
+ pkts = lif->txqstats[qi].pkts + lif->rxqstats[qi].pkts;
+ bytes = lif->txqstats[qi].bytes + lif->rxqstats[qi].bytes;
+ break;
+ }
dim_update_sample(qcq->cq.bound_intr->rearm_count,
- lif->txqstats[qi].pkts,
- lif->txqstats[qi].bytes,
- &dim_sample);
+ pkts, bytes, &dim_sample);
net_dim(&qcq->dim, dim_sample);
}
@@ -491,7 +500,7 @@ int ionic_tx_napi(struct napi_struct *napi, int budget)
ionic_tx_service, NULL, NULL);
if (work_done < budget && napi_complete_done(napi, work_done)) {
- ionic_dim_update(qcq);
+ ionic_dim_update(qcq, IONIC_LIF_F_TX_DIM_INTR);
flags |= IONIC_INTR_CRED_UNMASK;
cq->bound_intr->rearm_count++;
}
@@ -530,7 +539,7 @@ int ionic_rx_napi(struct napi_struct *napi, int budget)
ionic_rx_fill(cq->bound_q);
if (work_done < budget && napi_complete_done(napi, work_done)) {
- ionic_dim_update(qcq);
+ ionic_dim_update(qcq, IONIC_LIF_F_RX_DIM_INTR);
flags |= IONIC_INTR_CRED_UNMASK;
cq->bound_intr->rearm_count++;
}
@@ -576,7 +585,7 @@ int ionic_txrx_napi(struct napi_struct *napi, int budget)
ionic_rx_fill(rxcq->bound_q);
if (rx_work_done < budget && napi_complete_done(napi, rx_work_done)) {
- ionic_dim_update(qcq);
+ ionic_dim_update(qcq, 0);
flags |= IONIC_INTR_CRED_UNMASK;
rxcq->bound_intr->rearm_count++;
}
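The napi_mode switch above feeds direction-specific packet/byte counters into DIM, so a TX-only or RX-only NAPI no longer samples the other direction's traffic. A hedged userspace reduction of that selection (the stats layout and mode values are illustrative stand-ins for the driver's):

#include <stdint.h>
#include <stdio.h>

enum dim_dir { DIM_TX, DIM_RX, DIM_TXRX };

struct qstats { uint64_t pkts, bytes; };

static struct qstats pick_sample(enum dim_dir dir,
                                 const struct qstats *tx,
                                 const struct qstats *rx)
{
        struct qstats s;

        switch (dir) {
        case DIM_TX:            /* TX NAPI: TX counters only */
                s = *tx;
                break;
        case DIM_RX:            /* RX NAPI: RX counters only */
                s = *rx;
                break;
        default:                /* shared interrupt: sum both */
                s.pkts = tx->pkts + rx->pkts;
                s.bytes = tx->bytes + rx->bytes;
                break;
        }
        return s;
}

int main(void)
{
        struct qstats tx = { 100, 64000 }, rx = { 300, 450000 };

        /* shared-interrupt case sums both directions: prints 400 */
        printf("%llu\n",
               (unsigned long long)pick_sample(DIM_TXRX, &tx, &rx).pkts);
        return 0;
}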
diff --git a/drivers/net/ethernet/qlogic/qed/qed_ll2.c b/drivers/net/ethernet/qlogic/qed/qed_ll2.c
index 02a4610d9330..c46a7f756ed5 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_ll2.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_ll2.c
@@ -327,6 +327,9 @@ static int qed_ll2_txq_completion(struct qed_hwfn *p_hwfn, void *p_cookie)
unsigned long flags;
int rc = -EINVAL;
+ if (!p_ll2_conn)
+ return rc;
+
spin_lock_irqsave(&p_tx->lock, flags);
if (p_tx->b_completing_packet) {
rc = -EBUSY;
@@ -500,7 +503,16 @@ static int qed_ll2_rxq_completion(struct qed_hwfn *p_hwfn, void *cookie)
unsigned long flags = 0;
int rc = 0;
+ if (!p_ll2_conn)
+ return rc;
+
spin_lock_irqsave(&p_rx->lock, flags);
+
+ if (!QED_LL2_RX_REGISTERED(p_ll2_conn)) {
+ spin_unlock_irqrestore(&p_rx->lock, flags);
+ return 0;
+ }
+
cq_new_idx = le16_to_cpu(*p_rx->p_fw_cons);
cq_old_idx = qed_chain_get_cons_idx(&p_rx->rcq_chain);
@@ -821,6 +833,9 @@ static int qed_ll2_lb_rxq_completion(struct qed_hwfn *p_hwfn, void *p_cookie)
struct qed_ll2_info *p_ll2_conn = (struct qed_ll2_info *)p_cookie;
int rc;
+ if (!p_ll2_conn)
+ return 0;
+
if (!QED_LL2_RX_REGISTERED(p_ll2_conn))
return 0;
@@ -844,6 +859,9 @@ static int qed_ll2_lb_txq_completion(struct qed_hwfn *p_hwfn, void *p_cookie)
u16 new_idx = 0, num_bds = 0;
int rc;
+ if (!p_ll2_conn)
+ return 0;
+
if (!QED_LL2_TX_REGISTERED(p_ll2_conn))
return 0;
@@ -1728,6 +1746,8 @@ int qed_ll2_post_rx_buffer(void *cxt,
if (!p_ll2_conn)
return -EINVAL;
p_rx = &p_ll2_conn->rx_queue;
+ if (!p_rx->set_prod_addr)
+ return -EIO;
spin_lock_irqsave(&p_rx->lock, flags);
if (!list_empty(&p_rx->free_descq))
diff --git a/drivers/net/ethernet/qlogic/qed/qed_rdma.c b/drivers/net/ethernet/qlogic/qed/qed_rdma.c
index da864d12916b..4f4b79250a2b 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_rdma.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_rdma.c
@@ -1285,8 +1285,7 @@ qed_rdma_create_qp(void *rdma_cxt,
if (!rdma_cxt || !in_params || !out_params ||
!p_hwfn->p_rdma_info->active) {
- DP_ERR(p_hwfn->cdev,
- "qed roce create qp failed due to NULL entry (rdma_cxt=%p, in=%p, out=%p, roce_info=?\n",
+ pr_err("qed roce create qp failed due to NULL entry (rdma_cxt=%p, in=%p, out=%p, roce_info=?\n",
rdma_cxt, in_params, out_params);
return NULL;
}
diff --git a/drivers/net/ethernet/qlogic/qede/qede.h b/drivers/net/ethernet/qlogic/qede/qede.h
index 2e62a2c4eb63..5630008f38b7 100644
--- a/drivers/net/ethernet/qlogic/qede/qede.h
+++ b/drivers/net/ethernet/qlogic/qede/qede.h
@@ -501,6 +501,7 @@ struct qede_fastpath {
#define QEDE_SP_HW_ERR 4
#define QEDE_SP_ARFS_CONFIG 5
#define QEDE_SP_AER 7
+#define QEDE_SP_DISABLE 8
#ifdef CONFIG_RFS_ACCEL
int qede_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
diff --git a/drivers/net/ethernet/qlogic/qede/qede_filter.c b/drivers/net/ethernet/qlogic/qede/qede_filter.c
index c59b72c90293..a2e4dfb5cb44 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_filter.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_filter.c
@@ -831,7 +831,7 @@ int qede_configure_vlan_filters(struct qede_dev *edev)
int qede_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid)
{
struct qede_dev *edev = netdev_priv(dev);
- struct qede_vlan *vlan = NULL;
+ struct qede_vlan *vlan;
int rc = 0;
DP_VERBOSE(edev, NETIF_MSG_IFDOWN, "Removing vlan 0x%04x\n", vid);
@@ -842,7 +842,7 @@ int qede_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid)
if (vlan->vid == vid)
break;
- if (!vlan || (vlan->vid != vid)) {
+ if (list_entry_is_head(vlan, &edev->vlan_list, list)) {
DP_VERBOSE(edev, (NETIF_MSG_IFUP | NETIF_MSG_IFDOWN),
"Vlan isn't configured\n");
goto out;
diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c
index 01ac1e93d27a..7c6064baeba2 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_main.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_main.c
@@ -1009,6 +1009,13 @@ static void qede_sp_task(struct work_struct *work)
struct qede_dev *edev = container_of(work, struct qede_dev,
sp_task.work);
+ /* Disable execution of this deferred work once
+ * qede removal is in progress; this stops any future
+ * scheduling of sp_task.
+ */
+ if (test_bit(QEDE_SP_DISABLE, &edev->sp_flags))
+ return;
+
/* The locking scheme depends on the specific flag:
* In case of QEDE_SP_RECOVERY, acquiring the RTNL lock is required to
* ensure that ongoing flows are ended and new ones are not started.
@@ -1300,6 +1307,7 @@ static void __qede_remove(struct pci_dev *pdev, enum qede_remove_mode mode)
qede_rdma_dev_remove(edev, (mode == QEDE_REMOVE_RECOVERY));
if (mode != QEDE_REMOVE_RECOVERY) {
+ set_bit(QEDE_SP_DISABLE, &edev->sp_flags);
unregister_netdev(ndev);
cancel_delayed_work_sync(&edev->sp_task);
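The QEDE_SP_DISABLE bit above fences the worker: the flag is set before cancel_delayed_work_sync() so an already-running or re-armed sp_task exits immediately instead of touching a device that is being removed. A tiny sketch of the same pattern, with userspace atomics standing in for the kernel's set_bit()/test_bit():

#include <stdatomic.h>
#include <stdio.h>

static atomic_int sp_disable;           /* stands in for the sp_flags bit */

static void sp_task(void)
{
        if (atomic_load(&sp_disable))   /* removal in progress: bail out */
                return;
        puts("sp_task: servicing device");
}

int main(void)
{
        sp_task();                      /* normal run: prints */
        atomic_store(&sp_disable, 1);   /* removal path sets the flag */
        sp_task();                      /* fenced: no output */
        return 0;
}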
diff --git a/drivers/net/ethernet/qlogic/qla3xxx.c b/drivers/net/ethernet/qlogic/qla3xxx.c
index 2376b2729633..c00ad57575ea 100644
--- a/drivers/net/ethernet/qlogic/qla3xxx.c
+++ b/drivers/net/ethernet/qlogic/qla3xxx.c
@@ -154,7 +154,7 @@ static int ql_wait_for_drvr_lock(struct ql3_adapter *qdev)
"driver lock acquired\n");
return 1;
}
- ssleep(1);
+ mdelay(1000);
} while (++i < 10);
netdev_err(qdev->ndev, "Timed out waiting for driver lock...\n");
@@ -3274,7 +3274,7 @@ static int ql_adapter_reset(struct ql3_adapter *qdev)
if ((value & ISP_CONTROL_SR) == 0)
break;
- ssleep(1);
+ mdelay(1000);
} while ((--max_wait_time));
/*
@@ -3310,7 +3310,7 @@ static int ql_adapter_reset(struct ql3_adapter *qdev)
ispControlStatus);
if ((value & ISP_CONTROL_FSR) == 0)
break;
- ssleep(1);
+ mdelay(1000);
} while ((--max_wait_time));
}
if (max_wait_time == 0)
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
index d8882d0b6b49..d51bac7ba5af 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
@@ -3156,8 +3156,10 @@ int qlcnic_83xx_flash_read32(struct qlcnic_adapter *adapter, u32 flash_addr,
indirect_addr = QLC_83XX_FLASH_DIRECT_DATA(addr);
ret = QLCRD32(adapter, indirect_addr, &err);
- if (err == -EIO)
+ if (err == -EIO) {
+ qlcnic_83xx_unlock_flash(adapter);
return err;
+ }
word = ret;
*(u32 *)p_data = word;
diff --git a/drivers/net/ethernet/qualcomm/emac/emac.c b/drivers/net/ethernet/qualcomm/emac/emac.c
index 8543bf3c3484..ad655f0a4965 100644
--- a/drivers/net/ethernet/qualcomm/emac/emac.c
+++ b/drivers/net/ethernet/qualcomm/emac/emac.c
@@ -735,12 +735,13 @@ static int emac_remove(struct platform_device *pdev)
put_device(&adpt->phydev->mdio.dev);
mdiobus_unregister(adpt->mii_bus);
- free_netdev(netdev);
if (adpt->phy.digital)
iounmap(adpt->phy.digital);
iounmap(adpt->phy.base);
+ free_netdev(netdev);
+
return 0;
}
diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c
index f744557c33a3..4d8e337f5085 100644
--- a/drivers/net/ethernet/realtek/r8169_main.c
+++ b/drivers/net/ethernet/realtek/r8169_main.c
@@ -3502,12 +3502,16 @@ static void rtl_hw_start_8106(struct rtl8169_private *tp)
RTL_W8(tp, MCU, RTL_R8(tp, MCU) | EN_NDP | EN_OOB_RESET);
RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) & ~PFM_EN);
+ /* The default value is 0x13. Change it to 0x2f */
+ rtl_csi_access_enable(tp, 0x2f);
+
rtl_eri_write(tp, 0x1d0, ERIAR_MASK_0011, 0x0000);
/* disable EEE */
rtl_eri_write(tp, 0x1b0, ERIAR_MASK_0011, 0x0000);
rtl_pcie_state_l2l3_disable(tp);
+ rtl_hw_aspm_clkreq_enable(tp, true);
}
DECLARE_RTL_COND(rtl_mac_ocp_e00e_cond)
@@ -5084,7 +5088,8 @@ static int r8169_mdio_register(struct rtl8169_private *tp)
new_bus->priv = tp;
new_bus->parent = &pdev->dev;
new_bus->irq[0] = PHY_MAC_INTERRUPT;
- snprintf(new_bus->id, MII_BUS_ID_SIZE, "r8169-%x", pci_dev_id(pdev));
+ snprintf(new_bus->id, MII_BUS_ID_SIZE, "r8169-%x-%x",
+ pci_domain_nr(pdev->bus), pci_dev_id(pdev));
new_bus->read = r8169_mdio_read_reg;
new_bus->write = r8169_mdio_write_reg;
diff --git a/drivers/net/ethernet/renesas/ravb.h b/drivers/net/ethernet/renesas/ravb.h
index 86a1eb0634e8..80e62ca2e3d3 100644
--- a/drivers/net/ethernet/renesas/ravb.h
+++ b/drivers/net/ethernet/renesas/ravb.h
@@ -864,7 +864,7 @@ enum GECMR_BIT {
/* The Ethernet AVB descriptor definitions. */
struct ravb_desc {
- __le16 ds; /* Descriptor size */
+ __le16 ds; /* Descriptor size */
u8 cc; /* Content control MSBs (reserved) */
u8 die_dt; /* Descriptor interrupt enable and type */
__le32 dptr; /* Descriptor pointer */
diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
index 69c50f81e1cb..805397088850 100644
--- a/drivers/net/ethernet/renesas/ravb_main.c
+++ b/drivers/net/ethernet/renesas/ravb_main.c
@@ -920,7 +920,7 @@ static int ravb_poll(struct napi_struct *napi, int budget)
if (ravb_rx(ndev, &quota, q))
goto out;
- /* Processing RX Descriptor Ring */
+ /* Processing TX Descriptor Ring */
spin_lock_irqsave(&priv->lock, flags);
/* Clear TX interrupt */
ravb_write(ndev, ~(mask | TIS_RESERVED), TIS);
diff --git a/drivers/net/ethernet/rocker/rocker_main.c b/drivers/net/ethernet/rocker/rocker_main.c
index a46633606cae..1f06b92ee5bb 100644
--- a/drivers/net/ethernet/rocker/rocker_main.c
+++ b/drivers/net/ethernet/rocker/rocker_main.c
@@ -2715,7 +2715,7 @@ static void
rocker_fdb_offload_notify(struct rocker_port *rocker_port,
struct switchdev_notifier_fdb_info *recv_info)
{
- struct switchdev_notifier_fdb_info info;
+ struct switchdev_notifier_fdb_info info = {};
info.addr = recv_info->addr;
info.vid = recv_info->vid;
diff --git a/drivers/net/ethernet/rocker/rocker_ofdpa.c b/drivers/net/ethernet/rocker/rocker_ofdpa.c
index 967a634ee9ac..e33a9d283a4e 100644
--- a/drivers/net/ethernet/rocker/rocker_ofdpa.c
+++ b/drivers/net/ethernet/rocker/rocker_ofdpa.c
@@ -1822,7 +1822,7 @@ static void ofdpa_port_fdb_learn_work(struct work_struct *work)
container_of(work, struct ofdpa_fdb_learn_work, work);
bool removing = (lw->flags & OFDPA_OP_FLAG_REMOVE);
bool learned = (lw->flags & OFDPA_OP_FLAG_LEARNED);
- struct switchdev_notifier_fdb_info info;
+ struct switchdev_notifier_fdb_info info = {};
info.addr = lw->addr;
info.vid = lw->vid;
diff --git a/drivers/net/ethernet/sfc/efx_channels.c b/drivers/net/ethernet/sfc/efx_channels.c
index a3ca406a3561..e5b0d795c301 100644
--- a/drivers/net/ethernet/sfc/efx_channels.c
+++ b/drivers/net/ethernet/sfc/efx_channels.c
@@ -152,6 +152,7 @@ static int efx_allocate_msix_channels(struct efx_nic *efx,
* maximum size.
*/
tx_per_ev = EFX_MAX_EVQ_SIZE / EFX_TXQ_MAX_ENT(efx);
+ tx_per_ev = min(tx_per_ev, EFX_MAX_TXQ_PER_CHANNEL);
n_xdp_tx = num_possible_cpus();
n_xdp_ev = DIV_ROUND_UP(n_xdp_tx, tx_per_ev);
@@ -169,6 +170,8 @@ static int efx_allocate_msix_channels(struct efx_nic *efx,
netif_err(efx, drv, efx->net_dev,
"Insufficient resources for %d XDP event queues (%d other channels, max %d)\n",
n_xdp_ev, n_channels, max_channels);
+ netif_err(efx, drv, efx->net_dev,
+ "XDP_TX and XDP_REDIRECT will not work on this interface");
efx->n_xdp_channels = 0;
efx->xdp_tx_per_channel = 0;
efx->xdp_tx_queue_count = 0;
@@ -176,12 +179,14 @@ static int efx_allocate_msix_channels(struct efx_nic *efx,
netif_err(efx, drv, efx->net_dev,
"Insufficient resources for %d XDP TX queues (%d other channels, max VIs %d)\n",
n_xdp_tx, n_channels, efx->max_vis);
+ netif_err(efx, drv, efx->net_dev,
+ "XDP_TX and XDP_REDIRECT will not work on this interface");
efx->n_xdp_channels = 0;
efx->xdp_tx_per_channel = 0;
efx->xdp_tx_queue_count = 0;
} else {
efx->n_xdp_channels = n_xdp_ev;
- efx->xdp_tx_per_channel = EFX_MAX_TXQ_PER_CHANNEL;
+ efx->xdp_tx_per_channel = tx_per_ev;
efx->xdp_tx_queue_count = n_xdp_tx;
n_channels += n_xdp_ev;
netif_dbg(efx, drv, efx->net_dev,
@@ -891,18 +896,20 @@ int efx_set_channels(struct efx_nic *efx)
if (efx_channel_is_xdp_tx(channel)) {
efx_for_each_channel_tx_queue(tx_queue, channel) {
tx_queue->queue = next_queue++;
- netif_dbg(efx, drv, efx->net_dev, "Channel %u TXQ %u is XDP %u, HW %u\n",
- channel->channel, tx_queue->label,
- xdp_queue_number, tx_queue->queue);
+
/* We may have a few left-over XDP TX
* queues owing to xdp_tx_queue_count
* not dividing evenly by EFX_MAX_TXQ_PER_CHANNEL.
* We still allocate and probe those
* TXQs, but never use them.
*/
- if (xdp_queue_number < efx->xdp_tx_queue_count)
+ if (xdp_queue_number < efx->xdp_tx_queue_count) {
+ netif_dbg(efx, drv, efx->net_dev, "Channel %u TXQ %u is XDP %u, HW %u\n",
+ channel->channel, tx_queue->label,
+ xdp_queue_number, tx_queue->queue);
efx->xdp_tx_queues[xdp_queue_number] = tx_queue;
- xdp_queue_number++;
+ xdp_queue_number++;
+ }
}
} else {
efx_for_each_channel_tx_queue(tx_queue, channel) {
@@ -914,8 +921,7 @@ int efx_set_channels(struct efx_nic *efx)
}
}
}
- if (xdp_queue_number)
- efx->xdp_tx_queue_count = xdp_queue_number;
+ WARN_ON(xdp_queue_number != efx->xdp_tx_queue_count);
rc = netif_set_real_num_tx_queues(efx->net_dev, efx->n_tx_channels);
if (rc)
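A worked check of the XDP event-queue sizing fixed above, with made-up sizes: clamping tx_per_ev to the per-channel TXQ limit before the DIV_ROUND_UP keeps xdp_tx_per_channel honest about how many queues one event queue can actually serve (all constants below are illustrative, not sfc's):

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
        unsigned int evq_size = 16384, txq_max_ent = 4096; /* illustrative */
        unsigned int max_txq_per_channel = 2;              /* illustrative */
        unsigned int n_xdp_tx = 7;      /* one XDP TXQ per possible CPU */

        unsigned int tx_per_ev = evq_size / txq_max_ent;   /* 4 */
        if (tx_per_ev > max_txq_per_channel)
                tx_per_ev = max_txq_per_channel;           /* clamp to 2 */

        /* 7 XDP TX queues at 2 per channel -> 4 XDP event queues */
        printf("%u\n", DIV_ROUND_UP(n_xdp_tx, tx_per_ev));
        return 0;
}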
diff --git a/drivers/net/ethernet/sis/sis900.c b/drivers/net/ethernet/sis/sis900.c
index ca9c00b7f588..cff87de9178a 100644
--- a/drivers/net/ethernet/sis/sis900.c
+++ b/drivers/net/ethernet/sis/sis900.c
@@ -443,7 +443,7 @@ static int sis900_probe(struct pci_dev *pci_dev,
#endif
/* setup various bits in PCI command register */
- ret = pci_enable_device(pci_dev);
+ ret = pcim_enable_device(pci_dev);
if(ret) return ret;
i = dma_set_mask(&pci_dev->dev, DMA_BIT_MASK(32));
@@ -469,7 +469,7 @@ static int sis900_probe(struct pci_dev *pci_dev,
ioaddr = pci_iomap(pci_dev, 0, 0);
if (!ioaddr) {
ret = -ENOMEM;
- goto err_out_cleardev;
+ goto err_out;
}
sis_priv = netdev_priv(net_dev);
@@ -581,8 +581,6 @@ err_unmap_tx:
sis_priv->tx_ring_dma);
err_out_unmap:
pci_iounmap(pci_dev, ioaddr);
-err_out_cleardev:
- pci_release_regions(pci_dev);
err_out:
free_netdev(net_dev);
return ret;
@@ -2499,7 +2497,6 @@ static void sis900_remove(struct pci_dev *pci_dev)
sis_priv->tx_ring_dma);
pci_iounmap(pci_dev, sis_priv->ioaddr);
free_netdev(net_dev);
- pci_release_regions(pci_dev);
}
static int __maybe_unused sis900_suspend(struct device *dev)
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c
index e108b0d2bd28..4c9a37dd0d3f 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c
@@ -49,9 +49,9 @@ static int loongson_dwmac_probe(struct pci_dev *pdev, const struct pci_device_id
{
struct plat_stmmacenet_data *plat;
struct stmmac_resources res;
- bool mdio = false;
- int ret, i;
struct device_node *np;
+ int ret, i, phy_mode;
+ bool mdio = false;
np = dev_of_node(&pdev->dev);
@@ -108,10 +108,11 @@ static int loongson_dwmac_probe(struct pci_dev *pdev, const struct pci_device_id
if (plat->bus_id < 0)
plat->bus_id = pci_dev_id(pdev);
- plat->phy_interface = device_get_phy_mode(&pdev->dev);
- if (plat->phy_interface < 0)
+ phy_mode = device_get_phy_mode(&pdev->dev);
+ if (phy_mode < 0)
dev_err(&pdev->dev, "phy_mode not found\n");
+ plat->phy_interface = phy_mode;
plat->interface = PHY_INTERFACE_MODE_GMII;
pci_set_master(pdev);
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
index 67ba083eb90c..b21745368983 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
@@ -1249,6 +1249,7 @@ const struct stmmac_ops dwmac410_ops = {
.config_l3_filter = dwmac4_config_l3_filter,
.config_l4_filter = dwmac4_config_l4_filter,
.est_configure = dwmac5_est_configure,
+ .est_irq_status = dwmac5_est_irq_status,
.fpe_configure = dwmac5_fpe_configure,
.fpe_send_mpacket = dwmac5_fpe_send_mpacket,
.fpe_irq_status = dwmac5_fpe_irq_status,
@@ -1300,6 +1301,7 @@ const struct stmmac_ops dwmac510_ops = {
.config_l3_filter = dwmac4_config_l3_filter,
.config_l4_filter = dwmac4_config_l4_filter,
.est_configure = dwmac5_est_configure,
+ .est_irq_status = dwmac5_est_irq_status,
.fpe_configure = dwmac5_fpe_configure,
.fpe_send_mpacket = dwmac5_fpe_send_mpacket,
.fpe_irq_status = dwmac5_fpe_irq_status,
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
index e735134e8487..fcdb1d20389b 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
@@ -349,6 +349,9 @@ void stmmac_enable_rx_queue(struct stmmac_priv *priv, u32 queue);
void stmmac_disable_tx_queue(struct stmmac_priv *priv, u32 queue);
void stmmac_enable_tx_queue(struct stmmac_priv *priv, u32 queue);
int stmmac_xsk_wakeup(struct net_device *dev, u32 queue, u32 flags);
+struct timespec64 stmmac_calc_tas_basetime(ktime_t old_base_time,
+ ktime_t current_time,
+ u64 cycle_time);
#if IS_ENABLED(CONFIG_STMMAC_SELFTESTS)
void stmmac_selftest_run(struct net_device *dev,
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 8d9d6ecf8c63..7b8404a21544 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -7171,6 +7171,7 @@ int stmmac_suspend(struct device *dev)
priv->plat->rx_queues_to_use, false);
stmmac_fpe_handshake(priv, false);
+ stmmac_fpe_stop_wq(priv);
}
priv->speed = SPEED_UNKNOWN;
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
index 072eff8079d0..5ca710844cc1 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
@@ -397,6 +397,7 @@ stmmac_probe_config_dt(struct platform_device *pdev, u8 *mac)
struct device_node *np = pdev->dev.of_node;
struct plat_stmmacenet_data *plat;
struct stmmac_dma_cfg *dma_cfg;
+ int phy_mode;
void *ret;
int rc;
@@ -412,10 +413,11 @@ stmmac_probe_config_dt(struct platform_device *pdev, u8 *mac)
eth_zero_addr(mac);
}
- plat->phy_interface = device_get_phy_mode(&pdev->dev);
- if (plat->phy_interface < 0)
- return ERR_PTR(plat->phy_interface);
+ phy_mode = device_get_phy_mode(&pdev->dev);
+ if (phy_mode < 0)
+ return ERR_PTR(phy_mode);
+ plat->phy_interface = phy_mode;
plat->interface = stmmac_of_get_mac_mode(np);
if (plat->interface < 0)
plat->interface = plat->phy_interface;
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c
index 4e86cdf2bc9f..580cc035536b 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c
@@ -62,7 +62,8 @@ static int stmmac_adjust_time(struct ptp_clock_info *ptp, s64 delta)
u32 sec, nsec;
u32 quotient, reminder;
int neg_adj = 0;
- bool xmac;
+ bool xmac, est_rst = false;
+ int ret;
xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
@@ -75,10 +76,48 @@ static int stmmac_adjust_time(struct ptp_clock_info *ptp, s64 delta)
sec = quotient;
nsec = reminder;
+ /* If EST is enabled, disable it before adjusting the PTP time. */
+ if (priv->plat->est && priv->plat->est->enable) {
+ est_rst = true;
+ mutex_lock(&priv->plat->est->lock);
+ priv->plat->est->enable = false;
+ stmmac_est_configure(priv, priv->ioaddr, priv->plat->est,
+ priv->plat->clk_ptp_rate);
+ mutex_unlock(&priv->plat->est->lock);
+ }
+
spin_lock_irqsave(&priv->ptp_lock, flags);
stmmac_adjust_systime(priv, priv->ptpaddr, sec, nsec, neg_adj, xmac);
spin_unlock_irqrestore(&priv->ptp_lock, flags);
+ /* Calculate the new basetime and re-configure EST after the PTP time adjustment. */
+ if (est_rst) {
+ struct timespec64 current_time, time;
+ ktime_t current_time_ns, basetime;
+ u64 cycle_time;
+
+ mutex_lock(&priv->plat->est->lock);
+ priv->ptp_clock_ops.gettime64(&priv->ptp_clock_ops, &current_time);
+ current_time_ns = timespec64_to_ktime(current_time);
+ time.tv_nsec = priv->plat->est->btr_reserve[0];
+ time.tv_sec = priv->plat->est->btr_reserve[1];
+ basetime = timespec64_to_ktime(time);
+ cycle_time = priv->plat->est->ctr[1] * NSEC_PER_SEC +
+ priv->plat->est->ctr[0];
+ time = stmmac_calc_tas_basetime(basetime,
+ current_time_ns,
+ cycle_time);
+
+ priv->plat->est->btr[0] = (u32)time.tv_nsec;
+ priv->plat->est->btr[1] = (u32)time.tv_sec;
+ priv->plat->est->enable = true;
+ ret = stmmac_est_configure(priv, priv->ioaddr, priv->plat->est,
+ priv->plat->clk_ptp_rate);
+ mutex_unlock(&priv->plat->est->lock);
+ if (ret)
+ netdev_err(priv->dev, "failed to configure EST\n");
+ }
+
return 0;
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
index 92dab609d4f8..4f3b6437b114 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
@@ -711,12 +711,35 @@ static int tc_setup_cls(struct stmmac_priv *priv,
return ret;
}
+struct timespec64 stmmac_calc_tas_basetime(ktime_t old_base_time,
+ ktime_t current_time,
+ u64 cycle_time)
+{
+ struct timespec64 time;
+
+ if (ktime_after(old_base_time, current_time)) {
+ time = ktime_to_timespec64(old_base_time);
+ } else {
+ s64 n;
+ ktime_t base_time;
+
+ n = div64_s64(ktime_sub_ns(current_time, old_base_time),
+ cycle_time);
+ base_time = ktime_add_ns(old_base_time,
+ (n + 1) * cycle_time);
+
+ time = ktime_to_timespec64(base_time);
+ }
+
+ return time;
+}
+
static int tc_setup_taprio(struct stmmac_priv *priv,
struct tc_taprio_qopt_offload *qopt)
{
u32 size, wid = priv->dma_cap.estwid, dep = priv->dma_cap.estdep;
struct plat_stmmacenet_data *plat = priv->plat;
- struct timespec64 time, current_time;
+ struct timespec64 time, current_time, qopt_time;
ktime_t current_time_ns;
bool fpe = false;
int i, ret = 0;
@@ -773,14 +796,18 @@ static int tc_setup_taprio(struct stmmac_priv *priv,
GFP_KERNEL);
if (!plat->est)
return -ENOMEM;
+
+ mutex_init(&priv->plat->est->lock);
} else {
memset(plat->est, 0, sizeof(*plat->est));
}
size = qopt->num_entries;
+ mutex_lock(&priv->plat->est->lock);
priv->plat->est->gcl_size = size;
priv->plat->est->enable = qopt->enable;
+ mutex_unlock(&priv->plat->est->lock);
for (i = 0; i < size; i++) {
s64 delta_ns = qopt->entries[i].interval;
@@ -811,32 +838,28 @@ static int tc_setup_taprio(struct stmmac_priv *priv,
priv->plat->est->gcl[i] = delta_ns | (gates << wid);
}
+ mutex_lock(&priv->plat->est->lock);
/* Adjust for real system time */
priv->ptp_clock_ops.gettime64(&priv->ptp_clock_ops, &current_time);
current_time_ns = timespec64_to_ktime(current_time);
- if (ktime_after(qopt->base_time, current_time_ns)) {
- time = ktime_to_timespec64(qopt->base_time);
- } else {
- ktime_t base_time;
- s64 n;
-
- n = div64_s64(ktime_sub_ns(current_time_ns, qopt->base_time),
- qopt->cycle_time);
- base_time = ktime_add_ns(qopt->base_time,
- (n + 1) * qopt->cycle_time);
-
- time = ktime_to_timespec64(base_time);
- }
+ time = stmmac_calc_tas_basetime(qopt->base_time, current_time_ns,
+ qopt->cycle_time);
priv->plat->est->btr[0] = (u32)time.tv_nsec;
priv->plat->est->btr[1] = (u32)time.tv_sec;
+ qopt_time = ktime_to_timespec64(qopt->base_time);
+ priv->plat->est->btr_reserve[0] = (u32)qopt_time.tv_nsec;
+ priv->plat->est->btr_reserve[1] = (u32)qopt_time.tv_sec;
+
ctr = qopt->cycle_time;
priv->plat->est->ctr[0] = do_div(ctr, NSEC_PER_SEC);
priv->plat->est->ctr[1] = (u32)ctr;
- if (fpe && !priv->dma_cap.fpesel)
+ if (fpe && !priv->dma_cap.fpesel) {
+ mutex_unlock(&priv->plat->est->lock);
return -EOPNOTSUPP;
+ }
/* Actual FPE register configuration will be done after FPE handshake
* is success.
@@ -845,6 +868,7 @@ static int tc_setup_taprio(struct stmmac_priv *priv,
ret = stmmac_est_configure(priv, priv->ioaddr, priv->plat->est,
priv->plat->clk_ptp_rate);
+ mutex_unlock(&priv->plat->est->lock);
if (ret) {
netdev_err(priv->dev, "failed to configure EST\n");
goto disable;
@@ -860,9 +884,11 @@ static int tc_setup_taprio(struct stmmac_priv *priv,
return 0;
disable:
+ mutex_lock(&priv->plat->est->lock);
priv->plat->est->enable = false;
stmmac_est_configure(priv, priv->ioaddr, priv->plat->est,
priv->plat->clk_ptp_rate);
+ mutex_unlock(&priv->plat->est->lock);
priv->plat->fpe_cfg->enable = false;
stmmac_fpe_configure(priv, priv->ioaddr,
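The helper extracted above rolls a stale base time forward by whole cycles: if the old base time is already in the past, it becomes old + (n + 1) * cycle, landing strictly after the current time. A standalone check of that rule, using plain signed-nanosecond arithmetic in place of ktime_t:

#include <inttypes.h>
#include <stdio.h>

static int64_t roll_forward(int64_t base, int64_t now, int64_t cycle)
{
        if (base > now)                 /* still in the future: keep it */
                return base;
        /* truncating division gives n full cycles elapsed; advance n+1 */
        return base + ((now - base) / cycle + 1) * cycle;
}

int main(void)
{
        /* base 1s, now 10.5s, cycle 2s -> n = 4, new base = 11s */
        printf("%" PRId64 "\n",
               roll_forward(1000000000LL, 10500000000LL, 2000000000LL));
        return 0;
}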
diff --git a/drivers/net/ethernet/sun/niu.c b/drivers/net/ethernet/sun/niu.c
index 74e748662ec0..860644d182ab 100644
--- a/drivers/net/ethernet/sun/niu.c
+++ b/drivers/net/ethernet/sun/niu.c
@@ -8191,8 +8191,9 @@ static int niu_pci_vpd_fetch(struct niu *np, u32 start)
err = niu_pci_vpd_scan_props(np, here, end);
if (err < 0)
return err;
+ /* err == 1 is not an error */
if (err == 1)
- return -EINVAL;
+ return 0;
}
return 0;
}
diff --git a/drivers/net/ethernet/ti/am65-cpsw-nuss.c b/drivers/net/ethernet/ti/am65-cpsw-nuss.c
index 718539cdd2f2..67a08cbba859 100644
--- a/drivers/net/ethernet/ti/am65-cpsw-nuss.c
+++ b/drivers/net/ethernet/ti/am65-cpsw-nuss.c
@@ -2060,8 +2060,12 @@ static void am65_cpsw_port_offload_fwd_mark_update(struct am65_cpsw_common *comm
for (i = 1; i <= common->port_num; i++) {
struct am65_cpsw_port *port = am65_common_get_port(common, i);
- struct am65_cpsw_ndev_priv *priv = am65_ndev_to_priv(port->ndev);
+ struct am65_cpsw_ndev_priv *priv;
+ if (!port->ndev)
+ continue;
+
+ priv = am65_ndev_to_priv(port->ndev);
priv->offload_fwd_mark = set_val;
}
}
diff --git a/drivers/net/ethernet/ti/am65-cpsw-switchdev.c b/drivers/net/ethernet/ti/am65-cpsw-switchdev.c
index 9c29b363e9ae..599708a3e81d 100644
--- a/drivers/net/ethernet/ti/am65-cpsw-switchdev.c
+++ b/drivers/net/ethernet/ti/am65-cpsw-switchdev.c
@@ -358,7 +358,7 @@ static int am65_cpsw_port_obj_del(struct net_device *ndev, const void *ctx,
static void am65_cpsw_fdb_offload_notify(struct net_device *ndev,
struct switchdev_notifier_fdb_info *rcv)
{
- struct switchdev_notifier_fdb_info info;
+ struct switchdev_notifier_fdb_info info = {};
info.addr = rcv->addr;
info.vid = rcv->vid;
diff --git a/drivers/net/ethernet/ti/cpsw_new.c b/drivers/net/ethernet/ti/cpsw_new.c
index 57d279fdcc9f..d1d02001cef6 100644
--- a/drivers/net/ethernet/ti/cpsw_new.c
+++ b/drivers/net/ethernet/ti/cpsw_new.c
@@ -920,7 +920,7 @@ static netdev_tx_t cpsw_ndo_start_xmit(struct sk_buff *skb,
struct cpdma_chan *txch;
int ret, q_idx;
- if (skb_padto(skb, CPSW_MIN_PACKET_SIZE)) {
+ if (skb_put_padto(skb, READ_ONCE(priv->tx_packet_min))) {
cpsw_err(priv, tx_err, "packet pad failed\n");
ndev->stats.tx_dropped++;
return NET_XMIT_DROP;
@@ -1100,7 +1100,7 @@ static int cpsw_ndo_xdp_xmit(struct net_device *ndev, int n,
for (i = 0; i < n; i++) {
xdpf = frames[i];
- if (xdpf->len < CPSW_MIN_PACKET_SIZE)
+ if (xdpf->len < READ_ONCE(priv->tx_packet_min))
break;
if (cpsw_xdp_tx_frame(priv, xdpf, NULL, priv->emac_port))
@@ -1389,6 +1389,7 @@ static int cpsw_create_ports(struct cpsw_common *cpsw)
priv->dev = dev;
priv->msg_enable = netif_msg_init(debug_level, CPSW_DEBUG);
priv->emac_port = i + 1;
+ priv->tx_packet_min = CPSW_MIN_PACKET_SIZE;
if (is_valid_ether_addr(slave_data->mac_addr)) {
ether_addr_copy(priv->mac_addr, slave_data->mac_addr);
@@ -1686,6 +1687,7 @@ static int cpsw_dl_switch_mode_set(struct devlink *dl, u32 id,
priv = netdev_priv(sl_ndev);
slave->port_vlan = vlan;
+ WRITE_ONCE(priv->tx_packet_min, CPSW_MIN_PACKET_SIZE_VLAN);
if (netif_running(sl_ndev))
cpsw_port_add_switch_def_ale_entries(priv,
slave);
@@ -1714,6 +1716,7 @@ static int cpsw_dl_switch_mode_set(struct devlink *dl, u32 id,
priv = netdev_priv(slave->ndev);
slave->port_vlan = slave->data->dual_emac_res_vlan;
+ WRITE_ONCE(priv->tx_packet_min, CPSW_MIN_PACKET_SIZE);
cpsw_port_add_dual_emac_def_ale_entries(priv, slave);
}
diff --git a/drivers/net/ethernet/ti/cpsw_priv.h b/drivers/net/ethernet/ti/cpsw_priv.h
index a323bea54faa..2951fb7b9dae 100644
--- a/drivers/net/ethernet/ti/cpsw_priv.h
+++ b/drivers/net/ethernet/ti/cpsw_priv.h
@@ -89,7 +89,8 @@ do { \
#define CPSW_POLL_WEIGHT 64
#define CPSW_RX_VLAN_ENCAP_HDR_SIZE 4
-#define CPSW_MIN_PACKET_SIZE (VLAN_ETH_ZLEN)
+#define CPSW_MIN_PACKET_SIZE_VLAN (VLAN_ETH_ZLEN)
+#define CPSW_MIN_PACKET_SIZE (ETH_ZLEN)
#define CPSW_MAX_PACKET_SIZE (VLAN_ETH_FRAME_LEN +\
ETH_FCS_LEN +\
CPSW_RX_VLAN_ENCAP_HDR_SIZE)
@@ -380,6 +381,7 @@ struct cpsw_priv {
u32 emac_port;
struct cpsw_common *cpsw;
int offload_fwd_mark;
+ u32 tx_packet_min;
};
#define ndev_to_cpsw(ndev) (((struct cpsw_priv *)netdev_priv(ndev))->cpsw)
diff --git a/drivers/net/ethernet/ti/cpsw_switchdev.c b/drivers/net/ethernet/ti/cpsw_switchdev.c
index f7fb6e17dadd..a7d97d429e06 100644
--- a/drivers/net/ethernet/ti/cpsw_switchdev.c
+++ b/drivers/net/ethernet/ti/cpsw_switchdev.c
@@ -368,7 +368,7 @@ static int cpsw_port_obj_del(struct net_device *ndev, const void *ctx,
static void cpsw_fdb_offload_notify(struct net_device *ndev,
struct switchdev_notifier_fdb_info *rcv)
{
- struct switchdev_notifier_fdb_info info;
+ struct switchdev_notifier_fdb_info info = {};
info.addr = rcv->addr;
info.vid = rcv->vid;
diff --git a/drivers/net/ethernet/ti/tlan.c b/drivers/net/ethernet/ti/tlan.c
index 0b2ce4bdc2c3..e0cb713193ea 100644
--- a/drivers/net/ethernet/ti/tlan.c
+++ b/drivers/net/ethernet/ti/tlan.c
@@ -313,9 +313,8 @@ static void tlan_remove_one(struct pci_dev *pdev)
pci_release_regions(pdev);
#endif
- free_netdev(dev);
-
cancel_work_sync(&priv->tlan_tqueue);
+ free_netdev(dev);
}
static void tlan_start(struct net_device *dev)
diff --git a/drivers/net/ethernet/xscale/ptp_ixp46x.c b/drivers/net/ethernet/xscale/ptp_ixp46x.c
index 99d4d9439d05..a6fb88fd42f7 100644
--- a/drivers/net/ethernet/xscale/ptp_ixp46x.c
+++ b/drivers/net/ethernet/xscale/ptp_ixp46x.c
@@ -14,6 +14,8 @@
#include <linux/kernel.h>
#include <linux/ptp_clock_kernel.h>
#include <linux/soc/ixp4xx/cpu.h>
+#include <linux/module.h>
+#include <mach/ixp4xx-regs.h>
#include "ixp46x_ts.h"